diff --git a/.github/workflows/aarch64.yml b/.github/workflows/aarch64.yml deleted file mode 100644 index b4ce2d6b7c..0000000000 --- a/.github/workflows/aarch64.yml +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2022 Proofcraft Pty Ltd -# -# SPDX-License-Identifier: BSD-2-Clause - -# This workflow is for ongoing seL4/AArch64 verification. - -name: AArch64 Proofs - -on: - push: - paths-ignore: - - '**.md' - - '**.txt' - branches: - - aarch64 - # this action needs access to secrets. - # The actual test runs in a no-privilege VM, so it's Ok to run on untrusted PRs. - pull_request_target: - paths-ignore: - - '**.md' - - '**.txt' - branches: - - aarch64 - -jobs: - all: - name: All - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - arch: [AARCH64] - # test only most recent push to PR: - concurrency: l4v-pr-${{ github.event.number }}-idx-${{ strategy.job-index }} - steps: - - name: Proofs - uses: seL4/ci-actions/aws-proofs@master - with: - L4V_ARCH: ${{ matrix.arch }} - token: ${{ secrets.READ_TOKEN }} - cache_bucket: ${{ secrets.CACHE_BUCKET }} - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_SSH: ${{ secrets.AWS_SSH }} - GH_HEAD_SHA: ${{ github.event.pull_request.head.sha }} - - name: Upload logs - uses: actions/upload-artifact@v2 - with: - name: logs-${{ matrix.arch }} - path: logs.tar.xz diff --git a/.github/workflows/binary.yml b/.github/workflows/binary.yml deleted file mode 100644 index 2d7ed5fe71..0000000000 --- a/.github/workflows/binary.yml +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright 2022 Kry10 Limited -# -# SPDX-License-Identifier: BSD-2-Clause - -name: Prepare binary verification - -on: - repository_dispatch: - types: - - binary-verification - workflow_dispatch: - inputs: - repo: - description: 'Repository' - required: true - default: 'seL4/l4v' - run_id: - description: 'Workflow run ID' - required: true - -jobs: - artifacts: - name: Initialise artifacts - runs-on: ubuntu-latest - outputs: - enabled_configs: ${{ steps.prepare.outputs.enabled_configs }} - sel4_commit: ${{ steps.prepare.outputs.sel4_commit }} - graph_refine_commit: ${{ steps.prepare.outputs.graph_refine_commit }} - isabelle_commit: ${{ steps.prepare.outputs.isabelle_commit }} - steps: - - name: Retrieve artifacts - id: retrieve - uses: actions/github-script@v6 - with: - github-token: ${{ secrets.PRIV_REPO_TOKEN }} - script: | - const inputs = (function() { - if ("${{ github.event_name }}" === "repository_dispatch") { - return { repo: "${{ github.event.client_payload.repo }}", - run_id: "${{ github.event.client_payload.run_id }}" }; - } else { - return { repo: "${{ github.event.inputs.repo }}", - run_id: "${{ github.event.inputs.run_id }}" }; - } - })(); - console.log(`Triggered by ${inputs.repo} run_id ${inputs.run_id}`) - console.log(`::set-output name=trigger_repo::${inputs.repo}`) - console.log(`::set-output name=trigger_run_id::${inputs.run_id}`) - const repo_parts = inputs.repo.split("/"); - const bv_artifacts = await (async function() { - console.log("::group::Waiting for artifacts"); - try { - // Wait up to 10 minutes for artifacts to appear, in case the - // triggering workflow isn't finished yet. 
- for (let attempt = 0; attempt < 60; attempt++) { - const all_artifacts = await github.rest.actions.listWorkflowRunArtifacts({ - owner: repo_parts[0], - repo: repo_parts[1], - run_id: inputs.run_id, - }); - const bv_artifacts = all_artifacts.data.artifacts.filter((artifact) => { - const name = artifact.name; - return name === "manifest" || name === "c-graph-lang"; - }); - if (bv_artifacts.length === 2) { - console.log("Artifacts found"); - return bv_artifacts; - } - console.log("Waiting..."); - await new Promise(resolve => setTimeout(resolve, 10000)); - } - throw "Expected artifacts not found"; - } - finally { - console.log("::endgroup::"); - } - })(); - const fs = require('fs/promises'); - console.log("::group::Downloading artifacts"); - const files = bv_artifacts.map(async function(artifact) { - let download = await github.rest.actions.downloadArtifact({ - owner: repo_parts[0], - repo: repo_parts[1], - artifact_id: artifact.id, - archive_format: 'zip', - }); - return await fs.writeFile( - `${process.env.GITHUB_WORKSPACE}/${artifact.name}.zip`, - Buffer.from(download.data), - ); - }); - await Promise.all(files); - console.log("Artifacts downloaded"); - console.log("::endgroup::"); - - name: Checkout graph-refine - uses: actions/checkout@v3 - with: - repository: seL4/graph-refine - # We currently use the ci-riscv64 branch for decompiling both ARM and RISCV64. - # We checkout here just to ensure that all matrix jobs use the same graph-refine commit. - ref: ci-riscv64 - path: graph-refine - - name: Prepare graph-refine job structure - id: prepare - env: - L4V_COMMIT: ${{ github.sha }} - TRIGGER_REPO: ${{ steps.retrieve.outputs.trigger_repo }} - TRIGGER_RUN_ID: ${{ steps.retrieve.outputs.trigger_run_id }} - WORKFLOW_REPO: ${{ github.repository }} - WORKFLOW_RUN_ID: ${{ github.run_id }} - run: | - # Unpack and reorganise artifacts fetched from the triggering workflow. - unzip -q manifest.zip - # Freeze the seL4 and graph-refine commits for all matrix jobs. - # TODO; Freeze the version of the decompiler Docker image used. - sudo apt-get update && sudo apt-get install libxml2-utils - SEL4_COMMIT=$(xmllint --xpath 'string(//project[@name="seL4"]/@revision)' verification-manifest.xml) - ISA_COMMIT=$(xmllint --xpath 'string(//project[@name="isabelle"]/@revision)' verification-manifest.xml) - GRAPH_REFINE_COMMIT=$(git -C graph-refine rev-parse --verify HEAD) - echo "::set-output name=sel4_commit::${SEL4_COMMIT}" - echo "::set-output name=graph_refine_commit::${GRAPH_REFINE_COMMIT}" - echo "::set-output name=isabelle_commit::${ISA_COMMIT}" - ( - echo "triggered-by:" - echo " repo: ${TRIGGER_REPO}" - echo " run_id: ${TRIGGER_RUN_ID}" - echo "workflow:" - echo " repo: ${WORKFLOW_REPO}" - echo " run_id: ${WORKFLOW_RUN_ID}" - echo "commits:" - echo " graph-refine: ${GRAPH_REFINE_COMMIT}" - echo " l4v: ${L4V_COMMIT}" - ) > decompile-manifest.yaml - # Check if we got any C graph-lang from the triggering workflow. - if [ -f c-graph-lang.zip ]; then - unzip -q -d simpl-export c-graph-lang.zip - # Filter out any configurations that we won't attempt to run, - # and reorganise into the shape that graph-refine expects. 
- for ARCH in ARM RISCV64; do - for API in "" MCS; do - ARCH_API="${ARCH}${API:+-${API}}" - C_FUNCTIONS="simpl-export/CFunctions-${ARCH_API}.txt" - if [ -f "${C_FUNCTIONS}" ]; then - mkdir -p "config-list/${ARCH_API}" - for OPT in O1 O2; do - TARGET="configs/${ARCH_API}-${OPT}/target" - mkdir -p "${TARGET}" - cp "${C_FUNCTIONS}" "${TARGET}/CFunctions.txt" - done - fi - done - done - if [ -d config-list ]; then - ENABLED_CONFIGS=$(ls config-list | perl -pe 's/%/%25/g; s/\n/%0A/g') - echo "::set-output name=enabled_configs::${ENABLED_CONFIGS}" - fi - fi - if [ -n "${ENABLED_CONFIGS}" ]; then - echo "C graph-lang found for configs:" $(ls config-list) - else - echo "No C graph-lang found, nothing to do" - fi - - name: Initialise output artifact - if: ${{ steps.prepare.outputs.enabled_configs }} - uses: actions/upload-artifact@v3 - with: - name: graph-refine-targets - path: | - decompile-manifest.yaml - verification-manifest.xml - configs - # This workflow uses `nix-shell` to run commands in an environment - # specified by `shell.nix` in the root of the graph-refine repo - # (ci-riscv64 branch). That environment provides the tools needed to run - # the commands in this workflow. `nix-shell` will download and install - # packages to create the environment, building any packages that are not - # present in a binary package cache. `nix-shell` is part of the Nix - # package manager (nixos.org). `install-nix-action` installs the Nix - # package manager, and configures it to use the `nixpkgs` collection of - # packages and the nixos.org binary package cache. `cachix-action` sets - # up an additional custom package cache provided by cachix.org, so that - # any packages built by `nix-shell` are saved for future `nix-shell` - # invocations. The following steps prime the cache, so that any package - # builds are peformed once here instead of in every parallel matrix job. - # In a future iteration, it would be good to pull this out into a - # separate workflow that builds a Docker image with all the tools needed - # for this workflow, and have this workflow use the Docker image without - # `nix-shell`. - - name: Install Nix - if: ${{ steps.prepare.outputs.enabled_configs }} - uses: cachix/install-nix-action@v16 - with: - nix_path: nixpkgs=channel:nixos-unstable - - name: Install Cachix - if: ${{ steps.prepare.outputs.enabled_configs }} - uses: cachix/cachix-action@v10 - with: - name: sel4-bv - authToken: ${{ secrets.BV_CACHIX_AUTH_TOKEN }} - - name: Prime the Nix cache - working-directory: graph-refine - run: nix-shell --run 'echo "Nix cache is primed"' - - decompilation: - name: Decompile - needs: artifacts - runs-on: ubuntu-latest - # `if` applies to the whole matrix, not to individual jobs within the matrix. - if: ${{ needs.artifacts.outputs.enabled_configs }} - strategy: - fail-fast: false - matrix: - arch: [ARM, RISCV64] - features: ["", MCS] - optimise: ["-O1", "-O2"] - steps: - # It would be nice if there was a way to prevent the job from starting. 
- - name: Check enabled - id: enabled - shell: bash - env: - ARCH: ${{ matrix.arch }} - FEATURES: ${{ matrix.features }} - OPTIMISE: ${{ matrix.optimise }} - ENABLED: ${{ needs.artifacts.outputs.enabled_configs }} - run: | - # Check whether this configuration is enabled - CONFIG="${ARCH}${FEATURES:+-${FEATURES}}" - if grep -qx "${CONFIG}" <<< "${ENABLED}"; then - echo "C graph-lang found for ${CONFIG}, proceeding with decompilation" - echo "::set-output name=config::${CONFIG}" - echo "::set-output name=target::${CONFIG}${OPTIMISE}" - else - echo "No C graph-lang found for ${CONFIG}, skipping decompilation" - fi - - name: Checkout l4v - uses: actions/checkout@v3 - # We use the ref on which this workflow was triggered, - # not the one that caused the trigger. - with: - path: l4v - - name: Checkout Isabelle - uses: actions/checkout@v3 - with: - repository: seL4/isabelle - ref: ${{ needs.artifacts.outputs.isabelle_commit }} - path: isabelle - - name: Checkout seL4 - uses: actions/checkout@v3 - with: - repository: seL4/seL4 - ref: ${{ needs.artifacts.outputs.sel4_commit }} - path: seL4 - - name: Checkout graph-refine - uses: actions/checkout@v3 - with: - repository: seL4/graph-refine - ref: ${{ needs.artifacts.outputs.graph_refine_commit }} - path: graph-refine - - name: Install Nix - if: steps.enabled.outputs.config - uses: cachix/install-nix-action@v16 - with: - nix_path: nixpkgs=channel:nixos-unstable - - name: Install Cachix - if: steps.enabled.outputs.config - uses: cachix/cachix-action@v10 - with: - name: sel4-bv - authToken: ${{ secrets.BV_CACHIX_AUTH_TOKEN }} - - name: Disable function clones - # TODO: Upstream this change to seL4. - # This disables some -O2 interprocedural optimisations that - # binary verificaation can't handle. - working-directory: seL4 - run: | - # Add compile options to gcc.cmake - ( echo - echo "# Binary verification cannot handle cloned functions." - echo "if(KernelVerificationBuild)" - echo " add_compile_options(-fno-partial-inlining -fno-ipa-cp -fno-ipa-sra)" - echo "endif()" - ) >> gcc.cmake - - name: Build target - if: steps.enabled.outputs.config - working-directory: graph-refine - shell: nix-shell --run "bash -eo pipefail {0}" - env: - L4V_ARCH: ${{ matrix.arch }} - L4V_FEATURES: ${{ matrix.features }} - CONFIG_OPTIMISATION_LEVEL: ${{ matrix.optimise }} - # upload-artifact will strip the `out` prefix. - TARGET_DIR: ${{ github.workspace }}/out/configs/${{ steps.enabled.outputs.target }}/target - run: | - # Build the graph-refine target - decompiler/setup-decompiler.py docker - make -C seL4-example ci_target - - name: Upload target - if: steps.enabled.outputs.config - uses: actions/upload-artifact@v3 - with: - name: graph-refine-targets - path: out - - submission: - name: Submit graph-refine job - needs: [artifacts, decompilation] - if: ${{ needs.artifacts.outputs.enabled_configs }} - runs-on: ubuntu-latest - steps: - - name: Checkout graph-refine - uses: actions/checkout@v3 - with: - repository: seL4/graph-refine - ref: ${{ needs.artifacts.outputs.graph_refine_commit }} - path: graph-refine - - name: Fetch targets - uses: actions/download-artifact@v3 - with: - name: graph-refine-targets - path: out - - name: Set up Python - uses: actions/setup-python@v2 - - name: Finalise graph-refine job - run: | - # Generate function lists. 
- for TARGET in out/configs/*/target; do - graph-refine/scripts/list_functions.py "${TARGET}" > "${TARGET}/functions.txt" - done - - name: Save final artifact - uses: actions/upload-artifact@v3 - with: - name: graph-refine-targets - path: out - - name: Submit graph-refine job - env: - BV_SSH_CONFIG: ${{ secrets.BV_SSH_CONFIG }} - BV_SSH_KEY: ${{ secrets.BV_SSH_KEY }} - BV_SSH_KNOWN_HOSTS: ${{ secrets.BV_SSH_KNOWN_HOSTS }} - BV_CI_JOB_DIR: graph-refine-work - shell: bash - run: graph-refine/scripts/ci-submit out diff --git a/.github/workflows/external.yml b/.github/workflows/external.yml index 2a24dc9ac9..621a4ededc 100644 --- a/.github/workflows/external.yml +++ b/.github/workflows/external.yml @@ -9,9 +9,11 @@ name: External on: schedule: - cron: '1 15 1,15 * *' # 15:01 UTC on 1st and 15th of month + # for testing: + workflow_dispatch: jobs: - all: + proofs: name: Proofs runs-on: ubuntu-latest strategy: @@ -29,8 +31,25 @@ jobs: AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_SSH: ${{ secrets.AWS_SSH }} GH_HEAD_SHA: ${{ github.event.pull_request.head.sha }} + - name: Upload kernel builds + uses: actions/upload-artifact@v4 + with: + name: kernel-builds-${{ matrix.arch }} + path: artifacts/kernel-builds + if-no-files-found: ignore - name: Upload logs - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: name: logs-${{ matrix.arch }} path: logs.tar.xz + + binary-verification: + name: Trigger BV + runs-on: ubuntu-latest + needs: proofs + steps: + - name: Trigger binary verification + uses: seL4/ci-actions/bv-trigger@master + with: + token: ${{ secrets.PRIV_REPO_TOKEN }} + tag: l4v/external diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 38df0eb247..55324001e1 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -48,7 +48,7 @@ jobs: - uses: seL4/ci-actions/thylint@master with: token: ${{ secrets.READ_TOKEN }} - - uses: yuzutech/annotations-action@v0.2.1 + - uses: yuzutech/annotations-action@v0.4.0 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" title: 'File annotations for theory linter' diff --git a/.github/workflows/proof-deploy.yml b/.github/workflows/proof-deploy.yml index baa47a828a..7dcfad6b55 100644 --- a/.github/workflows/proof-deploy.yml +++ b/.github/workflows/proof-deploy.yml @@ -13,6 +13,8 @@ on: repository_dispatch: types: - manifest-update + # for testing: + workflow_dispatch: jobs: code: @@ -35,6 +37,7 @@ jobs: fail-fast: false matrix: arch: [ARM, ARM_HYP, AARCH64, RISCV64, X64] + num_domains: ['1', ''] # test only most recent push: concurrency: l4v-regression-${{ github.ref }}-${{ strategy.job-index }} steps: @@ -43,20 +46,21 @@ jobs: with: L4V_ARCH: ${{ matrix.arch }} xml: ${{ needs.code.outputs.xml }} + NUM_DOMAINS: ${{ matrix.num_domains }} env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_SSH: ${{ secrets.AWS_SSH }} - - name: Upload C graph-lang - uses: actions/upload-artifact@v3 + - name: Upload kernel builds + uses: actions/upload-artifact@v4 with: - name: c-graph-lang - path: artifacts/simpl-export + name: kernel-builds-${{ matrix.num_domains }}-${{ matrix.arch }} + path: artifacts/kernel-builds if-no-files-found: ignore - name: Upload logs - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: logs-${{ matrix.arch }} + name: logs-${{ matrix.num_domains }}-${{ matrix.arch }} path: logs.tar.xz deploy: @@ -69,44 +73,44 @@ jobs: xml: ${{ needs.code.outputs.xml }} env: GH_SSH: ${{ secrets.CI_SSH }} 
+ - name: Trigger binary verification + uses: seL4/ci-actions/bv-trigger@master + with: + token: ${{ secrets.PRIV_REPO_TOKEN }} + tag: "l4v/proof-deploy/${{ github.event_name }}" - binary-verification: - name: Trigger binary verification +# Automatically rebase platform branches on pushes to master. +# This workflow here on the master branch attempts a git rebase of the platform +# branches listed in the build matrix below. If the rebase succeeds, the rebased +# branch is pushed under the name `<branch>-rebased`. This triggers the build +# workflow on the `<branch>-rebased` branch, which will run the proofs. If the +# proofs succeed, the `<branch>-rebased` branch is force-pushed over +# `<branch>`, becoming the new platform branch. + rebase: + name: Rebase platform branches runs-on: ubuntu-latest - needs: [code, proofs] + strategy: + fail-fast: false + matrix: + branch: [imx8-fpu-ver, exynos5-ver] steps: - # download-artifact doesn't have an option to ignore missing artifacts, - # so we download them all to test if c-graph-lang exists. - - name: Fetch artifacts - uses: actions/download-artifact@v3 - with: - path: artifacts - - name: Check for C graph-lang artifacts - id: enabled - env: - MANIFEST: ${{ needs.code.outputs.xml }} - run: | - # Check if there are any C graph-lang artifacts - if [ -e artifacts/c-graph-lang ]; then - echo "C graph-lang artifacts found, will trigger binary verification" - echo -n "${MANIFEST}" > verification-manifest.xml - echo "::set-output name=enabled::true" - else - echo "No C graph-lang artifacts found, will not trigger binary verification" - fi - - name: Upload manifest - if: steps.enabled.outputs.enabled - uses: actions/upload-artifact@v3 - with: - name: manifest - path: verification-manifest.xml - - name: Trigger binary verification - if: steps.enabled.outputs.enabled - uses: peter-evans/repository-dispatch@v1 + - name: Checkout + uses: actions/checkout@v4 with: + ref: ${{ matrix.branch }} + path: l4v-${{ matrix.branch }} + fetch-depth: 0 + # needed to trigger push actions on the -rebased branch + # (implicit GITHUB_TOKEN does not trigger further push actions). token: ${{ secrets.PRIV_REPO_TOKEN }} - repository: ${{ github.repository }} - event-type: binary-verification - # binary-verification uses the run_id to retrieve this workflow's artifacts.
- client-payload: | - { "repo": "${{ github.repository }}", "run_id": "${{ github.run_id }}" } + - name: Rebase + run: | + cd l4v-${{ matrix.branch }} + git config --global user.name "seL4 CI" + git config --global user.email "ci@sel4.systems" + git rebase origin/master + git status + - name: Push + run: | + cd l4v-${{ matrix.branch }} + git push -f origin HEAD:${{ matrix.branch }}-rebased diff --git a/.github/workflows/proof.yml b/.github/workflows/proof.yml index 1fd0d4826e..02bb6ce83a 100644 --- a/.github/workflows/proof.yml +++ b/.github/workflows/proof.yml @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: BSD-2-Clause -name: Proofs +name: Proof PR on: push: @@ -17,11 +17,15 @@ on: paths-ignore: - '**.md' - '**.txt' - branches-ignore: - - aarch64 + workflow_dispatch: + inputs: + NUM_DOMAINS: + description: 'Number of domains to test' + type: number + default: 16 jobs: - all: + proofs: name: All runs-on: ubuntu-latest strategy: @@ -35,14 +39,24 @@ jobs: uses: seL4/ci-actions/aws-proofs@master with: L4V_ARCH: ${{ matrix.arch }} + NUM_DOMAINS: ${{ inputs.NUM_DOMAINS }} + skip_dups: true session: '-x AutoCorresSEL4' # exclude large AutoCorresSEL4 session for PRs + token: ${{ secrets.READ_TOKEN }} + cache_bucket: ${{ secrets.CACHE_BUCKET }} env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_SSH: ${{ secrets.AWS_SSH }} GH_HEAD_SHA: ${{ github.event.pull_request.head.sha }} + - name: Upload kernel builds + uses: actions/upload-artifact@v4 + with: + name: kernel-builds-${{ matrix.arch }} + path: artifacts/kernel-builds + if-no-files-found: ignore - name: Upload logs - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: name: logs-${{ matrix.arch }} path: logs.tar.xz diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 355bede7ba..a3a3317ec9 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -12,6 +12,8 @@ on: - rt - aarch64 pull_request: + # for testing: + workflow_dispatch: jobs: check: diff --git a/.github/workflows/trigger.yml b/.github/workflows/trigger.yml index cd8fb9abc8..e154db2091 100644 --- a/.github/workflows/trigger.yml +++ b/.github/workflows/trigger.yml @@ -20,8 +20,8 @@ jobs: name: CParser Deploy runs-on: ubuntu-latest steps: - - uses: peter-evans/repository-dispatch@v1 + - uses: peter-evans/repository-dispatch@v3 with: - token: ${{ secrets.SSRG_BAMBOO_REPO }} + token: ${{ secrets.PRIV_REPO_TOKEN }} repository: seL4/ci-actions event-type: cparser-deploy diff --git a/.github/workflows/weekly-clean.yml b/.github/workflows/weekly-clean.yml index 8e12d2ac54..9c1dfe9cc1 100644 --- a/.github/workflows/weekly-clean.yml +++ b/.github/workflows/weekly-clean.yml @@ -7,28 +7,49 @@ name: Weekly Clean on: schedule: - cron: '1 15 * * 6' # 15:01 UTC every Sat = 1:01 am Syd every Sun + # for testing: + workflow_dispatch: jobs: - all: + proofs: name: Proofs runs-on: ubuntu-latest strategy: fail-fast: false matrix: arch: [ARM, ARM_HYP, AARCH64, RISCV64, X64] + num_domains: ['1', '7', ''] steps: - name: Proofs uses: seL4/ci-actions/aws-proofs@master with: L4V_ARCH: ${{ matrix.arch }} + NUM_DOMAINS: ${{ matrix.num_domains }} cache_read: '' # start with empty cache, but write cache back (default) env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_SSH: ${{ secrets.AWS_SSH }} GH_HEAD_SHA: ${{ github.event.pull_request.head.sha }} + - name: Upload kernel builds + uses: actions/upload-artifact@v4 + 
with: + name: kernel-builds-${{ matrix.num_domains }}-${{ matrix.arch }} + path: artifacts/kernel-builds + if-no-files-found: ignore - name: Upload logs - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: - name: logs-${{ matrix.arch }} + name: logs-${{ matrix.num_domains }}-${{ matrix.arch }} path: logs.tar.xz + + binary-verification: + name: Trigger BV + runs-on: ubuntu-latest + needs: proofs + steps: + - name: Trigger binary verification + uses: seL4/ci-actions/bv-trigger@master + with: + token: ${{ secrets.PRIV_REPO_TOKEN }} + tag: l4v/weekly-clean diff --git a/.gitignore b/.gitignore index 7f40749144..c8bb96531e 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,8 @@ /spec/cspec/c/build /spec/cspec/c/config-build +/spec/cspec/c/export +/spec/cspec/c/overlays/*/overlay.dts /spec/haskell/stack.yaml.lock /spec/haskell/doc/**/*.aux diff --git a/.reuse/dep5 b/.reuse/dep5 index b08bbf633c..5db68fae1e 100644 --- a/.reuse/dep5 +++ b/.reuse/dep5 @@ -6,7 +6,7 @@ Source: https://github.com/seL4/l4v/ # AFP entry https://www.isa-afp.org/entries/Simpl.html Files: tools/c-parser/Simpl/* Copyright: 2008, Norbert Schirmer, TU Muenchen -License: LGPL-2.1-only +License: BSD-3-Clause # AFP entry Word_Lib Files: lib/Word_Lib/Word_Next.thy diff --git a/LICENSES/LGPL-2.1-only.txt b/LICENSES/LGPL-2.1-only.txt deleted file mode 100644 index 130dffb311..0000000000 --- a/LICENSES/LGPL-2.1-only.txt +++ /dev/null @@ -1,467 +0,0 @@ -GNU LESSER GENERAL PUBLIC LICENSE - -Version 2.1, February 1999 - -Copyright (C) 1991, 1999 Free Software Foundation, Inc. - -51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -Everyone is permitted to copy and distribute verbatim copies of this license -document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts as the -successor of the GNU Library Public License, version 2, hence the version -number 2.1.] - -Preamble - -The licenses for most software are designed to take away your freedom to share -and change it. By contrast, the GNU General Public Licenses are intended to -guarantee your freedom to share and change free software--to make sure the -software is free for all its users. - -This license, the Lesser General Public License, applies to some specially -designated software packages--typically libraries--of the Free Software Foundation -and other authors who decide to use it. You can use it too, but we suggest -you first think carefully about whether this license or the ordinary General -Public License is the better strategy to use in any particular case, based -on the explanations below. - -When we speak of free software, we are referring to freedom of use, not price. -Our General Public Licenses are designed to make sure that you have the freedom -to distribute copies of free software (and charge for this service if you -wish); that you receive source code or can get it if you want it; that you -can change the software and use pieces of it in new free programs; and that -you are informed that you can do these things. - -To protect your rights, we need to make restrictions that forbid distributors -to deny you these rights or to ask you to surrender these rights. These restrictions -translate to certain responsibilities for you if you distribute copies of -the library or if you modify it. - -For example, if you distribute copies of the library, whether gratis or for -a fee, you must give the recipients all the rights that we gave you. 
You must -make sure that they, too, receive or can get the source code. If you link -other code with the library, you must provide complete object files to the -recipients, so that they can relink them with the library after making changes -to the library and recompiling it. And you must show them these terms so they -know their rights. - -We protect your rights with a two-step method: (1) we copyright the library, -and (2) we offer you this license, which gives you legal permission to copy, -distribute and/or modify the library. - -To protect each distributor, we want to make it very clear that there is no -warranty for the free library. Also, if the library is modified by someone -else and passed on, the recipients should know that what they have is not -the original version, so that the original author's reputation will not be -affected by problems that might be introduced by others. - -Finally, software patents pose a constant threat to the existence of any free -program. We wish to make sure that a company cannot effectively restrict the -users of a free program by obtaining a restrictive license from a patent holder. -Therefore, we insist that any patent license obtained for a version of the -library must be consistent with the full freedom of use specified in this -license. - -Most GNU software, including some libraries, is covered by the ordinary GNU -General Public License. This license, the GNU Lesser General Public License, -applies to certain designated libraries, and is quite different from the ordinary -General Public License. We use this license for certain libraries in order -to permit linking those libraries into non-free programs. - -When a program is linked with a library, whether statically or using a shared -library, the combination of the two is legally speaking a combined work, a -derivative of the original library. The ordinary General Public License therefore -permits such linking only if the entire combination fits its criteria of freedom. -The Lesser General Public License permits more lax criteria for linking other -code with the library. - -We call this license the "Lesser" General Public License because it does Less -to protect the user's freedom than the ordinary General Public License. It -also provides other free software developers Less of an advantage over competing -non-free programs. These disadvantages are the reason we use the ordinary -General Public License for many libraries. However, the Lesser license provides -advantages in certain special circumstances. - -For example, on rare occasions, there may be a special need to encourage the -widest possible use of a certain library, so that it becomes a de-facto standard. -To achieve this, non-free programs must be allowed to use the library. A more -frequent case is that a free library does the same job as widely used non-free -libraries. In this case, there is little to gain by limiting the free library -to free software only, so we use the Lesser General Public License. - -In other cases, permission to use a particular library in non-free programs -enables a greater number of people to use a large body of free software. For -example, permission to use the GNU C Library in non-free programs enables -many more people to use the whole GNU operating system, as well as its variant, -the GNU/Linux operating system. 
- -Although the Lesser General Public License is Less protective of the users' -freedom, it does ensure that the user of a program that is linked with the -Library has the freedom and the wherewithal to run that program using a modified -version of the Library. - -The precise terms and conditions for copying, distribution and modification -follow. Pay close attention to the difference between a "work based on the -library" and a "work that uses the library". The former contains code derived -from the library, whereas the latter must be combined with the library in -order to run. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License Agreement applies to any software library or other program -which contains a notice placed by the copyright holder or other authorized -party saying it may be distributed under the terms of this Lesser General -Public License (also called "this License"). Each licensee is addressed as -"you". - -A "library" means a collection of software functions and/or data prepared -so as to be conveniently linked with application programs (which use some -of those functions and data) to form executables. - -The "Library", below, refers to any such software library or work which has -been distributed under these terms. A "work based on the Library" means either -the Library or any derivative work under copyright law: that is to say, a -work containing the Library or a portion of it, either verbatim or with modifications -and/or translated straightforwardly into another language. (Hereinafter, translation -is included without limitation in the term "modification".) - -"Source code" for a work means the preferred form of the work for making modifications -to it. For a library, complete source code means all the source code for all -modules it contains, plus any associated interface definition files, plus -the scripts used to control compilation and installation of the library. - -Activities other than copying, distribution and modification are not covered -by this License; they are outside its scope. The act of running a program -using the Library is not restricted, and output from such a program is covered -only if its contents constitute a work based on the Library (independent of -the use of the Library in a tool for writing it). Whether that is true depends -on what the Library does and what the program that uses the Library does. - -1. You may copy and distribute verbatim copies of the Library's complete source -code as you receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice and disclaimer -of warranty; keep intact all the notices that refer to this License and to -the absence of any warranty; and distribute a copy of this License along with -the Library. - -You may charge a fee for the physical act of transferring a copy, and you -may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Library or any portion of it, -thus forming a work based on the Library, and copy and distribute such modifications -or work under the terms of Section 1 above, provided that you also meet all -of these conditions: - - a) The modified work must itself be a software library. - -b) You must cause the files modified to carry prominent notices stating that -you changed the files and the date of any change. 
- -c) You must cause the whole of the work to be licensed at no charge to all -third parties under the terms of this License. - -d) If a facility in the modified Library refers to a function or a table of -data to be supplied by an application program that uses the facility, other -than as an argument passed when the facility is invoked, then you must make -a good faith effort to ensure that, in the event an application does not supply -such function or table, the facility still operates, and performs whatever -part of its purpose remains meaningful. - -(For example, a function in a library to compute square roots has a purpose -that is entirely well-defined independent of the application. Therefore, Subsection -2d requires that any application-supplied function or table used by this function -must be optional: if the application does not supply it, the square root function -must still compute square roots.) - -These requirements apply to the modified work as a whole. If identifiable -sections of that work are not derived from the Library, and can be reasonably -considered independent and separate works in themselves, then this License, -and its terms, do not apply to those sections when you distribute them as -separate works. But when you distribute the same sections as part of a whole -which is a work based on the Library, the distribution of the whole must be -on the terms of this License, whose permissions for other licensees extend -to the entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest your -rights to work written entirely by you; rather, the intent is to exercise -the right to control the distribution of derivative or collective works based -on the Library. - -In addition, mere aggregation of another work not based on the Library with -the Library (or with a work based on the Library) on a volume of a storage -or distribution medium does not bring the other work under the scope of this -License. - -3. You may opt to apply the terms of the ordinary GNU General Public License -instead of this License to a given copy of the Library. To do this, you must -alter all the notices that refer to this License, so that they refer to the -ordinary GNU General Public License, version 2, instead of to this License. -(If a newer version than version 2 of the ordinary GNU General Public License -has appeared, then you can specify that version instead if you wish.) Do not -make any other change in these notices. - -Once this change is made in a given copy, it is irreversible for that copy, -so the ordinary GNU General Public License applies to all subsequent copies -and derivative works made from that copy. - -This option is useful when you wish to copy part of the code of the Library -into a program that is not a library. - -4. You may copy and distribute the Library (or a portion or derivative of -it, under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you accompany it with the complete corresponding -machine-readable source code, which must be distributed under the terms of -Sections 1 and 2 above on a medium customarily used for software interchange. 
- -If distribution of object code is made by offering access to copy from a designated -place, then offering equivalent access to copy the source code from the same -place satisfies the requirement to distribute the source code, even though -third parties are not compelled to copy the source along with the object code. - -5. A program that contains no derivative of any portion of the Library, but -is designed to work with the Library by being compiled or linked with it, -is called a "work that uses the Library". Such a work, in isolation, is not -a derivative work of the Library, and therefore falls outside the scope of -this License. - -However, linking a "work that uses the Library" with the Library creates an -executable that is a derivative of the Library (because it contains portions -of the Library), rather than a "work that uses the library". The executable -is therefore covered by this License. Section 6 states terms for distribution -of such executables. - -When a "work that uses the Library" uses material from a header file that -is part of the Library, the object code for the work may be a derivative work -of the Library even though the source code is not. Whether this is true is -especially significant if the work can be linked without the Library, or if -the work is itself a library. The threshold for this to be true is not precisely -defined by law. - -If such an object file uses only numerical parameters, data structure layouts -and accessors, and small macros and small inline functions (ten lines or less -in length), then the use of the object file is unrestricted, regardless of -whether it is legally a derivative work. (Executables containing this object -code plus portions of the Library will still fall under Section 6.) - -Otherwise, if the work is a derivative of the Library, you may distribute -the object code for the work under the terms of Section 6. Any executables -containing that work also fall under Section 6, whether or not they are linked -directly with the Library itself. - -6. As an exception to the Sections above, you may also combine or link a "work -that uses the Library" with the Library to produce a work containing portions -of the Library, and distribute that work under terms of your choice, provided -that the terms permit modification of the work for the customer's own use -and reverse engineering for debugging such modifications. - -You must give prominent notice with each copy of the work that the Library -is used in it and that the Library and its use are covered by this License. -You must supply a copy of this License. If the work during execution displays -copyright notices, you must include the copyright notice for the Library among -them, as well as a reference directing the user to the copy of this License. -Also, you must do one of these things: - -a) Accompany the work with the complete corresponding machine-readable source -code for the Library including whatever changes were used in the work (which -must be distributed under Sections 1 and 2 above); and, if the work is an -executable linked with the Library, with the complete machine-readable "work -that uses the Library", as object code and/or source code, so that the user -can modify the Library and then relink to produce a modified executable containing -the modified Library. (It is understood that the user who changes the contents -of definitions files in the Library will not necessarily be able to recompile -the application to use the modified definitions.) 
- -b) Use a suitable shared library mechanism for linking with the Library. A -suitable mechanism is one that (1) uses at run time a copy of the library -already present on the user's computer system, rather than copying library -functions into the executable, and (2) will operate properly with a modified -version of the library, if the user installs one, as long as the modified -version is interface-compatible with the version that the work was made with. - -c) Accompany the work with a written offer, valid for at least three years, -to give the same user the materials specified in Subsection 6a, above, for -a charge no more than the cost of performing this distribution. - -d) If distribution of the work is made by offering access to copy from a designated -place, offer equivalent access to copy the above specified materials from -the same place. - -e) Verify that the user has already received a copy of these materials or -that you have already sent this user a copy. - -For an executable, the required form of the "work that uses the Library" must -include any data and utility programs needed for reproducing the executable -from it. However, as a special exception, the materials to be distributed -need not include anything that is normally distributed (in either source or -binary form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component itself -accompanies the executable. - -It may happen that this requirement contradicts the license restrictions of -other proprietary libraries that do not normally accompany the operating system. -Such a contradiction means you cannot use both them and the Library together -in an executable that you distribute. - -7. You may place library facilities that are a work based on the Library side-by-side -in a single library together with other library facilities not covered by -this License, and distribute such a combined library, provided that the separate -distribution of the work based on the Library and of the other library facilities -is otherwise permitted, and provided that you do these two things: - -a) Accompany the combined library with a copy of the same work based on the -Library, uncombined with any other library facilities. This must be distributed -under the terms of the Sections above. - -b) Give prominent notice with the combined library of the fact that part of -it is a work based on the Library, and explaining where to find the accompanying -uncombined form of the same work. - -8. You may not copy, modify, sublicense, link with, or distribute the Library -except as expressly provided under this License. Any attempt otherwise to -copy, modify, sublicense, link with, or distribute the Library is void, and -will automatically terminate your rights under this License. However, parties -who have received copies, or rights, from you under this License will not -have their licenses terminated so long as such parties remain in full compliance. - -9. You are not required to accept this License, since you have not signed -it. However, nothing else grants you permission to modify or distribute the -Library or its derivative works. These actions are prohibited by law if you -do not accept this License. Therefore, by modifying or distributing the Library -(or any work based on the Library), you indicate your acceptance of this License -to do so, and all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - -10. 
Each time you redistribute the Library (or any work based on the Library), -the recipient automatically receives a license from the original licensor -to copy, distribute, link with or modify the Library subject to these terms -and conditions. You may not impose any further restrictions on the recipients' -exercise of the rights granted herein. You are not responsible for enforcing -compliance by third parties with this License. - -11. If, as a consequence of a court judgment or allegation of patent infringement -or for any other reason (not limited to patent issues), conditions are imposed -on you (whether by court order, agreement or otherwise) that contradict the -conditions of this License, they do not excuse you from the conditions of -this License. If you cannot distribute so as to satisfy simultaneously your -obligations under this License and any other pertinent obligations, then as -a consequence you may not distribute the Library at all. For example, if a -patent license would not permit royalty-free redistribution of the Library -by all those who receive copies directly or indirectly through you, then the -only way you could satisfy both it and this License would be to refrain entirely -from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents -or other property right claims or to contest validity of any such claims; -this section has the sole purpose of protecting the integrity of the free -software distribution system which is implemented by public license practices. -Many people have made generous contributions to the wide range of software -distributed through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing to -distribute software through any other system and a licensee cannot impose -that choice. - -This section is intended to make thoroughly clear what is believed to be a -consequence of the rest of this License. - -12. If the distribution and/or use of the Library is restricted in certain -countries either by patents or by copyrighted interfaces, the original copyright -holder who places the Library under this License may add an explicit geographical -distribution limitation excluding those countries, so that distribution is -permitted only in or among countries not thus excluded. In such case, this -License incorporates the limitation as if written in the body of this License. - -13. The Free Software Foundation may publish revised and/or new versions of -the Lesser General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to address -new problems or concerns. - -Each version is given a distinguishing version number. If the Library specifies -a version number of this License which applies to it and "any later version", -you have the option of following the terms and conditions either of that version -or of any later version published by the Free Software Foundation. If the -Library does not specify a license version number, you may choose any version -ever published by the Free Software Foundation. - -14. 
If you wish to incorporate parts of the Library into other free programs -whose distribution conditions are incompatible with these, write to the author -to ask for permission. For software which is copyrighted by the Free Software -Foundation, write to the Free Software Foundation; we sometimes make exceptions -for this. Our decision will be guided by the two goals of preserving the free -status of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - -15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR -THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE -STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY -"AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, -BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE -OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE -THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE -OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA -OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES -OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH -HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Libraries - -If you develop a new library, and you want it to be of the greatest possible -use to the public, we recommend making it free software that everyone can -redistribute and change. You can do so by permitting redistribution under -these terms (or, alternatively, under the terms of the ordinary General Public -License). - -To apply these terms, attach the following notices to the library. It is safest -to attach them to the start of each source file to most effectively convey -the exclusion of warranty; and each file should have at least the "copyright" -line and a pointer to where the full notice is found. - -< one line to give the library's name and an idea of what it does. > - -Copyright (C) < year > < name of author > - -This library is free software; you can redistribute it and/or modify it under -the terms of the GNU Lesser General Public License as published by the Free -Software Foundation; either version 2.1 of the License, or (at your option) -any later version. - -This library is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -details. - -You should have received a copy of the GNU Lesser General Public License along -with this library; if not, write to the Free Software Foundation, Inc., 51 -Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information -on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your school, -if any, to sign a "copyright disclaimer" for the library, if necessary. 
Here -is a sample; alter the names: - -Yoyodyne, Inc., hereby disclaims all copyright interest in - -the library `Frob' (a library for tweaking knobs) written - -by James Random Hacker. - -< signature of Ty Coon > , 1 April 1990 - -Ty Coon, President of Vice - -That's all there is to it! diff --git a/README.md b/README.md index fca6ba53d0..8fa990f058 100644 --- a/README.md +++ b/README.md @@ -28,26 +28,19 @@ assistant [Isabelle/HOL][2]. For an introduction to Isabelle, see its [official website][2] and [documentation][3]. [1]: https://github.com/seL4/l4v "L4.verified Repository" - [2]: http://isabelle.in.tum.de "Isabelle Website" - [3]: http://isabelle.in.tum.de/documentation.html "Isabelle Documentation" + [2]: https://isabelle.in.tum.de "Isabelle Website" + [3]: https://isabelle.in.tum.de/documentation.html "Isabelle Documentation" Setup ----- -This repository is meant to be used as part of a Google [repo][5] setup. -Instead of cloning it directly, follow the instructions at the [manifest git -repo](https://github.com/seL4/verification-manifest). - - [5]: http://source.android.com/source/downloading.html#installing-repo "google repo installation" - - -Dependencies ------------- - -For software dependencies and Isabelle setup, see the -[setup.md](docs/setup.md) file in the `docs` directory. +This repository is meant to be used as part of a Google [repo][5] setup. Instead +of cloning it directly, please follow the directions for software dependencies +and Isabelle installation in the [setup.md](docs/setup.md) file in the `docs` +directory. +[5]: https://gerrit.googlesource.com/git-repo/+/HEAD/README.md Contributing ------------ @@ -138,71 +131,59 @@ about 16GB of RAM. The proofs distribute reasonably well over multiple cores, up to about 8 cores are useful. - -jEdit ------ - -We provide a jEdit macro that is very useful when working with large theory -files, **goto-error**, which moves the cursor to the first error in the file. - -To install the macro, run the following commands in the directory -`verification/l4v/`: -```bash -mkdir -p ~/.isabelle/jedit/macros -cp misc/jedit/macros/goto-error.bsh ~/.isabelle/jedit/macros/. -``` - -You can add keybindings for this macro in the usual way, by going to -`Utilities -> Global Options -> jEdit -> Shortcuts`. - -Additionally, our fork of Isabelle/jEdit has an updated indenter which is more -proof-context aware than the 'original' indenter. Pressing `ctrl+i` while some -`apply`-script text is selected should auto-indent the script while respecting -subgoal depth and maintaining the relative indentation of multi-line `apply` -statements. - Running the Proofs ------------------ If Isabelle is set up correctly, a full test for the proofs in this repository -can be run with the command +for seL4 on the `ARM` architecture can be run with the command - ./run_tests + L4V_ARCH=ARM ./run_tests from the directory `l4v/`. -Not all of the proof sessions can be built directly with the `isabelle build` command. -The seL4 verification proofs depend on Isabelle specifications that are -generated from the C source code and Haskell model. -Therefore, it's recommended to always build using the supplied makefiles, -which will ensure that these generated specs are up to date. - -To do this, enter one level under the `l4v/` directory and run `make `. -For example, to build the C refinement proof session, do +Set the environment variable `L4V_ARCH` to one of `ARM`, `ARM_HYP`, `X64`, +`RISCV64`, or `AARCH64` to get the proofs for the respective architecture. 
`ARM` +has the most complete set of proofs, the other architectures tend to support +only a subset of the proof sessions defined for `ARM`. - cd l4v/proof - make CRefine +Not all of the proof sessions can be built directly with the `isabelle build` +command. The seL4 proofs depend on Isabelle specifications that are generated +from the C source code and Haskell model. Therefore, it is recommended to always +build using the `run_tests` command or the supplied Makefiles, which will ensure +that these generated specs are up to date. -As another example, to build the session for the Haskell model, do +To do this, enter one level under the `l4v/` directory and run `make `. +For example, to build the abstract specification, do + export L4V_ARCH=ARM cd l4v/spec - make ExecSpec + make ASpec See the `HEAPS` variable in the corresponding `Makefile` for available targets. +The sessions that directly depend on generated sources are `ASpec`, `ExecSpec`, +and `CKernel`. These, and all sessions that depend on them, need to be run using +`run_tests` or `make`. Proof sessions that do not depend on generated inputs can be built directly with ./isabelle/bin/isabelle build -d . -v -b -from the directory `l4v/`. For available sessions, see the corresponding -`ROOT` files in this repository. There is roughly one session corresponding to -each major directory in the repository. +from the directory `l4v/`. For available sessions and their dependencies, see +the corresponding `ROOT` files in this repository. There is roughly one session +corresponding to each major directory in the repository. For interactively exploring, say the invariant proof of the abstract -specification with a pre-built logic image for the abstract specification and -all of the invariant proof's dependencies, run +specification on `ARM`, note that in `proof/ROOT` the parent session for +`AInvs` is `ASpec` and therefore run: + export L4V_ARCH=ARM + ./run_tests ASpec ./isabelle/bin/isabelle jedit -d . -R AInvs -in `l4v/` and open one of the files in `proof/invariant-abstract`. +or, if you prefer `make`: + export L4V_ARCH=ARM + cd spec; make ASpec + ../isabelle/bin/isabelle jedit -d . -R AInvs + +in `l4v/` and open one of the files in `proof/invariant-abstract`. diff --git a/ROOTS b/ROOTS index e1221f73f7..3599f648bc 100644 --- a/ROOTS +++ b/ROOTS @@ -7,6 +7,10 @@ tools camkes sys-init lib +lib/Basics +lib/Eisbach_Tools +lib/ML_Utils +lib/Monads lib/Word_Lib lib/sep_algebra lib/EVTutorial diff --git a/camkes/cdl-refine/Eval_CAMKES_CDL.thy b/camkes/cdl-refine/Eval_CAMKES_CDL.thy index a02b4ad5e5..1a1347223d 100644 --- a/camkes/cdl-refine/Eval_CAMKES_CDL.thy +++ b/camkes/cdl-refine/Eval_CAMKES_CDL.thy @@ -211,7 +211,7 @@ lemma Collect_asid_high__eval_helper: section \Assorted helpers\ lemma fun_upds_to_map_of[THEN eq_reflection]: "Map.empty = map_of []" - "(map_of xs(k \ v)) = map_of ((k, v) # xs)" + "((map_of xs)(k \ v)) = map_of ((k, v) # xs)" by auto lemma subst_eqn_helper: diff --git a/docs/Style.thy b/docs/Style.thy index 1b37a02b1c..0dfcde2b12 100644 --- a/docs/Style.thy +++ b/docs/Style.thy @@ -219,6 +219,138 @@ lemma test_lemma3: case_tac h; simp) done +section \Right vs left operator-wrapping\ + +text \ + When a term is too long, there is a general consensus to wrap it at operators. However, consensus + has never emerged on whether the operator should then end up at the end of the line (right + operator wrapping), or start of the next one (left operator wrapping). 
+ Some people have a tendency towards right-wrapping, others towards left-wrapping. They + each have advantages in specific contexts, thus both appear in the l4v proofs and are permitted + style.\ + +term \A \ B \ C \ D\ \ \no wrapping when A..D are small terms\ + +term \A \ + B \ + C \ + D\ \ \right-wrapping when A..D are large terms\ + +term \A + \ B + \ C + \ D\ \ \left-wrapping when A..D are large terms\ + +text \ + While both styles are permitted, do not mix them in the same lemma. If a lemma already uses + one style and you aren't doing a major rewrite, stick to the existing style.\ + +lemma + shows "\ A; B; C\ \ + D" \ \right-wrapping OK\ + and "\ A; B; C\ + \ D" \ \left-wrapping OK\ + oops \ \mixing styles: NOT OK\ + +text \ + Some operators and syntax only have ONE style. As seen in other sections: + * the `|` in `case` never appears on the right + * `;` is always on the right when wrapping lists of assumptions + * `shows .. and ... and` wraps with `and` on the left + * `|` in method invocations always goes on the left + * commas and (semi)colons, owing to our natural language origins, always end up on the right\ + +lemma + shows + "\ A + ; B \ \ \wrong: always on right\ + \ \ok: \ can be either left or right\ + \ C" and \ \wrong: `shows/and` only on left!\ + "D" + and "E" \ \ok: on left\ +proof - + have "True \ True" + by (rule conjI, + blast, + blast) \ \ok\ + have "True \ True" + by (rule conjI + , blast + , blast) \ \NOT OK: commas go on right\ + have "True \ True" + by (rule conjI; + blast) \ \ok\ + have "True \ True" + by (rule conjI + ; blast) \ \NOT OK: semicolons go on right\ + have "True \ True" + by (rule conjI + | blast)+ \ \ok\ + have "True \ True" + by (rule conjI | + blast)+ \ \NOT OK: `|` goes on the left\ + oops + +text \ + The general principle of "nothing indented less than what it belongs to" is in effect for both + wrapping styles. Remember, the goal of the exercise is to make it as clear to read for others as + you can. Sometimes, scanning the left side of the screen to see the overall term can help, + while other times putting @{text \} on the right will save space and prevent subsequent lines + from wrapping.\ + +text \ + Inner-syntax indentation is not automatable in the general case, so our goal is to help + ease of comprehension as much as possible, i.e. + @{term "A \ B \ C \ D \ E \ F"} is bearable if A..F are short, but if they are large terms, + please avoid doing either of these:\ + +term " + A \ + B \ + C \ + D \ + E \ + F" \ \avoid: requires building a parse tree in one's head\ + +term " + A + \ B + \ C + \ D + \ E + \ F" \ \can be marginally easier to scan, but still avoid due to mental parsing difficulty\ + +text \Instead, help out the reader like this:\ + +term " + A \ + B \ + C \ + D \ + E \ + F" + +term " + A + \ B + \ C + \ D + \ E + \ F" + +text \AVOID indentation that misrepresents the parse tree and confuses the reader:\ + +term " + A + \ B + \ C" \ \NOT OK: implies this parses as @{text "A \ (B \ C)"}\ + +term " + A \ + B \ + B \ + A" \ \NOT OK: implies this parses as @{text "((A \ B) \ B) \ A"}\ + section \Other\ text \ @@ -298,6 +430,64 @@ text \ approach it with a breadth-first approach. Since the default isabelle strategy is depth-first, prefers (or defers) will be needed, e.g. 
corres proofs.\ +subsection \Using by\ + +text \ + When all subgoals of a proof can be solved in one apply statement, use `by`.\ + +lemma + "True" + by simp + +lemma + "X" + apply (subgoal_tac "True") + prefer 2 + subgoal by blast + apply (subgoal_tac "True") + prefer 2 + subgoal + by blast \ \for tactic invocations that would overflow the line\ + oops + +text \ + When all subgoals of a proof can be solved in two apply statements, use `by` to indicate the + intermediate state is not interesting.\ + +lemma + "True \ True" + by (rule conjI) auto + +lemma + "True \ True" + by (rule conjI) + auto \ \for tactic invocations that would overflow the line\ + +text \ + Avoid using `by` at the end of an apply-style proof with multiple steps. + The exception to this rule are long-running statements (over 2 seconds) that complete the proof. + There, we favour parallelism (proof forking in interactive mode) over visual consistency. + + If you do use `by` starting on a line of its own, it should be indented as if it were an + apply statement. + NOTE: the default Isabelle auto-indenter will not indent `by` according to the number of goals, + which is another reason to avoid mixing it with apply style\ + +lemma + "True \ True \ True \ True \ True \ True \ True" + apply (intro conjI) + apply blast + apply blast + apply auto + done \ \use this style in general: no by\ + +lemma long_running_ending: + "True \ True \ True \ True \ True \ True \ True" + apply (intro conjI) + apply blast + apply blast + by auto \ \only if long-running, and note indentation!\ + subsection \Unfolding definitions\ text \ diff --git a/docs/Style_pre.thy b/docs/Style_pre.thy index 68d6676b43..8e7528f4a0 100644 --- a/docs/Style_pre.thy +++ b/docs/Style_pre.thy @@ -1,4 +1,5 @@ (* + Copyright 2022, Proofcraft Pty Ltd Copyright 2021, Data61, CSIRO (ABN 41 687 119 230) SPDX-License-Identifier: CC-BY-SA-4.0 diff --git a/docs/commit-messages.md b/docs/commit-messages.md new file mode 100644 index 0000000000..1036055007 --- /dev/null +++ b/docs/commit-messages.md @@ -0,0 +1,392 @@ + + +# Commit Messages + +## Objective + +The l4v repository is relatively large, active, and long-lived. It has a public +history of about one decade, and an additional decade of unreleased private +history. It will hopefully live on for another 20 years. + +This means that the commit history is important. Examples of questions the commit +history should be able to answer reasonably quickly and painlessly are: + +- > Is this written in a strange way for good reasons, or were we just in a hurry? + > What was the reason? Does it still apply? + +- > When did we change doing things this way and why? + +- > Has this always been broken or was there a seemingly unrelated change that broke it? + +- > How long did it take to finish this proof? + +- > How much change was necessary to do this proof? + +- > Where did this library lemma come from? + +## General + +The [seL4 repository guidelines][git-conventions] apply to the `l4v` repository, +with the following exceptions and additions: + +- header can be max 78 characters +- body is wrapped at 78 characters +- we use tags in the header to indicate which part of the repository + the commit applies to + +We are using 78 for the header because of the tags, which take up some space. If +you can manage to stay within 50 characters anyway, that is appreciated, but it's +not always practical. Using a body wrap of 72 is also allowed, since that is the +default for other seL4 repositories. 
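+
+For illustration, here is a made-up example that follows these conventions
+(the tag, lemma names, and reasons are invented):
+
+```
+lib+ainvs: rename lemma foo_inv to foo_invariant
+
+The short name foo_inv is wanted for a differently-stated rule in an
+upcoming refinement proof, and the longer name is more consistent with
+the surrounding lemmas. No proof content changes.
+```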
+ +We use tags, because the repository is relatively large and most commits only +apply to small parts of it. The tags make it easy to identify relevance of a +commit at a glance while scanning through a large number of commits. + +The general guidelines prescribe capitalising the commit header. We do not +capitalise the tag or after the tag, but we do capitalise the (rare) cases where +there is no tag in the commit header. + +## Header and Body + +There is good general advice on [commit message writing][commit-messages] +available on the internet and it is as relevant to proofs as it is to code. +You should read it, it's not long and it's good advice. + +Repeating some high-level points: + +- Use imperative voice (e.g. `proof: rename lemma foo to bar`) +- The header should be a short summary, focusing on "what" +- The body should explain what is going on in more detail (also in imperative + voice), but much more importantly *why* it is going on (is `bar` more + consistent than `foo`? Is the name `foo` needed for something else? Does `bar` + just better describe what is going on?). +- You are trying to explain things to your future self or a future colleague + 5-10 years from now. You can assume knowledge of the repository in general, + but you should not assume specific context that is obvious to you right now, + but that will not be known to a different person 5 years from now. + +## Tags + +- We use tags to indicate which part of the repository the commit applies to, + and if it is architecture-specific, then to which architecture it applies to. + +- We do not usually use branch tags, because git branches are ephemeral and we + are using rebase branches for most of our work. The one exception is the `rt` + branch, which has been alive for over half a decade. For this branch, we allow + merge commits (from branch `master` into `rt` only), and we want to be able to + reconstruct a rebase history from that at the end of the branch's life. + + This means, we do use the tag `rt` for commits that only make sense on this + branch. If you could apply the commit to the master branch directly (e.g. you're + adding a lemma to a library), it should not get the tag. Otherwise it should. + +### Tag Examples + +The main tags we use are mostly the directory names of the main proof something +is in, e.g. `refine`, `crefine`, `sys-init`, `camkes`. For some of these, there +are abbreviations, mainly `aspec` for the abstract spec and `ainvs` for the +abstract invariants. + +For large directories that have logical sub parts, we use slashes, e.g. +`lib/monads`. Not so much because the change is in that directory, but because +we want to see that it's a library change and applies to the monad part of the +library. + +If the change applies to many proofs, for instance large lemma renaming commits, +we use tags such as `proof` and `spec`. + +We combine tags with `+` if a change applies to multiple parts, e.g. +`clib+crefine` or `lib+proof`. + +If something is specific to an architecture we preface the tag with the +architecture, e.g. `arm refine:` or `aarch64 aspec+ainvs:`. The current +architecture tags are: `arm`, `arm-hyp`, `x64`, `riscv`, `aarch64`. +Please use these spellings only. + +More tag examples: + +- `trivial`: for small trivial changes like fixing a typo, where no proofs or + specs have changed, i.e.\ that would not need a proof CI run. 
+- `cleanup:` for cleanups that do not change proof content
+- `github:` for GitHub CI changes
+- `run_tests`: for changes to the top-level `run_tests` file
+- `isabelle20xx`: for easily identifying commits related to Isabelle updates
+
+For more consistency, there is an order between tags. More abstract/general
+things should come first, e.g. `lib` < `aspec` < `haskell` < `ainvs` < `refine` <
+`orphanage` < `crefine`. Or `dspec` < `drefine` and `access` < `infoflow`. Specs
+< proofs and `infoflow` < refinement proofs. This is not a total order; it's Ok
+to use your own judgement.
+
+Because `lib` has many subdirectories and separate parts, it's fine to use
+session names as tags there to shorten things a bit, e.g. `clib`, `monads`,
+`word_lib` instead of `lib/clib`, `lib/monads`, or `lib/word_lib`. This makes
+sense when the tags are session names.
+
+See also the longer example list of [good tags](#good-tags) below.
+
+## Tips and Tools
+
+### Looking at history
+
+The main tools to interact with the git history are browsing it on GitHub and
+various forms of `git log`:
+
+```sh
+git log --oneline # show only headings
+git log # show commit info with author, date, message
+git log --stat # additionally show which files have changed
+git log -p # additionally show full diff
+```
+
+For all of these, you can supply a path to restrict the log to a certain file
+or directory in the repo. You can also supply a branch, or a commit range like
+`master..rt` to restrict the output.
+
+`git log --oneline` is especially useful for quickly getting an overview. Example
+output:
+
+```
+b3c6df48a clib: improve ccorres_While
+49ff8457f clib+crefine: improve and consolidate variants of ccorres_to_vcg
+8c433c085 clib: add some rules for hoarep
+82b954817 clib: improve ccorres_call_getter_setter
+8c59df449 lib/monads: remove more uses of _tac methods
+563232397 run_tests+proof: exclude SimplExportAndRefine for AARCH64
+1cce5b3ff proof: switch AArch64 quick_and_dirty from Refine to CRefine
+402a8342d run_tests: enable CBaseRefine for AARCH64
+32a672412 aarch64 cspec: add Kernel_C.thy to base CKernel image on
+b2cd1ce4a aarch64 asmrefine: copy ArchSetup from RISCV64
+67c0109b7 lib/monads: avoid clarsimp as initial Isar method
+bd5026438 lib/monads: fix remaining document preparation issues
+4d0086567 lib/monads: add new Trace_* files to ROOT
+598e19dd6 lib/monads: coherent document structure
+4cbfb4ab0 lib/monads: minor style + warning cleanup
+b2dd5db6d lib/monads: fix document preparation issues
+03a045309 lib/monads: add AFP document setup
+d0bab9c79 misc/scripts: remove Darwin cpp wrapper
+```
+
+You can very quickly see that C verification has been active recently, that
+new tests were added, that AARCH64 refinement proofs have been done, and that there was
+some work related to the AFP and the monad library. You can see that nothing
+has happened with the system initialiser or other user-level proofs, and that there
+are no changes that should affect, for instance, the C parser.
+
+You only see such things quickly when the messages are consistent and follow the
+same kind of pattern. It's not so important what the pattern is. It is important
+that it is consistent.
+
+Note in e.g. `proof: switch AArch64 quick_and_dirty from Refine to CRefine` that
+the architecture tag is used only when the change is specific to files for that
+architecture. In this commit, the overall ROOTS file is changed, so it shouldn't
+get the architecture tag.
+
+### What tag should I pick? 
+ +If you're uncertain what tag to pick for a certain file or directory, the +easiest way to figure it out is to do + +```sh +git log --oneline +``` + +Do your tags the same way people have done before. This will make the pattern +consistent and should be reasonably easy to read even if it's not perfect. Look +at a few commits, not only a single one, so you can course correct instead of +amplify if someone happened to invent a new flavour. + +You can even do that when you're in the middle of writing a commit message, it's +safe to interrupt `git commit` with `Ctrl-Z`, then `bg` in your shell to put +it into the background, and then `git log --online ` to see the history. + +Any operation that doesn't change the state of the repository is fine (even +those that do are fine, but then the commit will probably fail). + +When you're looking into history for tags, use mainly commits from roughly 2018 +onwards. We weren't very consistent with tags before that. The more recent the +more consistent. + +### Good tags + +Here's a list of tags that have been used in the past and that follow the +guidelines above. + +``` +aarch64 ainvs +aarch64 ainvs+refine +aarch64 asmrefine +aarch64 aspec +aarch64 aspec+ainvs +aarch64 aspec+design +aarch64 aspec+haskell +aarch64 aspec+machine +aarch64 cspec +aarch64 design +aarch64 design+machine +aarch64 haskell +aarch64 haskell+design +aarch64 haskell+machine +aarch64 machine+ainvs +aarch64 proof +aarch64 refine +aarch64 spec+haskell +access+infoflow+drefine +access+infoflow+crefine+drefine +ainvs +ainvs+crefine +ainvs+refine +arm aspec+design +arm access +arm access+infoflow +arm ainvs +arm aspec +arm crefine +arm haskell +arm infoflow +arm refine +arm+arm-hyp crefine +arm+arm-hyp machine +arm-hyp aspec +arm-hyp aspec+design +arm-hyp ainvs +arm-hyp crefine +arm-hyp design +arm-hyp haskell +arm-hyp haskell+refine +arm-hyp machine +arm-hyp refine +asmrefine +aspec +aspec+access +aspec+ainvs +aspec+design+haskell +aspec+haskell +autocorres +bisim +c-parser +c-parser+autocorres +c-parser+crefine +camkes +capDL +ckernel +cleanup +cleanup ainvs +clib +clib+crefine +crefine +crefine+capDL +cspec +design +docs +dpolicy +drefine +dspec +dspec+drefine+infoflow +github +haskell +haskell+design +haskell-translator +infoflow +infoflow+crefine +infoflow+dpolicy+cdl-refine +isabelle-2021 +isabelle-2021 access +isabelle-2021 c-parser +lib +lib+READMEs +lib+aarch64 ainvs +lib+aarch64 refine +lib+ainvs +lib+ainvs+aarch64 ainvs +lib+ainvs+aarch64 refine +lib+ainvs+access+refine +lib+autocorres +lib+c-parser +lib+crefine +lib+proof +lib+proof+autocorres +lib+proof+tools +lib+proof +lib+refine+crefine +lib+spec +lib+spec+proof+autocorres +lib+spec+proof +lib+sysinit +lib+tools +lib/apply_debug +lib/clib +lib/corres_method +lib/crunch +lib/monads +lib/sep_algebra +license +misc +misc/regression +misc/scripts +misc/stats +proof +proof+autocorres +proof/Makefile +proof/ROOT +refine +refine cleanup +refine+crefine +refine+orphanage +riscv +riscv access +riscv ainvs +riscv ainvs+access +riscv aspec +riscv aspec+ainvs +riscv aspec+haskell +riscv crefine +riscv cspec+crefine +riscv design +riscv haskell +riscv haskell+design +riscv haskell+proof +riscv haskell+refine +riscv infoflow +riscv machine +riscv machine+ainvs +riscv machine+design+crefine +riscv orphanage +riscv platform +riscv refine +riscv access+infoflow+refine+crefine +riscv spec +riscv+aarch64 ainvs+refine +riscv+x64 crefine +riscv64+aarch64 ainvs +run_tests +run_tests+proof +spec+proof +style +sys-init +tools +tools/asmrefine 
+trivial +word_lib +word_lib+crefine +x64 ainvs+refine+crefine +x64 aspec +x64 crefine +x64 design +x64 design/skel +x64 haskell +x64 machine +x64 refine +x64 refine+crefine +``` + +Most of these could be prefixed with `rt` if they only made sense on the `rt` +branch, e.g. `rt arm ainv+refine:` + +[git-conventions]: https://docs.sel4.systems/processes/git-conventions.html +[commit-messages]: https://chris.beams.io/posts/git-commit/ diff --git a/docs/find-theorems.md b/docs/find-theorems.md index 0dcb829bc7..e5df394155 100644 --- a/docs/find-theorems.md +++ b/docs/find-theorems.md @@ -10,7 +10,7 @@ This command is for searching for theorems. If you are looking for a constant/function instead, look at [find_consts](find-consts.md). There is an introduction to the `find_theorems` command in the -[Isabelle/HOL tutorial](http://isabelle.in.tum.de/documentation.html). +[Isabelle/HOL tutorial](https://isabelle.in.tum.de/documentation.html). Here we cover some additional material and useful patterns. `find_theorems` can be written in the theory as a diagnostic command, or diff --git a/docs/setup.md b/docs/setup.md index c0a0a324b4..e7b96707f3 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -1,4 +1,5 @@ + +# Basic Library Theories + +This session contains basic library theories that are needed in other sessions +of this repository, such as [Monads] or [CParser], but that we do not want to +put into these sessions to avoid circular session dependencies. + +Dependencies on `Word_Lib` and the Isabelle distribution (e.g. `HOL-Libary`) are +fine, but avoid introducing any further session dependencies. + +[Monads]: ../Monads/ +[CParser]: ../../tools/c-parser diff --git a/lib/Basics/ROOT b/lib/Basics/ROOT new file mode 100644 index 0000000000..add4bd49bc --- /dev/null +++ b/lib/Basics/ROOT @@ -0,0 +1,12 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +chapter Lib + +session Basics (lib) = Word_Lib + + + theories + CLib \ No newline at end of file diff --git a/lib/Bisim_UL.thy b/lib/Bisim_UL.thy index a4d72b61e1..4eb36999f6 100644 --- a/lib/Bisim_UL.thy +++ b/lib/Bisim_UL.thy @@ -8,9 +8,9 @@ theory Bisim_UL imports - NonDetMonadVCG + Monads.Nondet_VCG Corres_UL - EmptyFailLib + Monads.Nondet_Empty_Fail begin (* This still looks a bit wrong to me, although it is more or less what I want \ we want to be @@ -159,7 +159,7 @@ lemma bisim_split_handle: (* Set up wpc *) lemma wpc_helper_bisim: - "bisim_underlying SR r Q Q' f f' \ wpc_helper (P, P') (Q, {s. Q' s}) (bisim_underlying SR r P (\s. 
s \ P') f f')" + "bisim_underlying SR r Q Q' f f' \ wpc_helper (P, P', P'') (Q, Q', Q'') (bisim_underlying SR r P P' f f')" apply (clarsimp simp: wpc_helper_def) apply (erule bisim_guard_imp) apply simp @@ -342,7 +342,7 @@ lemmas dets_to_det_on [wp] = det_det_on [OF det_gets] det_det_on [OF return_det] (* Set up wpc *) lemma wpc_helper_det_on: - "det_on Q f \ wpc_helper (P, P') (Q, Q') (det_on P f)" + "det_on Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (det_on P f)" apply (clarsimp simp: wpc_helper_def det_on_def) done @@ -426,7 +426,7 @@ lemma not_empty_gets [wp]: (* Set up wpc *) lemma wpc_helper_not_empty: - "not_empty Q f \ wpc_helper (P, P') (Q, Q') (not_empty P f)" + "not_empty Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (not_empty P f)" apply (clarsimp simp: wpc_helper_def not_empty_def) done diff --git a/lib/CorresK/CorresK_Lemmas.thy b/lib/CorresK/CorresK_Lemmas.thy index 1588afd6b9..df0a11085f 100644 --- a/lib/CorresK/CorresK_Lemmas.thy +++ b/lib/CorresK/CorresK_Lemmas.thy @@ -7,12 +7,12 @@ theory CorresK_Lemmas imports - "Lib.Corres_Method" + "Lib.CorresK_Method" "ExecSpec.Syscall_H" "ASpec.Syscall_A" begin -lemma corres_throwError_str [corres_concrete_rER]: +lemma corres_throwError_str [corresK_concrete_rER]: "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throw b)" "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throwError b)" by (simp add: corres_underlyingK_def)+ @@ -41,7 +41,7 @@ lemma mapME_x_corresK_inv: show ?case apply (simp add: mapME_x_def sequenceE_x_def) apply (fold mapME_x_def sequenceE_x_def dc_def) - apply (corressimp corresK: x IH wp: y) + apply (corresKsimp corresK: x IH wp: y) done qed done @@ -141,7 +141,7 @@ lemma corresK_mapM_list_all2: lemma corresK_discard_rv: assumes A[corresK]: "corres_underlyingK sr nf nf' F r' P P' a c" shows "corres_underlyingK sr nf nf' F dc P P' (do x \ a; return () od) (do x \ c; return () od)" - by corressimp + by corresKsimp lemma corresK_mapM_mapM_x: assumes "corres_underlyingK sr nf nf' F r' P P' (mapM f as) (mapM f' cs)" @@ -163,12 +163,12 @@ lemma corresK_subst_both: "g' = f' \ g = f \ lemma if_fun_true: "(if A then B else (\_. True)) = (\s. (A \ B s))" by simp -lemmas corresK_whenE [corres_splits] = +lemmas corresK_whenE [corresK_splits] = corresK_if[THEN corresK_subst_both[OF whenE_def[THEN meta_eq_to_obj_eq] whenE_def[THEN meta_eq_to_obj_eq]], OF _ corresK_returnOk[where r="f \ dc" for f], simplified, simplified if_fun_true] -lemmas corresK_head_splits[corres_splits] = +lemmas corresK_head_splits[corresK_splits] = corresK_split[where d="return", simplified] corresK_splitE[where d="returnOk", simplified] corresK_split[where b="return", simplified] @@ -192,7 +192,7 @@ lemmas [corresK] = corresK_Id[where nf'=True and r="(=)", simplified] corresK_Id[where nf'=True, simplified] -lemma corresK_unit_rv_eq_any[corres_concrete_r]: +lemma corresK_unit_rv_eq_any[corresK_concrete_r]: "corres_underlyingK sr nf nf' F r P P' f f' \ corres_underlyingK sr nf nf' F (\(x :: unit) (y :: unit). x = y) P P' f f'" @@ -201,7 +201,7 @@ lemma corresK_unit_rv_eq_any[corres_concrete_r]: apply simp done -lemma corresK_unit_rv_dc_any[corres_concrete_r]: +lemma corresK_unit_rv_dc_any[corresK_concrete_r]: "corres_underlyingK sr nf nf' F r P P' f f' \ corres_underlyingK sr nf nf' F (\(x :: unit) (y :: unit). 
dc x y) P P' f f'" diff --git a/lib/CorresK_Method.thy b/lib/CorresK_Method.thy new file mode 100644 index 0000000000..30b494b903 --- /dev/null +++ b/lib/CorresK_Method.thy @@ -0,0 +1,1133 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory CorresK_Method +imports Corres_Cases SpecValid_R +begin + +(* Advanced Eisbach example for automating corres proofs via a new corresK calculus that improves + on some of properties that are problematic for automation in the original corres calculus. + + See also section 7.3 in + + Daniel Matichuk: Automation for proof engineering: Machine-checked proofs at scale, + PhD thesis, UNSW 2018. https://trustworthy.systems/publications/papers/Matichuk%3Aphd.abstract +*) + +chapter \CorresK Methods\ + +section \Boilerplate\ + +context begin + +private definition "my_true \ True" + +private lemma my_true: "my_true" by (simp add: my_true_def) + +method no_schematic_concl = (fails \rule my_true\) + +end + +definition + "corres_underlyingK sr nf nf' F r Q Q' f g \ + F \ corres_underlying sr nf nf' r Q Q' f g" + +lemma corresK_name_pre: + "\ \s s'. \ P s; P' s'; F; (s, s') \ sr \ + \ corres_underlyingK sr nf nf' F r ((=) s) ((=) s') f g \ + \ corres_underlyingK sr nf nf' F r P P' f g" + apply (clarsimp simp add: corres_underlyingK_def) + apply (rule corres_name_pre) + apply blast + done + +lemma corresK_assume_pre: + "\ \s s'. \ P s; P' s'; F; (s, s') \ sr \ + \ corres_underlyingK sr nf nf' F r P P' f g \ + \ corres_underlyingK sr nf nf' F r P P' f g" + apply (clarsimp simp add: corres_underlyingK_def) + apply (rule corres_assume_pre) + apply blast + done + +lemma corresK_drop_any_guard: + "corres_underlying sr nf nf' r Q Q' f g \ corres_underlyingK sr nf nf' F r Q Q' f g" + by (simp add: corres_underlyingK_def) + +lemma corresK_assume_guard: + "(F \ corres_underlying sr nf nf' r Q Q' f g) \ corres_underlyingK sr nf nf' F r Q Q' f g" + by (simp add: corres_underlyingK_def) + +lemma corresK_unlift: + "corres_underlyingK sr nf nf' F r Q Q' f g \ (F \ corres_underlying sr nf nf' r Q Q' f g)" + by (simp add: corres_underlyingK_def) + +lemma corresK_lift: + "corres_underlying sr nf nf' r Q Q' f g \ corres_underlyingK sr nf nf' F r Q Q' f g" + by (simp add: corres_underlyingK_def) + +lemma corresK_lift_rule: + "corres_underlying sr nf nf' r Q Q' f g \ corres_underlying sra nfa nfa' ra Qa Qa' fa ga + \ corres_underlyingK sr nf nf' F r Q Q' f g \ corres_underlyingK sra nfa nfa' F ra Qa Qa' fa ga" + by (simp add: corres_underlyingK_def) + +lemmas corresK_drop = corresK_drop_any_guard[where F=True] + +context begin + +lemma corresK_start: + assumes x: "corres_underlyingK sr nf nf' F r Q Q' f g" + assumes y: "\s s'. \ P s; P' s'; (s, s') \ sr \ \ F \ Q s \ Q' s'" + shows "corres_underlying sr nf nf' r P P' f g" + using x by (auto simp: y corres_underlying_def corres_underlyingK_def) + +lemma corresK_weaken: + assumes x: "corres_underlyingK sr nf nf' F' r Q Q' f g" + assumes y: "\s s'. 
\ P s; P' s'; F; (s, s') \ sr \ \ F' \ Q s \ Q' s'" + shows "corres_underlyingK sr nf nf' F r P P' f g" + using x by (auto simp: y corres_underlying_def corres_underlyingK_def) + +private lemma corres_trivial: + "False \ corres_underlying sr nf nf' r P P' f f'" + by simp + +method check_corres = + (succeeds \rule corres_trivial\, fails \rule TrueI\) + +private lemma corresK_trivial: + "False \ corres_underlyingK sr nf nf' F r P P' f f'" + by simp + +(* Ensure we don't apply calculational rules if either function is schematic *) + +private definition "dummy_fun \ undefined" + +private lemma corresK_dummy_left: + "False \ corres_underlyingK sr nf nf' F r P P' dummy_fun f'" + by simp + +private lemma corresK_dummy_right: + "False \ corres_underlyingK sr nf nf' F r P P' f dummy_fun" + by simp + +method check_corresK = + (succeeds \rule corresK_trivial\, fails \rule corresK_dummy_left corresK_dummy_right\) + +private definition "my_false s \ False" + +private + lemma corres_my_falseE: "my_false x \ P" by (simp add: my_false_def) + +private method no_schematic_prems = (fails \erule corres_my_falseE\) + +private lemma hoare_pre: "\my_false\ f \Q\" by (simp add: valid_def my_false_def) +private lemma hoareE_pre: "\my_false\ f \Q\,\Q'\" by (simp add: validE_def valid_def my_false_def) +private lemma hoare_E_E_pre: "\my_false\ f -,\Q\" by (simp add: validE_E_def validE_def valid_def my_false_def) +private lemma hoare_E_R_pre: "\my_false\ f \Q\,-" by (simp add: validE_R_def validE_def valid_def my_false_def) + +private lemmas hoare_pres = hoare_pre hoare_pre hoare_E_E_pre hoare_E_R_pre + +method schematic_hoare_pre = (succeeds \rule hoare_pres\) + +private + lemma corres_my_false: "corres_underlying sr nf nf' r my_false P f f'" + "corres_underlying sr nf nf' r P' my_false f f'" + by (auto simp add: my_false_def[abs_def] corres_underlying_def) + +private + lemma corresK_my_false: "corres_underlyingK sr nf nf' F r my_false P f f'" + "corres_underlyingK sr nf nf' F r P' my_false f f'" + by (auto simp add: corres_my_false corres_underlyingK_def) + + +method corresK_raw_pre = + (check_corres, (fails \rule corres_my_false\, rule corresK_start)?) + +lemma corresK_weaken_states: + "corres_underlyingK sr nf nf' F r Q Q' f g \ + corres_underlyingK sr nf nf' (F \ (\s s'. P s \ P' s' \ (s, s') \ sr \ Q s \ Q' s')) + r P P' f g" + apply (erule corresK_weaken) + apply simp + done + +private lemma + corresK_my_falseF: + "corres_underlyingK sr nf nf' (my_false undefined) r P P' f f'" + by (simp add: corres_underlyingK_def my_false_def) + +method corresK_pre = + (check_corresK, + (fails \rule corresK_my_false\, + ((succeeds \rule corresK_my_falseF\, rule corresK_weaken_states) | + rule corresK_weaken))) + +method corresK_pre' = (corresK_raw_pre | corresK_pre)? + +lemma corresK_weakenK: + "corres_underlyingK sr nf nf' F' r P P' f f' \ (F \ F') \ corres_underlyingK sr nf nf' F r P P' f f'" + by (simp add: corres_underlyingK_def) + +(* Special corres rules which should only be applied when the return value relation is + concrete, to avoid bare schematics. *) + +named_theorems corresK_concrete_r and corresK_concrete_rER + +private lemma corres_r_False: + "False \ corres_underlyingK sr nf nf' F (\_. my_false) P P' f f'" + by simp + +private lemma corres_r_FalseE: + "False \ corres_underlyingK sr nf nf' F ((\_. my_false) \ r) P P' f f'" + by simp + +private lemma corres_r_FalseE': + "False \ corres_underlyingK sr nf nf' F (r \ (\_. 
my_false)) P P' f f'" + by simp + +method corresK_concrete_r declares corresK_concrete_r corresK_concrete_rER = + (fails \rule corres_r_False corres_r_FalseE corres_r_FalseE'\, determ \rule corresK_concrete_r\) + | (fails \rule corres_r_FalseE\, determ \rule corresK_concrete_rER\) + + +end + + +section \CorresKc - Corres over case statements\ + +text + \Based on wpc, corresKc examines the split rule for top-level case statements on the left + and right hand sides, propagating backwards the stateless and left/right preconditions.\ + +definition + wpc2_helper :: "(('a \ bool) \ 'b set) + \ (('a \ bool) \ 'b set) \ (('a \ bool) \ 'b set) + \ (('a \ bool) \ 'b set) \ bool \ bool" where + "wpc2_helper \ \(P, P') (Q, Q') (PP, PP') (QQ,QQ') R. + ((\s. P s \ Q s) \ P' \ Q') \ ((\s. PP s \ QQ s) \ PP' \ QQ') \ R" + +definition + "wpc2_protect B Q \ (Q :: bool)" + +lemma wpc2_helperI: + "wpc2_helper (P, P') (P, P') (PP, PP') (PP, PP') Q \ Q" + by (simp add: wpc2_helper_def) + +lemma wpc2_conj_process: + "\ wpc2_helper (P, P') (A, A') (PP, PP') (AA, AA') C; wpc2_helper (P, P') (B, B') (PP, PP') (BB, BB') D \ + \ wpc2_helper (P, P') (\s. A s \ B s, A' \ B') (PP, PP') (\s. AA s \ BB s, AA' \ BB') (C \ D)" + by (clarsimp simp add: wpc2_helper_def) + +lemma wpc2_all_process: + "\ \x. wpc2_helper (P, P') (Q x, Q' x) (PP, PP') (QQ x, QQ' x) (R x) \ + \ wpc2_helper (P, P') (\s. \x. Q x s, {s. \x. s \ Q' x}) (PP, PP') (\s. \x. QQ x s, {s. \x. s \ QQ' x}) (\x. R x)" + by (clarsimp simp: wpc2_helper_def subset_iff) + +lemma wpc2_imp_process: + "\ wpc2_protect B Q \ wpc2_helper (P, P') (R, R') (PP, PP') (RR, RR') S \ + \ wpc2_helper (P, P') (\s. Q \ R s, {s. Q \ s \ R'}) (PP, PP') (\s. Q \ RR s, {s. Q \ s \ RR'}) (Q \ S)" + by (clarsimp simp add: wpc2_helper_def subset_iff wpc2_protect_def) + + + +text \ + Generate quadratic blowup of the case statements on either side of refinement. + Attempt to discharge resulting contradictions. +\ + +context +begin + +private method corresKc_body for B :: bool uses helper = + determ \(rule wpc2_helperI, + repeat_new \rule wpc2_conj_process wpc2_all_process wpc2_imp_process[where B=B]\ ; (rule helper))\ + +lemma wpc2_helper_corres_left: + "corres_underlyingK sr nf nf' QQ r Q A f f' \ + wpc2_helper (P, P') (Q, Q') (\_. PP,PP') (\_. QQ,QQ') (corres_underlyingK sr nf nf' PP r P A f f')" + by (clarsimp simp: wpc2_helper_def corres_underlyingK_def elim!: corres_guard_imp) + +private method corresKc_left_raw = + determ \(match conclusion in "corres_underlyingK sr nf nf' F r P P' f f'" for sr nf nf' F r P P' f f' + \ \apply_split f "\f. corres_underlyingK sr nf nf' F r P P' f f'"\, + corresKc_body False helper: wpc2_helper_corres_left)\ + +lemma wpc2_helper_corres_right: + "corres_underlyingK sr nf nf' QQ r A Q f f' \ + wpc2_helper (P, P') (Q, Q') (\_. PP,PP') (\_. QQ,QQ') (corres_underlyingK sr nf nf' PP r A P f f')" + by (clarsimp simp: wpc2_helper_def corres_underlyingK_def elim!: corres_guard_imp) + +private method corresKc_right_raw = + determ \(match conclusion in "corres_underlyingK sr nf nf' F r P P' f f'" for sr nf nf' F r P P' f f' + \ \apply_split f' "\f'. 
corres_underlyingK sr nf nf' F r P P' f f'"\, + corresKc_body True helper: wpc2_helper_corres_right)\ + +definition + "corres_protect r = (r :: bool)" + +lemma corres_protect_conj_elim[simp]: + "corres_protect (a \ b) = (corres_protect a \ corres_protect b)" + by (simp add: corres_protect_def) + +lemma wpc2_corres_protect: + "wpc2_protect B Q \ corres_protect Q" + by (simp add: wpc2_protect_def corres_protect_def) + +method corresKc_left = (corresKc_left_raw; (drule wpc2_corres_protect[where B=False])) +method corresKc_right = (corresKc_right_raw; (drule wpc2_corres_protect[where B=True])) + +named_theorems corresKc_simp + +declare wpc2_protect_def[corresKc_simp] +declare corres_protect_def[corresKc_simp] + +lemma corresK_false_guard_instantiate: + "False \ corres_underlyingK sr nf nf' True r P P' f f'" + by (simp add: corres_underlyingK_def) + +lemma + wpc_contr_helper: + "wpc2_protect False (A = B) \ wpc2_protect True (A = C) \ B \ C \ P" + by (auto simp: wpc2_protect_def) + +method corresKc declares corresKc_simp = + (check_corresK, corresKc_left_raw; corresKc_right_raw; + ((solves \rule corresK_false_guard_instantiate, + determ \(erule (1) wpc_contr_helper)?\, simp add: corresKc_simp\) + | (drule wpc2_corres_protect[where B=False], drule wpc2_corres_protect[where B=True])))[1] + +end + +section \CorresK_rv\ + +text \CorresK_rv is used to propagate backwards the stateless precondition (F) from corres_underlyingK. + Its main purpose is to defer the decision of where each condition should go: either continue + through the stateless precondition, or be pushed into the left/right side as a hoare triple.\ + + +(*Don't unfold the definition. Use corresK_rv method or associated rules. *) +definition corres_rv :: "bool \ ('a \ 'b \ bool) \ ('s \ bool) \ ('t \ bool) + \ ('s, 'a) nondet_monad \ ('t, 'b) nondet_monad \ + ('a \ 'b \ bool) \ bool" + where + "corres_rv F r P P' f f' Q \ + F \ (\s s'. P s \ P' s' \ + (\sa rv. (rv, sa) \ fst (f s) \ (\sa' rv'. (rv', sa') \ fst (f' s') \ r rv rv' \ Q rv rv')))" + +(*Don't unfold the definition. Use corresK_rv method or associated rules. *) +definition "corres_rvE_R F r P P' f f' Q \ + corres_rv F (\_ _. True) P P' f f' + (\rvE rvE'. case (rvE,rvE') of (Inr rv, Inr rv') \ r rv rv' \ Q rv rv' | _ \ True)" + +lemma corres_rvD: + "corres_rv F r P P' f f' Q \ + F \ P s \ P' s' \ (rv,sa) \ fst (f s) \ + (rv',sa') \ fst (f' s') \ r rv rv' \ Q rv rv'" + by (auto simp add: corres_rv_def) + +lemma corres_rvE_RD: + "corres_rvE_R F r P P' f f' Q \ + F \ P s \ P' s' \ (Inr rv,sa) \ fst (f s) \ + (Inr rv',sa') \ fst (f' s') \ r rv rv' \ Q rv rv'" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemma corres_rv_prove: + "(\s s' sa sa' rv rv'. F \ + (rv,sa) \ fst (f s) \ (rv',sa') \ fst (f' s') \ P s \ P' s' \ r rv rv' \ Q rv rv') \ + corres_rv F r P P' f f' Q" + by (auto simp add: corres_rv_def) + +lemma corres_rvE_R_prove: + "(\s s' sa sa' rv rv'. F \ + (Inr rv,sa) \ fst (f s) \ (Inr rv',sa') \ fst (f' s') \ P s \ P' s' \ r rv rv' \ Q rv rv') \ + corres_rvE_R F r P P' f f' Q" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemma corres_rv_wp_left: + "\P\ f \\rv s. \rv'. r rv rv' \ Q rv rv'\ \ corres_rv True r P \ f f' Q" + by (fastforce simp add: corres_rv_def valid_def) + +lemma corres_rvE_R_wp_left: + "\P\ f \\rv s. \rv'. 
r rv rv' \ Q rv rv'\, - \ corres_rvE_R True r P \ f f' Q" + apply (simp add: corres_rvE_R_def validE_def validE_R_def) + apply (rule corres_rv_wp_left) + apply (erule hoare_strengthen_post) + apply (auto split: sum.splits) + done + +lemma corres_rv_wp_right: + "\P'\ f' \\rv' s. \rv. r rv rv' \ Q rv rv'\ \ corres_rv True r \ P' f f' Q" + by (fastforce simp add: corres_rv_def valid_def) + +lemma corres_rvE_R_wp_right: + "\P'\ f' \\rv' s. \rv. r rv rv' \ Q rv rv'\, - \ corres_rvE_R True r \ P' f f' Q" + apply (simp add: corres_rvE_R_def validE_def validE_R_def) + apply (rule corres_rv_wp_right) + apply (erule hoare_strengthen_post) + apply (auto split: sum.splits) + done + +lemma corres_rv_weaken: + "(\rv rv'. Q rv rv' \ Q' rv rv') \ corres_rv F r P P' f f' Q \ corres_rv F r P P' f f' Q'" + by (auto simp add: corres_rv_def) + +lemma corres_rvE_R_weaken: + "(\rv rv'. Q rv rv' \ Q' rv rv') \ corres_rvE_R F r P P' f f' Q \ corres_rvE_R F r P P' f f' Q'" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemma corres_rv_defer_no_args: + "corres_rv (\rv rv'. r rv rv' \ F) r (\_. True) (\_. True) f f' (\_ _. F)" + by (auto simp add: corres_rv_def) + +lemma corres_rvE_R_defer_no_args: + "corres_rvE_R (\rv rv'. r rv rv' \ F) r (\_. True) (\_. True) f f' (\_ _. F)" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +(*UNSAFE*) +lemma corres_rv_defer: + "corres_rv (\rv rv'. r rv rv' \ Q rv rv') r (\_. True) (\_. True) f f' Q" + by (auto simp add: corres_rv_def) + +(*UNSAFE*) +lemma corres_rvE_R_defer: + "corres_rvE_R (\rv rv'. r rv rv' \ Q rv rv') r (\_. True) (\_. True) f f' Q" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemmas corres_rv_proveT = + corres_rv_prove[where P=\ and P'=\ and F=True, simplified] + +lemmas corres_rvE_R_proveT = + corres_rvE_R_prove[where P=\ and P'=\ and F=True,simplified] + +lemma corres_rv_conj_lift: + "corres_rv F r P PP f g Q \ corres_rv F' r P' PP' f g Q' \ + corres_rv (F \ F') r (\s. P s \ P' s) (\s'. PP s' \ PP' s') f g (\rv rv'. Q rv rv' \ Q' rv rv')" + by (clarsimp simp add: corres_rv_def) + +lemma corres_rvE_R_conj_lift: + "corres_rvE_R F r P PP f g Q \ corres_rvE_R F' r P' PP' f g Q' \ + corres_rvE_R (F \ F') r (\s. P s \ P' s) (\s'. PP s' \ PP' s') f g (\rv rv'. Q rv rv' \ Q' rv rv')" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +subsection \CorresK_rv method\ + +text \This method propagate corresK_rv obligations into each precondition according to the following +heuristic: + For each conjunct in the obligation: + + 1) Try to solve trivially (to handle schematic conditions) + 2) If it does not depend on function return values, propagate it as a stateless precondition + 3) If either side is a corres_noop (used by symbolic execution), propagate as hoare triple + for other side. + 4) If it can be phrased entirely with variables accessible to the left side, propagate it as + a left hoare triple. + 5) As in 3) but on the right. + + Fail if any of 1-5 are unsuccessful for any conjunct. + +In the case where corres_rv fails, the user will need to intervene, either +by specifying where to defer the obligation or solving the goal in-place. +\ + +definition "corres_noop = return undefined" + +context begin + +private lemma corres_rv_defer_left: + "corres_rv F r (\_. \rv rv'. Q rv rv') P' f f' Q" + by (simp add: corres_rv_def) + +private lemma corres_rvE_R_defer_left: + "corres_rvE_R F r (\_. \rv rv'. 
Q rv rv') P' f f' Q" + by (simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +private lemma corres_rv_defer_right: + "corres_rv F r P (\_. \rv rv'. Q rv rv') f f' Q" + by (simp add: corres_rv_def) + +private lemma corres_rvE_R_defer_right: + "corres_rvE_R F r P (\_. \rv rv'. Q rv rv') f f' Q" + by (simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemmas corres_rv_proves = + corres_rv_proveT corres_rvE_R_proveT + +(* Try to handle cases where corres_rv obligations have been left schematic *) +lemmas corres_rv_trivials = + corres_rv_proves[where Q="\_ _. True", OF TrueI] + corres_rv_proves[where Q="\rv rv'. F rv rv' \ True" for F, # \simp\] + corres_rv_proves[where Q=r and r=r for r, # \simp\] + +lemmas corres_rv_defers = + corres_rv_defer_no_args corres_rvE_R_defer_no_args + +lemmas corres_rv_wp_lefts = + corres_rv_wp_left corres_rvE_R_wp_left + +lemmas corres_rv_wp_rights = + corres_rv_wp_right corres_rvE_R_wp_right + +lemmas corres_rv_noops = + corres_rv_wp_lefts[where f'=corres_noop] corres_rv_wp_rights[where f=corres_noop] + +lemmas corres_rv_lifts' = + corres_rv_conj_lift corres_rvE_R_conj_lift + +lemmas corres_rv_lifts = + corres_rv_lifts' + corres_rv_lifts'[where P="\_. True" and P'="\_. True" and f="corres_noop", simplified] + corres_rv_lifts'[where PP="\_. True" and PP'="\_. True" and g="corres_noop", simplified] + +lemmas corres_rv_prove_simple = + corres_rv_proveT[# \thin_tac _, thin_tac _\, simplified] + +method corresK_rv = + (((repeat_new \rule corres_rv_trivials corres_rv_lifts\)?); + ((rule corres_rv_trivials corres_rv_defers corres_rv_noops | + (succeeds \rule corres_rv_defer_left corres_rvE_R_defer_left\, + rule corres_rv_wp_lefts) | + (succeeds \rule corres_rv_defer_right corres_rvE_R_defer_right\, + rule corres_rv_wp_rights)))) + +end + + +section \CorresK Split rules\ + +text \ + The corresK split allows preconditions to be propagated backward via the extra stateless precondition + (here given as @{term F}. The head function is propagated backward directly, while the tail + is propagated via corres_rv. Using the corresK_rv method, this condition is then decomposed and + pushed into the stateless, left, and right preconditions as appropriate. + + The return value relation is now almost never needed directly, and so it is wrapped in corres_protect + to prevent it from being used during simplification. + \ + +lemma corresK_split: + assumes x: "corres_underlyingK sr nf nf' F r' P P' a c" + assumes y: "\rv rv'. corres_protect (r' rv rv') \ corres_underlyingK sr nf nf' (F' rv rv') r (R rv) (R' rv') (b rv) (d rv')" + assumes c: "corres_rv F'' r' PP PP' a c F'" + assumes z: "\Q\ a \R\" "\Q'\ c \R'\" + shows "corres_underlyingK sr nf nf' (F \ F'') r (PP and P and Q) (PP' and P' and Q') (a >>= (\rv. b rv)) (c >>= (\rv'. 
d rv'))" + apply (clarsimp simp: corres_underlying_def corres_underlyingK_def bind_def) + apply (rule conjI) + apply (frule (3) x[simplified corres_underlyingK_def, rule_format, THEN corres_underlyingD],simp) + apply clarsimp + apply (drule(1) bspec,clarsimp) + apply (drule (5) corres_rvD[OF c]) + apply (rule_tac x="(ac,bc)" in bexI,clarsimp) + apply (frule_tac s'=baa in y[simplified corres_underlyingK_def corres_protect_def, rule_format, THEN corres_underlyingD]) + apply assumption+ + apply (erule(1) use_valid[OF _ z(1)]) + apply (erule(1) use_valid[OF _ z(2)]) + apply fastforce + apply clarsimp + apply (drule(1) bspec,clarsimp) + apply simp + apply (frule (3) x[simplified corres_underlyingK_def, rule_format, THEN corres_underlyingD],simp) + apply clarsimp + apply (drule(1) bspec,clarsimp) + apply (drule (5) corres_rvD[OF c]) + apply (frule_tac s'=baa in y[simplified corres_underlyingK_def corres_protect_def, rule_format, THEN corres_underlyingD]) + apply simp+ + apply (erule(1) use_valid[OF _ z(1)]) + apply (erule(1) use_valid[OF _ z(2)]) + apply fastforce + apply clarsimp + done + +section \CorresK_inst\ + +text \Handles rare in-place subgoals generated by corres rules which need to be solved immediately + in order to instantiate a schematic. + We peek into the generated return-value relation to see if we can solve the instantiation. +\ + +definition "corres_inst_eq x y \ x = y" + +lemma corres_inst_eqI[wp]: "corres_inst_eq x x" by (simp add: corres_inst_eq_def) + +lemma corres_inst_test: "False \ corres_inst_eq x y" by simp + +method corresK_inst = + (succeeds \rule corres_inst_test\, fails \rule TrueI\, + (rule corres_inst_eqI | + (clarsimp simp: corres_protect_def split del: if_split, rule corres_inst_eqI) + | (clarsimp simp: corres_protect_def split del: if_split, + fastforce intro!: corres_inst_eqI)))[1] + +section \CorresK Method\ + +text \Handles structured decomposition of corres goals\ + +named_theorems + corresK_splits and (* rules that, one applied, must + eventually yield a successful corres or corresK rule application*) + corresK (* calculational rules that are phrased as corresK rules *) + +context begin + +lemma corresK_fold_dc: + "corres_underlyingK sr nf nf' F dc P P' f f' \ corres_underlyingK sr nf nf' F (\_ _. True) P P' f f'" + by (simp add: dc_def[abs_def]) + +private method corresK_fold_dc = + (match conclusion in + "corres_underlyingK _ _ _ _ (\_ _. True) _ _ _ _" \ \rule corresK_fold_dc\) + +section \CorresK_apply method\ + +text \This is a private method that performs an in-place rewrite of corres rules into + corresK rules. This is primarily for backwards-compatibility with the existing corres proofs. + Works by trying to apply a corres rule, then folding the resulting subgoal state into a single + conjunct and atomizing it, then propagating the result into the stateless precondition. 
+\ + +private definition "guard_collect (F :: bool) \ F" +private definition "maybe_guard F \ True" + +private lemma corresK_assume_guard_guarded: + "(guard_collect F \ corres_underlying sr nf nf' r Q Q' f g) \ + maybe_guard F \ corres_underlyingK sr nf nf' F r Q Q' f g" + by (simp add: corres_underlyingK_def guard_collect_def) + +private lemma guard_collect: "guard_collect F \ F" + by (simp add: guard_collect_def) + +private lemma has_guard: "maybe_guard F" by (simp add: maybe_guard_def) +private lemma no_guard: "maybe_guard True" by (simp add: maybe_guard_def) + +private method corresK_apply = + (rule corresK_assume_guard_guarded, + (determ \rule corres\, safe_fold_subgoals)[1], + #break "corres_apply", + ((focus_concl \(atomize (full))?\, erule guard_collect, rule has_guard) | rule no_guard))[1] + +private method corresK_alternate = corresK_inst | corresK_rv + +(* Corres_Method and CorresK_Method share the [corres] set. Corres_Method is more resilient against + unsafe terminal rules that set, so we list those [corres] rules here that might be problematic + for corresK. Users shouldn't need to interact with this set, but if you have declared something + [corres] and want it to be used by the corres method only (not corresK), then additionally + declare it [corres_unsafeK]. *) +named_theorems corres_unsafeK +lemmas [corres_unsafeK] = + whenE_throwError_corres + corres_if2 + corres_when + corres_whenE + corres_split_handle + corres_split_catch + corres_mapM_x + +method corresK_once declares corresK_splits corres corresK corresKc_simp = + use corres_unsafeK[corres del] in \use in \ + (no_schematic_concl, + (corresK_alternate | + (corresK_fold_dc?, + (corresK_pre', + #break "corres", + ( (check_corresK, determ \rule corresK\) + | corresK_apply + | corresK_concrete_r + | corresKc + | (rule corresK_splits, corresK_once) + )))))\\ + + +method corresK declares corresK_splits corres corresK corresKc_simp = + (corresK_once+)[1] + +text \Unconditionally try applying split rules. Useful for determining why corres is not applying + in a given proof.\ + +method corresK_unsafe_split declares corresK_splits corres corresK corresKc_simp = + ((rule corresK_splits | corresK_pre' | corresK_once)+)[1] + +end + +lemmas [corresK_splits] = + corresK_split + +lemma corresK_when [corresK_splits]: + "\corres_protect G \ corres_protect G' \ corres_underlyingK sr nf nf' F dc P P' a c\ +\ corres_underlyingK sr nf nf' ((G = G') \ F) dc ((\x. G \ P x)) (\x. G' \ P' x) (when G a) (when G' c)" + apply (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) + apply (cases "G = G'"; cases G; simp) + by (clarsimp simp: return_def) + +lemma corresK_return_trivial: + "corres_underlyingK sr nf nf' True dc (\_. True) (\_. True) (return ()) (return ())" + by (simp add: corres_underlyingK_def) + +lemma corresK_return_eq: + "corres_underlyingK sr nf nf' True (=) (\_. True) (\_. 
True) (return x) (return x)" + by (simp add: corres_underlyingK_def) + +lemma corres_lift_to_K: + "corres_underlying sra nfa nf'a ra Pa P'a fa f'a \ corres_underlying sr nf nf' r P P' f f' \ + corres_underlyingK sra nfa nf'a F ra Pa P'a fa f'a \ corres_underlyingK sr nf nf' F r P P' f f'" + by (simp add: corres_underlyingK_def) + +lemmas [THEN iffD2, atomized, THEN corresK_lift_rule, rule_format, simplified o_def, corresK_splits] = + corres_liftE_rel_sum + corres_liftM_simp + corres_liftM2_simp + + +lemmas [corresK] = + corresK_return_trivial + corresK_return_eq + +lemma corresK_subst_left: "g = f \ + corres_underlyingK sr nf nf' F r P P' f f' \ + corres_underlyingK sr nf nf' F r P P' g f'" by simp + +lemma corresK_subst_right: "g' = f' \ + corres_underlyingK sr nf nf' F r P P' f f' \ + corres_underlyingK sr nf nf' F r P P' f g'" by simp + +lemmas corresK_fun_app_left[corresK_splits] = corresK_subst_left[OF fun_app_def[THEN meta_eq_to_obj_eq]] +lemmas corresK_fun_app_right[corresK_splits] = corresK_subst_right[OF fun_app_def[THEN meta_eq_to_obj_eq]] + +lemmas corresK_Let_left[corresK_splits] = corresK_subst_left[OF Let_def[THEN meta_eq_to_obj_eq]] +lemmas corresK_Let_right[corresK_splits] = corresK_subst_right[OF Let_def[THEN meta_eq_to_obj_eq]] + +lemmas corresK_return_bind_left[corresK_splits] = corresK_subst_left[OF return_bind] +lemmas corresK_return_bind_right[corresK_splits] = corresK_subst_right[OF return_bind] + +lemmas corresK_liftE_bindE_left[corresK_splits] = corresK_subst_left[OF liftE_bindE] +lemmas corresK_liftE_bindE_right[corresK_splits] = corresK_subst_right[OF liftE_bindE] + +lemmas corresK_K_bind_left[corresK_splits] = + corresK_subst_left[where g="K_bind f rv" and f="f" for f rv, # \simp\] + +lemmas corresK_K_bind_right[corresK_splits] = + corresK_subst_right[where g'="K_bind f' rv" and f'="f'" for f' rv, # \simp\] + + +section \CorresK Search - find symbolic execution path that allows a given rule to be applied\ + +lemma corresK_if [corresK_splits]: + "\(corres_protect G \ corres_protect G' \ corres_underlyingK sr nf nf' F r P P' a c); + (corres_protect (\G) \ corres_protect (\G') \ corres_underlyingK sr nf nf' F' r Q Q' b d)\ +\ corres_underlyingK sr nf nf' ((G = G') \ (G \ F) \ (\G \ F')) r (if G then P else Q) (if G' then P' else Q') (if G then a else b) + (if G' then c else d)" + by (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) + +lemma corresK_if_rev: + "\(corres_protect (\ G) \ corres_protect G' \ corres_underlyingK sr nf nf' F r P P' a c); + (corres_protect G \ corres_protect (\G') \ corres_underlyingK sr nf nf' F' r Q Q' b d)\ +\ corres_underlyingK sr nf nf' ((\ G = G') \ (\G \ F) \ (G \ F')) r (if G then Q else P) (if G' then P' else Q') (if G then b else a) + (if G' then c else d)" + by (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) + + + +named_theorems corresK_symb_exec_ls and corresK_symb_exec_rs + +lemma corresK_symb_exec_l_search[corresK_symb_exec_ls]: + fixes x :: "'b \ 'a \ ('d \ 'a) set \ bool" + notes [simp] = corres_noop_def + shows + "\\s. \PP s\ m \\_. (=) s\; \rv. corres_underlyingK sr nf True (F rv) r (Q rv) P' (x rv) y; + corres_rv F' dc RR (\_. True) m (corres_noop) (\rv _. F rv); + empty_fail m; no_fail P m; \R\ m \Q\\ +\ corres_underlyingK sr nf True F' r (RR and P and R and (\s. \s'. 
s = s' \ PP s' s)) P' (m >>= x) y" + apply (clarsimp simp add: corres_underlyingK_def) + apply (rule corres_name_pre) + apply (clarsimp simp: corres_underlying_def corres_underlyingK_def + bind_def valid_def empty_fail_def no_fail_def) + apply (drule_tac x=a in meta_spec)+ + apply (drule_tac x=a in spec)+ + apply (drule mp, assumption)+ + apply (clarsimp simp: not_empty_eq) + apply (drule corres_rvD; (assumption | simp add: return_def)?) + apply (drule_tac x="(aa,ba)" in bspec,simp)+ + apply clarsimp + apply (drule_tac x=aa in meta_spec) + apply clarsimp + apply (drule_tac x="(ba,b)" in bspec,simp) + apply clarsimp + apply (drule mp, fastforce) + apply clarsimp + apply (drule_tac x="(a,bb)" in bspec,simp) + apply clarsimp + apply (rule_tac x="(aa,ba)" in bexI) + apply (clarsimp) + apply (rule_tac x="(ab,bc)" in bexI) + apply (clarsimp)+ + done + + +lemmas corresK_symb_exec_liftME_l_search[corresK_symb_exec_ls] = + corresK_symb_exec_l_search[where 'd="'x + 'y", folded liftE_bindE] + +lemma corresK_symb_exec_r_search[corresK_symb_exec_rs]: + fixes y :: "'b \ 'a \ ('e \ 'a) set \ bool" + assumes X: "\s. \PP' s\ m \\r. (=) s\" + assumes corres: "\rv. corres_underlyingK sr nf nf' (F rv) r P (Q' rv) x (y rv)" + assumes Y: "corres_rv F' dc (\_. True) RR (corres_noop) m (\_ rv'. F rv')" + assumes nf: "nf' \ no_fail P' m" + assumes Z: "\R\ m \Q'\" + notes [simp] = corres_noop_def + shows + "corres_underlyingK sr nf nf' F' r P (RR and P' and R and (\s. \s'. s = s' \ PP' s' s)) x (m >>= y)" + apply (insert corres) + apply (simp add: corres_underlyingK_def) + apply (rule impI) + apply (rule corres_name_pre) + apply (clarsimp simp: corres_underlying_def corres_underlyingK_def + bind_def valid_def empty_fail_def no_fail_def) + apply (intro impI conjI ballI) + apply clarsimp + apply (frule(1) use_valid[OF _ X]) + apply (drule corres_rvD[OF Y]; (assumption | simp add: return_def)?) + apply (frule(1) use_valid[OF _ Z]) + apply (drule_tac x=aa in meta_spec) + apply clarsimp + apply (drule_tac x="(a, ba)" in bspec,simp) + apply (clarsimp) + apply (drule(1) bspec) + apply clarsimp + apply clarsimp + apply (frule(1) use_valid[OF _ X]) + apply (drule corres_rvD[OF Y]; (assumption | simp add: return_def)?) + apply (frule(1) use_valid[OF _ Z]) + apply fastforce + apply (rule no_failD[OF nf],simp+) + done + +lemmas corresK_symb_exec_liftME_r_search[corresK_symb_exec_rs] = + corresK_symb_exec_r_search[where 'e="'x + 'y", folded liftE_bindE] + +context begin + +private method corresK_search_wp = solves \((wp | wpc | simp)+)[1]\ + +text \ + Depth-first search via symbolic execution of both left and right hand + sides, handling case statements and + potentially mismatched if statements. The find_goal + method handles searching each branch of case or if statements, while + we rely on backtracking to guess the order of left/right executions. + + According to the above rules, a symbolic execution step can be taken + when the function can be shown to not modify the state. Questions + of wellformedness (i.e. empty_fail or no_fail) are deferred to the user + after the search concludes. 
+\ + + +private method corresK_search_frame methods m uses search = + (#break "corresK_search", + ((corresK?, corresK_once corres: search corresK:search) + | (corresKc, find_goal \m\)[1] + | (rule corresK_if, find_goal \m\)[1] + | (rule corresK_if_rev, find_goal \m\)[1] + | (rule corresK_symb_exec_ls, corresK_search_wp, m) + | (rule corresK_symb_exec_rs, corresK_search_wp, m))) + +text \ + Set up local context where we make sure we don't know how to + corres our given rule. The search is finished when we can only + make corres progress once we add our rule back in +\ + +method corresK_search uses search + declares corres corresK_symb_exec_ls corresK_symb_exec_rs = + (corresK_pre', + use search[corres del] search[corresK del] search[corresK_splits del] in + \use in \corresK_search_frame \corresK_search search: search\ search: search\\)[1] + +end + +chapter \Misc Helper Lemmas\ + + +lemma corresK_assert[corresK]: + "corres_underlyingK sr nf nf' ((nf' \ Q) \ P) dc \ \ (assert P) (assert Q)" + by (auto simp add: corres_underlyingK_def corres_underlying_def return_def assert_def fail_def) + +lemma corres_stateAssert_implied_frame: + assumes A: "\s s'. (s, s') \ sr \ F' \ P' s \ Q' s' \ A s'" + assumes C: "\x. corres_underlyingK sr nf nf' F r P Q f (g x)" + shows + "corres_underlyingK sr nf nf' (F \ F') r (P and P') (Q and Q') f (stateAssert A [] >>= g)" + apply (clarsimp simp: bind_assoc stateAssert_def) + apply (corresK_search search: C[THEN corresK_unlift]) + apply (wp corres_rv_defer | simp add: A)+ + done + +lemma corresK_return [corresK_concrete_r]: + "corres_underlyingK sr nf nf' (r a b) r \ \ (return a) (return b)" + by (simp add: corres_underlyingK_def) + +lemma corres_throwError_str [corresK_concrete_rER]: + "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throwError b)" + by (simp add: corres_underlyingK_def)+ + +section \Error Monad\ + + + +lemma corresK_splitE [corresK_splits]: + assumes x: "corres_underlyingK sr nf nf' F (f \ r') P P' a c" + assumes y: "\rv rv'. corres_protect (r' rv rv') \ corres_underlyingK sr nf nf' (F' rv rv') (f \ r) (R rv) (R' rv') (b rv) (d rv')" + assumes c: "corres_rvE_R F'' r' PP PP' a c F'" + assumes z: "\Q\ a \R\, -" "\Q'\ c \R'\, -" + shows "corres_underlyingK sr nf nf' (F \ F'') (f \ r) (PP and P and Q) (PP' and P' and Q') (a >>=E (\rv. b rv)) (c >>=E (\rv'. d rv'))" + unfolding bindE_def + apply (rule corresK_weakenK) + apply (rule corresK_split[OF x, where F'="\rv rv'. case (rv,rv') of (Inr rva, Inr rva') \ F' rva rva' | _ \ True"]) + apply (simp add: corres_protect_def) + prefer 2 + apply simp + apply (rule corres_rv_prove[where F=F'']) + apply (case_tac rv; case_tac rv'; simp) + apply (rule corres_rvE_RD[OF c]; assumption) + apply (case_tac rv; case_tac rv'; simp) + apply (simp add: corres_underlyingK_def corres_protect_def) + apply (rule corresK_weaken) + apply (rule y) + apply (simp add: corres_protect_def) + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + apply (rule conjI) + apply (subgoal_tac "(case (Inr b) of (Inr b) \ R b s + | _ \ True)"; assumption?) + apply (subgoal_tac "(case (Inr ba) of (Inr ba) \ R' ba s' + | _ \ True)"; assumption?) 
+ apply clarsimp+ + apply (insert z) + by ((fastforce simp: valid_def validE_def validE_R_def split: sum.splits)+) + +lemma corresK_returnOk [corresK_concrete_r]: + "corres_underlyingK sr nf nf' (r (Inr a) (Inr b)) r \ \ (returnOk a) (returnOk b)" + by (simp add: returnOk_def corres_underlyingK_def) + +lemma corres_assertE_str[corresK]: + "corres_underlyingK sr nf nf' ((nf' \ Q) \ P) (f \ dc) \ \ (assertE P) (assertE Q)" + by (auto simp add: corres_underlying_def corres_underlyingK_def returnOk_def return_def assertE_def fail_def) + +lemmas corres_symb_exec_whenE_l_search[corresK_symb_exec_ls] = + corresK_symb_exec_l_search[where 'd="'x + 'y", folded liftE_bindE] + +lemmas corres_returnOk_liftEs + [folded returnOk_liftE, THEN iffD2, atomized, THEN corresK_lift_rule, rule_format, corresK] = + corres_liftE_rel_sum[where m="return x" for x] + corres_liftE_rel_sum[where m'="return x" for x] + + +(* Failure *) + +lemma corresK_fail[corresK]: + "corres_underlyingK sr nf True False r P P' f fail" + by (simp add: corres_underlyingK_def) + +lemma corresK_fail_no_fail'[corresK]: + "corres_underlyingK sr nf False True (\_ _. False) (\_. True) (\_. True) f fail" + apply (simp add: corres_underlyingK_def) + by (fastforce intro!: corres_fail) + +section \CorresKwp\ + +text + \This method wraps up wp and wpc to ensure that they don't accidentally generate schematic + assumptions when handling hoare triples that emerge from corres proofs. + This is partially due to wp not being smart enough to avoid applying certain wp_comb rules + when the precondition is schematic, and arises when the schematic precondition doesn't have + access to some meta-variables in the postcondition. + + To solve this, instead of meta-implication in the wp_comb rules we use corres_inst_eq, which + can only be solved by reflexivity. In most cases these comb rules are either never applied or + solved trivially. If users manually apply corres_rv rules to create postconditions with + inaccessible meta-variables (@{method corresK_rv} will never do this), then these rules will + be used. Since @{method corresK_inst} has access to the protected return-value relation, it has a chance + to unify the generated precondition with the original schematic one.\ + +named_theorems corresKwp_wp_comb and corresKwp_wp_comb_del + +lemma corres_inst_eq_imp: + "corres_inst_eq A B \ A \ B" by (simp add: corres_inst_eq_def) + +lemmas corres_hoare_pre = hoare_pre[# \-\ \atomize (full), rule allI, rule corres_inst_eq_imp\] + +method corresKwp uses wp = + (determ \ + (fails \schematic_hoare_pre\, (wp add: wp | wpc)) + | (schematic_hoare_pre, + (use corresKwp_wp_comb [wp_comb] + corresKwp_wp_comb_del[wp_comb del] + hoare_pre[wp_pre del] + corres_hoare_pre[wp_pre] + in + \use in \wp add: wp | wpc\\))\) + +lemmas [corresKwp_wp_comb_del] = + hoare_weaken_pre + hoare_weaken_preE + hoare_weaken_preE_R + +lemma corres_inst_conj_lift[corresKwp_wp_comb]: + "\\R\ f \Q\; \P'\ f \Q'\; \s. corres_inst_eq (R s) (P s)\ \ + \\s. P s \ P' s\ f \\rv s. 
Q rv s \ Q' rv s\" + by (rule hoare_vcg_conj_lift; simp add: valid_def corres_inst_eq_def) + +lemmas [corresKwp_wp_comb] = + corresKwp_wp_comb_del[# \-\ \atomize (full), rule allI, rule corres_inst_eq_imp\] + valid_validE_R + hoare_vcg_R_conj[OF valid_validE_R] + hoare_vcg_E_elim[OF valid_validE_E] + hoare_vcg_E_conj[OF valid_validE_E] + validE_validE_R + hoare_vcg_R_conj + hoare_vcg_E_elim + hoare_vcg_E_conj + hoare_vcg_conj_lift + +declare hoare_post_comb_imp_conj[corresKwp_wp_comb_del] + +section \CorresKsimp\ +text \Combines corresK, wp and clarsimp\ + +text +\If clarsimp solves a terminal subgoal, its preconditions are left uninstantiated. We can +try to catch this by first attempting a trivial instantiation before invoking clarsimp, but +only keeping the result if clarsimp solves the goal,\ + +lemmas hoare_True_inst = + hoare_pre[where P="\_. True", of "\_. True", # \-\ \simp\] + asm_rl[of "\\_. True\ f \E\,\R\" for f E R] + +lemmas corres_rv_True_inst = + asm_rl[of "corres_rv True r (\_. True) (\_. True) f f' Q" for r f f' Q] + asm_rl[of "corres_rvE_R True r (\_. True) (\_. True) f f' Q" for r f f' Q] + +lemmas corresK_True_inst = + asm_rl[of "corres_underlyingK sr nf nf' True dc (\_. True) (\_. True) f g" for sr nf nf' f g] + asm_rl[of "corres_underlyingK sr nf nf' True r (\_. True) (\_. True) f g" for sr nf nf' r f g] + asm_rl[of "corres_underlying sr nf nf' dc (\_. True) (\_. True) f g" for sr nf nf' f g] + asm_rl[of "corres_underlying sr nf nf' r (\_. True) (\_. True) f g" for sr nf nf' r f g] + +lemmas calculus_True_insts = hoare_True_inst corres_rv_True_inst corresK_True_inst + +method corresKsimp uses simp cong search wp + declares corres corresK corresK_splits corresKc_simp = + ((no_schematic_concl, + (corresK corresKc_simp: simp + | corresKwp wp: wp + | (rule calculus_True_insts, solves \clarsimp cong: cong simp: simp corres_protect_def\) + | clarsimp cong: cong simp: simp simp del: corres_no_simp split del: if_split + | (match search in _ \ \corresK_search search: search\)))+)[1] + +section \Normalize corres rule into corresK rule\ + +lemma corresK_convert: + "A \ corres_underlying sr nf nf' r P Q f f' \ + corres_underlyingK sr nf nf' A r P Q f f'" + by (auto simp add: corres_underlyingK_def) + +method corresK_convert = (((drule uncurry)+)?, drule corresK_convert corresK_drop) + +section \Lifting corres results into wp proofs\ + +lemma use_corresK': + "corres_underlyingK sr False nf' F r PP PP' f f' \ \P\ f \Q\ \ + \K F and PP' and ex_abs_underlying sr (PP and P)\ f' \\rv' s'. \rv. r rv rv' \ ex_abs_underlying sr (Q rv) s'\" + by (fastforce simp: corres_underlying_def corres_underlyingK_def valid_def ex_abs_underlying_def) + +lemma use_corresK [wp]: + "corres_underlyingK sr False nf' F r PP PP' f f' \ \P\ f \\rv s. \rv'. r rv rv' \ Q rv' s\ \ + \K F and PP' and ex_abs_underlying sr (PP and P)\ f' \\rv'. ex_abs_underlying sr (Q rv')\" + apply (fastforce simp: corres_underlying_def corres_underlyingK_def valid_def ex_abs_underlying_def) + done + +lemma hoare_add_post': + "\\P'\ f \Q'\; \P''\ f \\rv s. Q' rv s \ Q rv s\\ \ \P' and P''\ f \Q\" + by (fastforce simp add: valid_def) + +lemma use_corresK_frame: + assumes corres: "corres_underlyingK sr False nf' F r PP P' f f'" + assumes frame: "(\s s' rv rv'. 
(s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s')" + assumes valid: "\P\ f \Q\" + assumes valid': "\PP'\ f' \Q'\" + shows "\K F and P' and PP' and ex_abs_underlying sr (PP and P)\ f' \QQ'\" + apply (rule hoare_pre) + apply (rule hoare_add_post'[OF valid']) + apply (rule hoare_strengthen_post) + apply (rule use_corresK'[OF corres valid]) + apply (insert frame)[1] + apply (clarsimp simp: ex_abs_underlying_def) + apply clarsimp + done + +lemma use_corresK_frame_E_R: + assumes corres: "corres_underlyingK sr False nf' F (lf \ r) PP P' f f'" + assumes frame: "(\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s')" + assumes valid: "\P\ f \Q\, -" + assumes valid': "\PP'\ f' \Q'\, -" + shows "\K F and P' and PP' and ex_abs_underlying sr (PP and P)\ f' \QQ'\, -" + apply (simp only: validE_R_def validE_def) + apply (rule use_corresK_frame[OF corres _ valid[simplified validE_R_def validE_def] valid'[simplified validE_R_def validE_def]]) + by (auto simp: frame split: sum.splits) + +lemma K_True: "K True = (\_. True)" by simp +lemma True_And: "((\_. True) and P) = P" by simp + +method use_corresK uses frame = + (corresK_convert?, drule use_corresK_frame use_corresK_frame_E_R, rule frame, + (solves \wp\ | defer_tac), (solves \wp\ | defer_tac), (simp only: True_And K_True)?) + +experiment + fixes sr nf' r P P' f f' F G Q Q' QQ' PP PP' g g' + assumes f_corres[corres]: "G \ F \ corres_underlying sr False True r P P' f f'" and + g_corres[corres]: "corres_underlying sr False True dc \ \ g g'" and + wpl [wp]: "\PP\ f \Q\" and wpr [wp]: "\PP'\ f' \Q'\" + and [wp]: "\P\ g \\_. P\" "\PP\ g \\_. PP\" "\P'\ g' \\_. P'\" "\PP'\ g' \\_. PP'\" and + frameA: "\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s'" + begin + + lemmas f_Q' = f_corres[atomized, @\use_corresK frame: frameA\] + + lemma "G \ F \ corres_underlying sr False True dc (P and PP) (P' and PP') + (g >>= (K (f >>= K (assert True)))) (g' >>= (K (f' >>= (\rv'. (stateAssert (QQ' rv') [])))))" + apply (simp only: stateAssert_def K_def) + apply corresK + apply (corresK_search search: corresK_assert) + apply corresK_rv + apply (corresKwp | simp)+ + apply corresK_rv + apply (corresKwp wp: f_Q' | simp)+ + apply corresKsimp+ + by auto + +end + +section \Corres Argument lifting\ + +text \Used for rewriting corres rules with syntactically equivalent arguments\ + +lemma lift_args_corres: + "corres_underlying sr nf nf' r (P x) (P' x) (f x) (f' x) \ x = x' \ + corres_underlying sr nf nf' r (P x) (P' x') (f x) (f' x')" by simp + +method lift_corres_args = + (match premises in + H[thin]:"corres_underlying _ _ _ _ (P x) (P' x) (f x) (f' x)" (cut 5) for P P' f f' x \ + \match (f) in "\_. g" for g \ \fail\ \ _ \ + \match (f') in "\_. g'" for g' \ \fail\ \ _ \ + \cut_tac lift_args_corres[where f=f and f'=f' and P=P and P'=P', OF H]\\\)+ + +(* Use calculational rules. Don't unfold the definition! 
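+   Unfolding corres_rv_def or corres_rvE_R_def directly bypasses the corres_rv method's
+   heuristics for propagating return-value obligations. If the raw definitions are genuinely
+   needed, use the explicitly named _I_know_what_I'm_doing variants below; the plain
+   definitions are hidden after this point.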
*) +lemmas corres_rv_def_I_know_what_I'm_doing = corres_rv_def +lemmas corres_rvE_R_def_I_know_what_I'm_doing = corres_rvE_R_def + +hide_fact corres_rv_def +hide_fact corres_rvE_R_def + +end diff --git a/lib/Corres_Adjust_Preconds.thy b/lib/Corres_Adjust_Preconds.thy index 6183683063..405d14880a 100644 --- a/lib/Corres_Adjust_Preconds.thy +++ b/lib/Corres_Adjust_Preconds.thy @@ -104,8 +104,7 @@ ML \ structure Corres_Adjust_Preconds = struct -val def_intros = @{thms conjI pred_conj_app[THEN iffD2] - bipred_conj_app[THEN fun_cong, THEN iffD2]} +val def_intros = @{thms conjI pred_conjI} (* apply an intro rule, splitting preconds assumptions to provide unique assumptions for each goal. *) diff --git a/lib/Corres_Cases.thy b/lib/Corres_Cases.thy new file mode 100644 index 0000000000..c6fea429ef --- /dev/null +++ b/lib/Corres_Cases.thy @@ -0,0 +1,323 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Corres_Cases +imports Corres_UL +begin + +text \ + This file defines the following main methods for safe data type case distinctions on + corres/corres_underlying predicates. + + \<^item> corres_cases_left: case distinction on abstract monad + \<^item> corres_cases_right: case distinction on concrete monad + \<^item> corres_cases: try corres_cases_left, then corres_cases_right + \<^item> corres_cases_both: simultaneous (quadratic) case distinction on both sides, with safe + elimination of trivially contradictory cases. + + The first 3 methods take no arguments, corres_cases_both takes an optional simp argument to, + for example, unfold relations that synchronise cases between the abstract and concrete side. + + The case distinctions work if the entire monad is a "case" statement, or if the monad is a + @{const bind} or @{const bindE} term with a "case" statement in the head position. + + There is an existing method for case distinctions (@{method wpc}), but this method is not + flexible enough for @{term corres}: consider the goal + @{text "\x. corres r (?G x) ?G' (case x of None \ a | Some y \ b y) m"} -- if we perform + case distinction on @{term x}, then we can transform @{text "?G x"} into + @{text "\x s. (x = None \ ?Q1 x s) \ (\y. x = Some y \ ?Q2 x y s)"}, + but we cannot do the same with @{text "?G'"}, because @{text "?G'"} does not depend on @{text x}. + The best we can do is @{text "?G' = \s. ?A s \ ?B s"}, which so far seems to be good enough + in our manual proofs. + + The @{method wpc} method will try to treat both preconditions uniformly and fail on @{text "?G'"}. + Extending @{method wpc} to deal with guards in a non-uniform way would be possible, but would make + setup for new constants even more messy than it already is. Instead we re-use the general idea + here (in Eisbach instead of ML), and leave the @{method wpc} setup clean for other uses. +\ + +section \Helper functions and definitions\ + +(* The following three definitions are originally by Dan Matichuck from the Eisbach + CorresK_Method example *) + +(* Retrieve a split rule for a target term that is expected to be a case statement. *) +ML \ +fun get_split_rule ctxt target = +let + val (hdTarget, args) = strip_comb (Envir.eta_contract target) + val (constNm, _) = dest_Const hdTarget + val constNm_fds = String.fields (fn c => c = #".") constNm + + val _ = if String.isPrefix "case_" (List.last constNm_fds) then () + else raise TERM ("Not a case statement", [target]) + + val typeNm = (String.concatWith "." 
o rev o tl o rev) constNm_fds + val split = Proof_Context.get_thm ctxt (typeNm ^ ".split") + val vars = Term.add_vars (Thm.prop_of split) [] + + val datatype_name = List.nth (rev constNm_fds, 1) + + fun T_is_datatype (Type (nm, _)) = (Long_Name.base_name nm = Long_Name.base_name datatype_name) + | T_is_datatype _ = false + + val datatype_var = + case find_first (fn ((_, _), T') => T_is_datatype T') vars of + SOME (ix, _) => ix + | NONE => error ("Couldn't find datatype in thm: " ^ datatype_name) + + val split' = Drule.infer_instantiate ctxt + [(datatype_var, Thm.cterm_of ctxt (List.last args))] split + +in SOME split' end +handle TERM _ => NONE; +\ + +(* The above function as an attribute. The term argument is expected to be a case statement. *) +attribute_setup get_split_rule = \Args.term >> + (fn t => Thm.rule_attribute [] (fn context => fn _ => + case get_split_rule (Context.proof_of context) t of + SOME thm => thm + | NONE => Drule.free_dummy_thm))\ + +(* Apply a split rule to a goal. Example usage: + + apply_split f "\f. corres_underlying sr nf nf' r P P' f f'" + + The first (free) f is expected to be a case statement and is used to extract the split rule. + The second term is expected to take this f as a parameter and provide the term context of the + case statement in the goal so the split rule is applied to the correct occurrence of the case + statement. +*) +method apply_split for f :: 'a and R :: "'a \ bool" = + (match [[get_split_rule f]] in U: "(?x :: bool) = ?y" \ + \match U[THEN iffD2] in U': "\H. ?A \ H (?z :: 'c)" \ + \match (R) in "R' :: 'c \ bool" for R' \ + \rule U'[where H=R']\\\) + +context +begin + +(* This predicate provides an abstraction for guard/precondition terms for transformations + on those guards. + + P and P' are the abstract and concrete preconditions before the transformation + Q and Q' are the abstract and concrete preconditions after the transformation + + R is the predicate to be transformed. +*) +private definition corres_case_helper :: + "(('a \ bool) \ ('b \ bool)) \ (('a \ bool) \ ('b \ bool)) \ bool \ bool" where + "corres_case_helper \ \(P, P') (Q, Q') R. (\s. P s \ Q s) \ (\s. P' s \ Q' s) \ R" + + +(* The following lemmas enable us to lift preconditions of corres_case_helper over conjunction, + universal quantifiers, and implication. Note that there are strong versions for forall/implies + where both guards are treated uniformly, and weak versions, where forall/implies is dropped + in one guard, but not the other. + + The collection of the lemmas below is used to process the term R in corres_case_helper and + create appropriately lifted guard/preconditions during that procedure. The names and general + idea are from the WPC theory. +*) + +private lemma corres_case_helperI: + "corres_case_helper (P, P') (P, P') R \ R" + by (simp add: corres_case_helper_def) + +private lemma corres_case_conj_process: + "\ corres_case_helper (P, P') (A, A') R; corres_case_helper (P, P') (B, B') S \ + \ corres_case_helper (P, P') (\s. A s \ B s, \s. A' s \ B' s) (R \ S)" + by (clarsimp simp add: corres_case_helper_def) + +private lemma corres_case_all_process: + "\ \x. corres_case_helper (P, P') (Q x, Q' x) (R x) \ + \ corres_case_helper (P, P') (\s. \x. Q x s, \s. \x. Q' x s) (\x. R x)" + by (clarsimp simp: corres_case_helper_def subset_iff) + +private lemma corres_case_all_process_weak: + "\ \x. corres_case_helper (P, P') (Q x, Q') (R x) \ + \ corres_case_helper (P, P') (\s. \x. Q x s, Q') (\x. 
R x)" + by (clarsimp simp: corres_case_helper_def subset_iff) + +private lemma corres_case_imp_process: + "\ S \ corres_case_helper (P, P') (Q, Q') R \ + \ corres_case_helper (P, P') (\s. S \ Q s, \s. S \ Q' s) (S \ R)" + by (clarsimp simp add: corres_case_helper_def subset_iff) + +private lemma corres_case_imp_process_weak: + "\ S \ corres_case_helper (P, P') (Q, Q') R \ + \ corres_case_helper (P, P') (\s. S \ Q s, Q') (S \ R)" + by (clarsimp simp add: corres_case_helper_def subset_iff) + +private lemmas corres_case_process = + corres_case_conj_process corres_case_all_process corres_case_imp_process + +private lemmas corres_case_process_weak = + corres_case_conj_process corres_case_all_process_weak corres_case_imp_process_weak + +(* Turn goals of the form + + (\y. x = SomeConstr y \ corres (?P x) P' (SomeConstr y) g) \ + (\y. x = OtherConstr y \ corres (?P x) P' (OtherConstr y) g) \ + ... + + into multiple goals of the form + + \y. x = SomeConstr y \ corres (?P1 x y) ?P'1 (SomeConstr y) g) + \y. x = OtherConstr y \ corres (?P2 x y) ?P'2 (OtherConstr y) g) + + with instantiations + + ?P x = \s. (\y. x = SomeConstr y \ ?P1 x y s) \ (\y. x = OtherConstr y \ ?P2 x y s) + ?P' = \s. ?P'1 s \ ?P'2 s + + We do this by first transforming the goal into a corres_case_helper goal, and then applying + the corresponding lifting rules. We first try to get both sides (?P and ?P') to have + quantifiers and implications to get a stronger statement, and fall back to the weaker \ for ?P' + shown above when that doesn't work (e.g. because ?P' might not depend on x). + + When all lifting rules have applied, we transform the goal back into a corres goal using the + provided helper rule (e.g. corres_case_helper_corres_left below). +*) +private method corres_cases_body uses helper = + determ \rule corres_case_helperI, repeat_new \rule corres_case_process\; rule helper + | rule corres_case_helperI, repeat_new \rule corres_case_process_weak\; rule helper\ + + +(* Instances of corres_case_helper for left and right side of the corres predicate. + These lemmas bind the corres guards to the corres_case_helper guards. *) +private lemma corres_case_helper_corres_left: + "corres_underlying sr nf nf' r Q Q' f f' \ + corres_case_helper (P, P') (Q, Q') (corres_underlying sr nf nf' r P P' f f')" + by (auto simp: corres_case_helper_def elim!: corres_guard_imp) + +private lemma corres_case_helper_corres_right: + "corres_underlying sr nf nf' r Q' Q f f' \ + corres_case_helper (P, P') (Q, Q') (corres_underlying sr nf nf' r P' P f f')" + by (auto simp: corres_case_helper_def elim!: corres_guard_imp) + + +section \Main method definitions\ + +(* Case distinction on abstract side *) +method corres_cases_left = + determ \ + corres_pre, + (match conclusion in + "corres_underlying sr nf nf' r P P' (f >>= g) f'" for sr nf nf' r P P' f g f' + \ \apply_split f "\f. corres_underlying sr nf nf' r P P' (f >>= g) f'"\ + \ "corres_underlying sr nf nf' r P P' (f >>=E g) f'" for sr nf nf' r P P' f g f' + \ \apply_split f "\f. corres_underlying sr nf nf' r P P' (f >>=E g) f'"\ + \ "corres_underlying sr nf nf' r P P' f f'" for sr nf nf' r P P' f f' + \ \apply_split f "\f. corres_underlying sr nf nf' r P P' f f'"\), + corres_cases_body helper: corres_case_helper_corres_left\ + +(* case distinction on concrete side *) +method corres_cases_right = + determ \ + corres_pre, + (match conclusion in + "corres_underlying sr nf nf' r P P' f (f' >>= g)" for sr nf nf' r P P' f g f' + \ \apply_split f' "\f'. 
corres_underlying sr nf nf' r P P' f (f' >>= g)"\ + \ "corres_underlying sr nf nf' r P P' f (f' >>=E g)" for sr nf nf' r P P' f g f' + \ \apply_split f' "\f'. corres_underlying sr nf nf' r P P' f (f' >>=E g)"\ + \ "corres_underlying sr nf nf' r P P' f f'" for sr nf nf' r P P' f f' + \ \apply_split f' "\f'. corres_underlying sr nf nf' r P P' f f'"\), + corres_cases_body helper: corres_case_helper_corres_right\ + +(* single case distinction on either left or right, whichever works first *) +method corres_cases = corres_cases_left | corres_cases_right + +(* Case distinction on abstract and concrete side with quadractic blowup, but attempt to solve + contradictory side conditions by simp. Cases that are solved by simp will produce \ as guard + so that no free schematics are introduced into later goals. *) +method corres_cases_both uses simp = + (* corres_pre first, so that the ";" later only refers to corres goals, not the final implications *) + determ \ + corres_pre, + (corres_cases_left; corres_cases_right; + (solves \rule corres_inst[where P=\ and P'=\], simp add: simp\)?)\ + +end + + +section \Examples and tests\ + +experiment +begin + +(* abstract side *) +lemma "corres_underlying srel nf nf' rrel (G x) G' (case x of None \ a | Some y \ b y) m" + (* produces strong (forall, implies) guard conditions in the final implications for both sides *) + apply corres_cases + oops + +schematic_goal + "\x. corres_underlying srel nf nf' rrel (?G x) ?G' (case x of None \ a | Some y \ b y) m" + (* produces weak (just ?A \ ?B) guard conditions for concrete side, because ?G' does not + depend on "x", on which we do the case distinction *) + apply corres_cases + oops + +(* abstract side, with bind *) +lemma "corres_underlying srel nf nf' rrel G G' ((case x of None \ a | Some y \ b y) >>= g) m" + apply corres_cases + oops + +(* abstract side, with bindE *) +lemma "corres_underlying srel nf nf' rrel G G' ((case x of None \ a | Some y \ b y) >>=E g) m" + apply corres_cases + oops + +(* concrete side: *) +lemma "corres_underlying srel nf nf' rrel G G' m (case x of None \ a | Some y \ b y)" + apply corres_cases + oops + +schematic_goal + "\x. corres_underlying srel nf nf' rrel ?G (?G' x) m (case x of None \ a | Some y \ b y)" + apply corres_cases + oops + +lemma "corres_underlying srel nf nf' rrel G G' m ((case x of None \ a | Some y \ b y) >>= g)" + apply corres_cases + oops + +lemma "corres_underlying srel nf nf' rrel G G' m ((case x of None \ a | Some y \ b y) >>=E g)" + apply corres_cases + oops + +(* both sides: *) +lemma "corres_underlying srel nf nf' rrel G G' (case x of None \ a | Some y \ b) + (case x of None \ a' | Some y \ b' y)" + (* two cases remain (both None, both Some); eliminated cases have guard \ in final implication *) + apply corres_cases_both + oops + +schematic_goal + "\x y. 
corres_underlying srel nf nf' rrel (?G x) (?G' y) (case x of None \ a | Some y \ b) + (case y of None \ a' | Some y \ b' y)" + (* 4 cases remain, because none are contradictory *) + apply corres_cases_both + oops + +(* some example relation between abstract and concrete values *) +definition + "none_rel x y \ (x = None) = (y = None)" + +lemma + "none_rel x y \ + corres_underlying srel nf nf' rrel G G' (case x of None \ a | Some y \ b) + (case y of None \ a' | Some y \ b' y)" + (* two cases remain, none_rel is untouched in the cases that remain, but unfolded in the + ones that were eliminated *) + apply (corres_cases_both simp: none_rel_def) + oops + +end + +end \ No newline at end of file diff --git a/lib/Corres_Method.thy b/lib/Corres_Method.thy index b01d922e37..f6b9b52ff5 100644 --- a/lib/Corres_Method.thy +++ b/lib/Corres_Method.thy @@ -1,1175 +1,223 @@ (* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2023, Proofcraft Pty Ltd * * SPDX-License-Identifier: BSD-2-Clause *) theory Corres_Method -imports Corres_UL SpecValid_R +imports Corres_Cases ExtraCorres begin -(*TODO move this *) - -method_setup repeat_new = - \Method.text_closure >> (fn m => fn ctxt => fn facts => - let - fun tac i st' = - Goal.restrict i 1 st' - |> method_evaluate m ctxt facts - |> Seq.map (Goal.unrestrict i) - - in SIMPLE_METHOD (SUBGOAL (fn (_,i) => REPEAT_ALL_NEW tac i) 1) facts end) -\ - -chapter \Corres Methods\ - -section \Boilerplate\ - -context begin - -private definition "my_true \ True" - -private lemma my_true: "my_true" by (simp add: my_true_def) - -method no_schematic_concl = (fails \rule my_true\) - -end - -definition - "corres_underlyingK sr nf nf' F r Q Q' f g \ - F \ corres_underlying sr nf nf' r Q Q' f g" - -lemma corresK_name_pre: - "\ \s s'. \ P s; P' s'; F; (s, s') \ sr \ - \ corres_underlyingK sr nf nf' F r ((=) s) ((=) s') f g \ - \ corres_underlyingK sr nf nf' F r P P' f g" - apply (clarsimp simp add: corres_underlyingK_def) - apply (rule corres_name_pre) - apply blast - done - -lemma corresK_assume_pre: - "\ \s s'. \ P s; P' s'; F; (s, s') \ sr \ - \ corres_underlyingK sr nf nf' F r P P' f g \ - \ corres_underlyingK sr nf nf' F r P P' f g" - apply (clarsimp simp add: corres_underlyingK_def) - apply (rule corres_assume_pre) - apply blast - done - -lemma corresK_drop_any_guard: - "corres_underlying sr nf nf' r Q Q' f g \ corres_underlyingK sr nf nf' F r Q Q' f g" - by (simp add: corres_underlyingK_def) - -lemma corresK_assume_guard: - "(F \ corres_underlying sr nf nf' r Q Q' f g) \ corres_underlyingK sr nf nf' F r Q Q' f g" - by (simp add: corres_underlyingK_def) - -lemma corresK_unlift: - "corres_underlyingK sr nf nf' F r Q Q' f g \ (F \ corres_underlying sr nf nf' r Q Q' f g)" - by (simp add: corres_underlyingK_def) - -lemma corresK_lift: - "corres_underlying sr nf nf' r Q Q' f g \ corres_underlyingK sr nf nf' F r Q Q' f g" - by (simp add: corres_underlyingK_def) - -lemma corresK_lift_rule: - "corres_underlying sr nf nf' r Q Q' f g \ corres_underlying sra nfa nfa' ra Qa Qa' fa ga - \ corres_underlyingK sr nf nf' F r Q Q' f g \ corres_underlyingK sra nfa nfa' F ra Qa Qa' fa ga" - by (simp add: corres_underlyingK_def) - -lemmas corresK_drop = corresK_drop_any_guard[where F=True] - -context begin - -lemma corresK_start: - assumes x: "corres_underlyingK sr nf nf' F r Q Q' f g" - assumes y: "\s s'. 
\ P s; P' s'; (s, s') \ sr \ \ F \ Q s \ Q' s'" - shows "corres_underlying sr nf nf' r P P' f g" - using x by (auto simp: y corres_underlying_def corres_underlyingK_def) - -lemma corresK_weaken: - assumes x: "corres_underlyingK sr nf nf' F' r Q Q' f g" - assumes y: "\s s'. \ P s; P' s'; F; (s, s') \ sr \ \ F' \ Q s \ Q' s'" - shows "corres_underlyingK sr nf nf' F r P P' f g" - using x by (auto simp: y corres_underlying_def corres_underlyingK_def) - -private lemma corres_trivial: - "False \ corres_underlying sr nf nf' r P P' f f'" - by simp - -method check_corres = - (succeeds \rule corres_trivial\, fails \rule TrueI\) - -private lemma corresK_trivial: - "False \ corres_underlyingK sr nf nf' F r P P' f f'" - by simp - -(* Ensure we don't apply calculational rules if either function is schematic *) - -private definition "dummy_fun \ undefined" - -private lemma corresK_dummy_left: - "False \ corres_underlyingK sr nf nf' F r P P' dummy_fun f'" - by simp - -private lemma corresK_dummy_right: - "False \ corres_underlyingK sr nf nf' F r P P' f dummy_fun" - by simp - -method check_corresK = - (succeeds \rule corresK_trivial\, fails \rule corresK_dummy_left corresK_dummy_right\) - -private definition "my_false s \ False" - -private - lemma corres_my_falseE: "my_false x \ P" by (simp add: my_false_def) - -method no_schematic_prems = (fails \erule corres_my_falseE\) - -private lemma hoare_pre: "\my_false\ f \Q\" by (simp add: valid_def my_false_def) -private lemma hoareE_pre: "\my_false\ f \Q\,\Q'\" by (simp add: validE_def valid_def my_false_def) -private lemma hoare_E_E_pre: "\my_false\ f -,\Q\" by (simp add: validE_E_def validE_def valid_def my_false_def) -private lemma hoare_E_R_pre: "\my_false\ f \Q\,-" by (simp add: validE_R_def validE_def valid_def my_false_def) - -private lemmas hoare_pres = hoare_pre hoare_pre hoare_E_E_pre hoare_E_R_pre - -method schematic_hoare_pre = (succeeds \rule hoare_pres\) - -private - lemma corres_my_false: "corres_underlying sr nf nf' r my_false P f f'" - "corres_underlying sr nf nf' r P' my_false f f'" - by (auto simp add: my_false_def[abs_def] corres_underlying_def) - -private - lemma corresK_my_false: "corres_underlyingK sr nf nf' F r my_false P f f'" - "corres_underlyingK sr nf nf' F r P' my_false f f'" - by (auto simp add: corres_my_false corres_underlyingK_def) - - -method corres_raw_pre = - (check_corres, (fails \rule corres_my_false\, rule corresK_start)?) - -lemma corresK_weaken_states: - "corres_underlyingK sr nf nf' F r Q Q' f g \ - corres_underlyingK sr nf nf' (F \ (\s s'. P s \ P' s' \ (s, s') \ sr \ Q s \ Q' s')) - r P P' f g" - apply (erule corresK_weaken) - apply simp - done - -private lemma - corresK_my_falseF: - "corres_underlyingK sr nf nf' (my_false undefined) r P P' f f'" - by (simp add: corres_underlyingK_def my_false_def) - -method corresK_pre = - (check_corresK, - (fails \rule corresK_my_false\, - ((succeeds \rule corresK_my_falseF\, rule corresK_weaken_states) | - rule corresK_weaken))) - -method corres_pre = (corres_raw_pre | corresK_pre)? - -lemma corresK_weakenK: - "corres_underlyingK sr nf nf' F' r P P' f f' \ (F \ F') \ corres_underlyingK sr nf nf' F r P P' f f'" - by (simp add: corres_underlyingK_def) - -(* Special corres rules which should only be applied when the return value relation is - concrete, to avoid bare schematics. *) - -named_theorems corres_concrete_r and corres_concrete_rER - -private lemma corres_r_False: - "False \ corres_underlyingK sr nf nf' F (\_. 
my_false) P P' f f'" - by simp - -private lemma corres_r_FalseE: - "False \ corres_underlyingK sr nf nf' F ((\_. my_false) \ r) P P' f f'" - by simp - -private lemma corres_r_FalseE': - "False \ corres_underlyingK sr nf nf' F (r \ (\_. my_false)) P P' f f'" - by simp - -method corres_concrete_r declares corres_concrete_r corres_concrete_rER = - (fails \rule corres_r_False corres_r_FalseE corres_r_FalseE'\, determ \rule corres_concrete_r\) - | (fails \rule corres_r_FalseE\, determ \rule corres_concrete_rER\) - - -end - - -section \Corresc - Corres over case statements\ - -text - \Based on wpc, corresc examines the split rule for top-level case statements on the left - and right hand sides, propagating backwards the stateless and left/right preconditions.\ - -ML \ - -fun get_split_rule ctxt target = -let - val (hdTarget,args) = strip_comb (Envir.eta_contract target) - val (constNm, _) = dest_Const hdTarget - val constNm_fds = (String.fields (fn c => c = #".") constNm) - - val _ = if String.isPrefix "case_" (List.last constNm_fds) then () - else raise TERM ("Not a case statement",[target]) - - val typeNm = (String.concatWith "." o rev o tl o rev) constNm_fds; - val split = Proof_Context.get_thm ctxt (typeNm ^ ".split"); - val vars = Term.add_vars (Thm.prop_of split) [] - - val datatype_name = List.nth (rev constNm_fds,1) - - fun T_is_datatype (Type (nm,_)) = (Long_Name.base_name nm) = (Long_Name.base_name datatype_name) - | T_is_datatype _ = false - - val datatype_var = - case (find_first (fn ((_,_),T') => (T_is_datatype T')) vars) of - SOME (ix,_) => ix - | NONE => error ("Couldn't find datatype in thm: " ^ datatype_name) - - val split' = Drule.infer_instantiate ctxt - [(datatype_var, Thm.cterm_of ctxt (List.last args))] split - -in - SOME split' end - handle TERM _ => NONE; -\ - -attribute_setup get_split_rule = \Args.term >> - (fn t => Thm.rule_attribute [] (fn context => fn _ => - case (get_split_rule (Context.proof_of context) t) of - SOME thm => thm - | NONE => Drule.free_dummy_thm))\ - -method apply_split for f :: 'a and R :: "'a \ bool"= - (match [[get_split_rule f]] in U: "(?x :: bool) = ?y" \ - \match U[THEN iffD2] in U': "\H. ?A \ H (?z :: 'c)" \ - \match (R) in "R' :: 'c \ bool" for R' \ - \rule U'[where H=R']\\\) - -definition - wpc2_helper :: "(('a \ bool) \ 'b set) - \ (('a \ bool) \ 'b set) \ (('a \ bool) \ 'b set) - \ (('a \ bool) \ 'b set) \ bool \ bool" where - "wpc2_helper \ \(P, P') (Q, Q') (PP, PP') (QQ,QQ') R. - ((\s. P s \ Q s) \ P' \ Q') \ ((\s. PP s \ QQ s) \ PP' \ QQ') \ R" - -definition - "wpc2_protect B Q \ (Q :: bool)" - -lemma wpc2_helperI: - "wpc2_helper (P, P') (P, P') (PP, PP') (PP, PP') Q \ Q" - by (simp add: wpc2_helper_def) - -lemma wpc2_conj_process: - "\ wpc2_helper (P, P') (A, A') (PP, PP') (AA, AA') C; wpc2_helper (P, P') (B, B') (PP, PP') (BB, BB') D \ - \ wpc2_helper (P, P') (\s. A s \ B s, A' \ B') (PP, PP') (\s. AA s \ BB s, AA' \ BB') (C \ D)" - by (clarsimp simp add: wpc2_helper_def) - -lemma wpc2_all_process: - "\ \x. wpc2_helper (P, P') (Q x, Q' x) (PP, PP') (QQ x, QQ' x) (R x) \ - \ wpc2_helper (P, P') (\s. \x. Q x s, {s. \x. s \ Q' x}) (PP, PP') (\s. \x. QQ x s, {s. \x. s \ QQ' x}) (\x. R x)" - by (clarsimp simp: wpc2_helper_def subset_iff) - -lemma wpc2_imp_process: - "\ wpc2_protect B Q \ wpc2_helper (P, P') (R, R') (PP, PP') (RR, RR') S \ - \ wpc2_helper (P, P') (\s. Q \ R s, {s. Q \ s \ R'}) (PP, PP') (\s. Q \ RR s, {s. 
Q \ s \ RR'}) (Q \ S)" - by (clarsimp simp add: wpc2_helper_def subset_iff wpc2_protect_def) - - - -text \ - Generate quadratic blowup of the case statements on either side of refinement. - Attempt to discharge resulting contradictions. -\ - - -method corresc_body for B :: bool uses helper = - determ \(rule wpc2_helperI, - repeat_new \rule wpc2_conj_process wpc2_all_process wpc2_imp_process[where B=B]\ ; (rule helper))\ - -lemma wpc2_helper_corres_left: - "corres_underlyingK sr nf nf' QQ r Q A f f' \ - wpc2_helper (P, P') (Q, Q') (\_. PP,PP') (\_. QQ,QQ') (corres_underlyingK sr nf nf' PP r P A f f')" - by (clarsimp simp: wpc2_helper_def corres_underlyingK_def elim!: corres_guard_imp) - -method corresc_left_raw = - determ \(match conclusion in "corres_underlyingK sr nf nf' F r P P' f f'" for sr nf nf' F r P P' f f' - \ \apply_split f "\f. corres_underlyingK sr nf nf' F r P P' f f'"\, - corresc_body False helper: wpc2_helper_corres_left)\ - -lemma wpc2_helper_corres_right: - "corres_underlyingK sr nf nf' QQ r A Q f f' \ - wpc2_helper (P, P') (Q, Q') (\_. PP,PP') (\_. QQ,QQ') (corres_underlyingK sr nf nf' PP r A P f f')" - by (clarsimp simp: wpc2_helper_def corres_underlyingK_def elim!: corres_guard_imp) - -method corresc_right_raw = - determ \(match conclusion in "corres_underlyingK sr nf nf' F r P P' f f'" for sr nf nf' F r P P' f f' - \ \apply_split f' "\f'. corres_underlyingK sr nf nf' F r P P' f f'"\, - corresc_body True helper: wpc2_helper_corres_right)\ - -definition - "corres_protect r = (r :: bool)" - -lemma corres_protect_conj_elim[simp]: - "corres_protect (a \ b) = (corres_protect a \ corres_protect b)" - by (simp add: corres_protect_def) - -lemma wpc2_corres_protect: - "wpc2_protect B Q \ corres_protect Q" - by (simp add: wpc2_protect_def corres_protect_def) - -method corresc_left = (corresc_left_raw; (drule wpc2_corres_protect[where B=False])) -method corresc_right = (corresc_right_raw; (drule wpc2_corres_protect[where B=True])) - -named_theorems corresc_simp - -declare wpc2_protect_def[corresc_simp] -declare corres_protect_def[corresc_simp] - -lemma corresK_false_guard_instantiate: - "False \ corres_underlyingK sr nf nf' True r P P' f f'" - by (simp add: corres_underlyingK_def) - -lemma - wpc_contr_helper: - "wpc2_protect False (A = B) \ wpc2_protect True (A = C) \ B \ C \ P" - by (auto simp: wpc2_protect_def) - -method corresc declares corresc_simp = - (check_corresK, corresc_left_raw; corresc_right_raw; - ((solves \rule corresK_false_guard_instantiate, - determ \(erule (1) wpc_contr_helper)?\, simp add: corresc_simp\) - | (drule wpc2_corres_protect[where B=False], drule wpc2_corres_protect[where B=True])))[1] - -section \Corres_rv\ - -text \Corres_rv is used to propagate backwards the stateless precondition (F) from corres_underlyingK. - It's main purpose is to defer the decision of where each condition should go: either continue - through the stateless precondition, or be pushed into the left/right side as a hoare triple.\ - - -(*Don't unfold the definition. Use corres_rv method or associated rules. *) -definition corres_rv :: "bool \ ('a \ 'b \ bool) \ ('s \ bool) \ ('t \ bool) - \ ('s, 'a) nondet_monad \ ('t, 'b) nondet_monad \ - ('a \ 'b \ bool) \ bool" - where - "corres_rv F r P P' f f' Q \ - F \ (\s s'. P s \ P' s' \ - (\sa rv. (rv, sa) \ fst (f s) \ (\sa' rv'. (rv', sa') \ fst (f' s') \ r rv rv' \ Q rv rv')))" - -(*Don't unfold the definition. Use corres_rv method or associated rules. *) -definition "corres_rvE_R F r P P' f f' Q \ - corres_rv F (\_ _. 
True) P P' f f' - (\rvE rvE'. case (rvE,rvE') of (Inr rv, Inr rv') \ r rv rv' \ Q rv rv' | _ \ True)" - -lemma corres_rvD: - "corres_rv F r P P' f f' Q \ - F \ P s \ P' s' \ (rv,sa) \ fst (f s) \ - (rv',sa') \ fst (f' s') \ r rv rv' \ Q rv rv'" - by (auto simp add: corres_rv_def) - -lemma corres_rvE_RD: - "corres_rvE_R F r P P' f f' Q \ - F \ P s \ P' s' \ (Inr rv,sa) \ fst (f s) \ - (Inr rv',sa') \ fst (f' s') \ r rv rv' \ Q rv rv'" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemma corres_rv_prove: - "(\s s' sa sa' rv rv'. F \ - (rv,sa) \ fst (f s) \ (rv',sa') \ fst (f' s') \ P s \ P' s' \ r rv rv' \ Q rv rv') \ - corres_rv F r P P' f f' Q" - by (auto simp add: corres_rv_def) - -lemma corres_rvE_R_prove: - "(\s s' sa sa' rv rv'. F \ - (Inr rv,sa) \ fst (f s) \ (Inr rv',sa') \ fst (f' s') \ P s \ P' s' \ r rv rv' \ Q rv rv') \ - corres_rvE_R F r P P' f f' Q" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemma corres_rv_wp_left: - "\P\ f \\rv s. \rv'. r rv rv' \ Q rv rv'\ \ corres_rv True r P \ f f' Q" - by (fastforce simp add: corres_rv_def valid_def) - -lemma corres_rvE_R_wp_left: - "\P\ f \\rv s. \rv'. r rv rv' \ Q rv rv'\, - \ corres_rvE_R True r P \ f f' Q" - apply (simp add: corres_rvE_R_def validE_def validE_R_def) - apply (rule corres_rv_wp_left) - apply (erule hoare_strengthen_post) - apply (auto split: sum.splits) - done - -lemma corres_rv_wp_right: - "\P'\ f' \\rv' s. \rv. r rv rv' \ Q rv rv'\ \ corres_rv True r \ P' f f' Q" - by (fastforce simp add: corres_rv_def valid_def) - -lemma corres_rvE_R_wp_right: - "\P'\ f' \\rv' s. \rv. r rv rv' \ Q rv rv'\, - \ corres_rvE_R True r \ P' f f' Q" - apply (simp add: corres_rvE_R_def validE_def validE_R_def) - apply (rule corres_rv_wp_right) - apply (erule hoare_strengthen_post) - apply (auto split: sum.splits) - done - -lemma corres_rv_weaken: - "(\rv rv'. Q rv rv' \ Q' rv rv') \ corres_rv F r P P' f f' Q \ corres_rv F r P P' f f' Q'" - by (auto simp add: corres_rv_def) - -lemma corres_rvE_R_weaken: - "(\rv rv'. Q rv rv' \ Q' rv rv') \ corres_rvE_R F r P P' f f' Q \ corres_rvE_R F r P P' f f' Q'" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemma corres_rv_defer_no_args: - "corres_rv (\rv rv'. r rv rv' \ F) r (\_. True) (\_. True) f f' (\_ _. F)" - by (auto simp add: corres_rv_def) - -lemma corres_rvE_R_defer_no_args: - "corres_rvE_R (\rv rv'. r rv rv' \ F) r (\_. True) (\_. True) f f' (\_ _. F)" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -(*UNSAFE*) -lemma corres_rv_defer: - "corres_rv (\rv rv'. r rv rv' \ Q rv rv') r (\_. True) (\_. True) f f' Q" - by (auto simp add: corres_rv_def) - -(*UNSAFE*) -lemma corres_rvE_R_defer: - "corres_rvE_R (\rv rv'. r rv rv' \ Q rv rv') r (\_. True) (\_. True) f f' Q" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemmas corres_rv_proveT = - corres_rv_prove[where P=\ and P'=\ and F=True, simplified] - -lemmas corres_rvE_R_proveT = - corres_rvE_R_prove[where P=\ and P'=\ and F=True,simplified] - -lemma corres_rv_conj_lift: - "corres_rv F r P PP f g Q \ corres_rv F' r P' PP' f g Q' \ - corres_rv (F \ F') r (\s. P s \ P' s) (\s'. PP s' \ PP' s') f g (\rv rv'. Q rv rv' \ Q' rv rv')" - by (clarsimp simp add: corres_rv_def) - -lemma corres_rvE_R_conj_lift: - "corres_rvE_R F r P PP f g Q \ corres_rvE_R F' r P' PP' f g Q' \ - corres_rvE_R (F \ F') r (\s. P s \ P' s) (\s'. PP s' \ PP' s') f g (\rv rv'. 
Q rv rv' \ Q' rv rv')" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -subsection \Corres_rv method\ - -text \This method propagate corres_rv obligations into each precondition according to the following -heuristic: - For each conjunct in the obligation: - - 1) Try to solve trivially (to handle schematic conditions) - 2) If it does not depend on function return values, propagate it as a stateless precondition - 3) If either side is a corres_noop (used by symbolic execution), propagate as hoare triple - for other side. - 4) If it can be phrased entirely with variables accessible to the left side, propagate it as - a left hoare triple. - 5) As in 3) but on the right. - - Fail if any of 1-5 are unsuccessful for any conjunct. - -In the case where corres_rv fails, the user will need to intervene, either -by specifying where to defer the obligation or solving the goal in-place. -\ - -definition "corres_noop = return undefined" - -context begin - -private lemma corres_rv_defer_left: - "corres_rv F r (\_. \rv rv'. Q rv rv') P' f f' Q" - by (simp add: corres_rv_def) - -private lemma corres_rvE_R_defer_left: - "corres_rvE_R F r (\_. \rv rv'. Q rv rv') P' f f' Q" - by (simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -private lemma corres_rv_defer_right: - "corres_rv F r P (\_. \rv rv'. Q rv rv') f f' Q" - by (simp add: corres_rv_def) - -private lemma corres_rvE_R_defer_right: - "corres_rvE_R F r P (\_. \rv rv'. Q rv rv') f f' Q" - by (simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemmas corres_rv_proves = - corres_rv_proveT corres_rvE_R_proveT - -(* Try to handle cases where corres_rv obligations have been left schematic *) -lemmas corres_rv_trivials = - corres_rv_proves[where Q="\_ _. True", OF TrueI] - corres_rv_proves[where Q="\rv rv'. F rv rv' \ True" for F, # \simp\] - corres_rv_proves[where Q=r and r=r for r, # \simp\] - -lemmas corres_rv_defers = - corres_rv_defer_no_args corres_rvE_R_defer_no_args - -lemmas corres_rv_wp_lefts = - corres_rv_wp_left corres_rvE_R_wp_left - -lemmas corres_rv_wp_rights = - corres_rv_wp_right corres_rvE_R_wp_right - -lemmas corres_rv_noops = - corres_rv_wp_lefts[where f'=corres_noop] corres_rv_wp_rights[where f=corres_noop] - -lemmas corres_rv_lifts' = - corres_rv_conj_lift corres_rvE_R_conj_lift - -lemmas corres_rv_lifts = - corres_rv_lifts' - corres_rv_lifts'[where P="\_. True" and P'="\_. True" and f="corres_noop", simplified] - corres_rv_lifts'[where PP="\_. True" and PP'="\_. True" and g="corres_noop", simplified] - -lemmas corres_rv_prove_simple = - corres_rv_proveT[# \thin_tac _, thin_tac _\, simplified] - -method corres_rv = - (((repeat_new \rule corres_rv_trivials corres_rv_lifts\)?); - ((rule corres_rv_trivials corres_rv_defers corres_rv_noops | - (succeeds \rule corres_rv_defer_left corres_rvE_R_defer_left\, - rule corres_rv_wp_lefts) | - (succeeds \rule corres_rv_defer_right corres_rvE_R_defer_right\, - rule corres_rv_wp_rights)))) - -end - - -section \CorresK Split rules\ - -text \ - The corresK split allows preconditions to be propagated backward via the extra stateless precondition - (here given as @{term F}. The head function is propagated backward directly, while the tail - is propagated via corres_rv. Using the corres_rv method, this condition is then decomposed and - pushed into the stateless, left, and right preconditions as appropriate. 
- - The return value relation is now almost never needed directly, and so it is wrapped in corres_protect - to prevent it from being used during simplification. - \ - -lemma corresK_split: - assumes x: "corres_underlyingK sr nf nf' F r' P P' a c" - assumes y: "\rv rv'. corres_protect (r' rv rv') \ corres_underlyingK sr nf nf' (F' rv rv') r (R rv) (R' rv') (b rv) (d rv')" - assumes c: "corres_rv F'' r' PP PP' a c F'" - assumes z: "\Q\ a \R\" "\Q'\ c \R'\" - shows "corres_underlyingK sr nf nf' (F \ F'') r (PP and P and Q) (PP' and P' and Q') (a >>= (\rv. b rv)) (c >>= (\rv'. d rv'))" - apply (clarsimp simp: corres_underlying_def corres_underlyingK_def bind_def) - apply (rule conjI) - apply (frule (3) x[simplified corres_underlyingK_def, rule_format, THEN corres_underlyingD],simp) - apply clarsimp - apply (drule(1) bspec,clarsimp) - apply (drule (5) corres_rvD[OF c]) - apply (rule_tac x="(ac,bc)" in bexI,clarsimp) - apply (frule_tac s'=baa in y[simplified corres_underlyingK_def corres_protect_def, rule_format, THEN corres_underlyingD]) - apply assumption+ - apply (erule(1) use_valid[OF _ z(1)]) - apply (erule(1) use_valid[OF _ z(2)]) - apply fastforce - apply clarsimp - apply (drule(1) bspec,clarsimp) - apply simp - apply (frule (3) x[simplified corres_underlyingK_def, rule_format, THEN corres_underlyingD],simp) - apply clarsimp - apply (drule(1) bspec,clarsimp) - apply (drule (5) corres_rvD[OF c]) - apply (frule_tac s'=baa in y[simplified corres_underlyingK_def corres_protect_def, rule_format, THEN corres_underlyingD]) - apply simp+ - apply (erule(1) use_valid[OF _ z(1)]) - apply (erule(1) use_valid[OF _ z(2)]) - apply fastforce - apply clarsimp - done - -section \Corres_inst\ - -text \Handles rare in-place subgoals generated by corres rules which need to be solved immediately - in order to instantiate a schematic. - We peek into the generated return-value relation to see if we can solve the instantiation. -\ - -definition "corres_inst_eq x y \ x = y" - -lemma corres_inst_eqI[wp]: "corres_inst_eq x x" by (simp add: corres_inst_eq_def) - -lemma corres_inst_test: "False \ corres_inst_eq x y" by simp - -method corres_inst = - (succeeds \rule corres_inst_test\, fails \rule TrueI\, - (rule corres_inst_eqI | - (clarsimp simp: corres_protect_def split del: if_split, rule corres_inst_eqI) - | (clarsimp simp: corres_protect_def split del: if_split, - fastforce intro!: corres_inst_eqI)))[1] - -section \Corres Method\ - -text \Handles structured decomposition of corres goals\ - -named_theorems - corres_splits and (* rules that, one applied, must - eventually yield a successful corres or corresK rule application*) - corres_simp_del and (* bad simp rules that break everything *) - corres and (* solving terminal corres subgoals *) - corresK (* calculational rules that are phrased as corresK rules *) - -context begin - -lemma corres_fold_dc: - "corres_underlyingK sr nf nf' F dc P P' f f' \ corres_underlyingK sr nf nf' F (\_ _. True) P P' f f'" - by (simp add: dc_def[abs_def]) - -private method corres_fold_dc = - (match conclusion in - "corres_underlyingK _ _ _ _ (\_ _. True) _ _ _ _" \ \rule corres_fold_dc\) - -section \Corres_apply method\ - -text \This is a private method that performs an in-place rewrite of corres rules into - corresK rules. This is primarily for backwards-compatibility with the existing corres proofs. - Works by trying to apply a corres rule, then folding the resulting subgoal state into a single - conjunct and atomizing it, then propagating the result into the stateless precondition. 
-\ - -private definition "guard_collect (F :: bool) \ F" -private definition "maybe_guard F \ True" - -private lemma corresK_assume_guard_guarded: - "(guard_collect F \ corres_underlying sr nf nf' r Q Q' f g) \ - maybe_guard F \ corres_underlyingK sr nf nf' F r Q Q' f g" - by (simp add: corres_underlyingK_def guard_collect_def) - -private lemma guard_collect: "guard_collect F \ F" - by (simp add: guard_collect_def) - -private lemma has_guard: "maybe_guard F" by (simp add: maybe_guard_def) -private lemma no_guard: "maybe_guard True" by (simp add: maybe_guard_def) - -private method corres_apply = - (rule corresK_assume_guard_guarded, - (determ \rule corres\, safe_fold_subgoals)[1], - #break "corres_apply", - ((focus_concl \(atomize (full))?\, erule guard_collect, rule has_guard) | rule no_guard))[1] - -private method corres_alternate = corres_inst | corres_rv - - - -method corres_once declares corres_splits corres corresK corresc_simp = - (no_schematic_concl, - (corres_alternate | - (corres_fold_dc?, - (corres_pre, - #break "corres", - ( (check_corresK, determ \rule corresK\) - | corres_apply - | corres_concrete_r - | corresc - | (rule corres_splits, corres_once) - ))))) - - -method corres declares corres_splits corres corresK corresc_simp = - (corres_once+)[1] - -text \Unconditionally try applying split rules. Useful for determining why corres is not applying - in a given proof.\ - -method corres_unsafe_split declares corres_splits corres corresK corresc_simp = - ((rule corres_splits | corres_pre | corres_once)+)[1] - -end +(* A proof method for automating simple steps in corres proofs. + + While the method might solve some corres proofs completely, the purpose is to make simple + things more automatic, remove boilerplate, and to leave a proof state in which the user can make + more progress. The goal is not to provide full automation or deeper search. + + The main idea is to repeatedly try to apply terminal [corres] rules after splitting off the head + of a bind/bindE statement on both sides of a corres goal. The method provides options for less + safe rules such as moving asserts to guards etc when the user knows that this is safe to do in + a particular instance. + + See description at corres' method below for all parameters and options. +*) + +section \Goal predicates\ + +(* Succeed if the conclusion is a corres goal and also not purely schematic *) +method is_corres = succeeds \rule corres_inst\, fails \rule TrueI\ + +lemma no_fail_triv: "no_fail P f \ no_fail P f" . +lemmas hoare_trivs = hoare_triv hoare_trivE hoare_trivE_R hoare_trivR_R no_fail_triv + +(* Succeed if the conclusion is a wp/no_fail goal and also not purely schematic*) +method is_wp = succeeds \rule hoare_trivs\, fails \rule TrueI\ + +lemmas hoare_post_False = hoare_pre_cont[where P="\_. \"] +lemmas hoareE_post_False = hoare_FalseE[where Q="\_. \" and E="\_. \"] + +(* Succeed if the conclusion has a schematic post condition (assuming a wp goal). 
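+   A schematic postcondition unifies with both the trivially false and the trivially true
+   postconditions tried below, while a concrete postcondition fails at least one of the two
+   checks.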
*) +method is_hoare_schematic_post = + (* If the post condition matches both \ and \, it must be schematic *) + succeeds \wp_pre, rule hoare_post_False hoareE_post_False\, + succeeds \wp_pre, rule wp_post_taut wp_post_tautE\ + +(* Succeed if wpsimp or wp can safely be applied *) +method is_safe_wp = is_wp, fails \is_hoare_schematic_post\ + +section \Main corres method\ + +named_theorems corres_splits +method corres_split declares corres_splits = no_name_eta, rule corres_splits + +(* This method is called on non-corres, non-wp side conditions after a corres rule has been + applied. At that point, there should be no schematic variables in those side condition goals. + Despite that, we are still careful with simp etc here, in case the user does provide a corres + rule that generates a schematic in those side condition goals. *) +method corres_cleanup methods m uses simp simp_del split split_del cong intro = + #break "corres_cleanup", + ( m + | assumption + | rule refl TrueI + | clarsimp simp del: corres_no_simp simp_del simp: simp split: split split del: split_del + cong: cong intro!: intro + (* enables passing in conjI for terminal goals: *) + | (rule intro; + corres_cleanup m simp: simp simp_del: simp_del split: split split_del: split_del + cong: cong intro: intro)) + +(* Apply a single corres rule and attempt to solve non-corres and non-wp side conditions. + We don't expect any wp side conditions, but check anyway for safety. If the rule is declared + as terminal rule, all side conditions must be solved and no corres or wp side conditions are + allowed. If the rule is declared as a regular corres rule, unsolved side conditions as well as + corres and wp side conditions will be left over unchanged. *) +method corres_rule + methods m uses simp simp_del split split_del cong intro declares corres corres_term = + determ \solves \((rule corres_term | corres_rrel_pre, rule corres_term); + solves \corres_cleanup m simp: simp simp_del: simp_del split: split + split_del: split_del cong: cong\)\ + | (rule corres | corres_rrel_pre, rule corres); + ((fails \is_corres\, fails \is_wp\, + solves \corres_cleanup m simp: simp simp_del: simp_del split: split + split_del: split_del cong: cong\)?)\ + +(* For normalisation of corres terms, e.g. liftE *) +named_theorems corres_simp + +(* The main method: + + After preliminaries such as wpfix and corres_pre, repeatedly try to either solve the goal + outright or split off the head from a bind/bindE statement and apply a corres rule (only + split when a corres rule applies). If none of these works, try a corres rule from the "fallback" + argument. (These are for things like moving asserts to a guard, which we only want to do if no + other corres rule applies). + + Attempt to solve side conditions with the corres_cleanup method. The cleanup method uses the + simp and term_simp arguments. + + Attempt simp on the head corres goal without rewriting guards or return relation when + none of these make progress (to process things such as liftM). Does not use the term_simp + argument. + + Attempt clarsimp on the head side condition and final implications. Does not use the term_simp + argument. + + Attempt wpsimp+ when the head goal is a wp goal (usually present when all corres goals have been + solved). Fail if we somehow ended up with a schematic post condition despite all safety measures. 
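+
+   A typical invocation (via the corres instance defined below, with hypothetical rule names
+   purely for illustration) would be
+     apply (corres corres: my_corres_rule simp: my_def wp: my_wp_rule)
+   leaving any remaining corres, wp, and guard-implication goals for the user to discharge.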
+*) +method corres' + methods m + uses simp term_simp simp_del split split_del cong intro wp wp_del fallback + declares corres corres_term corres_splits = + (((* debug breakpoint *) + #break "corres", + (* introduce schematic guards if they don't exist *) + corres_pre0 + (* fix up schematic guards if they contain constructor parameters *) + | wpfix + (* apply a single corres rule if possible *) + | corres_rule m simp: term_simp simp simp_del: simp_del split_del: split_del split: split + cong: cong corres: corres corres_term: corres_term + (* only split if we can apply a single corres rule afterwards *) + | corres_split corres_splits: corres_splits, + corres_rule m simp: simp term_simp simp_del: simp_del split_del: split_del split: split + cong: cong corres: corres corres_term: corres_term + (* apply potentially unsafe fallback rules if any are provided *) + | corres_rule m simp: simp term_simp simp_del: simp_del split_del: split_del split: split + cong: cong corres: fallback + (* simplify head corres goal, e.g. for things like liftM unfolding if the user provides such + a rule as "simp". Not clarsimp, because clarsimp will still perform hypsubst on assumptions + and might through that rewrite guards *) + | is_corres, + simp (no_asm_use) cong: corres_weaker_cong cong split: split split del: if_split split_del + add: simp corres_simp del: corres_no_simp simp_del + (* simplify any remaining side condition head goal (non-corres, non-wp). This is either a side + condition that was not solved by corres_cleanup, or it is one of the two terminal implication + goals. It is very likely that the method will stop after this and not have solved the goal, + but it also very likely that the first thing we want to do for such a goal is clarsimp. That + means, overall we might solve a few more goals, and not be detrimental to interactive proof + either. *) + | fails \is_corres\, fails \is_wp\, + clarsimp cong: cong simp del: simp_del simp: simp split del: if_split split_del split: split + intro!: intro + (* if (and only if) we get to the state where all corres goals and side conditions are solved, + attempt to solve all wp goals that were generated in order. We are not using then_all_new_fwd + here, because we should only start solving wp goals once *all* corres goals are solved -- + otherwise the goal will still have schematic post conditions. Fail if there is a + free schematic postcondition despite all these measures. + *) + | is_safe_wp, + (wpsimp wp: wp wp_del: wp_del simp: simp simp_del: simp_del split: split split_del: split_del + cong: cong)+ + )+)[1] + +(* Instance of the corres' method with default cleanup tactic. We provide "fail" as default to let + the other cleanup tactis run. "succeed" would stop without progress (useful for debugging). *) +method corres + uses simp term_simp simp_del split split_del cong intro wp wp_del fallback + declares corres corres_term corres_splits = + corres' \fail\ simp: simp term_simp: term_simp simp_del: simp_del split: split + split_del: split_del cong: cong intro: intro wp: wp wp_del: wp_del + fallback: fallback + corres: corres corres_term: corres_term corres_splits: corres_splits + + +section \Corres rule setup\ + +(* Avoid using equations in the assumptions. subst_all gets around (no_asm_use) in some cases, + which we don't want. 
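+   Declaring subst_all in corres_no_simp removes it from the simpset in the simp/clarsimp calls
+   above, so assumption equations are not substituted into the goal and cannot rewrite guards
+   behind the user's back.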
*) +lemmas [corres_no_simp] = subst_all lemmas [corres_splits] = - corresK_split - -lemma corresK_when [corres_splits]: - "\corres_protect G \ corres_protect G' \ corres_underlyingK sr nf nf' F dc P P' a c\ -\ corres_underlyingK sr nf nf' ((G = G') \ F) dc ((\x. G \ P x)) (\x. G' \ P' x) (when G a) (when G' c)" - apply (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) - apply (cases "G = G'"; cases G; simp) - by (clarsimp simp: return_def) - -lemma corresK_return_trivial: - "corres_underlyingK sr nf nf' True dc (\_. True) (\_. True) (return ()) (return ())" - by (simp add: corres_underlyingK_def) - -lemma corresK_return_eq: - "corres_underlyingK sr nf nf' True (=) (\_. True) (\_. True) (return x) (return x)" - by (simp add: corres_underlyingK_def) - -lemma corres_lift_to_K: - "corres_underlying sra nfa nf'a ra Pa P'a fa f'a \ corres_underlying sr nf nf' r P P' f f' \ - corres_underlyingK sra nfa nf'a F ra Pa P'a fa f'a \ corres_underlyingK sr nf nf' F r P P' f f'" - by (simp add: corres_underlyingK_def) - -lemmas [THEN iffD2, atomized, THEN corresK_lift_rule, rule_format, simplified o_def, corres_splits] = - corres_liftE_rel_sum - corres_liftM_simp - corres_liftM2_simp - - -lemmas [corresK] = - corresK_return_trivial - corresK_return_eq - -lemma corresK_subst_left: "g = f \ - corres_underlyingK sr nf nf' F r P P' f f' \ - corres_underlyingK sr nf nf' F r P P' g f'" by simp - -lemma corresK_subst_right: "g' = f' \ - corres_underlyingK sr nf nf' F r P P' f f' \ - corres_underlyingK sr nf nf' F r P P' f g'" by simp - -lemmas corresK_fun_app_left[corres_splits] = corresK_subst_left[OF fun_app_def[THEN meta_eq_to_obj_eq]] -lemmas corresK_fun_app_right[corres_splits] = corresK_subst_right[OF fun_app_def[THEN meta_eq_to_obj_eq]] - -lemmas corresK_Let_left[corres_splits] = corresK_subst_left[OF Let_def[THEN meta_eq_to_obj_eq]] -lemmas corresK_Let_right[corres_splits] = corresK_subst_right[OF Let_def[THEN meta_eq_to_obj_eq]] - -lemmas corresK_return_bind_left[corres_splits] = corresK_subst_left[OF return_bind] -lemmas corresK_return_bind_right[corres_splits] = corresK_subst_right[OF return_bind] - -lemmas corresK_liftE_bindE_left[corres_splits] = corresK_subst_left[OF liftE_bindE] -lemmas corresK_liftE_bindE_right[corres_splits] = corresK_subst_right[OF liftE_bindE] - -lemmas corresK_K_bind_left[corres_splits] = - corresK_subst_left[where g="K_bind f rv" and f="f" for f rv, # \simp\] - -lemmas corresK_K_bind_right[corres_splits] = - corresK_subst_right[where g'="K_bind f' rv" and f'="f'" for f' rv, # \simp\] - - -section \Corres Search - find symbolic execution path that allows a given rule to be applied\ - -lemma corresK_if [corres_splits]: - "\(corres_protect G \ corres_protect G' \ corres_underlyingK sr nf nf' F r P P' a c); - (corres_protect (\G) \ corres_protect (\G') \ corres_underlyingK sr nf nf' F' r Q Q' b d)\ -\ corres_underlyingK sr nf nf' ((G = G') \ (G \ F) \ (\G \ F')) r (if G then P else Q) (if G' then P' else Q') (if G then a else b) - (if G' then c else d)" - by (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) - -lemma corresK_if_rev: - "\(corres_protect (\ G) \ corres_protect G' \ corres_underlyingK sr nf nf' F r P P' a c); - (corres_protect G \ corres_protect (\G') \ corres_underlyingK sr nf nf' F' r Q Q' b d)\ -\ corres_underlyingK sr nf nf' ((\ G = G') \ (\G \ F) \ (G \ F')) r (if G then Q else P) (if G' then P' else Q') (if G then b else a) - (if G' then c else d)" - by (simp add: corres_underlying_def corres_underlyingK_def 
corres_protect_def) - - - -named_theorems corres_symb_exec_ls and corres_symb_exec_rs - -lemma corresK_symb_exec_l_search[corres_symb_exec_ls]: - fixes x :: "'b \ 'a \ ('d \ 'a) set \ bool" - notes [simp] = corres_noop_def - shows - "\\s. \PP s\ m \\_. (=) s\; \rv. corres_underlyingK sr nf True (F rv) r (Q rv) P' (x rv) y; - corres_rv F' dc RR (\_. True) m (corres_noop) (\rv _. F rv); - empty_fail m; no_fail P m; \R\ m \Q\\ -\ corres_underlyingK sr nf True F' r (RR and P and R and (\s. \s'. s = s' \ PP s' s)) P' (m >>= x) y" - apply (clarsimp simp add: corres_underlyingK_def) - apply (rule corres_name_pre) - apply (clarsimp simp: corres_underlying_def corres_underlyingK_def - bind_def valid_def empty_fail_def no_fail_def) - apply (drule_tac x=a in meta_spec)+ - apply (drule_tac x=a in spec)+ - apply (drule mp, assumption)+ - apply (clarsimp simp: not_empty_eq) - apply (drule corres_rvD; (assumption | simp add: return_def)?) - apply (drule_tac x="(aa,ba)" in bspec,simp)+ - apply clarsimp - apply (drule_tac x=aa in meta_spec) - apply clarsimp - apply (drule_tac x="(ba,b)" in bspec,simp) - apply clarsimp - apply (drule mp, fastforce) - apply clarsimp - apply (drule_tac x="(a,bb)" in bspec,simp) - apply clarsimp - apply (rule_tac x="(aa,ba)" in bexI) - apply (clarsimp) - apply (rule_tac x="(ab,bc)" in bexI) - apply (clarsimp)+ - done - - -lemmas corresK_symb_exec_liftME_l_search[corres_symb_exec_ls] = - corresK_symb_exec_l_search[where 'd="'x + 'y", folded liftE_bindE] - -lemma corresK_symb_exec_r_search[corres_symb_exec_rs]: - fixes y :: "'b \ 'a \ ('e \ 'a) set \ bool" - assumes X: "\s. \PP' s\ m \\r. (=) s\" - assumes corres: "\rv. corres_underlyingK sr nf nf' (F rv) r P (Q' rv) x (y rv)" - assumes Y: "corres_rv F' dc (\_. True) RR (corres_noop) m (\_ rv'. F rv')" - assumes nf: "nf' \ no_fail P' m" - assumes Z: "\R\ m \Q'\" - notes [simp] = corres_noop_def - shows - "corres_underlyingK sr nf nf' F' r P (RR and P' and R and (\s. \s'. s = s' \ PP' s' s)) x (m >>= y)" - apply (insert corres) - apply (simp add: corres_underlyingK_def) - apply (rule impI) - apply (rule corres_name_pre) - apply (clarsimp simp: corres_underlying_def corres_underlyingK_def - bind_def valid_def empty_fail_def no_fail_def) - apply (intro impI conjI ballI) - apply clarsimp - apply (frule(1) use_valid[OF _ X]) - apply (drule corres_rvD[OF Y]; (assumption | simp add: return_def)?) - apply (frule(1) use_valid[OF _ Z]) - apply (drule_tac x=aa in meta_spec) - apply clarsimp - apply (drule_tac x="(a, ba)" in bspec,simp) - apply (clarsimp) - apply (drule(1) bspec) - apply clarsimp - apply clarsimp - apply (frule(1) use_valid[OF _ X]) - apply (drule corres_rvD[OF Y]; (assumption | simp add: return_def)?) - apply (frule(1) use_valid[OF _ Z]) - apply fastforce - apply (rule no_failD[OF nf],simp+) - done + corres_split + corres_splitEE -lemmas corresK_symb_exec_liftME_r_search[corres_symb_exec_rs] = - corresK_symb_exec_r_search[where 'e="'x + 'y", folded liftE_bindE] +lemmas corres_split_liftE_bindE [corres_splits] = + corres_splitEE[OF corres_liftE_rel_sum[THEN iffD2], simplified] -context begin +(* corres_term are rules that are safe when all side conditions can be solved immediately -- they + might have guards like \ that are too weak in general, but if the goal can be solved with + that weak guard, the rule is safe. This enables us to solve trivial cases without adding + unsafe rules to the [corres] set. 
*) +lemmas [corres_term] = + corres_return_eq_same corres_gets_trivial select_corres_eq + corres_underlying_assert_assert -private method corres_search_wp = solves \((wp | wpc | simp)+)[1]\ +lemmas corres_returnOk_eq_same[corres_term] = corres_returnOkTT[of "(=)"] +lemmas corres_throwError_eq_same[corres_term] = corres_throwErrorTT[of "(=)"] -text \ - Depth-first search via symbolic execution of both left and right hand - sides, handling case statements and - potentially mismatched if statements. The find_goal - method handles searching each branch of case or if statements, while - we rely on backtracking to guess the order of left/right executions. - - According to the above rules, a symbolic execution step can be taken - when the function can be shown to not modify the state. Questions - of wellformedness (i.e. empty_fail or no_fail) are deferred to the user - after the search concludes. -\ - - -private method corres_search_frame methods m uses search = - (#break "corres_search", - ((corres?, corres_once corres: search corresK:search) - | (corresc, find_goal \m\)[1] - | (rule corresK_if, find_goal \m\)[1] - | (rule corresK_if_rev, find_goal \m\)[1] - | (rule corres_symb_exec_ls, corres_search_wp, m) - | (rule corres_symb_exec_rs, corres_search_wp, m))) - -text \ - Set up local context where we make sure we don't know how to - corres our given rule. The search is finished when we can only - make corres progress once we add our rule back in -\ - -method corres_search uses search - declares corres corres_symb_exec_ls corres_symb_exec_rs = - (corres_pre, - use search[corres del] search[corresK del] search[corres_splits del] in - \use in \corres_search_frame \corres_search search: search\ search: search\\)[1] - -end - -chapter \Misc Helper Lemmas\ - - -lemma corresK_assert[corresK]: - "corres_underlyingK sr nf nf' ((nf' \ Q) \ P) dc \ \ (assert P) (assert Q)" - by (auto simp add: corres_underlyingK_def corres_underlying_def return_def assert_def fail_def) - -lemma corres_stateAssert_implied_frame: - assumes A: "\s s'. (s, s') \ sr \ F' \ P' s \ Q' s' \ A s'" - assumes C: "\x. corres_underlyingK sr nf nf' F r P Q f (g x)" - shows - "corres_underlyingK sr nf nf' (F \ F') r (P and P') (Q and Q') f (stateAssert A [] >>= g)" - apply (clarsimp simp: bind_assoc stateAssert_def) - apply (corres_search search: C[THEN corresK_unlift]) - apply (wp corres_rv_defer | simp add: A)+ - done - -lemma corresK_return [corres_concrete_r]: - "corres_underlyingK sr nf nf' (r a b) r \ \ (return a) (return b)" - by (simp add: corres_underlyingK_def) - -lemma corres_throwError_str [corres_concrete_rER]: - "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throwError b)" - by (simp add: corres_underlyingK_def)+ - -section \Error Monad\ - - - -lemma corresK_splitE [corres_splits]: - assumes x: "corres_underlyingK sr nf nf' F (f \ r') P P' a c" - assumes y: "\rv rv'. corres_protect (r' rv rv') \ corres_underlyingK sr nf nf' (F' rv rv') (f \ r) (R rv) (R' rv') (b rv) (d rv')" - assumes c: "corres_rvE_R F'' r' PP PP' a c F'" - assumes z: "\Q\ a \R\, -" "\Q'\ c \R'\, -" - shows "corres_underlyingK sr nf nf' (F \ F'') (f \ r) (PP and P and Q) (PP' and P' and Q') (a >>=E (\rv. b rv)) (c >>=E (\rv'. d rv'))" - unfolding bindE_def - apply (rule corresK_weakenK) - apply (rule corresK_split[OF x, where F'="\rv rv'. 
case (rv,rv') of (Inr rva, Inr rva') \ F' rva rva' | _ \ True"]) - apply (simp add: corres_protect_def) - prefer 2 - apply simp - apply (rule corres_rv_prove[where F=F'']) - apply (case_tac rv; case_tac rv'; simp) - apply (rule corres_rvE_RD[OF c]; assumption) - apply (case_tac rv; case_tac rv'; simp) - apply (simp add: corres_underlyingK_def corres_protect_def) - apply (rule corresK_weaken) - apply (rule y) - apply (simp add: corres_protect_def) - apply (subst conj_assoc[symmetric]) - apply (rule conjI) - apply (rule conjI) - apply (subgoal_tac "(case (Inr b) of (Inr b) \ R b s - | _ \ True)"; assumption?) - apply (subgoal_tac "(case (Inr ba) of (Inr ba) \ R' ba s' - | _ \ True)"; assumption?) - apply clarsimp+ - apply (insert z) - by ((fastforce simp: valid_def validE_def validE_R_def split: sum.splits)+) - -lemma corresK_returnOk [corres_concrete_r]: - "corres_underlyingK sr nf nf' (r (Inr a) (Inr b)) r \ \ (returnOk a) (returnOk b)" - by (simp add: returnOk_def corres_underlyingK_def) - -lemma corres_assertE_str[corresK]: - "corres_underlyingK sr nf nf' ((nf' \ Q) \ P) (f \ dc) \ \ (assertE P) (assertE Q)" - by (auto simp add: corres_underlying_def corres_underlyingK_def returnOk_def return_def assertE_def fail_def) - -lemmas corres_symb_exec_whenE_l_search[corres_symb_exec_ls] = - corresK_symb_exec_l_search[where 'd="'x + 'y", folded liftE_bindE] - -lemmas corres_returnOk_liftEs - [folded returnOk_liftE, THEN iffD2, atomized, THEN corresK_lift_rule, rule_format, corresK] = - corres_liftE_rel_sum[where m="return x" for x] - corres_liftE_rel_sum[where m'="return x" for x] - - -(* Failure *) - -lemma corresK_fail[corresK]: - "corres_underlyingK sr nf True False r P P' f fail" - by (simp add: corres_underlyingK_def) - -lemma corresK_fail_no_fail'[corresK]: - "corres_underlyingK sr nf False True (\_ _. False) (\_. True) (\_. True) f fail" - apply (simp add: corres_underlyingK_def) - by (fastforce intro!: corres_fail) - -section \Correswp\ - -text - \This method wraps up wp and wpc to ensure that they don't accidentally generate schematic - assumptions when handling hoare triples that emerge from corres proofs. - This is partially due to wp not being smart enough to avoid applying certain wp_comb rules - when the precondition is schematic, and arises when the schematic precondition doesn't have - access to some meta-variables in the postcondition. - - To solve this, instead of meta-implication in the wp_comb rules we use corres_inst_eq, which - can only be solved by reflexivity. In most cases these comb rules are either never applied or - solved trivially. If users manually apply corres_rv rules to create postconditions with - inaccessible meta-variables (@{method corres_rv} will never do this), then these rules will - be used. 
Since @{method corres_inst} has access to the protected return-value relation, it has a chance - to unify the generated precondition with the original schematic one.\ - -named_theorems correswp_wp_comb and correswp_wp_comb_del - -lemma corres_inst_eq_imp: - "corres_inst_eq A B \ A \ B" by (simp add: corres_inst_eq_def) - -lemmas corres_hoare_pre = hoare_pre[# \-\ \atomize (full), rule allI, rule corres_inst_eq_imp\] - -method correswp uses wp = - (determ \ - (fails \schematic_hoare_pre\, (wp add: wp | wpc)) - | (schematic_hoare_pre, - (use correswp_wp_comb [wp_comb] - correswp_wp_comb_del[wp_comb del] - hoare_pre[wp_pre del] - corres_hoare_pre[wp_pre] - in - \use in \wp add: wp | wpc\\))\) - -lemmas [correswp_wp_comb_del] = - hoare_vcg_precond_imp - hoare_vcg_precond_impE - hoare_vcg_precond_impE_R - -lemma corres_inst_conj_lift[correswp_wp_comb]: - "\\R\ f \Q\; \P'\ f \Q'\; \s. corres_inst_eq (R s) (P s)\ \ - \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" - by (rule hoare_vcg_conj_lift; simp add: valid_def corres_inst_eq_def) - -lemmas [correswp_wp_comb] = - correswp_wp_comb_del[# \-\ \atomize (full), rule allI, rule corres_inst_eq_imp\] - valid_validE_R - hoare_vcg_R_conj[OF valid_validE_R] - hoare_vcg_E_elim[OF valid_validE_E] - hoare_vcg_E_conj[OF valid_validE_E] - validE_validE_R - hoare_vcg_R_conj - hoare_vcg_E_elim - hoare_vcg_E_conj - hoare_vcg_conj_lift - -declare hoare_post_comb_imp_conj[correswp_wp_comb_del] - -section \Corressimp\ -text \Combines corres, wp and clarsimp\ - -text -\If clarsimp solves a terminal subgoal, its preconditions are left uninstantiated. We can -try to catch this by first attempting a trivial instantiation before invoking clarsimp, but -only keeping the result if clarsimp solves the goal,\ - -lemmas hoare_True_inst = - hoare_pre[where P="\_. True", of "\_. True", # \-\ \simp\] - asm_rl[of "\\_. True\ f \E\,\R\" for f E R] - -lemmas corres_rv_True_inst = - asm_rl[of "corres_rv True r (\_. True) (\_. True) f f' Q" for r f f' Q] - asm_rl[of "corres_rvE_R True r (\_. True) (\_. True) f f' Q" for r f f' Q] - -lemmas corresK_True_inst = - asm_rl[of "corres_underlyingK sr nf nf' True dc (\_. True) (\_. True) f g" for sr nf nf' f g] - asm_rl[of "corres_underlyingK sr nf nf' True r (\_. True) (\_. True) f g" for sr nf nf' r f g] - asm_rl[of "corres_underlying sr nf nf' dc (\_. True) (\_. True) f g" for sr nf nf' f g] - asm_rl[of "corres_underlying sr nf nf' r (\_. True) (\_. True) f g" for sr nf nf' r f g] - -lemmas calculus_True_insts = hoare_True_inst corres_rv_True_inst corresK_True_inst - -method corressimp uses simp cong search wp - declares corres corresK corres_splits corresc_simp = - ((no_schematic_concl, - (corres corresc_simp: simp - | correswp wp: wp - | (rule calculus_True_insts, solves \clarsimp cong: cong simp: simp corres_protect_def\) - | clarsimp cong: cong simp: simp simp del: corres_simp_del split del: if_split - | (match search in _ \ \corres_search search: search\)))+)[1] - -declare corres_return[corres_simp_del] - -section \Normalize corres rule into corresK rule\ - -lemma corresK_convert: - "A \ corres_underlying sr nf nf' r P Q f f' \ - corres_underlyingK sr nf nf' A r P Q f f'" - by (auto simp add: corres_underlyingK_def) - -method corresK_convert = (((drule uncurry)+)?, drule corresK_convert corresK_drop) - -section \Lifting corres results into wp proofs\ - -definition - "ex_abs_underlying sr P s' \ \s. 
(s,s') \ sr \ P s" - -lemma ex_absI[intro!]: - "(s, s') \ sr \ P s \ ex_abs_underlying sr P s'" - by (auto simp add: ex_abs_underlying_def) - -lemma use_corresK': - "corres_underlyingK sr False nf' F r PP PP' f f' \ \P\ f \Q\ \ - \K F and PP' and ex_abs_underlying sr (PP and P)\ f' \\rv' s'. \rv. r rv rv' \ ex_abs_underlying sr (Q rv) s'\" - by (fastforce simp: corres_underlying_def corres_underlyingK_def valid_def ex_abs_underlying_def) - -lemma use_corresK [wp]: - "corres_underlyingK sr False nf' F r PP PP' f f' \ \P\ f \\rv s. \rv'. r rv rv' \ Q rv' s\ \ - \K F and PP' and ex_abs_underlying sr (PP and P)\ f' \\rv'. ex_abs_underlying sr (Q rv')\" - apply (fastforce simp: corres_underlying_def corres_underlyingK_def valid_def ex_abs_underlying_def) - done - -lemma hoare_add_post': - "\\P'\ f \Q'\; \P''\ f \\rv s. Q' rv s \ Q rv s\\ \ \P' and P''\ f \Q\" - by (fastforce simp add: valid_def) - -lemma use_corresK_frame: - assumes corres: "corres_underlyingK sr False nf' F r PP P' f f'" - assumes frame: "(\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s')" - assumes valid: "\P\ f \Q\" - assumes valid': "\PP'\ f' \Q'\" - shows "\K F and P' and PP' and ex_abs_underlying sr (PP and P)\ f' \QQ'\" - apply (rule hoare_pre) - apply (rule hoare_add_post'[OF valid']) - apply (rule hoare_strengthen_post) - apply (rule use_corresK'[OF corres valid]) - apply (insert frame)[1] - apply (clarsimp simp: ex_abs_underlying_def) - apply clarsimp - done - -lemma use_corresK_frame_E_R: - assumes corres: "corres_underlyingK sr False nf' F (lf \ r) PP P' f f'" - assumes frame: "(\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s')" - assumes valid: "\P\ f \Q\, -" - assumes valid': "\PP'\ f' \Q'\, -" - shows "\K F and P' and PP' and ex_abs_underlying sr (PP and P)\ f' \QQ'\, -" - apply (simp only: validE_R_def validE_def) - apply (rule use_corresK_frame[OF corres _ valid[simplified validE_R_def validE_def] valid'[simplified validE_R_def validE_def]]) - by (auto simp: frame split: sum.splits) - -lemma K_True: "K True = (\_. True)" by simp -lemma True_And: "((\_. True) and P) = P" by simp - -method use_corres uses frame = - (corresK_convert?, drule use_corresK_frame use_corresK_frame_E_R, rule frame, - (solves \wp\ | defer_tac), (solves \wp\ | defer_tac), (simp only: True_And K_True)?) - -experiment - fixes sr nf' r P P' f f' F G Q Q' QQ' PP PP' g g' - assumes f_corres[corres]: "G \ F \ corres_underlying sr False True r P P' f f'" and - g_corres[corres]: "corres_underlying sr False True dc \ \ g g'" and - wpl [wp]: "\PP\ f \Q\" and wpr [wp]: "\PP'\ f' \Q'\" - and [wp]: "\P\ g \\_. P\" "\PP\ g \\_. PP\" "\P'\ g' \\_. P'\" "\PP'\ g' \\_. PP'\" and - frameA: "\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s'" - begin - - lemmas f_Q' = f_corres[atomized, @\use_corres frame: frameA\] - - lemma "G \ F \ corres_underlying sr False True dc (P and PP) (P' and PP') - (g >>= (K (f >>= K (assert True)))) (g' >>= (K (f' >>= (\rv'. 
(stateAssert (QQ' rv') [])))))" - apply (simp only: stateAssert_def K_def) - apply corres - apply (corres_search search: corresK_assert) - apply corres_rv - apply (correswp | simp)+ - apply corres_rv - apply (correswp wp: f_Q' | simp)+ - apply corressimp+ - by auto - -end - -section \Corres Argument lifting\ - -text \Used for rewriting corres rules with syntactically equivalent arguments\ - -lemma lift_args_corres: - "corres_underlying sr nf nf' r (P x) (P' x) (f x) (f' x) \ x = x' \ - corres_underlying sr nf nf' r (P x) (P' x') (f x) (f' x')" by simp - -method lift_corres_args = - (match premises in - H[thin]:"corres_underlying _ _ _ _ (P x) (P' x) (f x) (f' x)" (cut 5) for P P' f f' x \ - \match (f) in "\_. g" for g \ \fail\ \ _ \ - \match (f') in "\_. g'" for g' \ \fail\ \ _ \ - \cut_tac lift_args_corres[where f=f and f'=f' and P=P and P'=P', OF H]\\\)+ - -(* Use calculational rules. Don't unfold the definition! *) -lemmas corres_rv_def_I_know_what_I'm_doing = corres_rv_def -lemmas corres_rvE_R_def_I_know_what_I'm_doing = corres_rvE_R_def - -hide_fact corres_rv_def -hide_fact corres_rvE_R_def +lemma corres_get_trivial[corres_term]: + "corres_underlying sr nf nf' (\s s'. (s,s') \ sr) \ \ get get" + by simp -end +lemmas corres_underlying_stateAssert_stateAssert_trivial[corres_term] = + corres_underlying_stateAssert_stateAssert[where P=\ and P'=\, simplified] + +lemma corres_modify_tivial[corres_term]: + "(\s s'. (s, s') \ sr \ (f s, g s') \ sr) \ + corres_underlying sr nf nf' dc \ \ (modify f) (modify g)" + by (simp add: corres_modify) + +(* Regular corres rules are rules where we expect side conditions to be solvable once the rule + matches, but those side conditions might be too hard for automation, so they must be safe to + leave over for later manual proof. *) +lemmas [corres] = + corres_underlying_fail_fail + corres_fail + corres_assert + whenE_throwError_corres (* match this before corres_whenE *) + corres_whenE + corres_when + + (* not in corres_split, because head is usually not solvable by single rule: *) + corres_split_handle + corres_split_catch + corres_if2 + +(* Transform corres terms when no other rules match: *) +lemmas [corres_simp] = + liftE_bindE + unless_when + unlessE_whenE + +end \ No newline at end of file diff --git a/lib/Corres_UL.thy b/lib/Corres_UL.thy index f36723a898..395a327b04 100644 --- a/lib/Corres_UL.thy +++ b/lib/Corres_UL.thy @@ -7,8 +7,8 @@ theory Corres_UL imports Crunch_Instances_NonDet - WPEx - WPFix + Monads.WPEx + Monads.WPFix HaskellLemmaBucket begin @@ -72,39 +72,39 @@ lemma corres_underlying_serial: apply auto done -(* FIXME: duplicated with HOL.iff_allI *) -lemma All_eqI: - assumes ass: "\x. A x = B x" - shows "(\x. A x) = (\x. B x)" - apply (subst ass) - apply (rule refl) - done - lemma corres_singleton: "corres_underlying sr nf nf' r P P' (\s. ({(R s, S s)},x)) (\s. ({(R' s, S' s)},False)) = (\s s'. P s \ P' s' \ (s, s') \ sr \ (nf \ \ x) \ ((S s, S' s') \ sr \ r (R s) (R' s')))" by (auto simp: corres_underlying_def) -lemma corres_return[simp]: +(* Lemmas that should not be [simp] inside automated corres methods. + Shared between Corres_Method and CorresK_Method. *) +named_theorems corres_no_simp + +(* Safe terminal corres rules that instantiate return relation and guards. + Shared between Corres_Method and CorresK_Method. *) +named_theorems corres + +(* Terminal corres rules that instantiate return relation and guards and that are safe if side + conditions case be solved immediately. Used in Corres_Method. 
*) +named_theorems corres_term + +lemma corres_return[simp, corres_no_simp]: "corres_underlying sr nf nf' r P P' (return a) (return b) = ((\s s'. P s \ P' s' \ (s, s') \ sr) \ r a b)" by (simp add: return_def corres_singleton) -lemma corres_get[simp]: - "corres_underlying sr nf nf' r P P' get get = - (\ s s'. (s, s') \ sr \ P s \ P' s' \ r s s')" - apply (simp add: get_def corres_singleton) - apply (rule All_eqI)+ - apply safe - done +lemma corres_get[simp, corres_no_simp]: + "corres_underlying sr nf nf' r P P' get get = (\ s s'. (s, s') \ sr \ P s \ P' s' \ r s s')" + by (fastforce simp: get_def corres_singleton) -lemma corres_gets[simp]: +lemma corres_gets[simp, corres_no_simp]: "corres_underlying sr nf nf' r P P' (gets a) (gets b) = (\ s s'. P s \ P' s' \ (s, s') \ sr \ r (a s) (b s'))" by (simp add: simpler_gets_def corres_singleton) -lemma corres_throwError[simp]: +lemma corres_throwError[simp, corres_no_simp]: "corres_underlying sr nf nf' r P P' (throwError a) (throwError b) = ((\s s'. P s \ P' s' \ (s, s') \ sr) \ r (Inl a) (Inl b))" by (simp add: throwError_def) @@ -210,7 +210,13 @@ end text \The guard weakening rule\ -lemma stronger_corres_guard_imp: +named_theorems corres_pre +(* Introduce schematic corres guards; fail if already schematic *) +method corres_pre0 = WP_Pre.pre_tac corres_pre +(* Optionally introduce schematic corres guards *) +method corres_pre = corres_pre0? + +lemma stronger_corres_guard_imp[corres_pre]: assumes x: "corres_underlying sr nf nf' r Q Q' f g" assumes y: "\s s'. \ P s; P' s'; (s, s') \ sr \ \ Q s" assumes z: "\s s'. \ P s; P' s'; (s, s') \ sr \ \ Q' s'" @@ -221,12 +227,28 @@ lemma corres_guard_imp: assumes x: "corres_underlying sr nf nf' r Q Q' f g" assumes y: "\s. P s \ Q s" "\s. P' s \ Q' s" shows "corres_underlying sr nf nf' r P P' f g" - apply (rule stronger_corres_guard_imp) + apply corres_pre apply (rule x) apply (simp add: y)+ done -lemma corres_rel_imp: +lemma corres_guard_imp2: + "\corres_underlying sr nf nf' r Q P' f g; \s. P s \ Q s\ + \ corres_underlying sr nf nf' r P P' f g" + by corres_pre +(* FIXME: names\ (cf. corres_guard2_imp below) *) +lemmas corres_guard1_imp = corres_guard_imp2 + +lemma corres_guard2_imp: + "\corres_underlying sr nf nf' r P Q' f g; \s. P' s \ Q' s\ + \ corres_underlying sr nf nf' r P P' f g" + by corres_pre + +named_theorems corres_rrel_pre +(* Introduce schematic return relation, fail if already schematic *) +method corres_rrel_pre = WP_Pre.pre_tac corres_rrel_pre + +lemma corres_rel_imp[corres_rrel_pre]: assumes x: "corres_underlying sr nf nf' r' P P' f g" assumes y: "\x y. r' x y \ r x y" shows "corres_underlying sr nf nf' r P P' f g" @@ -317,6 +339,18 @@ lemma corres_splitEE: apply (clarsimp simp: lift_def y)+ done +lemma corres_splitEE_prod: + assumes x: "corres_underlying sr nf nf' (f \ r') P P' a c" + assumes y: "\x y x' y'. r' (x, y) (x', y') + \ corres_underlying sr nf nf' (f \ r) (R x y) (R' x' y') (b x y) (d x' y')" + assumes z: "\Q\ a \\(x, y). R x y \,\\\\" "\Q'\ c \\(x, y). R' x y\,\\\\" + shows "corres_underlying sr nf nf' (f \ r) (P and Q) (P' and Q') (a >>=E (\(x, y). b x y)) (c >>=E (\(x, y). d x y))" + using assms + apply (unfold bindE_def validE_def) + apply (rule corres_split[rotated 2], assumption+) + apply (fastforce simp: lift_def y split: sum.splits) + done + lemma corres_split_handle: assumes "corres_underlying sr nf nf' (f' \ r) P P' a c" assumes y: "\ft ft'. 
f' ft ft' @@ -447,35 +481,45 @@ lemma corres_if3: (if G then a else b) (if G' then c else d)" by simp +lemma corres_if_strong: + "\\s s'. \(s, s') \ sr; R s; R' s'\ \ G = G'; + \G; G'\ \ corres_underlying sr nf nf' r P P' a c; + \\ G; \ G'\ \ corres_underlying sr nf nf' r Q Q' b d \ + \ corres_underlying sr nf nf' r + (R and (if G then P else Q)) (R' and (if G' then P' else Q')) + (if G then a else b) (if G' then c else d)" + by (fastforce simp: corres_underlying_def) + +lemmas corres_if_strong' = + corres_if_strong[where R=R and P=R and Q=R for R, + where R'=R' and P'=R' and Q'=R' for R', simplified] text \Some equivalences about liftM and other useful simps\ -lemma snd_liftM [simp]: - "snd (liftM t f s) = snd (f s)" - by (auto simp: liftM_def bind_def return_def) +(* These rules are declared [simp], which in hindsight was not a good decision, because they + change the return relation which often is schematic when these rules apply in the goal. + In those circumstances it is usually safer to unfold liftM_def and proceed with the resulting + substituted term. + (We leave the [simp] attribute here, because too many proofs now depend on it) +*) lemma corres_liftM_simp[simp]: - "(corres_underlying sr nf nf' r P P' (liftM t f) g) - = (corres_underlying sr nf nf' (r \ t) P P' f g)" - apply (simp add: corres_underlying_def - handy_liftM_lemma Ball_def Bex_def) - apply (rule All_eqI)+ - apply blast - done + "corres_underlying sr nf nf' r P P' (liftM t f) g = + corres_underlying sr nf nf' (r \ t) P P' f g" + by (fastforce simp add: corres_underlying_def in_liftM) lemma corres_liftM2_simp[simp]: - "corres_underlying sr nf nf' r P P' f (liftM t g) = - corres_underlying sr nf nf' (\x. r x \ t) P P' f g" - apply (simp add: corres_underlying_def - handy_liftM_lemma Ball_def) - apply (rule All_eqI)+ - apply blast - done + "corres_underlying sr nf nf' r P P' f (liftM t g) = + corres_underlying sr nf nf' (\x. r x \ t) P P' f g" + by (fastforce simp add: corres_underlying_def in_liftM) lemma corres_liftE_rel_sum[simp]: - "corres_underlying sr nf nf' (f \ r) P P' (liftE m) (liftE m') = corres_underlying sr nf nf' r P P' m m'" + "corres_underlying sr nf nf' (f \ r) P P' (liftE m) (liftE m') = + corres_underlying sr nf nf' r P P' m m'" by (simp add: liftE_liftM o_def) +lemmas corres_liftE_lift = corres_liftE_rel_sum[THEN iffD2] + text \Support for proving correspondence to noop with hoare triples\ lemma corres_noop: @@ -505,7 +549,7 @@ proof - apply (rule P) apply assumption apply (erule(1) nf') - apply (case_tac ra, simp_all) + apply (simp split: sum.splits) done qed @@ -539,24 +583,12 @@ text \Support for dividing correspondence along lemma corres_disj_division: "\ P \ Q; P \ corres_underlying sr nf nf' r R S x y; Q \ corres_underlying sr nf nf' r T U x y \ \ corres_underlying sr nf nf' r (\s. (P \ R s) \ (Q \ T s)) (\s. 
(P \ S s) \ (Q \ U s)) x y" - apply safe - apply (rule corres_guard_imp) - apply simp - apply simp - apply simp - apply (rule corres_guard_imp) - apply simp - apply simp - apply simp - done + by (safe; corres_pre, simp+) lemma corres_weaker_disj_division: "\ P \ Q; P \ corres_underlying sr nf nf' r R S x y; Q \ corres_underlying sr nf nf' r T U x y \ \ corres_underlying sr nf nf' r (R and T) (S and U) x y" - apply (rule corres_guard_imp) - apply (rule corres_disj_division) - apply simp+ - done + by (corres_pre, rule corres_disj_division, simp+) lemma corres_symmetric_bool_cases: "\ P = P'; \ P; P' \ \ corres_underlying srel nf nf' r Q Q' f g; @@ -575,13 +607,13 @@ lemma corres_symb_exec_l: assumes y: "\P\ m \Q\" assumes nf: "nf' \ no_fail P m" shows "corres_underlying sr nf nf' r P P' (m >>= (\rv. x rv)) y" - apply (rule corres_guard_imp) + apply corres_pre apply (subst gets_bind_ign[symmetric], rule corres_split[OF _ z]) apply (rule corres_noop2) apply (erule x) apply (rule gets_wp) apply (erule nf) - apply (rule non_fail_gets) + apply (rule no_fail_gets) apply (rule y) apply (rule gets_wp) apply simp+ @@ -593,12 +625,12 @@ lemma corres_symb_exec_r: assumes x: "\s. P' s \ \(=) s\ m \\r. (=) s\" assumes nf: "nf' \ no_fail P' m" shows "corres_underlying sr nf nf' r P P' x (m >>= (\rv. y rv))" - apply (rule corres_guard_imp) + apply corres_pre apply (subst gets_bind_ign[symmetric], rule corres_split[OF _ z]) apply (rule corres_noop2) apply (simp add: simpler_gets_def exs_valid_def) apply (erule x) - apply (rule non_fail_gets) + apply (rule no_fail_gets) apply (erule nf) apply (rule gets_wp) apply (rule y) @@ -618,7 +650,7 @@ proof - apply (erule nf) done show ?thesis - apply (rule corres_guard_imp) + apply corres_pre apply (subst return_bind[symmetric], rule corres_split [OF P]) apply (rule z) @@ -683,6 +715,17 @@ lemma corres_trivial: "corres_underlying sr nf nf' r \ \ f g \ corres_underlying sr nf nf' r \ \ f g" by assumption +lemma corres_underlying_trivial[corres]: + "\ nf' \ no_fail P' f \ \ corres_underlying Id nf nf' (=) \ P' f f" + by (auto simp add: corres_underlying_def Id_def no_fail_def) + +(* Instance of corres_underlying_trivial for unit type with dc instead of (=) as return relation, + for nicer return relation instantiation. *) +lemma corres_underlying_trivial_dc[corres]: + "(nf' \ no_fail P' f) \ corres_underlying Id nf nf' dc (\_. True) P' f f" + for f :: "('s, unit) nondet_monad" + by (fastforce intro: corres_underlying_trivial corres_rrel_pre) + lemma corres_assume_pre: assumes R: "\s s'. \ P s; Q s'; (s,s') \ sr \ \ corres_underlying sr nf nf' r P Q f g" shows "corres_underlying sr nf nf' r P Q f g" @@ -692,24 +735,13 @@ lemma corres_assume_pre: apply blast done -lemma corres_guard_imp2: - "\corres_underlying sr nf nf' r Q P' f g; \s. P s \ Q s\ \ corres_underlying sr nf nf' r P P' f g" - by (blast intro: corres_guard_imp) -(* FIXME: names\ (cf. corres_guard2_imp below) *) -lemmas corres_guard1_imp = corres_guard_imp2 - -lemma corres_guard2_imp: - "\corres_underlying sr nf nf' r P Q' f g; \s. P' s \ Q' s\ - \ corres_underlying sr nf nf' r P P' f g" - by (drule (1) corres_guard_imp[where P'=P' and Q=P], assumption+) - lemma corres_initial_splitE: "\ corres_underlying sr nf nf' (f \ r') P P' a c; \rv rv'. r' rv rv' \ corres_underlying sr nf nf' (f \ r) (Q rv) (Q' rv') (b rv) (d rv'); \P\ a \Q\, \\r s. True\; \P'\ c \Q'\, \\r s. 
True\\ \ corres_underlying sr nf nf' (f \ r) P P' (a >>=E b) (c >>=E d)" - apply (rule corres_guard_imp) + apply corres_pre apply (erule corres_splitEE) apply fastforce+ done @@ -720,11 +752,16 @@ lemma corres_assert_assume: by (auto simp: bind_def assert_def fail_def return_def corres_underlying_def) +lemma corres_assert_assume_l: + "corres_underlying sr nf nf' rrel P Q (f ()) g + \ corres_underlying sr nf nf' rrel (P and (\s. P')) Q (assert P' >>= f) g" + by (force simp: corres_underlying_def assert_def return_def bind_def fail_def) + lemma corres_assert_gen_asm_cross: "\ \s s'. \(s, s') \ sr; P' s; Q' s'\ \ A; A \ corres_underlying sr nf nf' r P Q f (g ()) \ \ corres_underlying sr nf nf' r (P and P') (Q and Q') f (assert A >>= g)" - by (metis corres_assert_assume corres_assume_pre corres_guard_imp pred_andE) + by (metis corres_assert_assume corres_assume_pre corres_weaker_disj_division) lemma corres_state_assert: "corres_underlying sr nf nf' rr P Q f (g ()) \ @@ -756,6 +793,20 @@ lemma corres_stateAssert_implied: apply (wp | rule no_fail_pre)+ done +lemmas corres_stateAssert_ignore = + corres_stateAssert_implied[where P=P and P'=P for P, simplified, rotated] + +lemma corres_stateAssert_r: + "corres_underlying sr nf nf' r P Q f (g ()) \ + corres_underlying sr nf nf' r P (Q and P') f (stateAssert P' [] >>= g)" + apply (clarsimp simp: bind_assoc stateAssert_def) + apply (rule corres_symb_exec_r [OF _ get_sp]) + apply (rule corres_assert_assume) + apply (rule corres_assume_pre) + apply (erule corres_guard_imp, clarsimp+) + apply (wp | rule no_fail_pre)+ + done + lemma corres_assert: "corres_underlying sr nf nf' dc (%_. P) (%_. Q) (assert P) (assert Q)" by (clarsimp simp add: corres_underlying_def return_def) @@ -849,6 +900,31 @@ lemma corres_assert_opt_assume: by (auto simp: bind_def assert_opt_def assert_def fail_def return_def corres_underlying_def split: option.splits) +lemma corres_assert_opt[corres]: + "r x x' \ + corres_underlying sr nf nf' (\x x'. r (Some x) x') (\s. x \ None) \ (assert_opt x) (return x')" + unfolding corres_underlying_def + by (clarsimp simp: assert_opt_def return_def split: option.splits) + +lemma assert_opt_assert_corres[corres]: + "(x = None) = (x' = None) \ + corres_underlying sr nf nf' (\y _. x = Some y) (K (x \ None)) \ + (assert_opt x) (assert (\y. x' = Some y))" + by (simp add: corres_underlying_def assert_opt_def return_def split: option.splits) + +lemma corres_assert_opt_l: + assumes "\x. P' = Some x \ corres_underlying sr nf nf' r (P x) Q (f x) g" + shows "corres_underlying sr nf nf' r (\s. \x. P' = Some x \ P x s) Q (assert_opt P' >>= f) g" + using assms + by (auto simp: bind_def assert_opt_def assert_def fail_def return_def corres_underlying_def + split: option.splits) + +lemma corres_gets_the_gets: + "corres_underlying sr False nf' r P P' (gets_the f) f' \ + corres_underlying sr nf nf' (\x x'. 
x \ None \ r (the x) x') P P' (gets f) f'" + apply (simp add: gets_the_def bind_def simpler_gets_def assert_opt_def) + apply (fastforce simp: corres_underlying_def in_monad split: option.splits) + done text \Support for proving correspondance by decomposing the state relation\ @@ -900,9 +976,15 @@ lemma corres_returnOk: apply wp done -lemmas corres_returnOkTT = corres_trivial [OF corres_returnOk] +lemma corres_returnOkTT: + "r x y \ corres_underlying sr nf nf' (r' \ r) \ \ (returnOk x) (returnOk y)" + by (simp add: corres_returnOk) -lemma corres_False [simp]: +lemma corres_throwErrorTT: + "r x y \ corres_underlying sr nf nf' (r \ r') \ \ (throwError x) (throwError y)" + by simp + +lemma corres_False [simp, corres_no_simp]: "corres_underlying sr nf nf' r P \ f f'" by (simp add: corres_underlying_def) @@ -958,7 +1040,7 @@ next show ?case apply (simp add: mapME_x_def sequenceE_x_def) apply (fold mapME_x_def sequenceE_x_def dc_def) - apply (rule corres_guard_imp) + apply corres_pre apply (rule corres_splitEE) apply (rule x) apply (rule IH) @@ -1094,6 +1176,18 @@ lemma corres_move_asm: lemmas corres_cross_over_guard = corres_move_asm[rotated] +lemma corres_cross_add_guard: + "\\s s'. \(s,s') \ sr; P s; P' s'\ \ Q' s'; + corres_underlying sr nf nf' r P (P' and Q') f g\ + \ corres_underlying sr nf nf' r P P' f g" + by (fastforce simp: corres_underlying_def) + +lemma corres_cross_add_abs_guard: + "\\s s'. \(s,s') \ sr; P s; P' s'\ \ Q s; + corres_underlying sr nf nf' r (P and Q) P' f g\ + \ corres_underlying sr nf nf' r P P' f g" + by (fastforce simp: corres_underlying_def) + lemma corres_either_alternate: "\ corres_underlying sr nf nf' r P Pa' a c; corres_underlying sr nf nf' r P Pb' b c \ \ corres_underlying sr nf nf' r P (Pa' or Pb') (a \ b) c" @@ -1160,7 +1254,7 @@ lemma corres_stateAssert_implied2: assumes g: "\Q\ g \\_. R'\" shows "corres_underlying sr nf nf' dc P Q f (g >>= (\_. stateAssert Q' []))" apply (subst bind_return[symmetric]) - apply (rule corres_guard_imp) + apply corres_pre apply (rule corres_split) apply (rule c) apply (clarsimp simp: corres_underlying_def return_def @@ -1174,6 +1268,18 @@ lemma corres_stateAssert_implied2: apply simp done +lemma corres_stateAssert_add_assertion: + "\ corres_underlying sr nf nf' r P (Q and Q') f (g ()); + \s s'. \ (s, s') \ sr; P s; Q s' \ \ Q' s' \ + \ corres_underlying sr nf nf' r P Q f (stateAssert Q' [] >>= g)" + apply (clarsimp simp: bind_assoc stateAssert_def) + apply (rule corres_symb_exec_r [OF _ get_sp]) + apply (rule corres_assume_pre) + apply (rule corres_assert_assume) + apply (erule corres_guard_imp, clarsimp+) + apply (wp | rule no_fail_pre)+ + done + lemma corres_add_noop_lhs: "corres_underlying sr nf nf' r P P' (return () >>= (\_. f)) g \ corres_underlying sr nf nf' r P P' f g" @@ -1194,7 +1300,7 @@ lemmas corres_split_dc = corres_split[where r'=dc, simplified] lemma isLeft_case_sum: "isLeft v \ (case v of Inl v' \ f v' | Inr v' \ g v') = f (theLeft v)" - by (clarsimp simp: isLeft_def) + by (clarsimp split: sum.splits) lemma corres_symb_exec_catch_r: "\ \rv. 
corres_underlying sr nf nf' r P (Q' rv) f (h rv); @@ -1207,19 +1313,18 @@ lemma corres_symb_exec_catch_r: apply assumption apply (simp add: validE_def) apply (erule hoare_chain, simp_all)[1] - apply (simp add: isLeft_def split: sum.split_asm) + apply (simp split: sum.split_asm) done -lemma corres_return_eq_same: - "a = b \ corres_underlying srel nf' nf (=) \ \ (return a) (return b)" - apply (simp add: corres_underlying_def return_def) - done +lemma corres_returnTT: + "r a b \ corres_underlying sr nf nf' r \ \ (return a) (return b)" + by simp + +lemmas corres_return_eq_same = corres_returnTT[of "(=)"] lemmas corres_discard_r = corres_symb_exec_r [where P'=P' and Q'="\_. P'" for P', simplified] -lemmas corres_returnTT = corres_return[where P=\ and P'=\, THEN iffD2] - lemma corres_assert_gen_asm: "\ F \ corres_underlying sr nf nf' r P Q f (g ()) \ \ corres_underlying sr nf nf' r (P and (\_. F)) Q f (assert F >>= g)" @@ -1246,6 +1351,19 @@ lemma corres_add_guard: corres_underlying sr nf nf' r Q Q' f g" by (auto simp: corres_underlying_def) +lemma corres_stateAssert_r_cross: + assumes A: "\s s'. (s, s') \ sr \ P' s \ Q' s' \ A s'" + assumes C: "corres_underlying sr nf nf' r P Q f (g ())" + shows + "corres_underlying sr nf nf' r (P and P') (Q and Q') f (stateAssert A [] >>= g)" + apply (clarsimp simp: bind_assoc stateAssert_def) + apply corres_pre + apply (rule corres_symb_exec_r) + apply (rule corres_assert_gen_asm2, rule C) + apply wpsimp+ + apply (simp add: A) + done + (* safer non-rewrite version of corres_gets *) lemma corres_gets_trivial: "\\s s'. (s,s') \ sr \ f s = f' s' \ @@ -1267,7 +1385,7 @@ lemma corres_underlying_assert_assert: lemma corres_underlying_stateAssert_stateAssert: assumes "\s s'. \ (s,s') \ rf_sr; P s; P' s' \ \ Q' s' = Q s" shows "corres_underlying rf_sr nf False dc P P' (stateAssert Q []) (stateAssert Q' [])" - by (auto simp: stateAssert_def get_def NonDetMonad.bind_def corres_underlying_def + by (auto simp: stateAssert_def get_def Nondet_Monad.bind_def corres_underlying_def assert_def return_def fail_def assms) (* We can ignore a stateAssert in the middle of a computation even if we don't ignore abstract @@ -1277,7 +1395,7 @@ lemma corres_stateAssert_no_fail: corres_underlying S False nf' r P Q (do v \ g; h v od) f \ \ corres_underlying S False nf' r P Q (do v \ g; _ \ stateAssert X []; h v od) f" apply (simp add: corres_underlying_def stateAssert_def get_def assert_def return_def no_fail_def fail_def cong: if_cong) - apply (clarsimp simp: split_def NonDetMonad.bind_def split: if_splits) + apply (clarsimp simp: split_def Nondet_Monad.bind_def split: if_splits) apply (erule allE, erule (1) impE) apply (drule (1) bspec, clarsimp)+ done diff --git a/lib/Crunch.ML b/lib/Crunch.ML index db4fd6200b..b6b6032346 100644 --- a/lib/Crunch.ML +++ b/lib/Crunch.ML @@ -44,11 +44,7 @@ fun read_sections sections = val crunch_parser = (((Scan.optional (P.$$$ "(" |-- P.name --| P.$$$ ")") "" -- P.name -- Parse.opt_attribs --| P.$$$ ":") -- P.list1 P.const -- Scan.optional P.term "" - -- Scan.optional - (P.$$$ "(" |-- read_sections [wp_sect,wp_del_sect,wps_sect,ignore_sect,simp_sect, - simp_del_sect,rule_sect,rule_del_sect,ignore_del_sect] - --| P.$$$ ")") - [] + -- Scan.optional (P.$$$ "(" |-- read_sections crunch_sections --| P.$$$ ")") [] ) >> (fn (((((crunch_instance, prp_name), att_srcs), consts), extra), wpigs) => (fn lthy => @@ -61,11 +57,7 @@ val crunches_parser = (((P.list1 P.const --| P.$$$ "for") -- P.and_list1 ((Scan.optional (P.$$$ "(" |-- P.name --| P.$$$ ")") "" -- P.name 
-- Parse.opt_attribs) -- Scan.optional (P.$$$ ":" |-- P.term) "") - -- Scan.optional - (P.$$$ "(" |-- read_sections [wp_sect,wp_del_sect,wps_sect,ignore_sect,simp_sect, - simp_del_sect,rule_sect,rule_del_sect,ignore_del_sect] - --| P.$$$ ")") - [] + -- Scan.optional (P.$$$ "(" |-- read_sections crunch_sections --| P.$$$ ")") [] ) >> (fn ((consts, confs), wpigs) => fold (fn (((crunch_instance, prp_name), att_srcs), extra) => fn lthy => diff --git a/lib/Crunch.thy b/lib/Crunch.thy index f96ca836e3..c47955e1d7 100644 --- a/lib/Crunch.thy +++ b/lib/Crunch.thy @@ -6,9 +6,9 @@ theory Crunch imports - WPSimp + Monads.WPSimp Lib - MLUtils + ML_Utils.ML_Utils keywords "crunch" "crunch_ignore" "crunches" :: thy_decl begin diff --git a/lib/Crunch_Instances_NonDet.thy b/lib/Crunch_Instances_NonDet.thy index 8e3927bf48..8ba1160844 100644 --- a/lib/Crunch_Instances_NonDet.thy +++ b/lib/Crunch_Instances_NonDet.thy @@ -7,8 +7,9 @@ theory Crunch_Instances_NonDet imports Crunch - WPEx - NonDetMonadVCG + Monads.WPEx + Monads.Nondet_Empty_Fail + Monads.Nondet_No_Fail begin lemmas [crunch_param_rules] = Let_def return_bind returnOk_bindE diff --git a/lib/Crunch_Instances_Trace.thy b/lib/Crunch_Instances_Trace.thy index c21b11a63c..3d59877e6e 100644 --- a/lib/Crunch_Instances_Trace.thy +++ b/lib/Crunch_Instances_Trace.thy @@ -7,7 +7,8 @@ theory Crunch_Instances_Trace imports Crunch - TraceMonadVCG + Monads.Trace_No_Fail + Monads.Trace_More_RG begin lemmas [crunch_param_rules] = Let_def return_bind returnOk_bindE diff --git a/lib/CutMon.thy b/lib/CutMon.thy new file mode 100644 index 0000000000..e94097e38e --- /dev/null +++ b/lib/CutMon.thy @@ -0,0 +1,82 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* The cutMon predicate and supporting lemmas. + "cutMon P f" executes f when P is true, otherwise fails. Cuts off uninteresting executions. *) + +theory CutMon + imports + Monads.Nondet_Empty_Fail + Monads.Nondet_VCG +begin + +definition + cutMon :: "('s \ bool) \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad" where + "cutMon P f \ \s. if P s then f s else fail s" + +lemma cutMon_walk_bind: + "cutMon ((=) s) (f >>= g) = + (cutMon ((=) s) f >>= (\rv. cutMon (\s'. (rv, s') \ fst (f s)) (g rv)))" + apply (rule ext, simp add: cutMon_def bind_def fail_def) + apply (auto simp: split_def) + done + +lemma cutMon_walk_bindE: + "cutMon ((=) s) (f >>=E g) = + (cutMon ((=) s) f >>=E (\rv. cutMon (\s'. (Inr rv, s') \ fst (f s)) (g rv)))" + apply (simp add: bindE_def cutMon_walk_bind) + apply (rule bind_cong, rule refl) + apply (simp add: cutMon_def lift_def fail_def split: if_split_asm) + apply (clarsimp split: sum.split) + done + +lemma cutMon_walk_if: + "cutMon ((=) s) (if P then f else g) = (if P then cutMon ((=) s) f else cutMon ((=) s) g)" + by (simp add: cutMon_def) + +lemma cutMon_valid_drop: + "\P\ f \Q\ \ \P\ cutMon R f \Q\" + by (simp add: cutMon_def valid_def fail_def) + +lemma cutMon_validE_drop: + "\P\ f \Q\,\E\ \ \P\ cutMon R f \Q\,\E\" + by (simp add: validE_def cutMon_valid_drop) + +lemma snd_cutMon: + "snd (cutMon P f s) = (P s \ snd (f s))" + by (simp add: cutMon_def fail_def split: if_split) + +lemma cutMon_assert_opt: + "cutMon P (gets_the f >>= g) = gets_the (\s. if P s then f s else None) >>= g" + by (simp add: cutMon_def gets_the_def exec_gets + bind_assoc fun_eq_iff assert_opt_def + split: if_split) + + +(* empty_fail: *) + +lemma empty_fail_use_cutMon: + "\ \s. 
empty_fail (cutMon ((=) s) f) \ \ empty_fail f" + by (fastforce simp add: empty_fail_def cutMon_def split: if_split_asm) + +lemma empty_fail_drop_cutMon: + "empty_fail f \ empty_fail (cutMon P f)" + by (simp add: empty_fail_def fail_def cutMon_def split: if_split) + +lemma empty_fail_cutMon: + "\ \s. P s \ empty_fail (cutMon ((=) s) f) \ \ empty_fail (cutMon P f)" + by (fastforce simp: empty_fail_def cutMon_def fail_def split: if_split_asm) + +lemmas empty_fail_cutMon_intros = + cutMon_walk_bind[THEN arg_cong[where f=empty_fail], THEN iffD2, + OF empty_fail_bind, OF _ empty_fail_cutMon] + cutMon_walk_bindE[THEN arg_cong[where f=empty_fail], THEN iffD2, + OF empty_fail_bindE, OF _ empty_fail_cutMon] + cutMon_walk_if[THEN arg_cong[where f=empty_fail], THEN iffD2, + OF empty_fail_If] + +end diff --git a/lib/Defs.thy b/lib/Defs.thy index 87d396b212..e5c010afc5 100644 --- a/lib/Defs.thy +++ b/lib/Defs.thy @@ -15,4 +15,3 @@ begin ML_file "defs.ML" end - diff --git a/lib/EVTutorial/EquivValidTutorial.thy b/lib/EVTutorial/EquivValidTutorial.thy index 870e30fdaa..afb87cfd49 100644 --- a/lib/EVTutorial/EquivValidTutorial.thy +++ b/lib/EVTutorial/EquivValidTutorial.thy @@ -55,7 +55,7 @@ In this sense, EquivValid statements could be thought of as \<^emph>\relat text \ This tutorial will introduce some syntactic sugar to emphasise the similarity between Hoare triples and EquivValid statements. -(Here, \\\\ is an alias provided by Lib.NonDetMonad for the trivial binary predicate, +(Here, \\\\ is an alias provided by Monads.Nondet\_Monad for the trivial binary predicate, which always returns \True\; similarly, there is also \\\\ for \False\.) \ abbreviation @@ -140,15 +140,14 @@ where text \ We could just as well define equivalences similarly for \inbox\, \outbox\, or \job\ -- we omit these here. \ -text \ Furthermore, you can use the \And\ alias provided by Lib.NonDetMonad +text \ Furthermore, you can use the \and\ alias provided by @{theory Monads.Fun_Pred_Syntax} to write the conjunction of two binary predicates. -(Similarly, there is also an \Or\ alias for disjunction.)\ +(Similarly, there is also an \or\ alias for disjunction.)\ -thm bipred_conj_def bipred_disj_def lemma - "(desk_equivalence And window_equivalence) r1 r2 = + "(desk_equivalence and window_equivalence) r1 r2 = (desk r1 = desk r2 \ window r1 = window r2)" - unfolding bipred_conj_def desk_equivalence_def window_equivalence_def + unfolding desk_equivalence_def window_equivalence_def by simp text \ In this setting, we might consider a trivial program that just returns @@ -294,12 +293,12 @@ text \ As before, however, we should expect this to hold with a pre-equiva that includes \desk_equivalence\. 
\ lemma reveal_desk_equiv_window_equiv: - "\|desk_equivalence And window_equivalence|, \\ + "\|desk_equivalence and window_equivalence|, \\ reveal_desk_to_window - \|desk_equivalence And window_equivalence|\" + \|desk_equivalence and window_equivalence|\" unfolding reveal_desk_to_window_def apply(rule modify_ev'') - unfolding bipred_conj_def desk_equivalence_def window_equivalence_def + unfolding desk_equivalence_def window_equivalence_def by clarsimp @@ -349,13 +348,13 @@ thm reveal_desk_equiv_window_equiv modify_ev lemma reveal_desk_equiv_window_equiv_using_wp: - "\|desk_equivalence And window_equivalence|, \\ + "\|desk_equivalence and window_equivalence|, \\ reveal_desk_to_window - \|desk_equivalence And window_equivalence|\" + \|desk_equivalence and window_equivalence|\" unfolding reveal_desk_to_window_def apply(rule pre_ev) apply(wp add:modify_ev) - unfolding bipred_conj_def desk_equivalence_def window_equivalence_def + unfolding desk_equivalence_def window_equivalence_def by clarsimp text \ As another example: @@ -513,9 +512,9 @@ thm return_ev2 modify_ev2 lemma reveal_desk_equiv_window_equiv_using_ev2: - "\|desk_equivalence And window_equivalence|, \\ + "\|desk_equivalence and window_equivalence|, \\ reveal_desk_to_window - \|desk_equivalence And window_equivalence|\" + \|desk_equivalence and window_equivalence|\" unfolding reveal_desk_to_window_def apply(clarsimp simp:equiv_valid_def2) \ \ After peeling back to \equiv_valid_2\ form, we can use its \modify\ rule. \ diff --git a/lib/Apply_Debug.thy b/lib/Eisbach_Tools/Apply_Debug.thy similarity index 99% rename from lib/Apply_Debug.thy rename to lib/Eisbach_Tools/Apply_Debug.thy index ac9104eea0..78638579d9 100644 --- a/lib/Apply_Debug.thy +++ b/lib/Eisbach_Tools/Apply_Debug.thy @@ -484,14 +484,14 @@ fun maybe_bind st (_,[tok]) ctxt = val local_facts = Facts.dest_static true [(Proof_Context.facts_of target)] local_facts; - val _ = Token.assign (SOME (Token.Declaration (fn phi => - Data.put (SOME (phi,ctxt, {private_dyn_facts = private_dyns, local_facts = local_facts}))))) tok; + val _ = Token.assign (SOME (Token.Declaration (Morphism.entity (fn phi => + Data.put (SOME (phi,ctxt, {private_dyn_facts = private_dyns, local_facts = local_facts})))))) tok; in ctxt end else let val SOME (Token.Declaration decl) = Token.get_value tok; - val dummy_ctxt = decl Morphism.identity (Context.Proof ctxt); + val dummy_ctxt = Morphism.form decl (Context.Proof ctxt); val SOME (phi,static_ctxt,{private_dyn_facts, local_facts}) = Data.get dummy_ctxt; val old_facts = Proof_Context.facts_of static_ctxt; @@ -685,7 +685,7 @@ in let val (ctxt,thm) = get_state st; - val r = case Exn.interruptible_capture (fn st => + val r = case Exn.result (fn st => let val _ = Seq.pull (break ctxt NONE thm) in (case (Seq.pull o Proof.apply m) st of (SOME (Seq.Result st', _)) => RESULT (get_state st') diff --git a/lib/Apply_Trace.thy b/lib/Eisbach_Tools/Apply_Trace.thy similarity index 98% rename from lib/Apply_Trace.thy rename to lib/Eisbach_Tools/Apply_Trace.thy index 35eb5a5796..3e7a0943e8 100644 --- a/lib/Apply_Trace.thy +++ b/lib/Eisbach_Tools/Apply_Trace.thy @@ -8,8 +8,7 @@ * Provides an alternate refinement function which takes an additional stateful journaling operation. 
*) theory Apply_Trace imports - Main - "MLUtils" + ML_Utils.ML_Utils begin @@ -226,7 +225,7 @@ let val deps = case query of SOME (raw_query,pos) => let - val pos' = perhaps (try (Position.advance_offsets 1)) pos; + val pos' = perhaps (try (Position.shift_offsets {remove_id = false} 1)) pos; val q = Find_Theorems.read_query pos' raw_query; val results = Find_Theorems.find_theorems_cmd ctxt (SOME thm) (SOME 1000000000) false q |> snd diff --git a/lib/Apply_Trace_Cmd.thy b/lib/Eisbach_Tools/Apply_Trace_Cmd.thy similarity index 100% rename from lib/Apply_Trace_Cmd.thy rename to lib/Eisbach_Tools/Apply_Trace_Cmd.thy diff --git a/lib/Conjuncts.thy b/lib/Eisbach_Tools/Conjuncts.thy similarity index 100% rename from lib/Conjuncts.thy rename to lib/Eisbach_Tools/Conjuncts.thy diff --git a/lib/Eisbach_Methods.thy b/lib/Eisbach_Tools/Eisbach_Methods.thy similarity index 97% rename from lib/Eisbach_Methods.thy rename to lib/Eisbach_Tools/Eisbach_Methods.thy index 930e8fb96c..64c1d88470 100644 --- a/lib/Eisbach_Methods.thy +++ b/lib/Eisbach_Tools/Eisbach_Methods.thy @@ -17,6 +17,16 @@ imports Local_Method begin +section \Warnings\ + +method_setup not_visible = + \Method.text_closure >> (fn m => fn ctxt => fn facts => + let + val ctxt' = Context_Position.set_visible false ctxt + fun tac st' = method_evaluate m ctxt' facts st' + in SIMPLE_METHOD tac facts end)\ + \set context visibility to false for suppressing warnings in method execution\ + section \Debugging methods\ @@ -352,6 +362,11 @@ end section \Utility methods\ +subsection \Instantiations\ + +text \These do not admit goal parameters in "x", so erule_tac is still sometimes necessary\ +method spec for x :: "_ :: type" = (erule allE[of _ x]) +method bspec for x :: "_ :: type" = (erule ballE[of _ _ x]) subsection \Finding a goal based on successful application of a method\ diff --git a/lib/Local_Method.thy b/lib/Eisbach_Tools/Local_Method.thy similarity index 100% rename from lib/Local_Method.thy rename to lib/Eisbach_Tools/Local_Method.thy diff --git a/lib/test/Local_Method_Tests.thy b/lib/Eisbach_Tools/Local_Method_Tests.thy similarity index 99% rename from lib/test/Local_Method_Tests.thy rename to lib/Eisbach_Tools/Local_Method_Tests.thy index ec4e44bbf3..b255df56f7 100644 --- a/lib/test/Local_Method_Tests.thy +++ b/lib/Eisbach_Tools/Local_Method_Tests.thy @@ -6,7 +6,7 @@ theory Local_Method_Tests imports - Lib.Eisbach_Methods + Eisbach_Methods begin text \ diff --git a/lib/ProvePart.thy b/lib/Eisbach_Tools/ProvePart.thy similarity index 100% rename from lib/ProvePart.thy rename to lib/Eisbach_Tools/ProvePart.thy diff --git a/lib/Eisbach_Tools/README.md b/lib/Eisbach_Tools/README.md new file mode 100644 index 0000000000..9999b46a2a --- /dev/null +++ b/lib/Eisbach_Tools/README.md @@ -0,0 +1,19 @@ + + +# Eisbach and Tactic Tools + +This session contains tools and tactics for defining Isabelle [Eisbach] methods. + +Some of the theories in this session are not strictly speaking Eisbach-related, +but are mostly used from Eisbach methods, so we collect them here as well. + +We only include theories that are needed in the [Monads], [CParser], and +[AutoCorres] sessions. 
+ +[Eisbach]: https://isabelle.in.tum.de/dist//doc/eisbach.pdf +[Monads]: ../Monads/ +[CParser]: ../../tools/c-parser/ +[AutoCorres]: ../../tools/autocorres/ diff --git a/lib/Eisbach_Tools/ROOT b/lib/Eisbach_Tools/ROOT new file mode 100644 index 0000000000..058206ba44 --- /dev/null +++ b/lib/Eisbach_Tools/ROOT @@ -0,0 +1,27 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +chapter Lib + +session Eisbach_Tools (lib) = HOL + + + sessions + "HOL-Eisbach" + ML_Utils + + theories + Apply_Debug + Apply_Trace_Cmd + Conjuncts + Eisbach_Methods + Local_Method + ProvePart + Rule_By_Method + Subgoal_Methods + Simp_No_Conditional + Trace_Schematic_Insts + + Local_Method_Tests diff --git a/lib/Rule_By_Method.thy b/lib/Eisbach_Tools/Rule_By_Method.thy similarity index 100% rename from lib/Rule_By_Method.thy rename to lib/Eisbach_Tools/Rule_By_Method.thy diff --git a/lib/Simp_No_Conditional.thy b/lib/Eisbach_Tools/Simp_No_Conditional.thy similarity index 100% rename from lib/Simp_No_Conditional.thy rename to lib/Eisbach_Tools/Simp_No_Conditional.thy diff --git a/lib/subgoal_focus/Subgoal_Methods.thy b/lib/Eisbach_Tools/Subgoal_Methods.thy similarity index 98% rename from lib/subgoal_focus/Subgoal_Methods.thy rename to lib/Eisbach_Tools/Subgoal_Methods.thy index 8896b490d9..1bd6ce1bdc 100644 --- a/lib/subgoal_focus/Subgoal_Methods.thy +++ b/lib/Eisbach_Tools/Subgoal_Methods.thy @@ -171,7 +171,7 @@ let let val ((_,prem'),ctxt') = Variable.focus NONE prem ctxt; val rule_prop = Thm.prop_of rule; - in Unify.matches_list (Context.Proof ctxt') [rule_prop] [prem'] end; + in is_none (Unify.matcher (Context.Proof ctxt') [rule_prop] [prem']) end; in filter_prems_tac' ctxt (not o member matches rules) end; diff --git a/lib/Trace_Schematic_Insts.thy b/lib/Eisbach_Tools/Trace_Schematic_Insts.thy similarity index 99% rename from lib/Trace_Schematic_Insts.thy rename to lib/Eisbach_Tools/Trace_Schematic_Insts.thy index 77896b0938..293b42c23e 100644 --- a/lib/Trace_Schematic_Insts.thy +++ b/lib/Eisbach_Tools/Trace_Schematic_Insts.thy @@ -6,9 +6,8 @@ theory Trace_Schematic_Insts imports - Main - MLUtils - TermPatternAntiquote + ML_Utils.ML_Utils + ML_Utils.TermPatternAntiquote begin text \ diff --git a/lib/Eisbach_Tools/tests.xml b/lib/Eisbach_Tools/tests.xml new file mode 100644 index 0000000000..87450bd877 --- /dev/null +++ b/lib/Eisbach_Tools/tests.xml @@ -0,0 +1,22 @@ + + + + + + + + ../../isabelle/bin/isabelle build -v -d ../.. Eisbach_Tools + + + diff --git a/lib/EmptyFailLib.thy b/lib/EmptyFailLib.thy deleted file mode 100644 index b8b93f7e58..0000000000 --- a/lib/EmptyFailLib.thy +++ /dev/null @@ -1,51 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) - -theory EmptyFailLib -imports - NonDetMonad - HaskellLib_H -begin - -(* Collect generic empty_fail lemmas here. naming convention is emtpy_fail_NAME. 
- Unless there is a good reason, they should all be [intro!, simp] *) - -lemma empty_fail_when [simp, intro!]: - "(P \ empty_fail x) \ empty_fail (when P x)" - unfolding when_def by simp - -lemma empty_fail_bindD1: - "empty_fail (a >>= b) \ empty_fail a" - unfolding empty_fail_def bind_def - apply (clarsimp simp: split_def image_image) - apply (drule_tac x = s in spec) - apply simp - done - -lemma empty_fail_liftM [simp, intro!]: - "empty_fail (liftM f m) = empty_fail m" - unfolding liftM_def - apply (rule iffI) - apply (erule empty_fail_bindD1) - apply (erule empty_fail_bind) - apply simp - done - -lemma empty_fail_assert [simp, intro!]: - "empty_fail (assert P)" - unfolding empty_fail_def assert_def - by (simp add: return_def fail_def) - -lemma empty_fail_unless [intro!, simp]: - "empty_fail f \ empty_fail (unless P f)" - by (simp add: unless_def) - -lemma empty_fail_stateAssert [intro!, simp]: - "empty_fail (stateAssert P l)" - by (simp add: stateAssert_def empty_fail_def get_def assert_def - return_def fail_def bind_def) - -end diff --git a/lib/EquivValid.thy b/lib/EquivValid.thy index 1fbeb5a16c..318e1b1d38 100644 --- a/lib/EquivValid.thy +++ b/lib/EquivValid.thy @@ -235,7 +235,7 @@ abbreviation equiv_valid_rv_inv where "equiv_valid_rv_inv I A R P f \ equiv_valid_rv I A A R P f" lemma get_evrv: - "equiv_valid_rv_inv I A (I And A) \ get" + "equiv_valid_rv_inv I A (I and A) \ get" by(auto simp: equiv_valid_2_def get_def) lemma equiv_valid_rv_bind_general: @@ -308,7 +308,7 @@ lemma get_bind_ev2: assumes "\ rv rv'. \I rv rv'; A rv rv'\ \ equiv_valid_2 I A B R (P and ((=) rv)) (P' and ((=) rv')) (f rv) (f' rv')" shows "equiv_valid_2 I A B R P P' (get >>= f) (get >>= f')" apply(rule equiv_valid_2_guard_imp) - apply(rule_tac R'="I And A" in equiv_valid_2_bind_general) + apply(rule_tac R'="I and A" in equiv_valid_2_bind_general) apply(rule assms, simp+) apply(rule get_evrv) apply(wp get_sp)+ @@ -513,7 +513,7 @@ lemma liftME_ev: shows "equiv_valid_inv I A P (liftME f g)" apply(simp add: liftME_def) apply (rule bindE_ev_pre[OF returnOk_ev reads_res]) - apply (rule hoare_True_E_R) + apply (rule hoareE_R_TrueI) apply fast done @@ -572,10 +572,10 @@ lemmas pre_ev = hoare_pre equiv_valid_guard_imp -subsection\Tom instantiates wpc\ +subsection\wpc setup\ lemma wpc_helper_equiv_valid: - "equiv_valid D A B Q f \ wpc_helper (P, P') (Q, Q') (equiv_valid D A B P f)" + "equiv_valid D A B Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (equiv_valid D A B P f)" using equiv_valid_guard_imp apply (simp add: wpc_helper_def) apply (blast) @@ -645,7 +645,7 @@ lemma mapME_ev_pre: apply(subst mapME_Cons) apply wp apply fastforce - apply (rule hoare_True_E_R[where P="\"]) + apply (rule wp_post_tautE_R) apply fastforce+ done diff --git a/lib/Eval_Bool.thy b/lib/Eval_Bool.thy index 19d3e1ba42..e561a72a3c 100644 --- a/lib/Eval_Bool.thy +++ b/lib/Eval_Bool.thy @@ -47,12 +47,12 @@ val eval_nat = eval (mk_constname_tab [@{term "Suc 0"}, @{term "Suc 1"}, val eval_int = eval (mk_constname_tab [@{term "0 :: int"}, @{term "1 :: int"}, @{term "18 :: int"}, @{term "(-9) :: int"}]) -val eval_bool_simproc = Simplifier.make_simproc @{context} "eval_bool" - { lhss = [@{term "b :: bool"}], proc = K eval_bool } -val eval_nat_simproc = Simplifier.make_simproc @{context} "eval_nat" - { lhss = [@{term "n :: nat"}], proc = K eval_nat } -val eval_int_simproc = Simplifier.make_simproc @{context} "eval_int" - { lhss = [@{term "i :: int"}], proc = K eval_int } +val eval_bool_simproc = Simplifier.make_simproc @{context} + { name = "eval_bool", 
lhss = [@{term "b :: bool"}], proc = K eval_bool, identifier = [] } +val eval_nat_simproc = Simplifier.make_simproc @{context} + { name = "eval_nat", lhss = [@{term "n :: nat"}], proc = K eval_nat, identifier = [] } +val eval_int_simproc = Simplifier.make_simproc @{context} + { name = "eval_int", lhss = [@{term "i :: int"}], proc = K eval_int, identifier = [] } end \ diff --git a/lib/ExtraCorres.thy b/lib/ExtraCorres.thy index db7c2aef7e..30dfd05ab5 100644 --- a/lib/ExtraCorres.thy +++ b/lib/ExtraCorres.thy @@ -5,9 +5,18 @@ *) theory ExtraCorres -imports Corres_UL +imports Corres_UL DetWPLib begin +(* FIXME: the S in this rule is mainly to make the induction work, we don't actually need it in + application. This means, this form should be hidden and the main form should be resolving the + last assumption with order_refl. *) + +(* The lemma looks weaker than in it could be -- the guards P and P' are not allowed to depend on + list elements. This is fine, because P/P' are a loop invariants that need to be supplied + manually anyway, and we want these to be true for all loop iterations. An instance such as + "\s. \x \ set xs. P x s" is possible and covers the cases the (not really) stronger formulation + would cover. *) lemma corres_mapM: assumes x: "r [] []" assumes y: "\x xs y ys. \ r xs ys; r' x y \ \ r (x # xs) (y # ys)" @@ -63,12 +72,13 @@ next show ?case apply (simp add: mapM_Cons) apply (rule corres_underlying_split [OF corr' _ ha [OF Cons(2)] hc [OF Cons(2)]]) - apply (rule corres_underlying_split [OF Cons(3) _ hoare_post_taut hoare_post_taut]) + apply (rule corres_underlying_split [OF Cons(3) _ hoare_TrueI hoare_TrueI]) apply (simp add: rc) apply (rule Cons.hyps)+ done qed +(* FIXME: see comment for mapM rule. Same applies for lemma strength *) lemma corres_mapM_x: assumes x: "\x y. (x, y) \ S \ corres_underlying sr nf nf' dc P P' (f x) (f' y)" assumes y: "\x y. (x, y) \ S \ \P\ f x \\rv. P\" @@ -83,6 +93,9 @@ lemma corres_mapM_x: apply (simp | wp)+ done +lemmas corres_mapM_x' = corres_mapM_x[OF _ _ _ _ order_refl] + +(* FIXME: see comment for mapM rule. Same applies for lemma strength *) lemma corres_mapME: assumes x: "r [] []" assumes y: "\x xs y ys. \ r xs ys; r' x y \ \ r (x # xs) (y # ys)" @@ -166,13 +179,19 @@ proof - done qed +text \Some results concerning the interaction of abstract and concrete states\ + +definition ex_abs_underlying :: "('a \ 'b) set \ ('a \ bool) \ 'b \ bool" where + "ex_abs_underlying sr P s' \ \s. (s,s') \ sr \ P s" + +lemma ex_absI[intro!]: + "(s, s') \ sr \ P s \ ex_abs_underlying sr P s'" + by (auto simp add: ex_abs_underlying_def) lemma corres_u_nofail: - "corres_underlying S nf True r P P' f g \ (nf \ no_fail P f) \ - no_fail (\s'. \s. (s,s') \ S \ P s \ P' s') g" - apply (clarsimp simp add: corres_underlying_def no_fail_def) - apply fastforce - done + "\corres_underlying S nf True r P P' f g; nf \ no_fail P f\ + \ no_fail (P' and ex_abs_underlying S P) g" + by (fastforce simp: corres_underlying_def ex_abs_underlying_def no_fail_def) lemma wp_from_corres_u: "\ corres_underlying R nf nf' r G G' f f'; \P\ f \Q\; \P'\ f' \Q'\; nf \ no_fail P f \ \ @@ -190,7 +209,7 @@ lemma wp_from_corres_u_unit: lemma corres_nofail: "corres_underlying state_relation nf True r P P' f g \ (nf \ no_fail P f) \ no_fail (\s'. \s. 
(s,s') \ state_relation \ P s \ P' s') g" - by (rule corres_u_nofail) + by (fastforce intro: no_fail_pre corres_u_nofail simp: ex_abs_underlying_def) lemma wp_from_corres_unit: "\ corres_underlying state_relation nf nf' r G G' f f'; @@ -199,4 +218,340 @@ lemma wp_from_corres_unit: f' \\_ s'. \s. (s,s') \ state_relation \ Q s \ Q' s'\" by (auto intro!: wp_from_corres_u_unit) +lemma corres_underlying_split_ex_abs: + assumes ac: "corres_underlying srel nf nf' r' G G' a c" + assumes bd: "\rv rv'. r' rv rv' \ + corres_underlying srel nf nf' r (P rv) (P' rv') (b rv) (d rv')" + assumes valid: "\G\ a \P\" "\G' and ex_abs_underlying srel G\ c \P'\" + shows "corres_underlying srel nf nf' r G G' (a >>= (\rv. b rv)) (c >>= (\rv'. d rv'))" + using assms + apply (clarsimp simp: corres_underlying_def bind_def) + apply (clarsimp simp: Bex_def Ball_def valid_def ex_abs_underlying_def) + by meson + +lemma hoare_from_abs: + assumes corres: "corres_underlying srel nf nf' rrel G G' f f'" + assumes cross2: "\s s' r r'. \(s, s') \ srel; rrel r r'; Q r s; S s\ \ Q' r' s'" + assumes abs_valid: "\P and R\ f \\rv. Q rv and S\" + assumes cross1: "\s s'. \(s, s') \ srel; P' s'; R' s'\ \ P s" + assumes nf: "nf \ no_fail (P and R and G) f" + shows "\P' and G' and R' and ex_abs_underlying srel (G and R)\ f' \Q'\" + using assms + apply (clarsimp simp: valid_def ex_abs_underlying_def corres_underlying_def no_fail_def) + by fast + +lemma hoare_from_abs_inv: + assumes abs_valid: "f \P\" + assumes cross: "\s s'. (s, s') \ srel \ P s = P' s'" + assumes corres: "corres_underlying srel nf nf' rrel G G' f f'" + assumes nf: "nf \ no_fail (P and G) f" + shows "\P' and G' and ex_abs_underlying srel G\ f' \\_. P'\" + using assms + by (fastforce intro: hoare_from_abs[where R=\ and S=\ and R'=\ and Q="\_. P" , simplified]) + +lemma corres_from_valid: + assumes nf': "nf' \ no_fail (P' and ex_abs_underlying srel P) f'" + assumes + "\s. \\s'. P s \ P' s' \ (s, s') \ srel\ + f' \\rv' t'. \(rv, t) \ fst (f s). (t, t') \ srel \ rrel rv rv'\" + shows "corres_underlying srel nf nf' rrel P P' f f'" + using assms + by (fastforce simp: corres_underlying_def valid_def no_fail_def) + +lemma corres_from_valid_det: + assumes det: "det_wp P f" + assumes nf': "nf' \ no_fail (P' and ex_abs_underlying srel P) f'" + assumes valid: + "\s rv t. + \fst (f s) = {(rv, t)}; P s\ + \ \\s'. P' s' \ (s, s') \ srel\ f' \\rv' t'. (t, t') \ srel \ rrel rv rv'\" + shows "corres_underlying srel nf nf' rrel P P' f f'" + apply (clarsimp simp: corres_underlying_def) + apply (intro conjI) + apply (insert det) + apply (clarsimp simp: det_wp_def) + apply (force dest: use_valid[OF _ valid]) + apply (fastforce dest: nf' simp: no_fail_def ex_abs_underlying_def) + done + +lemma corres_noop_ex_abs: + assumes P: "\s. P s \ \\s'. (s, s') \ sr \ P' s'\ f \\rv s'. (s, s') \ sr \ r x rv\" + assumes nf': "nf' \ no_fail (P' and ex_abs_underlying sr P) f" + shows "corres_underlying sr nf nf' r P P' (return x) f" + apply (simp add: corres_underlying_def return_def) + apply clarsimp + apply (frule P) + apply (insert nf') + apply (fastforce simp: valid_def no_fail_def ex_abs_underlying_def) + done + +lemma corres_symb_exec_r_conj_ex_abs: + assumes z: "\rv. corres_underlying sr nf nf' r Q (R' rv) x (y rv)" + assumes y: "\Q'\ m \R'\" + assumes x: "\s. Q s \ \\s'. (s, s') \ sr \ P' s'\ m \\rv s'. (s, s') \ sr\" + assumes nf: "nf' \ no_fail (P' and ex_abs_underlying sr Q) m" + shows "corres_underlying sr nf nf' r Q (P' and Q') x (m >>= (\rv. 
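A quick sanity check for ex_abs_underlying introduced above, as an illustrative sketch rather than part of the patch: over the identity state relation the lifted predicate coincides with the abstract predicate, and unfolding the definition should let auto discharge it directly.

  lemma "ex_abs_underlying Id P s = P s"
    by (auto simp: ex_abs_underlying_def)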
y rv))" +proof - + have P: "corres_underlying sr nf nf' dc Q P' (return undefined) m" + apply (rule corres_noop_ex_abs) + apply (simp add: x) + apply (erule nf) + done + show ?thesis + apply (rule corres_guard_imp) + apply (subst return_bind[symmetric], rule corres_split[OF P]) + apply (rule z) + apply wp + apply (rule y) + apply simp+ + done +qed + +lemmas corres_symb_exec_r_conj_ex_abs_forwards = + corres_symb_exec_r_conj_ex_abs[where P'=P' and Q'=P' for P', simplified] + +lemma gets_the_corres_ex_abs': + "\no_ofail P a; no_ofail (P' and ex_abs_underlying sr P) b\ \ + corres_underlying sr False True r P P' (gets_the a) (gets_the b) + = (\s s'. P s \ P' s' \ (s, s') \ sr \ r (the (a s)) (the (b s')))" + by (fastforce simp: gets_the_def no_ofail_def corres_underlying_def split_def exec_gets + assert_opt_def fail_def return_def ex_abs_underlying_def) + +lemmas gets_the_corres_ex_abs = gets_the_corres_ex_abs'[THEN iffD2] + +lemma gets_the_corres': + "\no_ofail P a; no_ofail P' b\ \ + corres_underlying sr False True r P P' (gets_the a) (gets_the b) + = (\s s'. P s \ P' s' \ (s, s') \ sr \ r (the (a s)) (the (b s')))" + apply (erule gets_the_corres_ex_abs') + apply (fastforce intro: no_ofail_pre_imp) + done + +lemmas gets_the_corres = gets_the_corres'[THEN iffD2] + +lemma gets_the_corres_relation: + "\no_ofail P f; corres_underlying sr False True r P P' (gets_the f) (gets_the f'); + P s; P' s'; (s, s') \ sr\ + \ r (the (f s)) (the (f' s'))" + apply (rule_tac P=P and a=f and b=f' and P'=P' + in gets_the_corres_ex_abs'[THEN iffD1, rule_format]; + fastforce?) + apply (rule no_ofail_gets_the_eq[THEN iffD2]) + apply (fastforce intro: corres_u_nofail) + done + + +\ \Some @{term corres_underlying} rules for @{term whileLoop}\ + +lemma in_whileLoop_corres: + assumes body_corres: + "\r r'. rrel r r' \ + corres_underlying srel nf nf' rrel (P r and C r) (P' r' and C' r') (B r) (B' r')" + assumes body_inv: + "\r. \P r and C r\ B r \P\" + "\r'. \P' r' and C' r'\ B' r' \P'\" + assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P r s; P' r' s'\ \ C r s = C' r' s'" + assumes result: "(rv', t') \ fst (whileLoop C' B' r' s')" + assumes nf: "\r. nf \ no_fail (P r and C r) (B r)" + shows "\s r. (s, s') \ srel \ rrel r r' \ P r s \ P' r' s' + \ (\rv t. (rv, t) \ fst (whileLoop C B r s) \ (t, t') \ srel \ rrel rv rv')" + apply (rule in_whileLoop_induct[OF result]) + apply (force simp: cond whileLoop_def) + apply clarsimp + apply (frule (1) corres_underlyingD2[OF body_corres]; + (fastforce dest: nf simp: cond no_fail_def)?) + apply clarsimp + apply (frule use_valid[OF _ body_inv(1)]) + apply (fastforce dest: cond) + apply (frule use_valid[OF _ body_inv(2)]) + apply fastforce + apply (fastforce simp: whileLoop_def intro: whileLoop_results.intros(3) dest: cond) + done + +lemma corres_whileLoop_ret: + assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P r s; P' r' s'\ \ C r s = C' r' s'" + assumes body_corres: + "\r r'. rrel r r' \ + corres_underlying srel False nf' rrel (P r and C r) (P' r' and C' r') (B r) (B' r')" + assumes body_inv: + "\r. \P r and C r\ B r \P\" + "\r'. \P' r' and C' r'\ B' r' \P'\" + assumes rel: "rrel r r'" + assumes nf': "\r'. no_fail (P' r' and C' r') (B' r')" + assumes termin: "\r' s'. \P' r' s'; C' r' s'\ \ whileLoop_terminates C' B' r' s'" + shows "corres_underlying srel False nf' rrel (P r) (P' r') (whileLoop C B r) (whileLoop C' B' r')" + apply (rule corres_no_failI) + apply (simp add: no_fail_def) + apply (intro impI allI) + apply (erule_tac I="\r' s'. 
P' r' s'" + and R="{((r', s'), r, s). C' r s \ (r', s') \ fst (B' r s) + \ whileLoop_terminates C' B' r s}" + in not_snd_whileLoop) + apply (clarsimp simp: validNF_def) + apply (rule conjI) + apply (intro hoare_vcg_conj_lift_pre_fix; wpsimp?) + using body_inv + apply (fastforce simp: valid_def) + apply (clarsimp simp: valid_def) + apply (insert termin)[1] + apply wpsimp + apply (fastforce intro: no_fail_pre nf') + apply (fastforce intro: wf_subset[OF whileLoop_terminates_wf[where C=C']]) + apply clarsimp + apply (frule in_whileLoop_corres[OF body_corres body_inv]; (fastforce dest: cond)?) + apply (fastforce intro: assms) + done + +lemmas corres_whileLoop = + corres_whileLoop_ret[where P="\_. P" for P, where P'="\_. P'" for P', simplified] + +lemma whileLoop_terminates_cross: + assumes body_corres: + "\r r'. rrel r r' \ + corres_underlying srel nf nf' rrel (P r and C r) (P' r' and C' r') (B r) (B' r')" + assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P r s; P' r' s'\ \ C r s = C' r' s'" + assumes body_inv: + "\r. \P r and C r\ B r \P\" + "\r'. \P' r' and C' r'\ B' r' \P'\" + assumes abs_termination: "\r s. \P r s; C r s\ \ whileLoop_terminates C B r s" + assumes ex_abs: "ex_abs_underlying srel (P r) s'" + assumes rrel: "rrel r r'" + assumes P': "P' r' s'" + assumes nf: "\r. nf \ no_fail (P r and C r) (B r)" + shows "whileLoop_terminates C' B' r' s'" +proof - + have helper: "\s. P r s \ C r s \ \r' s'. rrel r r' \ (s, s') \ srel \ P r s \ P' r' s' + \ whileLoop_terminates C' B' r' s'" + (is "\s. _ \ ?I r s") + apply (rule_tac P="?I" in whileLoop_terminates.induct) + apply (fastforce intro: abs_termination) + apply (fastforce simp: whileLoop_terminates.intros dest: cond) + apply (subst whileLoop_terminates.simps) + apply clarsimp + apply (frule (1) corres_underlyingD2[OF body_corres], (fastforce dest: nf simp: no_fail_def)+) + apply (fastforce dest: use_valid intro: body_inv) + done + + show ?thesis + apply (insert assms helper) + apply (cases "C' r' s'") + apply (fastforce simp: ex_abs_underlying_def) + apply (simp add: whileLoop_terminates.intros(1)) + done +qed + +lemma corres_whileLoop_abs_ret: + assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P r s; P' r' s'\ \ C r s = C' r' s'" + assumes body_corres: + "\r r'. rrel r r' \ + corres_underlying srel nf nf' rrel (P r and C r) (P' r' and C' r') (B r) (B' r')" + assumes rrel: "rrel r r'" + assumes body_inv: + "\r. \P r and C r\ B r \P\" + "\r'. \P' r' and C' r'\ B' r' \P'\" + assumes abs_termination: "\r s. \P r s; C r s\ \ whileLoop_terminates C B r s" + assumes nf: "\r. nf \ no_fail (P r and C r) (B r)" + shows "corres_underlying srel nf nf' rrel (P r) (P' r') (whileLoop C B r) (whileLoop C' B' r')" + apply (rule corres_underlyingI) + apply (frule in_whileLoop_corres[OF body_corres body_inv]; + (fastforce intro: body_corres body_inv rrel dest: nf cond)) + apply (rule_tac I="\rv' s'. \rv s. (s, s') \ srel \ rrel rv rv' \ P rv s \ P' rv' s'" + and R="{((r', s'), r, s). C' r s \ (r', s') \ fst (B' r s) + \ whileLoop_terminates C' B' r s}" + in not_snd_whileLoop) + apply (fastforce intro: rrel) + apply (rename_tac s s' conc_r new_s) + apply (clarsimp simp: validNF_def) + apply (rule conjI) + apply (intro hoare_vcg_conj_lift_pre_fix; (solves wpsimp)?) + apply (rule_tac Q="\s'. \rv s. 
(s, s') \ srel \ rrel rv conc_r + \ P rv s \ (P' conc_r s' \ C' conc_r s') \ s' = new_s" + in hoare_weaken_pre[rotated]) + apply clarsimp + apply (rule hoare_ex_pre) + apply (rename_tac abs_r) + apply (rule hoare_weaken_pre) + apply (rule_tac G="rrel abs_r conc_r" in hoare_grab_asm) + apply (wpsimp wp: wp_from_corres_u[OF body_corres] body_inv) + apply (fastforce dest: nf) + apply (fastforce dest: cond) + apply (fastforce simp: valid_def) + apply wpsimp + apply (rule whileLoop_terminates_cross[OF body_corres]; + (fastforce dest: nf cond intro: body_inv abs_termination)) + apply (rule_tac P="\s'. \rv s. (s, s') \ srel \ rrel rv conc_r + \ P rv s \ (P' conc_r s' \ C' conc_r s') \ s' = new_s" + in no_fail_pre[rotated]) + apply fastforce + apply (rule no_fail_ex_lift) + apply (rename_tac abs_r) + apply (rule no_fail_pre) + apply (rule_tac G="rrel abs_r conc_r" in no_fail_grab_asm) + apply (fastforce intro: corres_u_nofail dest: body_corres nf) + apply (fastforce simp: cond) + apply (fastforce intro: wf_subset[OF whileLoop_terminates_wf[where C=C']]) + done + +lemmas corres_whileLoop_abs = + corres_whileLoop_abs_ret[where P="\_. P" for P, where P'="\_. P'" for P', simplified] + +text \Some corres_underlying rules for monadic combinators\ + +lemma ifM_corres: + assumes test: "corres_underlying srel nf nf' (=) A A' test test'" + and l: "corres_underlying srel nf nf' rrel Q Q' a a'" + and r: "corres_underlying srel nf nf' rrel R R' b b'" + and abs_valid: "\B\ test \\c s. c \ Q s\" + "\C\ test \\c s. \ c \ R s\" + and conc_valid: "\B'\ test' \\c s. c \ Q' s\" + "\C'\ test' \\c s. \ c \ R' s\" + shows "corres_underlying srel nf nf' rrel (A and B and C) (A' and B' and C') + (ifM test a b) (ifM test' a' b')" + unfolding ifM_def + apply (rule corres_guard_imp) + apply (rule corres_split[OF test]) + apply (erule corres_if[OF _ l r]) + apply (wpsimp wp: abs_valid conc_valid hoare_vcg_if_lift2)+ + done + +lemmas ifM_corres' = + ifM_corres[where A=A and B=A and C=A for A, simplified, + where A'=A' and B'=A' and C'=A' for A', simplified] + +lemma orM_corres: + "\corres_underlying srel nf nf' (=) A A' a a'; corres_underlying srel nf nf' (=) R R' b b'; + \B\ a \\c s. \ c \ R s\; \B'\ a' \\c s. \ c \ R' s\\ + \ corres_underlying srel nf nf' (=) (A and B) (A' and B') (orM a b) (orM a' b')" + unfolding orM_def + apply (rule corres_guard_imp) + apply (rule ifM_corres[where Q=\ and Q'=\]) + apply (wpsimp | fastforce)+ + done + +lemmas orM_corres' = + orM_corres[where A=A and B=A for A, simplified, where A'=A' and B'=A' for A', simplified] + +lemma andM_corres: + "\corres_underlying srel nf nf' (=) A A' a a'; corres_underlying srel nf nf' (=) Q Q' b b'; + \B\ a \\c s. c \ Q s\; \B'\ a' \\c s. 
c \ Q' s\\ + \ corres_underlying srel nf nf' (=) (A and B) (A' and B') (andM a b) (andM a' b')" + unfolding andM_def + apply (rule corres_guard_imp) + apply (erule (1) ifM_corres[where R=\ and R'=\]) + apply (wpsimp | assumption)+ + done + +lemma notM_corres: + "corres_underlying srel nf nf' (=) G G' a a' + \ corres_underlying srel nf nf' (=) G G' (notM a) (notM a')" + unfolding notM_def + apply (rule corres_guard_imp) + apply (erule corres_split) + apply wpsimp+ + done + +lemma ifM_to_top_of_bind: + "((ifM test true false) >>= z) = ifM test (true >>= z) (false >>= z)" + by (force simp: ifM_def bind_def split: if_splits) + end diff --git a/lib/Extract_Conjunct.thy b/lib/Extract_Conjunct.thy index 03feba42eb..b9986c482f 100644 --- a/lib/Extract_Conjunct.thy +++ b/lib/Extract_Conjunct.thy @@ -7,7 +7,7 @@ theory Extract_Conjunct imports "Main" - "Eisbach_Methods" + Eisbach_Tools.Eisbach_Methods begin section \Extracting conjuncts in the conclusion\ diff --git a/lib/FP_Eval.thy b/lib/FP_Eval.thy index d84d6a17f2..5f04e30365 100644 --- a/lib/FP_Eval.thy +++ b/lib/FP_Eval.thy @@ -6,8 +6,8 @@ theory FP_Eval imports - HOL.HOL - TermPatternAntiquote + Main + ML_Utils.TermPatternAntiquote begin text \ diff --git a/lib/GenericLib.thy b/lib/GenericLib.thy index 18eb72ba17..f4004d7a5d 100644 --- a/lib/GenericLib.thy +++ b/lib/GenericLib.thy @@ -7,7 +7,7 @@ theory GenericLib imports Crunch_Instances_NonDet - WPEx + Monads.WPEx HaskellLemmaBucket begin diff --git a/lib/Guess_ExI.thy b/lib/Guess_ExI.thy index 7fd4ce52a4..875fb814fa 100644 --- a/lib/Guess_ExI.thy +++ b/lib/Guess_ExI.thy @@ -6,8 +6,8 @@ theory Guess_ExI imports - Eisbach_Methods - Apply_Debug + Eisbach_Tools.Eisbach_Methods + Eisbach_Tools.Apply_Debug begin (* diff --git a/lib/HaskellLemmaBucket.thy b/lib/HaskellLemmaBucket.thy index 4b5b564331..634b1738e1 100644 --- a/lib/HaskellLemmaBucket.thy +++ b/lib/HaskellLemmaBucket.thy @@ -123,10 +123,18 @@ lemma stateAssert_wp: "\\s. P s \ Q () s\ stateAssert P e \Q\" by (clarsimp simp: stateAssert_def) wp -lemma haskell_assert_wp: +lemma empty_fail_stateAssert[intro!, simp]: + "empty_fail (stateAssert P l)" + unfolding stateAssert_def by simp + +lemma haskell_assert_wp[wp]: "\\s. Q \ P s\ haskell_assert Q xs \\_. P\" by simp wp +lemma haskell_assert_inv: + "\P\ haskell_assert Q l \\_. P\" + by wpsimp + lemma init_append_last: "xs \ [] \ init xs @ [last xs] = xs" apply (induct xs rule: rev_induct) @@ -149,10 +157,6 @@ lemma no_fail_stateAssert: apply simp done -lemma empty_fail_stateAssert: - "empty_fail (stateAssert P s)" - by (simp add: stateAssert_def assert_def empty_fail_get) - lemma haskell_fail_wp: "\\\ haskell_fail x \P\" by simp @@ -221,7 +225,7 @@ lemma findM_on_outcome': lemma findM_on_outcome: assumes x: "\x ys. x \ set xs \ \Q None and I\ f x \\rv s. (rv \ Q (Some x) s) \ (\ rv \ Q None s \ I s)\" shows "\Q None and I\ findM f xs \Q\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule findM_on_outcome' [where fn="\s. 
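Relating back to the ifM/orM/andM correspondence rules above: assuming the usual Monads-library definition ifM test t f = do c <- test; if c then t else f od (the proofs above unfold ifM_def), a minimal concrete consequence would be the following illustrative lemma; it is not part of the patch.

  lemma "ifM (return True) a b = a"
    by (simp add: ifM_def)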
if I s then set xs else {}"]) apply (case_tac "x \ set xs") apply simp @@ -229,7 +233,7 @@ lemma findM_on_outcome: apply (case_tac "\ set xsa \ set xs") apply simp apply simp - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule hoare_post_imp [OF _ x]) apply clarsimp apply assumption @@ -267,6 +271,8 @@ lemma filter_assocs_Cons: apply simp done +lemmas stateAssert_def = stateAssert_def[unfolded state_assert_def] + lemma snd_stateAssert_after: "\ snd ((do _ \ f; stateAssert R vs od) s) \ \snd (f s) \ (\(rv, s') \ fst (f s). R s')" diff --git a/lib/HaskellLib_H.thy b/lib/HaskellLib_H.thy index 3a25fac941..32e984889e 100644 --- a/lib/HaskellLib_H.thy +++ b/lib/HaskellLib_H.thy @@ -12,16 +12,16 @@ theory HaskellLib_H imports Lib - NatBitwise More_Numeral_Type - NonDetMonadVCG + Monads.Nondet_VCG + Monads.Nondet_Reader_Option begin abbreviation (input) "flip \ swp" abbreviation(input) bind_drop :: "('a, 'c) nondet_monad \ ('a, 'b) nondet_monad \ ('a, 'b) nondet_monad" (infixl ">>'_" 60) - where "bind_drop \ (\x y. bind x (K_bind y))" + where "bind_drop \ (\x y. Nondet_Monad.bind x (K_bind y))" lemma bind_drop_test: "foldr bind_drop x (return ()) = sequence_x x" @@ -60,7 +60,7 @@ declare haskell_assert_def [simp] haskell_assertE_def [simp] definition stateAssert :: "('a \ bool) \ unit list \ ('a, unit) nondet_monad" where - "stateAssert P L \ get >>= (\s. assert (P s))" + "stateAssert P L \ state_assert P" definition haskell_fail :: "unit list \ ('a, 'b) nondet_monad" where @@ -522,4 +522,32 @@ syntax (input) lemma "[(x,1) . x \ [0..10]] = [(x,1) | x \ [0..10]]" by (rule refl) +definition ohaskell_fail :: "unit list \ ('s, 'a) lookup" where + "ohaskell_fail = K ofail" + +definition ohaskell_assert :: "bool \ unit list \ ('s, unit) lookup" where + "ohaskell_assert P ls \ if P then oreturn () else ofail" + +lemma no_ofail_ohaskell_assert[wp]: + "no_ofail (\_. P) (ohaskell_assert P [])" + by (clarsimp simp: no_ofail_def ohaskell_assert_def) + +lemma ohaskell_assert_wp[wp]: + "\\s. Q \ P () s\ ohaskell_assert Q [] \P\" + apply (clarsimp simp: ohaskell_assert_def) + apply (intro conjI; wpsimp) + done + +lemma ohaskell_assert_sp: + "\P\ ohaskell_assert Q [] \\_ s. P s \ Q\" + apply (clarsimp simp: ohaskell_assert_def) + apply (intro conjI; wpsimp) + done + +lemma gets_the_ohaskell_assert: + "gets_the (ohaskell_assert P []) = assert P" + by (clarsimp simp: ohaskell_assert_def split: if_splits) + +lemmas omonad_defs = omonad_defs ohaskell_assert_def ohaskell_fail_def + end diff --git a/lib/Heap_List.thy b/lib/Heap_List.thy new file mode 100644 index 0000000000..1bbaaab1e8 --- /dev/null +++ b/lib/Heap_List.thy @@ -0,0 +1,520 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Singly-linked lists on heaps or projections from heaps, as predicate and as recursive function. 
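For orientation, a minimal concrete instance of the heap_ls predicate defined below (illustrative only, not part of the patch): a heap mapping 1 to 2 and 2 to 3, and undefined elsewhere, forms the singly-linked list [1, 2, 3] starting at 1; simp should prove this by unfolding the primrec equations.

  lemma "heap_ls [1 \<mapsto> 2, 2 \<mapsto> 3] (Some (1::nat)) [1, 2, 3]"
    by simp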
+ Loosely after ~~/src/HOL/Hoare/Pointer_Examples.thy *) + +theory Heap_List +imports Main "HOL-Library.Prefix_Order" ListLibLemmas +begin + +(* Given a heap projection that returns the next-pointer for an object at address x, + given a start pointer x, and an end pointer y, determine if the given list + is the path of addresses visited when following the next-pointers from x to y *) +primrec heap_path :: "('a \ 'a) \ 'a option \ 'a list \ 'a option \ bool" where + "heap_path hp x [] y = (x = y)" +| "heap_path hp x (a#as) y = (x = Some a \ heap_path hp (hp a) as y)" + +(* When a path ends in None, it is a singly-linked list *) +abbreviation heap_ls :: "('a \ 'a) \ 'a option \ 'a list \ bool" where + "heap_ls hp x xs \ heap_path hp x xs None" + +(* Walk a linked list of next pointers, recording which it visited. + Terminates artificially at loops, and otherwise because the address domain is finite *) +function heap_walk :: "('a::finite \ 'a) \ 'a option \ 'a list \ 'a list" where + "heap_walk hp None xs = xs" +| "heap_walk hp (Some x) xs = (if x \ set xs then xs else heap_walk hp (hp x) (xs@[x]))" + by pat_completeness auto + +lemma card_set_UNIV: + fixes xs :: "'a::finite list" + assumes "x \ set xs" + shows "card (set xs) < card(UNIV::'a set)" +proof - + have "finite (UNIV::'a set)" by simp + moreover + from assms have "set xs \ UNIV" by blast + ultimately + show ?thesis by (rule psubset_card_mono) +qed + +termination heap_walk + by (relation "measure (\(_, _, xs). card(UNIV :: 'a set) - card (set xs))"; + simp add: card_set_UNIV diff_less_mono2) + +lemma heap_path_append[simp]: + "heap_path hp start (xs @ ys) end = (\x. heap_path hp start xs x \ heap_path hp x ys end)" + by (induct xs arbitrary: start; simp) + +lemma heap_path_None[simp]: + "heap_path hp None xs end = (xs = [] \ end = None)" + by (cases xs, auto) + +lemma heap_ls_unique: + "\ heap_ls hp x xs; heap_ls hp x ys \ \ xs = ys" + by (induct xs arbitrary: ys x; simp) (case_tac ys; clarsimp) + +lemma heap_ls_hd_not_in_tl: + "heap_ls hp (hp x) xs \ x \ set xs" +proof + assume "x \ set xs" + then obtain ys zs where xs: "xs = ys @ x # zs" by (auto simp: in_set_conv_decomp) + moreover assume "heap_ls hp (hp x) xs" + moreover from this xs have "heap_ls hp (hp x) zs" by clarsimp + ultimately show False by (fastforce dest: heap_ls_unique) +qed + +lemma heap_ls_distinct: + "heap_ls hp x xs \ distinct xs" + by (induct xs arbitrary: x; clarsimp simp: heap_ls_hd_not_in_tl) + +lemma heap_ls_is_walk': + "\ heap_ls hp x xs; set xs \ set ys = {} \ \ heap_walk hp x ys = ys @ xs" + by (frule heap_ls_distinct) (induct xs arbitrary: x ys; clarsimp) + +lemma heap_ls_is_walk: + "heap_ls hp x xs \ heap_walk hp x [] = xs" + using heap_ls_is_walk' by fastforce + +lemma heap_path_end_unique: + "heap_path hp x xs y \ heap_path hp x xs y' \ y = y'" + by (induct xs arbitrary: x; clarsimp) + +lemma heap_path_head': + "heap_path hp st xs y \ xs \ [] \ st = Some (hd xs)" + by (induct xs arbitrary: st; clarsimp) + +lemmas heap_path_head = heap_path_head'[rule_format] + +lemma heap_path_non_nil_lookup_next: + "heap_path hp x (xs@z#ys) y \ hp z = (case ys of [] \ y | _ \ Some (hd ys))" + by (cases ys; fastforce) + +lemma heap_ls_next_of_hd: + "\a = hd ls; heap_ls hp st ls; Suc 0 < length ls\ \ hp a = Some (hd (tl ls))" + apply (cut_tac hp=hp and xs="[]" and z=a and ys="tl ls" in heap_path_non_nil_lookup_next) + apply (prop_tac "ls \ []", fastforce) + apply fastforce + apply (cases "tl ls"; clarsimp) + apply (cases ls; clarsimp) + done + +lemma heap_path_prefix: + 
"heap_path hp st ls ed \ \xs\ls. heap_path hp st xs (if xs = [] then st else hp (last xs))" + apply clarsimp + apply (erule Prefix_Order.prefixE) + by (metis append_butlast_last_id heap_path_append heap_path_non_nil_lookup_next list.case(1)) + +lemma heap_path_butlast: + "heap_path hp st ls ed \ ls \ [] \ heap_path hp st (butlast ls) (Some (last ls))" + by (induct ls rule: rev_induct; simp) + +lemma in_list_decompose_takeWhile: + "x \ set xs \ + xs = (takeWhile ((\) x) xs) @ x # (drop (length (takeWhile ((\) x) xs) + 1) xs)" + by (induct xs arbitrary: x; clarsimp) + +lemma takeWhile_neq_hd_eq_Nil[simp]: + "takeWhile ((\) (hd xs)) xs = Nil" + by (metis (full_types) hd_Cons_tl takeWhile.simps(1) takeWhile.simps(2)) + +lemma heap_not_in_dom[simp]: + "ptr \ dom hp \ hp(ptr := None) = hp" + by (auto simp: dom_def) + +lemma heap_path_takeWhile_lookup_next: + "\ heap_path hp st rs ed; r \ set rs \ + \ heap_path hp st (takeWhile ((\) r) rs) (Some r)" + apply (drule heap_path_prefix) + apply (subgoal_tac "takeWhile ((\) r) rs @ [r] \ rs", fastforce) + by (fastforce dest!: in_list_decompose_takeWhile intro: Prefix_Order.prefixI) + +lemma heap_path_heap_upd_not_in: + "\heap_path hp st rs ed; r \ set rs\ \ heap_path (hp(r:= x)) st rs ed" + by (induct rs arbitrary: st; clarsimp) + +lemma heap_path_last_update: + "\heap_path hp st xs end; xs \ []; distinct xs\ \ heap_path (hp(last xs := new)) st xs new" + by (induct xs arbitrary: st rule: rev_induct; simp add: heap_path_heap_upd_not_in) + +lemma heap_walk_lb: + "heap_walk hp x xs \ xs" + apply (induct xs rule: heap_walk.induct; clarsimp) + by (metis Prefix_Order.prefixE Prefix_Order.prefixI append_assoc) + +lemma heal_walk_Some_nonempty': + "heap_walk hp (Some x) [] > []" + by (fastforce intro: heap_walk_lb less_le_trans[where y="[x]"]) + +lemma heal_walk_Some_nonempty: + "heap_walk hp (Some x) [] \ []" + by (metis less_list_def heal_walk_Some_nonempty') + +lemma heap_walk_Nil_None: + "heap_walk hp st [] = [] \ st = None" + by (case_tac st; simp only: heal_walk_Some_nonempty) + +lemma heap_path_last_end: + "heap_path hp st xs end \ xs \ [] \ hp (last xs) = end" + by (induct xs rule: rev_induct; clarsimp) + +lemmas heap_ls_last_None = heap_path_last_end[where ?end=None] + +(* sym_heap *) + +definition sym_heap where + "sym_heap hp hp' \ \p p'. hp p = Some p' \ hp' p' = Some p" + +lemma sym_heapD1: + "sym_heap hp hp' \ hp p = Some p' \ hp' p' = Some p" + by (clarsimp simp: sym_heap_def) + +lemma sym_heapD2: + "sym_heap hp hp' \ hp' p' = Some p \ hp p = Some p'" + by (clarsimp simp: sym_heap_def) + +lemma sym_heap_symmetric: + "sym_heap hp hp' \ sym_heap hp' hp" + unfolding sym_heap_def by blast + +lemma sym_heap_None: + "\sym_heap hp hp'; hp p = None\ \ \p'. 
hp' p' \ Some p" unfolding sym_heap_def by force + +lemma sym_heap_remove_only: + "\sym_heap hp hp'; hp' y = Some x\ \ sym_heap (hp(x := None)) (hp'(y := None))" + apply (clarsimp simp: sym_heap_def) + apply (metis option.inject) + done + +lemma sym_heap_remove_only': + "\sym_heap hp hp'; hp y = Some x\ \ sym_heap (hp(y := None)) (hp'(x := None))" + apply (clarsimp simp: sym_heap_def) + apply (metis option.inject) + done + +lemma sym_heap_remove_middle_from_chain: + "\sym_heap hp hp'; before \ middle; middle \ after; + hp before = Some middle; hp middle = Some after\ + \ sym_heap (hp(before := Some after, middle := None)) + (hp'(after := Some before, middle := None))" + apply (clarsimp simp: sym_heap_def) + apply (metis option.simps(1)) + done + +lemma sym_heap_connect: + "\sym_heap hp hp'; hp a = None; hp' b = None \ \ sym_heap (hp(a \ b)) (hp'(b \ a))" + by (force simp: sym_heap_def) + +lemma sym_heap_insert_into_middle_of_chain: + "\sym_heap hp hp'; hp before = Some after; hp middle = None; hp' middle = None\ + \ sym_heap (hp(before \ middle, middle \ after)) (hp'(after \ middle, middle \ before))" + apply (clarsimp simp: sym_heap_def) + apply (metis option.simps) + done + +lemma sym_heap_path_reverse: + "sym_heap hp hp' \ + heap_path hp (Some p) (p#ps) (Some p') + \ heap_path hp' (Some p') (p'#(rev ps)) (Some p)" + unfolding sym_heap_def by (induct ps arbitrary: p p' rule: rev_induct; force) + +lemma sym_heap_ls_rev_Cons: + "\sym_heap hp hp'; heap_ls hp (Some p) (p#ps)\ + \ heap_path hp' (Some (last (p#ps))) (rev ps) (Some p)" + supply rev.simps[simp del] + apply (induct ps arbitrary: p rule: rev_induct; simp add: rev.simps) + by (auto dest!: sym_heap_path_reverse[THEN iffD1]) + +lemma sym_heap_ls_rev: + "\sym_heap hp hp'; heap_ls hp (Some p) ps\ + \ heap_path hp' (Some (last ps)) (butlast (rev ps)) (Some p) + \ hp (last ps) = None" + apply (induct ps arbitrary: p rule: rev_induct, simp) + apply (frule heap_path_head; clarsimp) + by (auto dest!: sym_heap_path_reverse[THEN iffD1]) + +lemma heap_path_sym_heap_non_nil_lookup_prev: + "\heap_ls hp x (xs @ z # ys); sym_heap hp hp'; xs \ []\ \ hp' z = (Some (last xs))" + supply heap_path_append[simp del] + apply (cut_tac xs="butlast xs" and z="last xs" and ys="z # ys" + in heap_path_non_nil_lookup_next[where hp=hp and x=x and y=None]) + apply (frule append_butlast_last_id) + apply (metis append_eq_Cons_conv append_eq_append_conv2) + apply (fastforce dest: sym_heapD1) + done + +lemma heap_ls_prev_of_last: + "\t = last ls; Suc 0 < length ls; heap_ls hp st ls; sym_heap hp hp'\ + \ hp' t = Some (last (butlast ls))" + apply (cut_tac hp=hp and xs="butlast ls" and z=t and ys="[]" + in heap_path_sym_heap_non_nil_lookup_prev) + apply (prop_tac "ls \ []", fastforce) + apply fastforce + apply fastforce + apply (fastforce intro!: length_gt_1_imp_butlast_nonempty) + apply assumption + done + +lemma ptr_in_middle_prev_next: + "\heap_ls hp st (xs @ ptr # ys); xs \ []; ys \ []; sym_heap hp hp'\ + \ hp' ptr = Some (last xs) \ hp ptr = Some (hd ys)" + apply (rule conjI) + apply (fastforce dest: heap_path_sym_heap_non_nil_lookup_prev) + apply (cut_tac hp=hp in heap_path_non_nil_lookup_next) + apply fastforce + apply (cases ys; clarsimp) + done + +lemma heap_ls_no_loops: + "\heap_ls hp st xs; p \ set xs\ \ hp p \ Some p" + apply (frule heap_ls_distinct) + apply (fastforce dest: split_list heap_path_non_nil_lookup_next split: list.splits) + done + +lemma heap_ls_prev_no_loops: + "\heap_ls hp st xs; p \ set xs; sym_heap hp hp'\ \ hp' p \ Some p" + by (fastforce 
dest: heap_ls_no_loops sym_heapD2) + +(* more on heap_path : next/prev in path *) + +lemma heap_path_extend: + "heap_path hp st (ls @ [p]) (hp p) \ heap_path hp st ls (Some p)" + by (induct ls rule: rev_induct; simp) + +lemma heap_path_prefix_heap_ls: + "\heap_ls hp st xs; heap_path hp st ys ed\ \ ys \ xs" + apply (induct xs arbitrary: ys st, simp) + apply (case_tac ys; clarsimp) + done + +lemma distinct_decompose2: + "\distinct xs; xs = ys @ x # y # zs\ + \ x \ y \ x \ set ys \ y \ set ys \ x \ set zs \ y \ set zs" + by (simp add: in_set_conv_decomp) + +lemma heap_path_distinct_next_cases: (* the other direction needs sym_heap *) + "\heap_path hp st xs ed; distinct xs; p \ set xs; hp p = Some np\ + \ ed = Some p \ ed = Some np \ np \ set xs" + apply (cases ed; simp) + apply (frule in_list_decompose_takeWhile) + apply (subgoal_tac "heap_ls hp st (takeWhile ((\) p) xs @ p # drop (length (takeWhile ((\) p) xs) + 1) xs)") + apply (drule heap_path_non_nil_lookup_next) + apply (case_tac "drop (length (takeWhile ((\) p) xs) + 1) xs"; simp) + apply (metis in_set_dropD list.set_intros(1)) + apply simp + apply (frule in_list_decompose_takeWhile) + apply (subgoal_tac "heap_path hp st (takeWhile ((\) p) xs @ p # drop (length (takeWhile ((\) p) xs) + 1) xs) ed") + apply (frule heap_path_non_nil_lookup_next) + apply (case_tac "drop (length (takeWhile ((\) p) xs) + 1) xs", simp) + apply (simp split: if_split_asm) + apply (drule (1) distinct_decompose2) + apply clarsimp + by (metis in_set_dropD list.set_intros(1)) simp + +lemma heap_ls_next_in_list: + "\heap_ls hp st xs; p \ set xs; hp p = Some np\ + \ np \ set xs" + apply (subgoal_tac "distinct xs") + by (fastforce dest!: heap_path_distinct_next_cases) (erule heap_ls_distinct) + +lemma heap_path_distinct_sym_prev_cases: + "\heap_path hp st xs ed; distinct xs; np \ set xs; hp p = Some np; sym_heap hp hp'\ + \ st = Some np \ p \ set xs" + apply (cases st; simp) + apply (rename_tac stp) + apply (case_tac "stp = np"; simp) + apply (cases xs; simp del: heap_path.simps) + apply (frule heap_path_head, simp) + apply (cases ed, clarsimp) + apply (frule sym_heap_ls_rev_Cons, fastforce) + apply (drule heap_path_distinct_next_cases[where hp=hp']; simp add: sym_heap_def) + apply simp + apply (simp del: heap_path.simps) + apply (frule (1) sym_heap_path_reverse[where hp'=hp', THEN iffD1]) + apply simp + apply (frule heap_path_distinct_next_cases[where hp=hp']; simp add: sym_heap_def) + apply fastforce + done + +lemma heap_ls_prev_cases: + "\heap_ls hp st xs; np \ set xs; hp p = Some np; sym_heap hp hp'\ + \ st = Some np \ p \ set xs" + apply (subgoal_tac "distinct xs") + by (fastforce dest!: heap_path_distinct_sym_prev_cases) (erule heap_ls_distinct) + +lemma heap_ls_prev_not_in: + "\heap_ls hp st xs; np \ set xs; hp p = Some np\ + \ p \ set xs" + by (meson heap_ls_next_in_list) + +lemma heap_path_distinct_prev_not_in: + "\heap_path hp st xs ed; distinct xs; np \ set xs; hp p = Some np; ed \ Some np; ed \ Some p\ + \ p \ set xs" + using heap_path_distinct_next_cases + by fastforce + +lemma heap_path_distinct_next_not_in: + "\heap_path hp st xs ed; distinct xs; p \ set xs; hp p = Some np; + sym_heap hp hp'; st \ Some np\ + \ np \ set xs" + by (fastforce dest!: heap_path_distinct_sym_prev_cases[simplified]) + +lemma heap_ls_next_not_in: + "\heap_ls hp st xs; p \ set xs; hp p = Some np; sym_heap hp hp'; st \ Some np\ + \ np \ set xs" + by (fastforce dest!: heap_ls_prev_cases[simplified]) + +lemma sym_heap_prev_None_is_start: + "\heap_ls hp st xs; sym_heap hp hp'; p \ set 
xs; hp' p = None\ + \ Some p = st" + using split_list_last heap_path_sym_heap_non_nil_lookup_prev + by fastforce + +lemma not_last_next_not_None: + "\heap_ls hp st xs; p \ set xs; p \ last xs\ \ hp p \ None" + by (fastforce intro: heap_path_head dest: split_list) + +lemma not_head_prev_not_None: + "\heap_ls hp st xs; p \ set xs; p \ hd xs; sym_heap hp hp'\ + \ hp' p \ None" + using sym_heap_prev_None_is_start heap_path_head + by fastforce + +lemma heap_ls_neighbour_in_set: + "\heap_ls hp st xs; sym_heap hp hp'; st \ None \ hp' (the st) = None; p \ set xs\ + \ \nbr. (hp p = Some nbr \ nbr \ set xs) \ (hp' p = Some nbr \ nbr \ set xs)" + apply (intro conjI impI allI) + apply (erule (2) heap_ls_next_in_list) + apply (fastforce dest: heap_ls_prev_cases[where np=p] sym_heapD2) + done + +(* more on heap_path *) + +lemma heap_ls_next_takeWhile_append: + "\heap_ls hp st xs; p \ set xs; hp p = Some np\ + \ takeWhile ((\) np) xs = (takeWhile ((\) p) xs) @ [p]" + apply (frule heap_ls_distinct) + apply (frule in_list_decompose_takeWhile) + apply (subgoal_tac "heap_ls hp st (takeWhile ((\) p) xs @ p # drop (length (takeWhile ((\) p) xs) + 1) xs)") + prefer 2 apply simp + apply (drule heap_path_non_nil_lookup_next) + apply (case_tac "drop (length (takeWhile ((\) p) xs) + 1) xs"; simp) + apply (subgoal_tac "np \ set xs") + prefer 2 apply (erule (2) heap_ls_next_in_list) + apply (frule in_list_decompose_takeWhile[where x=np]) + apply (drule (1) distinct_inj_middle[where x=np and xa="takeWhile ((\) np) xs" and ya="takeWhile ((\) p) xs @ [p]"]) + apply simp+ + done + +(* RT FIXME: Move *) +lemma takeWhile_neq_notin_same: + "x \ set xs \ takeWhile ((\) x) xs = xs" + using takeWhile_eq_all_conv by blast + +lemma heap_path_extend_takeWhile: + "\heap_ls hp st xs; heap_path hp st (takeWhile ((\) p) xs) (Some p); hp p = Some np\ + \ heap_path hp st (takeWhile ((\) np) xs) (Some np)" + apply (case_tac "p \ set xs") + apply (subst heap_ls_next_takeWhile_append[where p=p and np=np and hp=hp]; simp) + apply (drule takeWhile_neq_notin_same, simp) + apply (drule (1) heap_path_end_unique, simp) + done + +lemma heap_ls_next_takeWhile_append_sym: + "\heap_ls hp st xs; np \ set xs; st \ Some np; hp p = Some np; sym_heap hp hp'\ + \takeWhile ((\) np) xs = (takeWhile ((\) p) xs) @ [p]" + apply (frule (3) heap_ls_prev_cases, simp) + apply (fastforce elim!: heap_ls_next_takeWhile_append) + done + +lemma heap_path_curtail_takeWhile: + "\heap_ls hp st xs; heap_path hp st (takeWhile ((\) np) xs) (Some np); + st \ Some np; hp p = Some np; sym_heap hp hp'\ + \ heap_path hp st (takeWhile ((\) p) xs) (Some p)" + apply (case_tac "np \ set xs") + apply (drule (4) heap_ls_next_takeWhile_append_sym) + apply simp + apply (drule takeWhile_neq_notin_same, simp) + apply (drule (1) heap_path_end_unique, simp) + done + +(* more on heap_path : end *) + + +\ \Lemmas relating an update to the list to an update to the heap\ + +lemma heap_ls_prepend: + "\heap_ls hp st xs; new \ set xs; xs \ []\ + \ heap_ls (hp(new := Some (hd xs))) (Some new) (new # xs)" + apply simp + apply (erule heap_path_heap_upd_not_in[rotated]) + apply (frule (1) heap_path_head) + apply fastforce + done + +lemma heap_ls_append: + "\heap_ls hp st xs; xs \ []; new \ set xs\ + \ heap_ls (hp(last xs := Some new, new := None)) st (xs @ [new])" + apply (frule heap_ls_distinct) + apply simp + apply (rule heap_path_heap_upd_not_in) + apply (fastforce simp: heap_path_last_update) + apply assumption + done + +lemma heap_ls_list_insert_before: + "\heap_ls hp st (xs @ ys); new \ set (xs 
@ ys); xs \ []; ys \ []\ + \ heap_ls (hp(last xs := Some new, new := Some (hd ys))) st + (list_insert_before (xs @ ys) (hd ys) new)" + apply (frule heap_ls_distinct) + apply (subst list_insert_before_distinct; fastforce?) + apply simp + apply (rule conjI) + \ \the path until new\ + apply (fastforce intro: heap_path_heap_upd_not_in heap_path_last_update) + \ \the path from hd ys\ + apply (metis disjoint_iff_not_equal heap_path_head heap_path_heap_upd_not_in last_in_set) + done + +lemma heap_ls_remove_singleton: + "heap_ls hp st [x] \ heap_ls (hp(x := None)) None []" + by simp + +lemma heap_ls_remove_head_not_singleton: + "\heap_ls hp st xs; tl xs \ []\ + \ heap_ls (hp(hd xs := None)) (Some (hd (tl xs))) (tl xs)" + apply (frule heap_ls_distinct) + apply (cases xs; simp) + apply clarsimp + apply (frule heap_path_head) + apply fastforce + apply (fastforce elim!: heap_path_heap_upd_not_in) + done + +lemma heap_ls_remove_last_not_singleton: + "\heap_ls hp st xs; butlast xs \ []\ + \ heap_ls (hp((last (butlast xs)) := None)) st (butlast xs)" + apply (frule heap_ls_distinct) + apply (frule distinct_butlast) + apply (fastforce dest: heap_path_last_update heap_path_butlast) + done + +lemma heap_ls_remove_middle: + "\heap_ls hp st (xs @ a # ys); xs \ []; ys \ []\ + \ heap_ls (hp(last xs := Some (hd ys), a := None)) st (xs @ ys)" + apply (frule heap_ls_distinct) + apply simp + apply (rule_tac x="Some (hd ys)" in exI) + apply (rule conjI) + apply (fastforce intro: heap_path_heap_upd_not_in heap_path_last_update) + apply (rule heap_path_heap_upd_not_in) + apply (rule heap_path_heap_upd_not_in) + using heap_path_head apply fastforce + apply force + apply fastforce + done + +end \ No newline at end of file diff --git a/lib/Hoare_Sep_Tactics/Hoare_Sep_Tactics.thy b/lib/Hoare_Sep_Tactics/Hoare_Sep_Tactics.thy index e5025e991e..64cea66294 100644 --- a/lib/Hoare_Sep_Tactics/Hoare_Sep_Tactics.thy +++ b/lib/Hoare_Sep_Tactics/Hoare_Sep_Tactics.thy @@ -6,7 +6,7 @@ theory Hoare_Sep_Tactics imports - Lib.NonDetMonadVCG + Monads.Nondet_VCG Sep_Algebra.Sep_Algebra_L4v begin @@ -31,10 +31,10 @@ lemma hoare_eq_post: " \ \rv s. Q rv s = G rv s; \P\ \rv s. Q rv s = G rv s; \P\ f \Q\, \E\\ \ \P\ f \G\, \E\" - by (metis (full_types) hoare_post_impErr') + by (metis (full_types) hoare_strengthen_postE) lemma hoare_eq_postE_R: " \ \rv s. Q rv s = G rv s; \P\ f \Q\, -\ \ \P\ f \G\, -" - by (metis hoare_post_imp_R) + by (metis hoare_strengthen_postE_R) ML \ val sep_select_post_method = sep_select_generic_method false [@{thm hoare_eq_post}, @@ -81,14 +81,14 @@ schematic_goal strong_sep_impl_sep_wp': "\sep_lift. (\R. \(\s. (P \* R) (sep_lift s) )\ f \\rv. (\s. (Q rv \* R) (sep_lift s))\) \ \(\s. ( P \* (?f Q R)) (sep_lift s))\ f \\rv s . R rv (sep_lift s)\" - apply (atomize) - apply (erule_tac x="(\s. \x. (Q x \* R x) s)" in allE) - apply (rule hoare_strengthen_post) - apply (assumption) - apply (sep_drule (direct) extract_all) - apply (erule_tac x=r in allE) - apply (sep_solve) -done + apply (atomize) + apply (erule_tac x="(\s. \x. (Q x \* R x) s)" in allE) + apply (erule hoare_strengthen_post) + apply (rename_tac rv s) + apply (sep_drule (direct) extract_all) + apply (erule_tac x=rv in allE) + apply (sep_solve) + done lemma strong_sep_impl_sep_wp'': "\sep_lift. @@ -344,8 +344,7 @@ done ML \ - fun J f x = f x - handle _ => x (* FIXME! 
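Tying the heap_ls removal lemmas above to a concrete check (illustrative only, not part of the patch): deleting the middle element 2 from the list [1, 2, 3] rewires 1 to point at 3, in the spirit of heap_ls_remove_middle.

  lemma "heap_ls (([1 \<mapsto> 2, 2 \<mapsto> 3])(1 := Some 3, 2 := None)) (Some (1::nat)) [1, 3]"
    by simp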
exceptions *) + fun J f x = \<^try>\f x catch _ => x\ fun sep_wp thms ctxt = let diff --git a/lib/Injection_Handler.thy b/lib/Injection_Handler.thy new file mode 100644 index 0000000000..fdae11064b --- /dev/null +++ b/lib/Injection_Handler.thy @@ -0,0 +1,51 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Definition of injection_handler and supporting lemmas. *) + +theory Injection_Handler + imports Monads.Nondet_VCG +begin + +definition injection_handler :: + "('a \ 'b) \ ('s, 'a + 'c) nondet_monad \ ('s, 'b + 'c) nondet_monad" where + "injection_handler f m \ m (\ft. throwError (f ft))" + +lemma injection_wp: + "\ t = injection_handler f; \P\ m \Q\,\\ft. E (f ft)\ \ \ \P\ t m \Q\,\E\" + unfolding injection_handler_def + by wpsimp + +lemma injection_wp_E: + "\ t = injection_handler f; \P\ m \Q\,- \ \ \P\ t m \Q\,-" + by (simp add: validE_R_def injection_wp) + +lemma injection_bindE: + "\ t = injection_handler f; t2 = injection_handler f \ + \ t (x >>=E y) = (t2 x) >>=E (\rv. t (y rv))" + apply (simp add: injection_handler_def bindE_def handleE'_def bind_assoc) + apply (rule arg_cong [where f="bind x"]) + apply (fastforce simp: lift_def throwError_def split: sum.splits) + done + +lemma injection_liftE: + "t = injection_handler f \ t (liftE x) = liftE x" + by (simp add: injection_handler_def handleE'_def liftE_def) + +lemma id_injection: + "id = injection_handler id" +proof - + have P: "case_sum throwError (\v. return (Inr v)) = return" + by (auto simp: throwError_def split: sum.splits) + show ?thesis + by (auto simp: injection_handler_def handleE'_def P) +qed + +lemma injection_handler_assertE: + "injection_handler inject (assertE f) = assertE f" + by (simp add: assertE_liftE injection_liftE) + +end \ No newline at end of file diff --git a/lib/Insulin.thy b/lib/Insulin.thy index 1418b56728..aa147b319f 100644 --- a/lib/Insulin.thy +++ b/lib/Insulin.thy @@ -34,11 +34,11 @@ * * Another example (l4v libraries): * > desugar_term "\ P and Q \ f \ \r _. r \ {0..<5} \!" "\" - * NonDetMonad.validNF (P and Q) f (\r _. r \ {0\'b..<5\'b}) + * Nondet_Total.validNF (P and Q) f (\r _. r \ {0\'b..<5\'b}) * * Desugar multiple operators: * > desugar_term "\ P and Q \ f \ \r _. r \ {0..<5} \!" "\" "and" ".." - * NonDetMonad.validNF (Lib.pred_conj P Q) f + * Nondet_Monad.validNF (Lib.pred_conj P Q) f * (\r _. 
r \ Set_Interval.ord_class.atLeastLessThan (0\'b) (5\'b)) * * diff --git a/lib/LemmaBucket.thy b/lib/LemmaBucket.thy index b1b6312748..be6996d83d 100644 --- a/lib/LemmaBucket.thy +++ b/lib/LemmaBucket.thy @@ -11,10 +11,6 @@ imports SubMonadLib begin -lemma corres_underlying_trivial: - "\ nf' \ no_fail P' f \ \ corres_underlying Id nf nf' (=) \ P' f f" - by (auto simp add: corres_underlying_def Id_def no_fail_def) - lemma hoare_spec_gen_asm: "\ F \ s \ \P\ f \Q\ \ \ s \ \P and K F\ f \Q\" "\ F \ s \ \P\ f' \Q\,\E\ \ \ s \ \P and K F\ f' \Q\,\E\" @@ -100,10 +96,10 @@ lemmas mapM_x_accumulate_checks lemma isRight_rel_sum_comb2: "\ (f \ r) v v'; isRight v' \ \ isRight v \ r (theRight v) (theRight v')" - by (clarsimp simp: isRight_def) + by (cases v; clarsimp) lemma isRight_case_sum: "isRight x \ case_sum f g x = g (theRight x)" - by (clarsimp simp add: isRight_def) + by (cases x; clarsimp) lemma enumerate_append:"enumerate i (xs @ ys) = enumerate i xs @ enumerate (i + length xs) ys" apply (induct xs arbitrary:ys i) @@ -514,4 +510,12 @@ lemma cases_conj_strg: "A \ B \ (P \ A) \ (\ lemma and_not_not_or_imp: "(~ A & ~ B | C) = ((A | B) \ C)" by blast +lemma filter_hd_equals_tl: + "\distinct q; q \ []\ \ filter ((\) (hd q)) q = tl q" + apply (induct q rule: length_induct) + apply (rename_tac list) + apply (case_tac list; simp) + apply (fastforce simp: filter_id_conv) + done + end diff --git a/lib/LexordList.thy b/lib/LexordList.thy index f5beecc108..3dee0a8062 100644 --- a/lib/LexordList.thy +++ b/lib/LexordList.thy @@ -6,7 +6,7 @@ theory LexordList imports Main - Eisbach_Methods (* for tests *) + Eisbach_Tools.Eisbach_Methods (* for tests *) begin text \ diff --git a/lib/Lib.thy b/lib/Lib.thy index 1e1873dd8a..c658531f36 100644 --- a/lib/Lib.thy +++ b/lib/Lib.thy @@ -18,6 +18,9 @@ imports Extract_Conjunct ML_Goal Eval_Bool + None_Top_Bot + Monads.Monad_Lib + Basics.CLib NICTATools "Word_Lib.WordSetup" begin @@ -45,25 +48,6 @@ lemma Collect_eq: (* FIXME: move next to HOL.iff_allI *) lemma iff_impI: "\P \ Q = R\ \ (P \ Q) = (P \ R)" by blast -(* Long ago, I, fun_app, the verification master of darkness, unleashed an unspeakable evil -upon the world. But a foolish proof engineer wielding an input abbreviation stepped forth -to oppose me. Before the final blow was struck, I tore open a hole in a number of refinement -proofs, and flung him into a broken proof state, where my evil is law. *) - -definition - fun_app :: "('a \ 'b) \ 'a \ 'b" (infixr "$" 10) where - "f $ x \ f x" - -declare fun_app_def [iff] - -lemma fun_app_cong[fundef_cong]: - "\ f x = f' x' \ \ (f $ x) = (f' $ x')" - by simp - -lemma fun_app_apply_cong[fundef_cong]: - "f x y = f' x' y' \ (f $ x) y = (f' $ x') y'" - by simp - lemma if_apply_cong[fundef_cong]: "\ P = P'; x = x'; P' \ f x' = f' x'; \ P' \ g x' = g' x' \ \ (if P then f else g) x = (if P' then f' else g') x'" @@ -78,64 +62,37 @@ lemma prod_injects: "p = (x,y) \ x = fst p \ y = snd p" by auto -definition - pred_conj :: "('a \ bool) \ ('a \ bool) \ ('a \ bool)" (infixl "and" 35) -where - "pred_conj P Q \ \x. P x \ Q x" - -lemma pred_conj_absorb[simp]: - "(P and P) = P" - by (simp add: pred_conj_def) - -definition - pred_disj :: "('a \ bool) \ ('a \ bool) \ ('a \ bool)" (infixl "or" 30) -where - "pred_disj P Q \ \x. P x \ Q x" - -lemma pred_disj_absorb[simp]: - "(P or P) = P" - by (simp add: pred_disj_def) - -definition - pred_neg :: "('a \ bool) \ ('a \ bool)" ("not _" [40] 40) -where - "pred_neg P \ \x. 
\ P x" - -lemma pred_neg_simp[simp]: - "(not P) s \ \ (P s)" - by (simp add: pred_neg_def) - -definition "K \ \x y. x" - -definition - zipWith :: "('a \ 'b \ 'c) \ 'a list \ 'b list \ 'c list" where - "zipWith f xs ys \ map (case_prod f) (zip xs ys)" - primrec delete :: "'a \ 'a list \ 'a list" where "delete y [] = []" | "delete y (x#xs) = (if y=x then xs else x # delete y xs)" -definition - "swp f \ \x y. f y x" +(* Nicer names for sum discriminators and selectors *) -lemma swp_apply[simp]: "swp f y x = f x y" - by (simp add: swp_def) +abbreviation theLeft :: "'a + 'b \ 'a" where + "theLeft \ projl" -primrec (nonexhaustive) - theRight :: "'a + 'b \ 'b" where - "theRight (Inr x) = x" +lemmas theLeft_simps = sum.sel(1) -primrec (nonexhaustive) - theLeft :: "'a + 'b \ 'a" where - "theLeft (Inl x) = x" +abbreviation theRight :: "'a + 'b \ 'b" where + "theRight \ projr" -definition - "isLeft x \ (\y. x = Inl y)" +lemmas theRight_simps = sum.sel(2) -definition - "isRight x \ (\y. x = Inr y)" +abbreviation isLeft :: "'a + 'b \ bool" where + "isLeft \ isl" + +lemmas isLeft_def = isl_def + +(* When you feel the urge to match something like "?P (isRight ?x)", consider using + "?P (isLeft ?x)" instead, because there is no primitive "isr", just "\isl _". *) +abbreviation isRight :: "'a + 'b \ bool" where + "isRight \ \x. \ isl x" + +lemma isRight_def: + "isRight x = (\y. x = Inr y)" + by (cases x; simp) definition "const x \ \y. x" @@ -238,24 +195,11 @@ lemma delete_remove1: lemma ignore_if: "(y and z) s \ (if x then y else z) s" - by (clarsimp simp: pred_conj_def) - -lemma zipWith_Nil2 : - "zipWith f xs [] = []" - unfolding zipWith_def by simp + by simp lemma isRight_right_map: "isRight (case_sum Inl (Inr o f) v) = isRight v" - by (simp add: isRight_def split: sum.split) - -lemma zipWith_nth: - "\ n < min (length xs) (length ys) \ \ zipWith f xs ys ! n = f (xs ! n) (ys ! n)" - unfolding zipWith_def by simp - -lemma length_zipWith [simp]: - "length (zipWith f xs ys) = min (length xs) (length ys)" - unfolding zipWith_def by simp - + by (simp split: sum.split) lemma first_in_uptoD: "a \ b \ (a::'a::order) \ {a..b}" @@ -279,10 +223,6 @@ lemma in_empty_interE: "\ A \ B = {}; x \ A; x \ B \ \ False" by blast -lemma None_upd_eq: - "g x = None \ g(x := None) = g" - by (rule ext) simp - lemma exx [iff]: "\x. x" by blast lemma ExNot [iff]: "Ex Not" by blast @@ -446,6 +386,10 @@ lemma allEI: shows "\x. Q x" using assms by (rule all_forward) +lemma bexEI: + "\\x\S. Q x; \x. \x \ S; Q x\ \ P x\ \ \x\S. P x" + by blast + text \General lemmas that should be in the library\ lemma dom_ran: @@ -636,11 +580,6 @@ lemma min_of_mono': unfolding min_def by (subst if_distrib [where f = f, symmetric], rule arg_cong [where f = f], rule if_cong [OF _ refl refl]) fact+ -lemma nat_diff_less: - fixes x :: nat - shows "\ x < y + z; z \ x\ \ x - z < y" - using less_diff_conv2 by blast - lemma take_map_Not: "(take n (map Not xs) = take n xs) = (n = 0 \ xs = [])" by (cases n; simp) (cases xs; simp) @@ -806,14 +745,6 @@ lemma map_conv_upd: "m v = None \ m o (f (x := v)) = (m o f) (x := None)" by (rule ext) (clarsimp simp: o_def) -lemma sum_all_ex [simp]: - "(\a. x \ Inl a) = (\a. x = Inr a)" - "(\a. x \ Inr a) = (\a. x = Inl a)" - by (metis Inr_not_Inl sum.exhaust)+ - -lemma split_distrib: "case_prod (\a b. T (f a b)) = (\x. T (case_prod (\a b. 
f a b) x))" - by (clarsimp simp: split_def) - lemma case_sum_triv [simp]: "(case x of Inl x \ Inl x | Inr x \ Inr x) = x" by (clarsimp split: sum.splits) @@ -1169,10 +1100,6 @@ proof (rule set_eqI) qed -lemma cart_singleton_image: - "S \ {s} = (\v. (v, s)) ` S" - by auto - lemma singleton_eq_o2s: "({x} = set_option v) = (v = Some x)" by (cases v, auto) @@ -1472,16 +1399,6 @@ lemma foldl_fun_upd: "foldl (\s r. s (r := g r)) f rs = (\x. if x \ set rs then g x else f x)" by (induct rs arbitrary: f) (auto simp: fun_eq_iff) -lemma all_rv_choice_fn_eq_pred: - "\ \rv. P rv \ \fn. f rv = g fn \ \ \fn. \rv. P rv \ f rv = g (fn rv)" - apply (rule_tac x="\rv. SOME h. f rv = g h" in exI) - apply (clarsimp split: if_split) - by (meson someI_ex) - -lemma ex_const_function: - "\f. \s. f (f' s) = v" - by force - lemma if_Const_helper: "If P (Con x) (Con y) = Con (If P x y)" by (simp split: if_split) @@ -1555,6 +1472,10 @@ lemma list_case_If: "(case xs of [] \ P | _ \ Q) = (if xs = [] then P else Q)" by (rule list.case_eq_if) +lemma lifted_if_collapse: + "(if P then \ else f) = (\s. \P \ f s)" + by auto + lemma remove1_Nil_in_set: "\ remove1 x xs = []; xs \ [] \ \ x \ set xs" by (induct xs) (auto split: if_split_asm) @@ -1992,6 +1913,37 @@ lemma length_takeWhile_gt: apply simp done +lemma map_fst_filter_zip[simp]: + "map fst (filter (\(_, x). P x) (zip ls (map f ls))) = filter (\x. P (f x)) ls" + by (induction ls; clarsimp) + +lemma map_fst_dropWhile_zip[simp]: + "map fst (dropWhile (\(_, x). P x) (zip ls (map f ls))) = dropWhile (\x. P (f x)) ls" + by (induction ls; clarsimp) + +lemma map_fst_takeWhile_zip[simp]: + "map fst (takeWhile (\(_, x). P x) (zip ls (map f ls))) = takeWhile (\x. P (f x)) ls" + by (induction ls; clarsimp) + +lemma suffix_tl: + "suffix xs ys \ suffix (tl xs) ys" + apply (clarsimp simp: suffix_def) + apply (metis append.assoc append_Cons append_Nil list.exhaust_sel list.sel(2)) + done + +lemma nonempty_proper_suffix_split: + "\sfx \ []; sfx \ queue; suffix sfx queue\ \ (\xs ys. xs \ [] \ queue = xs @ hd sfx # ys)" + apply (clarsimp simp: suffix_def) + apply (fastforce simp: neq_Nil_conv) + done + +lemma nonempty_proper_suffix_split_distinct: + "\sfx \ []; sfx \ queue; suffix sfx queue; distinct queue\ + \ \xs ys. xs \ [] \ queue = xs @ hd sfx # ys \ hd queue \ hd sfx" + apply (frule (2) nonempty_proper_suffix_split) + apply (cases sfx; fastforce) + done + lemma hd_drop_conv_nth2: "n < length xs \ hd (drop n xs) = xs ! n" by (rule hd_drop_conv_nth) clarsimp @@ -2111,6 +2063,22 @@ lemma filter_eq_If: "distinct xs \ filter (\v. 
v = x) xs = (if x \ set xs then [x] else [])" by (induct xs) auto +lemma filter_last_equals_butlast: + "\distinct q; q \ []\ \ filter ((\) (last q)) q = butlast q" + apply (induct q rule: length_induct) + apply (rename_tac list) + apply (case_tac list; simp) + apply (fastforce simp: filter_id_conv) + done + +lemma filter_middle_distinct: + "\distinct q; q = ys @ t # zs\ \ filter ((\) t) q = ys @ zs" + apply (induct q rule: length_induct) + apply (rename_tac list) + apply (case_tac list; simp) + apply (subst filter_True | fastforce)+ + done + lemma (in semigroup_add) foldl_assoc: shows "foldl (+) (x+y) zs = x + (foldl (+) y zs)" by (induct zs arbitrary: y) (simp_all add:add.assoc) @@ -2119,23 +2087,6 @@ lemma (in monoid_add) foldl_absorb0: shows "x + (foldl (+) 0 zs) = foldl (+) x zs" by (induct zs) (simp_all add:foldl_assoc) -lemma foldl_conv_concat: - "foldl (@) xs xss = xs @ concat xss" -proof (induct xss arbitrary: xs) - case Nil show ?case by simp -next - interpret monoid_add "(@)" "[]" proof qed simp_all - case Cons then show ?case by (simp add: foldl_absorb0) -qed - -lemma foldl_concat_concat: - "foldl (@) [] (xs @ ys) = foldl (@) [] xs @ foldl (@) [] ys" - by (simp add: foldl_conv_concat) - -lemma foldl_does_nothing: - "\ \x. x \ set xs \ f s x = s \ \ foldl f s xs = s" - by (induct xs) auto - lemma foldl_use_filter: "\ \v x. \ \ g x; x \ set xs \ \ f v x = v \ \ foldl f v xs = foldl f v (filter g xs)" by (induct xs arbitrary: v) auto @@ -2158,14 +2109,6 @@ lemma case_option_over_if: = (if G then Q v else P)" by (simp split: if_split)+ -lemma map_length_cong: - "\ length xs = length ys; \x y. (x, y) \ set (zip xs ys) \ f x = g y \ - \ map f xs = map g ys" - apply atomize - apply (erule rev_mp, erule list_induct2) - apply auto - done - lemma take_min_len: "take (min (length xs) n) xs = take n xs" by (simp add: min_def) @@ -2257,16 +2200,9 @@ lemma zip_take_triv: apply (case_tac as; simp) done -lemma zip_take_triv2: - "length as \ n \ zip as (take n bs) = zip as bs" - apply (induct as arbitrary: n bs; simp) - apply (case_tac n; simp) - apply (case_tac bs; simp) - done - lemma zip_take_length: "zip xs (take (length xs) ys) = zip xs ys" - by (metis order_refl zip_take_triv2) + by (metis length_take length_zip nat_le_linear take_all take_zip) lemma zip_singleton: "ys \ [] \ zip [a] ys = [(a, ys ! 0)]" @@ -2348,7 +2284,7 @@ lemma map_of_zip_is_index: lemma map_of_zip_take_update: "\i < length xs; length xs \ length ys; distinct xs\ - \ map_of (zip (take i xs) ys)(xs ! i \ (ys ! i)) = map_of (zip (take (Suc i) xs) ys)" + \ (map_of (zip (take i xs) ys)) (xs ! i \ ys ! i) = map_of (zip (take (Suc i) xs) ys)" apply (rule ext, rename_tac x) apply (case_tac "x=xs ! i"; clarsimp) apply (rule map_of_is_SomeI[symmetric]) @@ -2429,6 +2365,10 @@ lemma bspec_split: "\ \(a, b) \ S. P a b; (a, b) \ S \ \ P a b" by fastforce +lemma all_eq_trans: + "\ \x. P x = Q x; \x. Q x = R x \ \ \x. P x = R x" + by simp + lemma set_zip_same: "set (zip xs xs) = Id \ (set xs \ set xs)" by (induct xs) auto @@ -2593,8 +2533,18 @@ next by (simp only: pv) (erule not_prefix_cases, auto intro: r1 r2 ih) qed -lemma rsubst: - "\ P s; s = t \ \ P t" +lemmas rsubst = back_subst[where a=s and b=t for s t] + +lemma rsubst2: + "\P a b; a = s; b = t\ \ P s t" + by simp + +lemma rsubst3: + "\P a b c ; a = s; b = t; c = u\ \ P s t u" + by simp + +lemma rsubst4: + "\P a b c d; a = s; b = t; c = u; d = v\ \ P s t u v" by simp lemma ex_impE: "((\x. 
P x) \ Q) \ P x \ Q" @@ -2604,38 +2554,6 @@ lemma option_Some_value_independent: "\ f x = Some v; \v'. f x = Some v' \ f y = Some v' \ \ f y = Some v" by blast -text \Some int bitwise lemmas. Helpers for proofs about \<^file>\NatBitwise.thy\\ -lemma int_2p_eq_shiftl: - "(2::int)^x = 1 << x" - by (simp add: shiftl_int_def) - -lemma nat_int_mul: - "nat (int a * b) = a * nat b" - by (simp add: nat_mult_distrib) - -lemma int_shiftl_less_cancel: - "n \ m \ ((x :: int) << n < y << m) = (x < y << (m - n))" - apply (drule le_Suc_ex) - apply (clarsimp simp: shiftl_int_def power_add) - done - -lemma int_shiftl_lt_2p_bits: - "0 \ (x::int) \ x < 1 << n \ \i \ n. \ x !! i" - apply (clarsimp simp: shiftl_int_def) - by (metis bit_take_bit_iff not_less take_bit_int_eq_self_iff) -\ \TODO: The converse should be true as well, but seems hard to prove.\ - -lemmas int_eq_test_bit = bin_eq_iff -lemmas int_eq_test_bitI = int_eq_test_bit[THEN iffD2, rule_format] - -lemma le_nat_shrink_left: - "y \ z \ y = Suc x \ x < z" - by simp - -lemma length_ge_split: - "n < length xs \ \x xs'. xs = x # xs' \ n \ length xs'" - by (cases xs) auto - text \Support for defining enumerations on datatypes derived from enumerations\ lemma distinct_map_enum: "\ (\ x y. (F x = F y \ x = y )) \ @@ -2661,12 +2579,58 @@ lemma if_option_None_eq: "((if P then Some x else None) = None) = (\P)" by simp+ -lemmas if_option = if_option_None_eq if_option_Some if_option_Some2 +lemma option_case_all_conv: + "(case x of None \ True | Some v \ P v) = (\v. x = Some v \ P v)" + by (auto split: option.split) + +lemma prod_o_comp: + "(case x of (a, b) \ f a b) \ g = (case x of (a, b) \ f a b \ g)" + by (auto simp: split_def) + +lemma lhs_sym_eq: + "(a = b) = x \ (b = a) = x" + by auto + +(* if_option is not [simp], because it can add x = y equations to the premises of the goal, which + get picked up by the simplifier and may lead to non-termination. *) +lemmas if_option = + if_option_Some + if_option_Some[unfolded lhs_sym_eq] + if_option_Some2 + if_option_Some2[unfolded lhs_sym_eq] + +(* if_option_eq should be safer, but is still not [simp], because P can be an equation, which can + lead to non-termination. *) +lemmas if_option_eq = + if_option_None_eq + if_option_None_eq[unfolded lhs_sym_eq] + if_option_Some_eq + if_option_Some_eq[unfolded lhs_sym_eq] lemma not_in_ran_None_upd: "x \ ran m \ x \ ran (m(y := None))" by (auto simp: ran_def split: if_split) +lemma ranD: + "v \ ran f \ \x. 
f x = Some v" + by (auto simp: ran_def) + +lemma fun_upd_swap: + "a \ c \ hp(c := d, a := b) = hp(a := b, c := d)" + by fastforce + +lemma list_not_head: + "Suc 0 < length ls \ ls \ [hd ls]" + by (cases ls; clarsimp) + +lemma list_not_last: + "Suc 0 < length ls \ ls \ [last ls]" + by (cases ls; clarsimp) + +lemma length_tail_nonempty: + "Suc 0 < length list \ tl list \ []" + by (cases list, simp, simp) + text \Prevent clarsimp and others from creating Some from not None by folding this and unfolding again when safe.\ diff --git a/lib/ListLibLemmas.thy b/lib/ListLibLemmas.thy index bdb72d61f8..d1d1eec73b 100644 --- a/lib/ListLibLemmas.thy +++ b/lib/ListLibLemmas.thy @@ -371,6 +371,46 @@ lemma list_insert_after_after: apply fastforce done +lemma list_insert_before_not_found: + "a \ set ls \ list_insert_before ls a new = ls" + by (induct ls; fastforce) + +lemma list_insert_before_nonempty: + "ls \ [] \ list_insert_before ls a new \ []" + by (induct ls; clarsimp) + +lemma list_insert_before_head: + "xs \ [] \ list_insert_before xs (hd xs) new = new # xs" + by (induct xs; fastforce) + +lemma last_of_list_insert_before: + "xs \ [] \ last (list_insert_before xs a new) = last xs" + by (induct xs; clarsimp simp: list_insert_before_nonempty) + +lemma list_insert_before_distinct: + "\distinct (xs @ ys); ys \ []\ \ list_insert_before (xs @ ys) (hd ys) new = xs @ new # ys" + by (induct xs; fastforce simp add: list_insert_before_head) + +lemma set_list_insert_before: + "\new \ set ls; before \ set ls\ \ set (list_insert_before ls before new) = set ls \ {new}" + apply (induct ls; clarsimp) + apply fastforce + done + +lemma takeWhile_dropWhile_insert_list_before: + "\distinct ls; sorted (map f ls); sfx \ []; suffix sfx ls; + \pfx v. pfx @ sfx = ls \ v \ set pfx \ f v \ f new; f new < f (hd sfx)\ \ + map fst (takeWhile (\(_, t). t \ f new) (zip ls (map f ls))) + @ new + # map fst (dropWhile (\(_, t). t \ f new) (zip ls (map f ls))) + = list_insert_before ls (hd sfx) new" + apply (cases sfx; simp) + apply (simp add: suffix_def map_fst_takeWhile_zip map_fst_dropWhile_zip) + apply (elim exE, rename_tac xs) + apply (cut_tac xs=xs and ys="a # list" and new=new in list_insert_before_distinct) + apply auto + done + lemma list_remove_removed: "set (list_remove list x) = (set list) - {x}" apply (induct list,simp+) @@ -397,6 +437,16 @@ lemma list_remove_none: "x \ set list \ list_remove list apply clarsimp+ done +lemma list_remove_append[simp]: + "list_remove (xs @ ys) t = list_remove xs t @ list_remove ys t" + by (induct xs) auto + +lemma list_remove_middle_distinct: + "\distinct q; q = ys @ t # zs\ \ list_remove q t = ys @ zs" + apply (induct q rule: length_induct) + apply (simp add: list_remove_none) + done + lemma replace_distinct: "x \ set list \ distinct list \ distinct (list_replace list y x)" apply (induct list) apply (simp add: list_replace_def)+ @@ -1091,4 +1141,28 @@ lemma map2_append1: "map2 f (as @ bs) cs = map2 f as (take (length as) cs) @ map2 f bs (drop (length as) cs)" by (simp add: map_def zip_append1) +lemma sorted_lastD: + "\ sorted xs; xs \ [] \ \ \x\set xs. x \ last xs" + by (induct xs, auto) + +lemma sorted_last_leD: + "\ sorted xs; y \ last xs ; xs \ [] \ \ \x\set xs. 
x \ y" + by (fastforce dest: sorted_lastD) + +lemma length_gt_1_imp_butlast_nonempty: + "Suc 0 < length xs \ butlast xs \ []" + by (cases xs; clarsimp) + +lemma not_last_in_set_butlast: + "\ptr \ set ls; ptr \ last ls\ \ ptr \ set (butlast ls)" + apply (induct ls; simp) + apply fastforce + done + +lemma distinct_in_butlast_not_last: + "\distinct ls; x \ set (butlast ls)\ \ x \ last ls" + apply (induct ls; simp) + apply force + done + end diff --git a/lib/List_Lib.thy b/lib/List_Lib.thy index 936f86e6e6..e6471707cb 100644 --- a/lib/List_Lib.thy +++ b/lib/List_Lib.thy @@ -26,6 +26,12 @@ primrec list_insert_after :: "'a list \ 'a \ 'a \ \Inserts the value new immediately before the first occurence of a (if any) in the list\ +primrec list_insert_before :: "'a list \ 'a \ 'a \ 'a list" where + "list_insert_before [] a new = []" | + "list_insert_before (x # xs) a new = (if x = a then new # x # xs + else x # list_insert_before xs a new)" + primrec list_remove :: "'a list \ 'a \ 'a list" where "list_remove [] a = []" | "list_remove (x # xs) a = (if x = a then (list_remove xs a) diff --git a/lib/ML_Goal.thy b/lib/ML_Goal.thy index 3c9f499177..2121edb5d7 100644 --- a/lib/ML_Goal.thy +++ b/lib/ML_Goal.thy @@ -109,7 +109,7 @@ fun begin_proof ((name, attrs): Attrib.binding, ml_block: Input.source) ctxt = val ((res_name, res), ctxt') = Local_Theory.note (binding, thms) ctxt; val _ = - Proof_Display.print_results true start_pos ctxt' + Proof_Display.print_results { interactive = true, pos = start_pos, proof_state = true } ctxt' (("theorem", res_name), [("", res)]) in ctxt' end in diff --git a/lib/ML_Goal_Test.thy b/lib/ML_Goal_Test.thy index 989ff7f684..925b55d0ba 100644 --- a/lib/ML_Goal_Test.thy +++ b/lib/ML_Goal_Test.thy @@ -7,7 +7,7 @@ theory ML_Goal_Test imports ML_Goal - MLUtils + ML_Utils.ML_Utils begin experiment begin diff --git a/lib/ml-helpers/ListExtras.ML b/lib/ML_Utils/ListExtras.ML similarity index 100% rename from lib/ml-helpers/ListExtras.ML rename to lib/ML_Utils/ListExtras.ML diff --git a/lib/ml-helpers/MLUtils.thy b/lib/ML_Utils/ML_Utils.thy similarity index 88% rename from lib/ml-helpers/MLUtils.thy rename to lib/ML_Utils/ML_Utils.thy index c4dec527a0..34541723d6 100644 --- a/lib/ml-helpers/MLUtils.thy +++ b/lib/ML_Utils/ML_Utils.thy @@ -5,7 +5,7 @@ *) \\ - MLUtils is a collection of 'basic' ML utilities (kind of like @{file + ML_Utils is a collection of 'basic' ML utilities (kind of like @{file "~~/src/Pure/library.ML"}, but maintained by Trustworthy Systems). If you find yourself implementing: - A simple data-structure-shuffling task, @@ -16,7 +16,7 @@ consider adding it here. 
\ -theory MLUtils +theory ML_Utils imports Main begin ML_file "StringExtras.ML" diff --git a/lib/ml-helpers/MethodExtras.ML b/lib/ML_Utils/MethodExtras.ML similarity index 100% rename from lib/ml-helpers/MethodExtras.ML rename to lib/ML_Utils/MethodExtras.ML diff --git a/lib/ml-helpers/MkTermAntiquote.thy b/lib/ML_Utils/MkTermAntiquote.thy similarity index 100% rename from lib/ml-helpers/MkTermAntiquote.thy rename to lib/ML_Utils/MkTermAntiquote.thy diff --git a/lib/ml-helpers/MkTermAntiquote_Tests.thy b/lib/ML_Utils/MkTermAntiquote_Tests.thy similarity index 96% rename from lib/ml-helpers/MkTermAntiquote_Tests.thy rename to lib/ML_Utils/MkTermAntiquote_Tests.thy index 72cdb9a17a..48f09578f1 100644 --- a/lib/ml-helpers/MkTermAntiquote_Tests.thy +++ b/lib/ML_Utils/MkTermAntiquote_Tests.thy @@ -9,8 +9,8 @@ text \ \ theory MkTermAntiquote_Tests imports - Lib.MkTermAntiquote - Main + MkTermAntiquote + Main (* MkTermAntiquote imports only Pure *) begin text \ diff --git a/lib/ml-helpers/OptionExtras.ML b/lib/ML_Utils/OptionExtras.ML similarity index 100% rename from lib/ml-helpers/OptionExtras.ML rename to lib/ML_Utils/OptionExtras.ML diff --git a/lib/ML_Utils/README.md b/lib/ML_Utils/README.md new file mode 100644 index 0000000000..d29e6f7f73 --- /dev/null +++ b/lib/ML_Utils/README.md @@ -0,0 +1,15 @@ + + +# Isabelle/ML Utilities + +`ML_Utils` is a collection of 'basic' ML utilities (similar to +`~~/src/Pure/library.ML`}). If you find yourself implementing: + +- a simple data-structure-shuffling task, +- something that shows up in the standard library of other functional languages, or +- something that's "missing" from the general pattern of an Isabelle ML library, + +consider adding it here. diff --git a/lib/ML_Utils/ROOT b/lib/ML_Utils/ROOT new file mode 100644 index 0000000000..9ea3ec85b1 --- /dev/null +++ b/lib/ML_Utils/ROOT @@ -0,0 +1,18 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +chapter Lib + +session ML_Utils (lib) = HOL + + theories + ML_Utils + MkTermAntiquote + TermPatternAntiquote + TacticAntiquotation + TacticTutorial + MkTermAntiquote_Tests + TacticAntiquotation_Test + TermPatternAntiquote_Tests diff --git a/lib/ml-helpers/StringExtras.ML b/lib/ML_Utils/StringExtras.ML similarity index 100% rename from lib/ml-helpers/StringExtras.ML rename to lib/ML_Utils/StringExtras.ML diff --git a/lib/ml-helpers/Sum.ML b/lib/ML_Utils/Sum.ML similarity index 100% rename from lib/ml-helpers/Sum.ML rename to lib/ML_Utils/Sum.ML diff --git a/lib/ml-helpers/TacticAntiquotation.thy b/lib/ML_Utils/TacticAntiquotation.thy similarity index 100% rename from lib/ml-helpers/TacticAntiquotation.thy rename to lib/ML_Utils/TacticAntiquotation.thy diff --git a/lib/ml-helpers/TacticAntiquotation_Test.thy b/lib/ML_Utils/TacticAntiquotation_Test.thy similarity index 98% rename from lib/ml-helpers/TacticAntiquotation_Test.thy rename to lib/ML_Utils/TacticAntiquotation_Test.thy index 2d7c0c5b57..56cce273a0 100644 --- a/lib/ml-helpers/TacticAntiquotation_Test.thy +++ b/lib/ML_Utils/TacticAntiquotation_Test.thy @@ -8,7 +8,7 @@ theory TacticAntiquotation_Test imports - Lib.TacticAntiquotation + TacticAntiquotation begin text \Simple tests.\ diff --git a/lib/ml-helpers/TacticTutorial.thy b/lib/ML_Utils/TacticTutorial.thy similarity index 100% rename from lib/ml-helpers/TacticTutorial.thy rename to lib/ML_Utils/TacticTutorial.thy diff --git a/lib/ml-helpers/TermExtras.ML b/lib/ML_Utils/TermExtras.ML similarity index 100% rename from lib/ml-helpers/TermExtras.ML 
rename to lib/ML_Utils/TermExtras.ML diff --git a/lib/ml-helpers/TermPatternAntiquote.thy b/lib/ML_Utils/TermPatternAntiquote.thy similarity index 100% rename from lib/ml-helpers/TermPatternAntiquote.thy rename to lib/ML_Utils/TermPatternAntiquote.thy diff --git a/lib/ml-helpers/TermPatternAntiquote_Tests.thy b/lib/ML_Utils/TermPatternAntiquote_Tests.thy similarity index 96% rename from lib/ml-helpers/TermPatternAntiquote_Tests.thy rename to lib/ML_Utils/TermPatternAntiquote_Tests.thy index b1678ef288..9fe4b41f8a 100644 --- a/lib/ml-helpers/TermPatternAntiquote_Tests.thy +++ b/lib/ML_Utils/TermPatternAntiquote_Tests.thy @@ -6,8 +6,8 @@ theory TermPatternAntiquote_Tests imports - Lib.TermPatternAntiquote - Main + TermPatternAntiquote + Main (* TermPatternAntiquote imports only Pure *) begin text \ diff --git a/lib/ml-helpers/ThmExtras.ML b/lib/ML_Utils/ThmExtras.ML similarity index 100% rename from lib/ml-helpers/ThmExtras.ML rename to lib/ML_Utils/ThmExtras.ML diff --git a/lib/ml-helpers/mkterm_antiquote.ML b/lib/ML_Utils/mkterm_antiquote.ML similarity index 100% rename from lib/ml-helpers/mkterm_antiquote.ML rename to lib/ML_Utils/mkterm_antiquote.ML diff --git a/lib/ML_Utils/tests.xml b/lib/ML_Utils/tests.xml new file mode 100644 index 0000000000..dc8ed539b5 --- /dev/null +++ b/lib/ML_Utils/tests.xml @@ -0,0 +1,22 @@ + + + + + + + + ../../isabelle/bin/isabelle build -v -d ../.. ML_Utils + + + diff --git a/lib/Match_Abbreviation.thy b/lib/Match_Abbreviation.thy index a664d21e3e..83bb205bbf 100644 --- a/lib/Match_Abbreviation.thy +++ b/lib/Match_Abbreviation.thy @@ -138,12 +138,12 @@ fun do_adjust ctxt ((("select", []), [p]), fixes) t = let | do_adjust _ args _ = error ("do_adjust: unexpected: " ^ @{make_string} args) fun unvarify_types_same ty = ty - |> Term_Subst.map_atypsT_same + |> Term.map_atyps_same (fn TVar ((a, i), S) => TFree (a ^ "_var_" ^ string_of_int i, S) | _ => raise Same.SAME) fun unvarify_types tm = tm - |> Same.commit (Term_Subst.map_types_same unvarify_types_same) + |> Same.commit (Term.map_types_same unvarify_types_same) fun match_abbreviation mode name init adjusts int ctxt = let val init_term = init ctxt diff --git a/lib/Monad_Commute.thy b/lib/Monad_Commute.thy new file mode 100644 index 0000000000..a5bbf317bf --- /dev/null +++ b/lib/Monad_Commute.thy @@ -0,0 +1,198 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* The monad_commute predicate + lemmas. *) + +theory Monad_Commute + imports + Monads.Nondet_Monad_Equations + Monad_Lists (* for mapM_x *) +begin + + +definition monad_commute where + "monad_commute P a b \ + \s. (P s \ ((do x\a;y\b; return (x, y) od) s) = ((do y\b;x\a; return (x, y) od) s))" + + +lemma monad_eq: + "a s = b s \ (a >>= g) s = (b >>= g) s" + by (auto simp: bind_def) + +lemma monad_commute_simple: + "\ monad_commute P a b; P s \ \ (do x\a;y\b; g x y od) s = (do y\b;x\a; g x y od) s" + apply (clarsimp simp: monad_commute_def) + apply (drule spec) + apply (erule(1) impE) + apply (drule_tac g = "(\t. 
g (fst t) (snd t))" in monad_eq) + apply (simp add: bind_assoc) + done + +lemma monad_commute_simple': + "monad_commute \ a b \ (do x \ a; y \ b; g x y od) = (do y \ b; x \ a; g x y od)" + apply (clarsimp simp: monad_commute_def) + apply (fastforce simp: bind_def' return_def) + done + +lemma commute_commute: + "monad_commute P f h \ monad_commute P h f" + apply (simp (no_asm) add: monad_commute_def) + apply (clarsimp) + apply (erule monad_commute_simple[symmetric]) + apply simp + done + +lemma assert_commute: "monad_commute (K G) (assert G) f" + by (clarsimp simp: assert_def monad_commute_def) + +lemma cond_fail_commute: "monad_commute (K (\G)) (when G fail) f" + by (clarsimp simp: when_def fail_def monad_commute_def) + +lemma return_commute: "monad_commute \ (return a) f" + by (clarsimp simp: return_def bind_def monad_commute_def) + +lemma monad_commute_guard_imp: + "\monad_commute P f h; \s. Q s \ P s \ \ monad_commute Q f h" + by (clarsimp simp: monad_commute_def) + +lemma monad_commute_split: + "\\r. monad_commute (Q r) f (g r); monad_commute P f h; \P'\ h \\r. Q r\\ + \ monad_commute (P and P') f (h>>=g)" + apply (simp (no_asm) add: monad_commute_def) + apply (clarsimp simp: bind_assoc) + apply (subst monad_commute_simple) + apply simp+ + apply (rule_tac Q = "(\x. Q x)" in monad_eq_split) + apply (subst monad_commute_simple[where a = f]) + apply assumption + apply simp+ + done + +lemma monad_commute_get: + assumes hf: "\P. \P\ f \\r. P\" + and hg: "\P. \P\ g \\r. P\" + and eptyf: "empty_fail f" "empty_fail g" + shows "monad_commute \ f g" +proof - + have fsame: "\a b s. (a,b) \ fst (f s) \ b = s" + by (drule use_valid[OF _ hf],auto) + have gsame: "\a b s. (a,b) \ fst (g s) \ b = s" + by (drule use_valid[OF _ hg],auto) + note ef = empty_fail_not_snd[OF _ eptyf(1)] + note eg = empty_fail_not_snd[OF _ eptyf(2)] + show ?thesis + apply (simp add: monad_commute_def) + apply (clarsimp simp: bind_def split_def return_def) + apply (intro conjI) + apply (rule set_eqI) + apply (rule iffI) + apply (clarsimp simp:Union_eq) + apply (frule fsame) + apply clarsimp + apply (frule gsame) + apply (metis fst_conv snd_conv) + apply (clarsimp simp:Union_eq) + apply (frule gsame) + apply clarsimp + apply (frule fsame) + apply clarsimp + apply (metis fst_conv snd_conv) + apply (rule iffI) + apply (erule disjE) + apply (clarsimp simp: image_def) + apply (metis fsame) + apply (clarsimp simp: image_def) + apply (drule eg) + apply clarsimp + apply (rule bexI [rotated], assumption) + apply (frule gsame) + apply clarsimp + apply (erule disjE) + apply (clarsimp simp: image_def dest!: gsame) + apply (clarsimp simp: image_def) + apply (drule ef) + apply clarsimp + apply (frule fsame) + apply (erule bexI[rotated]) + apply simp + done +qed + +lemma mapM_x_commute: + assumes commute: "\r. monad_commute (P r) a (b r)" + and single: "\r x. \P r and K (f r \ f x) and P x\ b x \\v. P r \" + shows "monad_commute (\s. (distinct (map f list)) \ (\r\ set list. P r s)) a (mapM_x b list)" + apply (induct list) + apply (clarsimp simp: mapM_x_Nil return_def bind_def monad_commute_def) + apply (clarsimp simp: mapM_x_Cons) + apply (rule monad_commute_guard_imp) + apply (rule monad_commute_split) + apply assumption + apply (rule monad_commute_guard_imp[OF commute]) + apply assumption + apply (wp hoare_vcg_const_Ball_lift) + apply (rule single) + apply (clarsimp simp: image_def) + apply auto + done + +lemma commute_name_pre_state: + assumes "\s. 
P s \ monad_commute ((=) s) f g" + shows "monad_commute P f g" + using assms + by (clarsimp simp: monad_commute_def) + +lemma commute_rewrite: + assumes rewrite: "\s. Q s \ f s = t s" + assumes hold: "\P\ g \\x. Q\" + shows "monad_commute R t g \ monad_commute (P and Q and R) f g" + apply (clarsimp simp: monad_commute_def bind_def split_def return_def) + apply (drule_tac x = s in spec) + apply (clarsimp simp: rewrite[symmetric]) + apply (intro conjI) + apply (rule set_eqI) + apply (rule iffI) + apply clarsimp + apply (rule bexI[rotated],assumption) + apply (subst rewrite) + apply (rule use_valid[OF _ hold]) + apply simp+ + apply (erule bexI[rotated],simp) + apply clarsimp + apply (rule bexI[rotated],assumption) + apply (subst rewrite[symmetric]) + apply (rule use_valid[OF _ hold]) + apply simp+ + apply (erule bexI[rotated],simp) + apply (intro iffI) + apply clarsimp + apply (rule bexI[rotated],assumption) + apply simp + apply (subst rewrite) + apply (erule(1) use_valid[OF _ hold]) + apply simp + apply (clarsimp) + apply (drule bspec,assumption) + apply clarsimp + apply (metis rewrite use_valid[OF _ hold]) + done + +lemma commute_grab_asm: + "(F \ monad_commute P f g) \ (monad_commute (P and (K F)) f g)" + by (clarsimp simp: monad_commute_def) + +lemma select_modify_comm: + "(do b \ select S; _ \ modify f; use b od) = + (do _ \ modify f; b \ select S; use b od)" + by (simp add: bind_def split_def select_def simpler_modify_def image_def) + +lemma select_f_modify_comm: + "(do b \ select_f S; _ \ modify f; use b od) = + (do _ \ modify f; b \ select_f S; use b od)" + by (simp add: bind_def split_def select_f_def simpler_modify_def image_def) + +end \ No newline at end of file diff --git a/lib/Monad_Lists.thy b/lib/Monad_Lists.thy new file mode 100644 index 0000000000..176101e3d2 --- /dev/null +++ b/lib/Monad_Lists.thy @@ -0,0 +1,680 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Monadic functions over lists: sequence, mapM, filter, etc + Definitions, equations, Hoare logic and no_fail/empty_fail setup. *) + +theory Monad_Lists + imports + Monads.Nondet_In_Monad + Monads.Nondet_Det + Monads.Nondet_Empty_Fail + Monads.Nondet_No_Fail +begin + +lemma mapME_Cons: + "mapME m (x # xs) = (doE y \ m x; ys \ (mapME m xs); returnOk (y # ys) odE)" + by (simp add: mapME_def sequenceE_def Let_def) + +lemma mapME_Nil : "mapME f [] = returnOk []" + unfolding mapME_def by (simp add: sequenceE_def) + +lemmas mapME_simps = mapME_Nil mapME_Cons + +lemma zipWithM_x_inv': + assumes x: "\x y. m x y \P\" + shows "zipWithM_x m xs ys \P\" +proof (induct xs arbitrary: ys) + case Nil + show ?case + by (simp add: zipWithM_x_def sequence_x_def zipWith_def) +next + case (Cons x xs) + have zipWithM_x_Cons: + "\m x xs y ys. zipWithM_x m (x # xs) (y # ys) = do m x y; zipWithM_x m xs ys od" + by (simp add: zipWithM_x_def sequence_x_def zipWith_def) + have zipWithM_x_Nil: + "\m xs. zipWithM_x m xs [] = return ()" + by (simp add: zipWithM_x_def sequence_x_def zipWith_def) + show ?case + by (cases ys; wpsimp simp: zipWithM_x_Nil zipWithM_x_Cons wp: Cons x) +qed + +(* For compatibility with existing proofs. *) +lemma zipWithM_x_inv: + assumes x: "\x y. m x y \P\" + shows "length xs = length ys \ zipWithM_x m xs ys \P\" + by (rule zipWithM_x_inv', rule x) + +lemma sequence_x_Cons: "\x xs. sequence_x (x # xs) = (x >>= (\_. 
sequence_x xs))" + by (simp add: sequence_x_def) + +lemma mapM_Cons: "mapM m (x # xs) = (do y \ m x; ys \ (mapM m xs); return (y # ys) od)" + by (simp add: mapM_def sequence_def Let_def) + +lemma mapM_Nil: + "mapM m [] = return []" + by (simp add: mapM_def sequence_def) + +lemmas mapM_simps = mapM_Nil mapM_Cons + +lemma zipWithM_x_mapM: + "zipWithM_x f as bs = (mapM (case_prod f) (zip as bs) >>= (\_. return ()))" + apply (simp add: zipWithM_x_def zipWith_def) + apply (induct ("zip as bs")) + apply (simp add: sequence_x_def mapM_def sequence_def) + apply (simp add: sequence_x_Cons mapM_Cons bind_assoc) + done + +lemma mapM_x_mapM: + "mapM_x m l = (mapM m l >>= (\x. return ()))" + apply (simp add: mapM_x_def sequence_x_def mapM_def sequence_def) + apply (induct l, simp_all add: Let_def bind_assoc) + done + +lemma mapM_x_Nil: + "mapM_x f [] = return ()" + unfolding mapM_x_def sequence_x_def + by simp + +lemma sequence_xappend1: + "sequence_x (xs @ [x]) = (sequence_x xs >>= (\_. x))" + by (induct xs) (simp add: sequence_x_def, simp add: sequence_x_Cons bind_assoc) + +lemma mapM_append_single: + "mapM_x f (xs @ [y]) = (mapM_x f xs >>= (\_. f y))" + unfolding mapM_x_def + by (simp add: sequence_xappend1) + +lemma mapM_x_Cons: + "mapM_x m (x # xs) = (do m x; mapM_x m xs od)" + by (simp add: mapM_x_def sequence_x_def) + +lemma zipWithM_x_mapM_x: + "zipWithM_x f as bs = mapM_x (\(x, y). f x y) (zip as bs)" + apply (subst zipWithM_x_mapM) + apply (subst mapM_x_mapM) + apply (rule refl) + done + +lemma zipWithM_x_append1: + fixes f :: "'b \ 'c \ ('a, unit) nondet_monad" + assumes ls: "length xs = length ys" + shows "(zipWithM_x f (xs @ [x]) (ys @ [y])) = (zipWithM_x f xs ys >>= (\_. f x y))" + unfolding zipWithM_x_def zipWith_def + by (subst zip_append [OF ls], simp, rule sequence_xappend1) + +lemma zipWithM_x_Cons: + assumes ls: "length xs = length ys" + shows "(zipWithM_x f (x # xs) (y # ys)) = (f x y >>= (\_. zipWithM_x f xs ys))" + unfolding zipWithM_x_def zipWith_def + by (simp, rule sequence_x_Cons) + +lemma mapME_x_map_simp: + "mapME_x m (map f xs) = mapME_x (m o f) xs" + by (simp add: mapME_x_def sequenceE_x_def) + +lemma mapM_return: + "mapM (\x. return (f x)) xs = return (map f xs)" + apply (induct xs) + apply (simp add: mapM_def sequence_def) + apply (simp add: mapM_Cons) + done + +lemma liftM_return [simp]: + "liftM f (return x) = return (f x)" + by (simp add: liftM_def) + +lemma mapM_x_return : + "mapM_x (\_. return v) xs = return v" + by (induct xs) (auto simp: mapM_x_Nil mapM_x_Cons) + +lemma bind_comm_mapM_comm: + assumes bind_comm: + "\n z. do x \ a; y \ b z; (n x y :: ('a, 's) nondet_monad) od = + do y \ b z; x \ a; n x y od" + shows "\n'. 
do x \ a; ys \ mapM b zs; (n' x ys :: ('a, 's) nondet_monad) od = + do ys \ mapM b zs; x \ a; n' x ys od" +proof (induct zs) + case Nil + thus ?case + by (simp add: mapM_def sequence_def) + next + case (Cons z zs') + thus ?case + by (clarsimp simp: mapM_Cons bind_assoc bind_comm intro!: bind_cong [OF refl]) +qed + +lemma liftE_handle : + "(liftE f g) = liftE f" + by (simp add: handleE_def handleE'_def liftE_def) + +lemma mapM_empty: + "mapM f [] = return []" + unfolding mapM_def + by (simp add: sequence_def) + +lemma mapM_append: + "mapM f (xs @ ys) = + (do x \ mapM f xs; + y \ mapM f ys; + return (x @ y) + od)" +proof (induct xs) + case Nil + thus ?case by (simp add: mapM_empty) +next + case (Cons x xs) + + show ?case + by (simp add: mapM_Cons bind_assoc Cons.hyps) +qed + +lemma mapM_x_append: (* FIXME: remove extra return, fix proofs *) + "mapM_x f (xs @ ys) = + (do x \ mapM_x f xs; + y \ mapM_x f ys; + return () + od)" + by (simp add: mapM_x_mapM mapM_append bind_assoc) + +(* FIXME: duplicate, but mapM_x_append has an extra useless return *) +lemma mapM_x_append2: + "mapM_x f (xs @ ys) = do mapM_x f xs; mapM_x f ys od" + apply (simp add: mapM_x_def sequence_x_def) + apply (induct xs) + apply simp + apply (simp add: bind_assoc) + done + +lemma mapM_singleton: + "mapM f [x] = do r \ f x; return [r] od" + by (simp add: mapM_def sequence_def) + +lemma mapM_x_singleton: + "mapM_x f [x] = f x" + by (simp add: mapM_x_mapM mapM_singleton) + +lemma mapME_x_sequenceE: + "mapME_x f xs \ doE _ \ sequenceE (map f xs); returnOk () odE" + apply (induct xs, simp_all add: mapME_x_def sequenceE_def sequenceE_x_def) + apply (simp add: Let_def bindE_assoc) + done + +lemma sequenceE_Cons: + "sequenceE (x # xs) = (doE v \ x; vs \ sequenceE xs; returnOk (v # vs) odE)" + by (simp add: sequenceE_def Let_def) + +lemma zipWithM_Nil [simp]: + "zipWithM f xs [] = return []" + by (simp add: zipWithM_def zipWith_def sequence_def) + +lemma zipWithM_One: + "zipWithM f (x#xs) [a] = (do z \ f x a; return [z] od)" + by (simp add: zipWithM_def zipWith_def sequence_def) + +lemma zipWithM_x_Nil[simp]: + "zipWithM_x f xs [] = return ()" + by (simp add: zipWithM_x_def zipWith_def sequence_x_def) + +lemma zipWithM_x_One: + "zipWithM_x f (x#xs) [a] = f x a" + by (simp add: zipWithM_x_def zipWith_def sequence_x_def) + +lemma mapM_last_Cons: + "\ xs = [] \ g v = y; + xs \ [] \ do x \ f (last xs); return (g x) od + = do x \ f (last xs); return y od \ \ + do ys \ mapM f xs; return (g (last (v # ys))) od = do mapM_x f xs; return y od" + apply (cases "xs = []") + apply (simp add: mapM_x_Nil mapM_Nil) + apply (simp add: mapM_x_mapM) + apply (subst append_butlast_last_id[symmetric], assumption, + subst mapM_append)+ + apply (simp add: bind_assoc mapM_Cons mapM_Nil) + done + +lemma map_length_cong: + "\ length xs = length ys; \x y. (x, y) \ set (zip xs ys) \ f x = g y \ + \ map f xs = map g ys" + apply atomize + apply (erule rev_mp, erule list_induct2) + apply auto + done + +lemma mapM_length_cong: + "\ length xs = length ys; \x y. (x, y) \ set (zip xs ys) \ f x = g y \ + \ mapM f xs = mapM g ys" + by (simp add: mapM_def map_length_cong) + +(* FIXME: duplicate *) +lemma zipWithM_mapM: + "zipWithM f xs ys = mapM (case_prod f) (zip xs ys)" + by (simp add: zipWithM_def zipWith_def mapM_def) + +lemma zip_take_triv2: + "length as \ n \ zip as (take n bs) = zip as bs" + apply (induct as arbitrary: n bs; simp) + apply (case_tac n; simp) + apply (case_tac bs; simp) + done + +lemma zipWithM_If_cut: + "zipWithM (\a b. 
if a < n then f a b else g a b) [0 ..< m] xs + = do ys \ zipWithM f [0 ..< min n m] xs; + zs \ zipWithM g [n ..< m] (drop n xs); + return (ys @ zs) + od" + apply (cases "n < m") + apply (cut_tac i=0 and j=n and k="m - n" in upt_add_eq_append) + apply simp + apply (simp add: zipWithM_mapM) + apply (simp add: zip_append1 mapM_append zip_take_triv2 split_def) + apply (intro bind_cong bind_apply_cong refl mapM_length_cong + fun_cong[OF mapM_length_cong]) + apply (clarsimp simp: set_zip) + apply (clarsimp simp: set_zip) + apply (simp add: zipWithM_mapM mapM_Nil) + apply (intro mapM_length_cong refl) + apply (clarsimp simp: set_zip) + done + +lemma mapM_liftM_const: + "mapM (\x. liftM (\y. f x) (g x)) xs = liftM (\ys. map f xs) (mapM g xs)" + apply (induct xs) + apply (simp add: mapM_Nil) + apply (simp add: mapM_Cons) + apply (simp add: liftM_def bind_assoc) + done + +lemma mapM_discarded: + "mapM f xs >>= (\ys. g) = mapM_x f xs >>= (\_. g)" + by (simp add: mapM_x_mapM) + +lemma mapM_x_map: + "mapM_x f (map g xs) = mapM_x (\x. f (g x)) xs" + by (simp add: mapM_x_def o_def) + +lemma filterM_append: + "filterM f (xs @ ys) = do + xs' \ filterM f xs; + ys' \ filterM f ys; + return (xs' @ ys') + od" + apply (induct xs) + apply simp + apply (simp add: bind_assoc) + apply (rule ext bind_apply_cong [OF refl])+ + apply simp + done + +lemma filterM_mapM: + "filterM f xs = do + ys \ mapM (\x. do v \ f x; return (x, v) od) xs; + return (map fst (filter snd ys)) + od" + apply (induct xs) + apply (simp add: mapM_def sequence_def) + apply (simp add: mapM_Cons bind_assoc) + apply (rule bind_cong [OF refl] bind_apply_cong[OF refl])+ + apply simp + done + +lemma mapM_gets: + assumes P: "\x. m x = gets (f x)" + shows "mapM m xs = gets (\s. map (\x. f x s) xs)" +proof (induct xs) + case Nil show ?case + by (simp add: mapM_def sequence_def gets_def get_def bind_def) +next + case (Cons y ys) thus ?case + by (simp add: mapM_Cons P simpler_gets_def return_def bind_def) +qed + +lemma mapM_map_simp: + "mapM m (map f xs) = mapM (m \ f) xs" + apply (induct xs) + apply (simp add: mapM_def sequence_def) + apply (simp add: mapM_Cons) + done + +lemma filterM_voodoo: + "\ys. P ys (do zs \ filterM m xs; return (ys @ zs) od) + \ P [] (filterM m xs)" + by (drule spec[where x=Nil], simp) + +lemma mapME_x_Cons: + "mapME_x f (x # xs) = (doE f x; mapME_x f xs odE)" + by (simp add: mapME_x_def sequenceE_x_def) + +lemma liftME_map_mapME: + "liftME (map f) (mapME m xs) + = mapME (liftME f o m) xs" + apply (rule sym) + apply (induct xs) + apply (simp add: liftME_def mapME_Nil) + apply (simp add: mapME_Cons liftME_def bindE_assoc) + done + +lemma mapM_x_split_append: + "mapM_x f xs = do _ \ mapM_x f (take n xs); mapM_x f (drop n xs) od" + using mapM_x_append[where f=f and xs="take n xs" and ys="drop n xs"] + by simp + +lemma mapME_wp: + assumes x: "\x. x \ S \ \P\ f x \\_. P\, \\_. E\" + shows "set xs \ S \ \P\ mapME f xs \\_. P\, \\_. E\" + apply (induct xs) + apply (simp add: mapME_def sequenceE_def) + apply wp + apply simp + apply (simp add: mapME_Cons) + apply (wp x|simp)+ + done + +lemmas mapME_wp' = mapME_wp [OF _ subset_refl] + +lemma mapM_x_inv_wp3: + fixes m :: "'b \ ('a, unit) nondet_monad" + assumes hr: "\a as bs. xs = as @ [a] @ bs \ + \\s. I s \ V as s\ m a \\r s. I s \ V (as @ [a]) s\" + shows "\\s. I s \ V [] s\ mapM_x m xs \\rv s. 
I s \ V xs s\" + using hr +proof (induct xs rule: rev_induct) + case Nil thus ?case + by (simp add: mapM_x_Nil) +next + case (snoc x xs) + show ?case + apply (simp add: mapM_append_single) + apply (wp snoc.prems) + apply simp + apply (rule snoc.hyps [OF snoc.prems]) + apply simp + apply assumption + done +qed + +lemma mapME_x_inv_wp: + assumes x: "\x. \P\ f x \\rv. P\,\E\" + shows "\P\ mapME_x f xs \\rv. P\,\E\" + apply (induct xs) + apply (simp add: mapME_x_def sequenceE_x_def) + apply wp + apply (simp add: mapME_x_def sequenceE_x_def) + apply (fold mapME_x_def sequenceE_x_def) + apply wp + apply (rule x) + apply assumption + done + +lemma mapM_upd: + assumes "\x rv s s'. (rv,s') \ fst (f x s) \ x \ set xs \ (rv, g s') \ fst (f x (g s))" + shows "(rv,s') \ fst (mapM f xs s) \ (rv, g s') \ fst (mapM f xs (g s))" + using assms +proof (induct xs arbitrary: rv s s') + case Nil + thus ?case by (simp add: mapM_Nil return_def) +next + case (Cons z zs) + from Cons.prems + show ?case + apply (clarsimp simp: mapM_Cons in_monad) + apply (drule Cons.prems, simp) + apply (rule exI, erule conjI) + apply (erule Cons.hyps) + apply (erule Cons.prems) + apply simp + done +qed + +lemma no_fail_mapM_wp: + assumes "\x. x \ set xs \ no_fail (P x) (f x)" + assumes "\x y. \ x \ set xs; y \ set xs \ \ \P x\ f y \\_. P x\" + shows "no_fail (\s. \x \ set xs. P x s) (mapM f xs)" + using assms +proof (induct xs) + case Nil + thus ?case by (simp add: mapM_empty) +next + case (Cons z zs) + show ?case + apply (clarsimp simp: mapM_Cons) + apply (wp Cons.prems Cons.hyps hoare_vcg_const_Ball_lift|simp)+ + done +qed + +lemma no_fail_mapM: + "\x. no_fail \ (f x) \ no_fail \ (mapM f xs)" + apply (induct xs) + apply (simp add: mapM_def sequence_def) + apply (simp add: mapM_Cons) + apply (wp|fastforce)+ + done + +lemma filterM_preserved: + "\ \x. x \ set xs \ \P\ m x \\rv. P\ \ + \ \P\ filterM m xs \\rv. P\" + apply (induct xs) + apply (wp | simp | erule meta_mp | drule meta_spec)+ + done + +lemma filterM_distinct1: + "\\ and K (P \ distinct xs)\ filterM m xs \\rv s. (P \ distinct rv) \ set rv \ set xs\" + apply (rule hoare_gen_asm, erule rev_mp) + apply (rule rev_induct [where xs=xs]) + apply (clarsimp | wp)+ + apply (simp add: filterM_append) + apply (erule bind_wp_fwd) + apply (rule bind_wp_fwd, rule hoare_vcg_prop) + apply (wp, clarsimp) + apply blast + done + +lemma filterM_subset: + "\\\ filterM m xs \\rv s. set rv \ set xs\" + by (rule hoare_chain, rule filterM_distinct1[where P=False], simp_all) + +lemma filterM_all: + "\ \x y. \ x \ set xs; y \ set xs \ \ \P y\ m x \\rv. P y\ \ \ + \\s. \x \ set xs. P x s\ filterM m xs \\rv s. \x \ set rv. P x s\" + apply (rule_tac Q="\rv s. set rv \ set xs \ (\x \ set xs. P x s)" + in hoare_strengthen_post) + apply (wp filterM_subset hoare_vcg_const_Ball_lift filterM_preserved) + apply simp+ + apply blast + done + +lemma filterM_distinct: + "\K (distinct xs)\ filterM m xs \\rv s. distinct rv\" + by (rule hoare_chain, rule filterM_distinct1[where P=True], simp_all) + +lemma mapM_wp: + assumes x: "\x. x \ S \ \P\ f x \\rv. P\" + shows "set xs \ S \ \P\ mapM f xs \\rv. P\" + apply (induct xs) + apply (simp add: mapM_def sequence_def) + apply (simp add: mapM_Cons) + apply wp + apply (rule x, clarsimp) + apply simp + done + +lemma mapM_wp': + assumes x: "\x. x \ set xs \ \P\ f x \\rv. P\" + shows "\P\ mapM f xs \\rv. P\" + apply (rule mapM_wp) + apply (erule x) + apply simp + done + +lemma mapM_set: + assumes "\x. x \ set xs \ \P\ f x \\_. P\" + assumes "\x. x \ set xs \ \P\ f x \\_. 
Q x\" + assumes "\x y. \ x \ set xs; y \ set xs \ \ \P and Q y\ f x \\_. Q y\" + shows "\P\ mapM f xs \\_ s. \x \ set xs. Q x s\" +using assms +proof (induct xs) + case Nil show ?case + by (simp add: mapM_def sequence_def) wp +next + case (Cons y ys) + have PQ_inv: "\x. x \ set ys \ \P and Q y\ f x \\_. P and Q y\" + by (wpsimp wp: Cons) + show ?case + apply (simp add: mapM_Cons) + apply wp + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post) + apply (rule mapM_wp') + apply (erule PQ_inv) + apply simp + apply (wp Cons|simp)+ + done +qed + +lemma mapM_set_inv: + assumes "\x. x \ set xs \ \P\ f x \\_. P\" + assumes "\x. x \ set xs \ \P\ f x \\_. Q x\" + assumes "\x y. \ x \ set xs; y \ set xs \ \ \P and Q y\ f x \\_. Q y\" + shows "\P\ mapM f xs \\_ s. P s \ (\x \ set xs. Q x s)\" + apply (rule hoare_weaken_pre, rule hoare_vcg_conj_lift) + apply (rule mapM_wp', erule assms) + apply (rule mapM_set; rule assms; assumption) + apply simp + done + +lemma mapM_x_wp: + assumes x: "\x. x \ S \ \P\ f x \\rv. P\" + shows "set xs \ S \ \P\ mapM_x f xs \\rv. P\" + by (subst mapM_x_mapM) (wp mapM_wp x) + +lemma no_fail_mapM': + assumes rl: "\x. no_fail (\_. P x) (f x)" + shows "no_fail (\_. \x \ set xs. P x) (mapM f xs)" +proof (induct xs) + case Nil thus ?case by (simp add: mapM_def sequence_def) +next + case (Cons x xs) + + have nf: "no_fail (\_. P x) (f x)" by (rule rl) + have ih: "no_fail (\_. \x \ set xs. P x) (mapM f xs)" by (rule Cons) + + show ?case + apply (simp add: mapM_Cons) + apply (rule no_fail_pre) + apply (wp nf ih) + apply simp + done +qed + +lemma det_mapM: + assumes x: "\x. x \ S \ det (f x)" + shows "set xs \ S \ det (mapM f xs)" + apply (induct xs) + apply (simp add: mapM_def sequence_def) + apply (simp add: mapM_Cons x) + done + +lemma det_zipWithM_x: + assumes x: "\x y. (x, y) \ set (zip xs ys) \ det (f x y)" + shows "det (zipWithM_x f xs ys)" + apply (simp add: zipWithM_x_mapM) + apply (rule bind_detI) + apply (rule det_mapM [where S="set (zip xs ys)"]) + apply (clarsimp simp add: x) + apply simp + apply simp + done + +lemma empty_fail_sequence_x : + assumes "\m. m \ set ms \ empty_fail m" + shows "empty_fail (sequence_x ms)" using assms + by (induct ms) (auto simp: sequence_x_def) + +lemma mapME_set: + assumes est: "\x. \R\ f x \P\, -" + and invp: "\x y. \R and P x\ f y \\_. P x\, -" + and invr: "\x. \R\ f x \\_. R\, -" + shows "\R\ mapME f xs \\rv s. \x \ set rv. P x s\, -" +proof (rule hoare_strengthen_postE_R [where Q' = "\rv s. R s \ (\x \ set rv. P x s)"], induct xs) + case Nil + thus ?case by (simp add: mapME_Nil | wp returnOKE_R_wp)+ +next + case (Cons y ys) + + have minvp: "\x. \R and P x\ mapME f ys \\_. P x\, -" + apply (rule hoare_pre) + apply (rule_tac Q' = "\_ s. R s \ P x s" in hoare_strengthen_postE_R) + apply (wp mapME_wp' invr invp)+ + apply simp + apply simp + apply simp + done + + show ?case + apply (simp add: mapME_Cons) + apply (wp) + apply (rule_tac Q' = "\xs s. (R s \ (\x \ set xs. P x s)) \ P rv s" in hoare_strengthen_postE_R) + apply (wp Cons.hyps minvp) + apply simp + apply (fold validE_R_def) + apply simp + apply (wp invr est) + apply simp + done +qed clarsimp + + +lemma empty_fail_mapM_x [simp]: + "(\x. empty_fail (a x)) \ empty_fail (mapM_x a xs)" + apply (induct_tac xs) + apply (clarsimp simp: mapM_x_Nil) + apply (clarsimp simp: mapM_x_Cons) + done + +lemma mapM_upd_inv: + assumes f: "\x rv. (rv,s) \ fst (f x s) \ x \ set xs \ (rv, g s) \ fst (f x (g s))" + assumes inv: "\x. \(=) s\ f x \\_. 
(=) s\" + shows "(rv,s) \ fst (mapM f xs s) \ (rv, g s) \ fst (mapM f xs (g s))" + using f inv +proof (induct xs arbitrary: rv s) + case Nil + thus ?case by (simp add: mapM_Nil return_def) +next + case (Cons z zs) + from Cons.prems + show ?case + apply (clarsimp simp: mapM_Cons in_monad) + apply (frule use_valid, assumption, rule refl) + apply clarsimp + apply (drule Cons.prems, simp) + apply (rule exI, erule conjI) + apply (drule Cons.hyps) + apply simp + apply assumption + apply simp + done +qed + +lemma case_option_find_give_me_a_map: + "case_option a return (find f xs) + = liftM projl + (mapME (\x. if (f x) then throwError x else returnOk ()) xs + >>=E (\x. assert (\x \ set xs. \ f x) + >>= (\_. liftM (Inl :: 'a \ 'a + unit) a)))" + apply (induct xs) + apply simp + apply (simp add: liftM_def mapME_Nil) + apply (simp add: mapME_Cons split: if_split) + apply (clarsimp simp add: throwError_def bindE_def bind_assoc + liftM_def) + apply (rule bind_cong [OF refl]) + apply (simp add: lift_def throwError_def returnOk_def split: sum.split) + done + +end \ No newline at end of file diff --git a/lib/Monad_WP/NonDetMonad.thy b/lib/Monad_WP/NonDetMonad.thy deleted file mode 100644 index ef68975c9e..0000000000 --- a/lib/Monad_WP/NonDetMonad.thy +++ /dev/null @@ -1,921 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) - -(* - Nondeterministic state and error monads with failure in Isabelle. -*) - -chapter "Nondeterministic State Monad with Failure" - -theory NonDetMonad -imports Lib -begin - -text \ - \label{c:monads} - - State monads are used extensively in the seL4 specification. They are - defined below. -\ - -section "The Monad" - -text \ - The basic type of the nondeterministic state monad with failure is - very similar to the normal state monad. Instead of a pair consisting - of result and new state, we return a set of these pairs coupled with - a failure flag. Each element in the set is a potential result of the - computation. The flag is @{const True} if there is an execution path - in the computation that may have failed. Conversely, if the flag is - @{const False}, none of the computations resulting in the returned - set can have failed.\ -type_synonym ('s,'a) nondet_monad = "'s \ ('a \ 's) set \ bool" - - -text \ - Print the type @{typ "('s,'a) nondet_monad"} instead of its unwieldy expansion. - Needs an AST translation in code, because it needs to check that the state variable - @{typ 's} occurs twice. This comparison is not guaranteed to always work as expected - (AST instances might have different decoration), but it does seem to work here. -\ -print_ast_translation \ - let - fun monad_tr _ [t1, Ast.Appl [Ast.Constant @{type_syntax prod}, - Ast.Appl [Ast.Constant @{type_syntax set}, - Ast.Appl [Ast.Constant @{type_syntax prod}, t2, t3]], - Ast.Constant @{type_syntax bool}]] = - if t3 = t1 - then Ast.Appl [Ast.Constant @{type_syntax "nondet_monad"}, t1, t2] - else raise Match - in [(@{type_syntax "fun"}, monad_tr)] end -\ - - -text \ - The definition of fundamental monad functions @{text return} and - @{text bind}. The monad function @{text "return x"} does not change - the state, does not fail, and returns @{text "x"}. -\ -definition - return :: "'a \ ('s,'a) nondet_monad" where - "return a \ \s. ({(a,s)},False)" - -text \ - The monad function @{text "bind f g"}, also written @{text "f >>= g"}, - is the execution of @{term f} followed by the execution of @{text g}. 
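- For example, once @{text bind} is defined below, @{text "(return (1::nat) >>= (\<lambda>x. return (Suc x)))"}
- applied to a state @{text s} yields @{text "({(2, s)}, False)"}: the single result
- @{text "(1, s)"} of the first computation is passed on to the second, which returns
- @{text 2} in the unchanged state, and neither step can fail.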
- The function @{text g} takes the result value \emph{and} the result - state of @{text f} as parameter. The definition says that the result of - the combined operation is the union of the set of sets that is created - by @{text g} applied to the result sets of @{text f}. The combined - operation may have failed, if @{text f} may have failed or @{text g} may - have failed on any of the results of @{text f}. -\ -definition - bind :: "('s, 'a) nondet_monad \ ('a \ ('s, 'b) nondet_monad) \ - ('s, 'b) nondet_monad" (infixl ">>=" 60) - where - "bind f g \ \s. (\(fst ` case_prod g ` fst (f s)), - True \ snd ` case_prod g ` fst (f s) \ snd (f s))" - -text \ - Sometimes it is convenient to write @{text bind} in reverse order. -\ -abbreviation(input) - bind_rev :: "('c \ ('a, 'b) nondet_monad) \ ('a, 'c) nondet_monad \ - ('a, 'b) nondet_monad" (infixl "=<<" 60) where - "g =<< f \ f >>= g" - -text \ - The basic accessor functions of the state monad. @{text get} returns - the current state as result, does not fail, and does not change the state. - @{text "put s"} returns nothing (@{typ unit}), changes the current state - to @{text s} and does not fail. -\ -definition - get :: "('s,'s) nondet_monad" where - "get \ \s. ({(s,s)}, False)" - -definition - put :: "'s \ ('s, unit) nondet_monad" where - "put s \ \_. ({((),s)}, False)" - - -subsection "Nondeterminism" - -text \ - Basic nondeterministic functions. @{text "select A"} chooses an element - of the set @{text A}, does not change the state, and does not fail - (even if the set is empty). @{text "f \ g"} executes @{text f} or - executes @{text g}. It retuns the union of results of @{text f} and - @{text g}, and may have failed if either may have failed. -\ -definition - select :: "'a set \ ('s,'a) nondet_monad" where - "select A \ \s. (A \ {s}, False)" - -definition - alternative :: "('s,'a) nondet_monad \ ('s,'a) nondet_monad \ - ('s,'a) nondet_monad" - (infixl "\" 20) -where - "f \ g \ \s. (fst (f s) \ fst (g s), snd (f s) \ snd (g s))" - -text \A variant of @{text select} that takes a pair. The first component - is a set as in normal @{text select}, the second component indicates - whether the execution failed. This is useful to lift monads between - different state spaces. -\ -definition - select_f :: "'a set \ bool \ ('s,'a) nondet_monad" where - "select_f S \ \s. (fst S \ {s}, snd S)" - -text \@{text select_state} takes a relationship between - states, and outputs nondeterministically a state - related to the input state.\ - -definition - state_select :: "('s \ 's) set \ ('s, unit) nondet_monad" -where - "state_select r \ \s. ((\x. ((), x)) ` {s'. (s, s') \ r}, \ (\s'. (s, s') \ r))" - -subsection "Failure" - -text \The monad function that always fails. Returns an empty set of -results and sets the failure flag.\ -definition - fail :: "('s, 'a) nondet_monad" where - "fail \ \s. ({}, True)" - -text \Assertions: fail if the property @{text P} is not true\ -definition - assert :: "bool \ ('a, unit) nondet_monad" where - "assert P \ if P then return () else fail" - -text \Fail if the value is @{const None}, - return result @{text v} for @{term "Some v"}\ -definition - assert_opt :: "'a option \ ('b, 'a) nondet_monad" where - "assert_opt v \ case v of None \ fail | Some v \ return v" - -text \An assertion that also can introspect the current state.\ - -definition - state_assert :: "('s \ bool) \ ('s, unit) nondet_monad" -where - "state_assert P \ get >>= (\s. 
assert (P s))" - -subsection "Generic functions on top of the state monad" - -text \Apply a function to the current state and return the result -without changing the state.\ -definition - gets :: "('s \ 'a) \ ('s, 'a) nondet_monad" where - "gets f \ get >>= (\s. return (f s))" - -text \Modify the current state using the function passed in.\ -definition - modify :: "('s \ 's) \ ('s, unit) nondet_monad" where - "modify f \ get >>= (\s. put (f s))" - -lemma simpler_gets_def: "gets f = (\s. ({(f s, s)}, False))" - apply (simp add: gets_def return_def bind_def get_def) - done - -lemma simpler_modify_def: - "modify f = (\s. ({((), f s)}, False))" - by (simp add: modify_def bind_def get_def put_def) - -text \Execute the given monad when the condition is true, - return @{text "()"} otherwise.\ -definition - "when" :: "bool \ ('s, unit) nondet_monad \ - ('s, unit) nondet_monad" where - "when P m \ if P then m else return ()" - -text \Execute the given monad unless the condition is true, - return @{text "()"} otherwise.\ -definition - unless :: "bool \ ('s, unit) nondet_monad \ - ('s, unit) nondet_monad" where - "unless P m \ when (\P) m" - -text \ - Perform a test on the current state, performing the left monad if - the result is true or the right monad if the result is false. -\ -definition - condition :: "('s \ bool) \ ('s, 'r) nondet_monad \ ('s, 'r) nondet_monad \ ('s, 'r) nondet_monad" -where - "condition P L R \ \s. if (P s) then (L s) else (R s)" - -notation (output) - condition ("(condition (_)// (_)// (_))" [1000,1000,1000] 1000) - -text \ -Apply an option valued function to the current state, fail -if it returns @{const None}, return @{text v} if it returns -@{term "Some v"}. -\ -definition - gets_the :: "('s \ 'a option) \ ('s, 'a) nondet_monad" where - "gets_the f \ gets f >>= assert_opt" - - -text \ - Get a map (such as a heap) from the current state and apply an argument to the map. - Fail if the map returns @{const None}, otherwise return the value. -\ -definition - gets_map :: "('s \ 'a \ 'b option) \ 'a \ ('s, 'b) nondet_monad" where - "gets_map f p \ gets f >>= (\m. assert_opt (m p))" - - -subsection \The Monad Laws\ - -text \A more expanded definition of @{text bind}\ -lemma bind_def': - "(f >>= g) \ - \s. ({(r'', s''). \(r', s') \ fst (f s). (r'', s'') \ fst (g r' s') }, - snd (f s) \ (\(r', s') \ fst (f s). snd (g r' s')))" - apply (rule eq_reflection) - apply (auto simp add: bind_def split_def Let_def) - done - -text \Each monad satisfies at least the following three laws.\ - -text \@{term return} is absorbed at the left of a @{term bind}, - applying the return value directly:\ -lemma return_bind [simp]: "(return x >>= f) = f x" - by (simp add: return_def bind_def) - -text \@{term return} is absorbed on the right of a @{term bind}\ -lemma bind_return [simp]: "(m >>= return) = m" - apply (rule ext) - apply (simp add: bind_def return_def split_def) - done - -text \@{term bind} is associative\ -lemma bind_assoc: - fixes m :: "('a,'b) nondet_monad" - fixes f :: "'b \ ('a,'c) nondet_monad" - fixes g :: "'c \ ('a,'d) nondet_monad" - shows "(m >>= f) >>= g = m >>= (\x. f x >>= g)" - apply (unfold bind_def Let_def split_def) - apply (rule ext) - apply clarsimp - apply (auto intro: rev_image_eqI) - done - - -section \Adding Exceptions\ - -text \ - The type @{typ "('s,'a) nondet_monad"} gives us nondeterminism and - failure. We now extend this monad with exceptional return values - that abort normal execution, but can be handled explicitly. - We use the sum type to indicate exceptions. 
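- Concretely (as the definitions below make precise), an exceptional result is
- encoded as @{text "Inl e"} and a normal result as @{text "Inr v"}, so throwing
- an exception is just @{text "return (Inl e)"} and returning normally is
- @{text "return (Inr v)"}.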
- - In @{typ "('s, 'e + 'a) nondet_monad"}, @{typ "'s"} is the state, - @{typ 'e} is an exception, and @{typ 'a} is a normal return value. - - This new type itself forms a monad again. Since type classes in - Isabelle are not powerful enough to express the class of monads, - we provide new names for the @{term return} and @{term bind} functions - in this monad. We call them @{text returnOk} (for normal return values) - and @{text bindE} (for composition). We also define @{text throwError} - to return an exceptional value. -\ -definition - returnOk :: "'a \ ('s, 'e + 'a) nondet_monad" where - "returnOk \ return o Inr" - -definition - throwError :: "'e \ ('s, 'e + 'a) nondet_monad" where - "throwError \ return o Inl" - -text \ - Lifting a function over the exception type: if the input is an - exception, return that exception; otherwise continue execution. -\ -definition - lift :: "('a \ ('s, 'e + 'b) nondet_monad) \ - 'e +'a \ ('s, 'e + 'b) nondet_monad" -where - "lift f v \ case v of Inl e \ throwError e - | Inr v' \ f v'" - -text \ - The definition of @{term bind} in the exception monad (new - name @{text bindE}): the same as normal @{term bind}, but - the right-hand side is skipped if the left-hand side - produced an exception. -\ -definition - bindE :: "('s, 'e + 'a) nondet_monad \ - ('a \ ('s, 'e + 'b) nondet_monad) \ - ('s, 'e + 'b) nondet_monad" (infixl ">>=E" 60) -where - "bindE f g \ bind f (lift g)" - - -text \ - Lifting a normal nondeterministic monad into the - exception monad is achieved by always returning its - result as normal result and never throwing an exception. -\ -definition - liftE :: "('s,'a) nondet_monad \ ('s, 'e+'a) nondet_monad" -where - "liftE f \ f >>= (\r. return (Inr r))" - - -text \ - Since the underlying type and @{text return} function changed, - we need new definitions for when and unless: -\ -definition - whenE :: "bool \ ('s, 'e + unit) nondet_monad \ - ('s, 'e + unit) nondet_monad" - where - "whenE P f \ if P then f else returnOk ()" - -definition - unlessE :: "bool \ ('s, 'e + unit) nondet_monad \ - ('s, 'e + unit) nondet_monad" - where - "unlessE P f \ if P then returnOk () else f" - - -text \ - Throwing an exception when the parameter is @{term None}, otherwise - returning @{term "v"} for @{term "Some v"}. -\ -definition - throw_opt :: "'e \ 'a option \ ('s, 'e + 'a) nondet_monad" where - "throw_opt ex x \ - case x of None \ throwError ex | Some v \ returnOk v" - - -text \ - Failure in the exception monad is redefined in the same way - as @{const whenE} and @{const unlessE}, with @{term returnOk} - instead of @{term return}. -\ -definition - assertE :: "bool \ ('a, 'e + unit) nondet_monad" where - "assertE P \ if P then returnOk () else fail" - -subsection "Monad Laws for the Exception Monad" - -text \More direct definition of @{const liftE}:\ -lemma liftE_def2: - "liftE f = (\s. ((\(v,s'). 
(Inr v, s')) ` fst (f s), snd (f s)))" - by (auto simp: liftE_def return_def split_def bind_def) - -text \Left @{const returnOk} absorbtion over @{term bindE}:\ -lemma returnOk_bindE [simp]: "(returnOk x >>=E f) = f x" - apply (unfold bindE_def returnOk_def) - apply (clarsimp simp: lift_def) - done - -lemma lift_return [simp]: - "lift (return \ Inr) = return" - by (rule ext) - (simp add: lift_def throwError_def split: sum.splits) - -text \Right @{const returnOk} absorbtion over @{term bindE}:\ -lemma bindE_returnOk [simp]: "(m >>=E returnOk) = m" - by (simp add: bindE_def returnOk_def) - -text \Associativity of @{const bindE}:\ -lemma bindE_assoc: - "(m >>=E f) >>=E g = m >>=E (\x. f x >>=E g)" - apply (simp add: bindE_def bind_assoc) - apply (rule arg_cong [where f="\x. m >>= x"]) - apply (rule ext) - apply (case_tac x, simp_all add: lift_def throwError_def) - done - -text \@{const returnOk} could also be defined via @{const liftE}:\ -lemma returnOk_liftE: - "returnOk x = liftE (return x)" - by (simp add: liftE_def returnOk_def) - -text \Execution after throwing an exception is skipped:\ -lemma throwError_bindE [simp]: - "(throwError E >>=E f) = throwError E" - by (simp add: bindE_def bind_def throwError_def lift_def return_def) - - -section "Syntax" - -text \This section defines traditional Haskell-like do-syntax - for the state monad in Isabelle.\ - -subsection "Syntax for the Nondeterministic State Monad" - -text \We use @{text K_bind} to syntactically indicate the - case where the return argument of the left side of a @{term bind} - is ignored\ -definition - K_bind_def [iff]: "K_bind \ \x y. x" - -nonterminal - dobinds and dobind and nobind - -syntax - "_dobind" :: "[pttrn, 'a] => dobind" ("(_ <-/ _)" 10) - "" :: "dobind => dobinds" ("_") - "_nobind" :: "'a => dobind" ("_") - "_dobinds" :: "[dobind, dobinds] => dobinds" ("(_);//(_)") - - "_do" :: "[dobinds, 'a] => 'a" ("(do ((_);//(_))//od)" 100) -syntax (xsymbols) - "_dobind" :: "[pttrn, 'a] => dobind" ("(_ \/ _)" 10) - -translations - "_do (_dobinds b bs) e" == "_do b (_do bs e)" - "_do (_nobind b) e" == "b >>= (CONST K_bind e)" - "do x <- a; e od" == "a >>= (\x. e)" - -text \Syntax examples:\ -lemma "do x \ return 1; - return (2::nat); - return x - od = - return 1 >>= - (\x. return (2::nat) >>= - K_bind (return x))" - by (rule refl) - -lemma "do x \ return 1; - return 2; - return x - od = return 1" - by simp - -subsection "Syntax for the Exception Monad" - -text \ - Since the exception monad is a different type, we - need to syntactically distinguish it in the syntax. - We use @{text doE}/@{text odE} for this, but can re-use - most of the productions from @{text do}/@{text od} - above. -\ - -syntax - "_doE" :: "[dobinds, 'a] => 'a" ("(doE ((_);//(_))//odE)" 100) - -translations - "_doE (_dobinds b bs) e" == "_doE b (_doE bs e)" - "_doE (_nobind b) e" == "b >>=E (CONST K_bind e)" - "doE x <- a; e odE" == "a >>=E (\x. e)" - -text \Syntax examples:\ -lemma "doE x \ returnOk 1; - returnOk (2::nat); - returnOk x - odE = - returnOk 1 >>=E - (\x. 
returnOk (2::nat) >>=E - K_bind (returnOk x))" - by (rule refl) - -lemma "doE x \ returnOk 1; - returnOk 2; - returnOk x - odE = returnOk 1" - by simp - - - -section "Library of Monadic Functions and Combinators" - - -text \Lifting a normal function into the monad type:\ -definition - liftM :: "('a \ 'b) \ ('s,'a) nondet_monad \ ('s, 'b) nondet_monad" -where - "liftM f m \ do x \ m; return (f x) od" - -text \The same for the exception monad:\ -definition - liftME :: "('a \ 'b) \ ('s,'e+'a) nondet_monad \ ('s,'e+'b) nondet_monad" -where - "liftME f m \ doE x \ m; returnOk (f x) odE" - -text \ - Run a sequence of monads from left to right, ignoring return values.\ -definition - sequence_x :: "('s, 'a) nondet_monad list \ ('s, unit) nondet_monad" -where - "sequence_x xs \ foldr (\x y. x >>= (\_. y)) xs (return ())" - -text \ - Map a monadic function over a list by applying it to each element - of the list from left to right, ignoring return values. -\ -definition - mapM_x :: "('a \ ('s,'b) nondet_monad) \ 'a list \ ('s, unit) nondet_monad" -where - "mapM_x f xs \ sequence_x (map f xs)" - -text \ - Map a monadic function with two parameters over two lists, - going through both lists simultaneously, left to right, ignoring - return values. -\ -definition - zipWithM_x :: "('a \ 'b \ ('s,'c) nondet_monad) \ - 'a list \ 'b list \ ('s, unit) nondet_monad" -where - "zipWithM_x f xs ys \ sequence_x (zipWith f xs ys)" - - -text \The same three functions as above, but returning a list of -return values instead of @{text unit}\ -definition - sequence :: "('s, 'a) nondet_monad list \ ('s, 'a list) nondet_monad" -where - "sequence xs \ let mcons = (\p q. p >>= (\x. q >>= (\y. return (x#y)))) - in foldr mcons xs (return [])" - -definition - mapM :: "('a \ ('s,'b) nondet_monad) \ 'a list \ ('s, 'b list) nondet_monad" -where - "mapM f xs \ sequence (map f xs)" - -definition - zipWithM :: "('a \ 'b \ ('s,'c) nondet_monad) \ - 'a list \ 'b list \ ('s, 'c list) nondet_monad" -where - "zipWithM f xs ys \ sequence (zipWith f xs ys)" - -definition - foldM :: "('b \ 'a \ ('s, 'a) nondet_monad) \ 'b list \ 'a \ ('s, 'a) nondet_monad" -where - "foldM m xs a \ foldr (\p q. q >>= m p) xs (return a) " - -definition - foldME ::"('b \ 'a \ ('s,('e + 'b)) nondet_monad) \ 'b \ 'a list \ ('s, ('e + 'b)) nondet_monad" -where "foldME m a xs \ foldr (\p q. q >>=E swp m p) xs (returnOk a)" - -text \The sequence and map functions above for the exception monad, -with and without lists of return value\ -definition - sequenceE_x :: "('s, 'e+'a) nondet_monad list \ ('s, 'e+unit) nondet_monad" -where - "sequenceE_x xs \ foldr (\x y. doE _ <- x; y odE) xs (returnOk ())" - -definition - mapME_x :: "('a \ ('s,'e+'b) nondet_monad) \ 'a list \ - ('s,'e+unit) nondet_monad" -where - "mapME_x f xs \ sequenceE_x (map f xs)" - -definition - sequenceE :: "('s, 'e+'a) nondet_monad list \ ('s, 'e+'a list) nondet_monad" -where - "sequenceE xs \ let mcons = (\p q. p >>=E (\x. q >>=E (\y. 
returnOk (x#y)))) - in foldr mcons xs (returnOk [])" - -definition - mapME :: "('a \ ('s,'e+'b) nondet_monad) \ 'a list \ - ('s,'e+'b list) nondet_monad" -where - "mapME f xs \ sequenceE (map f xs)" - - -text \Filtering a list using a monadic function as predicate:\ -primrec - filterM :: "('a \ ('s, bool) nondet_monad) \ 'a list \ ('s, 'a list) nondet_monad" -where - "filterM P [] = return []" -| "filterM P (x # xs) = do - b <- P x; - ys <- filterM P xs; - return (if b then (x # ys) else ys) - od" - - -section "Catching and Handling Exceptions" - -text \ - Turning an exception monad into a normal state monad - by catching and handling any potential exceptions: -\ -definition - catch :: "('s, 'e + 'a) nondet_monad \ - ('e \ ('s, 'a) nondet_monad) \ - ('s, 'a) nondet_monad" (infix "" 10) -where - "f handler \ - do x \ f; - case x of - Inr b \ return b - | Inl e \ handler e - od" - -text \ - Handling exceptions, but staying in the exception monad. - The handler may throw a type of exceptions different from - the left side. -\ -definition - handleE' :: "('s, 'e1 + 'a) nondet_monad \ - ('e1 \ ('s, 'e2 + 'a) nondet_monad) \ - ('s, 'e2 + 'a) nondet_monad" (infix "" 10) -where - "f handler \ - do - v \ f; - case v of - Inl e \ handler e - | Inr v' \ return (Inr v') - od" - -text \ - A type restriction of the above that is used more commonly in - practice: the exception handle (potentially) throws exception - of the same type as the left-hand side. -\ -definition - handleE :: "('s, 'x + 'a) nondet_monad \ - ('x \ ('s, 'x + 'a) nondet_monad) \ - ('s, 'x + 'a) nondet_monad" (infix "" 10) -where - "handleE \ handleE'" - - -text \ - Handling exceptions, and additionally providing a continuation - if the left-hand side throws no exception: -\ -definition - handle_elseE :: "('s, 'e + 'a) nondet_monad \ - ('e \ ('s, 'ee + 'b) nondet_monad) \ - ('a \ ('s, 'ee + 'b) nondet_monad) \ - ('s, 'ee + 'b) nondet_monad" - ("_ _ _" 10) -where - "f handler continue \ - do v \ f; - case v of Inl e \ handler e - | Inr v' \ continue v' - od" - -subsection "Loops" - -text \ - Loops are handled using the following inductive predicate; - non-termination is represented using the failure flag of the - monad. -\ - -inductive_set - whileLoop_results :: "('r \ 's \ bool) \ ('r \ ('s, 'r) nondet_monad) \ ((('r \ 's) option) \ (('r \ 's) option)) set" - for C B -where - "\ \ C r s \ \ (Some (r, s), Some (r, s)) \ whileLoop_results C B" - | "\ C r s; snd (B r s) \ \ (Some (r, s), None) \ whileLoop_results C B" - | "\ C r s; (r', s') \ fst (B r s); (Some (r', s'), z) \ whileLoop_results C B \ - \ (Some (r, s), z) \ whileLoop_results C B" - -inductive_cases whileLoop_results_cases_valid: "(Some x, Some y) \ whileLoop_results C B" -inductive_cases whileLoop_results_cases_fail: "(Some x, None) \ whileLoop_results C B" -inductive_simps whileLoop_results_simps: "(Some x, y) \ whileLoop_results C B" -inductive_simps whileLoop_results_simps_valid: "(Some x, Some y) \ whileLoop_results C B" -inductive_simps whileLoop_results_simps_start_fail [simp]: "(None, x) \ whileLoop_results C B" - -inductive - whileLoop_terminates :: "('r \ 's \ bool) \ ('r \ ('s, 'r) nondet_monad) \ 'r \ 's \ bool" - for C B -where - "\ C r s \ whileLoop_terminates C B r s" - | "\ C r s; \(r', s') \ fst (B r s). 
whileLoop_terminates C B r' s' \ - \ whileLoop_terminates C B r s" - -inductive_cases whileLoop_terminates_cases: "whileLoop_terminates C B r s" -inductive_simps whileLoop_terminates_simps: "whileLoop_terminates C B r s" - -definition - "whileLoop C B \ (\r s. - ({(r',s'). (Some (r, s), Some (r', s')) \ whileLoop_results C B}, - (Some (r, s), None) \ whileLoop_results C B \ (\ whileLoop_terminates C B r s)))" - -notation (output) - whileLoop ("(whileLoop (_)// (_))" [1000, 1000] 1000) - -definition - whileLoopE :: "('r \ 's \ bool) \ ('r \ ('s, 'e + 'r) nondet_monad) - \ 'r \ 's \ (('e + 'r) \ 's) set \ bool" -where - "whileLoopE C body \ - \r. whileLoop (\r s. (case r of Inr v \ C v s | _ \ False)) (lift body) (Inr r)" - -notation (output) - whileLoopE ("(whileLoopE (_)// (_))" [1000, 1000] 1000) - -section "Hoare Logic" - -subsection "Validity" - -text \This section defines a Hoare logic for partial correctness for - the nondeterministic state monad as well as the exception monad. - The logic talks only about the behaviour part of the monad and ignores - the failure flag. - - The logic is defined semantically. Rules work directly on the - validity predicate. - - In the nondeterministic state monad, validity is a triple of precondition, - monad, and postcondition. The precondition is a function from state to - bool (a state predicate), the postcondition is a function from return value - to state to bool. A triple is valid if for all states that satisfy the - precondition, all result values and result states that are returned by - the monad satisfy the postcondition. Note that if the computation returns - the empty set, the triple is trivially valid. This means @{term "assert P"} - does not require us to prove that @{term P} holds, but rather allows us - to assume @{term P}! Proving non-failure is done via separate predicate and - calculus (see below). -\ -definition - valid :: "('s \ bool) \ ('s,'a) nondet_monad \ ('a \ 's \ bool) \ bool" - ("\_\/ _ /\_\") -where - "\P\ f \Q\ \ \s. P s \ (\(r,s') \ fst (f s). Q r s')" - -text \ - We often reason about invariant predicates. The following provides shorthand syntax - that avoids repeating potentially long predicates. -\ -abbreviation (input) - invariant :: "('s,'a) nondet_monad \ ('s \ bool) \ bool" ("_ \_\" [59,0] 60) -where - "invariant f P \ \P\ f \\_. P\" - -text \ - Validity for the exception monad is similar and build on the standard - validity above. Instead of one postcondition, we have two: one for - normal and one for exceptional results. -\ -definition - validE :: "('s \ bool) \ ('s, 'a + 'b) nondet_monad \ - ('b \ 's \ bool) \ - ('a \ 's \ bool) \ bool" -("\_\/ _ /(\_\,/ \_\)") -where - "\P\ f \Q\,\E\ \ \P\ f \ \v s. case v of Inr r \ Q r s | Inl e \ E e s \" - - -text \ - The following two instantiations are convenient to separate reasoning - for exceptional and normal case. -\ -definition - validE_R :: "('s \ bool) \ ('s, 'e + 'a) nondet_monad \ - ('a \ 's \ bool) \ bool" - ("\_\/ _ /\_\, -") -where - "\P\ f \Q\,- \ validE P f Q (\x y. True)" - -definition - validE_E :: "('s \ bool) \ ('s, 'e + 'a) nondet_monad \ - ('e \ 's \ bool) \ bool" - ("\_\/ _ /-, \_\") -where - "\P\ f -,\Q\ \ validE P f (\x y. True) Q" - - -text \Abbreviations for trivial preconditions:\ -abbreviation(input) - top :: "'a \ bool" ("\") -where - "\ \ \_. True" - -abbreviation(input) - bottom :: "'a \ bool" ("\") -where - "\ \ \_. 
False" - -text \Abbreviations for trivial postconditions (taking two arguments):\ -abbreviation(input) - toptop :: "'a \ 'b \ bool" ("\\") -where - "\\ \ \_ _. True" - -abbreviation(input) - botbot :: "'a \ 'b \ bool" ("\\") -where - "\\ \ \_ _. False" - -text \ - Lifting @{text "\"} and @{text "\"} over two arguments. - Lifting @{text "\"} and @{text "\"} over one argument is already - defined (written @{text "and"} and @{text "or"}). -\ -definition - bipred_conj :: "('a \ 'b \ bool) \ ('a \ 'b \ bool) \ ('a \ 'b \ bool)" - (infixl "And" 96) -where - "bipred_conj P Q \ \x y. P x y \ Q x y" - -definition - bipred_disj :: "('a \ 'b \ bool) \ ('a \ 'b \ bool) \ ('a \ 'b \ bool)" - (infixl "Or" 91) -where - "bipred_disj P Q \ \x y. P x y \ Q x y" - - -subsection "Determinism" - -text \A monad of type @{text nondet_monad} is deterministic iff it -returns exactly one state and result and does not fail\ -definition - det :: "('a,'s) nondet_monad \ bool" -where - "det f \ \s. \r. f s = ({r},False)" - -text \A deterministic @{text nondet_monad} can be turned - into a normal state monad:\ -definition - the_run_state :: "('s,'a) nondet_monad \ 's \ 'a \ 's" -where - "the_run_state M \ \s. THE s'. fst (M s) = {s'}" - - -subsection "Non-Failure" - -text \ - With the failure flag, we can formulate non-failure separately - from validity. A monad @{text m} does not fail under precondition - @{text P}, if for no start state in that precondition it sets - the failure flag. -\ -definition - no_fail :: "('s \ bool) \ ('s,'a) nondet_monad \ bool" -where - "no_fail P m \ \s. P s \ \ (snd (m s))" - - -text \ - It is often desired to prove non-failure and a Hoare triple - simultaneously, as the reasoning is often similar. The following - definitions allow such reasoning to take place. -\ - -definition - validNF ::"('s \ bool) \ ('s,'a) nondet_monad \ ('a \ 's \ bool) \ bool" - ("\_\/ _ /\_\!") -where - "validNF P f Q \ valid P f Q \ no_fail P f" - -definition - validE_NF :: "('s \ bool) \ ('s, 'a + 'b) nondet_monad \ - ('b \ 's \ bool) \ - ('a \ 's \ bool) \ bool" - ("\_\/ _ /(\_\,/ \_\!)") -where - "validE_NF P f Q E \ validE P f Q E \ no_fail P f" - -lemma validE_NF_alt_def: - "\ P \ B \ Q \,\ E \! = \ P \ B \ \v s. case v of Inl e \ E e s | Inr r \ Q r s \!" - by (clarsimp simp: validE_NF_def validE_def validNF_def) - -text \ - Usually, well-formed monads constructed from the primitives - above will have the following property: if they return an - empty set of results, they will have the failure flag set. -\ -definition - empty_fail :: "('s,'a) nondet_monad \ bool" -where - "empty_fail m \ \s. fst (m s) = {} \ snd (m s)" - - -text \ - Useful in forcing otherwise unknown executions to have - the @{const empty_fail} property. -\ -definition - mk_ef :: "'a set \ bool \ 'a set \ bool" -where - "mk_ef S \ (fst S, fst S = {} \ snd S)" - -section "Basic exception reasoning" - -text \ - The following predicates @{text no_throw} and @{text no_return} allow - reasoning that functions in the exception monad either do - no throw an exception or never return normally. -\ - -definition "no_throw P A \ \ P \ A \ \_ _. True \,\ \_ _. False \" - -definition "no_return P A \ \ P \ A \\_ _. False\,\\_ _. 
True \" - -end diff --git a/lib/Monad_WP/NonDetMonadLemmas.thy b/lib/Monad_WP/NonDetMonadLemmas.thy deleted file mode 100644 index efa3ba084e..0000000000 --- a/lib/Monad_WP/NonDetMonadLemmas.thy +++ /dev/null @@ -1,344 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) - -theory NonDetMonadLemmas -imports NonDetMonad -begin - -section "General Lemmas Regarding the Nondeterministic State Monad" - -subsection "Congruence Rules for the Function Package" - -lemma bind_cong[fundef_cong]: - "\ f = f'; \v s s'. (v, s') \ fst (f' s) \ g v s' = g' v s' \ \ f >>= g = f' >>= g'" - apply (rule ext) - apply (auto simp: bind_def Let_def split_def intro: rev_image_eqI) - done - -lemma bind_apply_cong [fundef_cong]: - "\ f s = f' s'; \rv st. (rv, st) \ fst (f' s') \ g rv st = g' rv st \ - \ (f >>= g) s = (f' >>= g') s'" - apply (simp add: bind_def) - apply (auto simp: split_def intro: SUP_cong [OF refl] intro: rev_image_eqI) - done - -lemma bindE_cong[fundef_cong]: - "\ M = M' ; \v s s'. (Inr v, s') \ fst (M' s) \ N v s' = N' v s' \ \ bindE M N = bindE M' N'" - apply (simp add: bindE_def) - apply (rule bind_cong) - apply (rule refl) - apply (unfold lift_def) - apply (case_tac v, simp_all) - done - -lemma bindE_apply_cong[fundef_cong]: - "\ f s = f' s'; \rv st. (Inr rv, st) \ fst (f' s') \ g rv st = g' rv st \ - \ (f >>=E g) s = (f' >>=E g') s'" - apply (simp add: bindE_def) - apply (rule bind_apply_cong) - apply assumption - apply (case_tac rv, simp_all add: lift_def) - done - -lemma K_bind_apply_cong[fundef_cong]: - "\ f st = f' st' \ \ K_bind f arg st = K_bind f' arg' st'" - by simp - -lemma when_apply_cong[fundef_cong]: - "\ C = C'; s = s'; C' \ m s' = m' s' \ \ whenE C m s = whenE C' m' s'" - by (simp add: whenE_def) - -lemma unless_apply_cong[fundef_cong]: - "\ C = C'; s = s'; \ C' \ m s' = m' s' \ \ unlessE C m s = unlessE C' m' s'" - by (simp add: unlessE_def) - -lemma whenE_apply_cong[fundef_cong]: - "\ C = C'; s = s'; C' \ m s' = m' s' \ \ whenE C m s = whenE C' m' s'" - by (simp add: whenE_def) - -lemma unlessE_apply_cong[fundef_cong]: - "\ C = C'; s = s'; \ C' \ m s' = m' s' \ \ unlessE C m s = unlessE C' m' s'" - by (simp add: unlessE_def) - -subsection "Simplifying Monads" - -lemma nested_bind [simp]: - "do x <- do y <- f; return (g y) od; h x od = - do y <- f; h (g y) od" - apply (clarsimp simp add: bind_def) - apply (rule ext) - apply (clarsimp simp add: Let_def split_def return_def) - done - -lemma fail_bind [simp]: - "fail >>= f = fail" - by (simp add: bind_def fail_def) - -lemma fail_bindE [simp]: - "fail >>=E f = fail" - by (simp add: bindE_def bind_def fail_def) - -lemma assert_False [simp]: - "assert False >>= f = fail" - by (simp add: assert_def) - -lemma assert_True [simp]: - "assert True >>= f = f ()" - by (simp add: assert_def) - -lemma assertE_False [simp]: - "assertE False >>=E f = fail" - by (simp add: assertE_def) - -lemma assertE_True [simp]: - "assertE True >>=E f = f ()" - by (simp add: assertE_def) - -lemma when_False_bind [simp]: - "when False g >>= f = f ()" - by (rule ext) (simp add: when_def bind_def return_def) - -lemma when_True_bind [simp]: - "when True g >>= f = g >>= f" - by (simp add: when_def bind_def return_def) - -lemma whenE_False_bind [simp]: - "whenE False g >>=E f = f ()" - by (simp add: whenE_def bindE_def returnOk_def lift_def) - -lemma whenE_True_bind [simp]: - "whenE True g >>=E f = g >>=E f" - by (simp add: whenE_def bindE_def returnOk_def lift_def) - -lemma when_True [simp]: "when 
True X = X" - by (clarsimp simp: when_def) - -lemma when_False [simp]: "when False X = return ()" - by (clarsimp simp: when_def) - -lemma unless_False [simp]: "unless False X = X" - by (clarsimp simp: unless_def) - -lemma unlessE_False [simp]: "unlessE False f = f" - unfolding unlessE_def by fastforce - -lemma unless_True [simp]: "unless True X = return ()" - by (clarsimp simp: unless_def) - -lemma unlessE_True [simp]: "unlessE True f = returnOk ()" - unfolding unlessE_def by fastforce - -lemma unlessE_whenE: - "unlessE P = whenE (~P)" - by (rule ext)+ (simp add: unlessE_def whenE_def) - -lemma unless_when: - "unless P = when (~P)" - by (rule ext)+ (simp add: unless_def when_def) - -lemma gets_to_return [simp]: "gets (\s. v) = return v" - by (clarsimp simp: gets_def put_def get_def bind_def return_def) - -lemma assert_opt_Some: - "assert_opt (Some x) = return x" - by (simp add: assert_opt_def) - -lemma assertE_liftE: - "assertE P = liftE (assert P)" - by (simp add: assertE_def assert_def liftE_def returnOk_def) - -lemma liftE_handleE' [simp]: "((liftE a) b) = liftE a" - apply (clarsimp simp: liftE_def handleE'_def) - done - -lemma liftE_handleE [simp]: "((liftE a) b) = liftE a" - apply (unfold handleE_def) - apply simp - done - -lemma condition_split: - "P (condition C a b s) = ((((C s) \ P (a s)) \ (\ (C s) \ P (b s))))" - apply (clarsimp simp: condition_def) - done - -lemma condition_split_asm: - "P (condition C a b s) = (\ (C s \ \ P (a s) \ \ C s \ \ P (b s)))" - apply (clarsimp simp: condition_def) - done - -lemmas condition_splits = condition_split condition_split_asm - -lemma condition_true_triv [simp]: - "condition (\_. True) A B = A" - apply (rule ext) - apply (clarsimp split: condition_splits) - done - -lemma condition_false_triv [simp]: - "condition (\_. False) A B = B" - apply (rule ext) - apply (clarsimp split: condition_splits) - done - -lemma condition_true: "\ P s \ \ condition P A B s = A s" - apply (clarsimp simp: condition_def) - done - -lemma condition_false: "\ \ P s \ \ condition P A B s = B s" - apply (clarsimp simp: condition_def) - done - -lemmas arg_cong_bind = arg_cong2[where f=bind] -lemmas arg_cong_bind1 = arg_cong_bind[OF refl ext] - -section "Low-level monadic reasoning" - -lemma monad_eqI [intro]: - "\ \r t s. (r, t) \ fst (A s) \ (r, t) \ fst (B s); - \r t s. (r, t) \ fst (B s) \ (r, t) \ fst (A s); - \x. snd (A x) = snd (B x) \ - \ (A :: ('s, 'a) nondet_monad) = B" - apply (fastforce intro!: set_eqI prod_eqI) - done - -lemma monad_state_eqI [intro]: - "\ \r t. (r, t) \ fst (A s) \ (r, t) \ fst (B s'); - \r t. (r, t) \ fst (B s') \ (r, t) \ fst (A s); - snd (A s) = snd (B s') \ - \ (A :: ('s, 'a) nondet_monad) s = B s'" - apply (fastforce intro!: set_eqI prod_eqI) - done - -subsection "General whileLoop reasoning" - -definition - "whileLoop_terminatesE C B \ (\r. - whileLoop_terminates (\r s. 
case r of Inr v \ C v s | _ \ False) (lift B) (Inr r))" - -lemma whileLoop_cond_fail: - "\ \ C x s \ \ (whileLoop C B x s) = (return x s)" - apply (auto simp: return_def whileLoop_def - intro: whileLoop_results.intros - whileLoop_terminates.intros - elim!: whileLoop_results.cases) - done - -lemma whileLoopE_cond_fail: - "\ \ C x s \ \ (whileLoopE C B x s) = (returnOk x s)" - apply (clarsimp simp: whileLoopE_def returnOk_def) - apply (auto intro: whileLoop_cond_fail) - done - -lemma whileLoop_results_simps_no_move [simp]: - shows "((Some x, Some x) \ whileLoop_results C B) = (\ C (fst x) (snd x))" - (is "?LHS x = ?RHS x") -proof (rule iffI) - assume "?LHS x" - then have "(\a. Some x = Some a) \ ?RHS (the (Some x))" - by (induct rule: whileLoop_results.induct, auto) - thus "?RHS x" - by clarsimp -next - assume "?RHS x" - thus "?LHS x" - by (metis surjective_pairing whileLoop_results.intros(1)) -qed - -lemma whileLoop_unroll: - "(whileLoop C B r) = ((condition (C r) (B r >>= (whileLoop C B)) (return r)))" - (is "?LHS r = ?RHS r") -proof - - have cond_fail: "\r s. \ C r s \ ?LHS r s = ?RHS r s" - apply (subst whileLoop_cond_fail, simp) - apply (clarsimp simp: condition_def bind_def return_def) - done - - have cond_pass: "\r s. C r s \ whileLoop C B r s = (B r >>= (whileLoop C B)) s" - apply (rule monad_state_eqI) - apply (clarsimp simp: whileLoop_def bind_def split_def) - apply (subst (asm) whileLoop_results_simps_valid) - apply fastforce - apply (clarsimp simp: whileLoop_def bind_def split_def) - apply (subst whileLoop_results.simps) - apply fastforce - apply (clarsimp simp: whileLoop_def bind_def split_def) - apply (subst whileLoop_results.simps) - apply (subst whileLoop_terminates.simps) - apply fastforce - done - - show ?thesis - apply (rule ext) - apply (metis cond_fail cond_pass condition_def) - done -qed - -lemma whileLoop_unroll': - "(whileLoop C B r) = ((condition (C r) (B r) (return r)) >>= (whileLoop C B))" - apply (rule ext) - apply (subst whileLoop_unroll) - apply (clarsimp simp: condition_def bind_def return_def split_def) - apply (subst whileLoop_cond_fail, simp) - apply (clarsimp simp: return_def) - done - -lemma whileLoopE_unroll: - "(whileLoopE C B r) = ((condition (C r) (B r >>=E (whileLoopE C B)) (returnOk r)))" - apply (rule ext) - apply (unfold whileLoopE_def) - apply (subst whileLoop_unroll) - apply (clarsimp simp: whileLoopE_def bindE_def returnOk_def split: condition_splits) - apply (clarsimp simp: lift_def) - apply (rule_tac f="\a. (B r >>= a) x" in arg_cong) - apply (rule ext)+ - apply (clarsimp simp: lift_def split: sum.splits) - apply (subst whileLoop_unroll) - apply (subst condition_false) - apply fastforce - apply (clarsimp simp: throwError_def) - done - -lemma whileLoopE_unroll': - "(whileLoopE C B r) = ((condition (C r) (B r) (returnOk r)) >>=E (whileLoopE C B))" - apply (rule ext) - apply (subst whileLoopE_unroll) - apply (clarsimp simp: condition_def bindE_def bind_def returnOk_def return_def lift_def split_def) - apply (subst whileLoopE_cond_fail, simp) - apply (clarsimp simp: returnOk_def return_def) - done - -(* These lemmas are useful to apply to rules to convert valid rules into - * a format suitable for wp. *) - -lemma valid_make_schematic_post: - "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \) \ - \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') \ f \ Q' \" - by (auto simp add: valid_def no_fail_def split: prod.splits) - -lemma validNF_make_schematic_post: - "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \!) \ - \ \s. \s0. P s0 s \ (\rv s'. 
Q s0 rv s' \ Q' rv s') \ f \ Q' \!" - by (auto simp add: valid_def validNF_def no_fail_def split: prod.splits) - -lemma validE_make_schematic_post: - "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \, \ \rv s. E s0 rv s \) \ - \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') - \ (\rv s'. E s0 rv s' \ E' rv s') \ f \ Q' \, \ E' \" - by (auto simp add: validE_def valid_def no_fail_def split: prod.splits sum.splits) - -lemma validE_NF_make_schematic_post: - "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \, \ \rv s. E s0 rv s \!) \ - \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') - \ (\rv s'. E s0 rv s' \ E' rv s') \ f \ Q' \, \ E' \!" - by (auto simp add: validE_NF_def validE_def valid_def no_fail_def split: prod.splits sum.splits) - -lemma validNF_conjD1: "\ P \ f \ \rv s. Q rv s \ Q' rv s \! \ \ P \ f \ Q \!" - by (fastforce simp: validNF_def valid_def no_fail_def) - -lemma validNF_conjD2: "\ P \ f \ \rv s. Q rv s \ Q' rv s \! \ \ P \ f \ Q' \!" - by (fastforce simp: validNF_def valid_def no_fail_def) - -end diff --git a/lib/Monad_WP/NonDetMonadVCG.thy b/lib/Monad_WP/NonDetMonadVCG.thy deleted file mode 100644 index 6c824bf738..0000000000 --- a/lib/Monad_WP/NonDetMonadVCG.thy +++ /dev/null @@ -1,2400 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) - -theory NonDetMonadVCG -imports - NonDetMonadLemmas - WPSimp - Strengthen -begin - -declare K_def [simp] - -section "Satisfiability" - -text \ - The dual to validity: an existential instead of a universal - quantifier for the post condition. In refinement, it is - often sufficient to know that there is one state that - satisfies a condition. -\ -definition - exs_valid :: "('a \ bool) \ ('a, 'b) nondet_monad \ - ('b \ 'a \ bool) \ bool" - ("\_\ _ \\_\") -where - "exs_valid P f Q \ (\s. P s \ (\(rv, s') \ fst (f s). Q rv s'))" - - -text \The above for the exception monad\ -definition - ex_exs_validE :: "('a \ bool) \ ('a, 'e + 'b) nondet_monad \ - ('b \ 'a \ bool) \ ('e \ 'a \ bool) \ bool" - ("\_\ _ \\_\, \_\") -where - "ex_exs_validE P f Q E \ - exs_valid P f (\rv. case rv of Inl e \ E e | Inr v \ Q v)" - - -section "Lemmas" - -subsection \Determinism\ - -lemma det_set_iff: - "det f \ (r \ fst (f s)) = (fst (f s) = {r})" - apply (simp add: det_def) - apply (rule iffI) - apply (erule_tac x=s in allE) - apply auto - done - -lemma return_det [iff]: - "det (return x)" - by (simp add: det_def return_def) - -lemma put_det [iff]: - "det (put s)" - by (simp add: det_def put_def) - -lemma get_det [iff]: - "det get" - by (simp add: det_def get_def) - -lemma det_gets [iff]: - "det (gets f)" - by (auto simp add: gets_def det_def get_def return_def bind_def) - -lemma det_UN: - "det f \ (\x \ fst (f s). g x) = (g (THE x. x \ fst (f s)))" - unfolding det_def - apply simp - apply (drule spec [of _ s]) - apply clarsimp - done - -lemma bind_detI [simp, intro!]: - "\ det f; \x. 
det (g x) \ \ det (f >>= g)" - apply (simp add: bind_def det_def split_def) - apply clarsimp - apply (erule_tac x=s in allE) - apply clarsimp - apply (erule_tac x="a" in allE) - apply (erule_tac x="b" in allE) - apply clarsimp - done - -lemma the_run_stateI: - "fst (M s) = {s'} \ the_run_state M s = s'" - by (simp add: the_run_state_def) - -lemma the_run_state_det: - "\ s' \ fst (M s); det M \ \ the_run_state M s = s'" - by (simp add: the_run_stateI det_set_iff) - -subsection "Lifting and Alternative Basic Definitions" - -lemma liftE_liftM: "liftE = liftM Inr" - apply (rule ext) - apply (simp add: liftE_def liftM_def) - done - -lemma liftME_liftM: "liftME f = liftM (case_sum Inl (Inr \ f))" - apply (rule ext) - apply (simp add: liftME_def liftM_def bindE_def returnOk_def lift_def) - apply (rule_tac f="bind x" in arg_cong) - apply (rule ext) - apply (case_tac xa) - apply (simp_all add: lift_def throwError_def) - done - -lemma liftE_bindE: - "(liftE a) >>=E b = a >>= b" - apply (simp add: liftE_def bindE_def lift_def bind_assoc) - done - -lemma liftM_id[simp]: "liftM id = id" - apply (rule ext) - apply (simp add: liftM_def) - done - -lemma liftM_bind: - "(liftM t f >>= g) = (f >>= (\x. g (t x)))" - by (simp add: liftM_def bind_assoc) - -lemma gets_bind_ign: "gets f >>= (\x. m) = m" - apply (rule ext) - apply (simp add: bind_def simpler_gets_def) - done - -lemma get_bind_apply: "(get >>= f) x = f x x" - by (simp add: get_def bind_def) - -lemma exec_gets: - "(gets f >>= m) s = m (f s) s" - by (simp add: simpler_gets_def bind_def) - -lemma exec_get: - "(get >>= m) s = m s s" - by (simp add: get_def bind_def) - -lemma bind_eqI: - "\ f = f'; \x. g x = g' x \ \ f >>= g = f' >>= g'" - apply (rule ext) - apply (simp add: bind_def) - apply (auto simp: split_def) - done - -subsection "Simplification Rules for Lifted And/Or" - -lemma pred_andE[elim!]: "\ (A and B) x; \ A x; B x \ \ R \ \ R" - by(simp add:pred_conj_def) - -lemma pred_andI[intro!]: "\ A x; B x \ \ (A and B) x" - by(simp add:pred_conj_def) - -lemma pred_conj_app[simp]: "(P and Q) x = (P x \ Q x)" - by(simp add:pred_conj_def) - -lemma bipred_andE[elim!]: "\ (A And B) x y; \ A x y; B x y \ \ R \ \ R" - by(simp add:bipred_conj_def) - -lemma bipred_andI[intro!]: "\ A x y; B x y \ \ (A And B) x y" - by (simp add:bipred_conj_def) - -lemma bipred_conj_app[simp]: "(P And Q) x = (P x and Q x)" - by(simp add:pred_conj_def bipred_conj_def) - -lemma pred_disjE[elim!]: "\ (P or Q) x; P x \ R; Q x \ R \ \ R" - by (fastforce simp: pred_disj_def) - -lemma pred_disjI1[intro]: "P x \ (P or Q) x" - by (simp add: pred_disj_def) - -lemma pred_disjI2[intro]: "Q x \ (P or Q) x" - by (simp add: pred_disj_def) - -lemma pred_disj_app[simp]: "(P or Q) x = (P x \ Q x)" - by auto - -lemma bipred_disjI1[intro]: "P x y \ (P Or Q) x y" - by (simp add: bipred_disj_def) - -lemma bipred_disjI2[intro]: "Q x y \ (P Or Q) x y" - by (simp add: bipred_disj_def) - -lemma bipred_disj_app[simp]: "(P Or Q) x = (P x or Q x)" - by(simp add:pred_disj_def bipred_disj_def) - -lemma pred_notnotD[simp]: "(not not P) = P" - by(simp add:pred_neg_def) - -lemma pred_and_true[simp]: "(P and \) = P" - by(simp add:pred_conj_def) - -lemma pred_and_true_var[simp]: "(\ and P) = P" - by(simp add:pred_conj_def) - -lemma pred_and_false[simp]: "(P and \) = \" - by(simp add:pred_conj_def) - -lemma pred_and_false_var[simp]: "(\ and P) = \" - by(simp add:pred_conj_def) - -lemma pred_conj_assoc: - "(P and Q and R) = (P and (Q and R))" - unfolding pred_conj_def by simp - -subsection "Hoare Logic Rules" - 
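[Editorial aside, not part of the deleted theory files: the "Hoare Logic Rules" subsection that follows states the sequencing rules for Hoare triples over monadic bind. As an illustration only, the Haskell sketch below models the nondeterministic state monad as a set of result/state pairs plus a failure flag and checks the partial-correctness validity predicate on a handful of sample states. All names here (ND, runND, returnND, bindND, valid, incr) are invented for this sketch and do not appear in the deleted theories.]

-- A minimal Haskell sketch of ('s, 'a) nondet_monad and Hoare-triple validity.
-- Result sets are modelled as lists; the Bool is the failure flag.
newtype ND s a = ND { runND :: s -> ([(a, s)], Bool) }

-- return: a single result, no failure.
returnND :: a -> ND s a
returnND x = ND $ \s -> ([(x, s)], False)

-- bind: run the first computation, feed each result/state into the second;
-- the combined computation fails if either stage fails.
bindND :: ND s a -> (a -> ND s b) -> ND s b
bindND m f = ND $ \s ->
  let (rs, failed) = runND m s
      steps        = [ runND (f r) s' | (r, s') <- rs ]
  in (concatMap fst steps, failed || any snd steps)

-- Partial-correctness validity, checked on a list of sample states:
-- every reachable (result, state') pair satisfies the postcondition;
-- the failure flag is ignored, as in the valid predicate above.
valid :: (s -> Bool) -> ND s a -> (a -> s -> Bool) -> [s] -> Bool
valid pre m post samples =
  and [ post r s' | s <- samples, pre s, (r, s') <- fst (runND m s) ]

-- Example computation: increment an integer state.
incr :: ND Int ()
incr = ND $ \n -> ([((), n + 1)], False)

-- Checks the triple {n >= 0} (incr >>= return) {\_ n'. n' > 0} on states 0..5.
main :: IO ()
main = print (valid (>= 0) (incr `bindND` \_ -> returnND ()) (\_ n' -> n' > 0) [0 .. 5])

[Running main prints True for these sample states, which is the shape of claim the seq/seq_ext rules below let one establish compositionally rather than by executing the monad. End of editorial aside; the deleted theory text resumes below.]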
-lemma validE_def2: - "\P\ f \Q\,\R\ \ \s. P s \ (\(r,s') \ fst (f s). case r of Inr b \ Q b s' - | Inl a \ R a s')" - by (unfold valid_def validE_def) - -lemma seq': - "\ \A\ f \B\; - \x. P x \ \C\ g x \D\; - \x s. B x s \ P x \ C s \ \ - \A\ do x \ f; g x od \D\" - apply (clarsimp simp: valid_def bind_def) - apply fastforce - done - -lemma seq: - assumes f_valid: "\A\ f \B\" - assumes g_valid: "\x. P x \ \C\ g x \D\" - assumes bind: "\x s. B x s \ P x \ C s" - shows "\A\ do x \ f; g x od \D\" -apply (insert f_valid g_valid bind) -apply (blast intro: seq') -done - -lemma seq_ext': - "\ \A\ f \B\; - \x. \B x\ g x \C\ \ \ - \A\ do x \ f; g x od \C\" - by (fastforce simp: valid_def bind_def Let_def split_def) - -lemma seq_ext: - assumes f_valid: "\A\ f \B\" - assumes g_valid: "\x. \B x\ g x \C\" - shows "\A\ do x \ f; g x od \C\" - apply(insert f_valid g_valid) - apply(blast intro: seq_ext') -done - -lemma seqE': - "\ \A\ f \B\,\E\; - \x. \B x\ g x \C\,\E\ \ \ - \A\ doE x \ f; g x odE \C\,\E\" - apply(simp add:bindE_def lift_def bind_def Let_def split_def) - apply(clarsimp simp:validE_def2) - apply (fastforce simp add: throwError_def return_def lift_def - split: sum.splits) - done - -lemma seqE: - assumes f_valid: "\A\ f \B\,\E\" - assumes g_valid: "\x. \B x\ g x \C\,\E\" - shows "\A\ doE x \ f; g x odE \C\,\E\" - apply(insert f_valid g_valid) - apply(blast intro: seqE') - done - -lemma hoare_TrueI: "\P\ f \\_. \\" - by (simp add: valid_def) - -lemma hoareE_TrueI: "\P\ f \\_. \\, \\r. \\" - by (simp add: validE_def valid_def) - -lemma hoare_True_E_R [simp]: - "\P\ f \\r s. True\, -" - by (auto simp add: validE_R_def validE_def valid_def split: sum.splits) - -lemma hoare_post_conj [intro]: - "\ \ P \ a \ Q \; \ P \ a \ R \ \ \ \ P \ a \ Q And R \" - by (fastforce simp: valid_def split_def bipred_conj_def) - -lemma hoare_pre_disj [intro]: - "\ \ P \ a \ R \; \ Q \ a \ R \ \ \ \ P or Q \ a \ R \" - by (simp add:valid_def pred_disj_def) - -lemma hoare_conj: - "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \P and P'\ f \Q And Q'\" - unfolding valid_def by auto - -lemma hoare_post_taut: "\ P \ a \ \\ \" - by (simp add:valid_def) - -lemma wp_post_taut: "\\r. True\ f \\r s. True\" - by (rule hoare_post_taut) - -lemma wp_post_tautE: "\\r. True\ f \\r s. True\,\\f s. True\" -proof - - have P: "\r. (case r of Inl a \ True | _ \ True) = True" - by (case_tac r, simp_all) - show ?thesis - by (simp add: validE_def P wp_post_taut) -qed - -lemma hoare_pre_cont [simp]: "\ \ \ a \ P \" - by (simp add:valid_def) - - -subsection \Strongest Postcondition Rules\ - -lemma get_sp: - "\P\ get \\a s. s = a \ P s\" - by(simp add:get_def valid_def) - -lemma put_sp: - "\\\ put a \\_ s. s = a\" - by(simp add:put_def valid_def) - -lemma return_sp: - "\P\ return a \\b s. b = a \ P s\" - by(simp add:return_def valid_def) - -lemma assert_sp: - "\ P \ assert Q \ \r s. P s \ Q \" - by (simp add: assert_def fail_def return_def valid_def) - -lemma hoare_gets_sp: - "\P\ gets f \\rv s. rv = f s \ P s\" - by (simp add: valid_def simpler_gets_def) - -lemma hoare_return_drop_var [iff]: "\ Q \ return x \ \r. Q \" - by (simp add:valid_def return_def) - -lemma hoare_gets [intro]: "\ \s. P s \ Q (f s) s \ \ \ P \ gets f \ Q \" - by (simp add:valid_def gets_def get_def bind_def return_def) - -lemma hoare_modifyE_var: - "\ \s. P s \ Q (f s) \ \ \ P \ modify f \ \r s. 
Q s \" - by(simp add: valid_def modify_def put_def get_def bind_def) - -lemma hoare_if: - "\ P \ \ Q \ a \ R \; \ P \ \ Q \ b \ R \ \ \ - \ Q \ if P then a else b \ R \" - by (simp add:valid_def) - -lemma hoare_pre_subst: "\ A = B; \A\ a \C\ \ \ \B\ a \C\" - by(clarsimp simp:valid_def split_def) - -lemma hoare_post_subst: "\ B = C; \A\ a \B\ \ \ \A\ a \C\" - by(clarsimp simp:valid_def split_def) - -lemma hoare_pre_tautI: "\ \A and P\ a \B\; \A and not P\ a \B\ \ \ \A\ a \B\" - by(fastforce simp:valid_def split_def pred_conj_def pred_neg_def) - -lemma hoare_pre_imp: "\ \s. P s \ Q s; \Q\ a \R\ \ \ \P\ a \R\" - by (fastforce simp add:valid_def) - -lemma hoare_post_imp: "\ \r s. Q r s \ R r s; \P\ a \Q\ \ \ \P\ a \R\" - by(fastforce simp:valid_def split_def) - -lemma hoare_post_impErr': "\ \P\ a \Q\,\E\; - \r s. Q r s \ R r s; - \e s. E e s \ F e s \ \ - \P\ a \R\,\F\" - apply (simp add: validE_def) - apply (rule_tac Q="\r s. case r of Inl a \ E a s | Inr b \ Q b s" in hoare_post_imp) - apply (case_tac r) - apply simp_all - done - -lemma hoare_post_impErr: "\ \P\ a \Q\,\E\; - \r s. Q r s \ R r s; - \e s. E e s \ F e s \ \ - \P\ a \R\,\F\" - apply (blast intro: hoare_post_impErr') - done - -lemma hoare_validE_cases: - "\ \ P \ f \ Q \, \ \_ _. True \; \ P \ f \ \_ _. True \, \ R \ \ - \ \ P \ f \ Q \, \ R \" - by (simp add: validE_def valid_def split: sum.splits) blast - -lemma hoare_post_imp_dc: - "\\P\ a \\r. Q\; \s. Q s \ R s\ \ \P\ a \\r. R\,\\r. R\" - by (simp add: validE_def valid_def split: sum.splits) blast - -lemma hoare_post_imp_dc2: - "\\P\ a \\r. Q\; \s. Q s \ R s\ \ \P\ a \\r. R\,\\r s. True\" - by (simp add: validE_def valid_def split: sum.splits) blast - -lemma hoare_post_imp_dc2E: - "\\P\ a \\r. Q\; \s. Q s \ R s\ \ \P\ a \\r s. True\, \\r. R\" - by (simp add: validE_def valid_def split: sum.splits) fast - -lemma hoare_post_imp_dc2E_actual: - "\\P\ a \\r. R\\ \ \P\ a \\r s. True\, \\r. R\" - by (simp add: validE_def valid_def split: sum.splits) fast - -lemma hoare_post_imp_dc2_actual: - "\\P\ a \\r. R\\ \ \P\ a \\r. R\, \\r s. True\" - by (simp add: validE_def valid_def split: sum.splits) fast - -lemma hoare_post_impE: "\ \r s. Q r s \ R r s; \P\ a \Q\ \ \ \P\ a \R\" - by (fastforce simp:valid_def split_def) - -lemma hoare_conjD1: - "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. Q rv\" - unfolding valid_def by auto - -lemma hoare_conjD2: - "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. R rv\" - unfolding valid_def by auto - -lemma hoare_post_disjI1: - "\P\ f \\rv. Q rv\ \ \P\ f \\rv. Q rv or R rv\" - unfolding valid_def by auto - -lemma hoare_post_disjI2: - "\P\ f \\rv. R rv\ \ \P\ f \\rv. Q rv or R rv\" - unfolding valid_def by auto - -lemma hoare_weaken_pre: - "\\Q\ a \R\; \s. P s \ Q s\ \ \P\ a \R\" - apply (rule hoare_pre_imp) - prefer 2 - apply assumption - apply blast - done - -lemma hoare_strengthen_post: - "\\P\ a \Q\; \r s. Q r s \ R r s\ \ \P\ a \R\" - apply (rule hoare_post_imp) - prefer 2 - apply assumption - apply blast - done - -lemma use_valid: "\(r, s') \ fst (f s); \P\ f \Q\; P s \ \ Q r s'" - apply (simp add: valid_def) - apply blast - done - -lemma use_validE_norm: "\ (Inr r', s') \ fst (B s); \ P \ B \ Q \,\ E \; P s \ \ Q r' s'" - apply (clarsimp simp: validE_def valid_def) - apply force - done - -lemma use_validE_except: "\ (Inl r', s') \ fst (B s); \ P \ B \ Q \,\ E \; P s \ \ E r' s'" - apply (clarsimp simp: validE_def valid_def) - apply force - done - -lemma in_inv_by_hoareD: - "\ \P. \P\ f \\_. 
P\; (x,s') \ fst (f s) \ \ s' = s" - by (auto simp add: valid_def) blast - -subsection "Satisfiability" - -lemma exs_hoare_post_imp: "\\r s. Q r s \ R r s; \P\ a \\Q\\ \ \P\ a \\R\" - apply (simp add: exs_valid_def) - apply safe - apply (erule_tac x=s in allE, simp) - apply blast - done - -lemma use_exs_valid: "\\P\ f \\Q\; P s \ \ \(r, s') \ fst (f s). Q r s'" - by (simp add: exs_valid_def) - -definition "exs_postcondition P f \ (\a b. \(rv, s)\ f a b. P rv s)" - -lemma exs_valid_is_triple: - "exs_valid P f Q = triple_judgement P f (exs_postcondition Q (\s f. fst (f s)))" - by (simp add: triple_judgement_def exs_postcondition_def exs_valid_def) - -lemmas [wp_trip] = exs_valid_is_triple - -lemma exs_valid_weaken_pre[wp_pre]: - "\ \ P' \ f \\ Q \; \s. P s \ P' s \ \ \ P \ f \\ Q \" - apply atomize - apply (clarsimp simp: exs_valid_def) - done - -lemma exs_valid_chain: - "\ \ P \ f \\ Q \; \s. R s \ P s; \r s. Q r s \ S r s \ \ \ R \ f \\ S \" - apply atomize - apply (fastforce simp: exs_valid_def Bex_def) - done - -lemma exs_valid_assume_pre: - "\ \s. P s \ \ P \ f \\ Q \ \ \ \ P \ f \\ Q \" - apply (fastforce simp: exs_valid_def) - done - -lemma exs_valid_bind [wp_split]: - "\ \x. \B x\ g x \\C\; \A\ f \\B\ \ \ \ A \ f >>= (\x. g x) \\ C \" - apply atomize - apply (clarsimp simp: exs_valid_def bind_def') - apply blast - done - -lemma exs_valid_return [wp]: - "\ Q v \ return v \\ Q \" - by (clarsimp simp: exs_valid_def return_def) - -lemma exs_valid_select [wp]: - "\ \s. \r \ S. Q r s \ select S \\ Q \" - by (clarsimp simp: exs_valid_def select_def) - -lemma exs_valid_get [wp]: - "\ \s. Q s s \ get \\ Q \" - by (clarsimp simp: exs_valid_def get_def) - -lemma exs_valid_gets [wp]: - "\ \s. Q (f s) s \ gets f \\ Q \" - by (clarsimp simp: gets_def) wp - -lemma exs_valid_put [wp]: - "\ Q v \ put v \\ Q \" - by (clarsimp simp: put_def exs_valid_def) - -lemma exs_valid_state_assert [wp]: - "\ \s. Q () s \ G s \ state_assert G \\ Q \" - by (clarsimp simp: state_assert_def exs_valid_def get_def - assert_def bind_def' return_def) - -lemmas exs_valid_guard = exs_valid_state_assert - -lemma exs_valid_fail [wp]: - "\ \_. False \ fail \\ Q \" - by (clarsimp simp: fail_def exs_valid_def) - -lemma exs_valid_condition [wp]: - "\ \ P \ L \\ Q \; \ P' \ R \\ Q \ \ \ - \ \s. (C s \ P s) \ (\ C s \ P' s) \ condition C L R \\ Q \" - by (clarsimp simp: condition_def exs_valid_def split: sum.splits) - - -subsection MISC - -lemma hoare_return_simp: - "\P\ return x \Q\ = (\s. P s \ Q x s)" - by (simp add: valid_def return_def) - -lemma hoare_gen_asm: - "(P \ \P'\ f \Q\) \ \P' and K P\ f \Q\" - by (fastforce simp add: valid_def) - -lemma hoare_gen_asm_lk: - "(P \ \P'\ f \Q\) \ \K P and P'\ f \Q\" - by (fastforce simp add: valid_def) - -\ \Useful for forward reasoning, when P is known. - The first version allows weakening the precondition.\ -lemma hoare_gen_asm_spec': - "(\s. P s \ S \ R s) - \ (S \ \R\ f \Q\) - \ \P\ f \Q\" - by (fastforce simp: valid_def) - -lemma hoare_gen_asm_spec: - "(\s. P s \ S) - \ (S \ \P\ f \Q\) - \ \P\ f \Q\" - by (rule hoare_gen_asm_spec'[where S=S and R=P]) simp - -lemma hoare_conjI: - "\ \P\ f \Q\; \P\ f \R\ \ \ \P\ f \\r s. Q r s \ R r s\" - unfolding valid_def by blast - -lemma hoare_disjI1: - "\ \P\ f \Q\ \ \ \P\ f \\r s. Q r s \ R r s \" - unfolding valid_def by blast - -lemma hoare_disjI2: - "\ \P\ f \R\ \ \ \P\ f \\r s. Q r s \ R r s \" - unfolding valid_def by blast - -lemma hoare_assume_pre: - "(\s. 
P s \ \P\ f \Q\) \ \P\ f \Q\" - by (auto simp: valid_def) - -lemma hoare_returnOk_sp: - "\P\ returnOk x \\r s. r = x \ P s\, \Q\" - by (simp add: valid_def validE_def returnOk_def return_def) - -lemma hoare_assume_preE: - "(\s. P s \ \P\ f \Q\,\R\) \ \P\ f \Q\,\R\" - by (auto simp: valid_def validE_def) - -lemma hoare_allI: - "(\x. \P\f\Q x\) \ \P\f\\r s. \x. Q x r s\" - by (simp add: valid_def) blast - -lemma validE_allI: - "(\x. \P\ f \\r s. Q x r s\,\E\) \ \P\ f \\r s. \x. Q x r s\,\E\" - by (fastforce simp: valid_def validE_def split: sum.splits) - -lemma hoare_exI: - "\P\ f \Q x\ \ \P\ f \\r s. \x. Q x r s\" - by (simp add: valid_def) blast - -lemma hoare_impI: - "(R \ \P\f\Q\) \ \P\f\\r s. R \ Q r s\" - by (simp add: valid_def) blast - -lemma validE_impI: - " \\E. \P\ f \\_ _. True\,\E\; (P' \ \P\ f \Q\,\E\)\ \ - \P\ f \\r s. P' \ Q r s\, \E\" - by (fastforce simp: validE_def valid_def split: sum.splits) - -lemma hoare_case_option_wp: - "\ \P\ f None \Q\; - \x. \P' x\ f (Some x) \Q' x\ \ - \ \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\" - by (cases v) auto - -subsection "Reasoning directly about states" - -lemma in_throwError: - "((v, s') \ fst (throwError e s)) = (v = Inl e \ s' = s)" - by (simp add: throwError_def return_def) - -lemma in_returnOk: - "((v', s') \ fst (returnOk v s)) = (v' = Inr v \ s' = s)" - by (simp add: returnOk_def return_def) - -lemma in_bind: - "((r,s') \ fst ((do x \ f; g x od) s)) = - (\s'' x. (x, s'') \ fst (f s) \ (r, s') \ fst (g x s''))" - apply (simp add: bind_def split_def) - apply force - done - -lemma in_bindE_R: - "((Inr r,s') \ fst ((doE x \ f; g x odE) s)) = - (\s'' x. (Inr x, s'') \ fst (f s) \ (Inr r, s') \ fst (g x s''))" - apply (simp add: bindE_def lift_def split_def bind_def) - apply (clarsimp simp: throwError_def return_def lift_def split: sum.splits) - apply safe - apply (case_tac a) - apply fastforce - apply fastforce - apply force - done - -lemma in_bindE_L: - "((Inl r, s') \ fst ((doE x \ f; g x odE) s)) \ - (\s'' x. (Inr x, s'') \ fst (f s) \ (Inl r, s') \ fst (g x s'')) \ ((Inl r, s') \ fst (f s))" - apply (simp add: bindE_def lift_def bind_def) - apply safe - apply (simp add: return_def throwError_def lift_def split_def split: sum.splits if_split_asm) - apply force - done - -lemma in_liftE: - "((v, s') \ fst (liftE f s)) = (\v'. 
v = Inr v' \ (v', s') \ fst (f s))" - by (force simp add: liftE_def bind_def return_def split_def) - -lemma in_whenE: "((v, s') \ fst (whenE P f s)) = ((P \ (v, s') \ fst (f s)) \ - (\P \ v = Inr () \ s' = s))" - by (simp add: whenE_def in_returnOk) - -lemma inl_whenE: - "((Inl x, s') \ fst (whenE P f s)) = (P \ (Inl x, s') \ fst (f s))" - by (auto simp add: in_whenE) - -lemma inr_in_unlessE_throwError[termination_simp]: - "(Inr (), s') \ fst (unlessE P (throwError E) s) = (P \ s'=s)" - by (simp add: unlessE_def returnOk_def throwError_def return_def) - -lemma in_fail: - "r \ fst (fail s) = False" - by (simp add: fail_def) - -lemma in_return: - "(r, s') \ fst (return v s) = (r = v \ s' = s)" - by (simp add: return_def) - -lemma in_assert: - "(r, s') \ fst (assert P s) = (P \ s' = s)" - by (simp add: assert_def return_def fail_def) - -lemma in_assertE: - "(r, s') \ fst (assertE P s) = (P \ r = Inr () \ s' = s)" - by (simp add: assertE_def returnOk_def return_def fail_def) - -lemma in_assert_opt: - "(r, s') \ fst (assert_opt v s) = (v = Some r \ s' = s)" - by (auto simp: assert_opt_def in_fail in_return split: option.splits) - -lemma in_get: - "(r, s') \ fst (get s) = (r = s \ s' = s)" - by (simp add: get_def) - -lemma in_gets: - "(r, s') \ fst (gets f s) = (r = f s \ s' = s)" - by (simp add: simpler_gets_def) - -lemma in_put: - "(r, s') \ fst (put x s) = (s' = x \ r = ())" - by (simp add: put_def) - -lemma in_when: - "(v, s') \ fst (when P f s) = ((P \ (v, s') \ fst (f s)) \ (\P \ v = () \ s' = s))" - by (simp add: when_def in_return) - -lemma in_modify: - "(v, s') \ fst (modify f s) = (s'=f s \ v = ())" - by (simp add: modify_def bind_def get_def put_def) - -lemma gets_the_in_monad: - "((v, s') \ fst (gets_the f s)) = (s' = s \ f s = Some v)" - by (auto simp: gets_the_def in_bind in_gets in_assert_opt split: option.split) - -lemma in_alternative: - "(r,s') \ fst ((f \ g) s) = ((r,s') \ fst (f s) \ (r,s') \ fst (g s))" - by (simp add: alternative_def) - -lemmas in_monad = inl_whenE in_whenE in_liftE in_bind in_bindE_L - in_bindE_R in_returnOk in_throwError in_fail - in_assertE in_assert in_return in_assert_opt - in_get in_gets in_put in_when unlessE_whenE - unless_when in_modify gets_the_in_monad - in_alternative - -subsection "Non-Failure" - -lemma no_failD: - "\ no_fail P m; P s \ \ \(snd (m s))" - by (simp add: no_fail_def) - -lemma non_fail_modify [wp,simp]: - "no_fail \ (modify f)" - by (simp add: no_fail_def modify_def get_def put_def bind_def) - -lemma non_fail_gets_simp[simp]: - "no_fail P (gets f)" - unfolding no_fail_def gets_def get_def return_def bind_def - by simp - -lemma non_fail_gets: - "no_fail \ (gets f)" - by simp - -lemma non_fail_select [simp]: - "no_fail \ (select S)" - by (simp add: no_fail_def select_def) - -lemma no_fail_pre: - "\ no_fail P f; \s. 
Q s \ P s\ \ no_fail Q f" - by (simp add: no_fail_def) - -lemma no_fail_alt [wp]: - "\ no_fail P f; no_fail Q g \ \ no_fail (P and Q) (f \ g)" - by (simp add: no_fail_def alternative_def) - -lemma no_fail_return [simp, wp]: - "no_fail \ (return x)" - by (simp add: return_def no_fail_def) - -lemma no_fail_get [simp, wp]: - "no_fail \ get" - by (simp add: get_def no_fail_def) - -lemma no_fail_put [simp, wp]: - "no_fail \ (put s)" - by (simp add: put_def no_fail_def) - -lemma no_fail_when [wp]: - "(P \ no_fail Q f) \ no_fail (if P then Q else \) (when P f)" - by (simp add: when_def) - -lemma no_fail_unless [wp]: - "(\P \ no_fail Q f) \ no_fail (if P then \ else Q) (unless P f)" - by (simp add: unless_def when_def) - -lemma no_fail_fail [simp, wp]: - "no_fail \ fail" - by (simp add: fail_def no_fail_def) - -lemmas [wp] = non_fail_gets - -lemma no_fail_assert [simp, wp]: - "no_fail (\_. P) (assert P)" - by (simp add: assert_def) - -lemma no_fail_assert_opt [simp, wp]: - "no_fail (\_. P \ None) (assert_opt P)" - by (simp add: assert_opt_def split: option.splits) - -lemma no_fail_case_option [wp]: - assumes f: "no_fail P f" - assumes g: "\x. no_fail (Q x) (g x)" - shows "no_fail (if x = None then P else Q (the x)) (case_option f g x)" - by (clarsimp simp add: f g) - -lemma no_fail_if [wp]: - "\ P \ no_fail Q f; \P \ no_fail R g \ \ - no_fail (if P then Q else R) (if P then f else g)" - by simp - -lemma no_fail_apply [wp]: - "no_fail P (f (g x)) \ no_fail P (f $ g x)" - by simp - -lemma no_fail_undefined [simp, wp]: - "no_fail \ undefined" - by (simp add: no_fail_def) - -lemma no_fail_returnOK [simp, wp]: - "no_fail \ (returnOk x)" - by (simp add: returnOk_def) - -lemma no_fail_bind [wp]: - assumes f: "no_fail P f" - assumes g: "\rv. no_fail (R rv) (g rv)" - assumes v: "\Q\ f \R\" - shows "no_fail (P and Q) (f >>= (\rv. g rv))" - apply (clarsimp simp: no_fail_def bind_def) - apply (rule conjI) - prefer 2 - apply (erule no_failD [OF f]) - apply clarsimp - apply (drule (1) use_valid [OF _ v]) - apply (drule no_failD [OF g]) - apply simp - done - -text \Empty results implies non-failure\ - -lemma empty_fail_modify [simp, wp]: - "empty_fail (modify f)" - by (simp add: empty_fail_def simpler_modify_def) - -lemma empty_fail_gets [simp, wp]: - "empty_fail (gets f)" - by (simp add: empty_fail_def simpler_gets_def) - -lemma empty_failD: - "\ empty_fail m; fst (m s) = {} \ \ snd (m s)" - by (simp add: empty_fail_def) - -lemma empty_fail_select_f [simp]: - assumes ef: "fst S = {} \ snd S" - shows "empty_fail (select_f S)" - by (fastforce simp add: empty_fail_def select_f_def intro: ef) - -lemma empty_fail_bind [simp]: - "\ empty_fail a; \x. empty_fail (b x) \ \ empty_fail (a >>= b)" - apply (simp add: bind_def empty_fail_def split_def) - apply clarsimp - apply (case_tac "fst (a s) = {}") - apply blast - apply (clarsimp simp: ex_in_conv [symmetric]) - done - -lemma empty_fail_return [simp, wp]: - "empty_fail (return x)" - by (simp add: empty_fail_def return_def) - -lemma empty_fail_mapM [simp]: - assumes m: "\x. empty_fail (m x)" - shows "empty_fail (mapM m xs)" -proof (induct xs) - case Nil - thus ?case by (simp add: mapM_def sequence_def) -next - case Cons - have P: "\m x xs. 
mapM m (x # xs) = (do y \ m x; ys \ (mapM m xs); return (y # ys) od)" - by (simp add: mapM_def sequence_def Let_def) - from Cons - show ?case by (simp add: P m) -qed - -lemma empty_fail [simp]: - "empty_fail fail" - by (simp add: fail_def empty_fail_def) - -lemma empty_fail_assert_opt [simp]: - "empty_fail (assert_opt x)" - by (simp add: assert_opt_def split: option.splits) - -lemma empty_fail_mk_ef: - "empty_fail (mk_ef o m)" - by (simp add: empty_fail_def mk_ef_def) - -lemma empty_fail_gets_map[simp]: - "empty_fail (gets_map f p)" - unfolding gets_map_def by simp - -subsection "Failure" - -lemma fail_wp: "\\x. True\ fail \Q\" - by (simp add: valid_def fail_def) - -lemma failE_wp: "\\x. True\ fail \Q\,\E\" - by (simp add: validE_def fail_wp) - -lemma fail_update [iff]: - "fail (f s) = fail s" - by (simp add: fail_def) - - -text \We can prove postconditions using hoare triples\ - -lemma post_by_hoare: "\ \P\ f \Q\; P s; (r, s') \ fst (f s) \ \ Q r s'" - apply (simp add: valid_def) - apply blast - done - -text \Weakest Precondition Rules\ - -lemma hoare_vcg_prop: - "\\s. P\ f \\rv s. P\" - by (simp add: valid_def) - -lemma return_wp: - "\P x\ return x \P\" - by(simp add:valid_def return_def) - -lemma get_wp: - "\\s. P s s\ get \P\" - by(simp add:valid_def split_def get_def) - -lemma gets_wp: - "\\s. P (f s) s\ gets f \P\" - by(simp add:valid_def split_def gets_def return_def get_def bind_def) - -lemma modify_wp: - "\\s. P () (f s)\ modify f \P\" - by(simp add:valid_def split_def modify_def get_def put_def bind_def) - -lemma put_wp: - "\\s. P () x\ put x \P\" - by(simp add:valid_def put_def) - -lemma returnOk_wp: - "\P x\ returnOk x \P\,\E\" - by(simp add:validE_def2 returnOk_def return_def) - -lemma throwError_wp: - "\E e\ throwError e \P\,\E\" - by(simp add:validE_def2 throwError_def return_def) - -lemma returnOKE_R_wp : "\P x\ returnOk x \P\, -" - by (simp add: validE_R_def validE_def valid_def returnOk_def return_def) - -lemma liftE_wp: - "\P\ f \Q\ \ \P\ liftE f \Q\,\E\" - by(clarsimp simp:valid_def validE_def2 liftE_def split_def Let_def bind_def return_def) - -lemma catch_wp: - "\ \x. \E x\ handler x \Q\; \P\ f \Q\,\E\ \ \ - \P\ catch f handler \Q\" - apply (unfold catch_def valid_def validE_def return_def) - apply (fastforce simp: bind_def split: sum.splits) - done - -lemma handleE'_wp: - "\ \x. \F x\ handler x \Q\,\E\; \P\ f \Q\,\F\ \ \ - \P\ f handler \Q\,\E\" - apply (unfold handleE'_def valid_def validE_def return_def) - apply (fastforce simp: bind_def split: sum.splits) - done - -lemma handleE_wp: - assumes x: "\x. \F x\ handler x \Q\,\E\" - assumes y: "\P\ f \Q\,\F\" - shows "\P\ f handler \Q\,\E\" - by (simp add: handleE_def handleE'_wp [OF x y]) - -lemma hoare_vcg_if_split: - "\ P \ \Q\ f \S\; \P \ \R\ g \S\ \ \ - \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" - by simp - -lemma hoare_vcg_if_splitE: - "\ P \ \Q\ f \S\,\E\; \P \ \R\ g \S\,\E\ \ \ - \\s. 
(P \ Q s) \ (\P \ R s)\ if P then f else g \S\,\E\" - by simp - -lemma hoare_liftM_subst: "\P\ liftM f m \Q\ = \P\ m \Q \ f\" - apply (simp add: liftM_def bind_def return_def split_def) - apply (simp add: valid_def Ball_def) - apply (rule_tac f=All in arg_cong) - apply (rule ext) - apply fastforce - done - -lemma liftE_validE[simp]: "\P\ liftE f \Q\,\E\ = \P\ f \Q\" - apply (simp add: liftE_liftM validE_def hoare_liftM_subst o_def) - done - -lemma liftM_wp: "\P\ m \Q \ f\ \ \P\ liftM f m \Q\" - by (simp add: hoare_liftM_subst) - -lemma hoare_liftME_subst: "\P\ liftME f m \Q\,\E\ = \P\ m \Q \ f\,\E\" - apply (simp add: validE_def liftME_liftM hoare_liftM_subst o_def) - apply (rule_tac f="valid P m" in arg_cong) - apply (rule ext)+ - apply (case_tac x, simp_all) - done - -lemma liftME_wp: "\P\ m \Q \ f\,\E\ \ \P\ liftME f m \Q\,\E\" - by (simp add: hoare_liftME_subst) - -(* FIXME: Move *) -lemma o_const_simp[simp]: "(\x. C) \ f = (\x. C)" - by (simp add: o_def) - -lemma hoare_vcg_split_case_option: - "\ \x. x = None \ \P x\ f x \R x\; - \x y. x = Some y \ \Q x y\ g x y \R x\ \ \ - \\s. (x = None \ P x s) \ - (\y. x = Some y \ Q x y s)\ - case x of None \ f x - | Some y \ g x y - \R x\" - apply(simp add:valid_def split_def) - apply(case_tac x, simp_all) -done - -lemma hoare_vcg_split_case_optionE: - assumes none_case: "\x. x = None \ \P x\ f x \R x\,\E x\" - assumes some_case: "\x y. x = Some y \ \Q x y\ g x y \R x\,\E x\" - shows "\\s. (x = None \ P x s) \ - (\y. x = Some y \ Q x y s)\ - case x of None \ f x - | Some y \ g x y - \R x\,\E x\" - apply(case_tac x, simp_all) - apply(rule none_case, simp) - apply(rule some_case, simp) -done - -lemma hoare_vcg_split_case_sum: - "\ \x a. x = Inl a \ \P x a\ f x a \R x\; - \x b. x = Inr b \ \Q x b\ g x b \R x\ \ \ - \\s. (\a. x = Inl a \ P x a s) \ - (\b. x = Inr b \ Q x b s) \ - case x of Inl a \ f x a - | Inr b \ g x b - \R x\" - apply(simp add:valid_def split_def) - apply(case_tac x, simp_all) -done - -lemma hoare_vcg_split_case_sumE: - assumes left_case: "\x a. x = Inl a \ \P x a\ f x a \R x\" - assumes right_case: "\x b. x = Inr b \ \Q x b\ g x b \R x\" - shows "\\s. (\a. x = Inl a \ P x a s) \ - (\b. x = Inr b \ Q x b s) \ - case x of Inl a \ f x a - | Inr b \ g x b - \R x\" - apply(case_tac x, simp_all) - apply(rule left_case, simp) - apply(rule right_case, simp) -done - -lemma hoare_vcg_precond_imp: - "\ \Q\ f \R\; \s. P s \ Q s \ \ \P\ f \R\" - by (fastforce simp add:valid_def) - -lemma hoare_vcg_precond_impE: - "\ \Q\ f \R\,\E\; \s. P s \ Q s \ \ \P\ f \R\,\E\" - by (fastforce simp add:validE_def2) - -lemma hoare_seq_ext: - assumes g_valid: "\x. \B x\ g x \C\" - assumes f_valid: "\A\ f \B\" - shows "\A\ do x \ f; g x od \C\" - apply(insert f_valid g_valid) - apply(blast intro: seq_ext') -done - -lemma hoare_vcg_seqE: - assumes g_valid: "\x. \B x\ g x \C\,\E\" - assumes f_valid: "\A\ f \B\,\E\" - shows "\A\ doE x \ f; g x odE \C\,\E\" - apply(insert f_valid g_valid) - apply(blast intro: seqE') -done - -lemma hoare_seq_ext_nobind: - "\ \B\ g \C\; - \A\ f \\_. B\ \ \ - \A\ do f; g od \C\" - apply (clarsimp simp: valid_def bind_def Let_def split_def) - apply fastforce -done - -lemma hoare_seq_ext_nobindE: - "\ \B\ g \C\,\E\; - \A\ f \\_. 
B\,\E\ \ \ - \A\ doE f; g odE \C\,\E\" - apply (clarsimp simp:validE_def) - apply (simp add:bindE_def Let_def split_def bind_def lift_def) - apply (fastforce simp add: valid_def throwError_def return_def lift_def - split: sum.splits) - done - -lemmas hoare_seq_ext_skip' - = hoare_seq_ext[where B=C and C=C for C] - -lemma validE_eq_valid: - "\P\ f \\rv. Q\,\\rv. Q\ = \P\ f \\rv. Q\" - by (simp add: validE_def) - -\ \For forward reasoning in Hoare proofs, these lemmas allow us to step over the - left-hand-side of monadic bind, while keeping the same precondition.\ - -named_theorems forward_inv_step_rules - -lemmas hoare_forward_inv_step_nobind[forward_inv_step_rules] = - hoare_seq_ext_nobind[where B=A and A=A for A, rotated] - -lemmas hoare_seq_ext_skip[forward_inv_step_rules] = - hoare_seq_ext[where B="\_. A" and A=A for A, rotated] - -lemmas hoare_forward_inv_step_nobindE_valid[forward_inv_step_rules] = - hoare_seq_ext_nobindE[where B=A and A=A and E="\_. C" and C="\_. C" for A C, - simplified validE_eq_valid, rotated] - -lemmas hoare_forward_inv_step_valid[forward_inv_step_rules] = - hoare_vcg_seqE[where B="\_. A" and A=A and E="\_. C" and C="\_. C" for A C, - simplified validE_eq_valid, rotated] - -lemmas hoare_forward_inv_step_nobindE[forward_inv_step_rules] = - hoare_seq_ext_nobindE[where B=A and A=A for A, rotated] - -lemmas hoare_seq_ext_skipE[forward_inv_step_rules] = - hoare_vcg_seqE[where B="\_. A" and A=A for A, rotated] - -lemmas hoare_forward_inv_step_nobindE_validE_E[forward_inv_step_rules] = - hoare_forward_inv_step_nobindE[where C="\\", simplified validE_E_def[symmetric]] - -lemmas hoare_forward_inv_step_validE_E[forward_inv_step_rules] = - hoare_seq_ext_skipE[where C="\\", simplified validE_E_def[symmetric]] - -lemmas hoare_forward_inv_step_nobindE_validE_R[forward_inv_step_rules] = - hoare_forward_inv_step_nobindE[where E="\\", simplified validE_R_def[symmetric]] - -lemmas hoare_forward_inv_step_validE_R[forward_inv_step_rules] = - hoare_seq_ext_skipE[where E="\\", simplified validE_R_def[symmetric]] - -method forward_inv_step uses wp simp = - rule forward_inv_step_rules, solves \wpsimp wp: wp simp: simp\ - - -lemma hoare_chain: - "\ \P\ f \Q\; - \s. R s \ P s; - \r s. Q r s \ S r s \ \ - \R\ f \S\" - by(fastforce simp add:valid_def split_def) - -lemma validE_weaken: - "\ \P'\ A \Q'\,\E'\; \s. P s \ P' s; \r s. Q' r s \ Q r s; \r s. E' r s \ E r s \ \ \P\ A \Q\,\E\" - by (fastforce simp: validE_def2 split: sum.splits) - -lemmas hoare_chainE = validE_weaken - -lemma hoare_vcg_handle_elseE: - "\ \P\ f \Q\,\E\; - \e. \E e\ g e \R\,\F\; - \x. 
\Q x\ h x \R\,\F\ \ \ - \P\ f g h \R\,\F\" - apply (simp add: handle_elseE_def validE_def) - apply (rule seq_ext) - apply assumption - apply (case_tac x, simp_all) - done - -lemma alternative_valid: - assumes x: "\P\ f \Q\" - assumes y: "\P\ f' \Q\" - shows "\P\ f \ f' \Q\" - apply (simp add: valid_def alternative_def) - apply safe - apply (simp add: post_by_hoare [OF x]) - apply (simp add: post_by_hoare [OF y]) - done - -lemma alternative_wp: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f' \Q\" - shows "\P and P'\ f \ f' \Q\" - apply (rule alternative_valid) - apply (rule hoare_pre_imp [OF _ x], simp) - apply (rule hoare_pre_imp [OF _ y], simp) - done - -lemma alternativeE_wp: - assumes x: "\P\ f \Q\,\E\" and y: "\P'\ f' \Q\,\E\" - shows "\P and P'\ f \ f' \Q\,\E\" - apply (unfold validE_def) - apply (wp add: x y alternative_wp | simp | fold validE_def)+ - done - -lemma alternativeE_R_wp: - "\ \P\ f \Q\,-; \P'\ f' \Q\,- \ \ \P and P'\ f \ f' \Q\,-" - apply (simp add: validE_R_def) - apply (rule alternativeE_wp) - apply assumption+ - done - -lemma alternative_R_wp: - "\ \P\ f -,\Q\; \P'\ g -,\Q\ \ \ \P and P'\ f \ g -, \Q\" - by (fastforce simp: alternative_def validE_E_def validE_def valid_def) - -lemma select_wp: "\\s. \x \ S. Q x s\ select S \Q\" - by (simp add: select_def valid_def) - -lemma select_f_wp: - "\\s. \x\fst S. Q x s\ select_f S \Q\" - by (simp add: select_f_def valid_def) - -lemma state_select_wp [wp]: "\ \s. \t. (s, t) \ f \ P () t \ state_select f \ P \" - apply (clarsimp simp: state_select_def) - apply (clarsimp simp: valid_def) - done - -lemma condition_wp [wp]: - "\ \ Q \ A \ P \; \ R \ B \ P \ \ \ \ \s. if C s then Q s else R s \ condition C A B \ P \" - apply (clarsimp simp: condition_def) - apply (clarsimp simp: valid_def pred_conj_def pred_neg_def split_def) - done - -lemma conditionE_wp [wp]: - "\ \ P \ A \ Q \,\ R \; \ P' \ B \ Q \,\ R \ \ \ \ \s. if C s then P s else P' s \ condition C A B \Q\,\R\" - apply (clarsimp simp: condition_def) - apply (clarsimp simp: validE_def valid_def) - done - -lemma state_assert_wp [wp]: "\ \s. f s \ P () s \ state_assert f \ P \" - apply (clarsimp simp: state_assert_def get_def - assert_def bind_def valid_def return_def fail_def) - done - -text \The weakest precondition handler which works on conjunction\ - -lemma hoare_vcg_conj_lift: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f \Q'\" - shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" - apply (subst bipred_conj_def[symmetric], rule hoare_post_conj) - apply (rule hoare_pre_imp [OF _ x], simp) - apply (rule hoare_pre_imp [OF _ y], simp) - done - -lemma hoare_vcg_conj_liftE1: - "\ \P\ f \Q\,-; \P'\ f \Q'\,\E\ \ \ - \P and P'\ f \\r s. Q r s \ Q' r s\,\E\" - unfolding valid_def validE_R_def validE_def - apply (clarsimp simp: split_def split: sum.splits) - apply (erule allE, erule (1) impE) - apply (erule allE, erule (1) impE) - apply (drule (1) bspec) - apply (drule (1) bspec) - apply clarsimp - done - -lemma hoare_vcg_disj_lift: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f \Q'\" - shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" - apply (simp add: valid_def) - apply safe - apply (erule(1) post_by_hoare [OF x]) - apply (erule notE) - apply (erule(1) post_by_hoare [OF y]) - done - -lemma hoare_vcg_const_Ball_lift: - "\ \x. x \ S \ \P x\ f \Q x\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\" - by (fastforce simp: valid_def) - -lemma hoare_vcg_const_Ball_lift_R: - "\ \x. x \ S \ \P x\ f \Q x\,- \ \ - \\s. \x \ S. P x s\ f \\rv s. \x \ S. 
Q x rv s\,-" - apply (simp add: validE_R_def validE_def) - apply (rule hoare_strengthen_post) - apply (erule hoare_vcg_const_Ball_lift) - apply (simp split: sum.splits) - done - -lemma hoare_vcg_all_lift: - "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" - by (fastforce simp: valid_def) - -lemma hoare_vcg_all_lift_R: - "(\x. \P x\ f \Q x\, -) \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\, -" - by (rule hoare_vcg_const_Ball_lift_R[where S=UNIV, simplified]) - - -lemma hoare_vcg_imp_lift: - "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" - apply (simp only: imp_conv_disj) - apply (erule(1) hoare_vcg_disj_lift) - done - -lemma hoare_vcg_imp_lift': - "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. \ P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" - apply (simp only: imp_conv_disj) - apply simp - apply (erule (1) hoare_vcg_imp_lift) - done - -lemma hoare_vcg_imp_conj_lift[wp_comb]: - "\P\ f \\rv s. Q rv s \ Q' rv s\ \ \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ - \ \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" - by (auto simp: valid_def) - -lemmas hoare_vcg_imp_conj_lift'[wp_unsafe] = hoare_vcg_imp_conj_lift[where Q'''="\\", simplified] - -lemma hoare_absorb_imp: - "\ P \ f \\rv s. Q rv s \ R rv s \ \ \ P \ f \\rv s. Q rv s \ R rv s \" - by (erule hoare_post_imp[rotated], blast) - -lemma hoare_weaken_imp: - "\ \rv s. Q rv s \ Q' rv s ; \P\ f \\rv s. Q' rv s \ R rv s\ \ - \ \P\ f \\rv s. Q rv s \ R rv s\" - by (clarsimp simp: NonDetMonad.valid_def split_def) - -lemma hoare_vcg_const_imp_lift: - "\ P \ \Q\ m \R\ \ \ - \\s. P \ Q s\ m \\rv s. P \ R rv s\" - by (cases P, simp_all add: hoare_vcg_prop) - -lemma hoare_vcg_const_imp_lift_R: - "(P \ \Q\ m \R\,-) \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" - by (fastforce simp: validE_R_def validE_def valid_def split_def split: sum.splits) - -lemma hoare_weak_lift_imp: - "\P'\ f \Q\ \ \\s. P \ P' s\ f \\rv s. P \ Q rv s\" - by (auto simp add: valid_def split_def) - -lemma hoare_vcg_weaken_imp: - "\ \rv s. Q rv s \ Q' rv s ; \ P \ f \\rv s. Q' rv s \ R rv s\ \ - \ \ P \ f \\rv s. Q rv s \ R rv s\" - by (clarsimp simp: valid_def split_def) - -lemma hoare_vcg_ex_lift: - "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" - by (clarsimp simp: valid_def, blast) - -lemma hoare_vcg_ex_lift_R1: - "(\x. \P x\ f \Q\, -) \ \\s. \x. P x s\ f \Q\, -" - by (fastforce simp: valid_def validE_R_def validE_def split: sum.splits) - -lemma hoare_liftP_ext: - assumes "\P x. m \\s. P (f s x)\" - shows "m \\s. P (f s)\" - unfolding valid_def - apply clarsimp - apply (erule rsubst[where P=P]) - apply (rule ext) - apply (drule use_valid, rule assms, rule refl) - apply simp - done - -(* for instantiations *) -lemma hoare_triv: "\P\f\Q\ \ \P\f\Q\" . -lemma hoare_trivE: "\P\ f \Q\,\E\ \ \P\ f \Q\,\E\" . -lemma hoare_trivE_R: "\P\ f \Q\,- \ \P\ f \Q\,-" . -lemma hoare_trivR_R: "\P\ f -,\E\ \ \P\ f -,\E\" . - -lemma hoare_weaken_preE_E: - "\ \P'\ f -,\Q\; \s. P s \ P' s \ \ \P\ f -,\Q\" - by (fastforce simp add: validE_E_def validE_def valid_def) - -lemma hoare_vcg_E_conj: - "\ \P\ f -,\E\; \P'\ f \Q'\,\E'\ \ - \ \\s. P s \ P' s\ f \Q'\, \\rv s. E rv s \ E' rv s\" - apply (unfold validE_def validE_E_def) - apply (rule hoare_post_imp [OF _ hoare_vcg_conj_lift], simp_all) - apply (case_tac r, simp_all) - done - -lemma hoare_vcg_E_elim: - "\ \P\ f -,\E\; \P'\ f \Q\,- \ - \ \\s. 
P s \ P' s\ f \Q\,\E\" - by (rule hoare_post_impErr [OF hoare_vcg_E_conj], - (simp add: validE_R_def)+) - -lemma hoare_vcg_R_conj: - "\ \P\ f \Q\,-; \P'\ f \Q'\,- \ - \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" - apply (unfold validE_R_def validE_def) - apply (rule hoare_post_imp [OF _ hoare_vcg_conj_lift], simp_all) - apply (case_tac r, simp_all) - done - -lemma valid_validE: - "\P\ f \\rv. Q\ \ \P\ f \\rv. Q\,\\rv. Q\" - apply (simp add: validE_def) - done - -lemma valid_validE2: - "\ \P\ f \\_. Q'\; \s. Q' s \ Q s; \s. Q' s \ E s \ \ \P\ f \\_. Q\,\\_. E\" - unfolding valid_def validE_def - by (clarsimp split: sum.splits) blast - -lemma validE_valid: "\P\ f \\rv. Q\,\\rv. Q\ \ \P\ f \\rv. Q\" - apply (unfold validE_def) - apply (rule hoare_post_imp) - defer - apply assumption - apply (case_tac r, simp_all) - done - -lemma hoare_lift_Pf_E_R: - assumes P: "\x. \P x\ m \\_. P x\, -" - assumes f: "\P. \\s. P (f s)\ m \\_ s. P (f s)\, -" - shows "\\s. P (f s) s\ m \\_ s. P (f s) s\, -" - using P f - apply (clarsimp simp: validE_R_def validE_def valid_def) - apply (rename_tac r s', case_tac r; simp) - apply fastforce - done - -lemma hoare_lift_Pf_E_E: - assumes P: "\x. \P x\ m -, \\_. P x\" - assumes f: "\P. \\s. P (f s)\ m -, \\_ s. P (f s)\" - shows "\\s. P (f s) s\ m -, \\_ s. P (f s) s\" - using P f - apply (clarsimp simp: validE_E_def validE_def valid_def) - apply (rename_tac r s', case_tac r; simp) - apply fastforce - done - -lemma hoare_vcg_const_Ball_lift_E_E: - "\ \x. x \ S \ \P x\ f -,\Q x\ \ \ - \\s. \x \ S. P x s\ f -,\\rv s. \x \ S. Q x rv s\" - apply (simp add: validE_E_def validE_def) - apply (rule hoare_strengthen_post) - apply (erule hoare_vcg_const_Ball_lift) - apply (simp split: sum.splits) - done - -lemma hoare_vcg_all_liftE_E: - "(\x. \P x\ f -, \Q x\) \ \\s. \x. P x s\ f -,\\rv s. \x. Q x rv s\" - by (rule hoare_vcg_const_Ball_lift_E_E[where S=UNIV, simplified]) - -lemma hoare_vcg_imp_liftE_E: - "\\P'\ f -, \\rv s. \ P rv s\; \Q'\ f -, \Q\\ \ - \\s. \ P' s \ Q' s\ f -, \\rv s. P rv s \ Q rv s\" - by (auto simp add: valid_def validE_E_def validE_def split_def split: sum.splits) - -lemma hoare_vcg_ex_liftE: - "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" - by (fastforce simp: validE_def valid_def split: sum.splits) - -lemma hoare_vcg_ex_liftE_E: - "\ \x. \P x\ f -,\E x\ \ \ \\s. \x. P x s\ f -,\\rv s. \x. E x rv s\" - by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) - -lemma valid_validE_R: - "\P\ f \\rv. Q\ \ \P\ f \\rv. Q\,-" - by (simp add: validE_R_def hoare_post_impErr [OF valid_validE]) - -lemma valid_validE_E: - "\P\ f \\rv. Q\ \ \P\ f -,\\rv. Q\" - by (simp add: validE_E_def hoare_post_impErr [OF valid_validE]) - -lemma validE_validE_R: "\P\ f \Q\,\\\\ \ \P\ f \Q\,-" - by (simp add: validE_R_def) - -lemma validE_R_validE: "\P\ f \Q\,- \ \P\ f \Q\,\\\\" - by (simp add: validE_R_def) - -lemma validE_validE_E: "\P\ f \\\\,\E\ \ \P\ f -,\E\" - by (simp add: validE_E_def) - -lemma validE_E_validE: "\P\ f -,\E\ \ \P\ f \\\\,\E\" - by (simp add: validE_E_def) - -lemma hoare_post_imp_R: "\ \P\ f \Q'\,-; \r s. Q' r s \ Q r s \ \ \P\ f \Q\,-" - apply (unfold validE_R_def) - apply (erule hoare_post_impErr, simp+) - done - -lemma hoare_post_imp_E: "\ \P\ f -,\Q'\; \r s. Q' r s \ Q r s \ \ \P\ f -,\Q\" - apply (unfold validE_E_def) - apply (erule hoare_post_impErr, simp+) - done - -lemma hoare_post_comb_imp_conj: - "\ \P'\ f \Q\; \P\ f \Q'\; \s. P s \ P' s \ \ \P\ f \\rv s. 
Q rv s \ Q' rv s\" - apply (rule hoare_pre_imp) - defer - apply (rule hoare_vcg_conj_lift) - apply assumption+ - apply simp - done - -lemma hoare_vcg_precond_impE_R: "\ \P'\ f \Q\,-; \s. P s \ P' s \ \ \P\ f \Q\,-" - by (unfold validE_R_def, rule hoare_vcg_precond_impE, simp+) - -lemma valid_is_triple: - "valid P f Q = triple_judgement P f (postcondition Q (\s f. fst (f s)))" - by (simp add: triple_judgement_def valid_def postcondition_def) - -lemma validE_is_triple: - "validE P f Q E = triple_judgement P f - (postconditions (postcondition Q (\s f. {(rv, s'). (Inr rv, s') \ fst (f s)})) - (postcondition E (\s f. {(rv, s'). (Inl rv, s') \ fst (f s)})))" - apply (simp add: validE_def triple_judgement_def valid_def postcondition_def - postconditions_def split_def split: sum.split) - apply fastforce - done - -lemma validE_R_is_triple: - "validE_R P f Q = triple_judgement P f - (postcondition Q (\s f. {(rv, s'). (Inr rv, s') \ fst (f s)}))" - by (simp add: validE_R_def validE_is_triple postconditions_def postcondition_def) - -lemma validE_E_is_triple: - "validE_E P f E = triple_judgement P f - (postcondition E (\s f. {(rv, s'). (Inl rv, s') \ fst (f s)}))" - by (simp add: validE_E_def validE_is_triple postconditions_def postcondition_def) - -lemmas hoare_wp_combs = hoare_vcg_conj_lift - -lemmas hoare_wp_combsE = - validE_validE_R - hoare_vcg_R_conj - hoare_vcg_E_elim - hoare_vcg_E_conj - -lemmas hoare_wp_state_combsE = - valid_validE_R - hoare_vcg_R_conj[OF valid_validE_R] - hoare_vcg_E_elim[OF valid_validE_E] - hoare_vcg_E_conj[OF valid_validE_E] - -lemmas hoare_classic_wp_combs - = hoare_post_comb_imp_conj hoare_vcg_precond_imp hoare_wp_combs -lemmas hoare_classic_wp_combsE - = hoare_vcg_precond_impE hoare_vcg_precond_impE_R hoare_wp_combsE -lemmas hoare_classic_wp_state_combsE - = hoare_vcg_precond_impE[OF valid_validE] - hoare_vcg_precond_impE_R[OF valid_validE_R] hoare_wp_state_combsE -lemmas all_classic_wp_combs = - hoare_classic_wp_state_combsE hoare_classic_wp_combsE hoare_classic_wp_combs - -lemmas hoare_wp_splits [wp_split] = - hoare_seq_ext hoare_vcg_seqE handleE'_wp handleE_wp - validE_validE_R [OF hoare_vcg_seqE [OF validE_R_validE]] - validE_validE_R [OF handleE'_wp [OF validE_R_validE]] - validE_validE_R [OF handleE_wp [OF validE_R_validE]] - catch_wp hoare_vcg_if_split hoare_vcg_if_splitE - validE_validE_R [OF hoare_vcg_if_splitE [OF validE_R_validE validE_R_validE]] - liftM_wp liftME_wp - validE_validE_R [OF liftME_wp [OF validE_R_validE]] - validE_valid - -lemmas [wp_comb] = hoare_wp_state_combsE hoare_wp_combsE hoare_wp_combs - -lemmas [wp] = hoare_vcg_prop - wp_post_taut - return_wp - put_wp - get_wp - gets_wp - modify_wp - returnOk_wp - throwError_wp - fail_wp - failE_wp - liftE_wp - select_f_wp - -lemmas [wp_trip] = valid_is_triple validE_is_triple validE_E_is_triple validE_R_is_triple - -lemmas validE_E_combs[wp_comb] = - hoare_vcg_E_conj[where Q'="\\", folded validE_E_def] - valid_validE_E - hoare_vcg_E_conj[where Q'="\\", folded validE_E_def, OF valid_validE_E] - -text \Simplifications on conjunction\ - -lemma hoare_post_eq: "\ Q = Q'; \P\ f \Q'\ \ \ \P\ f \Q\" - by simp -lemma hoare_post_eqE1: "\ Q = Q'; \P\ f \Q'\,\E\ \ \ \P\ f \Q\,\E\" - by simp -lemma hoare_post_eqE2: "\ E = E'; \P\ f \Q\,\E'\ \ \ \P\ f \Q\,\E\" - by simp -lemma hoare_post_eqE_R: "\ Q = Q'; \P\ f \Q'\,- \ \ \P\ f \Q\,-" - by simp - -lemma pred_conj_apply_elim: "(\r. Q r and Q' r) = (\r s. Q r s \ Q' r s)" - by (simp add: pred_conj_def) -lemma pred_conj_conj_elim: "(\r s. 
(Q r and Q' r) s \ Q'' r s) = (\r s. Q r s \ Q' r s \ Q'' r s)" - by simp -lemma conj_assoc_apply: "(\r s. (Q r s \ Q' r s) \ Q'' r s) = (\r s. Q r s \ Q' r s \ Q'' r s)" - by simp -lemma all_elim: "(\rv s. \x. P rv s) = P" - by simp -lemma all_conj_elim: "(\rv s. (\x. P rv s) \ Q rv s) = (\rv s. P rv s \ Q rv s)" - by simp - -lemmas vcg_rhs_simps = pred_conj_apply_elim pred_conj_conj_elim - conj_assoc_apply all_elim all_conj_elim - -lemma if_apply_reduct: "\P\ If P' (f x) (g x) \Q\ \ \P\ If P' f g x \Q\" - by (cases P', simp_all) -lemma if_apply_reductE: "\P\ If P' (f x) (g x) \Q\,\E\ \ \P\ If P' f g x \Q\,\E\" - by (cases P', simp_all) -lemma if_apply_reductE_R: "\P\ If P' (f x) (g x) \Q\,- \ \P\ If P' f g x \Q\,-" - by (cases P', simp_all) - -lemmas hoare_wp_simps [wp_split] = - vcg_rhs_simps [THEN hoare_post_eq] vcg_rhs_simps [THEN hoare_post_eqE1] - vcg_rhs_simps [THEN hoare_post_eqE2] vcg_rhs_simps [THEN hoare_post_eqE_R] - if_apply_reduct if_apply_reductE if_apply_reductE_R TrueI - -schematic_goal if_apply_test: "\?Q\ (if A then returnOk else K fail) x \P\,\E\" - by wpsimp - -lemma hoare_elim_pred_conj: - "\P\ f \\r s. Q r s \ Q' r s\ \ \P\ f \\r. Q r and Q' r\" - by (unfold pred_conj_def) - -lemma hoare_elim_pred_conjE1: - "\P\ f \\r s. Q r s \ Q' r s\,\E\ \ \P\ f \\r. Q r and Q' r\,\E\" - by (unfold pred_conj_def) - -lemma hoare_elim_pred_conjE2: - "\P\ f \Q\, \\x s. E x s \ E' x s\ \ \P\ f \Q\,\\x. E x and E' x\" - by (unfold pred_conj_def) - -lemma hoare_elim_pred_conjE_R: - "\P\ f \\r s. Q r s \ Q' r s\,- \ \P\ f \\r. Q r and Q' r\,-" - by (unfold pred_conj_def) - -lemmas hoare_wp_pred_conj_elims = - hoare_elim_pred_conj hoare_elim_pred_conjE1 - hoare_elim_pred_conjE2 hoare_elim_pred_conjE_R - -lemmas hoare_weaken_preE = hoare_vcg_precond_impE - -lemmas hoare_pre [wp_pre] = - hoare_weaken_pre - hoare_weaken_preE - hoare_vcg_precond_impE_R - hoare_weaken_preE_E - -declare no_fail_pre [wp_pre] - -bundle no_pre = hoare_pre [wp_pre del] no_fail_pre [wp_pre del] - -bundle classic_wp_pre = hoare_pre [wp_pre del] no_fail_pre [wp_pre del] - all_classic_wp_combs[wp_comb del] all_classic_wp_combs[wp_comb] - -text \Miscellaneous lemmas on hoare triples\ - -lemma hoare_vcg_mp: - assumes a: "\P\ f \Q\" - assumes b: "\P\ f \\r s. Q r s \ Q' r s\" - shows "\P\ f \Q'\" - using assms - by (auto simp: valid_def split_def) - -(* note about this precond stuff: rules get a chance to bind directly - before any of their combined forms. As a result, these precondition - implication rules are only used when needed. *) - -lemma hoare_add_post: - assumes r: "\P'\ f \Q'\" - assumes impP: "\s. P s \ P' s" - assumes impQ: "\P\ f \\rv s. Q' rv s \ Q rv s\" - shows "\P\ f \Q\" - apply (rule hoare_chain) - apply (rule hoare_vcg_conj_lift) - apply (rule r) - apply (rule impQ) - apply simp - apply (erule impP) - apply simp - done - -lemma hoare_gen_asmE: - "(P \ \P'\ f \Q\,-) \ \P' and K P\ f \Q\, -" - by (simp add: validE_R_def validE_def valid_def) blast - -lemma hoare_list_case: - assumes P1: "\P1\ f f1 \Q\" - assumes P2: "\y ys. 
xs = y#ys \ \P2 y ys\ f (f2 y ys) \Q\" - shows "\case xs of [] \ P1 | y#ys \ P2 y ys\ - f (case xs of [] \ f1 | y#ys \ f2 y ys) - \Q\" - apply (cases xs; simp) - apply (rule P1) - apply (rule P2) - apply simp - done - -lemma hoare_when_wp [wp_split]: - "\ P \ \Q\ f \R\ \ \ \if P then Q else R ()\ when P f \R\" - by (clarsimp simp: when_def valid_def return_def) - -lemma hoare_unless_wp[wp_split]: - "(\P \ \Q\ f \R\) \ \if P then R () else Q\ unless P f \R\" - unfolding unless_def by wp auto - -lemma hoare_whenE_wp: - "(P \ \Q\ f \R\, \E\) \ \if P then Q else R ()\ whenE P f \R\, \E\" - unfolding whenE_def by clarsimp wp - -lemmas hoare_whenE_wps[wp_split] - = hoare_whenE_wp hoare_whenE_wp[THEN validE_validE_R] hoare_whenE_wp[THEN validE_validE_E] - -lemma hoare_unlessE_wp: - "(\ P \ \Q\ f \R\, \E\) \ \if P then R () else Q\ unlessE P f \R\, \E\" - unfolding unlessE_def by wp auto - -lemmas hoare_unlessE_wps[wp_split] - = hoare_unlessE_wp hoare_unlessE_wp[THEN validE_validE_R] hoare_unlessE_wp[THEN validE_validE_E] - -lemma hoare_use_eq: - assumes x: "\P. \\s. P (f s)\ m \\rv s. P (f s)\" - assumes y: "\f. \\s. P f s\ m \\rv s. Q f s\" - shows "\\s. P (f s) s\ m \\rv s. Q (f s :: 'c :: type) s \" - apply (rule_tac Q="\rv s. \f'. f' = f s \ Q f' s" in hoare_post_imp) - apply simp - apply (wpsimp wp: hoare_vcg_ex_lift x y) - done - -lemma hoare_return_sp: - "\P\ return x \\r. P and K (r = x)\" - by (simp add: valid_def return_def) - -lemma hoare_fail_any [simp]: - "\P\ fail \Q\" by wp - -lemma hoare_failE [simp]: "\P\ fail \Q\,\E\" by wp - -lemma hoare_FalseE [simp]: - "\\s. False\ f \Q\,\E\" - by (simp add: valid_def validE_def) - -lemma hoare_K_bind [wp_split]: - "\P\ f \Q\ \ \P\ K_bind f x \Q\" - by simp - -lemma validE_K_bind [wp_split]: - "\ P \ x \ Q \, \ E \ \ \ P \ K_bind x f \ Q \, \ E \" - by simp - -text \Setting up the precondition case splitter.\ - -lemma wpc_helper_valid: - "\Q\ g \S\ \ wpc_helper (P, P') (Q, Q') \P\ g \S\" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_validE: - "\Q\ f \R\,\E\ \ wpc_helper (P, P') (Q, Q') \P\ f \R\,\E\" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_validE_R: - "\Q\ f \R\,- \ wpc_helper (P, P') (Q, Q') \P\ f \R\,-" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_validR_R: - "\Q\ f -,\E\ \ wpc_helper (P, P') (Q, Q') \P\ f -,\E\" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_no_fail_final: - "no_fail Q f \ wpc_helper (P, P') (Q, Q') (no_fail P f)" - by (clarsimp simp: wpc_helper_def elim!: no_fail_pre) - -lemma wpc_helper_empty_fail_final: - "empty_fail f \ wpc_helper (P, P') (Q, Q') (empty_fail f)" - by (clarsimp simp: wpc_helper_def) - -lemma wpc_helper_validNF: - "\Q\ g \S\! \ wpc_helper (P, P') (Q, Q') \P\ g \S\!" - apply (clarsimp simp: wpc_helper_def) - by (metis hoare_vcg_precond_imp no_fail_pre validNF_def) - -wpc_setup "\m. \P\ m \Q\" wpc_helper_valid -wpc_setup "\m. \P\ m \Q\,\E\" wpc_helper_validE -wpc_setup "\m. \P\ m \Q\,-" wpc_helper_validE_R -wpc_setup "\m. \P\ m -,\E\" wpc_helper_validR_R -wpc_setup "\m. no_fail P m" wpc_helper_no_fail_final -wpc_setup "\m. empty_fail m" wpc_helper_empty_fail_final -wpc_setup "\m. \P\ m \Q\!" wpc_helper_validNF - -lemma in_liftM: - "((r, s') \ fst (liftM t f s)) = (\r'. 
(r', s') \ fst (f s) \ r = t r')" - apply (simp add: liftM_def return_def bind_def) - apply (simp add: Bex_def) - done - -(* FIXME: eliminate *) -lemmas handy_liftM_lemma = in_liftM - -lemma hoare_fun_app_wp[wp]: - "\P\ f' x \Q'\ \ \P\ f' $ x \Q'\" - "\P\ f x \Q\,\E\ \ \P\ f $ x \Q\,\E\" - "\P\ f x \Q\,- \ \P\ f $ x \Q\,-" - "\P\ f x -,\E\ \ \P\ f $ x -,\E\" - by simp+ - -lemma hoare_validE_pred_conj: - "\ \P\f\Q\,\E\; \P\f\R\,\E\ \ \ \P\f\Q And R\,\E\" - unfolding valid_def validE_def by (simp add: split_def split: sum.splits) - -lemma hoare_validE_conj: - "\ \P\f\Q\,\E\; \P\f\R\,\E\ \ \ \P\ f \\r s. Q r s \ R r s\,\E\" - unfolding valid_def validE_def by (simp add: split_def split: sum.splits) - -lemmas hoare_valid_validE = valid_validE - -lemma liftE_validE_E [wp]: - "\\\ liftE f -, \Q\" - by (clarsimp simp: validE_E_def valid_def) - -declare validE_validE_E[wp_comb] - -(* - * if_validE_E: - * - * \?P1 \ \?Q1\ ?f1 -, \?E\; \ ?P1 \ \?R1\ ?g1 -, \?E\\ \ \\s. (?P1 \ ?Q1 s) \ (\ ?P1 \ ?R1 s)\ if ?P1 then ?f1 else ?g1 -, \?E\ - *) -lemmas if_validE_E [wp_split] = - validE_validE_E [OF hoare_vcg_if_splitE [OF validE_E_validE validE_E_validE]] - -lemma returnOk_E [wp]: - "\\\ returnOk r -, \Q\" - by (simp add: validE_E_def) wp - -lemma hoare_drop_imp: - "\P\ f \Q\ \ \P\ f \\r s. R r s \ Q r s\" - by (auto simp: valid_def) - -lemma hoare_drop_impE: - "\\P\ f \\r. Q\, \E\\ \ \P\ f \\r s. R r s \ Q s\, \E\" - by (simp add: validE_weaken) - -lemma hoare_drop_impE_R: - "\P\ f \Q\,- \ \P\ f \\r s. R r s \ Q r s\, -" - by (auto simp: validE_R_def validE_def valid_def split_def split: sum.splits) - -lemma hoare_drop_impE_E: - "\P\ f -,\Q\ \ \P\ f -,\\r s. R r s \ Q r s\" - by (auto simp: validE_E_def validE_def valid_def split_def split: sum.splits) - -lemmas hoare_drop_imps = hoare_drop_imp hoare_drop_impE_R hoare_drop_impE_E - -(*This is unsafe, but can be very useful when supplied as a comb rule.*) -lemma hoare_drop_imp_conj[wp_unsafe]: - "\P\ f \Q'\ \ \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ - \ \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" - by (auto simp: valid_def) - -lemmas hoare_drop_imp_conj'[wp_unsafe] = hoare_drop_imp_conj[where Q'''="\\", simplified] - -lemma bind_det_exec: - "fst (a s) = {(r,s')} \ fst ((a >>= b) s) = fst (b r s')" - by (simp add: bind_def) - -lemma in_bind_det_exec: - "fst (a s) = {(r,s')} \ (s'' \ fst ((a >>= b) s)) = (s'' \ fst (b r s'))" - by (simp add: bind_def) - -lemma exec_put: - "(put s' >>= m) s = m () s'" - by (simp add: bind_def put_def) - -lemma bind_execI: - "\ (r'',s'') \ fst (f s); \x \ fst (g r'' s''). P x \ \ - \x \ fst ((f >>= g) s). P x" - by (force simp: in_bind split_def bind_def) - -lemma True_E_E [wp]: "\\\ f -,\\\\" - by (auto simp: validE_E_def validE_def valid_def split: sum.splits) - -(* - * \\x. \?B1 x\ ?g1 x -, \?E\; \?P\ ?f1 \?B1\, \?E\\ \ \?P\ ?f1 >>=E ?g1 -, \?E\ - *) -lemmas [wp_split] = - validE_validE_E [OF hoare_vcg_seqE [OF validE_E_validE]] - -lemma case_option_wp: - assumes x: "\x. \P x\ m x \Q\" - assumes y: "\P'\ m' \Q\" - shows "\\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ - case_option m' m x \Q\" - apply (cases x; simp) - apply (rule y) - apply (rule x) - done - -lemma case_option_wpE: - assumes x: "\x. \P x\ m x \Q\,\E\" - assumes y: "\P'\ m' \Q\,\E\" - shows "\\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ - case_option m' m x \Q\,\E\" - apply (cases x; simp) - apply (rule y) - apply (rule x) - done - -lemma in_bindE: - "(rv, s') \ fst ((f >>=E (\rv'. g rv')) s) = - ((\ex. 
rv = Inl ex \ (Inl ex, s') \ fst (f s)) \ - (\rv' s''. (rv, s') \ fst (g rv' s'') \ (Inr rv', s'') \ fst (f s)))" - apply (rule iffI) - apply (clarsimp simp: bindE_def bind_def) - apply (case_tac a) - apply (clarsimp simp: lift_def throwError_def return_def) - apply (clarsimp simp: lift_def) - apply safe - apply (clarsimp simp: bindE_def bind_def) - apply (erule rev_bexI) - apply (simp add: lift_def throwError_def return_def) - apply (clarsimp simp: bindE_def bind_def) - apply (erule rev_bexI) - apply (simp add: lift_def) - done - -(* - * \?P\ ?m1 -, \?E\ \ \?P\ liftME ?f1 ?m1 -, \?E\ - *) -lemmas [wp_split] = validE_validE_E [OF liftME_wp, simplified, OF validE_E_validE] - -lemma assert_A_True[simp]: "assert True = return ()" - by (simp add: assert_def) - -lemma assert_wp [wp]: "\\s. P \ Q () s\ assert P \Q\" - by (cases P, (simp add: assert_def | wp)+) - -lemma list_cases_wp: - assumes a: "\P_A\ a \Q\" - assumes b: "\x xs. ts = x#xs \ \P_B x xs\ b x xs \Q\" - shows "\case_list P_A P_B ts\ case ts of [] \ a | x # xs \ b x xs \Q\" - by (cases ts, auto simp: a b) - -(* FIXME: make wp *) -lemma whenE_throwError_wp: - "\\s. \Q \ P s\ whenE Q (throwError e) \\rv. P\, -" - unfolding whenE_def by wpsimp - -lemma select_throwError_wp: - "\\s. \x\S. Q x s\ select S >>= throwError -, \Q\" - by (simp add: bind_def throwError_def return_def select_def validE_E_def - validE_def valid_def) - -lemma assert_opt_wp[wp]: - "\\s. x \ None \ Q (the x) s\ assert_opt x \Q\" - by (case_tac x, (simp add: assert_opt_def | wp)+) - -lemma gets_the_wp[wp]: - "\\s. (f s \ None) \ Q (the (f s)) s\ gets_the f \Q\" - by (unfold gets_the_def, wp) - -lemma gets_the_wp': - "\\s. \rv. f s = Some rv \ Q rv s\ gets_the f \Q\" - unfolding gets_the_def by wpsimp - -lemma gets_map_wp: - "\\s. f s p \ None \ Q (the (f s p)) s\ gets_map f p \Q\" - unfolding gets_map_def by wpsimp - -lemma gets_map_wp'[wp]: - "\\s. \rv. f s p = Some rv \ Q rv s\ gets_map f p \Q\" - unfolding gets_map_def by wpsimp - -lemma no_fail_gets_map[wp]: - "no_fail (\s. f s p \ None) (gets_map f p)" - unfolding gets_map_def by wpsimp - -lemma hoare_vcg_set_pred_lift: - assumes "\P x. m \ \s. P (f x s) \" - shows "m \ \s. P {x. f x s} \" - using assms[where P="\x . x"] assms[where P=Not] use_valid - by (fastforce simp: valid_def elim!: rsubst[where P=P]) - -lemma hoare_vcg_set_pred_lift_mono: - assumes f: "\x. m \ f x \" - assumes mono: "\A B. A \ B \ P A \ P B" - shows "m \ \s. P {x. f x s} \" - by (fastforce simp: valid_def elim!: mono[rotated] dest: use_valid[OF _ f]) - - -section "validNF Rules" - -subsection "Basic validNF theorems" - -lemma validNF [intro?]: - "\ \ P \ f \ Q \; no_fail P f \ \ \ P \ f \ Q \!" - by (clarsimp simp: validNF_def) - -lemma validNF_valid: "\ \ P \ f \ Q \! \ \ \ P \ f \ Q \" - by (clarsimp simp: validNF_def) - -lemma validNF_no_fail: "\ \ P \ f \ Q \! \ \ no_fail P f" - by (clarsimp simp: validNF_def) - -lemma snd_validNF: - "\ \ P \ f \ Q \!; P s \ \ \ snd (f s)" - by (clarsimp simp: validNF_def no_fail_def) - -lemma use_validNF: - "\ (r', s') \ fst (f s); \ P \ f \ Q \!; P s \ \ Q r' s'" - by (fastforce simp: validNF_def valid_def) - -subsection "validNF weakest pre-condition rules" - -lemma validNF_return [wp]: - "\ P x \ return x \ P \!" - by (wp validNF)+ - -lemma validNF_get [wp]: - "\ \s. P s s \ get \ P \!" - by (wp validNF)+ - -lemma validNF_put [wp]: - "\ \s. P () x \ put x \ P \!" - by (wp validNF)+ - -lemma validNF_K_bind [wp]: - "\ P \ x \ Q \! \ \ P \ K_bind x f \ Q \!" 
- by simp - -lemma validNF_fail [wp]: - "\ \s. False \ fail \ Q \!" - by (clarsimp simp: validNF_def fail_def no_fail_def) - -lemma validNF_prop [wp_unsafe]: - "\ no_fail (\s. P) f \ \ \ \s. P \ f \ \rv s. P \!" - by (wp validNF)+ - -lemma validNF_post_conj [intro!]: - "\ \ P \ a \ Q \!; \ P \ a \ R \! \ \ \ P \ a \ Q And R \!" - by (auto simp: validNF_def) - -lemma no_fail_or: - "\no_fail P a; no_fail Q a\ \ no_fail (P or Q) a" - by (clarsimp simp: no_fail_def) - -lemma validNF_pre_disj [intro!]: - "\ \ P \ a \ R \!; \ Q \ a \ R \! \ \ \ P or Q \ a \ R \!" - by (rule validNF) (auto dest: validNF_valid validNF_no_fail intro: no_fail_or) - -(* - * Set up combination rules for WP, which also requires - * a "wp_trip" rule for validNF. - *) - -definition "validNF_property Q s b \ \ snd (b s) \ (\(r', s') \ fst (b s). Q r' s')" - -lemma validNF_is_triple [wp_trip]: - "validNF P f Q = triple_judgement P f (validNF_property Q)" - apply (clarsimp simp: validNF_def triple_judgement_def validNF_property_def) - apply (auto simp: no_fail_def valid_def) - done - -lemma validNF_weaken_pre[wp_pre]: - "\\Q\ a \R\!; \s. P s \ Q s\ \ \P\ a \R\!" - by (metis hoare_pre_imp no_fail_pre validNF_def) - -lemma validNF_post_comb_imp_conj: - "\ \P'\ f \Q\!; \P\ f \Q'\!; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\!" - by (fastforce simp: validNF_def valid_def) - -lemma validNF_post_comb_conj_L: - "\ \P'\ f \Q\!; \P\ f \Q'\ \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def valid_def no_fail_def) - apply force - done - -lemma validNF_post_comb_conj_R: - "\ \P'\ f \Q\; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def valid_def no_fail_def) - apply force - done - -lemma validNF_post_comb_conj: - "\ \P'\ f \Q\!; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def valid_def no_fail_def) - apply force - done - -lemma validNF_if_split [wp_split]: - "\P \ \Q\ f \S\!; \ P \ \R\ g \S\!\ \ \\s. (P \ Q s) \ (\ P \ R s)\ if P then f else g \S\!" - by simp - -lemma validNF_vcg_conj_lift: - "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ - \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" - apply (subst bipred_conj_def[symmetric], rule validNF_post_conj) - apply (erule validNF_weaken_pre, fastforce) - apply (erule validNF_weaken_pre, fastforce) - done - -lemma validNF_vcg_disj_lift: - "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ - \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def) - apply safe - apply (auto intro!: hoare_vcg_disj_lift)[1] - apply (clarsimp simp: no_fail_def) - done - -lemma validNF_vcg_all_lift [wp]: - "\ \x. \P x\ f \Q x\! \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\!" - apply atomize - apply (rule validNF) - apply (clarsimp simp: validNF_def) - apply (rule hoare_vcg_all_lift) - apply force - apply (clarsimp simp: no_fail_def validNF_def) - done - -lemma validNF_bind [wp_split]: - "\ \x. \B x\ g x \C\!; \A\ f \B\! \ \ - \A\ do x \ f; g x od \C\!" - apply (rule validNF) - apply (metis validNF_valid hoare_seq_ext) - apply (clarsimp simp: no_fail_def validNF_def bind_def' valid_def) - apply blast - done - -lemmas validNF_seq_ext = validNF_bind - -subsection "validNF compound rules" - -lemma validNF_state_assert [wp]: - "\ \s. P () s \ G s \ state_assert G \ P \!" - apply (rule validNF) - apply wpsimp - apply (clarsimp simp: no_fail_def state_assert_def - bind_def' assert_def return_def get_def) - done - -lemma validNF_modify [wp]: - "\ \s. P () (f s) \ modify f \ P \!" 
- apply (clarsimp simp: modify_def) - apply wp - done - -lemma validNF_gets [wp]: - "\\s. P (f s) s\ gets f \P\!" - apply (clarsimp simp: gets_def) - apply wp - done - -lemma validNF_condition [wp]: - "\ \ Q \ A \P\!; \ R \ B \P\!\ \ \\s. if C s then Q s else R s\ condition C A B \P\!" - apply rule - apply (drule validNF_valid)+ - apply (erule (1) condition_wp) - apply (drule validNF_no_fail)+ - apply (clarsimp simp: no_fail_def condition_def) - done - -lemma validNF_alt_def: - "validNF P m Q = (\s. P s \ ((\(r', s') \ fst (m s). Q r' s') \ \ snd (m s)))" - by (fastforce simp: validNF_def valid_def no_fail_def) - -lemma validNF_assert [wp]: - "\ (\s. P) and (R ()) \ assert P \ R \!" - apply (rule validNF) - apply (clarsimp simp: valid_def in_return) - apply (clarsimp simp: no_fail_def return_def) - done - -lemma validNF_false_pre: - "\ \_. False \ P \ Q \!" - by (clarsimp simp: validNF_def no_fail_def) - -lemma validNF_chain: - "\\P'\ a \R'\!; \s. P s \ P' s; \r s. R' r s \ R r s\ \ \P\ a \R\!" - by (fastforce simp: validNF_def valid_def no_fail_def Ball_def) - -lemma validNF_case_prod [wp]: - "\ \x y. validNF (P x y) (B x y) Q \ \ validNF (case_prod P v) (case_prod (\x y. B x y) v) Q" - by (metis prod.exhaust split_conv) - -lemma validE_NF_case_prod [wp]: - "\ \a b. \P a b\ f a b \Q\, \E\! \ \ - \case x of (a, b) \ P a b\ case x of (a, b) \ f a b \Q\, \E\!" - apply (clarsimp simp: validE_NF_alt_def) - apply (erule validNF_case_prod) - done - -lemma no_fail_is_validNF_True: "no_fail P s = (\ P \ s \ \_ _. True \!)" - by (clarsimp simp: no_fail_def validNF_def valid_def) - -subsection "validNF reasoning in the exception monad" - -lemma validE_NF [intro?]: - "\ \ P \ f \ Q \,\ E \; no_fail P f \ \ \ P \ f \ Q \,\ E \!" - apply (clarsimp simp: validE_NF_def) - done - -lemma validE_NF_valid: - "\ \ P \ f \ Q \,\ E \! \ \ \ P \ f \ Q \,\ E \" - apply (clarsimp simp: validE_NF_def) - done - -lemma validE_NF_no_fail: - "\ \ P \ f \ Q \,\ E \! \ \ no_fail P f" - apply (clarsimp simp: validE_NF_def) - done - -lemma validE_NF_weaken_pre[wp_pre]: - "\\Q\ a \R\,\E\!; \s. P s \ Q s\ \ \P\ a \R\,\E\!" - apply (clarsimp simp: validE_NF_alt_def) - apply (erule validNF_weaken_pre) - apply simp - done - -lemma validE_NF_post_comb_conj_L: - "\ \P\ f \Q\, \ E \!; \P'\ f \Q'\, \ \_ _. True \ \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\, \ E \!" - apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def - valid_def no_fail_def split: sum.splits) - apply force - done - -lemma validE_NF_post_comb_conj_R: - "\ \P\ f \Q\, \ \_ _. True \; \P'\ f \Q'\, \ E \! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\, \ E \!" - apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def - valid_def no_fail_def split: sum.splits) - apply force - done - -lemma validE_NF_post_comb_conj: - "\ \P\ f \Q\, \ E \!; \P'\ f \Q'\, \ E \! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\, \ E \!" - apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def - valid_def no_fail_def split: sum.splits) - apply force - done - -lemma validE_NF_chain: - "\\P'\ a \R'\,\E'\!; - \s. P s \ P' s; - \r' s'. R' r' s' \ R r' s'; - \r'' s''. E' r'' s'' \ E r'' s''\ \ - \\s. P s \ a \\r' s'. R r' s'\,\\r'' s''. E r'' s''\!" - by (fastforce simp: validE_NF_def validE_def2 no_fail_def Ball_def split: sum.splits) - -lemma validE_NF_bind_wp [wp]: - "\\x. \B x\ g x \C\, \E\!; \A\ f \B\, \E\!\ \ \A\ f >>=E (\x. g x) \C\, \E\!" 
- apply (unfold validE_NF_alt_def bindE_def) - apply (rule validNF_bind [rotated]) - apply assumption - apply (clarsimp simp: lift_def throwError_def split: sum.splits) - apply wpsimp - done - -lemma validNF_catch [wp]: - "\\x. \E x\ handler x \Q\!; \P\ f \Q\, \E\!\ \ \P\ f (\x. handler x) \Q\!" - apply (unfold validE_NF_alt_def catch_def) - apply (rule validNF_bind [rotated]) - apply assumption - apply (clarsimp simp: lift_def throwError_def split: sum.splits) - apply wp - done - -lemma validNF_throwError [wp]: - "\E e\ throwError e \P\, \E\!" - by (unfold validE_NF_alt_def throwError_def o_def) wpsimp - -lemma validNF_returnOk [wp]: - "\P e\ returnOk e \P\, \E\!" - by (clarsimp simp: validE_NF_alt_def returnOk_def) wpsimp - -lemma validNF_whenE [wp]: - "(P \ \Q\ f \R\, \E\!) \ \if P then Q else R ()\ whenE P f \R\, \E\!" - unfolding whenE_def by clarsimp wp - -lemma validNF_nobindE [wp]: - "\ \B\ g \C\,\E\!; - \A\ f \\r s. B s\,\E\! \ \ - \A\ doE f; g odE \C\,\E\!" - by clarsimp wp - -text \ -Setup triple rules for @{term validE_NF} so that we can use -wp combinator rules. -\ - -definition "validE_NF_property Q E s b \ \ snd (b s) - \ (\(r', s') \ fst (b s). case r' of Inl x \ E x s' | Inr x \ Q x s')" - -lemma validE_NF_is_triple [wp_trip]: - "validE_NF P f Q E = triple_judgement P f (validE_NF_property Q E)" - apply (clarsimp simp: validE_NF_def validE_def2 no_fail_def triple_judgement_def - validE_NF_property_def split: sum.splits) - apply blast - done - -lemma validNF_cong: - "\ \s. P s = P' s; \s. P s \ m s = m' s; - \r' s' s. \ P s; (r', s') \ fst (m s) \ \ Q r' s' = Q' r' s' \ \ - (\ P \ m \ Q \!) = (\ P' \ m' \ Q' \!)" - by (fastforce simp: validNF_alt_def) - -lemma validE_NF_liftE [wp]: - "\P\ f \Q\! \ \P\ liftE f \Q\,\E\!" - by (wpsimp simp: validE_NF_alt_def liftE_def) - -lemma validE_NF_handleE' [wp]: - "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ - \P\ f (\x. handler x) \Q\,\E\!" - apply (unfold validE_NF_alt_def handleE'_def) - apply (rule validNF_bind [rotated]) - apply assumption - apply (clarsimp split: sum.splits) - apply wpsimp - done - -lemma validE_NF_handleE [wp]: - "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ - \P\ f handler \Q\,\E\!" - apply (unfold handleE_def) - apply (metis validE_NF_handleE') - done - -lemma validE_NF_condition [wp]: - "\ \ Q \ A \P\,\ E \!; \ R \ B \P\,\ E \!\ - \ \\s. if C s then Q s else R s\ condition C A B \P\,\ E \!" - apply rule - apply (drule validE_NF_valid)+ - apply wp - apply (drule validE_NF_no_fail)+ - apply (clarsimp simp: no_fail_def condition_def) - done - -text \Strengthen setup.\ - -context strengthen_implementation begin - -lemma strengthen_hoare [strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ st F (\) (\P\ f \Q\) (\P\ f \R\)" - by (cases F, auto elim: hoare_strengthen_post) - -lemma strengthen_validE_R_cong[strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ st F (\) (\P\ f \Q\, -) (\P\ f \R\, -)" - by (cases F, auto intro: hoare_post_imp_R) - -lemma strengthen_validE_cong[strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ (\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f \Q\, \S\) (\P\ f \R\, \T\)" - by (cases F, auto elim: hoare_post_impErr) - -lemma strengthen_validE_E_cong[strg]: - "(\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f -, \S\) (\P\ f -, \T\)" - by (cases F, auto elim: hoare_post_impErr simp: validE_E_def) - -lemma wpfix_strengthen_hoare: - "(\s. st (\ F) (\) (P s) (P' s)) - \ (\r s. 
st F (\) (Q r s) (Q' r s)) - \ st F (\) (\P\ f \Q\) (\P'\ f \Q'\)" - by (cases F, auto elim: hoare_chain) - -lemma wpfix_strengthen_validE_R_cong: - "(\s. st (\ F) (\) (P s) (P' s)) - \ (\r s. st F (\) (Q r s) (Q' r s)) - \ st F (\) (\P\ f \Q\, -) (\P'\ f \Q'\, -)" - by (cases F, auto elim: hoare_chainE simp: validE_R_def) - -lemma wpfix_strengthen_validE_cong: - "(\s. st (\ F) (\) (P s) (P' s)) - \ (\r s. st F (\) (Q r s) (R r s)) - \ (\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f \Q\, \S\) (\P'\ f \R\, \T\)" - by (cases F, auto elim: hoare_chainE) - -lemma wpfix_strengthen_validE_E_cong: - "(\s. st (\ F) (\) (P s) (P' s)) - \ (\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f -, \S\) (\P'\ f -, \T\)" - by (cases F, auto elim: hoare_chainE simp: validE_E_def) - -lemma wpfix_no_fail_cong: - "(\s. st (\ F) (\) (P s) (P' s)) - \ st F (\) (no_fail P f) (no_fail P' f)" - by (cases F, auto elim: no_fail_pre) - -lemmas nondet_wpfix_strgs = - wpfix_strengthen_validE_R_cong - wpfix_strengthen_validE_E_cong - wpfix_strengthen_validE_cong - wpfix_strengthen_hoare - wpfix_no_fail_cong - -end - -lemmas nondet_wpfix_strgs[wp_fix_strgs] - = strengthen_implementation.nondet_wpfix_strgs - -end diff --git a/lib/Monad_WP/TraceMonad.thy b/lib/Monad_WP/TraceMonad.thy deleted file mode 100644 index f06a538cc1..0000000000 --- a/lib/Monad_WP/TraceMonad.thy +++ /dev/null @@ -1,1191 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) -theory TraceMonad -imports - Lib - Strengthen -begin - -text \ -The ``Interference Trace Monad''. This nondeterministic monad -records the state at every interference point, permitting -nondeterministic interference by the environment at interference -points. - -The trace set initially includes all possible environment behaviours. -Trace steps are tagged as environment or self actions, and can then -be constrained to a smaller set where the environment acts according -to a rely constraint (i.e. rely-guarantee reasoning), or to set the -environment actions to be the self actions of another program (parallel -composition). -\ - -section "The Trace Monad" - -text \Trace monad identifier. Me corresponds to the current thread running and Env to the environment.\ -datatype tmid = Me | Env - -text \Results associated with traces. Traces may correspond to incomplete, failed, or completed executions.\ -datatype ('s, 'a) tmres = Failed | Incomplete | Result "('a \ 's)" - -abbreviation - map_tmres_rv :: "('a \ 'b) \ ('s, 'a) tmres \ ('s, 'b) tmres" -where - "map_tmres_rv f \ map_tmres id f" - -section "The Monad" - -text \ tmonad returns a set of non-deterministic computations, including - a trace as a list of "thread identifier" \ state, and an optional - pair result, state when the computation did not fail. \ -type_synonym ('s, 'a) tmonad = "'s \ ((tmid \ 's) list \ ('s, 'a) tmres) set" - -text \Returns monad results, ignoring failures and traces.\ -definition - mres :: "((tmid \ 's) list \ ('s, 'a) tmres) set \ ('a \ 's) set" -where - "mres r = Result -` (snd ` r)" - -text \ - The definition of fundamental monad functions @{text return} and - @{text bind}. The monad function @{text "return x"} does not change - the state, does not fail, and returns @{text "x"}. -\ -definition - return :: "'a \ ('s,'a) tmonad" -where - "return a \ \s. ({([], Result (a, s))})" - -text \ - The monad function @{text "bind f g"}, also written @{text "f >>= g"}, - is the execution of @{term f} followed by the execution of @{text g}. 
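For readers less used to the Isabelle notation, a minimal Haskell sketch of this trace-monad type together with its return and bind may help; lists stand in for HOL result sets, the names are illustrative, and the deleted theory text remains the authoritative definition.

-- Hypothetical rendering of the trace monad: a computation maps a state to
-- the set (here: a list) of possible (trace, outcome) pairs.
data TmId = Me | Env deriving (Eq, Show)
data Tmres s a = Failed | Incomplete | Result a s deriving Show

type Tmonad s a = s -> [([(TmId, s)], Tmres s a)]

-- 'ret' corresponds to the theory's return: it records no trace, cannot
-- fail, and leaves the state unchanged.
ret :: a -> Tmonad s a
ret a = \s -> [([], Result a s)]

-- 'bind' runs f, then runs g on every completed outcome of f, prepending the
-- new trace; Failed and Incomplete outcomes of f are propagated unchanged.
bind :: Tmonad s a -> (a -> Tmonad s b) -> Tmonad s b
bind f g = \s -> concatMap step (f s)
  where
    step (tr, Failed)      = [(tr, Failed)]
    step (tr, Incomplete)  = [(tr, Incomplete)]
    step (tr, Result a s') = [ (tr' ++ tr, r) | (tr', r) <- g a s' ]

Under this reading, the monad laws stated further down (return_bind, bind_return, bind_assoc) hold as one would expect.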
- The function @{text g} takes the result value \emph{and} the result - state of @{text f} as parameter. The definition says that the result of - the combined operation is the union of the set of sets that is created - by @{text g} applied to the result sets of @{text f}. The combined - operation may have failed, if @{text f} may have failed or @{text g} may - have failed on any of the results of @{text f}. -\ - -abbreviation (input) - fst_upd :: "('a \ 'c) \ 'a \ 'b \ 'c \ 'b" -where - "fst_upd f \ \(a,b). (f a, b)" - -abbreviation (input) - snd_upd :: "('b \ 'c) \ 'a \ 'b \ 'a \ 'c" -where - "snd_upd f \ \(a,b). (a, f b)" - -definition - bind :: "('s, 'a) tmonad \ ('a \ ('s, 'b) tmonad) \ - ('s, 'b) tmonad" (infixl ">>=" 60) -where - "bind f g \ \s. \(xs, r) \ (f s). case r of Failed \ {(xs, Failed)} - | Incomplete \ {(xs, Incomplete)} - | Result (rv, s) \ fst_upd (\ys. ys @ xs) ` g rv s" - -text \Sometimes it is convenient to write @{text bind} in reverse order.\ -abbreviation(input) - bind_rev :: "('c \ ('a, 'b) tmonad) \ ('a, 'c) tmonad \ - ('a, 'b) tmonad" (infixl "=<<" 60) -where - "g =<< f \ f >>= g" - -text \ - The basic accessor functions of the state monad. @{text get} returns - the current state as result, does not fail, and does not change the state. - @{text "put s"} returns nothing (@{typ unit}), changes the current state - to @{text s} and does not fail. -\ -definition - get :: "('s,'s) tmonad" -where - "get \ \s. {([], Result (s, s))}" - -definition - put :: "'s \ ('s, unit) tmonad" -where - "put s \ \st. {([], Result ((), s))}" - -definition - put_trace_elem :: "(tmid \ 's) \ ('s, unit) tmonad" -where - "put_trace_elem x = (\s. {([], Incomplete), ([x], Result ((), s))})" - -primrec - put_trace :: "(tmid \ 's) list \ ('s, unit) tmonad" -where - "put_trace [] = return ()" - | "put_trace (x # xs) = (put_trace xs >>= (\_. put_trace_elem x))" - -subsection "Nondeterminism" - -text \ - Basic nondeterministic functions. @{text "select A"} chooses an element - of the set @{text A}, does not change the state, and does not fail - (even if the set is empty). @{text "f \ g"} executes @{text f} or - executes @{text g}. It retuns the union of results of @{text f} and - @{text g}, and may have failed if either may have failed. -\ -definition - select :: "'a set \ ('s, 'a) tmonad" -where - (* Should we have Failed when A = {} ? *) - "select A \ \s. (Pair [] ` Result ` (A \ {s}))" - -definition - alternative :: "('s,'a) tmonad \ ('s,'a) tmonad \ - ('s,'a) tmonad" - (infixl "\" 20) -where - "f \ g \ \s. (f s \ g s)" - - -text \ The @{text selet_f} function was left out here until we figure - out what variant we actually need. -\ - -subsection "Failure" - -text \ The monad function that always fails. Returns an empty set of -results and sets the failure flag. \ -definition - fail :: "('s, 'a) tmonad" -where - "fail \ \s. {([], Failed)}" - -text \ Assertions: fail if the property @{text P} is not true \ -definition - assert :: "bool \ ('a, unit) tmonad" -where - "assert P \ if P then return () else fail" - -text \ Fail if the value is @{const None}, - return result @{text v} for @{term "Some v"} \ -definition - assert_opt :: "'a option \ ('b, 'a) tmonad" -where - "assert_opt v \ case v of None \ fail | Some v \ return v" - -text \ An assertion that also can introspect the current state. \ - -definition - state_assert :: "('s \ bool) \ ('s, unit) tmonad" -where - "state_assert P \ get >>= (\s. 
assert (P s))" - -subsection "Generic functions on top of the state monad" - -text \ Apply a function to the current state and return the result -without changing the state. \ -definition - gets :: "('s \ 'a) \ ('s, 'a) tmonad" -where - "gets f \ get >>= (\s. return (f s))" - -text \ Modify the current state using the function passed in. \ -definition - modify :: "('s \ 's) \ ('s, unit) tmonad" -where - "modify f \ get >>= (\s. put (f s))" - -lemma simpler_gets_def: "gets f = (\s. {([], Result ((f s), s))})" - by (simp add: fun_eq_iff gets_def return_def bind_def get_def split_def) - -lemma simpler_modify_def: - "modify f = (\s. {([], Result ((),(f s)))})" - by (simp add: fun_eq_iff modify_def bind_def get_def put_def split_def) - -text \ Execute the given monad when the condition is true, - return @{text "()"} otherwise. \ -definition - "when" :: "bool \ ('s, unit) tmonad \ - ('s, unit) tmonad" -where - "when P m \ if P then m else return ()" - -text \ Execute the given monad unless the condition is true, - return @{text "()"} otherwise. \ -definition - unless :: "bool \ ('s, unit) tmonad \ - ('s, unit) tmonad" -where - "unless P m \ when (\P) m" - -text \ - Perform a test on the current state, performing the left monad if - the result is true or the right monad if the result is false. -\ -definition - condition :: "('s \ bool) \ ('s, 'r) tmonad \ ('s, 'r) tmonad \ ('s, 'r) tmonad" -where - "condition P L R \ \s. if (P s) then (L s) else (R s)" - -notation (output) - condition ("(condition (_)// (_)// (_))" [1000,1000,1000] 1000) - -text \ -Apply an option valued function to the current state, fail -if it returns @{const None}, return @{text v} if it returns -@{term "Some v"}. -\ -definition - gets_the :: "('s \ 'a option) \ ('s, 'a) tmonad" -where - "gets_the f \ gets f >>= assert_opt" - - -subsection \ The Monad Laws \ - -text \An alternative definition of bind, sometimes more convenient.\ -lemma bind_def2: - "bind f g \ (\s. ((\xs. (xs, Failed)) ` {xs. (xs, Failed) \ f s}) - \ ((\xs. (xs, Incomplete)) ` {xs. (xs, Incomplete) \ f s}) - \ (\(xs, rv, s) \ {(xs, rv, s'). (xs, Result (rv, s')) \ f s}. fst_upd (\ys. ys @ xs) ` g rv s))" - apply (clarsimp simp add: bind_def fun_eq_iff - Un_Union_image split_def - intro!: eq_reflection) - apply (auto split: tmres.splits elim!:rev_bexI[where A="f x" for x]) - apply (fastforce intro: image_eqI[rotated]) - done - -lemma elem_bindE: - "(tr, res) \ bind f (\x. g x) s - \ ((res = Incomplete | res = Failed) \ (tr, map_tmres undefined undefined res) \ f s \ P) - \ (\tr' tr'' x s'. (tr', Result (x, s')) \ f s \ (tr'', res) \ g x s' - \ tr = tr'' @ tr' \ P) - \ P" - by (auto simp: bind_def2) - -text \ Each monad satisfies at least the following three laws. \ - -text \ @{term return} is absorbed at the left of a @{term bind}, - applying the return value directly: \ - -declare map_option.identity[simp] - -lemma return_bind [simp]: "(return x >>= f) = f x" - by (auto simp add: return_def bind_def split_def split:if_splits) - -text \ @{term return} is absorbed on the right of a @{term bind} \ -lemma bind_return [simp]: "(m >>= return) = m" - by (auto simp add: fun_eq_iff bind_def return_def - split: tmres.splits) - -text \ @{term bind} is associative \ -lemma bind_assoc: - fixes m :: "('a,'b) tmonad" - fixes f :: "'b \ ('a,'c) tmonad" - fixes g :: "'c \ ('a,'d) tmonad" - shows "(m >>= f) >>= g = m >>= (\x. 
f x >>= g)" - apply (unfold bind_def Let_def split_def) - apply (rule ext) - apply clarsimp - apply (rule SUP_cong[OF refl], clarsimp) - apply (split tmres.split; intro conjI impI; clarsimp) - apply (simp add: image_Union) - apply (rule SUP_cong[OF refl], clarsimp) - apply (split tmres.split; intro conjI impI; clarsimp) - apply (simp add: image_image) - done - -section \ Adding Exceptions \ - -text \ - The type @{typ "('s,'a) tmonad"} gives us nondeterminism and - failure. We now extend this monad with exceptional return values - that abort normal execution, but can be handled explicitly. - We use the sum type to indicate exceptions. - - In @{typ "('s, 'e + 'a) tmonad"}, @{typ "'s"} is the state, - @{typ 'e} is an exception, and @{typ 'a} is a normal return value. - - This new type itself forms a monad again. Since type classes in - Isabelle are not powerful enough to express the class of monads, - we provide new names for the @{term return} and @{term bind} functions - in this monad. We call them @{text returnOk} (for normal return values) - and @{text bindE} (for composition). We also define @{text throwError} - to return an exceptional value. -\ -definition - returnOk :: "'a \ ('s, 'e + 'a) tmonad" -where - "returnOk \ return o Inr" - -definition - throwError :: "'e \ ('s, 'e + 'a) tmonad" -where - "throwError \ return o Inl" - -text \ - Lifting a function over the exception type: if the input is an - exception, return that exception; otherwise continue execution. -\ -definition - lift :: "('a \ ('s, 'e + 'b) tmonad) \ - 'e +'a \ ('s, 'e + 'b) tmonad" -where - "lift f v \ case v of Inl e \ throwError e - | Inr v' \ f v'" - -text \ - The definition of @{term bind} in the exception monad (new - name @{text bindE}): the same as normal @{term bind}, but - the right-hand side is skipped if the left-hand side - produced an exception. -\ -definition - bindE :: "('s, 'e + 'a) tmonad \ - ('a \ ('s, 'e + 'b) tmonad) \ - ('s, 'e + 'b) tmonad" (infixl ">>=E" 60) -where - "bindE f g \ bind f (lift g)" - - -text \ - Lifting a normal nondeterministic monad into the - exception monad is achieved by always returning its - result as normal result and never throwing an exception. -\ -definition - liftE :: "('s,'a) tmonad \ ('s, 'e+'a) tmonad" -where - "liftE f \ f >>= (\r. return (Inr r))" - - -text \ - Since the underlying type and @{text return} function changed, - we need new definitions for when and unless: -\ -definition - whenE :: "bool \ ('s, 'e + unit) tmonad \ - ('s, 'e + unit) tmonad" -where - "whenE P f \ if P then f else returnOk ()" - -definition - unlessE :: "bool \ ('s, 'e + unit) tmonad \ - ('s, 'e + unit) tmonad" -where - "unlessE P f \ if P then returnOk () else f" - - -text \ - Throwing an exception when the parameter is @{term None}, otherwise - returning @{term "v"} for @{term "Some v"}. -\ -definition - throw_opt :: "'e \ 'a option \ ('s, 'e + 'a) tmonad" -where - "throw_opt ex x \ - case x of None \ throwError ex | Some v \ returnOk v" - - -text \ - Failure in the exception monad is redefined in the same way - as @{const whenE} and @{const unlessE}, with @{term returnOk} - instead of @{term return}. -\ -definition - assertE :: "bool \ ('a, 'e + unit) tmonad" -where - "assertE P \ if P then returnOk () else fail" - -subsection "Monad Laws for the Exception Monad" - -text \ More direct definition of @{const liftE}: \ -lemma liftE_def2: - "liftE f = (\s. 
snd_upd (map_tmres_rv Inr) ` (f s))" - apply (clarsimp simp: fun_eq_iff liftE_def return_def split_def bind_def image_def) - apply (rule set_eqI) - apply (rule iffI) - apply clarsimp - apply (erule rev_bexI[where A="f s" for s]) - apply (clarsimp split: tmres.splits) - apply clarsimp - apply (rule exI) - apply (rule conjI) - apply (erule rev_bexI[where A="f s" for s]) - apply (rule refl) - apply (clarsimp split: tmres.splits) - done - -text \ Left @{const returnOk} absorbtion over @{term bindE}: \ -lemma returnOk_bindE [simp]: "(returnOk x >>=E f) = f x" - apply (unfold bindE_def returnOk_def) - apply (clarsimp simp: lift_def) - done - -lemma lift_return [simp]: - "lift (return \ Inr) = return" - by (simp add: fun_eq_iff lift_def throwError_def split: sum.splits) - -text \ Right @{const returnOk} absorbtion over @{term bindE}: \ -lemma bindE_returnOk [simp]: "(m >>=E returnOk) = m" - by (simp add: bindE_def returnOk_def) - -text \ Associativity of @{const bindE}: \ -lemma bindE_assoc: - "(m >>=E f) >>=E g = m >>=E (\x. f x >>=E g)" - apply (simp add: bindE_def bind_assoc) - apply (rule arg_cong [where f="\x. m >>= x"]) - apply (rule ext) - apply (case_tac x, simp_all add: lift_def throwError_def) - done - -text \ @{const returnOk} could also be defined via @{const liftE}: \ -lemma returnOk_liftE: - "returnOk x = liftE (return x)" - by (simp add: liftE_def returnOk_def) - -text \ Execution after throwing an exception is skipped: \ -lemma throwError_bindE [simp]: - "(throwError E >>=E f) = throwError E" - by (simp add: fun_eq_iff bindE_def bind_def throwError_def lift_def return_def split_def) - - -section "Syntax" - -text \ This section defines traditional Haskell-like do-syntax - for the state monad in Isabelle. \ - -subsection "Syntax for the Nondeterministic State Monad" - -text \ We use @{text K_bind} to syntactically indicate the - case where the return argument of the left side of a @{term bind} - is ignored \ -definition - K_bind_def [iff]: "K_bind \ \x y. x" - -nonterminal - dobinds and dobind and nobind - -syntax - "_dobind" :: "[pttrn, 'a] => dobind" ("(_ <-/ _)" 10) - "" :: "dobind => dobinds" ("_") - "_nobind" :: "'a => dobind" ("_") - "_dobinds" :: "[dobind, dobinds] => dobinds" ("(_);//(_)") - - "_do" :: "[dobinds, 'a] => 'a" ("(do ((_);//(_))//od)" 100) -syntax (xsymbols) - "_dobind" :: "[pttrn, 'a] => dobind" ("(_ \/ _)" 10) - -translations - "_do (_dobinds b bs) e" == "_do b (_do bs e)" - "_do (_nobind b) e" == "b >>= (CONST K_bind e)" - "do x <- a; e od" == "a >>= (\x. e)" - -text \ Syntax examples: \ -lemma "do x \ return 1; - return (2::nat); - return x - od = - return 1 >>= - (\x. return (2::nat) >>= - K_bind (return x))" - by (rule refl) - -lemma "do x \ return 1; - return 2; - return x - od = return 1" - by simp - -subsection "Interference command" - -text \Interference commands must be inserted in between actions that can be interfered with commands -running in other threads. 
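Before the interference primitives are defined below, the exception layer introduced above can be summed up in a short Haskell sketch; it works over an arbitrary underlying monad, uses Either for the HOL sum type, and is illustrative rather than a definition.

-- returnOk and throwError inject normal and exceptional results; 'lift' runs
-- the continuation only on normal (Right) values and re-raises Left results
-- unchanged, and bindE is ordinary bind composed with this lifting.
returnOk :: Monad m => a -> m (Either e a)
returnOk = return . Right

throwError :: Monad m => e -> m (Either e a)
throwError = return . Left

lift :: Monad m => (a -> m (Either e b)) -> Either e a -> m (Either e b)
lift _ (Left e)  = throwError e
lift g (Right v) = g v

bindE :: Monad m => m (Either e a) -> (a -> m (Either e b)) -> m (Either e b)
bindE f g = f >>= lift g

In particular, bindE (throwError e) g reduces to throwError e, which is exactly the short-circuiting captured by throwError_bindE above.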
\ - -definition - last_st_tr :: "(tmid * 's) list \ 's \ 's" -where - "last_st_tr tr s0 = (hd (map snd tr @ [s0]))" - -definition - env_steps :: "('s,unit) tmonad" -where - "env_steps \ - do - s \ get; - \ \Add unfiltered environment events to the trace\ - xs \ select UNIV; - tr \ return (map (Pair Env) xs); - put_trace tr; - \ \Pick the last event of the trace as the final state\ - put (last_st_tr tr s) - od" - -definition - commit_step :: "('s, unit) tmonad" -where - "commit_step \ - do - s \ get; - put_trace [(Me,s)] - od" - -definition - interference :: "('s,unit) tmonad" -where - "interference \ - do - commit_step; - env_steps - od" - -subsection "Syntax for the Exception Monad" - -text \ - Since the exception monad is a different type, we - need to syntactically distinguish it in the syntax. - We use @{text doE}/@{text odE} for this, but can re-use - most of the productions from @{text do}/@{text od} - above. -\ - -syntax - "_doE" :: "[dobinds, 'a] => 'a" ("(doE ((_);//(_))//odE)" 100) - -translations - "_doE (_dobinds b bs) e" == "_doE b (_doE bs e)" - "_doE (_nobind b) e" == "b >>=E (CONST K_bind e)" - "doE x <- a; e odE" == "a >>=E (\x. e)" - -text \ Syntax examples: \ -lemma "doE x \ returnOk 1; - returnOk (2::nat); - returnOk x - odE = - returnOk 1 >>=E - (\x. returnOk (2::nat) >>=E - K_bind (returnOk x))" - by (rule refl) - -lemma "doE x \ returnOk 1; - returnOk 2; - returnOk x - odE = returnOk 1" - by simp - - - -section "Library of additional Monadic Functions and Combinators" - - -text \ Lifting a normal function into the monad type: \ -definition - liftM :: "('a \ 'b) \ ('s,'a) tmonad \ ('s, 'b) tmonad" -where - "liftM f m \ do x \ m; return (f x) od" - -text \ The same for the exception monad: \ -definition - liftME :: "('a \ 'b) \ ('s,'e+'a) tmonad \ ('s,'e+'b) tmonad" -where - "liftME f m \ doE x \ m; returnOk (f x) odE" - -text \ Run a sequence of monads from left to right, ignoring return values. \ -definition - sequence_x :: "('s, 'a) tmonad list \ ('s, unit) tmonad" -where - "sequence_x xs \ foldr (\x y. x >>= (\_. y)) xs (return ())" - -text \ - Map a monadic function over a list by applying it to each element - of the list from left to right, ignoring return values. -\ -definition - mapM_x :: "('a \ ('s,'b) tmonad) \ 'a list \ ('s, unit) tmonad" -where - "mapM_x f xs \ sequence_x (map f xs)" - -text \ - Map a monadic function with two parameters over two lists, - going through both lists simultaneously, left to right, ignoring - return values. -\ -definition - zipWithM_x :: "('a \ 'b \ ('s,'c) tmonad) \ - 'a list \ 'b list \ ('s, unit) tmonad" -where - "zipWithM_x f xs ys \ sequence_x (zipWith f xs ys)" - - -text \ The same three functions as above, but returning a list of -return values instead of @{text unit} \ -definition - sequence :: "('s, 'a) tmonad list \ ('s, 'a list) tmonad" -where - "sequence xs \ let mcons = (\p q. p >>= (\x. q >>= (\y. return (x#y)))) - in foldr mcons xs (return [])" - -definition - mapM :: "('a \ ('s,'b) tmonad) \ 'a list \ ('s, 'b list) tmonad" -where - "mapM f xs \ sequence (map f xs)" - -definition - zipWithM :: "('a \ 'b \ ('s,'c) tmonad) \ - 'a list \ 'b list \ ('s, 'c list) tmonad" -where - "zipWithM f xs ys \ sequence (zipWith f xs ys)" - -definition - foldM :: "('b \ 'a \ ('s, 'a) tmonad) \ 'b list \ 'a \ ('s, 'a) tmonad" -where - "foldM m xs a \ foldr (\p q. q >>= m p) xs (return a) " - -definition - foldME ::"('b \ 'a \ ('s,('e + 'b)) tmonad) \ 'b \ 'a list \ ('s, ('e + 'b)) tmonad" -where "foldME m a xs \ foldr (\p q. 
q >>=E swp m p) xs (returnOk a)" - -text \ The sequence and map functions above for the exception monad, -with and without lists of return value \ -definition - sequenceE_x :: "('s, 'e+'a) tmonad list \ ('s, 'e+unit) tmonad" -where - "sequenceE_x xs \ foldr (\x y. doE _ <- x; y odE) xs (returnOk ())" - -definition - mapME_x :: "('a \ ('s,'e+'b) tmonad) \ 'a list \ - ('s,'e+unit) tmonad" -where - "mapME_x f xs \ sequenceE_x (map f xs)" - -definition - sequenceE :: "('s, 'e+'a) tmonad list \ ('s, 'e+'a list) tmonad" -where - "sequenceE xs \ let mcons = (\p q. p >>=E (\x. q >>=E (\y. returnOk (x#y)))) - in foldr mcons xs (returnOk [])" - -definition - mapME :: "('a \ ('s,'e+'b) tmonad) \ 'a list \ - ('s,'e+'b list) tmonad" -where - "mapME f xs \ sequenceE (map f xs)" - - -text \ Filtering a list using a monadic function as predicate: \ -primrec - filterM :: "('a \ ('s, bool) tmonad) \ 'a list \ ('s, 'a list) tmonad" -where - "filterM P [] = return []" -| "filterM P (x # xs) = do - b <- P x; - ys <- filterM P xs; - return (if b then (x # ys) else ys) - od" - -text \ @{text select_state} takes a relationship between - states, and outputs nondeterministically a state - related to the input state. \ - -definition - state_select :: "('s \ 's) set \ ('s, unit) tmonad" -where - "state_select r \ (do - s \ get; - S \ return {s'. (s, s') \ r}; - assert (S \ {}); - s' \ select S; - put s' - od)" -section "Catching and Handling Exceptions" - -text \ - Turning an exception monad into a normal state monad - by catching and handling any potential exceptions: -\ -definition - catch :: "('s, 'e + 'a) tmonad \ - ('e \ ('s, 'a) tmonad) \ - ('s, 'a) tmonad" (infix "" 10) -where - "f handler \ - do x \ f; - case x of - Inr b \ return b - | Inl e \ handler e - od" - -text \ - Handling exceptions, but staying in the exception monad. - The handler may throw a type of exceptions different from - the left side. -\ -definition - handleE' :: "('s, 'e1 + 'a) tmonad \ - ('e1 \ ('s, 'e2 + 'a) tmonad) \ - ('s, 'e2 + 'a) tmonad" (infix "" 10) -where - "f handler \ - do - v \ f; - case v of - Inl e \ handler e - | Inr v' \ return (Inr v') - od" - -text \ - A type restriction of the above that is used more commonly in - practice: the exception handle (potentially) throws exception - of the same type as the left-hand side. -\ -definition - handleE :: "('s, 'x + 'a) tmonad \ - ('x \ ('s, 'x + 'a) tmonad) \ - ('s, 'x + 'a) tmonad" (infix "" 10) -where - "handleE \ handleE'" - - -text \ - Handling exceptions, and additionally providing a continuation - if the left-hand side throws no exception: -\ -definition - handle_elseE :: "('s, 'e + 'a) tmonad \ - ('e \ ('s, 'ee + 'b) tmonad) \ - ('a \ ('s, 'ee + 'b) tmonad) \ - ('s, 'ee + 'b) tmonad" - ("_ _ _" 10) -where - "f handler continue \ - do v \ f; - case v of Inl e \ handler e - | Inr v' \ continue v' - od" - -subsection "Loops" - -text \ - Loops are handled using the following inductive predicate; - non-termination is represented using the failure flag of the - monad. 
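As a rough intuition for the loop combinator defined next, the following is a deterministic Haskell sketch of whileLoop over a plain state-passing function; the theory instead defines it over the trace monad via the inductive set below, so that partial and failing unfoldings show up as Incomplete and Failed outcomes rather than through general recursion. The names and the example are only illustrative.

-- Keep running the body while the condition holds of the current loop
-- variable and state; return the final loop variable once it no longer does.
type StateFn s a = s -> (a, s)

whileLoop :: (r -> s -> Bool) -> (r -> StateFn s r) -> r -> StateFn s r
whileLoop c b r s
  | c r s     = let (r', s') = b r s in whileLoop c b r' s'
  | otherwise = (r, s)

-- Example: count the loop variable up to 10, incrementing the state as well.
countTo10 :: (Int, Int)
countTo10 = whileLoop (\r _ -> r < 10) (\r s -> (r + 1, s + 1)) 0 0
-- evaluates to (10, 10)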
-\ -inductive_set - whileLoop_results :: "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ (('r \ 's) \ ((tmid \ 's) list \ ('s, 'r) tmres)) set" - for C B -where - "\ \ C r s \ \ ((r, s), ([], Result (r, s))) \ whileLoop_results C B" - | "\ C r s; (ts, Failed) \ B r s \ \ ((r, s), (ts, Failed)) \ whileLoop_results C B" - | "\ C r s; (ts, Incomplete) \ B r s \ \ ((r, s), (ts, Incomplete)) \ whileLoop_results C B" - | "\ C r s; (ts, Result (r', s')) \ B r s; ((r', s'), (ts',z)) \ whileLoop_results C B \ - \ ((r, s), (ts'@ts,z)) \ whileLoop_results C B" - -inductive_cases whileLoop_results_cases_result_end: "((x,y), ([],Result r)) \ whileLoop_results C B" -inductive_cases whileLoop_results_cases_fail: "((x,y), (ts, Failed)) \ whileLoop_results C B" -inductive_cases whileLoop_results_cases_incomplete: "((x,y), (ts, Incomplete)) \ whileLoop_results C B" - -inductive_simps whileLoop_results_simps_valid: "((x,y), ([], Result z)) \ whileLoop_results C B" - -inductive - whileLoop_terminates :: "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ 's \ bool" - for C B -where - "\ C r s \ whileLoop_terminates C B r s" - | "\ C r s; \(r', s') \ Result -` snd ` (B r s). whileLoop_terminates C B r' s' \ - \ whileLoop_terminates C B r s" - - -inductive_cases whileLoop_terminates_cases: "whileLoop_terminates C B r s" -inductive_simps whileLoop_terminates_simps: "whileLoop_terminates C B r s" - -definition - whileLoop :: "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ ('s, 'r) tmonad" -where - "whileLoop C B \ (\r s. {(ts, res). ((r,s), ts,res) \ whileLoop_results C B})" - -notation (output) - whileLoop ("(whileLoop (_)// (_))" [1000, 1000] 1000) - -definition - whileLoopT :: "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ ('s, 'r) tmonad" -where - "whileLoopT C B \ (\r s. {(ts, res). ((r,s), ts,res) \ whileLoop_results C B - \ whileLoop_terminates C B r s})" - -notation (output) - whileLoopT ("(whileLoopT (_)// (_))" [1000, 1000] 1000) - -definition - whileLoopE :: "('r \ 's \ bool) \ ('r \ ('s, 'e + 'r) tmonad) - \ 'r \ ('s, ('e + 'r)) tmonad" -where - "whileLoopE C body \ - \r. whileLoop (\r s. (case r of Inr v \ C v s | _ \ False)) (lift body) (Inr r)" - -notation (output) - whileLoopE ("(whileLoopE (_)// (_))" [1000, 1000] 1000) - -subsection "Await command" - -text \ @{term "Await c f"} blocks the execution until the @{term "c"} is true, - and atomically executes @{term "f"}. -\ - -definition - Await :: "('s \ bool) \ ('s,unit) tmonad" -where - "Await c \ - do - s \ get; - \ \Add unfiltered environment events, with the last one - satisfying the `c' state predicate\ - xs \ select {xs. c (last_st_tr (map (Pair Env) xs) s)}; - tr \ return (map (Pair Env) xs); - put_trace tr; - \ \Pick the last event of the trace\ - put (last_st_tr tr s) - od" - -section "Hoare Logic" - -subsection "Validity" - -text \ This section defines a Hoare logic for partial correctness for - the nondeterministic state monad as well as the exception monad. - The logic talks only about the behaviour part of the monad and ignores - the failure flag. - - The logic is defined semantically. Rules work directly on the - validity predicate. - - In the nondeterministic state monad, validity is a triple of precondition, - monad, and postcondition. The precondition is a function from state to - bool (a state predicate), the postcondition is a function from return value - to state to bool. 
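As a short, hypothetical illustration of the loop combinator defined above (countdown is not part of this theory, only a sketch):

(* Sketch: repeatedly decrement the loop variable until it reaches zero.
   The condition ignores the state and the body cannot fail. *)
definition countdown :: "nat => ('s, nat) tmonad" where
  "countdown n = whileLoop (%r s. 0 < r) (%r. return (r - 1)) n"

Returning to the definition of validity: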
A triple is valid if for all states that satisfy the - precondition, all result values and result states that are returned by - the monad satisfy the postcondition. Note that if the computation returns - the empty set, the triple is trivially valid. This means @{term "assert P"} - does not require us to prove that @{term P} holds, but rather allows us - to assume @{term P}! Proving non-failure is done via separate predicate and - calculus (see below). -\ - - -definition - valid :: "('s \ bool) \ ('s,'a) tmonad \ ('a \ 's \ bool) \ bool" - ("\_\/ _ /\_\") -where - "\P\ f \Q\ \ \s. P s \ (\(r,s') \ mres (f s). Q r s')" - -text \ - We often reason about invariant predicates. The following provides shorthand syntax - that avoids repeating potentially long predicates. -\ -abbreviation (input) - invariant :: "('s,'a) tmonad \ ('s \ bool) \ bool" ("_ \_\" [59,0] 60) -where - "invariant f P \ \P\ f \\_. P\" - -text \rg_pred type: Rely-Guaranty predicates (state before => state after => bool)\ -type_synonym 's rg_pred = "'s \ 's \ bool" - - -text \ - Validity for the exception monad is similar and build on the standard - validity above. Instead of one postcondition, we have two: one for - normal and one for exceptional results. -\ -definition - validE :: "('s \ bool) \ ('s, 'a + 'b) tmonad \ - ('b \ 's \ bool) \ - ('a \ 's \ bool) \ bool" -("\_\/ _ /(\_\,/ \_\)" ) -where - "\P\ f \Q\,\E\ \ \P\ f \ \v s. case v of Inr r \ Q r s | Inl e \ E e s \" -(* -text \ Validity for exception monad with interferences. Not as easy to phrase - as we need to \ -definition - validIE :: "('s, 'a + 'b) tmonad \ - 's rg_pred \ - 's rg_pred \ 's rg_pred \ - ('b \ 's rg_pred) \ - ('a \ 's rg_pred) \ bool" - ("_ //PRE _//RELY _//GUAR _//POST _//EXC _" [59,0,0,0,0,0] 60) -where - "validIE f P R G Q E \ f SAT [P,R,G,\v. case v of Inr r \ Q r | Inl e \ E e]" - -abbreviation (input) - validIEsat :: "('s, 'a + 'b) tmonad \ - 's rg_pred \ - 's rg_pred \ 's rg_pred \ - ('b \ 's rg_pred) \ - ('a \ 's rg_pred) \ bool" - ("_ //SAT [_, _, _, _, _]" [59,0,0,0,0,0] 60) - where - "validIEsat f P R G Q E \ validIE f P R G Q E" - *) -text \ - The following two instantiations are convenient to separate reasoning - for exceptional and normal case. -\ -definition - validE_R :: "('s \ bool) \ ('s, 'e + 'a) tmonad \ - ('a \ 's \ bool) \ bool" - ("\_\/ _ /\_\, -") -where - "\P\ f \Q\,- \ validE P f Q (\x y. True)" - -definition - validE_E :: "('s \ bool) \ ('s, 'e + 'a) tmonad \ - ('e \ 's \ bool) \ bool" - ("\_\/ _ /-, \_\") -where - "\P\ f -,\Q\ \ validE P f (\x y. True) Q" - - -text \ Abbreviations for trivial preconditions: \ -abbreviation(input) - top :: "'a \ bool" ("\") -where - "\ \ \_. True" - -abbreviation(input) - bottom :: "'a \ bool" ("\") -where - "\ \ \_. False" - -text \ Abbreviations for trivial postconditions (taking two arguments): \ -abbreviation(input) - toptop :: "'a \ 'b \ bool" ("\\") -where - "\\ \ \_ _. True" - -abbreviation(input) - toptoptop :: "'a \ 'b \ 'b \ bool" ("\\\") -where - "\\\ \ \_ _ _. True" - -abbreviation(input) - botbot :: "'a \ 'b \ bool" ("\\") -where - "\\ \ \_ _. False" - -abbreviation(input) - botbotbot :: "'a \ 'b \ 'b \ bool" ("\\\") -where - "\\\ \ \_ _ _. False" - -text \ - Lifting @{text "\"} and @{text "\"} over two arguments. - Lifting @{text "\"} and @{text "\"} over one argument is already - defined (written @{text "and"} and @{text "or"}). -\ -definition - bipred_conj :: "('a \ 'b \ bool) \ ('a \ 'b \ bool) \ ('a \ 'b \ bool)" - (infixl "And" 96) -where - "bipred_conj P Q \ \x y. 
P x y \ Q x y" - -definition - bipred_disj :: "('a \ 'b \ bool) \ ('a \ 'b \ bool) \ ('a \ 'b \ bool)" - (infixl "Or" 91) -where - "bipred_disj P Q \ \x y. P x y \ Q x y" - -subsection "Determinism" - -text \ A monad of type @{text tmonad} is deterministic iff it -returns an empty trace, exactly one state and result and does not fail \ -definition - det :: "('a,'s) tmonad \ bool" -where - "det f \ \s. \r. f s = {([], Result r)}" - -text \ A deterministic @{text tmonad} can be turned - into a normal state monad: \ -definition - the_run_state :: "('s,'a) tmonad \ 's \ 'a \ 's" -where - "the_run_state M \ \s. THE s'. mres (M s) = {s'}" - - -subsection "Non-Failure" - -text \ - We can formulate non-failure separately from validity. -\ -definition - no_fail :: "('s \ bool) \ ('s,'a) tmonad \ bool" -where - "no_fail P m \ \s. P s \ Failed \ snd ` (m s)" - -text \ - It is often desired to prove non-failure and a Hoare triple - simultaneously, as the reasoning is often similar. The following - definitions allow such reasoning to take place. -\ - -definition - validNF ::"('s \ bool) \ ('s,'a) tmonad \ ('a \ 's \ bool) \ bool" - ("\_\/ _ /\_\!") -where - "validNF P f Q \ valid P f Q \ no_fail P f" - -definition - validE_NF :: "('s \ bool) \ ('s, 'a + 'b) tmonad \ - ('b \ 's \ bool) \ - ('a \ 's \ bool) \ bool" - ("\_\/ _ /(\_\,/ \_\!)") -where - "validE_NF P f Q E \ validE P f Q E \ no_fail P f" - -lemma validE_NF_alt_def: - "\ P \ B \ Q \,\ E \! = \ P \ B \ \v s. case v of Inl e \ E e s | Inr r \ Q r s \!" - by (clarsimp simp: validE_NF_def validE_def validNF_def) - -(* text \ - Usually, well-formed monads constructed from the primitives - above will have the following property: if they return an - empty set of results, they will have the failure flag set. -\ -definition - empty_fail :: "('s,'a) tmonad \ bool" -where - "empty_fail m \ \s. fst (m s) = {} \ snd (m s)" - -text \ - Useful in forcing otherwise unknown executions to have - the @{const empty_fail} property. -\ -definition - mk_ef :: "'a set \ bool \ 'a set \ bool" -where - "mk_ef S \ (fst S, fst S = {} \ snd S)" - *) -section "Basic exception reasoning" - -text \ - The following predicates @{text no_throw} and @{text no_return} allow - reasoning that functions in the exception monad either do - no throw an exception or never return normally. -\ - -definition "no_throw P A \ \ P \ A \ \_ _. True \,\ \_ _. False \" - -definition "no_return P A \ \ P \ A \\_ _. False\,\\_ _. True \" - -section "Trace monad Parallel" - -definition - parallel :: "('s,'a) tmonad \ ('s,'a) tmonad \ ('s,'a) tmonad" -where - "parallel f g = (\s. {(xs, rv). \f_steps. length f_steps = length xs - \ (map (\(f_step, (id, s)). (if f_step then id else Env, s)) (zip f_steps xs), rv) \ f s - \ (map (\(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps xs), rv) \ g s})" - -abbreviation(input) - "parallel_mrg \ ((\((idn, s), (idn', _)). (if idn = Env then idn' else idn, s)))" - -lemma parallel_def2: - "parallel f g = (\s. {(xs, rv). \ys zs. (ys, rv) \ f s \ (zs, rv) \ g s - \ list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ys zs - \ xs = map parallel_mrg (zip ys zs)})" - apply (simp add: parallel_def fun_eq_iff set_eq_iff) - apply safe - apply (rule exI, rule conjI, assumption)+ - apply (simp add: list_all2_conv_all_nth list_eq_iff_nth_eq split_def prod_eq_iff) - apply clarsimp - apply (rule_tac x="map (((\) Env) o fst) ys" in exI) - apply (simp add: zip_map1 o_def split_def) - apply (strengthen subst[where P="\xs. 
(xs, v) \ S" for v S, mk_strg I _ E]) - apply (clarsimp simp: list_all2_conv_all_nth list_eq_iff_nth_eq - split_def prod_eq_iff - split del: if_split cong: if_cong) - apply auto - done - -lemma parallel_def3: - "parallel f g = (\s. (\(ys, zs, rv). (map parallel_mrg (zip ys zs), rv)) - ` {(ys, zs, rv). (ys, rv) \ f s \ (zs, rv) \ g s - \ list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ys zs})" - by (simp add: parallel_def2, rule ext, auto simp: image_def) - -primrec - trace_steps :: "(tmid \ 's) list \ 's \ (tmid \ 's \ 's) set" -where - "trace_steps (elem # trace) s0 = {(fst elem, s0, snd elem)} \ trace_steps trace (snd elem)" -| "trace_steps [] s0 = {}" - -lemma trace_steps_nth: - "trace_steps xs s0 = (\i. (fst (xs ! i), (if i = 0 then s0 else snd (xs ! (i - 1))), snd (xs ! i))) ` {..< length xs}" -proof (induct xs arbitrary: s0) - case Nil - show ?case by simp -next - case (Cons a xs) - show ?case - apply (simp add: lessThan_Suc_eq_insert_0 Cons image_image nth_Cons') - apply (intro arg_cong2[where f=insert] refl image_cong) - apply simp - done -qed - -definition - rely_cond :: "'s rg_pred \ 's \ (tmid \ 's) list \ bool" -where - "rely_cond R s0s tr = (\(ident, s0, s) \ trace_steps (rev tr) s0s. ident = Env \ R s0 s)" - -definition - guar_cond :: "'s rg_pred \ 's \ (tmid \ 's) list \ bool" -where - "guar_cond G s0s tr = (\(ident, s0, s) \ trace_steps (rev tr) s0s. ident = Me \ G s0 s)" - -lemma rg_empty_conds[simp]: - "rely_cond R s0s []" - "guar_cond G s0s []" - by (simp_all add: rely_cond_def guar_cond_def) - -definition - rely :: "('s, 'a) tmonad \ 's rg_pred \ 's \ ('s, 'a) tmonad" -where - "rely f R s0s \ (\s. f s \ ({tr. rely_cond R s0s tr} \ UNIV))" - -definition - prefix_closed :: "('s, 'a) tmonad \ bool" -where - "prefix_closed f = (\s. \x xs. (x # xs) \ fst ` f s \ (xs, Incomplete) \ f s)" - -definition - validI :: "('s \ 's \ bool) \ 's rg_pred \ ('s,'a) tmonad - \ 's rg_pred \ ('a \ 's \ 's \ bool) \ bool" - ("(\_\,/ \_\)/ _ /(\_\,/ \_\)") -where - "\P\,\R\ f \G\,\Q\ \ prefix_closed f \ (\s0 s. P s0 s - \ (\tr res. (tr, res) \ (rely f R s0 s) \ guar_cond G s0 tr - \ (\rv s'. res = Result (rv, s') \ Q rv (last_st_tr tr s0) s')))" - -lemma in_rely: - "\ (tr, res) \ f s; rely_cond R s0s tr \ \ (tr, res) \ rely f R s0s s" - by (simp add: rely_def) - -lemmas validI_D = validI_def[THEN meta_eq_to_obj_eq, THEN iffD1, - THEN conjunct2, rule_format, OF _ _ in_rely] -lemmas validI_GD = validI_D[THEN conjunct1] -lemmas validI_rvD = validI_D[THEN conjunct2, rule_format, rotated -1, OF refl] -lemmas validI_prefix_closed = validI_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct1] -lemmas validI_prefix_closed_T = validI_prefix_closed[where P="\_ _. False" and R="\_ _. False" - and G="\_ _. True" and Q="\_ _ _. True"] - -lemmas prefix_closedD1 = prefix_closed_def[THEN iffD1, rule_format] - -lemma in_fst_snd_image_eq: - "x \ fst ` S = (\y. (x, y) \ S)" - "y \ snd ` S = (\x. 
(x, y) \ S)" - by (auto elim: image_eqI[rotated]) - -lemma in_fst_snd_image: - "(x, y) \ S \ x \ fst ` S" - "(x, y) \ S \ y \ snd ` S" - by (auto simp: in_fst_snd_image_eq) - -lemmas prefix_closedD = prefix_closedD1[OF _ in_fst_snd_image(1)] - -end diff --git a/lib/Monad_WP/TraceMonadLemmas.thy b/lib/Monad_WP/TraceMonadLemmas.thy deleted file mode 100644 index 45e4aedbeb..0000000000 --- a/lib/Monad_WP/TraceMonadLemmas.thy +++ /dev/null @@ -1,379 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) -theory TraceMonadLemmas -imports TraceMonadVCG -begin - -lemma mapM_Cons: - "mapM f (x # xs) = do - y \ f x; - ys \ mapM f xs; - return (y # ys) - od" - and mapM_Nil: - "mapM f [] = return []" - by (simp_all add: mapM_def sequence_def) - -lemma mapM_x_Cons: - "mapM_x f (x # xs) = do - y \ f x; - mapM_x f xs - od" - and mapM_x_Nil: - "mapM_x f [] = return ()" - by (simp_all add: mapM_x_def sequence_x_def) - -lemma mapM_append: - "mapM f (xs @ ys) = (do - fxs \ mapM f xs; - fys \ mapM f ys; - return (fxs @ fys) - od)" - by (induct xs, simp_all add: mapM_Cons mapM_Nil bind_assoc) - -lemma mapM_x_append: - "mapM_x f (xs @ ys) = (do - x \ mapM_x f xs; - mapM_x f ys - od)" - by (induct xs, simp_all add: mapM_x_Cons mapM_x_Nil bind_assoc) - -lemma mapM_map: - "mapM f (map g xs) = mapM (f o g) xs" - by (induct xs; simp add: mapM_Nil mapM_Cons) - -lemma mapM_x_map: - "mapM_x f (map g xs) = mapM_x (f o g) xs" - by (induct xs; simp add: mapM_x_Nil mapM_x_Cons) - -primrec - repeat_n :: "nat \ ('s, unit) tmonad \ ('s, unit) tmonad" -where - "repeat_n 0 f = return ()" - | "repeat_n (Suc n) f = do f; repeat_n n f od" - -lemma repeat_n_mapM_x: - "repeat_n n f = mapM_x (\_. f) (replicate n ())" - by (induct n, simp_all add: mapM_x_Cons mapM_x_Nil) - -definition - repeat :: "('s, unit) tmonad \ ('s, unit) tmonad" -where - "repeat f = do n \ select UNIV; repeat_n n f od" - -definition - env_step :: "('s,unit) tmonad" -where - "env_step = - (do - s' \ select UNIV; - put_trace_elem (Env, s'); - put s' - od)" - -abbreviation - "env_n_steps n \ repeat_n n env_step" - -lemma elem_select_bind: - "(tr, res) \ (do x \ select S; f x od) s - = (\x \ S. (tr, res) \ f x s)" - by (simp add: bind_def select_def) - -lemma select_bind_UN: - "(do x \ select S; f x od) = (\s. \x \ S. f x s)" - by (rule ext, auto simp: elem_select_bind) - -lemma select_early: - "S \ {} - \ do x \ f; y \ select S; g x y od - = do y \ select S; x \ f; g x y od" - apply (simp add: bind_def select_def Sigma_def) - apply (rule ext) - apply (fastforce elim: rev_bexI image_eqI[rotated] split: tmres.split_asm) - done - -lemma repeat_n_choice: - "S \ {} - \ repeat_n n (do x \ select S; f x od) - = (do xs \ select {xs. set xs \ S \ length xs = n}; mapM_x f xs od)" - apply (induct n; simp?) - apply (simp add: select_def bind_def mapM_x_Nil cong: conj_cong) - apply (simp add: select_early bind_assoc) - apply (subst select_early) - apply simp - apply (auto intro: exI[where x="replicate n xs" for n xs])[1] - apply (simp(no_asm) add: fun_eq_iff set_eq_iff elem_select_bind) - apply (simp only: conj_comms[where Q="length xs = n" for xs n]) - apply (simp only: ex_simps[symmetric] conj_assoc length_Suc_conv, simp) - apply (auto simp: mapM_x_Cons) - done - -lemma repeat_choice: - "S \ {} - \ repeat (do x \ select S; f x od) - = (do xs \ select {xs. 
set xs \ S}; mapM_x f xs od)" - apply (simp add: repeat_def repeat_n_choice) - apply (simp(no_asm) add: fun_eq_iff set_eq_iff elem_select_bind) - done - -lemma put_trace_append: - "put_trace (xs @ ys) = do put_trace ys; put_trace xs od" - by (induct xs; simp add: bind_assoc) - -lemma put_trace_elem_put_comm: - "do y \ put_trace_elem x; put s od - = do y \ put s; put_trace_elem x od" - by (simp add: put_def put_trace_elem_def bind_def insert_commute) - -lemma put_trace_put_comm: - "do y \ put_trace xs; put s od - = do y \ put s; put_trace xs od" - apply (rule sym; induct xs; simp) - apply (simp add: bind_assoc put_trace_elem_put_comm) - apply (simp add: bind_assoc[symmetric]) - done - -lemma mapM_x_comm: - "(\x \ set xs. do y \ g; f x od = do y \ f x; g od) - \ do y \ g; mapM_x f xs od = do y \ mapM_x f xs; g od" - apply (induct xs; simp add: mapM_x_Nil mapM_x_Cons) - apply (simp add: bind_assoc[symmetric], simp add: bind_assoc) - done - -lemma mapM_x_split: - "(\x \ set xs. \y \ set xs. do z \ g y; f x od = do (z :: unit) \ f x; g y od) - \ mapM_x (\x. do z \ f x; g x od) xs = do y \ mapM_x f xs; mapM_x g xs od" - apply (induct xs; simp add: mapM_x_Nil mapM_x_Cons bind_assoc) - apply (subst bind_assoc[symmetric], subst mapM_x_comm[where f=f and g="g x" for x]) - apply simp - apply (simp add: bind_assoc) - done - -lemma mapM_x_put: - "mapM_x put xs = unless (xs = []) (put (last xs))" - apply (induct xs rule: rev_induct) - apply (simp add: mapM_x_Nil unless_def when_def) - apply (simp add: mapM_x_append mapM_x_Cons mapM_x_Nil) - apply (simp add: bind_def unless_def when_def put_def return_def) - done - -lemma put_trace_mapM_x: - "put_trace xs = mapM_x put_trace_elem (rev xs)" - by (induct xs; simp add: mapM_x_Nil mapM_x_append mapM_x_Cons) - -lemma rev_surj: - "surj rev" - by (rule_tac f=rev in surjI, simp) - -lemma select_image: - "select (f ` S) = do x \ select S; return (f x) od" - by (auto simp add: bind_def select_def return_def Sigma_def) - -lemma env_steps_repeat: - "env_steps = repeat env_step" - apply (simp add: env_step_def repeat_choice env_steps_def - select_early) - apply (simp add: put_trace_elem_put_comm) - apply (simp add: mapM_x_split put_trace_elem_put_comm put_trace_put_comm - mapM_x_put) - apply (simp add: put_trace_mapM_x rev_map mapM_x_map o_def) - apply (subst rev_surj[symmetric], simp add: select_image bind_assoc) - apply (rule arg_cong2[where f=bind, OF refl ext]) - apply (simp add: bind_def get_def put_def unless_def when_def return_def) - apply (simp add: last_st_tr_def hd_map hd_rev) - done - -lemma repeat_n_plus: - "repeat_n (n + m) f = do repeat_n n f; repeat_n m f od" - by (induct n; simp add: bind_assoc) - -lemma repeat_eq_twice[simp]: - "(do x \ repeat f; repeat f od) = repeat f" - apply (simp add: repeat_def select_early) - apply (simp add: bind_assoc repeat_n_plus[symmetric, simplified]) - apply (simp add: bind_def select_def Sigma_def) - apply (rule ext, fastforce intro: exI[where x=0]) - done - -lemmas bind_then_eq = arg_cong2[where f=bind, OF _ refl] -lemmas repeat_eq_twice_then[simp] - = repeat_eq_twice[THEN bind_then_eq, simplified bind_assoc] - -lemmas env_steps_eq_twice[simp] - = repeat_eq_twice[where f=env_step, folded env_steps_repeat] -lemmas env_steps_eq_twice_then[simp] - = env_steps_eq_twice[THEN bind_then_eq, simplified bind_assoc] - -lemmas mapM_collapse_append = mapM_append[symmetric, THEN bind_then_eq, - simplified bind_assoc, simplified] - -lemma prefix_closed_mapM[rule_format, wp_split]: - "(\x \ set xs. 
prefix_closed (f x)) \ prefix_closed (mapM f xs)" - apply (induct xs) - apply (simp add: mapM_def sequence_def) - apply (clarsimp simp: mapM_Cons) - apply (intro prefix_closed_bind allI; clarsimp) - done - -lemma modify_id: - "modify id = return ()" - by (simp add: modify_def get_def bind_def put_def return_def) - -lemma modify_modify: - "(do x \ modify f; modify (g x) od) = modify (g () o f)" - by (simp add: bind_def simpler_modify_def) - -lemmas modify_modify_bind = arg_cong2[where f=bind, - OF modify_modify refl, simplified bind_assoc] - -lemma select_single: - "select {x} = return x" - by (simp add: select_def return_def) - -lemma put_then_get[unfolded K_bind_def]: - "do put s; get od = do put s; return s od" - by (simp add: put_def bind_def get_def return_def) - -lemmas put_then_get_then - = put_then_get[THEN bind_then_eq, simplified bind_assoc return_bind] - -lemmas bind_promote_If - = if_distrib[where f="\f. bind f g" for g] - if_distrib[where f="\g. bind f g" for f] - -lemma bind_promote_If2: - "do x \ f; if P then g x else h x od - = (if P then bind f g else bind f h)" - by simp - -lemma exec_put_trace[unfolded K_bind_def]: - "(do put_trace xs; f od) s - = (\n \ {n. 0 < n \ n \ length xs}. {(drop n xs, Incomplete)}) - \ ((\(a, b). (a @ xs, b)) ` f s)" - apply (simp add: put_trace_eq_drop bind_def) - apply (safe; (clarsimp split: if_split_asm)?; - fastforce intro: bexI[where x=0] rev_bexI) - done - -lemma if_fun_lift: - "(if P then f else g) x = (if P then f x else g x)" - by simp - -lemma UN_If_distrib: - "(\x \ S. if P x then A x else B x) - = ((\x \ S \ {x. P x}. A x) \ (\x \ S \ {x. \ P x}. B x))" - by (fastforce split: if_split_asm) - -lemma Await_redef: - "Await P = do - s1 \ select {s. P s}; - env_steps; - s \ get; - select (if P s then {()} else {}) - od" - apply (simp add: Await_def env_steps_def bind_assoc) - apply (cases "{s. P s} = {}") - apply (simp add: select_def bind_def get_def) - apply (rule ext) - apply (simp add: exec_get select_bind_UN put_then_get_then) - apply (simp add: bind_promote_If2 if_fun_lift if_distrib[where f=select]) - apply (simp add: exec_put_trace cong: if_cong) - apply (simp add: put_def bind_def select_def cong: if_cong) - apply (strengthen equalityI) - apply clarsimp - apply (strengthen exI[where x="s # xs" for s xs]) - apply (strengthen exI[where x="Suc n" for n]) - apply simp - apply blast - done - -lemma bind_apply_cong': - "f s = f' s - \ (\rv s'. (rv, s') \ mres (f s) \ g rv s' = g' rv s') - \ bind f g s = bind f' g' s" - apply (simp add: bind_def) - apply (rule SUP_cong; simp?) - apply (clarsimp split: tmres.split) - apply (drule spec2, drule mp, erule in_mres) - apply simp - done - -lemmas bind_apply_cong = bind_apply_cong'[rule_format] - -lemma select_empty_bind[simp]: - "select {} >>= f = select {}" - by (simp add: select_def bind_def) - -lemma fail_bind[simp]: - "fail >>= f = fail" - by (simp add: bind_def fail_def) - -lemma eq_Me_neq_Env: - "(x = Me) = (x \ Env)" - by (cases x; simp) - -lemma validI_invariant_imp: - assumes v: "\P\,\R\ f \G\,\Q\" - and P: "\s0 s. P s0 s \ I s0" - and R: "\s0 s. I s0 \ R s0 s \ I s" - and G: "\s0 s. I s0 \ G s0 s \ I s" - shows "\P\,\R\ f \\s0 s. I s0 \ I s \ G s0 s\,\\rv s0 s. I s0 \ Q rv s0 s\" -proof - - { fix tr s0 i - assume r: "rely_cond R s0 tr" and g: "guar_cond G s0 tr" - and I: "I s0" - hence imp: "\(_, s, s') \ trace_steps (rev tr) s0. 
I s \ I s'" - apply (clarsimp simp: guar_cond_def rely_cond_def) - apply (drule(1) bspec)+ - apply (clarsimp simp: eq_Me_neq_Env) - apply (metis R G) - done - hence "i < length tr \ I (snd (rev tr ! i))" - using I - apply (induct i) - apply (clarsimp simp: neq_Nil_conv[where xs="rev tr" for tr, simplified]) - apply clarsimp - apply (drule bspec, fastforce simp add: trace_steps_nth) - apply simp - done - } - note I = this - show ?thesis - using v - apply (clarsimp simp: validI_def rely_def imp_conjL) - apply (drule spec2, drule(1) mp)+ - apply clarsimp - apply (frule P[rule_format]) - apply (clarsimp simp: guar_cond_def trace_steps_nth I last_st_tr_def - hd_append last_rev[symmetric] - last_conv_nth rev_map) - done -qed - -lemma validI_guar_post_conj_lift: - "\P\,\R\ f \G1\,\Q1\ - \ \P\,\R\ f \G2\,\Q2\ - \ \P\,\R\ f \\s0 s. G1 s0 s \ G2 s0 s\,\\rv s0 s. Q1 rv s0 s \ Q2 rv s0 s\" - apply (frule validI_prefix_closed) - apply (subst validI_def, clarsimp simp: rely_def) - apply (drule(3) validI_D)+ - apply (auto simp: guar_cond_def) - done - -lemmas modify_prefix_closed[simp] = - modify_wp[THEN valid_validI_wp[OF no_trace_all(3)], THEN validI_prefix_closed] -lemmas await_prefix_closed[simp] = Await_sync_twp[THEN validI_prefix_closed] - -lemma repeat_prefix_closed[intro!]: - "prefix_closed f \ prefix_closed (repeat f)" - apply (simp add: repeat_def) - apply (rule prefix_closed_bind; clarsimp) - apply (rename_tac n) - apply (induct_tac n; simp) - apply (auto intro: prefix_closed_bind) - done - -end diff --git a/lib/Monad_WP/TraceMonadVCG.thy b/lib/Monad_WP/TraceMonadVCG.thy deleted file mode 100644 index 0888bcd573..0000000000 --- a/lib/Monad_WP/TraceMonadVCG.thy +++ /dev/null @@ -1,2653 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) -theory TraceMonadVCG -imports - TraceMonad - WPSimp - Strengthen -begin - -lemma trace_steps_append: - "trace_steps (xs @ ys) s - = trace_steps xs s \ trace_steps ys (last_st_tr (rev xs) s)" - by (induct xs arbitrary: s, simp_all add: last_st_tr_def hd_append) - -lemma rely_cond_append: - "rely_cond R s (xs @ ys) = (rely_cond R s ys \ rely_cond R (last_st_tr ys s) xs)" - by (simp add: rely_cond_def trace_steps_append ball_Un conj_comms) - -lemma guar_cond_append: - "guar_cond G s (xs @ ys) = (guar_cond G s ys \ guar_cond G (last_st_tr ys s) xs)" - by (simp add: guar_cond_def trace_steps_append ball_Un conj_comms) - -lemma prefix_closed_bind: - "prefix_closed f \ (\x. prefix_closed (g x)) \ prefix_closed (f >>= g)" - apply (subst prefix_closed_def, clarsimp simp: bind_def) - apply (auto simp: Cons_eq_append_conv split: tmres.split_asm - dest!: prefix_closedD[rotated]; - fastforce elim: rev_bexI) - done - -lemmas prefix_closed_bind[rule_format, wp_split] - -lemma last_st_tr_append[simp]: - "last_st_tr (tr @ tr') s = last_st_tr tr (last_st_tr tr' s)" - by (simp add: last_st_tr_def hd_append) - -lemma last_st_tr_Nil[simp]: - "last_st_tr [] s = s" - by (simp add: last_st_tr_def) - -lemma last_st_tr_Cons[simp]: - "last_st_tr (x # xs) s = snd x" - by (simp add: last_st_tr_def) - -lemma bind_twp[wp_split]: - "\ \r. \Q' r\,\R\ g r \G\,\Q\; \P\,\R\ f \G\,\Q'\ \ - \ \P\,\R\ f >>= (\r. 
g r) \G\,\Q\" - apply (subst validI_def, rule conjI) - apply (blast intro: prefix_closed_bind validI_prefix_closed) - apply (clarsimp simp: bind_def rely_def) - apply (drule(2) validI_D) - apply (clarsimp simp: rely_cond_append split: tmres.split_asm) - apply (clarsimp split: tmres.split_asm) - apply (drule meta_spec, frule(2) validI_D) - apply (clarsimp simp: rely_cond_append split: if_split_asm) - apply (clarsimp simp: guar_cond_append) - done - -lemma trace_steps_rev_drop_nth: - "trace_steps (rev (drop n tr)) s - = (\i. (fst (rev tr ! i), (if i = 0 then s else snd (rev tr ! (i - 1))), - snd (rev tr ! i))) ` {..< length tr - n}" - apply (simp add: trace_steps_nth) - apply (intro image_cong refl) - apply (simp add: rev_nth) - done - -lemma prefix_closed_drop: - "(tr, res) \ f s \ prefix_closed f \ \res'. (drop n tr, res') \ f s" -proof (induct n arbitrary: res) - case 0 then show ?case by auto -next - case (Suc n) - have drop_1: "\tr res. (tr, res) \ f s \ \res'. (drop 1 tr, res') \ f s" - by (case_tac tr; fastforce dest: prefix_closedD[rotated] intro: Suc) - show ?case - using Suc.hyps[OF Suc.prems] - by (metis drop_1[simplified] drop_drop add_0 add_Suc) -qed - -lemma validI_GD_drop: - "\ \P\, \R\ f \G\, \Q\; P s0 s; (tr, res) \ f s; - rely_cond R s0 (drop n tr) \ - \ guar_cond G s0 (drop n tr)" - apply (drule prefix_closed_drop[where n=n], erule validI_prefix_closed) - apply (auto dest: validI_GD) - done - -lemma parallel_prefix_closed[wp_split]: - "prefix_closed f \ prefix_closed g - \ prefix_closed (parallel f g)" - apply (subst prefix_closed_def, clarsimp simp: parallel_def) - apply (case_tac f_steps; clarsimp) - apply (drule(1) prefix_closedD)+ - apply fastforce - done - -lemma rely_cond_drop: - "rely_cond R s0 xs \ rely_cond R s0 (drop n xs)" - using rely_cond_append[where xs="take n xs" and ys="drop n xs"] - by simp - -lemma rely_cond_is_drop: - "rely_cond R s0 xs - \ (ys = drop (length xs - length ys) xs) - \ rely_cond R s0 ys" - by (metis rely_cond_drop) - -lemma bounded_rev_nat_induct: - "(\n. N \ n \ P n) \ (\n. n < N \ P (Suc n) \ P n) - \ P n" - by (induct diff\"N - n" arbitrary: n; simp) - -lemma drop_n_induct: - "P [] \ (\n. n < length xs \ P (drop (Suc n) xs) \ P (drop n xs)) - \ P (drop n xs)" - by (induct len\"length (drop n xs)" arbitrary: n xs; simp) - -lemma guar_cond_Cons_eq: - "guar_cond R s0 (x # xs) - = (guar_cond R s0 xs \ (fst x \ Env \ R (last_st_tr xs s0) (snd x)))" - by (cases "fst x"; simp add: guar_cond_def trace_steps_append conj_comms) - -lemma guar_cond_Cons: - "guar_cond R s0 xs - \ fst x \ Env \ R (last_st_tr xs s0) (snd x) - \ guar_cond R s0 (x # xs)" - by (simp add: guar_cond_Cons_eq) - -lemma guar_cond_drop_Suc_eq: - "n < length xs - \ guar_cond R s0 (drop n xs) = (guar_cond R s0 (drop (Suc n) xs) - \ (fst (xs ! n) \ Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))))" - apply (rule trans[OF _ guar_cond_Cons_eq]) - apply (simp add: Cons_nth_drop_Suc) - done - -lemma guar_cond_drop_Suc: - "guar_cond R s0 (drop (Suc n) xs) - \ fst (xs ! n) \ Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! 
n)) - \ guar_cond R s0 (drop n xs)" - by (case_tac "n < length xs"; simp add: guar_cond_drop_Suc_eq) - -lemma rely_cond_Cons_eq: - "rely_cond R s0 (x # xs) - = (rely_cond R s0 xs \ (fst x = Env \ R (last_st_tr xs s0) (snd x)))" - by (simp add: rely_cond_def trace_steps_append conj_comms) - -lemma rely_cond_Cons: - "rely_cond R s0 xs - \ fst x = Env \ R (last_st_tr xs s0) (snd x) - \ rely_cond R s0 (x # xs)" - by (simp add: rely_cond_Cons_eq) - -lemma rely_cond_drop_Suc_eq: - "n < length xs - \ rely_cond R s0 (drop n xs) = (rely_cond R s0 (drop (Suc n) xs) - \ (fst (xs ! n) = Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))))" - apply (rule trans[OF _ rely_cond_Cons_eq]) - apply (simp add: Cons_nth_drop_Suc) - done - -lemma rely_cond_drop_Suc: - "rely_cond R s0 (drop (Suc n) xs) - \ fst (xs ! n) = Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n)) - \ rely_cond R s0 (drop n xs)" - by (cases "n < length xs"; simp add: rely_cond_drop_Suc_eq) - -lemma last_st_tr_map_zip_hd: - "length flags = length tr - \ tr \ [] \ snd (f (hd flags, hd tr)) = snd (hd tr) - \ last_st_tr (map f (zip flags tr)) = last_st_tr tr" - apply (cases tr; simp) - apply (cases flags; simp) - apply (simp add: fun_eq_iff) - done - -lemma last_st_tr_drop_map_zip_hd: - "length flags = length tr - \ n < length tr \ snd (f (flags ! n, tr ! n)) = snd (tr ! n) - \ last_st_tr (drop n (map f (zip flags tr))) = last_st_tr (drop n tr)" - apply (simp add: drop_map drop_zip) - apply (rule last_st_tr_map_zip_hd; clarsimp) - apply (simp add: hd_drop_conv_nth) - done - -lemma last_st_tr_map_zip: - "length flags = length tr - \ \fl tmid s. snd (f (fl, (tmid, s))) = s - \ last_st_tr (map f (zip flags tr)) = last_st_tr tr" - apply (erule last_st_tr_map_zip_hd) - apply (clarsimp simp: neq_Nil_conv) - done - -lemma parallel_rely_induct: - assumes preds: "(tr, v) \ parallel f g s" "Pf s0 s" "Pg s0 s" - assumes validI: "\Pf\,\Rf\ f' \Gf\,\Qf\" - "\Pg\,\Rg\ g' \Gg\,\Qg\" - "f s \ f' s" "g s \ g' s" - and compat: "R \ Rf" "R \ Rg" "Gf \ G" "Gg \ G" - "Gf \ Rg" "Gg \ Rf" - and rely: "rely_cond R s0 (drop n tr)" - shows "\tr_f tr_g. (tr_f, v) \ f s \ (tr_g, v) \ g s - \ rely_cond Rf s0 (drop n tr_f) \ rely_cond Rg s0 (drop n tr_g) - \ map snd tr_f = map snd tr \ map snd tr_g = map snd tr - \ (\i \ length tr. last_st_tr (drop i tr_g) s0 = last_st_tr (drop i tr_f) s0) - \ guar_cond G s0 (drop n tr)" - (is "\ys zs. _ \ _ \ ?concl ys zs") -proof - - obtain ys zs where tr: "tr = map parallel_mrg (zip ys zs)" - and all2: "list_all2 (\y z. 
(fst y = Env \ fst z = Env) \ snd y = snd z) ys zs" - and ys: "(ys, v) \ f s" and zs: "(zs, v) \ g s" - using preds - by (clarsimp simp: parallel_def2) - note len[simp] = list_all2_lengthD[OF all2] - - have ys': "(ys, v) \ f' s" and zs': "(zs, v) \ g' s" - using ys zs validI by auto - - note rely_f_step = validI_GD_drop[OF validI(1) preds(2) ys' rely_cond_drop_Suc] - note rely_g_step = validI_GD_drop[OF validI(2) preds(3) zs' rely_cond_drop_Suc] - - note snd[simp] = list_all2_nthD[OF all2, THEN conjunct2] - - have "?concl ys zs" - using rely tr all2 rely_f_step rely_g_step - apply (induct n rule: bounded_rev_nat_induct) - apply (subst drop_all, assumption) - apply clarsimp - apply (simp add: list_all2_conv_all_nth last_st_tr_def drop_map[symmetric] - hd_drop_conv_nth hd_append) - apply (fastforce simp: split_def intro!: nth_equalityI) - apply clarsimp - apply (erule_tac x=n in meta_allE)+ - apply (drule meta_mp, erule rely_cond_is_drop, simp) - apply (subst(asm) rely_cond_drop_Suc_eq[where xs="map f xs" for f xs], simp) - apply (clarsimp simp: last_st_tr_drop_map_zip_hd if_split[where P="\x. x = Env"] - split_def) - apply (intro conjI; (rule guar_cond_drop_Suc rely_cond_drop_Suc, assumption)) - apply (auto simp: guar_cond_drop_Suc_eq last_st_tr_drop_map_zip_hd - intro: compat[THEN predicate2D]) - done - - thus ?thesis - using ys zs - by auto -qed - -lemmas parallel_rely_induct0 = parallel_rely_induct[where n=0, simplified] - -lemma rg_validI: - assumes validI: "\Pf\,\Rf\ f \Gf\,\Qf\" - "\Pg\,\Rg\ g \Gg\,\Qg\" - and compat: "R \ Rf" "R \ Rg" "Gf \ G" "Gg \ G" - "Gf \ Rg" "Gg \ Rf" - shows "\Pf And Pg\,\R\ parallel f g \G\,\\rv. Qf rv And Qg rv\" - apply (clarsimp simp: validI_def rely_def bipred_conj_def - parallel_prefix_closed validI[THEN validI_prefix_closed]) - apply (drule(3) parallel_rely_induct0[OF _ _ _ validI order_refl order_refl compat]) - apply clarsimp - apply (drule(2) validI[THEN validI_rvD])+ - apply (simp add: last_st_tr_def) - done - -lemma validI_weaken_pre[wp_pre]: - "\P'\,\R\ f \G\,\Q\ - \ (\s0 s. P s0 s \ P' s0 s) - \ \P\,\R\ f \G\,\Q\" - by (simp add: validI_def, blast) - -lemma rely_weaken: - "(\s0 s. R s0 s \ R' s0 s) - \ (tr, res) \ rely f R s s0 \ (tr, res) \ rely f R' s s0" - by (simp add: rely_def rely_cond_def, blast) - -lemma validI_weaken_rely[wp_pre]: - "\P\,\R'\ f \G\,\Q\ - \ (\s0 s. R s0 s \ R' s0 s) - \ \P\,\R\ f \G\,\Q\" - apply (simp add: validI_def) - by (metis rely_weaken) - -lemma validI_strengthen_post: - "\P\,\R\ f \G\,\Q'\ - \ (\v s0 s. Q' v s0 s \ Q v s0 s) - \ \P\,\R\ f \G\,\Q\" - by (simp add: validI_def) - -lemma validI_strengthen_guar: - "\P\, \R\ f \G'\, \Q\ - \ (\s0 s. G' s0 s \ G s0 s) - \ \P\, \R\ f \G\, \Q\" - by (force simp: validI_def guar_cond_def) - -lemma rely_prim[simp]: - "rely (\s. insert (v s) (f s)) R s0 = (\s. {x. x = v s \ rely_cond R s0 (fst x)} \ (rely f R s0 s))" - "rely (\s. {}) R s0 = (\_. {})" - by (auto simp: rely_def prod_eq_iff) - -lemma prefix_closed_put_trace_elem[iff]: - "prefix_closed (put_trace_elem x)" - by (clarsimp simp: prefix_closed_def put_trace_elem_def) - -lemma prefix_closed_return[iff]: - "prefix_closed (return x)" - by (simp add: prefix_closed_def return_def) - -lemma prefix_closed_put_trace[iff]: - "prefix_closed (put_trace tr)" - by (induct tr; clarsimp simp: prefix_closed_bind) - -lemma put_trace_eq_drop: - "put_trace xs s - = ((\n. (drop n xs, if n = 0 then Result ((), s) else Incomplete)) ` {.. 
length xs})" - apply (induct xs) - apply (clarsimp simp: return_def) - apply (clarsimp simp: put_trace_elem_def bind_def) - apply (simp add: atMost_Suc_eq_insert_0 image_image) - apply (rule equalityI; clarsimp) - apply (split if_split_asm; clarsimp) - apply (auto intro: image_eqI[where x=0])[1] - apply (rule rev_bexI, simp) - apply clarsimp - done - -lemma put_trace_res: - "(tr, res) \ put_trace xs s - \ \n. tr = drop n xs \ n \ length xs - \ res = (case n of 0 \ Result ((), s) | _ \ Incomplete)" - apply (clarsimp simp: put_trace_eq_drop) - apply (case_tac n; auto intro: exI[where x=0]) - done - -lemma put_trace_twp[wp]: - "\\s0 s. (\n. rely_cond R s0 (drop n xs) \ guar_cond G s0 (drop n xs)) - \ (rely_cond R s0 xs \ Q () (last_st_tr xs s0) s)\,\R\ put_trace xs \G\,\Q\" - apply (clarsimp simp: validI_def rely_def) - apply (drule put_trace_res) - apply (clarsimp; clarsimp split: nat.split_asm) - done - -lemmas put_trace_elem_twp = put_trace_twp[where xs="[x]" for x, simplified] - -lemma prefix_closed_select[iff]: - "prefix_closed (select S)" - by (simp add: prefix_closed_def select_def image_def) - -lemma select_wp[wp]: "\\s. \x \ S. Q x s\ select S \Q\" - by (simp add: select_def valid_def mres_def image_def) - -lemma rely_cond_rtranclp: - "rely_cond R s (map (Pair Env) xs) \ rtranclp R s (last_st_tr (map (Pair Env) xs) s)" - apply (induct xs arbitrary: s rule: rev_induct) - apply simp - apply (clarsimp simp add: rely_cond_def) - apply (erule converse_rtranclp_into_rtranclp) - apply simp - done - -lemma put_wp[wp]: - "\\_. Q () s\ put s \Q\" - by (simp add: put_def valid_def mres_def) - -lemma get_wp[wp]: - "\\s. Q s s\ get \Q\" - by (simp add: get_def valid_def mres_def) - -lemma bind_wp[wp_split]: - "\ \r. \Q' r\ g r \Q\; \P\f \Q'\ \ - \ \P\ f >>= (\r. g r) \Q\" - by (fastforce simp: valid_def bind_def2 mres_def intro: image_eqI[rotated]) - -lemma modify_wp[wp]: - "\\s. Q () (f s)\ modify f \Q\" - unfolding modify_def - by wp - -definition - no_trace :: "('s,'a) tmonad \ bool" -where - "no_trace f = (\tr res s. (tr, res) \ f s \ tr = [] \ res \ Incomplete)" - -lemmas no_traceD = no_trace_def[THEN iffD1, rule_format] - -lemma no_trace_emp: - "no_trace f \ (tr, r) \ f s \ tr = []" - by (simp add: no_traceD) - -lemma no_trace_Incomplete_mem: - "no_trace f \ (tr, Incomplete) \ f s" - by (auto dest: no_traceD) - -lemma no_trace_Incomplete_eq: - "no_trace f \ (tr, res) \ f s \ res \ Incomplete" - by (auto dest: no_traceD) - -lemma no_trace_prefix_closed: - "no_trace f \ prefix_closed f" - by (auto simp add: prefix_closed_def dest: no_trace_emp) - -(* Attempt to define triple_judgement to use valid_validI_wp as wp_comb rule. - It doesn't work. It seems that wp_comb rules cannot take more than 1 assumption *) -lemma validI_is_triple: - "\P\,\R\ f \G\,\Q\ = - triple_judgement (\(s0, s). prefix_closed f \ P s0 s) f - (\(s0,s) f. prefix_closed f \ (\tr res. (tr, res) \ rely f R s0 s - \ guar_cond G s0 tr - \ (\rv s'. res = Result (rv, s') \ Q rv (last_st_tr tr s0) s')))" - apply (simp add: triple_judgement_def validI_def ) - apply (cases "prefix_closed f"; simp) - done - -lemma valid_is_triple: - "valid P f Q = - triple_judgement P f - (\s f. (\(r,s') \ (mres (f s)). Q r s'))" - by (simp add: triple_judgement_def valid_def mres_def) - -lemma no_trace_is_triple: - "no_trace f = triple_judgement \ f (\() f. 
id no_trace f)" - by (simp add: triple_judgement_def split: unit.split) - -lemmas [wp_trip] = valid_is_triple validI_is_triple no_trace_is_triple - -lemma valid_validI_wp[wp_comb]: - "no_trace f \ (\s0. \P s0\ f \\v. Q v s0 \) - \ \P\,\R\ f \G\,\Q\" - by (fastforce simp: rely_def validI_def valid_def mres_def no_trace_prefix_closed dest: no_trace_emp - elim: image_eqI[rotated]) - -(* Since valid_validI_wp in wp_comb doesn't work, we add the rules directly in the wp set *) -lemma no_trace_prim: - "no_trace get" - "no_trace (put s)" - "no_trace (modify f)" - "no_trace (return v)" - "no_trace fail" - by (simp_all add: no_trace_def get_def put_def modify_def bind_def - return_def select_def fail_def) - -lemma no_trace_select: - "no_trace (select S)" - by (clarsimp simp add: no_trace_def select_def) - -lemma no_trace_bind: - "no_trace f \ \rv. no_trace (g rv) - \ no_trace (do rv \ f; g rv od)" - apply (subst no_trace_def) - apply (clarsimp simp add: bind_def split: tmres.split_asm; - auto dest: no_traceD[rotated]) - done - -lemma no_trace_extra: - "no_trace (gets f)" - "no_trace (assert P)" - "no_trace (assert_opt Q)" - by (simp_all add: gets_def assert_def assert_opt_def no_trace_bind no_trace_prim - split: option.split) - -lemmas no_trace_all[wp, iff] = no_trace_prim no_trace_select no_trace_extra - -lemma env_steps_twp[wp]: - "\\s0 s. (\s'. R\<^sup>*\<^sup>* s0 s' \ Q () s' s') \ Q () s0 s\,\R\ env_steps \G\,\Q\" - apply (simp add: interference_def env_steps_def) - apply wp - apply (clarsimp simp: guar_cond_def trace_steps_rev_drop_nth rev_nth) - apply (drule rely_cond_rtranclp) - apply (clarsimp simp add: last_st_tr_def hd_append) - done - -lemma interference_twp[wp]: - "\\s0 s. (\s'. R\<^sup>*\<^sup>* s s' \ Q () s' s') \ G s0 s\,\R\ interference \G\,\Q\" - apply (simp add: interference_def commit_step_def del: put_trace.simps) - apply wp - apply clarsimp - apply (simp add: drop_Cons nat.split rely_cond_def guar_cond_def) - done - -(* what Await does if we haven't committed our step is a little - strange. this assumes we have, which means s0 = s. we should - revisit this if we find a use for Await when this isn't the - case *) -lemma Await_sync_twp: - "\\s0 s. s = s0 \ (\x. R\<^sup>*\<^sup>* s0 x \ c x \ Q () x x)\,\R\ Await c \G\,\Q\" - apply (simp add: Await_def split_def) - apply wp - apply clarsimp - apply (clarsimp simp: guar_cond_def trace_steps_rev_drop_nth rev_nth) - apply (drule rely_cond_rtranclp) - apply (simp add: o_def) - done - -(* Wrap up the standard usage pattern of wp/wpc/simp into its own command: *) -method wpsimp uses wp simp split split_del cong = - ((determ \wp add: wp|wpc|clarsimp simp: simp split: split split del: split_del cong: cong\)+)[1] - -declare K_def [simp] - -section "Satisfiability" - -text \ - The dual to validity: an existential instead of a universal - quantifier for the post condition. In refinement, it is - often sufficient to know that there is one state that - satisfies a condition. -\ -definition - exs_valid :: "('a \ bool) \ ('a, 'b) tmonad \ - ('b \ 'a \ bool) \ bool" - ("\_\ _ \\_\") -where - "exs_valid P f Q \ (\s. P s \ (\(rv, s') \ mres (f s). Q rv s'))" - - -text \The above for the exception monad\ -definition - ex_exs_validE :: "('a \ bool) \ ('a, 'e + 'b) tmonad \ - ('b \ 'a \ bool) \ ('e \ 'a \ bool) \ bool" - ("\_\ _ \\_\, \_\") -where - "ex_exs_validE P f Q E \ - exs_valid P f (\rv. 
case rv of Inl e \ E e | Inr v \ Q v)" - - -section "Lemmas" - -subsection \Determinism\ - -lemma det_set_iff: - "det f \ (r \ mres (f s)) = (mres (f s) = {r})" - apply (simp add: det_def mres_def) - apply (fastforce elim: allE[where x=s]) - done - -lemma return_det [iff]: - "det (return x)" - by (simp add: det_def return_def) - -lemma put_det [iff]: - "det (put s)" - by (simp add: det_def put_def) - -lemma get_det [iff]: - "det get" - by (simp add: det_def get_def) - -lemma det_gets [iff]: - "det (gets f)" - by (auto simp add: gets_def det_def get_def return_def bind_def) - -lemma det_UN: - "det f \ (\x \ mres (f s). g x) = (g (THE x. x \ mres (f s)))" - unfolding det_def mres_def - apply simp - apply (drule spec [of _ s]) - apply (clarsimp simp: vimage_def) - done - -lemma bind_detI [simp, intro!]: - "\ det f; \x. det (g x) \ \ det (f >>= g)" - apply (simp add: bind_def det_def split_def) - apply clarsimp - apply (erule_tac x=s in allE) - apply clarsimp - done - -lemma det_modify[iff]: - "det (modify f)" - by (simp add: modify_def) - -lemma the_run_stateI: - "mres (M s) = {s'} \ the_run_state M s = s'" - by (simp add: the_run_state_def) - -lemma the_run_state_det: - "\ s' \ mres (M s); det M \ \ the_run_state M s = s'" - by (simp only: the_run_stateI det_set_iff[where f=M and s=s]) - -subsection "Lifting and Alternative Basic Definitions" - -lemma liftE_liftM: "liftE = liftM Inr" - apply (rule ext) - apply (simp add: liftE_def liftM_def) - done - -lemma liftME_liftM: "liftME f = liftM (case_sum Inl (Inr \ f))" - apply (rule ext) - apply (simp add: liftME_def liftM_def bindE_def returnOk_def lift_def) - apply (rule_tac f="bind x" in arg_cong) - apply (rule ext) - apply (case_tac xa) - apply (simp_all add: lift_def throwError_def) - done - -lemma liftE_bindE: - "(liftE a) >>=E b = a >>= b" - apply (simp add: liftE_def bindE_def lift_def bind_assoc) - done - -lemma liftM_id[simp]: "liftM id = id" - apply (rule ext) - apply (simp add: liftM_def) - done - -lemma liftM_bind: - "(liftM t f >>= g) = (f >>= (\x. g (t x)))" - by (simp add: liftM_def bind_assoc) - -lemma gets_bind_ign: "gets f >>= (\x. m) = m" - apply (rule ext) - apply (simp add: bind_def simpler_gets_def) - done - -lemma get_bind_apply: "(get >>= f) x = f x x" - by (simp add: get_def bind_def) - -lemma exec_gets: - "(gets f >>= m) s = m (f s) s" - by (simp add: simpler_gets_def bind_def) - -lemma exec_get: - "(get >>= m) s = m s s" - by (simp add: get_def bind_def) - -lemma bind_eqI: - "\ f = f'; \x. 
g x = g' x \ \ f >>= g = f' >>= g'" - apply (rule ext) - apply (simp add: bind_def) - done - -subsection "Simplification Rules for Lifted And/Or" - -lemma pred_andE[elim!]: "\ (A and B) x; \ A x; B x \ \ R \ \ R" - by(simp add:pred_conj_def) - -lemma pred_andI[intro!]: "\ A x; B x \ \ (A and B) x" - by(simp add:pred_conj_def) - -lemma pred_conj_app[simp]: "(P and Q) x = (P x \ Q x)" - by(simp add:pred_conj_def) - -lemma bipred_andE[elim!]: "\ (A And B) x y; \ A x y; B x y \ \ R \ \ R" - by(simp add:bipred_conj_def) - -lemma bipred_andI[intro!]: "\ A x y; B x y \ \ (A And B) x y" - by (simp add:bipred_conj_def) - -lemma bipred_conj_app[simp]: "(P And Q) x = (P x and Q x)" - by(simp add:pred_conj_def bipred_conj_def) - -lemma pred_disjE[elim!]: "\ (P or Q) x; P x \ R; Q x \ R \ \ R" - by (fastforce simp: pred_disj_def) - -lemma pred_disjI1[intro]: "P x \ (P or Q) x" - by (simp add: pred_disj_def) - -lemma pred_disjI2[intro]: "Q x \ (P or Q) x" - by (simp add: pred_disj_def) - -lemma pred_disj_app[simp]: "(P or Q) x = (P x \ Q x)" - by auto - -lemma bipred_disjI1[intro]: "P x y \ (P Or Q) x y" - by (simp add: bipred_disj_def) - -lemma bipred_disjI2[intro]: "Q x y \ (P Or Q) x y" - by (simp add: bipred_disj_def) - -lemma bipred_disj_app[simp]: "(P Or Q) x = (P x or Q x)" - by(simp add:pred_disj_def bipred_disj_def) - -lemma pred_notnotD[simp]: "(not not P) = P" - by(simp add:pred_neg_def) - -lemma pred_and_true[simp]: "(P and \) = P" - by(simp add:pred_conj_def) - -lemma pred_and_true_var[simp]: "(\ and P) = P" - by(simp add:pred_conj_def) - -lemma pred_and_false[simp]: "(P and \) = \" - by(simp add:pred_conj_def) - -lemma pred_and_false_var[simp]: "(\ and P) = \" - by(simp add:pred_conj_def) - -lemma bipred_disj_op_eq[simp]: - "reflp R \ (=) Or R = R" - apply (rule ext, rule ext) - apply (auto simp: reflp_def) - done - -lemma bipred_le_true[simp]: "R \ \\" - by clarsimp - -subsection "Hoare Logic Rules" - -lemma validE_def2: - "validE P f Q R \ \s. P s \ (\(r,s') \ mres (f s). case r of Inr b \ Q b s' - | Inl a \ R a s')" - by (unfold valid_def validE_def) - -lemma seq': - "\ \A\ f \B\; - \x. P x \ \C\ g x \D\; - \x s. B x s \ P x \ C s \ \ - \A\ do x \ f; g x od \D\" - apply (erule bind_wp[rotated]) - apply (clarsimp simp: valid_def) - apply (fastforce elim: rev_bexI image_eqI[rotated]) - done - -lemma seq: - assumes f_valid: "\A\ f \B\" - assumes g_valid: "\x. P x \ \C\ g x \D\" - assumes bind: "\x s. B x s \ P x \ C s" - shows "\A\ do x \ f; g x od \D\" -apply (insert f_valid g_valid bind) -apply (blast intro: seq') -done - -lemma seq_ext': - "\ \A\ f \B\; - \x. \B x\ g x \C\ \ \ - \A\ do x \ f; g x od \C\" - by (metis bind_wp) - -lemmas seq_ext = bind_wp[rotated] - -lemma seqE': - "\ \A\ f \B\,\E\ ; - \x. \B x\ g x \C\,\E\ \ \ - \A\ doE x \ f; g x odE \C\,\E\" - apply (simp add: bindE_def validE_def) - apply (erule seq_ext') - apply (auto simp: lift_def valid_def throwError_def return_def mres_def - split: sum.splits) - done - -lemma seqE: - assumes f_valid: "\A\ f \B\,\E\" - assumes g_valid: "\x. \B x\ g x \C\,\E\" - shows "\A\ doE x \ f; g x odE \C\,\E\" - apply(insert f_valid g_valid) - apply(blast intro: seqE') - done - -lemma hoare_TrueI: "\P\ f \\_. \\" - by (simp add: valid_def) - -lemma hoareE_TrueI: "\P\ f \\_. \\, \\r. \\" - by (simp add: validE_def valid_def) - -lemma hoare_True_E_R [simp]: - "\P\ f \\r s. 
True\, -" - by (auto simp add: validE_R_def validE_def valid_def split: sum.splits) - -lemma hoare_post_conj [intro!]: - "\ \ P \ a \ Q \; \ P \ a \ R \ \ \ \ P \ a \ Q And R \" - by (fastforce simp: valid_def split_def bipred_conj_def) - -lemma hoare_pre_disj [intro!]: - "\ \ P \ a \ R \; \ Q \ a \ R \ \ \ \ P or Q \ a \ R \" - by (simp add:valid_def pred_disj_def) - -lemma hoare_conj: - "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \P and P'\ f \Q And Q'\" - unfolding valid_def - by (auto) - -lemma hoare_post_taut: "\ P \ a \ \\ \" - by (simp add:valid_def) - -lemma wp_post_taut: "\\r. True\ f \\r s. True\" - by (rule hoare_post_taut) - -lemma wp_post_tautE: "\\r. True\ f \\r s. True\,\\f s. True\" -proof - - have P: "\r. (case r of Inl a \ True | _ \ True) = True" - by (case_tac r, simp_all) - show ?thesis - by (simp add: validE_def P wp_post_taut) -qed - -lemma hoare_pre_cont [simp]: "\ \ \ a \ P \" - by (simp add:valid_def) - - -subsection \Strongest Postcondition Rules\ - -lemma get_sp: - "\P\ get \\a s. s = a \ P s\" - by(simp add:get_def valid_def mres_def) - -lemma put_sp: - "\\\ put a \\_ s. s = a\" - by(simp add:put_def valid_def mres_def) - -lemma return_sp: - "\P\ return a \\b s. b = a \ P s\" - by(simp add:return_def valid_def mres_def) - -lemma assert_sp: - "\ P \ assert Q \ \r s. P s \ Q \" - by (simp add: assert_def fail_def return_def valid_def mres_def) - -lemma hoare_gets_sp: - "\P\ gets f \\rv s. rv = f s \ P s\" - by (simp add: valid_def simpler_gets_def mres_def) - -lemma hoare_return_drop_var [iff]: "\ Q \ return x \ \r. Q \" - by (simp add:valid_def return_def mres_def) - -lemma hoare_gets [intro!]: "\ \s. P s \ Q (f s) s \ \ \ P \ gets f \ Q \" - by (simp add:valid_def gets_def get_def bind_def return_def mres_def) - -lemma hoare_modifyE_var [intro!]: - "\ \s. P s \ Q (f s) \ \ \ P \ modify f \ \r s. Q s \" - by(simp add: valid_def modify_def put_def get_def bind_def mres_def) - -lemma hoare_if [intro!]: - "\ P \ \ Q \ a \ R \; \ P \ \ Q \ b \ R \ \ \ - \ Q \ if P then a else b \ R \" - by (simp add:valid_def) - -lemma hoare_pre_subst: "\ A = B; \A\ a \C\ \ \ \B\ a \C\" - by(clarsimp simp:valid_def split_def) - -lemma hoare_post_subst: "\ B = C; \A\ a \B\ \ \ \A\ a \C\" - by(clarsimp simp:valid_def split_def) - -lemma hoare_pre_tautI: "\ \A and P\ a \B\; \A and not P\ a \B\ \ \ \A\ a \B\" - by(fastforce simp:valid_def split_def pred_conj_def pred_neg_def) - -lemma hoare_pre_imp: "\ \s. P s \ Q s; \Q\ a \R\ \ \ \P\ a \R\" - by (fastforce simp add:valid_def) - -lemma hoare_post_imp: "\ \r s. Q r s \ R r s; \P\ a \Q\ \ \ \P\ a \R\" - by(fastforce simp:valid_def split_def) - -lemma hoare_post_impErr': "\ \P\ a \Q\,\E\; - \r s. Q r s \ R r s; - \e s. E e s \ F e s \ \ - \P\ a \R\,\F\" - apply (simp add: validE_def) - apply (rule_tac Q="\r s. case r of Inl a \ E a s | Inr b \ Q b s" in hoare_post_imp) - apply (case_tac r) - apply simp_all - done - -lemma hoare_post_impErr: "\ \P\ a \Q\,\E\; - \r s. Q r s \ R r s; - \e s. E e s \ F e s \ \ - \P\ a \R\,\F\" - apply (blast intro: hoare_post_impErr') - done - -lemma hoare_validE_cases: - "\ \ P \ f \ Q \, \ \_ _. True \; \ P \ f \ \_ _. True \, \ R \ \ - \ \ P \ f \ Q \, \ R \" - by (simp add: validE_def valid_def split: sum.splits) blast - -lemma hoare_post_imp_dc: - "\\P\ a \\r. Q\; \s. Q s \ R s\ \ \P\ a \\r. R\,\\r. R\" - by (simp add: validE_def valid_def split: sum.splits) blast - -lemma hoare_post_imp_dc2: - "\\P\ a \\r. Q\; \s. Q s \ R s\ \ \P\ a \\r. R\,\\r s. 
True\" - by (simp add: validE_def valid_def split: sum.splits) blast - -lemma hoare_post_imp_dc2E: - "\\P\ a \\r. Q\; \s. Q s \ R s\ \ \P\ a \\r s. True\, \\r. R\" - by (simp add: validE_def valid_def split: sum.splits) fast - -lemma hoare_post_imp_dc2E_actual: - "\\P\ a \\r. R\\ \ \P\ a \\r s. True\, \\r. R\" - by (simp add: validE_def valid_def split: sum.splits) fast - -lemma hoare_post_imp_dc2_actual: - "\\P\ a \\r. R\\ \ \P\ a \\r. R\, \\r s. True\" - by (simp add: validE_def valid_def split: sum.splits) fast - -lemma hoare_post_impE: "\ \r s. Q r s \ R r s; \P\ a \Q\ \ \ \P\ a \R\" - by (fastforce simp:valid_def split_def) - -lemma hoare_conjD1: - "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. Q rv\" - unfolding valid_def by auto - -lemma hoare_conjD2: - "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. R rv\" - unfolding valid_def by auto - -lemma hoare_post_disjI1: - "\P\ f \\rv. Q rv\ \ \P\ f \\rv. Q rv or R rv\" - unfolding valid_def by auto - -lemma hoare_post_disjI2: - "\P\ f \\rv. R rv\ \ \P\ f \\rv. Q rv or R rv\" - unfolding valid_def by auto - -lemma hoare_weaken_pre: - "\\Q\ a \R\; \s. P s \ Q s\ \ \P\ a \R\" - apply (rule hoare_pre_imp) - prefer 2 - apply assumption - apply blast - done - -lemma hoare_strengthen_post: - "\\P\ a \Q\; \r s. Q r s \ R r s\ \ \P\ a \R\" - apply (rule hoare_post_imp) - prefer 2 - apply assumption - apply blast - done - -lemma use_valid: "\(r, s') \ mres (f s); \P\ f \Q\; P s \ \ Q r s'" - apply (simp add: valid_def) - apply blast - done - -lemma use_validE_norm: "\ (Inr r', s') \ mres (B s); \ P \ B \ Q \,\ E \; P s \ \ Q r' s'" - apply (clarsimp simp: validE_def valid_def) - apply force - done - -lemma use_validE_except: "\ (Inl r', s') \ mres (B s); \ P \ B \ Q \,\ E \; P s \ \ E r' s'" - apply (clarsimp simp: validE_def valid_def) - apply force - done - -lemma in_inv_by_hoareD: - "\ \P. \P\ f \\_. P\; (x,s') \ mres (f s) \ \ s' = s" - apply (drule_tac x="(=) s" in meta_spec) - apply (auto simp add: valid_def mres_def) - done - -subsection "Satisfiability" - -lemma exs_hoare_post_imp: "\\r s. Q r s \ R r s; \P\ a \\Q\\ \ \P\ a \\R\" - apply (simp add: exs_valid_def) - apply safe - apply (erule_tac x=s in allE, simp) - apply blast - done - -lemma use_exs_valid: "\\P\ f \\Q\; P s \ \ \(r, s') \ mres (f s). Q r s'" - by (simp add: exs_valid_def) - -definition "exs_postcondition P f \ (\a b. \(rv, s)\ f a b. P rv s)" - -lemma exs_valid_is_triple: - "exs_valid P f Q = triple_judgement P f (exs_postcondition Q (\s f. mres (f s)))" - by (simp add: triple_judgement_def exs_postcondition_def exs_valid_def) - -lemmas [wp_trip] = exs_valid_is_triple - -lemma exs_valid_weaken_pre [wp_comb]: - "\ \ P' \ f \\ Q \; \s. P s \ P' s \ \ \ P \ f \\ Q \" - apply atomize - apply (clarsimp simp: exs_valid_def) - done - -lemma exs_valid_chain: - "\ \ P \ f \\ Q \; \s. R s \ P s; \r s. Q r s \ S r s \ \ \ R \ f \\ S \" - by (fastforce simp only: exs_valid_def Bex_def ) - -lemma exs_valid_assume_pre: - "\ \s. P s \ \ P \ f \\ Q \ \ \ \ P \ f \\ Q \" - apply (fastforce simp: exs_valid_def) - done - -lemma exs_valid_bind [wp_split]: - "\ \x. \B x\ g x \\C\; \A\ f \\B\ \ \ \ A \ f >>= (\x. g x) \\ C \" - apply atomize - apply (clarsimp simp: exs_valid_def bind_def2 mres_def) - apply (drule spec, drule(1) mp, clarsimp) - apply (drule spec2, drule(1) mp, clarsimp) - apply (simp add: image_def bex_Un) - apply (strengthen subst[where P="\x. 
x \ f s" for s, mk_strg I _ E], simp) - apply (fastforce elim: rev_bexI) - done - -lemma exs_valid_return [wp]: - "\ Q v \ return v \\ Q \" - by (clarsimp simp: exs_valid_def return_def mres_def) - -lemma exs_valid_select [wp]: - "\ \s. \r \ S. Q r s \ select S \\ Q \" - apply (clarsimp simp: exs_valid_def select_def mres_def) - apply (auto simp add: image_def) - done - -lemma exs_valid_get [wp]: - "\ \s. Q s s \ get \\ Q \" - by (clarsimp simp: exs_valid_def get_def mres_def) - -lemma exs_valid_gets [wp]: - "\ \s. Q (f s) s \ gets f \\ Q \" - by (clarsimp simp: gets_def) wp - -lemma exs_valid_put [wp]: - "\ Q v \ put v \\ Q \" - by (clarsimp simp: put_def exs_valid_def mres_def) - -lemma exs_valid_state_assert [wp]: - "\ \s. Q () s \ G s \ state_assert G \\ Q \" - by (clarsimp simp: state_assert_def exs_valid_def get_def - assert_def bind_def2 return_def mres_def) - -lemmas exs_valid_guard = exs_valid_state_assert - -lemma exs_valid_fail [wp]: - "\ \_. False \ fail \\ Q \" - by (clarsimp simp: fail_def exs_valid_def) - -lemma exs_valid_condition [wp]: - "\ \ P \ L \\ Q \; \ P' \ R \\ Q \ \ \ - \ \s. (C s \ P s) \ (\ C s \ P' s) \ condition C L R \\ Q \" - by (clarsimp simp: condition_def exs_valid_def split: sum.splits) - - -subsection MISC - -lemma hoare_return_simp: - "\P\ return x \Q\ = (\s. P s \ Q x s)" - by (simp add: valid_def return_def mres_def) - -lemma hoare_gen_asm: - "(P \ \P'\ f \Q\) \ \P' and K P\ f \Q\" - by (fastforce simp add: valid_def) - -lemma hoare_when_wp [wp]: - "\ P \ \Q\ f \R\ \ \ \if P then Q else R ()\ when P f \R\" - by (clarsimp simp: when_def valid_def return_def mres_def) - -lemma hoare_conjI: - "\ \P\ f \Q\; \P\ f \R\ \ \ \P\ f \\r s. Q r s \ R r s\" - unfolding valid_def by blast - -lemma hoare_disjI1: - "\ \P\ f \Q\ \ \ \P\ f \\r s. Q r s \ R r s \" - unfolding valid_def by blast - -lemma hoare_disjI2: - "\ \P\ f \R\ \ \ \P\ f \\r s. Q r s \ R r s \" - unfolding valid_def by blast - -lemma hoare_assume_pre: - "(\s. P s \ \P\ f \Q\) \ \P\ f \Q\" - by (auto simp: valid_def) - -lemma hoare_returnOk_sp: - "\P\ returnOk x \\r s. r = x \ P s\, \Q\" - by (simp add: valid_def validE_def returnOk_def return_def mres_def) - -lemma hoare_assume_preE: - "(\s. P s \ \P\ f \Q\,\R\) \ \P\ f \Q\,\R\" - by (auto simp: valid_def validE_def) - -lemma hoare_allI: - "(\x. \P\f\Q x\) \ \P\f\\r s. \x. Q x r s\" - by (simp add: valid_def) blast - -lemma validE_allI: - "(\x. \P\ f \\r s. Q x r s\,\E\) \ \P\ f \\r s. \x. Q x r s\,\E\" - by (fastforce simp: valid_def validE_def split: sum.splits) - -lemma hoare_exI: - "\P\ f \Q x\ \ \P\ f \\r s. \x. Q x r s\" - by (simp add: valid_def) blast - -lemma hoare_impI: - "(R \ \P\f\Q\) \ \P\f\\r s. R \ Q r s\" - by (simp add: valid_def) blast - -lemma validE_impI: - " \\E. \P\ f \\_ _. True\,\E\; (P' \ \P\ f \Q\,\E\)\ \ - \P\ f \\r s. P' \ Q r s\, \E\" - by (fastforce simp: validE_def valid_def split: sum.splits) - -lemma hoare_case_option_wp: - "\ \P\ f None \Q\; - \x. \P' x\ f (Some x) \Q' x\ \ - \ \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\" - by (cases v) auto - -subsection "Reasoning directly about states" - -lemma in_throwError: - "((v, s') \ mres (throwError e s)) = (v = Inl e \ s' = s)" - by (simp add: throwError_def return_def mres_def) - -lemma in_returnOk: - "((v', s') \ mres (returnOk v s)) = (v' = Inr v \ s' = s)" - by (simp add: returnOk_def return_def mres_def) - -lemma in_bind: - "((r,s') \ mres ((do x \ f; g x od) s)) = - (\s'' x. 
(x, s'') \ mres (f s) \ (r, s') \ mres (g x s''))" - apply (simp add: bind_def split_def mres_def) - apply (auto split: tmres.splits; force elim: rev_bexI image_eqI[rotated]) - done - -lemma in_bindE_R: - "((Inr r,s') \ mres ((doE x \ f; g x odE) s)) = - (\s'' x. (Inr x, s'') \ mres (f s) \ (Inr r, s') \ mres (g x s''))" - apply (simp add: bindE_def in_bind) - apply (simp add: lift_def split_def) - apply (clarsimp simp: throwError_def return_def lift_def mres_def split: sum.splits) - apply force - done - -lemma in_bindE_L: - "((Inl r, s') \ mres ((doE x \ f; g x odE) s)) \ - (\s'' x. (Inr x, s'') \ mres (f s) \ (Inl r, s') \ mres (g x s'')) \ ((Inl r, s') \ mres (f s))" - apply (simp add: bindE_def in_bind lift_def) - apply safe - apply (simp add: return_def throwError_def lift_def split_def mres_def split: sum.splits if_split_asm) - apply force+ - done - -lemma in_return: - "(r, s') \ mres (return v s) = (r = v \ s' = s)" - by (simp add: return_def mres_def) - -lemma in_liftE: - "((v, s') \ mres (liftE f s)) = (\v'. v = Inr v' \ (v', s') \ mres (f s))" - by (auto simp add: liftE_def in_bind in_return) - -lemma in_whenE: "((v, s') \ mres (whenE P f s)) = ((P \ (v, s') \ mres (f s)) \ - (\P \ v = Inr () \ s' = s))" - by (simp add: whenE_def in_returnOk) - -lemma inl_whenE: - "((Inl x, s') \ mres (whenE P f s)) = (P \ (Inl x, s') \ mres (f s))" - by (auto simp add: in_whenE) - -lemma in_fail: - "r \ mres (fail s) = False" - by (simp add: fail_def mres_def) - -lemma in_assert: - "(r, s') \ mres (assert P s) = (P \ s' = s)" - by (auto simp add: assert_def return_def fail_def mres_def) - -lemma in_assertE: - "(r, s') \ mres (assertE P s) = (P \ r = Inr () \ s' = s)" - by (auto simp add: assertE_def returnOk_def return_def fail_def mres_def) - -lemma in_assert_opt: - "(r, s') \ mres (assert_opt v s) = (v = Some r \ s' = s)" - by (auto simp: assert_opt_def in_fail in_return split: option.splits) - -lemma in_get: - "(r, s') \ mres (get s) = (r = s \ s' = s)" - by (simp add: get_def mres_def) - -lemma in_gets: - "(r, s') \ mres (gets f s) = (r = f s \ s' = s)" - by (simp add: simpler_gets_def mres_def) - -lemma in_put: - "(r, s') \ mres (put x s) = (s' = x \ r = ())" - by (simp add: put_def mres_def) - -lemma in_when: - "(v, s') \ mres (when P f s) = ((P \ (v, s') \ mres (f s)) \ (\P \ v = () \ s' = s))" - by (simp add: when_def in_return) - -lemma in_modify: - "(v, s') \ mres (modify f s) = (s'=f s \ v = ())" - by (auto simp add: modify_def bind_def get_def put_def mres_def) - -lemma gets_the_in_monad: - "((v, s') \ mres (gets_the f s)) = (s' = s \ f s = Some v)" - by (auto simp: gets_the_def in_bind in_gets in_assert_opt split: option.split) - -lemma in_alternative: - "(r,s') \ mres ((f \ g) s) = ((r,s') \ mres (f s) \ (r,s') \ mres (g s))" - by (auto simp add: alternative_def mres_def) - -lemmas in_monad = inl_whenE in_whenE in_liftE in_bind in_bindE_L - in_bindE_R in_returnOk in_throwError in_fail - in_assertE in_assert in_return in_assert_opt - in_get in_gets in_put in_when (* unlessE_whenE *) - (* unless_when *) in_modify gets_the_in_monad - in_alternative - -subsection "Non-Failure" - -lemma no_failD: - "\ no_fail P m; P s \ \ Failed \ snd ` m s" - by (simp add: no_fail_def) - -lemma non_fail_modify [wp,simp]: - "no_fail \ (modify f)" - by (simp add: no_fail_def modify_def get_def put_def bind_def) - -lemma non_fail_gets_simp[simp]: - "no_fail P (gets f)" - unfolding no_fail_def gets_def get_def return_def bind_def - by simp - -lemma non_fail_gets: - "no_fail \ (gets f)" - by simp - -lemma 
snd_pair_image: - "snd ` Pair x ` S = S" - by (simp add: image_def) - -lemma non_fail_select [simp]: - "no_fail \ (select S)" - by (simp add: no_fail_def select_def image_def) - -lemma no_fail_pre: - "\ no_fail P f; \s. Q s \ P s\ \ no_fail Q f" - by (simp add: no_fail_def) - -lemma no_fail_alt [wp]: - "\ no_fail P f; no_fail Q g \ \ no_fail (P and Q) (f \ g)" - by (auto simp add: no_fail_def alternative_def) - -lemma no_fail_return [simp, wp]: - "no_fail \ (return x)" - by (simp add: return_def no_fail_def) - -lemma no_fail_get [simp, wp]: - "no_fail \ get" - by (simp add: get_def no_fail_def) - -lemma no_fail_put [simp, wp]: - "no_fail \ (put s)" - by (simp add: put_def no_fail_def) - -lemma no_fail_when [wp]: - "(P \ no_fail Q f) \ no_fail (if P then Q else \) (when P f)" - by (simp add: when_def) - -lemma no_fail_unless [wp]: - "(\P \ no_fail Q f) \ no_fail (if P then \ else Q) (unless P f)" - by (simp add: unless_def when_def) - -lemma no_fail_fail [simp, wp]: - "no_fail \ fail" - by (simp add: fail_def no_fail_def) - -lemmas [wp] = non_fail_gets - -lemma no_fail_assert [simp, wp]: - "no_fail (\_. P) (assert P)" - by (simp add: assert_def) - -lemma no_fail_assert_opt [simp, wp]: - "no_fail (\_. P \ None) (assert_opt P)" - by (simp add: assert_opt_def split: option.splits) - -lemma no_fail_case_option [wp]: - assumes f: "no_fail P f" - assumes g: "\x. no_fail (Q x) (g x)" - shows "no_fail (if x = None then P else Q (the x)) (case_option f g x)" - by (clarsimp simp add: f g) - -lemma no_fail_if [wp]: - "\ P \ no_fail Q f; \P \ no_fail R g \ \ - no_fail (if P then Q else R) (if P then f else g)" - by simp - -lemma no_fail_apply [wp]: - "no_fail P (f (g x)) \ no_fail P (f $ g x)" - by simp - -lemma no_fail_undefined [simp, wp]: - "no_fail \ undefined" - by (simp add: no_fail_def) - -lemma no_fail_returnOK [simp, wp]: - "no_fail \ (returnOk x)" - by (simp add: returnOk_def) - -(* text {* Empty results implies non-failure *} - -lemma empty_fail_modify [simp]: - "empty_fail (modify f)" - by (simp add: empty_fail_def simpler_modify_def) - -lemma empty_fail_gets [simp]: - "empty_fail (gets f)" - by (simp add: empty_fail_def simpler_gets_def) - -lemma empty_failD: - "\ empty_fail m; fst (m s) = {} \ \ snd (m s)" - by (simp add: empty_fail_def) - -lemma empty_fail_select_f [simp]: - assumes ef: "fst S = {} \ snd S" - shows "empty_fail (select_f S)" - by (fastforce simp add: empty_fail_def select_f_def intro: ef) - -lemma empty_fail_bind [simp]: - "\ empty_fail a; \x. empty_fail (b x) \ \ empty_fail (a >>= b)" - apply (simp add: bind_def empty_fail_def split_def) - apply clarsimp - apply (case_tac "fst (a s) = {}") - apply blast - apply (clarsimp simp: ex_in_conv [symmetric]) - done - -lemma empty_fail_return [simp]: - "empty_fail (return x)" - by (simp add: empty_fail_def return_def) - -lemma empty_fail_mapM [simp]: - assumes m: "\x. empty_fail (m x)" - shows "empty_fail (mapM m xs)" -proof (induct xs) - case Nil - thus ?case by (simp add: mapM_def sequence_def) -next - case Cons - have P: "\m x xs. 
mapM m (x # xs) = (do y \ m x; ys \ (mapM m xs); return (y # ys) od)" - by (simp add: mapM_def sequence_def Let_def) - from Cons - show ?case by (simp add: P m) -qed - -lemma empty_fail [simp]: - "empty_fail fail" - by (simp add: fail_def empty_fail_def) - -lemma empty_fail_assert_opt [simp]: - "empty_fail (assert_opt x)" - by (simp add: assert_opt_def split: option.splits) - -lemma empty_fail_mk_ef: - "empty_fail (mk_ef o m)" - by (simp add: empty_fail_def mk_ef_def) - *) -subsection "Failure" - -lemma fail_wp: "\\x. True\ fail \Q\" - by (simp add: valid_def fail_def mres_def vimage_def) - -lemma failE_wp: "\\x. True\ fail \Q\,\E\" - by (simp add: validE_def fail_wp) - -lemma fail_update [iff]: - "fail (f s) = fail s" - by (simp add: fail_def) - - -text \We can prove postconditions using hoare triples\ - -lemma post_by_hoare: "\ \P\ f \Q\; P s; (r, s') \ mres (f s) \ \ Q r s'" - apply (simp add: valid_def) - apply blast - done - -text \Weakest Precondition Rules\ - -lemma hoare_vcg_prop: - "\\s. P\ f \\rv s. P\" - by (simp add: valid_def) - -lemma return_wp: - "\P x\ return x \P\" - by(simp add:valid_def return_def mres_def) - -(* lemma get_wp: - "\\s. P s s\ get \P\" - by(auto simp add:valid_def split_def get_def mres_def) - *) -lemma gets_wp: - "\\s. P (f s) s\ gets f \P\" - by(simp add:valid_def split_def gets_def return_def get_def bind_def mres_def) - -(* lemma modify_wp: - "\\s. P () (f s)\ modify f \P\" - by(simp add:valid_def split_def modify_def get_def put_def bind_def ) - *) -(* lemma put_wp: - "\\s. P () x\ put x \P\" - by(simp add:valid_def put_def) - *) -lemma returnOk_wp: - "\P x\ returnOk x \P\,\E\" - by(simp add:validE_def2 returnOk_def return_def mres_def) - -lemma throwError_wp: - "\E e\ throwError e \P\,\E\" - by(simp add:validE_def2 throwError_def return_def mres_def) - -lemma returnOKE_R_wp : "\P x\ returnOk x \P\, -" - by (simp add: validE_R_def validE_def valid_def returnOk_def return_def mres_def) - -lemma catch_wp: - "\ \x. \E x\ handler x \Q\; \P\ f \Q\,\E\ \ \ - \P\ catch f handler \Q\" - apply (unfold catch_def validE_def) - apply (erule seq_ext) - apply (simp add: return_wp split: sum.splits) - done - -lemma handleE'_wp: - "\ \x. \F x\ handler x \Q\,\E\; \P\ f \Q\,\F\ \ \ - \P\ f handler \Q\,\E\" - apply (unfold handleE'_def validE_def) - apply (erule seq_ext) - apply (clarsimp split: sum.splits) - apply (simp add: valid_def return_def mres_def) - done - -lemma handleE_wp: - assumes x: "\x. \F x\ handler x \Q\,\E\" - assumes y: "\P\ f \Q\,\F\" - shows "\P\ f handler \Q\,\E\" - by (simp add: handleE_def handleE'_wp [OF x y]) - -lemma hoare_vcg_split_if: - "\ P \ \Q\ f \S\; \P \ \R\ g \S\ \ \ - \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" - by simp - -lemma hoare_vcg_split_ifE: - "\ P \ \Q\ f \S\,\E\; \P \ \R\ g \S\,\E\ \ \ - \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\,\E\" - by simp - -lemma in_image_constant: - "(x \ (\_. 
v) ` S) = (x = v \ S \ {})" - by (simp add: image_constant_conv) - -lemma hoare_liftM_subst: "\P\ liftM f m \Q\ = \P\ m \Q \ f\" - apply (simp add: liftM_def bind_def2 return_def split_def mres_def) - apply (simp add: valid_def Ball_def mres_def image_Un) - apply (simp add: image_image in_image_constant) - apply (rule_tac f=All in arg_cong) - apply (rule ext) - apply force - done - -lemma liftE_validE[simp]: "\P\ liftE f \Q\,\E\ = \P\ f \Q\" - apply (simp add: liftE_liftM validE_def hoare_liftM_subst o_def) - done - -lemma liftE_wp: - "\P\ f \Q\ \ \P\ liftE f \Q\,\E\" - by simp - -lemma liftM_wp: "\P\ m \Q \ f\ \ \P\ liftM f m \Q\" - by (simp add: hoare_liftM_subst) - -lemma hoare_liftME_subst: "\P\ liftME f m \Q\,\E\ = \P\ m \Q \ f\,\E\" - apply (simp add: validE_def liftME_liftM hoare_liftM_subst o_def) - apply (rule_tac f="valid P m" in arg_cong) - apply (rule ext)+ - apply (case_tac x, simp_all) - done - -lemma liftME_wp: "\P\ m \Q \ f\,\E\ \ \P\ liftME f m \Q\,\E\" - by (simp add: hoare_liftME_subst) - -(* FIXME: Move *) -lemma o_const_simp[simp]: "(\x. C) \ f = (\x. C)" - by (simp add: o_def) - -lemma hoare_vcg_split_case_option: - "\ \x. x = None \ \P x\ f x \R x\; - \x y. x = Some y \ \Q x y\ g x y \R x\ \ \ - \\s. (x = None \ P x s) \ - (\y. x = Some y \ Q x y s)\ - case x of None \ f x - | Some y \ g x y - \R x\" - apply(simp add:valid_def split_def) - apply(case_tac x, simp_all) -done - -lemma hoare_vcg_split_case_optionE: - assumes none_case: "\x. x = None \ \P x\ f x \R x\,\E x\" - assumes some_case: "\x y. x = Some y \ \Q x y\ g x y \R x\,\E x\" - shows "\\s. (x = None \ P x s) \ - (\y. x = Some y \ Q x y s)\ - case x of None \ f x - | Some y \ g x y - \R x\,\E x\" - apply(case_tac x, simp_all) - apply(rule none_case, simp) - apply(rule some_case, simp) -done - -lemma hoare_vcg_split_case_sum: - "\ \x a. x = Inl a \ \P x a\ f x a \R x\; - \x b. x = Inr b \ \Q x b\ g x b \R x\ \ \ - \\s. (\a. x = Inl a \ P x a s) \ - (\b. x = Inr b \ Q x b s) \ - case x of Inl a \ f x a - | Inr b \ g x b - \R x\" - apply(simp add:valid_def split_def) - apply(case_tac x, simp_all) -done - -lemma hoare_vcg_split_case_sumE: - assumes left_case: "\x a. x = Inl a \ \P x a\ f x a \R x\" - assumes right_case: "\x b. x = Inr b \ \Q x b\ g x b \R x\" - shows "\\s. (\a. x = Inl a \ P x a s) \ - (\b. x = Inr b \ Q x b s) \ - case x of Inl a \ f x a - | Inr b \ g x b - \R x\" - apply(case_tac x, simp_all) - apply(rule left_case, simp) - apply(rule right_case, simp) -done - -lemma hoare_vcg_precond_imp: - "\ \Q\ f \R\; \s. P s \ Q s \ \ \P\ f \R\" - by (fastforce simp add:valid_def) - -lemma hoare_vcg_precond_impE: - "\ \Q\ f \R\,\E\; \s. P s \ Q s \ \ \P\ f \R\,\E\" - by (fastforce simp add:validE_def2) - -lemma hoare_seq_ext: - assumes g_valid: "\x. \B x\ g x \C\" - assumes f_valid: "\A\ f \B\" - shows "\A\ do x \ f; g x od \C\" - apply(insert f_valid g_valid) - apply(blast intro: seq_ext') -done - -lemma hoare_vcg_seqE: - assumes g_valid: "\x. \B x\ g x \C\,\E\" - assumes f_valid: "\A\ f \B\,\E\" - shows "\A\ doE x \ f; g x odE \C\,\E\" - apply(insert f_valid g_valid) - apply(blast intro: seqE') -done - -lemma hoare_seq_ext_nobind: - "\ \B\ g \C\; - \A\ f \\r s. B s\ \ \ - \A\ do f; g od \C\" - apply (erule seq_ext) - apply (clarsimp simp: valid_def) - done - -lemma hoare_seq_ext_nobindE: - "\ \B\ g \C\,\E\; - \A\ f \\r s. B s\,\E\ \ \ - \A\ doE f; g odE \C\,\E\" - apply (erule seqE) - apply (clarsimp simp:validE_def) - done - -lemma hoare_chain: - "\ \P\ f \Q\; - \s. R s \ P s; - \r s. 
Q r s \ S r s \ \ - \R\ f \S\" - by(fastforce simp add:valid_def split_def) - -lemma validE_weaken: - "\ \P'\ A \Q'\,\E'\; \s. P s \ P' s; \r s. Q' r s \ Q r s; \r s. E' r s \ E r s \ \ \P\ A \Q\,\E\" - by (fastforce simp: validE_def2 split: sum.splits) - -lemmas hoare_chainE = validE_weaken - -lemma hoare_vcg_handle_elseE: - "\ \P\ f \Q\,\E\; - \e. \E e\ g e \R\,\F\; - \x. \Q x\ h x \R\,\F\ \ \ - \P\ f g h \R\,\F\" - apply (simp add: handle_elseE_def validE_def) - apply (rule seq_ext) - apply assumption - apply (simp split: sum.split) - done - -lemma in_mres: - "(tr, Result (rv, s)) \ S \ (rv, s) \ mres S" - by (fastforce simp: mres_def intro: image_eqI[rotated]) - -lemma alternative_valid: - assumes x: "\P\ f \Q\" - assumes y: "\P\ f' \Q\" - shows "\P\ f \ f' \Q\" - apply (simp add: valid_def alternative_def mres_def) - using post_by_hoare[OF x _ in_mres] post_by_hoare[OF y _ in_mres] - apply auto - done - -lemma alternative_wp: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f' \Q\" - shows "\P and P'\ f \ f' \Q\" - apply (rule alternative_valid) - apply (rule hoare_pre_imp [OF _ x], simp) - apply (rule hoare_pre_imp [OF _ y], simp) - done - -lemma alternativeE_wp: - assumes x: "\P\ f \Q\,\E\" and y: "\P'\ f' \Q\,\E\" - shows "\P and P'\ f \ f' \Q\,\E\" - apply (unfold validE_def) - apply (wp add: x y alternative_wp | simp | fold validE_def)+ - done - -lemma alternativeE_R_wp: - "\ \P\ f \Q\,-; \P'\ f' \Q\,- \ \ \P and P'\ f \ f' \Q\,-" - apply (simp add: validE_R_def) - apply (rule alternativeE_wp) - apply assumption+ - done - -lemma alternative_R_wp: - "\ \P\ f -,\Q\; \P'\ g -,\Q\ \ \ \P and P'\ f \ g -, \Q\" - apply (simp add: validE_E_def) - apply (rule alternativeE_wp) - apply assumption+ - done - -lemma state_select_wp [wp]: "\ \s. \t. (s, t) \ f \ P () t \ state_select f \ P \" - apply (clarsimp simp: state_select_def assert_def) - apply (rule hoare_weaken_pre) - apply (wp select_wp hoare_vcg_split_if return_wp fail_wp) - apply simp - done - -lemma condition_wp [wp]: - "\ \ Q \ A \ P \; \ R \ B \ P \ \ \ \ \s. if C s then Q s else R s \ condition C A B \ P \" - apply (clarsimp simp: condition_def) - apply (clarsimp simp: valid_def pred_conj_def pred_neg_def split_def) - done - -lemma conditionE_wp [wp]: - "\ \ P \ A \ Q \,\ R \; \ P' \ B \ Q \,\ R \ \ \ \ \s. if C s then P s else P' s \ condition C A B \Q\,\R\" - apply (clarsimp simp: condition_def) - apply (clarsimp simp: validE_def valid_def) - done - -lemma state_assert_wp [wp]: "\ \s. f s \ P () s \ state_assert f \ P \" - apply (clarsimp simp: state_assert_def get_def - assert_def bind_def valid_def return_def fail_def mres_def) - done - -text \The weakest precondition handler which works on conjunction\ - -lemma hoare_vcg_conj_lift: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f \Q'\" - shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" - apply (subst bipred_conj_def[symmetric], rule hoare_post_conj) - apply (rule hoare_pre_imp [OF _ x], simp) - apply (rule hoare_pre_imp [OF _ y], simp) - done - -lemma hoare_vcg_conj_liftE1: - "\ \P\ f \Q\,-; \P'\ f \Q'\,\E\ \ \ - \P and P'\ f \\r s. Q r s \ Q' r s\,\E\" - unfolding valid_def validE_R_def validE_def - apply (clarsimp simp: split_def split: sum.splits) - apply (erule allE, erule (1) impE) - apply (erule allE, erule (1) impE) - apply (drule (1) bspec) - apply (drule (1) bspec) - apply clarsimp - done - -lemma hoare_vcg_disj_lift: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f \Q'\" - shows "\\s. P s \ P' s\ f \\rv s. 
Q rv s \ Q' rv s\" - apply (simp add: valid_def) - apply safe - apply (erule(1) post_by_hoare [OF x]) - apply (erule notE) - apply (erule(1) post_by_hoare [OF y]) - done - -lemma hoare_vcg_const_Ball_lift: - "\ \x. x \ S \ \P x\ f \Q x\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\" - by (fastforce simp: valid_def) - -lemma hoare_vcg_const_Ball_lift_R: - "\ \x. x \ S \ \P x\ f \Q x\,- \ \ - \\s. \x \ S. P x s\ f \\rv s. \x \ S. Q x rv s\,-" - apply (simp add: validE_R_def validE_def) - apply (rule hoare_strengthen_post) - apply (erule hoare_vcg_const_Ball_lift) - apply (simp split: sum.splits) - done - -lemma hoare_vcg_all_lift: - "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" - by (fastforce simp: valid_def) - -lemma hoare_vcg_all_lift_R: - "(\x. \P x\ f \Q x\, -) \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\, -" - by (rule hoare_vcg_const_Ball_lift_R[where S=UNIV, simplified]) - -lemma hoare_vcg_const_imp_lift: - "\ P \ \Q\ m \R\ \ \ - \\s. P \ Q s\ m \\rv s. P \ R rv s\" - by (cases P, simp_all add: hoare_vcg_prop) - -lemma hoare_vcg_const_imp_lift_R: - "(P \ \Q\ m \R\,-) \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" - by (fastforce simp: validE_R_def validE_def valid_def split_def split: sum.splits) - -lemma hoare_weak_lift_imp: - "\P'\ f \Q\ \ \\s. P \ P' s\ f \\rv s. P \ Q rv s\" - by (auto simp add: valid_def split_def) - -lemma hoare_vcg_ex_lift: - "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" - by (clarsimp simp: valid_def, blast) - -lemma hoare_vcg_ex_lift_R1: - "(\x. \P x\ f \Q\, -) \ \\s. \x. P x s\ f \Q\, -" - by (fastforce simp: valid_def validE_R_def validE_def split: sum.splits) - -(* for instantiations *) -lemma hoare_triv: "\P\f\Q\ \ \P\f\Q\" . -lemma hoare_trivE: "\P\ f \Q\,\E\ \ \P\ f \Q\,\E\" . -lemma hoare_trivE_R: "\P\ f \Q\,- \ \P\ f \Q\,-" . -lemma hoare_trivR_R: "\P\ f -,\E\ \ \P\ f -,\E\" . - -lemma hoare_weaken_preE_E: - "\ \P'\ f -,\Q\; \s. P s \ P' s \ \ \P\ f -,\Q\" - by (fastforce simp add: validE_E_def validE_def valid_def) - -lemma hoare_vcg_E_conj: - "\ \P\ f -,\E\; \P'\ f \Q'\,\E'\ \ - \ \\s. P s \ P' s\ f \Q'\, \\rv s. E rv s \ E' rv s\" - apply (unfold validE_def validE_E_def) - apply (rule hoare_post_imp [OF _ hoare_vcg_conj_lift], simp_all) - apply (case_tac r, simp_all) - done - -lemma hoare_vcg_E_elim: - "\ \P\ f -,\E\; \P'\ f \Q\,- \ - \ \\s. P s \ P' s\ f \Q\,\E\" - by (rule hoare_post_impErr [OF hoare_vcg_E_conj], - (simp add: validE_R_def)+) - -lemma hoare_vcg_R_conj: - "\ \P\ f \Q\,-; \P'\ f \Q'\,- \ - \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" - apply (unfold validE_R_def validE_def) - apply (rule hoare_post_imp [OF _ hoare_vcg_conj_lift], simp_all) - apply (case_tac r, simp_all) - done - -lemma valid_validE: - "\P\ f \\rv. Q\ \ \P\ f \\rv. Q\,\\rv. Q\" - apply (simp add: validE_def) - done - -lemma valid_validE2: - "\ \P\ f \\_. Q'\; \s. Q' s \ Q s; \s. Q' s \ E s \ \ \P\ f \\_. Q\,\\_. E\" - unfolding valid_def validE_def - by (clarsimp split: sum.splits) blast - -lemma validE_valid: "\P\ f \\rv. Q\,\\rv. Q\ \ \P\ f \\rv. Q\" - apply (unfold validE_def) - apply (rule hoare_post_imp) - defer - apply assumption - apply (case_tac r, simp_all) - done - -lemma valid_validE_R: - "\P\ f \\rv. Q\ \ \P\ f \\rv. Q\,-" - by (simp add: validE_R_def hoare_post_impErr [OF valid_validE]) - -lemma valid_validE_E: - "\P\ f \\rv. Q\ \ \P\ f -,\\rv. 
Q\" - by (simp add: validE_E_def hoare_post_impErr [OF valid_validE]) - -lemma validE_validE_R: "\P\ f \Q\,\\\\ \ \P\ f \Q\,-" - by (simp add: validE_R_def) - -lemma validE_R_validE: "\P\ f \Q\,- \ \P\ f \Q\,\\\\" - by (simp add: validE_R_def) - -lemma hoare_post_imp_R: "\ \P\ f \Q'\,-; \r s. Q' r s \ Q r s \ \ \P\ f \Q\,-" - apply (unfold validE_R_def) - apply (rule hoare_post_impErr, simp+) - done - -lemma hoare_post_comb_imp_conj: - "\ \P'\ f \Q\; \P\ f \Q'\; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\" - apply (rule hoare_pre_imp) - defer - apply (rule hoare_vcg_conj_lift) - apply assumption+ - apply simp - done - -lemma hoare_vcg_precond_impE_R: "\ \P'\ f \Q\,-; \s. P s \ P' s \ \ \P\ f \Q\,-" - by (unfold validE_R_def, rule hoare_vcg_precond_impE, simp+) - -(* lemma valid_is_triple: - "valid P f Q = triple_judgement P f (postcondition Q (\s f. fst (f s)))" - by (simp add: triple_judgement_def valid_def postcondition_def) - *) - -lemma validE_is_triple: - "validE P f Q E = triple_judgement P f - (postconditions (\s f. (\(r,s') \ {(rv, s'). (Inr rv, s') \ (mres (f s))}. Q r s')) - (\s f. (\(r,s') \ {(rv, s'). (Inl rv, s') \ (mres (f s))}. E r s')))" - apply (simp add: validE_def triple_judgement_def valid_def postcondition_def - postconditions_def split_def split: sum.split) - apply (fastforce elim: image_eqI[rotated]) - done - -lemma validE_R_is_triple: - "validE_R P f Q = triple_judgement P f - (\s f. (\(r,s') \ {(rv, s'). (Inr rv, s') \ mres (f s)}. Q r s'))" - by (simp add: validE_R_def validE_is_triple postconditions_def postcondition_def) - -lemma validE_E_is_triple: - "validE_E P f E = triple_judgement P f - (\s f. (\(r,s') \ {(rv, s'). (Inl rv, s') \ mres (f s)}. E r s'))" - by (simp add: validE_E_def validE_is_triple postconditions_def postcondition_def) - -lemmas hoare_wp_combs = - hoare_post_comb_imp_conj hoare_vcg_precond_imp hoare_vcg_conj_lift - -lemmas hoare_wp_combsE = - hoare_vcg_precond_impE - hoare_vcg_precond_impE_R - validE_validE_R - hoare_vcg_R_conj - hoare_vcg_E_elim - hoare_vcg_E_conj - -lemmas hoare_wp_state_combsE = - hoare_vcg_precond_impE[OF valid_validE] - hoare_vcg_precond_impE_R[OF valid_validE_R] - valid_validE_R - hoare_vcg_R_conj[OF valid_validE_R] - hoare_vcg_E_elim[OF valid_validE_E] - hoare_vcg_E_conj[OF valid_validE_E] - -lemmas hoare_wp_splits [wp_split] = - hoare_seq_ext hoare_vcg_seqE handleE'_wp handleE_wp - validE_validE_R [OF hoare_vcg_seqE [OF validE_R_validE]] - validE_validE_R [OF handleE'_wp [OF validE_R_validE]] - validE_validE_R [OF handleE_wp [OF validE_R_validE]] - catch_wp hoare_vcg_split_if hoare_vcg_split_ifE - validE_validE_R [OF hoare_vcg_split_ifE [OF validE_R_validE validE_R_validE]] - liftM_wp liftME_wp - validE_validE_R [OF liftME_wp [OF validE_R_validE]] - validE_valid - -lemmas [wp_comb] = hoare_wp_state_combsE hoare_wp_combsE hoare_wp_combs - -lemmas [wp] = hoare_vcg_prop - wp_post_taut - return_wp - put_wp - get_wp - gets_wp - modify_wp - returnOk_wp - throwError_wp - fail_wp - failE_wp - liftE_wp - -lemmas [wp_trip] = valid_is_triple validE_is_triple validE_E_is_triple validE_R_is_triple - - -text \Simplifications on conjunction\ - -lemma hoare_post_eq: "\ Q = Q'; \P\ f \Q'\ \ \ \P\ f \Q\" - by simp -lemma hoare_post_eqE1: "\ Q = Q'; \P\ f \Q'\,\E\ \ \ \P\ f \Q\,\E\" - by simp -lemma hoare_post_eqE2: "\ E = E'; \P\ f \Q\,\E'\ \ \ \P\ f \Q\,\E\" - by simp -lemma hoare_post_eqE_R: "\ Q = Q'; \P\ f \Q'\,- \ \ \P\ f \Q\,-" - by simp - -lemma pred_conj_apply_elim: "(\r. Q r and Q' r) = (\r s. 
Q r s \ Q' r s)" - by (simp add: pred_conj_def) -lemma pred_conj_conj_elim: "(\r s. (Q r and Q' r) s \ Q'' r s) = (\r s. Q r s \ Q' r s \ Q'' r s)" - by simp -lemma conj_assoc_apply: "(\r s. (Q r s \ Q' r s) \ Q'' r s) = (\r s. Q r s \ Q' r s \ Q'' r s)" - by simp -lemma all_elim: "(\rv s. \x. P rv s) = P" - by simp -lemma all_conj_elim: "(\rv s. (\x. P rv s) \ Q rv s) = (\rv s. P rv s \ Q rv s)" - by simp - -lemmas vcg_rhs_simps = pred_conj_apply_elim pred_conj_conj_elim - conj_assoc_apply all_elim all_conj_elim - -lemma if_apply_reduct: "\P\ If P' (f x) (g x) \Q\ \ \P\ If P' f g x \Q\" - by (cases P', simp_all) -lemma if_apply_reductE: "\P\ If P' (f x) (g x) \Q\,\E\ \ \P\ If P' f g x \Q\,\E\" - by (cases P', simp_all) -lemma if_apply_reductE_R: "\P\ If P' (f x) (g x) \Q\,- \ \P\ If P' f g x \Q\,-" - by (cases P', simp_all) - -lemmas hoare_wp_simps [wp_split] = - vcg_rhs_simps [THEN hoare_post_eq] vcg_rhs_simps [THEN hoare_post_eqE1] - vcg_rhs_simps [THEN hoare_post_eqE2] vcg_rhs_simps [THEN hoare_post_eqE_R] - if_apply_reduct if_apply_reductE if_apply_reductE_R TrueI - -schematic_goal if_apply_test: "\?Q\ (if A then returnOk else K fail) x \P\,\E\" - by wpsimp - -lemma hoare_elim_pred_conj: - "\P\ f \\r s. Q r s \ Q' r s\ \ \P\ f \\r. Q r and Q' r\" - by (unfold pred_conj_def) - -lemma hoare_elim_pred_conjE1: - "\P\ f \\r s. Q r s \ Q' r s\,\E\ \ \P\ f \\r. Q r and Q' r\,\E\" - by (unfold pred_conj_def) - -lemma hoare_elim_pred_conjE2: - "\P\ f \Q\, \\x s. E x s \ E' x s\ \ \P\ f \Q\,\\x. E x and E' x\" - by (unfold pred_conj_def) - -lemma hoare_elim_pred_conjE_R: - "\P\ f \\r s. Q r s \ Q' r s\,- \ \P\ f \\r. Q r and Q' r\,-" - by (unfold pred_conj_def) - -lemmas hoare_wp_pred_conj_elims = - hoare_elim_pred_conj hoare_elim_pred_conjE1 - hoare_elim_pred_conjE2 hoare_elim_pred_conjE_R - -lemmas hoare_weaken_preE = hoare_vcg_precond_impE - -lemmas hoare_pre [wp_pre] = - hoare_weaken_pre - hoare_weaken_preE - hoare_vcg_precond_impE_R - hoare_weaken_preE_E - -declare no_fail_pre [wp_pre] - -bundle no_pre = hoare_pre [wp_pre del] no_fail_pre [wp_pre del] - -text \Miscellaneous lemmas on hoare triples\ - -lemma hoare_vcg_mp: - assumes a: "\P\ f \Q\" - assumes b: "\P\ f \\r s. Q r s \ Q' r s\" - shows "\P\ f \Q'\" - using assms - by (auto simp: valid_def split_def) - -(* note about this precond stuff: rules get a chance to bind directly - before any of their combined forms. As a result, these precondition - implication rules are only used when needed. *) - -lemma hoare_add_post: - assumes r: "\P'\ f \Q'\" - assumes impP: "\s. P s \ P' s" - assumes impQ: "\P\ f \\rv s. Q' rv s \ Q rv s\" - shows "\P\ f \Q\" - apply (rule hoare_chain) - apply (rule hoare_vcg_conj_lift) - apply (rule r) - apply (rule impQ) - apply simp - apply (erule impP) - apply simp - done - -lemma hoare_whenE_wp: - "(P \ \Q\ f \R\, \E\) \ \if P then Q else R ()\ whenE P f \R\, \E\" - unfolding whenE_def by clarsimp wp - -lemma hoare_gen_asmE: - "(P \ \P'\ f \Q\,-) \ \P' and K P\ f \Q\, -" - by (simp add: validE_R_def validE_def valid_def) blast - -lemma hoare_list_case: - assumes P1: "\P1\ f f1 \Q\" - assumes P2: "\y ys. xs = y#ys \ \P2 y ys\ f (f2 y ys) \Q\" - shows "\case xs of [] \ P1 | y#ys \ P2 y ys\ - f (case xs of [] \ f1 | y#ys \ f2 y ys) - \Q\" - apply (cases xs; simp) - apply (rule P1) - apply (rule P2) - apply simp - done - -lemma hoare_unless_wp: - "(\P \ \Q\ f \R\) \ \if P then R () else Q\ unless P f \R\" - unfolding unless_def by wp auto - -lemma hoare_use_eq: - assumes x: "\P. \\s. P (f s)\ m \\rv s. 
P (f s)\" - assumes y: "\f. \\s. P f s\ m \\rv s. Q f s\" - shows "\\s. P (f s) s\ m \\rv s. Q (f s :: 'c :: type) s \" - apply (rule_tac Q="\rv s. \f'. f' = f s \ Q f' s" in hoare_post_imp) - apply simp - apply (wpsimp wp: hoare_vcg_ex_lift x y) - done - -lemma hoare_return_sp: - "\P\ return x \\r. P and K (r = x)\" - by (simp add: valid_def return_def mres_def) - -lemma hoare_fail_any [simp]: - "\P\ fail \Q\" by wp - -lemma hoare_failE [simp]: "\P\ fail \Q\,\E\" by wp - -lemma hoare_FalseE [simp]: - "\\s. False\ f \Q\,\E\" - by (simp add: valid_def validE_def) - -lemma hoare_K_bind [wp]: - "\P\ f \Q\ \ \P\ K_bind f x \Q\" - by simp - -text \Setting up the precondition case splitter.\ - -lemma wpc_helper_valid: - "\Q\ g \S\ \ wpc_helper (P, P') (Q, Q') \P\ g \S\" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_validE: - "\Q\ f \R\,\E\ \ wpc_helper (P, P') (Q, Q') \P\ f \R\,\E\" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_validE_R: - "\Q\ f \R\,- \ wpc_helper (P, P') (Q, Q') \P\ f \R\,-" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_validR_R: - "\Q\ f -,\E\ \ wpc_helper (P, P') (Q, Q') \P\ f -,\E\" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_no_fail_final: - "no_fail Q f \ wpc_helper (P, P') (Q, Q') (no_fail P f)" - by (clarsimp simp: wpc_helper_def elim!: no_fail_pre) - -lemma wpc_helper_validNF: - "\Q\ g \S\! \ wpc_helper (P, P') (Q, Q') \P\ g \S\!" - apply (clarsimp simp: wpc_helper_def) - by (metis hoare_wp_combs(2) no_fail_pre validNF_def) - -lemma wpc_helper_validI: - "(\Q\,\R\ g \G\,\S\) \ wpc_helper (P, P') (split Q, Q') (\curry P\,\R\ g \G\,\S\)" - by (clarsimp simp: wpc_helper_def elim!: validI_weaken_pre) - -wpc_setup "\m. \P\ m \Q\" wpc_helper_valid -wpc_setup "\m. \P\ m \Q\,\E\" wpc_helper_validE -wpc_setup "\m. \P\ m \Q\,-" wpc_helper_validE_R -wpc_setup "\m. \P\ m -,\E\" wpc_helper_validR_R -wpc_setup "\m. no_fail P m" wpc_helper_no_fail_final -wpc_setup "\m. \P\ m \Q\!" wpc_helper_validNF -wpc_setup "\m. \P\,\R\ m \G\,\S\" wpc_helper_validI - -lemma in_liftM: - "((r, s') \ mres (liftM t f s)) = (\r'. (r', s') \ mres (f s) \ r = t r')" - by (simp add: liftM_def in_return in_bind) - -(* FIXME: eliminate *) -lemmas handy_liftM_lemma = in_liftM - -lemma hoare_fun_app_wp[wp]: - "\P\ f' x \Q'\ \ \P\ f' $ x \Q'\" - "\P\ f x \Q\,\E\ \ \P\ f $ x \Q\,\E\" - "\P\ f x \Q\,- \ \P\ f $ x \Q\,-" - "\P\ f x -,\E\ \ \P\ f $ x -,\E\" - by simp+ - -lemma hoare_validE_pred_conj: - "\ \P\f\Q\,\E\; \P\f\R\,\E\ \ \ \P\f\Q And R\,\E\" - unfolding valid_def validE_def by (simp add: split_def split: sum.splits) - -lemma hoare_validE_conj: - "\ \P\f\Q\,\E\; \P\f\R\,\E\ \ \ \P\ f \\r s. Q r s \ R r s\,\E\" - unfolding valid_def validE_def by (simp add: split_def split: sum.splits) - -lemma hoare_valid_validE: - "\P\f\\r. Q\ \ \P\f\\r. Q\,\\r. Q\" - unfolding valid_def validE_def by (simp add: split_def split: sum.splits) - -lemma liftE_validE_E [wp]: - "\\\ liftE f -, \Q\" - by (clarsimp simp: validE_E_def valid_def) - -lemma validE_validE_E [wp_comb]: - "\P\ f \\\\, \E\ \ \P\ f -, \E\" - by (simp add: validE_E_def) - -lemma validE_E_validE: - "\P\ f -, \E\ \ \P\ f \\\\, \E\" - by (simp add: validE_E_def) - -(* - * if_validE_E: - * - * \?P1 \ \?Q1\ ?f1 -, \?E\; \ ?P1 \ \?R1\ ?g1 -, \?E\\ \ \\s. 
(?P1 \ ?Q1 s) \ (\ ?P1 \ ?R1 s)\ if ?P1 then ?f1 else ?g1 -, \?E\ - *) -lemmas if_validE_E [wp_split] = - validE_validE_E [OF hoare_vcg_split_ifE [OF validE_E_validE validE_E_validE]] - -lemma returnOk_E [wp]: - "\\\ returnOk r -, \Q\" - by (simp add: validE_E_def) wp - -lemma hoare_drop_imp: - "\P\ f \Q\ \ \P\ f \\r s. R r s \ Q r s\" - by (auto simp: valid_def) - -lemma hoare_drop_impE: - "\\P\ f \\r. Q\, \E\\ \ \P\ f \\r s. R r s \ Q s\, \E\" - by (simp add: validE_weaken) - -lemma hoare_drop_impE_R: - "\P\ f \Q\,- \ \P\ f \\r s. R r s \ Q r s\, -" - by (auto simp: validE_R_def validE_def valid_def split_def split: sum.splits) - -lemma hoare_drop_impE_E: - "\P\ f -,\Q\ \ \P\ f -,\\r s. R r s \ Q r s\" - by (auto simp: validE_E_def validE_def valid_def split_def split: sum.splits) - -lemmas hoare_drop_imps = hoare_drop_imp hoare_drop_impE_R hoare_drop_impE_E -lemma mres_union: - "mres (a \ b) = mres a \ mres b" - by (simp add: mres_def image_Un) - -lemma mres_Failed_empty: - "mres ((\xs. (xs, Failed)) ` X ) = {}" - "mres ((\xs. (xs, Incomplete)) ` X ) = {}" - by (auto simp add: mres_def image_def) - -lemma det_set_option_eq: - "(\a\m. set_option (snd a)) = {(r, s')} \ - (ts, Some (rr, ss)) \ m \ rr = r \ ss = s'" - by (metis UN_I option.set_intros prod.inject singleton_iff snd_conv) - -lemma det_set_option_eq': - "(\a\m. set_option (snd a)) = {(r, s')} \ - Some (r, s') \ snd ` m" - using image_iff by fastforce - -lemma bind_det_exec: - "mres (a s) = {(r,s')} \ mres ((a >>= b) s) = mres (b r s')" - by (simp add: in_bind set_eq_iff) - -lemma in_bind_det_exec: - "mres (a s) = {(r,s')} \ (s'' \ mres ((a >>= b) s)) = (s'' \ mres (b r s'))" - by (cases s'', simp add: in_bind) - -lemma exec_put: - "(put s' >>= m) s = m () s'" - by (auto simp add: bind_def put_def mres_def split_def) - -lemma bind_execI: - "\ (r'',s'') \ mres (f s); \x \ mres (g r'' s''). P x \ \ - \x \ mres ((f >>= g) s). P x" - by (fastforce simp add: Bex_def in_bind) - -lemma True_E_E [wp]: "\\\ f -,\\\\" - by (auto simp: validE_E_def validE_def valid_def split: sum.splits) - -(* - * \\x. \?B1 x\ ?g1 x -, \?E\; \?P\ ?f1 \?B1\, \?E\\ \ \?P\ ?f1 >>=E ?g1 -, \?E\ - *) -lemmas [wp_split] = - validE_validE_E [OF hoare_vcg_seqE [OF validE_E_validE]] - -lemma case_option_wp: - assumes x: "\x. \P x\ m x \Q\" - assumes y: "\P'\ m' \Q\" - shows "\\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ - case_option m' m x \Q\" - apply (cases x; simp) - apply (rule y) - apply (rule x) - done - -lemma case_option_wpE: - assumes x: "\x. \P x\ m x \Q\,\E\" - assumes y: "\P'\ m' \Q\,\E\" - shows "\\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ - case_option m' m x \Q\,\E\" - apply (cases x; simp) - apply (rule y) - apply (rule x) - done - -lemma in_bindE: - "(rv, s') \ mres ((f >>=E (\rv'. g rv')) s) = - ((\ex. rv = Inl ex \ (Inl ex, s') \ mres (f s)) \ - (\rv' s''. (rv, s') \ mres (g rv' s'') \ (Inr rv', s'') \ mres (f s)))" - apply (clarsimp simp: bindE_def in_bind lift_def in_throwError) - apply (safe del: disjCI; strengthen subst[where P="\x. x \ mres (f s)", mk_strg I _ E]; - auto simp: in_throwError split: sum.splits) - done - -(* - * \?P\ ?m1 -, \?E\ \ \?P\ liftME ?f1 ?m1 -, \?E\ - *) -lemmas [wp_split] = validE_validE_E [OF liftME_wp, simplified, OF validE_E_validE] - -lemma assert_A_True[simp]: "assert True = return ()" - by (simp add: assert_def) - -lemma assert_wp [wp]: "\\s. P \ Q () s\ assert P \Q\" - by (cases P, (simp add: assert_def | wp)+) - -lemma list_cases_wp: - assumes a: "\P_A\ a \Q\" - assumes b: "\x xs. 
ts = x#xs \ \P_B x xs\ b x xs \Q\" - shows "\case_list P_A P_B ts\ case ts of [] \ a | x # xs \ b x xs \Q\" - by (cases ts, auto simp: a b) - -(* FIXME: make wp *) -lemma whenE_throwError_wp: - "\\s. \Q \ P s\ whenE Q (throwError e) \\rv. P\, -" - unfolding whenE_def by wp blast - -lemma select_throwError_wp: - "\\s. \x\S. Q x s\ select S >>= throwError -, \Q\" - by (clarsimp simp add: bind_def throwError_def return_def select_def validE_E_def - validE_def valid_def mres_def) - - -section "validNF Rules" - -subsection "Basic validNF theorems" - -lemma validNF [intro?]: - "\ \ P \ f \ Q \; no_fail P f \ \ \ P \ f \ Q \!" - by (clarsimp simp: validNF_def) - -lemma validNF_valid: "\ \ P \ f \ Q \! \ \ \ P \ f \ Q \" - by (clarsimp simp: validNF_def) - -lemma validNF_no_fail: "\ \ P \ f \ Q \! \ \ no_fail P f" - by (clarsimp simp: validNF_def) - -lemma snd_validNF: - "\ \ P \ f \ Q \!; P s \ \ Failed \ snd ` (f s)" - by (clarsimp simp: validNF_def no_fail_def) - -lemma use_validNF: - "\ (r', s') \ mres (f s); \ P \ f \ Q \!; P s \ \ Q r' s'" - by (fastforce simp: validNF_def valid_def) - -subsection "validNF weakest pre-condition rules" - -lemma validNF_return [wp]: - "\ P x \ return x \ P \!" - by (wp validNF)+ - -lemma validNF_get [wp]: - "\ \s. P s s \ get \ P \!" - by (wp validNF)+ - -lemma validNF_put [wp]: - "\ \s. P () x \ put x \ P \!" - by (wp validNF)+ - -lemma validNF_K_bind [wp]: - "\ P \ x \ Q \! \ \ P \ K_bind x f \ Q \!" - by simp - -lemma validNF_fail [wp]: - "\ \s. False \ fail \ Q \!" - by (clarsimp simp: validNF_def fail_def no_fail_def) - -lemma validNF_prop [wp_unsafe]: - "\ no_fail (\s. P) f \ \ \ \s. P \ f \ \rv s. P \!" - by (wp validNF)+ - -lemma validNF_post_conj [intro!]: - "\ \ P \ a \ Q \!; \ P \ a \ R \! \ \ \ P \ a \ Q And R \!" - by (clarsimp simp: validNF_def) - -lemma no_fail_or: - "\no_fail P a; no_fail Q a\ \ no_fail (P or Q) a" - by (clarsimp simp: no_fail_def) - -lemma validNF_pre_disj [intro!]: - "\ \ P \ a \ R \!; \ Q \ a \ R \! \ \ \ P or Q \ a \ R \!" - by (rule validNF) (auto dest: validNF_valid validNF_no_fail intro: no_fail_or) - -(* - * Set up combination rules for WP, which also requires - * a "wp_trip" rule for validNF. - *) - -definition "validNF_property Q s b \ Failed \ snd ` (b s) \ (\(r', s') \ mres (b s). Q r' s')" - -lemma validNF_is_triple [wp_trip]: - "validNF P f Q = triple_judgement P f (validNF_property Q)" - apply (clarsimp simp: validNF_def triple_judgement_def validNF_property_def) - apply (auto simp: no_fail_def valid_def) - done - -lemma validNF_weaken_pre [wp_comb]: - "\\Q\ a \R\!; \s. P s \ Q s\ \ \P\ a \R\!" - by (metis hoare_pre_imp no_fail_pre validNF_def) - -lemma validNF_post_comb_imp_conj: - "\ \P'\ f \Q\!; \P\ f \Q'\!; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\!" - by (fastforce simp: validNF_def valid_def) - -lemma validNF_post_comb_conj_L: - "\ \P'\ f \Q\!; \P\ f \Q'\ \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def valid_def no_fail_def) - apply force - done - -lemma validNF_post_comb_conj_R: - "\ \P'\ f \Q\; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def valid_def no_fail_def) - apply force - done - -lemma validNF_post_comb_conj: - "\ \P'\ f \Q\!; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def valid_def no_fail_def) - apply force - done - -lemma validNF_split_if [wp_split]: - "\P \ \Q\ f \S\!; \ P \ \R\ g \S\!\ \ \\s. (P \ Q s) \ (\ P \ R s)\ if P then f else g \S\!" 
- by simp - -lemma validNF_vcg_conj_lift: - "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ - \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" - apply (subst bipred_conj_def[symmetric], rule validNF_post_conj) - apply (erule validNF_weaken_pre, fastforce) - apply (erule validNF_weaken_pre, fastforce) - done - -lemma validNF_vcg_disj_lift: - "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ - \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def) - apply safe - apply (auto intro!: hoare_vcg_disj_lift)[1] - apply (clarsimp simp: no_fail_def) - done - -lemma validNF_vcg_all_lift [wp]: - "\ \x. \P x\ f \Q x\! \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\!" - apply atomize - apply (rule validNF) - apply (clarsimp simp: validNF_def) - apply (rule hoare_vcg_all_lift) - apply force - apply (clarsimp simp: no_fail_def validNF_def) - done - -lemma no_fail_bind[wp_split]: - "\ no_fail P f; \x. no_fail (R x) (g x); \Q\ f \R\ \ - \ no_fail (P and Q) (do x \ f; g x od)" - apply (simp add: no_fail_def bind_def2 image_Un image_image - in_image_constant) - apply (intro allI conjI impI) - apply (fastforce simp: image_def) - apply clarsimp - apply (drule(1) post_by_hoare, erule in_mres) - apply (fastforce simp: image_def) - done - -lemma validNF_bind [wp_split]: - "\ \x. \B x\ g x \C\!; \A\ f \B\! \ \ - \A\ do x \ f; g x od \C\!" - apply (rule validNF) - apply (metis validNF_valid hoare_seq_ext) - apply (frule no_fail_bind[OF validNF_no_fail, where g=g]) - apply (rule validNF_no_fail, assumption) - apply (erule validNF_valid) - apply (simp add: no_fail_def) - done - -lemmas validNF_seq_ext = validNF_bind - -subsection "validNF compound rules" -lemma validNF_state_assert [wp]: - "\ \s. P () s \ G s \ state_assert G \ P \!" - apply (rule validNF) - apply wpsimp - apply (clarsimp simp: no_fail_def state_assert_def - bind_def2 assert_def return_def get_def) - done - -lemma validNF_modify [wp]: - "\ \s. P () (f s) \ modify f \ P \!" - apply (clarsimp simp: modify_def) - apply wp - done - -lemma validNF_gets [wp]: - "\\s. P (f s) s\ gets f \P\!" - apply (clarsimp simp: gets_def) - apply wp - done - -lemma validNF_condition [wp]: - "\ \ Q \ A \P\!; \ R \ B \P\!\ \ \\s. if C s then Q s else R s\ condition C A B \P\!" - apply rule - apply (drule validNF_valid)+ - apply (erule (1) condition_wp) - apply (drule validNF_no_fail)+ - apply (clarsimp simp: no_fail_def condition_def) - done - -lemma validNF_alt_def: - "validNF P m Q = (\s. P s \ ((\(r', s') \ mres (m s). Q r' s') \ Failed \ snd ` (m s)))" - by (auto simp: validNF_def valid_def no_fail_def mres_def image_def) - -lemma validNF_assert [wp]: - "\ (\s. P) and (R ()) \ assert P \ R \!" - apply (rule validNF) - apply (clarsimp simp: valid_def in_return) - apply (clarsimp simp: no_fail_def return_def) - done - -lemma validNF_false_pre: - "\ \_. False \ P \ Q \!" - by (clarsimp simp: validNF_def no_fail_def) - -lemma validNF_chain: - "\\P'\ a \R'\!; \s. P s \ P' s; \r s. R' r s \ R r s\ \ \P\ a \R\!" - by (fastforce simp: validNF_def valid_def no_fail_def Ball_def) - -lemma validNF_case_prod [wp]: - "\ \x y. validNF (P x y) (B x y) Q \ \ validNF (case_prod P v) (case_prod (\x y. B x y) v) Q" - by (metis prod.exhaust split_conv) - -lemma validE_NF_case_prod [wp]: - "\ \a b. \P a b\ f a b \Q\, \E\! \ \ - \case x of (a, b) \ P a b\ case x of (a, b) \ f a b \Q\, \E\!" - apply (clarsimp simp: validE_NF_alt_def) - apply (erule validNF_case_prod) - done - -lemma no_fail_is_validNF_True: "no_fail P s = (\ P \ s \ \_ _. 
True \!)" - by (clarsimp simp: no_fail_def validNF_def valid_def) - -subsection "validNF reasoning in the exception monad" - -lemma validE_NF [intro?]: - "\ \ P \ f \ Q \,\ E \; no_fail P f \ \ \ P \ f \ Q \,\ E \!" - apply (clarsimp simp: validE_NF_def) - done - -lemma validE_NF_valid: - "\ \ P \ f \ Q \,\ E \! \ \ \ P \ f \ Q \,\ E \" - apply (clarsimp simp: validE_NF_def) - done - -lemma validE_NF_no_fail: - "\ \ P \ f \ Q \,\ E \! \ \ no_fail P f" - apply (clarsimp simp: validE_NF_def) - done - -lemma validE_NF_weaken_pre [wp_comb]: - "\\Q\ a \R\,\E\!; \s. P s \ Q s\ \ \P\ a \R\,\E\!" - apply (clarsimp simp: validE_NF_alt_def) - apply (erule validNF_weaken_pre) - apply simp - done - -lemma validE_NF_post_comb_conj_L: - "\ \P\ f \Q\, \ E \!; \P'\ f \Q'\, \ \_ _. True \ \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\, \ E \!" - apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def - valid_def no_fail_def split: sum.splits) - apply force - done - -lemma validE_NF_post_comb_conj_R: - "\ \P\ f \Q\, \ \_ _. True \; \P'\ f \Q'\, \ E \! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\, \ E \!" - apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def - valid_def no_fail_def split: sum.splits) - apply force - done - -lemma validE_NF_post_comb_conj: - "\ \P\ f \Q\, \ E \!; \P'\ f \Q'\, \ E \! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\, \ E \!" - apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def - valid_def no_fail_def split: sum.splits) - apply force - done - -lemma validE_NF_chain: - "\\P'\ a \R'\,\E'\!; - \s. P s \ P' s; - \r' s'. R' r' s' \ R r' s'; - \r'' s''. E' r'' s'' \ E r'' s''\ \ - \\s. P s \ a \\r' s'. R r' s'\,\\r'' s''. E r'' s''\!" - by (fastforce simp: validE_NF_def validE_def2 no_fail_def Ball_def split: sum.splits) - -lemma validE_NF_bind_wp [wp]: - "\\x. \B x\ g x \C\, \E\!; \A\ f \B\, \E\!\ \ \A\ f >>=E (\x. g x) \C\, \E\!" - apply (unfold validE_NF_alt_def bindE_def) - apply (rule validNF_bind [rotated]) - apply assumption - apply (clarsimp simp: lift_def throwError_def split: sum.splits) - apply wpsimp - done - -lemma validNF_catch [wp]: - "\\x. \E x\ handler x \Q\!; \P\ f \Q\, \E\!\ \ \P\ f (\x. handler x) \Q\!" - apply (unfold validE_NF_alt_def catch_def) - apply (rule validNF_bind [rotated]) - apply assumption - apply (clarsimp simp: lift_def throwError_def split: sum.splits) - apply wp - done - -lemma validNF_throwError [wp]: - "\E e\ throwError e \P\, \E\!" - by (unfold validE_NF_alt_def throwError_def o_def) wpsimp - -lemma validNF_returnOk [wp]: - "\P e\ returnOk e \P\, \E\!" - by (clarsimp simp: validE_NF_alt_def returnOk_def) wpsimp - -lemma validNF_whenE [wp]: - "(P \ \Q\ f \R\, \E\!) \ \if P then Q else R ()\ whenE P f \R\, \E\!" - unfolding whenE_def by clarsimp wp - -lemma validNF_nobindE [wp]: - "\ \B\ g \C\,\E\!; - \A\ f \\r s. B s\,\E\! \ \ - \A\ doE f; g odE \C\,\E\!" - by clarsimp wp - -(* - * Setup triple rules for validE_NF so that we can use the - * "wp_comb" attribute. - *) - -definition "validE_NF_property Q E s b \ Failed \ snd ` (b s) - \ (\(r', s') \ mres (b s). case r' of Inl x \ E x s' | Inr x \ Q x s')" - -lemma validE_NF_is_triple [wp_trip]: - "validE_NF P f Q E = triple_judgement P f (validE_NF_property Q E)" - apply (clarsimp simp: validE_NF_def validE_def2 no_fail_def triple_judgement_def - validE_NF_property_def split: sum.splits) - apply blast - done - -lemmas [wp_comb] = validE_NF_weaken_pre - -lemma validNF_cong: - "\ \s. P s = P' s; \s. P s \ m s = m' s; - \r' s' s. 
\ P s; (r', s') \ mres (m s) \ \ Q r' s' = Q' r' s' \ \ - (\ P \ m \ Q \!) = (\ P' \ m' \ Q' \!)" - by (fastforce simp: validNF_alt_def) - -lemma validE_NF_liftE [wp]: - "\P\ f \Q\! \ \P\ liftE f \Q\,\E\!" - by (wpsimp simp: validE_NF_alt_def liftE_def) - -lemma validE_NF_handleE' [wp]: - "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ - \P\ f (\x. handler x) \Q\,\E\!" - apply (unfold validE_NF_alt_def handleE'_def) - apply (rule validNF_bind [rotated]) - apply assumption - apply (clarsimp split: sum.splits) - apply wpsimp - done - -lemma validE_NF_handleE [wp]: - "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ - \P\ f handler \Q\,\E\!" - apply (unfold handleE_def) - apply (metis validE_NF_handleE') - done - -lemma validE_NF_condition [wp]: - "\ \ Q \ A \P\,\ E \!; \ R \ B \P\,\ E \!\ - \ \\s. if C s then Q s else R s\ condition C A B \P\,\ E \!" - apply rule - apply (drule validE_NF_valid)+ - apply wp - apply (drule validE_NF_no_fail)+ - apply (clarsimp simp: no_fail_def condition_def) - done - -lemma validI_name_pre: - "prefix_closed f \ - (\s0 s. P s0 s \ \\s0' s'. s0' = s0 \ s' = s\,\R\ f \G\,\Q\) - \ \P\,\R\ f \G\,\Q\" - unfolding validI_def - by metis - -lemma validI_well_behaved': - "prefix_closed f - \ \P\,\R'\ f \G'\,\Q\ - \ R \ R' - \ G' \ G - \ \P\,\R\ f \G\,\Q\" - apply (subst validI_def, clarsimp) - apply (clarsimp simp add: rely_def) - apply (drule (2) validI_D) - apply (fastforce simp: rely_cond_def guar_cond_def)+ - done - -lemmas validI_well_behaved = validI_well_behaved'[unfolded le_fun_def, simplified] - -text \Strengthen setup.\ - -context strengthen_implementation begin - -lemma strengthen_hoare [strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ st F (\) (\P\ f \Q\) (\P\ f \R\)" - by (cases F, auto elim: hoare_strengthen_post) - -lemma strengthen_validE_R_cong[strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ st F (\) (\P\ f \Q\, -) (\P\ f \R\, -)" - by (cases F, auto intro: hoare_post_imp_R) - -lemma strengthen_validE_cong[strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ (\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f \Q\, \S\) (\P\ f \R\, \T\)" - by (cases F, auto elim: hoare_post_impErr) - -lemma strengthen_validE_E_cong[strg]: - "(\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f -, \S\) (\P\ f -, \T\)" - by (cases F, auto elim: hoare_post_impErr simp: validE_E_def) - -lemma strengthen_validI[strg]: - "(\r s0 s. st F (\) (Q r s0 s) (Q' r s0 s)) - \ st F (\) (\P\,\G\ f \R\,\Q\) (\P\,\G\ f \R\,\Q'\)" - by (cases F, auto elim: validI_strengthen_post) - -end - -end diff --git a/lib/MonadicRewrite.thy b/lib/MonadicRewrite.thy index afbdc01078..0d258d7a6e 100644 --- a/lib/MonadicRewrite.thy +++ b/lib/MonadicRewrite.thy @@ -9,10 +9,10 @@ theory MonadicRewrite imports - NonDetMonadVCG - Corres_UL - EmptyFailLib - LemmaBucket + Monads.Nondet_VCG + ExtraCorres + Monads.Nondet_Empty_Fail + Rules_Tac begin definition monadic_rewrite :: @@ -40,7 +40,7 @@ lemma monadic_rewrite_impossible: "monadic_rewrite F E \ f g" by (clarsimp simp: monadic_rewrite_def) -lemma monadic_rewrite_imp: +lemma monadic_rewrite_guard_imp: "\ monadic_rewrite F E Q f g; \s. 
P s \ Q s \ \ monadic_rewrite F E P f g" by (auto simp add: monadic_rewrite_def) @@ -52,7 +52,7 @@ lemma monadic_rewrite_trans: lemma monadic_rewrite_trans_dup: "\ monadic_rewrite F E P f g; monadic_rewrite F E P g h \ \ monadic_rewrite F E P f h" - by (rule monadic_rewrite_imp, (rule monadic_rewrite_trans; assumption), simp) + by (rule monadic_rewrite_guard_imp, (rule monadic_rewrite_trans; assumption), simp) lemma monadic_rewrite_from_simple: "P \ f = g \ monadic_rewrite F E (\_. P) f g" @@ -76,11 +76,14 @@ lemma monadic_rewrite_is_refl: "x = y \ monadic_rewrite F E \ x y" by (simp add: monadic_rewrite_refl) -lemma monadic_rewrite_refl3: +(* precondition implies reflexivity *) +lemma monadic_rewrite_pre_imp_eq: "\ \s. P s \ f s = g s \ \ monadic_rewrite F E P f g" by (simp add: monadic_rewrite_def) -lemmas monadic_rewrite_refl2 = monadic_rewrite_refl3[where P=\] +lemma monadic_rewrite_guard_arg_cong: + "(\s. P s \ x = y) \ monadic_rewrite F E P (f x) (f y)" + by (clarsimp simp: monadic_rewrite_def) lemma monadic_rewrite_exists: "(\v. monadic_rewrite F E (Q v) m m') @@ -91,18 +94,18 @@ lemma monadic_rewrite_exists_v: "\ \v. monadic_rewrite E F (Q v) f g \ \ monadic_rewrite E F (\x. (\v. P v x) & (\v. P v x \ Q v x)) f g" by (rule monadic_rewrite_name_pre) - (fastforce elim: monadic_rewrite_imp) + (fastforce elim: monadic_rewrite_guard_imp) -lemma monadic_rewrite_weaken: +lemma monadic_rewrite_weaken_flags: "monadic_rewrite (F \ F') (E \ E') P f g \ monadic_rewrite F' E' P f g" by (auto simp: monadic_rewrite_def) -lemma monadic_rewrite_weaken2: +lemma monadic_rewrite_weaken_flags': "monadic_rewrite F E P f g \ monadic_rewrite F' E' ((\_. (F \ F') \ (E' \ E)) and P) f g" apply (rule monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_weaken[where F=F and E=E]) + apply (rule monadic_rewrite_weaken_flags[where F=F and E=E]) apply auto done @@ -147,39 +150,39 @@ lemma monadic_rewrite_bindE: apply (case_tac x; simp add: lift_def monadic_rewrite_refl) done -lemmas monadic_rewrite_bind_tail - = monadic_rewrite_bind[OF monadic_rewrite_refl, simplified pred_and_true_var] +(* in order to preserve bound names in the tail, bind_head must avoid eta on both sides *) +lemma monadic_rewrite_bind_head: + "monadic_rewrite F E P f g \ monadic_rewrite F E P (f >>= h) (g >>= h)" + by (rule monadic_rewrite_bind[OF _ monadic_rewrite_refl hoare_vcg_prop, + simplified pred_top_right_neutral]) -lemmas monadic_rewrite_bind_head - = monadic_rewrite_bind [OF _ monadic_rewrite_refl hoare_vcg_prop, simplified pred_and_true] +(* in order to preserve bound names in the tail, bindE_head must avoid eta on both sides *) +lemma monadic_rewrite_bindE_head: + "monadic_rewrite F E P f g \ monadic_rewrite F E (P and (\s. True)) (f >>=E h) (g >>=E h)" + by (rule monadic_rewrite_bindE[OF _ monadic_rewrite_refl hoare_vcg_propE_R]) -lemmas monadic_rewrite_bind_alt - = monadic_rewrite_trans[OF monadic_rewrite_bind_tail monadic_rewrite_bind_head, rotated -1] +lemmas monadic_rewrite_bind_tail + = monadic_rewrite_bind[OF monadic_rewrite_refl, simplified pred_top_left_neutral] -lemmas monadic_rewrite_bindE_head - = monadic_rewrite_bindE[OF _ monadic_rewrite_refl hoare_vcg_propE_R] +lemmas monadic_rewrite_bindE_tail + = monadic_rewrite_bindE[OF monadic_rewrite_refl, simplified pred_top_left_neutral] -lemma monadic_rewrite_bind2: +(* Same as monadic_rewrite_bind, but prove hoare triple over head of LHS instead of RHS. *) +lemma monadic_rewrite_bind_l: "\ monadic_rewrite F E P f g; \x. 
monadic_rewrite F E (Q x) (h x) (j x); \R\ f \Q\ \ \ monadic_rewrite F E (P and R) (f >>= (\x. h x)) (g >>= (\x. j x))" - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (erule(1) monadic_rewrite_bind_tail) - apply (erule monadic_rewrite_bind_head) - apply simp - done + using monadic_rewrite_trans[OF monadic_rewrite_bind_tail monadic_rewrite_bind_head] + by (metis pred_conj_comm) lemma monadic_rewrite_named_bindE: "\ monadic_rewrite F E ((=) s) f f'; \rv s'. (Inr rv, s') \ fst (f' s) \ monadic_rewrite F E ((=) s') (g rv) (g' rv) \ \ monadic_rewrite F E ((=) s) (f >>=E (\rv. g rv)) (f' >>=E g')" - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (erule_tac R="(=) s" and Q="\rv s'. (Inr rv, s') \ fst (f' s)" in monadic_rewrite_bindE) apply (rule monadic_rewrite_name_pre) - apply clarsimp - apply (clarsimp simp add: validE_R_def validE_def valid_def - split: sum.split) - apply simp + apply (clarsimp simp add: validE_R_def validE_def valid_def + split: sum.split)+ done lemma monadic_rewrite_drop_return: @@ -193,13 +196,43 @@ lemma monadic_rewrite_add_return: (* FIXME: poorly named, super-specific (could do this with maybe one bind?), used in Ipc_C *) lemma monadic_rewrite_do_flip: "monadic_rewrite E F P (do c \ j; a \ f; b \ g c; return (a, c) od) - (do c \ j; b \ g c; a \ f; return (a, c) od) - \ monadic_rewrite E F P (do c \ j; a \ f; b \ g c; h a c od) - (do c \ j; b \ g c; a \ f; h a c od)" + (do c \ j; b \ g c; a \ f; return (a, c) od) \ + monadic_rewrite E F P (do c \ j; a \ f; b \ g c; h a c od) + (do c \ j; b \ g c; a \ f; h a c od)" apply (drule_tac h="\(a, b). h a b" in monadic_rewrite_bind_head) apply (simp add: bind_assoc) done +text \control of lambda abstractions, bound variables and eta form\ + +(* Preserving bound names while iterating using bind*_tail-style rules is more complicated than + for a head-style binding: + we need an eta on the non-schematic side, and must not have an eta on the schematic side, + otherwise unification can't pick a side for name preservation automatically. + It therefore appears a generic name-preserving tail rule is not possible. + The following rules can eliminate an eta from either the LHS or RHS of a monadic_rewrite, + e.g. monadic_rewrite_bind_tail[THEN monadic_rewrite_bind_eta_r] will remove the RHS eta *) + +lemma monadic_rewrite_bind_eta_r: + "monadic_rewrite F E P f (do x <- g; h x od) + \ monadic_rewrite F E P f (g >>= h)" + by simp + +lemma monadic_rewrite_bind_eta_l: + "monadic_rewrite F E P (do x <- f; h x od) g + \ monadic_rewrite F E P (f >>= h) g" + by simp + +lemma monadic_rewrite_bindE_eta_r: + "monadic_rewrite F E P f (doE x <- g; h x odE) + \ monadic_rewrite F E P f (g >>=E h)" + by simp + +lemma monadic_rewrite_bindE_eta_l: + "monadic_rewrite F E P (doE x <- f; h x odE) g + \ monadic_rewrite F E P (f >>=E h) g" + by simp + text \catch\ lemma monadic_rewrite_catch: @@ -224,141 +257,186 @@ lemma monadic_rewrite_modify_noop: text \Symbolic execution\ -lemma monadic_rewrite_symb_exec_pre: - assumes inv: "\s. g \ (=) s\" - and ef: "empty_fail g" - and rv: "\P\ g \\y s. y \ S\" - and h': "\y. y \ S \ h y = h'" - shows "monadic_rewrite True True P (g >>= h) h'" -proof - - have P: "\s v. \ P s; v \ fst (g s) \ \ split h v = h' s" +(* When `F=True`, we assume LHS does not fail and show the RHS does not fail. When adding `m` to the + LHS this assumption extends to `m`, meaning we can assume `m` does not fail. *) +lemma monadic_rewrite_symb_exec_l': + "\ \rv. 
monadic_rewrite F E (Q rv) (x rv) y; + \P. m \P\; empty_fail m; + \ F \ no_fail P' m; + \P\ m \Q\ \ + \ monadic_rewrite F E (P and P') (m >>= (\rv. x rv)) y" + apply (cases E) + subgoal (* E *) + apply (clarsimp simp: monadic_rewrite_def bind_def prod_eq_iff) + apply (subgoal_tac "\ snd (m s)") + apply (simp add: empty_fail_def, drule_tac x=s in spec) + apply (prop_tac "\(rv, s') \ fst (m s). x rv s' = y s") + apply clarsimp + apply (drule (1) in_inv_by_hoareD) + apply (frule (2) use_valid) + apply (clarsimp simp: Ball_def prod_eq_iff) + apply (rule conjI) + apply (rule equalityI) + apply (clarsimp simp: Ball_def) + apply (fastforce simp: Ball_def elim!: nonemptyE elim: rev_bexI) + apply (simp add: Bex_def Ball_def cong: conj_cong) + apply auto[1] + apply (clarsimp simp: no_fail_def) + done + subgoal (* \ E *) + apply (clarsimp simp: monadic_rewrite_def bind_def) + apply (prop_tac "\ snd (m s)") + apply (fastforce simp: no_failD) + apply (prop_tac "\v \ fst (m s). Q (fst v) (snd v) \ snd v = s") + apply clarsimp + apply (frule(2) use_valid) + apply (frule use_valid, assumption, rule refl) + apply simp apply clarsimp - apply (frule use_valid[OF _ inv], rule refl) - apply (frule(1) use_valid[OF _ rv]) - apply (simp add: h') - done - - show ?thesis - apply (clarsimp simp: monadic_rewrite_def bind_def P image_constant_conv - cong: image_cong) - apply (drule empty_failD2[OF ef]) - apply (clarsimp simp: prod_eq_iff split: if_split_asm) + apply (frule (1) empty_failD2) + apply (clarsimp simp: split_def) + apply fastforce done -qed - -lemma singleton_eq_imp_helper: - "v \ {x} \ h v = h x" by simp - -lemmas monadic_rewrite_symb_exec - = monadic_rewrite_symb_exec_pre[OF _ _ _ singleton_eq_imp_helper, - THEN monadic_rewrite_trans, simplified] + done -lemma eq_UNIV_imp_helper: - "v \ UNIV \ x = x" by simp +(* as a lemma to preserve lambda binding erased by simplified *) +lemma monadic_rewrite_symb_exec_l_F: + "\ \rv. monadic_rewrite True E (Q rv) (x rv) y; + \P. m \P\; empty_fail m; + \P\ m \Q\ \ + \ monadic_rewrite True E P (m >>= (\rv. x rv)) y" + by (rule monadic_rewrite_symb_exec_l'[where P'=\ and F=True, simplified]) -lemmas monadic_rewrite_symb_exec2 - = monadic_rewrite_symb_exec_pre[OF _ _ _ eq_UNIV_imp_helper, where P=\, - simplified, THEN monadic_rewrite_trans] +lemmas monadic_rewrite_symb_exec_l_nF + = monadic_rewrite_symb_exec_l'[where F=False, simplified simp_thms] -lemma monadic_rewrite_symb_exec_r: - "\ \s. m \(=) s\; no_fail P' m; - \rv. monadic_rewrite F False (Q rv) x (y rv); - \P\ m \Q\ \ - \ monadic_rewrite F False (P and P') x (m >>= y)" - apply (clarsimp simp: monadic_rewrite_def bind_def) - apply (drule(1) no_failD) - apply (subgoal_tac "\v \ fst (m s). Q (fst v) (snd v) \ snd v = s") - apply fastforce - apply clarsimp - apply (frule(2) use_valid) - apply (frule use_valid, assumption, rule refl) - apply simp - done +(* perform symbolic execution on LHS; conveniently handles both cases of F *) +lemmas monadic_rewrite_symb_exec_l + = monadic_rewrite_symb_exec_l_F + monadic_rewrite_symb_exec_l_nF +(* When `E=False`, adding an `m` to the RHS that returns no results still fulfills the result subset + requirement (the empty set is a trivial subset of any LHS results). *) lemma monadic_rewrite_symb_exec_r': - "\ \s. m \(=) s\; no_fail P m; - \rv. monadic_rewrite F False (Q rv) x (y rv); + "\ \rv. monadic_rewrite F E (Q rv) x (y rv); + \P. 
m \P\; no_fail P' m; E \ empty_fail m; \P\ m \Q\ \ - \ monadic_rewrite F False P x (m >>= y)" - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_symb_exec_r; assumption) - apply simp + \ monadic_rewrite F E (P and P') x (m >>= (\rv. y rv))" + apply (cases E) + subgoal (* E *) + apply (clarsimp simp: monadic_rewrite_def bind_def prod_eq_iff) + apply (drule (1) no_failD) + apply clarsimp + apply (simp add: empty_fail_def, drule_tac x=s in spec) + apply (prop_tac "\(rv, s') \ fst (m s). y rv s' = x s") + apply clarsimp + apply (drule (1) in_inv_by_hoareD) + apply (drule (2) use_valid) + apply (clarsimp simp: Ball_def prod_eq_iff) + apply (auto elim!: nonemptyE elim: rev_bexI simp add: Bex_def Ball_def cong: conj_cong) + done + subgoal (* \ E *) + apply (clarsimp simp: monadic_rewrite_def bind_def) + apply (drule(1) no_failD) + apply (subgoal_tac "\v \ fst (m s). Q (fst v) (snd v) \ snd v = s") + apply fastforce + apply clarsimp + apply (frule(2) use_valid) + apply (frule use_valid, assumption, rule refl) + apply simp + done done -lemma monadic_rewrite_symb_exec_l'': - "\ \s. m \(=) s\; empty_fail m; - \ F \ no_fail P' m; - \rv. monadic_rewrite F False (Q rv) (x rv) y; +(* as a lemma to preserve lambda binding erased by simplified *) +lemma monadic_rewrite_symb_exec_r_nE: + "\ \rv. monadic_rewrite F False (Q rv) x (y rv); + \P. m \P\; no_fail P' m; \P\ m \Q\ \ - \ monadic_rewrite F False (P and P') (m >>= x) y" - apply (clarsimp simp: monadic_rewrite_def bind_def) - apply (subgoal_tac "\ snd (m s)") - apply (subgoal_tac "\v \ fst (m s). Q (fst v) (snd v) \ snd v = s") - apply (frule(1) empty_failD2) - apply (clarsimp simp: split_def) - apply fastforce - apply clarsimp - apply (frule(2) use_valid) - apply (frule use_valid, assumption, rule refl) - apply simp - apply (cases F, simp_all add: no_failD) + \ monadic_rewrite F False (P and P') x (m >>= (\rv. y rv))" + by (rule monadic_rewrite_symb_exec_r'[where E=False, simplified]) + +lemmas monadic_rewrite_symb_exec_r_E + = monadic_rewrite_symb_exec_r'[where E=True, simplified] + +(* perform symbolic execution on RHS; conveniently handles both cases of E *) +lemmas monadic_rewrite_symb_exec_r + = monadic_rewrite_symb_exec_r_nE + monadic_rewrite_symb_exec_r_E + +lemma monadic_rewrite_symb_exec_l_known_F: + "\ monadic_rewrite True E Q (x rv) y; + \P. m \P\; empty_fail m; + \P\ m \\rv' _. rv' = rv \ \ + \ monadic_rewrite True E (P and Q) (m >>= x) y" + by (erule monadic_rewrite_trans[rotated]) + (rule monadic_rewrite_symb_exec_l_F[OF monadic_rewrite_pre_imp_eq]; simp) + +lemma monadic_rewrite_symb_exec_l_known_nF: + "\ monadic_rewrite False E Q (x rv) y; + \P. m \P\; empty_fail m; no_fail P' m; + \P\ m \\rv' s. rv' = rv \ \ + \ monadic_rewrite False E (P and P' and Q) (m >>= x) y" + by (erule monadic_rewrite_trans[rotated]) + (rule monadic_rewrite_symb_exec_l_nF[OF monadic_rewrite_pre_imp_eq]; simp) + +(* perform symbolic execution on LHS, proving a specific value is returned *) +lemmas monadic_rewrite_symb_exec_l_known + = monadic_rewrite_symb_exec_l_known_F + monadic_rewrite_symb_exec_l_known_nF + +lemma monadic_rewrite_symb_exec_r_known_E: + "\ monadic_rewrite F True Q x (y rv); + \P. m \P\; empty_fail m; no_fail P' m; + \P\ m \\rv' s. rv' = rv \ \ + \ monadic_rewrite F True (P and P' and Q) x (m >>= y)" + apply (rule monadic_rewrite_guard_imp) + apply (erule monadic_rewrite_trans) + apply (rule monadic_rewrite_symb_exec_r_E[OF monadic_rewrite_pre_imp_eq, rotated -1], simp+) done -lemma monadic_rewrite_symb_exec_l': - "\ \P. 
m \P\; empty_fail m; - \ F \ no_fail P' m; - \rv. monadic_rewrite F E (Q rv) (x rv) y; - \P\ m \Q\ \ - \ monadic_rewrite F E (P and P') (m >>= x) y" - apply (cases E) - apply (clarsimp simp: monadic_rewrite_def bind_def prod_eq_iff) - apply (subgoal_tac "\ snd (m s)") - apply (simp add: empty_fail_def, drule_tac x=s in spec) - apply (subgoal_tac "\(rv, s') \ fst (m s). x rv s' = y s") - apply (rule conjI) - apply (rule equalityI) - apply (clarsimp simp: Ball_def) - apply (fastforce simp: Ball_def elim!: nonemptyE elim: rev_bexI) - apply (simp add: Bex_def Ball_def cong: conj_cong) - apply auto[1] - apply clarsimp - apply (drule(1) in_inv_by_hoareD) - apply (frule(2) use_valid) - apply (clarsimp simp: Ball_def prod_eq_iff) - apply (clarsimp simp: no_fail_def) - apply simp - apply (rule monadic_rewrite_symb_exec_l'', assumption+) +lemma monadic_rewrite_symb_exec_r_known_nE: + "\ monadic_rewrite F False Q x (y rv); + \P. m \P\; no_fail P' m; + \P\ m \\rv' s. rv' = rv \ \ + \ monadic_rewrite F False (P and P' and Q) x (m >>= y)" + apply (rule monadic_rewrite_guard_imp) + apply (erule monadic_rewrite_trans) + apply (rule monadic_rewrite_symb_exec_r_nE[OF monadic_rewrite_pre_imp_eq, rotated -1], simp+) done -(* FIXME this should replace monadic_rewrite_symb_exec_l' as it preserves names, - and this approach should be used everywhere else anyhow, however that breaks proofs - relying on arbitrarily generated names, so will be dealt with in future *) -lemma monadic_rewrite_symb_exec_l'_preserve_names: - "\ \P. m \P\; empty_fail m; - \ F \ no_fail P' m; - \rv. monadic_rewrite F E (Q rv) (x rv) y; - \P\ m \Q\ \ - \ monadic_rewrite F E (P and P') (m >>= (\rv. x rv)) y" - by (rule monadic_rewrite_symb_exec_l') +(* perform symbolic execution on RHS, proving a specific value is returned *) +lemmas monadic_rewrite_symb_exec_r_known + = monadic_rewrite_symb_exec_r_known_E + monadic_rewrite_symb_exec_r_known_nE -(* FIXME merge into below upon change-over desribed above *) -lemmas monadic_rewrite_symb_exec_l'_TT - = monadic_rewrite_symb_exec_l'_preserve_names[where P'="\" and F=True, simplified] +lemma monadic_rewrite_symb_exec_l_drop_F: + "\ monadic_rewrite True E P g h; \P. m \P\; empty_fail m \ + \ monadic_rewrite True E P (m >>= (\_. g)) h" + by (rule monadic_rewrite_symb_exec_l, auto) -lemmas monadic_rewrite_symb_exec_l - = monadic_rewrite_symb_exec_l''[where F=True and P'=\, simplified] - monadic_rewrite_symb_exec_l''[where F=False, simplified simp_thms] - -lemma monadic_rewrite_symb_exec_l_known: - "\ \s. m \(=) s\; empty_fail m; - monadic_rewrite True False Q (x rv) y; - \P\ m \\rv' s. rv' = rv \ Q s\ \ - \ monadic_rewrite True False P (m >>= x) y" - apply (erule(1) monadic_rewrite_symb_exec_l) - apply (rule_tac P="rva = rv" in monadic_rewrite_gen_asm) - apply simp - apply wpsimp - done +lemma monadic_rewrite_symb_exec_l_drop_nF: + "\ monadic_rewrite False E P g h; \P. m \P\; empty_fail m; no_fail P' m \ + \ monadic_rewrite False E (P and P') (m >>= (\_. g)) h" + by (rule monadic_rewrite_symb_exec_l, auto) + +(* perform symbolic execution on LHS, dropping state-idempotent operation whose results are unused *) +lemmas monadic_rewrite_symb_exec_l_drop + = monadic_rewrite_symb_exec_l_drop_F + monadic_rewrite_symb_exec_l_drop_nF + +lemma monadic_rewrite_symb_exec_r_drop_E: + "\ monadic_rewrite F True P g h; \P. m \P\; empty_fail m; no_fail P' m \ + \ monadic_rewrite F True (P and P') g (m >>= (\_. 
h))" + by (rule monadic_rewrite_symb_exec_r, auto) + +lemma monadic_rewrite_symb_exec_r_drop_nE: + "\ monadic_rewrite F False P g h; \P. m \P\; no_fail P' m \ + \ monadic_rewrite F False (P and P') g (m >>= (\_. h))" + by (rule monadic_rewrite_symb_exec_r, auto) + +lemmas monadic_rewrite_symb_exec_r_drop + = monadic_rewrite_symb_exec_r_drop_E + monadic_rewrite_symb_exec_r_drop_nE text \if\ @@ -368,19 +446,39 @@ lemma monadic_rewrite_if: (If P a b) (If P c d)" by (cases P, simp_all) -lemma monadic_rewrite_if_rhs: +lemma monadic_rewrite_if_r: "\ P \ monadic_rewrite F E Q a b; \ P \ monadic_rewrite F E R a c \ \ monadic_rewrite F E (\s. (P \ Q s) \ (\ P \ R s)) a (If P b c)" by (cases P, simp_all) +lemma monadic_rewrite_if_r_True: + "\ P \ monadic_rewrite F E Q a b \ + \ monadic_rewrite F E ((\_. P) and Q) a (If P b c)" + by (rule monadic_rewrite_gen_asm, cases P, simp_all) + +lemma monadic_rewrite_if_r_False: + "\ \ P \ monadic_rewrite F E R a c \ + \ monadic_rewrite F E ((\_. \ P) and R) a (If P b c)" + by (rule monadic_rewrite_gen_asm, cases P, simp_all) + lemmas monadic_rewrite_named_if = monadic_rewrite_if[where Q="(=) s" and R="(=) s", simplified] for s -lemma monadic_rewrite_if_lhs: +lemma monadic_rewrite_if_l: "\ P \ monadic_rewrite F E Q b a; \ P \ monadic_rewrite F E R c a \ \ monadic_rewrite F E (\s. (P \ Q s) \ (\ P \ R s)) (If P b c) a" by (cases P, simp_all) +lemma monadic_rewrite_if_l_True: + "\ P \ monadic_rewrite F E Q b a \ + \ monadic_rewrite F E ((\_. P) and Q) (If P b c) a" + by (rule monadic_rewrite_gen_asm, cases P, simp_all) + +lemma monadic_rewrite_if_l_False: + "\ \ P \ monadic_rewrite F E R c a \ + \ monadic_rewrite F E ((\_. \ P) and R) (If P b c) a" + by (rule monadic_rewrite_gen_asm, cases P, simp_all) + lemma monadic_rewrite_if_known: "monadic_rewrite F E ((\s. C = X) and \) (if C then f else g) (if X then f else g)" by (rule monadic_rewrite_gen_asm) @@ -395,12 +493,13 @@ lemma monadic_rewrite_liftM: lemmas monadic_rewrite_liftE = monadic_rewrite_liftM[where fn=Inr, folded liftE_liftM] +(* When splitting a bind, use name from left as we typically rewrite the LHS into a schematic RHS. *) lemma monadic_rewrite_split_fn: "\ monadic_rewrite F E P (liftM fn a) c; \rv. monadic_rewrite F E (Q rv) (b rv) (d (fn rv)); \R\ a \Q\ \ - \ monadic_rewrite F E (P and R) (a >>= b) (c >>= d)" - apply (rule monadic_rewrite_imp) + \ monadic_rewrite F E (P and R) (a >>= (\rv. b rv)) (c >>= d)" + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans[rotated]) apply (erule monadic_rewrite_bind_head) apply (simp add: liftM_def) @@ -425,17 +524,23 @@ lemma monadic_rewrite_assert2: \ monadic_rewrite F E ((\s. Q \ P s) and (\_. Q)) (assert Q >>= f) g" by (auto simp add: assert_def monadic_rewrite_def fail_def split: if_split) -lemma monadic_rewrite_state_assert_true: - "monadic_rewrite F E P (state_assert P) (return ())" - by (simp add: state_assert_def monadic_rewrite_def exec_get) +(* under assumption of non-failure of LHS, we can assume any assertions for showing preconditions + of rewrite *) +lemma monadic_rewrite_state_assert: + "monadic_rewrite F E P (f ()) g + \ monadic_rewrite F E (\s. (F \ Q s \ P s) \ (\F \ P s \ Q s)) + (state_assert Q >>= f) g" + by (fastforce simp: monadic_rewrite_def exec_get state_assert_def bind_assoc assert_def) lemma monadic_rewrite_stateAssert: - "monadic_rewrite F E P (stateAssert P xs) (return ())" - by (simp add: stateAssert_def monadic_rewrite_def exec_get) + "monadic_rewrite F E P (f ()) g + \ monadic_rewrite F E (\s. 
(F \ Q s \ P s) \ (\F \ P s \ Q s)) + (stateAssert Q xs >>= f) g" + by (fastforce simp: monadic_rewrite_def exec_get stateAssert_def bind_assoc assert_def) text \Non-determinism: alternative and select\ -lemma monadic_rewrite_alternative_rhs: +lemma monadic_rewrite_alternative_r: "\ monadic_rewrite F E P a b; monadic_rewrite F E Q a c \ \ monadic_rewrite F E (P and Q) a (b \ c)" by (auto simp: monadic_rewrite_def alternative_def) @@ -445,8 +550,8 @@ lemma monadic_rewrite_alternatives: \ monadic_rewrite E F (P and Q) (a \ b) (c \ d)" by (auto simp: monadic_rewrite_def alternative_def) -lemma monadic_rewrite_rdonly_bind: - "\ \s. f \(=) s\ \ +lemma monadic_rewrite_bind_alternative: + "\ \P. f \P\ \ \ monadic_rewrite F False \ (alternative (f >>= (\x. g x)) h) (f >>= (\x. alternative (g x) h))" apply (clarsimp simp: monadic_rewrite_def bind_def @@ -455,11 +560,12 @@ lemma monadic_rewrite_rdonly_bind: apply (simp add: image_image split_def cong: image_cong) apply fastforce apply clarsimp + apply (drule hoare_eq_P) apply (frule use_valid, (assumption | rule refl | simp)+) done -lemmas monadic_rewrite_rdonly_bind_l - = monadic_rewrite_trans[OF monadic_rewrite_rdonly_bind] +lemmas monadic_rewrite_bind_alternative_l + = monadic_rewrite_trans[OF monadic_rewrite_bind_alternative, simplified pred_top_left_neutral] lemma monadic_rewrite_alternative_l: "monadic_rewrite F False \ (alternative f g) g" @@ -506,31 +612,30 @@ lemma monadic_rewrite_gets_the_known_v: exec_gets assert_opt_def) lemma monadic_rewrite_gets_the_walk: - "\ \x. monadic_rewrite True False (P x) (g x) (gets_the pf >>= g' x); + "\ \rv. monadic_rewrite True False (P rv) (g rv) (gets_the pf >>= g' rv); \Q. f \\s. Q (pf s)\; \R\ f \P\; empty_fail f \ - \ monadic_rewrite True False R (f >>= g) + \ monadic_rewrite True False R (f >>= (\rv. g rv)) (do v \ gets_the pf; x \ f; g' x v od)" - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) apply (erule(1) monadic_rewrite_bind_tail) apply (simp add: gets_the_def bind_assoc) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac rv=rv in monadic_rewrite_symb_exec_l_known, - (wp empty_fail_gets)+) - apply (rule monadic_rewrite_refl) - apply wp - apply assumption - apply (rule_tac P="rv = None" in monadic_rewrite_cases[where Q=\]) - apply (simp add: assert_opt_def) - apply (clarsimp simp: monadic_rewrite_def fail_def snd_bind) - apply (rule ccontr, drule(1) empty_failD2) - apply clarsimp - apply (simp add: assert_opt_def case_option_If2) - apply (rule monadic_rewrite_refl) - apply wp + apply (rule monadic_rewrite_symb_exec_r) + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_bind_tail) + apply (rule_tac rv=x in monadic_rewrite_symb_exec_l_known_F) + apply (rule monadic_rewrite_refl) + apply (wp empty_fail_gets)+ + apply assumption + apply (rule_tac P="x = None" in monadic_rewrite_cases[where Q=\]) + apply (simp add: assert_opt_def) + apply (clarsimp simp: monadic_rewrite_def fail_def snd_bind) + apply (rule ccontr, drule(1) empty_failD2) + apply clarsimp + apply (simp add: assert_opt_def case_option_If2) + apply (rule monadic_rewrite_refl) + apply wp+ apply simp done @@ -540,9 +645,9 @@ lemma monadic_rewrite_gets_l: by (auto simp add: monadic_rewrite_def exec_gets) lemma monadic_rewrite_gets_the_bind: - assumes mr: "(\v. monadic_rewrite F E (Q v) (g v) m)" - shows "monadic_rewrite F E (\s. 
f s \ None \ Q (the (f s)) s) (gets_the f >>= (\x. g x)) m" - apply (rule monadic_rewrite_imp) + assumes mr: "(\rv. monadic_rewrite F E (Q rv) (g rv) m)" + shows "monadic_rewrite F E (\s. f s \ None \ Q (the (f s)) s) (gets_the f >>= (\rv. g rv)) m" + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_exists[where P="\v s. f s = Some v"]) apply (subst return_bind[symmetric, where f="\_. m"]) apply (rule monadic_rewrite_bind) @@ -558,6 +663,13 @@ lemma monadic_rewrite_gets_the_gets: apply (auto simp: simpler_gets_def return_def) done +lemma gets_oapply_liftM_rewrite: + "monadic_rewrite False True (\s. f s p \ None) + (gets (oapply p \ f)) (liftM Some (gets_map f p))" + unfolding monadic_rewrite_def + by (simp add: liftM_def simpler_gets_def bind_def gets_map_def assert_opt_def return_def + split: option.splits) + text \Option cases\ lemma monadic_rewrite_case_option: @@ -572,7 +684,7 @@ lemma monadic_rewrite_case_sum: \v. x = Inr v \ monadic_rewrite F E (Q v) (b v) (d v) \ \ monadic_rewrite F E (\s. (\ isRight x \ P (theLeft x) s) \ (isRight x \ Q (theRight x) s)) (case_sum a b x) (case_sum c d x)" - by (cases x, simp_all add: isRight_def) + by (cases x; simp) text \WP proof via monadic rewriting\ @@ -591,7 +703,7 @@ lemma monadic_rewrite_refine_validE_R: by (simp add: validE_R_def validE_def monadic_rewrite_refine_valid) lemma monadic_rewrite_is_valid: - "\ monadic_rewrite False False P' f f'; \P\ do x <- f; g x od \Q\ \ + "\ monadic_rewrite False E P' f f'; \P\ do x <- f; g x od \Q\ \ \ \P and P'\ do x <- f'; g x od \Q\" by (fastforce simp: monadic_rewrite_def valid_def bind_def) @@ -613,77 +725,250 @@ lemma monadic_rewrite_weaken_failure: text \corres / refinement\ -lemma monadic_rewrite_corres: - assumes cu: "corres_underlying R False nf' r P P' a' c" - and me: "monadic_rewrite False True Q a a'" - shows "corres_underlying R False nf' r (P and Q) P' a c" -proof (rule corres_underlyingI) - fix s t rv' t' - assume st: "(s, t) \ R" and pq: "(P and Q) s" and pt: "P' t" and ct: "(rv', t') \ fst (c t)" - from pq have Ps: "P s" and Qs: "Q s" by simp_all - - from cu st Ps pt ct obtain s' rv where - as': "(rv, s') \ fst (a' s)" and rest: "nf' \ \ snd (c t)" "(s', t') \ R" "r rv rv'" - by (fastforce elim: corres_underlyingE) - - from me st Qs as' have as: "(rv, s') \ fst (a s)" - by (clarsimp simp: monadic_rewrite_def) - - with rest show "\(rv, s')\fst (a s). (s', t') \ R \ r rv rv'" by auto -next - fix s t - assume "(s, t) \ R" "(P and Q) s" "P' t" "nf'" - thus "\ snd (c t)" using cu - by (fastforce simp: corres_underlying_def split_def) -qed - -lemma monadic_rewrite_corres': - assumes cu: "corres_underlying R False nf' r P P' a c'" - and me: "monadic_rewrite False True Q c c'" - shows "corres_underlying R False nf' r P (P' and Q) a c" -proof (rule corres_underlyingI) - fix s t rv' t' - assume st: "(s, t) \ R" and pq: "(P' and Q) t" and ps: "P s" and ct: "(rv', t') \ fst (c t)" - from pq have P't: "P' t" and Qt: "Q t" by simp_all - - from me ct Qt have c't: "(rv', t') \ fst (c' t)" - by (clarsimp simp: monadic_rewrite_def) - - from cu st ps P't c't obtain s' rv where - as: "(rv, s') \ fst (a s)" and rest: "nf' \ \ snd (c' t)" "(s', t') \ R" "r rv rv'" - by (fastforce elim: corres_underlyingE) - - with rest as show "\(rv, s')\fst (a s). 
(s', t') \ R \ r rv rv'" by auto -next - fix s t - assume "(s, t) \ R" "(P' and Q) t" "P s" "nf'" - thus "\ snd (c t)" using cu me - by (fastforce simp: corres_underlying_def split_def monadic_rewrite_def) -qed - -lemma monadic_rewrite_corres_rhs: - "\ monadic_rewrite False True Q c c'; - corres_underlying R F F' r P P' a c' \ - \ corres_underlying R F F' r P (P' and Q) a c" - by (fastforce simp: corres_underlying_def monadic_rewrite_def) +lemma monadic_rewrite_corres_l_generic: + "\ monadic_rewrite F E Q a a'; + corres_underlying R nf nf' r P P' a' c; + F \ nf \ + \ corres_underlying R nf nf' r (P and Q) P' a c" + by (fastforce simp add: corres_underlying_def monadic_rewrite_def) -lemma monadic_rewrite_corres2: +lemma monadic_rewrite_corres_l: "\ monadic_rewrite False E Q a a'; - corres_underlying R False F r P P' a' c \ - \ corres_underlying R False F r (P and Q) P' a c" - by (fastforce simp add: corres_underlying_def monadic_rewrite_def) + corres_underlying R nf nf' r P P' a' c \ + \ corres_underlying R nf nf' r (P and Q) P' a c" + by (rule monadic_rewrite_corres_l_generic, simp+) + +(* The reasoning behind the failure flag logic is as follows: + * if we want to assume non-failure in monadic_rewrite (F), then we have to prove non-failure + in corres_underlying (nf'') + * if the goal requires us to prove non-failure of RHS, then the corres assumption also needs + to prove non-failure *) +lemma monadic_rewrite_corres_r_generic: + "\ monadic_rewrite F E Q c' c; + corres_underlying R nf nf'' r P P' a c'; + F \ nf''; nf' \ nf'' \ + \ corres_underlying R nf nf' r P (P' and Q) a c" + by (fastforce simp: corres_underlying_def monadic_rewrite_def) + +lemma monadic_rewrite_corres_r_generic_ex_abs: + "\ monadic_rewrite F E (\s'. Q s' \ ex_abs_underlying sr P s') c' c; + corres_underlying sr nf nf'' r P P' a c'; + F \ nf''; nf' \ nf'' \ + \ corres_underlying sr nf nf' r P (P' and Q) a c" + by (fastforce simp: corres_underlying_def monadic_rewrite_def) + +lemma monadic_rewrite_corres_r: + "\ monadic_rewrite False True Q c c'; + corres_underlying R nf nf' r P P' a c' \ + \ corres_underlying R nf nf' r P (P' and Q) a c" + by (erule (1) monadic_rewrite_corres_r_generic[OF monadic_rewrite_sym], simp+) lemmas corres_gets_the_bind - = monadic_rewrite_corres2[OF monadic_rewrite_bind_head[OF monadic_rewrite_gets_the_gets]] + = monadic_rewrite_corres_l[OF monadic_rewrite_bind_head[OF monadic_rewrite_gets_the_gets]] + +text \Interaction with @{term oblivious}\ + +lemma oblivious_monadic_rewrite: + "oblivious f m \ monadic_rewrite F E \ (do modify f; m od) (do m; modify f od)" + by (clarsimp simp: monadic_rewrite_def oblivious_modify_swap) text \Tool integration\ lemma wpc_helper_monadic_rewrite: - "monadic_rewrite F E Q' m m' - \ wpc_helper (P, P') (Q, {s. Q' s}) (monadic_rewrite F E (\s. s \ P') m m')" - by (auto simp: wpc_helper_def elim!: monadic_rewrite_imp) + "monadic_rewrite F E Q m m' + \ wpc_helper (P, P', P'') (Q, Q', Q'') (monadic_rewrite F E P m m')" + by (auto simp: wpc_helper_def elim!: monadic_rewrite_guard_imp) wpc_setup "\m. monadic_rewrite F E Q' m m'" wpc_helper_monadic_rewrite wpc_setup "\m. monadic_rewrite F E Q' (m >>= c) m'" wpc_helper_monadic_rewrite +wpc_setup "\m. monadic_rewrite F E Q' (m >>=E c) m'" wpc_helper_monadic_rewrite + +text \Tactics\ + +named_theorems monadic_rewrite_pre +declare monadic_rewrite_guard_imp[monadic_rewrite_pre] +method monadic_rewrite_pre = (WP_Pre.pre_tac monadic_rewrite_pre)? 
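+
+(* Illustrative sketch of the monadic_rewrite_pre method on a toy goal; not a canonical usage
+   pattern, and it assumes that monadic_rewrite_refl (used elsewhere in this theory) carries a
+   \<top> guard. The method generalises the concrete guard to a schematic one via
+   monadic_rewrite_guard_imp; a later rule instantiates the schematic, and the remaining
+   implication is discharged by simp. *)
+experiment
+begin
+lemma "monadic_rewrite F E (\<lambda>s. P s \<and> Q s) f f"
+  apply monadic_rewrite_pre           (* guard becomes schematic; an implication goal is added *)
+   apply (rule monadic_rewrite_refl)  (* instantiates the schematic guard with \<top> *)
+  apply simp
+  done
+end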
+ +lemmas monadic_rewrite_step_l = + monadic_rewrite_bind_tail[THEN monadic_rewrite_bind_eta_r] + monadic_rewrite_bindE_tail[THEN monadic_rewrite_bindE_eta_r] + +lemmas monadic_rewrite_step_r = + monadic_rewrite_bind_tail[THEN monadic_rewrite_bind_eta_l] + monadic_rewrite_bindE_tail[THEN monadic_rewrite_bindE_eta_l] + +method monadic_rewrite_solve_head methods m = + (rule monadic_rewrite_bind_head monadic_rewrite_bindE_head)?, + solves \m, (rule monadic_rewrite_refl)?\ + +(* +The most common way of performing monadic rewrite is by doing a pass over the LHS/RHS via +a setup step, such as monadic_rewrite_gen_asm + monadic_rewrite_trans (or transverse). +We traverse the LHS/RHS with a step method (e.g. monadic_rewrite_step) until we can perform +some desired action. +This action should clear up the current monadic_rewrite goal and leave us with mostly WP goals to +resolve, e.g. by using monadic_rewrite_solve_head or via [OF ... monadic_rewrite_refl] style rules. +These goals (along with the WP goals generated at each step taken) are left to the finalise +method, which is likely to be some invocation of wp/wpsimp. + +Further notes: +* no backtracking to previous steps +* if there is no place where action can apply, step until the end and be left with a monadic_rewrite + goal that finalise will most likely fail on +* if action does not resolve the monadic_rewrite goal, traversal stops, potentially leading to goals + that are hard to make sense of +* we avoid applying finalise to monadic_rewrite goals emerging from action, and finalise all other + goals in the order they were generated +*) + +(* we don't want the finalise tactic being applied to the monadic_rewrite goal we just generated + unless it's exactly what is needed to solve it (e.g. monadic_rewrite_refl) *) +method monadic_rewrite_single_pass methods start step action finalise = + determ start, + fwd_all_new + \(repeat_unless action \determ step\)\ + \if_then_else \has_concl "monadic_rewrite ?F ?E ?P ?r ?l"\ succeed finalise\ + +(* Step over LHS until action applies, then finalise. *) +method monadic_rewrite_l_method methods action finalise = + monadic_rewrite_single_pass \monadic_rewrite_pre, rule monadic_rewrite_trans\ + \determ \rule monadic_rewrite_step_l\\ + action + finalise + +(* Step over RHS until action applies, then finalise. *) +method monadic_rewrite_r_method methods action finalise = + monadic_rewrite_single_pass \monadic_rewrite_pre, rule monadic_rewrite_trans[rotated]\ + \determ \rule monadic_rewrite_step_r\\ + action + finalise + +(* monadic_rewrite_symb_exec_[r,l]['][_known,_drop][_[n][F,E]] can yield these side-conditions + upon the statement being symbolically executed + m is likely to be some variant of wpsimp in nearly all cases *) +method monadic_rewrite_symb_exec_resolutions methods m = + if_then_else \has_concl "empty_fail ?f"\ m + \if_then_else \has_concl "no_fail ?P ?f"\ m + \if_then_else \has_concl "?Q \ empty_fail ?f"\ m + \if_then_else \has_concl "?Q \ no_fail ?P ?f"\ m + \if_then_else \has_concl "?f \?P\"\ m + fail\\\\ + +(* Symbolically execute non-state-modifying statement on LHS/RHS. In nearly all cases, the side + conditions should be solvable by wpsimp, but the _m versions allow specifying a method or + wpsimp options. *) +method monadic_rewrite_symb_exec methods r m = + (monadic_rewrite_pre, no_name_eta, r; (monadic_rewrite_symb_exec_resolutions m)?) 
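+
+(* Goal-state sketch for the stepping rules above: monadic_rewrite_step_l steps over the first
+   statement of the LHS while preserving the bound name x. The exact shape of the generated
+   side conditions depends on the statement of monadic_rewrite_bind_tail, so the proof is
+   abandoned with oops; only the stepping behaviour is of interest here. *)
+experiment
+begin
+lemma "monadic_rewrite F E \<top> (do x <- f; return x od) (do x <- f; return x od)"
+  apply (rule monadic_rewrite_step_l)
+  (* remaining goals: a rewrite goal for the tail `return x` (bound name x preserved) and a
+     Hoare side condition on f, of the kind typically discharged by wpsimp *)
+  oops
+end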
+ +ML \ +structure Monadic_Rewrite = struct + +val solves_wpsimp = + let + fun wpsimp st = Method_Closure.apply_method st @{method wpsimp} [] [] [] st + fun solves_wpsimp_tac st = Method_Closure.apply_method st @{method solves} [] [] [wpsimp] st + in solves_wpsimp_tac end + +(* monadic_rewrite_l/r_method \monadic_rewrite_solve_head \rule r\\ [finalise] *) +fun rewrite_rl monadic_rewrite_rl_method_name = + Attrib.thm -- (Scan.option Method.text_closure) + >> (fn (thm, finalise_method_opt) => fn ctxt => fn facts => + let + (* rule r *) + fun rtac st = METHOD (HEADGOAL o Method.rule_tac st [thm]); + (* monadic_rewrite_solve_head \rule r\ *) + fun mr_sh_tac st = Method_Closure.apply_method st @{method monadic_rewrite_solve_head} + [] [] [rtac] st; + (* finalise *) + fun finalise_tac st = + case finalise_method_opt + of SOME m => METHOD (method_evaluate m st) + | NONE => solves_wpsimp st + (* assemble *) + fun tac st = Method_Closure.apply_method st monadic_rewrite_rl_method_name + [] [] [mr_sh_tac, finalise_tac] st; + in + tac ctxt facts + end) + +(* monadic_rewrite_symb_exec \rule rule_thms\ [finalise] *) +fun symb_exec rule_thms = + Scan.option Method.text_closure >> (fn finalise_method_opt => fn ctxt => fn facts => + let + (* rule rule_thms *) + fun rtac st = METHOD (HEADGOAL o Method.rule_tac st rule_thms) + (* finalise *) + fun finalise_tac st = + case finalise_method_opt + of SOME m => METHOD (method_evaluate m st) + | NONE => solves_wpsimp st + (* assemble *) + fun tac st = Method_Closure.apply_method st @{method monadic_rewrite_symb_exec} + [] [] [rtac, finalise_tac] st + in + tac ctxt facts + end) + +(* apply (monadic_rewrite_symb_exec \rules_tac rv=value in thms\ [finalise]) *) +fun symb_exec_known thms = + (Scan.lift Parse.embedded_inner_syntax -- Scan.lift Parse.for_fixes + -- (Scan.option Method.text_closure)) + >> (fn ((syn, fixes), finalise_method_opt) => fn ctxt => fn facts => + let + (* rules_tac rv=syn in thms *) + val rtac = METHOD + o Multi_Rule_Insts.single_instantiate_tac Rule_Insts.res_inst_tac "rv" syn fixes thms; + (* finalise *) + fun finalise_tac st = + case finalise_method_opt + of SOME m => METHOD (method_evaluate m st) + | NONE => solves_wpsimp st + (* assemble *) + fun tac st = Method_Closure.apply_method st @{method monadic_rewrite_symb_exec} + [] [] [rtac, finalise_tac] st + in + tac ctxt facts + end) + +end\ + +method_setup monadic_rewrite_l = \Monadic_Rewrite.rewrite_rl @{method monadic_rewrite_l_method}\ + \apply rule in monadic_rewrite LHS with customisable side-condition method\ + +method_setup monadic_rewrite_r = \Monadic_Rewrite.rewrite_rl @{method monadic_rewrite_r_method}\ + \apply rule in monadic_rewrite RHS with customisable side-condition method\ + +(* Symbolic execution on LHS/RHS, trying specific-flag rules first, + falling back on generic symbolic execution rule. + Side-conditions can be discharged with a method if specified, otherwise \solves wpsimp\ *) +method_setup monadic_rewrite_symb_exec_l = + \Monadic_Rewrite.symb_exec @{thms monadic_rewrite_symb_exec_l monadic_rewrite_symb_exec_l'}\ + \symbolic execution on monadic_rewrite LHS with customisable side-condition method\ +method_setup monadic_rewrite_symb_exec_r = + \Monadic_Rewrite.symb_exec @{thms monadic_rewrite_symb_exec_r monadic_rewrite_symb_exec_r'}\ + \symbolic execution on monadic_rewrite RHS with customisable side-condition method\ + +(* Drop statement on LHS/RHS using symbolic execution. 
In nearly all cases, the side conditions + should be solvable by wpsimp, but one can optionally specify a method or wpsimp options. *) +method_setup monadic_rewrite_symb_exec_l_drop = + \Monadic_Rewrite.symb_exec @{thms monadic_rewrite_symb_exec_l_drop}\ + \drop monadic_rewrite LHS statement via symbolic execution with customisable side-condition method\ +method_setup monadic_rewrite_symb_exec_r_drop = + \Monadic_Rewrite.symb_exec @{thms monadic_rewrite_symb_exec_r_drop}\ + \drop monadic_rewrite RHS statement via symbolic execution with customisable side-condition method\ + +(* Symbolic execution on RHS/LHS statement, but fixing the return value. + In nearly all cases, the side conditions should be solvable by wpsimp, but one can optionally + specify a method or wpsimp options. *) +method_setup monadic_rewrite_symb_exec_l_known = + \Monadic_Rewrite.symb_exec_known @{thms monadic_rewrite_symb_exec_l_known}\ + \symbolic execution on monadic_rewrite LHS with known value and customisable side-condition method\ +method_setup monadic_rewrite_symb_exec_r_known = + \Monadic_Rewrite.symb_exec_known @{thms monadic_rewrite_symb_exec_r_known}\ + \symbolic execution on monadic_rewrite RHS with known value and customisable side-condition method\ + +(* FIXME: consider tactic for deployment on corres goals; + FIXME: add corresponding monadic_rewrite_transverse? who knows how useful it'll be +*) end diff --git a/lib/Monads/Fun_Pred_Syntax.thy b/lib/Monads/Fun_Pred_Syntax.thy new file mode 100644 index 0000000000..d733c00f9f --- /dev/null +++ b/lib/Monads/Fun_Pred_Syntax.thy @@ -0,0 +1,232 @@ +(* + * Copyright 2022, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Syntax for using multi-argument functions as predicates, e.g "P and Q" where P and Q are + functions to bool, taking one or more parameters. *) + +chapter \Function Predicate Syntax\ + +theory Fun_Pred_Syntax +imports Main +begin + +section \Definitions\ + +text \ + Functions are already instances of Boolean algebras and provide all the standard laws one + would like to have. Default simplifications are automatic. Search for @{const inf}/ + @{const sup}/@{const uminus} to find further laws and/or unfold via the definitions below. + + The abbreviations here provide special syntax for the function instance of Boolean + algebras only, leaving other instances (such as @{typ bool}) untouched.\ + +abbreviation pred_conj :: "('a \ 'b::boolean_algebra) \ ('a \ 'b) \ ('a \ 'b)" where + "pred_conj \ inf" + +abbreviation pred_disj :: "('a \ 'b::boolean_algebra) \ ('a \ 'b) \ ('a \ 'b)" where + "pred_disj \ sup" + +abbreviation pred_neg :: "('a \ 'b::boolean_algebra) \ ('a \ 'b)" where + "pred_neg \ uminus" + + +text \ + Lifted True/False: ideally, we'd map these to top/bot, but top/bot are constants and there are + currently too many rules and tools that expect these conditions to beta-reduce and match against + True/False directly.\ + +abbreviation (input) pred_top :: "'a \ bool" where + "pred_top \ \_. True" + +abbreviation (input) pred_bot :: "'a \ bool" where + "pred_bot \ \_. False" + + +text \Version with two arguments for compatibility. Can hopefully be eliminated at some point.\ + +abbreviation (input) pred_top2 :: "'a \ 'b \ bool" where + "pred_top2 \ \_ _. True" + +abbreviation (input) pred_bot2 :: "'a \ 'b \ bool" where + "pred_bot2 \ \_ _. 
False" + + +section \Syntax bundles\ + +bundle fun_pred_syntax +begin + (* infixl instead of infixr, because we want to split off conjuncts from the left *) + notation pred_conj (infixl "and" 35) + notation pred_disj (infixl "or" 30) + notation pred_neg ("not _" [40] 40) + notation pred_top ("\") + notation pred_bot ("\") + notation pred_top2 ("\\") + notation pred_bot2 ("\\") +end + +bundle no_fun_pred_syntax +begin + no_notation pred_conj (infixl "and" 35) + no_notation pred_disj (infixl "or" 30) + no_notation pred_neg ("not _" [40] 40) + no_notation pred_top ("\") + no_notation pred_bot ("\") + no_notation pred_top2 ("\\") + no_notation pred_bot2 ("\\") +end + +unbundle fun_pred_syntax + + +section \Definitions specialised to @{typ bool} and @{text fun} instance of @{class boolean_algebra}\ + +lemmas pred_conj_def = + inf_fun_def[where 'b=bool, simplified] + inf_fun_def[where f="f::'a \ 'b::boolean_algebra" for f] + +lemmas pred_disj_def = + sup_fun_def[where 'b=bool, simplified] + sup_fun_def[where f="f::'a \ 'b::boolean_algebra" for f] + +lemmas pred_neg_def = + fun_Compl_def[where 'b=bool, simplified] + fun_Compl_def[where A="A::'a \ 'b::boolean_algebra" for A] + +lemmas pred_top_def[simp] = + top_fun_def[where 'b=bool, simplified] top_fun_def[where 'b="'b::boolean_algebra"] + +lemmas pred_bot_def[simp] = + bot_fun_def[where 'b=bool, simplified] bot_fun_def[where 'b="'b::boolean_algebra"] + + +section \Other lemmas\ + +text \AC rewriting renamed and specialised, so we don't have to remember inf/sup\ + +lemmas pred_conj_aci = inf_aci[where 'a="'a \ 'b::boolean_algebra"] +lemmas pred_disj_aci = sup_aci[where 'a="'a \ 'b::boolean_algebra"] + + +text \Useful legacy names\ + +lemmas pred_conjI = inf1I inf2I + +lemmas pred_disjI1 = sup1I1[where 'a="'a \ 'b::boolean_algebra"] +lemmas pred_disjI2 = sup1I2[where 'a="'a \ 'b::boolean_algebra"] + +lemmas pred_disj1CI[intro!] 
= sup1CI[where 'a="'a \ 'b::boolean_algebra"] +lemmas pred_disj2CI = sup2CI[where 'a="'a \ 'b::boolean_algebra"] + +lemmas pred_conj_assoc = inf.assoc[where 'a="'a \ 'b::boolean_algebra", symmetric] +lemmas pred_conj_comm = inf.commute[where 'a="'a \ 'b::boolean_algebra"] + +lemmas pred_disj_assoc = sup.assoc[where 'a="'a \ 'b::boolean_algebra", symmetric] +lemmas pred_disj_comm = sup.commute[where 'a="'a \ 'b::boolean_algebra"] + + +text \Top/bot and function composition\ + +lemma pred_top_comp[simp]: + "\ \ f = \" + by (simp add: comp_def) + +lemma pred_bot_comp[simp]: + "\ \ f = \" + by (simp add: comp_def) + + +text \ + We would get these for free if we could instantiate @{const pred_top}/@{const pred_bot} to + @{const top}/@{const bot} directly:\ + +lemmas pred_top_left_neutral[simp] = + inf_top.left_neutral[where 'a="'a \ bool", unfolded pred_top_def] + +lemmas pred_top_right_neutral[simp] = + inf_top.right_neutral[where 'a="'a \ bool", unfolded pred_top_def] + +lemmas pred_bot_left_neutral[simp] = + sup_bot.left_neutral[where 'a="'a \ bool", unfolded pred_bot_def] + +lemmas pred_bot_right_neutral[simp] = + sup_bot.right_neutral[where 'a="'a \ bool", unfolded pred_bot_def] + +lemmas pred_top_left[simp] = + sup_top_left[where 'a="'a \ bool", unfolded pred_top_def] + +lemmas pred_top_right[simp] = + sup_top_right[where 'a="'a \ bool", unfolded pred_top_def] + +lemmas pred_bot_left[simp] = + inf_bot_left[where 'a="'a \ bool", unfolded pred_bot_def] + +lemmas pred_bot_right[simp] = + inf_bot_right[where 'a="'a \ bool", unfolded pred_bot_def] + +lemmas pred_neg_top_eq[simp] = + compl_top_eq[where 'a="'a \ bool", unfolded pred_bot_def pred_top_def] + +lemmas pred_neg_bot_eq[simp] = + compl_bot_eq[where 'a="'a \ bool", unfolded pred_bot_def pred_top_def] + +(* no special setup for pred_top2/pred_bot2 at the moment, since we hope to eliminate these + entirely in the future *) + + +subsection "Simplification Rules for Lifted And/Or" + +lemma bipred_disj_op_eq[simp]: + "reflp R \ ((=) or R) = R" + "reflp R \ (R or (=)) = R" + by (auto simp: reflp_def) + +lemma bipred_le_true[simp]: "R \ \\" + by clarsimp + +lemma bipred_and_or_True[simp]: + "(P or \\) = \\" + "(\\ or P) = \\" + "(P and \\) = P" + "(\\ and P) = P" + by auto + + +section \Examples\ + +experiment +begin + +(* Standard laws are available by default: *) +lemma "(P and P) = P" for P :: "'a \ bool" + by simp + +(* Works for functions with multiple arguments: *) +lemma "(P and Q) = (Q and P)" for P :: "'a \ 'b \ bool" + by (simp add: pred_conj_aci) + +(* Unfolds automatically when applied: *) +lemma "(P and Q) s t = (P s t \ Q s t)" + by simp + +(* pred_top and pred_bot work for only one argument currently: *) +lemma "(P and not P) = \" for P :: "'a \ bool" + by simp + +(* You can still use them with more arguments and sometimes get simplification: *) +lemma "(P and not P) = (\_ _. \)" for P :: "'a \ 'b \ 'c \ bool" + by simp + +(* But sometimes you need to fold pred_top_def/pred_bot_def for rules on top/bot to fire: *) +lemma "(P and (\_ _. \)) = (\_ _. 
\)" + by (simp flip: pred_bot_def) + +lemma "(P and \\) = \\" + by (simp flip: pred_bot_def) + +end + +end diff --git a/lib/Monad_WP/Less_Monad_Syntax.thy b/lib/Monads/Less_Monad_Syntax.thy similarity index 97% rename from lib/Monad_WP/Less_Monad_Syntax.thy rename to lib/Monads/Less_Monad_Syntax.thy index 28464acfd4..17c4c093d6 100644 --- a/lib/Monad_WP/Less_Monad_Syntax.thy +++ b/lib/Monads/Less_Monad_Syntax.thy @@ -12,7 +12,7 @@ begin no_syntax "_thenM" :: "['a, 'b] \ 'c" (infixl ">>" 54) -(* remove input version of >>= from Monad_Syntax, avoid clash with NonDetMonad *) +(* remove input version of >>= from Monad_Syntax, avoid clash with Nondet_Monad *) no_notation Monad_Syntax.bind (infixl ">>=" 54) diff --git a/lib/Monads/Monad_Lib.thy b/lib/Monads/Monad_Lib.thy new file mode 100644 index 0000000000..bc28b88960 --- /dev/null +++ b/lib/Monads/Monad_Lib.thy @@ -0,0 +1,80 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* This theory collects the minimum constant definitions and lemmas for the monad definition + theories (Nondet_Monad, Trace_Monad etc). Only things that are necessary for these and needed + by more than one of them should go in here. *) + +theory Monad_Lib + imports Main +begin + +(* This might have been better as an input abbreviation, but a lot of proofs break if we change it *) +definition + fun_app :: "('a \ 'b) \ 'a \ 'b" (infixr "$" 10) where + "f $ x \ f x" + +declare fun_app_def [iff] + +lemma fun_app_cong[fundef_cong]: + "\ f x = f' x' \ \ (f $ x) = (f' $ x')" + by simp + +lemma fun_app_apply_cong[fundef_cong]: + "f x y = f' x' y' \ (f $ x) y = (f' $ x') y'" + by simp + +definition + "swp f \ \x y. f y x" + +lemma swp_apply[simp]: "swp f y x = f x y" + by (simp add: swp_def) + +definition "K \ \x y. x" + +lemma K_apply[simp]: + "K x y = x" + by (simp add: K_def) + +(* FIXME: eliminate *) +declare K_def [simp] + +lemma o_const_simp[simp]: + "(\x. C) \ f = (\x. C)" + by (simp add: o_def) + +definition + zipWith :: "('a \ 'b \ 'c) \ 'a list \ 'b list \ 'c list" where + "zipWith f xs ys \ map (case_prod f) (zip xs ys)" + +lemma zipWith_Nil[simp]: + "zipWith f xs [] = []" + unfolding zipWith_def by simp + +lemma zipWith_nth: + "\ n < min (length xs) (length ys) \ \ zipWith f xs ys ! n = f (xs ! n) (ys ! n)" + unfolding zipWith_def by simp + +lemma length_zipWith [simp]: + "length (zipWith f xs ys) = min (length xs) (length ys)" + unfolding zipWith_def by simp + +lemmas None_upd_eq = fun_upd_idem[where y=None] + +lemma sum_all_ex[simp]: + "(\a. x \ Inl a) = (\a. x = Inr a)" + "(\a. x \ Inr a) = (\a. x = Inl a)" + by (metis Inr_not_Inl sum.exhaust)+ + +lemma context_disjE: + "\P \ Q; P \ R; \\P; Q\ \ R\ \ R" + by auto + +lemma subst2: + "\s = t; u = v; P s u\ \ P t v" + by clarsimp + +end \ No newline at end of file diff --git a/lib/Monads/README.md b/lib/Monads/README.md new file mode 100644 index 0000000000..9d9dac4b56 --- /dev/null +++ b/lib/Monads/README.md @@ -0,0 +1,48 @@ + + +# Monad Definitions and Tactics + +This session contains definitions of various monads useful in [AutoCorres] and +the [seL4 verification][l4v] for the verification of C programs. + +In particular, this session defines: + +- a [nondeterministic state monad][nondet] with failure to express stateful + computation. There is a variation of this monad that also allows computation + with exceptions (throw/catch). + +- a [reader option monad][option] to express computation that can depend on + state and can fail, but does not change state. 
It can also be used to express + projections from the state in preconditions and other state assertions. + +- a [trace monad][trace] that stores a set of traces for expressing concurrent + computation. + +- for each of these monads, weakest-precondition lemmas and corresponding tool + setup. + +- for the nondeterministic state monad, additional concepts such as + wellformedness with respect to failure (`empty_fail`), absence of failure + (`no_fail`), absence of exceptions (`no_throw`). See its [README][nondet] and + the respective theories for more details. + +- the trace monad has similar concepts where applicable, and its theories follow + the same structure as that of the nondet monad. + +The directory [`wp/`](./wp/) contains proof methods to reason about these monads +in weakest-precondition style. See its [README][wp] for more details. + +This session also introduces a [strengthen] method, which is useful for +performing rewriting steps within the complex conclusions that often appear when +working with these monads. + +[l4v]: https://github.com/seL4/l4v/ +[nondet]: ./nondet/Nondet_README.thy +[option]: ./reader_option/Reader_Option_Monad.thy +[trace]: ./trace/Trace_Monad.thy +[AutoCorres]: ../../tools/autocorres/ +[wp]: ./wp/WP_README.thy +[strengthen]: ./Strengthen_Demo.thy diff --git a/lib/Monads/ROOT b/lib/Monads/ROOT new file mode 100644 index 0000000000..35cd4923ac --- /dev/null +++ b/lib/Monads/ROOT @@ -0,0 +1,72 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +chapter Lib + +session Monads (lib) = HOL + + options [document = pdf] + + sessions + "HOL-Library" + "HOL-Eisbach" + Eisbach_Tools + ML_Utils + + directories + wp + nondet + reader_option + trace + + theories + Fun_Pred_Syntax + Nondet_Monad + Nondet_Lemmas + Nondet_VCG + Nondet_More_VCG + Nondet_In_Monad + Nondet_Sat + Nondet_Det + Nondet_No_Fail + Nondet_No_Throw + Nondet_Empty_Fail + Nondet_Monad_Equations + Nondet_While_Loop_Rules + Nondet_While_Loop_Rules_Completeness + Nondet_Reader_Option + Reader_Option_Monad + Reader_Option_VCG + Trace_Monad + Trace_Lemmas + Trace_VCG + Trace_Det + Trace_No_Throw + Trace_Empty_Fail + Trace_No_Trace + Trace_Total + Trace_Strengthen_Setup + Trace_Monad_Equations + Trace_RG + Trace_More_RG + Trace_In_Monad + Trace_More_VCG + Trace_No_Fail + Trace_Reader_Option + Trace_Sat + Strengthen + Nondet_Strengthen_Setup + Strengthen_Demo + WPBang + WPFix + Eisbach_WP + WPI + WPC + WP_Pre + WP + Datatype_Schematic + + document_files + "root.tex" diff --git a/lib/Monad_WP/Strengthen.thy b/lib/Monads/Strengthen.thy similarity index 99% rename from lib/Monad_WP/Strengthen.thy rename to lib/Monads/Strengthen.thy index 71171e2235..b2fcce21e1 100644 --- a/lib/Monad_WP/Strengthen.thy +++ b/lib/Monads/Strengthen.thy @@ -4,6 +4,8 @@ * SPDX-License-Identifier: BSD-2-Clause *) +section \Manipulating Hoare Logic Assertions\ + theory Strengthen imports Main begin diff --git a/lib/Monad_WP/Strengthen_Demo.thy b/lib/Monads/Strengthen_Demo.thy similarity index 93% rename from lib/Monad_WP/Strengthen_Demo.thy rename to lib/Monads/Strengthen_Demo.thy index f34409fa39..befb994d19 100644 --- a/lib/Monad_WP/Strengthen_Demo.thy +++ b/lib/Monads/Strengthen_Demo.thy @@ -73,11 +73,11 @@ thm subset_UNIV subset_UNIV[mk_strg] text \Rules which would introduce schematics are adjusted by @{attribute mk_strg} to introduce quantifiers -instead. The argument I to mk_strg prevents this step. +instead. The argument I to @{attribute mk_strg} prevents this step. 
\ thm subsetD subsetD[mk_strg I] subsetD[mk_strg] -text \The first argument to mk_strg controls the way +text \The first argument to @{attribute mk_strg} controls the way the rule will be applied. I: use rule in introduction style, matching its conclusion. D: use rule in destruction (forward) style, matching its first premise. @@ -101,10 +101,10 @@ lemma (* oops, overdid it *) oops -text \Subsequent arguments to mk_strg capture premises for +text \Subsequent arguments to @{attribute mk_strg} capture premises for special treatment. The 'A' argument (synonym 'E') specifies that a premise should be solved by assumption. Our fancy proof above -used a strengthen rule bexI[mk_strg I _ A], which tells strengthen +used a strengthen rule @{text "bexI[mk_strg I _ A]"}, which tells strengthen to do approximately the same thing as \apply (rule bexI) prefer 2 apply assumption\ diff --git a/lib/Monads/document/root.tex b/lib/Monads/document/root.tex new file mode 100644 index 0000000000..5b20e177ec --- /dev/null +++ b/lib/Monads/document/root.tex @@ -0,0 +1,81 @@ +% +% Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) +% +% SPDX-License-Identifier: CC-BY-SA-4.0 +% + +\documentclass[11pt,a4paper]{report} +\usepackage[T1]{fontenc} +\usepackage{isabelle,isabellesym} + +% this should be the last package used +\usepackage{pdfsetup} + +% urls in roman style, theory text in math-similar italics +\urlstyle{rm} +\isabellestyle{tt} + + +\begin{document} + +\title{State Monad Library} +\author{David Greenaway, + Gerwin Klein, + Corey Lewis, + Daniel Matichuk, + Thomas Sewell} +\maketitle + +\begin{abstract} + This entry contains a library of different state monads with a large set of + operators, laws, Hoare Logic, weakest precondition rules, and corresponding + automation. The formalisation is designed for reasoning about total and + partial correctness, as well as for reasoning about failure separately from + normal behaviour. Partial correctness in this context ignores program failure. + Total correctness implies the absence of program failure. + + The main monads formalised in this entry are a non-deterministic state monad + with failure, and a state-based trace monad for modelling concurrent executions. + Both support roughly the same set of operators. They come with a standard + Hoare Logic and Rely-Guarantee logic respectively. The entry also contains an + option reader monad that combines well with the non-deterministic state monad. + The option reader monad provides additional operators useful for building + state projections that can be used both in monadic functions and Hoare-Logic + assertions, enhancing specification re-use in proofs. + + This monad library is used in the verification of the seL4 microkernel and + predates some of the monad developments in the Isabelle library. In + particular, it defines its own syntax for do-notation, which can be overridden + with the generic monad syntax in the Isabelle library. We have opted not to do + so by default, because the overloading-based syntax from the Isabelle library + often necessitates additional type annotations when mixing different monad + types within one specification. For similar reasons, no attempt is made to + state generic state monad laws in a type class or locale and then instantiate + them for the two main monad instances. The resulting duplication from two + instances is still easy to handle, but if more instances are added to this + library, additional work on genericity would be useful. 
+ + This library has grown over more than a decade with many substantial + contributions. We would specifically like to acknowledge the contributions by + Nelson Billing, Andrew Boyton, Matthew Brecknell, David Cock, Matthias Daum, + Alejandro Gomez-Londono, Rafal Kolanski, Japheth Lim, Michael McInerney, Toby + Murray, Lars Noschinski, Edward Pierzchalski, Sean Seefried, Miki Tanaka, Vernon + Tang, and Simon Windwood. +\end{abstract} + +\tableofcontents + +\parindent 0pt\parskip 0.5ex + +% generated text of all theories +\input{session} + +\bibliographystyle{abbrv} +\bibliography{root} + +\end{document} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: t +%%% End: diff --git a/lib/Monads/nondet/Nondet_Det.thy b/lib/Monads/nondet/Nondet_Det.thy new file mode 100644 index 0000000000..03e4118099 --- /dev/null +++ b/lib/Monads/nondet/Nondet_Det.thy @@ -0,0 +1,70 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Nondet_Det + imports + Nondet_Monad +begin + +subsection "Determinism" + +text \A monad of type @{text nondet_monad} is deterministic iff it returns exactly one state and + result and does not fail\ +definition det :: "('a,'s) nondet_monad \ bool" where + "det f \ \s. \r. f s = ({r},False)" + +text \A deterministic @{text nondet_monad} can be turned into a normal state monad:\ +definition the_run_state :: "('s,'a) nondet_monad \ 's \ 'a \ 's" where + "the_run_state M \ \s. THE s'. fst (M s) = {s'}" + + +lemma det_set_iff: + "det f \ (r \ fst (f s)) = (fst (f s) = {r})" + unfolding det_def + by (metis fst_conv singleton_iff) + +lemma return_det[iff]: + "det (return x)" + by (simp add: det_def return_def) + +lemma put_det[iff]: + "det (put s)" + by (simp add: det_def put_def) + +lemma get_det[iff]: + "det get" + by (simp add: det_def get_def) + +lemma det_gets[iff]: + "det (gets f)" + by (auto simp add: gets_def det_def get_def return_def bind_def) + +lemma det_UN: + "det f \ (\x \ fst (f s). g x) = (g (THE x. x \ fst (f s)))" + unfolding det_def + by (smt (verit) SUP_eq_const emptyE fst_conv singletonD singletonI the1_equality) + +lemma bind_detI[simp, intro!]: + "\ det f; \x. det (g x) \ \ det (f >>= g)" + unfolding bind_def det_def + apply (erule all_reg[rotated]) + apply clarsimp + by (metis fst_conv snd_conv) + +lemma det_modify[iff]: + "det (modify f)" + by (simp add: modify_def) + +lemma the_run_stateI: + "fst (M s) = {s'} \ the_run_state M s = s'" + by (simp add: the_run_state_def) + +lemma the_run_state_det: + "\ s' \ fst (M s); det M \ \ the_run_state M s = s'" + by (simp add: the_run_stateI det_set_iff) + +end diff --git a/lib/Monads/nondet/Nondet_Empty_Fail.thy b/lib/Monads/nondet/Nondet_Empty_Fail.thy new file mode 100644 index 0000000000..c0bcf38ddf --- /dev/null +++ b/lib/Monads/nondet/Nondet_Empty_Fail.thy @@ -0,0 +1,343 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Nondet_Empty_Fail + imports + Nondet_Monad + WPSimp +begin + +section \Monads that are wellformed w.r.t. failure\ + +text \ + Usually, well-formed monads constructed from the primitives in @{text Nondet_Monad} will have the following + property: if they return an empty set of results, they will have the failure flag set.\ +definition empty_fail :: "('s,'a) nondet_monad \ bool" where + "empty_fail m \ \s. 
fst (m s) = {} \ snd (m s)" + +text \Useful in forcing otherwise unknown executions to have the @{const empty_fail} property.\ +definition mk_ef :: "'a set \ bool \ 'a set \ bool" where + "mk_ef S \ (fst S, fst S = {} \ snd S)" + + +subsection \WPC setup\ + +lemma wpc_helper_empty_fail_final: + "empty_fail f \ wpc_helper (P, P', P'') (Q, Q', Q'') (empty_fail f)" + by (clarsimp simp: wpc_helper_def) + +wpc_setup "\m. empty_fail m" wpc_helper_empty_fail_final + + +subsection \@{const empty_fail} intro/dest rules\ + +lemma empty_failI: + "(\s. fst (m s) = {} \ snd (m s)) \ empty_fail m" + by (simp add: empty_fail_def) + +lemma empty_failD: + "\ empty_fail m; fst (m s) = {} \ \ snd (m s)" + by (simp add: empty_fail_def) + +lemma empty_fail_not_snd: + "\ \ snd (m s); empty_fail m \ \ \v. v \ fst (m s)" + by (fastforce simp: empty_fail_def) + +lemmas empty_failD2 = empty_fail_not_snd[rotated] + +lemma empty_failD3: + "\ empty_fail f; \ snd (f s) \ \ fst (f s) \ {}" + by (drule(1) empty_failD2, clarsimp) + +lemma empty_fail_bindD1: + "empty_fail (a >>= b) \ empty_fail a" + unfolding empty_fail_def bind_def + by (fastforce simp: split_def image_image) + + +subsection \Wellformed monads\ + +(* + Collect generic empty_fail lemmas here: + - naming convention is empty_fail_NAME. + - add lemmas with assumptions to [empty_fail_cond] set + - add lemmas without assumption to [empty_fail_term] set +*) + +named_theorems empty_fail_term +named_theorems empty_fail_cond + +lemma empty_fail_K_bind[empty_fail_cond]: + "empty_fail f \ empty_fail (K_bind f x)" + by simp + +lemma empty_fail_fun_app[empty_fail_cond]: + "empty_fail (f x) \ empty_fail (f $ x)" + by simp + +(* empty_fail as such does not need context, but empty_fail_select_f does, so we need to build + up context in other rules *) +lemma empty_fail_If[empty_fail_cond]: + "\ P \ empty_fail f; \P \ empty_fail g \ \ empty_fail (if P then f else g)" + by (simp split: if_split) + +lemma empty_fail_If_applied[empty_fail_cond]: + "\ P \ empty_fail (f x); \P \ empty_fail (g x) \ \ empty_fail ((if P then f else g) x)" + by simp + +lemma empty_fail_put[empty_fail_term]: + "empty_fail (put f)" + by (simp add: put_def empty_fail_def) + +lemma empty_fail_modify[empty_fail_term]: + "empty_fail (modify f)" + by (simp add: empty_fail_def simpler_modify_def) + +lemma empty_fail_gets[empty_fail_term]: + "empty_fail (gets f)" + by (simp add: empty_fail_def simpler_gets_def) + +lemma empty_fail_select[empty_fail_cond]: + "S \ {} \ empty_fail (select S)" + by (simp add: empty_fail_def select_def) + +lemma empty_fail_select_f[empty_fail_cond]: + assumes ef: "fst S = {} \ snd S" + shows "empty_fail (select_f S)" + by (fastforce simp add: empty_fail_def select_f_def intro: ef) + +lemma empty_fail_bind[empty_fail_cond]: + "\ empty_fail a; \x. empty_fail (b x) \ \ empty_fail (a >>= b)" + by (fastforce simp: bind_def empty_fail_def split_def) + +lemma empty_fail_return[empty_fail_term]: + "empty_fail (return x)" + by (simp add: empty_fail_def return_def) + +lemma empty_fail_returnOk[empty_fail_term]: + "empty_fail (returnOk v)" + by (fastforce simp: returnOk_def empty_fail_term) + +lemma empty_fail_throwError[empty_fail_term]: + "empty_fail (throwError v)" + by (fastforce simp: throwError_def empty_fail_term) + +lemma empty_fail_lift[empty_fail_cond]: + "\ \x. 
empty_fail (f x) \ \ empty_fail (lift f x)" + unfolding lift_def + by (auto simp: empty_fail_term split: sum.split) + +lemma empty_fail_liftE[empty_fail_cond]: + "empty_fail f \ empty_fail (liftE f)" + by (simp add: liftE_def empty_fail_cond empty_fail_term) + +lemma empty_fail_bindE[empty_fail_cond]: + "\ empty_fail f; \rv. empty_fail (g rv) \ \ empty_fail (f >>=E g)" + by (simp add: bindE_def empty_fail_cond) + +lemma empty_fail_mapM[empty_fail_cond]: + assumes m: "\x. x \ set xs \ empty_fail (m x)" + shows "empty_fail (mapM m xs)" +using m +proof (induct xs) + case Nil + thus ?case by (simp add: mapM_def sequence_def empty_fail_term) +next + case Cons + have P: "\m x xs. mapM m (x # xs) = (do y \ m x; ys \ (mapM m xs); return (y # ys) od)" + by (simp add: mapM_def sequence_def Let_def) + from Cons + show ?case by (simp add: P m empty_fail_cond empty_fail_term) +qed + +lemma empty_fail_fail[empty_fail_term]: + "empty_fail fail" + by (simp add: fail_def empty_fail_def) + +lemma empty_fail_assert[empty_fail_term]: + "empty_fail (assert P)" + unfolding assert_def by (simp add: empty_fail_term) + +lemma empty_fail_assert_opt[empty_fail_term]: + "empty_fail (assert_opt x)" + by (simp add: assert_opt_def empty_fail_term split: option.splits) + +lemma empty_fail_mk_ef[empty_fail_term]: + "empty_fail (mk_ef o m)" + by (simp add: empty_fail_def mk_ef_def) + +lemma empty_fail_gets_the[empty_fail_term]: + "empty_fail (gets_the f)" + unfolding gets_the_def + by (simp add: empty_fail_cond empty_fail_term) + +lemma empty_fail_gets_map[empty_fail_term]: + "empty_fail (gets_map f p)" + unfolding gets_map_def + by (simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_whenEs[empty_fail_cond]: + "(P \ empty_fail f) \ empty_fail (whenE P f)" + "(\P \ empty_fail f) \ empty_fail (unlessE P f)" + by (auto simp add: whenE_def unlessE_def empty_fail_term) + +lemma empty_fail_assertE[empty_fail_term]: + "empty_fail (assertE P)" + by (simp add: assertE_def empty_fail_term) + +lemma empty_fail_get[empty_fail_term]: + "empty_fail get" + by (simp add: empty_fail_def get_def) + +lemma empty_fail_catch[empty_fail_cond]: + "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catch f g)" + by (simp add: catch_def empty_fail_cond empty_fail_term split: sum.split) + +lemma empty_fail_guard[empty_fail_term]: + "empty_fail (state_assert G)" + by (clarsimp simp: state_assert_def empty_fail_cond empty_fail_term) + +lemma empty_fail_spec[empty_fail_term]: + "empty_fail (state_select F)" + by (clarsimp simp: state_select_def empty_fail_def) + +lemma empty_fail_when[empty_fail_cond]: + "(P \ empty_fail x) \ empty_fail (when P x)" + unfolding when_def + by (simp add: empty_fail_term) + +lemma empty_fail_unless[empty_fail_cond]: + "(\P \ empty_fail f) \ empty_fail (unless P f)" + unfolding unless_def + by (simp add: empty_fail_cond) + +lemma empty_fail_liftM[empty_fail_cond]: + "empty_fail m \ empty_fail (liftM f m)" + unfolding liftM_def + by (fastforce simp: empty_fail_term empty_fail_cond) + +lemma empty_fail_liftME[empty_fail_cond]: + "empty_fail m \ empty_fail (liftME f m)" + unfolding liftME_def + by (simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_handleE[empty_fail_cond]: + "\ empty_fail L; \r. empty_fail (R r) \ \ empty_fail (L R)" + by (clarsimp simp: handleE_def handleE'_def empty_fail_term empty_fail_cond split: sum.splits) + +lemma empty_fail_handle'[empty_fail_cond]: + "\empty_fail f; \e. 
empty_fail (handler e)\ \ empty_fail (f handler)" + unfolding handleE'_def + by (fastforce simp: empty_fail_term empty_fail_cond split: sum.splits) + +lemma empty_fail_sequence[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequence ms)" + unfolding sequence_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_sequence_x[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequence_x ms)" + unfolding sequence_x_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_sequenceE[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequenceE ms)" + unfolding sequenceE_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_sequenceE_x[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequenceE_x ms)" + unfolding sequenceE_x_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_mapM_x[empty_fail_cond]: + "(\m. m \ f ` set ms \ empty_fail m) \ empty_fail (mapM_x f ms)" + unfolding mapM_x_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_mapME[empty_fail_cond]: + "(\m. m \ f ` set xs \ empty_fail m) \ empty_fail (mapME f xs)" + unfolding mapME_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_mapME_x[empty_fail_cond]: + "(\m'. m' \ f ` set xs \ empty_fail m') \ empty_fail (mapME_x f xs)" + unfolding mapME_x_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_filterM[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail (P m)) \ empty_fail (filterM P ms)" + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_zipWithM_x[empty_fail_cond]: + "(\x y. empty_fail (f x y)) \ empty_fail (zipWithM_x f xs ys)" + unfolding zipWithM_x_def zipWith_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_zipWithM[empty_fail_cond]: + "(\x y. empty_fail (f x y)) \ empty_fail (zipWithM f xs ys)" + unfolding zipWithM_def zipWith_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_maybeM[empty_fail_cond]: + "\x. empty_fail (f x) \ empty_fail (maybeM f t)" + unfolding maybeM_def + by (fastforce intro: empty_fail_term split: option.splits) + +lemma empty_fail_ifM[empty_fail_cond]: + "\ empty_fail P; empty_fail a; empty_fail b \ \ empty_fail (ifM P a b)" + by (simp add: ifM_def empty_fail_cond) + +lemma empty_fail_ifME[empty_fail_cond]: + "\ empty_fail P; empty_fail a; empty_fail b \ \ empty_fail (ifME P a b)" + by (simp add: ifME_def empty_fail_cond) + +lemma empty_fail_whenM[empty_fail_cond]: + "\ empty_fail P; empty_fail f \ \ empty_fail (whenM P f)" + by (simp add: whenM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_andM[empty_fail_cond]: + "\ empty_fail A; empty_fail B \ \ empty_fail (andM A B)" + by (simp add: andM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_orM[empty_fail_cond]: + "\ empty_fail A; empty_fail B \ \ empty_fail (orM A B)" + by (simp add: orM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_notM[empty_fail_cond]: + "empty_fail A \ empty_fail (notM A)" + by (simp add: notM_def empty_fail_term empty_fail_cond) + +(* not everything [simp] by default, because side conditions can slow down simp a lot *) +lemmas empty_fail[wp, intro!] 
= empty_fail_term empty_fail_cond +lemmas [simp] = empty_fail_term + + +subsection \Equations and legacy names\ + +lemma empty_fail_select_eq[simp]: + "empty_fail (select V) = (V \ {})" + by (clarsimp simp: select_def empty_fail_def) + +lemma empty_fail_liftM_eq[simp]: + "empty_fail (liftM f m) = empty_fail m" + unfolding liftM_def + by (fastforce dest: empty_fail_bindD1) + +lemma empty_fail_liftE_eq[simp]: + "empty_fail (liftE f) = empty_fail f" + by (fastforce simp: liftE_def empty_fail_def bind_def) + +lemma liftME_empty_fail_eq[simp]: + "empty_fail (liftME f m) = empty_fail m" + unfolding liftME_def + by (fastforce dest: empty_fail_bindD1 simp: bindE_def) + +(* legacy name binding *) +lemmas empty_fail_error_bits = empty_fail_returnOk empty_fail_throwError empty_fail_liftE_eq + +end diff --git a/lib/Monads/nondet/Nondet_In_Monad.thy b/lib/Monads/nondet/Nondet_In_Monad.thy new file mode 100644 index 0000000000..720b1ac96d --- /dev/null +++ b/lib/Monads/nondet/Nondet_In_Monad.thy @@ -0,0 +1,150 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Nondet_In_Monad + imports Nondet_Lemmas +begin + +section \Reasoning directly about states\ + +(* Lemmas about terms of the form "(v, s') \ fst (m s)" *) + +lemma in_throwError: + "((v, s') \ fst (throwError e s)) = (v = Inl e \ s' = s)" + by (simp add: throwError_def return_def) + +lemma in_returnOk: + "((v', s') \ fst (returnOk v s)) = (v' = Inr v \ s' = s)" + by (simp add: returnOk_def return_def) + +lemma in_bind: + "((r,s') \ fst ((do x \ f; g x od) s)) = + (\s'' x. (x, s'') \ fst (f s) \ (r, s') \ fst (g x s''))" + by (force simp: bind_def split_def) + +lemma in_bindE_R: + "((Inr r,s') \ fst ((doE x \ f; g x odE) s)) = + (\s'' x. (Inr x, s'') \ fst (f s) \ (Inr r, s') \ fst (g x s''))" + unfolding bindE_def lift_def split_def bind_def + by (force simp: throwError_def return_def split: sum.splits) + +lemma in_bindE_L: + "((Inl r, s') \ fst ((doE x \ f; g x odE) s)) \ + (\s'' x. (Inr x, s'') \ fst (f s) \ (Inl r, s') \ fst (g x s'')) \ ((Inl r, s') \ fst (f s))" + by (simp add: bindE_def bind_def) + (force simp: return_def throwError_def lift_def split_def split: sum.splits if_split_asm) + +lemma in_return: + "(r, s') \ fst (return v s) = (r = v \ s' = s)" + by (simp add: return_def) + +lemma in_liftE: + "((v, s') \ fst (liftE f s)) = (\v'. 
v = Inr v' \ (v', s') \ fst (f s))" + by (force simp: liftE_def in_bind in_return) + +lemma in_whenE: + "((v, s') \ fst (whenE P f s)) = ((P \ (v, s') \ fst (f s)) \ (\P \ v = Inr () \ s' = s))" + by (simp add: whenE_def in_returnOk) + +lemma inl_whenE: + "((Inl x, s') \ fst (whenE P f s)) = (P \ (Inl x, s') \ fst (f s))" + by (auto simp add: in_whenE) + +lemma inr_in_unlessE_throwError[termination_simp]: + "(Inr (), s') \ fst (unlessE P (throwError E) s) = (P \ s'=s)" + by (simp add: unlessE_def returnOk_def throwError_def return_def) + +lemma in_fail: + "r \ fst (fail s) = False" + by (simp add: fail_def) + +lemma in_assert: + "(r, s') \ fst (assert P s) = (P \ s' = s)" + by (simp add: assert_def return_def fail_def) + +lemma in_assertE: + "(r, s') \ fst (assertE P s) = (P \ r = Inr () \ s' = s)" + by (simp add: assertE_def returnOk_def return_def fail_def) + +lemma in_assert_opt: + "(r, s') \ fst (assert_opt v s) = (v = Some r \ s' = s)" + by (auto simp: assert_opt_def in_fail in_return split: option.splits) + +lemma in_get: + "(r, s') \ fst (get s) = (r = s \ s' = s)" + by (simp add: get_def) + +lemma in_gets: + "(r, s') \ fst (gets f s) = (r = f s \ s' = s)" + by (simp add: simpler_gets_def) + +lemma in_put: + "(r, s') \ fst (put x s) = (s' = x \ r = ())" + by (simp add: put_def) + +lemma in_when: + "(v, s') \ fst (when P f s) = ((P \ (v, s') \ fst (f s)) \ (\P \ v = () \ s' = s))" + by (simp add: when_def in_return) + +lemma in_unless: + "(v, s') \ fst (unless P f s) = ((\ P \ (v, s') \ fst (f s)) \ (P \ v = () \ s' = s))" + by (simp add: unless_def in_when) + +lemma in_unlessE: + "(v, s') \ fst (unlessE P f s) = ((\ P \ (v, s') \ fst (f s)) \ (P \ v = Inr () \ s' = s))" + by (simp add: unlessE_def in_returnOk) + +lemma inl_unlessE: + "((Inl x, s') \ fst (unlessE P f s)) = (\ P \ (Inl x, s') \ fst (f s))" + by (auto simp add: in_unlessE) + +lemma in_modify: + "(v, s') \ fst (modify f s) = (s'=f s \ v = ())" + by (simp add: modify_def bind_def get_def put_def) + +lemma gets_the_in_monad: + "((v, s') \ fst (gets_the f s)) = (s' = s \ f s = Some v)" + by (auto simp: gets_the_def in_bind in_gets in_assert_opt split: option.split) + +lemma in_alternative: + "(r,s') \ fst ((f \ g) s) = ((r,s') \ fst (f s) \ (r,s') \ fst (g s))" + by (simp add: alternative_def) + +lemma in_liftM: + "((r, s') \ fst (liftM t f s)) = (\r'. (r', s') \ fst (f s) \ r = t r')" + by (simp add: liftM_def return_def bind_def Bex_def) + +lemma in_bindE: + "(rv, s') \ fst ((f >>=E (\rv'. g rv')) s) = + ((\ex. rv = Inl ex \ (Inl ex, s') \ fst (f s)) \ + (\rv' s''. (rv, s') \ fst (g rv' s'') \ (Inr rv', s'') \ fst (f s)))" + by (force simp: bindE_def bind_def lift_def throwError_def return_def split: sum.splits) + +lemmas in_monad = inl_whenE in_whenE in_liftE in_bind in_bindE_L + in_bindE_R in_returnOk in_throwError in_fail + in_assertE in_assert in_return in_assert_opt + in_get in_gets in_put in_when inl_unlessE in_unlessE + in_unless in_modify gets_the_in_monad + in_alternative in_liftM + +lemma bind_det_exec: + "fst (a s) = {(r,s')} \ fst ((a >>= b) s) = fst (b r s')" + by (simp add: bind_def) + +lemma in_bind_det_exec: + "fst (a s) = {(r,s')} \ (s'' \ fst ((a >>= b) s)) = (s'' \ fst (b r s'))" + by (simp add: bind_def) + +lemma exec_put: + "(put s' >>= m) s = m () s'" + by (simp add: bind_def put_def) + +lemma bind_execI: + "\ (r'',s'') \ fst (f s); \x \ fst (g r'' s''). P x \ \ \x \ fst ((f >>= g) s). 
P x" + by (force simp: in_bind split_def bind_def) + +end diff --git a/lib/Monads/nondet/Nondet_Lemmas.thy b/lib/Monads/nondet/Nondet_Lemmas.thy new file mode 100644 index 0000000000..1ab44cdf17 --- /dev/null +++ b/lib/Monads/nondet/Nondet_Lemmas.thy @@ -0,0 +1,361 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Nondet_Lemmas + imports Nondet_Monad +begin + +section \General Lemmas Regarding the Nondeterministic State Monad\ + +subsection \Congruence Rules for the Function Package\ + +lemma bind_cong[fundef_cong]: + "\ f = f'; \v s s'. (v, s') \ fst (f' s) \ g v s' = g' v s' \ \ f >>= g = f' >>= g'" + by (auto simp: bind_def split_def) + +lemma bind_apply_cong [fundef_cong]: + "\ f s = f' s'; \rv st. (rv, st) \ fst (f' s') \ g rv st = g' rv st \ + \ (f >>= g) s = (f' >>= g') s'" + by (auto simp: bind_def split_def) + +lemma bindE_cong[fundef_cong]: + "\ M = M' ; \v s s'. (Inr v, s') \ fst (M' s) \ N v s' = N' v s' \ \ bindE M N = bindE M' N'" + by (auto simp: bindE_def lift_def split: sum.splits intro!: bind_cong) + +lemma bindE_apply_cong[fundef_cong]: + "\ f s = f' s'; \rv st. (Inr rv, st) \ fst (f' s') \ g rv st = g' rv st \ + \ (f >>=E g) s = (f' >>=E g') s'" + by (auto simp: bindE_def lift_def split: sum.splits intro!: bind_apply_cong) + +lemma K_bind_apply_cong[fundef_cong]: + "\ f st = f' st' \ \ K_bind f arg st = K_bind f' arg' st'" + by simp + +lemma when_apply_cong[fundef_cong]: + "\ C = C'; s = s'; C' \ m s' = m' s' \ \ when C m s = when C' m' s'" + by (simp add: when_def) + +lemma unless_apply_cong[fundef_cong]: + "\ C = C'; s = s'; \ C' \ m s' = m' s' \ \ unless C m s = unless C' m' s'" + by (simp add: when_def unless_def) + +lemma whenE_apply_cong[fundef_cong]: + "\ C = C'; s = s'; C' \ m s' = m' s' \ \ whenE C m s = whenE C' m' s'" + by (simp add: whenE_def) + +lemma unlessE_apply_cong[fundef_cong]: + "\ C = C'; s = s'; \ C' \ m s' = m' s' \ \ unlessE C m s = unlessE C' m' s'" + by (simp add: unlessE_def) + + +subsection \Simplifying Monads\ + +lemma nested_bind[simp]: + "do x <- do y <- f; return (g y) od; h x od = do y <- f; h (g y) od" + by (clarsimp simp: bind_def Let_def split_def return_def) + +lemma bind_dummy_ret_val: + "do y \ a; b od = do a; b od" + by simp + +lemma fail_update[iff]: + "fail (f s) = fail s" + by (simp add: fail_def) + +lemma fail_bind[simp]: + "fail >>= f = fail" + by (simp add: bind_def fail_def) + +lemma fail_bindE[simp]: + "fail >>=E f = fail" + by (simp add: bindE_def bind_def fail_def) + +lemma assert_A_False[simp]: + "assert False = fail" + by (simp add: assert_def) + +lemma assert_A_True[simp]: + "assert True = return ()" + by (simp add: assert_def) + +lemma assert_False[simp]: + "assert False >>= f = fail" + by simp + +lemma assert_True[simp]: + "assert True >>= f = f ()" + by simp + +lemma assertE_False[simp]: + "assertE False >>=E f = fail" + by (simp add: assertE_def) + +lemma assertE_True[simp]: + "assertE True >>=E f = f ()" + by (simp add: assertE_def) + +lemma when_False_bind[simp]: + "when False g >>= f = f ()" + by (rule ext) (simp add: when_def bind_def return_def) + +lemma when_True_bind[simp]: + "when True g >>= f = g >>= f" + by (simp add: when_def bind_def return_def) + +lemma whenE_False_bind[simp]: + "whenE False g >>=E f = f ()" + by (simp add: whenE_def bindE_def returnOk_def lift_def) + +lemma whenE_True_bind[simp]: + "whenE True g >>=E f = g >>=E f" + by (simp add: whenE_def bindE_def returnOk_def lift_def) + 
+lemma when_True[simp]: + "when True X = X" + by (clarsimp simp: when_def) + +lemma when_False[simp]: + "when False X = return ()" + by (clarsimp simp: when_def) + +lemma unless_False[simp]: + "unless False X = X" + by (clarsimp simp: unless_def) + +lemma unlessE_False[simp]: + "unlessE False f = f" + unfolding unlessE_def by fastforce + +lemma unless_True[simp]: + "unless True X = return ()" + by (clarsimp simp: unless_def) + +lemma unlessE_True[simp]: + "unlessE True f = returnOk ()" + unfolding unlessE_def by fastforce + +lemma unlessE_whenE: + "unlessE P = whenE (\P)" + by (rule ext) (simp add: unlessE_def whenE_def) + +lemma unless_when: + "unless P = when (\P)" + by (rule ext) (simp add: unless_def when_def) + +lemma gets_to_return[simp]: + "gets (\s. v) = return v" + by (clarsimp simp: gets_def put_def get_def bind_def return_def) + +lemma assert_opt_Some: + "assert_opt (Some x) = return x" + by (simp add: assert_opt_def) + +lemma assertE_liftE: + "assertE P = liftE (assert P)" + by (simp add: assertE_def assert_def liftE_def returnOk_def) + +lemma liftE_handleE'[simp]: + "(liftE a b) = liftE a" + by (clarsimp simp: liftE_def handleE'_def) + +lemma liftE_handleE[simp]: + "(liftE a b) = liftE a" + unfolding handleE_def by simp + +lemma alternative_bind: + "((a \ b) >>= c) = ((a >>= c) \ (b >>= c))" + by (fastforce simp add: alternative_def bind_def split_def) + +lemma alternative_refl: + "(a \ a) = a" + by (simp add: alternative_def) + +lemma alternative_com: + "(f \ g) = (g \ f)" + by (auto simp: alternative_def) + +lemma liftE_alternative: + "liftE (a \ b) = (liftE a \ liftE b)" + by (simp add: liftE_def alternative_bind) + + +subsection \Lifting and Alternative Basic Definitions\ + +lemma liftE_liftM: + "liftE = liftM Inr" + by (auto simp: liftE_def liftM_def) + +lemma liftME_liftM: + "liftME f = liftM (case_sum Inl (Inr \ f))" + unfolding liftME_def liftM_def bindE_def returnOk_def lift_def + apply (rule ext) + apply (rule arg_cong[where f="bind m" for m]) + apply (fastforce simp: throwError_def split: sum.splits) + done + +lemma liftE_bindE: + "liftE a >>=E b = a >>= b" + by (simp add: liftE_def bindE_def lift_def bind_assoc) + +lemma liftM_id[simp]: + "liftM id = id" + by (auto simp: liftM_def) + +lemma liftM_bind: + "liftM t f >>= g = f >>= (\x. g (t x))" + by (simp add: liftM_def bind_assoc) + +lemma gets_bind_ign: + "gets f >>= (\x. m) = m" + by (simp add: bind_def simpler_gets_def) + +lemma exec_get: + "(get >>= f) x = f x x" + by (simp add: get_def bind_def) + +lemmas get_bind_apply = exec_get (* FIXME lib: eliminate *) + +lemma exec_gets: + "(gets f >>= m) s = m (f s) s" + by (simp add: simpler_gets_def bind_def) + +lemma bind_eqI: + "\ f = f'; \x. g x = g' x \ \ f >>= g = f' >>= g'" + by (auto simp: bind_def split_def) + +lemma condition_split: + "P (condition C a b s) \ (C s \ P (a s)) \ (\C s \ P (b s))" + by (clarsimp simp: condition_def) + +lemma condition_split_asm: + "P (condition C a b s) \ (\(C s \ \ P (a s) \ \C s \ \P (b s)))" + by (clarsimp simp: condition_def) + +lemmas condition_splits = condition_split condition_split_asm + +lemma condition_true_triv[simp]: + "condition (\_. True) A B = A" + by (fastforce split: condition_splits) + +lemma condition_false_triv[simp]: + "condition (\_. 
False) A B = B" + by (fastforce split: condition_splits) + +lemma condition_true: + "P s \ condition P A B s = A s" + by (clarsimp simp: condition_def) + +lemma condition_false: + "\ P s \ condition P A B s = B s" + by (clarsimp simp: condition_def) + +lemmas arg_cong_bind = arg_cong2[where f=bind] +lemmas arg_cong_bind1 = arg_cong_bind[OF refl ext] + + +subsection \Low-level monadic reasoning\ + +lemma monad_eqI [intro]: + "\ \r t s. (r, t) \ fst (A s) \ (r, t) \ fst (B s); + \r t s. (r, t) \ fst (B s) \ (r, t) \ fst (A s); + \x. snd (A x) = snd (B x) \ + \ A = B" for A :: "('s, 'a) nondet_monad" + by (fastforce intro!: set_eqI prod_eqI) + +lemma monad_state_eqI [intro]: + "\ \r t. (r, t) \ fst (A s) \ (r, t) \ fst (B s'); + \r t. (r, t) \ fst (B s') \ (r, t) \ fst (A s); + snd (A s) = snd (B s') \ + \ A s = B s'" for A :: "('s, 'a) nondet_monad" + by (fastforce intro!: set_eqI prod_eqI) + + +subsection \General @{const whileLoop} reasoning\ + +definition whileLoop_terminatesE :: + "('a \ 's \ bool) \ ('a \ ('s, 'e + 'a) nondet_monad) \ 'a \ 's \ bool" + where + "whileLoop_terminatesE C B \ + \r. whileLoop_terminates (\r s. case r of Inr v \ C v s | _ \ False) (lift B) (Inr r)" + +lemma whileLoop_cond_fail: + "\ C x s \ whileLoop C B x s = return x s" + by (auto simp: return_def whileLoop_def + intro: whileLoop_results.intros whileLoop_terminates.intros + elim!: whileLoop_results.cases) + +lemma whileLoopE_cond_fail: + "\ C x s \ whileLoopE C B x s = returnOk x s" + unfolding whileLoopE_def returnOk_def + by (auto intro: whileLoop_cond_fail) + +lemma whileLoop_results_simps_no_move[simp]: + "(Some x, Some x) \ whileLoop_results C B \ \C (fst x) (snd x)" + (is "?LHS x \ ?RHS x") +proof (rule iffI) + assume "?LHS x" + then have "(\a. Some x = Some a) \ ?RHS (the (Some x))" + by (induct rule: whileLoop_results.induct, auto) + thus "?RHS x" + by clarsimp +next + assume "?RHS x" + thus "?LHS x" + by (metis surjective_pairing whileLoop_results.intros(1)) +qed + +lemma whileLoop_unroll: + "whileLoop C B r = condition (C r) (B r >>= whileLoop C B) (return r)" + (is "?LHS r = ?RHS r") +proof - + have "\r s. \ C r s \ ?LHS r s = ?RHS r s" + by (clarsimp simp: whileLoop_cond_fail condition_def bind_def return_def) + moreover + have "\r s. C r s \ ?LHS r s = (B r >>= whileLoop C B) s" + apply (rule monad_state_eqI) + apply (clarsimp simp: whileLoop_def bind_def split_def) + apply (subst (asm) whileLoop_results_simps_valid) + apply fastforce + apply (clarsimp simp: whileLoop_def bind_def split_def) + apply (subst whileLoop_results.simps) + apply fastforce + apply (clarsimp simp: whileLoop_def bind_def split_def) + apply (subst whileLoop_results.simps) + apply (subst whileLoop_terminates.simps) + apply fastforce + done + ultimately + show ?thesis by (fastforce simp: condition_def) +qed + +lemma whileLoop_unroll': + "whileLoop C B r = condition (C r) (B r) (return r) >>= whileLoop C B" + apply (subst whileLoop_unroll) + apply (auto simp: condition_def bind_def return_def split_def whileLoop_cond_fail) + done + +lemma whileLoopE_unroll: + "whileLoopE C B r = condition (C r) (B r >>=E whileLoopE C B) (returnOk r)" + unfolding whileLoopE_def + apply (rule ext) + apply (subst whileLoop_unroll) + apply (clarsimp simp: bindE_def returnOk_def lift_def split: condition_splits) + apply (rule arg_cong[where f="\a. 
(B r >>= a) x" for x]) + apply (rule ext)+ + apply (clarsimp simp: lift_def split: sum.splits) + apply (subst whileLoop_unroll) + apply (clarsimp simp: condition_false throwError_def) + done + +lemma whileLoopE_unroll': + "whileLoopE C B r = condition (C r) (B r) (returnOk r) >>=E whileLoopE C B" + apply (subst whileLoopE_unroll) + apply (fastforce simp: condition_def bindE_def bind_def lift_def split_def whileLoopE_cond_fail + returnOk_def return_def) + done + +end diff --git a/lib/Monads/nondet/Nondet_Monad.thy b/lib/Monads/nondet/Nondet_Monad.thy new file mode 100644 index 0000000000..e14cf17fd2 --- /dev/null +++ b/lib/Monads/nondet/Nondet_Monad.thy @@ -0,0 +1,667 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* + Nondeterministic state and error monads with failure in Isabelle. +*) + +chapter "Nondeterministic State Monad with Failure" + +theory Nondet_Monad + imports + Fun_Pred_Syntax + Monad_Lib +begin + +text \ + \label{c:monads} + + State monads are used extensively in the seL4 specification. They are defined below.\ + +section "The Monad" + +text \ + The basic type of the nondeterministic state monad with failure is + very similar to the normal state monad. Instead of a pair consisting + of result and new state, we return a set of these pairs coupled with + a failure flag. Each element in the set is a potential result of the + computation. The flag is @{const True} if there is an execution path + in the computation that may have failed. Conversely, if the flag is + @{const False}, none of the computations resulting in the returned + set can have failed.\ +type_synonym ('s, 'a) nondet_monad = "'s \ ('a \ 's) set \ bool" + + +text \ + Print the type @{typ "('s,'a) nondet_monad"} instead of its unwieldy expansion. + Needs an AST translation in code, because it needs to check that the state variable + @{typ 's} occurs twice. This comparison is not guaranteed to always work as expected + (AST instances might have different decoration), but it does seem to work here.\ +print_ast_translation \ + let + fun monad_tr _ [t1, Ast.Appl [Ast.Constant @{type_syntax prod}, + Ast.Appl [Ast.Constant @{type_syntax set}, + Ast.Appl [Ast.Constant @{type_syntax prod}, t2, t3]], + Ast.Constant @{type_syntax bool}]] = + if t3 = t1 + then Ast.Appl [Ast.Constant @{type_syntax "nondet_monad"}, t1, t2] + else raise Match + in [(@{type_syntax "fun"}, monad_tr)] end +\ + + +text \ + The definition of fundamental monad functions @{text return} and + @{text bind}. The monad function @{text "return x"} does not change + the state, does not fail, and returns @{text "x"}.\ +definition return :: "'a \ ('s,'a) nondet_monad" where + "return a \ \s. ({(a,s)},False)" + +text \ + The monad function @{text "bind f g"}, also written @{text "f >>= g"}, + is the execution of @{term f} followed by the execution of @{text g}. + The function @{text g} takes the result value \emph{and} the result + state of @{text f} as parameter. The definition says that the result of + the combined operation is the union of the set of sets that is created + by @{text g} applied to the result sets of @{text f}. The combined + operation may have failed, if @{text f} may have failed or @{text g} may + have failed on any of the results of @{text f}.\ +definition bind :: + "('s, 'a) nondet_monad \ ('a \ ('s, 'b) nondet_monad) \ ('s, 'b) nondet_monad" (infixl ">>=" 60) + where + "bind f g \ \s. 
(\(fst ` case_prod g ` fst (f s)), + True \ snd ` case_prod g ` fst (f s) \ snd (f s))" + +text \Sometimes it is convenient to write @{text bind} in reverse order.\ +abbreviation (input) bind_rev :: + "('c \ ('a, 'b) nondet_monad) \ ('a, 'c) nondet_monad \ ('a, 'b) nondet_monad" (infixl "=<<" 60) + where + "g =<< f \ f >>= g" + +text \ + The basic accessor functions of the state monad. @{text get} returns + the current state as result, does not fail, and does not change the state. + @{text "put s"} returns nothing (@{typ unit}), changes the current state + to @{text s} and does not fail.\ +definition get :: "('s,'s) nondet_monad" where + "get \ \s. ({(s,s)}, False)" + +definition put :: "'s \ ('s, unit) nondet_monad" where + "put s \ \_. ({((),s)}, False)" + + +subsection "Nondeterminism" + +text \ + Basic nondeterministic functions. @{text "select A"} chooses an element + of the set @{text A}, does not change the state, and does not fail + (even if the set is empty). @{text "f \ g"} executes @{text f} or + executes @{text g}. It retuns the union of results of @{text f} and + @{text g}, and may have failed if either may have failed.\ +definition select :: "'a set \ ('s,'a) nondet_monad" where + "select A \ \s. (A \ {s}, False)" + +definition alternative :: + "('s, 'a) nondet_monad \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad" (infixl "\" 20) + where + "f \ g \ \s. (fst (f s) \ fst (g s), snd (f s) \ snd (g s))" + +text \ + A variant of @{text select} that takes a pair. The first component is a set + as in normal @{text select}, the second component indicates whether the + execution failed. This is useful to lift monads between different state + spaces.\ +definition select_f :: "'a set \ bool \ ('s,'a) nondet_monad" where + "select_f S \ \s. (fst S \ {s}, snd S)" + +text \ + @{text state_select} takes a relationship between states, and outputs + nondeterministically a state related to the input state. Fails if no such + state exists.\ +definition state_select :: "('s \ 's) set \ ('s, unit) nondet_monad" where + "state_select r \ \s. ((\x. ((), x)) ` {s'. (s, s') \ r}, \ (\s'. (s, s') \ r))" + + +subsection "Failure" + +text \ + The monad function that always fails. Returns an empty set of results and sets the failure flag.\ +definition fail :: "('s, 'a) nondet_monad" where + "fail \ \s. ({}, True)" + +text \Assertions: fail if the property @{text P} is not true\ +definition assert :: "bool \ ('a, unit) nondet_monad" where + "assert P \ if P then return () else fail" + +text \Fail if the value is @{const None}, return result @{text v} for @{term "Some v"}\ +definition assert_opt :: "'a option \ ('b, 'a) nondet_monad" where + "assert_opt v \ case v of None \ fail | Some v \ return v" + +text \An assertion that also can introspect the current state.\ +definition state_assert :: "('s \ bool) \ ('s, unit) nondet_monad" where + "state_assert P \ get >>= (\s. assert (P s))" + +subsection "Generic functions on top of the state monad" + +text \Apply a function to the current state and return the result without changing the state.\ +definition gets :: "('s \ 'a) \ ('s, 'a) nondet_monad" where + "gets f \ get >>= (\s. return (f s))" + +text \Modify the current state using the function passed in.\ +definition modify :: "('s \ 's) \ ('s, unit) nondet_monad" where + "modify f \ get >>= (\s. put (f s))" + +lemma simpler_gets_def: + "gets f = (\s. ({(f s, s)}, False))" + by (simp add: gets_def return_def bind_def get_def) + +lemma simpler_modify_def: + "modify f = (\s. 
({((), f s)}, False))" + by (simp add: modify_def bind_def get_def put_def) + +text \Execute the given monad when the condition is true, return @{text "()"} otherwise.\ +definition "when" :: "bool \ ('s, unit) nondet_monad \ ('s, unit) nondet_monad" where + "when P m \ if P then m else return ()" + +text \Execute the given monad unless the condition is true, return @{text "()"} otherwise.\ +definition unless :: "bool \ ('s, unit) nondet_monad \ ('s, unit) nondet_monad" where + "unless P m \ when (\P) m" + +text \ + Perform a test on the current state, performing the left monad if + the result is true or the right monad if the result is false. \ +definition condition :: + "('s \ bool) \ ('s, 'r) nondet_monad \ ('s, 'r) nondet_monad \ ('s, 'r) nondet_monad" + where + "condition P L R \ \s. if (P s) then (L s) else (R s)" + +notation (output) + condition ("(condition (_)// (_)// (_))" [1000,1000,1000] 1000) + +text \ + Apply an option valued function to the current state, fail if it returns @{const None}, + return @{text v} if it returns @{term "Some v"}.\ +definition gets_the :: "('s \ 'a option) \ ('s, 'a) nondet_monad" where + "gets_the f \ gets f >>= assert_opt" + +text \ + Get a map (such as a heap) from the current state and apply an argument to the map. + Fail if the map returns @{const None}, otherwise return the value.\ +definition gets_map :: "('s \ 'a \ 'b option) \ 'a \ ('s, 'b) nondet_monad" where + "gets_map f p \ gets f >>= (\m. assert_opt (m p))" + + +subsection \The Monad Laws\ + +text \An alternative definition of @{term bind}, sometimes more convenient.\ +lemma bind_def': + "(f >>= g) \ + \s. ({(r'', s''). \(r', s') \ fst (f s). (r'', s'') \ fst (g r' s') }, + snd (f s) \ (\(r', s') \ fst (f s). snd (g r' s')))" + by (rule eq_reflection) + (auto simp add: bind_def split_def Let_def) + +text \Each monad satisfies at least the following three laws.\ + +text \@{term return} is absorbed at the left of a @{term bind}, applying the return value directly:\ +lemma return_bind[simp]: + "(return x >>= f) = f x" + by (simp add: return_def bind_def) + +text \@{term return} is absorbed on the right of a @{term bind}\ +lemma bind_return[simp]: + "(m >>= return) = m" + by (simp add: bind_def return_def split_def) + +text \@{term bind} is associative\ +lemma bind_assoc: + fixes m :: "('a,'b) nondet_monad" + fixes f :: "'b \ ('a,'c) nondet_monad" + fixes g :: "'c \ ('a,'d) nondet_monad" + shows "(m >>= f) >>= g = m >>= (\x. f x >>= g)" + unfolding bind_def Let_def split_def + by (auto intro: rev_image_eqI) + + +section \Adding Exceptions\ + +text \ + The type @{typ "('s,'a) nondet_monad"} gives us nondeterminism and + failure. We now extend this monad with exceptional return values + that abort normal execution, but can be handled explicitly. + We use the sum type to indicate exceptions. + + In @{typ "('s, 'e + 'a) nondet_monad"}, @{typ "'s"} is the state, + @{typ 'e} is an exception, and @{typ 'a} is a normal return value. + + This new type itself forms a monad again. Since type classes in + Isabelle are not powerful enough to express the class of monads, + we provide new names for the @{term return} and @{term bind} functions + in this monad. We call them @{text returnOk} (for normal return values) + and @{text bindE} (for composition). 
We also define @{text throwError} + to return an exceptional value.\ +definition returnOk :: "'a \ ('s, 'e + 'a) nondet_monad" where + "returnOk \ return o Inr" + +definition throwError :: "'e \ ('s, 'e + 'a) nondet_monad" where + "throwError \ return o Inl" + +text \ + Lifting a function over the exception type: if the input is an + exception, return that exception; otherwise continue execution.\ +definition lift :: "('a \ ('s, 'e + 'b) nondet_monad) \ 'e +'a \ ('s, 'e + 'b) nondet_monad" where + "lift f v \ case v of Inl e \ throwError e | Inr v' \ f v'" + +text \ + The definition of @{term bind} in the exception monad (new + name @{text bindE}): the same as normal @{term bind}, but + the right-hand side is skipped if the left-hand side + produced an exception.\ +definition bindE :: + "('s, 'e + 'a) nondet_monad \ ('a \ ('s, 'e + 'b) nondet_monad) \ ('s, 'e + 'b) nondet_monad" + (infixl ">>=E" 60) where + "f >>=E g \ f >>= lift g" + +text \ + Lifting a normal nondeterministic monad into the + exception monad is achieved by always returning its + result as normal result and never throwing an exception.\ +definition liftE :: "('s,'a) nondet_monad \ ('s, 'e+'a) nondet_monad" where + "liftE f \ f >>= (\r. return (Inr r))" + +text \ + Since the underlying type and @{text return} function changed, + we need new definitions for when and unless:\ +definition whenE :: "bool \ ('s, 'e + unit) nondet_monad \ ('s, 'e + unit) nondet_monad" where + "whenE P f \ if P then f else returnOk ()" + +definition unlessE :: "bool \ ('s, 'e + unit) nondet_monad \ ('s, 'e + unit) nondet_monad" where + "unlessE P f \ if P then returnOk () else f" + +text \ + Throwing an exception when the parameter is @{term None}, otherwise + returning @{term "v"} for @{term "Some v"}.\ +definition throw_opt :: "'e \ 'a option \ ('s, 'e + 'a) nondet_monad" where + "throw_opt ex x \ case x of None \ throwError ex | Some v \ returnOk v" + +text \ + Failure in the exception monad is redefined in the same way + as @{const whenE} and @{const unlessE}, with @{term returnOk} + instead of @{term return}.\ +definition assertE :: "bool \ ('a, 'e + unit) nondet_monad" where + "assertE P \ if P then returnOk () else fail" + + +subsection "Monad Laws for the Exception Monad" + +text \More direct definition of @{const liftE}:\ +lemma liftE_def2: + "liftE f = (\s. ((\(v,s'). (Inr v, s')) ` fst (f s), snd (f s)))" + by (auto simp: liftE_def return_def split_def bind_def) + +text \Left @{const returnOk} absorbtion over @{term bindE}:\ +lemma returnOk_bindE[simp]: "(returnOk x >>=E f) = f x" + unfolding bindE_def returnOk_def + by (clarsimp simp: lift_def) + +lemma lift_return[simp]: + "lift (return \ Inr) = return" + by (auto simp: lift_def throwError_def split: sum.splits) + +text \Right @{const returnOk} absorbtion over @{term bindE}:\ +lemma bindE_returnOk[simp]: + "(m >>=E returnOk) = m" + by (simp add: bindE_def returnOk_def) + +text \Associativity of @{const bindE}:\ +lemma bindE_assoc: + "(m >>=E f) >>=E g = m >>=E (\x. f x >>=E g)" + unfolding bindE_def + by (fastforce simp: bind_assoc lift_def throwError_def + split: sum.splits + intro: arg_cong[where f="\x. 
m >>= x"]) + +text \@{const returnOk} could also be defined via @{const liftE}:\ +lemma returnOk_liftE: + "returnOk x = liftE (return x)" + by (simp add: liftE_def returnOk_def) + +text \Execution after throwing an exception is skipped:\ +lemma throwError_bindE[simp]: + "(throwError E >>=E f) = throwError E" + by (simp add: bindE_def bind_def throwError_def lift_def return_def) + + +section "Syntax" + +text \This section defines traditional Haskell-like do-syntax + for the state monad in Isabelle.\ + +subsection "Syntax for the Nondeterministic State Monad" + +text \ + We use @{text K_bind} to syntactically indicate the case where the return argument + of the left side of a @{term bind} is ignored\ +definition K_bind :: "'a \ 'b \ 'a" where + K_bind_def[iff]: "K_bind \ \x y. x" + +nonterminal + dobinds and dobind and nobind + +syntax + "_dobind" :: "[pttrn, 'a] => dobind" ("(_ <-/ _)" 10) + "" :: "dobind => dobinds" ("_") + "_nobind" :: "'a => dobind" ("_") + "_dobinds" :: "[dobind, dobinds] => dobinds" ("(_);//(_)") + + "_do" :: "[dobinds, 'a] => 'a" ("(do ((_);//(_))//od)" 100) +syntax (xsymbols) + "_dobind" :: "[pttrn, 'a] => dobind" ("(_ \/ _)" 10) + +translations + "_do (_dobinds b bs) e" == "_do b (_do bs e)" + "_do (_nobind b) e" == "b >>= (CONST K_bind e)" + "do x <- a; e od" == "a >>= (\x. e)" + +text \Syntax examples:\ +lemma "do x \ return 1; + return (2::nat); + return x + od = + return 1 >>= + (\x. return (2::nat) >>= + K_bind (return x))" + by (rule refl) + +lemma "do x \ return 1; + return 2; + return x + od = return 1" + by simp + +subsection "Syntax for the Exception Monad" + +text \ + Since the exception monad is a different type, we need to distinguish it in the syntax + if we want to avoid ambiguous terms. We use @{text doE}/@{text odE} for this, but can + re-use most of the productions from @{text do}/@{text od} above. \ +syntax + "_doE" :: "[dobinds, 'a] => 'a" ("(doE ((_);//(_))//odE)" 100) + +translations + "_doE (_dobinds b bs) e" == "_doE b (_doE bs e)" + "_doE (_nobind b) e" == "b >>=E (CONST K_bind e)" + "doE x <- a; e odE" == "a >>=E (\x. e)" + +text \Syntax examples:\ +lemma "doE x \ returnOk 1; + returnOk (2::nat); + returnOk x + odE = + returnOk 1 >>=E + (\x. returnOk (2::nat) >>=E + K_bind (returnOk x))" + by (rule refl) + +lemma "doE x \ returnOk 1; + returnOk 2; + returnOk x + odE = returnOk 1" + by simp + + +section "Library of additional Monadic Functions and Combinators" + +text \Lifting a normal function into the monad type:\ +definition liftM :: "('a \ 'b) \ ('s,'a) nondet_monad \ ('s, 'b) nondet_monad" where + "liftM f m \ do x \ m; return (f x) od" + +text \The same for the exception monad:\ +definition liftME :: "('a \ 'b) \ ('s,'e+'a) nondet_monad \ ('s,'e+'b) nondet_monad" where + "liftME f m \ doE x \ m; returnOk (f x) odE" + +text \Execute @{term f} for @{term "Some x"}, otherwise do nothing.\ +definition maybeM :: "('a \ ('s, unit) nondet_monad) \ 'a option \ ('s, unit) nondet_monad" where + "maybeM f y \ case y of Some x \ f x | None \ return ()" + +text \Run a sequence of monads from left to right, ignoring return values.\ +definition sequence_x :: "('s, 'a) nondet_monad list \ ('s, unit) nondet_monad" where + "sequence_x xs \ foldr (\x y. x >>= (\_. 
y)) xs (return ())" + +text \ + Map a monadic function over a list by applying it to each element + of the list from left to right, ignoring return values.\ +definition mapM_x :: "('a \ ('s,'b) nondet_monad) \ 'a list \ ('s, unit) nondet_monad" where + "mapM_x f xs \ sequence_x (map f xs)" + +text \ + Map a monadic function with two parameters over two lists, + going through both lists simultaneously, left to right, ignoring + return values.\ +definition zipWithM_x :: + "('a \ 'b \ ('s,'c) nondet_monad) \ 'a list \ 'b list \ ('s, unit) nondet_monad" + where + "zipWithM_x f xs ys \ sequence_x (zipWith f xs ys)" + +text \ + The same three functions as above, but returning a list of + return values instead of @{text unit}\ +definition sequence :: "('s, 'a) nondet_monad list \ ('s, 'a list) nondet_monad" where + "sequence xs \ let mcons = (\p q. p >>= (\x. q >>= (\y. return (x#y)))) + in foldr mcons xs (return [])" + +definition mapM :: "('a \ ('s,'b) nondet_monad) \ 'a list \ ('s, 'b list) nondet_monad" where + "mapM f xs \ sequence (map f xs)" + +definition zipWithM :: + "('a \ 'b \ ('s,'c) nondet_monad) \ 'a list \ 'b list \ ('s, 'c list) nondet_monad" + where + "zipWithM f xs ys \ sequence (zipWith f xs ys)" + +definition foldM :: + "('b \ 'a \ ('s, 'a) nondet_monad) \ 'b list \ 'a \ ('s, 'a) nondet_monad" + where + "foldM m xs a \ foldr (\p q. q >>= m p) xs (return a) " + +definition foldME :: + "('b \ 'a \ ('s,('e + 'b)) nondet_monad) \ 'b \ 'a list \ ('s, ('e + 'b)) nondet_monad" + where + "foldME m a xs \ foldr (\p q. q >>=E swp m p) xs (returnOk a)" + +text \ + The sequence and map functions above for the exception monad, with and without + lists of return value\ +definition sequenceE_x :: "('s, 'e+'a) nondet_monad list \ ('s, 'e+unit) nondet_monad" where + "sequenceE_x xs \ foldr (\x y. doE _ <- x; y odE) xs (returnOk ())" + +definition mapME_x :: "('a \ ('s,'e+'b) nondet_monad) \ 'a list \ ('s,'e+unit) nondet_monad" where + "mapME_x f xs \ sequenceE_x (map f xs)" + +definition sequenceE :: "('s, 'e+'a) nondet_monad list \ ('s, 'e+'a list) nondet_monad" where + "sequenceE xs \ let mcons = (\p q. p >>=E (\x. q >>=E (\y. returnOk (x#y)))) + in foldr mcons xs (returnOk [])" + +definition mapME :: + "('a \ ('s,'e+'b) nondet_monad) \ 'a list \ ('s,'e+'b list) nondet_monad" + where + "mapME f xs \ sequenceE (map f xs)" + +text \Filtering a list using a monadic function as predicate:\ +primrec filterM :: "('a \ ('s, bool) nondet_monad) \ 'a list \ ('s, 'a list) nondet_monad" where + "filterM P [] = return []" +| "filterM P (x # xs) = do + b <- P x; + ys <- filterM P xs; + return (if b then (x # ys) else ys) + od" + +text \An alternative definition of @{term state_select}\ +lemma state_select_def2: + "state_select r \ (do + s \ get; + S \ return {s'. (s, s') \ r}; + assert (S \ {}); + s' \ select S; + put s' + od)" + apply (clarsimp simp add: state_select_def get_def return_def assert_def fail_def select_def + put_def bind_def fun_eq_iff + intro!: eq_reflection) + apply fastforce + done + + +section "Catching and Handling Exceptions" + +text \ + Turning an exception monad into a normal state monad + by catching and handling any potential exceptions:\ +definition catch :: + "('s, 'e + 'a) nondet_monad \ ('e \ ('s, 'a) nondet_monad) \ ('s, 'a) nondet_monad" + (infix "" 10) where + "f handler \ + do x \ f; + case x of + Inr b \ return b + | Inl e \ handler e + od" + +text \ + Handling exceptions, but staying in the exception monad. 
+ The handler may throw a type of exceptions different from + the left side.\ +definition handleE' :: + "('s, 'e1 + 'a) nondet_monad \ ('e1 \ ('s, 'e2 + 'a) nondet_monad) \ ('s, 'e2 + 'a) nondet_monad" + (infix "" 10) where + "f handler \ + do + v \ f; + case v of + Inl e \ handler e + | Inr v' \ return (Inr v') + od" + +text \ + A type restriction of the above that is used more commonly in + practice: the exception handle (potentially) throws exception + of the same type as the left-hand side.\ +definition handleE :: + "('s, 'x + 'a) nondet_monad \ ('x \ ('s, 'x + 'a) nondet_monad) \ ('s, 'x + 'a) nondet_monad" + (infix "" 10) where + "handleE \ handleE'" + +text \ + Handling exceptions, and additionally providing a continuation + if the left-hand side throws no exception:\ +definition handle_elseE :: + "('s, 'e + 'a) nondet_monad \ ('e \ ('s, 'ee + 'b) nondet_monad) \ + ('a \ ('s, 'ee + 'b) nondet_monad) \ ('s, 'ee + 'b) nondet_monad" ("_ _ _" 10) + where + "f handler continue \ + do v \ f; + case v of Inl e \ handler e + | Inr v' \ continue v' + od" + +subsection "Loops" + +text \ + Loops are handled using the following inductive predicate; + non-termination is represented using the failure flag of the + monad.\ + +inductive_set whileLoop_results :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) nondet_monad) \ ((('r \ 's) option) \ (('r \ 's) option)) set" + for C B where + "\ \ C r s \ \ (Some (r, s), Some (r, s)) \ whileLoop_results C B" + | "\ C r s; snd (B r s) \ \ (Some (r, s), None) \ whileLoop_results C B" + | "\ C r s; (r', s') \ fst (B r s); (Some (r', s'), z) \ whileLoop_results C B \ + \ (Some (r, s), z) \ whileLoop_results C B" + +inductive_cases whileLoop_results_cases_valid: "(Some x, Some y) \ whileLoop_results C B" +inductive_cases whileLoop_results_cases_fail: "(Some x, None) \ whileLoop_results C B" +inductive_simps whileLoop_results_simps: "(Some x, y) \ whileLoop_results C B" +inductive_simps whileLoop_results_simps_valid: "(Some x, Some y) \ whileLoop_results C B" +inductive_simps whileLoop_results_simps_start_fail[simp]: "(None, x) \ whileLoop_results C B" + +inductive whileLoop_terminates :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) nondet_monad) \ 'r \ 's \ bool" + for C B where + "\ C r s \ whileLoop_terminates C B r s" + | "\ C r s; \(r', s') \ fst (B r s). whileLoop_terminates C B r' s' \ + \ whileLoop_terminates C B r s" + +inductive_cases whileLoop_terminates_cases: "whileLoop_terminates C B r s" +inductive_simps whileLoop_terminates_simps: "whileLoop_terminates C B r s" + +definition whileLoop :: + "('a \ 'b \ bool) \ ('a \ ('b, 'a) nondet_monad) \ 'a \ ('b, 'a) nondet_monad" + where + "whileLoop C B \ \r s. + ({(r',s'). (Some (r, s), Some (r', s')) \ whileLoop_results C B}, + (Some (r, s), None) \ whileLoop_results C B \ \whileLoop_terminates C B r s)" + +notation (output) + whileLoop ("(whileLoop (_)// (_))" [1000, 1000] 1000) + +definition whileLoopE :: + "('r \ 's \ bool) \ ('r \ ('s, 'e + 'r) nondet_monad) \ 'r \ 's \ (('e + 'r) \ 's) set \ bool" + where + "whileLoopE C body \ + \r. whileLoop (\r s. 
(case r of Inr v \ C v s | _ \ False)) (lift body) (Inr r)" + +notation (output) + whileLoopE ("(whileLoopE (_)// (_))" [1000, 1000] 1000) + + +section "Combinators that have conditions with side effects" + +definition notM :: "('s, bool) nondet_monad \ ('s, bool) nondet_monad" where + "notM m = do c \ m; return (\ c) od" + +definition whileM :: + "('s, bool) nondet_monad \ ('s, 'a) nondet_monad \ ('s, unit) nondet_monad" + where + "whileM C B \ do + c \ C; + whileLoop (\c s. c) (\_. do B; C od) c; + return () + od" + +definition ifM :: + "('s, bool) nondet_monad \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad" + where + "ifM test t f = do + c \ test; + if c then t else f + od" + +definition ifME :: + "('a, 'b + bool) nondet_monad \ ('a, 'b + 'c) nondet_monad \ ('a, 'b + 'c) nondet_monad + \ ('a, 'b + 'c) nondet_monad" + where + "ifME test t f = doE + c \ test; + if c then t else f + odE" + +definition whenM :: + "('s, bool) nondet_monad \ ('s, unit) nondet_monad \ ('s, unit) nondet_monad" + where + "whenM t m = ifM t m (return ())" + +definition orM :: + "('s, bool) nondet_monad \ ('s, bool) nondet_monad \ ('s, bool) nondet_monad" + where + "orM a b = ifM a (return True) b" + +definition andM :: + "('s, bool) nondet_monad \ ('s, bool) nondet_monad \ ('s, bool) nondet_monad" + where + "andM a b = ifM a b (return False)" + +end diff --git a/lib/MonadEq.thy b/lib/Monads/nondet/Nondet_MonadEq.thy similarity index 87% rename from lib/MonadEq.thy rename to lib/Monads/nondet/Nondet_MonadEq.thy index fff338c739..b73a1df9e2 100644 --- a/lib/MonadEq.thy +++ b/lib/Monads/nondet/Nondet_MonadEq.thy @@ -19,8 +19,10 @@ * * are added to the "monad_eq" set. *) -theory MonadEq -imports NonDetMonadVCG +theory Nondet_MonadEq + imports + Nondet_In_Monad + Nondet_VCG begin (* Setup "monad_eq" attributes. *) @@ -52,24 +54,22 @@ method_setup monad_eq = \ Method.sections Clasimp.clasimp_modifiers >> (K (SIMPLE_METHOD o monad_eq_tac))\ "prove equality on monads" -lemma monad_eq_simp_state [monad_eq]: +lemma monad_eq_simp_state[monad_eq]: "((A :: ('s, 'a) nondet_monad) s = B s') = ((\r t. (r, t) \ fst (A s) \ (r, t) \ fst (B s')) \ (\r t. (r, t) \ fst (B s') \ (r, t) \ fst (A s)) \ (snd (A s) = snd (B s')))" - apply (auto intro!: set_eqI prod_eqI) - done + by (auto intro!: set_eqI prod_eqI) -lemma monad_eq_simp [monad_eq]: +lemma monad_eq_simp[monad_eq]: "((A :: ('s, 'a) nondet_monad) = B) = ((\r t s. (r, t) \ fst (A s) \ (r, t) \ fst (B s)) \ (\r t s. (r, t) \ fst (B s) \ (r, t) \ fst (A s)) \ (\x. snd (A x) = snd (B x)))" - apply (auto intro!: set_eqI prod_eqI) - done + by (auto intro!: set_eqI prod_eqI) -declare in_monad [monad_eq] -declare in_bindE [monad_eq] +declare in_monad[monad_eq] +declare in_bindE[monad_eq] (* Test *) lemma "returnOk 3 = liftE (return 3)" diff --git a/lib/Monads/nondet/Nondet_MonadEq_Lemmas.thy b/lib/Monads/nondet/Nondet_MonadEq_Lemmas.thy new file mode 100644 index 0000000000..51f8052402 --- /dev/null +++ b/lib/Monads/nondet/Nondet_MonadEq_Lemmas.thy @@ -0,0 +1,232 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Lemmas that support the monad_eq method. Either things that go into the[monad_eq] set + or that are needed for such lemmas. Usually about "snd (f s)" or "x \ fst (f s)" where f + is a nondet monad. + + If you are planning to use the monad_eq method, this is the theory you should import. 
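+   Typical members of the monad_eq set proved here include snd_bind, in_bind_split
+   and snd_catch.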
+ + See Nondet_MonadEq.thy for definition and description of the method. *) + +theory Nondet_MonadEq_Lemmas + imports Nondet_MonadEq +begin + +lemma snd_return[monad_eq]: + "\ snd (return a b)" + unfolding return_def by simp + +lemma snd_throwError[monad_eq]: + "\ snd (throwError e s)" + unfolding throwError_def by (simp add: snd_return) + +lemma snd_lift_Inr[monad_eq]: + "snd (lift b (Inr r) t) = snd (b r t)" + unfolding lift_def by simp + +lemma snd_lift_Inl[monad_eq]: + "\ snd (lift b (Inl r) t)" + unfolding lift_def by (simp add: snd_throwError) + +lemma snd_fail[monad_eq]: + "snd (fail s)" + by (clarsimp simp: fail_def) + +lemma not_snd_bindD: + "\ \ snd ((a >>= b) s); (rv, s') \ fst (a s) \ \ \ snd (a s) \ \ snd (b rv s')" + by (fastforce simp: bind_def) + +lemma not_snd_bindI1: + "\ snd ((a >>= b) s) \ \ snd (a s)" + by (fastforce simp: bind_def) + +lemma not_snd_bindI2: + "\ \ snd ((a >>= b) s); (rv, s') \ fst (a s) \ \ \ snd (b rv s')" + by (fastforce simp: bind_def) + +lemma in_returns[monad_eq]: + "(r, s) \ fst (return r s)" + "(Inr r, s) \ fst (returnOk r s)" + by (simp add: in_monad)+ + +lemma fst_return: + "fst (return v s) = {(v, s)}" + by (simp add: return_def) + +lemma in_bind_split[monad_eq]: + "(rv \ fst ((f >>= g) s)) = (\rv'. rv' \ fst (f s) \ rv \ fst (g (fst rv') (snd rv')))" + by (cases rv) (fastforce simp: in_bind) + +lemma Inr_in_liftE_simp[monad_eq]: + "((Inr rv, x) \ fst (liftE fn s)) = ((rv, x) \ fst (fn s))" + by (simp add: in_monad) + +lemma gets_the_member: + "(x, s') \ fst (gets_the f s) = (f s = Some x \ s' = s)" + by (cases "f s"; simp add: gets_the_def simpler_gets_def bind_def in_assert_opt) + +lemma fst_throwError_returnOk: + "fst (throwError e s) = {(Inl e, s)}" + "fst (returnOk v s) = {(Inr v, s)}" + by (simp add: throwError_def returnOk_def return_def)+ + +lemma not_snd_bindE_I1: + "\ snd ((a >>=E b) s) \ \ snd (a s)" + unfolding bindE_def + by (erule not_snd_bindI1) + +lemma snd_assert[monad_eq]: + "snd (assert P s) = (\ P)" + by (clarsimp simp: fail_def return_def assert_def) + +lemma not_snd_assert : + "(\ snd (assert P s)) = P" + by (metis snd_assert) + +lemma snd_assert_opt[monad_eq]: + "snd (assert_opt f s) = (f = None)" + by (monad_eq simp: assert_opt_def split: option.splits) + +declare in_assert_opt[monad_eq] + +lemma not_snd_bindD': + "\\ snd ((a >>= b) s); \ snd (a s) \ (rv, s') \ fst (a s)\ \ \ snd (a s) \ \ snd (b rv s')" + by (metis not_snd_bindI1 not_snd_bindI2) + +lemma snd_bind[monad_eq]: + "snd ((a >>= b) s) = (snd (a s) \ (\r s'. (r, s') \ fst (a s) \ snd (b r s')))" + apply (clarsimp simp add: bind_def Bex_def image_def) + apply (subst surjective_pairing, subst prod.inject, force) + done + +lemma in_lift[monad_eq]: + "(rv, s') \ fst (lift M v s) = + (case v of Inl x \ rv = Inl x \ s' = s + | Inr x \ (rv, s') \ fst (M x s))" + by (clarsimp simp: lift_def throwError_def return_def split: sum.splits) + +lemma snd_lift[monad_eq]: + "snd (lift M a b) = (\x. a = Inr x \ snd (M x b))" + by (clarsimp simp: lift_def throwError_def return_def split: sum.splits) + +lemma snd_bindE[monad_eq]: + "snd ((a >>=E b) s) = (snd (a s) \ (\r s'. (r, s') \ fst (a s) \ (\a. r = Inr a \ snd (b a s'))))" + unfolding bindE_def + by monad_eq + +lemma snd_get[monad_eq]: + "snd (get s) = False" + by (simp add: get_def) + +lemma snd_gets[monad_eq]: + "snd (gets f s) = False" + by (simp add: gets_def snd_bind snd_get snd_return) + +lemma in_handleE'[monad_eq]: + "((rv, s') \ fst ((f g) s)) = + ((\ex. rv = Inr ex \ (Inr ex, s') \ fst (f s)) \ + (\rv' s''. 
(rv, s') \ fst (g rv' s'') \ (Inl rv', s'') \ fst (f s)))" + unfolding handleE'_def return_def + by (simp add: in_bind_split) (fastforce split: sum.splits) + +lemma in_handleE[monad_eq]: + "(a, b) \ fst ((A B) s) = + ((\x. a = Inr x \ (Inr x, b) \ fst (A s)) \ + (\r t. (Inl r, t) \ fst (A s) \ (a, b) \ fst (B r t)))" + unfolding handleE_def + by (monad_eq split: sum.splits) blast + +lemma snd_handleE'[monad_eq]: + "snd ((A B) s) = (snd (A s) \ (\r s'. (r, s')\fst (A s) \ (\a. r = Inl a \ snd (B a s'))))" + unfolding handleE'_def + by (monad_eq simp: Bex_def split: sum.splits) fastforce + +lemma snd_handleE[monad_eq]: + "snd ((A B) s) = (snd (A s) \ (\r s'. (r, s')\fst (A s) \ (\a. r = Inl a \ snd (B a s'))))" + unfolding handleE_def + by (rule snd_handleE') + +lemma snd_liftM[monad_eq, simp]: + "snd (liftM t f s) = snd (f s)" + by (auto simp: liftM_def bind_def return_def) + +declare in_liftE[monad_eq] + +lemma snd_liftE[monad_eq]: + "snd ((liftE x) s) = snd (x s)" + by (clarsimp simp: liftE_def snd_bind snd_return) + +lemma snd_returnOk[monad_eq]: + "\ snd (returnOk x s)" + by (clarsimp simp: returnOk_def return_def) + +lemma snd_when[monad_eq]: + "snd (when P M s) = (P \ snd (M s))" + by (clarsimp simp: when_def return_def) + +lemma in_condition[monad_eq]: + "(a, b) \ fst (condition C L R s) = ((C s \ (a, b) \ fst (L s)) \ (\C s \ (a, b) \ fst (R s)))" + by (rule condition_split) + +lemma snd_condition[monad_eq]: + "snd (condition C L R s) = ((C s \ snd (L s)) \ (\C s \ snd (R s)))" + by (rule condition_split) + +declare snd_fail [simp] + +declare snd_returnOk [simp, monad_eq] + +lemma in_catch[monad_eq]: + "(r, t) \ fst ((M E) s) + = ((Inr r, t) \ fst (M s) \ (\r' s'. ((Inl r', s') \ fst (M s)) \ (r, t) \ fst (E r' s')))" + apply (rule iffI; clarsimp simp: catch_def in_bind in_return split: sum.splits) + apply (metis sumE) + apply fastforce + done + +lemma snd_catch[monad_eq]: + "snd ((M E) s) + = (snd (M s) \ (\r' s'. ((Inl r', s') \ fst (M s)) \ snd (E r' s')))" + by (force simp: catch_def snd_bind snd_return split: sum.splits) + +declare in_get[monad_eq] + +lemma returnOk_cong: + "\ \s. B a s = B' a s \ \ ((returnOk a) >>=E B) = ((returnOk a) >>=E B')" + by monad_eq + +lemma in_state_assert [monad_eq, simp]: + "(rv, s') \ fst (state_assert P s) = (rv = () \ s' = s \ P s)" + by (monad_eq simp: state_assert_def) + metis + +lemma snd_state_assert[monad_eq]: + "snd (state_assert P s) = (\ P s)" + by (monad_eq simp: state_assert_def Bex_def) + +lemma in_select[monad_eq]: + "(rv, s') \ fst (select S s) = (s' = s \ rv \ S)" + by (fastforce simp: select_def) + +lemma snd_select[monad_eq]: + "\ snd (select S s)" + by (clarsimp simp: select_def) + +lemma snd_put[monad_eq]: + "\ snd (put t s)" + by (clarsimp simp: put_def) + +lemma snd_modify[monad_eq]: + "\ snd (modify t s)" + by (clarsimp simp: modify_def put_def get_def bind_def) + +lemma snd_gets_the[monad_eq]: + "snd (gets_the X s) = (X s = None)" + by (monad_eq simp: gets_the_def gets_def get_def) + +end diff --git a/lib/Monads/nondet/Nondet_Monad_Equations.thy b/lib/Monads/nondet/Nondet_Monad_Equations.thy new file mode 100644 index 0000000000..ecc5e7c57a --- /dev/null +++ b/lib/Monads/nondet/Nondet_Monad_Equations.thy @@ -0,0 +1,593 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Equations between monads. Conclusions of the form "f = g" where f and g are monads. + Should not be Hoare triples (those go into a different theory). 
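+   A typical example is exec_modify below, which evaluates a bind against a
+   concrete state:  (modify f >>= g) s = g () (f s).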
*) + +theory Nondet_Monad_Equations + imports + Nondet_Empty_Fail + Nondet_No_Fail + Nondet_MonadEq_Lemmas +begin + +lemmas assertE_assert = assertE_liftE + +lemma assert_def2: + "assert v = assert_opt (if v then Some () else None)" + by (cases v; simp add: assert_def assert_opt_def) + +lemma return_returnOk: + "return (Inr x) = returnOk x" + unfolding returnOk_def by simp + +lemma exec_modify: + "(modify f >>= g) s = g () (f s)" + by (simp add: bind_def simpler_modify_def) + +lemma bind_return_eq: + "(a >>= return) = (b >>= return) \ a = b" + by clarsimp + +lemmas bind_then_eq = arg_cong2[where f=bind, OF _ refl] + +lemma bindE_bind_linearise: + "((f >>=E g) >>= h) = + (f >>= case_sum (h o Inl) (\rv. g rv >>= h))" + apply (simp add: bindE_def bind_assoc) + apply (rule ext, rule bind_apply_cong, rule refl) + apply (simp add: lift_def throwError_def split: sum.split) + done + +lemma throwError_bind: + "(throwError e >>= f) = (f (Inl e))" + by (simp add: throwError_def) + +lemma bind_bindE_assoc: + "((f >>= g) >>=E h) + = f >>= (\rv. g rv >>=E h)" + by (simp add: bindE_def bind_assoc) + +lemma returnOk_bind: + "returnOk v >>= f = (f (Inr v))" + by (simp add: returnOk_def) + +lemma liftE_bind: + "(liftE m >>= m') = (m >>= (\rv. m' (Inr rv)))" + by (simp add: liftE_def) + +lemma catch_throwError: "catch (throwError ft) g = g ft" + by (simp add: catch_def throwError_bind) + +lemma cart_singleton_image: + "S \ {s} = (\v. (v, s)) ` S" + by auto + +lemma select_bind_eq2: + "\ v = v'; \x. x \ fst v \ f x s = g x s' \ \ + (select_f v >>= f) s = (select_f v' >>= g) s'" + by (simp add: select_f_def bind_def split_def + cart_singleton_image image_image + cong: image_cong) + +lemmas select_bind_eq = select_bind_eq2[OF refl] + +lemma select_f_singleton_return: + "select_f ({v}, False) = return v" + by (simp add: select_f_def return_def) + +lemma select_f_returns: + "select_f (return v s) = return (v, s)" + "select_f (get s) = return (s, s)" + "select_f (gets f s) = return (f s, s)" + "select_f (modify g s) = return ((), g s)" + by (simp add: select_f_def return_def get_def + simpler_gets_def simpler_modify_def)+ + +lemma select_eq_select_f: + "select S = select_f (S, False)" + by (simp add: select_def select_f_def) + +lemma select_f_select_f: + "select_f (select_f v s) = liftM (swp Pair s) (select_f v)" + apply (rule ext) + apply (simp add: select_f_def liftM_def swp_def + bind_def return_def split_def + image_image image_constant_conv) + apply fastforce + done + +lemma select_f_select: + "select_f (select S s) = liftM (swp Pair s) (select S)" + unfolding select_eq_select_f by (rule select_f_select_f) + +lemmas select_f_selects = select_f_select_f select_f_select + +lemma select_f_asserts: + "select_f (fail s) = fail" + "select_f (assert P s) = do assert P; return ((), s) od" + "select_f (assert_opt v s) = do v' \ assert_opt v; return (v', s) od" + by (simp add: select_f_def fail_def assert_def return_def bind_def + assert_opt_def split: if_split option.split)+ + +lemma liftE_bindE_handle: + "((liftE f >>=E (\x. g x)) h) + = f >>= (\x. g x h)" + by (simp add: liftE_bindE handleE_def handleE'_def + bind_assoc) + +lemma catch_liftE: + "catch (liftE g) h = g" + by (simp add: catch_def liftE_def) + +lemma catch_liftE_bindE: + "catch (liftE g >>=E (\x. f x)) h = g >>= (\x. 
catch (f x) h)" + by (simp add: liftE_bindE catch_def bind_assoc) + +lemma returnOk_catch_bind: + "catch (returnOk v) h >>= g = g v" + by (simp add: returnOk_liftE catch_liftE) + +lemma liftE_bindE_assoc: + "(liftE f >>=E g) >>= h = f >>= (\x. g x >>= h)" + by (simp add: liftE_bindE bind_assoc) + +lemma unlessE_throw_catch_If: + "catch (unlessE P (throwError e) >>=E f) g + = (if P then catch (f ()) g else g e)" + by (simp add: unlessE_def catch_throwError split: if_split) + +lemma whenE_bindE_throwError_to_if: + "whenE P (throwError e) >>=E (\_. b) = (if P then (throwError e) else b)" + unfolding whenE_def bindE_def + by (auto simp: lift_def throwError_def returnOk_def) + +lemma alternative_liftE_returnOk: + "(liftE m \ returnOk v) = liftE (m \ return v)" + by (simp add: liftE_def alternative_def returnOk_def bind_def return_def) + +lemma alternative_left_readonly_bind: + "\ \(=) s\ f \\rv. (=) s\; fst (f s) \ {} \ + \ alternative (f >>= (\x. g x)) h s + = (f >>= (\x. alternative (g x) h)) s" + apply (subgoal_tac "\x \ fst (f s). snd x = s") + apply (clarsimp simp: alternative_def bind_def split_def) + apply fastforce + apply clarsimp + apply (drule(1) use_valid, simp_all) + done + +lemma gets_the_return: + "(return x = gets_the f) = (\s. f s = Some x)" + apply (subst fun_eq_iff) + apply (simp add: return_def gets_the_def exec_gets + assert_opt_def fail_def + split: option.split) + apply auto + done + +lemma gets_the_returns: + "(return x = gets_the f) = (\s. f s = Some x)" + "(returnOk x = gets_the g) = (\s. g s = Some (Inr x))" + "(throwError x = gets_the h) = (\s. h s = Some (Inl x))" + by (simp_all add: returnOk_def throwError_def + gets_the_return) + +lemma gets_the_eq_bind: + "\ f = gets_the (fn_f o fn'); \rv. g rv = gets_the (fn_g rv o fn') \ + \ \fn. (f >>= g) = gets_the (fn o fn')" + apply clarsimp + apply (rule exI[where x="\s. case (fn_f s) of None \ None | Some v \ fn_g v s"]) + apply (simp add: gets_the_def bind_assoc exec_gets + assert_opt_def fun_eq_iff + split: option.split) + done + +lemma gets_the_eq_bindE: + "\ f = gets_the (fn_f o fn'); \rv. g rv = gets_the (fn_g rv o fn') \ + \ \fn. (f >>=E g) = gets_the (fn o fn')" + unfolding bindE_def + apply (erule gets_the_eq_bind[where fn_g="\rv s. case rv of Inl e \ Some (Inl e) | Inr v \ fn_g v s"]) + apply (simp add: lift_def gets_the_returns split: sum.split) + done + +lemma gets_the_fail: + "(fail = gets_the f) = (\s. f s = None)" + by (simp add: gets_the_def exec_gets assert_opt_def + fail_def return_def fun_eq_iff + split: option.split) + +lemma gets_the_asserts: + "(fail = gets_the f) = (\s. f s = None)" + "(assert P = gets_the g) = (\s. g s = (if P then Some () else None))" + "(assertE P = gets_the h) = (\s. h s = (if P then Some (Inr ()) else None))" + by (simp add: assert_def assertE_def gets_the_fail gets_the_returns + split: if_split)+ + +lemma ex_const_function: + "\f. \s. f (f' s) = v" + by force + +lemma gets_the_condsE: + "(\fn. whenE P f = gets_the (fn o fn')) + = (P \ (\fn. f = gets_the (fn o fn')))" + "(\fn. unlessE P g = gets_the (fn o fn')) + = (\ P \ (\fn. g = gets_the (fn o fn')))" + by (simp add: whenE_def unlessE_def gets_the_returns ex_const_function + split: if_split)+ + +lemma let_into_return: + "(let f = x in m f) = (do f \ return x; m f od)" + by simp + +lemma liftME_return: + "liftME f (returnOk v) = returnOk (f v)" + by (simp add: liftME_def) + +lemma fold_bindE_into_list_case: + "(doE v \ f; case_list (g v) (h v) x odE) + = (case_list (doE v \ f; g v odE) (\x xs. 
doE v \ f; h v x xs odE) x)" + by (simp split: list.split) + +lemma whenE_liftE: + "whenE P (liftE f) = liftE (when P f)" + by (simp add: whenE_def when_def returnOk_liftE) + +lemma whenE_whenE_body: + "whenE P (throwError f) >>=E (\_. whenE Q (throwError f) >>=E r) = whenE (P \ Q) (throwError f) >>=E r" + apply (cases P) + apply (simp add: whenE_def) + apply simp + done + +lemma whenE_whenE_same: + "whenE P (throwError f) >>=E (\_. whenE P (throwError g) >>=E r) = whenE P (throwError f) >>=E r" + apply (cases P) + apply (simp add: whenE_def) + apply simp + done + +lemma exec_select_f_singleton: + "(select_f ({v},False) >>= f) = f v" + by (simp add: select_f_def bind_def) + +lemma maybe_fail_bind_fail: + "unless P fail >>= (\_. fail) = fail" + "when P fail >>= (\_. fail) = fail" + by (clarsimp simp: bind_def fail_def return_def + unless_def when_def)+ + +lemma select_singleton[simp]: + "select {x} = return x" + by (simp add: select_def return_def) + +lemma return_modify: + "return () = modify id" + by (simp add: return_def simpler_modify_def) + +lemma liftE_liftM_liftME: + "liftE (liftM f m) = liftME f (liftE m)" + by (simp add: liftE_liftM liftME_liftM liftM_def) + +lemma bind_return_unit: + "f = (f >>= (\x. return ()))" + by simp + +lemma modify_id_return: + "modify id = return ()" + by (simp add: simpler_modify_def return_def) + +lemma liftE_bind_return_bindE_returnOk: + "liftE (v >>= (\rv. return (f rv))) + = (liftE v >>=E (\rv. returnOk (f rv)))" + by (simp add: liftE_bindE, simp add: liftE_def returnOk_def) + +lemma bind_eqI: + "g = g' \ f >>= g = f >>= g'" by simp + +lemma unlessE_throwError_returnOk: + "(if P then returnOk v else throwError x) + = (unlessE P (throwError x) >>=E (\_. returnOk v))" + by (cases P, simp_all add: unlessE_def) + +lemma gets_the_bind_eq: + "\ f s = Some x; g x s = h s \ + \ (gets_the f >>= g) s = h s" + by (simp add: gets_the_def bind_assoc exec_gets assert_opt_def) + +lemma zipWithM_x_modify: + "zipWithM_x (\a b. modify (f a b)) as bs + = modify (\s. foldl (\s (a, b). f a b s) s (zip as bs))" + apply (simp add: zipWithM_x_def zipWith_def sequence_x_def) + apply (induct ("zip as bs")) + apply (simp add: simpler_modify_def return_def) + apply (rule ext) + apply (simp add: simpler_modify_def bind_def split_def) + done + +lemma bind_return_subst: + assumes r: "\r. \\s. P x = r\ f x \\rv s. 
Q rv = r\" + shows + "do a \ f x; + g (Q a) + od = + do _ \ f x; + g (P x) + od" +proof - + have "do a \ f x; + return (Q a) + od = + do _ \ f x; + return (P x) + od" + using r + apply (subst fun_eq_iff) + apply (fastforce simp: bind_def valid_def return_def) + done + hence "do a \ f x; + return (Q a) + od >>= g = + do _ \ f x; + return (P x) + od >>= g" + by (rule bind_cong, simp) + thus ?thesis + by simp +qed + +lemma assert2: + "(do v1 \ assert P; v2 \ assert Q; c od) + = (do v \ assert (P \ Q); c od)" + by (simp add: assert_def split: if_split) + +lemma assert_opt_def2: + "assert_opt v = (do assert (v \ None); return (the v) od)" + by (simp add: assert_opt_def split: option.split) + +lemma gets_assert: + "(do v1 \ assert v; v2 \ gets f; c v1 v2 od) + = (do v2 \ gets f; v1 \ assert v; c v1 v2 od)" + by (simp add: simpler_gets_def return_def assert_def fail_def bind_def + split: if_split) + +lemma modify_assert: + "(do v2 \ modify f; v1 \ assert v; c v1 od) + = (do v1 \ assert v; v2 \ modify f; c v1 od)" + by (simp add: simpler_modify_def return_def assert_def fail_def bind_def + split: if_split) + +lemma gets_fold_into_modify: + "do x \ gets f; modify (g x) od = modify (\s. g (f s) s)" + "do x \ gets f; _ \ modify (g x); h od + = do modify (\s. g (f s) s); h od" + by (simp_all add: fun_eq_iff modify_def bind_assoc exec_gets + exec_get exec_put) + +lemma gets_return_gets_eq: + "gets f >>= (\g. return (h g)) = gets (\s. h (f s))" + by (simp add: simpler_gets_def bind_def return_def) + +lemma gets_prod_comp: + "gets (case x of (a, b) \ f a b) = (case x of (a, b) \ gets (f a b))" + by (auto simp: split_def) + +lemma bind_assoc2: + "(do x \ a; _ \ b; c x od) = (do x \ (do x' \ a; _ \ b; return x' od); c x od)" + by (simp add: bind_assoc) + +lemma bind_assoc_return_reverse: + "do x \ f; + _ \ g x; + h x + od = + do x \ do x \ f; + _ \ g x; + return x + od; + h x + od" + by (simp only: bind_assoc return_bind) + +lemma if_bind: + "(if P then (a >>= (\_. b)) else return ()) = + (if P then a else return ()) >>= (\_. if P then b else return ())" + by (cases P; simp) + +lemma bind_liftE_distrib: "(liftE (A >>= (\x. B x))) = (liftE A >>=E (\x. liftE (\s. B x s)))" + by (clarsimp simp: liftE_def bindE_def lift_def bind_assoc) + +lemma condition_apply_cong: + "\ c s = c' s'; s = s'; \s. c' s \ l s = l' s ; \s. \ c' s \ r s = r' s \ \ condition c l r s = condition c' l' r' s'" + by monad_eq + +lemma condition_cong [cong, fundef_cong]: + "\ c = c'; \s. c' s \ l s = l' s; \s. \ c' s \ r s = r' s \ \ condition c l r = condition c' l' r'" + by monad_eq + +lemma lift_Inr [simp]: "(lift X (Inr r)) = (X r)" + by monad_eq + +lemma lift_Inl [simp]: "lift C (Inl a) = throwError a" + by monad_eq + +lemma returnOk_def2: "returnOk a = return (Inr a)" + by monad_eq + +lemma liftE_fail[simp]: "liftE fail = fail" + by monad_eq + +lemma catch_bind_distrib: + "do _ <- m h; f od = (doE m; liftE f odE (\x. do h x; f od))" + by (force simp: catch_def bindE_def bind_assoc liftE_def lift_def bind_def + split_def return_def throwError_def + split: sum.splits) + +lemma if_catch_distrib: + "((if P then f else g) h) = (if P then f h else g h)" + by (simp split: if_split) + +lemma will_throw_and_catch: + "f = throwError e \ (f (\_. 
g)) = g" + by (simp add: catch_def throwError_def) + +lemma catch_is_if: + "(doE x <- f; g x odE h) = + do + rv <- f; + if sum.isl rv then h (projl rv) else g (projr rv) h + od" + apply (simp add: bindE_def catch_def bind_assoc cong: if_cong) + apply (rule bind_cong, rule refl) + apply (clarsimp simp: lift_def throwError_def split: sum.splits) + done + +lemma liftE_K_bind: "liftE ((K_bind (\s. A s)) x) = K_bind (liftE (\s. A s)) x" + by clarsimp + +lemma monad_eq_split: + assumes "\r s. Q r s \ f r s = f' r s" + "\P\ g \\r s. Q r s\" + "P s" + shows "(g >>= f) s = (g >>= f') s" +proof - + have pre: "\rv s'. \(rv, s') \ fst (g s)\ \ f rv s' = f' rv s'" + using assms unfolding valid_def apply - + by (erule allE[where x=s]) auto + show ?thesis + by (simp add: bind_def image_def case_prod_unfold pre) +qed + +lemma monad_eq_split2: + assumes eq: " g' s = g s" + assumes tail:"\r s. Q r s \ f r s = f' r s" + and hoare: "\P\ g \\r s. Q r s\" "P s" + shows "(g >>= f) s = (g' >>= f') s" +proof - + have pre: "\aa bb. \(aa, bb) \ fst (g s)\ \ Q aa bb" + using hoare by (auto simp: valid_def) + show ?thesis + by (simp add:bind_def eq image_def case_prod_unfold pre surjective_pairing tail) +qed + +lemma monad_eq_split_tail: + "\f = g; a s = b s\ \ (a >>= f) s = ((b >>= g) s)" + by (simp add:bind_def) + +lemma double_gets_drop_regets: + "(do x \ gets f; + xa \ gets f; + m xa x + od) = + (do xa \ gets f; + m xa xa + od)" + by monad_eq + +lemma bind_inv_inv_comm_weak: + "\ \s. \(=) s\ f \\_. (=) s\; \s. \(=) s\ g \\_. (=) s\; + empty_fail f; empty_fail g \ \ + do x \ f; y \ g; n od = do y \ g; x \ f; n od" + apply (rule ext) + apply (fastforce simp: bind_def valid_def empty_fail_def split_def image_def) + done + +lemma state_assert_false[simp]: + "state_assert (\_. False) = fail" + by monad_eq + +lemma condition_fail_rhs: + "condition C X fail = (state_assert C >>= (\_. X))" + by (monad_eq simp: Bex_def) + +lemma condition_swap: + "condition C A B = condition (\s. \ C s) B A" + by monad_eq auto + +lemma condition_fail_lhs: + "condition C fail X = (state_assert (\s. \ C s) >>= (\_. X))" + by (metis condition_fail_rhs condition_swap) + +lemma condition_bind_fail[simp]: + "(condition C A B >>= (\_. fail)) = condition C (A >>= (\_. fail)) (B >>= (\_. fail))" + by monad_eq blast + +lemma bind_fail_propagates: + "empty_fail A \ A >>= (\_. fail) = fail" + by (monad_eq simp: empty_fail_def) fastforce + +lemma simple_bind_fail [simp]: + "(state_assert X >>= (\_. fail)) = fail" + "(modify M >>= (\_. fail)) = fail" + "(return X >>= (\_. fail)) = fail" + "(gets X >>= (\_. fail)) = fail" + by (auto intro!: bind_fail_propagates) + +lemma bind_inv_inv_comm: + "\ \P. \P\ f \\_. P\; \P. \P\ g \\_. P\; + empty_fail f; empty_fail g \ \ + do x \ f; y \ g; n x y od = do y \ g; x \ f; n x y od" + apply (rule ext) + apply (rule trans[where s="(do (x, y) \ do x \ f; y \ (\_. g s) ; (\_. return (x, y) s) od; + n x y od) s" for s]) + apply (simp add: bind_assoc) + apply (intro bind_apply_cong, simp_all)[1] + apply (metis in_inv_by_hoareD) + apply (simp add: return_def bind_def) + apply (metis in_inv_by_hoareD) + apply (rule trans[where s="(do (x, y) \ do y \ g; x \ (\_. f s) ; (\_. 
return (x, y) s) od; + n x y od) s" for s, rotated]) + apply (simp add: bind_assoc) + apply (intro bind_apply_cong, simp_all)[1] + apply (metis in_inv_by_hoareD) + apply (simp add: return_def bind_def) + apply (metis in_inv_by_hoareD) + apply (rule bind_apply_cong, simp_all) + apply (clarsimp simp: bind_def split_def return_def) + apply (auto | drule(1) empty_failD3)+ + done + +lemma bind_known_operation_eq: + "\ no_fail P f; \Q\ f \\rv s. rv = x \ s = t\; P s; Q s; empty_fail f \ + \ (f >>= g) s = g x t" + apply (drule(1) no_failD) + apply (subgoal_tac "fst (f s) = {(x, t)}") + apply (clarsimp simp: bind_def) + apply (fastforce simp: valid_def empty_fail_def) + done + +lemma assert_opt_If: + "assert_opt v = If (v = None) fail (return (the v))" + by (simp add: assert_opt_def split: option.split) + +lemma if_to_top_of_bind: + "(bind (If P x y) z) = If P (bind x z) (bind y z)" + by (simp split: if_split) + +lemma if_to_top_of_bindE: + "(bindE (If P x y) z) = If P (bindE x z) (bindE y z)" + by (simp split: if_split) + +lemma modify_modify: + "(do x \ modify f; modify (g x) od) = modify (g () o f)" + by (simp add: bind_def simpler_modify_def) + +lemmas modify_modify_bind = + arg_cong2[where f=bind, OF modify_modify refl, simplified bind_assoc] + +lemma put_then_get[unfolded K_bind_def]: + "do put s; get od = do put s; return s od" + by (simp add: put_def bind_def get_def return_def) + +lemmas put_then_get_then = + put_then_get[THEN bind_then_eq, simplified bind_assoc return_bind] + +lemma select_empty_bind[simp]: + "select {} >>= f = select {}" + by (simp add: select_def bind_def) + +end \ No newline at end of file diff --git a/lib/Monads/nondet/Nondet_More_VCG.thy b/lib/Monads/nondet/Nondet_More_VCG.thy new file mode 100644 index 0000000000..664efd4d99 --- /dev/null +++ b/lib/Monads/nondet/Nondet_More_VCG.thy @@ -0,0 +1,744 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Partial correctness Hoare logic lemmas over the nondet monad. Hoare triples, lifting lemmas, etc. + If it doesn't contain a Hoare triple it likely doesn't belong in here. *) + +theory Nondet_More_VCG + imports + Nondet_VCG + Nondet_In_Monad +begin + +lemma hoare_take_disjunct: + "\P\ f \\rv s. P' rv s \ (False \ P'' rv s)\ + \ \P\ f \P''\" + by (erule hoare_strengthen_post, simp) + +lemma hoare_post_add: + "\P\ S \\r s. R r s \ Q r s\ \ \P\ S \Q\" + by (erule hoare_strengthen_post, simp) + +lemma hoare_post_addE: + "\P\ f \\_ s. R s \ Q s\, \T\ \ \P\ f \\_ s. Q s\, \T\" + by (erule hoare_strengthen_postE; simp) + +lemma hoare_pre_add: + "(\s. P s \ R s) \ (\P\ f \Q\ \ \P and R\ f \Q\)" + apply (subst iff_conv_conj_imp) + by(intro conjI impI; rule hoare_weaken_pre, assumption, clarsimp) + +lemma hoare_pre_addE: + "(\s. P s \ R s) \ (\P\ f \Q\, \S\ \ \P and R\ f \Q\, \S\)" + apply (subst iff_conv_conj_imp) + by(intro conjI impI; rule hoare_weaken_preE, assumption, clarsimp) + +lemma hoare_name_pre_state: + "\ \s. P s \ \(=) s\ f \Q\ \ \ \P\ f \Q\" + by (clarsimp simp: valid_def) + +lemma hoare_name_pre_stateE: + "\\s. P s \ \(=) s\ f \Q\, \E\\ \ \P\ f \Q\, \E\" + by (clarsimp simp: validE_def2) + +lemma hoare_vcg_if_lift_strong: + "\ \P'\ f \P\; \\s. \ P' s\ f \\rv s. \ P rv s\; \Q'\ f \Q\; \R'\ f \R\ \ \ + \\s. if P' s then Q' s else R' s\ f \\rv s. if P rv s then Q rv s else R rv s\" + + "\ \P'\ f \P\; \\s. \ P' s\ f \\rv s. \ P rv s\; \Q'\ f \ Q\; \R'\ f \R\ \ \ + \\s. if P' s then Q' s else R' s\ f \\rv s. 
(if P rv s then Q rv else R rv) s\" + by (wpsimp wp: hoare_vcg_imp_lift' | assumption | fastforce)+ + +lemma hoare_vcg_imp_lift_pre_add: + "\ \P and Q\ f \\rv s. R rv s\; f \\s. \ Q s\ \ \ \P\ f \\rv s. Q s \ R rv s\" + apply (rule hoare_weaken_pre) + apply (rule hoare_vcg_imp_lift') + apply fastforce + apply fastforce + apply (clarsimp simp: pred_conj_def valid_def) + done + +lemma hoare_pre_tautI: + "\ \A and P\ a \B\; \A and not P\ a \B\ \ \ \A\ a \B\" + by (fastforce simp: valid_def split_def pred_conj_def pred_neg_def) + +lemma hoare_lift_Pf_pre_conj: + assumes P: "\x. \\s. Q x s\ m \P x\" + assumes f: "\P. \\s. P (g s) \ R s\ m \\_ s. P (f s)\" + shows "\\s. Q (g s) s \ R s\ m \\rv s. P (f s) rv s\" + apply (clarsimp simp: valid_def) + apply (rule use_valid [OF _ P], simp) + apply (rule use_valid [OF _ f], simp, simp) + done + +lemmas hoare_lift_Pf4 = hoare_lift_Pf_pre_conj[where R=\, simplified] +lemmas hoare_lift_Pf3 = hoare_lift_Pf4[where f=f and g=f for f] +lemmas hoare_lift_Pf2 = hoare_lift_Pf3[where P="\f _. P f" for P] +lemmas hoare_lift_Pf = hoare_lift_Pf2[where Q=P and P=P for P] + +lemmas hoare_lift_Pf3_pre_conj = hoare_lift_Pf_pre_conj[where f=f and g=f for f] +lemmas hoare_lift_Pf2_pre_conj = hoare_lift_Pf3_pre_conj[where P="\f _. P f" for P] +lemmas hoare_lift_Pf_pre_conj' = hoare_lift_Pf2_pre_conj[where Q=P and P=P for P] + +lemma hoare_if_r_and: + "\P\ f \\r. if R r then Q r else Q' r\ + = \P\ f \\r s. (R r \ Q r s) \ (\R r \ Q' r s)\" + by (fastforce simp: valid_def) + +lemma hoare_convert_imp: + "\ \\s. \ P s\ f \\rv s. \ Q s\; \R\ f \S\ \ + \ \\s. P s \ R s\ f \\rv s. Q s \ S rv s\" + apply (simp only: imp_conv_disj) + apply (erule(1) hoare_vcg_disj_lift) + done + +lemma hoare_vcg_ex_lift_R: + "\ \v. \P v\ f \Q v\,- \ \ \\s. \v. P v s\ f \\rv s. \v. Q v rv s\,-" + apply (simp add: validE_R_def validE_def) + apply (rule hoare_strengthen_post, erule hoare_vcg_ex_lift) + apply (auto split: sum.split) + done + +lemma hoare_case_option_wpR: + "\\P\ f None \Q\,-; \x. \P' x\ f (Some x) \Q' x\,-\ + \ \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\,-" + by (cases v) auto + +lemma hoare_vcg_conj_liftE_R: + "\ \P\ f \P'\,-; \Q\ f \Q'\,- \ \ \P and Q\ f \\rv s. P' rv s \ Q' rv s\, -" + apply (simp add: validE_R_def validE_def valid_def split: sum.splits) + apply blast + done + +lemma K_valid[wp]: + "\K P\ f \\_. K P\" + by (simp add: valid_def) + +lemma hoare_exI_tuple: + "\P\ f \\(rv,rv') s. Q x rv rv' s\ \ \P\ f \\(rv,rv') s. \x. Q x rv rv' s\" + by (fastforce simp: valid_def) + +lemma hoare_ex_all: + "(\x. \P x\ f \Q\) = \\s. \x. P x s\ f \Q\" + apply (rule iffI) + apply (fastforce simp: valid_def)+ + done + +lemma hoare_imp_eq_substR: + "\P\ f \Q\,- \ \P\ f \\rv s. rv = x \ Q x s\,-" + by (fastforce simp add: valid_def validE_R_def validE_def split: sum.splits) + +lemma hoare_split_bind_case_sum: + assumes x: "\rv. \R rv\ g rv \Q\" + "\rv. \S rv\ h rv \Q\" + assumes y: "\P\ f \S\,\R\" + shows "\P\ f >>= case_sum g h \Q\" + apply (rule bind_wp[OF _ y[unfolded validE_def]]) + apply (wpsimp wp: x split: sum.splits) + done + +lemma hoare_split_bind_case_sumE: + assumes x: "\rv. \R rv\ g rv \Q\,\E\" + "\rv. \S rv\ h rv \Q\,\E\" + assumes y: "\P\ f \S\,\R\" + shows "\P\ f >>= case_sum g h \Q\,\E\" + apply (unfold validE_def) + apply (rule bind_wp[OF _ y[unfolded validE_def]]) + apply (wpsimp wp: x[unfolded validE_def] split: sum.splits) + done + +lemma assertE_sp: + "\P\ assertE Q \\rv s. 
Q \ P s\,\E\" + by (clarsimp simp: assertE_def) wp + +lemma throwErrorE_E [wp]: + "\Q e\ throwError e -, \Q\" + by (simp add: validE_E_def) wp + +lemma gets_inv [simp]: + "\ P \ gets f \ \r. P \" + by (simp add: gets_def, wp) + +lemma select_inv: + "\ P \ select S \ \r. P \" + by wpsimp + +lemmas return_inv = hoare_return_drop_var + +lemma assert_inv: "\P\ assert Q \\r. P\" + unfolding assert_def + by (cases Q) simp+ + +lemma assert_opt_inv: "\P\ assert_opt Q \\r. P\" + unfolding assert_opt_def + by (cases Q) simp+ + +lemma case_options_weak_wp: + "\ \P\ f \Q\; \x. \P'\ g x \Q\ \ \ \P and P'\ case opt of None \ f | Some x \ g x \Q\" + apply (cases opt) + apply (clarsimp elim!: hoare_weaken_pre) + apply (rule hoare_weaken_pre [where Q=P']) + apply simp+ + done + +lemma case_option_wp_None_return: + assumes [wp]: "\x. \P' x\ f x \\_. Q\" + shows "\\x s. (Q and P x) s \ P' x s \ + \ \Q and (\s. opt \ None \ P (the opt) s)\ + (case opt of None \ return () | Some x \ f x) + \\_. Q\" + by (cases opt; wpsimp) + +lemma case_option_wp_None_returnOk: + assumes [wp]: "\x. \P' x\ f x \\_. Q\,\E\" + shows "\\x s. (Q and P x) s \ P' x s \ + \ \Q and (\s. opt \ None \ P (the opt) s)\ + (case opt of None \ returnOk () | Some x \ f x) + \\_. Q\,\E\" + by (cases opt; wpsimp) + +lemma list_cases_weak_wp: + assumes "\P_A\ a \Q\" + assumes "\x xs. \P_B\ b x xs \Q\" + shows + "\P_A and P_B\ + case ts of + [] \ a + | x#xs \ b x xs + \Q\" + apply (cases ts) + apply (simp, rule hoare_weaken_pre, rule assms, simp)+ + done + +lemmas hoare_FalseE_R = hoare_FalseE[where E="\\", folded validE_R_def] + +lemma hoare_vcg_if_lift2: + "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\ \ + \R\ f \\rv s. if P rv s then X rv s else Y rv s\" + + "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\ \ + \R\ f \\rv. if P' rv then X rv else Y rv\" + by (auto simp: valid_def split_def) + +lemma hoare_vcg_if_lift_ER: (* Required because of lack of rv in lifting rules *) + "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\, - \ + \R\ f \\rv s. if P rv s then X rv s else Y rv s\, -" + + "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\, - \ + \R\ f \\rv. if P' rv then X rv else Y rv\, -" + by (auto simp: valid_def validE_R_def validE_def split_def) + +lemma hoare_list_all_lift: + "(\r. r \ set xs \ \Q r\ f \\rv. Q r\) + \ \\s. list_all (\r. Q r s) xs\ f \\rv s. list_all (\r. Q r s) xs\" + apply (induct xs; simp) + apply wpsimp + apply (rule hoare_vcg_conj_lift; simp) + done + +lemma undefined_valid: "\\\ undefined \Q\" + by (rule hoare_pre_cont) + +lemma assertE_wp: + "\\s. F \ Q () s\ assertE F \Q\,\E\" + apply (rule hoare_pre) + apply (unfold assertE_def) + apply wp + apply simp + done + +lemma doesn't_grow_proof: + assumes y: "\s. finite (S s)" + assumes x: "\x. \\s. x \ S s \ P s\ f \\rv s. x \ S s\" + shows "\\s. card (S s) < n \ P s\ f \\rv s. card (S s) < n\" + apply (clarsimp simp: valid_def) + apply (erule le_less_trans[rotated]) + apply (rule card_mono[OF y]) + apply clarsimp + apply (rule ccontr) + apply (drule (2) use_valid[OF _ x, OF _ conjI]) + apply simp + done + +lemma hoare_vcg_propE_R: + "\\s. P\ f \\rv s. P\, -" + by (simp add: validE_R_def validE_def valid_def split_def split: sum.split) + +lemma set_preserved_proof: + assumes y: "\x. \\s. Q s \ x \ S s\ f \\rv s. x \ S s\" + assumes x: "\x. \\s. Q s \ x \ S s\ f \\rv s. x \ S s\" + shows "\\s. Q s \ P (S s)\ f \\rv s. 
P (S s)\" + apply (clarsimp simp: valid_def) + by (metis (mono_tags, lifting) equalityI post_by_hoare subsetI x y) + +lemma set_shrink_proof: + assumes x: "\x. \\s. x \ S s\ f \\rv s. x \ S s\" + shows + "\\s. \S'. S' \ S s \ P S'\ + f + \\rv s. P (S s)\" + apply (clarsimp simp: valid_def) + apply (drule spec, erule mp) + apply (clarsimp simp: subset_iff) + apply (rule ccontr) + apply (drule(1) use_valid [OF _ x]) + apply simp + done + +lemma shrinks_proof: + assumes y: "\s. finite (S s)" + assumes x: "\x. \\s. x \ S s \ P s\ f \\rv s. x \ S s\" + assumes z: "\P\ f \\rv s. x \ S s\" + assumes w: "\s. P s \ x \ S s" + shows "\\s. card (S s) \ n \ P s\ f \\rv s. card (S s) < n\" + apply (clarsimp simp: valid_def) + apply (erule less_le_trans[rotated]) + apply (rule psubset_card_mono[OF y]) + apply (rule psubsetI) + apply clarsimp + apply (rule ccontr) + apply (drule (2) use_valid[OF _ x, OF _ conjI]) + apply simp + by (metis use_valid w z) + +lemma use_validE_R: + "\ (Inr r, s') \ fst (f s); \P\ f \Q\,-; P s \ \ Q r s'" + unfolding validE_R_def validE_def + by (frule(2) use_valid, simp) + +lemma valid_preservation_ex: + assumes x: "\x P. \\s. P (f s x :: 'b)\ m \\rv s. P (f s x)\" + shows "\\s. P (f s :: 'a \ 'b)\ m \\rv s. P (f s)\" + apply (clarsimp simp: valid_def) + apply (erule subst[rotated, where P=P]) + apply (rule ext) + apply (erule use_valid [OF _ x]) + apply simp + done + +lemma whenE_inv: + assumes a: "\P\ f \\_. P\" + shows "\P\ whenE Q f \\_. P\" + by (wpsimp wp: a) + +lemma whenE_throwError_wp: + "\\s. \ P \ Q s\ whenE P (throwError e) \\_. Q\, \\\\" + by wpsimp + +lemma ifM_throwError_returnOk: + "\Q\ test \\c s. \ c \ P s\ \ \Q\ ifM test (throwError e) (returnOk ()) \\_. P\, -" + unfolding ifM_def + apply (fold liftE_bindE) + apply wpsimp + apply assumption + apply simp + done + +lemma ifME_liftE: + "ifME (liftE test) a b = ifM test a b" + by (simp add: ifME_def ifM_def liftE_bindE) + +lemma gets_the_inv: "\P\ gets_the V \\rv. P\" by wpsimp + +lemma select_f_inv: + "\P\ select_f S \\_. P\" + by (simp add: select_f_def valid_def) + +lemmas state_unchanged = in_inv_by_hoareD [THEN sym] + +lemma validI: + assumes rl: "\s r s'. \ P s; (r, s') \ fst (S s) \ \ Q r s'" + shows "\P\ S \Q\" + unfolding valid_def using rl by safe + +lemma opt_return_pres_lift: + assumes x: "\v. \P\ f v \\rv. P\" + shows "\P\ case x of None \ return () | Some v \ f v \\rv. P\" + by (wpsimp wp: x) + +lemma valid_return_unit: + "\P\ f >>= (\_. return ()) \\r. Q\ \ \P\ f \\r. Q\" + by (auto simp: valid_def in_bind in_return Ball_def) + +lemma hoare_weak_lift_imp_conj: + "\ \Q\ m \Q'\; \R\ m \R'\ \ + \ \\s. (P \ Q s) \ R s\ m \\rv s. (P \ Q' rv s) \ R' rv s\" + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_weak_lift_imp) + apply assumption+ + done + +lemma hoare_eq_P: + assumes "\P. \P\ f \\_. P\" + shows "\(=) s\ f \\_. (=) s\" + by (rule assms) + +lemma hoare_validE_R_conj: + "\\P\ f \Q\, -; \P\ f \R\, -\ \ \P\ f \Q and R\, -" + by (simp add: valid_def validE_def validE_R_def Let_def split_def split: sum.splits) + +lemmas throwError_validE_R = throwError_wp [where E="\\", folded validE_R_def] + +lemma valid_case_option_post_wp: + "\\x. \P x\ f \\rv. Q x\\ \ + \\s. case ep of Some x \ P x s | _ \ True\ + f + \\rv s. case ep of Some x \ Q x s | _ \ True\" + by (cases ep, simp_all add: hoare_vcg_prop) + +lemma P_bool_lift: + assumes t: "\Q\ f \\r. Q\" + assumes f: "\\s. \Q s\ f \\r s. \Q s\" + shows "\\s. P (Q s)\ f \\r s. 
P (Q s)\" + apply (clarsimp simp: valid_def) + apply (rule back_subst[where P=P], assumption) + apply (rule iffI) + apply (erule (1) use_valid [OF _ t]) + apply (rule classical) + apply (drule (1) use_valid [OF _ f]) + apply simp + done + +lemmas fail_inv = hoare_fail_any[where Q="\_. P" and P=P for P] + +lemma gets_sp: "\P\ gets f \\rv. P and (\s. f s = rv)\" + by (wp, simp) + +lemma hoare_Ball_helper: + assumes x: "\x. \P x\ f \Q x\" + assumes y: "\P. \\s. P (S s)\ f \\rv s. P (S s)\" + shows "\\s. \x \ S s. P x s\ f \\rv s. \x \ S s. Q x rv s\" + apply (clarsimp simp: valid_def) + apply (drule bspec, erule back_subst[where P="\A. x\A" for x]) + apply (erule post_by_hoare[OF y, rotated]) + apply (rule refl) + apply (erule (1) post_by_hoare[OF x]) + done + +lemma handy_prop_divs: + assumes x: "\P. \\s. P (Q s) \ S s\ f \\rv s. P (Q' rv s)\" + "\P. \\s. P (R s) \ S s\ f \\rv s. P (R' rv s)\" + shows "\\s. P (Q s \ R s) \ S s\ f \\rv s. P (Q' rv s \ R' rv s)\" + "\\s. P (Q s \ R s) \ S s\ f \\rv s. P (Q' rv s \ R' rv s)\" + apply (clarsimp simp: valid_def + elim!: subst[rotated, where P=P]) + apply (rule use_valid [OF _ x(1)], assumption) + apply (rule use_valid [OF _ x(2)], assumption) + apply simp + apply (clarsimp simp: valid_def + elim!: subst[rotated, where P=P]) + apply (rule use_valid [OF _ x(1)], assumption) + apply (rule use_valid [OF _ x(2)], assumption) + apply simp + done + +lemma hoare_as_subst: + "\ \P. \\s. P (fn s)\ f \\rv s. P (fn s)\; + \v :: 'a. \P v\ f \Q v\ \ \ + \\s. P (fn s) s\ f \\rv s. Q (fn s) rv s\" + by (rule hoare_lift_Pf3) + +lemmas hoare_vcg_ball_lift = hoare_vcg_const_Ball_lift + +lemma hoare_set_preserved: + assumes x: "\x. \fn' x\ m \\rv. fn x\" + shows "\\s. set xs \ {x. fn' x s}\ m \\rv s. set xs \ {x. fn x s}\" + apply (induct xs) + apply simp + apply wp + apply simp + apply (rule hoare_vcg_conj_lift) + apply (rule x) + apply assumption + done + +lemma hoare_ex_pre: (* safe, unlike hoare_vcg_ex_lift *) + "(\x. \P x\ f \Q\) \ \\s. \x. P x s\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_ex_pre_conj: + "\\x. \\s. P x s \ P' s\ f \Q\\ + \ \\s. (\x. P x s) \ P' s\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_conj_lift_inv: + "\\P\ f \Q\; \\s. P' s \ I s\ f \\rv. I\; + \s. P s \ P' s\ + \ \\s. P s \ I s\ f \\rv s. Q rv s \ I s\" + by (fastforce simp: valid_def) + +lemma hoare_in_monad_post: + assumes x: "\P. \P\ f \\x. P\" + shows "\\\ f \\rv s. (rv, s) \ fst (f s)\" + apply (clarsimp simp: valid_def) + apply (rule back_subst[where P="\s. x\fst (f s)" for x], assumption) + apply (simp add: state_unchanged[OF x]) + done + +lemma list_case_throw_validE_R: + "\ \y ys. xs = y # ys \ \P\ f y ys \Q\,- \ \ + \P\ case xs of [] \ throwError e | x # xs \ f x xs \Q\,-" + apply (cases xs, simp_all) + apply wp + done + +lemma validE_R_sp: + assumes x: "\P\ f \Q\,-" + assumes y: "\x. \Q x\ g x \R\,-" + shows "\P\ f >>=E (\x. g x) \R\,-" + by (rule hoare_pre, wp x y, simp) + +lemma valid_set_take_helper: + "\P\ f \\rv s. \x \ set (xs rv s). Q x rv s\ + \ \P\ f \\rv s. \x \ set (take (n rv s) (xs rv s)). Q x rv s\" + apply (erule hoare_strengthen_post) + apply (clarsimp dest!: in_set_takeD) + done + +lemma whenE_throwError_sp: + "\P\ whenE Q (throwError e) \\rv s. 
\ Q \ P s\, -" + apply (simp add: whenE_def validE_R_def) + apply (intro conjI impI; wp) + done + +lemma weaker_hoare_ifE: + assumes x: "\P \ a \Q\,\E\" + assumes y: "\P'\ b \Q\,\E\" + shows "\P and P'\ if test then a else b \Q\,\E\" + apply (rule hoare_weaken_preE) + apply (wp x y) + apply simp + done + +lemma wp_split_const_if: + assumes x: "\P\ f \Q\" + assumes y: "\P'\ f \Q'\" + shows "\\s. (G \ P s) \ (\ G \ P' s)\ f \\rv s. (G \ Q rv s) \ (\ G \ Q' rv s)\" + by (cases G; simp add: x y) + +lemma wp_split_const_if_R: + assumes x: "\P\ f \Q\,-" + assumes y: "\P'\ f \Q'\,-" + shows "\\s. (G \ P s) \ (\ G \ P' s)\ f \\rv s. (G \ Q rv s) \ (\ G \ Q' rv s)\,-" + by (cases G; simp add: x y) + +lemma hoare_disj_division: + "\ P \ Q; P \ \R\ f \S\; Q \ \T\ f \S\ \ + \ \\s. (P \ R s) \ (Q \ T s)\ f \S\" + apply safe + apply (rule hoare_pre_imp) + prefer 2 + apply simp + apply simp + apply (rule hoare_pre_imp) + prefer 2 + apply simp + apply simp + done + +lemma hoare_grab_asm: + "\ G \ \P\ f \Q\ \ \ \\s. G \ P s\ f \Q\" + by (cases G, simp+) + +lemma hoare_grab_asm2: + "\P' \ \\s. P s \ R s\ f \Q\\ + \ \\s. P s \ P' \ R s\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_grab_exs: + assumes x: "\x. P x \ \P'\ f \Q\" + shows "\\s. \x. P x \ P' s\ f \Q\" + apply (clarsimp simp: valid_def) + apply (erule(2) use_valid [OF _ x]) + done + +lemma hoare_prop_E: "\\rv. P\ f -,\\rv s. P\" + unfolding validE_E_def + by (rule hoare_pre, wp, simp) + +lemma hoare_vcg_conj_lift_R: + "\ \P\ f \Q\,-; \R\ f \S\,- \ + \ \\s. P s \ R s\ f \\rv s. Q rv s \ S rv s\,-" + apply (simp add: validE_R_def validE_def) + apply (drule(1) hoare_vcg_conj_lift) + apply (erule hoare_strengthen_post) + apply (clarsimp split: sum.splits) + done + +lemma hoare_walk_assmsE: + assumes x: "\P\ f \\rv. P\" and y: "\s. P s \ Q s" and z: "\P\ g \\rv. Q\" + shows "\P\ doE x \ f; g odE \\rv. Q\" + apply (wp z) + apply (simp add: validE_def) + apply (rule hoare_strengthen_post [OF x]) + apply (auto simp: y split: sum.splits) + done + +lemma univ_wp: + "\\s. \(rv, s') \ fst (f s). Q rv s'\ f \Q\" + by (simp add: valid_def) + +lemma univ_get_wp: + assumes x: "\P. \P\ f \\rv. P\" + shows "\\s. \(rv, s') \ fst (f s). s = s' \ Q rv s'\ f \Q\" + apply (rule hoare_pre_imp[OF _ univ_wp]) + apply clarsimp + apply (drule bspec, assumption, simp) + apply (drule mp) + apply (simp add: state_unchanged[OF x]) + apply simp + done + +lemma other_hoare_in_monad_post: + assumes x: "\P. \P\ fn \\rv. P\" + shows "\\s. \(v, s) \ fst (fn s). F v = v\ fn \\v s'. (F v, s') \ fst (fn s')\" + proof - + have P: "\v s. (F v = v) \ (v, s) \ fst (fn s) \ (F v, s) \ fst (fn s)" + by simp + show ?thesis + apply (rule hoare_post_imp [OF P], assumption) + apply (rule hoare_pre_imp) + defer + apply (rule hoare_vcg_conj_lift) + apply (rule univ_get_wp [OF x]) + apply (rule hoare_in_monad_post [OF x]) + apply clarsimp + apply (drule bspec, assumption, simp) + done + qed + +lemma weak_if_wp: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ + \P and P'\ f \\r. if C r then Q r else Q' r\" + by (auto simp add: valid_def split_def) + +lemma weak_if_wp': + "\P\ f \\r. Q r and Q' r\ \ + \P\ f \\r. if C r then Q r else Q' r\" + by (auto simp add: valid_def split_def) + +lemma bindE_split_recursive_asm: + assumes x: "\x s'. \ (Inr x, s') \ fst (f s) \ \ \\s. B x s \ s = s'\ g x \C\, \E\" + shows "\A\ f \B\, \E\ \ \\st. 
A st \ st = s\ f >>=E g \C\, \E\" + apply (clarsimp simp: validE_def valid_def bindE_def in_bind lift_def) + apply (erule allE, erule(1) impE) + apply (drule(1) bspec, simp) + apply (clarsimp simp: in_throwError split: sum.splits) + apply (drule x) + apply (clarsimp simp: validE_def valid_def) + apply (drule(1) bspec, simp split: sum.splits) + done + +lemma validE_R_abstract_rv: + "\P\ f \\rv s. \rv'. Q rv' s\,- \ \P\ f \Q\,-" + by (erule hoare_strengthen_postE_R, simp) + +lemma validE_cases_valid: + "\P\ f \\rv s. Q (Inr rv) s\,\\rv s. Q (Inl rv) s\ + \ \P\ f \Q\" + apply (simp add: validE_def) + apply (erule hoare_strengthen_post) + apply (simp split: sum.split_asm) + done + +lemma liftM_pre: + assumes rl: "\\s. \ P s \ a \ \_ _. False \" + shows "\\s. \ P s \ liftM f a \ \_ _. False \" + unfolding liftM_def + apply (rule bind_wp_fwd) + apply (rule rl) + apply wp + done + +lemma hoare_gen_asm': + "(P \ \P'\ f \Q\) \ \P' and (\_. P)\ f \Q\" + apply (auto intro: hoare_assume_pre) + done + +lemma hoare_gen_asm_conj: + "(P \ \P'\ f \Q\) \ \\s. P' s \ P\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_add_K: + "\P\ f \Q\ \ \\s. P s \ I\ f \\rv s. Q rv s \ I\" + by (fastforce simp: valid_def) + +lemma valid_rv_lift: + "\P'\ f \\rv s. rv \ Q rv s\ \ \\s. P \ P' s\ f \\rv s. rv \ P \ Q rv s\" + by (fastforce simp: valid_def) + +lemma valid_imp_ex: + "\P\ f \\rv s. \x. rv \ Q rv s x\ \ \P\ f \\rv s. rv \ (\x. Q rv s x)\" + by (fastforce simp: valid_def) + +lemma valid_rv_split: + "\\P\ f \\rv s. rv \ Q s\; \P\ f \\rv s. \rv \ Q' s\\ + \ \P\ f \\rv s. if rv then Q s else Q' s\" + by (fastforce simp: valid_def) + +lemma hoare_rv_split: + "\\P\ f \\rv s. rv \ (Q rv s)\; \P\ f \\rv s. (\rv) \ (Q rv s)\\ + \ \P\ f \Q\" + apply (clarsimp simp: valid_def split_def) + by (metis (full_types) fst_eqD snd_conv) + +lemma combine_validE: + "\ \ P \ x \ Q \,\ E \; \ P' \ x \ Q' \,\ E' \ \ + \ \ P and P' \ x \ \r. (Q r) and (Q' r) \,\\r. (E r) and (E' r) \" + apply (clarsimp simp: validE_def valid_def split: sum.splits) + apply (erule allE, erule (1) impE)+ + apply (drule (1) bspec)+ + apply clarsimp + done + +lemma valid_case_prod: + "\ \x y. valid (P x y) (f x y) Q \ \ valid (case_prod P v) (case_prod (\x y. f x y) v) Q" + by (simp add: split_def) + +lemma validE_case_prod: + "\ \x y. validE (P x y) (f x y) Q E \ \ validE (case_prod P v) (case_prod (\x y. f x y) v) Q E" + by (simp add: split_def) + +lemma valid_pre_satisfies_post: + "\ \s r' s'. P s \ Q r' s' \ \ \ P \ m \ Q \" + by (clarsimp simp: valid_def) + +lemma validE_pre_satisfies_post: + "\ \s r' s'. P s \ Q r' s'; \s r' s'. P s \ R r' s' \ \ \ P \ m \ Q \,\ R \" + by (clarsimp simp: validE_def2 split: sum.splits) + +lemma hoare_validE_R_conjI: + "\ \P\ f \Q\, - ; \P\ f \Q'\, - \ \ \P\ f \\rv s. Q rv s \ Q' rv s\, -" + by (clarsimp simp: Ball_def validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_validE_E_conjI: + "\ \P\ f -, \Q\ ; \P\ f -, \Q'\ \ \ \P\ f -, \\rv s. Q rv s \ Q' rv s\" + by (clarsimp simp: Ball_def validE_E_def validE_def valid_def split: sum.splits) + +lemma validE_R_post_conjD1: + "\P\ f \\r s. Q r s \ R r s\,- \ \P\ f \Q\,-" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma validE_R_post_conjD2: + "\P\ f \\r s. 
Q r s \ R r s\,- \ \P\ f \R\,-" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma throw_opt_wp[wp]: + "\if v = None then E ex else Q (the v)\ throw_opt ex v \Q\,\E\" + unfolding throw_opt_def by wpsimp auto + +lemma hoare_name_pre_state2: + "(\s. \P and ((=) s)\ f \Q\) \ \P\ f \Q\" + by (auto simp: valid_def intro: hoare_name_pre_state) + +lemma returnOk_E': "\P\ returnOk r -,\E\" + by wpsimp + +lemma throwError_R': "\P\ throwError e \Q\,-" + by wpsimp + +end \ No newline at end of file diff --git a/lib/Monads/nondet/Nondet_No_Fail.thy b/lib/Monads/nondet/Nondet_No_Fail.thy new file mode 100644 index 0000000000..63b7d11590 --- /dev/null +++ b/lib/Monads/nondet/Nondet_No_Fail.thy @@ -0,0 +1,235 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Lemmas about the no_fail predicate. *) + +theory Nondet_No_Fail + imports + Nondet_In_Monad + Nondet_VCG + WPSimp +begin + +subsection "Non-Failure" + +text \ + With the failure flag, we can formulate non-failure separately from validity. + A monad @{text m} does not fail under precondition @{text P}, if for no start + state that satisfies the precondition it sets the failure flag. +\ +definition no_fail :: "('s \ bool) \ ('s,'a) nondet_monad \ bool" where + "no_fail P m \ \s. P s \ \snd (m s)" + + +subsection \@{method wpc} setup\ + +lemma no_fail_pre[wp_pre]: + "\ no_fail P f; \s. Q s \ P s\ \ no_fail Q f" + by (simp add: no_fail_def) + +lemma wpc_helper_no_fail_final: + "no_fail Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_fail P f)" + by (clarsimp simp: wpc_helper_def elim!: no_fail_pre) + +wpc_setup "\m. no_fail P m" wpc_helper_no_fail_final + + +subsection \Bundles\ + +bundle no_pre = hoare_pre [wp_pre del] no_fail_pre [wp_pre del] + +bundle classic_wp_pre = + hoare_pre [wp_pre del] + all_classic_wp_combs[wp_comb del] + all_classic_wp_combs[wp_comb] + + +subsection \Lemmas\ + +lemma no_failD: + "\ no_fail P m; P s \ \ \(snd (m s))" + by (simp add: no_fail_def) + +lemma no_fail_return[simp, wp]: + "no_fail \ (return x)" + by (simp add: return_def no_fail_def) + +lemma no_fail_bind[wp]: + "\ \rv. no_fail (R rv) (g rv); \Q\ f \R\; no_fail P f \ \ no_fail (P and Q) (f >>= (\rv. g rv))" + unfolding no_fail_def bind_def + using post_by_hoare by fastforce + +lemma no_fail_get[simp, wp]: + "no_fail \ get" + by (simp add: get_def no_fail_def) + +lemma no_fail_put[simp, wp]: + "no_fail \ (put s)" + by (simp add: put_def no_fail_def) + +lemma no_fail_modify[wp,simp]: + "no_fail \ (modify f)" + by (wpsimp simp: modify_def) + +lemma no_fail_gets_simp[simp]: + "no_fail P (gets f)" + by (wpsimp simp: gets_def) + +lemma no_fail_gets[wp]: + "no_fail \ (gets f)" + by simp + +lemma no_fail_select[wp,simp]: + "no_fail \ (select S)" + by (simp add: no_fail_def select_def) + +lemma no_fail_alt[wp]: + "\ no_fail P f; no_fail Q g \ \ no_fail (P and Q) (f \ g)" + by (simp add: no_fail_def alternative_def) + +lemma no_fail_when[wp]: + "(P \ no_fail Q f) \ no_fail (if P then Q else \) (when P f)" + by (simp add: when_def) + +lemma no_fail_unless[wp]: + "(\P \ no_fail Q f) \ no_fail (if P then \ else Q) (unless P f)" + by (simp add: unless_def when_def) + +lemma no_fail_fail[simp, wp]: + "no_fail \ fail" + by (simp add: fail_def no_fail_def) + +lemma no_fail_assert[simp, wp]: + "no_fail (\_. P) (assert P)" + by (simp add: assert_def) + +lemma no_fail_assert_opt[simp, wp]: + "no_fail (\_. 
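+(* Example (illustrative sketch): with the [wp] rules above, wpsimp can typically
+   discharge non-failure of a small compound program in one step. The program and the
+   proof script below are illustrative suggestions only. *)
+lemma no_fail_gets_bind_example:
+  "no_fail \<top> (do x <- gets f; return (g x) od)"
+  by wpsimp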
P \ None) (assert_opt P)" + by (simp add: assert_opt_def split: option.splits) + +lemma no_fail_case_option[wp]: + assumes f: "no_fail P f" + assumes g: "\x. no_fail (Q x) (g x)" + shows "no_fail (if x = None then P else Q (the x)) (case_option f g x)" + by (clarsimp simp add: f g) + +lemma no_fail_if[wp]: + "\ P \ no_fail Q f; \P \ no_fail R g \ \ no_fail (if P then Q else R) (if P then f else g)" + by simp + +lemma no_fail_apply[wp]: + "no_fail P (f (g x)) \ no_fail P (f $ g x)" + by simp + +lemma no_fail_undefined[simp, wp]: + "no_fail \ undefined" + by (simp add: no_fail_def) + +lemma no_fail_returnOK[simp, wp]: + "no_fail \ (returnOk x)" + by (simp add: returnOk_def) + +lemma no_fail_assume_pre: + "(\s. P s \ no_fail P f) \ no_fail P f" + by (simp add: no_fail_def) + +lemma no_fail_liftM_eq[simp]: + "no_fail P (liftM f m) = no_fail P m" + by (auto simp: liftM_def no_fail_def bind_def return_def) + +lemma no_fail_select_f[wp]: + "no_fail (\s. \snd S) (select_f S)" + by (simp add: select_f_def no_fail_def) + +lemma no_fail_liftM[wp]: + "no_fail P m \ no_fail P (liftM f m)" + by simp + +lemma no_fail_pre_and: + "no_fail P f \ no_fail (P and Q) f" + by (erule no_fail_pre) simp + +lemma no_fail_spec: + "\ \s. no_fail (((=) s) and P) f \ \ no_fail P f" + by (simp add: no_fail_def) + +lemma no_fail_assertE[wp]: + "no_fail (\_. P) (assertE P)" + by (simp add: assertE_def) + +lemma no_fail_spec_pre: + "\ no_fail (((=) s) and P') f; \s. P s \ P' s \ \ no_fail (((=) s) and P) f" + by (erule no_fail_pre, simp) + +lemma no_fail_whenE[wp]: + "\ G \ no_fail P f \ \ no_fail (\s. G \ P s) (whenE G f)" + by (simp add: whenE_def) + +lemma no_fail_unlessE[wp]: + "\ \ G \ no_fail P f \ \ no_fail (\s. \ G \ P s) (unlessE G f)" + by (simp add: unlessE_def split: if_split) + +lemma no_fail_throwError[wp]: + "no_fail \ (throwError e)" + by (simp add: throwError_def) + +lemma no_fail_liftE[wp]: + "no_fail P f \ no_fail P (liftE f)" + unfolding liftE_def by wpsimp + +lemma no_fail_gets_the[wp]: + "no_fail (\s. f s \ None) (gets_the f)" + unfolding gets_the_def + by wpsimp + +lemma no_fail_lift: + "(\y. x = Inr y \ no_fail P (f y)) \ no_fail (\s. \isl x \ P s) (lift f x)" + unfolding lift_def + by (wpsimp wp: no_fail_throwError split: sum.splits | assumption)+ + +lemma validE_R_valid_eq: + "\Q\ f \R\, - = \Q\ f \\rv s. \ isl rv \ R (projr rv) s\" + unfolding validE_R_def validE_def valid_def + by (fastforce split: sum.splits prod.split) + +lemma no_fail_bindE[wp]: + "\ no_fail P f; \rv. no_fail (R rv) (g rv); \Q\ f \R\,- \ + \ no_fail (P and Q) (f >>=E g)" + unfolding bindE_def + by (wpsimp wp: no_fail_lift simp: validE_R_valid_eq | assumption)+ + +lemma no_fail_False[simp]: + "no_fail (\_. False) X" + by (clarsimp simp: no_fail_def) + +lemma no_fail_gets_map[wp]: + "no_fail (\s. f s p \ None) (gets_map f p)" + unfolding gets_map_def by wpsimp + +lemma no_fail_or: + "\no_fail P a; no_fail Q a\ \ no_fail (P or Q) a" + by (clarsimp simp: no_fail_def) + +lemma no_fail_state_assert[wp]: + "no_fail P (state_assert P)" + unfolding state_assert_def + by wpsimp + +lemma no_fail_condition[wp]: + "\no_fail Q A; no_fail R B\ \ no_fail (\s. (C s \ Q s) \ (\ C s \ R s)) (condition C A B)" + unfolding condition_def no_fail_def + by clarsimp + +lemma no_fail_ex_lift: + "(\x. no_fail (P x) f) \ no_fail (\s. \x. P x s) f" + by (clarsimp simp: no_fail_def) + +lemma no_fail_grab_asm: + "(G \ no_fail P f) \ no_fail (\s. 
G \ P s) f" + by (cases G; clarsimp) + +end diff --git a/lib/Monads/nondet/Nondet_No_Throw.thy b/lib/Monads/nondet/Nondet_No_Throw.thy new file mode 100644 index 0000000000..b33ed85ca4 --- /dev/null +++ b/lib/Monads/nondet/Nondet_No_Throw.thy @@ -0,0 +1,129 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Lemmas about no_throw. Usually should have a conclusion "no_throw P m". + Includes some monad equations that have no_throw as a main assumption. *) + +theory Nondet_No_Throw + imports + Nondet_While_Loop_Rules + Nondet_MonadEq_Lemmas +begin + +section "Basic exception reasoning" + +text \ + The predicates @{text no_throw} and @{text no_return} allow us to reason about functions in + the exception monad that never throw an exception or never return normally.\ + +definition no_throw :: "('s \ bool) \ ('s, 'e + 'a) nondet_monad \ bool" where + "no_throw P A \ \P\ A \\_ _. True\, \\_ _. False\" + +definition no_return :: "('a \ bool) \ ('a, 'b + 'c) nondet_monad \ bool" where + "no_return P A \ \P\ A \\_ _. False\, \\_ _. True\" + +(* Alternative definition of no_throw; easier to work with than unfolding validE. *) +lemma no_throw_def': + "no_throw P A = (\s. P s \ (\(r, t) \ fst (A s). (\x. r = Inr x)))" + by (clarsimp simp: no_throw_def validE_def2 split_def split: sum.splits) + + +subsection \@{text no_throw} rules\ + +lemma no_throw_returnOk[simp]: + "no_throw P (returnOk a)" + unfolding no_throw_def + by wp + +lemma no_throw_liftE[simp]: + "no_throw P (liftE x)" + by (wpsimp simp: liftE_def no_throw_def validE_def) + +lemma no_throw_bindE: + "\ no_throw A X; \a. no_throw B (Y a); \ A \ X \ \_. B \,\ \_ _. True \ \ + \ no_throw A (X >>=E Y)" + unfolding no_throw_def + using hoare_validE_cases bindE_wp_fwd by blast + +lemma no_throw_bindE_simple: + "\ no_throw \ L; \x. no_throw \ (R x) \ \ no_throw \ (L >>=E R)" + using hoareE_TrueI no_throw_bindE by blast + +lemma no_throw_handleE_simple: + "\ \x. no_throw \ L \ no_throw \ (R x) \ \ no_throw \ (L R)" + by (fastforce simp: no_throw_def' handleE_def handleE'_def validE_def valid_def bind_def return_def + split: sum.splits) + +lemma no_throw_handle2: + "\ \a. no_throw Y (B a); \ X \ A \ \_ _. True \,\ \_. Y \ \ \ no_throw X (A B)" + by (fastforce simp: no_throw_def' handleE'_def validE_def valid_def bind_def return_def + split: sum.splits) + +lemma no_throw_handle: + "\ \a. no_throw Y (B a); \ X \ A \ \_ _. True \,\ \_. Y \ \ \ no_throw X (A B)" + unfolding handleE_def + by (rule no_throw_handle2) + +lemma no_throw_fail[simp]: + "no_throw P fail" + by (clarsimp simp: no_throw_def) + +lemma bindE_fail_propagates: + "\ no_throw \ A; empty_fail A \ \ A >>=E (\_. fail) = fail" + by (fastforce simp: no_throw_def validE_def valid_def bind_def empty_fail_def + bindE_def split_def fail_def Nondet_Monad.lift_def throwError_def + split: sum.splits) + +lemma whileLoopE_nothrow: + "\ \x. no_throw \ (B x) \ \ no_throw \ (whileLoopE C B x)" + unfolding no_throw_def + by (fastforce intro!: validE_whileLoopE intro: hoare_chainE) + +lemma handleE'_nothrow_lhs: + "no_throw \ L \ no_throw \ (L R)" + unfolding no_throw_def + using handleE'_wp[rotated] by fastforce + +lemma handleE'_nothrow_rhs: + "\ \x. 
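+(* Example (illustrative sketch): composing no_throw_bindE_simple with the liftE and
+   returnOk rules above gives a computation that provably never throws. The program and
+   the proof script are illustrative suggestions only. *)
+lemma no_throw_liftE_bindE_example:
+  "no_throw \<top> (liftE (gets f) >>=E (\<lambda>x. returnOk (g x)))"
+  by (rule no_throw_bindE_simple; simp)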
no_throw \ (R x) \ \ no_throw \ (L R)" + unfolding no_throw_def + by (metis hoareE_TrueI no_throw_def no_throw_handle2) + +lemma handleE_nothrow_lhs: + "no_throw \ L \ no_throw \ (L R)" + by (metis handleE'_nothrow_lhs handleE_def) + +lemma handleE_nothrow_rhs: + "\ \x. no_throw \ (R x) \ \ no_throw \ (L R)" + by (metis no_throw_handleE_simple) + +lemma condition_nothrow: + "\ no_throw \ L; no_throw \ R \ \ no_throw \ (condition C L R)" + by (clarsimp simp: condition_def no_throw_def validE_def2) + +lemma no_throw_Inr: + "\ x \ fst (A s); no_throw P A; P s \ \ \y. fst x = Inr y" + by (fastforce simp: no_throw_def' split: sum.splits) + +lemma no_throw_handleE': + "no_throw \ A \ (A B) = A" + apply (rule monad_eqI; monad_eq) + apply (fastforce dest: no_throw_Inr) + apply (metis (lifting) fst_conv no_throw_Inr) + apply (fastforce dest: no_throw_Inr) + done + +lemma no_throw_handleE: + "no_throw \ A \ (A B) = A" + unfolding handleE_def + by (auto simp: no_throw_handleE') + +lemma bindE_handleE_join: + "no_throw \ A \ (A >>=E (\x. (B x) C)) = ((A >>=E B C))" + by (monad_eq simp: Bex_def Ball_def no_throw_def') blast + +end \ No newline at end of file diff --git a/lib/Monads/nondet/Nondet_README.thy b/lib/Monads/nondet/Nondet_README.thy new file mode 100644 index 0000000000..4cf9de4a1a --- /dev/null +++ b/lib/Monads/nondet/Nondet_README.thy @@ -0,0 +1,142 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Nondet_README + imports + Nondet_More_VCG + Nondet_Det + Nondet_No_Throw + Nondet_Monad_Equations + WP_README +begin + +\ \Nondeterministic State Monad with Failure\ + +text \ +The type of the nondeterministic monad, @{typ "('s, 'a) nondet_monad"}, can be found in +@{theory Monads.Nondet_Monad}, along with definitions of fundamental monad primitives and +Haskell-like do-syntax. + +The basic type of the nondeterministic state monad with failure is very similar to the +normal state monad. Instead of a pair consisting of result and new state, we return a set +of these pairs coupled with a failure flag. Each element in the set is a potential result +of the computation. The flag is @{const True} if there is an execution path in the +computation that may have failed. Conversely, if the flag is @{const False}, none of the +computations resulting in the returned set can have failed. + +The following lemmas are basic examples of those primitives and that syntax.\ + +lemma "do x \ return 1; + return (2::nat); + return x + od = + return 1 >>= + (\x. return (2::nat) >>= + K_bind (return x))" + by (rule refl) + +lemma "do x \ return 1; + return 2; + return x + od = return 1" + by simp + +text \ +We also provide a variant of the nondeterministic monad extended with exceptional return +values. This is available by using the type @{typ "('s, 'e + 'a) nondet_monad"}, with +primitives and syntax existing for it as well\ + +lemma "doE x \ returnOk 1; + returnOk (2::nat); + returnOk x + odE = + returnOk 1 >>=E + (\x. returnOk (2::nat) >>=E + K_bind (returnOk x))" + by (rule refl) + +text \ +A Hoare logic for partial correctness for the nondeterministic state monad and the +exception monad is introduced in @{theory Monads.Nondet_VCG}. This comes along with a +family of lemmas and tools which together perform the role of a VCG (verification +condition generator). + +The Hoare logic is defined by the @{const valid} predicate, which is a triple of +precondition, monadic function, and postcondition. 
A version is also provided for the +exception monad, in the form of @{const validE}. Instead of one postcondition it has two: +one for normal and one for exceptional results. + +@{theory Monads.Nondet_VCG} also proves a collection of rules about @{const valid}, in +particular lifting rules for the common operators and weakest precondition rules for the +monadic primitives. The @{method wp} tool automates the storage and use of this +collection of rules. For more details about @{method wp} see @{theory Monads.WP_README}. + +The following is an example of one of these operator lifting rules and an example of a +relatively trivial Hoare triple being solved by @{method wp}.\ + +lemma hoare_vcg_if_split: + "\P \ \Q\ f \S\; \P \ \R\ g \S\\ + \ \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" + by simp + +lemma + "\\s. odd (value s)\ + do + x <- gets value; + return (3 * x) + od + \\rv s. odd rv\" + by wpsimp + +text \ +Lemmas directly about the monad primitives can be found in @{theory Monads.Nondet_Lemmas} +and @{theory Monads.Nondet_Monad_Equations}. Many of these lemmas use @{method monad_eq}, +which is a tactic for solving monadic equalities.\ + +lemma + "(do x \ gets f; + xa \ gets f; + m xa x + od) = + (do xa \ gets f; + m xa xa + od)" + by monad_eq + +lemma + "snd (gets_the X s) = (X s = None)" + by (monad_eq simp: gets_the_def gets_def get_def) + +text \ +While working with the monad primitives you sometimes end up needing to reason directly +on the states, with goals containing terms in the form of @{term "(v, s') \ fst (m s)"}. +Lemmas for handling these goals exist in @{theory Monads.Nondet_In_Monad}, with +@{thm in_monad} being particularly useful.\ + +lemma + "(r, s) \ fst (return r s)" + by (simp add: in_monad) + +text \ +There are additional properties of nondeterministic monadic functions that are often +useful. These include: + @{const no_fail} - a monad does not fail when starting in a state that satisfies a + given precondition. + @{const empty_fail} - if a monad returns an empty set of results then it must also have + the failure flag set. + @{const no_throw} - an exception monad does not throw an exception when starting in a + state that satisfies a given precondition. + @{const det} - a monad is deterministic and returns exactly one non-failing state.\ + +text \ +Variants of the basic validity definition are sometimes useful when working with the +nondeterministic monad. + @{const validNF} - a total correctness extension combining @{const valid} and + @{const no_fail}. 
+ @{const exs_valid} - a dual to @{const valid} showing that after a monad executes there + exists at least one state that satisfies a given condition.\ + +end diff --git a/lib/Monad_WP/OptionMonadND.thy b/lib/Monads/nondet/Nondet_Reader_Option.thy similarity index 72% rename from lib/Monad_WP/OptionMonadND.thy rename to lib/Monads/nondet/Nondet_Reader_Option.thy index 36017208e6..918884bcd1 100644 --- a/lib/Monad_WP/OptionMonadND.thy +++ b/lib/Monads/nondet/Nondet_Reader_Option.thy @@ -1,19 +1,20 @@ (* + * Copyright 2024, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause *) -(* Option monad syntax plus the connection between the option monad and the nondet monad *) +(* Reader option monad syntax plus the connection between the reader option monad and the nondet monad *) -theory OptionMonadND +theory Nondet_Reader_Option imports - NonDetMonadLemmas - OptionMonad + Nondet_No_Fail + Reader_Option_VCG begin (* FIXME: remove this syntax, standardise on do {..} instead *) -(* Syntax defined here so we can reuse NonDetMonad definitions *) +(* Syntax defined here so we can reuse Nondet_Monad definitions *) syntax "_doO" :: "[dobinds, 'a] => 'a" ("(DO (_);// (_)//OD)" 100) @@ -23,48 +24,35 @@ translations "DO x <- a; e OD" == "a |>> (\x. e)" -lemma ogets_def: - "ogets f = (\s. Some (f s))" - by (clarsimp simp: asks_def obind_def) - -definition - ocatch :: "('s,('e + 'a)) lookup \ ('e \ ('s,'a) lookup) \ ('s, 'a) lookup" - (infix "" 10) -where - "f handler \ do { - x \ f; - case x of Inr b \ oreturn b | Inl e \ handler e - }" - - -definition - odrop :: "('s, 'e + 'a) lookup \ ('s, 'a) lookup" -where - "odrop f \ do { - x \ f; - case x of Inr b \ oreturn b | Inl e \ ofail - }" - -definition - osequence_x :: "('s, 'a) lookup list \ ('s, unit) lookup" -where - "osequence_x xs \ foldr (\x y. do { x; y }) xs (oreturn ())" - -definition - osequence :: "('s, 'a) lookup list \ ('s, 'a list) lookup" -where - "osequence xs \ let mcons = (\p q. p |>> (\x. q |>> (\y. 
oreturn (x#y)))) - in foldr mcons xs (oreturn [])" - -definition - omap :: "('a \ ('s,'b) lookup) \ 'a list \ ('s, 'b list) lookup" -where - "omap f xs \ osequence (map f xs)" - -definition - opt_cons :: "'a option \ 'a list \ 'a list" (infixr "o#" 65) -where - "opt_cons x xs \ case x of None \ xs | Some x' \ x' # xs" +lemma ovalid_K_bind_wp[wp]: + "ovalid P f Q \ ovalid P (K_bind f x) Q" + by simp + +lemma ovalidNF_K_bind_wp[wp]: + "ovalidNF P f Q \ ovalidNF P (K_bind f x) Q" + by simp + +lemma no_ofail_K_bind[wp]: + "no_ofail P f \ no_ofail P (K_bind f x)" + by simp + +lemma no_ofail_gets_the_eq: + "no_ofail P f \ no_fail P (gets_the (f :: ('s, 'a) lookup))" + by (auto simp: no_ofail_def no_fail_def gets_the_def gets_def + get_def assert_opt_def bind_def return_def fail_def + split: option.split) + +lemmas no_ofail_gets_the = + no_ofail_gets_the_eq[THEN iffD1] + + +(* Lemmas relating ovalid and valid *) +lemma ovalid_gets_the: + "ovalid P f Q \ \P\ gets_the f \Q\" + apply wpsimp + apply (fastforce dest: use_ovalid) + done + lemmas monad_simps = gets_the_def bind_def assert_def assert_opt_def @@ -84,24 +72,32 @@ lemma gets_the_obind: lemma gets_the_return: "gets_the (oreturn x) = return x" - by (simp add: monad_simps oreturn_def K_def) + by (simp add: monad_simps oreturn_def) lemma gets_the_fail: "gets_the ofail = fail" - by (simp add: monad_simps ofail_def K_def) + by (simp add: monad_simps ofail_def) + +lemma gets_the_ogets: + "gets_the (ogets s) = gets s" + by (clarsimp simp: monad_simps ogets_def) lemma gets_the_returnOk: "gets_the (oreturnOk x) = returnOk x" - by (simp add: monad_simps K_def oreturnOk_def returnOk_def) + by (simp add: monad_simps oreturnOk_def returnOk_def) lemma gets_the_throwError: "gets_the (othrow e) = throwError e" - by (simp add: monad_simps othrow_def throwError_def K_def) + by (simp add: monad_simps othrow_def throwError_def) lemma gets_the_assert: "gets_the (oassert P) = assert P" by (simp add: oassert_def assert_def gets_the_fail gets_the_return) +lemma gets_the_assert_opt: + "gets_the (oassert_opt P) = assert_opt P" + by (simp add: oassert_opt_def assert_opt_def gets_the_return gets_the_fail split: option.splits) + lemma gets_the_if_distrib: "gets_the (if P then f else g) = (if P then gets_the f else gets_the g)" by simp @@ -114,6 +110,15 @@ lemma gets_the_Some: "gets_the (\_. Some x) = return x" by (simp add: gets_the_def assert_opt_Some) +lemma gets_the_oapply2_comp: + "gets_the (oapply2 y x \ f) = gets_map (swp f y) x" + by (clarsimp simp: gets_map_def gets_the_def o_def gets_def) + +lemma gets_obind_bind_eq: + "(gets (f |>> (\x. g x))) = + (gets f >>= (\x. case x of None \ return None | Some y \ gets (g y)))" + by (auto simp: simpler_gets_def bind_def obind_def return_def split: option.splits) + lemma fst_assert_opt: "fst (assert_opt opt s) = (if opt = None then {} else {(the opt,s)})" by (clarsimp simp: assert_opt_def fail_def return_def split: option.split) @@ -125,8 +130,6 @@ lemmas omonad_simps [simp] = gets_the_throwError gets_the_assert gets_the_Some gets_the_oapply_comp -lemmas in_omonad = bind_eq_Some_conv in_obind_eq in_opt_map_eq in_opt_pred Let_def - section "Relation between option monad loops and non-deterministic monad loops." @@ -184,12 +187,12 @@ proof - note option_while'_None = this have "\s. owhile C B r s = None - \ whileLoop C (\a. gets_the (B a)) r s = ({}, True)" + \ whileLoop C (\a. 
gets_the (B a)) r s = ({}, True)" by (auto simp: whileLoop_def owhile_def option_while_def option_while'_THE gets_the_loop_terminates split: if_split_asm dest: option_while'_None wl'_Inl option_while'_inj) moreover have "\s r'. owhile C B r s = Some r' - \ whileLoop C (\a. gets_the (B a)) r s = ({(r', s)}, False)" + \ whileLoop C (\a. gets_the (B a)) r s = ({(r', s)}, False)" by (auto simp: whileLoop_def owhile_def option_while_def option_while'_THE gets_the_loop_terminates split: if_split_asm dest: wl'_Inl wl'_Inr option_while'_inj intro: option_while'_Some) ultimately diff --git a/lib/Monads/nondet/Nondet_Sat.thy b/lib/Monads/nondet/Nondet_Sat.thy new file mode 100644 index 0000000000..f49ff82b8e --- /dev/null +++ b/lib/Monads/nondet/Nondet_Sat.thy @@ -0,0 +1,149 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Nondet_Sat + imports + Nondet_Monad + WPSimp +begin + +section \Satisfiability\ + +text \ + The dual to validity: an existential instead of a universal quantifier for the post condition. + In refinement, it is often sufficient to know that there is one state that satisfies a condition.\ +definition exs_valid :: + "('a \ bool) \ ('a, 'b) nondet_monad \ ('b \ 'a \ bool) \ bool" + ("\_\ _ \\_\") where + "\P\ f \\Q\ \ \s. P s \ (\(rv, s') \ fst (f s). Q rv s')" + +text \The above for the exception monad\ +definition ex_exs_validE :: + "('a \ bool) \ ('a, 'e + 'b) nondet_monad \ ('b \ 'a \ bool) \ ('e \ 'a \ bool) \ bool" + ("\_\ _ \\_\, \_\") where + "\P\ f \\Q\, \E\ \ \P\ f \\\rv. case rv of Inl e \ E e | Inr v \ Q v\" + +text \ + Seen as predicate transformer, @{const exs_valid} is the so-called conjugate wp in the literature, + i.e. with + @{term "wp f Q \ \s. fst (f s) \ {(rv,s). Q rv s}"} and + @{term "cwp f Q \ not (wp f (not Q))"}, we get + @{prop "valid P f Q = (\s. P s \ wp f Q s)"} and + @{prop "exs_valid P f Q = (\s. P s \ cwp f Q s)"}. + + See also "Predicate Calculus and Program Semantics" by E. W. Dijkstra and C. S. Scholten.\ +experiment +begin + +definition + "wp f Q \ \s. fst (f s) \ {(rv,s). Q rv s}" + +definition + "cwp f Q \ not (wp f (not Q))" + +lemma + "exs_valid P f Q = (\s. P s \ cwp f Q s)" + unfolding exs_valid_def cwp_def wp_def by auto + +end + + +subsection \Set up for @{method wp}\ + +definition exs_postcondition where + "exs_postcondition P f \ \a b. \(rv, s) \ f a b. P rv s" + +lemma exs_valid_is_triple[wp_trip]: + "exs_valid P f Q = triple_judgement P f (exs_postcondition Q (\s f. fst (f s)))" + by (simp add: triple_judgement_def exs_postcondition_def exs_valid_def) + + +subsection \Rules\ + +lemma exs_hoare_post_imp: + "\\r s. Q r s \ R r s; \P\ a \\Q\\ \ \P\ a \\R\" + unfolding exs_valid_def by blast + +lemma use_exs_valid: + "\ \P\ f \\Q\; P s \ \ \(r, s') \ fst (f s). Q r s'" + by (simp add: exs_valid_def) + +lemma exs_valid_weaken_pre[wp_pre]: + "\ \P'\ f \\Q\; \s. P s \ P' s \ \ \P\ f \\Q\" + by (clarsimp simp: exs_valid_def) + +lemma exs_valid_chain: + "\ \P\ f \\Q\; \s. R s \ P s; \r s. Q r s \ S r s \ \ \R\ f \\S\" + by (fastforce simp: exs_valid_def Bex_def) + +lemma exs_valid_assume_pre: + "\ \s. P s \ \P\ f \\Q\ \ \ \P\ f \\Q\" + by (fastforce simp: exs_valid_def) + +lemma exs_valid_bind[wp_split]: + "\ \rv. \B rv\ g rv \\C\; \A\ f \\B\ \ \ \A\ f >>= (\rv. 
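+(* Example (illustrative sketch): existential validity only requires one witness
+   execution, so a nondeterministic choice that can return 0 satisfies a postcondition
+   demanding 0. The program and the proof script are illustrative suggestions only. *)
+lemma exs_valid_select_example:
+  "exs_valid (\<lambda>_. True) (select {0::nat, 1}) (\<lambda>rv _. rv = 0)"
+  by (fastforce simp: exs_valid_def select_def)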
g rv) \\C\" + by (clarsimp simp: exs_valid_def bind_def') + blast + +lemma exs_valid_return[wp]: + "\Q v\ return v \\Q\" + by (clarsimp simp: exs_valid_def return_def) + +lemma exs_valid_select[wp]: + "\\s. \r \ S. Q r s\ select S \\Q\" + by (clarsimp simp: exs_valid_def select_def) + +lemma exs_valid_alt[wp]: + "\ \P\ f \\Q\; \P'\ g \\Q\ \ \ \P or P'\ f \ g \\Q\" + by (fastforce simp: exs_valid_def alternative_def) + +lemma exs_valid_get[wp]: + "\\s. Q s s\ get \\ Q \" + by (clarsimp simp: exs_valid_def get_def) + +lemma exs_valid_gets[wp]: + "\\s. Q (f s) s\ gets f \\Q\" + by (clarsimp simp: gets_def) wp + +lemma exs_valid_put[wp]: + "\Q v\ put v \\Q\" + by (clarsimp simp: put_def exs_valid_def) + +lemma exs_valid_fail[wp]: + "\\s. False\ fail \\Q\" + unfolding fail_def exs_valid_def + by simp + +lemma exs_valid_assert[wp]: + "\\s. Q () s \ G\ assert G \\Q\" + unfolding assert_def + by (wpsimp | rule conjI)+ + +lemma exs_valid_state_assert[wp]: + "\\s. Q () s \ G s\ state_assert G \\Q\" + unfolding state_assert_def + by wp + +lemmas exs_valid_guard = exs_valid_state_assert + +lemma exs_valid_condition[wp]: + "\ \P\ l \\Q\; \P'\ r \\Q\ \ \ \\s. (C s \ P s) \ (\ C s \ P' s)\ condition C l r \\Q\" + by (clarsimp simp: condition_def exs_valid_def split: sum.splits) + +lemma gets_exs_valid: + "\(=) s\ gets f \\\r. (=) s\" + by (rule exs_valid_gets) + +lemma exs_valid_assert_opt[wp]: + "\\s. \x. G = Some x \ Q x s\ assert_opt G \\Q\" + by (clarsimp simp: assert_opt_def exs_valid_def return_def) + +lemma gets_the_exs_valid[wp]: + "\\s. \x. h s = Some x \ Q x s\ gets_the h \\Q\" + by (wpsimp simp: gets_the_def) + +end \ No newline at end of file diff --git a/lib/Monads/nondet/Nondet_Strengthen_Setup.thy b/lib/Monads/nondet/Nondet_Strengthen_Setup.thy new file mode 100644 index 0000000000..134fd162f5 --- /dev/null +++ b/lib/Monads/nondet/Nondet_Strengthen_Setup.thy @@ -0,0 +1,77 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Nondet_Strengthen_Setup + imports + Strengthen + Nondet_No_Fail + Nondet_VCG +begin + +section \Strengthen setup.\ + +context strengthen_implementation begin + +lemma strengthen_hoare [strg]: + "\\r s. st F (\) (Q r s) (R r s)\ + \ st F (\) (\P\ f \Q\) (\P\ f \R\)" + by (cases F, auto elim: hoare_strengthen_post) + +lemma strengthen_validE_R_cong[strg]: + "\\r s. st F (\) (Q r s) (R r s)\ + \ st F (\) (\P\ f \Q\, -) (\P\ f \R\, -)" + by (cases F, auto intro: hoare_strengthen_postE_R) + +lemma strengthen_validE_cong[strg]: + "\\r s. st F (\) (Q r s) (R r s); \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f \Q\, \S\) (\P\ f \R\, \T\)" + by (cases F, auto elim: hoare_strengthen_postE) + +lemma strengthen_validE_E_cong[strg]: + "\\r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f -, \S\) (\P\ f -, \T\)" + by (cases F, auto elim: hoare_strengthen_postE simp: validE_E_def) + +lemma wpfix_strengthen_hoare: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (Q' r s)\ + \ st F (\) (\P\ f \Q\) (\P'\ f \Q'\)" + by (cases F, auto elim: hoare_chain) + +lemma wpfix_strengthen_validE_R_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (Q' r s)\ + \ st F (\) (\P\ f \Q\, -) (\P'\ f \Q'\, -)" + by (cases F, auto elim: hoare_chainE simp: validE_R_def) + +lemma wpfix_strengthen_validE_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (R r s); + \r s. 
st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f \Q\, \S\) (\P'\ f \R\, \T\)" + by (cases F, auto elim: hoare_chainE) + +lemma wpfix_strengthen_validE_E_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f -, \S\) (\P'\ f -, \T\)" + by (cases F, auto elim: hoare_chainE simp: validE_E_def) + +lemma wpfix_no_fail_cong: + "\\s. st (\ F) (\) (P s) (P' s)\ + \ st F (\) (no_fail P f) (no_fail P' f)" + by (cases F, auto elim: no_fail_pre) + +lemmas nondet_wpfix_strgs = + wpfix_strengthen_validE_R_cong + wpfix_strengthen_validE_E_cong + wpfix_strengthen_validE_cong + wpfix_strengthen_hoare + wpfix_no_fail_cong + +end + +lemmas nondet_wpfix_strgs[wp_fix_strgs] + = strengthen_implementation.nondet_wpfix_strgs + +end \ No newline at end of file diff --git a/lib/Monads/nondet/Nondet_Total.thy b/lib/Monads/nondet/Nondet_Total.thy new file mode 100644 index 0000000000..6463d5a1f0 --- /dev/null +++ b/lib/Monads/nondet/Nondet_Total.thy @@ -0,0 +1,353 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Total correctness Hoare logic for the Nondet_Monad (= valid + no_fail) *) + +theory Nondet_Total + imports Nondet_No_Fail +begin + +section \Total correctness for @{text Nondet_Monad} and @{text Nondet_Monad} with exceptions\ + +subsection Definitions + +text \ + It is often desired to prove non-failure and a Hoare triple simultaneously, as the reasoning + is often similar. The following definitions allow such reasoning to take place.\ + +definition validNF :: + "('s \ bool) \ ('s,'a) nondet_monad \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /\_\!") where + "\P\ f \Q\! \ \P\ f \Q\ \ no_fail P f" + +lemma validNF_alt_def: + "\P\ f \Q\! = (\s. P s \ ((\(r', s') \ fst (f s). Q r' s') \ \ snd (f s)))" + by (fastforce simp: validNF_def valid_def no_fail_def) + +definition validE_NF :: + "('s \ bool) \ ('s, 'a + 'b) nondet_monad \ ('b \ 's \ bool) \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /(\_\,/ \_\!)") where + "\P\ f \Q\, \E\! \ \P\ f \Q\, \E\ \ no_fail P f" + +lemma validE_NF_alt_def: + "\P\ f \Q\, \E\! = \P\ f \\v s. case v of Inl e \ E e s | Inr r \ Q r s\!" + by (clarsimp simp: validE_NF_def validE_def validNF_def) + + +subsection \@{method wpc} setup\ + +lemma wpc_helper_validNF: + "\Q\ g \S\! \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ g \S\!" + unfolding wpc_helper_def + by clarsimp (metis hoare_weaken_pre no_fail_pre validNF_def) + +wpc_setup "\m. \P\ m \Q\!" wpc_helper_validNF + + +subsection \Basic @{const validNF} theorems\ + +lemma validNF_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \!) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') \ f \ Q' \!" + by (auto simp: valid_def validNF_def no_fail_def + split: prod.splits) + +lemma validE_NF_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \, \ \rv s. E s0 rv s \!) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') + \ (\rv s'. E s0 rv s' \ E' rv s') \ f \ Q' \, \ E' \!" + by (auto simp: validE_NF_def validE_def valid_def no_fail_def + split: prod.splits sum.splits) + +lemma validNF_conjD1: + "\ P \ f \ \rv s. Q rv s \ Q' rv s \! \ \ P \ f \ Q \!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_conjD2: + "\ P \ f \ \rv s. Q rv s \ Q' rv s \! \ \ P \ f \ Q' \!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF[intro?]: (* FIXME lib: should be validNFI *) + "\ \ P \ f \ Q \; no_fail P f \ \ \ P \ f \ Q \!" 
+ by (clarsimp simp: validNF_def) + +lemma validNFE: + "\ \ P \ f \ Q \!; \ \ P \ f \ Q \; no_fail P f \ \ R \ \ R" + by (clarsimp simp: validNF_def) + +lemma validNF_valid: + "\ \ P \ f \ Q \! \ \ \ P \ f \ Q \" + by (erule validNFE) + +lemma validNF_no_fail: + "\ \ P \ f \ Q \! \ \ no_fail P f" + by (erule validNFE) + +lemma validNF_not_failed: + "\ \ P \ f \ Q \!; P s \ \ \ snd (f s)" + by (clarsimp simp: validNF_def no_fail_def) + +lemma use_validNF: + "\ (r', s') \ fst (f s); \ P \ f \ Q \!; P s \ \ Q r' s'" + by (fastforce simp: validNF_def valid_def) + + +subsection \@{const validNF} weakest precondition rules\ + +lemma validNF_return[wp]: + "\ P x \ return x \ P \!" + by (wp validNF)+ + +lemma validNF_get[wp]: + "\ \s. P s s \ get \ P \!" + by (wp validNF)+ + +lemma validNF_put[wp]: + "\ \s. P () x \ put x \ P \!" + by (wp validNF)+ + +lemma validNF_K_bind[wp]: + "\ P \ x \ Q \! \ \ P \ K_bind x f \ Q \!" + by simp + +lemma validNF_fail[wp]: + "\ \s. False \ fail \ Q \!" + by (clarsimp simp: validNF_def fail_def no_fail_def) + +lemma validNF_prop[wp_unsafe]: + "\ no_fail (\s. P) f \ \ \ \s. P \ f \ \rv s. P \!" + by (wp validNF)+ + +lemma validNF_post_conj[intro!]: + "\ \ P \ a \ Q \!; \ P \ a \ R \! \ \ \ P \ a \ Q and R \!" + by (auto simp: validNF_def) + +lemma validNF_pre_disj[intro!]: + "\ \ P \ a \ R \!; \ Q \ a \ R \! \ \ \ P or Q \ a \ R \!" + by (rule validNF) (auto dest: validNF_valid validNF_no_fail intro: no_fail_or) + +text \ + Set up combination rules for @{method wp}, which also requires a @{text wp_trip} rule for + @{const validNF}.\ +definition validNF_property :: "('a \ 's \ bool) \ 's \ ('s,'a) nondet_monad \ bool" where + "validNF_property Q s b \ \ snd (b s) \ (\(r', s') \ fst (b s). Q r' s')" + +lemma validNF_is_triple[wp_trip]: + "validNF P f Q = triple_judgement P f (validNF_property Q)" + by (auto simp: validNF_def triple_judgement_def validNF_property_def no_fail_def valid_def) + +lemma validNF_weaken_pre[wp_pre]: + "\\Q\ a \R\!; \s. P s \ Q s\ \ \P\ a \R\!" + by (metis hoare_pre_imp no_fail_pre validNF_def) + +lemma validNF_post_comb_imp_conj: + "\ \P'\ f \Q\!; \P\ f \Q'\!; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def) + +lemma validNF_post_comb_conj_L: + "\ \P'\ f \Q\!; \P\ f \Q'\ \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_post_comb_conj_R: + "\ \P'\ f \Q\; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_post_comb_conj: + "\ \P'\ f \Q\!; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_if_split[wp_split]: + "\P \ \Q\ f \S\!; \ P \ \R\ g \S\!\ \ + \\s. (P \ Q s) \ (\ P \ R s)\ if P then f else g \S\!" + by simp + +lemma validNF_vcg_conj_lift: + "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce intro!: validNF_post_conj[unfolded pred_conj_def] intro: validNF_weaken_pre) + +lemma validNF_vcg_disj_lift: + "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" + by (auto simp: validNF_def no_fail_def intro!: hoare_vcg_disj_lift) + +lemma validNF_vcg_all_lift[wp]: + "\ \x. \P x\ f \Q x\! \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\!" + by (auto simp: validNF_def no_fail_def intro!: hoare_vcg_all_lift) + +lemma validNF_bind_wp[wp_split]: + "\ \x. \B x\ g x \C\!; \A\ f \B\! 
\ \ \A\ do x \ f; g x od \C\!" + unfolding validNF_def + by (auto intro: bind_wp no_fail_bind[where P=Q and Q=Q for Q, simplified]) + +lemmas validNF_bind_wp_fwd = validNF_bind_wp[rotated] + + +subsection "validNF compound rules" + +lemma validNF_state_assert[wp]: + "\ \s. P () s \ G s \ state_assert G \ P \!" + by (rule validNF; wpsimp) + +lemma validNF_modify[wp]: + "\ \s. P () (f s) \ modify f \ P \!" + by (rule validNF; wpsimp) + +lemma validNF_gets[wp]: + "\\s. P (f s) s\ gets f \P\!" + by (rule validNF; wpsimp) + +lemma validNF_condition[wp]: + "\ \ Q \ A \P\!; \ R \ B \P\!\ \ \\s. if C s then Q s else R s\ condition C A B \P\!" + by (erule validNFE)+ + (rule validNF; wpsimp) + +lemma validNF_assert[wp]: + "\ (\s. P) and (R ()) \ assert P \ R \!" + by (rule validNF; wpsimp) + +lemma validNF_false_pre: + "\ \_. False \ P \ Q \!" + by (rule validNF; wpsimp) + +lemma validNF_chain: + "\\P'\ a \R'\!; \s. P s \ P' s; \r s. R' r s \ R r s\ \ \P\ a \R\!" + by (fastforce simp: validNF_def valid_def no_fail_def Ball_def) + +lemma validNF_case_prod[wp]: + "\\x y. \P x y\ B x y \Q\!\ \ \case v of (x, y) \ P x y\ case v of (x, y) \ B x y \Q\!" + by (metis prod.exhaust split_conv) + +lemma validE_NF_case_prod[wp]: + "\ \a b. \P a b\ f a b \Q\, \E\! \ \ + \case x of (a, b) \ P a b\ case x of (a, b) \ f a b \Q\, \E\!" + unfolding validE_NF_alt_def + by (erule validNF_case_prod) + +lemma no_fail_is_validNF_True: + "no_fail P s = (\ P \ s \ \_ _. True \!)" + by (clarsimp simp: no_fail_def validNF_def valid_def) + + +subsection \@{const validNF} reasoning in the exception monad\ + +lemma validE_NF[intro?]: (* FIXME lib: should be validE_NFI *) + "\ \ P \ f \ Q \,\ E \; no_fail P f \ \ \ P \ f \ Q \,\ E \!" + by (clarsimp simp: validE_NF_def) + +lemma validE_NFE: + "\ \ P \ f \ Q \,\ E \!; \ \ P \ f \ Q \,\ E \; no_fail P f \ \ R \ \ R" + by (clarsimp simp: validE_NF_def) + +lemma validE_NF_valid: + "\ \ P \ f \ Q \,\ E \! \ \ \ P \ f \ Q \,\ E \" + by (rule validE_NFE) + +lemma validE_NF_no_fail: + "\ \ P \ f \ Q \,\ E \! \ \ no_fail P f" + by (rule validE_NFE) + +lemma validE_NF_weaken_pre[wp_pre]: + "\\Q\ a \R\,\E\!; \s. P s \ Q s\ \ \P\ a \R\,\E\!" + by (simp add: validE_NF_alt_def validNF_weaken_pre) + +lemma validE_NF_post_comb_conj_L: + "\ \P\ f \Q\, \E\!; \P'\ f \Q'\, \\_ _. True\ \ \ + \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\!" + unfolding validE_NF_alt_def + by (fastforce simp: validE_def validNF_def valid_def no_fail_def split: sum.splits) + +lemma validE_NF_post_comb_conj_R: + "\ \P\ f \Q\, \\_ _. True\; \P'\ f \Q'\, \E\! \ \ + \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\!" + unfolding validE_NF_alt_def validE_def validNF_def valid_def no_fail_def + by (fastforce split: sum.splits) + +lemma validE_NF_post_comb_conj: + "\ \P\ f \Q\, \E\!; \P'\ f \Q'\, \E\! \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\!" + unfolding validE_NF_alt_def validE_def validNF_def valid_def no_fail_def + by (fastforce split: sum.splits) + +lemma validE_NF_chain: + "\ \P'\ a \R'\,\E'\!; \s. P s \ P' s; \r' s'. R' r' s' \ R r' s'; + \r'' s''. E' r'' s'' \ E r'' s''\ \ + \\s. P s \ a \\r' s'. R r' s'\,\\r'' s''. E r'' s''\!" + by (fastforce simp: validE_NF_def validE_def2 no_fail_def Ball_def split: sum.splits) + +lemma validE_NF_bind_wp[wp]: + "\\x. \B x\ g x \C\, \E\!; \A\ f \B\, \E\!\ \ \A\ f >>=E (\x. g x) \C\, \E\!" + by (blast intro: validE_NF bindE_wp no_fail_pre no_fail_bindE validE_validE_R hoare_chainE + elim!: validE_NFE) + +lemma validNF_catch[wp]: + "\\x. 
\E x\ handler x \Q\!; \P\ f \Q\, \E\!\ \ \P\ f (\x. handler x) \Q\!" + unfolding validE_NF_alt_def catch_def lift_def throwError_def + by (clarsimp simp: validNF_return split: sum.splits elim!: validNF_bind_wp_fwd) + +lemma validNF_throwError[wp]: + "\E e\ throwError e \P\, \E\!" + by (unfold validE_NF_alt_def throwError_def o_def) wpsimp + +lemma validNF_returnOk[wp]: + "\P e\ returnOk e \P\, \E\!" + by (clarsimp simp: validE_NF_alt_def returnOk_def) wpsimp + +lemma validNF_whenE[wp]: + "(P \ \Q\ f \R\, \E\!) \ \if P then Q else R ()\ whenE P f \R\, \E\!" + unfolding whenE_def by wpsimp + +lemma validNF_nobindE[wp]: + "\ \B\ g \C\,\E\!; \A\ f \\r s. B s\,\E\! \ \ \A\ doE f; g odE \C\,\E\!" + by wpsimp + +text \ + Set up triple rules for @{term validE_NF} so that we can use @{method wp} combinator rules.\ +definition validE_NF_property :: + "('a \ 's \ bool) \ ('c \ 's \ bool) \ 's \ ('s, 'c+'a) nondet_monad \ bool" + where + "validE_NF_property Q E s b \ + \ snd (b s) \ (\(r', s') \ fst (b s). case r' of Inl x \ E x s' | Inr x \ Q x s')" + +lemma validE_NF_is_triple[wp_trip]: + "validE_NF P f Q E = triple_judgement P f (validE_NF_property Q E)" + by (fastforce simp: validE_NF_def validE_def2 no_fail_def triple_judgement_def + validE_NF_property_def + split: sum.splits) + +lemma validNF_cong: + "\ \s. P s = P' s; \s. P s \ m s = m' s; + \r' s' s. \ P s; (r', s') \ fst (m s) \ \ Q r' s' = Q' r' s' \ \ + (\P\ m \Q\!) = (\P'\ m' \Q'\!)" + by (fastforce simp: validNF_alt_def) + +lemma validE_NF_liftE[wp]: + "\P\ f \Q\! \ \P\ liftE f \Q\,\E\!" + by (wpsimp simp: validE_NF_alt_def liftE_def) + +lemma validE_NF_handleE'[wp]: + "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ + \P\ f (\x. handler x) \Q\,\E\!" + unfolding validE_NF_alt_def handleE'_def + apply (erule validNF_bind_wp_fwd) + apply (clarsimp split: sum.splits) + apply wpsimp + done + +lemma validE_NF_handleE[wp]: + "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ + \P\ f handler \Q\,\E\!" + unfolding handleE_def + by (metis validE_NF_handleE') + +lemma validE_NF_condition[wp]: + "\ \ Q \ A \P\,\ E \!; \ R \ B \P\,\ E \! \ \ + \\s. if C s then Q s else R s\ condition C A B \P\,\ E \!" + by (erule validE_NFE)+ (wpsimp wp: validE_NF) + +lemma hoare_assume_preNF: + "(\s. P s \ \P\ f \Q\!) \ \P\ f \Q\!" + by (simp add: validNF_alt_def) + +end \ No newline at end of file diff --git a/lib/Monads/nondet/Nondet_VCG.thy b/lib/Monads/nondet/Nondet_VCG.thy new file mode 100644 index 0000000000..b6ff25d648 --- /dev/null +++ b/lib/Monads/nondet/Nondet_VCG.thy @@ -0,0 +1,1453 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Nondet_VCG + imports + Nondet_Lemmas + WPSimp +begin + +section \Hoare Logic\ + +subsection \Validity\ + +text \ + This section defines a Hoare logic for partial correctness for + the nondeterministic state monad as well as the exception monad. + The logic talks only about the behaviour part of the monad and ignores + the failure flag. + + The logic is defined semantically. Rules work directly on the + validity predicate. + + In the nondeterministic state monad, validity is a triple of precondition, + monad, and postcondition. The precondition is a function from state to + bool (a state predicate), the postcondition is a function from return value + to state to bool. 
A triple is valid if for all states that satisfy the + precondition, all result values and result states that are returned by + the monad satisfy the postcondition. Note that if the computation returns + the empty set, the triple is trivially valid. This means @{term "assert P"} + does not require us to prove that @{term P} holds, but rather allows us + to assume @{term P}! Proving non-failure is done via a separate predicate and + calculus (see theory @{text Nondet_No_Fail}).\ +definition valid :: + "('s \ bool) \ ('s,'a) nondet_monad \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /\_\") where + "\P\ f \Q\ \ \s. P s \ (\(r,s') \ fst (f s). Q r s')" + +text \ + We often reason about invariant predicates. The following provides shorthand syntax + that avoids repeating potentially long predicates.\ +abbreviation (input) invariant :: + "('s,'a) nondet_monad \ ('s \ bool) \ bool" + ("_ \_\" [59,0] 60) where + "invariant f P \ \P\ f \\_. P\" + +text \ + Validity for the exception monad is similar and build on the standard + validity above. Instead of one postcondition, we have two: one for + normal and one for exceptional results.\ +definition validE :: + "('s \ bool) \ ('s, 'a + 'b) nondet_monad \ ('b \ 's \ bool) \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /(\_\,/ \_\)") where + "\P\ f \Q\,\E\ \ \P\ f \ \v s. case v of Inr r \ Q r s | Inl e \ E e s \" + +lemma validE_def2: + "\P\ f \Q\,\E\ \ \s. P s \ (\(r,s') \ fst (f s). case r of Inr b \ Q b s' | Inl a \ E a s')" + by (unfold valid_def validE_def) + +text \ + The following two instantiations are convenient to separate reasoning for exceptional and + normal case.\ +(* Narrator: they are in fact not convenient, and are now considered a mistake that should have + been an abbreviation instead. *) +definition validE_R :: (* FIXME lib: this should be an abbreviation *) + "('s \ bool) \ ('s, 'e + 'a) nondet_monad \ ('a \ 's \ bool) \ bool" ("\_\/ _ /\_\, -") + where + "\P\ f \Q\,- \ \P\ f \Q\,\\_. \\" + +definition validE_E :: (* FIXME lib: this should be an abbreviation *) + "('s \ bool) \ ('s, 'e + 'a) nondet_monad \ ('e \ 's \ bool) \ bool" ("\_\/ _ /-, \_\") + where + "\P\ f -,\E\ \ \P\ f \\_. \\,\E\" + +(* These lemmas are useful to apply to rules to convert valid rules into a format suitable for wp. *) +lemma valid_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') \ f \ Q' \" + by (auto simp add: valid_def split: prod.splits) + +lemma validE_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \, \ \rv s. E s0 rv s \) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') + \ (\rv s'. E s0 rv s' \ E' rv s') \ f \ Q' \, \ E' \" + by (auto simp add: validE_def valid_def split: prod.splits sum.splits) + + +section \Pre Lemmas\ + +lemma hoare_pre_imp: + "\ \s. P s \ Q s; \Q\ a \R\ \ \ \P\ a \R\" + by (fastforce simp: valid_def) + +lemmas hoare_weaken_pre = hoare_pre_imp[rotated] + +lemma hoare_weaken_preE: + "\ \Q\ f \R\,\E\; \s. P s \ Q s \ \ \P\ f \R\,\E\" + by (fastforce simp: validE_def2) + +lemma hoare_weaken_preE_R: + "\ \P'\ f \Q\,-; \s. P s \ P' s \ \ \P\ f \Q\,-" + unfolding validE_R_def + by (rule hoare_weaken_preE) + +lemma hoare_weaken_preE_E: + "\ \P'\ f -,\Q\; \s. 
P s \ P' s \ \ \P\ f -,\Q\" + by (fastforce simp: validE_E_def validE_def valid_def) + +lemmas hoare_pre [wp_pre] = + hoare_weaken_pre + hoare_weaken_preE + hoare_weaken_preE_R + hoare_weaken_preE_E + + +subsection \Setting up the precondition case splitter.\ + +lemma wpc_helper_valid: + "\Q\ g \S\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ g \S\" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +lemma wpc_helper_validE: + "\Q\ f \R\,\E\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f \R\,\E\" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +lemma wpc_helper_validE_R: + "\Q\ f \R\,- \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f \R\,-" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +lemma wpc_helper_validR_R: + "\Q\ f -,\E\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f -,\E\" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +wpc_setup "\m. \P\ m \Q\" wpc_helper_valid +wpc_setup "\m. \P\ m \Q\,\E\" wpc_helper_validE +wpc_setup "\m. \P\ m \Q\,-" wpc_helper_validE_R +wpc_setup "\m. \P\ m -,\E\" wpc_helper_validR_R + + +subsection \Hoare Logic Rules\ + +lemma bind_wp[wp_split]: + "\\rv. \Q' rv\ g rv \Q\; \P\ f \Q'\\ \ \P\ f >>= (\rv. g rv) \Q\" + by (fastforce simp: valid_def bind_def' intro: image_eqI[rotated]) + +lemma bindE_wp[wp_split]: + "\\rv. \Q' rv\ g rv \Q\,\E\; \P\ f \Q'\,\E\\ \ \P\ f >>=E (\rv. g rv) \Q\,\E\" + by (fastforce simp: validE_def2 bindE_def bind_def throwError_def return_def lift_def + split: sum.splits) + +lemma bindE_R_wp: + "\\rv. \Q' rv\ g rv \Q\,-; \P\ f \Q'\,-\ \ \P\ f >>=E (\rv. g rv) \Q\,-" + apply (clarsimp simp: validE_R_def) + by (wp | assumption)+ + +lemma bindE_E_wp: + "\\rv. \Q' rv\ g rv -,\E\; \P\ f \Q'\,\E\\ \ \P\ f >>=E (\rv. g rv) -,\E\" + apply (clarsimp simp: validE_E_def) + by (wp | assumption)+ + +lemmas bind_wp_fwd = bind_wp[rotated] +lemmas bindE_wp_fwd = bindE_wp[rotated] + +lemma bind_wpE_R: + "\\rv. \Q' rv\ g rv \Q\,-; \P\ f \Q'\\ \ \P\ f >>= (\rv. g rv) \Q\,-" + apply (clarsimp simp: validE_R_def validE_def) + by (wp | assumption)+ + +lemma bind_wpE_E: + "\\rv. \Q' rv\ g rv -,\E\; \P\ f \Q'\\ \ \P\ f >>= (\rv. g rv) -,\E\" + apply (clarsimp simp: validE_E_def validE_def) + by (wp | assumption)+ + +lemma bind_wpE: + "\\rv. \Q' rv\ g rv \Q\,\E\; \P\ f \Q'\\ \ \P\ f >>= (\rv. g rv) \Q\,\E\" + apply (clarsimp simp: validE_def) + by (wp | assumption)+ + +lemma hoare_TrueI: + "\P\ f \\_. \\" + by (simp add: valid_def) + +lemma hoareE_TrueI: + "\P\ f \\_. \\, \\_. \\" + by (simp add: validE_def valid_def) + +lemma hoareE_R_TrueI: + "\P\ f \\_. \\, -" + by (auto simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoareE_E_TrueI: + "\P\ f -,\\_. 
\\" + by (auto simp: validE_E_def validE_def valid_def split: sum.splits) + +lemmas wp_post_taut = hoare_TrueI[where P=\] +lemmas wp_post_tautE = hoareE_TrueI[where P=\] +lemmas wp_post_tautE_R = hoareE_R_TrueI[where P=\] +lemmas wp_post_tautE_E = hoareE_E_TrueI[where P=\] +lemmas wp_post_tauts[intro] = wp_post_taut wp_post_tautE wp_post_tautE_R wp_post_tautE_E + +lemma hoare_post_conj[intro]: + "\ \P\ f \Q\; \P\ f \R\ \ \ \P\ f \Q and R\" + by (fastforce simp: valid_def) + +lemma hoare_pre_disj[intro]: + "\ \P\ f \R\; \Q\ f \R\ \ \ \P or Q\ f \R\" + by (simp add:valid_def pred_disj_def) + +lemma hoare_conj: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \P and P'\ f \Q and Q'\" + unfolding valid_def by auto + +lemma hoare_pre_cont[simp]: + "\\\ f \P\" + by (simp add:valid_def) + +lemma hoare_FalseE[simp]: + "\\\ f \Q\, \E\" + by (simp add: valid_def validE_def) + +\ \FIXME: remove?\ +lemma hoare_return_drop_var[iff]: + "\Q\ return x \\r. Q\" + by (simp add: valid_def return_def) + +\ \FIXME: remove?\ +lemma hoare_gets[intro]: + "\ \s. P s \ Q (f s) s \ \ \P\ gets f \Q\" + by (simp add:valid_def gets_def get_def bind_def return_def) + +\ \FIXME: remove?\ +lemma hoare_modifyE_var: + "\ \s. P s \ Q (f s) \ \ \P\ modify f \\_ s. Q s\" + by(simp add: valid_def modify_def put_def get_def bind_def) + +\ \FIXME: remove?\ +lemma hoare_if: + "\ P \ \Q\ a \R\; \ P \ \Q\ b \R\ \ \ \Q\ if P then a else b \R\" + by (simp add: valid_def) + +\ \FIXME: remove?\ +lemma hoare_pre_subst: + "\ A = B; \A\ a \C\ \ \ \B\ a \C\" + by (erule subst) + +\ \FIXME: remove?\ +lemma hoare_post_subst: + "\ B = C; \A\ a \B\ \ \ \A\ a \C\" + by (erule subst) + +\ \FIXME: change R to Q and Q to Q'\ +lemma hoare_post_imp: + "\ \rv s. Q rv s \ R rv s; \P\ a \Q\ \ \ \P\ a \R\" + by(fastforce simp:valid_def split_def) + +lemma hoare_post_impE: + "\ \rv s. Q rv s \ R rv s; \e s. E e s \ F e s; \P\ a \Q\,\E\ \ \ \P\ a \R\,\F\" + by(fastforce simp: validE_def2 split: sum.splits) + +lemmas hoare_strengthen_post = hoare_post_imp[rotated] +lemmas hoare_strengthen_postE = hoare_post_impE[rotated 2] + +lemma hoare_strengthen_postE_R: + "\ \P\ f \Q'\,-; \rv s. Q' rv s \ Q rv s \ \ \P\ f \Q\,-" + unfolding validE_R_def + by (erule hoare_post_impE) + +lemma hoare_strengthen_postE_E: + "\ \P\ f -,\Q'\; \rv s. Q' rv s \ Q rv s \ \ \P\ f -,\Q\" + unfolding validE_E_def + by (rule hoare_post_impE) + +lemma hoare_validE_cases: + "\ \P\ f \Q\, \\_ _. True\; \P\ f \\_ _. True\, \R\ \ \ \P\ f \Q\, \R\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc: + "\\P\ a \\_. Q\; \s. Q s \ R s\ \ \P\ a \\_. R\, \\_. R\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc2: + "\\P\ a \\_. Q\; \s. Q s \ R s\ \ \P\ a \\_. R\, \\_. \\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc2E: + "\\P\ a \\_. Q\; \s. Q s \ R s\ \ \P\ a \\_. \\, \\_. R\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc2_actual: + "\P\ a \\_. R\ \ \P\ a \\_. R\, \\_. \\" + by (rule hoare_post_imp_dc2) + +lemma hoare_post_imp_dc2E_actual: + "\P\ a \\_. R\ \ \P\ a \\_. \\, \\_. R\" + by (rule hoare_post_imp_dc2E) + +lemma hoare_conjD1: + "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. Q rv\" + unfolding valid_def by auto + +lemma hoare_conjD2: + "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. R rv\" + unfolding valid_def by auto + +lemma hoare_post_disjI1: + "\P\ f \\rv. Q rv\ \ \P\ f \\rv. 
Q rv or R rv\" + unfolding valid_def by auto + +lemma hoare_post_disjI2: + "\P\ f \\rv. R rv\ \ \P\ f \\rv. Q rv or R rv\" + unfolding valid_def by auto + +lemma use_valid: + "\(r, s') \ fst (f s); \P\ f \Q\; P s \ \ Q r s'" + unfolding valid_def by blast + +lemmas post_by_hoare = use_valid[rotated] + +lemma use_valid_inv: + assumes step: "(r, s') \ fst (f s)" + assumes pres: "\N. \\s. N (P s) \ E s\ f \\rv s. N (P s)\" + shows "E s \ P s = P s'" + using use_valid[where f=f, OF step pres[where N="\p. p = P s"]] by simp + +lemma use_validE_norm: + "\ (Inr r', s') \ fst (B s); \ P \ B \ Q \,\ E \; P s \ \ Q r' s'" + unfolding validE_def valid_def by force + +lemma use_validE_except: + "\ (Inl r', s') \ fst (B s); \ P \ B \ Q \,\ E \; P s \ \ E r' s'" + unfolding validE_def valid_def by force + +lemma in_inv_by_hoareD: + "\ \P. f \P\; (x,s') \ fst (f s) \ \ s' = s" + by (auto simp add: valid_def) blast + + +subsection \Misc\ + +lemma hoare_gen_asm: + "(P \ \P'\ f \Q\) \ \P' and K P\ f \Q\" + by (fastforce simp add: valid_def) + +lemmas hoare_gen_asm_single = hoare_gen_asm[where P'="\", simplified pred_conj_def simp_thms] + +lemma hoare_gen_asm_lk: + "(P \ \P'\ f \Q\) \ \K P and P'\ f \Q\" + by (fastforce simp add: valid_def) + +\ \Useful for forward reasoning, when P is known. + The first version allows weakening the precondition.\ +lemma hoare_gen_asm_spec': + "\ \s. P s \ S \ R s; S \ \R\ f \Q\ \ \ \P\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_gen_asm_spec: + "\ \s. P s \ S; S \ \P\ f \Q\ \ \ \P\ f \Q\" + by (rule hoare_gen_asm_spec'[where S=S and R=P]) simp + +lemma hoare_conjI: + "\ \P\ f \Q\; \P\ f \R\ \ \ \P\ f \\r s. Q r s \ R r s\" + unfolding valid_def by blast + +lemma hoare_disjI1: + "\ \P\ f \Q\ \ \ \P\ f \\rv s. Q rv s \ R rv s \" + unfolding valid_def by blast + +lemma hoare_disjI2: + "\ \P\ f \R\ \ \ \P\ f \\rv s. Q rv s \ R rv s \" + unfolding valid_def by blast + +lemma hoare_assume_pre: + "(\s. P s \ \P\ f \Q\) \ \P\ f \Q\" + by (auto simp: valid_def) + +lemma hoare_assume_preE: + "(\s. P s \ \P\ f \Q\,\R\) \ \P\ f \Q\,\R\" + by (auto simp: valid_def validE_def) + +lemma hoare_allI: + "(\x. \P\f\Q x\) \ \P\f\\rv s. \x. Q x rv s\" + by (simp add: valid_def) blast + +lemma validE_allI: + "(\x. \P\ f \\r s. Q x r s\,\E\) \ \P\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: valid_def validE_def split: sum.splits) + +lemma hoare_exI: + "\P\ f \Q x\ \ \P\ f \\rv s. \x. Q x rv s\" + by (simp add: valid_def) blast + +lemma hoare_impI: + "(R \ \P\ f \Q\) \ \P\ f \\rv s. R \ Q rv s\" + by (simp add: valid_def) blast + +lemma validE_impI: + "\\E. \P\ f \\_ _. True\,\E\; (P' \ \P\ f \Q\,\E\)\ \ + \P\ f \\rv s. P' \ Q rv s\, \E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_case_option_wp: + "\ \P\ f None \Q\; \x. \P' x\ f (Some x) \Q' x\ \ + \ \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\" + by (cases v) auto + +lemma hoare_case_option_wp2: + "\ \P\ f None \Q\; \x. \P' x\ f (Some x) \Q' x\ \ + \ \case_option P P' v\ f v \\rv s. case v of None \ Q rv s | Some x \ Q' x rv s\" + by (cases v) auto + +(* Might be useful for forward reasoning, when P is known. *) +lemma hoare_when_cases: + "\\s. \\B; P s\ \ Q s; B \ \P\ f \\_. Q\\ \ \P\ when B f \\_. Q\" + by (cases B; simp add: valid_def return_def) + +lemma hoare_vcg_prop: + "\\s. P\ f \\rv s. P\" + by (simp add: valid_def) + + +subsection \@{const valid} and @{const validE}, @{const validE_R}, @{const validE_E}\ + +lemma valid_validE: + "\P\ f \\_. Q\ \ \P\ f \\_. 
Q\, \\_. Q\" + by (rule hoare_post_imp_dc) + +lemma valid_validE2: + "\ \P\ f \\_. Q'\; \s. Q' s \ Q s; \s. Q' s \ E s \ \ \P\ f \\_. Q\, \\_. E\" + unfolding valid_def validE_def + by (clarsimp split: sum.splits) blast + +lemma validE_valid: + "\P\ f \\_. Q\, \\_. Q\ \ \P\ f \\_. Q\" + unfolding validE_def + by fastforce + +lemma valid_validE_R: + "\P\ f \\_. Q\ \ \P\ f \\_. Q\,-" + by (simp add: validE_R_def hoare_strengthen_postE[OF valid_validE]) + +lemma valid_validE_E: + "\P\ f \\_. Q\ \ \P\ f -,\\_. Q\" + by (simp add: validE_E_def hoare_strengthen_postE[OF valid_validE]) + +lemma validE_validE_R: + "\P\ f \Q\,\\\\ \ \P\ f \Q\,-" + by (simp add: validE_R_def) + +lemma validE_R_validE: + "\P\ f \Q\,- \ \P\ f \Q\,\\\\" + by (simp add: validE_R_def) + +lemma validE_validE_E: + "\P\ f \\\\, \E\ \ \P\ f -, \E\" + by (simp add: validE_E_def) + +lemma validE_E_validE: + "\P\ f -, \E\ \ \P\ f \\\\, \E\" + by (simp add: validE_E_def) + +lemma validE_eq_valid: + "\P\ f \\rv. Q\,\\rv. Q\ = \P\ f \\rv. Q\" + by (simp add: validE_def) + + +subsection \@{const liftM}\ + +lemma hoare_liftM_subst: + "\P\ liftM f m \Q\ = \P\ m \Q \ f\" + unfolding liftM_def bind_def return_def split_def + by (fastforce simp: valid_def Ball_def) + +lemma hoare_liftME_subst: + "\P\ liftME f m \Q\, \E\ = \P\ m \Q \ f\, \E\" + unfolding validE_def liftME_liftM hoare_liftM_subst o_def + by (fastforce intro!: arg_cong[where f="valid P m"] split: sum.splits) + +lemma liftE_validE[simp]: + "\P\ liftE f \Q\, \E\ = \P\ f \Q\" + by (simp add: liftE_liftM validE_def hoare_liftM_subst o_def) + + +subsection \Operator lifting/splitting\ + +lemma hoare_vcg_if_split: + "\ P \ \Q\ f \S\; \P \ \R\ g \S\ \ \ \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" + by simp + +lemma hoare_vcg_if_splitE: + "\ P \ \Q\ f \S\,\E\; \P \ \R\ g \S\,\E\ \ \ + \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\,\E\" + by simp + +lemma hoare_vcg_split_case_option: + "\ \x. x = None \ \P x\ f x \R x\; \x y. x = Some y \ \Q x y\ g x y \R x\ \ + \ \\s. (x = None \ P x s) \ (\y. x = Some y \ Q x y s)\ + case x of None \ f x | Some y \ g x y + \R x\" + by (cases x; simp) + +lemma hoare_vcg_split_case_optionE: + "\ \x. x = None \ \P x\ f x \R x\,\E x\; \x y. x = Some y \ \Q x y\ g x y \R x\,\E x\ \ + \ \\s. (x = None \ P x s) \ (\y. x = Some y \ Q x y s)\ + case x of None \ f x | Some y \ g x y + \R x\, \E x\" + by (cases x; simp) + +lemma hoare_vcg_split_case_sum: + "\ \x a. x = Inl a \ \P x a\ f x a \R x\; \x b. x = Inr b \ \Q x b\ g x b \R x\ \ + \ \\s. (\a. x = Inl a \ P x a s) \ (\b. x = Inr b \ Q x b s)\ + case x of Inl a \ f x a | Inr b \ g x b + \R x\" + by (cases x; simp) + +lemma bind_wp_nobind: + "\ \Q'\ g \Q\; \P\ f \\_. Q'\ \ \ \P\ do f; g od \Q\" + by (erule bind_wp_fwd) (clarsimp simp: valid_def) + +lemma bindE_wp_nobind: + "\ \Q'\ g \Q\, \E\; \P\ f \\_. Q'\, \E\ \ \ \P\ doE f; g odE \Q\, \E\" + by (erule bindE_wp_fwd) (clarsimp simp: validE_def) + +lemmas bind_wp_skip = bind_wp[where Q=Q and Q'=Q for Q] + +lemma hoare_chain: + "\ \P\ f \Q\; \s. R s \ P s; \rv s. Q rv s \ S rv s \ \ \R\ f \S\" + by (wp_pre, rule hoare_post_imp) + +lemma hoare_chainE: + "\ \P'\ A \Q'\,\E'\; \s. P s \ P' s; \rv s. Q' rv s \ Q rv s; \rv s. E' rv s \ E rv s \ + \ \P\ A \Q\,\E\" + by wp_pre (rule hoare_post_impE) + +lemma hoare_vcg_conj_lift: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. 
Q rv s \ Q' rv s\" + unfolding valid_def + by fastforce + +\ \A variant which works nicely with subgoals that do not contain schematics\ +lemmas hoare_vcg_conj_lift_pre_fix = hoare_vcg_conj_lift[where P=R and P'=R for R, simplified] + +lemma hoare_vcg_conj_liftE1: + "\ \P\ f \Q\,-; \P'\ f \Q'\,\E\ \ \ \P and P'\ f \\rv s. Q rv s \ Q' rv s\,\E\" + unfolding valid_def validE_R_def validE_def + by (fastforce simp: split_def split: sum.splits) + +lemma hoare_vcg_conj_liftE2: + "\ \P\ f -,\E\; \P'\ f \Q\,\E'\ \ \ \P and P'\ f \Q\,\\rv s. E rv s \ E' rv s\" + unfolding valid_def validE_E_def validE_def + by (fastforce simp: split_def split: sum.splits) + +lemma hoare_vcg_conj_liftE_weaker: + assumes "\P\ f \Q\, \E\" + assumes "\P'\ f \Q'\, \E\" + shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\" + apply (rule hoare_pre) + apply (fastforce intro: assms hoare_vcg_conj_liftE1 validE_validE_R hoare_post_impE) + apply simp + done + +lemma hoare_vcg_disj_lift: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" + unfolding valid_def + by fastforce + +lemma hoare_vcg_disj_lift_R: + assumes x: "\P\ f \Q\,-" + assumes y: "\P'\ f \Q'\,-" + shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" + using assms + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_vcg_const_Ball_lift: + "\ \x. x \ S \ \P x\ f \Q x\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\" + by (fastforce simp: valid_def) + +lemma hoare_vcg_const_Ball_liftE: + "\ \x. x \ S \ \P x\ f \Q x\,\E\; \\s. True\ f \\r s. True\, \E\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_const_Ball_lift_R: + "\ \x. x \ S \ \P x\ f \Q x\,- \ \ \\s. \x \ S. P x s\ f \\rv s. \x \ S. Q x rv s\,-" + unfolding validE_R_def validE_def + by (rule hoare_strengthen_post) + (fastforce intro!: hoare_vcg_const_Ball_lift split: sum.splits)+ + +lemma hoare_vcg_const_Ball_lift_E_E: + "(\x. x \ S \ \P x\ f -,\Q x\) \ \\s. \x \ S. P x s\ f -,\\rv s. \x \ S. Q x rv s\" + unfolding validE_E_def validE_def valid_def + by (fastforce split: sum.splits) + +lemma hoare_vcg_all_lift: + "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" + by (fastforce simp: valid_def) + +lemma hoare_vcg_all_liftE: + "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_all_liftE_R: + "(\x. \P x\ f \Q x\, -) \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\, -" + by (rule hoare_vcg_const_Ball_lift_R[where S=UNIV, simplified]) + +lemma hoare_vcg_all_liftE_E: + "(\x. \P x\ f -, \Q x\) \ \\s. \x. P x s\ f -,\\rv s. \x. Q x rv s\" + by (rule hoare_vcg_const_Ball_lift_E_E[where S=UNIV, simplified]) + +lemma hoare_vcg_imp_lift: + "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" + by (simp only: imp_conv_disj) (rule hoare_vcg_disj_lift) + +lemma hoare_vcg_imp_lift': + "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. \ P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" + by (wpsimp wp: hoare_vcg_imp_lift) + +lemma hoare_vcg_imp_liftE: + "\ \P'\ f \\rv s. \ P rv s\, \E\; \Q'\ f \Q\, \E\ \ + \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, \E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_imp_liftE': + "\ \P'\ f \\rv s. \ P rv s\, \E\; \Q'\ f \Q\, \E\ \ + \ \\s. \ P' s \ Q' s\ f \\rv s. 
P rv s \ Q rv s\, \E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_imp_lift_R: + "\ \P'\ f \\rv s. \ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" + by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_lift_R': + "\ \P'\ f \\rv s. \ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. \P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" + by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_liftE_E: + "\\P'\ f -, \\rv s. \ P rv s\; \Q'\ f -, \Q\\ \ + \\s. P' s \ Q' s\ f -, \\rv s. P rv s \ Q rv s\" + by (auto simp add: valid_def validE_E_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_liftE_E': + "\\P'\ f -, \\rv s. \ P rv s\; \Q'\ f -, \Q\\ \ + \\s. \ P' s \ Q' s\ f -, \\rv s. P rv s \ Q rv s\" + by (auto simp add: valid_def validE_E_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_conj_lift[wp_comb]: + "\ \P\ f \\rv s. Q rv s \ Q' rv s\; \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ \ \ + \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" + by (auto simp: valid_def) + +lemmas hoare_vcg_imp_conj_lift'[wp_unsafe] = hoare_vcg_imp_conj_lift[where Q'''="\\", simplified] + +lemma hoare_absorb_imp: + "\ P \ f \\rv s. Q rv s \ R rv s\ \ \ P \ f \\rv s. Q rv s \ R rv s\" + by (erule hoare_post_imp[rotated], blast) + +lemma hoare_weaken_imp: + "\ \rv s. Q rv s \ Q' rv s ; \P\ f \\rv s. Q' rv s \ R rv s\ \ + \ \P\ f \\rv s. Q rv s \ R rv s\" + by (clarsimp simp: valid_def split_def) + +lemma hoare_vcg_const_imp_lift: + "\ P \ \Q\ m \R\ \ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\" + by (cases P, simp_all add: hoare_vcg_prop) + +lemma hoare_vcg_const_imp_lift_E: + "(P \ \Q\ f -, \R\) \ \\s. P \ Q s\ f -, \\rv s. P \ R rv s\" + by (fastforce simp: validE_E_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_vcg_const_imp_lift_R: + "(P \ \Q\ m \R\,-) \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" + by (fastforce simp: validE_R_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_weak_lift_imp: + "\P'\ f \Q\ \ \\s. P \ P' s\ f \\rv s. P \ Q rv s\" + by (auto simp add: valid_def split_def) + +lemma hoare_weak_lift_impE: + "\Q\ m \R\,\E\ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,\\rv s. P \ E rv s\" + by (cases P; simp add: validE_def hoare_vcg_prop) + +lemma hoare_weak_lift_imp_R: + "\Q\ m \R\,- \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" + by (cases P; wpsimp wp: wp_post_tautE_R) + +lemma hoare_vcg_ex_lift: + "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" + by (clarsimp simp: valid_def, blast) + +lemma hoare_vcg_ex_liftE: + "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_ex_liftE_E: + "\ \x. \P x\ f -,\E x\ \ \ \\s. \x. P x s\ f -,\\rv s. \x. E x rv s\" + by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) + +lemma hoare_vcg_ex_lift_R1: + "(\x. \P x\ f \Q\, -) \ \\s. \x. P x s\ f \Q\, -" + by (fastforce simp: valid_def validE_R_def validE_def split: sum.splits) + +lemma hoare_liftP_ext: + assumes "\P x. m \\s. P (f s x)\" + shows "m \\s. P (f s)\" + unfolding valid_def + apply clarsimp + apply (erule subst[rotated, where P=P]) + apply (rule ext) + apply (drule use_valid, rule assms, rule refl) + apply simp + done + +(* for instantiations *) +lemma hoare_triv: "\P\f\Q\ \ \P\f\Q\" . +lemma hoare_trivE: "\P\ f \Q\,\E\ \ \P\ f \Q\,\E\" . 
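
The lifting rules above (for example hoare_vcg_conj_lift, hoare_vcg_all_lift and hoare_vcg_ex_lift) combine per-component Hoare triples about the same computation into a single triple whose pre- and postconditions are combined pointwise. A minimal sketch of how such a rule is used, assuming only the rule names introduced in this theory; the experiment context and the fixed constants m, P and Q below are illustrative only and are not part of the patch:

experiment
  fixes m :: "('s, 'a) nondet_monad"
    and P :: "'i \<Rightarrow> 's \<Rightarrow> bool"
    and Q :: "'i \<Rightarrow> 'a \<Rightarrow> 's \<Rightarrow> bool"
  assumes m_P: "\<And>x. \<lbrace>P x\<rbrace> m \<lbrace>Q x\<rbrace>"
begin

(* hoare_vcg_all_lift turns the family of triples m_P into one triple whose
   pre- and postconditions quantify over the index x. *)
lemma "\<lbrace>\<lambda>s. \<forall>x. P x s\<rbrace> m \<lbrace>\<lambda>rv s. \<forall>x. Q x rv s\<rbrace>"
  by (rule hoare_vcg_all_lift, rule m_P)

end

The remaining *_lift rules follow the same pattern for conjunction, disjunction, existential quantification and implication, which is why most of them are collected in the hoare_vcg_op_lift named theorem list below.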
+lemma hoare_trivE_R: "\P\ f \Q\,- \ \P\ f \Q\,-" . +lemma hoare_trivR_R: "\P\ f -,\E\ \ \P\ f -,\E\" . + +lemma hoare_vcg_E_conj: + "\ \P\ f -,\E\; \P'\ f \Q'\,\E'\ \ \ \\s. P s \ P' s\ f \Q'\, \\rv s. E rv s \ E' rv s\" + unfolding validE_def validE_E_def + by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) + +lemma hoare_vcg_E_elim: + "\ \P\ f -,\E\; \P'\ f \Q\,- \ \ \\s. P s \ P' s\ f \Q\,\E\" + by (rule hoare_strengthen_postE[OF hoare_vcg_E_conj]) (simp add: validE_R_def)+ + +lemma hoare_vcg_R_conj: + "\ \P\ f \Q\,-; \P'\ f \Q'\,- \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" + unfolding validE_R_def validE_def + by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) + +lemma hoare_lift_Pf_E_R: + "\ \x. \P x\ m \\_. P x\, -; \P. \\s. P (f s)\ m \\_ s. P (f s)\, - \ \ + \\s. P (f s) s\ m \\_ s. P (f s) s\, -" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_lift_Pf_E_E: + "\ \x. \P x\ m -, \\_. P x\; \P. \\s. P (f s)\ m -, \\_ s. P (f s)\ \ \ + \\s. P (f s) s\ m -, \\_ s. P (f s) s\" + by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) + +lemma hoare_post_comb_imp_conj: + "\ \P'\ f \Q\; \P\ f \Q'\; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\" + by (wpsimp wp: hoare_vcg_conj_lift) + +lemma hoare_vcg_if_lift: + "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ + \R\ f \\rv s. if P then X rv s else Y rv s\" + + "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ + \R\ f \\rv. if P then X rv else Y rv\" + by (auto simp: valid_def split_def) + +lemma hoare_vcg_split_lift[wp]: + "\P\ f x y \Q\ \ \P\ case (x, y) of (a, b) \ f a b \Q\" + by simp + +named_theorems hoare_vcg_op_lift +lemmas [hoare_vcg_op_lift] = + hoare_vcg_const_imp_lift + hoare_vcg_const_imp_lift_E + hoare_vcg_const_imp_lift_R + (* leaving out hoare_vcg_conj_lift*, because that is built into wp *) + hoare_vcg_disj_lift + hoare_vcg_disj_lift_R + hoare_vcg_ex_lift + hoare_vcg_ex_liftE + hoare_vcg_ex_liftE_E + hoare_vcg_all_lift + hoare_vcg_all_liftE + hoare_vcg_all_liftE_E + hoare_vcg_all_liftE_R + hoare_vcg_const_Ball_lift + hoare_vcg_const_Ball_liftE + hoare_vcg_const_Ball_lift_R + hoare_vcg_const_Ball_lift_E_E + hoare_vcg_split_lift + hoare_vcg_if_lift + hoare_vcg_imp_lift' + hoare_vcg_imp_liftE' + hoare_vcg_imp_lift_R' + hoare_vcg_imp_liftE_E' + + +subsection \Weakest Precondition Rules\ + +lemma fail_wp: + "\\\ fail \Q\" + by (simp add: valid_def fail_def) + +lemma return_wp: + "\P x\ return x \P\" + by(simp add: valid_def return_def) + +lemma get_wp: + "\\s. P s s\ get \P\" + by (simp add: valid_def get_def) + +lemma gets_wp: + "\\s. P (f s) s\ gets f \P\" + by(simp add: valid_def split_def gets_def return_def get_def bind_def) + +lemma put_wp: + "\\_. Q () s\ put s \Q\" + by (simp add: put_def valid_def) + +lemma modify_wp: + "\\s. Q () (f s)\ modify f \Q\" + unfolding modify_def + by (wp put_wp get_wp) + +lemma failE_wp: + "\\\ fail \Q\, \E\" + by (simp add: validE_def fail_wp) + +lemma returnOk_wp: + "\P x\ returnOk x \P\,\E\" + by (simp add: validE_def2 returnOk_def return_def) + +lemma throwError_wp: + "\E e\ throwError e \P\,\E\" + by(simp add: validE_def2 throwError_def return_def) + +lemma returnOKE_R_wp: + "\P x\ returnOk x \P\, -" + by (simp add: validE_R_def validE_def valid_def returnOk_def return_def) + +lemma liftE_wp: + "\P\ f \Q\ \ \P\ liftE f \Q\,\E\" + by simp + +lemma catch_wp: + "\ \x. 
\E x\ handler x \Q\; \P\ f \Q\,\E\ \ \ \P\ catch f handler \Q\" + unfolding catch_def valid_def validE_def return_def + by (fastforce simp: bind_def split: sum.splits) + +lemma handleE'_wp: + "\ \x. \F x\ handler x \Q\,\E\; \P\ f \Q\,\F\ \ \ \P\ f handler \Q\,\E\" + unfolding handleE'_def valid_def validE_def return_def + by (fastforce simp: bind_def split: sum.splits) + +lemma handleE_wp: + assumes x: "\x. \F x\ handler x \Q\,\E\" + assumes y: "\P\ f \Q\,\F\" + shows "\P\ f handler \Q\,\E\" + by (simp add: handleE_def handleE'_wp [OF x y]) + +lemma liftM_wp: + "\P\ m \Q \ f\ \ \P\ liftM f m \Q\" + by (simp add: hoare_liftM_subst) + +lemma liftME_wp: + "\P\ m \Q \ f\,\E\ \ \P\ liftME f m \Q\,\E\" + by (simp add: hoare_liftME_subst) + +lemma assert_wp: + "\\s. P \ Q () s\ assert P \Q\" + unfolding assert_def + by (wpsimp wp: return_wp fail_wp | rule conjI)+ + +lemma list_cases_wp: + assumes a: "\P_A\ a \Q\" + assumes b: "\x xs. ts = x#xs \ \P_B x xs\ b x xs \Q\" + shows "\case_list P_A P_B ts\ case ts of [] \ a | x # xs \ b x xs \Q\" + by (cases ts, auto simp: a b) + +lemma hoare_vcg_handle_elseE: + "\ \P\ f \Q\,\E\; \e. \E e\ g e \R\,\F\; \x. \Q x\ h x \R\,\F\ \ \ + \P\ f g h \R\,\F\" + unfolding handle_elseE_def validE_def + by (wpsimp wp: bind_wp_fwd | assumption | rule conjI)+ + +lemma alternative_wp: + assumes x: "\P\ f \Q\" + assumes y: "\P'\ f' \Q\" + shows "\P and P'\ f \ f' \Q\" + unfolding valid_def alternative_def + using post_by_hoare[OF x] post_by_hoare[OF y] + by fastforce + +lemma alternativeE_wp: + assumes "\P\ f \Q\,\E\" + assumes "\P'\ f' \Q\,\E\" + shows "\P and P'\ f \ f' \Q\,\E\" + unfolding validE_def + by (wpsimp wp: assms alternative_wp | fold validE_def)+ + +lemma alternativeE_R_wp: + "\ \P\ f \Q\,-; \P'\ f' \Q\,- \ \ \P and P'\ f \ f' \Q\,-" + unfolding validE_R_def + by (rule alternativeE_wp) + +lemma alternativeE_E_wp: + "\ \P\ f -,\Q\; \P'\ g -,\Q\ \ \ \P and P'\ f \ g -, \Q\" + unfolding validE_E_def + by (rule alternativeE_wp) + +lemma select_wp: + "\\s. \x \ S. Q x s\ select S \Q\" + by (simp add: select_def valid_def) + +lemma select_f_wp: + "\\s. \x\fst S. Q x s\ select_f S \Q\" + by (simp add: select_f_def valid_def) + +lemma state_select_wp: + "\\s. \t. (s, t) \ f \ P () t\ state_select f \P\" + unfolding state_select_def2 + by (wpsimp wp: put_wp select_wp return_wp get_wp assert_wp) + +lemma condition_wp: + "\ \Q\ A \P\; \R\ B \P\ \ \ \\s. if C s then Q s else R s\ condition C A B \P\" + by (clarsimp simp: condition_def valid_def) + +lemma conditionE_wp: + "\ \P\ A \Q\,\R\; \P'\ B \Q\,\R\ \ \ \\s. if C s then P s else P' s\ condition C A B \Q\,\R\" + by (clarsimp simp: condition_def validE_def valid_def) + +lemma state_assert_wp: + "\\s. f s \ P () s\ state_assert f \P\" + unfolding state_assert_def + by (wp bind_wp_fwd get_wp assert_wp) + +lemma when_wp[wp_split]: + "\ P \ \Q\ f \R\ \ \ \if P then Q else R ()\ when P f \R\" + by (clarsimp simp: when_def valid_def return_def) + +lemma unless_wp[wp_split]: + "(\P \ \Q\ f \R\) \ \if P then R () else Q\ unless P f \R\" + unfolding unless_def by wp auto + +lemma whenE_wp: + "(P \ \Q\ f \R\, \E\) \ \if P then Q else R ()\ whenE P f \R\, \E\" + unfolding whenE_def by clarsimp (wp returnOk_wp) + +lemma unlessE_wp: + "(\ P \ \Q\ f \R\, \E\) \ \if P then R () else Q\ unlessE P f \R\, \E\" + unfolding unlessE_def + by (wpsimp wp: returnOk_wp) + +lemma maybeM_wp: + "(\x. y = Some x \ \P x\ m x \Q\) \ + \\s. (\x. 
y = Some x \ P x s) \ (y = None \ Q () s)\ maybeM m y \Q\" + unfolding maybeM_def by (wpsimp wp: return_wp) auto + +lemma notM_wp: + "\P\ m \\c. Q (\ c)\ \ \P\ notM m \Q\" + unfolding notM_def by (wpsimp wp: return_wp) + +lemma ifM_wp: + assumes [wp]: "\Q\ f \S\" "\R\ g \S\" + assumes [wp]: "\A\ P \\c s. c \ Q s\" "\B\ P \\c s. \c \ R s\" + shows "\A and B\ ifM P f g \S\" + unfolding ifM_def + by (wpsimp wp: hoare_vcg_if_split hoare_vcg_conj_lift) + +lemma andM_wp: + assumes [wp]: "\Q'\ B \Q\" + assumes [wp]: "\P\ A \\c s. c \ Q' s\" "\P'\ A \\c s. \ c \ Q False s\" + shows "\P and P'\ andM A B \Q\" + unfolding andM_def by (wp ifM_wp return_wp) + +lemma orM_wp: + assumes [wp]: "\Q'\ B \Q\" + assumes [wp]: "\P\ A \\c s. c \ Q True s\" "\P'\ A \\c s. \ c \ Q' s\" + shows "\P and P'\ orM A B \Q\" + unfolding orM_def by (wp ifM_wp return_wp) + +lemma whenM_wp: + assumes [wp]: "\Q\ f \S\" + assumes [wp]: "\A\ P \\c s. c \ Q s\" "\B\ P \\c s. \c \ S () s\" + shows "\A and B\ whenM P f \S\" + unfolding whenM_def by (wp ifM_wp return_wp) + +lemma hoare_K_bind[wp_split]: + "\P\ f \Q\ \ \P\ K_bind f x \Q\" + by simp + +lemma validE_K_bind[wp_split]: + "\ P \ x \ Q \, \ E \ \ \ P \ K_bind x f \ Q \, \ E \" + by simp + +lemma hoare_fun_app_wp: + "\P\ f' x \Q'\ \ \P\ f' $ x \Q'\" + "\P\ f x \Q\,\E\ \ \P\ f $ x \Q\,\E\" + "\P\ f x \Q\,- \ \P\ f $ x \Q\,-" + "\P\ f x -,\E\ \ \P\ f $ x -,\E\" + by simp+ + +lemma liftE_validE_E: + "\\\ liftE f -, \Q\" + by (clarsimp simp: validE_E_def valid_def) + +lemma returnOk_E: + "\\\ returnOk r -, \Q\" + by (simp add: validE_E_def) (wp returnOk_wp) + +lemma case_option_wp: + "\ \x. \P x\ m x \Q\; \P'\ m' \Q\ \ \ + \\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ case_option m' m x \Q\" + by (cases x; simp) + +lemma case_option_wpE: + "\ \x. \P x\ m x \Q\,\E\; \P'\ m' \Q\,\E\ \ \ + \\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ case_option m' m x \Q\,\E\" + by (cases x; simp) + +lemmas liftME_E_E_wp[wp_split] = validE_validE_E [OF liftME_wp, simplified, OF validE_E_validE] + +lemma assert_opt_wp: + "\\s. x \ None \ Q (the x) s\ assert_opt x \Q\" + unfolding assert_opt_def + by (cases x; wpsimp wp: fail_wp return_wp) + +lemma gets_the_wp: + "\\s. (f s \ None) \ Q (the (f s)) s\ gets_the f \Q\" + unfolding gets_the_def + by (wp bind_wp_fwd gets_wp assert_opt_wp) + +lemma gets_the_wp': (* FIXME: should prefer this one in [wp] *) + "\\s. \rv. f s = Some rv \ Q rv s\ gets_the f \Q\" + unfolding gets_the_def + by (wpsimp wp: bind_wp_fwd gets_wp assert_opt_wp) + +lemma gets_map_wp: + "\\s. f s p \ None \ Q (the (f s p)) s\ gets_map f p \Q\" + unfolding gets_map_def + by (wpsimp wp: bind_wp_fwd gets_wp assert_opt_wp) + +lemma gets_map_wp': + "\\s. \rv. f s p = Some rv \ Q rv s\ gets_map f p \Q\" + unfolding gets_map_def + by (wpsimp wp: bind_wp_fwd gets_wp assert_opt_wp) + +(* FIXME: make wp *) +lemma whenE_throwError_wp: + "\\s. \Q \ P s\ whenE Q (throwError e) \\_. P\, -" + by (simp add: whenE_def returnOk_def throwError_def return_def validE_R_def validE_def valid_def) + +lemma select_throwError_wp: + "\\s. \x\S. Q x s\ select S >>= throwError -, \Q\" + by (simp add: bind_def throwError_def return_def select_def validE_E_def validE_def valid_def) + + +subsection \Setting up the @{method wp} method\ + +lemma valid_is_triple: + "valid P f Q = triple_judgement P f (postcondition Q (\s f. 
fst (f s)))" + by (simp add: triple_judgement_def valid_def postcondition_def) + +lemma validE_is_triple: + "validE P f Q E = + triple_judgement P f + (postconditions (postcondition Q (\s f. {(rv, s'). (Inr rv, s') \ fst (f s)})) + (postcondition E (\s f. {(rv, s'). (Inl rv, s') \ fst (f s)})))" + by (fastforce simp: validE_def triple_judgement_def valid_def postcondition_def postconditions_def + split: sum.split) + +lemma validE_R_is_triple: + "validE_R P f Q = + triple_judgement P f (postcondition Q (\s f. {(rv, s'). (Inr rv, s') \ fst (f s)}))" + by (simp add: validE_R_def validE_is_triple postconditions_def postcondition_def) + +lemma validE_E_is_triple: + "validE_E P f E = + triple_judgement P f (postcondition E (\s f. {(rv, s'). (Inl rv, s') \ fst (f s)}))" + by (simp add: validE_E_def validE_is_triple postconditions_def postcondition_def) + +lemmas hoare_wp_combs = hoare_vcg_conj_lift + +lemmas hoare_wp_combsE = + validE_validE_R + hoare_vcg_R_conj + hoare_vcg_E_elim + hoare_vcg_E_conj + +lemmas hoare_wp_state_combsE = + valid_validE_R + hoare_vcg_R_conj[OF valid_validE_R] + hoare_vcg_E_elim[OF valid_validE_E] + hoare_vcg_E_conj[OF valid_validE_E] + +lemmas hoare_classic_wp_combs = hoare_post_comb_imp_conj hoare_weaken_pre hoare_wp_combs +lemmas hoare_classic_wp_combsE = hoare_weaken_preE hoare_weaken_preE_R hoare_wp_combsE + +lemmas hoare_classic_wp_state_combsE = + hoare_weaken_preE[OF valid_validE] + hoare_weaken_preE_R[OF valid_validE_R] + hoare_wp_state_combsE + +lemmas all_classic_wp_combs = + hoare_classic_wp_state_combsE + hoare_classic_wp_combsE + hoare_classic_wp_combs + +lemmas hoare_wp_splits[wp_split] = + handleE'_wp handleE_wp + validE_validE_R [OF handleE'_wp [OF validE_R_validE]] + validE_validE_R [OF handleE_wp [OF validE_R_validE]] + catch_wp hoare_vcg_if_split hoare_vcg_if_splitE + validE_validE_R [OF hoare_vcg_if_splitE [OF validE_R_validE validE_R_validE]] + liftM_wp liftME_wp + validE_validE_R [OF liftME_wp [OF validE_R_validE]] + validE_valid + +lemmas [wp_comb] = hoare_wp_state_combsE hoare_wp_combsE hoare_wp_combs + +(* Add these rules to wp first to control when they are applied. We want them used last, only when + no other more specific wp rules apply. + bind_wp, bindE_wp and their variants are wp rules instead of wp_split rules because + they should be used before other wp_split rules, and in combination with wp_comb rules when + necessary. + hoare_vcg_prop is unsafe in certain circumstances but still useful to have applied automatically, + so we make it the very last rule to be tried. 
*) +lemmas [wp] = + hoare_vcg_prop bind_wp + bindE_R_wp bindE_E_wp bindE_wp + bind_wpE_R bind_wpE_E bind_wpE + +(* rules towards the bottom will be matched first *) +lemmas [wp] = wp_post_tauts + hoare_fun_app_wp + returnOk_E + liftE_validE_E + put_wp + get_wp + gets_wp + modify_wp + return_wp + returnOk_wp + throwError_wp + fail_wp + failE_wp + assert_wp + state_assert_wp + assert_opt_wp + gets_the_wp + gets_map_wp' + liftE_wp + alternative_wp + alternativeE_R_wp + alternativeE_E_wp + alternativeE_wp + select_wp + select_f_wp + state_select_wp + condition_wp + conditionE_wp + maybeM_wp notM_wp ifM_wp andM_wp orM_wp whenM_wp + +lemmas [wp_trip] = valid_is_triple validE_is_triple validE_E_is_triple validE_R_is_triple + +lemmas validE_E_combs[wp_comb] = + hoare_vcg_E_conj[where Q'="\\", folded validE_E_def] + valid_validE_E + hoare_vcg_E_conj[where Q'="\\", folded validE_E_def, OF valid_validE_E] + + +subsection \Simplifications on conjunction\ + +lemma hoare_post_eq: + "\ Q = Q'; \P\ f \Q'\ \ \ \P\ f \Q\" + by simp + +lemma hoare_post_eqE1: + "\ Q = Q'; \P\ f \Q'\,\E\ \ \ \P\ f \Q\,\E\" + by simp + +lemma hoare_post_eqE2: + "\ E = E'; \P\ f \Q\,\E'\ \ \ \P\ f \Q\,\E\" + by simp + +lemma hoare_post_eqE_R: + "\ Q = Q'; \P\ f \Q'\,- \ \ \P\ f \Q\,-" + by simp + +lemma pred_conj_apply_elim: + "(\rv. Q rv and Q' rv) = (\rv s. Q rv s \ Q' rv s)" + by (simp add: pred_conj_def) + +lemma pred_conj_conj_elim: + "(\rv s. (Q rv and Q' rv) s \ Q'' rv s) = (\rv s. Q rv s \ Q' rv s \ Q'' rv s)" + by simp + +lemma conj_assoc_apply: + "(\rv s. (Q rv s \ Q' rv s) \ Q'' rv s) = (\rv s. Q rv s \ Q' rv s \ Q'' rv s)" + by simp + +lemma all_elim: + "(\rv s. \x. P rv s) = P" + by simp + +lemma all_conj_elim: + "(\rv s. (\x. P rv s) \ Q rv s) = (\rv s. P rv s \ Q rv s)" + by simp + +lemmas vcg_rhs_simps = + pred_conj_apply_elim pred_conj_conj_elim conj_assoc_apply all_elim all_conj_elim + +lemma if_apply_reduct: + "\P\ If P' (f x) (g x) \Q\ \ \P\ If P' f g x \Q\" + by (cases P'; simp) + +lemma if_apply_reductE: + "\P\ If P' (f x) (g x) \Q\,\E\ \ \P\ If P' f g x \Q\,\E\" + by (cases P'; simp) + +lemma if_apply_reductE_R: + "\P\ If P' (f x) (g x) \Q\,- \ \P\ If P' f g x \Q\,-" + by (cases P'; simp) + +lemmas hoare_wp_simps[wp_split] = + vcg_rhs_simps[THEN hoare_post_eq] vcg_rhs_simps[THEN hoare_post_eqE1] + vcg_rhs_simps[THEN hoare_post_eqE2] vcg_rhs_simps[THEN hoare_post_eqE_R] + if_apply_reduct if_apply_reductE if_apply_reductE_R TrueI + +schematic_goal if_apply_test: + "\?Q\ (if A then returnOk else K fail) x \P\,\E\" + by wpsimp + +lemma hoare_elim_pred_conj: + "\P\ f \\rv s. Q rv s \ Q' rv s\ \ \P\ f \\rv. Q rv and Q' rv\" + by (unfold pred_conj_def) + +lemma hoare_elim_pred_conjE1: + "\P\ f \\rv s. Q rv s \ Q' rv s\,\E\ \ \P\ f \\rv. Q rv and Q' rv\,\E\" + by (unfold pred_conj_def) + +lemma hoare_elim_pred_conjE2: + "\P\ f \Q\, \\rv s. E rv s \ E' rv s\ \ \P\ f \Q\,\\rv. E rv and E' rv\" + by (unfold pred_conj_def) + +lemma hoare_elim_pred_conjE_R: + "\P\ f \\rv s. Q rv s \ Q' rv s\,- \ \P\ f \\rv. Q rv and Q' rv\,-" + by (unfold pred_conj_def) + +lemmas hoare_wp_pred_conj_elims = + hoare_elim_pred_conj hoare_elim_pred_conjE1 + hoare_elim_pred_conjE2 hoare_elim_pred_conjE_R + + +subsection \Bundles\ + +bundle no_pre = hoare_pre [wp_pre del] + +bundle classic_wp_pre = hoare_pre [wp_pre del] + all_classic_wp_combs[wp_comb del] all_classic_wp_combs[wp_comb] + + +text \Miscellaneous lemmas on hoare triples\ + +lemma hoare_pre_cases: + "\ \\s. R s \ P s\ f \Q\; \\s. 
\R s \ P' s\ f \Q\ \ \ \P and P'\ f \Q\" + unfolding valid_def by fastforce + +lemma hoare_vcg_mp: + "\ \P\ f \Q\; \P\ f \\r s. Q r s \ Q' r s\ \ \ \P\ f \Q'\" + by (auto simp: valid_def split_def) + +(* note about this precond stuff: rules get a chance to bind directly + before any of their combined forms. As a result, these precondition + implication rules are only used when needed. *) +lemma hoare_add_post: + "\ \P'\ f \Q'\; \s. P s \ P' s; \P\ f \\rv s. Q' rv s \ Q rv s\ \ \ \P\ f \Q\" + unfolding valid_def + by fastforce + +lemma hoare_gen_asmE: + "(P \ \P'\ f \Q\,-) \ \P' and K P\ f \Q\, -" + by (simp add: validE_R_def validE_def valid_def) blast + +lemma hoare_list_case: + "\ \P1\ f f1 \Q\; \y ys. xs = y#ys \ \P2 y ys\ f (f2 y ys) \Q\ \ \ + \case xs of [] \ P1 | y#ys \ P2 y ys\ f (case xs of [] \ f1 | y#ys \ f2 y ys) \Q\" + by (cases xs; simp) + +lemmas whenE_wps[wp_split] = + whenE_wp whenE_wp[THEN validE_validE_R] whenE_wp[THEN validE_validE_E] + +lemmas unlessE_wps[wp_split] = + unlessE_wp unlessE_wp[THEN validE_validE_R] unlessE_wp[THEN validE_validE_E] + +lemma hoare_use_eq: + assumes "\P. \\s. P (f s)\ m \\_ s. P (f s)\" + assumes "\f. \\s. P f s\ m \\_ s. Q f s\" + shows "\\s. P (f s) s\ m \\_ s. Q (f s) s \" + apply (rule hoare_post_imp[where Q="\_ s. \y. y = f s \ Q y s"], simp) + apply (wpsimp wp: hoare_vcg_ex_lift assms) + done + +lemma hoare_fail_any[simp]: + "\P\ fail \Q\" + by wp + +lemma hoare_failE[simp]: + "\P\ fail \Q\, \E\" + by wp + +lemma hoare_validE_pred_conj: + "\ \P\ f \Q\, \E\; \P\ f \R\, \E\ \ \ \P\ f \Q and R\, \E\" + unfolding valid_def validE_def + by (simp add: split_def split: sum.splits) + +lemma hoare_validE_conj: + "\ \P\ f \Q\, \E\; \P\ f \R\, \E\ \ \ \P\ f \\rv s. Q rv s \ R rv s\, \E\" + unfolding valid_def validE_def + by (simp add: split_def split: sum.splits) + +lemmas hoare_valid_validE = valid_validE (* FIXME lib: eliminate one *) + +declare validE_validE_E[wp_comb] + +lemmas if_validE_E[wp_split] = + validE_validE_E[OF hoare_vcg_if_splitE[OF validE_E_validE validE_E_validE]] + +lemma hoare_drop_imp: + "\P\ f \Q\ \ \P\ f \\rv s. R rv s \ Q rv s\" + by (auto simp: valid_def) + +lemma hoare_drop_impE: + "\\P\ f \\r. Q\, \E\\ \ \P\ f \\rv s. R rv s \ Q s\, \E\" + by (simp add: hoare_chainE) + +lemma hoare_drop_impE_R: + "\P\ f \Q\,- \ \P\ f \\rv s. R rv s \ Q rv s\, -" + by (auto simp: validE_R_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_drop_impE_E: + "\P\ f -,\Q\ \ \P\ f -, \\rv s. R rv s \ Q rv s\" + by (auto simp: validE_E_def validE_def valid_def split_def split: sum.splits) + +lemmas hoare_drop_imps = hoare_drop_imp hoare_drop_impE_R hoare_drop_impE_E + +(*This is unsafe, but can be very useful when supplied as a comb rule.*) +lemma hoare_drop_imp_conj[wp_unsafe]: + "\ \P\ f \Q'\; \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ \ \ + \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" + by (auto simp: valid_def) + +lemmas hoare_drop_imp_conj'[wp_unsafe] = hoare_drop_imp_conj[where Q'''="\\", simplified] + +lemma hoare_vcg_set_pred_lift: + assumes "\P x. m \ \s. P (f x s) \" + shows "m \ \s. P {x. f x s} \" + using assms[where P="\x . x"] assms[where P=Not] use_valid + by (fastforce simp: valid_def elim!: subst[rotated, where P=P]) + +lemma hoare_vcg_set_pred_lift_mono: + assumes f: "\x. m \ f x \" + assumes mono: "\A B. A \ B \ P A \ P B" + shows "m \ \s. P {x. 
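As a small sketch of what the drop rules buy (hypothetical lemma name, not from the original development): hoare_drop_imp discards an implication in the postcondition whose antecedent is not needed, which is why hoare_drop_imps is often supplied to wp as extra rules.

  lemma hoare_drop_imp_test:
    "\<lbrace>P\<rbrace> f \<lbrace>\<lambda>_. P\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. R rv s \<longrightarrow> P s\<rbrace>"
    by (erule hoare_drop_imp)

Dropping the antecedent loses information, so these rules are kept out of the default [wp] set; the conjunction form above is marked [wp_unsafe] for the same reason.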
f x s} \" + by (fastforce simp: valid_def elim!: mono[rotated] dest: use_valid[OF _ f]) + +text \If a function contains an @{term assert}, or equivalent, then it might be + possible to strengthen the precondition of an already-proven hoare triple + @{text pos}, by additionally proving a side condition @{text neg}, that + violating some condition causes failure. The stronger hoare triple produced + by this theorem allows the precondition to assume that the condition is + satisfied.\ +lemma hoare_strengthen_pre_via_assert_forward: + assumes pos: "\ P \ f \ Q \" + assumes rel: "\s. S s \ P s \ N s" + assumes neg: "\ N \ f \ \\ \" + shows "\ S \ f \ Q \" + apply (rule hoare_weaken_pre) + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_disj_lift[OF pos neg]) + by (auto simp: rel) + +text \Like @{thm hoare_strengthen_pre_via_assert_forward}, strengthen a precondition + by proving a side condition that the negation of that condition would cause + failure. This version is intended for backward reasoning. Apply it to a goal to + obtain a stronger precondition after proving the side condition.\ +lemma hoare_strengthen_pre_via_assert_backward: + assumes neg: "\ Not \ E \ f \ \\ \" + assumes pos: "\ P and E \ f \ Q \" + shows "\ P \ f \ Q \" + by (rule hoare_strengthen_pre_via_assert_forward[OF pos _ neg], simp) + + +subsection \Strongest postcondition rules\ + +lemma get_sp: + "\P\ get \\rv s. s = rv \ P s\" + by(simp add:get_def valid_def) + +lemma put_sp: + "\\\ put a \\_ s. s = a\" + by(simp add:put_def valid_def) + +lemma return_sp: + "\P\ return a \\rv s. rv = a \ P s\" + by(simp add:return_def valid_def) + +lemma hoare_return_sp: (* FIXME lib: eliminate *) + "\P\ return x \\rv. P and K (rv = x)\" + by (simp add: valid_def return_def) + +lemma assert_sp: + "\P\ assert Q \\_ s. P s \ Q\" + by (simp add: assert_def fail_def return_def valid_def) + +lemma hoare_gets_sp: + "\P\ gets f \\rv s. rv = f s \ P s\" + by (simp add: valid_def simpler_gets_def) + +lemma hoare_returnOk_sp: + "\P\ returnOk x \\rv s. rv = x \ P s\, \Q\" + by (simp add: valid_def validE_def returnOk_def return_def) + +\ \For forward reasoning in Hoare proofs, these lemmas allow us to step over the + left-hand-side of monadic bind, while keeping the same precondition.\ + +named_theorems forward_inv_step_rules + +lemmas hoare_forward_inv_step_nobind[forward_inv_step_rules] = + bind_wp_nobind[where Q'=P and P=P for P, rotated] + +lemmas bind_wp_fwd_skip[forward_inv_step_rules] = + bind_wp_fwd[where Q'="\_. P" and P=P for P] + +lemmas hoare_forward_inv_step_nobindE_valid[forward_inv_step_rules] = + bindE_wp_nobind[where Q'=P and P=P and E="\_. Q" and Q="\_. Q" for P Q, + simplified validE_eq_valid, rotated] + +lemmas hoare_forward_inv_step_valid[forward_inv_step_rules] = + bindE_wp_fwd[where Q'="\_. P" and P=P and E="\_. Q" and Q="\_. Q" for P Q, + simplified validE_eq_valid] + +lemmas hoare_forward_inv_step_nobindE[forward_inv_step_rules] = + bindE_wp_nobind[where Q'=P and P=P for P, rotated] + +lemmas bindE_wp_fwd_skip[forward_inv_step_rules] = + bindE_wp_fwd[where Q'="\_. 
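To make the forward-reasoning setup concrete, a hypothetical example (not part of this changeset), assuming the forward_inv_step method defined just below and the [wp] rules above behave as declared: the first step keeps the precondition P while stepping over gets f, and wpsimp discharges what remains.

  lemma forward_inv_step_test:
    "\<lbrace>P\<rbrace> do x \<leftarrow> gets f; assert (A x) od \<lbrace>\<lambda>_. P\<rbrace>"
    apply forward_inv_step
    apply wpsimp
    done

The same goal is also provable by wpsimp alone; the point of the method is to preserve the shape of the precondition when reasoning forwards through longer binds.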
P" and P=P for P] + +lemmas hoare_forward_inv_step_nobindE_validE_E[forward_inv_step_rules] = + hoare_forward_inv_step_nobindE[where Q="\\", simplified validE_E_def[symmetric]] + +lemmas hoare_forward_inv_step_validE_E[forward_inv_step_rules] = + bindE_wp_fwd_skip[where Q="\\", simplified validE_E_def[symmetric]] + +lemmas hoare_forward_inv_step_nobindE_validE_R[forward_inv_step_rules] = + hoare_forward_inv_step_nobindE[where E="\\", simplified validE_R_def[symmetric]] + +lemmas hoare_forward_inv_step_validE_R[forward_inv_step_rules] = + bindE_wp_fwd_skip[where E="\\", simplified validE_R_def[symmetric]] + +method forward_inv_step uses wp simp = + rule forward_inv_step_rules, solves \wpsimp wp: wp simp: simp\ + +end diff --git a/lib/Monad_WP/WhileLoopRules.thy b/lib/Monads/nondet/Nondet_While_Loop_Rules.thy similarity index 59% rename from lib/Monad_WP/WhileLoopRules.thy rename to lib/Monads/nondet/Nondet_While_Loop_Rules.thy index abd2b8d528..8cd774ab7d 100644 --- a/lib/Monad_WP/WhileLoopRules.thy +++ b/lib/Monads/nondet/Nondet_While_Loop_Rules.thy @@ -4,28 +4,32 @@ * SPDX-License-Identifier: BSD-2-Clause *) -theory WhileLoopRules -imports "NonDetMonadVCG" +theory Nondet_While_Loop_Rules + imports + Nondet_Empty_Fail + Nondet_Total + Nondet_Sat begin section "Well-ordered measures" -(* A version of "measure" that takes any wellorder, instead of - * being fixed to "nat". *) -definition measure' :: "('a \ 'b::wellorder) => ('a \ 'a) set" -where "measure' = (\f. {(a, b). f a < f b})" +(* A version of "measure" that takes any wellorder, instead of being fixed to "nat". *) +definition measure' :: "('a \ 'b::wellorder) => ('a \ 'a) set" where + "measure' = (\f. {(a, b). f a < f b})" lemma in_measure'[simp, code_unfold]: - "((x,y) : measure' f) = (f x < f y)" + "((x,y) \ measure' f) = (f x < f y)" by (simp add:measure'_def) -lemma wf_measure' [iff]: "wf (measure' f)" +lemma wf_measure'[iff]: + "wf (measure' f)" apply (clarsimp simp: measure'_def) apply (insert wf_inv_image [OF wellorder_class.wf, where f=f]) apply (clarsimp simp: inv_image_def) done -lemma wf_wellorder_measure: "wf {(a, b). (M a :: 'a :: wellorder) < M b}" +lemma wf_wellorder_measure: + "wf {(a, b). (M a :: 'a :: wellorder) < M b}" apply (subgoal_tac "wf (inv_image ({(a, b). a < b}) M)") apply (clarsimp simp: inv_image_def) apply (rule wf_inv_image) @@ -41,27 +45,32 @@ text \ @{const whileLoop} terms with information that can be used by automated tools. \ -definition - "whileLoop_inv (C :: 'a \ 'b \ bool) B x (I :: 'a \ 'b \ bool) (R :: (('a \ 'b) \ ('a \ 'b)) set) \ whileLoop C B x" - -definition - "whileLoopE_inv (C :: 'a \ 'b \ bool) B x (I :: 'a \ 'b \ bool) (R :: (('a \ 'b) \ ('a \ 'b)) set) \ whileLoopE C B x" - -lemma whileLoop_add_inv: "whileLoop B C = (\x. whileLoop_inv B C x I (measure' M))" +definition whileLoop_inv :: + "('a \ 'b \ bool) \ ('a \ ('b, 'a) nondet_monad) \ 'a \ ('a \ 'b \ bool) \ + (('a \ 'b) \ 'a \ 'b) set \ ('b, 'a) nondet_monad" + where + "whileLoop_inv C B x I R \ whileLoop C B x" + +definition whileLoopE_inv :: + "('a \ 'b \ bool) \ ('a \ ('b, 'c + 'a) nondet_monad) \ 'a \ ('a \ 'b \ bool) \ + (('a \ 'b) \ 'a \ 'b) set \ ('b, 'c + 'a) nondet_monad" + where + "whileLoopE_inv C B x I R \ whileLoopE C B x" + +lemma whileLoop_add_inv: + "whileLoop B C = (\x. whileLoop_inv B C x I (measure' M))" by (clarsimp simp: whileLoop_inv_def) -lemma whileLoopE_add_inv: "whileLoopE B C = (\x. whileLoopE_inv B C x I (measure' M))" +lemma whileLoopE_add_inv: + "whileLoopE B C = (\x. 
whileLoopE_inv B C x I (measure' M))" by (clarsimp simp: whileLoopE_inv_def) subsection "Simple base rules" lemma whileLoop_terminates_unfold: - "\ whileLoop_terminates C B r s; (r', s') \ fst (B r s); C r s \ - \ whileLoop_terminates C B r' s'" - apply (erule whileLoop_terminates.cases) - apply simp - apply force - done + "\ whileLoop_terminates C B r s; (r', s') \ fst (B r s); C r s \ \ + whileLoop_terminates C B r' s'" + by (force elim!: whileLoop_terminates.cases) lemma snd_whileLoop_first_step: "\ \ snd (whileLoop C B r s); C r s \ \ \ snd (B r s)" apply (subst (asm) whileLoop_unroll) @@ -77,21 +86,16 @@ lemma snd_whileLoopE_first_step: "\ \ snd (whileLoopE C B r s); C r done lemma snd_whileLoop_unfold: - "\ \ snd (whileLoop C B r s); C r s; (r', s') \ fst (B r s) \ \ \ snd (whileLoop C B r' s')" + "\ \ snd (whileLoop C B r s); C r s; (r', s') \ fst (B r s) \ \ \ snd (whileLoop C B r' s')" apply (clarsimp simp: whileLoop_def) - apply (auto simp: elim: whileLoop_results.cases whileLoop_terminates.cases - intro: whileLoop_results.intros whileLoop_terminates.intros) + apply (auto elim: whileLoop_results.cases whileLoop_terminates.cases + intro: whileLoop_results.intros whileLoop_terminates.intros) done lemma snd_whileLoopE_unfold: - "\ \ snd (whileLoopE C B r s); (Inr r', s') \ fst (B r s); C r s \ \ \ snd (whileLoopE C B r' s')" - apply (clarsimp simp: whileLoopE_def) - apply (drule snd_whileLoop_unfold) - apply clarsimp - apply (clarsimp simp: lift_def) - apply assumption - apply (clarsimp simp: lift_def) - done + "\ \ snd (whileLoopE C B r s); (Inr r', s') \ fst (B r s); C r s \ \ \ snd (whileLoopE C B r' s')" + unfolding whileLoopE_def + by (fastforce simp: lift_def dest: snd_whileLoop_unfold) lemma whileLoop_results_cong [cong]: assumes C: "\r s. C r s = C' r s" @@ -101,26 +105,17 @@ proof - { fix x y C B C' B' have "\ (x, y) \ whileLoop_results C B; - \(r :: 'r) (s :: 's). C r s = C' r s; - \r s. C' r s \ B r s = B' r s \ - \ (x, y) \ whileLoop_results C' B'" - apply (induct rule: whileLoop_results.induct) - apply clarsimp - apply clarsimp - apply (rule whileLoop_results.intros, auto)[1] - apply clarsimp - apply (rule whileLoop_results.intros, auto)[1] - done + \(r :: 'r) (s :: 's). C r s = C' r s; \r s. C' r s \ B r s = B' r s \ + \ (x, y) \ whileLoop_results C' B'" + by (induct rule: whileLoop_results.induct) (auto intro: whileLoop_results.intros) } thus ?thesis apply - - apply (rule set_eqI, rule iffI) - apply (clarsimp split: prod.splits) - apply (clarsimp simp: C B split: prod.splits) - apply (clarsimp split: prod.splits) - apply (clarsimp simp: C [symmetric] B [symmetric] split: prod.splits) - done + apply (rule set_eqI, rule iffI; clarsimp split: prod.splits) + apply (clarsimp simp: C B) + apply (clarsimp simp: C [symmetric] B [symmetric]) + done qed lemma whileLoop_terminates_cong [cong]: @@ -152,42 +147,30 @@ next done qed -lemma whileLoop_cong [cong]: +lemma whileLoop_cong[cong]: "\ \r s. C r s = C' r s; \r s. C r s \ B r s = B' r s \ \ whileLoop C B = whileLoop C' B'" - apply (rule ext, clarsimp simp: whileLoop_def) - done + by (clarsimp simp: whileLoop_def) -lemma whileLoopE_cong [cong]: - "\ \r s. C r s = C' r s ; \r s. C r s \ B r s = B' r s \ - \ whileLoopE C B = whileLoopE C' B'" - apply (clarsimp simp: whileLoopE_def) - apply (rule whileLoop_cong [THEN arg_cong]) - apply (clarsimp split: sum.splits) - apply (clarsimp split: sum.splits) - apply (clarsimp simp: lift_def throwError_def split: sum.splits) - done +lemma whileLoopE_cong[cong]: + "\ \r s. 
C r s = C' r s ; \r s. C r s \ B r s = B' r s \ \ whileLoopE C B = whileLoopE C' B'" + unfolding whileLoopE_def + by (rule whileLoop_cong[THEN arg_cong]; clarsimp simp: lift_def throwError_def split: sum.splits) lemma whileLoop_terminates_wf: - "wf {(x, y). C (fst y) (snd y) \ x \ fst (B (fst y) (snd y)) \ whileLoop_terminates C B (fst y) (snd y)}" - apply (rule wfI [where A="UNIV" and B="{(r, s). whileLoop_terminates C B r s}"]) - apply clarsimp - apply clarsimp - apply (erule whileLoop_terminates.induct) - apply blast - apply blast + "wf {(x, y). C (fst y) (snd y) \ x \ fst (B (fst y) (snd y)) \ whileLoop_terminates C B (fst y) (snd y)}" + apply (rule wfI[where A="UNIV" and B="{(r, s). whileLoop_terminates C B r s}"]; clarsimp) + apply (erule whileLoop_terminates.induct; blast) done subsection "Basic induction helper lemmas" lemma whileLoop_results_induct_lemma1: - "\ (a, b) \ whileLoop_results C B; b = Some (x, y) \ \ \ C x y" - apply (induct rule: whileLoop_results.induct, auto) - done + "\ (a, b) \ whileLoop_results C B; b = Some (x, y) \ \ \ C x y" + by (induct rule: whileLoop_results.induct) auto lemma whileLoop_results_induct_lemma1': - "\ (a, b) \ whileLoop_results C B; a \ b \ \ \x. a = Some x \ C (fst x) (snd x)" - apply (induct rule: whileLoop_results.induct, auto) - done + "\ (a, b) \ whileLoop_results C B; a \ b \ \ \x. a = Some x \ C (fst x) (snd x)" + by (induct rule: whileLoop_results.induct) auto lemma whileLoop_results_induct_lemma2 [consumes 1]: "\ (a, b) \ whileLoop_results C B; @@ -206,7 +189,7 @@ lemma whileLoop_results_induct_lemma3 [consumes 1]: and inv_step: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ \ I r' s'" shows "I r' s'" apply (rule whileLoop_results_induct_lemma2 [where P="case_prod I" and y="(r', s')" and x="(r, s)", - simplified case_prod_unfold, simplified]) + simplified case_prod_unfold, simplified]) apply (rule result) apply simp apply simp @@ -215,16 +198,14 @@ lemma whileLoop_results_induct_lemma3 [consumes 1]: apply clarsimp done -subsection "Inductive reasoning about whileLoop results" +subsection "Inductive reasoning about @{const whileLoop} results" -lemma in_whileLoop_induct [consumes 1]: +lemma in_whileLoop_induct[consumes 1]: assumes in_whileLoop: "(r', s') \ fst (whileLoop C B r s)" and init_I: "\ r s. \ C r s \ I r s r s" - and step: - "\r s r' s' r'' s''. - \ C r s; (r', s') \ fst (B r s); - (r'', s'') \ fst (whileLoop C B r' s'); - I r' s' r'' s'' \ \ I r s r'' s''" + and step: "\r s r' s' r'' s''. \ C r s; (r', s') \ fst (B r s); + (r'', s'') \ fst (whileLoop C B r' s'); + I r' s' r'' s'' \ \ I r s r'' s''" shows "I r s r' s'" proof cases assume "C r s" @@ -236,13 +217,11 @@ proof cases by blast have "\ (a, b) \ whileLoop_results C B; \x. a = Some x; \x. 
b = Some x \ - \ I (fst (the a)) (snd (the a)) (fst (the b)) (snd (the b))" - apply (induct rule: whileLoop_results.induct) - apply (auto simp: init_I whileLoop_def intro: step) - done + \ I (fst (the a)) (snd (the a)) (fst (the b)) (snd (the b))" + by (induct rule: whileLoop_results.induct) + (auto simp: init_I whileLoop_def intro: step) - hence "(Some (r, s), Some (r', s')) \ whileLoop_results C B - \ I r s r' s'" + hence "(Some (r, s), Some (r', s')) \ whileLoop_results C B \ I r s r' s'" by (clarsimp simp: a_def b_def) } @@ -254,8 +233,7 @@ next hence "r' = r \ s' = s" using in_whileLoop - by (subst (asm) whileLoop_unroll, - clarsimp simp: condition_def return_def) + by (subst (asm) whileLoop_unroll, clarsimp simp: condition_def return_def) thus ?thesis by (metis init_I \\ C r s\) @@ -265,10 +243,8 @@ lemma snd_whileLoop_induct [consumes 1]: assumes induct: "snd (whileLoop C B r s)" and terminates: "\ whileLoop_terminates C B r s \ I r s" and init: "\ r s. \ snd (B r s); C r s \ \ I r s" - and step: "\r s r' s' r'' s''. - \ C r s; (r', s') \ fst (B r s); - snd (whileLoop C B r' s'); - I r' s' \ \ I r s" + and step: "\r s r' s' r'' s''. \ C r s; (r', s') \ fst (B r s); snd (whileLoop C B r' s'); + I r' s' \ \ I r s" shows "I r s" apply (insert init induct) apply atomize @@ -276,8 +252,7 @@ lemma snd_whileLoop_induct [consumes 1]: apply clarsimp apply (erule disjE) apply (erule rev_mp) - apply (induct "Some (r, s)" "None :: ('a \ 'b) option" - arbitrary: r s rule: whileLoop_results.induct) + apply (induct "Some (r, s)" "None :: ('a \ 'b) option" arbitrary: r s rule: whileLoop_results.induct) apply clarsimp apply clarsimp apply (erule (1) step) @@ -296,18 +271,14 @@ lemma whileLoop_terminatesE_induct [consumes 1]: apply (subgoal_tac "(\r s. case (Inr r) of Inl x \ True | Inr x \ I x s) r s") apply simp apply (induction rule: whileLoop_terminates.induct) - apply (case_tac r) - apply simp - apply clarsimp - apply (erule init) + apply (clarsimp split: sum.splits elim!: init) apply (clarsimp split: sum.splits) apply (rule step) apply simp - apply (clarsimp simp: lift_def split: sum.splits) - apply force + apply (force simp: lift_def split: sum.splits) done -subsection "Direct reasoning about whileLoop components" +subsection "Direct reasoning about @{const whileLoop} components" lemma fst_whileLoop_cond_false: assumes loop_result: "(r', s') \ fst (whileLoop C B r s)" @@ -315,23 +286,35 @@ lemma fst_whileLoop_cond_false: using loop_result by (rule in_whileLoop_induct, auto) +lemma whileLoop_terminates_results: + assumes non_term: "\r. \ \s. I r s \ C r s \ \ snd (B r s) \ B r \\ \r' s'. C r' s' \ I r' s' \" + shows + "\whileLoop_terminates C B r s; (Some (r, s), None) \ whileLoop_results C B; I r s; C r s\ + \ False" +proof (induct rule: whileLoop_terminates.induct) + case (1 r s) + then show ?case + apply clarsimp + done +next + case (2 r s) + then show ?case + apply (cut_tac non_term[where r=r]) + apply (clarsimp simp: exs_valid_def) + apply (subst (asm) (2) whileLoop_results.simps) + apply clarsimp + apply (insert whileLoop_results.simps) + apply fast + done +qed + lemma snd_whileLoop: assumes init_I: "I r s" - and cond_I: "C r s" - and non_term: "\r. \ \s. I r s \ C r s \ \ snd (B r s) \ - B r \\ \r' s'. C r' s' \ I r' s' \" - shows "snd (whileLoop C B r s)" + and cond_I: "C r s" + and non_term: "\r. \ \s. I r s \ C r s \ \ snd (B r s) \ B r \\ \r' s'. 
C r' s' \ I r' s' \" + shows "snd (whileLoop C B r s)" apply (clarsimp simp: whileLoop_def) - apply (rotate_tac) - apply (insert init_I cond_I) - apply (induct rule: whileLoop_terminates.induct) - apply clarsimp - apply (cut_tac r=r in non_term) - apply (clarsimp simp: exs_valid_def) - apply (subst (asm) (2) whileLoop_results.simps) - apply clarsimp - apply (insert whileLoop_results.simps) - apply fast + apply (erule (1) whileLoop_terminates_results[OF non_term _ _ init_I cond_I]) done lemma whileLoop_terminates_inv: @@ -355,16 +338,16 @@ lemma not_snd_whileLoop: assumes init_I: "I r s" and inv_holds: "\r s. \\s'. I r s' \ C r s' \ s' = s \ B r \ \r' s'. I r' s' \ ((r', s'), (r, s)) \ R \!" and wf_R: "wf R" - shows "\ snd (whileLoop C B r s)" + shows "\ snd (whileLoop C B r s)" proof - { fix x y have "\ (x, y) \ whileLoop_results C B; x = Some (r, s); y = None \ \ False" apply (insert init_I) apply (induct arbitrary: r s rule: whileLoop_results.inducts) - apply simp apply simp - apply (insert snd_validNF [OF inv_holds])[1] + apply simp + apply (insert validNF_not_failed[OF inv_holds])[1] apply blast apply (drule use_validNF [OF _ inv_holds]) apply simp @@ -373,20 +356,20 @@ proof - } also have "whileLoop_terminates C B r s" - apply (rule whileLoop_terminates_inv [where I=I, OF init_I _ wf_R]) - apply (insert inv_holds) - apply (clarsimp simp: validNF_def) - done + apply (rule whileLoop_terminates_inv [where I=I, OF init_I _ wf_R]) + apply (insert inv_holds) + apply (clarsimp simp: validNF_def) + done ultimately show ?thesis - by (clarsimp simp: whileLoop_def, blast) + by (clarsimp simp: whileLoop_def, blast) qed lemma valid_whileLoop: assumes first_step: "\s. P r s \ I r s" and inv_step: "\r. \ \s. I r s \ C r s \ B r \ I \" and final_step: "\r s. \ I r s; \ C r s \ \ Q r s" - shows "\ P r \ whileLoop C B r \ Q \" + shows "\ P r \ whileLoop C B r \ Q \" proof - { fix r' s' s @@ -394,7 +377,7 @@ proof - assume step: "(r', s') \ fst (whileLoop C B r s)" have "I r' s'" - using step inv + using step inv apply (induct rule: in_whileLoop_induct) apply simp apply (drule use_valid, rule inv_step, auto) @@ -415,9 +398,25 @@ lemma whileLoop_wp: \ I r \ whileLoop C B r \ Q \" by (rule valid_whileLoop) +lemma whileLoop_valid_inv: + "(\r. \ \s. I r s \ C r s \ B r \ I \) \ \ I r \ whileLoop C B r \ I \" + apply (fastforce intro: whileLoop_wp) + done + +lemma valid_whileLoop_cond_fail: + assumes pre_implies_post: "\s. P r s \ Q r s" + and pre_implies_fail: "\s. P r s \ \ C r s" + shows "\ P r \ whileLoop C B r \ Q \" + using assms + apply (clarsimp simp: valid_def) + apply (subst (asm) whileLoop_cond_fail) + apply blast + apply (clarsimp simp: return_def) + done + lemma whileLoop_wp_inv [wp]: "\ \r. \\s. I r s \ C r s\ B r \I\; \r s. \I r s; \ C r s\ \ Q r s \ - \ \ I r \ whileLoop_inv C B r I M \ Q \" + \ \ I r \ whileLoop_inv C B r I M \ Q \" apply (clarsimp simp: whileLoop_inv_def) apply (rule valid_whileLoop [where P=I and I=I], auto) done @@ -425,11 +424,11 @@ lemma whileLoop_wp_inv [wp]: lemma validE_whileLoopE: "\\s. P r s \ I r s; \r. \ \s. I r s \ C r s \ B r \ I \,\ A \; - \r s. \ I r s; \ C r s \ \ Q r s - \ \ \ P r \ whileLoopE C B r \ Q \,\ A \" + \r s. \ I r s; \ C r s \ \ Q r s\ \ + \ P r \ whileLoopE C B r \ Q \,\ A \" apply (clarsimp simp: whileLoopE_def validE_def) apply (rule valid_whileLoop [where I="\r s. (case r of Inl x \ A x s | Inr x \ I x s)" - and Q="\r s. (case r of Inl x \ A x s | Inr x \ Q x s)"]) + and Q="\r s. 
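A hypothetical end-to-end use of the invariant annotation (names and proof script are an illustrative sketch, not part of this changeset): whileLoop_add_inv attaches an invariant and a measure, after which the [wp] rule whileLoop_wp_inv reduces the goal to invariant preservation plus the exit condition. The measure is irrelevant for this partial-correctness triple, so any term of the right type will do.

  lemma whileLoop_count_down_test:
    "\<lbrace>\<lambda>_. True\<rbrace> whileLoop (\<lambda>r s. r \<noteq> 0) (\<lambda>r. return (r - 1)) (n :: nat) \<lbrace>\<lambda>rv _. rv = 0\<rbrace>"
    apply (subst whileLoop_add_inv[where I="\<lambda>_ _. True" and M="\<lambda>(r, _). r"])
    apply wpsimp
    done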
(case r of Inl x \ A x s | Inr x \ Q x s)"]) apply atomize apply (clarsimp simp: valid_def lift_def split: sum.splits) apply (clarsimp simp: valid_def lift_def split: sum.splits) @@ -443,41 +442,44 @@ lemma whileLoopE_wp: by (rule validE_whileLoopE) lemma exs_valid_whileLoop: - assumes init_T: "\s. P s \ T r s" - and iter_I: "\ r s0. - \ \s. T r s \ C r s \ s = s0 \ - B r \\\r' s'. T r' s' \ ((r', s'),(r, s0)) \ R\" + assumes init_T: "\s. P s \ T r s" + and iter_I: "\r s0. \\s. T r s \ C r s \ s = s0\ B r \\\r' s'. T r' s' \ ((r', s'),(r, s0)) \ R\" and wf_R: "wf R" and final_I: "\r s. \ T r s; \ C r s \ \ Q r s" - shows "\ P \ whileLoop C B r \\ Q \" -proof (clarsimp simp: exs_valid_def Bex_def) - fix s - assume "P s" - + shows "\ P \ whileLoop C B r \\ Q \" +proof - { - fix x - have "T (fst x) (snd x) \ - \r' s'. (r', s') \ fst (whileLoop C B (fst x) (snd x)) \ T r' s'" - using wf_R - apply induction - apply atomize - apply (case_tac "C (fst x) (snd x)") - apply (subst whileLoop_unroll) - apply (clarsimp simp: condition_def bind_def' split: prod.splits) - apply (cut_tac ?s0.0=b and r=a in iter_I) - apply (clarsimp simp: exs_valid_def) - apply blast - apply (subst whileLoop_unroll) - apply (clarsimp simp: condition_def bind_def' return_def) - done + fix s + assume "P s" + + { + fix x + have "T (fst x) (snd x) \ \r' s'. (r', s') \ fst (whileLoop C B (fst x) (snd x)) \ T r' s'" + using wf_R + proof induct + case (less x) + then show ?case + apply atomize + apply (cases "C (fst x) (snd x)") + apply (subst whileLoop_unroll) + apply (clarsimp simp: condition_def bind_def') + apply (cut_tac iter_I[where ?s0.0="snd x" and r="fst x"]) + apply (clarsimp simp: exs_valid_def) + apply blast + apply (subst whileLoop_unroll) + apply (cases x) + apply (clarsimp simp: condition_def bind_def' return_def) + done + qed + } + + then have "\r' s'. (r', s') \ fst (whileLoop C B r s) \ Q r' s'" + by (metis \P s\ fst_conv init_T snd_conv final_I fst_whileLoop_cond_false) } - - thus "\r' s'. (r', s') \ fst (whileLoop C B r s) \ Q r' s'" - by (metis \P s\ fst_conv init_T snd_conv - final_I fst_whileLoop_cond_false) + thus ?thesis by (clarsimp simp: exs_valid_def Bex_def) qed -lemma empty_fail_whileLoop: +lemma empty_fail_whileLoop[empty_fail_cond, intro!, wp]: assumes body_empty_fail: "\r. empty_fail (B r)" shows "empty_fail (whileLoop C B r)" proof - @@ -495,8 +497,7 @@ proof - apply fact apply (rule cond_true, fact) apply (clarsimp simp: exs_valid_def) - apply (case_tac "fst (B r s) = {}") - apply (metis empty_failD [OF body_empty_fail]) + apply (drule empty_failD3[OF body_empty_fail]) apply (subst (asm) whileLoop_unroll) apply (fastforce simp: condition_def bind_def split_def cond_true) done @@ -506,43 +507,70 @@ proof - by (clarsimp simp: empty_fail_def) qed -lemma empty_fail_whileLoopE: - assumes body_empty_fail: "\r. empty_fail (B r)" +lemma empty_fail_whileLoopE[empty_fail_cond, intro!, wp]: + assumes "\r. 
empty_fail (B r)" shows "empty_fail (whileLoopE C B r)" - apply (clarsimp simp: whileLoopE_def) - apply (rule empty_fail_whileLoop) - apply (insert body_empty_fail) - apply (clarsimp simp: empty_fail_def lift_def throwError_def return_def split: sum.splits) - done + by (clarsimp simp: whileLoopE_def assms) -lemma whileLoop_results_bisim: +lemma empty_fail_whileM[empty_fail_cond, intro!, wp]: + "\ empty_fail C; empty_fail B \ \ empty_fail (whileM C B)" + unfolding whileM_def + by (wpsimp wp: empty_fail_whileLoop empty_fail_bind) + +lemma whileLoop_results_bisim_helper: assumes base: "(a, b) \ whileLoop_results C B" - and vars1: "Q = (case a of Some (r, s) \ Some (rt r, st s) | _ \ None)" - and vars2: "R = (case b of Some (r, s) \ Some (rt r, st s) | _ \ None)" - and inv_init: "case a of Some (r, s) \ I r s | _ \ True" - and inv_step: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ \ I r' s'" - and cond_match: "\r s. I r s \ C r s = C' (rt r) (st s)" - and fail_step: "\r s. \C r s; snd (B r s); I r s\ - \ (Some (rt r, st s), None) \ whileLoop_results C' B'" - and refine: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ - \ (rt r', st s') \ fst (B' (rt r) (st s))" - shows "(Q, R) \ whileLoop_results C' B'" - apply (subst vars1) - apply (subst vars2) - apply (insert base inv_init) - apply (induct rule: whileLoop_results.induct) + and inv_init: "case a of Some (r, s) \ I r s | _ \ True" + and inv_step: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ \ I r' s'" + and cond_match: "\r s. I r s \ C r s = C' (rt r) (st s)" + and fail_step: "\r s. \C r s; snd (B r s); I r s\ + \ (Some (rt r, st s), None) \ whileLoop_results C' B'" + and refine: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ + \ (rt r', st s') \ fst (B' (rt r) (st s))" + defines [simp]: "Q x \ (case x of Some (r, s) \ Some (rt r, st s) | _ \ None)" + and [simp]: "R y\ (case y of Some (r, s) \ Some (rt r, st s) | _ \ None)" + shows "(Q a, R b) \ whileLoop_results C' B'" + using base inv_init +proof (induct rule: whileLoop_results.induct) + case (1 r s) + then show ?case apply clarsimp apply (subst (asm) cond_match) apply (clarsimp simp: option.splits) apply (clarsimp simp: option.splits) - apply (clarsimp simp: option.splits) - apply (metis fail_step) - apply (case_tac z) - apply (clarsimp simp: option.splits) - apply (metis cond_match inv_step refine whileLoop_results.intros(3)) - apply (clarsimp simp: option.splits) - apply (metis cond_match inv_step refine whileLoop_results.intros(3)) - done + done +next + case (2 r s) + then show ?case + apply (clarsimp simp: option.splits) + apply (metis fail_step) + done +next + case (3 r s r' s' z) + then show ?case + apply (cases z) + apply (clarsimp simp: option.splits) + apply (metis cond_match inv_step refine whileLoop_results.intros(3)) + apply (clarsimp simp: option.splits) + apply (metis cond_match inv_step refine whileLoop_results.intros(3)) + done +qed + +lemma whileLoop_results_bisim: + assumes base: "(a, b) \ whileLoop_results C B" + and vars1: "Q = (case a of Some (r, s) \ Some (rt r, st s) | _ \ None)" + and vars2: "R = (case b of Some (r, s) \ Some (rt r, st s) | _ \ None)" + and inv_init: "case a of Some (r, s) \ I r s | _ \ True" + and inv_step: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ \ I r' s'" + and cond_match: "\r s. I r s \ C r s = C' (rt r) (st s)" + and fail_step: "\r s. \C r s; snd (B r s); I r s\ + \ (Some (rt r, st s), None) \ whileLoop_results C' B'" + and refine: "\r s r' s'. 
\ I r s; C r s; (r', s') \ fst (B r s) \ + \ (rt r', st s') \ fst (B' (rt r) (st s))" + shows "(Q, R) \ whileLoop_results C' B'" + apply (subst vars1, subst vars2) + apply (rule whileLoop_results_bisim_helper) + apply (rule assms; assumption?)+ + done lemma whileLoop_terminates_liftE: "whileLoop_terminatesE C (\r. liftE (B r)) r s = whileLoop_terminates C B r s" @@ -555,14 +583,18 @@ lemma whileLoop_terminates_liftE: apply (clarsimp simp: split_def) apply (rule whileLoop_terminates.intros(2)) apply clarsimp - apply (clarsimp simp: liftE_def in_bind return_def lift_def [abs_def] - bind_def lift_def throwError_def o_def split: sum.splits - cong: sum.case_cong) + apply (clarsimp simp: liftE_def in_bind return_def lift_def[abs_def] + bind_def lift_def throwError_def o_def + split: sum.splits + cong: sum.case_cong) apply (drule (1) bspec) apply clarsimp - apply (subgoal_tac "case (Inr r) of Inl _ \ True | Inr r \ - whileLoop_terminates (\r s. (\r s. case r of Inl _ \ False | Inr v \ C v s) (Inr r) s) - (\r. (lift (\r. liftE (B r)) (Inr r)) >>= (\x. return (theRight x))) r s") + apply (subgoal_tac + "case Inr r of + Inl _ \ True + | Inr r \ whileLoop_terminates + (\r s. (\r s. case r of Inl _ \ False | Inr v \ C v s) (Inr r) s) + (\r. lift (\r. liftE (B r)) (Inr r) >>= (\x. return (projr x))) r s") apply (clarsimp simp: liftE_def lift_def) apply (erule whileLoop_terminates.induct) apply (clarsimp simp: liftE_def lift_def split: sum.splits) @@ -573,10 +605,14 @@ lemma whileLoop_terminates_liftE: apply force done -lemma snd_X_return [simp]: - "\A X s. snd ((A >>= (\a. return (X a))) s) = snd (A s)" +lemma snd_X_return[simp]: + "snd ((A >>= (\a. return (X a))) s) = snd (A s)" by (clarsimp simp: return_def bind_def split_def) +lemma isr_Inr_projr: + "\ isl a \ (a = Inr b) = (b = projr a)" + by auto + lemma whileLoopE_liftE: "whileLoopE C (\r. liftE (B r)) r = liftE (whileLoop C B r)" apply (rule ext) @@ -584,26 +620,34 @@ lemma whileLoopE_liftE: apply (rule prod_eqI) apply (rule set_eqI, rule iffI) apply clarsimp - apply (clarsimp simp: in_bind whileLoop_def liftE_def) - apply (rule_tac x="b" in exI) - apply (rule_tac x="theRight a" in exI) + apply (clarsimp simp: in_liftE whileLoop_def) + \ \The schematic existential is instantiated by @{text "subst isr_Inr_projr ... rule refl"} in two lines\ + apply (rule exI) apply (rule conjI) - apply (erule whileLoop_results_bisim [where rt=theRight and st="\x. x" and I="\r s. case r of Inr x \ True | _ \ False"], - auto intro: whileLoop_results.intros simp: bind_def return_def lift_def split: sum.splits)[1] - apply (drule whileLoop_results_induct_lemma2 [where P="\(r, s). case r of Inr x \ True | _ \ False"] ) + apply (subst isr_Inr_projr) + prefer 2 + apply (rule refl) + apply (drule whileLoop_results_induct_lemma2[where P="\(r, s). \ isl r"]) + apply (rule refl) apply (rule refl) - apply (rule refl) - apply clarsimp - apply (clarsimp simp: return_def bind_def lift_def split: sum.splits) - apply (clarsimp simp: return_def bind_def lift_def split: sum.splits) - apply (clarsimp simp: in_bind whileLoop_def liftE_def) - apply (erule whileLoop_results_bisim [where rt=Inr and st="\x. x" and I="\r s. True"], - auto intro: whileLoop_results.intros intro!: bexI simp: bind_def return_def lift_def split: sum.splits)[1] + apply clarsimp + apply (clarsimp simp: return_def bind_def lift_def liftE_def split: sum.splits) + apply clarsimp + apply (erule whileLoop_results_bisim[where rt=projr + and st="\x. x" + and I="\r s. 
\ isl r"], + auto intro: whileLoop_results.intros simp: bind_def return_def lift_def liftE_def split: sum.splits)[1] + apply (clarsimp simp: in_liftE whileLoop_def) + apply (erule whileLoop_results_bisim[where rt=Inr and st="\x. x" and I="\r s. True"], + auto intro: whileLoop_results.intros intro!: bexI + simp: bind_def return_def lift_def liftE_def split: sum.splits)[1] apply (rule iffI) apply (clarsimp simp: whileLoop_def liftE_def del: notI) apply (erule disjE) - apply (erule whileLoop_results_bisim [where rt=theRight and st="\x. x" and I="\r s. case r of Inr x \ True | _ \ False"], - auto intro: whileLoop_results.intros simp: bind_def return_def lift_def split: sum.splits)[1] + apply (erule whileLoop_results_bisim[where rt=projr + and st="\x. x" + and I="\r s. \ isl r"], + auto intro: whileLoop_results.intros simp: bind_def return_def lift_def split: sum.splits)[1] apply (subst (asm) whileLoop_terminates_liftE [symmetric]) apply (fastforce simp: whileLoop_def liftE_def whileLoop_terminatesE_def) apply (clarsimp simp: whileLoop_def liftE_def del: notI) @@ -614,7 +658,8 @@ lemma whileLoopE_liftE: apply (clarsimp split: option.splits) apply (clarsimp split: option.splits) apply (clarsimp split: option.splits) - apply (auto intro: whileLoop_results.intros intro!: bexI simp: bind_def return_def lift_def split: sum.splits) + apply (auto intro: whileLoop_results.intros intro!: bexI simp: bind_def return_def lift_def + split: sum.splits) done lemma validNF_whileLoop: @@ -626,8 +671,7 @@ lemma validNF_whileLoop: apply rule apply (rule valid_whileLoop) apply fact - apply (insert inv, clarsimp simp: validNF_def - valid_def split: prod.splits, force)[1] + apply (insert inv, clarsimp simp: validNF_def valid_def split: prod.splits, force)[1] apply (metis post_cond) apply (unfold no_fail_def) apply (intro allI impI) @@ -653,7 +697,7 @@ lemma validNF_whileLoop_inv_measure [wp]: and post_cond: "\r s. \I r s; \ C r s\ \ Q r s" shows "\I r\ whileLoop_inv C B r I (measure' (\(r, s). M r s)) \Q\!" apply (clarsimp simp: whileLoop_inv_def) - apply (rule validNF_whileLoop [where R="measure' (\(r, s). M r s)" and I=I]) + apply (rule validNF_whileLoop[where R="measure' (\(r, s). M r s)" and I=I]) apply simp apply clarsimp apply (rule inv) @@ -686,30 +730,24 @@ lemma validNF_whileLoopE: and post_cond: "\r s. \I r s; \ C r s\ \ Q r s" shows "\ P r \ whileLoopE C B r \ Q \,\ E \!" apply (unfold validE_NF_alt_def whileLoopE_def) - apply (rule validNF_whileLoop [ - where I="\r s. case r of Inl x \ E x s | Inr x \ I x s" - and R="{((r', s'), (r, s)). \x x'. r' = Inl x' \ r = Inr x} - \ {((r', s'), (r, s)). \x x'. r' = Inr x' \ r = Inr x \ ((x', s'),(x, s)) \ R}"]) + apply (rule validNF_whileLoop [where + I="\r s. case r of Inl x \ E x s | Inr x \ I x s" and + R="{((r', s'), (r, s)). \x x'. r' = Inl x' \ r = Inr x} \ + {((r', s'), (r, s)). \x x'. r' = Inr x' \ r = Inr x \ ((x', s'),(x, s)) \ R}"]) apply (simp add: pre) apply (insert inv)[1] - apply (fastforce simp: lift_def validNF_def valid_def - validE_NF_def throwError_def no_fail_def return_def - validE_def split: sum.splits prod.splits) + apply (fastforce simp: lift_def validNF_def valid_def validE_NF_def throwError_def no_fail_def + return_def validE_def + split: sum.splits prod.splits) apply (rule wf_Un) apply (rule wf_custom_measure [where f="\(r, s). case r of Inl _ \ 0 | _ \ 1"]) apply clarsimp - apply (insert wf_inv_image [OF wf, where f="\(r, s). (theRight r, s)"]) + apply (insert wf_inv_image [OF wf, where f="\(r, s). 
(projr r, s)"]) apply (drule wf_Int1 [where r'="{((r', s'),(r, s)). (\x. r = Inr x) \ (\x. r' = Inr x)}"]) apply (erule wf_subset) - apply rule - apply (clarsimp simp: inv_image_def split: prod.splits sum.splits) - apply clarsimp - apply rule - apply rule - apply clarsimp - apply clarsimp - apply (clarsimp split: sum.splits) - apply (blast intro: post_cond) + apply (fastforce simp: inv_image_def split: prod.splits sum.splits) + apply fastforce + apply (fastforce split: sum.splits intro: post_cond) done lemma validNF_whileLoopE_inv [wp]: @@ -733,7 +771,7 @@ lemma validNF_whileLoopE_inv_measure [wp]: done lemma validNF_whileLoopE_inv_measure_twosteps: - assumes inv: "\r s. \\s'. I r s' \ C r s' \ B r \ \r' s'. I r' s' \, \ E \!" + assumes inv: "\r. \\s'. I r s' \ C r s' \ B r \ \r' s'. I r' s' \, \ E \!" assumes measure: "\r m. \\s. I r s \ C r s \ M r s = m \ B r \ \r' s'. M r' s' < m \, \ \_ _. True \" and post_cond: "\r s. \I r s; \ C r s\ \ Q r s" shows "\I r\ whileLoopE_inv C B r I (measure' (\(r, s). M r s)) \Q\, \E\!" @@ -753,7 +791,7 @@ lemma whileLoopE_wp_inv [wp]: apply (rule validE_whileLoopE [where I=I], auto) done -subsection "Stronger whileLoop rules" +subsection "Stronger @{const whileLoop} rules" lemma whileLoop_rule_strong: assumes init_U: "\ \s'. s' = s \ whileLoop C B r \ \r s. (r, s) \ fst Q \" @@ -812,4 +850,23 @@ lemma snd_whileLoop_subset: apply (metis b_success_step snd_whileLoop_first_step snd_whileLoop_unfold) done + +subsection "Some rules for @{const whileM}" + +lemma whileM_wp_gen: + assumes termin:"\s. I False s \ Q s" + assumes [wp]: "\I'\ C \I\" + assumes [wp]: "\I True\ f \\_. I'\" + shows "\I'\ whileM C f \\_. Q\" + unfolding whileM_def + using termin + by (wpsimp wp: whileLoop_wp[where I=I]) + +lemma whileM_inv: + "\f \Q\; P \Q\\ \ whileM P f \Q\" + by (fastforce intro: whileM_wp_gen) + +lemmas whileM_post_inv + = hoare_strengthen_post[where R="\_. Q" for Q, OF whileM_inv[where P=C for C], rotated -1] + end diff --git a/lib/Monad_WP/WhileLoopRulesCompleteness.thy b/lib/Monads/nondet/Nondet_While_Loop_Rules_Completeness.thy similarity index 85% rename from lib/Monad_WP/WhileLoopRulesCompleteness.thy rename to lib/Monads/nondet/Nondet_While_Loop_Rules_Completeness.thy index 7d65847964..3085502cb8 100644 --- a/lib/Monad_WP/WhileLoopRulesCompleteness.thy +++ b/lib/Monads/nondet/Nondet_While_Loop_Rules_Completeness.thy @@ -6,12 +6,12 @@ (* * This is a purely theoretical theory containing proofs - * that the whileLoop rules in "WhileLoopRules" are complete. + * that the whileLoop rules in "Nondet_While_Loop_Rules" are complete. * * You probably don't care about this. *) -theory WhileLoopRulesCompleteness -imports WhileLoopRules +theory Nondet_While_Loop_Rules_Completeness + imports Nondet_While_Loop_Rules begin lemma whileLoop_rule_strong_complete: @@ -34,12 +34,14 @@ lemma valid_whileLoop_complete: = \ P r \ whileLoop C B r \ Q \" apply (rule iffI) apply clarsimp + apply (rename_tac I) apply (rule_tac I=I in valid_whileLoop, auto)[1] apply (rule exI [where x="\r s. \ \s'. 
s' = s \ whileLoop C B r \ Q \"]) apply (intro conjI) apply (clarsimp simp: valid_def) apply (subst (2) valid_def) apply clarsimp + apply (rename_tac a b) apply (subst (asm) (2) whileLoop_unroll) apply (case_tac "C a b") apply (clarsimp simp: valid_def bind_def' Bex_def condition_def split: if_split_asm) @@ -66,7 +68,7 @@ proof (rule iffI) by auto thus ?RHS - by (rule_tac validNF_whileLoop [where I=I and R=R], auto) + by - (rule validNF_whileLoop[where I=I and R=R], auto) next assume loop: "?RHS" @@ -225,6 +227,10 @@ where | "valid_path C B [x] = (\ C (fst x) (snd x))" | "valid_path C B (x#y#xs) = ((C (fst x) (snd x) \ y \ fst (B (fst x) (snd x)) \ valid_path C B (y#xs)))" +lemma valid_path_not_empty: + "valid_path C B xs \ xs \ []" + by clarsimp + definition "shortest_path_length C B x Q \ (LEAST n. \l. valid_path C B l \ hd l = x \ Q (fst (last l)) (snd (last l)) \ length l = n)" @@ -234,8 +240,7 @@ lemma shortest_path_length_same [simp]: apply (rule Least_equality) apply (rule exI [where x="[a]"]) apply clarsimp - apply (case_tac "y = 0") - apply clarsimp + apply (rule Suc_leI) apply clarsimp done @@ -243,9 +248,8 @@ lemma valid_path_simp: "valid_path C B l = (((\r s. l = [(r, s)] \ \ C r s) \ (\r s r' s' xs. l = (r, s)#(r', s')#xs \ C r s \ (r', s') \ fst (B r s) \ valid_path C B ((r', s')#xs))))" - apply (case_tac l) - apply clarsimp - apply (case_tac list) + apply (cases l rule: remdups_adj.cases) + apply clarsimp apply clarsimp apply clarsimp done @@ -260,15 +264,23 @@ proof - assume y: "Q r' s'" have ?thesis using x y - apply (induct rule: in_whileLoop_induct) - apply (rule_tac x="[(r,s)]" in exI) - apply clarsimp - apply clarsimp - apply (case_tac l) - apply clarsimp - apply (rule_tac x="(r, s)#l" in exI) - apply clarsimp - done + proof (induct rule: in_whileLoop_induct) + case (1 r s) + then show ?case + apply - + apply (rule exI[where x="[(r,s)]"]) + apply clarsimp + done + next + case (2 r s r' s' r'' s'') + then show ?case + apply clarsimp + apply (frule valid_path_not_empty) + apply (rename_tac l) + apply (rule_tac x="(r, s)#l" in exI) + apply (clarsimp simp: neq_Nil_conv) + done + qed } thus ?thesis @@ -297,27 +309,33 @@ lemma shortest_path_is_shortest: done lemma valid_path_implies_exs_valid_whileLoop: - "valid_path C B l \ \ \s. s = snd (hd l) \ whileLoop C B (fst (hd l)) \\ \r s. (r, s) = last l \" - apply (induct l) - apply clarsimp - apply clarsimp - apply rule - apply clarsimp - apply (subst whileLoop_unroll) - apply (clarsimp simp: condition_def bind_def' exs_valid_def return_def) - apply clarsimp - apply (subst whileLoop_unroll) - apply (clarsimp simp: condition_def bind_def' exs_valid_def return_def) - apply rule - apply (clarsimp split: prod.splits) - apply (case_tac l) + "valid_path C B l \ \ \s. s = snd (hd l) \ whileLoop C B (fst (hd l)) \\ \r s. 
(r, s) = last l \" +proof (induct l) + case Nil + then show ?case + by clarsimp +next + case (Cons a l) + then show ?case apply clarsimp - apply (clarsimp split del: if_split) - apply (erule bexI [rotated]) - apply clarsimp - apply clarsimp - apply (case_tac l, auto) - done + apply rule + apply clarsimp + apply (subst whileLoop_unroll) + apply (clarsimp simp: condition_def bind_def' exs_valid_def return_def) + apply clarsimp + apply (subst whileLoop_unroll) + apply (clarsimp simp: condition_def bind_def' exs_valid_def return_def) + apply rule + apply (clarsimp split: prod.splits) + apply (cases l) + apply clarsimp + apply (clarsimp split del: if_split) + apply (erule bexI[rotated]) + apply clarsimp + apply clarsimp + apply (cases l; clarsimp) + done +qed lemma shortest_path_gets_shorter: "\ \ \s'. s' = s \ whileLoop C B r \\ Q \; @@ -327,21 +345,22 @@ lemma shortest_path_gets_shorter: \ \ \s. s = s' \ whileLoop C B r' \\ Q \" apply (drule shortest_path_exists) apply clarsimp - apply (case_tac l) - apply clarsimp - apply (case_tac list) + apply (rename_tac l) + apply (case_tac l rule: remdups_adj.cases) + apply clarsimp apply clarsimp - apply (rule_tac x="aa" in bexI) - apply clarify - apply (simp only: valid_path.simps, clarify) - apply (frule shortest_path_is_shortest [where Q=Q]) - apply simp + apply (rule bexI[rotated]) apply clarsimp - apply (drule valid_path_implies_exs_valid_whileLoop) - apply (clarsimp simp: exs_valid_def) - apply (erule bexI [rotated]) - apply (clarsimp split: if_split_asm) + apply assumption + apply clarify + apply (simp only: valid_path.simps, clarify) + apply (frule shortest_path_is_shortest [where Q=Q]) + apply simp apply clarsimp + apply (drule valid_path_implies_exs_valid_whileLoop) + apply (clarsimp simp: exs_valid_def) + apply (erule bexI [rotated]) + apply (clarsimp split: if_split_asm) done lemma exs_valid_whileLoop_complete: diff --git a/lib/Monad_WP/OptionMonad.thy b/lib/Monads/reader_option/Reader_Option_Monad.thy similarity index 75% rename from lib/Monad_WP/OptionMonad.thy rename to lib/Monads/reader_option/Reader_Option_Monad.thy index f0ef5f3a12..686dd1d626 100644 --- a/lib/Monad_WP/OptionMonad.thy +++ b/lib/Monads/reader_option/Reader_Option_Monad.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause @@ -10,25 +11,38 @@ * Option monad while loop formalisation. *) -theory OptionMonad (* FIXME: this is really a Reader_Option_Monad *) +chapter \State Projections and Reader Option Monad\ + +theory Reader_Option_Monad imports - Lib (* FIXME: reduce dependencies *) + Monad_Lib + Fun_Pred_Syntax Less_Monad_Syntax begin +section \Projections\ + type_synonym ('s,'a) lookup = "'s \ 'a option" -text \Similar to map_option but the second function returns option as well\ -definition - opt_map :: "('s,'a) lookup \ ('a \ 'b option) \ ('s,'b) lookup" (infixl "|>" 54) -where +text \Similar to @{const map_option} but the second function returns @{text option} as well\ +definition opt_map :: "('s,'a) lookup \ ('a \ 'b option) \ ('s,'b) lookup" (infixl "|>" 54) where "f |> g \ \s. 
case f s of None \ None | Some x \ g x" abbreviation opt_map_Some :: "('s \ 'a) \ ('a \ 'b) \ 's \ 'b" (infixl "||>" 54) where "f ||> g \ f |> (Some \ g)" + lemmas opt_map_Some_def = opt_map_def -lemma opt_map_cong [fundef_cong]: +definition map_set :: "('a \ 'b set option) \ 'a \ 'b set" where + "map_set f \ case_option {} id \ f" + +definition opt_pred :: "('a \ bool) \ ('b \ 'a option) \ ('b \ bool)" (infixl "|<" 55) where + "P |< proj \ (\x. case_option False P (proj x))" + + +subsection \Lemmas for @{const opt_map}\ + +lemma opt_map_cong[fundef_cong]: "\ f = f'; \v s. f s = Some v \ g v = g' v\ \ f |> g = f' |> g'" by (rule ext) (simp add: opt_map_def split: option.splits) @@ -76,8 +90,7 @@ lemma opt_map_Some_upd_Some: "f(x \ v) ||> g = (f ||> g)(x \ g v)" by (simp add: opt_map_upd_Some) -lemmas opt_map_upd[simp] - = opt_map_upd_None opt_map_upd_Some opt_map_Some_upd_Some +lemmas opt_map_upd[simp] = opt_map_upd_None opt_map_upd_Some opt_map_Some_upd_Some lemma opt_map_upd_triv: "t k = Some x \ (t |> f)(k := f x) = t |> f" @@ -94,17 +107,16 @@ lemma opt_map_upd_triv_None: lemmas opt_map_upd_triv_simps = opt_map_upd_triv opt_map_Some_upd_triv opt_map_upd_triv_None lemma opt_map_foldr_upd: - "(foldr (\p kh. kh(p := new)) ptrs f)|> g + "foldr (\p kh. kh(p := new)) ptrs f |> g = foldr (\p kh. kh(p := (case new of Some x \ g x | _ \ None))) ptrs (f |> g)" by (induct ptrs arbitrary: new; clarsimp split: option.splits) lemma opt_map_Some_foldr_upd: - "(foldr (\p kh. kh(p := new)) ptrs f) ||> g - = foldr (\p kh. kh(p := (case new of Some x \ Some (g x) | _ \ None))) ptrs (f ||> g)" - by (induct ptrs arbitrary: new; clarsimp split: option.splits) + "foldr (\p kh. kh(p := new)) ptrs f ||> g + = foldr (\p kh. kh(p := map_option g new)) ptrs (f ||> g)" + by (induct ptrs arbitrary: new; clarsimp simp: map_option_case split: option.splits) -lemmas opt_map_foldr_upd_simps - = opt_map_foldr_upd opt_map_Some_foldr_upd +lemmas opt_map_foldr_upd_simps = opt_map_foldr_upd opt_map_Some_foldr_upd lemma opt_map_Some_comp[simp]: "f ||> g ||> h = f ||> h o g" @@ -136,8 +148,7 @@ lemma opt_map_Some_eta_fold: by (simp add: o_def) lemma case_opt_map_distrib: - "((\s. case_option None g (f s)) |> h) - = ((\s. case_option None (g |> h) (f s)))" + "(\s. case_option None g (f s)) |> h = (\s. case_option None (g |> h) (f s))" by (fastforce simp: opt_map_def split: option.splits) lemma opt_map_apply_left_eq: @@ -150,64 +161,59 @@ declare None_upd_eq[simp] lemma "\ (f |> g) x = None; g v = None \ \ f(x \ v) |> g = f |> g" by simp -definition map_set :: "('a \ 'b set option) \ 'a \ 'b set" where - "map_set f \ case_option {} id \ f" - -(* opt_pred *) - -abbreviation - opt_pred :: "('a \ bool) \ ('b \ 'a option) \ ('b \ bool)" (infixl "|<" 55) where - "P |< proj \ (\x. case_option False P (proj x))" +subsection \Lemmas for @{const opt_pred}\ lemma opt_pred_conj: - "((P1 |< hp) p \ (P2 |< hp) p) = (((P1 and P2) |< hp) p)" - by (fastforce simp: pred_conj_def split: option.splits) + "((P |< hp) p \ (Q |< hp) p) = ((P and Q) |< hp) p" + by (fastforce simp: pred_conj_def opt_pred_def split: option.splits) lemma opt_pred_disj: - "((P1 |< hp) p \ (P2 |< hp) p) = (((P1 or P2) |< hp) p)" - by (fastforce simp: pred_disj_def split: option.splits) + "((P |< hp) p \ (Q |< hp) p) = ((P or Q) |< hp) p" + by (fastforce simp: pred_disj_def opt_pred_def split: option.splits) + +lemma in_opt_pred: + "(P |< f) p = (\v. f p = Some v \ P v)" + by (auto simp: opt_pred_def split: option.splits) -lemma opt_predD: - "(P |< proj) x \ \y. 
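Two toy instances (hypothetical lemma names, using ordinary map-update syntax) to make the projection combinators concrete: |> chains two partial lookups, and |< turns a predicate on values into a predicate on addresses via a projection.

  lemma opt_map_toy:
    "([(1 :: nat) \<mapsto> (2 :: nat)] |> [2 \<mapsto> (3 :: nat)]) 1 = Some 3"
    by (simp add: opt_map_def)

  lemma opt_pred_toy:
    "((\<lambda>v. v < 5) |< [(1 :: nat) \<mapsto> (2 :: nat)]) 1"
    by (simp add: opt_pred_def)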
proj x = Some y \ P y" - by (clarsimp split: option.splits) +lemmas opt_predD = in_opt_pred[THEN iffD1] lemma opt_predE: "\(P |< proj) x; \y. \proj x = Some y; P y\ \ R\ \ R" - by (clarsimp split: option.splits) - -lemma in_opt_pred: - "(P |< f) p = (\v. f p = Some v \ P v)" - by (auto dest: opt_predD) + by (blast dest: opt_predD) lemma opt_pred_unfold_map: - "(P |< (f |> g)) = ((P |< g) |< f)" - by (fastforce simp: opt_map_def split: option.splits) + "P |< (f |> g) = P |< g |< f" + by (fastforce simp: opt_map_def in_opt_pred split: option.splits) lemma opt_pred_unfold_proj: - "(P |< (f ||> g))= (P o g |< f)" - by (clarsimp simp: opt_map_def split: option.splits) + "P |< (f ||> g) = P o g |< f" + by (clarsimp simp: opt_map_def opt_pred_def split: option.splits) -(* obind, etc. *) +lemma opt_pred_proj_upd_eq[simp]: + "(P |< proj (p \ v)) p = P v" + by (simp add: in_opt_pred) -definition - obind :: "('s,'a) lookup \ ('a \ ('s,'b) lookup) \ ('s,'b) lookup" (infixl "|>>" 53) -where + +section \The Reader Option Monad\ + +definition obind :: "('s,'a) lookup \ ('a \ ('s,'b) lookup) \ ('s,'b) lookup" (infixl "|>>" 53) + where "f |>> g \ \s. case f s of None \ None | Some x \ g x s" (* Enable "do { .. }" syntax *) adhoc_overloading Monad_Syntax.bind obind -definition +definition ofail :: "('s, 'a) lookup" where "ofail = K None" -definition +definition oreturn :: "'a \ ('s, 'a) lookup" where "oreturn = K o Some" -definition +definition oassert :: "bool \ ('s, unit) lookup" where "oassert P \ if P then oreturn () else ofail" -definition +definition oassert_opt :: "'a option \ ('s, 'a) lookup" where "oassert_opt r \ case r of None \ ofail | Some x \ oreturn x" definition oapply :: "'a \ ('a \ 'b option) \ 'b option" where @@ -226,19 +232,18 @@ definition owhen :: "bool \ ('s, unit) lookup \ ('s, uni "owhen P f \ if P then f else oreturn ()" (* Reader monad interface: *) -abbreviation (input) +abbreviation (input) ask :: "('s, 's) lookup" where "ask \ Some" -definition +definition asks :: "('s \ 'a) \ ('s, 'a) lookup" where "asks f = do { v <- ask; oreturn (f v) }" -abbreviation +abbreviation ogets :: "('s \ 'a) \ ('s, 'a) lookup" where "ogets \ asks" text \ - If the result can be an exception. - Corresponding bindE would be analogous to lifting in NonDetMonad. -\ + Integration with exception monad. + Corresponding bindE would be analogous to lifting in @{text Nondet_Monad}.\ definition "oreturnOk x = K (Some (Inr x))" @@ -247,84 +252,95 @@ definition "othrow e = K (Some (Inl e))" definition - "oguard G \ (\s. if G s then Some () else None)" + "oguard G \ \s. if G s then Some () else None" definition - "ocondition c L R \ (\s. if c s then L s else R s)" + "ocondition c L R \ \s. if c s then L s else R s" definition "oskip \ oreturn ()" -text \Monad laws\ -lemma oreturn_bind [simp]: "(oreturn x |>> f) = f x" - by (auto simp add: oreturn_def obind_def K_def) +lemma ogets_def: + "ogets f = (\s. Some (f s))" + by (clarsimp simp: asks_def obind_def oreturn_def) + +lemmas omonad_defs = ofail_def oreturn_def oassert_def oassert_opt_def ogets_def ounless_def + owhen_def + -lemma obind_return [simp]: "(m |>> oreturn) = m" - by (auto simp add: oreturn_def obind_def K_def split: option.splits) +subsection \Monad laws\ + +lemma oreturn_bind[simp]: + "(oreturn x |>> f) = f x" + by (auto simp add: oreturn_def obind_def) + +lemma obind_return[simp]: + "(m |>> oreturn) = m" + by (auto simp add: oreturn_def obind_def split: option.splits) lemma obind_assoc: "(m |>> f) |>> g = m |>> (\x. 
f x |>> g)" - by (auto simp add: oreturn_def obind_def K_def split: option.splits) + by (auto simp add: oreturn_def obind_def split: option.splits) -text \Binding fail\ +subsection \Binding and fail\ lemma obind_fail [simp]: "f |>> (\_. ofail) = ofail" - by (auto simp add: ofail_def obind_def K_def split: option.splits) + by (auto simp add: ofail_def obind_def split: option.splits) lemma ofail_bind [simp]: "ofail |>> m = ofail" - by (auto simp add: ofail_def obind_def K_def split: option.splits) + by (auto simp add: ofail_def obind_def split: option.splits) +subsection \Function package setup\ -text \Function package setup\ -lemma opt_bind_cong [fundef_cong]: +lemma opt_bind_cong[fundef_cong]: "\ f = f'; \v s. f' s = Some v \ g v s = g' v s \ \ f |>> g = f' |>> g'" by (rule ext) (simp add: obind_def split: option.splits) -lemma opt_bind_cong_apply [fundef_cong]: +lemma opt_bind_cong_apply[fundef_cong]: "\ f s = f' s; \v. f' s = Some v \ g v s = g' v s \ \ (f |>> g) s = (f' |>> g') s" by (simp add: obind_def split: option.splits) -lemma oassert_bind_cong [fundef_cong]: +lemma oassert_bind_cong[fundef_cong]: "\ P = P'; P' \ m = m' \ \ oassert P |>> m = oassert P' |>> m'" by (auto simp: oassert_def) -lemma oassert_bind_cong_apply [fundef_cong]: +lemma oassert_bind_cong_apply[fundef_cong]: "\ P = P'; P' \ m () s = m' () s \ \ (oassert P |>> m) s = (oassert P' |>> m') s" by (auto simp: oassert_def) -lemma oreturn_bind_cong [fundef_cong]: +lemma oreturn_bind_cong[fundef_cong]: "\ x = x'; m x' = m' x' \ \ oreturn x |>> m = oreturn x' |>> m'" by simp -lemma oreturn_bind_cong_apply [fundef_cong]: +lemma oreturn_bind_cong_apply[fundef_cong]: "\ x = x'; m x' s = m' x' s \ \ (oreturn x |>> m) s = (oreturn x' |>> m') s" by simp -lemma oreturn_bind_cong2 [fundef_cong]: +lemma oreturn_bind_cong2[fundef_cong]: "\ x = x'; m x' = m' x' \ \ (oreturn $ x) |>> m = (oreturn $ x') |>> m'" by simp -lemma oreturn_bind_cong2_apply [fundef_cong]: +lemma oreturn_bind_cong2_apply[fundef_cong]: "\ x = x'; m x' s = m' x' s \ \ ((oreturn $ x) |>> m) s = ((oreturn $ x') |>> m') s" by simp -lemma ocondition_cong [fundef_cong]: +lemma ocondition_cong[fundef_cong]: "\c = c'; \s. c' s \ l s = l' s; \s. \c' s \ r s = r' s\ \ ocondition c l r = ocondition c' l' r'" by (auto simp: ocondition_def) -text \Decomposition\ +subsection \Decomposition\ -lemma ocondition_K_true [simp]: +lemma ocondition_K_true[simp]: "ocondition (\_. True) T F = T" by (simp add: ocondition_def) -lemma ocondition_K_false [simp]: +lemma ocondition_K_false[simp]: "ocondition (\_. 
False) T F = F" by (simp add: ocondition_def) @@ -338,33 +354,33 @@ lemma ocondition_True: lemma in_oreturn [simp]: "(oreturn x s = Some v) = (v = x)" - by (auto simp: oreturn_def K_def) + by (auto simp: oreturn_def) lemma oreturn_None[simp]: "\ oreturn x s = None" - by (simp add: oreturn_def K_def) + by (simp add: oreturn_def) lemma oreturnE: "\oreturn x s = Some v; v = x \ P\ \ P" by simp -lemma in_ofail [simp]: +lemma in_ofail[simp]: "ofail s \ Some v" - by (auto simp: ofail_def K_def) + by (auto simp: ofail_def) lemma ofailE: "ofail s = Some v \ P" by simp -lemma in_oassert_eq [simp]: +lemma in_oassert_eq[simp]: "(oassert P s = Some v) = P" by (simp add: oassert_def) -lemma oassert_True [simp]: +lemma oassert_True[simp]: "oassert True = oreturn ()" by (simp add: oassert_def) -lemma oassert_False [simp]: +lemma oassert_False[simp]: "oassert False = ofail" by (simp add: oassert_def) @@ -393,28 +409,26 @@ lemma obind_eqI_full: (clarsimp simp: obind_def split: option.splits) lemma obindE: - "\ (f |>> g) s = Some v; - \v'. \f s = Some v'; g v' s = Some v\ \ P\ \ P" + "\ (f |>> g) s = Some v; \v'. \f s = Some v'; g v' s = Some v\ \ P\ \ P" by (auto simp: in_obind_eq) -lemma in_othrow_eq [simp]: +lemma in_othrow_eq[simp]: "(othrow e s = Some v) = (v = Inl e)" - by (auto simp: othrow_def K_def) + by (auto simp: othrow_def) lemma othrowE: "\othrow e s = Some v; v = Inl e \ P\ \ P" by simp -lemma in_oreturnOk_eq [simp]: +lemma in_oreturnOk_eq[simp]: "(oreturnOk x s = Some v) = (v = Inr x)" - by (auto simp: oreturnOk_def K_def) + by (auto simp: oreturnOk_def) lemma oreturnOkE: "\oreturnOk x s = Some v; v = Inr x \ P\ \ P" by simp -lemmas omonadE [elim!] = - opt_mapE obindE oreturnE ofailE othrowE oreturnOkE oassertE +lemmas omonadE[elim!] = obindE oreturnE ofailE othrowE oreturnOkE oassertE lemma in_opt_map_Some_eq: "((f ||> g) x = Some y) = (\v. f x = Some v \ g v = y)" @@ -426,11 +440,11 @@ lemma in_opt_map_Some_None_eq[simp]: lemma oreturn_comp[simp]: "oreturn x \ f = oreturn x" - by (simp add: oreturn_def K_def o_def) + by (simp add: oreturn_def o_def) lemma ofail_comp[simp]: "ofail \ f = ofail" - by (auto simp: ofail_def K_def) + by (auto simp: ofail_def) lemma oassert_comp[simp]: "oassert P \ f = oassert P" @@ -438,7 +452,7 @@ lemma oassert_comp[simp]: lemma fail_apply[simp]: "ofail s = None" - by (simp add: ofail_def K_def) + by (simp add: ofail_def) lemma oassert_apply[simp]: "oassert P s = (if P then Some () else None)" @@ -466,7 +480,9 @@ lemma if_comp_dist: lemma obindK_is_opt_map: "f \ (\x. 
K $ g x) = f |> g" - by (simp add: obind_def opt_map_def K_def) + by (simp add: obind_def opt_map_def) + +lemmas in_omonad = bind_eq_Some_conv in_obind_eq in_opt_map_eq in_opt_pred Let_def section \"While" loops over option monad.\ @@ -476,10 +492,7 @@ text \ (without passing through a state) \ -inductive_set - option_while' :: "('a \ bool) \ ('a \ 'a option) \ 'a option rel" - for C B -where +inductive_set option_while' :: "('a \ bool) \ ('a \ 'a option) \ 'a option rel" for C B where final: "\ C r \ (Some r, Some r) \ option_while' C B" | fail: "\ C r; B r = None \ \ (Some r, None) \ option_while' C B" | step: "\ C r; B r = Some r'; (Some r', sr'') \ option_while' C B \ @@ -493,10 +506,12 @@ definition lemma option_while'_inj: assumes "(s,s') \ option_while' C B" "(s, s'') \ option_while' C B" shows "s' = s''" - using assms by (induct rule: option_while'.induct) (auto elim: option_while'.cases) + using assms + by (induct rule: option_while'.induct) (auto elim: option_while'.cases) lemma option_while'_inj_step: - "\ C s; B s = Some s'; (Some s, t) \ option_while' C B ; (Some s', t') \ option_while' C B \ \ t = t'" + "\ C s; B s = Some s'; (Some s, t) \ option_while' C B ; (Some s', t') \ option_while' C B \ + \ t = t'" by (metis option_while'.step option_while'_inj) lemma option_while'_THE: @@ -511,9 +526,9 @@ lemma option_while_simps: "(Some s, ss') \ option_while' C B \ option_while C B s = ss'" using option_while'_inj_step[of C s B s'] by (auto simp: option_while_def option_while'_THE - intro: option_while'.intros - dest: option_while'_inj - elim: option_while'.cases) + intro: option_while'.intros + dest: option_while'_inj + elim: option_while'.cases) lemma option_while_rule: assumes "option_while C B s = Some s'" @@ -575,19 +590,19 @@ proof - qed qed + section \Lift @{term option_while} to the @{typ "('a,'s) lookup"} monad\ -definition - owhile :: "('a \ 's \ bool) \ ('a \ ('s,'a) lookup) \ 'a \ ('s,'a) lookup" -where +definition owhile :: "('a \ 's \ bool) \ ('a \ ('s,'a) lookup) \ 'a \ ('s,'a) lookup" where "owhile c b a \ \s. option_while (\a. c a s) (\a. b a s) a" lemma owhile_unroll: "owhile C B r = ocondition (C r) (B r |>> owhile C B) (oreturn r)" by (auto simp: ocondition_def obind_def oreturn_def owhile_def - option_while_simps K_def split: option.split) + option_while_simps + split: option.split) -text \rule for terminating loops\ +text \Rule for terminating loops\ lemma owhile_rule: assumes "I r s" diff --git a/lib/Monad_WP/OptionMonadWP.thy b/lib/Monads/reader_option/Reader_Option_VCG.thy similarity index 59% rename from lib/Monad_WP/OptionMonadWP.thy rename to lib/Monads/reader_option/Reader_Option_VCG.thy index d84d20a3ec..512b271222 100644 --- a/lib/Monad_WP/OptionMonadWP.thy +++ b/lib/Monads/reader_option/Reader_Option_VCG.thy @@ -5,25 +5,25 @@ *) (* -Hoare reasoning and WP (weakest-precondition) generator rules for the option monad. +Hoare reasoning and WP (weakest-precondition) generator rules for the reader option monad. This list is almost certainly incomplete; add rules here as they are needed. *) -theory OptionMonadWP +theory Reader_Option_VCG imports - OptionMonadND - WP + Reader_Option_Monad + WPSimp begin -declare K_def [simp] - (* Hoare triples. TODO: design a sensible syntax for them. *) (* Partial correctness. *) -definition ovalid :: "('s \ bool) \ ('s \ 'a option) \ ('a \ 's \ bool) \ bool" where - "ovalid P f Q \ \s r. 
P s \ f s = Some r \ Q r s" +definition ovalid :: "('s \ bool) \ ('s \ 'a option) \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /\_\") where + "\P\ f \Q\ \ \s r. P s \ f s = Some r \ Q r s" + (* Total correctness. *) definition ovalidNF :: "('s \ bool) \ ('s \ 'a option) \ ('a \ 's \ bool) \ bool" where "ovalidNF P f Q \ \s. P s \ (f s \ None \ (\r. f s = Some r \ Q r s))" @@ -39,6 +39,14 @@ lemma no_ofail_is_ovalidNF: "no_ofail P f \ ovalidNF P f (\_ _. T lemma ovalidNF_combine: "\ ovalid P f Q; no_ofail P f \ \ ovalidNF P f Q" by (auto simp: ovalidNF_def ovalid_def no_ofail_def) +(* use ovalid with f s = Some r *) +lemma use_ovalidE: + "\ovalid P f Q; P s; f s = Some r; Q r s \ R\ \ R" + by (clarsimp simp: ovalid_def) + +lemma use_ovalid: + "\ovalid P f Q; f s = Some r; P s \ \ Q r s" + by (clarsimp simp: ovalid_def) (* Annotating programs with loop invariant and measure. *) definition owhile_inv :: @@ -50,198 +58,271 @@ lemmas owhile_add_inv = owhile_inv_def[symmetric] (* WP rules for ovalid. *) -lemma obind_wp [wp]: +lemma ovalid_post_taut[wp]: + "\P\ f \\\\" + by (simp add: ovalid_def) + +lemma ovalid_inv[wp]: + "ovalid P f (\_. P)" + by (simp add: ovalid_def) + +lemma obind_wp[wp]: "\ \r. ovalid (R r) (g r) Q; ovalid P f R \ \ ovalid P (obind f g) Q" by (simp add: ovalid_def obind_def split: option.splits, fast) -lemma oreturn_wp [wp]: +lemma oreturn_wp[wp]: "ovalid (P x) (oreturn x) P" by (simp add: ovalid_def) -lemma ocondition_wp [wp]: +lemma ocondition_wp[wp]: "\ ovalid L l Q; ovalid R r Q \ \ ovalid (\s. if C s then L s else R s) (ocondition C l r) Q" by (auto simp: ovalid_def ocondition_def) -lemma ofail_wp [wp]: +lemma ofail_wp[wp]: "ovalid (\_. True) ofail Q" by (simp add: ovalid_def ofail_def) -lemma ovalid_K_bind_wp [wp]: - "ovalid P f Q \ ovalid P (K_bind f x) Q" - by simp +lemma asks_wp[wp]: + "ovalid (\s. P (f s) s) (asks f) P" + by (simp add: split_def asks_def oreturn_def obind_def ovalid_def) + +(* more direct form *) +lemma asks_SomeD: + "\asks f s = Some r; Q (f s) s\ \ Q r s" + by (rule use_ovalid[OF asks_wp]) -lemma ogets_wp [wp]: "ovalid (\s. P (f s) s) (ogets f) P" - by (simp add: ovalid_def ogets_def) +lemma oassert_wp[wp]: + "\\s. Q \ P () s\ oassert Q \P\" + apply (simp add: oassert_def) + apply (intro conjI; wpsimp) + done + +lemma ogets_wp[wp]: + "ovalid (\s. P (f s) s) (ogets f) P" + by wp -lemma oguard_wp [wp]: "ovalid (\s. f s \ P () s) (oguard f) P" +lemma oguard_wp[wp]: + "ovalid (\s. f s \ P () s) (oguard f) P" by (simp add: ovalid_def oguard_def) -lemma oskip_wp [wp]: +lemma oskip_wp[wp]: "ovalid (\s. P () s) oskip P" by (simp add: ovalid_def oskip_def) -lemma ovalid_case_prod [wp]: +lemma ovalid_if_split: + "\ P \ \Q\ f \S\; \P \ \R\ g \S\ \ \ \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" + by simp + +lemma reader_case_option_wp[wp]: + "\\x. \P x\ m x \Q\; \P'\ m' \Q\\ + \ \\s. (x = None \ P' s) \ (\y. x = Some y \ P y s)\ case_option m' m x \Q\" + by (cases x; simp) + +lemma ovalid_case_prod[wp]: assumes "(\x y. ovalid (P x y) (B x y) Q)" shows "ovalid (case v of (x, y) \ P x y) (case v of (x, y) \ B x y) Q" using assms unfolding ovalid_def by auto -lemma owhile_ovalid [wp]: -"\\a. ovalid (\s. I a s \ C a s) (B a) I; - \a s. \I a s; \ C a s\ \ Q a s\ - \ ovalid (I a) (owhile_inv C B a I M) Q" +lemma owhile_ovalid[wp]: + "\\a. ovalid (\s. I a s \ C a s) (B a) I; + \a s. \I a s; \ C a s\ \ Q a s\ + \ ovalid (I a) (owhile_inv C B a I M) Q" unfolding owhile_inv_def owhile_def ovalid_def apply clarify - apply (frule_tac I = "\a. 
I a s" in option_while_rule) - apply auto + apply (frule (1) option_while_rule[where I = "\a. I a s" for s]) + apply auto done +lemma oassert_opt_ovalid[wp]: + "\\s. \y. x = Some y \ Q y s\ oassert_opt x \Q\" + unfolding oassert_opt_def + by (case_tac x; wpsimp) + definition ovalid_property where "ovalid_property P x = (\s f. (\r. Some r = x s f \ P r s))" -lemma ovalid_is_triple [wp_trip]: + +lemma ovalid_is_triple[wp_trip]: "ovalid P f Q = triple_judgement P f (ovalid_property Q (\s f. f s))" by (auto simp: triple_judgement_def ovalid_def ovalid_property_def) -lemma ovalid_wp_comb1 [wp_comb]: +lemma ovalid_wp_comb1[wp_comb]: "\ ovalid P' f Q; ovalid P f Q'; \s. P s \ P' s \ \ ovalid P f (\r s. Q r s \ Q' r s)" by (simp add: ovalid_def) -lemma ovalid_wp_comb2 [wp_comb]: +lemma ovalid_wp_comb2[wp_comb]: "\ ovalid P f Q; \s. P' s \ P s \ \ ovalid P' f Q" by (auto simp: ovalid_def) -lemma ovalid_wp_comb3 [wp_comb]: +lemma ovalid_wp_comb3[wp_comb]: "\ ovalid P f Q; ovalid P' f Q' \ \ ovalid (\s. P s \ P' s) f (\r s. Q r s \ Q' r s)" by (auto simp: ovalid_def) - (* WP rules for ovalidNF. *) -lemma obind_NF_wp [wp]: +lemma obind_NF_wp[wp]: "\ \r. ovalidNF (R r) (g r) Q; ovalidNF P f R \ \ ovalidNF P (obind f g) Q" by (auto simp: ovalidNF_def obind_def split: option.splits) -lemma oreturn_NF_wp [wp]: +lemma oreturn_NF_wp[wp]: "ovalidNF (P x) (oreturn x) P" by (simp add: ovalidNF_def oreturn_def) -lemma ocondition_NF_wp [wp]: +lemma ocondition_NF_wp[wp]: "\ ovalidNF L l Q; ovalidNF R r Q \ \ ovalidNF (\s. if C s then L s else R s) (ocondition C l r) Q" by (simp add: ovalidNF_def ocondition_def) -lemma ofail_NF_wp [wp]: +lemma ofail_NF_wp[wp]: "ovalidNF (\_. False) ofail Q" by (simp add: ovalidNF_def ofail_def) -lemma ovalidNF_K_bind_wp [wp]: - "ovalidNF P f Q \ ovalidNF P (K_bind f x) Q" - by simp - -lemma ogets_NF_wp [wp]: +lemma ogets_NF_wp[wp]: "ovalidNF (\s. P (f s) s) (ogets f) P" by (simp add: ovalidNF_def ogets_def) -lemma oguard_NF_wp [wp]: +lemma oguard_NF_wp[wp]: "ovalidNF (\s. f s \ P () s) (oguard f) P" by (simp add: ovalidNF_def oguard_def) -lemma oskip_NF_wp [wp]: +lemma oskip_NF_wp[wp]: "ovalidNF (\s. P () s) oskip P" by (simp add: ovalidNF_def oskip_def) -lemma ovalid_NF_case_prod [wp]: +lemma ovalid_NF_case_prod[wp]: assumes "(\x y. ovalidNF (P x y) (B x y) Q)" shows "ovalidNF (case v of (x, y) \ P x y) (case v of (x, y) \ B x y) Q" using assms unfolding ovalidNF_def by auto -lemma owhile_NF [wp]: -"\\a. ovalidNF (\s. I a s \ C a s) (B a) I; - \a m. ovalid (\s. I a s \ C a s \ M a s = m) (B a) (\r s. M r s < m); - \a s. \I a s; \ C a s\ \ Q a s\ - \ ovalidNF (I a) (owhile_inv C B a I M) Q" +lemma owhile_NF[wp]: + "\\a. ovalidNF (\s. I a s \ C a s) (B a) I; + \a m. ovalid (\s. I a s \ C a s \ M a s = m) (B a) (\r s. M r s < m); + \a s. \I a s; \ C a s\ \ Q a s\ + \ ovalidNF (I a) (owhile_inv C B a I M) Q" unfolding owhile_inv_def ovalidNF_def ovalid_def apply clarify - apply (rule_tac I = I and M = "measure (\r. M r s)" in owhile_rule) + apply (rule owhile_rule[where I = I and M = "measure (\r. M r s)" and s = s for s]) apply fastforce apply fastforce apply fastforce apply blast+ done -definition ovalidNF_property where "ovalidNF_property P x = (\s f. (x s f \ None \ (\r. Some r = x s f \ P r s)))" -lemma ovalidNF_is_triple [wp_trip]: +definition ovalidNF_property where + "ovalidNF_property P x = (\s f. (x s f \ None \ (\r. Some r = x s f \ P r s)))" + +lemma ovalidNF_is_triple[wp_trip]: "ovalidNF P f Q = triple_judgement P f (ovalidNF_property Q (\s f. 
f s))" by (auto simp: triple_judgement_def ovalidNF_def ovalidNF_property_def) -lemma ovalidNF_wp_comb1 [wp_comb]: +lemma ovalidNF_wp_comb1[wp_comb]: "\ ovalidNF P' f Q; ovalidNF P f Q'; \s. P s \ P' s \ \ ovalidNF P f (\r s. Q r s \ Q' r s)" by (simp add: ovalidNF_def) -lemma ovalidNF_wp_comb2 [wp_comb]: +lemma ovalidNF_wp_comb2[wp_comb]: "\ ovalidNF P f Q; \s. P' s \ P s \ \ ovalidNF P' f Q" by (simp add: ovalidNF_def) -lemma ovalidNF_wp_comb3 [wp_comb]: +lemma ovalidNF_wp_comb3[wp_comb]: "\ ovalidNF P f Q; ovalidNF P' f Q' \ \ ovalidNF (\s. P s \ P' s) f (\r s. Q r s \ Q' r s)" by (simp add: ovalidNF_def) - (* FIXME: WP rules for no_ofail, which might not be correct. *) -lemma no_ofail_ofail [wp]: "no_ofail (\_. False) ofail" - by (simp add: no_ofail_def ofail_def) +lemma no_ofailD: + "\ no_ofail P m; P s \ \ \y. m s = Some y" + by (simp add: no_ofail_def) -lemma no_ofail_ogets [wp]: "no_ofail (\_. True) (ogets f)" - by (simp add: no_ofail_def ogets_def) +lemma no_ofail_obind2[simp]: + assumes f: "no_ofail P f" + assumes v: "ovalid Q f R" + assumes g: "\r. no_ofail (R r) (g r)" + shows "no_ofail (P and Q) (f |>> g)" + using v g + by (fastforce simp: no_ofail_def obind_def pred_conj_def ovalid_def dest: no_ofailD [OF f]) -lemma no_ofail_obind [wp]: - "\ \r. no_ofail (P r) (g r); no_ofail Q f; ovalid Q f P \ \ no_ofail Q (obind f g)" - by (auto simp: no_ofail_def obind_def ovalid_def) +lemma no_ofail_ofail[wp]: + "no_ofail \ ofail" + by (simp add: no_ofail_def) + +lemma no_ofail_asks_simp[simp]: + "no_ofail P (asks f)" + unfolding asks_def oreturn_def obind_def no_ofail_def + by simp -lemma no_ofail_K_bind [wp]: - "no_ofail P f \ no_ofail P (K_bind f x)" +lemma no_ofail_asks[wp]: + "no_ofail \ (asks f)" by simp -lemma no_ofail_oguard [wp]: +lemma no_ofail_ogets[wp]: + "no_ofail \ (ogets f)" + by simp + +lemma no_ofail_obind[wp]: + "\ \r. no_ofail (R r) (g r); \Q\ f \R\; no_ofail P f \ \ no_ofail (P and Q) (f |>> g)" + by (auto simp: no_ofail_def obind_def ovalid_def) + +lemma no_ofail_oguard[wp]: "no_ofail (\s. f s) (oguard f)" by (auto simp: no_ofail_def oguard_def) -lemma no_ofail_ocondition [wp]: +lemma no_ofail_ocondition[wp]: "\ no_ofail L l; no_ofail R r \ \ no_ofail (\s. if C s then L s else R s) (ocondition C l r)" by (simp add: no_ofail_def ocondition_def) -lemma no_ofail_oreturn [wp]: +lemma no_ofail_oreturn[wp]: "no_ofail (\_. True) (oreturn x)" by (simp add: no_ofail_def oreturn_def) -lemma no_ofail_oskip [wp]: +lemma no_ofail_oskip[wp]: "no_ofail (\_. True) oskip" by (simp add: no_ofail_def oskip_def) -lemma no_ofail_is_triple [wp_trip]: +lemma no_ofail_oassert_opt[simp, wp]: + "no_ofail (\_. P \ None) (oassert_opt P)" + by (simp add: no_ofail_def oassert_opt_def split: option.splits) + +lemma no_ofail_if[wp]: + "\ P \ no_ofail Q f; \ P \ no_ofail R g \ + \ no_ofail (if P then Q else R) (if P then f else g)" + by simp + +lemma no_ofail_owhen[wp]: + "(P \ no_ofail Q f) \ no_ofail (if P then Q else \) (owhen P f)" + by (simp add: no_ofail_def owhen_def) + +lemma no_ofail_ounless[wp]: + "(\P \ no_ofail Q f) \ no_ofail (if P then \ else Q) (ounless P f)" + by (simp add: no_ofail_def ounless_def) + +lemma no_ofail_oassert[simp, wp]: + "no_ofail (\_. P) (oassert P)" + by (simp add: oassert_def no_ofail_def) + +lemma no_ofail_is_triple[wp_trip]: "no_ofail P f = triple_judgement P f (\s f. f s \ None)" by (auto simp: triple_judgement_def no_ofail_def) -lemma no_ofail_wp_comb1 [wp_comb]: +lemma no_ofail_wp_comb1[wp_comb]: "\ no_ofail P f; \s. 
P' s \ P s \ \ no_ofail P' f" by (simp add: no_ofail_def) -lemma no_ofail_wp_comb2 [wp_comb]: +lemma no_ofail_wp_comb2[wp_comb]: "\ no_ofail P f; no_ofail P' f \ \ no_ofail (\s. P s \ P' s) f" by (simp add: no_ofail_def) - (* Some extra lemmas for our predicates. *) lemma ovalid_grab_asm: "(G \ ovalid P f Q) \ ovalid (\s. G \ P s) f Q" by (simp add: ovalid_def) + lemma ovalidNF_grab_asm: "(G \ ovalidNF P f Q) \ ovalidNF (\s. G \ P s) f Q" by (simp add: ovalidNF_def) + lemma no_ofail_grab_asm: "(G \ no_ofail P f) \ no_ofail (\s. G \ P s) f" by (simp add: no_ofail_def) @@ -249,9 +330,11 @@ lemma no_ofail_grab_asm: lemma ovalid_assume_pre: "(\s. P s \ ovalid P f Q) \ ovalid P f Q" by (auto simp: ovalid_def) + lemma ovalidNF_assume_pre: "(\s. P s \ ovalidNF P f Q) \ ovalidNF P f Q" by (simp add: ovalidNF_def) + lemma no_ofail_assume_pre: "(\s. P s \ no_ofail P f) \ no_ofail P f" by (simp add: no_ofail_def) @@ -259,16 +342,19 @@ lemma no_ofail_assume_pre: lemma ovalid_pre_imp: "\ \s. P' s \ P s; ovalid P f Q \ \ ovalid P' f Q" by (simp add: ovalid_def) + lemma ovalidNF_pre_imp: "\ \s. P' s \ P s; ovalidNF P f Q \ \ ovalidNF P' f Q" by (simp add: ovalidNF_def) -lemma no_ofail_pre_imp: - "\ \s. P' s \ P s; no_ofail P f \ \ no_ofail P' f" + +lemma no_ofail_pre_imp[wp_pre]: + "\ no_ofail P f; \s. P' s \ P s \ \ no_ofail P' f" by (simp add: no_ofail_def) lemma ovalid_post_imp: "\ \r s. Q r s \ Q' r s; ovalid P f Q \ \ ovalid P f Q'" by (simp add: ovalid_def) + lemma ovalidNF_post_imp: "\ \r s. Q r s \ Q' r s; ovalidNF P f Q \ \ ovalidNF P f Q'" by (simp add: ovalidNF_def) @@ -276,6 +362,7 @@ lemma ovalidNF_post_imp: lemma ovalid_post_imp_assuming_pre: "\ \r s. \ P s; Q r s \ \ Q' r s; ovalid P f Q \ \ ovalid P f Q'" by (simp add: ovalid_def) + lemma ovalidNF_post_imp_assuming_pre: "\ \r s. \ P s; Q r s \ \ Q' r s; ovalidNF P f Q \ \ ovalidNF P f Q'" by (simp add: ovalidNF_def) diff --git a/lib/Monads/tests.xml b/lib/Monads/tests.xml new file mode 100644 index 0000000000..1061af3628 --- /dev/null +++ b/lib/Monads/tests.xml @@ -0,0 +1,22 @@ + + + + + + + + ../../isabelle/bin/isabelle build -v -d ../.. Monads + + + diff --git a/lib/Monads/trace/Trace_Det.thy b/lib/Monads/trace/Trace_Det.thy new file mode 100644 index 0000000000..54c7f03151 --- /dev/null +++ b/lib/Monads/trace/Trace_Det.thy @@ -0,0 +1,73 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Det + imports + Trace_Monad +begin + +subsection "Determinism" + +text \ + A monad of type @{text tmonad} is deterministic iff it + returns an empty trace, exactly one state and result and does not fail\ +definition det :: "('a,'s) tmonad \ bool" where + "det f \ \s. \r. f s = {([], Result r)}" + +text \A deterministic @{text tmonad} can be turned into a normal state monad:\ +definition the_run_state :: "('s,'a) tmonad \ 's \ 'a \ 's" where + "the_run_state M \ \s. THE s'. mres (M s) = {s'}" + + +lemma det_set_iff: + "det f \ (r \ mres (f s)) = (mres (f s) = {r})" + unfolding det_def mres_def + by (fastforce elim: allE[where x=s]) + +lemma return_det[iff]: + "det (return x)" + by (simp add: det_def return_def) + +lemma put_det[iff]: + "det (put s)" + by (simp add: det_def put_def) + +lemma get_det[iff]: + "det get" + by (simp add: det_def get_def) + +lemma det_gets[iff]: + "det (gets f)" + by (auto simp add: gets_def det_def get_def return_def bind_def) + +lemma det_UN: + "det f \ (\x \ mres (f s). g x) = (g (THE x. 
x \ mres (f s)))" + unfolding det_def mres_def + apply simp + apply (drule spec [of _ s]) + apply (clarsimp simp: vimage_def) + done + +lemma bind_detI[simp, intro!]: + "\ det f; \x. det (g x) \ \ det (f >>= g)" + unfolding bind_def det_def + apply (erule all_reg[rotated]) + by clarsimp + +lemma det_modify[iff]: + "det (modify f)" + by (simp add: modify_def) + +lemma the_run_stateI: + "mres (M s) = {s'} \ the_run_state M s = s'" + by (simp add: the_run_state_def) + +lemma the_run_state_det: + "\ s' \ mres (M s); det M \ \ the_run_state M s = s'" + by (simp add: the_run_stateI det_set_iff) + +end diff --git a/lib/Monads/trace/Trace_Empty_Fail.thy b/lib/Monads/trace/Trace_Empty_Fail.thy new file mode 100644 index 0000000000..7227712ff5 --- /dev/null +++ b/lib/Monads/trace/Trace_Empty_Fail.thy @@ -0,0 +1,397 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Empty_Fail + imports + Trace_Monad + WPSimp +begin + +section \Monads that are wellformed w.r.t. failure\ + +text \ + Usually, well-formed monads constructed from the primitives in @{text Trace_Monad} will have the + following property: if they return an empty set of completed results, there exists a trace + corresponding to a failed result.\ +definition empty_fail :: "('s,'a) tmonad \ bool" where + "empty_fail m \ \s. mres (m s) = {} \ failed (m s)" + +text \Useful in forcing otherwise unknown executions to have the @{const empty_fail} property.\ +definition mk_ef :: + "((tmid \ 's) list \ ('s, 'a) tmres) set \ ((tmid \ 's) list \ ('s, 'a) tmres) set" where + "mk_ef S \ if mres S = {} then S \ {([], Failed)} else S" + + +subsection \WPC setup\ + +lemma wpc_helper_empty_fail_final: + "empty_fail f \ wpc_helper (P, P', P'') (Q, Q', Q'') (empty_fail f)" + by (clarsimp simp: wpc_helper_def) + +wpc_setup "\m. empty_fail m" wpc_helper_empty_fail_final + + +subsection \@{const empty_fail} intro/dest rules\ + +lemma empty_failI: + "(\s. mres (m s) = {} \ failed (m s)) \ empty_fail m" + by (simp add: empty_fail_def) + +lemma empty_failD: + "\ empty_fail m; mres (m s) = {} \ \ failed (m s)" + by (simp add: empty_fail_def) + +lemma empty_fail_not_snd: + "\ \ failed (m s); empty_fail m \ \ \v. v \ mres (m s)" + by (fastforce simp: empty_fail_def) + +lemmas empty_failD2 = empty_fail_not_snd[rotated] + +lemma empty_failD3: + "\ empty_fail f; \ failed (f s) \ \ mres (f s) \ {}" + by (drule(1) empty_failD2, clarsimp) + +lemma empty_fail_not_empty: + "empty_fail f \ \s. f s \ {}" + by (auto simp: empty_fail_def mres_def) + +lemma empty_fail_bindD1: + "empty_fail (a >>= b) \ empty_fail a" + unfolding empty_fail_def bind_def + apply (erule all_reg[rotated]) + by (force simp: split_def mres_def vimage_def failed_def split: tmres.splits) + + +subsection \Wellformed monads\ + +(* + Collect generic empty_fail lemmas here: + - naming convention is empty_fail_NAME. 
+ - add lemmas with assumptions to [empty_fail_cond] set + - add lemmas without assumption to [empty_fail_term] set +*) + +named_theorems empty_fail_term +named_theorems empty_fail_cond + +lemma empty_fail_K_bind[empty_fail_cond]: + "empty_fail f \ empty_fail (K_bind f x)" + by simp + +lemma empty_fail_fun_app[empty_fail_cond]: + "empty_fail (f x) \ empty_fail (f $ x)" + by simp + +(* empty_fail as such does not need context, but empty_fail_select_f does, so we need to build + up context in other rules *) +lemma empty_fail_If[empty_fail_cond]: + "\ P \ empty_fail f; \P \ empty_fail g \ \ empty_fail (if P then f else g)" + by (simp split: if_split) + +lemma empty_fail_If_applied[empty_fail_cond]: + "\ P \ empty_fail (f x); \P \ empty_fail (g x) \ \ empty_fail ((if P then f else g) x)" + by simp + +lemma empty_fail_put[empty_fail_term]: + "empty_fail (put f)" + by (simp add: put_def empty_fail_def mres_def vimage_def) + +lemma empty_fail_modify[empty_fail_term]: + "empty_fail (modify f)" + by (simp add: empty_fail_def simpler_modify_def mres_def vimage_def) + +lemma empty_fail_gets[empty_fail_term]: + "empty_fail (gets f)" + by (simp add: empty_fail_def simpler_gets_def mres_def vimage_def) + +lemma empty_fail_select[empty_fail_cond]: + "S \ {} \ empty_fail (select S)" + by (simp add: empty_fail_def select_def mres_def image_def) + +lemma mres_bind_empty: + "mres ((f >>= g) s) = {} + \ mres (f s) = {} \ (\res\mres (f s). mres (g (fst res) (snd res)) = {})" + apply (clarsimp simp: bind_def mres_def split_def) + apply (clarsimp simp: image_def) + apply fastforce + done + +lemma bind_failedI1: + "failed (f s) \ failed ((f >>= g) s)" + by (force simp: bind_def vimage_def failed_def) + +lemma bind_failedI2: + "\\res\mres (f s). failed (g (fst res) (snd res)); mres (f s) \ {}\ + \ failed ((f >>= g) s)" + by (force simp: bind_def mres_def image_def split_def failed_def) + +lemma empty_fail_bind[empty_fail_cond]: + "\ empty_fail a; \x. empty_fail (b x) \ \ empty_fail (a >>= b)" + unfolding empty_fail_def + apply clarsimp + apply (drule mres_bind_empty) + apply (erule context_disjE) + apply (fastforce intro: bind_failedI1) + apply (fastforce intro!: bind_failedI2) + done + +lemma empty_fail_return[empty_fail_term]: + "empty_fail (return x)" + by (simp add: empty_fail_def return_def mres_def vimage_def) + +lemma empty_fail_returnOk[empty_fail_term]: + "empty_fail (returnOk v)" + by (fastforce simp: returnOk_def empty_fail_term) + +lemma empty_fail_throwError[empty_fail_term]: + "empty_fail (throwError v)" + by (fastforce simp: throwError_def empty_fail_term) + +lemma empty_fail_lift[empty_fail_cond]: + "\ \x. empty_fail (f x) \ \ empty_fail (lift f x)" + unfolding lift_def + by (auto simp: empty_fail_term split: sum.split) + +lemma empty_fail_liftE[empty_fail_cond]: + "empty_fail f \ empty_fail (liftE f)" + by (simp add: liftE_def empty_fail_cond empty_fail_term) + +lemma empty_fail_bindE[empty_fail_cond]: + "\ empty_fail f; \rv. empty_fail (g rv) \ \ empty_fail (f >>=E g)" + by (simp add: bindE_def empty_fail_cond) + +lemma empty_fail_mapM[empty_fail_cond]: + assumes m: "\x. x \ set xs \ empty_fail (m x)" + shows "empty_fail (mapM m xs)" +using m +proof (induct xs) + case Nil + thus ?case by (simp add: mapM_def sequence_def empty_fail_term) +next + case Cons + have P: "\m x xs. 
mapM m (x # xs) = (do y \ m x; ys \ (mapM m xs); return (y # ys) od)" + by (simp add: mapM_def sequence_def Let_def) + from Cons + show ?case by (simp add: P m empty_fail_cond empty_fail_term) +qed + +lemma empty_fail_fail[empty_fail_term]: + "empty_fail fail" + by (simp add: fail_def empty_fail_def) + +lemma empty_fail_assert[empty_fail_term]: + "empty_fail (assert P)" + unfolding assert_def by (simp add: empty_fail_term) + +lemma empty_fail_assert_opt[empty_fail_term]: + "empty_fail (assert_opt x)" + by (simp add: assert_opt_def empty_fail_term split: option.splits) + +lemma empty_fail_mk_ef[empty_fail_term]: + "empty_fail (mk_ef o m)" + by (simp add: empty_fail_def mk_ef_def failed_def) + +lemma empty_fail_gets_the[empty_fail_term]: + "empty_fail (gets_the f)" + unfolding gets_the_def + by (simp add: empty_fail_cond empty_fail_term) + +lemma empty_fail_gets_map[empty_fail_term]: + "empty_fail (gets_map f p)" + unfolding gets_map_def + by (simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_whenEs[empty_fail_cond]: + "(P \ empty_fail f) \ empty_fail (whenE P f)" + "(\P \ empty_fail f) \ empty_fail (unlessE P f)" + by (auto simp add: whenE_def unlessE_def empty_fail_term) + +lemma empty_fail_assertE[empty_fail_term]: + "empty_fail (assertE P)" + by (simp add: assertE_def empty_fail_term) + +lemma empty_fail_get[empty_fail_term]: + "empty_fail get" + by (simp add: empty_fail_def get_def mres_def vimage_def) + +lemma empty_fail_catch[empty_fail_cond]: + "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catch f g)" + by (simp add: catch_def empty_fail_cond empty_fail_term split: sum.split) + +lemma empty_fail_guard[empty_fail_term]: + "empty_fail (state_assert G)" + by (clarsimp simp: state_assert_def empty_fail_cond empty_fail_term) + +lemma empty_fail_spec[empty_fail_term]: + "empty_fail (state_select F)" + by (clarsimp simp: state_select_def empty_fail_def default_elem_def mres_def image_def failed_def) + +lemma empty_fail_when[empty_fail_cond]: + "(P \ empty_fail x) \ empty_fail (when P x)" + unfolding when_def + by (simp add: empty_fail_term) + +lemma empty_fail_unless[empty_fail_cond]: + "(\P \ empty_fail f) \ empty_fail (unless P f)" + unfolding unless_def + by (simp add: empty_fail_cond) + +lemma empty_fail_liftM[empty_fail_cond]: + "empty_fail m \ empty_fail (liftM f m)" + unfolding liftM_def + by (fastforce simp: empty_fail_term empty_fail_cond) + +lemma empty_fail_liftME[empty_fail_cond]: + "empty_fail m \ empty_fail (liftME f m)" + unfolding liftME_def + by (simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_handleE[empty_fail_cond]: + "\ empty_fail L; \r. empty_fail (R r) \ \ empty_fail (L R)" + by (clarsimp simp: handleE_def handleE'_def empty_fail_term empty_fail_cond split: sum.splits) + +lemma empty_fail_handle'[empty_fail_cond]: + "\empty_fail f; \e. empty_fail (handler e)\ \ empty_fail (f handler)" + unfolding handleE'_def + by (fastforce simp: empty_fail_term empty_fail_cond split: sum.splits) + +lemma empty_fail_sequence[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequence ms)" + unfolding sequence_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_sequence_x[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequence_x ms)" + unfolding sequence_x_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_sequenceE[empty_fail_cond]: + "(\m. 
m \ set ms \ empty_fail m) \ empty_fail (sequenceE ms)" + unfolding sequenceE_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_sequenceE_x[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequenceE_x ms)" + unfolding sequenceE_x_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_mapM_x[empty_fail_cond]: + "(\m. m \ f ` set ms \ empty_fail m) \ empty_fail (mapM_x f ms)" + unfolding mapM_x_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_mapME[empty_fail_cond]: + "(\m. m \ f ` set xs \ empty_fail m) \ empty_fail (mapME f xs)" + unfolding mapME_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_mapME_x[empty_fail_cond]: + "(\m'. m' \ f ` set xs \ empty_fail m') \ empty_fail (mapME_x f xs)" + unfolding mapME_x_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_filterM[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail (P m)) \ empty_fail (filterM P ms)" + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_zipWithM_x[empty_fail_cond]: + "(\x y. empty_fail (f x y)) \ empty_fail (zipWithM_x f xs ys)" + unfolding zipWithM_x_def zipWith_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_zipWithM[empty_fail_cond]: + "(\x y. empty_fail (f x y)) \ empty_fail (zipWithM f xs ys)" + unfolding zipWithM_def zipWith_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_maybeM[empty_fail_cond]: + "\x. empty_fail (f x) \ empty_fail (maybeM f t)" + unfolding maybeM_def + by (fastforce intro: empty_fail_term split: option.splits) + +lemma empty_fail_ifM[empty_fail_cond]: + "\ empty_fail P; empty_fail a; empty_fail b \ \ empty_fail (ifM P a b)" + by (simp add: ifM_def empty_fail_cond) + +lemma empty_fail_ifME[empty_fail_cond]: + "\ empty_fail P; empty_fail a; empty_fail b \ \ empty_fail (ifME P a b)" + by (simp add: ifME_def empty_fail_cond) + +lemma empty_fail_whenM[empty_fail_cond]: + "\ empty_fail P; empty_fail f \ \ empty_fail (whenM P f)" + by (simp add: whenM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_andM[empty_fail_cond]: + "\ empty_fail A; empty_fail B \ \ empty_fail (andM A B)" + by (simp add: andM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_orM[empty_fail_cond]: + "\ empty_fail A; empty_fail B \ \ empty_fail (orM A B)" + by (simp add: orM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_notM[empty_fail_cond]: + "empty_fail A \ empty_fail (notM A)" + by (simp add: notM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_put_trace_elem[empty_fail_term]: + "empty_fail (put_trace_elem x)" + by (clarsimp simp: put_trace_elem_def empty_fail_def mres_def vimage_def) + +lemma empty_fail_put_trace[empty_fail_term]: + "empty_fail (put_trace xs)" + apply (induct xs) + apply (clarsimp simp: empty_fail_term) + apply (clarsimp simp: empty_fail_term empty_fail_cond) + done + +lemma empty_fail_env_steps[empty_fail_term]: + "empty_fail env_steps" + by (simp add: env_steps_def empty_fail_term empty_fail_cond) + +lemma empty_fail_interference[empty_fail_term]: + "empty_fail interference" + by (simp add: interference_def commit_step_def empty_fail_term empty_fail_cond) + +lemma last_st_tr_not_empty: + "P s \ \xs. P (last_st_tr (map (Pair Env) xs) s')" + apply (rule exI[where x="[s]"]) + apply (auto simp: last_st_tr_def) + done + +lemma empty_fail_Await[empty_fail_term]: + "\s. 
c s \ empty_fail (Await c)" + by (clarsimp simp: Await_def last_st_tr_not_empty empty_fail_term empty_fail_cond) + +(* not everything [simp] by default, because side conditions can slow down simp a lot *) +lemmas empty_fail[wp, intro!] = empty_fail_term empty_fail_cond +lemmas [simp] = empty_fail_term + + +subsection \Equations and legacy names\ + +lemma empty_fail_select_eq[simp]: + "empty_fail (select V) = (V \ {})" + by (clarsimp simp: select_def empty_fail_def mres_def image_def) + +lemma empty_fail_liftM_eq[simp]: + "empty_fail (liftM f m) = empty_fail m" + unfolding liftM_def + by (fastforce dest: empty_fail_bindD1) + +lemma empty_fail_liftE_eq[simp]: + "empty_fail (liftE f) = empty_fail f" + by (auto simp: liftE_def empty_fail_bindD1) + +lemma liftME_empty_fail_eq[simp]: + "empty_fail (liftME f m) = empty_fail m" + unfolding liftME_def + by (fastforce dest: empty_fail_bindD1 simp: bindE_def) + +(* legacy name binding *) +lemmas empty_fail_error_bits = empty_fail_returnOk empty_fail_throwError empty_fail_liftE_eq + +end diff --git a/lib/Monads/trace/Trace_In_Monad.thy b/lib/Monads/trace/Trace_In_Monad.thy new file mode 100644 index 0000000000..0078c66c44 --- /dev/null +++ b/lib/Monads/trace/Trace_In_Monad.thy @@ -0,0 +1,153 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_In_Monad + imports Trace_Lemmas +begin + +section \Reasoning directly about states\ + +(* Lemmas about terms of the form "(v, s') \ mres (m s)" *) + +lemma in_throwError: + "((v, s') \ mres (throwError e s)) = (v = Inl e \ s' = s)" + by (simp add: throwError_def return_def mres_def) + +lemma in_returnOk: + "((v', s') \ mres (returnOk v s)) = (v' = Inr v \ s' = s)" + by (simp add: returnOk_def return_def mres_def) + +lemma in_bind: + "((r,s') \ mres ((do x \ f; g x od) s)) = + (\s'' x. (x, s'') \ mres (f s) \ (r, s') \ mres (g x s''))" + by (force simp: bind_def split_def mres_def split: tmres.splits) + +lemma in_bindE_R: + "((Inr r,s') \ mres ((doE x \ f; g x odE) s)) = + (\s'' x. (Inr x, s'') \ mres (f s) \ (Inr r, s') \ mres (g x s''))" + unfolding bindE_def lift_def split_def in_bind + by (force simp: throwError_def return_def mres_def split: sum.splits) + +lemma in_bindE_L: + "((Inl r, s') \ mres ((doE x \ f; g x odE) s)) \ + (\s'' x. (Inr x, s'') \ mres (f s) \ (Inl r, s') \ mres (g x s'')) \ ((Inl r, s') \ mres (f s))" + by (simp add: bindE_def in_bind) + (force simp: return_def throwError_def lift_def split_def mres_def split: sum.splits if_split_asm) + +lemma in_return: + "(r, s') \ mres (return v s) = (r = v \ s' = s)" + by (simp add: return_def mres_def) + +lemma in_liftE: + "((v, s') \ mres (liftE f s)) = (\v'. 
v = Inr v' \ (v', s') \ mres (f s))" + by (force simp: liftE_def in_bind in_return) + +lemma in_whenE: + "((v, s') \ mres (whenE P f s)) = ((P \ (v, s') \ mres (f s)) \ (\P \ v = Inr () \ s' = s))" + by (simp add: whenE_def in_returnOk) + +lemma inl_whenE: + "((Inl x, s') \ mres (whenE P f s)) = (P \ (Inl x, s') \ mres (f s))" + by (auto simp add: in_whenE) + +lemma inr_in_unlessE_throwError[termination_simp]: + "(Inr (), s') \ mres (unlessE P (throwError E) s) = (P \ s'=s)" + by (simp add: unlessE_def returnOk_def throwError_def in_return) + +lemma in_fail: + "r \ mres (fail s) = False" + by (simp add: fail_def mres_def) + +lemma in_assert: + "(r, s') \ mres (assert P s) = (P \ s' = s)" + by (simp add: assert_def return_def fail_def mres_def) + +lemma in_assertE: + "(r, s') \ mres (assertE P s) = (P \ r = Inr () \ s' = s)" + by (simp add: assertE_def returnOk_def return_def fail_def mres_def) + +lemma in_assert_opt: + "(r, s') \ mres (assert_opt v s) = (v = Some r \ s' = s)" + by (auto simp: assert_opt_def in_fail in_return split: option.splits) + +lemma in_get: + "(r, s') \ mres (get s) = (r = s \ s' = s)" + by (simp add: get_def mres_def) + +lemma in_gets: + "(r, s') \ mres (gets f s) = (r = f s \ s' = s)" + by (simp add: simpler_gets_def mres_def) + +lemma in_put: + "(r, s') \ mres (put x s) = (s' = x \ r = ())" + by (simp add: put_def mres_def) + +lemma in_when: + "(v, s') \ mres (when P f s) = ((P \ (v, s') \ mres (f s)) \ (\P \ v = () \ s' = s))" + by (simp add: when_def in_return) + +lemma in_unless: + "(v, s') \ mres (unless P f s) = ((\ P \ (v, s') \ mres (f s)) \ (P \ v = () \ s' = s))" + by (simp add: unless_def in_when) + +lemma in_unlessE: + "(v, s') \ mres (unlessE P f s) = ((\ P \ (v, s') \ mres (f s)) \ (P \ v = Inr () \ s' = s))" + by (simp add: unlessE_def in_returnOk) + +lemma inl_unlessE: + "((Inl x, s') \ mres (unlessE P f s)) = (\ P \ (Inl x, s') \ mres (f s))" + by (auto simp add: in_unlessE) + +lemma in_modify: + "(v, s') \ mres (modify f s) = (s'=f s \ v = ())" + by (auto simp add: modify_def bind_def get_def put_def mres_def) + +lemma gets_the_in_monad: + "((v, s') \ mres (gets_the f s)) = (s' = s \ f s = Some v)" + by (auto simp: gets_the_def in_bind in_gets in_assert_opt split: option.split) + +lemma in_alternative: + "(r,s') \ mres ((f \ g) s) = ((r,s') \ mres (f s) \ (r,s') \ mres (g s))" + by (auto simp add: alternative_def mres_def) + +lemma in_liftM: + "((r, s') \ mres (liftM t f s)) = (\r'. (r', s') \ mres (f s) \ r = t r')" + by (simp add: liftM_def in_return in_bind) + +lemma in_bindE: + "(rv, s') \ mres ((f >>=E (\rv'. g rv')) s) = + ((\ex. rv = Inl ex \ (Inl ex, s') \ mres (f s)) \ + (\rv' s''. (rv, s') \ mres (g rv' s'') \ (Inr rv', s'') \ mres (f s)))" + apply (clarsimp simp: bindE_def in_bind lift_def in_throwError) + apply (safe del: disjCI; strengthen subst[where P="\x. 
x \ mres (f s)", mk_strg I _ E]; + auto simp: in_throwError split: sum.splits) + done + +lemmas in_monad = inl_whenE in_whenE in_liftE in_bind in_bindE_L + in_bindE_R in_returnOk in_throwError in_fail + in_assertE in_assert in_return in_assert_opt + in_get in_gets in_put in_when inl_unlessE in_unlessE + in_unless in_modify gets_the_in_monad + in_alternative in_liftM + +lemma bind_det_exec: + "mres (a s) = {(r,s')} \ mres ((a >>= b) s) = mres (b r s')" + by (simp add: in_bind set_eq_iff) + +lemma in_bind_det_exec: + "mres (a s) = {(r,s')} \ (s'' \ mres ((a >>= b) s)) = (s'' \ mres (b r s'))" + by (cases s'', simp add: in_bind) + +lemma exec_put: + "(put s' >>= m) s = m () s'" + by (simp add: bind_def put_def mres_def split_def) + +lemma bind_execI: + "\ (r'',s'') \ mres (f s); \x \ mres (g r'' s''). P x \ \ \x \ mres ((f >>= g) s). P x" + by (force simp: Bex_def in_bind) + +end diff --git a/lib/Monads/trace/Trace_Lemmas.thy b/lib/Monads/trace/Trace_Lemmas.thy new file mode 100644 index 0000000000..eb504156f8 --- /dev/null +++ b/lib/Monads/trace/Trace_Lemmas.thy @@ -0,0 +1,271 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Lemmas + imports Trace_Monad +begin + +section \General Lemmas Regarding the Interference Trace Monad\ + +subsection \Congruence Rules for the Function Package\ + +\ \FIXME: where should this go\ +lemma in_mres: + "(tr, Result (rv, s)) \ S \ (rv, s) \ mres S" + by (fastforce simp: mres_def intro: image_eqI[rotated]) + +lemma bind_apply_cong': + "\f s = f' s'; (\rv st. (rv, st) \ mres (f s) \ g rv st = g' rv st)\ + \ bind f g s = bind f' g' s'" + apply (simp add: bind_def) + apply (rule SUP_cong; simp?) + apply (clarsimp split: tmres.split) + apply (drule spec2, drule mp, erule in_mres) + apply simp + done + +lemmas bind_apply_cong = bind_apply_cong'[rule_format, fundef_cong] + +lemma bind_cong[fundef_cong]: + "\ f = f'; \v s s'. (v, s') \ mres (f' s) \ g v s' = g' v s' \ \ f >>= g = f' >>= g'" + by (auto intro!: bind_apply_cong) + +lemma bindE_cong[fundef_cong]: + "\ M = M' ; \v s s'. (Inr v, s') \ mres (M' s) \ N v s' = N' v s' \ \ bindE M N = bindE M' N'" + by (auto simp: bindE_def lift_def split: sum.splits intro!: bind_cong) + +lemma bindE_apply_cong[fundef_cong]: + "\ f s = f' s'; \rv st. 
(Inr rv, st) \ mres (f' s') \ g rv st = g' rv st \ + \ (f >>=E g) s = (f' >>=E g') s'" + by (auto simp: bindE_def lift_def split: sum.splits intro!: bind_apply_cong) + +lemma K_bind_apply_cong[fundef_cong]: + "\ f st = f' st' \ \ K_bind f arg st = K_bind f' arg' st'" + by simp + +lemma when_apply_cong[fundef_cong]: + "\ C = C'; s = s'; C' \ m s' = m' s' \ \ when C m s = when C' m' s'" + by (simp add: when_def) + +lemma unless_apply_cong[fundef_cong]: + "\ C = C'; s = s'; \ C' \ m s' = m' s' \ \ unless C m s = unless C' m' s'" + by (simp add: when_def unless_def) + +lemma whenE_apply_cong[fundef_cong]: + "\ C = C'; s = s'; C' \ m s' = m' s' \ \ whenE C m s = whenE C' m' s'" + by (simp add: whenE_def) + +lemma unlessE_apply_cong[fundef_cong]: + "\ C = C'; s = s'; \ C' \ m s' = m' s' \ \ unlessE C m s = unlessE C' m' s'" + by (simp add: unlessE_def) + + +subsection \Simplifying Monads\ + +lemma nested_bind[simp]: + "do x <- do y <- f; return (g y) od; h x od = do y <- f; h (g y) od" + by (fastforce simp: bind_def return_def split: tmres.splits) + +lemma bind_dummy_ret_val: + "do y \ a; b od = do a; b od" + by simp + +lemma fail_update[iff]: + "fail (f s) = fail s" + by (simp add: fail_def) + +lemma fail_bind[simp]: + "fail >>= f = fail" + by (simp add: bind_def fail_def) + +lemma fail_bindE[simp]: + "fail >>=E f = fail" + by (simp add: bindE_def bind_def fail_def) + +lemma assert_A_False[simp]: + "assert False = fail" + by (simp add: assert_def) + +lemma assert_A_True[simp]: + "assert True = return ()" + by (simp add: assert_def) + +lemma assert_False[simp]: + "assert False >>= f = fail" + by simp + +lemma assert_True[simp]: + "assert True >>= f = f ()" + by simp + +lemma assertE_False[simp]: + "assertE False >>=E f = fail" + by (simp add: assertE_def) + +lemma assertE_True[simp]: + "assertE True >>=E f = f ()" + by (simp add: assertE_def) + +lemma when_False_bind[simp]: + "when False g >>= f = f ()" + by (rule ext) (simp add: when_def bind_def return_def) + +lemma when_True_bind[simp]: + "when True g >>= f = g >>= f" + by (simp add: when_def bind_def return_def) + +lemma whenE_False_bind[simp]: + "whenE False g >>=E f = f ()" + by (simp add: whenE_def bindE_def returnOk_def lift_def) + +lemma whenE_True_bind[simp]: + "whenE True g >>=E f = g >>=E f" + by (simp add: whenE_def bindE_def returnOk_def lift_def) + +lemma when_True[simp]: + "when True X = X" + by (clarsimp simp: when_def) + +lemma when_False[simp]: + "when False X = return ()" + by (clarsimp simp: when_def) + +lemma unless_False[simp]: + "unless False X = X" + by (clarsimp simp: unless_def) + +lemma unlessE_False[simp]: + "unlessE False f = f" + unfolding unlessE_def by fastforce + +lemma unless_True[simp]: + "unless True X = return ()" + by (clarsimp simp: unless_def) + +lemma unlessE_True[simp]: + "unlessE True f = returnOk ()" + unfolding unlessE_def by fastforce + +lemma unlessE_whenE: + "unlessE P = whenE (\P)" + by (rule ext) (simp add: unlessE_def whenE_def) + +lemma unless_when: + "unless P = when (\P)" + by (rule ext) (simp add: unless_def when_def) + +lemma gets_to_return[simp]: + "gets (\s. 
v) = return v" + by (clarsimp simp: gets_def put_def get_def bind_def return_def) + +lemma assert_opt_Some: + "assert_opt (Some x) = return x" + by (simp add: assert_opt_def) + +lemma assertE_liftE: + "assertE P = liftE (assert P)" + by (simp add: assertE_def assert_def liftE_def returnOk_def) + +lemma liftE_handleE'[simp]: + "(liftE a b) = liftE a" + by (clarsimp simp: liftE_def handleE'_def) + +lemma liftE_handleE[simp]: + "(liftE a b) = liftE a" + unfolding handleE_def by simp + +lemma alternative_bind: + "((a \ b) >>= c) = ((a >>= c) \ (b >>= c))" + by (fastforce simp add: alternative_def bind_def split_def) + +lemma alternative_refl: + "(a \ a) = a" + by (simp add: alternative_def) + +lemma alternative_com: + "(f \ g) = (g \ f)" + by (auto simp: alternative_def) + +lemma liftE_alternative: + "liftE (a \ b) = (liftE a \ liftE b)" + by (simp add: liftE_def alternative_bind) + + +subsection \Lifting and Alternative Basic Definitions\ + +lemma liftE_liftM: + "liftE = liftM Inr" + by (auto simp: liftE_def liftM_def) + +lemma liftME_liftM: + "liftME f = liftM (case_sum Inl (Inr \ f))" + unfolding liftME_def liftM_def bindE_def returnOk_def lift_def + apply (rule ext) + apply (rule arg_cong[where f="bind m" for m]) + apply (fastforce simp: throwError_def split: sum.splits) + done + +lemma liftE_bindE: + "liftE a >>=E b = a >>= b" + by (simp add: liftE_def bindE_def lift_def bind_assoc) + +lemma liftM_id[simp]: + "liftM id = id" + by (auto simp: liftM_def) + +lemma liftM_bind: + "liftM t f >>= g = f >>= (\x. g (t x))" + by (simp add: liftM_def bind_assoc) + +lemma gets_bind_ign: + "gets f >>= (\x. m) = m" + by (simp add: bind_def simpler_gets_def) + +lemma exec_get: + "(get >>= f) x = f x x" + by (simp add: get_def bind_def) + +lemmas get_bind_apply = exec_get (* FIXME lib: eliminate *) + +lemma exec_gets: + "(gets f >>= m) s = m (f s) s" + by (simp add: simpler_gets_def bind_def) + +lemma bind_eqI: + "\ f = f'; \x. g x = g' x \ \ f >>= g = f' >>= g'" + by (auto simp: bind_def split_def) + +lemma condition_split: + "P (condition C a b s) \ (C s \ P (a s)) \ (\C s \ P (b s))" + by (clarsimp simp: condition_def) + +lemma condition_split_asm: + "P (condition C a b s) \ (\(C s \ \ P (a s) \ \C s \ \P (b s)))" + by (clarsimp simp: condition_def) + +lemmas condition_splits = condition_split condition_split_asm + +lemma condition_true_triv[simp]: + "condition (\_. True) A B = A" + by (fastforce split: condition_splits) + +lemma condition_false_triv[simp]: + "condition (\_. False) A B = B" + by (fastforce split: condition_splits) + +lemma condition_true: + "P s \ condition P A B s = A s" + by (clarsimp simp: condition_def) + +lemma condition_false: + "\ P s \ condition P A B s = B s" + by (clarsimp simp: condition_def) + +lemmas arg_cong_bind = arg_cong2[where f=bind] +lemmas arg_cong_bind1 = arg_cong_bind[OF refl ext] + +end diff --git a/lib/Monads/trace/Trace_Monad.thy b/lib/Monads/trace/Trace_Monad.thy new file mode 100644 index 0000000000..dc1c29b794 --- /dev/null +++ b/lib/Monads/trace/Trace_Monad.thy @@ -0,0 +1,847 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +chapter "Interference Trace Monad" + +theory Trace_Monad + imports + Fun_Pred_Syntax + Monad_Lib + Strengthen +begin + +text \ + The ``Interference Trace Monad''. This nondeterministic monad + records the state at every interference point, permitting + nondeterministic interference by the environment at these points. 
+ + The trace set initially includes all possible environment behaviours. + Trace steps are tagged as environment or self actions, and can then + be constrained to a smaller set where the environment acts according + to a rely constraint (i.e. rely-guarantee reasoning), or to set the + environment actions to be the self actions of another program (parallel + composition).\ + +section "The Trace Monad" + +text \Trace monad identifier. Me corresponds to the current thread running and Env to the environment.\ +datatype tmid = Me | Env + +text \ + Results associated with traces. Traces may correspond to incomplete, failed, or completed executions.\ +datatype ('s, 'a) tmres = Failed | Incomplete | Result "('a \ 's)" + +abbreviation map_tmres_rv :: "('a \ 'b) \ ('s, 'a) tmres \ ('s, 'b) tmres" where + "map_tmres_rv f \ map_tmres id f" + +text \ + tmonad returns a set of non-deterministic computations, including + a trace as a list of @{text "thread identifier \ state"}, and an optional + pair of result and state when the computation did not fail.\ +type_synonym ('s, 'a) tmonad = "'s \ ((tmid \ 's) list \ ('s, 'a) tmres) set" + +text \ + Print the type @{typ "('s,'a) tmonad"} instead of its unwieldy expansion. + Needs an AST translation in code, because it needs to check that the state variable + @{typ 's} occurs three times. This comparison is not guaranteed to always work as expected + (AST instances might have different decoration), but it does seem to work here.\ +print_ast_translation \ + let + fun tmonad_tr _ [t1, Ast.Appl [Ast.Constant @{type_syntax set}, + Ast.Appl [Ast.Constant @{type_syntax prod}, + Ast.Appl [Ast.Constant @{type_syntax list}, + Ast.Appl [Ast.Constant @{type_syntax prod}, + Ast.Constant @{type_syntax tmid}, t2]], + Ast.Appl [Ast.Constant @{type_syntax tmres}, t3, t4]]]] = + if t1 = t2 andalso t1 = t3 + then Ast.Appl [Ast.Constant @{type_syntax "tmonad"}, t1, t4] + else raise Match + in [(@{type_syntax "fun"}, tmonad_tr)] end\ + + +text \Returns monad results, ignoring failures and traces.\ +definition mres :: "((tmid \ 's) list \ ('s, 'a) tmres) set \ ('a \ 's) set" where + "mres r = Result -` (snd ` r)" + +text \True if the monad has a computation resulting in Failed.\ +definition failed :: "((tmid \ 's) list \ ('s, 'a) tmres) set \ bool" where + "failed r \ Failed \ snd ` r" + +lemma failed_simps[simp]: + "failed {(x, y)} = (y = Failed)" + "failed (r \ r') = (failed r \ failed r')" + "\ failed {}" + by (auto simp: failed_def) + + +text \ + The definition of fundamental monad functions @{text return} and + @{text bind}. The monad function @{text "return x"} does not change + the state, does not fail, and returns @{text "x"}.\ +definition return :: "'a \ ('s,'a) tmonad" where + "return a \ \s. ({([], Result (a, s))})" + +text \ + The monad function @{text "bind f g"}, also written @{text "f >>= g"}, + is the execution of @{term f} followed by the execution of @{text g}. + The function @{text g} takes the result value \emph{and} the result + state of @{text f} as parameter. The definition says that the result of + the combined operation is the union of the set of sets that is created + by @{text g} applied to the result sets of @{text f}. The combined + operation may have failed, if @{text f} may have failed or @{text g} may + have failed on any of the results of @{text f}.\ +abbreviation (input) fst_upd :: "('a \ 'c) \ 'a \ 'b \ 'c \ 'b" where + "fst_upd f \ \(a,b). (f a, b)" + +abbreviation (input) snd_upd :: "('b \ 'c) \ 'a \ 'b \ 'a \ 'c" where + "snd_upd f \ \(a,b). 
(a, f b)" + +definition bind :: + "('s, 'a) tmonad \ ('a \ ('s, 'b) tmonad) \ ('s, 'b) tmonad" (infixl ">>=" 60) + where + "bind f g \ \s. \(xs, r) \ (f s). case r of Failed \ {(xs, Failed)} + | Incomplete \ {(xs, Incomplete)} + | Result (rv, s) \ fst_upd (\ys. ys @ xs) ` g rv s" + +text \Sometimes it is convenient to write @{text bind} in reverse order.\ +abbreviation (input) bind_rev :: + "('c \ ('a, 'b) tmonad) \ ('a, 'c) tmonad \ ('a, 'b) tmonad" (infixl "=<<" 60) + where + "g =<< f \ f >>= g" + +text \ + The basic accessor functions of the state monad. @{text get} returns the + current state as result, does not change the state, and does not add to the + trace. @{text "put s"} returns nothing (@{typ unit}), changes the current + state to @{text s}, and does not add to the trace. @{text "put_trace xs"} + returns nothing (@{typ unit}), does not change the state, and adds @{text xs} + to the trace.\ +definition get :: "('s,'s) tmonad" where + "get \ \s. {([], Result (s, s))}" + +definition put :: "'s \ ('s, unit) tmonad" where + "put s \ \_. {([], Result ((), s))}" + +definition put_trace_elem :: "(tmid \ 's) \ ('s, unit) tmonad" where + "put_trace_elem x = (\s. {([], Incomplete), ([x], Result ((), s))})" + +primrec put_trace :: "(tmid \ 's) list \ ('s, unit) tmonad" where + "put_trace [] = return ()" + | "put_trace (x # xs) = (put_trace xs >>= (\_. put_trace_elem x))" + + +subsection "Nondeterminism" + +text \ + Basic nondeterministic functions. @{text "select A"} chooses an element + of the set @{text A} as the result, does not change the state, does not add + to the trace, and does not fail (even if the set is empty). @{text "f \ g"} + executes @{text f} or executes @{text g}. It returns the union of results and + traces of @{text f} and @{text g}.\ +definition select :: "'a set \ ('s, 'a) tmonad" where + "select A \ \s. (Pair [] ` Result ` (A \ {s}))" + +definition alternative :: + "('s,'a) tmonad \ ('s,'a) tmonad \ ('s,'a) tmonad" (infixl "\" 20) + where + "f \ g \ \s. (f s \ g s)" + +text \ + FIXME: The @{text select_f} function was left out here until we figure + out what variant we actually need.\ + +definition + "default_elem dflt A \ if A = {} then {dflt} else A" + +text \ + @{text state_select} takes a relationship between states, and outputs + nondeterministically a state related to the input state. Fails if no such + state exists.\ +definition state_select :: "('s \ 's) set \ ('s, unit) tmonad" where + "state_select r \ + \s. (Pair [] ` default_elem Failed (Result ` (\x. ((), x)) ` {s'. (s, s') \ r}))" + + +subsection "Failure" + +text \ + The monad function that always fails. Returns an empty trace with a Failed result.\ +definition fail :: "('s, 'a) tmonad" where + "fail \ \s. {([], Failed)}" + +text \Assertions: fail if the property @{text P} is not true\ +definition assert :: "bool \ ('a, unit) tmonad" where + "assert P \ if P then return () else fail" + +text \Fail if the value is @{const None}, return result @{text v} for @{term "Some v"}\ +definition assert_opt :: "'a option \ ('b, 'a) tmonad" where + "assert_opt v \ case v of None \ fail | Some v \ return v" + +text \An assertion that also can introspect the current state.\ +definition state_assert :: "('s \ bool) \ ('s, unit) tmonad" where + "state_assert P \ get >>= (\s. 
assert (P s))" + +subsection "Generic functions on top of the state monad" + +text \Apply a function to the current state and return the result without changing the state.\ +definition gets :: "('s \ 'a) \ ('s, 'a) tmonad" where + "gets f \ get >>= (\s. return (f s))" + +text \Modify the current state using the function passed in.\ +definition modify :: "('s \ 's) \ ('s, unit) tmonad" where + "modify f \ get >>= (\s. put (f s))" + +lemma simpler_gets_def: + "gets f = (\s. {([], Result ((f s), s))})" + by (simp add: gets_def return_def bind_def get_def) + +lemma simpler_modify_def: + "modify f = (\s. {([], Result ((),(f s)))})" + by (simp add: modify_def bind_def get_def put_def) + +text \Execute the given monad when the condition is true, return @{text "()"} otherwise.\ +definition "when" :: "bool \ ('s, unit) tmonad \ ('s, unit) tmonad" where + "when P m \ if P then m else return ()" + +text \Execute the given monad unless the condition is true, return @{text "()"} otherwise.\ +definition unless :: "bool \ ('s, unit) tmonad \ ('s, unit) tmonad" where + "unless P m \ when (\P) m" + +text \ + Perform a test on the current state, performing the left monad if + the result is true or the right monad if the result is false.\ +definition condition :: + "('s \ bool) \ ('s, 'r) tmonad \ ('s, 'r) tmonad \ ('s, 'r) tmonad" + where + "condition P L R \ \s. if (P s) then (L s) else (R s)" + +notation (output) + condition ("(condition (_)// (_)// (_))" [1000,1000,1000] 1000) + +text \ + Apply an option valued function to the current state, fail if it returns @{const None}, + return @{text v} if it returns @{term "Some v"}.\ +definition gets_the :: "('s \ 'a option) \ ('s, 'a) tmonad" where + "gets_the f \ gets f >>= assert_opt" + +text \ + Get a map (such as a heap) from the current state and apply an argument to the map. + Fail if the map returns @{const None}, otherwise return the value.\ +definition gets_map :: "('s \ 'a \ 'b option) \ 'a \ ('s, 'b) tmonad" where + "gets_map f p \ gets f >>= (\m. assert_opt (m p))" + + +subsection \The Monad Laws\ + +text \An alternative definition of @{term bind}, sometimes more convenient.\ +lemma bind_def': + "bind f g \ + \s. ((\xs. (xs, Failed)) ` {xs. (xs, Failed) \ f s}) + \ ((\xs. (xs, Incomplete)) ` {xs. (xs, Incomplete) \ f s}) + \ (\(xs, rv, s) \ {(xs, rv, s'). (xs, Result (rv, s')) \ f s}. fst_upd (\ys. ys @ xs) ` g rv s)" + apply (clarsimp simp add: bind_def fun_eq_iff + Un_Union_image split_def + intro!: eq_reflection) + apply (fastforce split: tmres.splits elim!: rev_bexI[where A="f x" for x] + intro: image_eqI[rotated]) + done + +lemma elem_bindE: + "\(tr, res) \ bind f g s; + \res = Incomplete \ res = Failed; (tr, map_tmres undefined undefined res) \ f s\ \ P; + \tr' tr'' x s'. \(tr', Result (x, s')) \ f s; (tr'', res) \ g x s'; tr = tr'' @ tr'\ \ P\ + \ P" + by (auto simp: bind_def') + +text \Each monad satisfies at least the following three laws.\ + +\ \FIXME: is this necessary? 
If it is, move it\ +declare map_option.identity[simp] + +text \@{term return} is absorbed at the left of a @{term bind}, applying the return value directly:\ +lemma return_bind[simp]: + "(return x >>= f) = f x" + by (simp add: return_def bind_def) + +text \@{term return} is absorbed on the right of a @{term bind}\ +lemma bind_return[simp]: + "(m >>= return) = m" + by (auto simp: fun_eq_iff bind_def return_def + split: tmres.splits) + +text \@{term bind} is associative\ +lemma bind_assoc: + fixes m :: "('a,'b) tmonad" + fixes f :: "'b \ ('a,'c) tmonad" + fixes g :: "'c \ ('a,'d) tmonad" + shows "(m >>= f) >>= g = m >>= (\x. f x >>= g)" + apply (unfold bind_def Let_def split_def) + apply (rule ext) + apply clarsimp + apply (rule SUP_cong[OF refl], clarsimp) + apply (split tmres.split; intro conjI impI; clarsimp) + apply (simp add: image_Union) + apply (rule SUP_cong[OF refl], clarsimp) + apply (split tmres.split; intro conjI impI; clarsimp) + apply (simp add: image_image) + done + + +section \Adding Exceptions\ + +text \ + The type @{typ "('s,'a) tmonad"} gives us nondeterminism and + failure. We now extend this monad with exceptional return values + that abort normal execution, but can be handled explicitly. + We use the sum type to indicate exceptions. + + In @{typ "('s, 'e + 'a) tmonad"}, @{typ "'s"} is the state, + @{typ 'e} is an exception, and @{typ 'a} is a normal return value. + + This new type itself forms a monad again. Since type classes in + Isabelle are not powerful enough to express the class of monads, + we provide new names for the @{term return} and @{term bind} functions + in this monad. We call them @{text returnOk} (for normal return values) + and @{text bindE} (for composition). We also define @{text throwError} + to return an exceptional value.\ +definition returnOk :: "'a \ ('s, 'e + 'a) tmonad" where + "returnOk \ return o Inr" + +definition throwError :: "'e \ ('s, 'e + 'a) tmonad" where + "throwError \ return o Inl" + +text \ + Lifting a function over the exception type: if the input is an + exception, return that exception; otherwise continue execution.\ +definition lift :: "('a \ ('s, 'e + 'b) tmonad) \ 'e +'a \ ('s, 'e + 'b) tmonad" where + "lift f v \ case v of Inl e \ throwError e | Inr v' \ f v'" + +text \ + The definition of @{term bind} in the exception monad (new + name @{text bindE}): the same as normal @{term bind}, but + the right-hand side is skipped if the left-hand side + produced an exception.\ +definition bindE :: + "('s, 'e + 'a) tmonad \ ('a \ ('s, 'e + 'b) tmonad) \ ('s, 'e + 'b) tmonad" (infixl ">>=E" 60) + where + "f >>=E g \ f >>= lift g" + +text \ + Lifting a normal nondeterministic monad into the + exception monad is achieved by always returning its + result as normal result and never throwing an exception.\ +definition liftE :: "('s,'a) tmonad \ ('s, 'e+'a) tmonad" where + "liftE f \ f >>= (\r. 
return (Inr r))" + +text \ + Since the underlying type and @{text return} function changed, + we need new definitions for when and unless:\ +definition whenE :: "bool \ ('s, 'e + unit) tmonad \ ('s, 'e + unit) tmonad" where + "whenE P f \ if P then f else returnOk ()" + +definition unlessE :: "bool \ ('s, 'e + unit) tmonad \ ('s, 'e + unit) tmonad" where + "unlessE P f \ if P then returnOk () else f" + +text \ + Throwing an exception when the parameter is @{term None}, otherwise + returning @{term "v"} for @{term "Some v"}.\ +definition throw_opt :: "'e \ 'a option \ ('s, 'e + 'a) tmonad" where + "throw_opt ex x \ case x of None \ throwError ex | Some v \ returnOk v" + +text \ + Failure in the exception monad is redefined in the same way + as @{const whenE} and @{const unlessE}, with @{term returnOk} + instead of @{term return}.\ +definition assertE :: "bool \ ('a, 'e + unit) tmonad" where + "assertE P \ if P then returnOk () else fail" + + +subsection "Monad Laws for the Exception Monad" + +text \More direct definition of @{const liftE}:\ +lemma liftE_def2: + "liftE f = (\s. snd_upd (map_tmres_rv Inr) ` (f s))" + apply (clarsimp simp: fun_eq_iff liftE_def return_def split_def bind_def image_def) + apply (rule set_eqI) + apply (rule iffI) + apply clarsimp + apply (erule rev_bexI[where A="f s" for s]) + apply (clarsimp split: tmres.splits) + apply clarsimp + apply (rule exI) + apply (rule conjI) + apply (erule rev_bexI[where A="f s" for s]) + apply (rule refl) + apply (clarsimp split: tmres.splits) + done + +text \Left @{const returnOk} absorbtion over @{term bindE}:\ +lemma returnOk_bindE[simp]: "(returnOk x >>=E f) = f x" + unfolding bindE_def returnOk_def + by (clarsimp simp: lift_def) + +lemma lift_return[simp]: + "lift (return \ Inr) = return" + by (auto simp: lift_def throwError_def split: sum.splits) + +text \Right @{const returnOk} absorbtion over @{term bindE}:\ +lemma bindE_returnOk[simp]: + "(m >>=E returnOk) = m" + by (simp add: bindE_def returnOk_def) + +text \Associativity of @{const bindE}:\ +lemma bindE_assoc: + "(m >>=E f) >>=E g = m >>=E (\x. f x >>=E g)" + unfolding bindE_def + by (fastforce simp: bind_assoc lift_def throwError_def + split: sum.splits + intro: arg_cong[where f="\x. m >>= x"]) + +text \@{const returnOk} could also be defined via @{const liftE}:\ +lemma returnOk_liftE: + "returnOk x = liftE (return x)" + by (simp add: liftE_def returnOk_def) + +text \Execution after throwing an exception is skipped:\ +lemma throwError_bindE[simp]: + "(throwError E >>=E f) = throwError E" + by (simp add: bindE_def bind_def throwError_def lift_def return_def) + + +section "Syntax" + +text \This section defines traditional Haskell-like do-syntax + for the state monad in Isabelle.\ + +subsection "Syntax for the Interference Trace Monad" + +text \ + We use @{text K_bind} to syntactically indicate the case where the return argument + of the left side of a @{term bind} is ignored\ +definition K_bind :: "'a \ 'b \ 'a" where + K_bind_def[iff]: "K_bind \ \x y. x" + +nonterminal + dobinds and dobind and nobind + +syntax + "_dobind" :: "[pttrn, 'a] => dobind" ("(_ <-/ _)" 10) + "" :: "dobind => dobinds" ("_") + "_nobind" :: "'a => dobind" ("_") + "_dobinds" :: "[dobind, dobinds] => dobinds" ("(_);//(_)") + + "_do" :: "[dobinds, 'a] => 'a" ("(do ((_);//(_))//od)" 100) +syntax (xsymbols) + "_dobind" :: "[pttrn, 'a] => dobind" ("(_ \/ _)" 10) + +translations + "_do (_dobinds b bs) e" == "_do b (_do bs e)" + "_do (_nobind b) e" == "b >>= (CONST K_bind e)" + "do x <- a; e od" == "a >>= (\x. 
e)" + +text \Syntax examples:\ +lemma "do x \ return 1; + return (2::nat); + return x + od = + return 1 >>= + (\x. return (2::nat) >>= + K_bind (return x))" + by (rule refl) + +lemma "do x \ return 1; + return 2; + return x + od = return 1" + by simp + +subsection "Syntax for the Exception Monad" + +text \ + Since the exception monad is a different type, we need to distinguish it in the syntax + if we want to avoid ambiguous terms. We use @{text doE}/@{text odE} for this, but can + re-use most of the productions from @{text do}/@{text od} above. \ +syntax + "_doE" :: "[dobinds, 'a] => 'a" ("(doE ((_);//(_))//odE)" 100) + +translations + "_doE (_dobinds b bs) e" == "_doE b (_doE bs e)" + "_doE (_nobind b) e" == "b >>=E (CONST K_bind e)" + "doE x <- a; e odE" == "a >>=E (\x. e)" + +text \Syntax examples:\ +lemma "doE x \ returnOk 1; + returnOk (2::nat); + returnOk x + odE = + returnOk 1 >>=E + (\x. returnOk (2::nat) >>=E + K_bind (returnOk x))" + by (rule refl) + +lemma "doE x \ returnOk 1; + returnOk 2; + returnOk x + odE = returnOk 1" + by simp + + +subsection "Interference command" + +text \ + Interference commands must be inserted in between actions that can be interfered with by + commands running in other threads.\ + +definition last_st_tr :: "(tmid * 's) list \ 's \ 's" where + "last_st_tr tr s0 \ hd (map snd tr @ [s0])" + +lemma last_st_tr_simps[simp]: + "last_st_tr [] s = s" + "last_st_tr (x # xs) s = snd x" + "last_st_tr (tr @ tr') s = last_st_tr tr (last_st_tr tr' s)" + by (simp add: last_st_tr_def hd_append)+ + +text \Nondeterministically add all possible environment events to the trace.\ +definition env_steps :: "('s,unit) tmonad" where + "env_steps \ + do + s \ get; + \ \Add unfiltered environment events to the trace\ + xs \ select UNIV; + tr \ return (map (Pair Env) xs); + put_trace tr; + \ \Pick the last event of the trace as the final state\ + put (last_st_tr tr s) + od" + +text \Add the current state to the trace, tagged as a self action.\ +definition commit_step :: "('s,unit) tmonad" where + "commit_step \ + do + s \ get; + put_trace [(Me,s)] + od" + +text \ + Record the action taken by the current thread since the last interference point and + then add unfiltered environment events.\ +definition interference :: "('s,unit) tmonad" where + "interference \ + do + commit_step; + env_steps + od" + + +section "Library of additional Monadic Functions and Combinators" + +text \Lifting a normal function into the monad type:\ +definition liftM :: "('a \ 'b) \ ('s,'a) tmonad \ ('s, 'b) tmonad" where + "liftM f m \ do x \ m; return (f x) od" + +text \The same for the exception monad:\ +definition liftME :: "('a \ 'b) \ ('s,'e+'a) tmonad \ ('s,'e+'b) tmonad" where + "liftME f m \ doE x \ m; returnOk (f x) odE" + +text \Execute @{term f} for @{term "Some x"}, otherwise do nothing.\ +definition maybeM :: "('a \ ('s, unit) tmonad) \ 'a option \ ('s, unit) tmonad" where + "maybeM f y \ case y of Some x \ f x | None \ return ()" + +text \Run a sequence of monads from left to right, ignoring return values.\ +definition sequence_x :: "('s, 'a) tmonad list \ ('s, unit) tmonad" where + "sequence_x xs \ foldr (\x y. x >>= (\_. 
y)) xs (return ())" + +text \ + Map a monadic function over a list by applying it to each element + of the list from left to right, ignoring return values.\ +definition mapM_x :: "('a \ ('s,'b) tmonad) \ 'a list \ ('s, unit) tmonad" where + "mapM_x f xs \ sequence_x (map f xs)" + +text \ + Map a monadic function with two parameters over two lists, + going through both lists simultaneously, left to right, ignoring + return values.\ +definition zipWithM_x :: + "('a \ 'b \ ('s,'c) tmonad) \ 'a list \ 'b list \ ('s, unit) tmonad" + where + "zipWithM_x f xs ys \ sequence_x (zipWith f xs ys)" + +text \ + The same three functions as above, but returning a list of + return values instead of @{text unit}\ +definition sequence :: "('s, 'a) tmonad list \ ('s, 'a list) tmonad" where + "sequence xs \ let mcons = (\p q. p >>= (\x. q >>= (\y. return (x#y)))) + in foldr mcons xs (return [])" + +definition mapM :: "('a \ ('s,'b) tmonad) \ 'a list \ ('s, 'b list) tmonad" where + "mapM f xs \ sequence (map f xs)" + +definition zipWithM :: + "('a \ 'b \ ('s,'c) tmonad) \ 'a list \ 'b list \ ('s, 'c list) tmonad" + where + "zipWithM f xs ys \ sequence (zipWith f xs ys)" + +definition foldM :: "('b \ 'a \ ('s, 'a) tmonad) \ 'b list \ 'a \ ('s, 'a) tmonad" where + "foldM m xs a \ foldr (\p q. q >>= m p) xs (return a) " + +definition foldME :: + "('b \ 'a \ ('s,('e + 'b)) tmonad) \ 'b \ 'a list \ ('s, ('e + 'b)) tmonad" + where + "foldME m a xs \ foldr (\p q. q >>=E swp m p) xs (returnOk a)" + +text \ + The sequence and map functions above for the exception monad, with and without + lists of return value\ +definition sequenceE_x :: "('s, 'e+'a) tmonad list \ ('s, 'e+unit) tmonad" where + "sequenceE_x xs \ foldr (\x y. doE _ <- x; y odE) xs (returnOk ())" + +definition mapME_x :: "('a \ ('s,'e+'b) tmonad) \ 'a list \ ('s,'e+unit) tmonad" where + "mapME_x f xs \ sequenceE_x (map f xs)" + +definition sequenceE :: "('s, 'e+'a) tmonad list \ ('s, 'e+'a list) tmonad" where + "sequenceE xs \ let mcons = (\p q. p >>=E (\x. q >>=E (\y. returnOk (x#y)))) + in foldr mcons xs (returnOk [])" + +definition mapME :: "('a \ ('s,'e+'b) tmonad) \ 'a list \ ('s,'e+'b list) tmonad" where + "mapME f xs \ sequenceE (map f xs)" + +text \Filtering a list using a monadic function as predicate:\ +primrec filterM :: "('a \ ('s, bool) tmonad) \ 'a list \ ('s, 'a list) tmonad" where + "filterM P [] = return []" +| "filterM P (x # xs) = do + b <- P x; + ys <- filterM P xs; + return (if b then (x # ys) else ys) + od" + +text \An alternative definition of @{term state_select}\ +lemma state_select_def2: + "state_select r \ (do + s \ get; + S \ return {s'. (s, s') \ r}; + assert (S \ {}); + s' \ select S; + put s' + od)" + apply (clarsimp simp add: state_select_def get_def return_def assert_def fail_def select_def + put_def bind_def fun_eq_iff default_elem_def + intro!: eq_reflection) + apply fastforce + done + + +section "Catching and Handling Exceptions" + +text \ + Turning an exception monad into a normal state monad + by catching and handling any potential exceptions:\ +definition catch :: + "('s, 'e + 'a) tmonad \ ('e \ ('s, 'a) tmonad) \ ('s, 'a) tmonad" (infix "" 10) + where + "f handler \ + do x \ f; + case x of + Inr b \ return b + | Inl e \ handler e + od" + +text \ + Handling exceptions, but staying in the exception monad. 
+ The handler may throw a type of exceptions different from + the left side.\ +definition handleE' :: + "('s, 'e1 + 'a) tmonad \ ('e1 \ ('s, 'e2 + 'a) tmonad) \ ('s, 'e2 + 'a) tmonad" + (infix "" 10) where + "f handler \ + do + v \ f; + case v of + Inl e \ handler e + | Inr v' \ return (Inr v') + od" + +text \ + A type restriction of the above that is used more commonly in + practice: the exception handler (potentially) throws exceptions + of the same type as the left-hand side.\ +definition handleE :: + "('s, 'x + 'a) tmonad \ ('x \ ('s, 'x + 'a) tmonad) \ ('s, 'x + 'a) tmonad" (infix "" 10) + where + "handleE \ handleE'" + +text \ + Handling exceptions, and additionally providing a continuation + if the left-hand side throws no exception:\ +definition handle_elseE :: + "('s, 'e + 'a) tmonad \ ('e \ ('s, 'ee + 'b) tmonad) \ ('a \ ('s, 'ee + 'b) tmonad) \ + ('s, 'ee + 'b) tmonad" ("_ _ _" 10) + where + "f handler continue \ + do v \ f; + case v of Inl e \ handler e + | Inr v' \ continue v' + od" + +subsection "Loops" + +text \ + Loops are handled using the following inductive predicate; + non-termination is represented using the failure flag of the + monad. +FIXME: update comment about non-termination\ + +inductive_set whileLoop_results :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ (('r \ 's) \ ((tmid \ 's) list \ ('s, 'r) tmres)) set" + for C B where + "\ \ C r s \ \ ((r, s), ([], Result (r, s))) \ whileLoop_results C B" + | "\ C r s; (ts, Failed) \ B r s \ \ ((r, s), (ts, Failed)) \ whileLoop_results C B" + | "\ C r s; (ts, Incomplete) \ B r s \ \ ((r, s), (ts, Incomplete)) \ whileLoop_results C B" + | "\ C r s; (ts, Result (r', s')) \ B r s; ((r', s'), (ts',z)) \ whileLoop_results C B; ts''=ts'@ts \ + \ ((r, s), (ts'',z)) \ whileLoop_results C B" + +\ \FIXME: there are fewer lemmas here than in NonDetMonad and I don't understand this well enough + to know whether this is correct or not.\ +inductive_cases whileLoop_results_cases_result_end: "((x,y), ([],Result r)) \ whileLoop_results C B" +inductive_cases whileLoop_results_cases_fail: "((x,y), (ts, Failed)) \ whileLoop_results C B" +inductive_cases whileLoop_results_cases_incomplete: "((x,y), (ts, Incomplete)) \ whileLoop_results C B" + +inductive_simps whileLoop_results_simps_valid: "((x,y), ([], Result z)) \ whileLoop_results C B" + +inductive whileLoop_terminates :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ 's \ bool" + for C B where + "\ C r s \ whileLoop_terminates C B r s" + | "\ C r s; \(r', s') \ Result -` snd ` (B r s). whileLoop_terminates C B r' s' \ + \ whileLoop_terminates C B r s" + +inductive_cases whileLoop_terminates_cases: "whileLoop_terminates C B r s" +inductive_simps whileLoop_terminates_simps: "whileLoop_terminates C B r s" + +definition whileLoop :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ ('s, 'r) tmonad" + where + "whileLoop C B \ (\r s. {(ts, res). ((r,s), ts,res) \ whileLoop_results C B})" + +notation (output) + whileLoop ("(whileLoop (_)// (_))" [1000, 1000] 1000) + +\ \FIXME: why does this differ from @{text Nondet_Monad}?\ +definition whileLoopT :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ ('s, 'r) tmonad" + where + "whileLoopT C B \ (\r s. {(ts, res). ((r,s), ts,res) \ whileLoop_results C B + \ whileLoop_terminates C B r s})" + +notation (output) + whileLoopT ("(whileLoopT (_)// (_))" [1000, 1000] 1000) + +definition whileLoopE :: + "('r \ 's \ bool) \ ('r \ ('s, 'e + 'r) tmonad) \ 'r \ ('s, ('e + 'r)) tmonad" + where + "whileLoopE C body \ + \r. whileLoop (\r s. 
(case r of Inr v \ C v s | _ \ False)) (lift body) (Inr r)" + +notation (output) + whileLoopE ("(whileLoopE (_)// (_))" [1000, 1000] 1000) + + +section "Combinators that have conditions with side effects" + +definition notM :: "('s, bool) tmonad \ ('s, bool) tmonad" where + "notM m = do c \ m; return (\ c) od" + +definition whileM :: "('s, bool) tmonad \ ('s, 'a) tmonad \ ('s, unit) tmonad" where + "whileM C B \ do + c \ C; + whileLoop (\c s. c) (\_. do B; C od) c; + return () + od" + +definition ifM :: "('s, bool) tmonad \ ('s, 'a) tmonad \ ('s, 'a) tmonad \ ('s, 'a) tmonad" where + "ifM test t f = do + c \ test; + if c then t else f + od" + +definition ifME :: + "('a, 'b + bool) tmonad \ ('a, 'b + 'c) tmonad \ ('a, 'b + 'c) tmonad \ ('a, 'b + 'c) tmonad" + where + "ifME test t f = doE + c \ test; + if c then t else f + odE" + +definition whenM :: "('s, bool) tmonad \ ('s, unit) tmonad \ ('s, unit) tmonad" where + "whenM t m = ifM t m (return ())" + +definition orM :: "('s, bool) tmonad \ ('s, bool) tmonad \ ('s, bool) tmonad" where + "orM a b = ifM a (return True) b" + +definition andM :: "('s, bool) tmonad \ ('s, bool) tmonad \ ('s, bool) tmonad" where + "andM a b = ifM a b (return False)" + + +section "Await command" + +text \@{term "Await c f"} blocks the execution until @{term "c"} is true, + and then atomically executes @{term "f"}.\ +definition Await :: "('s \ bool) \ ('s,unit) tmonad" where + "Await c \ + do + s \ get; + \ \Add unfiltered environment events, with the last one + satisfying the `c' state predicate\ + xs \ select {xs. c (last_st_tr (map (Pair Env) xs) s)}; + tr \ return (map (Pair Env) xs); + put_trace tr; + \ \Pick the last event of the trace\ + put (last_st_tr tr s) + od" + + +section "Parallel combinator" + +text \ + Programs combined with @{text parallel} should begin with an + @{const env_steps} and end with @{const interference} to be wellformed. + This ensures that there are at least enough environment steps in + each program for their traces to be matched up; without it + the composed program would be trivially empty.\ +definition parallel :: "('s,'a) tmonad \ ('s,'a) tmonad \ ('s,'a) tmonad" where + "parallel f g = (\s. {(xs, rv). \f_steps. length f_steps = length xs + \ (map (\(f_step, (id, s)). (if f_step then id else Env, s)) (zip f_steps xs), rv) \ f s + \ (map (\(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps xs), rv) \ g s})" + +abbreviation(input) + "parallel_mrg \ \((idn, s), (idn', _)). (if idn = Env then idn' else idn, s)" + +lemma parallel_def2: + "parallel f g = (\s. {(xs, rv). \ys zs. (ys, rv) \ f s \ (zs, rv) \ g s + \ list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ys zs + \ xs = map parallel_mrg (zip ys zs)})" + apply (simp add: parallel_def fun_eq_iff set_eq_iff) + apply safe + apply (rule exI, rule conjI, assumption)+ + apply (simp add: list_all2_conv_all_nth list_eq_iff_nth_eq split_def prod_eq_iff) + apply clarsimp + apply (rename_tac ys zs) + apply (rule_tac x="map (((\) Env) o fst) ys" in exI) + apply (simp add: zip_map1 o_def split_def) + apply (strengthen subst[where P="\xs. (xs, v) \ S" for v S, mk_strg I _ E]) + apply (clarsimp simp: list_all2_conv_all_nth list_eq_iff_nth_eq + split_def prod_eq_iff + split del: if_split cong: if_cong) + apply auto + done + +lemma parallel_def3: + "parallel f g = (\s. (\(ys, zs, rv). (map parallel_mrg (zip ys zs), rv)) + ` {(ys, zs, rv). (ys, rv) \ f s \ (zs, rv) \ g s + \ list_all2 (\y z. 
(fst y = Env \ fst z = Env) \ snd y = snd z) ys zs})" + by (simp add: parallel_def2, rule ext, auto simp: image_def) + +end diff --git a/lib/Monads/trace/Trace_Monad_Equations.thy b/lib/Monads/trace/Trace_Monad_Equations.thy new file mode 100644 index 0000000000..21c22b6188 --- /dev/null +++ b/lib/Monads/trace/Trace_Monad_Equations.thy @@ -0,0 +1,767 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Equations between monads. Conclusions of the form "f = g" where f and g are monads. + Should not be Hoare triples (those go into a different theory). *) + +theory Trace_Monad_Equations + imports + Trace_Empty_Fail + Trace_No_Fail + Trace_No_Trace +begin + +lemmas assertE_assert = assertE_liftE + +lemma assert_def2: + "assert v = assert_opt (if v then Some () else None)" + by (cases v; simp add: assert_def assert_opt_def) + +lemma return_returnOk: + "return (Inr x) = returnOk x" + unfolding returnOk_def by simp + +lemma exec_modify: + "(modify f >>= g) s = g () (f s)" + by (simp add: bind_def simpler_modify_def) + +lemma bind_return_eq: + "(a >>= return) = (b >>= return) \ a = b" + by clarsimp + +lemmas bind_then_eq = arg_cong2[where f=bind, OF _ refl] + +lemma bindE_bind_linearise: + "((f >>=E g) >>= h) = + (f >>= case_sum (h o Inl) (\rv. g rv >>= h))" + apply (simp add: bindE_def bind_assoc) + apply (rule ext, rule bind_apply_cong, rule refl) + apply (simp add: lift_def throwError_def split: sum.split) + done + +lemma throwError_bind: + "(throwError e >>= f) = (f (Inl e))" + by (simp add: throwError_def) + +lemma bind_bindE_assoc: + "((f >>= g) >>=E h) + = f >>= (\rv. g rv >>=E h)" + by (simp add: bindE_def bind_assoc) + +lemma returnOk_bind: + "returnOk v >>= f = (f (Inr v))" + by (simp add: returnOk_def) + +lemma liftE_bind: + "(liftE m >>= m') = (m >>= (\rv. m' (Inr rv)))" + by (simp add: liftE_def) + +lemma catch_throwError: "catch (throwError ft) g = g ft" + by (simp add: catch_def throwError_bind) + +lemma cart_singleton_image: + "S \ {s} = (\v. (v, s)) ` S" + by auto + +lemma liftE_bindE_handle: + "((liftE f >>=E (\x. g x)) h) + = f >>= (\x. g x h)" + by (simp add: liftE_bindE handleE_def handleE'_def + bind_assoc) + +lemma catch_liftE: + "catch (liftE g) h = g" + by (simp add: catch_def liftE_def) + +lemma catch_liftE_bindE: + "catch (liftE g >>=E (\x. f x)) h = g >>= (\x. catch (f x) h)" + by (simp add: liftE_bindE catch_def bind_assoc) + +lemma returnOk_catch_bind: + "catch (returnOk v) h >>= g = g v" + by (simp add: returnOk_liftE catch_liftE) + +lemma liftE_bindE_assoc: + "(liftE f >>=E g) >>= h = f >>= (\x. g x >>= h)" + by (simp add: liftE_bindE bind_assoc) + +lemma unlessE_throw_catch_If: + "catch (unlessE P (throwError e) >>=E f) g + = (if P then catch (f ()) g else g e)" + by (simp add: unlessE_def catch_throwError split: if_split) + +lemma whenE_bindE_throwError_to_if: + "whenE P (throwError e) >>=E (\_. b) = (if P then (throwError e) else b)" + unfolding whenE_def bindE_def + by (auto simp: lift_def throwError_def returnOk_def) + +lemma alternative_liftE_returnOk: + "(liftE m \ returnOk v) = liftE (m \ return v)" + by (simp add: liftE_def alternative_def returnOk_def bind_def return_def) + +lemma gets_the_return: + "(return x = gets_the f) = (\s. f s = Some x)" + apply (subst fun_eq_iff) + apply (simp add: return_def gets_the_def exec_gets + assert_opt_def fail_def + split: option.split) + apply auto + done + +lemma gets_the_returns: + "(return x = gets_the f) = (\s. 
f s = Some x)" + "(returnOk x = gets_the g) = (\s. g s = Some (Inr x))" + "(throwError x = gets_the h) = (\s. h s = Some (Inl x))" + by (simp_all add: returnOk_def throwError_def + gets_the_return) + +lemma gets_the_eq_bind: + "\ f = gets_the (fn_f o fn'); \rv. g rv = gets_the (fn_g rv o fn') \ + \ \fn. (f >>= g) = gets_the (fn o fn')" + apply clarsimp + apply (rule exI[where x="\s. case (fn_f s) of None \ None | Some v \ fn_g v s"]) + apply (simp add: gets_the_def bind_assoc exec_gets + assert_opt_def fun_eq_iff + split: option.split) + done + +lemma gets_the_eq_bindE: + "\ f = gets_the (fn_f o fn'); \rv. g rv = gets_the (fn_g rv o fn') \ + \ \fn. (f >>=E g) = gets_the (fn o fn')" + unfolding bindE_def + apply (erule gets_the_eq_bind[where fn_g="\rv s. case rv of Inl e \ Some (Inl e) | Inr v \ fn_g v s"]) + apply (simp add: lift_def gets_the_returns split: sum.split) + done + +lemma gets_the_fail: + "(fail = gets_the f) = (\s. f s = None)" + by (simp add: gets_the_def exec_gets assert_opt_def + fail_def return_def fun_eq_iff + split: option.split) + +lemma gets_the_asserts: + "(fail = gets_the f) = (\s. f s = None)" + "(assert P = gets_the g) = (\s. g s = (if P then Some () else None))" + "(assertE P = gets_the h) = (\s. h s = (if P then Some (Inr ()) else None))" + by (simp add: assert_def assertE_def gets_the_fail gets_the_returns + split: if_split)+ + +lemma ex_const_function: + "\f. \s. f (f' s) = v" + by force + +lemma gets_the_condsE: + "(\fn. whenE P f = gets_the (fn o fn')) + = (P \ (\fn. f = gets_the (fn o fn')))" + "(\fn. unlessE P g = gets_the (fn o fn')) + = (\ P \ (\fn. g = gets_the (fn o fn')))" + by (simp add: whenE_def unlessE_def gets_the_returns ex_const_function + split: if_split)+ + +lemma let_into_return: + "(let f = x in m f) = (do f \ return x; m f od)" + by simp + +lemma liftME_return: + "liftME f (returnOk v) = returnOk (f v)" + by (simp add: liftME_def) + +lemma fold_bindE_into_list_case: + "(doE v \ f; case_list (g v) (h v) x odE) + = (case_list (doE v \ f; g v odE) (\x xs. doE v \ f; h v x xs odE) x)" + by (simp split: list.split) + +lemma whenE_liftE: + "whenE P (liftE f) = liftE (when P f)" + by (simp add: whenE_def when_def returnOk_liftE) + +lemma whenE_whenE_body: + "whenE P (throwError f) >>=E (\_. whenE Q (throwError f) >>=E r) = whenE (P \ Q) (throwError f) >>=E r" + apply (cases P) + apply (simp add: whenE_def) + apply simp + done + +lemma whenE_whenE_same: + "whenE P (throwError f) >>=E (\_. whenE P (throwError g) >>=E r) = whenE P (throwError f) >>=E r" + apply (cases P) + apply (simp add: whenE_def) + apply simp + done + +lemma maybe_fail_bind_fail: + "unless P fail >>= (\_. fail) = fail" + "when P fail >>= (\_. fail) = fail" + by (clarsimp simp: bind_def fail_def return_def + unless_def when_def)+ + +lemma select_singleton[simp]: + "select {x} = return x" + by (simp add: select_def return_def) + +lemma return_modify: + "return () = modify id" + by (simp add: return_def simpler_modify_def) + +lemma liftE_liftM_liftME: + "liftE (liftM f m) = liftME f (liftE m)" + by (simp add: liftE_liftM liftME_liftM liftM_def) + +lemma bind_return_unit: + "f = (f >>= (\x. return ()))" + by simp + +lemma modify_id_return: + "modify id = return ()" + by (simp add: simpler_modify_def return_def) + +lemma liftE_bind_return_bindE_returnOk: + "liftE (v >>= (\rv. return (f rv))) + = (liftE v >>=E (\rv. 
returnOk (f rv)))" + by (simp add: liftE_bindE, simp add: liftE_def returnOk_def) + +lemma bind_eqI: + "g = g' \ f >>= g = f >>= g'" by simp + +lemma unlessE_throwError_returnOk: + "(if P then returnOk v else throwError x) + = (unlessE P (throwError x) >>=E (\_. returnOk v))" + by (cases P, simp_all add: unlessE_def) + +lemma gets_the_bind_eq: + "\ f s = Some x; g x s = h s \ + \ (gets_the f >>= g) s = h s" + by (simp add: gets_the_def bind_assoc exec_gets assert_opt_def) + +lemma zipWithM_x_modify: + "zipWithM_x (\a b. modify (f a b)) as bs + = modify (\s. foldl (\s (a, b). f a b s) s (zip as bs))" + apply (simp add: zipWithM_x_def zipWith_def sequence_x_def) + apply (induct ("zip as bs")) + apply (simp add: simpler_modify_def return_def) + apply (rule ext) + apply (simp add: simpler_modify_def bind_def split_def) + done + +lemma bind_return_subst: + assumes r: "\r. \\s. P x = r\ f x \\rv s. Q rv = r\" + shows + "do a \ f x; + g (Q a) + od = + do _ \ f x; + g (P x) + od" +proof - + have "do a \ f x; + return (Q a) + od = + do _ \ f x; + return (P x) + od" + using r + apply (subst fun_eq_iff) + apply (auto simp: bind_def valid_def return_def mres_def vimage_def split: tmres.splits; + fastforce simp: image_def intro: rev_bexI) + done + hence "do a \ f x; + return (Q a) + od >>= g = + do _ \ f x; + return (P x) + od >>= g" + by (rule bind_cong, simp) + thus ?thesis + by simp +qed + +lemma assert2: + "(do v1 \ assert P; v2 \ assert Q; c od) + = (do v \ assert (P \ Q); c od)" + by (simp add: assert_def split: if_split) + +lemma assert_opt_def2: + "assert_opt v = (do assert (v \ None); return (the v) od)" + by (simp add: assert_opt_def split: option.split) + +lemma gets_assert: + "(do v1 \ assert v; v2 \ gets f; c v1 v2 od) + = (do v2 \ gets f; v1 \ assert v; c v1 v2 od)" + by (simp add: simpler_gets_def return_def assert_def fail_def bind_def + split: if_split) + +lemma modify_assert: + "(do v2 \ modify f; v1 \ assert v; c v1 od) + = (do v1 \ assert v; v2 \ modify f; c v1 od)" + by (simp add: simpler_modify_def return_def assert_def fail_def bind_def + split: if_split) + +lemma gets_fold_into_modify: + "do x \ gets f; modify (g x) od = modify (\s. g (f s) s)" + "do x \ gets f; _ \ modify (g x); h od + = do modify (\s. g (f s) s); h od" + by (simp_all add: fun_eq_iff modify_def bind_assoc exec_gets + exec_get exec_put) + +lemma gets_return_gets_eq: + "gets f >>= (\g. return (h g)) = gets (\s. h (f s))" + by (simp add: simpler_gets_def bind_def return_def) + +lemma gets_prod_comp: + "gets (case x of (a, b) \ f a b) = (case x of (a, b) \ gets (f a b))" + by (auto simp: split_def) + +lemma bind_assoc2: + "(do x \ a; _ \ b; c x od) = (do x \ (do x' \ a; _ \ b; return x' od); c x od)" + by (simp add: bind_assoc) + +lemma bind_assoc_return_reverse: + "do x \ f; + _ \ g x; + h x + od = + do x \ do x \ f; + _ \ g x; + return x + od; + h x + od" + by (simp only: bind_assoc return_bind) + +lemma if_bind: + "(if P then (a >>= (\_. b)) else return ()) = + (if P then a else return ()) >>= (\_. if P then b else return ())" + by (cases P; simp) + +lemma bind_liftE_distrib: "(liftE (A >>= (\x. B x))) = (liftE A >>=E (\x. liftE (\s. B x s)))" + by (clarsimp simp: liftE_def bindE_def lift_def bind_assoc) + +(*FIXME: the following lemmas were originally solved by monad_eq, which doesn't yet exist for the + trace monad due to traces making equality more complicated.*) +lemma condition_apply_cong: + "\ c s = c' s'; s = s'; \s. c' s \ l s = l' s ; \s. 
\ c' s \ r s = r' s \ \ condition c l r s = condition c' l' r' s'" + by (simp add: condition_def) + +lemma condition_cong [cong, fundef_cong]: + "\ c = c'; \s. c' s \ l s = l' s; \s. \ c' s \ r s = r' s \ \ condition c l r = condition c' l' r'" + by (simp add: condition_def fun_eq_iff) + +lemma lift_Inr [simp]: "(lift X (Inr r)) = (X r)" + by (simp add: lift_def) + +lemma lift_Inl [simp]: "lift C (Inl a) = throwError a" + by (simp add: lift_def) + +lemma returnOk_def2: "returnOk a = return (Inr a)" + by (simp add: returnOk_def) + +lemma liftE_fail[simp]: "liftE fail = fail" + by (simp add: liftE_def) + +lemma if_catch_distrib: + "((if P then f else g) h) = (if P then f h else g h)" + by (simp split: if_split) + +lemma will_throw_and_catch: + "f = throwError e \ (f (\_. g)) = g" + by (simp add: catch_def throwError_def) + +lemma catch_is_if: + "(doE x <- f; g x odE h) = + do + rv <- f; + if sum.isl rv then h (projl rv) else g (projr rv) h + od" + apply (simp add: bindE_def catch_def bind_assoc cong: if_cong) + apply (rule bind_cong, rule refl) + apply (clarsimp simp: lift_def throwError_def split: sum.splits) + done + +lemma liftE_K_bind: "liftE ((K_bind (\s. A s)) x) = K_bind (liftE (\s. A s)) x" + by clarsimp + +lemma monad_eq_split: + assumes "\r s. Q r s \ f r s = f' r s" + "\P\ g \\r s. Q r s\" + "P s" + shows "(g >>= f) s = (g >>= f') s" +proof - + have pre: "\rv s'. \(rv, s') \ mres (g s)\ \ f rv s' = f' rv s'" + using assms unfolding valid_def apply - + by (erule allE[where x=s]) (fastforce simp: mres_def image_def) + show ?thesis + by (fastforce intro!: bind_apply_cong simp: pre) +qed + +lemma monad_eq_split2: + assumes eq: " g' s = g s" + assumes tail:"\r s. Q r s \ f r s = f' r s" + and hoare: "\P\ g \\r s. Q r s\" "P s" + shows "(g >>= f) s = (g' >>= f') s" + apply (rule trans) + apply (rule monad_eq_split[OF tail hoare], assumption) + apply (clarsimp simp: bind_def eq) + done + +lemma monad_eq_split_tail: + "\f = g; a s = b s\ \ (a >>= f) s = ((b >>= g) s)" + by (simp add:bind_def) + +lemma double_gets_drop_regets: + "(do x \ gets f; + y \ gets f; + m y x + od) = + (do x \ gets f; + m x x + od)" + by (simp add: simpler_gets_def bind_def) + +lemma state_assert_false[simp]: + "state_assert (\_. False) = fail" + by (simp add: state_assert_def get_def bind_def) + +lemma condition_fail_rhs: + "condition C X fail = (state_assert C >>= (\_. X))" + by (auto simp: condition_def state_assert_def assert_def fail_def return_def get_def bind_def + fun_eq_iff) + +lemma condition_swap: + "condition C A B = condition (\s. \ C s) B A" + by (simp add: condition_def fun_eq_iff) + +lemma condition_fail_lhs: + "condition C fail X = (state_assert (\s. \ C s) >>= (\_. X))" + by (metis condition_fail_rhs condition_swap) + +lemma condition_bind_fail[simp]: + "(condition C A B >>= (\_. fail)) = condition C (A >>= (\_. fail)) (B >>= (\_. fail))" + by (auto simp: condition_def assert_def fail_def bind_def fun_eq_iff) + +lemma bind_fail_propagates: + "\no_trace A; empty_fail A\ \ A >>= (\_. fail) = fail" + by (fastforce simp: no_trace_def fail_def bind_def case_prod_unfold + dest!: empty_fail_not_empty split: tmres.splits) + +lemma simple_bind_fail [simp]: + "(state_assert X >>= (\_. fail)) = fail" + "(modify M >>= (\_. fail)) = fail" + "(return X >>= (\_. fail)) = fail" + "(gets X >>= (\_. fail)) = fail" + by (auto intro!: bind_fail_propagates) + +lemma bind_inv_inv_comm: + "\ \P. \P\ f \\_. P\; \P. \P\ g \\_. 
P\; + empty_fail f; empty_fail g; no_trace f; no_trace g \ \ + do x \ f; y \ g; n x y od = do y \ g; x \ f; n x y od" + apply (rule ext) + apply (rule trans[where s="(do (x, y) \ do x \ f; y \ (\_. g s) ; (\_. return (x, y) s) od; + n x y od) s" for s]) + apply (simp add: bind_assoc) + apply (intro bind_apply_cong, simp_all)[1] + apply (metis in_inv_by_hoareD) + apply (simp add: return_def bind_def) + apply (metis in_inv_by_hoareD) + apply (rule trans[where s="(do (x, y) \ do y \ g; x \ (\_. f s) ; (\_. return (x, y) s) od; + n x y od) s" for s, rotated]) + apply (simp add: bind_assoc) + apply (intro bind_apply_cong, simp_all)[1] + apply (metis in_inv_by_hoareD) + apply (simp add: return_def bind_def) + apply (metis in_inv_by_hoareD) + apply (rule bind_apply_cong, simp_all) + apply (clarsimp simp: bind_def split_def return_def) + apply (rule subset_antisym; + clarsimp simp: no_trace_def case_prod_unfold + split: tmres.splits dest!: empty_fail_not_empty) + apply ((drule_tac x=x in spec)+, fastforce)+ + done + +lemma bind_known_operation_eq: + "\ no_fail P f; \Q\ f \\rv s. rv = x \ s = t\; P s; Q s; empty_fail f; no_trace f \ + \ (f >>= g) s = g x t" + apply (drule(1) no_failD) + apply (subgoal_tac "f s = {([], Result (x, t))}") + apply (clarsimp simp: bind_def) + apply (rule subset_antisym; + clarsimp simp: valid_def empty_fail_def no_trace_def mres_def image_def failed_def) + apply (metis eq_snd_iff tmres.exhaust) + apply fastforce + done + +lemma assert_opt_If: + "assert_opt v = If (v = None) fail (return (the v))" + by (simp add: assert_opt_def split: option.split) + +lemma if_to_top_of_bind: + "(bind (If P x y) z) = If P (bind x z) (bind y z)" + by (simp split: if_split) + +lemma if_to_top_of_bindE: + "(bindE (If P x y) z) = If P (bindE x z) (bindE y z)" + by (simp split: if_split) + +lemma modify_modify: + "(do x \ modify f; modify (g x) od) = modify (g () o f)" + by (simp add: bind_def simpler_modify_def) + +lemmas modify_modify_bind = + arg_cong2[where f=bind, OF modify_modify refl, simplified bind_assoc] + +lemma put_then_get[unfolded K_bind_def]: + "do put s; get od = do put s; return s od" + by (simp add: put_def bind_def get_def return_def) + +lemmas put_then_get_then = + put_then_get[THEN bind_then_eq, simplified bind_assoc return_bind] + +lemma select_empty_bind[simp]: + "select {} >>= f = select {}" + by (simp add: select_def bind_def) + + +subsection \Alternative @{text env_steps} with repeat\ + +lemma mapM_Cons: + "mapM f (x # xs) = do + y \ f x; + ys \ mapM f xs; + return (y # ys) + od" + and mapM_Nil: + "mapM f [] = return []" + by (simp_all add: mapM_def sequence_def) + +lemma mapM_x_Cons: + "mapM_x f (x # xs) = do + y \ f x; + mapM_x f xs + od" + and mapM_x_Nil: + "mapM_x f [] = return ()" + by (simp_all add: mapM_x_def sequence_x_def) + +lemma mapME_Cons: + "mapME f (x # xs) = doE + y \ f x; + ys \ mapME f xs; + returnOk (y # ys) + odE" + and mapME_Nil: + "mapME f [] = returnOk []" + by (simp_all add: mapME_def sequenceE_def) + +lemma mapME_x_Cons: + "mapME_x f (x # xs) = doE + y \ f x; + mapME_x f xs + odE" + and mapME_x_Nil: + "mapME_x f [] = returnOk ()" + by (simp_all add: mapME_x_def sequenceE_x_def) + +lemma mapM_append: + "mapM f (xs @ ys) = (do + fxs \ mapM f xs; + fys \ mapM f ys; + return (fxs @ fys) + od)" + by (induct xs, simp_all add: mapM_Cons mapM_Nil bind_assoc) + +lemma mapM_x_append: + "mapM_x f (xs @ ys) = (do + x \ mapM_x f xs; + mapM_x f ys + od)" + by (induct xs, simp_all add: mapM_x_Cons mapM_x_Nil bind_assoc) + +lemma mapME_append: + "mapME f (xs 
@ ys) = (doE + fxs \ mapME f xs; + fys \ mapME f ys; + returnOk (fxs @ fys) + odE)" + by (induct xs, simp_all add: mapME_Cons mapME_Nil bindE_assoc) + +lemma mapME_x_append: + "mapME_x f (xs @ ys) = (doE + fxs \ mapME_x f xs; + mapME_x f ys + odE)" + by (induct xs, simp_all add: mapME_x_Cons mapME_x_Nil bindE_assoc) + +lemma mapM_map: + "mapM f (map g xs) = mapM (f o g) xs" + by (induct xs; simp add: mapM_Nil mapM_Cons) + +lemma mapM_x_map: + "mapM_x f (map g xs) = mapM_x (f o g) xs" + by (induct xs; simp add: mapM_x_Nil mapM_x_Cons) + +lemma mapME_map: + "mapME f (map g xs) = mapME (f o g) xs" + by (induct xs; simp add: mapME_Nil mapME_Cons) + +lemma mapME_x_map: + "mapME_x f (map g xs) = mapME_x (f o g) xs" + by (induct xs; simp add: mapME_x_Nil mapME_x_Cons) + +primrec repeat_n :: "nat \ ('s, unit) tmonad \ ('s, unit) tmonad" where + "repeat_n 0 f = return ()" + | "repeat_n (Suc n) f = do f; repeat_n n f od" + +lemma repeat_n_mapM_x: + "repeat_n n f = mapM_x (\_. f) (replicate n ())" + by (induct n, simp_all add: mapM_x_Cons mapM_x_Nil) + +definition repeat :: "('s, unit) tmonad \ ('s, unit) tmonad" where + "repeat f = do n \ select UNIV; repeat_n n f od" + +definition env_step :: "('s,unit) tmonad" where + "env_step = + do + s' \ select UNIV; + put_trace_elem (Env, s'); + put s' + od" + +abbreviation + "env_n_steps n \ repeat_n n env_step" + +lemma elem_select_bind: + "(tr, res) \ (do x \ select S; f x od) s + = (\x \ S. (tr, res) \ f x s)" + by (simp add: bind_def select_def) + +lemma select_bind_UN: + "(do x \ select S; f x od) = (\s. \x \ S. f x s)" + by (rule ext, auto simp: elem_select_bind) + +lemma select_early: + "S \ {} + \ do x \ f; y \ select S; g x y od + = do y \ select S; x \ f; g x y od" + apply (simp add: bind_def select_def Sigma_def) + apply (rule ext) + apply (fastforce elim: rev_bexI image_eqI[rotated] split: tmres.split_asm) + done + +lemma repeat_n_choice: + "S \ {} + \ repeat_n n (do x \ select S; f x od) + = (do xs \ select {xs. set xs \ S \ length xs = n}; mapM_x f xs od)" + apply (induct n; simp?) + apply (simp add: select_def bind_def mapM_x_Nil cong: conj_cong) + apply (simp add: select_early bind_assoc) + apply (subst select_early) + apply simp + apply (auto intro: exI[where x="replicate n xs" for n xs])[1] + apply (simp(no_asm) add: fun_eq_iff set_eq_iff elem_select_bind) + apply (simp only: conj_comms[where Q="length xs = n" for xs n]) + apply (simp only: ex_simps[symmetric] conj_assoc length_Suc_conv, simp) + apply (auto simp: mapM_x_Cons) + done + +lemma repeat_choice: + "S \ {} + \ repeat (do x \ select S; f x od) + = (do xs \ select {xs. set xs \ S}; mapM_x f xs od)" + apply (simp add: repeat_def repeat_n_choice) + apply (simp(no_asm) add: fun_eq_iff set_eq_iff elem_select_bind) + done + +lemma put_trace_append: + "put_trace (xs @ ys) = do put_trace ys; put_trace xs od" + by (induct xs; simp add: bind_assoc) + +lemma put_trace_elem_put_comm: + "do y \ put_trace_elem x; put s od + = do y \ put s; put_trace_elem x od" + by (simp add: put_def put_trace_elem_def bind_def insert_commute) + +lemma put_trace_put_comm: + "do y \ put_trace xs; put s od + = do y \ put s; put_trace xs od" + apply (rule sym; induct xs; simp) + apply (simp add: bind_assoc put_trace_elem_put_comm) + apply (simp add: bind_assoc[symmetric]) + done + +lemma mapM_x_comm: + "(\x \ set xs. 
do y \ g; f x od = do y \ f x; g od) + \ do y \ g; mapM_x f xs od = do y \ mapM_x f xs; g od" + apply (induct xs; simp add: mapM_x_Nil mapM_x_Cons) + apply (simp add: bind_assoc[symmetric], simp add: bind_assoc) + done + +lemma mapM_x_split: + "(\x \ set xs. \y \ set xs. do z \ g y; f x od = do (z :: unit) \ f x; g y od) + \ mapM_x (\x. do z \ f x; g x od) xs = do y \ mapM_x f xs; mapM_x g xs od" + apply (induct xs; simp add: mapM_x_Nil mapM_x_Cons bind_assoc) + apply (subst bind_assoc[symmetric], subst mapM_x_comm[where f=f and g="g x" for x]) + apply simp + apply (simp add: bind_assoc) + done + +lemma mapM_x_put: + "mapM_x put xs = unless (xs = []) (put (last xs))" + apply (induct xs rule: rev_induct) + apply (simp add: mapM_x_Nil unless_def when_def) + apply (simp add: mapM_x_append mapM_x_Cons mapM_x_Nil) + apply (simp add: bind_def unless_def when_def put_def return_def) + done + +lemma put_trace_mapM_x: + "put_trace xs = mapM_x put_trace_elem (rev xs)" + by (induct xs; simp add: mapM_x_Nil mapM_x_append mapM_x_Cons) + +lemma rev_surj: + "surj rev" + by (rule surjI[where f=rev], simp) + +lemma select_image: + "select (f ` S) = do x \ select S; return (f x) od" + by (auto simp add: bind_def select_def return_def Sigma_def) + +lemma env_steps_repeat: + "env_steps = repeat env_step" + apply (simp add: env_step_def repeat_choice env_steps_def + select_early) + apply (simp add: put_trace_elem_put_comm) + apply (simp add: mapM_x_split put_trace_elem_put_comm put_trace_put_comm + mapM_x_put) + apply (simp add: put_trace_mapM_x rev_map mapM_x_map o_def) + apply (subst rev_surj[symmetric], simp add: select_image bind_assoc) + apply (rule arg_cong2[where f=bind, OF refl ext]) + apply (simp add: bind_def get_def put_def unless_def when_def return_def) + apply (simp add: last_st_tr_def hd_map hd_rev) + done + +lemma repeat_n_plus: + "repeat_n (n + m) f = do repeat_n n f; repeat_n m f od" + by (induct n; simp add: bind_assoc) + +lemma repeat_eq_twice[simp]: + "(do x \ repeat f; repeat f od) = repeat f" + apply (simp add: repeat_def select_early) + apply (simp add: bind_assoc repeat_n_plus[symmetric, simplified]) + apply (simp add: bind_def select_def Sigma_def) + apply (rule ext, fastforce intro: exI[where x=0]) + done + +lemmas repeat_eq_twice_then[simp] = + repeat_eq_twice[THEN bind_then_eq, simplified bind_assoc] + +lemmas env_steps_eq_twice[simp] = + repeat_eq_twice[where f=env_step, folded env_steps_repeat] +lemmas env_steps_eq_twice_then[simp] = + env_steps_eq_twice[THEN bind_then_eq, simplified bind_assoc] + +lemmas mapM_collapse_append = + mapM_append[symmetric, THEN bind_then_eq, simplified bind_assoc, simplified] + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_More_RG.thy b/lib/Monads/trace/Trace_More_RG.thy new file mode 100644 index 0000000000..e868b46f05 --- /dev/null +++ b/lib/Monads/trace/Trace_More_RG.thy @@ -0,0 +1,637 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Partial correctness RG logic lemmas over the trace monad. RG quintuples, lifting lemmas, etc. + If it doesn't contain a RG quintuple it likely doesn't belong in here. *) + +theory Trace_More_RG + imports + Trace_RG +begin + +lemma rg_take_disjunct: + "\P\,\R\ f \G\,\\rv s0 s. P' rv s0 s \ (False \ P'' rv s0 s)\ + \ \P\,\R\ f \G\,\P''\" + by (erule rg_strengthen_post, simp) + +lemma rg_post_add: + "\P\,\R\ S \G\,\\r s0 s. 
Q' r s0 s \ Q r s0 s\ \ \P\,\R\ S \G\,\Q\" + by (erule rg_strengthen_post, simp) + +lemma rg_post_addE: + "\P\,\R\ f \G\,\\_ s0 s. R s0 s \ Q s0 s\,\T\ \ \P\,\R\ f \G\,\\_ s0 s. Q s0 s\,\T\" + by (erule rg_strengthen_postE; simp) + +lemma rg_pre_add: + "(\s0 s. P s0 s \ P' s0 s) \ \P\,\R\ f \G\,\Q\ \ \P and P'\,\R\ f \G\,\Q\" + apply (subst iff_conv_conj_imp) + by(intro conjI impI; rule rg_weaken_pre, assumption, clarsimp) + +lemma rg_pre_addE: + "(\s0 s. P s0 s \ R s0 s) \ \P\,\R\ f \G\,\Q\,\S\ \ \P and R\,\R\ f \G\,\Q\,\S\" + apply (subst iff_conv_conj_imp) + by(intro conjI impI; rule rg_weaken_preE, assumption, clarsimp) + +lemma rg_name_pre_state: + "\ \s0 s. P s0 s \ \\_. (=) s\,\R\ f \G\,\Q\; prefix_closed f \ \ \P\,\R\ f \G\,\Q\" + by (clarsimp simp: validI_def) + +lemma rg_name_pre_stateE: + "\\s0 s. P s0 s \ \\_. (=) s\,\R\ f \G\,\Q\,\E\; prefix_closed f\ \ \P\,\R\ f \G\,\Q\,\E\" + by (clarsimp simp: validIE_def2) + +lemma rg_vcg_if_lift_strong: + "\ \P'\,\R\ f \G\,\P\; \\s0 s. \ P' s0 s\,\R\ f \G\,\\rv s0 s. \ P rv s0 s\; \Q'\,\R\ f \G\,\Q\; \S'\,\R\ f \G\,\S\ \ \ + \\s0 s. if P' s0 s then Q' s0 s else S' s0 s\,\R\ f \G\,\\rv s0 s. if P rv s0 s then Q rv s0 s else S rv s0 s\" + + "\ \P'\,\R\ f \G\,\P\; \\s0 s. \ P' s0 s\,\R\ f \G\,\\rv s0 s. \ P rv s0 s\; \Q'\,\R\ f \G\,\ Q\; \S'\,\R\ f \G\,\S\ \ \ + \\s0 s. if P' s0 s then Q' s0 s else S' s0 s\,\R\ f \G\,\\rv s0 s. (if P rv s0 s then Q rv else S rv) s0 s\" + by (wpsimp wp: rg_vcg_imp_lift' | assumption | fastforce)+ + +lemma rg_vcg_imp_lift_pre_add: + "\ \P and Q\,\R\ f \G\,\\rv s0 s. Q' rv s0 s\; f \R\,\G\,\\s0 s. \ Q s0 s\ \ + \ \P\,\R\ f \G\,\\rv s0 s. Q s0 s \ Q' rv s0 s\" + apply (rule rg_weaken_pre) + apply (rule rg_vcg_imp_lift') + apply fastforce + apply fastforce + apply clarsimp + done + +lemma rg_pre_tautI: + "\ \A and P\,\R\ a \G\,\B\; \A and not P\,\R\ a \G\,\B\ \ \ \A\,\R\ a \G\,\B\" + by (fastforce simp: validI_def) + +lemma rg_lift_Pf_pre_conj: + assumes P: "\x. \\s0 s. Q x s0 s\,\R\ m \G\,\P x\" + assumes f: "\P. \\s0 s. P (g s0 s) \ P' s0 s\,\R\ m \G\,\\_ s0 s. P (f s0 s)\" + shows "\\s0 s. Q (g s0 s) s0 s \ P' s0 s\,\R\ m \G\,\\rv s0 s. P (f s0 s) rv s0 s\" + apply (clarsimp simp: validI_def validI_prefix_closed[OF f] guar_by_rg[OF f]) + apply (rule use_validI[OF _ P], simp) + apply (rule use_validI[OF _ f], simp+) + done + +lemmas rg_lift_Pf4 = rg_lift_Pf_pre_conj[where P'="\\", simplified] +lemmas rg_lift_Pf3 = rg_lift_Pf4[where f=f and g=f for f] +lemmas rg_lift_Pf2 = rg_lift_Pf3[where P="\f _. P f" for P] +lemmas rg_lift_Pf = rg_lift_Pf2[where Q=P and P=P for P] + +lemmas rg_lift_Pf3_pre_conj = rg_lift_Pf_pre_conj[where f=f and g=f for f] +lemmas rg_lift_Pf2_pre_conj = rg_lift_Pf3_pre_conj[where P="\f _. P f" for P] +lemmas rg_lift_Pf_pre_conj' = rg_lift_Pf2_pre_conj[where Q=P and P=P for P] + +lemma rg_if_r_and: + "\P\,\R'\ f \G\,\\r. if R r then Q r else Q' r\ + = \P\,\R'\ f \G\,\\r s0 s. (R r \ Q r s0 s) \ (\R r \ Q' r s0 s)\" + by (fastforce simp: validI_def) + +lemma rg_convert_imp: + "\ \\s0 s. \ P s0 s\,\R\ f \G\,\\rv s0 s. \ Q s0 s\; \P'\,\R\ f \G\,\S\ \ + \ \\s0 s. P s0 s \ P' s0 s\,\R\ f \G\,\\rv s0 s. Q s0 s \ S rv s0 s\" + apply (simp only: imp_conv_disj) + apply (erule(1) rg_vcg_disj_lift) + done + +lemma rg_case_option_wpR: + "\\P\,\R\ f None \G\,\Q\,\E\; \x. \P' x\,\R\ f (Some x) \G\,\Q' x\,\E\\ + \ \case_option P P' v\,\R\ f v \G\,\\rv. case v of None \ Q rv | Some x \ Q' x rv\,\E\" + by (cases v) auto + +lemma rg_exI_tuple: + "\P\,\R\ f \G\,\\(rv,rv') s0 s. 
Q x rv rv' s0 s\ \ \P\,\R\ f \G\,\\(rv,rv') s0 s. \x. Q x rv rv' s0 s\" + by (fastforce simp: validI_def) + +lemma rg_ex_all: + "(\x. \P x\,\R\ f \G\,\Q\) = \\s0 s. \x. P x s0 s\,\R\ f \G\,\Q\" + apply (rule iffI) + apply (fastforce simp: validI_def)+ + done + +lemma rg_imp_eq_substR: + "\P\,\R\ f \G\,\Q\,\E\ \ \P\,\R\ f \G\,\\rv s0 s. rv = x \ Q x s0 s\,\E\" + by (fastforce simp add: validI_def validIE_def split: sum.splits) + +lemma rg_split_bind_case_sum: + assumes x: "\rv. \E rv\,\R\ g rv \G\,\Q\" + "\rv. \S rv\,\R\ h rv \G\,\Q\" + assumes y: "\P\,\R\ f \G\,\S\,\E\" + shows "\P\,\R\ f >>= case_sum g h \G\,\Q\" + apply (rule bind_twp[OF _ y[unfolded validIE_def]]) + apply (wpsimp wp: x split: sum.splits) + done + +lemma rg_split_bind_case_sumE: + assumes x: "\rv. \S' rv\,\R\ g rv \G\,\Q\,\E\" + "\rv. \S rv\,\R\ h rv \G\,\Q\,\E\" + assumes y: "\P\,\R\ f \G\,\S\,\S'\" + shows "\P\,\R\ f >>= case_sum g h \G\,\Q\,\E\" + apply (unfold validIE_def) + apply (rule bind_twp[OF _ y[unfolded validIE_def]]) + apply (wpsimp wp: x[unfolded validIE_def] split: sum.splits) + done + +lemma assertE_tsp: + "\P\,\R\ assertE Q \G\,\\rv s0 s. Q \ P s0 s\,\E\" + by (wpsimp wp: assertE_wp) + +lemma case_options_weak_twp: + "\ \P\,\R\ f \G\,\Q\; \x. \P'\,\R\ g x \G\,\Q\ \ + \ \P and P'\,\R\ case opt of None \ f | Some x \ g x \G\,\Q\" + apply (cases opt) + apply (clarsimp elim!: rg_weaken_pre) + apply (rule rg_weaken_pre [where P'=P']) + apply simp+ + done + +lemma case_option_twp_None_return: + assumes [wp]: "\x. \P' x\,\R\ f x \G\,\\_. Q\" + shows "\\x s0 s. (Q and P x) s0 s \ P' x s0 s \ + \ \Q and (\s0 s. opt \ None \ P (the opt) s0 s)\,\R\ + (case opt of None \ return () | Some x \ f x) + \G\,\\_. Q\" + by (cases opt; wpsimp) + +lemma case_option_twp_None_returnOk: + assumes [wp]: "\x. \P' x\,\R\ f x \G\,\\_. Q\,\E\" + shows "\\x s0 s. (Q and P x) s0 s \ P' x s0 s \ + \ \Q and (\s0 s. opt \ None \ P (the opt) s0 s)\,\R\ + (case opt of None \ returnOk () | Some x \ f x) + \G\,\\_. Q\,\E\" + by (cases opt; wpsimp) + +lemma list_cases_weak_twp: + assumes "\P_A\,\R\ a \G\,\Q\" + assumes "\x xs. \P_B\,\R\ b x xs \G\,\Q\" + shows + "\P_A and P_B\,\R\ + case ts of + [] \ a + | x#xs \ b x xs + \G\,\Q\" + apply (cases ts) + apply (simp, rule rg_weaken_pre, rule assms, simp)+ + done + +lemma rg_vcg_if_lift2: + "\P\,\R\ f \G\,\\rv s0 s. (Q rv s0 s \ X rv s0 s) \ (\ Q rv s0 s \ Y rv s0 s)\ \ + \P\,\R\ f \G\,\\rv s0 s. if Q rv s0 s then X rv s0 s else Y rv s0 s\" + + "\P\,\R\ f \G\,\\rv s0 s. (Q' rv \ X rv s0 s) \ (\ Q' rv \ Y rv s0 s)\ \ + \P\,\R\ f \G\,\\rv. if Q' rv then X rv else Y rv\" + by (auto simp: validI_def) + +lemma rg_vcg_if_lift_ER: (* Required because of lack of rv in lifting rules *) + "\P\,\R\ f \G\,\\rv s0 s. (Q rv s0 s \ X rv s0 s) \ (\ Q rv s0 s \ Y rv s0 s)\,\E\ \ + \P\,\R\ f \G\,\\rv s0 s. if Q rv s0 s then X rv s0 s else Y rv s0 s\,\E\" + + "\P\,\R\ f \G\,\\rv s0 s. (Q' rv \ X rv s0 s) \ (\ Q' rv \ Y rv s0 s)\,\E\ \ + \P\,\R\ f \G\,\\rv. if Q' rv then X rv else Y rv\,\E\" + by (auto simp: validI_def validIE_def) + +lemma rg_list_all_lift: + "\\r. r \ set xs \ \Q r\,\R\ f -,\\rv. Q r\; \P\,\R\ f \G\,\\\\\\ + \ \\s0 s. list_all (\r. Q r s0 s) xs \ P s0 s\,\R\ f \G\,\\rv s0 s. list_all (\r. Q r s0 s) xs\" + apply (rule validI_split[rotated, simplified pred_conj_def]) + apply assumption + apply (induct xs; simp) + apply wpsimp + apply (rule rg_vcg_conj_lift; simp) + done + +lemma assertE_twp: + "\\s0 s. 
F \ Q () s0 s\,\R\ assertE F \G\,\Q\,\E\" + apply (rule rg_pre) + apply (unfold assertE_def) + apply wp + apply simp + done + +(*If there is a use case which requires a specific guarantee then this rule could be extended with + an extra assumption and precondition.*) +lemma rg_doesn't_grow_proof: + assumes y: "\s0 s. finite (S s0 s)" + assumes x: "\x. \\s0 s. x \ S s0 s \ P s0 s\,\R\ f -,\\rv s0 s. x \ S s0 s\" + shows "\\s0 s. card (S s0 s) < n \ P s0 s\,\R\ f -,\\rv s0 s. card (S s0 s) < n\" + apply (clarsimp simp: validI_def validI_prefix_closed[OF x]) + apply (erule le_less_trans[rotated]) + apply (rule card_mono[OF y]) + apply clarsimp + apply (rule ccontr) + apply (drule (2) use_validI[OF _ x, OF _ conjI]) + apply simp + done + +lemma rg_vcg_propE_R: + "prefix_closed f \ \\s0 s. P\,\R\ f -,\\rv s0 s. P\,-" + by (simp add: validIE_def validI_def split: sum.split) + +lemma rg_set_preserved_proof: + assumes y: "\x. \\s0 s. Q s0 s \ x \ S s0 s\,\R\ f \G\,\\rv s0 s. x \ S s0 s\" + assumes x: "\x. \\s0 s. Q s0 s \ x \ S s0 s\,\R\ f \G\,\\rv s0 s. x \ S s0 s\" + shows "\\s0 s. Q s0 s \ P (S s0 s)\,\R\ f \G\,\\rv s0 s. P (S s0 s)\" + apply (clarsimp simp: validI_def) + by (metis (mono_tags, lifting) equalityI validI_prefix_closed post_by_rg guar_by_rg subsetI x y) + +(*If there is a use case which requires a specific guarantee then this rule could be extended with + an extra assumption and precondition.*) +lemma rg_set_shrink_proof: + assumes x: "\x. \\s0 s. x \ S s0 s\,\R\ f -,\\rv s0 s. x \ S s0 s\" + shows + "\\s0 s. \S'. S' \ S s0 s \ P S'\,\R\ + f + -,\\rv s0 s. P (S s0 s)\" + apply (clarsimp simp: validI_def validI_prefix_closed[OF x]) + apply (drule spec, erule mp) + apply (clarsimp simp: subset_iff) + apply (rule ccontr) + apply (drule(1) use_validI [OF _ x]) + apply simp + done + +lemma rg_shrinks_proof: + assumes y: "\s0 s. finite (S s0 s)" + assumes x: "\x. \\s0 s. x \ S s0 s \ P s0 s\,\R\ f -,\\rv s0 s. x \ S s0 s\" + assumes z: "\P\,\R\ f \G\,\\rv s0 s. x \ S s0 s\" + assumes w: "\s0 s. P s0 s \ x \ S s0 s" + shows "\\s0 s. card (S s0 s) \ n \ P s0 s\,\R\ f -,\\rv s0 s. card (S s0 s) < n\" + apply (clarsimp simp: validI_def validI_prefix_closed[OF x]) + apply (erule less_le_trans[rotated]) + apply (rule psubset_card_mono[OF y]) + apply (rule psubsetI) + apply clarsimp + apply (rule ccontr) + apply (drule (2) use_validI[OF _ x, OF _ conjI]) + apply simp + by (metis use_validI w z) + +lemma use_validIE_R': + "\(tr, Result (Inr r, s')) \ rely f R s0 s; \P\,\R\ f \G\,\Q\,\E\; P s0 s; s0' = last_st_tr tr s0\ + \ Q r s0' s'" + unfolding validIE_def + by (frule(3) use_validI', simp) + +lemmas use_validIE_R = use_validIE_R'[OF _ _ _ refl] + +lemma use_validIE_guar: + "\(tr, res) \ rely f R s0 s; \P\,\R\ f \G\,\Q\,\E\; P s0 s\ + \ guar_cond G s0 tr" + unfolding validIE_def + by (frule(2) use_validI_guar, simp) + +lemma validI_preservation_ex: + assumes x: "\x P. \\s0 s. P (f s0 s x :: 'b)\,\R\ m \G\,\\rv s0 s. P (f s0 s x)\" + shows "\\s0 s. P (f s0 s :: 'a \ 'b)\,\R\ m \G\,\\rv s0 s. P (f s0 s)\" + apply (clarsimp simp: validI_def validI_prefix_closed[OF x] guar_by_rg[OF x]) + apply (erule subst[rotated, where P=P]) + apply (rule ext) + apply (erule use_validI [OF _ x]) + apply simp + done + +lemma whenE_invI: + assumes a: "\P\,\R\ f \G\,\\_. P\" + shows "\P\,\R\ whenE Q f \G\,\\_. P\" + by (wpsimp wp: a) + +lemma ifM_throwError_returnOkI: + "\Q\,\R\ test \G\,\\c s0 s. \ c \ P s0 s\ + \ \Q\,\R\ ifM test (throwError e) (returnOk ()) \G\,\\_. 
P\, -" + unfolding ifM_def + apply (fold liftE_bindE) + apply wpsimp + apply assumption + apply simp + done + +lemmas state_unchanged_rg = in_inv_by_rgD [THEN sym] +lemmas last_state_unchanged_rg = last_st_in_inv_by_rgD [THEN sym] + +(*FIXME MC: name? move (both this one and validI in More_VCG)*) +lemma validI_I: + assumes px: "prefix_closed S" + assumes gc: "\s0 s tr res. \ P s0 s; (tr, res) \ (rely S R s0 s)\ \ guar_cond G s0 tr" + assumes rl: "\s0 s tr r s'. \ P s0 s; (tr, Result (r, s')) \ (rely S R s0 s) \ \ Q r (last_st_tr tr s0) s'" + shows "\P\,\R\ S \G\,\Q\" + unfolding validI_def using px gc rl by safe + +lemma opt_return_pres_lift_rg: + assumes x: "\v. \P\,\R\ f v \G\,\\rv. P\" + shows "\P\,\R\ case x of None \ return () | Some v \ f v \G\,\\rv. P\" + by (wpsimp wp: x) + +lemma rg_weak_lift_imp_conj: + "\ \Q\,\R\ m -,\Q'\; \R\,\R\ m -,\R'\; \S\,\R\ m \G\,\\\\\ \ + \ \\s0 s. (P \ Q s0 s) \ R s0 s \ S s0 s\,\R\ m \G\,\\rv s0 s. (P \ Q' rv s0 s) \ R' rv s0 s\" + apply wp_pre + apply (rule rg_vcg_conj_lift) + apply (rule rg_weak_lift_imp; assumption) + apply (rule validI_split; assumption) + apply clarsimp + done + +lemma rg_eq_P: + assumes "\P. \P\,\R\ f \G\,\\_. P\" + shows "\\_. (=) s\,\R\ f \G\,\\_ _. (=) s\" + by (rule assms) + +lemma valid_case_option_post_twp: + "\\x. \P x\,\R\ f -,\\rv. Q x\\ \ + \\s0 s. case ep of Some x \ P x s0 s | _ \ True\,\R\ + f + -,\\rv s0 s. case ep of Some x \ Q x s0 s | _ \ True\" + by (cases ep; fastforce simp: rg_TrueI) + +lemma P_bool_lift: + assumes t: "\Q\,\R\ f -,\\r. Q\" + assumes f: "\\s0 s. \Q s0 s\,\R\ f -,\\r s0 s. \Q s0 s\" + shows "\\s0 s. P (Q s0 s)\,\R\ f -,\\r s0 s. P (Q s0 s)\" + apply (clarsimp simp: validI_def validI_prefix_closed[OF f]) + apply (rule back_subst[where P=P], assumption) + apply (rule iffI) + apply (erule (1) use_validI [OF _ t]) + apply (rule classical) + apply (drule (1) use_validI [OF _ f]) + apply simp + done + +lemma gets_sp: "\P\,\R\ gets f \G\,\\rv. P and (\s0 s. f s = rv)\" + by (wp, simp) + +lemma post_by_rg2: + "\\P\, \R\ f \G\, \Q\; (tr, Result (rv, s')) \ rely f R s0 s; P s0 s\ + \ Q rv (last_st_tr tr s0) s'" + by (rule post_by_rg, assumption+) + +lemma rg_Ball_helper: + assumes x: "\x. \P x\,\R\ f -,\Q x\" + assumes y: "\P. \\s0 s. P (S s0 s)\,\R\ f -,\\rv s0 s. P (S s0 s)\" + shows "\\s0 s. \x \ S s0 s. P x s0 s\,\R\ f -,\\rv s0 s. \x \ S s0 s. Q x rv s0 s\" + apply (clarsimp simp: validI_def validI_prefix_closed[OF x]) + apply (drule bspec, erule back_subst[where P="\A. x\A" for x]) + apply (erule post_by_rg[OF y, rotated]) + apply (rule refl) + apply (erule (1) post_by_rg[OF x]) + done + +lemma handy_prop_divs_rg: + assumes x: "\P. \\s0 s. P (Q s0 s) \ S s0 s\,\R\ f -,\\rv s0 s. P (Q' rv s0 s)\" + "\P. \\s0 s. P (R s0 s) \ S s0 s\,\R\ f -,\\rv s0 s. P (R' rv s0 s)\" + shows "\\s0 s. P (Q s0 s \ R s0 s) \ S s0 s\,\R\ f -,\\rv s0 s. P (Q' rv s0 s \ R' rv s0 s)\" + "\\s0 s. P (Q s0 s \ R s0 s) \ S s0 s\,\R\ f -,\\rv s0 s. P (Q' rv s0 s \ R' rv s0 s)\" + apply (clarsimp simp: validI_def validI_prefix_closed[OF x(1)] + elim!: subst[rotated, where P=P]) + apply (rule use_validI [OF _ x(1)], assumption) + apply (rule use_validI [OF _ x(2)], assumption) + apply simp + apply (clarsimp simp: validI_def validI_prefix_closed[OF x(1)] + elim!: subst[rotated, where P=P]) + apply (rule use_validI [OF _ x(1)], assumption) + apply (rule use_validI [OF _ x(2)], assumption) + apply simp + done + +lemma rg_as_subst: + "\ \P. \\s0 s. P (fn s0 s)\,\R\ f \G\,\\rv s0 s. P (fn s0 s)\; + \v :: 'a. 
\P v\,\R\ f \G\,\Q v\ \ \ + \\s0 s. P (fn s0 s) s0 s\,\R\ f \G\,\\rv s0 s. Q (fn s0 s) rv s0 s\" + by (rule rg_lift_Pf3) + +lemmas rg_vcg_ball_lift = rg_vcg_const_Ball_lift + +lemma rg_set_preserved: + assumes x: "\x. \fn' x\,\R\ m -,\\rv. fn x\" + shows "\\s0 s. set xs \ {x. fn' x s0 s}\,\R\ m -,\\rv s0 s. set xs \ {x. fn x s0 s}\" + apply (induct xs) + apply (simp add: rg_TrueI validI_prefix_closed[OF x]) + apply simp + apply (rule rg_vcg_conj_lift) + apply (rule x) + apply assumption + done + +lemma rg_ex_pre: (* safe, unlike rg_vcg_ex_lift *) + "(\x. \P x\,\R\ f \G\,\Q\) \ \\s0 s. \x. P x s0 s\,\R\ f \G\,\Q\" + by (fastforce simp: validI_def) + +lemma rg_ex_pre_conj: + "\\x. \\s0 s. P x s0 s \ P' s0 s\,\R\ f \G\,\Q\\ + \ \\s0 s. (\x. P x s0 s) \ P' s0 s\,\R\ f \G\,\Q\" + by (fastforce simp: validI_def) + +lemma rg_conj_lift_inv: + "\\P\,\R\ f \G\,\Q\; \\s0 s. P' s0 s \ I s0 s\,\R\ f \G\,\\rv. I\; + \s0 s. P s0 s \ P' s0 s\ + \ \\s0 s. P s0 s \ I s0 s\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ I s0 s\" + by (fastforce simp: validI_def) + +lemma rg_in_rely_post: + assumes x: "\P. \P\,\R\ f -,\\x. P\" + shows "\\\\,\R\ f -,\\rv s0 s. (rv, s) \ mres (rely f R s0 s)\" + apply (clarsimp simp: validI_def validI_prefix_closed[OF x]) + apply (rule back_subst[where P="\s0. x\mres (rely f R s0 s)" for x s]) + apply (rule back_subst[where P="\s. x\mres (rely f R s0 s)" for x s0]) + apply (drule in_mres, assumption) + apply (simp add: state_unchanged_rg[OF x]) + apply (simp add: last_state_unchanged_rg[OF x]) + done + +lemma list_case_throw_validIE_R: + "\ \y ys. xs = y # ys \ \P\,\R\ f y ys \G\,\Q\,- \ \ + \P\,\R\ case xs of [] \ throwError e | x # xs \ f x xs \G\,\Q\,-" + apply (cases xs, simp_all) + apply wp + done + +lemma validI_set_take_helper: + "\P\,\R\ f \G\,\\rv s0 s. \x \ set (xs rv s0 s). Q x rv s0 s\ + \ \P\,\R\ f \G\,\\rv s0 s. \x \ set (take (n rv s0 s) (xs rv s0 s)). Q x rv s0 s\" + apply (erule rg_strengthen_post) + apply (clarsimp dest!: in_set_takeD) + done + +lemma whenE_throwError_tsp: + "\P\,\R\ whenE Q (throwError e) \G\,\\rv s0 s. \ Q \ P s0 s\, -" + apply (simp add: whenE_def) + apply (intro conjI impI; wp) + done + +lemma weaker_rg_ifE: + assumes x: "\P \,\R\ a \G\,\Q\,\E\" + assumes y: "\P'\,\R\ b \G\,\Q\,\E\" + shows "\P and P'\,\R\ if test then a else b \G\,\Q\,\E\" + apply (rule rg_weaken_preE) + apply (wp x y) + apply simp + done + +lemma twp_split_const_if: + assumes x: "\P\,\R\ f \G\,\Q\" + assumes y: "\P'\,\R\ f \G\,\Q'\" + shows "\\s0 s. (S \ P s0 s) \ (\ S \ P' s0 s)\,\R\ f \G\,\\rv s0 s. (S \ Q rv s0 s) \ (\ S \ Q' rv s0 s)\" + by (cases S, simp_all add: x y) + +lemma twp_split_const_ifE_R: + assumes x: "\P\,\R\ f \G\,\Q\,\E\" + assumes y: "\P'\,\R\ f \G\,\Q'\,\E\" + shows "\\s0 s. (S \ P s0 s) \ (\ S \ P' s0 s)\,\R\ f \G\,\\rv s0 s. (S \ Q rv s0 s) \ (\ S \ Q' rv s0 s)\,\E\" + by (cases S, simp_all add: x y) + +lemma rg_disj_division: + "\ P \ Q; P \ \R\,\R\ f \G\,\S\; Q \ \T\,\R\ f \G\,\S\ \ + \ \\s0 s. (P \ R s0 s) \ (Q \ T s0 s)\,\R\ f \G\,\S\" + by (fastforce intro: rg_weaken_pre) + +lemma rg_grab_asm: + "\ P' \ \P\,\R\ f \G\,\Q\; \ P' \ prefix_closed f \ \ \\s0 s. P' \ P s0 s\,\R\ f \G\,\Q\" + by (cases P'; simp) + +lemma rg_grab_asm2: + "\P' \ \\s0 s. P s0 s \ P'' s0 s\,\R\ f \G\,\Q\; \ P' \ prefix_closed f\ + \ \\s0 s. P s0 s \ P' \ P'' s0 s\,\R\ f \G\,\Q\" + by (fastforce simp: validI_def) + +lemma rg_grab_exs: + assumes x: "\x. P x \ \P'\,\R\ f \G\,\Q\" + assumes y: "prefix_closed f" + shows "\\s0 s. \x. 
P x \ P' s0 s\,\R\ f \G\,\Q\" + apply (clarsimp simp: validI_def y use_validI_guar[OF _ x]) + apply (erule(2) use_validI [OF _ x]) + done + +lemma rg_prop_E: + "prefix_closed f \ \\s0 s. P\,\R\ f -,-,\\rv s0 s. P\" + by wpsimp + +lemma rg_walk_assmsE: + assumes x: "\P\,\R\ f \G\,\\rv. P\" and y: "\s0 s. P s0 s \ Q s0 s" and z: "\P\,\R\ g \G\,\\rv. Q\" + shows "\P\,\R\ doE x \ f; g odE \G\,\\rv. Q\" + apply (wp z) + apply (simp add: validIE_def) + apply (rule rg_strengthen_post [OF x]) + apply (auto simp: y split: sum.splits) + done + +(*FIXME MC: it is not immediately obvious what the validI equivalent of these rules should be, so I + think it is best to leave them until we have a specific use case. +lemma univ_twp: + "prefix_closed f \ \\s0 s. \(rv, s') \ mres (rely f R s0 s). Q rv s0 s'\,\R\ f -,\Q\" + by (simp add: validI_def) + +lemma univ_get_twp: + assumes x: "\P. \P\,\R\ f \G\,\\rv. P\" + shows "\\s0 s. \(rv, s0 s') \ mres (f s0 s). s0 s = s0 s' \ Q rv s0 s'\,\R\ f \G\,\Q\" + apply (rule rg_pre_imp[OF _ univ_wp]) + apply clarsimp + apply (drule bspec, assumption, simp) + apply (drule mp) + apply (simp add: state_unchanged[OF x]) + apply simp + done + +lemma other_rg_in_monad_post: + assumes x: "\P. \P\,\R\ fn \G\,\\rv. P\" + shows "\\s0 s. \(v, s0 s) \ mres (fn s0 s). F v = v\,\R\ fn \G\,\\v s0 s'. (F v, s0 s') \ mres (fn s0 s')\" + proof - + have P: "\v s0 s. (F v = v) \ (v, s0 s) \ mres (fn s0 s) \ (F v, s0 s) \ mres (fn s0 s)" + by simp + show ?thesis + apply (rule rg_post_imp [OF P], assumption) + apply (rule rg_pre_imp) + defer + apply (rule rg_vcg_conj_lift) + apply (rule univ_get_wp [OF x]) + apply (rule rg_in_monad_post [OF x]) + apply clarsimp + apply (drule bspec, assumption, simp) + done + qed*) + +lemma weak_if_twp: + "\ \P\,\R\ f \G\,\Q\; \P'\,\R\ f \G\,\Q'\ \ \ + \P and P'\,\R\ f \G\,\\r. if C r then Q r else Q' r\" + by (auto simp: validI_def) + +lemma weak_if_twp': + "\P\,\R\ f \G\,\\r. Q r and Q' r\ \ + \P\,\R\ f \G\,\\r. if C r then Q r else Q' r\" + by (auto simp: validI_def) + +lemma validE_R_abstract_rv: + "\P\,\R\ f \G\,\\rv s0 s. \rv'. Q rv' s0 s\,\E\ \ \P\,\R\ f \G\,\Q\,\E\" + by (erule rg_strengthen_postE; simp) + +lemma validIE_cases_validI: + "\P\,\R\ f \G\,\\rv s0 s. Q (Inr rv) s0 s\,\\rv s0 s. Q (Inl rv) s0 s\ + \ \P\,\R\ f \G\,\Q\" + apply (simp add: validIE_def) + apply (erule rg_strengthen_post) + apply (simp split: sum.split_asm) + done + +lemma liftM_pre_rg: + assumes rl: "\\s0 s. \ P s0 s \,\R\ a \G\,\ \_ _ _. False \" + shows "\\s0 s. \ P s0 s \,\R\ liftM f a \G\,\ \_ _ _. False \" + unfolding liftM_def + apply (rule bind_twp_fwd) + apply (rule rl) + apply wp + done + +lemma rg_gen_asm_conj: + "\P \ \P'\,\R\ f \G\,\Q\; \ P \ prefix_closed f\ \ \\s0 s. P' s0 s \ P\,\R\ f \G\,\Q\" + by (fastforce simp: validI_def) + +lemma rg_add_K: + "\P\,\R\ f \G\,\Q\ \ \\s0 s. P s0 s \ I\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ I\" + by (fastforce simp: validI_def) + +lemma valid_rv_lift: + "\P'\,\R\ f \G\,\\rv s0 s. rv \ Q rv s0 s\ \ \\s0 s. P \ P' s0 s\,\R\ f \G\,\\rv s0 s. rv \ P \ Q rv s0 s\" + by (fastforce simp: validI_def) + +lemma valid_rv_split: + "\\P\,\R\ f \G\,\\rv s0 s. rv \ Q s0 s\; \P\,\R\ f \G\,\\rv s0 s. \rv \ Q' s0 s\\ + \ \P\,\R\ f \G\,\\rv s0 s. if rv then Q s0 s else Q' s0 s\" + by (fastforce simp: validI_def) + +lemma rg_rv_split: + "\\P\,\R\ f \G\,\\rv s0 s. rv \ (Q rv s0 s)\; \P\,\R\ f \G\,\\rv s0 s. 
(\rv) \ (Q rv s0 s)\\ + \ \P\,\R\ f \G\,\Q\" + apply (clarsimp simp: validI_def) + by (metis (full_types)) + +lemma combine_validE: + "\ \ P \,\R\ x \G\,\ Q \,\ E \; \ P' \,\R\ x \G\,\ Q' \,\ E' \ \ + \ \ P and P' \,\R\ x \G\,\ \r. Q r and Q' r \,\\r. E r and E' r \" + apply (clarsimp simp: validIE_def validI_def split: sum.splits) + done + +lemma validI_case_prod: + "\ \x y. validI (P x y) R (f x y) G Q \ \ validI (case_prod P v) R (case_prod (\x y. f x y) v) G Q" + by (simp add: split_def) + +lemma validIE_case_prod: + "\ \x y. validIE (P x y) R (f x y) G Q E \ \ validIE (case_prod P v) R (case_prod (\x y. f x y) v) G Q E" + by (simp add: split_def) + +lemma rg_validIE_E_conjI: + "\ \P\,\R\ f \G\,\Q\,\E\; \P\,\R\ f \G\,\Q\,\E'\ \ \ \P\,\R\ f \G\,\Q\,\\rv s0 s. E rv s0 s \ E' rv s0 s\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma validIE_R_post_conjD1: + "\P\,\R\ f \G\,\\r s0 s. Q r s0 s \ Q' r s0 s\,\E\ \ \P\,\R\ f \G\,\Q\,\E\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma validIE_R_post_conjD2: + "\P\,\R\ f \G\,\\r s0 s. Q r s0 s \ Q' r s0 s\,\E\ \ \P\,\R\ f \G\,\Q'\,\E\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_name_pre_state2: + "(\s. \\s0' s'. P s0' s' \ (s' = s)\,\R\ f \G\,\Q\) \ \P\,\R\ f \G\,\Q\" + by (auto simp: validI_def) + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_More_VCG.thy b/lib/Monads/trace/Trace_More_VCG.thy new file mode 100644 index 0000000000..425738765d --- /dev/null +++ b/lib/Monads/trace/Trace_More_VCG.thy @@ -0,0 +1,744 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Partial correctness Hoare logic lemmas over the trace monad. Hoare triples, lifting lemmas, etc. + If it doesn't contain a Hoare triple it likely doesn't belong in here. *) + +theory Trace_More_VCG + imports + Trace_VCG + Trace_In_Monad +begin + +lemma hoare_take_disjunct: + "\P\ f \\rv s. P' rv s \ (False \ P'' rv s)\ + \ \P\ f \P''\" + by (erule hoare_strengthen_post, simp) + +lemma hoare_post_add: + "\P\ S \\r s. R r s \ Q r s\ \ \P\ S \Q\" + by (erule hoare_strengthen_post, simp) + +lemma hoare_post_addE: + "\P\ f \\_ s. R s \ Q s\, \T\ \ \P\ f \\_ s. Q s\, \T\" + by (erule hoare_strengthen_postE; simp) + +lemma hoare_pre_add: + "(\s. P s \ R s) \ (\P\ f \Q\ \ \P and R\ f \Q\)" + apply (subst iff_conv_conj_imp) + by(intro conjI impI; rule hoare_weaken_pre, assumption, clarsimp) + +lemma hoare_pre_addE: + "(\s. P s \ R s) \ (\P\ f \Q\, \S\ \ \P and R\ f \Q\, \S\)" + apply (subst iff_conv_conj_imp) + by(intro conjI impI; rule hoare_weaken_preE, assumption, clarsimp) + +lemma hoare_name_pre_state: + "\ \s. P s \ \(=) s\ f \Q\ \ \ \P\ f \Q\" + by (clarsimp simp: valid_def) + +lemma hoare_name_pre_stateE: + "\\s. P s \ \(=) s\ f \Q\, \E\\ \ \P\ f \Q\, \E\" + by (clarsimp simp: validE_def2) + +lemma hoare_vcg_if_lift_strong: + "\ \P'\ f \P\; \\s. \ P' s\ f \\rv s. \ P rv s\; \Q'\ f \Q\; \R'\ f \R\ \ \ + \\s. if P' s then Q' s else R' s\ f \\rv s. if P rv s then Q rv s else R rv s\" + + "\ \P'\ f \P\; \\s. \ P' s\ f \\rv s. \ P rv s\; \Q'\ f \ Q\; \R'\ f \R\ \ \ + \\s. if P' s then Q' s else R' s\ f \\rv s. (if P rv s then Q rv else R rv) s\" + by (wpsimp wp: hoare_vcg_imp_lift' | assumption | fastforce)+ + +lemma hoare_vcg_imp_lift_pre_add: + "\ \P and Q\ f \\rv s. R rv s\; f \\s. \ Q s\ \ \ \P\ f \\rv s. 
Q s \ R rv s\" + apply (rule hoare_weaken_pre) + apply (rule hoare_vcg_imp_lift') + apply fastforce + apply fastforce + apply (clarsimp simp: pred_conj_def valid_def) + done + +lemma hoare_pre_tautI: + "\ \A and P\ a \B\; \A and not P\ a \B\ \ \ \A\ a \B\" + by (fastforce simp: valid_def split_def pred_conj_def pred_neg_def) + +lemma hoare_lift_Pf_pre_conj: + assumes P: "\x. \\s. Q x s\ m \P x\" + assumes f: "\P. \\s. P (g s) \ R s\ m \\_ s. P (f s)\" + shows "\\s. Q (g s) s \ R s\ m \\rv s. P (f s) rv s\" + apply (clarsimp simp: valid_def) + apply (rule use_valid [OF _ P], simp) + apply (rule use_valid [OF _ f], simp, simp) + done + +lemmas hoare_lift_Pf4 = hoare_lift_Pf_pre_conj[where R=\, simplified] +lemmas hoare_lift_Pf3 = hoare_lift_Pf4[where f=f and g=f for f] +lemmas hoare_lift_Pf2 = hoare_lift_Pf3[where P="\f _. P f" for P] +lemmas hoare_lift_Pf = hoare_lift_Pf2[where Q=P and P=P for P] + +lemmas hoare_lift_Pf3_pre_conj = hoare_lift_Pf_pre_conj[where f=f and g=f for f] +lemmas hoare_lift_Pf2_pre_conj = hoare_lift_Pf3_pre_conj[where P="\f _. P f" for P] +lemmas hoare_lift_Pf_pre_conj' = hoare_lift_Pf2_pre_conj[where Q=P and P=P for P] + +lemma hoare_if_r_and: + "\P\ f \\r. if R r then Q r else Q' r\ + = \P\ f \\r s. (R r \ Q r s) \ (\R r \ Q' r s)\" + by (fastforce simp: valid_def) + +lemma hoare_convert_imp: + "\ \\s. \ P s\ f \\rv s. \ Q s\; \R\ f \S\ \ + \ \\s. P s \ R s\ f \\rv s. Q s \ S rv s\" + apply (simp only: imp_conv_disj) + apply (erule(1) hoare_vcg_disj_lift) + done + +lemma hoare_vcg_ex_lift_R: + "\ \v. \P v\ f \Q v\,- \ \ \\s. \v. P v s\ f \\rv s. \v. Q v rv s\,-" + apply (simp add: validE_R_def validE_def) + apply (rule hoare_strengthen_post, erule hoare_vcg_ex_lift) + apply (auto split: sum.split) + done + +lemma hoare_case_option_wpR: + "\\P\ f None \Q\,-; \x. \P' x\ f (Some x) \Q' x\,-\ + \ \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\,-" + by (cases v) auto + +lemma hoare_vcg_conj_liftE_R: + "\ \P\ f \P'\,-; \Q\ f \Q'\,- \ \ \P and Q\ f \\rv s. P' rv s \ Q' rv s\, -" + apply (simp add: validE_R_def validE_def valid_def split: sum.splits) + apply blast + done + +lemma K_valid[wp]: + "\K P\ f \\_. K P\" + by (simp add: valid_def) + +lemma hoare_exI_tuple: + "\P\ f \\(rv,rv') s. Q x rv rv' s\ \ \P\ f \\(rv,rv') s. \x. Q x rv rv' s\" + by (fastforce simp: valid_def) + +lemma hoare_ex_all: + "(\x. \P x\ f \Q\) = \\s. \x. P x s\ f \Q\" + apply (rule iffI) + apply (fastforce simp: valid_def)+ + done + +lemma hoare_imp_eq_substR: + "\P\ f \Q\,- \ \P\ f \\rv s. rv = x \ Q x s\,-" + by (fastforce simp add: valid_def validE_R_def validE_def split: sum.splits) + +lemma hoare_split_bind_case_sum: + assumes x: "\rv. \R rv\ g rv \Q\" + "\rv. \S rv\ h rv \Q\" + assumes y: "\P\ f \S\,\R\" + shows "\P\ f >>= case_sum g h \Q\" + apply (rule bind_wp[OF _ y[unfolded validE_def]]) + apply (wpsimp wp: x split: sum.splits) + done + +lemma hoare_split_bind_case_sumE: + assumes x: "\rv. \R rv\ g rv \Q\,\E\" + "\rv. \S rv\ h rv \Q\,\E\" + assumes y: "\P\ f \S\,\R\" + shows "\P\ f >>= case_sum g h \Q\,\E\" + apply (unfold validE_def) + apply (rule bind_wp[OF _ y[unfolded validE_def]]) + apply (wpsimp wp: x[unfolded validE_def] split: sum.splits) + done + +lemma assertE_sp: + "\P\ assertE Q \\rv s. Q \ P s\,\E\" + by (clarsimp simp: assertE_def) wp + +lemma throwErrorE_E [wp]: + "\Q e\ throwError e -, \Q\" + by (simp add: validE_E_def) wp + +lemma gets_inv [simp]: + "\ P \ gets f \ \r. P \" + by (simp add: gets_def, wp) + +lemma select_inv: + "\ P \ select S \ \r. 
P \" + by wpsimp + +lemmas return_inv = hoare_return_drop_var + +lemma assert_inv: "\P\ assert Q \\r. P\" + unfolding assert_def + by (cases Q) simp+ + +lemma assert_opt_inv: "\P\ assert_opt Q \\r. P\" + unfolding assert_opt_def + by (cases Q) simp+ + +lemma case_options_weak_wp: + "\ \P\ f \Q\; \x. \P'\ g x \Q\ \ \ \P and P'\ case opt of None \ f | Some x \ g x \Q\" + apply (cases opt) + apply (clarsimp elim!: hoare_weaken_pre) + apply (rule hoare_weaken_pre [where Q=P']) + apply simp+ + done + +lemma case_option_wp_None_return: + assumes [wp]: "\x. \P' x\ f x \\_. Q\" + shows "\\x s. (Q and P x) s \ P' x s \ + \ \Q and (\s. opt \ None \ P (the opt) s)\ + (case opt of None \ return () | Some x \ f x) + \\_. Q\" + by (cases opt; wpsimp) + +lemma case_option_wp_None_returnOk: + assumes [wp]: "\x. \P' x\ f x \\_. Q\,\E\" + shows "\\x s. (Q and P x) s \ P' x s \ + \ \Q and (\s. opt \ None \ P (the opt) s)\ + (case opt of None \ returnOk () | Some x \ f x) + \\_. Q\,\E\" + by (cases opt; wpsimp) + +lemma list_cases_weak_wp: + assumes "\P_A\ a \Q\" + assumes "\x xs. \P_B\ b x xs \Q\" + shows + "\P_A and P_B\ + case ts of + [] \ a + | x#xs \ b x xs + \Q\" + apply (cases ts) + apply (simp, rule hoare_weaken_pre, rule assms, simp)+ + done + +lemmas hoare_FalseE_R = hoare_FalseE[where E="\\", folded validE_R_def] + +lemma hoare_vcg_if_lift2: + "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\ \ + \R\ f \\rv s. if P rv s then X rv s else Y rv s\" + + "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\ \ + \R\ f \\rv. if P' rv then X rv else Y rv\" + by (auto simp: valid_def split_def) + +lemma hoare_vcg_if_lift_ER: (* Required because of lack of rv in lifting rules *) + "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\, - \ + \R\ f \\rv s. if P rv s then X rv s else Y rv s\, -" + + "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\, - \ + \R\ f \\rv. if P' rv then X rv else Y rv\, -" + by (auto simp: valid_def validE_R_def validE_def split_def) + +lemma hoare_list_all_lift: + "(\r. r \ set xs \ \Q r\ f \\rv. Q r\) + \ \\s. list_all (\r. Q r s) xs\ f \\rv s. list_all (\r. Q r s) xs\" + apply (induct xs; simp) + apply wpsimp + apply (rule hoare_vcg_conj_lift; simp) + done + +lemma undefined_valid: "\\\ undefined \Q\" + by (rule hoare_pre_cont) + +lemma assertE_wp: + "\\s. F \ Q () s\ assertE F \Q\,\E\" + apply (rule hoare_pre) + apply (unfold assertE_def) + apply wp + apply simp + done + +lemma doesn't_grow_proof: + assumes y: "\s. finite (S s)" + assumes x: "\x. \\s. x \ S s \ P s\ f \\rv s. x \ S s\" + shows "\\s. card (S s) < n \ P s\ f \\rv s. card (S s) < n\" + apply (clarsimp simp: valid_def) + apply (erule le_less_trans[rotated]) + apply (rule card_mono[OF y]) + apply clarsimp + apply (rule ccontr) + apply (drule (2) use_valid[OF _ x, OF _ conjI]) + apply simp + done + +lemma hoare_vcg_propE_R: + "\\s. P\ f \\rv s. P\, -" + by (simp add: validE_R_def validE_def valid_def split_def split: sum.split) + +lemma set_preserved_proof: + assumes y: "\x. \\s. Q s \ x \ S s\ f \\rv s. x \ S s\" + assumes x: "\x. \\s. Q s \ x \ S s\ f \\rv s. x \ S s\" + shows "\\s. Q s \ P (S s)\ f \\rv s. P (S s)\" + apply (clarsimp simp: valid_def) + by (metis (mono_tags, lifting) equalityI post_by_hoare subsetI x y) + +lemma set_shrink_proof: + assumes x: "\x. \\s. x \ S s\ f \\rv s. x \ S s\" + shows + "\\s. \S'. S' \ S s \ P S'\ + f + \\rv s. 
P (S s)\" + apply (clarsimp simp: valid_def) + apply (drule spec, erule mp) + apply (clarsimp simp: subset_iff) + apply (rule ccontr) + apply (drule(1) use_valid [OF _ x]) + apply simp + done + +lemma shrinks_proof: + assumes y: "\s. finite (S s)" + assumes x: "\x. \\s. x \ S s \ P s\ f \\rv s. x \ S s\" + assumes z: "\P\ f \\rv s. x \ S s\" + assumes w: "\s. P s \ x \ S s" + shows "\\s. card (S s) \ n \ P s\ f \\rv s. card (S s) < n\" + apply (clarsimp simp: valid_def) + apply (erule less_le_trans[rotated]) + apply (rule psubset_card_mono[OF y]) + apply (rule psubsetI) + apply clarsimp + apply (rule ccontr) + apply (drule (2) use_valid[OF _ x, OF _ conjI]) + apply simp + by (metis use_valid w z) + +lemma use_validE_R: + "\ (Inr r, s') \ mres (f s); \P\ f \Q\,-; P s \ \ Q r s'" + unfolding validE_R_def validE_def + by (frule(2) use_valid, simp) + +lemma valid_preservation_ex: + assumes x: "\x P. \\s. P (f s x :: 'b)\ m \\rv s. P (f s x)\" + shows "\\s. P (f s :: 'a \ 'b)\ m \\rv s. P (f s)\" + apply (clarsimp simp: valid_def) + apply (erule subst[rotated, where P=P]) + apply (rule ext) + apply (erule use_valid [OF _ x]) + apply simp + done + +lemma whenE_inv: + assumes a: "\P\ f \\_. P\" + shows "\P\ whenE Q f \\_. P\" + by (wpsimp wp: a) + +lemma whenE_throwError_wp: + "\\s. \ P \ Q s\ whenE P (throwError e) \\_. Q\, \\\\" + by wpsimp + +lemma ifM_throwError_returnOk: + "\Q\ test \\c s. \ c \ P s\ \ \Q\ ifM test (throwError e) (returnOk ()) \\_. P\, -" + unfolding ifM_def + apply (fold liftE_bindE) + apply wpsimp + apply assumption + apply simp + done + +lemma ifME_liftE: + "ifME (liftE test) a b = ifM test a b" + by (simp add: ifME_def ifM_def liftE_bindE) + +lemma gets_the_inv: "\P\ gets_the V \\rv. P\" by wpsimp + +lemmas state_unchanged = in_inv_by_hoareD [THEN sym] + +lemma validI: + assumes rl: "\s r s'. \ P s; (r, s') \ mres (S s) \ \ Q r s'" + shows "\P\ S \Q\" + unfolding valid_def using rl by safe + +lemma opt_return_pres_lift: + assumes x: "\v. \P\ f v \\rv. P\" + shows "\P\ case x of None \ return () | Some v \ f v \\rv. P\" + by (wpsimp wp: x) + +lemma valid_return_unit: + "\P\ f >>= (\_. return ()) \\r. Q\ \ \P\ f \\r. Q\" + by (auto simp: valid_def in_bind in_return Ball_def) + +lemma hoare_weak_lift_imp_conj: + "\ \Q\ m \Q'\; \R\ m \R'\ \ + \ \\s. (P \ Q s) \ R s\ m \\rv s. (P \ Q' rv s) \ R' rv s\" + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_weak_lift_imp) + apply assumption+ + done + +lemma hoare_eq_P: + assumes "\P. \P\ f \\_. P\" + shows "\(=) s\ f \\_. (=) s\" + by (rule assms) + +lemma hoare_validE_R_conj: + "\\P\ f \Q\, -; \P\ f \R\, -\ \ \P\ f \Q and R\, -" + by (simp add: valid_def validE_def validE_R_def Let_def split_def split: sum.splits) + +lemmas throwError_validE_R = throwError_wp [where E="\\", folded validE_R_def] + +lemma valid_case_option_post_wp: + "\\x. \P x\ f \\rv. Q x\\ \ + \\s. case ep of Some x \ P x s | _ \ True\ + f + \\rv s. case ep of Some x \ Q x s | _ \ True\" + by (cases ep, simp_all add: hoare_vcg_prop) + +lemma P_bool_lift: + assumes t: "\Q\ f \\r. Q\" + assumes f: "\\s. \Q s\ f \\r s. \Q s\" + shows "\\s. P (Q s)\ f \\r s. P (Q s)\" + apply (clarsimp simp: valid_def) + apply (rule back_subst[where P=P], assumption) + apply (rule iffI) + apply (erule (1) use_valid [OF _ t]) + apply (rule classical) + apply (drule (1) use_valid [OF _ f]) + apply simp + done + +lemmas fail_inv = hoare_fail_any[where Q="\_. P" and P=P for P] + +lemma gets_sp: "\P\ gets f \\rv. P and (\s. 
f s = rv)\" + by (wp, simp) + +lemma post_by_hoare2: + "\ \P\ f \Q\; (r, s') \ mres (f s); P s \ \ Q r s'" + by (rule post_by_hoare, assumption+) + +lemma hoare_Ball_helper: + assumes x: "\x. \P x\ f \Q x\" + assumes y: "\P. \\s. P (S s)\ f \\rv s. P (S s)\" + shows "\\s. \x \ S s. P x s\ f \\rv s. \x \ S s. Q x rv s\" + apply (clarsimp simp: valid_def) + apply (drule bspec, erule back_subst[where P="\A. x\A" for x]) + apply (erule post_by_hoare[OF y, rotated]) + apply (rule refl) + apply (erule (1) post_by_hoare[OF x]) + done + +lemma handy_prop_divs: + assumes x: "\P. \\s. P (Q s) \ S s\ f \\rv s. P (Q' rv s)\" + "\P. \\s. P (R s) \ S s\ f \\rv s. P (R' rv s)\" + shows "\\s. P (Q s \ R s) \ S s\ f \\rv s. P (Q' rv s \ R' rv s)\" + "\\s. P (Q s \ R s) \ S s\ f \\rv s. P (Q' rv s \ R' rv s)\" + apply (clarsimp simp: valid_def + elim!: subst[rotated, where P=P]) + apply (rule use_valid [OF _ x(1)], assumption) + apply (rule use_valid [OF _ x(2)], assumption) + apply simp + apply (clarsimp simp: valid_def + elim!: subst[rotated, where P=P]) + apply (rule use_valid [OF _ x(1)], assumption) + apply (rule use_valid [OF _ x(2)], assumption) + apply simp + done + +lemma hoare_as_subst: + "\ \P. \\s. P (fn s)\ f \\rv s. P (fn s)\; + \v :: 'a. \P v\ f \Q v\ \ \ + \\s. P (fn s) s\ f \\rv s. Q (fn s) rv s\" + by (rule hoare_lift_Pf3) + +lemmas hoare_vcg_ball_lift = hoare_vcg_const_Ball_lift + +lemma hoare_set_preserved: + assumes x: "\x. \fn' x\ m \\rv. fn x\" + shows "\\s. set xs \ {x. fn' x s}\ m \\rv s. set xs \ {x. fn x s}\" + apply (induct xs) + apply simp + apply wp + apply simp + apply (rule hoare_vcg_conj_lift) + apply (rule x) + apply assumption + done + +lemma hoare_ex_pre: (* safe, unlike hoare_vcg_ex_lift *) + "(\x. \P x\ f \Q\) \ \\s. \x. P x s\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_ex_pre_conj: + "\\x. \\s. P x s \ P' s\ f \Q\\ + \ \\s. (\x. P x s) \ P' s\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_conj_lift_inv: + "\\P\ f \Q\; \\s. P' s \ I s\ f \\rv. I\; + \s. P s \ P' s\ + \ \\s. P s \ I s\ f \\rv s. Q rv s \ I s\" + by (fastforce simp: valid_def) + +lemma hoare_in_monad_post: + assumes x: "\P. \P\ f \\x. P\" + shows "\\\ f \\rv s. (rv, s) \ mres (f s)\" + apply (clarsimp simp: valid_def) + apply (rule back_subst[where P="\s. x\mres (f s)" for x], assumption) + apply (simp add: state_unchanged[OF x]) + done + +lemma list_case_throw_validE_R: + "\ \y ys. xs = y # ys \ \P\ f y ys \Q\,- \ \ + \P\ case xs of [] \ throwError e | x # xs \ f x xs \Q\,-" + apply (cases xs, simp_all) + apply wp + done + +lemma validE_R_sp: + assumes x: "\P\ f \Q\,-" + assumes y: "\x. \Q x\ g x \R\,-" + shows "\P\ f >>=E (\x. g x) \R\,-" + by (rule hoare_pre, wp x y, simp) + +lemma valid_set_take_helper: + "\P\ f \\rv s. \x \ set (xs rv s). Q x rv s\ + \ \P\ f \\rv s. \x \ set (take (n rv s) (xs rv s)). Q x rv s\" + apply (erule hoare_strengthen_post) + apply (clarsimp dest!: in_set_takeD) + done + +lemma whenE_throwError_sp: + "\P\ whenE Q (throwError e) \\rv s. \ Q \ P s\, -" + apply (simp add: whenE_def validE_R_def) + apply (intro conjI impI; wp) + done + +lemma weaker_hoare_ifE: + assumes x: "\P \ a \Q\,\E\" + assumes y: "\P'\ b \Q\,\E\" + shows "\P and P'\ if test then a else b \Q\,\E\" + apply (rule hoare_weaken_preE) + apply (wp x y) + apply simp + done + +lemma wp_split_const_if: + assumes x: "\P\ f \Q\" + assumes y: "\P'\ f \Q'\" + shows "\\s. (G \ P s) \ (\ G \ P' s)\ f \\rv s. 
(G \ Q rv s) \ (\ G \ Q' rv s)\" + by (cases G, simp_all add: x y) + +lemma wp_split_const_if_R: + assumes x: "\P\ f \Q\,-" + assumes y: "\P'\ f \Q'\,-" + shows "\\s. (G \ P s) \ (\ G \ P' s)\ f \\rv s. (G \ Q rv s) \ (\ G \ Q' rv s)\,-" + by (cases G, simp_all add: x y) + +lemma hoare_disj_division: + "\ P \ Q; P \ \R\ f \S\; Q \ \T\ f \S\ \ + \ \\s. (P \ R s) \ (Q \ T s)\ f \S\" + apply safe + apply (rule hoare_pre_imp) + prefer 2 + apply simp + apply simp + apply (rule hoare_pre_imp) + prefer 2 + apply simp + apply simp + done + +lemma hoare_grab_asm: + "\ G \ \P\ f \Q\ \ \ \\s. G \ P s\ f \Q\" + by (cases G, simp+) + +lemma hoare_grab_asm2: + "\P' \ \\s. P s \ R s\ f \Q\\ + \ \\s. P s \ P' \ R s\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_grab_exs: + assumes x: "\x. P x \ \P'\ f \Q\" + shows "\\s. \x. P x \ P' s\ f \Q\" + apply (clarsimp simp: valid_def) + apply (erule(2) use_valid [OF _ x]) + done + +lemma hoare_prop_E: "\\rv. P\ f -,\\rv s. P\" + unfolding validE_E_def + by (rule hoare_pre, wp, simp) + +lemma hoare_vcg_conj_lift_R: + "\ \P\ f \Q\,-; \R\ f \S\,- \ + \ \\s. P s \ R s\ f \\rv s. Q rv s \ S rv s\,-" + apply (simp add: validE_R_def validE_def) + apply (drule(1) hoare_vcg_conj_lift) + apply (erule hoare_strengthen_post) + apply (clarsimp split: sum.splits) + done + +lemma hoare_walk_assmsE: + assumes x: "\P\ f \\rv. P\" and y: "\s. P s \ Q s" and z: "\P\ g \\rv. Q\" + shows "\P\ doE x \ f; g odE \\rv. Q\" + apply (wp z) + apply (simp add: validE_def) + apply (rule hoare_strengthen_post [OF x]) + apply (auto simp: y split: sum.splits) + done + +lemma univ_wp: + "\\s. \(rv, s') \ mres (f s). Q rv s'\ f \Q\" + by (simp add: valid_def) + +lemma univ_get_wp: + assumes x: "\P. \P\ f \\rv. P\" + shows "\\s. \(rv, s') \ mres (f s). s = s' \ Q rv s'\ f \Q\" + apply (rule hoare_pre_imp[OF _ univ_wp]) + apply clarsimp + apply (drule bspec, assumption, simp) + apply (drule mp) + apply (simp add: state_unchanged[OF x]) + apply simp + done + +lemma other_hoare_in_monad_post: + assumes x: "\P. \P\ fn \\rv. P\" + shows "\\s. \(v, s) \ mres (fn s). F v = v\ fn \\v s'. (F v, s') \ mres (fn s')\" + proof - + have P: "\v s. (F v = v) \ (v, s) \ mres (fn s) \ (F v, s) \ mres (fn s)" + by simp + show ?thesis + apply (rule hoare_post_imp [OF P], assumption) + apply (rule hoare_pre_imp) + defer + apply (rule hoare_vcg_conj_lift) + apply (rule univ_get_wp [OF x]) + apply (rule hoare_in_monad_post [OF x]) + apply clarsimp + apply (drule bspec, assumption, simp) + done + qed + +lemma weak_if_wp: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ + \P and P'\ f \\r. if C r then Q r else Q' r\" + by (auto simp add: valid_def split_def) + +lemma weak_if_wp': + "\P\ f \\r. Q r and Q' r\ \ + \P\ f \\r. if C r then Q r else Q' r\" + by (auto simp add: valid_def split_def) + +lemma bindE_split_recursive_asm: + assumes x: "\x s'. \ (Inr x, s') \ mres (f s) \ \ \\s. B x s \ s = s'\ g x \C\, \E\" + shows "\A\ f \B\, \E\ \ \\st. A st \ st = s\ f >>=E g \C\, \E\" + apply (clarsimp simp: validE_def valid_def bindE_def in_bind lift_def) + apply (erule allE, erule(1) impE) + apply (drule(1) bspec, simp) + apply (clarsimp simp: in_throwError split: sum.splits) + apply (drule x) + apply (clarsimp simp: validE_def valid_def) + apply (drule(1) bspec, simp split: sum.splits) + done + +lemma validE_R_abstract_rv: + "\P\ f \\rv s. \rv'. Q rv' s\,- \ \P\ f \Q\,-" + by (erule hoare_strengthen_postE_R, simp) + +lemma validE_cases_valid: + "\P\ f \\rv s. Q (Inr rv) s\,\\rv s. 
Q (Inl rv) s\ + \ \P\ f \Q\" + apply (simp add: validE_def) + apply (erule hoare_strengthen_post) + apply (simp split: sum.split_asm) + done + +lemma liftM_pre: + assumes rl: "\\s. \ P s \ a \ \_ _. False \" + shows "\\s. \ P s \ liftM f a \ \_ _. False \" + unfolding liftM_def + apply (rule bind_wp_fwd) + apply (rule rl) + apply wp + done + +lemma hoare_gen_asm': + "(P \ \P'\ f \Q\) \ \P' and (\_. P)\ f \Q\" + apply (auto intro: hoare_assume_pre) + done + +lemma hoare_gen_asm_conj: + "(P \ \P'\ f \Q\) \ \\s. P' s \ P\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_add_K: + "\P\ f \Q\ \ \\s. P s \ I\ f \\rv s. Q rv s \ I\" + by (fastforce simp: valid_def) + +lemma valid_rv_lift: + "\P'\ f \\rv s. rv \ Q rv s\ \ \\s. P \ P' s\ f \\rv s. rv \ P \ Q rv s\" + by (fastforce simp: valid_def) + +lemma valid_imp_ex: + "\P\ f \\rv s. \x. rv \ Q rv s x\ \ \P\ f \\rv s. rv \ (\x. Q rv s x)\" + by (fastforce simp: valid_def) + +lemma valid_rv_split: + "\\P\ f \\rv s. rv \ Q s\; \P\ f \\rv s. \rv \ Q' s\\ + \ \P\ f \\rv s. if rv then Q s else Q' s\" + by (fastforce simp: valid_def) + +lemma hoare_rv_split: + "\\P\ f \\rv s. rv \ (Q rv s)\; \P\ f \\rv s. (\rv) \ (Q rv s)\\ + \ \P\ f \Q\" + apply (clarsimp simp: valid_def split_def) + by (metis (full_types) fst_eqD snd_conv) + +lemma combine_validE: + "\ \ P \ x \ Q \,\ E \; \ P' \ x \ Q' \,\ E' \ \ + \ \ P and P' \ x \ \r. (Q r) and (Q' r) \,\\r. (E r) and (E' r) \" + apply (clarsimp simp: validE_def valid_def split: sum.splits) + apply (erule allE, erule (1) impE)+ + apply (drule (1) bspec)+ + apply clarsimp + done + +lemma valid_case_prod: + "\ \x y. valid (P x y) (f x y) Q \ \ valid (case_prod P v) (case_prod (\x y. f x y) v) Q" + by (simp add: split_def) + +lemma validE_case_prod: + "\ \x y. validE (P x y) (f x y) Q E \ \ validE (case_prod P v) (case_prod (\x y. f x y) v) Q E" + by (simp add: split_def) + +lemma valid_pre_satisfies_post: + "\ \s r' s'. P s \ Q r' s' \ \ \ P \ m \ Q \" + by (clarsimp simp: valid_def) + +lemma validE_pre_satisfies_post: + "\ \s r' s'. P s \ Q r' s'; \s r' s'. P s \ R r' s' \ \ \ P \ m \ Q \,\ R \" + by (clarsimp simp: validE_def2 split: sum.splits) + +lemma hoare_validE_R_conjI: + "\ \P\ f \Q\, - ; \P\ f \Q'\, - \ \ \P\ f \\rv s. Q rv s \ Q' rv s\, -" + by (fastforce simp: Ball_def validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_validE_E_conjI: + "\ \P\ f -, \Q\ ; \P\ f -, \Q'\ \ \ \P\ f -, \\rv s. Q rv s \ Q' rv s\" + by (fastforce simp: Ball_def validE_E_def validE_def valid_def split: sum.splits) + +lemma validE_R_post_conjD1: + "\P\ f \\r s. Q r s \ R r s\,- \ \P\ f \Q\,-" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma validE_R_post_conjD2: + "\P\ f \\r s. Q r s \ R r s\,- \ \P\ f \R\,-" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma throw_opt_wp[wp]: + "\if v = None then E ex else Q (the v)\ throw_opt ex v \Q\,\E\" + unfolding throw_opt_def by wpsimp auto + +lemma hoare_name_pre_state2: + "(\s. 
\P and ((=) s)\ f \Q\) \ \P\ f \Q\" + by (auto simp: valid_def intro: hoare_name_pre_state) + +lemma returnOk_E': "\P\ returnOk r -,\E\" + by wpsimp + +lemma throwError_R': "\P\ throwError e \Q\,-" + by wpsimp + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_No_Fail.thy b/lib/Monads/trace/Trace_No_Fail.thy new file mode 100644 index 0000000000..42016257b2 --- /dev/null +++ b/lib/Monads/trace/Trace_No_Fail.thy @@ -0,0 +1,262 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Lemmas about the no_fail predicate. *) + +theory Trace_No_Fail + imports + Trace_In_Monad + Trace_VCG + WPSimp +begin + +subsection "Non-Failure" + +text \ + With the failure result, we can formulate non-failure separately from validity. + A monad @{text m} does not fail under precondition @{text P}, if for no start + state that satisfies the precondition it returns a @{term Failed} result. +\ +definition no_fail :: "('s \ bool) \ ('s,'a) tmonad \ bool" where + "no_fail P m \ \s. P s \ \ failed (m s)" + + +subsection \@{method wpc} setup\ + +lemma no_fail_pre[wp_pre]: + "\ no_fail P f; \s. Q s \ P s\ \ no_fail Q f" + by (simp add: no_fail_def) + +lemma wpc_helper_no_fail_final: + "no_fail Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_fail P f)" + by (clarsimp simp: wpc_helper_def elim!: no_fail_pre) + +wpc_setup "\m. no_fail P m" wpc_helper_no_fail_final + + +subsection \Bundles\ + +bundle no_pre = hoare_pre [wp_pre del] no_fail_pre [wp_pre del] + +bundle classic_wp_pre = + hoare_pre [wp_pre del] + all_classic_wp_combs[wp_comb del] + all_classic_wp_combs[wp_comb] + + +subsection \Lemmas\ + +lemma no_failD: + "\ no_fail P m; P s \ \ \ failed (m s)" + by (simp add: no_fail_def) + +lemma no_fail_return[simp, wp]: + "no_fail \ (return x)" + by (simp add: return_def no_fail_def) + +lemma no_fail_bind[wp]: + "\ no_fail P f; \x. no_fail (R x) (g x); \Q\ f \R\ \ \ no_fail (P and Q) (f >>= (\rv. g rv))" + apply (simp add: no_fail_def bind_def' image_Un image_image + in_image_constant failed_def) + apply (intro allI conjI impI) + apply (fastforce simp: image_def) + apply clarsimp + apply (drule(1) post_by_hoare, erule in_mres) + apply (fastforce simp: image_def) + done + +lemma no_fail_get[simp, wp]: + "no_fail \ get" + by (simp add: get_def no_fail_def) + +lemma no_fail_put[simp, wp]: + "no_fail \ (put s)" + by (simp add: put_def no_fail_def) + +lemma no_fail_modify[wp,simp]: + "no_fail \ (modify f)" + by (wpsimp simp: modify_def) + +lemma no_fail_gets_simp[simp]: + "no_fail P (gets f)" + by (wpsimp simp: gets_def) + +lemma no_fail_gets[wp]: + "no_fail \ (gets f)" + by simp + +lemma no_fail_select[wp,simp]: + "no_fail \ (select S)" + by (simp add: no_fail_def select_def image_def failed_def) + +lemma no_fail_alt[wp]: + "\ no_fail P f; no_fail Q g \ \ no_fail (P and Q) (f \ g)" + by (auto simp: no_fail_def alternative_def) + +lemma no_fail_when[wp]: + "(P \ no_fail Q f) \ no_fail (if P then Q else \) (when P f)" + by (simp add: when_def) + +lemma no_fail_unless[wp]: + "(\P \ no_fail Q f) \ no_fail (if P then \ else Q) (unless P f)" + by (simp add: unless_def when_def) + +lemma no_fail_fail[simp, wp]: + "no_fail \ fail" + by (simp add: fail_def no_fail_def) + +lemma no_fail_assert[simp, wp]: + "no_fail (\_. P) (assert P)" + by (simp add: assert_def) + +lemma no_fail_assert_opt[simp, wp]: + "no_fail (\_. 
P \ None) (assert_opt P)" + by (simp add: assert_opt_def split: option.splits) + +lemma no_fail_case_option[wp]: + assumes f: "no_fail P f" + assumes g: "\x. no_fail (Q x) (g x)" + shows "no_fail (if x = None then P else Q (the x)) (case_option f g x)" + by (clarsimp simp add: f g) + +lemma no_fail_if[wp]: + "\ P \ no_fail Q f; \P \ no_fail R g \ \ no_fail (if P then Q else R) (if P then f else g)" + by simp + +lemma no_fail_apply[wp]: + "no_fail P (f (g x)) \ no_fail P (f $ g x)" + by simp + +lemma no_fail_undefined[simp, wp]: + "no_fail \ undefined" + by (simp add: no_fail_def) + +lemma no_fail_returnOK[simp, wp]: + "no_fail \ (returnOk x)" + by (simp add: returnOk_def) + +lemma no_fail_assume_pre: + "(\s. P s \ no_fail P f) \ no_fail P f" + by (simp add: no_fail_def) + +\<^cancel>\lemma no_fail_liftM_eq[simp]: + "no_fail P (liftM f m) = no_fail P m" + by (auto simp: liftM_def no_fail_def bind_def return_def)\ + +lemma no_fail_liftM[wp]: + "no_fail P m \ no_fail P (liftM f m)" + unfolding liftM_def + by wpsimp + +lemma no_fail_pre_and: + "no_fail P f \ no_fail (P and Q) f" + by (erule no_fail_pre) simp + +lemma no_fail_spec: + "\ \s. no_fail (((=) s) and P) f \ \ no_fail P f" + by (simp add: no_fail_def) + +lemma no_fail_assertE[wp]: + "no_fail (\_. P) (assertE P)" + by (simp add: assertE_def split: if_split) + +lemma no_fail_spec_pre: + "\ no_fail (((=) s) and P') f; \s. P s \ P' s \ \ no_fail (((=) s) and P) f" + by (erule no_fail_pre, simp) + +lemma no_fail_whenE[wp]: + "\ G \ no_fail P f \ \ no_fail (\s. G \ P s) (whenE G f)" + by (simp add: whenE_def split: if_split) + +lemma no_fail_unlessE[wp]: + "\ \ G \ no_fail P f \ \ no_fail (\s. \ G \ P s) (unlessE G f)" + by (simp add: unlessE_def split: if_split) + +lemma no_fail_throwError[wp]: + "no_fail \ (throwError e)" + by (simp add: throwError_def) + +lemma no_fail_liftE[wp]: + "no_fail P f \ no_fail P (liftE f)" + unfolding liftE_def by wpsimp + +lemma no_fail_gets_the[wp]: + "no_fail (\s. f s \ None) (gets_the f)" + unfolding gets_the_def + by wpsimp + +lemma no_fail_lift: + "(\y. x = Inr y \ no_fail P (f y)) \ no_fail (\s. \isl x \ P s) (lift f x)" + unfolding lift_def + by (wpsimp wp: no_fail_throwError split: sum.splits | assumption)+ + +lemma validE_R_valid_eq: + "\Q\ f \R\, - = \Q\ f \\rv s. \ isl rv \ R (projr rv) s\" + unfolding validE_R_def validE_def valid_def + by (fastforce split: sum.splits prod.split) + +lemma no_fail_bindE[wp]: + "\ no_fail P f; \rv. no_fail (R rv) (g rv); \Q\ f \R\,- \ + \ no_fail (P and Q) (f >>=E g)" + unfolding bindE_def + by (wpsimp wp: no_fail_lift simp: validE_R_valid_eq | assumption)+ + +lemma no_fail_False[simp]: + "no_fail (\_. False) X" + by (clarsimp simp: no_fail_def) + +lemma no_fail_gets_map[wp]: + "no_fail (\s. f s p \ None) (gets_map f p)" + unfolding gets_map_def by wpsimp + +lemma no_fail_or: + "\no_fail P a; no_fail Q a\ \ no_fail (P or Q) a" + by (clarsimp simp: no_fail_def) + +lemma no_fail_state_assert[wp]: + "no_fail P (state_assert P)" + unfolding state_assert_def + by wpsimp + +lemma no_fail_condition[wp]: + "\no_fail Q A; no_fail R B\ \ no_fail (\s. (C s \ Q s) \ (\ C s \ R s)) (condition C A B)" + unfolding condition_def no_fail_def + by clarsimp + +lemma no_fail_ex_lift: + "(\x. no_fail (P x) f) \ no_fail (\s. \x. P x s) f" + by (fastforce simp: no_fail_def) + +lemma no_fail_grab_asm: + "(G \ no_fail P f) \ no_fail (\s. 
G \ P s) f" + by (cases G; clarsimp) + +lemma no_fail_put_trace_elem[wp]: + "no_fail \ (put_trace_elem x)" + by (clarsimp simp: put_trace_elem_def no_fail_def failed_def) + +lemma no_fail_put_trace[wp]: + "no_fail \ (put_trace xs)" + by (induct xs; wpsimp) + +lemma no_fail_interference[wp]: + "no_fail \ interference" + by (wpsimp simp: interference_def commit_step_def env_steps_def) + +lemma no_fail_Await[wp]: + "\s. c s \ no_fail \ (Await c)" + by (wpsimp simp: Await_def) + +lemma parallel_failed: + "failed (parallel f g s) \ failed (f s) \ failed (g s)" + by (auto simp: parallel_def2 failed_def image_def intro!: bexI) + +lemma no_fail_parallel[wp]: + "\ no_fail P f \ no_fail Q g \ \ no_fail (P and Q) (parallel f g)" + by (auto simp: no_fail_def dest!: parallel_failed) + +end diff --git a/lib/Monads/trace/Trace_No_Throw.thy b/lib/Monads/trace/Trace_No_Throw.thy new file mode 100644 index 0000000000..1715354853 --- /dev/null +++ b/lib/Monads/trace/Trace_No_Throw.thy @@ -0,0 +1,115 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Lemmas about no_throw. Usually should have a conclusion "no_throw P m". + Includes some monad equations that have no_throw as a main assumption. *) + +theory Trace_No_Throw + imports + Trace_VCG +begin + +section "Basic exception reasoning" + +text \ + The predicates @{text no_throw} and @{text no_return} allow us to reason about functions in + the exception monad that never throw an exception or never return normally.\ + +definition no_throw :: "('s \ bool) \ ('s, 'e + 'a) tmonad \ bool" where + "no_throw P A \ \ P \ A \ \_ _. True \,\ \_ _. False \" + +definition no_return :: "('a \ bool) \ ('a, 'b + 'c) tmonad \ bool" where + "no_return P A \ \ P \ A \\_ _. False\,\\_ _. True \" + +(* Alternative definition of no_throw; easier to work with than unfolding validE. *) +lemma no_throw_def': + "no_throw P A = (\s. P s \ (\(r, t) \ mres (A s). (\x. r = Inr x)))" + by (clarsimp simp: no_throw_def validE_def2 split_def split: sum.splits) + + +subsection \@{const no_throw} rules\ + +lemma no_throw_returnOk[simp]: + "no_throw P (returnOk a)" + unfolding no_throw_def + by wp + +lemma no_throw_liftE[simp]: + "no_throw P (liftE x)" + by (wpsimp simp: liftE_def no_throw_def validE_def) + +lemma no_throw_bindE: + "\ no_throw A X; \a. no_throw B (Y a); \ A \ X \ \_. B \,\ \_ _. True \ \ + \ no_throw A (X >>=E Y)" + unfolding no_throw_def + using hoare_validE_cases bindE_wp_fwd by blast + +lemma no_throw_bindE_simple: + "\ no_throw \ L; \x. no_throw \ (R x) \ \ no_throw \ (L >>=E R)" + using hoareE_TrueI no_throw_bindE by blast + +lemma no_throw_handleE_simple: + "\ \x. no_throw \ L \ no_throw \ (R x) \ \ no_throw \ (L R)" + by (fastforce simp: no_throw_def' handleE_def handleE'_def validE_def valid_def bind_def return_def + mres_def image_def + split: sum.splits tmres.splits) + +lemma no_throw_handle2: + "\ \a. no_throw Y (B a); \ X \ A \ \_ _. True \,\ \_. Y \ \ \ no_throw X (A B)" + by (fastforce simp: no_throw_def' handleE'_def validE_def valid_def bind_def return_def mres_def + image_def + split: sum.splits tmres.splits) + +lemma no_throw_handle: + "\ \a. no_throw Y (B a); \ X \ A \ \_ _. True \,\ \_. 
Y \ \ \ no_throw X (A B)" + unfolding handleE_def + by (rule no_throw_handle2) + +lemma no_throw_fail[simp]: + "no_throw P fail" + by (clarsimp simp: no_throw_def) + +lemma handleE'_nothrow_lhs: + "no_throw \ L \ no_throw \ (L R)" + unfolding no_throw_def + using handleE'_wp[rotated] by fastforce + +lemma handleE'_nothrow_rhs: + "\ \x. no_throw \ (R x) \ \ no_throw \ (L R)" + unfolding no_throw_def + by (metis hoareE_TrueI no_throw_def no_throw_handle2) + +lemma handleE_nothrow_lhs: + "no_throw \ L \ no_throw \ (L R)" + by (metis handleE'_nothrow_lhs handleE_def) + +lemma handleE_nothrow_rhs: + "\ \x. no_throw \ (R x) \ \ no_throw \ (L R)" + by (metis no_throw_handleE_simple) + +lemma condition_nothrow: + "\ no_throw \ L; no_throw \ R \ \ no_throw \ (condition C L R)" + by (clarsimp simp: condition_def no_throw_def validE_def2) + +lemma no_throw_Inr: + "\ x \ mres (A s); no_throw P A; P s \ \ \y. fst x = Inr y" + by (fastforce simp: no_throw_def' split: sum.splits) + +lemma mres_parallel: + "x \ mres (parallel f g s) \ x \ mres (f s) \ x \ mres (g s)" + unfolding parallel_def2 mres_def + apply (clarsimp simp: image_def) + apply (auto intro!: bexI) + done + +lemma no_throw_parallel: + "\ no_throw P f \ no_throw Q g \ \ no_throw (P and Q) (parallel f g)" + unfolding no_throw_def' + apply (auto dest!: mres_parallel) + done + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_No_Trace.thy b/lib/Monads/trace/Trace_No_Trace.thy new file mode 100644 index 0000000000..862c9bcb3a --- /dev/null +++ b/lib/Monads/trace/Trace_No_Trace.thy @@ -0,0 +1,360 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_No_Trace + imports + Trace_Monad + WPSimp +begin + +subsection "No Trace" + +text \ + A monad of type @{text tmonad} does not have a trace iff for all starting + states, all of the potential outcomes have the empty list as a trace and do + not return an @{term Incomplete} result.\ +definition no_trace :: "('s,'a) tmonad \ bool" where + "no_trace f = (\tr res s. (tr, res) \ f s \ tr = [] \ res \ Incomplete)" + +lemmas no_traceD = no_trace_def[THEN iffD1, rule_format] + +lemma no_trace_emp: + "\no_trace f; (tr, r) \ f s\ \ tr = []" + by (simp add: no_traceD) + +lemma no_trace_Incomplete_mem: + "no_trace f \ (tr, Incomplete) \ f s" + by (auto dest: no_traceD) + +lemma no_trace_Incomplete_eq: + "\no_trace f; (tr, res) \ f s\ \ res \ Incomplete" + by (auto dest: no_traceD) + + +subsection \Set up for @{method wp}\ + +lemma no_trace_is_triple[wp_trip]: + "no_trace f = triple_judgement \ f (\() f. id no_trace f)" + by (simp add: triple_judgement_def split: unit.split) + + +subsection \Rules\ + +(* + Collect generic no_trace lemmas here: + - naming convention is no_trace_NAME. + - add lemmas with assumptions to [no_trace_cond] set + - add lemmas without assumption to [no_trace_terminal] set +*) + +named_theorems no_trace_cond +named_theorems no_trace_terminal + +lemma no_trace_bind[wp_split]: + "\no_trace f; \rv. 
no_trace (g rv)\ + \ no_trace (do rv \ f; g rv od)" + apply (subst no_trace_def) + apply (clarsimp simp: bind_def split: tmres.split_asm; + fastforce dest: no_traceD) + done + +lemma no_trace_get[no_trace_terminal]: + "no_trace get" + by (simp add: no_trace_def get_def) + +lemma no_trace_put[no_trace_terminal]: + "no_trace (put s)" + by (simp add: no_trace_def put_def) + +lemma no_trace_modify[no_trace_terminal]: + "no_trace (modify f)" + by (wpsimp simp: modify_def wp: no_trace_terminal) + +lemma no_trace_return[no_trace_terminal]: + "no_trace (return v)" + by (simp add: no_trace_def return_def) + +lemma no_trace_fail[no_trace_terminal]: + "no_trace fail" + by (simp add: no_trace_def fail_def) + +lemma no_trace_returnOk[no_trace_terminal]: + "no_trace (returnOk v)" + by (simp add: returnOk_def no_trace_terminal) + +lemma no_trace_select[no_trace_terminal]: + "no_trace (select S)" + by (clarsimp simp add: no_trace_def select_def) + +lemma no_trace_gets[no_trace_terminal]: + "no_trace (gets f)" + by (wpsimp simp: gets_def wp: no_trace_terminal) + +lemma no_trace_assert[no_trace_terminal]: + "no_trace (assert P)" + by (simp add: assert_def no_trace_terminal) + +lemma no_trace_case_option[no_trace_cond]: + assumes f: "no_trace f" + assumes g: "\x. no_trace (g x)" + shows "no_trace (case_option f g x)" + by (clarsimp simp: f g split: option.split) + +lemma no_trace_assert_opt[no_trace_terminal]: + "no_trace (assert_opt Q)" + by (simp add: assert_opt_def no_trace_terminal no_trace_cond) + +lemma no_trace_alt[no_trace_cond]: + "\ no_trace f; no_trace g \ \ no_trace (f \ g)" + apply (subst no_trace_def) + apply (clarsimp simp add: alternative_def; + fastforce dest: no_traceD) + done + +lemma no_trace_when[no_trace_cond]: + "\P \ no_trace f\ \ no_trace (when P f)" + by (simp add: when_def no_trace_terminal) + +lemma no_trace_unless[no_trace_cond]: + "\\P \ no_trace f\ \ no_trace (unless P f)" + by (simp add: unless_def when_def no_trace_terminal) + +lemma no_trace_if[no_trace_cond]: + "\ P \ no_trace f; \P \ no_trace g \ \ no_trace (if P then f else g)" + by simp + +lemma no_trace_apply[no_trace_cond]: + "no_trace (f (g x)) \ no_trace (f $ g x)" + by simp + +\ \FIXME: make proof nicer\ +lemma no_trace_liftM_eq[simp]: + "no_trace (liftM f m) = no_trace m" + apply (clarsimp simp: no_trace_def bind_def liftM_def return_def) + apply safe + apply (drule_tac x=tr in spec) + apply (drule_tac x="map_tmres id f res" in spec) + apply (drule mp) + apply (rule exI) + apply (erule rev_bexI) + apply (clarsimp split: tmres.splits) + apply clarsimp + apply (drule spec, drule spec, drule mp, rule exI, erule rev_bexI) + apply (auto split: tmres.splits) + done + +lemma no_trace_liftM[no_trace_cond]: + "no_trace m \ no_trace (liftM f m)" + by simp + +lemma no_trace_assertE[no_trace_terminal]: + "no_trace (assertE P)" + by (simp add: assertE_def no_trace_terminal) + +lemma no_trace_whenE[no_trace_cond]: + "\ G \ no_trace f \ \ no_trace (whenE G f)" + by (simp add: whenE_def no_trace_terminal) + +lemma no_trace_unlessE[no_trace_cond]: + "\ \ G \ no_trace f \ \ no_trace (unlessE G f)" + by (simp add: unlessE_def no_trace_terminal) + +lemma no_trace_throwError[no_trace_terminal]: + "no_trace (throwError e)" + by (simp add: throwError_def no_trace_terminal) + +lemma no_trace_throw_opt[no_trace_terminal]: + "no_trace (throw_opt ex x)" + unfolding throw_opt_def + by (simp add: no_trace_terminal no_trace_cond) + +lemma no_trace_liftE[no_trace_cond]: + "no_trace f \ no_trace (liftE f)" + unfolding liftE_def by (wpsimp wp: 
no_trace_terminal) + +lemma no_trace_gets_the[no_trace_terminal]: + "no_trace (gets_the f)" + unfolding gets_the_def + by (wpsimp wp: no_trace_terminal) + +lemma no_trace_lift[no_trace_cond]: + "\\y. x = Inr y \ no_trace (f y)\ \ no_trace (lift f x)" + unfolding lift_def + by (wpsimp wp: no_trace_terminal split: sum.splits) + +lemma no_trace_bindE[wp_split]: + "\ no_trace f; \rv. no_trace (g rv)\ \ no_trace (f >>=E g)" + unfolding bindE_def + by (wpsimp wp: no_trace_cond) + +lemma no_trace_gets_map[no_trace_terminal]: + "no_trace (gets_map f p)" + unfolding gets_map_def by (wpsimp wp: no_trace_terminal) + +lemma no_trace_state_assert[no_trace_terminal]: + "no_trace (state_assert P)" + unfolding state_assert_def + by (wpsimp wp: no_trace_terminal) + +lemma no_trace_condition[no_trace_cond]: + "\no_trace A; no_trace B\ \ no_trace (condition C A B)" + unfolding condition_def no_trace_def + apply clarsimp + apply fastforce + done + +lemma no_trace_mapM[no_trace_cond]: + assumes m: "\x. x \ set xs \ no_trace (m x)" + shows "no_trace (mapM m xs)" + using m +proof (induct xs) + case Nil + thus ?case by (simp add: mapM_def sequence_def no_trace_terminal) +next + case Cons + have P: "\m x xs. mapM m (x # xs) = (do y \ m x; ys \ (mapM m xs); return (y # ys) od)" + by (simp add: mapM_def sequence_def Let_def) + from Cons + show ?case + apply (simp add: P) + apply (wpsimp wp: no_trace_terminal) + done +qed + +lemma no_trace_catch[no_trace_cond]: + "\ no_trace f; \x. no_trace (g x) \ \ no_trace (catch f g)" + unfolding catch_def + by (wpsimp wp: no_trace_terminal split: sum.split) + +lemma no_trace_state_select[no_trace_terminal]: + "no_trace (state_select F)" + unfolding state_select_def2 + by (wpsimp wp: no_trace_terminal) + +lemma no_trace_liftME[no_trace_cond]: + "no_trace m \ no_trace (liftME f m)" + unfolding liftME_def + by (wpsimp wp: no_trace_terminal) + +lemma no_trace_handle'[no_trace_cond]: + "\no_trace f; \e. no_trace (handler e)\ \ no_trace (f handler)" + unfolding handleE'_def + by (wpsimp wp: no_trace_terminal split: sum.split) + +lemma no_trace_handleE[no_trace_cond]: + "\ no_trace L; \r. no_trace (R r) \ \ no_trace (L R)" + unfolding handleE_def + by (simp add: no_trace_cond) + +lemma no_trace_handle_elseE[no_trace_cond]: + "\ no_trace f; \r. no_trace (g r); \r. no_trace (h r) \ \ no_trace (f g h)" + unfolding handle_elseE_def + by (wpsimp wp: no_trace_terminal split: sum.split) + +lemma no_trace_sequence[no_trace_cond]: + "(\m. m \ set ms \ no_trace m) \ no_trace (sequence ms)" + unfolding sequence_def + by (induct ms; wpsimp wp: no_trace_terminal) + +lemma no_trace_sequence_x[no_trace_cond]: + "(\m. m \ set ms \ no_trace m) \ no_trace (sequence_x ms)" + unfolding sequence_x_def + by (induct ms; wpsimp wp: no_trace_terminal) + +lemma no_trace_sequenceE[no_trace_cond]: + "(\m. m \ set ms \ no_trace m) \ no_trace (sequenceE ms)" + unfolding sequenceE_def + by (induct ms; wpsimp wp: no_trace_terminal) + +lemma no_trace_sequenceE_x[no_trace_cond]: + "(\m. m \ set ms \ no_trace m) \ no_trace (sequenceE_x ms)" + unfolding sequenceE_x_def + by (induct ms; wpsimp wp: no_trace_terminal) + +lemma no_trace_mapM_x[no_trace_cond]: + "(\m. m \ f ` set ms \ no_trace m) \ no_trace (mapM_x f ms)" + unfolding mapM_x_def + by (fastforce intro: no_trace_sequence_x) + +lemma no_trace_mapME[no_trace_cond]: + "(\m. m \ f ` set xs \ no_trace m) \ no_trace (mapME f xs)" + unfolding mapME_def + by (fastforce intro: no_trace_sequenceE) + +lemma no_trace_mapME_x[no_trace_cond]: + "(\m'. 
m' \ f ` set xs \ no_trace m') \ no_trace (mapME_x f xs)" + unfolding mapME_x_def + by (fastforce intro: no_trace_sequenceE_x) + +lemma no_trace_filterM[no_trace_cond]: + "(\m. m \ set ms \ no_trace (P m)) \ no_trace (filterM P ms)" + by (induct ms; wpsimp wp: no_trace_terminal split_del: if_split) + +lemma no_trace_zipWithM_x[no_trace_cond]: + "(\x y. no_trace (f x y)) \ no_trace (zipWithM_x f xs ys)" + unfolding zipWithM_x_def zipWith_def + by (fastforce intro: no_trace_sequence_x) + +lemma no_trace_zipWithM[no_trace_cond]: + "(\x y. no_trace (f x y)) \ no_trace (zipWithM f xs ys)" + unfolding zipWithM_def zipWith_def + by (fastforce intro: no_trace_sequence) + +lemma no_trace_foldM[no_trace_cond]: + "(\x y. no_trace (m x y)) \ no_trace (foldM m xs a)" + unfolding foldM_def + by (induct xs; wpsimp wp: no_trace_terminal) + +lemma no_trace_foldME[no_trace_cond]: + "(\x y. no_trace (m x y)) \ no_trace (foldME m a xs)" + unfolding foldME_def + by (induct xs; wpsimp wp: no_trace_terminal) + +lemma no_trace_maybeM[no_trace_cond]: + "\x. no_trace (f x) \ no_trace (maybeM f t)" + unfolding maybeM_def + by (fastforce intro: no_trace_terminal split: option.splits) + +lemma no_trace_notM[no_trace_cond]: + "no_trace A \ no_trace (notM A)" + unfolding notM_def + by (wpsimp wp: no_trace_terminal) + +lemma no_trace_ifM[no_trace_cond]: + "\ no_trace P; no_trace a; no_trace b \ \ no_trace (ifM P a b)" + unfolding ifM_def by wpsimp + +lemma no_trace_ifME[no_trace_cond]: + "\ no_trace P; no_trace a; no_trace b \ \ no_trace (ifME P a b)" + unfolding ifME_def by wpsimp + +lemma no_trace_whenM[no_trace_cond]: + "\ no_trace P; no_trace f \ \ no_trace (whenM P f)" + unfolding whenM_def + by (wpsimp wp: no_trace_terminal no_trace_cond) + +lemma no_trace_orM[no_trace_cond]: + "\ no_trace A; no_trace B \ \ no_trace (orM A B)" + unfolding orM_def + by (wpsimp wp: no_trace_terminal no_trace_cond) + +lemma no_trace_andM[no_trace_cond]: + "\ no_trace A; no_trace B \ \ no_trace (andM A B)" + unfolding andM_def + by (wpsimp wp: no_trace_terminal no_trace_cond) + +\ \While the parallel composition of traceless functions doesn't make much sense, this + still might be useful to handle trivial goals as part of a proof by contradiction.\ +lemma no_trace_parallel[no_trace_cond]: + "\ no_trace f; no_trace g \ \ no_trace (parallel f g)" + by (fastforce simp: parallel_def2 no_trace_def) + +(* not everything [simp] by default, because side conditions can slow down simp a lot *) +lemmas no_trace[wp, intro!] = no_trace_terminal no_trace_cond +lemmas [simp] = no_trace_terminal + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_Prefix_Closed.thy b/lib/Monads/trace/Trace_Prefix_Closed.thy new file mode 100644 index 0000000000..7e7ab925e5 --- /dev/null +++ b/lib/Monads/trace/Trace_Prefix_Closed.thy @@ -0,0 +1,320 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Prefix_Closed + imports + Trace_No_Trace + Trace_Monad_Equations + Eisbach_Tools.Rule_By_Method +begin + +subsection "Prefix Closed" + +text \ + A monad is @{text prefix_closed} if for all traces that it returns, it also returns all incomplete traces + leading up to it.\ +definition prefix_closed :: "('s, 'a) tmonad \ bool" where + "prefix_closed f = (\s. \x xs. (x # xs) \ fst ` f s \ (xs, Incomplete) \ f s)" + +lemmas prefix_closedD1 = prefix_closed_def[THEN iffD1, rule_format] + +lemma in_fst_snd_image_eq: + "x \ fst ` S = (\y. 
(x, y) \ S)" + "y \ snd ` S = (\x. (x, y) \ S)" + by (auto elim: image_eqI[rotated]) + +lemma in_fst_snd_image: + "(x, y) \ S \ x \ fst ` S" + "(x, y) \ S \ y \ snd ` S" + by (auto simp: in_fst_snd_image_eq) + +lemmas prefix_closedD = prefix_closedD1[OF _ in_fst_snd_image(1)] + +lemma no_trace_prefix_closed: + "no_trace f \ prefix_closed f" + by (auto simp add: prefix_closed_def dest: no_trace_emp) + + +subsection \Set up for @{method wp}\ + +lemma prefix_closed_is_triple[wp_trip]: + "prefix_closed f = triple_judgement \ f (\() f. id prefix_closed f)" + by (simp add: triple_judgement_def split: unit.split) + + +subsection \Rules\ + +(* + Collect generic prefix_closed lemmas here: + - naming convention is prefix_closed_NAME. + - add lemmas with assumptions to [prefix_closed_cond] set + - add lemmas without assumption to [prefix_closed_terminal] set +*) + +named_theorems prefix_closed_terminal +named_theorems prefix_closed_cond + +lemmas [prefix_closed_terminal] = no_trace_terminal[THEN no_trace_prefix_closed] + +lemma prefix_closed_bind[wp_split]: + "\prefix_closed f; \x. prefix_closed (g x)\ \ prefix_closed (f >>= g)" + apply (subst prefix_closed_def, clarsimp simp: bind_def) + apply (auto 4 4 simp: Cons_eq_append_conv split: tmres.split_asm + dest!: prefix_closedD[rotated] elim: rev_bexI) + done + +lemma prefix_closed_case_option[prefix_closed_cond]: + assumes f: "prefix_closed f" + assumes g: "\x. prefix_closed (g x)" + shows "prefix_closed (case_option f g x)" + by (clarsimp simp: f g split: option.split) + +lemma prefix_closed_alt[prefix_closed_cond]: + "\ prefix_closed f; prefix_closed g \ \ prefix_closed (f \ g)" + apply (subst prefix_closed_def) + apply (clarsimp simp add: alternative_def; + fastforce dest: prefix_closedD) + done + +lemma prefix_closed_when[prefix_closed_cond]: + "\P \ prefix_closed f\ \ prefix_closed (when P f)" + by (simp add: when_def prefix_closed_terminal) + +lemma prefix_closed_unless[prefix_closed_cond]: + "\\P \ prefix_closed f\ \ prefix_closed (unless P f)" + by (simp add: unless_def when_def prefix_closed_terminal) + +lemma prefix_closed_if[prefix_closed_cond]: + "\ P \ prefix_closed f; \P \ prefix_closed g \ \ prefix_closed (if P then f else g)" + by simp + +lemma prefix_closed_apply[prefix_closed_cond]: + "prefix_closed (f (g x)) \ prefix_closed (f $ g x)" + by simp + +\ \FIXME: make proof nicer\ +lemma prefix_closed_liftM_eq[simp]: + "prefix_closed (liftM f m) = prefix_closed m" + apply (clarsimp simp: prefix_closed_def bind_def' liftM_def return_def image_def) + apply (rule iff_allI)+ + apply safe + apply clarsimp + apply (drule_tac x="((a, b) # xs, map_tmres id f ba)" in bspec) + apply clarsimp + apply (case_tac ba; clarsimp) + apply (auto split: tmres.splits) + done + +lemma prefix_closed_liftM[prefix_closed_cond]: + "prefix_closed m \ prefix_closed (liftM f m)" + by simp + +lemma prefix_closed_whenE[prefix_closed_cond]: + "\ G \ prefix_closed f \ \ prefix_closed (whenE G f)" + by (simp add: whenE_def prefix_closed_terminal) + +lemma prefix_closed_unlessE[prefix_closed_cond]: + "\ \ G \ prefix_closed f \ \ prefix_closed (unlessE G f)" + by (simp add: unlessE_def prefix_closed_terminal) + +lemma prefix_closed_liftE[prefix_closed_cond]: + "prefix_closed f \ prefix_closed (liftE f)" + unfolding liftE_def by (wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_lift[prefix_closed_cond]: + "\\y. 
x = Inr y \ prefix_closed (f y)\ \ prefix_closed (lift f x)" + unfolding lift_def + by (wpsimp wp: prefix_closed_terminal split: sum.splits) + +lemma prefix_closed_bindE[wp_split]: + "\ prefix_closed f; \rv. prefix_closed (g rv)\ \ prefix_closed (f >>=E g)" + unfolding bindE_def + by (wpsimp wp: prefix_closed_cond) + +lemma prefix_closed_condition[prefix_closed_cond]: + "\prefix_closed A; prefix_closed B\ \ prefix_closed (condition C A B)" + unfolding condition_def prefix_closed_def + apply clarsimp + done + +lemma prefix_closed_mapM[prefix_closed_cond]: + "\\x. x \ set xs \ prefix_closed (f x)\ \ prefix_closed (mapM f xs)" + apply (induct xs) + apply (simp add: mapM_def sequence_def prefix_closed_terminal) + apply (clarsimp simp: mapM_Cons) + apply (wpsimp wp: prefix_closed_terminal) + done + +lemma prefix_closed_catch[prefix_closed_cond]: + "\ prefix_closed f; \x. prefix_closed (g x) \ \ prefix_closed (catch f g)" + unfolding catch_def + by (wpsimp wp: prefix_closed_terminal split: sum.split) + +lemma prefix_closed_liftME[prefix_closed_cond]: + "prefix_closed m \ prefix_closed (liftME f m)" + unfolding liftME_def + by (wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_handle'[prefix_closed_cond]: + "\prefix_closed f; \e. prefix_closed (handler e)\ \ prefix_closed (f handler)" + unfolding handleE'_def + by (wpsimp wp: prefix_closed_terminal split: sum.split) + +lemma prefix_closed_handleE[prefix_closed_cond]: + "\ prefix_closed L; \r. prefix_closed (R r) \ \ prefix_closed (L R)" + unfolding handleE_def + by (simp add: prefix_closed_cond) + +lemma prefix_closed_handle_elseE[prefix_closed_cond]: + "\ prefix_closed f; \r. prefix_closed (g r); \r. prefix_closed (h r) \ \ prefix_closed (f g h)" + unfolding handle_elseE_def + by (wpsimp wp: prefix_closed_terminal split: sum.split) + +lemma prefix_closed_sequence[prefix_closed_cond]: + "(\m. m \ set ms \ prefix_closed m) \ prefix_closed (sequence ms)" + unfolding sequence_def + by (induct ms; wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_sequence_x[prefix_closed_cond]: + "(\m. m \ set ms \ prefix_closed m) \ prefix_closed (sequence_x ms)" + unfolding sequence_x_def + by (induct ms; wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_sequenceE[prefix_closed_cond]: + "(\m. m \ set ms \ prefix_closed m) \ prefix_closed (sequenceE ms)" + unfolding sequenceE_def + by (induct ms; wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_sequenceE_x[prefix_closed_cond]: + "(\m. m \ set ms \ prefix_closed m) \ prefix_closed (sequenceE_x ms)" + unfolding sequenceE_x_def + by (induct ms; wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_mapM_x[prefix_closed_cond]: + "(\m. m \ f ` set ms \ prefix_closed m) \ prefix_closed (mapM_x f ms)" + unfolding mapM_x_def + by (fastforce intro: prefix_closed_sequence_x) + +lemma prefix_closed_mapME[prefix_closed_cond]: + "(\m. m \ f ` set xs \ prefix_closed m) \ prefix_closed (mapME f xs)" + unfolding mapME_def + by (fastforce intro: prefix_closed_sequenceE) + +lemma prefix_closed_mapME_x[prefix_closed_cond]: + "(\m'. m' \ f ` set xs \ prefix_closed m') \ prefix_closed (mapME_x f xs)" + unfolding mapME_x_def + by (fastforce intro: prefix_closed_sequenceE_x) + +lemma prefix_closed_filterM[prefix_closed_cond]: + "(\m. m \ set ms \ prefix_closed (P m)) \ prefix_closed (filterM P ms)" + by (induct ms; wpsimp wp: prefix_closed_terminal split_del: if_split) + +lemma prefix_closed_zipWithM_x[prefix_closed_cond]: + "(\x y. 
prefix_closed (f x y)) \ prefix_closed (zipWithM_x f xs ys)" + unfolding zipWithM_x_def zipWith_def + by (fastforce intro: prefix_closed_sequence_x) + +lemma prefix_closed_zipWithM[prefix_closed_cond]: + "(\x y. prefix_closed (f x y)) \ prefix_closed (zipWithM f xs ys)" + unfolding zipWithM_def zipWith_def + by (fastforce intro: prefix_closed_sequence) + +lemma prefix_closed_foldM[prefix_closed_cond]: + "(\x y. prefix_closed (m x y)) \ prefix_closed (foldM m xs a)" + unfolding foldM_def + by (induct xs; wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_foldME[prefix_closed_cond]: + "(\x y. prefix_closed (m x y)) \ prefix_closed (foldME m a xs)" + unfolding foldME_def + by (induct xs; wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_maybeM[prefix_closed_cond]: + "\x. prefix_closed (f x) \ prefix_closed (maybeM f t)" + unfolding maybeM_def + by (fastforce intro: prefix_closed_terminal split: option.splits) + +lemma prefix_closed_notM[prefix_closed_cond]: + "prefix_closed A \ prefix_closed (notM A)" + unfolding notM_def + by (wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_ifM[prefix_closed_cond]: + "\ prefix_closed P; prefix_closed a; prefix_closed b \ \ prefix_closed (ifM P a b)" + unfolding ifM_def by wpsimp + +lemma prefix_closed_ifME[prefix_closed_cond]: + "\ prefix_closed P; prefix_closed a; prefix_closed b \ \ prefix_closed (ifME P a b)" + unfolding ifME_def by wpsimp + +lemma prefix_closed_whenM[prefix_closed_cond]: + "\ prefix_closed P; prefix_closed f \ \ prefix_closed (whenM P f)" + unfolding whenM_def + by (wpsimp wp: prefix_closed_terminal prefix_closed_cond) + +lemma prefix_closed_orM[prefix_closed_cond]: + "\ prefix_closed A; prefix_closed B \ \ prefix_closed (orM A B)" + unfolding orM_def + by (wpsimp wp: prefix_closed_terminal prefix_closed_cond) + +lemma prefix_closed_andM[prefix_closed_cond]: + "\ prefix_closed A; prefix_closed B \ \ prefix_closed (andM A B)" + unfolding andM_def + by (wpsimp wp: prefix_closed_terminal prefix_closed_cond) + +lemma prefix_closed_put_trace_elem[prefix_closed_terminal]: + "prefix_closed (put_trace_elem x)" + by (clarsimp simp: prefix_closed_def put_trace_elem_def) + +lemma prefix_closed_put_trace[prefix_closed_terminal]: + "prefix_closed (put_trace tr)" + by (induct tr; wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_env_steps[prefix_closed_terminal]: + "prefix_closed env_steps" + by (wpsimp simp: env_steps_def wp: prefix_closed_terminal) + +lemma prefix_closed_interference[prefix_closed_terminal]: + "prefix_closed interference" + by (wpsimp simp: interference_def commit_step_def wp: prefix_closed_terminal) + +lemma prefix_closed_await[prefix_closed_terminal]: + "prefix_closed (Await c)" + by (wpsimp simp: Await_def wp: prefix_closed_terminal) + +lemma prefix_closed_repeat_n[prefix_closed_cond]: + "prefix_closed f \ prefix_closed (repeat_n n f)" + by (induct n; wpsimp wp: prefix_closed_terminal) + +lemma prefix_closed_repeat[prefix_closed_cond]: + "prefix_closed f \ prefix_closed (repeat f)" + apply (simp add: repeat_def) + apply (wpsimp wp: prefix_closed_terminal prefix_closed_cond) + done + +lemma prefix_closed_parallel[wp_split]: + "\prefix_closed f; prefix_closed g\ + \ prefix_closed (parallel f g)" + apply (subst prefix_closed_def, clarsimp simp: parallel_def) + apply (subst (asm) zip.zip_Cons) + apply (clarsimp split: list.splits) + apply (drule(1) prefix_closedD)+ + apply fastforce + done + +context begin +(* We want to enforce that prefix_closed_terminal only contains lemmas that have no + 
assumptions. The following thm statement should fail if this is not true. *) +private lemmas check_no_assumptions_internal = iffD1[OF refl, where P="prefix_closed f" for f] +thm prefix_closed_terminal[atomized, THEN check_no_assumptions_internal] +end + +(* not everything [simp] by default, because side conditions can slow down simp a lot *) +lemmas prefix_closed[wp, intro!] = prefix_closed_terminal prefix_closed_cond +lemmas [simp] = prefix_closed_terminal + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_RG.thy b/lib/Monads/trace/Trace_RG.thy new file mode 100644 index 0000000000..e462485b84 --- /dev/null +++ b/lib/Monads/trace/Trace_RG.thy @@ -0,0 +1,1844 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_RG + imports + Trace_More_VCG + Trace_Monad_Equations + Trace_Prefix_Closed +begin + +section \Rely-Guarantee Logic\ + +subsection \Validity\ + +text \ + This section defines a Rely-Guarantee logic for partial correctness for + the interference trace monad. + + The logic is defined semantically. Rules work directly on the + validity predicate. + + In the interference trace monad, RG validity is a quintuple of precondition, + rely, monad, guarantee, and postcondition. The precondition is a function + from initial state to current state to bool (a state predicate), the rely and + guarantee are functions from state before to state after to bool, and the + postcondition is a function from return value to last state in the trace to + final state to bool. A quintuple is valid if for all initial and current + states that satisfy the precondition and all traces that satisfy the rely, + all of the possible self steps performed by the monad satisfy the guarantee + and all of the result values and result states that are returned by the monad + satisfy the postcondition. Note that if the computation returns an empty + trace and no successful results then the quintuple is trivially valid. This + means @{term "assert P"} does not require us to prove that @{term P} holds, + but rather allows us to assume @{term P}! Proving non-failure is done via a + separate predicate and calculus (see @{text Trace_No_Fail}).\ + +primrec trace_steps :: "(tmid \ 's) list \ 's \ (tmid \ 's \ 's) set" where + "trace_steps (elem # trace) s0 = {(fst elem, s0, snd elem)} \ trace_steps trace (snd elem)" + | "trace_steps [] s0 = {}" + +lemma trace_steps_nth: + "trace_steps xs s0 = (\i. (fst (xs ! i), (if i = 0 then s0 else snd (xs ! (i - 1))), snd (xs ! i))) ` {..< length xs}" +proof (induct xs arbitrary: s0) + case Nil + show ?case by simp +next + case (Cons a xs) + show ?case + apply (simp add: lessThan_Suc_eq_insert_0 Cons image_image nth_Cons') + apply (intro arg_cong2[where f=insert] refl image_cong) + apply simp + done +qed + +text \@{text rg_pred} type: Rely-Guarantee predicates (@{text "state before => state after => bool"})\ +type_synonym 's rg_pred = "'s \ 's \ bool" + +text \Abbreviations for trivial postconditions (taking three arguments):\ +abbreviation(input) + toptoptop :: "'a \ 'b \ 'b \ bool" ("\\\") where + "\\\ \ \_ _ _. True" + +abbreviation(input) + botbotbot :: "'a \ 'b \ 'b \ bool" ("\\\") where + "\\\ \ \_ _ _. 
False" + +text \ + Test whether the environment steps in @{text tr} satisfy the rely condition @{text R}, + assuming that @{text s0s} was the initial state before the first step in the trace.\ +definition rely_cond :: "'s rg_pred \ 's \ (tmid \ 's) list \ bool" where + "rely_cond R s0s tr = (\(ident, s0, s) \ trace_steps (rev tr) s0s. ident = Env \ R s0 s)" + +text \ + Test whether the self steps in @{text tr} satisfy the guarantee condition @{text G}, + assuming that @{text s0s} was the initial state before the first step in the trace.\ +definition guar_cond :: "'s rg_pred \ 's \ (tmid \ 's) list \ bool" where + "guar_cond G s0s tr = (\(ident, s0, s) \ trace_steps (rev tr) s0s. ident = Me \ G s0 s)" + +lemma rg_empty_conds[simp]: + "rely_cond R s0s []" + "guar_cond G s0s []" + by (simp_all add: rely_cond_def guar_cond_def) + +lemma rg_conds_True[simp]: + "rely_cond \\ = \\" + "guar_cond \\ = \\" + by (clarsimp simp: rely_cond_def guar_cond_def fun_eq_iff)+ + +text \ + @{text "rely f R s0s"} constructs a new function from @{text f}, where the environment + steps are constrained by @{text R} and @{text s0s} was the initial state before the first + step in the trace.\ +definition rely :: "('s, 'a) tmonad \ 's rg_pred \ 's \ ('s, 'a) tmonad" where + "rely f R s0s \ (\s. f s \ ({tr. rely_cond R s0s tr} \ UNIV))" + +definition validI :: + "('s \ 's \ bool) \ 's rg_pred \ ('s,'a) tmonad \ 's rg_pred \ ('a \ 's \ 's \ bool) \ bool" + ("(\_\,/ \_\)/ _ /(\_\,/ \_\)") where + "\P\,\R\ f \G\,\Q\ \ + prefix_closed f + \ (\s0 s tr res. P s0 s \ (tr, res) \ (rely f R s0 s) + \ guar_cond G s0 tr + \ (\rv s'. res = Result (rv, s') \ Q rv (last_st_tr tr s0) s'))" + +text \ + We often reason about invariant predicates. The following provides shorthand syntax + that avoids repeating potentially long predicates.\ +abbreviation (input) invariantI :: + "('s,'a) tmonad \ 's rg_pred \ 's rg_pred \ ('s \ 's \ bool) \ bool" + ("_/ \_\,/ \_\,/ \_\" [59,0] 60) where + "invariantI f R G P \ \P\,\R\ f \G\,\\_. P\" + +text \ Validity for exception monad with interferences, built on @{term validI}. \ +definition validIE :: + "('s \ 's \ bool) \ 's rg_pred \ ('s, 'a + 'b) tmonad \ 's rg_pred \ ('b \ 's \ 's \ bool) \ + ('a \ 's \ 's \ bool) \ bool" + ("(\_\,/ \_\)/ _ /(\_\,/ \_\,/ \_\)") where + "\P\,\R\ f \G\,\Q\,\E\ \ \P\,\R\ f \G\,\ \v s0 s. case v of Inr r \ Q r s0 s | Inl e \ E e s0 s \" + +lemma validIE_def2: + "\P\,\R\ f \G\,\Q\,\E\ \ + prefix_closed f + \ (\s0 s tr res. P s0 s \ (tr, res) \ (rely f R s0 s) + \ guar_cond G s0 tr + \ (\rv s'. res = Result (rv, s') + \ (case rv of Inr b \ Q b (last_st_tr tr s0) s' | Inl a \ E a (last_st_tr tr s0) s')))" + by (unfold validI_def validIE_def) + +text \ + The following two abbreviations are convenient to separate reasoning for exceptional and + normal case.\ +abbreviation validIE_R :: + "('s \ 's \ bool) \ 's rg_pred \ ('s, 'a + 'b) tmonad \ 's rg_pred \ ('b \ 's \ 's \ bool) \ + bool" + ("(\_\,/ \_\)/ _ /(\_\,/ \_\,/ -)") where + "\P\,\R\ f \G\,\Q\,- \ \P\,\R\ f \G\,\Q\,\\_ _ _. True\" + +abbreviation validIE_E :: + "('s \ 's \ bool) \ 's rg_pred \ ('s, 'a + 'b) tmonad \ 's rg_pred \ ('a \ 's \ 's \ bool) \ + bool" + ("(\_\,/ \_\)/ _ /(\_\,/ -,/ \_\)") where + "\P\,\R\ f \G\,-,\E\ \ \P\,\R\ f \G\,\\_ _ _. 
True\,\E\" + +text \ + The following abbreviations are convenient to separate reasoning about postconditions and the + guarantee condition.\ +abbreviation validI_no_guarantee :: + "('s \ 's \ bool) \ 's rg_pred \ ('s,'a) tmonad \ ('a \ 's \ 's \ bool) \ bool" + ("(\_\,/ \_\)/ _ /(-,/ \_\)") where + "\P\,\R\ f -,\Q\ \ \P\,\R\ f \\_ _. True\,\Q\" + +abbreviation (input) invariantI_no_guarantee :: + "('s,'a) tmonad \ 's rg_pred \ ('s \ 's \ bool) \ bool" + ("_/ \_\,/ -,/ \_\" [59,0] 60) where + "f \R\, -, \P\ \ \P\,\R\ f \\_ _. True\,\\_. P\" + +abbreviation validIE_no_guarantee :: + "('s \ 's \ bool) \ 's rg_pred \ ('s, 'a + 'b) tmonad \ ('b \ 's \ 's \ bool) \ + ('a \ 's \ 's \ bool) \ bool" + ("(\_\,/ \_\)/ _ /(-,/ \_\,/ \_\)") where + "\P\,\R\ f -,\Q\,\E\ \ \P\,\R\ f \\_ _. True\,\Q\,\E\" + +abbreviation validIE_R_no_guarantee :: + "('s \ 's \ bool) \ 's rg_pred \ ('s, 'a + 'b) tmonad \ ('b \ 's \ 's \ bool) \ bool" + ("(\_\,/ \_\)/ _ /(-,/ \_\,/ -)") where + "\P\,\R\ f -,\Q\,- \ \P\,\R\ f \\_ _. True\,\Q\,\\_ _ _. True\" + +abbreviation validIE_E_no_guarantee :: + "('s \ 's \ bool) \ 's rg_pred \ ('s, 'a + 'b) tmonad \ ('a \ 's \ 's \ bool) \ bool" + ("(\_\,/ \_\)/ _ /(-,/ -,/ \_\)") where + "\P\,\R\ f -,-,\E\ \ \P\,\R\ f \\_ _. True\,\\_ _ _. True\,\E\" + +lemma in_rely: + "\(tr, res) \ f s; rely_cond R s0s tr\ \ (tr, res) \ rely f R s0s s" + by (simp add: rely_def) + +lemmas validI_D = + validI_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct2, rule_format, + OF _ conjI, OF _ _ in_rely] +lemmas validI_GD = validI_D[THEN conjunct1] +lemmas validI_rvD = validI_D[THEN conjunct2, rule_format, rotated -1, OF refl] +lemmas validI_prefix_closed = validI_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct1] +lemmas validI_prefix_closed_T = + validI_prefix_closed[where P="\_ _. False" and R="\_ _. False" and G="\_ _. True" + and Q="\_ _ _. True"] + +declare validI_prefix_closed[elim!] + + +section \Pre Lemmas\ + +lemma rg_weaken_pre: + "\\P'\,\R\ f \G\,\Q\; \s0 s. P s0 s \ P' s0 s\ + \ \P\,\R\ f \G\,\Q\" + by (simp add: validI_def, blast) + +lemmas rg_pre_imp = rg_weaken_pre[rotated] + +lemma rg_weaken_preE: + "\\P'\,\R\ f \G\,\Q\,\E\; \s0 s. P s0 s \ P' s0 s\ + \ \P\,\R\ f \G\,\Q\,\E\" + by (simp add: validIE_def rg_weaken_pre) + +lemma rely_weaken: + "\\s0 s. R s0 s \ R' s0 s; (tr, res) \ rely f R s s0\ + \ (tr, res) \ rely f R' s s0" + by (simp add: rely_def rely_cond_def, blast) + +lemma rg_weaken_rely: + "\\P\,\R'\ f \G\,\Q\; \s0 s. R s0 s \ R' s0 s\ + \ \P\,\R\ f \G\,\Q\" + apply (simp add: validI_def) + by (metis rely_weaken) + +lemma rg_weaken_relyE: + "\\P\,\R'\ f \G\,\Q\,\E\; \s0 s. R s0 s \ R' s0 s\ + \ \P\,\R\ f \G\,\Q\,\E\" + by (simp add: validIE_def rg_weaken_rely) + +lemma rg_weaken_pre_rely: + "\\P'\,\R'\ f \G\,\Q\; \s0 s. R s0 s \ R' s0 s; \s0 s. P s0 s \ P' s0 s\ + \ \P\,\R\ f \G\,\Q\" + by (rule rg_weaken_pre, rule rg_weaken_rely; assumption) + +lemma rg_weaken_pre_relyE: + "\\P'\,\R'\ f \G\,\Q\,\E\; \s0 s. R s0 s \ R' s0 s; \s0 s. 
P s0 s \ P' s0 s\ + \ \P\,\R\ f \G\,\Q\,\E\" + by (simp add: validIE_def rg_weaken_pre_rely) + +lemmas rg_pre[wp_pre] = + rg_weaken_pre + rg_weaken_preE + + +subsection \Setting up the precondition case splitter.\ + +(* FIXME: this needs adjustment, case_prod Q is unlikely to unify *) +lemma wpc_helper_validI: + "\Q\,\R\ g \G\,\S\ \ wpc_helper (P, P', P'') (case_prod Q, Q', Q'') (\curry P\,\R\ g \G\,\S\)" + by (clarsimp simp: wpc_helper_def elim!: rg_pre) + +lemma wpc_helper_validIE: + "\Q\,\R\ g \G\,\S\,\E\ \ wpc_helper (P, P', P'') (case_prod Q, Q', Q'') (\curry P\,\R\ g \G\,\S\,\E\)" + by (clarsimp simp: wpc_helper_def elim!: rg_pre) + +wpc_setup "\m. \P\,\R\ m \G\,\S\" wpc_helper_validI +wpc_setup "\m. \P\,\R\ m \G\,\S\,\E\" wpc_helper_validIE + + +subsection \RG Logic Rules\ + +lemma trace_steps_append: + "trace_steps (xs @ ys) s + = trace_steps xs s \ trace_steps ys (last_st_tr (rev xs) s)" + by (induct xs arbitrary: s, simp_all add: last_st_tr_def hd_append) + +lemma rely_cond_append: + "rely_cond R s (xs @ ys) = (rely_cond R s ys \ rely_cond R (last_st_tr ys s) xs)" + by (simp add: rely_cond_def trace_steps_append ball_Un conj_comms) + +lemma guar_cond_append: + "guar_cond G s (xs @ ys) = (guar_cond G s ys \ guar_cond G (last_st_tr ys s) xs)" + by (simp add: guar_cond_def trace_steps_append ball_Un conj_comms) + +lemma no_trace_last_st_tr: + "\no_trace f; (tr, res) \ f s\ \ last_st_tr tr s0 = s0" + by (fastforce simp: no_trace_def) + +lemma no_trace_rely_cond: + "\no_trace f; (tr, res) \ f s\ \ rely_cond R s0 tr" + by (fastforce simp: no_trace_def rely_cond_def) + +lemma validI_valid_no_trace_eq: + "no_trace f \ \P\,\R\ f \G\,\Q\ = (\s0. \P s0\ f \\v. Q v s0\)" + apply (rule iffI) + apply (fastforce simp: rely_def validI_def valid_def mres_def + dest: no_trace_emp) + apply (clarsimp simp: rely_def validI_def valid_def mres_def no_trace_prefix_closed) + apply (fastforce simp: eq_snd_iff dest: no_trace_emp) + done + +lemma validIE_validE_no_trace_eq: + "no_trace f \ \P\,\R\ f \G\,\Q\,\E\ = (\s0. \P s0\ f \\v. Q v s0\,\\v. E v s0\)" + unfolding validIE_def validE_def + apply (clarsimp simp: validI_valid_no_trace_eq) + done + +(*FIXME MC: name*) +lemma validI_split: + "\\P\,\R\ f -,\Q\; \P'\,\R\ f \G\,\\\\\\ \ \P and P'\,\R\ f \G\,\Q\" + by (auto simp: validI_def) + +lemma validIE_split: + "\\P\,\R\ f -,\Q\,\E\; \P'\,\R\ f \G\,\\\\\,\\\\\\ \ \P and P'\,\R\ f \G\,\Q\,\E\" + by (auto simp: validIE_def validI_split) + +lemma bind_twp[wp_split]: + "\\r. \Q' r\,\R\ g r \G\,\Q\; \P\,\R\ f \G\,\Q'\\ + \ \P\,\R\ f >>= (\rv. g rv) \G\,\Q\" + apply (subst validI_def, rule conjI) + apply (blast intro: prefix_closed_bind validI_prefix_closed) + apply (clarsimp simp: bind_def rely_def) + apply (drule(2) validI_D) + apply (clarsimp simp: rely_cond_append split: tmres.split_asm) + apply (clarsimp split: tmres.split_asm) + apply (drule meta_spec, frule(2) validI_D) + apply (clarsimp simp: rely_cond_append split: if_split_asm) + apply (clarsimp simp: guar_cond_append) + done + +lemma bindE_twp[wp_split]: + "\\r. \Q' r\,\R\ g r \G\,\Q\,\E\; \P\,\R\ f \G\,\Q'\,\E\\ + \ \P\,\R\ f >>=E (\r. g r) \G\,\Q\,\E\" + apply (simp add: bindE_def validIE_def) + apply (erule bind_twp[rotated]) + apply (clarsimp simp: lift_def throwError_def split: sum.splits) + apply (subst validI_valid_no_trace_eq) + apply wpsimp+ + done + +lemmas bind_twp_fwd = bind_twp[rotated] +lemmas bindE_twp_fwd = bindE_twp[rotated] + +lemma bind_twpE: + "\\x. 
\Q' x\,\R\ g x \G\,\Q\,\E\; \P\,\R\ f \G\,\Q'\\ \ \P\,\R\ f >>= g \G\,\Q\,\E\" + apply (clarsimp simp: validIE_def) + by (wp | assumption)+ + +lemma rg_TrueI: + "\P\,\R\ f -,\\_ _. \\ = prefix_closed f" + by (simp add: validI_def) + +lemma rgE_TrueI: + "\P\,\R\ f -,\\_ _. \\,\\_ _. \\ = prefix_closed f" + by (simp add: validIE_def rg_TrueI) + +lemmas twp_post_taut = rg_TrueI[where P="\\", THEN iffD2] +lemmas twp_post_tautE = rgE_TrueI[where P="\\", THEN iffD2] +lemmas twp_post_tauts[intro] = twp_post_taut twp_post_tautE + +lemma rg_post_conj[intro]: + "\\P\,\R\ f \G\,\Q\; \P\,\R\ f \G\,\Q'\\ \ \P\,\R\ f \G\,\Q and Q'\" + by (fastforce simp: validI_def) + +lemma rg_pre_disj[intro]: + "\\P\,\R\ f \G\,\Q\; \P'\,\R\ f \G\,\Q\\ \ \P or P'\,\R\ f \G\,\Q\" + by (fastforce simp: validI_def pred_disj_def) + +lemma rg_conj: + "\\P\,\R\ f \G\,\Q\; \P'\,\R\ f \G\,\Q'\\ \ \P and P'\,\R\ f \G\,\Q and Q'\" + unfolding validI_def by auto + +lemma rg_pre_cont[simp]: + "\\\\,\R\ f \G\,\Q\ = prefix_closed f" + by (simp add: validI_def) + +lemma rg_FalseE[simp]: + "\\\\,\R\ f \G\,\Q\,\E\ = prefix_closed f" + by (simp add: validI_def validIE_def) + +lemma rg_post_imp: + "\\v s0 s. Q' v s0 s \ Q v s0 s; \P\,\R\ f \G\,\Q'\\ + \ \P\,\R\ f \G\,\Q\" + by (simp add: validI_def) + +lemma rg_post_impE: + "\\v s0 s. Q' v s0 s \ Q v s0 s; \v s0 s. E' v s0 s \ E v s0 s; \P\,\R\ f \G\,\Q'\,\E'\\ + \ \P\,\R\ f \G\,\Q\,\E\" + by (clarsimp simp: validIE_def2 split: sum.splits) + +lemmas rg_strengthen_post = rg_post_imp[rotated] +lemmas rg_strengthen_postE = rg_post_impE[rotated 2] + +lemma rg_post_imp_dc: + "\\P\,\R\ a \G\,\\_. Q'\; \s0 s. Q' s0 s \ Q s0 s\ \ \P\,\R\ a \G\,\\_. Q\,\\_. Q\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_post_imp_dc2: + "\\P\,\R\ a \G\,\\_. Q'\; \s0 s. Q' s0 s \ Q s0 s\ \ \P\,\R\ a \G\,\\_. Q\,-" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_post_imp_dc2E: + "\\P\,\R\ a \G\,\\_. Q'\; \s0 s. Q' s0 s \ Q s0 s\ \ \P\,\R\ a \G\,-,\\_. Q\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_guar_imp: + "\\s0 s. G' s0 s \ G s0 s; \P\,\R\ f \G'\,\Q\\ + \ \P\,\R\ f \G\,\Q\" + by (force simp: validI_def guar_cond_def) + +lemma rg_guar_impE: + "\\s0 s. G' s0 s \ G s0 s; \P\,\R\ f \G'\,\Q\,\E\\ + \ \P\,\R\ f \G\,\Q\,\E\" + by (clarsimp simp: validIE_def elim!: rg_guar_imp) + +lemmas rg_strengthen_guar = rg_guar_imp[rotated] +lemmas rg_strengthen_guarE = rg_guar_impE[rotated] + +lemma rg_conjD1: + "\P\,\R\ f \G\,\\rv s0. Q rv s0 and Q' rv s0\ \ \P\,\R\ f \G\,\\rv. Q rv\" + unfolding validI_def by auto + +lemma rg_conjD2: + "\P\,\R\ f \G\,\\rv s0. Q rv s0 and Q' rv s0\ \ \P\,\R\ f \G\,\\rv. Q' rv\" + unfolding validI_def by auto + +lemma rg_post_disjI1: + "\P\,\R\ f \G\,\\rv. Q rv\ \ \P\,\R\ f \G\,\\rv s0. Q rv s0 or Q' rv s0\" + unfolding validI_def by auto + +lemma rg_post_disjI2: + "\P\,\R\ f \G\,\\rv. Q' rv\ \ \P\,\R\ f \G\,\\rv s0. Q rv s0 or Q' rv s0\" + unfolding validI_def by auto + +lemma use_validI': + "\(tr, Result (rv, s')) \ rely f R s0 s; \P\,\R\ f \G\,\Q\; P s0 s; s0' = last_st_tr tr s0\ + \ Q rv s0' s'" + unfolding validI_def by auto + +lemmas use_validI = use_validI'[OF _ _ _ refl] + +lemmas post_by_rg = use_validI[rotated] + +lemma use_validI_guar: + "\(tr, res) \ rely f R s0 s; \P\,\R\ f \G\,\Q\; P s0 s\ + \ guar_cond G s0 tr" + unfolding validI_def by auto + +lemmas guar_by_rg = use_validI_guar[rotated] + +lemma in_inv_by_rgD: + "\\P. 
f \R\,-,\P\; (tr, Result (rv, s')) \ rely f R s0 s\ \ s' = s" + unfolding validI_def + apply (drule meta_spec[where x="\_. (=) s"]) + apply blast + done + +lemma last_st_in_inv_by_rgD: + "\\P. f \R\,-,\P\; (tr, Result (rv, s')) \ rely f R s0 s\ \ last_st_tr tr s0 = s0" + unfolding validI_def + apply (drule meta_spec[where x="\s0' _. s0' = s0"]) + apply blast + done + +lemma trace_steps_rev_drop_nth: + "trace_steps (rev (drop n tr)) s + = (\i. (fst (rev tr ! i), (if i = 0 then s else snd (rev tr ! (i - 1))), + snd (rev tr ! i))) ` {..< length tr - n}" + apply (simp add: trace_steps_nth) + apply (intro image_cong refl) + apply (simp add: rev_nth) + done + +lemma prefix_closed_drop: + "\(tr, res) \ f s; prefix_closed f\ \ \res'. (drop n tr, res') \ f s" +proof (induct n arbitrary: res) + case 0 then show ?case by auto +next + case (Suc n) + have drop_1: "(tr, res) \ f s \ \res'. (drop 1 tr, res') \ f s" for tr res + by (cases tr; fastforce dest: prefix_closedD[rotated] intro: Suc) + show ?case + using Suc.hyps[OF Suc.prems] + by (metis drop_1[simplified] drop_drop add_0 add_Suc) +qed + +lemma validI_GD_drop: + "\\P\, \R\ f \G\, \Q\; P s0 s; (tr, res) \ f s; + rely_cond R s0 (drop n tr)\ + \ guar_cond G s0 (drop n tr)" + apply (drule prefix_closed_drop[where n=n], erule validI_prefix_closed) + apply (auto dest: validI_GD) + done + +lemma rely_cond_drop: + "rely_cond R s0 xs \ rely_cond R s0 (drop n xs)" + using rely_cond_append[where xs="take n xs" and ys="drop n xs"] + by simp + +lemma rely_cond_is_drop: + "\rely_cond R s0 xs; (ys = drop (length xs - length ys) xs)\ + \ rely_cond R s0 ys" + by (metis rely_cond_drop) + +lemma bounded_rev_nat_induct: + "\(\n. N \ n \ P n); \n. \n < N; P (Suc n)\ \ P n\ + \ P n" + by (induct diff\"N - n" arbitrary: n; simp) + +lemma drop_n_induct: + "\P []; \n. \n < length xs; P (drop (Suc n) xs)\ \ P (drop n xs)\ + \ P (drop n xs)" + by (induct len\"length (drop n xs)" arbitrary: n xs; simp) + +lemma guar_cond_Cons_eq: + "guar_cond R s0 (x # xs) + = (guar_cond R s0 xs \ (fst x \ Env \ R (last_st_tr xs s0) (snd x)))" + by (cases "fst x"; simp add: guar_cond_def trace_steps_append conj_comms) + +lemma guar_cond_Cons: + "\guar_cond R s0 xs; fst x \ Env \ R (last_st_tr xs s0) (snd x)\ + \ guar_cond R s0 (x # xs)" + by (simp add: guar_cond_Cons_eq) + +lemma guar_cond_drop_Suc_eq: + "n < length xs + \ guar_cond R s0 (drop n xs) = (guar_cond R s0 (drop (Suc n) xs) + \ (fst (xs ! n) \ Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))))" + apply (rule trans[OF _ guar_cond_Cons_eq]) + apply (simp add: Cons_nth_drop_Suc) + done + +lemma guar_cond_drop_Suc: + "\guar_cond R s0 (drop (Suc n) xs); + fst (xs ! n) \ Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))\ + \ guar_cond R s0 (drop n xs)" + by (cases "n < length xs"; simp add: guar_cond_drop_Suc_eq) + +lemma rely_cond_Cons_eq: + "rely_cond R s0 (x # xs) + = (rely_cond R s0 xs \ (fst x = Env \ R (last_st_tr xs s0) (snd x)))" + by (simp add: rely_cond_def trace_steps_append conj_comms) + +lemma rely_cond_Cons: + "\rely_cond R s0 xs; fst x = Env \ R (last_st_tr xs s0) (snd x)\ + \ rely_cond R s0 (x # xs)" + by (simp add: rely_cond_Cons_eq) + +lemma rely_cond_drop_Suc_eq: + "n < length xs + \ rely_cond R s0 (drop n xs) = (rely_cond R s0 (drop (Suc n) xs) + \ (fst (xs ! n) = Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))))" + apply (rule trans[OF _ rely_cond_Cons_eq]) + apply (simp add: Cons_nth_drop_Suc) + done + +lemma rely_cond_drop_Suc: + "\rely_cond R s0 (drop (Suc n) xs); + fst (xs ! 
n) = Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))\ + \ rely_cond R s0 (drop n xs)" + by (cases "n < length xs"; simp add: rely_cond_drop_Suc_eq) + +lemma last_st_tr_map_zip_hd: + "\length flags = length tr; tr \ [] \ snd (f (hd flags, hd tr)) = snd (hd tr)\ + \ last_st_tr (map f (zip flags tr)) = last_st_tr tr" + apply (cases tr; simp) + apply (cases flags; simp) + apply (simp add: fun_eq_iff) + done + +lemma last_st_tr_drop_map_zip_hd: + "\length flags = length tr; + n < length tr \ snd (f (flags ! n, tr ! n)) = snd (tr ! n)\ + \ last_st_tr (drop n (map f (zip flags tr))) = last_st_tr (drop n tr)" + apply (simp add: drop_map drop_zip) + apply (rule last_st_tr_map_zip_hd; clarsimp) + apply (simp add: hd_drop_conv_nth) + done + +lemma last_st_tr_map_zip: + "\length flags = length tr; \fl tmid s. snd (f (fl, (tmid, s))) = s\ + \ last_st_tr (map f (zip flags tr)) = last_st_tr tr" + apply (erule last_st_tr_map_zip_hd) + apply (clarsimp simp: neq_Nil_conv) + done + +lemma parallel_rely_induct: + assumes preds: "(tr, v) \ parallel f g s" "Pf s0 s" "Pg s0 s" + and validI: "\Pf\,\Rf\ f' \Gf\,\Qf\" + "\Pg\,\Rg\ g' \Gg\,\Qg\" + "f s \ f' s" "g s \ g' s" + and compat: "R \ Rf" "R \ Rg" "Gf \ G" "Gg \ G" "Gf \ Rg" "Gg \ Rf" + and rely: "rely_cond R s0 (drop n tr)" + shows + "\tr_f tr_g. (tr_f, v) \ f s \ (tr_g, v) \ g s + \ rely_cond Rf s0 (drop n tr_f) \ rely_cond Rg s0 (drop n tr_g) + \ map snd tr_f = map snd tr \ map snd tr_g = map snd tr + \ (\i \ length tr. last_st_tr (drop i tr_g) s0 = last_st_tr (drop i tr_f) s0) + \ guar_cond G s0 (drop n tr)" + (is "\ys zs. _ \ _ \ ?concl ys zs") +proof - + obtain ys zs where tr: "tr = map parallel_mrg (zip ys zs)" + and all2: "list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ys zs" + and ys: "(ys, v) \ f s" and zs: "(zs, v) \ g s" + using preds + by (clarsimp simp: parallel_def2) + note len[simp] = list_all2_lengthD[OF all2] + + have ys': "(ys, v) \ f' s" and zs': "(zs, v) \ g' s" + using ys zs validI by auto + + note rely_f_step = validI_GD_drop[OF validI(1) preds(2) ys' rely_cond_drop_Suc] + note rely_g_step = validI_GD_drop[OF validI(2) preds(3) zs' rely_cond_drop_Suc] + + note snd[simp] = list_all2_nthD[OF all2, THEN conjunct2] + + have "?concl ys zs" + using rely tr all2 rely_f_step rely_g_step + apply (induct n rule: bounded_rev_nat_induct) + apply (subst drop_all, assumption) + apply clarsimp + apply (simp add: list_all2_conv_all_nth last_st_tr_def drop_map[symmetric] + hd_drop_conv_nth hd_append) + apply (fastforce simp: split_def intro!: nth_equalityI) + apply clarsimp + apply (drule meta_mp, erule rely_cond_is_drop, simp) + apply clarsimp + apply (erule meta_allE, drule meta_mp, assumption)+ + apply (subst(asm) rely_cond_drop_Suc_eq[where xs="map f xs" for f xs], simp) + apply (clarsimp simp: last_st_tr_drop_map_zip_hd if_split[where P="\x. x = Env"] + split_def) + apply (intro conjI; (rule guar_cond_drop_Suc rely_cond_drop_Suc, assumption)) + apply (auto simp: guar_cond_drop_Suc_eq last_st_tr_drop_map_zip_hd + intro: compat[THEN predicate2D]) + done + + thus ?thesis + using ys zs + by auto +qed + +lemmas parallel_rely_induct0 = parallel_rely_induct[where n=0, simplified] + +lemma rg_validI: + assumes validI: "\Pf\,\Rf\ f \Gf\,\Qf\" + "\Pg\,\Rg\ g \Gg\,\Qg\" + and compat: "R \ Rf" "R \ Rg" "Gf \ G" "Gg \ G" "Gf \ Rg" "Gg \ Rf" + shows "\Pf and Pg\,\R\ parallel f g \G\,\\rv. 
Qf rv and Qg rv\" + apply (clarsimp simp: validI_def rely_def pred_conj_def + prefix_closed_parallel validI[THEN validI_prefix_closed]) + apply (drule(3) parallel_rely_induct0[OF _ _ _ validI order_refl order_refl compat]) + apply clarsimp + apply (drule(2) validI[THEN validI_rvD])+ + apply (simp add: last_st_tr_def) + done + +lemma rely_prim[simp]: + "rely (\s. insert (v s) (f s)) R s0 = (\s. {x. x = v s \ rely_cond R s0 (fst x)} \ (rely f R s0 s))" + "rely (\s. {}) R s0 = (\_. {})" + "rely (\s. (f s) \ (g s)) R s0 = (\s. (rely f R s0 s) \ (rely g R s0 s))" + "rely (\s. if C s then f s else g s) R s0 = (\s. if C s then rely f R s0 s else rely g R s0 s)" + by (auto simp: rely_def prod_eq_iff split: if_splits) + +lemma put_trace_eq_drop: + "put_trace xs s + = ((\n. (drop n xs, if n = 0 then Result ((), s) else Incomplete)) ` {.. length xs})" + apply (induct xs) + apply (clarsimp simp: return_def) + apply (clarsimp simp: put_trace_elem_def bind_def) + apply (simp add: atMost_Suc_eq_insert_0 image_image) + apply (rule equalityI; clarsimp) + apply (split if_split_asm; clarsimp) + apply (auto intro: image_eqI[where x=0])[1] + apply (rule rev_bexI, simp) + apply clarsimp + done + +lemma put_trace_res: + "(tr, res) \ put_trace xs s + \ \n. tr = drop n xs \ n \ length xs + \ res = (case n of 0 \ Result ((), s) | _ \ Incomplete)" + apply (clarsimp simp: put_trace_eq_drop) + apply (auto simp: gr0_conv_Suc intro: exI[where x=0]) + done + +lemma put_trace_twp[wp]: + "\\s0 s. (\n. rely_cond R s0 (drop n xs) \ guar_cond G s0 (drop n xs)) + \ (rely_cond R s0 xs \ Q () (last_st_tr xs s0) s)\,\R\ + put_trace xs + \G\,\Q\" + apply (clarsimp simp: validI_def rely_def) + apply (drule put_trace_res) + apply (clarsimp; clarsimp split: nat.split_asm) + done + +lemmas put_trace_elem_twp = put_trace_twp[where xs="[x]" for x, simplified] + +lemma rely_cond_rtranclp: + "rely_cond R s (map (Pair Env) xs) \ rtranclp R s (last_st_tr (map (Pair Env) xs) s)" + apply (induct xs arbitrary: s rule: rev_induct) + apply simp + apply (clarsimp simp add: rely_cond_def) + apply (erule converse_rtranclp_into_rtranclp) + apply simp + done + + +subsection \Misc\ + +lemma rg_gen_asm: + "\P \ \P'\,\R\ f \G\,\Q\; \ P \ prefix_closed f\ \ \P' and (\_ _. P)\,\R\ f \G\,\Q\" + by (auto simp: validI_def) + +lemmas rg_gen_asm_single = rg_gen_asm[where P'="\\", simplified pred_conj_def simp_thms] + +lemma rg_gen_asm_lk: + "\P \ \P'\,\R\ f \G\,\Q\; \ P \ prefix_closed f\ \ \(\_ _. P) and P'\,\R\ f \G\,\Q\" + by (auto simp: validI_def) + +\ \Useful for forward reasoning, when P is known. + The first version allows weakening the precondition.\ +lemma rg_gen_asm_spec': + "\\s0 s. P s0 s \ S \ P' s0 s; S \ \P'\,\R\ f \G\,\Q\; \ S \ prefix_closed f\ + \ \P\,\R\ f \G\,\Q\" + by (auto 6 2 simp: validI_def) + +lemma rg_gen_asm_spec: + "\\s0 s. P s0 s \ S; S \ \P\,\R\ f \G\,\Q\; \ S \ prefix_closed f\ + \ \P\,\R\ f \G\,\Q\" + by (rule rg_gen_asm_spec'[where S=S and P'=P]) simp + +lemma rg_conjI: + "\\P\,\R\ f \G\,\Q\; \P\,\R\ f \G\,\Q'\\ \ \P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\" + unfolding validI_def by auto + +lemma rg_disjI1: + "\\P\,\R\ f \G\,\Q\\ \ \P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\" + unfolding validI_def by blast + +lemma rg_disjI2: + "\\P\,\R\ f \G\,\Q'\\ \ \P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\" + unfolding validI_def by blast + +lemma rg_assume_pre: + "\\s0 s. P s0 s \ \P\,\R\ f \G\,\Q\; prefix_closed f\ \ \P\,\R\ f \G\,\Q\" + by (auto simp: validI_def) + +lemma rg_assume_preE: + "\\s0 s. 
P s0 s \ \P\,\R\ f \G\,\Q\,\E\; prefix_closed f\ \ \P\,\R\ f \G\,\Q\,\E\" + by (auto simp: validI_def validIE_def) + +lemma rg_allI: + "(\x. \P\,\R\ f \G\,\Q x\) \ \P\,\R\ f \G\,\\rv s0 s. \x. Q x rv s0 s\" + by (simp add: validI_def) + +lemma validE_allI: + "(\x. \P\,\R\ f \G\,\\r s. Q x r s\,\E\) \ \P\,\R\ f \G\,\\rv s0 s. \x. Q x rv s0 s\,\E\" + by (fastforce simp: validI_def validIE_def split: sum.splits) + +lemma rg_exI: + "\P\,\R\ f \G\,\Q x\ \ \P\,\R\ f \G\,\\rv s0 s. \x. Q x rv s0 s\" + by (simp add: validI_def) blast + +lemma rg_impI: + "P' \ \P\,\R\ f -,\Q\ \ \P\,\R\ f -,\\rv s0 s. P' \ Q rv s0 s\" + by (simp add: validI_def) + +lemma validIE_impI: + "\\P\,\R\ f \G\,\\_ _ _. True\,\E\; P' \ \P\,\R\ f \G\,\Q\,\E\\ \ + \P\,\R\ f \G\,\\rv s0 s. P' \ Q rv s0 s\,\E\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_case_option_wp: + "\\P\,\R\ f None \G\,\Q\; \x. \P' x\,\R\ f (Some x) \G\,\Q' x\\ + \ \case_option P P' v\,\R\ f v \G\,\\rv. case v of None \ Q rv | Some x \ Q' x rv\" + by (cases v) auto + +lemma rg_case_option_wp2: + "\\P\,\R\ f None \G\,\Q\; \x. \P' x\,\R\ f (Some x) \G\,\Q' x\\ + \ \case_option P P' v\,\R\ f v \G\,\\rv s0 s. case v of None \ Q rv s0 s | Some x \ Q' x rv s0 s\" + by (cases v) auto + +(* Might be useful for forward reasoning, when P is known. *) +lemma rg_when_cases: + "\\s0 s. \\B; P s0 s\ \ Q s0 s; B \ \P\,\R\ f \G\,\\_. Q\\ \ \P\,\R\ when B f \G\,\\_. Q\" + by (cases B; simp add: validI_def prefix_closed_def return_def) + +lemma rg_vcg_prop: + "prefix_closed f \ \\s0 s. P\,\R\ f -,\\rv s0 s. P\" + by (simp add: validI_def) + + +subsection \@{const validI} and @{const validIE}, @{const validIE_R}, @{const validIE_E}\ + +lemma validI_validIE: + "\P\,\R\ f \G\,\\_. Q\ \ \P\,\R\ f \G\,\\_. Q\,\\_. Q\" + by (rule rg_post_imp_dc) + +lemma validI_validIE2: + "\\P\,\R\ f \G\,\\_. Q'\; \s0 s. Q' s0 s \ Q s0 s; \s0 s. Q' s0 s \ E s0 s\ + \ \P\,\R\ f \G\,\\_. Q\, \\_. E\" + unfolding validI_def validIE_def + by (clarsimp split: sum.splits) + +lemma validIE_validI: + "\P\,\R\ f \G\,\\_. Q\,\\_. Q\ \ \P\,\R\ f \G\,\\_. Q\" + unfolding validIE_def + by fastforce + +lemma validI_validIE_R: + "\P\,\R\ f \G\,\\_. Q\ \ \P\,\R\ f \G\,\\_. Q\,-" + by (rule rg_post_imp_dc2) + +lemma validI_validIE_E: + "\P\,\R\ f \G\,\\_. Q\ \ \P\,\R\ f \G\,-,\\_. Q\" + by (rule rg_post_imp_dc2E) + +lemma validIE_eq_validI: + "\P\,\R\ f \G\,\\rv. Q\,\\rv. Q\ = \P\,\R\ f \G\,\\rv. 
Q\" + by (simp add: validIE_def) + + +subsection \@{const liftM}\ + +(*FIXME: make proof nicer*) +lemma rg_liftM_subst: + "\P\,\R\ liftM f m \G\,\Q\ = \P\,\R\ m \G\,\Q \ f\" + apply (clarsimp simp: validI_def) + apply (rule conj_cong, clarsimp) + apply (rule iff_allI)+ + apply (clarsimp simp: liftM_def bind_def' return_def image_def) + apply safe + apply (drule_tac x="map_tmres id f res" in spec) + apply (case_tac res; clarsimp) + apply (auto simp: rely_def split: tmres.splits)[3] + apply (drule_tac x="map_tmres id f (Result (rv, s'))" in spec) + apply (auto simp: rely_def split: tmres.splits) + done + +lemma rg_liftME_subst: + "\P\,\R\ liftME f m \G\,\Q\, \E\ = \P\,\R\ m \G\,\Q \ f\,\E\" + unfolding validIE_def liftME_liftM rg_liftM_subst o_def + by (fastforce intro!: arg_cong[where f="validI P R m G"] split: sum.splits) + +lemma liftE_validIE[simp]: + "\P\,\R\ liftE f \G\,\Q\,\E\ = \P\,\R\ f \G\,\Q\" + by (simp add: liftE_liftM validIE_def rg_liftM_subst o_def) + + +subsection \Operator lifting/splitting\ + +lemma rg_vcg_if_split: + "\P \ \P'\,\R\ f \G\,\Q\; \P \ \P''\,\R\ g \G\,\Q\\ + \ \\s0 s. (P \ P' s0 s) \ (\P \ P'' s0 s)\,\R\ if P then f else g \G\,\Q\" + by simp + +lemma rg_vcg_if_splitE: + "\P \ \P'\,\R\ f \G\,\Q\,\E\; \P \ \P''\,\R\ g \G\,\Q\,\E\\ + \ \\s0 s. (P \ P' s0 s) \ (\P \ P'' s0 s)\,\R\ if P then f else g \G\,\Q\,\E\" + by simp + +lemma rg_vcg_split_case_option: + "\\x. x = None \ \P x\,\R\ f x \G\,\Q x\; \x y. x = Some y \ \P' x y\,\R\ g x y \G\,\Q x\\ + \ \\s0 s. (x = None \ P x s0 s) \ (\y. x = Some y \ P' x y s0 s)\,\R\ + case x of None \ f x | Some y \ g x y + \G\,\Q x\" + by (cases x; simp) + +lemma rg_vcg_split_case_optionE: + "\\x. x = None \ \P x\,\R\ f x \G\,\Q x\,\E x\; \x y. x = Some y \ \P' x y\,\R\ g x y \G\,\Q x\,\E x\\ + \ \\s0 s. (x = None \ P x s0 s) \ (\y. x = Some y \ P' x y s0 s)\,\R\ + case x of None \ f x | Some y \ g x y + \G\,\Q x\,\E x\" + by (cases x; simp) + +lemma rg_vcg_split_case_sum: + "\\x a. x = Inl a \ \P x a\,\R\ f x a \G\,\Q x\; \x b. x = Inr b \ \P' x b\,\R\ g x b \G\,\Q x\\ + \ \\s0 s. (\a. x = Inl a \ P x a s0 s) \ (\b. x = Inr b \ P' x b s0 s)\,\R\ + case x of Inl a \ f x a | Inr b \ g x b + \G\, \Q x\" + by (cases x; simp) + +lemma bind_twp_nobind: + "\\Q'\,\R\ g \G\,\Q\; \P\,\R\ f \G\,\\_. Q'\\ \ \P\,\R\ do f; g od \G\,\Q\" + by (erule bind_twp_fwd) clarsimp + +lemma bindE_twp_nobind: + "\\Q'\,\R\ g \G\,\Q\, \E\; \P\,\R\ f \G\,\\_. Q'\, \E\\ \ \P\,\R\ doE f; g odE \G\,\Q\, \E\" + by (erule bindE_twp_fwd) clarsimp + +lemmas bind_twp_skip = bind_twp[where Q=Q and Q'=Q for Q] + +lemma rg_chain: + "\\P\,\R\ f \G\,\Q\; \s0 s. P' s0 s \ P s0 s; \rv s0 s. Q rv s0 s \ S rv s0 s\ + \ \P'\,\R\ f \G\,\S\" + by (wp_pre, rule rg_post_imp) + +lemma rg_chainE: + "\\P'\,\R\ A \G\,\Q'\,\E'\; \s0 s. P s0 s \ P' s0 s; \rv s0 s. Q' rv s0 s \ Q rv s0 s; + \rv s0 s. E' rv s0 s \ E rv s0 s\ + \ \P\,\R\ A \G\,\Q\,\E\" + by wp_pre (rule rg_post_impE) + +lemma rg_vcg_conj_lift: + "\\P\,\R\ f \G\,\Q\; \P'\,\R\ f \G\,\Q'\\ + \ \\s0 s. P s0 s \ P' s0 s\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\" + unfolding validI_def + by fastforce + +\ \A variant which works nicely with subgoals that do not contain schematics\ +lemmas rg_vcg_conj_lift_pre_fix = rg_vcg_conj_lift[where P=P and P'=P for P, simplified] + +lemma rg_vcg_conj_liftE1: + "\\P\,\R\ f \G\,\Q\,-; \P'\,\R\ f \G\,\Q'\,\E\\ + \ \P and P'\,\R\ f \G\,\\rv s0 s. 
Q rv s0 s \ Q' rv s0 s\,\E\" + unfolding validI_def validIE_def + by (fastforce simp: split_def split: sum.splits) + +lemma rg_vcg_conj_liftE2: + "\\P\,\R\ f \G\,-,\E\; \P'\,\R\ f \G\,\Q\,\E'\\ + \ \P and P'\,\R\ f \G\,\Q\,\\rv s0 s. E rv s0 s \ E' rv s0 s\" + unfolding validI_def validIE_def + by (fastforce simp: split_def split: sum.splits) + +lemma rg_vcg_conj_liftE_weaker: + assumes "\P\,\R\ f \G\,\Q\,\E\" + assumes "\P'\,\R\ f \G\,\Q'\,\E\" + shows "\\s0 s. P s0 s \ P' s0 s\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\,\E\" + apply (rule rg_pre) + apply (fastforce intro: assms rg_vcg_conj_liftE1 validE_validE_R rg_post_impE) + apply simp+ + done + +lemma rg_vcg_disj_lift: + "\\P\,\R\ f \G\,\Q\; \P'\,\R\ f \G\,\Q'\\ + \ \\s0 s. P s0 s \ P' s0 s\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\" + unfolding validI_def + by fastforce + +lemma rg_vcg_disj_lift_R: + assumes x: "\P\,\R\ f \G\,\Q\,-" + assumes y: "\P'\,\R\ f \G\,\Q'\,-" + shows "\\s0 s. P s0 s \ P' s0 s\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\,-" + using assms + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_vcg_const_Ball_lift: + "\\x. x \ S \ \P x\,\R\ f -,\Q x\; \P'\,\R\ f \G\,\\\\\\ + \ \\s0 s. (\x\S. P x s0 s) \ P' s0 s\,\R\ f \G\,\\rv s0 s. \x\S. Q x rv s0 s\" + by (fastforce simp: validI_def) + +lemma rg_vcg_const_Ball_liftE: + "\\x. x \ S \ \P x\,\R\ f -,\Q x\,\E\; \P'\,\R\ f \G\,-, \E\\ + \ \\s0 s. (\x\S. P x s0 s) \ P' s0 s\,\R\ f \G\,\\rv s0 s. \x\S. Q x rv s0 s\,\E\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemmas rg_vcg_const_Ball_liftE_R = rg_vcg_const_Ball_liftE[where E="\\\", simplified validIE_eq_validI] + +lemma rg_vcg_const_Ball_liftE_E: + "\\x. x \ S \ \P x\,\R\ f -,-,\E x\; \P'\,\R\ f \G\,\\\\\\ + \ \\s0 s.(\x\S. P x s0 s) \ P' s0 s\,\R\ f \G\,-,\\rv s0 s. \x \ S. E x rv s0 s\" + unfolding validIE_def + by (rule rg_strengthen_post) + (fastforce intro!: rg_vcg_const_Ball_lift split: sum.splits)+ + +lemmas rg_vcg_const_Ball_lift_T = rg_vcg_const_Ball_lift[where G="\\" and P'="\\", simplified] +lemmas rg_vcg_const_Ball_liftE_R_T = rg_vcg_const_Ball_liftE_R[where G="\\" and P'="\\", simplified] +lemmas rg_vcg_const_Ball_liftE_E_T = rg_vcg_const_Ball_liftE_E[where G="\\" and P'="\\", simplified] + +lemma rg_vcg_all_lift: + "\\x. \P x\,\R\ f \G\,\Q x\\ \ \\s0 s. \x. P x s0 s\,\R\ f \G\,\\rv s0 s. \x. Q x rv s0 s\" + by (fastforce simp: validI_def) + +lemma rg_vcg_all_liftE_R: + "\\x. \P x\,\R\ f \G\,\Q x\,\E\\ \ \\s0 s. \x. P x s0 s\,\R\ f \G\,\\rv s0 s. \x. Q x rv s0 s\,\E\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_vcg_all_liftE_E: + "\\x. \P x\,\R\ f \G\,\Q\,\E x\\ \ \\s0 s. \x. P x s0 s\,\R\ f \G\,\Q\,\\rv s0 s. \x. E x rv s0 s\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_vcg_imp_lift: + "\\P'\,\R\ f \G\,\\rv s0 s. \ P rv s0 s\; \Q'\,\R\ f \G\,\Q\\ + \ \\s0 s. P' s0 s \ Q' s0 s\,\R\ f \G\,\\rv s0 s. P rv s0 s \ Q rv s0 s\" + by (simp only: imp_conv_disj) (rule rg_vcg_disj_lift) + +lemma rg_vcg_imp_lift': + "\\P'\,\R\ f \G\,\\rv s0 s. \ P rv s0 s\; \Q'\,\R\ f \G\,\Q\\ + \ \\s0 s. \ P' s0 s \ Q' s0 s\,\R\ f \G\,\\rv s0 s. P rv s0 s \ Q rv s0 s\" + by (wpsimp wp: rg_vcg_imp_lift) + +lemma rg_vcg_imp_liftE: + "\\P'\,\R\ f \G\,\\rv s0 s. \ P rv s0 s\,\E\; \Q'\,\R\ f \G\,\Q\,\E\\ + \ \\s0 s. P' s0 s \ Q' s0 s\,\R\ f \G\,\\rv s0 s. P rv s0 s \ Q rv s0 s\,\E\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_vcg_imp_liftE': + "\\P'\,\R\ f \G\,\\rv s0 s. 
\ P rv s0 s\,\E\; \Q'\,\R\ f \G\,\Q\,\E\\ + \ \\s0 s. \ P' s0 s \ Q' s0 s\,\R\ f \G\,\\rv s0 s. P rv s0 s \ Q rv s0 s\,\E\" + by (wpsimp wp: rg_vcg_imp_liftE) + +lemma rg_vcg_imp_liftE_E: + "\\P'\,\R\ f \G\,\Q\,\\rv s0 s. \ P rv s0 s\; \Q'\,\R\ f \G\,\Q\,\E\\ + \ \\s0 s. P' s0 s \ Q' s0 s\,\R\ f \G\,\Q\,\\rv s0 s. P rv s0 s \ E rv s0 s\" + by (auto simp add: validI_def validIE_def split: sum.splits) + +lemma rg_vcg_imp_liftE_E': + "\\P'\,\R\ f \G\,\Q\,\\rv s0 s. \ P rv s0 s\; \Q'\,\R\ f \G\,\Q\,\E\\ + \ \\s0 s. \ P' s0 s \ Q' s0 s\,\R\ f \G\,\Q\,\\rv s0 s. P rv s0 s \ E rv s0 s\" + by (wpsimp wp: rg_vcg_imp_liftE_E) + +lemma rg_vcg_imp_conj_lift[wp_comb]: + "\\P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\; \P'\,\R\ f \G\,\\rv s0 s. (Q rv s0 s \ Q'' rv s0 s) \ Q''' rv s0 s\\ + \ \P and P'\,\R\ f \G\,\\rv s0 s. (Q rv s0 s \ Q' rv s0 s \ Q'' rv s0 s) \ Q''' rv s0 s\" + by (auto simp: validI_def) + +lemmas rg_vcg_imp_conj_lift'[wp_unsafe] = rg_vcg_imp_conj_lift[where Q'''="\\\", simplified] + +lemma rg_absorb_imp: + "\P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\ \ \P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\" + by (erule rg_post_imp[rotated], blast) + +lemma rg_weaken_imp: + "\\rv s0 s. Q rv s0 s \ Q' rv s0 s ; \P\,\R\ f \G\,\\rv s0 s. Q' rv s0 s \ S rv s0 s\\ + \ \P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ S rv s0 s\" + by (clarsimp simp: validI_def split_def) + +lemma rg_vcg_const_imp_lift: + "\P \ \P'\,\R\ f -,\Q\; \P''\,\R\ f \G\,\\\\\\ + \ \\s0 s. (P \ P' s0 s) \ P'' s0 s\,\R\ f \G\,\\rv s0 s. P \ Q rv s0 s\" + apply (cases P; simp) + apply wp_pre + apply (rule validI_split; assumption) + apply clarsimp+ + done + +lemma rg_vcg_const_imp_liftE_E: + "\P \ \P'\,\R\ f -,-,\E\; \P''\,\R\ f \G\,\\\\\\ + \ \\s0 s. (P \ P' s0 s) \ P'' s0 s\,\R\ f \G\,-,\\rv s0 s. P \ E rv s0 s\" + unfolding validIE_def + by (rule rg_strengthen_post) + (fastforce intro!: rg_vcg_const_imp_lift split: sum.splits)+ + +lemma rg_vcg_const_imp_liftE_R: + "\P \ \P'\,\R\ f -,\Q\,-; \P''\,\R\ f \G\,\\\\\\ + \ \\s0 s. (P \ P' s0 s) \ P'' s0 s\,\R\ f \G\,\\rv s0 s. P \ Q rv s0 s\,-" + unfolding validIE_def + by (rule rg_strengthen_post) + (fastforce intro!: rg_vcg_const_imp_lift split: sum.splits)+ + +(*FIXME MC: not clear whether we want these _T variants, and if we do whether they should be in the + wp set along with the above rules*) +lemmas rg_vcg_const_imp_lift_T = rg_vcg_const_imp_lift[where G="\\" and P''="\\", simplified] +lemmas rg_vcg_const_imp_liftE_E_T = rg_vcg_const_imp_liftE_E[where G="\\" and P''="\\", simplified] +lemmas rg_vcg_const_imp_liftE_R_T = rg_vcg_const_imp_liftE_R[where G="\\" and P''="\\", simplified] + +lemma rg_weak_lift_imp: + "\\P'\,\R\ f -,\Q\; \P''\,\R\ f \G\,\\\\\\ + \ \\s0 s. (P \ P' s0 s) \ P'' s0 s\,\R\ f \G\,\\rv s0 s. P \ Q rv s0 s\" + by (auto simp: validI_def split_def) + +lemma rg_weak_lift_impE: + "\\P'\,\R\ f -,\Q\,\E\; \P''\,\R\ f \G\,\\\\\\ + \ \\s0 s. (P \ P' s0 s) \ P'' s0 s\,\R\ f \G\,\\rv s0 s. P \ Q rv s0 s\,\\rv s0 s. P \ E rv s0 s\" + unfolding validIE_def + by (rule rg_strengthen_post) + (fastforce intro!: rg_weak_lift_imp split: sum.splits)+ + +lemma rg_weak_lift_impE_R: + "\\P'\,\R\ f -,\Q\,-; \P''\,\R\ f \G\,\\\\\\ + \ \\s0 s. (P \ P' s0 s) \ P'' s0 s\,\R\ f \G\,\\rv s0 s. 
P \ Q rv s0 s\,-" + unfolding validIE_def + by (rule rg_strengthen_post) + (fastforce intro!: rg_weak_lift_imp split: sum.splits)+ + +lemmas rg_weak_lift_imp_T = rg_weak_lift_imp[where G="\\" and P''="\\", simplified] +lemmas rg_weak_lift_impE_T = rg_weak_lift_impE[where G="\\" and P''="\\", simplified] +lemmas rg_weak_lift_impE_R_T = rg_weak_lift_impE_R[where G="\\" and P''="\\", simplified] + +lemma rg_vcg_ex_lift: + "\\x. \P x\,\R\ f \G\,\Q x\\ \ \\s0 s. \x. P x s0 s\,\R\ f \G\,\\rv s0 s. \x. Q x rv s0 s\" + by (clarsimp simp: validI_def, blast) + +lemma rg_vcg_ex_liftE: + "\\x. \P x\,\R\ f \G\,\Q x\,\E\\ \ \\s0 s. \x. P x s0 s\,\R\ f \G\,\\rv s0 s. \x. Q x rv s0 s\,\E\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_vcg_ex_liftE_E: + "\\x. \P x\,\R\ f \G\,-,\E x\\ \ \\s0 s. \x. P x s0 s\,\R\ f \G\,-,\\rv s0 s. \x. E x rv s0 s\" + by (fastforce simp: validIE_def validI_def split: sum.splits) + +lemma rg_vcg_ex_lift_R1: + "\\x. \P x\,\R\ f \G\,\Q\,-\ \ \\s0 s. \x. P x s0 s\,\R\ f \G\,\Q\,-" + by (fastforce simp: validI_def validIE_def split: sum.splits) + +lemma rg_liftP_ext: + assumes "\P x. m \R\,\G\,\\s0 s. P s0 (f s x)\" + shows "m \R\,\G\,\\s0 s. P s0 (f s)\" + unfolding validI_def + apply (rule conjI, rule validI_prefix_closed[OF assms]) + apply clarsimp + apply (rule conjI, clarsimp simp: rely_def, erule (2) validI_GD[OF assms]) + apply clarsimp + apply (erule subst2[rotated 2, where P=P]) + apply (drule use_validI, rule assms, rule refl) + apply simp + apply (rule ext) + apply (drule use_validI, rule assms, rule refl) + apply simp + done + +(* for instantiations *) +lemma rg_triv: "\P\,\R\f\G\,\Q\ \ \P\,\R\f\G\,\Q\" . +lemma rg_trivE: "\P\,\R\ f \G\,\Q\,\E\ \ \P\,\R\ f \G\,\Q\,\E\" . +lemma rg_trivE_R: "\P\,\R\ f \G\,\Q\,- \ \P\,\R\ f \G\,\Q\,-" . +lemma rg_trivR_R: "\P\,\R\ f \G\,-,\E\ \ \P\,\R\ f \G\,-,\E\" . + +lemma rg_vcg_E_conj: + "\\P\,\R\ f \G\,-,\E\; \P'\,\R\ f \G\,\Q\,\E'\\ + \ \\s0 s. P s0 s \ P' s0 s\,\R\ f \G\,\Q\,\\rv s0 s. E rv s0 s \ E' rv s0 s\" + unfolding validIE_def + by (rule rg_post_imp[OF _ rg_vcg_conj_lift]; simp split: sum.splits) + +lemma rg_vcg_E_elim: + "\\P\,\R\ f \G\,-,\E\; \P'\,\R\ f \G\,\Q\,-\ \ \\s0 s. P s0 s \ P' s0 s\,\R\ f \G\,\Q\,\E\" + by (rule rg_strengthen_postE[OF rg_vcg_E_conj]) simp+ + +lemma rg_strengthen_post_R: + "\ \P\,\R\ f \G\,\Q'\,-; \rv s0 s. Q' rv s0 s \ Q rv s0 s \ \ \P\,\R\ f \G\,\Q\,-" + by (erule rg_post_impE) + +lemma rg_strengthen_post_E: + "\ \P\,\R\ f \G\,-,\Q'\; \rv s0 s. Q' rv s0 s \ Q rv s0 s \ \ \P\,\R\ f \G\,-,\Q\" + by (rule rg_post_impE) + +lemma rg_post_comb_imp_conj: + "\\P'\,\R\ f \G\,\Q\; \P\,\R\ f \G\,\Q'\; \s0 s. P s0 s \ P' s0 s\ + \ \P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\" + by (wpsimp wp: rg_vcg_conj_lift) + +lemma rg_vcg_if_lift: + "\R\,\R\ f \G\,\\rv s0 s. (P \ X rv s0 s) \ (\P \ Y rv s0 s)\ \ + \R\,\R\ f \G\,\\rv s0 s. if P then X rv s0 s else Y rv s0 s\" + + "\R\,\R\ f \G\,\\rv s0 s. (P \ X rv s0 s) \ (\P \ Y rv s0 s)\ \ + \R\,\R\ f \G\,\\rv. 
if P then X rv else Y rv\" + by (auto simp: validI_def split_def) + +lemma rg_vcg_split_lift[wp]: + "\P\,\R\ f x y \G\,\Q\ \ \P\,\R\ case (x, y) of (a, b) \ f a b \G\,\Q\" + by simp + +named_theorems rg_vcg_op_lift +lemmas [rg_vcg_op_lift] = + rg_vcg_const_imp_lift + rg_vcg_const_imp_liftE_E + rg_vcg_const_imp_liftE_R + (* leaving out rg_vcg_conj_lift*, because that is built into wp *) + rg_vcg_disj_lift + rg_vcg_disj_lift_R + rg_vcg_ex_lift + rg_vcg_ex_liftE + rg_vcg_ex_liftE_E + rg_vcg_all_lift + rg_vcg_all_liftE_R + rg_vcg_all_liftE_E + rg_vcg_const_Ball_lift + rg_vcg_const_Ball_liftE + rg_vcg_const_Ball_liftE_R + rg_vcg_const_Ball_liftE_E + rg_vcg_split_lift + rg_vcg_if_lift + rg_vcg_imp_lift' + rg_vcg_imp_liftE' + rg_vcg_imp_liftE_E' + + +subsection \Weakest Precondition Rules\ + +lemma valid_validI_wp: + "\no_trace f; \s0. \P s0\ f \\v. Q v s0 \\ + \ \P\,\R\ f \G\,\Q\" + by (clarsimp simp: validI_valid_no_trace_eq) + +lemma validE_validIE_wp: + "\no_trace f; \s0. \P s0\ f \\v. Q v s0 \,\\v. E v s0\\ + \ \P\,\R\ f \G\,\Q\,\E\" + by (clarsimp simp: validIE_validE_no_trace_eq) + +lemmas valid_validI_lifts[wp] = no_trace_terminal[THEN valid_validI_wp] + +lemmas validE_validIE_lifts[wp] = + no_trace_fail[THEN validE_validIE_wp] no_trace_returnOk[THEN validE_validIE_wp] + no_trace_assertE[THEN validE_validIE_wp] no_trace_throwError[THEN validE_validIE_wp] + no_trace_throw_opt[THEN validE_validIE_wp] + +lemma liftE_twp: + "\P\,\R\ f \G\,\Q\ \ \P\,\R\ liftE f \G\,\Q\,\E\" + by simp + +lemma catch_twp: + "\ \x. \E x\,\R\ handler x \G\,\Q\; \P\,\R\ f \G\,\Q\,\E\ \ + \ \P\,\R\ catch f handler \G\,\Q\" + unfolding validI_def validIE_def + apply (rule conjI, clarsimp) + unfolding catch_def return_def rely_def bind_def + apply (fastforce simp: rely_cond_append guar_cond_append + split: sum.splits tmres.splits) + done + +lemma handleE'_twp: + "\ \x. \F x\,\R\ handler x \G\,\Q\,\E\; \P\,\R\ f \G\,\Q\,\F\ \ + \ \P\,\R\ f handler \G\,\Q\,\E\" + unfolding validI_def validIE_def + apply (rule conjI, clarsimp) + unfolding handleE'_def return_def rely_def bind_def + apply (fastforce simp: rely_cond_append guar_cond_append + split: sum.splits tmres.splits) + done + +lemma handleE_twp: + assumes x: "\x. \F x\,\R\ handler x \G\,\Q\,\E\" + assumes y: "\P\,\R\ f \G\,\Q\,\F\" + shows "\P\,\R\ f handler \G\,\Q\,\E\" + by (simp add: handleE_def handleE'_twp[OF x y]) + +lemma liftM_twp: + "\P\,\R\ m \G\,\Q \ f\ \ \P\,\R\ liftM f m \G\,\Q\" + by (simp add: rg_liftM_subst) + +lemma liftME_twp: + "\P\,\R\ m \G\,\Q \ f\,\E\ \ \P\,\R\ liftME f m \G\,\Q\,\E\" + by (simp add: rg_liftME_subst) + +lemma list_cases_twp: + assumes a: "\P_A\,\R\ a \G\,\Q\" + assumes b: "\x xs. ts = x#xs \ \P_B x xs\,\R\ b x xs \G\,\Q\" + shows "\case_list P_A P_B ts\,\R\ case ts of [] \ a | x # xs \ b x xs \G\,\Q\" + by (cases ts, auto simp: a b) + +lemma rg_vcg_handle_elseE: + "\ \P\,\R\ f \G\,\Q'\,\E'\; \e. \E' e\,\R\ g e \G\,\Q\,\E\; \x. 
\Q' x\,\R\ h x \G\,\Q\,\E\ \ + \ \P\,\R\ f g h \G\,\Q\,\E\" + unfolding handle_elseE_def validIE_def + by (wpsimp wp: bind_twp_fwd | assumption | rule conjI)+ + +lemma alternative_twp: + assumes x: "\P\,\R\ f \G\,\Q\" + assumes y: "\P'\,\R\ f' \G\,\Q\" + shows "\P and P'\,\R\ f \ f' \G\,\Q\" + unfolding validI_def + apply (rule conjI, fastforce simp: validI_prefix_closed[OF x] validI_prefix_closed[OF y]) + by (fastforce simp: alternative_def post_by_rg[OF x] post_by_rg[OF y] guar_by_rg[OF x] guar_by_rg[OF y]) + +lemma alternativeE_twp: + assumes "\P\,\R\ f \G\,\Q\,\E\" + assumes "\P'\,\R\ f' \G\,\Q\,\E\" + shows "\P and P'\,\R\ f \ f' \G\,\Q\,\E\" + unfolding validIE_def + by (wpsimp wp: assms alternative_twp | fold validIE_def)+ + +lemma condition_twp: + "\ \P\,\R\ A \G\,\Q\; \P'\,\R\ B \G\,\Q\ \ + \ \\s0 s. if C s then P s0 s else P' s0 s\,\R\ condition C A B \G\,\Q\" + by (auto simp: condition_def validI_def prefix_closed_def) + +lemma conditionE_twp: + "\ \P\,\R\ A \G\,\Q\,\E\; \P'\,\R\ B \G\,\Q\,\E\ \ + \ \\s0 s. if C s then P s0 s else P' s0 s\,\R\ condition C A B \G\,\Q\,\E\" + by (clarsimp simp: validIE_def condition_twp) + +lemma when_twp[wp_split]: + "\ P \ \P'\,\R\ f \G\,\Q\ \ \ \if P then P' else Q ()\,\R\ when P f \G\,\Q\" + unfolding when_def by wpsimp + +lemma unless_twp[wp_split]: + "(\P \ \P'\,\R\ f \G\,\Q\) \ \if P then Q () else P'\,\R\ unless P f \G\,\Q\" + unfolding unless_def by wpsimp (simp split: if_splits)+ + +lemma whenE_twp: + "(P \ \P'\,\R\ f \G\,\Q\,\E\) \ \if P then P' else Q ()\,\R\ whenE P f \G\,\Q\,\E\" + unfolding whenE_def by wpsimp + +lemma unlessE_twp: + "(\ P \ \P'\,\R\ f \G\,\Q\,\E\) \ \if P then Q () else P'\,\R\ unlessE P f \G\,\Q\,\E\" + unfolding unlessE_def by wpsimp + +lemma maybeM_twp: + "(\x. y = Some x \ \P x\,\R\ m x \G\,\Q\) \ + \\s0 s. (\x. y = Some x \ P x s0 s) \ (y = None \ Q () s0 s)\,\R\ maybeM m y \G\,\Q\" + unfolding maybeM_def by wpsimp (simp split: if_splits)+ + +lemma notM_twp: + "\P\,\R\ m \G\,\\c. Q (\ c)\ \ \P\,\R\ notM m \G\,\Q\" + unfolding notM_def by wpsimp + +lemma ifM_twp: + assumes [wp]: "\Q\,\R\ f \G\,\S\" "\Q'\,\R\ g \G\,\S\" + assumes [wp]: "\A\,\R\ P \G\,\\c s0 s. c \ Q s0 s\" "\B\,\R\ P \G\,\\c s0 s. \c \ Q' s0 s\" + shows "\A and B\,\R\ ifM P f g \G\,\S\" + unfolding ifM_def + by (wpsimp wp: rg_vcg_if_split rg_vcg_conj_lift) + +lemma andM_twp: + assumes [wp]: "\Q'\,\R\ B \G\,\Q\" + assumes [wp]: "\P\,\R\ A \G\,\\c s0 s. c \ Q' s0 s\" "\P'\,\R\ A \G\,\\c s0 s. \ c \ Q False s0 s\" + shows "\P and P'\,\R\ andM A B \G\,\Q\" + unfolding andM_def by (wpsimp wp: ifM_twp) + +lemma orM_twp: + assumes [wp]: "\Q'\,\R\ B \G\,\Q\" + assumes [wp]: "\P\,\R\ A \G\,\\c s0 s. c \ Q True s0 s\" "\P'\,\R\ A \G\,\\c s0 s. \ c \ Q' s0 s\" + shows "\P and P'\,\R\ orM A B \G\,\Q\" + unfolding orM_def by (wp ifM_twp) + +lemma whenM_twp: + assumes [wp]: "\Q\,\R\ f \G\,\S\" + assumes [wp]: "\A\,\R\ P \G\,\\c s0 s. c \ Q s0 s\" "\B\,\R\ P \G\,\\c s0 s. \c \ S () s0 s\" + shows "\A and B\,\R\ whenM P f \G\,\S\" + unfolding whenM_def by (wp ifM_twp) + +lemma rg_K_bind[wp_split]: + "\P\,\R\ f \G\,\Q\ \ \P\,\R\ K_bind f x \G\,\Q\" + by simp + +lemma validE_K_bind[wp_split]: + "\P\,\R\ x \G\,\Q\, \E\ \ \P\,\R\ K_bind x f \G\,\Q\,\E\" + by simp + +lemma rg_fun_app_twp: + "\P\,\R\ f' x \G\,\Q'\ \ \P\,\R\ f' $ x \G\,\Q'\" + "\P\,\R\ f x \G\,\Q\,\E\ \ \P\,\R\ f $ x \G\,\Q\,\E\" + by simp+ + +lemma case_option_twp: + "\ \x. \P x\,\R\ m x \G\,\Q\; \P'\,\R\ m' \G\,\Q\ \\ + \\s0 s. 
(x = None \ P' s0 s) \ (x \ None \ P (the x) s0 s)\,\R\ case_option m' m x \G\,\Q\" + by (cases x; simp) + +lemma case_option_twpE: + "\ \x. \P x\,\R\ m x \G\,\Q\,\E\; \P'\,\R\ m' \G\,\Q\,\E\ \ \ + \\s0 s. (x = None \ P' s0 s) \ (x \ None \ P (the x) s0 s)\,\R\ case_option m' m x \G\,\Q\,\E\" + by (cases x; simp) + +(* FIXME: make wp *) +lemma whenE_throwError_twp: + "\\s0 s. \P \ Q s0 s\,\R\ whenE P (throwError e) \G\,\\_. Q\,-" + by (simp add: whenE_def returnOk_def throwError_def return_def validIE_def validI_def prefix_closed_def) + +(*FIXME MC: not used, worth updating for validI or just delete? +lemma select_throwError_twp: + "\\s0 s. \x\S. Q x s0 s\,\R\ select S >>= throwError \G\,-,\Q\" + by (simp add: bind_def throwError_def return_def select_def validIE_def validI_def prefix_closed_def)*) + +(*FIXME MC: explore adding a rely_preserves definition for the first part of this precondition*) +lemma env_steps_twp[wp]: + "\\s0 s. (\s'. R\<^sup>*\<^sup>* s0 s' \ Q () s' s') \ Q () s0 s\,\R\ env_steps \G\,\Q\" + apply (simp add: env_steps_def) + apply wp + apply (clarsimp simp: guar_cond_def trace_steps_rev_drop_nth rev_nth) + apply (drule rely_cond_rtranclp) + apply (clarsimp simp add: last_st_tr_def hd_append) + done + +lemma interference_twp[wp]: + "\\s0 s. (\s'. R\<^sup>*\<^sup>* s s' \ Q () s' s') \ G s0 s\,\R\ interference \G\,\Q\" + apply (simp add: interference_def commit_step_def del: put_trace.simps) + apply wp + apply clarsimp + apply (simp add: drop_Cons nat.split rely_cond_def guar_cond_def) + done + +(* what Await does if we haven't committed our step is a little + strange. this assumes we have, which means s0 = s. we should + revisit this if we find a use for Await when this isn't the + case *) +lemma Await_sync_twp: + "\\s0 s. s = s0 \ (\x. R\<^sup>*\<^sup>* s0 x \ c x \ Q () x x)\,\R\ Await c \G\,\Q\" + apply (simp add: Await_def split_def) + apply wp + apply clarsimp + apply (clarsimp simp: guar_cond_def trace_steps_rev_drop_nth rev_nth) + apply (drule rely_cond_rtranclp) + apply (simp add: o_def) + done + + +subsection \Setting up the @{method wp} method\ + +(* Attempt to define triple_judgement to use valid_validI_wp as wp_comb rule. + It doesn't work. It seems that wp_comb rules cannot take more than 1 assumption *) +lemma validI_is_triple: + "\P\,\R\ f \G\,\Q\ + = triple_judgement (\(s0,s). prefix_closed f \ P s0 s) f + (\(s0,s) f. prefix_closed f \ (\tr res. (tr, res) \ rely f R s0 s + \ guar_cond G s0 tr + \ (\rv s'. res = Result (rv, s') \ Q rv (last_st_tr tr s0) s')))" + apply (simp add: triple_judgement_def validI_def ) + apply (cases "prefix_closed f"; fastforce) + done + +lemma validIE_is_triple: + "\P\,\R\ f \G\,\Q\,\E\ + = triple_judgement (\(s0,s). prefix_closed f \ P s0 s) f + (\(s0,s) f. prefix_closed f \ (\tr res. (tr, res) \ rely f R s0 s + \ guar_cond G s0 tr + \ (\rv s'. 
res = Result (rv, s') + \ (case rv of Inr b \ Q b (last_st_tr tr s0) s' + | Inl a \ E a (last_st_tr tr s0) s'))))" + by (fastforce simp: validIE_def2 triple_judgement_def) + +lemmas rg_wp_combs = rg_vcg_conj_lift + +lemmas rg_wp_combsE = + rg_vcg_conj_liftE1 + rg_vcg_conj_liftE2 + rg_vcg_E_elim + +lemmas rg_wp_state_combsE = + validI_validIE_R + rg_vcg_conj_liftE1[OF validI_validIE_R] + rg_vcg_E_elim[OF validI_validIE_E] + rg_vcg_conj_liftE2[OF validI_validIE_E] + +lemmas rg_classic_wp_combs = rg_post_comb_imp_conj rg_weaken_pre rg_wp_combs +lemmas rg_classic_wp_combsE = rg_weaken_preE rg_wp_combsE + +lemmas rg_classic_wp_state_combsE = + rg_weaken_preE[OF validI_validIE] + rg_wp_state_combsE + +lemmas all_rg_classic_wp_combs = + rg_classic_wp_state_combsE + rg_classic_wp_combsE + rg_classic_wp_combs + +lemmas rg_wp_splits[wp_split] = + handleE'_twp handleE_twp + catch_twp rg_vcg_if_split rg_vcg_if_splitE + liftM_twp liftME_twp whenE_twp unlessE_twp + validIE_validI + +lemmas [wp_comb] = rg_wp_state_combsE rg_wp_combsE rg_wp_combs + +(* Add these rules to wp first to control when they are applied. We want them used last, only when + no other more specific wp rules apply. + bind_twp, bindE_twp and bind_twpE are wp rules instead of wp_split rules because + they should be used before other wp_split rules, and in combination with wp_comb rules when + necessary. + rg_vcg_prop is unsafe in certain circumstances but still useful to have applied automatically, + so we make it the very last rule to be tried. *) +lemmas [wp] = rg_vcg_prop bind_twp bindE_twp bind_twpE + +(* rules towards the bottom will be matched first *) +lemmas [wp] = twp_post_tauts + rg_fun_app_twp + liftE_twp + alternative_twp + alternativeE_twp + condition_twp + conditionE_twp + maybeM_twp notM_twp ifM_twp andM_twp orM_twp whenM_twp + +lemmas [wp_trip] = validI_is_triple validIE_is_triple + + +subsection \Simplifications on conjunction\ + +lemma rg_post_eq: + "\ Q = Q'; \P\,\R\ f \G\,\Q'\ \ \ \P\,\R\ f \G\,\Q\" + by simp + +lemma rg_post_eqE1: + "\ Q = Q'; \P\,\R\ f \G\,\Q'\,\E\ \ \ \P\,\R\ f \G\,\Q\,\E\" + by simp + +lemma rg_post_eqE2: + "\ E = E'; \P\,\R\ f \G\,\Q\,\E'\ \ \ \P\,\R\ f \G\,\Q\,\E\" + by simp + +lemma pred_conj_apply_elim': + "(\rv. Q rv and Q' rv) = (\rv s0 s. Q rv s0 s \ Q' rv s0 s)" + by (simp add: pred_conj_def) + +lemma pred_conj_conj_elim': + "(\rv s0 s. (Q rv and Q' rv) s0 s \ Q'' rv s0 s) = (\rv s0 s. Q rv s0 s \ Q' rv s0 s \ Q'' rv s0 s)" + by simp + +lemma conj_assoc_apply': + "(\rv s0 s. (Q rv s0 s \ Q' rv s0 s) \ Q'' rv s0 s) = (\rv s0 s. Q rv s0 s \ Q' rv s0 s \ Q'' rv s0 s)" + by simp + +lemma all_elim': + "(\rv s0 s. \x. P rv s0 s) = P" + by simp + +lemma all_conj_elim': + "(\rv s0 s. (\x. P rv s0 s) \ Q rv s0 s) = (\rv s0 s. P rv s0 s \ Q rv s0 s)" + by simp + +lemmas rg_vcg_rhs_simps = + pred_conj_apply_elim' pred_conj_conj_elim' conj_assoc_apply' all_elim' all_conj_elim' + +lemma if_apply_reductI: + "\P\,\R\ If P' (f x) (g x) \G\,\Q\ \ \P\,\R\ If P' f g x \G\,\Q\" + by (cases P'; simp) + +lemma if_apply_reductIE: + "\P\,\R\ If P' (f x) (g x) \G\,\Q\,\E\ \ \P\,\R\ If P' f g x \G\,\Q\,\E\" + by (cases P'; simp) + +lemmas rg_wp_simps[wp_split] = + rg_vcg_rhs_simps[THEN rg_post_eq] rg_vcg_rhs_simps[THEN rg_post_eqE1] + rg_vcg_rhs_simps[THEN rg_post_eqE2] + if_apply_reductI if_apply_reductIE TrueI + +schematic_goal rg_if_apply_test: + "\?Q\,\R\ (if A then returnOk else K fail) x \G\,\P\,\E\" + by wpsimp + +lemma rg_elim_pred_conj: + "\P\,\R\ f \G\,\\rv s0 s. 
Q rv s0 s \ Q' rv s0 s\ \ \P\,\R\ f \G\,\\rv. Q rv and Q' rv\" + by (unfold pred_conj_def) + +lemma rg_elim_pred_conjE1: + "\P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\,\E\ \ \P\,\R\ f \G\,\\rv. Q rv and Q' rv\,\E\" + by (unfold pred_conj_def) + +lemma rg_elim_pred_conjE2: + "\P\,\R\ f \G\,\Q\,\\rv s0 s. E rv s0 s \ E' rv s0 s\ \ \P\,\R\ f \G\,\Q\,\\rv. E rv and E' rv\" + by (unfold pred_conj_def) + +lemmas rg_wp_pred_conj_elims = + rg_elim_pred_conj rg_elim_pred_conjE1 rg_elim_pred_conjE2 + + +subsection \Bundles\ + +bundle no_rg_pre = rg_pre [wp_pre del] + +bundle classic_twp_pre = rg_pre [wp_pre del] + all_rg_classic_wp_combs[wp_comb del] all_rg_classic_wp_combs[wp_comb] + + +text \Miscellaneous lemmas on rg quintuples\ + +lemma rg_pre_cases: + "\ \\s0 s. C s0 s \ P s0 s\,\R\ f \G\,\Q\; \\s0 s. \C s0 s \ P' s0 s\,\R\ f \G\,\Q\ \ + \ \P and P'\,\R\ f \G\,\Q\" + unfolding validI_def by fastforce + +lemma rg_vcg_mp: + "\ \P\,\R\ f \G\,\Q\; \P\,\R\ f \G\,\\r s0 s. Q r s0 s \ Q' r s0 s\ \ \ \P\,\R\ f \G\,\Q'\" + by (auto simp: validI_def) + +(* note about this precond stuff: rules get a chance to bind directly + before any of their combined forms. As a result, these precondition + implication rules are only used when needed. *) +lemma rg_add_post: + "\ \P'\,\R\ f \G\,\Q'\; \s0 s. P s0 s \ P' s0 s; \P\,\R\ f \G\,\\rv s0 s. Q' rv s0 s \ Q rv s0 s\ \ + \ \P\,\R\ f \G\,\Q\" + unfolding validI_def by fastforce + +lemma rg_gen_asmE: + "\P \ \P'\,\R\ f \G\,\Q\,\E\; \ P \ prefix_closed f\ + \ \P' and (\_ _. P)\,\R\ f \G\,\Q\,\E\" + by (simp add: validIE_def validI_def) blast + +lemma rg_list_case: + "\ \P1\,\R\ f f1 \G\,\Q\; \y ys. xs = y#ys \ \P2 y ys\,\R\ f (f2 y ys) \G\,\Q\ \ \ + \case xs of [] \ P1 | y#ys \ P2 y ys\,\R\ f (case xs of [] \ f1 | y#ys \ f2 y ys) \G\,\Q\" + by (cases xs; simp) + +lemma rg_use_eq: + assumes "\P. \\s0 s. P (f s)\,\R\ m \G\,\\_ s0 s. P (f s)\" + assumes "\f. \\s0 s. P f s0 s\,\R\ m \G\,\\_ s0 s. Q f s0 s\" + shows "\\s0 s. P (f s) s0 s\,\R\ m \G\,\\_ s0 s. Q (f s) s0 s \" + apply (rule rg_post_imp[where Q'="\_ s0 s. \y. y = f s \ Q y s0 s"], simp) + apply (wpsimp wp: rg_vcg_ex_lift assms) + done + +lemma rg_validE_pred_conj: + "\ \P\,\R\ f \G\,\Q\,\E\; \P\,\R\ f \G\,\Q'\,\E\ \ \ \P\,\R\ f \G\,\Q and Q'\,\E\" + unfolding validI_def validIE_def + by (simp split: sum.splits) + +lemma rg_validE_conj: + "\ \P\,\R\ f \G\,\Q\,\E\; \P\,\R\ f \G\,\Q'\,\E\ \ \ \P\,\R\ f \G\,\\rv s0 s. Q rv s0 s \ Q' rv s0 s\,\E\" + unfolding validI_def validIE_def + by (simp split: sum.splits) + +lemma rg_drop_imp: + "\P\,\R\ f \G\,\Q\ \ \P\,\R\ f \G\,\\rv s0 s. Q' rv s0 s \ Q rv s0 s\" + by (auto simp: validI_def) + +lemma rg_drop_impE: + "\\P\,\R\ f \G\,\\r. Q\, \E\\ \ \P\,\R\ f \G\,\\rv s0 s. Q' rv s0 s \ Q s0 s\, \E\" + by (simp add: rg_chainE) + +lemma rg_drop_impE_E: + "\P\,\R\ f \G\,\Q\,\E\ \ \P\,\R\ f \G\,\Q\, \\rv s0 s. E' rv s0 s \ E rv s0 s\" + by (auto simp: validIE_def validI_def split: sum.splits) + +lemmas rg_drop_imps = rg_drop_imp rg_drop_impE rg_drop_impE_E + +(*This is unsafe, but can be very useful when supplied as a comb rule.*) +lemma rg_drop_imp_conj[wp_unsafe]: + "\ \P\,\R\ f \G\,\Q'\; \P'\,\R\ f \G\,\\rv s0 s. (Q rv s0 s \ Q'' rv s0 s) \ Q''' rv s0 s\ \ \ + \P and P'\,\R\ f \G\,\\rv s0 s. (Q rv s0 s \ Q' rv s0 s \ Q'' rv s0 s) \ Q''' rv s0 s\" + by (auto simp: validI_def) + +lemmas rg_drop_imp_conj'[wp_unsafe] = rg_drop_imp_conj[where Q'''="\\\", simplified] + +lemma rg_vcg_set_pred_lift: + assumes "\P x. m \R\,\G\,\ \s0 s. P (f x s0 s) \" + shows "m \R\,\G\,\ \s0 s. 
P {x. f x s0 s} \" + using assms[where P="\x . x"] assms[where P=Not] + by (fastforce simp: validI_def elim!: subst[rotated, where P=P]) + +(*If there is a use case which requires a specific guarantee then this rule could be extended with + an extra assumption and precondition.*) +lemma rg_vcg_set_pred_lift_mono: + assumes f: "\x. m \R\,-,\ f x \" + assumes mono: "\A B. A \ B \ P A \ P B" + shows "m \R\,-,\ \s0 s. P {x. f x s0 s} \" + by (fastforce simp: validI_def validI_prefix_closed[OF f] elim!: mono[rotated] + dest: use_validI[OF _ f]) + +text \If a function contains an @{term assert}, or equivalent, then it might be + possible to strengthen the precondition of an already-proven rg quintuple + @{text pos}, by additionally proving a side condition @{text neg}, that + violating some condition causes failure. The stronger rg quintuple produced + by this theorem allows the precondition to assume that the condition is + satisfied.\ +lemma rg_strengthen_pre_via_assert_forward: + assumes pos: "\ P \,\R\ f \G\,\ Q \" + assumes rel: "\s0 s. S s0 s \ P s0 s \ N s0 s" + assumes neg: "\ N \,\R\ f \G\,\ \\\ \" + shows "\ S \,\R\ f \G\,\ Q \" + apply (rule rg_weaken_pre) + apply (rule rg_strengthen_post) + apply (rule rg_vcg_disj_lift[OF pos neg]) + by (auto simp: rel) + +text \Like @{thm rg_strengthen_pre_via_assert_forward}, strengthen a precondition + by proving a side condition that the negation of that condition would cause + failure. This version is intended for backward reasoning. Apply it to a goal to + obtain a stronger precondition after proving the side condition.\ +lemma rg_strengthen_pre_via_assert_backward: + assumes neg: "\ \s0 s. \ E s0 s \,\R\ f \G\,\ \\\ \" + assumes pos: "\ P and E \,\R\ f \G\,\ Q \" + shows "\ P \,\R\ f \G\,\ Q \" + by (rule rg_strengthen_pre_via_assert_forward[OF pos _ neg], simp) + + +subsection \Strongest postcondition rules\ + +lemma get_tsp: + "\P\,\R\ get \G\,\\rv s0 s. s = rv \ P s0 s\" + by (simp add: get_def validI_def prefix_closed_def) + +lemma put_tsp: + "\\\\,\R\ put a \G\,\\_ s0 s. s = a\" + by (simp add: put_def validI_def prefix_closed_def) + +lemma return_tsp: + "\P\,\R\ return a \G\,\\rv s0 s. rv = a \ P s0 s\" + by (simp add:return_def validI_def prefix_closed_def) + +lemma rg_return_tsp: (* FIXME lib: eliminate *) + "\P\,\R\ return x \G\,\\rv. P and (\_ _. rv = x)\" + by (simp add: validI_def return_def prefix_closed_def) + +lemma assert_tsp: + "\P\,\R\ assert Q \G\,\\_ s0 s. P s0 s \ Q\" + by (simp add: assert_def fail_def return_def validI_def prefix_closed_def) + +lemma rg_gets_tsp: + "\P\,\R\ gets f \G\,\\rv s0 s. rv = f s \ P s0 s\" + by (simp add: validI_def simpler_gets_def prefix_closed_def) + +lemma rg_returnOk_tsp: + "\P\,\R\ returnOk x \G\,\\rv s0 s. rv = x \ P s0 s\, \Q\" + by (simp add: validI_def validIE_def returnOk_def return_def prefix_closed_def) + +\ \For forward reasoning in RG proofs, these lemmas allow us to step over the + left-hand-side of monadic bind, while keeping the same precondition.\ + +named_theorems forward_inv_step_rules + +lemmas rg_forward_inv_step_nobind[forward_inv_step_rules] = + bind_twp_nobind[where Q'=P and P=P for P, rotated] + +lemmas bind_twp_fwd_skip[forward_inv_step_rules] = + bind_twp_fwd[where Q'="\_. P" and P=P for P] + +lemmas rg_forward_inv_step_nobindE_valid[forward_inv_step_rules] = + bindE_twp_nobind[where Q'=P and P=P and E="\_. Q" and Q="\_. Q" for P Q, + simplified validIE_eq_validI, rotated] + +lemmas rg_forward_inv_step_valid[forward_inv_step_rules] = + bindE_twp_fwd[where Q'="\_. 
P" and P=P and E="\_. Q" and Q="\_. Q" for P Q, + simplified validIE_eq_validI] + +lemmas rg_forward_inv_step_nobindE[forward_inv_step_rules] = + bindE_twp_nobind[where Q'=P and P=P for P, rotated] + +lemmas bindE_twp_fwd_skip[forward_inv_step_rules] = + bindE_twp_fwd[where Q'="\_. P" and P=P for P] + +method forward_inv_step uses wp simp = + rule forward_inv_step_rules, solves \wpsimp wp: wp simp: simp\ + + +subsection \FIXME MC: look at these lemmas and work out where they should go / what this section should be called\ + +lemma mres_union: + "mres (a \ b) = mres a \ mres b" + by (simp add: mres_def image_Un) + +lemma mres_Failed_empty: + "mres ((\xs. (xs, Failed)) ` X ) = {}" + "mres ((\xs. (xs, Incomplete)) ` X ) = {}" + by (auto simp add: mres_def image_def) + +lemma det_set_option_eq: + "(\a\m. set_option (snd a)) = {(r, s')} \ + (ts, Some (rr, ss)) \ m \ rr = r \ ss = s'" + by (metis UN_I option.set_intros prod.inject singleton_iff snd_conv) + +lemma det_set_option_eq': + "(\a\m. set_option (snd a)) = {(r, s')} \ + Some (r, s') \ snd ` m" + using image_iff by fastforce + +lemma validI_name_pre: + "prefix_closed f \ + (\s0 s. P s0 s \ \\s0' s'. s0' = s0 \ s' = s\,\R\ f \G\,\Q\) + \ \P\,\R\ f \G\,\Q\" + unfolding validI_def + by metis + +lemma validI_well_behaved': + "\prefix_closed f; \P\,\R'\ f \G'\,\Q\; R \ R'; G' \ G\ + \ \P\,\R\ f \G\,\Q\" + apply (subst validI_def, clarsimp) + apply (clarsimp simp add: rely_def) + apply (drule (2) validI_D) + apply (fastforce simp: rely_cond_def guar_cond_def)+ + done + +lemmas validI_well_behaved = validI_well_behaved'[unfolded le_fun_def, simplified] + +lemmas bind_promote_If = + if_distrib[where f="\f. bind f g" for g] + if_distrib[where f="\g. bind f g" for f] + +lemma bind_promote_If2: + "do x \ f; if P then g x else h x od + = (if P then bind f g else bind f h)" + by simp + +lemma exec_put_trace[unfolded K_bind_def]: + "(do put_trace xs; f od) s + = (\n \ {n. 0 < n \ n \ length xs}. {(drop n xs, Incomplete)}) + \ ((\(a, b). (a @ xs, b)) ` f s)" + apply (simp add: put_trace_eq_drop bind_def) + apply (safe; (clarsimp split: if_split_asm)?; + fastforce intro: bexI[where x=0] rev_bexI) + done + +lemma UN_If_distrib: + "(\x \ S. if P x then A x else B x) + = ((\x \ S \ {x. P x}. A x) \ (\x \ S \ {x. \ P x}. B x))" + by (fastforce split: if_split_asm) + +lemma Await_redef: + "Await P = do + s1 \ select {s. P s}; + env_steps; + s \ get; + select (if P s then {()} else {}) + od" + apply (simp add: Await_def env_steps_def bind_assoc) + apply (cases "{s. P s} = {}") + apply (simp add: select_def bind_def get_def) + apply (rule ext) + apply (simp add: exec_get select_bind_UN put_then_get_then) + apply (simp add: bind_promote_If2 if_distribR if_distrib[where f=select]) + apply (simp add: exec_put_trace cong: if_cong) + apply (simp add: put_def bind_def select_def cong: if_cong) + apply (strengthen equalityI) + apply clarsimp + apply (strengthen exI[where x="s # xs" for s xs]) + apply (strengthen exI[where x="Suc n" for n]) + apply simp + apply blast + done + +lemma eq_Me_neq_Env: + "(x = Me) = (x \ Env)" + by (cases x; simp) + +lemma validI_invariant_imp: + assumes v: "\P\,\R\ f \G\,\Q\" + and P: "\s0 s. P s0 s \ I s0" + and R: "\s0 s. I s0 \ R s0 s \ I s" + and G: "\s0 s. I s0 \ G s0 s \ I s" + shows "\P\,\R\ f \\s0 s. I s0 \ I s \ G s0 s\,\\rv s0 s. I s0 \ Q rv s0 s\" +proof - + { fix tr s0 i + assume r: "rely_cond R s0 tr" and g: "guar_cond G s0 tr" + and I: "I s0" + hence imp: "\(_, s, s') \ trace_steps (rev tr) s0. 
I s \ I s'" + apply (clarsimp simp: guar_cond_def rely_cond_def) + apply (drule(1) bspec)+ + apply (clarsimp simp: eq_Me_neq_Env) + apply (metis R G) + done + hence "i < length tr \ I (snd (rev tr ! i))" + using I + apply (induct i) + apply (clarsimp simp: neq_Nil_conv[where xs="rev tr" for tr, simplified]) + apply clarsimp + apply (drule bspec, fastforce simp add: trace_steps_nth) + apply simp + done + } + note I = this + show ?thesis + using v + apply (clarsimp simp: validI_def rely_def imp_conjL) + apply (drule spec2, drule(1) mp)+ + apply clarsimp + apply (frule P[rule_format]) + apply (clarsimp simp: guar_cond_def trace_steps_nth I last_st_tr_def + hd_append last_rev[symmetric] + last_conv_nth rev_map) + done +qed + +lemma validI_guar_post_conj_lift: + "\\P\,\R\ f \G1\,\Q1\; \P\,\R\ f \G2\,\Q2\\ + \ \P\,\R\ f \\s0 s. G1 s0 s \ G2 s0 s\,\\rv s0 s. Q1 rv s0 s \ Q2 rv s0 s\" + apply (frule validI_prefix_closed) + apply (subst validI_def, clarsimp simp: rely_def) + apply (drule(3) validI_D)+ + apply (auto simp: guar_cond_def) + done + +lemma validI_valid_wp: + "\\P\,\\\\ f -,\\rv _ s. Q rv s\\ + \ \P s0\ f \Q\" + by (auto simp: rely_def validI_def valid_def mres_def) + +lemma validI_triv_valid_eq: + "prefix_closed f \ \P\,\\\\ f -,\\rv _ s. Q rv s\ = (\s0. \\s. P s0 s\ f \Q\)" + by (fastforce simp: rely_def validI_def valid_def mres_def image_def) + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_Reader_Option.thy b/lib/Monads/trace/Trace_Reader_Option.thy new file mode 100644 index 0000000000..27f59281d0 --- /dev/null +++ b/lib/Monads/trace/Trace_Reader_Option.thy @@ -0,0 +1,219 @@ +(* + * Copyright 2024, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Reader option monad syntax plus the connection between the reader option monad and the trace monad *) + +theory Trace_Reader_Option +imports + Trace_No_Fail + Reader_Option_VCG +begin + +(* FIXME: remove this syntax, standardise on do {..} instead *) +(* Syntax defined here so we can reuse Trace_Monad definitions *) +syntax + "_doO" :: "[dobinds, 'a] => 'a" ("(DO (_);// (_)//OD)" 100) + +translations + "_doO (_dobinds b bs) e" == "_doO b (_doO bs e)" + "_doO (_nobind b) e" == "b |>> (CONST K_bind e)" + "DO x <- a; e OD" == "a |>> (\x. e)" + + +lemma ovalid_K_bind_wp[wp]: + "ovalid P f Q \ ovalid P (K_bind f x) Q" + by simp + +lemma ovalidNF_K_bind_wp[wp]: + "ovalidNF P f Q \ ovalidNF P (K_bind f x) Q" + by simp + +lemma no_ofail_K_bind[wp]: + "no_ofail P f \ no_ofail P (K_bind f x)" + by simp + +lemma no_ofail_gets_the_eq: + "no_ofail P f \ no_fail P (gets_the (f :: ('s, 'a) lookup))" + by (auto simp: no_ofail_def no_fail_def gets_the_def gets_def + get_def assert_opt_def bind_def return_def fail_def + split: option.split) + +lemmas no_ofail_gets_the = + no_ofail_gets_the_eq[THEN iffD1] + + +(* Lemmas relating ovalid and valid *) +lemma ovalid_gets_the: + "ovalid P f Q \ \P\ gets_the f \Q\" + apply wpsimp + apply (fastforce dest: use_ovalid) + done + + +lemmas monad_simps = + gets_the_def bind_def assert_def assert_opt_def + simpler_gets_def fail_def return_def + +lemma gets_the_opt_map: + "gets_the (f |> g) = do x \ gets_the f; assert_opt (g x) od" + by (rule ext) (simp add: monad_simps opt_map_def split: option.splits) + +lemma gets_the_opt_o: + "gets_the (f |> Some o g) = do x \ gets_the f; return (g x) od" + by (simp add: gets_the_opt_map assert_opt_Some) + +lemma gets_the_obind: + "gets_the (f |>> g) = gets_the f >>= (\x. 
gets_the (g x))" + by (rule ext) (simp add: monad_simps obind_def split: option.splits) + +lemma gets_the_return: + "gets_the (oreturn x) = return x" + by (simp add: monad_simps oreturn_def) + +lemma gets_the_fail: + "gets_the ofail = fail" + by (simp add: monad_simps ofail_def) + +lemma gets_the_ogets: + "gets_the (ogets s) = gets s" + by (clarsimp simp: monad_simps ogets_def) + +lemma gets_the_returnOk: + "gets_the (oreturnOk x) = returnOk x" + by (simp add: monad_simps oreturnOk_def returnOk_def) + +lemma gets_the_throwError: + "gets_the (othrow e) = throwError e" + by (simp add: monad_simps othrow_def throwError_def) + +lemma gets_the_assert: + "gets_the (oassert P) = assert P" + by (simp add: oassert_def assert_def gets_the_fail gets_the_return) + +lemma gets_the_assert_opt: + "gets_the (oassert_opt P) = assert_opt P" + by (simp add: oassert_opt_def assert_opt_def gets_the_return gets_the_fail split: option.splits) + +lemma gets_the_if_distrib: + "gets_the (if P then f else g) = (if P then gets_the f else gets_the g)" + by simp + +lemma gets_the_oapply_comp: + "gets_the (oapply x \ f) = gets_map f x" + by (fastforce simp: gets_map_def gets_the_def o_def gets_def) + +lemma gets_the_Some: + "gets_the (\_. Some x) = return x" + by (simp add: gets_the_def assert_opt_Some) + +lemma gets_the_oapply2_comp: + "gets_the (oapply2 y x \ f) = gets_map (swp f y) x" + by (clarsimp simp: gets_map_def gets_the_def o_def gets_def) + +lemma gets_obind_bind_eq: + "(gets (f |>> (\x. g x))) = + (gets f >>= (\x. case x of None \ return None | Some y \ gets (g y)))" + by (auto simp: simpler_gets_def bind_def obind_def return_def split: option.splits) + +lemma mres_assert_opt: + "mres (assert_opt opt s) = (if opt = None then {} else {(the opt,s)})" + by (clarsimp simp: assert_opt_def fail_def return_def mres_def vimage_def split: option.split) + + +lemmas omonad_simps [simp] = + gets_the_opt_map assert_opt_Some gets_the_obind + gets_the_return gets_the_fail gets_the_returnOk + gets_the_throwError gets_the_assert gets_the_Some + gets_the_oapply_comp + + +section "Relation between option monad loops and trace monad loops." + +(* Option monad whileLoop formalisation thanks to Lars Noschinski . *) + +lemma gets_the_conv: + "(gets_the B s) = (case B s of Some r' \ ({([], Result (r', s))}) | _ \ {([], Failed)})" + by (auto simp: gets_the_def gets_def get_def bind_def return_def fail_def assert_opt_def split: option.splits) + +lemma gets_the_loop_terminates: + "whileLoop_terminates C (\a. gets_the (B a)) r s + \ (\rs'. (Some r, rs') \ option_while' (\a. C a s) (\a. B a s))" (is "?L \ ?R") +proof + assume ?L then show ?R + proof (induct rule: whileLoop_terminates.induct[case_names 1 2]) + case (2 r s) then show ?case + by (cases "B r s") (auto simp: gets_the_conv intro: option_while'.intros) + qed (auto intro: option_while'.intros) +next + assume ?R then show ?L + proof (elim exE) + fix rs' assume "(Some r, rs') \ option_while' (\a. C a s) (\a. B a s)" + then have "whileLoop_terminates C (\a. gets_the (B a)) (the (Some r)) s" + by induct (auto intro: whileLoop_terminates.intros simp: gets_the_conv) + then show ?thesis by simp + qed +qed + +lemma The_eqD: + "\The P = x; \!x. P x\ \ P x" + by (metis theI) + +lemma gets_the_whileLoop: + fixes C :: "'a \ 's \ bool" + assumes terminates: "\s. whileLoop_terminates C (\a. gets_the (B a)) r s" + shows "whileLoop C (\a. gets_the (B a)) r = gets_the (owhile C B r)" +proof - + { fix r s r' s' ts assume "((r,s), ts, Result (r', s')) \ whileLoop_results C (\a. 
gets_the (B a))" + then have "s = s' \ ts = [] \(Some r, Some r') \ option_while' (\a. C a s) (\a. B a s)" + by (induct r s ts "Result (r', s')") + (auto intro: option_while'.intros simp: gets_the_conv split: option.splits) } + note wl'_Result = this + + { fix r s ts assume "((r,s), ts, Failed) \ whileLoop_results C (\a. gets_the (B a))" + then have "ts = [] \ (Some r, None) \ option_while' (\a. C a s) (\a. B a s)" + by (induct r s ts "Failed :: (('s, 'a) tmres)") + (auto intro: option_while'.intros simp: gets_the_conv split: option.splits) } + note wl'_Failed = this + + { fix r s ts assume "((r,s), ts, Incomplete) \ whileLoop_results C (\a. gets_the (B a))" + then have "False" + by (induct r s ts "Incomplete :: (('s, 'a) tmres)") + (auto intro: option_while'.intros simp: gets_the_conv split: option.splits) } + note wl'_Incomplete = this + + { fix r s r' assume "(Some r, Some r') \ option_while' (\a. C a s) (\a. B a s)" + then have "((r,s), [], Result (r',s)) \ whileLoop_results C (\a. gets_the (B a))" + by (induct "Some r" "Some r'" arbitrary: r) + (auto intro: whileLoop_results.intros simp: gets_the_conv) } + note option_while'_Some = this + + { fix r s assume "(Some r, None) \ option_while' (\a. C a s) (\a. B a s)" + then have "((r,s), [], Failed) \ whileLoop_results C (\a. gets_the (B a))" + by (induct "Some r" "None :: 'a option" arbitrary: r) + (auto intro: whileLoop_results.intros simp: gets_the_conv) } + note option_while'_None = this + + have "\s. owhile C B r s = None + \ whileLoop C (\a. gets_the (B a)) r s = {([], Failed)}" + using terminates + by (intro subset_antisym; + clarsimp simp: whileLoop_def owhile_def option_while_def gets_the_loop_terminates + split: if_split_asm intro!: option_while'_None dest!: The_eqD) + (case_tac b, auto dest!: wl'_Result wl'_Failed wl'_Incomplete option_while'_inj option_while'_THE) + moreover + have "\s r'. owhile C B r s = Some r' + \ whileLoop C (\a. gets_the (B a)) r s = {([], Result (r', s))}" + by (intro subset_antisym; + clarsimp simp: whileLoop_def owhile_def option_while_def option_while'_THE + split: if_split_asm intro!: option_while'_Some) + (case_tac b, auto dest: wl'_Result wl'_Failed wl'_Incomplete option_while'_inj) + ultimately + show ?thesis + by (auto simp: fun_eq_iff gets_the_conv split: option.split) +qed + +end diff --git a/lib/Monads/trace/Trace_Sat.thy b/lib/Monads/trace/Trace_Sat.thy new file mode 100644 index 0000000000..af8fcc8ce7 --- /dev/null +++ b/lib/Monads/trace/Trace_Sat.thy @@ -0,0 +1,155 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Sat + imports + Trace_Monad + WPSimp +begin + +section \Satisfiability\ + +text \ + The dual to validity: an existential instead of a universal quantifier for the post condition. + In refinement, it is often sufficient to know that there is one state that satisfies a condition.\ +definition exs_valid :: + "('a \ bool) \ ('a, 'b) tmonad \ ('b \ 'a \ bool) \ bool" + ("\_\ _ \\_\") where + "\P\ f \\Q\ \ \s. P s \ (\(rv, s') \ mres (f s). Q rv s')" + +text \The above for the exception monad\ +definition ex_exs_validE :: + "('a \ bool) \ ('a, 'e + 'b) tmonad \ ('b \ 'a \ bool) \ ('e \ 'a \ bool) \ bool" + ("\_\ _ \\_\, \_\") where + "\P\ f \\Q\, \E\ \ \P\ f \\\rv. case rv of Inl e \ E e | Inr v \ Q v\" + +text \ + Seen as predicate transformer, @{const exs_valid} is the so-called conjugate wp in the literature, + i.e. with + @{term "wp f Q \ \s. mres (f s) \ {(rv,s). 
Q rv s}"} and + @{term "cwp f Q \ not (wp f (not Q))"}, we get + @{prop "valid P f Q = (\s. P s \ wp f Q s)"} and + @{prop "exs_valid P f Q = (\s. P s \ cwp f Q s)"}. + + See also "Predicate Calculus and Program Semantics" by E. W. Dijkstra and C. S. Scholten.\ +experiment +begin + +definition + "wp f Q \ \s. mres (f s) \ {(rv,s). Q rv s}" + +definition + "cwp f Q \ not (wp f (not Q))" + +lemma + "exs_valid P f Q = (\s. P s \ cwp f Q s)" + unfolding exs_valid_def cwp_def wp_def by auto + +end + + +subsection \Set up for @{method wp}\ + +definition exs_postcondition where + "exs_postcondition P f \ \a b. \(rv, s) \ f a b. P rv s" + +lemma exs_valid_is_triple[wp_trip]: + "exs_valid P f Q = triple_judgement P f (exs_postcondition Q (\s f. mres (f s)))" + by (simp add: triple_judgement_def exs_postcondition_def exs_valid_def) + + +subsection \Rules\ + +lemma exs_hoare_post_imp: + "\\r s. Q r s \ R r s; \P\ a \\Q\\ \ \P\ a \\R\" + unfolding exs_valid_def by blast + +lemma use_exs_valid: + "\ \P\ f \\Q\; P s \ \ \(r, s') \ mres (f s). Q r s'" + by (simp add: exs_valid_def) + +lemma exs_valid_weaken_pre[wp_pre]: + "\ \P'\ f \\Q\; \s. P s \ P' s \ \ \P\ f \\Q\" + by (clarsimp simp: exs_valid_def) + +lemma exs_valid_chain: + "\ \P\ f \\Q\; \s. R s \ P s; \r s. Q r s \ S r s \ \ \R\ f \\S\" + by (fastforce simp: exs_valid_def Bex_def) + +lemma exs_valid_assume_pre: + "\ \s. P s \ \P\ f \\Q\ \ \ \P\ f \\Q\" + by (fastforce simp: exs_valid_def) + +lemma exs_valid_bind[wp_split]: + "\ \rv. \B rv\ g rv \\C\; \A\ f \\B\ \ \ \A\ f >>= (\rv. g rv) \\C\" + apply atomize + apply (clarsimp simp: exs_valid_def bind_def' mres_def) + apply (drule spec, drule(1) mp, clarsimp) + apply (drule spec2, drule(1) mp, clarsimp) + apply (simp add: image_def bex_Un) + apply (strengthen subst[where P="\x. x \ f s" for s, mk_strg I _ E], simp) + apply (fastforce elim: rev_bexI) + done + +lemma exs_valid_return[wp]: + "\Q v\ return v \\Q\" + by (clarsimp simp: exs_valid_def return_def mres_def) + +lemma exs_valid_select[wp]: + "\\s. \r \ S. Q r s\ select S \\Q\" + by (auto simp: exs_valid_def select_def mres_def image_def) + +lemma exs_valid_alt[wp]: + "\ \P\ f \\Q\; \P'\ g \\Q\ \ \ \P or P'\ f \ g \\Q\" + by (fastforce simp: exs_valid_def alternative_def mres_def image_def) + +lemma exs_valid_get[wp]: + "\\s. Q s s\ get \\ Q \" + by (clarsimp simp: exs_valid_def get_def mres_def) + +lemma exs_valid_gets[wp]: + "\\s. Q (f s) s\ gets f \\Q\" + by (clarsimp simp: gets_def) wp + +lemma exs_valid_put[wp]: + "\Q v\ put v \\Q\" + by (clarsimp simp: put_def exs_valid_def mres_def) + +lemma exs_valid_fail[wp]: + "\\s. False\ fail \\Q\" + unfolding fail_def exs_valid_def + by simp + +lemma exs_valid_assert[wp]: + "\\s. Q () s \ G\ assert G \\Q\" + unfolding assert_def + by (wpsimp | rule conjI)+ + +lemma exs_valid_state_assert[wp]: + "\\s. Q () s \ G s\ state_assert G \\Q\" + unfolding state_assert_def + by wp + +lemmas exs_valid_guard = exs_valid_state_assert + +lemma exs_valid_condition[wp]: + "\ \P\ l \\Q\; \P'\ r \\Q\ \ \ \\s. (C s \ P s) \ (\ C s \ P' s)\ condition C l r \\Q\" + by (clarsimp simp: condition_def exs_valid_def split: sum.splits) + +lemma gets_exs_valid: + "\(=) s\ gets f \\\r. (=) s\" + by (rule exs_valid_gets) + +lemma exs_valid_assert_opt[wp]: + "\\s. \x. G = Some x \ Q x s\ assert_opt G \\Q\" + by (clarsimp simp: assert_opt_def exs_valid_def return_def mres_def) + +lemma gets_the_exs_valid[wp]: + "\\s. \x. 
h s = Some x \ Q x s\ gets_the h \\Q\" + by (wpsimp simp: gets_the_def) + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_Strengthen_Setup.thy b/lib/Monads/trace/Trace_Strengthen_Setup.thy new file mode 100644 index 0000000000..2dbeaff83a --- /dev/null +++ b/lib/Monads/trace/Trace_Strengthen_Setup.thy @@ -0,0 +1,82 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Strengthen_Setup + imports + Strengthen + Trace_No_Fail + Trace_RG +begin + +section \Strengthen setup.\ + +context strengthen_implementation begin + +lemma strengthen_hoare[strg]: + "\\r s. st F (\) (Q r s) (R r s)\ + \ st F (\) (\P\ f \Q\) (\P\ f \R\)" + by (cases F, auto elim: hoare_strengthen_post) + +lemma strengthen_validE_R_cong[strg]: + "\\r s. st F (\) (Q r s) (R r s)\ + \ st F (\) (\P\ f \Q\, -) (\P\ f \R\, -)" + by (cases F, auto intro: hoare_strengthen_postE_R) + +lemma strengthen_validE_cong[strg]: + "\\r s. st F (\) (Q r s) (R r s); \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f \Q\, \S\) (\P\ f \R\, \T\)" + by (cases F, auto elim: hoare_strengthen_postE) + +lemma strengthen_validE_E_cong[strg]: + "\\r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f -, \S\) (\P\ f -, \T\)" + by (cases F, auto elim: hoare_strengthen_postE simp: validE_E_def) + +lemma strengthen_validI[strg]: + "\\r s0 s. st F (\) (Q r s0 s) (Q' r s0 s)\ + \ st F (\) (\P\,\G\ f \R\,\Q\) (\P\,\G\ f \R\,\Q'\)" + by (cases F, auto elim: rg_strengthen_post) + +lemma wpfix_strengthen_hoare: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (Q' r s)\ + \ st F (\) (\P\ f \Q\) (\P'\ f \Q'\)" + by (cases F, auto elim: hoare_chain) + +lemma wpfix_strengthen_validE_R_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (Q' r s)\ + \ st F (\) (\P\ f \Q\, -) (\P'\ f \Q'\, -)" + by (cases F, auto elim: hoare_chainE simp: validE_R_def) + +lemma wpfix_strengthen_validE_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (R r s); + \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f \Q\, \S\) (\P'\ f \R\, \T\)" + by (cases F, auto elim: hoare_chainE) + +lemma wpfix_strengthen_validE_E_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f -, \S\) (\P'\ f -, \T\)" + by (cases F, auto elim: hoare_chainE simp: validE_E_def) + +lemma wpfix_no_fail_cong: + "\\s. 
st (\ F) (\) (P s) (P' s)\ + \ st F (\) (no_fail P f) (no_fail P' f)" + by (cases F, auto elim: no_fail_pre) + +lemmas nondet_wpfix_strgs = + wpfix_strengthen_validE_R_cong + wpfix_strengthen_validE_E_cong + wpfix_strengthen_validE_cong + wpfix_strengthen_hoare + wpfix_no_fail_cong + +end + +lemmas nondet_wpfix_strgs[wp_fix_strgs] + = strengthen_implementation.nondet_wpfix_strgs + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_Total.thy b/lib/Monads/trace/Trace_Total.thy new file mode 100644 index 0000000000..cbeb009491 --- /dev/null +++ b/lib/Monads/trace/Trace_Total.thy @@ -0,0 +1,353 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Total correctness Hoare logic for the Trace_Monad (= valid + no_fail) *) + +theory Trace_Total + imports Trace_No_Fail +begin + +section \Total correctness for @{text Trace_Monad} and @{text Trace_Monad} with exceptions\ + +subsection Definitions + +text \ + It is often desired to prove non-failure and a Hoare triple simultaneously, as the reasoning + is often similar. The following definitions allow such reasoning to take place.\ + +definition validNF :: + "('s \ bool) \ ('s,'a) tmonad \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /\_\!") where + "\P\ f \Q\! \ \P\ f \Q\ \ no_fail P f" + +lemma validNF_alt_def: + "\P\ f \Q\! = (\s. P s \ ((\(r', s') \ mres (f s). Q r' s') \ \ failed (f s)))" + by (auto simp: validNF_def valid_def no_fail_def) + +definition validE_NF :: + "('s \ bool) \ ('s, 'a + 'b) tmonad \ ('b \ 's \ bool) \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /(\_\,/ \_\!)") where + "\P\ f \Q\, \E\! \ \P\ f \Q\, \E\ \ no_fail P f" + +lemma validE_NF_alt_def: + "\P\ f \Q\, \E\! = \P\ f \\v s. case v of Inl e \ E e s | Inr r \ Q r s\!" + by (clarsimp simp: validE_NF_def validE_def validNF_def) + + +subsection \@{method wpc} setup\ + +lemma wpc_helper_validNF: + "\Q\ g \S\! \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ g \S\!" + unfolding wpc_helper_def + by clarsimp (metis hoare_weaken_pre no_fail_pre validNF_def) + +wpc_setup "\m. \P\ m \Q\!" wpc_helper_validNF + + +subsection \Basic @{const validNF} theorems\ + +lemma validNF_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \!) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') \ f \ Q' \!" + by (fastforce simp: valid_def validNF_def no_fail_def mres_def image_def + split: prod.splits) + +lemma validE_NF_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \, \ \rv s. E s0 rv s \!) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') + \ (\rv s'. E s0 rv s' \ E' rv s') \ f \ Q' \, \ E' \!" + by (fastforce simp: validE_NF_def validE_def valid_def no_fail_def mres_def image_def + split: prod.splits sum.splits) + +lemma validNF_conjD1: + "\ P \ f \ \rv s. Q rv s \ Q' rv s \! \ \ P \ f \ Q \!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_conjD2: + "\ P \ f \ \rv s. Q rv s \ Q' rv s \! \ \ P \ f \ Q' \!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF[intro?]: (* FIXME lib: should be validNFI *) + "\ \ P \ f \ Q \; no_fail P f \ \ \ P \ f \ Q \!" + by (clarsimp simp: validNF_def) + +lemma validNFE: + "\ \ P \ f \ Q \!; \ \ P \ f \ Q \; no_fail P f \ \ R \ \ R" + by (clarsimp simp: validNF_def) + +lemma validNF_valid: + "\ \ P \ f \ Q \! \ \ \ P \ f \ Q \" + by (erule validNFE) + +lemma validNF_no_fail: + "\ \ P \ f \ Q \! 
\ \ no_fail P f" + by (erule validNFE) + +lemma validNF_not_failed: + "\ \ P \ f \ Q \!; P s \ \ \ failed (f s)" + by (clarsimp simp: validNF_def no_fail_def) + +lemma use_validNF: + "\ (r', s') \ mres (f s); \ P \ f \ Q \!; P s \ \ Q r' s'" + by (fastforce simp: validNF_def valid_def) + + +subsection \@{const validNF} weakest precondition rules\ + +lemma validNF_return[wp]: + "\ P x \ return x \ P \!" + by (wp validNF)+ + +lemma validNF_get[wp]: + "\ \s. P s s \ get \ P \!" + by (wp validNF)+ + +lemma validNF_put[wp]: + "\ \s. P () x \ put x \ P \!" + by (wp validNF)+ + +lemma validNF_K_bind[wp]: + "\ P \ x \ Q \! \ \ P \ K_bind x f \ Q \!" + by simp + +lemma validNF_fail[wp]: + "\ \s. False \ fail \ Q \!" + by (clarsimp simp: validNF_def fail_def no_fail_def) + +lemma validNF_prop[wp_unsafe]: + "\ no_fail (\s. P) f \ \ \ \s. P \ f \ \rv s. P \!" + by (wp validNF)+ + +lemma validNF_post_conj[intro!]: + "\ \ P \ a \ Q \!; \ P \ a \ R \! \ \ \ P \ a \ Q and R \!" + by (auto simp: validNF_def) + +lemma validNF_pre_disj[intro!]: + "\ \ P \ a \ R \!; \ Q \ a \ R \! \ \ \ P or Q \ a \ R \!" + by (rule validNF) (auto dest: validNF_valid validNF_no_fail intro: no_fail_or) + +text \ + Set up combination rules for @{method wp}, which also requires a @{text wp_trip} rule for + @{const validNF}.\ +definition validNF_property :: "('a \ 's \ bool) \ 's \ ('s,'a) tmonad \ bool" where + "validNF_property Q s b \ \ failed (b s) \ (\(r', s') \ mres (b s). Q r' s')" + +lemma validNF_is_triple[wp_trip]: + "validNF P f Q = triple_judgement P f (validNF_property Q)" + by (auto simp: validNF_def triple_judgement_def validNF_property_def no_fail_def valid_def) + +lemma validNF_weaken_pre[wp_pre]: + "\\Q\ a \R\!; \s. P s \ Q s\ \ \P\ a \R\!" + by (metis hoare_pre_imp no_fail_pre validNF_def) + +lemma validNF_post_comb_imp_conj: + "\ \P'\ f \Q\!; \P\ f \Q'\!; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def) + +lemma validNF_post_comb_conj_L: + "\ \P'\ f \Q\!; \P\ f \Q'\ \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_post_comb_conj_R: + "\ \P'\ f \Q\; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_post_comb_conj: + "\ \P'\ f \Q\!; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_if_split[wp_split]: + "\P \ \Q\ f \S\!; \ P \ \R\ g \S\!\ \ + \\s. (P \ Q s) \ (\ P \ R s)\ if P then f else g \S\!" + by simp + +lemma validNF_vcg_conj_lift: + "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce intro!: validNF_post_conj[unfolded pred_conj_def] intro: validNF_weaken_pre) + +lemma validNF_vcg_disj_lift: + "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" + by (auto simp: validNF_def no_fail_def intro!: hoare_vcg_disj_lift) + +lemma validNF_vcg_all_lift[wp]: + "\ \x. \P x\ f \Q x\! \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\!" + by (auto simp: validNF_def no_fail_def intro!: hoare_vcg_all_lift) + +lemma validNF_bind_wp[wp_split]: + "\ \x. \B x\ g x \C\!; \A\ f \B\! \ \ \A\ do x \ f; g x od \C\!" + unfolding validNF_def + by (auto intro: bind_wp no_fail_bind[where P=Q and Q=Q for Q, simplified]) + +lemmas validNF_bind_wp_fwd = validNF_bind_wp[rotated] + + +subsection "validNF compound rules" + +lemma validNF_state_assert[wp]: + "\ \s. 
P () s \ G s \ state_assert G \ P \!" + by (rule validNF; wpsimp) + +lemma validNF_modify[wp]: + "\ \s. P () (f s) \ modify f \ P \!" + by (rule validNF; wpsimp) + +lemma validNF_gets[wp]: + "\\s. P (f s) s\ gets f \P\!" + by (rule validNF; wpsimp) + +lemma validNF_condition[wp]: + "\ \ Q \ A \P\!; \ R \ B \P\!\ \ \\s. if C s then Q s else R s\ condition C A B \P\!" + by (erule validNFE)+ + (rule validNF; wpsimp) + +lemma validNF_assert[wp]: + "\ (\s. P) and (R ()) \ assert P \ R \!" + by (rule validNF; wpsimp) + +lemma validNF_false_pre: + "\ \_. False \ P \ Q \!" + by (rule validNF; wpsimp) + +lemma validNF_chain: + "\\P'\ a \R'\!; \s. P s \ P' s; \r s. R' r s \ R r s\ \ \P\ a \R\!" + by (fastforce simp: validNF_def valid_def no_fail_def Ball_def) + +lemma validNF_case_prod[wp]: + "\\x y. \P x y\ B x y \Q\!\ \ \case v of (x, y) \ P x y\ case v of (x, y) \ B x y \Q\!" + by (metis prod.exhaust split_conv) + +lemma validE_NF_case_prod[wp]: + "\ \a b. \P a b\ f a b \Q\, \E\! \ \ + \case x of (a, b) \ P a b\ case x of (a, b) \ f a b \Q\, \E\!" + unfolding validE_NF_alt_def + by (erule validNF_case_prod) + +lemma no_fail_is_validNF_True: + "no_fail P s = (\ P \ s \ \_ _. True \!)" + by (clarsimp simp: no_fail_def validNF_def valid_def) + + +subsection \@{const validNF} reasoning in the exception monad\ + +lemma validE_NF[intro?]: + "\ \ P \ f \ Q \,\ E \; no_fail P f \ \ \ P \ f \ Q \,\ E \!" + by (clarsimp simp: validE_NF_def) + +lemma validE_NFE: + "\ \ P \ f \ Q \,\ E \!; \ \ P \ f \ Q \,\ E \; no_fail P f \ \ R \ \ R" + by (clarsimp simp: validE_NF_def) + +lemma validE_NF_valid: + "\ \ P \ f \ Q \,\ E \! \ \ \ P \ f \ Q \,\ E \" + by (rule validE_NFE) + +lemma validE_NF_no_fail: + "\ \ P \ f \ Q \,\ E \! \ \ no_fail P f" + by (rule validE_NFE) + +lemma validE_NF_weaken_pre[wp_pre]: + "\\Q\ a \R\,\E\!; \s. P s \ Q s\ \ \P\ a \R\,\E\!" + by (simp add: validE_NF_alt_def validNF_weaken_pre) + +lemma validE_NF_post_comb_conj_L: + "\ \P\ f \Q\, \E\!; \P'\ f \Q'\, \\_ _. True\ \ \ + \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\!" + unfolding validE_NF_alt_def + by (fastforce simp: validE_def validNF_def valid_def no_fail_def split: sum.splits) + +lemma validE_NF_post_comb_conj_R: + "\ \P\ f \Q\, \\_ _. True\; \P'\ f \Q'\, \E\! \ \ + \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\!" + unfolding validE_NF_alt_def validE_def validNF_def valid_def no_fail_def + by (force split: sum.splits) + +lemma validE_NF_post_comb_conj: + "\ \P\ f \Q\, \E\!; \P'\ f \Q'\, \E\! \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\!" + unfolding validE_NF_alt_def validE_def validNF_def valid_def no_fail_def + by (force split: sum.splits) + +lemma validE_NF_chain: + "\ \P'\ a \R'\,\E'\!; \s. P s \ P' s; \r' s'. R' r' s' \ R r' s'; + \r'' s''. E' r'' s'' \ E r'' s''\ \ + \\s. P s \ a \\r' s'. R r' s'\,\\r'' s''. E r'' s''\!" + by (fastforce simp: validE_NF_def validE_def2 no_fail_def Ball_def split: sum.splits) + +lemma validE_NF_bind_wp[wp]: + "\\x. \B x\ g x \C\, \E\!; \A\ f \B\, \E\!\ \ \A\ f >>=E (\x. g x) \C\, \E\!" + by (blast intro: validE_NF bindE_wp no_fail_pre no_fail_bindE validE_validE_R hoare_chainE + elim!: validE_NFE) + +lemma validNF_catch[wp]: + "\\x. \E x\ handler x \Q\!; \P\ f \Q\, \E\!\ \ \P\ f (\x. handler x) \Q\!" + unfolding validE_NF_alt_def catch_def lift_def throwError_def + by (clarsimp simp: validNF_return split: sum.splits elim!: validNF_bind_wp_fwd) + +lemma validNF_throwError[wp]: + "\E e\ throwError e \P\, \E\!" 
+ by (unfold validE_NF_alt_def throwError_def o_def) wpsimp + +lemma validNF_returnOk[wp]: + "\P e\ returnOk e \P\, \E\!" + by (clarsimp simp: validE_NF_alt_def returnOk_def) wpsimp + +lemma validNF_whenE[wp]: + "(P \ \Q\ f \R\, \E\!) \ \if P then Q else R ()\ whenE P f \R\, \E\!" + unfolding whenE_def by wpsimp + +lemma validNF_nobindE[wp]: + "\ \B\ g \C\,\E\!; \A\ f \\r s. B s\,\E\! \ \ \A\ doE f; g odE \C\,\E\!" + by wpsimp + +text \ + Set up triple rules for @{term validE_NF} so that we can use @{method wp} combinator rules.\ +definition validE_NF_property :: + "('a \ 's \ bool) \ ('c \ 's \ bool) \ 's \ ('s, 'c+'a) tmonad \ bool" + where + "validE_NF_property Q E s b \ + \ failed (b s) \ (\(r', s') \ mres (b s). case r' of Inl x \ E x s' | Inr x \ Q x s')" + +lemma validE_NF_is_triple[wp_trip]: + "validE_NF P f Q E = triple_judgement P f (validE_NF_property Q E)" + by (fastforce simp: validE_NF_def validE_def2 no_fail_def triple_judgement_def + validE_NF_property_def + split: sum.splits) + +lemma validNF_cong: + "\ \s. P s = P' s; \s. P s \ m s = m' s; + \r' s' s. \ P s; (r', s') \ mres (m s) \ \ Q r' s' = Q' r' s' \ \ + (\P\ m \Q\!) = (\P'\ m' \Q'\!)" + by (fastforce simp: validNF_alt_def) + +lemma validE_NF_liftE[wp]: + "\P\ f \Q\! \ \P\ liftE f \Q\,\E\!" + by (wpsimp simp: validE_NF_alt_def liftE_def) + +lemma validE_NF_handleE'[wp]: + "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ + \P\ f (\x. handler x) \Q\,\E\!" + unfolding validE_NF_alt_def handleE'_def + apply (erule validNF_bind_wp_fwd) + apply (clarsimp split: sum.splits) + apply wpsimp + done + +lemma validE_NF_handleE[wp]: + "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ + \P\ f handler \Q\,\E\!" + unfolding handleE_def + by (metis validE_NF_handleE') + +lemma validE_NF_condition[wp]: + "\ \ Q \ A \P\,\ E \!; \ R \ B \P\,\ E \!\ \ + \\s. if C s then Q s else R s\ condition C A B \P\,\ E \!" + by (erule validE_NFE)+ (wpsimp wp: validE_NF) + +lemma hoare_assume_preNF: + "(\s. P s \ \P\ f \Q\!) \ \P\ f \Q\!" + by (simp add: validNF_alt_def) + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_VCG.thy b/lib/Monads/trace/Trace_VCG.thy new file mode 100644 index 0000000000..25bd787b85 --- /dev/null +++ b/lib/Monads/trace/Trace_VCG.thy @@ -0,0 +1,1447 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_VCG + imports + Trace_Lemmas + WPSimp +begin + +section \Hoare Logic\ + +subsection \Validity\ + +text \ + This section defines a Hoare logic for partial correctness for + the interference trace monad as well as the exception monad. + The logic talks only about the behaviour part of the monad and ignores + failures and the trace. + + The logic is defined semantically. Rules work directly on the + validity predicate. + + In the interference trace monad, validity is a triple of precondition, + monad, and postcondition. The precondition is a function from state to + bool (a state predicate), the postcondition is a function from return value + to state to bool. A triple is valid if for all states that satisfy the + precondition, all result values and result states that are returned by + the monad satisfy the postcondition. Note that if the result of the + computation is the empty set then the triple is trivially valid. This means + @{term "assert P"} does not require us to prove that @{term P} holds, but + rather allows us to assume @{term P}! 
Proving non-failure is done via a + separate predicate and calculus (see @{text Trace_No_Fail}).\ +definition valid :: + "('s \ bool) \ ('s,'a) tmonad \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /\_\") where + "\P\ f \Q\ \ \s. P s \ (\(r,s') \ mres (f s). Q r s')" + +text \ + We often reason about invariant predicates. The following provides shorthand syntax + that avoids repeating potentially long predicates.\ +abbreviation (input) invariant :: "('s,'a) tmonad \ ('s \ bool) \ bool" ("_ \_\" [59,0] 60) where + "invariant f P \ \P\ f \\_. P\" + +text \ + Validity for the exception monad is similar and build on the standard + validity above. Instead of one postcondition, we have two: one for + normal and one for exceptional results.\ +definition validE :: + "('s \ bool) \ ('s, 'a + 'b) tmonad \ ('b \ 's \ bool) \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /(\_\,/ \_\)" ) where + "\P\ f \Q\,\E\ \ \P\ f \ \v s. case v of Inr r \ Q r s | Inl e \ E e s \" + +lemma validE_def2: + "\P\ f \Q\,\E\ \ \s. P s \ (\(r,s') \ mres (f s). case r of Inr b \ Q b s' | Inl a \ E a s')" + by (unfold valid_def validE_def) + +text \ + The following two instantiations are convenient to separate reasoning for exceptional and + normal case.\ +(* Narrator: they are in fact not convenient, and are now considered a mistake that should have + been an abbreviation instead. *) +definition validE_R :: (* FIXME lib: this should be an abbreviation *) + "('s \ bool) \ ('s, 'e + 'a) tmonad \ ('a \ 's \ bool) \ bool" ("\_\/ _ /\_\, -") where + "\P\ f \Q\,- \ validE P f Q (\x y. True)" + +definition validE_E :: (* FIXME lib: this should be an abbreviation *) + "('s \ bool) \ ('s, 'e + 'a) tmonad \ ('e \ 's \ bool) \ bool" ("\_\/ _ /-, \_\") where + "\P\ f -,\Q\ \ validE P f (\x y. True) Q" + +(* These lemmas are useful to apply to rules to convert valid rules into a format suitable for wp. *) +lemma valid_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') \ f \ Q' \" + by (auto simp add: valid_def split: prod.splits) + +lemma validE_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \, \ \rv s. E s0 rv s \) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') + \ (\rv s'. E s0 rv s' \ E' rv s') \ f \ Q' \, \ E' \" + by (auto simp add: validE_def valid_def split: prod.splits sum.splits) + + +section \Pre Lemmas\ + +lemma hoare_pre_imp: + "\ \s. P s \ Q s; \Q\ a \R\ \ \ \P\ a \R\" + by (fastforce simp: valid_def) + +lemmas hoare_weaken_pre = hoare_pre_imp[rotated] + +lemma hoare_weaken_preE: + "\ \Q\ f \R\,\E\; \s. P s \ Q s \ \ \P\ f \R\,\E\" + by (fastforce simp: validE_def2) + +lemma hoare_weaken_preE_R: (* FIXME lib: rename to hoare_weaken_preE_R *) + "\ \P'\ f \Q\,-; \s. P s \ P' s \ \ \P\ f \Q\,-" + unfolding validE_R_def + by (rule hoare_weaken_preE) + +lemma hoare_weaken_preE_E: + "\ \P'\ f -,\Q\; \s. 
P s \ P' s \ \ \P\ f -,\Q\" + by (fastforce simp: validE_E_def validE_def valid_def) + +lemmas hoare_pre [wp_pre] = + hoare_weaken_pre + hoare_weaken_preE + hoare_weaken_preE_R + hoare_weaken_preE_E + + +subsection \Setting up the precondition case splitter.\ + +lemma wpc_helper_valid: + "\Q\ g \S\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ g \S\" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +lemma wpc_helper_validE: + "\Q\ f \R\,\E\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f \R\,\E\" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +lemma wpc_helper_validE_R: + "\Q\ f \R\,- \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f \R\,-" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +lemma wpc_helper_validR_R: + "\Q\ f -,\E\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f -,\E\" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + + +wpc_setup "\m. \P\ m \Q\" wpc_helper_valid +wpc_setup "\m. \P\ m \Q\,\E\" wpc_helper_validE +wpc_setup "\m. \P\ m \Q\,-" wpc_helper_validE_R +wpc_setup "\m. \P\ m -,\E\" wpc_helper_validR_R + + +subsection "Hoare Logic Rules" + +lemma bind_wp[wp_split]: + "\ \r. \Q' r\ g r \Q\; \P\ f \Q'\ \ \ \P\ f >>= (\rv. g rv) \Q\" + by (fastforce simp: valid_def bind_def' mres_def intro: image_eqI[rotated]) + +lemma bindE_wp[wp_split]: + "\ \r. \Q' r\ g r \Q\,\E\; \P\ f \Q'\,\E\ \ \ \P\ f >>=E (\rv. g rv) \Q\,\E\" + by (fastforce simp: validE_def2 bindE_def bind_def throwError_def return_def lift_def mres_def image_def + split: sum.splits tmres.splits) + +lemma bindE_R_wp: + "\\r. \Q' r\ g r \Q\,-; \P\ f \Q'\,-\ \ \P\ f >>=E (\rv. g rv) \Q\,-" + apply (clarsimp simp: validE_R_def) + by (wp | assumption)+ + +lemma bindE_E_wp: + "\\r. \Q' r\ g r -,\E\; \P\ f \Q'\,\E\\ \ \P\ f >>=E (\rv. g rv) -,\E\" + apply (clarsimp simp: validE_E_def) + by (wp | assumption)+ + +lemmas bind_wp_fwd = bind_wp[rotated] +lemmas bindE_wp_fwd = bindE_wp[rotated] + +lemma bind_wpE_R: + "\\x. \Q' x\ g x \Q\,-; \P\ f \Q'\\ \ \P\ f >>= g \Q\,-" + apply (clarsimp simp: validE_R_def validE_def) + by (wp | assumption)+ + +lemma bind_wpE_E: + "\\x. \Q' x\ g x -,\E\; \P\ f \Q'\\ \ \P\ f >>= g -,\E\" + apply (clarsimp simp: validE_E_def validE_def) + by (wp | assumption)+ + +lemma bind_wpE: + "\\x. \Q' x\ g x \Q\,\E\; \P\ f \Q'\\ \ \P\ f >>= g \Q\,\E\" + apply (clarsimp simp: validE_def) + by (wp | assumption)+ + +lemma hoare_TrueI: + "\P\ f \\_. \\" + by (simp add: valid_def) + +lemma hoareE_TrueI: + "\P\ f \\_. \\, \\_. \\" + by (simp add: validE_def valid_def) + +lemma hoareE_R_TrueI: + "\P\ f \\_. \\, -" + by (auto simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoareE_E_TrueI: + "\P\ f -,\\_. 
\\" + by (auto simp: validE_E_def validE_def valid_def split: sum.splits) + +lemmas wp_post_taut = hoare_TrueI[where P=\] +lemmas wp_post_tautE = hoareE_TrueI[where P=\] +lemmas wp_post_tautE_R = hoareE_R_TrueI[where P=\] +lemmas wp_post_tautE_E = hoareE_E_TrueI[where P=\] +lemmas wp_post_tauts[intro] = wp_post_taut wp_post_tautE wp_post_tautE_R wp_post_tautE_E + +lemma hoare_post_conj[intro]: + "\ \P\ f \Q\; \P\ f \R\ \ \ \P\ f \Q and R\" + by (fastforce simp: valid_def) + +lemma hoare_pre_disj[intro]: + "\ \P\ f \R\; \Q\ f \R\ \ \ \P or Q\ f \R\" + by (simp add:valid_def pred_disj_def) + +lemma hoare_conj: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \P and P'\ f \Q and Q'\" + unfolding valid_def by auto + +lemma hoare_pre_cont[simp]: + "\\\ f \P\" + by (simp add:valid_def) + +lemma hoare_FalseE[simp]: + "\\\ f \Q\, \E\" + by (simp add: valid_def validE_def) + +lemma hoare_return_drop_var[iff]: + "\Q\ return x \\r. Q\" + by (simp add: valid_def return_def mres_def) + +lemma hoare_gets[intro]: + "\ \s. P s \ Q (f s) s \ \ \P\ gets f \Q\" + by (simp add:valid_def gets_def get_def bind_def return_def mres_def) + +lemma hoare_modifyE_var: + "\ \s. P s \ Q (f s) \ \ \P\ modify f \\_ s. Q s\" + by(simp add: valid_def modify_def put_def get_def bind_def mres_def) + +lemma hoare_if: + "\ P \ \Q\ a \R\; \ P \ \Q\ b \R\ \ \ \Q\ if P then a else b \R\" + by (simp add: valid_def) + +lemma hoare_pre_subst: + "\ A = B; \A\ a \C\ \ \ \B\ a \C\" + by (erule subst) + +lemma hoare_post_subst: + "\ B = C; \A\ a \B\ \ \ \A\ a \C\" + by (erule subst) + +lemma hoare_post_imp: + "\ \rv s. Q rv s \ R rv s; \P\ a \Q\ \ \ \P\ a \R\" + by(fastforce simp:valid_def split_def) + +lemma hoare_post_impE: + "\ \rv s. Q rv s \ R rv s; \e s. E e s \ F e s; \P\ a \Q\,\E\ \ \ \P\ a \R\,\F\" + by(fastforce simp: validE_def2 split: sum.splits) + +lemmas hoare_strengthen_post = hoare_post_imp[rotated] +lemmas hoare_strengthen_postE = hoare_post_impE[rotated 2] + +lemma hoare_strengthen_postE_R: + "\ \P\ f \Q'\,-; \rv s. Q' rv s \ Q rv s \ \ \P\ f \Q\,-" + unfolding validE_R_def + by (erule hoare_post_impE) + +lemma hoare_strengthen_postE_E: + "\ \P\ f -,\Q'\; \rv s. Q' rv s \ Q rv s \ \ \P\ f -,\Q\" + unfolding validE_E_def + by (rule hoare_post_impE) + +lemma hoare_validE_cases: + "\ \P\ f \Q\, \\_ _. True\; \P\ f \\_ _. True\, \R\ \ \ \P\ f \Q\, \R\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc: + "\\P\ a \\_. Q\; \s. Q s \ R s\ \ \P\ a \\_. R\, \\_. R\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc2: + "\\P\ a \\_. Q\; \s. Q s \ R s\ \ \P\ a \\_. R\, \\_. \\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc2E: + "\\P\ a \\_. Q\; \s. Q s \ R s\ \ \P\ a \\_. \\, \\_. R\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc2_actual: + "\P\ a \\_. R\ \ \P\ a \\_. R\, \\_. \\" + by (rule hoare_post_imp_dc2) + +lemma hoare_post_imp_dc2E_actual: + "\P\ a \\_. R\ \ \P\ a \\_. \\, \\_. R\" + by (rule hoare_post_imp_dc2E) + +lemma hoare_conjD1: + "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. Q rv\" + unfolding valid_def by auto + +lemma hoare_conjD2: + "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. R rv\" + unfolding valid_def by auto + +lemma hoare_post_disjI1: + "\P\ f \\rv. Q rv\ \ \P\ f \\rv. Q rv or R rv\" + unfolding valid_def by auto + +lemma hoare_post_disjI2: + "\P\ f \\rv. R rv\ \ \P\ f \\rv. 
Q rv or R rv\" + unfolding valid_def by auto + +lemma use_valid: + "\(r, s') \ mres (f s); \P\ f \Q\; P s \ \ Q r s'" + unfolding valid_def by blast + +lemmas post_by_hoare = use_valid[rotated] + +lemma use_valid_inv: + assumes step: "(r, s') \ mres (f s)" + assumes pres: "\N. \\s. N (P s) \ E s\ f \\rv s. N (P s)\" + shows "E s \ P s = P s'" + using use_valid[where f=f, OF step pres[where N="\p. p = P s"]] by simp + +lemma use_validE_norm: + "\ (Inr r', s') \ mres (B s); \P\ B \Q\,\ E \; P s \ \ Q r' s'" + unfolding validE_def valid_def by force + +lemma use_validE_except: + "\ (Inl r', s') \ mres (B s); \P\ B \Q\,\ E \; P s \ \ E r' s'" + unfolding validE_def valid_def by force + +lemma in_inv_by_hoareD: + "\ \P. f \P\; (x,s') \ mres (f s) \ \ s' = s" + by (auto simp add: valid_def) blast + + +subsection \Misc\ + +lemma hoare_gen_asm: + "(P \ \P'\ f \Q\) \ \P' and K P\ f \Q\" + by (fastforce simp add: valid_def) + +lemmas hoare_gen_asm_single = hoare_gen_asm[where P'="\", simplified pred_conj_def simp_thms] + +lemma hoare_gen_asm_lk: + "(P \ \P'\ f \Q\) \ \K P and P'\ f \Q\" + by (fastforce simp add: valid_def) + +\ \Useful for forward reasoning, when P is known. + The first version allows weakening the precondition.\ +lemma hoare_gen_asm_spec': + "\ \s. P s \ S \ R s; S \ \R\ f \Q\ \ \ \P\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_gen_asm_spec: + "\ \s. P s \ S; S \ \P\ f \Q\ \ \ \P\ f \Q\" + by (rule hoare_gen_asm_spec'[where S=S and R=P]) simp + +lemma hoare_conjI: + "\ \P\ f \Q\; \P\ f \R\ \ \ \P\ f \\r s. Q r s \ R r s\" + unfolding valid_def by blast + +lemma hoare_disjI1: + "\ \P\ f \Q\ \ \ \P\ f \\rv s. Q rv s \ R rv s \" + unfolding valid_def by blast + +lemma hoare_disjI2: + "\ \P\ f \R\ \ \ \P\ f \\rv s. Q rv s \ R rv s \" + unfolding valid_def by blast + +lemma hoare_assume_pre: + "(\s. P s \ \P\ f \Q\) \ \P\ f \Q\" + by (auto simp: valid_def) + +lemma hoare_assume_preE: + "(\s. P s \ \P\ f \Q\,\R\) \ \P\ f \Q\,\R\" + by (auto simp: valid_def validE_def) + +lemma hoare_allI: + "(\x. \P\f\Q x\) \ \P\f\\rv s. \x. Q x rv s\" + by (simp add: valid_def) blast + +lemma validE_allI: + "(\x. \P\ f \\r s. Q x r s\,\E\) \ \P\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: valid_def validE_def split: sum.splits) + +lemma hoare_exI: + "\P\ f \Q x\ \ \P\ f \\rv s. \x. Q x rv s\" + by (simp add: valid_def) blast + +lemma hoare_impI: + "(R \ \P\ f \Q\) \ \P\ f \\rv s. R \ Q rv s\" + by (simp add: valid_def) blast + +lemma validE_impI: + "\\E. \P\ f \\_ _. True\,\E\; (P' \ \P\ f \Q\,\E\)\ \ + \P\ f \\rv s. P' \ Q rv s\, \E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_case_option_wp: + "\ \P\ f None \Q\; \x. \P' x\ f (Some x) \Q' x\ \ + \ \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\" + by (cases v) auto + +lemma hoare_case_option_wp2: + "\ \P\ f None \Q\; \x. \P' x\ f (Some x) \Q' x\ \ + \ \case_option P P' v\ f v \\rv s. case v of None \ Q rv s | Some x \ Q' x rv s\" + by (cases v) auto + +(* Might be useful for forward reasoning, when P is known. *) +lemma hoare_when_cases: + "\\s. \\B; P s\ \ Q s; B \ \P\ f \\_. Q\\ \ \P\ when B f \\_. Q\" + by (cases B; simp add: valid_def return_def mres_def) + +lemma hoare_vcg_prop: + "\\s. P\ f \\rv s. P\" + by (simp add: valid_def) + + +subsection \@{const valid} and @{const validE}, @{const validE_R}, @{const validE_E}\ + +lemma valid_validE: + "\P\ f \\_. Q\ \ \P\ f \\_. Q\, \\_. Q\" + by (rule hoare_post_imp_dc) + +lemma valid_validE2: + "\ \P\ f \\_. Q'\; \s. 
Q' s \ Q s; \s. Q' s \ E s \ \ \P\ f \\_. Q\, \\_. E\" + unfolding valid_def validE_def + by (clarsimp split: sum.splits) blast + +lemma validE_valid: + "\P\ f \\_. Q\, \\_. Q\ \ \P\ f \\_. Q\" + unfolding validE_def + by fastforce + +lemma valid_validE_R: + "\P\ f \\_. Q\ \ \P\ f \\_. Q\,-" + by (simp add: validE_R_def hoare_strengthen_postE[OF valid_validE]) + +lemma valid_validE_E: + "\P\ f \\_. Q\ \ \P\ f -,\\_. Q\" + by (simp add: validE_E_def hoare_strengthen_postE[OF valid_validE]) + +lemma validE_validE_R: + "\P\ f \Q\,\\\\ \ \P\ f \Q\,-" + by (simp add: validE_R_def) + +lemma validE_R_validE: + "\P\ f \Q\,- \ \P\ f \Q\,\\\\" + by (simp add: validE_R_def) + +lemma validE_validE_E: + "\P\ f \\\\, \E\ \ \P\ f -, \E\" + by (simp add: validE_E_def) + +lemma validE_E_validE: + "\P\ f -, \E\ \ \P\ f \\\\, \E\" + by (simp add: validE_E_def) + +lemma validE_eq_valid: + "\P\ f \\rv. Q\,\\rv. Q\ = \P\ f \\rv. Q\" + by (simp add: validE_def) + + +subsection \@{const liftM}\ + +lemma in_image_constant: + "(x \ (\_. v) ` S) = (x = v \ S \ {})" + by (simp add: image_constant_conv) + +lemma hoare_liftM_subst: + "\P\ liftM f m \Q\ = \P\ m \Q \ f\" + apply (simp add: liftM_def bind_def' return_def split_def) + apply (simp add: valid_def Ball_def mres_def image_Un) + apply (simp add: image_image in_image_constant) + apply force + done + +lemma hoare_liftME_subst: + "\P\ liftME f m \Q\, \E\ = \P\ m \Q \ f\, \E\" + unfolding validE_def liftME_liftM hoare_liftM_subst o_def + by (fastforce intro!: arg_cong[where f="valid P m"] split: sum.splits) + +lemma liftE_validE[simp]: + "\P\ liftE f \Q\, \E\ = \P\ f \Q\" + by (simp add: liftE_liftM validE_def hoare_liftM_subst o_def) + + +subsection \Operator lifting/splitting\ + +lemma hoare_vcg_if_split: + "\ P \ \Q\ f \S\; \P \ \R\ g \S\ \ \ \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" + by simp + +lemma hoare_vcg_if_splitE: + "\ P \ \Q\ f \S\,\E\; \P \ \R\ g \S\,\E\ \ \ + \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\,\E\" + by simp + +lemma hoare_vcg_split_case_option: + "\ \x. x = None \ \P x\ f x \R x\; \x y. x = Some y \ \Q x y\ g x y \R x\ \ + \ \\s. (x = None \ P x s) \ (\y. x = Some y \ Q x y s)\ + case x of None \ f x | Some y \ g x y + \R x\" + by (cases x; simp) + +lemma hoare_vcg_split_case_optionE: + "\ \x. x = None \ \P x\ f x \R x\,\E x\; \x y. x = Some y \ \Q x y\ g x y \R x\,\E x\ \ + \ \\s. (x = None \ P x s) \ (\y. x = Some y \ Q x y s)\ + case x of None \ f x | Some y \ g x y + \R x\, \E x\" + by (cases x; simp) + +lemma hoare_vcg_split_case_sum: + "\ \x a. x = Inl a \ \P x a\ f x a \R x\; \x b. x = Inr b \ \Q x b\ g x b \R x\ \ + \ \\s. (\a. x = Inl a \ P x a s) \ (\b. x = Inr b \ Q x b s)\ + case x of Inl a \ f x a | Inr b \ g x b + \R x\" + by (cases x; simp) + +lemma bind_wp_nobind: + "\ \Q'\ g \Q\; \P\ f \\_. Q'\ \ \ \P\ do f; g od \Q\" + by (erule bind_wp_fwd) (clarsimp simp: valid_def) + +lemma bindE_wp_nobind: + "\ \Q'\ g \Q\, \E\; \P\ f \\_. Q'\, \E\ \ \ \P\ doE f; g odE \Q\, \E\" + by (erule bindE_wp_fwd) (clarsimp simp: validE_def) + +lemmas bind_wp_skip = bind_wp[where Q=Q and Q'=Q for Q] + +lemma hoare_chain: + "\ \P\ f \Q\; \s. R s \ P s; \rv s. Q rv s \ S rv s \ \ \R\ f \S\" + by (wp_pre, rule hoare_post_imp) + +lemma hoare_chainE: + "\ \P'\ A \Q'\,\E'\; \s. P s \ P' s; \rv s. Q' rv s \ Q rv s; \rv s. E' rv s \ E rv s \ + \ \P\ A \Q\,\E\" + by wp_pre (rule hoare_post_impE) + +lemma hoare_vcg_conj_lift: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. 
Q rv s \ Q' rv s\" + unfolding valid_def + by fastforce + +\ \A variant which works nicely with subgoals that do not contain schematics\ +lemmas hoare_vcg_conj_lift_pre_fix = hoare_vcg_conj_lift[where P=R and P'=R for R, simplified] + +lemma hoare_vcg_conj_liftE1: + "\ \P\ f \Q\,-; \P'\ f \Q'\,\E\ \ \ \P and P'\ f \\rv s. Q rv s \ Q' rv s\,\E\" + unfolding valid_def validE_R_def validE_def + by (fastforce simp: split_def split: sum.splits) + +lemma hoare_vcg_conj_liftE2: + "\ \P\ f -,\E\; \P'\ f \Q\,\E'\ \ \ \P and P'\ f \Q\,\\rv s. E rv s \ E' rv s\" + unfolding valid_def validE_E_def validE_def + by (fastforce simp: split_def split: sum.splits) + +lemma hoare_vcg_conj_liftE_weaker: + assumes "\P\ f \Q\, \E\" + assumes "\P'\ f \Q'\, \E\" + shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\" + apply (rule hoare_pre) + apply (fastforce intro: assms hoare_vcg_conj_liftE1 validE_validE_R hoare_post_impE) + apply simp + done + +lemma hoare_vcg_disj_lift: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" + unfolding valid_def + by fastforce + +lemma hoare_vcg_disj_lift_R: + assumes x: "\P\ f \Q\,-" + assumes y: "\P'\ f \Q'\,-" + shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" + using assms + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_vcg_const_Ball_lift: + "\ \x. x \ S \ \P x\ f \Q x\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\" + by (fastforce simp: valid_def) + +lemma hoare_vcg_const_Ball_liftE: + "\ \x. x \ S \ \P x\ f \Q x\,\E\; \\s. True\ f \\r s. True\, \E\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_const_Ball_lift_R: + "\ \x. x \ S \ \P x\ f \Q x\,- \ \ \\s. \x \ S. P x s\ f \\rv s. \x \ S. Q x rv s\,-" + unfolding validE_R_def validE_def + by (rule hoare_strengthen_post) + (fastforce intro!: hoare_vcg_const_Ball_lift split: sum.splits)+ + +lemma hoare_vcg_const_Ball_lift_E_E: + "(\x. x \ S \ \P x\ f -,\Q x\) \ \\s. \x \ S. P x s\ f -,\\rv s. \x \ S. Q x rv s\" + unfolding validE_E_def validE_def valid_def + by (fastforce split: sum.splits) + +lemma hoare_vcg_all_lift: + "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" + by (fastforce simp: valid_def) + +lemma hoare_vcg_all_liftE: + "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_all_liftE_R: + "(\x. \P x\ f \Q x\, -) \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\, -" + by (rule hoare_vcg_const_Ball_lift_R[where S=UNIV, simplified]) + +lemma hoare_vcg_all_liftE_E: + "(\x. \P x\ f -, \Q x\) \ \\s. \x. P x s\ f -,\\rv s. \x. Q x rv s\" + by (rule hoare_vcg_const_Ball_lift_E_E[where S=UNIV, simplified]) + +lemma hoare_vcg_imp_lift: + "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" + by (simp only: imp_conv_disj) (rule hoare_vcg_disj_lift) + +lemma hoare_vcg_imp_lift': + "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. \ P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" + by (wpsimp wp: hoare_vcg_imp_lift) + +lemma hoare_vcg_imp_liftE: + "\ \P'\ f \\rv s. \ P rv s\, \E\; \Q'\ f \Q\, \E\ \ + \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, \E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_imp_liftE': + "\ \P'\ f \\rv s. \ P rv s\, \E\; \Q'\ f \Q\, \E\ \ + \ \\s. \ P' s \ Q' s\ f \\rv s. 
P rv s \ Q rv s\, \E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_imp_lift_R: + "\ \P'\ f \\rv s. \ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" + by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_lift_R': + "\ \P'\ f \\rv s. \ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. \P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" + by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_liftE_E: + "\\P'\ f -, \\rv s. \ P rv s\; \Q'\ f -, \Q\\ \ + \\s. P' s \ Q' s\ f -, \\rv s. P rv s \ Q rv s\" + by (auto simp add: valid_def validE_E_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_liftE_E': + "\\P'\ f -, \\rv s. \ P rv s\; \Q'\ f -, \Q\\ \ + \\s. \ P' s \ Q' s\ f -, \\rv s. P rv s \ Q rv s\" + by (auto simp add: valid_def validE_E_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_conj_lift[wp_comb]: + "\ \P\ f \\rv s. Q rv s \ Q' rv s\; \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ \ \ + \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" + by (auto simp: valid_def) + +lemmas hoare_vcg_imp_conj_lift'[wp_unsafe] = hoare_vcg_imp_conj_lift[where Q'''="\\", simplified] + +lemma hoare_absorb_imp: + "\ P \ f \\rv s. Q rv s \ R rv s\ \ \ P \ f \\rv s. Q rv s \ R rv s\" + by (erule hoare_post_imp[rotated], blast) + +lemma hoare_weaken_imp: + "\ \rv s. Q rv s \ Q' rv s ; \P\ f \\rv s. Q' rv s \ R rv s\ \ + \ \P\ f \\rv s. Q rv s \ R rv s\" + by (clarsimp simp: valid_def split_def) + +lemma hoare_vcg_const_imp_lift: + "\ P \ \Q\ m \R\ \ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\" + by (cases P, simp_all add: hoare_vcg_prop) + +lemma hoare_vcg_const_imp_lift_E: + "(P \ \Q\ f -, \R\) \ \\s. P \ Q s\ f -, \\rv s. P \ R rv s\" + by (fastforce simp: validE_E_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_vcg_const_imp_lift_R: + "(P \ \Q\ m \R\,-) \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" + by (fastforce simp: validE_R_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_weak_lift_imp: + "\P'\ f \Q\ \ \\s. P \ P' s\ f \\rv s. P \ Q rv s\" + by (auto simp add: valid_def split_def) + +lemma hoare_weak_lift_impE: + "\Q\ m \R\,\E\ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,\\rv s. P \ E rv s\" + by (cases P; simp add: validE_def hoare_vcg_prop) + +lemma hoare_weak_lift_imp_R: + "\Q\ m \R\,- \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" + by (cases P; wpsimp wp: wp_post_tautE_R) + +lemma hoare_vcg_ex_lift: + "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" + by (clarsimp simp: valid_def, blast) + +lemma hoare_vcg_ex_liftE: + "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_ex_liftE_E: + "\ \x. \P x\ f -,\E x\ \ \ \\s. \x. P x s\ f -,\\rv s. \x. E x rv s\" + by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) + +lemma hoare_vcg_ex_lift_R1: + "(\x. \P x\ f \Q\, -) \ \\s. \x. P x s\ f \Q\, -" + by (fastforce simp: valid_def validE_R_def validE_def split: sum.splits) + +lemma hoare_liftP_ext: + assumes "\P x. m \\s. P (f s x)\" + shows "m \\s. P (f s)\" + unfolding valid_def + apply clarsimp + apply (erule subst[rotated, where P=P]) + apply (rule ext) + apply (drule use_valid, rule assms, rule refl) + apply simp + done + +(* for instantiations *) +lemma hoare_triv: "\P\f\Q\ \ \P\f\Q\" . +lemma hoare_trivE: "\P\ f \Q\,\E\ \ \P\ f \Q\,\E\" . 
+lemma hoare_trivE_R: "\P\ f \Q\,- \ \P\ f \Q\,-" . +lemma hoare_trivR_R: "\P\ f -,\E\ \ \P\ f -,\E\" . + +lemma hoare_vcg_E_conj: + "\ \P\ f -,\E\; \P'\ f \Q'\,\E'\ \ \ \\s. P s \ P' s\ f \Q'\, \\rv s. E rv s \ E' rv s\" + unfolding validE_def validE_E_def + by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) + +lemma hoare_vcg_E_elim: + "\ \P\ f -,\E\; \P'\ f \Q\,- \ \ \\s. P s \ P' s\ f \Q\,\E\" + by (rule hoare_strengthen_postE[OF hoare_vcg_E_conj]) (simp add: validE_R_def)+ + +lemma hoare_vcg_R_conj: + "\ \P\ f \Q\,-; \P'\ f \Q'\,- \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" + unfolding validE_R_def validE_def + by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) + +lemma hoare_lift_Pf_E_R: + "\ \x. \P x\ m \\_. P x\, -; \P. \\s. P (f s)\ m \\_ s. P (f s)\, - \ \ + \\s. P (f s) s\ m \\_ s. P (f s) s\, -" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_lift_Pf_E_E: + "\ \x. \P x\ m -, \\_. P x\; \P. \\s. P (f s)\ m -, \\_ s. P (f s)\ \ \ + \\s. P (f s) s\ m -, \\_ s. P (f s) s\" + by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) + +lemma hoare_post_comb_imp_conj: + "\ \P'\ f \Q\; \P\ f \Q'\; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\" + by (wpsimp wp: hoare_vcg_conj_lift) + +lemma hoare_vcg_if_lift: + "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ + \R\ f \\rv s. if P then X rv s else Y rv s\" + + "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ + \R\ f \\rv. if P then X rv else Y rv\" + by (auto simp: valid_def split_def) + +lemma hoare_vcg_split_lift[wp]: + "\P\ f x y \Q\ \ \P\ case (x, y) of (a, b) \ f a b \Q\" + by simp + +named_theorems hoare_vcg_op_lift +lemmas [hoare_vcg_op_lift] = + hoare_vcg_const_imp_lift + hoare_vcg_const_imp_lift_E + hoare_vcg_const_imp_lift_R + (* leaving out hoare_vcg_conj_lift*, because that is built into wp *) + hoare_vcg_disj_lift + hoare_vcg_disj_lift_R + hoare_vcg_ex_lift + hoare_vcg_ex_liftE + hoare_vcg_ex_liftE_E + hoare_vcg_all_lift + hoare_vcg_all_liftE + hoare_vcg_all_liftE_E + hoare_vcg_all_liftE_R + hoare_vcg_const_Ball_lift + hoare_vcg_const_Ball_liftE + hoare_vcg_const_Ball_lift_R + hoare_vcg_const_Ball_lift_E_E + hoare_vcg_split_lift + hoare_vcg_if_lift + hoare_vcg_imp_lift' + hoare_vcg_imp_liftE' + hoare_vcg_imp_lift_R' + hoare_vcg_imp_liftE_E' + + +subsection \Weakest Precondition Rules\ + +lemma fail_wp: + "\\\ fail \Q\" + by (simp add: valid_def fail_def mres_def vimage_def) + +lemma return_wp: + "\P x\ return x \P\" + by(simp add: valid_def return_def mres_def) + +lemma get_wp: + "\\s. P s s\ get \P\" + by (simp add: valid_def get_def mres_def) + +lemma gets_wp: + "\\s. P (f s) s\ gets f \P\" + by(simp add: valid_def split_def gets_def return_def get_def bind_def mres_def) + +lemma put_wp: + "\\_. Q () s\ put s \Q\" + by (simp add: put_def valid_def mres_def) + +lemma modify_wp: + "\\s. Q () (f s)\ modify f \Q\" + unfolding modify_def + by (wp put_wp get_wp) + +lemma failE_wp: + "\\\ fail \Q\, \E\" + by (simp add: validE_def fail_wp) + +lemma returnOk_wp: + "\P x\ returnOk x \P\,\E\" + by (simp add: validE_def2 returnOk_def return_def mres_def) + +lemma throwError_wp: + "\E e\ throwError e \P\,\E\" + by(simp add: validE_def2 throwError_def return_def mres_def) + +lemma returnOKE_R_wp: + "\P x\ returnOk x \P\, -" + by (simp add: validE_R_def validE_def valid_def returnOk_def return_def mres_def) + +lemma liftE_wp: + "\P\ f \Q\ \ \P\ liftE f \Q\,\E\" + by simp + +lemma catch_wp: + "\ \x. 
\E x\ handler x \Q\; \P\ f \Q\,\E\ \ \ \P\ catch f handler \Q\" + unfolding catch_def valid_def validE_def return_def mres_def image_def + by (fastforce simp: bind_def split: sum.splits tmres.splits) + +lemma handleE'_wp: + "\ \x. \F x\ handler x \Q\,\E\; \P\ f \Q\,\F\ \ \ \P\ f handler \Q\,\E\" + unfolding handleE'_def valid_def validE_def return_def mres_def image_def + by (fastforce simp: bind_def split: sum.splits tmres.splits) + +lemma handleE_wp: + assumes x: "\x. \F x\ handler x \Q\,\E\" + assumes y: "\P\ f \Q\,\F\" + shows "\P\ f handler \Q\,\E\" + by (simp add: handleE_def handleE'_wp [OF x y]) + +lemma liftM_wp: + "\P\ m \Q \ f\ \ \P\ liftM f m \Q\" + by (simp add: hoare_liftM_subst) + +lemma liftME_wp: + "\P\ m \Q \ f\,\E\ \ \P\ liftME f m \Q\,\E\" + by (simp add: hoare_liftME_subst) + +lemma assert_wp: + "\\s. P \ Q () s\ assert P \Q\" + unfolding assert_def + by (wpsimp wp: return_wp fail_wp | rule conjI)+ + +lemma list_cases_wp: + assumes a: "\P_A\ a \Q\" + assumes b: "\x xs. ts = x#xs \ \P_B x xs\ b x xs \Q\" + shows "\case_list P_A P_B ts\ case ts of [] \ a | x # xs \ b x xs \Q\" + by (cases ts, auto simp: a b) + +lemma hoare_vcg_handle_elseE: + "\ \P\ f \Q\,\E\; \e. \E e\ g e \R\,\F\; \x. \Q x\ h x \R\,\F\ \ \ + \P\ f g h \R\,\F\" + unfolding handle_elseE_def validE_def + by (wpsimp wp: bind_wp_fwd | assumption | rule conjI)+ + +lemma alternative_wp: + assumes x: "\P\ f \Q\" + assumes y: "\P'\ f' \Q\" + shows "\P and P'\ f \ f' \Q\" + unfolding valid_def alternative_def mres_def + using post_by_hoare[OF x _ in_mres] post_by_hoare[OF y _ in_mres] + by fastforce + +lemma alternativeE_wp: + assumes "\P\ f \Q\,\E\" + assumes "\P'\ f' \Q\,\E\" + shows "\P and P'\ f \ f' \Q\,\E\" + unfolding validE_def + by (wpsimp wp: assms alternative_wp | fold validE_def)+ + +lemma alternativeE_R_wp: + "\ \P\ f \Q\,-; \P'\ f' \Q\,- \ \ \P and P'\ f \ f' \Q\,-" + unfolding validE_R_def + by (rule alternativeE_wp) + +lemma alternativeE_E_wp: + "\ \P\ f -,\Q\; \P'\ g -,\Q\ \ \ \P and P'\ f \ g -, \Q\" + unfolding validE_E_def + by (rule alternativeE_wp) + +lemma select_wp: + "\\s. \x \ S. Q x s\ select S \Q\" + by (simp add: select_def valid_def mres_def image_def) + +lemma state_select_wp: + "\\s. \t. (s, t) \ f \ P () t\ state_select f \P\" + unfolding state_select_def2 + by (wpsimp wp: put_wp select_wp return_wp get_wp assert_wp) + +lemma condition_wp: + "\ \Q\ A \P\; \R\ B \P\ \ \ \\s. if C s then Q s else R s\ condition C A B \P\" + by (clarsimp simp: condition_def valid_def) + +lemma conditionE_wp: + "\ \P\ A \Q\,\R\; \P'\ B \Q\,\R\ \ \ \\s. if C s then P s else P' s\ condition C A B \Q\,\R\" + by (clarsimp simp: condition_def validE_def valid_def) + +lemma state_assert_wp: + "\\s. f s \ P () s\ state_assert f \P\" + unfolding state_assert_def + by (wp bind_wp_fwd get_wp assert_wp) + +lemma when_wp[wp_split]: + "\ P \ \Q\ f \R\ \ \ \if P then Q else R ()\ when P f \R\" + by (clarsimp simp: when_def valid_def return_def mres_def) + +lemma unless_wp[wp_split]: + "(\P \ \Q\ f \R\) \ \if P then R () else Q\ unless P f \R\" + unfolding unless_def by wp auto + +lemma whenE_wp: + "(P \ \Q\ f \R\, \E\) \ \if P then Q else R ()\ whenE P f \R\, \E\" + unfolding whenE_def by clarsimp (wp returnOk_wp) + +lemma unlessE_wp: + "(\ P \ \Q\ f \R\, \E\) \ \if P then R () else Q\ unlessE P f \R\, \E\" + unfolding unlessE_def + by (wpsimp wp: returnOk_wp) + +lemma maybeM_wp: + "(\x. y = Some x \ \P x\ m x \Q\) \ + \\s. (\x. 
y = Some x \ P x s) \ (y = None \ Q () s)\ maybeM m y \Q\" + unfolding maybeM_def by (wpsimp wp: return_wp) auto + +lemma notM_wp: + "\P\ m \\c. Q (\ c)\ \ \P\ notM m \Q\" + unfolding notM_def by (wpsimp wp: return_wp) + +lemma ifM_wp: + assumes [wp]: "\Q\ f \S\" "\R\ g \S\" + assumes [wp]: "\A\ P \\c s. c \ Q s\" "\B\ P \\c s. \c \ R s\" + shows "\A and B\ ifM P f g \S\" + unfolding ifM_def + by (wpsimp wp: hoare_vcg_if_split hoare_vcg_conj_lift) + +lemma andM_wp: + assumes [wp]: "\Q'\ B \Q\" + assumes [wp]: "\P\ A \\c s. c \ Q' s\" "\P'\ A \\c s. \ c \ Q False s\" + shows "\P and P'\ andM A B \Q\" + unfolding andM_def by (wp ifM_wp return_wp) + +lemma orM_wp: + assumes [wp]: "\Q'\ B \Q\" + assumes [wp]: "\P\ A \\c s. c \ Q True s\" "\P'\ A \\c s. \ c \ Q' s\" + shows "\P and P'\ orM A B \Q\" + unfolding orM_def by (wp ifM_wp return_wp) + +lemma whenM_wp: + assumes [wp]: "\Q\ f \S\" + assumes [wp]: "\A\ P \\c s. c \ Q s\" "\B\ P \\c s. \c \ S () s\" + shows "\A and B\ whenM P f \S\" + unfolding whenM_def by (wp ifM_wp return_wp) + +lemma hoare_K_bind[wp_split]: + "\P\ f \Q\ \ \P\ K_bind f x \Q\" + by simp + +lemma validE_K_bind[wp_split]: + "\ P \ x \ Q \, \ E \ \ \ P \ K_bind x f \ Q \, \ E \" + by simp + +lemma hoare_fun_app_wp: + "\P\ f' x \Q'\ \ \P\ f' $ x \Q'\" + "\P\ f x \Q\,\E\ \ \P\ f $ x \Q\,\E\" + "\P\ f x \Q\,- \ \P\ f $ x \Q\,-" + "\P\ f x -,\E\ \ \P\ f $ x -,\E\" + by simp+ + +lemma liftE_validE_E: + "\\\ liftE f -, \Q\" + by (clarsimp simp: validE_E_def valid_def) + +lemma returnOk_E: + "\\\ returnOk r -, \Q\" + by (simp add: validE_E_def) (wp returnOk_wp) + +lemma case_option_wp: + "\ \x. \P x\ m x \Q\; \P'\ m' \Q\ \ \ + \\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ case_option m' m x \Q\" + by (cases x; simp) + +lemma case_option_wpE: + "\ \x. \P x\ m x \Q\,\E\; \P'\ m' \Q\,\E\ \ \ + \\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ case_option m' m x \Q\,\E\" + by (cases x; simp) + +lemmas liftME_E_E_wp[wp_split] = validE_validE_E [OF liftME_wp, simplified, OF validE_E_validE] + +lemma assert_opt_wp: + "\\s. x \ None \ Q (the x) s\ assert_opt x \Q\" + unfolding assert_opt_def + by (cases x; wpsimp wp: fail_wp return_wp) + +lemma gets_the_wp: + "\\s. (f s \ None) \ Q (the (f s)) s\ gets_the f \Q\" + unfolding gets_the_def + by (wp bind_wp_fwd gets_wp assert_opt_wp) + +lemma gets_the_wp': (* FIXME: should prefer this one in [wp] *) + "\\s. \rv. f s = Some rv \ Q rv s\ gets_the f \Q\" + unfolding gets_the_def + by (wpsimp wp: bind_wp_fwd gets_wp assert_opt_wp) + +lemma gets_map_wp: + "\\s. f s p \ None \ Q (the (f s p)) s\ gets_map f p \Q\" + unfolding gets_map_def + by (wpsimp wp: bind_wp_fwd gets_wp assert_opt_wp) + +lemma gets_map_wp': + "\\s. \rv. f s p = Some rv \ Q rv s\ gets_map f p \Q\" + unfolding gets_map_def + by (wpsimp wp: bind_wp_fwd gets_wp assert_opt_wp) + +(* FIXME: make wp *) +lemma whenE_throwError_wp: + "\\s. \Q \ P s\ whenE Q (throwError e) \\_. P\, -" + by (simp add: whenE_def returnOk_def throwError_def return_def validE_R_def validE_def valid_def + mres_def) + +lemma select_throwError_wp: + "\\s. \x\S. Q x s\ select S >>= throwError -, \Q\" + by (clarsimp simp: bind_def throwError_def return_def select_def validE_E_def + validE_def valid_def mres_def) + + +subsection \Setting up the @{method wp} method\ + +lemma valid_is_triple: + "valid P f Q = triple_judgement P f (postcondition Q (\s f. 
mres (f s)))" + by (simp add: triple_judgement_def valid_def postcondition_def) + +lemma validE_is_triple: + "validE P f Q E = + triple_judgement P f + (postconditions (postcondition Q (\s f. {(rv, s'). (Inr rv, s') \ mres (f s)})) + (postcondition E (\s f. {(rv, s'). (Inl rv, s') \ mres (f s)})))" + by (fastforce simp: validE_def triple_judgement_def valid_def postcondition_def postconditions_def + split: sum.split) + +lemma validE_R_is_triple: + "validE_R P f Q = + triple_judgement P f (postcondition Q (\s f. {(rv, s'). (Inr rv, s') \ mres (f s)}))" + by (simp add: validE_R_def validE_is_triple postconditions_def postcondition_def) + +lemma validE_E_is_triple: + "validE_E P f E = + triple_judgement P f (postcondition E (\s f. {(rv, s'). (Inl rv, s') \ mres (f s)}))" + by (simp add: validE_E_def validE_is_triple postconditions_def postcondition_def) + +lemmas hoare_wp_combs = hoare_vcg_conj_lift + +lemmas hoare_wp_combsE = + validE_validE_R + hoare_vcg_R_conj + hoare_vcg_E_elim + hoare_vcg_E_conj + +lemmas hoare_wp_state_combsE = + valid_validE_R + hoare_vcg_R_conj[OF valid_validE_R] + hoare_vcg_E_elim[OF valid_validE_E] + hoare_vcg_E_conj[OF valid_validE_E] + +lemmas hoare_classic_wp_combs = hoare_post_comb_imp_conj hoare_weaken_pre hoare_wp_combs +lemmas hoare_classic_wp_combsE = hoare_weaken_preE hoare_weaken_preE_R hoare_wp_combsE + +lemmas hoare_classic_wp_state_combsE = + hoare_weaken_preE[OF valid_validE] + hoare_weaken_preE_R[OF valid_validE_R] + hoare_wp_state_combsE + +lemmas all_classic_wp_combs = + hoare_classic_wp_state_combsE + hoare_classic_wp_combsE + hoare_classic_wp_combs + +lemmas hoare_wp_splits[wp_split] = + handleE'_wp handleE_wp + validE_validE_R [OF handleE'_wp [OF validE_R_validE]] + validE_validE_R [OF handleE_wp [OF validE_R_validE]] + catch_wp hoare_vcg_if_split hoare_vcg_if_splitE + validE_validE_R [OF hoare_vcg_if_splitE [OF validE_R_validE validE_R_validE]] + liftM_wp liftME_wp + validE_validE_R [OF liftME_wp [OF validE_R_validE]] + validE_valid + +lemmas [wp_comb] = hoare_wp_state_combsE hoare_wp_combsE hoare_wp_combs + +(* Add these rules to wp first to control when they are applied. We want them used last, only when + no other more specific wp rules apply. + bind_wp, bindE_wp and their variants are wp rules instead of wp_split rules because + they should be used before other wp_split rules, and in combination with wp_comb rules when + necessary. + hoare_vcg_prop is unsafe in certain circumstances but still useful to have applied automatically, + so we make it the very last rule to be tried. 
*) +lemmas [wp] = + hoare_vcg_prop bind_wp + bindE_R_wp bindE_E_wp bindE_wp + bind_wpE_R bind_wpE_E bind_wpE + +(* rules towards the bottom will be matched first *) +lemmas [wp] = wp_post_tauts + hoare_fun_app_wp + returnOk_E + liftE_validE_E + put_wp + get_wp + gets_wp + modify_wp + return_wp + returnOk_wp + throwError_wp + fail_wp + failE_wp + assert_wp + state_assert_wp + assert_opt_wp + gets_the_wp + gets_map_wp' + liftE_wp + alternative_wp + alternativeE_R_wp + alternativeE_E_wp + alternativeE_wp + select_wp + state_select_wp + condition_wp + conditionE_wp + maybeM_wp notM_wp ifM_wp andM_wp orM_wp whenM_wp + +lemmas [wp_trip] = valid_is_triple validE_is_triple validE_E_is_triple validE_R_is_triple + +lemmas validE_E_combs[wp_comb] = + hoare_vcg_E_conj[where Q'="\\", folded validE_E_def] + valid_validE_E + hoare_vcg_E_conj[where Q'="\\", folded validE_E_def, OF valid_validE_E] + + +subsection \Simplifications on conjunction\ + +lemma hoare_post_eq: + "\ Q = Q'; \P\ f \Q'\ \ \ \P\ f \Q\" + by simp + +lemma hoare_post_eqE1: + "\ Q = Q'; \P\ f \Q'\,\E\ \ \ \P\ f \Q\,\E\" + by simp + +lemma hoare_post_eqE2: + "\ E = E'; \P\ f \Q\,\E'\ \ \ \P\ f \Q\,\E\" + by simp + +lemma hoare_post_eqE_R: + "\ Q = Q'; \P\ f \Q'\,- \ \ \P\ f \Q\,-" + by simp + +lemma pred_conj_apply_elim: + "(\rv. Q rv and Q' rv) = (\rv s. Q rv s \ Q' rv s)" + by (simp add: pred_conj_def) + +lemma pred_conj_conj_elim: + "(\rv s. (Q rv and Q' rv) s \ Q'' rv s) = (\rv s. Q rv s \ Q' rv s \ Q'' rv s)" + by simp + +lemma conj_assoc_apply: + "(\rv s. (Q rv s \ Q' rv s) \ Q'' rv s) = (\rv s. Q rv s \ Q' rv s \ Q'' rv s)" + by simp + +lemma all_elim: + "(\rv s. \x. P rv s) = P" + by simp + +lemma all_conj_elim: + "(\rv s. (\x. P rv s) \ Q rv s) = (\rv s. P rv s \ Q rv s)" + by simp + +lemmas vcg_rhs_simps = + pred_conj_apply_elim pred_conj_conj_elim conj_assoc_apply all_elim all_conj_elim + +lemma if_apply_reduct: + "\P\ If P' (f x) (g x) \Q\ \ \P\ If P' f g x \Q\" + by (cases P'; simp) + +lemma if_apply_reductE: + "\P\ If P' (f x) (g x) \Q\,\E\ \ \P\ If P' f g x \Q\,\E\" + by (cases P'; simp) + +lemma if_apply_reductE_R: + "\P\ If P' (f x) (g x) \Q\,- \ \P\ If P' f g x \Q\,-" + by (cases P'; simp) + +lemmas hoare_wp_simps[wp_split] = + vcg_rhs_simps[THEN hoare_post_eq] vcg_rhs_simps[THEN hoare_post_eqE1] + vcg_rhs_simps[THEN hoare_post_eqE2] vcg_rhs_simps[THEN hoare_post_eqE_R] + if_apply_reduct if_apply_reductE if_apply_reductE_R TrueI + +schematic_goal if_apply_test: + "\?Q\ (if A then returnOk else K fail) x \P\,\E\" + by wpsimp + +lemma hoare_elim_pred_conj: + "\P\ f \\rv s. Q rv s \ Q' rv s\ \ \P\ f \\rv. Q rv and Q' rv\" + by (unfold pred_conj_def) + +lemma hoare_elim_pred_conjE1: + "\P\ f \\rv s. Q rv s \ Q' rv s\,\E\ \ \P\ f \\rv. Q rv and Q' rv\,\E\" + by (unfold pred_conj_def) + +lemma hoare_elim_pred_conjE2: + "\P\ f \Q\, \\rv s. E rv s \ E' rv s\ \ \P\ f \Q\,\\rv. E rv and E' rv\" + by (unfold pred_conj_def) + +lemma hoare_elim_pred_conjE_R: + "\P\ f \\rv s. Q rv s \ Q' rv s\,- \ \P\ f \\rv. Q rv and Q' rv\,-" + by (unfold pred_conj_def) + +lemmas hoare_wp_pred_conj_elims = + hoare_elim_pred_conj hoare_elim_pred_conjE1 + hoare_elim_pred_conjE2 hoare_elim_pred_conjE_R + + +subsection \Bundles\ + +bundle no_pre = hoare_pre [wp_pre del] + +bundle classic_wp_pre = hoare_pre [wp_pre del] + all_classic_wp_combs[wp_comb del] all_classic_wp_combs[wp_comb] + + +text \Miscellaneous lemmas on hoare triples\ + +lemma hoare_pre_cases: + "\ \\s. R s \ P s\ f \Q\; \\s. 
\R s \ P' s\ f \Q\ \ \ \P and P'\ f \Q\" + unfolding valid_def by fastforce + +lemma hoare_vcg_mp: + "\ \P\ f \Q\; \P\ f \\r s. Q r s \ Q' r s\ \ \ \P\ f \Q'\" + by (auto simp: valid_def split_def) + +(* note about this precond stuff: rules get a chance to bind directly + before any of their combined forms. As a result, these precondition + implication rules are only used when needed. *) +lemma hoare_add_post: + "\ \P'\ f \Q'\; \s. P s \ P' s; \P\ f \\rv s. Q' rv s \ Q rv s\ \ \ \P\ f \Q\" + unfolding valid_def + by fastforce + +lemma hoare_gen_asmE: + "(P \ \P'\ f \Q\,-) \ \P' and K P\ f \Q\, -" + by (simp add: validE_R_def validE_def valid_def) blast + +lemma hoare_list_case: + "\ \P1\ f f1 \Q\; \y ys. xs = y#ys \ \P2 y ys\ f (f2 y ys) \Q\ \ \ + \case xs of [] \ P1 | y#ys \ P2 y ys\ f (case xs of [] \ f1 | y#ys \ f2 y ys) \Q\" + by (cases xs; simp) + +lemmas whenE_wps[wp_split] = + whenE_wp whenE_wp[THEN validE_validE_R] whenE_wp[THEN validE_validE_E] + +lemmas unlessE_wps[wp_split] = + unlessE_wp unlessE_wp[THEN validE_validE_R] unlessE_wp[THEN validE_validE_E] + +lemma hoare_use_eq: + assumes "\P. \\s. P (f s)\ m \\_ s. P (f s)\" + assumes "\f. \\s. P f s\ m \\_ s. Q f s\" + shows "\\s. P (f s) s\ m \\_ s. Q (f s) s \" + apply (rule hoare_post_imp[where Q="\_ s. \y. y = f s \ Q y s"], simp) + apply (wpsimp wp: hoare_vcg_ex_lift assms) + done + +lemma hoare_fail_any[simp]: + "\P\ fail \Q\" + by wp + +lemma hoare_failE[simp]: + "\P\ fail \Q\, \E\" + by wp + +lemma hoare_validE_pred_conj: + "\ \P\ f \Q\, \E\; \P\ f \R\, \E\ \ \ \P\ f \Q and R\, \E\" + unfolding valid_def validE_def + by (simp add: split_def split: sum.splits) + +lemma hoare_validE_conj: + "\ \P\ f \Q\, \E\; \P\ f \R\, \E\ \ \ \P\ f \\rv s. Q rv s \ R rv s\, \E\" + unfolding valid_def validE_def + by (simp add: split_def split: sum.splits) + +lemmas hoare_valid_validE = valid_validE (* FIXME lib: eliminate one *) + +declare validE_validE_E[wp_comb] + +lemmas if_validE_E[wp_split] = + validE_validE_E[OF hoare_vcg_if_splitE[OF validE_E_validE validE_E_validE]] + +lemma hoare_drop_imp: + "\P\ f \Q\ \ \P\ f \\rv s. R rv s \ Q rv s\" + by (auto simp: valid_def) + +lemma hoare_drop_impE: + "\\P\ f \\r. Q\, \E\\ \ \P\ f \\rv s. R rv s \ Q s\, \E\" + by (simp add: hoare_chainE) + +lemma hoare_drop_impE_R: + "\P\ f \Q\,- \ \P\ f \\rv s. R rv s \ Q rv s\, -" + by (auto simp: validE_R_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_drop_impE_E: + "\P\ f -,\Q\ \ \P\ f -, \\rv s. R rv s \ Q rv s\" + by (auto simp: validE_E_def validE_def valid_def split_def split: sum.splits) + +lemmas hoare_drop_imps = hoare_drop_imp hoare_drop_impE_R hoare_drop_impE_E + +(*This is unsafe, but can be very useful when supplied as a comb rule.*) +lemma hoare_drop_imp_conj[wp_unsafe]: + "\ \P\ f \Q'\; \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ \ \ + \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" + by (auto simp: valid_def) + +lemmas hoare_drop_imp_conj'[wp_unsafe] = hoare_drop_imp_conj[where Q'''="\\", simplified] + +lemma hoare_vcg_set_pred_lift: + assumes "\P x. m \ \s. P (f x s) \" + shows "m \ \s. P {x. f x s} \" + using assms[where P="\x . x"] assms[where P=Not] use_valid + by (fastforce simp: valid_def elim!: subst[rotated, where P=P]) + +lemma hoare_vcg_set_pred_lift_mono: + assumes f: "\x. m \ f x \" + assumes mono: "\A B. A \ B \ P A \ P B" + shows "m \ \s. P {x. 
f x s} \" + by (fastforce simp: valid_def elim!: mono[rotated] dest: use_valid[OF _ f]) + +text \If a function contains an @{term assert}, or equivalent, then it might be + possible to strengthen the precondition of an already-proven hoare triple + @{text pos}, by additionally proving a side condition @{text neg}, that + violating some condition causes failure. The stronger hoare triple produced + by this theorem allows the precondition to assume that the condition is + satisfied.\ +lemma hoare_strengthen_pre_via_assert_forward: + assumes pos: "\ P \ f \ Q \" + assumes rel: "\s. S s \ P s \ N s" + assumes neg: "\ N \ f \ \\ \" + shows "\ S \ f \ Q \" + apply (rule hoare_weaken_pre) + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_disj_lift[OF pos neg]) + by (auto simp: rel) + +text \Like @{thm hoare_strengthen_pre_via_assert_forward}, strengthen a precondition + by proving a side condition that the negation of that condition would cause + failure. This version is intended for backward reasoning. Apply it to a goal to + obtain a stronger precondition after proving the side condition.\ +lemma hoare_strengthen_pre_via_assert_backward: + assumes neg: "\ Not \ E \ f \ \\ \" + assumes pos: "\ P and E \ f \ Q \" + shows "\ P \ f \ Q \" + by (rule hoare_strengthen_pre_via_assert_forward[OF pos _ neg], simp) + + +subsection \Strongest postcondition rules\ + +lemma get_sp: + "\P\ get \\rv s. s = rv \ P s\" + by(simp add:get_def valid_def mres_def) + +lemma put_sp: + "\\\ put a \\_ s. s = a\" + by(simp add:put_def valid_def mres_def) + +lemma return_sp: + "\P\ return a \\rv s. rv = a \ P s\" + by(simp add:return_def valid_def mres_def) + +lemma hoare_return_sp: (* FIXME lib: eliminate *) + "\P\ return x \\rv. P and K (rv = x)\" + by (simp add: valid_def return_def mres_def) + +lemma assert_sp: + "\P\ assert Q \\_ s. P s \ Q \" + by (simp add: assert_def fail_def return_def valid_def mres_def) + +lemma hoare_gets_sp: + "\P\ gets f \\rv s. rv = f s \ P s\" + by (simp add: valid_def simpler_gets_def mres_def) + +lemma hoare_returnOk_sp: + "\P\ returnOk x \\rv s. rv = x \ P s\, \Q\" + by (simp add: valid_def validE_def returnOk_def return_def mres_def) + +\ \For forward reasoning in Hoare proofs, these lemmas allow us to step over the + left-hand-side of monadic bind, while keeping the same precondition.\ + +named_theorems forward_inv_step_rules + +lemmas hoare_forward_inv_step_nobind[forward_inv_step_rules] = + bind_wp_nobind[where Q'=P and P=P for P, rotated] + +lemmas bind_wp_fwd_skip[forward_inv_step_rules] = + bind_wp_fwd[where Q'="\_. P" and P=P for P] + +lemmas hoare_forward_inv_step_nobindE_valid[forward_inv_step_rules] = + bindE_wp_nobind[where Q'=P and P=P and E="\_. Q" and Q="\_. Q" for P Q, + simplified validE_eq_valid, rotated] + +lemmas hoare_forward_inv_step_valid[forward_inv_step_rules] = + bindE_wp_fwd[where Q'="\_. P" and P=P and E="\_. Q" and Q="\_. Q" for P Q, + simplified validE_eq_valid] + +lemmas hoare_forward_inv_step_nobindE[forward_inv_step_rules] = + bindE_wp_nobind[where Q'=P and P=P for P, rotated] + +lemmas bindE_wp_fwd_skip[forward_inv_step_rules] = + bindE_wp_fwd[where Q'="\_. 
P" and P=P for P] + +lemmas hoare_forward_inv_step_nobindE_validE_E[forward_inv_step_rules] = + hoare_forward_inv_step_nobindE[where Q="\\", simplified validE_E_def[symmetric]] + +lemmas hoare_forward_inv_step_validE_E[forward_inv_step_rules] = + bindE_wp_fwd_skip[where Q="\\", simplified validE_E_def[symmetric]] + +lemmas hoare_forward_inv_step_nobindE_validE_R[forward_inv_step_rules] = + hoare_forward_inv_step_nobindE[where E="\\", simplified validE_R_def[symmetric]] + +lemmas hoare_forward_inv_step_validE_R[forward_inv_step_rules] = + bindE_wp_fwd_skip[where E="\\", simplified validE_R_def[symmetric]] + +method forward_inv_step uses wp simp = + rule forward_inv_step_rules, solves \wpsimp wp: wp simp: simp\ + +end diff --git a/lib/Monad_WP/Datatype_Schematic.thy b/lib/Monads/wp/Datatype_Schematic.thy similarity index 92% rename from lib/Monad_WP/Datatype_Schematic.thy rename to lib/Monads/wp/Datatype_Schematic.thy index adbcf68835..9701c97e0b 100644 --- a/lib/Monad_WP/Datatype_Schematic.thy +++ b/lib/Monads/wp/Datatype_Schematic.thy @@ -6,8 +6,8 @@ theory Datatype_Schematic imports - MLUtils - TermPatternAntiquote + ML_Utils.ML_Utils + ML_Utils.TermPatternAntiquote begin text \ @@ -26,7 +26,7 @@ text \ schematic does not have as parameters. In the "constructor expression" case, we let users supply additional - constructor handlers via the `datatype_schematic` attribute. The method uses + constructor handlers via the @{text "datatype_schematic"} attribute. The method uses rules of the following form: @{term "\x1 x2 x3. getter (constructor x1 x2 x3) = x2"} @@ -69,18 +69,18 @@ structure Datatype_Schematic_Data = Generic_Data ( \ \ Keys are names of datatype constructors (like @{const Cons}), values are - `(index, function_name, thm)`. + @{text "(index, function_name, thm)"}. - - `function_name` is the name of an "accessor" function that accesses part + - @{text function_name} is the name of an "accessor" function that accesses part of the constructor specified by the key (so the accessor @{const hd} is related to the constructor/key @{const Cons}). - - `thm` is a theorem showing that the function accesses one of the + - @{text thm} is a theorem showing that the function accesses one of the arguments to the constructor (like @{thm list.sel(1)}). - - `idx` is the index of the constructor argument that the accessor - accesses. (eg. since `hd` accesses the first argument, `idx = 0`; since - `tl` accesses the second argument, `idx = 1`). + - @{text idx} is the index of the constructor argument that the accessor + accesses. (eg. since @{const hd} accesses the first argument, @{text "idx = 0"}; since + @{const tl} accesses the second argument, @{text "idx = 1"}). \ type T = ((int * string * thm) list) Symtab.table; val empty = Symtab.empty; @@ -261,6 +261,7 @@ method_setup datatype_schem = \ declare prod.sel[datatype_schematic] declare option.sel[datatype_schematic] declare list.sel(1,3)[datatype_schematic] +declare sum.sel[datatype_schematic] locale datatype_schem_demo begin @@ -286,7 +287,7 @@ lemma selectively_exposing_datatype_arugments: notes get_basic_0.simps[datatype_schematic] shows "\x. \a b. 
x (basic a b) = a" apply (rule exI, (rule allI)+) - apply datatype_schem \ \Only exposes `a` to the schematic.\ + apply datatype_schem \ \Only exposes @{text a} to the schematic.\ by (rule refl) lemma method_handles_primrecs_with_two_constructors: diff --git a/lib/Monad_WP/wp/Eisbach_WP.thy b/lib/Monads/wp/Eisbach_WP.thy similarity index 88% rename from lib/Monad_WP/wp/Eisbach_WP.thy rename to lib/Monads/wp/Eisbach_WP.thy index a4c7483b01..613b9f2257 100644 --- a/lib/Monad_WP/wp/Eisbach_WP.thy +++ b/lib/Monads/wp/Eisbach_WP.thy @@ -8,19 +8,19 @@ theory Eisbach_WP imports - Eisbach_Methods - NonDetMonadVCG - Conjuncts - Rule_By_Method + Eisbach_Tools.Eisbach_Methods + Nondet_VCG + Eisbach_Tools.Conjuncts + Eisbach_Tools.Rule_By_Method WPI begin text \ - Methods for manipulating the post conditions of hoare triples as if they + Methods for manipulating the post conditions of Hoare triples as if they were proper subgoals. - post_asm can be used with the @ attribute to modify existing proofs. Useful + @{text post_asm} can be used with the \@ attribute to modify existing proofs. Useful for proving large postconditions in one proof and then subsequently decomposing it. \ @@ -53,7 +53,7 @@ lemma uncurry2: "\r s. Q r s \ Q' r s \ Q'' r s \(simp only: bipred_conj_def pred_conj_def)?,(elim conjE)?,m\) + (post_asm_raw \(simp only: pred_conj_def)?,(elim conjE)?,m\) named_theorems packed_validEs @@ -86,7 +86,7 @@ method post_raw methods m = method post_strong methods m_distinct m_all = (post_raw - \(simp only: pred_conj_def bipred_conj_def)?, + \(simp only: pred_conj_def)?, (intro impI conjI allI)?, distinct_subgoals_strong \m_distinct\, all \m_all\, @@ -98,8 +98,8 @@ end text \ - Method (meant to be used with @ as an attribute) used for producing multiple facts out of - a single hoare triple with a conjunction in its post condition. + Method (meant to be used with \@ as an attribute) used for producing multiple facts out of + a single Hoare triple with a conjunction in its post condition. 
\ context begin @@ -122,6 +122,9 @@ private lemma hoare_decomposeE: private lemmas hoare_decomposes' = hoare_decompose hoare_decomposeE_R hoare_decomposeE_E hoare_decomposeE +private lemmas bipred_conj_def = + inf_fun_def[where 'b="'b \ bool", unfolded inf_fun_def[where 'b="bool"], simplified] + private method add_pred_conj = (subst pred_conj_def[symmetric]) private method add_bipred_conj = (subst bipred_conj_def[symmetric]) diff --git a/lib/Monad_WP/wp/WP-method.ML b/lib/Monads/wp/WP-method.ML similarity index 93% rename from lib/Monad_WP/wp/WP-method.ML rename to lib/Monads/wp/WP-method.ML index d25d8274cb..d7e2cbac82 100644 --- a/lib/Monad_WP/wp/WP-method.ML +++ b/lib/Monads/wp/WP-method.ML @@ -253,24 +253,9 @@ fun resolve_ruleset_tac' trace ctxt rs used_thms_ref n t = fun resolve_ruleset_tac trace ctxt rs used_thms_ref n = (Apply_Debug.break ctxt (SOME "wp")) THEN (resolve_ruleset_tac' trace ctxt rs used_thms_ref n) -fun trace_used_thm ctxt (name, tag, prop) = - let val adjusted_name = ThmExtras.adjust_thm_name ctxt (name, NONE) prop - in Pretty.block - (ThmExtras.pretty_adjusted_name ctxt adjusted_name :: - [Pretty.str ("[" ^ tag ^ "]:"),Pretty.brk 1, Syntax.unparse_term ctxt prop]) - end - -fun trace_used_thms trace ctxt used_thms_ref = - if trace - then Pretty.big_list "Theorems used by wp:" - (map (trace_used_thm ctxt) (!used_thms_ref)) - |> Pretty.writeln - handle Size => warning ("WP tracing information was too large to print.") - else (); - fun warn_unsafe_rules unsafe_rules n ctxt t = let val used_thms_dummy = Unsynchronized.ref [] : (string * string * term) list Unsynchronized.ref; - val ctxt' = Config.put WP_Pre.wp_trace false ctxt + val ctxt' = (Config.put WP_Pre.wp_trace false ctxt |> Config.put WP_Pre.wp_trace_instantiation false) val useful_unsafe_rules = filter (fn rule => (is_some o SINGLE ( @@ -284,10 +269,12 @@ fun warn_unsafe_rules unsafe_rules n ctxt t = fun apply_rules_tac_n trace ctxt extras n = let - val trace' = trace orelse Config.get ctxt WP_Pre.wp_trace + val trace' = trace orelse Config.get ctxt WP_Pre.wp_trace orelse Config.get ctxt WP_Pre.wp_trace_instantiation val used_thms_ref = Unsynchronized.ref [] : (string * string * term) list Unsynchronized.ref val rules = get_rules ctxt extras - val wp_pre_tac = TRY (WP_Pre.tac trace' used_thms_ref ctxt 1) + val wp_pre_tac = TRY (WP_Pre.pre_tac trace' ctxt + (Named_Theorems.get ctxt \<^named_theorems>\wp_pre\) + used_thms_ref 1) val wp_fix_tac = TRY (WPFix.both_tac ctxt 1) val cleanup_tac = TRY (REPEAT (resolve_tac ctxt [@{thm TrueI}, @{thm conj_TrueI}, @{thm conj_TrueI2}] 1 @@ -296,7 +283,7 @@ let THEN cleanup_tac in SELECT_GOAL ( - (fn t => Seq.map (fn thm => (trace_used_thms trace' ctxt used_thms_ref; + (fn t => Seq.map (fn thm => (WP_Pre.trace_used_thms trace' ctxt used_thms_ref; used_thms_ref := []; thm)) ((wp_pre_tac THEN wp_fix_tac THEN steps_tac) t)) THEN_ELSE @@ -308,10 +295,10 @@ fun apply_rules_tac trace ctxt extras = apply_rules_tac_n trace ctxt extras 1; fun apply_once_tac trace ctxt extras t = let - val trace' = trace orelse Config.get ctxt WP_Pre.wp_trace + val trace' = trace orelse Config.get ctxt WP_Pre.wp_trace orelse Config.get ctxt WP_Pre.wp_trace_instantiation val used_thms_ref = Unsynchronized.ref [] : (string * string * term) list Unsynchronized.ref val rules = get_rules ctxt extras - in Seq.map (fn thm => (trace_used_thms trace' ctxt used_thms_ref; thm)) + in Seq.map (fn thm => (WP_Pre.trace_used_thms trace' ctxt used_thms_ref; thm)) (SELECT_GOAL (resolve_ruleset_tac trace' ctxt rules 
used_thms_ref 1) 1 t) end diff --git a/lib/Monad_WP/wp/WP.thy b/lib/Monads/wp/WP.thy similarity index 93% rename from lib/Monad_WP/wp/WP.thy rename to lib/Monads/wp/WP.thy index 66ef60f117..9adec60420 100644 --- a/lib/Monad_WP/wp/WP.thy +++ b/lib/Monads/wp/WP.thy @@ -4,12 +4,14 @@ * SPDX-License-Identifier: BSD-2-Clause *) +section \Weakest Preconditions\ + theory WP imports WP_Pre WPFix - Apply_Debug - MLUtils + Eisbach_Tools.Apply_Debug + ML_Utils.ML_Utils begin definition diff --git a/lib/Monad_WP/wp/WPBang.thy b/lib/Monads/wp/WPBang.thy similarity index 93% rename from lib/Monad_WP/wp/WPBang.thy rename to lib/Monads/wp/WPBang.thy index 0c739478ac..dcb57538f4 100644 --- a/lib/Monad_WP/wp/WPBang.thy +++ b/lib/Monads/wp/WPBang.thy @@ -7,8 +7,8 @@ theory WPBang imports WP - ProvePart - NonDetMonadVCG + Eisbach_Tools.ProvePart + Nondet_VCG begin lemma conj_meta_forward: @@ -20,12 +20,12 @@ ML \ structure WP_Safe = struct fun check_has_frees_tac Ps (_ : int) thm = let - val fs = Term.add_frees (Thm.prop_of thm) [] |> filter (member (=) Ps) + val fs = Term.add_frees (Thm.prop_of thm) [] |> filter (member (op =) Ps) in if null fs then Seq.empty else Seq.single thm end fun wp_bang wp_safe_rules ctxt = let val wp_safe_rules_conj = ((wp_safe_rules RL @{thms hoare_vcg_conj_lift hoare_vcg_R_conj}) - RL @{thms hoare_strengthen_post hoare_post_imp_R}) + RL @{thms hoare_strengthen_post hoare_strengthen_postE_R hoare_strengthen_postE_E}) |> map (rotate_prems 1) in resolve_tac ctxt wp_safe_rules_conj diff --git a/lib/Monad_WP/wp/WPC.thy b/lib/Monads/wp/WPC.thy similarity index 55% rename from lib/Monad_WP/wp/WPC.thy rename to lib/Monads/wp/WPC.thy index cd19db0e4f..417e236ece 100644 --- a/lib/Monad_WP/wp/WPC.thy +++ b/lib/Monads/wp/WPC.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause @@ -7,61 +8,64 @@ theory WPC imports "WP_Pre" keywords "wpc_setup" :: thy_decl - begin -definition - wpc_helper :: "(('a \ bool) \ 'b set) - \ (('a \ bool) \ 'b set) \ bool \ bool" where - "wpc_helper \ \(P, P') (Q, Q') R. ((\s. P s \ Q s) \ P' \ Q') \ R" +(* Case splitting method producing independent guards (preconditions) for each case in a + datatype case split. The current setup can handle judgements such as valid, corres, or ccorres + with up to two independent predicate guards and one independent set-type guard. Unneeded guards + can be ignored in setup. + + The helper predicate unifies the treatment of guards in the proof method. The P guards will be + transformed into Q guards in each branch of the case. The R is the judgement (valid, corres, etc). + + The helper predicate encodes that the judgement supports a standard guard weakening rule, + from which rules for conjunction-lifting and forall-lifting follow below. These are then used + by the tactic to generate assumptions of the form "\y. x = SomeConstructor y \ P y". + + If more or other types of guards are needed, add them to the helper predicate and re-prove the + processing rules below. *) +definition wpc_helper :: + "('a \ bool) \ ('b \ bool) \ 'c set \ ('a \ bool) \ ('b \ bool) \ 'c set \ bool \ bool" + where + "wpc_helper \ \(P, P', P'') (Q, Q', Q'') R. + (\s. P s \ Q s) \ (\s. P' s \ Q' s) \ P'' \ Q'' \ R" lemma wpc_conj_process: - "\ wpc_helper (P, P') (A, A') C; wpc_helper (P, P') (B, B') D \ - \ wpc_helper (P, P') (\s. 
A s \ B s, A' \ B') (C \ D)" + "\ wpc_helper (P, P', P'') (A, A', A'') C; wpc_helper (P, P', P'') (B, B', B'') D \ + \ wpc_helper (P, P', P'') (\s. A s \ B s, \s. A' s \ B' s, A'' \ B'') (C \ D)" by (clarsimp simp add: wpc_helper_def) lemma wpc_all_process: - "\ \x. wpc_helper (P, P') (Q x, Q' x) (R x) \ - \ wpc_helper (P, P') (\s. \x. Q x s, {s. \x. s \ Q' x}) (\x. R x)" + "\ \x. wpc_helper (P, P', P'') (Q x, Q' x, Q'' x) (R x) \ + \ wpc_helper (P, P', P'') (\s. \x. Q x s, \s. \x. Q' x s, {s. \x. s \ Q'' x}) (\x. R x)" by (clarsimp simp: wpc_helper_def subset_iff) lemma wpc_all_process_very_weak: - "\ \x. wpc_helper (P, P') (Q, Q') (R x) \ \ wpc_helper (P, P') (Q, Q') (\x. R x)" + "\ \x. wpc_helper (P, P', P'') (Q, Q', Q'') (R x) \ + \ wpc_helper (P, P', P'') (Q, Q', Q'') (\x. R x)" by (clarsimp simp: wpc_helper_def) lemma wpc_imp_process: - "\ Q \ wpc_helper (P, P') (R, R') S \ - \ wpc_helper (P, P') (\s. Q \ R s, {s. Q \ s \ R'}) (Q \ S)" + "\ Q \ wpc_helper (P, P', P'') (R, R', R'') S \ + \ wpc_helper (P, P', P'') (\s. Q \ R s, \s. Q \ R' s, {s. Q \ s \ R''}) (Q \ S)" by (clarsimp simp add: wpc_helper_def subset_iff) lemma wpc_imp_process_weak: - "\ wpc_helper (P, P') (R, R') S \ \ wpc_helper (P, P') (R, R') (Q \ S)" + "\ wpc_helper (P, P', P'') (R, R', R'') S \ \ wpc_helper (P, P', P'') (R, R', R'') (Q \ S)" by (clarsimp simp add: wpc_helper_def) -lemmas wpc_processors - = wpc_conj_process wpc_all_process wpc_imp_process -lemmas wpc_weak_processors - = wpc_conj_process wpc_all_process wpc_imp_process_weak -lemmas wpc_vweak_processors - = wpc_conj_process wpc_all_process_very_weak wpc_imp_process_weak +lemmas wpc_processors = wpc_conj_process wpc_all_process wpc_imp_process +lemmas wpc_weak_processors = wpc_conj_process wpc_all_process wpc_imp_process_weak +lemmas wpc_vweak_processors = wpc_conj_process wpc_all_process_very_weak wpc_imp_process_weak lemma wpc_helperI: - "wpc_helper (P, P') (P, P') Q \ Q" + "wpc_helper (P, P', P'') (P, P', P'') Q \ Q" by (simp add: wpc_helper_def) lemma wpc_foo: "\ undefined x; False \ \ P x" by simp -lemma foo: - assumes foo_elim: "\P Q h. \ foo Q h; \s. P s \ Q s \ \ foo P h" - shows - "\ \x. foo (Q x) (f x); foo R g \ \ - foo (\s. (\x. Q x s) \ (y = None \ R s)) - (case y of Some x \ f x | None \ g)" - by (auto split: option.split intro: foo_elim) - ML \ - signature WPC = sig exception WPCFailed of string * term list * thm list; @@ -176,13 +180,9 @@ let val subst = split RS iffd2_thm; val subst2 = instantiate_concl_pred ctxt pred subst; in - (resolve_tac ctxt [subst2]) - THEN' - (resolve_tac ctxt [wpc_helperI]) - THEN' - (REPEAT_ALL_NEW (resolve_tac ctxt processors) - THEN_ALL_NEW - resolve_single_tac ctxt [fin]) + resolve_tac ctxt [subst2] + THEN' resolve_tac ctxt [wpc_helperI] + THEN' (REPEAT_ALL_NEW (resolve_tac ctxt processors) THEN_ALL_NEW resolve_single_tac ctxt [fin]) end; (* n.b. 
need to concretise the lazy sequence via a list to ensure exceptions @@ -213,67 +213,87 @@ end; val _ = Outer_Syntax.command @{command_keyword "wpc_setup"} - "Add wpc stuff" + "Add new WPC term and helper rule" (P.term -- P.name >> (fn (tm, thm) => Toplevel.local_theory NONE NONE (add_wpc tm thm))) end; end; - \ ML \ - -val wp_cases_tactic_weak = WeakestPreCases.wp_cases_tac @{thms wpc_weak_processors}; +val wp_cases_tactic_weak = WeakestPreCases.wp_cases_tac @{thms wpc_weak_processors}; val wp_cases_method_strong = WeakestPreCases.wp_cases_method @{thms wpc_processors}; val wp_cases_method_weak = WeakestPreCases.wp_cases_method @{thms wpc_weak_processors}; val wp_cases_method_vweak = WeakestPreCases.wp_cases_method @{thms wpc_vweak_processors}; - \ +(* Main proof methods: *) method_setup wpc0 = \wp_cases_method_strong\ "case splitter for weakest-precondition proofs" method_setup wpcw0 = \wp_cases_method_weak\ "weak-form case splitter for weakest-precondition proofs" +(* Instances specifically for wp (introducing schematic guards automatically): *) method wpc = (wp_pre, wpc0) method wpcw = (wp_pre, wpcw0) -definition - wpc_test :: "'a set \ ('a \ 'b) set \ 'b set \ bool" - where - "wpc_test P R S \ (R `` P) \ S" +(* Test and example *) +experiment +begin +(* Assume some kind of judgement wpc_test with a precondition P of type set and a + precondition Q of type 'a \ bool: *) +definition wpc_test :: "'a set \ ('a \ bool) \ ('a \ 'b) set \ 'b set \ bool" where + "wpc_test P Q R S \ (R `` P) \ S" + +(* Weakening rule to introduce schematics for the two guards *) lemma wpc_test_weaken: - "\ wpc_test Q R S; P \ Q \ \ wpc_test P R S" + "\ wpc_test Q X' R S; P \ Q; \s. X s \ X' s \ \ wpc_test P X R S" by (simp add: wpc_test_def, blast) -lemma wpc_helper_validF: - "wpc_test Q' R S \ wpc_helper (P, P') (Q, Q') (wpc_test P' R S)" - by (simp add: wpc_test_def wpc_helper_def, blast) +(* Setup rule, establishes connection between wpc_helper and judgment wpc_test. The precondition has + the judgement with transformed (Q) guards, the conclusion has the helper predicate with the + judgement applied to the original (P) guards. The guard arguments of wpc_helper must be in the + form below (no arguments or patterns) for the method to work properly. -setup \ -let - val tm = Thm.cterm_of @{context} (Logic.varify_global @{term "\R. wpc_test P R S"}); - val thm = @{thm wpc_helper_validF}; -in - WPCPredicateAndFinals.map (fn xs => (tm, thm) :: xs) -end -\ + Note that this example ignores the first predicate guard P, and only uses P'/P''. Use/leave out + guards as needed. *) +lemma wpc_helper_validF: + "wpc_test Q'' Q' R S \ wpc_helper (P, P', P'') (Q, Q', Q'') (wpc_test P'' P' R S)" + by (simp add: wpc_test_def wpc_helper_def) blast -lemma set_conj_Int_simp: - "{s \ S. P s} = S \ {s. P s}" - by auto +(* Set up the proof method for wpc_test. First parameter is a function that takes the argument + position on which the case split happens (here R) and returns the judgement. Second parameter + is the setup rule. *) +wpc_setup "\R. wpc_test P X R S" wpc_helper_validF +(* Demo for weak form (wpcw), produces a separate guard for each branch, no implications. *) lemma case_options_weak_wp: - "\ wpc_test P R S; \x. wpc_test P' (R' x) S \ - \ wpc_test (P \ P') (case opt of None \ R | Some x \ R' x) S" + "\ wpc_test P X R S; \x. wpc_test P' X' (R' x) S \ + \ wpc_test (P \ P') (\s. 
X s \ X' s) (case opt of None \ R | Some x \ R' x) S" apply (rule wpc_test_weaken) - apply wpcw + apply wpcw + apply assumption apply assumption - apply assumption + apply simp apply simp done +(* Demo for strong form (wpc), produces a separate guard for each branch with implications. *) +lemma + "\ wpc_test P X R S; \x. wpc_test (P' x) (X' x) (R' x) S \ + \ wpc_test (P \ {s. \x. opt = Some x \ s \ P' x}) + (\s. X s \ (\x. X' x s)) + (case opt of None \ R | Some x \ R' x) S" + apply (rule wpc_test_weaken) + apply wpc + apply assumption + apply assumption + apply fastforce + apply clarsimp + done + +end end diff --git a/lib/Monad_WP/wp/WPEx.thy b/lib/Monads/wp/WPEx.thy similarity index 97% rename from lib/Monad_WP/wp/WPEx.thy rename to lib/Monads/wp/WPEx.thy index d685897034..d915dd5dd2 100644 --- a/lib/Monad_WP/wp/WPEx.thy +++ b/lib/Monads/wp/WPEx.thy @@ -6,7 +6,8 @@ theory WPEx imports - NonDetMonadVCG + Nondet_In_Monad + Nondet_VCG Strengthen begin @@ -95,7 +96,7 @@ fun get_wp_simps_strgs ctxt rules asms = let fun postcond_ss ctxt = ctxt |> put_simpset HOL_basic_ss - |> (fn ctxt => ctxt addsimps [@{thm pred_conj_def}]) + |> (fn ctxt => ctxt addsimps @{thms pred_conj_def}) |> simpset_of fun wp_default_ss ctxt = ctxt @@ -129,7 +130,7 @@ begin lemma "\P\ do v \ return (Suc 0); return (Suc (Suc 0)) od \(=)\" apply (rule hoare_pre) - apply (rule hoare_seq_ext)+ + apply (rule bind_wp)+ apply (wps | rule hoare_vcg_prop)+ oops diff --git a/lib/Monad_WP/wp/WPFix.thy b/lib/Monads/wp/WPFix.thy similarity index 98% rename from lib/Monad_WP/wp/WPFix.thy rename to lib/Monads/wp/WPFix.thy index 00d0042b09..619d43a0b9 100644 --- a/lib/Monad_WP/wp/WPFix.thy +++ b/lib/Monads/wp/WPFix.thy @@ -18,7 +18,7 @@ after goals are solved. They should be instantiated to True. 2. Schematics which appear in multiple precondition positions. They should be instantiated to a conjunction and then separated. 3/4. Schematics applied to datatype expressions such as @{term True} or -@{term "Some x"}. See @{theory "Lib.Datatype_Schematic"} for details. +@{term "Some x"}. See @{theory "Monads.Datatype_Schematic"} for details. \ lemma use_strengthen_prop_intro: @@ -242,11 +242,13 @@ lemma demo2: \ (\x. I x \ (case x of None \ R (Inl 8) | Some y \ R (Inr y))) \ (\x. I x \ (case x of None \ R (Inr 9) | Some y \ R (Inl (y - 1))))" apply (intro exI conjI[rotated] allI) + apply (rename_tac x) apply (case_tac x; simp) apply wpfix apply (rule P) apply wpfix apply (rule P) + apply (rename_tac x) apply (case_tac x; simp) apply wpfix apply (rule P) diff --git a/lib/Monad_WP/wp/WPI.thy b/lib/Monads/wp/WPI.thy similarity index 97% rename from lib/Monad_WP/wp/WPI.thy rename to lib/Monads/wp/WPI.thy index 50dbd6239f..b19ff97709 100644 --- a/lib/Monad_WP/wp/WPI.thy +++ b/lib/Monads/wp/WPI.thy @@ -32,8 +32,8 @@ theory WPI imports - Eisbach_Methods - NonDetMonadLemmas + Eisbach_Tools.Eisbach_Methods + Nondet_Lemmas WPEx begin @@ -43,7 +43,7 @@ lemma bool_function_four_cases: by (auto simp add: fun_eq_iff all_bool_eq) -text \The ML version of repeat_new is slightly faster than the Eisbach one.\ +text \The ML version of @{text repeat_new} is slightly faster than the Eisbach one.\ method_setup repeat_new = \Method.text_closure >> (fn m => fn ctxt => fn facts => @@ -116,6 +116,7 @@ private lemma (atomic f (\s. A' s \ Pres' s) (\r s. 
A r s \ Pres r s) B Q' \ trip Ts) \ trip Ts" apply (erule meta_mp) apply (clarsimp simp: valid_def atomic_def) + apply (rename_tac P s r s') apply (drule_tac x=P in spec) apply (drule_tac x=P in meta_spec) apply (drule_tac x=s in spec)+ @@ -142,7 +143,7 @@ private lemma by (simp add: atomic_def ) text \Decomposing a static term is a waste of time as we know we can lift it - out all in one go. Additionally this stops wp_drop_imp from uselessly taking it apart.\ + out all in one go. Additionally this stops @{text wp_drop_imp} from uselessly taking it apart.\ private definition "static Q = (\r s. Q)" @@ -186,7 +187,7 @@ private lemma private lemma trips_True: "trip True" by (simp add: trip_def) -text \We need to push the quantifiers into the hoare triples. +text \We need to push the quantifiers into the Hoare triples. This is an unfortunate bit of manual work, but anything more than 2 levels of nesting is unlikely.\ @@ -209,7 +210,7 @@ text \Existentials are hard, and we don't see them often we fail to process the triple and it won't be lifted. Some more work here to allow the heuristics to drop any added implications - if they're deemed unecessary.\ + if they're deemed unnecessary.\ private lemma trips_push_ex1: "trip (\x. \\s. Q s\ f \\r s. Q' x r s\) \ @@ -269,7 +270,7 @@ private method uses_arg for C :: "'a \ 'b \ bool" = determ \(match (C) in "\r s. ?discard_r s" (cut) \ \fail\ \ _ \ \-\)\ text \Here the "test" constant holds information about the logical context of the atomic postcondition - in the original hoare triple. "f" is the function with its arguments, "C" is all the collected + in the original Hoare triple. "f" is the function with its arguments, "C" is all the collected premises and "Q" is the atomic postcondition that we want to solve in isolation. The method succeeds if the atomic postcondition seems to not depend on its context, i.e. @@ -305,7 +306,7 @@ private method make_goals methods wp_weak wp_strong tests = text \Once all the triples exist we simplify them all in one go to find trivial or self-contradictory rules. This avoids invoking the simplifier - once per postcondition. imp_conjL is used to curry our generated implications. + once per postcondition. @{thm imp_conjL} is used to curry our generated implications. If all the postconditions together are contradictory, the simplifier won't use it to strengthen the postcondition. As an optimization we simply bail out in that case, rather than @@ -324,7 +325,7 @@ method post_strengthen methods wp_weak wp_strong simp' tests = rule trip_drop, (rule hoare_vcg_prop)?) 
-text \The "wpi" named theorem is used to avoid the safety heuristics, effectively +text \The @{text wpi} named theorem is used to avoid the safety heuristics, effectively saying that the presence of that postcondition indicates that it should always be lifted.\ named_theorems wpi @@ -430,7 +431,7 @@ notepad begin apply wp apply (wpi wpi: Q') apply (wpi wpi: Q) - apply (rule hoare_strengthen_post[OF hoare_post_taut[where P=\]]) + apply (rule hoare_strengthen_post[OF wp_post_taut]) apply (simp add: C) using C apply blast diff --git a/lib/Monad_WP/wp/WPSimp.thy b/lib/Monads/wp/WPSimp.thy similarity index 94% rename from lib/Monad_WP/wp/WPSimp.thy rename to lib/Monads/wp/WPSimp.thy index d18b905e39..0ba723a2c2 100644 --- a/lib/Monad_WP/wp/WPSimp.thy +++ b/lib/Monads/wp/WPSimp.thy @@ -9,7 +9,7 @@ imports "WP" "WPC" "WPFix" - Simp_No_Conditional + Eisbach_Tools.Simp_No_Conditional begin (* Wrap up the standard usage pattern of wp/wpc/simp into its own command: *) diff --git a/lib/Monad_WP/wp/WP_Pre.thy b/lib/Monads/wp/WP_Pre.thy similarity index 51% rename from lib/Monad_WP/wp/WP_Pre.thy rename to lib/Monads/wp/WP_Pre.thy index 87f5311bcd..82879d7d68 100644 --- a/lib/Monad_WP/wp/WP_Pre.thy +++ b/lib/Monads/wp/WP_Pre.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause @@ -7,11 +8,11 @@ theory WP_Pre imports Main - Trace_Schematic_Insts + Eisbach_Tools.Trace_Schematic_Insts "HOL-Eisbach.Eisbach_Tools" begin -named_theorems wp_pre +section \Creating Schematic Preconditions\ ML \ structure WP_Pre = struct @@ -29,6 +30,11 @@ fun append_used_rule ctxt used_thms_ref tag used_thm insts = else Thm.prop_of used_thm in used_thms_ref := !used_thms_ref @ [(name, tag, inst_term)] end +fun remove_last_used_thm trace used_thms_ref = + if trace + then used_thms_ref := (!used_thms_ref |> rev |> tl |> rev) + else () + fun trace_rule' trace ctxt callback tac rule = if trace then Trace_Schematic_Insts.trace_schematic_insts_tac ctxt callback tac rule @@ -39,33 +45,61 @@ fun trace_rule trace ctxt used_thms_ref tag tac rule = (fn rule_insts => fn _ => append_used_rule ctxt used_thms_ref tag rule rule_insts) tac rule; +fun trace_used_thm ctxt (name, tag, prop) = + let val adjusted_name = ThmExtras.adjust_thm_name ctxt (name, NONE) prop + in Pretty.block + (ThmExtras.pretty_adjusted_name ctxt adjusted_name :: + [Pretty.str ("[" ^ tag ^ "]:"),Pretty.brk 1, Syntax.unparse_term ctxt prop]) + end + +fun trace_used_thms trace ctxt used_thms_ref = + if trace + then Pretty.big_list "Theorems used by wp:" + (map (trace_used_thm ctxt) (!used_thms_ref)) + |> Pretty.writeln + handle Size => warning ("WP tracing information was too large to print.") + else (); + fun rtac ctxt rule = resolve_tac ctxt [rule] -fun pre_tac trace ctxt pre_rules used_thms_ref i t = let - fun apply_rule t = trace_rule trace ctxt used_thms_ref "wp_pre" (rtac ctxt) t i - val t2 = FIRST (map apply_rule pre_rules) t |> Seq.hd +(* Test whether any resulting goals can be solved by FalseE. In particular, this lets us avoid + weakening a precondition that is already schematic. 
*) +fun test_goals ctxt pre_rules i t = + let + val t2 = FIRST (map (fn rule => rtac ctxt rule i) pre_rules) t |> Seq.hd val etac = TRY o eresolve_tac ctxt [@{thm FalseE}] fun dummy_t2 _ _ = Seq.single t2 val t3 = (dummy_t2 THEN_ALL_NEW etac) i t |> Seq.hd - in if Thm.nprems_of t3 <> Thm.nprems_of t2 - then Seq.empty else Seq.single t2 end - handle Option => Seq.empty + in Thm.nprems_of t3 <> Thm.nprems_of t2 + end -fun tac trace used_thms_ref ctxt = let - val pres = Named_Theorems.get ctxt @{named_theorems wp_pre} - in pre_tac trace ctxt pres used_thms_ref end +fun pre_tac trace ctxt pre_rules used_thms_ref i t = + let + fun apply_rule t = trace_rule trace ctxt used_thms_ref "wp_pre" (rtac ctxt) t i + fun t2 _ = FIRST (map apply_rule pre_rules) t |> Seq.hd + in if test_goals ctxt pre_rules i t + then Seq.empty else Seq.single (t2 ()) end + handle Option => Seq.empty -val method = +fun pre_tac' ctxt pre_rules i t = let + val trace = Config.get ctxt wp_trace orelse Config.get ctxt wp_trace_instantiation val used_thms_ref = Unsynchronized.ref [] : (string * string * term) list Unsynchronized.ref - in - Args.context >> (fn _ => fn ctxt => - Method.SIMPLE_METHOD' (tac (Config.get ctxt wp_trace) used_thms_ref ctxt)) + in Seq.map (fn thm => (trace_used_thms trace ctxt used_thms_ref; thm)) + (pre_tac trace ctxt pre_rules used_thms_ref i t) end + +val method = + Attrib.thms >> (fn thms => fn ctxt => Method.SIMPLE_METHOD' (pre_tac' ctxt thms)) end \ -method_setup wp_pre0 = \WP_Pre.method\ +(* This method takes a list of theorems as parameter. + See wp_pre definition below for an example use. *) +method_setup pre_tac = \WP_Pre.method\ + +named_theorems wp_pre +method wp_pre0 = pre_tac wp_pre method wp_pre = wp_pre0? definition diff --git a/lib/doc/WPReadme.txt b/lib/Monads/wp/WP_README.thy similarity index 55% rename from lib/doc/WPReadme.txt rename to lib/Monads/wp/WP_README.thy index fec3734aed..2c1646c4a8 100644 --- a/lib/doc/WPReadme.txt +++ b/lib/Monads/wp/WP_README.thy @@ -1,48 +1,56 @@ -# -# Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) -# -# SPDX-License-Identifier: BSD-2-Clause -# +(* + * Copyright 2024, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause +*) -Readme for the 'WP' tool and friends +theory WP_README + imports + Nondet_More_VCG +begin -The nondeterministic monad framework comes with a collection of tools which +\ \The 'WP' tool and friends\ + +text\ +The monad framework comes with a collection of tools which together perform the role of a VCG (verification condition generator). The usual strategy for proving a Hoare triple is via backward propagation from the postcondition. The initial step is to replace the current precondition with an Isabelle schematic variable using a precondition weakening rule such as -'hoare_pre'. This schematic variable is progressively instantiated by applying +@{thm hoare_pre}. This schematic variable is progressively instantiated by applying weakest precondition rules as introduction rules. The implication between the assumed and generated preconditions must be solved by some other means. This approach requires a large family of weakest precondition rules, including one for each monadic combinator and operation, and further rules for -user-defined monadic operations. The 'wp' tool automates the storage and use of +user-defined monadic operations. The @{method wp} tool automates the storage and use of this collection of rules. 
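[Editor's sketch, not part of this patch: a minimal example of the workflow described in the README text above, assuming the Monads nondeterministic-monad definitions and wp setup are in scope; P, f and g are placeholder names. The wp method first introduces a schematic precondition (via wp_pre), then instantiates it by applying weakest-precondition rules backwards through the program, and the residual implication between the stated and computed preconditions is closed by simp.]

lemma wp_usage_sketch:
  "\<lbrace>\<lambda>s. P (g (f s))\<rbrace> do x \<leftarrow> gets f; return (g x) od \<lbrace>\<lambda>rv s. P rv\<rbrace>"
  (* wp applies the rules for return and gets backwards, instantiating the
     schematic precondition; simp then discharges the remaining implication. *)
  by (wp | simp)+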
Weakest precondition rules are marked with the 'wp' -attribute and will be automatically applied. +attribute and are then automatically applied. -The 'wp' tool also also handles the construction of variations of 'wp' rules +The 'wp' tool also handles the construction of variations of 'wp' rules via combinator rules. If the postcondition being proved is a conjunction and a weakest precondition rule is available for the first conjunct, progress can be -made by first applying the 'hoare_vcg_conj_lift' combinator rule and then the +made by first applying the @{thm hoare_vcg_conj_lift} combinator rule and then the rule of interest. The 'wp_comb' attribute given to such rules in -NonDetMonadVCG.thy specifies that they should be used in this way. +@{theory Monads.Nondet_VCG} specifies that they should be used in this way. -The 'wp' tool's semantics are defined entirely by these sets of rules. It -always either applies a 'wp' rule as an introduction rule, or applies a -'wp_comb' rule first and then a 'wp' rule. If multiple choices are available, -the one with the most recently added 'wp' rule is preferred. Application alone -is preferred to application with a combinator, and combinators are also -selected last-first. There is also a 'wp_split' rule set which are not combined -with combinators and which are applied only if no 'wp' rules can be applied. +The 'wp' tool's semantics are defined entirely by these sets of rules. +Selection from a set of rules ('wp' and 'wp_split') or combinators ('wp_comb') +occurs in last-to-first order, i.e. always preferring to apply the theorem most +recently added to a set. +First, each 'wp' rule is attempted in the following order: +- on its own, as an introduction rule +- prefixed by a 'wp_comb' rule (i.e. 'rule wp_comb_rule, rule wp_rule'). +If no 'wp' rule can be applied, rules from the 'wp_split' set are attempted +(on their own as introduction rules, without 'wp_comb' prefixes). Note that rules may be supplied which are not the actual weakest precondition. -This may cause the tool to produce unhelpfully weak conclusions. Perhaps the -tool should actually be named 'p'. The 'hoare_vcg_prop' rule supplied in -NonDetMonadVCG.thy is unsafe in this manner. It is convenient that -postconditions which ignore the results of the operation can be handled -immediately (especially when used with the combinator rules), however +This may cause the tool to produce unhelpfully weak conclusions. The +@{thm hoare_vcg_prop} rule supplied in @{theory Monads.Nondet_VCG} is unsafe in this manner. +It is convenient that postconditions which ignore the results of the operation can +be handled immediately (especially when used with the combinator rules), however information from assertions in the program may be discarded. Rules declared 'wp' do not have to match an unspecified postcondition. It was @@ -60,15 +68,19 @@ that case spliting cannot be done via Isabelle's normal mechanisms. Isabelle's implicitly doing case splits on meta-quantified tuples in a way that blocks unification. -The 'wpc' tool synthesises the needed case split rules for datatype case +The @{method wpc} tool synthesises the needed case split rules for datatype case statements in the function bodies in the Hoare triples. +There are several cases where unification of the schematic preconditions can cause +problems. The @{method wpfix} tool handles four of the most common of these cases. +See @{theory Monads.WPFix} and @{theory Monads.Datatype_Schematic} for more details. 
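[Editor's sketch, not part of this patch: a small illustration of extending the rule set described above, assuming the Monads nondeterministic-monad definitions are in scope; incr_count, P and Q are made-up placeholder names. A wp rule for a user-defined operation is proved once and declared with the wp attribute, after which the wp method applies it automatically, including to compound postconditions.]

definition incr_count :: "(nat, unit) nondet_monad" where
  "incr_count \<equiv> modify (\<lambda>n. n + 1)"

(* The weakest-precondition rule for incr_count, added to the wp rule set. *)
lemma incr_count_wp[wp]:
  "\<lbrace>\<lambda>n. Q () (n + 1)\<rbrace> incr_count \<lbrace>Q\<rbrace>"
  by (clarsimp simp: incr_count_def simpler_modify_def valid_def)

(* wp now picks up incr_count_wp without it being named explicitly. *)
lemma "\<lbrace>\<lambda>n. P (n + 1) \<and> 0 < n + 1\<rbrace> incr_count \<lbrace>\<lambda>_ n. P n \<and> 0 < n\<rbrace>"
  by (wp | simp)+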
+ A further caveat is that the 'wp' and 'wp_comb' rulesets provided are not necessarily ideal. Updating these rulesets would create difficult maintenance problems, and thus they are largely left as first defined. One issue that has not been addressed is the implicit precondition weakening done by combinator -rules 'hoare_post_comb_imp_conj' and 'hoare_vcg_precond_imp'. In hindsight it -would be better if 'hoare_pre' were always applied manually, or if the 'wp' +rules @{thm hoare_post_comb_imp_conj} and @{thm hoare_weaken_pre}. In hindsight it +would be better if @{thm hoare_pre} were always applied manually, or if the 'wp' tool itself could decide when they ought be applied. Note that such weakening rules were not supplied for the error hoare triples/quadruple, which postdate this realisation. @@ -83,4 +95,13 @@ The 'wp' tool may be extended to new triples or other judgements by supplying an appropriate set of rules. A 'wp_trip' rule may be provided to accelerate rule lookup. +The `wp` tool can also be traced, either by invoking it with `wp (trace)` or by +setting the config value `wp_trace` to `true`. This will list the rules used by `wp`, +in the order that they were applied. It is occasionally helpful to see the specific +instantiations of the rules used, to see how their preconditions were unified. This +can be done by setting `wp_trace_instantiation` to `true`. + +For ease of use, @{method wpsimp} is available and wraps up the standard usage pattern +of '(wpfix|wp|wpc|clarsimp)+'.\ +end diff --git a/lib/NICTATools.thy b/lib/NICTATools.thy index 6d42c7c09d..5fc6acaf50 100644 --- a/lib/NICTATools.thy +++ b/lib/NICTATools.thy @@ -7,16 +7,16 @@ (* Miscellaneous Isabelle tools. *) theory NICTATools imports - Apply_Trace_Cmd - Apply_Debug + Eisbach_Tools.Apply_Trace_Cmd + Eisbach_Tools.Apply_Debug Find_Names (* Solves_Tac *) - Rule_By_Method - Eisbach_Methods + Eisbach_Tools.Rule_By_Method + Eisbach_Tools.Eisbach_Methods Time_Methods_Cmd Try_Attribute Repeat_Attribute - Trace_Schematic_Insts + Eisbach_Tools.Trace_Schematic_Insts Insulin ShowTypes Locale_Abbrev diff --git a/lib/NonDetMonadLemmaBucket.thy b/lib/NonDetMonadLemmaBucket.thy index 989724b42a..bcc1e310c1 100644 --- a/lib/NonDetMonadLemmaBucket.thy +++ b/lib/NonDetMonadLemmaBucket.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause @@ -6,235 +7,58 @@ theory NonDetMonadLemmaBucket imports - NonDetMonadVCG - MonadEq - WhileLoopRulesCompleteness - "Word_Lib.Distinct_Prop" + Lib + Monads.Nondet_More_VCG + Monad_Lists + Monads.Nondet_Monad_Equations + Monad_Commute + Monads.Nondet_No_Fail + Monads.Nondet_No_Throw + CutMon + Oblivious + Injection_Handler + Monads.Nondet_While_Loop_Rules_Completeness + "Word_Lib.Distinct_Prop" (* for distinct_tuple_helper *) + Monads.Nondet_Reader_Option begin -lemma no_fail_assume_pre: - "(\s. P s \ no_fail P f) \ no_fail P f" - by (simp add: no_fail_def) - -lemma no_fail_liftM_eq [simp]: - "no_fail P (liftM f m) = no_fail P m" - by (auto simp: liftM_def no_fail_def bind_def return_def) - -lemma mapME_Cons: - "mapME m (x # xs) = (doE y \ m x; ys \ (mapME m xs); returnOk (y # ys) odE)" - by (simp add: mapME_def sequenceE_def Let_def) - - -lemma mapME_Nil : "mapME f [] = returnOk []" - unfolding mapME_def by (simp add: sequenceE_def) - -lemma hoare_take_disjunct: - "\P\ f \\rv s. 
P' rv s \ (False \ P'' rv s)\ - \ \P\ f \P''\" - by (erule hoare_strengthen_post, simp) - -lemma hoare_post_add: - "\P\ S \\r s. R r s \ Q r s\ \ \P\ S \Q\" - by (erule hoare_strengthen_post, simp) - -lemma hoare_disjI1: - "\R\ f \P\ \ \R\ f \\r s. P r s \ Q r s\" - apply (erule hoare_post_imp [rotated]) - apply simp - done - -lemma hoare_disjI2: - "\R\ f \Q\ \ \R\ f \\r s. P r s \ Q r s \" - by (rule hoare_post_imp [OF _ hoare_disjI1, where P1=Q], auto) - -lemma hoare_name_pre_state: - "\ \s. P s \ \(=) s\ f \Q\ \ \ \P\ f \Q\" - by (clarsimp simp: valid_def) - -lemma hoare_name_pre_stateE: - "\\s. P s \ \(=) s\ f \Q\, \E\\ \ \P\ f \Q\, \E\" - by (clarsimp simp: validE_def2) - -lemma valid_prove_more: - "\P\ f \\rv s. Q rv s \ Q' rv s\ \ \P\ f \Q'\" - by (erule hoare_strengthen_post, simp) - -lemma hoare_vcg_if_lift: - "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ - \R\ f \\rv s. if P then X rv s else Y rv s\" - - "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ - \R\ f \\rv. if P then X rv else Y rv\" - by (auto simp: valid_def split_def) - -lemma hoare_lift_Pf2: - assumes P: "\x. \Q x\ m \\_. P x\" - assumes f: "\P. \\s. P (f s)\ m \\_ s. P (f s)\" - shows "\\s. Q (f s) s\ m \\_ s. P (f s) s\" - apply (clarsimp simp add: valid_def) - apply (frule (1) use_valid [OF _ P], drule (2) use_valid [OF _ f]) - done - -lemma hoare_lift_Pf3: - assumes P: "\x. \Q x\ m \P x\" - assumes f: "\P. \\s. P (f s)\ m \\_ s. P (f s)\" - shows "\\s. Q (f s) s\ m \\rv s. P (f s) rv s\" - apply (clarsimp simp add: valid_def) - apply (frule (1) use_valid [OF _ P], drule (2) use_valid [OF _ f]) - done - -lemma no_fail_select_f [wp]: - "no_fail (\s. \snd S) (select_f S)" - by (simp add: select_f_def no_fail_def) - -lemma hoare_lift_Pf: - assumes P: "\x. \P x\ m \\_. P x\" - assumes f: "\P. \\s. P (f s)\ m \\_ s. P (f s)\" - shows "\\s. P (f s) s\ m \\_ s. P (f s) s\" - apply (clarsimp simp add: valid_def) - apply (frule (1) use_valid [OF _ P], drule (2) use_valid [OF _ f]) - done - -lemma assert_def2: "assert v = assert_opt (if v then Some () else None)" - by (cases v, simp_all add: assert_def assert_opt_def) - -lemma hoare_if_r_and: - "\P\ f \\r. if R r then Q r else Q' r\ - = \P\ f \\r s. (R r \ Q r s) \ (\R r \ Q' r s)\" - by (fastforce simp: valid_def) - - -lemma no_fail_liftM [wp]: - "no_fail P m \ no_fail P (liftM f m)" - by (simp) - -lemma no_fail_pre_and: - "no_fail P f \ no_fail (P and Q) f" - by (erule no_fail_pre) simp - -lemma hoare_convert_imp: - "\ \\s. \ P s\ f \\rv s. \ Q s\; \R\ f \S\ \ \ - \\s. P s \ R s\ f \\rv s. Q s \ S rv s\" - apply (simp only: imp_conv_disj) - apply (erule(1) hoare_vcg_disj_lift) - done - -lemma hoare_vcg_ex_lift_R: - "\ \v. \P v\ f \Q v\,- \ \ \\s. \v. P v s\ f \\rv s. \v. Q v rv s\,-" - apply (simp add: validE_R_def validE_def) - apply (rule hoare_strengthen_post, erule hoare_vcg_ex_lift) - apply (auto split: sum.split) - done - -lemma hoare_case_option_wpR: - "\\P\ f None \Q\,-; \x. \P' x\ f (Some x) \Q' x\,-\ \ - \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\,-" - by (cases v) auto - - -lemma hoare_vcg_conj_liftE_R: - "\ \P\ f \P'\,-; \Q\ f \Q'\,- \ \ \P and Q\ f \\rv s. P' rv s \ Q' rv s\, -" - apply (simp add: validE_R_def validE_def valid_def split: sum.splits) - apply blast - done - -lemma zipWithM_x_inv: - assumes x: "\x y. \P\ m x y \\rv. P\" - shows "length xs = length ys \ \P\ zipWithM_x m xs ys \\rv. 
P\" -proof (induct xs ys rule: list_induct2) - case Nil - show ?case - by (simp add: zipWithM_x_def sequence_x_def zipWith_def) -next - case (Cons a as b bs) - have zipWithM_x_Cons: - "\m x xs y ys. zipWithM_x m (x # xs) (y # ys) - = do m x y; zipWithM_x m xs ys od" - by (simp add: zipWithM_x_def sequence_x_def zipWith_def) - have IH: "\P\ zipWithM_x m as bs \\rv. P\" - by fact - show ?case - by (simp add: zipWithM_x_Cons) (wp IH x) -qed - -lemma K_valid[wp]: - "\K P\ f \\_. K P\" - by (simp add: valid_def) - -lemma mapME_wp: - assumes x: "\x. x \ S \ \P\ f x \\_. P\, \\_. E\" - shows "set xs \ S \ \P\ mapME f xs \\_. P\, \\_. E\" - apply (induct xs) - apply (simp add: mapME_def sequenceE_def) - apply wp - apply simp - apply (simp add: mapME_Cons) - apply (wp x|simp)+ +lemma distinct_tuple_helper: + "\P\ f \\rv s. distinct (x # xs rv s)\ \ + \P\ f \\rv s. distinct (x # (map fst (zip (xs rv s) (ys rv s))))\" + apply (erule hoare_strengthen_post) + apply (erule distinct_prefix) + apply (simp add: map_fst_zip_prefix) done -lemmas mapME_wp' = mapME_wp [OF _ subset_refl] - -lemma sequence_x_Cons: "\x xs. sequence_x (x # xs) = (x >>= (\_. sequence_x xs))" - by (simp add: sequence_x_def) - -lemma mapM_Cons: "mapM m (x # xs) = (do y \ m x; ys \ (mapM m xs); return (y # ys) od)" - by (simp add: mapM_def sequence_def Let_def) - -lemma mapM_simps: - "mapM m [] = return []" - "mapM m (x#xs) = do r \ m x; rs \ (mapM m xs); return (r#rs) od" - by (simp_all add: mapM_def sequence_def) - -lemma zipWithM_x_mapM: - "zipWithM_x f as bs = (mapM (split f) (zip as bs) >>= (\_. return ()))" - apply (simp add: zipWithM_x_def zipWith_def) - apply (induct ("zip as bs")) - apply (simp add: sequence_x_def mapM_def sequence_def) - apply (simp add: sequence_x_Cons mapM_Cons bind_assoc) +lemma gets_the_validE_R_wp: + "\\s. f s \ None \ isRight (the (f s)) \ Q (theRight (the (f s))) s\ + gets_the f + \Q\,-" + apply (simp add: gets_the_def validE_R_def validE_def) + apply (wp | wpc | simp add: assert_opt_def)+ + apply (clarsimp split: split: sum.splits) done -(* zipWithM_x and mapM_ *) +lemma return_bindE: + "isRight v \ return v >>=E f = f (theRight v)" + by (cases v; clarsimp simp: return_returnOk) -lemma mapM_wp: - assumes x: "\x. x \ S \ \P\ f x \\rv. P\" - shows "set xs \ S \ \P\ mapM f xs \\rv. P\" - apply (induct xs) - apply (simp add: mapM_def sequence_def) - apply (simp add: mapM_Cons) - apply wp - apply (rule x, clarsimp) - apply simp - done +lemma list_case_return: (* not in Lib, because "return" is not in scope there *) + "(case xs of [] \ return v | y # ys \ return (f y ys)) + = return (case xs of [] \ v | y # ys \ f y ys)" + by (simp split: list.split) -lemma mapM_x_mapM: - "mapM_x m l = (mapM m l >>= (\x. return ()))" - apply (simp add: mapM_x_def sequence_x_def mapM_def sequence_def) - apply (induct l, simp_all add: Let_def bind_assoc) +(* We use isLeft, because isLeft=isl is the primitive concept; isRight=\isl matches on isl. *) +lemma valid_isLeft_theRight_split: + "\P\ f \\rv s. Q False rv s\,\\e s. \v. Q True v s\ \ + \P\ f \\rv s. Q (isLeft rv) (theRight rv) s\" + apply (simp add: validE_def) + apply (erule hoare_strengthen_post) + apply (simp split: sum.split_asm) done -lemma mapM_x_wp: - assumes x: "\x. x \ S \ \P\ f x \\rv. P\" - shows "set xs \ S \ \P\ mapM_x f xs \\rv. P\" - by (subst mapM_x_mapM) (wp mapM_wp x) - -lemma mapM_x_Nil: - "mapM_x f [] = return ()" - unfolding mapM_x_def sequence_x_def - by simp - -lemma sequence_xappend1: - "sequence_x (xs @ [x]) = (sequence_x xs >>= (\_. 
x))" - by (induct xs) (simp add: sequence_x_def, simp add: sequence_x_Cons bind_assoc) - -lemma mapM_append_single: - "mapM_x f (xs @ [y]) = (mapM_x f xs >>= (\_. f y))" - unfolding mapM_x_def - by (simp add: sequence_xappend1) - -lemma mapM_x_Cons: - "mapM_x m (x # xs) = (do m x; mapM_x m xs od)" - by (simp add: mapM_x_def sequence_x_def) +(* depends on Lib.list_induct_suffix *) lemma mapM_x_inv_wp2: assumes post: "\s. \ I s; V [] s \ \ Q s" and hr: "\a as. suffix (a # as) xs \ \\s. I s \ V (a # as) s\ m a \\r s. I s \ V as s\" @@ -255,2915 +79,11 @@ next done qed -lemma zipWithM_x_mapM_x: - "zipWithM_x f as bs = mapM_x (\(x, y). f x y) (zip as bs)" - apply (subst zipWithM_x_mapM) - apply (subst mapM_x_mapM) - apply (rule refl) - done - -lemma zipWithM_x_append1: - fixes f :: "'b \ 'c \ ('a, unit) nondet_monad" - assumes ls: "length xs = length ys" - shows "(zipWithM_x f (xs @ [x]) (ys @ [y])) = (zipWithM_x f xs ys >>= (\_. f x y))" - unfolding zipWithM_x_def zipWith_def - by (subst zip_append [OF ls], simp, rule sequence_xappend1) - -lemma zipWithM_x_Cons: - assumes ls: "length xs = length ys" - shows "(zipWithM_x f (x # xs) (y # ys)) = (f x y >>= (\_. zipWithM_x f xs ys))" - unfolding zipWithM_x_def zipWith_def - by (simp, rule sequence_x_Cons) - -lemma mapM_x_inv_wp3: - fixes m :: "'b \ ('a, unit) nondet_monad" - assumes hr: "\a as bs. xs = as @ [a] @ bs \ - \\s. I s \ V as s\ m a \\r s. I s \ V (as @ [a]) s\" - shows "\\s. I s \ V [] s\ mapM_x m xs \\rv s. I s \ V xs s\" - using hr -proof (induct xs rule: rev_induct) - case Nil thus ?case - apply (simp add: mapM_x_Nil) - done -next - case (snoc x xs) - show ?case - apply (simp add: mapM_append_single) - apply (wp snoc.prems) - apply simp - apply (rule snoc.hyps [OF snoc.prems]) - apply simp - apply assumption - done -qed - - -lemma mapME_x_map_simp: - "mapME_x m (map f xs) = mapME_x (m o f) xs" - by (simp add: mapME_x_def sequenceE_x_def) - -lemma mapM_return: - "mapM (\x. return (f x)) xs = return (map f xs)" - apply (induct xs) - apply (simp add: mapM_def sequence_def) - apply (simp add: mapM_Cons) - done - -lemma mapME_x_inv_wp: - assumes x: "\x. \P\ f x \\rv. P\,\E\" - shows "\P\ mapME_x f xs \\rv. P\,\E\" - apply (induct xs) - apply (simp add: mapME_x_def sequenceE_x_def) - apply wp - apply (simp add: mapME_x_def sequenceE_x_def) - apply (fold mapME_x_def sequenceE_x_def) - apply wp - apply (rule x) - apply assumption - done - -lemma liftM_return [simp]: - "liftM f (return x) = return (f x)" - by (simp add: liftM_def) - -lemma mapM_x_return : - "mapM_x (\_. return v) xs = return v" - by (induct xs) (auto simp: mapM_x_Nil mapM_x_Cons) - -lemma hoare_imp_eq_substR: - "\P\ f \Q\,- \ \P\ f \\rv s. rv = x \ Q x s\,-" - by (fastforce simp add: valid_def validE_R_def validE_def split: sum.splits) - -lemma hoare_split_bind_case_sum: - assumes x: "\rv. \R rv\ g rv \Q\" - "\rv. \S rv\ h rv \Q\" - assumes y: "\P\ f \S\,\R\" - shows "\P\ f >>= case_sum g h \Q\" - apply (rule hoare_seq_ext [OF _ y[unfolded validE_def]]) - apply (case_tac x, simp_all add: x) - done - -lemma hoare_split_bind_case_sumE: - assumes x: "\rv. \R rv\ g rv \Q\,\E\" - "\rv. \S rv\ h rv \Q\,\E\" - assumes y: "\P\ f \S\,\R\" - shows "\P\ f >>= case_sum g h \Q\,\E\" - apply (unfold validE_def) - apply (rule hoare_seq_ext [OF _ y[unfolded validE_def]]) - apply (case_tac x, simp_all add: x [unfolded validE_def]) - done - -lemma bind_comm_mapM_comm: - assumes bind_comm: - "\n z. 
do x \ a; y \ b z; (n x y :: ('a, 's) nondet_monad) od = - do y \ b z; x \ a; n x y od" - shows "\n'. do x \ a; ys \ mapM b zs; (n' x ys :: ('a, 's) nondet_monad) od = - do ys \ mapM b zs; x \ a; n' x ys od" -proof (induct zs) - case Nil - thus ?case - by (simp add: mapM_def sequence_def) - next - case (Cons z zs') - thus ?case - by (clarsimp simp: mapM_Cons bind_assoc bind_comm intro!: bind_cong [OF refl]) -qed - -lemma liftE_handle : - "(liftE f g) = liftE f" - by (simp add: handleE_def handleE'_def liftE_def) - -lemma mapM_empty: - "mapM f [] = return []" - unfolding mapM_def - by (simp add: sequence_def) - -lemma mapM_append: - "mapM f (xs @ ys) = - (do x \ mapM f xs; - y \ mapM f ys; - return (x @ y) - od)" -proof (induct xs) - case Nil - thus ?case by (simp add: mapM_empty) -next - case (Cons x xs) - - show ?case - by (simp add: mapM_Cons bind_assoc Cons.hyps) -qed - -lemma mapM_x_append: - "mapM_x f (xs @ ys) = - (do x \ mapM_x f xs; - y \ mapM_x f ys; - return () - od)" - by (simp add: mapM_x_mapM mapM_append bind_assoc) - -lemma mapM_singleton: - "mapM f [x] = (do r \ f x; return [r] od)" - by (simp add: mapM_def sequence_def) - -lemma mapM_x_singleton: - "mapM_x f [x] = f x" - by (simp add: mapM_x_mapM mapM_singleton) - -lemma return_returnOk: - "return (Inr x) = returnOk x" - unfolding returnOk_def by simp - -lemma mapME_x_sequenceE: - "mapME_x f xs \ doE _ \ sequenceE (map f xs); returnOk () odE" - apply (induct xs, simp_all add: mapME_x_def sequenceE_def sequenceE_x_def) - apply (simp add: Let_def bindE_assoc) - done - -lemma sequenceE_Cons: - "sequenceE (x # xs) = (doE v \ x; vs \ sequenceE xs; returnOk (v # vs) odE)" - by (simp add: sequenceE_def Let_def) - -lemma snd_return [monad_eq]: - "\ snd (return a b)" - unfolding return_def by simp - -lemma snd_throwError [monad_eq]: - "\ snd (throwError e s)" - unfolding throwError_def by (simp add: snd_return) - -lemma snd_lift_Inr [monad_eq]: - "snd (lift b (Inr r) t) = snd (b r t)" - unfolding lift_def by simp - -lemma snd_lift_Inl [monad_eq]: - "\ snd (lift b (Inl r) t)" - unfolding lift_def by (simp add: snd_throwError) - -lemma snd_fail [monad_eq]: - "snd (fail s)" - apply (clarsimp simp: fail_def) - done - -lemma not_snd_bindD: - "\ \ snd ((a >>= b) s); (rv, s') \ fst (a s) \ \ \ snd (a s) \ \ snd (b rv s')" - by (fastforce simp: bind_def) - -lemma whenE_bindE_throwError_to_if: - "whenE P (throwError e) >>=E (\_. b) = (if P then (throwError e) else b)" - unfolding whenE_def bindE_def - by (auto simp: NonDetMonad.lift_def throwError_def returnOk_def) - -lemma not_snd_bindI1: - "\ snd ((a >>= b) s) \ \ snd (a s)" - by (fastforce simp: bind_def) - -lemma not_snd_bindI2: - "\ \ snd ((a >>= b) s); (rv, s') \ fst (a s) \ \ \ snd (b rv s')" - by (fastforce simp: bind_def) - -lemma empty_fail_not_snd: - "\ \ snd (m s); empty_fail m \ \ \v. v \ fst (m s)" - by (fastforce simp: empty_fail_def) - -lemma mapM_Nil: - "mapM f [] = return []" - by (simp add: mapM_def sequence_def) - -lemma hoare_vcg_exI: - "\P\ f \Q x\ \ \P\ f \\rv s. \x. Q x rv s\" - apply (simp add: valid_def split_def) - apply blast - done - -lemma hoare_exI_tuple: - "\P\ f \\(rv,rv') s. Q x rv rv' s\ \ \P\ f \\(rv,rv') s. \x. Q x rv rv' s\" - by (fastforce simp: valid_def) - -lemma hoare_ex_all: - "(\x. \P x\ f \Q\) = \\s. \x. P x s\ f \Q\" - apply (rule iffI) - apply (fastforce simp: valid_def)+ - done - -lemma empty_fail_bindE: - "\ empty_fail f; \rv. 
empty_fail (g rv) \ - \ empty_fail (f >>=E g)" - apply (simp add: bindE_def) - apply (erule empty_fail_bind) - apply (simp add: lift_def throwError_def split: sum.split) - done - -lemma empty_fail_error_bits: - "empty_fail (returnOk v)" - "empty_fail (throwError v)" - "empty_fail (liftE f) = empty_fail f" - apply (simp_all add: returnOk_def throwError_def) - apply (rule iffI, simp_all add: liftE_def) - apply (simp add: empty_fail_def bind_def return_def) - apply (erule allEI) - apply clarsimp - done - - -lemma mapM_upd: - assumes "\x rv s s'. (rv,s') \ fst (f x s) \ x \ set xs \ (rv, g s') \ fst (f x (g s))" - shows "(rv,s') \ fst (mapM f xs s) \ (rv, g s') \ fst (mapM f xs (g s))" - using assms -proof (induct xs arbitrary: rv s s') - case Nil - thus ?case by (simp add: mapM_Nil return_def) -next - case (Cons z zs) - from Cons.prems - show ?case - apply (clarsimp simp: mapM_Cons in_monad) - apply (drule Cons.prems, simp) - apply (rule exI, erule conjI) - apply (erule Cons.hyps) - apply (erule Cons.prems) - apply simp - done -qed - -definition - cutMon :: "('s \ bool) \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad" where - "cutMon P f \ \s. if P s then f s else fail s" - -lemma cutMon_walk_bind: - "(cutMon ((=) s) (f >>= g)) - = (cutMon ((=) s) f >>= (\rv. cutMon (\s'. (rv, s') \ fst (f s)) (g rv)))" - apply (rule ext, simp add: cutMon_def bind_def fail_def) - apply (auto simp: split_def) - done - -lemma cutMon_walk_bindE: - "(cutMon ((=) s) (f >>=E g)) - = (cutMon ((=) s) f >>=E (\rv. cutMon (\s'. (Inr rv, s') \ fst (f s)) (g rv)))" - apply (simp add: bindE_def cutMon_walk_bind) - apply (rule bind_cong, rule refl) - apply (simp add: cutMon_def lift_def fail_def - split: if_split_asm) - apply (clarsimp split: sum.split) - done - -lemma cutMon_walk_if: - "cutMon ((=) s) (if P then f else g) - = (if P then cutMon ((=) s) f else cutMon ((=) s) g)" - by (simp add: cutMon_def) - -lemma cutMon_valid_drop: - "\P\ f \Q\ \ \P\ cutMon R f \Q\" - by (simp add: cutMon_def valid_def fail_def) - -lemma cutMon_validE_drop: - "\P\ f \Q\,\E\ \ \P\ cutMon R f \Q\,\E\" - by (simp add: validE_def cutMon_valid_drop) - -lemma assertE_assert: - "assertE F = liftE (assert F)" - by (clarsimp simp: assertE_def assert_def liftE_def returnOk_def - split: if_split) - -lemma snd_cutMon: - "snd (cutMon P f s) = (P s \ snd (f s))" - by (simp add: cutMon_def fail_def split: if_split) - -lemma exec_modify: - "(modify f >>= g) s = g () (f s)" - by (simp add: bind_def simpler_modify_def) - -lemma no_fail_spec: - "\ \s. no_fail (((=) s) and P) f \ \ no_fail P f" - by (simp add: no_fail_def) - -lemma no_fail_assertE [wp]: - "no_fail (\_. P) (assertE P)" - by (simp add: assertE_def split: if_split) - -lemma no_fail_spec_pre: - "\ no_fail (((=) s) and P') f; \s. P s \ P' s \ \ no_fail (((=) s) and P) f" - by (erule no_fail_pre, simp) - -lemma no_fail_whenE [wp]: - "\ G \ no_fail P f \ \ no_fail (\s. G \ P s) (whenE G f)" - by (simp add: whenE_def split: if_split) - -lemma no_fail_unlessE [wp]: - "\ \ G \ no_fail P f \ \ no_fail (\s. 
\ G \ P s) (unlessE G f)" - by (simp add: unlessE_def split: if_split) - -lemma no_fail_throwError [wp]: - "no_fail \ (throwError e)" - by (simp add: throwError_def) - -lemma no_fail_liftE [wp]: - "no_fail P f \ no_fail P (liftE f)" - unfolding liftE_def by wpsimp - -lemma bind_return_eq: - "(a >>= return) = (b >>= return) \ a = b" - apply (clarsimp simp:bind_def) - apply (rule ext) - apply (drule_tac x= x in fun_cong) - apply (auto simp:return_def split_def) - done - -lemma bindE_bind_linearise: - "((f >>=E g) >>= h) = - (f >>= case_sum (h o Inl) (\rv. g rv >>= h))" - apply (simp add: bindE_def bind_assoc) - apply (rule ext, rule bind_apply_cong, rule refl) - apply (simp add: lift_def throwError_def split: sum.split) - done - -lemma throwError_bind: - "(throwError e >>= f) = (f (Inl e))" - by (simp add: throwError_def) - -lemma bind_bindE_assoc: - "((f >>= g) >>=E h) - = f >>= (\rv. g rv >>=E h)" - by (simp add: bindE_def bind_assoc) - -lemma returnOk_bind: - "returnOk v >>= f = (f (Inr v))" - by (simp add: returnOk_def) - -lemma liftE_bind: - "(liftE m >>= m') = (m >>= (\rv. m' (Inr rv)))" - by (simp add: liftE_def) - -lemma catch_throwError: "catch (throwError ft) g = g ft" - by (simp add: catch_def throwError_bind) - -lemma select_bind_eq2: - "\ v = v'; \x. x \ fst v \ f x s = g x s' \ \ - (select_f v >>= f) s = (select_f v' >>= g) s'" - by (simp add: select_f_def bind_def split_def - cart_singleton_image image_image - cong: image_cong) - -lemmas select_bind_eq = select_bind_eq2[OF refl] - -lemma select_f_singleton_return: - "select_f ({v}, False) = return v" - by (simp add: select_f_def return_def) - -lemma select_f_returns: - "select_f (return v s) = return (v, s)" - "select_f (get s) = return (s, s)" - "select_f (gets f s) = return (f s, s)" - "select_f (modify g s) = return ((), g s)" - by (simp add: select_f_def return_def get_def - simpler_gets_def simpler_modify_def)+ - -lemma select_eq_select_f: - "select S = select_f (S, False)" - by (simp add: select_def select_f_def) - -lemma select_f_select_f: - "select_f (select_f v s) = liftM (swp Pair s) (select_f v)" - apply (rule ext) - apply (simp add: select_f_def liftM_def swp_def - bind_def return_def split_def - image_image image_constant_conv) - apply fastforce - done - -lemma select_f_select: - "select_f (select S s) = liftM (swp Pair s) (select S)" - unfolding select_eq_select_f by (rule select_f_select_f) - -lemmas select_f_selects = select_f_select_f select_f_select - -lemma select_f_asserts: - "select_f (fail s) = fail" - "select_f (assert P s) = do assert P; return ((), s) od" - "select_f (assert_opt v s) = do v' \ assert_opt v; return (v', s) od" - by (simp add: select_f_def fail_def assert_def return_def bind_def - assert_opt_def split: if_split option.split)+ - -lemma liftE_bindE_handle: - "((liftE f >>=E (\x. g x)) h) - = f >>= (\x. g x h)" - by (simp add: liftE_bindE handleE_def handleE'_def - bind_assoc) - -lemma in_returns [monad_eq]: - "(r, s) \ fst (return r s)" - "(Inr r, s) \ fst (returnOk r s)" - by (simp add: in_monad)+ - -lemma assertE_sp: - "\P\ assertE Q \\rv s. Q \ P s\,\E\" - by (clarsimp simp: assertE_def) wp - -lemma catch_liftE: - "catch (liftE g) h = g" - by (simp add: catch_def liftE_def) - -lemma catch_liftE_bindE: - "catch (liftE g >>=E (\x. f x)) h = g >>= (\x. catch (f x) h)" - by (simp add: liftE_bindE catch_def bind_assoc) - -lemma returnOk_catch_bind: - "catch (returnOk v) h >>= g = g v" - by (simp add: returnOk_liftE catch_liftE) - -lemma alternative_left_readonly_bind: - "\ \(=) s\ f \\rv. 
(=) s\; fst (f s) \ {} \ \ - alternative (f >>= (\x. g x)) h s - = (f >>= (\x. alternative (g x) h)) s" - apply (subgoal_tac "\x \ fst (f s). snd x = s") - apply (clarsimp simp: alternative_def bind_def split_def) - apply fastforce - apply clarsimp - apply (drule(1) use_valid, simp_all) - done - -lemma liftE_bindE_assoc: - "(liftE f >>=E g) >>= h = f >>= (\x. g x >>= h)" - by (simp add: liftE_bindE bind_assoc) - -lemma empty_fail_use_cutMon: - "\ \s. empty_fail (cutMon ((=) s) f) \ \ empty_fail f" - apply (clarsimp simp add: empty_fail_def cutMon_def) - apply (fastforce split: if_split_asm) - done - -lemma empty_fail_drop_cutMon: - "empty_fail f \ empty_fail (cutMon P f)" - by (simp add: empty_fail_def fail_def cutMon_def split: if_split) - -lemma empty_fail_cutMon: - "\ \s. P s \ empty_fail (cutMon ((=) s) f) \ - \ empty_fail (cutMon P f)" - apply (clarsimp simp: empty_fail_def cutMon_def fail_def - split: if_split) - apply (fastforce split: if_split_asm) - done - -lemma empty_fail_If: - "\ P \ empty_fail f; \ P \ empty_fail g \ \ empty_fail (if P then f else g)" - by (simp split: if_split) - -lemmas empty_fail_cutMon_intros = - cutMon_walk_bind[THEN arg_cong[where f=empty_fail], THEN iffD2, - OF empty_fail_bind, OF _ empty_fail_cutMon] - cutMon_walk_bindE[THEN arg_cong[where f=empty_fail], THEN iffD2, - OF empty_fail_bindE, OF _ empty_fail_cutMon] - cutMon_walk_if[THEN arg_cong[where f=empty_fail], THEN iffD2, - OF empty_fail_If] - -lemma empty_fail_whenEs: - "empty_fail f \ empty_fail (whenE P f)" - "empty_fail f \ empty_fail (unlessE P f)" - by (auto simp add: whenE_def unlessE_def empty_fail_error_bits split: if_split) - -lemma empty_fail_assertE: - "empty_fail (assertE P)" - by (simp add: assertE_def empty_fail_error_bits split: if_split) - -lemma unlessE_throw_catch_If: - "catch (unlessE P (throwError e) >>=E f) g - = (if P then catch (f ()) g else g e)" - by (simp add: unlessE_def catch_throwError split: if_split) - -lemma gets_the_return: - "(return x = gets_the f) = (\s. f s = Some x)" - apply (subst fun_eq_iff) - apply (simp add: return_def gets_the_def exec_gets - assert_opt_def fail_def - split: option.split) - apply auto - done - -lemma gets_the_returns[unfolded K_def]: - "(return x = gets_the f) = (\s. f s = Some x)" - "(returnOk x = gets_the g) = (\s. g s = Some (Inr x))" - "(throwError x = gets_the h) = (\s. h s = Some (Inl x))" - by (simp_all add: returnOk_def throwError_def - gets_the_return) - -lemma all_rv_choice_fn_eq: - "\ \rv. \fn. f rv = g fn \ - \ \fn. f = (\rv. g (fn rv))" - using all_rv_choice_fn_eq_pred[where f=f and g=g and P=\] - by (simp add: fun_eq_iff) - -lemma cutMon_assert_opt: - "cutMon P (gets_the f >>= g) - = gets_the (\s. if P s then f s else None) >>= g" - by (simp add: cutMon_def gets_the_def exec_gets - bind_assoc fun_eq_iff assert_opt_def - split: if_split) - -lemma gets_the_eq_bind: - "\ \fn. f = gets_the (fn o fn'); - \rv. \fn. g rv - = gets_the (fn o fn') \ - \ \fn. (f >>= g) = gets_the (fn o fn')" - apply (clarsimp dest!: all_rv_choice_fn_eq) - apply (rule_tac x="\s. case (fn s) of None \ None | Some v \ fna v s" in exI) - apply (simp add: gets_the_def bind_assoc exec_gets - assert_opt_def fun_eq_iff - split: option.split) - done - -lemma gets_the_eq_bindE: - "\ \fn. f = gets_the (fn o fn'); - \rv. \fn. g rv = gets_the (fn o fn') \ - \ \fn. 
(f >>=E g) = gets_the (fn o fn')" - apply (simp add: bindE_def) - apply (erule gets_the_eq_bind) - apply (simp add: lift_def gets_the_returns split: sum.split) - apply fastforce - done - -lemma gets_the_fail: - "(fail = gets_the f) = (\s. f s = None)" - by (simp add: gets_the_def exec_gets assert_opt_def - fail_def return_def fun_eq_iff - split: option.split) - -lemma gets_the_asserts: - "(fail = gets_the f) = (\s. f s = None)" - "(assert P = gets_the g) = (\s. g s = (if P then Some () else None))" - "(assertE P = gets_the h) = (\s. h s = (if P then Some (Inr ()) else None))" - by (simp add: assert_def assertE_def - gets_the_fail gets_the_returns - split: if_split)+ - -lemma gets_the_condsE: - "(\fn. whenE P f = gets_the (fn o fn')) - = (P \ (\fn. f = gets_the (fn o fn')))" - "(\fn. unlessE P g = gets_the (fn o fn')) - = (\ P \ (\fn. g = gets_the (fn o fn')))" - by (simp add: whenE_def unlessE_def gets_the_returns - ex_const_function - split: if_split)+ - -lemma no_fail_gets_the [wp]: - "no_fail (\s. f s \ None) (gets_the f)" - apply (simp add: gets_the_def) - apply (rule no_fail_pre, wp) - apply simp - done - -lemma gets_the_validE_R_wp: - "\\s. f s \ None \ isRight (the (f s)) \ Q (theRight (the (f s))) s\ - gets_the f - \Q\,-" - apply (simp add: gets_the_def validE_R_def validE_def) - apply (wp | wpc | simp add: assert_opt_def)+ - apply (clarsimp split: split: sum.splits) - done - -lemma return_bindE: - "isRight v \ return v >>=E f = f (theRight v)" - by (clarsimp simp: isRight_def return_returnOk) - -lemma assert_opt_If: - "assert_opt v = If (v = None) fail (return (the v))" - by (simp_all add: assert_opt_def split: option.split) - -lemma if_to_top_of_bind: - "(bind (If P x y) z) = If P (bind x z) (bind y z)" - by (simp split: if_split) - -lemma if_to_top_of_bindE: - "(bindE (If P x y) z) = If P (bindE x z) (bindE y z)" - by (simp split: if_split) - -lemma alternative_bind: - "((a \ b) >>= c) = ((a >>= c) \ (b >>= c))" - apply (rule ext, simp add: alternative_def bind_def split_def) - apply blast - done - -lemma alternative_refl: - "(a \ a) = a" - by (rule ext, simp add: alternative_def) - -lemma alternative_com: - "(f \ g) = (g \ f)" - apply (rule ext) - apply (auto simp: alternative_def) - done - -lemma liftE_alternative: - "liftE (a \ b) = (liftE a \ liftE b)" - by (simp add: liftE_def alternative_bind) - -lemma fst_return: - "fst (return v s) = {(v, s)}" - by (simp add: return_def) - -(* FIXME: move *) -lemma in_bind_split [monad_eq]: - "(rv \ fst ((f >>= g) s)) = - (\rv'. rv' \ fst (f s) \ rv \ fst (g (fst rv') (snd rv')))" - apply (cases rv) - apply (fastforce simp add: in_bind) - done - -lemma no_fail_mapM_wp: - assumes "\x. x \ set xs \ no_fail (P x) (f x)" - assumes "\x y. \ x \ set xs; y \ set xs \ \ \P x\ f y \\_. P x\" - shows "no_fail (\s. \x \ set xs. 
P x s) (mapM f xs)" - using assms -proof (induct xs) - case Nil - thus ?case by (simp add: mapM_empty) -next - case (Cons z zs) - show ?case - apply (clarsimp simp: mapM_Cons) - apply (wp Cons.prems Cons.hyps hoare_vcg_const_Ball_lift|simp)+ - done -qed - -lemma zipWithM_Nil [simp]: - "zipWithM f xs [] = return []" - by (simp add: zipWithM_def zipWith_def sequence_def) - -lemma zipWithM_One: - "zipWithM f (x#xs) [a] = (do z \ f x a; return [z] od)" - by (simp add: zipWithM_def zipWith_def sequence_def) - -lemma zipWithM_x_Nil: - "zipWithM_x f xs [] = return ()" - by (simp add: zipWithM_x_def zipWith_def sequence_x_def) - -lemma zipWithM_x_One: - "zipWithM_x f (x#xs) [a] = f x a" - by (simp add: zipWithM_x_def zipWith_def sequence_x_def) - -lemma list_case_return: - "(case xs of [] \ return v | y # ys \ return (f y ys)) - = return (case xs of [] \ v | y # ys \ f y ys)" - by (simp split: list.split) - -lemma gets_exs_valid: - "\(=) s\ gets f \\\r. (=) s\" - apply (clarsimp simp: exs_valid_def split_def) - apply (rule bexI [where x = "(f s, s)"]) - apply simp - apply (simp add: in_monad) - done - -lemma empty_fail_get: - "empty_fail get" - by (simp add: empty_fail_def get_def) - -lemma alternative_liftE_returnOk: - "(liftE m \ returnOk v) = liftE (m \ return v)" - by (simp add: liftE_def alternative_def returnOk_def bind_def return_def) - -lemma bind_inv_inv_comm_weak: - "\ \s. \(=) s\ f \\_. (=) s\; \s. \(=) s\ g \\_. (=) s\; - empty_fail f; empty_fail g \ \ - do x \ f; y \ g; n od = do y \ g; x \ f; n od" - apply (rule ext) - apply (fastforce simp: bind_def valid_def empty_fail_def split_def image_def) - done - -lemma mapM_last_Cons: - "\ xs = [] \ g v = y; - xs \ [] \ do x \ f (last xs); return (g x) od - = do x \ f (last xs); return y od \ \ - do ys \ mapM f xs; - return (g (last (v # ys))) od - = do mapM_x f xs; - return y od" - apply (cases "xs = []") - apply (simp add: mapM_x_Nil mapM_Nil) - apply (simp add: mapM_x_mapM) - apply (subst append_butlast_last_id[symmetric], assumption, - subst mapM_append)+ - apply (simp add: bind_assoc mapM_Cons mapM_Nil) - done - -lemma mapM_length_cong: - "\ length xs = length ys; \x y. (x, y) \ set (zip xs ys) \ f x = g y \ - \ mapM f xs = mapM g ys" - by (simp add: mapM_def map_length_cong) - -(* FIXME: duplicate *) -lemma zipWithM_mapM: - "zipWithM f xs ys = mapM (split f) (zip xs ys)" - by (simp add: zipWithM_def zipWith_def mapM_def) - -lemma zipWithM_If_cut: - "zipWithM (\a b. if a < n then f a b else g a b) [0 ..< m] xs - = do ys \ zipWithM f [0 ..< min n m] xs; - zs \ zipWithM g [n ..< m] (drop n xs); - return (ys @ zs) od" - apply (cases "n < m") - apply (cut_tac i=0 and j=n and k="m - n" in upt_add_eq_append) - apply simp - apply (simp add: zipWithM_mapM) - apply (simp add: zip_append1 mapM_append zip_take_triv2 split_def) - apply (intro bind_cong bind_apply_cong refl mapM_length_cong - fun_cong[OF mapM_length_cong]) - apply (clarsimp simp: set_zip) - apply (clarsimp simp: set_zip) - apply (simp add: zipWithM_mapM mapM_Nil) - apply (intro mapM_length_cong refl) - apply (clarsimp simp: set_zip) - done - -lemma mapM_liftM_const: - "mapM (\x. liftM (\y. f x) (g x)) xs - = liftM (\ys. 
map f xs) (mapM g xs)" - apply (induct xs) - apply (simp add: mapM_Nil) - apply (simp add: mapM_Cons) - apply (simp add: liftM_def bind_assoc) +lemma gets_the_exs_valid_no_ofail: + "\no_ofail P h; ovalid P h Q\ \ \P\ gets_the h \\Q\" + apply (rule exs_valid_weaken_pre) + apply (rule gets_the_exs_valid) + apply (fastforce simp: ovalid_def no_ofail_def) done -lemma empty_failD2: - "\ empty_fail f; \ snd (f s) \ \ \v. v \ fst (f s)" - by (fastforce simp add: empty_fail_def) - -lemma empty_failD3: - "\ empty_fail f; \ snd (f s) \ \ fst (f s) \ {}" - by (drule(1) empty_failD2, clarsimp) - -lemma bind_inv_inv_comm: - "\ \P. \P\ f \\_. P\; \P. \P\ g \\_. P\; - empty_fail f; empty_fail g \ \ - do x \ f; y \ g; n x y od = do y \ g; x \ f; n x y od" - apply (rule ext) - apply (rename_tac s) - apply (rule_tac s="(do (x, y) \ do x \ f; y \ (\_. g s) ; (\_. return (x, y) s) od; - n x y od) s" in trans) - apply (simp add: bind_assoc) - apply (intro bind_apply_cong, simp_all)[1] - apply (metis in_inv_by_hoareD) - apply (simp add: return_def bind_def) - apply (metis in_inv_by_hoareD) - apply (rule_tac s="(do (x, y) \ do y \ g; x \ (\_. f s) ; (\_. return (x, y) s) od; - n x y od) s" in trans[rotated]) - apply (simp add: bind_assoc) - apply (intro bind_apply_cong, simp_all)[1] - apply (metis in_inv_by_hoareD) - apply (simp add: return_def bind_def) - apply (metis in_inv_by_hoareD) - apply (rule bind_apply_cong, simp_all) - apply (clarsimp simp: bind_def split_def return_def) - apply (auto | drule(1) empty_failD3)+ - done - -lemma throwErrorE_E [wp]: - "\Q e\ throwError e -, \Q\" - by (simp add: validE_E_def) wp - -lemma no_fail_mapM: - "\x. no_fail \ (f x) \ no_fail \ (mapM f xs)" - apply (induct xs) - apply (simp add: mapM_def sequence_def) - apply (simp add: mapM_Cons) - apply (wp|fastforce)+ - done - -lemma gets_inv [simp]: - "\ P \ gets f \ \r. P \" - by (simp add: gets_def, wp) - -lemma select_inv: - "\ P \ select S \ \r. P \" - by (simp add: select_def valid_def) - -lemmas return_inv = hoare_return_drop_var - -lemma assert_inv: "\P\ assert Q \\r. P\" - unfolding assert_def - by (cases Q) simp+ - -lemma assert_opt_inv: "\P\ assert_opt Q \\r. P\" - unfolding assert_opt_def - by (cases Q) simp+ - -lemma let_into_return: - "(let f = x in m f) = (do f \ return x; m f od)" - by simp - -definition - injection_handler :: "('a \ 'b) \ ('s, 'a + 'c) nondet_monad - \ ('s, 'b + 'c) nondet_monad" -where - "injection_handler f m \ m (\ft. throwError (f ft))" - -lemma injection_wp: - "\ t = injection_handler f; \P\ m \Q\,\\ft. E (f ft)\ \ - \ \P\ t m \Q\,\E\" - apply (simp add: injection_handler_def) - apply (wp|simp)+ - done - -lemma injection_wp_E: - "\ t = injection_handler f; \P\ m \Q\,- \ - \ \P\ t m \Q\,-" - by (simp add: validE_R_def injection_wp) - -lemma injection_bindE: - "\ t = injection_handler f; t2 = injection_handler f \ - \ t (x >>=E y) = (t2 x) >>=E (\rv. t (y rv))" - apply (simp add: injection_handler_def) - apply (simp add: bindE_def handleE'_def bind_assoc) - apply (rule arg_cong [where f="\y. x >>= y"]) - apply (rule ext) - apply (case_tac x, simp_all add: lift_def throwError_def) - done - -lemma injection_liftE: - "t = injection_handler f \ t (liftE x) = liftE x" - apply (simp add: injection_handler_def handleE'_def liftE_def) - done - -lemma id_injection: - "id = injection_handler id" -proof - - have P: "case_sum throwError (\v. 
return (Inr v)) = return" - by (auto simp: throwError_def split: sum.splits) - show ?thesis - by (auto simp: injection_handler_def handleE'_def P) -qed - -lemma injection_handler_assertE: - "injection_handler inject (assertE f) = assertE f" - by (simp add: assertE_liftE injection_liftE) - -lemma case_options_weak_wp: - "\ \P\ f \Q\; \x. \P'\ g x \Q\ \ \ \P and P'\ case opt of None \ f | Some x \ g x \Q\" - apply (cases opt) - apply (clarsimp elim!: hoare_weaken_pre) - apply (rule hoare_weaken_pre [where Q=P']) - apply simp+ - done - -lemma case_option_wp_None_return: - assumes [wp]: "\x. \P' x\ f x \\_. Q\" - shows "\\x s. (Q and P x) s \ P' x s \ - \ \Q and (\s. opt \ None \ P (the opt) s)\ - (case opt of None \ return () | Some x \ f x) - \\_. Q\" - by (cases opt; wpsimp) - -lemma case_option_wp_None_returnOk: - assumes [wp]: "\x. \P' x\ f x \\_. Q\,\E\" - shows "\\x s. (Q and P x) s \ P' x s \ - \ \Q and (\s. opt \ None \ P (the opt) s)\ - (case opt of None \ returnOk () | Some x \ f x) - \\_. Q\,\E\" - by (cases opt; wpsimp) - -lemma list_cases_weak_wp: - assumes "\P_A\ a \Q\" - assumes "\x xs. \P_B\ b x xs \Q\" - shows - "\P_A and P_B\ - case ts of - [] \ a - | x#xs \ b x xs \Q\" - apply (cases ts) - apply (simp, rule hoare_weaken_pre, rule assms, simp)+ - done - - -lemmas hoare_FalseE_R = hoare_FalseE[where E="\\", folded validE_R_def] - -lemma hoare_vcg_if_lift2: - "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\ \ - \R\ f \\rv s. if P rv s then X rv s else Y rv s\" - - "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\ \ - \R\ f \\rv. if P' rv then X rv else Y rv\" - by (auto simp: valid_def split_def) - -lemma hoare_vcg_if_lift_ER: (* Required because of lack of rv in lifting rules *) - "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\, - \ - \R\ f \\rv s. if P rv s then X rv s else Y rv s\, -" - - "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\, - \ - \R\ f \\rv. if P' rv then X rv else Y rv\, -" - by (auto simp: valid_def validE_R_def validE_def split_def) - -lemma liftME_return: - "liftME f (returnOk v) = returnOk (f v)" - by (simp add: liftME_def) - -lemma lifted_if_collapse: - "(if P then \ else f) = (\s. \P \ f s)" - by auto - -lemma undefined_valid: "\\\ undefined \Q\" - by (rule hoare_pre_cont) - -lemma Inr_in_liftE_simp [monad_eq]: - "((Inr rv, x) \ fst (liftE fn s)) = ((rv, x) \ fst (fn s))" - by (simp add: in_monad) - -lemma assertE_wp: - "\\s. F \ Q () s\ assertE F \Q\,\E\" - apply (rule hoare_pre) - apply (unfold assertE_def) - apply wp - apply simp - done - -lemma doesn't_grow_proof: - assumes y: "\s. finite (S s)" - assumes x: "\x. \\s. x \ S s \ P s\ f \\rv s. x \ S s\" - shows "\\s. card (S s) < n \ P s\ f \\rv s. card (S s) < n\" - apply (clarsimp simp: valid_def) - apply (subgoal_tac "S b \ S s") - apply (drule card_mono [OF y], simp) - apply clarsimp - apply (rule ccontr) - apply (subgoal_tac "x \ S b", simp) - apply (erule use_valid [OF _ x]) - apply simp - done - -lemma fold_bindE_into_list_case: - "(doE v \ f; case_list (g v) (h v) x odE) - = (case_list (doE v \ f; g v odE) (\x xs. doE v \ f; h v x xs odE) x)" - by (simp split: list.split) - -lemma hoare_vcg_propE_R: - "\\s. P\ f \\rv s. P\, -" - by (simp add: validE_R_def validE_def valid_def split_def split: sum.split) - -lemma set_preserved_proof: - assumes y: "\x. \\s. Q s \ x \ S s\ f \\rv s. x \ S s\" - assumes x: "\x. \\s. Q s \ x \ S s\ f \\rv s. x \ S s\" - shows "\\s. Q s \ P (S s)\ f \\rv s. 
P (S s)\" - apply (clarsimp simp: valid_def) - by (metis (mono_tags, lifting) equalityI post_by_hoare subsetI x y) - -lemma set_shrink_proof: - assumes x: "\x. \\s. x \ S s\ f \\rv s. x \ S s\" - shows - "\\s. \S'. S' \ S s \ P S'\ - f - \\rv s. P (S s)\" - apply (clarsimp simp: valid_def) - apply (drule spec, erule mp) - apply (clarsimp simp: subset_iff) - apply (rule ccontr) - apply (drule(1) use_valid [OF _ x]) - apply simp - done - -lemma shrinks_proof: - assumes y: "\s. finite (S s)" - assumes x: "\x. \\s. x \ S s \ P s\ f \\rv s. x \ S s\" - assumes z: "\P\ f \\rv s. x \ S s\" - assumes w: "\s. P s \ x \ S s" - shows "\\s. card (S s) \ n \ P s\ f \\rv s. card (S s) < n\" - apply (clarsimp simp: valid_def) - apply (subgoal_tac "S b \ S s") - apply (drule psubset_card_mono [OF y], simp) - apply (rule psubsetI) - apply clarsimp - apply (rule ccontr) - apply (subgoal_tac "x \ S b", simp) - apply (erule use_valid [OF _ x]) - apply simp - apply (rule not_sym) - apply (rule set_neqI[where x=x]) - apply (erule w) - apply (erule(1) use_valid [OF _ z]) - done - -lemma unlessE_wp : - "(\P \ \Q\ f \R\,\E\) \ \if P then R () else Q\ unlessE P f \R\,\E\" - apply (clarsimp simp: unlessE_whenE whenE_def) - apply wp - done - -lemma use_validE_R: - "\ (Inr r, s') \ fst (f s); \P\ f \Q\,-; P s \ \ Q r s'" - unfolding validE_R_def validE_def - by (frule(2) use_valid, simp) - -lemma valid_preservation_ex: - assumes x: "\x P. \\s. P (f s x :: 'b)\ m \\rv s. P (f s x)\" - shows "\\s. P (f s :: 'a \ 'b)\ m \\rv s. P (f s)\" - apply (clarsimp simp: valid_def) - apply (erule rsubst[where P=P]) - apply (rule ext) - apply (erule use_valid [OF _ x]) - apply simp - done - -lemmas valid_prove_more' = valid_prove_more[where Q="\rv. Q" for Q] - -lemma whenE_inv: - assumes a: "\P\ f \\_. P\" - shows "\P\ whenE Q f \\_. P\" - unfolding whenE_def - apply (cases Q) - apply simp - apply (wp a) - apply simp - apply wp - done - -lemma whenE_liftE: - "whenE P (liftE f) = liftE (when P f)" - by (simp add: whenE_def when_def returnOk_liftE) - -lemma whenE_throwError_wp: - "\\s. \ P \ Q s\ whenE P (throwError e) \\_. Q\, \\\\" - unfolding whenE_def - apply (cases P) - apply simp - apply wp - apply simp - apply wp - done - -lemma whenE_whenE_body: - "whenE P (throwError f) >>=E (\_. whenE Q (throwError f) >>=E r) = whenE (P \ Q) (throwError f) >>=E r" - apply (cases P) - apply (simp add: whenE_def) - apply simp - done - -lemma whenE_whenE_same: - "whenE P (throwError f) >>=E (\_. whenE P (throwError g) >>=E r) = whenE P (throwError f) >>=E r" - apply (cases P) - apply (simp add: whenE_def) - apply simp - done - -lemma gets_the_inv: "\P\ gets_the V \\rv. P\" by wpsimp - -lemma select_f_inv: - "\P\ select_f S \\_. P\" - by (simp add: select_f_def valid_def) - -lemmas state_unchanged = in_inv_by_hoareD [THEN sym] - -lemma validI: - assumes rl: "\s r s'. \ P s; (r, s') \ fst (S s) \ \ Q r s'" - shows "\P\ S \Q\" - unfolding valid_def using rl by safe - -lemma opt_return_pres_lift: - assumes x: "\v. \P\ f v \\rv. P\" - shows "\P\ case x of None \ return () | Some v \ f v \\rv. P\" - by (rule hoare_pre, (wpcw; wp x), simp) - -lemma exec_select_f_singleton: - "(select_f ({v},False) >>= f) = f v" - by (simp add: select_f_def bind_def) - -lemma mapM_discarded: - "mapM f xs >>= (\ys. g) = mapM_x f xs >>= (\_. g)" - by (simp add: mapM_x_mapM) - -lemma valid_return_unit: - "\P\ f >>= (\_. return ()) \\r. Q\ \ \P\ f \\r. 
Q\" - apply (rule validI) - apply (fastforce simp: valid_def return_def bind_def split_def) - done - -lemma mapM_x_map: - "mapM_x f (map g xs) = mapM_x (\x. f (g x)) xs" - by (simp add: mapM_x_def o_def) - -lemma maybe_fail_bind_fail: - "unless P fail >>= (\_. fail) = fail" - "when P fail >>= (\_. fail) = fail" - by (clarsimp simp: bind_def fail_def return_def - unless_def when_def)+ - -lemma unless_False: - "unless False f = f" - by (simp add: unless_def when_def) - -lemma unless_True: - "unless True f = return ()" - by (simp add: unless_def when_def) - -lemma filterM_preserved: - "\ \x. x \ set xs \ \P\ m x \\rv. P\ \ - \ \P\ filterM m xs \\rv. P\" - apply (induct xs) - apply (wp | simp | erule meta_mp | drule meta_spec)+ - done - -lemma filterM_append: - "filterM f (xs @ ys) = (do - xs' \ filterM f xs; - ys' \ filterM f ys; - return (xs' @ ys') - od)" - apply (induct xs) - apply simp - apply (simp add: bind_assoc) - apply (rule ext bind_apply_cong [OF refl])+ - apply simp - done - -lemma filterM_distinct1: - "\\ and K (P \ distinct xs)\ filterM m xs \\rv s. (P \ distinct rv) \ set rv \ set xs\" - apply (rule hoare_gen_asm, erule rev_mp) - apply (rule rev_induct [where xs=xs]) - apply (clarsimp | wp)+ - apply (simp add: filterM_append) - apply (erule hoare_seq_ext[rotated]) - apply (rule hoare_seq_ext[rotated], rule hoare_vcg_prop) - apply (wp, clarsimp) - apply blast - done - -lemma filterM_subset: - "\\\ filterM m xs \\rv s. set rv \ set xs\" - by (rule hoare_chain, rule filterM_distinct1[where P=False], simp_all) - -lemma filterM_all: - "\ \x y. \ x \ set xs; y \ set xs \ \ \P y\ m x \\rv. P y\ \ \ - \\s. \x \ set xs. P x s\ filterM m xs \\rv s. \x \ set rv. P x s\" - apply (rule_tac Q="\rv s. set rv \ set xs \ (\x \ set xs. P x s)" - in hoare_strengthen_post) - apply (wp filterM_subset hoare_vcg_const_Ball_lift filterM_preserved) - apply simp+ - apply blast - done - - -lemma filterM_distinct: - "\K (distinct xs)\ filterM m xs \\rv s. distinct rv\" - by (rule hoare_chain, rule filterM_distinct1[where P=True], simp_all) - -lemma filterM_mapM: - "filterM f xs = (do - ys \ mapM (\x. do v \ f x; return (x, v) od) xs; - return (map fst (filter snd ys)) - od)" - apply (induct xs) - apply (simp add: mapM_def sequence_def) - apply (simp add: mapM_Cons bind_assoc) - apply (rule bind_cong [OF refl] bind_apply_cong[OF refl])+ - apply simp - done - -lemma if_return_closure: - "(if P then return x else return y) - = (return (if P then x else y))" - by simp - -lemma select_singleton: - "select {x} = return x" - by (fastforce simp add: fun_eq_iff select_def return_def) - -lemma static_imp_wp: - "\Q\ m \R\ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\" - by (cases P, simp_all add: valid_def) - -lemma static_imp_wpE : - "\Q\ m \R\,- \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" - by (cases P, simp_all) - -lemma static_imp_conj_wp: - "\ \Q\ m \Q'\; \R\ m \R'\ \ - \ \\s. (P \ Q s) \ R s\ m \\rv s. (P \ Q' rv s) \ R' rv s\" - apply (rule hoare_vcg_conj_lift) - apply (rule static_imp_wp) - apply assumption+ - done - -lemma hoare_eq_P: - assumes "\P. \P\ f \\_. P\" - shows "\(=) s\ f \\_. (=) s\" - by (rule assms) - -lemma hoare_validE_R_conj: - "\\P\ f \Q\, -; \P\ f \R\, -\ \ \P\ f \Q And R\, -" - by (simp add: valid_def validE_def validE_R_def Let_def split_def split: sum.splits) - -lemma hoare_vcg_const_imp_lift_R: - "\P\ f \Q\,- \ \\s. F \ P s\ f \\rv s. F \ Q rv s\,-" - by (cases F, simp_all) - -lemma hoare_vcg_disj_lift_R: - assumes x: "\P\ f \Q\,-" - assumes y: "\P'\ f \Q'\,-" - shows "\\s. P s \ P' s\ f \\rv s. 
Q rv s \ Q' rv s\,-" - using assms - by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) - -lemmas throwError_validE_R = throwError_wp [where E="\\", folded validE_R_def] - -lemma valid_case_option_post_wp: - "(\x. \P x\ f \\rv. Q x\) \ - \\s. case ep of Some x \ P x s | _ \ True\ - f \\rv s. case ep of Some x \ Q x s | _ \ True\" - by (cases ep, simp_all add: hoare_vcg_prop) - -lemma P_bool_lift: - assumes t: "\Q\ f \\r. Q\" - assumes f: "\\s. \Q s\ f \\r s. \Q s\" - shows "\\s. P (Q s)\ f \\r s. P (Q s)\" - apply (clarsimp simp: valid_def) - apply (subgoal_tac "Q b = Q s") - apply simp - apply (rule iffI) - apply (rule classical) - apply (drule (1) use_valid [OF _ f]) - apply simp - apply (erule (1) use_valid [OF _ t]) - done - -lemma fail_inv : - "\ P \ fail \ \r. P \" - by wp - -lemma gets_sp: "\P\ gets f \\rv. P and (\s. f s = rv)\" - by (wp, simp) - -lemma post_by_hoare2: - "\ \P\ f \Q\; (r, s') \ fst (f s); P s \ \ Q r s'" - by (rule post_by_hoare, assumption+) - -lemma hoare_Ball_helper: - assumes x: "\x. \P x\ f \Q x\" - assumes y: "\P. \\s. P (S s)\ f \\rv s. P (S s)\" - shows "\\s. \x \ S s. P x s\ f \\rv s. \x \ S s. Q x rv s\" - apply (clarsimp simp: valid_def) - apply (subgoal_tac "S b = S s") - apply (erule post_by_hoare2 [OF x]) - apply (clarsimp simp: Ball_def) - apply (erule_tac P1="\x. x = S s" in post_by_hoare2 [OF y]) - apply (rule refl) - done - -lemma hoare_gets_post: - "\ P \ gets f \ \r s. r = f s \ P s \" - by (simp add: valid_def get_def gets_def bind_def return_def) - -lemma hoare_return_post: - "\ P \ return x \ \r s. r = x \ P s \" - by (simp add: valid_def return_def) - -lemma mapM_wp': - assumes x: "\x. x \ set xs \ \P\ f x \\rv. P\" - shows "\P\ mapM f xs \\rv. P\" - apply (rule mapM_wp) - apply (erule x) - apply simp - done - -lemma mapM_set: - assumes "\x. x \ set xs \ \P\ f x \\_. P\" - assumes "\x y. x \ set xs \ \P\ f x \\_. Q x\" - assumes "\x y. \ x \ set xs; y \ set xs \ \ \P and Q y\ f x \\_. Q y\" - shows "\P\ mapM f xs \\_ s. \x \ set xs. Q x s\" -using assms -proof (induct xs) - case Nil show ?case - by (simp add: mapM_def sequence_def) wp -next - case (Cons y ys) - have PQ_inv: "\x. x \ set ys \ \P and Q y\ f x \\_. P and Q y\" - apply (simp add: pred_conj_def) - apply (rule hoare_pre) - apply (wp Cons|simp)+ - done - show ?case - apply (simp add: mapM_Cons) - apply wp - apply (rule hoare_vcg_conj_lift) - apply (rule hoare_strengthen_post) - apply (rule mapM_wp') - apply (erule PQ_inv) - apply simp - apply (wp Cons|simp)+ - done -qed - -lemma case_option_fail_return_val: - "(fst (case_option fail return v s) = {(rv, s')}) = (v = Some rv \ s = s')" - by (cases v, simp_all add: fail_def return_def) - -lemma return_expanded_inv: - "\P\ \s. ({(x, s)}, False) \\rv. P\" - by (simp add: valid_def) - -lemma empty_fail_assert : "empty_fail (assert P)" - unfolding assert_def by simp - -lemma no_fail_mapM': - assumes rl: "\x. no_fail (\_. P x) (f x)" - shows "no_fail (\_. \x \ set xs. P x) (mapM f xs)" -proof (induct xs) - case Nil thus ?case by (simp add: mapM_def sequence_def) -next - case (Cons x xs) - - have nf: "no_fail (\_. P x) (f x)" by (rule rl) - have ih: "no_fail (\_. \x \ set xs. P x) (mapM f xs)" by (rule Cons) - - show ?case - apply (simp add: mapM_Cons) - apply (rule no_fail_pre) - apply (wp nf ih) - apply simp - done -qed - -lemma handy_prop_divs: - assumes x: "\P. \\s. P (Q s) \ S s\ f \\rv s. P (Q' rv s)\" - "\P. \\s. P (R s) \ S s\ f \\rv s. P (R' rv s)\" - shows "\\s. P (Q s \ R s) \ S s\ f \\rv s. 
P (Q' rv s \ R' rv s)\" - "\\s. P (Q s \ R s) \ S s\ f \\rv s. P (Q' rv s \ R' rv s)\" - apply (clarsimp simp: valid_def - elim!: rsubst[where P=P]) - apply (rule use_valid [OF _ x(1)], assumption) - apply (rule use_valid [OF _ x(2)], assumption) - apply simp - apply (clarsimp simp: valid_def - elim!: rsubst[where P=P]) - apply (rule use_valid [OF _ x(1)], assumption) - apply (rule use_valid [OF _ x(2)], assumption) - apply simp - done - -lemma hoare_as_subst: - "\ \P. \\s. P (fn s)\ f \\rv s. P (fn s)\; - \v :: 'a. \P v\ f \Q v\ \ \ - \\s. P (fn s) s\ f \\rv s. Q (fn s) rv s\" - apply (rule_tac Q="\rv s. \v. v = fn s \ Q v rv s" in hoare_chain) - apply (rule hoare_vcg_ex_lift) - apply (rule hoare_vcg_conj_lift) - apply assumption+ - apply simp - apply simp - done - -lemmas hoare_vcg_ball_lift = hoare_vcg_const_Ball_lift - -lemma select_singleton_is_return : "select {A} = return A" - unfolding select_def return_def by (simp add: Sigma_def) - -lemma assert_opt_eq_singleton: - "(assert_opt f s = ({(v, s')},False)) = (s = s' \ f = Some v)" - by (case_tac f, simp_all add: assert_opt_def - fail_def return_def conj_comms) - -lemma hoare_set_preserved: - assumes x: "\x. \fn' x\ m \\rv. fn x\" - shows "\\s. set xs \ {x. fn' x s}\ m \\rv s. set xs \ {x. fn x s}\" - apply (induct xs) - apply simp - apply wp - apply simp - apply (rule hoare_vcg_conj_lift) - apply (rule x) - apply assumption - done - -lemma return_modify: - "return () = modify id" - by (simp add: return_def simpler_modify_def) - -lemma liftE_liftM_liftME: - "liftE (liftM f m) = liftME f (liftE m)" - by (simp add: liftE_liftM liftME_liftM liftM_def) - - -lemma assert_opt_member: - "(x, s') \ fst (assert_opt y s) = (y = Some x \ s' = s)" - apply (case_tac y, simp_all add: assert_opt_def fail_def return_def) - apply safe - done - -lemma bind_return_unit: - "f = (f >>= (\x. return ()))" - by simp - -lemma det_mapM: - assumes x: "\x. x \ S \ det (f x)" - shows "set xs \ S \ det (mapM f xs)" - apply (induct xs) - apply (simp add: mapM_def sequence_def) - apply (simp add: mapM_Cons x) - done - -lemma det_zipWithM_x: - assumes x: "\x y. (x, y) \ set (zip xs ys) \ det (f x y)" - shows "det (zipWithM_x f xs ys)" - apply (simp add: zipWithM_x_mapM) - apply (rule bind_detI) - apply (rule det_mapM [where S="set (zip xs ys)"]) - apply (clarsimp simp add: x) - apply simp - apply simp - done - -lemma empty_fail_sequence_x : - assumes "\m. m \ set ms \ empty_fail m" - shows "empty_fail (sequence_x ms)" using assms - by (induct ms) (auto simp: sequence_x_def) - -lemma gets_the_member: - "(x, s') \ fst (gets_the f s) = (f s = Some x \ s' = s)" - by (case_tac "f s", simp_all add: gets_the_def - simpler_gets_def bind_def assert_opt_member) - -lemma hoare_ex_wp: - assumes x: "\x. \P x\ f \Q x\" - shows "\\s. \x. P x s\ f \\rv s. \x. Q x rv s\" - apply (clarsimp simp: valid_def) - apply (rule exI) - apply (rule post_by_hoare [OF x]) - apply assumption+ - done - -lemma hoare_ex_pre: (* safe, unlike hoare_ex_wp *) - "(\x. \P x\ f \Q\) \ \\s. \x. P x s\ f \Q\" - by (fastforce simp: valid_def) - -lemma hoare_ex_pre_conj: - "(\x. \\s. P x s \ P' s\ f \Q\) - \ \\s. (\x. P x s) \ P' s\ f \Q\" - by (fastforce simp: valid_def) - -lemma hoare_conj_lift_inv: - "\\P\ f \Q\; \\s. P' s \ I s\ f \\rv. I\; - \s. P s \ P' s\ - \ \\s. P s \ I s\ f \\rv s. Q rv s \ I s\" - by (fastforce simp: valid_def) - -lemma hoare_in_monad_post : - assumes x: "\P. \P\ f \\x. P\" - shows "\\\ f \\rv s. 
(rv, s) \ fst (f s)\" - apply (clarsimp simp: valid_def) - apply (subgoal_tac "s = b", simp) - apply (simp add: state_unchanged [OF x]) - done - -lemma mapM_gets: - assumes P: "\x. m x = gets (f x)" - shows "mapM m xs = gets (\s. map (\x. f x s) xs)" -proof (induct xs) - case Nil show ?case - by (simp add: mapM_def sequence_def gets_def get_def bind_def) -next - case (Cons y ys) thus ?case - by (simp add: mapM_Cons P simpler_gets_def return_def bind_def) -qed - -lemma mapM_map_simp: - "mapM m (map f xs) = mapM (m \ f) xs" - apply (induct xs) - apply (simp add: mapM_def sequence_def) - apply (simp add: mapM_Cons) - done - -lemma modify_id_return: - "modify id = return ()" - by (simp add: simpler_modify_def return_def) - -definition - oblivious :: "('a \ 'a) \ ('a, 'b) nondet_monad \ bool" where - "oblivious f m \ \s. (\(rv, s') \ fst (m s). (rv, f s') \ fst (m (f s))) - \ (\(rv, s') \ fst (m (f s)). \s''. (rv, s'') \ fst (m s) \ s' = f s'') - \ snd (m (f s)) = snd (m s)" - -lemma oblivious_return [simp]: - "oblivious f (return x)" - by (simp add: oblivious_def return_def) - -lemma oblivious_fail [simp]: - "oblivious f fail" - by (simp add: oblivious_def fail_def) - -lemma oblivious_assert [simp]: - "oblivious f (assert x)" - by (simp add: assert_def) - -lemma oblivious_assert_opt [simp]: - "oblivious f (assert_opt fn)" - by (simp add: assert_opt_def split: option.splits) - -lemma oblivious_bind: - "\ oblivious f m; \rv. oblivious f (m' rv) \ - \ oblivious f (m >>= m')" - apply atomize - apply (simp add: oblivious_def) - apply (erule allEI) - apply (intro conjI) - apply (drule conjunct1) - apply (clarsimp simp: in_monad) - apply fastforce - apply (drule conjunct2, drule conjunct1) - apply (clarsimp simp: in_monad) - apply fastforce - apply (clarsimp simp: bind_def disj_commute) - apply (rule disj_cong [OF refl]) - apply (rule iffI) - apply (clarsimp simp: split_def) - apply fastforce - apply clarsimp - apply (drule(1) bspec) - apply (clarsimp simp: split_def) - apply (erule (1) my_BallE) - apply (rule bexI [rotated], assumption) - apply clarsimp - done - -lemma oblivious_gets [simp]: - "oblivious f (gets f') = (\s. f' (f s) = f' s)" - by (fastforce simp add: oblivious_def simpler_gets_def) - -lemma oblivious_liftM: - "oblivious f m \ oblivious f (liftM g m)" - by (simp add: liftM_def oblivious_bind) - -lemma oblivious_modify [simp]: - "oblivious f (modify f') = (\s. f' (f s) = f (f' s))" - apply (simp add: oblivious_def simpler_modify_def) - apply (rule ball_cong[where A=UNIV, OF refl, simplified]) - apply fastforce - done - -lemma oblivious_modify_swap: - "\ oblivious f m \ \ - (modify f >>= (\rv. m)) - = (m >>= (\rv. modify f))" - apply (clarsimp simp: bind_def simpler_modify_def) - apply (rule ext)+ - apply (case_tac "m (f s)", clarsimp) - apply (simp add: oblivious_def) - apply (drule_tac x=s in spec) - apply (rule conjI) - apply (rule set_eqI) - apply (rule iffI) - apply (drule conjunct2, drule conjunct1) - apply (drule_tac x=x in bspec, simp) - apply clarsimp - apply (rule_tac x="((), s'')" in bexI) - apply simp - apply simp - apply (drule conjunct1) - apply fastforce - apply (drule conjunct2)+ - apply fastforce - done - -lemma univ_wp: - "\\s. \(rv, s') \ fst (f s). Q rv s'\ f \Q\" - by (simp add: valid_def) - -lemma univ_get_wp: - assumes x: "\P. \P\ f \\rv. P\" - shows "\\s. \(rv, s') \ fst (f s). 
s = s' \ Q rv s'\ f \Q\" - apply (rule hoare_pre_imp [OF _ univ_wp]) - apply clarsimp - apply (erule my_BallE, assumption, simp) - apply (subgoal_tac "s = b", simp) - apply (simp add: state_unchanged [OF x]) - done - -lemma result_in_set_wp : - assumes x: "\P. \P\ fn \\rv. P\" - shows "\\s. True\ fn \\v s'. (v, s') \ fst (fn s')\" - by (rule hoare_pre_imp [OF _ univ_get_wp], simp_all add: x split_def) clarsimp - -lemma other_result_in_set_wp: - assumes x: "\P. \P\ fn \\rv. P\" - shows "\\s. \(v, s) \ fst (fn s). F v = v\ fn \\v s'. (F v, s') \ fst (fn s')\" - proof - - have P: "\v s. (F v = v) \ (v, s) \ fst (fn s) \ (F v, s) \ fst (fn s)" - by simp - show ?thesis - apply (rule hoare_post_imp [OF P], assumption) - apply (rule hoare_pre_imp) - defer - apply (rule hoare_vcg_conj_lift) - apply (rule univ_get_wp [OF x]) - apply (rule result_in_set_wp [OF x]) - apply clarsimp - apply (erule my_BallE, assumption, simp) - done - qed - -lemma weak_if_wp: - "\ \P\ f \Q\; \P'\ f \Q'\ \ \ - \P and P'\ f \\r. if C r then Q r else Q' r\" - by (auto simp add: valid_def split_def) - -lemma zipWithM_x_modify: - "zipWithM_x (\a b. modify (f a b)) as bs - = modify (\s. foldl (\s (a, b). f a b s) s (zip as bs))" - apply (simp add: zipWithM_x_def zipWith_def sequence_x_def) - apply (induct ("zip as bs")) - apply (simp add: simpler_modify_def return_def) - apply (rule ext) - apply (simp add: simpler_modify_def bind_def split_def) - done - -lemma bindE_split_recursive_asm: - assumes x: "\x s'. \ (Inr x, s') \ fst (f s) \ \ \\s. B x s \ s = s'\ g x \C\, \E\" - shows "\A\ f \B\, \E\ \ \\st. A st \ st = s\ f >>=E g \C\, \E\" - apply (clarsimp simp: validE_def valid_def bindE_def bind_def lift_def) - apply (erule allE, erule(1) impE) - apply (erule(1) my_BallE, simp) - apply (case_tac a, simp_all add: throwError_def return_def) - apply (drule x) - apply (clarsimp simp: validE_def valid_def) - apply (erule(1) my_BallE, simp) - done - -lemma bind_known_operation_eq: - "\ no_fail P f; \Q\ f \\rv s. rv = x \ s = t\; P s; Q s; empty_fail f \ - \ (f >>= g) s = g x t" - apply (drule(1) no_failD) - apply (subgoal_tac "fst (f s) = {(x, t)}") - apply (clarsimp simp: bind_def) - apply (rule not_psubset_eq) - apply (drule(1) empty_failD2, clarsimp) - apply fastforce - apply clarsimp - apply (drule(1) use_valid, simp+) - done - - -lemma gets_the_bind_eq: - "\ f s = Some x; g x s = h s \ - \ (gets_the f >>= g) s = h s" - by (simp add: gets_the_def bind_assoc exec_gets assert_opt_def) - -lemma hoare_const_imp_R: - "\Q\ f \R\,- \ \\s. P \ Q s\ f \\rv s. P \ R rv s\,-" - by (cases P, simp_all) - -lemma hoare_vcg_imp_lift_R: - "\ \P'\ f \\rv s. \ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" - by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) - -lemma hoare_disj_division: - "\ P \ Q; P \ \R\ f \S\; Q \ \T\ f \S\ \ - \ \\s. (P \ R s) \ (Q \ T s)\ f \S\" - apply safe - apply (rule hoare_pre_imp) - prefer 2 - apply simp - apply simp - apply (rule hoare_pre_imp) - prefer 2 - apply simp - apply simp - done - -lemma hoare_grab_asm: - "\ G \ \P\ f \Q\ \ \ \\s. G \ P s\ f \Q\" - by (cases G, simp+) - -lemma hoare_grab_asm2: - "(P' \ \\s. P s \ R s\ f \Q\) - \ \\s. P s \ P' \ R s\ f \Q\" - by (fastforce simp: valid_def) - -lemma hoare_grab_exs: - assumes x: "\x. P x \ \P'\ f \Q\" - shows "\\s. \x. P x \ P' s\ f \Q\" - apply (clarsimp simp: valid_def) - apply (erule(2) use_valid [OF _ x]) - done - -lemma hoare_prop_E: "\\rv. P\ f -,\\rv s. 
P\" - unfolding validE_E_def - by (rule hoare_pre, wp, simp) - -lemma hoare_vcg_conj_lift_R: - "\ \P\ f \Q\,-; \R\ f \S\,- \ \ - \\s. P s \ R s\ f \\rv s. Q rv s \ S rv s\,-" - apply (simp add: validE_R_def validE_def) - apply (drule(1) hoare_vcg_conj_lift) - apply (erule hoare_strengthen_post) - apply (clarsimp split: sum.splits) - done - -lemma hoare_walk_assmsE: - assumes x: "\P\ f \\rv. P\" and y: "\s. P s \ Q s" and z: "\P\ g \\rv. Q\" - shows "\P\ doE x \ f; g odE \\rv. Q\" - apply (wp z) - apply (simp add: validE_def) - apply (rule hoare_strengthen_post [OF x]) - apply (case_tac r, simp_all add: y) - done - -lemma mapME_set: - assumes est: "\x. \R\ f x \P\, -" - and invp: "\x y. \R and P x\ f y \\_. P x\, -" - and invr: "\x. \R\ f x \\_. R\, -" - shows "\R\ mapME f xs \\rv s. \x \ set rv. P x s\, -" -proof (rule hoare_post_imp_R [where Q' = "\rv s. R s \ (\x \ set rv. P x s)"], induct xs) - case Nil - thus ?case by (simp add: mapME_Nil | wp returnOKE_R_wp)+ -next - case (Cons y ys) - - have minvp: "\x. \R and P x\ mapME f ys \\_. P x\, -" - apply (rule hoare_pre) - apply (rule_tac Q' = "\_ s. R s \ P x s" in hoare_post_imp_R) - apply (wp mapME_wp' invr invp)+ - apply simp - apply simp - apply simp - done - - show ?case - apply (simp add: mapME_Cons) - apply (wp) - apply (rule_tac Q' = "\xs s. (R s \ (\x \ set xs. P x s)) \ P x s" in hoare_post_imp_R) - apply (wp Cons.hyps minvp) - apply simp - apply (fold validE_R_def) - apply simp - apply (wp invr est) - apply simp - done -qed clarsimp - -lemma unlessE_throwError_returnOk: - "(if P then returnOk v else throwError x) - = (unlessE P (throwError x) >>=E (\_. returnOk v))" - by (cases P, simp_all add: unlessE_def) - -lemma weaker_hoare_ifE: - assumes x: "\P \ a \Q\,\E\" - assumes y: "\P'\ b \Q\,\E\" - shows "\P and P'\ if test then a else b \Q\,\E\" - apply (rule hoare_vcg_precond_impE) - apply (wp x y) - apply simp - done - -lemma wp_split_const_if: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f \Q'\" - shows "\\s. (G \ P s) \ (\ G \ P' s)\ f \\rv s. (G \ Q rv s) \ (\ G \ Q' rv s)\" - by (case_tac G, simp_all add: x y) - -lemma wp_split_const_if_R: - assumes x: "\P\ f \Q\,-" - assumes y: "\P'\ f \Q'\,-" - shows "\\s. (G \ P s) \ (\ G \ P' s)\ f \\rv s. (G \ Q rv s) \ (\ G \ Q' rv s)\,-" - by (case_tac G, simp_all add: x y) - -lemma wp_throw_const_imp: - assumes x: "\P\ f \Q\" - shows "\\s. G \ P s\ f \\rv s. G \ Q rv s\" - by (case_tac G, simp_all add: x hoare_vcg_prop) - -lemma wp_throw_const_impE: - assumes x: "\P\ f \Q\,\E\" - shows "\\s. G \ P s\ f \\rv s. G \ Q rv s\,\\rv s. G \ E rv s\" - apply (case_tac G, simp_all add: x) - apply wp - done - -lemma distinct_tuple_helper: - "\P\ f \\rv s. distinct (x # xs rv s)\ \ - \P\ f \\rv s. distinct (x # (map fst (zip (xs rv s) (ys rv s))))\" - apply (erule hoare_strengthen_post) - apply (erule distinct_prefix) - apply (simp add: map_fst_zip_prefix) - done - -lemma list_case_throw_validE_R: - "\ \y ys. xs = y # ys \ \P\ f y ys \Q\,- \ \ - \P\ case xs of [] \ throwError e | x # xs \ f x xs \Q\,-" - apply (case_tac xs, simp_all) - apply wp - done - -lemma validE_R_sp: - assumes x: "\P\ f \Q\,-" - assumes y: "\x. \Q x\ g x \R\,-" - shows "\P\ f >>=E (\x. g x) \R\,-" - by (rule hoare_pre, wp x y, simp) - -lemma valid_set_take_helper: - "\P\ f \\rv s. \x \ set (xs rv s). Q x rv s\ - \ \P\ f \\rv s. \x \ set (take (n rv s) (xs rv s)). Q x rv s\" - apply (erule hoare_strengthen_post) - apply (clarsimp dest!: in_set_takeD) - done - -lemma whenE_throwError_sp: - "\P\ whenE Q (throwError e) \\rv s. 
\ Q \ P s\, -" - apply (simp add: whenE_def validE_R_def) - apply (intro conjI impI; wp) - done - -lemma no_fail_bindE [wp]: - "\ no_fail P f; \rv. no_fail (R rv) (g rv); \Q\ f \R\,- \ - \ no_fail (P and Q) (f >>=E g)" - apply (simp add: bindE_def) - apply (erule no_fail_bind) - apply (simp add: lift_def) - apply wpc - apply (simp add: throwError_def) - apply wp - apply assumption - apply (simp add: validE_R_def validE_def) - apply (erule hoare_strengthen_post) - apply clarsimp - done - -lemma empty_fail_mapM_x [simp]: - "(\x. empty_fail (a x)) \ empty_fail (mapM_x a xs)" - apply (induct_tac xs) - apply (clarsimp simp: mapM_x_Nil) - apply (clarsimp simp: mapM_x_Cons) - done - -lemma fst_throwError_returnOk: - "fst (throwError e s) = {(Inl e, s)}" - "fst (returnOk v s) = {(Inr v, s)}" - by (simp add: throwError_def returnOk_def return_def)+ - -lemma liftE_bind_return_bindE_returnOk: - "liftE (v >>= (\rv. return (f rv))) - = (liftE v >>=E (\rv. returnOk (f rv)))" - by (simp add: liftE_bindE, simp add: liftE_def returnOk_def) - -lemma bind_eqI: - "g = g' \ f >>= g = f >>= g'" by simp - -lemma not_snd_bindE_I1: - "\ snd ((a >>=E b) s) \ \ snd (a s)" - unfolding bindE_def - by (erule not_snd_bindI1) - -lemma snd_assert [monad_eq]: - "snd (assert P s) = (\ P)" - apply (clarsimp simp: fail_def return_def assert_def) - done - -lemma not_snd_assert : - "(\ snd (assert P s)) = P" - by (metis snd_assert) - -lemma snd_assert_opt [monad_eq]: - "snd (assert_opt f s) = (f = None)" - by (monad_eq simp: assert_opt_def split: option.splits) - -declare in_assert_opt [monad_eq] - -lemma empty_fail_catch: - "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catch f g)" - apply (simp add: catch_def) - apply (erule empty_fail_bind) - apply (simp split: sum.split) - done - -lemma oblivious_returnOk [simp]: - "oblivious f (returnOk e)" - by (simp add: returnOk_def) - -lemma oblivious_assertE [simp]: - "oblivious f (assertE P)" - by (simp add: assertE_def split: if_split) - - -lemma oblivious_throwError [simp]: - "oblivious f (throwError e)" - by (simp add: throwError_def) - -lemma oblivious_bindE: - "\ oblivious u f; \v. oblivious u (g v) \ - \ oblivious u (f >>=E (\v. g v))" - apply (simp add: bindE_def) - apply (erule oblivious_bind) - apply (simp add: lift_def split: sum.split) - done - -lemma oblivious_catch: - "\ oblivious u f; \v. oblivious u (g v) \ - \ oblivious u (catch f g)" - apply (simp add: catch_def) - apply (erule oblivious_bind) - apply (simp split: sum.split) - done - -lemma oblivious_when [simp]: - "oblivious f (when P m) = (P \ oblivious f m)" - by (simp add: when_def split: if_split) - -lemma oblivious_whenE [simp]: - "oblivious f (whenE P g) = (P \ oblivious f g)" - by (simp add: whenE_def split: if_split) - -lemma select_f_oblivious [simp]: - "oblivious f (select_f v)" - by (simp add: oblivious_def select_f_def) - -lemma oblivious_select: - "oblivious f (select S)" - by (simp add: oblivious_def select_def) - -lemma validE_R_abstract_rv: - "\P\ f \\rv s. \rv'. Q rv' s\,- \ \P\ f \Q\,-" - by (erule hoare_post_imp_R, simp) - -lemma validE_cases_valid: - "\P\ f \\rv s. Q (Inr rv) s\,\\rv s. Q (Inl rv) s\ - \ \P\ f \Q\" - apply (simp add: validE_def) - apply (erule hoare_strengthen_post) - apply (simp split: sum.split_asm) - done - -lemma valid_isRight_theRight_split: - "\P\ f \\rv s. Q True rv s\,\\e s. \v. Q False v s\ \ - \P\ f \\rv s. 
Q (isRight rv) (theRight rv) s\" - apply (simp add: validE_def) - apply (erule hoare_strengthen_post) - apply (simp add: isRight_def split: sum.split_asm) - done - -lemma bind_return_subst: - assumes r: "\r. \\s. P x = r\ f x \\rv s. Q rv = r\" - shows - "do a \ f x; - g (Q a) - od = - do _ \ f x; - g (P x) - od" -proof - - have "do a \ f x; - return (Q a) - od = - do _ \ f x; - return (P x) - od" - using r - apply (subst fun_eq_iff) - apply (fastforce simp: bind_def valid_def return_def) - done - hence "do a \ f x; - return (Q a) - od >>= g = - do _ \ f x; - return (P x) - od >>= g" - by (rule bind_cong, simp) - thus ?thesis - by simp -qed - -lemma zipWithM_x_Nil2 : - "zipWithM_x f xs [] = return ()" - by (simp add: zipWithM_x_mapM mapM_Nil) - -lemma assert2: - "(do v1 \ assert P; v2 \ assert Q; c od) - = (do v \ assert (P \ Q); c od)" - by (simp add: assert_def split: if_split) - -lemma assert_opt_def2: - "assert_opt v = (do assert (v \ None); return (the v) od)" - by (simp add: assert_opt_def split: option.split) - -lemma filterM_voodoo: - "\ys. P ys (do zs \ filterM m xs; return (ys @ zs) od) - \ P [] (filterM m xs)" - by (drule spec[where x=Nil], simp) - -lemma gets_assert: - "(do v1 \ assert v; v2 \ gets f; c v1 v2 od) - = (do v2 \ gets f; v1 \ assert v; c v1 v2 od)" - by (simp add: simpler_gets_def return_def assert_def fail_def bind_def - split: if_split) - -lemma list_case_return2: - "(case x of [] \ return v | y # ys \ return (f y ys)) - = return (case x of [] \ v | y # ys \ f y ys)" - by (simp split: list.split) - -lemma modify_assert: - "(do v2 \ modify f; v1 \ assert v; c v1 od) - = (do v1 \ assert v; v2 \ modify f; c v1 od)" - by (simp add: simpler_modify_def return_def assert_def fail_def bind_def - split: if_split) - -lemma gets_fold_into_modify: - "do x \ gets f; modify (g x) od = modify (\s. g (f s) s)" - "do x \ gets f; _ \ modify (g x); h od - = do modify (\s. g (f s) s); h od" - by (simp_all add: fun_eq_iff modify_def bind_assoc exec_gets - exec_get exec_put) - -lemma bind_assoc2: - "(do x \ a; _ \ b; c x od) = (do x \ (do x' \ a; _ \ b; return x' od); c x od)" - by (simp add: bind_assoc) - -lemma liftM_pre: - assumes rl: "\\s. \ P s \ a \ \_ _. False \" - shows "\\s. \ P s \ liftM f a \ \_ _. False \" - unfolding liftM_def - apply (rule seq) - apply (rule rl) - apply wp - apply simp - done - -lemma not_snd_bindD': - "\\ snd ((a >>= b) s); \ snd (a s) \ (rv, s') \ fst (a s)\ \ \ snd (a s) \ \ snd (b rv s')" - apply (frule not_snd_bindI1) - apply (erule not_snd_bindD) - apply simp - done - -lemma snd_bind [monad_eq]: - "snd ((a >>= b) s) = (snd (a s) \ (\r s'. (r, s') \ fst (a s) \ snd (b r s')))" - apply (clarsimp simp add: bind_def Bex_def image_def) - apply (subst surjective_pairing, subst prod.inject, force) - done - -lemma in_lift [monad_eq]: - "(rv, s') \ fst (lift M v s) = - (case v of Inl x \ rv = Inl x \ s' = s - | Inr x \ (rv, s') \ fst (M x s))" - apply (clarsimp simp: lift_def throwError_def return_def split: sum.splits) - done - -lemma snd_lift [monad_eq]: - "snd (lift M a b) = (\x. a = Inr x \ snd (M x b))" - apply (clarsimp simp: lift_def throwError_def return_def split: sum.splits) - done - -lemma snd_bindE [monad_eq]: - "snd ((a >>=E b) s) = (snd (a s) \ (\r s'. (r, s') \ fst (a s) \ (\a. 
r = Inr a \ snd (b a s'))))" - apply (clarsimp simp: bindE_def) - apply monad_eq - done - -lemma snd_get [monad_eq]: - shows "(snd (get s)) = False" - by (simp add: get_def) - -lemma snd_gets [monad_eq]: - shows "(snd (gets f s)) = False" - by (simp add: gets_def snd_bind snd_get snd_return) - -lemma mapME_x_Cons: - "mapME_x f (x # xs) = (doE f x; mapME_x f xs odE)" - by (simp add: mapME_x_def sequenceE_x_def) - -lemma liftME_map_mapME: - "liftME (map f) (mapME m xs) - = mapME (liftME f o m) xs" - apply (rule sym) - apply (induct xs) - apply (simp add: liftME_def mapME_Nil) - apply (simp add: mapME_Cons liftME_def bindE_assoc) - done - -lemma mapM_upd_inv: - assumes f: "\x rv. (rv,s) \ fst (f x s) \ x \ set xs \ (rv, g s) \ fst (f x (g s))" - assumes inv: "\x. \(=) s\ f x \\_. (=) s\" - shows "(rv,s) \ fst (mapM f xs s) \ (rv, g s) \ fst (mapM f xs (g s))" - using f inv -proof (induct xs arbitrary: rv s) - case Nil - thus ?case by (simp add: mapM_Nil return_def) -next - case (Cons z zs) - from Cons.prems - show ?case - apply (clarsimp simp: mapM_Cons in_monad) - apply (frule use_valid, assumption, rule refl) - apply clarsimp - apply (drule Cons.prems, simp) - apply (rule exI, erule conjI) - apply (drule Cons.hyps) - apply simp - apply assumption - apply simp - done -qed - - -(* FUXME: duplicate *) -lemma mapM_x_append2: - "mapM_x f (xs @ ys) = do mapM_x f xs; mapM_x f ys od" - apply (simp add: mapM_x_def sequence_x_def) - apply (induct xs) - apply simp - apply (simp add: bind_assoc) - done - -lemma mapM_x_split_append: - "mapM_x f xs = do _ \ mapM_x f (take n xs); mapM_x f (drop n xs) od" - using mapM_x_append[where f=f and xs="take n xs" and ys="drop n xs"] - by simp - -lemma hoare_gen_asm': - "(P \ \P'\ f \Q\) \ \P' and (\_. P)\ f \Q\" - apply (auto intro: hoare_assume_pre) - done - -lemma hoare_gen_asm_conj: - "(P \ \P'\ f \Q\) \ \\s. P' s \ P\ f \Q\" - by (fastforce simp: valid_def) - - -lemma hoare_add_K: - "\P\ f \Q\ \ \\s. P s \ I\ f \\rv s. Q rv s \ I\" - by (fastforce simp: valid_def) - - -lemma valid_rv_lift: - "\P'\ f \\rv s. rv \ Q rv s\ \ \\s. P \ P' s\ f \\rv s. rv \ P \ Q rv s\" - by (fastforce simp: valid_def) - -lemma valid_imp_ex: - "\P\ f \\rv s. \x. rv \ Q rv s x\ \ \P\ f \\rv s. rv \ (\x. Q rv s x)\" - by (fastforce simp: valid_def) - -lemma valid_rv_split: - "\\P\ f \\rv s. rv \ Q s\; \P\ f \\rv s. \rv \ Q' s\\ - \ - \P\ f \\rv s. if rv then Q s else Q' s\" - by (fastforce simp: valid_def) - -lemma hoare_rv_split: - "\\P\ f \\rv s. rv \ (Q rv s)\; \P\ f \\rv s. (\rv) \ (Q rv s)\\ - \ \P\ f \Q\" - apply (clarsimp simp: valid_def) - apply (case_tac a, fastforce+) - done - -lemma case_option_find_give_me_a_map: - "case_option a return (find f xs) - = liftM theLeft - (mapME (\x. if (f x) then throwError x else returnOk ()) xs - >>=E (\x. assert (\x \ set xs. \ f x) - >>= (\_. liftM (Inl :: 'a \ 'a + unit) a)))" - apply (induct xs) - apply simp - apply (simp add: liftM_def mapME_Nil) - apply (simp add: mapME_Cons split: if_split) - apply (clarsimp simp add: throwError_def bindE_def bind_assoc - liftM_def) - apply (rule bind_cong [OF refl]) - apply (simp add: lift_def throwError_def returnOk_def split: sum.split) - done - -lemma if_bind: - "(if P then (a >>= (\_. b)) else return ()) = - (if P then a else return ()) >>= (\_. if P then b else return ())" - apply (cases P) - apply simp - apply simp - done - -lemma in_handleE' [monad_eq]: - "((rv, s') \ fst ((f g) s)) = - ((\ex. rv = Inr ex \ (Inr ex, s') \ fst (f s)) \ - (\rv' s''. 
(rv, s') \ fst (g rv' s'') \ (Inl rv', s'') \ fst (f s)))" - apply (clarsimp simp: handleE'_def) - apply (rule iffI) - apply (subst (asm) in_bind_split) - apply (clarsimp simp: return_def split: sum.splits) - apply (case_tac a) - apply (erule allE, erule (1) impE) - apply clarsimp - apply (erule allE, erule (1) impE) - apply clarsimp - apply (subst in_bind_split) - apply (clarsimp simp: return_def split: sum.splits) - apply blast - done - -lemma in_handleE [monad_eq]: - "(a, b) \ fst ((A B) s) = - ((\x. a = Inr x \ (Inr x, b) \ fst (A s)) - \ (\r t. ((Inl r, t) \ fst (A s)) \ ((a, b) \ fst (B r t))))" - apply (unfold handleE_def) - apply (monad_eq split: sum.splits) - apply blast - done - -lemma snd_handleE' [monad_eq]: - "snd ((A B) s) = (snd (A s) \ (\r s'. (r, s')\fst (A s) \ (\a. r = Inl a \ snd (B a s'))))" - apply (clarsimp simp: handleE'_def) - apply (monad_eq simp: Bex_def split: sum.splits) - apply (metis sum.sel(1) sum.distinct(1) sumE) - done - -lemma snd_handleE [monad_eq]: - "snd ((A B) s) = (snd (A s) \ (\r s'. (r, s')\fst (A s) \ (\a. r = Inl a \ snd (B a s'))))" - apply (unfold handleE_def) - apply (rule snd_handleE') - done - -declare in_liftE [monad_eq] - -lemma snd_liftE [monad_eq]: - "snd ((liftE x) s) = snd (x s)" - apply (clarsimp simp: liftE_def snd_bind snd_return) - done - -lemma snd_returnOk [monad_eq]: - "\ snd (returnOk x s)" - apply (clarsimp simp: returnOk_def return_def) - done - -lemma snd_when [monad_eq]: - "snd (when P M s) = (P \ snd (M s))" - apply (clarsimp simp: when_def return_def) - done - -lemma bind_liftE_distrib: "(liftE (A >>= (\x. B x))) = (liftE A >>=E (\x. liftE (\s. B x s)))" - apply (clarsimp simp: liftE_def bindE_def lift_def bind_assoc) - done - -lemma in_condition [monad_eq]: - "((a, b) \ fst (condition C L R s)) = ((C s \ (a, b) \ fst (L s)) \ (\ C s \ (a, b) \ fst (R s)))" - by (rule condition_split) - -lemma snd_condition [monad_eq]: - "(snd (condition C L R s)) = ((C s \ snd (L s)) \ (\ C s \ snd (R s)))" - by (rule condition_split) - -lemma condition_apply_cong: - "\ c s = c' s'; s = s'; \s. c' s \ l s = l' s ; \s. \ c' s \ r s = r' s \ \ condition c l r s = condition c' l' r' s'" - apply (clarsimp split: condition_splits) - done - -lemma condition_cong [cong, fundef_cong]: - "\ c = c'; \s. c' s \ l s = l' s; \s. \ c' s \ r s = r' s \ \ condition c l r = condition c' l' r'" - apply (rule ext) - apply (clarsimp split: condition_splits) - done - -(* Alternative definition of no_throw; easier to work with than unfolding validE. *) -lemma no_throw_def': "no_throw P A = (\s. P s \ (\(r, t) \ fst (A s). (\x. r = Inr x)))" - apply (clarsimp simp: no_throw_def validE_def2 split_def split: sum.splits) - done - -lemma no_throw_returnOk [simp]: - "no_throw P (returnOk a)" - apply (clarsimp simp: no_throw_def) - apply wp - done - -lemma no_throw_liftE [simp]: - "no_throw P (liftE x)" - apply (clarsimp simp: liftE_def no_throw_def validE_def) - apply (wp | simp)+ - done - -lemma no_throw_bindE: "\ no_throw A X; \a. no_throw B (Y a); \ A \ X \ \_. B \,\ \_ _. True \ \ \ no_throw A (X >>=E Y)" - apply atomize - apply (clarsimp simp: no_throw_def) - apply (rule seqE [rotated]) - apply force - apply (rule hoare_validE_cases) - apply simp - apply simp - done - -lemma no_throw_bindE_simple: "\ no_throw \ L; \x. no_throw \ (R x) \ \ no_throw \ (L >>=E R)" - apply (erule no_throw_bindE) - apply assumption - apply wp - done - -lemma no_throw_handleE_simple: - notes hoare_vcg_prop[wp del] - shows "\ \x. 
no_throw \ L \ no_throw \ (R x) \ \ no_throw \ (L R)" - apply (clarsimp simp: no_throw_def) - apply atomize - apply clarsimp - apply (erule disjE) - apply (wpsimp wp: hoare_vcg_prop[where f="R x" for x]) - apply assumption - apply simp - apply (rule handleE_wp) - apply (erule_tac x=x in allE) - apply assumption - apply wp - done - -lemma no_throw_handle2: "\ \a. no_throw Y (B a); \ X \ A \ \_ _. True \,\ \_. Y \ \ \ no_throw X (A B)" - apply atomize - apply (clarsimp simp: no_throw_def' handleE'_def bind_def) - apply (clarsimp simp: validE_def valid_def) - apply (erule allE, erule (1) impE) - apply (erule (1) my_BallE) - apply (clarsimp simp: return_def split: sum.splits) - apply force - done - -lemma no_throw_handle: "\ \a. no_throw Y (B a); \ X \ A \ \_ _. True \,\ \_. Y \ \ \ no_throw X (A B)" - apply (unfold handleE_def) - apply (erule (1) no_throw_handle2) - done - -lemma no_throw_fail [simp]: "no_throw P fail" - apply (clarsimp simp: no_throw_def) - done - -lemma lift_Inr [simp]: "(lift X (Inr r)) = (X r)" - apply (rule ext)+ - apply (clarsimp simp: lift_def bind_def split_def image_def) - done - -lemma lift_Inl [simp]: "lift C (Inl a) = throwError a" - apply (clarsimp simp: lift_def throwError_def) - done - -lemma returnOk_def2: "returnOk a = return (Inr a)" - apply (clarsimp simp: returnOk_def return_def) - done - -lemma empty_fail_spec [simp]: "empty_fail (state_select F)" - apply (clarsimp simp: state_select_def empty_fail_def) - done - -declare snd_fail [simp] - -lemma empty_fail_select [simp]: "empty_fail (select V) = (V \ {})" - apply (clarsimp simp: select_def empty_fail_def) - done - -lemma bind_fail_propagates: "\ empty_fail A \ \ A >>= (\_. fail) = fail" - apply (monad_eq simp: empty_fail_def) - by fastforce - -lemma bindE_fail_propagates: "\ no_throw \ A; empty_fail A \ \ A >>=E (\_. fail) = fail" - apply (rule ext) - apply (clarsimp simp: empty_fail_def) - apply (clarsimp simp: no_throw_def validE_def valid_def bind_def - bindE_def split_def fail_def NonDetMonad.lift_def throwError_def - split:sum.splits) - apply fastforce - done - -lemma empty_fail_liftE [simp]: - "empty_fail (liftE X) = empty_fail X" - apply (simp add: empty_fail_error_bits) - done - -declare snd_returnOk [simp, monad_eq] - -lemma liftE_fail [simp]: "liftE fail = fail" - apply monad_eq - done - -lemma in_catch [monad_eq]: - "(r, t) \ fst ((M E) s) - = ((Inr r, t) \ fst (M s) - \ (\r' s'. ((Inl r', s') \ fst (M s)) \ (r, t) \ fst (E r' s')))" - apply (rule iffI) - apply (clarsimp simp: catch_def in_bind in_return split: sum.splits) - apply (metis sumE) - apply (clarsimp simp: catch_def in_bind in_return split: sum.splits) - apply (metis sum.sel(1) sum.distinct(1) sum.inject(2)) - done - -lemma snd_catch [monad_eq]: - "snd ((M E) s) - = (snd (M s) - \ (\r' s'. ((Inl r', s') \ fst (M s)) \ snd (E r' s')))" - apply (rule iffI) - apply (clarsimp simp: catch_def snd_bind snd_return split: sum.splits) - apply (clarsimp simp: catch_def snd_bind snd_return split: sum.splits) - apply force - done - -lemma in_get [monad_eq]: - "(r, s') \ fst (get s) = (r = s \ s' = s)" - apply (clarsimp simp: get_def) - done - -lemma returnOk_cong: "\ \s. 
B a s = B' a s \ \ ((returnOk a) >>=E B) = ((returnOk a) >>=E B')" - apply monad_eq - done - -lemma in_state_assert [monad_eq, simp]: - "(rv, s') \ fst (state_assert P s) = (rv = () \ s' = s \ P s)" - apply (monad_eq simp: state_assert_def) - apply metis - done - -lemma snd_state_assert [monad_eq]: - "snd (state_assert P s) = (\ P s)" - apply (monad_eq simp: state_assert_def Bex_def) - done - -lemma state_assert_false [simp]: "state_assert (\_. False) = fail" - by monad_eq - -lemma no_fail_state_assert [wp]: "no_fail P (state_assert P)" - by (monad_eq simp: no_fail_def state_assert_def) - -lemma no_fail_modify [wp]: "no_fail \ (modify M)" - by (metis non_fail_modify) - -lemma combine_validE: "\ \ P \ x \ Q \,\ E \; - \ P' \ x \ Q' \,\ E' \ \ \ - \ P and P' \ x \ \r. (Q r) and (Q' r) \,\\r. (E r) and (E' r) \" - apply (clarsimp simp: validE_def valid_def split: sum.splits) - apply (erule allE, erule (1) impE)+ - apply (erule (1) my_BallE)+ - apply clarsimp - done - -lemma condition_swap: "(condition C A B) = (condition (\s. \ C s) B A)" - apply (rule ext) - apply (clarsimp split: condition_splits) - done - -lemma condition_fail_rhs: "(condition C X fail) = (state_assert C >>= (\_. X))" - apply (rule ext) - apply (monad_eq simp: Bex_def) - done - -lemma condition_fail_lhs: "(condition C fail X) = (state_assert (\s. \ C s) >>= (\_. X))" - apply (metis condition_fail_rhs condition_swap) - done - -lemma condition_bind_fail [simp]: - "(condition C A B >>= (\_. fail)) = condition C (A >>= (\_. fail)) (B >>= (\_. fail))" - apply monad_eq - apply blast - done - -lemma no_throw_Inr: - "\ x \ fst (A s); no_throw P A; P s \ \ \y. fst x = Inr y" - apply (clarsimp simp: no_throw_def' split: sum.splits) - apply (erule allE, erule (1) impE, erule (1) my_BallE) - apply clarsimp - done - -lemma no_throw_handleE': "no_throw \ A \ (A B) = A" - apply (rule monad_eqI) - apply monad_eq - apply (fastforce dest: no_throw_Inr) - apply monad_eq - apply (metis (lifting) fst_conv no_throw_Inr) - apply monad_eq - apply (fastforce dest: no_throw_Inr) - done - -lemma no_throw_handleE: "no_throw \ A \ (A B) = A" - apply (unfold handleE_def) - apply (subst no_throw_handleE') - apply auto - done - -lemma whileLoopE_nothrow: - "\ \x. no_throw \ (B x) \ \ no_throw \ (whileLoopE C B x)" - apply atomize - apply (clarsimp simp: no_throw_def) - apply (rule validE_whileLoopE [where I="\_ _. True"]) - apply simp - apply (rule validE_weaken) - apply force - apply simp - apply simp - apply simp - apply simp - done - -lemma handleE'_nothrow_lhs: - "\ no_throw \ L \ \ no_throw \ (L R)" - apply (clarsimp simp: no_throw_def) - apply (rule handleE'_wp [rotated]) - apply assumption - apply simp - done - -lemma handleE'_nothrow_rhs: - "\ \x. no_throw \ (R x) \ \ no_throw \ (L R)" - apply atomize - apply (clarsimp simp: no_throw_def) - apply (rule handleE'_wp) - apply (erule allE) - apply assumption - apply (rule hoareE_TrueI) - done - -lemma handleE_nothrow_lhs: - "\ no_throw \ L \ \ no_throw \ (L R)" - by (metis handleE'_nothrow_lhs handleE_def) - -lemma handleE_nothrow_rhs: - "\ \x. 
no_throw \ (R x) \ \ no_throw \ (L R)" - by (metis no_throw_handleE_simple) - -lemma condition_nothrow: "\ no_throw \ L; no_throw \ R \ \ no_throw \ (condition C L R)" - apply (clarsimp simp: condition_def no_throw_def validE_def2) - done - -lemma empty_fail_guard [simp]: "empty_fail (state_assert G)" - apply (clarsimp simp: state_assert_def assert_def empty_fail_def get_def return_def bind_def) - done - -lemma simple_bind_fail [simp]: - "(state_assert X >>= (\_. fail)) = fail" - "(modify M >>= (\_. fail)) = fail" - "(return X >>= (\_. fail)) = fail" - "(gets X >>= (\_. fail)) = fail" - apply (auto intro!: bind_fail_propagates) - done - -lemma valid_case_prod: - "\ \x y. valid (P x y) (f x y) Q \ \ valid (case_prod P v) (case_prod (\x y. f x y) v) Q" - by (simp add: split_def) - -lemma validE_case_prod: - "\ \x y. validE (P x y) (f x y) Q E \ \ validE (case_prod P v) (case_prod (\x y. f x y) v) Q E" - by (simp add: split_def) - -lemma in_select [monad_eq]: - "(rv, s') \ fst (select S s) = (s' = s \ rv \ S)" - apply (clarsimp simp: select_def) - apply blast - done - -lemma snd_select [monad_eq]: - "\ snd (select S s)" - by (clarsimp simp: select_def) - -lemma bindE_handleE_join: - "no_throw \ A \ (A >>=E (\x. (B x) C)) = ((A >>=E B C))" - apply (monad_eq simp: Bex_def Ball_def no_throw_def') - apply blast - done - -lemma catch_bind_distrib: - "do _ <- m h; f od = (doE m; liftE f odE (\x. do h x; f od))" - by (force simp: catch_def bindE_def bind_assoc liftE_def NonDetMonad.lift_def bind_def - split_def return_def throwError_def - split: sum.splits) - -lemma if_catch_distrib: - "((if P then f else g) h) = (if P then f h else g h)" - by (simp split: if_split) - -lemma will_throw_and_catch: - "f = throwError e \ (f (\_. g)) = g" - by (simp add: catch_def throwError_def) - -lemma catch_is_if: - "(doE x <- f; g x odE h) = - do - rv <- f; - if sum.isl rv then h (projl rv) else g (projr rv) h - od" - apply (simp add: bindE_def catch_def bind_assoc cong: if_cong) - apply (rule bind_cong, rule refl) - apply (clarsimp simp: NonDetMonad.lift_def throwError_def split: sum.splits) - done - -lemma snd_put [monad_eq]: - "\ snd (put t s)" - by (clarsimp simp: put_def) - -lemma snd_modify [monad_eq]: - "\ snd (modify t s)" - by (clarsimp simp: modify_def put_def get_def bind_def) - -lemma no_fail_False [simp]: - "no_fail (\_. False) X" - by (clarsimp simp: no_fail_def) - -lemma valid_pre_satisfies_post: - "\ \s r' s'. P s \ Q r' s' \ \ \ P \ m \ Q \" - by (clarsimp simp: valid_def) - -lemma validE_pre_satisfies_post: - "\ \s r' s'. P s \ Q r' s'; \s r' s'. P s \ R r' s' \ \ \ P \ m \ Q \,\ R \" - by (clarsimp simp: validE_def2 split: sum.splits) - -lemma snd_gets_the [monad_eq]: - "snd (gets_the X s) = (X s = None)" - by (monad_eq simp: gets_the_def gets_def get_def) - -lemma liftE_K_bind: "liftE ((K_bind (\s. A s)) x) = K_bind (liftE (\s. A s)) x" - by clarsimp - -lemma hoare_assume_preNF: - "(\s. P s \ \P\ f \Q\!) \ \P\ f \Q\!" - by (metis validNF_alt_def) - -lemma bexEI: "\\x\S. Q x; \x. \x \ S; Q x\ \ P x\ \ \x\S. P x" by blast - -lemma monad_eq_split: - assumes "\r s. Q r s \ f r s = f' r s" - "\P\ g \\r s. Q r s\" - "P s" - shows "(g >>= f) s = (g >>= f') s" -proof - - have pre: "\rv s'. 
\(rv, s') \ fst (g s)\ \ f rv s' = f' rv s'" - using assms unfolding valid_def - by (erule_tac x=s in allE) auto - show ?thesis - apply (simp add: bind_def image_def) - apply (intro conjI) - apply (rule set_eqI) - apply (clarsimp simp: Union_eq) - apply (rule iffI; elim exEI conjE; simp; elim exEI bexEI; clarsimp simp: pre) - apply (rule iffI; cases "snd (g s)"; simp; elim exEI bexEI; clarsimp simp: pre) - done -qed - -lemma monad_eq_split2: -assumes eq: " g' s = g s" -assumes tail:"\r s. Q r s \ f r s = f' r s" -and hoare: "\P\g\\r s. Q r s\" "P s" -shows "(g>>=f) s = (g'>>= f') s" -proof - -have pre: "\aa bb. \(aa, bb) \ fst (g s)\ \ Q aa bb" - using hoare by (auto simp: valid_def) -show ?thesis - apply (simp add:bind_def eq split_def image_def) - apply (rule conjI) - apply (rule set_eqI) - apply (clarsimp simp:Union_eq) - apply (metis pre surjective_pairing tail) - apply (metis pre surjective_pairing tail) - done -qed - -lemma monad_eq_split_tail: - "\f = g;a s = b s\ \ (a >>= f) s = ((b >>= g) s)" - by (simp add:bind_def) - -lemma double_gets_drop_regets: - "(do x \ gets f; - xa \ gets f; - m xa x - od) = - (do xa \ gets f; - m xa xa - od)" - by (simp add: gets_def get_def bind_def return_def) - -definition monad_commute where - "monad_commute P a b \ - (\s. (P s \ ((do x\a;y\b; return (x, y) od) s) = ((do y\b;x\a; return (x, y) od) s)))" - - -lemma monad_eq: - "a s = b s \ (a >>= g) s = (b >>= g) s" - by (auto simp:bind_def) - -lemma monad_commute_simple: - "\monad_commute P a b;P s\ \ ((do x\a;y\b; g x y od) s) = ((do y\b;x\a; g x y od) s)" - apply (clarsimp simp:monad_commute_def) - apply (drule spec) - apply (erule(1) impE) - apply (drule_tac g = "(\t. g (fst t) (snd t))" in monad_eq) - apply (simp add:bind_assoc) - done - -lemma commute_commute: - "monad_commute P f h \ monad_commute P h f" - apply (simp (no_asm) add: monad_commute_def) - apply (clarsimp) - apply (erule monad_commute_simple[symmetric]) - apply simp - done - -lemma assert_commute: "monad_commute (K G) (assert G) f" - by (clarsimp simp:assert_def monad_commute_def) - -lemma cond_fail_commute: "monad_commute (K (\G)) (when G fail) f" - by (clarsimp simp:when_def fail_def monad_commute_def) - -lemma return_commute: "monad_commute \ (return a) f" - by (clarsimp simp: return_def bind_def monad_commute_def) - -lemma monad_commute_guard_imp: - "\monad_commute P f h; \s. Q s \ P s \ \ monad_commute Q f h" - by (clarsimp simp:monad_commute_def) - -lemma monad_commute_split: - "\\r. monad_commute (Q r) f (g r); monad_commute P f h; - \P'\ h \\r. Q r\\ - \ monad_commute (P and P') f (h>>=g)" - apply (simp (no_asm) add:monad_commute_def) - apply (clarsimp simp:bind_assoc) - apply (subst monad_commute_simple) - apply simp+ - apply (rule_tac Q = "(\x. Q x)" in monad_eq_split) - apply (subst monad_commute_simple[where a = f]) - apply assumption - apply simp+ - done - -lemma monad_commute_get: - assumes hf: "\P. \P\ f \\r. P\" - and hg: "\P. \P\ g \\r. P\" - and eptyf: "empty_fail f" "empty_fail g" - shows "monad_commute \ f g" -proof - - have fsame: "\a b s. (a,b) \ fst (f s) \ b = s" - by (drule use_valid[OF _ hf],auto) - have gsame: "\a b s. 
(a,b) \ fst (g s) \ b = s" - by (drule use_valid[OF _ hg],auto) - note ef = empty_fail_not_snd[OF _ eptyf(1)] - note eg = empty_fail_not_snd[OF _ eptyf(2)] - show ?thesis - apply (simp add:monad_commute_def) - apply (clarsimp simp:bind_def split_def return_def) - apply (intro conjI) - apply (rule set_eqI) - apply (rule iffI) - apply (clarsimp simp:Union_eq) - apply (frule fsame) - apply clarsimp - apply (frule gsame) - apply (metis fst_conv snd_conv) - apply (clarsimp simp:Union_eq) - apply (frule gsame) - apply clarsimp - apply (frule fsame) - apply clarsimp - apply (metis fst_conv snd_conv) - apply (rule iffI) - apply (erule disjE) - apply (clarsimp simp:image_def) - apply (metis fsame) - apply (clarsimp simp:image_def) - apply (drule eg) - apply clarsimp - apply (rule bexI [rotated], assumption) - apply (frule gsame) - apply clarsimp - apply (erule disjE) - apply (clarsimp simp:image_def dest!:gsame) - apply (clarsimp simp:image_def) - apply (drule ef) - apply clarsimp - apply (frule fsame) - apply (erule bexI[rotated]) - apply simp - done -qed - -lemma mapM_x_commute: -assumes commute: - "\r. monad_commute (P r) a (b r)" -and single: - "\r x. \P r and K (f r \ f x) and P x\ b x \\v. P r \" -shows - "monad_commute (\s. (distinct (map f list)) \ (\r\ set list. P r s)) a (mapM_x b list)" - apply (induct list) - apply (clarsimp simp:mapM_x_Nil return_def bind_def monad_commute_def) - apply (clarsimp simp:mapM_x_Cons) - apply (rule monad_commute_guard_imp) - apply (rule monad_commute_split) - apply assumption - apply (rule monad_commute_guard_imp[OF commute]) - apply assumption - apply (wp hoare_vcg_ball_lift) - apply (rule single) - apply (clarsimp simp: image_def) - apply auto - done - -lemma commute_name_pre_state: -assumes "\s. P s \ monad_commute ((=) s) f g" -shows "monad_commute P f g" - using assms - by (clarsimp simp:monad_commute_def) - -lemma commute_rewrite: -assumes rewrite: "\s. Q s \ f s = t s" - and hold : "\P\ g \\x. 
Q\" - shows "monad_commute R t g \ monad_commute (P and Q and R) f g" - apply (clarsimp simp:monad_commute_def bind_def split_def return_def) - apply (drule_tac x = s in spec) - apply (clarsimp simp:rewrite[symmetric]) - apply (intro conjI) - apply (rule set_eqI) - apply (rule iffI) - apply clarsimp - apply (rule bexI[rotated],assumption) - apply (subst rewrite) - apply (rule use_valid[OF _ hold]) - apply simp+ - apply (erule bexI[rotated],simp) - apply clarsimp - apply (rule bexI[rotated],assumption) - apply (subst rewrite[symmetric]) - apply (rule use_valid[OF _ hold]) - apply simp+ - apply (erule bexI[rotated],simp) - apply (intro iffI) - apply clarsimp - apply (rule bexI[rotated],assumption) - apply simp - apply (subst rewrite) - apply (erule(1) use_valid[OF _ hold]) - apply simp - apply (clarsimp) - apply (drule bspec,assumption) - apply clarsimp - apply (metis rewrite use_valid[OF _ hold]) - done - - -lemma commute_grab_asm: - "(F \ monad_commute P f g) \ (monad_commute (P and (K F)) f g)" - by (clarsimp simp: monad_commute_def) - -lemma returnOk_E': "\P\ returnOk r -,\E\" - by (clarsimp simp: returnOk_def validE_E_def validE_def valid_def return_def) - -lemma throwError_R': "\P\ throwError e \Q\,-" - by (clarsimp simp:throwError_def validE_R_def validE_def valid_def return_def) - -lemma select_modify_comm: - "(do b \ select S; _ \ modify f; use b od) = - (do _ \ modify f; b \ select S; use b od)" - by (simp add: bind_def split_def select_def simpler_modify_def image_def) - -lemma select_f_modify_comm: - "(do b \ select_f S; _ \ modify f; use b od) = - (do _ \ modify f; b \ select_f S; use b od)" - by (simp add: bind_def split_def select_f_def simpler_modify_def image_def) - -lemma hoare_validE_R_conjI: - "\ \P\ f \Q\, - ; \P\ f \Q'\, - \ \ \P\ f \\rv s. Q rv s \ Q' rv s\, -" - apply (clarsimp simp: Ball_def validE_R_def validE_def valid_def) - by (case_tac a; fastforce) - -lemma validE_R_post_conjD1: - "\P\ f \\r s. Q r s \ R r s\,- \ \P\ f \Q\,-" - apply (clarsimp simp: validE_R_def validE_def valid_def) - by (case_tac a; fastforce) - -lemma validE_R_post_conjD2: - "\P\ f \\r s. Q r s \ R r s\,- \ \P\ f \R\,-" - apply (clarsimp simp: validE_R_def validE_def valid_def) - by (case_tac a; fastforce) - -lemma throw_opt_wp[wp]: - "\if v = None then E ex else Q (the v)\ throw_opt ex v \Q\,\E\" - unfolding throw_opt_def by wpsimp auto - -lemma hoare_name_pre_state2: - "(\s. \P and ((=) s)\ f \Q\) \ \P\ f \Q\" - by (auto simp: valid_def intro: hoare_name_pre_state) - end diff --git a/lib/None_Top_Bot.thy b/lib/None_Top_Bot.thy new file mode 100644 index 0000000000..6f9faf7c16 --- /dev/null +++ b/lib/None_Top_Bot.thy @@ -0,0 +1,244 @@ +(* + * Copyright 2024, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Predicates on option that map None to True/False, or more generally to top/bot. + E.g. "none_top ((\) 0) opt_ptr" or "none_top tcb_at p s". + + They are definitions, not abbreviations so that they don't participate in general + option case splits (separate split rules are provided), and so that we can control + simp/intro/elim setup a bit better. It should usually be unnecessary to unfold the + definitions (e.g. see the all/ex rules + intro/dest/elim rules), but it is not + harmful to do so. + + The main setup is for the general lattice case, followed by a section that spells + out properties for the more common bool and function cases together with additional + automation setup for these. 
+*) + +theory None_Top_Bot + imports Monads.Fun_Pred_Syntax +begin + +definition none_top :: "('a \ 'b) \ 'a option \ 'b::top" where + "none_top \ case_option top" + +definition none_bot :: "('a \ 'b) \ 'a option \ 'b::bot" where + "none_bot \ case_option bot" + + +section \General lattice properties for @{const none_top}\ + +lemma none_top_simps[simp]: + "none_top f None = top" + "none_top f (Some x) = f x" + by (auto simp: none_top_def) + +(* Mirrors option.splits *) +lemma none_top_split: + "P (none_top f opt) = ((opt = None \ P top) \ (\x. opt = Some x \ P (f x)))" + by (cases opt) auto + +lemma none_top_split_asm: + "P (none_top f opt) = (\ (opt = None \ \ P top \ (\x. opt = Some x \ \ P (f x))))" + by (cases opt) auto + +lemmas none_top_splits = none_top_split none_top_split_asm + +lemma none_top_if: + "none_top f opt = (if opt = None then top else f (the opt))" + by (cases opt) auto + +(* General version of none_top_bool_all below *) +lemma none_top_Inf: + "none_top f opt = (Inf {f x |x. opt = Some x} :: 'a :: complete_lattice)" + by (cases opt) auto + +lemma none_top_top[simp]: + "none_top top = top" + by (rule ext) (simp split: none_top_splits) + + +section \Bool/fun lemmas for @{const none_top}\ + +lemma none_top_bool_all: + "none_top f opt = (\x. opt = Some x \ f x)" + by (auto simp: none_top_Inf) + +lemma none_top_bool_cases: + "none_top f opt = (opt = None \ (\x. opt = Some x \ f x))" + by (cases opt) auto + +lemma none_top_boolI: + "(\x. opt = Some x \ f x) \ none_top f opt" + by (simp add: none_top_bool_all) + +lemma none_top_boolD: + "none_top f opt \ opt = None \ (\x. opt = Some x \ f x)" + by (cases opt) auto + +lemma none_top_boolE: + "\ none_top f opt; opt = None \ P; \x. \ opt = Some x; f x \ \ P \ \ P" + by (cases opt) auto + +lemma none_top_False_bool_None[simp]: + "none_top \ opt = (opt = None)" + by (cases opt) auto + +(* We don't usually use "bot" on bool -- only putting this here for completeness. *) +lemma none_top_bot_bool_None[intro!, simp]: + "none_top bot opt = (opt = None)" + by (cases opt) auto + +lemma none_top_True[simp]: + "none_top \ = \" + by (rule ext) (simp split: none_top_split) + +lemma none_top_True_bool[intro!]: + "none_top \ opt" + by simp + +lemma none_top_fun_all: + "none_top f opt s = (\x. opt = Some x \ f x s)" + by (cases opt) auto + +lemma none_top_fun_cases: + "none_top f opt s = (opt = None \ (\x. opt = Some x \ f x s))" + by (cases opt) auto + +lemma none_top_funI: + "(\x. opt = Some x \ f x s) \ none_top f opt s" + by (simp add: none_top_fun_all) + +lemma none_top_funD: + "none_top f opt s \ opt = None \ (\x. opt = Some x \ f x s)" + by (cases opt) auto + +lemma none_top_funE: + "\ none_top f opt s; opt = None \ P; \x. \ opt = Some x; f x s \ \ P \ \ P" + by (cases opt) auto + +lemma none_top_False_fun_None[simp]: + "none_top \\ opt s = (opt = None)" + by (cases opt) auto + +lemma none_top_True_True[simp]: + "none_top \\ = \\" + by (rule ext) (simp split: none_top_split) + +lemma none_top_True_fun[intro!]: + "none_top \\ opt s" + by simp + +section \General lattice properties for @{const none_bot}\ + +lemma none_bot_simps[simp]: + "none_bot f None = bot" + "none_bot f (Some x) = f x" + by (auto simp: none_bot_def) + +(* Mirrors option.splits *) +lemma none_bot_split: + "P (none_bot f opt) = ((opt = None \ P bot) \ (\x. opt = Some x \ P (f x)))" + by (cases opt) auto + +lemma none_bot_split_asm: + "P (none_bot f opt) = (\ (opt = None \ \ P bot \ (\x. 
opt = Some x \ \ P (f x))))" + by (cases opt) auto + +lemmas none_bot_splits = none_bot_split none_bot_split_asm + +(* General version of none_bot_bool_ex below *) +lemma none_bot_Sup: + "none_bot f opt = (Sup {f x |x. opt = Some x} :: 'a :: complete_lattice)" + by (cases opt) auto + +lemma none_bot_if: + "none_bot f opt = (if opt = None then bot else f (the opt))" + by (cases opt) auto + +lemma none_bot_bot[simp]: + "none_bot bot = bot" + by (rule ext) (simp split: none_bot_splits) + + +section \Bool/fun lemmas for @{const none_bot}\ + +lemma none_bot_bool_ex: + "none_bot f opt = (\x. opt = Some x \ f x)" + by (auto simp: none_bot_Sup) + +lemma none_bot_boolI: + "\x. opt = Some x \ f x \ none_bot f opt" + by (auto simp: none_bot_bool_ex) + +lemma none_bot_boolD: + "none_bot f opt \ \x. opt = Some x \ f x" + by (cases opt) auto + +lemma none_bot_boolE: + "\ none_bot f opt; \x. \ opt = Some x; f x \ \ P \ \ P" + by (cases opt) auto + +(* As for none_top_bot, it would be unusual to see "top" for bool. Only adding the lemma here for + completeness *) +lemma none_bot_top_bool_neq_None[simp]: + "none_bot top opt = (opt \ None)" + by (cases opt) auto + +lemma none_bot_True_bool_neq_None[simp]: + "none_bot \ opt = (opt \ None)" + by (cases opt) auto + +lemma none_bot_False_bool[simp]: + "none_bot \ = \" + by (rule ext) (simp split: none_bot_split) + +lemma none_bot_False_boolE[dest!]: + "none_bot \ opt \ False" + by simp + +lemma none_bot_fun_ex: + "none_bot f opt s = (\x. opt = Some x \ f x s)" + by (cases opt) auto + +lemma none_bot_funI: + "\x. opt = Some x \ f x s \ none_bot f opt s" + by (simp add: none_bot_fun_ex) + +lemma none_bot_funD: + "none_bot f opt s \ \x. opt = Some x \ f x s" + by (cases opt) auto + +lemma none_bot_funE: + "\ none_bot f opt s; \x. \ opt = Some x; f x s \ \ P \ \ P" + by (cases opt) auto + +lemma none_bot_True_fun_neq_None[simp]: + "none_bot \\ opt s = (opt \ None)" + by (cases opt) auto + +lemma none_bot_False_fun[simp]: + "none_bot \\ = \\" + by (rule ext) (simp split: none_bot_split) + +lemma none_bot_False_funE[dest!]: + "none_bot \\ opt s \ False" + by simp + +section \Automation setup and short-hand names\ + +lemmas none_topI[intro!] = none_top_boolI none_top_funI +lemmas none_topD = none_top_boolD none_top_funD +lemmas none_topE = none_top_boolE none_top_funE +lemmas none_top_all = none_top_bool_all none_top_fun_all +lemmas none_top_case = none_top_bool_cases none_top_fun_cases + +lemmas none_botI = none_bot_boolI none_bot_funI +lemmas none_botD = none_bot_boolD none_bot_funD +lemmas none_botE[elim!] = none_bot_boolE none_bot_funE +lemmas none_bot_ex = none_bot_bool_ex none_bot_fun_ex + +end \ No newline at end of file diff --git a/lib/Oblivious.thy b/lib/Oblivious.thy new file mode 100644 index 0000000000..7cc15fa478 --- /dev/null +++ b/lib/Oblivious.thy @@ -0,0 +1,147 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* The oblivious predicate and supporting lemmas. + + "oblivious f m" expresses that execution of the monad m is oblivious to the effects of the + function f on the state *) + +theory Oblivious + imports + Monads.Nondet_In_Monad + Monads.Nondet_VCG +begin + + +definition + oblivious :: "('a \ 'a) \ ('a, 'b) nondet_monad \ bool" where + "oblivious f m \ \s. (\(rv, s') \ fst (m s). (rv, f s') \ fst (m (f s))) + \ (\(rv, s') \ fst (m (f s)). \s''. 
(rv, s'') \ fst (m s) \ s' = f s'') + \ snd (m (f s)) = snd (m s)" + + +lemma oblivious_return[simp]: + "oblivious f (return x)" + by (simp add: oblivious_def return_def) + +lemma oblivious_fail[simp]: + "oblivious f fail" + by (simp add: oblivious_def fail_def) + +lemma oblivious_assert[simp]: + "oblivious f (assert x)" + by (simp add: assert_def) + +lemma oblivious_assert_opt[simp]: + "oblivious f (assert_opt fn)" + by (simp add: assert_opt_def split: option.splits) + +lemma oblivious_bind: + "\ oblivious f m; \rv. oblivious f (m' rv) \ \ oblivious f (m >>= m')" + apply (simp add: oblivious_def) + apply (rule allI) + apply (erule allE) + apply (intro conjI) + apply (drule conjunct1) + apply (clarsimp simp: in_monad) + apply fastforce + apply (drule conjunct2, drule conjunct1) + apply (clarsimp simp: in_monad) + apply fastforce + apply (clarsimp simp: bind_def disj_commute) + apply (rule disj_cong [OF refl]) + apply (rule iffI) + apply (clarsimp simp: split_def) + apply fastforce + apply clarsimp + apply (drule(1) bspec) + apply (clarsimp simp: split_def) + apply (drule (1) bspec) + apply (rule bexI [rotated], assumption) + apply clarsimp + done + +lemma oblivious_gets[simp]: + "oblivious f (gets f') = (\s. f' (f s) = f' s)" + by (fastforce simp add: oblivious_def simpler_gets_def) + +lemma oblivious_liftM: + "oblivious f m \ oblivious f (liftM g m)" + by (simp add: liftM_def oblivious_bind) + +lemma oblivious_modify[simp]: + "oblivious f (modify f') = (\s. f' (f s) = f (f' s))" + apply (simp add: oblivious_def simpler_modify_def) + apply (rule ball_cong[where A=UNIV, OF refl, simplified]) + apply fastforce + done + +lemma oblivious_modify_swap: + "oblivious f m \ (modify f >>= (\rv. m)) = (m >>= (\rv. modify f))" + apply (clarsimp simp: bind_def simpler_modify_def) + apply (rule ext)+ + apply (case_tac "m (f s)", clarsimp) + apply (simp add: oblivious_def) + apply (drule_tac x=s in spec) + apply (rule conjI) + apply (rule set_eqI) + apply (rule iffI) + apply (drule conjunct2, drule conjunct1) + apply (drule_tac x=x in bspec, simp) + apply clarsimp + apply (rule_tac x="((), s'')" in bexI) + apply simp + apply simp + apply (drule conjunct1) + apply fastforce + apply (drule conjunct2)+ + apply fastforce + done + +lemma oblivious_returnOk[simp]: + "oblivious f (returnOk e)" + by (simp add: returnOk_def) + +lemma oblivious_assertE[simp]: + "oblivious f (assertE P)" + by (simp add: assertE_def split: if_split) + +lemma oblivious_throwError[simp]: + "oblivious f (throwError e)" + by (simp add: throwError_def) + +lemma oblivious_bindE: + "\ oblivious u f; \v. oblivious u (g v) \ \ oblivious u (f >>=E (\v. g v))" + apply (simp add: bindE_def) + apply (erule oblivious_bind) + apply (simp add: lift_def split: sum.split) + done + +lemma oblivious_catch: + "\ oblivious u f; \v. 
oblivious u (g v) \ \ oblivious u (catch f g)" + apply (simp add: catch_def) + apply (erule oblivious_bind) + apply (simp split: sum.split) + done + +lemma oblivious_when[simp]: + "oblivious f (when P m) = (P \ oblivious f m)" + by (simp add: when_def split: if_split) + +lemma oblivious_whenE[simp]: + "oblivious f (whenE P g) = (P \ oblivious f g)" + by (simp add: whenE_def split: if_split) + +lemma select_f_oblivious[simp]: + "oblivious f (select_f v)" + by (simp add: oblivious_def select_f_def) + +lemma oblivious_select: + "oblivious f (select S)" + by (simp add: oblivious_def select_def) + +end \ No newline at end of file diff --git a/lib/Qualify.thy b/lib/Qualify.thy index f8353cf152..7a39b3a2a1 100644 --- a/lib/Qualify.thy +++ b/lib/Qualify.thy @@ -64,8 +64,8 @@ fun get_new_consts old_thy consts = val space = #const_space (Consts.dest consts); val consts = - filter (fn nm => not (can (Consts.the_const (Sign.consts_of old_thy)) nm) andalso - can (Consts.the_const consts) nm) new_consts + filter (fn nm => not (can (Consts.the_const_type (Sign.consts_of old_thy)) nm) andalso + can (Consts.the_const_type consts) nm) new_consts |> map (fn nm => `(make_bind space old_thy) nm); in consts end; @@ -110,7 +110,7 @@ val _ = Toplevel.theory (set_global_qualify {name = str, target_name = case target of SOME (nm, _) => nm | _ => str}))); fun syntax_alias global_alias local_alias b name = - Local_Theory.declaration {syntax = true, pervasive = true} (fn phi => + Local_Theory.declaration {syntax = true, pos = Position.none, pervasive = true} (fn phi => let val b' = Morphism.binding phi b in Context.mapping (global_alias b' name) (local_alias b' name) end); diff --git a/lib/ROOT b/lib/ROOT index 2f1cd87d60..10c3726175 100644 --- a/lib/ROOT +++ b/lib/ROOT @@ -9,29 +9,22 @@ chapter Lib session Lib (lib) = Word_Lib + sessions "HOL-Library" - "HOL-Eisbach" - directories - "ml-helpers" - "subgoal_focus" - "Monad_WP" - "Monad_WP/wp" + Basics + Eisbach_Tools + ML_Utils + Monads theories Lib - Apply_Trace_Cmd AddUpdSimps - EmptyFailLib List_Lib SubMonadLib Simulation - MonadEq SimpStrategy Extract_Conjunct GenericLib - ProvePart Corres_Adjust_Preconds Requalify Value_Abbreviation - Eisbach_Methods HaskellLib_H Eval_Bool Bisim_UL @@ -41,31 +34,21 @@ session Lib (lib) = Word_Lib + Crunch_Instances_Trace StateMonad Corres_UL + Corres_Method Find_Names LemmaBucket Try_Methods ListLibLemmas Time_Methods_Cmd - Apply_Debug MonadicRewrite HaskellLemmaBucket - "ml-helpers/MkTermAntiquote" - "ml-helpers/TermPatternAntiquote" - "ml-helpers/TacticAntiquotation" - "ml-helpers/MLUtils" - "ml-helpers/TacticTutorial" - "ml-helpers/MkTermAntiquote_Tests" - "ml-helpers/TacticAntiquotation_Test" - "ml-helpers/TermPatternAntiquote_Tests" FP_Eval - "subgoal_focus/Subgoal_Methods" Insulin ExtraCorres NICTATools BCorres_UL Qualify LexordList - Rule_By_Method Defs Distinct_Cmd Match_Abbreviation @@ -76,8 +59,7 @@ session Lib (lib) = Word_Lib + DataMap FastMap RangeMap - Corres_Method - Conjuncts + CorresK_Method DetWPLib Guess_ExI GenericTag @@ -85,30 +67,11 @@ session Lib (lib) = Word_Lib + Value_Type Named_Eta Rules_Tac + Heap_List + None_Top_Bot - (* should really be a separate session, but too entangled atm: *) + (* should move to Monads: *) NonDetMonadLemmaBucket - "Monad_WP/WhileLoopRules" - "Monad_WP/TraceMonad" - "Monad_WP/OptionMonadND" - "Monad_WP/OptionMonadWP" - "Monad_WP/Strengthen_Demo" - "Monad_WP/TraceMonadLemmas" - "Monad_WP/wp/WPBang" - "Monad_WP/wp/WPFix" - "Monad_WP/wp/Eisbach_WP" - "Monad_WP/wp/WPI" - 
"Monad_WP/wp/WPC" - "Monad_WP/wp/WP_Pre" - "Monad_WP/wp/WP" - "Monad_WP/Datatype_Schematic" - "Monad_WP/WhileLoopRulesCompleteness" - "Monad_WP/Strengthen" - "Monad_WP/OptionMonad" - "Monad_WP/TraceMonadVCG" - "Monad_WP/NonDetMonadVCG" - "Monad_WP/NonDetMonad" - "Monad_WP/NonDetMonadLemmas" session CLib (lib) in clib = CParser + sessions @@ -124,10 +87,8 @@ session CLib (lib) in clib = CParser + Simpl_Rewrite MonadicRewrite_C CTranslationNICTA - LemmaBucket_C SIMPL_Lemmas SimplRewrite - TypHeapLib BitFieldProofsLib XPres @@ -145,6 +106,7 @@ session LibTest (lib) in test = Refine + ASpec ExecSpec theories + Corres_Test Crunch_Test_NonDet Crunch_Test_Qualified_NonDet Crunch_Test_Qualified_Trace @@ -159,15 +121,15 @@ session LibTest (lib) in test = Refine + RangeMap_Test FP_Eval_Tests Trace_Schematic_Insts_Test - Local_Method_Tests Qualify_Test Locale_Abbrev_Test Value_Type_Test Named_Eta_Test Rules_Tac_Test + MonadicRewrite_Test (* use virtual memory function as an example, only makes sense on ARM: *) theories [condition = "L4V_ARCH_IS_ARM"] - Corres_Test + CorresK_Test session SepTactics (lib) in Hoare_Sep_Tactics = Sep_Algebra + theories diff --git a/lib/RangeMap.thy b/lib/RangeMap.thy index c37f8f1063..4c539d770d 100644 --- a/lib/RangeMap.thy +++ b/lib/RangeMap.thy @@ -8,7 +8,7 @@ theory RangeMap imports FastMap FP_Eval - MkTermAntiquote + ML_Utils.MkTermAntiquote begin text \ diff --git a/lib/Repeat_Attribute.thy b/lib/Repeat_Attribute.thy index 4d121e47e7..b869e1efa9 100644 --- a/lib/Repeat_Attribute.thy +++ b/lib/Repeat_Attribute.thy @@ -19,10 +19,7 @@ fun apply_attributes attrs thm ctxt = in if Thm.eq_thm (thm, thm') then (SOME ctxt', SOME thm) else - apply_attributes attrs thm' ctxt' - handle e => - (if Exn.is_interrupt e then Exn.reraise e else (); - (SOME ctxt', SOME thm')) + \<^try>\apply_attributes attrs thm' ctxt' catch _ => (SOME ctxt', SOME thm')\ end fun repeat_attribute_cmd attr_srcs (ctxt, thm) = diff --git a/lib/Requalify.thy b/lib/Requalify.thy index 34facce853..b06fdb3719 100644 --- a/lib/Requalify.thy +++ b/lib/Requalify.thy @@ -49,7 +49,7 @@ in end fun syntax_alias global_alias local_alias b (name : string) = - Local_Theory.declaration {syntax = false, pervasive = true} (fn phi => + Local_Theory.declaration {syntax = false, pos = Position.none, pervasive = true} (fn phi => let val b' = Morphism.binding phi b in Context.mapping (global_alias b' name) (local_alias b' name) end); diff --git a/lib/SimpStrategy.thy b/lib/SimpStrategy.thy index 063dbfc388..1d1087723e 100644 --- a/lib/SimpStrategy.thy +++ b/lib/SimpStrategy.thy @@ -90,15 +90,16 @@ fun simp_strategy_True_conv ct = case Thm.term_of ct of fun new_simp_strategy thy (name : term) ss rewr_True = let val ctxt = Proof_Context.init_global thy; - val ss = Simplifier.make_simproc ctxt ("simp_strategy_" ^ fst (dest_Const name)) - {lhss = [@{term simp_strategy} $ name $ @{term x}], + val ss = Simplifier.make_simproc ctxt + {name = "simp_strategy_" ^ fst (dest_Const name), + lhss = [@{term simp_strategy} $ name $ @{term x}], proc = (fn _ => fn ctxt' => fn ct => ct |> (Conv.arg_conv (Simplifier.rewrite (put_simpset ss ctxt')) then_conv (if rewr_True then simp_strategy_True_conv else Conv.all_conv)) - |> (fn c => if Thm.is_reflexive c then NONE else SOME c)) - } + |> (fn c => if Thm.is_reflexive c then NONE else SOME c)), + identifier = []} in ss end diff --git a/lib/SpecValid_R.thy b/lib/SpecValid_R.thy index f3bcff64b9..d05ed38c69 100644 --- a/lib/SpecValid_R.thy +++ b/lib/SpecValid_R.thy @@ -46,14 +46,14 @@ lemma 
drop_equalled_validE: lemma drop_spec_valid[wp_split]: "\P\ f \Q\ \ s \ \P\ f \Q\" apply (simp add: spec_valid_def) - apply (erule hoare_vcg_precond_imp) + apply (erule hoare_weaken_pre) apply clarsimp done lemma drop_spec_validE[wp_split]: "\P\ f \Q\,\E\ \ s \ \P\ f \Q\,\E\" apply (simp add: spec_validE_def) - apply (erule hoare_vcg_precond_impE) + apply (erule hoare_weaken_preE) apply clarsimp done diff --git a/lib/StateMonad.thy b/lib/StateMonad.thy index 750dd31430..1e665f24fa 100644 --- a/lib/StateMonad.thy +++ b/lib/StateMonad.thy @@ -10,7 +10,7 @@ chapter "Monads" -theory StateMonad (* FIXME: untested/unused *) +theory StateMonad (* unused *) imports Lib begin @@ -400,34 +400,6 @@ lemma validE_def2: "\P\ f \Q\,\R\ \ \P\ f \ \r s. case r of Inr b \ Q b s | Inl a \ R a s \" by (unfold valid_def validE_def) -(* FIXME: modernize *) -syntax top :: "'a \ bool" ("\") - bottom :: "'a \ bool" ("\") - -translations - "\" == "\_. CONST True" - "\" == "\_. CONST False" - -definition - bipred_conj :: "('a \ 'b \ bool) \ ('a \ 'b \ bool) \ ('a \ 'b \ bool)" (infixl "And" 96) -where - "bipred_conj P Q \ \x y. P x y \ Q x y" - -definition - bipred_disj :: "('a \ 'b \ bool) \ ('a \ 'b \ bool) \ ('a \ 'b \ bool)" (infixl "Or" 91) -where - "bipred_disj P Q \ \x y. P x y \ Q x y" - -definition - bipred_neg :: "('a \ 'b \ bool) \ ('a \ 'b \ bool)" ("Not _") where - "bipred_neg P \ \x y. \ P x y" - -syntax toptop :: "'a \ 'b \ bool" ("\\") - botbot :: "'a \ 'b \ bool" ("\\") - -translations "\\" == "\_ _. CONST True" - "\\" == "\_ _. CONST False" - definition pred_lift_exact :: "('a \ bool) \ ('b \ bool) \ ('a \ 'b \ bool)" ("\_,_\") where "pred_lift_exact P Q \ \x y. P x \ Q y" @@ -445,48 +417,12 @@ lemma pred_liftI[intro!]: "\ P x; Q y \ \ \P,Q\ = (\P,\\ And \\,Q\)" - by (simp add:pred_lift_exact_def bipred_conj_def) - -lemma pred_andE[elim!]: "\ (A and B) x; \ A x; B x \ \ R \ \ R" - by (simp add:pred_conj_def) - -lemma pred_andI[intro!]: "\ A x; B x \ \ (A and B) x" - by (simp add:pred_conj_def) - -lemma bipred_conj_app[simp]: "(P And Q) x = (P x and Q x)" - by (simp add:pred_conj_def bipred_conj_def) - -lemma bipred_disj_app[simp]: "(P Or Q) x = (P x or Q x)" - by (simp add:pred_disj_def bipred_disj_def) - -lemma pred_conj_app[simp]: "(P and Q) x = (P x \ Q x)" - by (simp add:pred_conj_def) - -lemma pred_disj_app[simp]: "(P or Q) x = (P x \ Q x)" - by (simp add:pred_disj_def) - -lemma pred_notnotD[simp]: "(not not P) = P" - by (simp add:pred_neg_def) - -lemma bipred_notnotD[simp]: "(Not Not P) = P" - by (simp add:bipred_neg_def) + "\P,Q\ = (\P,\\ and \\,Q\)" + by (simp add:pred_lift_exact_def pred_conj_def) lemma pred_lift_add[simp]: "\P,Q\ x = ((\s. P x) and Q)" by (simp add:pred_lift_exact_def pred_conj_def) -lemma pred_and_true[simp]: "(P and \) = P" - by (simp add:pred_conj_def) - -lemma pred_and_true_var[simp]: "(\ and P) = P" - by (simp add:pred_conj_def) - -lemma pred_and_false[simp]: "(P and \) = \" - by (simp add:pred_conj_def) - -lemma pred_and_false_var[simp]: "(\ and P) = \" - by (simp add:pred_conj_def) - lemma seq': "\ \A\ f \B\; \x. 
P x \ \C\ g x \D\; @@ -569,8 +505,8 @@ lemma return_sp: by (simp add:return_def valid_def) lemma hoare_post_conj [intro!]: - "\ \ P \ a \ Q \; \ P \ a \ R \ \ \ \ P \ a \ Q And R \" - by (simp add:valid_def split_def bipred_conj_def) + "\ \ P \ a \ Q \; \ P \ a \ R \ \ \ \ P \ a \ Q and R \" + by (simp add:valid_def split_def pred_conj_def) lemma hoare_pre_disj [intro!]: "\ \ P \ a \ R \; \ Q \ a \ R \ \ \ \ P or Q \ a \ R \" diff --git a/lib/SubMonadLib.thy b/lib/SubMonadLib.thy index 91c1f7d832..6449b57481 100644 --- a/lib/SubMonadLib.thy +++ b/lib/SubMonadLib.thy @@ -6,7 +6,7 @@ theory SubMonadLib imports - EmptyFailLib + Monads.Nondet_Empty_Fail Corres_UL begin @@ -168,11 +168,9 @@ lemma submonad_bind: apply (subst select_f_stateAssert, assumption) apply (subst gets_stateAssert) apply (subst bind_subst_lift [OF stateAssert_stateAssert]) - apply (clarsimp simp: pred_conj_def) - apply (clarsimp simp: bind_assoc split_def select_f_walk - empty_fail_stateAssert empty_failD - bind_subst_lift[OF modify_modify] submonad_args.args o_def - bind_subst_lift[OF bind_select_f_bind]) + apply (clarsimp simp: bind_assoc split_def select_f_walk empty_failD pred_conj_def + bind_subst_lift[OF modify_modify] submonad_args.args o_def + bind_subst_lift[OF bind_select_f_bind]) done lemma (in submonad) guard_preserved: @@ -275,7 +273,7 @@ proof (induct l) using sm sm' efm apply (simp add: mapM_Cons) apply (simp add: bind_subst_lift [OF submonad.stateAssert_fn]) - apply (simp add: bind_assoc submonad_bind submonad.return) + apply (simp add: bind_assoc submonad_bind submonad.return empty_fail_cond) apply (subst submonad.fn_stateAssert [OF sm']) apply (intro ext bind_apply_cong [OF refl]) apply (subgoal_tac "g sta") @@ -345,13 +343,13 @@ lemma corres_submonad: apply (fastforce simp: corres_underlying_def stateAssert_def get_def assert_def return_def bind_def) apply (rule corres_underlying_split [where r'="\x y. (x, y) \ ssr", - OF _ _ hoare_post_taut hoare_post_taut]) + OF _ _ hoare_TrueI hoare_TrueI]) apply clarsimp apply (rule corres_underlying_split [where r'="\(x, x') (y, y'). 
rvr x y \ (x', y') \ ssr", - OF _ _ hoare_post_taut hoare_post_taut]) + OF _ _ hoare_TrueI hoare_TrueI]) defer apply clarsimp - apply (rule corres_underlying_split [where r'=dc, OF _ _ hoare_post_taut hoare_post_taut]) + apply (rule corres_underlying_split [where r'=dc, OF _ _ hoare_TrueI hoare_TrueI]) apply (simp add: corres_modify') apply clarsimp apply (rule corres_select_f_stronger) @@ -415,6 +413,7 @@ proof - done note empty_failD [OF efim, simp] note empty_failD [OF efim', simp] + note empty_fail_select_f[simp] show ?thesis apply (clarsimp simp: submonad_fn_def y bind_assoc split_def) apply (subst bind_subst_lift [OF modify_stateAssert], rule gp gp')+ diff --git a/lib/Try_Attribute.thy b/lib/Try_Attribute.thy index 5cf608044c..e635fb0d21 100644 --- a/lib/Try_Attribute.thy +++ b/lib/Try_Attribute.thy @@ -19,12 +19,11 @@ fun try_attribute_cmd (warn, attr_srcs) (ctxt, thm) = let val attrs = map (attribute_generic ctxt) attr_srcs val (th', context') = - fold (uncurry o Thm.apply_attribute) attrs (thm, ctxt) - handle e => - (if Exn.is_interrupt e then Exn.reraise e - else if warn then warning ("TRY: ignoring exception: " ^ (@{make_string} e)) - else (); - (thm, ctxt)) + \<^try>\ + fold (uncurry o Thm.apply_attribute) attrs (thm, ctxt) + catch e => + (if warn then warning ("TRY: ignoring exception: " ^ (@{make_string} e)) else (); + (thm, ctxt))\ in (SOME context', SOME th') end in diff --git a/lib/Try_Methods.thy b/lib/Try_Methods.thy index 238e7a873f..a4aa2ce97a 100644 --- a/lib/Try_Methods.thy +++ b/lib/Try_Methods.thy @@ -6,7 +6,7 @@ theory Try_Methods -imports Eisbach_Methods +imports Eisbach_Tools.Eisbach_Methods keywords "trym" :: diag and "add_try_method" :: thy_decl diff --git a/lib/Value_Type.thy b/lib/Value_Type.thy index ce16539ffc..2c1c4a6a52 100644 --- a/lib/Value_Type.thy +++ b/lib/Value_Type.thy @@ -10,7 +10,7 @@ keywords "value_type" :: thy_decl begin (* - Define a type synonym from a term that evaluates to a numeral. + Define a type synonym from a term of type nat or int that evaluates to a (positive) numeral. Examples: @@ -18,7 +18,7 @@ begin value_type num_something = "10 * num_domains" *) -text \See theory @{file "test/Value_Type_Test.thy"} for further example/demo.\ +text \See theory @{text "test/Value_Type_Test.thy"} for further example/demo.\ ML \ @@ -41,6 +41,8 @@ fun force_nat_numeral (Const (@{const_name numeral}, Type ("fun", [num, _])) $ n | force_nat_numeral (Const (@{const_name "Groups.zero"}, _)) = @{term "0::nat"} | force_nat_numeral t = raise TERM ("force_nat_numeral: number expected", [t]) +fun cast_to_nat t = if type_of t = @{typ int} then @{term nat} $ t else t + fun make_type binding v lthy = let val n = case get_term_numeral v of @@ -51,12 +53,31 @@ fun make_type binding v lthy = lthy |> Typedecl.abbrev (binding, [], Mixfix.NoSyn) typ |> #2 end -fun make_def binding v lthy = +(* Copied from method eval in HOL.thy: *) +fun eval_tac ctxt = + let val conv = Code_Runtime.dynamic_holds_conv + in + CONVERSION (Conv.params_conv ~1 (Conv.concl_conv ~1 o conv) ctxt) THEN' + resolve_tac ctxt [TrueI] + end + +(* This produces two theorems: one symbolic _def theorem and one numeric _val theorem. + The _def theorem is a definition, via Specification.definition. + The _val theorem is proved from that definition using "eval_tac" via the code generator. 
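+   For illustration, reusing the example from the header comment of this theory (where
+   num_domains is assumed to be a previously defined nat constant), the command
+
+     value_type num_something = "10 * num_domains"
+
+   is expected to yield, besides the type synonym, facts roughly of the form
+
+     num_something_def: "num_something = 10 * num_domains"
+     num_something_val: "num_something = <the evaluated numeral>"
+
+   where the _val theorem is the _def theorem chained with the eval_tac-proved equation via trans.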
*) +fun make_defs binding t v lthy = let + val t = cast_to_nat t val mk_eq = HOLogic.mk_Trueprop o HOLogic.mk_eq - val def_t = mk_eq (Free (Binding.name_of binding, @{typ nat}), force_nat_numeral v) + val def_t = mk_eq (Free (Binding.name_of binding, @{typ nat}), t) + val ((_, (_, def_thm)), lthy') = + lthy |> Specification.definition NONE [] [] (Binding.empty_atts, def_t) + val eq_t = mk_eq (t, force_nat_numeral v) + val eq_thm = + Goal.prove lthy' [] [] eq_t (fn {context = ctxt, prems = _} => eval_tac ctxt 1) + val thm = @{thm trans} OF [def_thm, eq_thm] + val val_binding = Binding.map_name (fn n => n ^ "_val") binding |> Binding.reset_pos in - lthy |> Specification.definition NONE [] [] (Binding.empty_atts, def_t) |> #2 + Local_Theory.note ((val_binding, []), [thm]) lthy' |> #2 end in @@ -68,7 +89,7 @@ fun value_type_cmd no_def binding t lthy = in lthy |> make_type binding v - |> (if no_def then I else make_def binding v) + |> (if no_def then I else make_defs binding t' v) end val no_def_option = @@ -86,20 +107,22 @@ end \ (* -Potential extension idea for the future: +Potential extension ideas for the future: -It may be possible to generalise this command to non-numeral types -- as long as the RHS can -be interpreted as some nat n, we can in theory always define a type with n elements, and instantiate -that type into class finite. Might have to present a goal to the user that RHS evaluates > 0 in nat. +* It may be possible to generalise this command to non-numeral types -- as long as the RHS can + be interpreted as some nat n, we can in theory always define a type with n elements, and + instantiate that type into class finite. Might have to present a goal to the user that RHS + evaluates > 0 in nat. -There are a few wrinkles with that, because currently you can use any type on the RHS without -complications. Requiring nat for the RHS term would not be great, because we often have word there. -We could add coercion to nat for word and int, though, that would cover all current use cases. + The benefit of defining a new type instead of a type synonym for a numeral type is that type + checking is now more meaningful and we do get some abstraction over the actual value, which would + help make proofs more generic. -The benefit of defining a new type instead of a type synonym for a numeral type is that type -checking is now more meaningful and we do get some abstraction over the actual value, which would -help make proofs more generic. -*) + The disadvantage of a non-numeral type is that it is not equal to the types that come out of the + C parser. +* We could add more automatic casts from known types to nat (e.g. from word). But it's relatively + low overhead to provide the cast as a user. 
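+  (Illustration with a hypothetical constant: for a word-valued cfg_size :: "32 word" one would
+  currently write value_type n_slots = "unat cfg_size", supplying the cast to nat explicitly.)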
+*) end diff --git a/lib/Word_Lib/Ancient_Numeral.thy b/lib/Word_Lib/Ancient_Numeral.thy deleted file mode 100644 index 88d054072e..0000000000 --- a/lib/Word_Lib/Ancient_Numeral.thy +++ /dev/null @@ -1,237 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) - -theory Ancient_Numeral - imports Main Reversed_Bit_Lists Legacy_Aliases -begin - -definition Bit :: "int \ bool \ int" (infixl "BIT" 90) - where "k BIT b = (if b then 1 else 0) + k + k" - -lemma Bit_B0: "k BIT False = k + k" - by (simp add: Bit_def) - -lemma Bit_B1: "k BIT True = k + k + 1" - by (simp add: Bit_def) - -lemma Bit_B0_2t: "k BIT False = 2 * k" - by (rule trans, rule Bit_B0) simp - -lemma Bit_B1_2t: "k BIT True = 2 * k + 1" - by (rule trans, rule Bit_B1) simp - -lemma uminus_Bit_eq: - "- k BIT b = (- k - of_bool b) BIT b" - by (cases b) (simp_all add: Bit_def) - -lemma power_BIT: "2 ^ Suc n - 1 = (2 ^ n - 1) BIT True" - by (simp add: Bit_B1) - -lemma bin_rl_simp [simp]: "bin_rest w BIT bin_last w = w" - by (simp add: Bit_def) - -lemma bin_rest_BIT [simp]: "bin_rest (x BIT b) = x" - by (simp add: Bit_def) - -lemma even_BIT [simp]: "even (x BIT b) \ \ b" - by (simp add: Bit_def) - -lemma bin_last_BIT [simp]: "bin_last (x BIT b) = b" - by simp - -lemma BIT_eq_iff [iff]: "u BIT b = v BIT c \ u = v \ b = c" - by (auto simp: Bit_def) arith+ - -lemma BIT_bin_simps [simp]: - "numeral k BIT False = numeral (Num.Bit0 k)" - "numeral k BIT True = numeral (Num.Bit1 k)" - "(- numeral k) BIT False = - numeral (Num.Bit0 k)" - "(- numeral k) BIT True = - numeral (Num.BitM k)" - by (simp_all only: Bit_B0 Bit_B1 numeral.simps numeral_BitM) - -lemma BIT_special_simps [simp]: - shows "0 BIT False = 0" - and "0 BIT True = 1" - and "1 BIT False = 2" - and "1 BIT True = 3" - and "(- 1) BIT False = - 2" - and "(- 1) BIT True = - 1" - by (simp_all add: Bit_def) - -lemma Bit_eq_0_iff: "w BIT b = 0 \ w = 0 \ \ b" - by (auto simp: Bit_def) arith - -lemma Bit_eq_m1_iff: "w BIT b = -1 \ w = -1 \ b" - by (auto simp: Bit_def) arith - -lemma expand_BIT: - "numeral (Num.Bit0 w) = numeral w BIT False" - "numeral (Num.Bit1 w) = numeral w BIT True" - "- numeral (Num.Bit0 w) = (- numeral w) BIT False" - "- numeral (Num.Bit1 w) = (- numeral (w + Num.One)) BIT True" - by (simp_all add: BitM_inc_eq add_One) - -lemma less_Bits: "v BIT b < w BIT c \ v < w \ v \ w \ \ b \ c" - by (auto simp: Bit_def) - -lemma le_Bits: "v BIT b \ w BIT c \ v < w \ v \ w \ (\ b \ c)" - by (auto simp: Bit_def) - -lemma pred_BIT_simps [simp]: - "x BIT False - 1 = (x - 1) BIT True" - "x BIT True - 1 = x BIT False" - by (simp_all add: Bit_B0_2t Bit_B1_2t) - -lemma succ_BIT_simps [simp]: - "x BIT False + 1 = x BIT True" - "x BIT True + 1 = (x + 1) BIT False" - by (simp_all add: Bit_B0_2t Bit_B1_2t) - -lemma add_BIT_simps [simp]: - "x BIT False + y BIT False = (x + y) BIT False" - "x BIT False + y BIT True = (x + y) BIT True" - "x BIT True + y BIT False = (x + y) BIT True" - "x BIT True + y BIT True = (x + y + 1) BIT False" - by (simp_all add: Bit_B0_2t Bit_B1_2t) - -lemma mult_BIT_simps [simp]: - "x BIT False * y = (x * y) BIT False" - "x * y BIT False = (x * y) BIT False" - "x BIT True * y = (x * y) BIT False + y" - by (simp_all add: Bit_B0_2t Bit_B1_2t algebra_simps) - -lemma B_mod_2': "X = 2 \ (w BIT True) mod X = 1 \ (w BIT False) mod X = 0" - by (simp add: Bit_B0 Bit_B1) - -lemma bin_ex_rl: "\w b. w BIT b = bin" - by (metis bin_rl_simp) - -lemma bin_exhaust: "(\x b. 
bin = x BIT b \ Q) \ Q" -by (metis bin_ex_rl) - -lemma bin_abs_lem: "bin = (w BIT b) \ bin \ -1 \ bin \ 0 \ nat \w\ < nat \bin\" - apply clarsimp - apply (unfold Bit_def) - apply (cases b) - apply (clarsimp, arith) - apply (clarsimp, arith) - done - -lemma bin_induct: - assumes PPls: "P 0" - and PMin: "P (- 1)" - and PBit: "\bin bit. P bin \ P (bin BIT bit)" - shows "P bin" - apply (rule_tac P=P and a=bin and f1="nat \ abs" in wf_measure [THEN wf_induct]) - apply (simp add: measure_def inv_image_def) - apply (case_tac x rule: bin_exhaust) - apply (frule bin_abs_lem) - apply (auto simp add : PPls PMin PBit) - done - -lemma Bit_div2: "(w BIT b) div 2 = w" - by (fact bin_rest_BIT) - -lemma twice_conv_BIT: "2 * x = x BIT False" - by (simp add: Bit_def) - -lemma BIT_lt0 [simp]: "x BIT b < 0 \ x < 0" -by(cases b)(auto simp add: Bit_def) - -lemma BIT_ge0 [simp]: "x BIT b \ 0 \ x \ 0" -by(cases b)(auto simp add: Bit_def) - -lemma bin_to_bl_aux_Bit_minus_simp [simp]: - "0 < n \ bin_to_bl_aux n (w BIT b) bl = bin_to_bl_aux (n - 1) w (b # bl)" - by (cases n) auto - -lemma bl_to_bin_BIT: - "bl_to_bin bs BIT b = bl_to_bin (bs @ [b])" - by (simp add: bl_to_bin_append Bit_def) - -lemma bin_nth_0_BIT: "bin_nth (w BIT b) 0 \ b" - by simp - -lemma bin_nth_Suc_BIT: "bin_nth (w BIT b) (Suc n) = bin_nth w n" - by (simp add: bit_Suc) - -lemma bin_nth_minus [simp]: "0 < n \ bin_nth (w BIT b) n = bin_nth w (n - 1)" - by (cases n) (simp_all add: bit_Suc) - -lemma bin_sign_simps [simp]: - "bin_sign (w BIT b) = bin_sign w" - by (simp add: bin_sign_def Bit_def) - -lemma bin_nth_Bit: "bin_nth (w BIT b) n \ n = 0 \ b \ (\m. n = Suc m \ bin_nth w m)" - by (cases n) auto - -lemmas sbintrunc_Suc_BIT [simp] = - signed_take_bit_Suc [where a="w BIT b", simplified bin_last_BIT bin_rest_BIT] for w b - -lemmas sbintrunc_0_BIT_B0 [simp] = - signed_take_bit_0 [where a="w BIT False", simplified bin_last_numeral_simps bin_rest_numeral_simps] - for w - -lemmas sbintrunc_0_BIT_B1 [simp] = - signed_take_bit_0 [where a="w BIT True", simplified bin_last_BIT bin_rest_numeral_simps] - for w - -lemma sbintrunc_Suc_minus_Is: - \0 < n \ - sbintrunc (n - 1) w = y \ - sbintrunc n (w BIT b) = y BIT b\ - by (cases n) (simp_all add: Bit_def signed_take_bit_Suc) - -lemma bin_cat_Suc_Bit: "bin_cat w (Suc n) (v BIT b) = bin_cat w n v BIT b" - by (auto simp add: Bit_def concat_bit_Suc) - -context - includes bit_operations_syntax -begin - -lemma int_not_BIT [simp]: "NOT (w BIT b) = (NOT w) BIT (\ b)" - by (simp add: not_int_def Bit_def) - -lemma int_and_Bits [simp]: "(x BIT b) AND (y BIT c) = (x AND y) BIT (b \ c)" - using and_int_rec [of \x BIT b\ \y BIT c\] by (auto simp add: Bit_B0_2t Bit_B1_2t) - -lemma int_or_Bits [simp]: "(x BIT b) OR (y BIT c) = (x OR y) BIT (b \ c)" - using or_int_rec [of \x BIT b\ \y BIT c\] by (auto simp add: Bit_B0_2t Bit_B1_2t) - -lemma int_xor_Bits [simp]: "(x BIT b) XOR (y BIT c) = (x XOR y) BIT ((b \ c) \ \ (b \ c))" - using xor_int_rec [of \x BIT b\ \y BIT c\] by (auto simp add: Bit_B0_2t Bit_B1_2t) - -end - -lemma mod_BIT: - "bin BIT bit mod 2 ^ Suc n = (bin mod 2 ^ n) BIT bit" for bit -proof - - have "2 * (bin mod 2 ^ n) + 1 = (2 * bin mod 2 ^ Suc n) + 1" - by (simp add: mod_mult_mult1) - also have "\ = ((2 * bin mod 2 ^ Suc n) + 1) mod 2 ^ Suc n" - by (simp add: ac_simps pos_zmod_mult_2) - also have "\ = (2 * bin + 1) mod 2 ^ Suc n" - by (simp only: mod_simps) - finally show ?thesis - by (auto simp add: Bit_def) -qed - -lemma minus_BIT_0: fixes x y :: int shows "x BIT b - y BIT False = (x - y) BIT b" -by(simp add: 
Bit_def) - -lemma int_lsb_BIT [simp]: fixes x :: int shows - "lsb (x BIT b) \ b" -by(simp add: lsb_int_def) - -lemma int_shiftr_BIT [simp]: fixes x :: int - shows int_shiftr0: "drop_bit 0 x = x" - and int_shiftr_Suc: "drop_bit (Suc n) (x BIT b) = drop_bit n x" - by (simp_all add: drop_bit_Suc) - -lemma msb_BIT [simp]: "msb (x BIT b) = msb x" -by(simp add: msb_int_def) - -end \ No newline at end of file diff --git a/lib/Word_Lib/Bit_Comprehension.thy b/lib/Word_Lib/Bit_Comprehension.thy index 647828efdd..8dbed8d013 100644 --- a/lib/Word_Lib/Bit_Comprehension.thy +++ b/lib/Word_Lib/Bit_Comprehension.thy @@ -22,46 +22,6 @@ lemma set_bits_False_eq [simp]: end -instantiation int :: bit_comprehension -begin - -definition - \set_bits f = ( - if \n. \m\n. f m = f n then - let n = LEAST n. \m\n. f m = f n - in signed_take_bit n (horner_sum of_bool 2 (map f [0.. - -instance proof - fix k :: int - from int_bit_bound [of k] - obtain n where *: \\m. n \ m \ bit k m \ bit k n\ - and **: \n > 0 \ bit k (n - 1) \ bit k n\ - by blast - then have ***: \\n. \n'\n. bit k n' \ bit k n\ - by meson - have l: \(LEAST q. \m\q. bit k m \ bit k q) = n\ - apply (rule Least_equality) - using * apply blast - apply (metis "**" One_nat_def Suc_pred le_cases le0 neq0_conv not_less_eq_eq) - done - show \set_bits (bit k) = k\ - apply (simp only: *** set_bits_int_def horner_sum_bit_eq_take_bit l) - apply simp - apply (rule bit_eqI) - apply (simp add: bit_signed_take_bit_iff min_def) - apply (auto simp add: not_le bit_take_bit_iff dest: *) - done -qed - -end - -lemma int_set_bits_K_False [simp]: "(BITS _. False) = (0 :: int)" - by (simp add: set_bits_int_def) - -lemma int_set_bits_K_True [simp]: "(BITS _. True) = (-1 :: int)" - by (simp add: set_bits_int_def) - instantiation word :: (len) bit_comprehension begin @@ -77,174 +37,51 @@ lemma bit_set_bits_word_iff [bit_simps]: \bit (set_bits P :: 'a::len word) n \ n < LENGTH('a) \ P n\ by (auto simp add: word_set_bits_def bit_horner_sum_bit_word_iff) -lemma set_bits_K_False [simp]: +lemma word_of_int_conv_set_bits: "word_of_int i = (BITS n. bit i n)" + by (rule bit_eqI) (auto simp add: bit_simps) + +lemma set_bits_K_False: \set_bits (\_. False) = (0 :: 'a :: len word)\ - by (rule bit_word_eqI) (simp add: bit_set_bits_word_iff) - -lemma set_bits_int_unfold': - \set_bits f = - (if \n. \n'\n. \ f n' then - let n = LEAST n. \n'\n. \ f n' - in horner_sum of_bool 2 (map f [0..n. \n'\n. f n' then - let n = LEAST n. \n'\n. f n' - in signed_take_bit n (horner_sum of_bool 2 (map f [0.. -proof (cases \\n. \m\n. f m \ f n\) - case True - then obtain q where q: \\m\q. f m \ f q\ - by blast - define n where \n = (LEAST n. \m\n. f m \ f n)\ - have \\m\n. f m \ f n\ - unfolding n_def - using q by (rule LeastI [of _ q]) - then have n: \\m. n \ m \ f m \ f n\ - by blast - from n_def have n_eq: \(LEAST q. \m\q. f m \ f n) = n\ - by (smt (verit, best) Least_le \\m\n. f m = f n\ dual_order.antisym wellorder_Least_lemma(1)) - show ?thesis - proof (cases \f n\) - case False - with n have *: \\n. \n'\n. \ f n'\ - by blast - have **: \(LEAST n. \n'\n. \ f n') = n\ - using False n_eq by simp - from * False show ?thesis - apply (simp add: set_bits_int_def n_def [symmetric] ** del: upt.upt_Suc) - apply (auto simp add: take_bit_horner_sum_bit_eq - bit_horner_sum_bit_iff take_map - signed_take_bit_def set_bits_int_def - horner_sum_bit_eq_take_bit simp del: upt.upt_Suc) - done - next - case True - with n have *: \\n. \n'\n. f n'\ - by blast - have ***: \\ (\n. \n'\n. 
\ f n')\ - apply (rule ccontr) - using * nat_le_linear by auto - have **: \(LEAST n. \n'\n. f n') = n\ - using True n_eq by simp - from * *** True show ?thesis - apply (simp add: set_bits_int_def n_def [symmetric] ** del: upt.upt_Suc) - apply (auto simp add: take_bit_horner_sum_bit_eq - bit_horner_sum_bit_iff take_map - signed_take_bit_def set_bits_int_def - horner_sum_bit_eq_take_bit nth_append simp del: upt.upt_Suc) - done - qed -next - case False - then show ?thesis - by (auto simp add: set_bits_int_def) -qed - -inductive wf_set_bits_int :: "(nat \ bool) \ bool" - for f :: "nat \ bool" -where - zeros: "\n' \ n. \ f n' \ wf_set_bits_int f" -| ones: "\n' \ n. f n' \ wf_set_bits_int f" - -lemma wf_set_bits_int_simps: "wf_set_bits_int f \ (\n. (\n'\n. \ f n') \ (\n'\n. f n'))" -by(auto simp add: wf_set_bits_int.simps) - -lemma wf_set_bits_int_const [simp]: "wf_set_bits_int (\_. b)" -by(cases b)(auto intro: wf_set_bits_int.intros) - -lemma wf_set_bits_int_fun_upd [simp]: - "wf_set_bits_int (f(n := b)) \ wf_set_bits_int f" (is "?lhs \ ?rhs") -proof - assume ?lhs - then obtain n' - where "(\n''\n'. \ (f(n := b)) n'') \ (\n''\n'. (f(n := b)) n'')" - by(auto simp add: wf_set_bits_int_simps) - hence "(\n''\max (Suc n) n'. \ f n'') \ (\n''\max (Suc n) n'. f n'')" by auto - thus ?rhs by(auto simp only: wf_set_bits_int_simps) -next - assume ?rhs - then obtain n' where "(\n''\n'. \ f n'') \ (\n''\n'. f n'')" (is "?wf f n'") - by(auto simp add: wf_set_bits_int_simps) - hence "?wf (f(n := b)) (max (Suc n) n')" by auto - thus ?lhs by(auto simp only: wf_set_bits_int_simps) -qed - -lemma wf_set_bits_int_Suc [simp]: - "wf_set_bits_int (\n. f (Suc n)) \ wf_set_bits_int f" (is "?lhs \ ?rhs") -by(auto simp add: wf_set_bits_int_simps intro: le_SucI dest: Suc_le_D) + by (fact set_bits_False_eq) + +lemma word_test_bit_set_bits: "bit (BITS n. f n :: 'a :: len word) n \ n < LENGTH('a) \ f n" + by (fact bit_set_bits_word_iff) context - fixes f - assumes wff: "wf_set_bits_int f" + includes bit_operations_syntax + fixes f :: \nat \ bool\ begin -lemma int_set_bits_unfold_BIT: - "set_bits f = of_bool (f 0) + (2 :: int) * set_bits (f \ Suc)" -using wff proof cases - case (zeros n) - show ?thesis - proof(cases "\n. \ f n") - case True - hence "f = (\_. False)" by auto - thus ?thesis using True by(simp add: o_def) - next - case False - then obtain n' where "f n'" by blast - with zeros have "(LEAST n. \n'\n. \ f n') = Suc (LEAST n. \n'\Suc n. \ f n')" - by(auto intro: Least_Suc) - also have "(\n. \n'\Suc n. \ f n') = (\n. \n'\n. \ f (Suc n'))" by(auto dest: Suc_le_D) - also from zeros have "\n'\n. \ f (Suc n')" by auto - ultimately show ?thesis using zeros - apply (simp (no_asm_simp) add: set_bits_int_unfold' exI - del: upt.upt_Suc flip: map_map split del: if_split) - apply (simp only: map_Suc_upt upt_conv_Cons) - apply simp - done - qed -next - case (ones n) - show ?thesis - proof(cases "\n. f n") - case True - hence "f = (\_. True)" by auto - thus ?thesis using True by(simp add: o_def) - next - case False - then obtain n' where "\ f n'" by blast - with ones have "(LEAST n. \n'\n. f n') = Suc (LEAST n. \n'\Suc n. f n')" - by(auto intro: Least_Suc) - also have "(\n. \n'\Suc n. f n') = (\n. \n'\n. f (Suc n'))" by(auto dest: Suc_le_D) - also from ones have "\n'\n. f (Suc n')" by auto - moreover from ones have "(\n. \n'\n. \ f n') = False" - by(auto intro!: exI[where x="max n m" for n m] simp add: max_def split: if_split_asm) - moreover hence "(\n. \n'\n. 
\ f (Suc n')) = False" - by(auto elim: allE[where x="Suc n" for n] dest: Suc_le_D) - ultimately show ?thesis using ones - apply (simp (no_asm_simp) add: set_bits_int_unfold' exI split del: if_split) - apply (auto simp add: Let_def hd_map map_tl[symmetric] map_map[symmetric] map_Suc_upt upt_conv_Cons signed_take_bit_Suc - not_le simp del: map_map) - done - qed -qed - -lemma bin_last_set_bits [simp]: - "odd (set_bits f :: int) = f 0" - by (subst int_set_bits_unfold_BIT) simp_all - -lemma bin_rest_set_bits [simp]: - "set_bits f div (2 :: int) = set_bits (f \ Suc)" - by (subst int_set_bits_unfold_BIT) simp_all - -lemma bin_nth_set_bits [simp]: - "bit (set_bits f :: int) m \ f m" -using wff proof (induction m arbitrary: f) - case 0 - then show ?case - by (simp add: Bit_Comprehension.bin_last_set_bits) -next - case Suc - from Suc.IH [of "f \ Suc"] Suc.prems show ?case - by (simp add: Bit_Comprehension.bin_rest_set_bits comp_def bit_Suc) -qed +definition set_bits_aux :: \nat \ 'a word \ 'a::len word\ + where \set_bits_aux n w = push_bit n w OR take_bit n (set_bits f)\ + +lemma bit_set_bit_aux [bit_simps]: + \bit (set_bits_aux n w) m \ m < LENGTH('a) \ + (if m < n then f m else bit w (m - n))\ for w :: \'a::len word\ + by (auto simp add: bit_simps set_bits_aux_def) + +corollary set_bits_conv_set_bits_aux: + \set_bits f = (set_bits_aux LENGTH('a) 0 :: 'a :: len word)\ + by (rule bit_word_eqI) (simp add: bit_simps) + +lemma set_bits_aux_0 [simp]: + \set_bits_aux 0 w = w\ + by (simp add: set_bits_aux_def) + +lemma set_bits_aux_Suc [simp]: + \set_bits_aux (Suc n) w = set_bits_aux n (push_bit 1 w OR (if f n then 1 else 0))\ + by (rule bit_word_eqI) (auto simp add: bit_simps le_less_Suc_eq mult.commute [of _ 2]) + +lemma set_bits_aux_simps [code]: + \set_bits_aux 0 w = w\ + \set_bits_aux (Suc n) w = set_bits_aux n (push_bit 1 w OR (if f n then 1 else 0))\ + by simp_all + +lemma set_bits_aux_rec: + \set_bits_aux n w = + (if n = 0 then w + else let n' = n - 1 in set_bits_aux n' (push_bit 1 w OR (if f n' then 1 else 0)))\ + by (cases n) simp_all end diff --git a/lib/Word_Lib/Bit_Comprehension_Int.thy b/lib/Word_Lib/Bit_Comprehension_Int.thy new file mode 100644 index 0000000000..b09d165947 --- /dev/null +++ b/lib/Word_Lib/Bit_Comprehension_Int.thy @@ -0,0 +1,225 @@ +(* + * Copyright Brian Huffman, PSU; Jeremy Dawson and Gerwin Klein, NICTA + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +section \Comprehension syntax for \int\\ + +theory Bit_Comprehension_Int + imports + Bit_Comprehension +begin + +instantiation int :: bit_comprehension +begin + +definition + \set_bits f = ( + if \n. \m\n. f m = f n then + let n = LEAST n. \m\n. f m = f n + in signed_take_bit n (horner_sum of_bool 2 (map f [0.. + +instance proof + fix k :: int + from int_bit_bound [of k] + obtain n where *: \\m. n \ m \ bit k m \ bit k n\ + and **: \n > 0 \ bit k (n - 1) \ bit k n\ + by blast + then have ***: \\n. \n'\n. bit k n' \ bit k n\ + by meson + have l: \(LEAST q. \m\q. bit k m \ bit k q) = n\ + apply (rule Least_equality) + using * apply blast + apply (metis "**" One_nat_def Suc_pred le_cases le0 neq0_conv not_less_eq_eq) + done + show \set_bits (bit k) = k\ + apply (simp only: *** set_bits_int_def horner_sum_bit_eq_take_bit l) + apply simp + apply (rule bit_eqI) + apply (simp add: bit_signed_take_bit_iff min_def) + apply (auto simp add: not_le bit_take_bit_iff dest: *) + done +qed + +end + +lemma int_set_bits_K_False [simp]: "(BITS _. 
False) = (0 :: int)" + by (simp add: set_bits_int_def) + +lemma int_set_bits_K_True [simp]: "(BITS _. True) = (-1 :: int)" + by (simp add: set_bits_int_def) + +lemma set_bits_code [code]: + "set_bits = Code.abort (STR ''set_bits is unsupported on type int'') (\_. set_bits :: _ \ int)" + by simp + +lemma set_bits_int_unfold': + \set_bits f = + (if \n. \n'\n. \ f n' then + let n = LEAST n. \n'\n. \ f n' + in horner_sum of_bool 2 (map f [0..n. \n'\n. f n' then + let n = LEAST n. \n'\n. f n' + in signed_take_bit n (horner_sum of_bool 2 (map f [0.. +proof (cases \\n. \m\n. f m \ f n\) + case True + then obtain q where q: \\m\q. f m \ f q\ + by blast + define n where \n = (LEAST n. \m\n. f m \ f n)\ + have \\m\n. f m \ f n\ + unfolding n_def + using q by (rule LeastI [of _ q]) + then have n: \\m. n \ m \ f m \ f n\ + by blast + from n_def have n_eq: \(LEAST q. \m\q. f m \ f n) = n\ + by (smt (verit, best) Least_le \\m\n. f m = f n\ dual_order.antisym wellorder_Least_lemma(1)) + show ?thesis + proof (cases \f n\) + case False + with n have *: \\n. \n'\n. \ f n'\ + by blast + have **: \(LEAST n. \n'\n. \ f n') = n\ + using False n_eq by simp + from * False show ?thesis + apply (simp add: set_bits_int_def n_def [symmetric] ** del: upt.upt_Suc) + apply (auto simp add: take_bit_horner_sum_bit_eq + bit_horner_sum_bit_iff take_map + signed_take_bit_def set_bits_int_def + horner_sum_bit_eq_take_bit simp del: upt.upt_Suc) + done + next + case True + with n have *: \\n. \n'\n. f n'\ + by blast + have ***: \\ (\n. \n'\n. \ f n')\ + apply (rule ccontr) + using * nat_le_linear by auto + have **: \(LEAST n. \n'\n. f n') = n\ + using True n_eq by simp + from * *** True show ?thesis + apply (simp add: set_bits_int_def n_def [symmetric] ** del: upt.upt_Suc) + apply (auto simp add: take_bit_horner_sum_bit_eq + bit_horner_sum_bit_iff take_map + signed_take_bit_def set_bits_int_def + horner_sum_bit_eq_take_bit nth_append simp del: upt.upt_Suc) + done + qed +next + case False + then show ?thesis + by (auto simp add: set_bits_int_def) +qed + +inductive wf_set_bits_int :: "(nat \ bool) \ bool" + for f :: "nat \ bool" +where + zeros: "\n' \ n. \ f n' \ wf_set_bits_int f" +| ones: "\n' \ n. f n' \ wf_set_bits_int f" + +lemma wf_set_bits_int_simps: "wf_set_bits_int f \ (\n. (\n'\n. \ f n') \ (\n'\n. f n'))" +by(auto simp add: wf_set_bits_int.simps) + +lemma wf_set_bits_int_const [simp]: "wf_set_bits_int (\_. b)" +by(cases b)(auto intro: wf_set_bits_int.intros) + +lemma wf_set_bits_int_fun_upd [simp]: + "wf_set_bits_int (f(n := b)) \ wf_set_bits_int f" (is "?lhs \ ?rhs") +proof + assume ?lhs + then obtain n' + where "(\n''\n'. \ (f(n := b)) n'') \ (\n''\n'. (f(n := b)) n'')" + by(auto simp add: wf_set_bits_int_simps) + hence "(\n''\max (Suc n) n'. \ f n'') \ (\n''\max (Suc n) n'. f n'')" by auto + thus ?rhs by(auto simp only: wf_set_bits_int_simps) +next + assume ?rhs + then obtain n' where "(\n''\n'. \ f n'') \ (\n''\n'. f n'')" (is "?wf f n'") + by(auto simp add: wf_set_bits_int_simps) + hence "?wf (f(n := b)) (max (Suc n) n')" by auto + thus ?lhs by(auto simp only: wf_set_bits_int_simps) +qed + +lemma wf_set_bits_int_Suc [simp]: + "wf_set_bits_int (\n. f (Suc n)) \ wf_set_bits_int f" (is "?lhs \ ?rhs") +by(auto simp add: wf_set_bits_int_simps intro: le_SucI dest: Suc_le_D) + +context + fixes f + assumes wff: "wf_set_bits_int f" +begin + +lemma int_set_bits_unfold_BIT: + "set_bits f = of_bool (f 0) + (2 :: int) * set_bits (f \ Suc)" +using wff proof cases + case (zeros n) + show ?thesis + proof(cases "\n. 
\ f n") + case True + hence "f = (\_. False)" by auto + thus ?thesis using True by(simp add: o_def) + next + case False + then obtain n' where "f n'" by blast + with zeros have "(LEAST n. \n'\n. \ f n') = Suc (LEAST n. \n'\Suc n. \ f n')" + by(auto intro: Least_Suc) + also have "(\n. \n'\Suc n. \ f n') = (\n. \n'\n. \ f (Suc n'))" by(auto dest: Suc_le_D) + also from zeros have "\n'\n. \ f (Suc n')" by auto + ultimately show ?thesis using zeros + apply (simp (no_asm_simp) add: set_bits_int_unfold' exI + del: upt.upt_Suc flip: map_map split del: if_split) + apply (simp only: map_Suc_upt upt_conv_Cons) + apply simp + done + qed +next + case (ones n) + show ?thesis + proof(cases "\n. f n") + case True + hence "f = (\_. True)" by auto + thus ?thesis using True by(simp add: o_def) + next + case False + then obtain n' where "\ f n'" by blast + with ones have "(LEAST n. \n'\n. f n') = Suc (LEAST n. \n'\Suc n. f n')" + by(auto intro: Least_Suc) + also have "(\n. \n'\Suc n. f n') = (\n. \n'\n. f (Suc n'))" by(auto dest: Suc_le_D) + also from ones have "\n'\n. f (Suc n')" by auto + moreover from ones have "(\n. \n'\n. \ f n') = False" + by(auto intro!: exI[where x="max n m" for n m] simp add: max_def split: if_split_asm) + moreover hence "(\n. \n'\n. \ f (Suc n')) = False" + by(auto elim: allE[where x="Suc n" for n] dest: Suc_le_D) + ultimately show ?thesis using ones + apply (simp (no_asm_simp) add: set_bits_int_unfold' exI split del: if_split) + apply (auto simp add: Let_def hd_map map_tl[symmetric] map_map[symmetric] map_Suc_upt upt_conv_Cons signed_take_bit_Suc + not_le simp del: map_map) + done + qed +qed + +lemma bin_last_set_bits [simp]: + "odd (set_bits f :: int) = f 0" + by (subst int_set_bits_unfold_BIT) simp_all + +lemma bin_rest_set_bits [simp]: + "set_bits f div (2 :: int) = set_bits (f \ Suc)" + by (subst int_set_bits_unfold_BIT) simp_all + +lemma bin_nth_set_bits [simp]: + "bit (set_bits f :: int) m \ f m" +using wff proof (induction m arbitrary: f) + case 0 + then show ?case + by (simp add: Bit_Comprehension_Int.bin_last_set_bits bit_0) +next + case Suc + from Suc.IH [of "f \ Suc"] Suc.prems show ?case + by (simp add: Bit_Comprehension_Int.bin_rest_set_bits comp_def bit_Suc) +qed + +end + +end diff --git a/lib/Word_Lib/Bits_Int.thy b/lib/Word_Lib/Bits_Int.thy index c81c48e02a..2587cfbf7c 100644 --- a/lib/Word_Lib/Bits_Int.thy +++ b/lib/Word_Lib/Bits_Int.thy @@ -812,10 +812,7 @@ lemma bin_sc_sc_diff: "m \ n \ bin_sc m c (bin_sc n b w) done lemma bin_nth_sc_gen: "(bit :: int \ nat \ bool) (bin_sc n b w) m = (if m = n then b else (bit :: int \ nat \ bool) w m)" - apply (induct n arbitrary: w m) - apply (case_tac m; simp add: bit_Suc) - apply (case_tac m; simp add: bit_Suc) - done + by (simp add: bit_simps) lemma bin_sc_eq: \bin_sc n False = unset_bit n\ @@ -1339,33 +1336,22 @@ lemma int_shiftr_numeral_Suc0 [simp]: lemma bin_nth_minus_p2: assumes sign: "bin_sign x = 0" - and y: "y = push_bit n 1" - and m: "m < n" - and x: "x < y" + and y: "y = push_bit n 1" + and m: "m < n" + and x: "x < y" shows "bit (x - y) m = bit x m" proof - - from sign y x have \x \ 0\ and \y = 2 ^ n\ and \x < 2 ^ n\ - by (simp_all add: bin_sign_def push_bit_eq_mult split: if_splits) - from \0 \ x\ \x < 2 ^ n\ \m < n\ have \bit x m \ bit (x - 2 ^ n) m\ - proof (induction m arbitrary: x n) - case 0 - then show ?case - by simp - next - case (Suc m) - moreover define q where \q = n - 1\ - ultimately have n: \n = Suc q\ - by simp - have \(x - 2 ^ Suc q) div 2 = x div 2 - 2 ^ q\ - by simp - moreover from Suc.IH [of \x 
div 2\ q] Suc.prems - have \bit (x div 2) m \ bit (x div 2 - 2 ^ q) m\ - by (simp add: n) - ultimately show ?case - by (simp add: bit_Suc n) - qed - with \y = 2 ^ n\ show ?thesis + from \bin_sign x = 0\ have \x \ 0\ + by (simp add: sign_Pls_ge_0) + moreover from x y have \x < 2 ^ n\ by simp + ultimately have \q < n\ if \bit x q\ for q + using that by (metis bit_take_bit_iff take_bit_int_eq_self) + then have \bit (x + NOT (mask n)) m = bit x m\ + using \m < n\ by (simp add: disjunctive_add bit_simps) + also have \x + NOT (mask n) = x - y\ + using y by (simp flip: minus_exp_eq_not_mask) + finally show ?thesis . qed lemma bin_clr_conv_NAND: @@ -1430,7 +1416,7 @@ lemma clearBit_no: lemma eq_mod_iff: "0 < n \ b = b mod n \ 0 \ b \ b < n" for b n :: int - by auto (metis pos_mod_conj)+ + using pos_mod_sign [of n b] pos_mod_bound [of n b] by (safe, auto) lemma split_uint_lem: "bin_split n (uint w) = (a, b) \ a = take_bit (LENGTH('a) - n) a \ b = take_bit (LENGTH('a)) b" diff --git a/lib/Word_Lib/Bitwise.thy b/lib/Word_Lib/Bitwise.thy index 54f7088a6a..dffa886cde 100644 --- a/lib/Word_Lib/Bitwise.thy +++ b/lib/Word_Lib/Bitwise.thy @@ -365,7 +365,7 @@ lemma upt_eq_list_intros: by (simp_all add: upt_eq_Cons_conv) -subsection \Tactic definition\ +text \Tactic definition\ lemma if_bool_simps: "If p True y = (p \ y) \ If p False y = (\ p \ y) \ @@ -400,9 +400,7 @@ fun upt_conv ctxt ct = end | _ => NONE; -val expand_upt_simproc = - Simplifier.make_simproc \<^context> "expand_upt" - {lhss = [\<^term>\upt x y\], proc = K upt_conv}; +val expand_upt_simproc = \<^simproc_setup>\passive expand_upt ("upt x y") = \K upt_conv\\; fun word_len_simproc_fn ctxt ct = (case Thm.term_of ct of @@ -420,8 +418,7 @@ fun word_len_simproc_fn ctxt ct = | _ => NONE); val word_len_simproc = - Simplifier.make_simproc \<^context> "word_len" - {lhss = [\<^term>\len_of x\], proc = K word_len_simproc_fn}; + \<^simproc_setup>\passive word_len ("len_of x") = \K word_len_simproc_fn\\; (* convert 5 or nat 5 to Suc 4 when n_sucs = 1, Suc (Suc 4) when n_sucs = 2, or just 5 (discarding nat) when n_sucs = 0 *) @@ -448,9 +445,11 @@ fun nat_get_Suc_simproc_fn n_sucs ctxt ct = end handle TERM _ => NONE; fun nat_get_Suc_simproc n_sucs ts = - Simplifier.make_simproc \<^context> "nat_get_Suc" - {lhss = map (fn t => t $ \<^term>\n :: nat\) ts, - proc = K (nat_get_Suc_simproc_fn n_sucs)}; + Simplifier.make_simproc \<^context> + {name = "nat_get_Suc", + lhss = map (fn t => t $ \<^term>\n :: nat\) ts, + proc = K (nat_get_Suc_simproc_fn n_sucs), + identifier = []}; val no_split_ss = simpset_of (put_simpset HOL_ss \<^context> @@ -502,6 +501,6 @@ end method_setup word_bitwise = \Scan.succeed (fn ctxt => Method.SIMPLE_METHOD (Word_Bitwise_Tac.tac ctxt 1))\ - "decomposer for word equalities and inequalities into bit propositions" + "decomposer for word equalities and inequalities into bit propositions on concrete word lengths" end diff --git a/lib/Word_Lib/Bitwise_Signed.thy b/lib/Word_Lib/Bitwise_Signed.thy index 03cb70d80e..3a73f691f4 100644 --- a/lib/Word_Lib/Bitwise_Signed.thy +++ b/lib/Word_Lib/Bitwise_Signed.thy @@ -25,6 +25,6 @@ in method_setup word_bitwise_signed = \Scan.succeed (fn ctxt => Method.SIMPLE_METHOD (bw_tac_signed ctxt 1))\ - "decomposer for word equalities and inequalities into bit propositions" + "decomposer for word equalities and inequalities into bit propositions on concrete word lengths" end diff --git a/lib/Word_Lib/Boolean_Inequalities.thy b/lib/Word_Lib/Boolean_Inequalities.thy new file mode 100644 index 0000000000..4478276b6b 
--- /dev/null +++ b/lib/Word_Lib/Boolean_Inequalities.thy @@ -0,0 +1,139 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Boolean_Inequalities + imports Word_EqI +begin + +section \All inequalities between binary Boolean operations on @{typ "'a word"}\ + +text \ + Enumerates all binary functions resulting from Boolean operations on @{typ "'a word"} + and derives all inequalities of the form @{term "f x y \ g x y"} between them. + We leave out the trivial @{term "0 \ g x y"}, @{term "f x y \ -1"}, and + @{term "f x y \ f x y"}, because these are already readily available to the simplifier and + other methods. + + This leaves 36 inequalities. Some of these are subsumed by each other, but we generate + the full list to avoid too much manual processing. + + All inequalities produced here are in simp normal form.\ + +context + includes bit_operations_syntax +begin + +(* The following are all 16 binary Boolean operations (on bool): + all_bool_funs \ [ + \x y. False, + \x y. x \ y, + \x y. x \ \y, + \x y. x, + \x y. \x \ y, + \x y. y, + \x y. x \ y, + \x y. x \ y, + \x y. \(x \ y), + \x y. x = y, + \x y. \y, + \x y. x \ \y, + \x y. \x, + \x y. \x \ y, + \x y. \(x \ y), + \x y. True + ] + + We can define the same for word operations: +*) +definition all_bool_word_funs :: "('a::len word \ 'a word \ 'a word) list" where + "all_bool_word_funs \ [ + \x y. 0, + \x y. x AND y, + \x y. x AND NOT y, + \x y. x, + \x y. NOT x AND y, + \x y. y, + \x y. x XOR y, + \x y. x OR y, + \x y. NOT (x OR y), + \x y. NOT (x XOR y), + \x y. NOT y, + \x y. x OR NOT y, + \x y. NOT x, + \x y. NOT x OR y, + \x y. NOT (x AND y), + \x y. -1 + ]" + + +text \ + The inequalities on @{typ "'a word"} follow directly from implications on propositional + Boolean logic, which @{method simp} can solve automatically. This means, we can simply + enumerate all combinations, reduce from @{typ "'a word"} to @{typ bool}, and attempt to + solve by @{method simp} to get the complete list.\ +local_setup \ +let + (* derived from Numeral.mk_num, but returns a term, not syntax. *) + fun mk_num n = + if n > 0 then + (case Integer.quot_rem n 2 of + (0, 1) => \<^const>\Num.One\ + | (n, 0) => \<^const>\Num.Bit0\ $ mk_num n + | (n, 1) => \<^const>\Num.Bit1\ $ mk_num n) + else raise Match + + (* derived from Numeral.mk_number, but returns a term, not syntax. *) + fun mk_number n = + if n = 0 then \<^term>\0::nat\ + else if n = 1 then \<^term>\1::nat\ + else \<^term>\numeral::num \ nat\ $ mk_num n; + + (* generic form of the goal statement *) + val goal = @{term "\n m. (all_bool_word_funs!n) x y \ (all_bool_word_funs!m) x y"} + (* instance of the goal statement for a pair (i,j) of Boolean functions *) + fun stmt (i,j) = HOLogic.Trueprop $ (goal $ mk_number i $ mk_number j) + + (* attempt to prove an inequality between functions i and j *) + fun le_thm ctxt (i,j) = Goal.prove ctxt ["x", "y"] [] (stmt (i,j)) (fn _ => + (asm_full_simp_tac (ctxt addsimps [@{thm all_bool_word_funs_def}]) + THEN_ALL_NEW resolve_tac ctxt @{thms word_leI} + THEN_ALL_NEW asm_full_simp_tac (ctxt addsimps @{thms word_eqI_simps bit_simps})) 1) + + (* generate all combinations for (i,j), collect successful inequality theorems, + unfold all_bool_word_funs, and put into simp normal form. 
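+     For example, the surviving pair (i, j) = (1, 3) contributes the inequality
+     "x AND y \<le> x"; the full collection is bound to the fact name word_bool_le_funs below.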
We leave out 0 (bottom) + and 15 (top), as well as reflexive thms to remove trivial lemmas from the list.*) + fun thms ctxt = + map_product (fn x => fn y => (x,y)) (1 upto 14) (1 upto 14) + |> filter (fn (x,y) => x <> y) + |> map_filter (try (le_thm ctxt)) + |> map (Simplifier.simplify ctxt o Local_Defs.unfold ctxt @{thms all_bool_word_funs_def}) +in + fn ctxt => + Local_Theory.notes [((Binding.name "word_bool_le_funs", []), [(thms ctxt, [])])] ctxt |> #2 +end +\ + +(* Sanity checks: *) +lemma + "x AND y \ x" for x :: "'a::len word" + by (rule word_bool_le_funs(1)) + +lemma + "NOT x \ NOT x OR NOT y" for x :: "'a::len word" + by (rule word_bool_le_funs(36)) + +lemma + "x XOR y \ NOT x OR NOT y" for x :: "'a::len word" + by (rule word_bool_le_funs) + +(* Example use when the goal is not in simp normal form: *) +lemma word_xor_le_nand: + "x XOR y \ NOT (x AND y)" for x :: "'a::len word" + by (simp add: word_bool_le_funs) + +end + +end \ No newline at end of file diff --git a/lib/Word_Lib/Generic_set_bit.thy b/lib/Word_Lib/Generic_set_bit.thy index f3641eb8ce..743b32d220 100644 --- a/lib/Word_Lib/Generic_set_bit.thy +++ b/lib/Word_Lib/Generic_set_bit.thy @@ -38,6 +38,18 @@ instance end +context + includes bit_operations_syntax +begin + +lemma fixes i :: int + shows int_set_bit_True_conv_OR [code]: "Generic_set_bit.set_bit i n True = i OR push_bit n 1" + and int_set_bit_False_conv_NAND [code]: "Generic_set_bit.set_bit i n False = i AND NOT (push_bit n 1)" + and int_set_bit_conv_ops: "Generic_set_bit.set_bit i n b = (if b then i OR (push_bit n 1) else i AND NOT (push_bit n 1))" + by (simp_all add: bit_eq_iff) (auto simp add: bit_simps) + +end + instantiation word :: (len) set_bit begin @@ -121,4 +133,20 @@ lemma one_bit_shiftl: "set_bit 0 n True = (1 :: 'a :: len word) << n" lemma one_bit_pow: "set_bit 0 n True = (2 :: 'a :: len word) ^ n" by (simp add: one_bit_shiftl shiftl_def) +instantiation integer :: set_bit +begin + +context + includes integer.lifting +begin + +lift_definition set_bit_integer :: \integer \ nat \ bool \ integer\ is set_bit . + +instance + by (standard; transfer) (simp add: bit_simps) + +end + +end + end diff --git a/lib/Word_Lib/Guide.thy b/lib/Word_Lib/Guide.thy index ab5220883c..49f7911c97 100644 --- a/lib/Word_Lib/Guide.thy +++ b/lib/Word_Lib/Guide.thy @@ -6,7 +6,7 @@ (*<*) theory Guide - imports Word_Lib_Sumo Machine_Word_32 Machine_Word_64 Ancient_Numeral + imports Word_Lib_Sumo Machine_Word_32 Machine_Word_64 begin context semiring_bit_operations @@ -64,7 +64,7 @@ text \ \<^item> Equality rule: @{thm [display, mode=iff] bit_eq_iff [where ?'a = int, no_vars]} - \<^item> Induction rule: @{thm [display, mode=iff] bits_induct [where ?'a = int, no_vars]} + \<^item> Induction rule: @{thm [display, mode=iff] bit_induct [where ?'a = int, no_vars]} \<^item> Characteristic properties @{prop [source] \bit (f x) n \ P x n\} are available in fact collection \<^text>\bit_simps\. @@ -364,8 +364,13 @@ text \ \<^descr>[\<^theory>\Word_Lib.Bit_Comprehension\] Comprehension syntax for bit values over predicates - \<^typ>\nat \ bool\. For \<^typ>\'a::len word\, straightforward - alternatives exist; difficult to handle for \<^typ>\int\. + \<^typ>\nat \ bool\, for \<^typ>\'a::len word\; straightforward + alternatives exist. + + \<^descr>[\<^theory>\Word_Lib.Bit_Comprehension_Int\] + + Comprehension syntax for bit values over predicates + \<^typ>\nat \ bool\, for \<^typ>\int\; inherently non-computational. 
\<^descr>[\<^theory>\Word_Lib.Reversed_Bit_Lists\] @@ -388,9 +393,16 @@ text \ section \Changelog\ text \ + \<^descr>[Changes since AFP 2022] ~ + + \<^item> Theory \<^text>\Word_Lib.Ancient_Numeral\ has been removed from session. + + \<^item> Bit comprehension syntax for \<^typ>\int\ moved to separate theory + \<^theory>\Word_Lib.Bit_Comprehension_Int\. + \<^descr>[Changes since AFP 2021] ~ - \<^item> Theory \<^theory>\Word_Lib.Ancient_Numeral\ is not part of \<^theory>\Word_Lib.Word_Lib_Sumo\ + \<^item> Theory \<^text>\Word_Lib.Ancient_Numeral\ is not part of \<^theory>\Word_Lib.Word_Lib_Sumo\ any longer. \<^item> Infix syntax for \<^term>\(AND)\, \<^term>\(OR)\, \<^term>\(XOR)\ organized in diff --git a/lib/Word_Lib/Least_significant_bit.thy b/lib/Word_Lib/Least_significant_bit.thy index 587b0318ca..b1cfda83c8 100644 --- a/lib/Word_Lib/Least_significant_bit.thy +++ b/lib/Word_Lib/Least_significant_bit.thy @@ -25,7 +25,7 @@ definition lsb_int :: \int \ bool\ where \lsb i = bit i 0\ for i :: int instance - by standard (simp add: fun_eq_iff lsb_int_def) + by standard (simp add: fun_eq_iff lsb_int_def bit_0) end @@ -42,7 +42,7 @@ lemma int_lsb_numeral [simp]: "lsb (numeral (num.Bit1 w) :: int) = True" "lsb (- numeral (num.Bit0 w) :: int) = False" "lsb (- numeral (num.Bit1 w) :: int) = True" - by (simp_all add: lsb_int_def) + by (simp_all add: lsb_int_def bit_0) instantiation word :: (len) lsb begin @@ -64,7 +64,7 @@ lemma lsb_word_eq: lemma word_lsb_alt: "lsb w = bit w 0" for w :: "'a::len word" - by (simp add: lsb_word_eq) + by (simp add: lsb_word_eq bit_0) lemma word_lsb_1_0 [simp]: "lsb (1::'a::len word) \ \ lsb (0::'b::len word)" unfolding word_lsb_def by simp @@ -91,4 +91,20 @@ lemma word_lsb_nat:"lsb w = (unat w mod 2 = 1)" apply (simp add: even_nat_iff) done +instantiation integer :: lsb +begin + +context + includes integer.lifting +begin + +lift_definition lsb_integer :: \integer \ bool\ is lsb . + +instance + by (standard; transfer) (fact lsb_odd) + +end + +end + end diff --git a/lib/Word_Lib/Many_More.thy b/lib/Word_Lib/Many_More.thy index c255278ca4..62f0904dc1 100644 --- a/lib/Word_Lib/Many_More.thy +++ b/lib/Word_Lib/Many_More.thy @@ -167,17 +167,17 @@ lemma takeWhile_take_has_property_nth: "\ n < length (takeWhile P xs) \ \ P (xs ! n)" by (induct xs arbitrary: n; simp split: if_split_asm) (case_tac n, simp_all) -lemma takeWhile_replicate: - "takeWhile f (replicate len x) = (if f x then replicate len x else [])" - by (induct_tac len) auto - lemma takeWhile_replicate_empty: "\ f x \ takeWhile f (replicate len x) = []" - by (simp add: takeWhile_replicate) + by simp lemma takeWhile_replicate_id: "f x \ takeWhile f (replicate len x) = replicate len x" - by (simp add: takeWhile_replicate) + by simp + +lemma takeWhile_all: + "length (takeWhile P xs) = length xs \ \x \ set xs. P x" + by (induct xs) (auto split: if_split_asm) lemma nth_rev: "n < length xs \ rev xs ! n = xs ! 
(length xs - 1 - n)" using rev_nth by simp @@ -683,4 +683,7 @@ lemma plus_minus_one_rewrite: lemma Suc_0_lt_2p_len_of: "Suc 0 < 2 ^ LENGTH('a :: len)" by (metis One_nat_def len_gt_0 lessI numeral_2_eq_2 one_less_power) +lemma bin_rest_code: "i div 2 = drop_bit 1 i" for i :: int + by (simp add: drop_bit_eq_div) + end diff --git a/lib/Word_Lib/More_Bit_Ring.thy b/lib/Word_Lib/More_Bit_Ring.thy new file mode 100644 index 0000000000..20d8c9e6f4 --- /dev/null +++ b/lib/Word_Lib/More_Bit_Ring.thy @@ -0,0 +1,136 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory More_Bit_Ring + imports Main +begin + +(* Additional equalities on semiring_bit_operations and ring_bit_operations, in particular + relationships between Boolean and arithmetic operations. *) + +context semiring_bit_operations +begin + +context + includes bit_operations_syntax +begin + +lemma disjunctive_add2: + "(x AND y) = 0 \ x + y = x OR y" + by (metis disjunctive_add bit_0_eq bit_and_iff bot_apply bot_bool_def) + +end +end + +context ring_bit_operations +begin + +context + includes bit_operations_syntax +begin + +lemma not_xor_is_eqv: + "NOT (x XOR y) = (x AND y) OR (NOT x AND NOT y)" + by (simp add: bit.xor_def bit.disj_conj_distrib or.commute) + +lemma not_xor_eq_xor_not: + "(NOT x) XOR y = x XOR (NOT y)" + by simp + +lemma not_minus: + "NOT (x - y) = y - x - 1" + by (simp add: not_eq_complement) + +lemma minus_not_eq_plus_1: + "- NOT x = x + 1" + by (simp add: minus_eq_not_plus_1) + +lemma not_minus_eq_minus_1: + "NOT (- x) = x - 1" + by (simp add: not_eq_complement) + +lemma and_plus_not_and: + "(x AND y) + (x AND NOT y) = x" + by (metis and.left_commute and.right_neutral bit.conj_cancel_right bit.conj_disj_distrib + bit.conj_zero_right bit.disj_cancel_right disjunctive_add2) + +lemma or_eq_and_not_plus: + "x OR y = (x AND NOT y) + y" + by (simp add: and.assoc bit.disj_conj_distrib2 disjunctive_add2) + +lemma and_eq_not_or_minus: + "x AND y = (NOT x OR y) - NOT x" + by (metis and.idem and_eq_not_not_or eq_diff_eq or.commute or.idem or_eq_and_not_plus) + +lemma and_not_eq_or_minus: + "x AND NOT y = (x OR y) - y" + by (simp add: or_eq_and_not_plus) + +lemma and_not_eq_minus_and: + "x AND NOT y = x - (x AND y)" + by (simp add: add.commute eq_diff_eq and_plus_not_and) + +lemma or_minus_eq_minus_and: + "(x OR y) - y = x - (x AND y)" + by (metis and_not_eq_minus_and and_not_eq_or_minus) + +lemma plus_eq_and_or: + "x + y = (x OR y) + (x AND y)" + using add_commute local.add.semigroup_axioms or_eq_and_not_plus semigroup.assoc + by (fastforce simp: and_plus_not_and) + +lemma xor_eq_or_minus_and: + "x XOR y = (x OR y) - (x AND y)" + by (metis (no_types) bit.de_Morgan_conj bit.xor_def2 bit_and_iff bit_or_iff disjunctive_diff) + +lemma not_xor_eq_and_plus_not_or: + "NOT (x XOR y) = (x AND y) + NOT (x OR y)" + by (metis (no_types, lifting) not_diff_distrib add.commute bit.de_Morgan_conj bit.xor_def2 + bit_and_iff bit_or_iff disjunctive_diff) + +lemma not_xor_eq_and_minus_or: + "NOT (x XOR y) = (x AND y) - (x OR y) - 1" + by (metis not_diff_distrib add.commute minus_diff_eq not_minus_eq_minus_1 not_xor_eq_and_plus_not_or) + +lemma plus_eq_xor_plus_carry: + "x + y = (x XOR y) + 2 * (x AND y)" + by (metis plus_eq_and_or add.commute add.left_commute diff_add_cancel mult_2 xor_eq_or_minus_and) + +lemma plus_eq_or_minus_xor: + "x + y = 2 * (x OR y) - (x XOR y)" + by (metis add_diff_cancel_left' diff_diff_eq2 local.mult_2 plus_eq_and_or xor_eq_or_minus_and) + +lemma plus_eq_minus_neg: + "x + y 
= x - NOT y - 1" + using add_commute local.not_diff_distrib not_minus + by auto + +lemma minus_eq_plus_neg: + "x - y = x + NOT y + 1" + by (simp add: add.semigroup_axioms diff_conv_add_uminus minus_eq_not_plus_1 semigroup.assoc) + +lemma minus_eq_and_not_minus_not_and: + "x - y = (x AND NOT y) - (NOT x AND y)" + by (metis bit.de_Morgan_conj bit.double_compl not_diff_distrib plus_eq_and_or) + +lemma minus_eq_xor_minus_not_and: + "x - y = (x XOR y) - 2 * (NOT x AND y)" + by (metis (no_types) bit.compl_eq_compl_iff bit.xor_compl_left not_diff_distrib + plus_eq_xor_plus_carry) + +lemma minus_eq_and_not_minus_xor: + "x - y = 2 * (x AND NOT y) - (x XOR y)" + by (metis and.commute minus_diff_eq minus_eq_xor_minus_not_and xor.commute) + +lemma and_one_neq_simps[simp]: + "x AND 1 \ 0 \ x AND 1 = 1" + "x AND 1 \ 1 \ x AND 1 = 0" + by (clarsimp simp: and_one_eq)+ + +end +end + +end \ No newline at end of file diff --git a/lib/Word_Lib/More_Word.thy b/lib/Word_Lib/More_Word.thy index 4623d21af4..005e79dace 100644 --- a/lib/Word_Lib/More_Word.thy +++ b/lib/Word_Lib/More_Word.thy @@ -11,21 +11,9 @@ theory More_Word "HOL-Library.Word" More_Arithmetic More_Divides + More_Bit_Ring begin -context unique_euclidean_semiring_with_bit_operations \\TODO: move\ -begin - -lemma possible_bit [simp]: - \possible_bit TYPE('a) n\ - by (simp add: possible_bit_def) - -lemma drop_bit_mask_eq: - \drop_bit m (mask n) = mask (n - m)\ - by (rule bit_eqI) (auto simp add: bit_simps possible_bit_def) - -end - context includes bit_operations_syntax begin @@ -59,10 +47,6 @@ proof - prefer 10 apply (subst signed_take_bit_int_eq_self) apply (auto simp add: signed_take_bit_int_eq_self signed_take_bit_eq_take_bit_minus take_bit_Suc_from_most n not_less intro!: *) - apply (smt (z3) take_bit_nonnegative) - apply (smt (z3) take_bit_int_less_exp) - apply (smt (z3) take_bit_nonnegative) - apply (smt (z3) take_bit_int_less_exp) done then show ?thesis apply (simp only: One_nat_def word_size drop_bit_eq_zero_iff_not_bit_last bit_and_iff bit_xor_iff) @@ -79,7 +63,7 @@ lemma unat_p2: "n < LENGTH('a :: len) \ unat (2 ^ n :: 'a word) lemma word_div_lt_eq_0: "x < y \ x div y = 0" for x :: "'a :: len word" - by transfer simp + by (fact div_word_less) lemma word_div_eq_1_iff: "n div m = 1 \ n \ m \ unat n < 2 * unat (m :: 'a :: len word)" apply (simp only: word_arith_nat_defs word_le_nat_alt word_of_nat_eq_iff flip: nat_div_eq_Suc_0_iff) @@ -109,7 +93,7 @@ lemma p2_eq_0: lemma p2len: \(2 :: 'a word) ^ LENGTH('a::len) = 0\ - by simp + by (fact word_pow_0) lemma neg_mask_is_div: "w AND NOT (mask n) = (w div 2^n) * 2^n" @@ -173,10 +157,7 @@ lemma less_eq_mask_iff_take_bit_eq_self: lemma NOT_eq: "NOT (x :: 'a :: len word) = - x - 1" - apply (cut_tac x = "x" in word_add_not) - apply (drule add.commute [THEN trans]) - apply (drule eq_diff_eq [THEN iffD2]) - by simp + by (fact not_eq_complement) lemma NOT_mask: "NOT (mask n :: 'a::len word) = - (2 ^ n)" by (simp add : NOT_eq mask_2pm1) @@ -232,16 +213,12 @@ lemma of_int_uint: corollary word_plus_and_or_coroll: "x AND y = 0 \ x + y = x OR y" for x y :: \'a::len word\ - using word_plus_and_or[where x=x and y=y] - by simp + by (fact disjunctive_add2) corollary word_plus_and_or_coroll2: "(x AND w) + (x AND NOT w) = x" for x w :: \'a::len word\ - apply (subst disjunctive_add) - apply (simp add: bit_simps) - apply (simp flip: bit.conj_disj_distrib) - done + by (fact and_plus_not_and) lemma unat_mask_eq: \unat (mask n :: 'a::len word) = mask (min LENGTH('a) n)\ @@ -642,7 +619,7 @@ lemma word_power_less_1 [simp]: 
done lemma word_sub_1_le: - "x \ 0 \ x - 1 \ (x :: ('a :: len) word)" + "x \ 0 \ x - 1 \ (x :: 'a :: len word)" apply (subst no_ulen_sub) apply simp apply (cases "uint x = 0") @@ -765,13 +742,13 @@ qed lemma mask_out_sub_mask: "(x AND NOT (mask n)) = x - (x AND (mask n))" for x :: \'a::len word\ - by (simp add: field_simps word_plus_and_or_coroll2) + by (fact and_not_eq_minus_and) lemma subtract_mask: "p - (p AND mask n) = (p AND NOT (mask n))" "p - (p AND NOT (mask n)) = (p AND mask n)" for p :: \'a::len word\ - by (simp add: field_simps word_plus_and_or_coroll2)+ + by (auto simp: and_not_eq_minus_and) lemma take_bit_word_eq_self_iff: \take_bit n w = w \ n \ LENGTH('a) \ w < 2 ^ n\ @@ -780,7 +757,7 @@ lemma take_bit_word_eq_self_iff: by (transfer fixing: n) auto lemma word_power_increasing: - assumes x: "2 ^ x < (2 ^ y::'a::len word)" "x < LENGTH('a::len)" "y < LENGTH('a::len)" + assumes x: "2 ^ x < (2 ^ y::'a::len word)" "x < LENGTH('a)" "y < LENGTH('a)" shows "x < y" using x using assms by transfer simp @@ -804,18 +781,8 @@ lemma plus_one_helper2: unatSuc) lemma less_x_plus_1: - fixes x :: "'a :: len word" shows - "x \ - 1 \ (y < (x + 1)) = (y < x \ y = x)" - apply (rule iffI) - apply (rule disjCI) - apply (drule plus_one_helper) - apply simp - apply (subgoal_tac "x < x + 1") - apply (erule disjE, simp_all) - apply (rule plus_one_helper2 [OF order_refl]) - apply (rule notI, drule max_word_wrap) - apply simp - done + "x \ - 1 \ (y < x + 1) = (y < x \ y = x)" for x :: "'a::len word" + by (meson max_word_wrap plus_one_helper plus_one_helper2 word_le_less_eq) lemma word_Suc_leq: fixes k::"'a::len word" shows "k \ - 1 \ x < k + 1 \ x \ k" @@ -841,10 +808,10 @@ lemma word_atLeastLessThan_Suc_atLeastAtMost_union: fixes l::"'a::len word" assumes "m \ - 1" and "l \ m" and "m \ u" shows "{l..m} \ {m+1..u} = {l..u}" - proof - +proof - from ivl_disj_un_two(8)[OF assms(2) assms(3)] have "{l..u} = {l..m} \ {m<..u}" by blast with assms show ?thesis by(simp add: word_atLeastAtMost_Suc_greaterThanAtMost) - qed +qed lemma max_word_less_eq_iff [simp]: \- 1 \ w \ w = - 1\ for w :: \'a::len word\ @@ -859,7 +826,7 @@ lemma word_2p_mult_inc: assumes x: "2 * 2 ^ n < (2::'a::len word) * 2 ^ m" assumes suc_n: "Suc n < LENGTH('a::len)" shows "2^n < (2::'a::len word)^m" - by (smt suc_n le_less_trans lessI nat_less_le nat_mult_less_cancel_disj p2_gt_0 + by (smt (verit) suc_n le_less_trans lessI nat_less_le nat_mult_less_cancel_disj p2_gt_0 power_Suc power_Suc unat_power_lower word_less_nat_alt x) lemma power_overflow: @@ -1095,17 +1062,12 @@ lemma word_less_sub_1: by (fact word_le_minus_one_leq) lemma word_sub_mono2: - "\ a + b \ c + d; c \ a; b \ a + b; d \ c + d \ - \ b \ (d :: 'a :: len word)" - apply (drule(1) word_sub_mono) - apply simp - apply simp - apply simp - done + "\ a + b \ c + d; c \ a; b \ a + b; d \ c + d \ \ b \ (d :: 'a :: len word)" + by (drule(1) word_sub_mono; simp) lemma word_not_le: "(\ x \ (y :: 'a :: len word)) = (y < x)" - by fastforce + by (fact not_le) lemma word_subset_less: "\ {x .. x + r - 1} \ {y .. 
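
mask_out_sub_mask and subtract_mask split a word into its low n bits and the rest: clearing the low bits is the same as subtracting them off. An informal 8-bit check (Python sketch, not part of the proofs; mask is an illustrative helper):

    # Informal check of mask_out_sub_mask / subtract_mask on 8-bit words.
    W = 8
    M = (1 << W) - 1

    def mask(n):
        return (1 << n) - 1

    for p in range(1 << W):
        for n in range(W + 1):
            lo, hi = p & mask(n), p & ~mask(n) & M
            assert hi == (p - lo) & M       # mask_out_sub_mask
            assert lo == (p - hi) & M       # subtract_mask, second equation
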
y + s - 1}; @@ -1236,18 +1198,16 @@ lemma unat_Suc2: lemma word_div_1: "(n :: 'a :: len word) div 1 = n" - by (fact bits_div_by_1) + by (fact div_by_1) lemma word_minus_one_le: "-1 \ (x :: 'a :: len word) = (x = -1)" by (fact word_order.extremum_unique) lemma up_scast_inj: - "\ scast x = (scast y :: 'b :: len word); size x \ LENGTH('b) \ - \ x = y" + "\ scast x = (scast y :: 'b :: len word); size x \ LENGTH('b) \ \ x = y" apply transfer - apply (cases \LENGTH('a)\) - apply simp_all + apply (cases \LENGTH('a)\; simp) apply (metis order_refl take_bit_signed_take_bit take_bit_tightened) done @@ -1262,12 +1222,8 @@ lemma word_le_add: by (rule exI [where x = "unat (y - x)"]) simp lemma word_plus_mcs_4': - fixes x :: "'a :: len word" - shows "\x + v \ x + w; x \ x + v\ \ v \ w" - apply (rule word_plus_mcs_4) - apply (simp add: add.commute) - apply (simp add: add.commute) - done + "\x + v \ x + w; x \ x + v\ \ v \ w" for x :: "'a::len word" + by (rule word_plus_mcs_4; simp add: add.commute) lemma unat_eq_1: \unat x = Suc 0 \ x = 1\ @@ -1482,11 +1438,12 @@ lemma unat_1_0: lemma x_less_2_0_1': fixes x :: "'a::len word" shows "\LENGTH('a) \ 1; x < 2\ \ x = 0 \ x = 1" - apply (cases \2 \ LENGTH('a)\) - apply simp_all + apply (cases \2 \ LENGTH('a)\; simp) apply transfer - apply auto - apply (metis add.commute add.right_neutral even_two_times_div_two mod_div_trivial mod_pos_pos_trivial mult.commute mult_zero_left not_less not_take_bit_negative odd_two_times_div_two_succ) + apply clarsimp + apply (metis add.commute add.right_neutral even_two_times_div_two mod_div_trivial + mod_pos_pos_trivial mult.commute mult_zero_left not_less not_take_bit_negative + odd_two_times_div_two_succ) done lemmas word_add_le_iff2 = word_add_le_iff [folded no_olen_add_nat] @@ -1833,7 +1790,7 @@ lemma test_bit_size: "bit w n \ n < size w" for w :: "'a::len word" by transfer simp -lemma word_eq_iff: "x = y \ (\n?P \ ?Q\) +lemma word_eq_iff: "x = y \ (\n bit (0 :: 'a::len word) n" lemma nth_minus1: "bit (-1 :: 'a::len word) n \ n < LENGTH('a)" by transfer simp -lemma nth_ucast: +lemma nth_ucast_weak: "bit (ucast w::'a::len word) n = (bit w n \ n < LENGTH('a))" by transfer (simp add: bit_take_bit_iff ac_simps) -lemma drop_bit_numeral_bit0_1 [simp]: - \drop_bit (Suc 0) (numeral k) = - (word_of_int (drop_bit (Suc 0) (take_bit LENGTH('a) (numeral k))) :: 'a::len word)\ - by (metis Word_eq_word_of_int drop_bit_word.abs_eq of_int_numeral) +lemma nth_ucast: + "bit (ucast (w::'a::len word)::'b::len word) n = + (bit w n \ n < min LENGTH('a) LENGTH('b))" + by (auto simp: not_le nth_ucast_weak dest: bit_imp_le_length) lemma nth_mask: \bit (mask n :: 'a::len word) i \ i < n \ i < size (mask n :: 'a word)\ @@ -2136,8 +2093,8 @@ lemma div_of_0_id[simp]:"(0::('a::len) word) div n = 0" lemma degenerate_word:"LENGTH('a) = 1 \ (x::('a::len) word) = 0 \ x = 1" by (metis One_nat_def less_irrefl_nat sint_1_cases) -lemma div_by_0_word:"(x::('a::len) word) div 0 = 0" - by (metis div_0 div_by_0 unat_0 word_arith_nat_defs(6) word_div_1) +lemma div_by_0_word: "(x::'a::len word) div 0 = 0" + by (fact div_by_0) lemma div_less_dividend_word:"\x \ 0; n \ 1\ \ (x::('a::len) word) div n < x" apply (cases \n = 0\) @@ -2178,10 +2135,10 @@ lemma odd_word_imp_even_next:"odd (unat (x::('a::len) word)) \ x done lemma overflow_imp_lsb:"(x::('a::len) word) + 1 = 0 \ bit x 0" - using even_plus_one_iff [of x] by simp + using even_plus_one_iff [of x] by (simp add: bit_0) lemma odd_iff_lsb:"odd (unat (x::('a::len) word)) = bit x 0" - by transfer (simp add: even_nat_iff) 
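
The strengthened nth_ucast says that a bit of ucast w is set exactly when that bit is set in w and its index lies below both word lengths, which covers down-casts and up-casts uniformly. An informal check on 12-bit and 8-bit words (Python sketch with illustrative widths LA and LB):

    # Informal check of the strengthened nth_ucast on concrete widths.
    for LA, LB in [(12, 8), (8, 12)]:             # down-cast and up-cast
        for w in range(1 << LA):
            for n in range(16):
                uc = w & ((1 << LB) - 1)          # ucast: truncate / zero-extend
                bit_a = n < LA and (w >> n) & 1 == 1
                bit_b = n < LB and (uc >> n) & 1 == 1
                assert bit_b == (bit_a and n < min(LA, LB))
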
+ by transfer (simp add: even_nat_iff bit_0) lemma of_nat_neq_iff_word: "x mod 2 ^ LENGTH('a) \ y mod 2 ^ LENGTH('a) \ @@ -2196,7 +2153,7 @@ lemma of_nat_neq_iff_word: done lemma lsb_this_or_next: "\ (bit ((x::('a::len) word) + 1) 0) \ bit x 0" - by simp + by (simp add: bit_0) lemma mask_or_not_mask: "x AND mask n OR x AND NOT (mask n) = x" @@ -2275,8 +2232,7 @@ lemma word_ops_nth: lemma word_power_nonzero: "\ (x :: 'a::len word) < 2 ^ (LENGTH('a) - n); n < LENGTH('a); x \ 0 \ \ x * 2 ^ n \ 0" - by (metis gr_implies_not0 mult_eq_0_iff nat_mult_power_less_eq numeral_2_eq_2 - p2_gt_0 unat_eq_zero unat_less_power unat_mult_lem unat_power_lower word_gt_a_gt_0 zero_less_Suc) + by (metis gr0I mult.commute not_less_eq p2_gt_0 power_0 word_less_1 word_power_less_diff zero_less_diff) lemma less_1_helper: "n \ m \ (n - 1 :: int) < m" @@ -2454,7 +2410,7 @@ lemma eq_ucast_ucast_eq: lemma le_ucast_ucast_le: "x \ ucast y \ ucast x \ y" for x :: "'a::len word" and y :: "'b::len word" - by (smt le_unat_uoi linorder_not_less order_less_imp_le ucast_nat_def unat_arith_simps(1)) + by (smt (verit) le_unat_uoi linorder_not_less order_less_imp_le ucast_nat_def unat_arith_simps(1)) lemma less_ucast_ucast_less: "LENGTH('b) \ LENGTH('a) \ x < ucast y \ ucast x < y" diff --git a/lib/Word_Lib/More_Word_Operations.thy b/lib/Word_Lib/More_Word_Operations.thy index cfb43dbc7b..7578a2a07b 100644 --- a/lib/Word_Lib/More_Word_Operations.thy +++ b/lib/Word_Lib/More_Word_Operations.thy @@ -14,6 +14,7 @@ theory More_Word_Operations More_Misc Signed_Words Word_Lemmas + Many_More Word_EqI begin @@ -301,13 +302,21 @@ lemma alignUp_not_aligned_eq: and sz: "n < LENGTH('a)" shows "alignUp a n = (a div 2 ^ n + 1) * 2 ^ n" proof - + from \n < LENGTH('a)\ have \(2::int) ^ n < 2 ^ LENGTH('a)\ + by simp + with take_bit_int_less_exp [of n] + have *: \take_bit n k < 2 ^ LENGTH('a)\ for k :: int + by (rule less_trans) have anz: "a mod 2 ^ n \ 0" by (rule not_aligned_mod_nz) fact+ - - then have um: "unat (a mod 2 ^ n - 1) div 2 ^ n = 0" using sz - by (meson Euclidean_Division.div_eq_0_iff le_m1_iff_lt measure_unat order_less_trans - unat_less_power word_less_sub_le word_mod_less_divisor) - + then have um: "unat (a mod 2 ^ n - 1) div 2 ^ n = 0" + apply (transfer fixing: n) using sz + apply (simp flip: take_bit_eq_mod add: div_eq_0_iff) + apply (subst take_bit_int_eq_self) + using * + apply (auto simp add: diff_less_eq intro: less_imp_le) + apply (simp add: less_le) + done have "a + 2 ^ n - 1 = (a div 2 ^ n) * 2 ^ n + (a mod 2 ^ n) + 2 ^ n - 1" by (simp add: word_mod_div_equality) also have "\ = (a mod 2 ^ n - 1) + (a div 2 ^ n + 1) * 2 ^ n" @@ -722,7 +731,7 @@ definition lemma to_bool_and_1: "to_bool (x AND 1) \ bit x 0" - by (simp add: to_bool_def and_one_eq mod_2_eq_odd) + by (simp add: to_bool_def word_and_1) lemma to_bool_from_bool [simp]: "to_bool (from_bool r) = r" @@ -785,7 +794,7 @@ lemma from_bool_eqI: lemma from_bool_odd_eq_and: "from_bool (odd w) = w AND 1" - unfolding from_bool_def by (simp add: word_and_1) + unfolding from_bool_def by (simp add: word_and_1 bit_0) lemma neg_mask_in_mask_range: "is_aligned ptr bits \ (ptr' AND NOT(mask bits) = ptr) = (ptr' \ mask_range ptr bits)" @@ -796,7 +805,7 @@ lemma neg_mask_in_mask_range: apply (subst word_plus_and_or_coroll, word_eqI_solve) apply (metis bit.disj_ac(2) bit.disj_conj_distrib2 le_word_or2 word_and_max word_or_not) apply clarsimp - apply (smt add.right_neutral eq_iff is_aligned_neg_mask_eq mask_out_add_aligned neg_mask_mono_le + apply (smt (verit) add.right_neutral eq_iff 
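
alignUp_not_aligned_eq states that rounding a non-aligned value up to the next multiple of 2^n is the same as taking the next quotient block, (a div 2^n + 1) * 2^n, with all arithmetic modulo the word size. An informal 8-bit check (Python sketch; align_up mirrors the usual (a + 2^n - 1) AND NOT mask n computation):

    # Informal check of alignUp_not_aligned_eq on 8-bit words.
    W = 8
    M = (1 << W) - 1

    def align_up(a, n):                     # (a + 2^n - 1) AND NOT (mask n), mod 2^W
        return (a + (1 << n) - 1) & ~((1 << n) - 1) & M

    for a in range(1 << W):
        for n in range(W):
            if a % (1 << n) != 0:           # premise: a is not 2^n-aligned
                assert align_up(a, n) == ((a // (1 << n) + 1) * (1 << n)) & M
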
is_aligned_neg_mask_eq mask_out_add_aligned neg_mask_mono_le word_and_not) apply (simp add: power_overflow mask_eq_decr_exp) done @@ -1014,6 +1023,104 @@ lemma aligned_mask_diff: apply (meson aligned_add_mask_less_eq is_aligned_weaken le_less_trans) done +lemma Suc_mask_eq_mask: + "\bit a n \ a AND mask (Suc n) = a AND mask n" for a::"'a::len word" + by (metis sign_extend_def sign_extend_def') + +lemma word_less_high_bits: + fixes a::"'a::len word" + assumes high_bits: "\i > n. bit a i = bit b i" + assumes less: "a AND mask (Suc n) < b AND mask (Suc n)" + shows "a < b" +proof - + let ?masked = "\x. x AND NOT (mask (Suc n))" + from high_bits + have "?masked a = ?masked b" + by - word_eqI_solve + then + have "?masked a + (a AND mask (Suc n)) < ?masked b + (b AND mask (Suc n))" + by (metis AND_NOT_mask_plus_AND_mask_eq less word_and_le2 word_plus_strict_mono_right) + then + show ?thesis + by (simp add: AND_NOT_mask_plus_AND_mask_eq) +qed + +lemma word_less_bitI: + fixes a :: "'a::len word" + assumes hi_bits: "\i > n. bit a i = bit b i" + assumes a_bits: "\bit a n" + assumes b_bits: "bit b n" "n < LENGTH('a)" + shows "a < b" +proof - + from b_bits + have "a AND mask n < b AND mask (Suc n)" + by (metis bit_mask_iff impossible_bit le2p_bits_unset leI lessI less_Suc_eq_le mask_eq_decr_exp + word_and_less' word_ao_nth) + with a_bits + have "a AND mask (Suc n) < b AND mask (Suc n)" + by (simp add: Suc_mask_eq_mask) + with hi_bits + show ?thesis + by (rule word_less_high_bits) +qed + +lemma word_less_bitD: + fixes a::"'a::len word" + assumes less: "a < b" + shows "\n. (\i > n. bit a i = bit b i) \ \bit a n \ bit b n" +proof - + define xs where "xs \ zip (to_bl a) (to_bl b)" + define tk where "tk \ length (takeWhile (\(x,y). x = y) xs)" + define n where "n \ LENGTH('a) - Suc tk" + have n_less: "n < LENGTH('a)" + by (simp add: n_def) + moreover + { fix i + have "\ i < LENGTH('a) \ bit a i = bit b i" + using bit_imp_le_length by blast + moreover + assume "i > n" + with n_less + have "i < LENGTH('a) \ LENGTH('a) - Suc i < tk" + unfolding n_def by arith + hence "i < LENGTH('a) \ bit a i = bit b i" + unfolding n_def tk_def xs_def + by (fastforce dest: takeWhile_take_has_property_nth simp: rev_nth simp flip: nth_rev_to_bl) + ultimately + have "bit a i = bit b i" + by blast + } + note all = this + moreover + from less + have "a \ b" by simp + then + obtain i where "to_bl a ! i \ to_bl b ! i" + using nth_equalityI word_bl.Rep_eqD word_rotate.lbl_lbl by blast + then + have "tk \ length xs" + unfolding tk_def xs_def + by (metis length_takeWhile_less list_eq_iff_zip_eq nat_neq_iff word_rotate.lbl_lbl) + then + have "tk < length xs" + using length_takeWhile_le order_le_neq_trans tk_def by blast + from nth_length_takeWhile[OF this[unfolded tk_def]] + have "fst (xs ! tk) \ snd (xs ! tk)" + by (clarsimp simp: tk_def) + with `tk < length xs` + have "bit a n \ bit b n" + by (clarsimp simp: xs_def n_def tk_def nth_rev simp flip: nth_rev_to_bl) + with less all + have "\bit a n \ bit b n" + by (metis n_less order.asym word_less_bitI) + ultimately + show ?thesis by blast +qed + +lemma word_less_bit_eq: + "(a < b) = (\n < LENGTH('a). (\i > n. 
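
word_less_bitI and word_less_high_bits capture the fact that unsigned comparison is decided by the most significant bit on which the two words differ. An informal exhaustive check on 8-bit words (Python sketch outside the formal development):

    # Informal check of word_less_bitI: agreement above n, bit n clear in a and
    # set in b, forces a < b as unsigned words.
    W = 8

    def bit(x, n):
        return (x >> n) & 1

    for a in range(1 << W):
        for b in range(1 << W):
            for n in range(W):
                if all(bit(a, i) == bit(b, i) for i in range(n + 1, W)) \
                   and not bit(a, n) and bit(b, n):
                    assert a < b
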
bit a i = bit b i) \ \bit a n \ bit b n)" for a::"'a::len word" + by (meson bit_imp_le_length word_less_bitD word_less_bitI) + end end \ No newline at end of file diff --git a/lib/Word_Lib/Most_significant_bit.thy b/lib/Word_Lib/Most_significant_bit.thy index e3f6f9d12a..89f8ef9676 100644 --- a/lib/Word_Lib/Most_significant_bit.thy +++ b/lib/Word_Lib/Most_significant_bit.thy @@ -191,4 +191,19 @@ lemma msb_big: apply (simp add: take_bit_eq_self_iff_drop_bit_eq_0 drop_bit_eq_zero_iff_not_bit_last) done +instantiation integer :: msb +begin + +context + includes integer.lifting +begin + +lift_definition msb_integer :: \integer \ bool\ is msb . + +instance .. + +end + +end + end diff --git a/lib/Word_Lib/ROOT b/lib/Word_Lib/ROOT index 0c3c2d4297..5a2b12301f 100644 --- a/lib/Word_Lib/ROOT +++ b/lib/Word_Lib/ROOT @@ -23,7 +23,6 @@ session Word_Lib (lib) = HOL + More_Misc Strict_part_mono Many_More - Ancient_Numeral Examples theories Guide diff --git a/lib/Word_Lib/Reversed_Bit_Lists.thy b/lib/Word_Lib/Reversed_Bit_Lists.thy index 2d4484cb20..74f4975219 100644 --- a/lib/Word_Lib/Reversed_Bit_Lists.thy +++ b/lib/Word_Lib/Reversed_Bit_Lists.thy @@ -498,15 +498,7 @@ lemma bin_nth_of_bl: "bit (bl_to_bin bl) n = (n < length bl \ rev bl ! n)" by (simp add: bl_to_bin_def bin_nth_of_bl_aux) lemma bin_nth_bl: "n < m \ bit w n = nth (rev (bin_to_bl m w)) n" - apply (induct n arbitrary: m w) - apply clarsimp - apply (case_tac m, clarsimp) - apply (clarsimp simp: bin_to_bl_def) - apply (simp add: bin_to_bl_aux_alt) - apply (case_tac m, clarsimp) - apply (clarsimp simp: bin_to_bl_def) - apply (simp add: bin_to_bl_aux_alt bit_Suc) - done + by (metis bin_bl_bin bin_nth_of_bl nth_bintr size_bin_to_bl) lemma nth_bin_to_bl_aux: "n < m + length bl \ (bin_to_bl_aux m w bl) ! n = @@ -852,7 +844,6 @@ next obtain b k where \bin = of_bool b + 2 * k\ using bin_exhaust by blast moreover have \(2 * k - 1) div 2 = k - 1\ - using even_succ_div_2 [of \2 * (k - 1)\] by simp ultimately show ?case using Suc [of \bin div 2\] @@ -1798,11 +1789,8 @@ lemma bl_word_roti_dt': apply safe apply (simp add: zmod_zminus1_eq_if) apply safe - apply (simp add: nat_mult_distrib) - apply (simp add: nat_diff_distrib [OF pos_mod_sign pos_mod_conj - [THEN conjunct2, THEN order_less_imp_le]] - nat_mod_distrib) - apply (simp add: nat_mod_distrib) + apply (auto simp add: nat_mult_distrib nat_mod_distrib) + using nat_0_le nat_minus_as_int zmod_int apply presburger done lemmas bl_word_roti_dt = bl_word_roti_dt' [unfolded word_size] @@ -1891,12 +1879,12 @@ lemma bin_to_bl_or: lemma word_and_1_bl: fixes x::"'a::len word" shows "(x AND 1) = of_bl [bit x 0]" - by (simp add: mod_2_eq_odd and_one_eq) + by (simp add: word_and_1) lemma word_1_and_bl: fixes x::"'a::len word" shows "(1 AND x) = of_bl [bit x 0]" - by (simp add: mod_2_eq_odd one_and_eq) + using word_and_1_bl [of x] by (simp add: ac_simps) lemma of_bl_drop: "of_bl (drop n xs) = (of_bl xs AND mask (length xs - n))" @@ -1955,7 +1943,7 @@ lemma word_lsb_last: \lsb w \ last (to_bl w)\ for w :: \'a::len word\ using nth_to_bl [of \LENGTH('a) - Suc 0\ w] - by (simp add: lsb_odd last_conv_nth) + by (simp add: last_conv_nth bit_0 lsb_odd) lemma is_aligned_to_bl: "is_aligned (w :: 'a :: len word) n = (True \ set (drop (size w - n) (to_bl w)))" @@ -2223,7 +2211,7 @@ text\Like @{thm shiftr_bl}\ lemma sshiftr_bl: "x >>> n \ of_bl (replicate n (msb x) @ take (LENGTH('a) - n) (to_bl x))" for x :: "'a::len word" unfolding word_msb_alt - by (smt (z3) length_to_bl_eq sshiftr_bl_of word_bl.Rep_inverse) + by (smt 
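
bin_nth_bl and word_lsb_last rely on the convention that to_bl lists bits most-significant first, so bit n of w is entry n of the reversed list and the last entry is the least significant bit (compare word_and_1_bl). An informal illustration (Python sketch; to_bl here is an 8-bit model of the Isabelle function):

    # Informal illustration of the reversed-bit-list view (bin_nth_bl, word_lsb_last).
    W = 8

    def to_bl(w):                              # MSB-first list of booleans
        return [(w >> i) & 1 == 1 for i in reversed(range(W))]

    for w in range(1 << W):
        bl = to_bl(w)
        assert bl[-1] == ((w & 1) == 1)                             # word_lsb_last
        for n in range(W):
            assert list(reversed(bl))[n] == ((w >> n) & 1 == 1)     # bin_nth_bl
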
(verit) length_to_bl_eq sshiftr_bl_of word_bl.Rep_inverse) end diff --git a/lib/Word_Lib/Sgn_Abs.thy b/lib/Word_Lib/Sgn_Abs.thy new file mode 100644 index 0000000000..7b0b5c12f7 --- /dev/null +++ b/lib/Word_Lib/Sgn_Abs.thy @@ -0,0 +1,131 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Sgn_Abs + imports Most_significant_bit +begin + +section \@{const sgn} and @{const abs} for @{typ "'a word"}\ + +subsection \Instances\ + +text \@{const sgn} on words returns -1, 0, or 1.\ +instantiation word :: (len) sgn +begin + +definition sgn_word :: \'a word \ 'a word\ where + \sgn w = (if w = 0 then 0 else if 0 + +instance .. + +end + + +(* Simplification setup for sgn on numerals *) + +lemma word_sgn_0[simp]: + "sgn 0 = (0::'a::len word)" + by (simp add: sgn_word_def) + +lemma word_sgn_1[simp]: + "sgn 1 = (1::'a::len word)" + by (simp add: sgn_word_def) + +lemma word_sgn_max_word[simp]: + "sgn (- 1) = (-1::'a::len word)" + by (clarsimp simp: sgn_word_def word_sless_alt) + +lemmas word_sgn_numeral[simp] = sgn_word_def[where w="numeral w" for w] + + +text \@{const abs} on words is the usual definition.\ +instantiation word :: (len) abs +begin + +definition abs_word :: \'a word \ 'a word\ where + \abs w = (if w \s 0 then -w else w)\ + +instance .. + +end + + +(* Simplification setup for abs on numerals *) + +lemma word_abs_0[simp]: + "\0\ = (0::'a::len word)" + by (simp add: abs_word_def) + +lemma word_abs_1[simp]: + "\1\ = (1::'a::len word)" + by (simp add: abs_word_def) + +lemma word_abs_max_word[simp]: + "\- 1\ = (1::'a::len word)" + by (clarsimp simp: abs_word_def word_sle_eq) + +lemma word_abs_msb: + "\w\ = (if msb w then -w else w)" for w::"'a::len word" + by (simp add: abs_word_def msb_word_iff_sless_0 word_sless_eq) + +lemmas word_abs_numeral[simp] = word_abs_msb[where w="numeral w" for w] + + +subsection \Properties\ + +(* Many of these are from linordered_idom, but need a = 0" for a::"'a::len word" + by (simp add: sgn_word_def) + +lemma word_sgn_1_pos: + "1 < LENGTH('a) \ sgn a = 1 \ 0 a sgn a = 1" + by (simp add: sgn_word_def) + +lemma word_sgn_neg[simp]: + "a sgn a = - 1" + by (simp only: word_sgn_1_neg) + +lemma word_abs_sgn: + "\k\ = k * sgn k" for k :: "'a::len word" + unfolding sgn_word_def abs_word_def + by auto + +lemma word_sgn_greater[simp]: + "0 0 a 0 \ \sgn a\ = 1" for a::"'a::len word" + unfolding abs_word_def sgn_word_def + by (clarsimp simp: word_sle_eq) + +lemma word_abs_sgn_eq: + "\sgn a\ = (if a = 0 then 0 else 1)" for a::"'a::len word" + by clarsimp + +lemma word_sgn_mult_self_eq[simp]: + "sgn a * sgn a = of_bool (a \ 0)" for a::"'a::len word" + by (cases "0 + The following specification of division follows ISO C99, which in turn adopted the typical + behavior of hardware modern in the beginning of the 1990ies. + The underlying integer division is named ``T-division'' in \cite{leijen01}. +\ + instantiation word :: (len) signed_division begin @@ -21,10 +27,6 @@ lift_definition signed_modulo_word :: \'a::len word \ 'a word is \\k l. signed_take_bit (LENGTH('a) - Suc 0) k smod signed_take_bit (LENGTH('a) - Suc 0) l\ by (simp flip: signed_take_bit_decr_length_iff) -instance .. 
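
The new Sgn_Abs theory defines sgn and abs on words by the sign of the two's-complement interpretation, so for instance sgn (-1) = -1 and the most negative word is its own absolute value. An informal 8-bit model (Python sketch; sint, sgn and word_abs are illustrative helpers mirroring sgn_word_def, word_abs_msb and word_abs_sgn):

    # Informal 8-bit model of the word sgn/abs definitions above.
    W = 8
    M = (1 << W) - 1

    def sint(w):                               # two's-complement value
        return w - (1 << W) if w >> (W - 1) else w

    def sgn(w):
        return 0 if w == 0 else (1 if sint(w) > 0 else M)   # M is -1 as a word

    def word_abs(w):
        return (-w) & M if sint(w) < 0 else w               # word_abs_msb

    assert sgn(0) == 0 and sgn(5) == 1 and sgn(M) == M      # sgn (-1) = -1
    assert word_abs((-5) & M) == 5 and word_abs(5) == 5
    assert word_abs(1 << (W - 1)) == 1 << (W - 1)           # |most negative| wraps to itself
    for k in range(1 << W):
        assert word_abs(k) == (k * sgn(k)) & M              # word_abs_sgn
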
- -end - lemma sdiv_word_def [code]: \v sdiv w = word_of_int (sint v sdiv sint w)\ for v w :: \'a::len word\ @@ -35,10 +37,22 @@ lemma smod_word_def [code]: for v w :: \'a::len word\ by transfer simp +instance proof + fix v w :: \'a word\ + have \sint v sdiv sint w * sint w + sint v smod sint w = sint v\ + by (fact sdiv_mult_smod_eq) + then have \word_of_int (sint v sdiv sint w * sint w + sint v smod sint w) = (word_of_int (sint v) :: 'a word)\ + by simp + then show \v sdiv w * w + v smod w = v\ + by (simp add: sdiv_word_def smod_word_def) +qed + +end + lemma sdiv_smod_id: \(a sdiv b) * b + (a smod b) = a\ for a b :: \'a::len word\ - by (cases \sint a < 0\; cases \sint b < 0\) (simp_all add: signed_modulo_int_def sdiv_word_def smod_word_def) + by (fact sdiv_mult_smod_eq) lemma signed_div_arith: "sint ((a::('a::len) word) sdiv b) = signed_take_bit (LENGTH('a) - 1) (sint a sdiv sint b)" @@ -59,7 +73,7 @@ lemma word_sdiv_div0 [simp]: lemma smod_word_zero [simp]: \w smod 0 = w\ for w :: \'a::len word\ - by (simp add: smod_word_def signed_modulo_int_def) + by transfer (simp add: take_bit_signed_take_bit) lemma word_sdiv_div1 [simp]: "(a :: ('a::len) word) sdiv 1 = a" @@ -124,11 +138,7 @@ lemma minus_one_smod_word_eq [simp]: lemma smod_word_alt_def: "(a :: ('a::len) word) smod b = a - (a sdiv b) * b" - apply (cases \a \ - (2 ^ (LENGTH('a) - 1)) \ b \ - 1\) - apply (clarsimp simp: smod_word_def sdiv_word_def signed_modulo_int_def - simp flip: wi_hom_sub wi_hom_mult) - apply (clarsimp simp: smod_word_def signed_modulo_int_def) - done + by (simp add: minus_sdiv_mult_eq_smod) lemmas sdiv_word_numeral_numeral [simp] = sdiv_word_def [of \numeral a\ \numeral b\, simplified sint_sbintrunc sint_sbintrunc_neg] @@ -179,8 +189,8 @@ lemma sdiv_word_max: proof (cases \sint a = 0 \ sint b = 0 \ sgn (sint a) \ sgn (sint b)\) case True then show ?thesis apply (auto simp add: sgn_if not_less signed_divide_int_def split: if_splits) - apply (smt (z3) pos_imp_zdiv_neg_iff zero_le_power) - apply (smt (z3) not_exp_less_eq_0_int pos_imp_zdiv_neg_iff) + apply (smt (verit) pos_imp_zdiv_neg_iff zero_le_power) + apply (smt (verit) not_exp_less_eq_0_int pos_imp_zdiv_neg_iff) done next case False @@ -244,4 +254,4 @@ lemmas word_smod_numerals_lhs = smod_word_def[where v="numeral x" for x] lemmas word_smod_numerals = word_smod_numerals_lhs[where w="numeral y" for y] word_smod_numerals_lhs[where w=0] word_smod_numerals_lhs[where w=1] -end \ No newline at end of file +end diff --git a/lib/Word_Lib/Singleton_Bit_Shifts.thy b/lib/Word_Lib/Singleton_Bit_Shifts.thy index 4945d80a86..cfdff12243 100644 --- a/lib/Word_Lib/Singleton_Bit_Shifts.thy +++ b/lib/Word_Lib/Singleton_Bit_Shifts.thy @@ -11,31 +11,40 @@ theory Singleton_Bit_Shifts begin definition shiftl1 :: \'a::len word \ 'a word\ - where \shiftl1 = push_bit 1\ + where shiftl1_eq_double: \shiftl1 = times 2\ lemma bit_shiftl1_iff [bit_simps]: \bit (shiftl1 w) n \ 0 < n \ n < LENGTH('a) \ bit w (n - 1)\ for w :: \'a::len word\ - by (simp only: shiftl1_def bit_push_bit_iff) auto + by (auto simp add: shiftl1_eq_double bit_simps) definition shiftr1 :: \'a::len word \ 'a word\ - where \shiftr1 = drop_bit 1\ + where shiftr1_eq_half: \shiftr1 w = w div 2\ lemma bit_shiftr1_iff [bit_simps]: \bit (shiftr1 w) n \ bit w (Suc n)\ for w :: \'a::len word\ - by (simp add: shiftr1_def bit_drop_bit_eq) + by (simp add: shiftr1_eq_half bit_Suc) definition sshiftr1 :: \'a::len word \ 'a word\ - where \sshiftr1 \ signed_drop_bit 1\ + where sshiftr1_def: \sshiftr1 = signed_drop_bit 1\ lemma 
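
The instance proof above hinges on sdiv_mult_smod_eq: with the ISO C99 "T-division" described earlier (quotient truncated toward zero), v = (v sdiv w) * w + (v smod w) holds for all words, including w = 0, and smod_word_alt_def follows. An informal exhaustive 8-bit check (Python sketch; tdiv is an illustrative truncating division, since Python's // floors):

    # Informal check of the sdiv/smod specification on 8-bit words.
    W = 8
    M = (1 << W) - 1

    def sint(w):
        return w - (1 << W) if w >> (W - 1) else w

    def tdiv(a, b):                            # integer division, truncating toward zero
        q = abs(a) // abs(b)
        return q if (a >= 0) == (b >= 0) else -q

    def sdiv(v, w):
        return (0 if w == 0 else tdiv(sint(v), sint(w))) & M

    def smod(v, w):
        return (sint(v) if w == 0 else sint(v) - tdiv(sint(v), sint(w)) * sint(w)) & M

    for v in range(1 << W):
        for w in range(1 << W):
            assert (sdiv(v, w) * w + smod(v, w)) & M == v    # sdiv_mult_smod_eq
            assert smod(v, w) == (v - sdiv(v, w) * w) & M    # smod_word_alt_def
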
bit_sshiftr1_iff [bit_simps]: \bit (sshiftr1 w) n \ bit w (if n = LENGTH('a) - 1 then LENGTH('a) - 1 else Suc n)\ for w :: \'a::len word\ - by (auto simp add: sshiftr1_def bit_signed_drop_bit_iff) + by (auto simp add: sshiftr1_def bit_simps) -lemma shiftr1_1: "shiftr1 (1::'a::len word) = 0" - by (simp add: shiftr1_def) +lemma shiftl1_def: + \shiftl1 = push_bit 1\ + by (rule ext, rule bit_word_eqI) (simp add: bit_simps mult.commute [of _ 2]) + +lemma shiftr1_def: + \shiftr1 = drop_bit 1\ + by (rule ext, rule bit_word_eqI) (simp add: bit_simps) + +lemma shiftr1_1: + \shiftr1 (1::'a::len word) = 0\ + by (rule bit_word_eqI) (simp add: bit_simps) lemma sshiftr1_eq: \sshiftr1 w = word_of_int (sint w div 2)\ @@ -50,105 +59,108 @@ lemma shiftr1_eq: by (rule bit_word_eqI) (simp add: bit_simps flip: bit_Suc) lemma shiftl1_rev: - "shiftl1 w = word_reverse (shiftr1 (word_reverse w))" + \shiftl1 w = word_reverse (shiftr1 (word_reverse w))\ by (rule bit_word_eqI) (auto simp add: bit_simps Suc_diff_Suc simp flip: bit_Suc) lemma shiftl1_p: - "shiftl1 w = w + w" - for w :: "'a::len word" - by (simp add: shiftl1_def) + \shiftl1 w = w + w\ + by (rule bit_word_eqI) (simp add: bit_simps) lemma shiftr1_bintr: - "(shiftr1 (numeral w) :: 'a::len word) = - word_of_int (take_bit LENGTH('a) (numeral w) div 2)" - by (rule bit_word_eqI) (simp add: bit_simps bit_numeral_iff [where ?'a = int] flip: bit_Suc) + \(shiftr1 (numeral w) :: 'a::len word) = + word_of_int (take_bit LENGTH('a) (numeral w) div 2)\ + apply (rule bit_word_eqI) + apply (simp add: bit_simps flip: bit_Suc) + done lemma sshiftr1_sbintr: - "(sshiftr1 (numeral w) :: 'a::len word) = - word_of_int (signed_take_bit (LENGTH('a) - 1) (numeral w) div 2)" - apply (cases \LENGTH('a)\) - apply simp_all + \(sshiftr1 (numeral w) :: 'a::len word) = + word_of_int (signed_take_bit (LENGTH('a) - 1) (numeral w) div 2)\ apply (rule bit_word_eqI) - apply (auto simp add: bit_simps min_def simp flip: bit_Suc elim: le_SucE) + apply (simp add: bit_simps flip: bit_Suc) + apply transfer + apply auto done lemma shiftl1_wi: - "shiftl1 (word_of_int w) = word_of_int (2 * w)" - by (rule bit_word_eqI) (auto simp add: bit_simps) + \shiftl1 (word_of_int w) = word_of_int (2 * w)\ + by (rule bit_word_eqI) (simp add: bit_simps) lemma shiftl1_numeral: - "shiftl1 (numeral w) = numeral (Num.Bit0 w)" - unfolding word_numeral_alt shiftl1_wi by simp + \shiftl1 (numeral w) = numeral (Num.Bit0 w)\ + by (rule bit_word_eqI) (auto simp add: bit_simps bit_numeral_Bit0_iff) lemma shiftl1_neg_numeral: - "shiftl1 (- numeral w) = - numeral (Num.Bit0 w)" - unfolding word_neg_numeral_alt shiftl1_wi by simp + \shiftl1 (- numeral w) = - numeral (Num.Bit0 w)\ + by (simp add: shiftl1_eq_double) lemma shiftl1_0: - "shiftl1 0 = 0" - by (simp add: shiftl1_def) + \shiftl1 0 = 0\ + by (rule bit_word_eqI) (simp add: bit_simps) lemma shiftl1_def_u: - "shiftl1 w = word_of_int (2 * uint w)" - by (fact shiftl1_eq) + \shiftl1 w = word_of_int (2 * uint w)\ + by (rule bit_word_eqI) (simp add: bit_simps) lemma shiftl1_def_s: - "shiftl1 w = word_of_int (2 * sint w)" - by (simp add: shiftl1_def) + \shiftl1 w = word_of_int (2 * sint w)\ + by (rule bit_word_eqI) (simp add: bit_simps) lemma shiftr1_0: - "shiftr1 0 = 0" - by (simp add: shiftr1_def) + \shiftr1 0 = 0\ + by (rule bit_word_eqI) (simp add: bit_simps) lemma sshiftr1_0: - "sshiftr1 0 = 0" - by (simp add: sshiftr1_def) + \sshiftr1 0 = 0\ + by (rule bit_word_eqI) (simp add: bit_simps) lemma sshiftr1_n1: - "sshiftr1 (- 1) = - 1" - by (simp add: sshiftr1_def) + \sshiftr1 (- 
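
The reworked characterisations shiftl1_eq_double, shiftr1_eq_half and sshiftr1_eq say that shifting by one position is doubling modulo 2^W, unsigned halving, and signed (floor) halving respectively. An informal 8-bit check (Python sketch; the arithmetic shift is modelled by replicating the sign bit):

    # Informal check of the shift-by-one characterisations on 8-bit words.
    W = 8
    M = (1 << W) - 1

    def sint(w):
        return w - (1 << W) if w >> (W - 1) else w

    for w in range(1 << W):
        assert (w << 1) & M == (2 * w) & M                   # shiftl1_eq_double
        assert w >> 1 == w // 2                              # shiftr1_eq_half
        sshiftr1 = ((w >> 1) | (w & (1 << (W - 1)))) & M     # replicate the sign bit
        assert sint(sshiftr1) == sint(w) // 2                # sshiftr1_eq
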
1) = - 1\ + by (rule bit_word_eqI) (auto simp add: bit_simps) lemma uint_shiftr1: - "uint (shiftr1 w) = uint w div 2" + \uint (shiftr1 w) = uint w div 2\ by (rule bit_eqI) (simp add: bit_simps flip: bit_Suc) lemma shiftr1_div_2: - "uint (shiftr1 w) = uint w div 2" + \uint (shiftr1 w) = uint w div 2\ by (fact uint_shiftr1) lemma sshiftr1_div_2: - "sint (sshiftr1 w) = sint w div 2" - by (rule bit_eqI) (auto simp add: bit_simps ac_simps min_def simp flip: bit_Suc elim: le_SucE) + \sint (sshiftr1 w) = sint w div 2\ + by (rule bit_eqI) (auto simp add: bit_simps simp flip: bit_Suc) lemma nth_shiftl1: - "bit (shiftl1 w) n \ n < size w \ n > 0 \ bit w (n - 1)" + \bit (shiftl1 w) n \ n < size w \ n > 0 \ bit w (n - 1)\ by (auto simp add: word_size bit_simps) lemma nth_shiftr1: - "bit (shiftr1 w) n = bit w (Suc n)" + \bit (shiftr1 w) n = bit w (Suc n)\ by (fact bit_shiftr1_iff) -lemma nth_sshiftr1: "bit (sshiftr1 w) n = (if n = size w - 1 then bit w n else bit w (Suc n))" +lemma nth_sshiftr1: + \bit (sshiftr1 w) n = (if n = size w - 1 then bit w n else bit w (Suc n))\ by (auto simp add: word_size bit_simps) lemma shiftl_power: - "(shiftl1 ^^ x) (y::'a::len word) = 2 ^ x * y" - by (induction x) (simp_all add: shiftl1_def) + \(shiftl1 ^^ x) (y::'a::len word) = 2 ^ x * y\ + by (simp add: shiftl1_eq_double funpow_double_eq_push_bit push_bit_eq_mult) lemma le_shiftr1: \shiftr1 u \ shiftr1 v\ if \u \ v\ - using that by (simp add: word_le_nat_alt unat_div div_le_mono shiftr1_def drop_bit_Suc) + using that by (simp add: shiftr1_eq_half word_less_eq_imp_half_less_eq) lemma le_shiftr1': - "\ shiftr1 u \ shiftr1 v ; shiftr1 u \ shiftr1 v \ \ u \ v" - by (meson dual_order.antisym le_cases le_shiftr1) + \u \ v\ if \shiftr1 u \ shiftr1 v\ \shiftr1 u \ shiftr1 v\ + by (rule word_half_less_imp_less_eq) (use that in \simp add: shiftr1_eq_half\) lemma sshiftr_eq_funpow_sshiftr1: \w >>> n = (sshiftr1 ^^ n) w\ - apply (rule sym) - apply (simp add: sshiftr1_def sshiftr_def) - apply (induction n) - apply simp_all - done +proof - + have \sshiftr1 ^^ n = (signed_drop_bit n :: 'a word \ _)\ + by (induction n) (simp_all add: sshiftr1_def) + then show ?thesis + by (simp add: sshiftr_def) +qed end diff --git a/lib/Word_Lib/Word_EqI.thy b/lib/Word_Lib/Word_EqI.thy index ce353d3cee..f8bab24cb6 100644 --- a/lib/Word_Lib/Word_EqI.thy +++ b/lib/Word_Lib/Word_EqI.thy @@ -15,8 +15,12 @@ begin text \ Some word equalities can be solved by considering the problem bitwise for all - @{prop "n < LENGTH('a::len)"}, which is different to running @{text word_bitwise} - and expanding into an explicit list of bits. + @{prop "n < LENGTH('a::len)"}. This is similar to the existing method @{text word_bitwise} + and expanding into an explicit list of bits. The @{text word_bitwise} only works on + concrete word lengths, but can treat a wider number of operators (in particular a mix of + arithmetic, order, and bit operations). The @{text word_eqI} method below works on words of + abstract size (@{typ "'a word"}) and produces smaller, more abstract goals, but does not deal + with arithmetic operations. \ lemmas le_mask_high_bits_len = le_mask_high_bits[unfolded word_size] @@ -49,6 +53,8 @@ lemma test_bit_lenD: "bit x n \ n < LENGTH('a) \ bit x n" for x :: "'a :: len word" by (fastforce dest: test_bit_size simp: word_size) +\ \Method to reduce goals of the form @{prop "P \ x = y"} for words of abstract length to + reasoning on bits of the words. 
Leaves open goal if unsolved.\ method word_eqI uses simp simp_del split split_del cong flip = ((* reduce conclusion to test_bit: *) rule word_eqI_rules, @@ -72,6 +78,8 @@ method word_eqI uses simp simp_del split split_del cong flip = (* helps sometimes, rarely: *) (simp add: simp test_bit_conj_lt del: simp_del flip: flip split: split split del: split_del cong: cong)?) +\ \Method to reduce goals of the form @{prop "P \ x = y"} for words of abstract length to + reasoning on bits of the words. Fails if goal unsolved, but tries harder than @{method word_eqI}.\ method word_eqI_solve uses simp simp_del split split_del cong flip dest = solves \word_eqI simp: simp simp_del: simp_del split: split split_del: split_del cong: cong simp flip: flip; diff --git a/lib/Word_Lib/Word_Lemmas.thy b/lib/Word_Lib/Word_Lemmas.thy index c2dba748be..bef7024e6c 100644 --- a/lib/Word_Lib/Word_Lemmas.thy +++ b/lib/Word_Lib/Word_Lemmas.thy @@ -16,6 +16,7 @@ theory Word_Lemmas Enumeration_Word Aligned Bit_Shifts_Infix_Syntax + Boolean_Inequalities Word_EqI begin @@ -25,6 +26,32 @@ context includes bit_operations_syntax begin +lemma word_max_le_or: + "max x y \ x OR y" for x :: "'a::len word" + by (simp add: word_bool_le_funs) + +lemma word_and_le_min: + "x AND y \ min x y" for x :: "'a::len word" + by (simp add: word_bool_le_funs) + +lemma word_not_le_eq: + "(NOT x \ y) = (NOT y \ x)" for x :: "'a::len word" + by transfer (auto simp: take_bit_not_eq_mask_diff) + +lemma word_not_le_not_eq[simp]: + "(NOT y \ NOT x) = (x \ y)" for x :: "'a::len word" + by (subst word_not_le_eq) simp + +lemma not_min_eq: + "NOT (min x y) = max (NOT x) (NOT y)" for x :: "'a::len word" + unfolding min_def max_def + by auto + +lemma not_max_eq: + "NOT (max x y) = min (NOT x) (NOT y)" for x :: "'a::len word" + unfolding min_def max_def + by auto + lemma ucast_le_ucast_eq: fixes x y :: "'a::len word" assumes x: "x < 2 ^ n" @@ -126,8 +153,8 @@ lemma sshiftr_n1: "-1 >>> n = -1" lemma nth_sshiftr: "bit (w >>> m) n = (n < size w \ (if n + m \ size w then bit w (size w - 1) else bit w (n + m)))" - apply (clarsimp simp add: bit_simps word_size ac_simps not_less) - apply (metis add.commute bit_imp_le_length bit_shiftr_word_iff le_diff_conv not_le) + apply (auto simp add: bit_simps word_size ac_simps not_less) + apply (meson bit_imp_le_length bit_shiftr_word_iff leD) done lemma sshiftr_numeral: @@ -481,8 +508,9 @@ next also have \\ \ unat x < 2 ^ n div 2 ^ y\ using * by (simp add: less_le) finally show ?thesis - using that \x \ 0\ by (simp flip: push_bit_eq_mult drop_bit_eq_div - add: shiftr_def shiftl_def unat_drop_bit_eq word_less_iff_unsigned [where ?'a = nat]) + using that \x \ 0\ + by (simp flip: push_bit_eq_mult drop_bit_eq_div + add: shiftr_def shiftl_def unat_drop_bit_eq word_less_iff_unsigned [where ?'a = nat]) qed qed qed @@ -622,16 +650,13 @@ lemma shiftr1_0_or_1:"(x::('a::len) word) >> 1 = 0 \ x = 0 \ done lemma shiftr1_irrelevant_lsb: "bit (x::('a::len) word) 0 \ x >> 1 = (x + 1) >> 1" - apply (cases \LENGTH('a)\; transfer) - apply (simp_all add: take_bit_drop_bit) - apply (simp add: drop_bit_take_bit drop_bit_Suc) - done + by (auto simp add: bit_0 shiftr_def drop_bit_Suc ac_simps elim: evenE) lemma shiftr1_0_imp_only_lsb:"((x::('a::len) word) + 1) >> 1 = 0 \ x = 0 \ x + 1 = 0" by (metis One_nat_def shiftr1_0_or_1 word_less_1 word_overflow) lemma shiftr1_irrelevant_lsb': "\ (bit (x::('a::len) word) 0) \ x >> 1 = (x + 1) >> 1" - by (metis shiftr1_irrelevant_lsb) + using shiftr1_irrelevant_lsb [of x] by simp (* Perhaps this one should be a 
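
word_max_le_or and word_and_le_min bound max and min by OR and AND, and not_min_eq / not_max_eq record that NOT, being antitone, swaps min and max. An informal exhaustive 8-bit check (Python sketch outside the formal development):

    # Informal check of the min/max vs AND/OR/NOT lemmas on 8-bit words.
    W = 8
    M = (1 << W) - 1

    for x in range(1 << W):
        for y in range(1 << W):
            assert max(x, y) <= (x | y)                        # word_max_le_or
            assert (x & y) <= min(x, y)                        # word_and_le_min
            assert (~min(x, y)) & M == max(~x & M, ~y & M)     # not_min_eq
            assert (~max(x, y)) & M == min(~x & M, ~y & M)     # not_max_eq
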
simp lemma, but it seems a little dangerous. *) lemma cast_chunk_assemble_id: @@ -692,7 +717,8 @@ lemma word_and_notzeroD: lemma shiftr_le_0: "unat (w::'a::len word) < 2 ^ n \ w >> n = (0::'a::len word)" by (auto simp add: take_bit_word_eq_self_iff word_less_nat_alt shiftr_def - simp flip: take_bit_eq_self_iff_drop_bit_eq_0 intro: ccontr) + simp flip: take_bit_eq_self_iff_drop_bit_eq_0 + intro: ccontr) lemma of_nat_shiftl: "(of_nat x << n) = (of_nat (x * 2 ^ n) :: ('a::len) word)" @@ -1320,7 +1346,7 @@ lemma word_two_power_neg_ineq: lemma unat_shiftl_absorb: "\ x \ 2 ^ p; p + k < LENGTH('a) \ \ unat (x :: 'a :: len word) * 2 ^ k = unat (x * 2 ^ k)" - by (smt add_diff_cancel_right' add_lessD1 le_add2 le_less_trans mult.commute nat_le_power_trans + by (smt (verit) add_diff_cancel_right' add_lessD1 le_add2 le_less_trans mult.commute nat_le_power_trans unat_lt2p unat_mult_lem unat_power_lower word_le_nat_alt) lemma word_plus_mono_right_split: @@ -1442,9 +1468,9 @@ lemma mask_shift_sum: "\ a \ b; unat n = unat (p AND mask b) \ \ (p AND NOT(mask a)) + (p AND mask a >> b) * (1 << b) + n = (p :: 'a :: len word)" apply (simp add: shiftl_def shiftr_def flip: push_bit_eq_mult take_bit_eq_mask word_unat_eq_iff) - apply (subst disjunctive_add, clarsimp simp add: bit_simps)+ + apply (subst disjunctive_add, fastforce simp: bit_simps)+ apply (rule bit_word_eqI) - apply (auto simp add: bit_simps) + apply (fastforce simp: bit_simps)[1] done lemma is_up_compose: @@ -1486,7 +1512,7 @@ lemma sint_eq_uint_2pl: lemma pow_sub_less: "\ a + b \ LENGTH('a); unat (x :: 'a :: len word) = 2 ^ a \ \ unat (x * 2 ^ b - 1) < 2 ^ (a + b)" - by (smt (z3) eq_or_less_helperD le_add2 le_eq_less_or_eq le_trans power_add unat_mult_lem unat_pow_le_intro unat_power_lower word_eq_unatI) + by (smt (verit) eq_or_less_helperD le_add2 le_eq_less_or_eq le_trans power_add unat_mult_lem unat_pow_le_intro unat_power_lower word_eq_unatI) lemma sle_le_2pl: "\ (b :: 'a :: len word) < 2 ^ (LENGTH('a) - 1); a \ b \ \ a <=s b" @@ -1519,8 +1545,8 @@ context begin private lemma sbintrunc_uint_ucast: - "Suc n = LENGTH('b::len) \ signed_take_bit n (uint (ucast w :: 'b word)) = signed_take_bit n (uint w)" - by word_eqI + \signed_take_bit n (uint (ucast w :: 'b word)) = signed_take_bit n (uint w)\ if \Suc n = LENGTH('b::len)\ + by (rule bit_eqI) (use that in \auto simp add: bit_simps\) private lemma test_bit_sbintrunc: assumes "i < LENGTH('a)" @@ -1559,10 +1585,7 @@ next apply (rule impI) apply (subst bit_eq_iff) apply (simp add: bit_take_bit_iff bit_signed_take_bit_iff min_def) - apply (auto simp add: Suc_le_eq) - using less_imp_le_nat apply blast - using less_imp_le_nat apply blast - done + by (auto simp add: Suc_le_eq) (meson dual_order.strict_iff_not)+ qed lemma scast_ucast_mask_compare: @@ -1796,11 +1819,7 @@ proof (rule classical) apply (insert sdiv_int_range [where a="sint a" and b="sint b"])[1] apply (clarsimp simp: word_size) apply (insert sdiv_int_range [where a="sint a" and b="sint b"])[1] - apply auto - apply (cases \size a\) - apply simp_all - apply (smt (z3) One_nat_def diff_Suc_1 signed_word_eqI sint_int_min sint_range_size wsst_TYs(3)) - done + by (smt (verit, best) One_nat_def signed_word_eqI sint_greater_eq sint_int_min sint_less wsst_TYs(3)) have result_range_simple: "(sint a sdiv sint b \ ?range) \ ?thesis" apply (insert sdiv_int_range [where a="sint a" and b="sint b"]) diff --git a/lib/Word_Lib/Word_Lemmas_64_Internal.thy b/lib/Word_Lib/Word_Lemmas_64_Internal.thy index 71af9f3fd9..374e624684 100644 --- 
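
unat_shiftl_absorb above says that under the bounds x <= 2^p and p + k < LENGTH('a), multiplying by 2^k cannot wrap around, so it commutes with unat. An informal 8-bit check of the no-overflow claim (Python sketch):

    # Informal check of unat_shiftl_absorb: no wrap-around under the bounds.
    W = 8
    M = (1 << W) - 1

    for p in range(W):
        for k in range(W - p):                 # p + k < W
            for x in range((1 << p) + 1):      # x <= 2^p
                assert (x << k) & M == x * (1 << k)
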
a/lib/Word_Lib/Word_Lemmas_64_Internal.thy +++ b/lib/Word_Lib/Word_Lemmas_64_Internal.thy @@ -57,4 +57,13 @@ lemmas mask_64_id[simp] = mask_len_id[where 'a=64, folded word_bits_def] lemma neq_0_unat: "x \ 0 \ 0 < unat x" for x::machine_word by (simp add: unat_gt_0) +(* The 32-bit version is occasionally needed on 64-bit platforms *) +lemma word_rsplit_0_32: + "word_rsplit (0::32 word) = [0, 0, 0, (0::8 word)]" + by (simp add: word_rsplit_def bin_rsplit_def word_bits_def word_size_def Cons_replicate_eq) + +lemma word_ctz_upcast_id_32_64: + "x \ 0 \ word_ctz (UCAST(32 \ 64) x) = word_ctz x" + by (simp add: word_ctz_upcast_id is_up) + end \ No newline at end of file diff --git a/lib/Word_Lib/Word_Lemmas_Internal.thy b/lib/Word_Lib/Word_Lemmas_Internal.thy index e8730d69ef..909316b470 100644 --- a/lib/Word_Lib/Word_Lemmas_Internal.thy +++ b/lib/Word_Lib/Word_Lemmas_Internal.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause @@ -30,6 +31,13 @@ unbundle bit_operations_syntax unbundle bit_projection_infix_syntax unbundle l4v_word_context +(* Override default word enum code generation setup, so that "value" and "eval" + work with quantification over word. *) +lemma [code]: + \Enum.enum_all P \ list_all P enum\ + \Enum.enum_ex P \ list_ex P enum\ for P :: \'a::len word \ bool\ + by (auto simp: list_all_iff list_ex_iff) + lemmas shiftl_nat_def = push_bit_eq_mult[of _ a for a::nat, folded shiftl_def] lemmas shiftr_nat_def = drop_bit_eq_div[of _ a for a::nat, folded shiftr_def] @@ -655,4 +663,355 @@ lemma FF_eq_minus_1: lemmas shiftl_t2n' = shiftl_eq_mult[where x="w::'a::len word" for w] + +(* candidates for moving to AFP Word_Lib: *) + +lemma word_mask_shift_eqI: + "\ x && mask n = y && mask n; x >> n = y >> n \ \ x = y" + apply (subst mask_or_not_mask[of x n, symmetric]) + apply (subst mask_or_not_mask[of y n, symmetric]) + apply (rule arg_cong2[where f="(OR)"]; blast intro: shiftr_eq_neg_mask_eq) + done + +lemma mask_shiftr_mask_eq: + "m \ m' + n \ (w && mask m >> n) && mask m' = w && mask m >> n" for w :: "'a::len word" + by word_eqI_solve + +lemma mask_split_aligned: + assumes len: "m \ a + len_of TYPE('a)" + assumes align: "is_aligned p a" + shows "(p && ~~ mask m) + (ucast ((ucast (p && mask m >> a))::'a::len word) << a) = p" + apply (insert align[simplified is_aligned_nth]) + apply (subst word_plus_and_or_coroll; word_eqI) + apply (rule iffI) + apply (erule disjE; clarsimp) + apply (case_tac "n < m"; case_tac "n < a") + using len by auto + +lemma mask_split_aligned_neg: + fixes x :: "'a::len word" + fixes p :: "'b::len word" + assumes len: "a + len_of TYPE('a) \ len_of TYPE('b)" + "m = a + len_of TYPE('a)" + assumes x: "x \ ucast (p && mask m >> a)" + shows "(p && ~~ mask m) + (ucast x << a) = p \ False" + apply (subst (asm) word_plus_and_or_coroll) + apply word_eqI + using len apply linarith + apply (insert x) + apply (erule notE) + apply word_eqI + subgoal for n + using len + apply (clarsimp) + apply (drule_tac x="n + a" in spec) + by (clarsimp simp: add.commute) + done + +lemma mask_alignment_ugliness: + "\ x \ x + z && ~~ mask m; + is_aligned (x + z && ~~ mask m) m; + is_aligned x m; + \n \ m. \z !! 
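
word_mask_shift_eqI states that a word is determined by its low n bits together with the bits above n. An informal exhaustive 8-bit check (Python sketch; mask is an illustrative helper):

    # Informal check of word_mask_shift_eqI on 8-bit words.
    W = 8

    def mask(n):
        return (1 << n) - 1

    for x in range(1 << W):
        for y in range(1 << W):
            for n in range(W + 1):
                if (x & mask(n)) == (y & mask(n)) and (x >> n) == (y >> n):
                    assert x == y
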
n\ + \ False" + apply (erule notE) + apply (subst word_plus_and_or_coroll; word_eqI) + apply (meson linorder_not_le) + by (auto simp: le_def) + +lemma word_and_mask_shift_eq_0: + "n \ m \ x && mask n >> m = 0" + by word_eqI + +lemma word_mask_and_neg_shift_eq_0: + "n < m \ - (1 << m) && mask n = 0" + by (metis NOT_mask_AND_mask add.inverse_neutral leD less_imp_le mask_AND_NOT_mask mask_eqs(10) + shiftl_1 t2n_mask_eq_if) + +lemma aligned_mask_plus_bounded: + "\ is_aligned x m; m < n; n < LENGTH('a) \ \ (x && mask n) + 2^m \ 2^n" for x :: "'a::len word" + by (metis add_mask_fold and_mask_less' is_aligned_add_step_le is_aligned_after_mask is_aligned_mask + leD leI order_less_imp_le t2n_mask_eq_if word_less_sub_1) + +lemma aligned_mask_le_mask_minus: + "\ is_aligned x m; m \ n; n < LENGTH('a)\ \ x && mask n \ mask n - mask m" for x :: "'a::len word" + by (metis and_mask_less' is_aligned_after_mask is_aligned_neg_mask_eq' + mask_2pm1 mask_sub neg_mask_mono_le word_less_sub_le) + +lemma shiftr_anti_mono: + "m \ n \ w >> n \ w >> m" for w :: "'a::len word" + apply transfer + apply (simp add: take_bit_drop_bit) + apply (simp add: drop_bit_eq_div zdiv_mono2) + done + +lemma mask_shift_neg_mask_eq: + "\ n' \ n; x \ mask (m+n') \ \ (x && ~~ mask n) && (mask m << n') = x && ~~ mask n" + by word_eqI_solve + +lemma from_bool_inj[simp]: + "(from_bool x = from_bool y) = (x = y)" + unfolding from_bool_def + by (auto split: bool.splits) + +lemma word_le_1_and_idem: + "w \ 1 \ w AND 1 = w" for w :: "_ word" + by (metis word_bw_same(1) word_le_1 word_log_esimps(7)) + +lemma from_to_bool_le_1_idem: + "w \ 1 \ from_bool (to_bool w) = w" + apply (subst word_le_1_and_idem[symmetric], assumption) + apply (simp add: from_to_bool_last_bit) + apply (simp add: word_le_1_and_idem) + done + +lemma and_1_0_not_bit_0: + "(w && 1 = 0) = (\ (w::'a::len word) !! 
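
shiftr_anti_mono says that shifting right by more positions can only make an unsigned word smaller or leave it unchanged. An informal 8-bit check (Python sketch):

    # Informal check of shiftr_anti_mono on 8-bit words.
    W = 8
    for w in range(1 << W):
        for m in range(W + 1):
            for n in range(m, W + 1):          # m <= n
                assert (w >> n) <= (w >> m)
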
0)" + using to_bool_and_1[simplified to_bool_def, where x=w] + by auto + +lemma unat_scast_up: + "\ LENGTH('a) \ LENGTH('b); 0 \ sint x \ \unat (scast x::'b::len word) = unat x" + for x :: "'a :: len word" + apply (simp flip: bit_last_iff not_less) + apply word_eqI + apply (clarsimp simp: min_def split: if_splits) + apply (rule conjI; clarsimp) + apply (drule test_bit_lenD) + apply clarsimp + apply (metis le_antisym nat_le_Suc_less_imp) + apply fastforce + done + +lemma unat_uint_less: + "unat x < nat i \ uint x < i" for x :: "'a :: len word" + by (simp add: zless_nat_eq_int_zless) + +lemma sint_ge_zero_uint: + "uint x < 2 ^ (LENGTH('a) - 1) \ sint (x :: 'a :: len word) \ 0" + by (simp add: sint_eq_uint_2pl word_2p_lem wsst_TYs(3)) + +lemma sint_ge_zero_unat: + "unat x < 2 ^ (LENGTH('a) - 1) \ sint (x :: 'a :: len word) \ 0" + by (fastforce intro: sint_ge_zero_uint unat_uint_less simp: nat_power_eq) + +lemma mask_shiftl_le_mask: + "mask n << m \ (mask (n+m) :: 'a::len word)" + by (subst add.commute) + (rule leq_high_bits_shiftr_low_bits_leq_bits, simp) + +(* This is more useful for getting rid of a downcast *) +lemma unat_ucast_unat_id: + "unat x < 2 ^ LENGTH('b) \ unat (UCAST('a::len \ 'b::len) x) = unat x" + by (simp add: unat_eq_of_nat) + +lemma ucast_up_preserves_gt0: + "\ 0 < x; LENGTH('a) < LENGTH('b) \ \ 0 < (ucast x :: 'b::len word)" for x :: "'a::len word" + by (metis ucast_0 ucast_less_ucast_weak) + +lemma ucast_up_preserves_not0: + "\ x \ 0; LENGTH('a) < LENGTH('b::len) \ \ (ucast x :: 'b::len word) \ 0" + for x :: "'a::len word" + by (metis ucast_0 ucast_ucast_id) + +lemma unat_le_fold: + "n < 2 ^ LENGTH('a) \ (unat x \ n) = (x \ of_nat n)" for x::"'a::len word" + by (simp add: word_le_nat_alt unat_of_nat_eq) + +lemma ucast_and_mask_drop: + "LENGTH('a) \ n \ (ucast w :: 'b::len word) && mask n = ucast w" for w::"'a::len word" + by word_eqI (fastforce dest: test_bit_lenD) + +lemma add_mask_ignore: + "x && mask n = 0 \ v + x && mask n = v && mask n" + by (metis add_0_right mask_eqs(2)) + +lemma word_ctz_upcast_id: + "\ x \ 0; is_up (ucast::'a word \'b word) \ \ + word_ctz (ucast x :: 'b::len word) = word_ctz x" for x :: "'a::len word" + unfolding word_ctz_def + by (simp add: ucast_up_app[where n="LENGTH('b) - LENGTH('a)"] + source_size_def target_size_def is_up_def eq_zero_set_bl) + +lemma ucast_ucast_mask_id: + "\ LENGTH('b::len) < LENGTH('a); n = LENGTH('b) \ \ + UCAST('b \ 'a) (UCAST('a \ 'b) (x && mask n)) = x && mask n" for x :: "'a::len word" + by (simp add: ucast_ucast_len[OF eq_mask_less]) + +lemma msb_le_mono: + fixes v w :: "'a::len word" + shows "v \ w \ msb v \ msb w" + by (simp add: msb_big) + +lemma neg_msb_le_mono: + fixes v w :: "'a::len word" + shows "v \ w \ \ msb w \ \ msb v" + by (simp add: msb_big) + +lemmas msb_less_mono = msb_le_mono[OF less_imp_le] +lemmas neg_msb_less_mono = neg_msb_le_mono[OF less_imp_le] + +lemma word_sless_iff_less: + "\ \ msb v; \ msb w \ \ v v < w" + by (simp add: word_sless_alt sint_eq_uint word_less_alt) + +lemmas word_sless_imp_less = word_sless_iff_less[THEN iffD1, rotated 2] +lemmas word_less_imp_sless = word_sless_iff_less[THEN iffD2, rotated 2] + +lemma to_bool_if: + "(if w \ 0 then 1 else 0) = (if to_bool w then 1 else 0)" + by (auto simp: to_bool_def) + +lemma word_sle_iff_le: + "\ \ msb v; \ msb w \ \ v <=s w \ v \ w" + apply (simp add: word_sle_def sint_eq_uint word_le_def) + by (metis sint_eq_uint word_sle.rep_eq word_sle_eq) + +lemmas word_sle_imp_le = word_sle_iff_le[THEN iffD1, rotated 2] +lemmas word_le_imp_sle = 
word_sle_iff_le[THEN iffD2, rotated 2] + +lemma word_upcast_shiftr: + assumes "LENGTH('a::len) \ LENGTH('b::len)" + shows "UCAST('a \ 'b) (w >> n) = UCAST('a \ 'b) w >> n" + apply (intro word_eqI impI iffI; clarsimp simp: word_size nth_shiftr nth_ucast) + apply (drule test_bit_size) + using assms by (simp add: word_size) + +lemma word_upcast_neg_msb: + "LENGTH('a::len) < LENGTH('b::len) \ \ msb (UCAST('a \ 'b) w)" + unfolding ucast_eq msb_word_of_int + by clarsimp (metis Suc_pred bit_imp_le_length lens_gt_0(2) not_less_eq) + +lemma word_upcast_0_sle: + "LENGTH('a::len) < LENGTH('b::len) \ 0 <=s UCAST('a \ 'b) w" + by (simp add: word_sle_iff_le[OF word_msb_0 word_upcast_neg_msb]) + +lemma scast_ucast_up_eq_ucast: + assumes "LENGTH('a::len) < LENGTH('b::len)" + shows "SCAST('b \ 'c) (UCAST('a \ 'b) w) = UCAST('a \ 'c::len) w" + using assms + apply (subst scast_eq_ucast; simp) + apply (simp only: ucast_eq msb_word_of_int) + apply (metis bin_nth_uint_imp decr_length_less_iff numeral_nat(7) verit_comp_simplify1(3)) + by (metis less_or_eq_imp_le ucast_nat_def unat_ucast_up_simp) + +lemmas not_max_word_iff_less = word_order.not_eq_extremum + +lemma ucast_increment: + assumes "w \ max_word" + shows "UCAST('a::len \ 'b::len) w + 1 = UCAST('a \ 'b) (w + 1)" + apply (cases "LENGTH('b) \ LENGTH('a)") + apply (simp add: ucast_down_add is_down) + apply (subgoal_tac "uint w + 1 < 2 ^ LENGTH('a)") + apply (subgoal_tac "uint w + 1 < 2 ^ LENGTH('b)") + apply (subst word_uint_eq_iff) + apply (simp add: uint_arith_simps uint_up_ucast is_up) + apply (erule less_trans, rule power_strict_increasing, simp, simp) + apply (subst less_diff_eq[symmetric]) + using assms + apply (simp add: not_max_word_iff_less word_less_alt) + apply (erule less_le_trans) + apply simp + done + +lemma max_word_gt_0: + "0 < max_word" + by (simp add: le_neq_trans[OF max_word_max]) + +lemma and_not_max_word: + "m \ max_word \ w && m \ max_word" + by (simp add: not_max_word_iff_less word_and_less') + +lemma mask_not_max_word: + "m < LENGTH('a::len) \ mask m \ (max_word :: 'a word)" + by (simp add: mask_eq_exp_minus_1) + +lemmas and_mask_not_max_word = + and_not_max_word[OF mask_not_max_word] + +lemma shiftr_not_max_word: + "0 < n \ w >> n \ max_word" + by (metis and_mask_eq_iff_shiftr_0 and_mask_not_max_word diff_less len_gt_0 shiftr_le_0 word_shiftr_lt) + +lemma word_sandwich1: + fixes a b c :: "'a::len word" + assumes "a < b" + assumes "b <= c" + shows "0 < b - a \ b - a <= c" + using assms diff_add_cancel order_less_irrefl add_0 word_le_imp_diff_le + word_le_less_eq word_neq_0_conv + by metis + +lemma word_sandwich2: + fixes a b :: "'a::len word" + assumes "0 < a" + assumes "a <= b" + shows "b - a < b" + using assms less_le_trans word_diff_less + by blast + +lemma unat_and_mask_less_2p: + fixes w :: "'a::len word" + shows "m < LENGTH('a) \ unat (w && mask m) < 2 ^ m" + by (simp add: unat_less_helper and_mask_less') + +lemma unat_shiftr_less_2p: + fixes w :: "'a::len word" + shows "n + m = LENGTH('a) \ unat (w >> n) < 2 ^ m" + by (cases "n = 0"; simp add: unat_less_helper shiftr_less_t2n3) + +lemma nat_div_less_mono: + fixes m n :: nat + shows "m div d < n div d \ m < n" + by (meson div_le_mono not_less) + +lemma word_shiftr_less_mono: + fixes w :: "'a::len word" + shows "w >> n < v >> n \ w < v" + by (auto simp: word_less_nat_alt shiftr_div_2n' elim: nat_div_less_mono) + +lemma word_shiftr_less_mask: + fixes w :: "'a::len word" + shows "(w >> n < v >> n) \ (w && ~~mask n < v && ~~mask n)" + by (metis (mono_tags) le_shiftr mask_shift 
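
ucast_increment says that incrementing commutes with a cast as long as w is not the all-ones word, i.e. no wrap-around happens; illustrated here informally for the zero-extending up-cast case only (Python sketch with illustrative widths):

    # Informal check of ucast_increment for an 8 -> 16 bit zero extension.
    LA, LB = 8, 16
    MA, MB = (1 << LA) - 1, (1 << LB) - 1

    for w in range(1 << LA):
        ucast_w = w                                      # zero-extension to LB bits
        if w != MA:                                      # premise: w is not max_word
            assert (ucast_w + 1) & MB == (w + 1) & MA    # ucast w + 1 = ucast (w + 1)
        else:
            assert (ucast_w + 1) & MB != (w + 1) & MA    # at max_word: 0x100 vs 0
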
shiftr_eq_neg_mask_eq word_le_less_eq word_le_not_less) + +lemma word_shiftr_le_mask: + fixes w :: "'a::len word" + shows "(w >> n \ v >> n) \ (w && ~~mask n \ v && ~~mask n)" + by (metis (mono_tags) le_shiftr mask_shift shiftr_eq_neg_mask_eq word_le_less_eq word_le_not_less) + +lemma word_shiftr_eq_mask: + fixes w :: "'a::len word" + shows "(w >> n = v >> n) \ (w && ~~mask n = v && ~~mask n)" + by (metis (mono_tags) mask_shift shiftr_eq_neg_mask_eq) + +lemmas word_shiftr_cmp_mask = + word_shiftr_less_mask word_shiftr_le_mask word_shiftr_eq_mask + +lemma if_if_if_same_output: + "(if c1 then if c2 then t else f else if c3 then t else f) = (if c1 \ c2 \ \c1 \ c3 then t else f)" + by (simp split: if_splits) + +lemma word_le_split_mask: + "(w \ v) \ (w >> n < v >> n \ w >> n = v >> n \ w && mask n \ v && mask n)" + apply (simp add: word_shiftr_eq_mask word_shiftr_less_mask) + apply (rule subst[where P="\c. c \ d = e" for d e, OF AND_NOT_mask_plus_AND_mask_eq[where n=n]]) + apply (rule subst[where P="\c. d \ c = e" for d e, OF AND_NOT_mask_plus_AND_mask_eq[where n=n]]) + by (metis (no_types) Orderings.order_eq_iff and_not_eq_minus_and bit.double_compl linorder_linear + neg_mask_mono_le word_and_le2 word_le_minus_cancel word_not_le + word_plus_and_or_coroll2) + +lemma uint_minus_1_less_le_eq: + "0 < n \ (uint (n - 1) < m) = (uint n \ m)" + by uint_arith + +lemma scast_ucast_up_minus_1_ucast: + assumes "LENGTH('a::len) < LENGTH('b::len)" + assumes "0 < w" + shows "SCAST('b \ 'c) (UCAST('a \ 'b) w - 1) = UCAST('a \ 'c::len) (w - 1)" + using assms + apply (subst scast_eq_ucast; simp) + apply (metis gt0_iff_gem1 msb_less_mono ucast_less_ucast_weak unsigned_0 word_upcast_neg_msb) + by (metis less_le ucast_nat_def ucast_up_preserves_not0 unat_minus_one unat_ucast_up_simp) + end diff --git a/lib/Word_Lib/Word_Lib_Sumo.thy b/lib/Word_Lib/Word_Lib_Sumo.thy index a7457e8ae4..b12d5acdf5 100644 --- a/lib/Word_Lib/Word_Lib_Sumo.thy +++ b/lib/Word_Lib/Word_Lib_Sumo.thy @@ -11,6 +11,7 @@ imports "HOL-Library.Word" Aligned Bit_Comprehension + Bit_Comprehension_Int Bit_Shifts_Infix_Syntax Bits_Int Bitwise_Signed @@ -33,6 +34,7 @@ imports Rsplit Signed_Words Syntax_Bundles + Sgn_Abs Typedef_Morphisms Type_Syntax Word_EqI @@ -129,10 +131,4 @@ notation (input) lemmas cast_simps = cast_simps ucast_down_bl -(* shadows the slightly weaker Word.nth_ucast *) -lemma nth_ucast: - "(ucast (w::'a::len word)::'b::len word) !! n = - (w !! n \ n < min LENGTH('a) LENGTH('b))" - by (auto simp add: bit_simps not_le dest: bit_imp_le_length) - end diff --git a/lib/clib/BitFieldProofsLib.thy b/lib/clib/BitFieldProofsLib.thy index 25556d347a..99132586a7 100644 --- a/lib/clib/BitFieldProofsLib.thy +++ b/lib/clib/BitFieldProofsLib.thy @@ -6,8 +6,8 @@ theory BitFieldProofsLib imports - "Lib.Eisbach_Methods" - TypHeapLib + Eisbach_Tools.Eisbach_Methods + CParser.TypHeapLib begin lemmas guard_simps = diff --git a/lib/clib/CCorresLemmas.thy b/lib/clib/CCorresLemmas.thy index ee723426c4..74870f7ebf 100644 --- a/lib/clib/CCorresLemmas.thy +++ b/lib/clib/CCorresLemmas.thy @@ -5,7 +5,7 @@ *) theory CCorresLemmas -imports CCorres_Rewrite +imports CCorres_Rewrite MonadicRewrite_C begin lemma ccorres_rel_imp2: @@ -345,9 +345,6 @@ lemma ccorres_expand_while_iff: apply (auto elim!: exec_Normal_elim_cases intro: exec.intros) done -abbreviation - "ccorresG rf_sr \ r xf \ ccorres_underlying rf_sr \ r xf r xf" - lemma exec_handlers_Hoare_call_Basic: "\ \s' t x. 
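
word_le_split_mask decomposes unsigned <= lexicographically: first compare the bits above n, then let the low n bits break ties. An informal exhaustive 8-bit check (Python sketch; mask is an illustrative helper):

    # Informal check of word_le_split_mask on 8-bit words.
    W = 8

    def mask(n):
        return (1 << n) - 1

    for w in range(1 << W):
        for v in range(1 << W):
            for n in range(W + 1):
                split = (w >> n) < (v >> n) or \
                        ((w >> n) == (v >> n) and (w & mask(n)) <= (v & mask(n)))
                assert (w <= v) == split
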
s' \ P \ g s' t (ret s' t) \ Q; UNIV \ A \ \ exec_handlers_Hoare \ P (call initfn p ret (\x y. Basic (g x y)) # hs) Q A" @@ -521,7 +518,7 @@ lemma lift_t_super_update: and eu: "export_uinfo s = typ_uinfo_t TYPE('b)" and lp: "lift_t g (h, d) p = Some v'" shows "lift_t g (heap_update (Ptr &(p\f)) v h, d) - = lift_t g (h, d)(p \ field_update (field_desc s) (to_bytes_p v) v')" + = (lift_t g (h, d)) (p \ field_update (field_desc s) (to_bytes_p v) v')" using fl eu lp apply - apply (rule trans [OF lift_t_super_field_update super_field_update_lookup]) @@ -633,6 +630,13 @@ lemma ccorres_liftE: by (fastforce split: xstate.splits simp: liftE_def ccorres_underlying_def bind_def' return_def unif_rrel_def) +lemma ccorres_liftE': + fixes \ + assumes cc: "ccorresG sr \ (r \ Inr) xf P P' hs a c" + shows "ccorresG sr \ r xf P P' hs (liftE a) c" + using cc + by (auto intro!: ccorres_liftE cong: ccorres_context_cong) + lemma ccorres_if_bind: "ccorres_underlying sr Gamm r xf arrel axf G G' hs (if a then (b >>= f) else (c >>= f)) d \ ccorres_underlying sr Gamm r xf arrel axf G G' hs ((if a then b else c) >>= f) d" @@ -874,9 +878,9 @@ proof - qed thus ?thesis using lxs j pn apply (auto simp: init_xs_def word_less_nat_alt neq_Nil_conv unat_word_ariths unat_of_nat push_mods - simp del: unsigned_of_nat elim!: ccorres_guard_imp2 - dest!: spec[where x=Nil]) + dest!: spec[where x=Nil] + cong: ccorres_all_cong) done qed @@ -1036,4 +1040,264 @@ lemma ccorres_Guard_True_Seq: \ ccorres_underlying sr \ r xf arrel axf A C hs a (Guard F \True\ c ;; d)" by (simp, ccorres_rewrite, assumption) +lemma ccorres_While_Normal_helper: + assumes setter_inv: + "\ \ {s'. \rv s. G rv s s'} setter {s'. \rv s. G rv s s' \ (cond_xf s' \ 0 \ Cnd rv s s')}" + assumes body_inv: "\ \ {s'. \rv s. G rv s s' \ Cnd rv s s'} B {s'. \rv s. G rv s s'}" + shows "\ \ ({s'. \rv s. G rv s s' \ (cond_xf s' \ 0 \ Cnd rv s s')}) + While {s'. cond_xf s' \ 0} (Seq B setter) + {s'. \rv s. G rv s s'}" + apply (insert assms) + apply (rule hoare_complete) + apply (simp add: cvalid_def HoarePartialDef.valid_def) + apply (intro allI) + apply (rename_tac xstate xstate') + apply (rule impI) + apply (case_tac "\ isNormal xstate") + apply fastforce + apply (simp add: isNormal_def) + apply (elim exE) + apply (simp add: image_def) + apply (erule exec_While_final_inv''[where C="{s'. cond_xf s' \ 0}" and B="B;; setter"]; clarsimp) + apply (frule intermediate_Normal_state[OF _ _ body_inv]) + apply fastforce + apply clarsimp + apply (rename_tac inter_t) + apply (frule hoarep_exec[OF _ _ body_inv, rotated], fastforce) + apply (frule_tac s=inter_t in hoarep_exec[rotated 2], fastforce+)[1] + apply (metis (mono_tags, lifting) HoarePartial.SeqSwap empty_iff exec_abrupt mem_Collect_eq) + apply (metis (mono_tags, lifting) HoarePartial.SeqSwap exec_stuck mem_Collect_eq) + apply (metis (mono_tags, lifting) HoarePartial.SeqSwap empty_iff exec_fault mem_Collect_eq) + done + +text \ + This rule is intended to be used to show correspondence between a Haskell whileLoop and a C + while loop, where in particular, the C while loop is parsed into a Simpl While loop that + updates the cstate as part of the loop condition. In such a case, the CParser will produce a Simpl + program in the form that is seen in the conclusion of this rule.\ +lemma ccorres_While: + assumes body_ccorres: + "\r. ccorresG srel \ rrel xf + (\s. G r s \ C r s = Some True) (G' \ C' \ {s'. rrel r (xf s')}) [] + (B r) B'" + assumes cond_ccorres: + "\r. ccorresG srel \ (\rv rv'. rv = to_bool rv') cond_xf + (G r) (G' \ {s'. 
rrel r (xf s')}) [] + (gets_the (C r)) cond" + assumes nf: "\r. no_fail (\s. G r s \ C r s = Some True) (B r)" + assumes no_ofail: "\r. no_ofail (G r) (C r)" + assumes body_inv: + "\r. \\s. G r s \ C r s = Some True\ B r \G\" + "\r s. \ \ {s'. s' \ G' \ (s, s') \ srel \ G r s \ rrel r (xf s') + \ s' \ C' \ C r s = Some True} + B' G'" + assumes cond_hoarep: + "\r s. \ \ {s'. s' \ G' \ (s, s') \ srel \ G r s \ rrel r (xf s')} + cond + {s'. s' \ G' \ (cond_xf s' \ 0 \ s' \ C') \ rrel r (xf s')}" + shows + "ccorresG srel \ rrel xf (G r) (G' \ {s'. rrel r (xf s')}) hs + (whileLoop (\r s. the (C r s)) B r) + (cond;; While {s'. cond_xf s' \ 0} (B';; cond))" +proof - + note unif_rrel_def[simp add] to_bool_def[simp add] + have helper: + "\state xstate'. + \ \ \While {s'. cond_xf s' \ 0} (Seq B' cond), Normal state\ \ xstate' \ + \st r s. Normal st = xstate' \ (s, state) \ srel + \ (C r s \ None) \ (cond_xf state \ 0) = the (C r s) + \ rrel r (xf state) \ G r s \ state \ G' \ (cond_xf state \ 0 \ state \ C') + \ (\rv s'. (rv, s') \ fst (whileLoop (\r s. the (C r s)) B r s) + \ (s', st) \ srel \ rrel rv (xf st))" + apply (erule_tac C="{s'. cond_xf s' \ 0}" in exec_While_final_inv''; simp) + apply (fastforce simp: whileLoop_cond_fail return_def) + apply clarsimp + apply (rename_tac t t' t'' r s y) + apply (frule_tac P="{s'. s' \ G' \ (s, s') \ srel \ G r s \ rrel r (xf s') + \ s' \ C' \ (C r s \ None) \ the (C r s)}" + in intermediate_Normal_state) + apply fastforce + apply (fastforce intro: body_inv conseqPre) + apply clarsimp + apply (rename_tac inter_t) + apply (prop_tac "\rv' s'. rrel rv' (xf inter_t) \ (rv', s') \ fst (B r s) + \ (s', inter_t) \ srel") + apply (insert body_ccorres)[1] + apply (drule_tac x=r in meta_spec) + apply (erule (1) ccorresE) + apply fastforce + apply fastforce + using nf apply (fastforce simp: no_fail_def) + apply (fastforce dest!: EHOther) + apply fastforce + apply clarsimp + apply (prop_tac "G rv' s'") + apply (fastforce dest: use_valid intro: body_inv) + apply (prop_tac "inter_t \ G'") + apply (fastforce dest: hoarep_exec[rotated] intro: body_inv) + apply (drule_tac x=rv' in spec) + apply (drule_tac x=s' in spec) + apply (prop_tac "rrel rv' (xf inter_t)") + apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated]) + apply (elim impE) + apply (frule_tac s'=inter_t and r1=rv' in ccorresE_gets_the[OF cond_ccorres]; assumption?) + apply fastforce + apply (fastforce intro: no_ofail) + apply (fastforce dest: EHOther) + apply (intro conjI) + apply fastforce + using no_ofail apply (fastforce elim!: no_ofailD) + apply fastforce + apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated]) + apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated]) + apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated]) + apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated]) + apply (fastforce simp: whileLoop_def intro: whileLoop_results.intros(3)) + done + + have cond_hoarep': + "\r s s' n xstate. + \\\<^sub>h \(cond;; While {s'. cond_xf s' \ 0} (B';; cond)) # hs,s'\ \ (n, xstate) + \ \\ {s' \ G'. (s, s') \ srel \ G r s \ rrel r (xf s')} + cond + {s'. 
(s' \ G' \ (s, s') \ srel \ G r s \ rrel r (xf s')) + \ (cond_xf s' \ 0 \ (s' \ C' \ C r s = Some True))}" + apply (insert cond_ccorres) + apply (drule_tac x=r in meta_spec) + apply (frule_tac s=s in ccorres_to_vcg_gets_the) + apply (fastforce intro: no_ofail) + apply (insert cond_hoarep) + apply (drule_tac x=s in meta_spec) + apply (drule_tac x=r in meta_spec) + apply (rule hoarep_conj_lift_pre_fix) + apply (rule hoarep_conj_lift_pre_fix) + apply (insert cond_hoarep)[1] + apply (fastforce simp: conseq_under_new_pre) + apply (fastforce intro!: hoarep_conj_lift_pre_fix simp: Collect_mono conseq_under_new_pre) + apply (insert cond_hoarep) + apply (drule_tac x=s in meta_spec) + apply (drule_tac x=r in meta_spec) + apply (simp add: imp_conjR) + apply (rule hoarep_conj_lift_pre_fix) + apply (simp add: Collect_mono conseq_under_new_pre) + apply (rule_tac Q'="{s'. C r s \ None \ the (C r s) = (cond_xf s' \ 0)}" + in conseqPost[rotated]) + apply fastforce + apply fastforce + apply (simp add: Collect_mono conseq_under_new_pre) + done + + have cond_inv_guard: + "\r s. \ \ {s'. s' \ G' \ (s, s') \ srel \ G r s \ rrel r (xf s')} + cond + {s'. s' \ G' \ (cond_xf s' \ 0 \ s' \ C')}" + by (fastforce intro: conseqPost cond_hoarep) + + show ?thesis + apply (clarsimp simp: ccorres_underlying_def) + apply (rename_tac s s' n xstate) + apply (frule_tac R'="{s'. s' \ G' \ (s, s') \ srel \ G r s \ rrel r (xf s')}" + and Q'="{s'. \rv s. s' \ G' \ (s, s') \ srel \ G rv s \ rrel rv (xf s')}" + in exec_handlers_use_hoare_nothrow_hoarep) + apply fastforce + apply (rule HoarePartial.Seq) + apply (erule cond_hoarep') + apply (rule conseqPre) + apply (rule ccorres_While_Normal_helper) + apply (fastforce intro!: cond_hoarep' hoarep_ex_lift) + apply (intro hoarep_ex_pre, rename_tac rv new_s) + apply (insert cond_inv_guard)[1] + apply (drule_tac x=new_s in meta_spec) + apply (drule_tac x=rv in meta_spec) + apply (insert body_ccorres)[1] + apply (drule_tac x=rv in meta_spec) + apply (insert body_inv(2))[1] + apply (drule_tac x=new_s in meta_spec) + apply (drule_tac x=rv in meta_spec) + apply (frule_tac s=new_s in ccorres_to_vcg_with_prop) + using nf apply fastforce + using body_inv apply fastforce + apply (rule_tac Q'="{s'. s' \ G' + \ (\(rv, s) \fst (B rv new_s). (s, s') \ srel \ rrel rv (xf s') + \ G rv s)}" + in conseqPost; + fastforce?) + apply (rule hoarep_conj_lift_pre_fix; + fastforce simp: Collect_mono conseq_under_new_pre) + apply fastforce + apply (case_tac xstate; clarsimp) + apply (frule intermediate_Normal_state[OF _ _ cond_hoarep]; assumption?) 
+ apply fastforce + apply clarsimp + apply (rename_tac inter_t) + apply (insert cond_ccorres) + apply (drule_tac x=r in meta_spec) + apply (drule (2) ccorresE_gets_the) + apply fastforce + apply (fastforce intro: no_ofail) + apply (fastforce dest: EHOther) + apply (prop_tac "rrel r (xf inter_t)") + apply (fastforce dest: hoarep_exec[rotated] intro: cond_hoarep) + apply (case_tac "\ the (C r s)") + apply (fastforce elim: exec_Normal_elim_cases simp: whileLoop_cond_fail return_def) + apply (insert no_ofail) + apply (fastforce dest!: helper hoarep_exec[OF _ _ cond_inv_guard, rotated] no_ofailD) + done +qed + +lemmas ccorres_While' = ccorres_While[where C'=UNIV, simplified] + + +\ \simp rules for rewriting common patterns in the return relations\ +lemma ccorres_dc_o_simp[simp]: + "ccorres_underlying srel \ (dc \ f) xf ar axf P Q hs m c + = ccorres_underlying srel \ dc xf ar axf P Q hs m c" + "ccorres_underlying srel \ r xf (dc \ f) axf P Q hs m c + = ccorres_underlying srel \ r xf dc axf P Q hs m c" + by (simp cong: ccorres_all_cong)+ + +lemma ccorres_inl_rrel_inl_rrel[simp]: + "ccorres_underlying srel \ r xf (inl_rrel (inl_rrel ar)) axf P Q hs m c + = ccorres_underlying srel \ r xf (inl_rrel ar) axf P Q hs m c" + by (simp add: inl_rrel_inl_rrel cong: ccorres_all_cong)+ + +lemma ccorres_inr_rrel_Inr[simp]: + "ccorres_underlying srel \ (inr_rrel r \ Inr) xf ar axf P Q hs m c + = ccorres_underlying srel \ r xf ar axf P Q hs m c" + by (simp cong: ccorres_context_cong)+ + +lemma add_remove_return: + "getter >>= setter = do (do val \ getter; setter val; return val od); return () od" + by (simp add: bind_assoc) + +lemma ccorres_call_getter_setter_dc: + assumes cul: "ccorresG sr \ r' xf' P (i ` P') [] getter (Call f)" + and gsr: "\x x' s t rv. + \ (x, t) \ sr; r' rv (xf' t); ((), x') \ fst (setter rv x) \ + \ (x', g s t (clean s t)) \ sr" + and ist: "\x s. (x, s) \ sr \ (x, i s) \ sr" + and ef: "\val. empty_fail (setter val)" + shows "ccorresG sr \ dc xfdc P P' hs + (getter >>= setter) + (call i f clean (\s t. Basic (g s t)))" + apply (rule ccorres_guard_imp) + apply (rule monadic_rewrite_ccorres_assemble[rotated]) + apply (rule monadic_rewrite_is_refl) + apply (rule add_remove_return) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_call_getter_setter) + apply (fastforce intro: cul) + apply (fastforce intro: gsr) + apply (simp add: gsr) + apply (fastforce intro: ist) + apply (fastforce intro: ef) + apply (rule ceqv_refl) + apply (fastforce intro: ccorres_return_Skip) + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply wpsimp + apply fastforce + done + end diff --git a/lib/clib/CCorres_Rewrite.thy b/lib/clib/CCorres_Rewrite.thy index 86455e345b..6fd0f053f7 100644 --- a/lib/clib/CCorres_Rewrite.thy +++ b/lib/clib/CCorres_Rewrite.thy @@ -17,7 +17,7 @@ lemma ccorres_com_eq_hom: elim: ccorres_semantic_equivD2) method ccorres_rewrite declares C_simp C_simp_pre C_simp_simps C_simp_throws - = simpl_rewrite hom: ccorres_com_eq_hom + = (simpl_rewrite hom: ccorres_com_eq_hom, no_name_eta) lemma hoarep_com_eq_hom: "com_eq_hom \ (\c. 
hoarep \ {} F P c Q A)" diff --git a/lib/clib/Corres_UL_C.thy b/lib/clib/Corres_UL_C.thy index 486aeb8eed..7580326cf6 100644 --- a/lib/clib/Corres_UL_C.thy +++ b/lib/clib/Corres_UL_C.thy @@ -11,9 +11,10 @@ theory Corres_UL_C imports - "LemmaBucket_C" - "Lib.LemmaBucket" - "SIMPL_Lemmas" + CParser.LemmaBucket_C + Lib.LemmaBucket + SIMPL_Lemmas + Monads.Nondet_Reader_Option begin declare word_neq_0_conv [simp del] @@ -47,7 +48,7 @@ lemma exec_handlers_use_hoare_nothrow: apply - apply (drule hoare_sound) apply (clarsimp elim: exec_Normal_elim_cases - simp: NonDetMonad.bind_def cvalid_def split_def HoarePartialDef.valid_def) + simp: Nondet_Monad.bind_def cvalid_def split_def HoarePartialDef.valid_def) apply (erule exec_handlers.cases) apply clarsimp apply (drule spec, drule spec, drule (1) mp) @@ -56,6 +57,12 @@ lemma exec_handlers_use_hoare_nothrow: apply simp done +lemma exec_handlers_use_hoare_nothrow_hoarep: + "\E \\<^sub>h \c # hs, s'\ \ (n, t); s' \ R'; E \ R' c Q'\ \ E \ \c, Normal s'\ \ t \ isNormal t" + apply (drule hoare_sound) + apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def) + apply (erule exec_handlers.cases; fastforce simp: isNormal_def isAbr_def) + done definition unif_rrel :: "bool \ ('a \ 'b \ bool) \ ('t \ 'b) @@ -86,6 +93,9 @@ where \ unif_rrel (n = length hs) rrel xf arrel axf r s'') | _ \ False))" +abbreviation + "ccorresG rf_sr \ r xf \ ccorres_underlying rf_sr \ r xf r xf" + declare isNormal_simps [simp] lemma ccorresI [case_names fail nofail]: @@ -162,6 +172,13 @@ lemma ccorresE: apply simp done +lemma ccorresE_gets_the: + "\ccorresG srel \ rrel xf G G' hs (gets_the c) c'; (s, s') \ srel; G s; s' \ G'; no_ofail G c; + \ \\<^sub>h \c' # hs, s'\ \ (n, Normal t')\ + \ (s, t') \ srel \ rrel (the (c s)) (xf t')" + by (fastforce simp: ccorres_underlying_def no_ofail_def unif_rrel_def gets_the_def gets_def + get_def bind_def return_def) + lemma ccorres_empty_handler_abrupt: assumes cc: "ccorres_underlying sr \ rrel xf' arrel axf P P' [] a c" and asms: "(s, s') \ sr" "P s" "s' \ P'" "\ snd (a s)" @@ -271,26 +288,45 @@ lemma ccorres_from_vcg_nofail: apply (rule hoare_complete, simp add: HoarePartialDef.valid_def) done - -lemma ccorres_to_vcg: - "ccorres_underlying srel \ rrel xf arrel axf P P' [] a c \ - (\\. \ snd (a \) \ \ \ {s. P \ \ s \ P' \ (\, s) \ srel} - c - {s. (\(rv, \') \ fst (a \). (\', s) \ srel \ rrel rv (xf s))})" - apply - - apply rule - apply (rule impI) +lemma ccorres_to_vcg_with_prop: + "\ccorres_underlying srel \ rrel xf arrel axf P P' [] a c; no_fail Q a; \P\ a \R\\ + \ \ \ {s'. P s \ Q s \ s' \ P' \ (s, s') \ srel} + c {t'. \(rv, t) \ fst (a s). (t, t') \ srel \ rrel rv (xf t') \ R rv t}" apply (rule hoare_complete) - apply (simp add: HoarePartialDef.valid_def cvalid_def) - apply (intro impI allI) + apply (clarsimp simp: HoarePartialDef.valid_def cvalid_def no_fail_def) + apply (drule_tac x=s in spec) apply clarsimp apply (frule (5) ccorres_empty_handler_abrupt) - apply (erule (4) ccorresE) - apply (erule (1) EHOther) - apply clarsimp - apply rule - apply simp - apply (fastforce simp: unif_rrel_simps) + apply (fastforce elim!: ccorresE EHOther simp: unif_rrel_simps valid_def) + done + +lemma ccorres_to_vcg: + "\ccorres_underlying srel \ rrel xf arrel axf P P' [] a c; no_fail Q a\ + \ \ \ {s'. P s \ Q s \ s' \ P' \ (s, s') \ srel} + c {t'. \(rv, t) \ fst (a s). 
(t, t') \ srel \ rrel rv (xf t')}" + apply (frule (1) ccorres_to_vcg_with_prop[where R="\\"]) + apply wpsimp + apply fastforce + done + +lemma ccorres_to_vcg_Normal: + "\ccorres_underlying srel \ rrel xf arrel axf P P' [] a c; no_fail Q a\ + \ \ \ {s'. P s \ Q s \ s' \ P' \ (s, s') \ srel} c UNIV" + apply (frule (1) ccorres_to_vcg_with_prop[where R="\\" and s=s]) + apply wpsimp + apply (fastforce elim: conseqPost) + done + +lemma ccorres_to_vcg_gets_the: + "\ccorres_underlying srel \ rrel xf arrel axf P P' [] (gets_the r) c; no_ofail P r\ + \ \ \ (P' \ {s'. (s, s') \ srel \ P s}) + c {t'. (s, t') \ srel \ P s \ (r s \ None) \ rrel (the (r s)) (xf t')}" + apply (frule ccorres_to_vcg_with_prop[where R="\_. P" and s=s]) + apply (erule no_ofail_gets_the) + apply wpsimp + apply (clarsimp simp: gets_the_def simpler_gets_def bind_def return_def get_def assert_opt_def + fail_def conseq_under_new_pre + split: option.splits) done lemma exec_handlers_Seq_cases0': @@ -803,6 +839,46 @@ lemma ccorres_call: apply simp done +text \ + This rule is intended to be used in the case where the C calls a function and then uses the + returned value to update a global variable. Typically, the Haskell will split this up into a + getter function followed by a setter function. Note that the getter function may change the + state. + + The extra return statement on the Haskell side allows us to establish a nontrivial return relation + between the values set on the concrete and abstract side. The @{thm bind_assoc_return_reverse} rule + may assist with rewriting statements to add the extra return needed by this rule\ +lemma ccorres_call_getter_setter: + assumes cul: "ccorresG sr \ r' xf' P (i ` P') [] getter (Call f)" + and gsr: "\x x' s t rv rv'. + \ (x, t) \ sr; r' rv (xf' t); (rv', x') \ fst (setter rv x) \ + \ (x', g s t (clean s t)) \ sr" + and res: "\s t rv. r' rv (xf' t) \ r rv (xf (g s t (clean s t)))" + and ist: "\x s. (x, s) \ sr \ (x, i s) \ sr" + and ef: "\val. empty_fail (setter val)" + shows "ccorresG sr \ r xf P P' hs + (do val \ getter; setter val; return val od) + (call i f clean (\s t. Basic (g s t)))" + apply (rule ccorresI') + apply (rename_tac s s' n z) + apply (prop_tac "\ snd (getter s)") + apply (clarsimp simp: bind_def) + apply (erule exec_handlers.cases; clarsimp) + apply (erule exec_call_Normal_elim; simp?) + apply (clarsimp elim!: exec_Normal_elim_cases) + apply (fastforce intro: ccorresE[OF cul ist] EHAbrupt EHEmpty elim: exec.Call) + apply (erule exec_call_Normal_elim; simp?) + apply (rule ccorresE[OF cul ist], simp+) + apply (fastforce intro: EHOther elim: exec.Call) + using ef + apply (fastforce intro: gsr res + simp: unif_rrel_simps bind_def empty_fail_def return_def + elim: exec_Normal_elim_cases) + apply (fastforce intro: ccorresE[OF cul ist] EHOther elim: exec.Call) + apply (fastforce intro: ccorresE[OF cul ist] EHOther elim: exec.Call) + apply (fastforce intro: ccorresE[OF cul ist] EHOther elim: exec.CallUndefined) + done + declare semantic_equivD1 [dest] declare semantic_equivD2 [dest] @@ -988,7 +1064,7 @@ lemma ccorres_liftM_simp [simp]: apply (rule ccorresI') apply simp apply (erule (5) ccorresE) - apply (simp add: liftM_def NonDetMonad.bind_def return_def) + apply (simp add: liftM_def Nondet_Monad.bind_def return_def) apply (erule bexI [rotated]) apply (simp add: unif_rrel_def split: if_split_asm) done @@ -1115,8 +1191,8 @@ lemma ccorres_symb_exec_l: lemma ccorres_symb_exec_l': assumes cc: "\rv. ccorres_underlying sr \ r xf arrel axf (Q rv) G' hs (f rv) c" - and v1: "\s. 
NonDetMonad.valid ((=) s) m (\r. (=) s)" - and v2: "NonDetMonad.valid G m Q" + and v1: "\s. valid ((=) s) m (\r. (=) s)" + and v2: "valid G m Q" and ef: "empty_fail m" shows "ccorres_underlying sr \ r xf arrel axf G G' hs (m >>= (\rv. f rv)) c" apply (rule ccorres_guard_imp) @@ -1127,7 +1203,7 @@ lemma ccorres_symb_exec_l': lemma ccorres_symb_exec_l2: assumes cc: "\rv. ccorres_underlying sr \ r xf arrel axf (Q rv) (Q' rv) hs (f rv) c" and v1: "\s. G s \ exs_valid ((=) s) m (\r. (=) s)" - and v2: "NonDetMonad.valid G m Q" + and v2: "valid G m Q" shows "ccorres_underlying sr \ r xf arrel axf G {s'. \rv s. (s, s') \ sr \ Q rv s \ s' \ Q' rv} hs (m >>= (\rv. f rv)) c" apply (rule ccorresI') apply (frule use_exs_valid [OF v1]) @@ -1345,14 +1421,6 @@ lemma ccorres_move_Guard: section "novcg" -lemma ccorres_to_vcg': - "\ ccorres_underlying srel \ rrel xf arrel axf P P' [] a c; \ snd (a \) \ \ - \\ {s. P \ \ s \ P' \ (\, s) \ srel} c - {s. \(rv, \')\fst (a \). (\', s) \ srel \ rrel rv (xf s)}" - apply (drule ccorres_to_vcg) - apply clarsimp - done - lemma exec_handlers_Hoare_UNIV: "guard_is_UNIV r xf Q \ exec_handlers_Hoare \ UNIV cs (ccHoarePost r xf Q) UNIV" @@ -1662,4 +1730,110 @@ lemma ccorres_grab_asm: ccorres_underlying sr G rr xf ar ax (P and K Q) P' hs f g" by (fastforce simp: ccorres_underlying_def) + +\ \ An experimental cong rule for rewriting everywhere reasonable, with full context. + Can cause problems when there are schematic variables or when one of the return relations + takes a pair as a parameter. \ +lemma ccorres_context_cong_helper': + assumes c: "ccorres_underlying sr \ r xf ar axf P Q hs a c" + assumes "\s. P s = P' s" + \ \Don't use membership equality when rewriting Q, as the LHS can be simplified into something + that is unable to unify with the RHS. + assumes "\s s'. \ (s,s') \ sr; P' s \ \ s' \ Q = (s' \ Q')"\ + assumes "\s s'. \ (s, s') \ sr; P' s \ \ Q = Q'" + assumes "hs = hs'" + assumes "\s s'. \ (s, s') \ sr; P' s; s' \ Q' \ \ a s = a' s" + assumes "\s s' s''. \ (s, s') \ sr; P' s; s' \ Q' \ \ semantic_equiv \ s' s'' c c'" + assumes "\s s' t'. + \ (s, s') \ sr; P' s; s' \ Q'; \ \\<^sub>h \c' # hs', s'\ \ (size hs', Normal t') \ \ + xf t' = xf' t'" + assumes "\x s t s' t'. + \ (s, s') \ sr; P' s; s' \ Q'; (t, t') \ sr; (x, t) \ fst (a' s); + \ \\<^sub>h \c' # hs', s'\ \ (size hs', Normal t') \ \ + r x (xf' t') = r' x (xf' t')" + assumes "\s s' t' n. + \ (s, s') \ sr; P' s; s' \ Q'; n \ size hs'; \ \\<^sub>h \c' # hs', s'\ \ (n, Normal t') \ \ + axf t' = axf' t'" + assumes "\x s t s' t' n. + \ (s, s') \ sr; P' s; s' \ Q'; (t, t') \ sr; (x, t) \ fst (a' s); n \ size hs'; + \ \\<^sub>h \c' # hs', s'\ \ (n, Normal t') \ \ + ar x (axf' t') = ar' x (axf' t')" + shows "ccorres_underlying sr \ r' xf' ar' axf' P' Q' hs' a' c'" + using c + apply - + apply (rule ccorresI') + apply (erule (1) ccorresE) + apply (force simp: assms) + apply (force simp: assms) + apply (force simp: assms) + apply (clarsimp simp: assms) + apply (erule exec_handlers_semantic_equivD2) + apply (force simp: assms) + apply (fastforce simp: unif_rrel_def assms) + done + +lemma ccorres_context_cong_helper: + assumes "\s. P s = P' s" + assumes "\s s'. \ (s, s') \ sr; P' s \ \ Q = Q'" + assumes "hs = hs'" + assumes "\s s'. \ (s, s') \ sr; P' s; s' \ Q' \ \ a s = a' s" + assumes "\s s' s''. \ (s, s') \ sr; P' s; s' \ Q' \ \ semantic_equiv \ s' s'' c c'" + assumes "\s s' t'. + \ (s, s') \ sr; P' s; s' \ Q'; \ \\<^sub>h \c' # hs', s'\ \ (size hs', Normal t') \ \ + xf t' = xf' t'" + assumes "\x s t s' t'. 
+ \ (s, s') \ sr; P' s; s' \ Q'; (t, t') \ sr; (x, t) \ fst (a' s); + \ \\<^sub>h \c' # hs', s'\ \ (size hs', Normal t') \ \ + r x (xf' t') = r' x (xf' t')" + assumes "\s s' t' n. + \ (s, s') \ sr; P' s; s' \ Q'; n \ size hs'; \ \\<^sub>h \c' # hs', s'\ \ (n, Normal t') \ \ + axf t' = axf' t'" + assumes "\x s t s' t' n. + \ (s, s') \ sr; P' s; s' \ Q'; (t, t') \ sr; (x, t) \ fst (a' s); n \ size hs'; + \ \\<^sub>h \c' # hs', s'\ \ (n, Normal t') \ \ + ar x (axf' t') = ar' x (axf' t')" + shows "ccorres_underlying sr \ r xf ar axf P Q hs a c + = ccorres_underlying sr \ r' xf' ar' axf' P' Q' hs' a' c'" + using assms + apply - + apply rule + apply (erule ccorres_context_cong_helper'; assumption) + apply (erule ccorres_context_cong_helper') + by (fastforce simp: semantic_equiv_sym exec_handlers_semantic_equiv[where a=c and b=c'])+ + +lemmas ccorres_context_cong = ccorres_context_cong_helper[OF _ _ _ _ semantic_equivI] + +\ \ Only rewrite guards, the handler stack and function bodies, with context. + This is often more useful, as we generally want the return relations and extraction + functions to be stable while working with a ccorres_underlying statement. \ +lemma ccorres_context_weak_cong: + assumes "\s. P s = P' s" + assumes "\s s'. \ (s, s') \ sr; P' s \ \ Q = Q'" + assumes "\s s'. \ (s, s') \ sr; P' s; s' \ Q' \ \ a s = a' s" + assumes "\s s' s''. \ (s, s') \ sr; P' s; s' \ Q' \ \ \\ \c,Normal s'\ \ s'' = \\ \c',Normal s'\ \ s''" + shows "ccorres_underlying sr \ r xf ar axf P Q hs a c + = ccorres_underlying sr \ r xf ar axf P' Q' hs a' c'" + by (clarsimp simp: assms cong: ccorres_context_cong) + +\ \ Even more restrictive: only rewrite the abstract monad. \ +lemma ccorres_abstract_cong: + "\ \s s'. \ (s, s') \ sr; P s ; s' \ P' \ \ a s = b s \ \ + ccorres_underlying sr G r xf ar axf P P' hs a c + = ccorres_underlying sr G r xf ar axf P P' hs b c" + by (clarsimp cong: ccorres_context_weak_cong) + +\ \ Rewrite almost everywhere, without context. This should behave the same as with normal + term rewriting with no cong rule, except it will not rewrite the state relation or function + environment. \ +lemma ccorres_all_cong: + "\ r=r'; xf=xf'; ar=ar'; axf=axf'; P=P'; Q=Q'; hs=hs'; m=m'; c=c' \ \ + ccorres_underlying srel \ r xf ar axf P Q hs m c + = ccorres_underlying srel \ r' xf' ar' axf' P' Q' hs' m' c'" + by (simp cong: ccorres_context_cong) + +\ \ Only rewrite guards, the handler stack and function bodies, without context. + We make this the default behaviour, so that the the return relations and extraction + functions are stable under simplification. \ +lemmas ccorres_weak_cong = ccorres_all_cong[OF refl refl refl refl, cong] + end diff --git a/lib/clib/MonadicRewrite_C.thy b/lib/clib/MonadicRewrite_C.thy index c4de4cc0b8..81760b73ae 100644 --- a/lib/clib/MonadicRewrite_C.thy +++ b/lib/clib/MonadicRewrite_C.thy @@ -14,12 +14,12 @@ begin lemma monadic_rewrite_ccorres_assemble: assumes cc: "ccorres_underlying sr G r xf ar axf P P' hs f c" - assumes mr: "monadic_rewrite True False Q g f" + assumes mr: "monadic_rewrite F E Q g f" shows "ccorres_underlying sr G r xf ar axf (P and Q) P' hs g c" proof - have snd: "\s. \ Q s; \ snd (g s) \ \ \ snd (f s)" using mr - by (simp add: monadic_rewrite_def) + by (fastforce simp: monadic_rewrite_def) have fst: "\s v. 
\ Q s; \ snd (g s); v \ fst (f s) \ \ v \ fst (g s)" using mr diff --git a/lib/clib/SIMPL_Lemmas.thy b/lib/clib/SIMPL_Lemmas.thy index 6f4d592886..6cc9034016 100644 --- a/lib/clib/SIMPL_Lemmas.thy +++ b/lib/clib/SIMPL_Lemmas.thy @@ -72,6 +72,7 @@ lemma hoarep_Int: apply fastforce done +lemmas hoarep_Int_pre_fix = hoarep_Int[where P=P and P'=P for P, simplified] lemma Normal_result: "\ \ \c, s\ \ Normal t' \ \t. s = Normal t" @@ -350,5 +351,62 @@ lemma hoarep_revert: apply simp done +lemma intermediate_Normal_state: + "\\ \ \Seq c\<^sub>1 c\<^sub>2, Normal t\ \ t''; t \ P; \ \ P c\<^sub>1 Q\ + \ \t'. \ \ \c\<^sub>1, Normal t\ \ Normal t' \ \ \ \c\<^sub>2, Normal t'\ \ t''" + apply (erule exec_Normal_elim_cases(8)) + apply (insert hoarep_exec) + apply fastforce + done + +lemma hoarep_ex_pre: + "(\x. \ \ {s. P x s} c Q) \ \ \ {s. \x. P x s} c Q" + apply (rule hoare_complete) + apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def) + apply (fastforce dest: hoarep_exec'[rotated]) + done + +lemma hoarep_ex_lift: + "(\x. \ \ {s. P x s} c {s. Q x s}) \ \ \ {s. \x. P x s} c {s. \x. Q x s}" + apply (rule hoare_complete) + apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def) + apply (rename_tac s x) + apply (drule_tac x=x in meta_spec) + apply (prop_tac "s \ Collect (P x)") + apply fastforce + apply (frule (2) hoarep_exec) + apply fastforce + done + +lemma hoarep_conj_lift_pre_fix: + "\\ \ P c {s. Q s}; \ \ P c {s. Q' s}\ + \ \ \ P c {s. Q s \ Q' s}" + apply (rule hoare_complete) + apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def) + apply (frule (2) hoarep_exec[where Q="Collect Q"]) + apply (frule (2) hoarep_exec[where Q="Collect Q'"]) + apply fastforce + done + +lemma exec_While_final_inv'': + "\ \ \ \b, x\ \ s'; b = While C B; x = Normal s; + \s. s \ C \ I s (Normal s); + \t t' t''. \ t \ C; \\ \B, Normal t\ \ Normal t'; \\ \While C B, Normal t'\ \ t''; + I t' t'' \ \ I t t''; + \t t'. \ t \ C; \\ \B, Normal t\ \ Abrupt t' \ \ I t (Abrupt t'); + \t. \ t \ C; \ \ \B, Normal t\ \ Stuck \ \ I t Stuck; + \t f. \ t \ C; \\ \B, Normal t\ \ Fault f \ \ I t (Fault f) \ + \ I s s'" + apply (induct arbitrary: s rule: exec.induct; simp) + apply (erule exec_elim_cases; fastforce simp: exec.WhileTrue exec.WhileFalse) + done + +lemma While_inv_from_body: + "\ \ (G \ C) B G \ \ \ G While C B G" + apply (drule hoare_sound)+ + apply (rule hoare_complete) + apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def) + by (erule exec_While_final_inv''[where I="\s s'. 
s \ G \ s' \ Normal ` G", THEN impE], + fastforce+) end diff --git a/lib/clib/SimplRewrite.thy b/lib/clib/SimplRewrite.thy index 952c8a1116..92e0df1a90 100644 --- a/lib/clib/SimplRewrite.thy +++ b/lib/clib/SimplRewrite.thy @@ -246,7 +246,7 @@ lemma com_initial_guards_extra_simps[simp]: "com_initial_guards (cbreak exn_upd) = UNIV" "com_initial_guards (ccatchbrk exn) = UNIV" by (simp_all add: whileAnno_def creturn_def creturn_void_def - call_def block_def cbreak_def ccatchbrk_def) + call_def block_def block_exn_def cbreak_def ccatchbrk_def) lemmas com_initial_guards_all_simps = com_initial_guards.simps com_initial_guards_extra_simps @@ -274,7 +274,7 @@ lemma com_final_guards_extra_simps[simp]: "com_final_guards S (cbreak exn_upd) = UNIV" "com_final_guards S (ccatchbrk exn) = UNIV" by (simp_all add: whileAnno_def creturn_def creturn_void_def - call_def block_def cbreak_def ccatchbrk_def) + call_def block_def block_exn_def cbreak_def ccatchbrk_def) lemmas com_final_guards_all_simps = com_final_guards.simps com_final_guards_extra_simps @@ -501,7 +501,7 @@ lemma exec_statefn_simulates_call: \s t. f (ret1 s t) = ret2 (f s) (f t); \s t. exec_statefn_simulates f UNIV T (save1 s t) (save2 (f s) (f t)) \ \ exec_statefn_simulates f S T (call init1 c ret1 save1) (call init2 c ret2 save2)" - apply (simp add: call_def block_def) + apply (simp add: call_def block_def block_exn_def) apply (intro exec_statefn_simulates_Seq exec_statefn_simulates_Catch exec_statefn_simulates_DynCom exec_statefn_simulates_Basic exec_statefn_simulates_Call diff --git a/lib/clib/Simpl_Rewrite.thy b/lib/clib/Simpl_Rewrite.thy index 62d229f3a4..90ceb4cead 100644 --- a/lib/clib/Simpl_Rewrite.thy +++ b/lib/clib/Simpl_Rewrite.thy @@ -10,8 +10,8 @@ text \A simple proof method for rewriting Simpl programs under a predicate theory Simpl_Rewrite imports "Simpl-VCG.Vcg" - "Lib.Eisbach_Methods" - "Lib.Apply_Debug" + "Eisbach_Tools.Eisbach_Methods" + "Eisbach_Tools.Apply_Debug" begin text \One layer of context around a Simpl program. @@ -460,7 +460,7 @@ text \Methods to automate rewriting.\ method do_rewrite uses hom ruleset declares C_simp_simps = (rule com_ctxt_focus_rewrite[OF hom], rule ruleset, - #break "simpl_rewrite_rewrite", (simp add: C_simp_simps; fail))+ + #break "simpl_rewrite_rewrite", (not_visible \simp add: C_simp_simps\; fail))+ method rewrite_pre uses hom declares C_simp_pre C_simp_simps = (do_rewrite hom: hom ruleset: C_simp_pre) diff --git a/lib/concurrency/Atomicity_Lib.thy b/lib/concurrency/Atomicity_Lib.thy index 5ecdccbe07..7801d68bfe 100644 --- a/lib/concurrency/Atomicity_Lib.thy +++ b/lib/concurrency/Atomicity_Lib.thy @@ -4,17 +4,17 @@ * SPDX-License-Identifier: BSD-2-Clause *) theory Atomicity_Lib - -imports "Prefix_Refinement" - + imports + Prefix_Refinement + Monads.Trace_Det begin -text \This library introduces a number of proofs about the question of -atomicity refinement, particularly in combination with the existing -prefix refinement notion. It introduces an additional notion of refinement -which left-composes with prefix refinement and can be used to rearrange -operations around interference points. -\ +text \ + This library introduces a number of proofs about the question of + atomicity refinement, particularly in combination with the existing + prefix refinement notion. 
It introduces an additional notion of refinement + which left-composes with prefix refinement and can be used to rearrange + operations around interference points.\ abbreviation "interferences \ repeat interference" @@ -26,31 +26,25 @@ lemma triv_refinement_Await_env_steps: apply simp done -lemmas prefix_refinement_env_steps_Await - = prefix_refinement_triv_refinement_conc[OF - prefix_refinement_env_steps triv_refinement_Await_env_steps] +lemmas prefix_refinement_env_steps_Await = + prefix_refinement_triv_refinement_conc[OF prefix_refinement_env_steps triv_refinement_Await_env_steps] lemma pfx_refn_interferences: - " env_stable AR R sr iosr (\t. True) - \ prefix_refinement sr iosr iosr (\\) (\\) (\\) AR R interferences interferences" + "env_stable AR R sr iosr (\_. True) + \ prefix_refinement sr iosr iosr dc AR R \\ \\ interferences interferences" apply (rule prefix_refinement_repeat) apply (erule prefix_refinement_interference) - apply wp - apply simp - apply wp - apply simp + apply wp+ done lemma repeat_n_validI: - "\I\,\R\ f \G\,\\_. I\ - \ \I\,\R\ repeat_n n f \G\,\\_. I\" + "\I\,\R\ f \G\,\\_. I\ \ \I\,\R\ repeat_n n f \G\,\\_. I\" apply (induct n) apply wpsimp+ done lemma repeat_validI: - "\I\,\R\ f \G\,\\_. I\ - \ \I\,\R\ repeat f \G\,\\_. I\" + "\I\,\R\ f \G\,\\_. I\ \ \I\,\R\ repeat f \G\,\\_. I\" apply (simp add: repeat_def) apply (wpsimp wp: repeat_n_validI) done @@ -58,9 +52,9 @@ lemma repeat_validI: lemma interferences_twp[wp]: "\\s0 s. (\s'. R\<^sup>*\<^sup>* s s' \ Q () s' s') \ G s0 s \ reflp G \ Q () s0 s\,\R\ interferences \G\,\Q\" (is "\?P\,\R\ ?f \G\,\?Q\") - apply (rule validI_strengthen_post, rule repeat_validI) + apply (rule rg_strengthen_post, rule repeat_validI) apply wp - apply (clarsimp simp: reflpD[where r=G]) + apply (clarsimp simp: reflpD[where R=G]) apply (metis rtranclp_trans) apply simp done @@ -70,29 +64,27 @@ lemma repeat_pre_triv_refinement[simplified]: apply (simp add: repeat_def select_early) apply (rule triv_refinement_select_concrete_All; clarsimp) apply (rule_tac x="Suc x" in triv_refinement_select_abstract_x; simp) - apply (rule triv_refinement_refl) done lemma repeat_none_triv_refinement: "triv_refinement (repeat f) (return ())" apply (simp add: repeat_def) apply (rule_tac x="0" in triv_refinement_select_abstract_x; simp) - apply (rule triv_refinement_refl) done -lemmas repeat_triv_refinement_consume_1 - = triv_refinement_trans[OF triv_refinement_mono_bind(1), - OF repeat_pre_triv_refinement, simplified bind_assoc, - OF triv_refinement_mono_bind(2), simplified] +lemmas repeat_triv_refinement_consume_1 = + triv_refinement_trans[OF triv_refinement_mono_bind(1), + OF repeat_pre_triv_refinement, simplified bind_assoc, + OF triv_refinement_mono_bind(2), simplified] -lemmas repeat_one_triv_refinement - = repeat_triv_refinement_consume_1[where b=return and d=return, - simplified, OF repeat_none_triv_refinement] +lemmas repeat_one_triv_refinement = + repeat_triv_refinement_consume_1[where b=return and d=return, simplified, + OF repeat_none_triv_refinement] schematic_goal prefix_refinement_interferences_split: - "prefix_refinement sr isr osr rvr P Q AR R ?aprog cprog - \ prefix_refinement sr isr osr rvr P Q AR R - (do y <- interferences; aprog od) cprog" + "prefix_refinement sr isr osr rvr AR R P Q ?aprog cprog + \ prefix_refinement sr isr osr rvr AR R P Q + (do y <- interferences; aprog od) cprog" apply (rule prefix_refinement_triv_refinement_abs) apply (rule triv_refinement_mono_bind) apply (rule triv_refinement_trans) @@ -101,21 +93,18 @@ schematic_goal 
prefix_refinement_interferences_split: apply (simp add: bind_assoc) done -text \Suppressing interference points. The constant below discards -the self actions within a trace and filters out traces in which the -environment acts. This reduces both env_steps and interference to -noops. -\ +text \ + Suppressing interference points. The constant below discards + the self actions within a trace and filters out traces in which the + environment acts. This reduces both env_steps and interference to + noops.\ -definition - detrace :: "('s, 'a) tmonad \ ('s, 'a) tmonad" -where - "detrace f = (\s. (\(tr, res). ([], res)) - ` (f s \ ({tr. Env \ fst ` set tr} \ {res. res \ Incomplete})))" +definition detrace :: "('s, 'a) tmonad \ ('s, 'a) tmonad" where + "detrace f = + (\s. (\(tr, res). ([], res)) ` (f s \ ({tr. Env \ fst ` set tr} \ {res. res \ Incomplete})))" lemma detrace_UN: - "detrace (\s. \x \ S s. f x s) - = (\s. \x \ S s. detrace (f x) s)" + "detrace (\s. \x \ S s. f x s) = (\s. \x \ S s. detrace (f x) s)" apply (simp add: detrace_def) apply (rule ext; fastforce) done @@ -153,13 +142,13 @@ lemma detrace_select[simp]: by (rule ext, auto simp add: select_def detrace_def image_image) lemma detrace_put_trace_elem: - "detrace (put_trace_elem (tmid, s)) = (if tmid = Env - then (\_. {}) else return ())" + "detrace (put_trace_elem (tmid, s)) = + (if tmid = Env then (\_. {}) else return ())" by (simp add: put_trace_elem_def detrace_def return_def) lemma detrace_put_trace: - "detrace (put_trace xs) = (if Env \ fst ` set xs - then (\_. {}) else return ())" + "detrace (put_trace xs) = + (if Env \ fst ` set xs then (\_. {}) else return ())" apply (induct xs; simp) apply (clarsimp simp: detrace_bind detrace_put_trace_elem) apply (simp add: bind_def) @@ -186,7 +175,7 @@ lemma repeat_n_nothing: lemma repeat_nothing: "repeat (\_. {}) = return ()" by (simp add: repeat_def bind_def select_def repeat_n_nothing - Sigma_def if_fun_lift UN_If_distrib return_def + Sigma_def if_distribR UN_If_distrib return_def cong del: image_cong_simp) lemma detrace_env_steps: @@ -200,21 +189,18 @@ lemma detrace_interference: apply (simp add: bind_def get_def) done -text \Decomposition of environment and program actions by strict -separation, possibly relevant for ``recovering'' atomicity.\ +text \ + Decomposition of environment and program actions by strict separation, possibly relevant for + ``recovering'' atomicity.\ lemma equivp_compare_f: "equivp (\x y. f x = f y)" by (simp add: equivp_def fun_eq_iff, metis) -definition - fst_split_eq :: "('s \ ('e \ 'p)) \ ('s \ 's \ bool)" -where +definition fst_split_eq :: "('s \ ('e \ 'p)) \ ('s \ 's \ bool)" where "fst_split_eq f = (\s s'. fst (f s) = fst (f s'))" -definition - snd_split_eq :: "('s \ ('e \ 'p)) \ ('s \ 's \ bool)" -where +definition snd_split_eq :: "('s \ ('e \ 'p)) \ ('s \ 's \ bool)" where "snd_split_eq f = (\s s'. snd (f s) = snd (f s'))" lemma equivp_split_eqs: @@ -222,18 +208,17 @@ lemma equivp_split_eqs: "equivp (snd_split_eq f)" by (simp_all add: fst_split_eq_def snd_split_eq_def equivp_compare_f) -text \One way of defining the "diamond" pattern in which two state -changes commute. Depends on a way of splitting the state into domains, -in which state changes can be observed to impact only certain domains. -This can define a unique way of reordering operations that impact -disjoint sets of domains.\ +text \ + One way of defining the "diamond" pattern in which two state + changes commute. 
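(* A concrete instance of this commuting pattern, for illustration only: the state type,
   domain type and the constants x0, y0 here are hypothetical and are not part of this patch.
   Take 's = 'a \<times> 'b and 'd = bool, with the domain split that projects out each component,

     ds (x, y) True  = (x, y0)          ds (x, y) False = (x0, y)

   for some fixed x0 and y0.  Starting from s = (x, y), an update of the first component to
   sa = (x', y) and an update of the second component to sb = (x, y') commute, and
   sab = (x', y') completes the diamond; by domains_complete and diamond_unique below it is
   the only state that does so. *)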
Depends on a way of splitting the state into domains, + in which state changes can be observed to impact only certain domains. + This can define a unique way of reordering operations that impact + disjoint sets of domains.\ type_synonym ('s, 'd) domain_split = "'s \ 'd \ 's" -definition - dom_s_match :: "('s, 'd) domain_split \ 'd set \ 's \ 's \ bool" -where +definition dom_s_match :: "('s, 'd) domain_split \ 'd set \ 's \ 's \ bool" where "dom_s_match ds D s s' = (\d \ D. ds s' d = ds s d)" lemma dom_s_match_refl: @@ -248,15 +233,12 @@ lemma dom_s_match_equivp: done lemma dom_s_match_mono: - "dom_s_match ds D s s' \ D' \ D - \ dom_s_match ds D' s s'" + "\dom_s_match ds D s s'; D' \ D\ \ dom_s_match ds D' s s'" by (auto simp add: dom_s_match_def) -definition - diamond :: "('s, 'd) domain_split \ 's \ 's \ 's \ 's \ bool" -where - "diamond ds s sa sb sab = (\d. (ds sab d = ds sa d \ ds sb d = ds s d) - \ (ds sab d = ds sb d \ ds sa d = ds s d))" +definition diamond :: "('s, 'd) domain_split \ 's \ 's \ 's \ 's \ bool" where + "diamond ds s sa sb sab = + (\d. (ds sab d = ds sa d \ ds sb d = ds s d) \ (ds sab d = ds sb d \ ds sa d = ds s d))" lemma diamond_flips: "diamond ds s sa sb sab \ diamond ds sb sab s sa" @@ -267,35 +249,28 @@ lemma diamond_diag_flip: "diamond ds s sa sb sab \ diamond ds s sb sa sab" by (simp add: diamond_def, metis) -definition - domains_complete :: "('s, 'd) domain_split \ bool" -where +definition domains_complete :: "('s, 'd) domain_split \ bool" where "domains_complete ds = (\s s'. (\d. ds s d = ds s' d) \ s = s')" lemmas domains_completeD = domains_complete_def[THEN iffD1, rule_format] lemma diamond_unique: - "domains_complete ds \ diamond ds s sa sb sab - \ diamond ds s sa sb sab' \ sab = sab'" + "\domains_complete ds; diamond ds s sa sb sab; diamond ds s sa sb sab'\ \ sab = sab'" apply (erule domains_completeD) apply (simp add: diamond_def) apply metis done lemma diamond_uniques_other: - "domains_complete ds \ diamond ds s sa sb sab - \ diamond ds s sa sb' sab \ sb = sb'" - "domains_complete ds \ diamond ds s sa sb sab - \ diamond ds s sa' sb sab \ sa = sa'" - "domains_complete ds \ diamond ds s sa sb sab - \ diamond ds s' sa sb sab \ s = s'" + "\domains_complete ds; diamond ds s sa sb sab; diamond ds s sa sb' sab\ \ sb = sb'" + "\domains_complete ds; diamond ds s sa sb sab; diamond ds s sa' sb sab\ \ sa = sa'" + "\domains_complete ds; diamond ds s sa sb sab; diamond ds s' sa sb sab\ \ s = s'" by (metis diamond_unique diamond_flips)+ lemmas diamond_uniques = diamond_unique diamond_uniques_other lemma dom_s_match_diamond: - "dom_s_match ds D s sa \ diamond ds s sa sb sab - \ dom_s_match ds D sb sab" + "\dom_s_match ds D s sa; diamond ds s sa sb sab\ \ dom_s_match ds D sb sab" apply (simp add: dom_s_match_def diamond_def) apply metis done @@ -309,28 +284,26 @@ lemma diamond_trans_eq: by (simp add: fun_eq_iff, metis diamond_trans diamond_flips) text \ -A notion of refinement by traces related under a state relation. Simpler -than @{term prefix_refinement}, and left-composes with -@{term prefix_refinement}. - -We'll use this notion to show how the concrete side of a @{term prefix_refinement} -hypothesis can be reordered to better match its specification, in particular -how interference points can be moved. -\ - -definition - rel_tr_refinement :: "('s \ 's \ bool) \ ('s \ bool) \ 's rg_pred - \ bool \ ('s, 'a) tmonad \ ('s, 'a) tmonad \ bool" -where - "rel_tr_refinement sr P R commit f g = (\tr res s s0. 
P s - \ (tr, res) \ f s \ rely_cond R s0 tr \ (commit \ s0 = s) - \ (\tr'. (tr', res) \ g s \ rely_cond R s0 tr' - \ list_all2 (rel_prod (=) sr) tr tr'))" + A notion of refinement by traces related under a state relation. Simpler + than @{term prefix_refinement}, and left-composes with + @{term prefix_refinement}. + + We'll use this notion to show how the concrete side of a @{term prefix_refinement} + hypothesis can be reordered to better match its specification, in particular + how interference points can be moved.\ + +definition rel_tr_refinement :: + "('s \ 's \ bool) \ ('s \ bool) \ 's rg_pred \ bool \ ('s, 'a) tmonad \ ('s, 'a) tmonad \ bool" + where + "rel_tr_refinement sr P R commit f g = + (\tr res s s0. P s + \ (tr, res) \ f s \ rely_cond R s0 tr \ (commit \ s0 = s) + \ (\tr'. (tr', res) \ g s \ rely_cond R s0 tr' + \ list_all2 (rel_prod (=) sr) tr tr'))" lemma rely_cond_equiv_s: - "rely_cond R s0 tr - \ (\s. tr \ [] \ last tr = (Env, s) \ R s0 s \ R s0' s) - \ rely_cond R s0' tr" + "\rely_cond R s0 tr; \s. tr \ [] \ last tr = (Env, s) \ R s0 s \ R s0' s\ + \ rely_cond R s0' tr" apply (cases tr rule: rev_cases) apply simp apply (clarsimp simp: rely_cond_append rely_cond_def[where tr="Cons x xs" for x xs]) @@ -339,23 +312,19 @@ lemma rely_cond_equiv_s: lemmas rel_tr_refinementD = rel_tr_refinement_def[THEN iffD1, rule_format] lemma rel_tr_refinement_refl: - "reflp sr - \ rel_tr_refinement sr P R C f f" + "reflp sr \ rel_tr_refinement sr P R C f f" apply (clarsimp simp: rel_tr_refinement_def) apply (intro exI, rule conjI, assumption) apply (simp add: list_all2_same rel_prod_sel reflpD) done lemma rel_tr_refinement_drop_C: - "rel_tr_refinement sr P R False f g - \ rel_tr_refinement sr P R C f g" + "rel_tr_refinement sr P R False f g \ rel_tr_refinement sr P R C f g" by (clarsimp simp: rel_tr_refinement_def) lemma rel_tr_refinement_trans: - "transp sr - \ rel_tr_refinement sr P R C f g - \ rel_tr_refinement sr P R C g h - \ rel_tr_refinement sr P R C f h" + "\transp sr; rel_tr_refinement sr P R C f g; rel_tr_refinement sr P R C g h\ + \ rel_tr_refinement sr P R C f h" apply (subst rel_tr_refinement_def, clarsimp) apply (drule(3) rel_tr_refinementD, clarsimp+) apply (drule(3) rel_tr_refinementD, clarsimp+) @@ -367,7 +336,7 @@ lemma rel_tr_refinement_trans: lemma list_all2_matching_tr_pfx: "list_all2 (rel_prod (=) (\cs cs'. \as. sr as cs = sr as cs')) tr tr' - \ matching_tr_pfx sr atr tr = matching_tr_pfx sr atr tr'" + \ matching_tr_pfx sr atr tr = matching_tr_pfx sr atr tr'" apply (simp add: matching_tr_pfx_def list_all2_lengthD matching_tr_def) apply (intro conj_cong; simp?) apply (clarsimp simp: list_all2_conv_all_nth rel_prod_sel split_def) @@ -375,19 +344,19 @@ lemma list_all2_matching_tr_pfx: done lemma is_matching_fragment_list_all2: - "is_matching_fragment sr osr rvr tr' res s0 R s f - \ list_all2 (rel_prod (=) (\cs cs'. \as. sr as cs = sr as cs')) tr tr' - \ is_matching_fragment sr osr rvr tr res s0 R s f" + "\is_matching_fragment sr osr rvr tr' res s0 R s f; + list_all2 (rel_prod (=) (\cs cs'. \as. sr as cs = sr as cs')) tr tr'\ + \ is_matching_fragment sr osr rvr tr res s0 R s f" apply (clarsimp simp: is_matching_fragment_def) apply (subst(asm) list_all2_is_me[symmetric], assumption, simp) apply (simp add: list_all2_matching_tr_pfx list_all2_lengthD) done lemma pfx_refinement_use_rel_tr_refinement: - "rel_tr_refinement tr_r Q R False g g' - \ \s t t'. tr_r t t' \ sr s t = sr s t' - \ prefix_refinement sr isr osr rvr P Q' AR R f g' - \ prefix_refinement sr isr osr rvr P (\s0. 
Q and Q' s0) AR R f g" + "\rel_tr_refinement tr_r Q R False g g'; + \s t t'. tr_r t t' \ sr s t = sr s t'; + prefix_refinement sr isr osr rvr AR R P Q' f g'\ + \ prefix_refinement sr isr osr rvr AR R P (\s0. Q and Q' s0) f g" apply (subst prefix_refinement_def, clarsimp) apply (drule(3) rel_tr_refinementD, simp) apply clarsimp @@ -400,25 +369,21 @@ lemma pfx_refinement_use_rel_tr_refinement: done lemma pfx_refinement_use_rel_tr_refinement_equivp: - "rel_tr_refinement sr Q R False g g' - \ equivp sr - \ prefix_refinement sr isr osr rvr P Q' AR R f g' - \ prefix_refinement sr isr osr rvr P (\s0. Q and Q' s0) AR R f g" + "\rel_tr_refinement sr Q R False g g'; equivp sr; + prefix_refinement sr isr osr rvr AR R P Q' f g'\ + \ prefix_refinement sr isr osr rvr AR R P (\s0. Q and Q' s0) f g" apply (erule pfx_refinement_use_rel_tr_refinement, simp_all) apply (metis equivpE sympD transpD) done -definition - not_env_steps_first :: "('s, 'a) tmonad \ bool" -where +definition not_env_steps_first :: "('s, 'a) tmonad \ bool" where "not_env_steps_first f = (\tr res s. (tr, res) \ f s \ tr \ [] \ fst (last tr) = Me)" lemmas not_env_steps_firstD = not_env_steps_first_def[THEN iffD1, rule_format] lemma not_env_steps_first_bind: - "not_env_steps_first f - \ \x. not_env_steps_first (g x) - \ not_env_steps_first (do x \ f; g x od)" + "\not_env_steps_first f; \x. not_env_steps_first (g x)\ + \ not_env_steps_first (do x \ f; g x od)" apply (subst not_env_steps_first_def, clarsimp) apply (erule elem_bindE) apply (simp add: not_env_steps_firstD) @@ -428,7 +393,7 @@ lemma not_env_steps_first_bind: lemma not_env_steps_first_no_trace: "no_trace f \ not_env_steps_first f" - by (fastforce simp add: not_env_steps_first_def dest: no_trace_emp) + by (fastforce simp: not_env_steps_first_def dest: no_trace_emp) lemma not_env_steps_first_interference: "not_env_steps_first interference" @@ -437,8 +402,7 @@ lemma not_env_steps_first_interference: apply (clarsimp simp: not_env_steps_first_def) done -lemmas not_env_steps_first_simple - = no_trace_all[THEN not_env_steps_first_no_trace] +lemmas not_env_steps_first_simple = no_trace_terminal[THEN not_env_steps_first_no_trace] lemma not_env_steps_first_repeat_n: "not_env_steps_first f \ not_env_steps_first (repeat_n n f)" @@ -446,18 +410,17 @@ lemma not_env_steps_first_repeat_n: lemma not_env_steps_first_repeat: "not_env_steps_first f \ not_env_steps_first (repeat f)" - by (simp add: repeat_def not_env_steps_first_bind - not_env_steps_first_repeat_n not_env_steps_first_simple) + by (simp add: repeat_def not_env_steps_first_bind not_env_steps_first_repeat_n + not_env_steps_first_simple) -lemmas not_env_steps_first_all = not_env_steps_first_interference - not_env_steps_first_bind[rule_format] not_env_steps_first_repeat_n - not_env_steps_first_repeat not_env_steps_first_simple +lemmas not_env_steps_first_all = + not_env_steps_first_interference not_env_steps_first_bind[rule_format] + not_env_steps_first_repeat_n not_env_steps_first_repeat not_env_steps_first_simple lemma rel_tr_refinement_bind_left_general: - "reflp sr - \ (\x. not_env_steps_first (h x)) \ (\s s' t. sr s s' \ R s t = R s' t) - \ rel_tr_refinement sr P R C f g - \ rel_tr_refinement sr P R C (f >>= (\x. h x)) (g >>= h)" + "\reflp sr; (\x. not_env_steps_first (h x)) \ (\s s' t. sr s s' \ R s t = R s' t); + rel_tr_refinement sr P R C f g\ + \ rel_tr_refinement sr P R C (f >>= (\x. 
h x)) (g >>= h)" apply (subst rel_tr_refinement_def, clarsimp) apply (erule elem_bindE) apply (drule(3) rel_tr_refinementD, simp) @@ -470,8 +433,7 @@ lemma rel_tr_refinement_bind_left_general: apply (simp add: image_def) apply (strengthen bexI[mk_strg I _ E] | simp)+ apply (simp add: list_all2_append rely_cond_append - list_all2_same reflpD[where r=sr] rel_prod_sel - split del: if_split) + list_all2_same reflpD[where R=sr] rel_prod_sel) apply (erule rely_cond_equiv_s) apply (erule disjE) apply (drule spec, drule(2) not_env_steps_firstD) @@ -480,45 +442,39 @@ lemma rel_tr_refinement_bind_left_general: split: if_split_asm) done -lemmas rel_tr_refinement_bind_left - = rel_tr_refinement_bind_left_general[OF _ disjI1] +lemmas rel_tr_refinement_bind_left = rel_tr_refinement_bind_left_general[OF _ disjI1] lemma rel_tr_refinement_bind_right_general: - "reflp sr - \ \x. rel_tr_refinement sr Q R C' (g x) (h x) - \ \\s0 s. (C \ s0 = s) \ P s\,\R\ f - \\_ _. True\,\\_ s0 s. (C' \ s0 = s) \ Q s\ - \ rel_tr_refinement sr P R C (f >>= (\x. g x)) (f >>= h)" + "\reflp sr; \x. rel_tr_refinement sr Q R C' (g x) (h x); + \\s0 s. (C \ s0 = s) \ P s\,\R\ f \\_ _. True\,\\_ s0 s. (C' \ s0 = s) \ Q s\\ + \ rel_tr_refinement sr P R C (f >>= (\x. g x)) (f >>= h)" apply (subst rel_tr_refinement_def, clarsimp) apply (erule elem_bindE) apply (clarsimp simp: bind_def) apply (strengthen bexI[mk_strg I _ E], simp) - apply (auto simp: list_all2_same reflpD[where r=sr])[1] + apply (auto simp: list_all2_same reflpD[where R=sr])[1] apply (clarsimp simp: rely_cond_append) apply (drule validI_D, erule(1) conjI, assumption+, clarsimp) apply (drule spec, drule(3) rel_tr_refinementD, - simp add: hd_append hd_map split: if_split_asm) + simp add: hd_append hd_map split: if_split_asm) apply (clarsimp simp: bind_def) apply (simp add: image_def) apply (strengthen bexI[mk_strg I _ E] | simp)+ apply (simp add: list_all2_append list_all2_lengthD) - apply (simp add: rely_cond_append list_all2_same reflpD[where r=sr] rel_prod_sel - split del: if_split) + apply (simp add: rely_cond_append list_all2_same reflpD[where R=sr] rel_prod_sel) done -lemmas validI_triv' = validI_weaken_pre[OF validI_triv, simplified] -lemmas rel_tr_refinement_bind_right - = rel_tr_refinement_bind_right_general[where C'=False, simplified] +lemmas rel_tr_refinement_bind_right = + rel_tr_refinement_bind_right_general[where C'=False, simplified] lemma rel_tr_refinement_comm_repeat_n[simplified K_bind_def]: - "equivp sr - \ rel_tr_refinement sr P R C (do f; g od) (do x \ g; f; return x od) - \ not_env_steps_first f \ (\s s' t. sr s s' \ R s t = R s' t) - \ \\s0 s. (C \ s0 = s) \ P s\,\R\ f - \\_ _. True\,\\_ s0 s. (C \ s0 = s) \ P s\ - \ rel_tr_refinement sr P R C - (do repeat_n n f; g od) - (do x \ g; repeat_n n f; return x od)" + "\equivp sr; + rel_tr_refinement sr P R C (do f; g od) (do x \ g; f; return x od); + not_env_steps_first f \ (\s s' t. sr s s' \ R s t = R s' t); + \\s0 s. (C \ s0 = s) \ P s\,\R\ f \\_ _. True\,\\_ s0 s. (C \ s0 = s) \ P s\\ + \ rel_tr_refinement sr P R C + (do repeat_n n f; g od) + (do x \ g; repeat_n n f; return x od)" apply (induct n) apply simp apply (rule rel_tr_refinement_refl) @@ -529,40 +485,37 @@ lemma rel_tr_refinement_comm_repeat_n[simplified K_bind_def]: apply (rule rel_tr_refinement_bind_right_general[rule_format]) apply (metis equivpE) apply assumption - apply (rule validI_weaken_pre, wp repeat_n_validI) - apply simp + apply (wpsimp wp: repeat_n_validI) apply (drule_tac h="\x. 
do f; return x od" - in rel_tr_refinement_bind_left_general[rotated 2]) + in rel_tr_refinement_bind_left_general[rotated 2]) apply (metis equivpE) apply (auto intro!: not_env_steps_first_all)[1] apply (simp add: bind_assoc) done lemma rel_tr_refinement_comm_repeat[simplified K_bind_def]: - "equivp sr - \ rel_tr_refinement sr P R C (do f; g od) (do x \ g; f; return x od) - \ not_env_steps_first f \ (\s s' t. sr s s' \ R s t = R s' t) - \ \\s0 s. (C \ s0 = s) \ P s\,\R\ f - \\_ _. True\,\\_ s0 s. (C \ s0 = s) \ P s\ - \ rel_tr_refinement sr P R C - (do repeat f; g od) - (do x \ g; repeat f; return x od)" + "\equivp sr; + rel_tr_refinement sr P R C (do f; g od) (do x \ g; f; return x od); + not_env_steps_first f \ (\s s' t. sr s s' \ R s t = R s' t); + \\s0 s. (C \ s0 = s) \ P s\,\R\ f \\_ _. True\,\\_ s0 s. (C \ s0 = s) \ P s\\ + \ rel_tr_refinement sr P R C + (do repeat f; g od) + (do x \ g; repeat f; return x od)" apply (simp add: repeat_def select_early bind_assoc) apply (rule rel_tr_refinement_bind_right_general[rule_format]) apply (metis equivpE) apply (erule(1) rel_tr_refinement_comm_repeat_n, simp+) - apply (rule validI_weaken_pre, wp, simp) + apply wpsimp done lemma rel_tr_refinement_rev_comm_repeat_n[simplified K_bind_def]: - "equivp sr - \ rel_tr_refinement sr P R C (do x \ g; f; return x od) (do f; g od) - \ not_env_steps_first f \ (\s s' t. sr s s' \ R s t = R s' t) - \ \\s0 s. (C \ s0 = s) \ P s\,\R\ f - \\_ _. True\,\\_ s0 s. (C \ s0 = s) \ P s\ - \ rel_tr_refinement sr P R C - (do x \ g; repeat_n n f; return x od) - (do repeat_n n f; g od)" + "\equivp sr; + rel_tr_refinement sr P R C (do x \ g; f; return x od) (do f; g od); + not_env_steps_first f \ (\s s' t. sr s s' \ R s t = R s' t); + \\s0 s. (C \ s0 = s) \ P s\,\R\ f \\_ _. True\,\\_ s0 s. (C \ s0 = s) \ P s\\ + \ rel_tr_refinement sr P R C + (do x \ g; repeat_n n f; return x od) + (do repeat_n n f; g od)" apply (induct n) apply simp apply (rule rel_tr_refinement_refl) @@ -574,29 +527,27 @@ lemma rel_tr_refinement_rev_comm_repeat_n[simplified K_bind_def]: apply (rule rel_tr_refinement_bind_right_general[rule_format]) apply (metis equivpE) apply assumption - apply (rule validI_weaken_pre, wp repeat_n_validI) - apply simp + apply (wpsimp wp: repeat_n_validI) apply (drule_tac h="\x. do f; return x od" - in rel_tr_refinement_bind_left_general[rotated 2]) + in rel_tr_refinement_bind_left_general[rotated 2]) apply (metis equivpE) apply (auto intro!: not_env_steps_first_all)[1] apply (simp add: bind_assoc) done lemma rel_tr_refinement_rev_comm_repeat[simplified K_bind_def]: - "equivp sr - \ rel_tr_refinement sr P R C (do x \ g; f; return x od) (do f; g od) - \ not_env_steps_first f \ (\s s' t. sr s s' \ R s t = R s' t) - \ \\s0 s. (C \ s0 = s) \ P s\,\R\ f - \\_ _. True\,\\_ s0 s. (C \ s0 = s) \ P s\ - \ rel_tr_refinement sr P R C - (do x \ g; repeat f; return x od) - (do repeat f; g od)" + "\equivp sr; + rel_tr_refinement sr P R C (do x \ g; f; return x od) (do f; g od); + not_env_steps_first f \ (\s s' t. sr s s' \ R s t = R s' t); + \\s0 s. (C \ s0 = s) \ P s\,\R\ f \\_ _. True\,\\_ s0 s. 
(C \ s0 = s) \ P s\\ + \ rel_tr_refinement sr P R C + (do x \ g; repeat f; return x od) + (do repeat f; g od)" apply (simp add: repeat_def select_early bind_assoc) apply (rule rel_tr_refinement_bind_right_general[rule_format]) apply (metis equivpE) apply (erule(1) rel_tr_refinement_rev_comm_repeat_n, simp+) - apply (rule validI_weaken_pre, wp, simp) + apply wpsimp done lemma alternative_distrib_lhs_bind: @@ -604,16 +555,18 @@ lemma alternative_distrib_lhs_bind: by (simp add: bind_def alternative_def) lemma shuttle_modify_commit_step[simplified K_bind_def]: - "\s. sr s (f s) \ rel_tr_refinement sr P R C - (do x \ commit_step; modify f od) (do x \ modify f; commit_step od)" + "\s. sr s (f s) + \ rel_tr_refinement sr P R C + (do x \ commit_step; modify f od) (do x \ modify f; commit_step od)" apply (simp add: rel_tr_refinement_def commit_step_def put_trace_elem_def bind_def get_def return_def modify_def put_def) apply (simp add: rely_cond_def) done lemma shuttle_gets_commit_step[simplified K_bind_def]: - "reflp sr \ rel_tr_refinement sr P R C - (do x \ commit_step; gets f od) (do x \ gets f; commit_step; return x od)" + "reflp sr + \ rel_tr_refinement sr P R C + (do x \ commit_step; gets f od) (do x \ gets f; commit_step; return x od)" apply (simp add: rel_tr_refinement_def commit_step_def put_trace_elem_def bind_def get_def return_def gets_def) apply (simp add: rely_cond_def reflpD) @@ -624,9 +577,9 @@ lemma shuttle_modify_interference[simplified K_bind_def]: and P_stable: "\s t. P s \ R s t \ P t" and R: "\s0 s. P s0 \ R s0 s \ R (f s0) (f s)" shows - "rel_tr_refinement sr P R C - (do interference; modify f od) - (do modify f; interference od)" + "rel_tr_refinement sr P R C + (do interference; modify f od) + (do modify f; interference od)" proof - have list_all2_map: "\xs. list_all2 (rel_prod (=) sr) xs (map (apsnd f) xs)" @@ -647,8 +600,7 @@ proof - apply (clarsimp simp: rel_tr_refinement_def) apply (clarsimp simp: bind_def commit_step_def get_def return_def put_trace_elem_def modify_def put_def - interference_def env_steps_def select_def - ) + interference_def env_steps_def select_def) apply (erule disjE; clarsimp) apply (simp add: put_trace_eq_drop) apply (clarsimp; split if_split_asm) @@ -681,24 +633,23 @@ lemma rshuttle_modify_interference[simplified K_bind_def]: and P_stable: "\s t. P s \ R s t \ P t" and R: "\s0 s. R (f s0) s \ P s0 \ (\s_pre. s = f s_pre \ R s0 s_pre)" shows - "rel_tr_refinement sr P R C - (do modify f; interference od) - (do interference; modify f od)" + "rel_tr_refinement sr P R C + (do modify f; interference od) + (do interference; modify f od)" proof - - have last_st_tr: "\s ss. last_st_tr (map (Pair Env \ f) ss) (f s) - = f (last_st_tr (map (Pair Env) ss) s)" + have last_st_tr: + "\s ss. 
last_st_tr (map (Pair Env \ f) ss) (f s) = f (last_st_tr (map (Pair Env) ss) s)" by (simp add: last_st_tr_def hd_append hd_map) { fix s ss s' - have rely_cond_P_stable[rule_format]: - "P s \ rely_cond R s (map (Pair Env) ss) - \ s' \ set (ss @ [s]) \ P s'" - apply (induct ss arbitrary: s' rule: list.induct) - apply simp - apply clarsimp - apply (clarsimp simp: rely_cond_Cons_eq P_stable) - apply (erule P_stable[rule_format, rotated]) - apply (case_tac x2; simp add: last_st_tr_in_set) - done + have rely_cond_P_stable[rule_format]: + "P s \ rely_cond R s (map (Pair Env) ss) \ s' \ set (ss @ [s]) \ P s'" + apply (induct ss arbitrary: s' rule: list.induct) + apply simp + apply clarsimp + apply (clarsimp simp: rely_cond_Cons_eq P_stable) + apply (erule P_stable[rule_format, rotated]) + apply (case_tac x2; simp add: last_st_tr_in_set) + done } note rely_cond_P_stable = this have rely_cond_ex: "\s ss. rely_cond R (f s) (map (Pair Env) ss) \ P s @@ -715,8 +666,7 @@ proof - apply (clarsimp simp: rel_tr_refinement_def) apply (clarsimp simp: bind_def commit_step_def get_def return_def put_trace_elem_def modify_def put_def - interference_def env_steps_def select_def - ) + interference_def env_steps_def select_def) apply (erule disjE; clarsimp) apply (clarsimp simp: put_trace_eq_drop) apply (split if_split_asm) @@ -744,30 +694,26 @@ proof - qed lemma shuttle_gets_env_step[simplified K_bind_def]: - "reflp sr \ \s t. P s \ R s t \ f s = f t - \ rel_tr_refinement sr P R True - (do x \ env_step; gets f od) (do x \ gets f; env_step; return x od)" + "\reflp sr; \s t. P s \ R s t \ f s = f t\ + \ rel_tr_refinement sr P R True + (do x \ env_step; gets f od) (do x \ gets f; env_step; return x od)" apply (simp add: rel_tr_refinement_def env_step_def select_def put_trace_elem_def bind_def get_def return_def gets_def put_def) apply (clarsimp simp: rely_cond_def reflpD) done -lemmas prefix_closed_interference[simp] = interference_twp[THEN validI_prefix_closed] - lemma env_step_twp[wp]: "\\s0 s. (\s'. R s0 s' \ Q () s' s')\,\R\ env_step \G\,\Q\" apply (simp add: env_step_def) - apply (rule validI_weaken_pre) - apply (wp put_trace_elem_twp) + apply (wp put_trace_elem_twp) apply (clarsimp simp: rely_cond_def drop_Cons' guar_cond_def) done lemma shuttle_modify_interferences[simplified K_bind_def]: - "equivp sr \ \s. sr s (f s) \ \s t. P s \ R s t \ R (f s) (f t) - \ not_env_steps_first g - \ \s t. P s \ R\<^sup>*\<^sup>* s t \ P t - \ rel_tr_refinement sr P R C - (do x \ interferences; modify f; g od) (do x \ modify f; interferences; g od)" + "\equivp sr; \s. sr s (f s); \s t. P s \ R s t \ R (f s) (f t); + not_env_steps_first g; \s t. P s \ R\<^sup>*\<^sup>* s t \ P t\ + \ rel_tr_refinement sr P R C + (do x \ interferences; modify f; g od) (do x \ modify f; interferences; g od)" apply (simp only: bind_assoc[symmetric]) apply (rule rel_tr_refinement_bind_left_general) apply (metis equivpE) @@ -777,20 +723,19 @@ lemma shuttle_modify_interferences[simplified K_bind_def]: apply assumption apply (rule shuttle_modify_interference, (simp add: r_into_rtranclp)+) apply (simp add: not_env_steps_first_interference) - apply (rule validI_weaken_pre, wp, simp) + apply wpsimp done -lemmas shuttle_modify_interferences_flat - = shuttle_modify_interferences[where g="return ()", simplified] +lemmas shuttle_modify_interferences_flat = + shuttle_modify_interferences[where g="return ()", simplified] lemma rshuttle_modify_interferences[simplified K_bind_def]: - "equivp sr \ \s. sr (f s) s - \ \s0 s. R (f s0) s \ P s0 \ (\s_pre. 
s = f s_pre \ R s0 s_pre) - \ not_env_steps_first g - \ \s t. P s \ R\<^sup>*\<^sup>* s t \ P t - \ rel_tr_refinement sr P R C - (do x \ modify f; interferences; g od) - (do x \ interferences; modify f; g od)" + "\equivp sr; \s. sr (f s) s; + \s0 s. R (f s0) s \ P s0 \ (\s_pre. s = f s_pre \ R s0 s_pre); + not_env_steps_first g; \s t. P s \ R\<^sup>*\<^sup>* s t \ P t\ + \ rel_tr_refinement sr P R C + (do x \ modify f; interferences; g od) + (do x \ interferences; modify f; g od)" apply (simp only: bind_assoc[symmetric]) apply (rule rel_tr_refinement_bind_left_general) apply (metis equivpE) @@ -800,16 +745,14 @@ lemma rshuttle_modify_interferences[simplified K_bind_def]: apply assumption apply (rule rshuttle_modify_interference, (simp add: r_into_rtranclp)+) apply (simp add: not_env_steps_first_interference) - apply (rule validI_weaken_pre, wp) - apply simp + apply wpsimp done lemma shuttle_gets_interference[simplified K_bind_def]: - "equivp sr \ \s t. P s \ R s t \ f s = f t - \ (\s s' t. sr s s' \ R s t = R s' t) - \ \s t. P s \ R\<^sup>*\<^sup>* s t \ P t - \ rel_tr_refinement sr P R C - (do x \ interference; gets f od) (do x \ gets f; interference; return x od)" + "\equivp sr; \s t. P s \ R s t \ f s = f t; + \s s' t. sr s s' \ R s t = R s' t; \s t. P s \ R\<^sup>*\<^sup>* s t \ P t\ + \ rel_tr_refinement sr P R C + (do x \ interference; gets f od) (do x \ gets f; interference; return x od)" apply (simp add: interference_def bind_assoc env_steps_repeat) apply (rule rel_tr_refinement_trans) apply (metis equivpE) @@ -821,23 +764,22 @@ lemma shuttle_gets_interference[simplified K_bind_def]: apply (metis equivpE) apply simp apply simp - apply (rule validI_weaken_pre, wp) + apply wpsimp apply (clarsimp simp: r_into_rtranclp) - apply (simp add: commit_step_def, rule validI_weaken_pre, wp put_trace_elem_twp) + apply (simp add: commit_step_def, wp put_trace_elem_twp) apply (simp add: drop_Cons' guar_cond_def) - apply (rule shuttle_gets_commit_step[THEN - rel_tr_refinement_bind_left_general[rotated -1], simplified bind_assoc return_bind]) + apply (rule shuttle_gets_commit_step[THEN rel_tr_refinement_bind_left_general[rotated -1], + simplified bind_assoc return_bind]) apply (metis equivpE) apply (metis equivpE) apply simp done lemma shuttle_gets_interferences[simplified K_bind_def]: - "equivp sr \ \s t. P s \ R s t \ f s = f t - \ (\s s' t. sr s s' \ R s t = R s' t) - \ \s t. P s \ R\<^sup>*\<^sup>* s t \ P t - \ rel_tr_refinement sr P R C - (do interferences; x \ gets f; g x od) (do x \ gets f; interferences; g x od)" + "\equivp sr; \s t. P s \ R s t \ f s = f t; + \s s' t. sr s s' \ R s t = R s' t; \s t. 
P s \ R\<^sup>*\<^sup>* s t \ P t\ + \ rel_tr_refinement sr P R C + (do interferences; x \ gets f; g x od) (do x \ gets f; interferences; g x od)" apply (rule rel_tr_refinement_trans) apply (metis equivpE) apply (simp only: bind_assoc[symmetric] K_bind_def) @@ -846,40 +788,31 @@ lemma shuttle_gets_interferences[simplified K_bind_def]: apply simp apply (rule rel_tr_refinement_comm_repeat, assumption) apply (rule shuttle_gets_interference; simp) - apply simp - apply (rule validI_weaken_pre, wp, simp) + apply simp apply wpsimp apply (simp add: bind_assoc) apply (rule rel_tr_refinement_refl) apply (metis equivpE) done -lemmas shuttle_gets_interferences_flat - = shuttle_gets_interferences[where g = return, simplified] +lemmas shuttle_gets_interferences_flat = shuttle_gets_interferences[where g = return, simplified] -definition - adjust_tr_relation :: "('t \ 't \ bool) \ ('s \ 't \ bool) \ bool" -where +definition adjust_tr_relation :: "('t \ 't \ bool) \ ('s \ 't \ bool) \ bool" where "adjust_tr_relation tr_r sr = (equivp tr_r \ (\s t t'. tr_r t t' \ sr s t = sr s t'))" lemma adjust_tr_relation_equivp: - "equivp sr - \ adjust_tr_relation sr sr" + "equivp sr \ adjust_tr_relation sr sr" apply (simp add: adjust_tr_relation_def) apply (metis equivpE sympD transpD) done lemma prefix_refinement_i_modify_split: - "adjust_tr_relation tr_r sr - \ \s t. isr s t \ P s \ Q t \ intsr (f s) (g t) - \ \s. tr_r s (g s) - \ \s t. R s t \ R (g s) (g t) - \ not_env_steps_first b - \ prefix_refinement sr intsr osr rvr' P' Q' AR R - d (do x \ interferences; b od) - \ prefix_refinement sr isr osr rvr' (\s0 s. P s \ P' s0 (f s)) (\s0 s. Q s \ Q' s0 (g s)) AR R - (do z \ modify f; d od) - (do x \ interferences; y \ modify g; b od)" + "\adjust_tr_relation tr_r sr; \s t. \isr s t; P s; Q t\ \ intsr (f s) (g t); + \s. tr_r s (g s); \s t. R s t \ R (g s) (g t); not_env_steps_first b; + prefix_refinement sr intsr osr rvr' AR R P' Q' d (do x \ interferences; b od)\ + \ prefix_refinement sr isr osr rvr' AR R (\s0 s. P s \ P' s0 (f s)) (\s0 s. Q s \ Q' s0 (g s)) + (do z \ modify f; d od) + (do x \ interferences; y \ modify g; b od)" apply (clarsimp simp: adjust_tr_relation_def) apply (rule prefix_refinement_weaken_pre) apply (rule pfx_refinement_use_rel_tr_refinement[where tr_r=tr_r and Q=\]) diff --git a/lib/concurrency/Prefix_Refinement.thy b/lib/concurrency/Prefix_Refinement.thy index 7e74faf508..8cf0221198 100644 --- a/lib/concurrency/Prefix_Refinement.thy +++ b/lib/concurrency/Prefix_Refinement.thy @@ -1,4 +1,5 @@ (* + * Copyright 2024, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause @@ -6,98 +7,84 @@ theory Prefix_Refinement imports + Monads.Trace_Empty_Fail Triv_Refinement - "Lib.TraceMonadLemmas" - + Monads.Trace_Reader_Option + Monads.Trace_Sat begin -section \Definition of prefix fragment refinement.\ +section \Definition of prefix fragment refinement\ text \ -This is a notion of refinement/simulation making use of prefix closure. -For a concrete program to refine an abstract program, then for every -trace of the concrete program there must exist a well-formed fragment -of the abstract program that matches (according to the simulation -relation) but which leaves enough decisions available to the abstract -environment to permit parallel composition. -\ + This is a notion of refinement/simulation making use of prefix closure. 
+ For a concrete program to refine an abstract program, for every + trace of the concrete program there must exist a well-formed fragment + of the abstract program that matches (according to the simulation + relation) but which leaves enough decisions available to the abstract + environment to permit parallel composition.\ text \ -Fragments must be self-closed, or enabled. Certain incomplete traces -must be possible to extend by a program step. -\ -definition - self_closed :: "((tmid \ 's) list \ bool) \ 's \ ('s, 'a) tmonad \ bool" -where - "self_closed cond s f = (\xs. (xs, Incomplete) \ f s - \ cond xs \ (\s'. (Me, s') # xs \ fst ` f s))" + Fragments must be self-closed, or enabled. Certain incomplete traces + must be possible to extend by a program step.\ +definition self_closed :: "((tmid \ 's) list \ bool) \ 's \ ('s, 'a) tmonad \ bool" where + "self_closed cond s f = + (\xs. (xs, Incomplete) \ f s \ cond xs \ (\s'. (Me, s') # xs \ fst ` f s))" lemmas self_closedD = self_closed_def[THEN iffD1, rule_format] text \ -Fragments must be environment-closed. Certain incomplete traces -must be possible to extend by any environment step that is -compatible with some condition. -\ -definition - env_closed :: "((tmid \ 's) list \ 's \ bool) \ 's \ ('s, 'a) tmonad \ bool" -where - "env_closed cond s f = (\xs s'. (xs, Incomplete) \ f s - \ cond xs s' \ ((Env, s') # xs) \ fst ` f s)" + Fragments must be environment-closed. Certain incomplete traces + must be possible to extend by any environment step that is + compatible with some condition.\ +definition env_closed :: "((tmid \ 's) list \ 's \ bool) \ 's \ ('s, 'a) tmonad \ bool" where + "env_closed cond s f = + (\xs s'. (xs, Incomplete) \ f s \ cond xs s' \ ((Env, s') # xs) \ fst ` f s)" lemmas env_closedD = env_closed_def[THEN iffD1, rule_format] lemma env_closed_strengthen_cond: - "env_closed P s f - \ (\xs s. Q xs s \ P xs s) - \ env_closed Q s f" + "\env_closed P s f; \xs s. Q xs s \ P xs s\ \ env_closed Q s f" by (simp add: env_closed_def) -text \ -Two traces match according to some state relation if they match at every step. -\ -definition - matching_tr :: "('s \ 't \ bool) \ (tmid \ 's) list \ (tmid \ 't) list \ bool" -where +text \Two traces match according to some state relation if they match at every step.\ +definition matching_tr :: "('s \ 't \ bool) \ (tmid \ 's) list \ (tmid \ 't) list \ bool" where "matching_tr sr = list_all2 (\(aid, as) (cid, cs). aid = cid \ sr as cs)" -definition - matching_tr_pfx :: "('s \ 't \ bool) \ (tmid \ 's) list \ (tmid \ 't) list \ bool" -where - "matching_tr_pfx sr atr ctr = (length atr \ length ctr - \ matching_tr sr (rev atr) (take (length atr) (rev ctr)))" +definition matching_tr_pfx :: + "('s \ 't \ bool) \ (tmid \ 's) list \ (tmid \ 't) list \ bool" + where + "matching_tr_pfx sr atr ctr = + (length atr \ length ctr \ matching_tr sr (rev atr) (take (length atr) (rev ctr)))" abbreviation "matching_tr_tmids_pfx \ matching_tr_pfx (\_ _. True)" -abbreviation(input) +abbreviation (input) "matching_self_cond ctr \ (\xs. length xs < length ctr \ fst (rev ctr ! length xs) = Me)" -abbreviation(input) - "matching_env_cond sr ctr s0 R \ (\xs s. matching_tr_pfx sr ((Env, s) # xs) ctr - \ rely_cond R s0 ((Env, s) # xs))" +abbreviation (input) + "matching_env_cond sr ctr s0 R \ + (\xs s. matching_tr_pfx sr ((Env, s) # xs) ctr \ rely_cond R s0 ((Env, s) # xs))" text \ -The collection of properties a fragment must have to match some concrete -trace. 
It must be prefix, self and environment closed, nonempty, and all -outcomes must be matching. The outcomes (trace and result) must match -the rely condition, the concrete trace (or a prefix), and must either be -a matching result or @{term Incomplete} if a prefix. -\ -definition - is_matching_fragment :: "('s \ 't \ bool) \ ('s \ 't \ bool) - \ ('a \ 'b \ bool) \ (tmid \ 't) list \ ('t, 'b) tmres - \ 's \ ('s \ 's \ bool) \ 's \ ('s, 'a) tmonad \ bool" -where + The collection of properties a fragment must have to match some concrete + trace. It must be prefix, self and environment closed, nonempty, and all + outcomes must be matching. The outcomes (trace and result) must match + the rely condition, the concrete trace (or a prefix), and must either be + a matching result or @{term Incomplete} if a prefix.\ +definition is_matching_fragment :: + "('s \ 't \ bool) \ ('s \ 't \ bool) \ ('a \ 'b \ bool) \ (tmid \ 't) list \ + ('t, 'b) tmres \ 's \ ('s \ 's \ bool) \ 's \ ('s, 'a) tmonad \ bool" + where "is_matching_fragment sr osr rvr ctr cres s0 R s f - = ((prefix_closed f - \ self_closed (matching_self_cond ctr) s f - \ env_closed (matching_env_cond sr ctr s0 R) s f) - \ (f s \ {}) + = ((prefix_closed f + \ self_closed (matching_self_cond ctr) s f + \ env_closed (matching_env_cond sr ctr s0 R) s f) + \ f s \ {} \ (\(tr, res) \ f s. rely_cond R s0 tr - \ matching_tr_pfx sr tr ctr - \ (length tr < length ctr \ res = Incomplete) - \ (length tr = length ctr \ rel_tmres osr rvr res cres)))" + \ matching_tr_pfx sr tr ctr + \ (length tr < length ctr \ res = Incomplete) + \ (length tr = length ctr \ rel_tmres osr rvr res cres)))" lemmas is_matching_fragmentD = is_matching_fragment_def[THEN iffD1, rule_format] @@ -108,38 +95,50 @@ lemmas is_matching_fragment_env_closed = is_matching_fragment_wf[THEN conjunct2, lemmas is_matching_fragment_defD = is_matching_fragmentD[THEN conjunct2, THEN conjunct1, rule_format] lemmas is_matching_fragment_trD - = is_matching_fragmentD[THEN conjunct2, THEN conjunct2, - rule_format, where x="(tr, res)" for tr res, simplified, rule_format] + = is_matching_fragmentD[THEN conjunct2, THEN conjunct2, rule_format, + where x="(tr, res)" for tr res, simplified, rule_format] text \ -Prefix fragment refinement. Given the initial conditions, every concrete outcome -(trace and result) must have a matching fragment which is a simple refinement of -the abstract program. -\ -definition - prefix_refinement :: "('s \ 't \ bool) \ ('s \ 't \ bool) \ ('s \ 't \ bool) - \ ('a \ 'b \ bool) \ ('s \ 's \ bool) \ ('t \ 't \ bool) - \ ('s \ 's \ bool) \ ('t \ 't \ bool) - \ ('s, 'a) tmonad \ ('t, 'b) tmonad \ bool" -where - "prefix_refinement sr isr osr rvr P Q AR R aprog cprog + Prefix fragment refinement. Given the initial conditions, every concrete outcome + (trace and result) must have a matching fragment which is a simple refinement of + the abstract program.\ +\ \FIXME: do we want to be able to assume non-failure of the abstract program.\ +\ \FIXME: should we have an option for showing non-failure of the concrete program.\ +\ \FIXME: corres uses a set for the state relation, this uses a predicate. Do we care?\ +definition prefix_refinement :: + "('s \ 't \ bool) \ ('s \ 't \ bool) \ ('s \ 't \ bool) \ ('a \ 'b \ bool) \ + ('s \ 's \ bool) \ ('t \ 't \ bool) \ ('s \ 's \ bool) \ ('t \ 't \ bool) \ + ('s, 'a) tmonad \ ('t, 'b) tmonad \ bool" + where + "prefix_refinement sr isr osr rvr AR R P Q aprog cprog = (\s s0 t0 t. isr s t \ P s0 s \ sr s0 t0 \ Q t0 t - \ (\(ctr, cres) \ cprog t. 
- rely_cond R t0 ctr \ (\f. is_matching_fragment sr osr rvr ctr cres s0 AR s f - \ triv_refinement aprog f)))" + \ (\(ctr, cres) \ cprog t. + rely_cond R t0 ctr \ + (\f. is_matching_fragment sr osr rvr ctr cres s0 AR s f + \ triv_refinement aprog f)))" abbreviation - "pfx_refn sr rvr P \ prefix_refinement sr sr sr rvr P" + "pfx_refn sr \ prefix_refinement sr sr sr" + +section \Base case facts about refinement\ lemmas prefix_refinementD = prefix_refinement_def[THEN iffD1, rule_format] lemmas split_iffD1 = Product_Type.split[THEN iffD1] lemmas pfx_refnD = prefix_refinementD lemmas pfx_refnD2 = pfx_refnD[THEN split_iffD1[where a=tr and b=res for tr res], rule_format] +lemma prefix_refinement_False: + "prefix_refinement sr isr osr rvr AR R P \\ f g" + by (clarsimp simp: prefix_refinement_def) + +lemma prefix_refinement_False': + "prefix_refinement sr isr osr rvr AR R \\ Q f g" + by (clarsimp simp: prefix_refinement_def) + lemma matching_tr_pfx_aCons: "matching_tr_pfx sr ((tmid, s) # atr) ctr = (\s'. length atr < length ctr \ rev ctr ! length atr = (tmid, s') - \ sr s s' \ matching_tr_pfx sr atr ctr)" + \ sr s s' \ matching_tr_pfx sr atr ctr)" apply (simp add: matching_tr_pfx_def matching_tr_def Suc_le_eq list_all2_conv_all_nth less_Suc_eq all_conj_distrib) apply (simp add: nth_append prod_eq_iff) @@ -147,35 +146,168 @@ lemma matching_tr_pfx_aCons: done lemma rely_cond_hd: - "rely_cond R s0 xs \ xs \ [] - \ fst (hd xs) = Env \ R (last_st_tr (tl xs) s0) (snd (hd xs))" + "\rely_cond R s0 xs; xs \ []\ + \ fst (hd xs) = Env \ R (last_st_tr (tl xs) s0) (snd (hd xs))" by (clarsimp simp: rely_cond_def neq_Nil_conv trace_steps_append split: if_split_asm) -lemma diff_Suc_eq_if: - "(Suc n - m) = (if m \ n then Suc (n - m) else 0)" - by auto - lemma rely_cond_nth: - "rely_cond R s0 tr \ n < length tr - \ fst (rev tr ! n) = Env \ R ((if n = 0 then s0 else snd (rev tr ! (n - 1)))) (snd (rev tr ! n))" + "\rely_cond R s0 tr; n < length tr\ + \ fst (rev tr ! n) = Env \ R ((if n = 0 then s0 else snd (rev tr ! (n - 1)))) (snd (rev tr ! n))" by (simp add: rely_cond_def trace_steps_rev_drop_nth[where n=0, simplified]) lemma is_matching_fragment_Nil: - "is_matching_fragment sr osr rvr ctr cres s0 R s f - \ [] \ fst ` f s" + "is_matching_fragment sr osr rvr ctr cres s0 R s f \ [] \ fst ` f s" apply (clarsimp simp: is_matching_fragment_def) apply (clarsimp simp only: set_eq_iff empty_iff simp_thms not_all) apply (drule(1) prefix_closed_drop[where tr=tr and n="length tr" for tr]) - apply (clarsimp simp add: in_fst_snd_image) + apply (clarsimp simp: in_fst_snd_image) + done + +\ \FIXME: it would be good to show this but it needs more thought to determine how best to handle + the case where the concrete function has failing traces that do not satisy the rely. +lemma prefix_refinement_propagate_no_fail: + "\prefix_refinement sr isr osr rvr AR R P Q f f'; + \s0. no_fail (P s0) f; \t0 t. Q t0 t \ (\s0 s. P s0 s \ sr s0 t0 \ isr s t)\ + \ \t0. no_fail (Q t0) f'" + apply (clarsimp simp: prefix_refinement_def no_fail_def failed_def) + apply (erule allE, erule allE, erule (1) impE) + apply clarsimp + apply ((drule spec)+, (drule (1) mp)+) + apply (drule (1) bspec, clarsimp) + oops\ + +\ \FIXME: this needs some sort of assumption saying that the rely R does not lead to an empty set + of results. +lemma prefix_refinement_serial: + "\prefix_refinement sr isr osr rvr AR R P Q f f'; empty_fail f'; no_fail Q' f'; + \t0 t. Q t0 t \ Q' t\ + \ \s0 s. (\t0 t. 
isr s t \ P s0 s \ sr s0 t0 \ Q t0 t) \ mres (f s) \ {}" + apply (clarsimp simp: prefix_refinement_def empty_fail_def) + apply (drule no_failD, fastforce) + apply (drule_tac x=t in spec, drule mp; simp?) + apply ((drule spec)+, (drule (1) mp)+) + apply (clarsimp simp: mres_def vimage_def) + apply (drule (1) bspec) + apply clarsimp + oops\ + +lemma is_matching_fragment_no_trace: + "is_matching_fragment sr osr rvr [] cres s0 R s (\s. {([], ares s)}) + = rel_tmres osr rvr (ares s) cres" + by (simp add: is_matching_fragment_def prefix_closed_def self_closed_def env_closed_def + matching_tr_pfx_def matching_tr_def) + +\ \Singleton trace monads must have an empty trace to be prefix_closed\ +lemma prefix_refinement_singleton: + "prefix_refinement sr isr osr rvr AR R P Q (\s. {([], res s)}) (\s. {([], cres s)}) + = (\s0 s t0 t. isr s t \ P s0 s \ sr s0 t0 \ Q t0 t + \ rel_tmres osr rvr (res s) (cres t))" + (is "prefix_refinement _ _ _ _ _ _ _ _ ?f _ = _") + apply (rule iffI; clarsimp simp: prefix_refinement_def) + apply ((drule spec)+, (drule (1) mp)+) + apply clarsimp + apply (subgoal_tac "f s = ?f s") + prefer 2 + apply (clarsimp simp: triv_refinement_def fun_eq_iff) + apply (drule_tac x=s in spec, drule subset_singletonD) + apply (clarsimp simp: is_matching_fragment_def) + apply (drule_tac tr="[]" and res="res s" in is_matching_fragment_trD) + apply clarsimp + apply clarsimp + apply ((drule spec)+, (drule (1) mp)+) + apply (rule_tac x="?f" in exI) + apply (clarsimp simp: is_matching_fragment_no_trace) + done + +lemma prefix_refinement_no_trace: + "no_trace g + \ prefix_refinement sr isr osr rvr AR R P Q f g + = (\s0 s t0 t. isr s t \ P s0 s \ sr s0 t0 \ Q t0 t + \ (\cres \ snd ` (g t). \(tr, res) \ (f s). tr = [] \ rel_tmres osr rvr res cres))" + apply (rule iffI; clarsimp simp: prefix_refinement_def; drule (1) no_traceD; clarsimp) + apply ((drule spec)+, (drule (1) mp)+) + apply (drule (1) bspec, clarsimp) + apply (frule_tac s=s in is_matching_fragment_defD) + apply (clarsimp simp: ex_in_conv[symmetric]) + apply (drule (1) is_matching_fragment_trD) + apply (clarsimp simp: matching_tr_pfx_def matching_tr_def) + apply (drule (1) triv_refinement_elemD) + apply (rule_tac x= "([], ba)" in bexI; clarsimp) + apply ((drule spec)+, (drule (1) mp)+) + apply (drule (1) bspec, clarsimp) + apply (rename_tac gres fres) + apply (rule_tac x="\s'. if s'=s then {([],fres)} else {}" in exI) + apply (auto simp: is_matching_fragment_def prefix_closed_def self_closed_def env_closed_def + matching_tr_pfx_def matching_tr_def triv_refinement_def) + done + +lemma prefix_refinement_no_trace': + "\no_trace g; + \s0 s t0 t. \isr s t; P s0 s; sr s0 t0; Q t0 t\ + \ (\cres \ snd ` (g t). \(tr, res) \ (f s). tr = [] \ rel_tmres osr rvr res cres)\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" + by (simp add: prefix_refinement_no_trace) + +section \Building blocks\ +text \Prefix refinement rules for basic constructs.\ + +lemma default_prefix_refinement_ex: + "is_matching_fragment sr osr rvr ctr cres s0 R s + (\s. aprog s \ ({tr'. length tr' \ length ctr} \ UNIV)) + \ \f. is_matching_fragment sr osr rvr ctr cres s0 R s f \ triv_refinement aprog f" + apply (intro exI conjI, assumption) + apply (simp add: triv_refinement_def) + done + +lemma default_prefix_refinement_ex_match_iosr_R: + "is_matching_fragment sr osr rvr ctr cres s0 R s + (rely (\s. aprog s \ ({tr'. matching_tr_pfx iosr tr' ctr} \ UNIV)) R s0) + \ \f. 
is_matching_fragment sr osr rvr ctr cres s0 R s f \ triv_refinement aprog f" + apply (intro exI conjI, assumption) + apply (clarsimp simp: triv_refinement_def rely_def) + done + +lemma prefix_refinement_return_imp: + "\\s0 s t0 t. \P s0 s; Q t0 t; isr s t\ \ rvr rv rv' \ osr s t\ + \ prefix_refinement sr isr osr rvr AR R P Q (return rv) (return rv')" + apply (clarsimp simp: prefix_refinement_def) + apply (rule default_prefix_refinement_ex) + apply (clarsimp simp: return_def is_matching_fragment_no_trace) + done + +lemma prefix_refinement_get_imp: + "\\s0 s t0 t. \P s0 s; Q t0 t; isr s t\ \ rvr s t \ osr s t\ + \ prefix_refinement sr isr osr rvr AR R P Q get get" + apply (clarsimp simp: prefix_refinement_def) + apply (rule default_prefix_refinement_ex) + apply (clarsimp simp: get_def is_matching_fragment_no_trace) + done + +lemma prefix_refinement_gets_imp: + "\\s0 s t0 t. \P s0 s; Q t0 t; isr s t\ \ rvr (f s) (g t) \ osr s t\ + \ prefix_refinement sr isr osr rvr AR R P Q (gets f) (gets g)" + apply (clarsimp simp: prefix_refinement_def) + apply (rule default_prefix_refinement_ex) + apply (clarsimp simp: simpler_gets_def is_matching_fragment_no_trace) done +lemma prefix_refinement_returnOk_imp: + "\\s0 s t0 t. \P s0 s; Q t0 t; isr s t\ \ rvr (Inr rv) (Inr rv') \ osr s t\ + \ prefix_refinement sr isr osr rvr AR R P Q (returnOk rv) (returnOk rv')" + by (simp add: returnOk_def prefix_refinement_return_imp) + +lemma prefix_refinement_throwError_imp: + "\\s0 s t0 t. \P s0 s; Q t0 t; isr s t\ \ rvr (Inl e) (Inl e') \ osr s t\ + \ prefix_refinement sr isr osr rvr AR R P Q (throwError e) (throwError e')" + by (simp add: throwError_def prefix_refinement_return_imp) + + section \Implications\ text \ -The notions of matching fragment and prefix refinement we have defined -allow us to prove the existence of a matching trace in the abstract -program. -\ + The notions of matching fragment and prefix refinement we have defined + allow us to prove the existence of a matching trace in the abstract + program.\ theorem matching_fragment_matching_tr: assumes match: "is_matching_fragment sr osr rvr ctr cres s0 R' s f" and rely: "rely_cond R t0 ctr" @@ -258,23 +390,21 @@ corollary matching_fragment_matching_tr_trivR: assumes match: "is_matching_fragment sr osr rvr ctr cres s0 R s f" and sr: "(\s t t'. sr s t \ (\s'. sr s' t' \ R s s'))" and srx: "sr s0 t0" - shows "\(atr, ares) \ f s. matching_tr sr atr ctr - \ rel_tmres osr rvr ares cres" - using matching_fragment_matching_tr[where R="\_ _. True", - OF match _ srx] - by (auto simp add: rely_cond_def sr) + shows "\(atr, ares) \ f s. matching_tr sr atr ctr \ rel_tmres osr rvr ares cres" + using matching_fragment_matching_tr[where R="\_ _. True", OF match _ srx] + by (auto simp: rely_cond_def sr) theorem prefix_refinement_rely_cond_trD: - assumes preds: "prefix_refinement sr isr osr rvr P Q R' R aprog cprog" + assumes preds: "prefix_refinement sr isr osr rvr AR R P Q aprog cprog" "isr s t" "P s0 s" "Q t0 t" "(ctr, cres) \ cprog t" "rely_cond R t0 ctr" "sr s0 t0" - and sr: "(\s t t'. sr s t \ R t t' \ (\s'. sr s' t' \ R' s s'))" + and sr: "(\s t t'. sr s t \ R t t' \ (\s'. sr s' t' \ AR s s'))" shows "\(atr, ares) \ aprog s. 
matching_tr sr atr ctr \ rel_tmres osr rvr ares cres - \ rely_cond R' s0 atr" + \ rely_cond AR s0 atr" proof - obtain f where subs: "f s \ aprog s" - and match: "is_matching_fragment sr osr rvr ctr cres s0 R' s f" + and match: "is_matching_fragment sr osr rvr ctr cres s0 AR s f" using prefix_refinementD[OF preds(1-3) _ preds(4-5)] preds(6-) by (auto simp add: triv_refinement_def) show ?thesis @@ -282,55 +412,249 @@ proof - by blast qed -lemma rely_cond_True: - "rely_cond (\_ _. True) = (\_ _. True)" - by (simp add: rely_cond_def fun_eq_iff) +section \Using prefix refinement\ +text \ + Using prefix refinement to map the validI Hoare quadruple + (precond/rely/guarantee/postcond). Proofs of quadruples for + abstract programs imply related quadruples for concrete + programs.\ + +lemma list_all2_all_trace_steps: + assumes P: "\x\trace_steps (rev tr) s0. P x" + and lR': "list_all2 (\(aid, as) (cid, cs). aid = cid \ R' as cs) tr tr'" + and R': "R' s0 s0'" + and Q: "\idn as1 as2 cs1 cs2. R' as1 cs1 \ R' as2 cs2 + \ P (idn, as1, as2) \ Q (idn, cs1, cs2)" + shows "\x\trace_steps (rev tr') s0'. Q x" +proof - + note lR'' = lR'[simplified trans[OF list_all2_rev[symmetric] list_all2_conv_all_nth], + simplified split_def, THEN conjunct2, rule_format] + note len[simp] = lR'[THEN list_all2_lengthD] + show ?thesis + using P R' + apply (clarsimp simp: trace_steps_nth) + apply (drule_tac x=x in bspec, simp) + apply (frule lR''[simplified]) + apply (cut_tac i="x - 1" in lR'', simp) + apply (auto simp: Q) + done +qed + +theorem prefix_refinement_validI: + "\prefix_refinement sr isr osr rvr AR R prP' prP f g; + \P'\,\AR\ f \G'\,\Q'\; + \t0 t. P t0 t \ (\s0 s. P' s0 s \ prP' s0 s \ prP t0 t \ isr s t \ sr s0 t0); + \s0 t0 t. \sr s0 t0; R t0 t\ \ (\s. AR s0 s \ sr s t); + \s0 t0 s t. \G' s0 s; sr s0 t0; sr s t\ \ G t0 t; + \rv rv' s0 t0 s t. \Q' rv s0 s; sr s0 t0; osr s t; rvr rv rv'\ \ Q rv' t0 t; prefix_closed g\ + \ \P\,\R\ g \G\,\Q\" + apply (subst validI_def, clarsimp simp: rely_def) + apply (drule meta_spec2, drule(1) meta_mp, clarsimp) + apply (drule(6) prefix_refinement_rely_cond_trD[where AR=AR, simplified]) + apply blast + apply clarsimp + apply (rule conjI) + apply (frule(3) validI_GD) + apply (simp add: guar_cond_def matching_tr_def) + apply (erule_tac R'="\s cs. sr s cs" in list_all2_all_trace_steps) + apply (clarsimp simp: list_all2_conv_all_nth split_def) + apply simp + apply clarsimp + apply clarsimp + apply (erule tmres.rel_cases; clarsimp) + apply (drule(1) validI_rvD, simp add: rely_def) + apply simp + apply (case_tac tr; clarsimp simp: list_all2_Cons2 matching_tr_def) + done + +section \Weakening rules\ + +named_theorems pfx_refn_pre +(* Introduce schematic prefix_refinement guards; fail if already schematic *) +method pfx_refn_pre0 = WP_Pre.pre_tac pfx_refn_pre +(* Optionally introduce schematic prefix_refinement guards *) +method pfx_refn_pre = pfx_refn_pre0? + +lemma stronger_prefix_refinement_weaken_pre[pfx_refn_pre]: + "\prefix_refinement sr isr osr rvr AR R P' Q' f g; + \s t s0 t0. \isr s t; sr s0 t0; P s0 s; Q t0 t\ \ P' s0 s; + \s t s0 t0. \isr s t; sr s0 t0; P s0 s; Q t0 t\ \ Q' t0 t\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" + by (fastforce simp: prefix_refinement_def) + +lemma prefix_refinement_weaken_pre: + "\prefix_refinement sr isr osr rvr AR R P' Q' f g; + \s s0. P s0 s \ P' s0 s; \t t0. Q t0 t \ Q' t0 t\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" + by pfx_refn_pre + +lemma prefix_refinement_weaken_pre1: + "\prefix_refinement sr isr osr rvr AR R P' Q f g; \s s0. 
P s0 s \ P' s0 s\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" + by pfx_refn_pre + +lemma prefix_refinement_weaken_pre2: + "\prefix_refinement sr isr osr rvr AR R P Q' f g; \t t0. Q t0 t \ Q' t0 t\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" + by pfx_refn_pre + +lemma prefix_refinement_weaken_srs: + "\prefix_refinement sr isr osr r AR R P Q f g; isr' \ isr; osr \ osr'; sr \ sr\ + \ prefix_refinement sr isr' osr' r AR R P Q f g" + apply (subst prefix_refinement_def, clarsimp) + apply (drule(1) predicate2D) + apply (drule(5) prefix_refinementD) + apply clarsimp + apply (rule exI, rule conjI[rotated], assumption) + apply (clarsimp simp: is_matching_fragment_def) + apply (drule(1) bspec, clarsimp) + apply (erule tmres.rel_cases; clarsimp) + apply (erule(1) predicate2D) + done + +named_theorems pfx_refn_rvr_pre +(* Introduce schematic return value relation, fail if already schematic *) +method pfx_refn_rvr_pre = WP_Pre.pre_tac pfx_refn_rvr_pre + +lemma prefix_refinement_weaken_rvr[pfx_refn_rvr_pre]: + "\prefix_refinement sr isr osr rvr AR R P Q f g; \rv rv'. rvr rv rv' \ rvr' rv rv'\ + \ prefix_refinement sr isr osr rvr' AR R P Q f g" + apply (subst prefix_refinement_def, clarsimp) + apply (drule(5) prefix_refinementD, clarsimp) + apply (rule exI, rule conjI[rotated], assumption) + apply (clarsimp simp: is_matching_fragment_def) + apply (drule(1) bspec, clarsimp) + apply (erule tmres.rel_cases; clarsimp) + done + +lemma prefix_closed_rely: + "prefix_closed f \ prefix_closed (rely f R s0)" + apply (subst prefix_closed_def, clarsimp simp: rely_def rely_cond_Cons_eq) + apply (erule(1) prefix_closedD) + done + +lemma rely_self_closed: + "self_closed P s f \ self_closed P s (rely f R s0)" + apply (subst self_closed_def, clarsimp simp: rely_def rely_cond_Cons_eq) + apply (drule(2) self_closedD) + apply (fastforce simp: rely_cond_Cons_eq) + done + +lemma rely_env_closed: + "\env_closed P s f; + \xs s. 
\P' xs s; rely_cond R s0 xs\ \ P xs s \ R (last_st_tr xs s0) s\ + \ env_closed P' s (rely f R s0)" + apply (subst env_closed_def, clarsimp simp: rely_def) + apply (drule_tac s'=s' in env_closedD, assumption) + apply simp + apply (clarsimp simp: image_def) + apply (fastforce intro: rely_cond_Cons rev_bexI) + done + +lemma rely_cond_mono: + "R \ R' \ rely_cond R \ rely_cond R'" + by (simp add: le_fun_def rely_cond_def split_def) + +lemma is_matching_fragment_add_rely: + "\is_matching_fragment sr osr r ctr cres s0 AR s f; AR' \ AR\ + \ is_matching_fragment sr osr r ctr cres s0 AR' s (rely f AR' s0)" + apply (frule is_matching_fragment_Nil) + apply (clarsimp simp: is_matching_fragment_def prefix_closed_rely + rely_self_closed) + apply (intro conjI) + apply (erule rely_env_closed) + apply (frule rely_cond_mono) + apply (simp add: le_fun_def rely_cond_Cons_eq) + apply (fastforce simp: rely_def) + apply (auto simp: rely_def)[1] + done + +named_theorems pfx_refn_rely_pre +(* Introduce schematic rely relations, fail if already schematic *) +method pfx_refn_rely_pre = WP_Pre.pre_tac pfx_refn_rely_pre -section \Compositionality.\ -text \The crucial rules for proving prefix refinement -of parallel and sequential compositions.\ +lemma prefix_refinement_weaken_rely[pfx_refn_rely_pre]: + "\prefix_refinement sr isr osr r AR R P Q f g; R' \ R; AR' \ AR\ + \ prefix_refinement sr isr osr r AR' R' P Q f g" + apply (subst prefix_refinement_def, clarsimp) + apply (drule(3) prefix_refinementD, assumption+) + apply (clarsimp simp: rely_cond_def split_def le_fun_def) + apply (rule exI, rule conjI, erule is_matching_fragment_add_rely) + apply (simp add: le_fun_def) + apply (auto simp add: triv_refinement_def rely_def) + done + + +section \Inserting assumptions to be proved later\ + +lemma prefix_refinement_req: + "\\s0 s t0 t. \sr s0 t0; isr s t; P s0 s; Q t0 t\ \ F; + F \ prefix_refinement sr isr osr rvr AR R P Q f g\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" + by (auto simp: prefix_refinement_def) + +lemma prefix_refinement_gen_asm: + "\P \ prefix_refinement sr isr osr rvr AR R P' Q' f g\ + \ prefix_refinement sr isr osr rvr AR R (P' and (\_ _. P)) Q' f g" + by (auto simp: prefix_refinement_def) + +lemma prefix_refinement_gen_asm2: + "\P \ prefix_refinement sr isr osr rvr AR R P' Q' f g\ + \ prefix_refinement sr isr osr rvr AR R P' (Q' and (\_ _. P)) f g" + by (auto simp: prefix_refinement_def) + +lemmas prefix_refinement_gen_asms = prefix_refinement_gen_asm prefix_refinement_gen_asm2 + +lemma prefix_refinement_assume_pre: + "\\s s0 t t0. \isr s t; sr s0 t0; P s0 s; Q t0 t\ \ prefix_refinement sr isr osr rvr AR R P Q f g\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" + by (fastforce simp: prefix_refinement_def) + +lemma prefix_refinement_name_pre: + "\\s s0 t t0. + \isr s t; sr s0 t0; P s0 s; Q t0 t\ + \ prefix_refinement sr isr osr rvr AR R + (\s0' s'. s0' = s0 \ s' = s) (\t0' t'. t0' = t0 \ t' = t) f g\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" + by (fastforce simp: prefix_refinement_def) + + +section \Compositionality\ +text \The crucial rules for proving prefix refinement of parallel and sequential compositions.\ lemma ball_set_zip_conv_nth: - "(\x \ set (zip ys zs). P x) - = (\n. n < length ys \ n < length zs \ P (ys ! n, zs ! n))" - by (auto simp add: Ball_def in_set_zip) + "(\x \ set (zip ys zs). P x) = (\n. n < length ys \ n < length zs \ P (ys ! n, zs ! 
n))" + by (auto simp: Ball_def in_set_zip) -definition - par_tr_fin_principle :: "('s, unit) tmonad \ bool" -where +definition par_tr_fin_principle :: "('s, unit) tmonad \ bool" where "par_tr_fin_principle f = (\s tr s'. (tr, Result ((), s')) \ f s \ s' = last_st_tr tr s \ tr \ [])" lemmas par_tr_fin_principleD = par_tr_fin_principle_def[THEN iffD1, rule_format] lemma tr_in_parallel: "(tr, res) \ parallel f g s - \ \f_tr g_tr. (f_tr, res) \ f s \ (g_tr, res) \ g s - \ (tr, res) \ parallel (K {(f_tr, res)}) (K {(g_tr, res)}) s" + \ \f_tr g_tr. (f_tr, res) \ f s \ (g_tr, res) \ g s + \ (tr, res) \ parallel (K {(f_tr, res)}) (K {(g_tr, res)}) s" apply (clarsimp simp: parallel_def) apply fastforce done lemma matching_env_closedD: - "(tr, res) \ f s - \ is_matching_fragment sr osr rvr ctr cres s0 R s f - \ length tr < length ctr - \ fst (rev ctr ! length tr) = Env - \ sr s' (snd (rev ctr ! length tr)) - \ R (last_st_tr tr s0) s' - \ (Env, s') # tr \ fst ` f s" + "\(tr, res) \ f s; is_matching_fragment sr osr rvr ctr cres s0 R s f; + length tr < length ctr; fst (rev ctr ! length tr) = Env; + sr s' (snd (rev ctr ! length tr)); R (last_st_tr tr s0) s'\ + \ (Env, s') # tr \ fst ` f s" apply (frule(1) is_matching_fragment_trD, clarsimp) apply (erule(1) env_closedD[OF is_matching_fragment_env_closed]) apply (clarsimp simp: matching_tr_pfx_aCons rely_cond_Cons_eq prod_eq_iff) done lemma par_tr_fin_fragment: - "par_tr_fin_principle f - \ (tr, res) \ f s - \ is_matching_fragment sr osr rvr ctr cres s0 R s f - \ res = (case (length ctr - length tr, cres) - of (0, Failed) \ Failed - | (0, Result _) \ Result ((), last_st_tr tr s) - | _ \ Incomplete)" + "\par_tr_fin_principle f; (tr, res) \ f s; is_matching_fragment sr osr rvr ctr cres s0 R s f\ + \ res = (case (length ctr - length tr, cres) of + (0, Failed) \ Failed + | (0, Result _) \ Result ((), last_st_tr tr s) + | _ \ Incomplete)" apply (frule(1) is_matching_fragment_trD) apply (cases "length tr < length ctr") apply (clarsimp split: nat.split) @@ -341,15 +665,14 @@ lemma par_tr_fin_fragment: done lemma par_tr_fragment_in_parallel: - "par_tr_fin_principle f - \ par_tr_fin_principle g - \ is_matching_fragment sr osr rvr ctr1 cres s0 R s f - \ is_matching_fragment sr osr' rvr ctr2 cres s0 R' s g - \ length ctr1 = length ctr2 - \ \f_steps res'. length f_steps = length tr - \ (map (\(f_step, (id, s)). (if f_step then id else Env, s)) (zip f_steps tr), res) \ f s - \ (map (\(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps tr), res') \ g s - \ (tr, res) \ parallel f g s" + "\par_tr_fin_principle f; par_tr_fin_principle g; + is_matching_fragment sr osr rvr ctr1 cres s0 R s f; + is_matching_fragment sr osr' rvr ctr2 cres s0 R' s g; + length ctr1 = length ctr2; + \f_steps res'. length f_steps = length tr + \ (map (\(f_step, (id, s)). (if f_step then id else Env, s)) (zip f_steps tr), res) \ f s + \ (map (\(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps tr), res') \ g s\ + \ (tr, res) \ parallel f g s" apply (clarsimp simp: parallel_def) apply (rule_tac x=f_steps in exI, clarsimp) apply (drule(2) par_tr_fin_fragment)+ @@ -359,75 +682,64 @@ lemma par_tr_fragment_in_parallel: done lemma par_tr_fragment_parallel_def: - "par_tr_fin_principle f - \ par_tr_fin_principle g - \ is_matching_fragment sr osr rvr ctr1 cres s0 R s f - \ is_matching_fragment sr osr' rvr ctr2 cres s0 R' s g - \ length ctr1 = length ctr2 - \ parallel f g s = {(tr, res). \f_steps res'. length f_steps = length tr - \ (map (\(f_step, (id, s)). 
(if f_step then id else Env, s)) (zip f_steps tr), res) \ f s - \ (map (\(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps tr), res') \ g s}" + "\par_tr_fin_principle f; par_tr_fin_principle g; + is_matching_fragment sr osr rvr ctr1 cres s0 R s f; + is_matching_fragment sr osr' rvr ctr2 cres s0 R' s g; + length ctr1 = length ctr2\ + \ parallel f g s = {(tr, res). \f_steps res'. length f_steps = length tr + \ (map (\(f_step, (id, s)). (if f_step then id else Env, s)) (zip f_steps tr), res) \ f s + \ (map (\(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps tr), res') \ g s}" apply (rule equalityI; clarsimp) apply (auto simp: parallel_def)[1] apply (erule(4) par_tr_fragment_in_parallel) apply blast done -lemmas list_all2_rev_nthD - = list_all2_nthD[OF list_all2_rev[THEN iffD2], simplified] +lemmas list_all2_rev_nthD = list_all2_nthD[OF list_all2_rev[THEN iffD2], simplified] -definition - forward_enabled :: "'s rg_pred \ bool" -where +definition forward_enabled :: "'s rg_pred \ bool" where "forward_enabled P = (\s_pre. \s. P s_pre s)" lemmas forward_enabledD = forward_enabled_def[THEN iffD1, rule_format] lemma forward_enabled_mono: - "P \ Q \ forward_enabled P \ forward_enabled Q" + "\P \ Q; forward_enabled P\ \ forward_enabled Q" by (fastforce simp: forward_enabled_def le_fun_def) lemma forward_enabled_reflp: "reflp P \ forward_enabled P" - by (auto simp add: reflp_def forward_enabled_def) + by (auto simp: reflp_def forward_enabled_def) lemma par_tr_fin_principle_triv_refinement: - "par_tr_fin_principle aprog - \ triv_refinement aprog cprog - \ par_tr_fin_principle cprog" + "\par_tr_fin_principle aprog; triv_refinement aprog cprog\ + \ par_tr_fin_principle cprog" by (fastforce simp: par_tr_fin_principle_def triv_refinement_def) lemma matching_tr_pfx_parallel_zip: - "matching_tr_pfx sr a_pfx a_tr - \ matching_tr_pfx sr b_pfx b_tr - \ length a_pfx = length b_pfx - \ list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) a_tr b_tr - \ matching_tr_pfx sr (map parallel_mrg (zip a_pfx b_pfx)) (map parallel_mrg (zip a_tr b_tr))" + "\matching_tr_pfx sr a_pfx a_tr; matching_tr_pfx sr b_pfx b_tr; + length a_pfx = length b_pfx; + list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) a_tr b_tr\ + \ matching_tr_pfx sr (map parallel_mrg (zip a_pfx b_pfx)) (map parallel_mrg (zip a_tr b_tr))" apply (clarsimp simp: matching_tr_pfx_def matching_tr_def list_all2_lengthD) apply (clarsimp simp: list_all2_conv_all_nth) apply (clarsimp simp: rev_map split_def zip_rev[symmetric]) done lemma drop_sub_Suc_is_Cons: - "n = length xs \ m < length xs \ drop (n - Suc m) xs = (rev xs ! m) # drop (n - m) xs" + "\n = length xs; m < length xs\ \ drop (n - Suc m) xs = (rev xs ! 
m) # drop (n - m) xs" apply (rule nth_equalityI; clarsimp) - apply (clarsimp simp add: nth_Cons' rev_nth) + apply (clarsimp simp: nth_Cons' rev_nth) done -lemma le_sub_eq_0: - "((x :: nat) \ x - y) = (x = 0 \ y = 0)" - by arith - -lemmas rely_cond_append_split - = rely_cond_append[where xs="take n xs" and ys="drop n xs" for n xs, simplified] -lemmas guar_cond_append_split - = guar_cond_append[where xs="take n xs" and ys="drop n xs" for n xs, simplified] +lemmas rely_cond_append_split = + rely_cond_append[where xs="take n xs" and ys="drop n xs" for n xs, simplified] +lemmas guar_cond_append_split = + guar_cond_append[where xs="take n xs" and ys="drop n xs" for n xs, simplified] lemma validI_drop_next_G: - "\ \P\, \R\ f \G\, \Q\; P s0 s; (tr, res) \ f s; - rely_cond R s0 (drop (n - m) tr); n = length tr; m < length tr \ - \ fst (rev tr ! m) \ Env - \ G (last_st_tr (rev (take m (rev tr))) s0) (snd (rev tr ! m))" + "\\P\, \R\ f \G\, \Q\; P s0 s; (tr, res) \ f s; rely_cond R s0 (drop (n - m) tr); + n = length tr; m < length tr\ + \ fst (rev tr ! m) \ Env \ G (last_st_tr (rev (take m (rev tr))) s0) (snd (rev tr ! m))" apply clarify apply (drule(2) validI_GD_drop[where n="n - Suc m"]) apply (simp add: drop_sub_Suc_is_Cons) @@ -441,25 +753,24 @@ lemma validI_drop_next_G: lemma tr_in_parallel_validI: assumes elem: "(tr, res) \ parallel (K {(f_tr, res)}) (K {(g_tr, res)}) s" and trs: "(f_tr, res) \ f s" "(g_tr, res) \ g s" - and validI: "\P\, \E Or Gg\ f \Gf\, \Q\" "\P\, \E Or Gf\ g \Gg\, \Q2\" + and validI: "\P\, \E or Gg\ f \Gf\, \Q\" "\P\, \E or Gf\ g \Gg\, \Q2\" and P: "P s0 s" and rel: "rely_cond E s0 tr" - shows "rely_cond (E Or Gg) s0 f_tr \ rely_cond (E Or Gf) s0 g_tr" + shows "rely_cond (E or Gg) s0 f_tr \ rely_cond (E or Gf) s0 g_tr" using parallel_rely_induct0[where R=E and G="\\", OF elem _ _ validI, OF P P] by (clarsimp simp: rel trs predicate2I) lemma env_closed_parallel_fragment: - "is_matching_fragment sr osr rvr ctr1 cres1 s0 (E Or Gg) s f - \ is_matching_fragment sr osr' rvr ctr2 cres2 s0 (E Or Gf) s g - \ par_tr_fin_principle f - \ par_tr_fin_principle g - \ cres1 = cres2 \ length ctr1 = length ctr2 - \ \s xs. Q xs s \ (sr s (snd (rev ctr1 ! length xs))) - \ (sr s (snd (rev ctr2 ! length xs))) - \ length xs < length ctr2 - \ fst (rev ctr1 ! length xs) = Env - \ fst (rev ctr2 ! length xs) = Env - \ E (last_st_tr xs s0) s - \ env_closed Q s (parallel f g)" + "\is_matching_fragment sr osr rvr ctr1 cres1 s0 (E or Gg) s f; + is_matching_fragment sr osr' rvr ctr2 cres2 s0 (E or Gf) s g; + par_tr_fin_principle f; par_tr_fin_principle g; + cres1 = cres2; length ctr1 = length ctr2; + \s xs. Q xs s \ (sr s (snd (rev ctr1 ! length xs))) + \ (sr s (snd (rev ctr2 ! length xs))) + \ length xs < length ctr2 + \ fst (rev ctr1 ! length xs) = Env + \ fst (rev ctr2 ! length xs) = Env + \ E (last_st_tr xs s0) s\ + \ env_closed Q s (parallel f g)" apply (subst env_closed_def, clarsimp) apply (frule is_matching_fragment_prefix_closed[where f=f]) apply (frule is_matching_fragment_prefix_closed[where f=g]) @@ -470,14 +781,14 @@ lemma env_closed_parallel_fragment: apply (frule(1) is_matching_fragment_trD[where f=f]) apply (frule(1) is_matching_fragment_trD[where f=g]) apply (clarsimp simp: matching_tr_pfx_aCons rely_cond_Cons_eq - last_st_tr_map_zip bipred_disj_def) + last_st_tr_map_zip pred_disj_def) apply (drule spec2, drule(1) mp[where P="Q xs s" for xs s]) apply clarsimp apply (drule_tac s'=s' in env_closedD[where f=f, OF is_matching_fragment_env_closed, rotated]; - simp?) + simp?) 
apply (simp add: matching_tr_pfx_aCons rely_cond_Cons_eq last_st_tr_map_zip prod_eq_iff) apply (drule_tac s'=s' in env_closedD[where f=g, OF is_matching_fragment_env_closed, rotated]; - simp?) + simp?) apply (simp add: matching_tr_pfx_aCons rely_cond_Cons_eq last_st_tr_map_zip prod_eq_iff) apply clarsimp apply blast @@ -486,18 +797,15 @@ lemma env_closed_parallel_fragment: lemma self_closed_parallel_fragment: notes if_split[split del] shows - "is_matching_fragment sr osr rvr ctr1 cres1 s0 (E Or Gg) s f - \ is_matching_fragment sr osr' rvr ctr2 cres2 s0 (E Or Gf) s g - \ par_tr_fin_principle f - \ par_tr_fin_principle g - \ list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ctr1 ctr2 - \ \P\,\E Or Gg\ f \Gf\,\\_ _ _. True\ - \ \P\,\E Or Gf\ g \Gg\,\\_ _ _. True\ - \ P s0 s - \ cres1 = cres2 - \ Q = (\xs. length xs < length ctr1 \ (fst (rev ctr1 ! length xs) \ Env - \ fst (rev ctr2 ! length xs) \ Env)) - \ self_closed Q s (parallel f g)" + "\is_matching_fragment sr osr rvr ctr1 cres1 s0 (E or Gg) s f; + is_matching_fragment sr osr' rvr ctr2 cres2 s0 (E or Gf) s g; + par_tr_fin_principle f; par_tr_fin_principle g; + list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ctr1 ctr2; + \P\,\E or Gg\ f \Gf\,\\_ _ _. True\; \P\,\E or Gf\ g \Gg\,\\_ _ _. True\; + P s0 s; cres1 = cres2; + Q = (\xs. length xs < length ctr1 \ (fst (rev ctr1 ! length xs) \ Env + \ fst (rev ctr2 ! length xs) \ Env))\ + \ self_closed Q s (parallel f g)" apply (subst self_closed_def, clarsimp) apply (subst(asm) parallel_def, clarsimp) apply (frule list_all2_lengthD[symmetric]) @@ -508,7 +816,7 @@ lemma self_closed_parallel_fragment: apply (frule(1) list_all2_rev_nthD, clarsimp) apply (case_tac "fst (rev ctr1 ! length xs) = Env"; simp) apply (frule is_matching_fragment_self_closed[where f=g], - drule(1) self_closedD, simp add: eq_Me_neq_Env) + drule(1) self_closedD, simp add: eq_Me_neq_Env) apply (thin_tac "v \ g s" for v s) apply clarsimp apply (frule(1) is_matching_fragment_trD[where f=g]) @@ -521,7 +829,7 @@ lemma self_closed_parallel_fragment: apply (blast intro: in_fst_snd_image) (* pretty much identical proof for symmetric case. sad. *) apply (frule is_matching_fragment_self_closed[where f=f], - drule(1) self_closedD, simp add: eq_Me_neq_Env) + drule(1) self_closedD, simp add: eq_Me_neq_Env) apply (thin_tac "v \ f s" for v s) apply clarsimp apply (frule(1) is_matching_fragment_trD[where f=f]) @@ -535,51 +843,23 @@ lemma self_closed_parallel_fragment: done lemma is_matching_fragment_validI_disj: - "is_matching_fragment sr osr rvr b_tr bd_res s0 R s f - \ triv_refinement g f - \ G = \\ \ \P\,\R\ g \G\,\\_ _ _. True\ - \ \P\,\R\ f \G\,\\_ _ _. True\" + "\is_matching_fragment sr osr rvr b_tr bd_res s0 R s f; triv_refinement g f; + G = \\ \ \P\,\R\ g \G\,\\_ _ _. True\\ + \ \P\,\R\ f \G\,\\_ _ _. True\" apply (frule is_matching_fragment_prefix_closed) apply (erule disjE) - apply (simp add: validI_def guar_cond_def) + apply wpsimp apply (erule(2) validI_triv_refinement) done -lemma rely_prefix_closed: - "prefix_closed f \ prefix_closed (rely f R s0)" - apply (subst prefix_closed_def, clarsimp simp: rely_def rely_cond_Cons_eq) - apply (erule(1) prefix_closedD) - done - -lemma rely_self_closed: - "self_closed P s f \ self_closed P s (rely f R s0)" - apply (subst self_closed_def, clarsimp simp: rely_def rely_cond_Cons_eq) - apply (drule(2) self_closedD) - apply (fastforce simp: rely_cond_Cons_eq) - done - -lemma rely_env_closed: - "env_closed P s f - \ (\xs s. 
P' xs s \ rely_cond R s0 xs \ P xs s \ R (last_st_tr xs s0) s) - \ env_closed P' s (rely f R s0)" - apply (subst env_closed_def, clarsimp simp: rely_def) - apply (drule_tac s'=s' in env_closedD, assumption) - apply simp - apply (clarsimp simp: image_def) - apply (fastforce intro: rely_cond_Cons rev_bexI) - done - theorem prefix_refinement_parallel: - "prefix_refinement sr isr osr rvr P Q (AE Or Gc) (E Or Gd) a b - \ prefix_refinement sr isr osr rvr P Q (AE Or Ga) (E Or Gb) c d - \ par_tr_fin_principle a - \ par_tr_fin_principle c - \ \Q\,\E Or Gd\ b \Gb\,\\_ _ _. True\ - \ \Q\,\E Or Gb\ d \Gd\,\\_ _ _. True\ - \ (Ga = \\ \ Gc = \\) - \ (\P\,\AE Or Gc\ a \Ga\,\\_ _ _. True\ - \ \P\,\AE Or Ga\ c \Gc\,\\_ _ _. True\) - \ prefix_refinement sr isr osr rvr P Q AE E (parallel a c) (parallel b d)" + "\prefix_refinement sr isr osr rvr (AE or Gc) (E or Gd) P Q a b; + prefix_refinement sr isr osr rvr (AE or Ga) (E or Gb) P Q c d; + par_tr_fin_principle a; par_tr_fin_principle c; + \Q\,\E or Gd\ b \Gb\,\\_ _ _. True\; \Q\,\E or Gb\ d \Gd\,\\_ _ _. True\; + (Ga = \\ \ Gc = \\) + \ (\P\,\AE or Gc\ a \Ga\,\\_ _ _. True\ \ \P\,\AE or Ga\ c \Gc\,\\_ _ _. True\)\ + \ prefix_refinement sr isr osr rvr AE E P Q (parallel a c) (parallel b d)" apply (subst prefix_refinement_def, clarsimp) apply (drule tr_in_parallel, clarify) apply (frule(6) tr_in_parallel_validI) @@ -595,10 +875,10 @@ theorem prefix_refinement_parallel: apply (subst is_matching_fragment_def, clarsimp) apply (frule(1) is_matching_fragment_validI_disj[where g=a and G=Ga], blast) apply (frule(1) is_matching_fragment_validI_disj[where g=c and G=Gc], blast) - apply (intro conjI parallel_prefix_closed rely_prefix_closed rely_self_closed, - simp_all add: is_matching_fragment_prefix_closed) + apply (intro conjI prefix_closed_parallel prefix_closed_rely rely_self_closed, + simp_all add: is_matching_fragment_prefix_closed) apply (rule self_closed_parallel_fragment, - (assumption | erule par_tr_fin_principle_triv_refinement[rotated])+) + (assumption | erule par_tr_fin_principle_triv_refinement[rotated])+) apply simp apply (frule list_all2_lengthD) apply (simp add: list_all2_lengthD eq_Me_neq_Env rev_nth split_def fun_eq_iff @@ -606,7 +886,7 @@ theorem prefix_refinement_parallel: apply (rule rely_env_closed[where P=P and P'=P for P, rotated]) apply (simp add: rely_cond_Cons_eq) apply (rule env_closed_parallel_fragment, - (assumption | erule par_tr_fin_principle_triv_refinement[rotated])+) + (assumption | erule par_tr_fin_principle_triv_refinement[rotated])+) apply simp apply (simp add: list_all2_lengthD) apply (clarsimp simp: matching_tr_pfx_aCons rev_map split_def @@ -626,35 +906,27 @@ theorem prefix_refinement_parallel: apply (simp add: list_all2_lengthD) done -lemma validI_triv': - "prefix_closed f \ \P\,\R\ f \\_ _. True\,\\_ _ _. True\" - by (simp add: validI_def guar_cond_def) -lemmas validI_triv = validI_triv'[where P="\\"] - lemmas prefix_refinement_parallel_ART - = prefix_refinement_parallel[OF _ _ _ _ _ _ disjI1[OF conjI, OF refl refl]] + = prefix_refinement_parallel[OF _ _ _ _ _ _ disjI1[OF conjI, OF refl refl]] lemmas prefix_refinement_parallel_triv - = prefix_refinement_parallel_ART[OF _ _ _ _ validI_triv' validI_triv'] + = prefix_refinement_parallel_ART[where Gb="\\" and Gd="\\", simplified] lemmas prefix_refinement_parallel' - = prefix_refinement_parallel[OF _ _ _ _ _ _ disjI2[OF conjI]] + = prefix_refinement_parallel[OF _ _ _ _ _ _ disjI2[OF conjI]] lemma pfx_trace_set_allD: - "\n. \v\set (take n xs). 
P n v \ v \ set (take n xs) - \ P n v" + "\\n. \v\set (take n xs). P n v; v \ set (take n xs)\ \ P n v" by simp lemma prefix_closed_UNION: - "(\s' x. x \ S s' \ prefix_closed (f x)) - \ prefix_closed (\s. \x \ S s. f x s)" + "\\s' x. x \ S s' \ prefix_closed (f x)\ \ prefix_closed (\s. \x \ S s. f x s)" apply (simp add: prefix_closed_def) apply (blast intro: in_fst_snd_image) done lemma is_matching_fragment_UNION: - "(\x. x \ S s \ is_matching_fragment sr osr rvr ctr cres s0 R s (f x)) - \ (\s' x. x \ S s' \ prefix_closed (f x)) - \ S s \ {} - \ is_matching_fragment sr osr rvr ctr cres s0 R s (\s. \x \ S s. f x s)" + "\\x. x \ S s \ is_matching_fragment sr osr rvr ctr cres s0 R s (f x); + \s' x. x \ S s' \ prefix_closed (f x); S s \ {}\ + \ is_matching_fragment sr osr rvr ctr cres s0 R s (\s. \x \ S s. f x s)" apply (clarsimp simp: is_matching_fragment_def prefix_closed_UNION) apply (intro conjI impI) apply (clarsimp simp: self_closed_def split_def in_fst_snd_image_eq) @@ -663,22 +935,26 @@ lemma is_matching_fragment_UNION: apply blast done -definition - mbind :: "('s, 'a) tmonad \ ('s \ 'a \ ('s, 'b) tmonad) \ - 's \ ('s, 'b) tmonad" +\ \ + This is a variant of @{term Trace_Monad.bind}, that is used to build up the fragment required + for proving @{text prefix_refinement_bind_general}.\ +definition mbind :: + "('s, 'a) tmonad \ ('s \ 'a \ ('s, 'b) tmonad) \ 's \ ('s, 'b) tmonad" where - "mbind f g s0 \ \s. \(xs, r) \ (f s). case r of Failed \ {(xs, Failed)} - | Incomplete \ {(xs, Incomplete)} - | Result (rv, s) \ fst_upd (\ys. ys @ xs) ` g (last_st_tr xs s0) rv s" + "mbind f g s0 \ \s. \(xs, r) \ (f s). + case r of + Failed \ {(xs, Failed)} + | Incomplete \ {(xs, Incomplete)} + | Result (rv, s) \ fst_upd (\ys. ys @ xs) ` g (last_st_tr xs s0) rv s" lemma self_closed_mbind: - "is_matching_fragment sr osr rvr ctr cres s0 R s f - \ (\tr x s'. (tr, Result (x, s')) \ f s - \ self_closed (\xs. length xs < length ctr' \ fst (rev ctr' ! length xs) = Me) s' - (g (last_st_tr tr s0) x) \ [] \ fst ` g (last_st_tr tr s0) x s') - \ Q = matching_self_cond (ctr' @ ctr) - \ cres = Incomplete \ ctr' = [] - \ self_closed Q s (mbind f g s0)" + "\is_matching_fragment sr osr rvr ctr cres s0 R s f; + \tr x s'. (tr, Result (x, s')) \ f s + \ self_closed (\xs. length xs < length ctr' \ fst (rev ctr' ! 
length xs) = Me) s' + (g (last_st_tr tr s0) x) + \ [] \ fst ` g (last_st_tr tr s0) x s'; + Q = matching_self_cond (ctr' @ ctr); cres = Incomplete \ ctr' = []\ + \ self_closed Q s (mbind f g s0)" apply (frule is_matching_fragment_self_closed) apply (subst self_closed_def, clarsimp simp: mbind_def) apply (rename_tac tr res) @@ -708,14 +984,12 @@ lemma matching_tr_pfx_rhs_is_extend: fixes ys ys' defines "N == length ys' - length ys" shows - "matching_tr_pfx sr xs ys - \ length xs \ length ys \ drop N ys' = ys - \ matching_tr_pfx sr xs ys'" + "\matching_tr_pfx sr xs ys; length xs \ length ys \ drop N ys' = ys\ + \ matching_tr_pfx sr xs ys'" apply (clarsimp simp: matching_tr_pfx_def) apply (rule context_conjI) apply simp - apply (simp add: matching_tr_def list_all2_conv_all_nth - min_def) + apply (simp add: matching_tr_def list_all2_conv_all_nth min_def) apply (clarsimp simp: rev_nth) done @@ -723,24 +997,21 @@ lemma matching_tr_pfx_rhs_is_drop: fixes ys ys' defines "N == length ys - length ys'" shows - "matching_tr_pfx sr xs ys - \ drop N ys = ys' - \ length xs \ length ys' - \ matching_tr_pfx sr xs ys'" + "\matching_tr_pfx sr xs ys; drop N ys = ys'; length xs \ length ys'\ + \ matching_tr_pfx sr xs ys'" apply (clarsimp simp: matching_tr_pfx_def) - apply (simp add: matching_tr_def list_all2_conv_all_nth - min_def) + apply (simp add: matching_tr_def list_all2_conv_all_nth min_def) apply (clarsimp simp: rev_nth) done lemma env_closed_mbind: - "is_matching_fragment sr osr rvr ctr' cres s0 R s f - \ \tr x s'. (tr, Result (x, s')) \ f s - \ env_closed (matching_env_cond sr ctr'' (last_st_tr tr s0) R) s' (g (last_st_tr tr s0) x) - \ [] \ fst ` g (last_st_tr tr s0) x s' - \ (if cres \ {Incomplete, Failed} then ctr = ctr' else ctr = ctr'' @ ctr') - \ Q = matching_env_cond sr ctr s0 R - \ env_closed Q s (mbind f g s0)" + "\is_matching_fragment sr osr rvr ctr' cres s0 R s f; + \tr x s'. (tr, Result (x, s')) \ f s + \ env_closed (matching_env_cond sr ctr'' (last_st_tr tr s0) R) s' (g (last_st_tr tr s0) x) + \ [] \ fst ` g (last_st_tr tr s0) x s'; + if cres \ {Incomplete, Failed} then ctr = ctr' else ctr = ctr'' @ ctr'; + Q = matching_env_cond sr ctr s0 R\ + \ env_closed Q s (mbind f g s0)" apply (simp add: if_bool_eq_conj) apply (subst env_closed_def, clarsimp simp: mbind_def) apply (strengthen in_fst_snd_image, simp) @@ -774,13 +1045,12 @@ lemma env_closed_mbind: apply (fastforce elim: image_eqI[rotated]) done -lemma mbind_prefix_closed: - "prefix_closed f - \ \tr x s' s. (tr, Result (x, s')) \ f s \ prefix_closed (g (last_st_tr tr s0) x) +lemma prefix_closed_mbind: + "\prefix_closed f; \tr x s' s. (tr, Result (x, s')) \ f s \ prefix_closed (g (last_st_tr tr s0) x)\ \ prefix_closed (mbind f g s0)" apply (subst prefix_closed_def, clarsimp simp: mbind_def) apply (split tmres.split_asm; clarsimp; - (drule(1) prefix_closedD, fastforce elim: rev_bexI)?) + (drule(1) prefix_closedD, fastforce elim: rev_bexI)?) apply (simp add: Cons_eq_append_conv, safe) apply (drule(1) prefix_closedD) apply (fastforce elim: rev_bexI) @@ -791,26 +1061,25 @@ lemma mbind_prefix_closed: done lemma is_matching_fragment_mbind: - "is_matching_fragment sr intsr rvr ctr cres s0 R s f_a - \ \tr x s'. (tr, Result (x, s')) \ f_a s - \ is_matching_fragment sr osr rvr' ctr' cres' (last_st_tr tr s0) R s' (f_b (last_st_tr tr s0) x) - \ \s0' x. 
prefix_closed (f_b s0' x) - \ ctr'' = ctr' @ ctr - \ cres'' = (case cres of Failed \ Failed | Incomplete \ Incomplete | _ \ cres') - \ (cres = Incomplete \ cres = Failed) \ ctr' = [] - \ is_matching_fragment sr osr rvr' ctr'' cres'' s0 R s - (mbind f_a f_b s0)" + "\is_matching_fragment sr intsr rvr ctr cres s0 R s f_a; + \tr x s'. (tr, Result (x, s')) \ f_a s + \ is_matching_fragment sr osr rvr' ctr' cres' (last_st_tr tr s0) R s' (f_b (last_st_tr tr s0) x); + \s0' x. prefix_closed (f_b s0' x); + ctr'' = ctr' @ ctr; + cres'' = (case cres of Failed \ Failed | Incomplete \ Incomplete | _ \ cres'); + (cres = Incomplete \ cres = Failed) \ ctr' = []\ + \ is_matching_fragment sr osr rvr' ctr'' cres'' s0 R s (mbind f_a f_b s0)" apply (subst is_matching_fragment_def, clarsimp) apply (strengthen env_closed_mbind[where ctr''=ctr', mk_strg I E] - mbind_prefix_closed + prefix_closed_mbind self_closed_mbind[where ctr'="ctr'", mk_strg I E]) apply (simp add: conj_comms if_bool_eq_conj mres_def split del: if_split) apply (intro conjI allI impI; clarsimp?; - (blast intro: is_matching_fragment_prefix_closed - is_matching_fragment_env_closed - is_matching_fragment_Nil - is_matching_fragment_self_closed - dest: post_by_hoare)?) + (blast intro: is_matching_fragment_prefix_closed + is_matching_fragment_env_closed + is_matching_fragment_Nil + is_matching_fragment_self_closed + dest: post_by_hoare)?) apply (clarsimp simp: mbind_def) apply (frule_tac s=s in is_matching_fragment_defD) apply (drule ex_in_conv[THEN iffD2], clarsimp) @@ -828,16 +1097,18 @@ lemma is_matching_fragment_mbind: done lemma is_matching_fragment_mbind_union: - "is_matching_fragment sr intsr rvr ctr cres s0 R s f_a - \ ctr'' = ctr' @ ctr - \ cres'' = (case cres of Failed \ Failed | Incomplete \ Incomplete | _ \ cres') - \ cres = Incomplete \ cres = Failed \ ctr' = [] - \ \tr x s'. (tr, Result (x, s')) \ f_a s + "\is_matching_fragment sr intsr rvr ctr cres s0 R s f_a; + ctr'' = ctr' @ ctr; + cres'' = (case cres of Failed \ Failed | Incomplete \ Incomplete | _ \ cres'); + cres = Incomplete \ cres = Failed \ ctr' = []; + \tr x s'. (tr, Result (x, s')) \ f_a s \ (\f. is_matching_fragment sr osr rvr' ctr' cres' (last_st_tr tr s0) R s' f - \ triv_refinement (aprog x) f) - \ is_matching_fragment sr osr rvr' ctr'' cres'' s0 R s - (mbind f_a (\s0' rv s. \f \ {f. is_matching_fragment sr osr rvr' ctr' cres' s0' R s f - \ triv_refinement (aprog rv) f}. f s) s0)" + \ triv_refinement (aprog x) f)\ + \ is_matching_fragment sr osr rvr' ctr'' cres'' s0 R s + (mbind f_a + (\s0' rv s. \f \ {f. is_matching_fragment sr osr rvr' ctr' cres' s0' R s f + \ triv_refinement (aprog rv) f}. f s) + s0)" apply (rule is_matching_fragment_mbind; assumption?) apply clarsimp apply (rule is_matching_fragment_UNION) @@ -850,9 +1121,8 @@ lemma is_matching_fragment_mbind_union: done lemma is_matching_fragment_mresD: - "is_matching_fragment sr osr rvr ctr cres s0 R s f - \ (x, s') \ mres (f s) - \ \y s''. cres = Result (y, s'') \ osr s' s'' \ rvr x y" + "\is_matching_fragment sr osr rvr ctr cres s0 R s f; (x, s') \ mres (f s)\ + \ \y s''. 
cres = Result (y, s'') \ osr s' s'' \ rvr x y" apply (clarsimp simp: mres_def) apply (frule(1) is_matching_fragment_trD) apply (clarsimp simp: matching_tr_pfx_def) @@ -860,40 +1130,26 @@ lemma is_matching_fragment_mresD: done lemma matching_tr_pfx_sr_hd_append: - "matching_tr_pfx sr tr tr' - \ sr s0 t0 - \ length tr \ length tr' - \ sr (hd (map snd tr @ [s0])) (hd (map snd tr' @ [t0]))" + "\matching_tr_pfx sr tr tr'; sr s0 t0; length tr \ length tr'\ + \ sr (hd (map snd tr @ [s0])) (hd (map snd tr' @ [t0]))" apply (clarsimp simp: matching_tr_pfx_def matching_tr_def) apply (erule list.rel_cases; clarsimp) done lemma matching_tr_pfx_last_st_tr: - "matching_tr_pfx sr tr tr' - \ sr s0 t0 - \ length tr \ length tr' - \ sr (last_st_tr tr s0) (last_st_tr tr' t0)" + "\matching_tr_pfx sr tr tr'; sr s0 t0; length tr \ length tr'\ + \ sr (last_st_tr tr s0) (last_st_tr tr' t0)" apply (clarsimp simp: matching_tr_pfx_def matching_tr_def) apply (erule list.rel_cases; clarsimp) done -lemma validI_relyT_mresD: - "\P'\,\\\\ f \G\,\P''\ - \ (rv, s') \ mres (f s) - \ P' s0 s - \ \s0'. P'' rv s0' s'" - apply (clarsimp simp: mres_def) - apply (drule(2) validI_rvD) - apply (simp add: rely_cond_def) - apply blast - done - -theorem prefix_refinement_bind_general[rule_format]: - "prefix_refinement sr isr intsr rvr P Q AR R a c - \ (\x y. rvr x y \ prefix_refinement sr intsr osr rvr' (P'' x) (Q'' y) AR R (b x) (d y)) - \ \P'\,\AR\ a \\\\,\P''\ \ \\s. \s0. P' s0 s\ a \\rv s. \s0. P'' rv s0 s\ - \ \Q'\,\R\ c \\\\,\Q''\ - \ prefix_refinement sr isr osr rvr' (P And P') (Q And Q') AR R (a >>= b) (c >>= d)" +\ \FIXME: do we want to follow the corres naming pattern and use split instead of bind?\ +theorem prefix_refinement_bind_general: + "\prefix_refinement sr isr intsr rvr' AR R P Q a c; + \rv rv'. rvr' rv rv' \ prefix_refinement sr intsr osr rvr AR R (P'' rv) (Q'' rv') (b rv) (d rv'); + \P'\,\AR\ a \\\\,\P''\ \ \\s. \s0. P' s0 s\ a \\rv s. \s0. P'' rv s0 s\; + \Q'\,\R\ c \\\\,\Q''\\ + \ prefix_refinement sr isr osr rvr AR R (P and P') (Q and Q') (a >>= (\rv. b rv)) (c >>= (\rv'. d rv'))" apply (subst prefix_refinement_def, clarsimp simp: bind_def) apply (rename_tac c_tr c_res cd_tr cd_res) apply (drule(5) prefix_refinementD, simp) @@ -904,7 +1160,8 @@ theorem prefix_refinement_bind_general[rule_format]: apply (case_tac "c_res = Incomplete \ c_res = Failed") apply (intro exI conjI) apply (rule_tac ctr'=Nil and cres'=Failed and f_b="\_ _ _. 
{}" - in is_matching_fragment_mbind, assumption, simp_all add: prefix_closed_def)[1] + in is_matching_fragment_mbind, + assumption, simp_all add: prefix_closed_def)[1] apply clarsimp apply (frule is_matching_fragment_mresD, erule in_mres) apply clarsimp @@ -917,20 +1174,20 @@ theorem prefix_refinement_bind_general[rule_format]: apply (frule(2) validI_rvD, simp add: rely_cond_append) apply (intro exI conjI) apply (rule is_matching_fragment_mbind_union[where aprog="b"], - assumption, simp_all)[1] + assumption, simp_all)[1] apply clarsimp apply (frule is_matching_fragment_mresD, erule in_mres) apply (clarsimp simp: mres_def) apply (frule(1) is_matching_fragment_trD) apply clarsimp apply (rule prefix_refinementD[where x="(a, b)" for a b, simplified, rule_format], - blast, simp_all)[1] + blast, simp_all)[1] prefer 2 apply (erule(1) matching_tr_pfx_last_st_tr) apply simp apply (erule disjE) apply (drule(1) validI_rvD[OF validI_triv_refinement], - erule is_matching_fragment_prefix_closed, assumption+) + erule is_matching_fragment_prefix_closed, assumption+) apply (drule(2) use_valid[OF in_mres valid_triv_refinement], blast, simp) apply (clarsimp simp: rely_cond_append hd_append hd_map cong: if_cong) apply (clarsimp simp: triv_refinement_def mbind_def) @@ -940,163 +1197,599 @@ theorem prefix_refinement_bind_general[rule_format]: apply blast done -section \Using prefix refinement.\ -text \ -Using prefix refinement to map the validI Hoare quadruple -(precond/rely/guarantee/postcond). Proofs of quadruples for -abstract programs imply related quadruples for concrete -programs. -\ - -lemma list_all2_all_trace_steps: - assumes P: "\x\trace_steps (rev tr) s0. P x" - and lR': "list_all2 (\(aid, as) (cid, cs). aid = cid \ R' as cs) tr' tr" - and R': "R' s0' s0" - and Q: "\idn as1 as2 cs1 cs2. R' as1 cs1 \ R' as2 cs2 - \ P (idn, cs1, cs2) \ Q (idn, as1, as2)" - shows "\x\trace_steps (rev tr') s0'. Q x" -proof - - note lR'' = lR'[simplified trans[OF list_all2_rev[symmetric] list_all2_conv_all_nth], - simplified split_def, THEN conjunct2, rule_format] - note len[simp] = lR'[THEN list_all2_lengthD] - show ?thesis - using P R' - apply (clarsimp simp: trace_steps_nth) - apply (drule_tac x=x in bspec, simp) - apply (frule lR''[simplified]) - apply (cut_tac i="x - 1" in lR'', simp) - apply (auto simp: Q) - done -qed +section \Derivative splitting rules\ -theorem prefix_refinement_validI: - "prefix_refinement sr isr osr rvr prP' prP R' R f g - \ \P'\,\R'\ - f \\s0 s. \cs0 cs. sr s0 cs0 \ sr s cs \ G cs0 cs\,\\rv - s0 s. \rv' cs0 cs. sr s0 cs0 \ osr s cs \ rvr rv rv' \ Q rv' cs0 cs\ - \ \t0 t. P t0 t \ (\s0 s. P' s0 s \ prP' s0 s \ prP t0 t \ isr s t \ sr s0 t0) - \ \s0 t0 t. sr s0 t0 \ R t0 t \ (\s. R' s0 s \ sr s t) - \ prefix_closed g - \ \P\,\R\ g \G\,\Q\" - apply (subst validI_def, clarsimp simp: bipred_conj_def rely_def) - apply (drule spec2, drule(1) mp, clarsimp) - apply (drule(6) prefix_refinement_rely_cond_trD[where R'=R', simplified]) - apply blast - apply clarsimp - apply (rule conjI) - apply (frule(3) validI_GD) - apply (simp add: guar_cond_def matching_tr_def) - apply (erule_tac R'="\cs s. sr s cs" in list_all2_all_trace_steps) - apply (clarsimp simp: list_all2_conv_all_nth split_def) - apply simp +lemma prefix_refinement_bind_v: + "\prefix_refinement sr isr intsr rvr' AR R P Q a c; + \rv rv'. rvr' rv rv' \ prefix_refinement sr intsr osr rvr AR R (\_. P'' rv) (Q'' rv') (b rv) (d rv'); + \P'\ a \P''\; \Q'\,\R\ c \\\\,\Q''\\ + \ prefix_refinement sr isr osr rvr AR R (\s0. 
P s0 and P') (Q and Q') (a >>= (\rv. b rv)) (c >>= (\rv'. d rv'))" + apply (rule prefix_refinement_weaken_pre, + rule prefix_refinement_bind_general[where P'="\_. P'"]) + apply assumption + apply (elim meta_allE, erule(1) meta_mp) + apply (rule disjI2) + apply simp + apply assumption apply clarsimp apply clarsimp - apply (erule tmres.rel_cases; clarsimp) - apply (drule(1) validI_rvD, simp add: rely_def) - apply simp - apply (case_tac tr; clarsimp simp: list_all2_Cons2 matching_tr_def) done -lemmas prefix_refinement_validI' = prefix_refinement_validI[OF _ validI_strengthen_guar, OF _ validI_strengthen_post] +lemmas prefix_refinement_bind = prefix_refinement_bind_general[OF _ _ disjI1] -section \Building blocks.\ -text \ -Prefix refinement rules for various basic constructs. -\ +lemmas prefix_refinement_bind_sr = prefix_refinement_bind[where sr=sr and intsr=sr for sr] +lemmas prefix_refinement_bind_isr = prefix_refinement_bind[where isr=isr and intsr=isr for isr] +lemmas pfx_refn_bind = + prefix_refinement_bind_v[where sr=sr and isr=sr and osr=sr and intsr=sr for sr] +lemmas pfx_refn_bindT = + pfx_refn_bind[where P'="\" and Q'="\_ _. True", OF _ _ hoare_TrueI twp_post_taut, + simplified pred_conj_def, simplified] -lemma prefix_refinement_weaken_pre: - "prefix_refinement sr isr osr rvr P' Q' AR R f g - \ \s s0. P s0 s \ P' s0 s - \ (\s t s0 t0. isr s t \ sr s0 t0 \ P s0 s \ Q t0 t \ Q' t0 t) - \ prefix_refinement sr isr osr rvr P Q AR R f g" - by (fastforce simp: prefix_refinement_def) +\ \FIXME: these are copied from Corres_UL.thy, move somewhere that they can be shared\ +primrec rel_sum_comb :: + "('a \ 'b \ bool) \ ('c \ 'd \ bool) \ ('a + 'c \ 'b + 'd \ bool)" (infixl "\" 95) + where + "(f \ g) (Inr x) y = (\y'. y = Inr y' \ (g x y'))" + | "(f \ g) (Inl x) y = (\y'. y = Inl y' \ (f x y'))" + +lemma rel_sum_comb_r2[simp]: + "(f \ g) x (Inr y) = (\x'. x = Inr x' \ g x' y)" + apply (case_tac x, simp_all) + done + +lemma rel_sum_comb_l2[simp]: + "(f \ g) x (Inl y) = (\x'. x = Inl x' \ f x' y)" + apply (case_tac x, simp_all) + done + +lemma prefix_refinement_bindE_general: + "\prefix_refinement sr isr intsr (f \ rvr') AR R P Q a c; + \rv rv'. rvr' rv rv' \ prefix_refinement sr intsr osr (f \ rvr) AR R (P'' rv) (Q'' rv') (b rv) (d rv'); + \P'\,\AR\ a \\\\,\P''\,\E\ \ \\s. \s0. P' s0 s\ a \\rv s. \s0. P'' rv s0 s\,\\rv s. \s0. E rv s0 s\; + \Q'\,\R\ c \\\\,\Q''\,\E'\; + \rv s0 s rv' t0 t. \E rv s0 s; E' rv' t0 t; intsr s t\ \ osr s t\ + \ prefix_refinement sr isr osr (f \ rvr) AR R (P and P') (Q and Q') (a >>=E (\rv. b rv)) (c >>=E (\rv'. d rv'))" + apply (unfold bindE_def validIE_def validE_def) + apply (erule prefix_refinement_bind_general) + defer + apply (erule disj_forward; assumption?) + apply (fastforce simp: valid_def split_def split: sum.splits) + apply assumption + apply (case_tac rv; clarsimp simp: lift_def) + apply (rule prefix_refinement_throwError_imp) + apply clarsimp + done -lemma prefix_refinement_name_pre: - "(\s s0 t t0. isr s t \ sr s0 t0 \ P s0 s \ Q t0 t - \ prefix_refinement sr isr osr rvr (\s0' s'. s0' = s0 \ s' = s) (\t0' t'. t0' = t0 \ t' = t) AR R f g) - \ prefix_refinement sr isr osr rvr P Q AR R f g" - by (fastforce simp: prefix_refinement_def) - -lemma prefix_refinement_bind_v[rule_format]: - "prefix_refinement sr isr intsr rvr P Q AR R a c - \ (\x y. rvr x y \ prefix_refinement sr intsr osr rvr' (\s0. P'' x) (Q'' y) AR R (b x) (d y)) - \ \P'\ a \P''\ \ \Q'\,\R\ c \\\\,\Q''\ - \ prefix_refinement sr isr osr rvr' (\s0. 
P s0 and P') (Q And Q') AR R (a >>= b) (c >>= d)" +lemma prefix_refinement_bindE_v: + "\prefix_refinement sr isr intsr (f \ rvr') AR R P Q a c; + \rv rv'. rvr' rv rv' \ prefix_refinement sr intsr osr (f \ rvr) AR R (\_. P'' rv) (Q'' rv') (b rv) (d rv'); + \P'\ a \P''\,\E\; \Q'\,\R\ c \\\\,\Q''\,\E'\; + \rv s rv' t0 t. \E rv s; E' rv' t0 t; intsr s t\ \ osr s t\ + \ prefix_refinement sr isr osr (f \ rvr) AR R (\s0. P s0 and P') (Q and Q') (a >>=E (\rv. b rv)) (c >>=E (\rv'. d rv'))" apply (rule prefix_refinement_weaken_pre, - rule prefix_refinement_bind_general[where P'="\_. P'"]) - apply assumption - apply (elim allE, erule(1) mp) - apply (rule disjI2) - apply simp - apply assumption + rule prefix_refinement_bindE_general[where P'="\_. P'"and E="\rv _. E rv"]) + apply assumption + apply (elim meta_allE, erule(1) meta_mp) + apply (rule disjI2) + apply simp + apply assumption + apply clarsimp apply clarsimp apply clarsimp done -lemmas prefix_refinement_bind - = prefix_refinement_bind_general[OF _ _ disjI1] +lemmas prefix_refinement_bindE = prefix_refinement_bindE_general[OF _ _ disjI1] + +lemmas prefix_refinement_bindE_sr = prefix_refinement_bindE[where sr=sr and intsr=sr for sr] +lemmas prefix_refinement_bindE_isr = prefix_refinement_bindE[where isr=isr and intsr=isr for isr] +lemmas pfx_refn_bindE = + prefix_refinement_bindE_v[where sr=sr and isr=sr and osr=sr and intsr=sr for sr, where E="\\" and E'="\\\", + atomized, simplified, rule_format] (*this sequence of attributes removes a trivial assumption*) +lemmas pfx_refn_bindET = + pfx_refn_bindE[where P'="\" and Q'="\_ _. True", OF _ _ hoareE_TrueI twp_post_tautE, + simplified pred_conj_def, simplified] + +lemma prefix_refinement_handle: + "\prefix_refinement sr isr osr (rvr'' \ rvr) AR R P Q a c; + \ft ft'. rvr'' ft ft' \ prefix_refinement sr osr osr (rvr' \ rvr) AR R (E ft) (E' ft') (b ft) (d ft'); + \P'\,\AR\ a -,-,\E\; \Q'\,\R\ c -, -,\E'\\ + \ prefix_refinement sr isr osr (rvr' \ rvr) AR R (P and P') (Q and Q') (a (\ft. b ft)) (c (\ft'. d ft'))" + apply (simp add: handleE_def handleE'_def validIE_def) + apply (erule prefix_refinement_bind) + defer + apply assumption+ + apply (case_tac v; clarsimp) + apply (rule prefix_refinement_return_imp) + apply clarsimp + done -lemma default_prefix_refinement_ex: - "is_matching_fragment sr osr rvr ctr cres s0 R s - (\s. aprog s \ ({tr'. length tr' \ length ctr} \ UNIV)) - \ \f. is_matching_fragment sr osr rvr ctr cres s0 R s f - \ triv_refinement aprog f" - apply (intro exI conjI, assumption) - apply (simp add: triv_refinement_def) +lemma prefix_refinement_catch: + "\prefix_refinement sr isr osr (rvr' \ rvr) AR R P Q a c; + \ft ft'. rvr' ft ft' \ prefix_refinement sr osr osr rvr AR R (E ft) (E' ft') (b ft) (d ft'); + \P'\,\AR\ a -,-,\E\; \Q'\,\R\ c -, -,\E'\\ + \ prefix_refinement sr isr osr rvr AR R (P and P') (Q and Q') (a (\ft. b ft)) (c (\ft'. d ft'))" + apply (simp add: catch_def validIE_def) + apply (erule prefix_refinement_bind) + defer + apply assumption+ + apply (case_tac x; clarsimp) + apply (rule prefix_refinement_return_imp) + apply clarsimp done -lemma default_prefix_refinement_ex_match_iosr_R: - "is_matching_fragment sr osr rvr ctr cres s0 R s - (rely (\s. aprog s \ ({tr'. matching_tr_pfx iosr tr' ctr} \ UNIV)) R s0) - \ \f. 
is_matching_fragment sr osr rvr ctr cres s0 R s f - \ triv_refinement aprog f" - apply (intro exI conjI, assumption) - apply (clarsimp simp add: triv_refinement_def rely_def) +lemma prefix_refinement_handle_elseE: + "\prefix_refinement sr isr osr (fr' \ rvr') AR R P Q a c; + \ft ft'. fr' ft ft' \ prefix_refinement sr osr osr (fr \ rvr) AR R (E ft) (E' ft') (b ft) (d ft'); + \rv rv'. rvr' rv rv' \ prefix_refinement sr osr osr (fr \ rvr) AR R (P'' rv) (Q'' rv') (f rv) (g rv'); + \P'\,\AR\ a -,\P''\,\E\; \Q'\,\R\ c -, \Q''\,\E'\\ + \ prefix_refinement sr isr osr (fr \ rvr) AR R (P and P') (Q and Q') + (a (\ft. b ft) (\rv. f rv)) (c (\ft'. d ft') (\rv. g rv))" + apply (simp add: handle_elseE_def validIE_def) + apply (erule prefix_refinement_bind) + defer + apply assumption+ + apply (case_tac v; clarsimp) done -lemma is_matching_fragment_no_trace: - "is_matching_fragment sr osr rvr [] cres s0 R s (\s. {([], ares s)}) - = rel_tmres osr rvr (ares s) cres" - by (simp add: is_matching_fragment_def prefix_closed_def - self_closed_def env_closed_def - matching_tr_pfx_def matching_tr_def) +lemmas prefix_refinement_bind_eqr = prefix_refinement_bind[where rvr'="(=)", simplified] +lemmas prefix_refinement_bind_eqrE = prefix_refinement_bindE[where rvr'="(=)", simplified] -lemma prefix_refinement_return_imp: - "(\s s0 t0 t. P s0 s \ Q t0 t \ isr s t \ rvr rv rv' \ osr s t) - \ prefix_refinement sr isr osr rvr P Q AR R (return rv) (return rv')" - apply (clarsimp simp: prefix_refinement_def) - apply (rule default_prefix_refinement_ex) - apply (auto simp add: return_def is_matching_fragment_no_trace) +\ \FIXME: these are copied from Corres_UL.thy, move somewhere that they can be shared\ +definition + "dc \ \rv rv'. True" + +lemma dc_simp[simp]: "dc a b" + by (simp add: dc_def) + +lemma dc_o_simp1[simp]: "dc \ f = dc" + by (simp add: dc_def o_def) + +lemma dc_o_simp2[simp]: "dc x \ f = dc x" + by (simp add: dc_def o_def) + +lemma unit_dc_is_eq: + "(dc::unit\_\_) = (=)" + by (fastforce simp: dc_def) + +lemma prefix_refinement_bind_nor: + "\prefix_refinement sr isr intsr dc AR R P Q a c; + prefix_refinement sr intsr osr rvr AR R P'' Q'' b d; + \P'\, \AR\ a -, \\_. P''\; \Q'\, \R\ c -, \\_. Q''\\ + \ prefix_refinement sr isr osr rvr AR R (P and P') (Q and Q') (a >>= (\_. b)) (c >>= (\_. d))" + by (rule prefix_refinement_bind; assumption) + +lemma prefix_refinement_bind_norE: + "\prefix_refinement sr isr intsr (f \ dc) AR R P Q a c; + prefix_refinement sr intsr osr (f \ rvr) AR R P'' Q'' b d; + \P'\, \AR\ a -, \\_. P''\, \E\; \Q'\, \R\ c -, \\_. Q''\, \E'\; + \rv s0 s rv' t0 t. \E rv s0 s; E' rv' t0 t; intsr s t\ \ osr s t\ + \ prefix_refinement sr isr osr (f \ rvr) AR R (P and P') (Q and Q') (a >>=E (\_. b)) (c >>=E (\_. d))" + by (rule prefix_refinement_bindE; assumption) + +lemmas prefix_refinement_bind_mapr = prefix_refinement_bind[where rvr'="(=) \ g" for g, simplified] +lemmas prefix_refinement_bind_maprE = prefix_refinement_bindE[where rvr'="(=) \ g" for g, simplified] + + +section \Rules for walking prefix refinement into basic constructs\ + +lemma prefix_refinement_if: + "\G = G'; prefix_refinement sr isr osr rvr AR R P Q a c; + prefix_refinement sr isr osr rvr AR R P' Q' b d \ + \ prefix_refinement sr isr osr rvr AR R + (if G then P else P') (if G' then Q else Q') + (if G then a else b) (if G' then c else d)" + by simp + +\ \FIXME: copied from Word_Lib.Many_More, where would be a good spot to put it?\ +lemma if_apply_def2: + "(if P then F else G) = (\x y. 
(P \ F x y) \ (\ P \ G x y))" + by simp + +\ \FIXME: this could have slightly better bound variable names if written out, should we just do + that and avoid the previous FIXME?\ +lemmas prefix_refinement_if2 = prefix_refinement_if[unfolded if_apply_def2] + +lemma prefix_refinement_when: + "\G = G'; prefix_refinement sr isr isr rvr AR R P Q a c; rvr () ()\ + \ prefix_refinement sr isr isr rvr AR R (\x y. G \ P x y) (\x y. G' \ Q x y) + (when G a) (when G' c)" + unfolding when_def + apply clarsimp + apply (rule prefix_refinement_return_imp) + apply simp + done + +lemma prefix_refinement_whenE: + "\G = G'; prefix_refinement sr isr isr (f \ rvr) AR R P Q a c; rvr () ()\ + \ prefix_refinement sr isr isr (f \ rvr) AR R (\x y. G \ P x y) (\x y. G' \ Q x y) + (whenE G a) (whenE G' c)" + unfolding whenE_def returnOk_def + apply clarsimp + apply (rule prefix_refinement_return_imp) + apply simp done -abbreviation(input) - "dc2 \ (\_ _. True)" +lemma prefix_refinement_unless: + "\G = G'; prefix_refinement sr isr isr rvr AR R P Q a c; rvr () ()\ + \ prefix_refinement sr isr isr rvr AR R (\x y. \ G \ P x y) (\x y. \ G' \ Q x y) + (unless G a) (unless G' c)" + by (simp add: unless_def prefix_refinement_when) + +lemma prefix_refinement_unlessE: + "\G = G'; prefix_refinement sr isr isr (f \ rvr) AR R P Q a c; rvr () ()\ + \ prefix_refinement sr isr isr (f \ rvr) AR R (\x y. \ G \ P x y) (\x y. \ G' \ Q x y) + (unlessE G a) (unlessE G' c)" + by (simp add: unlessE_whenE prefix_refinement_whenE) + +lemma prefix_refinement_if_r: + "\G' \ prefix_refinement sr isr osr rvr AR R P Q a c; + \G' \ prefix_refinement sr isr osr rvr AR R P Q' a d \ + \ prefix_refinement sr isr osr rvr AR R + P (if G' then Q else Q') + a (if G' then c else d)" + by simp + +lemma prefix_refinement_if3: + "\G = G'; G' \ prefix_refinement sr isr osr rvr AR R P Q a c; + \G' \ prefix_refinement sr isr osr rvr AR R P' Q' b d \ + \ prefix_refinement sr isr osr rvr AR R + (if G then P else P') (if G' then Q else Q') + (if G then a else b) (if G' then c else d)" + by simp + +lemma prefix_refinement_if_strong: + "\\s0 s t0 t. \sr s0 t0; isr s t; P'' s0 s; Q'' t0 t\ \ G = G'; + \G; G'\ \ prefix_refinement sr isr osr rvr AR R P Q a c; + \\G; \G'\ \ prefix_refinement sr isr osr rvr AR R P' Q' b d \ + \ prefix_refinement sr isr osr rvr AR R + (P'' and (if G then P else P')) (Q'' and (if G' then Q else Q')) + (if G then a else b) (if G' then c else d)" + by (fastforce simp: prefix_refinement_def) + + +\ \FIXME: Put more thought into whether we want this section, and if not what alternative rules + would we want. The comment copied from Corres_UL suggests we might not want them. + They would be a fair bit more complicated to prove for prefix_refinement.\ +section \Some equivalences about liftM and other useful simps\ + +(* These rules are declared [simp], which in hindsight was not a good decision, because they + change the return relation which often is schematic when these rules apply in the goal. + In those circumstances it is usually safer to unfold liftM_def and proceed with the resulting + substituted term. 
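+   As a small illustration (sketch only, using the same schematic names as the rules
+   commented out below): a goal of the form
+     prefix_refinement sr isr osr ?rvr AR R P P' (liftM t f) g
+   becomes, after unfolding liftM_def,
+     prefix_refinement sr isr osr ?rvr AR R P P' (f >>= (\x. return (t x))) g
+   which leaves a schematic ?rvr untouched, whereas the [simp] rules below would
+   instantiate it to a composition with t.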
+ + (We leave the [simp] attribute here, because too many proofs now depend on it) +*) +(* +lemma prefix_refinement_liftM_simp[simp]: + "prefix_refinement sr isr osr rvr AR R P P' (liftM t f) g = + prefix_refinement sr isr osr (rvr \ t) AR R P P' f g" + by (auto simp: prefix_refinement_def triv_refinement_def in_liftM) + +lemma prefix_refinement_liftM2_simp[simp]: + "prefix_refinement sr isr osr rvr AR R P P' f (liftM t g) = + prefix_refinement sr isr osr (\x. rvr x \ t) AR R P P' f g" + by (fastforce simp: prefix_refinement_def in_liftM) + +lemma prefix_refinement_liftE_rel_sum[simp]: + "prefix_refinement sr isr osr (f \ rvr) AR R P P' (liftE m) (liftE m') = + prefix_refinement sr isr osr rvr AR R P P' m m'" + by (simp add: liftE_liftM o_def) + +lemmas corres_liftE_lift = corres_liftE_rel_sum[THEN iffD2]*) + + +section \Support for proving correspondence to noop with hoare triples\ + +lemma prefix_refinement_noop: + assumes P: "\s0 s. P s0 s \ \\t. isr s t \ Q t\ f \\rv t. osr s t \ rvr x rv\" + assumes nt: "no_trace f" + assumes nf: "\s0 s. P s0 s \ no_fail (\t. isr s t \ Q t) f" + shows "prefix_refinement sr isr osr rvr AR R P (\_. Q) (return x) f" + apply (subst prefix_refinement_no_trace) + apply (rule nt) + apply clarsimp + apply (frule P) + apply (insert nf) + apply (insert nt) + apply (clarsimp simp: valid_def no_fail_def no_trace_def return_def mres_def failed_def image_def) + apply (case_tac b; fastforce) + done + +lemma prefix_refinement_noopE: + assumes P: "\s0 s. P s0 s \ \\t. isr s t \ Q t\ f \\rv t. osr s t \ rvr x rv\,\\\\" + assumes nt: "no_trace f" + assumes nf: "\s0 s. P s0 s \ no_fail (\t. isr s t \ Q t) f" + shows "prefix_refinement sr isr osr (frvr \ rvr) AR R P (\_. Q) (returnOk x) f" +proof - + have Q: "\P f Q E. \P\f\Q\,\E\ \ \P\ f \\r s. case_sum (\e. E e s) (\r. Q r s) r\" + by (simp add: validE_def) + thus ?thesis + apply (simp add: returnOk_def) + apply (rule prefix_refinement_noop) + apply (rule hoare_strengthen_post) + apply (rule Q) + apply (rule P) + apply assumption + apply (simp split: sum.splits) + apply (rule nt) + apply (erule nf) + done +qed + +lemma prefix_refinement_noop2: + assumes f: "\s. P s \ \(=) s\ f \\\_. (=) s\" + assumes g: "\t. Q t \ \(=) t\ g \\_. (=) t\" + assumes nt: "no_trace f" "no_trace g" + assumes nf: "no_fail P f" "no_fail Q g" + shows "prefix_refinement sr iosr iosr dc AR R (\_. P) (\_. Q) f g" + apply (subst prefix_refinement_no_trace) + apply (rule nt) + apply clarsimp + apply (insert nt) + apply (insert nf) + apply (clarsimp simp: no_trace_def no_fail_def failed_def image_def) + apply (subgoal_tac "\(r, s')\mres (f s). rel_tmres iosr dc (Result (r, s')) b") + apply (case_tac b; fastforce simp: mres_def intro: rev_bexI) + apply (rule use_exs_valid) + apply (rule exs_hoare_post_imp[rotated]) + apply (erule f) + apply (case_tac b; clarsimp) + apply fastforce + apply fastforce + apply (subgoal_tac "ba = t") + apply simp + apply (rule sym) + apply (rule use_valid[OF _ g], erule in_mres) + apply simp+ + done + + +section \Support for dividing correspondence along logical boundaries\ + +lemma prefix_refinement_disj_division: + "\S \ T; S \ prefix_refinement sr isr osr rvr AR R P Q x y; + T \ prefix_refinement sr isr osr rvr AR R P' Q' x y\ + \ prefix_refinement sr isr osr rvr AR R + (\s0 s. (S \ P s0 s) \ (T \ P' s0 s)) (\s0 s. 
(S \ Q s0 s) \ (T \ Q' s0 s)) x y" + by (safe; pfx_refn_pre, simp+) + +lemma prefix_refinement_weaker_disj_division: + "\S \ T; S \ prefix_refinement sr isr osr rvr AR R P Q x y; + T \ prefix_refinement sr isr osr rvr AR R P' Q' x y\ + \ prefix_refinement sr isr osr rvr AR R (P and P') (Q and Q') x y" + by (pfx_refn_pre, rule prefix_refinement_disj_division, simp+) + +lemma prefix_refinement_symmetric_bool_cases: + "\S = T; \S; T\ \ prefix_refinement sr isr osr rvr AR R P Q f g; + \\S; \T\ \ prefix_refinement sr isr osr rvr AR R P' Q' f g\ + \ prefix_refinement sr isr osr rvr AR R (\s0 s. (S \ P s0 s) \ (\S \ P' s0 s)) + (\s0 s. (T \ Q s0 s) \ (\T \ Q' s0 s)) + f g" + by (cases S, simp_all) + +text \Support for symbolically executing into the guards and manipulating them\ + +lemma prefix_refinement_symb_exec_l: + assumes z: "\rv. prefix_refinement sr isr osr rvr AR R (\_. P' rv) Q (x rv) y" + assumes x: "\s. P s \ \(=) s\ m \\\r. (=) s\" + assumes y: "\P\ m \\rv s. P' rv s\" + assumes nf: "no_fail P m" + assumes nt: "no_trace m" + shows "prefix_refinement sr isr osr rvr AR R (\_. P) Q (m >>= (\rv. x rv)) y" + apply pfx_refn_pre + apply (subst gets_bind_ign[symmetric], rule prefix_refinement_bind[OF _ z]) + apply (rule prefix_refinement_noop2) + apply (erule x) + apply (rule gets_wp) + apply (rule nt) + apply (rule no_trace_gets) + apply (rule nf) + apply (rule no_fail_gets) + apply (rule valid_validI_wp[OF nt y]) + apply wp + apply simp+ + done + +lemma prefix_refinement_symb_exec_r: + assumes z: "\rv. prefix_refinement sr isr osr rvr AR R P (\_. Q' rv) x (y rv)" + assumes y: "\Q\ m \Q'\" + assumes x: "\s. Q s \ \(=) s\ m \\r. (=) s\" + assumes nf: "no_fail Q m" + assumes nt: "no_trace m" + shows "prefix_refinement sr isr osr rvr AR R P (\_. Q) x (m >>= (\rv. y rv))" + apply pfx_refn_pre + apply (subst gets_bind_ign[symmetric], rule prefix_refinement_bind[OF _ z]) + apply (rule prefix_refinement_noop2) + apply (clarsimp simp: simpler_gets_def exs_valid_def mres_def vimage_def) + apply (erule x) + apply (rule no_trace_gets) + apply (rule nt) + apply (rule no_fail_gets) + apply (rule nf) + apply wp + apply (rule valid_validI_wp[OF nt y]) + apply simp+ + done + +lemma prefix_refinement_symb_exec_r_conj: + assumes z: "\rv. prefix_refinement sr isr osr rvr AR R P (\_. Q' rv) x (y rv)" + assumes y: "\Q\ m \Q'\" + assumes x: "\s. \\s'. isr s s' \ S s'\ m \\rv s'. isr s s'\" + assumes nf: "\s. no_fail (\t. isr s t \ S t) m" + assumes nt: "no_trace m" + shows "prefix_refinement sr isr osr rvr AR R P (\_. S and Q) x (m >>= (\rv. y rv))" +proof - + have P: "prefix_refinement sr isr isr dc AR R \\ (\_. S) (return undefined) m" + apply (rule prefix_refinement_noop) + apply (simp add: x nt nf)+ + done + show ?thesis + apply pfx_refn_pre + apply (subst return_bind[symmetric], rule prefix_refinement_bind[OF P]) + apply (rule z) + apply wp + apply (rule valid_validI_wp[OF nt y]) + apply simp+ + done +qed + +\ \FIXME: improve automation of this proof\ +lemma prefix_refinement_bind_return_r: + "prefix_refinement sr isr osr (\x y. 
rvr x (h y)) AR R P Q f g \ + prefix_refinement sr isr osr rvr AR R P Q f (do x \ g; return (h x) od)" + apply (clarsimp simp: prefix_refinement_def bind_def return_def) + apply ((drule spec)+, (drule (1) mp)+) + apply (drule (1) bspec, clarsimp) + apply (subgoal_tac "aa=a") + prefer 2 + apply (fastforce split: tmres.splits) + apply clarsimp + apply (rule_tac x=fa in exI) + apply (clarsimp simp: is_matching_fragment_def split: tmres.splits) + apply (case_tac b; fastforce) + apply (case_tac b; fastforce) + apply (case_tac bc; fastforce) + done + +lemma prefix_refinement_symb_exec_l': + "\prefix_refinement sr isr isr dc AR R P P' f (return ()); + \rv. prefix_refinement sr isr osr rvr AR R (Q rv) P' (g rv) h; + \s0. \P s0\ f \\rv. Q rv s0\; no_trace f\ + \ prefix_refinement sr isr osr rvr AR R P P' (f >>= g) h" + apply (drule prefix_refinement_bind) + apply assumption + apply (erule valid_validI_wp) + apply assumption + apply wp + apply clarsimp + done + + +section \Building blocks\ + +lemma prefix_refinement_triv_pre: + "prefix_refinement sr isr osr rvr AR R \\ \\ f g + \ prefix_refinement sr isr osr rvr AR R \\ \\ f g" + by assumption + +\ \FIXME: Need to work out the best fragment to provide for prefix_refinement_trivial. + Defining and using prefix_close like this is one option but might be more work than needed.\ +\ \definition prefix_close_set :: + "((tmid \ 's) list \ ('s, 'a) tmres) set \ ((tmid \ 's) list \ ('s, 'a) tmres) set" + where + "prefix_close_set f = {(xs, Incomplete). (\xs' \ fst ` f. \n\length xs'. drop n xs' = xs)} \ f" + +definition prefix_close :: "('a, 'b) tmonad \ ('a, 'b) tmonad" where + "prefix_close f \ \s. prefix_close_set (f s)" + +lemma drop_Suc_eq: + "drop n xs = y # ys \ drop (Suc n) xs = ys" + by (auto simp: drop_Suc drop_tl) + +lemma prefix_close_set_closed: + "x # xs \ fst ` prefix_close_set (f s) \ (xs, Incomplete) \ prefix_close_set (f s)" + unfolding prefix_close_set_def + apply safe + apply (rule_tac x=aa in bexI) + apply (rule_tac x="Suc n" in exI) + apply (fastforce intro!: Suc_leI le_neq_trans elim!: drop_Suc_eq[OF sym]) + apply (auto simp: image_def intro!: bexI)[1] + apply (rule_tac x=a in bexI) + apply (rule_tac x="Suc 0" in exI) + apply fastforce + apply (auto simp: image_def intro!: bexI)[1] + done + +lemma prefix_close_closed: + "prefix_closed (prefix_close f)" + unfolding prefix_closed_def prefix_close_def + by (auto simp: prefix_close_set_closed) + +lemma triv_refinement_prefix_close: + "\prefix_closed f; triv_refinement f g\ \ triv_refinement f (prefix_close g)" + apply (clarsimp simp: triv_refinement_def) + oops + +lemma prefix_refinement_trivial: + "prefix_closed f \ pfx_refn (=) (=) R R \\ \\ f f" + apply (clarsimp simp: prefix_refinement_def) + apply (rule_tac x="prefix_close (\s'. if s'=s then {(a,b)} else {})" in exI) + apply (clarsimp simp: triv_refinement_def) +apply (auto simp: is_matching_fragment_def prefix_closed_def self_closed_def env_closed_def + matching_tr_pfx_def matching_tr_def)[1] +oops\ abbreviation - "pfx_refnT sr rvr \ pfx_refn sr rvr (\_ _. 
True) dc2" + "pfx_refnT sr rvr AR R \ pfx_refn sr rvr AR R \\ \\" -lemma pfx_refn_return: - "rvr rv rv' - \ pfx_refnT sr rvr AR R (return rv) (return rv')" +lemma prefix_refinement_returnTT: + "rvr rv rv' \ prefix_refinement sr iosr iosr rvr AR R \\ \\ (return rv) (return rv')" by (rule prefix_refinement_return_imp, simp) lemma prefix_refinement_get: - "prefix_refinement sr iosr iosr iosr dc2 dc2 AR R get get" - apply (clarsimp simp: prefix_refinement_def get_def) + "prefix_refinement sr iosr iosr iosr AR R \\ \\ get get" + by (rule prefix_refinement_get_imp, simp) + +lemma prefix_refinement_put_imp: + "\\s0 s t0 t. \sr s0 t0; isr s t; P s0 s; Q t0 t\ \ osr s' t'\ + \ prefix_refinement sr isr osr dc AR R P Q (put s') (put t')" + apply (clarsimp simp: prefix_refinement_def) apply (rule default_prefix_refinement_ex) - apply (simp add: is_matching_fragment_no_trace) + apply (clarsimp simp: put_def is_matching_fragment_no_trace) done lemma prefix_refinement_put: - "osr s t \ prefix_refinement sr isr osr dc2 dc2 dc2 AR R (put s) (put t)" - apply (clarsimp simp: prefix_refinement_def put_def) + "osr s t \ prefix_refinement sr isr osr dc AR R \\ \\ (put s) (put t)" + by (rule prefix_refinement_put_imp, simp) + +lemma prefix_refinement_modify: + "\\s0 s t0 t. \sr s0 t0; isr s t; P s0 s; Q t0 t\ \ osr (f s) (g t)\ + \ prefix_refinement sr isr osr dc AR R P Q (modify f) (modify g)" + apply (simp add: modify_def) + apply (rule prefix_refinement_weaken_pre) + apply (rule prefix_refinement_bind[where intsr=isr, OF prefix_refinement_get]) + apply (rule_tac P="\s0 _. P s0 s" and Q="\s0 _. Q s0 sa" in prefix_refinement_put_imp) + apply wpsimp+ + done + +lemmas prefix_refinement_modifyT = + prefix_refinement_modify[where P="\\" and Q="\\", simplified] + +lemmas pfx_refn_modifyT = + prefix_refinement_modifyT[where sr=sr and isr=sr and osr=sr for sr] + +lemmas prefix_refinement_get_pre = + prefix_refinement_bind[OF prefix_refinement_get _ valid_validI_wp[OF _ get_sp] + valid_validI_wp[OF _ get_sp], + simplified pred_conj_def, simplified] + +lemma prefix_refinement_gets: + "\\s t. \iosr s t; P s; Q t\ \ rvr (f s) (f' t)\ + \ prefix_refinement sr iosr iosr rvr AR R (\_. P) (\_. Q) (gets f) (gets f')" + apply (simp add: gets_def) + apply (rule prefix_refinement_get_pre) + apply (rule prefix_refinement_return_imp) + apply simp + done + +lemma prefix_refinement_fail: + "prefix_refinement sr isr osr rvr AR R \\ \\ fail fail" + apply (clarsimp simp: prefix_refinement_def fail_def) apply (rule default_prefix_refinement_ex) apply (simp add: is_matching_fragment_no_trace) done +lemma prefix_refinement_returnOkTT: + "rvr rv rv' \ prefix_refinement sr iosr iosr (rvr' \ rvr) AR R \\ \\ (returnOk rv) (returnOk rv')" + by (rule prefix_refinement_returnOk_imp, simp) + +lemma prefix_refinement_throwErrorTT: + "rvr e e' \ prefix_refinement sr iosr iosr (rvr \ rvr') AR R \\ \\ (throwError e) (throwError e')" + by (rule prefix_refinement_throwError_imp, simp) + lemma prefix_refinement_select: - "(\x \ T. \y \ S. rvr y x) - \ prefix_refinement sr iosr iosr rvr dc2 dc2 AR R (select S) (select T)" + "\\x \ T. \y \ S. 
rvr y x\ + \ prefix_refinement sr iosr iosr rvr AR R \\ \\ (select S) (select T)" apply (clarsimp simp: prefix_refinement_def select_def) apply (drule(1) bspec, clarsimp) apply (rule_tac x="return y" in exI) @@ -1105,36 +1798,267 @@ lemma prefix_refinement_select: apply (auto simp add: triv_refinement_def return_def image_def) done +lemma prefix_refinement_assert: + "P = P' \ prefix_refinement sr iosr iosr dc AR R \\ \\ (assert P) (assert P')" + by (simp add: assert_def prefix_refinement_fail prefix_refinement_return_imp) + +lemma prefix_refinement_assert_opt: + "\rel_option rvr v v'\ + \ prefix_refinement sr iosr iosr rvr AR R \\ \\ (assert_opt v) (assert_opt v')" + by (auto simp: assert_opt_def prefix_refinement_fail prefix_refinement_return_imp + split: option.splits) + +lemma prefix_refinement_assertE: + "P = P' \ prefix_refinement sr iosr iosr dc AR R \\ \\ (assertE P) (assertE P')" + by (simp add: assertE_def prefix_refinement_fail prefix_refinement_returnOk_imp) + +lemma prefix_refinement_gets_the: + "\\s t. \iosr s t; P s; Q t\ \ rel_option rvr (f s) (g t)\ + \ prefix_refinement sr iosr iosr rvr AR R (\_. P) (\_. Q) (gets_the f) (gets_the g)" + apply (simp add: gets_the_def) + apply (rule prefix_refinement_weaken_pre) + apply (rule prefix_refinement_bind[where rvr'="rel_option rvr"]) + apply (rule prefix_refinement_gets[where P=P and Q=Q]) + apply simp + apply (erule prefix_refinement_assert_opt) + apply wpsimp+ + done + +lemma prefix_refinement_gets_map: + "\\s t. \iosr s t; P s; Q t\ \ rel_option rvr (f s p) (g t q)\ + \ prefix_refinement sr iosr iosr rvr AR R (\_. P) (\_. Q) (gets_map f p) (gets_map g q)" + apply (subst gets_the_oapply_comp[symmetric])+ + apply (rule prefix_refinement_gets_the) + apply simp + done + +lemma prefix_refinement_throw_opt: + "\\s t. \iosr s t; P s; Q t\ \ rvr ex ex' \ rel_option rvr' x x'\ + \ prefix_refinement sr iosr iosr (rvr \ rvr') AR R (\_. P) (\_. 
Q) (throw_opt ex x) (throw_opt ex' x')" + apply (simp add: throw_opt_def) + apply (cases x; cases x') + apply (clarsimp simp: prefix_refinement_throwError_imp) + apply (fastforce simp: prefix_refinement_def) + apply (fastforce simp: prefix_refinement_def) + apply (clarsimp simp: prefix_refinement_returnOk_imp) + done + +lemma prefix_refinement_alternate1: + "prefix_refinement sr iosr iosr rvr AR R P Q a c \ prefix_refinement sr iosr iosr rvr AR R P Q (a \ b) c" + apply (subst prefix_refinement_def, clarsimp) + apply (drule(6) pfx_refnD2, clarsimp) + apply (fastforce intro: triv_refinement_trans[rotated] triv_refinement_alternative1) + done + +lemma prefix_refinement_alternate2: + "prefix_refinement sr iosr iosr rvr AR R P Q b c \ prefix_refinement sr iosr iosr rvr AR R P Q (a \ b) c" + apply (subst prefix_refinement_def, clarsimp) + apply (drule(6) pfx_refnD2, clarsimp) + apply (fastforce intro: triv_refinement_trans[rotated] triv_refinement_alternative2) + done + +lemma prefix_refinement_either_alternate1: + "\prefix_refinement sr iosr iosr rvr AR R P Q a c; prefix_refinement sr iosr iosr rvr AR R P' Q b c\ + \ prefix_refinement sr iosr iosr rvr AR R (P or P') Q (a \ b) c" + apply (subst prefix_refinement_def, clarsimp simp del: imp_disjL) + apply (erule disjE) + apply (drule(6) pfx_refnD2, clarsimp) + apply (fastforce intro: triv_refinement_trans[rotated] triv_refinement_alternative1) + apply (drule(6) pfx_refnD2, clarsimp) + apply (fastforce intro: triv_refinement_trans[rotated] triv_refinement_alternative2) + done + +lemma prefix_refinement_either_alternate2: + "\prefix_refinement sr iosr iosr rvr AR R P Q a c; prefix_refinement sr iosr iosr rvr AR R P Q' b c\ + \ prefix_refinement sr iosr iosr rvr AR R P (Q or Q') (a \ b) c" + apply (subst prefix_refinement_def, clarsimp simp del: imp_disjL) + apply (erule disjE) + apply (drule(6) pfx_refnD2, clarsimp) + apply (fastforce intro: triv_refinement_trans[rotated] triv_refinement_alternative1) + apply (drule(6) pfx_refnD2, clarsimp) + apply (fastforce intro: triv_refinement_trans[rotated] triv_refinement_alternative2) + done + +lemma is_matching_fragment_restrict: + "is_matching_fragment sr osr rvr ctr cres s0 R s f + \ is_matching_fragment sr osr rvr ctr cres s0 R s (\s'. if s'=s then f s else {})" + by (simp add: is_matching_fragment_def prefix_closed_def self_closed_def env_closed_def + matching_tr_pfx_def matching_tr_def) + +lemma triv_refinement_restrict: + "triv_refinement f (\s'. if s'=s then f s else {})" + by (clarsimp simp: triv_refinement_def) + +\ \FIXME: corres rules for condition don't exist, so maybe we don't bother with this. It feels + like it shouldn't be this hard to prove, but providing correct fragments is frustrating. + It might make it easier to instantiate if the exists was wrapped in a new definition.\ +lemma prefix_refinement_condition_strong: + "\\s0 s t0 t. \sr s0 t0; isr s t; P'' s0 s; Q'' t0 t\ \ C s = C' t; + prefix_refinement sr isr osr rvr AR R P Q a c; + prefix_refinement sr isr osr rvr AR R P' Q' b d \ + \ prefix_refinement sr isr osr rvr AR R + (P'' and (\s0 s. if C s then P s0 s else P' s0 s)) + (Q'' and (\s0 s. if C' s then Q s0 s else Q' s0 s)) + (condition C a b) (condition C' c d)" + apply (clarsimp simp: condition_def) + apply (auto simp: prefix_refinement_def simp del: not_ex ) + apply (erule notE) + apply (drule spec | drule (1) mp | drule (1) bspec)+ + apply clarsimp + apply (rule_tac x="\s'. 
if s'=s then f s else {}" in exI) + apply (clarsimp simp: triv_refinement_def is_matching_fragment_restrict) + apply (drule spec | drule (1) mp | drule (1) bspec)+ + apply clarsimp + apply (rule_tac x="\s'. if s'=s then f s else {}" in exI) + apply (clarsimp simp: triv_refinement_def is_matching_fragment_restrict) + apply (drule spec | drule (1) mp | drule (1) bspec)+ + apply clarsimp + apply (rule_tac x="\s'. if s'=s then f s else {}" in exI) + apply (clarsimp simp: triv_refinement_def is_matching_fragment_restrict) + apply (drule spec | drule (1) mp | drule (1) bspec)+ + apply clarsimp + apply (rule_tac x="\s'. if s'=s then f s else {}" in exI) + apply (clarsimp simp: triv_refinement_def is_matching_fragment_restrict) + done + +lemma prefix_refinement_mapM[rule_format]: + "\list_all2 xyr xs ys; + \x y. x \ set xs \ y \ set ys \ xyr x y + \ prefix_refinement sr intsr intsr rvr AR R P Q (f x) (g y); + \x. x \ set xs \ \P\,\AR\ f x \\_ _. True\,\\_. P\; + \y. y \ set ys \ \Q\,\R\ g y \\_ _. True\,\\_. Q\\ + \ prefix_refinement sr intsr intsr (list_all2 rvr) AR R P Q (mapM f xs) (mapM g ys)" + apply (induct xs ys rule: list_all2_induct) + apply (simp add: mapM_def sequence_def prefix_refinement_return_imp) + apply (clarsimp simp: mapM_Cons all_conj_distrib imp_conjR) + apply (rule prefix_refinement_weaken_pre) + apply (rule prefix_refinement_bind, assumption) + apply (rule prefix_refinement_bind, assumption) + apply (rule prefix_refinement_triv_pre, rule prefix_refinement_return_imp, simp) + apply wp + apply fastforce + apply (simp | wp | blast dest: validI_prefix_closed)+ + done + +lemma prefix_refinement_mapME[rule_format]: + "\list_all2 xyr xs ys; + \x y. x \ set xs \ y \ set ys \ xyr x y + \ prefix_refinement sr intsr intsr (F \ rvr) AR R P Q (f x) (g y); + \x. x \ set xs \ \P\,\AR\ f x \\_ _. True\,\\_. P\; + \y. y \ set ys \ \Q\,\R\ g y \\_ _. True\,\\_. Q\\ + \ prefix_refinement sr intsr intsr (F \ list_all2 rvr) AR R P Q (mapME f xs) (mapME g ys)" + apply (induct xs ys rule: list_all2_induct) + apply (simp add: mapME_def sequenceE_def prefix_refinement_returnOk_imp) + apply (clarsimp simp add: mapME_Cons all_conj_distrib imp_conjR) + apply (rule prefix_refinement_weaken_pre) + apply (unfold bindE_def validE_def) + apply (rule prefix_refinement_bind, assumption) + apply (case_tac rv) + apply (clarsimp simp: prefix_refinement_throwError_imp) + apply clarsimp + apply (rule prefix_refinement_bind, assumption) + apply (rule prefix_refinement_triv_pre) + apply (case_tac rv) + apply (clarsimp simp: prefix_refinement_throwError_imp) + apply (clarsimp simp: prefix_refinement_returnOk_imp) + apply (simp | wp | blast dest: validI_prefix_closed)+ + done + + +section \Some prefix_refinement rules for monadic combinators\ + +\ \FIXME: naming of these lemmas\ +lemma ifM_prefix_refinement: + assumes test: "prefix_refinement sr isr isr (=) AR R A A' test test'" + and l: "prefix_refinement sr isr osr rvr AR R P P' a a'" + and r: "prefix_refinement sr isr osr rvr AR R Q Q' b b'" + and abs_valid: "\B\,\AR\ test -,\\c s0 s. c \ P s0 s\" + "\C\,\AR\ test -,\\c s0 s. \ c \ Q s0 s\" + and conc_valid: "\B'\,\R\ test' -,\\c s0 s. c \ P' s0 s\" + "\C'\,\R\ test' -,\\c s0 s. 
\ c \ Q' s0 s\" + shows "prefix_refinement sr isr osr rvr AR R (A and B and C) (A' and B' and C') + (ifM test a b) (ifM test' a' b')" + unfolding ifM_def + apply pfx_refn_pre + apply (rule prefix_refinement_bind[OF test]) + apply (erule prefix_refinement_if[OF _ l r]) + apply (wpsimp wp: abs_valid conc_valid rg_vcg_if_lift2)+ + done + +lemmas ifM_prefix_refinement' = + ifM_prefix_refinement[where A=A and B=A and C=A for A, + where A'=A' and B'=A' and C'=A' for A', simplified] + +lemma orM_prefix_refinement: + "\prefix_refinement sr isr isr (=) AR R A A' a a'; prefix_refinement sr isr isr (=) AR R C C' b b'; + \B\,\AR\ a -,\\c s0 s. \ c \ C s0 s\; \B'\,\R\ a' -,\\c s0 s. \ c \ C' s0 s\\ + \ prefix_refinement sr isr isr (=) AR R (A and B) (A' and B') (orM a b) (orM a' b')" + unfolding orM_def + apply pfx_refn_pre + apply (rule ifM_prefix_refinement[where P="\\" and P'="\\"]) + apply (wpsimp | fastforce simp: prefix_refinement_return_imp)+ + done + +lemmas orM_prefix_refinement' = + orM_prefix_refinement[where A=A and B=A for A, simplified, where A'=A' and B'=A' for A', simplified] + +lemma andM_prefix_refinement: + "\prefix_refinement sr isr isr (=) AR R A A' a a'; prefix_refinement sr isr isr (=) AR R C C' b b'; + \B\,\AR\ a -,\\c s0 s. c \ C s0 s\; \B'\,\R\ a' -,\\c s0 s. c \ C' s0 s\\ + \ prefix_refinement sr isr isr (=) AR R (A and B) (A' and B') (andM a b) (andM a' b')" + unfolding andM_def + apply pfx_refn_pre + apply (rule ifM_prefix_refinement[where Q="\\" and Q'="\\"]) + apply (wpsimp | fastforce simp: prefix_refinement_return_imp)+ + done + +lemma notM_prefix_refinement: + "\prefix_refinement sr isr isr (=) AR R G G' a a'; prefix_closed a; prefix_closed a'\ + \ prefix_refinement sr isr isr (=) AR R G G' (notM a) (notM a')" + unfolding notM_def + apply pfx_refn_pre + apply (erule prefix_refinement_bind) + apply (rule prefix_refinement_returnTT) + apply wpsimp+ + done + +lemma whenM_prefix_refinement: + "\prefix_refinement sr isr isr (=) AR R A A' a a'; prefix_refinement sr isr isr dc AR R C C' b b'; + \B\,\AR\ a -,\\c s0 s. c \ C s0 s\; \B'\,\R\ a' -,\\c s0 s. c \ C' s0 s\\ + \ prefix_refinement sr isr isr dc AR R (A and B) (A' and B') (whenM a b) (whenM a' b')" + unfolding whenM_def + apply pfx_refn_pre + apply (rule ifM_prefix_refinement[where Q="\\" and Q'="\\"]) + apply (wpsimp | fastforce simp: prefix_refinement_return_imp)+ + done + + +section \prefix_refinement rules for env_step, commit_step, interference and Await\ +\ \FIXME: better name for section\ + lemma Int_insert_left2: "(insert a B \ C) = ((if a \ C then {a} else {}) \ (B \ C))" by auto -definition - rely_stable :: "('t \ 't \ bool) \ ('s \ 't \ bool) \ ('t \ bool) \ bool" -where +definition rely_stable :: "('t \ 't \ bool) \ ('s \ 't \ bool) \ ('t \ bool) \ bool" where "rely_stable R sr Q = (\s t t'. Q t \ sr s t \ R t t' \ Q t' \ (\s'. sr s' t'))" lemmas rely_stableD = rely_stable_def[THEN iffD1, simplified imp_conjL, rule_format] -definition - env_rely_stable_iosr :: "'s rg_pred \ 't rg_pred - \ ('s \ 't \ bool) \ ('s \ 't \ bool) \ ('t \ bool) \ bool" -where - "env_rely_stable_iosr AR R sr iosr Q - = (\s0 t0 s t. Q t0 \ iosr s0 t0 \ R t0 t \ AR s0 s \ sr s t \ iosr s t)" +definition env_rely_stable_iosr :: + "'s rg_pred \ 't rg_pred \ ('s \ 't \ bool) \ ('s \ 't \ bool) \ ('t \ bool) \ bool" + where + "env_rely_stable_iosr AR R sr iosr Q = + (\s0 t0 s t. 
Q t0 \ iosr s0 t0 \ R t0 t \ AR s0 s \ sr s t \ iosr s t)" lemmas env_rely_stable_iosrD = env_rely_stable_iosr_def[THEN iffD1, rule_format] -definition - env_stable :: "'s rg_pred \ 't rg_pred - \ ('s \ 't \ bool) \ ('s \ 't \ bool) \ ('t \ bool) \ bool" -where - "env_stable AR R sr iosr Q = (rely_stable R sr Q - \ env_rely_stable_iosr AR R sr iosr Q \ iosr \ sr)" +definition env_stable :: + "'s rg_pred \ 't rg_pred \ ('s \ 't \ bool) \ ('s \ 't \ bool) \ ('t \ bool) \ bool" + where + "env_stable AR R sr iosr Q = (rely_stable R sr Q \ env_rely_stable_iosr AR R sr iosr Q \ iosr \ sr)" -definition - abs_rely_stable :: "('s \ 's \ bool) \ ('s \ bool) \ bool" -where +definition abs_rely_stable :: "('s \ 's \ bool) \ ('s \ bool) \ bool" where "abs_rely_stable R P = (\s s'. P s \ R s s' \ P s')" lemmas abs_rely_stableD = abs_rely_stable_def[THEN iffD1, simplified imp_conjL, rule_format] @@ -1144,10 +2068,7 @@ lemma abs_rely_stableT: by (simp add: abs_rely_stable_def) lemma rely_stable_rtranclp: - "rely_stable R sr Q - \ sr s t \ Q t - \ rtranclp R t t' - \ Q t'" + "\rely_stable R sr Q; sr s t; Q t; rtranclp R t t'\ \ Q t'" apply (rotate_tac 3, induct arbitrary: s rule: converse_rtranclp_induct) apply simp apply (clarsimp simp: rely_stable_def) @@ -1155,9 +2076,7 @@ lemma rely_stable_rtranclp: done lemma abs_rely_stable_rtranclp: - "abs_rely_stable R P - \ P s \ rtranclp R s s' - \ P s'" + "\abs_rely_stable R P; P s; rtranclp R s s'\ \ P s'" apply (rotate_tac 2, induct rule: converse_rtranclp_induct) apply simp apply (clarsimp simp: abs_rely_stable_def) @@ -1165,8 +2084,8 @@ lemma abs_rely_stable_rtranclp: lemma prefix_refinement_env_step: assumes env_stable: "env_stable AR R sr iosr Q" - shows "prefix_refinement sr iosr iosr dc2 (\s0 s. s0 = s) (\t0 t. t0 = t \ Q t0) - AR R env_step env_step" + shows "prefix_refinement sr iosr iosr dc AR R (\s0 s. s0 = s) (\t0 t. t0 = t \ Q t0) + env_step env_step" proof - have P: "\S. {xs. length xs = Suc 0} = (\x. [x]) ` UNIV" apply (safe, simp_all) @@ -1188,9 +2107,9 @@ proof - apply (simp only: UN_extend_simps Int_insert_left2) apply (simp add: is_matching_fragment_def UN_If_distrib) apply (intro conjI allI impI; - simp add: prefix_closed_def in_fst_snd_image_eq self_closed_def - matching_tr_pfx_def matching_tr_def - env_closed_def) + simp add: prefix_closed_def in_fst_snd_image_eq self_closed_def + matching_tr_pfx_def matching_tr_def + env_closed_def) apply (metis env_rely_stable_iosrD[OF est]) apply clarsimp apply (auto dest: rely_stableD[OF st] predicate2D[OF sr])[1] @@ -1198,23 +2117,22 @@ proof - qed lemma prefix_refinement_repeat_n: - "prefix_refinement sr iosr iosr (\_ _. True) P Q AR R f g - \ \P\,\AR\ f \\\\,\\_. P\ - \ \\t0 t. Q t0 t \ (\s0 s. sr s0 t0 \ iosr s t)\,\R\ g \\\\,\\_. Q\ - \ prefix_refinement sr iosr iosr (\_ _. True) P Q AR R (repeat_n n f) (repeat_n n g)" + "\prefix_refinement sr iosr iosr dc AR R P Q f g; \P\,\AR\ f \\\\,\\_. P\; + \\t0 t. Q t0 t \ (\s0 s. sr s0 t0 \ iosr s t)\,\R\ g \\\\,\\_. Q\\ + \ prefix_refinement sr iosr iosr dc AR R P Q (repeat_n n f) (repeat_n n g)" apply (induct n) apply (simp add: prefix_refinement_return_imp) - apply (rule prefix_refinement_weaken_pre) + apply pfx_refn_pre apply simp - apply (rule prefix_refinement_bind, assumption+) + apply (rule prefix_refinement_bind, assumption+) apply simp apply auto done lemma prefix_refinement_env_n_steps: assumes env_stable: "env_stable AR R sr iosr Q" - shows "prefix_refinement sr iosr iosr (\_ _. True) - (=) (\t0 t. 
t0 = t \ Q t0) AR R (env_n_steps n) (env_n_steps n)" + shows "prefix_refinement sr iosr iosr dc AR R + (=) (\t0 t. t0 = t \ Q t0) (env_n_steps n) (env_n_steps n)" apply (rule prefix_refinement_repeat_n) apply (rule prefix_refinement_env_step[OF env_stable]) apply (simp add: env_step_def) @@ -1231,18 +2149,17 @@ lemma prefix_refinement_env_n_steps: done lemma prefix_refinement_repeat: - "prefix_refinement sr iosr iosr (\_ _. True) P Q AR R f g - \ \P\,\AR\ f \\\\,\\_. P\ - \ \\t0 t. Q t0 t \ (\s0 s. sr s0 t0 \ iosr s t)\,\R\ g \\\\,\\_. Q\ - \ prefix_refinement sr iosr iosr (\_ _. True) P Q AR R (repeat f) (repeat g)" + "\prefix_refinement sr iosr iosr dc AR R P Q f g; \P\,\AR\ f \\\\,\\_. P\; + \\t0 t. Q t0 t \ (\s0 s. sr s0 t0 \ iosr s t)\,\R\ g \\\\,\\_. Q\\ + \ prefix_refinement sr iosr iosr dc AR R P Q (repeat f) (repeat g)" apply (simp add: repeat_def) apply (rule prefix_refinement_weaken_pre) apply (rule prefix_refinement_bind, rule prefix_refinement_select[where rvr="(=)"]) apply simp apply simp apply (rule prefix_refinement_repeat_n, assumption+) - apply (rule validI_weaken_pre, assumption, simp) - apply (wp select_wp) + apply (rule rg_weaken_pre, assumption, simp) + apply wp apply wp apply clarsimp apply clarsimp @@ -1250,8 +2167,7 @@ lemma prefix_refinement_repeat: lemma prefix_refinement_env_steps: "env_stable AR R sr iosr Q - \ prefix_refinement sr iosr iosr (\_ _. True) - (=) (\t0 t. t0 = t \ Q t0) AR R env_steps env_steps" + \ prefix_refinement sr iosr iosr dc AR R (=) (\t0 t. t0 = t \ Q t0) env_steps env_steps" apply (simp add: env_steps_repeat) apply (rule prefix_refinement_repeat) apply (erule prefix_refinement_env_step) @@ -1268,7 +2184,7 @@ lemma prefix_refinement_env_steps: lemma prefix_refinement_commit_step: "\s t. isr s t \ sr s t \ osr s t - \ prefix_refinement sr isr osr (\_ _. True) (\\) (\\) AR R commit_step commit_step" + \ prefix_refinement sr isr osr dc AR R (\\) (\\) commit_step commit_step" apply (clarsimp simp: prefix_refinement_def) apply (rule default_prefix_refinement_ex) apply (simp add: commit_step_def bind_def get_def return_def put_trace_elem_def) @@ -1279,24 +2195,9 @@ lemma prefix_refinement_commit_step: apply (simp add: matching_tr_pfx_def matching_tr_def rely_cond_def) done -lemma prefix_refinement_weaken_srs: - "prefix_refinement sr isr osr r P Q AR R f g - \ isr' \ isr \ osr \ osr' \ sr \ sr - \ prefix_refinement sr isr' osr' r P Q AR R f g" - apply (subst prefix_refinement_def, clarsimp) - apply (drule(1) predicate2D) - apply (drule(5) prefix_refinementD) - apply clarsimp - apply (rule exI, rule conjI[rotated], assumption) - apply (clarsimp simp: is_matching_fragment_def) - apply (drule(1) bspec, clarsimp) - apply (erule tmres.rel_cases; clarsimp) - apply (erule(1) predicate2D) - done - lemma prefix_refinement_interference: "env_stable AR R sr iosr Q - \ prefix_refinement sr iosr iosr (\_ _. True) \\ (\t0 t. Q t) AR R interference interference" + \ prefix_refinement sr iosr iosr dc AR R \\ (\t0 t. 
Q t) interference interference" apply (simp add: interference_def) apply (rule prefix_refinement_weaken_pre) apply (rule prefix_refinement_bind[where intsr=iosr]) @@ -1311,67 +2212,31 @@ lemma prefix_refinement_interference: apply (clarsimp simp: guar_cond_def) done -lemma mapM_x_Cons: - "mapM_x f (x # xs) = do f x; mapM_x f xs od" - by (simp add: mapM_x_def sequence_x_def) - -lemmas prefix_refinement_bind_sr = prefix_refinement_bind[where sr=sr - and intsr=sr for sr] -lemmas prefix_refinement_bind_isr = prefix_refinement_bind[where isr=isr - and intsr=isr for isr] -lemmas pfx_refn_bind = prefix_refinement_bind_v[where sr=sr - and isr=sr and osr=sr and intsr=sr for sr] -lemmas pfx_refn_bindT - = pfx_refn_bind[where P'="\" and Q'="\_ _. True", OF _ _ hoare_post_taut validI_triv, - simplified bipred_conj_def, simplified] - -lemma prefix_refinement_assume_pre: - "(P \ prefix_refinement sr isr osr rvr P' Q' AR R f g) - \ prefix_refinement sr isr osr rvr (P' And (\_ _. P)) Q' AR R f g" - "(P \ prefix_refinement sr isr osr rvr P' Q' AR R f g) - \ prefix_refinement sr isr osr rvr P' (Q' And (\_ _. P)) AR R f g" - by (auto simp: prefix_refinement_def) - -lemma prefix_refinement_modify: - "\s t. isr s t \ P s \ Q t \ osr (f s) (g t) - \ prefix_refinement sr isr osr (\_ _. True) (\_. P) (\_. Q) AR R (modify f) (modify g)" - apply (simp add: modify_def) - apply (rule prefix_refinement_weaken_pre) - apply (rule prefix_refinement_bind[where intsr=isr, OF prefix_refinement_get]) - apply (rule_tac P="P x" in prefix_refinement_assume_pre(1)) - apply (rule_tac P="Q y" in prefix_refinement_assume_pre(2)) - apply (rule prefix_refinement_put, simp) - apply wp+ +lemma prefix_refinement_Await: + "\env_stable AR R sr iosr Q; abs_rely_stable AR P; + \s t. P s \ Q t \ iosr s t \ G' t \ G s; + (\s. G' s) \ (\s. G s)\ + \ prefix_refinement sr iosr iosr (\_ _. True) AR R (\s0 s. s0 = s \ P s) (\t0 t. t0 = t \ Q t) + (Await G) (Await G')" + apply (simp add: Await_redef) + apply pfx_refn_pre + apply (rule prefix_refinement_bind[where intsr=iosr] + prefix_refinement_select[where rvr="\s s'. G s = G' s'"] + prefix_refinement_env_steps + | simp add: if_split[where P="\S. x \ S" for x] split del: if_split + | (rule prefix_refinement_get, rename_tac s s', + rule_tac P="P s" in prefix_refinement_gen_asm, + rule_tac P="Q s'" in prefix_refinement_gen_asm2) + | (rule prefix_refinement_select[where rvr="\\"]) + | wp)+ apply clarsimp - apply clarsimp - done - -lemmas pfx_refn_modifyT = prefix_refinement_modify[where P="\" and Q="\"] - -lemmas prefix_refinement_get_pre - = prefix_refinement_bind[OF prefix_refinement_get _ - valid_validI_wp[OF _ get_sp] valid_validI_wp[OF _ get_sp], - simplified bipred_conj_def no_trace_all, simplified] - -lemma prefix_refinement_gets: - "\s t. iosr s t \ P s \ Q t \ rvr (f s) (f' t) - \ prefix_refinement sr iosr iosr rvr (\_. P) (\_. 
Q) AR R (gets f) (gets f')" - apply (simp add: gets_def) - apply (rule prefix_refinement_get_pre) - apply (rule prefix_refinement_return_imp) - apply simp + apply (erule(2) abs_rely_stable_rtranclp) + apply (clarsimp simp: env_stable_def) + apply (erule(3) rely_stable_rtranclp) done -lemma prefix_refinement_fail: - "prefix_refinement sr isr osr rvr \\ \\ AR R fail fail" - apply (clarsimp simp: prefix_refinement_def fail_def) - apply (rule default_prefix_refinement_ex) - apply (simp add: is_matching_fragment_no_trace) - done -lemma prefix_refinement_assert: - "P = P' \ prefix_refinement sr iosr iosr \\ \\ \\ AR R (assert P) (assert P')" - by (simp add: assert_def prefix_refinement_fail prefix_refinement_return_imp) +section \FIXME: name for this section\ lemma par_tr_fin_bind: "(\x. par_tr_fin_principle (g x)) \ par_tr_fin_principle (f >>= g)" @@ -1381,8 +2246,7 @@ lemma par_tr_fin_bind: done lemma par_tr_fin_add_env_n_steps: - "par_tr_fin_principle f - \ par_tr_fin_principle (do x \ f; env_n_steps n od)" + "par_tr_fin_principle f \ par_tr_fin_principle (do x \ f; env_n_steps n od)" proof (induct n) case 0 then show ?case by simp @@ -1418,93 +2282,24 @@ lemma par_tr_fin_interference: done lemma prefix_refinement_triv_refinement_abs: - "triv_refinement f f' - \ prefix_refinement sr isr osr rvr P Q AR R f' g - \ prefix_refinement sr isr osr rvr P Q AR R f g" + "\triv_refinement f f'; prefix_refinement sr isr osr rvr AR R P Q f' g\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" apply (clarsimp simp: prefix_refinement_def) apply (strengthen triv_refinement_trans[mk_strg I E]) apply fastforce done lemma prefix_refinement_triv_refinement_conc: - "prefix_refinement sr isr osr rvr P Q AR R f g' - \ triv_refinement g' g - \ prefix_refinement sr isr osr rvr P Q AR R f g" + "\prefix_refinement sr isr osr rvr AR R P Q f g'; triv_refinement g' g\ + \ prefix_refinement sr isr osr rvr AR R P Q f g" apply (clarsimp simp: prefix_refinement_def triv_refinement_def) apply blast done -lemmas prefix_refinement_triv_pre - = Pure.asm_rl[where psi="prefix_refinement sr isr osr rvr - (\_ _. True) (\_ _. True) AR R f g"] for sr isr osr rvr AR R f g -lemma prefix_refinement_mapM: - "list_all2 xyr xs ys - \ (\x y. x \ set xs \ y \ set ys \ xyr x y - \ prefix_refinement sr intsr intsr rvr P Q AR R (f x) (g y)) - \ (\x. x \ set xs \ \P\,\AR\ f x \\_ _. True\,\\_. P\) - \ (\y. y \ set ys \ \Q\,\R\ g y \\_ _. True\,\\_. Q\) - \ prefix_refinement sr intsr intsr (list_all2 rvr) P Q AR R (mapM f xs) (mapM g ys)" - apply (induct xs ys rule: list_all2_induct) - apply (simp add: mapM_def sequence_def prefix_refinement_return_imp) - apply (clarsimp simp add: mapM_Cons all_conj_distrib imp_conjR) - apply (rule prefix_refinement_weaken_pre) - apply (rule prefix_refinement_bind, assumption) - apply (rule prefix_refinement_bind, assumption) - apply (rule prefix_refinement_triv_pre, rule prefix_refinement_return_imp, simp) - apply (wp validI_triv)+ - apply (blast intro: validI_prefix_closed) - apply (wp validI_triv | simp add: bipred_conj_def - | blast dest: validI_prefix_closed)+ - done - -lemma prefix_refinement_weaken_rel: - "prefix_refinement sr isr osr r P Q AR R f g - \ \rv rv'. 
r rv rv' \ r' rv rv' - \ prefix_refinement sr isr osr r' P Q AR R f g" - apply (subst prefix_refinement_def, clarsimp) - apply (drule(5) prefix_refinementD, clarsimp) - apply (rule exI, rule conjI[rotated], assumption) - apply (clarsimp simp: is_matching_fragment_def) - apply (drule(1) bspec, clarsimp) - apply (erule tmres.rel_cases; clarsimp) - done - -lemma rely_cond_mono: - "R \ R' \ rely_cond R \ rely_cond R'" - by (simp add: le_fun_def rely_cond_def split_def) - -lemma is_matching_fragment_add_rely: - "is_matching_fragment sr osr r ctr cres s0 AR s f - \ AR' \ AR - \ is_matching_fragment sr osr r ctr cres s0 AR' s (rely f AR' s0)" - apply (frule is_matching_fragment_Nil) - apply (clarsimp simp: is_matching_fragment_def rely_prefix_closed - rely_self_closed) - apply (intro conjI) - apply (erule rely_env_closed) - apply (frule rely_cond_mono) - apply (simp add: le_fun_def rely_cond_Cons_eq) - apply (fastforce simp: rely_def) - apply (auto simp: rely_def)[1] - done - -lemma prefix_refinement_weaken_rely: - "prefix_refinement sr isr osr r P Q AR R f g - \ R' \ R \ AR' \ AR - \ prefix_refinement sr isr osr r P Q AR' R' f g" - apply (subst prefix_refinement_def, clarsimp) - apply (drule(3) prefix_refinementD, assumption+) - apply (clarsimp simp: rely_cond_def split_def le_fun_def) - apply (rule exI, rule conjI, erule is_matching_fragment_add_rely) - apply (simp add: le_fun_def) - apply (auto simp add: triv_refinement_def rely_def) - done - -text \Using prefix refinement as an in-place calculus, permitting -multiple applications at the same level.\ - -lemmas trivial = imp_refl[rule_format] +section \ + Using prefix refinement as an in-place calculus, permitting multiple applications at the + same level\ lemma matching_tr_transp: "transp sr \ transp (matching_tr sr)" @@ -1522,10 +2317,8 @@ lemma matching_tr_symp: done lemma list_all2_is_me: - "list_all2 P tr tr' - \ \x y. P x y \ fst x = fst y - \ (n < length tr \ fst (rev tr ! n) = Me) - = (n < length tr' \ fst (rev tr' ! n) = Me)" + "\list_all2 P tr tr'; \x y. P x y \ fst x = fst y\ + \ (n < length tr \ fst (rev tr ! n) = Me) = (n < length tr' \ fst (rev tr' ! n) = Me)" apply (rule conj_cong, simp add: list_all2_lengthD) apply (frule list_all2_rev_nthD, simp add: list_all2_lengthD) apply (cases "rev tr ! n", cases "rev tr' ! n", auto) @@ -1552,11 +2345,9 @@ proof - apply (simp add: matching_tr_pfx_def assms) apply (rule conj_cong; simp?) apply (strengthen iffI) - apply (metis matching_tr transpD[OF matching_tr_transp] - sympD[OF matching_tr_symp]) + apply (metis matching_tr transpD[OF matching_tr_transp] sympD[OF matching_tr_symp]) done - note is_me = matching_tr1[unfolded matching_tr_def, simplified, - THEN list_all2_is_me, symmetric] + note is_me = matching_tr1[unfolded matching_tr_def, simplified, THEN list_all2_is_me, symmetric] show ?thesis using assms apply (clarsimp simp: is_matching_fragment_def matching is_me) apply (drule(1) bspec)+ @@ -1568,10 +2359,8 @@ proof - qed lemma matching_tr_rely_cond: - "matching_tr sr (rev tr) (rev tr') - \ rely_cond R s0 tr - \ sr s0 t0 - \ rely_cond (\t0 t. \s0 s. sr s0 t0 \ sr s t \ R s0 s) t0 tr'" + "\matching_tr sr (rev tr) (rev tr'); rely_cond R s0 tr; sr s0 t0\ + \ rely_cond (\t0 t. \s0 s. sr s0 t0 \ sr s t \ R s0 s) t0 tr'" apply (simp add: matching_tr_def) apply (induct arbitrary: s0 t0 rule: list_all2_induct) apply simp @@ -1583,21 +2372,23 @@ lemma matching_tr_rely_cond: done lemma prefix_refinement_in_place_trans: - "prefix_refinement sr isr osr (=) P (\_ _. True) AR (\t0 t. \s0 s. 
sr s0 t0 \ sr s t \ R s0 s) f g - \ prefix_refinement sr isr osr (=) (\_ _. True) Q AR R g h - \ equivp sr \ equivp osr \ equivp isr - \ (\s t t'. sr s t \ R t t' \ (\s'. sr s' t' \ AR s s')) - \ prefix_refinement sr isr osr (=) P Q AR R f h" + "\prefix_refinement sr isr osr (=) AR (\t0 t. \s0 s. sr s0 t0 \ sr s t \ R s0 s) P (\_ _. True) f g; + prefix_refinement sr isr osr (=) AR R (\_ _. True) Q g h; + equivp sr; equivp osr; equivp isr; + \s t t'. sr s t \ R t t' \ (\s'. sr s' t' \ AR s s')\ + \ prefix_refinement sr isr osr (=) AR R P Q f h" apply (subst prefix_refinement_def, clarsimp) apply (drule_tac s=t and t=t and ?t0.0=t0 and cprog=h in pfx_refnD; - assumption?) + assumption?) apply (metis equivp_reflp_symp_transp reflpD) apply metis apply clarsimp apply (rename_tac h_tr h_res frag_g) apply (rule_tac x="\s. \(tr, res) \ frag_g t \ ({tr. length tr = length h_tr} \ UNIV). - \frag_f \ {frag_f. is_matching_fragment sr osr (=) tr res s0 AR s frag_f - \ triv_refinement f frag_f}. frag_f s" in exI) + \frag_f \ {frag_f. is_matching_fragment sr osr (=) tr res s0 AR s frag_f + \ triv_refinement f frag_f}. + frag_f s" + in exI) apply (rule conjI) apply (rule is_matching_fragment_UNION) apply clarsimp @@ -1624,29 +2415,4 @@ lemma prefix_refinement_in_place_trans: apply blast done -lemma prefix_refinement_Await: - "env_stable AR R sr iosr Q - \ abs_rely_stable AR P - \ \s t. P s \ Q t \ iosr s t \ G' t \ G s - \ (\s. G' s) \ (\s. G s) - \ prefix_refinement sr iosr iosr (\_ _. True) (\s0 s. s0 = s \ P s) - (\t0 t. t0 = t \ Q t) AR R - (Await G) (Await G')" - apply (simp add: Await_redef) - apply (rule prefix_refinement_weaken_pre) - apply (rule prefix_refinement_bind[where intsr=iosr] - prefix_refinement_select[where rvr="\s s'. G s = G' s'"] - prefix_refinement_env_steps - | simp add: if_split[where P="\S. x \ S" for x] split del: if_split - | (rule prefix_refinement_get, rename_tac s s', - rule_tac P="P s" in prefix_refinement_assume_pre(1), - rule_tac P="Q s'" in prefix_refinement_assume_pre(2)) - | (rule prefix_refinement_select[where rvr=dc2]) - | wp)+ - apply clarsimp - apply (erule(2) abs_rely_stable_rtranclp) - apply (clarsimp simp: env_stable_def) - apply (erule(3) rely_stable_rtranclp) - done - end diff --git a/lib/concurrency/Triv_Refinement.thy b/lib/concurrency/Triv_Refinement.thy index 730276e382..465d09f278 100644 --- a/lib/concurrency/Triv_Refinement.thy +++ b/lib/concurrency/Triv_Refinement.thy @@ -1,42 +1,40 @@ (* + * Copyright 2024, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause *) theory Triv_Refinement - -imports - "Lib.TraceMonadVCG" - "Lib.Strengthen" - + imports + "Monads.Trace_More_RG" + "Monads.Trace_Strengthen_Setup" begin -text \This is a simple (almost trivial) definition of refinement, -which simply resolves nondeterminism to a smaller set of options.\ -definition - triv_refinement :: "('s,'a) tmonad \ ('s,'a) tmonad \ bool" -where +text \ + This is a simple (almost trivial) definition of refinement, which simply resolves nondeterminism + to a smaller set of options.\ +definition triv_refinement :: "('s,'a) tmonad \ ('s,'a) tmonad \ bool" where "triv_refinement aprog cprog = (\s. 
cprog s \ aprog s)" +lemmas triv_refinement_elemD = triv_refinement_def[THEN iffD1, rule_format, THEN subsetD] + +lemma triv_refinement_trans: + "\triv_refinement f g; triv_refinement g h\ \ triv_refinement f h" + by (auto simp add: triv_refinement_def) + lemma triv_refinement_mono_bind: "triv_refinement a c \ triv_refinement (a >>= b) (c >>= b)" - "(\x. triv_refinement (b x) (d x)) \ triv_refinement (a >>= b) (a >>= d)" + "\\x. triv_refinement (b x) (d x)\ \ triv_refinement (a >>= b) (a >>= d)" apply (simp add: triv_refinement_def bind_def) apply (intro allI UN_mono; simp) - apply (simp only: triv_refinement_def bind_def2 split_def) + apply (simp only: triv_refinement_def bind_def' split_def) apply (intro Un_mono allI order_refl UN_mono image_mono) apply simp done -lemma triv_refinement_trans: - "triv_refinement f g \ triv_refinement g h - \ triv_refinement f h" - by (auto simp add: triv_refinement_def) - lemma triv_refinement_bind: - "triv_refinement a c - \ (\x. triv_refinement (b x) (d x)) - \ triv_refinement (bind a b) (bind c d)" + "\triv_refinement a c; \x. triv_refinement (b x) (d x)\ + \ triv_refinement (bind a b) (bind c d)" by (metis triv_refinement_mono_bind triv_refinement_trans) lemma triv_refinement_mono_parallel: @@ -52,11 +50,10 @@ lemma triv_refinement_mono_parallel': done lemma triv_refinement_parallel: - "triv_refinement a b - \ triv_refinement c d - \ triv_refinement (parallel a c) (parallel b d)" + "\triv_refinement a b; triv_refinement c d\ + \ triv_refinement (parallel a c) (parallel b d)" by (metis triv_refinement_mono_parallel triv_refinement_mono_parallel' - triv_refinement_trans) + triv_refinement_trans) lemma select_subset: "(select S s \ select S' s') = (S \ S' \ (S \ {} \ s = s'))" @@ -71,52 +68,49 @@ lemma triv_refinement_select_eq: by (simp add: triv_refinement_def select_subset) lemma triv_refinement_rely: - "(\s0 s. R' s0 s \ R s0 s) \ - triv_refinement (rely f R s0) (rely f R' s0)" + "\\s0 s. R' s0 s \ R s0 s\ + \ triv_refinement (rely f R s0) (rely f R' s0)" unfolding rely_def triv_refinement_def rely_cond_def by auto lemma triv_refinement_rely2: - "triv_refinement f g \ - triv_refinement (rely f R s0) (rely g R s0)" + "triv_refinement f g \ triv_refinement (rely f R s0) (rely g R s0)" unfolding rely_def triv_refinement_def rely_cond_def by auto lemma rely_rely_triv_refinement: - "(\s0 s. R s0 s \ R' s0 s) \ - triv_refinement (rely f R' s0) (rely (rely f R s0) R' s0)" + "\\s0 s. R s0 s \ R' s0 s\ \ triv_refinement (rely f R' s0) (rely (rely f R s0) R' s0)" by (clarsimp simp: triv_refinement_def rely_def parallel_def) -lemma validI_triv_refinement: - "triv_refinement f g - \ \P\,\R\ f \G\,\Q\ - \ prefix_closed g - \ \P\,\R\ g \G\,\Q\" - unfolding rely_def triv_refinement_def validI_def - by fastforce - -lemma valid_triv_refinement: - "triv_refinement f g - \ \P\ f \Q\ - \ \P\ g \Q\" - unfolding rely_def triv_refinement_def valid_def mres_def - by (fastforce elim: image_eqI[rotated]) - -lemma triv_refinement_refl: +lemma triv_refinement_refl[simp]: "triv_refinement f f" by (simp add: triv_refinement_def) lemma triv_refinement_select_concrete_All: - "\x \ S. triv_refinement aprog (cprog x) - \ triv_refinement aprog (select S >>= cprog)" - by (auto simp add: triv_refinement_def bind_def select_def) + "\x \ S. 
triv_refinement aprog (cprog x) \ triv_refinement aprog (select S >>= cprog)" + by (auto simp: triv_refinement_def bind_def select_def) lemma triv_refinement_select_abstract_x: - "x \ S \ triv_refinement (aprog x) cprog - \ triv_refinement (select S >>= aprog) cprog" - by (auto simp add: triv_refinement_def bind_def select_def) + "\x \ S; triv_refinement (aprog x) cprog\ \ triv_refinement (select S >>= aprog) cprog" + by (auto simp: triv_refinement_def bind_def select_def) + +lemma triv_refinement_alternative1: + "triv_refinement (a \ b) a" + by (clarsimp simp: triv_refinement_def alternative_def) -lemmas triv_refinement_elemD - = triv_refinement_def[THEN iffD1, rule_format, THEN subsetD] +lemma triv_refinement_alternative2: + "triv_refinement (a \ b) b" + by (clarsimp simp: triv_refinement_def alternative_def) + + +lemma validI_triv_refinement: + "\triv_refinement f g; \P\,\R\ f \G\,\Q\; prefix_closed g\ \ \P\,\R\ g \G\,\Q\" + unfolding rely_def triv_refinement_def validI_def + by fastforce + +lemma valid_triv_refinement: + "\triv_refinement f g; \P\ f \Q\\ \ \P\ g \Q\" + unfolding rely_def triv_refinement_def valid_def mres_def + by (fastforce elim: image_eqI[rotated]) end diff --git a/lib/concurrency/examples/Peterson_Atomicity.thy b/lib/concurrency/examples/Peterson_Atomicity.thy index 2dc98c753c..04907808bb 100644 --- a/lib/concurrency/examples/Peterson_Atomicity.thy +++ b/lib/concurrency/examples/Peterson_Atomicity.thy @@ -5,6 +5,7 @@ *) theory Peterson_Atomicity imports + Lib.Lib Atomicity_Lib begin @@ -14,8 +15,7 @@ text \ datatype ident = A | B -primrec other_ident -where +primrec other_ident where "other_ident A = B" | "other_ident B = A" @@ -33,7 +33,7 @@ lemma neq_A_B[simp]: lemma forall_ident_eq: "(\ident. P ident) = (P A \ P B)" using ident.nchotomy - by metis + by (metis (full_types)) lemma other_other_ident_simps[simp]: "other_ident (other_ident x) = x" @@ -42,14 +42,13 @@ lemma other_other_ident_simps[simp]: by (simp_all split: other_ident_split add: eq_commute) text \ -The state of the algorithm. The variables A/B are condensed into -an ab_v variable, so we can parametrise by thread A/B. The priority -variable is t_v, and the critical section cs has two variable to -operate on, cs1_v and cs2_v. + The state of the algorithm. The variables A/B are condensed into + an ab_v variable, so we can parametrise by thread A/B. The priority + variable is t_v, and the critical section cs has two variable to + operate on, cs1_v and cs2_v. -Labels are needed to track where we're up to for the preconditions, -relies and guarantees. 
-\ + Labels are needed to track where we're up to for the preconditions, + relies and guarantees.\ datatype label = Awaiting | Critical | Exited @@ -66,136 +65,107 @@ locale mx_locale = and csI :: "'b \ bool" begin -definition - set_ab :: "ident \ bool \ ('a, 'b) p_state \ ('a, 'b) p_state" -where +definition set_ab :: "ident \ bool \ ('a, 'b) p_state \ ('a, 'b) p_state" where "set_ab ident trying s = (s \ ab_v := (ab_v s) (ident := trying) \)" -definition - set_label :: "ident \ label \ ('a, 'b) p_state \ ('a, 'b) p_state" -where +definition set_label :: "ident \ label \ ('a, 'b) p_state \ ('a, 'b) p_state" where "set_label ident label s = (s \ ab_label := (ab_label s) (ident := label) \)" -definition - locked :: "ident \ ('a, 'b) p_state \ bool" -where +definition locked :: "ident \ ('a, 'b) p_state \ bool" where "locked ident s = (ab_v s (other_ident ident) \ t_v s = ident)" -definition - acquire_lock :: "ident \ (('a, 'b) p_state, unit) tmonad" -where +definition acquire_lock :: "ident \ (('a, 'b) p_state, unit) tmonad" where "acquire_lock ident = do - interference; - modify (set_ab ident True); - modify (\s. s \ t_v := other_ident ident \); - modify (set_label ident Awaiting); - interference; - Await (locked ident); - modify (set_label ident Critical) + interference; + modify (set_ab ident True); + modify (\s. s \ t_v := other_ident ident \); + modify (set_label ident Awaiting); + interference; + Await (locked ident); + modify (set_label ident Critical) od" -definition - release_lock :: "ident \ (('a, 'b) p_state, unit) tmonad" -where +definition release_lock :: "ident \ (('a, 'b) p_state, unit) tmonad" where "release_lock ident = do - modify (set_ab ident False); - modify (set_label ident Exited); - interference; - return () + modify (set_ab ident False); + modify (set_label ident Exited); + interference; + return () od" -definition - abs_critical_section :: "(('a, 'b) p_state, unit) tmonad" -where +definition abs_critical_section :: "(('a, 'b) p_state, unit) tmonad" where "abs_critical_section = do - interferences; - modify (\s. s \ cs1_v := cs1 (cs2_v s) \); - cs2; - interference - od" + interferences; + modify (\s. s \ cs1_v := cs1 (cs2_v s) \); + cs2; + interference + od" -definition - abs_peterson_proc :: - "ident \ (('a, 'b) p_state, unit) tmonad" -where +definition abs_peterson_proc :: "ident \ (('a, 'b) p_state, unit) tmonad" where "abs_peterson_proc ident = do - acquire_lock ident; - abs_critical_section; - release_lock ident - od" + acquire_lock ident; + abs_critical_section; + release_lock ident + od" -definition - critical_section :: "(('a, 'b) p_state, unit) tmonad" -where +definition critical_section :: "(('a, 'b) p_state, unit) tmonad" where "critical_section = do - interference; - modify (\s. s \ cs1_v := cs1 (cs2_v s) \); - interference; - cs2; - interference - od" + interference; + modify (\s. s \ cs1_v := cs1 (cs2_v s) \); + interference; + cs2; + interference + od" -definition - peterson_proc :: "ident \ (('a, 'b) p_state, unit) tmonad" -where +definition peterson_proc :: "ident \ (('a, 'b) p_state, unit) tmonad" where "peterson_proc ident = do - acquire_lock ident; - critical_section; - release_lock ident - od" + acquire_lock ident; + critical_section; + release_lock ident + od" abbreviation "critical label \ label = Critical" -text \The required invariant. We can't both be in the critical section. 
-Whenever neither of us is in the critical section, its invariant holds.\ -definition - req_peterson_inv :: "('a, 'b) p_state \ bool" -where - "req_peterson_inv s = (\ (critical (ab_label s A) \ critical (ab_label s B)) - \ (critical (ab_label s A) \ critical (ab_label s B) \ csI (cs2_v s)))" +text \ + The required invariant. We can't both be in the critical section. + Whenever neither of us is in the critical section, its invariant holds.\ +definition req_peterson_inv :: "('a, 'b) p_state \ bool" where + "req_peterson_inv s = + (\ (critical (ab_label s A) \ critical (ab_label s B)) + \ (critical (ab_label s A) \ critical (ab_label s B) \ csI (cs2_v s)))" -text \The key invariant. We can't both be enabled, where that means -either we're in the critical section or waiting to enter with priority. -\ -abbreviation(input) - enabled :: "ident \ ('a, 'b) p_state \ bool" -where - "enabled ident s \ (critical (ab_label s ident) - \ (ab_label s ident = Awaiting \ t_v s = ident))" +text \ + The key invariant. We can't both be enabled, where that means + either we're in the critical section or waiting to enter with priority.\ +abbreviation (input) enabled :: "ident \ ('a, 'b) p_state \ bool" where + "enabled ident s \ + critical (ab_label s ident) \ ab_label s ident = Awaiting \ t_v s = ident" -definition - key_peterson_inv :: "('a, 'b) p_state \ bool" -where +definition key_peterson_inv :: "('a, 'b) p_state \ bool" where "key_peterson_inv s = (\ (enabled A s \ enabled B s))" text \Some trivia about labels and variables.\ -definition - local_peterson_inv :: "('a, 'b) p_state \ bool" -where - "local_peterson_inv s - = (\ident. ab_label s ident \ Exited \ ab_v s ident)" +definition local_peterson_inv :: "('a, 'b) p_state \ bool" where + "local_peterson_inv s = (\ident. 
ab_label s ident \ Exited \ ab_v s ident)" definition - "invs s = (req_peterson_inv s - \ key_peterson_inv s \ local_peterson_inv s)" + "invs s = (req_peterson_inv s \ key_peterson_inv s \ local_peterson_inv s)" lemmas invs_defs = req_peterson_inv_def key_peterson_inv_def local_peterson_inv_def -definition - peterson_rel :: "ident \ ('a, 'b) p_state \ ('a, 'b) p_state \ bool" -where - "peterson_rel ident s_prior s = (\ \assume invs\ invs s_prior \ +definition peterson_rel :: "ident \ ('a, 'b) p_state \ ('a, 'b) p_state \ bool" where + "peterson_rel ident s_prior s = + (\ \assume invs\ invs s_prior \ \ \invariants are preserved\ (invs s \ \I won't adjust your variables\ - \ (ab_v s (other_ident ident) = ab_v s_prior (other_ident ident)) - \ (ab_label s (other_ident ident) = ab_label s_prior (other_ident ident)) + \ (ab_v s (other_ident ident) = ab_v s_prior (other_ident ident)) + \ (ab_label s (other_ident ident) = ab_label s_prior (other_ident ident)) \ \I will only ever give you priority\ - \ (t_v s_prior = other_ident ident \ t_v s = other_ident ident) + \ (t_v s_prior = other_ident ident \ t_v s = other_ident ident) \ \If you're in the critical section, I won't change cs2_v and cs1_v\ - \ (critical (ab_label s_prior (other_ident ident)) - \ cs2_v s = cs2_v s_prior \ cs1_v s = cs1_v s_prior) - ))" + \ (critical (ab_label s_prior (other_ident ident)) + \ cs2_v s = cs2_v s_prior \ cs1_v s = cs1_v s_prior)))" lemma peterson_rel_rtranclp[simp]: "rtranclp (peterson_rel ident) = (peterson_rel ident)" @@ -212,57 +182,56 @@ lemma reflp_peterson_rel[simp]: declare reflp_peterson_rel[THEN reflpD, simp] lemma peterson_rel_imp_assume_invs: - "invs x \ (peterson_rel ident x y \ invs x \ invs y \ P x y) - \ (peterson_rel ident x y \ P x y)" + "\invs x; peterson_rel ident x y \ invs x \ invs y \ P x y\ + \ peterson_rel ident x y \ P x y" by (simp add: peterson_rel_def) end text \ -We assume validity for the underspecified critical section code represented by -@{text cs2}. + We assume validity for the underspecified critical section code represented by + @{text cs2}. -We also assume some basic sanity properties about the structure of @{text cs2}. -\ + We also assume some basic sanity properties about the structure of @{text cs2}.\ locale mx_locale_wp = mx_locale cs1 cs2 csI for cs1 :: "'b \ 'a" and cs2 and csI + assumes cs_wp: "\s c. I s \ lockf s \ I s \ lockf (s \ cs2_v := c \) \ - \ \s0' s'. csI (cs2_v s') \ s0' = s0 \ s' = s \ I s \ lockf s - \ cs1_v s' = cs1 (cs2_v s') \, - \ \s0 s. I s0 \ lockf s0 \ cs2_v s = cs2_v s0 \ I s \ lockf s - \ cs1_v s = cs1_v s0 \ - cs2 - \ \s0 s. I s0 \ (\c. s = s0 \ cs2_v := c \) \ I s \ lockf s \, - \ \_ s0' s'. \c. csI c \ s' = s \ cs2_v := c \ - \ (\c'. s0' = s0 \ s0' = s \ cs2_v := c' \) - \ I s' \ lockf s' \" + \\s0' s'. csI (cs2_v s') \ s0' = s0 \ s' = s \ I s \ lockf s + \ cs1_v s' = cs1 (cs2_v s')\, + \\s0 s. I s0 \ lockf s0 \ cs2_v s = cs2_v s0 \ I s \ lockf s \ cs1_v s = cs1_v s0\ + cs2 + \\s0 s. I s0 \ (\c. s = s0 \ cs2_v := c \) \ I s \ lockf s\, + \\_ s0' s'. \c. csI c \ s' = s \ cs2_v := c \ + \ (\c'. 
s0' = s0 \ s0' = s \ cs2_v := c' \) + \ I s' \ lockf s'\" and cs_closed: "prefix_closed cs2" and cs_not_env_steps_first: "not_env_steps_first cs2" begin -method_setup rev_drule = \ - Attrib.thms >> curry (fn (thms, ctxt) - => SIMPLE_METHOD (dresolve_tac ctxt thms 1 #> Seq.list_of #> rev #> Seq.of_list)) -\ +method_setup rev_drule = + \Attrib.thms >> + curry (fn (thms, ctxt) => + SIMPLE_METHOD (dresolve_tac ctxt thms 1 #> Seq.list_of #> rev #> Seq.of_list))\ lemma cs2_wp_apply_peterson[wp]: - "\ (\s0 s. csI (cs2_v s) - \ invs s0 \ invs s \ critical (ab_label s ident) - \ cs1_v s = cs1 (cs2_v s) - \ (\s0' c' c. csI c \ (\c'. s0' = s0 \ s0' = s \ cs2_v := c' \) - \ Q () s0' (s \ cs2_v := c\))) \, - \ peterson_rel (other_ident ident) \ - cs2 - \ peterson_rel ident \, - \ Q \" + "\\s0 s. csI (cs2_v s) + \ invs s0 \ invs s \ critical (ab_label s ident) + \ cs1_v s = cs1 (cs2_v s) + \ (\s0' c' c. csI c \ (\c'. s0' = s0 \ s0' = s \ cs2_v := c' \) + \ Q () s0' (s \ cs2_v := c\))\, + \peterson_rel (other_ident ident)\ + cs2 + \peterson_rel ident\, + \Q\" apply (rule validI_name_pre[OF cs_closed], clarsimp simp del: imp_disjL) - apply (rule validI_weaken_pre) + apply (rule rg_weaken_pre) apply (rule validI_well_behaved[OF cs_closed]) - apply (rule validI_strengthen_post) + apply (rule rg_strengthen_post) apply (rule_tac s=s and ?s0.0=s0 - and lockf="\s. critical (ab_label s ident)" - and I="invs" in cs_wp) + and lockf="\s. critical (ab_label s ident)" + and I="invs" + in cs_wp) apply (clarsimp simp: invs_defs invs_def) apply (clarsimp simp del: imp_disjL) apply (simp only: imp_conjL[symmetric]) @@ -275,105 +244,85 @@ lemma cs2_wp_apply_peterson[wp]: done lemma release_lock_mutual_excl: - "\ \s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Critical \ csI (cs2_v s) \, - \ peterson_rel (other_ident ident) \ - release_lock ident - \ peterson_rel ident \, - \ \rv s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Exited \" + "\\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Critical \ csI (cs2_v s)\, + \peterson_rel (other_ident ident)\ + release_lock ident + \peterson_rel ident\, + \\rv s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited\" apply (simp add: release_lock_def) - apply (rule validI_weaken_pre) - apply wpsimp+ + apply wpsimp apply (strengthen peterson_rel_imp_assume_invs | simp)+ apply (cases ident) - apply (safe, simp_all) - by ((clarsimp simp: peterson_rel_def set_label_def - set_ab_def invs_defs - | rule invs_def[THEN iffD2] conjI - | rev_drule invs_def[THEN iffD1])+) + apply (safe, simp_all) + by (clarsimp simp: peterson_rel_def set_label_def set_ab_def invs_defs + | rule invs_def[THEN iffD2] conjI + | rev_drule invs_def[THEN iffD1])+ lemma abs_critical_section_mutual_excl: - "\ \s0 s. peterson_rel ident s0 s \ invs s \ invs s0 - \ ab_label s ident = Critical \ csI (cs2_v s) \, - \ peterson_rel (other_ident ident) \ - abs_critical_section - \ peterson_rel ident \, - \ \rv s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Critical \ csI (cs2_v s) \" + "\\s0 s. peterson_rel ident s0 s \ invs s \ invs s0 \ ab_label s ident = Critical \ csI (cs2_v s)\, + \peterson_rel (other_ident ident)\ + abs_critical_section + \peterson_rel ident\, + \\rv s0 s. 
peterson_rel ident s0 s \ invs s \ ab_label s ident = Critical \ csI (cs2_v s)\" apply (simp add: abs_critical_section_def) - apply (rule validI_weaken_pre) - apply wpsimp+ + apply wpsimp apply (strengthen peterson_rel_imp_assume_invs | simp)+ apply (cases ident) - apply (safe, simp_all) - by ((clarsimp simp: peterson_rel_def invs_defs - | rule invs_def[THEN iffD2] conjI - | rev_drule invs_def[THEN iffD1])+) + apply (safe, simp_all) + by (clarsimp simp: peterson_rel_def invs_defs + | rule invs_def[THEN iffD2] conjI + | rev_drule invs_def[THEN iffD1])+ lemma acquire_lock_mutual_excl: - "\ \s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Exited \, - \ peterson_rel (other_ident ident) \ - acquire_lock ident - \ peterson_rel ident \, - \ \rv s0 s. peterson_rel ident s0 s \ invs s \ invs s0 - \ ab_label s ident = Critical \ csI (cs2_v s) \" + "\\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited\, + \peterson_rel (other_ident ident)\ + acquire_lock ident + \peterson_rel ident\, + \\rv s0 s. peterson_rel ident s0 s \ invs s \ invs s0 + \ ab_label s ident = Critical \ csI (cs2_v s)\" apply (simp add: acquire_lock_def) - apply (rule validI_weaken_pre) - apply (wpsimp wp: Await_sync_twp)+ + apply (wpsimp wp: Await_sync_twp) apply (strengthen peterson_rel_imp_assume_invs | simp)+ apply (cases ident) - apply (safe, simp_all) - by ((clarsimp simp: peterson_rel_def set_label_def set_ab_def - locked_def invs_defs - | rule invs_def[THEN iffD2] conjI - | rev_drule invs_def[THEN iffD1])+) + apply (safe, simp_all) + by (clarsimp simp: peterson_rel_def set_label_def set_ab_def locked_def invs_defs + | rule invs_def[THEN iffD2] conjI + | rev_drule invs_def[THEN iffD1])+ theorem abs_peterson_proc_mutual_excl: - "\ \s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Exited \, - \ peterson_rel (other_ident ident) \ - abs_peterson_proc ident - \ peterson_rel ident \, - \ \rv s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Exited \" + "\\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited\, + \peterson_rel (other_ident ident)\ + abs_peterson_proc ident + \peterson_rel ident\, + \\rv s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited\" apply (simp add: abs_peterson_proc_def bind_assoc) - apply (rule validI_weaken_pre) - apply (wpsimp wp: release_lock_mutual_excl acquire_lock_mutual_excl - abs_critical_section_mutual_excl)+ + apply (wpsimp wp: release_lock_mutual_excl acquire_lock_mutual_excl + abs_critical_section_mutual_excl) done -definition - peterson_sr :: "(('a, 'b) p_state \ ('a, 'b) p_state \ bool)" -where +definition peterson_sr :: "(('a, 'b) p_state \ ('a, 'b) p_state \ bool)" where "peterson_sr sa sc \ - t_v sa = t_v sc \ ab_v sa = ab_v sc - \ ab_label sa = ab_label sc \ cs2_v sa = cs2_v sc" + t_v sa = t_v sc \ ab_v sa = ab_v sc \ ab_label sa = ab_label sc \ cs2_v sa = cs2_v sc" -definition - peterson_sr' :: "(('a, 'b) p_state \ ('a, 'b) p_state \ bool)" -where +definition peterson_sr' :: "(('a, 'b) p_state \ ('a, 'b) p_state \ bool)" where "peterson_sr' sa sc \ sa = sc \ cs1_v := cs1_v sa \" -definition - peterson_sr_cs1 :: "(('a, 'b) p_state \ ('a, 'b) p_state \ bool)" -where +definition peterson_sr_cs1 :: "(('a, 'b) p_state \ ('a, 'b) p_state \ bool)" where "peterson_sr_cs1 sa sc \ peterson_sr sa sc \ cs1_v sa = cs1_v sc" end + text \ -Finally we assume that we can prove refinement for @{text cs2}, although this -may depend on being in a state where @{term cs1_v} has been correctly -initialised. 
-\ + Finally we assume that we can prove refinement for @{text cs2}, although this + may depend on being in a state where @{term cs1_v} has been correctly + initialised.\ locale mx_locale_refine = mx_locale_wp cs1 cs2 csI for cs1 :: "'b \ 'a" and cs2 and csI + assumes cs_refine: - "prefix_refinement peterson_sr peterson_sr_cs1 peterson_sr \\ - (\_ s. cs1_v s = cs1 (cs2_v s)) \\ - (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) - cs2 cs2" + "prefix_refinement peterson_sr peterson_sr_cs1 peterson_sr \\ + (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) + (\_ s. cs1_v s = cs1 (cs2_v s)) \\ + cs2 cs2" begin lemma @@ -381,7 +330,7 @@ lemma by (auto simp: p_state.splits peterson_sr_def peterson_sr'_def intro!: ext) lemma peterson_sr_set_ab[simp]: - "peterson_sr s t \ peterson_sr (set_ab ident v s) (set_ab ident v t)" + "peterson_sr s t \ peterson_sr (set_ab ident v s) (set_ab ident v t)" by (simp add: peterson_sr_def set_ab_def) lemma env_stable_peterson_sr: @@ -399,10 +348,11 @@ lemma peterson_sr_equivp[simp]: "equivp peterson_sr" by (auto simp: peterson_sr_def intro!: sympI equivpI transpI) -lemma peterson_sr_cs1_invs: "peterson_sr_cs1 s t \ invs s = invs t" - apply (auto simp: peterson_sr_def peterson_sr_cs1_def invs_def - req_peterson_inv_def key_peterson_inv_def - local_peterson_inv_def)[1] +lemma peterson_sr_cs1_invs: + "peterson_sr_cs1 s t \ invs s = invs t" + apply (auto simp: peterson_sr_def peterson_sr_cs1_def invs_def + req_peterson_inv_def key_peterson_inv_def + local_peterson_inv_def)[1] done lemma env_stable_peterson_sr_cs1: @@ -423,30 +373,30 @@ lemmas prefix_refinement_interference_peterson_cs1 = prefix_refinement_interference[OF env_stable_peterson_sr_cs1] lemmas prefix_refinement_bind_2left_2right - = prefix_refinement_bind[where a="bind a a'" and c="bind c c'" for a a' c c', simplified bind_assoc] + = prefix_refinement_bind[where a="Trace_Monad.bind a a'" and c="Trace_Monad.bind c c'" for a a' c c', simplified bind_assoc] lemmas rel_tr_refinement_bind_left_general_2left_2right - = rel_tr_refinement_bind_left_general[where f="bind f f'" and g="bind g g'" for f f' g g', simplified bind_assoc] + = rel_tr_refinement_bind_left_general[where f="Trace_Monad.bind f f'" and g="Trace_Monad.bind g g'" for f f' g g', + simplified bind_assoc] lemma peterson_rel_imp_invs: - "peterson_rel ident x y \ invs x \ invs y" + "\peterson_rel ident x y; invs x\ \ invs y" by (simp add: peterson_rel_def) lemma peterson_rel_imp_label: - "peterson_rel (other_ident ident) x y \ invs x + "\peterson_rel (other_ident ident) x y; invs x\ \ ab_label x ident = ab_label y ident" by (simp add: peterson_rel_def) lemma peterson_rel_set_label: - "peterson_rel (other_ident ident) (set_label ident label s) s' - \ invs (set_label ident label s) + "\peterson_rel (other_ident ident) (set_label ident label s) s'; invs (set_label ident label s)\ \ ab_label s' ident = label" by (simp add: peterson_rel_def set_label_def) lemma acquire_lock_refinement: - "prefix_refinement peterson_sr peterson_sr peterson_sr \\ - \\ \\ - (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) - (acquire_lock ident) (acquire_lock ident)" + "prefix_refinement peterson_sr peterson_sr peterson_sr dc + (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) + \\ \\ + (acquire_lock ident) (acquire_lock ident)" apply (unfold acquire_lock_def) apply (rule prefix_refinement_weaken_pre) apply (rule prefix_refinement_bind_sr) @@ -467,11 +417,11 @@ lemma acquire_lock_refinement: done 
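(* Illustration only, not part of the proofs: acquire_lock/release_lock above implement
   Peterson's classic mutual-exclusion protocol: raise your own flag in ab_v, hand priority
   to the other thread via t_v, then Await "locked", i.e. proceed once the other flag is down
   or the priority is yours; release_lock simply lowers the flag again. Below is a minimal
   executable sketch of that protocol in Python, assuming sequentially consistent memory;
   the names flag, turn, counter and the thread indices 0/1 standing for idents A/B are
   hypothetical, and the verified artefact is the monadic program above, not this sketch.

   import threading

   flag = [False, False]        # plays the role of ab_v
   turn = 0                     # plays the role of t_v
   counter = 0                  # stands in for the critical-section state (cs2_v)

   def acquire(me: int) -> None:
       global turn
       other = 1 - me
       flag[me] = True                      # modify (set_ab me True)
       turn = other                         # modify (t_v := other_ident me)
       while flag[other] and turn != me:    # Await (locked me)
           pass                             # spin until the lock condition holds

   def release(me: int) -> None:
       flag[me] = False                     # modify (set_ab me False)

   def worker(me: int) -> None:
       global counter
       for _ in range(10000):
           acquire(me)
           counter += 1                     # critical section
           release(me)

   threads = [threading.Thread(target=worker, args=(i,)) for i in (0, 1)]
   for t in threads:
       t.start()
   for t in threads:
       t.join()
   print(counter)                           # 20000 when mutual exclusion holds
*)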
lemma peterson_sr_invs[simp]: - "peterson_sr as cs \ invs as \ invs cs" + "\peterson_sr as cs; invs as\ \ invs cs" by (simp add: peterson_sr_def invs_def invs_defs) lemma peterson_sr_invs_sym: - "peterson_sr as cs \ invs cs \ invs as" + "\peterson_sr as cs; invs cs\ \ invs as" by (simp add: peterson_sr_def invs_def invs_defs) lemma peterson_sr_ab_label: @@ -479,12 +429,12 @@ lemma peterson_sr_ab_label: by (simp add: peterson_sr_def) lemma critical_section_refinement: - "prefix_refinement peterson_sr peterson_sr peterson_sr \\ - (\_ s. invs s \ ab_label s ident = Critical) \\ - (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) - abs_critical_section critical_section" + "prefix_refinement peterson_sr peterson_sr peterson_sr dc + (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) + (\_ s. invs s \ ab_label s ident = Critical) \\ + abs_critical_section critical_section" apply (simp add: abs_critical_section_def critical_section_def) - apply (rule prefix_refinement_weaken_pre) + apply pfx_refn_pre apply (rule prefix_refinement_interferences_split) apply (rule prefix_refinement_bind_sr) apply (rule prefix_refinement_interference_peterson) @@ -507,23 +457,23 @@ lemma critical_section_refinement: apply (rule prefix_refinement_bind_sr) apply (rule prefix_refinement_interference_peterson) apply (rule prefix_refinement_bind[where intsr=peterson_sr_cs1]) - apply (rule pfx_refn_modifyT) + apply (rule prefix_refinement_modifyT) apply (clarsimp simp add: peterson_sr_def peterson_sr_cs1_def) apply (rule prefix_refinement_bind_sr) apply (rule cs_refine) apply (rule prefix_refinement_interference_peterson) - apply (wpsimp wp: validI_triv[OF cs_closed])+ + apply (wpsimp wp: cs_closed)+ apply (subst peterson_rel_imp_label[symmetric], assumption, simp) apply (drule peterson_rel_imp_invs, simp) apply (simp add: peterson_sr_ab_label) done lemma release_lock_refinement: - "prefix_refinement peterson_sr peterson_sr peterson_sr \\ - \\ \\ - (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) - (release_lock ident) (release_lock ident)" + "prefix_refinement peterson_sr peterson_sr peterson_sr dc + (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) + \\ \\ + (release_lock ident) (release_lock ident)" apply (unfold release_lock_def) apply (rule prefix_refinement_weaken_pre) apply (simp add: modify_modify_bind) @@ -545,24 +495,23 @@ lemma abs_critical_section_prefix_closed[simp]: simp: cs_closed abs_critical_section_def) lemma peterson_rel_trans: - "peterson_rel ident x y \ peterson_rel ident y z \ - peterson_rel ident x z" - by (clarsimp simp: peterson_rel_def) + "\peterson_rel ident x y; peterson_rel ident y z\ + \ peterson_rel ident x z" + by (clarsimp simp: peterson_rel_def) lemma invs_set_label_Critical: - "invs s \ locked ident s \ ab_label s ident = Awaiting + "\invs s; locked ident s; ab_label s ident = Awaiting\ \ invs (set_label ident Critical s)" by (auto simp: invs_def invs_defs set_label_def locked_def) lemma acquire_lock_wp: - "\ \s0 s. invs s \ ab_label s ident = Exited \, - \ peterson_rel (other_ident ident) \ - acquire_lock ident - \ \\ \, - \ \rv s0 s. invs s \ ab_label s ident = Critical \" + "\\s0 s. invs s \ ab_label s ident = Exited\, + \peterson_rel (other_ident ident)\ + acquire_lock ident + \\\\, + \\rv s0 s. 
invs s \ ab_label s ident = Critical\" apply (simp add: acquire_lock_def) - apply (rule validI_weaken_pre) - apply (wpsimp wp: Await_sync_twp)+ + apply (wpsimp wp: Await_sync_twp) apply (subst (asm) peterson_rel_imp_label, assumption+) apply (drule(1) peterson_rel_imp_invs) apply (drule(1) peterson_rel_trans) @@ -583,45 +532,42 @@ lemma acquire_lock_prefix_closed[simp]: simp: cs_closed acquire_lock_def) theorem peterson_proc_refinement: - "prefix_refinement peterson_sr peterson_sr peterson_sr \\ - (\_ s. invs s \ ab_label s ident = Exited) - (\_ s. invs s \ ab_label s ident = Exited) - (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) - (abs_peterson_proc ident) - (peterson_proc ident)" - apply (simp add: abs_peterson_proc_def peterson_proc_def) - apply (rule prefix_refinement_weaken_pre) - apply (rule prefix_refinement_bind_sr) + "prefix_refinement peterson_sr peterson_sr peterson_sr dc + (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) + (\_ s. invs s \ ab_label s ident = Exited) + (\_ s. invs s \ ab_label s ident = Exited) + (abs_peterson_proc ident) + (peterson_proc ident)" + apply (simp add: abs_peterson_proc_def peterson_proc_def) + apply (rule prefix_refinement_weaken_pre) + apply (rule prefix_refinement_bind_sr) apply (rule acquire_lock_refinement) apply (rule prefix_refinement_bind_sr) apply (rule critical_section_refinement) apply (rule release_lock_refinement) - apply (wpsimp wp: validI_triv acquire_lock_wp - simp: bipred_conj_def)+ + apply (wpsimp wp: acquire_lock_wp + simp: pred_conj_def)+ done -definition - peterson_rel2 :: "ident \ ('a, 'b) p_state \ ('a, 'b) p_state \ bool" -where - "peterson_rel2 ident s_prior s = (\ \assume invs\ invs s_prior \ - \ \If you're in the critical section, I won't change cs1_v\ - (critical (ab_label s_prior (other_ident ident)) - \ cs1_v s = cs1_v s_prior))" +definition peterson_rel2 :: "ident \ ('a, 'b) p_state \ ('a, 'b) p_state \ bool" where + "peterson_rel2 ident s_prior s = + (\ \assume invs\ invs s_prior \ + \ \If you're in the critical section, I won't change cs1_v\ + (critical (ab_label s_prior (other_ident ident)) \ cs1_v s = cs1_v s_prior))" -definition - peterson_rel3 :: "ident \ ('a, 'b) p_state \ ('a, 'b) p_state \ bool" -where - "peterson_rel3 ident s_prior s = (\ \assume invs\ invs s_prior \ - \ \invariants are preserved\ +definition peterson_rel3 :: "ident \ ('a, 'b) p_state \ ('a, 'b) p_state \ bool" where + "peterson_rel3 ident s_prior s = + (\ \assume invs\ invs s_prior \ + \ \invariants are preserved\ (invs s \ \I won't adjust your variables\ - \ (ab_v s (other_ident ident) = ab_v s_prior (other_ident ident)) - \ (ab_label s (other_ident ident) = ab_label s_prior (other_ident ident)) + \ (ab_v s (other_ident ident) = ab_v s_prior (other_ident ident)) + \ (ab_label s (other_ident ident) = ab_label s_prior (other_ident ident)) \ \I will only ever give you priority\ - \ (t_v s_prior = other_ident ident \ t_v s = other_ident ident) + \ (t_v s_prior = other_ident ident \ t_v s = other_ident ident) \ \If you're in the critical section, I won't change cs2_v\ - \ (critical (ab_label s_prior (other_ident ident)) - \ cs2_v s = cs2_v s_prior)))" + \ (critical (ab_label s_prior (other_ident ident)) + \ cs2_v s = cs2_v s_prior)))" lemma peterson_rel_helpers: "peterson_rel2 ident s0 s \ peterson_rel3 ident s0 s @@ -629,12 +575,12 @@ lemma peterson_rel_helpers: by (clarsimp simp: peterson_rel_def peterson_rel2_def peterson_rel3_def) lemma peterson_rel_peterson_rel2: - "peterson_rel ident s0 s \ 
peterson_rel2 ident s0 s" + "peterson_rel ident s0 s \ peterson_rel2 ident s0 s" by (clarsimp simp: peterson_rel_def peterson_rel2_def) lemma peterson_sr_peterson_rel3: - "peterson_sr as0 cs0 \ peterson_sr as cs - \ peterson_rel ident as0 as \ peterson_rel3 ident cs0 cs" + "\peterson_sr as0 cs0; peterson_sr as cs; peterson_rel ident as0 as\ + \ peterson_rel3 ident cs0 cs" apply (clarsimp simp: peterson_rel_def peterson_rel3_def invs_def invs_defs peterson_sr_ab_label) apply (clarsimp simp: peterson_sr_def) @@ -646,87 +592,79 @@ lemma peterson_proc_prefix_closed[simp]: simp: cs_closed peterson_proc_def acquire_lock_def release_lock_def) lemma peterson_proc_mutual_excl_helper: - "\ \s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Exited \, - \ peterson_rel (other_ident ident) \ - peterson_proc ident - \ peterson_rel3 ident \, - \ \rv s0 s. peterson_rel3 ident s0 s \ invs s - \ ab_label s ident = Exited \" - apply (rule prefix_refinement_validI') + "\\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited\, + \peterson_rel (other_ident ident)\ + peterson_proc ident + \peterson_rel3 ident\, + \\rv s0 s. peterson_rel3 ident s0 s \ invs s \ ab_label s ident = Exited\" + apply (rule prefix_refinement_validI) apply (rule peterson_proc_refinement) apply (rule abs_peterson_proc_mutual_excl) - apply (clarsimp simp: peterson_sr_peterson_rel3 peterson_sr_ab_label) - apply (clarsimp simp: peterson_sr_peterson_rel3) - apply clarsimp - apply (rule_tac x=t0 in exI) - apply (rule_tac x="t \cs1_v := cs1_v t0\" in exI) - apply (clarsimp simp: peterson_rel_def peterson_sr_def) - apply clarsimp - apply (rule_tac x="t \cs1_v := cs1_v s0\" in exI) - apply (clarsimp simp: peterson_rel_def peterson_sr_def invs_def invs_defs) + apply clarsimp + apply (rule_tac x=t0 in exI) + apply (rule_tac x="t \cs1_v := cs1_v t0\" in exI) + apply (clarsimp simp: peterson_rel_def peterson_sr_def) + apply (rule_tac x="t \cs1_v := cs1_v s0\" in exI) + apply (clarsimp simp: peterson_rel_def peterson_sr_def invs_def invs_defs) + apply (clarsimp simp: peterson_sr_peterson_rel3) + apply (clarsimp simp: peterson_sr_peterson_rel3 peterson_sr_ab_label) apply clarsimp done lemma peterson_proc_mutual_excl_helper': - "\ \s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Exited \, - \ peterson_rel (other_ident ident) \ - peterson_proc ident - \ peterson_rel2 ident \, - \ \rv s0 s. peterson_rel2 ident s0 s \ invs s - \ ab_label s ident = Exited \" + "\\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited\, + \peterson_rel (other_ident ident)\ + peterson_proc ident + \peterson_rel2 ident\, + \\rv s0 s. 
peterson_rel2 ident s0 s \ invs s \ ab_label s ident = Exited\" apply (simp add: peterson_proc_def acquire_lock_def release_lock_def critical_section_def) - apply (rule validI_weaken_pre) - apply (wp Await_sync_twp | simp add: split_def - | rule validI_strengthen_guar[OF _ allI[OF allI[OF peterson_rel_peterson_rel2]]])+ + apply (wp Await_sync_twp | simp add: split_def + | rule rg_strengthen_guar[OF _ peterson_rel_peterson_rel2])+ apply (clarsimp simp: imp_conjL) apply (strengthen peterson_rel_imp_assume_invs | simp)+ apply (cases ident) - apply (safe, simp_all) - by ((clarsimp simp: peterson_rel_def peterson_rel2_def forall_ident_eq - set_label_def set_ab_def locked_def invs_defs cs_closed - | rule invs_def[THEN iffD2] conjI - | rev_drule invs_def[THEN iffD1])+) + apply (safe, simp_all) + by (clarsimp simp: peterson_rel_def peterson_rel2_def forall_ident_eq + set_label_def set_ab_def locked_def invs_defs cs_closed + | rule invs_def[THEN iffD2] conjI + | rev_drule invs_def[THEN iffD1])+ lemma peterson_proc_mutual_excl: - "\ \s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Exited \, - \ peterson_rel (other_ident ident) \ - peterson_proc ident - \ peterson_rel ident \, - \ \rv s0 s. peterson_rel ident s0 s \ invs s - \ ab_label s ident = Exited \" - apply (rule validI_strengthen_guar, rule validI_strengthen_post, rule validI_guar_post_conj_lift) + "\\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited\, + \peterson_rel (other_ident ident)\ + peterson_proc ident + \peterson_rel ident\, + \\rv s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited\" + apply (rule rg_strengthen_guar, rule rg_strengthen_post, rule validI_guar_post_conj_lift) apply (rule peterson_proc_mutual_excl_helper) apply (rule peterson_proc_mutual_excl_helper') apply (clarsimp simp: peterson_rel_helpers)+ done -definition "abs_peterson_proc_system \ +definition + "abs_peterson_proc_system \ parallel (do repeat (abs_peterson_proc A); interference od) (do repeat (abs_peterson_proc B); interference od)" lemma validI_repeat_interference: - "\P\, \R\ f \G\, \\_. P\ - \ \s0 s. P s0 s \ (\s'. R\<^sup>*\<^sup>* s s' \ Q () s' s') \ G s0 s - \ \P\, \R\ do repeat f; interference od \G\, \Q\" + "\\P\,\R\ f \G\,\\_. P\; \s0 s. P s0 s \ (\s'. R\<^sup>*\<^sup>* s s' \ Q () s' s') \ G s0 s\ + \ \P\,\R\ do repeat f; interference od \G\,\Q\" apply (rule bind_twp) apply simp apply (rule interference_twp) - apply (rule validI_strengthen_post) + apply (rule rg_strengthen_post) apply (rule repeat_validI, assumption) apply simp done lemma abs_peterson_proc_system_mutual_excl: - "\ \s0 s. s0 = s \ invs s \ ab_label s = (\_. Exited) \, - \ \s0 s. s0 = s \ - abs_peterson_proc_system - \ \s0 s. invs s0 \ invs s \, - \ \rv s0 s. invs s \" - apply (rule validI_weaken_pre, rule validI_strengthen_post) + "\\s0 s. s0 = s \ invs s \ ab_label s = (\_. Exited)\, + \\s0 s. s0 = s\ + abs_peterson_proc_system + \\s0 s. invs s0 \ invs s\, + \\rv s0 s. invs s\" + apply (rule rg_weaken_pre, rule rg_strengthen_post) apply (unfold abs_peterson_proc_system_def) apply (rule rg_validI[where Qf="\_ _. invs" and Qg="\_ _. 
invs"]) apply (rule validI_repeat_interference[OF abs_peterson_proc_mutual_excl]) @@ -734,10 +672,11 @@ lemma abs_peterson_proc_system_mutual_excl: apply (rule validI_repeat_interference[OF abs_peterson_proc_mutual_excl]) apply (clarsimp simp: peterson_rel_imp_invs) apply (simp add: reflp_ge_eq)+ - apply (clarsimp simp: peterson_rel_def)+ + apply (clarsimp simp: peterson_rel_def)+ done -definition "peterson_proc_system \ +definition + "peterson_proc_system \ parallel (do repeat (peterson_proc A); interference od) (do repeat (peterson_proc B); interference od)" @@ -747,35 +686,35 @@ lemma abs_peterson_proc_prefix_closed[simp]: simp: cs_closed abs_peterson_proc_def acquire_lock_def release_lock_def) lemma peterson_repeat_refinement: - "prefix_refinement peterson_sr peterson_sr peterson_sr \\ - (\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited) - (\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited) - (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) - (do repeat (abs_peterson_proc ident); - interference - od) - (do repeat (peterson_proc ident); - interference - od)" + "prefix_refinement peterson_sr peterson_sr peterson_sr dc + (peterson_rel (other_ident ident)) (peterson_rel (other_ident ident)) + (\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited) + (\s0 s. peterson_rel ident s0 s \ invs s \ ab_label s ident = Exited) + (do repeat (abs_peterson_proc ident); + interference + od) + (do repeat (peterson_proc ident); + interference + od)" apply (rule prefix_refinement_weaken_pre) apply (rule prefix_refinement_bind_sr) apply (rule prefix_refinement_repeat[rotated]) - apply (rule abs_peterson_proc_mutual_excl[THEN validI_strengthen_guar]) + apply (rule abs_peterson_proc_mutual_excl[THEN rg_strengthen_guar]) apply simp - apply (rule peterson_proc_mutual_excl[THEN validI_strengthen_guar, THEN validI_weaken_pre]) + apply (rule peterson_proc_mutual_excl[THEN rg_strengthen_guar, THEN rg_weaken_pre]) apply simp+ apply (rule peterson_proc_refinement[THEN prefix_refinement_weaken_pre]) apply simp+ apply (rule prefix_refinement_interference_peterson) - apply (wpsimp wp: validI_triv)+ + apply wpsimp+ done theorem peterson_proc_system_refinement: - "prefix_refinement peterson_sr peterson_sr peterson_sr \\ - (\s0 s. s0 = s \ invs s \ ab_label s = (\_. Exited)) - (\t0 t. t0 = t \ invs t \ ab_label t = (\_. Exited)) - (\s0 s. s0 = s) (\t0 t. t0 = t) - abs_peterson_proc_system peterson_proc_system" + "prefix_refinement peterson_sr peterson_sr peterson_sr dc + (\s0 s. s0 = s) (\t0 t. t0 = t) + (\s0 s. s0 = s \ invs s \ ab_label s = (\_. Exited)) + (\t0 t. t0 = t \ invs t \ ab_label t = (\_. 
Exited)) + abs_peterson_proc_system peterson_proc_system" apply (unfold abs_peterson_proc_system_def peterson_proc_system_def) apply (rule prefix_refinement_parallel') apply (rule prefix_refinement_weaken_rely, rule prefix_refinement_weaken_pre) @@ -792,19 +731,19 @@ theorem peterson_proc_system_refinement: apply (rule eq_refl, rule bipred_disj_op_eq, simp) apply (clarsimp intro!: par_tr_fin_bind par_tr_fin_interference) apply (clarsimp intro!: par_tr_fin_bind par_tr_fin_interference) - apply (rule validI_weaken_pre, rule validI_weaken_rely) + apply (rule rg_weaken_pre, rule rg_weaken_rely) apply (rule validI_repeat_interference; simp) apply (rule peterson_proc_mutual_excl) apply (simp+)[3] - apply (rule validI_weaken_pre, rule validI_weaken_rely) + apply (rule rg_weaken_pre, rule rg_weaken_rely) apply (rule validI_repeat_interference; simp) apply (rule peterson_proc_mutual_excl) apply (simp+)[3] - apply (rule validI_weaken_pre, rule validI_weaken_rely) + apply (rule rg_weaken_pre, rule rg_weaken_rely) apply (rule validI_repeat_interference; simp) apply (rule abs_peterson_proc_mutual_excl) apply (simp+)[3] - apply (rule validI_weaken_pre, rule validI_weaken_rely) + apply (rule rg_weaken_pre, rule rg_weaken_rely) apply (rule validI_repeat_interference; simp) apply (rule abs_peterson_proc_mutual_excl) apply (simp+)[3] @@ -812,21 +751,21 @@ theorem peterson_proc_system_refinement: lemma peterson_proc_system_prefix_closed[simp]: "prefix_closed (peterson_proc_system)" - by (auto intro!: prefix_closed_bind parallel_prefix_closed + by (auto intro!: prefix_closed_bind prefix_closed_parallel simp: cs_closed peterson_proc_system_def) theorem peterson_proc_system_mutual_excl: - "\ \s0 s. s0 = s \ invs s \ ab_label s = (\_. Exited) \, - \ \s0 s. s0 = s \ - peterson_proc_system - \ \s0 s. invs s0 \ invs s \, - \ \rv s0 s. invs s \" - apply (rule prefix_refinement_validI') + "\\s0 s. s0 = s \ invs s \ ab_label s = (\_. Exited)\, + \\s0 s. s0 = s\ + peterson_proc_system + \\s0 s. invs s0 \ invs s\, + \\rv s0 s. invs s\" + apply (rule prefix_refinement_validI) apply (rule peterson_proc_system_refinement) apply (rule abs_peterson_proc_system_mutual_excl) - apply clarsimp - apply (clarsimp simp: peterson_sr_invs_sym ) - apply (fastforce simp: peterson_rel_def peterson_sr_def) + apply (fastforce simp: peterson_rel_def peterson_sr_def) + apply clarsimp + apply (clarsimp simp: peterson_sr_invs_sym ) apply clarsimp apply (fastforce simp: peterson_sr_def) done diff --git a/lib/concurrency/examples/Plus2_Prefix.thy b/lib/concurrency/examples/Plus2_Prefix.thy index 0e338cb580..aba52c3b8a 100644 --- a/lib/concurrency/examples/Plus2_Prefix.thy +++ b/lib/concurrency/examples/Plus2_Prefix.thy @@ -5,50 +5,59 @@ *) theory Plus2_Prefix imports + Lib.Lib Prefix_Refinement begin section \The plus2 example.\ text \ -This example presents an application of prefix refinement -to solve the plus2 verification problem. - -The function below can be proven to do two increments per -instance when grouped in parallel. But RG-reasoning doesn't -work well for this proof, since the state change (+1) performed -by the counterparts must be allowed by the rely, which must be -transitively closed, allowing any number of (+1) operations, -which is far too general. - -We address the issue with a ghost variable which records the number -of increments by each thread. We use prefix refinement to relate the -program with ghost variable to the initial program. 
-\ + This example presents an application of prefix refinement + to solve the plus2 verification problem. + + The function below can be proven to do two increments per + instance when grouped in parallel. But RG-reasoning doesn't + work well for this proof, since the state change (+1) performed + by the counterparts must be allowed by the rely, which must be + transitively closed, allowing any number of (+1) operations, + which is far too general. + + We address the issue with a ghost variable which records the number + of increments by each thread. We use prefix refinement to relate the + program with ghost variable to the initial program. + + Note that the programs defined here begin with an @{const env_steps} + and ends with @{const interference}. This is required so that they + can be combined with @{const parallel}; without these steps the + traces would not be able to be matched up and the composed program + would be trivially empty.\ definition - "plus2 \ do env_steps; modify ((+) (1 :: nat)); - interference; modify ((+) 1); interference od" + "plus2 \ do + env_steps; + modify ((+) (1 :: nat)); + interference; + modify ((+) 1); + interference + od" section \The ghost-extended program.\ -record - plus2_xstate = - mainv :: nat - threadv :: "nat \ nat" +record plus2_xstate = + mainv :: nat + threadv :: "nat \ nat" -definition - point_update :: "'a \ ('b \ 'b) \ (('a \ 'b) \ ('a \ 'b))" -where +definition point_update :: "'a \ ('b \ 'b) \ (('a \ 'b) \ ('a \ 'b))" where "point_update x fup f = f (x := fup (f x))" definition - "plus2_x tid \ do env_steps; - modify (mainv_update ((+) 1) o threadv_update (point_update tid ((+) 1))); - interference; - modify (mainv_update ((+) 1) o threadv_update (point_update tid ((+) 1))); - interference - od" + "plus2_x tid \ do + env_steps; + modify (mainv_update ((+) 1) o threadv_update (point_update tid ((+) 1))); + interference; + modify (mainv_update ((+) 1) o threadv_update (point_update tid ((+) 1))); + interference + od" section \Verifying the extended @{term plus2}.\ text \The RG-reasoning needed to verify the @{term plus2_x} program.\ @@ -56,8 +65,8 @@ definition "plus2_inv tids s = (mainv s = sum (threadv s) tids)" definition - "plus2_rel tids fix_tids s0 s - = ((plus2_inv tids s0 \ plus2_inv tids s) \ (\tid \ fix_tids. threadv s tid = threadv s0 tid))" + "plus2_rel tids fix_tids s0 s = + ((plus2_inv tids s0 \ plus2_inv tids s) \ (\tid \ fix_tids. threadv s tid = threadv s0 tid))" lemma plus2_rel_trans[simp]: "rtranclp (plus2_rel tids ftids) = plus2_rel tids ftids" @@ -67,36 +76,35 @@ lemma plus2_rel_trans[simp]: done lemma plus2_inv_Suc[simp]: - "tid \ tids \ finite tids - \ plus2_inv tids (mainv_update Suc (threadv_update (point_update tid Suc) s)) - = plus2_inv tids s" + "\tid \ tids; finite tids\ + \ plus2_inv tids (mainv_update Suc (threadv_update (point_update tid Suc) s)) + = plus2_inv tids s" apply (simp add: plus2_inv_def point_update_def) apply (simp add: sum.If_cases[where h=f and g=f and P="(=) tid" and A="tids" for f x, simplified]) done theorem plus2_x_property: "\\s0 s. plus2_inv tids s \ threadv s tid = 0 \ s = s0 \ tid \ tids \ finite tids\,\plus2_rel tids {tid}\ - plus2_x tid \plus2_rel tids (- {tid})\,\\_ _ s. plus2_inv tids s \ threadv s tid = 2\" + plus2_x tid + \plus2_rel tids (- {tid})\,\\_ _ s. 
plus2_inv tids s \ threadv s tid = 2\" apply (simp add: plus2_x_def) - apply (rule validI_weaken_pre) - apply wp - apply clarsimp + apply wpsimp apply (clarsimp simp: plus2_rel_def point_update_def) done corollary plus2_x_parallel: "\\s0 s. mainv s = 0 \ (\tid \ {1, 2}. threadv s tid = 0) \ s = s0\,\\a b. a = b\ - parallel (plus2_x 1) (plus2_x 2) \\s0 s. True\,\\_ s0 s. mainv s = 4\" - apply (rule validI_weaken_pre) - apply (rule validI_strengthen_post) + parallel (plus2_x 1) (plus2_x 2) + \\s0 s. True\,\\_ s0 s. mainv s = 4\" + apply (rule rg_weaken_pre) + apply (rule rg_strengthen_post) apply ((rule rg_validI plus2_x_property[where tids="{1, 2}"])+; simp add: plus2_rel_def le_fun_def) - apply (clarsimp simp: plus2_inv_def bipred_conj_def) - apply (clarsimp simp add: bipred_conj_def plus2_inv_def) + apply (clarsimp simp: plus2_inv_def) + apply (clarsimp simp add: plus2_inv_def) done section \Mapping across prefix refinement.\ -text \Proving prefix refinement of @{term plus2} and @{term plus2_x} and -deriving the final result.\ +text \Proving prefix refinement of @{term plus2} and @{term plus2_x} and deriving the final result.\ lemma env_stable: "env_stable AR R (\s t. t = mainv s) (\s t. t = mainv s) \" @@ -104,19 +112,17 @@ lemma env_stable: apply (simp add: plus2_xstate.splits) done -abbreviation(input) - "p_refn rvr P Q AR R \ prefix_refinement (\s t. t = mainv s) (\s t. t = mainv s) - (\s t. t = mainv s) rvr P Q AR R" +abbreviation (input) + "p_refn rvr AR R P Q \ + prefix_refinement (\s t. t = mainv s) (\s t. t = mainv s) (\s t. t = mainv s) rvr AR R P Q" theorem pfx_refn_plus2_x: - "p_refn (\\) (=) (\\) AR R (plus2_x tid) plus2" + "p_refn dc AR R (=) (\\) (plus2_x tid) plus2" apply (simp add: plus2_x_def plus2_def) - apply (rule prefix_refinement_weaken_pre) + apply pfx_refn_pre apply (rule pfx_refn_bind prefix_refinement_interference - prefix_refinement_env_steps allI - pfx_refn_modifyT env_stable - | simp - | wp)+ + prefix_refinement_env_steps pfx_refn_modifyT env_stable + | wpsimp)+ done lemma par_tr_fin_plus2_x: @@ -126,59 +132,59 @@ lemma par_tr_fin_plus2_x: lemma prefix_closed_plus2: "prefix_closed plus2" apply (simp add: plus2_def) - apply (rule validI_prefix_closed_T, rule validI_weaken_pre, wp) - apply simp + apply (rule validI_prefix_closed_T, wpsimp) done theorem plus2_parallel: "\\s0 s. s = 0 \ s = s0\,\\a b. a = b\ - parallel plus2 plus2 \\s0 s. True\,\\_ s0 s. s = 4\" + parallel plus2 plus2 + \\s0 s. True\,\\_ s0 s. s = 4\" apply (rule_tac sr="\s t. t = mainv s" in prefix_refinement_validI) - apply (rule prefix_refinement_parallel_triv; - ((rule par_tr_fin_plus2_x prefix_closed_plus2)?)) - apply (rule pfx_refn_plus2_x[where tid=1]) - apply (rule pfx_refn_plus2_x[where tid=2]) + apply (rule prefix_refinement_parallel_triv; + ((rule par_tr_fin_plus2_x prefix_closed_plus2 twp_post_taut)+)?) 
+ apply (rule pfx_refn_plus2_x[where tid=1]) + apply (rule pfx_refn_plus2_x[where tid=2]) + apply (rule plus2_x_parallel) + apply clarsimp + apply (clarsimp simp: plus2_xstate.splits) + apply (strengthen exI[where x="f(1 := x, 2 := y)" for f x y]) + apply simp apply clarsimp - apply (rule validI_strengthen_post) - apply (rule plus2_x_parallel[simplified]) - apply clarsimp - apply (clarsimp simp: plus2_xstate.splits) - apply (strengthen exI[where x="f(1 := x, 2 := y)" for f x y]) - apply simp + apply clarsimp apply clarsimp - apply (intro parallel_prefix_closed prefix_closed_plus2) + apply (intro prefix_closed_parallel prefix_closed_plus2) done section \Generalising\ -text \Just for fun, generalise to arbitrarily many -copies of the @{term plus2} program.\ +text \Just for fun, generalise to arbitrarily many copies of the @{term plus2} program.\ lemma plus2_x_n_parallel_induct: - "n > 0 \ n \ N \ - \\s0 s. plus2_inv {..< N} s \ (\i < N. threadv s i = 0) \ s = s0\,\plus2_rel {..< N} {..< n}\ - fold parallel (map plus2_x [1 ..< n]) (plus2_x 0) \plus2_rel {..< N} ( - {..< n})\,\\_ _ s. - plus2_inv {..< N} s \ (\i < n. threadv s i = 2)\" + "\n > 0; n \ N\ \ + \\s0 s. plus2_inv {..< N} s \ (\i < N. threadv s i = 0) \ s = s0\,\plus2_rel {..< N} {..< n}\ + fold parallel (map plus2_x [1 ..< n]) (plus2_x 0) + \plus2_rel {..< N} ( - {..< n})\,\\_ _ s. plus2_inv {..< N} s \ (\i < n. threadv s i = 2)\" apply (induct n) apply simp apply (case_tac n) apply (simp only: lessThan_Suc) apply simp - apply (rule validI_weaken_pre, rule plus2_x_property) + apply (wp plus2_x_property) apply clarsimp apply (clarsimp split del: if_split) - apply (rule validI_weaken_pre, rule validI_strengthen_post, - rule rg_validI, rule plus2_x_property[where tids="{..< N}"], - assumption, (clarsimp simp: plus2_rel_def)+) + apply (rule rg_weaken_pre, rule rg_strengthen_post, + rule rg_validI, rule plus2_x_property[where tids="{..< N}"], assumption, + (clarsimp simp: plus2_rel_def)+) apply (auto dest: less_Suc_eq[THEN iffD1])[1] - apply (clarsimp simp: bipred_conj_def) + apply clarsimp done theorem plus2_x_n_parallel: "n > 0 \ - \\s0 s. mainv s = 0 \ (\i < n. threadv s i = 0) \ s = s0\,\plus2_rel {..< n} {..< n}\ - fold parallel (map plus2_x [1 ..< n]) (plus2_x 0) \\s0 s. True\,\\_ _ s. mainv s = (n * 2)\" - apply (rule validI_weaken_pre, rule validI_strengthen_post, - rule validI_strengthen_guar, erule plus2_x_n_parallel_induct) + \\s0 s. mainv s = 0 \ (\i < n. threadv s i = 0) \ s = s0\,\plus2_rel {..< n} {..< n}\ + fold parallel (map plus2_x [1 ..< n]) (plus2_x 0) + \\s0 s. True\,\\_ _ s. mainv s = (n * 2)\" + apply (rule rg_weaken_pre, rule rg_strengthen_post, + rule rg_strengthen_guar, erule plus2_x_n_parallel_induct) apply simp apply simp apply (clarsimp simp: plus2_inv_def) @@ -186,8 +192,8 @@ theorem plus2_x_n_parallel: done lemma par_tr_fin_principle_parallel: - "par_tr_fin_principle f \ par_tr_fin_principle g - \ par_tr_fin_principle (parallel f g)" + "\par_tr_fin_principle f; par_tr_fin_principle g\ + \ par_tr_fin_principle (parallel f g)" apply (subst par_tr_fin_principle_def, clarsimp simp: parallel_def3) apply (drule(1) par_tr_fin_principleD)+ apply (clarsimp simp: neq_Nil_conv) @@ -195,56 +201,58 @@ lemma par_tr_fin_principle_parallel: lemma fold_parallel_par_tr_fin_principle[where xs="rev xs" for xs, simplified]: "\x \ insert x (set xs). 
par_tr_fin_principle x - \ par_tr_fin_principle (fold parallel (rev xs) x)" + \ par_tr_fin_principle (fold parallel (rev xs) x)" by (induct xs, simp_all add: par_tr_fin_principle_parallel) lemma fold_parallel_prefix_closed[where xs="rev xs" for xs, simplified]: "\x \ insert x (set xs). prefix_closed x - \ prefix_closed (fold parallel (rev xs) x)" - by (induct xs, simp_all add: parallel_prefix_closed) + \ prefix_closed (fold parallel (rev xs) x)" + by (induct xs, simp_all add: prefix_closed_parallel) lemma bipred_disj_top_eq: - "(Rel Or (\_ _. True)) = (\_ _. True)" - "((\_ _. True) Or Rel) = (\_ _. True)" - by (auto simp add: bipred_disj_def) + "(Rel or (\_ _. True)) = (\_ _. True)" + "((\_ _. True) or Rel) = (\_ _. True)" + by auto lemma fold_parallel_pfx_refn_induct: - "list_all2 (prefix_refinement sr sr sr (\_ _. True) P Q (\\) (\\)) xs ys - \ prefix_refinement sr sr sr (\_ _. True) P Q (\\) (\\) x y - \ \x \ set (x # xs). par_tr_fin_principle x - \ \y \ set (y # ys). prefix_closed y - \ prefix_refinement sr sr sr (\_ _. True) P Q (\\) (\\) - (fold parallel (rev xs) x) (fold parallel (rev ys) y)" + "\list_all2 (prefix_refinement sr sr sr dc (\\) (\\) P Q) xs ys; + prefix_refinement sr sr sr dc (\\) (\\) P Q x y; + \x \ set (x # xs). par_tr_fin_principle x; + \y \ set (y # ys). prefix_closed y\ + \ prefix_refinement sr sr sr dc(\\) (\\) P Q + (fold parallel (rev xs) x) (fold parallel (rev ys) y)" apply (induct rule: list_all2_induct; simp) apply (rule prefix_refinement_parallel_triv[simplified bipred_disj_top_eq]; simp?) apply (clarsimp simp: fold_parallel_par_tr_fin_principle - fold_parallel_prefix_closed)+ + fold_parallel_prefix_closed rg_TrueI)+ done -lemmas fold_parallel_pfx_refn - = fold_parallel_pfx_refn_induct[where xs="rev xs" and ys="rev ys" for xs ys, simplified] +lemmas fold_parallel_pfx_refn = + fold_parallel_pfx_refn_induct[where xs="rev xs" and ys="rev ys" for xs ys, simplified] theorem plus2_n_parallel: - "n > 0 - \ \\s0 s. s = 0 \ s = s0\,\\a b. a = b\ - fold parallel (replicate (n - 1) plus2) plus2 \\s0 s. True\,\\_ s0 s. s = n * 2\" + "n > 0 \ + \\s0 s. s = 0 \ s = s0\,\\a b. a = b\ + fold parallel (replicate (n - 1) plus2) plus2 + \\s0 s. True\,\\_ s0 s. s = n * 2\" apply (rule_tac sr="\s t. t = mainv s" in prefix_refinement_validI) - apply (rule prefix_refinement_weaken_rely, - rule_tac xs="map plus2_x [1 ..< n]" in fold_parallel_pfx_refn) - apply (clarsimp simp: list_all2_conv_all_nth) - apply (rule pfx_refn_plus2_x) - apply (rule pfx_refn_plus2_x[where tid=0]) - apply (simp add: par_tr_fin_plus2_x) - apply (simp add: prefix_closed_plus2) - apply (simp add: le_fun_def) - apply (simp add: le_fun_def) - apply simp - apply (rule validI_strengthen_post, rule plus2_x_n_parallel[simplified], simp) + apply (rule prefix_refinement_weaken_rely, + rule_tac xs="map plus2_x [1 ..< n]" in fold_parallel_pfx_refn) + apply (clarsimp simp: list_all2_conv_all_nth) + apply (rule pfx_refn_plus2_x) + apply (rule pfx_refn_plus2_x[where tid=0]) + apply (simp add: par_tr_fin_plus2_x) + apply (simp add: prefix_closed_plus2) + apply (simp add: le_fun_def) + apply (simp add: le_fun_def) + apply (rule plus2_x_n_parallel, simp) + apply clarsimp + apply (clarsimp simp: plus2_xstate.splits exI[where x="\_. 0"]) apply clarsimp - apply (clarsimp simp: plus2_xstate.splits exI[where x="\_. 
0"]) + apply (rule exI, strengthen refl) + apply (clarsimp simp: plus2_rel_def plus2_inv_def) + apply clarsimp apply clarsimp - apply (rule exI, strengthen refl) - apply (clarsimp simp: plus2_rel_def plus2_inv_def) apply (rule fold_parallel_prefix_closed) apply (simp add: prefix_closed_plus2) done diff --git a/lib/crunch-cmd.ML b/lib/crunch-cmd.ML index f065a41101..56930fdf64 100644 --- a/lib/crunch-cmd.ML +++ b/lib/crunch-cmd.ML @@ -71,8 +71,6 @@ fun funkysplit [_,b,c] = [b,c] fun real_base_name name = name |> Long_Name.explode |> funkysplit |> Long_Name.implode (*Handles locales properly-ish*) -fun handle_int exn func = if Exn.is_interrupt exn then Exn.reraise exn else func - val Thm : (Facts.ref * Token.src list) -> ((Facts.ref * Token.src list), xstring) sum = Inl val Constant : xstring -> ((Facts.ref * Token.src list), xstring) sum = Inr val thms = lefts @@ -87,6 +85,12 @@ val simp_sect = ("simp", Parse.thm >> Thm); val simp_del_sect = ("simp_del", Parse.thm >> Thm); val rule_sect = ("rule", Parse.thm >> Thm); val rule_del_sect = ("rule_del", Parse.thm >> Thm); +val wp_comb_sect = ("wp_comb", Parse.thm >> Thm); +val wp_comb_del_sect = ("wp_comb_del", Parse.thm >> Thm); + +val crunch_sections = + [wp_sect,wp_del_sect,wps_sect,ignore_sect,simp_sect,simp_del_sect,rule_sect,rule_del_sect, + ignore_del_sect,wp_comb_sect,wp_comb_del_sect] fun read_const ctxt = Proof_Context.read_const {proper = true, strict = false} ctxt; @@ -229,6 +233,7 @@ fun simps_of n = n ^ simps_sfx; fun num_args t = length (binder_types t) - 1; fun real_const_from_name const nmspce ctxt = + \<^try>\ let val qual::locale::nm::nil = Long_Name.explode const; val SOME some_nmspce = nmspce; @@ -237,7 +242,7 @@ fun real_const_from_name const nmspce ctxt = in nm end - handle exn => handle_int exn const; + catch _ => const\; fun get_monad ctxt f xs = if is_Const f then @@ -383,12 +388,14 @@ fun eq_cname (Const (s, _)) (Const (t, _)) = (s = t) | eq_cname _ _ = false fun resolve_abbreviated ctxt abbrev = - let + \<^try>\ + let val (abbrevn,_) = dest_Const abbrev val origin = (head_of (snd ((Consts.the_abbreviation o Proof_Context.consts_of) ctxt abbrevn))); val (originn,_) = dest_Const origin; val (_::_::_::nil) = Long_Name.explode originn; - in origin end handle exn => handle_int exn abbrev + in origin end + catch _ => abbrev\; fun map_consts f = let @@ -436,10 +443,14 @@ fun induct_inst ctxt const goal nmspce = then error ("Unfold rule generated for " ^ const ^ " does not apply") else (ms', induct_inst_simplified) end -fun unfold_data ctxt constn goal nmspce NONE = ( - induct_inst ctxt constn goal nmspce handle exn => handle_int exn - unfold ctxt constn goal nmspce handle exn => handle_int exn - error ("unfold_data: couldn't find defn or induct rule for " ^ constn)) +fun unfold_data ctxt constn goal nmspce NONE = + \<^try>\ + induct_inst ctxt constn goal nmspce + catch _ => + \<^try>\ + unfold ctxt constn goal nmspce + catch _ => + error ("unfold_data: couldn't find defn or induct rule for " ^ constn)\\ | unfold_data ctxt constn goal _ (SOME thm) = let val trivial_rule = Thm.trivial goal @@ -493,7 +504,7 @@ fun get_inst_precond ctxt pre extra (mapply, goal) = let match what we were trying to prove, thus a THM exception from RS *) handle THM _ => NONE; -fun split_precond (Const (@{const_name pred_conj}, _) $ P $ Q) +fun split_precond (Const (@{const_name inf}, _) $ P $ Q) = split_precond P @ split_precond Q | split_precond (Abs (n, T, @{const "HOL.conj"} $ P $ Q)) = maps (split_precond o Envir.eta_contract) [Abs (n, T, P), 
Abs (n, T, Q)] @@ -517,7 +528,7 @@ fun combine_preconds ctxt pre pres = let |> remove (op aconv) pre |> distinct (op aconv) |> filter (precond_needed ctxt pre ctxt); val T = fastype_of pre; - val conj = Const (@{const_name pred_conj}, T --> T --> T) + val conj = Const (@{const_name inf}, T --> T --> T) in case pres' of [] => pre | _ => let val precond = foldl1 (fn (a, b) => conj $ a $ b) pres' @@ -749,8 +760,8 @@ fun crunch cfg pre extra stack const' thms = let val _ = "crunching constant: " ^ Proof_Context.markup_const ctxt const |> writeln; val const_term = read_const ctxt const; - val goal = make_goal const_term const pre extra ctxt - handle exn => handle_int exn (raise WrongType); + val goal = + \<^try>\make_goal const_term const pre extra ctxt catch _ => raise WrongType\; val _ = debug_trace_bl [K (Pretty.str "goal: "), fn () => Syntax.pretty_term ctxt goal] in (* first check: has this constant already been done or supplied? *) @@ -789,7 +800,7 @@ fun crunch cfg pre extra stack const' thms = val _ = writeln ("attempting: " ^ Syntax.string_of_term ctxt''' goal_prop); fun wp' wp_rules = wp ctxt (map snd (thms @ #wp_rules cfg) @ goals' @ wp_rules) val thm = Goal.prove_future ctxt''' [] [] goal_prop - ( (*DupSkip.goal_prove_wrapper *) (fn _ => + (fn _ => resolve_tac ctxt''' [rule] 1 THEN maybe_cheat_tac ctxt''' THEN ALLGOALS (simp_tac ctxt''') @@ -814,7 +825,7 @@ fun crunch cfg pre extra stack const' thms = CHANGED_GOAL (simp_tac ctxt''') ) n)) THEN proof_failed_warnings const stack cfg wp' ctxt''' - )) |> singleton (Proof_Context.export ctxt''' ctxt) + ) |> singleton (Proof_Context.export ctxt''' ctxt) in (SOME thm, (get_thm_name cfg const, thm) :: thms') end end handle WrongType => @@ -826,15 +837,17 @@ fun crunch cfg pre extra stack const' thms = fun get_locale_origins full_const_names ctxt = let fun get_locale_origin abbrev = - let - (*Check if the given const is an abbreviation*) - val (origin,_) = dest_Const (head_of (snd ((Consts.the_abbreviation o Proof_Context.consts_of) ctxt abbrev))); - (*Check that the origin can be broken into 3 parts (i.e. it is from a locale) *) - val [_,_,_] = Long_Name.explode origin; - (*Remember the theory for the abbreviation*) - - val [qual,nm] = Long_Name.explode abbrev - in SOME qual end handle exn => handle_int exn NONE + \<^try>\ + let + (*Check if the given const is an abbreviation*) + val (origin,_) = dest_Const (head_of (snd ((Consts.the_abbreviation o Proof_Context.consts_of) ctxt abbrev))); + (*Check that the origin can be broken into 3 parts (i.e. 
it is from a locale) *) + val [_,_,_] = Long_Name.explode origin; + (*Remember the theory for the abbreviation*) + + val [qual,nm] = Long_Name.explode abbrev + in SOME qual end + catch _ => NONE\ in fold (curry (fn (abbrev,qual) => case (get_locale_origin abbrev) of SOME q => SOME q | NONE => NONE)) full_const_names NONE @@ -867,6 +880,12 @@ fun crunch_x atts extra prp_name wpigs consts ctxt = |> get_thms_from_facts ctxt val rules = rules @ Named_Theorems.get ctxt @{named_theorems crunch_rules} + val wp_combs = wpigs |> filter (fn (s,_) => s = #1 wp_comb_sect) |> map #2 |> thms + |> get_thms_from_facts ctxt + + val wp_comb_dels = wpigs |> filter (fn (s,_) => s = #1 wp_comb_del_sect) |> map #2 |> thms + |> get_thms_from_facts ctxt + fun mk_wp thm = let val ms = Thm.prop_of thm |> monads_of ctxt; val m = if length ms = 1 @@ -887,13 +906,17 @@ fun crunch_x atts extra prp_name wpigs consts ctxt = (const_terms ~~ full_const_names) val wp_dels = get_thms_from_facts ctxt wp_dels'; - val ctxt' = fold (fn thm => fn ctxt => Thm.proof_attributes [WeakestPre.wp_del] thm ctxt |> snd) - wp_dels ctxt; - - val ctxt'' = ctxt' delsimps simp_dels; + val ctxt' = + ctxt delsimps simp_dels + |> fold (fn thm => fn ctxt => Thm.proof_attributes [WeakestPre.wp_del] thm ctxt |> snd) + wp_dels + |> fold (fn thm => fn ctxt => Thm.proof_attributes [WeakestPre.combs_add] thm ctxt |> snd) + wp_combs + |> fold (fn thm => fn ctxt => Thm.proof_attributes [WeakestPre.combs_del] thm ctxt |> snd) + wp_comb_dels; val crunch_cfg = - {ctxt = ctxt'', prp_name = prp_name, nmspce = nmspce, wp_rules = wp_rules, + {ctxt = ctxt', prp_name = prp_name, nmspce = nmspce, wp_rules = wp_rules, wps_rules = wps_rules, igs = igs, simps = simps, ig_dels = ig_dels, rules = rules} val (_, thms) = @@ -902,7 +925,7 @@ fun crunch_x atts extra prp_name wpigs consts ctxt = val atts' = map (Attrib.check_src ctxt) atts; - val ctxt''' = fold (fn (name, thm) => add_thm thm atts' name) thms ctxt; + val new_ctxt = fold (fn (name, thm) => add_thm thm atts' name) thms ctxt; in Pretty.writeln (Pretty.big_list "proved:" @@ -911,7 +934,7 @@ fun crunch_x atts extra prp_name wpigs consts ctxt = [Pretty.str (n ^ ": "), Syntax.pretty_term ctxt (Thm.prop_of t)]) thms)); - ctxt''' + new_ctxt end end diff --git a/lib/defs.ML b/lib/defs.ML index 324867ed36..6dbf83b50c 100644 --- a/lib/defs.ML +++ b/lib/defs.ML @@ -4,6 +4,16 @@ * SPDX-License-Identifier: BSD-2-Clause *) +(* This is a slightly modified and simplified version of the old Isabelle "defs" command. + It still uses Global_Theory.add_def as "defs" did. + + The modifications are: + - "overloading" and "unchecked" are removed + - no attributes for the definition theorem + - only one equation per "defs" command + - deprecation warning removed +*) + signature OLD_DEFS= sig end @@ -15,21 +25,10 @@ fun read ctxt (b, str) = Syntax.read_prop ctxt str handle ERROR msg => cat_error msg ("The error(s) above occurred in definition " ^ Binding.print b); -fun add_defs ctxt ((unchecked, overloaded), args) thy = - (legacy_feature "Old 'defs' command -- use 'definition' (with 'overloading') instead"; - thy |> - (if unchecked then Global_Theory.add_defs_unchecked else Global_Theory.add_defs) - overloaded - (map (fn ((b, ax), srcs) => ((b, read ctxt (b, ax)), map (Attrib.attribute_cmd ctxt) srcs)) args)); - -val opt_unchecked_overloaded = - Scan.optional (@{keyword "("} |-- Parse.!!! 
- (((@{keyword "unchecked"} >> K true) -- - Scan.optional (@{keyword "overloaded"} >> K true) false || - @{keyword "overloaded"} >> K (false, true)) --| @{keyword ")"})) (false, false); +fun add_def ctxt (b, str) thy = Global_Theory.add_def (b, read ctxt (b, str)) thy fun syntax_alias global_alias local_alias b name = - Local_Theory.declaration {syntax = true, pervasive = true} (fn phi => + Local_Theory.declaration {syntax = true, pos = Position.none, pervasive = true} (fn phi => let val b' = Morphism.binding phi b in Context.mapping (global_alias b' name) (local_alias b' name) end); @@ -39,18 +38,14 @@ val const_alias = syntax_alias Sign.const_alias Proof_Context.const_alias; val _ = Outer_Syntax.command @{command_keyword defs} "define constants" - (Parse.opt_target -- (opt_unchecked_overloaded -- - Scan.repeat1 (Parse_Spec.thm_name ":" -- Parse.prop >> (fn ((x, y), z) => ((x, z), y)))) - >> (fn (target, (b, args)) => Toplevel.local_theory NONE target (fn lthy => + (Parse.opt_target -- (Parse.binding -- (Parse.$$$ ":" |-- Parse.prop)) + >> (fn (target, (b, str)) => Toplevel.local_theory NONE target (fn lthy => let - val args' = map (fn ((b, def), x) => ((Binding.suffix_name "__internal__" b, def), x)) args - val (thms, lthy') = Local_Theory.background_theory_result (add_defs lthy (b, args')) lthy; - val lthy'' = fold2 (fn ((b, _), _) => fn thm => - fn lthy => - let val (_, lthy') = Local_Theory.note ((b,[]), [thm]) lthy - in lthy' end) args thms lthy'; - val lthy''' = Local_Theory.raw_theory (fold (fn thm => - Global_Theory.hide_fact true (Thm.derivation_name thm)) thms) lthy'' + val b' = Binding.suffix_name "__internal__" b + val (thm, lthy') = Local_Theory.background_theory_result (add_def lthy (b', str)) lthy; + val (_, lthy'') = Local_Theory.note ((b,[]), [thm]) lthy' + val lthy''' = Local_Theory.raw_theory + (Global_Theory.hide_fact true (Thm.derivation_name thm)) lthy'' in lthy''' end))); diff --git a/lib/sep_algebra/Sep_Forward.thy b/lib/sep_algebra/Sep_Forward.thy index be46d40ca0..a87302e373 100644 --- a/lib/sep_algebra/Sep_Forward.thy +++ b/lib/sep_algebra/Sep_Forward.thy @@ -59,7 +59,8 @@ lemma sep_conj_coimpl_cancel''': lemma sep_coimpl_cancel': "(\s. pred_imp Q P s) \ (P \* R) s \ (\s. R s \ R' s) \ (Q \* R') s" - by (metis pred_neg_def sep_coimpl_def sep_conj_def) + by (metis sep_coimpl_weaken sep_snake_septraction septraction_snake_trivial) + definition "pointer P \ (\x y. \s R. (P x \* R) s \ (P y \* R and (\s. x = y)) s)" diff --git a/lib/test/Apply_Debug_Test.thy b/lib/test/Apply_Debug_Test.thy index b01ef6b7f4..c63f530130 100644 --- a/lib/test/Apply_Debug_Test.thy +++ b/lib/test/Apply_Debug_Test.thy @@ -6,8 +6,8 @@ theory Apply_Debug_Test imports - Lib.Apply_Debug - Lib.Apply_Trace_Cmd + Eisbach_Tools.Apply_Debug + Eisbach_Tools.Apply_Trace_Cmd begin chapter \Apply_Debug\ diff --git a/lib/test/CorresK_Test.thy b/lib/test/CorresK_Test.thy new file mode 100644 index 0000000000..c13943c6c4 --- /dev/null +++ b/lib/test/CorresK_Test.thy @@ -0,0 +1,409 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Test proofs for corres methods. Builds on AInvs image. +*) + +theory CorresK_Test +imports "Refine.VSpace_R" "Lib.CorresK_Method" +begin + +chapter \The Corres Method\ + +section \Introduction\ + +text \The @{method corresK} method tries to do for corres-style refinement proofs what +@{method wp} did for hoare logic proofs. 
The intention is to automate the application +of corres calculational rules, so that the bulk of the manual proof is now handling +a verification condition. In general refinement proofs are difficult to automate, so here we +exploit the fact that in l4v the abstract and executable specifications tend to be structurally +similar. Corres proofs are based on the @{const corres_underlying} constant, which takes a number +of parameters that allow it to be specialized for different flavours of refinement. + +A corres statement has the following form: @{term "corres_underlying sr nf nf' r P P' f f'"}, where +@{term sr} is a state-relation, @{term nf} and @{term nf'} refer to whether or not the left and +right hand functions may fail, @{term r} is a return value relation between the functions, @{term P} +and @{term P'} are preconditions for the functions @{term f} and @{term f'} respectively. Informally +the statement says that: under the given preconditions, for every execution of @{term f'} there exists +an execution of @{term f} that is related by the given state relation @{term sr} and return-value +relation @{term r}. + +If the left and right side of a corres statement share similar structure, we can "unzip" the function +into one corres obligation for each atomic function. This is done through the application of + @{thm corres_split}. +\ + +thm corres_split[no_vars] + +text \Briefly this states that: given a corres goal proving refinement between @{term "a >>= b"} and + @{term "c >>= d"}, we can decompose this into a proof showing refinement between @{term a} and +@{term c}, and between @{term a} and @{term c}. Additionally @{term a} and @{term c} must establish +appropriate postconditions to satisfy the obligations of proving refinement between @{term b} and @{term d}. + +The first subgoal that is produced has an important characteristic: the preconditions for each +side may only discuss the return value of its respective side. This means that rules such as +@{term "corres_underlying sr nf nf' r (\s. x = x') (\_. True) (f x) (f' x')"} will not apply to a goal + if @{term x} and @{term x'} are variables generated by applying @{thm corres_split} (i.e. the +return values of functions). + +This means that any such conditions must instead be phrased as an assumption to the rule, and our rule must be +rephrased as follows: + @{term "x = x' \ corres_underlying sr nf nf' r (\_. True) (\_. True) (f x) (f' x')"}. +The result is that we must solve @{term "x = x'"} immediately after applying our rule. While this +is not a major concern for a manual proof, it proves to be a significant obstacle if we're trying +to focus on automating the "corres" part of the refinement. +\ + +section \corres_underlyingK and corres_rv\ + +text \To remedy this situation, we augment the @{const corres_underlying} definition to include +yet another flag: a single boolean. This new constant: @{const corres_underlyingK}, +will form the basis of the calculus for our corres method.\ + +thm corres_underlyingK_def[no_vars] + +text \The boolean in @{const corres_underlyingK} can be thought of as a stateless precondition. It +is used to propagate additional proof obligations for rules that either do not need to discuss +either the left or right hand state, or must discuss bound variables from both sides.\ + +thm corresK_split[no_vars] + +text \In this split rule for @{const corres_underlyingK} we see that the additional precondition @{term F'} +may discuss both @{term rv} and @{term rv'}. 
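+This is exactly where a condition relating the two return values belongs: an equality between
+the abstract and the concrete return value, for example, cannot be phrased in either single-sided
+precondition, but it can be phrased in @{term F'}.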
To show that this condition is satisified, however, +we can't use hoare logic and instead need a new definition: @{const corres_rv}.\ + +thm corres_rv_def_I_know_what_I'm_doing[no_vars] + +text \This is a weaker form of @{const corres_underlying} that is only interested in the return value +of the functions. In essence, it states the given functions will establish @{term Q} after executing, +assuming the given return-value relation @{term r} holds, along with the given stateless precondition +@{term F} and left/right preconditions @{term P} and @{term P'}. + +The assumption in general is that corresK_rv rules should never be written, instead corresK_rv obligations +should be propagated into either the stateless precondition (@{term F} from @{term corres_underlyingK}), +the left precondition (@{term P}) or the right precondition @{term P'}. This is implicitly handled +by @{method corresK_rv} (called from @{method corresK}) by applying one of the following rules to each conjunct:\ + +thm corres_rv_defer +thm corres_rv_wp_left +thm corres_rv_wp_right + +text \If none of these rules can be safely applied, then @{method corresK_rv} will leave the + obligation untouched. The user can manually apply one of them if desired, but this is liable to + create unsolvable proof obligations. In the worst case, the user may manually solve the goal in-place.\ + +thm corres_rv_proveT[no_vars] + +section \The corres method\ + +text \The core algorithm of the corres method is simple: + 1) start by applying any necessary weakening rules to ensure the goal has schematic preconditions + 2) apply a known @{thm corres} or @{thm corresK} rule (see next section) + 3) if unsuccessful, apply a split rule (i.e. @{thm corresK_split}) and go to 2 + +Importantly, @{method corresK} will not split a goal if it ultimately is not able to apply at least +one @{thm corres} or @{thm corresK} rule. +\ + +subsection \The corres and corresK named_theorems\ + +text \To address the fact that existing refinement rules are phrased as @{const corres_underlying} +and not @{const corres_underlyingK} there are two different named_theorems that are used for different +kind of rules @{thm corres} and @{thm corresK}. A @{thm corres} rule is understood to be phrased +with @{const corres_underlying} and may have additional assumptions. These assumptions will be +propagated through the additional @{term F} flag in @{const corres_underlyingK}, rather than presented +as proof obligations immediately. A @{thm corresK} rule is understood to be phrased with +@{const corres_underlyingK}, and is meant for calculational rules which may have proper assumptions that +should not be propagated. +\ +thm corresK +thm corres + +subsection \The corresc method\ + +text \Similar to @{method wpc}, @{method corresKc} can handle case statements in @{const corres_underlyingK} +proof goals. Importantly, however, it is split into two sub-methods @{method corresKc_left} and +@{method corresKc_right}, which perform case-splitting on each side respectively. The combined method +@{method corresKc}, however, attempts to discharge the contradictions that arise from the quadratic +blowup of a case analysis on both the left and right sides.\ + +subsection \corres_concrete_r, corres_concrete_rE\ + +text \Some @{thm corresK} rules should only be applied if certain variables are concrete +(i.e. not schematic) in the goal. These are classified separately with the named_theorems +@{thm corresK_concrete_r} and @{thm corresK_concrete_rER}. 
The first +indicates that the return value relation of the goal must be concrete, the second indicates that +only the left side of the error relation must be concrete.\ + +thm corresK_concrete_r +thm corresK_concrete_rER + +subsection \The corresK_search method\ + +text \The purpose of @{method corresK_search} is to address cases where there is non-trivial control flow. +In particular: in the case where there is an "if" statement or either side needs to be symbolically +executed. The core idea is that corresK_search should be provided with a "search" rule that acts +as an anchoring point. Symbolic execution and control flow is decomposed until either the given +rule is successfully applied or all search branches are exhausted.\ + +subsubsection \Symbolic Execution\ + +text \Symbolic execution is handled by two named theorems: + @{thm corresK_symb_exec_ls} and @{thm corresK_symb_exec_rs}, which perform symbolic execution on +the left and right hand sides of a corres goal.\ + +thm corresK_symb_exec_ls +thm corresK_symb_exec_rs + +text \A function may be symbolically executed if it does not modify the state, i.e. its only purpose +is to compute some value and return it. After being symbolically executed, +this value can only be discussed by the precondition of the associated side or the stateless +precondition of corresK. The resulting @{const corres_rv} goal has @{const corres_noop} as the +function on the alternate side. This gives @{method corresK_rv} a hint that the resulting obligation +should be aggressively re-written into a hoare triple over @{term m} if it can't be propagated +back statelessly safely. +\ + + +section \Demo\ + + +context begin interpretation Arch . + +(* VSpace_R *) + + +lemmas load_hw_asid_corres_args[corres] = + loadHWASID_corres[@lift_corres_args] + +lemmas invalidate_asid_corres_args[corres] = + invalidateASID_corres[@lift_corres_args] + +lemmas invalidate_hw_asid_entry_corres_args[corres] = + invalidateHWASIDEntry_corres[@lift_corres_args] + +lemma invalidateASIDEntry_corres: + "corres dc (valid_vspace_objs and valid_asid_map + and K (asid \ mask asid_bits \ asid \ 0) + and vspace_at_asid asid pd and valid_vs_lookup + and unique_table_refs o caps_of_state + and valid_global_objs and valid_arch_state + and pspace_aligned and pspace_distinct) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (invalidate_asid_entry asid) (invalidateASIDEntry asid)" + apply (simp add: invalidate_asid_entry_def invalidateASIDEntry_def) + apply_debug (trace) (* apply_trace between steps *) + (tags "corres") (* break at breakpoints labelled "corres" *) + corresK (* weaken precondition *) + continue (* split *) + continue (* solve load_hw_asid *) + continue (* split *) + continue (* apply corres_when *) + continue (* trivial simplification *) + continue (* invalidate _hw_asid_entry *) + finish (* invalidate_asid *) + + apply (corresKsimp wp: load_hw_asid_wp)+ + apply (fastforce simp: pd_at_asid_uniq) + done + + +crunch typ_at'[wp]: invalidateASIDEntry, flushSpace "typ_at' T t" +crunch ksCurThread[wp]: invalidateASIDEntry, flushSpace "\s. P (ksCurThread s)" +crunch obj_at'[wp]: invalidateASIDEntry, flushSpace "obj_at' P p" + +lemmas flush_space_corres_args[corres] = + flushSpace_corres[@lift_corres_args] + +lemmas invalidate_asid_entry_corres_args[corres] = + invalidateASIDEntry_corres[@lift_corres_args] + + +lemma corres_inst_eq_ext: + "(\x. 
corres_inst_eq (f x) (f' x)) \ corres_inst_eq f f'" + by (auto simp add: corres_inst_eq_def) + +lemma delete_asid_corresb: + notes [corres] = corres_gets_asid getCurThread_corres setObject_ASIDPool_corres and + [@lift_corres_args, corres] = get_asid_pool_corres_inv' + invalidateASIDEntry_corres + setVMRoot_corres + notes [wp] = set_asid_pool_asid_map_unmap set_asid_pool_vs_lookup_unmap' + set_asid_pool_vspace_objs_unmap' + invalidate_asid_entry_invalidates + getASID_wp + notes if_weak_cong[cong] option.case_cong_weak[cong] + shows + "corres dc + (invs and valid_etcbs and K (asid \ mask asid_bits \ asid \ 0)) + (pspace_aligned' and pspace_distinct' and no_0_obj' + and valid_arch_state' and cur_tcb') + (delete_asid asid pd) (deleteASID asid pd)" + apply (simp add: delete_asid_def deleteASID_def) + apply_debug (trace) (* apply_trace between steps *) + (tags "corres") (* break at breakpoints labelled "corres" *) + corresK (* weaken precondition *) + continue (* split *) + continue (* gets rule *) + continue (* corresc *) + continue (* return rule *) + continue (* split *) + continue (* function application *) + continue (* liftM rule *) + continue (* get_asid_pool_corres_inv' *) + continue (* function application *) + continue (* function application *) + continue (* corresK_when *) + continue (* split *) + continue (* flushSpace_corres *) + continue (* K_bind *) + continue (* K_bind *) + continue (* split *) + continue (* invalidateASIDEntry_corres *) + continue (* K_bind *) + continue (* return bind *) + continue (* K_bind *) + continue (* split *) + continue (* backtracking *) + continue (* split *) + continue (* function application *) + continue (* setObject_ASIDPool_corres *) + continue (* K_bind *) + continue (* K_bind *) + continue (* split *) + continue (* getCurThread_corres *) + continue (* setVMRoot_corres *) + finish (* backtracking? *) + apply (corresKsimp simp: mask_asid_low_bits_ucast_ucast + | fold cur_tcb_def | wps)+ + apply (frule arm_asid_table_related,clarsimp) + apply (rule conjI) + apply (intro impI allI) + apply (rule conjI) + apply (safe; assumption?) + apply (rule ext) + apply (fastforce simp: inv_def dest: ucast_ucast_eq) + apply (rule context_conjI) + apply (fastforce simp: o_def dest: valid_asid_tableD invs_valid_asid_table) + apply (intro allI impI) + apply (subgoal_tac "vspace_at_asid asid pd s") + prefer 2 + apply (simp add: vspace_at_asid_def) + apply (rule vs_lookupI) + apply (simp add: vs_asid_refs_def) + apply (rule image_eqI[OF refl]) + apply (rule graph_ofI) + apply fastforce + apply (rule r_into_rtrancl) + apply simp + apply (rule vs_lookup1I [OF _ _ refl], assumption) + apply (simp add: vs_refs_def) + apply (rule image_eqI[rotated], erule graph_ofI) + apply (simp add: mask_asid_low_bits_ucast_ucast) + prefer 2 + apply (intro allI impI context_conjI; assumption?) + apply (rule aligned_distinct_relation_asid_pool_atI'; fastforce?) + apply (fastforce simp: o_def dest: valid_asid_tableD invs_valid_asid_table) + apply (simp add: cur_tcb'_def) + apply (safe; assumption?) + apply (erule ko_at_weakenE) + apply (clarsimp simp: graph_of_def) + apply (fastforce split: if_split_asm) + apply (frule invs_vspace_objs) + apply (drule (2) valid_vspace_objsD) + apply (erule ranE) + apply (fastforce split: if_split_asm) + apply (erule ko_at_weakenE) + apply (clarsimp simp: graph_of_def) + apply (fastforce split: if_split_asm) + done + +lemma cte_wp_at_ex: + "cte_wp_at (\_. True) p s \ (\cap. 
cte_wp_at ((=) cap) p s)" + by (simp add: cte_wp_at_def) + +(* Sadly broken: +lemma setVMRootForFlush_corres: + notes [corres] = getCurThread_corres getSlotCap_corres + shows + "corres (=) + (cur_tcb and vspace_at_asid asid pd + and K (asid \ 0 \ asid \ mask asid_bits) + and valid_asid_map and valid_vs_lookup + and valid_vspace_objs and valid_global_objs + and unique_table_refs o caps_of_state + and valid_arch_state + and pspace_aligned and pspace_distinct) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (set_vm_root_for_flush pd asid) + (setVMRootForFlush pd asid)" + apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) + apply corres + apply_debug (trace) (tags "corresK_search") (corresK_search search: armv_contextSwitch_corres) + continue (* step left *) + continue (* if rule *) + continue (* failed corres on first subgoal, trying next *) + continue (* fail corres on last subgoal, trying reverse if rule *) + continue (* can't make corres progress here, trying other goal *) + finish (* successful goal discharged by corres *) + + apply (corresKsimp wp: get_cap_wp getSlotCap_wp)+ + apply (rule context_conjI) + subgoal by (simp add: cte_map_def objBits_simps tcb_cnode_index_def + tcbVTableSlot_def to_bl_1 cte_level_bits_def) + apply (rule context_conjI) + subgoal by (fastforce simp: cur_tcb_def intro!: tcb_at_cte_at_1[simplified]) + apply (rule conjI) + subgoal by (fastforce simp: isCap_simps) + apply (drule cte_wp_at_ex) + apply clarsimp + apply (drule (1) pspace_relation_cte_wp_at[rotated 1]; (assumption | clarsimp)?) + apply (drule cte_wp_at_norm') + apply clarsimp + apply (rule_tac x="cteCap cte" in exI) + apply (auto elim: cte_wp_at_weakenE' dest!: curthread_relation) + done + +text \Note we can wrap it all up in corresKsimp\ + +lemma setVMRootForFlush_corres': + notes [corres] = getCurThread_corres getSlotCap_corres + shows + "corres (=) + (cur_tcb and vspace_at_asid asid pd + and K (asid \ 0 \ asid \ mask asid_bits) + and valid_asid_map and valid_vs_lookup + and valid_vspace_objs and valid_global_objs + and unique_table_refs o caps_of_state + and valid_arch_state + and pspace_aligned and pspace_distinct) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (set_vm_root_for_flush pd asid) + (setVMRootForFlush pd asid)" + apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) + apply (corresKsimp search: armv_contextSwitch_corres + wp: get_cap_wp getSlotCap_wp + simp: isCap_simps) + apply (rule context_conjI) + subgoal by (simp add: cte_map_def objBits_simps tcb_cnode_index_def + tcbVTableSlot_def to_bl_1 cte_level_bits_def) + apply (rule context_conjI) + subgoal by (fastforce simp: cur_tcb_def intro!: tcb_at_cte_at_1[simplified]) + apply (rule conjI) + subgoal by (fastforce) + apply (drule cte_wp_at_ex) + apply clarsimp + apply (drule (1) pspace_relation_cte_wp_at[rotated 1]; (assumption | clarsimp)?) 
+ apply (drule cte_wp_at_norm') + apply clarsimp + apply (rule_tac x="cteCap cte" in exI) + apply (auto elim: cte_wp_at_weakenE' dest!: curthread_relation) + done +*) + +end +end diff --git a/lib/test/Corres_Test.thy b/lib/test/Corres_Test.thy old mode 100755 new mode 100644 index 3f18cdbe3a..eec44b9e1e --- a/lib/test/Corres_Test.thy +++ b/lib/test/Corres_Test.thy @@ -1,410 +1,312 @@ (* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2023, Proofcraft Pty Ltd * - * SPDX-License-Identifier: GPL-2.0-only + * SPDX-License-Identifier: BSD-2-Clause *) -(* - Test proofs for corres methods. Builds on AInvs image. -*) - theory Corres_Test -imports "Refine.VSpace_R" "Lib.Corres_Method" +imports Lib.Corres_Method begin -chapter \The Corres Method\ - -section \Introduction\ - -text \The @{method corres} method tries to do for corres-style refinement proofs what -@{method wp} did for hoare logic proofs. The intention is to automate the application -of corres calculational rules, so that the bulk of the manual proof is now handling -a verification condition. In general refinement proofs are difficult to automate, so here we -exploit the fact that in l4v the abstract and executable specifications tend to be structurally -similar. Corres proofs are based on the @{const corres_underlying} constant, which takes a number -of parameters that allow it to be specialized for different flavours of refinement. - -A corres statement has the following form: @{term "corres_underlying sr nf nf' r P P' f f'"}, where -@{term sr} is a state-relation, @{term nf} and @{term nf'} refer to whether or not the left and -right hand functions may fail, @{term r} is a return value relation between the functions, @{term P} -and @{term P'} are preconditions for the functions @{term f} and @{term f'} respectively. Informally -the statement says that: under the given preconditions, for every execution of @{term f'} there exists -an execution of @{term f} that is related by the given state relation @{term sr} and return-value -relation @{term r}. - -If the left and right side of a corres statement share similar structure, we can "unzip" the function -into one corres obligation for each atomic function. This is done through the application of - @{thm corres_split}. -\ - -thm corres_split[no_vars] - -text \Briefly this states that: given a corres goal proving refinement between @{term "a >>= b"} and - @{term "c >>= d"}, we can decompose this into a proof showing refinement between @{term a} and -@{term c}, and between @{term a} and @{term c}. Additionally @{term a} and @{term c} must establish -appropriate postconditions to satisfy the obligations of proving refinement between @{term b} and @{term d}. - -The first subgoal that is produced has an important characteristic: the preconditions for each -side may only discuss the return value of its respective side. This means that rules such as -@{term "corres_underlying sr nf nf' r (\s. x = x') (\_. True) (f x) (f' x')"} will not apply to a goal - if @{term x} and @{term x'} are variables generated by applying @{thm corres_split} (i.e. the -return values of functions). +(* Test cases and tutorial/docs for Corres_Method *) -This means that any such conditions must instead be phrased as an assumption to the rule, and our rule must be -rephrased as follows: - @{term "x = x' \ corres_underlying sr nf nf' r (\_. True) (\_. True) (f x) (f' x')"}. -The result is that we must solve @{term "x = x'"} immediately after applying our rule. 
While this -is not a major concern for a manual proof, it proves to be a significant obstacle if we're trying -to focus on automating the "corres" part of the refinement. -\ -section \corres_underlyingK and corres_rv\ +section "Setup" -text \To remedy this situation, we augment the @{const corres_underlying} definition to include -yet another flag: a single boolean. This new constant: @{const corres_underlyingK}, -will form the basis of the calculus for our corres method.\ - -thm corres_underlyingK_def[no_vars] - -text \The boolean in @{const corres_underlyingK} can be thought of as a stateless precondition. It -is used to propagate additional proof obligations for rules that either do not need to discuss -either the left or right hand state, or must discuss bound variables from both sides.\ +(* Setting up some monads and lemmas to play with later *) +experiment + fixes sr nf nf' -thm corresK_split[no_vars] + fixes f f' :: "('s, nat) nondet_monad" + assumes f: "corres_underlying sr nf nf' (=) \ \ f f'" -text \In this split rule for @{const corres_underlyingK} we see that the additional precondition @{term F'} -may discuss both @{term rv} and @{term rv'}. To show that this condition is satisified, however, -we can't use hoare logic and instead need a new definition: @{const corres_rv}.\ + fixes Q g g' t + assumes g: "\x x'::nat. x = t x' \ corres_underlying sr nf nf' (=) Q \ (g x) (g' x')" + assumes t: "\x. t x = x" -thm corres_rv_def_I_know_what_I'm_doing[no_vars] + fixes P + assumes Q: "\P\ f \\_. Q\" -text \This is a weaker form of @{const corres_underlying} that is only interested in the return value -of the functions. In essence, it states the given functions will establish @{term Q} after executing, -assuming the given return-value relation @{term r} holds, along with the given stateless precondition -@{term F} and left/right preconditions @{term P} and @{term P'}. - -The assumption in general is that corres_rv rules should never be written, instead corres_rv obligations -should be propagated into either the stateless precondition (@{term F} from @{term corres_underlyingK}), -the left precondition (@{term P}) or the right precondition @{term P'}. This is implicitly handled -by @{method corres_rv} (called from @{method corres}) by applying one of the following rules to each conjunct:\ + fixes h h' + assumes h: "corres_underlying sr nf nf' (=) \ \ h h'" +begin -thm corres_rv_defer -thm corres_rv_wp_left -thm corres_rv_wp_right +abbreviation "corres \ corres_underlying sr nf nf'" -text \If none of these rules can be safely applied, then @{method corres_rv} will leave the - obligation untouched. The user can manually apply one of them if desired, but this is liable to - create unsolvable proof obligations. In the worst case, the user may manually solve the goal in-place.\ -thm corres_rv_proveT[no_vars] +section "Examples" -section \The corres method\ +(* The purpose of the corres method is to make progres on easy corres steps, where things + "obviously" match up on the concrete and abstract side. You can provide basic terminal + corres rules like f and g to try. You can provide simp rules to rewrite corres goals + and to solve side conditions of terminal rules such as the rule for g above. Finally, + you can provide wp rules to solve or make progress on the final wp goals that a corres + proof produces. 
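+   The first lemma below is one such invocation: it passes the corres rules f and g, the simp rule t
+   for the side condition of g, and the wp rule Q for the remaining wp goals, all in a single call.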
*) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + by (corres corres: f g wp: Q simp: t) -text \The core algorithm of the corres method is simple: - 1) start by applying any necessary weakening rules to ensure the goal has schematic preconditions - 2) apply a known @{thm corres} or @{thm corresK} rule (see next section) - 3) if unsuccessful, apply a split rule (i.e. @{thm corresK_split}) and go to 2 - -Importantly, @{method corres} will not split a goal if it ultimately is not able to apply at least -one @{thm corres} or @{thm corresK} rule. -\ - -subsection \The corres and corresK named_theorems\ - -text \To address the fact that existing refinement rules are phrased as @{const corres_underlying} -and not @{const corres_underlyingK} there are two different named_theorems that are used for different -kind of rules @{thm corres} and @{thm corresK}. A @{thm corres} rule is understood to be phrased -with @{const corres_underlying} and may have additional assumptions. These assumptions will be -propagated through the additional @{term F} flag in @{const corres_underlyingK}, rather than presented -as proof obligations immediately. A @{thm corresK} rule is understood to be phrased with -@{const corres_underlyingK}, and is meant for calculational rules which may have proper assumptions that -should not be propagated. -\ -thm corresK -thm corres - -subsection \The corresc method\ - -text \Similar to @{method wpc}, @{method corresc} can handle case statements in @{const corres_underlyingK} -proof goals. Importantly, however, it is split into two sub-methods @{method corresc_left} and -@{method corresc_right}, which perform case-splitting on each side respectively. The combined method -@{method corresc}, however, attempts to discharge the contradictions that arise from the quadratic -blowup of a case analysis on both the left and right sides.\ - -subsection \corres_concrete_r, corres_concrete_rE\ - -text \Some @{thm corresK} rules should only be applied if certain variables are concrete -(i.e. not schematic) in the goal. These are classified separately with the named_theorems -@{thm corres_concrete_r} and @{thm corres_concrete_rER}. The first -indicates that the return value relation of the goal must be concrete, the second indicates that -only the left side of the error relation must be concrete.\ - -thm corres_concrete_r -thm corres_concrete_rER - -subsection \The corres_search method\ +(* All of these can be declared globally and will be picked up by the method *) +context + notes [corres] = f g + notes [wp] = Q + notes [simp] = t +begin -text \The purpose of @{method corres_search} is to address cases where there is non-trivial control flow. -In particular: in the case where there is an "if" statement or either side needs to be symbolically -executed. The core idea is that corres_search should be provided with a "search" rule that acts -as an anchoring point. Symbolic execution and control flow is decomposed until either the given -rule is successfully applied or all search branches are exhausted.\ +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + by corres -subsubsection \Symbolic Execution\ +end -text \Symbolic execution is handled by two named theorems: - @{thm corres_symb_exec_ls} and @{thm corres_symb_exec_rs}, which perform symbolic execution on -the left and right hand sides of a corres goal.\ +(* During development, the rules needed are often not declared [corres] yet or the right + simp rules for side conditions etc have yet to be figured out. 
The following proof + demonstrates this process. *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + (* We begin by invoking "corres" *) + apply corres + (* In this case, not much has happened yet, corres has only produced schematic preconditions. + However, we can see that f and f' are the heads of both sides, and searching with find_theorems + for a corres rule that mentions those two turns up the rule "f", which we provided to the corres + method. At this point we can either go back and add it to the previous line, or we + add a new invocation. The process is very similar to using wpsimp. *) + apply (corres corres: f) + (* We see that f has been split off, and we now have a goal for g. Same process as above finds + the corresponding rule. *) + apply (corres corres: g) + (* This solves the corres goal but leaves the side condition of the "g" rule. We can + now either solve it manually with "apply (simp add: t)" and then continue, or, if it really + is as simple as a few simp rules, we can tell the corres method to apply it directly *) + apply (corres simp: t) + (* We now have only wp goals and the final implication left. *) + apply (wp Q) + apply wp + apply simp + apply simp + done -thm corres_symb_exec_ls -thm corres_symb_exec_rs +(* Once we have found this proof, we can roll it up, and merge eg. the "simp: t" into the corres + line before. *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + apply corres + apply (corres corres: f) + apply (corres corres: g simp: t) + (* Adding "wp: Q" to the previous line does not help at this stage, because this wp goal + is produced in the (corres corres: f) line above. We could do + apply (corres corres: g simp: t wp: Q)+ + above, which *would* solve the rest of the goals, but using + in an uncontrolled way + is not very stable and therefore not recommended style. *) + apply (wp Q) + apply wp + apply simp + apply simp + done -text \A function may be symbolically executed if it does not modify the state, i.e. its only purpose -is to compute some value and return it. After being symbolically executed, -this value can only be discussed by the precondition of the associated side or the stateless -precondition of corresK. The resulting @{const corres_rv} goal has @{const corres_noop} as the -function on the alternate side. This gives @{method corres_rv} a hint that the resulting obligation -should be aggressively re-written into a hoare triple over @{term m} if it can't be propagated -back statelessly safely. -\ +(* Merging the g and f corres lines does enable us to prove the Q wp rule. *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + apply corres + apply (corres corres: f g simp: t wp: Q) + (* This will still leave the final implication, because we have produced that implication + outside this subgoal. Merging the two corres invocations above will attempt the final + implications automatically as well. *) + apply simp + apply simp + done -section \Demo\ - +section "More controlled single-stepping" + +(* Sometimes invoking "corres" does too much or too little. + Too much can occur when the method applies a rule we didn't know is in the [corres] set and + which leaves us with a strange side condition to solve. Or we may have added an unsafe, + not-really-terminal rule to [corres] and now we are getting an unprovable goal. Too little + can occur when the method refuses to split off the head terms even though it looks like a + terminal corres rule should apply. 
For these cases, we can take apart some of the internal + steps like this: *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + (* Controlled way to only introduce schematic preconditions and the final implication *) + apply corres_pre + (* Invoking "corres" would now fail. Maybe we are convinced that the "f" rule is declared + [corres] and we want to figure out why it does not apply. Invoking the corres_split method + will give us the goal the terminal corres rule is tried on: *) + apply corres_split + (* Trying out "rule f" does work now -- if it didn't we could debug that and find out why *) + apply (succeeds \rule f\) + (* Turns out we forgot to declare it, so we add it manually, and the corres method now + succeeds on the subgoal *) + apply (corres corres: f) + (* For the next goal, we have only g. Maybe we want to debug why corres doesn't solve the + application of the "g" rule automatically, or where the "x = t x" side condition comes from. + To do that, we can apply the rule manually: *) + apply (rule g) + (* Now it is clear where that side condition comes from, and we can look for rules to solve + it. *) + apply (simp add: t) + apply (wpsimp wp: Q)+ + done -context begin interpretation Arch . - -(* VSpace_R *) - -lemmas load_hw_asid_corres_args[corres] = - loadHWASID_corres[@lift_corres_args] - -lemmas invalidate_asid_corres_args[corres] = - invalidateASID_corres[@lift_corres_args] - -lemmas invalidate_hw_asid_entry_corres_args[corres] = - invalidateHWASIDEntry_corres[@lift_corres_args] - -lemma invalidateASIDEntry_corres: - "corres dc (valid_vspace_objs and valid_asid_map - and K (asid \ mask asid_bits \ asid \ 0) - and vspace_at_asid asid pd and valid_vs_lookup - and unique_table_refs o caps_of_state - and valid_global_objs and valid_arch_state - and pspace_aligned and pspace_distinct) - (pspace_aligned' and pspace_distinct' and no_0_obj') - (invalidate_asid_entry asid) (invalidateASIDEntry asid)" - apply (simp add: invalidate_asid_entry_def invalidateASIDEntry_def) - apply_debug (trace) (* apply_trace between steps *) - (tags "corres") (* break at breakpoints labelled "corres" *) - corres (* weaken precondition *) - continue (* split *) - continue (* solve load_hw_asid *) - continue (* split *) - continue (* apply corres_when *) - continue (* trivial simplification *) - continue (* invalidate _hw_asid_entry *) - finish (* invalidate_asid *) - - apply (corressimp wp: load_hw_asid_wp)+ - apply clarsimp - apply (fastforce simp: pd_at_asid_uniq) +(* Using apply_debug *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + (* The corres method declares a "corres" breakpoint tag that can be used with apply_debug to + step through what it does. This is useful if the method goes too far or applies rules we + didn't expect. The (trace) option to apply_debug allows us to see which rules were applied. 
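+     In the session below, each continue command runs the method on to its next "corres" breakpoint
+     and finish ends the session; the comment on each step records what the trace reported there.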
*) + apply_debug (trace) (tags "corres") (corres corres: f g simp: t wp: Q) + continue (* guard implication *) + continue (* application of f *) + continue (* application of g, including solved side condition for t *) + continue (* wpsimp+, which happens to solve all remaining goals *) + finish done +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + (* There is also a corres_cleanup breakpoint for further data *) + apply_debug (trace) (tags "corres", "corres_cleanup") (corres corres: f g simp: t wp: Q) + continue (* guard implication *) + continue (* application of f *) + continue (* application of g, showing side condition *) + continue (* solve side condition (separate goal) *) + continue (* wpsimp+, which happens to solve all remaining goals *) + finish + done -crunch typ_at'[wp]: invalidateASIDEntry, flushSpace "typ_at' T t" -crunch ksCurThread[wp]: invalidateASIDEntry, flushSpace "\s. P (ksCurThread s)" -crunch obj_at'[wp]: invalidateASIDEntry, flushSpace "obj_at' P p" - -lemmas flush_space_corres_args[corres] = - flushSpace_corres[@lift_corres_args] - -lemmas invalidate_asid_entry_corres_args[corres] = - invalidateASIDEntry_corres[@lift_corres_args] - - -lemma corres_inst_eq_ext: - "(\x. corres_inst_eq (f x) (f' x)) \ corres_inst_eq f f'" - by (auto simp add: corres_inst_eq_def) - -lemma delete_asid_corresb: - notes [corres] = corres_gets_asid getCurThread_corres setObject_ASIDPool_corres and - [@lift_corres_args, corres] = get_asid_pool_corres_inv' - invalidateASIDEntry_corres - setVMRoot_corres - notes [wp] = set_asid_pool_asid_map_unmap set_asid_pool_vs_lookup_unmap' - set_asid_pool_vspace_objs_unmap' - invalidate_asid_entry_invalidates - getASID_wp - notes if_weak_cong[cong] option.case_cong_weak[cong] - shows - "corres dc - (invs and valid_etcbs and K (asid \ mask asid_bits \ asid \ 0)) - (pspace_aligned' and pspace_distinct' and no_0_obj' - and valid_arch_state' and cur_tcb') - (delete_asid asid pd) (deleteASID asid pd)" - apply (simp add: delete_asid_def deleteASID_def) - apply_debug (trace) (* apply_trace between steps *) - (tags "corres") (* break at breakpoints labelled "corres" *) - corres (* weaken precondition *) - continue (* split *) - continue (* gets rule *) - continue (* corresc *) - continue (* return rule *) - continue (* split *) - continue (* function application *) - continue (* liftM rule *) - continue (* get_asid_pool_corres_inv' *) - continue (* function application *) - continue (* function application *) - continue (* corresK_when *) - continue (* split *) - continue (* flushSpace_corres *) - continue (* K_bind *) - continue (* K_bind *) - continue (* split *) - continue (* invalidateASIDEntry_corres *) - continue (* K_bind *) - continue (* return bind *) - continue (* K_bind *) - continue (* split *) - continue (* backtracking *) - continue (* split *) - continue (* function application *) - continue (* setObject_ASIDPool_corres *) - continue (* K_bind *) - continue (* K_bind *) - continue (* split *) - continue (* getCurThread_corres *) - continue (* setVMRoot_corres *) - finish (* backtracking? *) - apply (corressimp simp: mask_asid_low_bits_ucast_ucast - | fold cur_tcb_def | wps)+ - apply (frule arm_asid_table_related,clarsimp) - apply (rule conjI) - apply (intro impI allI) - apply (rule conjI) - apply (safe; assumption?) 
- apply (rule ext) - apply (fastforce simp: inv_def dest: ucast_ucast_eq) - apply (rule context_conjI) - apply (fastforce simp: o_def dest: valid_asid_tableD invs_valid_asid_table) - apply (intro allI impI) - apply (subgoal_tac "vspace_at_asid asid pd s") - prefer 2 - apply (simp add: vspace_at_asid_def) - apply (rule vs_lookupI) - apply (simp add: vs_asid_refs_def) - apply (rule image_eqI[OF refl]) - apply (rule graph_ofI) - apply fastforce - apply (rule r_into_rtrancl) - apply simp - apply (rule vs_lookup1I [OF _ _ refl], assumption) - apply (simp add: vs_refs_def) - apply (rule image_eqI[rotated], erule graph_ofI) - apply (simp add: mask_asid_low_bits_ucast_ucast) - prefer 2 - apply (intro allI impI context_conjI; assumption?) - apply (rule aligned_distinct_relation_asid_pool_atI'; fastforce?) - apply (fastforce simp: o_def dest: valid_asid_tableD invs_valid_asid_table) - apply (simp add: cur_tcb'_def) - apply (safe; assumption?) - apply (erule ko_at_weakenE) - apply (clarsimp simp: graph_of_def) - apply (fastforce split: if_split_asm) - apply (frule invs_vspace_objs) - apply (drule (2) valid_vspace_objsD) - apply (erule ranE) - apply (fastforce split: if_split_asm) - apply (erule ko_at_weakenE) - apply (clarsimp simp: graph_of_def) - apply (fastforce split: if_split_asm) +(* Rewriting corres terms *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ liftM t f'; g' y od)" + (* In this goal, corres will stop at liftM without finding a rule to apply. Unfolding + liftM_def exposes the bare f' to the toplevel and lets it apply the existing "f" rule. + The "t" rewrite happens to solve the now more complex side condition for g. + Unfolding liftM_def is generally preferred to the liftM corres simp rules, because + these transform schematic guards in ways that later hinder unification. *) + by (corres corres: f g simp: liftM_def t wp: Q) + +(* Rewriting corres terms more carefully *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ liftM t f'; g' y od)" + (* "term_simp" tells corres to apply the following simp rules only to the side conditions + of terminal corres steps, not to the corres terms themselves. Usually those simp rules + are fairly distinct and side-condition rules don't do anything to the corres terms, so + it's fine to put them in the "simp:" section, but occasionally we want more control. *) + by (corres corres: f g simp: liftM_def term_simp: t wp: Q) + +(* Dealing with asserts and symbolic execution *) +lemma "corres (=) P \ (do s \ get; assert (P s); x \ f; g x od) (do y \ f'; g' y od)" + (* Here we'd like to do symbolic execution on "get" and then use the unsafe rule + corres_assert_gen_asm_l for the assert. Often it is good enough to locally + provide such rules as [corres], but adding corres_symb_exec_l here for instance will + go too far. It will try to execute all of get, assert, and f: *) + apply (corres corres: corres_symb_exec_l[where P=P]) + (* unsolvable *) + oops + +lemma "corres (=) P \ (do s \ get; assert (P s); x \ f; g x od) (do y \ f'; g' y od)" + (* We can provide the same rule as a fallback rule. This means it will be tried only when + no other rule has worked. This lets f and corres_assert_gen_asm_l go first. 
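+     Since corres_assert_gen_asm_l handles the assert, the fallback symbolic execution should then
+     only be needed for get.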
*) + by (corres corres: corres_assert_gen_asm_l f g + fallback: corres_symb_exec_l[where P=P] + simp: t wp: Q) + +lemma "corres (=) P \ (do s \ get; assert (P s); x \ f; g x od) (do y \ f'; g' y od)" + (* For even more control, we can instantiate the rule further: *) + by (corres corres: corres_assert_gen_asm_l f g + fallback: corres_symb_exec_l[where P=P and m=get] + simp: t wp: Q) + + +section "@{method corres'} and @{method corres_cleanup} parameter methods" + +(* First with corres only, no cleanup method: *) +lemma "corres (=) P \ (do x \ f; g x; h od) (do y \ f'; g' y; h' od)" + apply (corres corres: f g) + (* Imagine we get here, and (simp add: t) wasn't strong enough to solve the side condition. + Maybe we needed fastforce for it: *) + apply (fastforce simp: t) + (* It is absolutely fine to leave this fastforce here, and continue the corres proof *) + apply (corres corres: h) + apply (wpsimp wp: Q)+ done -lemma cte_wp_at_ex: - "cte_wp_at (\_. True) p s \ (\cap. cte_wp_at ((=) cap) p s)" - by (simp add: cte_wp_at_def) - -(* Sadly broken: -lemma setVMRootForFlush_corres: - notes [corres] = getCurThread_corres getSlotCap_corres - shows - "corres (=) - (cur_tcb and vspace_at_asid asid pd - and K (asid \ 0 \ asid \ mask asid_bits) - and valid_asid_map and valid_vs_lookup - and valid_vspace_objs and valid_global_objs - and unique_table_refs o caps_of_state - and valid_arch_state - and pspace_aligned and pspace_distinct) - (pspace_aligned' and pspace_distinct' and no_0_obj') - (set_vm_root_for_flush pd asid) - (setVMRootForFlush pd asid)" - apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) - apply corres - apply_debug (trace) (tags "corres_search") (corres_search search: armv_contextSwitch_corres) - continue (* step left *) - continue (* if rule *) - continue (* failed corres on first subgoal, trying next *) - continue (* fail corres on last subgoal, trying reverse if rule *) - continue (* can't make corres progress here, trying other goal *) - finish (* successful goal discharged by corres *) - - apply (corressimp wp: get_cap_wp getSlotCap_wp)+ - apply (rule context_conjI) - subgoal by (simp add: cte_map_def objBits_simps tcb_cnode_index_def - tcbVTableSlot_def to_bl_1 cte_level_bits_def) - apply (rule context_conjI) - subgoal by (fastforce simp: cur_tcb_def intro!: tcb_at_cte_at_1[simplified]) - apply (rule conjI) - subgoal by (fastforce simp: isCap_simps) - apply (drule cte_wp_at_ex) - apply clarsimp - apply (drule (1) pspace_relation_cte_wp_at[rotated 1]; (assumption | clarsimp)?) - apply (drule cte_wp_at_norm') - apply clarsimp - apply (rule_tac x="cteCap cte" in exI) - apply (auto elim: cte_wp_at_weakenE' dest!: curthread_relation) +(* Sometimes that one fastforce is the only thing standing in the way of full automation. Providing + the fastforce as a cleanup method can help here. *) +lemma "corres (=) P \ (do x \ f; g x; h od) (do y \ f'; g' y; h' od)" + by (corres' \fastforce simp: t\ corres: f g h wp: Q) + +(* Providing "succeed" will stop at any side condition without solving it. 
Occasionally useful for + debugging: *) +lemma "corres (=) P \ (do x \ f; g x; h od) (do y \ f'; g' y; h' od)" + apply (corres' \succeed\ corres: f g h term_simp: t) + (* stops at side condition for g, even though t was available in term_simp *) + apply (simp add: t) + apply (corres corres: h) + apply (wpsimp wp: Q)+ done -text \Note we can wrap it all up in corressimp\ - -lemma setVMRootForFlush_corres': - notes [corres] = getCurThread_corres getSlotCap_corres - shows - "corres (=) - (cur_tcb and vspace_at_asid asid pd - and K (asid \ 0 \ asid \ mask asid_bits) - and valid_asid_map and valid_vs_lookup - and valid_vspace_objs and valid_global_objs - and unique_table_refs o caps_of_state - and valid_arch_state - and pspace_aligned and pspace_distinct) - (pspace_aligned' and pspace_distinct' and no_0_obj') - (set_vm_root_for_flush pd asid) - (setVMRootForFlush pd asid)" - apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) - apply (corressimp search: armv_contextSwitch_corres - wp: get_cap_wp getSlotCap_wp - simp: isCap_simps) - apply (rule context_conjI) - subgoal by (simp add: cte_map_def objBits_simps tcb_cnode_index_def - tcbVTableSlot_def to_bl_1 cte_level_bits_def) - apply (rule context_conjI) - subgoal by (fastforce simp: cur_tcb_def intro!: tcb_at_cte_at_1[simplified]) - apply (rule conjI) - subgoal by (fastforce) - apply (drule cte_wp_at_ex) - apply clarsimp - apply (drule (1) pspace_relation_cte_wp_at[rotated 1]; (assumption | clarsimp)?) - apply (drule cte_wp_at_norm') - apply clarsimp - apply (rule_tac x="cteCap cte" in exI) - apply (auto elim: cte_wp_at_weakenE' dest!: curthread_relation) +(* Providing something like fastforce can lead to non-termination or slowdown, because the method + will be tried for any side condition. If there is a distinctive goal pattern that can + distinguish when the cleanup method should be run, you can use "match" to restrict the method: *) +lemma "corres (=) P \ (do x \ f; g x; h od) (do y \ f'; g' y; h' od)" + by (corres' \match conclusion in "x = t y" for x y \ \fastforce simp: t\\ corres: f g h wp: Q) + + +section "Form of [@{attribute corres}] rules" + +(* The method expects terminal corres rules to instantiate return relation and guards. + It also expects distinct variables for the abstract and concrete side and tries hard to + not accidentally mix these by rewriting corres terms with assumptions. + + For instance, it would be tempting to write the "g" rule as follows: *) +lemma g': "corres (=) Q \ (g x) (g' x)" + by (simp add: g t) + +(* This will usually not apply in the corres proof, because the goal will tend to have + the form "corres (=) Q \ (g x) (g' y)" with a side condition connecting x and y, and not + "corres (=) Q \ (g x) (g' x)" *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + apply (corres corres: f g') + (* \x y. x = y \ corres (=) (?R2 x) (?R'2 y) (g x) (g' y) *) + apply (fails \rule g'\) + (* The original "g" rule from the top of this file works, because it has separate x and y *) + apply (rule g) + apply (wpsimp wp: Q simp: t)+ done + +(* The corres method refuses to rewrite guards for the same reason. 
+ Because corres is careful with keeping abstract and concrete variables separate, + it is usually safe to interleave corres with corres_cases or corres_cases_both *) +lemma "corres (=) P \ + (do x \ case z of None \ f | Some x' \ do return x'; f od; g x od) + (do y \ f'; g' y od)" + by (corres corres: f g simp: t wp: Q | corres_cases)+ + +(* It is usually safe to interleave corres with methods that solve their goal, such as + fastforce, blast, etc. + + It is *not* generally safe to interleave corres with simp or clarsimp. It can occasionally be + useful to invoke simp or clarsimp manually on corres terms with schematics, but + generally it is unsafe and should be avoided. Use the "simp:" facility of the corres method + instead wherever possible, because it provides some protection against common pitfalls. + + Occasionally it is useful to interleave with tactics that work on specific kinds of goals + only, e.g. a clarsimp on goals that are not corres goals. For this, the predicate methods + is_corres, is_wp, and is_safe_wp are available. These do not change the proof state, but they + fail when their predicate does not hold. + + is_corres succeeds on corres goals only + is_wp succeeds on wp goals only (valid, validE, no_fail) + is_safe_wp succeeds only on wp goals without a schematic post condition (where wpsimp is not safe) + + Boolean combinations of predicates can be obtained with "," "|" and "fails" for "and", "or", and + "not". *) +(* Example of predicate methods *) +lemma "corres (=) P \ + (do x \ case z of None \ f | Some x' \ do return x'; f od; g x od) + (do y \ f'; g' y od)" + (* Do case distinction and apply the corres method only to the corres goals: *) + apply (corres_cases; (is_corres, corres corres: f g)?) + (* Find all safe wp goals and run wpsimp on them *) + apply (all \(is_safe_wp, wpsimp wp: Q)?\) + (* Only non-corres and non-wp should remain -- fail if that is not the case *) + apply (all \fails \is_corres | is_wp\, simp add: t\) + done + end + end diff --git a/lib/test/Crunch_Test_NonDet.thy b/lib/test/Crunch_Test_NonDet.thy index cd746d404c..1961523b7a 100644 --- a/lib/test/Crunch_Test_NonDet.thy +++ b/lib/test/Crunch_Test_NonDet.thy @@ -26,7 +26,7 @@ definition crunch_foo1 13 od" -crunch_ignore (valid, empty_fail, no_fail) (add: bind) +crunch_ignore (valid, empty_fail, no_fail) (add: Nondet_Monad.bind) crunch (empty_fail) empty_fail: crunch_foo2 diff --git a/lib/test/Match_Abbreviation_Test.thy b/lib/test/Match_Abbreviation_Test.thy index d9462baf73..0ebf750674 100644 --- a/lib/test/Match_Abbreviation_Test.thy +++ b/lib/test/Match_Abbreviation_Test.thy @@ -7,7 +7,7 @@ theory Match_Abbreviation_Test imports Lib.Match_Abbreviation - Lib.NonDetMonad + Monads.Nondet_Monad begin experiment diff --git a/lib/test/MonadicRewrite_Test.thy b/lib/test/MonadicRewrite_Test.thy new file mode 100644 index 0000000000..5ec082106c --- /dev/null +++ b/lib/test/MonadicRewrite_Test.thy @@ -0,0 +1,270 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory MonadicRewrite_Test +imports Lib.MonadicRewrite +begin + +(* in order to see the way bound variables are handled with bind/bindE by various rules, show etas + in this file *) +declare [[eta_contract=false]] + +section \Function definitions to use in examples\ + +definition + "example_k x \ gets (K x)" +definition + "example_f \ example_k 2" + +(* linear examples in normal and error monad *) + +definition + "example_add = do + a \ example_f; + b \ example_k a; + c \ example_f; + return 
(a+b+c) + od" + +definition + "example_addE = doE + a \ liftE example_f; + b \ liftE (example_k a); + c \ liftE example_f; + returnOk (a+b+c) + odE" + +section \Sanity checks\ + +(* pass through entire LHS while doing nothing, should get exact same state out *) + +lemma + "monadic_rewrite True False \ example_add example_add" + unfolding example_add_def + apply monadic_rewrite_pre + apply (rule monadic_rewrite_trans) \ \schematise RHS\ + apply (rule monadic_rewrite_step_l)+ + apply (rule monadic_rewrite_refl) + apply wp+ + (* the terms on both sides should be identical, including bound names *) + apply (rule monadic_rewrite_refl) + apply simp + done + +lemma + "monadic_rewrite True False \ example_addE example_addE" + unfolding example_addE_def + apply monadic_rewrite_pre + apply (rule monadic_rewrite_trans) \ \schematise RHS\ + apply (rule monadic_rewrite_step_l)+ + apply (rule monadic_rewrite_refl) + apply wp+ + (* the terms on both sides should be identical, including bound names *) + apply (rule monadic_rewrite_refl) + apply simp + done + +(* now do the same using automation (note automation needs a specific target to hit, as achieving + nothing is considered a failure *) + +lemma + "monadic_rewrite True False \ example_add example_add" + unfolding example_add_def + apply (monadic_rewrite_l monadic_rewrite_refl[where f="return (a+b+c)" for a b c]) + (* the terms on both sides should be identical, including bound names *) + apply (rule monadic_rewrite_refl) + apply simp + done + +lemma + "monadic_rewrite True False \ example_addE example_addE" + unfolding example_addE_def + apply (monadic_rewrite_l monadic_rewrite_refl[where f="returnOk (a+b+c)" for a b c]) + (* the terms on both sides should be identical, including bound names *) + apply (rule monadic_rewrite_refl) + apply simp + done + +section \Example of rewriting with a matching rule: selecting branches of if statements\ + +(* in this example, we know we'll always take the left branch because b will be 2 *) + +definition + "example_if = do + a \ example_f; + b \ example_k a; + if (b = 2) + then do + c \ example_f; + return (a+2+c) + od + else do + c \ example_f; + return (a+b+c) + od + od" + +definition + "example_removed_if = do + a \ example_f; + b \ example_k a; + c \ example_f; + return (a+2+c) + od" + +lemma example_k_wp: + "\K (a = n)\ example_k a \\rv s. rv = n\" + unfolding example_k_def + by wpsimp + +lemma example_f_wp_2: + "\\\ example_f \\rv s. rv = 2\" + unfolding example_f_def + by (wpsimp wp: example_k_wp) + +lemma example_f_wp: + "\K (n = 2)\ example_f \\rv s. 
rv = n\" + unfolding example_f_def + by (wpsimp wp: example_k_wp) + +(* rewrite the if, but use succeed to show remaining wp goals *) +lemma + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + apply (monadic_rewrite_l monadic_rewrite_if_l_True succeed) + apply (wpsimp wp: example_k_wp) + apply (wpsimp wp: example_f_wp_2) + (* note: bound names are preserved *) + apply (rule monadic_rewrite_refl) + apply simp + done + +(* RHS version: rewrite the if, but use succeed to show remaining wp goals *) +lemma + "monadic_rewrite True False \ example_removed_if example_if" + unfolding example_if_def example_removed_if_def + apply (monadic_rewrite_r monadic_rewrite_if_r_True succeed) + apply (wpsimp wp: example_k_wp) + apply (wpsimp wp: example_f_wp_2) + (* note: bound names are preserved *) + apply (rule monadic_rewrite_refl) + apply simp + done + +(* rewrite the if completely automatically *) +lemma (* on left *) + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + by (monadic_rewrite_l monadic_rewrite_if_l_True \wpsimp wp: example_k_wp example_f_wp\) + (rule monadic_rewrite_refl, simp) +lemma (* on right *) + "monadic_rewrite True False \ example_removed_if example_if" + unfolding example_if_def example_removed_if_def + by (monadic_rewrite_r monadic_rewrite_if_r_True \wpsimp wp: example_k_wp example_f_wp\) + (rule monadic_rewrite_refl, simp) + +(* if the required rules are already present in the environment, no need to specify a method *) +lemma (* on left *) + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + supply example_k_wp[wp] example_f_wp[wp] + by (monadic_rewrite_l monadic_rewrite_if_l_True) + (rule monadic_rewrite_refl, simp) + +section \Symbolic execution\ + +(* performing symbolic execution within a monadic_rewrite requires discharging no_fail/empty_fail + conditions depending on RHS/LHS and flags *) +crunches example_k, example_f + for inv[wp]: "P" + and (empty_fail) empty_fail[wp] + and (no_fail) no_fail[wp] + +(* If you know the value and can prove it later: monadic_rewrite_symb_exec_l/r_known *) + +lemma + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + supply example_k_wp[wp] example_f_wp[wp] + (* LHS: we know example_f will return 2, but will prove it later *) + apply (monadic_rewrite_symb_exec_l_known 2) + (* LHS: we know example_k 2 will return 2, but will prove it later *) + (* observe that symb_exec methods attempt to discharge inv/no_/empty_fail goals in the + background and optionally take a custom method; here we examine them with succeed *) + apply (monadic_rewrite_symb_exec_l_known 2 succeed) + prefer 2 apply wp (* inv *) + prefer 2 apply wp (* empty_fail *) + (* can simplify if condition normally *) + apply simp + (* we know the same return values occur on RHS, but that isn't very interesting as we won't + normally symbolically execute if we have the same term on both sides, so let's schematise + LHS and rewrite RHS to match it via symbolic execution *) + apply (monadic_rewrite_pre, rule monadic_rewrite_trans[rotated]) + (* RHS: we know example_f will return 2, but will prove it later *) + apply (monadic_rewrite_symb_exec_r_known 2) + (* RHS: we know example_k 2 will return 2, but will prove it later *) + apply (monadic_rewrite_symb_exec_r_known 2) + (* done with RHS rewrite *) + apply (rule monadic_rewrite_refl) + (* 
discharge RHS obligations of returning 2 that we deferred earlier *) + apply wpsimp+ + (* rewrite was successful, LHS = RHS *) + apply (rule monadic_rewrite_refl) + (* discharge LHS obligations of returning 2 that we deferred earlier *) + apply wpsimp+ + done + +(* The basic form of symbolic execution acts as one would expect: it does not specify any return + value, discharging any obligations later *) +lemma + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + (* let's rewrite the LHS as in previous example, but this time not knowing what the values will + be *) + apply monadic_rewrite_symb_exec_l+ + (* we still know we will take the first branch of the if, but we'll prove it later *) + apply (rule_tac P="b = 2" in monadic_rewrite_gen_asm) + (* we can simplify the if statement as usual *) + apply simp + + (* let's rewrite RHS into new LHS now, but with normal symbolic execution *) + apply (monadic_rewrite_pre, rule monadic_rewrite_trans[rotated]) + apply monadic_rewrite_symb_exec_r (* name collision: a \ aa *) + (* the rewrite is only true if the two "a" are the same, so assume that *) + apply (rule_tac P="aa = a" in monadic_rewrite_gen_asm, simp) + apply monadic_rewrite_symb_exec_r + (* done with RHS rewrite *) + apply (rule monadic_rewrite_refl) + (* discharge RHS obligations *) + apply (wpsimp wp: example_f_wp)+ + + (* rewrite was successful, LHS = RHS *) + apply no_name_eta + apply (rule monadic_rewrite_refl) + (* clear up LHS obligations w.r.t. precondition (bit fiddly due to equalities) *) + apply (clarsimp simp: pred_conj_def cong: conj_cong) + apply (wpsimp wp: example_k_wp example_f_wp)+ + done + +(* The "drop" form of symbolic execution is mainly used when a combination of rewrites and + assertions results in a state-invariant operation whose results are not used, such as + a number of getters whose results are used on branches not taken under the precondition. 
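+   The no_fail/empty_fail conditions noted at the start of this section are still generated for the
+   dropped operation, depending on the flags.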
*) +lemma + "monadic_rewrite True False \ example_if example_if" + unfolding example_if_def + apply (monadic_rewrite_pre, rule monadic_rewrite_trans) + (* we artificially add operations to LHS that are irrelevant *) + apply (repeat 10 \rule monadic_rewrite_add_return\) + (* done with rewriting *) + apply (rule monadic_rewrite_refl) + (* we can remove added operations in one pass *) + apply monadic_rewrite_symb_exec_l_drop+ + (* both sides equal again *) + apply (rule monadic_rewrite_refl) + apply simp + done + +end diff --git a/lib/test/ShowTypes_Test.thy b/lib/test/ShowTypes_Test.thy index ddf7dc758a..097c544abc 100644 --- a/lib/test/ShowTypes_Test.thy +++ b/lib/test/ShowTypes_Test.thy @@ -7,7 +7,7 @@ theory ShowTypes_Test imports Lib.ShowTypes - CLib.LemmaBucket_C + CParser.LemmaBucket_C CParser.CTranslation begin diff --git a/lib/test/Time_Methods_Cmd_Test.thy b/lib/test/Time_Methods_Cmd_Test.thy index b4efee3bb2..8803fa5786 100644 --- a/lib/test/Time_Methods_Cmd_Test.thy +++ b/lib/test/Time_Methods_Cmd_Test.thy @@ -6,7 +6,7 @@ theory Time_Methods_Cmd_Test imports Lib.Time_Methods_Cmd - Lib.Eisbach_Methods + Eisbach_Tools.Eisbach_Methods "HOL-Library.Sublist" begin diff --git a/lib/test/Trace_Schematic_Insts_Test.thy b/lib/test/Trace_Schematic_Insts_Test.thy index 076c99174d..e4e1e3d0a3 100644 --- a/lib/test/Trace_Schematic_Insts_Test.thy +++ b/lib/test/Trace_Schematic_Insts_Test.thy @@ -6,7 +6,7 @@ theory Trace_Schematic_Insts_Test imports - Lib.Trace_Schematic_Insts + Eisbach_Tools.Trace_Schematic_Insts begin text \ diff --git a/lib/test/Value_Type_Test.thy b/lib/test/Value_Type_Test.thy index 36636c97d6..d008226dd7 100644 --- a/lib/test/Value_Type_Test.thy +++ b/lib/test/Value_Type_Test.thy @@ -5,11 +5,13 @@ *) theory Value_Type_Test -imports Lib.Value_Type + imports + Lib.Value_Type + "Word_Lib.WordSetup" begin (* - Define a type synonym from a term that evaluates to a numeral. + Define a type synonym from a term of type nat or int that evaluates to a positive numeral. *) definition num_domains :: int where @@ -18,31 +20,51 @@ definition num_domains :: int where definition num_prio :: int where "num_prio = 256" -text \The RHS does not have to be of type nat, it just has to evaluate to any numeral:\ +text \ + The RHS has to be of type @{typ nat} or @{typ int}. 
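+  As noted above, it has to evaluate to a positive numeral, because the result is used as the size
+  of the generated type.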
@{typ int} will be automatically cast to + @{term nat}:\ value_type num_queues = "num_prio * num_domains" text \This produces a type of the specified size and a constant of type nat:\ typ num_queues term num_queues -thm num_queues_def -text \You can leave out the constant definition, and just define the type:\ +text \You get a symbolic definition theorem:\ +lemma "num_queues = nat (num_prio * num_domains)" + by (rule num_queues_def) + +text \And a numeric value theorem:\ +lemma "num_queues = 4096" + by (rule num_queues_val) + + +text \You can leave out the constant definitions, and just define the type:\ value_type (no_def) num_something = "10 * num_domains" typ num_something +text \ + If the value on the rhs is not of type @{typ nat}, it can still be cast to @{typ nat} manually:\ +definition some_word :: "8 word" where + "some_word \ 0xFF" + +value_type word_val = "unat (some_word && 0xF0)" + +lemma "word_val = (0xF0::nat)" + by (rule word_val_val) + + text \ @{command value_type} uses @{command value} in the background, so all of this also works in anonymous local contexts, provided they don't have assumptions (so that @{command value} can produce code) - Example: -\ + Example:\ context begin -definition X::int where "X = 10" +definition X::nat where "X = 10" value_type x_t = X diff --git a/misc/isa-common.mk b/misc/isa-common.mk index d372c1052c..faea0cdc87 100644 --- a/misc/isa-common.mk +++ b/misc/isa-common.mk @@ -34,12 +34,6 @@ ifndef L4V_ARCH export L4V_ARCH=ARM endif -ifdef BUILD_CACHE - ISABELLE_BUILD_OPTS=-o "save_skip_cache=true" -else - ISABELLE_BUILD_OPTS= -endif - # Setup rules for the heaps. $(HEAPS): .FORCE $(ISABELLE_TOOL) build -b -v ${ISABELLE_BUILD_OPTS} -d $(ROOT_PATH) $@ @@ -75,4 +69,3 @@ realclean: clean # Common targets that should be considered PHONY. .PHONY: all default images test - diff --git a/misc/jedit/macros/goto-error.bsh b/misc/jedit/macros/goto-error.bsh index d727d5dfb4..3fc260c0d8 100644 --- a/misc/jedit/macros/goto-error.bsh +++ b/misc/jedit/macros/goto-error.bsh @@ -25,8 +25,8 @@ import isabelle.jedit.*; msg(s) { Macros.message(view, s); } // isabelle setup -model = Document_Model.get(textArea.getBuffer()); -snapshot = model.get().snapshot(); +model = Document_Model.get_model(textArea.getBuffer()); +snapshot = Document_Model.snapshot(model.get()); class FirstError { public int first_error_pos = -1; diff --git a/misc/scripts/cpp b/misc/scripts/cpp deleted file mode 100755 index 5984ff080c..0000000000 --- a/misc/scripts/cpp +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# -# Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) -# -# SPDX-License-Identifier: BSD-2-Clause -# - -# -# Wrapper for clang C preprocessor on MacOS -# -export L4CPP="-DTARGET=ARM -DTARGET_ARM -DPLATFORM=Sabre -DPLATFORM_Sabre" - -llvm-gcc -Wno-invalid-pp-token -E -x c $@ diff --git a/proof/Makefile b/proof/Makefile index 6a00a6ca70..7ee45d2a3e 100644 --- a/proof/Makefile +++ b/proof/Makefile @@ -10,11 +10,6 @@ default: images test test: all: images test -# Allow sorry command in AARCH64 AInvs during development: -ifeq "$(L4V_ARCH)" "AARCH64" - export AINVS_QUICK_AND_DIRTY=1 -endif - # # Setup heaps. # diff --git a/proof/ROOT b/proof/ROOT index e72d47147b..cb8a159bab 100644 --- a/proof/ROOT +++ b/proof/ROOT @@ -52,10 +52,12 @@ session Refine in "refine" = BaseRefine + (* * This theory is in a separate session because the proofs currently - * only work for ARM. + * work only for ARM, RISCV64, and AARCH64. 
*) session RefineOrphanage in "refine/$L4V_ARCH/orphanage" = Refine + description \Proof that the kernel does not orphan threads.\ + theories [condition = "REFINE_QUICK_AND_DIRTY", quick_and_dirty] + "Orphanage" theories "Orphanage" diff --git a/proof/access-control/ADT_AC.thy b/proof/access-control/ADT_AC.thy index eb85735b24..f0c20cbbf2 100644 --- a/proof/access-control/ADT_AC.thy +++ b/proof/access-control/ADT_AC.thy @@ -95,7 +95,8 @@ lemma do_user_op_respects: apply (rule dmo_device_update_respects_Write) apply (wpsimp wp: dmo_um_upd_machine_state dmo_user_memory_update_respects_Write - hoare_vcg_all_lift hoare_vcg_imp_lift)+ + hoare_vcg_all_lift hoare_vcg_imp_lift + wp_del: select_wp)+ apply (rule hoare_pre_cont) apply (wpsimp wp: select_wp)+ apply (simp add: restrict_map_def split: if_splits) diff --git a/proof/access-control/ARM/ArchADT_AC.thy b/proof/access-control/ARM/ArchADT_AC.thy index c5dff0cc2c..c4c6efa8cd 100644 --- a/proof/access-control/ARM/ArchADT_AC.thy +++ b/proof/access-control/ARM/ArchADT_AC.thy @@ -101,7 +101,7 @@ lemma ptable_state_objs_to_policy: vspace_cap_rights_to_auth b)" in bexI) apply clarsimp apply (rule_tac x="(ptrFromPAddr a + (x && mask aa), auth)" in image_eqI) - apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def physBase_def) + apply (simp add: ptrFromPAddr_def) apply (simp add: ptr_offset_in_ptr_range) apply (simp add: kernel_mappings_kernel_mapping_slots') apply (clarsimp simp: graph_of_def) @@ -165,7 +165,7 @@ lemma get_page_info_state_objs_to_policy: vspace_cap_rights_to_auth r)" in bexI) apply clarsimp apply (rule_tac x="(ptrFromPAddr base + (x && mask sz), auth)" in image_eqI) - apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def physBase_def) + apply (simp add: ptrFromPAddr_def) apply (simp add: ptr_offset_in_ptr_range) apply (clarsimp simp: get_page_info_def get_pd_entry_def get_pt_info_def get_pt_entry_def get_arch_obj_def pte_ref_def graph_of_def diff --git a/proof/access-control/ARM/ArchAccess.thy b/proof/access-control/ARM/ArchAccess.thy index 4538c300a7..60797287eb 100644 --- a/proof/access-control/ARM/ArchAccess.thy +++ b/proof/access-control/ARM/ArchAccess.thy @@ -196,10 +196,10 @@ lemmas integrity_asids_kh_upds = declare integrity_asids_def[simp] lemma integrity_asids_kh_upds': - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ CNode sz cs)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ TCB tcb)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ Endpoint ep)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ Notification ntfn)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ CNode sz cs)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ TCB tcb)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ Endpoint ep)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ Notification ntfn)\) s" by auto lemma integrity_asids_kh_update: @@ -210,10 +210,10 @@ lemma integrity_asids_kh_update: subsection \Misc definitions\ -definition ctxt_IP_update where - "ctxt_IP_update ctxt \ ctxt(NextIP := ctxt FaultIP)" +definition ctxt_IP_update :: "user_context \ user_context" where + "ctxt_IP_update ctxt \ UserContext ((user_regs ctxt)(NextIP := user_regs ctxt FaultIP))" -abbreviation arch_IP_update where +abbreviation arch_IP_update :: "arch_tcb \ arch_tcb" where "arch_IP_update arch \ arch_tcb_context_set (ctxt_IP_update (arch_tcb_context_get arch)) arch" definition asid_pool_integrity :: @@ -261,7 +261,6 @@ 
requalify_consts state_vrefs state_asids_to_policy_arch integrity_asids - ctxt_IP_update arch_IP_update arch_cap_auth_conferred arch_integrity_obj_atomic diff --git a/proof/access-control/ARM/ArchAccess_AC.thy b/proof/access-control/ARM/ArchAccess_AC.thy index af0d42118d..39112a65cc 100644 --- a/proof/access-control/ARM/ArchAccess_AC.thy +++ b/proof/access-control/ARM/ArchAccess_AC.thy @@ -91,7 +91,7 @@ lemma integrity_asids_refl[Access_AC_assms, simp]: lemma integrity_asids_update_autarch[Access_AC_assms]: "\ \x a. integrity_asids aag subjects x a st s; is_subject aag ptr \ - \ \x a. integrity_asids aag subjects x a st (s\kheap := kheap s(ptr \ obj)\)" + \ \x a. integrity_asids aag subjects x a st (s\kheap := (kheap s)(ptr \ obj)\)" by simp end diff --git a/proof/access-control/ARM/ArchArch_AC.thy b/proof/access-control/ARM/ArchArch_AC.thy index 06a62a5fe5..b6a04d10bc 100644 --- a/proof/access-control/ARM/ArchArch_AC.thy +++ b/proof/access-control/ARM/ArchArch_AC.thy @@ -159,7 +159,7 @@ lemma unmap_page_table_respects: apply (rule hoare_gen_asm) apply (simp add: unmap_page_table_def page_table_mapped_def ) apply (rule hoare_pre) - apply (wpsimp wp: store_pde_respects page_table_mapped_wp_weak get_pde_wp hoare_vcg_all_lift_R + apply (wpsimp wp: store_pde_respects page_table_mapped_wp_weak get_pde_wp hoare_vcg_all_liftE_R simp: cleanByVA_PoU_def | wp (once) hoare_drop_imps)+ apply auto @@ -372,7 +372,7 @@ lemma lookup_pt_slot_authorised3: \\rv _. \x\set [rv, rv + 4 .e. rv + 0x3C]. is_subject aag (x && ~~ mask pt_bits)\, -" apply (rule_tac Q'="\rv s. is_aligned rv 6 \ (\x\set [0, 4 .e. 0x3C]. is_subject aag (x + rv && ~~ mask pt_bits))" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (rule hoare_pre) apply (wp lookup_pt_slot_is_aligned_6 lookup_pt_slot_authorised2) apply (fastforce simp: vmsz_aligned_def pd_bits_def pageBits_def) @@ -549,7 +549,7 @@ lemma perform_asid_control_invocation_respects: apply (rule hoare_pre) apply (wpc, simp) apply (wpsimp wp: set_cap_integrity_autarch cap_insert_integrity_autarch - retype_region_integrity[where sz=12] static_imp_wp) + retype_region_integrity[where sz=12] hoare_weak_lift_imp) apply (clarsimp simp: authorised_asid_control_inv_def ptr_range_def page_bits_def add.commute range_cover_def obj_bits_api_def default_arch_object_def @@ -576,12 +576,12 @@ lemma perform_asid_control_invocation_pas_refined [wp]: \\_. 
pas_refined aag\" apply (simp add: perform_asid_control_invocation_def) apply (rule hoare_pre) - apply (wp cap_insert_pas_refined' static_imp_wp + apply (wp cap_insert_pas_refined' hoare_weak_lift_imp | strengthen pas_refined_set_asid_strg | wpc | simp add: delete_objects_def2 fun_upd_def[symmetric])+ apply (wp retype_region_pas_refined'[where sz=pageBits] - hoare_vcg_ex_lift hoare_vcg_all_lift static_imp_wp hoare_wp_combs hoare_drop_imp + hoare_vcg_ex_lift hoare_vcg_all_lift hoare_weak_lift_imp hoare_wp_combs hoare_drop_imp retype_region_invs_extras(1)[where sz = pageBits] retype_region_invs_extras(4)[where sz = pageBits] retype_region_invs_extras(6)[where sz = pageBits] @@ -591,7 +591,7 @@ lemma perform_asid_control_invocation_pas_refined [wp]: max_index_upd_invs_simple max_index_upd_caps_overlap_reserved hoare_vcg_ex_lift set_cap_cte_wp_at hoare_vcg_disj_lift set_free_index_valid_pspace set_cap_descendants_range_in set_cap_no_overlap get_cap_wp set_cap_caps_no_overlap - hoare_vcg_all_lift static_imp_wp retype_region_invs_extras + hoare_vcg_all_lift hoare_weak_lift_imp retype_region_invs_extras set_cap_pas_refined_not_transferable | simp add: do_machine_op_def split_def cte_wp_at_neg2 region_in_kernel_window_def)+ apply (rename_tac frame slot parent base cap) @@ -826,12 +826,10 @@ lemma decode_arch_invocation_authorised: apply (rule hoare_pre) apply (simp add: split_def Let_def split del: if_split cong: cap.case_cong arch_cap.case_cong if_cong option.case_cong) - apply (wp select_wp whenE_throwError_wp check_vp_wpR - find_pd_for_asid_authority2 + apply (wp whenE_throwError_wp check_vp_wpR find_pd_for_asid_authority2 | wpc | simp add: authorised_asid_control_inv_def authorised_page_inv_def authorised_page_directory_inv_def - del: hoare_True_E_R split del: if_split)+ apply (clarsimp simp: authorised_asid_pool_inv_def authorised_page_table_inv_def neq_Nil_conv invs_psp_aligned invs_vspace_objs cli_no_irqs) @@ -959,8 +957,9 @@ lemma delete_asid_pool_pas_refined [wp]: crunch respects[wp]: invalidate_asid_entry "integrity aag X st" crunch respects[wp]: flush_space "integrity aag X st" - (ignore: do_machine_op simp: invalidateLocalTLB_ASID_def cleanCaches_PoU_def - dsb_def clean_D_PoU_def invalidate_I_PoU_def do_machine_op_bind) + (ignore: do_machine_op + simp: invalidateLocalTLB_ASID_def cleanCaches_PoU_def dsb_def clean_D_PoU_def invalidate_I_PoU_def + do_machine_op_bind empty_fail_cond) lemma delete_asid_pool_respects[wp]: "\integrity aag X st and diff --git a/proof/access-control/ARM/ArchCNode_AC.thy b/proof/access-control/ARM/ArchCNode_AC.thy index a385de3008..9c69e20017 100644 --- a/proof/access-control/ARM/ArchCNode_AC.thy +++ b/proof/access-control/ARM/ArchCNode_AC.thy @@ -75,17 +75,17 @@ crunches set_cdt crunches prepare_thread_delete, arch_finalise_cap for cur_domain[CNode_AC_assms, wp]:"\s. 
P (cur_domain s)" - (wp: crunch_wps select_wp hoare_vcg_if_lift2 simp: unless_def) + (wp: crunch_wps hoare_vcg_if_lift2 simp: unless_def) lemma state_vrefs_tcb_upd[CNode_AC_assms]: - "tcb_at t s \ state_vrefs (s\kheap := kheap s(t \ TCB tcb)\) = state_vrefs s" + "tcb_at t s \ state_vrefs (s\kheap := (kheap s)(t \ TCB tcb)\) = state_vrefs s" apply (rule ext) apply (auto simp: state_vrefs_def vs_refs_no_global_pts_def tcb_at_def dest!: get_tcb_SomeD) done lemma state_vrefs_simple_type_upd[CNode_AC_assms]: "\ ko_at ko ptr s; is_simple_type ko; a_type ko = a_type (f val) \ - \ state_vrefs (s\kheap := kheap s(ptr \ f val)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(ptr \ f val)\) = state_vrefs s" apply (rule ext) apply (auto simp: state_vrefs_def vs_refs_no_global_pts_def obj_at_def partial_inv_def a_type_def split: kernel_object.splits arch_kernel_obj.splits if_splits) diff --git a/proof/access-control/ARM/ArchDomainSepInv.thy b/proof/access-control/ARM/ArchDomainSepInv.thy index 73e9df6bee..eb3ac5ab85 100644 --- a/proof/access-control/ARM/ArchDomainSepInv.thy +++ b/proof/access-control/ARM/ArchDomainSepInv.thy @@ -49,7 +49,7 @@ lemma perform_page_invocation_domain_sep_inv: \\_. domain_sep_inv irqs st\" apply (rule hoare_pre) apply (wp mapM_wp[OF _ subset_refl] set_cap_domain_sep_inv mapM_x_wp[OF _ subset_refl] - perform_page_invocation_domain_sep_inv_get_cap_helper static_imp_wp + perform_page_invocation_domain_sep_inv_get_cap_helper hoare_weak_lift_imp | simp add: perform_page_invocation_def o_def | wpc)+ apply (clarsimp simp: valid_page_inv_def) apply (case_tac xa, simp_all add: domain_sep_inv_cap_def is_pg_cap_def) @@ -79,7 +79,7 @@ lemma perform_asid_control_invocation_domain_sep_inv: unfolding perform_asid_control_invocation_def apply (rule hoare_pre) apply (wp modify_wp cap_insert_domain_sep_inv' set_cap_domain_sep_inv - get_cap_domain_sep_inv_cap[where st=st] static_imp_wp + get_cap_domain_sep_inv_cap[where st=st] hoare_weak_lift_imp | wpc | simp )+ done diff --git a/proof/access-control/ARM/ArchFinalise_AC.thy b/proof/access-control/ARM/ArchFinalise_AC.thy index 901acccf2c..680665df67 100644 --- a/proof/access-control/ARM/ArchFinalise_AC.thy +++ b/proof/access-control/ARM/ArchFinalise_AC.thy @@ -28,7 +28,7 @@ lemma sbn_st_vrefs[Finalise_AC_assms, wp]: lemma arch_finalise_cap_auth'[Finalise_AC_assms]: "\pas_refined aag\ arch_finalise_cap x12 final \\rv s. pas_cap_cur_auth aag (fst rv)\" unfolding arch_finalise_cap_def - by (wp | wpc | simp add: comp_def hoare_post_taut[where P = \] split del: if_split)+ + by (wp | wpc | simp add: comp_def hoare_TrueI[where P = \] split del: if_split)+ lemma arch_finalise_cap_obj_refs[Finalise_AC_assms]: "\\s. \x \ aobj_ref' acap. P x\ @@ -93,7 +93,7 @@ proof (induct rule: cap_revoke.induct[where ?a1.0=s]) qed lemma finalise_cap_caps_of_state_nullinv[Finalise_AC_assms]: - "\\s. P (caps_of_state s) \ (\p. P (caps_of_state s(p \ NullCap)))\ + "\\s. P (caps_of_state s) \ (\p. P ((caps_of_state s)(p \ NullCap)))\ finalise_cap cap final \\_ s. P (caps_of_state s)\" by (cases cap; @@ -107,7 +107,7 @@ lemma finalise_cap_fst_ret[Finalise_AC_assms]: "\\_. P NullCap \ (\a b c. P (Zombie a b c))\ finalise_cap cap is_final \\rv _. 
P (fst rv)\" - including no_pre + including classic_wp_pre apply (cases cap, simp_all add: arch_finalise_cap_def split del: if_split) apply (wp | simp add: comp_def split del: if_split | fastforce)+ apply (rule hoare_pre) diff --git a/proof/access-control/ARM/ArchInterrupt_AC.thy b/proof/access-control/ARM/ArchInterrupt_AC.thy index a58fc92270..e7a833797f 100644 --- a/proof/access-control/ARM/ArchInterrupt_AC.thy +++ b/proof/access-control/ARM/ArchInterrupt_AC.thy @@ -87,17 +87,12 @@ lemma arch_decode_irq_control_invocation_authorised[Interrupt_AC_assms]: (args \ [] \ (pasSubject aag, Control, pasIRQAbs aag (ucast (args ! 0))) \ pasPolicy aag))\ arch_decode_irq_control_invocation info_label args slot caps \\x _. arch_authorised_irq_ctl_inv aag x\, -" - unfolding decode_irq_control_invocation_def arch_decode_irq_control_invocation_def + unfolding decode_irq_control_invocation_def arch_decode_irq_control_invocation_def Let_def authorised_irq_ctl_inv_def arch_authorised_irq_ctl_inv_def arch_check_irq_def apply (rule hoare_gen_asmE) - apply (rule hoare_pre) - apply (simp add: Let_def split del: if_split cong: if_cong) - apply (wp whenE_throwError_wp hoare_vcg_imp_lift hoare_drop_imps - | strengthen aag_Control_owns_strg - | simp add: o_def del: hoare_True_E_R)+ + apply (wpsimp wp: weak_if_wp) apply (cases args, simp_all) apply (cases caps, simp_all) - apply (simp add: ucast_mask_drop) apply (auto simp: is_cap_simps cap_auth_conferred_def pas_refined_wellformed pas_refined_all_auth_is_owns aag_cap_auth_def) diff --git a/proof/access-control/ARM/ArchIpc_AC.thy b/proof/access-control/ARM/ArchIpc_AC.thy index ccd47639e2..2e8bac3dd8 100644 --- a/proof/access-control/ARM/ArchIpc_AC.thy +++ b/proof/access-control/ARM/ArchIpc_AC.thy @@ -123,7 +123,8 @@ lemma set_mrs_respects_in_ipc[Ipc_AC_assms]: apply simp apply wp+ apply (clarsimp simp: arch_tcb_set_registers_def) - by (rule update_tcb_context_in_ipc [unfolded fun_upd_def]; fastforce) + by (rule update_tcb_context_in_ipc [unfolded fun_upd_def] + ; fastforce simp: arch_tcb_context_set_def) lemma lookup_ipc_buffer_ptr_range_in_ipc[Ipc_AC_assms]: "\valid_objs and integrity_tcb_in_ipc aag X thread epptr tst st\ @@ -175,7 +176,7 @@ lemma handle_arch_fault_reply_respects[Ipc_AC_assms]: lemma auth_ipc_buffers_kheap_update[Ipc_AC_assms]: "\ x \ auth_ipc_buffers st thread; kheap st thread = Some (TCB tcb); kheap s thread = Some (TCB tcb'); tcb_ipcframe tcb = tcb_ipcframe tcb' \ - \ x \ auth_ipc_buffers (s\kheap := kheap s(thread \ TCB tcb)\) thread" + \ x \ auth_ipc_buffers (s\kheap := (kheap s)(thread \ TCB tcb)\) thread" by (clarsimp simp: auth_ipc_buffers_member_def get_tcb_def caps_of_state_tcb) lemma auth_ipc_buffers_machine_state_update[Ipc_AC_assms, simp]: diff --git a/proof/access-control/ARM/ArchRetype_AC.thy b/proof/access-control/ARM/ArchRetype_AC.thy index 647bf990c3..736d8df1e6 100644 --- a/proof/access-control/ARM/ArchRetype_AC.thy +++ b/proof/access-control/ARM/ArchRetype_AC.thy @@ -98,7 +98,7 @@ lemma copy_global_mappings_integrity: \\_. 
integrity aag X st\" apply (rule hoare_gen_asm) apply (simp add: copy_global_mappings_def) - apply (wp mapM_x_wp[OF _ subset_refl] store_pde_respects) + apply (wp mapM_x_wp[OF _ subset_refl] store_pde_respects)+ apply (drule subsetD[OF copy_global_mappings_index_subset]) apply (fastforce simp: pd_shifting') apply wpsimp+ @@ -174,7 +174,7 @@ lemma copy_global_mappings_pas_refined: valid_global_objs s \ valid_global_refs s \ pas_refined aag s)" in hoare_strengthen_post) apply (rule mapM_x_wp[OF _ subset_refl]) - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (unfold o_def) (* Use [1] so wp doesn't filter out the global_pd condition *) apply (wp store_pde_pas_refined store_pde_valid_kernel_mappings_map_global)[1] @@ -329,8 +329,7 @@ lemma dmo_freeMemory_respects[Retype_AC_assms]: apply wp apply clarsimp apply (erule use_valid) - apply (wp mol_respects mapM_x_wp' storeWord_integrity_autarch) - apply simp + apply (wpsimp wp: mol_respects mapM_x_wp' storeWord_integrity_autarch) apply (clarsimp simp: word_size_def word_bits_def word_size_bits_def upto_enum_step_shift_red[where us=2, simplified]) apply (erule bspec) diff --git a/proof/access-control/ARM/ArchSyscall_AC.thy b/proof/access-control/ARM/ArchSyscall_AC.thy index 711b396dee..bfd7439001 100644 --- a/proof/access-control/ARM/ArchSyscall_AC.thy +++ b/proof/access-control/ARM/ArchSyscall_AC.thy @@ -154,7 +154,7 @@ crunch arch_state[Syscall_AC_assms, wp]: init_arch_objects "\s. P (arch_ (wp: crunch_wps) crunch ct_active [Syscall_AC_assms, wp]: arch_post_cap_deletion "ct_active" - (wp: crunch_wps filterM_preserved hoare_unless_wp + (wp: crunch_wps filterM_preserved unless_wp simp: crunch_simps ignore: do_extended_op) crunches diff --git a/proof/access-control/ARM/ArchTcb_AC.thy b/proof/access-control/ARM/ArchTcb_AC.thy index dd78504af0..99bebc86a4 100644 --- a/proof/access-control/ARM/ArchTcb_AC.thy +++ b/proof/access-control/ARM/ArchTcb_AC.thy @@ -33,19 +33,19 @@ lemma invoke_tcb_tc_respects_aag[Tcb_AC_assms]: apply (subst invoke_tcb.simps) apply (subst option_update_thread_def) apply (subst set_priority_extended.dxo_eq) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule_tac P="case ep of Some v \ length v = word_bits | _ \ True" in hoare_gen_asm) apply (simp only: split_def) - apply (((simp add: conj_comms del: hoare_True_E_R, + apply (((simp add: conj_comms, strengthen imp_consequent[where Q="x = None" for x], simp cong: conj_cong) | strengthen invs_psp_aligned invs_vspace_objs invs_arch_state - | rule wp_split_const_if wp_split_const_if_R hoare_vcg_all_lift_R + | rule wp_split_const_if wp_split_const_if_R hoare_vcg_all_liftE_R hoare_vcg_E_elim hoare_vcg_const_imp_lift_R hoare_vcg_R_conj | wp restart_integrity_autarch set_mcpriority_integrity_autarch as_user_integrity_autarch thread_set_integrity_autarch option_update_thread_integrity_autarch - opt_update_thread_valid_sched static_imp_wp + opt_update_thread_valid_sched hoare_weak_lift_imp cap_insert_integrity_autarch checked_insert_pas_refined cap_delete_respects' cap_delete_pas_refined' check_cap_inv2[where Q="\_. 
integrity aag X st"] @@ -54,7 +54,7 @@ lemma invoke_tcb_tc_respects_aag[Tcb_AC_assms]: out_invs_trivial case_option_wpE cap_delete_deletes cap_delete_valid_cap cap_insert_valid_cap out_cte_at cap_insert_cte_at cap_delete_cte_at out_valid_cap out_tcb_valid - hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_invs_trivial[OF ball_tcb_cap_casesI] hoare_vcg_all_lift thread_set_valid_cap out_emptyable @@ -78,7 +78,6 @@ lemma invoke_tcb_tc_respects_aag[Tcb_AC_assms]: cap_delete_pas_refined'[THEN valid_validE_E] thread_set_cte_wp_at_trivial | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def a_type_def partial_inv_def - del: hoare_True_E_R | wpc | strengthen invs_mdb use_no_cap_to_obj_asid_strg tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] diff --git a/proof/access-control/Access_AC.thy b/proof/access-control/Access_AC.thy index d0efd7c79b..1c061ed0d2 100644 --- a/proof/access-control/Access_AC.thy +++ b/proof/access-control/Access_AC.thy @@ -208,17 +208,17 @@ lemmas state_objs_to_policy_cases lemma tcb_states_of_state_preserved: "\ get_tcb thread s = Some tcb; tcb_state tcb' = tcb_state tcb \ - \ tcb_states_of_state (s\kheap := kheap s(thread \ TCB tcb')\) = tcb_states_of_state s" + \ tcb_states_of_state (s\kheap := (kheap s)(thread \ TCB tcb')\) = tcb_states_of_state s" by (auto split: option.splits simp: tcb_states_of_state_def get_tcb_def) lemma thread_st_auth_preserved: "\ get_tcb thread s = Some tcb; tcb_state tcb' = tcb_state tcb \ - \ thread_st_auth (s\kheap := kheap s(thread \ TCB tcb')\) = thread_st_auth s" + \ thread_st_auth (s\kheap := (kheap s)(thread \ TCB tcb')\) = thread_st_auth s" by (simp add: tcb_states_of_state_preserved thread_st_auth_def) lemma thread_bound_ntfns_preserved: "\ get_tcb thread s = Some tcb; tcb_bound_notification tcb' = tcb_bound_notification tcb \ - \ thread_bound_ntfns (s\kheap := kheap s(thread \ TCB tcb')\) = thread_bound_ntfns s" + \ thread_bound_ntfns (s\kheap := (kheap s)(thread \ TCB tcb')\) = thread_bound_ntfns s" by (auto simp: thread_bound_ntfns_def get_tcb_def split: option.splits) lemma is_transferable_null_filter[simp]: @@ -865,7 +865,7 @@ locale Access_AC_2 = Access_AC_1 + \ (\x a. integrity_asids aag subjects x a s s'')" and integrity_asids_update_autarch: "\ \x a. integrity_asids aag {pasSubject aag} x a s s'; is_subject aag ptr \ - \ \x a. integrity_asids aag {pasSubject aag} x a s (s'\kheap := kheap s'(ptr \ obj)\)" + \ \x a. 
integrity_asids aag {pasSubject aag} x a s (s'\kheap := (kheap s')(ptr \ obj)\)" begin section \Generic AC stuff\ @@ -980,7 +980,7 @@ lemma integrity_refl [simp]: lemma integrity_update_autarch: "\ integrity aag X st s; is_subject aag ptr \ - \ integrity aag X st (s\kheap := kheap s(ptr \ obj)\)" + \ integrity aag X st (s\kheap := (kheap s)(ptr \ obj)\)" unfolding integrity_subjects_def apply (intro conjI,simp_all) apply clarsimp @@ -1530,7 +1530,7 @@ lemma integrity_mono: apply (erule integrity_obj_atomic.cases[OF _ integrity_obj_atomic.intros]; auto simp: indirect_send_def direct_send_def direct_call_def direct_reply_def elim: reply_cap_deletion_integrity_mono cnode_integrity_mono - arch_integrity_obj_atomic_mono) + arch_integrity_obj_atomic_mono)[1] apply (rule conjI) apply clarsimp apply (drule_tac x=x in spec)+ diff --git a/proof/access-control/CNode_AC.thy b/proof/access-control/CNode_AC.thy index 429df91828..c0ba8511ac 100644 --- a/proof/access-control/CNode_AC.thy +++ b/proof/access-control/CNode_AC.thy @@ -56,11 +56,11 @@ locale CNode_AC_1 = \ state_asids_to_policy_arch aag (caps(ptr \ cap, ptr' \ cap')) as vrefs \ pasPolicy aag" and state_vrefs_tcb_upd: "\ pspace_aligned s; valid_vspace_objs s; valid_arch_state s; tcb_at tptr s \ - \ state_vrefs (s\kheap := kheap s(tptr \ TCB tcb)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(tptr \ TCB tcb)\) = state_vrefs s" and state_vrefs_simple_type_upd: "\ pspace_aligned s; valid_vspace_objs s; valid_arch_state s; ko_at ko p s; is_simple_type ko; a_type ko = a_type (f (val :: 'b)) \ - \ state_vrefs (s\kheap := kheap s(p \ f val)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(p \ f val)\) = state_vrefs s" and a_type_arch_object_not_tcb[simp]: "a_type (ArchObj arch_kernel_obj) \ ATCB" and set_cap_state_vrefs: @@ -211,7 +211,7 @@ lemma lookup_slot_for_cnode_op_authorised[wp]: apply (simp add: lookup_slot_for_cnode_op_def split del: if_split) apply (wp whenE_throwError_wp hoare_drop_imps resolve_address_bits_authorised - [THEN hoare_post_imp_R[where Q'="\x s. is_subject aag (fst (fst x))"]] + [THEN hoare_strengthen_postE_R[where Q'="\x s. is_subject aag (fst (fst x))"]] | wpc | fastforce)+ done @@ -246,7 +246,7 @@ lemma decode_cnode_inv_authorised: apply (simp add: authorised_cnode_inv_def decode_cnode_invocation_def split_def whenE_def unlessE_def set_eq_iff cong: if_cong Invocations_A.cnode_invocation.case_cong split del: if_split) - apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R lsfco_cte_at + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R lsfco_cte_at | wp (once) get_cap_cur_auth)+ apply (subgoal_tac "\n. n < length excaps \ (is_cnode_cap (excaps ! n) @@ -727,7 +727,7 @@ lemmas[monad_commute_wp] = (* Sort-of VCG for monad_commute goals *) lemma wpc_helper_monad_commute: - "monad_commute P f g \ wpc_helper (P, P') (Q, Q') (monad_commute P f g)" + "monad_commute P f g \ wpc_helper (P, P', P'') (Q, Q', Q'') (monad_commute P f g)" by (clarsimp simp: wpc_helper_def) wpc_setup "\m. 
monad_commute P f m" wpc_helper_monad_commute @@ -820,9 +820,6 @@ lemma set_cap_empty_slot_ext_comm: apply (case_tac y; simp add: return_def fail_def split: option.splits) done -(* FIXME: MOVE *) -lemmas bind_eqI' = NonDetMonadVCG.bind_eqI[OF _ refl] - lemma K_bind_assoc: "(do (do f; g od); h od) = (do f; g; h od)" by (simp add: bind_assoc) @@ -972,10 +969,10 @@ lemma set_untyped_cap_as_full_is_transferable[wp]: using untyped_not_transferable max_free_index_update_preserve_untyped by simp lemma set_untyped_cap_as_full_is_transferable': - "\\s. is_transferable ((caps_of_state s(slot2 \ new_cap)) slot3) \ + "\\s. is_transferable (((caps_of_state s)(slot2 \ new_cap)) slot3) \ Some src_cap = (caps_of_state s slot)\ set_untyped_cap_as_full src_cap new_cap slot - \\_ s. is_transferable ((caps_of_state s(slot2 \ new_cap)) slot3)\" + \\_ s. is_transferable (((caps_of_state s)(slot2 \ new_cap)) slot3)\" apply (clarsimp simp: set_untyped_cap_as_full_def) apply safe apply (wp,fastforce)+ @@ -1525,10 +1522,10 @@ lemma post_cap_deletion_cur_domain[wp]: by (wpsimp simp: post_cap_deletion_def) crunch cur_domain[wp]: cap_swap_for_delete, empty_slot "\s. P (cur_domain s)" - (wp: crunch_wps select_wp hoare_vcg_if_lift2 simp: unless_def) + (wp: crunch_wps hoare_vcg_if_lift2 simp: unless_def) crunch cur_domain[wp]: finalise_cap "\s. P (cur_domain s)" - (wp: crunch_wps select_wp hoare_vcg_if_lift2 simp: unless_def) + (wp: crunch_wps hoare_vcg_if_lift2 simp: unless_def) lemma rec_del_cur_domain[wp]: "rec_del call \\s. P (cur_domain s)\" diff --git a/proof/access-control/DomainSepInv.thy b/proof/access-control/DomainSepInv.thy index fe540a54d6..3ee3cb92f7 100644 --- a/proof/access-control/DomainSepInv.thy +++ b/proof/access-control/DomainSepInv.thy @@ -7,7 +7,7 @@ theory DomainSepInv imports "ArchIpc_AC" (* for transfer_caps_loop_pres_dest lec_valid_cap' set_simple_ko_get_tcb thread_set_tcb_fault_update_valid_mdb *) - "Lib.WPBang" + "Monads.WPBang" begin text \ @@ -133,7 +133,7 @@ crunch domain_sep_inv[wp]: set_extra_badge "domain_sep_inv irqs st" lemma set_cap_neg_cte_wp_at_other_helper': "\ oslot \ slot; ko_at (TCB x) (fst oslot) s; tcb_cap_cases (snd oslot) = Some (ogetF, osetF, orestr); - kheap (s\kheap := kheap s(fst oslot \ TCB (osetF (\ x. cap) x))\) (fst slot) = Some (TCB tcb); + kheap (s\kheap := (kheap s)(fst oslot \ TCB (osetF (\ x. cap) x))\) (fst slot) = Some (TCB tcb); tcb_cap_cases (snd slot) = Some (getF, setF, restr); P (getF tcb) \ \ cte_wp_at P slot s" apply (case_tac "fst oslot = fst slot") @@ -150,7 +150,7 @@ lemma set_cap_neg_cte_wp_at_other_helper': lemma set_cap_neg_cte_wp_at_other_helper: "\ \ cte_wp_at P slot s; oslot \ slot; ko_at (TCB x) (fst oslot) s; tcb_cap_cases (snd oslot) = Some (getF, setF, restr) \ - \ \ cte_wp_at P slot (s\kheap := kheap s(fst oslot \ TCB (setF (\ x. cap) x))\)" + \ \ cte_wp_at P slot (s\kheap := (kheap s)(fst oslot \ TCB (setF (\ x. cap) x))\)" apply (rule notI) apply (erule cte_wp_atE) apply (fastforce elim: notE intro: cte_wp_at_cteI split: if_splits) @@ -336,7 +336,7 @@ lemma empty_slot_domain_sep_inv: \\_ s. domain_sep_inv irqs (st :: 'state_ext state) (s :: det_ext state)\" unfolding empty_slot_def post_cap_deletion_def by (wpsimp wp: get_cap_wp set_cap_domain_sep_inv set_original_wp dxo_wp_weak - static_imp_wp deleted_irq_handler_domain_sep_inv) + hoare_weak_lift_imp deleted_irq_handler_domain_sep_inv) end @@ -432,14 +432,14 @@ context DomainSepInv_1 begin crunches cap_delete_one for domain_sep_inv[wp]: "\s. 
domain_sep_inv irqs (st :: 'state_ext state) (s :: det_ext state)" - (wp: mapM_x_wp' hoare_unless_wp dxo_wp_weak simp: crunch_simps) + (wp: mapM_x_wp' unless_wp dxo_wp_weak simp: crunch_simps) lemma reply_cancel_ipc_domain_sep_inv[wp]: "\domain_sep_inv irqs st\ reply_cancel_ipc t \\_ s. domain_sep_inv irqs (st :: 'state_ext state) (s :: det_ext state)\" apply (simp add: reply_cancel_ipc_def) - apply (wp select_wp) + apply wp apply (rule hoare_strengthen_post[OF thread_set_tcb_fault_update_domain_sep_inv]) apply auto done @@ -452,7 +452,7 @@ lemma finalise_cap_domain_sep_inv_cap: "\\s. domain_sep_inv_cap irqs cap\ finalise_cap cap b \\rv s :: det_ext state. domain_sep_inv_cap irqs (fst rv)\" - including no_pre + including classic_wp_pre apply (case_tac cap) apply (wp | simp add: o_def split del: if_split split: cap.splits | fastforce split: if_splits simp: domain_sep_inv_cap_def)+ @@ -553,7 +553,7 @@ lemma cap_revoke_domain_sep_inv': apply (wp drop_spec_validE[OF valid_validE[OF preemption_point_domain_sep_inv]] drop_spec_validE[OF valid_validE[OF cap_delete_domain_sep_inv]] drop_spec_validE[OF assertE_wp] drop_spec_validE[OF returnOk_wp] - drop_spec_validE[OF liftE_wp] select_wp + drop_spec_validE[OF liftE_wp] | simp | wp (once) hoare_drop_imps)+ done qed @@ -568,7 +568,7 @@ lemma cap_move_cte_wp_at_other: cap_move cap src_slot dest_slot \\_. cte_wp_at P slot\" unfolding cap_move_def - by (wpsimp wp: set_cdt_cte_wp_at set_cap_cte_wp_at' dxo_wp_weak static_imp_wp set_original_wp) + by (wpsimp wp: set_cdt_cte_wp_at set_cap_cte_wp_at' dxo_wp_weak hoare_weak_lift_imp set_original_wp) lemma cte_wp_at_weak_derived_ReplyCap: "cte_wp_at ((=) (ReplyCap x False R)) slot s @@ -784,7 +784,7 @@ lemma invoke_control_domain_sep_inv: "\domain_sep_inv irqs st and irq_control_inv_valid i\ invoke_irq_control i \\_ s. domain_sep_inv irqs (st :: 'state_ext state) (s :: det_ext state)\" - including no_pre + including classic_wp_pre apply (case_tac i) apply (case_tac irqs) apply (wp cap_insert_domain_sep_inv' | simp )+ @@ -902,8 +902,8 @@ lemma receive_ipc_domain_sep_inv: \\_ s. domain_sep_inv irqs (st :: 'state_ext state) (s :: det_ext state)\" unfolding receive_ipc_def apply (simp add: receive_ipc_def split: cap.splits, clarsimp) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) - apply (rule hoare_seq_ext[OF _ gbn_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ gbn_sp]) apply (case_tac ntfnptr, simp) apply (wp receive_ipc_base_domain_sep_inv get_simple_ko_wp | simp split: if_split option.splits)+ done @@ -1040,9 +1040,9 @@ lemma invoke_tcb_domain_sep_inv: apply (case_tac option) apply ((wp | simp)+)[1] apply (simp add: split_def cong: option.case_cong) - apply (wp checked_cap_insert_domain_sep_inv hoare_vcg_all_lift_R hoare_vcg_all_lift + apply (wp checked_cap_insert_domain_sep_inv hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R cap_delete_domain_sep_inv cap_delete_deletes - dxo_wp_weak cap_delete_valid_cap cap_delete_cte_at static_imp_wp + dxo_wp_weak cap_delete_valid_cap cap_delete_cte_at hoare_weak_lift_imp | wpc | strengthen | simp add: option_update_thread_def emptyable_def tcb_cap_cases_def tcb_cap_valid_def tcb_at_st_tcb_at @@ -1080,11 +1080,11 @@ lemma handle_invocation_domain_sep_inv: | simp split del: if_split)+ apply (rule_tac E="\ft. domain_sep_inv irqs st and valid_objs and sym_refs \ state_refs_of and valid_mdb and (\y. 
valid_fault ft)" - and R="Q" and Q=Q for Q in hoare_post_impErr) + and R="Q" and Q=Q for Q in hoare_strengthen_postE) apply (wp | simp | clarsimp)+ apply (rule_tac E="\ft. domain_sep_inv irqs st and valid_objs and sym_refs \ state_refs_of and valid_mdb and (\y. valid_fault (CapFault x False ft))" - and R="Q" and Q=Q for Q in hoare_post_impErr) + and R="Q" and Q=Q for Q in hoare_strengthen_postE) apply (wp lcs_ex_cap_to2 | clarsimp)+ apply (auto intro: st_tcb_ex_cap simp: ct_in_state_def) done @@ -1150,7 +1150,7 @@ lemma handle_recv_domain_sep_inv: | (rule_tac Q="\rv. invs and (\s. cur_thread s = thread)" in hoare_strengthen_post, wp, clarsimp simp: invs_valid_objs invs_sym_refs))+ apply (rule_tac Q'="\r s. domain_sep_inv irqs st s \ invs s \ - tcb_at thread s \ thread = cur_thread s" in hoare_post_imp_R) + tcb_at thread s \ thread = cur_thread s" in hoare_strengthen_postE_R) apply wp apply ((clarsimp simp add: invs_valid_objs invs_sym_refs | intro impI allI conjI @@ -1174,14 +1174,14 @@ lemma handle_event_domain_sep_inv: handle_recv_domain_sep_inv handle_reply_domain_sep_inv hy_inv | simp add: invs_valid_objs invs_mdb invs_sym_refs valid_fault_def)+ apply (rule_tac E="\rv s. domain_sep_inv irqs (st :: 'state_ext state) (s :: det_ext state) \ - invs s \ valid_fault rv" and R="Q" and Q=Q for Q in hoare_post_impErr) + invs s \ valid_fault rv" and R="Q" and Q=Q for Q in hoare_strengthen_postE) apply (wp | simp add: invs_valid_objs invs_mdb invs_sym_refs valid_fault_def | auto)+ done lemma schedule_domain_sep_inv: "(schedule :: (unit,det_ext) s_monad) \domain_sep_inv irqs (st :: 'state_ext state)\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp add: alternative_wp select_wp guarded_switch_to_lift hoare_drop_imps + apply (wp add: guarded_switch_to_lift hoare_drop_imps del: ethread_get_wp | wpc | clarsimp simp: get_thread_state_def thread_get_def trans_state_update'[symmetric] schedule_choose_new_thread_def)+ diff --git a/proof/access-control/Finalise_AC.thy b/proof/access-control/Finalise_AC.thy index b122ad4d15..9c299d9c1f 100644 --- a/proof/access-control/Finalise_AC.thy +++ b/proof/access-control/Finalise_AC.thy @@ -163,7 +163,7 @@ lemma cancel_badged_sends_respects[wp]: and Q="P" and I="P" for P]) apply simp apply (simp add: bind_assoc) - apply (rule hoare_seq_ext[OF _ gts_sp]) + apply (rule bind_wp[OF _ gts_sp]) apply (rule hoare_pre) apply (wp sts_respects_restart_ep hoare_vcg_const_Ball_lift sts_st_tcb_at_neq) apply clarsimp @@ -347,7 +347,7 @@ lemma reply_cancel_ipc_pas_refined[wp]: \\_. pas_refined aag\" apply (rule hoare_gen_asm) apply (simp add: reply_cancel_ipc_def) - apply (wp add: select_wp wp_transferable del: wp_not_transferable) + apply (wp add: wp_transferable del: wp_not_transferable) apply (rule hoare_strengthen_post[where Q="\_. invs and tcb_at t and pas_refined aag"]) apply (wpsimp wp: hoare_wp_combs thread_set_tcb_fault_reset_invs thread_set_pas_refined)+ apply (frule(1) reply_cap_descends_from_master0) @@ -368,7 +368,7 @@ crunches suspend for pspace_aligned[wp]: "\s :: det_ext state. pspace_aligned s" and valid_vspace_objs[wp]: "\s :: det_ext state. valid_vspace_objs s" and valid_arch_state[wp]: "\s :: det_ext state. valid_arch_state s" - (wp: dxo_wp_weak select_wp hoare_drop_imps simp: crunch_simps) + (wp: dxo_wp_weak hoare_drop_imps simp: crunch_simps) crunch pas_refined[wp]: suspend "pas_refined aag" @@ -391,7 +391,7 @@ lemma cancel_all_signals_respects[wp]: \\_. 
integrity aag X st\" apply (rule hoare_gen_asm) apply (clarsimp simp add: cancel_all_signals_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp], rule hoare_pre) + apply (rule bind_wp[OF _ get_simple_ko_sp], rule hoare_pre) apply (wp mapM_x_inv_wp2 [where I="integrity aag X st" and V="\q s. distinct q \ (\x \ set q. st_tcb_at (blocked_on epptr) x s)"] @@ -444,7 +444,7 @@ lemma unbind_notification_respects: unbind_notification t \\_. integrity aag X st\" apply (clarsimp simp: unbind_notification_def) - apply (rule hoare_seq_ext[OF _ gbn_sp]) + apply (rule bind_wp[OF _ gbn_sp]) apply (wp set_ntfn_respects hoare_vcg_ex_lift gbn_wp | wpc | simp)+ apply (clarsimp simp: pred_tcb_at_def obj_at_def split: option.splits) apply blast @@ -528,12 +528,12 @@ lemma reply_cancel_ipc_respects[wp]: \\_. integrity aag X st\" apply (simp add: reply_cancel_ipc_def) apply (rule hoare_pre) - apply (wp add: select_wp wp_transferable del:wp_not_transferable) + apply (wp add: wp_transferable del:wp_not_transferable) apply simp apply (rule hoare_lift_Pf2[where f="cdt"]) apply (wpsimp wp: hoare_vcg_const_Ball_lift thread_set_integrity_autarch thread_set_invs_trivial[OF ball_tcb_cap_casesI] thread_set_tcb_state_trivial - thread_set_not_state_valid_sched static_imp_wp thread_set_cte_wp_at_trivial + thread_set_not_state_valid_sched hoare_weak_lift_imp thread_set_cte_wp_at_trivial thread_set_pas_refined simp: ran_tcb_cap_cases)+ apply (strengthen invs_psp_aligned invs_vspace_objs invs_arch_state, clarsimp) @@ -549,7 +549,7 @@ lemma cancel_signal_respects[wp]: cancel_signal t ntfnptr \\_. integrity aag X st\" apply (simp add: cancel_signal_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp set_thread_state_integrity_autarch set_ntfn_respects | wpc | fastforce)+ done @@ -559,7 +559,7 @@ lemma cancel_ipc_respects[wp]: cancel_ipc t \\_. integrity aag X st\" apply (simp add: cancel_ipc_def) - apply (rule hoare_seq_ext[OF _ gts_sp]) + apply (rule bind_wp[OF _ gts_sp]) apply (rule hoare_pre) apply (wp set_thread_state_integrity_autarch set_endpoint_respects get_simple_ko_wp | wpc @@ -700,10 +700,10 @@ lemma finalise_cap_auth': "\pas_refined aag and K (pas_cap_cur_auth aag cap)\ finalise_cap cap final \\rv _. 
pas_cap_cur_auth aag (fst rv)\" - including no_pre + including classic_wp_pre apply (rule hoare_gen_asm) apply (cases cap, simp_all split del: if_split) - apply (wp | simp add: comp_def hoare_post_taut[where P = \] split del: if_split + apply (wp | simp add: comp_def hoare_TrueI[where P = \] split del: if_split | fastforce simp: aag_cap_auth_Zombie aag_cap_auth_CNode aag_cap_auth_Thread)+ apply (rule hoare_pre) apply (wp | simp)+ @@ -799,7 +799,7 @@ proof (induct arbitrary: st rule: rec_del.induct, simp_all only: rec_del_fails) apply (simp only: split_def) apply (rule hoare_pre_spec_validE) apply (rule split_spec_bindE) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (rule spec_strengthen_postE) apply (rule spec_valid_conj_liftE1) apply (rule valid_validE_R, rule rec_del_valid_list, rule preemption_point_inv'; @@ -816,7 +816,7 @@ next apply (subst rec_del.simps) apply (simp only: split_def) apply (rule hoare_pre_spec_validE) - apply (wp set_cap_integrity_autarch set_cap_pas_refined_not_transferable "2.hyps" static_imp_wp) + apply (wp set_cap_integrity_autarch set_cap_pas_refined_not_transferable "2.hyps" hoare_weak_lift_imp) apply ((wp preemption_point_inv' | simp add: integrity_subjects_def pas_refined_def)+)[1] apply (simp(no_asm)) apply (rule spec_strengthen_postE) @@ -833,7 +833,7 @@ next apply (simp add: conj_comms) apply (wp set_cap_integrity_autarch set_cap_pas_refined_not_transferable replace_cap_invs final_cap_same_objrefs set_cap_cte_cap_wp_to - set_cap_cte_wp_at hoare_vcg_const_Ball_lift static_imp_wp + set_cap_cte_wp_at hoare_vcg_const_Ball_lift hoare_weak_lift_imp | rule finalise_cap_not_reply_master | simp add: in_monad)+ apply (rule hoare_strengthen_post) @@ -848,7 +848,7 @@ next apply (wp finalise_cap_invs[where slot=slot] finalise_cap_replaceable[where sl=slot] finalise_cap_makes_halted[where slot=slot] - finalise_cap_auth' static_imp_wp)[1] + finalise_cap_auth' hoare_weak_lift_imp)[1] apply (rule finalise_cap_cases[where slot=slot]) apply (clarsimp simp: cte_wp_at_caps_of_state) apply (erule disjE) @@ -871,7 +871,7 @@ next case (3 ptr bits n slot s) show ?case apply (simp add: spec_validE_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply clarsimp done next @@ -889,7 +889,7 @@ next apply (wpsimp wp: rec_del_invs) apply (rule "4.hyps", assumption+) apply (wpsimp wp: set_cap_integrity_autarch set_cap_pas_refined_not_transferable - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: invs_psp_aligned invs_vspace_objs invs_arch_state cte_wp_at_caps_of_state clas_no_asid cli_no_irqs aag_cap_auth_def) apply (drule_tac auth=auth in sta_caps, simp+) @@ -958,18 +958,11 @@ lemma rec_del_respects_CTEDelete_transferable': apply (wp rec_del_respects'') apply (solves \simp\) apply (subst rec_del.simps[abs_def]) - apply (wp add: hoare_K_bind without_preemption_wp static_imp_wp wp_transferable - rec_del_Finalise_transferable - del: wp_not_transferable - | wpc)+ - apply (rule hoare_post_impErr,rule rec_del_Finalise_transferable) - apply simp apply (elim conjE) apply simp apply simp - apply (wp add: hoare_K_bind without_preemption_wp static_imp_wp wp_transferable - rec_del_Finalise_transferable - del: wp_not_transferable - | wpc)+ - apply (rule hoare_post_impErr,rule rec_del_Finalise_transferable) - apply simp apply (elim conjE) apply simp apply simp + apply (wpsimp wp: wp_transferable hoare_weak_lift_imp) + apply (rule hoare_strengthen_postE,rule rec_del_Finalise_transferable) + apply simp apply simp + apply (rule 
hoare_strengthen_postE,rule rec_del_Finalise_transferable) + apply simp apply simp apply (clarsimp) apply (frule(3) cca_to_transferable_or_subject[OF invs_valid_objs invs_mdb]) by (safe; simp) @@ -1085,7 +1078,7 @@ lemma empty_slot_cte_wp_at: by (wpsimp wp: empty_slot_caps_of_state) lemma deleting_irq_handler_caps_of_state_nullinv: - "\\s. \p. P (caps_of_state s(p \ NullCap))\ + "\\s. \p. P ((caps_of_state s)(p \ NullCap))\ deleting_irq_handler irq \\_ s. P (caps_of_state s)\" unfolding deleting_irq_handler_def @@ -1104,7 +1097,7 @@ locale Finalise_AC_2 = Finalise_AC_1 + \\_. (\s. trp \ integrity aag X st s) and pas_refined aag\, \\_. (\s. trp \ integrity aag X st s) and pas_refined aag\" and finalise_cap_caps_of_state_nullinv: - "\P. \\s :: det_ext state. P (caps_of_state s) \ (\p. P (caps_of_state s(p \ NullCap)))\ + "\P. \\s :: det_ext state. P (caps_of_state s) \ (\p. P ((caps_of_state s)(p \ NullCap)))\ finalise_cap cap final \\rv s. P (caps_of_state s)\" and finalise_cap_fst_ret: @@ -1144,7 +1137,7 @@ proof (induct rule: rec_del.induct, simp_all only: rec_del_fails) apply (insert P_Null) apply (subst rec_del.simps) apply (simp only: split_def) - apply (wp static_imp_wp | simp)+ + apply (wp hoare_weak_lift_imp | simp)+ apply (wp empty_slot_cte_wp_at)[1] apply (rule spec_strengthen_postE) apply (rule hoare_pre_spec_validE) @@ -1160,7 +1153,7 @@ next apply (subst rec_del.simps) apply (simp only: split_def without_preemption_def rec_del_call.simps) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp set_cap_cte_wp_at')[1] apply (wp "2.hyps"[simplified without_preemption_def rec_del_call.simps]) apply ((wp preemption_point_inv | simp)+)[1] @@ -1172,7 +1165,7 @@ next apply (rule_tac Q = "\rv' s. (slot \ p \ exposed \ cte_wp_at P p s) \ P (fst rv') \ cte_at slot s" in hoare_post_imp) apply (clarsimp simp: cte_wp_at_caps_of_state) - apply (wp static_imp_wp set_cap_cte_wp_at' finalise_cap_cte_wp_at_nullinv + apply (wp hoare_weak_lift_imp set_cap_cte_wp_at' finalise_cap_cte_wp_at_nullinv finalise_cap_fst_ret get_cap_wp | simp add: is_final_cap_def)+ apply (clarsimp simp add: P_Zombie is_cap_simps cte_wp_at_caps_of_state)+ @@ -1231,7 +1224,7 @@ proof (induct rule: cap_revoke.induct) apply (subst cap_revoke.simps) apply (unfold P_def) apply (wp "1.hyps"[unfolded P_def], simp+) - apply (wp preemption_point_inv hoare_drop_imps select_wp + apply (wp preemption_point_inv hoare_drop_imps rec_del_preserves_cte_zombie_null_insts[where P=Q] | simp add: Q_Null Q_Zombie)+ done diff --git a/proof/access-control/Interrupt_AC.thy b/proof/access-control/Interrupt_AC.thy index f85af98fb9..7fc6003aab 100644 --- a/proof/access-control/Interrupt_AC.thy +++ b/proof/access-control/Interrupt_AC.thy @@ -152,17 +152,11 @@ lemma decode_irq_control_invocation_authorised [wp]: (args \ [] \ (pasSubject aag, Control, pasIRQAbs aag (ucast (args ! 0))) \ pasPolicy aag))\ decode_irq_control_invocation info_label args slot caps \\x s. 
authorised_irq_ctl_inv aag x\, -" - unfolding decode_irq_control_invocation_def authorised_irq_ctl_inv_def + unfolding decode_irq_control_invocation_def authorised_irq_ctl_inv_def Let_def apply (rule hoare_gen_asmE) - apply (rule hoare_pre) - apply (simp add: Let_def split del: if_split cong: if_cong) - apply (wp arch_decode_irq_control_invocation_authorised - whenE_throwError_wp hoare_vcg_imp_lift hoare_drop_imps - | strengthen aag_Control_owns_strg - | simp add: o_def del: hoare_True_E_R)+ + apply (wpsimp wp: weak_if_wp arch_decode_irq_control_invocation_authorised simp: o_def) apply (cases args, simp_all) apply (cases caps, simp_all) - apply (simp add: ucast_mask_drop) apply (auto simp: is_cap_simps cap_auth_conferred_def pas_refined_wellformed pas_refined_all_auth_is_owns aag_cap_auth_def) diff --git a/proof/access-control/Ipc_AC.thy b/proof/access-control/Ipc_AC.thy index 12cdfef121..7a56525566 100644 --- a/proof/access-control/Ipc_AC.thy +++ b/proof/access-control/Ipc_AC.thy @@ -19,7 +19,7 @@ lemma cancel_ipc_receive_blocked_caps_of_state: cancel_ipc t \\_ s. P (caps_of_state s)\" apply (clarsimp simp: cancel_ipc_def) - apply (rule hoare_seq_ext[OF _ gts_sp]) + apply (rule bind_wp[OF _ gts_sp]) apply (rule hoare_pre) apply (wp gts_wp | wpc | simp)+ apply (rule hoare_pre_cont)+ @@ -30,21 +30,21 @@ lemma cancel_ipc_receive_blocked_caps_of_state: lemma send_signal_caps_of_state[wp]: "send_signal ntfnptr badge \\s. P (caps_of_state s)\" apply (clarsimp simp: send_signal_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) - apply (wpsimp wp: dxo_wp_weak cancel_ipc_receive_blocked_caps_of_state gts_wp static_imp_wp + apply (rule bind_wp[OF _ get_simple_ko_sp]) + apply (wpsimp wp: dxo_wp_weak cancel_ipc_receive_blocked_caps_of_state gts_wp hoare_weak_lift_imp simp: update_waiting_ntfn_def) apply (clarsimp simp: fun_upd_def[symmetric] st_tcb_def2) done crunch mdb[wp]: blocked_cancel_ipc, update_waiting_ntfn "\s. P (cdt (s :: det_ext state))" - (wp: crunch_wps hoare_unless_wp select_wp dxo_wp_weak simp: crunch_simps) + (wp: crunch_wps unless_wp dxo_wp_weak simp: crunch_simps) lemma cancel_ipc_receive_blocked_mdb: "\\s :: det_ext state. P (cdt s) \ st_tcb_at receive_blocked t s\ cancel_ipc t \\_ s. P (cdt s)\" apply (clarsimp simp: cancel_ipc_def) - apply (rule hoare_seq_ext[OF _ gts_sp]) + apply (rule bind_wp[OF _ gts_sp]) apply (rule hoare_pre) apply (wp gts_wp | wpc | simp)+ apply (rule hoare_pre_cont)+ @@ -55,7 +55,7 @@ lemma cancel_ipc_receive_blocked_mdb: lemma send_signal_mdb[wp]: "send_signal ntfnptr badge \\s :: det_ext state. P (cdt s)\" apply (clarsimp simp: send_signal_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp dxo_wp_weak gts_wp cancel_ipc_receive_blocked_mdb | wpc | simp)+ apply (clarsimp simp: st_tcb_def2) @@ -77,7 +77,7 @@ lemma cancel_ipc_receive_blocked_pas_refined: cancel_ipc t \\_. pas_refined aag\" apply (clarsimp simp: cancel_ipc_def) - apply (rule hoare_seq_ext[OF _ gts_sp]) + apply (rule bind_wp[OF _ gts_sp]) apply (wp gts_wp | wpc | simp)+ apply (clarsimp simp: st_tcb_def2 receive_blocked_def) done @@ -87,7 +87,7 @@ lemma send_signal_pas_refined: send_signal ntfnptr badge \\_. 
pas_refined aag\" apply (simp add: send_signal_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (wpsimp wp: set_simple_ko_pas_refined update_waiting_ntfn_pas_refined gts_wp set_thread_state_pas_refined cancel_ipc_receive_blocked_pas_refined) apply (fastforce simp: st_tcb_def2) @@ -100,7 +100,7 @@ lemma receive_signal_pas_refined: \\_. pas_refined aag\" apply (simp add: receive_signal_def) apply (cases cap, simp_all) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (wpsimp wp: set_simple_ko_pas_refined set_thread_state_pas_refined simp: do_nbrecv_failed_transfer_def) done @@ -145,7 +145,7 @@ lemma receive_signal_integrity_autarch: \\_. integrity aag X st\" apply (simp add: receive_signal_def) apply (cases cap, simp_all) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (wpsimp wp: set_notification_respects[where auth=Receive] set_thread_state_integrity_autarch as_user_integrity_autarch simp: do_nbrecv_failed_transfer_def) @@ -178,8 +178,8 @@ lemma send_upd_ctxintegrity: integrity aag X st s; st_tcb_at ((=) Running) thread s; get_tcb thread st = Some tcb; get_tcb thread s = Some tcb'\ \ integrity aag X st - (s\kheap := kheap s(thread \ - TCB (tcb'\tcb_arch := arch_tcb_context_set c' (tcb_arch tcb')\))\)" + (s\kheap := (kheap s) + (thread \ TCB (tcb'\tcb_arch := arch_tcb_context_set c' (tcb_arch tcb')\))\)" apply (clarsimp simp: integrity_def tcb_states_of_state_preserved st_tcb_def2) apply (rule conjI) prefer 2 @@ -314,7 +314,7 @@ lemma cancel_ipc_receive_blocked_respects: cancel_ipc t \\_. integrity_once_ts_upd t Running aag X st\" apply (clarsimp simp: cancel_ipc_def bind_assoc) - apply (rule hoare_seq_ext[OF _ gts_sp]) + apply (rule bind_wp[OF _ gts_sp]) apply (rule hoare_name_pre_state) apply (subgoal_tac "case state of BlockedOnReceive x y \ True | _ \ False") apply (simp add: blocked_cancel_ipc_def bind_assoc set_simple_ko_def set_object_def @@ -416,14 +416,14 @@ lemma send_signal_respects: send_signal ntfnptr badge \\_. integrity aag X st\" apply (simp add: send_signal_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (rule hoare_name_pre_state) apply (case_tac "ntfn_obj ntfn = IdleNtfn \ ntfn_bound_tcb ntfn \ None") \ \ntfn-binding case\ apply (rule hoare_pre) apply (wp set_notification_respects[where auth=Notify] as_user_set_register_respects_indirect[where ntfnptr=ntfnptr] - set_thread_state_integrity' sts_st_tcb_at' static_imp_wp + set_thread_state_integrity' sts_st_tcb_at' hoare_weak_lift_imp cancel_ipc_receive_blocked_respects[where ntfnptr=ntfnptr] gts_wp | wpc | simp)+ @@ -451,7 +451,7 @@ lemma send_signal_respects: sts_st_tcb_at' as_user_set_register_respects set_thread_state_pas_refined set_simple_ko_pas_refined set_thread_state_respects_in_signalling [where ntfnptr = ntfnptr] - set_ntfn_valid_objs_at hoare_vcg_disj_lift static_imp_wp + set_ntfn_valid_objs_at hoare_vcg_disj_lift hoare_weak_lift_imp | wpc | simp add: update_waiting_ntfn_def)+ apply clarsimp @@ -535,7 +535,7 @@ next out of the postcondition the conjunct that the return value is derived, and solve this using derived_cap_is_derived, and then solve the rest using derive_cap_is_derived_foo *) - apply (rule_tac Q'="\r s. S r s \ Q r s" for S Q in hoare_post_imp_R) + apply (rule_tac Q'="\r s. 
S r s \ Q r s" for S Q in hoare_strengthen_postE_R) apply (rule hoare_vcg_conj_lift_R) apply (rule derive_cap_is_derived) prefer 2 @@ -626,7 +626,7 @@ lemma get_receive_slots_authorised: | rule hoare_drop_imps | simp add: lookup_cap_def split_def)+ apply (strengthen cnode_cap_all_auth_owns, simp add: aag_cap_auth_def) - apply (wp hoare_vcg_all_lift_R hoare_drop_imps)+ + apply (wp hoare_vcg_all_liftE_R hoare_drop_imps)+ apply clarsimp apply (fastforce simp: is_cap_simps) done @@ -671,7 +671,7 @@ lemma remove_rights_cur_auth: by (clarsimp dest!: remove_rights_cap_auth_conferred_subset) (* FIXME MOVE *) -lemmas hoare_gen_asmE2 = hoare_gen_asmE[where P'=\,simplified pred_and_true_var] +lemmas hoare_gen_asmE2 = hoare_gen_asmE[where P'=\,simplified pred_top_left_neutral] lemma derive_cap_is_transferable: "\K (is_transferable_cap cap)\ derive_cap slot cap \\rv _. is_transferable_cap rv\, -" @@ -756,15 +756,15 @@ lemma transfer_caps_loop_presM_extended: apply (clarsimp simp add: Let_def split_def whenE_def cong: if_cong list.case_cong split del: if_split) apply (rule hoare_pre) - apply (wp eb hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift static_imp_wp + apply (wp eb hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift hoare_weak_lift_imp | assumption | simp split del: if_split)+ apply (rule cap_insert_assume_null) - apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at static_imp_wp)+ + apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at hoare_weak_lift_imp)+ apply (rule hoare_vcg_conj_liftE_R) apply (rule derive_cap_is_derived_foo') apply (rule_tac Q' ="\cap' s. (vo \ cap'\ NullCap \ cte_wp_at (is_derived (cdt s) (aa, b) cap') (aa, b) s) \ - (cap'\ NullCap \ QM s cap')" for QM in hoare_post_imp_R) + (cap'\ NullCap \ QM s cap')" for QM in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption @@ -1048,7 +1048,7 @@ lemma send_ipc_pas_refined: \\_. pas_refined aag\" apply (rule hoare_gen_asm) apply (simp add: send_ipc_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wpc | wp set_thread_state_pas_refined)+ apply (simp add: hoare_if_r_and split del:if_split) @@ -1061,7 +1061,7 @@ lemma send_ipc_pas_refined: (pasObjectAbs aag x21, Reply, pasSubject aag) \ pasPolicy aag)" in hoare_strengthen_post[rotated]) apply simp - apply (wp set_thread_state_pas_refined do_ipc_transfer_pas_refined static_imp_wp gts_wp + apply (wp set_thread_state_pas_refined do_ipc_transfer_pas_refined hoare_weak_lift_imp gts_wp | wpc | simp add: hoare_if_r_and)+ apply (wp hoare_vcg_all_lift hoare_imp_lift_something | simp add: st_tcb_at_tcb_states_of_state_eq)+ @@ -1107,7 +1107,7 @@ lemma complete_signal_integrity: complete_signal ntfnptr thread \\_. integrity aag X st\" apply (simp add: complete_signal_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (wpsimp wp: set_notification_respects[where auth=Receive] set_thread_state_integrity_autarch as_user_integrity_autarch) apply (drule_tac t="pasSubject aag" in sym) @@ -1176,7 +1176,7 @@ lemma complete_signal_pas_refined: complete_signal ntfnptr thread \\_. 
pas_refined aag\" apply (simp add: complete_signal_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp set_simple_ko_pas_refined set_thread_state_pas_refined | wpc)+ apply clarsimp @@ -1206,7 +1206,7 @@ lemma receive_ipc_base_pas_refined: aag_has_auth_to aag Reply (hd list))" in hoare_strengthen_post[rotated]) apply (fastforce simp: pas_refined_refl) - apply (wp static_imp_wp do_ipc_transfer_pas_refined set_simple_ko_pas_refined + apply (wp hoare_weak_lift_imp do_ipc_transfer_pas_refined set_simple_ko_pas_refined set_thread_state_pas_refined get_simple_ko_wp hoare_vcg_all_lift hoare_vcg_imp_lift [OF set_simple_ko_get_tcb, unfolded disj_not1] | wpc @@ -1264,8 +1264,8 @@ lemma receive_ipc_pas_refined: apply (rule hoare_gen_asm) apply (simp add: receive_ipc_def thread_get_def split: cap.split) apply clarsimp - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) - apply (rule hoare_seq_ext[OF _ gbn_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ gbn_sp]) apply (case_tac ntfnptr, simp_all) (* old receive_ipc stuff *) apply (rule hoare_pre) @@ -1273,7 +1273,7 @@ lemma receive_ipc_pas_refined: apply (fastforce simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def) (* ntfn-binding case *) apply clarsimp - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (case_tac "isActive ntfn", simp_all) apply (wp complete_signal_pas_refined, clarsimp) apply fastforce @@ -1365,7 +1365,7 @@ lemma do_normal_transfer_send_integrity_autarch: by (wpsimp wp: as_user_integrity_autarch set_message_info_integrity_autarch copy_mrs_pas_refined copy_mrs_integrity_autarch transfer_caps_integrity_autarch lookup_extra_caps_authorised lookup_extra_caps_length get_mi_length get_mi_valid' - static_imp_wp hoare_vcg_conj_lift hoare_vcg_ball_lift lec_valid_cap') + hoare_weak_lift_imp hoare_vcg_conj_lift hoare_vcg_ball_lift lec_valid_cap') crunch integrity_autarch: setup_caller_cap "integrity aag X st" @@ -1568,13 +1568,13 @@ lemma receive_ipc_integrity_autarch: apply (rule hoare_gen_asm) apply (simp add: receive_ipc_def split: cap.splits) apply clarsimp - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) - apply (rule hoare_seq_ext[OF _ gbn_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ gbn_sp]) apply (case_tac ntfnptr, simp_all) (* old receive case, not bound *) apply (rule hoare_pre, wp receive_ipc_base_integrity) apply (fastforce simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (case_tac "isActive ntfn", simp_all) (* new ntfn-binding case *) apply (rule hoare_pre, wp complete_signal_integrity, clarsimp) @@ -1742,7 +1742,7 @@ locale Ipc_AC_2 = Ipc_AC_1 + and auth_ipc_buffers_kheap_update: "\ x \ auth_ipc_buffers st thread; kheap st thread = Some (TCB tcb); kheap s thread = Some (TCB tcb'); tcb_ipcframe tcb = tcb_ipcframe tcb' \ - \ x \ auth_ipc_buffers (s\kheap := kheap s(thread \ TCB tcb)\) thread" + \ x \ auth_ipc_buffers (s\kheap := (kheap s)(thread \ TCB tcb)\) thread" and auth_ipc_buffers_machine_state_update[simp]: "auth_ipc_buffers (machine_state_update f s) = auth_ipc_buffers (s :: det_ext state)" and empty_slot_extended_list_integ_lift_in_ipc: @@ -1781,7 +1781,7 @@ lemma cap_insert_ext_integrity_in_ipc_autarch: apply (clarsimp simp: integrity_tcb_in_ipc_def integrity_def 
tcb_states_of_state_def get_tcb_def split del: if_split cong: if_cong) - including no_pre + including classic_wp_pre apply wp apply (rule hoare_vcg_conj_lift) apply (simp add: list_integ_def del: split_paired_All) @@ -2234,7 +2234,7 @@ lemma set_cap_respects_in_ipc_reply: \\_. integrity_tcb_in_ipc aag X receiver epptr TRFinal st\" unfolding set_cap_def apply simp - apply (rule hoare_seq_ext[OF _ get_object_sp]) + apply (rule bind_wp[OF _ get_object_sp]) including no_pre apply (wp set_object_wp) apply (rule use_spec') @@ -2325,7 +2325,7 @@ lemma send_ipc_integrity_autarch: \\_. integrity aag X st\" apply (rule hoare_gen_asm) apply (simp add: send_ipc_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (case_tac ep) \ \IdleEP\ apply simp @@ -2351,7 +2351,7 @@ lemma send_ipc_integrity_autarch: apply (wp set_thread_state_integrity_autarch thread_get_wp' do_ipc_transfer_integrity_autarch hoare_vcg_all_lift hoare_drop_imps set_endpoint_respects - | wpc | simp add: get_thread_state_def split del: if_split del: hoare_True_E_R)+ + | wpc | simp add: get_thread_state_def split del: if_split)+ apply (fastforce simp: a_type_def obj_at_def is_ep elim: send_ipc_valid_ep_helper) \ \we don't own head of queue\ apply clarsimp @@ -2365,7 +2365,7 @@ lemma send_ipc_integrity_autarch: apply (fastforce dest!: integrity_tcb_in_ipc_final elim!: integrity_trans) apply (wp setup_caller_cap_respects_in_ipc_reply set_thread_state_respects_in_ipc_autarch[where param_b = Inactive] - hoare_vcg_if_lift static_imp_wp possible_switch_to_respects_in_ipc_autarch + hoare_vcg_if_lift hoare_weak_lift_imp possible_switch_to_respects_in_ipc_autarch set_thread_state_running_respects_in_ipc do_ipc_transfer_respects_in_ipc thread_get_inv set_endpoint_integrity_in_ipc | wpc @@ -2415,7 +2415,7 @@ lemma send_fault_ipc_pas_refined: simp: split_def) apply (rule_tac Q'="\rv s. pas_refined aag s \ is_subject aag (cur_thread s) \ invs s \ valid_fault fault \ is_subject aag (fst (fst rv))" - in hoare_post_imp_R[rotated]) + in hoare_strengthen_postE_R[rotated]) apply (fastforce dest!: cap_auth_caps_of_state simp: invs_valid_objs invs_sym_refs cte_wp_at_caps_of_state aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def AllowSend_def) @@ -2433,7 +2433,7 @@ lemma handle_fault_pas_refined: apply (clarsimp simp: send_fault_ipc_def Let_def) apply wp apply wpsimp - apply (rule hoare_post_impErr[where E=E and F=E for E]) + apply (rule hoare_strengthen_postE[where E=E and F=E for E]) apply (rule valid_validE) apply (wpsimp wp: send_fault_ipc_pas_refined)+ apply fastforce @@ -2481,7 +2481,7 @@ lemma send_fault_ipc_integrity_autarch: \ valid_fault fault \ is_subject aag (cur_thread s) \ is_subject aag (fst (fst rv))" - in hoare_post_imp_R[rotated]) + in hoare_strengthen_postE_R[rotated]) apply (clarsimp simp: invs_valid_objs invs_sym_refs cte_wp_at_caps_of_state obj_at_def) apply (frule(1) caps_of_state_valid) @@ -2655,7 +2655,7 @@ lemma empty_slot_respects_in_ipc_autarch: unfolding empty_slot_def post_cap_deletion_def apply simp apply (wp add: set_cap_respects_in_ipc_autarch set_original_respects_in_ipc_autarch) - apply (wp empty_slot_extended_list_integ_lift_in_ipc empty_slot_list_integrity') + apply (wpsimp wp: empty_slot_extended_list_integ_lift_in_ipc empty_slot_list_integrity') apply simp apply wp+ apply (wp set_cdt_empty_slot_respects_in_ipc_autarch) @@ -2788,9 +2788,9 @@ lemma do_reply_transfer_respects: \\_. 
integrity aag X st\" apply (rule hoare_gen_asm)+ apply (simp add: do_reply_transfer_def thread_get_def get_thread_state_def) - apply (rule hoare_seq_ext[OF _ assert_get_tcb_sp];force?) - apply (rule hoare_seq_ext[OF _ assert_sp]) - apply (rule hoare_seq_ext[OF _ assert_get_tcb_sp];force?) + apply (rule bind_wp[OF _ assert_get_tcb_sp];force?) + apply (rule bind_wp[OF _ assert_sp]) + apply (rule bind_wp[OF _ assert_get_tcb_sp];force?) apply wpc \ \No fault case\ apply (rule hoare_vcg_if_split[where P= "is_subject aag receiver" and f=f and g=f for f, @@ -2823,7 +2823,7 @@ lemma do_reply_transfer_respects: | simp | intro conjI impI)+)[1] \ \receiver is not a subject\ - apply (rule hoare_seq_ext, simp) + apply (rule bind_wp, simp) apply (rule use_spec') \ \Name initial state\ apply (simp add: spec_valid_def) \ \no imp rule?\ apply wp diff --git a/proof/access-control/README.md b/proof/access-control/README.md index 7ade71b431..a56d87e667 100644 --- a/proof/access-control/README.md +++ b/proof/access-control/README.md @@ -24,9 +24,9 @@ top of the [Abstract Spec Invariant Proof](../invariant-abstract/). Building -------- -To build from the `l4v/` directory, run: +To build for the ARM architecture from the `l4v/` directory, run: - ./isabelle/bin/isabelle build -d . -v -b Access + L4V_ARCH=ARM ./run_tests Access Important Theories @@ -35,4 +35,3 @@ Important Theories The top-level theory where these two properties are proved for the kernel is [`Syscall_AC`](Syscall_AC.thy); the bottom-level theory where the properties are defined is [`Access`](Access.thy). - diff --git a/proof/access-control/RISCV64/ArchAccess.thy b/proof/access-control/RISCV64/ArchAccess.thy index 98caca94ee..8c48f69eb3 100644 --- a/proof/access-control/RISCV64/ArchAccess.thy +++ b/proof/access-control/RISCV64/ArchAccess.thy @@ -186,10 +186,10 @@ lemmas integrity_asids_kh_upds = declare integrity_asids_def[simp] lemma integrity_asids_kh_upds': - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ CNode sz cs)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ TCB tcb)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ Endpoint ep)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ Notification ntfn)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ CNode sz cs)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ TCB tcb)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ Endpoint ep)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ Notification ntfn)\) s" by (auto simp: opt_map_def split: option.splits) lemma integrity_asids_kh_update: diff --git a/proof/access-control/RISCV64/ArchAccess_AC.thy b/proof/access-control/RISCV64/ArchAccess_AC.thy index c07e39b7d6..0d5405dbed 100644 --- a/proof/access-control/RISCV64/ArchAccess_AC.thy +++ b/proof/access-control/RISCV64/ArchAccess_AC.thy @@ -82,7 +82,7 @@ lemma integrity_asids_refl[Access_AC_assms, simp]: lemma integrity_asids_update_autarch[Access_AC_assms]: "\ \x a. integrity_asids aag {pasSubject aag} x a st s; is_subject aag ptr \ - \ \x a. integrity_asids aag {pasSubject aag} x a st (s\kheap := kheap s(ptr \ obj)\)" + \ \x a. 
integrity_asids aag {pasSubject aag} x a st (s\kheap := (kheap s)(ptr \ obj)\)" by (auto simp: opt_map_def) end diff --git a/proof/access-control/RISCV64/ArchArch_AC.thy b/proof/access-control/RISCV64/ArchArch_AC.thy index e1735b0338..7d1c4cb27a 100644 --- a/proof/access-control/RISCV64/ArchArch_AC.thy +++ b/proof/access-control/RISCV64/ArchArch_AC.thy @@ -541,7 +541,7 @@ lemma perform_pt_inv_unmap_pas_refined: lemma vs_lookup_PageTablePTE: "\ vs_lookup_table level asid vref s' = Some (lvl', pt); pspace_aligned s; valid_vspace_objs s; valid_asid_table s; - invalid_pte_at p s; ptes_of s' = ptes_of s (p \ pte); is_PageTablePTE pte; + invalid_pte_at p s; ptes_of s' = (ptes_of s)(p \ pte); is_PageTablePTE pte; asid_pools_of s' = asid_pools_of s; asid_table s' = asid_table s; vref \ user_region; pts_of s (the (pte_ref pte)) = Some empty_pt; pt \ pptr_from_pte pte \ @@ -584,7 +584,7 @@ lemma vs_lookup_PageTablePTE: lemma vs_lookup_PageTablePTE': "\ vs_lookup_table level asid vref s = Some (lvl', pt); pspace_aligned s; valid_vspace_objs s; valid_asid_table s; - invalid_pte_at p s; ptes_of s' = ptes_of s (p \ pte); is_PageTablePTE pte; + invalid_pte_at p s; ptes_of s' = (ptes_of s)(p \ pte); is_PageTablePTE pte; asid_pools_of s' = asid_pools_of s; asid_table s' = asid_table s; vref \ user_region \ \ \level' \ level. vs_lookup_table level' asid vref s' = Some (lvl', pt)" apply (induct level arbitrary: lvl' pt rule: bit0.from_top_full_induct[where y=max_pt_level]) @@ -915,7 +915,7 @@ lemma unmap_page_table_respects: unmap_page_table asid vaddr pt \\_. integrity aag X st\" apply (simp add: unmap_page_table_def sfence_def) - apply (wpsimp wp: pt_lookup_from_level_is_subject dmo_mol_respects hoare_vcg_conj_liftE + apply (wpsimp wp: pt_lookup_from_level_is_subject dmo_mol_respects hoare_vcg_conj_liftE_weaker store_pte_respects pt_lookup_from_level_wrp[where Q="\_. 
integrity aag X st"] | wp (once) hoare_drop_imps hoare_vcg_E_elim)+ apply (intro conjI; clarsimp) @@ -1237,7 +1237,7 @@ lemma perform_asid_control_invocation_respects: apply (wpc, simp) apply (wpsimp wp: set_cap_integrity_autarch cap_insert_integrity_autarch asid_table_entry_update_integrity retype_region_integrity[where sz=12] - static_imp_wp delete_objects_valid_vspace_objs delete_objects_valid_arch_state) + hoare_weak_lift_imp delete_objects_valid_vspace_objs delete_objects_valid_arch_state) apply (clarsimp simp: authorised_asid_control_inv_def ptr_range_def add.commute range_cover_def obj_bits_api_def default_arch_object_def pageBits_def word_bits_def) apply (subst is_aligned_neg_mask_eq[THEN sym], assumption) @@ -1317,10 +1317,10 @@ lemma perform_asid_control_invocation_pas_refined: apply (rule hoare_gen_asm) apply (simp add: perform_asid_control_invocation_def ) apply wpc - apply (rule pas_refined_asid_control_helper hoare_seq_ext hoare_K_bind)+ - apply (wp cap_insert_pas_refined' static_imp_wp | simp)+ + apply (rule pas_refined_asid_control_helper bind_wp hoare_K_bind)+ + apply (wp cap_insert_pas_refined' hoare_weak_lift_imp | simp)+ apply ((wp retype_region_pas_refined'[where sz=pageBits] - hoare_vcg_ex_lift hoare_vcg_all_lift static_imp_wp hoare_wp_combs hoare_drop_imp + hoare_vcg_ex_lift hoare_vcg_all_lift hoare_weak_lift_imp hoare_wp_combs hoare_drop_imp retype_region_invs_extras(1)[where sz = pageBits] retype_region_invs_extras(4)[where sz = pageBits] retype_region_invs_extras(6)[where sz = pageBits] @@ -1329,7 +1329,7 @@ lemma perform_asid_control_invocation_pas_refined: max_index_upd_invs_simple max_index_upd_caps_overlap_reserved hoare_vcg_ex_lift set_cap_cte_wp_at hoare_vcg_disj_lift set_free_index_valid_pspace set_cap_descendants_range_in set_cap_no_overlap get_cap_wp set_cap_caps_no_overlap - hoare_vcg_all_lift static_imp_wp retype_region_invs_extras + hoare_vcg_all_lift hoare_weak_lift_imp retype_region_invs_extras set_cap_pas_refined_not_transferable arch_update_cap_valid_mdb | simp add: do_machine_op_def region_in_kernel_window_def cte_wp_at_neg2)+)[3] apply (rename_tac frame slot parent base ) diff --git a/proof/access-control/RISCV64/ArchCNode_AC.thy b/proof/access-control/RISCV64/ArchCNode_AC.thy index 245bc1ee9b..ee263cd699 100644 --- a/proof/access-control/RISCV64/ArchCNode_AC.thy +++ b/proof/access-control/RISCV64/ArchCNode_AC.thy @@ -97,18 +97,18 @@ crunches set_cdt crunches prepare_thread_delete, arch_finalise_cap for cur_domain[CNode_AC_assms, wp]:"\s. 
P (cur_domain s)" - (wp: crunch_wps select_wp hoare_vcg_if_lift2 simp: unless_def) + (wp: crunch_wps hoare_vcg_if_lift2 simp: unless_def) lemma state_vrefs_tcb_upd[CNode_AC_assms]: "\ pspace_aligned s; valid_vspace_objs s; valid_arch_state s; tcb_at t s \ - \ state_vrefs (s\kheap := kheap s(t \ TCB tcb)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(t \ TCB tcb)\) = state_vrefs s" apply (rule state_vrefs_eqI) by (fastforce simp: opt_map_def obj_at_def is_obj_defs valid_arch_state_def)+ lemma state_vrefs_simple_type_upd[CNode_AC_assms]: "\ pspace_aligned s; valid_vspace_objs s; valid_arch_state s; ko_at ko ptr s; is_simple_type ko; a_type ko = a_type (f val) \ - \ state_vrefs (s\kheap := kheap s(ptr \ f val)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(ptr \ f val)\) = state_vrefs s" apply (case_tac ko; case_tac "f val"; clarsimp) by (fastforce intro!: state_vrefs_eqI simp: opt_map_def obj_at_def is_obj_defs valid_arch_state_def)+ diff --git a/proof/access-control/RISCV64/ArchDomainSepInv.thy b/proof/access-control/RISCV64/ArchDomainSepInv.thy index 9c20d3ae96..442b1f0946 100644 --- a/proof/access-control/RISCV64/ArchDomainSepInv.thy +++ b/proof/access-control/RISCV64/ArchDomainSepInv.thy @@ -52,7 +52,7 @@ lemma perform_page_invocation_domain_sep_inv: \\_. domain_sep_inv irqs st\" apply (rule hoare_pre) apply (wp mapM_wp[OF _ subset_refl] set_cap_domain_sep_inv mapM_x_wp[OF _ subset_refl] - perform_page_invocation_domain_sep_inv_get_cap_helper static_imp_wp + perform_page_invocation_domain_sep_inv_get_cap_helper hoare_weak_lift_imp | simp add: perform_page_invocation_def o_def | wpc)+ done @@ -72,7 +72,7 @@ lemma perform_asid_control_invocation_domain_sep_inv: unfolding perform_asid_control_invocation_def apply (rule hoare_pre) apply (wp modify_wp cap_insert_domain_sep_inv' set_cap_domain_sep_inv - get_cap_domain_sep_inv_cap[where st=st] static_imp_wp + get_cap_domain_sep_inv_cap[where st=st] hoare_weak_lift_imp | wpc | simp )+ done diff --git a/proof/access-control/RISCV64/ArchFinalise_AC.thy b/proof/access-control/RISCV64/ArchFinalise_AC.thy index 821507266c..ccd7b00481 100644 --- a/proof/access-control/RISCV64/ArchFinalise_AC.thy +++ b/proof/access-control/RISCV64/ArchFinalise_AC.thy @@ -68,7 +68,7 @@ lemma delete_asid_pool_pas_refined[wp]: lemma delete_asid_pas_refined[wp]: "delete_asid asid pt \pas_refined aag\" unfolding delete_asid_def - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (wpsimp simp: set_asid_pool_def wp: set_object_wp hoare_vcg_imp_lift' hoare_vcg_all_lift) apply (rule_tac Q="\_ s. riscv_asid_table (arch_state s) = asid_table \ ako_at (ASIDPool pool) x2 s \ pas_refined aag s" @@ -121,7 +121,7 @@ lemma sbn_st_vrefs[Finalise_AC_assms]: lemma arch_finalise_cap_auth'[Finalise_AC_assms]: "\pas_refined aag\ arch_finalise_cap x12 final \\rv s. pas_cap_cur_auth aag (fst rv)\" unfolding arch_finalise_cap_def - by (wp | wpc | simp add: comp_def hoare_post_taut[where P = \] split del: if_split)+ + by (wp | wpc | simp add: comp_def hoare_TrueI[where P = \] split del: if_split)+ lemma arch_finalise_cap_obj_refs[Finalise_AC_assms]: "\\s. \x \ aobj_ref' acap. P x\ @@ -172,7 +172,7 @@ crunches set_asid_pool lemma set_asid_pool_tcb_states_of_state[wp]: "set_asid_pool p pool \\s. P (tcb_states_of_state s)\" apply (wpsimp wp: set_object_wp_strong simp: obj_at_def set_asid_pool_def) - apply (prop_tac "\x. get_tcb x (s\kheap := kheap s(p \ ArchObj (ASIDPool pool))\) = get_tcb x s") + apply (prop_tac "\x. 
get_tcb x (s\kheap := (kheap s)(p \ ArchObj (ASIDPool pool))\) = get_tcb x s") apply (auto simp: tcb_states_of_state_def get_tcb_def) done @@ -266,7 +266,7 @@ proof (induct rule: cap_revoke.induct[where ?a1.0=s]) qed lemma finalise_cap_caps_of_state_nullinv[Finalise_AC_assms]: - "\\s. P (caps_of_state s) \ (\p. P (caps_of_state s(p \ NullCap)))\ + "\\s. P (caps_of_state s) \ (\p. P ((caps_of_state s)(p \ NullCap)))\ finalise_cap cap final \\_ s. P (caps_of_state s)\" by (cases cap; @@ -280,7 +280,7 @@ lemma finalise_cap_fst_ret[Finalise_AC_assms]: "\\_. P NullCap \ (\a b c. P (Zombie a b c))\ finalise_cap cap is_final \\rv _. P (fst rv)\" - including no_pre + including classic_wp_pre apply (cases cap, simp_all add: arch_finalise_cap_def split del: if_split) apply (wp | simp add: comp_def split del: if_split | fastforce)+ apply (rule hoare_pre) diff --git a/proof/access-control/RISCV64/ArchInterrupt_AC.thy b/proof/access-control/RISCV64/ArchInterrupt_AC.thy index 6aec17f88b..cf742b0c30 100644 --- a/proof/access-control/RISCV64/ArchInterrupt_AC.thy +++ b/proof/access-control/RISCV64/ArchInterrupt_AC.thy @@ -87,17 +87,12 @@ lemma arch_decode_irq_control_invocation_authorised[Interrupt_AC_assms]: (args \ [] \ (pasSubject aag, Control, pasIRQAbs aag (ucast (args ! 0))) \ pasPolicy aag))\ arch_decode_irq_control_invocation info_label args slot caps \\x _. arch_authorised_irq_ctl_inv aag x\, -" - unfolding decode_irq_control_invocation_def arch_decode_irq_control_invocation_def + unfolding decode_irq_control_invocation_def arch_decode_irq_control_invocation_def Let_def authorised_irq_ctl_inv_def arch_authorised_irq_ctl_inv_def arch_check_irq_def apply (rule hoare_gen_asmE) - apply (rule hoare_pre) - apply (simp add: Let_def split del: if_split cong: if_cong) - apply (wp whenE_throwError_wp hoare_vcg_imp_lift hoare_drop_imps - | strengthen aag_Control_owns_strg - | simp add: o_def del: hoare_True_E_R)+ + apply (wpsimp wp: weak_if_wp) apply (cases args, simp_all) apply (cases caps, simp_all) - apply (simp add: ucast_mask_drop) apply (auto simp: is_cap_simps cap_auth_conferred_def pas_refined_wellformed pas_refined_all_auth_is_owns aag_cap_auth_def) diff --git a/proof/access-control/RISCV64/ArchIpc_AC.thy b/proof/access-control/RISCV64/ArchIpc_AC.thy index 091abf877a..c3cd95d626 100644 --- a/proof/access-control/RISCV64/ArchIpc_AC.thy +++ b/proof/access-control/RISCV64/ArchIpc_AC.thy @@ -175,7 +175,7 @@ lemma handle_arch_fault_reply_respects[Ipc_AC_assms]: lemma auth_ipc_buffers_kheap_update[Ipc_AC_assms]: "\ x \ auth_ipc_buffers st thread; kheap st thread = Some (TCB tcb); kheap s thread = Some (TCB tcb'); tcb_ipcframe tcb = tcb_ipcframe tcb' \ - \ x \ auth_ipc_buffers (s\kheap := kheap s(thread \ TCB tcb)\) thread" + \ x \ auth_ipc_buffers (s\kheap := (kheap s)(thread \ TCB tcb)\) thread" by (clarsimp simp: auth_ipc_buffers_member_def get_tcb_def caps_of_state_tcb) lemma auth_ipc_buffers_machine_state_update[Ipc_AC_assms, simp]: diff --git a/proof/access-control/RISCV64/ArchRetype_AC.thy b/proof/access-control/RISCV64/ArchRetype_AC.thy index ddfe34ac64..22abf7683e 100644 --- a/proof/access-control/RISCV64/ArchRetype_AC.thy +++ b/proof/access-control/RISCV64/ArchRetype_AC.thy @@ -292,8 +292,7 @@ lemma dmo_freeMemory_respects[Retype_AC_assms]: apply wp apply clarsimp apply (erule use_valid) - apply (wp mol_respects mapM_x_wp' storeWord_integrity_autarch) - apply simp + apply (wpsimp wp: mol_respects mapM_x_wp' storeWord_integrity_autarch) apply (clarsimp simp: word_size_def word_size_bits_def 
word_bits_def upto_enum_step_shift_red[where us=3, simplified]) apply (erule bspec) diff --git a/proof/access-control/RISCV64/ArchSyscall_AC.thy b/proof/access-control/RISCV64/ArchSyscall_AC.thy index a68f708e41..782a59223c 100644 --- a/proof/access-control/RISCV64/ArchSyscall_AC.thy +++ b/proof/access-control/RISCV64/ArchSyscall_AC.thy @@ -145,7 +145,7 @@ lemma handle_reserved_irq_arch_state[Syscall_AC_assms, wp]: unfolding handle_reserved_irq_def by wpsimp crunch ct_active [Syscall_AC_assms, wp]: arch_post_cap_deletion "ct_active" - (wp: crunch_wps filterM_preserved hoare_unless_wp simp: crunch_simps ignore: do_extended_op) + (wp: crunch_wps filterM_preserved unless_wp simp: crunch_simps ignore: do_extended_op) crunches arch_post_modify_registers, arch_invoke_irq_control, diff --git a/proof/access-control/RISCV64/ArchTcb_AC.thy b/proof/access-control/RISCV64/ArchTcb_AC.thy index 703a1ae1f6..9a9314a0c9 100644 --- a/proof/access-control/RISCV64/ArchTcb_AC.thy +++ b/proof/access-control/RISCV64/ArchTcb_AC.thy @@ -33,19 +33,19 @@ lemma invoke_tcb_tc_respects_aag[Tcb_AC_assms]: apply (subst invoke_tcb.simps) apply (subst option_update_thread_def) apply (subst set_priority_extended.dxo_eq) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule_tac P="case ep of Some v \ length v = word_bits | _ \ True" in hoare_gen_asm) apply (simp only: split_def) - apply (((simp add: conj_comms del: hoare_True_E_R, + apply (((simp add: conj_comms, strengthen imp_consequent[where Q="x = None" for x], simp cong: conj_cong) | strengthen invs_psp_aligned invs_vspace_objs invs_arch_state - | rule wp_split_const_if wp_split_const_if_R hoare_vcg_all_lift_R + | rule wp_split_const_if wp_split_const_if_R hoare_vcg_all_liftE_R hoare_vcg_E_elim hoare_vcg_const_imp_lift_R hoare_vcg_R_conj | wp restart_integrity_autarch set_mcpriority_integrity_autarch as_user_integrity_autarch thread_set_integrity_autarch option_update_thread_integrity_autarch - opt_update_thread_valid_sched static_imp_wp + opt_update_thread_valid_sched hoare_weak_lift_imp cap_insert_integrity_autarch checked_insert_pas_refined cap_delete_respects' cap_delete_pas_refined' check_cap_inv2[where Q="\_. integrity aag X st"] @@ -54,7 +54,7 @@ lemma invoke_tcb_tc_respects_aag[Tcb_AC_assms]: out_invs_trivial case_option_wpE cap_delete_deletes cap_delete_valid_cap cap_insert_valid_cap out_cte_at cap_insert_cte_at cap_delete_cte_at out_valid_cap out_tcb_valid - hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_invs_trivial[OF ball_tcb_cap_casesI] hoare_vcg_all_lift thread_set_valid_cap out_emptyable @@ -78,7 +78,6 @@ lemma invoke_tcb_tc_respects_aag[Tcb_AC_assms]: cap_delete_pas_refined'[THEN valid_validE_E] thread_set_cte_wp_at_trivial | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def a_type_def partial_inv_def - del: hoare_True_E_R | wpc | strengthen invs_mdb use_no_cap_to_obj_asid_strg tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] diff --git a/proof/access-control/Retype_AC.thy b/proof/access-control/Retype_AC.thy index 913640a602..c17bf3140a 100644 --- a/proof/access-control/Retype_AC.thy +++ b/proof/access-control/Retype_AC.thy @@ -266,7 +266,7 @@ lemma delete_objects_respects[wp]: delete_objects ptr bits \\_. integrity aag X st\" apply (simp add: delete_objects_def) - apply (rule_tac seq_ext) + apply (rule_tac bind_wp_fwd) apply (rule hoare_triv[of P _ "%_. 
P" for P]) apply (wp dmo_freeMemory_respects | simp)+ by (fastforce simp: ptr_range_def intro!: detype_integrity) @@ -354,7 +354,7 @@ lemma invoke_untyped_integrity: init_arch_objects_integrity retype_region_integrity retype_region_ret_is_subject set_cap_integrity_autarch hoare_vcg_if_lift - hoare_whenE_wp reset_untyped_cap_integrity + whenE_wp reset_untyped_cap_integrity | clarsimp simp: split_paired_Ball | erule in_set_zipE | blast)+ @@ -497,7 +497,7 @@ lemma retype_region_ext_pas_refined: "\pas_refined aag and pas_cur_domain aag and K (\x\ set xs. is_subject aag x)\ retype_region_ext xs ty \\_. pas_refined aag\" - including no_pre + including classic_wp_pre apply (subst and_assoc[symmetric]) apply (wp retype_region_ext_extended.pas_refined_tcb_domain_map_wellformed') apply (simp add: retype_region_ext_def, wp) @@ -970,7 +970,7 @@ lemma reset_untyped_cap_valid_vspace_objs: \\_. valid_vspace_objs\" unfolding reset_untyped_cap_def apply (wpsimp wp: mapME_x_inv_wp preemption_point_inv) - apply (wp static_imp_wp delete_objects_valid_vspace_objs) + apply (wp hoare_weak_lift_imp delete_objects_valid_vspace_objs) apply (wpsimp wp: get_cap_wp)+ apply (cases src_slot) apply (auto simp: cte_wp_at_caps_of_state) @@ -1008,7 +1008,7 @@ lemma reset_untyped_cap_valid_arch_state: \\_. valid_arch_state\" unfolding reset_untyped_cap_def apply (wpsimp wp: mapME_x_inv_wp preemption_point_inv) - apply (wp static_imp_wp delete_objects_valid_arch_state) + apply (wp hoare_weak_lift_imp delete_objects_valid_arch_state) apply (wpsimp wp: get_cap_wp)+ apply (cases src_slot) apply (auto simp: cte_wp_at_caps_of_state) @@ -1192,7 +1192,7 @@ lemma decode_untyped_invocation_authorised: is_subject aag (fst slot) \ pas_refined aag s \ word_size_bits \ sz \ sz < word_bits \ is_aligned base sz \ (is_cnode_cap (excaps ! 0) \ (\x\obj_refs_ac (excaps ! 0). is_subject aag x))" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp data_to_obj_type_ret_not_asid_pool data_to_obj_type_inv2) apply (case_tac "excaps ! 0", simp_all, fastforce simp: nonzero_data_to_nat_simp)[1] apply (wp whenE_throwError_wp)+ diff --git a/proof/access-control/Syscall_AC.thy b/proof/access-control/Syscall_AC.thy index 7850cf705a..abe043fae1 100644 --- a/proof/access-control/Syscall_AC.thy +++ b/proof/access-control/Syscall_AC.thy @@ -135,7 +135,7 @@ lemma decode_invocation_authorised: decode_arch_invocation_authorised | strengthen cnode_eq_strg | wpc | simp add: comp_def authorised_invocation_def decode_invocation_def - split del: if_split del: hoare_True_E_R + split del: if_split | wp (once) hoare_FalseE_R)+ apply (clarsimp simp: aag_has_Control_iff_owns split_def aag_cap_auth_def) apply (cases cap, simp_all) @@ -201,7 +201,7 @@ lemma lcs_reply_owns: "\pas_refined aag and K (is_subject aag thread)\ lookup_cap_and_slot thread ptr \\rv _. \ep. (\m R. fst rv = ReplyCap ep m R \ AllowGrant \ R) \ is_subject aag ep\, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_pre) apply (rule hoare_vcg_conj_lift_R [where S = "K (pas_refined aag)"]) apply (rule lookup_cap_and_slot_cur_auth) @@ -218,7 +218,7 @@ lemma lookup_cap_and_slot_valid_fault3: lookup_cap_and_slot thread cptr -, \\ft _. 
valid_fault (CapFault (of_bl cptr) rp ft)\" apply (unfold validE_E_def) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule lookup_cap_and_slot_valid_fault) apply auto done @@ -279,8 +279,7 @@ lemma handle_invocation_pas_refined: | strengthen invs_psp_aligned invs_vspace_objs invs_arch_state | wpc | rule hoare_drop_imps - | simp add: if_apply_def2 conj_comms split del: if_split - del: hoare_True_E_R)+, + | simp add: if_apply_def2 conj_comms split del: if_split)+, (wp lookup_extra_caps_auth lookup_extra_caps_authorised decode_invocation_authorised lookup_cap_and_slot_authorised lookup_cap_and_slot_cur_auth as_user_pas_refined lookup_cap_and_slot_valid_fault3 hoare_vcg_const_imp_lift_R @@ -301,7 +300,7 @@ lemma handle_invocation_respects: reply_from_kernel_integrity_autarch set_thread_state_integrity_autarch hoare_vcg_conj_lift - hoare_vcg_all_lift_R hoare_vcg_all_lift + hoare_vcg_all_liftE_R hoare_vcg_all_lift | rule hoare_drop_imps | wpc | simp add: if_apply_def2 @@ -333,14 +332,14 @@ lemma handle_recv_pas_refined: apply (wp handle_fault_pas_refined receive_ipc_pas_refined receive_signal_pas_refined get_cap_auth_wp [where aag=aag] lookup_slot_for_cnode_op_authorised lookup_slot_for_thread_authorised lookup_slot_for_thread_cap_fault - hoare_vcg_all_lift_R get_simple_ko_wp + hoare_vcg_all_liftE_R get_simple_ko_wp | wpc | simp | (rule_tac Q="\rv s. invs s \ is_subject aag thread \ aag_has_auth_to aag Receive thread" in hoare_strengthen_post, wp, clarsimp simp: invs_valid_objs invs_sym_refs))+ apply (rule_tac Q'="\rv s. pas_refined aag s \ invs s \ tcb_at thread s \ cur_thread s = thread \ is_subject aag (cur_thread s) - \ is_subject aag thread" in hoare_post_imp_R [rotated]) + \ is_subject aag thread" in hoare_strengthen_postE_R [rotated]) apply (fastforce simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def valid_fault_def) apply (wp user_getreg_inv | strengthen invs_vobjs_strgs | simp)+ @@ -365,7 +364,7 @@ lemma handle_recv_integrity: in hoare_strengthen_post, wp, clarsimp simp: invs_valid_objs invs_sym_refs)+ apply (rule_tac Q'="\rv s. pas_refined aag s \ einvs s \ is_subject aag (cur_thread s) \ tcb_at thread s \ cur_thread s = thread \ is_subject aag thread - \ integrity aag X st s" in hoare_post_imp_R [rotated]) + \ integrity aag X st s" in hoare_strengthen_postE_R [rotated]) apply (fastforce simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def valid_fault_def) apply wpsimp+ @@ -699,7 +698,7 @@ lemma handle_event_integrity: handle_reply_respects handle_fault_integrity_autarch handle_interrupt_integrity handle_vm_fault_integrity handle_reply_pas_refined handle_vm_fault_valid_fault - handle_reply_valid_sched alternative_wp select_wp + handle_reply_valid_sched hoare_vcg_conj_lift hoare_vcg_all_lift hoare_drop_imps simp: domain_sep_inv_def | rule dmo_wp hoare_vcg_E_elim @@ -899,12 +898,12 @@ lemma schedule_integrity: schedule \\_. 
integrity aag X st\" apply (simp add: schedule_def) - apply (wpsimp wp: alternative_wp switch_to_thread_respects' select_wp guarded_switch_to_lift + apply (wpsimp wp: switch_to_thread_respects' guarded_switch_to_lift switch_to_idle_thread_respects choose_thread_respects gts_wp hoare_drop_imps set_scheduler_action_cnt_valid_sched append_thread_queued enqueue_thread_queued tcb_sched_action_enqueue_valid_blocked_except tcb_sched_action_append_integrity' | simp add: allActiveTCBs_def schedule_choose_new_thread_def - | rule hoare_pre_cont[where a=next_domain])+ + | rule hoare_pre_cont[where f=next_domain])+ apply (auto simp: obj_at_def st_tcb_at_def not_cur_thread_2_def valid_sched_def valid_sched_action_def weak_valid_sched_action_def valid_sched_action_switch_subject_thread) @@ -949,14 +948,14 @@ crunch pas_refined[wp]: choose_thread "pas_refined aag" lemma schedule_pas_refined: "schedule \pas_refined aag\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp add: alternative_wp guarded_switch_to_lift switch_to_thread_pas_refined select_wp - switch_to_idle_thread_pas_refined gts_wp - guarded_switch_to_lift switch_to_thread_respects_pasMayEditReadyQueues - choose_thread_respects_pasMayEditReadyQueues - next_domain_valid_sched next_domain_valid_queues gts_wp hoare_drop_imps - set_scheduler_action_cnt_valid_sched enqueue_thread_queued - tcb_sched_action_enqueue_valid_blocked_except - del: ethread_get_wp + apply (wp add: guarded_switch_to_lift switch_to_thread_pas_refined + switch_to_idle_thread_pas_refined gts_wp + guarded_switch_to_lift switch_to_thread_respects_pasMayEditReadyQueues + choose_thread_respects_pasMayEditReadyQueues + next_domain_valid_sched next_domain_valid_queues gts_wp hoare_drop_imps + set_scheduler_action_cnt_valid_sched enqueue_thread_queued + tcb_sched_action_enqueue_valid_blocked_except + del: ethread_get_wp | wpc | simp add: schedule_choose_new_thread_def)+ done @@ -983,7 +982,7 @@ lemma ct_active_update[simp]: lemma set_cap_ct_active[wp]: "set_cap ptr c \ct_active \" apply (rule hoare_pre) - apply (wps | wpsimp wp: select_wp sts_st_tcb_at_cases thread_set_no_change_tcb_state + apply (wps | wpsimp wp: sts_st_tcb_at_cases thread_set_no_change_tcb_state simp: crunch_simps ct_in_state_def)+ done @@ -1027,14 +1026,14 @@ lemma cancel_all_ipc_ct_active[wp]: done crunch ct_active[wp]: cap_swap_for_delete "ct_active" - (wp: crunch_wps filterM_preserved hoare_unless_wp simp: crunch_simps ignore: do_extended_op) + (wp: crunch_wps filterM_preserved unless_wp simp: crunch_simps ignore: do_extended_op) crunch ct_active[wp]: post_cap_deletion, empty_slot "\s :: det_ext state. ct_active s" (simp: crunch_simps empty_slot_ext_def ignore: do_extended_op - wp: crunch_wps filterM_preserved hoare_unless_wp) + wp: crunch_wps filterM_preserved unless_wp) crunch cur_thread[wp]: cap_swap_for_delete, finalise_cap "\s :: det_ext state. P (cur_thread s)" - (wp: select_wp dxo_wp_weak crunch_wps simp: crunch_simps) + (wp: dxo_wp_weak crunch_wps simp: crunch_simps) lemma rec_del_cur_thread[wp]: "rec_del a \\s :: det_ext state. P (cur_thread s)\" @@ -1139,8 +1138,7 @@ lemma call_kernel_integrity': apply (simp add: call_kernel_def) apply (simp only: spec_valid_def) apply (wpsimp wp: activate_thread_respects schedule_integrity_pasMayEditReadyQueues - handle_interrupt_integrity dmo_wp alternative_wp - select_wp handle_interrupt_pas_refined) + handle_interrupt_integrity dmo_wp handle_interrupt_pas_refined) apply (clarsimp simp: if_fun_split) apply (rule_tac Q="\rv ms. 
(rv \ None \ the rv \ non_kernel_IRQs) \ R True (domain_sep_inv (pasMaySendIrqs aag) st' s) rv ms" @@ -1148,7 +1146,7 @@ lemma call_kernel_integrity': (pasMaySendIrqs aag \ interrupt_states s (the rv) \ IRQSignal) rv ms" for R in hoare_strengthen_post[rotated], fastforce simp: domain_sep_inv_def) apply (wpsimp wp: getActiveIRQ_rv_None hoare_drop_imps getActiveIRQ_inv) - apply (rule hoare_post_impErr, + apply (rule hoare_strengthen_postE, rule_tac Q="integrity aag X st and pas_refined aag and einvs and guarded_pas_domain aag and domain_sep_inv (pasMaySendIrqs aag) st' and is_subject aag \ cur_thread @@ -1182,7 +1180,7 @@ lemma call_kernel_pas_refined: \\_. pas_refined aag\" apply (simp add: call_kernel_def ) apply (wp activate_thread_pas_refined schedule_pas_refined handle_interrupt_pas_refined - do_machine_op_pas_refined dmo_wp alternative_wp select_wp hoare_drop_imps getActiveIRQ_inv + do_machine_op_pas_refined dmo_wp hoare_drop_imps getActiveIRQ_inv | simp add: if_fun_split | strengthen invs_psp_aligned invs_vspace_objs invs_arch_state)+ apply (wp he_invs handle_event_pas_refined) diff --git a/proof/access-control/Tcb_AC.thy b/proof/access-control/Tcb_AC.thy index bf72128deb..710e082f00 100644 --- a/proof/access-control/Tcb_AC.thy +++ b/proof/access-control/Tcb_AC.thy @@ -60,7 +60,7 @@ lemmas itr_wps = restart_integrity_autarch as_user_integrity_autarch thread_set_integrity_autarch option_update_thread_integrity_autarch thread_set_pas_refined cap_insert_integrity_autarch cap_insert_pas_refined - hoare_vcg_all_liftE wp_throw_const_impE hoare_weak_lift_imp hoare_vcg_all_lift + hoare_vcg_all_liftE hoare_weak_lift_impE hoare_weak_lift_imp hoare_vcg_all_lift check_cap_inv[where P="valid_cap c" for c] check_cap_inv[where P="tcb_cap_valid c p" for c p] check_cap_inv[where P="cte_at p0" for p0] @@ -257,7 +257,7 @@ lemma bind_notification_respects: \\_. integrity aag X st\" apply (rule hoare_gen_asm) apply (clarsimp simp: bind_notification_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (wp set_ntfn_respects hoare_vcg_imp_lift sbn_bind_respects | wpc | clarsimp)+ apply fastforce done @@ -322,7 +322,7 @@ subsubsection\@{term "pas_refined"}\ lemmas ita_wps = as_user_pas_refined restart_pas_refined cap_insert_pas_refined thread_set_pas_refined cap_delete_pas_refined' check_cap_inv2 hoare_vcg_all_liftE - wp_throw_const_impE hoare_weak_lift_imp hoare_vcg_all_lift + hoare_weak_lift_impE hoare_weak_lift_imp hoare_vcg_all_lift lemma hoare_st_refl: "\ \st. \P st\ f \Q st\; \r s st. Q st r s \ Q' r s \ \ \\s. 
P s s\ f \Q'\" @@ -416,7 +416,7 @@ lemma decode_set_ipc_buffer_authorised: apply (rule hoare_pre) apply (clarsimp simp: ball_Un aag_cap_auth_def split del: if_split split: prod.split | wp (once) derive_cap_obj_refs_auth derive_cap_untyped_range_subset derive_cap_clas - derive_cap_cli hoare_vcg_all_lift_R whenE_throwError_wp slot_long_running_inv + derive_cap_cli hoare_vcg_all_liftE_R whenE_throwError_wp slot_long_running_inv | wpc)+ apply (cases excaps, simp) apply fastforce @@ -432,7 +432,7 @@ lemma decode_set_space_authorised: apply (simp cong: list.case_cong split del: if_split) apply (clarsimp simp: ball_Un split del: if_split | wp (once) derive_cap_obj_refs_auth derive_cap_untyped_range_subset derive_cap_clas - derive_cap_cli hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + derive_cap_cli hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R whenE_throwError_wp slot_long_running_inv)+ apply (clarsimp simp: not_less all_set_conv_all_nth dest!: P_0_1_spec) apply (auto simp: aag_cap_auth_def update_cap_cli @@ -445,7 +445,7 @@ lemma decode_tcb_configure_authorised_helper: "\K True and K (is_subject aag t \ (\x \ set excaps. is_subject aag (fst (snd x))) \ (\x \ set excaps. pas_cap_cur_auth aag (fst x)) \ authorised_tcb_inv aag set_param - \ is_thread_control set_param)\ + \ is_ThreadControl set_param)\ decode_set_space ws (ThreadCap t) slot excaps \\rv _ :: det_ext state. authorised_tcb_inv aag (ThreadControl t slot (tc_new_fault_ep rv) None None (tc_new_croot rv) @@ -453,7 +453,7 @@ lemma decode_tcb_configure_authorised_helper: (tc_new_buffer set_param))\, -" apply (rule hoare_gen_asmE) apply (cases set_param) - apply (simp_all add: is_thread_control_def decode_set_space_def authorised_tcb_inv_def + apply (simp_all add: decode_set_space_def authorised_tcb_inv_def cong: list.case_cong option.case_cong prod.case_cong split: prod.split_asm split del: if_split) apply (cases "excaps!0") @@ -461,7 +461,7 @@ lemma decode_tcb_configure_authorised_helper: apply (rule hoare_pre) apply (clarsimp simp: ball_Un split del: if_split split: prod.split | wp (once) derive_cap_obj_refs_auth derive_cap_untyped_range_subset derive_cap_clas derive_cap_cli - hoare_vcg_all_lift_R whenE_throwError_wp slot_long_running_inv)+ + hoare_vcg_all_liftE_R whenE_throwError_wp slot_long_running_inv)+ apply (clarsimp cong: list.case_cong option.case_cong prod.case_cong split: prod.split_asm) apply (clarsimp simp: not_less all_set_conv_all_nth dest!: P_0_1_spec) apply (auto simp: aag_cap_auth_def update_cap_cli diff --git a/proof/bisim/README.md b/proof/bisim/README.md index 818317d862..2c7748b2aa 100644 --- a/proof/bisim/README.md +++ b/proof/bisim/README.md @@ -14,9 +14,9 @@ kernel that has no other system calls than signalling notifications. Building -------- -To build from the `l4v/` directory, run: +To build for the ARM architecture from the `l4v/` directory, run: - ./isabelle/bin/isabelle build -d . 
-v -b Bisim + L4V_ARCH=ARM ./run_tests Bisim Important Theories ------------------ diff --git a/proof/bisim/Syscall_S.thy b/proof/bisim/Syscall_S.thy index d7ee4fd478..882b0374a4 100644 --- a/proof/bisim/Syscall_S.thy +++ b/proof/bisim/Syscall_S.thy @@ -282,12 +282,12 @@ lemma send_fault_ipc_bisim: apply (clarsimp simp: handle_double_fault_def) apply (rule bisim_refl') apply (simp add: Let_def) - apply (rule hoare_vcg_seqE) - apply (rule hoare_vcg_seqE) + apply (rule bindE_wp) + apply (rule bindE_wp) apply (wpc; wp) apply wp apply simp - apply (rule hoare_post_imp_R [OF lc_sep]) + apply (rule hoare_strengthen_postE_R [OF lc_sep]) apply (clarsimp simp: separate_cap_def) apply (wp | simp add: Let_def)+ apply (rule_tac P = "separate_cap handler_cap" in hoare_gen_asmE') @@ -323,12 +323,12 @@ lemma bisim_liftME_same: shows "bisim (f \ (=)) P P' (liftME g m) (liftME g m')" unfolding liftME_def apply (rule bisim_guard_imp) - apply (rule bisim_splitE [OF bs]) - apply simp - apply (rule bisim_returnOk) - apply simp - apply wp - apply simp+ + apply (rule bisim_splitE [OF bs]) + apply simp + apply (rule bisim_returnOk) + apply simp + apply wp+ + apply simp+ done lemma bisim_split_if: @@ -596,10 +596,10 @@ lemma handle_recv_bisim: apply (simp split del: if_split) apply (rule bisim_refl [where P = \ and P' = \]) apply (case_tac rc, simp_all)[1] - apply (wp get_cap_wp' lsft_sep | simp add: lookup_cap_def split_def del: hoare_True_E_R)+ + apply (wp get_cap_wp' lsft_sep | simp add: lookup_cap_def split_def)+ apply (rule handle_fault_bisim) apply (wp get_simple_ko_wp | wpc | simp)+ - apply (rule_tac Q' = "\_. separate_state and valid_objs and tcb_at r" in hoare_post_imp_R) + apply (rule_tac Q' = "\_. separate_state and valid_objs and tcb_at r" in hoare_strengthen_postE_R) prefer 2 apply simp apply (wp | simp add: cur_tcb_def)+ @@ -699,7 +699,7 @@ lemma schedule_separate_state [wp]: "\separate_state\ schedule :: (unit,unit) s_monad \\_. separate_state\" unfolding schedule_def switch_to_thread_def arch_switch_to_thread_def switch_to_idle_thread_def arch_switch_to_idle_thread_def allActiveTCBs_def - by (wpsimp wp: select_inv separate_state_pres' alternative_valid + by (wpsimp wp: select_inv separate_state_pres' simp: arch_activate_idle_thread_def | strengthen imp_consequent)+ @@ -723,7 +723,7 @@ lemma send_signal_separate_state [wp]: unfolding send_signal_def cancel_ipc_def apply (rule separate_state_pres) apply (rule hoare_pre) - apply (wp gts_wp get_simple_ko_wp hoare_pre_cont[where a = "reply_cancel_ipc x" for x] + apply (wp gts_wp get_simple_ko_wp hoare_pre_cont[where f="reply_cancel_ipc x" for x] | wpc | wps | simp add: update_waiting_ntfn_def)+ apply (clarsimp) diff --git a/proof/capDL-api/Arch_DP.thy b/proof/capDL-api/Arch_DP.thy index 2f6f08c0fa..33f60e5bf4 100644 --- a/proof/capDL-api/Arch_DP.thy +++ b/proof/capDL-api/Arch_DP.thy @@ -25,7 +25,7 @@ lemma cdl_lookup_pt_slot_rv: apply (rule validE_validE_R) apply (clarsimp simp : cdl_lookup_pt_slot_def) apply (clarsimp simp: validE_def valid_def bindE_def - bind_def bind_assoc NonDetMonad.lift_def) + bind_def bind_assoc Nondet_Monad.lift_def) apply (case_tac a) apply (clarsimp simp:liftE_def bindE_def bind_def return_def) apply (clarsimp simp:liftE_def bindE_def bind_def return_def) @@ -65,9 +65,9 @@ lemma decode_page_map_intent_rv_20_24: \\r s. 
R r\, -" apply (simp add: decode_invocation_def get_index_def get_page_intent_def throw_opt_def cap_rights_def decode_page_invocation_def throw_on_none_def get_mapped_asid_def) - apply (wp alternativeE_wp select_wp | wpc)+ + apply (wp | wpc)+ apply (rule validE_validE_R) - apply (wp alternativeE_wp) + apply wp apply (simp add:cdl_page_mapping_entries_def split del:if_split | wp | wpc)+ apply auto done @@ -86,9 +86,9 @@ lemma decode_page_map_intent_rv_16_12: get_page_intent_def throw_opt_def cap_rights_def decode_page_invocation_def throw_on_none_def get_mapped_asid_def) - apply (wp alternativeE_wp select_wp) + apply wp apply (rule validE_validE_R) - apply (wp alternativeE_wp) + apply wp apply (simp add:cdl_page_mapping_entries_def) apply (wp cdl_lookup_pt_slot_rv | wpc | simp)+ apply auto @@ -130,13 +130,13 @@ lemma invoke_page_table_wp: done crunch cdl_cur_thread[wp]: invoke_page "\s. P (cdl_current_thread s)" -(wp: crunch_wps select_wp alternative_wp simp : swp_def ) + (wp: crunch_wps simp: swp_def) crunch cdl_cur_thread[wp]: invoke_page_table "\s. P (cdl_current_thread s)" -(wp: crunch_wps select_wp alternative_wp simp : swp_def ) + (wp: crunch_wps simp: swp_def) crunch cdl_cur_domain[wp]: invoke_page_table, invoke_page "\s. P (cdl_current_domain s)" -(wp: crunch_wps select_wp alternative_wp simp : swp_def unless_def) + (wp: crunch_wps simp: swp_def unless_def) lemmas cap_asid_simps[simp] = cap_asid_def[split_simps cdl_cap.split] lemmas cap_mapped_simps[simp] = cap_mapped_def[split_simps cdl_cap.split] @@ -153,7 +153,7 @@ lemma decode_page_table_rv: apply (simp add:decode_invocation_def get_page_table_intent_def throw_opt_def decode_page_table_invocation_def) apply (rule hoare_pre) - apply (wp alternativeE_wp throw_on_none_wp | wpc | simp)+ + apply (wp throw_on_none_wp | wpc | simp)+ apply (clarsimp split:option.splits simp:get_index_def cap_object_def cap_has_object_def get_mapped_asid_def) done @@ -232,7 +232,7 @@ lemma seL4_Page_Table_Map: \* cnode_id \f CNode (empty_cnode root_size) \* R> s \ iv = InvokePageTable (PageTableMap (PageTableCap ptr Real (get_mapped_asid asid' (vaddr && ~~ mask 20))) (PageTableCap ptr Fake None) (cnode_id,pt_offset) (cdl_lookup_pd_slot pd_ptr vaddr))" - in hoare_post_impErr[rotated -1]) + in hoare_strengthen_postE[rotated -1]) apply assumption apply clarsimp apply (rule hoare_vcg_E_elim) @@ -362,7 +362,7 @@ lemma seL4_Section_Map_wp: (PageMap (FrameCap dev frame_ptr rights n Real (get_mapped_asid asid' vaddr)) (FrameCap False frame_ptr (validate_vm_rights (rights \ perms)) n Fake None) (cnode_id,frame_offset) [cdl_lookup_pd_slot pd_ptr vaddr])" - in hoare_post_impErr[rotated -1]) + in hoare_strengthen_postE[rotated -1]) apply assumption apply (rule hoare_vcg_E_elim) apply wp @@ -503,7 +503,7 @@ lemma seL4_Page_Map_wp: (PageMap (FrameCap dev frame_ptr rights n Real (get_mapped_asid asid' vaddr)) (FrameCap False frame_ptr (validate_vm_rights (rights \ perms)) n Fake None) (cnode_id,frame_offset) [ (pt_ptr, unat ((vaddr >> 12) && 0xFF))] )" - in hoare_post_impErr[rotated -1]) + in hoare_strengthen_postE[rotated -1]) apply assumption apply (rule hoare_vcg_E_elim) apply wp @@ -564,7 +564,7 @@ lemma decode_invocation_asid_pool_assign: decode_asid_pool_invocation_def get_index_def throw_opt_def throw_on_none_def) apply (rule validE_validE_R) - apply (wp alternativeE_wp select_wp) + apply wp apply (clarsimp simp:cap_object_def cap_has_object_def) done diff --git a/proof/capDL-api/CNode_DP.thy b/proof/capDL-api/CNode_DP.thy index 5566d8dae0..9f19df9f55 100644 --- 
a/proof/capDL-api/CNode_DP.thy +++ b/proof/capDL-api/CNode_DP.thy @@ -31,7 +31,7 @@ lemma decode_cnode_copy_same_parent_rvu: \\rv s. Q rv\, -" apply (clarsimp simp:user_pointer_at_def Let_def) apply (clarsimp simp: decode_cnode_invocation_def split_def split: sum.splits) - apply (wp hoare_whenE_wp | simp)+ + apply (wp whenE_wp | simp)+ apply (rule validE_validE_R) apply (wp derive_cap_invE)+ apply (rule validE_validE_R) @@ -60,7 +60,7 @@ lemma invoke_cnode_insert_cdl_current_domain[wp]: \\_ s. P (cdl_current_domain s) \" apply (simp add: invoke_cnode_def) apply (rule hoare_pre) - apply (wp alternative_wp | wpc | clarsimp)+ + apply (wp | wpc | clarsimp)+ done lemma invoke_cnode_move_cdl_current_domain[wp]: @@ -177,7 +177,7 @@ lemma seL4_CNode_Mint_sep: apply (wp cnode_insert_cap_cdl_current_thread)[1] apply (rule no_exception_conj') apply (wp)[1] - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule_tac R = "(root_tcb_id, tcb_pending_op_slot) \c RestartCap \* R" for R in invoke_cnode_insert_cap') apply simp @@ -343,7 +343,7 @@ lemma seL4_CNode_Mutate_sep: apply (wp cnode_move_cap_cdl_current_thread)[1] apply (rule no_exception_conj') apply wp[1] - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule_tac R = "(root_tcb_id, tcb_pending_op_slot) \c RestartCap \* R" for R in invoke_cnode_move_cap) apply clarsimp @@ -502,7 +502,7 @@ lemma seL4_CNode_Move_sep: apply (wp cnode_move_cap_cdl_current_thread)[1] apply (rule no_exception_conj') apply wp[1] - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule_tac R = "(root_tcb_id, tcb_pending_op_slot) \c RestartCap \* R" for R in invoke_cnode_move_cap) apply clarsimp @@ -657,7 +657,7 @@ lemma seL4_CNode_Copy_sep: apply (wp cnode_insert_cap_cdl_current_thread)[1] apply (rule no_exception_conj') apply wp[1] - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule_tac R = "(root_tcb_id, tcb_pending_op_slot) \c RestartCap \* R" for R in invoke_cnode_insert_cap') apply simp diff --git a/proof/capDL-api/IRQ_DP.thy b/proof/capDL-api/IRQ_DP.thy index 9066484c56..8e4ea2bc58 100644 --- a/proof/capDL-api/IRQ_DP.thy +++ b/proof/capDL-api/IRQ_DP.thy @@ -46,7 +46,6 @@ lemma invoke_irq_handler_set_handler_wp: invoke_irq_handler (SetIrqHandler irq cap slot) \\_. 
< irq \irq obj \* (obj, 0) \c cap \* R> \" apply (clarsimp simp: invoke_irq_handler_def, wp) - apply (wp alternative_wp) apply (wp sep_wp: insert_cap_child_wp insert_cap_sibling_wp)+ apply (sep_wp delete_cap_simple_format[where cap=cap'])+ apply (safe) @@ -71,7 +70,7 @@ lemma decode_invocation_irq_ack_rv': decode_irq_handler_invocation cap cap_ref caps (IrqHandlerAckIntent) \P\, -" apply (clarsimp simp: decode_irq_handler_invocation_def) - apply (wp alternativeE_R_wp) + apply wp apply (clarsimp) done @@ -80,7 +79,7 @@ lemma decode_invocation_irq_clear_rv': decode_irq_handler_invocation cap cap_ref caps (IrqHandlerClearIntent) \P\, -" apply (clarsimp simp: decode_irq_handler_invocation_def) - apply (wp alternativeE_R_wp) + apply wp apply (clarsimp) done @@ -105,7 +104,7 @@ decode_irq_handler_invocation cap cap_ref caps (IrqHandlerSetEndpointIntent) \P\, -" apply (rule validE_R_gen_asm_conj) apply (clarsimp simp: decode_irq_handler_invocation_def) - apply (wp alternativeE_R_wp | wpc)+ + apply (wp | wpc)+ apply (clarsimp split: cdl_cap.splits, safe) apply ((wp throw_on_none_rv)+, clarsimp simp: get_index_def) apply simp @@ -117,13 +116,13 @@ lemma decode_irq_control_issue_irq_rv: <\ (r, (unat depth)) : root_cap index \u cap \* R> s\ decode_irq_control_invocation target target_ref caps (IrqControlIssueIrqHandlerIntent irq index depth) \P\, -" apply (clarsimp simp: decode_irq_control_invocation_def) - apply (wp alternativeE_R_wp lookup_slot_for_cnode_op_rvu'[where cap=cap and r=r] throw_on_none_rv) + apply (wp lookup_slot_for_cnode_op_rvu'[where cap=cap and r=r] throw_on_none_rv) apply (clarsimp simp: get_index_def) apply (sep_solve) done schematic_goal lookup_extra_caps_once_wp: "\?P\ lookup_extra_caps root_tcb_id [endpoint_cptr] \Q\, \Q'\" -apply (clarsimp simp: lookup_extra_caps_def mapME_def sequenceE_def, wp, clarsimp) +apply (clarsimp simp: lookup_extra_caps_def mapME_def sequenceE_def, wp) apply (rule lookup_cap_and_slot_rvu) done @@ -342,7 +341,7 @@ shows "\\root_tcb_id \f root_tcb \* (root_t apply (rule_tac P = "c=irq_cap" in hoare_gen_asmEx, simp) apply (simp add: unify decode_invocation_def) apply (wp) - apply (rule_tac P = "x = (IrqHandlerSetEndpointIntent)" in hoare_gen_asmE, simp) + apply (rule_tac P = "rv = (IrqHandlerSetEndpointIntent)" in hoare_gen_asmE, simp) apply (wp decode_invocation_irq_endpoint_rv'[where endpoint_cap=endpoint_cap and endpoint_ptr = endpoint_ptr and xs = "[]"]) apply (unfold throw_opt_def get_irq_handler_intent_def, simp) apply (rule returnOk_wp) @@ -433,7 +432,7 @@ lemma seL4_IRQHandler_SetEndpoint_wp: apply (rule hoare_gen_asm) apply (wp seL4_IRQHandler_SetEndpoint_wp_helper [where irq_handler_slot=endpoint_slot and cap'=old_cap and t="obj_tcb root_tcb"], simp) - apply (rule pred_andI) + apply (rule pred_conjI) apply sep_solve apply clarsimp apply (case_tac endpoint_cap, simp_all add: is_memory_cap_def cap_type_def) diff --git a/proof/capDL-api/Invocation_DP.thy b/proof/capDL-api/Invocation_DP.thy index dd9459c777..0c31cc4a21 100644 --- a/proof/capDL-api/Invocation_DP.thy +++ b/proof/capDL-api/Invocation_DP.thy @@ -12,10 +12,10 @@ crunch cdl_current_domain[wp]: update_available_range, generate_object_ids, upda mark_tcb_intent_error, corrupt_ipc_buffer, insert_cap_sibling, insert_cap_child, move_cap, invoke_irq_control, invoke_irq_handler "\s. 
P (cdl_current_domain s)" -(wp: crunch_wps select_wp alternative_wp alternativeE_wp hoare_unless_wp simp: split_def corrupt_intents_def) +(wp: crunch_wps unless_wp simp: split_def corrupt_intents_def) crunch cdl_irq_node [wp]: corrupt_ipc_buffer "\s. P (cdl_irq_node s)" -(wp: crunch_wps select_wp simp: corrupt_intents_def) +(wp: crunch_wps simp: corrupt_intents_def) crunch cdl_irq_node [wp]: mark_tcb_intent_error "\s. P (cdl_irq_node s)" (wp: crunch_wps) @@ -124,7 +124,7 @@ lemma corrupt_tcb_intent_sep_helper[wp]: \\rv s. A (object_at (\obj. P (object_clean obj)) ptr s)\" apply (simp add:corrupt_tcb_intent_def update_thread_def set_object_def) - apply (wp select_wp | wpc | simp add:set_object_def)+ + apply (wp | wpc | simp add:set_object_def)+ apply (clarsimp simp:object_at_def) apply (simp add:object_clean_def intent_reset_def object_slots_def asid_reset_def update_slots_def) @@ -141,7 +141,7 @@ lemma corrupt_frame_sep_helper[wp]: "\\s. A (object_at (\obj. P (object_clean obj)) ptr s)\ corrupt_frame a \\rv s. A (object_at (\obj. P (object_clean obj)) ptr s)\" apply (simp add:corrupt_frame_def) - apply (wp select_wp) + apply wp apply (clarsimp simp:corrupt_intents_def object_at_def map_add_def split:option.splits cdl_object.splits) apply (simp add:object_clean_def intent_reset_def @@ -157,7 +157,7 @@ lemma corrupt_ipc_buffer_sep_inv[wp]: \\rv s. < P > s\" apply (rule sep_nonimpact_valid_lift) apply (simp add:corrupt_ipc_buffer_def) - apply (wp select_wp hoare_drop_imps | wpc | simp)+ + apply (wp hoare_drop_imps | wpc | simp)+ done lemma update_thread_intent_update: @@ -231,55 +231,48 @@ lemma no_exception_conj': done crunch inv[wp]: decode_untyped_invocation P - (wp:crunch_wps alternativeE_wp mapME_x_inv_wp - unlessE_wp simp:crunch_simps throw_on_none_def) + (wp: crunch_wps mapME_x_inv_wp unlessE_wp simp: crunch_simps throw_on_none_def) crunch inv[wp]: decode_irq_handler_invocation P - (wp:crunch_wps alternativeE_wp - simp:liftE_bindE throw_on_none_def) + (wp: crunch_wps simp: liftE_bindE throw_on_none_def) crunch inv[wp]: decode_tcb_invocation P - (wp:crunch_wps alternativeE_wp - simp:liftE_bindE throw_on_none_def) + (wp: crunch_wps simp: liftE_bindE throw_on_none_def) crunch inv[wp]: decode_domain_invocation P - (wp:crunch_wps alternativeE_wp - simp:liftE_bindE throw_on_none_def) + (wp:crunch_wps simp: liftE_bindE throw_on_none_def) crunch inv[wp]: decode_irq_control_invocation P - (wp:crunch_wps alternativeE_wp select_wp - simp:liftE_bindE throw_on_none_def) + (wp: crunch_wps simp: liftE_bindE throw_on_none_def) crunch inv[wp]: decode_asid_control_invocation P - (wp:crunch_wps alternativeE_wp select_wp ignore:returnOk - simp:liftE_bindE throw_on_none_def) + (wp: crunch_wps ignore: returnOk simp: liftE_bindE throw_on_none_def) crunch inv[wp]: lookup_cap_and_slot P (wp:crunch_wps resolve_address_bits_wp) crunch inv[wp]: decode_page_invocation P - (wp:crunch_wps alternativeE_wp select_wp resolve_address_bits_wp - simp:throw_on_none_def) + (wp: crunch_wps resolve_address_bits_wp simp: throw_on_none_def) lemma decode_page_table_invocation_inv[wp]: "\P\ decode_page_table_invocation a b c d \\_. P\" apply (simp add:decode_page_table_invocation_def) apply (rule hoare_pre) - apply (wpc|wp alternativeE_wp select_wp |simp add:throw_on_none_def)+ + apply (wpc|wp |simp add:throw_on_none_def)+ done lemma decode_page_directory_invocation_inv[wp]: "\P\ decode_page_directory_invocation a b c d \\_. 
P\" apply (simp add:decode_page_directory_invocation_def) apply (rule hoare_pre) - apply (wpc|wp alternativeE_wp select_wp |simp add:throw_on_none_def)+ + apply (wpc|wp |simp add:throw_on_none_def)+ done lemma decode_asid_pool_invocation_inv[wp]: "\P\ decode_asid_pool_invocation a b c d \\_. P\" apply (simp add:decode_asid_pool_invocation_def) apply (rule hoare_pre) - apply (wpc|wp alternativeE_wp select_wp |simp add:throw_on_none_def)+ + apply (wpc|wp |simp add:throw_on_none_def)+ done lemma decode_invocation_inv[wp]: @@ -301,7 +294,7 @@ lemma decode_invocation_nonep: decode_invocation cap cap_ref extra_caps intent \\rv s. nonep_invocation rv\, -" apply (simp add: decode_invocation_def) - apply (wpsimp simp: o_def nonep_invocation_def simp_del: hoare_True_E_R) + apply (wpsimp simp: o_def nonep_invocation_def wp: wp_post_tauts) apply (auto simp: ep_related_cap_def) done @@ -325,8 +318,8 @@ lemma wp_no_exception_seq_r: and validE_f: "\P\f\\r. P' r\,\\r. Inv\" shows "\P\ f >>=E g \Q\,\\r. Inv\" apply (rule hoare_pre) - apply (rule hoare_vcg_seqE) - apply (rule hoare_post_impErr[OF validE_g]) + apply (rule bindE_wp) + apply (rule hoare_strengthen_postE[OF validE_g]) apply simp apply simp apply (wp validE_f) @@ -371,7 +364,7 @@ lemma handle_event_syscall_no_decode_exception: apply (rule liftE_wp_split_r)+ apply (rule wp_no_exception_seq_r) apply (rule liftE_wp_no_exception) - apply (rule hoare_whenE_wp) + apply (rule whenE_wp) apply simp apply wp apply (rule_tac P = "y = cur_thread" in hoare_gen_asm) @@ -403,7 +396,7 @@ lemma handle_event_syscall_no_decode_exception: apply (rule_tac P = " y = cur_thread \ cdl_intent_extras (cdl_tcb_intent ya) = intent_extra" in hoare_gen_asmEx) apply simp - apply (rule hoare_post_impErr[OF no_exception_conj]) + apply (rule hoare_strengthen_postE[OF no_exception_conj]) apply (rule_tac r = yb in lookup_extra_caps_exec) prefer 2 apply (elim conjE) @@ -413,7 +406,7 @@ lemma handle_event_syscall_no_decode_exception: apply (rule_tac P = "(cdl_intent_cap (cdl_tcb_intent ya)) = intent_cptr \ y = cur_thread" in hoare_gen_asmEx) - apply (rule hoare_post_impErr[OF no_exception_conj]) + apply (rule hoare_strengthen_postE[OF no_exception_conj]) apply simp apply (rule lookup_cap_and_slot_exec) prefer 2 @@ -427,16 +420,15 @@ lemma handle_event_syscall_no_decode_exception: done crunch cdl_current_thread [wp]: delete_cap_simple "\s. P (cdl_current_thread s)" -(wp:crunch_wps select_wp simp:split_def unless_def) + (wp: crunch_wps simp: split_def unless_def) crunch cdl_current_thread [wp]: mark_tcb_intent_error "\s. P (cdl_current_thread s)" -(wp:crunch_wps select_wp simp:split_def unless_def) + (wp: crunch_wps simp: split_def unless_def) crunch cdl_current_thread [wp]: corrupt_ipc_buffer "\s. P (cdl_current_thread s)" -(wp:crunch_wps select_wp simp:split_def unless_def corrupt_frame_def corrupt_intents_def) + (wp: crunch_wps simp: split_def unless_def corrupt_frame_def corrupt_intents_def) crunch cdl_current_thread [wp]: invoke_irq_control, invoke_irq_handler "\s. P (cdl_current_thread s)" -(wp:alternative_wp) lemma corrupt_tcb_intent_all_active_tcbs[wp]: @@ -478,7 +470,7 @@ lemma send_signal_no_pending: \\r. 
P\" apply (simp add: send_signal_def send_signal_bound_def) apply (rule hoare_pre) - apply (wp alternative_wp | wpc)+ + apply (wp | wpc)+ apply (rule hoare_pre_cont) apply (rule_tac P = "waiters = {}" in hoare_gen_asm) apply (clarsimp simp: option_select_def) @@ -495,7 +487,7 @@ lemma send_signal_no_pending: done crunch invs[wp]: get_active_irq P - (wp: crunch_wps alternative_wp select_wp) + (wp: crunch_wps) lemma handle_pending_interrupts_no_ntf_cap: "\P and no_pending\ @@ -506,7 +498,7 @@ lemma handle_pending_interrupts_no_ntf_cap: apply (wp send_signal_no_pending | wpc | simp add: option_select_def handle_interrupt_def split del: if_split)+ - apply (wp alternative_wp select_wp hoare_drop_imps hoare_vcg_all_lift) + apply (wp hoare_drop_imps hoare_vcg_all_lift) apply simp done @@ -556,61 +548,59 @@ lemma call_kernel_with_intent_no_fault_helper: using unify apply (simp add:call_kernel_with_intent_def) apply wp - apply (rule_tac P = "thread_ptr = root_tcb_id" in hoare_gen_asm) - apply (simp add:call_kernel_loop_def) - apply (rule_tac Q = "\r s. cdl_current_thread s = Some root_tcb_id - \ cdl_current_domain s = minBound \ Q s - " in hoare_strengthen_post[rotated]) - apply fastforce - apply clarsimp - apply wp - apply (rule hoare_vcg_imp_lift) - apply (wpc|wp hoare_vcg_imp_lift|simp cong: if_cong)+ - apply (rule hoare_pre_cont) - apply (wp has_restart_cap_sep_wp[where cap = RunningCap])[1] - apply wp + apply (rule_tac P = "thread_ptr = root_tcb_id" in hoare_gen_asm) + apply (simp add:call_kernel_loop_def) apply (rule_tac Q = "\r s. cdl_current_thread s = Some root_tcb_id - \ cdl_current_domain s = minBound \ (Q s - \ <(root_tcb_id, tcb_pending_op_slot) \c RunningCap \* (\s. True)> s)" - in hoare_strengthen_post) - apply (rule schedule_no_choice_wp) - apply fastforce - apply (rule whileLoop_wp[where - I = "\rv s. case rv of Inl _ \ (tcb_at' - (\tcb. 
cdl_intent_op (cdl_tcb_intent tcb) = Some intent_op \ - cdl_intent_cap (cdl_tcb_intent tcb) = cdl_intent_cap intent \ - cdl_intent_extras (cdl_tcb_intent tcb) = cdl_intent_extras intent) - root_tcb_id s - \ cdl_current_thread s = Some root_tcb_id - \ cdl_current_domain s = minBound \ Pd2 s) - | Inr rv \ Q (Inr rv) s" and Q=Q for Q, rotated]) - apply (case_tac r, simp_all add: isLeft_def)[1] - apply (simp add: validE_def[symmetric]) - apply (rule hoare_pre, wp) - apply (simp add: validE_def, (wp | simp add: validE_def[symmetric])+) - apply (wp handle_pending_interrupts_no_ntf_cap) - apply (rule handle_event_syscall_no_decode_exception - [where cur_thread = root_tcb_id - and intent_op = intent_op - and intent_cptr = intent_cptr - and intent_extra = intent_extra]) - apply (wp set_cap_wp set_cap_all_scheduable_tcbs - set_cap_hold delete_cap_simple_wp[where cap = RestartCap])[1] - apply (rule decode_invocation_no_exception) - apply (rule lookup_extra_caps_exec) - apply (rule lookup_cap_and_slot_exec) - apply (rule non_ep_cap) - apply ((wp corrupt_ipc_buffer_sep_inv corrupt_ipc_buffer_active_tcbs - mark_tcb_intent_error_hold corrupt_ipc_buffer_hold | simp)+)[2] - apply (rule hoare_post_impErr[OF perform_invocation_hold]) - apply (fastforce simp:sep_state_projection_def sep_any_def - sep_map_c_def sep_conj_def) - apply simp - apply (wp set_restart_cap_hold) - apply (clarsimp simp: isLeft_def) + \ cdl_current_domain s = minBound \ Q s + " in hoare_strengthen_post[rotated]) + apply fastforce + apply clarsimp + apply wp + apply (rule hoare_vcg_imp_lift) + apply (wpc|wp hoare_vcg_imp_lift|simp cong: if_cong)+ + apply (rule hoare_pre_cont) + apply (wp has_restart_cap_sep_wp[where cap = RunningCap])[1] + apply wp + apply (rule_tac Q = "\r s. cdl_current_thread s = Some root_tcb_id + \ cdl_current_domain s = minBound \ (Q s + \ <(root_tcb_id, tcb_pending_op_slot) \c RunningCap \* (\s. True)> s)" + in hoare_strengthen_post) + apply (rule schedule_no_choice_wp) + apply fastforce + apply (rule whileLoop_wp[where + I = "\rv s. case rv of Inl _ \ (tcb_at' + (\tcb. 
cdl_intent_op (cdl_tcb_intent tcb) = Some intent_op \ + cdl_intent_cap (cdl_tcb_intent tcb) = cdl_intent_cap intent \ + cdl_intent_extras (cdl_tcb_intent tcb) = cdl_intent_extras intent) + root_tcb_id s + \ cdl_current_thread s = Some root_tcb_id + \ cdl_current_domain s = minBound \ Pd2 s) + | Inr rv \ Q (Inr rv) s" and Q=Q for Q, rotated]) + apply (case_tac r, simp_all)[1] + apply (simp add: validE_def[symmetric]) + apply (wp handle_pending_interrupts_no_ntf_cap) + apply (rule handle_event_syscall_no_decode_exception + [where cur_thread = root_tcb_id + and intent_op = intent_op + and intent_cptr = intent_cptr + and intent_extra = intent_extra]) + apply (wp set_cap_wp set_cap_all_scheduable_tcbs + set_cap_hold delete_cap_simple_wp[where cap = RestartCap])[1] + apply (rule decode_invocation_no_exception) + apply (rule lookup_extra_caps_exec) + apply (rule lookup_cap_and_slot_exec) + apply (rule non_ep_cap) + apply ((wp corrupt_ipc_buffer_sep_inv corrupt_ipc_buffer_active_tcbs + mark_tcb_intent_error_hold corrupt_ipc_buffer_hold | simp)+)[2] + apply (rule hoare_strengthen_postE[OF perform_invocation_hold]) + apply (fastforce simp:sep_state_projection_def sep_any_def + sep_map_c_def sep_conj_def) + apply simp + apply (wp set_restart_cap_hold) + apply (clarsimp split: sum.split_asm) apply (rule_tac P = "thread_ptr = root_tcb_id" in hoare_gen_asm) - apply simp - apply (wp upd_thread update_thread_wp)+ + apply simp + apply (wp upd_thread update_thread_wp)+ apply auto done @@ -622,13 +612,7 @@ lemma invoke_cnode_insert_cap: apply (simp add:validE_def) apply (rule hoare_name_pre_state) apply (clarsimp simp:invoke_cnode_def liftE_bindE validE_def[symmetric]) - apply (rule alternative_valid) - apply (rule hoare_pre) - apply (rule insert_cap_sibling_wp) - apply simp - apply (rule hoare_pre) - apply (rule insert_cap_child_wp) - apply simp + apply (wpsimp wp: insert_cap_sibling_wp insert_cap_child_wp) done lemma invoke_cnode_move_wp: @@ -682,19 +666,17 @@ lemma cdl_cur_thread_detype: by (simp add:detype_def) crunch cdl_current_thread[wp]: reset_untyped_cap "\s. P (cdl_current_thread s)" - (wp: select_wp alternativeE_wp mapME_x_inv_wp hoare_whenE_wp - simp: cdl_cur_thread_detype crunch_simps) + (wp: mapME_x_inv_wp whenE_wp simp: cdl_cur_thread_detype crunch_simps) lemmas helper = valid_validE_E[OF reset_untyped_cap_cdl_current_thread] crunch cdl_current_thread[wp]: invoke_untyped "\s. P (cdl_current_thread s)" -(wp:select_wp mapM_x_wp' crunch_wps hoare_unless_wp alternativeE_wp - helper - simp:cdl_cur_thread_detype crunch_simps) + (wp: mapM_x_wp' crunch_wps unless_wp helper + simp:cdl_cur_thread_detype crunch_simps) crunch cdl_current_thread[wp]: move_cap "\s. P (cdl_current_thread s)" -(wp:select_wp mapM_x_wp' crunch_wps hoare_unless_wp - simp:crunch_simps) + (wp: mapM_x_wp' crunch_wps unless_wp + simp:crunch_simps) lemma cnode_insert_cap_cdl_current_thread: "\\s. 
P (cdl_current_thread s) \ @@ -704,7 +686,7 @@ lemma cnode_insert_cap_cdl_current_thread: apply (clarsimp simp: invoke_cnode_def liftE_bindE validE_def[symmetric]) apply (rule hoare_pre) - apply (wp alternative_valid | simp | wpc)+ + apply (wp | simp | wpc)+ done lemma cnode_move_cap_cdl_current_thread: @@ -715,7 +697,7 @@ lemma cnode_move_cap_cdl_current_thread: apply (clarsimp simp: invoke_cnode_def liftE_bindE validE_def[symmetric]) apply (rule hoare_pre) - apply (wp alternative_valid | simp | wpc)+ + apply (wp | simp | wpc)+ done lemma sep_any_imp_c'_conj: @@ -854,7 +836,7 @@ lemma syscall_valid_helper_allow_error: apply (wp mark_tcb_intent_error_no_error) apply (rule hoare_drop_imp,simp) apply simp - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply fastforce apply simp apply simp @@ -871,7 +853,7 @@ lemma tcb_has_error_set_cap: apply (simp add:set_cap_def gets_the_def set_object_def split_def) - apply (wp select_wp|wpc|simp)+ + apply (wp|wpc|simp)+ apply (clarsimp simp:tcb_has_error_def object_at_def,simp split:cdl_object.split_asm) apply (intro conjI impI) @@ -930,7 +912,7 @@ lemma handle_event_syscall_allow_error: apply (rule liftE_wp_split_r)+ apply (rule wp_no_exception_seq_r) apply (rule liftE_wp_no_exception) - apply (rule hoare_whenE_wp) + apply (rule whenE_wp) apply (simp) apply wp apply (rule_tac P = "y = cur_thread" in hoare_gen_asm) @@ -963,7 +945,7 @@ lemma handle_event_syscall_allow_error: apply (rule_tac P = " cdl_intent_extras (cdl_tcb_intent ya) = intent_extra" in hoare_gen_asmEx) apply simp - apply (rule hoare_post_impErr[OF no_exception_conj]) + apply (rule hoare_strengthen_postE[OF no_exception_conj]) apply (rule_tac r = r in lookup_extra_caps_exec) prefer 2 apply (elim conjE) @@ -1043,87 +1025,83 @@ lemma call_kernel_with_intent_allow_error_helper: using unify apply (simp add:call_kernel_with_intent_def) apply (wp thread_has_error_wp) - apply (simp add:call_kernel_loop_def) - apply (rule_tac P = "thread_ptr = root_tcb_id" in hoare_gen_asm) - apply (rule_tac Q = "\r s. (cdl_current_thread s = Some root_tcb_id - \ cdl_current_domain s = minBound) \ ( - ((\ tcb_has_error (the (cdl_current_thread s)) s) \ Q s) \ - ((tcb_has_error (the (cdl_current_thread s)) s) \ Perror s))" - in hoare_strengthen_post[rotated]) - apply (fastforce simp: error_imp) - apply wp - (* fragile , do not know why *) - apply (rule hoare_vcg_imp_lift[where P' = "\s. cdl_current_thread s \ Some root_tcb_id - \ cdl_current_domain s \ minBound"]) - apply (rule hoare_pre,(wp hoare_vcg_imp_lift|wpc|simp cong: if_cong)+)[1] - apply (wp | wpc | simp)+ - apply (rule hoare_pre_cont) - apply (wp has_restart_cap_sep_wp[where cap = RunningCap])+ - apply simp - apply (rule_tac current_thread1=root_tcb_id and current_domain1=minBound in - hoare_strengthen_post[OF schedule_no_choice_wp]) - apply (clarsimp, assumption) - apply clarsimp - apply (rule_tac Q = "\r a. (\ tcb_has_error root_tcb_id a \ (Q a - \ cdl_current_thread a = Some root_tcb_id - \ cdl_current_domain a = minBound - \ <(root_tcb_id, tcb_pending_op_slot) \c RunningCap \* (\s. True)> a)) - \ (tcb_has_error root_tcb_id a \ (Perror a - \ cdl_current_thread a = Some root_tcb_id - \ cdl_current_domain a = minBound - \ <(root_tcb_id, tcb_pending_op_slot) \c RunningCap \* (\s. True)> a))" - in hoare_strengthen_post[rotated]) - apply fastforce - apply (rule whileLoop_wp[where - I = "\rv s. case rv of Inl _ \ (tcb_at' - (\tcb. 
cdl_intent_op (cdl_tcb_intent tcb) = Some intent_op \ - cdl_intent_cap (cdl_tcb_intent tcb) = cdl_intent_cap intent \ - cdl_intent_extras (cdl_tcb_intent tcb) = cdl_intent_extras intent \ - cdl_intent_error (cdl_tcb_intent tcb) = False) - root_tcb_id s - \ cdl_current_thread s = Some root_tcb_id - \ cdl_current_domain s = minBound \ Pd2 s) - | Inr rv \ Q (Inr rv) s" and Q=Q for Q, rotated]) - apply (case_tac r, simp_all add: isLeft_def)[1] - apply (simp add: validE_def[symmetric]) - apply (rule hoare_pre, wp) - apply (simp add: validE_def, (wp | simp add: validE_def[symmetric])+) - apply (wp handle_pending_interrupts_no_ntf_cap) - - apply (rule handle_event_syscall_allow_error - [where cur_thread = root_tcb_id - and intent_op = intent_op - and intent_cptr = intent_cptr - and intent_extra = intent_extra]) - apply (wp set_cap_wp - set_cap_hold delete_cap_simple_wp[where cap = RestartCap])[1] - apply (rule decode_invocation_allow_error) - apply (rule lookup_extra_caps_exec) - apply (rule lookup_cap_and_slot_exec) - apply (unfold validE_R_def) - apply (rule hoare_post_impErr) - apply (rule lookup_cap_and_slot_exec) + apply (simp add:call_kernel_loop_def) + apply (rule_tac P = "thread_ptr = root_tcb_id" in hoare_gen_asm) + apply (rule_tac Q = "\r s. (cdl_current_thread s = Some root_tcb_id + \ cdl_current_domain s = minBound) \ + (\tcb_has_error (the (cdl_current_thread s)) s \ Q s) \ + (tcb_has_error (the (cdl_current_thread s)) s \ Perror s)" + in hoare_strengthen_post[rotated]) + apply (fastforce simp: error_imp) + apply wp (* fragile , do not know why *) + apply (rule hoare_vcg_imp_lift[where P' = "\s. cdl_current_thread s \ Some root_tcb_id + \ cdl_current_domain s \ minBound"]) + apply (rule hoare_pre,(wp hoare_vcg_imp_lift|wpc|simp cong: if_cong)+)[1] + apply (wp | wpc | simp)+ + apply (rule hoare_pre_cont) + apply (wp has_restart_cap_sep_wp[where cap = RunningCap])+ apply simp - apply simp - apply ((wp corrupt_ipc_buffer_sep_inv corrupt_ipc_buffer_active_tcbs - mark_tcb_intent_error_hold corrupt_ipc_buffer_hold | simp)+)[4] - apply (rule hoare_post_impErr[OF perform_invocation_hold]) - apply (fastforce simp:sep_state_projection_def sep_any_def - sep_map_c_def sep_conj_def) - apply simp - apply (wp set_restart_cap_hold) - apply (clarsimp dest!:error_imp) - apply (sep_cancel, simp) - apply (clarsimp simp: isLeft_def) - apply (rule_tac P = "thread_ptr = root_tcb_id" in hoare_gen_asm) - apply simp - apply (wp upd_thread update_thread_wp)+ - apply (clarsimp) - apply (clarsimp simp:sep_map_c_conj sep_map_f_conj object_at_def - object_project_def sep_state_projection_def - split:option.splits cdl_object.split) + apply (rule_tac current_thread1=root_tcb_id and current_domain1=minBound in + hoare_strengthen_post[OF schedule_no_choice_wp]) + apply (clarsimp, assumption) + apply clarsimp + apply (rule_tac Q = + "\r a. (\ tcb_has_error root_tcb_id a \ (Q a + \ cdl_current_thread a = Some root_tcb_id + \ cdl_current_domain a = minBound + \ <(root_tcb_id, tcb_pending_op_slot) \c RunningCap \* (\s. True)> a)) + \ (tcb_has_error root_tcb_id a \ (Perror a + \ cdl_current_thread a = Some root_tcb_id + \ cdl_current_domain a = minBound + \ <(root_tcb_id, tcb_pending_op_slot) \c RunningCap \* (\s. True)> a))" + in hoare_strengthen_post[rotated]) + apply fastforce + apply (rule whileLoop_wp[where + I = "\rv s. case rv of Inl _ \ + (tcb_at' + (\tcb. 
cdl_intent_op (cdl_tcb_intent tcb) = Some intent_op \ + cdl_intent_cap (cdl_tcb_intent tcb) = cdl_intent_cap intent \ + cdl_intent_extras (cdl_tcb_intent tcb) = cdl_intent_extras intent \ + cdl_intent_error (cdl_tcb_intent tcb) = False) + root_tcb_id s \ + cdl_current_thread s = Some root_tcb_id \ + cdl_current_domain s = minBound \ Pd2 s) + | Inr rv \ Q (Inr rv) s" and Q=Q for Q, rotated]) + apply (case_tac r, simp_all)[1] + apply (simp add: validE_def[symmetric]) + apply (wp handle_pending_interrupts_no_ntf_cap) + apply (rule handle_event_syscall_allow_error + [where cur_thread = root_tcb_id + and intent_op = intent_op + and intent_cptr = intent_cptr + and intent_extra = intent_extra]) + apply (wp set_cap_wp + set_cap_hold delete_cap_simple_wp[where cap = RestartCap])[1] + apply (rule decode_invocation_allow_error) + apply (rule lookup_extra_caps_exec) + apply (rule lookup_cap_and_slot_exec) + apply (unfold validE_R_def) + apply (rule hoare_strengthen_postE) + apply (rule lookup_cap_and_slot_exec) + apply simp + apply simp + apply ((wp corrupt_ipc_buffer_sep_inv corrupt_ipc_buffer_active_tcbs + mark_tcb_intent_error_hold corrupt_ipc_buffer_hold | simp)+)[4] + apply (rule hoare_strengthen_postE[OF perform_invocation_hold]) + apply (fastforce simp:sep_state_projection_def sep_any_def sep_map_c_def sep_conj_def) + apply simp + apply (wp set_restart_cap_hold) + apply (clarsimp dest!:error_imp) + apply (sep_cancel, simp) + apply (clarsimp split: sum.split_asm) + apply (rule_tac P = "thread_ptr = root_tcb_id" in hoare_gen_asm) + apply simp + apply (wp upd_thread update_thread_wp)+ + apply (clarsimp simp: sep_map_c_conj sep_map_f_conj object_at_def + object_project_def sep_state_projection_def + split:option.splits cdl_object.split) apply (case_tac z) - apply (clarsimp dest!:arg_cong[where f= object_type],simp add:object_type_def)+ + apply (clarsimp dest!:arg_cong[where f= object_type],simp add:object_type_def)+ done definition @@ -1137,13 +1115,8 @@ lemma invoke_cnode_insert_cap': apply (simp add:validE_def) apply (rule hoare_name_pre_state) apply (clarsimp simp:invoke_cnode_def liftE_bindE validE_def[symmetric]) - apply (rule alternative_valid) - apply (rule hoare_pre) - apply (rule insert_cap_sibling_wp) - apply (simp add:cap_of_insert_call_def) - apply (rule hoare_pre) - apply (rule insert_cap_child_wp) - apply (simp add:cap_of_insert_call_def) + apply (wpsimp wp: insert_cap_sibling_wp insert_cap_child_wp + simp: cap_of_insert_call_def) done lemma object_to_sep_state_slot: @@ -1164,13 +1137,13 @@ lemma sep_map_c_asid_reset: apply clarsimp apply (case_tac "\ has_slots obj") apply simp - apply (rule_tac x = "update_slots (object_slots obj(snd ptr \ cap')) obj" + apply (rule_tac x = "update_slots ((object_slots obj)(snd ptr \ cap')) obj" in exI) apply (simp add:sep_map_general_def object_to_sep_state_slot) apply clarsimp apply (case_tac "\ has_slots obj") apply simp - apply (rule_tac x = "update_slots (object_slots obj(snd ptr \ cap)) obj" + apply (rule_tac x = "update_slots ((object_slots obj)(snd ptr \ cap)) obj" in exI) apply (simp add:sep_map_general_def object_to_sep_state_slot) done diff --git a/proof/capDL-api/KHeap_DP.thy b/proof/capDL-api/KHeap_DP.thy index d7755622af..69fc145073 100644 --- a/proof/capDL-api/KHeap_DP.thy +++ b/proof/capDL-api/KHeap_DP.thy @@ -343,7 +343,7 @@ lemma decode_tcb_invocation: "\P\decode_tcb_invocation cap cap_ref caps (TcbWriteRegistersIntent resume flags count regs) \\_. 
P\" apply (clarsimp simp: decode_tcb_invocation_def) -apply (wp alternative_wp) +apply wp apply (clarsimp) done @@ -373,7 +373,7 @@ lemma invoke_cnode_insert_wp: \\_. c cap \* R>\" apply (rule hoare_gen_asm) apply (clarsimp simp: invoke_cnode_def) - apply (wp insert_cap_sibling_wp insert_cap_child_wp alternative_wp) + apply (wp insert_cap_sibling_wp insert_cap_child_wp) apply (clarsimp) done @@ -441,7 +441,7 @@ lemma lookup_slot_for_cnode_op_wp [wp]: apply (clarsimp simp: fault_to_except_def) apply (wp) apply (clarsimp simp: gets_the_resolve_cap[symmetric]) - apply (wp gets_the_wpE hoare_whenE_wp)+ + apply (wp gets_the_wpE whenE_wp)+ apply (clarsimp split: option.splits sum.splits) done @@ -453,7 +453,7 @@ lemma lookup_slot_for_cnode_op_wpE: apply (wp) apply (clarsimp simp: gets_the_resolve_cap[symmetric]) apply (clarsimp simp: fault_to_except_def) - apply (wp gets_the_wpE hoare_whenE_wp)+ + apply (wp gets_the_wpE whenE_wp)+ apply (clarsimp split: option.splits split: sum.splits) done @@ -526,7 +526,7 @@ lemma lookup_slot_for_cnode_op_rv': lookup_slot_for_cnode_op cnode_cap cap_ptr remaining_size \\rv s. Q rv s\,-" apply (clarsimp simp: lookup_slot_for_cnode_op_def gets_the_resolve_cap[symmetric] split_def fault_to_except_def) - apply (wp resolve_cap_rv1 hoare_whenE_wp) + apply (wp resolve_cap_rv1 whenE_wp) apply (fastforce) done @@ -559,7 +559,7 @@ lemma lookup_slot_for_cnode_op_rvu': lookup_slot_for_cnode_op cnode_cap cap_ptr remaining_size \Q\,\Q'\" apply (clarsimp simp: lookup_slot_for_cnode_op_def gets_the_resolve_cap[symmetric] split_def fault_to_except_def) - apply (wp resolve_cap_u_nf[where r=r and R=R and cap=cap] hoare_whenE_wp) + apply (wp resolve_cap_u_nf[where r=r and R=R and cap=cap] whenE_wp) apply (clarsimp simp add:user_pointer_at_def Let_def guard_equal_def cap_guard_reset_cap_asid one_lvl_lookup_def) done @@ -580,26 +580,26 @@ lemma derive_cap_rv: derive_cap slot cap \\rv s. P s \ ( rv = cap \ rv = NullCap )\, \\_ _. True\" apply (clarsimp simp: derive_cap_def returnOk_def split: cdl_cap.splits,safe) - apply (wp return_rv hoare_whenE_wp alternativeE_wp | clarsimp simp: ensure_no_children_def)+ + apply (wp return_rv whenE_wp | clarsimp simp: ensure_no_children_def)+ done lemma derive_cap_wp [wp]: "\P\ derive_cap slot cap \\_. P\" apply (clarsimp simp: derive_cap_def returnOk_def split: cdl_cap.splits) apply (safe) - apply ((wp alternative_wp hoare_whenE_wp)|(clarsimp simp: ensure_no_children_def))+ + apply ((wp whenE_wp)|(clarsimp simp: ensure_no_children_def))+ done lemma derive_cap_wpE: "\P\ derive_cap slot cap \\_.P\,\\_.P\" apply (clarsimp simp: derive_cap_def) - apply (case_tac cap, (wp hoare_whenE_wp alternative_wp | + apply (case_tac cap, (wp whenE_wp | simp add: ensure_no_children_def)+) done lemma derive_cap_wp2: "\P\ derive_cap slot cap \\rv s. if rv = NullCap then True else P s\, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (wp (once) derive_cap_wpE) apply (clarsimp) done @@ -616,7 +616,7 @@ lemma decode_cnode_copy_wp: "\P\ decode_cnode_invocation target target_ref caps (CNodeCopyIntent dest_index dest_depth src_index src_depth rights) \ \_. P \,\\_. P\" apply (clarsimp simp: decode_cnode_invocation_def split_def) - apply (wp hoare_whenE_wp hoare_drop_imps | simp cong: if_cong)+ + apply (wp whenE_wp hoare_drop_imps | simp cong: if_cong)+ done lemma ensure_empty_wp [wp]: "\P\ ensure_empty slot \\_. P\" @@ -624,7 +624,7 @@ lemma ensure_empty_wp [wp]: "\P\ ensure_empty slot \\P\ ensure_no_children slot \\_. 
P\" apply (clarsimp simp: ensure_no_children_def) - apply (wp hoare_whenE_wp) + apply (wp whenE_wp) apply (clarsimp) done @@ -710,7 +710,7 @@ lemma derive_cap_invE: "\P (derived_cap cap) and Q\ derive_cap slot cap \P\, \\r. Q\" apply (simp add:derive_cap_def) apply (rule hoare_pre) - apply (wp alternative_wp alternativeE_wp|wpc|simp)+ + apply (wp|wpc|simp)+ apply (auto simp:derived_cap_def) done @@ -759,7 +759,7 @@ lemma decode_cnode_move_rvu: crunch preserve [wp]: decode_cnode_invocation "P" -(wp: derive_cap_wpE unlessE_wp hoare_whenE_wp select_wp hoare_drop_imps simp: if_apply_def2 throw_on_none_def) + (wp: derive_cap_wpE unlessE_wp whenE_wp hoare_drop_imps simp: if_apply_def2 throw_on_none_def) lemma decode_invocation_wp: "\P\ decode_invocation (CNodeCap x y z sz) ref caps (CNodeIntent intent) \\_. P\, -" @@ -792,7 +792,7 @@ lemma delete_cap_simple_wp: delete_cap_simple ptr \\_. < ptr \c NullCap \* R>\" apply (clarsimp simp: delete_cap_simple_def is_final_cap_def) - apply (wp hoare_unless_wp always_empty_wp fast_finalise_cap_non_ep_wp) + apply (wp unless_wp always_empty_wp fast_finalise_cap_non_ep_wp) apply clarsimp apply (frule opt_cap_sep_imp) apply (clarsimp, rule conjI) @@ -832,7 +832,7 @@ lemma lookup_slot_rvu: lookup_slot thread cap_ptr \Q\, \Q'\ " apply (clarsimp simp: lookup_slot_def gets_the_resolve_cap[symmetric] split_def) - apply (rule hoare_vcg_seqE)+ + apply (rule bindE_wp)+ apply (rule returnOk_wp) apply (rule resolve_cap_u_nf [where r=r]) apply (rule hoare_pre, wp) @@ -862,14 +862,9 @@ lemma lookup_cap_rvu : done lemma lookup_cap_wp: - "\P\ - lookup_cap thread cap_ptr - \\_. P\, \\_ .P \ " - apply (clarsimp simp: lookup_cap_def) - apply (wp lookup_slot_wp get_cap_wp) - apply (clarsimp) - apply (wp lookup_slot_wp) - apply assumption + "\P\ lookup_cap thread cap_ptr \\_. P\, \\_ .P \ " + apply (clarsimp simp: lookup_cap_def) + apply (wp lookup_slot_wp get_cap_wp) done @@ -881,7 +876,7 @@ lemma lookup_cap_and_slot_rvu: lookup_cap_and_slot thread cap_ptr \Q\, \Q'\ " apply (clarsimp simp: lookup_cap_and_slot_def) - apply (rule hoare_vcg_seqE)+ + apply (rule bindE_wp)+ apply (rule returnOk_wp) apply (wp get_cap_rv) apply (rule hoare_pre, wp lookup_slot_rvu) @@ -1119,9 +1114,9 @@ lemma get_thread_sep_wp: done lemma get_thread_inv: -"\ Q \ - get_thread thread \\t s. Q s\" - by (simp add:get_thread_def | wp | wpc)+ + "\ Q \ get_thread thread \\t s. Q s\" + unfolding get_thread_def + by wpsimp lemma get_thread_sep_wp_precise: "\\s. tcb_at' (\tcb. Q tcb s) thread s \ @@ -1145,8 +1140,7 @@ lemma has_restart_cap_sep_wp: \\rv. Q rv\" apply (rule hoare_name_pre_state) apply (clarsimp simp: object_at_def) - apply (simp add: object_at_def get_thread_def has_restart_cap_def - | wp+ | wpc | intro conjI)+ + apply (wpsimp simp: object_at_def get_thread_def has_restart_cap_def | intro conjI)+ apply (clarsimp dest!: opt_cap_sep_imp simp: opt_cap_def slots_of_def) apply (clarsimp simp: object_slots_def) @@ -1157,14 +1151,14 @@ lemma has_restart_cap_sep_wp: lemma lift_do_kernel_op': "\\s'. P s'\ f \\_ s'. Q s'\ \ \\s. P (kernel_state s)\ do_kernel_op f \\_ s. Q (kernel_state s)\" apply (simp add: do_kernel_op_def split_def) - apply (wp select_wp) + apply wp apply (simp add: valid_def split_def) done lemma lift_do_kernel_op: "\\s. s = s'\ f \\_ s. s = s'\ \ \\s. (kernel_state s) = s'\ do_kernel_op f \\_ s. 
(kernel_state s) = s'\" apply (simp add: do_kernel_op_def split_def) - apply (wp select_wp) + apply wp apply (simp add: valid_def split_def) done @@ -1185,7 +1179,7 @@ lemma schedule_no_choice_wp: schedule \\r s. cdl_current_thread s = Some current_thread \ cdl_current_domain s = current_domain \ P s\" apply (simp add:schedule_def switch_to_thread_def change_current_domain_def) - apply (wp alternative_wp select_wp) + apply wp apply (case_tac s,clarsimp) done diff --git a/proof/capDL-api/ProofHelpers_DP.thy b/proof/capDL-api/ProofHelpers_DP.thy index bf1609f26d..2a9aa2dcd1 100644 --- a/proof/capDL-api/ProofHelpers_DP.thy +++ b/proof/capDL-api/ProofHelpers_DP.thy @@ -11,7 +11,7 @@ imports begin crunch_ignore (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE diff --git a/proof/capDL-api/README.md b/proof/capDL-api/README.md index dbea447f71..92a8a16313 100644 --- a/proof/capDL-api/README.md +++ b/proof/capDL-api/README.md @@ -24,13 +24,12 @@ and Andrew Boyton's PhD thesis. Building -------- -To build from the `l4v/` directory, run: +To build for the ARM architecture from the `l4v/` directory, run: - ./isabelle/bin/isabelle build -d . -v -b DSpecProofs + L4V_ARCH=ARM ./run_tests DSpecProofs Important Theories ------------------ The top-level theory is [`API_DP`](API_DP.thy). The seL4 API and kernel model are located in [`Kernel_DP`](Kernel_DP.thy). - diff --git a/proof/capDL-api/Retype_DP.thy b/proof/capDL-api/Retype_DP.thy index 485d0eba3e..e85991fe9a 100644 --- a/proof/capDL-api/Retype_DP.thy +++ b/proof/capDL-api/Retype_DP.thy @@ -50,7 +50,7 @@ lemma create_objects_mapM_x': qed crunch inv[wp]: generate_object_ids P -(wp:crunch_wps select_wp) + (wp: crunch_wps) lemma pick_rev: assumes "target_object_ids = map (\x. {x}) ids" @@ -111,7 +111,7 @@ lemma generate_object_ids_rv: \\r s. r = map (\x. {x}) (map pick r) \ length r = n \ set (map pick r) \ obj_range \ distinct (map pick r) \" apply (clarsimp simp:generate_object_ids_def) - apply (wp select_wp) + apply wp apply clarsimp apply (simp add: distinct_map) apply (intro conjI) @@ -185,7 +185,7 @@ lemma update_available_range_wp: apply (rule_tac x = new_range in exI) apply (intro conjI,assumption+) apply (sep_select 2,assumption) - apply (wp select_wp) + apply wp apply clarsimp+ done @@ -250,22 +250,22 @@ lemma reset_untyped_cap_wp: cdl.lift (uref \c UntypedCap dev obj_range fr \* (\*ptr\tot_free_range. ptr \o Untyped) \* P) s\,-" apply (simp add:reset_untyped_cap_def bind_assoc bindE_assoc) apply (rule hoare_pre) - apply (wp hoare_whenE_wp) + apply (wp whenE_wp) apply (rule_tac P = "\fr. cap = UntypedCap dev obj_range fr - \ (\fr\ set x. free_range \ fr \ fr \ obj_range)" in hoare_gen_asmE) + \ (\fr\ set rv. free_range \ fr \ fr \ obj_range)" in hoare_gen_asmE) apply clarsimp - apply (wp hoare_whenE_wp mapME_x_wp alternativeE_wp) + apply (wp whenE_wp mapME_x_wp) apply (rule ballI) apply (rule hoare_pre) - apply (wp alternative_wp) + apply wp apply simp apply (rule hoare_post_imp[OF _ set_cap_wp]) apply clarsimp - apply (rule_tac x = xa in exI) + apply (rule_tac x = x in exI) apply ((rule conjI, fastforce)+, sep_solve) apply clarsimp apply sep_solve - apply (wp select_wp | clarsimp)+ + apply (wp | clarsimp)+ apply (subst dummy_detype_if_untyped) apply simp apply (sep_select_asm 2) @@ -292,12 +292,12 @@ lemma reset_untyped_cap_wp: done crunch cdl_current_domain[wp]: reset_untyped_cap "\s. 
P (cdl_current_domain s)" -(wp:select_wp mapM_x_wp' mapME_x_inv_wp alternativeE_wp crunch_wps hoare_unless_wp - simp: detype_def crunch_simps) + (wp: mapM_x_wp' mapME_x_inv_wp crunch_wps unless_wp + simp: detype_def crunch_simps) crunch cdl_current_domain[wp]: invoke_untyped "\s. P (cdl_current_domain s)" -(wp: select_wp mapM_x_wp' mapME_x_inv_wp alternativeE_wp crunch_wps hoare_unless_wp - simp: detype_def crunch_simps validE_E_def) + (wp: mapM_x_wp' mapME_x_inv_wp crunch_wps unless_wp + simp: detype_def crunch_simps validE_E_def) lemma invoke_untyped_wp: "\ K (default_object nt ts minBound = Some obj \ nt \ UntypedType @@ -371,7 +371,7 @@ lemma invoke_untyped_wp: apply (drule(1) subset_trans[rotated],fastforce)+ apply (wp reset_untyped_cap_wp unlessE_wp| simp)+ apply (wp hoare_drop_impE_R) - apply (erule hoare_post_imp_R[OF reset_untyped_cap_wp + apply (erule hoare_strengthen_postE_R[OF reset_untyped_cap_wp [where free_range = free_range and obj_range = obj_range]]) apply clarsimp apply (rule conjI) @@ -416,7 +416,7 @@ lemma decode_untyped_invocation_rvu: get_index_def throw_on_none_def decode_untyped_invocation_def mapME_x_singleton) apply (rule hoare_pre) - apply (wp alternativeE_wp unlessE_wp + apply (wp unlessE_wp lookup_slot_for_cnode_op_rvu' | wpc | clarsimp)+ done @@ -432,25 +432,20 @@ lemma set_parent_has_children[wp]: lemma create_cap_has_children[wp]: "\\\ create_cap new_type sz uref slot dev \\r. has_children uref\" apply (clarsimp simp :create_cap_def split_def) - apply wp - apply simp + apply wpsimp done abbreviation (input) "retype_with_kids uinv \ (case uinv of (InvokeUntyped (Retype uref nt ts dest has_kids n)) \ has_kids)" -crunch cdt[wp]: retype_region "\s. P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) +crunches retype_region, update_available_range + for cdt[wp]: "\s. P (cdl_cdt s)" + (simp: crunch_simps corrupt_intents_def) -crunch has_children[wp]: retype_region "has_children slot" -(wp:select_wp simp:crunch_simps corrupt_intents_def simp:has_children_def is_cdt_parent_def) - -crunch cdt[wp]: update_available_range "\s. P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) - -crunch has_children[wp]: update_available_range "has_children slot" -(wp:select_wp simp:crunch_simps corrupt_intents_def simp:has_children_def is_cdt_parent_def) +crunches retype_region, update_available_range + for has_children[wp]: "has_children slot" + (simp: crunch_simps corrupt_intents_def has_children_def is_cdt_parent_def) lemma invoke_untyped_one_has_children: "uinv = (Retype uref nt ts [slot] has_kids (Suc 0)) @@ -485,7 +480,7 @@ lemma invoke_untyped_exception: apply (rule hoare_name_pre_stateE) apply (cases uinv) apply clarsimp - apply (wp unlessE_wp alternative_wp + apply (wp unlessE_wp | wpc | simp add: reset_untyped_cap_def)+ apply (rule_tac P = "available_range cap = cap_objects cap" in hoare_gen_asmEx) apply (simp add: whenE_def) @@ -517,7 +512,7 @@ lemma invoke_untyped_one_wp: \* P > s) \, -" apply simp apply (rule hoare_pre) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule invoke_untyped_wp [where free_range = free_range and obj_range = obj_range and tot_free_range = tot_free_range and obj = obj and P = P]) @@ -533,21 +528,14 @@ lemma invoke_untyped_one_wp: lemma mark_tcb_intent_error_has_children[wp]: "\\s. P (has_children ptr s)\ - mark_tcb_intent_error cur_thread b - \\rv s. 
P (has_children ptr s)\" - apply (simp add:has_children_def is_cdt_parent_def - mark_tcb_intent_error_def update_thread_def - set_object_def | wp | wpc)+ - done - -crunch cdt[wp]: corrupt_frame "\s. P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) - -crunch cdt[wp]: corrupt_tcb_intent "\s. P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) + mark_tcb_intent_error cur_thread b + \\rv s. P (has_children ptr s)\" + by (wpsimp simp: has_children_def is_cdt_parent_def mark_tcb_intent_error_def update_thread_def + set_object_def) -crunch cdt[wp]: corrupt_ipc_buffer "\s. P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) +crunches corrupt_frame, corrupt_tcb_intent, corrupt_ipc_buffer + for cdt[wp]: "\s. P (cdl_cdt s)" + (simp: crunch_simps corrupt_intents_def) lemma corrupt_ipc_buffer_has_children[wp]: "\\s. P (has_children ptr s)\ @@ -624,7 +612,7 @@ lemma seL4_Untyped_Retype_sep: apply clarsimp apply (rule hoare_vcg_E_elim[where P = P and P' = P for P,simplified,rotated]) apply wp - apply (rule hoare_post_imp_R[OF hoare_vcg_conj_lift_R]) + apply (rule hoare_strengthen_postE_R[OF hoare_vcg_conj_lift_R]) apply (rule invoke_untyped_one_has_children) apply fastforce apply (rule_tac P = "P1 \* P2" for P1 P2 in @@ -706,10 +694,10 @@ lemma seL4_Untyped_Retype_sep: **********************************************************************) crunch cdt_inc[wp]: schedule "\s. cdl_cdt s child = parent" -(wp:select_wp alternative_wp crunch_wps simp:crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch cdt_inc[wp]: handle_pending_interrupts "\s. cdl_cdt s child = parent" -(wp:select_wp alternative_wp simp:crunch_simps) + (wp: simp: crunch_simps) lemmas gets_the_resolve_cap_sym = gets_the_resolve_cap[symmetric] @@ -793,12 +781,12 @@ lemma invoke_untyped_cdt_inc[wp]: apply (wp mapM_x_wp[OF _ subset_refl]) apply (simp add:create_cap_def) apply (rule hoare_pre) - apply (wp set_parent_other hoare_unless_wp unlessE_wp + apply (wp set_parent_other unless_wp unlessE_wp | wpc | simp)+ apply (simp add: reset_untyped_cap_def validE_def sum.case_eq_if) apply (rule_tac Q = "\r s. cdl_cdt s child = Some parent" in hoare_post_imp) apply simp - apply (wp hoare_whenE_wp alternativeE_wp mapME_x_inv_wp select_wp | simp)+ + apply (wp whenE_wp mapME_x_inv_wp | simp)+ apply (clarsimp simp:detype_def) done @@ -842,10 +830,9 @@ lemma lookup_cap_rvu': done crunch cdl_current_thread [wp]: handle_pending_interrupts "\s. P (cdl_current_thread s)" -(wp: alternative_wp select_wp) crunch cdl_current_thread [wp]: lookup_cap "\s. P (cdl_current_thread s)" -(wp: alternative_wp select_wp hoare_drop_imps) + (wp: hoare_drop_imps) lemma throw_opt_wp_valid: "\P\ throw_opt err x \\r. P\" @@ -871,12 +858,10 @@ lemma update_thread_no_pending: K(\x. (case cdl_tcb_caps x tcb_pending_op_slot of Some cap \ \ is_pending_cap cap | _ \ True)\ (case cdl_tcb_caps (t x) tcb_pending_op_slot of Some cap \ \ is_pending_cap cap | _ \ True))\ update_thread thread_ptr t \\rv. 
no_pending\" - apply (simp add: update_thread_def set_object_def | (wp modify_wp)+ | wpc)+ - apply (clarsimp simp: no_pending_def) - apply (drule_tac x = oid in spec) - apply (clarsimp simp: opt_cap_def slots_of_def - object_slots_def - split: if_splits option.splits) + unfolding update_thread_def set_object_def + apply wpsimp + apply (fastforce simp: opt_cap_def slots_of_def object_slots_def no_pending_def + split: if_splits option.splits) done lemma update_thread_tcb_at: @@ -896,10 +881,10 @@ lemma corrupt_intents_no_pending: done crunch no_pending[wp]: corrupt_ipc_buffer no_pending - (wp: crunch_wps select_wp update_thread_no_pending corrupt_intents_no_pending) + (wp: crunch_wps update_thread_no_pending corrupt_intents_no_pending) crunch no_pending[wp]: mark_tcb_intent_error no_pending - (wp: crunch_wps select_wp update_thread_no_pending corrupt_intents_no_pending) + (wp: crunch_wps update_thread_no_pending corrupt_intents_no_pending) lemma detype_one_wp: "o - \* R> s @@ -960,11 +945,11 @@ lemma invoke_untyped_preempt: sep_map_set_conj sep_any_map_o obj_range \* Q) s\" apply (simp add: invoke_untyped_def) apply (wp unlessE_wp) - apply (simp add: reset_untyped_cap_def whenE_liftE | wp hoare_whenE_wp alternative_wp)+ + apply (simp add: reset_untyped_cap_def whenE_liftE | wp whenE_wp)+ apply (rule_tac P = "\a. cap = UntypedCap dev obj_range a" in hoare_gen_asmEx) - apply (rule hoare_post_impErr[where E = E and F = E for E]) + apply (rule hoare_strengthen_postE[where E = E and F = E for E]) apply (rule mapME_x_inv_wp[where P = P and E = "\r. P" for P]) - apply (wp alternative_wp) + apply wp apply simp apply (wp hoare_vcg_ex_lift) apply (rule hoare_post_imp[OF _ set_cap_wp]) @@ -974,7 +959,7 @@ lemma invoke_untyped_preempt: apply sep_solve apply simp apply simp - apply (wp select_wp)+ + apply wp+ apply clarsimp apply (frule opt_cap_sep_imp) apply (clarsimp dest!: reset_cap_asid_untyped_cap_eqD) @@ -1009,14 +994,13 @@ lemma set_parent_cdl_parent: done crunch cdl_parent[wp]: reset_untyped_cap "\s. cdl_cdt s slot = Some parent" - (wp: assert_inv crunch_wps select_wp mapME_x_inv_wp alternative_wp -simp: crunch_simps detype_def) + (wp: assert_inv crunch_wps mapME_x_inv_wp + simp: crunch_simps detype_def) crunch cdl_parent[wp]: insert_cap_child, corrupt_ipc_buffer, corrupt_tcb_intent, update_thread, derive_cap, insert_cap_sibling "\s. cdl_cdt s slot = Some parent" - (wp: crunch_wps select_wp set_parent_cdl_parent simp: crunch_simps -corrupt_intents_def) + (wp: crunch_wps set_parent_cdl_parent simp: crunch_simps corrupt_intents_def) lemma transfer_caps_loop_cdl_parent: "\\s. cdl_cdt s slot = Some parent\ @@ -1024,7 +1008,7 @@ lemma transfer_caps_loop_cdl_parent: \\_ s. cdl_cdt s slot = Some parent\" apply (induct caps arbitrary: dest; clarsimp split del: if_split) apply (rule hoare_pre) - apply (wp alternative_wp crunch_wps | assumption + apply (wp crunch_wps | assumption | simp add: crunch_simps split del: if_split)+ done @@ -1058,7 +1042,7 @@ lemma set_cap_no_pending[wp]: \no_pending\ set_cap slot cap \\rv s. no_pending s\" apply (simp add: set_cap_def) apply (cases slot, simp) - apply (wp set_object_no_pending select_wp | wpc | simp add: no_pending_def)+ + apply (wp set_object_no_pending | wpc | simp add: no_pending_def)+ apply (drule_tac x = a in spec) apply (rule conjI) apply (clarsimp simp: tcb_pending_op_slot_def tcb_ipcbuffer_slot_def) @@ -1107,10 +1091,9 @@ lemma is_pending_cap_set_available_range[simp]: lemma reset_untyped_cap_no_pending[wp]: "\no_pending \ reset_untyped_cap cref \\rv. 
no_pending\" apply (simp add: reset_untyped_cap_def) - apply (wp hoare_whenE_wp) - apply (rule_tac P = "snd cref = tcb_pending_op_slot \ \ is_pending_cap cap" in hoare_gen_asmEx) - apply (wp mapME_x_inv_wp alternativeE_wp | simp)+ - apply (wp select_wp)+ + apply (wp whenE_wp) + apply (rule_tac P = "snd cref = tcb_pending_op_slot \ \ is_pending_cap cap" in hoare_gen_asmEx) + apply (wp mapME_x_inv_wp | simp)+ apply (clarsimp simp: detype_no_pending) apply (cases cref, clarsimp simp: no_pending_def) done @@ -1161,11 +1144,11 @@ lemma reset_untyped_cap_not_pending_cap[wp]: reset_untyped_cap cref \\rv s. (\cap. opt_cap cref s = Some cap) \ \ is_pending_cap (the (opt_cap cref s))\" apply (simp add: reset_untyped_cap_def) - apply (wp hoare_whenE_wp) + apply (wp whenE_wp) apply (rule_tac P = " \ is_pending_cap cap" in hoare_gen_asmEx) - apply (wp mapME_x_inv_wp alternativeE_wp set_cap_opt_cap)+ + apply (wp mapME_x_inv_wp set_cap_opt_cap)+ apply simp - apply (wp select_wp)+ + apply wp+ apply (clarsimp simp: detype_no_pending) apply (cases cref) apply (clarsimp simp: detype_def opt_cap_def slots_of_def object_slots_def @@ -1177,13 +1160,12 @@ lemma invoke_untyped_no_pending[wp]: invoke_untyped (Retype ref a b c d e) \\rv. no_pending\" apply (simp add: invoke_untyped_def create_cap_def) - apply (wpsimp wp: mapM_x_wp' set_cap_no_pending_asm_in_pre get_cap_wp select_wp - simp: update_available_range_def - )+ + apply (wpsimp wp: mapM_x_wp' set_cap_no_pending_asm_in_pre get_cap_wp + simp: update_available_range_def) apply (wp (once) hoare_drop_imps) - apply (wpsimp split_del: if_split)+ + apply (wpsimp split_del: if_split)+ apply (rule_tac Q' = "\r s. no_pending s \ ((\y. opt_cap ref s = Some y) \ - \ is_pending_cap (the (opt_cap ref s)))" in hoare_post_imp_R) + \ is_pending_cap (the (opt_cap ref s)))" in hoare_strengthen_postE_R) apply (wp reset_untyped_cap_no_pending) apply simp apply auto @@ -1240,7 +1222,7 @@ lemma seL4_Untyped_Retype_inc_no_preempt: apply clarsimp apply (rule hoare_vcg_E_elim[where P = P and P' = P for P,simplified,rotated]) apply wp - apply (rule hoare_post_imp_R[OF hoare_vcg_conj_lift_R]) + apply (rule hoare_strengthen_postE_R[OF hoare_vcg_conj_lift_R]) apply (rule valid_validE_R) apply (rule invoke_untyped_cdt_inc) apply (rule_tac P = "P1 \* P2" for P1 P2 in diff --git a/proof/capDL-api/TCB_DP.thy b/proof/capDL-api/TCB_DP.thy index 49202dc48d..edfcd78008 100644 --- a/proof/capDL-api/TCB_DP.thy +++ b/proof/capDL-api/TCB_DP.thy @@ -51,7 +51,7 @@ lemma restart_wp: restart tcb \\_. < (tcb,tcb_pending_op_slot) \c cap \* R > \" apply (clarsimp simp: restart_def) - apply (wp alternative_wp) + apply wp apply (wp set_cap_wp[sep_wand_wp])+ apply (clarsimp) apply (rule hoare_pre_cont) @@ -70,7 +70,7 @@ lemma invoke_tcb_write: invoke_tcb (WriteRegisters tcb x y z) \\_. 
< (tcb,tcb_pending_op_slot) \c cap \* R >\" apply (clarsimp simp: invoke_tcb_def) - apply (wp alternative_wp restart_wp | simp)+ + apply (wp restart_wp | simp)+ done lemma not_memory_cap_reset_asid: @@ -93,16 +93,15 @@ lemma tcb_update_thread_slot_wp: apply (clarsimp simp: tcb_update_thread_slot_def) apply (rule hoare_name_pre_state) apply (clarsimp) - apply (wp) - apply (wp alternative_wp) - apply (wp insert_cap_child_wp) - apply (wp insert_cap_sibling_wp get_cap_rv)+ + apply wp + apply (wp insert_cap_child_wp) + apply (wp insert_cap_sibling_wp get_cap_rv)+ apply (safe) apply (sep_solve) apply (drule not_memory_cap_reset_asid') apply (clarsimp simp: is_memory_cap_def split:cdl_cap.splits) apply (clarsimp) -done + done lemma tcb_empty_thread_slot_wp: "\<(target_tcb,slot) \c NullCap \* R>\ tcb_empty_thread_slot target_tcb slot \\_. <(target_tcb,slot) \c NullCap \* R>\ " apply (simp add:tcb_empty_thread_slot_def whenE_def | wp)+ @@ -128,7 +127,7 @@ lemma tcb_update_ipc_buffer_wp: \\_. <(target_tcb, tcb_ipcbuffer_slot) \c ipc_buffer_cap \* tcb_cap_slot \c (TcbCap target_tcb) \* (ipc_buffer_slot) \c cap \* R>\, \E\" apply (clarsimp simp: tcb_update_ipc_buffer_def sep_any_All) apply (rule hoare_name_pre_stateE) - apply (wp hoare_whenE_wp tcb_update_thread_slot_wp[sep_wand_side_wpE]) + apply (wp whenE_wp tcb_update_thread_slot_wp[sep_wand_side_wpE]) apply (clarsimp) apply (wp get_cap_rv'[where cap=cap]) apply (clarsimp) @@ -148,7 +147,7 @@ lemma tcb_update_ipc_buffer_wp': \\_. <(target_tcb, tcb_ipcbuffer_slot) \c ipc_buffer_cap \* tcb_cap_slot \c (TcbCap target_tcb) \* (ipc_buffer_slot) \c cap \* R>\, \E\" apply (rule hoare_name_pre_stateE) apply (clarsimp simp: tcb_update_ipc_buffer_def sep_any_All) - apply (wp hoare_whenE_wp tcb_update_thread_slot_wp[sep_wandise] get_cap_rv[where cap=cap]) + apply (wp whenE_wp tcb_update_thread_slot_wp[sep_wandise] get_cap_rv[where cap=cap]) apply (rule hoare_allI) apply (rule hoare_impI) apply (clarsimp) @@ -172,7 +171,7 @@ lemma tcb_update_vspace_root_wp: \\_. < (target_tcb, tcb_vspace_slot) \c vrt_cap \* tcb_cap_slot \c (TcbCap target_tcb) \* (vrt_slot) \c cap \* R>\, \E\" apply (rule hoare_name_pre_stateE) apply (clarsimp simp: tcb_update_vspace_root_def sep_any_All) - apply (wp hoare_whenE_wp tcb_update_thread_slot_wp[sep_wand_side_wpE] get_cap_rv) + apply (wp whenE_wp tcb_update_thread_slot_wp[sep_wand_side_wpE] get_cap_rv) apply (wp get_cap_rv'[where cap=cap]) apply (clarsimp) apply (wp tcb_empty_thread_slot_wpE[sep_wand_wpE]) @@ -188,7 +187,7 @@ lemma tcb_update_vspace_root_wp': \\_. < (target_tcb, tcb_vspace_slot) \c vrt_cap \* tcb_cap_slot \c (TcbCap target_tcb) \* (vrt_slot) \c cap \* R>\, \E\" apply (rule hoare_name_pre_stateE) apply (clarsimp simp: tcb_update_vspace_root_def sep_any_All) - apply (wp hoare_whenE_wp tcb_update_thread_slot_wp[sep_wand_side_wpE'] get_cap_rv)+ + apply (wp whenE_wp tcb_update_thread_slot_wp[sep_wand_side_wpE'] get_cap_rv)+ apply (wp hoare_vcg_conj_liftE1) apply (wp tcb_empty_thread_slot_wpE[sep_wand_wpE], (clarsimp simp: sep_conj_assoc | sep_solve) +) apply (wp tcb_empty_thread_slot_wpE[sep_wand_wpE], (clarsimp simp: sep_conj_assoc | sep_solve) +) @@ -218,10 +217,10 @@ lemma tcb_update_cspace_root_wp: \\_. 
< (target_tcb, tcb_cspace_slot) \c crt_cap \* tcb_cap_slot \c (TcbCap target_tcb) \* (crt_slot) \c cap \* R>\, \E\" apply (rule hoare_name_pre_stateE) apply (clarsimp simp: tcb_update_cspace_root_def sep_any_All_side cong:cap_type_bad_cong) - apply (wpsimp wp: hoare_whenE_wp tcb_update_thread_slot_wp[sep_wand_side_wpE] get_cap_rv + apply (wpsimp wp: whenE_wp tcb_update_thread_slot_wp[sep_wand_side_wpE] get_cap_rv hoare_vcg_conj_liftE1) apply (wpsimp wp: tcb_empty_thread_slot_wpE[sep_wand_wpE] simp: sep_conj_assoc) - apply (wpsimp wp: hoare_vcg_all_lift_R[THEN hoare_vcg_E_elim[rotated]] + apply (wpsimp wp: hoare_vcg_all_liftE_R[THEN hoare_vcg_E_elim[rotated]] hoare_vcg_const_imp_lift_R tcb_empty_thread_slot_wpE[sep_wand_wpE] split_del: if_split simp: if_apply_def2) @@ -330,7 +329,7 @@ lemma decode_tcb_invocation_wp[wp]: decode_tcb_invocation cap cap_ref caps (TcbConfigureIntent fault_ep cspace_root_data vspace_root_data buffer) \\_. P\, \\_. P\" apply (clarsimp simp: decode_tcb_invocation_def) - apply (wp alternative_wp) + apply wp apply (clarsimp) done @@ -356,7 +355,7 @@ lemma decode_invocation_tcb_rv': decode_tcb_invocation cap cap_ref caps (TcbConfigureIntent fault_ep cspace_root_data vspace_root_data buffer) \P\, -" apply (clarsimp simp: decode_tcb_invocation_def) - apply (wp alternativeE_R_wp) + apply wp apply (wp throw_on_none_rvR)+ apply (safe) apply (clarsimp simp: get_index_def) @@ -474,7 +473,7 @@ lemma tcb_update_vspace_root_inv: tcb_update_vspace_root a b c \\_ s. P (cdl_current_thread s)\" apply (clarsimp simp: tcb_update_vspace_root_def) - apply (wp hoare_drop_imps hoare_whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (wp tcb_empty_thread_slot_wp_inv) apply auto @@ -486,7 +485,7 @@ lemma tcb_update_cspace_root_inv: tcb_update_cspace_root a b c \\_ s. P (cdl_current_thread s)\" apply (clarsimp simp: tcb_update_cspace_root_def) - apply (wp hoare_drop_imps hoare_whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (wp tcb_empty_thread_slot_wp_inv) apply auto @@ -497,7 +496,7 @@ lemma tcb_update_ipc_buffer_inv: tcb_update_ipc_buffer a b c \\_ s. P (cdl_current_thread s)\" apply (clarsimp simp: tcb_update_ipc_buffer_def) - apply (wp hoare_drop_imps hoare_whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (wp tcb_empty_thread_slot_wp_inv) apply auto @@ -516,7 +515,7 @@ lemma invoke_tcb_ThreadControl_cur_thread: \\_ s. 
P (cdl_current_thread s) \" including no_pre apply (simp add:invoke_tcb_def comp_def) - apply (wp alternative_wp hoare_whenE_wp + apply (wp whenE_wp tcb_empty_thread_slot_wp_inv [where R = "(target_tcb, tcb_vspace_slot) \c - \* (target_tcb,tcb_cspace_slot) \c - @@ -525,9 +524,9 @@ lemma invoke_tcb_ThreadControl_cur_thread: |simp add:tcb_update_ipc_buffer_def tcb_update_thread_slot_def)+ apply (clarsimp simp:conj_comms) - apply (rule hoare_post_impErr[OF valid_validE,rotated],assumption) + apply (rule hoare_strengthen_postE[OF valid_validE,rotated],assumption) apply (fastforce split:option.splits) - apply (wp hoare_drop_imps hoare_whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_erule_concl refl_imp sep_any_imp, assumption) @@ -548,12 +547,12 @@ lemma invoke_tcb_ThreadControl_cur_thread: \* (target_tcb,tcb_cspace_slot) \c - \* (target_tcb, tcb_ipcbuffer_slot) \c NullCap \* target_tcb \f - \* R> s) - " in hoare_post_impErr[rotated -1]) + " in hoare_strengthen_postE[rotated -1]) apply assumption apply (wp tcb_empty_thread_slot_wp_inv) apply clarsimp apply (sep_solve) - apply (wp hoare_drop_imps hoare_whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_select 2) @@ -578,7 +577,7 @@ lemma invoke_tcb_ThreadControl_cur_thread: \* (target_tcb, tcb_cspace_slot) \c - \* (target_tcb, tcb_ipcbuffer_slot) \c NullCap \* target_tcb \f - \* R> s) - " in hoare_post_impErr[rotated -1]) + " in hoare_strengthen_postE[rotated -1]) apply assumption apply (wp tcb_empty_thread_slot_wp_inv) apply clarsimp @@ -588,10 +587,10 @@ lemma invoke_tcb_ThreadControl_cur_thread: \* (target_tcb,tcb_cspace_slot) \c - \* (target_tcb, tcb_ipcbuffer_slot) \c NullCap \* target_tcb \f - \* R> s) - " in hoare_post_impErr[rotated -1]) + " in hoare_strengthen_postE[rotated -1]) apply assumption - apply (wp hoare_whenE_wp |wpc|simp add:tcb_update_cspace_root_def)+ - apply (wp hoare_drop_imps hoare_whenE_wp alternative_wp + apply (wp whenE_wp |wpc|simp add:tcb_update_cspace_root_def)+ + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_schem) @@ -611,7 +610,7 @@ lemma invoke_tcb_ThreadControl_cur_thread: \* (target_tcb, tcb_cspace_slot) \c - \* (target_tcb, tcb_ipcbuffer_slot) \c NullCap \* target_tcb \f - \* R> s) - " in hoare_post_impErr[rotated -1]) + " in hoare_strengthen_postE[rotated -1]) apply clarsimp apply assumption apply (wp tcb_empty_thread_slot_wp_inv) @@ -660,7 +659,7 @@ lemma decode_tcb_invocation_current_thread_inv[wp]: (TcbConfigureIntent fault_ep cspace_root_data vspace_root_data buffer_addr) \\_ s. P (cdl_current_thread s)\" apply (clarsimp simp: decode_tcb_invocation_def) - apply (wp alternative_wp) + apply wp apply (safe) done @@ -782,7 +781,7 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: \ invoke_tcb (ThreadControl target_tcb tcb_cap_slot faultep croot vroot ipc_buffer) \\_ s. 
P (cdl_current_domain s) \" apply (simp add:invoke_tcb_def comp_def) - apply (wp alternative_wp hoare_whenE_wp + apply (wp whenE_wp tcb_empty_thread_slot_wp_inv [where R = "(target_tcb, tcb_vspace_slot) \c - \* (target_tcb,tcb_cspace_slot) \c - @@ -791,9 +790,9 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: |simp add:tcb_update_ipc_buffer_def tcb_update_thread_slot_def)+ apply (clarsimp simp:conj_comms) - apply (rule hoare_post_impErr[OF valid_validE,rotated],assumption) + apply (rule hoare_strengthen_postE[OF valid_validE,rotated],assumption) apply (fastforce split:option.splits) - apply (wp hoare_drop_imps hoare_whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_schem) @@ -813,12 +812,12 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: \* (target_tcb,tcb_cspace_slot) \c - \* (target_tcb, tcb_ipcbuffer_slot) \c NullCap \* target_tcb \f - \* R> s) - " in hoare_post_impErr[rotated -1]) + " in hoare_strengthen_postE[rotated -1]) apply assumption apply (wp tcb_empty_thread_slot_wp_inv) apply clarsimp apply (sep_solve) - apply (wp hoare_drop_imps hoare_whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_select 2) @@ -843,7 +842,7 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: \* (target_tcb, tcb_cspace_slot) \c - \* (target_tcb, tcb_ipcbuffer_slot) \c NullCap \* target_tcb \f - \* R> s) - " in hoare_post_impErr[rotated -1]) + " in hoare_strengthen_postE[rotated -1]) apply assumption apply (wp tcb_empty_thread_slot_wp_inv) apply clarsimp @@ -853,10 +852,10 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: \* (target_tcb,tcb_cspace_slot) \c - \* (target_tcb, tcb_ipcbuffer_slot) \c NullCap \* target_tcb \f - \* R> s) - " in hoare_post_impErr[rotated -1]) + " in hoare_strengthen_postE[rotated -1]) apply assumption - apply (wp hoare_whenE_wp |wpc|simp add:tcb_update_cspace_root_def)+ - apply (wp hoare_drop_imps hoare_whenE_wp alternative_wp + apply (wp whenE_wp |wpc|simp add:tcb_update_cspace_root_def)+ + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_select 2) @@ -881,7 +880,7 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: \* (target_tcb, tcb_cspace_slot) \c - \* (target_tcb, tcb_ipcbuffer_slot) \c NullCap \* target_tcb \f - \* R> s) - " in hoare_post_impErr[rotated -1]) + " in hoare_strengthen_postE[rotated -1]) apply clarsimp apply assumption apply (wp tcb_empty_thread_slot_wp_inv) @@ -1043,7 +1042,7 @@ shows apply (wp invoke_tcb_ThreadControl_cdl_current_domain)[1] apply (clarsimp cong:reset_cap_asid_cap_type) apply (clarsimp dest!:reset_cap_asid_cap_type) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule_tac R = "(root_tcb_id, tcb_pending_op_slot) \c RestartCap \* R'" for R' in invoke_tcb_threadcontrol_wp'[where vrt_cap = vspace_cap and crt_cap = "cdl_update_cnode_cap_data cspace_cap cspace_root_data" and @@ -1167,9 +1166,9 @@ lemma restart_cdl_current_domain: "\\s. <(ptr,tcb_pending_op_slot) \c cap \* \ > s \ \ is_pending_cap cap \ P (cdl_current_domain s)\ restart ptr \\r s. 
P (cdl_current_domain s)\" apply (simp add:restart_def) - apply (wp alternative_wp) + apply wp apply (simp add:cancel_ipc_def) - apply (wpsimp wp: hoare_pre_cont[where a="revoke_cap_simple sl" for sl])+ + apply (wpsimp wp: hoare_pre_cont[where f="revoke_cap_simple sl" for sl])+ apply (drule opt_cap_sep_imp) apply (clarsimp dest!: reset_cap_asid_pending) apply (auto simp add: is_pending_cap_def) @@ -1180,9 +1179,9 @@ lemma restart_cdl_current_thread: "\\s. <(ptr,tcb_pending_op_slot) \c cap \* \ > s \ \ is_pending_cap cap \ P (cdl_current_thread s)\ restart ptr \\r s. P (cdl_current_thread s)\" apply (simp add:restart_def) - apply (wp alternative_wp) + apply wp apply (simp add:cancel_ipc_def) - apply (wpsimp wp: hoare_pre_cont[where a="revoke_cap_simple sl" for sl])+ + apply (wpsimp wp: hoare_pre_cont[where f="revoke_cap_simple sl" for sl])+ apply (drule opt_cap_sep_imp) apply (clarsimp dest!: reset_cap_asid_pending) apply (auto simp add: is_pending_cap_def) @@ -1222,8 +1221,9 @@ lemma seL4_TCB_WriteRegisters_wp: apply (wp do_kernel_op_pull_back) apply (rule hoare_post_imp[OF _ call_kernel_with_intent_allow_error_helper [where check = False,simplified]]) + apply (rename_tac rv s) apply clarsimp - apply (case_tac r,(clarsimp,assumption)+)[1] + apply (case_tac rv, (clarsimp,assumption)+)[1] apply fastforce apply (rule hoare_strengthen_post[OF set_cap_wp]) apply (sep_select 3,sep_cancel) @@ -1243,8 +1243,6 @@ lemma seL4_TCB_WriteRegisters_wp: apply (simp add: decode_invocation_def throw_opt_def get_tcb_intent_def decode_tcb_invocation_def) apply wp - apply (rule alternativeE_wp) - apply (wp+)[2] apply (clarsimp simp:conj_comms lookup_extra_caps_def mapME_def sequenceE_def) apply (rule returnOk_wp) @@ -1334,8 +1332,6 @@ lemma seL4_TCB_Resume_wp: apply (simp add: decode_invocation_def throw_opt_def get_tcb_intent_def decode_tcb_invocation_def) apply wp - apply (rule alternativeE_wp) - apply (wp+)[2] apply (clarsimp simp: lookup_extra_caps_def mapME_def sequenceE_def) apply (rule returnOk_wp) apply (rule lookup_cap_and_slot_rvu diff --git a/proof/crefine/AARCH64/ADT_C.thy b/proof/crefine/AARCH64/ADT_C.thy new file mode 100644 index 0000000000..c8c1a67caf --- /dev/null +++ b/proof/crefine/AARCH64/ADT_C.thy @@ -0,0 +1,1753 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ADT_C +imports + Schedule_C Retype_C Recycle_C + "AInvs.BCorres2_AI" +begin + +unbundle l4v_word_context + +definition + exec_C :: "(cstate, int, strictc_errortype) body \ + (cstate, int, strictc_errortype) com \ + (cstate,unit) nondet_monad" +where + "exec_C \ c \ \s. ({()} \ {s'. \ \ \c,Normal s\ \ Normal s'}, + \xs. xs \ range Normal \ \ \ \c, Normal s\ \ xs)" + +definition + ct_running_C :: "cstate \ bool" +where + "ct_running_C s \ let + g = globals s; + hp = clift (t_hrs_' g); + ct = ksCurThread_' g + in \tcb. 
hp ct = Some tcb \ + tsType_CL (thread_state_lift (tcbState_C tcb)) = scast ThreadState_Running" + +context kernel_m +begin + +(* currently hypervisor events redirect directly to handleVCPUFault *) +definition + handleHypervisorEvent_C :: "hyp_fault_type \ (globals myvars, int, strictc_errortype) com" +where + "handleHypervisorEvent_C t = (case t + of hyp_fault_type.ARMVCPUFault hsr \ (CALL handleVCPUFault(ucast hsr)))" + +definition + "callKernel_C e \ case e of + SyscallEvent n \ exec_C \ (\ret__unsigned_long :== CALL handleSyscall(syscall_from_H n)) + | UnknownSyscall n \ exec_C \ (\ret__unsigned_long :== CALL handleUnknownSyscall(of_nat n)) + | UserLevelFault w1 w2 \ exec_C \ (\ret__unsigned_long :== CALL handleUserLevelFault(w1,w2)) + | Interrupt \ exec_C \ (Call handleInterruptEntry_'proc) + | VMFaultEvent t \ exec_C \ (\ret__unsigned_long :== CALL handleVMFaultEvent(vm_fault_type_from_H t)) + | HypervisorEvent t \ exec_C \ (handleHypervisorEvent_C t)" + +definition + "callKernel_withFastpath_C e \ + if e = SyscallEvent syscall.SysCall \ e = SyscallEvent syscall.SysReplyRecv + then exec_C \ (\cptr :== CALL getRegister(\ksCurThread, scast Kernel_C.capRegister);; + \msgInfo :== CALL getRegister(\ksCurThread, scast Kernel_C.msgInfoRegister);; + IF e = SyscallEvent syscall.SysCall + THEN CALL fastpath_call(\cptr, \msgInfo) + ELSE CALL fastpath_reply_recv(\cptr, \msgInfo) FI) + else callKernel_C e" + +definition + setTCBContext_C :: "user_context_C \ tcb_C ptr \ (cstate,unit) nondet_monad" +where + "setTCBContext_C ct thread \ + exec_C \ (\t_hrs :== hrs_mem_update (heap_update ( + Ptr &((Ptr &(thread\[''tcbArch_C'']) :: (arch_tcb_C ptr))\[''tcbContext_C''])) ct) \t_hrs)" + +lemma Basic_sem_eq: + "\\\Basic f,s\ \ s' = ((\t. s = Normal t \ s' = Normal (f t)) \ (\t. s \ Normal t \ s' = s))" + apply (rule iffI) + apply (erule exec_elim_cases, simp_all)[1] + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (rule exec.Basic) + apply clarsimp + apply (cases s, auto) + done + +lemma setTCBContext_C_corres: + "\ ccontext_relation tc tc'; t' = tcb_ptr_to_ctcb_ptr t \ \ + corres_underlying rf_sr nf nf' dc (pspace_domain_valid and tcb_at' t) \ + (threadSet (\tcb. 
tcb \ tcbArch := atcbContextSet tc (tcbArch tcb)\) t) (setTCBContext_C tc' t')" + apply (simp add: setTCBContext_C_def exec_C_def Basic_sem_eq corres_underlying_def) + apply clarsimp + apply (simp add: threadSet_def bind_assoc split_def exec_gets) + apply (frule (1) obj_at_cslift_tcb) + apply clarsimp + apply (frule getObject_eq [rotated -1], simp) + apply (simp add: objBits_simps') + apply (simp add: Nondet_Monad.bind_def split_def) + apply (rule bexI) + prefer 2 + apply assumption + apply simp + apply (frule setObject_eq [rotated -1], simp) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply clarsimp + apply (clarsimp simp: typ_heap_simps') + apply (thin_tac "(a,b) \ fst t" for a b t)+ + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def + carch_state_relation_def cmachine_state_relation_def + typ_heap_simps' update_tcb_map_tos) + apply (simp add: map_to_ctes_upd_tcb_no_ctes map_to_tcbs_upd tcb_cte_cases_def + cvariable_relation_upd_const ko_at_projectKO_opt cteSizeBits_def) + apply (simp add: cep_relations_drop_fun_upd) + apply (drule ko_at_projectKO_opt) + apply (erule (2) cmap_relation_upd_relI) + apply (simp add: ctcb_relation_def carch_tcb_relation_def) + apply assumption + apply simp + done + +end + +definition + "register_to_H \ inv register_from_H" + +definition (in state_rel) to_user_context_C :: "user_context \ user_context_C" where + "to_user_context_C uc \ + user_context_C (ARRAY r. user_regs uc (register_to_H (of_nat r))) + (user_fpu_state_C (ARRAY r. fpuRegs (fpu_state uc) (of_nat r)) + (fpuSr (fpu_state uc)) (fpuCr (fpu_state uc)))" + +(* FIXME ARMHYP is this useful in any other file? *) +(* Note: depends on vcpuactive being false when vcpuptr is NULL! *) +definition + ccur_vcpu_to_H :: "vcpu_C ptr \ machine_word \ (machine_word \ bool) option" +where + "ccur_vcpu_to_H vcpuptr vcpuactive \ + if vcpuptr = NULL + then None + else Some (ptr_val vcpuptr, to_bool vcpuactive)" + +lemma (in kernel_m) ccontext_rel_to_C: + "ccontext_relation uc (to_user_context_C uc)" + unfolding ccontext_relation_def to_user_context_C_def cregs_relation_def fpu_relation_def + by (clarsimp simp: register_to_H_def register_from_H_inj) + +context state_rel begin + +definition from_user_context_C :: "user_context_C \ user_context" where + "from_user_context_C uc \ + UserContext (FPUState (\r. (vregs_C (fpuState_C uc)).[size r]) + (fpsr_C (fpuState_C uc)) (fpcr_C (fpuState_C uc))) + (\r. (registers_C uc).[unat (register_from_H r)])" + +definition + getContext_C :: "tcb_C ptr \ cstate \ user_context" +where + "getContext_C thread \ + \s. 
from_user_context_C (tcbContext_C (the (clift (t_hrs_' (globals s)) (Ptr &(thread\[''tcbArch_C''])))))" + +lemma from_user_context_C: + "ccontext_relation uc uc' \ from_user_context_C uc' = uc" + unfolding ccontext_relation_def cregs_relation_def + apply (cases uc) + apply (rename_tac fpu_state regs) + apply (simp add: from_user_context_C_def) + apply (rule conjI2, fastforce) + apply (clarsimp simp: fpu_relation_def) + apply (case_tac fpu_state) + apply clarsimp + apply (rename_tac fpu_regs) + apply (rule ext) + apply (drule_tac x="size r" in spec) + apply (simp add: size_64_less_64) + done + +end + +context kernel_m begin + +definition + kernelEntry_C :: + "bool \ event \ user_context \ (cstate, user_context) nondet_monad" + where + "kernelEntry_C fp e tc \ do + t \ gets (ksCurThread_' o globals); + setTCBContext_C (to_user_context_C tc) t; + if fp then callKernel_withFastpath_C e else callKernel_C e; + t \ gets (ksCurThread_' o globals); + gets $ getContext_C t + od" + +definition + "kernel_call_C fp e \ + {(s, m, s'). s' \ fst (split (kernelEntry_C fp e) s) \ + m = (if ct_running_C (snd s') then UserMode else IdleMode) + \ snd (split (kernelEntry_C fp e) s)}" + +definition + "getActiveIRQ_C \ exec_C \ (Call getActiveIRQ_'proc)" + +definition + checkActiveIRQ_C :: "(cstate, bool) nondet_monad" + where + "checkActiveIRQ_C \ + do getActiveIRQ_C; + irq \ gets ret__unsigned_long_'; + return (irq \ scast irqInvalid) + od" + +definition + check_active_irq_C :: "((user_context \ cstate) \ bool \ (user_context \ cstate)) set" + where + "check_active_irq_C \ {((tc, s), irq_raised, (tc, s')). (irq_raised, s') \ fst (checkActiveIRQ_C s)}" + +end + +(*Restrict our undefined initial state to only use the nondeterministic state*) +consts + Init_C' :: "unit observable \ cstate global_state set" + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition "Init_C \ \((tc,s),m,e). Init_C' ((tc, truncate_state s),m,e)" + +definition + user_mem_C :: "globals \ machine_word \ word8 option" +where + "user_mem_C s \ \p. + case clift (t_hrs_' s) (Ptr (p && ~~mask pageBits)) of + Some v \ let off = p && mask pageBits >> word_size_bits; + w = index (user_data_C.words_C v) (unat off); + i = 7 - unat (p && mask word_size_bits); + bs = (word_rsplit w :: word8 list) + in Some (bs!i) + | None \ None" + + +definition + device_mem_C :: "globals \ machine_word \ machine_word option" +where + "device_mem_C s \ \p. + case clift (t_hrs_' s) (Ptr (p && ~~mask pageBits)) of + Some (v::user_data_device_C) \ Some p + | None \ None" + + +(* FIXME: move to somewhere sensible *) +definition + "setUserMem_C um \ + modify (\s. s\globals := (globals s)\t_hrs_' := + (%p. case um p of None \ fst (t_hrs_' (globals s)) p | Some x \ x, + snd (t_hrs_' (globals s))) \\)" + +definition + "setDeviceState_C um \ + modify (\s. s\globals := globals s\phantom_machine_state_' := (phantom_machine_state_' (globals s))\device_state := ((device_state (phantom_machine_state_' (globals s))) ++ um)\\\)" + +lemma setUserMem_C_def_foldl: + "setUserMem_C um \ + modify (\s. s\globals := (globals s)\t_hrs_' := + (foldl (%f p. f(p := the (um p))) (fst (t_hrs_' (globals s))) + (filter (%p. 
p : dom um) enum), + snd (t_hrs_' (globals s))) \\)" + apply (rule eq_reflection) + apply (simp add: setUserMem_C_def) + apply (rule arg_cong[where f=modify]) + apply (rule ext) + apply (rule_tac f=globals_update in arg_cong2) + apply (rule ext) + apply (rule_tac f=t_hrs_'_update in arg_cong2) + apply (rule ext) + apply simp_all + apply (rule ext) + apply (simp add: foldl_fun_upd_value) + apply (intro conjI impI) + apply clarsimp + apply (clarsimp split: option.splits) + done + +(* FIXME: move *_to_H to StateRelation_C.thy? *) +definition + "cscheduler_action_to_H p \ + if p = NULL then ResumeCurrentThread + else if p = Ptr 1 then ChooseNewThread + else SwitchToThread (ctcb_ptr_to_tcb_ptr p)" + + +lemma csch_act_rel_to_H: + "(\t. a = SwitchToThread t \ is_aligned t tcbBlockSizeBits) \ + cscheduler_action_relation a p \ cscheduler_action_to_H p = a" + apply (cases a) + apply (simp_all add: cscheduler_action_relation_def + cscheduler_action_to_H_def) + apply safe + apply (simp_all add: tcb_ptr_to_ctcb_ptr_def ctcb_ptr_to_tcb_ptr_def + ctcb_offset_defs is_aligned_mask mask_def + objBits_defs) + subgoal by word_bitwise simp + by word_bitwise simp + +definition + cirqstate_to_H :: "machine_word \ irqstate" +where + "cirqstate_to_H w \ + if w = scast Kernel_C.IRQSignal then irqstate.IRQSignal + else if w = scast Kernel_C.IRQTimer then irqstate.IRQTimer + else if w = scast Kernel_C.IRQInactive then irqstate.IRQInactive + else irqstate.IRQReserved" + +lemma cirqstate_cancel: + "cirqstate_to_H \ irqstate_to_C = id" + apply (rule ext) + apply (case_tac x) + apply (auto simp: cirqstate_to_H_def Kernel_C.IRQInactive_def + Kernel_C.IRQTimer_def Kernel_C.IRQSignal_def + Kernel_C.IRQReserved_def) + done + +definition + "cint_state_to_H cnode cirqs \ + InterruptState (ptr_val cnode) + (\i::irq_len word. 
if i \ scast AARCH64.maxIRQ then cirqstate_to_H (index cirqs (unat i)) + else irqstate.IRQInactive)" + +lemma cint_rel_to_H: + "irqs_masked' s \ + cinterrupt_relation (ksInterruptState s) n t \ + cint_state_to_H n t = (ksInterruptState s)" + apply (simp add: irqs_masked'_def) + apply (cases "ksInterruptState s") + apply (rename_tac "fun") + apply (clarsimp simp: cinterrupt_relation_def cint_state_to_H_def + AARCH64.maxIRQ_def Kernel_C.maxIRQ_def) + apply (rule ext) + apply clarsimp + apply (drule spec, erule impE, assumption) + apply (drule_tac s="irqstate_to_C (fun i)" in sym, + simp add: cirqstate_cancel[THEN fun_cong, simplified]) + done + +definition + "cstate_to_machine_H s \ + (phantom_machine_state_' s)\underlying_memory := option_to_0 \ (user_mem_C s)\" + +lemma projectKO_opt_UserData [simp]: + "projectKO_opt KOUserData = Some UserData" + by (simp add: projectKO_opts_defs) + +lemma ucast_ucast_mask_pageBits_shift: + "ucast (ucast (p && mask pageBits >> 3) :: 9 word) = p && mask pageBits >> 3" + apply (rule word_eqI) + apply (auto simp: word_size nth_ucast nth_shiftr pageBits_def) + done + +definition + "processMemory s \ (ksMachineState s) \underlying_memory := option_to_0 \ (user_mem' s)\" + +lemma unat_ucast_mask_pageBits_shift: + "unat (ucast (p && mask pageBits >> 3) :: 9 word) = unat ((p::word64) && mask pageBits >> 3)" + apply (simp only: unat_ucast) + apply (rule Divides.mod_less, simp) + apply (rule unat_less_power) + apply (simp add: word_bits_def) + apply (rule shiftr_less_t2n) + apply (rule order_le_less_trans [OF word_and_le1]) + apply (simp add: pageBits_def mask_def) + done + +lemma mask_pageBits_shift_sum: + "unat n = unat (p && mask 3) \ + (p && ~~ mask pageBits) + (p && mask pageBits >> 3) * 8 + n = (p::machine_word)" + apply (clarsimp simp: ArchMove_C.word_shift_by_3) + apply (subst word_plus_and_or_coroll) + apply (rule word_eqI) + apply (clarsimp simp: word_size pageBits_def nth_shiftl nth_shiftr word_ops_nth_size) + apply arith + apply (subst word_plus_and_or_coroll) + apply (rule word_eqI) + apply (clarsimp simp: word_size pageBits_def nth_shiftl nth_shiftr word_ops_nth_size) + apply (rule word_eqI) + apply (clarsimp simp: word_size pageBits_def nth_shiftl nth_shiftr word_ops_nth_size) + apply (auto simp: linorder_not_less SucSucMinus) + done + +lemma user_mem_C_relation: + "\cpspace_user_data_relation (ksPSpace s') + (underlying_memory (ksMachineState s')) (t_hrs_' s); + pspace_distinct' s'\ + \ user_mem_C s = user_mem' s'" + apply (rule ext) + apply (rename_tac p) + apply (clarsimp simp: user_mem_C_def user_mem'_def + split: if_splits option.splits) + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: pointerInUserData_def) + apply (drule user_data_at_ko) + apply (clarsimp simp: cmap_relation_def) + apply (subgoal_tac "(Ptr (p && ~~mask pageBits) :: user_data_C ptr) \ + Ptr ` dom (heap_to_user_data (ksPSpace s') (underlying_memory (ksMachineState s')))") + apply simp + apply clarsimp + apply (thin_tac "Ball A P" for A P) + apply (thin_tac "t = dom (clift (t_hrs_' s))" for t) + apply (rule imageI) + apply (clarsimp simp: dom_def heap_to_user_data_def obj_at'_def) + apply (clarsimp simp: pointerInUserData_def) + apply (clarsimp simp: cmap_relation_def) + apply (drule equalityD2) + apply (drule subsetD) + apply (fastforce simp: dom_def) + apply clarsimp + apply (drule bspec) + apply (fastforce simp: dom_def) + apply (clarsimp simp: heap_to_user_data_def map_comp_def + split: option.splits) + apply (rule conjI) + prefer 2 + apply (clarsimp simp: 
typ_at'_def ko_wp_at'_def objBitsKO_def) + apply (drule pspace_distinctD') + apply fastforce + apply (clarsimp simp: objBitsKO_def) + apply clarsimp + apply (clarsimp simp: cuser_user_data_relation_def byte_to_word_heap_def Let_def) + apply (erule_tac x="ucast (p && mask pageBits >> 3)" in allE) + apply (simp add: ucast_ucast_mask_pageBits_shift unat_ucast_mask_pageBits_shift) + apply (rotate_tac -1) + apply (drule sym) + apply (simp add: word_rsplit_rcat_size word_size word_size_bits_def) + apply (case_tac "unat (p && mask 3)") + apply (simp add: mask_pageBits_shift_sum [where n=0, simplified]) + apply (case_tac "nat") + apply (simp add: mask_pageBits_shift_sum [where n=1, simplified]) + apply (case_tac "nata") + apply (simp add: mask_pageBits_shift_sum [where n=2, simplified]) + apply (case_tac "natb") + apply (simp add: mask_pageBits_shift_sum [where n=3, simplified]) + apply (case_tac "natc") + apply (simp add: mask_pageBits_shift_sum [where n=4, simplified]) + apply (case_tac "natd") + apply (simp add: mask_pageBits_shift_sum [where n=5, simplified]) + apply (case_tac "nate") + apply (simp add: mask_pageBits_shift_sum [where n=6, simplified]) + apply (case_tac "natf") + apply (simp add: mask_pageBits_shift_sum [where n=7, simplified]) + apply clarsimp + apply (subgoal_tac "unat (p && mask 3) < unat (2^3::machine_word)") + apply simp + apply (fold word_less_nat_alt) + apply (rule and_mask_less_size) + apply (clarsimp simp: word_size) + done + + +lemma device_mem_C_relation: + "\cpspace_device_data_relation (ksPSpace s') + (underlying_memory (ksMachineState s')) (t_hrs_' s); + pspace_distinct' s'\ + \ device_mem_C s = device_mem' s'" + apply (rule ext) + apply (rename_tac p) + apply (clarsimp simp: device_mem_C_def device_mem'_def + split: if_splits option.splits) + apply (rule conjI) + apply (clarsimp simp: pointerInDeviceData_def) + apply (clarsimp simp: cmap_relation_def) + apply (subgoal_tac "(Ptr (p && ~~mask pageBits) :: user_data_device_C ptr) \ + Ptr ` dom (heap_to_device_data (ksPSpace s') (underlying_memory (ksMachineState s')))") + apply clarsimp + apply (thin_tac "Ball A P" for A P) + apply (thin_tac "t = dom (clift (t_hrs_' s))" for t) + apply (drule device_data_at_ko) + apply (rule imageI) + apply (clarsimp simp: dom_def heap_to_device_data_def obj_at'_def) + apply (clarsimp simp: pointerInDeviceData_def) + apply (clarsimp simp: cmap_relation_def) + apply (drule equalityD2) + apply (drule subsetD) + apply (fastforce simp: dom_def) + apply clarsimp + apply (drule bspec) + apply (fastforce simp: dom_def) + apply (clarsimp simp: heap_to_device_data_def map_comp_def + split: option.splits) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def objBitsKO_def) + apply (drule pspace_distinctD') + apply fastforce + apply (clarsimp simp: objBitsKO_def) + done + +lemma + assumes vms': "valid_machine_state' a" + assumes mach_rel: "cmachine_state_relation (ksMachineState a) s" + assumes um_rel: + "cpspace_user_data_relation (ksPSpace a) + (underlying_memory (ksMachineState a)) (t_hrs_' s)" + "pspace_distinct' a" + shows cstate_to_machine_H_correct: "cstate_to_machine_H s = observable_memory (ksMachineState a) (user_mem' a)" +proof - + have "underlying_memory (observable_memory (ksMachineState a) (user_mem' a)) = option_to_0 \ user_mem' a" + apply (rule ext) + using vms'[simplified valid_machine_state'_def] + apply (auto simp: user_mem'_def option_to_0_def typ_at'_def ko_wp_at'_def + option_to_ptr_def pointerInUserData_def observable_memory_def + split: option.splits if_split_asm) + done + 
with mach_rel[simplified cmachine_state_relation_def] + user_mem_C_relation[OF um_rel] + show ?thesis + apply (simp add: cstate_to_machine_H_def) + apply (intro MachineTypes.machine_state.equality,simp_all add:observable_memory_def) + done +qed + + +definition + "array_to_map n c \ \i. if i\n then Some (index c (unat i)) else None" +lemma array_relation_to_map: + "array_relation r n a c \ \i\n. r (a i) (the (array_to_map n c i))" + by (simp add: array_relation_def array_to_map_def) + +lemma dom_array_to_map[simp]: "dom (array_to_map n c) = {i. i\n}" + by (simp add: array_to_map_def dom_def) +lemma ran_array_to_map: + "ran (array_to_map n c) = {y. \i\n. index c (unat i) = y}" + by (auto simp: array_to_map_def ran_def Collect_eq) + +text \Note: Sometimes, @{text array_map_conv} might be more convenient + in conjunction with @{const array_relation}.\ +definition "array_map_conv f n c \ map_comp f (array_to_map n c)" + +lemma array_map_conv_def2: + "array_map_conv f n c \ \i. if i\n then f (index c (unat i)) else None" + by (rule eq_reflection, rule ext) (simp add: array_map_conv_def map_comp_def + array_to_map_def) +lemma array_relation_map_conv: + "array_relation r n a c \ \x y. r y x \ (f x) = y \ + \i>n. a i = None \ array_map_conv f n c = a" + by (rule ext) (simp add: array_relation_def array_map_conv_def2) +lemma array_relation_map_conv2: + "array_relation r n a c \ \x. \y\range a. r y x \ (f x) = y \ + \i>n. a i = None \ array_map_conv f n c = a" + by (rule ext) (simp add: array_relation_def array_map_conv_def2) +lemma array_map_conv_Some[simp]: "array_map_conv Some n c = array_to_map n c" + by (simp add: array_map_conv_def map_comp_def) +lemma map_comp_array_map_conv_comm: + "map_comp f (array_map_conv g n c) = array_map_conv (map_comp f g) n c" + by (rule ext) (simp add: array_map_conv_def2 map_option_def map_comp_def) +lemma ran_array_map_conv: + "ran (array_map_conv f n c) = {y. \i\n. f (index c (unat i)) = Some y}" + by (auto simp add: array_map_conv_def2 ran_def Collect_eq) + +lemmas word_le_p2m1 = word_up_bound[of w for w] + +end + +context state_rel begin + +definition + "carch_state_to_H cstate \ + ARMKernelState + (array_map_conv (\x. if x=NULL then None else Some (ptr_val x)) + (mask asid_high_bits) (armKSASIDTable_' cstate)) + armKSKernelVSpace_C + (array_map_conv (\x. if x = 0 then None else Some x) + (mask vmid_bits) (armKSHWASIDTable_' cstate)) + (armKSNextASID_' cstate) + (symbol_table ''armKSGlobalUserVSpace'') + (ccur_vcpu_to_H (armHSCurVCPU_' cstate) (armHSVCPUActive_' cstate)) + (unat (gic_vcpu_num_list_regs_' cstate)) + (fst (snd (snd (ghost'state_' cstate))))" + +lemma eq_option_to_ptr_rev: + "Some 0 \ A \ + \x. \y\A. 
((=) \ option_to_ptr) y x \ + (if x=NULL then None else Some (ptr_val x)) = y" + by (force simp: option_to_ptr_def option_to_0_def split: option.splits) + +lemma ccur_vcpu_to_H_correct: + assumes valid: "valid_arch_state' astate" + assumes rel: "carch_state_relation (ksArchState astate) cstate" + shows + "ccur_vcpu_to_H (armHSCurVCPU_' cstate) (armHSVCPUActive_' cstate) = + armHSCurVCPU (ksArchState astate)" + using valid rel + by (clarsimp simp: valid_arch_state'_def carch_state_relation_def + ccur_vcpu_to_H_def cur_vcpu_relation_def + split: option.splits) + +lemma carch_state_to_H_correct: + assumes valid: "valid_arch_state' astate" + assumes rel: "carch_state_relation (ksArchState astate) (cstate)" + assumes pt_t: "fst (snd (snd (ghost'state_' cstate))) = gsPTTypes (ksArchState astate)" + shows "carch_state_to_H cstate = ksArchState astate" + apply (case_tac "ksArchState astate", simp) + using rel[simplified carch_state_relation_def carch_globals_def] + apply (clarsimp simp: carch_state_to_H_def) + apply (rule conjI) + apply (rule array_relation_map_conv2[OF _ eq_option_to_ptr_rev]) + apply assumption + using valid[simplified valid_arch_state'_def] + apply (fastforce simp: valid_asid_table'_def) + using valid[simplified valid_arch_state'_def] + apply (clarsimp simp: valid_asid_table'_def mask_2pm1) + apply fastforce + apply (rule conjI) + apply (rule array_relation_map_conv2[OF _ eq_option_to_0_rev]) + apply assumption + using valid + apply (simp add: valid_arch_state'_def) + apply fastforce + apply (clarsimp simp: mask_def vmid_bits_val) + apply (rule conjI) + using valid rel + apply (simp add: ccur_vcpu_to_H_correct) + apply (rule conjI) + using valid[simplified valid_arch_state'_def] + apply (clarsimp simp: max_armKSGICVCPUNumListRegs_def unat_of_nat_eq) + apply (simp add: pt_t) + done + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma tcb_queue_rel_unique: + "hp NULL = None \ + tcb_queue_relation gn gp' hp as pp cp \ + tcb_queue_relation gn gp' hp as' pp cp \ as' = as" +proof (induct as arbitrary: as' pp cp) + case Nil thus ?case by (cases as', simp+) +next + case (Cons x xs) thus ?case + by (cases as') (clarsimp simp: tcb_ptr_to_ctcb_ptr_def)+ +qed + +lemma tcb_queue_rel'_unique: + "hp NULL = None \ + tcb_queue_relation' gn gp' hp as pp cp \ + tcb_queue_relation' gn gp' hp as' pp cp \ as' = as" + apply (clarsimp simp: tcb_queue_relation'_def split: if_split_asm) + apply (clarsimp simp: neq_Nil_conv) + apply (clarsimp simp: neq_Nil_conv) + apply (erule(2) tcb_queue_rel_unique) + done + + +definition tcb_queue_C_to_tcb_queue :: "tcb_queue_C \ tcb_queue" where + "tcb_queue_C_to_tcb_queue q \ + TcbQueue (if head_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (head_C q))) + (if end_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (end_C q)))" + +definition cready_queues_to_H :: + "tcb_queue_C[num_tcb_queues] \ (domain \ priority \ ready_queue)" + where + "cready_queues_to_H cs \ + \(qdom, prio). 
+ if qdom \ maxDomain \ prio \ maxPriority + then let cqueue = index cs (cready_queues_index_to_C qdom prio) + in tcb_queue_C_to_tcb_queue cqueue + else TcbQueue None None" + +lemma cready_queues_to_H_correct: + "\cready_queues_relation (ksReadyQueues s) (ksReadyQueues_' ch); + no_0_obj' s; ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ cready_queues_to_H (ksReadyQueues_' ch) = ksReadyQueues s" + apply (clarsimp simp: cready_queues_to_H_def cready_queues_relation_def Let_def) + apply (clarsimp simp: fun_eq_iff) + apply (rename_tac d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (rule conjI) + apply (clarsimp simp: tcb_queue_C_to_tcb_queue_def ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (case_tac "tcbQueueHead (ksReadyQueues s (d, p)) = None") + apply (clarsimp simp: tcb_queue.expand) + apply clarsimp + apply (rename_tac queue_head queue_end) + apply (prop_tac "tcb_at' queue_head s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (prop_tac "tcb_at' queue_end s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (drule tcb_at_not_NULL)+ + apply (fastforce simp: tcb_queue.expand kernel.ctcb_ptr_to_ctcb_ptr) + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits; + metis tcb_queue.exhaust_sel word_not_le) + done + +(* showing that cpspace_relation is actually unique >>>*) + +lemma cmap_relation_unique_0: + assumes inj_f: "inj f" + assumes r: "\x y z p . \ r x z; r y z; a p = Some x; a' p = Some y; P p x; P' p y \ \ x=y" + shows "\ cmap_relation a c f r; cmap_relation a' c f r; + \p x. a p = Some x \ P p x; \p x. a' p = Some x \ P' p x \ \ a' = a" + apply (clarsimp simp add: cmap_relation_def) + apply (drule inj_image_inv[OF inj_f])+ + apply simp + apply (rule ext) + apply (case_tac "x:dom a") + apply (drule bspec, assumption)+ + apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) + apply (drule (1) r) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + apply (thin_tac "inv f ` A = B" for A B) + apply (thin_tac "\p x. a p = Some x \ P p x" for a P) + apply (thin_tac "\x. a p = Some x \ P p x" for a P p) + apply (erule_tac x=x in allE) + apply clarsimp + apply (thin_tac "\p x. a p = Some x \ P p x" for a P)+ + apply fastforce + done + +lemma cmap_relation_unique': + assumes inj_f: "inj f" + assumes r: "\x y z. r x z \ r y z \ P x \ P' y \ x=y" + shows "\ cmap_relation a c f r; cmap_relation a' c f r; \x \ ran a. P x; \x \ ran a'. P' x \ \ a' = a" + using assms + apply (clarsimp simp: ran_def) + apply (rule cmap_relation_unique_0[OF inj_f, where P="\_. P" and P'="\_. P'"]) + defer + apply assumption+ + apply fastforce + apply fastforce + apply fastforce + done + +lemma cmap_relation_unique: + assumes inj_f: "inj f" + assumes r: "\x y z. 
r x z \ r y z \ x=y" + shows "cmap_relation a c f r \ cmap_relation a' c f r \ a' = a" + apply (rule cmap_relation_unique'[OF inj_f]) + defer + apply (fastforce intro: r)+ + done + +lemma cpspace_cte_relation_unique: + assumes "cpspace_cte_relation ah ch" "cpspace_cte_relation ah' ch" + shows "map_to_ctes ah' = map_to_ctes ah" + apply (rule cmap_relation_unique[OF inj_Ptr _ assms]) + by (clarsimp simp: ccte_relation_def Some_the[symmetric]) (drule sym, simp) + +lemma inj_tcb_ptr_to_ctcb_ptr: "inj tcb_ptr_to_ctcb_ptr" + by (rule kernel.inj_tcb_ptr_to_ctcb_ptr) + +lemma cregs_relation_imp_eq: + "cregs_relation f x \ cregs_relation g x \ f=g" + by (auto simp: cregs_relation_def) + +lemma fpu_relation_imp_eq: + "fpu_relation f x \ fpu_relation g x \ f=g" + unfolding fpu_relation_def + apply (cases f, cases g) + apply clarsimp + apply (rule ext, rename_tac z) + apply (drule_tac x="size z" in spec)+ + apply (simp add: size_64_less_64) + done + +lemma ccontext_relation_imp_eq: + "ccontext_relation f x \ ccontext_relation g x \ f=g" + unfolding ccontext_relation_def + apply (cases f, cases g) + apply (auto dest: fpu_relation_imp_eq cregs_relation_imp_eq) + done + +lemma map_to_ctes_tcb_ctes: + notes if_cong[cong] + shows + "ctes_of s' = ctes_of s \ + ko_at' tcb p s \ ko_at' tcb' p s' \ + \x\ran tcb_cte_cases. fst x tcb' = fst x tcb" + apply (clarsimp simp add: ran_tcb_cte_cases) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKO_opt_tcb + split: kernel_object.splits) + apply (case_tac ko, simp_all, clarsimp) + apply (clarsimp simp: objBits_type[of "KOTCB tcb" "KOTCB undefined"] + objBits_type[of "KOTCB tcb'" "KOTCB undefined"]) + apply (rule conjI) + apply (drule ps_clear_def3[THEN iffD1,rotated 2], + assumption, simp add: objBits_simps')+ + apply (clarsimp simp: map_to_ctes_def Let_def fun_eq_iff) + apply (drule_tac x=p in spec, simp) + apply (rule conjI) + apply (clarsimp simp: map_to_ctes_def Let_def fun_eq_iff) + apply (drule_tac x="p+0x20" in spec, simp add: objBitsKO_def) + apply (frule_tac s1=s in ps_clear_def3[THEN iffD1,rotated 2], + assumption, simp add: objBits_simps') + apply (frule_tac s1=s' in ps_clear_def3[THEN iffD1,rotated 2], + assumption, simp add: objBits_simps') + apply (drule (1) ps_clear_32)+ + apply (simp add: is_aligned_add_helper[of _ 11 "0x20", simplified] + split_def objBits_simps') + apply (rule conjI) + apply (clarsimp simp: map_to_ctes_def Let_def fun_eq_iff) + apply (drule_tac x="p+0x40" in spec, simp add: objBitsKO_def) + apply (frule_tac s1=s in ps_clear_def3[THEN iffD1,rotated 2], + assumption, simp add: objBits_simps') + apply (frule_tac s1=s' in ps_clear_def3[THEN iffD1,rotated 2], + assumption, simp add: objBits_simps') + apply (drule (1) ps_clear_is_aligned_ctes_None(1))+ + apply (simp add: is_aligned_add_helper[of _ 11 "0x40", simplified] + split_def objBits_simps') + apply (rule conjI) + apply (clarsimp simp: map_to_ctes_def Let_def fun_eq_iff) + apply (drule_tac x="p+0x60" in spec, simp add: objBitsKO_def) + apply (frule_tac s1=s in ps_clear_def3[THEN iffD1,rotated 2], + assumption, simp add: objBits_simps') + apply (frule_tac s1=s' in ps_clear_def3[THEN iffD1,rotated 2], + assumption, simp add: objBits_simps') + apply (drule (1) ps_clear_is_aligned_ctes_None(2))+ + apply (simp add: is_aligned_add_helper[of _ 11 "0x60", simplified] + split_def objBits_simps') + apply (clarsimp simp: map_to_ctes_def Let_def fun_eq_iff) + apply (drule_tac x="p+0x80" in spec, simp add: objBitsKO_def) + apply (frule_tac s1=s in ps_clear_def3[THEN iffD1,rotated 
2], + assumption, simp add: objBits_simps') + apply (frule_tac s1=s' in ps_clear_def3[THEN iffD1,rotated 2], + assumption, simp add: objBits_simps') + apply (drule (1) ps_clear_is_aligned_ctes_None(3))+ + apply (simp add: is_aligned_add_helper[of _ 11 "0x80", simplified] + split_def objBits_simps') + done + +lemma cfault_rel_imp_eq: + "cfault_rel x a b \ cfault_rel y a b \ x=y" + by (clarsimp simp: cfault_rel_def is_cap_fault_def + split: if_split_asm seL4_Fault_CL.splits) + +lemma cthread_state_rel_imp_eq: + "cthread_state_relation x z \ cthread_state_relation y z \ x=y" + apply (simp add: cthread_state_relation_def split_def) + apply (cases x) + apply (cases y, simp_all add: ThreadState_defs)+ + done + +lemma map_to_tcbs_Some_refs_nonzero: + "\map_to_tcbs (ksPSpace s) p = Some tcb; no_0_obj' s; valid_objs' s\ + \ tcbBoundNotification tcb \ Some 0 + \ tcbSchedPrev tcb \ Some 0 + \ tcbSchedNext tcb \ Some 0" + supply word_neq_0_conv[simp del] + apply (clarsimp simp: map_comp_def split: option.splits) + apply (erule (1) valid_objsE') + apply (fastforce simp: valid_obj'_def valid_tcb'_def) + done + +lemma ksPSpace_valid_objs_atcbVCPUPtr_nonzero: + "\ no_0_obj' s; valid_objs' s \ \ + map_to_tcbs (ksPSpace s) p = Some tcb \ atcbVCPUPtr (tcbArch tcb) \ Some 0" + apply (clarsimp simp: map_comp_def split: option.splits) + apply (erule(1) valid_objsE') + apply (clarsimp simp: valid_obj'_def valid_tcb'_def valid_arch_tcb'_def) + done + +lemma carch_tcb_relation_imp_eq: + "atcbVCPUPtr f \ Some 0 \ atcbVCPUPtr g \ Some 0 + \ carch_tcb_relation f x \ carch_tcb_relation g x \ f = g" + apply (cases f) + apply (rename_tac tc1 vcpuptr1) + apply (cases g) + apply (rename_tac tc2 vcpuptr2) + apply clarsimp + apply (clarsimp simp add: carch_tcb_relation_def) + apply (rule context_conjI) + subgoal by (clarsimp simp add: ccontext_relation_imp_eq atcbContextGet_def) + apply (clarsimp) + apply (case_tac "tcbVCPU_C x = NULL") + apply (fastforce dest!: option_to_ptr_NULL_eq) + apply (cases "tcbVCPU_C x") + apply (case_tac vcpuptr1 ; simp) + apply (case_tac vcpuptr2 ; simp) + done + +lemma tcb_ptr_to_ctcb_ptr_inj: + "tcb_ptr_to_ctcb_ptr x = tcb_ptr_to_ctcb_ptr y \ x = y" + by (auto simp: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) + +lemma + assumes "pspace_aligned' as" "pspace_distinct' as" "valid_tcb' atcb as" + shows tcb_at'_tcbBoundNotification: + "bound (tcbBoundNotification atcb) \ ntfn_at' (the (tcbBoundNotification atcb)) as" + and tcb_at'_tcbSchedPrev: + "tcbSchedPrev atcb \ None \ tcb_at' (the (tcbSchedPrev atcb)) as" + and tcb_at'_tcbSchedNext: + "tcbSchedNext atcb \ None \ tcb_at' (the (tcbSchedNext atcb)) as" + using assms + by (clarsimp simp: valid_tcb'_def obj_at'_def)+ + +lemma cpspace_tcb_relation_unique: + assumes tcbs: "cpspace_tcb_relation (ksPSpace as) ch" "cpspace_tcb_relation (ksPSpace as') ch" + assumes vs: "no_0_obj' as" "valid_objs' as" + assumes vs': "no_0_obj' as'" "valid_objs' as'" + assumes ad: "pspace_aligned' as" "pspace_distinct' as" + assumes ad': "pspace_aligned' as'" "pspace_distinct' as'" + assumes ctes: "\tcb tcb'. (\p. map_to_tcbs (ksPSpace as) p = Some tcb \ + map_to_tcbs (ksPSpace as') p = Some tcb') \ + (\x\ran tcb_cte_cases. 
fst x tcb' = fst x tcb)" + shows "map_to_tcbs (ksPSpace as') = map_to_tcbs (ksPSpace as)" + using tcbs(2) tcbs(1) + apply (clarsimp simp add: cmap_relation_def) + apply (drule inj_image_inv[OF inj_tcb_ptr_to_ctcb_ptr])+ + apply (simp add: tcb_ptr_to_ctcb_ptr_def[abs_def] ctcb_offset_def) + apply (rule ext) + apply (case_tac "x \ dom (map_to_tcbs (ksPSpace as))") + apply (drule bspec, assumption)+ + apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) + apply clarsimp + apply (rename_tac p x y) + apply (cut_tac ctes) + apply (drule_tac x=x in spec, drule_tac x=y in spec, erule impE, fastforce) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs]) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs']) + apply (frule ksPSpace_valid_objs_atcbVCPUPtr_nonzero[OF vs]) + apply (frule ksPSpace_valid_objs_atcbVCPUPtr_nonzero[OF vs']) + apply (rename_tac atcb atcb') + apply (prop_tac "valid_tcb' atcb as") + apply (fastforce intro: vs ad map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (prop_tac "valid_tcb' atcb' as'") + apply (fastforce intro: vs' ad' map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (frule tcb_at'_tcbSchedPrev[OF ad]) + apply (frule tcb_at'_tcbSchedPrev[OF ad']) + apply (frule tcb_at'_tcbSchedNext[OF ad]) + apply (frule tcb_at'_tcbSchedNext[OF ad']) + apply (thin_tac "map_to_tcbs x y = Some z" for x y z)+ + apply (case_tac "the (clift ch (tcb_Ptr (p + 2 ^ ctcb_size_bits)))") + apply (clarsimp simp: ctcb_relation_def ran_tcb_cte_cases) + apply (clarsimp simp: option_to_ctcb_ptr_def option_to_ptr_def option_to_0_def) + apply (rule tcb.expand) + apply clarsimp + apply (intro conjI) + apply (simp add: cthread_state_rel_imp_eq) + apply (simp add: cfault_rel_imp_eq) + apply (case_tac "tcbBoundNotification atcb'", case_tac "tcbBoundNotification atcb"; clarsimp) + apply (clarsimp split: option.splits) + apply (case_tac "tcbSchedPrev atcb'"; case_tac "tcbSchedPrev atcb"; clarsimp) + apply (force dest!: tcb_at_not_NULL) + apply (force dest!: tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (case_tac "tcbSchedNext atcb'"; case_tac "tcbSchedNext atcb"; clarsimp) + apply (force dest!: tcb_at_not_NULL) + apply (force dest!: tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (clarsimp simp: carch_tcb_relation_imp_eq) + apply auto + done + +lemma tcb_queue_rel_clift_unique: + "tcb_queue_relation gn gp' (clift s) as pp cp \ + tcb_queue_relation gn gp' (clift s) as' pp cp \ as' = as" +by (rule tcb_queue_rel_unique, rule lift_t_NULL) + +lemma tcb_queue_rel'_clift_unique: + "tcb_queue_relation' gn gp' (clift s) as pp cp \ + tcb_queue_relation' gn gp' (clift s) as' pp cp \ as' = as" + by (clarsimp simp add: tcb_queue_relation'_def) + (rule tcb_queue_rel_clift_unique) + +lemma cpspace_ep_relation_unique: + assumes "cpspace_ep_relation ah ch" "cpspace_ep_relation ah' ch" + shows "map_to_eps ah' = map_to_eps ah" + apply (rule cmap_relation_unique[OF inj_Ptr _ assms]) + apply (clarsimp simp: EPState_Idle_def EPState_Recv_def EPState_Send_def + cendpoint_relation_def Let_def tcb_queue_rel'_clift_unique + split: endpoint.splits) + done + +lemma is_aligned_no_overflow_0: + "\ is_aligned x n; y \ 2^n-1; y \ 0 \ \ 0 < x + y" + apply (drule is_aligned_no_overflow) + apply simp + by unat_arith + +abbreviation + "is_aligned_opt x n \ case x of None \ True | Some y \ is_aligned y n" + +lemma option_to_ctcb_ptr_inj: + "\ is_aligned_opt a tcbBlockSizeBits; is_aligned_opt b tcbBlockSizeBits \ + \ (option_to_ctcb_ptr a = option_to_ctcb_ptr b) = (a = b)" + 
apply (simp add: option_to_ctcb_ptr_def tcb_ptr_to_ctcb_ptr_def ctcb_offset_defs objBits_defs + split: option.splits) + apply (erule is_aligned_no_overflow_0; simp) + apply (erule is_aligned_no_overflow_0; simp) + done + +lemma cpspace_vcpu_relation_unique: + assumes "cpspace_vcpu_relation ah ch" "cpspace_vcpu_relation ah' ch" + assumes "\x \ ran (map_to_vcpus ah). is_aligned_opt (vcpuTCBPtr x) tcbBlockSizeBits" + assumes "\x \ ran (map_to_vcpus ah'). is_aligned_opt (vcpuTCBPtr x) tcbBlockSizeBits" + shows "map_to_vcpus ah' = map_to_vcpus ah" + apply (rule cmap_relation_unique' [OF inj_Ptr _ assms]) + apply (simp add: cvcpu_relation_def Let_def cvgic_relation_def cvcpu_vppi_masked_relation_def + split: vcpu.splits) + apply (case_tac x, case_tac y) + apply (rename_tac t vgic regs vppimask vtimer + t' vgic' regs' vppimask' vtimer') + apply (clarsimp simp: cvcpu_regs_relation_def vcpuSCTLR_def option_to_ctcb_ptr_inj) + apply (rule conjI) + apply (case_tac vgic, case_tac vgic') + apply clarsimp + apply (rule ext) + apply (rename_tac r) + apply (case_tac "64 \ r"; simp) + apply (rule conjI) + apply (rule ext, blast) + apply (rule conjI, blast) + apply (case_tac vtimer, case_tac vtimer') + apply clarsimp + done + +lemma ksPSpace_valid_pspace_ntfnBoundTCB_nonzero: + "\s. ksPSpace s = ah \ valid_pspace' s + \ map_to_ntfns ah p = Some ntfn \ ntfnBoundTCB ntfn \ Some 0" + apply (clarsimp simp: map_comp_def valid_pspace'_def split: option.splits) + apply (erule(1) valid_objsE') + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) + done + +lemma cpspace_ntfn_relation_unique: + assumes ntfns: "cpspace_ntfn_relation ah ch" "cpspace_ntfn_relation ah' ch" + and vs: "\s. ksPSpace s = ah \ valid_pspace' s" + and vs': "\s. ksPSpace s = ah' \ valid_pspace' s" + shows "map_to_ntfns ah' = map_to_ntfns ah" + using ntfns + apply (clarsimp simp: cmap_relation_def) + apply (drule inj_image_inv[OF inj_Ptr])+ + apply simp + apply (rule ext) + apply (case_tac "x:dom (map_to_ntfns ah)") + apply (drule bspec, assumption)+ + apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) + apply (clarsimp) + apply (frule ksPSpace_valid_pspace_ntfnBoundTCB_nonzero[OF vs]) + apply (frule ksPSpace_valid_pspace_ntfnBoundTCB_nonzero[OF vs']) + apply (cut_tac vs vs') + apply (clarsimp simp: valid_pspace'_def) + apply (frule (2) map_to_ko_atI) + apply (frule_tac v=ya in map_to_ko_atI, simp+) + apply (clarsimp dest!: obj_at_valid_objs' split: option.splits) + apply (thin_tac "map_to_ntfns x y = Some z" for x y z)+ + apply (case_tac y, case_tac ya, case_tac "the (clift ch (ntfn_Ptr x))") + by (auto simp: NtfnState_Active_def NtfnState_Idle_def NtfnState_Waiting_def typ_heap_simps + cnotification_relation_def Let_def tcb_queue_rel'_clift_unique + option_to_ctcb_ptr_def valid_obj'_def valid_ntfn'_def valid_bound_tcb'_def + tcb_at_not_NULL tcb_ptr_to_ctcb_ptr_inj + split: ntfn.splits option.splits) (* long *) + +lemma canonical_pageBits_shift_inj: + "\ canonical_address x; canonical_address y; x << pageBits = y << pageBits \ \ x = y" + unfolding canonical_address_mask_eq canonical_bit_def pageBits_def + by word_bitwise clarsimp + +lemma cpspace_pte_relation_unique: + assumes ptes: "cpspace_pte_relation ah ch" "cpspace_pte_relation ah' ch" + assumes vs: "\s. ksPSpace s = ah \ valid_objs' s" + assumes vs': "\s. 
ksPSpace s = ah' \ valid_objs' s" + shows "map_to_ptes ah' = map_to_ptes ah" + using ptes + apply (clarsimp simp: cmap_relation_def) + apply (drule inj_image_inv[OF inj_Ptr])+ + apply simp + apply (rule ext, rename_tac x) + apply (case_tac "x \ dom (map_to_ptes ah)") + apply (drule bspec, assumption)+ + apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) + using vs vs' + apply (clarsimp simp: map_comp_Some_iff) + apply (erule (1) valid_objsE')+ + apply (clarsimp simp: valid_obj'_def projectKO_opt_pte) + subgoal by (clarsimp simp: cpte_relation_def Let_def attridx_from_vmattributes_def + ap_from_vm_rights_def mair_s2_types_defs canonical_pageBits_shift_inj + dest!: from_bool_eqI + split: pte.splits vmrights.splits if_splits) + by fastforce + +lemma cpspace_asidpool_relation_unique: + assumes rels: "cpspace_asidpool_relation ah ch" + "cpspace_asidpool_relation ah' ch" + shows "map_to_asidpools ah' = map_to_asidpools ah" + (* FIXME: Should we generalize cmap_relation_unique, instead? *) + using rels + apply (clarsimp simp add: cmap_relation_def) + apply (drule inj_image_inv[OF inj_Ptr])+ + apply clarsimp + apply (rule ext, rename_tac p) + apply (case_tac "p \ dom (map_to_asidpools ah)") + apply (drule bspec, assumption)+ + apply (simp add: dom_def Collect_eq, drule_tac x=p in spec) + apply (clarsimp simp: casid_pool_relation_def Let_def + split: asidpool.splits asid_pool_C.splits) + apply (rename_tac ap ap' apC) + apply (drule array_relation_to_map)+ + apply (rule ext, rename_tac i) + apply (drule_tac x=i in spec)+ + apply (case_tac "i \ mask asid_low_bits"; simp) + apply (clarsimp simp: casid_map_relation_def split: option.splits asid_map_CL.splits) + apply (drule_tac c=i in contra_subsetD, simp)+ + apply (clarsimp simp: non_dom_eval_eq) + apply force + done + +lemma cpspace_user_data_relation_unique: + "\cmap_relation (heap_to_user_data ah bh) (clift ch) Ptr cuser_user_data_relation; + cmap_relation (heap_to_user_data ah' bh')(clift ch) Ptr cuser_user_data_relation\ + \ map_to_user_data ah' = map_to_user_data ah" + apply (clarsimp simp add: cmap_relation_def) + apply (drule inj_image_inv[OF inj_Ptr])+ + apply simp + apply (rule ext) + apply (case_tac "x:dom (heap_to_user_data ah bh)") + apply (drule bspec, assumption)+ + apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) + apply (clarsimp simp add: cuser_user_data_relation_def heap_to_user_data_def) + apply (rule ccontr) + apply (case_tac z, case_tac za) + apply simp + apply (fastforce simp: dom_def heap_to_user_data_def Collect_eq) + done + +lemma cpspace_device_data_relation_unique: + "\cmap_relation (heap_to_device_data ah bh) (clift ch) Ptr cuser_device_data_relation; + cmap_relation (heap_to_device_data ah' bh')(clift ch) Ptr cuser_device_data_relation\ + \ map_to_user_data_device ah' = map_to_user_data_device ah" + apply (clarsimp simp add: cmap_relation_def) + apply (drule inj_image_inv[OF inj_Ptr])+ + apply simp + apply (rule ext) + apply (case_tac "x:dom (heap_to_device_data ah bh)") + apply (drule bspec, assumption)+ + apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) + apply (clarsimp simp add: heap_to_device_data_def) + apply (rule ccontr) + apply (case_tac z, case_tac za) + apply simp + apply (fastforce simp: dom_def heap_to_device_data_def Collect_eq) + done + +lemmas projectKO_opts = projectKO_opt_ep projectKO_opt_ntfn projectKO_opt_tcb + projectKO_opt_pte + projectKO_opt_asidpool projectKO_opt_cte + projectKO_opt_user_data projectKO_opt_user_data_device + projectKO_opt_vcpu + +(* Following from the 
definition of map_to_ctes, + there are two kinds of capability tables, namely CNodes and TCBs. + I would like to talk specifically about CNode Entries. + Hence, the name cnes. *) +abbreviation + map_to_cnes :: "(word32 \ kernel_object) \ word32 \ cte" +where + "map_to_cnes \ (\\<^sub>m) projectKO_opt" + +lemma map_to_cnes_eq: + assumes aligned: "pspace_aligned' s" and aligned': "pspace_aligned' s'" + assumes distinct: "pspace_distinct' s" and distinct': "pspace_distinct' s'" + shows + "ctes_of s' = ctes_of s \ + map_to_tcbs (ksPSpace s') = map_to_tcbs (ksPSpace s) \ + (projectKO_opt::kernel_object\cte) \\<^sub>m ksPSpace s' = + (projectKO_opt::kernel_object\cte) \\<^sub>m ksPSpace s" + apply (rule ext) + apply (simp add: fun_eq_iff) + apply (drule_tac x=x in spec) + apply (case_tac "ksPSpace s x", case_tac "ksPSpace s' x", simp) + apply (clarsimp simp add: projectKO_opt_cte split: kernel_object.splits) + apply (frule SR_lemmas_C.ctes_of_ksI[OF _ aligned' distinct'], simp) + apply (drule_tac t="ctes_of s x" in sym) + apply (frule ctes_of_cte_at, simp add: cte_at'_obj_at') + apply (elim disjE) + apply (simp add: obj_at'_def) + apply (clarsimp simp add: obj_at'_real_def ko_wp_at'_def) + apply (drule_tac x="x-n" in spec) + apply (clarsimp simp add: map_comp_def projectKO_opt_tcb + split: option.splits kernel_object.splits) + apply (frule_tac x="x-n" in pspace_distinctD'[OF _ distinct']) + apply (simp add: objBitsKO_def) + apply (erule_tac y=x and s=s' and getF=a and setF=b + in tcb_space_clear[rotated], assumption+, simp+) + apply (case_tac "ksPSpace s' x") + apply (clarsimp simp add: projectKO_opt_cte split: kernel_object.splits) + apply (frule SR_lemmas_C.ctes_of_ksI[OF _ aligned distinct], simp) + apply (frule ctes_of_cte_at, simp add: cte_at'_obj_at') + apply (elim disjE) + apply (simp add: obj_at'_def) + apply (clarsimp simp add: obj_at'_real_def ko_wp_at'_def) + apply (drule_tac x="x-n" in spec) + apply (clarsimp simp add: map_comp_def projectKO_opt_tcb + split: option.splits kernel_object.splits) + apply (frule_tac x="x-n" in pspace_distinctD'[OF _ distinct]) + apply (simp add: objBitsKO_def) + apply (erule_tac y=x and s=s and getF=a and setF=b + in tcb_space_clear[rotated], assumption+, simp+) + apply (case_tac "EX cte. ksPSpace s x = Some (KOCTE cte)", clarsimp) + apply (frule SR_lemmas_C.ctes_of_ksI[OF _ aligned distinct], simp) + apply (drule ctes_of_cte_wpD)+ + apply (simp add: cte_wp_at'_obj_at') + apply (elim disjE) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def) + apply (thin_tac "Bex A P" for A P) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKO_opt_cte) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def + projectKO_opt_cte projectKO_opt_tcb) + apply (case_tac ko, simp_all, clarsimp simp: objBitsKO_def) + apply (erule_tac y=x and s=s' and getF=a and setF=b + in tcb_space_clear[rotated], assumption+) + apply (drule_tac x=x in spec, simp add: projectKO_opt_tcb) + apply simp + apply (thin_tac "Bex A P" for A P) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def + projectKO_opt_cte projectKO_opt_tcb) + apply (case_tac ko, simp_all, clarsimp simp: objBitsKO_def) + apply (erule_tac y=x and s=s and getF=a and setF=b + in tcb_space_clear[rotated], assumption+, simp+) + apply (case_tac "EX cte. 
ksPSpace s' x = Some (KOCTE cte)", clarsimp) + apply (frule SR_lemmas_C.ctes_of_ksI[OF _ aligned' distinct'], simp) + apply (drule_tac t="ctes_of s x" in sym) + apply (drule ctes_of_cte_wpD)+ + apply (simp add: cte_wp_at'_obj_at') + apply (elim disjE) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKO_opt_cte) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def + projectKO_opt_cte projectKO_opt_tcb) + apply (case_tac ko, simp_all, clarsimp simp: objBitsKO_def) + apply (erule_tac y=x and s=s and getF=aa and setF=b + in tcb_space_clear[rotated], assumption+) + apply (drule_tac x=x in spec, simp add: projectKO_opt_tcb) + apply simp + apply (clarsimp simp: projectKO_opt_cte split: kernel_object.splits) + done + +lemma valid_objs'_aligned_vcpuTCB: + "valid_objs' s \ \x\ran (map_to_vcpus (ksPSpace s)). is_aligned_opt (vcpuTCBPtr x) tcbBlockSizeBits" + apply (clarsimp simp: valid_objs'_def ran_def pspace_aligned'_def) + apply (rename_tac p) + apply (case_tac "ksPSpace s p"; simp) + apply (rename_tac ko, drule_tac x=ko in spec) + apply (case_tac ko; clarsimp) + apply (erule impE, fastforce) + apply (clarsimp simp: valid_obj'_def valid_vcpu'_def split: option.splits) + done + +lemma cpspace_relation_unique: + assumes valid_pspaces: "valid_pspace' s" "valid_pspace' s'" + shows "cpspace_relation (ksPSpace s) bh ch \ + cpspace_relation (ksPSpace s') bh ch \ + (ksPSpace s') = (ksPSpace (s::kernel_state))" (is "PROP ?goal") +proof - + from valid_pspaces + have no_kdatas: "KOKernelData \ ran (ksPSpace s)" + "KOKernelData \ ran (ksPSpace s')" + by (auto simp add: valid_pspace'_def valid_objs'_def valid_obj'_def) + + from valid_pspaces + have valid_objs: "valid_objs' s" and valid_objs': "valid_objs' s'" + and aligned: "pspace_aligned' s" and aligned': "pspace_aligned' s'" + and distinct: "pspace_distinct' s" and distinct': "pspace_distinct' s'" + and no_0_objs: "no_0_obj' s" and no_0_objs': "no_0_obj' s'" + by auto + + show "PROP ?goal" + apply (clarsimp simp add: cpspace_relation_def) + apply (drule (1) cpspace_cte_relation_unique) + apply (drule (1) cpspace_ep_relation_unique) + apply (drule (1) cpspace_ntfn_relation_unique) + apply (fastforce intro: valid_pspaces) + apply (fastforce intro: valid_pspaces) + apply (drule (1) cpspace_pte_relation_unique) + apply (fastforce intro: valid_objs) + apply (fastforce intro: valid_objs') + apply (drule (1) cpspace_asidpool_relation_unique) + apply (drule (1) cpspace_vcpu_relation_unique) + apply (rule valid_objs'_aligned_vcpuTCB [OF valid_objs]) + apply (rule valid_objs'_aligned_vcpuTCB [OF valid_objs']) + apply (drule (1) cpspace_tcb_relation_unique) + apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs')+ + apply (fastforce intro: aligned distinct aligned' distinct')+ + apply (intro allI impI,elim exE conjE) + apply (rule_tac p=p in map_to_ctes_tcb_ctes, assumption) + apply (frule (1) map_to_ko_atI[OF _ aligned distinct]) + apply (frule (1) map_to_ko_atI[OF _ aligned' distinct']) + apply (drule (1) map_to_cnes_eq[OF aligned aligned' distinct distinct']) + apply (drule (1) cpspace_user_data_relation_unique) + apply (drule (1) cpspace_device_data_relation_unique) + apply (thin_tac "cmap_relation a c f r" for a c f r)+ + apply (cut_tac no_kdatas) + apply (clarsimp simp add: ran_def fun_eq_iff) + apply (drule_tac x=x in spec)+ + apply (case_tac "ksPSpace s x") + apply (case_tac "ksPSpace s' x", simp) + apply (case_tac a, simp_all add: projectKO_opts + split: arch_kernel_object.splits) + by (clarsimp simp: projectKO_opts map_comp_def + 
split: kernel_object.splits arch_kernel_object.splits + option.splits) +qed +(*<<< end showing that cpspace_relation is actually unique *) + +lemma ksPSpace_eq_imp_ko_wp_at'_eq: + "ksPSpace s' = ksPSpace s \ ko_wp_at' P a s' = ko_wp_at' P a s" + by (clarsimp simp: ko_wp_at'_def ps_clear_def) + +lemma ksPSpace_eq_imp_obj_at'_eq: + assumes ksPSpace: "ksPSpace s' = ksPSpace s" + shows "obj_at' P a s' = obj_at' P a s" + by (clarsimp simp: obj_at'_real_def ksPSpace_eq_imp_ko_wp_at'_eq[OF ksPSpace]) + +lemma ksPSpace_eq_imp_typ_at'_eq: + assumes ksPSpace: "ksPSpace s' = ksPSpace s" + shows "typ_at' P a s' = typ_at' P a s" + by (simp add: typ_at'_def ksPSpace_eq_imp_ko_wp_at'_eq[OF ksPSpace]) + +lemma ksPSpace_eq_imp_valid_cap'_eq: + assumes ksPSpace: "ksPSpace s' = ksPSpace s" + shows "valid_cap' c s' = valid_cap' c s" + by (auto simp: valid_cap'_def page_table_at'_def frame_at'_def + valid_untyped'_def + ksPSpace_eq_imp_obj_at'_eq[OF ksPSpace] + ksPSpace_eq_imp_typ_at'_eq[OF ksPSpace] + ksPSpace_eq_imp_ko_wp_at'_eq[OF ksPSpace] + valid_arch_cap'_def wellformed_acap'_def + split: capability.splits zombie_type.splits arch_capability.splits) + +lemma ksPSpace_eq_imp_valid_tcb'_eq: + assumes ksPSpace: "ksPSpace s' = ksPSpace s" + shows "valid_tcb' tcb s' = valid_tcb' tcb s" + by (auto simp: ksPSpace_eq_imp_obj_at'_eq[OF ksPSpace] + ksPSpace_eq_imp_valid_cap'_eq[OF ksPSpace] + ksPSpace_eq_imp_typ_at'_eq[OF ksPSpace] + valid_tcb'_def valid_tcb_state'_def valid_bound_ntfn'_def valid_arch_tcb'_def + split: thread_state.splits option.splits) + +lemma ksPSpace_eq_imp_valid_objs'_eq: + assumes ksPSpace: "ksPSpace s' = ksPSpace s" + shows "valid_objs' s' = valid_objs' s" + using assms + by (clarsimp simp: valid_objs'_def valid_obj'_def valid_ep'_def + ksPSpace_eq_imp_obj_at'_eq[OF ksPSpace] + ksPSpace_eq_imp_valid_tcb'_eq[OF ksPSpace] + ksPSpace_eq_imp_valid_cap'_eq[OF ksPSpace] + valid_ntfn'_def valid_cte'_def valid_bound_tcb'_def + split: kernel_object.splits endpoint.splits ntfn.splits option.splits) + +lemma ksPSpace_eq_imp_valid_pspace'_eq: + assumes ksPSpace: "ksPSpace s' = ksPSpace s" + shows "valid_pspace' s = valid_pspace' s'" + using assms + by (clarsimp simp: valid_pspace'_def pspace_aligned'_def + pspace_distinct'_def ps_clear_def no_0_obj'_def valid_mdb'_def + pspace_canonical'_def + ksPSpace_eq_imp_valid_objs'_eq[OF ksPSpace]) + +(* The awkwardness of this definition is only caused by the fact + that valid_pspace' is defined over the complete state. *) +definition + cstate_to_pspace_H :: "globals \ machine_word \ kernel_object" +where + "cstate_to_pspace_H c \ + THE h. 
valid_pspace' (undefined\ksPSpace := h\) \ + cpspace_relation h (underlying_memory (cstate_to_machine_H c)) + (t_hrs_' c)" + +lemma cstate_to_pspace_H_correct: + "valid_pspace' a \ + cpspace_relation (ksPSpace a) + (underlying_memory (cstate_to_machine_H c)) (t_hrs_' c) \ + cstate_to_pspace_H c = ksPSpace a" + apply (simp add: cstate_to_pspace_H_def) + apply (rule the_equality, simp) + apply (rule_tac s1=a in ksPSpace_eq_imp_valid_pspace'_eq[THEN iffD1], + clarsimp+) + apply (drule (2) cpspace_relation_unique, simp+) + done + +end + +context state_rel begin + +lemma cDomScheduleIdx_to_H_correct: + assumes valid: "valid_state' as" + assumes cstate_rel: "cstate_relation as cs" + assumes ms: "cstate_to_machine_H cs = observable_memory (ksMachineState as) (user_mem' as)" + shows "unat (ksDomScheduleIdx_' cs) = ksDomScheduleIdx as" + using assms + by (clarsimp simp: cstate_relation_def Let_def observable_memory_def valid_state'_def + newKernelState_def unat_of_nat_eq cdom_schedule_relation_def) + +definition + cDomSchedule_to_H :: "(dschedule_C['b :: finite]) \ (8 word \ machine_word) list" +where + "cDomSchedule_to_H cs \ THE as. cdom_schedule_relation as cs" + +(* FIXME: The assumption of this is unnecessarily strong *) +lemma cDomSchedule_to_H_correct: + assumes valid: "valid_state' as" + assumes cstate_rel: "cstate_relation as cs" + assumes ms: "cstate_to_machine_H cs = observable_memory (ksMachineState as) (user_mem' as)" + shows "cDomSchedule_to_H kernel_all_global_addresses.ksDomSchedule = kernel_state.ksDomSchedule as" + using assms + apply (clarsimp simp: cstate_relation_def Let_def valid_state'_def newKernelState_def cDomSchedule_to_H_def cdom_schedule_relation_def) + apply (rule the_equality, simp) + apply (rule nth_equalityI) + apply simp + apply (clarsimp simp: dom_schedule_entry_relation_def) + apply (drule_tac x=i in spec)+ + apply (rule prod_eqI) + apply (subst up_ucast_inj_eq[where 'b=64, symmetric]) + apply auto + done + +definition + cbitmap_L1_to_H :: "machine_word[num_domains] \ (8 word \ machine_word)" +where + "cbitmap_L1_to_H l1 \ \d. if d \ maxDomain then l1.[unat d] else 0" + +definition + cbitmap_L2_to_H :: "machine_word[4][num_domains] \ (8 word \ nat \ machine_word)" +where + "cbitmap_L2_to_H l2 \ \(d, i). 
+ if d \ maxDomain \ i < l2BitmapSize + then l2.[unat d].[i] else 0" + +lemma cbitmap_L1_to_H_correct: + "cbitmap_L1_relation cs as \ + cbitmap_L1_to_H cs = as" + unfolding cbitmap_L1_to_H_def cbitmap_L1_relation_def + apply (rule ext) + apply (clarsimp split: if_split) + done + +lemma cbitmap_L2_to_H_correct: + "cbitmap_L2_relation cs as \ + cbitmap_L2_to_H cs = as" + unfolding cbitmap_L2_to_H_def cbitmap_L2_relation_def + apply (rule ext) + apply (clarsimp split: if_split) + done + +definition + mk_gsUntypedZeroRanges +where + "mk_gsUntypedZeroRanges s + = ran (untypedZeroRange \\<^sub>m (option_map cteCap o map_to_ctes (cstate_to_pspace_H s)))" + +lemma cpspace_user_data_relation_user_mem'[simp]: + "\pspace_aligned' as;pspace_distinct' as\ \ cpspace_user_data_relation (ksPSpace as) (option_to_0 \ user_mem' as) (t_hrs_' cs) + = cpspace_user_data_relation (ksPSpace as) (underlying_memory (ksMachineState as)) (t_hrs_' cs)" + by (simp add: cmap_relation_def) + +lemma cpspace_device_data_relation_user_mem'[simp]: + "cpspace_device_data_relation (ksPSpace as) (option_to_0 \ user_mem' as) (t_hrs_' cs) + = cpspace_device_data_relation (ksPSpace as) (underlying_memory (ksMachineState as)) (t_hrs_' cs)" + apply (clarsimp simp: cmap_relation_def cuser_user_data_device_relation_def heap_to_device_data_def) + apply (rule_tac arg_cong[where f = "%x. x = y" for y]) + by auto + +lemma mk_gsUntypedZeroRanges_correct: + assumes valid: "valid_state' as" + assumes cstate_rel: "cstate_relation as cs" + shows "mk_gsUntypedZeroRanges cs = gsUntypedZeroRanges as" + using assms + apply (clarsimp simp: valid_state'_def untyped_ranges_zero_inv_def + mk_gsUntypedZeroRanges_def cteCaps_of_def) + apply (subst cstate_to_pspace_H_correct[where c=cs], simp_all) + apply (clarsimp simp: cstate_relation_def Let_def) + apply (subst cstate_to_machine_H_correct, assumption, simp_all) + apply (clarsimp simp: cpspace_relation_def)+ + apply (clarsimp simp: observable_memory_def valid_pspace'_def) + done + + +definition + cstate_to_H :: "globals \ kernel_state" +where + "cstate_to_H s \ + \ksPSpace = cstate_to_pspace_H s, + gsUserPages = fst (ghost'state_' s), gsCNodes = fst (snd (ghost'state_' s)), + gsUntypedZeroRanges = mk_gsUntypedZeroRanges s, + gsMaxObjectSize = (let v = unat (gs_get_assn cap_get_capSizeBits_'proc (ghost'state_' s)) + in if v = 0 then card (UNIV :: machine_word set) else v), + ksDomScheduleIdx = unat (ksDomScheduleIdx_' s), + ksDomSchedule = cDomSchedule_to_H kernel_all_global_addresses.ksDomSchedule, + ksCurDomain = ucast (ksCurDomain_' s), + ksDomainTime = ksDomainTime_' s, + ksReadyQueues = cready_queues_to_H (ksReadyQueues_' s), + ksReadyQueuesL1Bitmap = cbitmap_L1_to_H (ksReadyQueuesL1Bitmap_' s), + ksReadyQueuesL2Bitmap = cbitmap_L2_to_H (ksReadyQueuesL2Bitmap_' s), + ksCurThread = ctcb_ptr_to_tcb_ptr (ksCurThread_' s), + ksIdleThread = ctcb_ptr_to_tcb_ptr (ksIdleThread_' s), + ksSchedulerAction = cscheduler_action_to_H (ksSchedulerAction_' s), + ksInterruptState = + cint_state_to_H intStateIRQNode_array_Ptr (intStateIRQTable_' s), + ksWorkUnitsCompleted = ksWorkUnitsCompleted_' s, + ksArchState = carch_state_to_H s, + ksMachineState = cstate_to_machine_H s\" + +end + +context kernel_m begin + +lemma trivial_eq_conj: "B = C \ (A \ B) = (A \ C)" + by simp + +lemma cstate_to_H_correct: + assumes valid: "valid_state' as" + assumes cstate_rel: "cstate_relation as cs" + assumes rdyqs: "ksReadyQueues_asrt as" + shows "cstate_to_H cs = as \ksMachineState:= observable_memory (ksMachineState as) (user_mem' 
as)\" + apply (subgoal_tac "cstate_to_machine_H cs = observable_memory (ksMachineState as) (user_mem' as)") + apply (rule kernel_state.equality, simp_all add: cstate_to_H_def) + apply (rule cstate_to_pspace_H_correct) + using valid + apply (simp add: valid_state'_def) + using cstate_rel valid + apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def + observable_memory_def valid_state'_def valid_pspace'_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def prod_eq_iff) + using cstate_rel + apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def prod_eq_iff) + using valid cstate_rel + apply (rule mk_gsUntypedZeroRanges_correct) + subgoal + using cstate_rel + by (fastforce simp: cstate_relation_def cpspace_relation_def + Let_def ghost_size_rel_def unat_eq_0 + split: if_split) + using valid cstate_rel + apply (rule cDomScheduleIdx_to_H_correct) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + using valid cstate_rel + apply (rule cDomSchedule_to_H_correct) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def ucast_up_ucast_id is_up_8_32) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + apply (rule cready_queues_to_H_correct) + using cstate_rel rdyqs + apply (fastforce intro!: cready_queues_to_H_correct + simp: cstate_relation_def Let_def) + using valid apply (fastforce simp: valid_state'_def) + using rdyqs apply fastforce + using valid apply (fastforce simp: valid_state'_def) + using valid apply (fastforce simp: valid_state'_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + apply (rule cbitmap_L1_to_H_correct) + apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + apply (rule cbitmap_L2_to_H_correct) + apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + apply (rule csch_act_rel_to_H[THEN iffD1]) + apply (case_tac "ksSchedulerAction as", simp+) + using valid + subgoal + by (clarsimp simp: valid_state'_def st_tcb_at'_def + obj_at'_real_def ko_wp_at'_def objBitsKO_def projectKO_opt_tcb + split: kernel_object.splits) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + apply (rule cint_rel_to_H) + using valid + apply (simp add: valid_state'_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + apply (rule carch_state_to_H_correct) + using valid + apply (simp add: valid_state'_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel + apply (clarsimp simp: cstate_relation_def Let_def carch_state_relation_def carch_globals_def) + apply (rule cstate_to_machine_H_correct[simplified]) + using valid + apply (simp add: valid_state'_def) + using cstate_rel + apply (simp add: cstate_relation_def Let_def) + using cstate_rel + apply (simp add: cstate_relation_def Let_def cpspace_relation_def) + using cstate_rel + apply (simp add: cstate_relation_def Let_def cpspace_relation_def) + using valid 
+ apply (clarsimp simp add: valid_state'_def) + done + +end + +context state_rel begin + +definition + cstate_to_A :: "cstate \ det_state" +where + "cstate_to_A \ absKState \ cstate_to_H \ globals" + +definition + "Fin_C \ \((tc,s),m,e). ((tc, cstate_to_A s),m,e)" + +definition + doUserOp_C + :: "user_transition \ user_context \ + (cstate, event option \ user_context) nondet_monad" + where + "doUserOp_C uop tc \ + do t \ gets (ctcb_ptr_to_tcb_ptr \ ksCurThread_' \ globals); + conv \ gets (ptable_lift t \ cstate_to_A); + rights \ gets (ptable_rights t \ cstate_to_A); + um \ gets (\s. user_mem_C (globals s) \ ptrFromPAddr); + dm \ gets (\s. device_mem_C (globals s) \ ptrFromPAddr); + ds \ gets (\s. (device_state (phantom_machine_state_' (globals s)))); + + assert (dom (um \ addrFromPPtr) \ - dom ds); + assert (dom (dm \ addrFromPPtr) \ dom ds); + + (e,tc',um',ds') \ select (fst (uop t (restrict_map conv {pa. rights pa \ {}}) rights + (tc, restrict_map um + {pa. \va. conv va = Some pa \ AllowRead \ rights va}, + (ds \ ptrFromPAddr) |` {pa. \va. conv va = Some pa \ AllowRead \ rights va} ))); + setUserMem_C ((um' |` {pa. \va. conv va = Some pa \ AllowWrite \ rights va} + \ addrFromPPtr) |` (- dom ds)); + setDeviceState_C ((ds' |` {pa. \va. conv va = Some pa \ AllowWrite \ rights va} + \ addrFromPPtr) |` (dom ds)); + return (e,tc') + od" + +definition + do_user_op_C :: "user_transition \ + ((user_context \ cstate) \ (event option \ user_context \ cstate)) set" +where + "do_user_op_C uop \ monad_to_transition (doUserOp_C uop)" + +end + +context kernel_m begin + +definition + ADT_C :: "user_transition \ (cstate global_state, det_ext observable, unit) data_type" +where + "ADT_C uop \ \ Init = Init_C, Fin = Fin_C, + Step = (\u. global_automaton check_active_irq_C (do_user_op_C uop) (kernel_call_C False)) \" + +definition + ADT_FP_C :: "user_transition \ (cstate global_state, det_ext observable, unit) data_type" +where + "ADT_FP_C uop \ \ Init = Init_C, Fin = Fin_C, + Step = (\u. 
global_automaton check_active_irq_C (do_user_op_C uop) (kernel_call_C True)) \" + +end + +locale kernel_global = state_rel + kernel_all_global_addresses +(* repeating ADT definitions in the c-parser's locale now (not the substitute) *) +begin + +definition + handleHypervisorEvent_C :: "hyp_fault_type \ (globals myvars, int, strictc_errortype) com" +where + "handleHypervisorEvent_C t = (case t + of hyp_fault_type.ARMVCPUFault hsr \ (CALL handleVCPUFault(ucast hsr)))" + +definition + "callKernel_C e \ case e of + SyscallEvent n \ exec_C \ (\ret__unsigned_long :== CALL handleSyscall(syscall_from_H n)) + | UnknownSyscall n \ exec_C \ (\ret__unsigned_long :== CALL handleUnknownSyscall(of_nat n)) + | UserLevelFault w1 w2 \ exec_C \ (\ret__unsigned_long :== CALL handleUserLevelFault(w1,w2)) + | Interrupt \ exec_C \ (Call handleInterruptEntry_'proc) + | VMFaultEvent t \ exec_C \ (\ret__unsigned_long :== CALL handleVMFaultEvent(kernel_m.vm_fault_type_from_H t)) + | HypervisorEvent t \ exec_C \ (handleHypervisorEvent_C t)" + +definition + "callKernel_withFastpath_C e \ + if e = SyscallEvent syscall.SysCall \ e = SyscallEvent syscall.SysReplyRecv + then exec_C \ (\cptr :== CALL getRegister(\ksCurThread, scast Kernel_C.capRegister);; + \msgInfo :== CALL getRegister(\ksCurThread, scast Kernel_C.msgInfoRegister);; + IF e = SyscallEvent syscall.SysCall + THEN CALL fastpath_call(\cptr, \msgInfo) + ELSE CALL fastpath_reply_recv(\cptr, \msgInfo) FI) + else callKernel_C e" + +definition + setTCBContext_C :: "user_context_C \ tcb_C ptr \ (cstate,unit) nondet_monad" +where + "setTCBContext_C ct thread \ + exec_C \ (\t_hrs :== hrs_mem_update (heap_update ( + Ptr &((Ptr &(thread\[''tcbArch_C'']) :: (arch_tcb_C ptr))\[''tcbContext_C''])) ct) \t_hrs)" + +definition + "kernelEntry_C fp e tc \ do + t \ gets (ksCurThread_' o globals); + setTCBContext_C (to_user_context_C tc) t; + if fp then callKernel_withFastpath_C e else callKernel_C e; + t \ gets (ksCurThread_' o globals); + gets $ getContext_C t + od" + +definition + "kernel_call_C fp e \ + {(s, m, s'). s' \ fst (split (kernelEntry_C fp e) s) \ + m = (if ct_running_C (snd s') then UserMode else IdleMode)}" + +definition + "getActiveIRQ_C \ exec_C \ (Call getActiveIRQ_'proc)" + +definition + "checkActiveIRQ_C \ + do getActiveIRQ_C; + irq \ gets ret__unsigned_long_'; + return (irq \ scast irqInvalid) + od" + +definition + check_active_irq_C :: "((user_context \ cstate) \ bool \ (user_context \ cstate)) set" + where + "check_active_irq_C \ {((tc, s), irq_raised, (tc, s')). (irq_raised, s') \ fst (checkActiveIRQ_C s)}" + +definition + ADT_C :: "user_transition \ (cstate global_state, det_ext observable, unit) data_type" +where + "ADT_C uop \ \ Init = Init_C, Fin = Fin_C, + Step = (\u. global_automaton check_active_irq_C (do_user_op_C uop) (kernel_call_C False)) \" + +definition + ADT_FP_C :: "user_transition \ (cstate global_state, det_ext observable, unit) data_type" +where + "ADT_FP_C uop \ \ Init = Init_C, Fin = Fin_C, + Step = (\u. 
global_automaton check_active_irq_C (do_user_op_C uop) (kernel_call_C True)) \" + +end + +end diff --git a/proof/crefine/AARCH64/ArchMove_C.thy b/proof/crefine/AARCH64/ArchMove_C.thy new file mode 100644 index 0000000000..e251a25afc --- /dev/null +++ b/proof/crefine/AARCH64/ArchMove_C.thy @@ -0,0 +1,716 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch specific lemmas that should be moved into theory files before CRefine *) + +theory ArchMove_C +imports Move_C +begin + +lemma aligned_no_overflow_less: (* FIXME AARCH64: move to Word_Lib *) + "\ is_aligned p n; p + 2 ^ n \ 0 \ \ p < p + 2 ^ n" + by (erule word_leq_minus_one_le) (erule is_aligned_no_overflow) + +lemma ps_clear_is_aligned_ksPSpace_None: + "\ps_clear p n s; is_aligned p n; 0 mask n\ + \ ksPSpace s (p + d) = None" + apply (simp add: ps_clear_def add_diff_eq[symmetric] mask_2pm1[symmetric]) + apply (drule equals0D[where a="p + d"]) + apply (simp add: dom_def word_gt_0) + apply (drule mp) + apply (rule word_plus_mono_right) + apply simp + apply (simp add: mask_2pm1) + apply (erule is_aligned_no_overflow') + apply (drule mp) + apply (case_tac "(0::machine_word)<2^n") + apply (frule le_m1_iff_lt[of "(2::machine_word)^n" d, THEN iffD1]) + apply (simp add: mask_2pm1[symmetric]) + apply (erule (1) is_aligned_no_wrap') + apply (simp add: is_aligned_mask mask_2pm1 not_less word_bits_def + power_overflow) + by assumption + +lemma ps_clear_is_aligned_ctes_None: + assumes "ps_clear p tcbBlockSizeBits s" + and "is_aligned p tcbBlockSizeBits" + shows "ksPSpace s (p + 2*2^cteSizeBits) = None" + and "ksPSpace s (p + 3*2^cteSizeBits) = None" + and "ksPSpace s (p + 4*2^cteSizeBits) = None" + by (auto intro: assms ps_clear_is_aligned_ksPSpace_None + simp: objBits_defs mask_def)+ + +lemma word_shift_by_3: + "x * 8 = (x::'a::len word) << 3" + by (simp add: shiftl_t2n) + +lemma unat_mask_3_less_8: + "unat (p && mask 3 :: word64) < 8" + apply (rule unat_less_helper) + apply (rule order_le_less_trans, rule word_and_le1) + apply (simp add: mask_def) + done + +lemma ucast_le_ucast_6_64: + "(ucast x \ (ucast y :: word64)) = (x \ (y :: 6 word))" + by (simp add: ucast_le_ucast) + +(* FIXME AARCH64: this is very specific and rather ugly, is it possible to generalise? *) +lemma size_64_less_64: + "size (r::64) < (64::nat)" + apply (induct r rule: bit0.plus_induct, simp) + apply (frule bit0.Suc_size) + apply (case_tac "x = 64 - 1"; clarsimp) + apply (prop_tac "size x \ size (64 - 1 :: 64)") + apply (subst bit0.size_inj, simp) + apply simp + done + +definition + user_word_at :: "machine_word \ machine_word \ kernel_state \ bool" +where + "user_word_at x p \ \s. is_aligned p 3 + \ pointerInUserData p s + \ x = word_rcat (map (underlying_memory (ksMachineState s)) + [p + 7, p + 6, p + 5, p + 4, p + 3, p + 2, p + 1, p])" +definition + device_word_at :: "machine_word \ machine_word \ kernel_state \ bool" +where + "device_word_at x p \ \s. is_aligned p 3 + \ pointerInDeviceData p s + \ x = word_rcat (map (underlying_memory (ksMachineState s)) + [p + 7, p + 6, p + 5, p + 4, p + 3, p + 2, p + 1, p])" + +(* FIXME: move to GenericLib *) +lemmas unat64_eq_of_nat = unat_eq_of_nat[where 'a=64, folded word_bits_def] + +context begin interpretation Arch . + +crunch inv'[wp]: archThreadGet P + +(* FIXME MOVE near thm tg_sp' *) +lemma atg_sp': + "\P\ archThreadGet f p \\t. obj_at' (\t'. 
f (tcbArch t') = t) p and P\" + including no_pre + apply (simp add: archThreadGet_def) + apply wp + apply (rule hoare_strengthen_post) + apply (rule getObject_tcb_sp) + apply clarsimp + apply (erule obj_at'_weakenE) + apply simp + done + +(* FIXME: MOVE to EmptyFail *) +lemma empty_fail_archThreadGet [intro!, wp, simp]: + "empty_fail (archThreadGet f p)" + by (fastforce simp: archThreadGet_def getObject_def split_def) + +lemma valid_untyped': + notes usableUntypedRange.simps[simp del] + assumes pspace_distinct': "pspace_distinct' s" and + pspace_aligned': "pspace_aligned' s" and + al: "is_aligned ptr bits" + shows "valid_untyped' d ptr bits idx s = + (\p ko. ksPSpace s p = Some ko \ + obj_range' p ko \ {ptr..ptr + 2 ^ bits - 1} \ {} \ + obj_range' p ko \ {ptr..ptr + 2 ^ bits - 1} \ + obj_range' p ko \ + usableUntypedRange (UntypedCap d ptr bits idx) = {})" + apply (simp add: valid_untyped'_def) + apply (simp add: ko_wp_at'_def) + apply (rule arg_cong[where f=All]) + apply (rule ext) + apply (rule arg_cong[where f=All]) + apply (rule ext) + apply (case_tac "ksPSpace s ptr' = Some ko", simp_all) + apply (frule pspace_alignedD'[OF _ pspace_aligned']) + apply (frule pspace_distinctD'[OF _ pspace_distinct']) + apply (simp add: ptr_range_mask_range) + apply (frule aligned_ranges_subset_or_disjoint[OF al]) + apply (simp only: ptr_range_mask_range) + apply (fold obj_range'_def) + apply (rule iffI) + apply auto[1] + apply (rule conjI) + apply (rule ccontr, simp) + apply (simp add: Set.psubset_eq) + apply (erule conjE) + apply (case_tac "obj_range' ptr' ko \ mask_range ptr bits \ {}", simp) + apply (cut_tac is_aligned_no_overflow[OF al]) + apply (clarsimp simp add: obj_range'_def mask_def add_diff_eq) + apply (clarsimp simp add: usableUntypedRange.simps Int_commute) + apply (case_tac "obj_range' ptr' ko \ mask_range ptr bits \ {}", simp+) + apply (cut_tac is_aligned_no_overflow[OF al]) + apply (clarsimp simp add: obj_range'_def mask_def add_diff_eq) + apply (frule is_aligned_no_overflow) + by (metis al intvl_range_conv' le_m1_iff_lt less_is_non_zero_p1 + nat_le_linear power_overflow sub_wrap add_0 + add_0_right word_add_increasing word_less_1 word_less_sub_1) + +lemma more_pageBits_inner_beauty: + fixes x :: "9 word" + fixes p :: machine_word + assumes x: "x \ ucast (p && mask pageBits >> 3)" + shows "(p && ~~ mask pageBits) + (ucast x * 8) \ p" + apply clarsimp + apply (simp add: word_shift_by_3) + apply (subst (asm) word_plus_and_or_coroll) + apply (word_eqI_solve dest: test_bit_size simp: pageBits_def) + apply (insert x) + apply (erule notE) + apply word_eqI + apply (erule_tac x="3+n" in allE) + apply (clarsimp simp: word_size pageBits_def) + done + +(* used in StoreWord_C *) +lemma mask_pageBits_inner_beauty: + "is_aligned p 3 \ + (p && ~~ mask pageBits) + (ucast ((ucast (p && mask pageBits >> 3)):: 9 word) * 8) = (p::machine_word)" + apply (simp add: is_aligned_nth word_shift_by_3) + apply (subst word_plus_and_or_coroll) + apply (rule word_eqI) + apply (clarsimp simp: word_size word_ops_nth_size nth_ucast nth_shiftr nth_shiftl) + apply (rule word_eqI) + apply (clarsimp simp: word_size word_ops_nth_size nth_ucast nth_shiftr nth_shiftl + pageBits_def) + apply (rule iffI) + apply (erule disjE) + apply clarsimp + apply clarsimp + apply simp + apply clarsimp + apply (rule context_conjI) + apply (rule leI) + apply clarsimp + apply simp + apply arith + done + +lemma prio_ucast_shiftr_wordRadix_helper: (* FIXME generalise *) + "(ucast (p::priority) >> wordRadix :: machine_word) < 4" + unfolding 
maxPriority_def numPriorities_def wordRadix_def + using unat_lt2p[where x=p] + apply (clarsimp simp add: word_less_nat_alt shiftr_div_2n' unat_ucast_upcast is_up word_le_nat_alt) + apply arith + done + +lemma prio_ucast_shiftr_wordRadix_helper': (* FIXME generalise *) + "(ucast (p::priority) >> wordRadix :: machine_word) \ 3" + unfolding maxPriority_def numPriorities_def wordRadix_def + using unat_lt2p[where x=p] + apply (clarsimp simp add: word_less_nat_alt shiftr_div_2n' unat_ucast_upcast is_up word_le_nat_alt) + apply arith + done + +lemma prio_unat_shiftr_wordRadix_helper': (* FIXME generalise *) + "unat ((p::priority) >> wordRadix) \ 3" + unfolding maxPriority_def numPriorities_def wordRadix_def + using unat_lt2p[where x=p] + apply (clarsimp simp add: word_less_nat_alt shiftr_div_2n' unat_ucast_upcast is_up word_le_nat_alt) + apply arith + done + +lemma prio_ucast_shiftr_wordRadix_helper2: (* FIXME possibly unused *) + "(ucast (p::priority) >> wordRadix :: machine_word) < 0x20" + by (rule order_less_trans[OF prio_ucast_shiftr_wordRadix_helper]; simp) + +lemma prio_ucast_shiftr_wordRadix_helper3: + "(ucast (p::priority) >> wordRadix :: machine_word) < 0x40" + by (rule order_less_trans[OF prio_ucast_shiftr_wordRadix_helper]; simp) + +lemma unat_ucast_prio_L1_cmask_simp: + "unat (ucast (p::priority) && 0x3F :: machine_word) = unat (p && 0x3F)" + using unat_ucast_prio_mask_simp[where m=6] + by (simp add: mask_def) + +lemmas setEndpoint_obj_at_tcb' = setEndpoint_obj_at'_tcb + +(* FIXME: Move to Schedule_R.thy. Make Arch_switchToThread_obj_at a specialisation of this *) +lemma Arch_switchToThread_obj_at_pre: + "\obj_at' (Not \ tcbQueued) t\ + Arch.switchToThread t + \\rv. obj_at' (Not \ tcbQueued) t\" + apply (simp add: AARCH64_H.switchToThread_def) + apply (wp asUser_obj_at_notQ doMachineOp_obj_at hoare_drop_imps|wpc)+ + done + +lemma loadWordUser_submonad_fn: + "loadWordUser p = submonad_fn ksMachineState (ksMachineState_update \ K) + (pointerInUserData p) (loadWord p)" + by (simp add: loadWordUser_def submonad_doMachineOp.fn_is_sm submonad_fn_def) + +lemma storeWordUser_submonad_fn: + "storeWordUser p v = submonad_fn ksMachineState (ksMachineState_update \ K) + (pointerInUserData p) (storeWord p v)" + by (simp add: storeWordUser_def submonad_doMachineOp.fn_is_sm submonad_fn_def) + +lemma threadGet_tcbFault_loadWordUser_comm: + "do x \ threadGet tcbFault t; y \ loadWordUser p; n x y od = + do y \ loadWordUser p; x \ threadGet tcbFault t; n x y od" + apply (rule submonad_comm [OF tcbFault_submonad_args _ + threadGet_tcbFault_submonad_fn + loadWordUser_submonad_fn]) + apply (simp add: submonad_args_def pointerInUserData_def) + apply (simp add: thread_replace_def Let_def) + apply simp + apply (clarsimp simp: thread_replace_def Let_def typ_at'_def ko_wp_at'_def + ps_clear_upd ps_clear_upd_None pointerInUserData_def + split: option.split kernel_object.split) + apply (simp add: get_def empty_fail_def) + apply simp + done + +lemma threadGet_tcbFault_storeWordUser_comm: + "do x \ threadGet tcbFault t; y \ storeWordUser p v; n x y od = + do y \ storeWordUser p v; x \ threadGet tcbFault t; n x y od" + apply (rule submonad_comm [OF tcbFault_submonad_args _ + threadGet_tcbFault_submonad_fn + storeWordUser_submonad_fn]) + apply (simp add: submonad_args_def pointerInUserData_def) + apply (simp add: thread_replace_def Let_def) + apply simp + apply (clarsimp simp: thread_replace_def Let_def typ_at'_def ko_wp_at'_def + ps_clear_upd ps_clear_upd_None pointerInUserData_def + split: option.split 
kernel_object.split) + apply (simp add: get_def empty_fail_def) + apply simp + done + +lemma asUser_getRegister_discarded: + "(asUser t (getRegister r)) >>= (\_. n) = + stateAssert (tcb_at' t) [] >>= (\_. n)" + apply (rule ext) + apply (clarsimp simp: submonad_asUser.fn_is_sm submonad_fn_def + submonad_asUser.args assert_def select_f_def + gets_def get_def modify_def put_def + getRegister_def bind_def split_def + return_def fail_def stateAssert_def) + done + +crunches switchToIdleThread + for ksCurDomain[wp]: "\s. P (ksCurDomain s)" + +crunches vcpuUpdate + for pspace_canonical'[wp]: pspace_canonical' + +lemma vcpuUpdate_valid_pspace'[wp]: + "(\vcpu. vcpuTCBPtr (f vcpu) = vcpuTCBPtr vcpu) \ + vcpuUpdate vr f \valid_pspace'\" + unfolding valid_pspace'_def valid_mdb'_def + by wpsimp + +lemma updateASIDPoolEntry_valid_pspace'[wp]: + "updateASIDPoolEntry p f \valid_pspace'\" + unfolding updateASIDPoolEntry_def valid_pspace'_def getPoolPtr_def + by (wpsimp wp: getASID_wp) + +lemma getMessageInfo_less_4: + "\\\ getMessageInfo t \\rv s. msgExtraCaps rv < 4\" + including no_pre + apply (simp add: getMessageInfo_def) + apply wp + apply (rule hoare_strengthen_post, rule hoare_vcg_prop) + apply (simp add: messageInfoFromWord_def Let_def + Types_H.msgExtraCapBits_def) + apply (rule word_leq_minus_one_le, simp) + apply simp + apply (rule word_and_le1) + done + +lemma getMessageInfo_msgLength': + "\\\ getMessageInfo t \\rv s. msgLength rv \ 0x78\" + including no_pre + apply (simp add: getMessageInfo_def) + apply wp + apply (rule hoare_strengthen_post, rule hoare_vcg_prop) + apply (simp add: messageInfoFromWord_def Let_def msgMaxLength_def not_less + Types_H.msgExtraCapBits_def split: if_split ) + done + +definition + "isPTCap' cap \ \p pt_t asid. cap = (ArchObjectCap (PageTableCap p pt_t asid))" + +lemma asid_shiftr_low_bits_less[simplified]: + "(asid :: machine_word) \ mask asid_bits \ asid >> asid_low_bits < 2^LENGTH(asid_high_len)" + apply (rule_tac y="2 ^ 7" in order_less_le_trans) + apply (rule shiftr_less_t2n) + apply (simp add: le_mask_iff_lt_2n[THEN iffD1] asid_bits_def asid_low_bits_def) + apply simp + done + +(* We don't have access to n_msgRegisters from C here, but the number of msg registers in C should + be equivalent to what we have in the abstract/design specs. 
We want a number for this definition + that automatically updates if the number of registers changes, and we sanity check it later + in msgRegisters_size_sanity *) +definition size_msgRegisters :: nat where + size_msgRegisters_pre_def: "size_msgRegisters \ size (AARCH64.msgRegisters)" + +schematic_goal size_msgRegisters_def: + "size_msgRegisters = numeral ?x" + unfolding size_msgRegisters_pre_def AARCH64.msgRegisters_def + by (simp add: upto_enum_red fromEnum_def enum_register del: Suc_eq_numeral) + (simp only: Suc_eq_plus1_left, simp del: One_nat_def) + +lemma length_msgRegisters[simplified size_msgRegisters_def]: + "length AARCH64_H.msgRegisters = size_msgRegisters" + by (simp add: size_msgRegisters_pre_def AARCH64_H.msgRegisters_def) + +lemma empty_fail_loadWordUser[intro!, simp]: + "empty_fail (loadWordUser x)" + by (fastforce simp: loadWordUser_def ef_dmo') + +lemma empty_fail_getMRs[iff]: + "empty_fail (getMRs t buf mi)" + by (auto simp add: getMRs_def split: option.split) + +lemma empty_fail_getReceiveSlots: + "empty_fail (getReceiveSlots r rbuf)" +proof - + note + empty_fail_resolveAddressBits[wp] + empty_fail_rethrowFailure[wp] + empty_fail_rethrowFailure[wp] + show ?thesis + unfolding getReceiveSlots_def loadCapTransfer_def lookupCap_def lookupCapAndSlot_def + by (wpsimp simp: emptyOnFailure_def unifyFailure_def lookupSlotForThread_def + capTransferFromWords_def getThreadCSpaceRoot_def locateSlot_conv bindE_assoc + lookupSlotForCNodeOp_def lookupErrorOnFailure_def rangeCheck_def) +qed + +lemma user_getreg_rv: + "\obj_at' (\tcb. P ((user_regs o atcbContextGet o tcbArch) tcb r)) t\ + asUser t (getRegister r) + \\rv s. P rv\" + apply (simp add: asUser_def split_def) + apply (wp threadGet_wp) + apply (clarsimp simp: obj_at'_def getRegister_def in_monad atcbContextGet_def) + done + +crunches insertNewCap, Arch_createNewCaps, threadSet, Arch.createObject, setThreadState, + updateFreeIndex, preemptionPoint + for gsCNodes[wp]: "\s. P (gsCNodes s)" + (wp: crunch_wps setObject_ksPSpace_only + simp: unless_def updateObject_default_def crunch_simps + ignore_del: preemptionPoint) + +(* FIXME AARCH64 vcpu-related items adapted from ARM_HYP's ArchMove_C, possibly not all are useful *) + +lemma vcpu_at_ko: + "vcpu_at' p s \ \vcpu. ko_at' (vcpu::vcpu) p s" + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, auto)[1] + done + +lemma vcpu_at_ko'_eq: + "(\vcpu :: vcpu. ko_at' vcpu p s) = vcpu_at' p s" + apply (rule iffI) + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + apply (case_tac ko, auto) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, auto)[1] + done + +lemmas vcpu_at_ko' = vcpu_at_ko'_eq[THEN iffD2] + +lemma sym_refs_tcb_vcpu': + "\ ko_at' (tcb::tcb) t s; atcbVCPUPtr (tcbArch tcb) = Some v; sym_refs (state_hyp_refs_of' s) \ \ + \vcpu. 
ko_at' vcpu v s \ vcpuTCBPtr vcpu = Some t" + apply (drule (1) hyp_sym_refs_obj_atD') + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def) + apply (case_tac ko; simp add: tcb_vcpu_refs'_def) + apply (rename_tac koa) + apply (case_tac koa; clarsimp simp: refs_of_ao_def vcpu_tcb_refs'_def) + done + + +lemma ko_at'_tcb_vcpu_not_NULL: + "\ ko_at' (tcb::tcb) t s ; valid_objs' s ; no_0_obj' s ; atcbVCPUPtr (tcbArch tcb) = Some p \ + \ 0 < p" + \ \when C pointer is NULL, need this to show atcbVCPUPtr is None\ + unfolding valid_pspace'_def + by (fastforce simp: valid_tcb'_def valid_arch_tcb'_def word_gt_0 typ_at'_no_0_objD + dest: valid_objs_valid_tcb') + +lemma ko_at_vcpu_at'D: + "ko_at' (vcpu :: vcpu) vcpuptr s \ vcpu_at' vcpuptr s" + by (fastforce simp: typ_at_to_obj_at_arches elim: obj_at'_weakenE) + +(* FIXME: change the original to be predicated! *) +crunch pred_tcb_at'2[wp]: doMachineOp "\s. P (pred_tcb_at' a b p s)" + (simp: crunch_simps) + +crunch valid_objs'[wp]: readVCPUReg "\s. valid_objs' s" + +crunch sch_act_wf'[wp]: readVCPUReg "\s. P (sch_act_wf (ksSchedulerAction s) s)" + +crunch ko_at'[wp]: readVCPUReg "\s. P (ko_at' a p s)" + +crunch obj_at'[wp]: readVCPUReg "\s. P (obj_at' a p s)" + +crunch ksCurThread[wp]: readVCPUReg "\s. P (ksCurThread s)" + +(* schematic_goal leads to Suc (Suc ..) form only *) +lemma fromEnum_maxBound_vcpureg_def: + "fromEnum (maxBound :: vcpureg) = 23" + by (clarsimp simp: fromEnum_def maxBound_def enum_vcpureg) + +lemma unat_of_nat_mword_fromEnum_vcpureg[simp]: + "unat ((of_nat (fromEnum e)) :: machine_word) = fromEnum (e :: vcpureg)" + apply (subst unat_of_nat_eq, clarsimp) + apply (rule order_le_less_trans[OF maxBound_is_bound]) + apply (clarsimp simp: fromEnum_maxBound_vcpureg_def)+ + done + +lemma unat_of_nat_mword_length_upto_vcpureg[simp]: + "unat ((of_nat (length [(start :: vcpureg) .e. end])) :: machine_word) = length [start .e. end]" + apply (subst unat_of_nat_eq ; clarsimp) + apply (rule order_le_less_trans[OF length_upto_enum_le_maxBound]) + apply (simp add: fromEnum_maxBound_vcpureg_def) + done + +lemma fromEnum_maxBound_vppievent_irq_def: + "fromEnum (maxBound :: vppievent_irq) = 0" + by (clarsimp simp: fromEnum_def maxBound_def enum_vppievent_irq) + +(* when creating a new object, the entire slot including starting address should be free *) +(* FIXME move *) +lemma ps_clear_entire_slotI: + "({p..p + 2 ^ n - 1}) \ dom (ksPSpace s) = {} \ ps_clear p n s" + by (fastforce simp: ps_clear_def mask_def field_simps) + +lemma ps_clear_ksPSpace_upd_same[simp]: + "ps_clear p n (s\ksPSpace := (ksPSpace s)(p \ v)\) = ps_clear p n s" + by (fastforce simp: ps_clear_def) + +lemma getObject_vcpu_prop: + "\obj_at' P t\ getObject t \\(vcpu :: vcpu) s. P vcpu\" + apply (rule obj_at_getObject) + apply (clarsimp simp: loadObject_default_def in_monad) + done + +(* FIXME would be interesting to generalise these kinds of lemmas to other KOs *) +lemma setObject_sets_object_vcpu: + "\ vcpu_at' v \ setObject v (vcpu::vcpu) \ \_. 
ko_at' vcpu v \" + supply fun_upd_apply[simp del] + apply (clarsimp simp: setObject_def updateObject_default_def bind_assoc) + apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift simp: alignError_def) + apply (clarsimp simp: obj_at'_def) + apply (clarsimp simp: obj_at'_def objBitsKO_def archObjSize_def dest!: vcpu_at_ko') + apply (fastforce simp: fun_upd_apply) + done + +(* FIXME would be interesting to generalise these kinds of lemmas to other KOs *) +lemma placeNewObject_creates_object_vcpu: + "\ \ \ placeNewObject v (vcpu::vcpu) 0 \ \_. ko_at' vcpu v \" + supply fun_upd_apply[simp del] haskell_assert_inv[wp del] + apply (clarsimp simp: placeNewObject_def placeNewObject'_def split_def alignError_def) + apply (wpsimp wp: assert_wp hoare_vcg_imp_lift' hoare_vcg_ex_lift) + apply (clarsimp simp: is_aligned_mask[symmetric] objBitsKO_def archObjSize_def) + apply (case_tac "is_aligned v vcpuBits"; clarsimp) + apply (rule conjI; clarsimp) + apply (subst (asm) lookupAround2_None1) + apply (clarsimp simp: obj_at'_def objBitsKO_def archObjSize_def fun_upd_apply) + apply (fastforce intro: ps_clear_entire_slotI simp add: field_simps fun_upd_apply) + apply (subst (asm) lookupAround2_char1) + apply (clarsimp simp: obj_at'_def objBitsKO_def archObjSize_def fun_upd_apply) + apply (fastforce intro: ps_clear_entire_slotI simp add: field_simps) + done + +(* FIXME would be interesting to generalise these kinds of lemmas to other KOs *) +lemma placeNewObject_object_at_vcpu: + "\ \ \ placeNewObject v (vcpu::vcpu) 0 \ \_. vcpu_at' v \" + by (rule hoare_post_imp[OF _ placeNewObject_creates_object_vcpu]) + (fastforce simp: ko_at_vcpu_at'D) + +lemma case_option_both[simp]: (* FIXME AARCH64: move to Lib, remove duplicates *) + "(case f of None \ P | _ \ P) = P" + by (auto split: option.splits) + +lemma if_case_opt_same_branches: (* FIXME AARCH64: move to Lib, remove duplicates *) + "cond \ Option.is_none opt \ + (if cond then case opt of None \ f | Some x \ g x else f) = f" + by (cases cond; cases opt; clarsimp) + +(* FIXME AARCH64: move these up to Refine or AInvs *) +lemma haskell_assertE_wp[wp]: + "\\s. 
F \ Q () s\ haskell_assertE F L \Q\,\E\" + unfolding haskell_assertE_def + by (rule assertE_wp) + +(* FIXME AARCH64: this needs to exist before VSpace_R.haskell_assertE_inv, so that the crunch there + does not make it [wp] *) +lemma haskell_assertE_inv: + "haskell_assertE F L \P\" + unfolding haskell_assertE_def + by wpsimp + +lemma cte_wp_cteCap_valid: + "\ cte_wp_at' ((=) cap \ cteCap) slot s; valid_objs' s \ \ valid_cap' cap s" + by (clarsimp simp: cte_wp_at_ctes_of ctes_of_valid') + +lemma not_VSRootPT_T_eq: + "(pt_t \ VSRootPT_T) = (pt_t = NormalPT_T)" + by (cases pt_t; simp) + +lemma unat_of_nat_pt_bits_mw: + "unat (of_nat (pt_bits pt_t)::machine_word) = pt_bits pt_t" + by (rule unat_of_nat_eq) (simp add: bit_simps split: if_split) + +lemma unat_mask_pt_bits_shift_neq_0[simp]: + "0 < unat (mask (pt_bits pt_t) >> pte_bits :: machine_word)" + by (simp add: bit_simps mask_def split: if_split) + +lemma pptrBaseOffset_alignment_pt_bits[simp, intro!]: + "pt_bits pt_t \ pptrBaseOffset_alignment" + by (simp add: bit_simps pptrBaseOffset_alignment_def split: if_split) + +lemma canonical_address_mask_shift: + "\ canonical_address p; is_aligned p m'; m \ m'; n + m = Suc canonical_bit; 0 < n \ \ + p && (mask n << m) = p" + apply (prop_tac "m = Suc canonical_bit - n", arith) + apply (simp add: canonical_address_def canonical_address_of_def canonical_bit_def) + apply word_eqI + apply (rule iffI; clarsimp) + apply (rename_tac n') + apply (prop_tac "n' < 48", fastforce) + apply fastforce + done + +schematic_goal pptrUserTop_val: + "pptrUserTop = numeral ?n" + by (simp add: pptrUserTop_def mask_def Kernel_Config.config_ARM_PA_SIZE_BITS_40_def + del: word_eq_numeral_iff_iszero) + +lemma user_region_canonical: + "p \ user_region \ canonical_address p" + apply (simp add: canonical_address_range user_region_def canonical_user_def) + apply (erule order_trans) + apply (rule mask_mono) + apply (simp add: ipa_size_def canonical_bit_def split: if_split) + done + +lemma pptrUserTop_eq_mask_ipa_size: + "pptrUserTop = mask ipa_size" + by (simp add: pptrUserTop_def ipa_size_def) + +lemma mask_pptrUserTop_user_region: + "\ is_aligned v n; v + mask n \ pptrUserTop \ \ v \ user_region" + apply (simp add: user_region_def canonical_user_def pptrUserTop_eq_mask_ipa_size + word_and_or_mask_aligned) + apply (simp flip: and_mask_eq_iff_le_mask) + apply word_eqI_solve + done + +lemma canonical_address_pptrUserTop_mask: + "\ p + 2^n - 1 \ pptrUserTop; is_aligned p n \ \ canonical_address p" + apply (rule user_region_canonical) + apply (erule mask_pptrUserTop_user_region) + apply (simp add: mask_def field_simps) + done + +lemma isVTableRoot_ex: + "isVTableRoot cap = (\p m. 
cap = ArchObjectCap (PageTableCap p VSRootPT_T m))" + by (simp add: isVTableRoot_def split: capability.splits arch_capability.splits pt_type.splits) + +lemma isVTableRoot_cap_eq: + "isVTableRoot cap = + (isArchObjectCap cap \ isPageTableCap (capCap cap) \ capPTType (capCap cap) = VSRootPT_T)" + by (auto simp: isCap_simps isVTableRoot_ex) + +(* FIXME AARCH64: try to make the 48 less magic *) +lemma canonical_address_and_maskD: + "canonical_address p \ p && mask 48 = p" + apply (simp add: word_and_mask_shiftl pageBits_def canonical_address_range canonical_bit_def) + apply word_eqI + apply fastforce + done + +(* FIXME AARCH64: try to make the 48 less magic *) +lemma canonical_address_and_maskI: + "p && mask 48 = p \ canonical_address p" + by (simp add: word_and_mask_shiftl pageBits_def canonical_address_range canonical_bit_def + and_mask_eq_iff_le_mask) + + +lemma addrFromPPtr_canonical_in_kernel_window: + "\ pptrBase \ p; p < pptrTop \ \ canonical_address (addrFromPPtr p)" + apply (simp add: addrFromPPtr_def pptrBaseOffset_def paddrBase_def canonical_address_mask_eq + canonical_bit_def pptrBase_def pageBits_def pptrTop_def) + by word_bitwise clarsimp + +lemma levelType_0[simp]: + "levelType 0 = NormalPT_T" + by (simp add: levelType_def maxPTLevel_def split: if_splits) + +lemma levelType_maxPTLevel[simp]: + "levelType maxPTLevel = VSRootPT_T" + by (simp add: levelType_def) + +(* FIXME AARCH64: move; could be simp *) +lemma pt_bits_minus_pte_bits: + "pt_bits pt_t - pte_bits = ptTranslationBits pt_t" + by (simp add: bit_simps) + +(* FIXME AARCH64: move; could be simp *) +lemma ptTranslationBits_plus_pte_bits: + "ptTranslationBits pt_t + pte_bits = pt_bits pt_t" + by (simp add: bit_simps) + +lemma page_table_pte_at': + "page_table_at' pt_t p s \ pte_at' p s" + apply (clarsimp simp: page_table_at'_def) + apply (erule_tac x=0 in allE) + apply simp + done + +lemma pte_at_ko': + "pte_at' p s \ \pte. ko_at' (pte::pte) p s" + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, auto)[1] + done + +lemma getObject_asidpool_inv[wp]: + "\P\ getObject l \\rv :: asidpool. P\" + apply (rule getObject_inv) + apply simp + apply (rule loadObject_default_inv) + done + +lemma asid_pool_at_ko'_eq: + "(\ap :: asidpool. ko_at' ap p s) = asid_pool_at' p s" + apply (rule iffI) + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + apply (case_tac ko, auto) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, auto)[1] + done + +lemma asid_pool_at_ko': + "asid_pool_at' p s \ \pool. 
ko_at' (ASIDPool pool) p s" + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + apply (case_tac ko, auto) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, auto)[1] + apply (rename_tac asidpool) + apply (case_tac asidpool, auto)[1] + done + +(* FIXME AARCH64: move; also add vmid_bits_val to relevant bit defs *) +value_type vmid_bits = "size (0::vmid)" + +(* end of move to Refine/AInvs *) + +end + +end diff --git a/proof/crefine/AARCH64/Arch_C.thy b/proof/crefine/AARCH64/Arch_C.thy new file mode 100644 index 0000000000..7a49041d5c --- /dev/null +++ b/proof/crefine/AARCH64/Arch_C.thy @@ -0,0 +1,4415 @@ +(* + * Copyright 2024, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Arch_C +imports Recycle_C +begin + +unbundle l4v_word_context + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunches unmapPageTable + for gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + (wp: crunch_wps simp: crunch_simps) + +end + +context kernel_m begin + +lemma storePTE_def': + "storePTE slot pte = setObject slot pte" + unfolding storePTE_def + by (simp add: tailM_def headM_def) + +lemma objBits_InvalidPTE: + "objBits AARCH64_H.InvalidPTE = word_size_bits" + by (simp add: objBits_simps bit_simps) + +lemma objBits_InvalidPTE_pte_bits: + "objBits AARCH64_H.InvalidPTE = pte_bits" + by (simp add: objBits_InvalidPTE bit_simps) + +lemma clearMemory_PT_setObject_PTE_ccorres: + "ccorres dc xfdc + (page_table_at' pt_t ptr and (\s. 2 ^ ptBits pt_t \ gsMaxObjectSize s) and + (\_. is_aligned ptr (ptBits pt_t) \ ptr \ 0 \ pstart = addrFromPPtr ptr)) + (\\ptr___ptr_to_unsigned_long = Ptr ptr\ \ \\bits = of_nat (ptBits pt_t)\) [] + (do x \ mapM_x (\p. setObject p InvalidPTE) + [ptr , ptr + 2 ^ objBits InvalidPTE .e. ptr + 2 ^ ptBits pt_t - 1]; + doMachineOp (cleanCacheRange_PoU ptr (ptr + 2 ^ ptBits pt_t - 1) pstart) + od) + (Call clearMemory_PT_'proc)" + apply (rule ccorres_gen_asm)+ + apply (cinit' lift: ptr___ptr_to_unsigned_long_' bits_') + apply (rule ccorres_Guard_Seq) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule_tac P="page_table_at' pt_t ptr and (\s. 
2 ^ pt_bits pt_t \ gsMaxObjectSize s)" + in ccorres_from_vcg_nofail[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: unat_of_nat_pt_bits_mw) + apply (subst ghost_assertion_size_logic[unfolded o_def]) + apply simp + apply assumption + apply (simp add: is_aligned_no_overflow') + apply (intro conjI) + apply (erule is_aligned_weaken, simp add: bit_simps) + apply (clarsimp simp: is_aligned_def bit_simps split: if_splits) + apply (erule (1) page_table_at_rf_sr_dom_s[simplified]) + apply (clarsimp simp: replicateHider_def[symmetric] + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (simp add: objBits_simps upto_enum_step_def aligned_no_overflow_less + not_less[symmetric] upto_enum_word + split: if_split_asm cong: if_cong) + apply (split if_split) + apply (rule conjI; clarsimp) + apply (fold mask_2pm1 shiftr_div_2n_w) + apply (erule mapM_x_store_memset_ccorres_assist[OF _ _ _ _ _ _ subset_refl]; + simp add: shiftl_t2n hd_map objBits_simps) + apply (clarsimp simp: less_Suc_eq_le nth_append split: if_split) + apply (simp add: bit_simps mask_def split: if_split) + apply (rule cmap_relationE1, erule rf_sr_cpte_relation, erule ko_at_projectKO_opt) + apply (simp add: pte_bits_def word_size_bits_def) + apply (subst coerce_memset_to_heap_update_pte) + apply (clarsimp simp: rf_sr_def Let_def cstate_relation_def typ_heap_simps) + apply (rule conjI) + apply (simp add: cpspace_relation_def typ_heap_simps update_pte_map_tos + update_pte_map_to_ptes carray_map_relation_upd_triv) + apply (rule cmap_relation_updI, simp_all)[1] + apply (simp add: cpte_relation_def Let_def pte_lift_def + pte_get_tag_def pte_tag_defs) + apply (simp add: carch_state_relation_def cmachine_state_relation_def + typ_heap_simps update_pte_map_tos) + apply csymbr + apply (rule ccorres_Guard) + apply (ctac add: cleanCacheRange_PoU_ccorres) + apply (wpsimp wp: mapM_x_wp' setObject_ksPSpace_only updateObject_default_inv) + apply (clarsimp simp: guard_is_UNIV_def bit_simps split: if_split) + apply clarsimp + apply (frule is_aligned_addrFromPPtr_n, simp) + apply (simp add: is_aligned_no_overflow' addrFromPPtr_mask_cacheLineSize) + apply (rule conjI) + apply (simp add: unat_mask_eq flip: mask_2pm1) + apply (simp add: mask_eq_exp_minus_1) + apply (simp add: bit_simps split: if_split) + done + +lemma performPageTableInvocationUnmap_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and cte_wp_at' ((=) (ArchObjectCap cap) \ cteCap) ctSlot + and (\_. isPageTableCap cap \ capPTType cap = NormalPT_T)) + (\ccap_relation (ArchObjectCap cap) \cap\ \ \\ctSlot = Ptr ctSlot\) + [] + (liftE (performPageTableInvocation (PageTableUnmap cap ctSlot))) + (Call performPageTableInvocationUnmap_'proc)" + apply (simp only: liftE_liftM ccorres_liftM_simp) + apply (rule ccorres_gen_asm) + apply (cinit lift: cap_' ctSlot_') + apply (rename_tac cap') + apply csymbr + apply (simp del: Collect_const) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (subgoal_tac "capPTMappedAddress cap + = (\cp. 
if to_bool (capPTIsMapped_CL cp) + then Some (capPTMappedASID_CL cp, capPTMappedAddress_CL cp) + else None) (cap_page_table_cap_lift cap')") + apply (rule ccorres_Cond_rhs) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (ctac add: unmapPageTable_ccorres) + apply (simp add: storePTE_def' swp_def) + apply clarsimp + apply (simp only: bit_simps_corres[symmetric]) + apply csymbr + apply (ctac add: clearMemory_PT_setObject_PTE_ccorres[simplified objBits_InvalidPTE_pte_bits]) + apply wp + apply (simp del: Collect_const) + apply (vcg exspec=unmapPageTable_modifies) + apply simp + apply (rule ccorres_return_Skip') + apply (simp add: cap_get_tag_isCap_ArchObject[symmetric] split: if_split) + apply (clarsimp simp: cap_lift_page_table_cap cap_to_H_def cap_page_table_cap_lift_def + elim!: ccap_relationE cong: if_cong) + apply (simp add: liftM_def getSlotCap_def updateCap_def del: Collect_const) + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_getCTE)+ + apply (rename_tac cte cte') + apply (rule_tac P="cte_wp_at' ((=) cte) ctSlot + and (\_. cte = cte' \ + isArchCap (\acap. isPageTableCap acap \ + capPTType acap = NormalPT_T) (cteCap cte))" + in ccorres_from_vcg_throws [where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of cap_get_tag_isCap_ArchObject) + apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) + apply (frule ccte_relation_ccap_relation) + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap_ArchObject) + apply (rule fst_setCTE [OF ctes_of_cte_at], assumption) + apply (erule rev_bexI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + typ_heap_simps') + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps') + apply (subst setCTE_tcb_case, assumption+) + apply (clarsimp dest!: ksPSpace_update_eq_ExD) + apply (erule cmap_relation_updI, assumption) + apply (clarsimp simp: isCap_simps) + apply (drule cap_get_tag_isCap_unfolded_H_cap) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: ccte_relation_def c_valid_cte_def + elim!: ccap_relationE) + apply (subst cteCap_update_cte_to_H) + apply (clarsimp simp: map_option_Some_eq2) + apply (rule trans, rule sym, rule option.sel, rule sym, erule arg_cong) + apply (erule iffD1[OF cap_page_table_cap_lift]) + apply (clarsimp simp: map_option_Some_eq2 + cap_lift_page_table_cap cap_to_H_def + cap_page_table_cap_lift_def) + apply simp + apply (clarsimp simp: carch_state_relation_def cmachine_state_relation_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"] + dest!: ksPSpace_update_eq_ExD) + apply (simp add: cte_wp_at_ctes_of) + apply (wp mapM_x_wp' | wpc | simp)+ + apply (simp add: guard_is_UNIV_def) + apply (clarsimp simp: cap_get_tag_isCap_ArchObject[symmetric] cte_wp_at_ctes_of) + apply (frule ctes_of_valid', clarsimp) + apply (drule_tac t="cteCap cte" in sym) + apply (frule valid_global_refsD_with_objSize, clarsimp) + apply (clarsimp simp: cap_lift_page_table_cap cap_to_H_def + cap_page_table_cap_lift_def isCap_simps + valid_cap'_def get_capSizeBits_CL_def + bit_simps capAligned_def + to_bool_def mask_def page_table_at'_def + capRange_def Int_commute asid_bits_def + wellformed_mapdata'_def + simp flip: canonical_bit_def + elim!: ccap_relationE cong: if_cong) + apply (drule spec[where x=0]) + apply auto + done + +lemma ap_eq_D: + "x \array_C := arr'\ = asid_pool_C.asid_pool_C arr \ arr' = arr" + by (cases x) simp + 
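+(* The createObjects_asidpool_ccorres lemma below relates placeNewObject of a default asidpool at frame on the Haskell side to the C side, which zeroes the frame with memzero and then retypes the heap type description at that address to an asid_pool_C object. *)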
+declare Kernel_C.asid_pool_C_size [simp del] + +lemma createObjects_asidpool_ccorres: + shows "ccorres dc xfdc + ((\s. \p. cte_wp_at' (\cte. cteCap cte = UntypedCap isdev frame pageBits idx ) p s) + and pspace_aligned' and pspace_distinct' and valid_objs' + and ret_zero frame (2 ^ pageBits) + and valid_global_refs' and pspace_no_overlap' frame pageBits) + ({s. region_actually_is_bytes frame (2^pageBits) s}) + hs + (placeNewObject frame (makeObject::asidpool) 0) + (CALL memzero(Ptr frame, (2 ^ pageBits));; + (global_htd_update (\_. ptr_retyp (ap_Ptr frame))))" +proof - + have helper: "\\ x. (\, x) \ rf_sr \ is_aligned frame pageBits \ frame \ 0 + \ pspace_aligned' \ \ pspace_distinct' \ + \ pspace_no_overlap' frame pageBits \ + \ ret_zero frame (2 ^ pageBits) \ + \ region_actually_is_bytes frame (2 ^ pageBits) x + \ {frame ..+ 2 ^ pageBits} \ kernel_data_refs = {} + \ + (\\ksPSpace := foldr (\addr. data_map_insert addr (KOArch (KOASIDPool makeObject))) (new_cap_addrs (Suc 0) frame (KOArch (KOASIDPool makeObject))) (ksPSpace \)\, + x\globals := globals x + \t_hrs_' := hrs_htd_update (ptr_retyps_gen 1 (ap_Ptr frame) False) + (hrs_mem_update + (heap_update_list frame (replicate (2 ^ pageBits) 0)) + (t_hrs_' (globals x)))\\) \ rf_sr" + (is "\\ x. ?P \ x \ + (\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr") + proof (intro impI allI) + fix \ x + let ?thesis = "(\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr" + let ?ks = "?ks \" + let ?ks' = "?ks' x" + let ?ptr = "ap_Ptr frame" + + assume "?P \ x" + hence rf: "(\, x) \ rf_sr" and al: "is_aligned frame pageBits" and ptr0: "frame \ 0" + and pal: "pspace_aligned' \" and pdst: "pspace_distinct' \" + and pno: "pspace_no_overlap' frame pageBits \" + and zro: "ret_zero frame (2 ^ pageBits) \" + and actually: "region_actually_is_bytes frame (2 ^ pageBits) x" + and kdr: "{frame ..+ 2 ^ pageBits} \ kernel_data_refs = {}" + by simp_all + + note empty = region_actually_is_bytes[OF actually] + + have relrl: + "casid_pool_relation makeObject (from_bytes (replicate (size_of TYPE(asid_pool_C)) 0))" + unfolding casid_pool_relation_def casid_map_relation_def + apply (clarsimp simp: makeObject_asidpool split: asid_pool_C.splits) + apply (clarsimp simp: array_relation_def option_to_ptr_def) + apply (simp add: from_bytes_def) + apply (simp add: typ_info_simps asid_pool_C_tag_def asid_map_C_tag_def + size_td_lt_final_pad size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine) + apply (simp add: padup_def align_td_array') + apply (subst (asm) size_td_array) + apply (simp add: dom_def ran_def) + apply (simp add: size_td_array ti_typ_pad_combine_def ti_typ_combine_def + Let_def empty_typ_info_def update_ti_adjust_ti + del: replicate_numeral Kernel_C.pte_C_size) + apply (simp add: typ_info_array array_tag_def + del: replicate_numeral) + supply replicate_numeral[simp del] + apply (clarsimp dest!: ap_eq_D + simp: update_ti_t_array_tag_n_rep asid_low_bits_def word_le_nat_alt) + + apply (simp add: typ_info_simps asid_pool_C_tag_def + size_td_lt_final_pad size_td_lt_ti_typ_pad_combine Let_def size_of_def) + + apply (subst index_fold_update; auto simp: replicate_numeral update_ti_t_ptr_0s mask_def) + (* casid_map relation *) + apply (clarsimp simp: asid_map_lift_def asid_map_get_tag_def asid_map_C_tag_def) + apply (simp add: final_pad_def padup_def align_td_array') + apply (simp add: size_td_array ti_typ_pad_combine_def ti_typ_combine_def + empty_typ_info_def 
update_ti_adjust_ti) + apply (simp add: typ_info_array array_tag_def) + apply (subst update_ti_t_array_tag_n_rep[where v=0]) + apply (simp add: replicate_numeral) + apply simp + apply (clarsimp simp: update_ti_t_machine_word_0s replicate_numeral asid_map_tag_defs) + done + + define ko where "ko \ KOArch (KOASIDPool makeObject)" + + have rc :"range_cover frame (objBitsKO ko) (objBitsKO ko) (Suc 0)" + by (simp add:objBits_simps ko_def archObjSize_def al range_cover_full) + + have rc' :"range_cover frame (objBitsKO ko) (objBitsKO ko) (2 ^ 0)" + by (simp add:objBits_simps ko_def archObjSize_def al range_cover_full range_cover_rel) + + have pno': "pspace_no_overlap' frame (objBitsKO ko) \" + by (simp add:objBits_simps pno ko_def archObjSize_def al) + + have al': "is_aligned frame (objBitsKO (ko::kernel_object))" + by (simp add:objBits_simps ko_def archObjSize_def al) + + (* s/obj/obj'/ *) + have szo: "size_of TYPE(asid_pool_C) = 2 ^ objBitsKO ko" + by (simp add: size_of_def objBits_simps ko_def archObjSize_def pageBits_def) + have szko: "objBitsKO ko = pageBits" + by (simp add: objBits_simps ko_def archObjSize_def) + hence sz: "objBitsKO ko \ pageBits" by simp + have szo': "2 ^ pageBits = 2 ^ (pageBits - objBitsKO ko) * size_of TYPE(asid_pool_C)" using szko + apply (subst szo) + apply (simp add: power_add [symmetric]) + done + + have [simp]: "(2::nat) ^ (pageBits - objBitsKO ko) * 2 ^ objBitsKO ko = 2 ^ pageBits" + by (clarsimp simp:pageBits_def objBits_simps ko_def archObjSize_def) + + have ptr_retyp: + "hrs_htd_update (ptr_retyps (2 ^ (pageBits - objBitsKO ko)) (ap_Ptr frame)) = hrs_htd_update (ptr_retyp (ap_Ptr frame))" + apply (simp add: szko hrs_htd_update_def) + done + + note rl' = cslift_ptr_retyp_memset_other_inst [OF _ rc' _ szo, + simplified, OF empty[folded szko] szo[symmetric], unfolded szko] + + have szb: "pageBits < word_bits" by simp + have mko: "\dev. makeObjectKO dev (Inl (KOArch (KOASIDPool f))) = Some ko" + by (simp add: ko_def makeObjectKO_def) + + + note rl = projectKO_opt_retyp_other [OF rc pal pno' ko_def] + + note cterl = retype_ctes_helper + [OF pal pdst pno' al' le_refl + range_cover_sz'[where 'a=machine_word_len, + folded word_bits_def, OF rc] + mko rc, simplified] + + note ht_rl = clift_eq_h_t_valid_eq[OF rl', OF tag_disj_via_td_name, simplified] + uinfo_array_tag_n_m_not_le_typ_name + + have guard: + "\n<2 ^ (pageBits - objBitsKO ko). c_guard (CTypesDefs.ptr_add ?ptr (of_nat n))" + apply (rule retype_guard_helper[where m=3]) + apply (rule range_cover_rel[OF rc]) + apply fastforce + apply simp + apply (clarsimp simp:objBits_simps ko_def archObjSize_def) + apply (simp add:ptr0) + apply (simp add:szo) + apply (simp add:align_of_def objBits_simps pageBits_def ko_def archObjSize_def)+ + done + + have cslift_ptr_retyp_helper: + "\x::asid_pool_C ptr\dom (cslift x). is_aligned (ptr_val x) (objBitsKO ko) + \ clift (hrs_htd_update (ptr_retyps_gen 1 (ap_Ptr frame) False) + (hrs_mem_update (heap_update_list frame (replicate ((2::nat) ^ pageBits) (0::word8))) + (t_hrs_' (globals x)))) = + (\y::asid_pool_C ptr. + if y \ (CTypesDefs.ptr_add (ap_Ptr frame) \ of_nat) ` {k::nat. 
k < (2::nat) ^ (pageBits - objBitsKO ko)} + then Some (from_bytes (replicate (size_of TYPE(asid_pool_C)) (0::word8))) else cslift x y)" + using guard + apply (subst clift_ptr_retyps_gen_memset_same, simp_all add: szo szko) + apply (simp add: szo empty szko) + done + + from rf have "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals x))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + hence "cpspace_relation ?ks (underlying_memory (ksMachineState \)) ?ks'" + unfolding cpspace_relation_def + apply - + supply image_cong_simp [cong del] + apply (clarsimp simp: rl' cterl[unfolded ko_def] tag_disj_via_td_name + foldr_upd_app_if [folded data_map_insert_def] cte_C_size tcb_C_size) + apply (subst cslift_ptr_retyp_helper[simplified]) + apply (erule pspace_aligned_to_C [OF pal]) + apply (simp add: projectKOs ko_def) + apply (simp add: ko_def projectKOs objBits_simps archObjSize_def) + apply (simp add: ptr_add_to_new_cap_addrs [OF szo] ht_rl) + apply (simp add: rl[unfolded ko_def] projectKO_opt_retyp_same ko_def projectKOs cong: if_cong) + apply (simp add:objBits_simps archObjSize_def) + apply (erule cmap_relation_retype) + apply (rule relrl) + done + + thus ?thesis using rf empty kdr zro + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' tag_disj_via_td_name + ko_def[symmetric]) + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (simp add: rl' cterl tag_disj_via_td_name h_t_valid_clift_Some_iff tcb_C_size) + apply (clarsimp simp: hrs_htd_update ptr_retyps_htd_safe_neg szo szko + kernel_data_refs_domain_eq_rotate + cvariable_array_ptr_retyps[OF szo] + foldr_upd_app_if [folded data_map_insert_def] + zero_ranges_ptr_retyps + rl empty projectKOs) + done + qed + + have [simp]: + "of_nat pageBits < (4::word32) = False" by (simp add: pageBits_def) + + show ?thesis + apply (rule ccorres_from_vcg_nofail2, rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: cte_wp_at_ctes_of split: if_split_asm) + apply (frule(1) ctes_of_valid', clarsimp) + apply (subst ghost_assertion_size_logic[unfolded o_def, rotated], assumption) + apply (drule(1) valid_global_refsD_with_objSize) + apply (simp add: pageBits_def) + apply (erule valid_untyped_capE) + apply (subst simpler_placeNewObject_def) + apply ((simp add: word_bits_conv objBits_simps archObjSize_def + capAligned_def)+)[4] + apply (simp add: simpler_modify_def rf_sr_htd_safe) + apply (subgoal_tac "{frame ..+ 2 ^ pageBits} \ kernel_data_refs = {}") + prefer 2 + apply (drule(1) valid_global_refsD') + apply (clarsimp simp: Int_commute pageBits_def + intvl_range_conv[where bits=pageBits, unfolded pageBits_def word_bits_def, + simplified]) + apply (intro conjI impI) + apply (erule is_aligned_no_wrap') + apply (clarsimp simp: pageBits_def) + apply (erule is_aligned_weaken, simp add:pageBits_def) + apply (simp add: is_aligned_def bit_simps) + apply (simp add: region_actually_is_bytes_dom_s pageBits_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate + size_of_def pageBits_def + ptr_retyp_htd_safe_neg) + apply clarsimp + apply (cut_tac helper[rule_format]) + prefer 2 + apply fastforce + apply (subst data_map_insert_def[symmetric]) + apply (erule iffD1[OF rf_sr_upd, rotated -1]) + apply simp_all + apply (simp add: hrs_htd_update_def hrs_mem_update_def split_def) + apply (simp add: pageBits_def ptr_retyps_gen_def + del: replicate_numeral) + done +qed + +lemma cmap_relation_ccap_relation: + "\cmap_relation (ctes_of s) 
(cslift s') cte_Ptr ccte_relation;ctes_of s p = Some cte; cteCap cte = cap\ + \ ccap_relation cap + (h_val (hrs_mem (t_hrs_' (globals s'))) (cap_Ptr &(cte_Ptr p\[''cap_C''])))" + apply (erule(1) cmap_relationE1) + apply (clarsimp simp add: typ_heap_simps' ccte_relation_ccap_relation) + done + +lemma ccorres_move_Guard_Seq_strong: + "\\s s'. (s, s') \ sr \ P s \ P' s' \ G' s'; + ccorres_underlying sr \ r xf arrel axf A C' hs a (c;;d) \ + \ ccorres_underlying sr \ r xf arrel axf (A and P) {s. P' s \ (G' s \ s \ C')} hs a + (Guard F (Collect G') c;; + d)" + apply (rule ccorres_guard_imp2, erule ccorres_move_Guard_Seq) + apply assumption + apply auto + done + +lemma ghost_assertion_data_get_gs_clear_region: + "gs_get_assn proc (gs_clear_region addr n gs) = gs_get_assn proc gs" + by (clarsimp simp: ghost_assertion_data_get_def gs_clear_region_def) + +lemma ghost_assertion_size_logic_flex: + "unat (sz :: machine_word) \ gsMaxObjectSize s + \ (s, \') \ rf_sr + \ gs_get_assn cap_get_capSizeBits_'proc (ghost'state_' (globals \')) + = gs_get_assn cap_get_capSizeBits_'proc gs + \ gs_get_assn cap_get_capSizeBits_'proc gs = 0 \ + sz \ gs_get_assn cap_get_capSizeBits_'proc gs" + by (metis ghost_assertion_size_logic) + +(* FIXME move *) +lemma ucast_x3_shiftr_asid_low_bits: + "\ is_aligned base asid_low_bits ; base \ mask asid_bits \ + \ UCAST(7 \ 64) (UCAST(16 \ 7) (UCAST(64 \ 16) base >> asid_low_bits)) = base >> asid_low_bits" + apply (simp add: ucast_shiftr word_le_mask_eq asid_bits_def) + apply (subst le_max_word_ucast_id) + apply simp + apply (drule_tac n=asid_low_bits in le_shiftr) + apply (simp add: asid_low_bits_def asid_bits_def mask_def )+ + done + +lemma performASIDControlInvocation_ccorres: +notes replicate_numeral[simp del] +shows + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' + and ct_active' + and sch_act_simple + and cte_wp_at' (\cte. cteCap cte = capability.UntypedCap isdev frame pageBits idx) parent + and (\s. descendants_of' parent (ctes_of s) = {}) + and ex_cte_cap_to' parent + and (\_. base \ mask asid_bits \ is_aligned base asid_low_bits)) + (UNIV \ {s. frame_' s = Ptr frame} + \ {s. slot_' s = cte_Ptr slot} + \ {s. parent_' s = cte_Ptr parent} + \ {s. 
asid_base_' s = base}) [] + (liftE (performASIDControlInvocation (MakePool frame slot parent base))) + (Call performASIDControlInvocation_'proc)" + apply (rule ccorres_gen_asm) + apply (simp only: liftE_liftM ccorres_liftM_simp) + apply (cinit lift: frame_' slot_' parent_' asid_base_') + apply (rule_tac P="is_aligned frame pageBits \ canonical_address frame" in ccorres_gen_asm) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow[where c="Seq c c'" for c c']) + apply (fold pageBits_def)[1] + apply (simp add: hrs_htd_update) + apply (rule deleteObjects_ccorres) + apply ceqv + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_abstract_cleanup) + apply (rule ccorres_symb_exec_l) + apply (rule_tac P = "rv = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) + apply (simp add: hrs_htd_update del:fun_upd_apply) + apply (rule ccorres_split_nothrow) + + apply (rule_tac cap'="UntypedCap isdev frame pageBits idx" in updateFreeIndex_ccorres) + apply (rule allI, rule conseqPre, vcg) + apply (rule subsetI, clarsimp simp: typ_heap_simps' pageBits_def isCap_simps) + apply (frule ccte_relation_ccap_relation, clarsimp) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: isCap_simps cap_lift_untyped_cap + cap_to_H_simps cap_untyped_cap_lift_def + ccap_relation_def modify_map_def + fun_eq_iff + dest!: word_unat.Rep_inverse' split: if_split) + apply (rule exI, strengthen refl) + apply (case_tac cte', simp add: cap_lift_untyped_cap max_free_index_def mask_def) + apply (simp add: mex_def meq_def del: split_paired_Ex) + apply blast + apply ceqv + apply (ctac (c_lines 2) add: createObjects_asidpool_ccorres + [where idx="max_free_index pageBits", + unfolded pageBits_def, simplified] + pre del: ccorres_Guard_Seq) + apply csymbr + apply (ctac (no_vcg) add: cteInsert_ccorres) + apply (simp add: ccorres_seq_skip del: fun_upd_apply) + apply (rule ccorres_assert) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: gets_def modify_def return_def put_def get_def bind_def + simp del: fun_upd_apply Collect_const) + apply (prop_tac "base >> asid_low_bits < 0x80") + apply (drule_tac n=asid_low_bits in le_shiftr) + apply (fastforce simp: asid_low_bits_def asid_bits_def mask_def dest: plus_one_helper2) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cmachine_state_relation_def + simp del: fun_upd_apply) + apply (clarsimp simp: carch_state_relation_def carch_globals_def + simp del: fun_upd_apply) + apply (simp add: asid_high_bits_of_def fun_upd_def[symmetric] + del: fun_upd_apply) + apply (simp add: ucast_x3_shiftr_asid_low_bits) + apply (erule array_relation_update, rule refl) + apply (clarsimp simp: option_to_ptr_def option_to_0_def) + apply (clarsimp simp: asid_high_bits_def mask_def) + apply wp+ + apply (strengthen valid_pspace_mdb' vp_strgs' valid_pspace_valid_objs' valid_pspace_canonical') + apply (clarsimp simp: is_simple_cap'_def isCap_simps conj_comms placeNewObject_def2) + apply (wp createObjects_valid_pspace'[where ty="Inl (KOArch (KOASIDPool f))" and sz = pageBits for f] + createObjects_cte_wp_at'[where sz = pageBits] + | simp add:makeObjectKO_def objBits_simps archObjSize_def range_cover_full + | simp add: bit_simps untypedBits_defs)+ + apply (clarsimp simp:valid_cap'_def capAligned_def) + apply (wp createObject_typ_at') + apply clarsimp + apply vcg + apply (clarsimp simp:conj_comms objBits_simps simp flip: pageBits_def | + strengthen valid_pspace_mdb' vp_strgs' 
invs_valid_pspace' + valid_pspace_valid_objs' invs_valid_global' + invs_urz)+ + apply (wp updateFreeIndex_forward_invs' + updateFreeIndex_caps_no_overlap''[where sz=pageBits] + updateFreeIndex_pspace_no_overlap'[where sz=pageBits] + updateFreeIndex_caps_overlap_reserved + updateFreeIndex_cte_wp_at) + apply (strengthen exI[where x=parent]) + apply (wp updateFreeIndex_cte_wp_at) + apply clarsimp + apply vcg + apply wp + apply clarsimp + apply (wp getSlotCap_wp) + apply clarsimp + apply (rule_tac Q="\_. cte_wp_at' ((=) (UntypedCap isdev frame pageBits idx) o cteCap) parent + and (\s. descendants_range_in' {frame..frame + (2::machine_word) ^ pageBits - (1::machine_word)} parent (ctes_of s)) + and pspace_no_overlap' frame pageBits + and invs' + and ct_active'" + in hoare_post_imp) + apply (clarsimp simp: cte_wp_at_ctes_of invs_valid_objs' range_cover_full word_bits_conv + pageBits_def max_free_index_def asid_low_bits_def) + apply (case_tac cte,clarsimp simp:invs_valid_pspace') + apply (frule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) + apply (clarsimp simp:valid_cap'_def asid_low_bits_def invs_urz) + apply (strengthen descendants_range_in_subseteq'[mk_strg I E] refl) + apply (simp add: untypedBits_defs word_size_bits_def asid_wf_def) + apply (intro context_conjI) + apply (simp add: is_aligned_def) + apply (simp add: mask_def) + apply (rule descendants_range_caps_no_overlapI'[where d=isdev and cref = parent]) + apply simp + apply (fastforce simp: cte_wp_at_ctes_of) + apply (clarsimp simp flip: add_mask_fold) + apply (clarsimp dest!: upto_intvl_eq simp: mask_2pm1) + apply (wp deleteObjects_cte_wp_at'[where d=isdev and idx = idx and p = parent] + deleteObjects_descendants[where d=isdev and p = parent and idx = idx] + deleteObjects_invs'[where d=isdev and p = parent and idx = idx] + Detype_R.deleteObjects_descendants[where p = parent and idx = idx] + deleteObjects_ct_active'[where d=isdev and cref = parent and idx = idx]) + apply clarsimp + apply vcg + apply (clarsimp simp: conj_comms invs_valid_pspace') + apply (frule cte_wp_at_valid_objs_valid_cap', fastforce) + apply (clarsimp simp: valid_cap'_def capAligned_def cte_wp_at_ctes_of untypedBits_defs + descendants_range'_def2 empty_descendants_range_in') + apply (intro conjI; (rule refl)?) + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range[where ptr = frame]) + apply (fastforce simp: cte_wp_at_ctes_of) + apply assumption+ + apply fastforce + apply simp + apply assumption + apply simp + apply simp + apply (erule empty_descendants_range_in') + apply (fastforce) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap dest!: ccte_relation_ccap_relation) + apply (clarsimp simp: is_aligned_mask max_free_index_def pageBits_def) + apply (rule conjI, rule UNIV_I)? + apply clarsimp? 
+ apply (erule_tac s = sa in rf_sr_ctes_of_cliftE) + apply assumption + apply (frule_tac s = sa in rf_sr_cte_relation) + apply simp+ + apply (clarsimp simp:typ_heap_simps' region_is_bytes'_def[where sz=0]) + apply (frule ccte_relation_ccap_relation) + apply (clarsimp simp: cap_get_tag_isCap hrs_htd_update) + apply (clarsimp simp: hrs_htd_update_def split_def + pageBits_def + split: if_split) + apply (clarsimp simp: word_sle_def is_aligned_mask[symmetric] + ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + apply (subst ghost_assertion_size_logic_flex[unfolded o_def, rotated]) + apply assumption + apply (simp add: ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + apply (drule valid_global_refsD_with_objSize, clarsimp)+ + apply (clarsimp simp: isCap_simps dest!: ccte_relation_ccap_relation) + apply (cut_tac ptr=frame and bits=12 + and htd="typ_region_bytes frame 12 (hrs_htd (t_hrs_' (globals s')))" + in typ_region_bytes_actually_is_bytes) + apply (simp add: hrs_htd_update) + apply (clarsimp simp: region_actually_is_bytes'_def[where len=0]) + apply (intro conjI) + apply (clarsimp elim!:is_aligned_weaken) + apply (erule is_aligned_no_wrap', simp) + apply (simp add:is_aligned_def) + apply (simp add: hrs_htd_def) + apply (drule region_actually_is_bytes_dom_s[OF _ order_refl]) + apply (simp add: hrs_htd_def split_def) + apply (clarsimp simp: ccap_relation_def) + apply (clarsimp simp: cap_asid_pool_cap_lift) + apply (clarsimp simp: cap_to_H_def) + apply (clarsimp simp: asid_bits_def) + apply (drule word_le_mask_eq, simp) + apply (simp add: canonical_address_mask_shift canonical_bit_def) + done + +lemmas performARMMMUInvocations + = ccorres_invocationCatch_Inr performInvocation_def + AARCH64_H.performInvocation_def performARMMMUInvocation_def + liftE_bind_return_bindE_returnOk + +(* FIXME AARCH64: consider using isVTableRoot *) +lemma slotcap_in_mem_PageTable: + "\ slotcap_in_mem cap slot (ctes_of s); (s, s') \ rf_sr \ + \ \v. 
cslift s' (cte_Ptr slot) = Some v + \ (cap_get_tag (cte_C.cap_C v) = scast cap_page_table_cap) + = (isArchObjectCap cap \ isPageTableCap (capCap cap) \ capPTType (capCap cap) = NormalPT_T) + \ ccap_relation cap (cte_C.cap_C v)" + apply (clarsimp simp: slotcap_in_mem_def) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (clarsimp dest!: ccte_relation_ccap_relation) + apply (simp add: cap_get_tag_isCap_ArchObject2) + done + +lemma ccap_relation_PageTableCap_IsMapped: + "\ ccap_relation (capability.ArchObjectCap (arch_capability.PageTableCap p NormalPT_T m)) ccap \ + \ (capPTIsMapped_CL (cap_page_table_cap_lift ccap) = 0) = (m = None)" + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (simp add: cap_page_table_cap_lift_def) + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_splits) + apply (simp add: to_bool_def) + done + +lemma ccap_relation_VSpaceCap_IsMapped: + "\ ccap_relation (capability.ArchObjectCap (arch_capability.PageTableCap p VSRootPT_T m)) ccap \ + \ (capVSIsMapped_CL (cap_vspace_cap_lift ccap) = 0) = (m = None)" + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (simp add: cap_vspace_cap_lift_def) + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_splits) + apply (simp add: to_bool_def) + done + +lemma ccap_relation_PageTableCap_BasePtr: + "\ ccap_relation (capability.ArchObjectCap (arch_capability.PageTableCap p NormalPT_T m)) ccap \ + \ capPTBasePtr_CL (cap_page_table_cap_lift ccap) = p" + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (simp add: cap_page_table_cap_lift_def) + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_splits) + done + +lemma ccap_relation_PageTableCap_MappedASID: + "\ ccap_relation (capability.ArchObjectCap (arch_capability.PageTableCap p NormalPT_T (Some (a,b)))) + ccap \ + \ capPTMappedASID_CL (cap_page_table_cap_lift ccap) = a" + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (simp add: cap_page_table_cap_lift_def) + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_splits) + done + +lemma bind_bindE_liftE: + "f >>= g >>=E h + = doE a <- liftE f; + g a >>=E h + odE" + by (simp add: liftE_def bindE_def lift_def bind_assoc) + +lemma liftME_option_catch_bind: + "(liftME Some m const (return None)) + = do x <- m; + case x of Inl e \ return None | Inr b \ return (Some b) + od" + apply (clarsimp simp: const_def catch_def liftME_def bindE_def returnOk_def bind_def) + apply (rule ext) + apply (clarsimp simp: return_def) + apply (case_tac "m s", clarsimp) + apply (auto simp: split_def throwError_def return_def Nondet_Monad.lift_def + split: prod.splits sum.splits) + done + +lemma maybeVSpaceForASID_findVSpaceForASID_ccorres: + "ccorres + (\rv rv'. (case rv of None \ (findVSpaceForASID_ret_C.status_C rv' \ scast EXCEPTION_NONE) + | Some pteptr \ (findVSpaceForASID_ret_C.status_C rv' = scast EXCEPTION_NONE) + \ pte_Ptr pteptr = (vspace_root_C rv'))) + ret__struct_findVSpaceForASID_ret_C_' + (valid_arch_state' and (\_. 
asid_wf asid)) + (\\asid___unsigned_long = asid\) + hs + (maybeVSpaceForASID asid) + (Call findVSpaceForASID_'proc)" + apply (rule ccorres_gen_asm) + apply (clarsimp simp: maybeVSpaceForASID_def liftME_option_catch_bind) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_guard_imp) + apply (ctac (no_vcg) add: findVSpaceForASID_ccorres) + apply (wpc; clarsimp) + apply (rule ccorres_return_Skip') + apply (rule ccorres_return_Skip') + apply wpsimp + apply simp_all + apply (rule conjI; clarsimp) + done + +lemma cap_case_PageTableCap2: + "(case cap of ArchObjectCap (PageTableCap p VSRootPT_T mapdata) + \ f p mapdata | _ \ g) + = (if isArchObjectCap cap \ isPageTableCap (capCap cap) \ capPTType (capCap cap) = VSRootPT_T + then f (capPTBasePtr (capCap cap)) (capPTMappedAddress (capCap cap)) + else g)" + by (simp add: isCap_simps split: capability.split arch_capability.split pt_type.split) + +lemma lookupPTSlotFromLevel_bitsLeft_less_64: + "n \ maxPTLevel \ \\_. True\ lookupPTSlotFromLevel n p vptr \\rv _. fst rv < 64\" + apply (induct n arbitrary: p) + apply (simp add: lookupPTSlotFromLevel.simps) + apply (wpsimp simp: pageBits_def) + apply (simp add: lookupPTSlotFromLevel.simps) + apply wpsimp + apply assumption + apply (wpsimp wp: hoare_drop_imps)+ + apply (simp add: ptBitsLeft_def ptTranslationBits_def pageBits_def maxPTLevel_def + split: if_splits) + done + +lemma lookupPTSlotFromLevel_bitsLeft_le_pptrBaseOffset_alignment: + "n \ maxPTLevel \ \\_. True\ lookupPTSlotFromLevel n p vptr \\rv _. fst rv \ pptrBaseOffset_alignment\" + apply (induct n arbitrary: p) + apply (simp add: lookupPTSlotFromLevel.simps) + apply (wpsimp simp: pageBits_def pptrBaseOffset_alignment_def) + apply (simp add: lookupPTSlotFromLevel.simps) + apply wpsimp + apply assumption + apply (wpsimp wp: hoare_drop_imps)+ + apply (simp add: ptBitsLeft_def ptTranslationBits_def pageBits_def maxPTLevel_def + pptrBaseOffset_alignment_def + split: if_splits) + done + +lemma lookupPTSlot_bitsLeft_less_64: + "\\\ lookupPTSlot p vptr \\rv _. fst rv < 64\" + unfolding lookupPTSlot_def + by (wpsimp wp: lookupPTSlotFromLevel_bitsLeft_less_64) + +lemma lookupPTSlot_bitsLeft_le_pptrBaseOffset_alignment[wp]: + "\\\ lookupPTSlot p vptr \\rv _. fst rv \ pptrBaseOffset_alignment\" + unfolding lookupPTSlot_def + by (wpsimp wp: lookupPTSlotFromLevel_bitsLeft_le_pptrBaseOffset_alignment) + +(* See comment in decode ARMPageTableInvocation for why "20" *) +definition + "enoughPTBits n \ n \ pageBits \ 20 \ n" + +lemma lookupPTSlotFromLevel_enoughPTBits: + "n \ maxPTLevel \ \\_. True\ lookupPTSlotFromLevel n p vptr \\rv _. enoughPTBits (fst rv)\" + unfolding enoughPTBits_def + apply (induct n arbitrary: p) + apply (simp add: lookupPTSlotFromLevel.simps) + apply (wpsimp simp: pageBits_def) + apply (simp add: lookupPTSlotFromLevel.simps) + apply wpsimp + apply assumption + apply (wpsimp wp: hoare_drop_imps)+ + apply (simp add: ptBitsLeft_def ptTranslationBits_def pageBits_def maxPTLevel_def + split: if_splits) + done + +lemma lookupPTSlot_enoughPTBits[wp]: + "\\\ lookupPTSlot p vptr \\rv _. enoughPTBits (fst rv)\" + unfolding lookupPTSlot_def + by (wpsimp wp: lookupPTSlotFromLevel_enoughPTBits) + +lemma slotcap_in_mem_VSpace: + "\ slotcap_in_mem cap slot (ctes_of s); (s, s') \ rf_sr \ + \ \v. 
cslift s' (cte_Ptr slot) = Some v + \ (cap_get_tag (cte_C.cap_C v) = scast cap_vspace_cap) + = (isArchObjectCap cap \ isPageTableCap (capCap cap) \ capPTType (capCap cap) = VSRootPT_T) + \ ccap_relation cap (cte_C.cap_C v)" + apply (clarsimp simp: slotcap_in_mem_def) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (clarsimp dest!: ccte_relation_ccap_relation) + apply (simp add: cap_get_tag_isCap_ArchObject2) + done + +lemma Restart_valid[simp]: + "valid_tcb_state' Restart s" + by (simp add: valid_tcb_state'_def) + +lemma canonical_address_mask_shift2: + "\ canonical_address p; n + m = Suc canonical_bit; 0 < n \ \ p && (mask n << m) = p >> m << m" + apply (prop_tac "m = Suc canonical_bit - n", arith) + supply canonical_bit_def[simp] + apply (simp add: canonical_address_def canonical_address_of_def) + apply word_eqI + apply (rule iffI; clarsimp) + apply (rename_tac n') + apply (prop_tac "n' < Suc canonical_bit"; fastforce) + done + +lemma capVSMappedASID_CL_masked[unfolded asid_bits_def, simplified]: + "ccap_relation (ArchObjectCap (PageTableCap p VSRootPT_T (Some (asid, vref)))) cap + \ capVSMappedASID_CL (cap_vspace_cap_lift cap) && mask asid_bits = + capVSMappedASID_CL (cap_vspace_cap_lift cap)" + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_splits) + apply (clarsimp simp: cap_vspace_cap_lift_def) + apply (clarsimp simp: cap_lift_def Let_def asid_bits_def split: if_splits) + done + +lemma pptrUserTop_le_canonical_val[unfolded canonical_bit_def, simplified]: + "x \ pptrUserTop \ x \ mask (Suc canonical_bit)" + by (simp add: pptrUserTop_def canonical_bit_def order_trans[OF _ mask_mono] split: if_split_asm) + +lemma decodeARMPageTableInvocation_ccorres: + "\interpret_excaps extraCaps' = excaps_map extraCaps; + isPageTableCap cp; capPTType cp = NormalPT_T\ \ + ccorres + (intr_and_se_rel \ dc) + (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps \ ctes_of) + and cte_wp_at' ((=) (ArchObjectCap cp) \ cteCap) slot + and valid_cap' (ArchObjectCap cp) + and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer) + (UNIV \ {s. invLabel_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. cte_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. 
buffer_' s = option_to_ptr buffer}) + hs + (decodeARMMMUInvocation label args cptr slot cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeARMPageTableInvocation_'proc)" + supply Collect_const[simp del] if_cong[cong] option.case_cong[cong] + apply (clarsimp simp only: isCap_simps) + apply (cinit' lift: invLabel_' length___unsigned_long_' cte_' + current_extra_caps_' cap_' buffer_' + simp: decodeARMMMUInvocation_def invocation_eq_use_types + decodeARMPageTableInvocation_def) + apply (simp add: Let_def isCap_simps if_to_top_of_bind + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_Cond_rhs_Seq) + (* ARMPageTableUnmap *) + apply (rule ccorres_split_throws) + apply (simp add: liftE_bindE bind_assoc) + apply (rule ccorres_symb_exec_l[OF _ getCTE_inv _ empty_fail_getCTE]) + apply (rule ccorres_rhs_assoc)+ + (* check cap is final *) + apply (ctac add: isFinalCapability_ccorres) + apply (simp add: unlessE_def if_to_top_of_bind if_to_top_of_bindE ccorres_seq_cond_raise) + apply (rule ccorres_cond2'[where R=\]) + apply (clarsimp simp: from_bool_0) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: returnOk_bind bindE_assoc performARMMMUInvocations) + apply (ctac add: setThreadState_ccorres) + apply (ctac add: performPageTableInvocationUnmap_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply wpsimp + apply (vcg exspec=performPageTableInvocationUnmap_modifies) + apply (wpsimp wp: sts_invs_minor' simp: isCap_simps) + apply clarsimp + apply (vcg exspec=setThreadState_modifies) + apply clarsimp + apply (wp (once) hoare_drop_imp, wp isFinalCapability_inv) + apply simp + apply (vcg exspec=isFinalCapability_modifies) + apply (wp getCTE_wp) + apply (vcg exspec=performPageTableInvocationUnmap_modifies exspec=isFinalCapability_modifies + exspec=findVSpaceForASID_modifies exspec=setThreadState_modifies) + + (* we're done with unmap case *) + apply simp + apply (rule ccorres_Cond_rhs_Seq) + (* neither map nor unmap, throw *) + apply (rule ccorres_equals_throwError) + apply (simp split: invocation_label.split arch_invocation_label.split + add: throwError_bind invocationCatch_def) + apply fastforce + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + + (* ARMPageTableMap *) + apply clarsimp + apply csymbr + apply clarsimp + (* ensure we have enough extraCaps *) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: word_less_nat_alt throwError_bind invocationCatch_def) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_equals_throwError) + apply (simp add: throwError_bind split: list.split) + apply fastforce + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply (simp add: interpret_excaps_test_null excaps_map_def) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind split: list.split) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* we have enough extraCaps *) + apply (simp add: list_case_If2 split_def + word_less_nat_alt length_ineq_not_Nil Let_def + whenE_bindE_throwError_to_if if_to_top_of_bind + decodeARMPageTableInvocationMap_def) + (* ensure 
the page table cap is mapped *) + apply csymbr + apply (simp add: ccap_relation_PageTableCap_IsMapped) + apply (rule ccorres_Cond_rhs_Seq; clarsimp) + (* not mapped *) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind split: list.split) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* mapped *) + apply (simp add: checkVSpaceRoot_def cap_case_PageTableCap2 split_def) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (rule_tac r'="(\rv _ rv'. ((cap_get_tag rv' = scast cap_vspace_cap) + = (isArchObjectCap rv \ isPageTableCap (capCap rv) \ + capPTType (capCap rv) = VSRootPT_T)) + \ (ccap_relation rv rv')) (fst (extraCaps ! 0))" + and xf'=vspaceRootCap_' in ccorres_split_nothrow) + apply (rule ccorres_from_vcg[where P="excaps_in_mem extraCaps \ ctes_of" and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: excaps_in_mem_def return_def neq_Nil_conv) + apply (drule(1) slotcap_in_mem_VSpace) + apply (frule interpret_excaps_eq[rule_format, where n=0], simp) + apply (clarsimp simp: typ_heap_simps' mask_def) + apply (rename_tac rv' t t') + apply (simp add: word_sless_def word_sle_def) + apply ceqv + apply csymbr + apply clarsimp + apply (simp add: whenE_def if_to_top_of_bind if_to_top_of_bindE) + apply (clarsimp simp: hd_conv_nth) + (* is first extra cap a vspace cap? *) + apply (rule ccorres_if_lhs[rotated]; clarsimp) + (* if not vspace cap, clear up the C if condition calculation, then throw *) + apply (rule ccorres_cond_true_seq) + apply ccorres_rewrite + apply (rule ccorres_equals_throwError) + apply (simp split: invocation_label.split arch_invocation_label.split + add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* first extracap is a vspace cap *) + apply (clarsimp simp: isCap_simps isValidVTableRoot_def isVTableRoot_def) + (* ensure the vspace is mapped *) + apply (clarsimp simp: option.case_eq_if if_to_top_of_bind if_to_top_of_bindE) + apply (rename_tac m) + apply (prop_tac "m = capPTMappedAddress (capCap (fst (extraCaps ! 
0)))", simp) + apply hypsubst + apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) + apply vcg + apply (solves \clarsimp simp: from_bool_def split: bool.split\) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) + apply vcg + apply (solves \clarsimp simp: hd_conv_nth not_less length_le_helper mask_def pptrUserTop_val\) + apply (fold not_None_def)[1] (* avoid expanding capPTMappedAddress *) + apply clarsimp + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: lookupError_injection invocationCatch_use_injection_handler + injection_bindE[OF refl refl] injection_handler_If bindE_assoc + injection_handler_throwError injection_liftE[OF refl]) + apply (ctac add: ccorres_injection_handler_csum1[OF ccorres_injection_handler_csum1, + OF findVSpaceForASID_ccorres]) + (* ensure level 1 pt pointer supplied by user is actually a vspace root *) + apply (simp add: Collect_False if_to_top_of_bindE) + apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) + apply vcg + apply (solves\clarsimp simp: asidInvalid_def isCap_simps ccap_relation_vspace_base\) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: bindE_assoc) + apply (ctac pre: ccorres_liftE_Seq add: lookupPTSlot_ccorres) + apply (simp add: liftE_bindE) + apply (rule ccorres_pre_getObject_pte) + apply (rename_tac pte) + apply (simp add: whenE_def if_to_top_of_bind if_to_top_of_bindE) + apply clarsimp + apply (rename_tac pte_slot ptSlot___struct_lookupPTSlot_ret_C pte) + apply wpfix + (* 20 in enoughPTBits is from the bitfield generator: capPTMappedAddress is a field_high + of width 28, which leaves 20 bottom bits at 48 canonical width -- the only reason + this works is because ptBitsLeft must be \ 20 when it returns more than pageBits and + we later mask by ptBitsLeft. 
*) + apply (rule_tac P="enoughPTBits (unat (ptBitsLeft_C ptSlot___struct_lookupPTSlot_ret_C))" + in ccorres_gen_asm) + (* ensure we have found a valid pte with more bits than pageBits left to look up *) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac val="from_bool (unat (ptBitsLeft_C ptSlot___struct_lookupPTSlot_ret_C) + = pageBits + \ \ pte = AARCH64_H.InvalidPTE)" + and xf'=ret__int_' and R="ko_at' pte pte_slot" and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply vcg + apply clarsimp + apply (simp add: from_bool_eq_if' pageBits_def) + apply (erule cmap_relationE1[OF rf_sr_cpte_relation], erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps from_bool_eq_if) + apply (simp flip: word_unat.Rep_inject) + apply (auto simp: cpte_relation_def Let_def pte_lift_def case_bool_If pte_tag_defs + split: pte.split_asm if_splits)[1] + apply ceqv + apply clarsimp + apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) + apply vcg + apply (solves clarsimp) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* checks are done, move on to doing the mapping *) + apply (clarsimp simp: injection_handler_returnOk injection_handler_assertE bindE_assoc) + apply (simp add: assertE_liftE liftE_bindE) + apply (rule ccorres_assert) + apply (simp add: performARMMMUInvocations bindE_assoc) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac P="unat (ptBitsLeft_C ptSlot___struct_lookupPTSlot_ret_C) < 64" + in ccorres_gen_asm) (* bitsLeft should not exceed word bits *) + apply ccorres_rewrite + apply csymbr + apply (ctac add: setThreadState_ccorres) + apply (rule_tac A="cte_at' slot" and A'=UNIV in ccorres_guard_imp2) + apply (ctac add: performPageTableInvocationMap_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply wpsimp+ + apply (vcg exspec=performPageTableInvocationMap_modifies) + apply clarsimp + apply (rule conjI) + apply (simp (no_asm) add: ccap_relation_def) + apply (clarsimp simp: cap_get_tag_to_H map_option_Some_eq2 cap_page_table_cap_lift) + apply (clarsimp simp: cap_to_H_def ccap_relation_PageTableCap_BasePtr + ccap_relation_vspace_mapped_asid) + apply (subst ccap_relation_vspace_mapped_asid) + apply assumption + apply (simp add: capVSMappedASID_CL_masked) + apply (simp flip: mask_2pm1) + apply (drule at_least_2_args) + apply (clarsimp simp: hd_conv_nth) + apply (solves \simp add: mask_shift_neg_mask_eq not_less pptrUserTop_le_canonical_val + enoughPTBits_def + flip: word_le_nat_alt\) + apply (clarsimp simp: cpte_relation_def pte_lift_def Let_def pte_pte_table_lift_def + pte_tag_defs ccap_relation_PageTableCap_BasePtr + split: if_splits) + apply (drule addrFromPPtr_canonical_in_kernel_window, simp add: word_less_nat_alt) + apply (solves \simp add: canonical_address_mask_shift2 pageBits_def canonical_bit_def\) + apply wpsimp+ + apply (vcg exspec=setThreadState_modifies) + apply (simp add: get_capPtr_CL_def cap_get_tag_isCap_unfolded_H_cap typ_heap_simps) + apply vcg + apply clarsimp + apply (wpsimp wp: lookupPTSlot_inv hoare_vcg_all_lift hoare_drop_imps lookupPTSlot_bitsLeft_less_64) + apply (simp add: typ_heap_simps) + (* guard against False by schematic instantiation because of simp above *) + apply (rule conseqPre, vcg exspec=lookupPTSlot_modifies, rule order_refl) + (* throw on failed lookup *) + apply clarsimp + apply ccorres_rewrite + apply (rule_tac P'="{s. 
errstate s = find_ret}" in ccorres_from_vcg_throws[where P=\]) + apply clarsimp + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) + apply (erule lookup_failure_rel_fault_lift[rotated]) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) + apply clarsimp + apply (wp injection_wp[OF refl] findVSpaceForASID_inv hoare_imp_eq_substR) + apply clarsimp + apply (vcg exspec=findVSpaceForASID_modifies) + apply clarsimp + apply wp + apply clarsimp + apply vcg + apply wpsimp + apply clarsimp + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp simp: cte_wp_at_ctes_of excaps_map_def + word_sle_def word_sless_def bit_simps not_None_def) + apply (rule conjI) + subgoal + (* ARMPageTableUnmap: Haskell preconditions *) + by (fastforce simp: ct_in_state'_def cte_wp_at_ctes_of isCap_simps rf_sr_ksCurThread + elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread') + + apply (rule conjI) + subgoal for \ s _ _ + (* ARMPageTableMap: Haskell preconditions *) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: sysargs_rel_to_n word_le_nat_alt + linorder_not_less) + apply (clarsimp | drule length_le_helper)+ + apply (prop_tac "s \' fst (extraCaps ! 0)") + apply (clarsimp simp: neq_Nil_conv excaps_in_mem_def + slotcap_in_mem_def dest!: ctes_of_valid') + by (auto simp: ct_in_state'_def pred_tcb_at' mask_def valid_tcb_state'_def + valid_cap'_def wellformed_acap'_def wellformed_mapdata'_def + elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] + + apply (rule conjI) + subgoal for _ v1 + (* ARMPageTableUnmap: C preconditions *) + by (clarsimp simp: rf_sr_ksCurThread) + + subgoal for p + (* ARMPageTableMap: C preconditions *) + apply (clarsimp simp: neq_Nil_conv[where xs=extraCaps] + dest!: sym[where s="ArchObjectCap cp" for cp]) + apply (clarsimp simp: cap_get_tag_isCap_ArchObject isCap_simps + word_sle_def word_sless_def word_less_nat_alt) + apply (frule length_ineq_not_Nil) + apply (clarsimp simp: rf_sr_ksCurThread hd_conv_nth isValidVTableRoot_def2) + apply (rule conjI) + apply (clarsimp simp: ccap_relation_vspace_mapped_asid[symmetric]) + apply (clarsimp simp: from_bool_0) + done + done + +lemma checkVPAlignment_spec: + "\s. \\ \s. \sz < 3\ Call checkVPAlignment_'proc + {t. ret__unsigned_long_' t = from_bool + (vmsz_aligned (w_' s) (framesize_to_H (sz_' s)))}" + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: mask_eq_iff_w2p word_size) + apply (rule conjI) + apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.split) + apply (simp add: vmsz_aligned_def is_aligned_mask mask_def split: if_split) + done + +definition + ptr_range_to_list :: "('a :: c_type) ptr \ machine_word \ 'a ptr list" +where + "ptr_range_to_list ptr lenV \ + map (\x. 
CTypesDefs.ptr_add ptr (of_nat x)) [0 ..< unat lenV]" + +definition + "pte_range_relation xs pte_ran \ + xs = map ptr_val (ptr_range_to_list (pte_range_C.base_C pte_ran) + (pte_range_C.length_C pte_ran)) + \ 1 \ pte_range_C.length_C pte_ran" + +definition + "pde_range_relation xs pde_ran \ + xs = map ptr_val (ptr_range_to_list (pde_range_C.base_C pde_ran) + (pde_range_C.length_C pde_ran)) + \ 1 \ pde_range_C.length_C pde_ran" + +lemma framesize_from_H_eqs: + "(framesize_from_H vsz = scast Kernel_C.ARMSmallPage) = (vsz = ARMSmallPage)" + "(framesize_from_H vsz = scast Kernel_C.ARMLargePage) = (vsz = ARMLargePage)" + "(framesize_from_H vsz = scast Kernel_C.ARMHugePage) = (vsz = ARMHugePage)" + by (simp add: framesize_from_H_def vm_page_size_defs split: vmpage_size.split)+ + +lemma ptr_add_uint_of_nat [simp]: + "a +\<^sub>p uint (of_nat b :: machine_word) = a +\<^sub>p (int b)" + by (clarsimp simp: CTypesDefs.ptr_add_def) + +declare int_unat[simp] + +lemma obj_at_pte_aligned: + "obj_at' (\a::AARCH64_H.pte. True) ptr s ==> is_aligned ptr word_size_bits" + apply (drule obj_at_ko_at') + apply (clarsimp dest!:ko_at_is_aligned' + simp: objBits_simps bit_simps + elim!: is_aligned_weaken) + done + +lemma storePTE_Basic_ccorres'': + "ccorres dc xfdc \ {s. ptr_val (f s) = p \ cpte_relation pte pte'} hs + (storePTE p pte) + (Guard C_Guard {s. s \\<^sub>c f s} + (Basic (\s. globals_update( t_hrs_'_update + (hrs_mem_update (heap_update (f s) pte'))) s)))" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_gen_asm2, erule storePTE_Basic_ccorres') + apply simp + done + +lemma pageBitsForSize_le_64: "of_nat (pageBitsForSize x) < (64::machine_word)" + by (cases x; simp add: bit_simps) + +lemma pte_lift_pte_invalid_eq: + "(pte_get_tag pte = scast pte_pte_invalid) = (pte_lift pte = Some Pte_pte_invalid)" + by (auto simp: pte_lift_def pte_tag_defs Let_def split: if_splits) + +lemma cpte_relation_InvalidPTE[simp]: + "cpte_relation InvalidPTE pte = (pte_lift pte = Some Pte_pte_invalid)" + by (clarsimp simp: cpte_relation_def) + +lemma cpte_relation_pte_invalid_eq: + "cpte_relation pte pte' \ (pte_lift pte' = Some Pte_pte_invalid) = (pte = InvalidPTE)" + by (clarsimp simp: cpte_relation_def Let_def split: if_splits pte.splits) + +lemma performPageInvocationMap_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and cte_at' slot and (\s. 7 \ gsMaxObjectSize s) + and (\_. (isFrameCap acap \ + case_option False wellformed_mapdata' (capFMappedAddress acap)))) + ({s. asid___unsigned_long_' s = fst (the (capFMappedAddress acap))} \ + {s. cpte_relation (fst mapping) (pte_' s)} \ + {s. ccap_relation (ArchObjectCap acap) (cap_' s)} \ + {s. ctSlot_' s = cte_Ptr slot} \ + {s. ptSlot_' s = pte_Ptr (snd mapping)}) [] + (liftE (performPageInvocation (PageMap acap slot mapping))) + (Call performPageInvocationMap_'proc)" + apply (rule ccorres_gen_asm) + apply (simp only: liftE_liftM ccorres_liftM_simp) + apply (clarsimp split: option.splits) + apply (cinit lift: pte_' cap_' ctSlot_' ptSlot_') + apply wpc (* split mapping *) + apply (rename_tac m_pte m_slot) + apply (rule ccorres_pre_getObject_pte) + apply (rename_tac oldPTE) + apply (rule_tac xf'=tlbflush_required_' and + val="from_bool (oldPTE \ InvalidPTE)" and + R="\" and + R'="{s. \old_pte. 
cslift s (pte_Ptr m_slot) = Some old_pte \ + cpte_relation oldPTE old_pte}" in + ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg, clarsimp) + apply (clarsimp simp: typ_heap_simps from_bool_0 pte_lift_pte_invalid_eq + cpte_relation_pte_invalid_eq) + apply ceqv + apply (rule ccorres_move_c_guard_cte) + apply ctac (* updateCap *) + apply (rule ccorres_split_nothrow) + apply clarsimp + apply (rule storePTE_Basic_ccorres, simp) + apply ceqv + apply csymbr + apply (ctac add: cleanByVA_PoU_ccorres) + apply (clarsimp simp: when_def if_to_top_of_bind simp del: Collect_const) + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond[where R=\], clarsimp) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (ctac (no_vcg) add: invalidateTLBByASIDVA_ccorres) + apply (rule ccorres_rel_imp, rule ccorres_return_C[where xf=ret__unsigned_long_']; simp) + apply wp + apply ccorres_rewrite + apply (clarsimp cong: ccorres_prog_only_cong) + apply (rule ccorres_rel_imp, rule ccorres_return_C[where xf=ret__unsigned_long_']; simp) + apply wpsimp + apply (wpsimp wp: hoare_drop_imp) + apply (vcg exspec=cleanByVA_PoU_modifies) + apply (wp hoare_drop_imp) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (fastforce simp: wellformed_mapdata'_def typ_heap_simps isCap_simps cap_get_tag_isCap + ccap_relation_FrameCap_MappedAddress) + done + +lemma performPageGetAddress_ccorres: + "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart)) + (\\base_ptr = ptr\ \ \\call = from_bool isCall\) [] + (do reply \ performPageInvocation (PageGetAddr ptr); + liftE (replyOnRestart thread reply isCall) od) + (Call performPageGetAddress_'proc)" + supply Collect_const[simp del] + apply (cinit' lift: base_ptr_' call_' simp: performPageInvocation_def) + apply (clarsimp simp: bind_assoc) + apply csymbr + apply csymbr + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=thread_' in ccorres_abstract, ceqv) + apply (rename_tac cthread) + apply (rule_tac P="cthread = tcb_ptr_to_ctcb_ptr thread" in ccorres_gen_asm2) + apply (rule ccorres_Cond_rhs_Seq[rotated]; clarsimp) + apply (simp add: replyOnRestart_def liftE_def bind_assoc) + apply (rule getThreadState_ccorres_foo, rename_tac tstate) + apply (rule_tac P="tstate = Restart" in ccorres_gen_asm) + apply clarsimp + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac (no_vcg) add: setThreadState_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) + apply (rule ccorres_rhs_assoc)+ + apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) + apply (rule_tac P="\s. 
ksCurThread s = thread" in ccorres_cross_over_guard) + apply (rule getThreadState_ccorres_foo, rename_tac tstate) + apply (rule_tac P="tstate = Restart" in ccorres_gen_asm) + apply (clarsimp simp: bind_assoc) + apply (simp add: replyFromKernel_def bind_assoc) + apply (ctac add: lookupIPCBuffer_ccorres) + apply (ctac add: setRegister_ccorres) + apply (simp add: setMRs_single) + apply (ctac add: setMR_as_setRegister_ccorres[where offset=0]) + apply clarsimp + apply csymbr + apply (simp only: setMessageInfo_def bind_assoc) + apply ctac + apply simp + apply (ctac add: setRegister_ccorres) + apply (ctac add: setThreadState_ccorres) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) + apply (vcg exspec=setThreadState_modifies) + apply wpsimp + apply (vcg exspec=setRegister_modifies) + apply wpsimp + apply clarsimp + apply (vcg) + apply wpsimp + apply (clarsimp simp: msgInfoRegister_def AARCH64.msgInfoRegister_def + Kernel_C.msgInfoRegister_def) + apply (vcg exspec=setMR_modifies) + apply wpsimp + apply clarsimp + apply (vcg exspec=setRegister_modifies) + apply wpsimp + apply clarsimp + apply (vcg exspec=lookupIPCBuffer_modifies) + apply clarsimp + apply vcg + apply clarsimp + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + rf_sr_ksCurThread msgRegisters_unfold + seL4_MessageInfo_lift_def message_info_to_H_def mask_def) + apply (cases isCall) + apply (auto simp: AARCH64.badgeRegister_def AARCH64_H.badgeRegister_def Kernel_C.badgeRegister_def + fromPAddr_def ThreadState_defs Kernel_C.X0_def Kernel_C.X1_def + pred_tcb_at'_def obj_at'_def ct_in_state'_def) + done + +lemma vmsz_aligned_addrFromPPtr': + "vmsz_aligned (addrFromPPtr p) sz + = vmsz_aligned p sz" + apply (simp add: vmsz_aligned_def AARCH64.addrFromPPtr_def pptrBaseOffset_def paddrBase_def) + apply (subgoal_tac "is_aligned AARCH64.pptrBase (pageBitsForSize sz)") + apply (rule iffI) + apply (drule(1) aligned_add_aligned) + apply (simp add: pageBitsForSize_def word_bits_def split: vmpage_size.split) + apply simp + apply (erule(1) aligned_sub_aligned) + apply (simp add: pageBitsForSize_def word_bits_def bit_simps split: vmpage_size.split) + apply (simp add: pageBitsForSize_def AARCH64.pptrBase_def is_aligned_def bit_simps + canonical_bit_def + split: vmpage_size.split)+ + done + +lemmas vmsz_aligned_addrFromPPtr + = vmsz_aligned_addrFromPPtr' + vmsz_aligned_addrFromPPtr'[unfolded addrFromPPtr_def] + vmsz_aligned_addrFromPPtr'[unfolded vmsz_aligned_def] + vmsz_aligned_addrFromPPtr'[unfolded addrFromPPtr_def vmsz_aligned_def] + +lemmas framesize_from_H_simps + = framesize_from_H_def[split_simps vmpage_size.split] + +lemma shiftr_asid_low_bits_mask_asid_high_bits: + "(asid :: machine_word) \ mask asid_bits + \ (asid >> asid_low_bits) && mask asid_high_bits = asid >> asid_low_bits" + apply (rule iffD2 [OF mask_eq_iff_w2p]) + apply (simp add: asid_high_bits_def word_size) + apply (rule shiftr_less_t2n) + apply (simp add: asid_low_bits_def asid_high_bits_def mask_def) + apply (simp add: asid_bits_def) + done + +lemma slotcap_in_mem_valid: + "\ slotcap_in_mem cap slot (ctes_of s); valid_objs' s \ + \ s \' cap" + apply (clarsimp simp: slotcap_in_mem_def) + apply (erule(1) ctes_of_valid') + done + +lemma injection_handler_if_returnOk: + "injection_handler Inl (if a then b else returnOk c) + = (if a 
then (injection_handler Inl b) else returnOk c)" + apply (clarsimp simp:whenE_def injection_handler_def) + apply (clarsimp simp:injection_handler_def + throwError_def return_def bind_def returnOk_def + handleE'_def split:if_splits) + done + +lemma pbfs_less: "pageBitsForSize sz < 31" + by (case_tac sz,simp_all add: bit_simps) + +lemma cte_wp_at_eq_gsMaxObjectSize: + "cte_wp_at' ((=) cap o cteCap) slot s + \ valid_global_refs' s + \ 2 ^ capBits cap \ gsMaxObjectSize s" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) valid_global_refsD_with_objSize) + apply (clarsimp simp: capMaster_eq_capBits_eq[OF capMasterCap_maskCapRights]) + done + +lemma two_nat_power_pageBitsForSize_le: + "(2 :: nat) ^ pageBits \ 2 ^ pageBitsForSize vsz" + by (cases vsz; simp add: bit_simps) + +lemma ptrFromPAddr_add_left: + "ptrFromPAddr (x + y) = ptrFromPAddr x + y" + unfolding ptrFromPAddr_def by simp + +lemma at_least_3_args: + "\ length args < 3 \ \a b c d. args = a#b#c#d" + apply (case_tac args; simp) + apply (rename_tac list, case_tac list; simp)+ + done + +lemma list_3_collapse: + "\ length xs \ 3; a = xs ! 0; b = xs ! 1; c = xs ! 2; d = drop 3 xs \ \ a # b # c # d = xs" + apply (case_tac xs; simp) + apply (rename_tac list, case_tac list; simp)+ + done + +lemma pde_case_isPageTablePDE: + "(case pte of PageTablePTE _ \ P | _ \ Q) + = (if isPageTablePTE pte then P else Q)" + by (clarsimp simp: isPageTablePTE_def split: pte.splits) + +lemma framesize_to_from_H: + "sz < 3 \ framesize_from_H (framesize_to_H sz) = sz" + apply (clarsimp simp: framesize_to_H_def framesize_from_H_def framesize_defs + split: if_split vmpage_size.splits) + by (word_bitwise, auto) + +lemma ccap_relation_FrameCap_generics: + "ccap_relation (ArchObjectCap (FrameCap word vmrights vmpage_size d map_data)) cap' + \ (map_data \ None \ + capFMappedAddress_CL (cap_frame_cap_lift cap') + = snd (the map_data) + \ capFMappedASID_CL (cap_frame_cap_lift cap') + = fst (the map_data)) + \ ((capFMappedASID_CL (cap_frame_cap_lift cap') = 0) + = (map_data = None)) + \ vmrights_to_H (capFVMRights_CL (cap_frame_cap_lift cap')) = vmrights + \ framesize_to_H (capFSize_CL (cap_frame_cap_lift cap')) = vmpage_size + \ capFBasePtr_CL (cap_frame_cap_lift cap') = word + \ to_bool (capFIsDevice_CL (cap_frame_cap_lift cap')) = d + \ capFSize_CL (cap_frame_cap_lift cap') < 3 + \ capFVMRights_CL (cap_frame_cap_lift cap') < 4" + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (frule cap_get_tag_PageCap_frame) + apply (frule ccap_relation_c_valid_cap) + apply (clarsimp simp: cap_frame_cap_lift c_valid_cap_def cl_valid_cap_def split: if_split_asm) + done + +lemma throwError_invocationCatch: + "throwError a >>= invocationCatch b c d e = throwError (Inl a)" + by (simp add: invocationCatch_def throwError_bind) + +lemma canonical_address_cap_frame_cap: + "cap_get_tag cap = SCAST(32 signed \ 64) cap_frame_cap \ + canonical_address (capFMappedAddress_CL (cap_frame_cap_lift cap))" + apply (frule_tac cap_lift_frame_cap) + apply (subst(asm) cap_frame_cap_lift) + apply clarsimp + apply (drule_tac t="cap_frame_cap_lift cap" in sym) + apply (rule canonical_address_and_maskI) + apply fastforce + done + +lemma of_nat_pageBitsForSize_eq: + "(x = of_nat (pageBitsForSize sz)) = (unat x = pageBitsForSize sz)" for x::machine_word + by (auto simp: of_nat_pageBitsForSize) + +lemma ccap_relation_FrameCap_IsMapped: + "\ ccap_relation (capability.ArchObjectCap (arch_capability.FrameCap p r sz d m)) ccap \ + \ (capFMappedASID_CL (cap_frame_cap_lift ccap) = 0) = (m = None)" + 
apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (simp add: cap_frame_cap_lift_def) + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_splits) + done + +lemma cte_wp_at'_frame_at': + "\ cte_wp_at' + ((=) (capability.ArchObjectCap (arch_capability.FrameCap p v1 sz d m)) \ cteCap) slot s; + valid_objs' s \ + \ frame_at' p sz d s" + apply (drule (1) cte_wp_at_valid_objs_valid_cap') + apply clarsimp + apply (drule_tac t="cteCap _" in sym) + apply (clarsimp simp: valid_cap'_def) + done + +lemma canonical_address_frame_at': + "\frame_at' p sz d s; pspace_canonical' s\ \ canonical_address p" + apply (clarsimp simp: frame_at'_def) + apply (drule_tac x=0 in spec, clarsimp simp: bit_simps typ_at_to_obj_at_arches) + apply (cases sz + ; auto simp: bit_simps split: if_splits + dest!: device_data_at_ko user_data_at_ko intro!: obj_at'_is_canonical) + done + +definition flushtype_relation :: "flush_type \ machine_word \ bool" where + "flushtype_relation typ label \ case typ of + Clean \ + label \ scast ` {Kernel_C.ARMPageClean_Data, Kernel_C.ARMVSpaceClean_Data} + | Invalidate \ + label \ scast ` {Kernel_C.ARMPageInvalidate_Data, Kernel_C.ARMVSpaceInvalidate_Data} + | CleanInvalidate \ + label \ scast ` {Kernel_C.ARMPageCleanInvalidate_Data, Kernel_C.ARMVSpaceCleanInvalidate_Data} + | Unify \ + label \ scast ` {Kernel_C.ARMPageUnify_Instruction, Kernel_C.ARMVSpaceUnify_Instruction}" + +lemma doFlush_ccorres: + "ccorres dc xfdc (\s. vs \ ve \ ps \ ps + (ve - vs) \ vs && mask cacheLineSize = ps && mask cacheLineSize + \ ptrFromPAddr ps \ ptrFromPAddr ps + (ve - vs) + \ unat (ve - vs) \ gsMaxObjectSize s) + (\flushtype_relation t \invLabel\ \ \\start = vs\ \ \\end = ve\ \ \\pstart = ps\) [] + (doMachineOp (doFlush t vs ve ps)) (Call doFlush_'proc)" + apply (cases t; clarsimp simp: doFlush_def doMachineOp_bind) + apply (cinit' lift: pstart_') + apply (rule ccorres_cond_true) + apply (ctac (no_vcg) add: cleanCacheRange_RAM_ccorres) + apply (clarsimp simp: flushtype_relation_def) + apply (cinit' lift: pstart_') + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply (ctac (no_vcg) add: invalidateCacheRange_RAM_ccorres) + apply (fastforce simp: flushtype_relation_def + sel4_arch_invocation_label_defs arch_invocation_label_defs) + apply (cinit' lift: pstart_') + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply (ctac (no_vcg) add: cleanInvalidateCacheRange_RAM_ccorres) + apply (fastforce simp: flushtype_relation_def + sel4_arch_invocation_label_defs arch_invocation_label_defs) + apply (simp add: doMachineOp_bind empty_fail_bind bind_assoc) + apply (cinit' lift: pstart_') + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply (rule ccorres_rhs_assoc)+ + apply (ctac (no_vcg) add: cleanCacheRange_PoU_ccorres) + apply (ctac (no_vcg) add: dsb_ccorres) + apply (ctac (no_vcg) add: invalidateCacheRange_I_ccorres) + apply (ctac (no_vcg) add: isb_ccorres) + apply wp+ + apply (fastforce simp: flushtype_relation_def + sel4_arch_invocation_label_defs arch_invocation_label_defs) + done + +(* The precondition is slightly different here to ARM/ARM_HYP, because we're flushing on kernel + virtual addresses instead of user virtual addresses (hence also no VM root switching). *) +lemma performPageFlush_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (\s. 
pstart \ pstart + (end - start) \ + ptrFromPAddr pstart \ ptrFromPAddr pstart + (end - start) \ + unat (end - start) \ gsMaxObjectSize s) + (\\start = start\ \ \\end = end\ \ \\pstart = pstart\ \ + \flushtype_relation typ \invLabel \) + [] + (liftE (performPageInvocation (PageFlush typ start end pstart pt asid))) + (Call performPageFlush_'proc)" + apply (simp only: liftE_liftM ccorres_liftM_simp) + apply (cinit lift: start_' end_' pstart_' invLabel_') + apply (unfold when_def) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_cond2[where R=\]) + apply (simp split: if_split) + apply (ctac (no_vcg) add: doFlush_ccorres) + apply (rule ccorres_return_Skip) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply wpsimp + apply (simp add: guard_is_UNIV_def) + apply (clarsimp simp: order_less_imp_le) + done + +lemma ivc_label_flush_case: + "label = ArchInvocationLabel ARMPageUnify_Instruction \ + label = ArchInvocationLabel ARMPageCleanInvalidate_Data \ + label = ArchInvocationLabel ARMPageInvalidate_Data \ + label = ArchInvocationLabel ARMPageClean_Data + \ (case label of + ArchInvocationLabel ARMPageMap \ A + | ArchInvocationLabel ARMPageUnmap \ B + | ArchInvocationLabel ARMPageUnify_Instruction \ C + | ArchInvocationLabel ARMPageCleanInvalidate_Data \ C + | ArchInvocationLabel ARMPageInvalidate_Data \ C + | ArchInvocationLabel ARMPageClean_Data \ C + | ArchInvocationLabel ARMPageGetAddress \ D + | _ \ E) + = C" + by (auto split: invocation_label.split arch_invocation_label.split) + +lemma isPageFlushLabel_disj: + "(label = ArchInvocationLabel ARMPageUnify_Instruction \ + label = ArchInvocationLabel ARMPageCleanInvalidate_Data \ + label = ArchInvocationLabel ARMPageInvalidate_Data \ + label = ArchInvocationLabel ARMPageClean_Data) = + isPageFlushLabel label" + by (auto simp: isPageFlushLabel_def split: invocation_label.split arch_invocation_label.split) + +lemma flushtype_relation_triv: + "isPageFlushLabel (invocation_type label) \ isVSpaceFlushLabel (invocation_type label) + \ flushtype_relation (labelToFlushType label) label" + by (clarsimp simp: labelToFlushType_def flushtype_relation_def invocation_eq_use_types + isPageFlushLabel_def isVSpaceFlushLabel_def + split: flush_type.splits invocation_label.splits arch_invocation_label.splits) + +lemma vmAttributesFromWord_spec: + "\s. \ \ \s. True\ Call vmAttributesFromWord_'proc + \ vm_attributes_lift \ret__struct_vm_attributes_C = + \ armExecuteNever_CL = (\<^bsup>s\<^esup>w >> 2) && 1, + armParityEnabled_CL = (\<^bsup>s\<^esup>w >> 1) && 1, + armPageCacheable_CL = \<^bsup>s\<^esup>w && 1 \ \" + by (vcg, simp add: vm_attributes_lift_def word_sless_def word_sle_def mask_def) + +lemma maskVMRights_spec: (* FIXME AARCH64: replace the one in CSpace_C -- preconditions mess up csymbr *) + "\s. 
\ \ {s} + Call maskVMRights_'proc + \ \<^bsup>s\<^esup>vm_rights && mask 2 = \<^bsup>s\<^esup>vm_rights + \ vmrights_to_H \ret__unsigned_long = + maskVMRights (vmrights_to_H \<^bsup>s\<^esup>vm_rights) (cap_rights_to_H (seL4_CapRights_lift \<^bsup>s\<^esup>cap_rights_mask)) \ + \ret__unsigned_long && mask 2 = \ret__unsigned_long \ + \ret__unsigned_long \ 2 \" + apply (rule HoarePartial.ProcNoRec1) + apply (simp add: maskVMRights_impl) + apply (unfold maskVMRights_body_def) + apply vcg + apply (clarsimp simp: vmrights_defs vmrights_to_H_def maskVMRights_def mask_def + cap_rights_to_H_def to_bool_def + split: bool.split) + apply (simp add: maskVMRights_impl dom_def) + done + +lemma checkVSpaceRoot_def2: + "checkVSpaceRoot cap n = + (if isValidVTableRoot cap + then returnOk (capPTBasePtr (capCap cap), fst (the (capPTMappedAddress (capCap cap)))) + else throwError (InvalidCapability n))" + unfolding checkVSpaceRoot_def isValidVTableRoot_def2 + by (clarsimp split: capability.splits arch_capability.splits pt_type.splits option.splits) + +lemma frame_at'_is_aligned_addrFromPPtr: + "\ frame_at' p sz dev s; pspace_aligned' s \ \ is_aligned (addrFromPPtr p) pageBits" + apply (clarsimp simp: frame_at'_def) + apply (erule_tac x=0 in allE) + apply (erule impE) + apply (clarsimp simp: bit_simps pageBitsForSize_def split: vmpage_size.splits) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def objBits_simps koTypeOf_eq_UserDataT + split: if_splits) + apply (simp split: kernel_object.splits) + done + +lemma cap_frame_cap_lift_asid_upd_idem: + "cap_get_tag cap = scast cap_frame_cap \ + cap_frame_cap_lift cap\capFMappedASID_CL := capFMappedASID_CL (cap_frame_cap_lift cap) && mask 16\ = + cap_frame_cap_lift cap" + by (clarsimp simp: cap_frame_cap_lift_def cap_lift_def Let_def cap_tag_defs) + +lemma cpte_relation_makeUserPTE: + "\ vm_attributes_lift v = + \armExecuteNever_CL = (attrs >> 2) && 1, armParityEnabled_CL = (attrs >> Suc 0) && 1, + armPageCacheable_CL = attrs && 1\; + let uxn = uxn_from_vmattributes (vm_attributes_to_H v); + ap = ap_from_vm_rights rights; + attridx = attridx_from_vmattributes (vm_attributes_to_H v) + in if framesize_from_H sz = scast Kernel_C.ARMSmallPage + then pte_lift cpte = + Some (Pte_pte_4k_page \pte_pte_4k_page_CL.UXN_CL = uxn, page_base_address_CL = p, + nG_CL = 0, AF_CL = 1, SH_CL = 0, AP_CL = ap, + AttrIndx_CL = attridx\) + else pte_lift cpte = + Some (Pte_pte_page \pte_pte_page_CL.UXN_CL = uxn, page_base_address_CL = p, + nG_CL = 0, AF_CL = 1, SH_CL = 0, AP_CL = ap, + AttrIndx_CL = attridx\)\ + \ cpte_relation (makeUserPTE p rights (attribsFromWord attrs) sz) cpte" + apply (clarsimp simp: cpte_relation_def Let_def makeUserPTE_def vm_attributes_to_H_def + uxn_from_vmattributes_def framesize_from_H_eqs + split: if_splits) + apply (clarsimp simp: pte_lift_def Let_def attribsFromWord_def + simp flip: of_bool_nth to_bool_and_1)+ + done + +lemma ccap_relation_decodePageMap[unfolded asid_bits_def canonical_bit_def, simplified]: + "\ ccap_relation (ArchObjectCap (FrameCap p R sz dev m)) cap; + cap_get_tag cap = scast cap_frame_cap; + 0 < asid; asid_wf asid; canonical_address vref; + cap_frame_cap_lift ccap = + cap_frame_cap_lift cap\capFMappedASID_CL := asid && mask asid_bits, + capFMappedAddress_CL := vref && mask (Suc canonical_bit)\; + cap_get_tag ccap = scast cap_frame_cap \ + \ ccap_relation (ArchObjectCap (FrameCap p R sz dev (Some (asid, vref)))) ccap" + apply (simp (no_asm) add: ccap_relation_def map_option_Some_eq2 c_valid_cap_def) + apply (simp (no_asm_simp) add: 
cap_frame_cap_lift[THEN iffD1]) + apply (simp add: cap_to_H_def ccap_relation_FrameCap_generics asid_wf_def asid_bits_def + and_mask_eq_iff_le_mask[symmetric] cl_valid_cap_def canonical_bit_def + canonical_address_and_maskD) + apply (rule conjI, clarsimp) + apply (clarsimp simp: ccap_relation_def c_valid_cap_def cap_frame_cap_lift[THEN iffD1] + cl_valid_cap_def) + done + +lemmas canonical_address_C_pptrUserTop = + canonical_address_pptrUserTop_mask[unfolded pptrUserTop_val word_le_nat_alt, simplified] + +lemma capFMappedAddress_CL_canonical: + "\ capFMappedAddress_CL (cap_frame_cap_lift cap) = p; cap_get_tag cap = scast cap_frame_cap \ \ + canonical_address p" + by (drule sym, simp add: canonical_address_cap_frame_cap) + +lemma decodeARMFrameInvocation_ccorres: + notes if_cong[cong] Collect_const[simp del] + shows + "\ interpret_excaps extraCaps' = excaps_map extraCaps; isFrameCap cp \ + \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps \ ctes_of) + and cte_wp_at' ((=) (ArchObjectCap cp) \ cteCap) slot + and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer and valid_objs') + ({s. invLabel_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. cte_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. buffer_' s = option_to_ptr buffer} + \ {s. call_' s = from_bool isCall}) [] + (decodeARMMMUInvocation label args cptr slot cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeARMFrameInvocation_'proc)" + (is "\ _; _ \ \ ccorres _ _ ?P _ _ _ _") + apply (clarsimp simp only: isCap_simps) + apply (cinit' lift: invLabel_' length___unsigned_long_' cte_' + current_extra_caps_' cap_' buffer_' call_' + simp: decodeARMMMUInvocation_def) + apply (simp add: Let_def isCap_simps invocation_eq_use_types split_def decodeARMFrameInvocation_def + cong: StateSpace.state.fold_congs globals.fold_congs + if_cong invocation_label.case_cong arch_invocation_label.case_cong list.case_cong) + apply (rule ccorres_Cond_rhs[rotated])+ + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply clarsimp + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def + split: invocation_label.split arch_invocation_label.split) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + + \ \PageGetAddress\ + apply (rule ccorres_guard_imp2[where A="?P" and A'=UNIV]) + apply (simp add: returnOk_bind bindE_assoc performARMMMUInvocations) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (rule ccorres_nondet_refinement) + apply (rule is_nondet_refinement_bindE) + apply (rule is_nondet_refinement_refl) + apply (simp split: if_split, rule conjI[rotated]) + apply (rule impI, rule is_nondet_refinement_refl) + apply (rule impI, rule is_nondet_refinement_alternative1) + apply (clarsimp simp: liftE_bindE) + apply (rule ccorres_add_returnOk) + apply (ctac(no_vcg) add: performPageGetAddress_ccorres) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply (wpsimp wp: sts_invs_minor' ct_in_state'_set)+ + apply (vcg exspec=setThreadState_modifies) + apply (fastforce simp: ct_in_state'_def valid_tcb_state'_def rf_sr_ksCurThread + 
ccap_relation_FrameCap_BasePtr ccap_relation_frame_tags + elim!: pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread') + + \ \Flush\ + apply (rule ccorres_guard_imp2[where A="?P" and A'=UNIV]) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (simp add: ivc_label_flush_case decodeARMFrameInvocationFlush_def + list_case_If2 if3_fold2 + cong: StateSpace.state.fold_congs globals.fold_congs + if_cong invocation_label.case_cong arch_invocation_label.case_cong + list.case_cong) + apply (simp add: split_def case_option_If2 if_to_top_of_bind + cong: if_cong invocation_label.case_cong arch_invocation_label.case_cong) + apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) + apply vcg + apply (clarsimp simp:list_length_less ) + apply (drule unat_less_iff[where c=2]) + apply (simp add:word_bits_def) + apply simp + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply (rule ccorres_if_cond_throws2[rotated -1,where Q = \ and Q' = \]) + apply vcg + apply clarsimp + apply (frule ccap_relation_mapped_asid_0) + apply fastforce + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (simp add: invocationCatch_use_injection_handler + injection_bindE[OF refl refl] bindE_assoc + injection_handler_returnOk injection_handler_whenE + lookupError_injection) + apply (ctac add: ccorres_injection_handler_csum1 + [OF ccorres_injection_handler_csum1, OF findVSpaceForASID_ccorres]) + apply (rule ccorres_if_cond_throws[where P = False and Q = \ and Q'=\, simplified]) + apply simp + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo + [where args = args and n = 1 and buffer = buffer]) + apply (simp only:if_to_top_of_bindE whenE_def) + apply (rule ccorres_if_cond_throws[rotated -1, where Q = \ and Q' = \]) + apply vcg + apply (clarsimp simp:hd_drop_conv_nth hd_conv_nth) + apply (simp add:injection_handler_throwError) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp only:returnOk_bindE) + apply csymbr + apply csymbr + apply (rule ccorres_Guard_Seq) + apply csymbr + apply (rule ccorres_if_cond_throws[rotated -1,where Q = \ and Q' = \]) + apply vcg + apply (clarsimp simp:hd_drop_conv_nth hd_conv_nth) + apply (clarsimp dest!: ccap_relation_FrameCap_generics) + apply (simp add:injection_handler_throwError) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_if_cond_throws[rotated -1,where Q = \ and Q' = \]) + apply vcg + apply (clarsimp simp: hd_drop_conv_nth hd_conv_nth paddrBase_def paddrTop_def + pptrBaseOffset_def pptrTop_def pptrBase_def fromPAddr_def) + apply (clarsimp dest!: ccap_relation_FrameCap_generics) + apply (simp add:injection_handler_throwError) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: performARMMMUInvocations bindE_assoc) + apply simp + apply (ctac add: setThreadState_ccorres) + apply (ctac(no_vcg) add: performPageFlush_ccorres) + apply (rule ccorres_gen_asm) + apply (erule ssubst[OF if_P, where P="\x. 
ccorres _ _ _ _ _ x _"]) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply (wpsimp simp: performPageInvocation_def) + apply simp + apply (strengthen unat_sub_le_strg[where v="2 ^ pageBitsForSize (capFSize cp)"]) + apply (simp add: linorder_not_less linorder_not_le order_less_imp_le) + apply (wp sts_invs_minor') + apply simp + apply (vcg exspec=setThreadState_modifies) + apply wp + apply simp + apply vcg + apply wp + apply vcg + apply vcg + apply clarsimp + apply (rule_tac P'="{s. find_ret = errstate s}" in ccorres_from_vcg_split_throws[where P=\]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) + apply (erule lookup_failure_rel_fault_lift[rotated]) + apply (simp add: exception_defs) + apply (wp injection_wp[OF refl]) + apply simp + apply (vcg exspec=findVSpaceForASID_modifies) + apply (clarsimp simp: ct_in_state'_def valid_tcb_state'_def rf_sr_ksCurThread isCap_simps + ccap_relation_FrameCap_BasePtr ccap_relation_frame_tags + sysargs_rel_to_n ccap_relation_FrameCap_MappedASID) + apply (frule cte_wp_at_eq_gsMaxObjectSize, fastforce) + apply (frule cte_wp_cteCap_valid, fastforce) + apply (clarsimp simp: valid_cap'_def capAligned_def wellformed_mapdata'_def + cong: option.case_cong) + apply (rule conjI; clarsimp) + (* Haskell side *) + apply (fastforce simp: not_less not_le paddrBase_def ptrFromPAddr_add_left + is_aligned_no_overflow3 + is_aligned_no_overflow3[OF vmsz_aligned_addrFromPPtr(3)[THEN iffD2]]) + (* C side *) + apply (prop_tac "2 \ length args", clarsimp simp: not_less word_le_nat_alt) + apply (drule at_least_2_args[simplified not_less]) + apply (solves \clarsimp simp: ccap_relation_capFMappedASID_CL_0 ccap_relation_FrameCap_MappedASID + pageBitsForSize_le_64 ccap_relation_FrameCap_MappedAddress + isPageFlushLabel_disj ccap_relation_FrameCap_Size + framesize_from_H_bounded flushtype_relation_triv + split: option.splits\) + + \ \PageUnmap\ + apply (rule ccorres_guard_imp2[where A="?P" and A'=UNIV]) + apply (simp add: returnOk_bind bindE_assoc performARMMMUInvocations) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: setThreadState_ccorres) + apply (ctac(no_vcg) add: performPageInvocationUnmap_ccorres) + apply (rule ccorres_gen_asm) + apply (erule ssubst[OF if_P, where P="\x. 
ccorres _ _ _ _ _ x _"]) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply (wpsimp simp: performPageInvocation_def) + apply (wp sts_invs_minor') + apply simp + apply (vcg exspec=setThreadState_modifies) + apply clarsimp (* needed *) + apply (fastforce simp: ct_in_state'_def valid_tcb_state'_def rf_sr_ksCurThread isCap_simps + cte_wp_at_ctes_of + elim!: pred_tcb'_weakenE) + + \ \PageMap\ + apply (rule ccorres_guard_imp2[where A="?P" and A'=UNIV]) + apply (rename_tac word rights pg_sz maptype mapdata call' buffer' cap excaps cte + length___unsigned_long invLabel) + apply clarsimp + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (simp add: word_less_nat_alt) + (* throw on length < 3 *) + apply (rule ccorres_Cond_rhs_Seq) + apply simp + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def split: list.split) + apply (rule ccorres_cond_true_seq) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (prop_tac "args \ []", fastforce dest: at_least_3_args) + apply csymbr + apply (simp add: interpret_excaps_test_null excaps_map_def) + (* throw if no excaps *) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def split: list.split) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: list_case_If2 decodeARMFrameInvocationMap_def) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer]) + apply csymbr + apply (rule ccorres_add_return) + apply (rule_tac r'="(\rv _ rv'. ((cap_get_tag rv' = scast cap_vspace_cap) = isVTableRoot rv) + \ (ccap_relation rv rv')) (fst (extraCaps ! 
0))" + and xf'=vspaceRootCap_' in ccorres_split_nothrow) + apply (rule ccorres_from_vcg[where P="excaps_in_mem extraCaps \ ctes_of" and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: excaps_in_mem_def return_def neq_Nil_conv) + apply (drule(1) slotcap_in_mem_VSpace) + apply (frule interpret_excaps_eq[rule_format, where n=0], simp) + apply (clarsimp simp: typ_heap_simps' mask_def isVTableRoot_ex isCap_simps) + apply (thin_tac _)+ + apply fastforce + apply (rename_tac rv' t t') + apply (simp add: word_sless_def word_sle_def) + apply ceqv + apply (clarsimp simp add: split_def cap_case_PageTableCap2 hd_conv_nth option.case_eq_if) + apply (simp add: whenE_def if_to_top_of_bind if_to_top_of_bindE) + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply (clarsimp simp: assertE_liftE liftE_bindE bind_assoc cong: if_weak_cong) + apply (rule ccorres_assert2) + (* checkVSpaceRoot and isValidNativeRoot_'proc *) + apply csymbr + apply csymbr + apply csymbr + apply (erule allE, erule (1) impE) + apply (simp add: checkVSpaceRoot_def2 if_to_top_of_bind if_to_top_of_bindE from_bool_0) + apply (rule ccorres_if_cond_throws2[rotated -1, where Q=\ and Q'=\ and PT'=UNIV]) + apply (vcg, clarsimp) + apply (solves simp) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + (* findVSpaceForASID *) + apply (simp add: invocationCatch_use_injection_handler + injection_bindE[OF refl refl] bindE_assoc + injection_handler_returnOk injection_handler_throwError + injection_handler_If if_to_top_of_bindE + lookupError_injection injection_liftE + cong: if_cong) + apply (ctac add: ccorres_injection_handler_csum1 + [OF ccorres_injection_handler_csum1, OF findVSpaceForASID_ccorres]) + apply (rule ccorres_if_cond_throws[rotated -1, where P=False and Q=\ and Q'=\ and + R'=UNIV and PT'=UNIV, simplified]) + apply (vcg, clarsimp) + apply (solves simp) + (* base ptr equality *) + apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) + apply vcg + apply (clarsimp simp: ccap_relation_vspace_base isValidVTableRoot_def2) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* check vaddr alignment *) + apply csymbr + apply (clarsimp simp: checkVPAlignment_def unlessE_def injection_handler_If + injection_handler_returnOk injection_handler_throwError + if_to_top_of_bind if_to_top_of_bindE + cong: if_cong) + apply (rule ccorres_if_cond_throws2[rotated -1, where Q=\ and Q'=\]) + apply vcg + apply (solves \clarsimp simp: vmsz_aligned_def from_bool_0 is_aligned_mask + ccap_relation_FrameCap_Size framesize_from_to_H\) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* mapdata = None / capFMappedASID = asidInvalid*) + apply csymbr + apply csymbr + apply (simp only: if_swap[where P="x = None" for x]) + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond_both'[where Q=\ and Q'=\]) + apply (solves \clarsimp simp: ccap_relation_FrameCap_IsMapped asidInvalid_def\) + apply (fold not_None_def)[1] + apply (rule ccorres_cond_seq) + (* asids not equal? 
*) + apply (rule ccorres_cond_both'[where Q=\ and Q'=\]) + apply (solves \clarsimp simp: isValidVTableRoot_def2 not_None_def + ccap_relation_vspace_mapped_asid[symmetric] + ccap_relation_FrameCap_MappedASID\) + apply ccorres_rewrite + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_cond_seq) + (* base addresses not equal? *) + apply (rule ccorres_cond_both'[where Q=\ and Q'=\]) + apply (solves \clarsimp simp: ccap_relation_FrameCap_MappedAddress not_None_def\) + apply ccorres_rewrite + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: liftE_bindE) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (ctac add: lookupPTSlot_ccorres) + apply (rename_tac lookupPTSlot_ret) + apply csymbr + apply (rule ccorres_cond_seq) + apply (simp only: if_swap[where P="x = pageBitsForSize y" for x y]) + apply (rule ccorres_cond_both'[where Q=\ and Q'=\]) + apply (clarsimp simp: ccap_relation_FrameCap_Size framesize_from_to_H) + apply (rule unat_eq_of_nat) + apply (rule less_trans, rule pageBitsForSize_64, simp) + apply clarsimp + apply ccorres_rewrite + apply wpfix + apply (rule_tac P="\s. unat (ptBitsLeft_C lookupPTSlot_ret) < 64" and + P'=UNIV in ccorres_from_vcg_throws) + apply (clarsimp simp: throwError_def return_def) + apply (rule conseqPre, vcg, clarsimp) + apply (clarsimp simp: exception_defs syscall_error_rel_def + syscall_error_to_H_cases and_mask_eq_iff_le_mask + lookup_fault_missing_capability_lift_def + lookup_fault_lift_def lookup_fault_tag_defs) + apply (solves \clarsimp simp: mask_def word_le_nat_alt\) + apply (clarsimp simp: performARMMMUInvocations) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (ctac (no_vcg) add: performPageInvocationMap_ccorres) + apply (rule ccorres_gen_asm) + apply (erule ssubst[OF if_P, where P="\x. 
ccorres _ _ _ _ _ x _"]) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply (wpsimp simp: performPageInvocation_def) + apply (wp sts_invs_minor') + apply clarsimp + apply (vcg exspec=setThreadState_modifies) + apply (wpsimp wp: hoare_drop_imp lookupPTSlot_bitsLeft_less_64 lookupPTSlot_inv) + apply (clarsimp simp: ccap_relation_frame_tags ccap_relation_FrameCap_Size + ccap_relation_FrameCap_BasePtr) + apply (vcg exspec=lookupPTSlot_modifies) + (* mapdata \ None --> remap case *) + apply (rule ccorres_rhs_assoc) + apply csymbr + apply clarsimp (* simplifies Guard to True*) + apply ccorres_rewrite (* removes Guard \True\ *) + apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) + apply vcg + apply (solves \clarsimp simp: pptrUserTop_val ccap_relation_FrameCap_Size + framesize_from_to_H word_less_nat_alt field_simps\) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: liftE_bindE) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (ctac add: lookupPTSlot_ccorres) + apply (rename_tac lookupPTSlot_ret) + apply csymbr + apply (rule ccorres_cond_seq) + apply (simp only: if_swap[where P="x = pageBitsForSize y" for x y]) + apply (rule ccorres_cond_both'[where Q=\ and Q'=\]) + apply (clarsimp simp: ccap_relation_FrameCap_Size framesize_from_to_H) + apply (rule unat_eq_of_nat) + apply (rule less_trans, rule pageBitsForSize_64, simp) + apply clarsimp + apply ccorres_rewrite + apply wpfix + apply (rule_tac P="\s. unat (ptBitsLeft_C lookupPTSlot_ret) < 64" and + P'=UNIV in ccorres_from_vcg_throws) + apply (clarsimp simp: throwError_def return_def) + apply (rule conseqPre, vcg, clarsimp) + apply (clarsimp simp: exception_defs syscall_error_rel_def + syscall_error_to_H_cases and_mask_eq_iff_le_mask + lookup_fault_missing_capability_lift_def + lookup_fault_lift_def lookup_fault_tag_defs) + apply (solves \clarsimp simp: mask_def word_le_nat_alt\) + apply (clarsimp simp: performARMMMUInvocations) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (ctac (no_vcg) add: performPageInvocationMap_ccorres) + apply (rule ccorres_gen_asm) + apply (erule ssubst[OF if_P, where P="\x. ccorres _ _ _ _ _ x _"]) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply (wpsimp simp: performPageInvocation_def) + apply (wp sts_invs_minor') + apply clarsimp + apply (vcg exspec=setThreadState_modifies) + apply (wpsimp wp: hoare_drop_imp lookupPTSlot_bitsLeft_less_64 lookupPTSlot_inv) + apply clarsimp + apply (vcg exspec=lookupPTSlot_modifies) + apply ccorres_rewrite + apply clarsimp + apply (rename_tac fault) + apply (rule_tac P'="{s. errstate s = find_ret}" in ccorres_from_vcg_throws[where P=\]) + apply (clarsimp simp: throwError_def return_def) + apply (rule conseqPre, vcg, clarsimp) + apply (clarsimp simp: exception_defs syscall_error_rel_def syscall_error_to_H_cases) + apply (case_tac fault; clarsimp) + apply clarsimp + apply (wpsimp wp: injection_handler_wp) + apply (wp (once) hoare_drop_imps) (* drop rv = capPTBasePtr (capCap (fst (extraCaps ! 
0))) *) + apply (wpsimp wp: hoare_vcg_const_imp_lift) + apply (clarsimp simp: ccap_relation_frame_tags ccap_relation_FrameCap_Size + ccap_relation_FrameCap_BasePtr) + apply (vcg exspec=findVSpaceForASID_modifies) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply wp + apply vcg + apply clarsimp (* removes vaddr from post condition *) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp simp: rf_sr_ksCurThread) + apply (frule cte_wp_at_eq_gsMaxObjectSize, fastforce) + apply (clarsimp simp: cte_wp_at_ctes_of is_aligned_mask[symmetric] vmsz_aligned_def + vmsz_aligned_addrFromPPtr isCap_simps sysargs_rel_to_n not_less) + apply (frule ctes_of_valid', fastforce) + apply (drule sym[where t="cteCap cap" for cap]) + apply (clarsimp simp: valid_cap'_def capAligned_def isValidVTableRoot_def2) + apply (rule conjI) + (* Haskell side of PageMap *) + apply (clarsimp dest!: at_least_3_args[simplified not_less]) + apply (prop_tac "s \' fst (extraCaps ! 0)") + apply (clarsimp simp: neq_Nil_conv excaps_in_mem_def + slotcap_in_mem_def dest!: ctes_of_valid') + apply (clarsimp simp: valid_cap'_def capAligned_def wellformed_mapdata'_def) + apply (prop_tac "7 \ gsMaxObjectSize s") + subgoal for _ _ v2 + by (cases v2; clarsimp simp: bit_simps') + subgoal + by (auto simp: ct_in_state'_def pred_tcb_at' valid_tcb_state'_def mask_2pm1[symmetric] + valid_cap'_def wellformed_acap'_def wellformed_mapdata'_def + mask_pptrUserTop_user_region + elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread') + (* length___unsigned_long <-> length args *) + apply (rule conjI, solves \clarsimp simp: word_less_nat_alt\) + (* C side of PageMap *) + apply (frule ccap_relation_frame_tags) + apply (clarsimp dest!: at_least_3_args[simplified not_less]) + apply (prop_tac "s \' fst (extraCaps ! 0)") + apply (clarsimp simp: neq_Nil_conv excaps_in_mem_def + slotcap_in_mem_def dest!: ctes_of_valid') + apply (solves + \clarsimp simp: ccap_relation_vspace_mapped_asid[symmetric] valid_cap'_def capAligned_def + wellformed_mapdata'_def ccap_relation_FrameCap_generics + addrFromPPtr_canonical_in_kernel_window frame_at'_is_aligned_addrFromPPtr + is_aligned_addrFromPPtr invs_pspace_aligned' + isVTableRoot_def ccap_relation_FrameCap_Size framesize_from_H_bounded + framesize_from_to_H and_mask_eq_iff_le_mask le_mask_iff_lt_2n[THEN iffD1] + cpte_relation_makeUserPTE ccap_relation_decodePageMap + canonical_address_C_pptrUserTop capFMappedAddress_CL_canonical\) + (* PageMap done *) + + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +(* adapted from X64 *) +lemma asidHighBits_handy_convs: + "sint Kernel_C.asidHighBits = 7" + "Kernel_C.asidHighBits \ 0x20" + "unat Kernel_C.asidHighBits = asid_high_bits" + by (simp add: Kernel_C.asidHighBits_def + asid_high_bits_def)+ + +lemma sts_Restart_ct_active [wp]: + "\\s. thread = ksCurThread s\ setThreadState Restart thread \\_. 
ct_active'\" + apply (clarsimp simp: ct_in_state'_def) + apply (rule hoare_lift_Pf2 [where f=ksCurThread]) + apply (wp sts_st_tcb') + apply (simp split: if_split) + apply wp + done + +lemma maskCapRights_eq_Untyped [simp]: + "(maskCapRights R cap = UntypedCap d p sz idx) = (cap = UntypedCap d p sz idx)" + apply (cases cap) + apply (auto simp: Let_def isCap_simps maskCapRights_def) + apply (simp add: AARCH64_H.maskCapRights_def isFrameCap_def Let_def split: arch_capability.splits) + done + + +lemma le_mask_asid_bits_helper: + "x \ 2 ^ asid_high_bits - 1 \ (x::machine_word) << asid_low_bits \ mask asid_bits" + apply (simp add: mask_def) + apply (drule le2p_bits_unset_64) + apply (simp add: asid_high_bits_def word_bits_def) + apply (subst upper_bits_unset_is_l2p_64 [symmetric]) + apply (simp add: asid_bits_def word_bits_def) + apply (clarsimp simp: asid_bits_def asid_low_bits_def asid_high_bits_def nth_shiftl) + done + +lemma injection_handler_liftE: + "injection_handler a (liftE f) = liftE f" + by (simp add:injection_handler_def) + + +lemma liftE_case_sum: + "liftE f >>= case_sum (throwError \ Inr) g = f >>= g" + by (simp add:liftE_def) + +lemma framesize_from_H_mask2: + "framesize_from_H a && mask 2 = framesize_from_H a" + apply (rule less_mask_eq) + apply (simp add:framesize_from_H_def + split: vmpage_size.splits) + apply (simp add: framesize_defs)+ + done + +lemma performVSpaceFlush_ccorres: + "ccorres (\_ rv'. rv' = scast EXCEPTION_NONE) ret__unsigned_long_' + (\s. pstart \ pstart + (end - start) \ + ptrFromPAddr pstart \ ptrFromPAddr pstart + (end - start) \ + unat (end - start) \ gsMaxObjectSize s) + (\\start = start\ \ \\end = end\ \ \\pstart = pstart\ \ + \flushtype_relation typ \invLabel \) + [] + (performVSpaceInvocation (VSpaceFlush typ start end pstart vs asid)) + (Call performVSpaceFlush_'proc)" + apply (cinit lift: start_' end_' pstart_' invLabel_') + apply (unfold when_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_add_return2) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_cond2[where R=\]) + apply (simp split: if_split) + apply (rule ccorres_call[where xf'=xfdc]) + apply (rule doFlush_ccorres) + apply simp + apply simp + apply simp + apply (rule ccorres_return_Skip) + apply ceqv + apply (rule ccorres_return_C[where xf=ret__unsigned_long_']; simp) + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply clarsimp + done + +lemma isVSpaceFlushLabel_disj: + "(label = ArchInvocationLabel ARMVSpaceUnify_Instruction \ + label = ArchInvocationLabel ARMVSpaceCleanInvalidate_Data \ + label = ArchInvocationLabel ARMVSpaceInvalidate_Data \ + label = ArchInvocationLabel ARMVSpaceClean_Data) = + isVSpaceFlushLabel label" + by (auto simp: isVSpaceFlushLabel_def split: invocation_label.split arch_invocation_label.split) + +lemma flush_range_le: + fixes start :: "'a::len word" + assumes "pageBase start n = pageBase end n" + assumes "start \ end" + assumes "w && mask n = start && mask n" + assumes "n < LENGTH('a)" + shows "w \ w + (end - start)" "unat (end - start) \ 2 ^ n" +proof - + have q: "w && mask n \ (w && mask n) + (end - start)" + using assms + apply (subst AND_NOT_mask_plus_AND_mask_eq[where w = start,symmetric,where n=n]) + apply (simp add: page_base_def) + apply (drule word_le_minus_mono_left[where x= "start && ~~ mask n"]) + apply (rule word_and_le2) + apply simp + done + + have a: "unat (w && mask n) + unat (end - start) = unat ((w && mask n) + (end - start))" + apply (rule 
unat_plus_simple[THEN iffD1,symmetric]) + apply (rule q) + done + + have b: "end + (start && mask n) - start = end - (start && ~~ mask n)" + by (simp add:mask_out_sub_mask) + have c: "unat (w && mask n) + unat (end - start) < 2 ^ n" + using assms a + apply (simp add:field_simps) + apply (rule unat_less_helper) + apply simp + apply (rule_tac P =" \x. x < y" for y in ssubst[OF b]) + apply (subst AND_NOT_mask_plus_AND_mask_eq[where w = "end",symmetric,where n=n]) + apply (simp add: pageBase_def) + apply (rule and_mask_less') + apply simp + done + + show "w \ w + (end - start)" + using assms + by - (rule word_plus_mono_right_split, rule c, simp) + + show "unat (end - start) \ 2 ^ n" + using q c + by (simp add: olen_add_eqv) +qed + +lemmas flush_range_le1 = flush_range_le(2)[OF _ _ refl] + +lemma ptrFromPAddr_and_mask_eq: + "n \ pptrBaseOffset_alignment \ ptrFromPAddr p && mask n = p && mask n" + apply (simp add: ptrFromPAddr_def pptrBaseOffset_def paddrBase_def pptrBaseOffset_alignment_def + pptrBase_def) + apply word_bitwise + apply clarsimp + done + +lemma decodeARMVSpaceRootInvocation_ccorres: + "\ interpret_excaps extraCaps' = excaps_map extraCaps; + isPageTableCap cp; capPTType cp = VSRootPT_T \ \ + ccorres + (intr_and_se_rel \ dc) + (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps \ ctes_of) + and cte_wp_at' ((=) (ArchObjectCap cp) \ cteCap) slot + and valid_cap' (ArchObjectCap cp) + and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer) + ({s. invLabel_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. cte_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. 
buffer_' s = option_to_ptr buffer}) + hs + (decodeARMMMUInvocation label args cptr slot cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeARMVSpaceRootInvocation_'proc)" + apply (clarsimp simp only: isCap_simps) + apply (cinit' lift: invLabel_' length___unsigned_long_' cte_' current_extra_caps_' cap_' buffer_') + apply (simp add: Let_def isCap_simps invocation_eq_use_types decodeARMMMUInvocation_def + decodeARMVSpaceInvocation_def + del: Collect_const + cong: StateSpace.state.fold_congs globals.fold_congs + if_cong invocation_label.case_cong arch_invocation_label.case_cong list.case_cong) + apply (rule ccorres_Cond_rhs[rotated]) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (clarsimp simp: isVSpaceFlushLabel_disj) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_rhs_assoc)+ + apply (simp add: isVSpaceFlushLabel_disj decodeARMFrameInvocationFlush_def + list_case_If2 if3_fold2 + cong: StateSpace.state.fold_congs globals.fold_congs + if_cong invocation_label.case_cong arch_invocation_label.case_cong + list.case_cong + del: Collect_const) + apply (simp add: case_option_If2 if_to_top_of_bind del: Collect_const + cong: if_cong invocation_label.case_cong arch_invocation_label.case_cong) + apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) + apply vcg + apply (clarsimp simp: word_less_nat_alt list_length_less) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo + [where args = args and n = 1 and buffer = buffer]) + apply (simp only:if_to_top_of_bindE whenE_def) + apply (simp add: case_option_If2 if_to_top_of_bind del: Collect_const + cong: if_cong invocation_label.case_cong arch_invocation_label.case_cong) + apply (rule ccorres_if_cond_throws[rotated -1, where Q = \ and Q' = \]) + apply vcg + apply (clarsimp simp:hd_drop_conv_nth hd_conv_nth) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_if_cond_throws[rotated -1, where Q = \ and Q' = \]) + apply vcg + apply (clarsimp simp: hd_drop_conv_nth hd_conv_nth pptrUserTop_val) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply (erule allE, erule (1) impE) + apply (clarsimp simp: checkVSpaceRoot_def isValidVTableRoot_def isVTableRoot_def + case_option_If2 if_to_top_of_bindE if_to_top_of_bind + simp del: Collect_const) + apply (rule ccorres_if_cond_throws2[rotated -1, where Q = \ and Q' = \]) + apply vcg + apply (clarsimp simp: from_bool_def split: bool.split) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply clarsimp + apply 
(simp add: lookupError_injection invocationCatch_use_injection_handler + injection_bindE[OF refl refl] injection_handler_If bindE_assoc + injection_handler_throwError injection_liftE[OF refl]) + apply wpfix + apply (ctac add: ccorres_injection_handler_csum1[OF ccorres_injection_handler_csum1, + OF findVSpaceForASID_ccorres]) + prefer 2 (* throw exception *) + apply ccorres_rewrite + apply (rule_tac P'="{s. errstate s = find_ret}" in ccorres_from_vcg_throws[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) + apply (erule lookup_failure_rel_fault_lift[rotated]) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) + (* findVSpace succeeded *) + apply ccorres_rewrite + apply (clarsimp simp: if_to_top_of_bindE if_to_top_of_bind + simp del: Collect_const) + apply (rule ccorres_if_cond_throws[rotated -1, where Q = \ and Q' = \]) + apply vcg + apply (clarsimp simp: ccap_relation_vspace_base) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def injection_handler_def handleE'_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: injection_handler_bindE injection_handler_liftE bindE_assoc) + apply (clarsimp simp: lookupFrame_def liftE_bindE bind_assoc split_def) + apply (ctac add: lookupPTSlot_ccorres) + apply (rule ccorres_split_nothrow, rule getObject_pte_ccorres) + apply clarsimp + apply ceqv + apply (rename_tac pte pte') + apply csymbr + apply (clarsimp simp: if_to_top_of_bind + simp del: Collect_const) + apply (rule ccorres_if_cond_throws2[rotated -1, where Q = \ and Q' = \]) + apply (vcg exspec=setThreadState_modifies) + apply (clarsimp simp: cpte_relation_def Let_def isPagePTE_def pte_lifts + pte_pte_4k_page_lift_def pte_pte_page_lift_def + split: pte.splits if_splits) + apply (clarsimp simp: injection_handler_returnOk ccorres_invocationCatch_Inr) + apply (ctac (no_vcg) add: setThreadState_ccorres) + apply (clarsimp simp: performInvocation_def AARCH64_H.performInvocation_def + performARMMMUInvocation_def performVSpaceInvocation_def + liftE_bindE) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE; simp) + apply wp + apply (clarsimp simp: injection_handler_bindE injection_handler_liftE injection_handler_If + injection_handler_returnOk bindE_assoc injection_handler_throwError + checkValidMappingSize_def + cong: if_cong + simp del: Collect_const) + apply (rule ccorres_assert) + apply (clarsimp simp: liftE_bindE simp del: Collect_const) + apply (rule ccorres_stateAssert) + apply (clarsimp simp: if_to_top_of_bindE cong: if_cong simp del: Collect_const) + apply (rule ccorres_move_const_guard)+ + apply (rule ccorres_if_cond_throws[rotated -1, where Q = \ and Q' = \]) + apply vcg + apply (clarsimp simp: pageBase_def shiftr_shiftl1 hd_conv_nth) + apply (rule_tac P="unat (ptBitsLeft_C resolve_ret) < 0x40" in ccorres_gen_asm) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def exception_defs syscall_error_rel_def + syscall_error_to_H_def syscall_error_type_defs hd_conv_nth + pageBase_def shiftr_shiftl1 mask_def word_less_nat_alt) + apply (clarsimp simp: ccorres_invocationCatch_Inr performInvocation_def bindE_assoc + AARCH64_H.performInvocation_def performARMMMUInvocation_def + liftE_bindE + simp 
del: Collect_const) + apply csymbr + apply (erule allE, erule (1) impE, erule (1) impE) + apply csymbr + apply (rule ccorres_move_const_guard)+ + apply csymbr + apply (ctac (no_vcg) add: setThreadState_ccorres) + apply (rule_tac A="\s. unat (ptBitsLeft_C resolve_ret) < 0x40 \ + unat (ptBitsLeft_C resolve_ret) \ pptrBaseOffset_alignment \ + 2 ^ unat (ptBitsLeft_C resolve_ret) \ gsMaxObjectSize s" and + A'=UNIV in + ccorres_guard_imp) + apply (ctac (no_vcg) add: performVSpaceFlush_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE; simp) + apply wp + apply (clarsimp cong: conj_cong simp flip: is_aligned_mask simp: fromPAddr_def) + apply (rule conjI) + apply (erule flush_range_le; simp add: linorder_not_le) + apply (erule word_less_sub_1) + apply (simp add: mask_add_aligned mask_twice) + apply (rule conjI) + apply (erule flush_range_le; simp add: linorder_not_le) + apply (erule word_less_sub_1) + apply (simp add: ptrFromPAddr_and_mask_eq mask_add_aligned mask_twice) + apply (erule order_trans[rotated]) + apply (rule flush_range_le1; simp add: not_le) + apply (erule word_less_sub_1) + apply (clarsimp simp: hd_conv_nth mask_def flushtype_relation_triv) + apply wp + apply (clarsimp simp: word_less_nat_alt) + apply wpfix + apply (wp getPTE_wp) + apply vcg + apply (wp hoare_vcg_all_lift hoare_drop_imps lookupPTSlot_inv lookupPTSlot_bitsLeft_less_64) + apply (vcg exspec=lookupPTSlot_modifies) + apply (wp injection_handler_wp hoare_drop_imps) + apply (vcg exspec=findVSpaceForASID_modifies) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply (simp only: isVSpaceFlushLabel_disj) + apply (clarsimp simp: sysargs_rel_to_n valid_cap'_def capAligned_def valid_tcb_state'_def) + apply (rule conjI; clarsimp simp: wellformed_mapdata'_def) + apply fastforce + apply (clarsimp simp: isValidVTableRoot_def ccap_relation_vspace_mapped_asid[symmetric]) + apply (clarsimp simp: word_less_nat_alt hd_conv_nth wellformed_mapdata'_def rf_sr_ksCurThread + cap_get_tag_isCap_unfolded_H_cap + dest!: at_least_2_args) + done + +lemma injection_handler_stateAssert_relocate: + "injection_handler Inl (stateAssert ass xs >>= f) >>=E g + = do v \ stateAssert ass xs; injection_handler Inl (f ()) >>=E g od" + by (simp add: injection_handler_def handleE'_def bind_bindE_assoc bind_assoc) + +lemma decodeARMMMUInvocation_ccorres: + notes Collect_const[simp del] if_cong[cong] + shows + "\ interpret_excaps extraCaps' = excaps_map extraCaps; \ isVCPUCap cp \ + \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps \ ctes_of) + and cte_wp_at' ((=) (ArchObjectCap cp) \ cteCap) slot + and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer and valid_objs') + (UNIV \ {s. invLabel_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. cte_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. buffer_' s = option_to_ptr buffer} + \ {s. 
call_' s = from_bool isCall}) [] + (decodeARMMMUInvocation label args cptr slot cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeARMMMUInvocation_'proc)" + supply ccorres_prog_only_cong[cong] + apply (cinit' lift: invLabel_' length___unsigned_long_' cte_' + current_extra_caps_' cap_' buffer_' call_') + apply csymbr + apply (simp add: cap_get_tag_isCap_ArchObject + AARCH64_H.decodeInvocation_def + invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_Cond_rhs) + (* PageTableCap, VSRootPT_T *) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call[OF decodeARMVSpaceRootInvocation_ccorres]; solves simp) + apply (rule ccorres_Cond_rhs) + (* PageTableCap, NormalPT_T *) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call[OF decodeARMPageTableInvocation_ccorres]; solves simp) + apply (rule ccorres_Cond_rhs) + (* FrameCap *) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call[OF decodeARMFrameInvocation_ccorres]; solves simp) + (* ASIDControlCap *) + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_equals_throwError) + apply (fastforce simp: decodeARMMMUInvocation_def decodeARMASIDControlInvocation_def isCap_simps + throwError_bind invocationCatch_def + split: invocation_label.split arch_invocation_label.split) + apply ccorres_rewrite + apply (rule syscall_error_throwError_ccorres_n) + apply (fastforce simp: syscall_error_to_H_cases) + (* ARMASIDControlMakePool *) + apply (simp add: decodeARMMMUInvocation_def decodeARMASIDControlInvocation_def isCap_simps) + apply (simp add: word_less_nat_alt list_case_If2 split_def) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + (* args malformed *) + apply (rule ccorres_cond_true_seq | simp)+ + apply (simp add: throwError_bind invocationCatch_def) + apply ccorres_rewrite + apply (rule syscall_error_throwError_ccorres_n) + apply (fastforce simp: syscall_error_to_H_cases) + apply (simp add: interpret_excaps_test_null excaps_map_def) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + (* extraCaps malformed *) + apply (rule ccorres_cond_true_seq | simp)+ + apply (simp add: throwError_bind invocationCatch_def) + apply ccorres_rewrite + apply (rule syscall_error_throwError_ccorres_n) + apply (fastforce simp: syscall_error_to_H_cases) + apply csymbr + apply (simp add: interpret_excaps_test_null[OF Suc_leI]) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: length_ineq_not_Nil throwError_bind invocationCatch_def) + apply ccorres_rewrite + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (prop_tac "1 < length extraCaps") + apply (rule neq_le_trans, simp) + apply (fastforce simp: Suc_leI) + apply (simp add: Let_def split_def liftE_bindE bind_assoc length_ineq_not_Nil) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply csymbr + apply (rule ccorres_add_return, + rule_tac xf'=untyped_' and + r'="(\rv _ un. + (cap_get_tag un = scast cap_untyped_cap) = isUntypedCap rv \ + (isUntypedCap rv \ ccap_relation rv un)) + (fst (extraCaps ! 
0))" + in ccorres_split_nothrow) + apply (rule_tac P="excaps_in_mem extraCaps \ ctes_of" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (frule length_ineq_not_Nil[where xs=extraCaps]) + apply (clarsimp simp: return_def neq_Nil_conv excaps_in_mem_def + slotcap_in_mem_def) + apply (drule interpret_excaps_eq[rule_format, where n=0], simp) + apply (simp add: mask_def[where n=4]) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (rule conjI, fastforce intro: typ_heap_simps) + apply (drule ccte_relation_ccap_relation) + apply (simp add: typ_heap_simps cap_get_tag_isCap) + apply ceqv + apply (rename_tac untyped') + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=1]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply (rule ccorres_pre_gets_armKSASIDTable_ksArchState) + apply (rename_tac asid_table) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_rhs_assoc2) + apply (rule ccorres_add_return) + apply (rule_tac r'="\rv rv'. rv' = (case [p \ assocs asid_table. + fst p < 2 ^ asid_high_bits \ snd p = None] + of [] \ 2 ^ asid_high_bits | x # xs \ fst x)" + and xf'=i_' in ccorres_split_nothrow) + apply (rule_tac P="\x \ ran asid_table. x \ 0" + in ccorres_gen_asm) + apply (rule_tac P="\s. asid_table = armKSASIDTable (ksArchState s)" + in ccorres_from_vcg[where P'=UNIV]) + apply (clarsimp simp: return_def) + apply (rule HoarePartial.SeqSwap) + (* i_' = i___unsigned_long_' *) + apply (rule_tac I="{t. (\, t) \ rf_sr \ i_' t \ 2 ^ asid_high_bits + \ asid_table = armKSASIDTable (ksArchState \) + \ (\x < i_' t. asid_table x \ None) + \ ret__int_' t = from_bool (i_' t < 2 ^ asid_high_bits \ + asid_table (i_' t) \ None)}" + in HoarePartial.reannotateWhileNoGuard) + apply (rule HoarePartial.While[OF order_refl]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: asidHighBits_handy_convs + word_sle_def word_sless_def + word_less_nat_alt[symmetric] + from_bool_0) + apply (cut_tac P="\y. y < i_' x + 1 = rhs y" for rhs in allI, + rule less_x_plus_1) + apply (fastforce simp: asid_high_bits_def) + apply (clarsimp simp: rf_sr_armKSASIDTable + asid_high_bits_word_bits + option_to_ptr_def option_to_0_def + order_less_imp_le + linorder_not_less + order_antisym[OF inc_le]) + apply (clarsimp split: option.split if_split) + apply (rule conjI; clarsimp simp: Kernel_C_defs asid_high_bits_def word_less_nat_alt + from_bool_0 unat_add_lem[THEN iffD1]) + apply (drule_tac n="i_' x + 1" in rf_sr_armKSASIDTable) + apply (simp add: asid_high_bits_def mask_def word_le_nat_alt) + apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.splits) + apply (drule_tac n="i_' x + 1" in rf_sr_armKSASIDTable) + apply (simp add: asid_high_bits_def mask_def word_le_nat_alt) + apply (clarsimp simp: option_to_ptr_def option_to_0_def ran_def split: option.splits) + apply blast + apply (clarsimp simp: from_bool_0) + apply (case_tac "i_' x = 2 ^ asid_high_bits") + apply (clarsimp split: list.split) + apply (drule_tac f="\xs. 
(a, b) \ set xs" in arg_cong) + apply (clarsimp simp: in_assocs_is_fun) + apply fastforce + apply (frule(1) neq_le_trans) + apply (subst filter_assocs_Cons) + apply fastforce + apply simp + apply simp + apply (rule conseqPre, vcg) + apply (clarsimp simp: asidHighBits_handy_convs word_sle_def + word_sless_def from_bool_0 + rf_sr_armKSASIDTable[where n=0, simplified]) + apply (simp add: asid_high_bits_def option_to_ptr_def option_to_0_def Kernel_C_defs + split: option.split if_split) + apply fastforce + apply ceqv + apply (rule ccorres_Guard_Seq)+ + apply (simp add: whenE_bindE_throwError_to_if if_to_top_of_bind) + apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws[rotated -1]) + apply clarsimp + apply (rule conseqPre, vcg, rule subset_refl) + apply (clarsimp simp: asid_high_bits_word_bits asidHighBits_handy_convs null_def) + apply (clarsimp split: list.split) + apply (fastforce dest!: filter_eq_ConsD) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (fastforce simp: syscall_error_to_H_cases) + apply (simp add: invocationCatch_use_injection_handler + injection_bindE[OF refl refl] injection_handler_If + injection_handler_returnOk bindE_assoc + injection_handler_throwError + cong: if_cong) + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=ret__int_' in ccorres_abstract, ceqv) + apply (rule_tac P="rv'a = from_bool (\ (isUntypedCap (fst (hd extraCaps)) \ + capBlockSize (fst (hd extraCaps)) = objBits (makeObject ::asidpool)))" + in ccorres_gen_asm2) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=ret__int_' in ccorres_abstract, ceqv) + apply (rule_tac P="rv'b = from_bool (\ (isUntypedCap (fst (hd extraCaps)) \ + capBlockSize (fst (hd extraCaps)) = objBits (makeObject ::asidpool) \ + \ capIsDevice (fst (hd extraCaps))))" + in ccorres_gen_asm2) + apply (clarsimp simp: to_bool_if cond_throw_whenE bindE_assoc) + apply (rule ccorres_split_when_throwError_cond[where Q = \ and Q' = \]) + apply fastforce + apply (rule syscall_error_throwError_ccorres_n) + apply (clarsimp simp: syscall_error_rel_def shiftL_nat syscall_error_to_H_cases) + prefer 2 + apply vcg + apply clarsimp + apply (ctac add: ccorres_injection_handler_csum1[OF ensureNoChildren_ccorres]) + apply (clarsimp simp: Collect_False) + apply csymbr + apply csymbr + apply (ctac add: ccorres_injection_handler_csum1 + [OF lookupTargetSlot_ccorres, + unfolded lookupTargetSlot_def]) + apply (simp add: Collect_False split_def) + apply csymbr + apply (ctac add: ccorres_injection_handler_csum1 + [OF ensureEmptySlot_ccorres]) + apply (simp add: ccorres_invocationCatch_Inr + performInvocation_def + AARCH64_H.performInvocation_def + performARMMMUInvocation_def) + apply (simp add: liftE_bindE) + apply ccorres_rewrite + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: setThreadState_ccorres) + apply (simp only: liftE_bindE[symmetric]) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (ctac (no_vcg) add: performASIDControlInvocation_ccorres + [where idx = "capFreeIndex (fst (extraCaps ! 
0))"]) + apply (rule ccorres_alternative2) + apply (rule ccorres_returnOk_skip) + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply wp + apply (wp sts_invs_minor' sts_Restart_ct_active) + apply simp + apply (vcg exspec=setThreadState_modifies) + apply ccorres_rewrite + apply (rule ccorres_return_C_errorE, simp+) + apply (wp injection_wp[OF refl]) + apply (simp add: all_ex_eq_helper) + (* This manual conseqPost prevents the VCG from instantiating False *) + apply (rule_tac Q'=UNIV and A'="{}" in conseqPost) + apply (vcg exspec=ensureEmptySlot_modifies) + apply (frule length_ineq_not_Nil) + apply (clarsimp simp: null_def ThreadState_defs mask_def hd_conv_nth + isCap_simps rf_sr_ksCurThread cap_get_tag_UntypedCap + word_le_make_less asid_high_bits_def + split: list.split) + apply (frule interpret_excaps_eq[rule_format, where n=0], fastforce) + apply (fastforce simp: interpret_excaps_test_null excaps_map_def split_def) + apply fastforce + apply ccorres_rewrite + apply (rule ccorres_return_C_errorE, simp+) + apply (wp injection_wp[OF refl] hoare_drop_imps) + apply (simp add: split_def all_ex_eq_helper) + apply (vcg exspec=lookupTargetSlot_modifies) + apply simp + apply ccorres_rewrite + apply (rule ccorres_return_C_errorE, simp+) + apply (wp injection_wp[OF refl] ensureNoChildren_wp) + apply (simp add: all_ex_eq_helper cap_get_tag_isCap) + apply (vcg exspec=ensureNoChildren_modifies) + apply clarsimp + apply vcg + apply clarsimp + apply (rule conseqPre, vcg, clarsimp) + apply clarsimp + apply vcg + apply clarsimp + apply (rule conseqPre, vcg, clarsimp) + apply wp + apply (simp add: cap_get_tag_isCap) + apply (rule HoarePartial.SeqSwap) + apply (rule_tac I="\Prop \ksCurThread \root\" + and Q="\Bonus \i \ Prop \ksCurThread \root\" + for Prop Bonus in HoarePartial.reannotateWhileNoGuard) + apply (rule HoarePartial.While[OF order_refl]) + apply (rule conseqPre, vcg) + apply clarify + apply (rule conjI) + apply clarify + apply (simp (no_asm)) + apply clarify + apply clarsimp + apply vcg + apply simp + apply (rule hoare_drop_imps) + apply wp + apply simp + apply vcg + apply simp + apply wp + apply vcg + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + (* ASIDPoolCap case *) + apply (rule ccorres_Cond_rhs) + apply (simp add: imp_conjR[symmetric] decodeARMMMUInvocation_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (rule ccorres_Cond_rhs_Seq) + apply (clarsimp simp: isCap_simps decodeARMASIDPoolInvocation_def) + apply ccorres_rewrite + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def + split: invocation_label.split arch_invocation_label.split) + apply (rule syscall_error_throwError_ccorres_n) + apply (fastforce simp: syscall_error_to_H_cases) + apply (simp add: interpret_excaps_test_null excaps_map_def + list_case_If2 split_def) + apply (rule ccorres_Cond_rhs_Seq) + apply ccorres_rewrite + apply (clarsimp simp: isCap_simps decodeARMASIDPoolInvocation_def + throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (fastforce simp: syscall_error_to_H_cases) + apply (simp add: isCap_simps decodeARMASIDPoolInvocation_def split: list.split) + apply (intro allI impI) + apply csymbr + apply (rule ccorres_add_return) + apply (rule ccorres_Guard_Seq) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_rhs_assoc2) + apply (rule_tac R="excaps_in_mem extraCaps \ ctes_of" and + 
R'="UNIV" and + val="from_bool (\isVTableRoot (fst (extraCaps ! 0)) \ + capPTMappedAddress (capCap (fst (extraCaps ! 0))) \ None)" and + xf'=ret__int_' in ccorres_symb_exec_r_known_rv) + apply vcg + apply clarsimp + apply (frule interpret_excaps_eq[rule_format, where n=0], simp) + apply (clarsimp simp: excaps_in_mem_def) + apply (frule (1) slotcap_in_mem_VSpace) + apply (clarsimp simp: typ_heap_simps' from_bool_0 isVTableRoot_cap_eq) + apply (fastforce simp: isCap_simps ccap_relation_VSpaceCap_IsMapped split: if_splits) + apply ceqv + apply (rule ccorres_Cond_rhs_Seq) + apply ccorres_rewrite + apply (rule_tac v="Inl (InvalidCapability 1)" in ccorres_equals_throwError) + apply (fastforce simp: isCap_simps throwError_bind invocationCatch_def isVTableRoot_def + split: capability.split arch_capability.split option.splits pt_type.splits) + apply (rule syscall_error_throwError_ccorres_n) + apply (fastforce simp: syscall_error_to_H_cases) + apply (simp add: isCap_simps isVTableRoot_ex, elim exE conjE) + apply (simp add: isCap_simps Kernel_C_defs liftE_bindE bind_assoc isVTableRoot_def) + apply (rule ccorres_pre_gets_armKSASIDTable_ksArchState) + apply csymbr + apply (rule ccorres_Guard_Seq)+ + apply (rule ccorres_add_return) + apply (rule_tac r'="\_ rv'. rv' = option_to_ptr (x (ucast (asid_high_bits_of (ucast (capASIDBase cp))))) + \ x (ucast (asid_high_bits_of (ucast (capASIDBase cp)))) \ Some 0" + and xf'=pool_' in ccorres_split_nothrow) + apply (rule_tac P="\s. x = armKSASIDTable (ksArchState s) + \ valid_arch_state' s \ s \' ArchObjectCap cp" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def valid_arch_state'_def valid_asid_table'_def) + apply (frule cap_get_tag_isCap_ArchObject(2)) + apply (clarsimp simp: isCap_simps) + apply (erule_tac v=cap in ccap_relationE) + apply (clarsimp simp: cap_lift_asid_pool_cap cap_to_H_simps valid_cap_simps' + cap_asid_pool_cap_lift_def) + apply (subst rf_sr_armKSASIDTable, assumption) + apply (rule leq_asid_bits_shift, simp) + apply (fastforce simp: ucast_asid_high_bits_is_shift asid_wf_def mask_def) + apply ceqv + apply (simp add: whenE_bindE_throwError_to_if if_to_top_of_bind cong: if_cong) + apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws[rotated -1]) + apply vcg + apply (simp add: option_to_0_def option_to_ptr_def split: option.split) + apply fastforce + apply (simp add: throwError_bind invocationCatch_def) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def + syscall_error_rel_def exception_defs syscall_error_to_H_cases) + apply (simp add: lookup_fault_lift_invalid_root) + apply csymbr + apply (simp add: liftME_def bindE_assoc if_to_top_of_bind) + apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws[rotated -1]) + apply vcg + apply (frule cap_get_tag_isCap_ArchObject(2)) + apply (clarsimp simp: isCap_simps) + apply (erule_tac v=cap in ccap_relationE) + apply (fastforce simp: cap_lift_asid_pool_cap cap_to_H_simps valid_cap_simps' + cap_asid_pool_cap_lift_def) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (fastforce simp: syscall_error_to_H_cases) + apply csymbr + apply csymbr + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_rhs_assoc2) + apply (simp add: bind_assoc liftE_bindE) + apply (rule_tac xf'=i_' and r'="\rv rv'. rv' = (case [(x, y) \ assocs (inv ASIDPool rv). 
+ x \ 2 ^ asid_low_bits - 1 \ x + capASIDBase cp \ 0 + \ y = None] of [] \ 2 ^ asid_low_bits + | x # xs \ fst x)" + in ccorres_split_nothrow) + apply (rule ccorres_add_return2) + apply (rule ccorres_pre_getObject_asidpool) + apply (rename_tac pool) + apply (rule_tac P="ko_at' pool (capASIDPool cp)" + in ccorres_from_vcg[where P'=UNIV]) + apply (clarsimp simp: option_to_0_def option_to_ptr_def + return_def) + apply (rule HoarePartial.SeqSwap) + apply (rule_tac I="{t. (\, t) \ rf_sr \ i_' t \ 2 ^ asid_low_bits + \ ko_at' pool (capASIDPool cp) \ + \ (\v. cslift t (ap_Ptr (capASIDPool cp)) = Some v \ + (\x < i_' t. capASIDBase cp + x = 0 \ + asid_map_get_tag (index (array_C v) (unat x)) = + scast asid_map_asid_map_vspace) \ + ret__int_' t = from_bool (i_' t < 2 ^ asid_low_bits \ + (capASIDBase cp + (i_' t) = 0 \ + asid_map_get_tag (index (array_C v) (unat (i_' t))) = + scast asid_map_asid_map_vspace)))}" + in HoarePartial.reannotateWhileNoGuard) + apply (rule HoarePartial.While[OF order_refl]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: asidLowBits_handy_convs word_sle_def word_sless_def from_bool_0) + apply (rename_tac s v) + apply (subgoal_tac "capASIDBase_CL (cap_asid_pool_cap_lift cap) + = capASIDBase cp") + apply (subgoal_tac "\x. (x < (i_' s + 1)) + = (x < i_' s \ x = i_' s)") + apply (clarsimp simp: typ_heap_simps ccap_relation_capASIDBase) + apply (clarsimp simp: inc_le asid_low_bits_def not_less split: if_split) + apply unat_arith + apply (solves \simp add: ccap_relation_capASIDBase\) + apply (clarsimp simp: from_bool_0) + apply (rename_tac s v) + apply (erule cmap_relationE1[OF rf_sr_cpspace_asidpool_relation], + erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps casid_pool_relation_def + inv_ASIDPool array_relation_def + split: asidpool.split_asm asid_pool_C.split_asm) + apply (case_tac "i_' s = 2 ^ asid_low_bits") + apply (clarsimp split: list.split) + apply (rename_tac a b xs) + apply (drule_tac f="\xs. 
(a, b) \ set xs" in arg_cong) + apply (clarsimp simp: in_assocs_is_fun mask_def) + apply (drule spec, drule(1) mp) + apply (simp add: asid_low_bits_word_bits) + apply (drule spec, drule(1) mp) + apply (simp add: option_to_ptr_def option_to_0_def field_simps) + apply (frule(1) neq_le_trans) + apply (subst filter_assocs_Cons) + apply (simp add: split_def asid_low_bits_word_bits) + apply (rule conjI, assumption) + apply (clarsimp simp add: field_simps) + apply (erule_tac x="i_' s" in allE, erule impE, + solves \clarsimp simp: le_mask_iff_lt_2n[THEN iffD1] asid_low_bits_def\) + apply (fastforce simp: casid_map_relation_def asid_map_lift_def) + apply (simp add: asid_low_bits_word_bits) + apply (erule allEI, rule impI, erule(1) impE) + apply (clarsimp simp: field_simps) + apply (rename_tac x') + apply (drule_tac x=x' in spec) + apply (simp add: le_mask_iff_lt_2n[THEN iffD1] asid_low_bits_def + split: if_split_asm option.splits) + apply simp + apply (rule conseqPre, vcg) + apply (clarsimp simp: asidLowBits_handy_convs + signed_shift_guard_simpler_64 asid_low_bits_def + word_sless_def word_sle_def) + apply (erule cmap_relationE1[OF rf_sr_cpspace_asidpool_relation], + erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps ccap_relation_capASIDBase split: if_split) + apply ceqv + apply (simp add: assertE_liftE liftE_bindE bind_assoc) + apply (simp add: if_to_top_of_bind asid_low_bits_def) + apply ccorres_rewrite + apply (rule ccorres_if_cond_throws[where Q=\ and Q'=\, rotated -1]) + apply vcg + apply (clarsimp simp: null_def asid_low_bits_def split: list.split + dest!: filter_eq_ConsD) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: returnOk_bind ccorres_invocationCatch_Inr + performInvocation_def + AARCH64_H.performInvocation_def liftE_bindE) + apply csymbr + apply (ctac add: setThreadState_ccorres) + apply (simp add: performARMMMUInvocation_def bindE_assoc flip: liftE_liftE returnOk_liftE) + apply (ctac(no_vcg) add: performASIDPoolInvocation_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply simp + apply wp + apply simp + apply (wp sts_invs_minor') + apply simp + apply (vcg exspec=setThreadState_modifies) + apply simp + apply (wp getASID_wp) + apply simp + apply (rule HoarePartial.SeqSwap) + apply (rule_tac I="\\rv. Prop \ksCurThread \vspaceCapSlot rv\" + and Q="\\rv. Bonus \i rv \ Prop \ksCurThread \vspaceCapSlot rv\" + for Prop Bonus in HoarePartial.reannotateWhileNoGuard) + apply vcg + apply fastforce + apply clarsimp + apply (rule conseqPre, vcg, rule subset_refl) + apply simp + (* HACK: rewrites to fix schematic dependency problems *) + apply (rule_tac t=v0 and s="capASIDPool cp" in subst, fastforce) + apply (rule_tac t=v1 and s="capASIDBase cp" in subst, fastforce) + apply (rule_tac t=b and s="snd (extraCaps ! 0)" in subst, fastforce) + apply (rule return_wp) + apply (rule conseqPre, vcg) + apply (rule_tac t=v0 and s="capASIDPool cp" in subst, fastforce) + apply (rule_tac t=v1 and s="capASIDBase cp" in subst, fastforce) + apply (rule_tac t=b and s="snd (extraCaps ! 0)" in subst, fastforce) + apply (rule subset_refl) + apply (rule_tac t=b and s="snd (extraCaps ! 
0)" in subst, fastforce) + apply (rule conseqPre, vcg, rule subset_refl) + (* Can't reach *) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (cases cp; simp add: isCap_simps) + apply (rename_tac pt_t m) + apply (case_tac pt_t; simp) + apply clarsimp + apply (rule conjI) (* PTCap VSRoot *) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule_tac t="cteCap cte" in sym) + apply (frule(1) ctes_of_valid', simp) + apply (rule conjI) (* not PTCap VSRoot *) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule_tac t="cteCap cte" in sym) + apply (frule(1) ctes_of_valid', simp) + apply (rule conjI, clarsimp, simp) (* PTCap NormalPT_T *) + apply clarsimp + apply (rule conjI, clarsimp, simp) (* FrameCap *) + apply clarsimp + apply (rule conjI, clarsimp simp: isCap_simps) (* ASIDControlCap *) + apply (clarsimp simp: cte_wp_at_ctes_of ct_in_state'_def + interpret_excaps_eq excaps_map_def) + apply (clarsimp simp: sysargs_rel_to_n word_less_nat_alt not_le) + apply (rule conjI; clarsimp) + apply (frule invs_arch_state') + apply (rule conjI, clarsimp simp: valid_arch_state'_def valid_asid_table'_def) + apply (clarsimp simp: neq_Nil_conv excaps_map_def valid_tcb_state'_def invs_sch_act_wf' + unat_lt2p[where 'a=machine_word_len, folded word_bits_def]) + apply (frule interpret_excaps_eq[rule_format, where n=1], simp) + apply (rule conjI; clarsimp)+ + apply (rule conjI, erule ctes_of_valid', clarsimp) + apply (intro conjI) + apply fastforce + apply fastforce + apply fastforce + apply (fastforce elim!: pred_tcb'_weakenE) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) + apply (case_tac "tcbState obj", (simp add: runnable'_def)+)[1] + apply (clarsimp simp: excaps_in_mem_def slotcap_in_mem_def) + apply (rule sym, simp add: objBits_simps) + apply (simp add: ex_cte_cap_wp_to'_def cte_wp_at_ctes_of) + apply clarsimp + apply (rule exI)+ + apply (rule conjI; assumption) + apply (clarsimp simp: null_def neq_Nil_conv) + apply (drule_tac f="\xs. (a, bb) \ set xs" in arg_cong) + apply (clarsimp simp: in_assocs_is_fun) + apply (clarsimp simp: le_mask_asid_bits_helper) + apply (simp add: is_aligned_shiftl_self) + (* ARMASIDPoolAssign *) + apply (clarsimp simp: isCap_simps valid_tcb_state'_def invs_sch_act_wf') + apply (frule invs_arch_state', clarsimp) + apply (intro conjI) + apply fastforce + apply fastforce + apply fastforce + apply (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) + apply (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) + apply (cases extraCaps; simp) + apply (clarsimp simp: excaps_in_mem_def slotcap_in_mem_def isPTCap'_def) + apply (simp add: valid_cap'_def) + apply (clarsimp simp: null_def neq_Nil_conv mask_def field_simps + asid_low_bits_word_bits asidInvalid_def asid_wf_def + dest!: filter_eq_ConsD) + apply (subst is_aligned_add_less_t2n[rotated]; assumption?) 
+ apply (simp add: asid_low_bits_def asid_bits_def) + apply (clarsimp simp: asid_wf_def valid_cap'_def asid_bits_def mask_def word_le_nat_alt + word_less_nat_alt) + apply (simp add: asid_bits_def asid_low_bits_def) + apply (rule TrueI) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of asidHighBits_handy_convs + word_sle_def word_sless_def asidLowBits_handy_convs + rf_sr_ksCurThread ThreadState_defs mask_def[where n=4] + cong: if_cong) + apply (clarsimp simp: ccap_relation_isDeviceCap2 objBits_simps pageBits_def case_bool_If) + apply (rule conjI; clarsimp) + apply (clarsimp simp: neq_Nil_conv excaps_in_mem_def excaps_map_def) + apply (frule interpret_excaps_eq[rule_format, where n=0], simp) + apply (frule interpret_excaps_eq[rule_format, where n=1], simp) + apply (clarsimp simp: mask_def[where n=4] slotcap_in_mem_def + ccap_rights_relation_def rightsFromWord_wordFromRights) + apply (clarsimp simp: asid_high_bits_word_bits Kernel_C.asidHighBits_def split: list.split_asm) + apply (clarsimp simp: cap_untyped_cap_lift_def cap_lift_untyped_cap + cap_to_H_def[split_simps cap_CL.split] + hd_conv_nth length_ineq_not_Nil Kernel_C_defs + elim!: ccap_relationE) + apply (clarsimp simp: to_bool_def unat_eq_of_nat objBits_simps pageBits_def case_bool_If + split: if_splits) + apply (clarsimp simp: asid_low_bits_word_bits isCap_simps neq_Nil_conv + excaps_map_def excaps_in_mem_def from_bool_0 + p2_gt_0[where 'a=machine_word_len, folded word_bits_def]) + apply (drule_tac t="cteCap cte" in sym, simp) + apply (frule cap_get_tag_isCap_unfolded_H_cap(13)) (* ASIDPoolCap *) + apply (frule ctes_of_valid', clarsimp) + apply (clarsimp simp: valid_cap'_def capAligned_def + ccap_relation_capASIDPool ccap_relation_capASIDBase) + apply (frule interpret_excaps_eq[rule_format, where n=0], simp) + apply (elim conjE) + apply (frule (1) slotcap_in_mem_VSpace) + apply (clarsimp simp: typ_heap_simps isVTableRoot_ex isCap_simps asid_wf_table_guard inv_ASIDPool) + apply (prop_tac "asid_low_bits < LENGTH(machine_word_len)", simp add: asid_low_bits_def) + apply (simp add: le_mask_iff_lt_2n[THEN iffD1, symmetric]) + apply (clarsimp simp: asid_low_bits_def mask_def cong: list.case_cong split: list.split) + done + +lemma setMessageInfo_ksCurThread_ccorres: + "ccorres dc xfdc (tcb_at' thread and (\s. ksCurThread s = thread)) + (UNIV \ \mi = message_info_to_H mi'\) hs + (setMessageInfo thread mi) + (\ret__unsigned_long :== CALL wordFromMessageInfo(mi');; + CALL setRegister(\ksCurThread, + scast Kernel_C.msgInfoRegister, + \ret__unsigned_long))" + unfolding setMessageInfo_def + apply (rule ccorres_guard_imp2) + apply ctac + apply simp + apply (ctac add: setRegister_ccorres) + apply wp + apply vcg + apply (simp add: AARCH64.msgInfoRegister_def C_register_defs rf_sr_ksCurThread + AARCH64_H.msgInfoRegister_def) + done + +lemma foldl_all_False: + "(\ foldl (\b x. b \ f x) False xs) = (\x \ set xs. \ f x)" + apply (subst foldl_fun_or_alt) + apply (fold orList_def) + apply (simp add: orList_False image_subset_iff) + done + +lemma unat_length_2_helper: + "\unat (l::machine_word) = length args; \ l < 2\ \ \x xa xs. args = x#xa#xs" + apply (case_tac args; clarsimp simp: unat_eq_0) + by (case_tac list; clarsimp simp: unat_eq_1) + +lemma ct_active_st_tcb_at_minor': + assumes "ct_active' s" + shows "st_tcb_at' (\st'. 
tcb_st_refs_of' st' = {} \ st' \ Inactive \ st' \ IdleThreadState) (ksCurThread s) s" + "st_tcb_at' runnable' (ksCurThread s) s" + using assms + by (clarsimp simp: st_tcb_at'_def ct_in_state'_def obj_at'_def projectKOs, + case_tac "tcbState obj"; clarsimp)+ + +lemma vcpu_reg_saved_when_disabled_spec: + "\s. \ \ {s} + Call vcpu_reg_saved_when_disabled_'proc + \ \ret__unsigned_long = from_bool (\<^bsup>s\<^esup>field \ {scast seL4_VCPUReg_SCTLR, + scast seL4_VCPUReg_CNTV_CTL, + scast seL4_VCPUReg_CPACR}) \" + by vcg clarsimp + +lemma vcpuRegSavedWhenDisabled_spec[simp]: + "vcpuRegSavedWhenDisabled reg = (reg = VCPURegSCTLR \ reg = VCPURegCNTV_CTL \ reg = VCPURegCPACR)" + by (simp add: vcpuRegSavedWhenDisabled_def split: vcpureg.splits) + +lemma writeVCPUReg_ccorres: + notes Collect_const[simp del] + shows + "ccorres dc xfdc + (vcpu_at' vcpuptr and no_0_obj') + (UNIV \ \\vcpu = vcpu_Ptr vcpuptr \ + \ \\field = of_nat (fromEnum reg) \ + \ \\value = val\) hs + (writeVCPUReg vcpuptr reg val) (Call writeVCPUReg_'proc)" + apply (cinit lift: vcpu_' field_' value_') + apply clarsimp + apply (rule ccorres_pre_getCurVCPU, rename_tac cvcpuopt) + (* abstract out check for "is vcpuptr the current vcpu" in terms of cvcpuopt *) + apply (rule_tac C'="{s. cvcpuopt \ None \ (cvcpuopt \ None \ fst (the cvcpuopt) = vcpuptr) }" + and Q="\s. vcpuptr \ 0 \ (armHSCurVCPU \ ksArchState) s = cvcpuopt" + and Q'=UNIV in ccorres_rewrite_cond_sr) + subgoal by (fastforce dest: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def + split: option.splits) + apply (rule ccorres_Cond_rhs) + \ \vcpuptr is current vcpu\ + apply clarsimp + apply (rename_tac curvcpuactive) + apply csymbr + apply (rule_tac C'="{s. (reg = VCPURegSCTLR \ reg = VCPURegCNTV_CTL \ reg = VCPURegCPACR) \ \curvcpuactive }" + and Q="\s. (armHSCurVCPU \ ksArchState) s = Some (vcpuptr, curvcpuactive)" + and Q'=UNIV in ccorres_rewrite_cond_sr) + subgoal by (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU + simp: cur_vcpu_relation_def from_bool_0 vcpureg_eq_use_types + split: option.splits) + (* unification choking on schematics with pairs *) + apply (rule_tac A="vcpu_at' vcpuptr" and A'=UNIV in ccorres_guard_imp) + apply (rule ccorres_Cond_rhs, clarsimp) + apply (ctac (no_vcg) add: vcpu_write_reg_ccorres) + apply (simp (no_asm_simp)) + apply (ctac (no_vcg) add: vcpu_hw_write_reg_ccorres) + apply fastforce + apply fastforce + \ \no current vcpu\ + apply clarsimp + apply wpc + apply (rename_tac cur b, prop_tac "\cur", fastforce) + apply simp + apply (ctac (no_vcg) add: vcpu_write_reg_ccorres) + apply fastforce + done + +lemma readVCPUReg_ccorres: + notes Collect_const[simp del] + shows + "ccorres ((=)) ret__unsigned_long_' + (vcpu_at' vcpuptr and no_0_obj') + (UNIV \ \\vcpu = vcpu_Ptr vcpuptr \ \ \\field = of_nat (fromEnum reg) \) hs + (readVCPUReg vcpuptr reg) (Call readVCPUReg_'proc)" + apply (cinit lift: vcpu_' field_') + apply clarsimp + apply (rule ccorres_pre_getCurVCPU, rename_tac cvcpuopt) + (* abstract out check for "is vcpuptr the current vcpu" in terms of cvcpuopt *) + apply (rule_tac C'="{s. cvcpuopt \ None \ (cvcpuopt \ None \ fst (the cvcpuopt) = vcpuptr) }" + and Q="\s. vcpuptr \ 0 \ (armHSCurVCPU \ ksArchState) s = cvcpuopt" + and Q'=UNIV in ccorres_rewrite_cond_sr) + subgoal by (fastforce dest: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def + split: option.splits) + apply (rule ccorres_Cond_rhs) + \ \vcpuptr is current vcpu\ + apply clarsimp + apply (rename_tac curvcpuactive) + apply csymbr + apply (rule_tac C'="{s. 
(reg = VCPURegSCTLR \ reg = VCPURegCNTV_CTL \ reg = VCPURegCPACR) \ \curvcpuactive }" + and Q="\s. (armHSCurVCPU \ ksArchState) s = Some (vcpuptr, curvcpuactive)" + and Q'=UNIV in ccorres_rewrite_cond_sr) + subgoal by (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU + simp: cur_vcpu_relation_def from_bool_0 vcpureg_eq_use_types + split: option.splits) + (* unification choking on schematics with pairs *) + apply (rule_tac A="vcpu_at' vcpuptr" and A'=UNIV in ccorres_guard_imp) + apply (rule ccorres_Cond_rhs, clarsimp) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: vcpu_read_reg_ccorres) + apply (fastforce intro!: ccorres_return_C) + apply wp + apply (simp (no_asm_simp)) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: vcpu_hw_read_reg_ccorres) + apply (fastforce intro!: ccorres_return_C) + apply wp + apply fastforce + apply fastforce + \ \no current vcpu\ + apply clarsimp + apply wpc + apply (rename_tac cur b, prop_tac "\cur", fastforce) + apply simp + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: vcpu_read_reg_ccorres) + apply (fastforce intro!: ccorres_return_C) + apply wp + apply fastforce + done + +crunches readVCPUReg + for st_tcb_at'[wp]: "\s. Q (st_tcb_at' P t s)" + and pspace_aligned'[wp]: "pspace_aligned'" + and pspace_distinct'[wp]: "pspace_distinct'" + (wp: crunch_wps simp: crunch_simps) + +lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres *) + notes Collect_const[simp del] + shows + "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart) + and vcpu_at' vcpuptr) + (UNIV \ \\vcpu = Ptr vcpuptr \ + \ \\field = of_nat (fromEnum reg) \ + \ \\call = from_bool isCall \) + hs + (do reply \ invokeVCPUReadReg vcpuptr reg; + liftE (replyOnRestart thread reply isCall) od) + (Call invokeVCPUReadReg_'proc)" + apply (cinit' lift: vcpu_' field_' call_' simp: invokeVCPUReadReg_def) + apply (clarsimp simp: bind_assoc) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=thread_' in ccorres_abstract, ceqv) + apply (rename_tac cthread, + rule_tac P="cthread = tcb_ptr_to_ctcb_ptr thread" in ccorres_gen_asm2) + apply (rule ccorres_pre_getCurThread, rename_tac curthread) + apply (rule_tac P="curthread = thread" in ccorres_gen_asm) + apply clarsimp + apply (ctac add: readVCPUReg_ccorres) + apply (rule ccorres_Cond_rhs_Seq[rotated]; clarsimp) + + \ \if we are not part of a call\ + apply (simp add: replyOnRestart_def liftE_def bind_assoc) + apply (rule getThreadState_ccorres_foo, rename_tac tstate) + apply (rule_tac P="tstate = Restart" in ccorres_gen_asm) + apply clarsimp + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac (no_vcg) add: setThreadState_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) + + \ \now if we are part of a call\ + apply (rule ccorres_rhs_assoc)+ + apply (rename_tac rval) + apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) + apply (rule_tac P="\s. 
ksCurThread s = thread" in ccorres_cross_over_guard) + apply (rule getThreadState_ccorres_foo, rename_tac tstate) + apply (rule_tac P="tstate = Restart" in ccorres_gen_asm) + apply (clarsimp simp: bind_assoc) + apply (simp add: replyFromKernel_def bind_assoc) + apply (ctac add: lookupIPCBuffer_ccorres) + apply (ctac add: setRegister_ccorres) + apply (simp add: setMRs_single) + apply (ctac add: setMR_as_setRegister_ccorres[where offset=0]) + apply clarsimp + apply csymbr + (* setMessageInfo_ccorres does not fire here, no idea why *) + apply (simp only: setMessageInfo_def bind_assoc) + apply ctac + apply simp + apply (ctac add: setRegister_ccorres) + apply (ctac add: setThreadState_ccorres) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) + apply (vcg exspec=setThreadState_modifies) + apply wpsimp + apply (vcg exspec=setRegister_modifies) + apply wpsimp + apply clarsimp + apply (vcg) + apply wpsimp + apply (clarsimp simp: msgInfoRegister_def AARCH64.msgInfoRegister_def Kernel_C.msgInfoRegister_def Kernel_C.X1_def) + apply (vcg exspec=setMR_modifies) + apply wpsimp + apply clarsimp + apply (vcg exspec=setRegister_modifies) + apply wpsimp + apply clarsimp + apply (vcg exspec=lookupIPCBuffer_modifies) + apply clarsimp + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_all_lift hoare_vcg_imp_lift) + apply clarsimp + apply (vcg exspec=readVCPUReg_modifies) + apply vcg + apply clarsimp + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + rf_sr_ksCurThread msgRegisters_unfold ThreadState_defs + seL4_MessageInfo_lift_def message_info_to_H_def mask_def) + apply (cases isCall; clarsimp) + apply (rule conjI, clarsimp simp: ct_in_state'_def st_tcb_at'_def comp_def) + apply (fastforce simp: obj_at'_def) + apply (clarsimp simp: AARCH64.badgeRegister_def AARCH64_H.badgeRegister_def C_register_defs) + apply (simp add: rf_sr_def cstate_relation_def Let_def) + apply (rule conjI, fastforce simp: pred_tcb_at'_def obj_at'_def ct_in_state'_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + done + +lemma liftE_invokeVCPUWriteReg_empty_return: + "liftE (invokeVCPUWriteReg vcpu reg val) >>=E (\rv. m rv) + = liftE (invokeVCPUWriteReg vcpu reg val) >>=E (\_. m [])" + unfolding invokeVCPUWriteReg_def + by (clarsimp simp: liftE_bindE bind_assoc) + +lemma invokeVCPUWriteReg_ccorres: + notes Collect_const[simp del] + shows + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and vcpu_at' vcpuptr) + (UNIV \ \\vcpu = Ptr vcpuptr \ + \ \\field = of_nat (fromEnum reg) \ + \ \\value = val \) + hs + (liftE (invokeVCPUWriteReg vcpuptr reg val)) + (Call invokeVCPUWriteReg_'proc)" + apply (cinit' lift: vcpu_' field_' value_' + simp: invokeVCPUWriteReg_def gets_bind_ign liftE_liftM) + apply clarsimp + apply (ctac (no_vcg) add: writeVCPUReg_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + by (wpsimp simp: invs_no_0_obj')+ + +lemma decodeVCPUWriteReg_ccorres: + notes if_cong[cong] Collect_const[simp del] + shows + "ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. 
ksCurThread s = thread) and ct_active' and sch_act_simple + and sysargs_rel args buffer + and (valid_cap' (ArchObjectCap cp)) and K (isVCPUCap cp)) + (UNIV \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. buffer_' s = option_to_ptr buffer}) hs + (decodeVCPUWriteReg args cp + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeVCPUWriteReg_'proc)" + apply (rule ccorres_grab_asm) + apply (cinit' lift: length___unsigned_long_' cap_' buffer_' simp: decodeVCPUWriteReg_def Let_def) + apply (rule ccorres_Cond_rhs_Seq ; clarsimp) + apply (rule_tac ccorres_gen_asm[where P="length args < 2"]) + apply clarsimp + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule_tac ccorres_gen_asm[where P="Suc 0 < length args"]) + apply clarsimp + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply (clarsimp simp: fromEnum_maxBound_vcpureg_def seL4_VCPUReg_Num_def hd_conv_nth[symmetric]) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: word_le_nat_alt throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: word_le_nat_alt) + apply (simp add: returnOk_bind bindE_assoc + performARMMMUInvocations performARMVCPUInvocation_def) + \ \we want the second alternative - nothing to return to user\ + apply (subst liftE_invokeVCPUWriteReg_empty_return, clarsimp) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (ctac add: invokeVCPUWriteReg_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply wp + apply (vcg exspec=invokeVCPUWriteReg_modifies) + apply (wpsimp wp: sts_invs_minor' ct_in_state'_set)+ + apply (vcg exspec=setThreadState_modifies) + apply clarsimp + apply (rule return_inv) (* force getting rid of schematic, wp does wrong thing here *) + apply (vcg exspec=getSyscallArg_modifies) + apply (rule return_inv) + apply (vcg exspec=getSyscallArg_modifies) + + apply (clarsimp simp: word_less_nat_alt word_le_nat_alt conj_commute + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + rf_sr_ksCurThread msgRegisters_unfold + valid_tcb_state'_def ThreadState_defs mask_def) + apply (rule conjI; clarsimp) \ \not enough args\ + apply (clarsimp simp: isCap_simps cap_get_tag_isCap capVCPUPtr_eq) + apply (subst from_to_enum; clarsimp simp: fromEnum_maxBound_vcpureg_def) + \ \enough args\ + apply (clarsimp simp: isCap_simps cap_get_tag_isCap capVCPUPtr_eq valid_cap'_def) + apply (subgoal_tac "args \ []") + prefer 2 subgoal by (cases args; clarsimp, unat_arith?) + by (fastforce simp: sysargs_rel_to_n ct_in_state'_def st_tcb_at'_def comp_def + elim: obj_at'_weakenE) + +lemma liftE_invokeVCPUInjectIRQ_empty_return: + "liftE (invokeVCPUInjectIRQ vcpu reg val) >>=E (\rv. m rv) + = liftE (invokeVCPUInjectIRQ vcpu reg val) >>=E (\_. 
m [])" + unfolding invokeVCPUInjectIRQ_def + by (clarsimp simp: liftE_bindE bind_assoc) + +lemma invokeVCPUInjectIRQ_ccorres: + notes Collect_const[simp del] + shows + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and vcpu_at' vcpuptr and K (idx < 64)) + (UNIV \ \\vcpu = Ptr vcpuptr \ + \ \\index = of_nat idx \ + \ \ virq_to_H \virq = virq \) + hs + (liftE (invokeVCPUInjectIRQ vcpuptr idx virq)) + (Call invokeVCPUInjectIRQ_'proc)" + apply (rule ccorres_grab_asm) + apply (cinit' lift: vcpu_' index_' virq_') + supply not_None_eq[simp del] + apply (simp add: invokeVCPUInjectIRQ_def gets_bind_ign liftE_liftM) + apply clarsimp + apply (rule_tac P="vcpuptr \ 0" in ccorres_gen_asm) + apply (rule ccorres_pre_getCurVCPU, rename_tac hsCurVCPU) + apply (rule_tac Q="\s. hsCurVCPU = (armHSCurVCPU \ ksArchState) s" + and Q'=UNIV + and C'="{s. hsCurVCPU \ None \ fst (the hsCurVCPU) = vcpuptr}" + in ccorres_rewrite_cond_sr_Seq) + apply (clarsimp) + apply (frule rf_sr_ksArchState_armHSCurVCPU) + apply (clarsimp simp: cur_vcpu_relation_def split_def split: option.splits) + apply (rule ccorres_Cond_rhs_Seq) + apply clarsimp + apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_lr_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg, clarsimp simp: return_def) + apply (rule wp_post_taut) + apply (simp only:) + apply (clarsimp simp: bind_assoc) + apply (rule ccorres_move_const_guards) + apply (rule ccorres_move_c_guard_vcpu) + apply (ctac (no_vcg) add: vgicUpdateLR_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg, clarsimp simp: return_def) + apply wpsimp+ + apply (clarsimp simp: unat_of_nat_eq word_of_nat_less) + apply (rule conjI) + apply (clarsimp elim: typ_at'_no_0_objD invs_no_0_obj') + apply (subst scast_eq_ucast; simp?) + apply (rule not_msb_from_less) + apply (clarsimp simp: word_less_nat_alt unat_of_nat_eq word_of_nat_less) + done + +(* Note: only works for virqEOIIRQEN = 1 because that is the only type we are using *) +lemma virq_virq_pending_EN_new_spec: + shows + "\s. \ \ {s} + Call virq_virq_pending_new_'proc + \ virqEOIIRQEN_' s = 1 \ virq_to_H \ret__struct_virq_C = makeVIRQ (virqGroup_' s) (virqPriority_' s) (virqIRQ_' s) \" + apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) + apply vcg + apply (clarsimp simp: virq_to_H_def makeVIRQ_def virq_virq_pending_def) + by (simp add: bit.disj_commute bit.disj_assoc bit.disj_ac) + +lemma decodeVCPUInjectIRQ_ccorres: + notes if_cong[cong] Collect_const[simp del] + (* csymbr will use this instead now *) + notes virq_virq_pending_new_spec = virq_virq_pending_EN_new_spec + shows + "ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and sysargs_rel args buffer + and (valid_cap' (ArchObjectCap cp)) + and K (isVCPUCap cp)) + (UNIV \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. 
buffer_' s = option_to_ptr buffer} + ) hs + (decodeVCPUInjectIRQ args cp + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeVCPUInjectIRQ_'proc)" + apply (rule ccorres_grab_asm) + apply (cinit' lift: length___unsigned_long_' cap_' buffer_' + simp: decodeVCPUInjectIRQ_def Let_def shiftL_nat ) + apply csymbr + apply csymbr + apply clarsimp + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_gen_asm[where P="\ 0 < length args"]) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule_tac ccorres_gen_asm[where P="0 < length args"]) + apply (prop_tac "args \ []") + apply fastforce + apply clarsimp + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply clarsimp + apply (rule ccorres_Cond_rhs_Seq) + apply ccorres_rewrite + apply (simp add: rangeCheck_def not_le[symmetric]) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply clarsimp + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: rangeCheck_def not_le[symmetric]) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply clarsimp + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: rangeCheck_def not_le[symmetric]) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + + apply (simp add: returnOk_bind bindE_assoc + performARMMMUInvocations performARMVCPUInvocation_def) + apply (clarsimp simp: rangeCheck_def not_le[symmetric] + liftE_liftM[symmetric] liftE_bindE_assoc) + + (* symbolically execute the gets on LHS *) + apply (rule_tac ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState, + rename_tac nregs) + + apply (rule_tac P="nregs \ max_armKSGICVCPUNumListRegs" in ccorres_gen_asm) + apply (rule_tac P="nregs \ max_armKSGICVCPUNumListRegs" + in ccorres_cross_over_guard_no_st) + + (* unfortunately directly looking at \gic_vcpu_num_list_regs means we need to abstract the + IF condition, and because of 32/64-bit casting we need to know \ max_armKSGICVCPUNumListRegs *) + apply (rule_tac Q="\s. valid_arch_state' s \ nregs = armKSGICVCPUNumListRegs (ksArchState s)" + and Q'=UNIV + and C'="{s. of_nat nregs \ (args ! 0 >> 32) && 0xFF}" + in ccorres_rewrite_cond_sr_Seq) + apply (clarsimp simp: not_le[symmetric] word_le_nat_alt unat_of_nat_eq) + apply (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + valid_arch_state'_def max_armKSGICVCPUNumListRegs_def + unat_of_nat64' unat_of_nat32') + + apply (rule ccorres_Cond_rhs_Seq) + apply (subgoal_tac "(of_nat nregs \ (args ! 
0 >> 32) && 0xFF)") + prefer 2 + subgoal by (simp add: word_le_nat_alt not_le) + + apply (simp add: rangeCheck_def not_le[symmetric]) + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + + (* can't use syscall_error_throwError_ccorres_n, since one of the globals updates reads a global + var itself: gic_vcpu_num_list_regs_', need to split off up to the first return_C else + vcg barfs *) + apply (rule ccorres_split_throws) + apply (rule_tac P="\s. valid_arch_state' s \ nregs = armKSGICVCPUNumListRegs (ksArchState s)" + and P'="UNIV" in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre) + apply (vcg exspec=invokeVCPUInjectIRQ_modifies) + apply (clarsimp split: prod.splits + simp: throwError_def return_def EXCEPTION_SYSCALL_ERROR_def + EXCEPTION_NONE_def syscall_error_rel_def syscall_error_to_H_def + syscall_error_type_defs) + apply (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def) + apply (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + valid_arch_state'_def max_armKSGICVCPUNumListRegs_def + unat_of_nat64' unat_of_nat32') + apply vcg + + apply (subgoal_tac "\ (of_nat nregs \ (args ! 0 >> 32) && 0xFF)") + prefer 2 + subgoal by (simp add: word_le_nat_alt not_le) + apply clarsimp + apply (rule ccorres_move_const_guard) + apply (rule ccorres_move_c_guard_vcpu) + apply (simp add: liftM_def) + apply (clarsimp simp: rangeCheck_def not_le[symmetric] + liftE_liftM[symmetric] liftE_bindE_assoc) + + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply (simp add: virq_virq_active_def) + +(* FIXME AARCH64 cleanup and re-indent needed in this lemma *) + +(* FIXME AARCH64 magic numbers: 3 here is the mask in virq_get_virqType, 28 is the shift *) + apply (rule_tac + P="ret__unsigned_longlong = + (vgicLR (vcpuVGIC vcpu) (unat ((args ! 0 >> 32) && 0xFF)) >> 28) && 3" + in ccorres_gen_asm2) + apply clarsimp + apply (rule ccorres_Cond_rhs_Seq) + apply (subgoal_tac "isVIRQActive (vgicLR (vcpuVGIC vcpu) (unat ((args ! 0 >> 32) && 0xFF)))") + prefer 2 + subgoal + apply (clarsimp simp: isVIRQActive_def virq_type_def word_unat_eq_iff) + done + + apply (simp add: rangeCheck_def not_le[symmetric]) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + + apply (subgoal_tac "\ isVIRQActive (vgicLR (vcpuVGIC vcpu) (unat ((args ! 
0 >> 32) && 0xFF)))") + prefer 2 + subgoal by (clarsimp simp: isVIRQActive_def virq_type_def word_unat_eq_iff) + + apply clarsimp + apply (simp add: returnOk_bind bindE_assoc + performARMMMUInvocations performARMVCPUInvocation_def) + apply csymbr + apply (subst liftE_invokeVCPUInjectIRQ_empty_return) + apply clarsimp + + \ \we want the second alternative - nothing to return to user\ + apply (ctac add: setThreadState_ccorres) + apply (ctac add: invokeVCPUInjectIRQ_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply wp + apply clarsimp + apply (vcg exspec=invokeVCPUInjectIRQ_modifies) + apply (wpsimp wp: sts_invs_minor' ct_in_state'_set)+ + apply (vcg exspec=setThreadState_modifies) + (* wp does wrong thing, have to clarsimp to use return_wp instead of getting asm schematic*) + apply clarsimp + apply (rule return_wp) + apply clarsimp + apply (vcg exspec=getSyscallArg_modifies) + + apply (clarsimp simp: word_less_nat_alt word_le_nat_alt conj_commute + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + rf_sr_ksCurThread msgRegisters_unfold + valid_tcb_state'_def ThreadState_defs mask_def) + + apply (frule invs_arch_state') + apply (clarsimp simp: valid_arch_state'_def max_armKSGICVCPUNumListRegs_def rf_sr_armKSGICVCPUNumListRegs) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap capVCPUPtr_eq) + apply (clarsimp simp: sysargs_rel_to_n word_le_nat_alt linorder_not_less) + apply (clarsimp simp: valid_cap'_def) + apply (clarsimp simp: not_le word_le_nat_alt) + + apply (subgoal_tac "armKSGICVCPUNumListRegs (ksArchState s) < 2 ^ LENGTH(machine_word_len)") + prefer 2 subgoal by (erule order_le_less_trans; simp) + + apply (safe; clarsimp?) + apply (simp add: unat_eq_zero) + apply (subgoal_tac "armKSGICVCPUNumListRegs (ksArchState s) < 2 ^ LENGTH(machine_word_len)") + prefer 2 subgoal by (erule order_le_less_trans; simp) + apply (erule order_less_le_trans) + apply (simp add: unat_of_nat_eq) + apply (fastforce simp: sysargs_rel_to_n ct_in_state'_def st_tcb_at'_def comp_def + elim: obj_at'_weakenE) + apply (fastforce simp: sysargs_rel_to_n ct_in_state'_def st_tcb_at'_def comp_def + elim: obj_at'_weakenE) + + apply (subgoal_tac "armKSGICVCPUNumListRegs (ksArchState s) < 2 ^ LENGTH(machine_word_len)") + prefer 2 subgoal by (erule order_le_less_trans; simp) + apply (erule order_less_le_trans) + apply (simp add: unat_of_nat_eq) + apply (clarsimp simp: typ_heap_simps') + apply (simp add: virq_get_tag_def mask_def shiftr_over_and_dist) + apply (simp add: cvcpu_relation_def cvgic_relation_def virq_to_H_def) + apply (clarsimp simp: cvcpu_relation_def cvgic_relation_def virq_get_tag_def + shiftr_over_and_dist mask_def cvcpu_vppi_masked_relation_def) + + apply (subgoal_tac "unat ((args ! 0 >> 32) && 0xFF) \ 63") + apply (rule sym) + apply simp + apply (fastforce simp: unat_of_nat_eq) + done + +lemma decodeVCPUReadReg_ccorres: + notes if_cong[cong] Collect_const[simp del] + shows + "\ isVCPUCap cp \ + \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and sysargs_rel args buffer + and (valid_cap' (ArchObjectCap cp))) + (UNIV \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. 
buffer_' s = option_to_ptr buffer} + \ \\call = from_bool isCall \) hs + (decodeVCPUReadReg args cp + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeVCPUReadReg_'proc)" + apply (cinit' lift: length___unsigned_long_' cap_' buffer_' call_') + apply (clarsimp simp: decodeVCPUReadReg_def Let_def) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule_tac P="args \ []" in ccorres_gen_asm) + apply clarsimp + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (clarsimp simp: fromEnum_maxBound_vcpureg_def seL4_VCPUReg_Num_def hd_conv_nth[symmetric]) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: word_le_nat_alt throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: word_le_nat_alt) + (* unpack invocationCatch and resolve non-determinism (copied from use of + invokeTCB_ReadRegisters_ccorres after unsuccessful attempts at abstraction) *) + apply (simp add: Collect_const returnOk_def uncurry_def) + apply (simp (no_asm) add: ccorres_invocationCatch_Inr split_def + performInvocation_def liftE_bindE bind_assoc) + apply (ctac add: setThreadState_ccorres) + apply (rule ccorres_nondet_refinement) + apply (rule is_nondet_refinement_bindE) + apply (rule is_nondet_refinement_refl) + apply (simp split: if_split) + apply (rule conjI[rotated], rule impI, rule is_nondet_refinement_refl) + apply (rule impI) + apply (rule is_nondet_refinement_alternative1) + apply csymbr + (* drill down to invoke level *) + apply (clarsimp simp: AARCH64_H.performInvocation_def performARMVCPUInvocation_def) + apply (clarsimp simp: liftE_bindE) + apply (rule ccorres_add_returnOk) + apply (ctac (no_vcg) add: invokeVCPUReadReg_ccorres) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply (wpsimp wp: sts_invs_minor' ct_in_state'_set)+ + apply (vcg exspec=setThreadState_modifies) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + + apply (clarsimp simp: word_le_nat_alt conj_commute + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + rf_sr_ksCurThread msgRegisters_unfold + valid_tcb_state'_def ThreadState_defs mask_def) + + apply (rule conjI; clarsimp) \ \no args\ + subgoal by (clarsimp simp: isCap_simps cap_get_tag_isCap capVCPUPtr_eq) + (subst from_to_enum; clarsimp simp: fromEnum_maxBound_vcpureg_def) + \ \at least one arg\ + apply (clarsimp simp: isCap_simps cap_get_tag_isCap capVCPUPtr_eq valid_cap'_def) + apply (subgoal_tac "args \ []") + prefer 2 apply (cases args; clarsimp, unat_arith?) 
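+   (* once args is known to be non-empty, the remaining syscall-argument and thread-state
+      side conditions follow by fastforce *)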
+ apply (fastforce simp: sysargs_rel_to_n ct_in_state'_def st_tcb_at'_def comp_def + elim: obj_at'_weakenE) + done + +lemma invokeVCPUSetTCB_ccorres: + notes Collect_const[simp del] + shows + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and tcb_at' tptr and vcpu_at' vcpuptr) + (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr tptr \ + \ \\vcpu = Ptr vcpuptr \) hs + (liftE (associateVCPUTCB vcpuptr tptr)) + (Call invokeVCPUSetTCB_'proc)" + apply (cinit' lift: tcb_' vcpu_' simp: gets_bind_ign liftE_liftM) + apply clarsimp + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: associateVCPUTCB_ccorres) + apply (clarsimp simp: return_def) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + by (wpsimp simp: invs_no_0_obj')+ + +lemma liftE_associateVCPUTCB_empty_return: + "liftE (associateVCPUTCB vcpu val) >>=E (\rv. m rv) + = liftE (associateVCPUTCB vcpu val) >>=E (\_. m [])" + unfolding associateVCPUTCB_def + by (clarsimp simp: liftE_bindE bind_assoc) + +lemma decodeVCPUSetTCB_ccorres: + notes if_cong[cong] Collect_const[simp del] + shows + "ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and (valid_cap' (ArchObjectCap cp)) + and (excaps_in_mem extraCaps \ ctes_of) + and K (isVCPUCap cp \ interpret_excaps extraCaps' = excaps_map extraCaps)) + (UNIV \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + ) hs + (decodeVCPUSetTCB cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeVCPUSetTCB_'proc)" + apply (rule ccorres_grab_asm) + apply (cinit' lift: cap_' current_extra_caps_' + simp: decodeVCPUSetTCB_def Let_def) + apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_Cond_rhs_Seq ; clarsimp) + apply (rule ccorres_split_throws) + apply (subgoal_tac "null extraCaps") + prefer 2 subgoal by (clarsimp simp: interpret_excaps_test_null excaps_map_def null_def) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply vcg + apply (subgoal_tac "extraCaps \ []") + prefer 2 subgoal by (clarsimp simp: idButNot_def interpret_excaps_test_null + excaps_map_def neq_Nil_conv) + apply (clarsimp simp: null_def bindE) + (* lookup first slot in extracaps and its type *) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply clarsimp + apply csymbr + apply (simp add: cap_case_TCBCap2 unlessE_def if_to_top_of_bind if_to_top_of_bindE + ccorres_seq_cond_raise) + apply (rule ccorres_cond2'[where R=\]) + apply (cases extraCaps ; clarsimp simp add: cap_get_tag_isCap cnode_cap_case_if) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply clarsimp + apply (simp add: returnOk_bind bindE_assoc + performARMMMUInvocations performARMVCPUInvocation_def) + \ \we want the second alternative - nothing to return to user\ + apply (subst liftE_associateVCPUTCB_empty_return, clarsimp) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply csymbr + apply (ctac add: invokeVCPUSetTCB_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, 
simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply wp + apply (vcg exspec=invokeVCPUSetTCB_modifies) + apply (wpsimp wp: sts_invs_minor' ct_in_state'_set)+ + apply (vcg exspec=setThreadState_modifies) + apply (wpsimp | wp (once) hoare_drop_imps)+ + apply vcg + + apply (clarsimp simp: word_less_nat_alt word_le_nat_alt conj_commute + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + rf_sr_ksCurThread msgRegisters_unfold + invs_pspace_aligned' invs_pspace_distinct' + valid_tcb_state'_def ThreadState_defs mask_def) + apply (clarsimp simp: idButNot_def interpret_excaps_test_null + excaps_map_def neq_Nil_conv) + apply (rule conjI; clarsimp) + apply (drule interpret_excaps_eq) + apply (clarsimp simp: excaps_in_mem_def slotcap_in_mem_def isCap_simps ctes_of_cte_at) + apply (rule conjI) + apply (fastforce simp: ct_in_state'_def st_tcb_at'_def comp_def elim: obj_at'_weakenE) + apply (rule conjI) + apply (fastforce simp: ct_in_state'_def st_tcb_at'_def comp_def + elim: obj_at'_weakenE dest!: interpret_excaps_eq) + apply (frule ctes_of_valid'; simp add: invs_valid_objs' valid_cap'_def) + apply (fastforce simp: isCap_simps valid_cap'_def valid_tcb_state'_def excaps_map_def + cap_get_tag_ThreadCap capVCPUPtr_eq isVCPUCap_def cap_get_tag_isCap + dest!: interpret_excaps_eq)[1] +done + +lemma invokeVCPUAckVPPI_ccorres: + notes Collect_const[simp del] + shows + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and vcpu_at' vcpuptr) + (UNIV \ \\vcpu = Ptr vcpuptr \ + \ \ unat \vppi = fromEnum vppi \) + hs + (liftE (invokeVCPUAckVPPI vcpuptr vppi)) + (Call invokeVCPUAckVPPI_'proc)" + apply (cinit' lift: vcpu_' vppi_' simp: liftE_liftM) + apply (simp add: invokeVCPUAckVPPI_def) + apply (rule ccorres_move_const_guards) + apply (rule ccorres_move_c_guard_vcpu) + apply (ctac (no_vcg) add: vcpuVPPIMasked_update_ccorres[ + where v=False, simplified from_bool_vals]) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply wpsimp+ + apply (case_tac vppi, simp add: fromEnum_def enum_vppievent_irq flip: word_unat.Rep_inject) + done + +lemma unat_of_nat_fromEnum_vppievent_irq[simp]: + "unat (of_nat (fromEnum (vppi :: vppievent_irq)) :: machine_word) = fromEnum vppi" + by (cases vppi, clarsimp simp: fromEnum_def enum_vppievent_irq) + +lemma liftE_invokeVCPUAckVPPI_empty_return: + "liftE (invokeVCPUAckVPPI vcpu val) >>=E (\rv. m rv) + = liftE (invokeVCPUAckVPPI vcpu val) >>=E (\_. m [])" + unfolding invokeVCPUAckVPPI_def + by (clarsimp simp: liftE_bindE bind_assoc) + +lemma decodeVCPUAckVPPI_ccorres: + notes if_cong[cong] Collect_const[simp del] + shows + "ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and sysargs_rel args buffer + and (valid_cap' (ArchObjectCap cp)) + and K (isVCPUCap cp)) + (UNIV \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. buffer_' s = option_to_ptr buffer} + ) hs + (decodeVCPUAckVPPI args cp + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeVCPUAckVPPI_'proc)" +proof - + + have ucast_scast_invalid[simp]: + "UCAST(32 signed \ 32) VPPIEventIRQ_invalid = SCAST(32 signed \ 32) VPPIEventIRQ_invalid" + by (simp flip: word_unat.Rep_inject add: VPPIEventIRQ_invalid_def) + + have irqVPPIEventIndex_not_invalid: + "\vppi. 
irqVPPIEventIndex (UCAST(machine_word_len \ irq_len) (args ! 0)) = Some vppi + \ of_nat (fromEnum vppi) \ SCAST(32 signed \ machine_word_len) VPPIEventIRQ_invalid" + by (clarsimp simp: irqVPPIEventIndex_def VPPIEventIRQ_invalid_def IRQ_def + fromEnum_def enum_vppievent_irq + split: if_splits) + + show ?thesis + apply (rule ccorres_grab_asm) + apply (cinit' lift: length___unsigned_long_' cap_' buffer_') + apply (clarsimp simp: decodeVCPUAckVPPI_def) + apply (csymbr, rename_tac cp') + apply csymbr + apply (rule ccorres_Cond_rhs_Seq ; clarsimp) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule_tac ccorres_gen_asm[where P="args \ []"], simp add: Let_def) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply csymbr + (* isolate checkIRQ for ctac by using injection_handler *) + apply (fold checkIRQ_def[simplified]) + apply (simp add: invocationCatch_use_injection_handler) + apply (simp add: split_def invocationCatch_use_injection_handler + injection_handler_bindE bindE_assoc) + apply (ctac add: ccorres_injection_handler_csum1[OF Arch_checkIRQ_ccorres]; clarsimp) + apply ccorres_rewrite + apply (prop_tac "toEnum (unat (args ! 0)) = UCAST(machine_word_len \ irq_len) (args ! 0)") + apply (fastforce simp: Kernel_C.maxIRQ_def word_le_nat_alt ucast_nat_def) + apply csymbr + apply clarsimp + (* simplify outcome of irqVPPIEventIndex_'proc *) + apply (rule_tac Q=\ and Q'=UNIV + and C'="{s. irqVPPIEventIndex (UCAST(machine_word_len \ irq_len) (args ! 0)) = None}" + in ccorres_rewrite_cond_sr_Seq) + apply (prop_tac "\ msb VPPIEventIRQ_invalid") + apply (solves \simp add: VPPIEventIRQ_invalid_def\) + apply (solves \clarsimp simp: irqVPPIEventIndex_not_invalid split: option.splits, + simp add: scast_eq_ucast\) + apply (rule ccorres_Cond_rhs_Seq) + apply (clarsimp simp: irqVPPIEventIndex_not_invalid; ccorres_rewrite) + apply (simp add: throwError_bind invocationCatch_def whenE_def injection_handler_throwError) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (solves \simp add: syscall_error_to_H_cases\) + + apply (clarsimp simp: irqVPPIEventIndex_not_invalid; ccorres_rewrite) + apply (simp add: injection_handler_returnOk ccorres_invocationCatch_Inr bindE_assoc) + apply (ctac add: setThreadState_ccorres) + apply (simp add: injection_handler_returnOk ccorres_invocationCatch_Inr + performInvocation_def AARCH64_H.performInvocation_def + performARMVCPUInvocation_def bindE_assoc) + \ \we want the second alternative - nothing to return to user\ + apply (subst liftE_invokeVCPUAckVPPI_empty_return, clarsimp) + apply (ctac add: invokeVCPUAckVPPI_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE; solves simp) + apply wpsimp+ + apply (vcg exspec=invokeVCPUAckVPPI_modifies) + apply (wpsimp wp: sts_invs_minor' ct_in_state'_set) + apply clarsimp + apply (vcg exspec=setThreadState_modifies) + apply (ccorres_rewrite) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply (wpsimp wp: injection_wp_E[OF refl] checkIRQ_ret_good) + apply clarsimp + apply (vcg exspec=Arch_checkIRQ_modifies) + apply wpsimp + apply (vcg exspec=getSyscallArg_modifies) 
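+     (* what remains are the guard obligations, split into the Haskell-side and C-side goals below *)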
+ + apply (clarsimp simp: cap_get_tag_isCap) + apply (cases args; clarsimp simp: unat_eq_0) + apply (rule conjI) + (* Haskell side *) + apply (clarsimp simp: excaps_in_mem_def slotcap_in_mem_def isCap_simps ctes_of_cte_at) + apply (clarsimp simp: word_le_nat_alt conj_commute + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + rf_sr_ksCurThread msgRegisters_unfold + valid_tcb_state'_def mask_def invs_pspace_aligned' invs_pspace_distinct' + valid_cap'_def ct_in_state'_def sysargs_rel_to_n st_tcb_at'_def comp_def + runnable'_eq) + apply (fastforce elim: obj_at'_weakenE) + (* C side *) + apply (clarsimp simp: word_le_nat_alt rf_sr_ksCurThread msgRegisters_unfold + Kernel_C.maxIRQ_def and_mask_eq_iff_le_mask capVCPUPtr_eq) + apply (clarsimp simp: mask_def) + done +qed + +lemma decodeARMVCPUInvocation_ccorres: + notes if_cong[cong] Collect_const[simp del] + shows + "\ interpret_excaps extraCaps' = excaps_map extraCaps ; isVCPUCap cp \ + \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps \ ctes_of) + and cte_wp_at' ((=) (ArchObjectCap cp) \ cteCap) slot + and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer and valid_objs' + and (valid_cap' (ArchObjectCap cp))) + (UNIV + \ {s. label___unsigned_long_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. slot_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. buffer_' s = option_to_ptr buffer} + \ \\call = from_bool isCall \) [] + (decodeARMVCPUInvocation label args cptr slot cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call decodeARMVCPUInvocation_'proc)" + apply (cinit' lift: label___unsigned_long_' length___unsigned_long_' slot_' current_extra_caps_' + cap_' buffer_' call_') + apply (clarsimp simp: decodeARMVCPUInvocation_def) + + apply (rule ccorres_Cond_rhs) + apply (simp add: invocation_eq_use_types) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call[OF decodeVCPUSetTCB_ccorres]; simp) + + apply (rule ccorres_Cond_rhs) + apply (simp add: invocation_eq_use_types) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call[OF decodeVCPUReadReg_ccorres]; simp) + + apply (rule ccorres_Cond_rhs) + apply (simp add: invocation_eq_use_types) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call[OF decodeVCPUWriteReg_ccorres]; simp) + + apply (rule ccorres_Cond_rhs) + apply (simp add: invocation_eq_use_types) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call[OF decodeVCPUInjectIRQ_ccorres]; simp) + + apply (rule ccorres_Cond_rhs) + apply (simp add: invocation_eq_use_types) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call[OF decodeVCPUAckVPPI_ccorres]; simp) + + \ \unknown (arch) invocation labels all throw IllegalOperation in line with the Haskell\ + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (intro allI, rule conseqPre, vcg) + subgoal + apply (clarsimp simp: invocation_eq_use_types + split: invocation_label.splits arch_invocation_label.splits) + apply (safe + ; simp add: invocation_eq_use_types throwError_invocationCatch fst_throwError_returnOk + exception_defs syscall_error_rel_def syscall_error_to_H_cases) + done + \ \preconditions imply calculated 
preconditions\ + apply auto + done + +lemma Arch_decodeInvocation_ccorres: + notes if_cong[cong] + assumes "interpret_excaps extraCaps' = excaps_map extraCaps" + shows + "ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps \ ctes_of) + and cte_wp_at' ((=) (ArchObjectCap cp) \ cteCap) slot + and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer and valid_objs') + (UNIV \ {s. label___unsigned_long_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. slot_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. buffer_' s = option_to_ptr buffer} + \ {s. call_' s = from_bool isCall}) [] + (Arch.decodeInvocation label args cptr slot cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeArchObject) + (Call Arch_decodeInvocation_'proc)" + (is "ccorres ?r ?xf ?P (?P' slot_') [] ?a ?c") +proof - + note trim_call = ccorres_trim_returnE[rotated 2, OF ccorres_call] + + have not_VCPUCap_case_helper_eq: + "\P Q. \ isVCPUCap cp \ (case cp of arch_capability.VCPUCap x \ P cp | _ \ Q cp) = Q cp" + by (clarsimp simp: isVCPUCap_def split: arch_capability.splits) + + from assms show ?thesis + apply (cinit' lift: label___unsigned_long_' length___unsigned_long_' slot_' + current_extra_caps_' cap_' buffer_' call_') + apply csymbr + apply (simp only: cap_get_tag_isCap_ArchObject AARCH64_H.decodeInvocation_def) + apply (rule ccorres_Cond_rhs) + apply wpc + apply (clarsimp simp: isVCPUCap_def)+ + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call, + rule decodeARMVCPUInvocation_ccorres, (simp add: isVCPUCap_def)+)[1] + (* will not rewrite any other way, and we do not want to repeat proof for each MMU cap case + of decodeARMMMUInvocation *) + apply (subst not_VCPUCap_case_helper_eq, assumption) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call, + rule decodeARMMMUInvocation_ccorres, simp+)[1] + apply (clarsimp simp: cte_wp_at_ctes_of ct_in_state'_def) + apply (drule_tac t="cteCap cte" in sym, simp) + apply (frule(1) ctes_of_valid', simp) + apply (clarsimp split: arch_capability.splits simp: isVCPUCap_def) + done +qed + +end + +end diff --git a/proof/crefine/AARCH64/CLevityCatch.thy b/proof/crefine/AARCH64/CLevityCatch.thy new file mode 100644 index 0000000000..0f30569422 --- /dev/null +++ b/proof/crefine/AARCH64/CLevityCatch.thy @@ -0,0 +1,258 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory CLevityCatch +imports + "CBaseRefine.Include_C" + ArchMove_C + "CParser.LemmaBucket_C" + "Lib.LemmaBucket" + Boolean_C +begin + +(* FIXME AARCH64: holding area for things to move to CParser/TypHeapLib or higher. + Check other architectures for use. 
*) + +lemma lift_t_Some_iff: + "lift_t g hrs p = Some v \ hrs_htd hrs, g \\<^sub>t p \ h_val (hrs_mem hrs) p = v" + unfolding hrs_htd_def hrs_mem_def by (cases hrs) (auto simp: lift_t_if) + +context + fixes p :: "'a::mem_type ptr" + fixes q :: "'b::c_type ptr" + fixes d g\<^sub>p g\<^sub>q + assumes val_p: "d,g\<^sub>p \\<^sub>t p" + assumes val_q: "d,g\<^sub>q \\<^sub>t q" + assumes disj: "typ_uinfo_t TYPE('a) \\<^sub>t typ_uinfo_t TYPE('b)" +begin + +lemma h_val_heap_same_typ_disj: + "h_val (heap_update p v h) q = h_val h q" + using disj by (auto intro: h_val_heap_same[OF val_p val_q] + simp: tag_disj_def sub_typ_proper_def field_of_t_def typ_tag_lt_def + field_of_def typ_tag_le_def) + +lemma h_val_heap_same_hrs_mem_update_typ_disj: + "h_val (hrs_mem (hrs_mem_update (heap_update p v) s)) q = h_val (hrs_mem s) q" + by (simp add: hrs_mem_update h_val_heap_same_typ_disj) + +end + +lemmas h_t_valid_nested_fields = + h_t_valid_field[OF h_t_valid_field[OF h_t_valid_field]] + h_t_valid_field[OF h_t_valid_field] + h_t_valid_field + +lemmas h_t_valid_fields_clift = + h_t_valid_nested_fields[OF h_t_valid_clift] + h_t_valid_clift + +lemma aligned_intvl_0: + "\ is_aligned p n; n < LENGTH('a) \ \ (0 \ {p..+2^n}) = (p = 0)" for p::"'a::len word" + apply (rule iffI; clarsimp simp: intvl_def) + apply (drule_tac d="of_nat k" in is_aligned_add_or) + apply (simp add: word_less_nat_alt unat_of_nat order_le_less_trans[rotated]) + apply word_eqI_solve + apply (rule_tac x=0 in exI) + apply simp + done + +lemma heap_list_h_eq_better: (* FIXME AARCH64: replace heap_list_h_eq *) + "\p. \ x \ {p..+q}; heap_list h q p = heap_list h' q p \ + \ h x = h' x" +proof (induct q) + case 0 thus ?case by simp +next + case (Suc n) thus ?case by (force dest: intvl_neq_start) +qed + +(* end holding area *) + + +context begin interpretation Arch . (*FIXME: arch_split*) + +(* Short-hand for unfolding cumbersome machine constants *) +(* FIXME MOVE these should be in refine, and the _eq forms should NOT be declared [simp]! *) + +declare word_neq_0_conv [simp del] + +(* Rule previously in the simpset, now not. 
*) +declare ptr_add_def' [simp] + +(* works much better *) +lemmas typ_heap_simps' = typ_heap_simps c_guard_clift + +lemmas asUser_return = submonad.return [OF submonad_asUser] + +lemmas asUser_bind_distrib = + submonad_bind [OF submonad_asUser submonad_asUser submonad_asUser] + +declare ef_dmo'[intro!, simp] + +(* FIXME: move to Kernel_C *) +(* adapted from include/arch/arm/arch/64/mode/machine/registerset.h *) +lemmas C_register_defs = + Kernel_C.X0_def + Kernel_C.capRegister_def + Kernel_C.badgeRegister_def + Kernel_C.X1_def + Kernel_C.msgInfoRegister_def + Kernel_C.X2_def + Kernel_C.X3_def + Kernel_C.X4_def + Kernel_C.X5_def + Kernel_C.X6_def + Kernel_C.X7_def + Kernel_C.X8_def + Kernel_C.X9_def + Kernel_C.X10_def + Kernel_C.X11_def + Kernel_C.X12_def + Kernel_C.X13_def + Kernel_C.X14_def + Kernel_C.X15_def + Kernel_C.X16_def + Kernel_C.X17_def + Kernel_C.X18_def + Kernel_C.X19_def + Kernel_C.X20_def + Kernel_C.X21_def + Kernel_C.X22_def + Kernel_C.X23_def + Kernel_C.X24_def + Kernel_C.X25_def + Kernel_C.X26_def + Kernel_C.X27_def + Kernel_C.X28_def + Kernel_C.X29_def + Kernel_C.X30_def + Kernel_C.LR_def + Kernel_C.SP_EL0_def + Kernel_C.ELR_EL1_def + Kernel_C.NextIP_def + Kernel_C.SPSR_EL1_def + Kernel_C.FaultIP_def + Kernel_C.TPIDR_EL0_def + Kernel_C.TLS_BASE_def + Kernel_C.TPIDRRO_EL0_def + + +(* + Kernel_C.ra_def Kernel_C.LR_def + Kernel_C.sp_def Kernel_C.SP_def + Kernel_C.gp_def Kernel_C.GP_def + Kernel_C.tp_def Kernel_C.TP_def + Kernel_C.TLS_BASE_def + Kernel_C.t0_def Kernel_C.t1_def Kernel_C.t2_def + Kernel_C.t3_def Kernel_C.t4_def Kernel_C.t5_def Kernel_C.t6_def + Kernel_C.s0_def Kernel_C.s1_def Kernel_C.s2_def Kernel_C.s3_def Kernel_C.s4_def + Kernel_C.s5_def Kernel_C.s6_def Kernel_C.s7_def Kernel_C.s8_def Kernel_C.s9_def + Kernel_C.s10_def Kernel_C.s11_def + Kernel_C.a0_def Kernel_C.a1_def Kernel_C.a2_def Kernel_C.a3_def Kernel_C.a4_def + Kernel_C.a5_def Kernel_C.a6_def Kernel_C.a7_def + Kernel_C.capRegister_def Kernel_C.badgeRegister_def Kernel_C.msgInfoRegister_def + Kernel_C.SCAUSE_def Kernel_C.SSTATUS_def Kernel_C.FaultIP_def Kernel_C.NextIP_def +*) + +(* Levity: moved from Retype_C (20090419 09:44:41) *) +lemma no_overlap_new_cap_addrs_disjoint: + "\ range_cover ptr sz (objBitsKO ko) n; + pspace_aligned' s; + pspace_no_overlap' ptr sz s \ \ + set (new_cap_addrs n ptr ko) \ dom (ksPSpace s) = {}" + apply (erule disjoint_subset [OF new_cap_addrs_subset, where sz1=sz]) + apply (clarsimp simp: More_Word_Operations.ptr_add_def field_simps) + apply (rule pspace_no_overlap_disjoint') + apply auto + done + +lemma empty_fail_getExtraCPtrs [intro!, simp]: + "empty_fail (getExtraCPtrs sendBuffer info)" + apply (simp add: getExtraCPtrs_def) + apply (cases info, simp) + apply (cases sendBuffer; fastforce) + done + +lemma empty_fail_loadCapTransfer [intro!, simp]: + "empty_fail (loadCapTransfer a)" + by (fastforce simp: loadCapTransfer_def capTransferFromWords_def) + +lemma empty_fail_emptyOnFailure [intro!, simp]: + "empty_fail m \ empty_fail (emptyOnFailure m)" + by (auto simp: emptyOnFailure_def catch_def split: sum.splits) + +lemma empty_fail_unifyFailure [intro!, simp]: + "empty_fail m \ empty_fail (unifyFailure m)" + by (auto simp: unifyFailure_def catch_def rethrowFailure_def + handleE'_def throwError_def + split: sum.splits) + +lemma asUser_get_registers: + "\tcb_at' target\ + asUser target (mapM getRegister xs) + \\rv s. obj_at' (\tcb. 
map ((user_regs o atcbContextGet o tcbArch) tcb) xs = rv) target s\" + apply (induct xs) + apply (simp add: mapM_empty asUser_return) + apply wp + apply simp + apply (simp add: mapM_Cons asUser_bind_distrib asUser_return empty_fail_cond) + apply wp + apply simp + apply (rule hoare_strengthen_post) + apply (erule hoare_vcg_conj_lift) + apply (rule asUser_inv) + apply (simp add: getRegister_def) + apply (wp mapM_wp') + apply clarsimp + apply (erule(1) obj_at_conj') + apply (wp) + apply (simp add: asUser_def split_def threadGet_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: getRegister_def simpler_gets_def + obj_at'_def) + done + +lemma exec_Basic_Guard_UNIV: + "Semantic.exec \ (Basic f;; Guard F UNIV (Basic g)) x y = + Semantic.exec \ (Basic (g o f)) x y" + apply (rule iffI) + apply (elim exec_elim_cases, simp_all, clarsimp)[1] + apply (simp add: o_def, rule exec.Basic) + apply (elim exec_elim_cases) + apply simp_all + apply (rule exec_Seq' exec.Basic exec.Guard | simp)+ + done + +end + +definition + "option_to_ptr \ Ptr o option_to_0" + +lemma option_to_ptr_simps [simp]: + "option_to_ptr None = NULL" + "option_to_ptr (Some x) = Ptr x" + by (auto simp: option_to_ptr_def split: option.split) + +lemma option_to_ptr_NULL_eq: + "\ option_to_ptr p = p' \ \ (p' = NULL) = (p = None \ p = Some 0)" + unfolding option_to_ptr_def option_to_0_def + by (clarsimp split: option.splits) + +lemma option_to_ptr_not_0: + "\ p \ 0 ; option_to_ptr v = Ptr p \ \ v = Some p" + by (clarsimp simp: option_to_ptr_def option_to_0_def split: option.splits) + +schematic_goal sz8_helper: + "((-1) << 8 :: addr) = ?v" + by (simp add: shiftl_t2n) + +lemmas reset_name_seq_bound_helper2 + = reset_name_seq_bound_helper[where sz=8 and v="v :: addr" for v, + simplified sz8_helper word_bits_def[symmetric], + THEN name_seq_bound_helper] + +end diff --git a/proof/crefine/AARCH64/CSpaceAcc_C.thy b/proof/crefine/AARCH64/CSpaceAcc_C.thy new file mode 100644 index 0000000000..60195f426b --- /dev/null +++ b/proof/crefine/AARCH64/CSpaceAcc_C.thy @@ -0,0 +1,377 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* collects lemmas common to the various CSpace branches *) + +theory CSpaceAcc_C +imports "Refine.EmptyFail" Ctac_lemmas_C +begin + +(* For resolving schematics *) +lemma lift_t_cslift: + "cslift x p = Some y \ + lift_t c_guard (hrs_mem (t_hrs_' (globals x)), hrs_htd (t_hrs_' (globals x))) p = Some y" + by (simp add: hrs_htd_def hrs_mem_def) + +context kernel begin + +lemma ccorres_pre_getNotification: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. \ntfn. ko_at' ntfn p s \ P ntfn s) + ({s'. \ntfn s. (s, s') \ rf_sr \ ko_at' ntfn p s \ s' \ P' ntfn}) + hs (getNotification p >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule get_ntfn_sp') + apply simp + apply assumption + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply simp + apply assumption + apply fastforce + done + +lemma ccorres_pre_getCTE: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\cte. ctes_of s p = Some cte \ P cte s)) + {s. \cte'. (cte_lift \\<^sub>m cslift s) (Ptr p) = Some cte' \ cl_valid_cte cte' \ s \ P' (cte_to_H cte') } + hs (getCTE p >>= (\rv. 
f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule getCTE_sp) + apply simp + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply (simp add: cte_wp_at_ctes_of) + apply assumption + apply (simp add: cte_wp_at_ctes_of) + apply (drule (1) rf_sr_ctes_of_clift) + apply clarsimp + apply (simp add: c_valid_cte_eq) + done + +lemmas ccorres_getCTE = ccorres_pre_getCTE + + + +lemma getCurThread_sp: + "\P\ getCurThread \\rv s. ksCurThread s = rv \ P s\" + by wpsimp + +lemma rf_sr_ksCurThread: + "(s, s') \ rf_sr \ ksCurThread_' (globals s') + = tcb_ptr_to_ctcb_ptr (ksCurThread s)" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + +lemma ccorres_pre_getCurThread: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. ksCurThread s = rv \ P rv s)) + {s. \rv. ksCurThread_' (globals s) = tcb_ptr_to_ctcb_ptr rv + \ s \ P' rv } + hs (getCurThread >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule getCurThread_sp) + apply (clarsimp simp: empty_fail_def getCurThread_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply (clarsimp simp: rf_sr_ksCurThread) + done + +lemma getSchedulerAction_sp: + "\P\ getSchedulerAction \\rv s. ksSchedulerAction s = rv \ P s\" + by wpsimp + +lemma ccorres_pre_getSchedulerAction: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. ksSchedulerAction s = rv \ P rv s)) + {s. \rv. cscheduler_action_relation rv (ksSchedulerAction_' (globals s)) + \ s \ P' rv } + hs (getSchedulerAction >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule getSchedulerAction_sp) + apply simp + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply (clarsimp dest!: rf_sr_sched_action_relation) + done + +lemma rf_sr_ksDomainTime: + "(s, s') \ rf_sr \ ksDomainTime_' (globals s') = ksDomainTime s" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + +lemma ccorres_pre_getDomainTime: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. ksDomainTime s = rv \ P rv s)) + {s. \rv. ksDomainTime_' (globals s) = rv + \ s \ P' rv } + hs (getDomainTime >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (simp add: getDomainTime_def) + apply (rule gets_sp) + apply (clarsimp simp: empty_fail_def getDomainTime_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply (clarsimp simp: rf_sr_ksDomainTime) + done + +lemma rf_sr_ksIdleThread: + "(s, s') \ rf_sr \ ksIdleThread_' (globals s') + = tcb_ptr_to_ctcb_ptr (ksIdleThread s)" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + +lemma getIdleThread_sp: + "\P\ getIdleThread \\rv s. ksIdleThread s = rv \ P s\" + by wpsimp + +lemma ccorres_pre_getIdleThread: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. ksIdleThread s = rv \ P rv s)) + {s. \rv. ksIdleThread_' (globals s) = tcb_ptr_to_ctcb_ptr rv + \ s \ P' rv } + hs (getIdleThread >>= (\rv. 
f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule getIdleThread_sp) + apply (clarsimp simp: empty_fail_def getIdleThread_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply (clarsimp simp: rf_sr_ksIdleThread) + done + +lemma curDomain_sp: + "\P\ curDomain \\rv s. ksCurDomain s = rv \ P s\" + apply wp + apply simp +done + +lemma rf_sr_ksCurDomain: + "(s, s') \ rf_sr \ ksCurDomain_' (globals s') + = ucast (ksCurDomain s)" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + +lemma ccorres_pre_curDomain: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. ksCurDomain s = rv \ P rv s)) + {s. \rv. ksCurDomain_' (globals s) = ucast rv + \ s \ P' rv } + hs (curDomain >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule curDomain_sp) + apply (clarsimp simp: empty_fail_def curDomain_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply (clarsimp simp: rf_sr_ksCurDomain) + done + +lemma scast_EXCPT_NONE [simp]: "scast EXCEPTION_NONE = EXCEPTION_NONE" + unfolding scast_def EXCEPTION_NONE_def + by simp + +lemma pageBitsForSize_spec: + "\s. \ \ \s. \pagesize < 3\ Call pageBitsForSize_'proc + \ \ret__unsigned_long = of_nat (pageBitsForSize (framesize_to_H \<^bsup>s\<^esup>pagesize)) \" + apply vcg + apply clarsimp + apply (clarsimp simp: pageBitsForSize_def framesize_to_H_def + ARMSmallPage_def ARMLargePage_def ARMHugePage_def + ARMSmallPageBits_def ARMLargePageBits_def ARMHugePageBits_def bit_simps) + apply (drule word_less_cases, erule disjE; simp)+ + done + +lemma updateMDB_pre_cte_at: + "\\s. \ (p \ 0 \ cte_at' p s) \ updateMDB p f \ \_ _. False \" + unfolding updateMDB_def Let_def + apply simp + apply (intro impI) + apply (wp getCTE_wp) + apply clarsimp + done + +lemma getSlotCap_pre_cte_at: + "\\s. \ cte_at' p s \ getSlotCap p \ \_ _. False \" + unfolding getSlotCap_def by (wpsimp wp: getCTE_wp) + +lemma updateCap_pre_cte_at: + "\\s. \ cte_at' p s \ updateCap p f \ \_ _. False \" + unfolding updateCap_def by (wpsimp wp: getCTE_wp) + +lemmas ccorres_updateMDB_cte_at = ccorres_guard_from_wp [OF updateMDB_pre_cte_at empty_fail_updateMDB] + ccorres_guard_from_wp_bind [OF updateMDB_pre_cte_at empty_fail_updateMDB] + +lemmas ccorres_getSlotCap_cte_at = ccorres_guard_from_wp [OF getSlotCap_pre_cte_at empty_fail_getSlotCap] + ccorres_guard_from_wp_bind [OF getSlotCap_pre_cte_at empty_fail_getSlotCap] + +lemmas ccorres_updateCap_cte_at = ccorres_guard_from_wp [OF updateCap_pre_cte_at empty_fail_updateCap] + ccorres_guard_from_wp_bind [OF updateCap_pre_cte_at empty_fail_updateCap] + +lemma array_assertion_abs_cnode_ctes: + "\s s'. (s, s') \ rf_sr \ (\n. gsCNodes s p = Some n \ n' \ 2 ^ n) \ True + \ (x s' = 0 \ array_assertion (cte_Ptr p) n' (hrs_htd (t_hrs_' (globals s'))))" + apply (clarsimp, drule(1) rf_sr_gsCNodes_array_assertion) + apply (metis array_assertion_shrink_right) + done + +lemmas ccorres_move_array_assertion_cnode_ctes [ccorres_pre] + = ccorres_move_Guard_Seq [OF array_assertion_abs_cnode_ctes] + ccorres_move_Guard [OF array_assertion_abs_cnode_ctes] + +lemma locateSlotCNode_ccorres [corres]: + assumes gl: "\v s. globals (xfu v s) = globals s" \ \for state rel. preservation\ + and fg: "\v s. 
xf (xfu (\_. v) s) = v" + shows "ccorres (\v v'. v' = Ptr v) xf \ {_. cnode = cnode' \ offset = offset'} hs + (locateSlotCNode cnode bits offset) + (Guard MemorySafety + {s. x s = 0 \ array_assertion (cte_Ptr cnode') (unat offset') (hrs_htd (t_hrs_' (globals s)))} + (Basic (\s. xfu (\_. cte_Ptr (cnode' + offset' + * of_nat (size_of TYPE(cte_C)))) s)))" + apply (simp add: locateSlot_conv split del: if_split) + apply (rule ccorres_guard_imp2) + apply (rule_tac P="cnode = cnode' \ offset = offset'" in ccorres_gen_asm2) + apply (rule ccorres_stateAssert) + apply (rule ccorres_move_array_assertion_cnode_ctes) + apply (rule ccorres_return[where R="\" and R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: fg rf_sr_def gl cte_level_bits_def field_simps) + apply (clarsimp simp: Collect_const_mem split: option.split_asm) + apply (rule unat_le_helper, simp) + done + +lemma locateSlotTCB_ccorres [corres]: + assumes gl: "\v s. globals (xfu v s) = globals s" \ \for state rel. preservation\ + and fg: "\v s. xf (xfu (\_. v) s) = v" + shows "ccorres (\v v'. v' = Ptr v) xf \ {_. cnode = cnode' \ offset = offset'} hs + (locateSlotTCB cnode offset) + (Basic (\s. xfu (\_. Ptr (cnode' + offset' * of_nat (size_of TYPE(cte_C))) :: cte_C ptr) s))" + unfolding locateSlot_conv using gl fg + apply - + apply (simp add: size_of_def split del: if_split) + apply (rule ccorres_return) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: fg objBits_simps cte_level_bits_def) + done + +lemma getSlotCap_h_val_ccorres [corres]: + fixes p :: "cstate \ cte_C ptr" + assumes gl: "\v s. globals (xfu v s) = globals s" \ \for state rel. preservation\ + and fg: "\v s. xf (xfu (\_. v) s) = v" + shows "ccorres ccap_relation xf \ {s. p s = Ptr a} hs + (getSlotCap a) (Basic (\s. xfu (\_. h_val (hrs_mem (t_hrs_' (globals s))) (Ptr &(p s\[''cap_C'']) :: cap_C ptr)) s))" + unfolding getSlotCap_def + apply (rule ccorres_add_UNIV_Int) + apply (cinitlift p) \ \EVIL!\ + apply simp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_pre_getCTE) + apply (rule_tac R = "\s. ctes_of s a = Some cte" in ccorres_return [where R' = UNIV]) + apply vcg + apply (clarsimp simp: gl fg cte_wp_at_ctes_of) + apply (erule (1) rf_sr_ctes_of_cliftE) + apply (clarsimp simp add: typ_heap_simps ccap_relation_def cte_to_H_def cl_valid_cte_def c_valid_cap_def) + apply simp + done + +lemma ccorres_pre_gets_armKSASIDTable_ksArchState: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. armKSASIDTable (ksArchState s) = rv \ P rv s)) + {s. \rv. s \ P' rv } hs + (gets (armKSASIDTable \ ksArchState) >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule gets_sp) + apply (clarsimp simp: empty_fail_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply clarsimp + done + +lemma ccorres_pre_gets_riscvKSASIDTable_ksArchState': + assumes cc: "\rv. ccorres r xf (P and (\s. rv = (armKSASIDTable \ ksArchState) s)) (P' rv) hs (f rv) c" + shows "ccorres r xf P {s. \rv. s \ P' rv } hs + (gets (armKSASIDTable \ ksArchState) >>= (\rv. 
f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule gets_sp) + apply (clarsimp simp: empty_fail_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply clarsimp + done + +end +end diff --git a/proof/crefine/AARCH64/CSpace_All.thy b/proof/crefine/AARCH64/CSpace_All.thy new file mode 100644 index 0000000000..5054835fd9 --- /dev/null +++ b/proof/crefine/AARCH64/CSpace_All.thy @@ -0,0 +1,357 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory CSpace_All +imports CSpace_RAB_C CSpace_C +begin + +context kernel_m +begin + + + + +abbreviation + "lookupCapAndSlot_xf \ liftxf errstate lookupCapAndSlot_ret_C.status_C + (\ x . (lookupCapAndSlot_ret_C.cap_C x, lookupCapAndSlot_ret_C.slot_C x)) + ret__struct_lookupCapAndSlot_ret_C_'" + + + + +(* FIXME: move *) +lemma ccorres_return_into_rel: + "ccorres (r \ f) xf G G' hs a c + \ ccorres r xf G G' hs (a >>= (\rv. return (f rv))) c" + by (simp add: liftM_def[symmetric]) + +lemma lookupCap_ccorres': + "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf + (valid_pspace' and tcb_at' a) + (UNIV \ {s. cPtr_' s = b} \ {s. thread_' s = tcb_ptr_to_ctcb_ptr a}) [] + (lookupCap a b) (Call lookupCap_'proc)" + apply (cinit lift: cPtr_' thread_' simp: lookupCapAndSlot_def liftME_def bindE_assoc) + + apply (ctac (no_vcg) add: lookupSlotForThread_ccorres') + \ \case where lu_ret.status is EXCEPTION_NONE\ + apply (simp add: split_beta cong:call_ignore_cong) + apply csymbr \ \call status_C_update\ + apply (simp add: Collect_const[symmetric] lookupSlot_raw_rel_def liftE_def + del: Collect_const) + apply (rule ccorres_move_c_guard_cte) + apply (ctac ) + apply (rule ccorres_return_CE [unfolded returnOk_def, simplified], simp+)[1] + apply wp + apply vcg + \ \case where lu_ret.status is *not* EXCEPTION_NONE\ + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr \ \call cap_null_cap_new_'proc\ + apply csymbr + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply (wp | simp)+ + + \ \last subgoal\ + apply (clarsimp simp: valid_pspace_valid_objs') + apply (intro impI conjI allI) + apply (simp add: lookupSlot_raw_rel_def) + apply (rule_tac y="ret___struct_lookupCap_ret_C_' s" for s + in arg_cong, fastforce) + apply simp + apply (case_tac err, simp+) [1] +done + +lemma lookupCap_ccorres: + "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf + (\s. invs' s \ (tcb_at' a s)) + (UNIV \ {s. cPtr_' s = b} \ {s. thread_' s = tcb_ptr_to_ctcb_ptr a}) [] + (lookupCap a b) (Call lookupCap_'proc)" + apply (rule ccorres_guard_imp2, rule lookupCap_ccorres') + apply fastforce + done + + +lemma lookupCapAndSlot_ccorres : + "ccorres + (lookup_failure_rel \ (\(c,s) (c',s'). ccap_relation c c' \ s'= Ptr s)) lookupCapAndSlot_xf + (\s. 
invs' s \ tcb_at' thread s) + (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ \ \\cPtr = cPtr\ ) [] + (lookupCapAndSlot thread cPtr) + (Call lookupCapAndSlot_'proc)" + apply (cinit lift: thread_' cPtr_') + + apply (ctac (no_vcg)) + \ \case where lu_ret.status is EXCEPTION_NONE\ + apply (simp add: split_beta cong:call_ignore_cong) + apply csymbr \ \call status_C_update\ + apply csymbr \ \call slot_C_update\ + apply (simp add: Collect_const[symmetric] lookupSlot_raw_rel_def liftE_bindE + del: Collect_const) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac P="cte_at' rv" and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: Bex_def in_monad getSlotCap_def in_getCTE2 cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (simp add: ccte_relation_ccap_relation typ_heap_simps') + \ \case where lu_ret.status is *not* EXCEPTION_NONE\ + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (rule ccorres_return_C_errorE, simp+)[1] + + apply vcg + apply (wp | simp)+ + + \ \last subgoal\ + apply clarsimp + apply (rule conjI, fastforce) + apply clarsimp + apply (case_tac err, simp+) [1] +done + + +lemma lookupErrorOnFailure_ccorres: + "ccorres (f \ r) xf P P' hs a c + \ ccorres ((\x y z. \w. x = FailedLookup isSource w \ f w y z) \ r) + xf P P' hs + (lookupErrorOnFailure isSource a) c" + apply (simp add: lookupError_injection injection_handler_liftM) + apply (erule ccorres_rel_imp) + apply (auto split: sum.split) + done + + +lemma lookup_failure_rel_fault_lift: + " \ st \ scast EXCEPTION_NONE; + lookup_failure_rel err st (errstate t)\ + \ \v. lookup_fault_lift (current_lookup_fault_' (globals t)) = Some v \ lookup_fault_to_H v = err" + apply (case_tac err, clarsimp+) + done + +lemma le_64_mask_eq: + "(bits::machine_word) \ 64 \ bits && mask 7 = bits" + apply (rule less_mask_eq, simp) + apply (erule le_less_trans, simp) + done + +lemma lookupSlotForCNodeOp_ccorres': + "ccorres + (syscall_error_rel \ (\w w'. w'= Ptr w \ depth \ word_bits)) lookupSlot_xf + (\s. valid_pspace' s \ s \' croot \ depth < 2 ^ word_bits) + (UNIV \ {s. capptr_' s = capptr} \ + {s. to_bool (isSource_' s) = isSource} \ + {s. ccap_relation croot (root___struct_cap_C_' s)} \ + {s. 
depth_' s = of_nat depth} ) [] + (lookupSlotForCNodeOp isSource croot capptr depth) + (Call lookupSlotForCNodeOp_'proc)" + apply (cinit lift: capptr_' isSource_' root___struct_cap_C_' depth_') + apply csymbr \ \slot_C_update\ + apply csymbr \ \cap_get_capType\ + + apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws2) + \ \correspondence of Haskell and C conditions\ + apply (clarsimp simp: Collect_const_mem cap_get_tag_isCap) + + \ \case where root is *not* a CNode => throw InvalidRoot\ + apply simp + apply (rule_tac P=\ and P' =UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def) + apply (clarsimp simp: EXCEPTION_NONE_def EXCEPTION_SYSCALL_ERROR_def) + apply (drule_tac lookup_fault_lift_invalid_root) + apply clarsimp + apply (subst syscall_error_to_H_cases(6), simp+)[1] + + \ \case where root is a CNode\ + apply (simp add: rangeCheck_def) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (rule_tac P="depth < 2 ^ word_bits" in ccorres_gen_asm) + apply (drule unat_of_nat64) + apply (simp add: unlessE_def fromIntegral_def integral_inv) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_split_throws) + apply (rule_tac P= \ and P' =UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + word_sle_def syscall_error_to_H_cases + word_size exception_defs) + apply vcg + apply csymbr + apply (rule_tac Q="\s. depth < 2 ^ word_bits" and Q'=\ in ccorres_split_unless_throwError_cond) + \ \correspondence of Haskell and C conditions\ + apply (clarsimp simp: Collect_const_mem fromIntegral_def integral_inv) + apply (simp add: word_size unat_of_nat64 word_less_nat_alt + word_less_1[symmetric] linorder_not_le) + + \ \case of RangeError\ + apply (rule_tac P= \ and P' =UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def) + apply (clarsimp simp: EXCEPTION_NONE_def EXCEPTION_SYSCALL_ERROR_def) + apply (subst syscall_error_to_H_cases(4), simp+)[1] + apply (simp add: word_size word_sle_def) + + \ \case where there is *no* RangeError\ + apply (rule_tac xf'=lookupSlot_xf in ccorres_rel_imp) + apply (rule_tac r="\w w'. w'= Ptr w" + and f="\ st fl es. 
fl = scast EXCEPTION_SYSCALL_ERROR \ + syscall_error_to_H (errsyscall es) (errlookup_fault es) = Some (FailedLookup isSource st)" + in lookupErrorOnFailure_ccorres) + apply (ctac (no_vcg)) \ \resolveAddressBits\ + \ \case where resolveAddressBits results in EXCEPTION_NONE\ + apply clarsimp + apply (rule_tac A=\ and A'=UNIV in ccorres_guard_imp2) + apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws2) + \ \correspondence of Haskell and C conditions\ + apply (clarsimp simp: Collect_const_mem unat_gt_0) + \ \case where bits are remaining\ + apply (rule_tac P= \ and P' =UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def) + apply (clarsimp simp: EXCEPTION_NONE_def EXCEPTION_SYSCALL_ERROR_def) + apply (subst syscall_error_to_H_cases(6), simp+)[1] + apply (simp add: lookup_fault_depth_mismatch_lift) + apply (erule le_64_mask_eq) + + \ \case where *no* bits are remaining\ + apply csymbr \ \slot_C_update\ + apply csymbr \ \status_C_update\ + apply (rule ccorres_return_CE, simp+)[1] + + apply vcg + + \ \guard_imp subgoal\ + apply clarsimp + + \ \case where resolveAddressBits does *not* result in EXCEPTION_NONE\ + apply clarsimp + apply (rule_tac P= \ and P' ="\\v. (lookup_fault_lift (\current_lookup_fault)) = Some v + \ lookup_fault_to_H v = err \" + in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def) + apply (clarsimp simp: EXCEPTION_NONE_def EXCEPTION_SYSCALL_ERROR_def) + apply (subst syscall_error_to_H_cases(6), simp+)[1] + apply wp + + \ \rel_imp\ + apply clarsimp + apply (case_tac x, clarsimp) + apply (simp add: syscall_error_rel_def errstate_def) + apply (clarsimp simp: word_bits_def word_size fromIntegral_def + integral_inv) + + apply vcg + apply vcg + + \ \last subgoal\ + apply (clarsimp simp: word_size fromIntegral_def integral_inv) + apply (case_tac "cap_get_tag root = scast cap_cnode_cap") + prefer 2 apply clarsimp + apply (clarsimp simp: unat_of_nat64 word_sle_def) + apply (simp add: Collect_const_mem lookup_failure_rel_fault_lift) + done + + +lemma lookupSlotForCNodeOp_ccorres: + "ccorres + (syscall_error_rel \ (\w w'. w'= Ptr w \ depth \ word_bits)) lookupSlot_xf + (\s. invs' s \ s \' croot \ depth < 2 ^ word_bits) + (UNIV \ {s. capptr_' s = capptr} \ + {s. to_bool (isSource_' s) = isSource} \ + {s. ccap_relation croot (root___struct_cap_C_' s)} \ + {s. depth_' s = of_nat depth} ) [] + (lookupSlotForCNodeOp isSource croot capptr depth) + (Call lookupSlotForCNodeOp_'proc)" + apply (rule ccorres_guard_imp2, rule lookupSlotForCNodeOp_ccorres') + apply fastforce + done + +lemma lookupSourceSlot_ccorres': + "ccorres + (syscall_error_rel \ (\w w'. w'= Ptr w \ depth \ word_bits)) lookupSlot_xf + (\s. valid_pspace' s \ s \' croot \ depth < 2 ^ word_bits) + (UNIV \ {s. capptr_' s = capptr} \ + {s. ccap_relation croot (root___struct_cap_C_' s)} \ + {s. depth_' s = of_nat depth} ) [] + (lookupSourceSlot croot capptr depth) + (Call lookupSourceSlot_'proc)" + apply (cinit lift: capptr_' root___struct_cap_C_' depth_') + apply (rule ccorres_trim_returnE) + apply simp + apply simp + apply (ctac add: lookupSlotForCNodeOp_ccorres') + apply clarsimp + done + +lemma lookupSourceSlot_ccorres: + "ccorres + (syscall_error_rel \ (\w w'. w'= Ptr w \ depth \ word_bits)) lookupSlot_xf + (\s. invs' s \ s \' croot \ depth < 2 ^ word_bits) + (UNIV \ {s. capptr_' s = capptr} \ + {s. ccap_relation croot (root___struct_cap_C_' s)} \ + {s. 
depth_' s = of_nat depth} ) [] + (lookupSourceSlot croot capptr depth) + (Call lookupSourceSlot_'proc)" + apply (rule ccorres_guard_imp2, rule lookupSourceSlot_ccorres') + apply fastforce + done + +lemma lookupTargetSlot_ccorres': + "ccorres + (syscall_error_rel \ (\w w'. w'= Ptr w \ depth \ word_bits)) lookupSlot_xf + (\s. valid_pspace' s \ s \' croot \ depth < 2 ^ word_bits) + (UNIV \ {s. capptr_' s = capptr} \ + {s. ccap_relation croot (root___struct_cap_C_' s)} \ + {s. depth_' s = of_nat depth} ) [] + (lookupTargetSlot croot capptr depth) + (Call lookupTargetSlot_'proc)" + apply (cinit lift: capptr_' root___struct_cap_C_' depth_') + apply (rule ccorres_trim_returnE) + apply simp + apply simp + apply (ctac add: lookupSlotForCNodeOp_ccorres') + apply clarsimp + done + +lemma lookupTargetSlot_ccorres: + "ccorres + (syscall_error_rel \ (\w w'. w'= Ptr w \ depth \ word_bits)) lookupSlot_xf + (\s. invs' s \ s \' croot \ depth < 2 ^ word_bits) + (UNIV \ {s. capptr_' s = capptr} \ + {s. ccap_relation croot (root___struct_cap_C_' s)} \ + {s. depth_' s = of_nat depth} ) [] + (lookupTargetSlot croot capptr depth) + (Call lookupTargetSlot_'proc)" + apply (rule ccorres_guard_imp2, rule lookupTargetSlot_ccorres') + apply fastforce + done + +lemma lookupPivotSlot_ccorres: + "ccorres + (syscall_error_rel \ (\w w'. w'= Ptr w \ depth \ word_bits)) lookupSlot_xf + (\s. invs' s \ s \' croot \ depth < 2 ^ word_bits) + (UNIV \ {s. capptr_' s = capptr} \ + {s. ccap_relation croot (root___struct_cap_C_' s)} \ + {s. depth_' s = of_nat depth} ) [] + (lookupPivotSlot croot capptr depth) + (Call lookupPivotSlot_'proc)" + apply (cinit lift: capptr_' root___struct_cap_C_' depth_') + apply (rule ccorres_trim_returnE) + apply simp + apply simp + apply (ctac add: lookupSlotForCNodeOp_ccorres) + apply clarsimp + done + +end +end diff --git a/proof/crefine/AARCH64/CSpace_C.thy b/proof/crefine/AARCH64/CSpace_C.thy new file mode 100644 index 0000000000..50d876f18a --- /dev/null +++ b/proof/crefine/AARCH64/CSpace_C.thy @@ -0,0 +1,3599 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + + +theory CSpace_C +imports CSpaceAcc_C Machine_C +begin + +unbundle l4v_word_context + +context kernel_m +begin + +lemma maskCapRights_cap_cases: + "return (maskCapRights R c) = + (case c of + ArchObjectCap ac \ return (Arch.maskCapRights R ac) + | EndpointCap _ _ _ _ _ _ \ + return (capEPCanGrantReply_update (\_. capEPCanGrantReply c \ capAllowGrantReply R) + (capEPCanGrant_update (\_. capEPCanGrant c \ capAllowGrant R) + (capEPCanReceive_update (\_. capEPCanReceive c \ capAllowRead R) + (capEPCanSend_update (\_. capEPCanSend c \ capAllowWrite R) c)))) + | NotificationCap _ _ _ _ \ + return (capNtfnCanReceive_update + (\_. capNtfnCanReceive c \ capAllowRead R) + (capNtfnCanSend_update + (\_. capNtfnCanSend c \ capAllowWrite R) c)) + | ReplyCap _ _ _ \ + return (capReplyCanGrant_update + (\_. capReplyCanGrant c \ capAllowGrant R) c) + | _ \ return c)" + apply (simp add: maskCapRights_def Let_def split del: if_split) + apply (cases c; simp add: isCap_simps split del: if_split) + done + + +(* FIXME x64: ucast? see how it goes *) +lemma wordFromVMRights_spec: + "\s. \ \ {s} Call wordFromVMRights_'proc \\ret__unsigned_long = \<^bsup>s\<^esup>vm_rights\" + by vcg simp? + +(* FIXME x64: ucast? see how it goes *) +lemma vmRightsFromWord_spec: + "\s. 
\ \ {s} Call vmRightsFromWord_'proc \\ret__unsigned_long = \<^bsup>s\<^esup>w\" + by vcg simp? + +lemmas vmrights_defs = + Kernel_C.VMReadOnly_def + Kernel_C.VMKernelOnly_def + Kernel_C.VMReadWrite_def + +lemma maskVMRights_spec: + "\s. \ \ ({s} \ + \ \vm_rights && mask 2 = \vm_rights \) + Call maskVMRights_'proc + \ vmrights_to_H \ret__unsigned_long = + maskVMRights (vmrights_to_H \<^bsup>s\<^esup>vm_rights) (cap_rights_to_H (seL4_CapRights_lift \<^bsup>s\<^esup>cap_rights_mask)) \ + \ret__unsigned_long && mask 2 = \ret__unsigned_long \ + \ret__unsigned_long \ 2 \" + apply vcg + apply (clarsimp simp: vmrights_defs vmrights_to_H_def maskVMRights_def mask_def + cap_rights_to_H_def to_bool_def + split: bool.split) + done + +lemma frame_cap_rights [simp]: + "cap_get_tag cap = scast cap_frame_cap + \ cap_frame_cap_CL.capFVMRights_CL (cap_frame_cap_lift cap) && mask 2 = + cap_frame_cap_CL.capFVMRights_CL (cap_frame_cap_lift cap)" + apply (simp add: cap_frame_cap_lift_def) + by (simp add: cap_lift_def cap_tag_defs mask_def word_bw_assocs) + +lemma Arch_maskCapRights_ccorres [corres]: + "ccorres ccap_relation ret__struct_cap_C_' + \ + (UNIV \ \ccap_relation (ArchObjectCap arch_cap) \cap\ \ + \ccap_rights_relation R \cap_rights_mask\) + [] + (return (Arch.maskCapRights R arch_cap)) + (Call Arch_maskCapRights_'proc)" + apply (cinit' lift: cap_' cap_rights_mask_') + apply csymbr + apply (unfold AARCH64_H.maskCapRights_def) + apply (simp only: Let_def) + apply (case_tac "cap_get_tag cap = scast cap_frame_cap") + apply (clarsimp simp add: ccorres_cond_iffs cap_get_tag_isCap isCap_simps split del: if_splits) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) + apply (clarsimp simp: return_def) + apply (unfold ccap_relation_def)[1] + apply (simp add: cap_frame_cap_lift [THEN iffD1]) + apply (clarsimp simp: cap_to_H_def) + apply (simp add: map_option_case split: option.splits) + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_split_asm) + apply (clarsimp simp: cap_frame_cap_lift_def) + apply (clarsimp simp: ccap_rights_relation_def cap_frame_cap_lift c_valid_cap_def + cl_valid_cap_def mask_eq_iff word_less_alt + split: option.splits cap_CL.splits) + apply (clarsimp simp: cap_frame_cap_lift_def) + apply (clarsimp simp: ccap_rights_relation_def c_valid_cap_def cap_frame_cap_lift + cl_valid_cap_def mask_eq_iff word_less_alt) + apply (clarsimp simp add: cap_get_tag_isCap isCap_simps simp del: not_ex) + apply (rule ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp add: return_def simp del: not_ex) + apply (cases arch_cap) + by (fastforce simp add: cap_get_tag_isCap isCap_simps simp del: not_ex simp_thms(44))+ + +lemma to_bool_cap_rights_bf: + "to_bool (capAllowRead_CL (seL4_CapRights_lift R)) = + to_bool_bf (capAllowRead_CL (seL4_CapRights_lift R))" + "to_bool (capAllowWrite_CL (seL4_CapRights_lift R)) = + to_bool_bf (capAllowWrite_CL (seL4_CapRights_lift R))" + "to_bool (capAllowGrant_CL (seL4_CapRights_lift R)) = + to_bool_bf (capAllowGrant_CL (seL4_CapRights_lift R))" + "to_bool (capAllowGrantReply_CL (seL4_CapRights_lift R)) = + to_bool_bf (capAllowGrantReply_CL (seL4_CapRights_lift R))" + by (subst to_bool_bf_to_bool_mask, + simp add: seL4_CapRights_lift_def mask_def word_bw_assocs, simp)+ + +lemma to_bool_ntfn_cap_bf: + "cap_lift c = Some (Cap_notification_cap cap) \ + to_bool (capNtfnCanSend_CL cap) = to_bool_bf (capNtfnCanSend_CL cap) \ + 
to_bool (capNtfnCanReceive_CL cap) = to_bool_bf (capNtfnCanReceive_CL cap)" + apply (simp add:cap_lift_def Let_def split: if_split_asm) + apply (subst to_bool_bf_to_bool_mask, + clarsimp simp: cap_lift_thread_cap mask_def word_bw_assocs)+ + apply simp + done + +lemma to_bool_reply_cap_bf: + "cap_lift c = Some (Cap_reply_cap cap) + \ to_bool (capReplyMaster_CL cap) = to_bool_bf (capReplyMaster_CL cap) + \ to_bool (capReplyCanGrant_CL cap) = to_bool_bf (capReplyCanGrant_CL cap)" + apply (simp add: cap_lift_def Let_def split: if_split_asm) + apply (subst to_bool_bf_to_bool_mask, + clarsimp simp: cap_lift_thread_cap mask_def word_bw_assocs)+ + apply simp + done + +lemma to_bool_ep_cap_bf: + "cap_lift c = Some (Cap_endpoint_cap cap) \ + to_bool (capCanSend_CL cap) = to_bool_bf (capCanSend_CL cap) \ + to_bool (capCanReceive_CL cap) = to_bool_bf (capCanReceive_CL cap) \ + to_bool (capCanGrant_CL cap) = to_bool_bf (capCanGrant_CL cap) \ + to_bool (capCanGrantReply_CL cap) = to_bool_bf (capCanGrantReply_CL cap)" + apply (simp add:cap_lift_def Let_def split: if_split_asm) + apply (subst to_bool_bf_to_bool_mask, + clarsimp simp: cap_lift_thread_cap mask_def word_bw_assocs)+ + apply simp + done + +lemma isArchCap_spec: + "\s. \\ {s} Call isArchCap_'proc \\ret__unsigned_long = from_bool (isArchCap_tag (cap_get_tag (cap_' s)))\" + apply vcg + apply (clarsimp simp: from_bool_def isArchCap_tag_def bool.split) + done + +lemma maskCapRights_ccorres [corres]: + "ccorres ccap_relation ret__struct_cap_C_' + \ + (UNIV \ \ccap_relation cap \cap\ \ \ccap_rights_relation R \cap_rights\) + [] + (return (RetypeDecls_H.maskCapRights R cap)) (Call maskCapRights_'proc)" + apply (cinit' lift: cap_' cap_rights_') + apply csymbr + apply (simp add: maskCapRights_cap_cases cap_get_tag_isCap del: Collect_const) + apply wpc + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply clarsimp + apply (simp add: cap_get_tag_isCap isCap_simps return_def) + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: return_def) + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply clarsimp + apply (simp add: cap_get_tag_isCap isCap_simps return_def) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply clarsimp + apply (unfold ccap_relation_def)[1] + apply (simp add: cap_notification_cap_lift [THEN iffD1]) + apply (clarsimp simp: cap_to_H_def) + apply (simp add: map_option_case split: option.splits) + apply (clarsimp simp add: cap_to_H_def Let_def + split: cap_CL.splits if_split_asm) + apply (simp add: cap_notification_cap_lift_def) + apply (simp add: ccap_rights_relation_def cap_rights_to_H_def + to_bool_ntfn_cap_bf + to_bool_mask_to_bool_bf to_bool_cap_rights_bf) + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap 
isCap_simps ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply clarsimp + apply (simp add: cap_get_tag_isCap isCap_simps return_def) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply (rule imp_ignore) + apply clarsimp + apply (unfold ccap_relation_def)[1] + apply (simp add: cap_endpoint_cap_lift [THEN iffD1]) + apply (clarsimp simp: cap_to_H_def) + apply (simp add: map_option_case split: option.splits) + apply (clarsimp simp add: cap_to_H_def Let_def + split: cap_CL.splits if_split_asm) + apply (simp add: cap_endpoint_cap_lift_def) + apply (simp add: ccap_rights_relation_def cap_rights_to_H_def + to_bool_ep_cap_bf + to_bool_mask_to_bool_bf to_bool_cap_rights_bf) + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: return_def) + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) + apply (simp add: Collect_const_mem) + apply (subst bind_return [symmetric]) + apply (rule ccorres_split_throws) + apply ctac + apply (rule_tac P=\ and P'="\\ret__struct_cap_C = ret__struct_cap_C\" in ccorres_inst) + apply (rule ccorres_from_vcg_throws) + apply (clarsimp simp: return_def) + apply (rule conseqPre) + apply vcg + apply clarsimp + apply wp + apply vcg + apply vcg + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) + apply ccorres_rewrite + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (simp add: cap_get_tag_isCap isCap_simps return_def) + apply clarsimp + apply (unfold ccap_relation_def)[1] + apply (simp add: cap_reply_cap_lift [THEN iffD1]) + apply (clarsimp simp: cap_to_H_def) + apply (simp add: map_option_case split: option.splits) + apply (clarsimp simp add: cap_to_H_def Let_def + split: cap_CL.splits if_split_asm) + apply (simp add: cap_reply_cap_lift_def) + apply (simp add: ccap_rights_relation_def cap_rights_to_H_def + to_bool_reply_cap_bf + to_bool_mask_to_bool_bf[simplified] to_bool_cap_rights_bf) + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: return_def) + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and 
P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply clarsimp + apply (simp add: cap_get_tag_isCap isCap_simps return_def) + apply (simp add: Collect_const_mem) + apply csymbr + apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: return_def) + apply clarsimp + done + +abbreviation + "lookupCap_xf \ liftxf errstate lookupCap_ret_C.status_C lookupCap_ret_C.cap_C ret__struct_lookupCap_ret_C_'" + +lemma ccorres_return_cte_cteCap [corres]: + fixes ptr' :: "cstate \ cte_C ptr" + assumes r1: "\s s' g. (s, s') \ rf_sr \ (s, xfu g s') \ rf_sr" + and xf_xfu: "\s g. xf (xfu g s) = g s" + shows "ccorres ccap_relation xf + (\s. ctes_of s ptr = Some cte) {s. ptr_val (ptr' s) = ptr} hs + (return (cteCap cte)) + (Basic (\s. xfu (\_. h_val (hrs_mem (t_hrs_' (globals s))) + (Ptr &(ptr' s \[''cap_C'']))) s))" + apply (rule ccorres_return) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: xf_xfu ccap_relation_def) + apply rule + apply (erule r1) + apply (drule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp: typ_heap_simps) + apply (simp add: c_valid_cte_def) +done + + +lemma ccorres_return_cte_mdbnode [corres]: + fixes ptr' :: "cstate \ cte_C ptr" + assumes r1: "\s s' g. (s, s') \ rf_sr \ (s, xfu g s') \ rf_sr" + and xf_xfu: "\s g. xf (xfu g s) = g s" + shows "ccorres cmdbnode_relation xf + (\s. ctes_of s ptr = Some cte) {s. ptr_val (ptr' s) = ptr} hs + (return (cteMDBNode cte)) + (Basic (\s. xfu (\_. h_val (hrs_mem (t_hrs_' (globals s))) + (Ptr &(ptr' s \[''cteMDBNode_C'']))) s))" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp add: return_def xf_xfu) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp: typ_heap_simps) + apply (erule r1) + done + + +(* FIXME: MOVE *) +lemma heap_update_field_ext: + "\field_ti TYPE('a :: packed_type) f = Some t; c_guard p; + export_uinfo t = export_uinfo (typ_info_t TYPE('b :: packed_type))\ + \ heap_update (Ptr &(p\f) :: 'b ptr) = + (\v hp. heap_update p (update_ti t (to_bytes_p v) (h_val hp p)) hp)" + apply (rule ext, rule ext) + apply (erule (2) heap_update_field) + done + +lemma ccorres_updateCap [corres]: + fixes ptr :: "cstate \ cte_C ptr" and val :: "cstate \ cap_C" + shows "ccorres dc xfdc \ + ({s. ccap_relation cap (val s)} \ {s. ptr s = Ptr dest}) hs + (updateCap dest cap) + (Basic + (\s. globals_update + (t_hrs_'_update + (hrs_mem_update (heap_update (Ptr &(ptr s\[''cap_C''])) (val s)))) s))" + unfolding updateCap_def + apply (cinitlift ptr) + apply (erule ssubst) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_pre_getCTE) + apply (rule_tac P = "\s. ctes_of s dest = Some rva" in + ccorres_from_vcg [where P' = "{s. 
ccap_relation cap (val s)}"]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply clarsimp + apply (rule fst_setCTE [OF ctes_of_cte_at], assumption) + apply (erule bexI [rotated]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp add: rf_sr_def cstate_relation_def + Let_def cpspace_relation_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"]) + apply (simp add:typ_heap_simps) + apply (rule conjI) + apply (erule (3) cpspace_cte_relation_upd_capI) + apply (frule_tac f="ksPSpace" in arg_cong) + apply (erule_tac t = s' in ssubst) + apply simp + apply (simp add: heap_to_user_data_def heap_to_device_data_def) + apply (rule conjI) + apply (erule (1) setCTE_tcb_case) + by (auto simp: carch_state_relation_def cmachine_state_relation_def) + +lemma ccorres_updateMDB_const [corres]: + fixes ptr :: "cstate \ cte_C ptr" and val :: "cstate \ mdb_node_C" + shows "ccorres dc xfdc (\_. dest \ 0) + ({s. cmdbnode_relation m (val s)} \ {s. ptr s = Ptr dest}) hs + (updateMDB dest (const m)) + (Basic + (\s. globals_update + (t_hrs_'_update + (hrs_mem_update (heap_update (Ptr &(ptr s\[''cteMDBNode_C''])) (val s)))) s))" + unfolding updateMDB_def + apply (cinitlift ptr) + apply (erule ssubst) + apply (rule ccorres_gen_asm [where G = \, simplified]) + apply (simp only: Let_def) + apply simp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_pre_getCTE) + apply (rule_tac P = "\s. ctes_of s dest = Some cte" in ccorres_from_vcg [where P' = "{s. cmdbnode_relation m (val s)}"]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply clarsimp + apply (rule fst_setCTE [OF ctes_of_cte_at], assumption ) + apply (erule bexI [rotated]) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp add: rf_sr_def cstate_relation_def typ_heap_simps + Let_def cpspace_relation_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"]) + apply (rule conjI) + apply (erule (3) cspace_cte_relation_upd_mdbI) + apply (erule_tac t = s' in ssubst) + apply (simp add: heap_to_user_data_def) + apply (rule conjI) + apply (erule (1) setCTE_tcb_case) + by (auto simp: carch_state_relation_def cmachine_state_relation_def) + +(* 64 == badgeBits *) +lemma cap_lift_capNtfnBadge_mask_eq: + "cap_lift cap = Some (Cap_notification_cap ec) + \ capNtfnBadge_CL ec && mask 64 = capNtfnBadge_CL ec" + unfolding cap_lift_def + by (fastforce simp: Let_def mask_def word_bw_assocs split: if_split_asm) + +lemma cap_lift_capEPBadge_mask_eq: + "cap_lift cap = Some (Cap_endpoint_cap ec) + \ capEPBadge_CL ec && mask 64 = capEPBadge_CL ec" + unfolding cap_lift_def + by (fastforce simp: Let_def mask_def word_bw_assocs split: if_split_asm) + +lemma Arch_isCapRevocable_spec: + "\s. \\ {\. s = \ \ True} + Call Arch_isCapRevocable_'proc + {t. \c c'. ccap_relation c (derivedCap_' s) \ ccap_relation c' (srcCap_' s) + \ ret__unsigned_long_' t = from_bool (Arch.isCapRevocable c c')}" + apply vcg + by (auto simp: AARCH64_H.isCapRevocable_def + cap_get_tag_isCap_unfolded_H_cap cap_tag_defs isCap_simps + cap_get_tag_isCap[unfolded, simplified] + split: capability.splits arch_capability.splits bool.splits) + +lemmas isCapRevocable_simps[simp] = Retype_H.isCapRevocable_def[split_simps capability.split] + +context begin (* revokable_ccorres *) + +private method revokable'_hammer = solves \( + simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs, + rule ccorres_guard_imp, + rule ccorres_return_C; clarsimp)\ + +lemma revokable_ccorres: + "ccorres (\a c. 
from_bool a = c) ret__unsigned_long_' + (\_. capMasterCap cap = capMasterCap parent \ is_simple_cap' cap) + (UNIV \ {s. ccap_relation cap (derivedCap_' s)} \ {s. ccap_relation parent (srcCap_' s)}) hs + (return (isCapRevocable cap parent)) + (Call isCapRevocable_'proc)" + apply (rule ccorres_gen_asm[where G=\, simplified]) + apply (cinit' lift: derivedCap_' srcCap_') + \ \Clear up Arch cap case\ + apply csymbr + apply (clarsimp simp: cap_get_tag_isCap simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_rhs_assoc) + apply (clarsimp simp: isCap_simps) + apply csymbr + apply (drule spec, drule spec, drule mp, fastforce) + apply ccorres_rewrite + apply (drule sym, simp only:) + apply (rule ccorres_return_C, clarsimp+) + apply csymbr + apply (rule_tac P'=UNIV and P=\ in ccorres_inst) + apply (cases cap) + \ \Uninteresting caps\ + apply revokable'_hammer+ + \ \NotificationCap\ + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) + apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) + apply (rule ccorres_return_C, clarsimp+) + apply (frule_tac cap'1=srcCap in cap_get_tag_NotificationCap[THEN iffD1]) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps is_simple_cap'_def) + apply (frule_tac cap'1=derivedCap in cap_get_tag_NotificationCap[THEN iffD1]) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) + apply (fastforce simp: cap_get_tag_isCap isCap_simps) + \ \IRQHandlerCap\ + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) + apply (rule ccorres_guard_imp, csymbr) + apply (rule ccorres_return_C, clarsimp+) + apply (fastforce simp: cap_get_tag_isCap isCap_simps) + \ \EndpointCap\ + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) + apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) + apply (rule ccorres_return_C, clarsimp+) + apply (frule_tac cap'1=srcCap in cap_get_tag_EndpointCap[THEN iffD1]) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps is_simple_cap'_def) + apply (frule_tac cap'1=derivedCap in cap_get_tag_EndpointCap[THEN iffD1]) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) + apply (fastforce simp: cap_get_tag_isCap isCap_simps) + \ \Other Caps\ + by (revokable'_hammer | fastforce simp: isCap_simps)+ + +end (* revokable_ccorres *) + +lemma cteInsert_ccorres_mdb_helper: + "\cmdbnode_relation rva srcMDB; from_bool rvc = (newCapIsRevocable :: machine_word); srcSlot = Ptr src\ + \ ccorres cmdbnode_relation newMDB_' (K (is_aligned src 3)) + UNIV hs + (return + (mdbFirstBadged_update (\_. rvc) + (mdbRevocable_update (\_. rvc) + (mdbPrev_update (\_. src) rva)))) + (\newMDB :== CALL mdb_node_set_mdbPrev(srcMDB, + ptr_val srcSlot);; + \newMDB :== CALL mdb_node_set_mdbRevocable(\newMDB, + newCapIsRevocable);; + \newMDB :== CALL mdb_node_set_mdbFirstBadged(\newMDB, + newCapIsRevocable))" + apply (rule ccorres_from_vcg) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: return_def cmdbnode_relation_def mask_def) + done + +lemma ccorres_updateMDB_set_mdbNext [corres]: + "src=src' \ + ccorres dc xfdc ((\_. src \ 0 \ is_aligned dest cteSizeBits \ canonical_address dest)) + ({s. mdb_node_ptr_' s = Ptr &((Ptr src' :: cte_C ptr)\[''cteMDBNode_C''])} \ + {s. v64_' s = dest}) [] + (updateMDB src (mdbNext_update (\_. 
dest))) + (Call mdb_node_ptr_set_mdbNext_'proc)" + unfolding updateMDB_def + apply (hypsubst) + apply (rule ccorres_gen_asm [where G = \, simplified]) + apply (simp only: Let_def) + apply simp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_pre_getCTE + [where P = "\cte s. ctes_of s src' = Some cte" + and P'= "\_. (\\mdb_node_ptr = Ptr &((Ptr src' :: cte_C ptr)\[''cteMDBNode_C''])\ + \ \\v64 = dest\)"]) + apply (rule ccorres_from_spec_modifies_heap) + apply (rule mdb_node_ptr_set_mdbNext_spec) + apply (rule mdb_node_ptr_set_mdbNext_modifies) + apply simp + apply clarsimp + apply (rule rf_sr_cte_at_valid) + apply simp + apply (erule ctes_of_cte_at) + apply assumption + apply clarsimp + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp: typ_heap_simps) + apply (rule fst_setCTE [OF ctes_of_cte_at], assumption) + apply (erule bexI [rotated]) + apply (clarsimp simp: rf_sr_def cstate_relation_def + Let_def cpspace_relation_def cte_wp_at_ctes_of heap_to_user_data_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"] + typ_heap_simps') + apply (rule conjI) + apply (erule (2) cspace_cte_relation_upd_mdbI) + apply (simp add: cmdbnode_relation_def) + apply (intro arg_cong[where f="\f. mdbNext_update f mdb" for mdb] ext word_eqI) + apply (match premises in C: "canonical_address _" and A: "is_aligned _ _" (multi) \ + \match premises in H[thin]: _ (multi) \ \insert C A\\) + apply (clarsimp simp: word_size) + apply (drule is_aligned_weaken[where y=2], simp add: objBits_defs) + apply (case_tac "n < 2"; case_tac "n \ canonical_bit"; + clarsimp simp: linorder_not_less linorder_not_le is_aligned_nth[THEN iffD1]) + apply (fastforce simp: canonical_bit_def) + apply (fastforce simp: canonical_address_range canonical_bit_def le_mask_high_bits_len) + apply (erule_tac t = s'a in ssubst) + apply simp + apply (rule conjI) + apply (erule (1) setCTE_tcb_case) + by (auto simp: carch_state_relation_def cmachine_state_relation_def) + +lemma ccorres_updateMDB_set_mdbPrev [corres]: + "src=src' \ + ccorres dc xfdc (\_. src \ 0 \ is_aligned dest cteSizeBits) + ({s. mdb_node_ptr_' s = Ptr &((Ptr src' :: cte_C ptr)\[''cteMDBNode_C''])} \ + {s. v64_' s = dest}) [] + (updateMDB src (mdbPrev_update (\_. dest))) + (Call mdb_node_ptr_set_mdbPrev_'proc)" + unfolding updateMDB_def + apply (hypsubst) + apply (rule ccorres_gen_asm [where G = \, simplified]) + apply (simp only: Let_def) + apply simp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_pre_getCTE + [where P = "\cte s. ctes_of s src' = Some cte" + and P' = "\_. 
(\\mdb_node_ptr = Ptr &((Ptr src' :: cte_C ptr)\[''cteMDBNode_C''])\ + \ \\v64 = dest\)"]) + apply (rule ccorres_from_spec_modifies_heap) + apply (rule mdb_node_ptr_set_mdbPrev_spec) + apply (rule mdb_node_ptr_set_mdbPrev_modifies) + apply simp + apply clarsimp + apply (rule rf_sr_cte_at_valid) + apply simp + apply (erule ctes_of_cte_at) + apply assumption + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp: typ_heap_simps) + apply (rule fst_setCTE [OF ctes_of_cte_at], assumption) + apply (erule bexI[rotated]) + apply (clarsimp simp: rf_sr_def cstate_relation_def + Let_def cpspace_relation_def cte_wp_at_ctes_of heap_to_user_data_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"] + typ_heap_simps') + apply (rule conjI) + apply (erule (2) cspace_cte_relation_upd_mdbI) + apply (simp add: cmdbnode_relation_def mask_def) + apply (erule_tac t = s'a in ssubst) + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (erule (1) setCTE_tcb_case) + by clarsimp + +lemma ccorres_updateMDB_skip: + "ccorres dc xfdc (\ and (\_. n = 0)) UNIV hs (updateMDB n f) SKIP" + unfolding updateMDB_def + apply (rule ccorres_gen_asm) + apply simp + apply (rule ccorres_return) + apply simp + apply vcg + done + +definition + "is_simple_cap_tag (tag :: machine_word) \ + tag \ scast cap_null_cap \ tag \ scast cap_irq_control_cap + \ tag \ scast cap_untyped_cap \ tag \ scast cap_reply_cap + \ tag \ scast cap_endpoint_cap \ tag \ scast cap_notification_cap + \ tag \ scast cap_thread_cap \ tag \ scast cap_cnode_cap + \ tag \ scast cap_zombie_cap \ tag \ scast cap_frame_cap" + +(* Useful: + apply (tactic {* let val _ = reset CtacImpl.trace_ceqv; val _ = reset CtacImpl.trace_ctac in all_tac end; *}) + *) +declare word_neq_0_conv [simp del] + +schematic_goal ccap_relation_tag_Master: + "\ccap. \ ccap_relation cap ccap \ + \ cap_get_tag ccap = + case_capability ?a ?b ?c ?d ?e ?f ?g + (case_arch_capability ?aa ?ab ?ac + (\base_ptr pt_t maddr. if pt_t = VSRootPT_T + then scast cap_vspace_cap + else scast cap_page_table_cap) ?ae) + ?h ?i ?j ?k + (capMasterCap cap)" + by (fastforce simp: ccap_relation_def map_option_Some_eq2 + Let_def cap_lift_def cap_to_H_def + split: if_split_asm) + +lemma ccap_relation_is_derived_tag_equal: + "\ is_derived' cs p cap cap'; ccap_relation cap ccap; ccap_relation cap' ccap' \ + \ cap_get_tag ccap' = cap_get_tag ccap" + unfolding badge_derived'_def is_derived'_def + by (clarsimp simp: ccap_relation_tag_Master) + +lemma ccap_relation_Master_tags_eq: + "\ capMasterCap cap = capMasterCap cap'; ccap_relation cap ccap; ccap_relation cap' ccap' \ + \ cap_get_tag ccap' = cap_get_tag ccap" + by (clarsimp simp: ccap_relation_tag_Master) + +lemma is_simple_cap_get_tag_relation: + "ccap_relation cap ccap + \ is_simple_cap_tag (cap_get_tag ccap) = is_simple_cap' cap" + apply (simp add: is_simple_cap_tag_def is_simple_cap'_def + cap_get_tag_isCap) + apply (auto simp: isCap_simps) + done + +lemma setUntypedCapAsFull_cte_at_wp [wp]: + "\ cte_at' x \ setUntypedCapAsFull rvb cap src \ \_. 
cte_at' x \" + apply (clarsimp simp: setUntypedCapAsFull_def) + apply wp + done + +lemma valid_cap_untyped_inv: + "valid_cap' (UntypedCap d r n f) s \ + n \ minUntypedSizeBits \ is_aligned (of_nat f :: machine_word) minUntypedSizeBits + \ n \ maxUntypedSizeBits \ n < word_bits" + apply (clarsimp simp:valid_cap'_def capAligned_def) + done + +lemma update_freeIndex': + assumes i'_align: "is_aligned (of_nat i' :: machine_word) minUntypedSizeBits" + assumes sz_bound: "sz \ maxUntypedSizeBits" + assumes i'_bound: "i'\ 2^sz" + shows "ccorres dc xfdc + (cte_wp_at' (\cte. \i. cteCap cte = capability.UntypedCap d p sz i) srcSlot) + (UNIV \ \\cap_ptr = cap_Ptr &(cte_Ptr srcSlot\[''cap_C''])\ + \ \\v64 = of_nat i' >> minUntypedSizeBits\) [] + (updateCap srcSlot (capability.UntypedCap d p sz i')) + (Call cap_untyped_cap_ptr_set_capFreeIndex_'proc)" + proof - + note i'_bound_concrete + = order_trans[OF i'_bound power_increasing[OF sz_bound], simplified untypedBits_defs, simplified] + have i'_bound_word: "(of_nat i' :: machine_word) \ 2 ^ maxUntypedSizeBits" + using order_trans[OF i'_bound power_increasing[OF sz_bound], simplified] + by (simp add: word_of_nat_le untypedBits_defs) + show ?thesis + apply (cinit lift: cap_ptr_' v64_') + apply (rule ccorres_pre_getCTE) + apply (rule_tac P="\s. ctes_of s srcSlot = Some cte \ (\i. cteCap cte = UntypedCap d p sz i)" + in ccorres_from_vcg[where P' = UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: guard_simps) + apply (intro conjI) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp: typ_heap_simps) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp: split_def) + apply (simp add: hrs_htd_def typ_heap_simps) + apply (rule fst_setCTE[OF ctes_of_cte_at], assumption) + apply (erule bexI[rotated], clarsimp) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"]) + apply (simp add: cpspace_relation_def) + apply (clarsimp simp: typ_heap_simps') + apply (rule conjI) + apply (erule (2) cpspace_cte_relation_upd_capI) + apply (simp only: cte_lift_def split: option.splits; simp) + apply (simp add: cap_to_H_def Let_def split: cap_CL.splits if_split_asm) + apply (case_tac y) + apply (simp add: cap_lift_def Let_def split: if_split_asm) + apply (case_tac cte', simp) + apply (clarsimp simp: ccap_relation_def cap_lift_def cap_get_tag_def cap_to_H_def) + apply (thin_tac _)+ + apply (simp add: mask_def to_bool_and_1 nth_shiftr word_ao_dist and.assoc) + apply (rule inj_onD[OF word_unat.Abs_inj_on[where 'a=machine_word_len]], simp) + apply (cut_tac i'_align i'_bound_word) + apply (simp add: is_aligned_mask) + apply word_bitwise + subgoal by (simp add: word_size untypedBits_defs mask_def) + apply (cut_tac i'_bound_concrete) + subgoal by (simp add: unats_def) + subgoal by (simp add: word_unat.Rep[where 'a=machine_word_len, simplified]) + apply (erule_tac t = s' in ssubst) + apply clarsimp + apply (rule conjI) + subgoal by (erule (1) setCTE_tcb_case) + apply (clarsimp simp: carch_state_relation_def cmachine_state_relation_def + packed_heap_update_collapse_hrs) + by (clarsimp simp: cte_wp_at_ctes_of) + qed + +lemma update_freeIndex: + "ccorres dc xfdc + (valid_objs' and cte_wp_at' (\cte. \i. cteCap cte = UntypedCap d p sz i) srcSlot + and (\_. 
is_aligned (of_nat i' :: machine_word) minUntypedSizeBits \ i' \ 2 ^ sz)) + (UNIV \ \\cap_ptr = cap_Ptr &(cte_Ptr srcSlot\[''cap_C''])\ + \ \\v64 = of_nat i' >> minUntypedSizeBits\) [] + (updateCap srcSlot (UntypedCap d p sz i')) + (Call cap_untyped_cap_ptr_set_capFreeIndex_'proc)" + apply (rule ccorres_assume_pre, rule ccorres_guard_imp) + apply (rule update_freeIndex'; clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte; clarsimp dest!: ctes_of_valid_cap' simp: valid_cap'_def) + by auto + +lemma capBlockSize_CL_maxSize: + "\ cap_get_tag c = scast cap_untyped_cap \ \ capBlockSize_CL (cap_untyped_cap_lift c) < 0x40" + apply (clarsimp simp: cap_untyped_cap_lift_def) + apply (clarsimp simp: cap_lift_def) + apply (clarsimp simp: cap_untyped_cap_def cap_null_cap_def) + apply (rule word_and_less') + apply (simp add: mask_def) + done + +lemma setUntypedCapAsFull_ccorres [corres]: + notes if_split [split del] + notes Collect_const [simp del] + notes Collect_True [simp] Collect_False [simp] + shows + "ccorres dc xfdc + ((cte_wp_at' (\c. (cteCap c) = srcCap) srcSlot) and valid_mdb' and pspace_aligned' and valid_objs' + and (K (isUntypedCap newCap \ (minUntypedSizeBits \ capBlockSize newCap))) + and (K (isUntypedCap srcCap \ (minUntypedSizeBits \ capBlockSize srcCap)))) + (UNIV \ {s. ccap_relation srcCap (srcCap_' s)} + \ {s. ccap_relation newCap (newCap_' s)} + \ {s. srcSlot_' s = Ptr srcSlot}) + [] + (setUntypedCapAsFull srcCap newCap srcSlot) + (Call setUntypedCapAsFull_'proc)" + apply (cinit lift: srcCap_' newCap_' srcSlot_') + apply (rule ccorres_if_lhs) + apply (clarsimp simp: isCap_simps) + apply csymbr + apply csymbr + apply (simp add: if_then_0_else_1 if_then_1_else_0 cap_get_tag_isCap_unfolded_H_cap) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (simp add: cap_get_tag_isCap_unfolded_H_cap ccorres_cond_univ_iff) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply (frule cap_get_tag_to_H(9)) + apply (simp add: cap_get_tag_isCap_unfolded_H_cap) + apply (rotate_tac 1) + apply (frule cap_get_tag_to_H(9)) + apply (simp add: cap_get_tag_isCap_unfolded_H_cap) + apply simp + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply (simp add: ccorres_cond_univ_iff) + apply csymbr+ + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_Guard) + apply (rule ccorres_call) + apply (rule update_freeIndex) + apply simp + apply simp + apply simp + apply clarsimp + apply (csymbr) + apply (csymbr) + apply (simp add: cap_get_tag_isCap) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (simp add: cap_get_tag_isCap) + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_cases [where P="capPtr srcCap = capPtr newCap"]) + apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply (clarsimp simp: cap_get_tag_to_H cap_get_tag_UntypedCap split: if_split_asm) + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_Skip) + apply clarsimp + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: cap_get_tag_isCap[symmetric] 
cap_get_tag_UntypedCap) + apply (frule(1) cte_wp_at_valid_objs_valid_cap') + apply (clarsimp simp: untypedBits_defs) + apply (intro conjI impI allI) + apply (erule cte_wp_at_weakenE') + apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) + apply clarsimp + apply (drule valid_cap_untyped_inv,clarsimp simp:max_free_index_def) + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) + apply assumption + apply (clarsimp simp: max_free_index_def shiftL_nat valid_cap'_def capAligned_def) + apply (simp add:power_minus_is_div unat_sub word_le_nat_alt t2p_shiftr) + apply clarsimp + apply (erule cte_wp_at_weakenE', simp) + apply clarsimp + apply (drule valid_cap_untyped_inv) + apply (clarsimp simp: max_free_index_def t2p_shiftr unat_sub word_le_nat_alt word_bits_def) + apply (rule word_less_imp_diff_less) + apply (subst (asm) eq_commute, fastforce simp: unat_sub word_le_nat_alt) + apply (rule capBlockSize_CL_maxSize) + apply (clarsimp simp: cap_get_tag_UntypedCap) + apply (clarsimp simp: cap_get_tag_isCap_unfolded_H_cap) + done + +lemma ccte_lift: + "\(s, s') \ rf_sr; cslift s' (cte_Ptr p) = Some cte'; + cte_lift cte' = Some y; c_valid_cte cte'\ + \ ctes_of s p = Some (cte_to_H (the (cte_lift cte')))" + apply (clarsimp simp:rf_sr_def cstate_relation_def Let_def cpspace_relation_def) + apply (drule(1) cmap_relation_cs_atD) + apply simp + apply (clarsimp simp:ccte_relation_def) + done + +lemma cmdb_node_relation_mdbNext: + "cmdbnode_relation n n' + \ mdbNext_CL (mdb_node_lift n') = mdbNext n" + by (simp add:cmdbnode_relation_def) + +lemma cslift_ptr_safe: + "cslift x ptr = Some a + \ ptr_safe ptr (hrs_htd (t_hrs_' (globals x)))" + apply (rule_tac h = "fst (t_hrs_' (globals x))" + in lift_t_ptr_safe[where g = c_guard]) + apply (fastforce simp add:typ_heap_simps hrs_htd_def) + done + +lemma ccorres_move_ptr_safe: + "ccorres_underlying rf_sr \ r xf arrel axf A C' hs a c \ + ccorres_underlying rf_sr \ r xf arrel axf + (A and K (dest = cte_Ptr (ptr_val dest)) and cte_wp_at' (\_. True) (ptr_val dest)) + (C' \ \True\) hs a (Guard MemorySafety \ptr_safe (dest) (hrs_htd \t_hrs) \ c)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_Guard) + apply simp + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule(1) rf_sr_ctes_of_clift) + apply (case_tac dest) + apply (clarsimp simp:ptr_coerce_def) + apply (erule cslift_ptr_safe) + done + +lemma ccorres_move_ptr_safe_Seq: + "ccorres_underlying rf_sr \ r xf arrel axf A C' hs a (c;;d) \ + ccorres_underlying rf_sr \ r xf arrel axf + (A and cte_wp_at' (\_. True) (ptr_val dest) and K (dest = cte_Ptr (ptr_val dest))) + (C' \ \True\) hs a + (Guard MemorySafety \ptr_safe (dest) (hrs_htd \t_hrs) \ c;;d)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_Guard_Seq) + apply simp + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule(1) rf_sr_ctes_of_clift) + apply clarsimp + apply (erule cslift_ptr_safe) + done + +lemmas ccorres_move_guard_ptr_safe = ccorres_move_ptr_safe_Seq ccorres_move_ptr_safe + +lemma cteInsert_ccorres: + "ccorres dc xfdc + (cte_wp_at' (\scte. capMasterCap (cteCap scte) = capMasterCap cap \ is_simple_cap' cap) src + and valid_mdb' and valid_objs' and pspace_aligned' and pspace_canonical' + and (valid_cap' cap)) + (UNIV \ {s. destSlot_' s = Ptr dest} + \ {s. srcSlot_' s = Ptr src} + \ {s. 
ccap_relation cap (newCap_' s)}) [] + (cteInsert cap src dest) + (Call cteInsert_'proc)" + supply ctes_of_aligned_bits[simp] + apply (cinit (no_ignore_call) lift: destSlot_' srcSlot_' newCap_' + simp del: return_bind simp add: Collect_const) + apply (rule ccorres_move_c_guard_cte) + apply (ctac pre: ccorres_pre_getCTE) + apply (rule ccorres_move_c_guard_cte) + apply (ctac pre: ccorres_pre_getCTE) + apply (ctac (no_vcg) add: revokable_ccorres) + apply (ctac (c_lines 3) add: cteInsert_ccorres_mdb_helper) + apply (simp del: Collect_const) + apply (rule ccorres_pre_getCTE ccorres_assert)+ + apply (ctac add: setUntypedCapAsFull_ccorres) + apply (rule ccorres_move_c_guard_cte) + apply (ctac) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_move_c_guard_cte) + apply (ctac(no_vcg)) + apply csymbr + apply (erule_tac t = ret__unsigned_longlong in ssubst) + apply (rule ccorres_cond_both [where R = \, simplified]) + apply (erule mdbNext_not_zero_eq) + apply csymbr + apply simp + apply (rule ccorres_move_c_guard_cte) + apply (ctac ccorres:ccorres_updateMDB_set_mdbPrev) + apply (ctac ccorres: ccorres_updateMDB_skip) + apply (wp hoare_weak_lift_imp)+ + apply (clarsimp simp: Collect_const_mem split del: if_split) + apply vcg + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) + apply vcg + apply (clarsimp simp:cmdb_node_relation_mdbNext) + apply (wp setUntypedCapAsFull_cte_at_wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) + apply (vcg exspec=setUntypedCapAsFull_modifies) + apply wp + apply vcg + apply wp + apply wp + apply vcg + apply wp + apply vcg + apply (simp add: Collect_const_mem split del: if_split) \ \Takes a while\ + apply (rule conjI) + apply (clarsimp simp: conj_comms cte_wp_at_ctes_of) + apply (intro conjI) + apply clarsimp + apply simp + apply simp + apply (clarsimp simp: ctes_of_canonical objBits_defs cte_level_bits_def) + apply (rule conjI) + apply (clarsimp simp: isUntypedCap_def split: capability.split_asm) + apply (frule valid_cap_untyped_inv) + apply clarsimp + apply (rule conjI) + apply (case_tac ctea) + apply (clarsimp simp: isUntypedCap_def split: capability.splits) + apply (frule valid_cap_untyped_inv[OF ctes_of_valid_cap']) + apply fastforce + apply clarsimp+ + apply (drule valid_dlist_nextD) + apply (simp add:valid_mdb'_def valid_mdb_ctes_def) + apply simp + apply clarsimp + apply (clarsimp simp: map_comp_Some_iff cte_wp_at_ctes_of + split del: if_split) + apply (clarsimp simp: typ_heap_simps c_guard_clift split_def) + apply (clarsimp simp: is_simple_cap_get_tag_relation ccte_relation_ccap_relation cmdb_node_relation_mdbNext[symmetric]) + done + +(****************************************************************************) +(* *) +(* Lemmas dealing with updateMDB on Haskell side and IF-THEN-ELSE on C side *) +(* *) +(****************************************************************************) + +lemma updateMDB_mdbNext_set_mdbPrev: + "\ slotc = Ptr slota; cmdbnode_relation mdba mdbc\ \ + ccorres dc xfdc + (\s. is_aligned slota cteSizeBits) + UNIV hs + (updateMDB (mdbNext mdba) (mdbPrev_update (\_. slota))) + (IF mdbNext_CL (mdb_node_lift mdbc) \ 0 + THEN Guard C_Guard \hrs_htd \t_hrs \\<^sub>t (Ptr (mdbNext_CL (mdb_node_lift mdbc)) :: cte_C ptr)\ + (call (\ta. ta(| mdb_node_ptr_' := Ptr &(Ptr (mdbNext_CL (mdb_node_lift mdbc)):: cte_C ptr + \[''cteMDBNode_C'']), + v64_' := ptr_val slotc |)) + mdb_node_ptr_set_mdbPrev_'proc + (\s t. s\ globals := globals t \) (\ta s'. 
Basic (\a. a))) + FI)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_cond_both[where R=\, simplified]) + apply (erule mdbNext_not_zero_eq) + apply (rule ccorres_updateMDB_cte_at) + apply (ctac add: ccorres_updateMDB_set_mdbPrev) + apply (ctac ccorres: ccorres_updateMDB_skip) + apply (clarsimp simp: cmdbnode_relation_def cte_wp_at_ctes_of) + done + +lemma updateMDB_mdbPrev_set_mdbNext: + "\ slotc = Ptr slota; cmdbnode_relation mdba mdbc\ \ + ccorres dc xfdc + (\s. is_aligned slota cteSizeBits \ canonical_address slota) + UNIV hs + (updateMDB (mdbPrev mdba) (mdbNext_update (\_. slota))) + (IF mdbPrev_CL (mdb_node_lift mdbc) \ 0 + THEN Guard C_Guard \hrs_htd \t_hrs \\<^sub>t (Ptr (mdbPrev_CL (mdb_node_lift mdbc)):: cte_C ptr)\ + (call (\ta. ta(| mdb_node_ptr_' := Ptr &(Ptr (mdbPrev_CL (mdb_node_lift mdbc)):: cte_C ptr + \[''cteMDBNode_C'']), + v64_' := ptr_val slotc |)) + mdb_node_ptr_set_mdbNext_'proc + (\s t. s\ globals := globals t \) (\ta s'. Basic (\a. a))) + FI)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_cond_both[where R=\, simplified]) + apply (erule mdbPrev_not_zero_eq) + apply (rule ccorres_updateMDB_cte_at) + apply (ctac add: ccorres_updateMDB_set_mdbNext) + apply (ctac ccorres: ccorres_updateMDB_skip) + apply (clarsimp simp: cte_wp_at_ctes_of cmdbnode_relation_def) + done + + +(************************************************************************) +(* *) +(* cteMove_ccorres ******************************************************) +(* *) +(************************************************************************) + +(* FIXME: rename *) +lemma is_aligned_3_prev: + "\ valid_mdb' s; pspace_aligned' s; ctes_of s p = Some cte \ + \ is_aligned (mdbPrev (cteMDBNode cte)) cteSizeBits" + apply (cases "mdbPrev (cteMDBNode cte) = 0", simp) + apply (drule (2) valid_mdb_ctes_of_prev) + apply (clarsimp simp: cte_wp_at_ctes_of cteSizeBits_eq ctes_of_aligned_bits) + done + +(* FIXME: rename *) +lemma is_aligned_3_next: + "\ valid_mdb' s; pspace_aligned' s; ctes_of s p = Some cte \ + \ is_aligned (mdbNext (cteMDBNode cte)) cteSizeBits" + apply (cases "mdbNext (cteMDBNode cte) = 0", simp) + apply (drule (2) valid_mdb_ctes_of_next) + apply (clarsimp simp: cte_wp_at_ctes_of cteSizeBits_eq ctes_of_aligned_bits) + done + +lemma cteMove_ccorres: + "ccorres dc xfdc + (valid_mdb' and pspace_aligned' and pspace_canonical') + (UNIV \ {s. destSlot_' s = Ptr dest} + \ {s. srcSlot_' s = Ptr src} + \ {s. 
ccap_relation cap (newCap_' s)}) [] + (cteMove cap src dest) + (Call cteMove_'proc)" + apply (cinit (no_ignore_call) lift: destSlot_' srcSlot_' newCap_' simp del: return_bind) + apply (ctac pre: ccorres_pre_getCTE ccorres_assert) + apply (ctac+, csymbr+)+ + apply (erule_tac t=ret__unsigned_longlong in ssubst) + apply (ctac add: updateMDB_mdbPrev_set_mdbNext) + apply csymbr + apply csymbr + apply (erule_tac t=ret__unsigned_longlong in ssubst) + apply (rule updateMDB_mdbNext_set_mdbPrev) + apply simp+ + apply (wp, vcg)+ + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_ctes_of cteSizeBits_eq ctes_of_canonical ctes_of_aligned_bits) + apply assumption + apply (clarsimp simp: ccap_relation_NullCap_iff cmdbnode_relation_def + mdb_node_to_H_def nullMDBNode_def) + done + +(************************************************************************) +(* *) +(* lemmas used in cteSwap_ccorres ***************************************) +(* *) +(************************************************************************) + + + +(*---------------------------------------------------------------------------------------*) +(* corres lemma for return of mdbnode but 'safer' than ccorres_return_cte_mdbnode ------ *) +(*---------------------------------------------------------------------------------------*) + +lemma ccorres_return_cte_mdbnode_safer: + fixes ptr' :: "cstate \ cte_C ptr" + assumes r1: "\s s' g. (s, s') \ rf_sr \ (s, xfu g s') \ rf_sr" + and xf_xfu: "\s g. xf (xfu g s) = g s" + shows "ccorres cmdbnode_relation xf + (\s. \ cte'. ctes_of s ptr = Some cte' \ cteMDBNode cte = cteMDBNode cte') {s. ptr_val (ptr' s) = ptr} hs + (return (cteMDBNode cte)) + (Basic (\s. xfu (\_. h_val (hrs_mem (t_hrs_' (globals s))) + (Ptr &(ptr' s \[''cteMDBNode_C'']))) s))" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp add: return_def) + apply rule + apply (erule r1) + apply (simp add: xf_xfu) + apply (drule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp: typ_heap_simps) + done + + + + + + +(*-----------------------------------------------------------------------*) +(* lemmas about map and hrs_mem -----------------------------------------*) +(*-----------------------------------------------------------------------*) + +declare modify_map_exists_cte[simp] + + + + + + + +(*------------------------------------------------------------------------------*) +(* lemmas about pointer equality given valid_mdb (prev\next, prev\myself, etc) *) +(*------------------------------------------------------------------------------*) + +lemma valid_mdb_Prev_neq_Next: + "\ valid_mdb' s; ctes_of s p = Some cte; mdbPrev (cteMDBNode cte) \ 0 \ \ + (mdbNext (cteMDBNode cte)) \ (mdbPrev (cteMDBNode cte))" + apply (simp add: valid_mdb'_def) + apply (simp add: valid_mdb_ctes_def) + apply (elim conjE) + apply (drule (1) mdb_chain_0_no_loops) + apply (simp add: valid_dlist_def) + apply (erule_tac x=p in allE) + apply (erule_tac x=cte in allE) + apply (simp add: Let_def) + apply clarsimp + apply (drule_tac s="mdbNext (cteMDBNode cte)" in sym) + apply simp + apply (simp add: no_loops_def) + apply (erule_tac x= "(mdbNext (cteMDBNode cte))" in allE) + apply (erule notE, rule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: mdb_next_unfold) + apply (rule r_into_trancl) + apply (simp add: mdb_next_unfold) +done + +lemma valid_mdb_Prev_neq_itself: + "\ valid_mdb' s; ctes_of s p = Some cte \ \ + (mdbPrev (cteMDBNode cte)) \ p" + apply (unfold valid_mdb'_def) + apply (simp add: 
CSpace_I.no_self_loop_prev) +done + +lemma valid_mdb_Next_neq_itself: + "\ valid_mdb' s; ctes_of s p = Some cte \ \ + (mdbNext (cteMDBNode cte)) \ p" + apply (unfold valid_mdb'_def) + apply (simp add: CSpace_I.no_self_loop_next) +done + +lemma valid_mdb_not_same_Next : + "\ valid_mdb' s; p\p'; ctes_of s p = Some cte; ctes_of s p' = Some cte'; + (mdbNext (cteMDBNode cte))\0 \ (mdbNext (cteMDBNode cte'))\0 \ \ + (mdbNext (cteMDBNode cte)) \ (mdbNext (cteMDBNode cte')) " + apply (clarsimp) + apply (case_tac cte, clarsimp) + apply (rename_tac capability mdbnode) + apply (case_tac cte', clarsimp) + apply (subgoal_tac "mdb_ptr (ctes_of s) p capability mdbnode") + apply (drule (2) mdb_ptr.p_nextD) + apply clarsimp + apply (unfold mdb_ptr_def vmdb_def mdb_ptr_axioms_def valid_mdb'_def, simp) + done + +lemma valid_mdb_not_same_Prev : + "\ valid_mdb' s; p\p'; ctes_of s p = Some cte; ctes_of s p' = Some cte'; + (mdbPrev (cteMDBNode cte))\0 \ (mdbPrev (cteMDBNode cte'))\0 \ \ + (mdbPrev (cteMDBNode cte)) \ (mdbPrev (cteMDBNode cte')) " + apply (clarsimp) + apply (case_tac cte, clarsimp) + apply (rename_tac capability mdbnode) + apply (case_tac cte', clarsimp) + apply (subgoal_tac "mdb_ptr (ctes_of s) p capability mdbnode") + apply (drule (2) mdb_ptr.p_prevD) + apply clarsimp + apply (unfold mdb_ptr_def vmdb_def mdb_ptr_axioms_def valid_mdb'_def, simp) + done + + + + +(*---------------------------------------------------------------------------------*) +(* lemmas to simplify the big last goal on C side to avoid proving things twice ---*) +(*---------------------------------------------------------------------------------*) + +lemma c_guard_and_h_t_valid_eq_h_t_valid: + "(POINTER \ 0 \ + c_guard ((Ptr &(Ptr POINTER ::cte_C ptr \[''cteMDBNode_C''])) ::mdb_node_C ptr) \ + s' \\<^sub>c (Ptr (POINTER)::cte_C ptr)) = + (POINTER \ 0 \ + s' \\<^sub>c (Ptr (POINTER)::cte_C ptr))" + apply (rule iffI, clarsimp+) + apply (rule c_guard_field_lvalue) + apply (rule c_guard_h_t_valid, assumption) + apply (fastforce simp: typ_uinfo_t_def)+ +done + + +lemma c_guard_and_h_t_valid_and_rest_eq_h_t_valid_and_rest: + "(POINTER \ 0 \ + c_guard ((Ptr &(Ptr POINTER ::cte_C ptr \[''cteMDBNode_C''])) ::mdb_node_C ptr) \ + s' \\<^sub>c (Ptr (POINTER)::cte_C ptr) \ REST) = + (POINTER \ 0 \ + s' \\<^sub>c (Ptr (POINTER)::cte_C ptr) \ REST)" + apply (rule iffI, clarsimp+) + apply (rule c_guard_field_lvalue) + apply (rule c_guard_h_t_valid, assumption) + apply (fastforce simp: typ_uinfo_t_def)+ +done + + +(************************************************************************) +(* *) +(* cteSwap_ccorres ******************************************************) +(* *) +(************************************************************************) + +lemma cteSwap_ccorres: + "ccorres dc xfdc + (valid_mdb' and pspace_aligned' and pspace_canonical' + and (\_. slot1 \ slot2)) + (UNIV \ {s. slot1_' s = Ptr slot1} + \ {s. slot2_' s = Ptr slot2} + \ {s. ccap_relation cap1 (cap1_' s)} + \ {s. 
ccap_relation cap2 (cap2_' s)}) + [] + (cteSwap cap1 slot1 cap2 slot2) + (Call cteSwap_'proc)" + supply ctes_of_aligned_bits[simp] + apply (cinit (no_ignore_call) lift: slot1_' slot2_' cap1_' cap2_' simp del: return_bind) + apply (ctac (no_vcg) pre: ccorres_pre_getCTE ccorres_move_guard_ptr_safe) + apply (rule ccorres_updateCap_cte_at) + apply (ctac (no_vcg) add: ccorres_return_cte_mdbnode_safer [where ptr=slot1])+ + apply csymbr + apply csymbr + apply (erule_tac t=ret__unsigned_longlong in ssubst) + apply (ctac (no_vcg) add: updateMDB_mdbPrev_set_mdbNext) + apply csymbr + apply csymbr + apply (erule_tac t=ret__unsigned_longlong in ssubst) + apply (ctac (no_vcg) add: updateMDB_mdbNext_set_mdbPrev) + apply (rule ccorres_move_c_guard_cte) + apply (ctac (no_vcg) pre: ccorres_getCTE ccorres_move_guard_ptr_safe + add: ccorres_return_cte_mdbnode[where ptr=slot2] + ccorres_move_guard_ptr_safe)+ + apply csymbr + apply csymbr + apply (erule_tac t=ret__unsigned_longlong in ssubst) + apply (ctac (no_vcg) add: updateMDB_mdbPrev_set_mdbNext) + apply csymbr + apply csymbr + apply (erule_tac t=ret__unsigned_longlong in ssubst) + apply (ctac (no_vcg) add: updateMDB_mdbNext_set_mdbPrev) + apply wp + apply simp + apply wp + apply simp + apply wp + apply simp + apply wp + apply simp + apply (clarsimp simp : cte_wp_at_ctes_of) + apply wp + apply simp + apply wp + apply simp + apply wp + apply simp + apply (clarsimp simp : cte_wp_at_ctes_of) + apply (wp updateCap_ctes_of_wp) + apply simp + apply (clarsimp simp : cte_wp_at_ctes_of) + apply (wp updateCap_ctes_of_wp) + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (apply_conjunct \match conclusion in \no_0 _\ + \ \simp add: valid_mdb'_def, erule (1) valid_mdb_ctesE\\) + apply (case_tac cte; simp add: modify_map_if ctes_of_canonical) + done + +(* todo change in cteMove (\s. ctes_of s src = Some scte) *) + + +(************************************************************************) +(* *) +(* lemmas used in emptySlot_ccorres *************************************) +(* *) +(************************************************************************) + + +declare if_split [split del] + +(* rq CALL mdb_node_ptr_set_mdbNext_'proc \) is a printing bug + one should write CALL mdb_node_ptr_set_mdbNext +*) + +lemma not_NullCap_eq_not_cap_null_cap: + " \ccap_relation cap cap' ; (s, s') \ rf_sr \ \ + (cap \ NullCap) = (s' \ {_. (cap_get_tag cap' \ scast cap_null_cap)})" + apply (rule iffI) + apply (case_tac "cap_get_tag cap' \ scast cap_null_cap", clarsimp+) + apply (erule notE) + apply (simp add: cap_get_tag_NullCap) + apply (case_tac "cap_get_tag cap' \ scast cap_null_cap") + apply (rule notI) + apply (erule notE) + apply (simp add: cap_get_tag_NullCap) + apply clarsimp +done + +lemma emptySlot_helper: + fixes mdbNode + defines "nextmdb \ Ptr &(Ptr ((mdbNext_CL (mdb_node_lift mdbNode)))::cte_C ptr\[''cteMDBNode_C'']) :: mdb_node_C ptr" + defines "nextcte \ Ptr ((mdbNext_CL (mdb_node_lift mdbNode)))::cte_C ptr" + shows "\cmdbnode_relation rva mdbNode\ + \ ccorres dc xfdc \ UNIV hs + (updateMDB (mdbNext rva) + (\mdb. mdbFirstBadged_update (\_. mdbFirstBadged mdb \ mdbFirstBadged rva) (mdbPrev_update (\_. 
mdbPrev rva) mdb))) + (IF mdbNext_CL (mdb_node_lift mdbNode) \ 0 THEN + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t nextcte\ + (CALL mdb_node_ptr_set_mdbPrev(nextmdb, ptr_val (Ptr (mdbPrev_CL (mdb_node_lift mdbNode))))) + FI;; + IF mdbNext_CL (mdb_node_lift mdbNode) \ 0 THEN + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t nextcte\ + (\ret__unsigned_longlong :== CALL mdb_node_get_mdbFirstBadged(h_val (hrs_mem \t_hrs) nextmdb));; + \ret__int :== (if \ret__unsigned_longlong \ 0 then 1 else 0);; + IF \ret__int \ 0 THEN + SKIP + ELSE + \ret__unsigned_longlong :== CALL mdb_node_get_mdbFirstBadged(mdbNode);; + \ret__int :== (if \ret__unsigned_longlong \ 0 then 1 else 0) + FI;; + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t nextcte\ + (CALL mdb_node_ptr_set_mdbFirstBadged(nextmdb,scast \ret__int)) + FI)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_updateMDB_cte_at) + apply (subgoal_tac "mdbNext rva=(mdbNext_CL (mdb_node_lift mdbNode))") + prefer 2 + apply (simp add: cmdbnode_relation_def) + + apply (case_tac "mdbNext rva \ 0") + apply (case_tac "mdbNext_CL (mdb_node_lift mdbNode) = 0", simp) + + \ \case where mdbNext rva \ 0 and mdbNext_CL (mdb_node_lift mdbNode) \ 0\ + apply (unfold updateMDB_def) + apply (clarsimp simp: Let_def) + apply (rule ccorres_pre_getCTE [where P = "\cte s. ctes_of s (mdbNext rva) = Some cte" and P' = "\_. UNIV"]) + apply (rule ccorres_from_vcg) + apply (rule allI) + apply (rule conseqPre, vcg) + apply clarsimp + + apply (frule(1) rf_sr_ctes_of_clift) + apply (clarsimp simp: typ_heap_simps' nextmdb_def nextcte_def) + apply (intro conjI impI allI) + \ \\ \x\fst \\ + apply clarsimp + apply (rule fst_setCTE [OF ctes_of_cte_at], assumption ) + apply (erule bexI [rotated]) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp add: rf_sr_def cstate_relation_def typ_heap_simps + Let_def cpspace_relation_def) + apply (rule conjI) + prefer 2 + apply (erule_tac t = s' in ssubst) + apply (simp add: carch_state_relation_def cmachine_state_relation_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"] + h_t_valid_clift_Some_iff typ_heap_simps' + cong: lifth_update) + apply (erule (1) setCTE_tcb_case) + + apply (erule (2) cspace_cte_relation_upd_mdbI) + apply (simp add: cmdbnode_relation_def) + apply (simp add: mdb_node_to_H_def) + + apply (subgoal_tac "mdbFirstBadged_CL (mdb_node_lift mdbNode) && mask (Suc 0) = + mdbFirstBadged_CL (mdb_node_lift mdbNode)") + prefer 2 + subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) + apply (subgoal_tac "mdbFirstBadged_CL (cteMDBNode_CL y) && mask (Suc 0) = + mdbFirstBadged_CL (cteMDBNode_CL y)") + prefer 2 + apply (drule cteMDBNode_CL_lift [symmetric]) + subgoal by (simp add: mdb_node_lift_def word_bw_assocs) + subgoal by (simp add: to_bool_def) + \ \\ \x\fst \\ + apply clarsimp + apply (rule fst_setCTE [OF ctes_of_cte_at], assumption ) + apply (erule bexI [rotated]) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp add: rf_sr_def cstate_relation_def typ_heap_simps + Let_def cpspace_relation_def) + apply (rule conjI) + prefer 2 + apply (erule_tac t = s' in ssubst) + apply (simp add: carch_state_relation_def cmachine_state_relation_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"] + typ_heap_simps' h_t_valid_clift_Some_iff + cong: lifth_update) + apply (erule (1) setCTE_tcb_case) + + apply (erule (2) cspace_cte_relation_upd_mdbI) + apply (simp add: cmdbnode_relation_def) + apply (simp add: mdb_node_to_H_def) + + apply (subgoal_tac "mdbFirstBadged_CL (mdb_node_lift 
mdbNode) && mask (Suc 0) = + mdbFirstBadged_CL (mdb_node_lift mdbNode)") + prefer 2 + subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) + apply (subgoal_tac "mdbFirstBadged_CL (cteMDBNode_CL y) && mask (Suc 0) = + mdbFirstBadged_CL (cteMDBNode_CL y)") + prefer 2 + apply (drule cteMDBNode_CL_lift [symmetric]) + subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) + apply (simp add: to_bool_def split: if_split) + + \ \trivial case where mdbNext rva = 0\ + apply (simp add:ccorres_cond_empty_iff) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_return_Skip) + apply simp + apply (clarsimp simp: cmdbnode_relation_def) +done + + + +(************************************************************************) +(* *) +(* emptySlot_ccorres ****************************************************) +(* *) +(************************************************************************) + + +(* ML "set CtacImpl.trace_ctac"*) + + +lemma mdbNext_CL_mdb_node_lift_eq_mdbNext: + "cmdbnode_relation n n' \ (mdbNext_CL (mdb_node_lift n')) =(mdbNext n)" + by (erule cmdbnode_relationE, fastforce simp: mdbNext_to_H) + +lemma mdbPrev_CL_mdb_node_lift_eq_mdbPrev: + "cmdbnode_relation n n' \ (mdbPrev_CL (mdb_node_lift n')) =(mdbPrev n)" + by (erule cmdbnode_relationE, fastforce simp: mdbNext_to_H) + + +lemma mdbNext_not_zero_eq_simpler: + "cmdbnode_relation n n' \ (mdbNext n \ 0) = (mdbNext_CL (mdb_node_lift n') \ 0)" + apply clarsimp + apply (erule cmdbnode_relationE) + apply (fastforce simp: mdbNext_to_H) + done + + + +lemma mdbPrev_not_zero_eq_simpler: + "cmdbnode_relation n n' \ (mdbPrev n \ 0) = (mdbPrev_CL (mdb_node_lift n') \ 0)" + apply clarsimp + apply (erule cmdbnode_relationE) + apply (fastforce simp: mdbPrev_to_H) + done + +lemma h_t_valid_and_cslift_and_c_guard_field_mdbPrev_CL: + " \(s, s') \ rf_sr; cte_at' slot s; valid_mdb' s; cslift s' (Ptr slot) = Some cte'\ + \ (mdbPrev_CL (mdb_node_lift (cteMDBNode_C cte')) \ 0) \ + s' \\<^sub>c ( Ptr (mdbPrev_CL (mdb_node_lift (cteMDBNode_C cte'))) :: cte_C ptr) \ + (\ cten. cslift s' (Ptr (mdbPrev_CL (mdb_node_lift (cteMDBNode_C cte'))) :: cte_C ptr) = Some cten) \ + c_guard (Ptr &(Ptr (mdbPrev_CL (mdb_node_lift (cteMDBNode_C cte')))::cte_C ptr\[''cteMDBNode_C'']) :: mdb_node_C ptr)" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_mdb_ctes_of_prev) + apply (frule (2) rf_sr_cte_relation) + apply (drule ccte_relation_cmdbnode_relation) + apply (simp add: mdbPrev_not_zero_eq_simpler) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) rf_sr_ctes_of_clift [rotated])+ + apply (clarsimp simp: typ_heap_simps) + + apply (rule c_guard_field_lvalue [rotated]) + apply (fastforce simp: typ_uinfo_t_def)+ + apply (rule c_guard_clift) + apply (simp add: typ_heap_simps) +done + +lemma h_t_valid_and_cslift_and_c_guard_field_mdbNext_CL: + " \(s, s') \ rf_sr; cte_at' slot s; valid_mdb' s; cslift s' (Ptr slot) = Some cte'\ + \ (mdbNext_CL (mdb_node_lift (cteMDBNode_C cte')) \ 0) \ + s' \\<^sub>c ( Ptr (mdbNext_CL (mdb_node_lift (cteMDBNode_C cte'))) :: cte_C ptr) \ + (\ cten. 
cslift s' (Ptr (mdbNext_CL (mdb_node_lift (cteMDBNode_C cte'))) :: cte_C ptr) = Some cten) \ + c_guard (Ptr &(Ptr (mdbNext_CL (mdb_node_lift (cteMDBNode_C cte')))::cte_C ptr\[''cteMDBNode_C'']) :: mdb_node_C ptr)" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_mdb_ctes_of_next) + apply (frule (2) rf_sr_cte_relation) + apply (drule ccte_relation_cmdbnode_relation) + apply (simp add: mdbNext_not_zero_eq_simpler) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) rf_sr_ctes_of_clift [rotated])+ + apply (clarsimp simp: typ_heap_simps) + + apply (rule c_guard_field_lvalue [rotated]) + apply (fastforce simp: typ_uinfo_t_def)+ + apply (rule c_guard_clift) + apply (simp add: typ_heap_simps) +done + + +lemma valid_mdb_Prev_neq_Next_better: + "\ valid_mdb' s; ctes_of s p = Some cte \ \ mdbPrev (cteMDBNode cte) \ 0 \ + (mdbNext (cteMDBNode cte)) \ (mdbPrev (cteMDBNode cte))" + apply (rule impI) + apply (simp add: valid_mdb'_def) + apply (simp add: valid_mdb_ctes_def) + apply (elim conjE) + apply (drule (1) mdb_chain_0_no_loops) + apply (simp add: valid_dlist_def) + apply (erule_tac x=p in allE) + apply (erule_tac x=cte in allE) + apply (simp add: Let_def) + apply clarsimp + apply (drule_tac s="mdbNext (cteMDBNode cte)" in sym) + apply simp + apply (simp add: no_loops_def) + apply (erule_tac x= "(mdbNext (cteMDBNode cte))" in allE) + apply (erule notE, rule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: mdb_next_unfold) + apply (rule r_into_trancl) + apply (simp add: mdb_next_unfold) +done + +declare unat_ucast_up_simp[simp] + +lemma setIRQState_ccorres: + "ccorres dc xfdc + (\ and (\s. ucast irq \ (ucast Kernel_C.maxIRQ :: machine_word))) + (UNIV \ {s. irqState_' s = irqstate_to_C irqState} + \ {s. irq_' s = ucast irq}) + [] + (setIRQState irqState irq) + (Call setIRQState_'proc )" + apply (rule ccorres_gen_asm) + apply (cinit simp del: return_bind) + apply (rule ccorres_symb_exec_l) + apply simp + apply (rule_tac r'="dc" and xf'="xfdc" in ccorres_split_nothrow) + apply (rule_tac P= "\s. st = (ksInterruptState s)" + and P'= "(UNIV \ {s. irqState_' s = irqstate_to_C irqState} + \ {s. irq_' s = ucast irq} )" + in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: setInterruptState_def) + apply (clarsimp simp: simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (simp add: cinterrupt_relation_def Kernel_C.maxIRQ_def) + apply (clarsimp simp: word_sless_msb_less order_le_less_trans + unat_ucast_no_overflow_le word_le_nat_alt ucast_ucast_b + split: if_split ) + apply ceqv + apply (ctac add: maskInterrupt_ccorres) + apply wp + apply vcg + apply wp + apply (simp add: getInterruptState_def gets_def) + apply wp + apply (simp add: empty_fail_def getInterruptState_def simpler_gets_def) + apply clarsimp + apply (cases irqState, simp_all) + apply (simp add: Kernel_C.IRQSignal_def Kernel_C.IRQInactive_def) + apply (simp add: Kernel_C.IRQTimer_def Kernel_C.IRQInactive_def) + apply (simp add: Kernel_C.IRQInactive_def Kernel_C.IRQReserved_def) + done + + +lemma deletedIRQHandler_ccorres: + "ccorres dc xfdc + (\s. ucast irq \ (ucast Kernel_C.maxIRQ :: machine_word)) + (UNIV \ {s. 
irq_' s = ucast irq}) [] + (deletedIRQHandler irq) + (Call deletedIRQHandler_'proc)" + apply (cinit simp del: return_bind) + apply (ctac add: setIRQState_ccorres) + apply clarsimp +done + +lemmas ccorres_split_noop_lhs + = ccorres_split_nothrow[where c=Skip, OF _ ceqv_refl _ _ hoarep.Skip, + simplified ccorres_seq_skip] + +(* FIXME: to SR_Lemmas *) +lemma region_is_bytes_subset: + "region_is_bytes' ptr sz htd + \ {ptr' ..+ sz'} \ {ptr ..+ sz} + \ region_is_bytes' ptr' sz' htd" + by (auto simp: region_is_bytes'_def) + +lemma region_actually_is_bytes_subset: + "region_actually_is_bytes' ptr sz htd + \ {ptr' ..+ sz'} \ {ptr ..+ sz} + \ region_actually_is_bytes' ptr' sz' htd" + by (auto simp: region_actually_is_bytes'_def) + +lemma intvl_both_le: + "\ a \ x; unat x + y \ unat a + b \ + \ {x ..+ y} \ {a ..+ b}" + apply (rule order_trans[OF _ intvl_sub_offset[where x="x - a"]]) + apply (simp, rule order_refl) + apply unat_arith + done + +lemma untypedZeroRange_idx_forward_helper: + "isUntypedCap cap + \ capFreeIndex cap \ idx + \ idx \ 2 ^ capBlockSize cap + \ valid_cap' cap s + \ (case (untypedZeroRange cap, untypedZeroRange (capFreeIndex_update (\_. idx) cap)) + of (Some (a, b), Some (a', b')) \ {a' ..+ unat (b' + 1 - a')} \ {a ..+ unat (b + 1 - a)} + | _ \ True)" + apply (clarsimp split: option.split) + apply (clarsimp simp: untypedZeroRange_def max_free_index_def Let_def + isCap_simps valid_cap_simps' capAligned_def untypedBits_defs + split: if_split_asm) + apply (erule subsetD[rotated], rule intvl_both_le) + apply (clarsimp simp: getFreeRef_def) + apply (rule word_plus_mono_right) + apply (rule PackedTypes.of_nat_mono_maybe_le) + apply (erule order_le_less_trans, rule power_strict_increasing, simp_all) + apply (erule is_aligned_no_wrap') + apply (rule word_of_nat_less, simp) + apply (simp add: getFreeRef_def) + apply (simp add: unat_plus_simple[THEN iffD1, OF is_aligned_no_wrap'] + word_of_nat_less) + apply (simp add: word_of_nat_le unat_sub + order_le_less_trans[OF _ power_strict_increasing] + unat_of_nat_eq[where 'a=machine_word_len, folded word_bits_def]) + done + +lemma intvl_close_Un: + "y = x + of_nat n + \ ({x ..+ n} \ {y ..+ m}) = {x ..+ n + m}" + apply ((simp add: intvl_def, safe, simp_all, + simp_all only: of_nat_add[symmetric]); (rule exI, strengthen refl)) + apply simp_all + apply (rule ccontr) + apply (drule_tac x="k - n" in spec) + apply simp + done + +lemma untypedZeroRange_idx_backward_helper: + "isUntypedCap cap + \ idx \ capFreeIndex cap + \ idx \ 2 ^ capBlockSize cap + \ valid_cap' cap s + \ (case untypedZeroRange (capFreeIndex_update (\_. 
idx) cap) + of None \ True | Some (a', b') \ + {a' ..+ unat (b' + 1 - a')} \ {capPtr cap + of_nat idx ..+ (capFreeIndex cap - idx)} + \ (case untypedZeroRange cap + of Some (a, b) \ {a ..+ unat (b + 1 - a)} + | None \ {}) + )" + apply (clarsimp split: option.split, intro impI conjI allI) + apply (rule intvl_both_le; clarsimp simp: untypedZeroRange_def + max_free_index_def Let_def + isCap_simps valid_cap_simps' capAligned_def + split: if_split_asm) + apply (clarsimp simp: getFreeRef_def) + apply (clarsimp simp: getFreeRef_def) + apply (simp add: word_of_nat_le unat_sub + order_le_less_trans[OF _ power_strict_increasing] + unat_of_nat_eq[where 'a=machine_word_len, folded word_bits_def]) + apply (subst intvl_close_Un) + apply (clarsimp simp: untypedZeroRange_def + max_free_index_def Let_def + getFreeRef_def + split: if_split_asm) + apply (clarsimp simp: untypedZeroRange_def + max_free_index_def Let_def + getFreeRef_def isCap_simps valid_cap_simps' + split: if_split_asm) + apply (simp add: word_of_nat_le unat_sub capAligned_def + order_le_less_trans[OF _ power_strict_increasing] + order_le_less_trans[where x=idx] + unat_of_nat_eq[where 'a=machine_word_len, folded word_bits_def]) + done + +lemma ctes_of_untyped_zero_rf_sr_case: + "\ ctes_of s p = Some cte; (s, s') \ rf_sr; + untyped_ranges_zero' s \ + \ case untypedZeroRange (cteCap cte) + of None \ True + | Some (start, end) \ region_actually_is_zero_bytes start (unat ((end + 1) - start)) s'" + by (simp split: option.split add: ctes_of_untyped_zero_rf_sr) + +lemma gsUntypedZeroRanges_update_helper: + "(\, s) \ rf_sr + \ (zero_ranges_are_zero (gsUntypedZeroRanges \) (t_hrs_' (globals s)) + \ zero_ranges_are_zero (f (gsUntypedZeroRanges \)) (t_hrs_' (globals s))) + \ (gsUntypedZeroRanges_update f \, s) \ rf_sr" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + +lemma heap_list_zero_Ball_intvl: + "heap_list_is_zero hmem ptr n = (\x \ {ptr ..+ n}. hmem x = 0)" + apply safe + apply (erule heap_list_h_eq_better) + apply (simp add: heap_list_rpbs) + apply (rule trans[OF heap_list_h_eq2 heap_list_rpbs]) + apply simp + done + +lemma untypedZeroRange_not_device: + "untypedZeroRange cap = Some r + \ \ capIsDevice cap" + by (clarsimp simp: untypedZeroRange_def cong: if_cong) + +lemma updateTrackedFreeIndex_noop_ccorres: + "ccorres dc xfdc (cte_wp_at' ((\cap. isUntypedCap cap + \ idx \ 2 ^ capBlockSize cap + \ (capFreeIndex cap \ idx \ cap' = cap)) o cteCap) slot + and valid_objs' and untyped_ranges_zero') + {s. \ capIsDevice cap' \ region_actually_is_zero_bytes (capPtr cap' + of_nat idx) (capFreeIndex cap' - idx) s} hs + (updateTrackedFreeIndex slot idx) Skip" + (is "ccorres dc xfdc ?P ?P' _ _ _") + apply (simp add: updateTrackedFreeIndex_def getSlotCap_def) + apply (rule ccorres_guard_imp) + apply (rule ccorres_pre_getCTE[where P="\rv. 
+ cte_wp_at' ((=) rv) slot and ?P" and P'="K ?P'"]) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule(1) ctes_of_valid') + apply (frule(2) ctes_of_untyped_zero_rf_sr_case) + apply (clarsimp simp: simpler_modify_def bind_def cte_wp_at_ctes_of) + apply (erule gsUntypedZeroRanges_update_helper) + apply (clarsimp simp: zero_ranges_are_zero_def + split: if_split) + apply (case_tac "(a, b) \ gsUntypedZeroRanges \") + apply (drule(1) bspec, simp) + apply (erule disjE_L) + apply (frule(3) untypedZeroRange_idx_forward_helper) + apply (clarsimp simp: isCap_simps valid_cap_simps') + apply (case_tac "untypedZeroRange (cteCap cte)") + apply (clarsimp simp: untypedZeroRange_def + valid_cap_simps' + max_free_index_def Let_def + split: if_split_asm) + apply clarsimp + apply (thin_tac "\ capIsDevice cap' \ P" for P) + apply (clarsimp split: option.split_asm) + apply (subst region_actually_is_bytes_subset, simp+) + apply (subst heap_list_is_zero_mono2, simp+) + apply (frule untypedZeroRange_idx_backward_helper[where idx=idx], + simp+) + apply (clarsimp simp: isCap_simps valid_cap_simps') + apply (clarsimp split: option.split_asm) + apply (clarsimp dest!: untypedZeroRange_not_device) + apply (subst region_actually_is_bytes_subset, simp+) + apply (subst heap_list_is_zero_mono2, simp+) + apply (simp add: region_actually_is_bytes'_def heap_list_zero_Ball_intvl) + apply (clarsimp dest!: untypedZeroRange_not_device) + apply blast + apply (clarsimp simp: cte_wp_at_ctes_of) + apply clarsimp + done + +lemma updateTrackedFreeIndex_forward_noop_ccorres: + "ccorres dc xfdc (cte_wp_at' ((\cap. isUntypedCap cap + \ capFreeIndex cap \ idx \ idx \ 2 ^ capBlockSize cap) o cteCap) slot + and valid_objs' and untyped_ranges_zero') UNIV hs + (updateTrackedFreeIndex slot idx) Skip" + (is "ccorres dc xfdc ?P UNIV _ _ _") + apply (rule ccorres_name_pre) + apply (rule ccorres_guard_imp2, + rule_tac cap'="cteCap (the (ctes_of s slot))" in updateTrackedFreeIndex_noop_ccorres) + apply (clarsimp simp: cte_wp_at_ctes_of region_actually_is_bytes'_def) + done + +lemma clearUntypedFreeIndex_noop_ccorres: + "ccorres dc xfdc (valid_objs' and untyped_ranges_zero') UNIV hs + (clearUntypedFreeIndex p) Skip" + apply (simp add: clearUntypedFreeIndex_def getSlotCap_def) + apply (rule ccorres_guard_imp) + apply (rule ccorres_pre_getCTE[where P="\rv. 
cte_wp_at' ((=) rv) p + and valid_objs' and untyped_ranges_zero'" and P'="K UNIV"]) + apply (case_tac "cteCap cte", simp_all add: ccorres_guard_imp[OF ccorres_return_Skip])[1] + apply (rule ccorres_guard_imp, rule updateTrackedFreeIndex_forward_noop_ccorres) + apply (clarsimp simp: cte_wp_at_ctes_of max_free_index_def) + apply (frule(1) Finalise_R.ctes_of_valid') + apply (clarsimp simp: valid_cap_simps') + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + done + +lemma canonical_address_mdbNext_CL: + "canonical_address (mdbNext_CL (mdb_node_lift (cteMDBNode_C cte')))" + apply (simp add: mdb_node_lift_def canonical_address_def canonical_address_of_def + canonical_bit_def) + apply (subst le_max_word_ucast_id; simp) + apply (rule word_and_le') + apply (rule order.trans[OF mask_shiftl_le_mask]) + apply simp + done + +lemma canonical_address_mdbNext': + "ccte_relation cte cte' \ canonical_address (mdbNext (cteMDBNode cte))" + apply (rule rsubst[where P=canonical_address, OF canonical_address_mdbNext_CL]) + apply (rule cmdb_node_relation_mdbNext) + apply (erule ccte_relation_cmdbnode_relation) + done + +lemma canonical_address_mdbNext: + "\ (s, s') \ rf_sr; ctes_of s slot = Some cte \ \ canonical_address (mdbNext (cteMDBNode cte))" + apply (drule cmap_relation_cte) + apply (erule (1) cmap_relationE1) + apply (erule canonical_address_mdbNext') + done + +definition arch_cleanup_info_wf' :: "arch_capability \ bool" where + "arch_cleanup_info_wf' acap \ True" + +definition cleanup_info_wf' :: "capability \ bool" where + "cleanup_info_wf' cap \ case cap of + IRQHandlerCap irq \ + UCAST(9\machine_word_len) irq \ SCAST(int_literal_len\machine_word_len) Kernel_C.maxIRQ + | ArchObjectCap acap \ arch_cleanup_info_wf' acap + | _ \ True" + +(* FIXME: move *) +lemma hrs_mem_update_compose: + "hrs_mem_update f (hrs_mem_update g h) = hrs_mem_update (f \ g) h" + by (auto simp: hrs_mem_update_def split: prod.split) + +(* FIXME: move *) +lemma packed_heap_update_collapse': + fixes p :: "'a::packed_type ptr" + shows "heap_update p v \ heap_update p u = heap_update p v" + by (auto simp: packed_heap_update_collapse) + +(* FIXME: move *) +lemma access_array_from_elements: + fixes v :: "'a::packed_type['b::finite]" + assumes "\i < CARD('b). h_val h (array_ptr_index p False i) = v.[i]" + shows "h_val h p = v" + by (rule cart_eq[THEN iffD2]) (simp add: assms heap_access_Array_element') + +(* FIXME: move *) +lemma h_val_foldr_heap_update: + fixes v :: "'i \ 'a::mem_type" + assumes "\x y. {x,y} \ set xs \ x \ y \ ptr_span (p x) \ ptr_span (p y) = {}" + assumes "distinct xs" "i \ set xs" + shows "h_val (foldr (\i. heap_update (p i) (v i)) xs h) (p i) = v i" + using assms by (induct xs arbitrary: h; + fastforce simp: h_val_heap_update h_val_update_regions_disjoint) + +(* FIXME: move *) +lemma ptr_span_array_ptr_index_disjoint: + fixes p :: "('a::packed_type['b::finite]) ptr" + assumes s: "CARD('b) * size_of TYPE('a) \ 2 ^ addr_bitsize" + assumes b: "x < CARD('b)" "y < CARD('b)" + assumes n: "x \ y" + shows "ptr_span (array_ptr_index p False x) \ ptr_span (array_ptr_index p False y) = {}" + proof - + have l: "CARD('b) * size_of TYPE('a) \ 2 ^ LENGTH(64)" using s by simp + have p: "\x k. 
x < CARD('b) \ k < size_of TYPE('a) + \ x * size_of TYPE('a) + k < 2 ^ LENGTH(64)" + by (metis less_le_trans[OF _ l] less_imp_not_less mod_lemma mult.commute nat_mod_lem neq0_conv) + show ?thesis + apply (clarsimp simp: array_ptr_index_def ptr_add_def intvl_disj_offset) + apply (rule disjointI) + apply (clarsimp simp: intvl_def) + apply (subst (asm) of_nat_mult[symmetric])+ + apply (subst (asm) of_nat_add[symmetric])+ + apply (subst (asm) of_nat_inj[OF p p]; (simp add: b)?) + apply (drule arg_cong[where f="\x. x div size_of TYPE('a)"]; simp add: n) + done + qed + +(* FIXME: move *) +lemma h_val_heap_update_Array: + fixes v :: "'a::packed_type['b::finite]" + assumes s: "CARD('b) * size_of TYPE('a) \ 2 ^ addr_bitsize" + shows "h_val (heap_update p v h) p = v" + apply (rule access_array_from_elements) + apply (clarsimp simp: heap_update_Array foldl_conv_foldr) + apply (rule h_val_foldr_heap_update; clarsimp simp: ptr_span_array_ptr_index_disjoint[OF s]) + done + +(* FIXME: move *) +lemma foldr_heap_update_commute: + assumes "\y. y \ set xs \ ptr_span q \ ptr_span (p y) = {}" + shows "foldr (\i. heap_update (p i) (v i)) xs (heap_update q u h) + = heap_update q u (foldr (\i. heap_update (p i) (v i)) xs h)" + using assms by (induct xs) (auto simp: LemmaBucket_C.heap_update_commute) + +(* FIXME: move *) +lemma foldr_packed_heap_update_collapse: + fixes u v :: "'i \ 'a::packed_type" + assumes "\x y. {x,y} \ set xs \ y \ x \ ptr_span (p x) \ ptr_span (p y) = {}" + assumes "distinct xs" + shows "foldr (\i. heap_update (p i) (v i)) xs (foldr (\i. heap_update (p i) (u i)) xs h) + = foldr (\i. heap_update (p i) (v i)) xs h" + using assms + apply - + apply (induct xs arbitrary: h; clarsimp; rename_tac x xs h) + apply (drule_tac x=x in spec; clarsimp) + apply (subst foldr_heap_update_commute; clarsimp simp: packed_heap_update_collapse) + apply (drule_tac x=y in spec; clarsimp) + done + +(* FIXME: move *) +lemma packed_Array_heap_update_collapse: + fixes p :: "('a::packed_type['b::finite]) ptr" + assumes s: "CARD('b) * size_of TYPE('a) \ 2 ^ addr_bitsize" + shows "heap_update p v (heap_update p u h) = heap_update p v h" + by (simp add: heap_update_Array foldl_conv_foldr foldr_packed_heap_update_collapse + ptr_span_array_ptr_index_disjoint[OF s]) + +(* FIXME: move *) +lemma packed_Array_heap_update_collapse': + fixes p :: "('a::packed_type['b::finite]) ptr" + assumes s: "CARD('b) * size_of TYPE('a) \ 2 ^ addr_bitsize" + shows "heap_update p v \ heap_update p u = heap_update p v" + by (auto simp: packed_Array_heap_update_collapse[OF s]) + +(* FIXME: move *) +definition + heap_modify :: "'a::c_type ptr \ ('a \ 'a) \ heap_mem \ heap_mem" +where + "heap_modify p f \ \h. heap_update p (f (h_val h p)) h" + +(* FIXME: move *) +lemma heap_modify_def2: + "heap_modify (p::'a::c_type ptr) f \ + \h. 
let bytes = heap_list h (size_of TYPE('a)) (ptr_val p) in + heap_update_list (ptr_val p) (to_bytes (f (from_bytes bytes)) bytes) h" + by (simp add: heap_modify_def Let_def heap_update_def h_val_def) + +(* FIXME: move *) +lemma heap_modify_compose: + fixes p :: "'a::packed_type ptr" + shows "heap_modify p f \ heap_modify p g = heap_modify p (f \ g)" + and "heap_modify p f (heap_modify p g h) = heap_modify p (f \ g) h" + by (auto simp: heap_modify_def h_val_heap_update packed_heap_update_collapse) + +(* FIXME: move *) +lemma heap_modify_compose_Array: + fixes p :: "('a::packed_type['b::finite]) ptr" + assumes s: "CARD('b) * size_of TYPE('a) \ 2 ^ addr_bitsize" + shows "heap_modify p f \ heap_modify p g = heap_modify p (f \ g)" + and "heap_modify p f (heap_modify p g h) = heap_modify p (f \ g) h" + by (auto simp: heap_modify_def h_val_heap_update_Array[OF s] + packed_Array_heap_update_collapse[OF s]) + +(* FIXME: move *) +lemma fold_heap_modify_commute: + fixes p :: "'a::packed_type ptr" + shows "fold (heap_modify p \ f) upds = heap_modify p (fold f upds)" + apply (induction upds) + apply (simp add: heap_modify_def heap_update_id) + apply (clarsimp simp: heap_modify_compose[THEN fun_cong, simplified] o_def) + done + +(* FIXME: move *) +lemma fold_heap_modify_commute_Array: + fixes p :: "('a::packed_type['b::finite]) ptr" + assumes s: "CARD('b) * size_of TYPE('a) \ 2 ^ addr_bitsize" + shows "fold (heap_modify p \ f) upds = heap_modify p (fold f upds)" + apply (induction upds) + apply (simp add: heap_modify_def heap_update_id_Array) + apply (clarsimp simp: heap_modify_compose_Array[OF s, THEN fun_cong, simplified] o_def) + done + +definition + word_set_or_clear :: "bool \ 'a::len word \ 'a::len word \ 'a::len word" +where + "word_set_or_clear s p w \ if s then w || p else w && ~~ p" + +(* FIXME: move *) +lemma whileAnno_subst_invariant: + "whileAnno b I' V c = whileAnno b I V c" + by (simp add: whileAnno_def) + +lemma hoarep_conseq_spec_state: + fixes \ :: "'p \ ('s,'p,'f) com option" + assumes "\\. \ \ {s. s = \ \ P s} c (Q \)" + assumes "\\. 
\ \ P' \ P \ \ Q \ \ Q'" + shows "\ \ P' c Q'" + using assms by (fastforce intro: hoarep.Conseq) + +lemma hrs_simps: + "hrs_mem (mem, htd) = mem" + "hrs_mem_update f (mem, htd) = (f mem, htd)" + "hrs_htd (mem, htd) = htd" + "hrs_htd_update g (mem, htd) = (mem, g htd)" + by (auto simp: hrs_mem_def hrs_mem_update_def hrs_htd_def hrs_htd_update_def) + +lemma clift_heap_modify_same: + fixes p :: "'a :: mem_type ptr" + assumes "hrs_htd hp \\<^sub>t p" + assumes "typ_uinfo_t TYPE('a) \\<^sub>t typ_uinfo_t TYPE('b)" + shows "clift (hrs_mem_update (heap_modify p f) hp) = (clift hp :: 'b :: mem_type typ_heap)" + using assms unfolding hrs_mem_update_def + apply (cases hp) + apply (simp add: split_def hrs_htd_def heap_modify_def) + apply (erule lift_t_heap_update_same) + apply simp + done + +lemma zero_ranges_are_zero_modify[simp]: + "h_t_valid (hrs_htd hrs) c_guard (ptr :: 'a ptr) + \ typ_uinfo_t TYPE('a :: wf_type) \ typ_uinfo_t TYPE(word8) + \ zero_ranges_are_zero rs (hrs_mem_update (heap_modify ptr f) hrs) + = zero_ranges_are_zero rs hrs" + apply (clarsimp simp: zero_ranges_are_zero_def hrs_mem_update + intro!: ball_cong[OF refl] conj_cong[OF refl]) + apply (drule region_actually_is_bytes) + apply (drule(2) region_is_bytes_disjoint) + apply (simp add: heap_modify_def heap_update_def heap_list_update_disjoint_same Int_commute) + done + +lemma h_val_heap_modify: + fixes p :: "'a::mem_type ptr" + shows "h_val (heap_modify p f h) p = f (h_val h p)" + by (simp add: heap_modify_def h_val_heap_update) + +lemma array_fupdate_index: + fixes arr :: "'a::c_type['b::finite]" + assumes "i < CARD('b)" "j < CARD('b)" + shows "fupdate i f arr.[j] = (if i = j then f (arr.[i]) else arr.[j])" + using assms by (cases "i = j"; simp add: fupdate_def) + +lemma foldl_map_pair_constant: + "foldl (\acc p. f acc (fst p) (snd p)) z (map (\x. (x,v)) xs) = foldl (\acc x. f acc x v) z xs" + by (induct xs arbitrary: z) auto + +lemma word_set_or_clear_test_bit: + fixes w :: "'a::len word" + shows "i < LENGTH('a) \ word_set_or_clear b p w !! i = (if p !! i then b else w !! i)" + by (auto simp: word_set_or_clear_def word_ops_nth_size word_size split: if_splits) + +lemma heap_modify_fold: + "heap_update p (f (h_val h p)) h = heap_modify p f h" + by (simp add: heap_modify_def) + +lemma fold_array_update_index: + fixes arr :: "'a::c_type['b::finite]" + assumes "i < CARD('b)" + shows "fold (\i arr. 
Arrays.update arr i (f i)) is arr.[i] = (if i \ set is then f i else arr.[i])" + using assms by (induct "is" arbitrary: arr) (auto split: if_splits) + +lemma t_hrs_'_update_heap_modify_fold: + "gs\ t_hrs_' := hrs_mem_update (heap_update p (f (h_val (hrs_mem (t_hrs_' gs)) p))) (t_hrs_' gs) \ + = t_hrs_'_update (hrs_mem_update (heap_modify p f)) gs" + by (clarsimp simp: heap_modify_def hrs_mem_update_def hrs_mem_def split: prod.splits) + +lemma heap_modify_Array_element: + fixes p :: "'a::packed_type ptr" + fixes p' :: "('a['b::finite]) ptr" + assumes "p = ptr_coerce p' +\<^sub>p int n" + assumes "n < CARD('b)" + assumes "CARD('b) * size_of TYPE('a) < 2 ^ addr_bitsize" + shows "heap_modify p f = heap_modify p' (fupdate n f)" + using assms by (simp add: heap_access_Array_element heap_update_Array_element' + heap_modify_def fupdate_def) + +lemma fupdate_word_set_or_clear_max_word: + "fupdate i (word_set_or_clear b max_word) arr = Arrays.update arr i (if b then max_word else 0)" + by (simp add: fupdate_def word_set_or_clear_def cong: if_cong) + +lemma h_t_valid_Array_element': + "\ htd \\<^sub>t (p :: (('a :: mem_type)['b :: finite]) ptr); 0 \ n; n < int CARD('b) \ + \ htd \\<^sub>t ((ptr_coerce p :: 'a ptr) +\<^sub>p n)" + by (fact h_t_valid_Array_element) + +lemma Arch_postCapDeletion_ccorres: + "ccorres dc xfdc + (\ and (\s. arch_cleanup_info_wf' acap)) + (UNIV \ {s. ccap_relation (ArchObjectCap acap) (cap_' s)}) hs + (AARCH64_H.postCapDeletion acap) + (Call Arch_postCapDeletion_'proc)" + apply (cinit lift: cap_') + apply (rule ccorres_return_Skip) + apply simp + done + +lemma not_irq_or_arch_cap_case: + "\\isIRQHandlerCap cap; \ isArchCap \ cap\ \ + (case cap of IRQHandlerCap irq \ f irq | ArchObjectCap acap \ g acap | _ \ h) = h" + by (case_tac cap; clarsimp simp: isCap_simps) + +lemma postCapDeletion_ccorres: + "cleanup_info_wf' cap \ + ccorres dc xfdc + \ (UNIV \ {s. 
ccap_relation cap (cap_' s)}) hs + (postCapDeletion cap) + (Call postCapDeletion_'proc)" + supply Collect_const[simp del] + apply (cinit lift: cap_' simp: Retype_H.postCapDeletion_def) + apply csymbr + apply (clarsimp simp: cap_get_tag_isCap) + apply (rule ccorres_Cond_rhs) + apply (clarsimp simp: isCap_simps ) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=irq_' in ccorres_abstract, ceqv) + apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) + apply (frule cap_get_tag_to_H, solves \clarsimp simp: cap_get_tag_isCap_unfolded_H_cap\) + apply (clarsimp simp: cap_irq_handler_cap_lift) + apply (ctac(no_vcg) add: deletedIRQHandler_ccorres) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply csymbr + apply (clarsimp simp: cap_get_tag_isCap) + apply (rule ccorres_Cond_rhs) + apply (wpc; clarsimp simp: isCap_simps) + apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres) + apply (simp add: not_irq_or_arch_cap_case) + apply (rule ccorres_return_Skip) + apply clarsimp + apply (rule conjI, clarsimp simp: isCap_simps Kernel_C.maxIRQ_def) + apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) + apply (clarsimp simp: cap_irq_handler_cap_lift ccap_relation_def cap_to_H_def + cleanup_info_wf'_def maxIRQ_def Kernel_C.maxIRQ_def) + apply (rule conjI, clarsimp simp: isCap_simps cleanup_info_wf'_def) + apply (rule conjI[rotated], clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) + apply (clarsimp simp: cap_irq_handler_cap_lift ccap_relation_def cap_to_H_def + cleanup_info_wf'_def c_valid_cap_def cl_valid_cap_def) + apply (simp add: mask_eq_ucast_eq) + done + +lemma emptySlot_ccorres: + "ccorres dc xfdc + (valid_mdb' and valid_objs' and pspace_aligned' and untyped_ranges_zero') + (UNIV \ {s. slot_' s = Ptr slot} + \ {s. 
ccap_relation info (cleanupInfo_' s) \ cleanup_info_wf' info} ) + [] + (emptySlot slot info) + (Call emptySlot_'proc)" + supply if_cong[cong] + apply (cinit lift: slot_' cleanupInfo_' simp: case_Null_If) + + \ \--- handle the clearUntypedFreeIndex\ + apply (rule ccorres_split_noop_lhs, rule clearUntypedFreeIndex_noop_ccorres) + + \ \--- instruction: newCTE \ getCTE slot; ---\ + apply (rule ccorres_pre_getCTE) + \ \--- instruction: CALL on C side\ + apply (rule ccorres_move_c_guard_cte) + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply (rename_tac cap_tag) + apply (rule_tac P="(cap_tag = scast cap_null_cap) + = (cteCap newCTE = NullCap)" in ccorres_gen_asm2) + apply (simp del: Collect_const) + + \ \--- instruction: if-then-else / IF-THEN-ELSE\ + apply (rule ccorres_cond2'[where R=\]) + + \ \*** link between abstract and concrete conditionals ***\ + apply (clarsimp split: if_split) + + \ \*** proof for the 'else' branch (return () and SKIP) ***\ + prefer 2 + apply (ctac add: ccorres_return_Skip) + + \ \*** proof for the 'then' branch ***\ + + \ \---instructions: multiple on C side, including mdbNode fetch\ + apply (rule ccorres_rhs_assoc)+ + \ \we have to do it here because the first assoc did not apply inside the then block\ + apply (rule ccorres_move_c_guard_cte | csymbr)+ + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'="mdbNode_'" in ccorres_abstract, ceqv) + apply (rename_tac "cmdbNode") + apply (rule_tac P="cmdbnode_relation (cteMDBNode newCTE) cmdbNode" + in ccorres_gen_asm2) + apply csymbr+ + + \ \--- instruction: updateMDB (mdbPrev rva) (mdbNext_update \) but with Ptr\\ NULL on C side\ + apply (simp only:Ptr_not_null_pointer_not_zero) \ \replaces Ptr p \ NULL with p\0\ + + \ \--- instruction: y \ updateMDB (mdbPrev rva) (mdbNext_update (\_. mdbNext rva))\ + apply (ctac (no_simp, no_vcg) pre:ccorres_move_guard_ptr_safe + add: updateMDB_mdbPrev_set_mdbNext) + \ \here ctac alone does not apply because the subgoal generated + by the rule are not solvable by simp\ + \ \so we have to use (no_simp) (or apply (rule ccorres_split_nothrow))\ + apply (simp add: cmdbnode_relation_def) + apply assumption + \ \*** Main goal ***\ + \ \--- instruction: updateMDB (mdbNext rva) + (\mdb. mdbFirstBadged_update (\_. mdbFirstBadged mdb \ mdbFirstBadged rva) + (mdbPrev_update (\_. mdbPrev rva) mdb));\ + apply (rule ccorres_rhs_assoc2 ) \ \to group the 2 first C instrutions together\ + apply (ctac (no_vcg) add: emptySlot_helper) + + \ \--- instruction: y \ updateCap slot capability.NullCap;\ + apply (simp del: Collect_const) + apply csymbr + apply (ctac (no_vcg) pre:ccorres_move_guard_ptr_safe) + apply csymbr + apply (rule ccorres_move_c_guard_cte) + \ \--- instruction y \ updateMDB slot (\a. nullMDBNode);\ + apply (ctac (no_vcg) pre: ccorres_move_guard_ptr_safe + add: ccorres_updateMDB_const [unfolded const_def]) + + \ \the post_cap_deletion case\ + + apply (ctac(no_vcg) add: postCapDeletion_ccorres) + + \ \Haskell pre/post for y \ updateMDB slot (\a. nullMDBNode);\ + apply wp + \ \C pre/post for y \ updateMDB slot (\a. 
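
The two updateMDB steps in the proof above perform the usual doubly-linked-list unlink of the emptied slot: the predecessor's mdbNext and the successor's mdbPrev are redirected around it, each guarded by a null check on the C side. A minimal self-contained sketch of that shape in plain C — the node type and helper are hypothetical stand-ins, not the kernel's cte_C layout, and the mdbFirstBadged bookkeeping the real code also does is omitted:

    #include <stddef.h>

    /* Hypothetical node type, a stand-in used only to show the unlink shape. */
    struct mdb_node {
        struct mdb_node *prev;
        struct mdb_node *next;
    };

    /* Unlink n from its list, guarding each update with a null check, as the
     * two updateMDB calls above do. */
    static void mdb_unlink(struct mdb_node *n)
    {
        if (n->prev != NULL)
            n->prev->next = n->next;  /* updateMDB (mdbPrev ..) (mdbNext_update ..) */
        if (n->next != NULL)
            n->next->prev = n->prev;  /* updateMDB (mdbNext ..) (mdbPrev_update ..) */
    }
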
nullMDBNode);\ + apply simp + \ \C pre/post for the 2nd CALL\ + \ \Haskell pre/post for y \ updateCap slot capability.NullCap;\ + apply wp + \ \C pre/post for y \ updateCap slot capability.NullCap;\ + apply (simp add: Collect_const_mem cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def) + \ \Haskell pre/post for the two nested updates\ + apply wp + \ \C pre/post for the two nested updates\ + apply (simp add: Collect_const_mem ccap_relation_NullCap_iff) + \ \Haskell pre/post for (updateMDB (mdbPrev rva) (mdbNext_update (\_. mdbNext rva)))\ + apply (simp, wp) + \ \C pre/post for (updateMDB (mdbPrev rva) (mdbNext_update (\_. mdbNext rva)))\ + apply simp+ + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply simp + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift) + + \ \final precondition proof\ + apply (clarsimp simp: typ_heap_simps Collect_const_mem + cte_wp_at_ctes_of) + + apply (rule conjI) + \ \Haskell side\ + apply (simp add: is_aligned_3_next canonical_address_mdbNext) + + \ \C side\ + apply (clarsimp simp: map_comp_Some_iff typ_heap_simps) + apply (subst cap_get_tag_isCap) + apply (rule ccte_relation_ccap_relation) + apply (simp add: ccte_relation_def c_valid_cte_def + cl_valid_cte_def c_valid_cap_def) + apply simp + done + + +(************************************************************************) +(* *) +(* capSwapForDelete_ccorres *********************************************) +(* *) +(************************************************************************) + +lemma ccorres_return_void_C: + "ccorres dc xfdc \ UNIV (SKIP # hs) (return rv) (return_void_C)" + apply (rule ccorres_from_vcg_throws) + apply (simp add: return_def) + apply (rule allI, rule conseqPre) + apply vcg + apply simp + done + +declare Collect_const [simp del] + +lemma capSwapForDelete_ccorres: + "ccorres dc xfdc + (valid_mdb' and pspace_aligned' and pspace_canonical') + (UNIV \ {s. slot1_' s = Ptr slot1} + \ {s. 
slot2_' s = Ptr slot2}) + [] + (capSwapForDelete slot1 slot2) + (Call capSwapForDelete_'proc)" + apply (cinit lift: slot1_' slot2_' simp del: return_bind) + \ \***Main goal***\ + \ \--- instruction: when (slot1 \ slot2) \ / IF Ptr slot1 = Ptr slot2 THEN \\ + apply (simp add:when_def) + apply (rule ccorres_if_cond_throws2 [where Q = \ and Q' = \]) + apply (case_tac "slot1=slot2"; simp) + apply (rule ccorres_return_void_C) + + \ \***Main goal***\ + \ \--- ccorres goal with 2 affectations (cap1 and cap2) on both on Haskell and C\ + \ \--- \ execute each part independently\ + apply (simp add: liftM_def cong: call_ignore_cong) + apply (rule ccorres_pre_getCTE)+ + apply (rule ccorres_move_c_guard_cte, rule ccorres_symb_exec_r)+ + \ \***Main goal***\ + apply (ctac (no_vcg) add: cteSwap_ccorres) + \ \C Hoare triple for \cap2 :== \\ + apply vcg + \ \C existential Hoare triple for \cap2 :== \\ + apply simp + apply (rule conseqPre) + apply vcg + apply simp + \ \C Hoare triple for \cap1 :== \\ + apply vcg + \ \C existential Hoare triple for \cap1 :== \\ + apply simp + apply (rule conseqPre) + apply vcg + apply simp + + \ \Hoare triple for return_void\ + apply vcg + + \ \***Generalized preconditions***\ + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of map_comp_Some_iff + typ_heap_simps ccap_relation_def) + apply (simp add: cl_valid_cte_def c_valid_cap_def) +done + + + +declare Collect_const [simp add] + +(************************************************************************) +(* *) +(* Arch_sameRegionAs_ccorres ********************************************) +(* *) +(************************************************************************) + +lemma cap_get_tag_PageCap_frame: + "ccap_relation cap cap' \ + (cap_get_tag cap' = scast cap_frame_cap) = + (cap = + capability.ArchObjectCap + (FrameCap (cap_frame_cap_CL.capFBasePtr_CL (cap_frame_cap_lift cap')) + (vmrights_to_H (cap_frame_cap_CL.capFVMRights_CL (cap_frame_cap_lift cap'))) + (framesize_to_H (capFSize_CL (cap_frame_cap_lift cap'))) + (to_bool (cap_frame_cap_CL.capFIsDevice_CL (cap_frame_cap_lift cap'))) + (if cap_frame_cap_CL.capFMappedASID_CL (cap_frame_cap_lift cap') = 0 + then None else + Some ((cap_frame_cap_CL.capFMappedASID_CL (cap_frame_cap_lift cap')), + cap_frame_cap_CL.capFMappedAddress_CL (cap_frame_cap_lift cap')))))" + apply (rule iffI) + apply (erule ccap_relationE) + apply (clarsimp simp add: cap_lifts cap_to_H_def Let_def split: if_split) + apply (simp add: cap_get_tag_isCap isCap_simps frameSize_def) +done + +lemma fff_is_pageBits: + "(0xFFF :: machine_word) = 2 ^ pageBits - 1" + by (simp add: pageBits_def) + + +(* used? *) +lemma valid_cap'_PageCap_is_aligned: + "valid_cap' (ArchObjectCap (arch_capability.FrameCap w r sz d option)) t \ + is_aligned w (pageBitsForSize sz)" + apply (simp add: valid_cap'_def capAligned_def) +done + +lemma Arch_sameRegionAs_spec: + notes cap_get_tag = ccap_rel_cap_get_tag_cases_arch2' + shows + "\capa capb. 
\ \ \ ccap_relation (ArchObjectCap capa) \cap_a \ + ccap_relation (ArchObjectCap capb) \cap_b \ + Call Arch_sameRegionAs_'proc + \ \ret__unsigned_long = from_bool (Arch.sameRegionAs capa capb) \" + supply if_cong[cong] + apply vcg + apply clarsimp + apply (simp add: AARCH64_H.sameRegionAs_def) + subgoal for capa capb cap_b cap_a + apply (cases capa; cases capb; + frule (1) cap_get_tag[where cap'=cap_a]; (frule cap_lifts[where c=cap_a, THEN iffD1])?; + frule (1) cap_get_tag[where cap'=cap_b]; (frule cap_lifts[where c=cap_b, THEN iffD1])?; + simp add: cap_tag_defs isCap_simps from_bool_def if_0_1_eq split: if_splits; + clarsimp simp: ccap_relation_def cap_to_H_def c_valid_cap_def cl_valid_cap_def Let_def) + subgoal by (clarsimp simp: cap_frame_cap_lift_def'[simplified cap_tag_defs] + framesize_to_H_def pageBitsForSize_def field_simps + pageBits_def ptTranslationBits_def mask_def + split: vmpage_size.splits if_splits) + subgoal by (clarsimp simp: cap_lift_def cap_tag_defs cap_vspace_cap_lift_def cap_to_H_def + split: option.splits) + by (clarsimp simp: cap_lift_def cap_tag_defs cap_page_table_cap_lift_def cap_to_H_def + split: option.splits) + done + +(* combination of cap_get_capSizeBits + cap_get_archCapSizeBits from C *) +definition + get_capSizeBits_CL :: "cap_CL option \ nat" where + "get_capSizeBits_CL \ \cap. case cap of + Some (Cap_untyped_cap c) \ unat (cap_untyped_cap_CL.capBlockSize_CL c) + | Some (Cap_endpoint_cap c) \ epSizeBits + | Some (Cap_notification_cap c) \ ntfnSizeBits + | Some (Cap_cnode_cap c) \ unat (capCNodeRadix_CL c) + cteSizeBits + | Some (Cap_thread_cap c) \ tcbBlockSizeBits + | Some (Cap_frame_cap c) \ pageBitsForSize (framesize_to_H $ cap_frame_cap_CL.capFSize_CL c) + | Some (Cap_vspace_cap c) \ if config_ARM_PA_SIZE_BITS_40 then pageBits + 1 else pageBits + | Some (Cap_page_table_cap c) \ pageBits + | Some (Cap_asid_pool_cap c) \ asidPoolBits + | Some (Cap_zombie_cap c) \ + let type = cap_zombie_cap_CL.capZombieType_CL c in + if isZombieTCB_C type + then tcbBlockSizeBits + else unat (type && mask wordRadix) + cteSizeBits + | Some (Cap_vcpu_cap c) \ vcpuBits + | _ \ 0" + +lemma frame_cap_size [simp]: + "cap_get_tag cap = scast cap_frame_cap + \ cap_frame_cap_CL.capFSize_CL (cap_frame_cap_lift cap) && mask 2 = + cap_frame_cap_CL.capFSize_CL (cap_frame_cap_lift cap)" + apply (simp add: cap_frame_cap_lift_def) + by (simp add: cap_lift_def cap_tag_defs) + +lemma cap_get_tag_bound: + "cap_get_tag x < 32" + apply (simp add: cap_get_tag_def mask_def) + by word_bitwise + +lemma cap_get_tag_scast: + "UCAST(64 \ 32 signed) (cap_get_tag cap) = tag \ cap_get_tag cap = SCAST(32 signed \ 64) tag" + apply (rule iffI; simp add: cap_get_tag_def) + apply (drule sym; simp add: ucast_and_mask scast_eq_ucast msb_nth ucast_ucast_mask mask_twice) + done + +lemma cap_get_capSizeBits_spec: + "\s. \ \ \s. 
c_valid_cap (cap_' s)\ + \ret__unsigned_long :== PROC cap_get_capSizeBits(\cap) + \\ret__unsigned_long = of_nat (get_capSizeBits_CL (cap_lift \<^bsup>s\<^esup>cap))\" + apply vcg + apply (clarsimp simp: get_capSizeBits_CL_def) + (* slow *) + apply (intro conjI impI; + clarsimp simp: cap_lifts + cap_lift_asid_control_cap + cap_lift_irq_control_cap cap_lift_null_cap + cap_lift_vcpu_cap + Kernel_C.asidLowBits_def asid_low_bits_def + word_sle_def Let_def mask_def + isZombieTCB_C_def ZombieTCB_C_def + cap_lift_domain_cap cap_get_tag_scast + objBits_defs wordRadix_def + c_valid_cap_def cl_valid_cap_def pageBits_def asidPoolBits_def + Kernel_Config.config_ARM_PA_SIZE_BITS_40_def (* FIXME AARCH64: #define in C, so no other option for now *) + cong: option.case_cong + dest!: sym [where t = "ucast (cap_get_tag cap)" for cap]) + apply (clarsimp split: option.splits cap_CL.splits dest!: cap_lift_Some_CapD) + done + +lemma ccap_relation_get_capSizeBits_physical: + "\ ccap_relation hcap ccap; capClass hcap = PhysicalClass; capAligned hcap \ + \ 2 ^ get_capSizeBits_CL (cap_lift ccap) = capUntypedSize hcap" + supply if_cong[cong] + apply (cases hcap; + (match premises in "hcap = ArchObjectCap c" for c \ \cases c\)?; + (frule (1) ccap_rel_cap_get_tag_cases_generic)?; + (frule (2) ccap_rel_cap_get_tag_cases_arch)?; + (frule cap_lifts[THEN iffD1])?) + apply (all \clarsimp simp: get_capSizeBits_CL_def objBits_simps Let_def AARCH64_H.capUntypedSize_def + asid_low_bits_def pt_bits_def asidPoolBits_def table_size\) + + (* Zombie, Frame, PageTable, Untyped, CNode caps remain. *) + apply (all \thin_tac \hcap = _\\) + apply (all \rule arg_cong[where f="\s. 2 ^ s"]\) + + (* Zombie *) + apply (simp add: ccap_relation_def cap_lift_defs cap_lift_def cap_tag_defs cap_to_H_def) + apply (clarsimp simp: Let_def objBits_simps' wordRadix_def capAligned_def + word_bits_def word_less_nat_alt + intro!: less_mask_eq + split: if_splits) + + (* Page Table / VSpace *) + prefer 2 + subgoal for p pt_t m + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 cap_to_H_def) + apply (rename_tac cap) + by (case_tac cap; clarsimp simp add: Let_def pageBits_def split: if_split_asm) + + (* Frame, Untyped, CNode *) + by (simp add: ccap_relation_def cap_lift_defs cap_lift_def cap_tag_defs cap_to_H_def)+ + +lemma ccap_relation_get_capSizeBits_untyped: + "\ ccap_relation (UntypedCap d word bits idx) ccap \ \ + get_capSizeBits_CL (cap_lift ccap) = bits" + apply (frule cap_get_tag_isCap_unfolded_H_cap) + by (clarsimp simp: get_capSizeBits_CL_def ccap_relation_def + map_option_case cap_to_H_def cap_lift_def cap_tag_defs) + +definition + get_capZombieBits_CL :: "cap_zombie_cap_CL \ machine_word" where + "get_capZombieBits_CL \ \cap. + let type = cap_zombie_cap_CL.capZombieType_CL cap in + if isZombieTCB_C type then 4 else type && mask 6" + + +lemma get_capSizeBits_valid_shift: + "\ ccap_relation hcap ccap; capAligned hcap \ \ + get_capSizeBits_CL (cap_lift ccap) < 64" + apply (cases hcap; + (match premises in "hcap = ArchObjectCap c" for c \ \cases c\)?; + (frule (1) ccap_rel_cap_get_tag_cases_generic)?; + (frule (2) ccap_rel_cap_get_tag_cases_arch2)?; + (frule cap_lifts[THEN iffD1])?) + (* Deal with simple cases quickly. *) + apply (all \clarsimp simp: get_capSizeBits_CL_def objBits_simps' wordRadix_def Let_def + asidPoolBits_def + split: option.splits if_split_asm; + thin_tac \hcap = _\\) + (* Deal with non-physical caps quickly. 
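
get_capSizeBits_CL above reports object sizes as a log2 value ("size bits"): ccap_relation_get_capSizeBits_physical relates it to capUntypedSize via 2 ^ bits, and get_capSizeBits_valid_shift bounds it below the word size so that shifting by it stays meaningful. A small sketch of that convention in plain C; the parameters are hypothetical examples rather than the kernel's constants:

    #include <stdint.h>
    #include <assert.h>

    /* Illustrative sketch only (not seL4 kernel code). */
    typedef uint64_t word_t;
    #define BIT(n) (UINT64_C(1) << (n))

    /* cf. the Cap_cnode_cap case of get_capSizeBits_CL: radix + cteSizeBits. */
    static unsigned cnode_size_bits(unsigned radix, unsigned slot_bits)
    {
        return radix + slot_bits;
    }

    /* A base address plus size bits describes the region [base, base + BIT(bits));
     * the shift is only meaningful while bits < 64, which is what
     * get_capSizeBits_valid_shift establishes. */
    static word_t object_end(word_t base, unsigned bits)
    {
        assert(bits < 64);
        return base + BIT(bits);
    }
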
*) + apply (all \(match conclusion in "case_cap_CL _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ < _" \ + \clarsimp simp: cap_lift_def cap_tag_defs pageBits_def split: if_split\)?\) + (* Slow cases: Zombie, Frame, Untyped and CNode caps. *) + apply (all \clarsimp simp: cap_lift_def cap_lift_defs cap_tag_defs + ccap_relation_def cap_to_H_def Let_def + capAligned_def objBits_simps' word_bits_def + unat_ucast_less_no_overflow_simp\) + (* Zombie arithmetic. *) + apply (clarsimp simp: objBits_simps' Let_def cap_zombie_cap_lift_def cap_lift_def cap_tag_defs + split: if_split) + apply (subst less_mask_eq[where n=wordRadix]; + clarsimp elim!: less_trans simp: wordRadix_def word_less_nat_alt) + done + +lemma get_capSizeBits_valid_shift_word: + "\ ccap_relation hcap ccap; capAligned hcap \ \ + of_nat (get_capSizeBits_CL (cap_lift ccap)) < (0x40::machine_word)" + apply (subgoal_tac "of_nat (get_capSizeBits_CL (cap_lift ccap)) < (of_nat 64::machine_word)", simp) + apply (rule of_nat_mono_maybe, simp+) + apply (simp add: get_capSizeBits_valid_shift) + done + +lemma cap_zombie_cap_get_capZombieBits_spec: + "\s. \ \ \s. cap_get_tag \cap = scast cap_zombie_cap \ + \ret__unsigned_long :== PROC cap_zombie_cap_get_capZombieBits(\cap) + \\ret__unsigned_long = get_capZombieBits_CL (cap_zombie_cap_lift \<^bsup>s\<^esup>cap)\" + apply vcg + apply (clarsimp simp: get_capZombieBits_CL_def word_sle_def mask_def + isZombieTCB_C_def ZombieTCB_C_def Let_def) + done + +definition + get_capZombiePtr_CL :: "cap_zombie_cap_CL \ machine_word" where + "get_capZombiePtr_CL \ \cap. + let radix = unat (get_capZombieBits_CL cap) in + cap_zombie_cap_CL.capZombieID_CL cap && ~~ (mask (radix+1))" + +lemma cap_zombie_cap_get_capZombiePtr_spec: + "\s. \ \ \s. cap_get_tag \cap = scast cap_zombie_cap + \ get_capZombieBits_CL (cap_zombie_cap_lift \cap) < 0x3F \ + \ret__unsigned_long :== PROC cap_zombie_cap_get_capZombiePtr(\cap) + \\ret__unsigned_long = get_capZombiePtr_CL (cap_zombie_cap_lift \<^bsup>s\<^esup>cap)\" + apply vcg + apply (clarsimp simp: get_capZombiePtr_CL_def word_sle_def mask_def + isZombieTCB_C_def ZombieTCB_C_def Let_def) + apply (intro conjI) + apply (simp add: word_add_less_mono1[where k=1 and j="0x3F", simplified]) + apply (subst unat_plus_if_size) + apply (clarsimp split: if_split) + apply (clarsimp simp: get_capZombieBits_CL_def Let_def word_size + split: if_split if_split_asm) + apply (subgoal_tac "unat (capZombieType_CL (cap_zombie_cap_lift cap) && mask 6) + < unat ((2::machine_word) ^ 6)") + apply (clarsimp simp: shiftl_eq_mult) + apply (rule unat_mono) + apply (rule and_mask_less_size) + apply (clarsimp simp: word_size) + done + +definition + get_capPtr_CL :: "cap_CL option \ unit ptr" where + "get_capPtr_CL \ \cap. 
Ptr (case cap of + Some (Cap_untyped_cap c) \ cap_untyped_cap_CL.capPtr_CL c + | Some (Cap_endpoint_cap c) \ cap_endpoint_cap_CL.capEPPtr_CL c + | Some (Cap_notification_cap c) \ cap_notification_cap_CL.capNtfnPtr_CL c + | Some (Cap_cnode_cap c) \ cap_cnode_cap_CL.capCNodePtr_CL c + | Some (Cap_thread_cap c) \ (cap_thread_cap_CL.capTCBPtr_CL c && ~~ mask (objBits (undefined :: tcb))) + | Some (Cap_frame_cap c) \ cap_frame_cap_CL.capFBasePtr_CL c + | Some (Cap_vspace_cap c) \ cap_vspace_cap_CL.capVSBasePtr_CL c + | Some (Cap_page_table_cap c) \ cap_page_table_cap_CL.capPTBasePtr_CL c + | Some (Cap_asid_pool_cap c) \ cap_asid_pool_cap_CL.capASIDPool_CL c + | Some (Cap_zombie_cap c) \ get_capZombiePtr_CL c + | Some (Cap_vcpu_cap c) \ cap_vcpu_cap_CL.capVCPUPtr_CL c + | _ \ 0)" + +lemma cap_get_capPtr_spec: + "\s. \ \ \s. (cap_get_tag \cap = scast cap_zombie_cap + \ get_capZombieBits_CL (cap_zombie_cap_lift \cap) < 0x3F)\ + \ret__ptr_to_void :== PROC cap_get_capPtr(\cap) + \\ret__ptr_to_void = get_capPtr_CL (cap_lift \<^bsup>s\<^esup>cap)\" + apply vcg + apply (clarsimp simp: get_capPtr_CL_def) + apply (intro impI conjI; + clarsimp simp: cap_lifts pageBitsForSize_def + cap_lift_asid_control_cap word_sle_def + cap_lift_irq_control_cap cap_lift_null_cap + mask_def objBits_simps' cap_lift_domain_cap + ptr_add_assertion_positive cap_get_tag_scast + dest!: sym [where t = "ucast (cap_get_tag cap)" for cap] + split: vmpage_size.splits)+ + (* XXX: slow. there should be a rule for this *) + by (case_tac "cap_lift cap", simp_all, case_tac "a", + auto simp: cap_lift_def cap_lift_defs cap_tag_defs Let_def + split: if_split_asm) + +definition get_capIsPhysical_CL :: "cap_CL option \ bool" +where + "get_capIsPhysical_CL \ \cap. (case cap of + Some (Cap_untyped_cap c) \ True + | Some (Cap_endpoint_cap c) \ True + | Some (Cap_notification_cap c) \ True + | Some (Cap_cnode_cap c) \ True + | Some (Cap_thread_cap c) \ True + | Some (Cap_frame_cap c) \ True + | Some (Cap_vspace_cap c) \ True + | Some (Cap_page_table_cap c) \ True + | Some (Cap_asid_pool_cap c) \ True + | Some (Cap_zombie_cap c) \ True + | Some (Cap_vcpu_cap c) \ True + | _ \ False)" + +lemma cap_get_capIsPhysical_spec: + "\s. 
\ \ {s} + Call cap_get_capIsPhysical_'proc + \\ret__unsigned_long = from_bool (get_capIsPhysical_CL (cap_lift \<^bsup>s\<^esup>cap))\" + apply vcg + apply (clarsimp simp: get_capIsPhysical_CL_def) + apply (intro impI conjI; clarsimp simp: cap_lifts pageBitsForSize_def + cap_lift_asid_control_cap word_sle_def + cap_lift_irq_control_cap cap_lift_null_cap + mask_def objBits_simps cap_lift_domain_cap + ptr_add_assertion_positive cap_get_tag_scast + dest!: sym [where t = "ucast (cap_get_tag cap)" for cap] + split: vmpage_size.splits)+ + by (fastforce dest!: cap_lift_Some_CapD split: option.splits cap_CL.splits) + +lemma ccap_relation_get_capPtr_not_physical: + "\ ccap_relation hcap ccap; capClass hcap \ PhysicalClass \ \ + get_capPtr_CL (cap_lift ccap) = Ptr 0" + by (clarsimp simp: ccap_relation_def get_capPtr_CL_def cap_to_H_def Let_def + split: option.split cap_CL.split_asm if_split_asm) + +lemma ccap_relation_get_capIsPhysical: + "ccap_relation hcap ccap \ isPhysicalCap hcap = get_capIsPhysical_CL (cap_lift ccap)" + apply (case_tac hcap; clarsimp simp: cap_lifts cap_lift_domain_cap cap_lift_null_cap + cap_lift_irq_control_cap cap_to_H_def + get_capIsPhysical_CL_def + dest!: cap_get_tag_isCap_unfolded_H_cap) + apply (rename_tac arch_cap) + apply (case_tac arch_cap; clarsimp simp: cap_lifts cap_lift_asid_control_cap + dest!: cap_get_tag_isCap_unfolded_H_cap) + (* FIXME AARCH64 this should be automatable, probably in a way related to other handling of + PageTableCap vs cap_lift in this file *) + apply (rename_tac pt_t maddr) + apply (case_tac pt_t; clarsimp simp: cap_lifts cap_lift_asid_control_cap + dest!: cap_get_tag_isCap_unfolded_H_cap) + done + +lemma ctcb_ptr_to_tcb_ptr_mask': + "is_aligned (ctcb_ptr_to_tcb_ptr (tcb_Ptr x)) (objBits (undefined :: tcb)) \ + ctcb_ptr_to_tcb_ptr (tcb_Ptr x) = x && ~~ mask (objBits (undefined :: tcb))" + apply (simp add: ctcb_ptr_to_tcb_ptr_def) + apply (drule_tac d=ctcb_offset in is_aligned_add_helper) + apply (simp add: objBits_simps' ctcb_offset_defs) + apply simp + done + +lemmas ctcb_ptr_to_tcb_ptr_mask + = ctcb_ptr_to_tcb_ptr_mask'[simplified objBits_simps, simplified] + +lemma ccap_relation_get_capPtr_physical: + "\ ccap_relation hcap ccap; capClass hcap = PhysicalClass; capAligned hcap \ \ + get_capPtr_CL (cap_lift ccap) + = Ptr (capUntypedPtr hcap)" + apply (cases hcap; + (match premises in "hcap = ArchObjectCap c" for c \ \cases c\)?; + (frule (1) ccap_rel_cap_get_tag_cases_generic)?; + (frule (2) ccap_rel_cap_get_tag_cases_arch2)?; + (frule cap_lifts[THEN iffD1])?) 
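
On the C side a thread cap stores a pointer offset into its (aligned) TCB object by ctcb_offset; ctcb_ptr_to_tcb_ptr_mask' above shows that, under that alignment, undoing the offset is the same as masking off the low objBits bits, which is how the Cap_thread_cap case of get_capPtr_CL computes the object base. A minimal sketch of the masking in plain C; the size-bits parameter is a hypothetical example, not the kernel's tcbBlockSizeBits value:

    #include <stdint.h>

    /* Illustrative sketch only (not seL4 kernel code). */
    typedef uint64_t word_t;
    #define MASK(n) ((UINT64_C(1) << (n)) - 1)

    /* Recover the base of an aligned object from a pointer to a field at a fixed
     * offset inside it, cf. the Cap_thread_cap case of get_capPtr_CL:
     * capTCBPtr_CL c && ~~ mask (objBits (undefined :: tcb)). */
    static word_t tcb_ptr_to_obj_base(word_t ctcb_ptr, unsigned tcb_size_bits)
    {
        return ctcb_ptr & ~MASK(tcb_size_bits);
    }
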
+ apply (all \clarsimp simp: get_capPtr_CL_def get_capZombiePtr_CL_def get_capZombieBits_CL_def + objBits_simps ccap_relation_def cap_to_H_def Let_def capAligned_def + ctcb_ptr_to_tcb_ptr_mask map_option_Some_eq2 + split: if_splits; + thin_tac \hcap = _\\) + apply (rule arg_cong[OF less_mask_eq]) + (* zombie *) + apply (clarsimp simp: cap_lift_def cap_lift_defs Let_def cap_tag_defs word_less_nat_alt + word_bits_conv) + (* PageTable cases *) + by (simp add: Let_def split: cap_CL.split_asm if_splits)+ + +lemma ccap_relation_get_capPtr_untyped: + "\ ccap_relation (UntypedCap d word bits idx) ccap \ \ + get_capPtr_CL (cap_lift ccap) = Ptr word" + apply (frule cap_get_tag_isCap_unfolded_H_cap) + by (clarsimp simp: get_capPtr_CL_def ccap_relation_def + map_option_case cap_to_H_def cap_lift_def cap_tag_defs) + +lemma cap_get_tag_isArchCap_unfolded_H_cap: + "ccap_relation (capability.ArchObjectCap a_cap) cap' \ + (isArchCap_tag (cap_get_tag cap'))" + apply (frule cap_get_tag_isCap(12), simp) + done + +lemmas ccap_rel_cap_get_tag_cases_generic' = + ccap_rel_cap_get_tag_cases_generic + cap_get_tag_isArchCap_unfolded_H_cap[OF back_subst[of "\cap. ccap_relation cap cap'" for cap']] + +lemma sameRegionAs_spec: + notes cap_get_tag = ccap_rel_cap_get_tag_cases_generic' + shows + "\capa capb. \ \ \ccap_relation capa \cap_a \ ccap_relation capb \cap_b \ capAligned capb\ + Call sameRegionAs_'proc + \ \ret__unsigned_long = from_bool (sameRegionAs capa capb) \" + apply vcg + apply clarsimp + apply (simp add: sameRegionAs_def isArchCap_tag_def2 ccap_relation_c_valid_cap) + apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps) + \ \capa is a ThreadCap\ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs)[1] + apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(1)) + apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(1)) + apply (simp add: ccap_relation_def map_option_case) + apply (simp add: cap_thread_cap_lift) + apply (simp add: cap_to_H_def) + apply (clarsimp simp: from_bool_0 split: if_split) + apply (clarsimp simp: case_bool_If ctcb_ptr_to_tcb_ptr_def if_distrib + cong: if_cong) + apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) + apply (clarsimp simp: isArchCap_tag_def2) + \ \capa is a NullCap\ + apply (simp add: cap_tag_defs) + \ \capa is an NotificationCap\ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs)[1] + apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(3)) + apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(3)) + apply (simp add: ccap_relation_def map_option_case) + apply (simp add: cap_notification_cap_lift) + apply (simp add: cap_to_H_def) + apply (clarsimp simp: from_bool_0 split: if_split) + apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) + apply (clarsimp simp: isArchCap_tag_def2) + \ \capa is an IRQHandlerCap\ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs)[1] + apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(5)) + apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(5)) + apply (simp add: ccap_relation_def map_option_case) + apply (simp add: cap_irq_handler_cap_lift) + apply (simp add: cap_to_H_def) + apply (clarsimp simp: up_ucast_inj_eq c_valid_cap_def ucast_eq_mask + cl_valid_cap_def mask_twice from_bool_0 + split: if_split bool.split + | intro impI conjI + | simp) + apply (frule_tac cap'=cap_b 
in cap_get_tag_isArchCap_unfolded_H_cap) + apply (clarsimp simp: isArchCap_tag_def2) + \ \capa is an EndpointCap\ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs)[1] + apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(4)) + apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(4)) + apply (simp add: ccap_relation_def map_option_case) + apply (simp add: cap_endpoint_cap_lift) + apply (simp add: cap_to_H_def) + apply (clarsimp simp: from_bool_0 split: if_split) + apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) + apply (clarsimp simp: isArchCap_tag_def2) + \ \capa is a DomainCap\ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs)[1] + apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) + apply (fastforce simp: isArchCap_tag_def2 split: if_split) + \ \capa is a Zombie\ + apply (simp add: cap_tag_defs) + \ \capa is an Arch object cap\ + apply (frule_tac cap'=cap_a in cap_get_tag_isArchCap_unfolded_H_cap) + apply (clarsimp simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) + apply (rule conjI, clarsimp, rule impI)+ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs)[1] + \ \capb is an Arch object cap\ + apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) + apply (fastforce simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) + \ \capa is a ReplyCap\ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs)[1] + apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) + apply (clarsimp simp: isArchCap_tag_def2) + apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(8)) + apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(8)) + apply (simp add: ccap_relation_def map_option_case) + apply (simp add: cap_reply_cap_lift) + apply (simp add: cap_to_H_def ctcb_ptr_to_tcb_ptr_def) + apply (clarsimp simp: from_bool_0 split: if_split) + \ \capa is an UntypedCap\ + apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(9)) + apply (intro conjI) + apply (rule impI, intro conjI) + apply (rule impI, drule(1) cap_get_tag_to_H)+ + apply (clarsimp simp: capAligned_def word_bits_conv + objBits_simps' get_capZombieBits_CL_def + Let_def word_less_nat_alt less_mask_eq + split: if_split_asm) + apply (subgoal_tac "capBlockSize_CL (cap_untyped_cap_lift cap_a) \ 0x3F") + apply (simp add: word_le_make_less) + apply (simp add: cap_untyped_cap_lift_def cap_lift_def + cap_tag_defs word_and_le1 mask_def) + apply (clarsimp simp: get_capSizeBits_valid_shift_word) + apply (clarsimp simp: from_bool_def Let_def split: if_split bool.splits) + apply (subst unat_of_nat64, + clarsimp simp: unat_of_nat64 word_bits_def + dest!: get_capSizeBits_valid_shift)+ + apply (clarsimp simp: ccap_relation_get_capPtr_physical + ccap_relation_get_capPtr_untyped + ccap_relation_get_capIsPhysical[symmetric] + ccap_relation_get_capSizeBits_physical + ccap_relation_get_capSizeBits_untyped) + apply (intro conjI impI) + apply ((clarsimp simp: ccap_relation_def map_option_case + cap_untyped_cap_lift cap_to_H_def + field_simps valid_cap'_def)+)[4] + apply (rule impI, simp add: from_bool_0 ccap_relation_get_capIsPhysical[symmetric]) + \ \capa is a CNodeCap\ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs)[1] + apply (frule_tac cap'=cap_b in 
cap_get_tag_isArchCap_unfolded_H_cap) + apply (clarsimp simp: isArchCap_tag_def2) + apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(10)) + apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(10)) + apply (simp add: ccap_relation_def map_option_case) + apply (simp add: cap_cnode_cap_lift) + apply (simp add: cap_to_H_def) + apply (clarsimp simp: from_bool_0 split: if_split bool.split) + \ \capa is an IRQControlCap\ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] + apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) + apply (fastforce simp: isArchCap_tag_def2 split: if_split) + done + +lemma framesize_to_H_eq: + "\ a \ 2; b \ 2 \ \ + (framesize_to_H a = framesize_to_H b) = (a = b)" + by (fastforce simp: framesize_to_H_def ARMSmallPage_def ARMLargePage_def ARMHugePage_def + word_le_make_less + split: if_split + dest: word_less_cases) + +lemma capFSize_range: + "\cap. cap_get_tag cap = scast cap_frame_cap \ c_valid_cap cap \ + capFSize_CL (cap_frame_cap_lift cap) \ 2" + apply (simp add: cap_frame_cap_lift_def c_valid_cap_def cl_valid_cap_def cong: option.case_cong) + apply (clarsimp simp: cap_frame_cap_lift) + apply (drule word_less_sub_1, simp) + done + +lemma ccap_relation_FrameCap_BasePtr: + "ccap_relation (ArchObjectCap (FrameCap p r s d m)) ccap + \ capFBasePtr_CL (cap_frame_cap_lift ccap) = p" + apply (frule cap_get_tag_isCap_unfolded_H_cap) + by (clarsimp simp: ccap_relation_def cap_to_H_def cap_lift_def cap_lift_defs cap_tag_defs + Let_def) + +lemma ccap_relation_FrameCap_IsDevice: + "ccap_relation (ArchObjectCap (FrameCap p r s d m)) ccap + \ capFIsDevice_CL (cap_frame_cap_lift ccap) = (if d then 1 else 0)" + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_lift_def cap_lift_defs cap_tag_defs + Let_def) + apply (thin_tac _)+ + by (clarsimp simp: word_and_1 split: if_splits) + +lemma ccap_relation_FrameCap_Size: + "ccap_relation (ArchObjectCap (FrameCap p r s d m)) ccap + \ capFSize_CL (cap_frame_cap_lift ccap) = framesize_from_H s" + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_lift_def cap_lift_defs cap_tag_defs + Let_def c_valid_cap_def cl_valid_cap_def) + apply (thin_tac "p = _", thin_tac "r = _", thin_tac "d = _", thin_tac "m = _") + apply (cases s; clarsimp simp: framesize_to_H_def framesize_from_H_def vm_page_size_defs + split: if_splits cong: conj_cong) + apply (word_bitwise, simp) + done + +lemma ccap_relation_FrameCap_MappedASID: + "ccap_relation (ArchObjectCap (FrameCap p r s d (Some (a, b)))) ccap + \ capFMappedASID_CL (cap_frame_cap_lift ccap) = a" + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (frule cap_get_tag_PageCap_frame) + apply (clarsimp split: if_split_asm) + done + +lemma ccap_relation_FrameCap_MappedAddress: + "ccap_relation (ArchObjectCap (FrameCap p r s d (Some (a, b)))) ccap + \ capFMappedAddress_CL (cap_frame_cap_lift ccap) = b" + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (frule cap_get_tag_PageCap_frame) + apply (clarsimp split: if_split_asm) + done + +lemmas ccap_relation_FrameCap_fields = + ccap_relation_FrameCap_BasePtr ccap_relation_FrameCap_IsDevice ccap_relation_FrameCap_Size + +lemma case_bool_of_nat_eq: + defines "cases_of c \ case c of True \ of_nat 1 | False \ of_nat 0" + shows "(cases_of c = 0) = (\ c)" + "(cases_of c = 1) = c" + "(cases_of c = cases_of d) = (c = d)" + by (cases c; simp add: cases_of_def; cases d; 
simp)+ + +lemma Arch_sameObjectAs_spec: + "\capa capb. \ \ \ccap_relation (ArchObjectCap capa) \cap_a \ + ccap_relation (ArchObjectCap capb) \cap_b \ + capAligned (ArchObjectCap capa) \ + capAligned (ArchObjectCap capb) \ + Call Arch_sameObjectAs_'proc + \ \ret__unsigned_long = from_bool (Arch.sameObjectAs capa capb) \" +proof - + note cap_get_tag = ccap_rel_cap_get_tag_cases_arch2' + note case_bool_of_nat_eq[simp] + have [simp]: "(\d. d) = False" "(\d. \d) = False" by auto + show ?thesis + apply vcg + apply (clarsimp simp: AARCH64_H.sameObjectAs_def) + subgoal for capa capb cap_b cap_a + apply (cases capa) + apply (all \frule (1) cap_get_tag[where cap'=cap_a]\) + apply (all \(frule cap_lifts[where c=cap_a, THEN iffD1])?\) + apply (all \clarsimp simp: cap_tag_defs isCap_simps + split: if_splits\) + apply (all \fastforce?\) + (* frames remain. *) + apply (all \cases capb\) + apply (all \frule (1) cap_get_tag[where cap'=cap_b]\) + apply (all \(frule cap_lifts[where c=cap_b, THEN iffD1])?\) + apply (all \clarsimp simp: cap_tag_defs isCap_simps ccap_relation_FrameCap_fields + framesize_from_H_eq capAligned_def + split: if_splits\) + by (all \(fastforce simp: AARCH64_H.sameRegionAs_def isCap_simps is_aligned_no_overflow_mask)?\) + done +qed + +lemma sameObjectAs_spec: + "\capa capb. \ \ \ccap_relation capa \cap_a \ + ccap_relation capb \cap_b \ + capAligned capa \ capAligned capb \ (\s. s \' capa)\ + Call sameObjectAs_'proc + \ \ret__unsigned_long = from_bool (sameObjectAs capa capb) \" + apply vcg + apply (clarsimp simp: sameObjectAs_def isArchCap_tag_def2) + apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs) + apply fastforce+ + \ \capa is an arch cap\ + apply (frule cap_get_tag_isArchCap_unfolded_H_cap) + apply (simp add: isArchCap_tag_def2) + apply (rule conjI, rule impI, clarsimp, rule impI)+ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs)[1] + apply ((fastforce)+)[7] + \ \capb is an arch cap\ + apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) + apply (fastforce simp: isArchCap_tag_def2 linorder_not_less [symmetric])+ + \ \capa is an irq handler cap\ + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps cap_tag_defs) + apply fastforce+ + \ \capb is an arch cap\ + apply (frule cap_get_tag_isArchCap_unfolded_H_cap) + apply (fastforce simp: isArchCap_tag_def2)+ + done + +lemma sameRegionAs_EndpointCap: + shows "\ccap_relation capa capc; + RetypeDecls_H.sameRegionAs (capability.EndpointCap p b cs cr cg cgr) capa\ + \ cap_get_tag capc = scast cap_endpoint_cap" + apply (simp add: sameRegionAs_def Let_def) + apply (case_tac capa; + simp add: isUntypedCap_def isEndpointCap_def isNotificationCap_def + isCNodeCap_def isThreadCap_def isReplyCap_def isIRQControlCap_def + isIRQHandlerCap_def isArchObjectCap_def) + apply (clarsimp simp: ccap_relation_def map_option_case) + apply (case_tac "cap_lift capc"; simp) + apply (simp add: cap_to_H_def) + apply (case_tac a; simp) + apply (simp add:cap_endpoint_cap_lift cap_endpoint_cap_lift_def) + apply (rename_tac zombie_cap) + apply (case_tac "isZombieTCB_C (capZombieType_CL zombie_cap)"; simp add: Let_def) + done + +lemma sameRegionAs_NotificationCap: + shows "\ccap_relation capa capc; + RetypeDecls_H.sameRegionAs + (capability.NotificationCap x y z u ) capa\ + \ cap_get_tag capc = scast cap_notification_cap" + apply (simp add: sameRegionAs_def Let_def) + apply (case_tac capa; + simp add: isUntypedCap_def 
isEndpointCap_def isNotificationCap_def + isCNodeCap_def isThreadCap_def isReplyCap_def isIRQControlCap_def + isIRQHandlerCap_def isArchObjectCap_def) + apply (clarsimp simp: ccap_relation_def map_option_case) + apply (case_tac "cap_lift capc"; simp) + apply (simp add: cap_to_H_def) + apply (case_tac a; simp) + apply (simp add: cap_notification_cap_lift cap_notification_cap_lift_def) + apply (rename_tac zombie_cap) + apply (case_tac "isZombieTCB_C (capZombieType_CL zombie_cap)"; simp add: Let_def) + done + +lemma isMDBParentOf_spec: + notes option.case_cong_weak [cong] + shows "\ctea cte_a cteb cte_b. + \ \ {s. cslift s (cte_a_' s) = Some cte_a \ + ccte_relation ctea cte_a \ + cslift s (cte_b_' s) = Some cte_b \ + ccte_relation cteb cte_b \ + capAligned (cteCap cteb) \ + (\s. s \' (cteCap ctea)) } + Call isMDBParentOf_'proc + \ \ret__unsigned_long = from_bool (isMDBParentOf ctea cteb) \" + supply if_cong[cong] + apply (intro allI, rule conseqPre) + apply vcg + apply (clarsimp simp: isMDBParentOf_def) + apply (frule_tac cte=ctea in ccte_relation_ccap_relation) + apply (frule_tac cte=cteb in ccte_relation_ccap_relation) + + apply (rule conjI, clarsimp simp: typ_heap_simps dest!: lift_t_g) + apply (intro conjI impI) + apply (simp add: ccte_relation_def map_option_case) + apply (simp add: cte_lift_def) + apply (clarsimp simp: cte_to_H_def mdb_node_to_H_def split: option.split_asm) + apply (clarsimp simp: Let_def to_bool_def + split: if_split bool.splits) + apply ((clarsimp simp: typ_heap_simps dest!: lift_t_g)+)[3] + apply (rule_tac x="cteCap ctea" in exI, rule conjI) + apply (clarsimp simp: ccte_relation_ccap_relation typ_heap_simps + dest!: lift_t_g) + apply (rule_tac x="cteCap cteb" in exI, rule conjI) + apply (clarsimp simp: ccte_relation_ccap_relation typ_heap_simps + dest!: lift_t_g) + apply (clarsimp simp: ccte_relation_def map_option_case) + apply (simp add: cte_lift_def) + apply (clarsimp simp: cte_to_H_def mdb_node_to_H_def + split: option.split_asm) + + apply (rule conjI) + \ \sameRegionAs = 0\ + apply (rule impI) + apply (clarsimp simp: from_bool_def + split: if_split bool.splits) + + \ \sameRegionAs \ 0\ + apply (clarsimp simp: from_bool_def) + apply (clarsimp cong:bool.case_cong if_cong simp: typ_heap_simps) + + apply (rule conjI) + \ \cap_get_tag of cte_a is an endpoint\ + apply clarsimp + apply (frule cap_get_tag_EndpointCap) + apply simp + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ + + apply (subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_endpoint_cap") \ \needed also after\ + prefer 2 + apply (rule sameRegionAs_EndpointCap, assumption+) + + apply (clarsimp simp: if_1_0_0 typ_heap_simps' Let_def case_bool_If) + apply (frule_tac cap="(cap_to_H x2c)" in cap_get_tag_EndpointCap) + apply (clarsimp split: if_split_asm simp: if_distrib [where f=scast]) + + apply (clarsimp, rule conjI) + \ \cap_get_tag of cte_a is an notification\ + apply clarsimp + apply (frule cap_get_tag_NotificationCap) + apply simp + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ + + apply (subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_notification_cap") \ \needed also after\ + prefer 2 + apply (rule sameRegionAs_NotificationCap, assumption+) + + apply (rule conjI, simp) + apply clarsimp + apply (simp add: Let_def case_bool_If) + apply (frule_tac cap="(cap_to_H x2c)" in cap_get_tag_NotificationCap) + apply clarsimp + + \ \main goal\ + apply clarsimp + apply (simp add: 
to_bool_def) + apply (subgoal_tac "(\ (isEndpointCap (cap_to_H x2b))) \ ( \ (isNotificationCap (cap_to_H x2b)))") + apply clarsimp + apply (clarsimp simp: cap_get_tag_isCap [symmetric]) + done + +lemma updateCapData_spec: + "\cap. \ \ \ ccap_relation cap \cap \ preserve = to_bool (\preserve) \ newData = \newData\ + Call updateCapData_'proc + \ ccap_relation (updateCapData preserve newData cap) \ret__struct_cap_C \" + supply if_cong[cong] + apply (rule allI, rule conseqPre) + apply vcg + apply (clarsimp simp: if_1_0_0) + + apply (simp add: updateCapData_def) + + apply (case_tac cap, simp_all add: cap_get_tag_isCap_unfolded_H_cap + isCap_simps isArchCap_tag_def2 cap_tag_defs Let_def) + \ \NotificationCap\ + apply clarsimp + apply (frule cap_get_tag_isCap_unfolded_H_cap(3)) + apply (frule (1) iffD1[OF cap_get_tag_NotificationCap]) + apply clarsimp + + apply (intro conjI impI) + \ \preserve is zero and capNtfnBadge_CL \ = 0\ + apply clarsimp + apply (clarsimp simp:cap_notification_cap_lift_def cap_lift_def cap_tag_defs) + apply (simp add: ccap_relation_def cap_lift_def cap_tag_defs cap_to_H_def) + \ \preserve is zero and capNtfnBadge_CL \ \ 0\ + apply clarsimp + apply (simp add: ccap_relation_NullCap_iff cap_tag_defs) + \ \preserve is not zero\ + apply clarsimp + apply (simp add: to_bool_def) + apply (case_tac "preserve_' x = 0 \ capNtfnBadge_CL (cap_notification_cap_lift (cap_' x))= 0", + clarsimp) + apply (simp add: if_not_P) + apply (simp add: ccap_relation_NullCap_iff cap_tag_defs) + + \ \EndpointCap\ + apply clarsimp + apply (frule cap_get_tag_isCap_unfolded_H_cap(4)) + apply (frule (1) iffD1[OF cap_get_tag_EndpointCap]) + apply clarsimp + + apply (intro impI conjI) + \ \preserve is zero and capNtfnBadge_CL \ = 0\ + apply clarsimp + apply (clarsimp simp:cap_endpoint_cap_lift_def cap_lift_def cap_tag_defs) + apply (simp add: ccap_relation_def cap_lift_def cap_tag_defs cap_to_H_def) + \ \preserve is zero and capNtfnBadge_CL \ \ 0\ + apply clarsimp + apply (simp add: ccap_relation_NullCap_iff cap_tag_defs) + \ \preserve is not zero\ + apply clarsimp + apply (simp add: to_bool_def) + apply (case_tac "preserve_' x = 0 \ capEPBadge_CL (cap_endpoint_cap_lift (cap_' x))= 0", clarsimp) + apply (simp add: if_not_P) + apply (simp add: ccap_relation_NullCap_iff cap_tag_defs) + + \ \ArchObjectCap\ + apply clarsimp + apply (frule cap_get_tag_isArchCap_unfolded_H_cap) + apply (simp add: isArchCap_tag_def2) + apply (simp add: AARCH64_H.updateCapData_def) + + \ \CNodeCap\ + apply (clarsimp simp: cteRightsBits_def cteGuardBits_def) + apply (frule cap_get_tag_isCap_unfolded_H_cap(10)) + apply (frule (1) iffD1[OF cap_get_tag_CNodeCap]) + apply clarsimp + + apply (thin_tac "ccap_relation x y" for x y) + apply (thin_tac "ret__unsigned_long_' t = v" for t v)+ + + apply (simp add: seL4_CNode_CapData_lift_def fupdate_def word_size word_less_nat_alt mask_def + cong: if_cong) + apply (simp only: unat_word_ariths(1)) + apply (rule ssubst [OF nat_mod_eq' [where n = "2 ^ len_of TYPE(64)"]]) + \ \unat (\ && 0x3F) + unat (\ mod 0x40) < 2 ^ len_of TYPE(64)\ + apply (rule order_le_less_trans, rule add_le_mono) + apply (rule word_le_nat_alt[THEN iffD1]) + apply (rule word_and_le1) + apply (simp add: cap_cnode_cap_lift_def cap_lift_cnode_cap) + apply (rule word_le_nat_alt[THEN iffD1]) + apply (rule word_and_le1) + apply (simp add: mask_def) + + apply (simp add: word_sle_def) + apply (rule conjI, clarsimp simp: ccap_relation_NullCap_iff cap_tag_defs) + apply clarsimp + apply (rule conjI) + apply (rule unat_less_power[where 
sz=6, simplified], simp add: word_bits_def) + apply (rule and_mask_less'[where n=6, unfolded mask_def, simplified], simp) + + apply clarsimp + apply (simp add: ccap_relation_def c_valid_cap_def cl_valid_cap_def + cap_lift_cnode_cap cap_tag_defs cap_to_H_simps + cap_cnode_cap_lift_def) + apply (simp add: word_bw_assocs word_bw_comms word_bw_lcs) + done + +abbreviation + "deriveCap_xf \ liftxf errstate deriveCap_ret_C.status_C deriveCap_ret_C.cap_C ret__struct_deriveCap_ret_C_'" + +lemma ensureNoChildren_ccorres: + "ccorres (syscall_error_rel \ dc) (liftxf errstate id undefined ret__unsigned_long_') + (\s. valid_objs' s \ valid_mdb' s) (UNIV \ \slot = ptr_val (\slot)\) [] + (ensureNoChildren slot) (Call ensureNoChildren_'proc)" + apply (cinit lift: slot_') + apply (rule ccorres_liftE_Seq) + apply (rule ccorres_getCTE) + apply (rule ccorres_move_c_guard_cte) + + apply (rule_tac P= "\ s. valid_objs' s \ valid_mdb' s \ ctes_of s (ptr_val slota) = Some cte" + and P' =UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + + apply (frule (1) rf_sr_ctes_of_clift, clarsimp) + apply (simp add: typ_heap_simps) + + apply (clarsimp simp: whenE_def throwError_def return_def nullPointer_def liftE_bindE) + + apply (clarsimp simp: returnOk_def return_def) \ \solve the case where mdbNext is zero\ + + \ \main goal\ + apply (simp add: ccte_relation_def) + apply (frule_tac cte="cte_to_H y" in valid_mdb_ctes_of_next, simp+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule_tac cte=cte in rf_sr_ctes_of_clift, assumption, clarsimp) + apply (rule conjI) + apply (frule_tac cte="(cte_to_H ya)" in ctes_of_valid', assumption, simp) + apply (rule valid_capAligned, assumption) + apply (rule conjI) + apply (frule_tac cte="(cte_to_H y)" in ctes_of_valid', assumption, simp) + apply blast + + apply clarsimp + apply (rule conjI) + \ \isMDBParentOf is not zero\ + apply clarsimp + apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] + + apply (simp add: bind_def) + apply (simp add: split_paired_Bex) + apply (clarsimp simp: in_getCTE_cte_wp_at') + apply (simp add: cte_wp_at_ctes_of) + apply (simp add: syscall_error_rel_def EXCEPTION_NONE_def EXCEPTION_SYSCALL_ERROR_def) + apply (simp add: syscall_error_to_H_cases(9)) + \ \isMDBParentOf is zero\ + apply clarsimp + apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] + apply (simp add: bind_def) + apply (simp add: split_paired_Bex) + apply (clarsimp simp: in_getCTE_cte_wp_at') + apply (simp add: cte_wp_at_ctes_of) + apply (simp add: returnOk_def return_def) + + \ \last goal\ + apply clarsimp + apply (simp add: cte_wp_at_ctes_of) + done + +lemma Arch_deriveCap_ccorres: + "ccorres (syscall_error_rel \ ccap_relation) deriveCap_xf + \ (UNIV \ {s. 
ccap_relation (ArchObjectCap cap) (cap_' s)}) [] + (Arch.deriveCap slot cap) (Call Arch_deriveCap_'proc)" + apply (cinit lift: cap_') + apply csymbr + apply (unfold AARCH64_H.deriveCap_def Let_def) + apply (fold case_bool_If) + apply wpc + apply (clarsimp simp: cap_get_tag_isCap_ArchObject + ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: Collect_const_mem isCap_simps not_VSRootPT_T_eq returnOk_def return_def) + apply (simp add: cap_get_tag_isCap_ArchObject isCap_simps) + subgoal by (clarsimp simp: ccap_relation_def cap_lift_def Let_def map_option_Some_eq2 + cap_tag_defs cap_to_H_def cap_vspace_cap_lift_def + cap_page_table_cap_lift_def + split: if_split_asm) (* FIXME AARCH64: slow, could potentially be sped up *) + apply wpc + apply (clarsimp simp: cap_get_tag_isCap_ArchObject + ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: Collect_const_mem isCap_simps not_VSRootPT_T_eq throwError_def return_def) + apply (clarsimp simp: errstate_def syscall_error_rel_def + syscall_error_to_H_cases + exception_defs) + subgoal by (simp add: ccap_relation_def cap_lift_def Let_def + cap_tag_defs cap_to_H_def to_bool_def cap_vspace_cap_lift_def + cap_page_table_cap_lift_def + split: if_split_asm) (* FIXME AARCH64: slow, could potentially be sped up *) + \ \FrameCap\ + apply wpc + apply (clarsimp simp: cap_get_tag_isCap_ArchObject + ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cap_get_tag_isCap_unfolded_H_cap isCap_simps returnOk_def return_def) + subgoal + apply (frule cap_get_tag_isCap_unfolded_H_cap) + by (clarsimp simp: cap_frame_cap_lift[simplified cap_tag_defs, simplified] cap_tag_defs + ccap_relation_def cap_to_H_def asidInvalid_def + c_valid_cap_def cl_valid_cap_def + split: if_splits) + apply (simp add: cap_get_tag_isCap_ArchObject + ccorres_cond_iffs) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def subset_iff + split: bool.split) + apply (cases cap, simp_all add: isCap_simps ccap_relation_NullCap_iff)[1] + apply clarsimp + done + +lemma isArchCap_T_isArchObjectCap: + "isArchCap \ = isArchObjectCap" + by (rule ext, auto simp: isCap_simps) + +lemma deriveCap_ccorres': + "ccorres (syscall_error_rel \ ccap_relation) deriveCap_xf + (valid_objs' and valid_mdb') (UNIV \ {s. ccap_relation cap (cap_' s)} \ {s. 
slot_' s = Ptr slot}) [] + (deriveCap slot cap) (Call deriveCap_'proc)" + apply (cinit lift: cap_' slot_') + apply csymbr + apply (fold case_bool_If) + apply wpc + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) + apply csymbr + apply (clarsimp simp: cap_get_tag_isCap) + apply (rule ccorres_from_vcg_throws [where P=\ and P' = UNIV]) + apply (simp add: returnOk_def return_def ccap_relation_NullCap_iff) + apply (rule allI, rule conseqPre) + apply vcg + apply clarsimp + apply wpc + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) + apply csymbr + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def + ccap_relation_NullCap_iff) + apply wpc + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) + apply csymbr + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) + apply (rule ccorres_rhs_assoc)+ + apply ctac_print_xf + apply (rule ccorres_split_nothrow_call_novcgE + [where xf'="ret__unsigned_long_'"]) + apply (rule ensureNoChildren_ccorres) + apply simp+ + apply ceqv + apply simp + apply (rule_tac P'="\\ret__unsigned_long = scast EXCEPTION_NONE\" + in ccorres_from_vcg_throws[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def returnOk_def) + apply simp + apply (rule_tac P'="{s. ret__unsigned_long_' s = rv' \ errstate s = err'}" + in ccorres_from_vcg_throws[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def + errstate_def) + apply wp + apply wpc + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) + apply csymbr + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def + ccap_relation_NullCap_iff) + apply wpc + apply (rule ccorres_split_throws[rotated]) + apply (clarsimp simp: cap_get_tag_isCap + liftME_def Let_def isArchCap_T_isArchObjectCap) + apply vcg + apply (clarsimp simp: cap_get_tag_isCap + liftME_def Let_def isArchCap_T_isArchObjectCap + ccorres_cond_univ_iff) + apply (rule ccorres_add_returnOk) + apply (rule ccorres_split_nothrow_call_novcgE + [where xf'=ret__struct_deriveCap_ret_C_']) + apply (rule Arch_deriveCap_ccorres) + apply simp+ + apply (rule ceqv_refl) + apply (rule_tac P'="\\ret__struct_deriveCap_ret_C + = rv'\" + in ccorres_from_vcg_throws[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def returnOk_def) + apply (rule_tac P'="{s. (ret__struct_deriveCap_ret_C_' s) + = rv' \ errstate s = err'}" + in ccorres_from_vcg_throws[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def throwError_def) + apply wp + apply (simp add: cap_get_tag_isCap isArchCap_T_isArchObjectCap) + apply csymbr + apply (simp add: cap_get_tag_isCap) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def returnOk_def) + apply (clarsimp simp: errstate_def isCap_simps + Collect_const_mem from_bool_0 + cap_get_tag_isArchCap_unfolded_H_cap) + done + +lemma deriveCap_ccorres: + "ccorres (syscall_error_rel \ ccap_relation) deriveCap_xf + (invs') (UNIV \ {s. ccap_relation cap (cap_' s)} \ {s. 
slot_' s = Ptr slot}) [] + (deriveCap slot cap) (Call deriveCap_'proc)" + apply (rule ccorres_guard_imp2, rule deriveCap_ccorres') + apply fastforce + done + +lemma ensureEmptySlot_ccorres: + "ccorres (syscall_error_rel \ dc) (liftxf errstate id undefined ret__unsigned_long_') + \ (UNIV \ \slot = ptr_val (\slot)\) [] + (ensureEmptySlot slot) (Call ensureEmptySlot_'proc)" + apply (cinit lift: slot_') + apply (rule ccorres_liftE_Seq) + apply (rule ccorres_getCTE) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac P= "\ s. ctes_of s (ptr_val slota) = Some cte" + and P' =UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + + apply (frule (1) rf_sr_ctes_of_clift, clarsimp) + apply (simp add: typ_heap_simps) + + apply (rule conjI) + apply (clarsimp simp: unlessE_def throwError_def return_def) + apply (subgoal_tac "cap_to_H (cap_CL y) \ capability.NullCap") + apply simp + apply (simp add: syscall_error_rel_def EXCEPTION_NONE_def EXCEPTION_SYSCALL_ERROR_def) + apply (rule syscall_error_to_H_cases(8)) + apply simp + apply (subst cap_get_tag_NullCap [symmetric]) + prefer 2 apply assumption + apply (simp add: ccap_relation_def c_valid_cte_def) + + apply (clarsimp simp: unlessE_def throwError_def return_def) + apply (subgoal_tac "cap_to_H (cap_CL y) = capability.NullCap") + apply simp + apply (simp add: returnOk_def return_def) + apply (subst cap_get_tag_NullCap [symmetric]) + prefer 2 apply assumption + apply (simp add: ccap_relation_def c_valid_cte_def) + + apply clarsimp + apply (simp add: cte_wp_at_ctes_of) +done + +lemma updateMDB_set_mdbPrev: + "ccorres dc xfdc + (\s. is_aligned slota cteSizeBits) + {s. slotc = slota } hs + (updateMDB ptr (mdbPrev_update (\_. slota))) + (IF ptr \ 0 + THEN + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t (Ptr ptr:: cte_C ptr)\ + (call (\ta. ta(| mdb_node_ptr_' := Ptr &(Ptr ptr:: cte_C ptr\[''cteMDBNode_C'']), + v64_' := slotc |)) + mdb_node_ptr_set_mdbPrev_'proc (\s t. s\ globals := globals t \) (\ta s'. Basic (\a. a))) + FI)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_updateMDB_cte_at) + apply (ctac add: ccorres_updateMDB_set_mdbPrev) + apply (ctac ccorres: ccorres_updateMDB_skip) + apply (simp) + done + +lemma updateMDB_set_mdbNext: + "ccorres dc xfdc + (\s. is_aligned slota cteSizeBits \ canonical_address slota) + {s. slotc = slota} hs + (updateMDB ptr (mdbNext_update (\_. slota))) + (IF ptr \ 0 + THEN + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t (Ptr ptr:: cte_C ptr)\ + (call (\ta. ta(| mdb_node_ptr_' := Ptr &(Ptr ptr:: cte_C ptr\[''cteMDBNode_C'']), + v64_' := slotc |)) + mdb_node_ptr_set_mdbNext_'proc (\s t. s\ globals := globals t \) (\ta s'. Basic (\a. a))) + FI)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_updateMDB_cte_at) + apply (ctac add: ccorres_updateMDB_set_mdbNext) + apply (ctac ccorres: ccorres_updateMDB_skip) + apply simp + done + +end +end diff --git a/proof/crefine/AARCH64/CSpace_RAB_C.thy b/proof/crefine/AARCH64/CSpace_RAB_C.thy new file mode 100644 index 0000000000..9818059f28 --- /dev/null +++ b/proof/crefine/AARCH64/CSpace_RAB_C.thy @@ -0,0 +1,633 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory CSpace_RAB_C +imports CSpaceAcc_C "CLib.MonadicRewrite_C" +begin + +unbundle l4v_word_context + +context kernel +begin + +abbreviation + "rab_xf \ (liftxf errstate resolveAddressBits_ret_C.status_C + (\v. 
(resolveAddressBits_ret_C.slot_C v, bitsRemaining_C v)) + ret__struct_resolveAddressBits_ret_C_')" + +lemma rab_failure_case_ccorres: + assumes spec: "\\ G' call_part {s. resolveAddressBits_ret_C.status_C v \ scast EXCEPTION_NONE + \ lookup_failure_rel e (resolveAddressBits_ret_C.status_C v) + (errstate s)}" + assumes mod: "\s. \\ {s'. (s, s') \ rf_sr} call_part {s'. (s, s') \ rf_sr}" + shows "ccorres (lookup_failure_rel \ r) rab_xf \ G' (SKIP # hs) + (throwError e) + (call_part ;; + \ret___struct_resolveAddressBits_ret_C :== v;; + return_C ret__struct_resolveAddressBits_ret_C_'_update ret___struct_resolveAddressBits_ret_C_')" + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r [where R=\, OF _ spec]) + apply (rule ccorres_from_vcg_throws) + apply (simp add: throwError_def return_def) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (auto simp add: exception_defs errstate_def)[1] + apply (rule conseqPre [OF mod]) + apply clarsimp + done + +lemma not_snd_bindE_I1: + "\ snd ((a >>=E b) s) \ \ snd (a s)" + unfolding bindE_def + by (erule not_snd_bindI1) + +lemma ccorres_remove_bind_returnOk_noguard: + assumes ac: "ccorres (f \ r') xf P P' (SKIP # hs) a c" + and rr: "\v s'. r' v (exvalue (xf s')) \ r (g v) (exvalue (xf s'))" + shows "ccorres (f \ r) xf P P' (SKIP # hs) (a >>=E (\v. returnOk (g v))) c" + apply (rule ccorresI') + apply clarsimp + apply (drule not_snd_bindE_I1) + apply (erule (4) ccorresE[OF ac]) + apply (clarsimp simp add: bindE_def returnOk_def Nondet_Monad.lift_def bind_def return_def + split_def) + apply (rule bexI [rotated], assumption) + apply (simp add: throwError_def return_def unif_rrel_def + split: sum.splits) + apply (auto elim!: rr) + done + +declare isCNodeCap_CNodeCap[simp] + +(* MOVE *) +lemma ccorres_gen_asm_state: + assumes rl: "\s. P s \ ccorres r xf G G' hs a c" + shows "ccorres r xf (G and P) G' hs a c" +proof (rule ccorres_guard_imp2) + show "ccorres r xf (G and (\_. \s. P s)) G' hs a c" + apply (rule ccorres_gen_asm) + apply (erule exE) + apply (erule rl) + done +next + fix s s' + assume "(s, s') \ rf_sr" and "(G and P) s" and "s' \ G'" + thus "(G and (\_. \s. P s)) s \ s' \ G'" + by (clarsimp elim!: exI) +qed + +(* MOVE, generalise *) +lemma ccorres_req: + assumes rl: "\s s'. \ (s, s') \ rf_sr; Q s; Q' s' \ \ F s s'" + and cc: "\s s'. F s s' \ ccorres r xf P P' hs a c" + shows "ccorres r xf (P and Q) (P' \ Collect Q') hs a c" + apply (rule ccorresI') + apply clarsimp + apply (frule (2) rl) + apply (erule (5) ccorresE [OF cc]) + apply (clarsimp elim!: bexI [rotated]) + done + +declare mask_64_max_word [simp] + +lemma rightsFromWord_wordFromRights: + "rightsFromWord (wordFromRights rghts) = rghts" + apply (cases rghts) + apply (simp add: wordFromRights_def rightsFromWord_def + split: if_split) + done + +lemma wordFromRights_inj: + "inj wordFromRights" + by (rule inj_on_inverseI, rule rightsFromWord_wordFromRights) + +lemmas wordFromRights_eq = inj_eq [OF wordFromRights_inj] + +lemma rightsFromWord_and: + "rightsFromWord (a && b) = andCapRights (rightsFromWord a) (rightsFromWord b)" + by (simp add: rightsFromWord_def andCapRights_def) + +lemma andCapRights_ac: + "andCapRights (andCapRights a b) c = andCapRights a (andCapRights b c)" + "andCapRights a b = andCapRights b a" + "andCapRights a (andCapRights b c) = andCapRights b (andCapRights a c)" + by (simp add: andCapRights_def conj_comms split: cap_rights.split)+ + +lemma ccorres_locateSlotCap_push: + "ccorres_underlying sr \ r xf ar axf P P' hs + (a >>=E (\x. 
locateSlotCap cp n >>= (\p. b p x))) c + \ (\P. \P\ a \\_. P\, - ) + \ ccorres_underlying sr \ r xf ar axf P P' hs + (locateSlotCap cp n >>= (\p. a >>=E (\x. b p x))) c" + apply (simp add: locateSlot_conv) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_stateAssert) + apply (erule monadic_rewrite_ccorres_assemble) + apply (rule monadic_rewrite_bindE[OF monadic_rewrite_refl]) + apply (rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_bind_head) + apply (rule monadic_rewrite_stateAssert[where f="return", simplified]) + apply (rule monadic_rewrite_refl) + apply simp + apply (rule monadic_rewrite_refl) + apply assumption + apply simp + done + +declare Kernel_C.cte_C_size[simp del] + +(* FIXME x64: redo after guard bits changes *) +lemma resolveAddressBits_ccorres [corres]: + shows "ccorres (lookup_failure_rel \ + (\(cte, bits) (cte', bits'). cte' = Ptr cte \ bits = unat bits' \ bits'\ 64)) rab_xf + (valid_pspace' and valid_cap' cap' + and K (guard' \ 64)) + ({s. ccap_relation cap' (nodeCap_' s)} \ + {s. capptr_' s = cptr'} \ {s. unat (n_bits_' s) = guard'}) [] + (resolveAddressBits cap' cptr' guard') (Call resolveAddressBits_'proc)" + (is "ccorres ?rvr rab_xf ?P ?P' [] ?rab ?rab'") +proof (cases "isCNodeCap cap'") + case False + + note Collect_const [simp del] + + show ?thesis using False + apply (cinit' lift: nodeCap_' capptr_' n_bits_') + apply csymbr+ + \ \Exception stuff\ + apply (rule ccorres_split_throws) + apply (simp add: Collect_const cap_get_tag_isCap isCap_simps ccorres_cond_iffs + resolveAddressBits.simps scast_id) + apply (rule ccorres_from_vcg_throws [where P = \ and P' = UNIV]) + apply (rule allI) + apply (rule conseqPre) + apply (simp add: throwError_def return_def split) + apply vcg + apply (clarsimp simp add: exception_defs lookup_fault_lift_def) + apply (simp split: if_split) + apply (vcg strip_guards=true) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) + done +next + case True + + note word_neq_0_conv [simp del] + + from True show ?thesis + apply - + apply (cinit' simp add: whileAnno_def ucast_id) + \ \This is done here as init lift usually throws away the relationship between nodeCap_' s and nodeCap. Normally + this OK, but the induction messes with everything :(\ + apply (rule ccorres_abstract [where xf' = nodeCap_']) + apply ceqv + apply (rename_tac "nodeCap") + apply (rule ccorres_abstract [where xf' = n_bits_']) + apply ceqv + apply (rename_tac "n_bits") + apply (rule ccorres_abstract [where xf' = capptr_']) + apply ceqv + apply (rename_tac "capptr") + apply (rule_tac P = "capptr = cptr' \ ccap_relation cap' nodeCap" in ccorres_gen_asm2) + apply (erule conjE) + apply (erule_tac t = capptr in ssubst) + apply csymbr+ + apply (simp add: cap_get_tag_isCap split del: if_split) + apply (thin_tac "ret__unsigned_longlong = X" for X) + apply (rule ccorres_split_throws [where P = "?P"]) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap} \ {s. unat (n_bits_' s) = guard'}" + in ccorres_inst) + apply (rule_tac r' = "?rvr" in + ccorres_rel_imp [where xf' = rab_xf]) + defer + apply (case_tac x) + apply clarsimp + apply clarsimp + apply (rule_tac I = "{s. 
cap_get_tag (nodeCap_' s) = scast cap_cnode_cap}" + in HoarePartial.While [unfolded whileAnno_def, OF subset_refl]) + apply (vcg strip_guards=true) \ \takes a while\ + apply clarsimp + apply simp + apply (clarsimp simp: cap_get_tag_isCap) + \ \Main thm\ + proof (induct cap' cptr' guard' rule: resolveAddressBits.induct[case_names ind]) + case (ind cap cptr guard) + + note conj_refl = conjI [OF refl refl] + have imp_rem: "\P X. P \ P \ (P \ X = X)" by clarsimp + have imp_rem': "\P R X. P \ R \ P \ R \ (P \ R \ X = X)" by clarsimp + note conj_refl_r = conjI [OF _ refl] + + have getSlotCap_in_monad: + "\a b p rs s. ((a, b) \ fst (getSlotCap p s)) = + (option_map cteCap (ctes_of s p) = Some a + \ b = s)" + apply (simp add: getSlotCap_def return_def bind_def objBits_simps split_def) + apply rule + apply (clarsimp simp: in_getCTE_cte_wp_at' cte_wp_at_ctes_of) + apply clarsimp + apply (rename_tac s p z) + apply (subgoal_tac "cte_wp_at' ((=) z) p s") + apply (clarsimp simp: getCTE_def cte_wp_at'_def) + apply (simp add: cte_wp_at_ctes_of) + done + + note ih = ind.hyps[simplified, simplified in_monad + getSlotCap_in_monad locateSlot_conv stateAssert_def, simplified] + + have gsCNodes: "\s bits p x P Q. bits = capCNodeBits cap \ capCNodeBits cap < 64 \ + (case gsCNodes (s \ gsCNodes := [p \ bits ] \) p of None \ False + | Some n \ ((n = capCNodeBits cap \ Q n)) + \ (x && mask bits :: machine_word) < 2 ^ n) \ P" + by (clarsimp simp: word_size and_mask_less_size) + + have case_into_if: + "\c f g. (case c of CNodeCap _ _ _ _ \ f | _ \ g) = (if isCNodeCap c then f else g)" + by (case_tac c, simp_all add: isCap_simps) + + note [split del] = if_split + + have gbD: "\guardBits cap cap'. \ guardBits = capCNodeGuardSize_CL (cap_cnode_cap_lift cap'); + ccap_relation cap cap'; isCNodeCap cap \ + \ unat guardBits = capCNodeGuardSize cap \ capCNodeGuardSize cap < 64" + apply (simp add: cap_get_tag_isCap[symmetric]) + apply (frule(1) cap_get_tag_CNodeCap [THEN iffD1]) + apply simp + apply (simp add: cap_cnode_cap_lift_def cap_lift_cnode_cap) + apply (rule order_le_less_trans, rule word_le_nat_alt[THEN iffD1], + rule word_and_le1) + apply (simp add: mask_def) + done + + have cgD: "\capGuard cap cap'. \ capGuard = capCNodeGuard_CL (cap_cnode_cap_lift cap'); + ccap_relation cap cap'; isCNodeCap cap \ \ capGuard = capCNodeGuard cap" + apply (frule cap_get_tag_CNodeCap [THEN iffD1]) + apply (simp add: cap_get_tag_isCap) + apply simp + done + + have rbD: "\radixBits cap cap'. \ radixBits = capCNodeRadix_CL (cap_cnode_cap_lift cap'); + ccap_relation cap cap'; isCNodeCap cap \ + \ unat radixBits = capCNodeBits cap \ capCNodeBits cap < 64" + apply (simp add: cap_get_tag_isCap[symmetric]) + apply (frule(1) cap_get_tag_CNodeCap [THEN iffD1]) + apply simp + apply (simp add: cap_cnode_cap_lift_def cap_lift_cnode_cap) + apply (rule order_le_less_trans, rule word_le_nat_alt[THEN iffD1], + rule word_and_le1) + apply (simp add: mask_def) + done + + have rxgd: + "\cap cap'. 
\ ccap_relation cap cap'; isCNodeCap cap \ + \ unat (capCNodeRadix_CL (cap_cnode_cap_lift cap') + + capCNodeGuardSize_CL (cap_cnode_cap_lift cap')) + = unat (capCNodeRadix_CL (cap_cnode_cap_lift cap')) + + unat (capCNodeGuardSize_CL (cap_cnode_cap_lift cap'))" + apply (simp add: cap_get_tag_isCap[symmetric]) + apply (frule(1) cap_get_tag_CNodeCap [THEN iffD1]) + apply (simp add: cap_cnode_cap_lift_def cap_lift_cnode_cap) + apply (subst unat_plus_simple[symmetric], subst no_olen_add_nat) + apply (rule order_le_less_trans, rule add_le_mono) + apply (rule word_le_nat_alt[THEN iffD1], rule word_and_le1)+ + apply (simp add: mask_def) + done + + (* Move outside this context? *) + note cap_simps = rxgd cgD [OF refl] + rbD [OF refl, THEN conjunct1] rbD [OF refl, THEN conjunct2] + gbD [OF refl, THEN conjunct1] gbD [OF refl, THEN conjunct2] + + have cond1: "\(nb :: machine_word) guardBits capGuard. + \unat nb = guard; unat guardBits = capCNodeGuardSize cap; capGuard = capCNodeGuard cap; + guard \ 64\ + \ \s s'. + (s, s') \ rf_sr \ True \ True \ + (\ (capCNodeGuardSize cap \ guard \ + (cptr >> guard - capCNodeGuardSize cap) && + mask (capCNodeGuardSize cap) = + capCNodeGuard cap)) = + (s' \ \nb < guardBits \ + (cptr >> unat (nb - guardBits && 0x3F)) && + 2 ^ unat guardBits - 1 \ capGuard\)" + apply (subst not_le [symmetric]) + apply (clarsimp simp: mask_def unat_of_nat Collect_const_mem) + apply (cases "capCNodeGuardSize cap = 0") + apply (simp add: word_le_nat_alt) + apply (subgoal_tac "(0x3F :: machine_word) = mask 6") + apply (erule ssubst [where t = "0x3F"]) + apply (simp add: less_mask_eq word_less_nat_alt word_le_nat_alt) + apply (subst imp_cong) + apply (rule refl) + prefer 2 + apply (rule refl) + apply (subst less_mask_eq) + apply (simp add: word_less_nat_alt word_le_nat_alt unat_sub) + apply (simp add: word_less_nat_alt word_le_nat_alt unat_sub) + apply (simp add: mask_def) + done + + have cond2: "\nb (radixBits :: machine_word) (guardBits :: machine_word). + \ unat nb = guard; unat radixBits = capCNodeBits cap; capCNodeBits cap < 64; capCNodeGuardSize cap < 64; + unat guardBits = capCNodeGuardSize cap \ \ + \s s'. (s, s') \ rf_sr \ True \ True \ + (guard < capCNodeBits cap + capCNodeGuardSize cap) = (s' \ \nb < radixBits + guardBits\)" + by (simp add: Collect_const_mem word_less_nat_alt unat_word_ariths) + + have cond3: "\nb (radixBits :: machine_word) (guardBits :: machine_word). + \ unat nb = guard; unat radixBits = capCNodeBits cap; capCNodeBits cap < 64; capCNodeGuardSize cap < 64; + unat guardBits = capCNodeGuardSize cap; + \ guard < capCNodeBits cap + capCNodeGuardSize cap \ \ + \s s'. (s, s') \ rf_sr \ True \ True \ + (guard = capCNodeBits cap + capCNodeGuardSize cap) = (s' \ \nb = radixBits + guardBits\)" + by clarsimp unat_arith + + have cond4: + "\rva nodeCapb ret__unsigned_long. + \ ccap_relation rva nodeCapb; ret__unsigned_long = cap_get_tag nodeCapb\ + \ \s s'. (s, s') \ rf_sr \ True \ True \ (\ isCNodeCap rva) = (s' \ \ret__unsigned_long \ scast cap_cnode_cap\)" + by (simp add: cap_get_tag_isCap Collect_const_mem) + + let ?p = "(capCNodePtr cap + 0x20 * ((cptr >> guard - (capCNodeBits cap + capCNodeGuardSize cap)) && + mask (capCNodeBits cap)))" + + have n_bits_guard: "\nb :: machine_word. \ guard \ 64; unat nb = guard \ \ unat (nb && mask 7) = guard" + apply (subgoal_tac "nb \ 64") + apply (clarsimp) + apply (rule less_mask_eq) + apply (erule order_le_less_trans) + apply simp + apply (simp add: word_le_nat_alt) + done + + have mask7_eqs: + "\cap ccap. 
\ ccap_relation cap ccap; isCNodeCap cap \ + \ (capCNodeRadix_CL (cap_cnode_cap_lift ccap) + capCNodeGuardSize_CL (cap_cnode_cap_lift ccap)) && mask 7 + = capCNodeRadix_CL (cap_cnode_cap_lift ccap) + capCNodeGuardSize_CL (cap_cnode_cap_lift ccap)" + "\cap ccap. \ ccap_relation cap ccap; isCNodeCap cap \ + \ capCNodeRadix_CL (cap_cnode_cap_lift ccap) && mask 7 = capCNodeRadix_CL (cap_cnode_cap_lift ccap)" + "\cap ccap. \ ccap_relation cap ccap; isCNodeCap cap \ + \ capCNodeGuardSize_CL (cap_cnode_cap_lift ccap) && mask 7 = capCNodeGuardSize_CL (cap_cnode_cap_lift ccap)" + apply (frule(1) rxgd) + defer + apply (simp_all add: cap_cnode_cap_lift_def cap_get_tag_isCap[symmetric] + cap_lift_cnode_cap) + apply (rule less_mask_eq + | rule order_le_less_trans, (rule word_and_le1)+ + | simp add: mask_def)+ + apply (simp add: word_less_nat_alt) + apply (rule order_le_less_trans, rule add_le_mono) + apply (rule word_le_nat_alt[THEN iffD1], rule word_and_le1)+ + apply simp + done + + have gm: "\(nb :: machine_word) cap cap'. \ unat nb = guard; ccap_relation cap cap'; isCNodeCap cap \ + \ nb \ capCNodeRadix_CL (cap_cnode_cap_lift cap') + + capCNodeGuardSize_CL (cap_cnode_cap_lift cap') + \ unat (nb - + (capCNodeRadix_CL (cap_cnode_cap_lift cap') + + capCNodeGuardSize_CL (cap_cnode_cap_lift cap'))) + = guard - (capCNodeBits cap + capCNodeGuardSize cap)" + apply (simp add: unat_sub) + apply (subst unat_plus_simple[THEN iffD1]) + apply (subst no_olen_add_nat) + apply (simp add: cap_lift_cnode_cap cap_cnode_cap_lift_def + cap_get_tag_isCap[symmetric] mask_def) + apply (rule order_le_less_trans, rule add_le_mono) + apply (rule word_le_nat_alt[THEN iffD1], rule word_and_le1)+ + apply simp + apply (simp add: cap_simps) + done + + note if_cong[cong] + show ?case + using ind.prems + supply option.case_cong[cong] + apply - + apply (rule iffD1 [OF ccorres_expand_while_iff]) + apply (subst resolveAddressBits.simps) + apply (unfold case_into_if) + apply (simp add: Let_def ccorres_cond_iffs split del: if_split) + apply (rule ccorres_rhs_assoc)+ + apply (cinitlift nodeCap_' n_bits_') + apply (erule_tac t = nodeCapa in ssubst) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_gen_asm [where P="0 < capCNodeBits cap \ 0 < capCNodeGuardSize cap"]) + apply (rule ccorres_assertE) + apply (csymbr | rule iffD2 [OF ccorres_seq_skip])+ + apply (rule ccorres_Guard_Seq)+ + apply csymbr + \ \handle the stateAssert in locateSlotCap very carefully\ + apply (simp(no_asm) only: liftE_bindE[where a="locateSlotCap a b" for a b]) + apply (rule ccorres_locateSlotCap_push[rotated]) + apply (simp add: unlessE_def) + apply (rule hoare_pre, wp, simp) + \ \Convert guardBits, radixBits and capGuard to their Haskell versions\ + apply (drule (2) cgD, drule (2) rbD, drule (2) gbD) + apply (elim conjE) + apply (rule ccorres_gen_asm [where P = "guard \ 64"]) + apply (rule ccorres_split_unless_throwError_cond [OF cond1], assumption+) + apply (rule rab_failure_case_ccorres, vcg, rule conseqPre, vcg) + apply clarsimp + apply (rule ccorres_locateSlotCap_push[rotated]) + apply (rule hoare_pre, wp whenE_throwError_wp, simp) + apply (rule ccorres_split_when_throwError_cond [OF cond2], assumption+) + apply (rule rab_failure_case_ccorres, vcg, rule conseqPre, vcg) + apply clarsimp + apply (rule ccorres_Guard_Seq)+ + apply csymbr + apply csymbr + apply (simp only: locateSlotCap_def Let_def if_True) + apply (rule ccorres_split_nothrow) + apply (rule locateSlotCNode_ccorres[where xf="slot_'" and xfu="slot_'_update"], + simp+)[1] + apply ceqv + apply 
(rename_tac rv slot) + apply (erule_tac t = slot in ssubst) + apply (simp del: Collect_const) + apply (rule ccorres_if_cond_throws [OF cond3], assumption+) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (rule ccorres_return_CE, simp_all)[1] + apply (rule ccorres_Guard_Seq) + apply (rule ccorres_basic_srnoop2, simp) + apply csymbr + apply (ctac pre: ccorres_liftE_Seq) + apply (rename_tac rva nodeCapa) + apply csymbr + apply (rule ccorres_if_cond_throws2 [OF cond4], assumption+) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (rule ccorres_return_CE, simp_all)[1] + apply (frule_tac v1 = rva in iffD1 [OF isCap_simps(4)]) + apply (elim exE) + apply (rule_tac + Q = "\s. option_map cteCap (ctes_of s ?p) = Some rva" + and F = "\s s'. + (option_map cteCap (ctes_of s ?p) = Some rva + \ (ccap_relation rva (h_val (hrs_mem (t_hrs_' (globals s'))) (Ptr &(Ptr ?p :: cte_C ptr\[''cap_C'']) :: cap_C ptr))))" + in ccorres_req [where Q' = "\s'. s' \\<^sub>c (Ptr ?p :: cte_C ptr)"]) + apply (thin_tac "rva = X" for X) + apply (clarsimp simp: h_t_valid_clift_Some_iff typ_heap_simps) + apply (rule ccte_relation_ccap_relation) + apply (erule (2) rf_sr_cte_relation) + apply (elim conjE) + apply (rule_tac nodeCap1 = "nodeCapa" in ih, + (simp | rule conjI refl gsCNodes)+)[1] + apply (clarsimp simp: cte_level_bits_def field_simps isCap_simps, fast) + apply (rule refl) + apply assumption + apply assumption + apply assumption + apply vcg + apply (simp add: getSlotCap_def imp_conjR) + apply (wp getCTE_ctes_of | (wp (once) hoare_drop_imps))+ + apply (clarsimp simp: Collect_const_mem if_then_simps lookup_fault_lifts cong: imp_cong conj_cong) + apply vcg + apply (vcg strip_guards=true) + apply (simp add: locateSlot_conv) + apply wp + apply vcg + apply (vcg strip_guards=true) + apply (vcg strip_guards=true) + apply (rule conjI) + \ \Haskell guard\ + apply (clarsimp simp del: imp_disjL) \ \take a while\ + apply (intro impI conjI allI) + apply fastforce + apply clarsimp + apply arith + apply (clarsimp simp: isCap_simps cte_level_bits_def + option.split[where P="\x. 
x"]) + apply (clarsimp simp: isCap_simps valid_cap_simps' cte_level_bits_def cteSizeBits_def + real_cte_at') + apply (clarsimp simp: isCap_simps valid_cap'_def) + \ \C guard\ + apply (frule (1) cgD [OF refl], frule (1) rbD [OF refl], frule (1) gbD [OF refl]) + apply (simp add: Collect_const_mem cap_get_tag_isCap exception_defs lookup_fault_lifts + n_bits_guard mask7_eqs word_le_nat_alt word_less_nat_alt gm) + apply (elim conjE) + apply (frule rf_sr_cte_at_valid [where p = + "cte_Ptr (capCNodePtr cap + 2^cteSizeBits * ((cptr >> guard - (capCNodeBits cap + capCNodeGuardSize cap)) && mask (capCNodeBits cap)))", rotated]) + apply simp + apply (erule (1) valid_cap_cte_at') + apply (simp add: objBits_defs) + apply (frule(2) gm) + apply (simp add: word_less_nat_alt word_le_nat_alt less_mask_eq) + apply (intro impI conjI allI, simp_all) + apply (simp add: cap_simps) + apply (frule iffD1 [OF cap_get_tag_CNodeCap]) + apply (simp add: cap_get_tag_isCap) + apply (erule ssubst [where t = cap]) + apply simp + apply (simp add: mask_def) + apply (subgoal_tac "capCNodeBits cap \ 0") + apply (clarsimp simp: linorder_not_less cap_simps) + apply (clarsimp simp: isCap_simps valid_cap'_def) + apply (clarsimp simp: linorder_not_less cap_simps) + apply (clarsimp simp: isCap_simps valid_cap'_def) + apply (clarsimp simp: linorder_not_less cap_simps) + apply (clarsimp simp: isCap_simps valid_cap'_def) + apply arith + apply (subgoal_tac "(0x3F :: machine_word) = mask 6") + apply (erule ssubst [where t = "0x3F"]) + apply (subst word_mod_2p_is_mask [symmetric]) + apply simp + apply (simp add: unat_word_ariths) + apply (simp add: mask_def) + done + qed +qed + +abbreviation + "lookupSlot_xf \ liftxf errstate lookupSlot_ret_C.status_C + lookupSlot_ret_C.slot_C ret__struct_lookupSlot_ret_C_'" + + +lemma rightsFromWord_spec: + shows "\s. \ \ {s} \ret__struct_seL4_CapRights_C :== PROC rightsFromWord(\w) + \seL4_CapRights_lift \ret__struct_seL4_CapRights_C = cap_rights_from_word_canon \<^bsup>s\<^esup>w \" + apply vcg + apply (simp add: seL4_CapRights_lift_def nth_shiftr mask_shift_simps nth_shiftr + cap_rights_from_word_canon_def word_and_1 eval_nat_numeral + word_sless_def word_sle_def) + done + + +abbreviation + "lookupSlot_rel' \ \(cte, rm) (cte', rm'). cte' = Ptr cte \ rm = cap_rights_to_H (seL4_CapRights_lift rm')" + +(* MOVE *) +lemma cap_rights_to_H_from_word_canon [simp]: + "cap_rights_to_H (cap_rights_from_word_canon wd) = rightsFromWord wd" + unfolding cap_rights_from_word_def rightsFromWord_def + apply (simp add: cap_rights_from_word_canon_def) + apply (simp add: cap_rights_to_H_def) + done + +lemma tcb_ptr_to_ctcb_ptr_mask [simp]: + assumes tcbat: "tcb_at' thread s" + shows "ptr_val (tcb_ptr_to_ctcb_ptr thread) && ~~ mask tcbBlockSizeBits = thread" +proof - + have "thread + ctcb_offset && ~~ mask tcbBlockSizeBits = thread" + proof (rule add_mask_lower_bits) + show "is_aligned thread tcbBlockSizeBits" using tcbat by (rule tcb_aligned') + qed (auto simp: word_bits_def ctcb_offset_defs objBits_defs) + thus ?thesis + unfolding tcb_ptr_to_ctcb_ptr_def ctcb_offset_def + by (simp add: mask_def) +qed + +abbreviation + "lookupSlot_raw_xf \ + liftxf errstate lookupSlot_raw_ret_C.status_C + lookupSlot_raw_ret_C.slot_C + ret__struct_lookupSlot_raw_ret_C_'" + +definition + lookupSlot_raw_rel :: "machine_word \ cte_C ptr \ bool" +where + "lookupSlot_raw_rel \ \slot slot'. 
slot' = cte_Ptr slot" + +lemma lookupSlotForThread_ccorres': + "ccorres (lookup_failure_rel \ lookupSlot_raw_rel) lookupSlot_raw_xf + (valid_pspace' and tcb_at' thread) + ({s. capptr_' s = cptr} \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (lookupSlotForThread thread cptr) (Call lookupSlot_'proc)" + apply (cinit lift: capptr_' thread_' + simp add: getThreadCSpaceRoot_def locateSlot_conv + returnOk_liftE [symmetric] split_def) + apply (ctac pre: ccorres_liftE_Seq) + apply (ctac (no_vcg) add: resolveAddressBits_ccorres) + apply csymbr+ + apply (ctac add: ccorres_return_CE) + apply csymbr+ + apply (ctac add: ccorres_return_C_errorE) + apply wp+ + apply vcg + apply (rule conjI) + apply (clarsimp simp add: conj_comms word_size tcbSlots Kernel_C.tcbCTable_def) + apply (rule conjI) + apply fastforce + apply (erule tcb_at_cte_at') + apply (clarsimp simp add: Collect_const_mem errstate_def tcbSlots + Kernel_C.tcbCTable_def word_size lookupSlot_raw_rel_def + word_sle_def + split del: if_split) + done + +lemma lookupSlotForThread_ccorres[corres]: + "ccorres (lookup_failure_rel \ lookupSlot_raw_rel) lookupSlot_raw_xf + (invs' and tcb_at' thread) + (UNIV \ {s. capptr_' s = cptr} \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (lookupSlotForThread thread cptr) (Call lookupSlot_'proc)" + apply (rule ccorres_guard_imp2, rule lookupSlotForThread_ccorres') + apply fastforce + done + +end +end diff --git a/proof/crefine/AARCH64/Ctac_lemmas_C.thy b/proof/crefine/AARCH64/Ctac_lemmas_C.thy new file mode 100644 index 0000000000..cb42899d81 --- /dev/null +++ b/proof/crefine/AARCH64/Ctac_lemmas_C.thy @@ -0,0 +1,249 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* seL4-specific lemmas for automation framework for C refinement *) + +theory Ctac_lemmas_C +imports + Ctac +begin + +context kernel +begin + +lemma c_guard_abs_cte: + fixes p :: "cte_C ptr" + shows "\s s'. (s, s') \ rf_sr \ cte_at' (ptr_val p) s \ True \ s' \\<^sub>c p" + apply (cases p) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule (1) rf_sr_ctes_of_cliftE) + apply (simp add: typ_heap_simps') + done + +lemmas ccorres_move_c_guard_cte [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] + +lemma c_guard_abs_tcb: + fixes p :: "tcb_C ptr" + shows "\s s'. 
(s, s') \ rf_sr \ tcb_at' (ctcb_ptr_to_tcb_ptr p) s \ True \ s' \\<^sub>c p" + apply clarsimp + apply (drule (1) tcb_at_h_t_valid) + apply simp + done + +lemmas ccorres_move_c_guard_tcb [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] + +lemma cte_array_relation_array_assertion: + "gsCNodes s p = Some n \ cte_array_relation s cstate + \ array_assertion (cte_Ptr p) (2 ^ n) (hrs_htd (t_hrs_' cstate))" + apply (rule h_t_array_valid_array_assertion) + apply (clarsimp simp: cvariable_array_map_relation_def) + apply simp + done + +lemma rf_sr_tcb_ctes_array_assertion': + "\ (s, s') \ rf_sr; tcb_at' (ctcb_ptr_to_tcb_ptr tcb) s \ + \ array_assertion (cte_Ptr (ptr_val tcb && ~~mask tcbBlockSizeBits)) + (unat tcbCNodeEntries) (hrs_htd (t_hrs_' (globals s')))" + apply (rule h_t_array_valid_array_assertion, simp_all add: tcbCNodeEntries_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cvariable_array_map_relation_def + cpspace_relation_def) + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule mp, rule exI, erule ko_at_projectKO_opt) + apply (frule ptr_val_tcb_ptr_mask) + apply (simp add: mask_def) + done + +lemmas rf_sr_tcb_ctes_array_assertion + = rf_sr_tcb_ctes_array_assertion'[simplified objBits_defs mask_def, simplified] + +lemma rf_sr_tcb_ctes_array_assertion2: + "\ (s, s') \ rf_sr; tcb_at' tcb s \ + \ array_assertion (cte_Ptr tcb) + (unat tcbCNodeEntries) (hrs_htd (t_hrs_' (globals s')))" + apply (frule(1) rf_sr_tcb_ctes_array_assertion[where + tcb="tcb_ptr_to_ctcb_ptr t" for t, simplified]) + apply (simp add: ptr_val_tcb_ptr_mask) + done + +lemma array_assertion_abs_tcb_ctes': + "\s s'. (s, s') \ rf_sr \ tcb_at' (ctcb_ptr_to_tcb_ptr (tcb s')) s \ (n s' \ unat tcbCNodeEntries) + \ (x s' = 0 \ array_assertion (cte_Ptr (ptr_val (tcb s') && ~~mask tcbBlockSizeBits)) (n s') (hrs_htd (t_hrs_' (globals s'))))" + "\s s'. (s, s') \ rf_sr \ tcb_at' tcb' s \ (n s' \ unat tcbCNodeEntries) + \ (x s' = 0 \ array_assertion (cte_Ptr tcb') (n s') (hrs_htd (t_hrs_' (globals s'))))" + apply (safe intro!: disjCI2) + apply (drule(1) rf_sr_tcb_ctes_array_assertion' rf_sr_tcb_ctes_array_assertion2 + | erule array_assertion_shrink_right | simp)+ + done + +lemmas array_assertion_abs_tcb_ctes + = array_assertion_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] + +lemma array_assertion_abs_tcb_ctes_add': + "\s s'. (s, s') \ rf_sr \ tcb_at' (ctcb_ptr_to_tcb_ptr (tcb s')) s + \ (n s' \ 0 \ (case strong of True \ n s' + 1 | False \ n s') \ uint tcbCNodeEntries) + \ ptr_add_assertion (cte_Ptr (ptr_val (tcb s') && ~~mask tcbBlockSizeBits)) (n s') + strong (hrs_htd (t_hrs_' (globals s')))" + apply (clarsimp, drule(1) rf_sr_tcb_ctes_array_assertion') + apply (simp add: ptr_add_assertion_positive, rule disjCI2) + apply (erule array_assertion_shrink_right) + apply (cases strong, simp_all add: unat_def del: nat_uint_eq) + done + +lemmas array_assertion_abs_tcb_ctes_add + = array_assertion_abs_tcb_ctes_add'[simplified objBits_defs mask_def, simplified] + +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] + = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)] + ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] + ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] + ccorres_move_Guard[OF array_assertion_abs_tcb_ctes_add] + +lemma c_guard_abs_tcb_ctes': + fixes p :: "cte_C ptr" + shows "\s s'. 
(s, s') \ rf_sr \ tcb_at' (ctcb_ptr_to_tcb_ptr (tcb s')) s + \ (n < ucast tcbCNodeEntries) \ s' \\<^sub>c cte_Ptr (((ptr_val (tcb s') && ~~mask tcbBlockSizeBits) + + n * 2^cteSizeBits))" + apply (clarsimp) + apply (rule c_guard_abs_cte[rule_format], intro conjI, simp_all) + apply (simp add: cte_at'_obj_at', rule disjI2) + apply (frule ptr_val_tcb_ptr_mask) + apply (rule_tac x="n * 2^cteSizeBits" in bexI) + apply (simp add: mask_def) + apply (simp add: word_less_nat_alt tcbCNodeEntries_def tcb_cte_cases_def objBits_defs) + apply (case_tac "unat n", simp_all add: unat_eq_of_nat, rename_tac n_rem) + apply (case_tac "n_rem", simp_all add: unat_eq_of_nat, (rename_tac n_rem)?)+ + done + +lemmas c_guard_abs_tcb_ctes = c_guard_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] +lemmas ccorres_move_c_guard_tcb_ctes [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] + +lemma c_guard_abs_pte: + "\s s'. (s, s') \ rf_sr \ pte_at' (ptr_val p) s \ True + \ s' \\<^sub>c (p :: pte_C ptr)" + apply (clarsimp simp: typ_at_to_obj_at_arches) + apply (drule obj_at_ko_at', clarsimp) + apply (erule cmap_relationE1[OF rf_sr_cpte_relation]) + apply (erule ko_at_projectKO_opt) + apply (fastforce intro: typ_heap_simps) + done + +lemmas ccorres_move_c_guard_pte = ccorres_move_c_guards [OF c_guard_abs_pte] + +lemma array_assertion_abs_vspace: + "\s s'. (s, s') \ rf_sr + \ (page_table_at' VSRootPT_T pd s \ gsPTTypes (ksArchState s) pd = Some VSRootPT_T) + \ (n s' \ 2 ^ ptTranslationBits VSRootPT_T \ (x s' \ 0 \ n s' \ 0)) + \ (x s' = 0 \ array_assertion (pte_Ptr pd) (n s') (hrs_htd (t_hrs_' (globals s'))))" + apply (intro allI impI disjCI2, clarsimp) + apply (drule (2) vspace_at_rf_sr, clarsimp) + apply (erule clift_array_assertion_imp; simp) + apply (rule_tac x=0 in exI, simp add: ptTranslationBits_vs_array_len) + done + +lemmas ccorres_move_array_assertion_vspace + = ccorres_move_array_assertions[OF array_assertion_abs_vspace] + +lemma array_assertion_abs_pt: + "\s s'. (s, s') \ rf_sr + \ (page_table_at' NormalPT_T pt s \ gsPTTypes (ksArchState s) pt = Some NormalPT_T) + \ (n s' \ 2 ^ ptTranslationBits NormalPT_T \ (x s' \ 0 \ n s' \ 0)) + \ (x s' = 0 \ array_assertion (pte_Ptr pt) (n s') (hrs_htd (t_hrs_' (globals s'))))" + apply (intro allI impI disjCI2, clarsimp) + apply (drule (2) ptable_at_rf_sr, clarsimp) + apply (erule clift_array_assertion_imp; simp) + apply (rule_tac x=0 in exI, simp add: bit_simps) + done + +lemmas ccorres_move_array_assertion_pt + = ccorres_move_array_assertions[OF array_assertion_abs_pt] + +lemma array_assertion_abs_pt_gen: + "\s s'. (s, s') \ rf_sr + \ (page_table_at' pt_t pt s \ gsPTTypes (ksArchState s) pt = Some pt_t) + \ (n s' \ 2 ^ ptTranslationBits pt_t \ (x s' \ 0 \ n s' \ 0)) + \ (x s' = 0 \ array_assertion (pte_Ptr pt) (n s') (hrs_htd (t_hrs_' (globals s'))))" + apply (intro allI impI disjCI2, clarsimp) + apply (cases pt_t; simp) + apply (drule (2) vspace_at_rf_sr, clarsimp) + apply (erule clift_array_assertion_imp; simp) + apply (rule_tac x=0 in exI, simp add: bit_simps Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) + apply (drule (2) ptable_at_rf_sr, clarsimp) + apply (erule clift_array_assertion_imp; simp) + apply (rule_tac x=0 in exI, simp add: bit_simps) + done + +lemmas ccorres_move_array_assertion_pt_gen + = ccorres_move_array_assertions[OF array_assertion_abs_pt_gen] + +lemma move_c_guard_ap: + "\s s'. 
(s, s') \ rf_sr \ asid_pool_at' (ptr_val p) s \ True + \ s' \\<^sub>c (p :: asid_pool_C ptr)" + apply (clarsimp simp: typ_at_to_obj_at_arches) + apply (drule obj_at_ko_at', clarsimp) + apply (erule cmap_relationE1 [OF rf_sr_cpspace_asidpool_relation]) + apply (erule ko_at_projectKO_opt) + apply (fastforce intro: typ_heap_simps) + done + +lemmas ccorres_move_c_guard_ap = ccorres_move_c_guards [OF move_c_guard_ap] + +lemma array_assertion_abs_irq: + "\s s'. (s, s') \ rf_sr \ True + \ (n s' \ 2 ^ LENGTH(irq_len) \ (x s' \ 0 \ n s' \ 0)) + \ (x s' = 0 \ array_assertion intStateIRQNode_Ptr (n s') (hrs_htd (t_hrs_' (globals s'))))" + apply (intro allI impI disjCI2) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: h_t_valid_clift_Some_iff) + apply (erule clift_array_assertion_imp, (simp add: exI[where x=0])+) + done + +lemmas ccorres_move_array_assertion_irq + = ccorres_move_array_assertions [OF array_assertion_abs_irq] + +lemma ccorres_Guard_intStateIRQNode_array_Ptr_Seq: + assumes "ccorres_underlying rf_sr \ r xf arrel axf A C hs a (c;; d)" + shows "ccorres_underlying rf_sr \ r xf arrel axf A C hs a (Guard F {s. s \\<^sub>c intStateIRQNode_array_Ptr} c;; d)" + by (rule ccorres_guard_imp2[OF ccorres_move_Guard_Seq[where P=\ and P'=\, OF _ assms]] + ; simp add: rf_sr_def cstate_relation_def Let_def) + +lemmas ccorres_Guard_intStateIRQNode_array_Ptr = + ccorres_Guard_intStateIRQNode_array_Ptr_Seq[where d=SKIP, simplified ccorres_seq_skip'] + ccorres_Guard_intStateIRQNode_array_Ptr_Seq + +lemma rf_sr_gsCNodes_array_assertion: + "gsCNodes s p = Some n \ (s, s') \ rf_sr + \ array_assertion (cte_Ptr p) (2 ^ n) (hrs_htd (t_hrs_' (globals s')))" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cte_array_relation_array_assertion) + + +lemma move_c_guard_vcpu: + "\s s'. (s, s') \ rf_sr \ vcpu_at' (ptr_val p) s \ True + \ s' \\<^sub>c (p :: vcpu_C ptr)" + apply (clarsimp simp: typ_at_to_obj_at_arches) + apply (drule obj_at_ko_at', clarsimp) + apply (erule cmap_relationE1 [OF cmap_relation_vcpu]) + apply (erule ko_at_projectKO_opt) + apply (fastforce intro: typ_heap_simps) + done + +lemmas ccorres_move_c_guard_vcpu = ccorres_move_c_guards[OF move_c_guard_vcpu] + +lemma ccorres_h_t_valid_armKSGlobalUserVSpace: + "ccorres r xf P P' hs f (f' ;; g') \ + ccorres r xf P P' hs f (Guard C_Guard {s'. 
s' \\<^sub>c armKSGlobalUserVSpace_Ptr} f';; g')" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_move_c_guards[where P = \]) + apply clarsimp + apply assumption + apply simp + by (clarsimp simp add: rf_sr_def cstate_relation_def Let_def) + +end + +end diff --git a/proof/crefine/AARCH64/Delete_C.thy b/proof/crefine/AARCH64/Delete_C.thy new file mode 100644 index 0000000000..bf1367ce0a --- /dev/null +++ b/proof/crefine/AARCH64/Delete_C.thy @@ -0,0 +1,1043 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Delete_C +imports Finalise_C +begin + +context kernel_m +begin + +lemma ccorres_drop_cutMon: + "ccorres_underlying sr Gamm r xf arrel axf P P' hs f g + \ ccorres_underlying sr Gamm r xf arrel axf P P' hs (cutMon Q f) g" + apply (clarsimp simp: ccorres_underlying_def + cutMon_def fail_def + split: if_split_asm) + apply (subst if_P, simp) + apply fastforce + done + +lemma ccorres_drop_cutMon_bind: + "ccorres_underlying sr Gamm r xf arrel axf P P' hs (f >>= f') g + \ ccorres_underlying sr Gamm r xf arrel axf P P' hs (cutMon Q f >>= f') g" + apply (clarsimp simp: ccorres_underlying_def + cutMon_def fail_def bind_def + split: if_split_asm) + apply (subst if_P, simp)+ + apply fastforce + done + +lemma ccorres_drop_cutMon_bindE: + "ccorres_underlying sr Gamm r xf arrel axf P P' hs (f >>=E f') g + \ ccorres_underlying sr Gamm r xf arrel axf P P' hs (cutMon Q f >>=E f') g" + apply (clarsimp simp: ccorres_underlying_def + cutMon_def fail_def bind_def bindE_def lift_def + split: if_split_asm) + apply (subst if_P, simp)+ + apply fastforce + done + +lemma ccorres_cutMon: + "(\s. Q s \ ccorres_underlying sr Gamm r xf arrel axf P P' hs (cutMon ((=) s) f) g) + \ ccorres_underlying sr Gamm r xf arrel axf P P' hs (cutMon Q f) g" + apply (clarsimp simp: ccorres_underlying_def + cutMon_def fail_def bind_def + split: if_split_asm) + apply (erule meta_allE, drule(1) meta_mp) + apply (drule(1) bspec) + apply (clarsimp simp: fail_def + split: if_split_asm) + apply (subst if_P, assumption)+ + apply fastforce + done + +value_abbreviation zombie_magic "word_bits - cte_level_bits" + +lemma unat_of_nat_zombie_magic[simp]: "unat ((of_nat zombie_magic) :: machine_word) = zombie_magic" + by (clarsimp simp: unat_of_nat64) + +lemma ccap_zombie_radix_less1: + "\ ccap_relation cap ccap; isZombie cap; capAligned cap \ + \ \ isZombieTCB_C (capZombieType_CL (cap_zombie_cap_lift ccap)) + \ capZombieType_CL (cap_zombie_cap_lift ccap) < of_nat zombie_magic" + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap[THEN iffD2]) + apply (clarsimp simp: Let_def capAligned_def + objBits_simps' word_bits_conv word_less_nat_alt + word_le_nat_alt less_mask_eq + split: if_split_asm) + done + +lemmas ccap_zombie_radix_less2 + = order_less_le_trans [OF mp [OF ccap_zombie_radix_less1]] + +lemma ccap_zombie_radix_less3: + "\ ccap_relation cap ccap; isZombie cap; capAligned cap \ + \ get_capZombieBits_CL (cap_zombie_cap_lift ccap) < of_nat zombie_magic" + apply (clarsimp simp: get_capZombieBits_CL_def Let_def + less_mask_eq ccap_zombie_radix_less2 + split: if_split) + done + +lemmas ccap_zombie_radix_less4 + = order_less_le_trans [OF ccap_zombie_radix_less3] + +lemma cap_zombie_cap_get_capZombieNumber_spec: + notes if_cong[cong] + shows + "\cap s. \\ \s. ccap_relation cap \cap \ isZombie cap \ capAligned cap\ + Call cap_zombie_cap_get_capZombieNumber_'proc + {s'. 
ret__unsigned_long_' s' = of_nat (capZombieNumber cap)}" + apply vcg + apply clarsimp + apply (rule context_conjI, simp add: cap_get_tag_isCap) + apply (frule(2) ccap_zombie_radix_less1) + apply (frule(2) ccap_zombie_radix_less3) + apply (drule(1) cap_get_tag_to_H) + apply clarsimp + apply (rule conjI) + apply unat_arith + apply (fold mask_2pm1) + apply (simp add: get_capZombieBits_CL_def Let_def split: if_split_asm) + apply (subst unat_Suc2) + apply clarsimp + apply (subst less_mask_eq, erule order_less_le_trans) + apply simp+ + done + +lemma cap_zombie_cap_set_capZombieNumber_spec: + "\cap s. \\ \s. ccap_relation cap \cap \ isZombie cap \ capAligned cap + \ unat (n_' s) \ zombieCTEs (capZombieType cap)\ + Call cap_zombie_cap_set_capZombieNumber_'proc + {s'. ccap_relation (capZombieNumber_update (\_. unat (n_' s)) cap) + (ret__struct_cap_C_' s')}" + apply vcg + apply (rule context_conjI, simp add: cap_get_tag_isCap) + apply clarsimp + apply (frule(2) ccap_zombie_radix_less3) + apply (rule conjI, unat_arith) + apply clarsimp + apply (frule(2) ccap_zombie_radix_less1) + apply (clarsimp simp: cap_zombie_cap_lift + ccap_relation_def map_option_Some_eq2 + cap_to_H_def get_capZombieBits_CL_def + split: if_split_asm) + apply (simp add: mask_def word_bw_assocs word_ao_dist) + apply (rule sym, rule less_mask_eq[where n=5, unfolded mask_def, simplified]) + apply unat_arith + apply (clarsimp simp: Let_def mask_2pm1[symmetric]) + apply (subst unat_Suc2, clarsimp)+ + apply (subst less_mask_eq, erule order_less_le_trans, simp)+ + apply (simp add: word_ao_dist word_bw_assocs) + apply (rule sym, rule less_mask_eq) + apply (simp only: word_less_nat_alt) + apply (subst unat_power_lower) + apply (simp add: word_bits_conv) + apply (erule order_le_less_trans) + apply simp + done + +lemma capRemovable_spec: + "\cap s. \\ \s. ccap_relation cap \cap \ (isZombie cap \ cap = NullCap) \ capAligned cap\ + Call capRemovable_'proc + {s'. ret__unsigned_long_' s' = from_bool (capRemovable cap (ptr_val (slot_' s)))}" + supply if_cong[cong] + apply vcg + apply (clarsimp simp: cap_get_tag_isCap(1-8)[THEN trans[OF eq_commute]]) + apply (simp add: capRemovable_def) + apply (clarsimp simp: ccap_zombie_radix_less4) + apply (subst eq_commute, subst from_bool_eq_if) + apply (rule exI, rule conjI, assumption) + apply clarsimp + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap[THEN iffD2]) + apply (case_tac slot) + apply (clarsimp simp: get_capZombiePtr_CL_def Let_def get_capZombieBits_CL_def + isCap_simps unat_eq_0 unat_eq_1 + less_mask_eq ccap_zombie_radix_less2 + split: if_split_asm) + done + +lemma capCyclicZombie_spec: + "\cap s. \\ \s. ccap_relation cap \cap \ isZombie cap \ capAligned cap\ + Call capCyclicZombie_'proc + {s'. 
ret__unsigned_long_' s' = from_bool (capCyclicZombie cap (ptr_val (slot_' s)))}" + supply if_cong[cong] + apply vcg + apply (clarsimp simp: if_1_0_0 from_bool_0) + apply (frule(1) cap_get_tag_isCap [THEN iffD2], simp) + apply (subst eq_commute, subst from_bool_eq_if) + apply (simp add: ccap_zombie_radix_less4) + apply (case_tac slot, simp) + apply (frule(1) cap_get_tag_to_H) + apply (clarsimp simp: capCyclicZombie_def Let_def + get_capZombieBits_CL_def get_capZombiePtr_CL_def + split: if_split_asm) + apply (auto simp: less_mask_eq ccap_zombie_radix_less2) + done + +lemma case_assertE_to_assert: + "(case cap of + Zombie ptr2 x xa \ + haskell_assertE (P ptr2 x xa) [] + | _ \ returnOk ()) + = liftE (assert (case cap of Zombie ptr2 x xa \ P ptr2 x xa | _ \ True))" + apply (simp add: assertE_def returnOk_liftE assert_def + split: capability.split if_split) + done + +lemma cteDelete_ccorres1: + assumes fs_cc: + "ccorres (cintr \ (\(success, cap) (success', cap'). success' = from_bool success \ ccap_relation cap cap' \ cleanup_info_wf' cap)) + (liftxf errstate finaliseSlot_ret_C.status_C (\v. (success_C v, finaliseSlot_ret_C.cleanupInfo_C v)) + ret__struct_finaliseSlot_ret_C_') + (\s. invs' s \ sch_act_simple s \ (expo \ ex_cte_cap_to' slot s)) + (UNIV \ {s. slot_' s = Ptr slot} \ {s. immediate_' s = from_bool expo}) [] + (cutMon ((=) s) (finaliseSlot slot expo)) (Call finaliseSlot_'proc)" + shows + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_' ) + (\s. invs' s \ sch_act_simple s \ (expo \ ex_cte_cap_to' slot s)) + (UNIV \ {s. slot_' s = Ptr slot} \ {s. exposed_' s = from_bool expo}) [] + (cutMon ((=) s) (cteDelete slot expo)) (Call cteDelete_'proc)" + apply (cinit' lift: slot_' exposed_' cong: call_ignore_cong) + apply (simp add: cteDelete_def split_def cutMon_walk_bindE + del: Collect_const cong: call_ignore_cong) + apply (clarsimp simp del: Collect_const cong: call_ignore_cong) + apply (ctac(no_vcg) add: fs_cc) + apply (rule ccorres_drop_cutMon) + apply (simp add: from_bool_0 whenE_def split_def + Collect_False + del: Collect_const) + apply (rule ccorres_if_lhs) + apply (simp only: imp_conv_disj simp_thms) + apply (simp add: liftE_liftM liftM_def) + apply (ctac(no_vcg) add: emptySlot_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply wp + apply (simp only: imp_conv_disj simp_thms) + apply (simp add: returnOk_def) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply wp + apply (rule_tac Q'="\rv. invs'" in hoare_strengthen_postE_R) + apply (wp cutMon_validE_drop finaliseSlot_invs) + apply fastforce + apply (auto simp: cintr_def) + done + +lemma zombie_rf_sr_helperE: + "\ cte_wp_at' P p s; (s, s') \ rf_sr; invs' s; + \cte. P cte \ isZombie (cteCap cte); + \cap ccap cte. 
\ cap = cteCap cte; ccap = h_val + (hrs_mem (t_hrs_' (globals s'))) + (cap_Ptr &(cte_Ptr p\[''cap_C''])); + P cte; ccap_relation cap ccap; capAligned cap; + cap_get_tag ccap = scast cap_zombie_cap; + get_capZombiePtr_CL (cap_zombie_cap_lift ccap) + = capZombiePtr cap; + capZombieType_CL (cap_zombie_cap_lift ccap) + = (case_zombie_type ZombieTCB_C of_nat (capZombieType cap)) \ + \ Q \ + \ Q" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid', clarsimp) + apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) + apply atomize + apply (simp add: typ_heap_simps) + apply (drule spec, drule(1) mp)+ + apply (clarsimp simp: cap_get_tag_isCap + dest!: ccte_relation_ccap_relation + valid_capAligned) + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap[THEN iffD2]) + apply (clarsimp simp: get_capZombiePtr_CL_def Let_def + get_capZombieBits_CL_def + isZombieTCB_C_def + split: if_split_asm) + apply (simp add: less_mask_eq ccap_zombie_radix_less2 + isZombieTCB_C_def) + done + +lemma of_nat_ZombieTCB_C: + "n < zombie_magic \ of_nat n \ (ZombieTCB_C :: word64)" + apply (drule of_nat_mono_maybe[rotated, where 'a=machine_word_len]) + apply simp + apply (clarsimp simp: ZombieTCB_C_def) + done + +lemma case_zombie_type_map_inj: + "case_zombie_type (ZombieTCB_C :: machine_word) of_nat (capZombieType cap) + = case_zombie_type (ZombieTCB_C :: machine_word) of_nat (capZombieType cap') + \ capAligned cap \ capAligned cap' + \ isZombie cap \ isZombie cap' \ + capZombieType cap = capZombieType cap'" + apply (clarsimp simp: capAligned_def word_bits_conv + objBits_simps' isCap_simps + of_nat_ZombieTCB_C + not_sym [OF of_nat_ZombieTCB_C] + split: zombie_type.split_asm) + apply (subst(asm) word_unat.Abs_inject) + apply (simp add: unats_def)+ + done + +lemma valid_cap_capZombieNumber_unats: + "\ s \' cap; isZombie cap \ + \ capZombieNumber cap \ unats word_bits" + apply (clarsimp simp: valid_cap'_def isCap_simps + split: zombie_type.split_asm) + apply (simp add: unats_def word_bits_def) + apply (clarsimp simp only: unats_def mem_simps) + apply (erule order_le_less_trans) + apply (rule power_strict_increasing) + apply (simp add: word_bits_conv) + apply simp + done + +lemma cteDelete_invs'': + "\invs' and sch_act_simple and (\s. ex \ ex_cte_cap_to' ptr s)\ cteDelete ptr ex \\rv. invs'\" + apply (simp add: cteDelete_def whenE_def split_def) + apply (rule hoare_pre, wp finaliseSlot_invs) + apply (rule hoare_strengthen_postE_R) + apply (unfold validE_R_def) + apply (rule use_spec) + apply (rule spec_valid_conj_liftE1) + apply (rule valid_validE_R, rule finaliseSlot_invs) + apply (rule spec_valid_conj_liftE1) + apply (rule finaliseSlot_removeable) + apply (rule spec_valid_conj_liftE1) + apply (rule finaliseSlot_irqs) + apply (rule finaliseSlot_abort_cases'[folded finaliseSlot_def]) + apply clarsimp + apply clarsimp + done + +lemma ccorres_Cond_rhs_Seq_ret_int: + "\ P \ ccorres rvr xf Q S hs absf (f;;h); + \rv' t t'. ceqv \ ret__int_' rv' t t' (g ;; h) (j rv'); + \ P \ ccorres rvr xf R T hs absf (j 0) \ + \ ccorres rvr xf (\s. (P \ Q s) \ (\ P \ R s)) + {s. (P \ s \ S) \ (\ P \ s \ {s. s \ T \ ret__int_' s = 0})} + hs absf (Cond {s. 
P} f g ;; h)" + apply (rule ccorres_guard_imp2) + apply (erule ccorres_Cond_rhs_Seq) + apply (erule ccorres_abstract) + apply (rule_tac P="rv' = 0" in ccorres_gen_asm2) + apply simp + apply simp + done + +(* it's a little painful to have to do this from first principles *) +lemma ccorres_cutMon_stateAssert: + "\ Q s \ ccorres_underlying sr Gamm r xf arrel axf P P' hs + (cutMon ((=) s) (a ())) c \ \ + ccorres_underlying sr Gamm r xf arrel axf (\s. Q s \ P s) P' hs + (cutMon ((=) s) (stateAssert Q [] >>= a)) c" + apply (simp add: cutMon_walk_bind) + apply (cases "\ Q s") + apply (simp add: stateAssert_def cutMon_def exec_get assert_def + ccorres_fail' + cong: if_cong[OF eq_commute]) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_drop_cutMon_bind) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_cutMon) + apply (simp add: stateAssert_def exec_get return_def) + apply (wp | simp)+ + done + +lemma valid_Zombie_number_word_bits: + "valid_cap' cap s \ isZombie cap + \ capZombieNumber cap < 2 ^ word_bits" + apply (clarsimp simp: valid_cap'_def isCap_simps) + apply (erule order_le_less_trans) + apply (rule order_le_less_trans[OF zombieCTEs_le]) + apply simp + done + +lemma ccorres_cutMon_locateSlotCap_Zombie: + "\ (capZombiePtr cap + 2 ^ cte_level_bits * n, s) \ fst (locateSlotCap cap n s) + \ ccorres_underlying rf_sr Gamm r xf arrel axf + Q Q' hs + (cutMon ((=) s) (a (capZombiePtr cap + 2 ^ cte_level_bits * n))) c \ + \ ccorres_underlying rf_sr Gamm r xf arrel axf + (Q and valid_cap' cap and (\_. isZombie cap \ n = of_nat (capZombieNumber cap - 1))) + {s. array_assertion (cte_Ptr (capZombiePtr cap)) (capZombieNumber cap - 1) + (hrs_htd (t_hrs_' (globals s))) \ s \ Q'} hs + (cutMon ((=) s) (locateSlotCap cap n >>= a)) c" + apply (simp add: locateSlot_conv in_monad cutMon_walk_bind) + apply (rule ccorres_gen_asm) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_drop_cutMon_bind) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_cutMon) + apply (clarsimp simp: in_monad stateAssert_def) + apply (rule_tac P="\s'. (capZombieType cap \ ZombieTCB \ + (case gsCNodes s' (capUntypedPtr cap) of None \ False + | Some n \ of_nat (capZombieNumber cap - 1) < 2 ^ n))" + in ccorres_cross_over_guard) + apply (clarsimp simp: isCap_simps) + apply assumption + apply (wp | simp)+ + apply (clarsimp simp: isCap_simps stateAssert_def in_monad) + apply (cases "capZombieType cap = ZombieTCB") + apply (clarsimp simp: valid_cap_simps') + apply (drule(1) rf_sr_tcb_ctes_array_assertion[ + where tcb="tcb_ptr_to_ctcb_ptr t" for t, simplified]) + apply (simp add: tcb_cnode_index_defs array_assertion_shrink_right) + apply (clarsimp simp: option.split[where P="\x. x"]) + apply (rule conjI) + apply clarsimp + apply blast + apply (clarsimp dest!: of_nat_less_t2n) + apply (drule(1) rf_sr_gsCNodes_array_assertion) + apply (erule notE, erule array_assertion_shrink_right) + apply (frule valid_Zombie_number_word_bits, simp+) + by (simp add: unat_arith_simps unat_of_nat word_bits_def + valid_cap_simps') + +lemma reduceZombie_ccorres1: + assumes fs_cc: + "\slot. \ capZombieNumber cap \ 0; expo; + (slot, s) \ fst (locateSlotCap cap + (fromIntegral (capZombieNumber cap - 1)) s) \ \ + ccorres (cintr \ (\(success, irqopt) (success', irq'). success' = from_bool success \ ccap_relation irqopt irq' \ cleanup_info_wf' irqopt)) + (liftxf errstate finaliseSlot_ret_C.status_C (\v. (success_C v, finaliseSlot_ret_C.cleanupInfo_C v)) + ret__struct_finaliseSlot_ret_C_') + (\s. 
invs' s \ sch_act_simple s \ ex_cte_cap_to' slot s) + (UNIV \ {s. slot_' s = Ptr slot} \ {s. immediate_' s = from_bool False}) [] + (cutMon ((=) s) (finaliseSlot slot False)) (Call finaliseSlot_'proc)" + shows + "isZombie cap \ + ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_' ) + (invs' and sch_act_simple and cte_wp_at' (\cte. cteCap cte = cap) slot) + (UNIV \ {s. slot_' s = Ptr slot} \ {s. immediate_' s = from_bool expo}) [] + (cutMon ((=) s) (reduceZombie cap slot expo)) (Call reduceZombie_'proc)" + apply (cinit' lift: slot_' immediate_') + apply (simp add: from_bool_0 del: Collect_const) + apply (rule_tac P="capZombieNumber cap < 2 ^ word_bits" in ccorres_gen_asm) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac xf'=ret__unsigned_long_' + and val="capZombiePtr cap" + and R="cte_wp_at' (\cte. cteCap cte = cap) slot and invs'" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply clarsimp + apply (erule(2) zombie_rf_sr_helperE) + apply simp + apply (clarsimp simp: ccap_zombie_radix_less4) + apply ceqv + apply csymbr + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac xf'=n_' + and val="of_nat (capZombieNumber cap)" + and R="cte_wp_at' (\cte. cteCap cte = cap) slot and invs'" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (erule(2) zombie_rf_sr_helperE) + apply simp + apply fastforce + apply ceqv + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=type_' + and val="case_zombie_type ZombieTCB_C of_nat (capZombieType cap)" + and R="cte_wp_at' (\cte. cteCap cte = cap) slot and invs'" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply clarsimp + apply (erule(2) zombie_rf_sr_helperE) + apply simp + apply clarsimp + apply ceqv + apply (simp add: reduceZombie_def del: Collect_const) + apply (simp only: cutMon_walk_if) + apply (rule ccorres_if_lhs) + apply (simp, rule ccorres_drop_cutMon, rule ccorres_fail) + apply (rule ccorres_if_lhs) + apply (simp add: Let_def Collect_True Collect_False assertE_assert liftE_bindE + del: Collect_const) + apply (rule ccorres_drop_cutMon, rule ccorres_assert) + apply (rule ccorres_rhs_assoc)+ + apply (simp add: liftE_bindE liftM_def case_assertE_to_assert + del: Collect_const) + apply (rule ccorres_symb_exec_l[OF _ getCTE_inv _ empty_fail_getCTE]) + apply (rule ccorres_assert) + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_symb_exec_r) + apply (simp add: liftE_liftM liftM_def) + apply (ctac(no_vcg) add: capSwapForDelete_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (simp add: return_def) + apply wp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wp + apply (rule ccorres_if_lhs) + apply (simp add: Let_def liftE_bindE del: Collect_const) + apply (rule ccorres_cutMon_locateSlotCap_Zombie) + apply (simp add: cutMon_walk_bindE Collect_True del: Collect_const) + apply (rule ccorres_rhs_assoc ccorres_move_c_guard_cte ccorres_Guard_Seq)+ + apply csymbr + apply (ctac(no_vcg, no_simp) add: cteDelete_ccorres1) + apply (rule ccorres_guard_imp2) + apply (rule fs_cc, clarsimp+)[1] + apply simp + apply (rule ccorres_drop_cutMon) + apply (simp add: Collect_False del: Collect_const) + apply (rule ccorres_rhs_assoc ccorres_move_c_guard_cte)+ + apply (rule ccorres_symb_exec_l[OF _ getCTE_inv _ empty_fail_getCTE]) + apply (rule_tac F="\rv'. \cp. 
ccap_relation (cteCap rv) cp + \ rv' = cap_get_tag cp" + and xf'=ret__unsigned_longlong_' + and R="cte_wp_at' ((=) rv) slot" + in ccorres_symb_exec_r_abstract_UNIV[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (clarsimp dest!: ccte_relation_ccap_relation simp: typ_heap_simps) + apply fastforce + apply ceqv + apply (clarsimp simp: cap_get_tag_isCap simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply simp + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: Let_def del: Collect_const) + apply (rule ccorres_move_c_guard_cte ccorres_rhs_assoc)+ + apply (rule_tac xf'=ret__unsigned_long_' + and val="capZombiePtr (cteCap rv)" + and R="cte_wp_at' ((=) rv) slot and invs'" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (erule(2) zombie_rf_sr_helperE) + apply simp + apply (clarsimp simp: ccap_zombie_radix_less4) + apply ceqv + apply csymbr + apply csymbr + apply (simp only: if_1_0_0 simp_thms) + apply (rule ccorres_Cond_rhs_Seq[rotated]) + apply (simp add: assertE_assert liftE_def) + apply (rule ccorres_assert) + apply (rule ccorres_cond_false_seq | simp)+ + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac xf'=ret__unsigned_long_' + and val="of_nat (capZombieNumber (cteCap rv))" + and R="cte_wp_at' ((=) rv) slot and invs'" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (erule(2) zombie_rf_sr_helperE) + apply simp + apply fastforce + apply ceqv + apply csymbr + apply (simp only: if_1_0_0 simp_thms) + apply (rule ccorres_Cond_rhs_Seq[rotated]) + apply (rule_tac P="\s. s \' cteCap rv \ s \' cap" in ccorres_gen_asm) + apply clarsimp + apply (subst (asm) word_unat.Abs_inject) + apply (rule valid_cap_capZombieNumber_unats[unfolded word_bits_def]; simp) + apply (rule valid_cap_capZombieNumber_unats[unfolded word_bits_def]; simp) + apply (simp add: assertE_def) + apply (rule ccorres_fail) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="case_zombie_type ZombieTCB_C of_nat (capZombieType (cteCap rv))" + and R="cte_wp_at' ((=) rv) slot and invs'" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (erule(2) zombie_rf_sr_helperE) + apply simp + subgoal by clarsimp + apply ceqv + apply csymbr + apply (simp only: if_1_0_0 simp_thms) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule_tac P="\s. 
s \' cap \ s \' cteCap rv" in ccorres_gen_asm) + apply (subgoal_tac "P" for P, subst if_P, assumption) + prefer 2 + apply clarsimp + apply (subst (asm) word_unat.Abs_inject) + apply (rule valid_cap_capZombieNumber_unats[unfolded word_bits_def]; simp) + apply (rule valid_cap_capZombieNumber_unats[unfolded word_bits_def]; simp) + apply clarsimp + apply (drule valid_capAligned)+ + apply (drule(4) case_zombie_type_map_inj) + apply simp + apply (rule ccorres_rhs_assoc)+ + apply (simp del: Collect_const) + apply (rule ccorres_move_c_guard_cte) + apply (simp add: liftE_def bind_assoc del: Collect_const) + apply (rule ccorres_symb_exec_l [OF _ getCTE_inv _ empty_fail_getCTE]) + apply (rule ccorres_assert) + apply (rule_tac xf'=ret__struct_cap_C_' + and F="\rv'. ccap_relation (capZombieNumber_update (\x. x - 1) cap) rv'" + and R="cte_wp_at' ((=) rv) slot and invs'" + in ccorres_symb_exec_r_abstract_UNIV[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (erule(2) zombie_rf_sr_helperE, simp) + apply clarsimp + apply (rule exI, rule conjI, assumption, clarsimp) + apply (rule ssubst, rule unat_minus_one) + apply (erule of_nat_neq_0) + apply (drule(1) valid_cap_capZombieNumber_unats) + subgoal by (simp add: unats_def word_bits_def) + apply (rule conjI) + apply (clarsimp simp: isCap_simps valid_cap'_def) + apply (erule order_trans[rotated]) + apply (rule order_trans, rule diff_le_self) + subgoal by (simp add: unat_of_nat) + apply clarsimp + apply (erule_tac P="\cap. ccap_relation cap cap'" for cap' in rsubst) + apply (clarsimp simp: isCap_simps capAligned_def) + apply (drule valid_cap_capZombieNumber_unats | simp)+ + apply (simp add: word_unat.Abs_inverse word_bits_def) + apply ceqv + apply (rule ccorres_move_c_guard_cte) + apply (ctac(no_vcg) add: ccorres_updateCap) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply wp + apply simp + apply (simp add: guard_is_UNIV_def Collect_const_mem) + apply (clarsimp simp: isCap_simps) + apply wp + apply (subst if_not_P) + apply clarsimp + apply (simp add: assertE_assert liftE_def) + apply (rule ccorres_fail) + apply (simp add: guard_is_UNIV_def)+ + apply (rule ccorres_fail) + apply (simp add: guard_is_UNIV_def) + apply (simp add: conj_comms) + apply (wp getCTE_wp) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def cintr_def) + apply vcg + apply (wp cutMon_validE_drop) + apply (rule_tac Q'="\rv. 
invs' and cte_at' slot and valid_cap' cap" in hoare_strengthen_postE_R) + apply (wp cteDelete_invs'') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (fastforce dest: ctes_of_valid') + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply simp + apply (simp add: guard_is_UNIV_def Collect_const_mem) + apply (clarsimp simp: isCap_simps size_of_def cte_level_bits_def) + apply (simp only: word_bits_def unat_of_nat unat_arith_simps, simp) + apply (simp add: guard_is_UNIV_def)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: isCap_simps) + apply (frule ctes_of_valid', clarsimp+) + apply (frule valid_Zombie_number_word_bits, clarsimp+) + apply (frule(1) ex_Zombie_to2, clarsimp+) + apply (clarsimp simp: cte_level_bits_def) + apply (frule_tac n="v2 - 1" in valid_Zombie_cte_at') + apply (fastforce simp add: valid_cap'_def) + apply (frule_tac n=0 in valid_Zombie_cte_at') + apply (fastforce simp: valid_cap'_def) + apply (clarsimp simp: cte_wp_at_ctes_of size_of_def) + apply (auto simp: cteSizeBits_def) + done + +lemma induction_setup_helper: + "\ \s slot exposed. P s slot exposed \ Q s slot exposed; + \ \s slot exposed. P s slot exposed \ Q s slot exposed \ + \ P s slot exposed \ + \ Q s slot exposed" + by auto + +schematic_goal finaliseSlot_ccorres_induction_helper: + "\s slot exposed. ?P s slot exposed + \ ccorres (cintr \ (\(success, irqopt) (success', irq'). success' = from_bool success \ ccap_relation irqopt irq' \ cleanup_info_wf' irqopt)) + (liftxf errstate finaliseSlot_ret_C.status_C (\v. (success_C v, finaliseSlot_ret_C.cleanupInfo_C v)) + ret__struct_finaliseSlot_ret_C_') + (\s. invs' s \ sch_act_simple s \ (exposed \ ex_cte_cap_to' slot s)) + (UNIV \ {s. slot_' s = Ptr slot} \ {s. immediate_' s = from_bool exposed}) [] + (cutMon ((=) s) (finaliseSlot slot exposed)) (Call finaliseSlot_'proc)" + unfolding finaliseSlot_def + apply (rule ccorres_Call) + apply (rule finaliseSlot_impl[unfolded finaliseSlot_body_def]) + apply (unfold whileAnno_def) + apply (cinitlift slot_' immediate_') + apply safe + apply (rule ccorres_guard_imp2) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (rule_tac P="\s. invs' s \ sch_act_simple s \ (exposed \ ex_cte_cap_to' slota s)" + in ccorres_inst[where P'=UNIV]) + apply assumption + apply simp + done + +lemma finaliseSlot_ccorres: + notes from_bool_neq_0 [simp del] + shows + "ccorres (cintr \ (\(success, irqopt) (success', irq'). success' = from_bool success \ ccap_relation irqopt irq' \ cleanup_info_wf' irqopt)) + (liftxf errstate finaliseSlot_ret_C.status_C (\v. (success_C v, finaliseSlot_ret_C.cleanupInfo_C v)) + ret__struct_finaliseSlot_ret_C_') + (\s. invs' s \ sch_act_simple s \ (exposed \ ex_cte_cap_to' slot s)) + (UNIV \ {s. slot_' s = Ptr slot} \ {s. 
immediate_' s = from_bool exposed}) [] + (cutMon ((=) s) (finaliseSlot slot exposed)) (Call finaliseSlot_'proc)" + apply (rule finaliseSlot_ccorres_induction_helper) + apply (induct rule: finaliseSlot'.induct[where ?a0.0=slot and ?a1.0=exposed and ?a2.0=s]) + subgoal premises hyps for slot' expo s' + apply (subst finaliseSlot'.simps) + apply (fold cteDelete_def[unfolded finaliseSlot_def]) + apply (fold reduceZombie_def) + apply (simp only: liftE_bindE cutMon_walk_bind + withoutPreemption_def fun_app_def) + apply (rule ccorres_drop_cutMon_bind) + apply (rule ccorres_symb_exec_l' + [OF _ getCTE_inv getCTE_sp empty_fail_getCTE]) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_cutMon, simp only: cutMon_walk_if) + apply (rule ccorres_symb_exec_r) + apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) + apply (rule_tac xf'=ret__unsigned_longlong_' in ccorres_abstract) + apply ceqv + apply (rule_tac P="(rv' = scast cap_null_cap) = (cteCap rv = NullCap)" + in ccorres_gen_asm2) + apply (rule ccorres_if_lhs) + apply (simp del: Collect_const add: Collect_True Collect_False + ccorres_cond_iffs) + apply (rule ccorres_drop_cutMon) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def ccap_relation_NullCap_iff) + apply (simp add: Collect_True liftE_bindE split_def + ccorres_cond_iffs cutMon_walk_bind + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_drop_cutMon_bind) + apply (clarsimp simp only: from_bool_0) + apply (rule ccorres_rhs_assoc)+ + apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot']) + apply (rule ccorres_cutMon, simp only: cutMon_walk_bind) + apply (rule ccorres_drop_cutMon_bind) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac A="\s. invs' s \ sch_act_simple s \ cte_wp_at' ((=) rv) slot' s + \ (expo \ ex_cte_cap_to' slot' s) + \ (final_matters' (cteCap rv) \ rva = isFinal (cteCap rv) slot' (cteCaps_of s))" + and A'=UNIV + in ccorres_guard_imp2) + apply (ctac(no_vcg) add: finaliseCap_ccorres) + apply (rule ccorres_add_return) + apply (rule_tac r'="\rv rv'. rv' = from_bool (capRemovable (fst rvb) slot')" + and xf'=ret__unsigned_long_' in ccorres_split_nothrow_novcg) + apply (rule_tac P="\s. capAligned (fst rvb) \ (isZombie (fst rvb) \ fst rvb = NullCap)" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply auto[1] + apply ceqv + apply (rule ccorres_cutMon) + apply (simp add: cutMon_walk_if from_bool_0 + del: Collect_const + cong: call_ignore_cong) + apply (rule ccorres_if_lhs) + apply simp + apply (rule ccorres_drop_cutMon, + rule ccorres_split_throws) + apply (rule_tac P="\s. 
cleanup_info_wf' (snd rvb)" + in ccorres_from_vcg_throws[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def) + apply (clarsimp simp: cleanup_info_wf'_def arch_cleanup_info_wf'_def + split: if_split capability.splits) + apply vcg + apply (simp only: cutMon_walk_if Collect_False ccorres_seq_cond_empty + ccorres_seq_skip) + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_if_lhs) + apply (simp only: cutMon_walk_bind withoutPreemption_def + liftE_bindE fun_app_def) + apply (rule ccorres_drop_cutMon_bind, + rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow_novcg) + apply (rule ccorres_guard_imp2[where A=\ and A'=UNIV]) + apply (rule ccorres_updateCap) + apply clarsimp + apply (rule ceqv_refl) + apply csymbr + apply (simp only: if_1_0_0 simp_thms Collect_True + ccorres_seq_cond_univ) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_add_return, + rule_tac r'="\rv rv'. rv' = from_bool True" + and xf'=ret__unsigned_long_' in ccorres_split_nothrow_novcg) + apply (rule_tac P="\s. capAligned (fst rvb)" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (auto simp: isCap_simps capCyclicZombie_def)[1] + apply ceqv + apply csymbr + apply (simp add: from_bool_0) + apply (rule ccorres_split_throws) + apply (rule ccorres_drop_cutMon, + rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def) + apply (drule use_valid [OF _ finaliseCap_cases, OF _ TrueI]) + apply (simp add: irq_opt_relation_def + split: if_split_asm) + apply vcg + apply wp + apply (simp add: guard_is_UNIV_def) + apply wp + apply (simp add: guard_is_UNIV_def) + apply (simp only: liftE_bindE cutMon_walk_bind Let_def + withoutPreemption_def fun_app_def + split_def K_bind_def fst_conv snd_conv) + apply (rule ccorres_drop_cutMon_bind, + rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow_novcg) + apply (rule ccorres_guard_imp2[where A=\ and A'=UNIV]) + apply (rule ccorres_updateCap) + apply clarsimp + apply (rule ceqv_refl) + apply (subgoal_tac "isZombie (fst rvb)") + prefer 2 + apply (drule use_valid [OF _ finaliseCap_cases, OF _ TrueI]) + apply (auto simp add: capRemovable_def)[1] + apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE cutMon_walk_if) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_add_return, + rule_tac r'="\rv rv'. rv' = from_bool False" + and xf'=ret__int_' in ccorres_split_nothrow_novcg) + apply (rule_tac P="\s. 
capAligned (fst rvb)" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply fastforce + apply ceqv + apply (simp only: from_bool_0 simp_thms Collect_False + ccorres_seq_cond_empty ccorres_seq_skip) + apply (ctac (no_vcg, no_simp) add: reduceZombie_ccorres1) + apply (rule ccorres_guard_imp2) + apply (rule finaliseSlot_ccorres_induction_helper) + apply (rule hyps(1), (simp add: in_monad | rule conjI refl)+)[1] + apply simp + apply assumption + apply (rule ccorres_cutMon) + apply (simp only: cutMon_walk_bindE id_apply simp_thms + Collect_False ccorres_seq_cond_empty + ccorres_seq_skip) + apply (rule ccorres_drop_cutMon_bindE) + apply (ctac(no_vcg) add: preemptionPoint_ccorres) + apply (rule ccorres_cutMon) + apply (simp only: id_apply simp_thms + Collect_False ccorres_seq_cond_empty + ccorres_seq_skip) + apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) + apply (rule hyps[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def], + unfolded split_def], + (simp add: in_monad)+) + apply (simp add: from_bool_0) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def cintr_def) + apply vcg + apply (wp preemptionPoint_invR) + apply simp + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def cintr_def) + apply vcg + apply (wp cutMon_validE_drop reduceZombie_invs reduceZombie_sch_act_simple) + apply (wp reduceZombie_cap_to[simplified imp_conv_disj, simplified])+ + apply (simp add: guard_is_UNIV_def) + apply (simp add: conj_comms) + apply (wp make_zombie_invs' updateCap_cte_wp_at_cases + updateCap_cap_to' hoare_vcg_disj_lift hoare_weak_lift_imp)+ + apply (simp add: guard_is_UNIV_def) + apply wp + apply (simp add: guard_is_UNIV_def) + apply (rule hoare_strengthen_post) + apply (rule_tac Q="\fin s. invs' s \ sch_act_simple s \ s \' (fst fin) + \ (expo \ ex_cte_cap_to' slot' s) + \ cte_wp_at' (\cte. 
cteCap cte = cteCap rv) slot' s" + in hoare_vcg_conj_lift) + apply (wp hoare_vcg_disj_lift finaliseCap_invs[where sl=slot'])[1] + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_cte_refs) + apply (rule finaliseCap_replaceable[where slot=slot']) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule disjE[where P="F \ G" for F G]) + apply (clarsimp simp: capRemovable_def cte_wp_at_ctes_of cap_has_cleanup'_def + split: option.split capability.splits) + apply (auto dest!: ctes_of_valid' + simp: valid_cap'_def Kernel_C.maxIRQ_def AARCH64.maxIRQ_def + unat_ucast word_le_nat_alt cleanup_info_wf'_def arch_cleanup_info_wf'_def)[1] + subgoal by (auto dest!: valid_capAligned ctes_of_valid' + simp: isCap_simps final_matters'_def o_def) + apply clarsimp + apply (frule valid_globals_cte_wpD'[rotated], clarsimp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) + apply (frule valid_global_refsD_with_objSize, clarsimp) + apply (auto simp: typ_heap_simps dest!: ccte_relation_ccap_relation)[1] + apply (wp isFinalCapability_inv hoare_weak_lift_imp | wp (once) isFinal[where x=slot'])+ + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI, (auto)[1]) + apply clarsimp + apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap + dest!: ccte_relation_ccap_relation) + done + done + +lemma ccorres_use_cutMon: + "(\s. ccorres rvr xf P P' hs (cutMon ((=) s) f) g) + \ ccorres rvr xf P P' hs f g" + apply (simp add: ccorres_underlying_def + snd_cutMon) + apply (simp add: cutMon_def cong: xstate.case_cong) + apply blast + done + +lemmas cteDelete_ccorres2 = cteDelete_ccorres1 [OF finaliseSlot_ccorres] +lemmas cteDelete_ccorres = ccorres_use_cutMon [OF cteDelete_ccorres2] + +lemma cteRevoke_ccorres1: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple) (UNIV \ {s. 
slot_' s = cte_Ptr slot}) [] + (cutMon ((=) s) (cteRevoke slot)) (Call cteRevoke_'proc)" + apply (cinit' lift: slot_' simp: whileAnno_def) + apply simp + apply (rule ccorres_inst[where P="invs' and sch_act_simple" and P'=UNIV]) + prefer 2 + apply simp + apply (induct rule: cteRevoke.induct[where ?a0.0=slot and ?a1.0=s]) + subgoal premises hyps for slot' s' + apply (rule ccorres_guard_imp2) + apply (subst cteRevoke.simps[abs_def]) + apply (simp add: liftE_bindE cutMon_walk_bind + del: Collect_const) + apply (rule ccorres_drop_cutMon_bind) + apply (rule ccorres_symb_exec_l' + [OF _ getCTE_inv _ empty_fail_getCTE]) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac xf'=ret__unsigned_longlong_' and val="mdbNext (cteMDBNode rv)" + and R="cte_wp_at' ((=) rv) slot'" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) + apply (simp add: typ_heap_simps) + apply (clarsimp simp: ccte_relation_def map_option_Some_eq2) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (rule iffD1 [OF ccorres_expand_while_iff_Seq2]) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (simp only: if_1_0_0 simp_thms unlessE_def) + apply (rule ccorres_Cond_rhs_Seq[rotated]) + apply simp + apply (rule ccorres_cond_false) + apply (rule ccorres_drop_cutMon) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def) + apply (rule ccorres_cutMon, simp only: cutMon_walk_if cutMon_walk_bind) + apply (rule ccorres_if_lhs) + apply (rule ccorres_False[where P'=UNIV]) + apply (rule ccorres_drop_cutMon_bind) + apply (rule ccorres_pre_getCTE) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac xf'="ret__unsigned_long_'" + and val="from_bool (isMDBParentOf rv rva)" + and R="cte_wp_at' ((=) rv) slot' and invs' + and cte_wp_at' ((=) rva) (mdbNext (cteMDBNode rv))" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule_tac p="slot'" in ctes_of_valid', clarsimp) + apply (frule_tac p="mdbNext m" for m in ctes_of_valid', clarsimp) + apply (drule(1) rf_sr_ctes_of_clift[rotated])+ + apply (clarsimp simp: ccte_relation_def) + apply (auto intro: valid_capAligned)[1] + apply ceqv + apply csymbr + apply (rule ccorres_cutMon) + apply (simp add: whenE_def cutMon_walk_if cutMon_walk_bindE + from_bool_0 if_1_0_0 + del: Collect_const cong: if_cong call_ignore_cong) + apply (rule ccorres_if_lhs) + apply (rule ccorres_cond_true) + apply (rule ccorres_drop_cutMon_bindE) + apply (rule ccorres_rhs_assoc)+ + apply (ctac(no_vcg) add: cteDelete_ccorres) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) + apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE) + apply (rule ccorres_drop_cutMon_bindE) + apply (ctac(no_vcg) add: preemptionPoint_ccorres) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) + apply (rule ccorres_cutMon) + apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) + apply (rule hyps; fastforce simp: in_monad) + apply simp + apply (simp, rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE; simp) + apply vcg + apply (wp preemptionPoint_invR) + apply simp + apply simp + apply (simp, rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE; simp) + apply vcg + apply (wp cteDelete_invs' cteDelete_sch_act_simple) + apply (rule ccorres_cond_false) + apply 
(rule ccorres_drop_cutMon) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def) + apply (simp add: guard_is_UNIV_def cintr_def Collect_const_mem exception_defs) + apply (simp add: guard_is_UNIV_def) + apply (rule getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def) + apply (drule invs_mdb') + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def valid_nullcaps_def) + apply (case_tac cte, clarsimp) + apply (fastforce simp: nullMDBNode_def) + done + done + +lemmas cteRevoke_ccorres = ccorres_use_cutMon [OF cteRevoke_ccorres1] + +end + +end diff --git a/proof/crefine/AARCH64/DetWP.thy b/proof/crefine/AARCH64/DetWP.thy new file mode 100644 index 0000000000..222fe22aa5 --- /dev/null +++ b/proof/crefine/AARCH64/DetWP.thy @@ -0,0 +1,158 @@ +(* + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory DetWP +imports "Lib.DetWPLib" "CBaseRefine.Include_C" +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma det_wp_doMachineOp [wp]: + "det_wp (\_. P) f \ det_wp (\_. P) (doMachineOp f)" + apply (simp add: doMachineOp_def split_def) + apply (rule det_wp_pre, wp) + apply (erule det_wp_select_f) + apply wp+ + apply simp + done + +lemma det_wp_loadWordUser [wp]: + "det_wp (pointerInUserData x and K (is_aligned x 3)) (loadWordUser x)" + apply (simp add: loadWordUser_def loadWord_def) + apply (rule det_wp_pre, wp) + apply (rule det_wp_pre, wp) + apply clarsimp + apply assumption + apply wp + apply (clarsimp simp: is_aligned_mask) + done + +declare det_wp_liftM[wp] + +declare det_wp_assert_opt[wp] + +declare det_wp_when[wp] + +declare det_wp_unless[wp] + +declare word_neq_0_conv [simp del] + +lemma det_wp_loadObject_default [wp]: + "det_wp (\s. \obj. projectKO_opt ko = Some (obj::'a) \ + is_aligned p (objBits obj) \ q = p + \ case_option True (\x. 2 ^ (objBits obj) \ x - p) n) + (loadObject_default p q n ko :: ('a::pre_storable) kernel)" + apply (simp add: loadObject_default_def split_def projectKO_def + alignCheck_def alignError_def magnitudeCheck_def + unless_def) + apply (rule det_wp_pre) + apply (wp case_option_wp) + apply (clarsimp simp: is_aligned_mask[symmetric]) + apply simp + done + +lemma det_wp_getTCB [wp]: + "det_wp (tcb_at' t) (getObject t :: tcb kernel)" + supply option.case_cong[cong] + apply (simp add: getObject_def split_def) + apply (rule det_wp_pre) + apply (wp|wpc)+ + apply (clarsimp simp: obj_at'_def objBits_simps cong: conj_cong) + apply (simp add: lookupAround2_known1) + apply (rule ps_clear_lookupAround2, assumption+) + apply simp + apply (erule is_aligned_no_overflow) + apply (simp add: word_bits_def) + done + +lemma det_wp_setObject_other [wp]: + fixes ob :: "'a :: pspace_storable" + assumes x: "updateObject ob = updateObject_default ob" + shows "det_wp (obj_at' (\k::'a. 
objBits k = objBits ob) ptr) + (setObject ptr ob)" + apply (simp add: setObject_def x split_def updateObject_default_def + magnitudeCheck_def + projectKO_def2 alignCheck_def alignError_def) + apply (rule det_wp_pre) + apply (wp ) + apply (clarsimp simp: is_aligned_mask[symmetric] obj_at'_def objBits_def[symmetric] + project_inject lookupAround2_known1) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (erule is_aligned_get_word_bits) + apply (subst add_diff_eq[symmetric]) + apply (erule is_aligned_no_wrap') + apply simp + apply simp + apply fastforce + done + +lemma det_wp_setTCB [wp]: + "det_wp (tcb_at' t) (setObject t (v::tcb))" + apply (rule det_wp_pre) + apply (wp|wpc|simp)+ + apply (clarsimp simp: objBits_simps) + done + +lemma det_wp_threadGet [wp]: + "det_wp (tcb_at' t) (threadGet f t)" + apply (simp add: threadGet_def) + apply (rule det_wp_pre, wp) + apply simp + done + +lemma det_wp_threadSet [wp]: + "det_wp (tcb_at' t) (threadSet f t)" + apply (simp add: threadSet_def) + apply (rule det_wp_pre, wp) + apply simp + done + +lemma det_wp_asUser [wp]: + "det f \ det_wp (tcb_at' t) (asUser t f)" + apply (simp add: asUser_def split_def) + apply (rule det_wp_pre) + apply wp + apply (drule det_wp_det) + apply (erule det_wp_select_f) + apply wp+ + apply (rule_tac Q="\_. tcb_at' t" in hoare_post_imp) + apply simp + apply wp + apply simp + done + +(* FIXME move into Refine somewhere *) +lemma wordSize_def': + "wordSize = 8" + unfolding wordSize_def wordBits_def + by (simp add: word_size) + +lemma det_wp_getMRs: + "det_wp (tcb_at' thread and case_option \ valid_ipc_buffer_ptr' buffer) (getMRs thread buffer mi)" + apply (clarsimp simp: getMRs_def) + apply (rule det_wp_pre) + apply (wp det_mapM det_getRegister order_refl det_wp_mapM) + apply (simp add: word_size) + apply (wp asUser_inv mapM_wp' getRegister_inv) + apply clarsimp + apply (rule conjI) + apply (simp add: pointerInUserData_def wordSize_def' word_size) + apply (erule valid_ipc_buffer_ptr'D2[unfolded word_size_def, simplified]) + apply (rule word_mult_less_mono1) + apply (erule order_le_less_trans) + apply (simp add: msgMaxLength_def max_ipc_words) + apply simp + apply (simp add: max_ipc_words) + apply (simp add: is_aligned_mult_triv2 [where n = 3, simplified] word_bits_conv word_size_bits_def) + apply (erule valid_ipc_buffer_ptr_aligned_word_size_bits[simplified word_size_bits_def]) + apply (simp add: wordSize_def' is_aligned_mult_triv2 [where n = 3, simplified] word_bits_conv) + done + +end + +end diff --git a/proof/crefine/AARCH64/Detype_C.thy b/proof/crefine/AARCH64/Detype_C.thy new file mode 100644 index 0000000000..6e7d6055fb --- /dev/null +++ b/proof/crefine/AARCH64/Detype_C.thy @@ -0,0 +1,1967 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Detype_C +imports Ctac_lemmas_C TcbQueue_C +begin + +lemma typ_clear_region_out: + "x \ {p..+2 ^ bits} \ typ_clear_region p bits d x = d x" + unfolding typ_clear_region_def + by simp + +lemma typ_bytes_region_out: + "x \ {p..+2 ^ bits} \ typ_region_bytes p bits d x = d x" + unfolding typ_region_bytes_def + by simp + +lemma h_t_valid_ptr_clear_region: + fixes p :: "'a :: c_type ptr" + shows "typ_clear_region ptr bits hp,g \\<^sub>t p = + ({ptr_val p ..+ size_of (TYPE ('a))} \ {ptr ..+ 2 ^ bits} = {} \ hp,g \\<^sub>t p)" + unfolding h_t_valid_def + apply (clarsimp simp: valid_footprint_def Let_def) + apply (rule iffI) + apply clarsimp + apply (rule conjI) + apply (clarsimp 
simp: disjoint_iff_not_equal) + apply (drule intvlD) + apply (clarsimp simp: size_of_def) + apply (drule spec, drule (1) mp) + apply (clarsimp simp: typ_clear_region_def) + apply clarsimp + apply (drule spec, drule (1) mp) + apply (clarsimp simp: typ_clear_region_def split: if_split_asm) + apply clarsimp + apply (drule spec, drule (1) mp) + apply (subgoal_tac "ptr_val p + of_nat y \ {ptr..+2 ^ bits}") + apply (simp add: typ_clear_region_out) + apply clarsimp + apply (drule intvlD) + apply (clarsimp simp: disjoint_iff_not_equal ) + apply (drule_tac x = "ptr_val p + of_nat y" in bspec) + apply (rule intvlI) + apply (simp add: size_of_def) + apply (drule_tac x = "ptr + of_nat k" in bspec) + apply (erule intvlI) + apply simp + done + +lemma map_of_le: + "map_le (map_of xs) m \ distinct (map fst xs) \ \(x, v) \ set xs. m x = Some v" + apply (induct xs) + apply simp + apply clarsimp + apply (clarsimp simp: map_le_def dom_map_of_conv_image_fst) + apply (drule(1) bspec, simp) + apply (simp(no_asm_use) split: if_split_asm) + apply (fastforce simp: image_def) + apply simp + done + +lemma list_map_le_singleton: + "map_le (list_map xs) [n \ x] = (xs = [] \ n = 0 \ xs = [x])" + apply (simp add: list_map_def) + apply (rule iffI) + apply (drule map_of_le) + apply simp + apply (cases xs, simp_all add: list_map_def upt_conv_Cons + split: if_split_asm del: upt.simps) + apply (case_tac list, simp_all add: upt_conv_Cons del: upt.simps) + apply auto + done + +lemma neq_types_not_typ_slice_eq: + "\ s \ t \ \ typ_slice_t s k \ [(t, v)]" + using ladder_set_self[where s=s and n=k] + by clarsimp + +lemma valid_footprint_typ_region_bytes: + assumes neq_byte: "td \ typ_uinfo_t TYPE (word8)" + shows "valid_footprint (typ_region_bytes ptr bits hp) p td = + ({p ..+ size_td td} \ {ptr ..+ 2 ^ bits} = {} \ valid_footprint hp p td)" + apply (clarsimp simp: valid_footprint_def Let_def) + apply (rule iffI) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: disjoint_iff_not_equal) + apply (drule intvlD) + apply (clarsimp simp: size_of_def) + apply (drule spec, drule (1) mp) + apply (clarsimp simp: typ_region_bytes_def list_map_le_singleton neq_byte + neq_types_not_typ_slice_eq) + apply clarsimp + apply (drule spec, drule (1) mp) + apply (clarsimp simp: typ_region_bytes_def list_map_le_singleton neq_byte + neq_types_not_typ_slice_eq + split: if_split_asm) + apply clarsimp + apply (drule spec, drule (1) mp) + apply (subgoal_tac "p + of_nat y \ {ptr..+2 ^ bits}") + apply (simp add: typ_bytes_region_out) + apply clarsimp + apply (drule intvlD) + apply (clarsimp simp: disjoint_iff_not_equal ) + apply (drule_tac x = "p + of_nat y" in bspec) + apply (rule intvlI) + apply (simp add: size_of_def) + apply (drule_tac x = "ptr + of_nat k" in bspec) + apply (erule intvlI) + apply simp + done + +lemma h_t_valid_typ_region_bytes: + fixes p :: "'a :: c_type ptr" + assumes neq_byte: "typ_uinfo_t TYPE('a) \ typ_uinfo_t TYPE (word8)" + shows "typ_region_bytes ptr bits hp,g \\<^sub>t p = + ({ptr_val p ..+ size_of (TYPE ('a))} \ {ptr ..+ 2 ^ bits} = {} \ hp,g \\<^sub>t p)" + unfolding h_t_valid_def + by (simp add: valid_footprint_typ_region_bytes[OF neq_byte] + size_of_def) + +lemma heap_list_s_heap_list': + fixes p :: "'a :: c_type ptr" + shows "hrs_htd hp,\ \\<^sub>t p \ + heap_list_s (lift_state hp) (size_of TYPE('a)) (ptr_val p) = + heap_list (hrs_mem hp) (size_of TYPE('a)) (ptr_val p)" + apply (cases hp) + apply (simp add: hrs_htd_def hrs_mem_def heap_list_s_heap_list) + done + +lemma lift_t_typ_clear_region: + assumes doms: "\x 
:: 'a :: mem_type ptr. \ hrs_htd hp,g \\<^sub>t x; x \ - (Ptr ` {ptr ..+2 ^ bits}) \ + \ {ptr_val x..+size_of TYPE('a)} \ {ptr..+2 ^ bits} = {}" + shows "(lift_t g (hrs_htd_update (typ_clear_region ptr bits) hp) :: 'a :: mem_type typ_heap) = + lift_t g hp |` (- Ptr ` {ptr ..+2 ^ bits})" + apply (rule ext) + apply (case_tac "({ptr_val x..+size_of TYPE('a)} \ {ptr..+2 ^ bits} = {} \ hrs_htd hp,g \\<^sub>t x)") + apply (clarsimp simp add: lift_t_def lift_typ_heap_if s_valid_def h_t_valid_ptr_clear_region) + apply (subgoal_tac "x \ - Ptr ` {ptr..+2 ^ bits}") + apply clarsimp + apply (subst heap_list_s_heap_list') + apply (clarsimp simp add: hrs_htd_update h_t_valid_ptr_clear_region) + apply (erule h_t_valid_taut) + apply (subst heap_list_s_heap_list') + apply (clarsimp elim!: h_t_valid_taut) + apply simp + apply clarsimp + apply (drule (1) orthD2) + apply (erule contrapos_np, rule intvl_self) + apply (simp add: size_of_def wf_size_desc_gt) + apply (simp add: lift_t_def lift_typ_heap_if s_valid_def h_t_valid_ptr_clear_region del: disj_not1 split del: if_split) + apply (subst if_not_P) + apply simp + apply (case_tac "x \ (- Ptr ` {ptr..+2 ^ bits})") + apply (simp del: disj_not1) + apply (erule contrapos_pn) + apply simp + apply (erule doms) + apply simp + apply simp + done + +lemma image_Ptr: + "Ptr ` S = {x. ptr_val x \ S}" + apply (safe, simp_all) + apply (case_tac x, simp_all) + done + +lemma lift_t_typ_region_bytes: + assumes doms: "\x :: 'a :: mem_type ptr. \ hrs_htd hp,g \\<^sub>t x; x \ - (Ptr ` {ptr ..+2 ^ bits}) \ + \ {ptr_val x..+size_of TYPE('a)} \ {ptr..+2 ^ bits} = {}" + assumes neq_byte: "typ_uinfo_t TYPE('a) \ typ_uinfo_t TYPE (word8)" + shows "(lift_t g (hrs_htd_update (typ_region_bytes ptr bits) hp) :: 'a :: mem_type typ_heap) = + lift_t g hp |` (- Ptr ` {ptr ..+2 ^ bits})" + apply (rule ext) + apply (case_tac "({ptr_val x..+size_of TYPE('a)} \ {ptr..+2 ^ bits} = {} \ hrs_htd hp,g \\<^sub>t x)") + apply (clarsimp simp add: lift_t_def lift_typ_heap_if s_valid_def + h_t_valid_typ_region_bytes neq_byte) + apply (subgoal_tac "x \ - Ptr ` {ptr..+2 ^ bits}") + apply clarsimp + apply (subst heap_list_s_heap_list') + apply (clarsimp simp add: hrs_htd_update h_t_valid_typ_region_bytes neq_byte) + apply (erule h_t_valid_taut) + apply (subst heap_list_s_heap_list') + apply (clarsimp elim!: h_t_valid_taut) + apply simp + apply (simp add: image_Ptr) + apply (cut_tac p=x in mem_type_self) + apply blast + apply (simp add: lift_t_def lift_typ_heap_if s_valid_def neq_byte + h_t_valid_typ_region_bytes del: disj_not1 split del: if_split) + apply (clarsimp simp add: restrict_map_def) + apply (blast dest: doms) + done + +context kernel +begin + +lemma cmap_relation_h_t_valid: + fixes p :: "'a :: c_type ptr" + shows "\cmap_relation am (cslift s' :: 'a typ_heap) f rel; s' \\<^sub>c p; + \v v' x. 
\f x = p; am x = Some v; cslift s' p = Some v'; rel v v'\ \ R \ \ R" + unfolding cmap_relation_def + apply (clarsimp simp: h_t_valid_clift_Some_iff) + apply (drule equalityD2) + apply (drule (1) subsetD [OF _ domI]) + apply clarsimp + apply (drule (1) bspec [OF _ domI]) + apply clarsimp + done + +lemma valid_untyped_capE: + assumes vuc: "s \' UntypedCap d ptr bits idx" + and rl: "\is_aligned ptr bits; valid_untyped' d ptr bits idx s; ptr \ 0; bits < word_bits \ \ P" + shows P +proof (rule rl) + from vuc show al: "is_aligned ptr bits" and vu: "valid_untyped' d ptr bits idx s" and p0: "ptr \ 0" + unfolding valid_cap'_def capAligned_def by auto + + from al p0 show wb: "bits < word_bits" + by (clarsimp elim!: is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) +qed + +(* FIXME: move *) +lemma valid_untyped_pspace_no_overlap': + assumes vuc: "s \' UntypedCap d ptr bits idx" + and idx: "idx< 2^ bits" + and psp_al: "pspace_aligned' s" "pspace_distinct' s" + shows "pspace_no_overlap' (ptr + of_nat idx) bits s" + +proof - + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + from vuc have al: "is_aligned ptr bits" and vu: "valid_untyped' d ptr bits idx s" and p0: "ptr \ 0" + and wb: "bits < word_bits" + by (auto elim!: valid_untyped_capE) + + from vuc idx + have [simp]: "(ptr + (of_nat idx) && ~~ mask bits) = ptr" + apply - + apply (rule is_aligned_add_helper[THEN conjunct2]) + apply (clarsimp simp:valid_cap'_def capAligned_def)+ + apply (rule word_of_nat_less) + apply simp + done + + show "pspace_no_overlap' (ptr + of_nat idx) bits s" + using vuc idx psp_al + apply - + apply (clarsimp simp:valid_cap'_def valid_untyped'_def pspace_no_overlap'_def) + apply (drule_tac x = x in spec) + apply (frule(1) pspace_alignedD') + apply (frule(1) pspace_distinctD') + apply (clarsimp simp:ko_wp_at'_def obj_range'_def p_assoc_help) + done + qed + +lemma cmap_relation_disjoint: + fixes rel :: "'a :: pspace_storable \ 'b :: mem_type \ bool" and x :: "'b :: mem_type ptr" + assumes vuc: "s \' UntypedCap d ptr bits idx" + and invs: "invs' s" + and cm: "cmap_relation (proj \\<^sub>m (ksPSpace s)) (cslift s') Ptr rel" + and ht: "s' \\<^sub>c x" + and tp: "\ko v. proj ko = Some v \ koType TYPE('a) = koTypeOf ko" + and xv: "x \ Ptr ` {ptr..+2 ^ bits}" + and sof: "size_td (typ_info_t TYPE('b)) \ 2 ^ objBits (undefined :: 'a)" + shows "{ptr_val x..+size_of TYPE('b)} \ {ptr..+2 ^ bits} = {}" +proof - + from vuc have al: "is_aligned ptr bits" + and vu: "valid_untyped' d ptr bits idx s" and p0: "ptr \ 0" + and wb: "bits < word_bits" + and [simp]: "(ptr && ~~ mask bits) = ptr" + by (auto elim!: valid_untyped_capE) + + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + let ?ran = "{ptr..ptr + 2 ^ bits - 1}" + let ?s = "(s\ksPSpace := (ksPSpace s) |` (- ?ran)\)" + from cm ht obtain ko and v :: 'a where ks: "ksPSpace s (ptr_val x) = Some ko" and po: "proj ko = Some v" + apply (rule cmap_relation_h_t_valid) + apply (clarsimp simp: map_comp_Some_iff) + done + + let ?oran = "{ptr_val x .. ptr_val x + 2 ^ objBitsKO ko - 1}" + let ?ran' = "{ptr..+2 ^ bits}" + let ?oran' = "{ptr_val x..+2 ^ objBitsKO ko}" + + have ran' [simp]: "?ran' = ?ran" using al wb upto_intvl_eq by blast + + have oran' [simp]: "?oran' = ?oran" + proof (rule upto_intvl_eq) + from invs have "pspace_aligned' s" .. 
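+ (* pspace_aligned' from the invariants, applied to the lookup ks, gives alignment of ptr_val x at objBitsKO ko, which is what upto_intvl_eq needs to identify the two forms of the object range. *)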
+ with ks show "is_aligned (ptr_val x) (objBitsKO ko)" .. + qed + + from xv have "ptr_val x \ (- ?ran)" apply (simp only: ran' Compl_iff) + apply (erule contrapos_nn) + apply (erule image_eqI [rotated]) + apply simp + done + + hence "ksPSpace ?s (ptr_val x) = Some ko" using ks by auto + hence "?oran \ ?ran = {}" + proof (rule pspace_no_overlapD'[where p = ptr and bits = bits,simplified]) + from invs have "valid_pspace' s" .. + with vu al show "pspace_no_overlap' ptr bits ?s" using valid_untyped_no_overlap + by (clarsimp simp: mask_def add_diff_eq) + qed + + hence "?oran' \ ?ran' = {}" by simp + thus "{ptr_val x..+size_of TYPE('b)} \ ?ran' = {}" + proof (rule disjoint_subset [rotated]) + have "objBits (undefined :: 'a) = objBitsKO ko" using po + apply (simp add: objBits_def) + apply (rule objBits_type) + apply (subst iffD1 [OF project_koType]) + apply (fastforce simp add: project_inject) + apply (erule tp) + done + thus "{ptr_val x..+size_of TYPE('b)} \ ?oran'" using sof + apply - + apply (rule intvl_start_le) + apply (simp add: size_of_def) + done + qed +qed + +lemma vut_subseteq: +notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex +shows "\s \' UntypedCap d ptr bits idx;idx < 2 ^ bits\ + \ Ptr ` {ptr + of_nat idx..ptr + 2 ^ bits - 1} \ Ptr ` {ptr ..+ 2^bits}" (is "\?a1;?a2\ \ ?b \ ?c") + apply (subgoal_tac "?c = Ptr ` {ptr ..ptr + 2 ^ bits - 1}") + apply (simp add:inj_image_subset_iff) + apply (clarsimp simp:blah valid_cap'_def capAligned_def) + apply (rule is_aligned_no_wrap') + apply simp+ + apply (rule word_of_nat_less) + apply simp + apply (rule arg_cong[where f = "\x. Ptr ` x"]) + apply (rule upto_intvl_eq) + apply (clarsimp simp:valid_cap'_def capAligned_def)+ + done + +(* CLAG : IpcCancel_C *) +lemma tcb_ptr_to_ctcb_ptr_imageD: + "x \ tcb_ptr_to_ctcb_ptr ` S \ ctcb_ptr_to_tcb_ptr x \ S" + apply (erule imageE) + apply simp + done + +lemma ctcb_ptr_to_tcb_ptr_imageI: + "ctcb_ptr_to_tcb_ptr x \ S \ x \ tcb_ptr_to_ctcb_ptr ` S" + apply (drule imageI [where f = tcb_ptr_to_ctcb_ptr]) + apply simp + done + +lemma aligned_ranges_subset_or_disjointE [consumes 2, case_names disjoint subset1 subset2]: + "\is_aligned p n; is_aligned p' n'; + {p..p + 2 ^ n - 1} \ {p'..p' + 2 ^ n' - 1} = {} \ P; + {p..p + 2 ^ n - 1} \ {p'..p' + 2 ^ n' - 1} \ P; + {p'..p' + 2 ^ n' - 1} \ {p..p + 2 ^ n - 1} \ P \ \ P" + apply (drule (1) aligned_ranges_subset_or_disjoint) + apply blast + done + +lemma valid_untyped_cap_ko_at_disjoint: + assumes vu: "s \' UntypedCap d ptr bits idx" + and koat: "ko_at' ko x s" + and pv: "{x .. x + 2 ^ objBits ko - 1} \ {ptr .. ptr + 2 ^ bits - 1} \ {}" + shows "{x .. x + 2 ^ objBits ko - 1} \ {ptr .. ptr + 2 ^ bits - 1}" + +proof - + from vu have "is_aligned ptr bits" + unfolding valid_cap'_def capAligned_def by simp + + moreover from koat have "is_aligned x (objBits ko)" + by (rule obj_atE') (simp add: objBits_def project_inject) + + ultimately show ?thesis + proof (cases rule: aligned_ranges_subset_or_disjointE) + case disjoint + thus ?thesis using pv by auto + next + case subset2 thus ?thesis . + next + case subset1 + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + have "\ ko_wp_at' (\ko. 
{ptr..ptr + 2 ^ bits - 1} \ obj_range' x ko) x s" + using vu unfolding valid_cap'_def valid_untyped'_def + apply clarsimp + apply (drule_tac x = x in spec) + apply (clarsimp simp: ko_wp_at'_def mask_def add_diff_eq) + done + + with koat have "\ {ptr..ptr + 2 ^ bits - 1} \ {x..x + 2 ^ objBits ko - 1}" + apply - + apply (erule obj_atE')+ + apply (simp add: ko_wp_at'_def obj_range'_def not_less objBits_def project_inject + mask_def add_diff_eq) + done + + thus ?thesis using subset1 + by (simp add: psubset_eq del: Icc_eq_Icc) + qed +qed + +(* FIXME x64: tcb bits change *) +lemma tcb_ptr_to_ctcb_ptr_in_range: + fixes tcb :: tcb + assumes tat: "ko_at' tcb x s" + shows "ptr_val (tcb_ptr_to_ctcb_ptr x) \ {x..x + 2 ^ objBits tcb - 1}" +proof - + from tat have al: "is_aligned x tcbBlockSizeBits" by (clarsimp elim!: obj_atE' simp: objBits_simps') + hence "x \ x + 2 ^ tcbBlockSizeBits - 1" + by (rule is_aligned_no_overflow) + + moreover from al have "x \ x + 2 ^ ctcb_size_bits" + by (rule is_aligned_no_wrap') (simp add: ctcb_size_bits_def objBits_defs) + + ultimately show ?thesis + unfolding tcb_ptr_to_ctcb_ptr_def + by (simp add: ctcb_offset_defs objBits_simps' add.commute) + (subst word_plus_mono_right; simp) +qed + +lemma tcb_ptr_to_ctcb_ptr_in_range': + fixes tcb :: tcb + assumes al: "is_aligned (ctcb_ptr_to_tcb_ptr x) tcbBlockSizeBits" + shows "{ptr_val x ..+ size_of TYPE (tcb_C)} + \ {ctcb_ptr_to_tcb_ptr x..+2 ^ objBits tcb}" +proof - + from al have "ctcb_ptr_to_tcb_ptr x \ ctcb_ptr_to_tcb_ptr x + 2 ^ tcbBlockSizeBits - 1" + by (rule is_aligned_no_overflow) + + moreover from al have "ctcb_ptr_to_tcb_ptr x \ ctcb_ptr_to_tcb_ptr x + 2 ^ ctcb_size_bits" + by (rule is_aligned_no_wrap') (simp add: ctcb_size_bits_def objBits_defs) + + moreover from al have "is_aligned (ptr_val x) ctcb_size_bits" by (rule ctcb_ptr_to_tcb_ptr_aligned) + moreover from al have "{ctcb_ptr_to_tcb_ptr x..+2 ^ objBits tcb} = {ctcb_ptr_to_tcb_ptr x.. ctcb_ptr_to_tcb_ptr x + 2 ^ objBits tcb - 1}" + apply - + apply (rule upto_intvl_eq) + apply (simp add: objBits_simps) + done + + ultimately show ?thesis + unfolding ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs + apply - + apply (clarsimp simp: field_simps objBits_simps' size_of_def) + apply (drule intvlD) + apply clarsimp + apply (rule conjI) + apply (erule order_trans, erule is_aligned_no_wrap') + apply (rule of_nat_power) + apply simp + apply simp + apply (rule word_plus_mono_right) + apply (simp add: word_le_nat_alt unat_of_nat) + apply (erule is_aligned_no_wrap') + apply simp + done +qed + +lemma valid_untyped_cap_ctcb_member: + fixes tcb :: tcb + assumes vu: "s \' UntypedCap d ptr bits idx" + and koat: "ko_at' tcb x s" + and pv: "ptr_val (tcb_ptr_to_ctcb_ptr x) \ {ptr .. ptr + 2 ^ bits - 1}" + shows "x \ {ptr .. 
ptr + 2 ^ bits - 1}" + using vu +proof - + from vu koat have "{x..x + 2 ^ objBits tcb - 1} \ {ptr..ptr + 2 ^ bits - 1}" + proof (rule valid_untyped_cap_ko_at_disjoint) + from koat have "ptr_val (tcb_ptr_to_ctcb_ptr x) \ {x..x + 2 ^ objBits tcb - 1}" + by (rule tcb_ptr_to_ctcb_ptr_in_range) + thus "{x..x + 2 ^ objBits tcb - 1} \ {ptr..ptr + 2 ^ bits - 1} \ {}" using pv + apply - + apply rule + apply (drule (1) orthD1) + apply simp + done + qed + + thus ?thesis + proof (rule set_mp) + from koat have "is_aligned x (objBits tcb)" by (clarsimp elim!: obj_atE' simp: objBits_simps) + thus "x \ {x..x + 2 ^ objBits tcb - 1}" + apply (rule base_member_set [simplified field_simps]) + apply (simp add: objBits_simps' word_bits_conv) + done + qed +qed + +lemma ko_at_is_aligned' [intro?]: + "ko_at' ko p s \ is_aligned p (objBits ko)" + apply (erule obj_atE') + apply (simp add: objBits_def project_inject) + done + +lemma cmap_relation_disjoint_tcb: + fixes x :: "tcb_C ptr" + assumes vuc: "s \' UntypedCap d ptr bits idx" + and invs: "invs' s" + and cm: "cmap_relation (projectKO_opt \\<^sub>m (ksPSpace s)) (cslift s') tcb_ptr_to_ctcb_ptr ctcb_relation" + and ht: "s' \\<^sub>c x" + and xv: "x \ Ptr ` {ptr..+2 ^ bits}" + shows "{ptr_val x..+size_of TYPE(tcb_C)} \ {ptr..+2 ^ bits} = {}" +proof - + let ?ran = "{ptr..ptr + 2 ^ bits - 1}" + let ?s = "(s\ksPSpace := (ksPSpace s) |` (- ?ran)\)" + from cm ht invs obtain tcb :: tcb where koat: "ko_at' tcb (ctcb_ptr_to_tcb_ptr x) s" + apply - + apply (erule (1) cmap_relation_h_t_valid) + apply (drule (1) map_to_ko_atI') + apply clarsimp + done + + let ?oran = "{ctcb_ptr_to_tcb_ptr x .. ctcb_ptr_to_tcb_ptr x + 2 ^ objBits tcb - 1}" + let ?ran' = "{ptr..+2 ^ bits}" + let ?oran' = "{ctcb_ptr_to_tcb_ptr x..+2 ^ objBits tcb}" + + from vuc have al: "is_aligned ptr bits" and vu: "valid_untyped' d ptr bits idx s" and p0: "ptr \ 0" + and wb: "bits < word_bits" + by (auto elim!: valid_untyped_capE) + + have ran' [simp]: "?ran' = ?ran" using al wb upto_intvl_eq by blast + + have oran' [simp]: "?oran' = ?oran" + proof (rule upto_intvl_eq) + from koat show "is_aligned (ctcb_ptr_to_tcb_ptr x) (objBits tcb)" .. + qed + + show ?thesis + proof (rule disjoint_subset) + from xv koat have "\ ?oran \ ?ran" + apply - + apply (erule contrapos_nn) + apply (drule tcb_ptr_to_ctcb_ptr_in_range) + apply (rule image_eqI [where x = "ptr_val x"]) + apply simp + apply (drule (1) subsetD) + apply simp + done + + thus "{ctcb_ptr_to_tcb_ptr x..+2 ^ objBits tcb} \ {ptr..+2 ^ bits} = {}" + apply (rule contrapos_np) + apply (rule valid_untyped_cap_ko_at_disjoint [OF vuc koat]) + apply simp + done + + from koat show "{ptr_val x..+size_of TYPE(tcb_C)} \ {ctcb_ptr_to_tcb_ptr x..+2 ^ objBits tcb}" + by (metis tcb_ptr_to_ctcb_ptr_in_range' tcb_aligned' obj_at'_weakenE) + qed +qed + +lemma ctes_of_is_aligned: + fixes s :: "kernel_state" + assumes ks: "ctes_of s p = Some cte" + shows "is_aligned p (objBits cte)" +proof - + have "cte_wp_at' ((=) cte) p s" using ks by (clarsimp simp: cte_wp_at_ctes_of) + thus ?thesis + apply (simp add: cte_wp_at_cases' objBits_simps' cte_level_bits_def) + apply (erule disjE) + apply simp + apply clarsimp + supply cteSizeBits_def[simp] + apply (drule_tac y = n in aligned_add_aligned [where m = cte_level_bits, simplified cte_level_bits_def]) + apply (simp add: tcb_cte_cases_def is_aligned_def split: if_split_asm) + apply (simp add: word_bits_conv) + apply simp + done +qed + +lemma cte_wp_at_casesE' [consumes 1, case_names cte tcb]: + "\cte_wp_at' P p s; + \cte. 
\ ksPSpace s p = Some (KOCTE cte); is_aligned p cte_level_bits; P cte; ps_clear p cteSizeBits s \ \ R; + \n tcb getF setF. \ + ksPSpace s (p - n) = Some (KOTCB tcb); + is_aligned (p - n) tcbBlockSizeBits; + tcb_cte_cases n = Some (getF, setF); + P (getF tcb); ps_clear (p - n) tcbBlockSizeBits s\ \ R \ \ R" + by (fastforce simp: cte_wp_at_cases') + + +(* FIXME: MOVE *) +lemma tcb_cte_cases_in_range3: + assumes tc: "tcb_cte_cases (y - x) = Some v" + and al: "is_aligned x tcbBlockSizeBits" + shows "y + 2 ^ cteSizeBits - 1 \ x + 2 ^ tcbBlockSizeBits - 1" +proof - + note [simp] = objBits_defs ctcb_size_bits_def + from tc obtain q where yq: "y = x + q" and qv: "q \ 2 ^ ctcb_size_bits - 1" + unfolding tcb_cte_cases_def + by (simp add: diff_eq_eq split: if_split_asm) + + have "q + (2 ^ cteSizeBits - 1) \ (2 ^ ctcb_size_bits - 1) + (2 ^ cteSizeBits - 1)" using qv + by (rule word_plus_mcs_3) simp + also have "\ \ 2 ^ tcbBlockSizeBits - 1" by simp + finally have "x + (q + (2 ^ cteSizeBits - 1)) \ x + (2 ^ tcbBlockSizeBits - 1)" + apply (rule word_plus_mono_right) + apply (rule is_aligned_no_overflow' [OF al]) + done + + thus ?thesis using yq by (simp add: field_simps) +qed + + +lemma tcb_cte_in_range: + "\ ksPSpace s p = Some (KOTCB tcb); is_aligned p tcbBlockSizeBits; + tcb_cte_cases n = Some (getF, setF) \ + \ {p + n.. (p + n) + 2 ^ objBits (cte :: cte) - 1} \ {p .. p + 2 ^ objBits tcb - 1}" + apply (rule range_subsetI) + apply (erule (1) tcb_cte_cases_in_range1 [where y = "p + n" and x = p, simplified]) + apply (frule (1) tcb_cte_cases_in_range3 [where y = "p + n" and x = p, simplified]) + apply (simp add: objBits_simps) + done + +lemma tcb_cte_cases_aligned: + "\is_aligned p tcbBlockSizeBits; tcb_cte_cases n = Some (getF, setF)\ + \ is_aligned (p + n) (objBits (cte :: cte))" + apply (erule aligned_add_aligned) + apply (simp add: tcb_cte_cases_def is_aligned_def objBits_simps' split: if_split_asm) + apply (simp add: objBits_simps') + done + +lemma tcb_cte_in_range': + "\ ksPSpace s p = Some (KOTCB tcb); is_aligned p tcbBlockSizeBits; + tcb_cte_cases n = Some (getF, setF) \ + \ {p + n..+2 ^ objBits (cte :: cte)} \ {p ..+ 2 ^ objBits tcb}" + apply (subst upto_intvl_eq) + apply (erule (1) tcb_cte_cases_aligned) + apply (simp add: objBits_def) + apply (subst upto_intvl_eq) + apply (simp add: objBits_simps) + apply (simp add: objBits_def) + apply (erule (2) tcb_cte_in_range[unfolded objBits_def, simplified]) + done + +(* clagged from above :( Were I smarter or if I cared more I could probably factor out more \*) +lemma cmap_relation_disjoint_cte: + assumes vuc: "s \' UntypedCap d ptr bits idx" + and invs: "invs' s" + and cm: "cmap_relation (ctes_of s) (cslift s') Ptr ccte_relation" + and ht: "s' \\<^sub>c (x :: cte_C ptr)" + and xv: "x \ Ptr ` {ptr..+2 ^ bits}" + shows "{ptr_val x..+size_of TYPE(cte_C)} \ {ptr..+2 ^ bits} = {}" +proof - + from vuc have al: "is_aligned ptr bits" and vu: "valid_untyped' d ptr bits idx s" and p0: "ptr \ 0" + and wb: "bits < word_bits" and [simp]: "(ptr && ~~ mask bits) = ptr" + by (auto elim!: valid_untyped_capE) + + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + let ?ran = "{ptr..ptr + 2 ^ bits - 1}" + let ?s = "(s\ksPSpace := (ksPSpace s) |` (- ?ran)\)" + from cm ht obtain cte where ks: "ctes_of s (ptr_val x) = Some cte" + apply (rule cmap_relation_h_t_valid) + apply (clarsimp simp: map_comp_Some_iff) + done + + let ?oran = "{ptr_val x .. 
ptr_val x + 2 ^ objBits cte - 1}" + + let ?ran' = "{ptr..+2 ^ bits}" + let ?oran' = "{ptr_val x..+2 ^ objBits cte}" + + have ran' [simp]: "?ran' = ?ran" using al wb upto_intvl_eq by auto + + have oran' [simp]: "?oran' = ?oran" + proof (rule upto_intvl_eq) + from ks show "is_aligned (ptr_val x) (objBits cte)" + by (rule ctes_of_is_aligned) + qed + + from xv have px: "ptr_val x \ (- ?ran)" + apply (simp only: ran' Compl_iff) + apply (erule contrapos_nn) + apply (erule image_eqI [rotated]) + apply simp + done + + from ks have "cte_wp_at' ((=) cte) (ptr_val x) s" by (clarsimp simp: cte_wp_at_ctes_of) + thus ?thesis + proof (cases rule: cte_wp_at_casesE') + case (cte cte') + + hence "ksPSpace ?s (ptr_val x) = Some (injectKOS cte)" using px by simp + hence "?oran \ ?ran = {}" + unfolding objBits_def + proof (rule pspace_no_overlapD'[where p = ptr and bits = bits,simplified]) + from invs have "valid_pspace' s" .. + with vu al show "pspace_no_overlap' ptr bits ?s" using valid_untyped_no_overlap + by (clarsimp simp: mask_def add_diff_eq) + qed + hence "?oran' \ ?ran' = {}" by simp + thus ?thesis by (simp add: objBits_simps' size_of_def) + next + case (tcb n tcb _ _) + + hence koat: "ko_at' tcb (ptr_val x - n) s" + apply - + apply (erule obj_atI', simp_all add: objBits_simps) + done + + let ?tran = "{ptr_val x - n .. (ptr_val x - n) + 2 ^ objBits tcb - 1}" + have ot: "?oran \ ?tran" + by (rule tcb_cte_in_range[where p = "ptr_val x - n" and n = "n", + simplified diff_add_cancel]) fact+ + + also + from xv have "\ \ \ ?ran" + proof (rule contrapos_nn) + assume "?tran \ ?ran" + with ot have "?oran \ ?ran" by (rule order_trans) + hence "ptr_val x \ ?ran" + proof (rule subsetD) + from tcb have "is_aligned (ptr_val x) (objBits cte)" + apply - + apply (drule (1) tcb_cte_cases_aligned) + apply simp + done + hence "ptr_val x \ ptr_val x + 2 ^ objBits cte - 1" + by (rule is_aligned_no_overflow) + thus "ptr_val x \ ?oran" by (rule first_in_uptoD) + qed + thus "x \ Ptr ` {ptr..+2 ^ bits}" + unfolding ran' + by (rule image_eqI [where x = "ptr_val x", rotated]) simp + qed + + hence "?tran \ ?ran' = {}" + apply (rule contrapos_np) + apply (rule valid_untyped_cap_ko_at_disjoint [OF vuc koat]) + apply simp + done + + finally have "{ptr_val x..+2 ^ objBits cte} \ ?ran' = {}" + using ot unfolding oran' + by blast + thus ?thesis by (simp add: objBits_simps' size_of_def) + qed +qed + +lemma cmap_relation_disjoint_user_data: + fixes x :: "user_data_C ptr" + assumes vuc: "s \' UntypedCap d ptr bits idx" + and invs: "invs' s" + and cm: "cmap_relation (heap_to_user_data (ksPSpace s) (underlying_memory (ksMachineState s))) (cslift s') Ptr cuser_user_data_relation" + and ht: "s' \\<^sub>c x" + and xv: "x \ Ptr ` {ptr..+2 ^ bits}" + shows "{ptr_val x..+size_of TYPE(user_data_C)} \ {ptr..+2 ^ bits} = {}" +proof - + from vuc have al: "is_aligned ptr bits" and vu: "valid_untyped' d ptr bits idx s" and p0: "ptr \ 0" + and wb: "bits < word_bits" and [simp]:"ptr && ~~ mask bits = ptr" + by (auto elim!: valid_untyped_capE) + + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + let ?ran = "{ptr..ptr + 2 ^ bits - 1}" + let ?s = "(s\ksPSpace := (ksPSpace s) |` (- ?ran)\)" + from cm ht have ks: "ksPSpace s (ptr_val x) = Some KOUserData" + apply (rule cmap_relation_h_t_valid) + apply (clarsimp simp: map_comp_Some_iff heap_to_user_data_def Let_def projectKO_opts_defs) + apply (case_tac k') + apply simp_all + done + + let ?oran = "{ptr_val x .. 
ptr_val x + 2 ^ objBitsKO KOUserData - 1}" + + let ?ran' = "{ptr..+2 ^ bits}" + let ?oran' = "{ptr_val x..+2 ^ objBitsKO KOUserData}" + + have ran' [simp]: "?ran' = ?ran" using al wb upto_intvl_eq by auto + + have oran' [simp]: "?oran' = ?oran" + proof (rule upto_intvl_eq) + from invs have "pspace_aligned' s" .. + with ks show "is_aligned (ptr_val x) (objBitsKO KOUserData)" .. + qed + + from xv have "ptr_val x \ (- ?ran)" + apply (simp only: ran' Compl_iff) + apply (erule contrapos_nn) + apply (erule image_eqI [rotated]) + apply simp + done + + hence "ksPSpace ?s (ptr_val x) = Some KOUserData" using ks by simp + hence "?oran \ ?ran = {}" + proof (rule pspace_no_overlapD'[where p = ptr and bits = bits,simplified]) + from invs have "valid_pspace' s" .. + with vu al show "pspace_no_overlap' ptr bits ?s" using valid_untyped_no_overlap + by (clarsimp simp: mask_def add_diff_eq) + qed + + hence "?oran' \ ?ran' = {}" by simp + thus "{ptr_val x..+size_of TYPE(user_data_C)} \ ?ran' = {}" + proof (rule disjoint_subset [rotated]) + show "{ptr_val x..+size_of TYPE(user_data_C)} \ ?oran'" + apply - + apply (rule intvl_start_le) + apply (simp add: size_of_def objBits_simps pageBits_def) + done + qed +qed + +lemma cmap_relation_disjoint_device_data: + fixes x :: "user_data_device_C ptr" + assumes vuc: "s \' UntypedCap d ptr bits idx" + and invs: "invs' s" + and cm: "cmap_relation (heap_to_device_data (ksPSpace s) (underlying_memory (ksMachineState s))) (cslift s') Ptr cuser_user_data_device_relation" + and ht: "s' \\<^sub>c x" + and xv: "x \ Ptr ` {ptr..+2 ^ bits}" + shows "{ptr_val x..+size_of TYPE(user_data_device_C)} \ {ptr..+2 ^ bits} = {}" +proof - + from vuc have al: "is_aligned ptr bits" and vu: "valid_untyped' d ptr bits idx s" and p0: "ptr \ 0" + and wb: "bits < word_bits" and [simp]:"ptr && ~~ mask bits = ptr" + by (auto elim!: valid_untyped_capE) + + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + let ?ran = "{ptr..ptr + 2 ^ bits - 1}" + let ?s = "(s\ksPSpace := (ksPSpace s) |` (- ?ran)\)" + from cm ht have ks: "ksPSpace s (ptr_val x) = Some KOUserDataDevice" + apply (rule cmap_relation_h_t_valid) + apply (clarsimp simp: map_comp_Some_iff heap_to_device_data_def Let_def projectKO_opts_defs) + apply (case_tac k') + apply simp_all + done + + let ?oran = "{ptr_val x .. ptr_val x + 2 ^ objBitsKO KOUserDataDevice - 1}" + + let ?ran' = "{ptr..+2 ^ bits}" + let ?oran' = "{ptr_val x..+2 ^ objBitsKO KOUserDataDevice}" + + have ran' [simp]: "?ran' = ?ran" using al wb upto_intvl_eq by auto + + have oran' [simp]: "?oran' = ?oran" + proof (rule upto_intvl_eq) + from invs have "pspace_aligned' s" .. + with ks show "is_aligned (ptr_val x) (objBitsKO KOUserDataDevice)" .. + qed + + from xv have "ptr_val x \ (- ?ran)" + apply (simp only: ran' Compl_iff) + apply (erule contrapos_nn) + apply (erule image_eqI [rotated]) + apply simp + done + + hence "ksPSpace ?s (ptr_val x) = Some KOUserDataDevice" using ks by simp + hence "?oran \ ?ran = {}" + proof (rule pspace_no_overlapD'[where p = ptr and bits = bits,simplified]) + from invs have "valid_pspace' s" .. 
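+ (* valid_pspace' from the invariants, combined with the untyped validity vu and the alignment al, discharges pspace_no_overlap' for the state restricted to the complement of the untyped range, via valid_untyped_no_overlap. *)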
+ with vu al show "pspace_no_overlap' ptr bits ?s" using valid_untyped_no_overlap + by (clarsimp simp: mask_def add_diff_eq) + qed + + hence "?oran' \ ?ran' = {}" by simp + thus "{ptr_val x..+size_of TYPE(user_data_device_C)} \ ?ran' = {}" + proof (rule disjoint_subset [rotated]) + show "{ptr_val x..+size_of TYPE(user_data_device_C)} \ ?oran'" + apply - + apply (rule intvl_start_le) + apply (simp add: size_of_def objBits_simps pageBits_def) + done + qed +qed + + +lemma tcb_queue_relation_live_restrict: + assumes vuc: "s \' capability.UntypedCap d ptr bits idx" + and rel: "\t \ set q. tcb_at' t s" + and live: "\t \ set q. ko_wp_at' live' t s" + and rl: "\(p :: machine_word) P. ko_wp_at' P p s \ (\ko. P ko \ live' ko) \ p \ {ptr..ptr + 2 ^ bits - 1}" + shows "tcb_queue_relation' getNext getPrev (cm |` (- Ptr ` {ptr..+2 ^ bits})) q cend chead = + tcb_queue_relation' getNext getPrev cm q cend chead" +proof (rule tcb_queue_relation'_cong [OF refl refl refl]) + fix p + assume "p \ tcb_ptr_to_ctcb_ptr ` set q" + + hence pin: "ctcb_ptr_to_tcb_ptr p \ set q" by (rule tcb_ptr_to_ctcb_ptr_imageD) + + with rel have "tcb_at' (ctcb_ptr_to_tcb_ptr p) s" .. + then obtain tcb :: tcb where koat: "ko_at' tcb (ctcb_ptr_to_tcb_ptr p) s" + by (clarsimp simp: obj_at'_real_def ko_wp_at'_def) + + from live pin have "ko_wp_at' live' (ctcb_ptr_to_tcb_ptr p) s" .. + hence notin: "(ctcb_ptr_to_tcb_ptr p) \ {ptr..ptr + 2 ^ bits - 1}" + by (fastforce intro!: rl [rule_format]) + + from vuc have al: "is_aligned ptr bits" and wb: "bits < word_bits" + by (auto elim!: valid_untyped_capE) + + hence ran': " {ptr..+2 ^ bits} = {ptr..ptr + 2 ^ bits - 1}" by (simp add: upto_intvl_eq) + + hence "p \ - Ptr ` {ptr..+2 ^ bits}" using vuc koat notin + apply - + apply (erule contrapos_np) + apply (erule (1) valid_untyped_cap_ctcb_member) + apply fastforce + done + + thus "(cm |` (- Ptr ` {ptr..+2 ^ bits})) p = cm p" by simp +qed + +fun + epQ :: "endpoint \ machine_word list" + where + "epQ IdleEP = []" + | "epQ (RecvEP ts) = ts" + | "epQ (SendEP ts) = ts" + +lemma ep_queue_live: + assumes invs: "invs' s" + and koat: "ko_at' ep p s" + shows "\t \ set (epQ ep). ko_wp_at' live' t s" +proof + fix t + assume tin: "t \ set (epQ ep)" + + from invs koat tin show "ko_wp_at' live' t s" + apply - + apply (drule sym_refs_ko_atD') + apply clarsimp + apply (cases ep) + by (auto elim!: ko_wp_at'_weakenE [OF _ refs_of_live']) +qed + +fun + ntfnQ :: "ntfn \ machine_word list" + where + "ntfnQ (WaitingNtfn ts) = ts" + | "ntfnQ _ = []" + +lemma ntfn_queue_live: + assumes invs: "invs' s" + and koat: "ko_at' ntfn p s" + shows "\t \ set (ntfnQ (ntfnObj ntfn)). ko_wp_at' live' t s" +proof + fix t + assume tin: "t \ set (ntfnQ (ntfnObj ntfn))" + + from invs koat tin show "ko_wp_at' live' t s" + apply - + apply (drule sym_refs_ko_atD') + apply clarsimp + apply (cases "ntfnObj ntfn") + apply (auto elim!: ko_wp_at'_weakenE [OF _ refs_of_live'] + dest!: bspec) + done +qed + +lemma cendpoint_relation_restrict: + assumes vuc: "s \' capability.UntypedCap d ptr bits idx" + and invs: "invs' s" + and rl: "\(p :: machine_word) P. ko_wp_at' P p s \ (\ko. P ko \ live' ko) \ p \ {ptr..ptr + 2 ^ bits - 1}" + and meps: "map_to_eps (ksPSpace s) p = Some ep" + shows "cendpoint_relation (cslift s' |` (- Ptr ` {ptr..+2 ^ bits})) ep b = cendpoint_relation (cslift s') ep b" +proof - + from invs have "valid_objs' s" .. 
+ with meps have vep: "valid_ep' ep s" + apply - + apply (clarsimp simp add: map_comp_Some_iff) + apply (erule (1) valid_objsE') + apply (simp add: valid_obj'_def) + done + + from meps have koat: "ko_at' ep p s" by (rule map_to_ko_atI') fact+ + + show ?thesis + proof (cases ep) + case (RecvEP ts) + + from vep RecvEP have tats: "\t \ set ts. tcb_at' t s" + by (simp add: valid_ep'_def) + + have tlive: "\t\set ts. ko_wp_at' live' t s" using RecvEP invs koat + apply - + apply (drule (1) ep_queue_live) + apply simp + done + + show ?thesis using RecvEP + unfolding cendpoint_relation_def Let_def + by (simp add: tcb_queue_relation_live_restrict [OF vuc tats tlive rl]) + next + case (SendEP ts) + + from vep SendEP have tats: "\t \ set ts. tcb_at' t s" + by (simp add: valid_ep'_def) + + have tlive: "\t\set ts. ko_wp_at' live' t s" using SendEP invs koat + apply - + apply (drule (1) ep_queue_live) + apply simp + done + + show ?thesis using SendEP + unfolding cendpoint_relation_def Let_def + by (simp add: tcb_queue_relation_live_restrict [OF vuc tats tlive rl]) + next + case IdleEP + thus ?thesis unfolding cendpoint_relation_def Let_def by simp + qed +qed + +lemma cnotification_relation_restrict: + assumes vuc: "s \' capability.UntypedCap d ptr bits idx" + and invs: "invs' s" + and rl: "\(p :: machine_word) P. ko_wp_at' P p s \ (\ko. P ko \ live' ko) \ p \ {ptr..ptr + 2 ^ bits - 1}" + and meps: "map_to_ntfns (ksPSpace s) p = Some ntfn" + shows "cnotification_relation (cslift s' |` (- Ptr ` {ptr..+2 ^ bits})) ntfn b = cnotification_relation (cslift s') ntfn b" +proof - + from invs have "valid_objs' s" .. + with meps have vep: "valid_ntfn' ntfn s" + apply - + apply (clarsimp simp add: map_comp_Some_iff) + apply (erule (1) valid_objsE') + apply (simp add: valid_obj'_def) + done + + from meps have koat: "ko_at' ntfn p s" by (rule map_to_ko_atI') fact+ + + show ?thesis + proof (cases "ntfnObj ntfn") + case (WaitingNtfn ts) + + with vep have tats: "\t \ set ts. tcb_at' t s" + by (simp add: valid_ntfn'_def) + + have tlive: "\t\set ts. 
ko_wp_at' live' t s" using WaitingNtfn invs koat + apply - + apply (drule (1) ntfn_queue_live) + apply simp + done + + show ?thesis using WaitingNtfn + unfolding cnotification_relation_def Let_def + by (simp add: tcb_queue_relation_live_restrict [OF vuc tats tlive rl]) + qed (simp_all add: cnotification_relation_def Let_def) +qed + +declare bij_Ptr[simp] + +lemma surj_tcb_ptr_to_ctcb_ptr [simp]: + "surj tcb_ptr_to_ctcb_ptr" + by (rule surjI [where f = "ctcb_ptr_to_tcb_ptr"], simp) + +lemma bij_tcb_ptr_to_ctcb_ptr [simp]: + "bij tcb_ptr_to_ctcb_ptr" by (simp add: bijI) + + +lemma inj_ctcb_ptr_to_tcb_ptr [simp]: + "inj ctcb_ptr_to_tcb_ptr" + apply (rule injI) + apply (simp add: ctcb_ptr_to_tcb_ptr_def) + done + +lemma surj_ctcb_ptr_to_tcb_ptr [simp]: + "surj ctcb_ptr_to_tcb_ptr" + by (rule surjI [where f = "tcb_ptr_to_ctcb_ptr"], simp) + +lemma bij_ctcb_ptr_to_tcb_ptr [simp]: + "bij ctcb_ptr_to_tcb_ptr" by (simp add: bijI) + +lemma cmap_relation_restrict_both: + "\ cmap_relation am cm f rel; bij f\ \ cmap_relation (am |` (- S)) (cm |` (- f ` S)) f rel" + unfolding cmap_relation_def + apply (rule conjI) + apply (clarsimp simp: image_Int bij_image_Compl_eq bij_def) + apply (rule ballI) + apply (clarsimp simp: image_iff2 bij_def) + done + +lemma cmap_relation_restrict_both_proj: + "\ cmap_relation (projectKO_opt \\<^sub>m am) cm f rel; bij f\ + \ cmap_relation (projectKO_opt \\<^sub>m (am |` (- S))) (cm |` (- f ` S)) f rel" + unfolding cmap_relation_def + apply (rule conjI) + apply (rule equalityI) + apply rule + apply (clarsimp simp: map_comp_restrict_map_Some_iff image_iff2 bij_def) + apply (erule (1) cmap_domE1) + apply simp + apply (clarsimp simp: image_Int bij_image_Compl_eq bij_def) + apply (erule (1) cmap_domE2) + apply (clarsimp simp: image_Int bij_image_Compl_eq bij_def map_comp_restrict_map_Some_iff intro!: imageI) + apply clarsimp + apply (subst restrict_in) + apply (clarsimp simp add: image_iff map_comp_restrict_map_Some_iff inj_eq bij_def) + apply (clarsimp simp add: map_comp_restrict_map_Some_iff) + apply (drule (1) bspec [OF _ domI]) + apply simp + done + +declare not_snd_assert[simp] + +lemma ccorres_stateAssert_fwd: + "ccorres r xf (P and R) P' hs b c \ ccorres r xf P P' hs (stateAssert R vs >>= (\_. b)) c" + apply (rule ccorresI') + apply (simp add: stateAssert_def bind_assoc) + apply (drule not_snd_bindD) + apply (fastforce simp add: in_monad) + apply clarsimp + apply (frule not_snd_bindI1) + apply simp + apply (erule (1) ccorresE) + apply simp + apply assumption + apply assumption + apply assumption + apply (fastforce simp: in_monad') + done + +(* FIXME: generalise to above *) +lemma tcb_ptr_to_ctcb_ptr_comp: + "tcb_ptr_to_ctcb_ptr = Ptr o (\p. 
p + ctcb_offset)" + apply (rule ext) + apply (simp add: tcb_ptr_to_ctcb_ptr_def) + done + +lemma tcb_ptr_to_ctcb_ptr_to_Ptr: + "tcb_ptr_to_ctcb_ptr ` {p..+b} = Ptr ` {p + ctcb_offset..+b}" + apply (simp add: tcb_ptr_to_ctcb_ptr_comp image_comp [symmetric]) + apply (rule equalityI) + apply clarsimp + apply (rule imageI) + apply (drule intvlD) + apply clarsimp + apply (subgoal_tac "p + ctcb_offset + of_nat k \ {p + ctcb_offset..+b}") + apply (simp add: field_simps) + apply (erule intvlI) + apply clarsimp + apply (drule intvlD) + apply clarsimp + apply (rule image_eqI) + apply simp + apply (erule intvlI) + done + + +lemma valid_untyped_cap_ctcb_member': + fixes tcb :: tcb + shows "\s \' capability.UntypedCap d ptr bits idx; ko_at' tcb x s; + ptr_val (tcb_ptr_to_ctcb_ptr x) \ {ptr..+2 ^ bits}\ + \ x \ {ptr..+ 2 ^ bits}" + apply (rule valid_untyped_capE, assumption) + apply (simp only: upto_intvl_eq) + apply (erule (2) valid_untyped_cap_ctcb_member) + done + +lemma cpspace_tcb_relation_address_subset: + assumes vuc: "s \' capability.UntypedCap d ptr bits idx" + and invs: "invs' s" + and ctcbrel: "cpspace_tcb_relation (ksPSpace s) (t_hrs_' (globals s'))" + shows "cmap_relation (map_to_tcbs (ksPSpace s |` (- {ptr..+2 ^ bits - unat ctcb_offset}))) + (cslift s' |` (- tcb_ptr_to_ctcb_ptr ` {ptr..+2 ^ bits - unat ctcb_offset})) tcb_ptr_to_ctcb_ptr + ctcb_relation + = cmap_relation (map_to_tcbs (ksPSpace s |` (- {ptr..+2 ^ bits}))) + (cslift s' |` (- Ptr ` {ptr..+2 ^ bits})) tcb_ptr_to_ctcb_ptr + ctcb_relation" (is "cmap_relation ?am ?cm tcb_ptr_to_ctcb_ptr ctcb_relation + = cmap_relation ?am' ?cm' tcb_ptr_to_ctcb_ptr ctcb_relation") +proof (rule cmap_relation_cong) + from vuc have al: "is_aligned ptr bits" and wb: "bits < word_bits" by (auto elim: valid_untyped_capE) + + have r1: "\x tcb. 
\ (map_to_tcbs (ksPSpace s) x) = Some tcb; x \ {ptr..+2 ^ bits}\ + \ x \ {ptr..+2 ^ bits - unat ctcb_offset}" + proof (subst intvl_aligned_top [where 'a=machine_word_len, folded word_bits_def, OF _ al _ _ wb]) + fix x tcb + assume mtcb: "map_to_tcbs (ksPSpace s) x = Some tcb" and xin: "x \ {ptr..+2 ^ bits}" + + from mtcb invs have koat: "ko_at' tcb x s" + by (fastforce simp: map_comp_Some_iff intro: aligned_distinct_obj_atI') + + thus "is_aligned x (objBits tcb)" + by (clarsimp elim!: obj_atE' simp: objBits_def) + + (* FIXME: generalise *) + with xin koat show "objBits tcb \ bits" using wb + apply - + apply (frule is_aligned_no_overflow) + apply (drule valid_untyped_cap_ko_at_disjoint [OF vuc]) + apply (erule contrapos_pn) + apply (subst upto_intvl_eq [OF al]) + apply (erule orthD1) + apply simp + apply (drule (1) range_subsetD) + apply clarsimp + apply (drule (1) word_sub_mono2 [where b = "(2 :: machine_word) ^ objBits tcb - 1" + and d = "2 ^ bits - 1", simplified field_simps]) + apply (subst field_simps [symmetric], subst olen_add_eqv [symmetric]) + apply (simp add: field_simps) + apply (subst field_simps [symmetric], subst olen_add_eqv [symmetric]) + apply (rule is_aligned_no_overflow' [OF al]) + apply (subgoal_tac "(2 :: machine_word) ^ objBits tcb \ 0") + apply (simp add: word_le_nat_alt unat_minus_one le_diff_iff word_bits_def) + apply (simp add: objBits_simps) + done + + show "unat ctcb_offset < 2 ^ objBits tcb" + by (fastforce simp add: ctcb_offset_defs objBits_simps' project_inject) + + show "x \ {ptr..+2 ^ bits}" by fact + qed + + show "dom ?am = dom ?am'" + apply (rule dom_eqI) + apply (clarsimp dest!: r1 simp: map_comp_restrict_map_Some_iff) + apply (clarsimp dest!: intvl_mem_weaken simp: map_comp_restrict_map_Some_iff) + done + + let ?ran = "{ptr..+2 ^ bits}" + let ?small_ran = "{ptr..+2 ^ bits - unat ctcb_offset}" + + show "dom ?cm = dom ?cm'" + proof (rule dom_eqI) + fix x y + assume "?cm' x = Some y" + hence cl: "cslift s' x = Some y" and xni: "x \ Ptr ` ?ran" + by (auto simp: restrict_map_Some_iff) + + with ctcbrel obtain x' tcb where mt: "map_to_tcbs (ksPSpace s) x' = Some tcb" + and xv: "x = tcb_ptr_to_ctcb_ptr x'" + by (fastforce elim!: cmap_relation_h_t_valid simp add: h_t_valid_clift_Some_iff) + + have "(cslift s' |` (- tcb_ptr_to_ctcb_ptr ` ?small_ran)) x = Some y" + proof (subst restrict_in) + show "x \ - tcb_ptr_to_ctcb_ptr ` ?small_ran" using xni + proof (rule contrapos_np) + assume "x \ - tcb_ptr_to_ctcb_ptr ` ?small_ran" + hence "x \ tcb_ptr_to_ctcb_ptr ` ?small_ran" by simp + hence "x \ Ptr ` {ptr + ctcb_offset ..+ 2 ^ bits - unat ctcb_offset}" by (simp add: tcb_ptr_to_ctcb_ptr_to_Ptr) + thus "x \ Ptr ` ?ran" + by (clarsimp intro!: imageI elim!: intvl_plus_sub_offset) + qed + qed fact + thus "\y. (cslift s' |` (- tcb_ptr_to_ctcb_ptr ` ?small_ran)) x = Some y" .. 
+ next + fix x y + assume "?cm x = Some y" + hence cl: "cslift s' x = Some y" and xni: "x \ tcb_ptr_to_ctcb_ptr ` ?small_ran" + by (auto simp: restrict_map_Some_iff) + + with ctcbrel obtain x' tcb where mt: "map_to_tcbs (ksPSpace s) x' = Some tcb" and xv: "x = tcb_ptr_to_ctcb_ptr x'" + by (fastforce elim!: cmap_relation_h_t_valid simp add: h_t_valid_clift_Some_iff) + + from mt invs have koat: "ko_at' tcb x' s" by (rule map_to_ko_atI') + + have "(cslift s' |` (- Ptr ` ?ran)) x = Some y" + proof (subst restrict_in) + show "x \ - Ptr ` ?ran" using xni + apply (rule contrapos_np) + apply (simp add: xv) + apply (rule imageI) + apply (rule r1 [OF mt]) + apply (rule valid_untyped_cap_ctcb_member' [OF vuc koat]) + apply (erule imageE) + apply simp + done + qed fact + thus "\y. (cslift s' |` (- Ptr ` ?ran)) x = Some y" .. + qed +qed (clarsimp simp: map_comp_Some_iff restrict_map_Some_iff) + +lemma heap_to_user_data_restrict: + "heap_to_user_data (mp |` S) bhp = (heap_to_user_data mp bhp |` S)" + unfolding heap_to_user_data_def + apply (rule ext) + apply (case_tac "p \ S") + apply (simp_all add: Let_def map_comp_def split: option.splits) + done + +lemma heap_to_device_data_restrict: + "heap_to_device_data (mp |` S) bhp = (heap_to_device_data mp bhp |` S)" + unfolding heap_to_device_data_def + apply (rule ext) + apply (case_tac "p \ S") + apply (simp_all add: Let_def map_comp_def split: option.splits) + done + +lemma ccorres_stateAssert_after: + assumes "ccorres r xf (P and (\s. (\(rv,s') \ fst (f s). R s'))) P' hs f c" + shows "ccorres r xf P P' hs (do _ \ f; stateAssert R vs od) c" using assms + apply (clarsimp simp: ccorres_underlying_def split: xstate.splits) + apply (drule snd_stateAssert_after) + apply clarsimp + apply (drule (1) bspec) + apply (clarsimp simp: split_def) + apply (rule conjI) + apply clarsimp + apply (rename_tac s) + apply (erule_tac x=n in allE) + apply (erule_tac x="Normal s" in allE) + apply clarsimp + apply (simp add: bind_def stateAssert_def get_def assert_def) + apply (rule bexI) + prefer 2 + apply assumption + apply (clarsimp simp: return_def fail_def) + apply fastforce + apply fastforce + done + +lemma word_add_offset_pageBits_in_S: + assumes v: "\v. v < 2 ^ pageBits \ (x + v \ S) = ((x :: machine_word) \ S)" + assumes n: "n < 8" + shows "(x + ucast (y::9 word) * 8 + n \ S) = (x \ S)" + apply (simp add: add.assoc) + apply (subst v) + apply (rule word_add_offset_less[where n=3 and m=9, simplified], rule n) + apply (rule less_le_trans, rule ucast_less; simp) + apply (simp add: pageBits_def) + apply (rule less_le_trans, rule ucast_less; simp) + apply (simp add: pageBits_def) + apply (rule refl) + done + +lemma heap_to_user_data_update_region: + assumes foo: "\x y v. \ map_to_user_data psp x = Some y; + v < 2 ^ pageBits \ \ (x + v \ S) = (x \ S)" + shows + "heap_to_user_data psp (\x. if x \ S then v else f x) + = (\x. 
if x \ S \ dom (map_to_user_data psp) then Some (K (word_rcat [v,v,v,v,v,v,v,v])) + else heap_to_user_data psp f x)" + apply (rule ext) + apply (simp add: heap_to_user_data_def Let_def) + apply (rule conjI) + apply clarsimp + apply (rule ext) + apply (clarsimp simp: byte_to_word_heap_def Let_def foo + word_add_offset_pageBits_in_S + word_add_offset_pageBits_in_S[where n=0, simplified]) + apply clarsimp + apply (case_tac "map_to_user_data psp x"; clarsimp) + apply (rule ext) + apply (clarsimp simp: byte_to_word_heap_def Let_def foo + word_add_offset_pageBits_in_S + word_add_offset_pageBits_in_S[where n=0, simplified]) + done + +lemma heap_to_device_data_update_region: + assumes foo: "\x y v. \ map_to_user_data_device psp x = Some y; + v < 2 ^ pageBits \ \ (x + v \ S) = (x \ S)" + shows + "heap_to_device_data psp (\x. if x \ S then v else f x) + = (\x. if x \ S \ dom (map_to_user_data_device psp) then Some (K (word_rcat [v,v,v,v,v,v,v,v])) + else heap_to_device_data psp f x)" + apply (rule ext) + apply (simp add: heap_to_device_data_def Let_def) + apply (rule conjI) + apply clarsimp + apply (rule ext) + apply (clarsimp simp: byte_to_word_heap_def Let_def foo + word_add_offset_pageBits_in_S + word_add_offset_pageBits_in_S[where n=0, simplified]) + apply clarsimp + apply (case_tac "map_to_user_data_device psp x"; clarsimp) + apply (rule ext) + apply (clarsimp simp: byte_to_word_heap_def Let_def foo + word_add_offset_pageBits_in_S + word_add_offset_pageBits_in_S[where n=0, simplified]) + done + +lemma ksPSpace_ksMSu_comm: + "ksPSpace_update f (ksMachineState_update g s) = + ksMachineState_update g (ksPSpace_update f s)" + by simp + +lemma ksPSpace_update_ext: + "(\s. s\ksPSpace := f (ksPSpace s)\) = (ksPSpace_update f)" + by (rule ext) simp + +lemma hrs_ghost_update_comm: + "(t_hrs_'_update f \ ghost'state_'_update g) = + (ghost'state_'_update g \ t_hrs_'_update f)" + by (rule ext) simp + +lemma htd_safe_typ_clear_region: + "htd_safe S htd \ htd_safe S (typ_clear_region ptr bits htd)" + apply (clarsimp simp: htd_safe_def dom_s_def typ_clear_region_def) + apply (simp add: subset_iff) + apply blast + done + +lemma htd_safe_typ_region_bytes: + "htd_safe S htd \ {ptr ..+ 2 ^ bits} \ S \ htd_safe S (typ_region_bytes ptr bits htd)" + apply (clarsimp simp: htd_safe_def dom_s_def typ_region_bytes_def) + apply (simp add: subset_iff) + apply blast + done + +lemma untyped_cap_rf_sr_ptr_bits_domain: + "cte_wp_at' (\cte. cteCap cte = capability.UntypedCap d ptr bits idx) p s + \ invs' s \ (s, s') \ rf_sr + \ {ptr..+2 ^ bits} \ domain" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid', clarsimp) + apply (drule valid_global_refsD', clarsimp) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + valid_cap'_def capAligned_def) + apply (simp only: upto_intvl_eq) + apply blast + done + +lemma distinct_aligned_addresses_accumulate: + "is_aligned p n \ is_aligned ptr bits + \ n \ m \ n < size p \ m \ bits + \ (\y<2 ^ (n - m). p + (y << m) \ {ptr..ptr + 2 ^ bits - 1}) + \ {p .. 
p + 2 ^ n - 1} \ {ptr..ptr + 2 ^ bits - 1} = {}" + apply safe + apply (simp only: mask_in_range[symmetric]) + apply (drule_tac x="(x && mask n) >> m" in spec) + apply (simp add: shiftr_shiftl1 word_bw_assocs) + apply (drule mp, rule shiftr_less_t2n) + apply (subst add_diff_inverse, simp, rule and_mask_less', simp add: word_size) + apply (clarsimp simp: multiple_mask_trivia word_bw_assocs neg_mask_twice max_absorb2) + done + +lemma offs_in_intvl_iff: + "(p + x \ {p ..+ n}) = (unat x < n)" + apply (simp add: intvl_def, safe) + apply (erule order_le_less_trans[rotated], simp add: unat_of_nat) + apply (rule exI, erule conjI[rotated]) + apply simp + done + +lemma objBits_koTypeOf: + fixes v :: "'a :: pspace_storable" shows + "objBits v = objBitsT (koType TYPE('a))" + using project_inject[where v=v, THEN iffD2, OF refl] + project_koType[THEN iffD1, OF exI[where x=v]] + by (simp add: objBits_def objBitsT_koTypeOf[symmetric]) + +lemma cmap_array_typ_region_bytes: + "ptrf = (Ptr :: _ \ 'b ptr) + \ carray_map_relation bits' amap (h_t_valid htd c_guard) ptrf + \ is_aligned ptr bits + \ typ_uinfo_t TYPE('b :: c_type) \ typ_uinfo_t TYPE(8 word) + \ size_of TYPE('b) = 2 ^ bits' + \ objBitsT (koType TYPE('a :: pspace_storable)) \ bits + \ objBitsT (koType TYPE('a :: pspace_storable)) \ bits' + \ bits' < word_bits + \ carray_map_relation bits' (restrict_map (amap :: _ \ 'a option) (- {ptr ..+ 2 ^ bits})) + (h_t_valid (typ_region_bytes ptr bits htd) c_guard) ptrf" + apply (clarsimp simp: carray_map_relation_def h_t_valid_typ_region_bytes) + apply (case_tac "h_t_valid htd c_guard (ptrf p)", simp_all) + apply (clarsimp simp: objBits_koTypeOf) + apply (drule spec, drule(1) iffD2, clarsimp) + apply (rule iffI[rotated]) + apply clarsimp + apply (drule equals0D, erule notE, erule IntI[rotated]) + apply (simp only: upto_intvl_eq is_aligned_neg_mask2 mask_in_range[symmetric]) + apply (simp only: upto_intvl_eq, rule distinct_aligned_addresses_accumulate, + simp_all add: upto_intvl_eq[symmetric] word_size word_bits_def) + apply clarsimp + apply (drule_tac x="p + (y << objBitsT (koType TYPE('a)))" in spec)+ + apply (simp add: is_aligned_add[OF is_aligned_weaken is_aligned_shiftl]) + apply (simp add: is_aligned_add_helper shiftl_less_t2n word_bits_def) + apply clarsimp + apply (drule_tac x=p in spec) + apply (clarsimp simp: objBits_koTypeOf) + apply auto + done + +lemma map_comp_restrict_map: + "(f \\<^sub>m (restrict_map m S)) = (restrict_map (f \\<^sub>m m) S)" + by (rule ext, simp add: restrict_map_def map_comp_def) + +lemma modify_machinestate_assert_cnodes_swap: + "do x \ modify (ksMachineState_update f); + y \ stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) S) []; g od + = do y \ stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) S) []; + x \ modify (ksMachineState_update f); g od" + by (simp add: fun_eq_iff exec_modify stateAssert_def + bind_assoc exec_get assert_def) + +lemma h_t_array_valid_typ_region_bytes: + "h_t_array_valid htd (p :: ('a :: c_type) ptr) n + \ {ptr_val p..+n * size_of TYPE('a)} \ {ptr..+2 ^ bits} = {} + \ h_t_array_valid (typ_region_bytes ptr bits htd) p n" + apply (clarsimp simp: h_t_array_valid_def) + apply (subst valid_footprint_typ_region_bytes) + apply (simp add: uinfo_array_tag_n_m_def typ_uinfo_t_def typ_info_word) + apply (simp add: field_simps) + done + +lemma cvariable_array_map_relation_detype: + "cvariable_array_map_relation mp szs ptrfun htd + \ ptrfun = (Ptr :: _ \ ('a :: c_type ptr)) + \ \p v. 
mp p = Some v \ p \ {ptr ..+ 2 ^ bits} + \ {p ..+ szs v * size_of TYPE('a)} \ {ptr ..+ 2 ^ bits} = {} + \ cvariable_array_map_relation (mp |` (- {ptr..+2 ^ bits})) + szs ptrfun (typ_region_bytes ptr bits htd)" + apply (clarsimp simp: cvariable_array_map_relation_def restrict_map_def) + apply (elim allE, (drule(1) mp)+) + apply (simp add: h_t_array_valid_typ_region_bytes) + done + +lemma zero_ranges_are_zero_typ_region_bytes: + "zero_ranges_are_zero rs hrs + \ zero_ranges_are_zero rs (hrs_htd_update (typ_region_bytes ptr bits) hrs)" + apply (clarsimp simp: zero_ranges_are_zero_def) + apply (drule(1) bspec) + apply (clarsimp simp: region_actually_is_bytes'_def typ_region_bytes_def hrs_htd_update) + done + +lemma modify_machinestate_assert_ptables_swap: + "do x \ modify (ksMachineState_update f); + y \ stateAssert (\s. \ pTablePartialOverlap (gsPTTypes (ksArchState s)) S) []; g od + = do y \ stateAssert (\s. \ pTablePartialOverlap (gsPTTypes (ksArchState s)) S) []; + x \ modify (ksMachineState_update f); g od" + by (simp add: fun_eq_iff exec_modify stateAssert_def + bind_assoc exec_get assert_def) + +lemma deleteObjects_ccorres': + notes if_cong[cong] + shows + (* the 4 \ bits appears related to smallest retypeable object size, see valid_cap_def *) + "ccorres dc xfdc + (cte_wp_at' (\cte. cteCap cte = capability.UntypedCap d ptr bits idx) p and + (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) and + invs' and ct_active' and sch_act_simple and + K ( 4 \ bits \ bits < word_bits)) + UNIV hs + (deleteObjects ptr bits) + (Basic (\s. globals_update + (t_hrs_'_update (hrs_htd_update (typ_region_bytes ptr bits)) \ + ghost'state_'_update (gs_clear_region ptr bits)) s))" + apply (rule ccorres_grab_asm) + apply (rule ccorres_name_pre) + apply (simp add: deleteObjects_def3 hrs_ghost_update_comm) + apply (rule ccorres_assert) + apply (rule ccorres_stateAssert_fwd) + apply (subst bind_assoc[symmetric]) + apply (unfold freeMemory_def) + apply (subst ksPSpace_update_ext) + apply (subgoal_tac "bits \ word_bits") + prefer 2 + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (clarsimp simp: mapM_x_storeWord_step[simplified word_size_bits_def] + intvl_range_conv intvl_range_conv[where 'a=machine_word_len, folded word_bits_def] + doMachineOp_modify modify_modify o_def ksPSpace_ksMSu_comm + bind_assoc modify_machinestate_assert_cnodes_swap modify_machinestate_assert_ptables_swap + modify_modify_bind) + apply (rule ccorres_stateAssert_fwd)+ + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: in_monad) + apply (rule bexI [rotated]) + apply (rule iffD2 [OF in_modify]) + apply (rule conjI [OF refl refl]) + apply (clarsimp simp: simpler_modify_def) +proof - + let ?mmu = "(\h x. if ptr \ x \ x \ ptr + 2 ^ bits - 1 then 0 else h x)" + let ?psu = "(\h x. if ptr \ x \ x \ ptr + mask bits then None else h x)" + + fix s s' + assume al: "is_aligned ptr bits" + and cte: "cte_wp_at' (\cte. cteCap cte = UntypedCap d ptr bits idx) p s" + and desc_range: "descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)" + and invs: "invs' s" and "ct_active' s" + and "sch_act_simple s" and wb: "bits < word_bits" and b2: "4 \ bits" + and "deletionIsSafe ptr bits s" + and cNodePartial: "\ cNodePartialOverlap (gsCNodes s) + (\x. ptr \ x \ x \ ptr + mask bits)" + and pTablePartial: "\ pTablePartialOverlap (gsPTTypes (ksArchState s)) + (\x. 
ptr \ x \ x \ ptr + mask bits)" + and sr: "(s, s') \ rf_sr" + and safe_asids: + "ksASIDMapSafe + (ksArchState_update (\_. gsPTTypes_update + (\_. ?psu (gsPTTypes (ksArchState s))) (ksArchState s)) + (gsCNodes_update (\_. ?psu (gsCNodes s)) + (gsUserPages_update + (\_. ?psu (gsUserPages s)) + (ksMachineState_update + (underlying_memory_update ?mmu) + (s\ksPSpace := ?psu (ksPSpace s) \)))))" (is "ksASIDMapSafe ?t") + + interpret D: delete_locale s ptr bits p + apply (unfold_locales) + apply fact+ + done + + let ?ks = "?psu (ksPSpace s)" + let ?ks' = "ksPSpace s |` (- {ptr..+2 ^ bits})" + + have psu_restrict: "\h. ?psu h = h |` (- {ptr..+2 ^ bits})" + apply (intro allI ext) + apply (subst upto_intvl_eq [OF al]) + apply (clarsimp simp: mask_2pm1 add_diff_eq) + done + + have ks': "?ks = ?ks'" by (simp add: psu_restrict) + + let ?th = "hrs_htd_update (typ_region_bytes ptr bits)" + let ?th_s = "?th (t_hrs_' (globals s'))" + + have map_to_ctes_delete': + "map_to_ctes ?ks' = ctes_of s |` (- {ptr..+2 ^ bits})" using invs + apply (subst ks' [symmetric]) + apply (subst map_to_ctes_delete [OF D.valid_untyped, simplified field_simps, simplified]) + apply clarsimp + apply (rule ext) + apply (subst upto_intvl_eq [OF al]) + apply (clarsimp simp: mask_2pm1 add_diff_eq) + done + + note cm_disj = cmap_relation_disjoint [OF D.valid_untyped invs, atomize] + note cm_disj_tcb = cmap_relation_disjoint_tcb [OF D.valid_untyped invs] + note cm_disj_cte = cmap_relation_disjoint_cte [OF D.valid_untyped invs] + note cm_disj_user = cmap_relation_disjoint_user_data [OF D.valid_untyped invs] + note cm_disj_device = cmap_relation_disjoint_device_data [OF D.valid_untyped invs] + + note upto_rew = upto_intvl_eq[OF al, THEN eqset_imp_iff, symmetric, simplified] + + have rl: "\(p :: machine_word) P. ko_wp_at' P p s \ + (\ko. 
P ko \ live' ko) \ p \ {ptr..ptr + 2 ^ bits - 1}" + apply (intro allI impI conjI) + apply (elim conjE) + using D.live_notRange + apply (clarsimp simp: mask_def add_diff_eq) + done + + note cmaptcb = cmap_relation_tcb [OF sr] + note cmap_array_helper = arg_cong2[where f=carray_map_relation, OF refl map_comp_restrict_map] + have trivia: "size_of TYPE(pte_C[vs_array_len]) = 2 ^ (ptBits VSRootPT_T)" + by (auto simp: bit_simps Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) + note cmap_array = cmap_array_typ_region_bytes[where 'a=pte, OF refl _ al _ trivia(1)] + note cmap_array = cmap_array[simplified, simplified objBitsT_simps b2 + bit_simps word_bits_def, simplified] + + note pspace_distinct' = invs_pspace_distinct'[OF invs] and + pspace_aligned' = invs_pspace_aligned'[OF invs] and + valid_objs' = invs_valid_objs'[OF invs] and + valid_untyped'_def2 = + valid_untyped'[OF pspace_distinct' pspace_aligned' al] + + have s_ksPSpace_adjust: "ksPSpace_update ?psu s = s\ksPSpace := ?psu (ksPSpace s)\" + by simp + + from invs have "valid_global_refs' s" by fastforce + with cte + have ptr_refs: "kernel_data_refs \ {ptr..ptr + 2 ^ bits - 1} = {}" + by (fastforce simp: valid_global_refs'_def valid_refs'_def cte_wp_at_ctes_of ran_def) + + have bits_ge_3[simp]: "3 \ bits" using b2 by linarith + + (* calculation starts here *) + have cs: "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState s)) + (t_hrs_' (globals s'))" + using sr + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + hence "cpspace_relation ?ks' (underlying_memory (ksMachineState s)) ?th_s" + unfolding cpspace_relation_def + using cendpoint_relation_restrict [OF D.valid_untyped invs rl] + cnotification_relation_restrict [OF D.valid_untyped invs rl] + using cmap_array[simplified bit_simps] + apply - + apply (elim conjE) + supply if_split[split del] + apply ((subst lift_t_typ_region_bytes, + rule cm_disj cm_disj_tcb cm_disj_cte cm_disj_user cm_disj_device + , assumption +, + simp_all add: objBits_simps' bit_simps + heap_to_user_data_restrict heap_to_device_data_restrict)[1])+ \ \waiting ...\ + apply (simp add: map_to_ctes_delete' cmap_relation_restrict_both_proj + cmap_relation_restrict_both cmap_array_helper hrs_htd_update + bit_simps) + apply (frule cmap_relation_restrict_both_proj[where f = tcb_ptr_to_ctcb_ptr], simp) + apply (intro conjI) + apply (erule iffD1[OF cpspace_tcb_relation_address_subset, + OF D.valid_untyped invs cmaptcb]) + apply (subst cmap_relation_cong [OF refl refl, + where rel' = "cendpoint_relation (cslift s')"]) + apply (clarsimp simp: restrict_map_Some_iff image_iff + map_comp_restrict_map_Some_iff) + apply (simp add: cmap_relation_restrict_both_proj) + apply (subst cmap_relation_cong[OF refl refl, + where rel' = "cnotification_relation (cslift s')"]) + apply (clarsimp simp: restrict_map_Some_iff image_iff + map_comp_restrict_map_Some_iff) + apply (simp add: cmap_relation_restrict_both_proj) + done + + moreover + { + assume "s' \\<^sub>c armKSGlobalUserVSpace_Ptr " + moreover + from sr ptr_refs have "ptr_span armKSGlobalUserVSpace_Ptr + \ {ptr..ptr + 2 ^ bits - 1} = {}" + by (fastforce simp: rf_sr_def cstate_relation_def Let_def) + ultimately + have "hrs_htd (hrs_htd_update (typ_region_bytes ptr bits) (t_hrs_' (globals s'))) \\<^sub>t armKSGlobalUserVSpace_Ptr" + using al wb + apply (cases "t_hrs_' (globals s')") + apply (simp add: hrs_htd_update_def hrs_htd_def h_t_valid_typ_region_bytes upto_intvl_eq) + done + } + + moreover + have h2ud_eq: + "heap_to_user_data (?psu (ksPSpace s)) + (?mmu 
(underlying_memory (ksMachineState s))) = + heap_to_user_data (?psu (ksPSpace s)) + (underlying_memory (ksMachineState s))" + supply mask_2pm1[simp] add_diff_eq[simp] + apply (subst heap_to_user_data_update_region + [where S="{ptr..ptr + 2 ^ bits - 1}", simplified]) + prefer 2 + apply (rule ext) + apply clarsimp + apply (simp add: map_option_def map_comp_def + split: if_split_asm option.splits) + apply (frule pspace_alignedD'[OF _ pspace_aligned']) + apply (case_tac "pageBits \ bits") + apply (simp add: objBitsKO_def split: kernel_object.splits) + apply clarsimp + apply (rule aligned_range_offset_mem + [where 'a=machine_word_len, folded word_bits_def, simplified, OF _ _ al _ wb]) + apply assumption+ + apply (rule iffI[rotated], simp) + apply (simp add: objBits_simps) + apply (rule FalseE) + apply (case_tac "ptr \ x", simp) + apply clarsimp + apply (frule_tac y="ptr + 2 ^ bits - 1" in le_less_trans) + apply (simp only: not_le) + apply (drule (1) is_aligned_no_wrap') + apply simp + apply (cut_tac cte[simplified cte_wp_at_ctes_of]) + apply clarsimp + apply (frule ctes_of_valid'[OF _ valid_objs']) + apply (frule pspace_distinctD'[OF _ pspace_distinct']) + apply (clarsimp simp add: valid_cap'_def valid_untyped'_def2 capAligned_def) + apply (drule_tac x=x in spec) + apply (simp add: obj_range'_def objBitsKO_def mask_def) + apply (simp only: not_le) + apply (cut_tac is_aligned_no_overflow[OF al]) + apply (case_tac "ptr \ x + 2 ^ pageBits - 1", + simp_all only: simp_thms not_le) + apply clarsimp + apply (thin_tac "psp = Some ko" for psp ko)+ + apply (thin_tac "ps_clear x y z" for x y z) + apply (thin_tac "cteCap x = y" for x y)+ + apply (frule is_aligned_no_overflow) + apply (simp only: x_power_minus_1) + apply (frule_tac x=x in word_plus_strict_mono_right[of _ "2^pageBits"]) + apply (rule ccontr) + apply (simp only: not_le) + apply (frule_tac y="x" in less_le_trans, assumption) + apply (simp add: word_sub_less_iff) + apply simp + done + moreover + have h2dd_eq: + "heap_to_device_data (?psu (ksPSpace s)) + (?mmu (underlying_memory (ksMachineState s))) = + heap_to_device_data (?psu (ksPSpace s)) + (underlying_memory (ksMachineState s))" + supply mask_2pm1[simp] add_diff_eq[simp] + apply (subst heap_to_device_data_update_region + [where S="{ptr..ptr + 2 ^ bits - 1}", simplified]) + prefer 2 + apply (rule ext) + apply clarsimp + apply (simp add: map_option_def map_comp_def + split: if_split_asm option.splits) + apply (frule pspace_alignedD'[OF _ pspace_aligned']) + apply (case_tac "pageBits \ bits") + apply (simp add: objBitsKO_def split: kernel_object.splits) + apply clarsimp + apply (rule aligned_range_offset_mem + [where 'a=machine_word_len, folded word_bits_def, simplified, OF _ _ al _ wb]) + apply assumption+ + apply (rule iffI[rotated], simp) + apply (simp add: objBits_simps) + apply (rule FalseE) + apply (case_tac "ptr \ x", simp) + apply clarsimp + apply (frule_tac y="ptr + 2 ^ bits - 1" in le_less_trans) + apply (simp only: not_le) + apply (drule (1) is_aligned_no_wrap') + apply simp + apply (cut_tac cte[simplified cte_wp_at_ctes_of]) + apply clarsimp + apply (frule ctes_of_valid'[OF _ valid_objs']) + apply (frule pspace_distinctD'[OF _ pspace_distinct']) + apply (clarsimp simp add: valid_cap'_def valid_untyped'_def2 capAligned_def) + apply (drule_tac x=x in spec) + apply (simp add: obj_range'_def objBitsKO_def mask_def) + apply (simp only: not_le) + apply (cut_tac is_aligned_no_overflow[OF al]) + apply (case_tac "ptr \ x + 2 ^ pageBits - 1", + simp_all only: simp_thms not_le) + apply 
clarsimp + apply (thin_tac "psp = Some ko" for psp ko)+ + apply (thin_tac "ps_clear x y z" for x y z) + apply (thin_tac "cteCap x = y" for x y)+ + apply (frule is_aligned_no_overflow) + apply (simp only: x_power_minus_1) + apply (frule_tac x=x in word_plus_strict_mono_right[of _ "2^pageBits"]) + apply (rule ccontr) + apply (simp only: not_le) + apply (frule_tac y="x" in less_le_trans, assumption) + apply (simp add: word_sub_less_iff) + apply simp + done + + moreover { + from D.valid_untyped invs have tcb_no_overlap: + "\p v. map_to_tcbs (ksPSpace s) p = Some v + \ p \ {ptr..+2 ^ bits} + \ {p ..+ 2 ^ objBitsT TCBT} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp: valid_cap'_def) + apply (drule(1) map_to_ko_atI') + apply (clarsimp simp: obj_at'_def valid_untyped'_def2 mask_2pm1) + apply (elim allE, drule(1) mp) + apply (clarsimp simp only: obj_range'_def upto_intvl_eq[symmetric] al add_mask_fold[symmetric]) + apply (subgoal_tac "objBitsKO (KOTCB v) = objBitsT TCBT") + apply (subgoal_tac "p \ {p ..+ 2 ^ objBitsT TCBT}") + apply simp + apply blast + apply (simp add: upto_intvl_eq) + apply (clarsimp simp: objBits_simps objBitsT_simps) + done + + from cNodePartial[folded add_mask_fold, simplified upto_rew] + have cn_no_overlap: + "\p n. gsCNodes s p = Some n \ p \ {ptr..+2 ^ bits} + \ {p ..+ 2 ^ (n + cte_level_bits)} \ {ptr..+2 ^ bits} = {}" + apply (simp add: cNodePartialOverlap_def) + apply (elim allE, drule(1) mp) + apply (clarsimp simp flip: add_mask_fold) + apply (frule base_member_set, simp add: word_bits_def) + apply (clarsimp simp only: upto_intvl_eq[symmetric] field_simps) + apply blast + done + + from sr have "cvariable_array_map_relation (gsCNodes s|\<^bsub>(- {ptr..+2 ^ bits})\<^esub>) ((^) 2) cte_Ptr + (typ_region_bytes ptr bits (hrs_htd (t_hrs_' (globals s'))))" + "cvariable_array_map_relation (map_to_tcbs (ksPSpace s|\<^bsub>(- {ptr..+2 ^ bits})\<^esub>)) (\x. 
5) cte_Ptr + (typ_region_bytes ptr bits (hrs_htd (t_hrs_' (globals s'))))" + apply (simp_all add: map_comp_restrict_map rf_sr_def cstate_relation_def Let_def) + apply (rule cvariable_array_map_relation_detype, clarsimp+) + apply (drule(1) cn_no_overlap) + apply (simp add: cte_level_bits_def power_add) + apply (rule cvariable_array_map_relation_detype, clarsimp+) + apply (drule(1) tcb_no_overlap) + apply (erule disjoint_subset[rotated]) + apply (rule intvl_start_le) + apply (simp add: objBitsT_simps objBits_defs) + done + } + + moreover from sr + have "apsnd fst (gs_clear_region ptr bits (ghost'state_' (globals s'))) = + (gsUserPages s|\<^bsub>(- {ptr..+2 ^ bits})\<^esub>, gsCNodes s|\<^bsub>(- {ptr..+2 ^ bits})\<^esub>) + \ ghost_size_rel (gs_clear_region ptr bits (ghost'state_' (globals s'))) + (gsMaxObjectSize s)" + apply (case_tac "ghost'state_' (globals s')") + apply (simp add: rf_sr_def cstate_relation_def Let_def gs_clear_region_def + upto_intvl_eq[OF al] carch_state_relation_def + cmachine_state_relation_def ghost_size_rel_def + ghost_assertion_data_get_def restrict_map_def + if_flip[symmetric, where F=None]) + done + + moreover from sr + have "fst (snd (snd (gs_clear_region ptr bits (ghost'state_' (globals s'))))) = + gsPTTypes (ksArchState s)|\<^bsub>(- {ptr..+2 ^ bits})\<^esub>" + by (simp add: rf_sr_def cstate_relation_def Let_def gs_clear_region_def + restrict_map_def if_flip[symmetric, where F=None]) + + moreover from sr + have "h_t_valid (typ_region_bytes ptr bits (hrs_htd (t_hrs_' (globals s')))) + c_guard intStateIRQNode_array_Ptr" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (simp add: h_t_valid_typ_region_bytes) + apply (simp add: upto_intvl_eq al) + apply (rule disjoint_subset[OF _ ptr_refs]) + apply (simp add: cinterrupt_relation_def cte_level_bits_def) + done + + moreover + from pTablePartial[folded add_mask_fold, simplified upto_rew] + have "\p v. \gsPTTypes (ksArchState s) p = Some v; p \ {ptr ..+ 2 ^ bits}\ + \ {p ..+ 2 ^ pt_bits v} \ {ptr ..+ 2 ^ bits} = {}" + apply (simp add: pTablePartialOverlap_def) + apply (elim allE, drule(1) mp) + apply (clarsimp simp flip: add_mask_fold) + apply (frule base_member_set, simp add: bit_simps) + apply (clarsimp simp only: upto_intvl_eq[symmetric] field_simps power_add) + apply blast + done + + with sr + have "cvariable_array_map_relation (gsPTTypes (ksArchState s)|\<^bsub>(- {ptr..+2 ^ bits})\<^esub>) + (\pt_t. 2 ^ ptTranslationBits pt_t) + pte_Ptr + (typ_region_bytes ptr bits (hrs_htd (t_hrs_' (globals s'))))" + apply (simp add: map_comp_restrict_map rf_sr_def cstate_relation_def Let_def pte_bits_def + word_size_bits_def) + apply (rule cvariable_array_map_relation_detype; clarsimp) + apply (simp add: pt_bits_def table_size_def power_add pte_bits_def word_size_bits_def) + done + + ultimately + show "(?t, globals_update + (%x. ghost'state_'_update (gs_clear_region ptr bits) + (t_hrs_'_update ?th x)) s') \ rf_sr" + using sr untyped_cap_rf_sr_ptr_bits_domain[OF cte invs sr] + by (simp add: rf_sr_def cstate_relation_def Let_def + psu_restrict cpspace_relation_def + carch_state_relation_def carch_globals_def + cmachine_state_relation_def + hrs_htd_update htd_safe_typ_region_bytes + zero_ranges_are_zero_typ_region_bytes) + +qed + +abbreviation (input) + "global_htd_update f == Guard MemorySafety \htd_safe domain (hrs_htd \t_hrs) + \ htd_safe domain (\(\s. f s) (hrs_htd \t_hrs))\ + (Basic (\s. 
globals_update (t_hrs_'_update (hrs_htd_update (f s))) s))" + +lemma kernel_data_refs_domain_eq_rotate: + "(kernel_data_refs = - domain) = (domain = - kernel_data_refs)" + by blast + +lemma deleteObjects_ccorres[corres]: + "ccorres dc xfdc + (cte_wp_at' (\cte. cteCap cte = UntypedCap d ptr bits idx) p and + (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) and + invs' and ct_active' and sch_act_simple and + K ( 4 \ bits \ bits < word_bits)) + UNIV hs + (deleteObjects ptr bits) + (Seq (global_htd_update (\_. typ_region_bytes ptr bits)) + (Basic (\s. globals_update + (ghost'state_'_update (gs_clear_region ptr bits)) s)))" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_Guard_Seq) + apply (rule Corres_UL_C.ccorres_exec_cong + [THEN iffD2, OF _ deleteObjects_ccorres'[where idx=idx and p=p and d=d]]) + apply (simp add: exec_Basic_Seq_Basic o_def + hrs_ghost_update_comm[simplified o_def]) + apply clarsimp + apply (frule(2) untyped_cap_rf_sr_ptr_bits_domain) + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate htd_safe_typ_region_bytes + untyped_cap_rf_sr_ptr_bits_domain) + +end +end diff --git a/proof/crefine/AARCH64/Fastpath_C.thy b/proof/crefine/AARCH64/Fastpath_C.thy new file mode 100644 index 0000000000..c39edefe45 --- /dev/null +++ b/proof/crefine/AARCH64/Fastpath_C.thy @@ -0,0 +1,3452 @@ +(* + * Copyright 2024, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Proof that the C fast path functions are refinements of their design + specifications in Fastpath_Defs. *) + +theory Fastpath_C +imports + SyscallArgs_C + Delete_C + Syscall_C + Fastpath_Defs + "CLib.MonadicRewrite_C" +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma setCTE_obj_at'_queued: + "\obj_at' (\tcb. P (tcbQueued tcb)) t\ setCTE p v \\rv. obj_at' (\tcb. P (tcbQueued tcb)) t\" + unfolding setCTE_def + by (rule setObject_cte_obj_at_tcb', simp+) + +crunch obj_at'_queued: cteInsert "obj_at' (\tcb. P (tcbQueued tcb)) t" + (wp: setCTE_obj_at'_queued crunch_wps) + +crunch obj_at'_not_queued: emptySlot "obj_at' (\a. \ tcbQueued a) p" + (wp: setCTE_obj_at'_queued) + +lemma getEndpoint_obj_at': + "\obj_at' P ptr\ getEndpoint ptr \\rv s. P rv\" + apply (wp getEndpoint_wp) + apply (clarsimp simp: obj_at'_def ) + done + +lemmas setEndpoint_obj_at_tcb' = setEndpoint_obj_at'_tcb + +lemma tcbSchedEnqueue_tcbContext[wp]: + "tcbSchedEnqueue t' \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_when) + apply (wp threadSet_obj_at' hoare_drop_imps threadGet_wp + | simp split: if_split)+ + done + +lemma setCTE_tcbContext: + "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ + setCTE slot cte + \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + +lemma setThreadState_tcbContext: + "setThreadState a b \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def + tcbQueuePrepend_def rescheduleRequired_def + by (wp threadSet_obj_at' hoare_drop_imps threadGet_wp | wpc + | simp split: if_split)+ + +lemma setBoundNotification_tcbContext: + "setBoundNotification a b \obj_at' (\tcb. 
P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setBoundNotification_def + by wpsimp + +declare comp_apply [simp del] +crunch tcbContext[wp]: deleteCallerCap "obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t" + (wp: setEndpoint_obj_at_tcb' setBoundNotification_tcbContext + setNotification_tcb crunch_wps setThreadState_tcbContext + simp: crunch_simps unless_def) +declare comp_apply [simp] + + +crunch ksArch[wp]: asUser "\s. P (ksArchState s)" + (wp: crunch_wps) + +(* FIXME AARCH64 consider moving this, on MCS there is a tcbs_of as well *) +definition tcbs_of :: "kernel_state \ machine_word \ tcb option" where + "tcbs_of s = (\x. if tcb_at' x s then projectKO_opt (the (ksPSpace s x)) else None)" + +lemma obj_at_tcbs_of: + "obj_at' P t s = (EX tcb. tcbs_of s t = Some tcb & P tcb)" + apply (simp add: tcbs_of_def split: if_split) + apply (intro conjI impI) + apply (clarsimp simp: obj_at'_def) + apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI]) + done + +lemma st_tcb_at_tcbs_of: + "st_tcb_at' P t s = (EX tcb. tcbs_of s t = Some tcb & P (tcbState tcb))" + by (simp add: st_tcb_at'_def obj_at_tcbs_of) + +lemma tcbs_of_ko_at': + "\ tcbs_of s p = Some tcb \ \ ko_at' tcb p s" + by (simp add: obj_at_tcbs_of) + +lemma tcbs_of_valid_tcb': + "\ valid_objs' s; tcbs_of s p = Some tcb \ \ valid_tcb' tcb s" + by (frule tcbs_of_ko_at') + (drule (1) ko_at_valid_objs', auto simp: valid_obj'_def) + + +end + +context kernel_m begin + +lemma getCTE_h_val_ccorres_split: + assumes var: "\s f s'. var (var_update f s) = f (var s) + \ ((s', var_update f s) \ rf_sr) = ((s', s) \ rf_sr)" + and "\rv' t t'. ceqv \ var rv' t t' g (g' rv')" + and "\rv rv'. \ ccap_relation (cteCap rv) rv'; P rv \ + \ ccorres r xf (Q rv) (Q' rv rv') hs (f rv) (g' rv')" + shows + "ccorres r xf (\s. \cte. ctes_of s slot = Some cte \ P cte \ Q cte s) + {s. (\cte cap. ccap_relation (cteCap cte) cap \ P cte + \ var_update (\_. cap) s \ Q' cte cap) + \ slot' = cte_Ptr slot} hs + (getCTE slot >>= (\rv. f rv)) + ((Basic (\s. var_update (\_. h_val (hrs_mem (t_hrs_' (globals s))) (cap_Ptr &(slot' \[''cap_C'']))) s));; g)" + (is "ccorres r xf ?G ?G' hs ?f ?g") + apply (rule ccorres_guard_imp2) + apply (rule ccorres_pre_getCTE) + apply (rule_tac A="cte_wp_at' ((=) rv and P) slot and Q rv" and A'="?G'" in ccorres_guard_imp2) + apply (rule_tac P="P rv" in ccorres_gen_asm) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=var in ccorres_abstract) + apply (rule assms) + apply (rule ccorres_gen_asm2, erule(1) assms) + apply vcg + apply (rule conseqPre, vcg, clarsimp simp: var) + apply (clarsimp simp: cte_wp_at_ctes_of var) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps' dest!: ccte_relation_ccap_relation) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma cap_'_cap_'_update_var_props: + "cap_' (cap_'_update f s) = f (cap_' s) \ + ((s', cap_'_update f s) \ rf_sr) = ((s', s) \ rf_sr)" + by simp + +lemmas getCTE_cap_h_val_ccorres_split + = getCTE_h_val_ccorres_split[where var_update=cap_'_update and P=\, + OF cap_'_cap_'_update_var_props] + +lemma getCTE_ccorres_helper: + "\ \\ cte cte'. \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ctes_of \ slot = Some cte + \ cslift s (cte_Ptr slot) = Some cte' + \ ccte_relation cte cte'} + f {s. 
(\, s) \ rf_sr \ r cte (xf s)} \ \ + ccorres r xf P P' hs (getCTE slot) f" + apply atomize + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return2) + apply (rule ccorres_pre_getCTE) + apply (rule_tac P="cte_wp_at' ((=) x) slot and P" + in ccorres_from_vcg[where P'=P']) + apply (erule allEI) + apply (drule_tac x="the (ctes_of \ slot)" in spec) + apply (erule HoarePartial.conseq) + apply (clarsimp simp: return_def cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma acc_CNodeCap_repr: + "isCNodeCap cap + \ cap = CNodeCap (capCNodePtr cap) (capCNodeBits cap) + (capCNodeGuard cap) (capCNodeGuardSize cap)" + by (clarsimp simp: isCap_simps) + +lemma valid_cnode_cap_cte_at': + "\ s \' c; isCNodeCap c; ptr = capCNodePtr c; v < 2 ^ capCNodeBits c \ + \ cte_at' (ptr + v * 2^cteSizeBits) s" + apply (drule less_mask_eq) + apply (drule(1) valid_cap_cte_at'[where addr=v]) + apply (simp add: mult.commute mult.left_commute) + done + +lemmas valid_cnode_cap_cte_at'' + = valid_cnode_cap_cte_at'[simplified objBits_defs, simplified] + +lemma ccorres_abstract_all: + "\\rv' t t'. ceqv Gamm xf' rv' t t' d (d' rv'); + \rv'. ccorres_underlying sr Gamm r xf arrel axf (G rv') (G' rv') hs a (d' rv')\ + \ ccorres_underlying sr Gamm r xf arrel axf (\s. \rv'. G rv' s) {s. s \ G' (xf' s)} hs a d" + apply (erule ccorres_abstract) + apply (rule ccorres_guard_imp2) + apply assumption + apply simp + done + +declare of_int_sint_scast[simp] + +lemma isCNodeCap_capUntypedPtr_capCNodePtr: + "isCNodeCap c \ capUntypedPtr c = capCNodePtr c" + by (clarsimp simp: isCap_simps) + +lemma of_bl_from_bool: + "of_bl [x] = from_bool x" + by (cases x, simp_all) + +lemma lookup_fp_ccorres': + assumes bits: "bits = size cptr" + shows + "ccorres (\mcp ccp. ccap_relation (case mcp of Inl v => NullCap | Inr v => v) ccp) + ret__struct_cap_C_' + (valid_cap' cap and valid_objs') + ({s. ccap_relation cap (cap_' s)} \ {s. cptr_' s = cptr}) [] + (cutMon ((=) s) (doE t \ resolveAddressBits cap cptr bits; + liftE (getSlotCap (fst t)) + odE)) + (Call lookup_fp_'proc)" + apply (cinit' lift: cptr_') + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=ret__int_' in ccorres_abstract, ceqv) + apply (rule_tac P="rv' = from_bool (isCNodeCap cap)" in ccorres_gen_asm2) + apply (simp add: from_bool_0 del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: resolveAddressBits.simps split_def del: Collect_const) + apply (rule ccorres_drop_cutMon) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def isRight_def isLeft_def + ccap_relation_NullCap_iff) + apply (clarsimp simp del: Collect_const) + apply (rule_tac P="valid_cap' cap and valid_objs'" + and P'="{s. ccap_relation cap (cap_' s) \ isCNodeCap cap} + \ {s. bits_' s = 64 - of_nat bits \ bits \ 64 \ bits \ 0}" (* FIXME AARCH64 64 *) + in ccorres_inst) + apply (thin_tac "isCNodeCap cap") + defer + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: word_size cap_get_tag_isCap bits + of_bl_from_bool from_bool_0) + proof (induct cap cptr bits arbitrary: s + rule: resolveAddressBits.induct) + case (1 acap acptr abits as) + + have valid_cnode_bits_0: + "\s acap. \ isCNodeCap acap; s \' acap \ \ capCNodeBits acap \ 0" + by (clarsimp simp: isCap_simps valid_cap'_def) + + have cap_get_tag_update_1: + "\f cap. 
cap_get_tag (cap_C.words_C_update (\w. Arrays.update w (Suc 0) (f w)) cap) = cap_get_tag cap" + by (simp add: cap_get_tag_def cong: if_cong) + + show ?case + supply if_cong[cong] option.case_cong[cong] + apply (cinitlift cap_' bits_') + apply (rename_tac cbits ccap) + apply (elim conjE) + apply (rule_tac F="capCNodePtr_CL (cap_cnode_cap_lift ccap) + = capCNodePtr acap + \ capCNodeGuardSize acap < 64 + \ capCNodeBits acap < 64 + \ capCNodeGuard_CL (cap_cnode_cap_lift ccap) + = capCNodeGuard acap + \ unat (capCNodeGuardSize_CL (cap_cnode_cap_lift ccap)) + = capCNodeGuardSize acap + \ unat (capCNodeRadix_CL (cap_cnode_cap_lift ccap)) + = capCNodeBits acap + \ unat (0x40 - capCNodeRadix_CL (cap_cnode_cap_lift ccap)) + = 64 - capCNodeBits acap + \ unat ((0x40 :: machine_word) - of_nat abits) = 64 - abits + \ unat (capCNodeGuardSize_CL (cap_cnode_cap_lift ccap) + + capCNodeRadix_CL (cap_cnode_cap_lift ccap)) + = capCNodeGuardSize acap + capCNodeBits acap" + in Corres_UL_C.ccorres_req) + apply (clarsimp simp: cap_get_tag_isCap[symmetric]) + apply (clarsimp simp: cap_lift_cnode_cap cap_to_H_simps valid_cap'_def + capAligned_def cap_cnode_cap_lift_def objBits_simps + word_mod_2p_is_mask[where n=6, simplified] + elim!: ccap_relationE) + apply (simp add: unat_sub[unfolded word_le_nat_alt] + unat_of_nat64 word_bits_def) + apply (subst unat_plus_simple[symmetric], subst no_olen_add_nat) + apply (rule order_le_less_trans, rule add_le_mono) + apply (rule word_le_nat_alt[THEN iffD1], rule word_and_le1)+ + apply (simp add: mask_def) + apply (rule ccorres_guard_imp2) + apply csymbr+ + apply (rule ccorres_Guard_Seq, csymbr) + apply (simp add: resolveAddressBits.simps bindE_assoc extra_sle_sless_unfolds + Collect_True + split del: if_split del: Collect_const) + apply (simp add: cutMon_walk_bindE del: Collect_const + split del: if_split) + apply (rule ccorres_drop_cutMon_bindE, rule ccorres_assertE) + apply (rule ccorres_cutMon) + apply csymbr + apply (simp add: locateSlot_conv liftE_bindE cutMon_walk_bind) + apply (rule ccorres_drop_cutMon_bind, rule ccorres_stateAssert) + + apply (rule_tac P="abits < capCNodeBits acap + capCNodeGuardSize acap" + in ccorres_case_bools2) + apply (rule ccorres_drop_cutMon) + apply csymbr+ + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=ret__int_' in ccorres_abstract_all, ceqv) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: unlessE_def split: if_split) + apply (simp add: throwError_def return_def cap_tag_defs + isRight_def isLeft_def + ccap_relation_NullCap_iff + in_bindE) + apply auto[1] + apply (simp del: Collect_const) + apply (rule ccorres_Guard_Seq)+ + apply csymbr+ + apply (simp del: Collect_const) + apply (rule ccorres_move_array_assertion_cnode_ctes ccorres_move_c_guard_cte + | csymbr)+ + apply (rule ccorres_symb_exec_r) + apply ccorres_remove_UNIV_guard + apply csymbr+ + apply (rule ccorres_cond_false_seq) + apply (simp add: ccorres_expand_while_iff_Seq[symmetric] + whileAnno_def) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: unlessE_def split: if_split) + apply (simp add: throwError_def return_def cap_tag_defs isRight_def + isLeft_def ccap_relation_NullCap_iff) + apply fastforce + apply (simp del: Collect_const) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (simp del: 
Collect_const) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (rule ccorres_cutMon) + apply (simp add: cutMon_walk_bindE unlessE_whenE + del: Collect_const + split del: if_split) + apply (rule ccorres_drop_cutMon_bindE) + apply csymbr+ + apply (rule ccorres_rhs_assoc2) + apply (rule_tac r'=dc and xf'=xfdc in ccorres_splitE[OF _ ceqv_refl]) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_Guard_Seq)+ + apply csymbr + apply (simp add: unat_sub word_le_nat_alt if_1_0_0 shiftl_shiftr3 word_size + del: Collect_const) + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: whenE_def throwError_def return_def + ccap_relation_NullCap_iff isRight_def isLeft_def split_def split: if_split) + apply (simp add: whenE_def) + apply (prop_tac "(acptr >> abits - capCNodeGuardSize acap) && mask (capCNodeGuardSize acap) = capCNodeGuard acap") + apply clarsimp + apply simp + apply (rule ccorres_returnOk_skip) + apply simp + apply (rule ccorres_cond_false) + apply (rule_tac P="valid_cap' acap" in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: valid_cap'_def isCap_simps if_1_0_0) + apply (simp add: unat_eq_0[symmetric] whenE_def returnOk_def return_def) + apply (rule ccorres_cutMon) + apply (simp add: liftE_bindE locateSlot_conv + del: Collect_const) + apply (rule_tac P="abits = capCNodeBits acap + capCNodeGuardSize acap" + in ccorres_case_bools2) + apply (rule ccorres_drop_cutMon) + apply (simp del: Collect_const) + apply (simp add: liftE_def getSlotCap_def del: Collect_const) + apply (rule ccorres_Guard_Seq)+ + apply csymbr+ + apply simp + apply (rule ccorres_move_array_assertion_cnode_ctes + ccorres_move_c_guard_cte + ccorres_rhs_assoc | csymbr)+ + apply (rule getCTE_cap_h_val_ccorres_split) + apply ceqv + apply (rename_tac "getCTE_cap") + apply (csymbr | rule ccorres_Guard_Seq)+ + apply (rule ccorres_cond_false_seq) + apply (simp add: ccorres_expand_while_iff_Seq[symmetric] + whileAnno_def del: Collect_const) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_false_seq) + apply (simp del: Collect_const) + apply (rule_tac P'="{s. cap_' s = getCTE_cap}" + in ccorres_from_vcg_throws[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: word_sle_def return_def returnOk_def + isRight_def) + apply (simp add: bind_bindE_assoc + del: Collect_const if_cong) + apply (simp add: liftE_bindE "1.prems" unlessE_def + cutMon_walk_bind cnode_cap_case_if + del: Collect_const cong: if_cong call_ignore_cong) + apply (rule ccorres_Guard_Seq)+ + apply csymbr+ + apply (simp del: Collect_const) + apply (rule ccorres_drop_cutMon_bind) + apply (rule ccorres_getSlotCap_cte_at) + apply (rule ccorres_move_c_guard_cte + ccorres_move_array_assertion_cnode_ctes + | csymbr)+ + apply ctac + apply (csymbr | rule ccorres_Guard_Seq)+ + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_rhs_assoc | csymbr)+ + apply (simp add: ccorres_expand_while_iff_Seq[symmetric] + whileAnno_def if_to_top_of_bindE bindE_assoc + split_def + cong: if_cong call_ignore_cong) + apply (rule ccorres_cutMon) + apply (simp add: cutMon_walk_if) + apply (rule_tac Q'="\s. 
ret__int_' s = from_bool (isCNodeCap rv)" + in ccorres_cond_both'[where Q=\]) + apply (clarsimp simp: from_bool_0) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac P="ccorres r xf Gd Gd' hs a" for r xf Gd Gd' hs a in rsubst) + apply (rule "1.hyps", + (rule refl in_returns in_bind[THEN iffD2, OF exI, OF exI, OF conjI] + acc_CNodeCap_repr + | assumption + | clarsimp simp: unlessE_whenE locateSlot_conv + "1.prems" + | clarsimp simp: whenE_def[where P=False])+)[1] + apply (simp add: whileAnno_def extra_sle_sless_unfolds) + apply (rule ccorres_drop_cutMon) + apply (simp add: liftE_def getSlotCap_def) + apply (rule ccorres_pre_getCTE) + apply (rule ccorres_cond_false_seq) + apply (rule_tac P="\s. cteCap rva = rv" and P'="{s. cap_' s = cap}" + in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def returnOk_def word_sle_def isRight_def) + apply simp + apply (wp getSlotCap_wp) + apply simp + apply vcg + apply (wp whenE_throwError_wp) + apply (simp add: ccHoarePost_def del: Collect_const) + apply vcg + apply (clarsimp simp: of_bl_from_bool cong: if_cong) + apply (clarsimp simp: cap_get_tag_isCap + option.split[where P="\x. x"] + isCNodeCap_capUntypedPtr_capCNodePtr) + apply (clarsimp simp: word_less_nat_alt word_le_nat_alt linorder_not_less + cong: conj_cong) + apply (clarsimp simp: word_less_nat_alt word_le_nat_alt linorder_not_less + cong: rev_conj_cong) + apply (subgoal_tac "\ isZombie acap \ \ isThreadCap acap") + prefer 2 + apply (clarsimp simp: isCap_simps) + apply (simp add: imp_conjL) + apply (simp only: all_simps[symmetric] imp_conjL cong: imp_cong, + simp only: all_simps, simp) + apply (simp add: unat_shiftr_le_bound) + apply (frule(1) valid_cnode_bits_0, clarsimp) + apply (intro conjI impI) + apply (simp add: size_of_def) + apply (erule (1) valid_cnode_cap_cte_at'') + apply simp + apply (rule shiftr_less_t2n') + apply simp + apply simp + apply (simp add:size_of_def) + apply (erule (1) valid_cnode_cap_cte_at'') + apply simp + apply (rule shiftr_less_t2n') + apply simp + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp dest!: ctes_of_valid') + apply (simp add: cte_level_bits_def size_of_def field_simps) + apply (simp add: shiftl_shiftr3 word_size) + apply (simp add: word_bw_assocs mask_and_mask) + apply (simp_all add: unat_sub word_le_nat_alt unat_eq_0[symmetric]) + apply (simp_all add: unat_plus_if' if_P) + apply (clarsimp simp: shiftr_over_and_dist + size_of_def cte_level_bits_def field_simps shiftl_shiftl + shiftl_shiftr3 word_size)+ + apply (clarsimp simp: unat_gt_0 from_bool_0 trans [OF eq_commute from_bool_eq_if]) + apply (intro conjI impI, simp_all)[1] + apply (rule word_unat.Rep_inject[THEN iffD1], subst unat_plus_if') + apply (simp add: unat_plus_if' unat_of_nat64 word_bits_def) + apply (clarsimp simp: shiftr_over_and_dist + size_of_def cte_level_bits_def field_simps shiftl_shiftl + shiftl_shiftr3 word_size)+ + apply (clarsimp simp: unat_gt_0 from_bool_0 trans [OF eq_commute from_bool_eq_if]) + + apply (intro conjI impI, simp_all)[1] + apply (rule word_unat.Rep_inject[THEN iffD1], simp add: unat_of_nat64 word_bits_def) + done +qed + +lemmas lookup_fp_ccorres + = lookup_fp_ccorres'[OF refl, THEN ccorres_use_cutMon] + +lemma ccap_relation_case_sum_Null_endpoint: + "ccap_relation (case x of Inl v => NullCap | Inr v => v) ccap + \ (cap_get_tag ccap = scast cap_endpoint_cap) + = (isRight x \ isEndpointCap (theRight x))" + by (clarsimp simp: cap_get_tag_isCap isRight_def isCap_simps + split: sum.split_asm) + +(* 
FIXME AARCH64 move *) +lemma ccorres_catch_bindE_symb_exec_l: + "\ \s. \(=) s\ f \\rv. (=) s\; empty_fail f; + \rv. ccorres_underlying sr G r xf ar axf (Q rv) (Q' rv) hs (catch (g rv) h >>= j) c; + \ex. ccorres_underlying sr G r xf ar axf (R ex) (R' ex) hs (h ex >>= j) c; + \P\ f \Q\,\R\ \ + \ + ccorres_underlying sr G r xf ar axf P {s. (\rv. s \ Q' rv) \ (\ex. s \ R' ex)} hs + (catch (f >>=E g) h >>= j) c" + apply (simp add: catch_def bindE_def bind_assoc lift_def) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l[where G=P]) + apply wpc + apply (simp add: throwError_bind) + apply assumption+ + apply (clarsimp simp: valid_def validE_def split_def split: sum.split_asm) + apply assumption + apply clarsimp + done + +lemma vcpuSwitch_armKSVMIDTable[wp]: + "vcpuSwitch v \\s. P (armKSVMIDTable (ksArchState s))\" + by (wpsimp simp: vcpuSwitch_def split_def modifyArchState_def) + +lemmas vcpuSwitch_typ_ats[wp] = typ_at_lifts [OF vcpuSwitch_typ_at'] + +crunches findVSpaceForASID + for (empty_fail) empty_fail[wp,intro!] + (wp: empty_fail_getObject ignore: withoutFailure) + +(* FIXME AARCH64 move to Monadic_Rewrite, also duplicated in Fastpath_Equiv *) +lemma monadic_rewrite_fail: + "monadic_rewrite True E \ fail g" + by (simp add: monadic_rewrite_def) + +(* FIXME AARCH64 move to Monadic_Rewrite in lib *) +lemma monadic_rewrite_return_eq: + "monadic_rewrite F E (\_. (x = y)) (return x) (return y)" + unfolding monadic_rewrite_def + by fastforce + +(* FIXME AARCH64 taken from EmptyFail_H which is not visible here *) +lemma empty_fail_getObject_ap [intro!, wp, simp]: + "empty_fail (getObject p :: asidpool kernel)" + by (simp add: empty_fail_getObject) + +(* specific ASID has a specific entry in its ASID pool, or no entry meaning it's not in the pool *) +definition asid_has_entry :: "asid \ asidpool_entry option \ kernel_state \ bool" where + "asid_has_entry asid asid_map_entry_opt \ \s. + case armKSASIDTable (ksArchState s) (ucast (asid_high_bits_of (ucast asid))) + of Some ap_ptr \ \pool. ko_at' (asidpool.ASIDPool pool) ap_ptr s + \ pool (asid && mask asid_low_bits) = asid_map_entry_opt + | None \ asid_map_entry_opt = None" + +(* the minimum needed to ensure the given asid is mapped to the given vmid *) +(* FIXME AARCH64 consider rename to asid_has_vsroot *) +definition asid_has_vmid :: "asid \ vmid \ machine_word \ kernel_state \ bool" where + "asid_has_vmid asid vmid vsroot \ asid_has_entry asid (Some (ASIDPoolVSpace (Some vmid) vsroot))" + +lemma getASIDPoolEntry_wp: + "\\s. (\asid_entry. asid_has_entry asid (Some asid_entry) s \ P (Some asid_entry) s) + \ (asid_has_entry asid None s \ P None s) \ + getASIDPoolEntry asid + \\rv s. P rv s \" + unfolding getASIDPoolEntry_def asid_has_entry_def getPoolPtr_def + apply (wpsimp wp: haskell_assert_inv getASID_wp) + apply normalise_obj_at' + apply (rename_tac pool) + apply (case_tac "pool (asid AND mask asid_low_bits)"; simp) + done + +(* after fastpath checks, next time we getVMID (during thread switch) we know what we'll get *) +lemma getVMID_fp_rewrite: + "monadic_rewrite True False + (\s. \vsroot. 
asid_has_vmid asid vmid vsroot s) + (getVMID asid) (return vmid)" + unfolding getVMID_def loadVMID_def getASIDPoolEntry_def getPoolPtr_def liftM_def + apply (clarsimp simp: bind_assoc) + apply monadic_rewrite_pre + apply (rule monadic_rewrite_assert)+ + apply monadic_rewrite_symb_exec_l (* get ASID table *) + apply (subst case_option_If2) + apply (monadic_rewrite_l monadic_rewrite_if_l_True) + apply (clarsimp simp: bind_assoc) + apply monadic_rewrite_symb_exec_l (* get pool *) + apply wpc + apply monadic_rewrite_symb_exec_l (* get entry in pool *) + apply wpc + apply simp + apply (rule monadic_rewrite_fail) (* no entry *) + apply wpc + apply simp + apply (wpc, rule monadic_rewrite_impossible) (* no VMID *) + apply (drule Some_to_the, simp)+ + apply (rule_tac P="the x1 = vmid" in monadic_rewrite_gen_asm, simp) + apply (rule monadic_rewrite_refl) + apply clarsimp + apply (wpsimp wp: getASID_wp)+ + apply (clarsimp simp: asid_has_vmid_def asid_has_entry_def cong: conj_cong split: option.splits) + apply normalise_obj_at' + done + +(* after fastpath checks (during thread switch), returned ASID map entry is known *) +lemma getASIDPoolEntry_rewrite_fp: + "monadic_rewrite True False + (asid_has_vmid asid vmid vsroot) + (getASIDPoolEntry asid) (return (Some (asidpool_entry.ASIDPoolVSpace (Some vmid) vsroot)))" + unfolding getASIDPoolEntry_def getPoolPtr_def + apply monadic_rewrite_pre + apply (clarsimp simp: bind_assoc) + apply (rule monadic_rewrite_assert)+ + apply monadic_rewrite_symb_exec_l (* get ASID table *) + apply wpc + apply (rule monadic_rewrite_impossible) (* not found in ASID table *) + apply (clarsimp simp: liftM_def) + apply monadic_rewrite_symb_exec_l (* pool ptr *) + apply wpc + apply clarsimp + apply (rule monadic_rewrite_return_eq) + apply (wpsimp wp: getASID_wp)+ + apply (clarsimp simp: asid_has_vmid_def asid_has_entry_def split: option.splits) + apply normalise_obj_at' + done + +(* after fastpath checks (during thread switch), returned vspace for ASID is known *) +lemma findVSpaceForASID_rewrite_fp: + "monadic_rewrite True False + (asid_has_vmid asid vmid vsroot) + (findVSpaceForASID asid) (returnOk vsroot)" + unfolding findVSpaceForASID_def + apply monadic_rewrite_pre + apply (clarsimp simp: liftE_bindE) + apply (monadic_rewrite_l getASIDPoolEntry_rewrite_fp[where vmid=vmid and vsroot=vsroot]) + apply (clarsimp simp: assertE_liftE liftE_bindE) + apply (rule monadic_rewrite_assert) + apply (monadic_rewrite_symb_exec_l) (* checkPTAt *) + apply (rule monadic_rewrite_refl) + apply wpsimp+ + done + +(* After fastpath checks, we know the VMID that armContextSwitch will use. + Note: there is some confusion here, because the "asid" on the C side is actually a VMID + (or "hardware asid"), and setVSpaceRoot in Haskell also takes an "asid" that is really + a VMID, while armContextSwitch takes an actual ASID. *) +lemma armv_contextSwitch_HWASID_ccorres: + "ccorres dc xfdc + (\s. 
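+(* Proof shape: monadic_rewrite_ccorres_assemble is used to replace the getVMID lookup
+   inside armContextSwitch by "return vmid" (justified by getVMID_fp_rewrite under the
+   asid_has_vmid precondition), after which only the setVSpaceRoot call remains to be
+   matched against the C body. *)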
asid_has_vmid asid vmid vsroot s \ canonical_address (addrFromPPtr vsroot)) + (\\vspace = pte_Ptr vsroot\ \ \\asid___unsigned_long = ucast (vmid :: vmid) \) hs + (armContextSwitch vsroot asid) + (Call armv_contextSwitch_HWASID_'proc)" + apply (cinit lift: vspace_' asid___unsigned_long_') + apply (rule monadic_rewrite_ccorres_assemble[rotated]) + apply (rule monadic_rewrite_bind_head) + apply (rule getVMID_fp_rewrite[where vmid=vmid]) + apply clarsimp + apply csymbr+ + apply (ctac add: setVSpaceRoot_ccorres) + apply (clarsimp simp: ucast_and_mask_drop canonical_address_and_maskD) + apply blast + done + +lemma modifyArchState_valid_lift: + "\ \s. P s \ P (f s) \ \ modifyArchState f \\s. P (ksArchState s) \" + unfolding modifyArchState_def + by wpsimp + +lemma asid_has_vmid_lift: + assumes asids: "\P. f \\s. P (armKSASIDTable (ksArchState s)) \" + assumes kos: "\ko p. f \\s. ko_at' (ko::asidpool) p s \" + shows "f \ asid_has_vmid asid vmid vsroot \" + unfolding asid_has_vmid_def asid_has_entry_def + apply (subst case_option_If2)+ + apply wp_pre + apply (wps asids) + apply (rule hoare_vcg_if_lift) (* does not work when combined with wpsimp *) + apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift hoare_vcg_conj_lift[OF kos]) + apply auto + done + +lemma vcpuSwitch_asid_has_vmid[wp]: + "vcpuSwitch t \asid_has_vmid asid vmid vsroot\" + unfolding vcpuSwitch_def + by (wpc + | wpsimp wp: asid_has_vmid_lift modifyArchState_valid_lift simp: split_def + | simp add: modifyArchState_def)+ + +(* FIXME AARCH64 for the 0x20, see valid_cnode_cap_cte_at'' *) +lemma switchToThread_fp_ccorres: + "ccorres dc xfdc + (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' and valid_arch_state' + and tcb_at' thread + and cte_wp_at' (\cte. isValidVTableRoot (cteCap cte) + \ capPTBasePtr (capCap (cteCap cte)) = vsroot + \ (\vaddr. capPTMappedAddress (capCap (cteCap cte)) + = Some (asid, vaddr))) + (thread + tcbVTableSlot * 0x20) + and asid_has_vmid asid vmid vsroot) + (\ \thread = tcb_ptr_to_ctcb_ptr thread \ + \ \ \vroot = pte_Ptr vsroot\ + \ \ pte_C.words_C \stored_hw_asid.[unat 0] = ucast vmid \) [] + (do _ <- Arch.switchToThread thread; + setCurThread thread + od) + (Call switchToThread_fp_'proc)" + supply Collect_const[simp del] + apply (cinit' lift: thread_' vroot_' stored_hw_asid_') + apply (simp add: AARCH64_H.switchToThread_def bind_assoc setVMRoot_def + cap_case_isPageTableCap) + apply (simp add: getThreadVSpaceRoot_def locateSlot_conv getSlotCap_def) + apply (rule ccorres_pre_getObject_tcb) + apply (ctac (no_vcg) add: vcpu_switch_ccorres) + apply (rule ccorres_getCTE, rename_tac cte) + apply (rule_tac P="isValidVTableRoot (cteCap cte) + \ capPTBasePtr (capCap (cteCap cte)) = vsroot + \ (\vaddr. 
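+(* Here and below, 0x20 is 2 ^ cte_level_bits, i.e. the byte size of one cte_C, so
+   thread + tcbVTableSlot * 0x20 is the address of the VTable slot in the TCB's CTE region;
+   see valid_cnode_cap_cte_at'' and the cte_size abbreviation suggested further down. *)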
capPTMappedAddress (capCap (cteCap cte)) + = Some (asid, vaddr))" in ccorres_gen_asm) + apply (erule conjE, drule isValidVTableRootD) + apply (rule ccorres_assert) + apply simp + apply (prop_tac "cteCap cte \ capability.NullCap") + apply (clarsimp simp: isArchObjectCap_def) + apply (clarsimp simp: isValidVTableRoot_def2) + (* rewrite findVSpaceForASID within a bindE+catch *) + apply (rule monadic_rewrite_ccorres_assemble[rotated]) + apply (rule monadic_rewrite_bind_head) + apply (rule monadic_rewrite_catch) + apply (monadic_rewrite_l findVSpaceForASID_rewrite_fp[where vmid=vmid and vsroot=vsroot]) + apply (rule monadic_rewrite_refl) + apply (rule monadic_rewrite_refl) + apply wpsimp (* findVSpaceForASID rewrite complete *) + apply clarsimp + apply (simp add: catch_liftE bind_assoc assertE_liftE + flip: bind_liftE_distrib) + apply (rule ccorres_assert2) + apply csymbr + apply (ctac (no_vcg) add: armv_contextSwitch_HWASID_ccorres[where vmid=vmid]) + apply (clarsimp simp: setCurThread_def) + apply (rule ccorres_stateAssert) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: setCurThread_def simpler_modify_def rf_sr_def cstate_relation_def + Let_def carch_state_relation_def cmachine_state_relation_def) + apply (wp hoare_drop_imp) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift')+ + + apply (rule conjI) + (* haskell precondition *) + apply (clarsimp simp: cte_level_bits_def field_simps cte_wp_at_ctes_of + addrFromPPtr_canonical_in_kernel_window split: option.splits) + apply (erule (3) valid_tcb'_vcpuE[OF valid_objs_valid_tcb']) + (* C precondition *) + apply (clarsimp simp: typ_heap_simps' ctcb_relation_tcbVCPU + ucast_and_mask_drop[where n=16, simplified mask_def, simplified]) + done + +lemma thread_state_ptr_set_tsType_np_spec: + defines "ptr s \ cparent \<^bsup>s\<^esup>ts_ptr [''tcbState_C''] :: tcb_C ptr" + shows + "\s. \\ \s. hrs_htd \<^bsup>s\<^esup>t_hrs \\<^sub>t ptr s + \ (tsType_' s = scast ThreadState_Running \ tsType_' s = scast ThreadState_Restart + \ tsType_' s = scast ThreadState_BlockedOnReply)\ + Call thread_state_ptr_set_tsType_np_'proc + {t. (\thread_state. + tsType_CL (thread_state_lift thread_state) = tsType_' s \ + tcbQueued_CL (thread_state_lift thread_state) + = tcbQueued_CL (thread_state_lift (tcbState_C (the (cslift s (ptr s))))) \ + t_hrs_' (globals t) = hrs_mem_update (heap_update (ptr s) + (the (cslift s (ptr s))\tcbState_C := thread_state\)) + (t_hrs_' (globals s)) + )}" + apply (intro allI, rule conseqPre, vcg) + apply (clarsimp simp: ptr_def) + apply (clarsimp simp: h_t_valid_clift_Some_iff) + apply (frule h_t_valid_c_guard_cparent[OF h_t_valid_clift], simp+, + simp add: typ_uinfo_t_def) + apply (frule clift_subtype, simp+) + apply (clarsimp simp: typ_heap_simps' word_sle_def word_sless_def) + apply (subst parent_update_child, erule typ_heap_simps', simp+) + apply (clarsimp simp: typ_heap_simps') + apply (rule exI, rule conjI[OF _ conjI [OF _ refl]]) + apply (simp_all add: thread_state_lift_def) + apply (auto simp: ThreadState_defs mask_def) + done + +(* from the bitfield generator: ep_ref and tsType are stored in the same word, tsType is the lowest + 4 bits, while the at-least-4-aligned ep_ref takes up the next 44 bits (i.e. must be less than + 48 in total, due to canonicity); see structures_64.bf *) +lemma thread_state_ptr_mset_blockingObject_tsType_spec: + defines "ptr s \ cparent \<^bsup>s\<^esup>ts_ptr [''tcbState_C''] :: tcb_C ptr" + shows + "\s. \\ \s. 
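+(* Illustrative packing only (the real layout comes from structures_64.bf): with tsType in
+   the low 4 bits and ep_ref aligned to 2^4 and canonical, the combined word is roughly
+   "ep_ref OR tsType", which is why the guard below only asks for is_aligned ep_ref 4,
+   canonical_address ep_ref and tsType && mask 4 = tsType. *)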
hrs_htd \<^bsup>s\<^esup>t_hrs \\<^sub>t ptr s + \ is_aligned (ep_ref_' s) 4 \ canonical_address (ep_ref_' s) + \ tsType_' s && mask 4 = tsType_' s\ + Call thread_state_ptr_mset_blockingObject_tsType_'proc + {t. (\thread_state. + tsType_CL (thread_state_lift thread_state) = tsType_' s + \ blockingObject_CL (thread_state_lift thread_state) = ep_ref_' s + \ tcbQueued_CL (thread_state_lift thread_state) + = tcbQueued_CL (thread_state_lift (tcbState_C (the (cslift s (ptr s))))) + \ t_hrs_' (globals t) = hrs_mem_update (heap_update (ptr s) + (the (cslift s (ptr s))\tcbState_C := thread_state\)) + (t_hrs_' (globals s)))}" + apply (intro allI, rule conseqPre, vcg) + apply (clarsimp simp: ptr_def) + apply (frule h_t_valid_c_guard_cparent, simp+) + apply (simp add: typ_uinfo_t_def) + apply (clarsimp simp: h_t_valid_clift_Some_iff) + apply (frule clift_subtype, simp+) + apply (clarsimp simp: typ_heap_simps') + apply (subst parent_update_child, erule typ_heap_simps', simp+) + apply (clarsimp simp: typ_heap_simps' word_sless_def word_sle_def) + apply (rule exI, intro conjI[rotated], rule refl) + apply (simp_all add: thread_state_lift_def word_ao_dist canonical_address_mask_shift + canonical_bit_def is_aligned_mask) + apply (simp add: and_mask_eq_iff_shiftr_0 shiftr_and_eq_shiftl) + done + +(* FIXME AARCH64 move and consider name, based on or.left_neutral *) +lemma or_left_neutral_eq: + "x = 0 \ x OR y = y" + by simp + +(* FIXME AARCH64 move and consider name *) +lemma or_right_neutral_eq: + "y = 0 \ x OR y = x" + by simp + +(* FIXME AARCH64 move to word lib *) +lemma smaller_mask_0: + "\ x && mask n = 0; m \ n; k \ n - m \ \ (x >> m) && mask k = 0" + by (metis and_mask_eq_iff_shiftr_0 linorder_linear mask_AND_less_0 mask_eq_0_eq_x mask_exceed + shiftr_mask2 shiftr_over_and_dist) + +(* FIXME AARCH64 move to word lib *) +lemma shiftr_0_shiftr: + "\ x >> n = 0; n \ m \ \ x >> m = 0" + by (metis Groups.add_ac(2) drop_bit_drop_bit le_add_diff_inverse shiftr_0 shiftr_def) + +(* FIXME AARCH64 move to word lib *) +lemma shiftl_shiftr_0_low: + "\ x >> (m - n) = 0 \ \ x << n >> m = 0" + by (metis shiftl_def shiftr_def take_bit_eq_self_iff_drop_bit_eq_0 take_bit_push_bit) + +lemma mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_spec: + defines "ptr s \ cparent \<^bsup>s\<^esup>node_ptr [''cteMDBNode_C''] :: cte_C ptr" + shows + "\s. \\ \s. hrs_htd \<^bsup>s\<^esup>t_hrs \\<^sub>t ptr s + \ is_aligned (mdbNext___unsigned_long_' s) 4 + \ canonical_address (mdbNext___unsigned_long_' s) + \ mdbRevocable___unsigned_long_' s && mask 1 = mdbRevocable___unsigned_long_' s + \ mdbFirstBadged___unsigned_long_' s && mask 1 = mdbFirstBadged___unsigned_long_' s\ + Call mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_'proc + {t. (\mdb_node. 
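+(* The side conditions above (mdbNext aligned to 2^4 and canonical, the two flags each
+   fitting in a single bit) are exactly what the proof below needs to resolve the generated
+   accessor's shift/mask arithmetic; no specific bit positions are asserted here, those come
+   from the bitfield generator. *)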
+ mdb_node_lift mdb_node = mdb_node_lift (cteMDBNode_C (the (cslift s (ptr s)))) + \ mdbNext_CL := mdbNext___unsigned_long_' s, mdbRevocable_CL := mdbRevocable___unsigned_long_' s, + mdbFirstBadged_CL := mdbFirstBadged___unsigned_long_' s \ + \ t_hrs_' (globals t) = hrs_mem_update (heap_update (ptr s) + (the (cslift s (ptr s)) \ cteMDBNode_C := mdb_node \)) + (t_hrs_' (globals s)) + )}" + supply shiftl_of_Suc[simp del] + (* FIXME AARCH64 More_Word.mask_Suc_0 is unnecessary given the other mask_Suc_0, but that one has + a \ intead of a =, probably how this confusion started *) + supply More_Word.mask_Suc_0[simp del] semiring_bit_operations_class.mask_Suc_0[simp del] + apply (intro allI, rule conseqPre, vcg) + apply (clarsimp simp: ptr_def h_t_valid_clift_Some_iff) + apply (frule h_t_valid_c_guard_cparent[OF h_t_valid_clift], simp+, + simp add: typ_uinfo_t_def) + apply (frule clift_subtype, simp+) + apply (clarsimp simp: typ_heap_simps' word_sle_def word_sless_def) + apply (subst parent_update_child, erule typ_heap_simps', simp+) + apply (clarsimp simp: typ_heap_simps') + apply (rule exI, rule conjI[OF _ refl]) + apply (simp add: mdb_node_lift_def word_ao_dist shiftr_over_or_dist) + (* rest of proof is resolving magic number shifts+masks based on fields in bitfield generator *) + apply (simp add: canonical_address_mask_shift canonical_bit_def is_aligned_mask or.assoc) + apply (fold limited_and_def) + apply (simp add: limited_and_simps and_mask2 word_size word_and_mask_shiftl) + apply (simp add: limited_and_def) + apply (intro conjI) + apply (rule or_right_neutral_eq) + apply (simp add: word_or_zero) + apply (simp add: shiftl_shiftr_0_low and_mask_eq_iff_shiftr_0) + apply (fastforce simp: shiftr_0_shiftr[where m=2]) + apply (rule or_left_neutral_eq[OF smaller_mask_0[where m=1 and k=1, simplified]], simp+) + apply (rule or_left_neutral_eq[OF smaller_mask_0[where m=0 and k=1, simplified]], simp+) + done + +lemma mdb_node_ptr_set_mdbPrev_np_spec: + defines "ptr s \ cparent \<^bsup>s\<^esup>node_ptr [''cteMDBNode_C''] :: cte_C ptr" + shows + "\s. \\ \s. hrs_htd \<^bsup>s\<^esup>t_hrs \\<^sub>t ptr s \ is_aligned (mdbPrev___unsigned_long_' s) 4\ + Call mdb_node_ptr_set_mdbPrev_np_'proc + {t. (\mdb_node. + mdb_node_lift mdb_node = mdb_node_lift (cteMDBNode_C (the (cslift s (ptr s)))) + \ mdbPrev_CL := mdbPrev___unsigned_long_' s \ + \ t_hrs_' (globals t) = hrs_mem_update (heap_update (ptr s) + (the (cslift s (ptr s)) \ cteMDBNode_C := mdb_node \)) + (t_hrs_' (globals s)) + )}" + apply (intro allI, rule conseqPre, vcg) + apply (clarsimp simp: ptr_def) + apply (clarsimp simp: h_t_valid_clift_Some_iff) + apply (frule h_t_valid_c_guard_cparent[OF h_t_valid_clift], simp+, + simp add: typ_uinfo_t_def) + apply (frule clift_subtype, simp+) + apply (clarsimp simp: typ_heap_simps') + apply (subst parent_update_child, erule typ_heap_simps', simp+) + apply (clarsimp simp: typ_heap_simps' word_sle_def word_sless_def) + apply (rule exI, rule conjI [OF _ refl]) + apply (simp add: mdb_node_lift_def limited_and_simps mask_def) + done + +lemma endpoint_ptr_mset_epQueue_tail_state_spec: + "\s. \\ \s. hrs_htd \<^bsup>s\<^esup>t_hrs \\<^sub>t ep_ptr_' s + \ is_aligned (epQueue_tail_' s) 4 \ canonical_address (epQueue_tail_' s) + \ state_' s && mask 2 = state_' s\ + Call endpoint_ptr_mset_epQueue_tail_state_'proc + {t. (\endpoint. 
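+(* As with the thread_state setter above, the guards (epQueue_tail aligned to 2^4 and
+   canonical, state confined to 2 bits) reflect that tail and state share a word and can be
+   or-ed together; the proof resolves this via word_ao_dist and the canonical-address mask
+   lemmas. *)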
+ endpoint_lift endpoint = endpoint_lift (the (cslift s (ep_ptr_' s))) + \ endpoint_CL.state_CL := state_' s, epQueue_tail_CL := epQueue_tail_' s \ + \ t_hrs_' (globals t) = hrs_mem_update (heap_update (ep_ptr_' s) + endpoint) + (t_hrs_' (globals s)) + )}" + apply (intro allI, rule conseqPre, vcg) + apply (clarsimp simp: h_t_valid_clift_Some_iff typ_heap_simps' + word_sle_def word_sless_def) + apply (rule exI, rule conjI[OF _ refl]) + apply (simp add: endpoint_lift_def word_ao_dist) + apply (subst canonical_address_mask_shift, (simp add: canonical_bit_def)+) + apply (fold limited_and_def) + apply (simp add: limited_and_simps mask_def) + done + +lemma endpoint_ptr_set_epQueue_head_np_spec: + "\s. \\ \s. hrs_htd \<^bsup>s\<^esup>t_hrs \\<^sub>t ep_ptr_' s \ is_aligned (epQueue_head_' s) 4\ + Call endpoint_ptr_set_epQueue_head_np_'proc + {t. (\endpoint. + endpoint_lift endpoint = endpoint_lift (the (cslift s (ep_ptr_' s))) + \ epQueue_head_CL := epQueue_head_' s \ + \ t_hrs_' (globals t) = hrs_mem_update (heap_update (ep_ptr_' s) + endpoint) + (t_hrs_' (globals s)) + )}" + apply (intro allI, rule conseqPre, vcg) + apply (clarsimp simp: h_t_valid_clift_Some_iff typ_heap_simps' + word_sless_def word_sle_def) + apply (rule exI, rule conjI[OF _ refl]) + apply (simp add: endpoint_lift_def word_ao_dist + mask_def) + done + +lemma ccorres_call_hSkip': + assumes cul: "ccorres_underlying sr \ r xf' r xf' P (i ` P') [SKIP] a (Call f)" + and gsr: "\a b x s t. (x, t) \ sr \ (x, g a b (clean s t)) \ sr" + and csr: "\x s t. (x, t) \ sr \ (x, clean s t) \ sr" + and res: "\a s t rv. r rv (xf' t) \ r rv (xf (g a t (clean s t)))" + and ares: "\s t rv. r rv (xf' t) \ r rv (xf (clean s t))" + and ist: "\x s. (x, s) \ sr \ (x, i s) \ sr" + shows "ccorres_underlying sr \ r xf r xf P P' [SKIP] a (call i f clean (\x y. Basic (g x y)))" + apply (rule ccorresI') + apply (erule exec_handlers.cases, simp_all)[1] + apply clarsimp + apply (erule exec_call_Normal_elim, simp_all)[1] + apply (clarsimp elim!: exec_Normal_elim_cases) + apply (rule ccorresE[OF cul ist], assumption+, simp+) + apply (rule EHAbrupt) + apply (erule(1) exec.Call) + apply (rule EHOther, rule exec.Skip, simp) + apply clarsimp + apply (erule exec_handlers.cases, simp_all)[1] + apply (clarsimp elim!: exec_Normal_elim_cases) + apply (clarsimp elim!: exec_Normal_elim_cases) + apply (erule rev_bexI) + apply (simp add: unif_rrel_simps csr ares) + apply clarsimp + apply (erule exec_call_Normal_elim, simp_all)[1] + apply (clarsimp elim!: exec_Normal_elim_cases) + apply (rule ccorresE[OF cul ist], assumption+, simp+) + apply (rule EHOther, erule(1) exec.Call) + apply simp + apply (simp add: unif_rrel_simps) + apply (erule rev_bexI) + apply (simp add: gsr res) + apply (rule ccorresE[OF cul ist], assumption+, simp+) + apply (rule EHOther, erule(1) exec.Call) + apply simp + apply simp + apply (rule ccorresE[OF cul ist], assumption+, simp+) + apply (rule EHOther, erule(1) exec.Call) + apply simp + apply simp + apply (rule ccorresE[OF cul ist], assumption+, simp+) + apply (rule EHOther, erule exec.CallUndefined) + apply simp + apply simp + done + +(* The naming convention here is that xf', xfr, and xfru are the terms we instantiate *) +lemma ccorres_call_hSkip: + assumes cul: "ccorres_underlying rf_sr \ r xfdc r xfdc A C' [SKIP] a (Call f)" + and ggl: "\x y s. globals (g x y s) = globals s" + and igl: "\s. globals (i s) = globals s" + shows "ccorres_underlying rf_sr \ r xfdc r xfdc + A {s. i s \ C'} [SKIP] a (call i f (\s t. s\globals := globals t\) (\x y. 
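+(* ccorres_call_hSkip reduces a goal about "call i f ... (Basic (g x y))" under a SKIP
+   handler stack to the corresponding goal for "Call f", provided g and i leave the globals
+   unchanged (assumptions ggl and igl); the fastpath proofs below use it for the
+   "rule ccorres_call_hSkip, rule slowpath_ccorres" steps. *)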
Basic (g x y)))" + using cul + unfolding rf_sr_def + apply - + apply (rule ccorres_call_hSkip') + apply (erule ccorres_guard_imp) + apply (clarsimp simp: ggl igl xfdc_def)+ + done + +lemma bind_case_sum_rethrow: + "rethrowFailure fl f >>= case_sum e g + = f >>= case_sum (e \ fl) g" + apply (simp add: rethrowFailure_def handleE'_def bind_assoc) + apply (rule bind_cong[OF refl]) + apply (simp add: throwError_bind split: sum.split) + done + +lemma ccorres_pre_getCTE2: + "(\rv. ccorresG rf_sr \ r xf (P rv) (P' rv) hs (f rv) c) \ + ccorresG rf_sr \ r xf (\s. \cte. ctes_of s p = Some cte \ P cte s) + {s. \cte cte'. cslift s (cte_Ptr p) = Some cte' \ ccte_relation cte cte' + \ s \ P' cte} hs + (getCTE p >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp2, erule ccorres_pre_getCTE) + apply (clarsimp simp: map_comp_Some_iff ccte_relation_def c_valid_cte_def cl_valid_cte_def + c_valid_cap_def) + done + +declare empty_fail_resolveAddressBits[iff] + +(* this condition is copied directly from fastpath_mi_check_spec, which comes out of guards there *) +lemma fastpath_mi_check: + "(\ 4 < mi && 0x1FF) + = (msgExtraCaps (messageInfoFromWord mi) = 0 + \ msgLength (messageInfoFromWord mi) \ scast n_msgRegisters + \ length_CL (seL4_MessageInfo_lift (seL4_MessageInfo_C (FCP (K mi)))) + \ scast n_msgRegisters)" + (is "?P = (?Q \ ?R \ ?S)") +proof - + have le_Q: "?P = (?Q \ ?S)" + apply (simp add: mask_def messageInfoFromWord_def Let_def + msgExtraCapBits_def msgLengthBits_def + seL4_MessageInfo_lift_def fcp_beta n_msgRegisters_def) + apply word_bitwise + apply blast + done + have Q_R: "?S \ ?R" + apply (clarsimp simp: messageInfoFromWord_def Let_def msgLengthBits_def + msgExtraCapBits_def mask_def n_msgRegisters_def + seL4_MessageInfo_lift_def fcp_beta) + apply (subst if_not_P, simp_all) + apply (simp add: msgMaxLength_def linorder_not_less) + apply (erule order_trans, simp) + done + from le_Q Q_R show ?thesis + by blast +qed + +lemma messageInfoFromWord_raw_spec: + "\s. \\ {s} Call messageInfoFromWord_raw_'proc + \\ret__struct_seL4_MessageInfo_C + = (seL4_MessageInfo_C (FCP (K \<^bsup>s\<^esup>w)))\" + apply vcg + apply (clarsimp simp: word_sless_def word_sle_def) + apply (case_tac v) + apply (simp add: cart_eq) + done + +lemma mi_check_messageInfo_raw: + "length_CL (seL4_MessageInfo_lift (seL4_MessageInfo_C (FCP (K mi)))) \ scast n_msgRegisters + \ seL4_MessageInfo_lift (seL4_MessageInfo_C (FCP (K mi))) + = mi_from_H (messageInfoFromWord mi)" + apply (simp add: messageInfoFromWord_def Let_def mi_from_H_def + seL4_MessageInfo_lift_def msgLengthBits_def msgExtraCapBits_def + msgMaxExtraCaps_def shiftL_nat mask_def msgLabelBits_def) + apply (subst if_not_P) + apply (simp add: linorder_not_less msgMaxLength_def n_msgRegisters_def) + apply (erule order_trans, simp) + apply simp + done + +lemma fastpath_mi_check_spec: + "\s. \ \ \s. True\ Call fastpath_mi_check_'proc + \(\ret__int = 0) = (msgExtraCaps (messageInfoFromWord \<^bsup>s\<^esup>msgInfo) = 0 + \ msgLength (messageInfoFromWord \<^bsup>s\<^esup>msgInfo) \ scast n_msgRegisters + \ seL4_MessageInfo_lift (seL4_MessageInfo_C (FCP (K \<^bsup>s\<^esup>msgInfo))) + = mi_from_H (messageInfoFromWord \<^bsup>s\<^esup>msgInfo))\" + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: seL4_MsgLengthBits_def seL4_MsgExtraCapBits_def word_sle_def if_1_0_0) + apply (cut_tac mi="msgInfo_' s" in fastpath_mi_check) + apply (auto intro: mi_check_messageInfo_raw[unfolded K_def]) + done + +lemma isValidVTableRoot_fp_spec: + "\s. 
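+(* Unpacking the constant in fastpath_mi_check above: the low 9 bits of the message info
+   word are msgLength (msgLengthBits = 7) followed by msgExtraCaps (msgExtraCapBits = 2), so
+   "mi && 0x1FF <= 4" holds exactly when no extra caps are supplied and the length fits into
+   the n_msgRegisters (here 4) message registers, matching the Haskell-side conditions. *)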
\ \ {s} Call isValidVTableRoot_fp_'proc + {t. ret__unsigned_long_' t = from_bool (isValidVTableRoot_C (vspace_root_cap_' s))}" + apply vcg + apply (clarsimp simp: word_sle_def word_sless_def cong: if_cong) + (* FIXME AARCH64 how are most of these not in the simpset already? *) + apply (simp add: to_bool_if of_bl_from_bool from_bool_eq_if' from_bool_0) + apply (clarsimp simp: isValidVTableRoot_C_def) + done + +lemma isRecvEP_endpoint_case: + "isRecvEP ep \ case_endpoint f g h ep = f (epQueue ep)" + by (clarsimp simp: isRecvEP_def split: endpoint.split_asm) + +lemma unifyFailure_catch_If: + "catch (unifyFailure f >>=E g) h + = f >>= (\rv. if isRight rv then catch (g (theRight rv)) h else h ())" + apply (simp add: unifyFailure_def rethrowFailure_def + handleE'_def catch_def bind_assoc + bind_bindE_assoc cong: if_cong) + apply (rule bind_cong[OF refl]) + apply (simp add: throwError_bind isRight_def return_returnOk + split: sum.split) + done + +end + +abbreviation "tcb_Ptr_Ptr \ (Ptr :: machine_word \ tcb_C ptr ptr)" + +abbreviation(input) + "ptr_basic_update ptrfun vfun + \ Basic (\s. globals_update (t_hrs_'_update (hrs_mem_update + (heap_update (ptrfun s) (vfun s)))) s)" + +context kernel_m begin + +lemma fastpath_dequeue_ccorres: + "dest1 = dest2 \ dest2 = tcb_ptr_to_ctcb_ptr dest \ ep_ptr1 = ep_Ptr ep_ptr \ + ccorres dc xfdc + (ko_at' (RecvEP (dest # xs)) ep_ptr and invs') + {s. dest2 = tcb_ptr_to_ctcb_ptr dest + \ dest1 = tcb_ptr_to_ctcb_ptr dest + \ ep_ptr1 = ep_Ptr ep_ptr} hs + (setEndpoint ep_ptr (case xs of [] \ IdleEP | _ \ RecvEP xs)) + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t dest1\ + (CALL endpoint_ptr_set_epQueue_head_np(ep_ptr1,ptr_val (h_val (hrs_mem \t_hrs) (tcb_Ptr_Ptr &(dest2\[''tcbEPNext_C''])))));; + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t dest1\ + (IF h_val (hrs_mem \t_hrs) (tcb_Ptr_Ptr &(dest1\[''tcbEPNext_C''])) \ tcb_Ptr 0 THEN + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t h_val (hrs_mem \t_hrs) (tcb_Ptr_Ptr &(dest1\[''tcbEPNext_C'']))\ + (Guard C_Guard {s. s \\<^sub>c dest1} ( + (ptr_basic_update (\s. tcb_Ptr_Ptr &(h_val (hrs_mem (t_hrs_' (globals s))) + (tcb_Ptr_Ptr &(dest1\[''tcbEPNext_C'']))\[''tcbEPPrev_C''])) (\_. 
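+(* The C fragment in this statement is the inlined receiver dequeue: set the endpoint's
+   queue head to dest->tcbEPNext; if that next pointer is non-NULL clear its tcbEPPrev,
+   otherwise reset the endpoint to Idle with a zero tail. The Haskell side is the
+   corresponding setEndpoint with either RecvEP xs or IdleEP. *)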
NULL)))) + ELSE + CALL endpoint_ptr_mset_epQueue_tail_state(ep_ptr1,0,scast EPState_Idle) + FI))" + unfolding setEndpoint_def + apply (rule setObject_ccorres_helper[rotated]) + apply simp + apply (simp add: objBits_simps') + apply (rule conseqPre, vcg) + apply clarsimp + apply (drule(1) ko_at_obj_congD') + apply (frule ko_at_valid_ep', clarsimp) + apply (rule cmap_relationE1[OF cmap_relation_ep], assumption, + erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps' valid_ep'_def + isRecvEP_endpoint_case neq_Nil_conv) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps') + apply (case_tac "xs") + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_endpoint_case + tcb_queue_relation'_def + typ_heap_simps' endpoint_state_defs) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (erule(1) cpspace_relation_ep_update_ep2) + apply (simp add: cendpoint_relation_def endpoint_state_defs) + apply simp + apply (simp add: carch_state_relation_def cmachine_state_relation_def + h_t_valid_clift_Some_iff update_ep_map_tos + typ_heap_simps') + apply (clarsimp simp: neq_Nil_conv cendpoint_relation_def Let_def + isRecvEP_endpoint_case tcb_queue_relation'_def + typ_heap_simps' endpoint_state_defs) + apply (simp add: is_aligned_weaken[OF is_aligned_tcb_ptr_to_ctcb_ptr, simplified ctcb_size_bits_def]) + apply (drule(1) obj_at_cslift_tcb)+ + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + typ_heap_simps' tcb_at_not_NULL[OF obj_at'_weakenE, OF _ TrueI]) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def update_ep_map_tos + update_tcb_map_tos typ_heap_simps') + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + apply (rule ext, simp add: tcb_null_ep_ptrs_def + split: if_split) + apply (rule conjI) + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: Let_def cendpoint_relation_def EPState_Recv_def) + apply (simp add: tcb_queue_relation'_def tcb_queue_update_other) + apply (simp add: isRecvEP_def) + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply (simp add: isRecvEP_def) + apply simp + apply (erule (1) map_to_ko_atI') + apply (simp add: carch_state_relation_def typ_heap_simps' + cmachine_state_relation_def h_t_valid_clift_Some_iff + update_ep_map_tos) + done + +lemma st_tcb_at_not_in_ep_queue: + "\ st_tcb_at' P t s; ko_at' ep epptr s; sym_refs (state_refs_of' s); + ep \ IdleEP; \ts. P ts \ tcb_st_refs_of' ts = {} \ + \ t \ set (epQueue ep)" + apply clarsimp + apply (drule(1) sym_refs_ko_atD') + apply (cases ep, simp_all add: st_tcb_at_refs_of_rev') + apply (fastforce simp: st_tcb_at'_def obj_at'_def)+ + done + +lemma st_tcb_at_not_in_ntfn_queue: + "\ st_tcb_at' P t s; ko_at' ntfn ntfnptr s; sym_refs (state_refs_of' s); ntfnObj ntfn = WaitingNtfn xs; + \ts. 
P ts \ (ntfnptr, TCBSignal) \ tcb_st_refs_of' ts \ + \ t \ set xs" + apply (drule(1) sym_refs_ko_atD') + apply (clarsimp simp: st_tcb_at_refs_of_rev') + apply (drule_tac x="(t, NTFNSignal)" in bspec, simp) + apply (fastforce simp: st_tcb_at'_def obj_at'_def ko_wp_at'_def tcb_bound_refs'_def) + done + +lemma cntfn_relation_double_fun_upd: + "\ cnotification_relation mp ntfn ntfn' + = cnotification_relation (mp(a := b)) ntfn ntfn'; + cnotification_relation (mp(a := b)) ntfn ntfn' + = cnotification_relation (mp(a := b, c := d)) ntfn ntfn' \ + \ cnotification_relation mp ntfn ntfn' + = cnotification_relation (mp(a := b, c := d)) ntfn ntfn'" + by simp + +lemma sym_refs_upd_sD: + "\ sym_refs ((state_refs_of' s) (p := S)); valid_pspace' s; + ko_at' ko p s; refs_of' (injectKO koEx) = S; + objBits koEx = objBits ko \ + \ \s'. sym_refs (state_refs_of' s') + \ (\p' (ko' :: endpoint). ko_at' ko' p' s \ injectKO ko' \ injectKO ko + \ ko_at' ko' p' s') + \ (\p' (ko' :: Structures_H.notification). ko_at' ko' p' s \ injectKO ko' \ injectKO ko + \ ko_at' ko' p' s') + \ (ko_at' koEx p s')" + apply (rule exI, rule conjI) + apply (rule state_refs_of'_upd[where ko'="injectKO koEx" and ptr=p and s=s, + THEN ssubst[where P=sym_refs], rotated 2]) + apply simp+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def) + apply (clarsimp simp: project_inject objBits_def) + apply (clarsimp simp: obj_at'_def ps_clear_upd + split: if_split) + apply (clarsimp simp: project_inject objBits_def) + apply auto + done + +lemma sym_refs_upd_tcb_sD: + "\ sym_refs ((state_refs_of' s) (p := {r \ state_refs_of' s p. snd r = TCBBound})); valid_pspace' s; + ko_at' (tcb :: tcb) p s \ + \ \s'. sym_refs (state_refs_of' s') + \ (\p' (ko' :: endpoint). + ko_at' ko' p' s \ ko_at' ko' p' s') + \ (\p' (ko' :: Structures_H.notification). + ko_at' ko' p' s \ ko_at' ko' p' s') + \ (st_tcb_at' ((=) Running) p s')" + apply (drule(2) sym_refs_upd_sD[where koEx="makeObject\tcbState := Running, tcbBoundNotification := tcbBoundNotification tcb\"]) + apply (clarsimp dest!: ko_at_state_refs_ofD') + apply (simp add: objBits_simps) + apply (erule exEI) + apply clarsimp + apply (auto simp: st_tcb_at'_def elim!: obj_at'_weakenE) + done + +lemma fastpath_enqueue_ccorres: + "\ epptr' = ep_Ptr epptr \ \ + ccorres dc xfdc + (ko_at' ep epptr and (\s. thread = ksCurThread s) + and (\s. sym_refs ((state_refs_of' s) (thread := {r \ state_refs_of' s thread. snd r = TCBBound}))) + and K (\ isSendEP ep) and valid_pspace' and cur_tcb') + UNIV hs + (setEndpoint epptr (case ep of IdleEP \ RecvEP [thread] | RecvEP ts \ RecvEP (ts @ [thread]))) + (\ret__unsigned_longlong :== CALL endpoint_ptr_get_epQueue_tail(epptr');; + \endpointTail :== tcb_Ptr \ret__unsigned_longlong;; + IF \endpointTail = tcb_Ptr 0 THEN + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \ksCurThread\ + (ptr_basic_update (\s. tcb_Ptr_Ptr &((ksCurThread_' (globals s))\[''tcbEPPrev_C''])) (\_. NULL)));; + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \ksCurThread\ + (ptr_basic_update (\s. tcb_Ptr_Ptr &((ksCurThread_' (globals s))\[''tcbEPNext_C''])) (\_. NULL)));; + (CALL endpoint_ptr_set_epQueue_head_np(epptr',ucast (ptr_val \ksCurThread)));; + (CALL endpoint_ptr_mset_epQueue_tail_state(epptr',ucast (ptr_val \ksCurThread), + scast EPState_Recv)) + ELSE + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \endpointTail\ + (ptr_basic_update (\s. tcb_Ptr_Ptr &((endpointTail_' s)\[''tcbEPNext_C''])) + (ksCurThread_' o globals));; + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \ksCurThread\ + (ptr_basic_update (\s. 
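+(* Shape of the enqueue being matched: if the endpoint's tail is NULL, the current thread
+   becomes the sole queue element (head and tail point to it, both link pointers NULL);
+   otherwise it is appended after the old tail (tail->tcbEPNext := curThread,
+   curThread->tcbEPPrev := tail, curThread->tcbEPNext := NULL). In both branches the
+   endpoint state is set to Recv with curThread as the new tail. *)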
tcb_Ptr_Ptr &((ksCurThread_' (globals s))\[''tcbEPPrev_C''])) + endpointTail_'));; + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \ksCurThread\ + (ptr_basic_update (\s. tcb_Ptr_Ptr &((ksCurThread_' (globals s))\[''tcbEPNext_C''])) + (\_. NULL)));; + (CALL endpoint_ptr_mset_epQueue_tail_state(epptr',ucast (ptr_val \ksCurThread), + scast EPState_Recv)) + FI)" + unfolding setEndpoint_def + apply clarsimp + apply (rule setObject_ccorres_helper[rotated]) + apply simp + apply (simp add: objBits_simps') + apply (rule conseqPre, vcg) + apply clarsimp + apply (drule(1) ko_at_obj_congD') + apply (frule ko_at_valid_ep', clarsimp) + apply (rule cmap_relationE1[OF cmap_relation_ep], assumption, + erule ko_at_projectKO_opt) + apply (simp add: cur_tcb'_def) + apply (frule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps' valid_ep'_def rf_sr_ksCurThread) + apply (prop_tac "canonical_address (ptr_val (tcb_ptr_to_ctcb_ptr (ksCurThread \)))") + apply (fastforce intro!: canonical_address_tcb_ptr intro: obj_at'_is_canonical tcb_aligned') + apply (prop_tac "is_aligned (ptr_val (tcb_ptr_to_ctcb_ptr (ksCurThread \))) 4") + apply (meson is_aligned_tcb_ptr_to_ctcb_ptr is_aligned_weaken kernel.ctcb_size_bits_ge_4) + apply (cases ep, + simp_all add: isSendEP_def cendpoint_relation_def Let_def + tcb_queue_relation'_def) + apply (rename_tac list) + apply (clarsimp simp: NULL_ptr_val[symmetric] tcb_queue_relation_last_not_NULL + ct_in_state'_def + dest!: trans [OF sym [OF ptr_val_def] arg_cong[where f=ptr_val]]) + apply (frule obj_at_cslift_tcb[rotated], erule(1) bspec[OF _ last_in_set]) + apply clarsimp + apply (drule(2) sym_refs_upd_tcb_sD) + apply clarsimp + apply (frule st_tcb_at_not_in_ep_queue, + fastforce, simp+) + apply (prop_tac "ksCurThread \ \ last list") + apply clarsimp + apply (clarsimp simp: typ_heap_simps' EPState_Recv_def mask_def + is_aligned_weaken[OF is_aligned_tcb_ptr_to_ctcb_ptr]) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + apply (rule ext, simp add: tcb_null_ep_ptrs_def + split: if_split) + apply (rule conjI) + apply (rule_tac S="tcb_ptr_to_ctcb_ptr ` set (ksCurThread \ # list)" + in cpspace_relation_ep_update_an_ep, + assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Recv_def + tcb_queue_relation'_def) + apply (drule_tac qend="tcb_ptr_to_ctcb_ptr (last list)" + and qend'="tcb_ptr_to_ctcb_ptr (ksCurThread \)" + and tn_update="tcbEPNext_C_update" + and tp_update="tcbEPPrev_C_update" + in tcb_queue_relation_append, + clarsimp+, simp_all)[1] + apply (rule sym, erule init_append_last) + apply (fastforce simp: tcb_at_not_NULL) + apply (clarsimp simp add: tcb_at_not_NULL[OF obj_at'_weakenE[OF _ TrueI]]) + apply clarsimp+ + apply (subst st_tcb_at_not_in_ep_queue, assumption, blast, clarsimp+) + apply (drule(1) ep_ep_disjoint[rotated -1, where epptr=epptr], + blast, blast, + simp_all add: Int_commute endpoint_not_idle_cases image_image)[1] + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cntfn_relation_double_fun_upd) + apply (rule cnotification_relation_ep_queue, assumption+) + apply fastforce + apply (simp add: isRecvEP_def) + apply simp + apply (fastforce dest!: map_to_ko_atI) + + apply (rule cnotification_relation_q_cong) + apply (clarsimp split: if_split) + apply (clarsimp simp: restrict_map_def ntfn_q_refs_of'_def + split: if_split 
Structures_H.notification.split_asm Structures_H.ntfn.split_asm) + apply (erule notE[rotated], erule_tac ntfnptr=p and ntfn=a in st_tcb_at_not_in_ntfn_queue, + auto dest!: map_to_ko_atI)[1] + apply (simp add: carch_state_relation_def typ_heap_simps' update_ep_map_tos + cmachine_state_relation_def h_t_valid_clift_Some_iff) + apply (clarsimp simp: typ_heap_simps' EPState_Recv_def mask_def + is_aligned_weaken[OF is_aligned_tcb_ptr_to_ctcb_ptr]) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (drule(2) sym_refs_upd_tcb_sD) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def update_ep_map_tos + typ_heap_simps' ct_in_state'_def) + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + apply (rule ext, simp add: tcb_null_ep_ptrs_def + split: if_split) + apply (rule conjI) + apply (rule_tac S="{tcb_ptr_to_ctcb_ptr (ksCurThread \)}" + in cpspace_relation_ep_update_an_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Recv_def + tcb_queue_relation'_def) + apply clarsimp+ + apply (erule notE[rotated], erule st_tcb_at_not_in_ep_queue, + auto)[1] + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_q_cong) + apply (clarsimp split: if_split) + apply (clarsimp simp: restrict_map_def ntfn_q_refs_of'_def + split: if_split Structures_H.notification.split_asm Structures_H.ntfn.split_asm) + apply (erule notE[rotated], rule_tac ntfnptr=p and ntfn=a in st_tcb_at_not_in_ntfn_queue, + assumption+, auto dest!: map_to_ko_atI)[1] + apply (simp add: carch_state_relation_def typ_heap_simps' update_ep_map_tos + cmachine_state_relation_def h_t_valid_clift_Some_iff) + done + +lemma setCTE_rf_sr: + "\ (\, s) \ rf_sr; ctes_of \ ptr = Some cte''; + t_hrs_' (globals s') = hrs_mem_update + (heap_update (cte_Ptr ptr) cte') + (t_hrs_' (globals s)); + ccte_relation cte cte'; + (globals s')\ t_hrs_' := undefined \ + = (globals s)\ t_hrs_' := undefined \ \ + \ + \x\fst (setCTE ptr cte \). + (snd x, s') \ rf_sr" + apply (rule fst_setCTE[OF ctes_of_cte_at], assumption) + apply (erule rev_bexI) + apply clarsimp + apply (frule(1) rf_sr_ctes_of_clift) + apply (subgoal_tac "\hrs. globals s' = globals s + \ t_hrs_' := hrs \") + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + typ_heap_simps' cpspace_relation_def) + apply (rule conjI) + apply (erule(2) cmap_relation_updI, simp) + apply (erule_tac t = s'a in ssubst) + apply (simp add: heap_to_user_data_def) + apply (rule conjI) + apply (erule(1) setCTE_tcb_case) + apply (simp add: carch_state_relation_def cmachine_state_relation_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"] + typ_heap_simps' h_t_valid_clift_Some_iff) + apply (cases "globals s", cases "globals s'") + apply simp + done + +lemma getCTE_setCTE_rf_sr: + "\ (\, s) \ rf_sr; ctes_of \ ptr = Some cte; + t_hrs_' (globals s') = hrs_mem_update + (heap_update (cte_Ptr ptr) cte') + (t_hrs_' (globals s)); + ccte_relation (f cte) cte'; + (globals s')\ t_hrs_' := undefined \ + = (globals s)\ t_hrs_' := undefined \ \ + + \ + \x\fst ((do cte \ getCTE ptr; + setCTE ptr (f cte) + od) + \). 
+ (snd x, s') \ rf_sr" + apply (drule setCTE_rf_sr, assumption+) + apply (clarsimp simp: Bex_def in_bind_split in_getCTE2 cte_wp_at_ctes_of) + done + +lemma ccte_relation_eq_ccap_relation: + notes option.case_cong_weak [cong] + shows + "ccte_relation cte ccte + = (ccap_relation (cteCap cte) (cte_C.cap_C ccte) + \ mdb_node_to_H (mdb_node_lift (cteMDBNode_C ccte)) + = (cteMDBNode cte))" + apply (simp add: ccte_relation_def map_option_Some_eq2 cte_lift_def + ccap_relation_def) + apply (simp add: cte_to_H_def split: option.split) + apply (cases cte, clarsimp simp: c_valid_cte_def conj_comms) + done + +lemma cap_reply_cap_ptr_new_np_updateCap_ccorres: + "ccorres dc xfdc + (cte_at' ptr and tcb_at' thread) + ({s. cap_ptr_' s = cap_Ptr &(cte_Ptr ptr \ [''cap_C''])} + \ {s. capTCBPtr___unsigned_long_' s = ptr_val (tcb_ptr_to_ctcb_ptr thread)} + \ {s. capReplyMaster___unsigned_long_' s = from_bool m} + \ {s. capReplyCanGrant___unsigned_long_' s = from_bool canGrant}) [] + (updateCap ptr (ReplyCap thread m canGrant)) + (Call cap_reply_cap_ptr_new_np_'proc)" + apply (rule ccorres_from_vcg, rule allI) + apply (rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of word_sle_def) + apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) + apply (clarsimp simp: updateCap_def split_def typ_heap_simps' + word_sless_def word_sle_def) + apply (erule(1) getCTE_setCTE_rf_sr, simp_all add: packed_heap_update_collapse_hrs typ_heap_simps') + apply (clarsimp simp: ccte_relation_eq_ccap_relation ccap_relation_def c_valid_cap_def) + apply (frule is_aligned_tcb_ptr_to_ctcb_ptr) + apply (rule ssubst[OF cap_lift_reply_cap]) + apply (simp add: cap_get_tag_def cap_reply_cap_def mask_def) + apply (cases m ; cases canGrant ; clarsimp simp: true_def false_def) + apply (simp add: cap_to_H_simps cl_valid_cap_def cap_reply_cap_def + limited_and_simps1[OF lshift_limited_and, OF limited_and_from_bool]) + apply (cases m ; cases canGrant ; clarsimp simp: true_def false_def) + done + +lemma fastpath_copy_mrs_ccorres: +notes nat_min_simps [simp del] +shows + "ccorres dc xfdc (\ and (\_. len <= length AARCH64_H.msgRegisters)) + (UNIV \ {s. unat (length___unsigned_long_' s) = len} + \ {s. src_' s = tcb_ptr_to_ctcb_ptr src} + \ {s. dest_' s = tcb_ptr_to_ctcb_ptr dest}) [] + (forM_x (take len AARCH64_H.msgRegisters) + (\r. do v \ asUser src (getRegister r); + asUser dest (setRegister r v) od)) + (Call fastpath_copy_mrs_'proc)" + apply (rule ccorres_gen_asm) + apply (cinit' lift: length___unsigned_long_' src_' dest_' simp: word_sle_def word_sless_def) + apply (unfold whileAnno_def) + apply (rule ccorres_rel_imp) + apply (rule_tac F="K \" in ccorres_mapM_x_while) + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac xf'="i_'" in ccorres_abstract, ceqv) + apply csymbr + apply (ctac(no_vcg)) + apply ctac + apply wp + apply (clarsimp simp: rf_sr_ksCurThread) + apply (simp add: msgRegisters_ccorres[symmetric] length_msgRegisters) + apply (simp add: n_msgRegisters_def msgRegisters_unfold) + apply (drule(1) order_less_le_trans) + apply ((clarsimp simp: "StrictC'_register_defs" msgRegistersC_def fupdate_def + | drule nat_less_cases' | erule disjE)+)[2] + apply (rule allI, rule conseqPre, vcg) + apply simp + apply (simp add: length_msgRegisters n_msgRegisters_def word_bits_def hoare_TrueI)+ + done + +lemma updateCap_cte_wp_at_cteMDBNode: + "\cte_wp_at' (\cte. P (cteMDBNode cte)) p\ + updateCap ptr cap + \\rv. cte_wp_at' (\cte. 
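+(* fastpath_copy_mrs above is the register-only message transfer: for each of the first
+   "length" entries of msgRegisters, read the register from the sender's user context and
+   write it to the receiver's, roughly (paraphrasing the Haskell, not quoting the C source)
+     for (i = 0; i < length; i++) { setRegister(dest, msgRegisters[i], getRegister(src, msgRegisters[i])); }
+   with length bounded by n_msgRegisters so the C loop bound and the Haskell take agree. *)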
P (cteMDBNode cte)) p\" + apply (wp updateCap_cte_wp_at_cases) + apply (simp add: o_def) + done + +lemma ctes_of_Some_cte_wp_at: + "ctes_of s p = Some cte \ cte_wp_at' P p s = P cte" + by (clarsimp simp: cte_wp_at_ctes_of) + +lemma user_getreg_wp: + "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (user_regs \ atcbContextGet \ tcbArch) tcb r = rv) t s + \ Q rv s)\ + asUser t (getRegister r) \Q\" + apply (rule_tac Q="\rv s. \rv'. rv' = rv \ Q rv' s" in hoare_post_imp) + apply simp + apply (rule hoare_pre, wp hoare_vcg_ex_lift user_getreg_rv) + apply (clarsimp simp: obj_at'_def) + done + +lemma ccorres_flip_Guard2: + assumes cc: "ccorres_underlying sr \ r xf arrel axf A C hs a (Guard F S (Guard F1 S1 c) ;; d)" + shows "ccorres_underlying sr \ r xf arrel axf A C hs a (Guard F1 S1 (Guard F S c) ;; d)" + apply (rule ccorres_name_pre_C) + using cc + apply (case_tac "s \ (S1 \ S)") + apply (clarsimp simp: ccorres_underlying_def) + apply (erule exec_handlers.cases; + fastforce elim!: exec_Normal_elim_cases intro: exec_handlers.intros exec.Guard exec.Seq) + apply (clarsimp simp: ccorres_underlying_def) + apply (case_tac "s \ S") + apply (fastforce intro: exec.Guard exec.GuardFault exec_handlers.intros exec.Seq) + apply (fastforce intro: exec.Guard exec.GuardFault exec_handlers.intros exec.Seq) + done + +lemmas cte_C_numeral_fold = + cte_C_size[THEN meta_eq_to_obj_eq, + THEN arg_cong[where f="of_nat :: _ \ machine_word"], simplified, symmetric] + +lemmas ccorres_move_c_guard_tcb_ctes2 = ccorres_move_c_guard_tcb_ctes[unfolded cte_C_numeral_fold] + +lemma setUntypedCapAsFull_replyCap[simp]: + "setUntypedCapAsFull cap (ReplyCap curThread False cg) slot = return ()" + by (clarsimp simp:setUntypedCapAsFull_def isCap_simps) + +end + +context kernel_m begin + +lemma obj_at_bound_tcb_grandD: + "\ obj_at' P t s; valid_objs' s; no_0_obj' s; (s, s') \ rf_sr \ + \ \tcb tcb' ntfn ntfn'. ko_at' tcb t s \ P tcb + \ cslift s' (tcb_ptr_to_ctcb_ptr t) = Some tcb' + \ ctcb_relation tcb tcb' + \ ((tcbBoundNotification_C tcb' = NULL) = (tcbBoundNotification tcb = None)) + \ (tcbBoundNotification tcb \ None \ ko_at' ntfn (the (tcbBoundNotification tcb)) s) + \ (tcbBoundNotification tcb \ None \ cslift s' (tcbBoundNotification_C tcb') = Some ntfn') + \ (tcbBoundNotification tcb \ None \ cnotification_relation (cslift s') ntfn ntfn')" + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule(1) obj_at_cslift_tcb, clarsimp) + apply (rule exI, rule conjI, assumption) + apply (clarsimp simp: ctcb_relation_def + option_to_ptr_def option_to_0_def) + apply (simp add: return_def split: option.split_asm) + apply (drule_tac s="ntfn_Ptr x"for x in sym) + apply (drule(1) ko_at_valid_objs', clarsimp simp: ) + apply (clarsimp simp: valid_obj'_def valid_tcb'_def) + apply (drule obj_at_ko_at', clarsimp) + apply (rule conjI, clarsimp) + apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption, erule ko_at_projectKO_opt) + apply auto + done + +lemma cnotification_relation_isActive: + "cnotification_relation tcbs ntfn ntfn' + \ (notification_CL.state_CL (notification_lift ntfn') = scast NtfnState_Active) + = EndpointDecls_H.isActive ntfn" + apply (clarsimp simp: cnotification_relation_def Let_def) + apply (cases ntfn, simp) + apply (rename_tac ntfna ooeuoue) + apply (case_tac ntfna, simp_all add: notification_state_defs isActive_def) + done + +lemma option_case_liftM_getNotification_wp: + "\\s. \rv. (case x of None \ rv = v | Some p \ obj_at' (\ntfn. 
f ntfn = rv) p s) + \ Q rv s\ case x of None \ return v | Some ptr \ liftM f $ getNotification ptr \ Q \" + apply (rule hoare_pre, (wpc; wp getNotification_wp)) + apply (auto simp: obj_at'_def) + done + +lemma threadSet_st_tcb_at_state: + "\\s. tcb_at' t s \ (if p = t + then obj_at' (\tcb. P (tcbState (f tcb))) t s + else st_tcb_at' P p s)\ + threadSet f t \\_. st_tcb_at' P p\" + apply (rule hoare_chain) + apply (rule threadSet_obj_at'_really_strongest) + prefer 2 + apply (simp add: st_tcb_at'_def) + apply (clarsimp split: if_splits simp: st_tcb_at'_def o_def) + done + +lemma recv_ep_queued_st_tcb_at': + "\ ko_at' (Structures_H.endpoint.RecvEP ts) epptr s ; + t \ set ts; + sym_refs (state_refs_of' s) \ + \ st_tcb_at' isBlockedOnReceive t s" + apply (drule obj_at_ko_at') + apply clarsimp + apply (drule (1) sym_refs_ko_atD') + apply (clarsimp simp: pred_tcb_at'_def obj_at'_real_def refs_of_rev') + apply (erule_tac x=t in ballE; clarsimp?) + apply (erule ko_wp_at'_weakenE) + apply (clarsimp simp: isBlockedOnReceive_def ) + done + +lemma signed_n_msgRegisters_to_H: + "(signed n_msgRegisters :: machine_word) = of_nat size_msgRegisters" + by (simp add: n_msgRegisters_def size_msgRegisters_def) + +(* FIXME AARCH64 isValidVTableRootD is too weak, doesn't enforce pt_t *) +lemma isValidVTableRootD': + "isValidVTableRoot cap + \ isArchObjectCap cap \ isArchVSpacePTCap cap + \ capPTMappedAddress (capCap cap) \ None" + by (simp add: isValidVTableRoot_def isVTableRoot_def isCap_simps + split: capability.split_asm arch_capability.split_asm + option.split_asm) + +(* FIXME AARCH64 in this file, we see ptr + 0x20 * tcbVTableSlot a lot, where the 0x20 is + 2 ^ cte_level_bits + This used to be 0x10 on 32-bit, so unlikely to change soon, but still might be worth a cleanup + or abbreviation. This is one way it could work (and can go outside Arch locale): *) +declare cte_level_bits_def[code] +value_abbreviation (input) cte_size "(2::machine_word) ^ cte_level_bits" + +lemma casid_map_relation_get_tag_None: + "(casid_map_relation amap_opt amap') + \ (asid_map_get_tag amap' = scast asid_map_asid_map_none) = (amap_opt = None)" + apply (cases amap_opt, clarsimp) + apply (rename_tac amap) + apply (case_tac amap, clarsimp simp: casid_map_relation_vspace_tag) + done + +lemma vspace_cap_capUntypedPtr_capPTBasePtr: + "isArchVSpacePTCap cap \ capUntypedPtr cap = capPTBasePtr (capCap cap)" + unfolding isArchVSpacePTCap_def + by (clarsimp split: capability.splits arch_capability.splits) + +(* FIXME AARCH64 move *) +lemma setObject_tcb_asidpool_obj_at'[wp]: + "\obj_at' (P :: asidpool \ bool) ptr\ setObject ptr' (tcb :: tcb) \\rv. obj_at' P ptr\" + apply (rule obj_at_setObject2, simp_all) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +(* FIXME AARCH64 move *) +crunch asidpool_obj_at'[wp]: setThreadState "obj_at' (P :: asidpool \ bool) ptr" + (simp: unless_def) + +(* FIXME AARCH64 move, used to be in CNodeInv_R *) +lemma updateMDB_cte_wp_at_other: + "\cte_wp_at' P p and (\s. m \ p)\ + updateMDB m f + \\uu. cte_wp_at' P p\" + unfolding updateMDB_def + by (wpsimp wp: setCTE_cte_wp_at_other)+ + +(* This is needed since the fast path takes the capVSBasePtr_CL of a cap before it knows it's + a vspace cap, then it checks that it's a vspace cap afterwards. Using the normal spec rule would + result in an unprovable obligation that we're reading a vspace cap *) +lemma cap_vspace_cap_get_capVSBasePtr_spec2: + "\s. \\ \s. 
True\ + Call cap_vspace_cap_get_capVSBasePtr_'proc + \cap_get_tag \<^bsup>s\<^esup>cap = scast cap_vspace_cap + \ \ret__unsigned_longlong = capVSBasePtr_CL (cap_vspace_cap_lift \<^bsup>s\<^esup>cap)\" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply vcg + apply (clarsimp simp: word_sle_def word_sless_def + cap_vspace_cap_lift_def + cap_lift_vspace_cap mask_def) + done + +lemma asUser_obj_at_asidpool[wp]: + "\obj_at' (P :: asidpool \ bool) t\ + asUser t' f + \\rv. obj_at' P t\" + apply (simp add: asUser_def threadGet_stateAssert_gets_asUser) + apply (wp threadSet_ko_wp_at2', clarsimp) (* FIXME AARCH64 wpsimp fails here *) + done + +(* FIXME AARCH64 original setCTE_asidpool' is too specific by demanding a constructor! *) +lemma setCTE_asidpool'[wp]: + "\ ko_at' (ko :: asidpool) p \ setCTE c p' \\_. ko_at' ko p\" + by (cases ko, simp) + (wp setCTE_asidpool') + +lemma updateMDB_ko_at'_asidpool[wp]: + "\ko_at' (ko :: asidpool) p \ updateMDB ptr f \\_. ko_at' ko p \" + unfolding updateMDB_def Let_def + by (wpsimp wp: setCTE_asidpool') + +lemma isArchVSpacePTCap_def2: + "isArchVSpacePTCap cap + = (\base map_data. cap = ArchObjectCap (PageTableCap base VSRootPT_T map_data))" + by (clarsimp simp: isArchVSpacePTCap_def + split: capability.splits arch_capability.splits pt_type.splits) + +lemma capVSBasePtr_CL_capUntypedPtr_helper: + "\ ccap_relation cap cap'; isValidVTableRoot cap \ + \ capVSBasePtr_CL (cap_vspace_cap_lift cap') = global.capUntypedPtr cap" + by (clarsimp simp: cap_get_tag_isCap vspace_cap_capUntypedPtr_capPTBasePtr + isArchVSpacePTCap_def2 ccap_relation_vspace_base + dest!: isValidVTableRootD') + +lemma casid_map_relation_Some_get_tag: + "casid_map_relation (Some asid_entry) asid_entry' + \ asid_map_get_tag asid_entry' = signed asid_map_asid_map_vspace" + by (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def + split: option.splits if_splits asid_map_CL.splits) + +lemma setObject_endpoint_asidpool_obj_at'[wp]: + "\obj_at' (P :: asidpool \ bool) ptr\ setObject ptr' (ep :: endpoint) \\rv. obj_at' P ptr\" + apply (rule obj_at_setObject2, simp_all) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma setEndpoint_obj_at'_asidpool[wp]: + "\obj_at' (P :: asidpool \ bool) t \ setEndpoint ptr (e::endpoint) \\_. obj_at' (P :: asidpool \ bool) t\" + by (clarsimp simp: setEndpoint_def, wp) + +lemma casid_map_relation_Some_c_lift_eqs: + "\ casid_map_relation (Some asid_map_entry) asid_map_entry' \ + \ (apVMID asid_map_entry = Some vmid + \ stored_hw_vmid_CL (asid_map_asid_map_vspace_lift asid_map_entry') = ucast vmid) + \ (stored_vmid_valid_CL (asid_map_asid_map_vspace_lift asid_map_entry') = 0) + = (apVMID asid_map_entry = None) + \ vspace_root_CL (asid_map_asid_map_vspace_lift asid_map_entry') = apVSpace asid_map_entry" + apply (clarsimp simp: casid_map_relation_def casid_map_relation_Some_get_tag + to_bool_def asid_map_asid_map_vspace_lift_def + split: option.splits asid_map_CL.splits) + apply (clarsimp simp: asid_map_lift_def Let_def ucast_ucast_mask + split: if_splits) + done + +(* when we know P is true, but we can't use subst or clarsimp due to schematics *) +lemma ccorres_If_True_drop: + "\ P; ccorres_underlying sr Gamm r xf arrel axf R R' hs a c \ + \ ccorres_underlying sr Gamm r xf arrel axf R R' hs (If P a b) c" + by simp + +lemma fastpath_call_ccorres: + notes hoare_TrueI[simp] if_cong[cong] option.case_cong[cong] + notes from_bool_0[simp] (* FIXME AARCH64 should go in simpset *) + shows "ccorres dc xfdc + (\s. 
invs' s \ ct_in_state' ((=) Running) s + \ obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb AARCH64_H.capRegister = cptr + \ (user_regs o atcbContextGet o tcbArch) tcb AARCH64_H.msgInfoRegister = msginfo) + (ksCurThread s) s) + ({s. cptr_' s = cptr} \ {s. msgInfo_' s = msginfo}) [] + (fastpaths SysCall) (Call fastpath_call_'proc)" +proof - + have [simp]: "scast Kernel_C.tcbCaller = tcbCallerSlot" + by (simp add: Kernel_C.tcbCaller_def tcbCallerSlot_def) + have [simp]: "scast Kernel_C.tcbVTable = tcbVTableSlot" + by (simp add: Kernel_C.tcbVTable_def tcbVTableSlot_def) + + have tcbs_of_cte_wp_at_vtable: + "\s tcb ptr. tcbs_of s ptr = Some tcb \ + cte_wp_at' \ (ptr + 0x20 * tcbVTableSlot) s" + apply (clarsimp simp: tcbs_of_def cte_at'_obj_at' + split: if_splits) + apply (drule_tac x = "0x20 * tcbVTableSlot" in bspec) + apply (simp add: tcb_cte_cases_def tcbVTableSlot_def cteSizeBits_def) + apply simp + done + + have tcbs_of_cte_wp_at_caller: + "\s tcb ptr. tcbs_of s ptr = Some tcb \ + cte_wp_at' \ (ptr + 0x20 * tcbCallerSlot) s" + apply (clarsimp simp: tcbs_of_def cte_at'_obj_at' + split: if_splits) + apply (drule_tac x = "0x20 * tcbCallerSlot" in bspec) + apply (simp add: tcb_cte_cases_def tcbCallerSlot_def cteSizeBits_def) + apply simp + done + + have tcbs_of_aligned': + "\s ptr tcb. \tcbs_of s ptr = Some tcb;pspace_aligned' s\ \ is_aligned ptr tcbBlockSizeBits" + apply (clarsimp simp: tcbs_of_def obj_at'_def split: if_splits) + apply (drule pspace_alignedD') + apply simp+ + apply (simp add: projectKO_opt_tcb objBitsKO_def + split: Structures_H.kernel_object.splits) + done + + show ?thesis + supply if_cong[cong] option.case_cong[cong] Collect_const[simp del] + apply (cinit lift: cptr_' msgInfo_') + (* this also lifts out pickFastpath alternative to general alternative, but not clear what + pickFastpath is for *) + apply (simp add: catch_liftE_bindE unlessE_throw_catch_If + unifyFailure_catch_If catch_liftE + getMessageInfo_def alternative_bind + cong: if_cong call_ignore_cong) + apply (rule ccorres_pre_getCurThread) + apply (rename_tac curThread) + apply (rule ccorres_symb_exec_l3[OF _ user_getreg_inv' _ empty_fail_user_getreg])+ + apply (rename_tac msginfo' cptr') + apply (rule_tac P="msginfo' = msginfo \ cptr' = cptr" in ccorres_gen_asm) + (* the call_ignore_cong in this proof is required to prevent corruption of arguments in + endpoint_ptr_mset_epQueue_tail_state_'proc so that eventually fastpath_dequeue_ccorres + can apply *) + apply (simp cong: call_ignore_cong) + apply (simp only:) + apply (csymbr, csymbr) + (* get fault type *) + apply csymbr + apply (rule_tac r'="\ft ft'. (ft' = scast seL4_Fault_NullFault) = (ft = None)" and + xf'=ret__unsigned_longlong_' in ccorres_split_nothrow) + apply (rule_tac P="cur_tcb' and (\s. 
curThread = ksCurThread s)" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread) + apply (drule(1) obj_at_cslift_tcb, clarsimp) + apply (clarsimp simp: typ_heap_simps' ctcb_relation_def cfault_rel_def) + apply (rule rev_bexI, erule threadGet_eq) + apply (clarsimp simp: seL4_Fault_lift_def Let_def split: if_split_asm) + apply ceqv + apply (rename_tac fault fault') + apply (rule ccorres_alternative1) (* pick pickFastpath = True, still not clear what it's for *) + apply csymbr + apply csymbr + apply (simp cong: call_ignore_cong) + apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\, rotated -1]) + (* fault, message info overflows registers, or extra caps present: abort *) + apply (vcg exspec=slowpath_noreturn_spec) + apply (clarsimp simp: signed_n_msgRegisters_to_H messageInfoFromWord_def Let_def mi_from_H_def + seL4_MessageInfo_lift_def msgLengthBits_def msgExtraCapBits_def + msgMaxExtraCaps_def shiftL_nat mask_def msgLabelBits_def) + apply (fastforce simp: size_msgRegisters_def msgMaxLength_def split: if_splits) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres) + apply simp + apply simp + + apply (simp cong: call_ignore_cong) + apply (elim conjE) + (* look up invoked cap *) + apply (rule ccorres_abstract_ksCurThread, ceqv) + apply (rule_tac P="ct = curThread" in ccorres_gen_asm, simp only:, thin_tac "ct = curThread") + apply (simp add: getThreadCSpaceRoot_def locateSlot_conv cong: call_ignore_cong) + apply (rule ccorres_pre_getCTE2) + apply (rule ccorres_move_array_assertion_tcb_ctes + ccorres_move_c_guard_tcb_ctes2 + ccorres_move_const_guard + ccorres_rhs_assoc)+ + apply (ctac add: lookup_fp_ccorres) + apply (rename_tac luRet ep_cap) + apply (rule ccorres_abstract_ksCurThread, ceqv) + apply (rule_tac P="ct = curThread" in ccorres_gen_asm, simp only:, thin_tac "ct = curThread") + apply (rule ccorres_move_array_assertion_tcb_ctes | simp cong: call_ignore_cong)+ + (* check invoked cap *) + apply (csymbr, csymbr) + apply (simp add: ccap_relation_case_sum_Null_endpoint of_bl_from_bool cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* not an endpoint cap *) + apply (simp cong: if_cong) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (erule disjE; simp; rule slowpath_ccorres) + apply simp + apply simp + apply (vcg exspec=slowpath_noreturn_spec) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (simp add: isRight_case_sum cong: call_ignore_cong) + apply (elim conjE) + apply (frule (1) cap_get_tag_isCap[THEN iffD2]) + apply (simp add: ccap_relation_ep_helpers cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* can't send to ep *) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + (* get the endpoint address *) + apply (csymbr, csymbr) + apply (simp add: ccap_relation_ep_helpers cong: call_ignore_cong) + (* get destination thread from ep queue, get endpoint / get endpoint state on C side *) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) + apply (rule_tac xf'="\s. (dest_' s, ret__unsigned_longlong_' s)" + and r'="\ep v. 
snd v = scast EPState_Recv = isRecvEP ep + \ (isRecvEP ep \ epQueue ep \ [] + \ fst v = tcb_ptr_to_ctcb_ptr (hd (epQueue ep)))" + in ccorres_split_nothrow) + apply (rule ccorres_add_return2) + apply (rule ccorres_pre_getEndpoint, rename_tac ep) + apply (rule_tac P="ko_at' ep (capEPPtr (theRight luRet)) and valid_objs'" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (erule cmap_relationE1[OF cmap_relation_ep], erule ko_at_projectKO_opt) + apply (frule(1) ko_at_valid_ep') + apply (clarsimp simp: typ_heap_simps') + apply (simp add: cendpoint_relation_def Let_def isRecvEP_def + endpoint_state_defs valid_ep'_def + split: endpoint.split_asm) + apply (clarsimp simp: tcb_queue_relation'_def neq_Nil_conv) + apply (rule ceqv_tuple2) + apply ceqv + apply ceqv + apply (rename_tac send_ep dest_and_ep_state') + apply (rule_tac P="ko_at' send_ep (capEPPtr (theRight luRet)) + and valid_objs'" in ccorres_cross_over_guard) + apply (simp cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* ep state not ready to receive *) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + + (* get vspace root *) + apply (simp add: getThreadVSpaceRoot_def locateSlot_conv cong: call_ignore_cong) + apply (rule ccorres_move_c_guard_tcb_ctes2 + ccorres_move_array_assertion_tcb_ctes + ccorres_move_const_guard)+ + apply (rule_tac var="newVTable_'" and var_update="newVTable_'_update" + in getCTE_h_val_ccorres_split[where P=\]) + apply simp + apply ceqv + apply (rename_tac vs_cap vs_cap') + (* get capVSBasePtr from the vspace cap on C side (we don't know it's vspace cap yet) *) + apply (rule ccorres_symb_exec_r) (* can't use csymbr, we need use alternative spec rule *) + apply (rule_tac xf'=ret__unsigned_longlong_' in ccorres_abstract, ceqv) + apply (rename_tac vspace_cap_c_ptr_maybe) + apply csymbr+ + apply (simp add: isValidVTableRoot_conv cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* \ isValidVTableRoot (cteCap vs_cap) *) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + + (* AARCH64+hyp specific *) + apply (drule isValidVTableRootD') + (* we now know base address we read on the C side was from a vspace cap *) + apply (rule_tac P="vspace_cap_c_ptr_maybe = capUntypedPtr (cteCap vs_cap)" + in ccorres_gen_asm2) + apply (simp cong: call_ignore_cong) + + (* C: get the asid *) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=asid___unsigned_long_' + and val="fst (the (capPTMappedAddress (capCap (cteCap vs_cap))))" + in ccorres_symb_exec_r_known_rv[where R=\ and R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: cap_get_tag_isCap isArchVSpacePTCap_def2 + ccap_relation_vspace_mapped_asid[symmetric]) + apply ceqv + (* get asid_entry for asid *) + apply (ctac add: findMapForASID_ccorres) + apply (rename_tac asid_map_entry_opt asid_map_entry') + apply csymbr + apply csymbr + apply (simp add: casid_map_relation_get_tag_None cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* no asid map entry for asid *) + apply simp + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: 
call_ignore_cong) + + apply (rule_tac xf'=ret__int_' + and val="from_bool (apVSpace (the asid_map_entry_opt) + \ capPTBasePtr (capCap (cteCap vs_cap)))" + in ccorres_symb_exec_r_known_rv[where R=\ and R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: from_bool_eq_if' vspace_cap_capUntypedPtr_capPTBasePtr + casid_map_relation_Some_get_tag + casid_map_relation_Some_c_lift_eqs) + apply ceqv + apply (simp cong: call_ignore_cong) + + (* C does two checks/aborts, but Haskell only one, so direct If-condition + equivalence doesn't match. Step over C side, resolve Haskell check using + resulting non-abort conditions. *) + apply (rule ccorres_Cond_rhs_Seq) + (* vspace in asid map entry doesn't match vspace cap *) + apply clarsimp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + (* we can't directly relate getting stored_vmid_valid_CL to None/Some on the + Haskell side, but we can relate equating the result to 0 (i.e. no valid vmid) *) + apply csymbr + apply (simp cong: call_ignore_cong) + apply (rule_tac C'="{_. apVMID (the asid_map_entry_opt) = None}" + in ccorres_rewrite_cond_sr_Seq[where Q=\ and Q'=UNIV]) + apply (clarsimp simp: casid_map_relation_Some_c_lift_eqs) + apply (rule ccorres_Cond_rhs_Seq) + (* no valid VMID *) + apply clarsimp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + (* ap_entry/VMID check on Haskell side is now trivially true with clarsimp, but we + have to avoid exploding our free variables to avoid having to wrap them + back up during wp reasoning (e.g. \y. asid_map_entry_opt = Some y) *) + apply (simp cong: call_ignore_cong) + apply (rule ccorres_If_True_drop, solves clarsimp) + + (* store VMID in first word of stored_hw_asid, which will be used later on the C side *) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="ucast (the (apVMID (the asid_map_entry_opt)))" + in ccorres_symb_exec_r_known_rv[where R=\ and R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: casid_map_relation_Some_get_tag + casid_map_relation_Some_c_lift_eqs) + apply ceqv + apply (simp cong: call_ignore_cong) + apply csymbr + + (* checking for highest priority *) + apply (rule ccorres_abstract_ksCurThread, ceqv) + apply (rule_tac P="ct = curThread" in ccorres_gen_asm, simp only:, thin_tac "ct = curThread") + apply (simp cong: call_ignore_cong if_cong) + apply (ctac add: getCurDomain_maxDom_ccorres_dom_') + apply (rename_tac curDom curDom') + apply (rule ccorres_move_c_guard_tcb ccorres_move_const_guard)+ + apply (simp add: prio_and_dom_limit_helpers cong: call_ignore_cong) + apply (rule ccorres_pre_threadGet) + apply (rule ccorres_pre_threadGet) + apply (rename_tac curPrio destPrio) + (* use isHighestPrio on the left, but entire if condition on the right *) + apply (rule ccorres_rhs_assoc2) + (* we can do this with just knowledge from abstract and state relation, + which avoids some False schematic instantiation on the C side *) + apply (rule_tac xf'=ret__int_' + and r'="\hi rv'. 
rv' = from_bool (\ (curPrio \ destPrio \ hi))" + and P'=UNIV in ccorres_split_nothrow) + apply (rule ccorres_guard_impR) + apply (simp add: from_bool_eq_if from_bool_eq_if' if_1_0_0 + ccorres_IF_True) + (* but as a result, we have to duplicate some info to the C side *) + apply (rule_tac P="obj_at' ((=) curPrio \ tcbPriority) curThread + and obj_at' ((=) destPrio \ tcbPriority) + (hd (epQueue send_ep)) + and (\s. ksCurThread s = curThread)" + in ccorres_cross_over_guard) + apply (rule_tac xf'=ret__int_' and val="from_bool (destPrio < curPrio)" + and R="obj_at' ((=) curPrio \ tcbPriority) curThread + and obj_at' ((=) destPrio \ tcbPriority) + (hd (epQueue send_ep)) + and (\s. ksCurThread s = curThread)" + and R'=UNIV in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply clarsimp + apply (simp add: from_bool_eq_if from_bool_eq_if' ccorres_IF_True) + apply (drule(1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps' rf_sr_ksCurThread) + apply (simp add: ctcb_relation_unat_tcbPriority_C + word_less_nat_alt linorder_not_le) + apply ceqv + apply (simp add: from_bool_eq_if from_bool_eq_if' ccorres_IF_True ) + + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_Guard_Seq) + apply (rule ccorres_add_return2) + apply (ctac add: isHighestPrio_ccorres) + apply (simp add: from_bool_eq_if from_bool_eq_if' ccorres_IF_True ) + apply (clarsimp simp: to_bool_def) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: from_bool_eq_if' word_le_not_less) + apply (clarsimp simp: return_def) + apply (rule wp_post_taut) + apply (vcg exspec=isHighestPrio_modifies) + apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) + apply (clarsimp simp: isHighestPrio_def' simpler_gets_def) + apply (rule conseqPre, vcg) + apply clarsimp + apply clarsimp + apply vcg + apply (simp add: from_bool_eq_if from_bool_eq_if' ccorres_IF_True) + apply clarsimp + apply (drule(1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps' rf_sr_ksCurThread) + apply (simp add: ctcb_relation_unat_tcbPriority_C ctcb_relation_tcbPriority + word_less_nat_alt linorder_not_le) + apply ceqv + apply (simp cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: bindE_assoc catch_throwError ) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac val="from_bool (\ (capEPCanGrant (theRight luRet) + \ capEPCanGrantReply (theRight luRet)))" + and xf'=ret__int_' and R=\ and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply (clarsimp, rule conseqPre, vcg) + apply (fastforce simp: ccap_relation_ep_helpers from_bool_eq_if') + apply ceqv + apply (rule ccorres_Cond_rhs_Seq) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + (* Note: AArch32 has pde_pde_invalid_get_stored_asid_valid check here *) + apply (simp cong: call_ignore_cong) + apply (rule ccorres_move_c_guard_tcb ccorres_move_const_guard)+ + apply (rule ccorres_pre_threadGet) + apply (rename_tac destDom) + (* The C does not compare domains when maxDomain is 0, since then both + threads will be in the current domain. 
Since we can show both threads + must be \ maxDomain, we can rewrite this test to only comparing domains + even when maxDomain is 0, making the check identical to the Haskell. *) + apply (rule_tac C'="{s. destDom \ curDom}" + and Q="obj_at' ((=) destDom \ tcbDomain) (hd (epQueue send_ep)) + and (\s. ksCurDomain s = curDom \ curDom \ maxDomain + \ destDom \ maxDomain)" + and Q'=UNIV in ccorres_rewrite_cond_sr_Seq) + apply (simp add: from_bool_eq_if from_bool_eq_if' ccorres_IF_True) + apply clarsimp + apply (drule(1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps' rf_sr_ksCurDomain) + apply (drule ctcb_relation_tcbDomain[symmetric]) + apply (case_tac "0 < maxDomain" + ; solves \clarsimp simp: maxDom_sgt_0_maxDomain not_less\) + apply (rule ccorres_seq_cond_raise[THEN iffD2]) + apply (rule_tac R=\ in ccorres_cond2', blast) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + + (* Now fully committed to fastpath *) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) + apply (simp only: ucast_id tl_drop_1 One_nat_def scast_0) + apply (rule fastpath_dequeue_ccorres) + apply simp + apply ceqv + apply csymbr + apply csymbr + apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) + apply (rule_tac P="cur_tcb' and (\s. ksCurThread s = curThread)" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps') + apply (rule rev_bexI, erule threadSet_eq) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps' + update_tcb_map_tos map_to_tcbs_upd) + apply (subst map_to_ctes_upd_tcb_no_ctes, assumption) + apply (rule ball_tcb_cte_casesI, simp_all)[1] + apply (simp add: cep_relations_drop_fun_upd) + apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) + apply (simp add: ctcb_relation_def cthread_state_relation_def) + apply simp + apply (simp add: carch_state_relation_def cmachine_state_relation_def + typ_heap_simps' map_comp_update projectKO_opt_tcb + cvariable_relation_upd_const ko_at_projectKO_opt) + apply ceqv + apply (rule ccorres_abstract_ksCurThread, ceqv) + apply (rule ccorres_move_c_guard_tcb_ctes + ccorres_move_array_assertion_tcb_ctes + ccorres_move_const_guard)+ + apply (simp add: getThreadReplySlot_def getThreadCallerSlot_def + locateSlot_conv) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'="replySlot_'" in ccorres_abstract, ceqv) + apply (rename_tac replySlot, + rule_tac P="replySlot + = cte_Ptr (curThread + + (tcbReplySlot << cte_level_bits))" + in ccorres_gen_asm2) + apply (rule ccorres_move_const_guard + ccorres_move_array_assertion_tcb_ctes + ccorres_move_c_guard_tcb_ctes)+ + apply csymbr + apply (simp add: cteInsert_def bind_assoc) + apply (rule ccorres_pre_getCTE2, rename_tac curThreadReplyCTE) + apply (simp only: getThreadState_def) + apply (rule ccorres_assert2) + apply (rule ccorres_pre_threadGet, rename_tac destState) + apply (rule_tac P="isReceive destState" in ccorres_gen_asm) + apply (rule ccorres_pre_getCTE2, rename_tac curThreadReplyCTE2) + apply (rule ccorres_pre_getCTE2, rename_tac destCallerCTE) + apply (rule ccorres_assert2)+ + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="from_bool (blockingIPCCanGrant 
destState)" + and R="st_tcb_at' ((=) destState) (hd (epQueue send_ep)) + and K(isReceive destState)" + and R'=UNIV in ccorres_symb_exec_r_known_rv) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: typ_heap_simps' st_tcb_at_h_t_valid + pred_tcb_at'_def) + apply (drule (1) obj_at_cslift_tcb) + apply clarsimp + apply (drule ctcb_relation_blockingIPCCanGrantD, blast) + apply fastforce + apply ceqv + apply csymbr + + apply (rule_tac P="curThreadReplyCTE2 = curThreadReplyCTE" + in ccorres_gen_asm) + apply (rule ccorres_move_c_guard_tcb_ctes2) + apply (ctac add: cap_reply_cap_ptr_new_np_updateCap_ccorres) + apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) + apply (rule_tac P="cte_wp_at' (\cte. cteMDBNode cte = nullMDBNode) + (hd (epQueue send_ep) + + (tcbCallerSlot << cte_level_bits)) + and cte_wp_at' ((=) curThreadReplyCTE) (curThread + + (tcbReplySlot << cte_level_bits)) + and tcb_at' curThread and (no_0 o ctes_of) + and tcb_at' (hd (epQueue send_ep))" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of size_of_def + tcb_cnode_index_defs tcbCallerSlot_def + tcbReplySlot_def cte_level_bits_def + valid_mdb'_def valid_mdb_ctes_def) + apply (subst aligned_add_aligned, erule tcb_aligned', + simp add: is_aligned_def, simp add: objBits_defs, simp) + apply (rule_tac x="hd (epQueue send_ep) + v" for v + in cmap_relationE1[OF cmap_relation_cte], assumption+) + apply (clarsimp simp: typ_heap_simps' updateMDB_def Let_def) + apply (subst if_not_P) + apply clarsimp + apply (simp add: split_def) + apply (rule getCTE_setCTE_rf_sr, simp_all)[1] + apply (case_tac destCallerCTE, case_tac curThreadReplyCTE, + case_tac "cteMDBNode curThreadReplyCTE") + apply (clarsimp simp: ccte_relation_eq_ccap_relation nullMDBNode_def) + apply ceqv + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) + apply (rule_tac P="pspace_canonical' + and cte_at' (hd (epQueue send_ep) + + (tcbCallerSlot << cte_level_bits)) + and cte_wp_at' ((=) curThreadReplyCTE) (curThread + + (tcbReplySlot << cte_level_bits)) + and tcb_at' (hd (epQueue send_ep)) + and (no_0 o ctes_of)" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of size_of_def + tcb_cnode_index_defs tcbCallerSlot_def + tcbReplySlot_def cte_level_bits_def + ctes_of_canonical) + apply (subst aligned_add_aligned, erule tcb_aligned', + simp add: is_aligned_def, simp add: objBits_defs, simp) + apply (rule_tac x="curThread + 0x40" in cmap_relationE1[OF cmap_relation_cte], + assumption+) + apply (clarsimp simp: typ_heap_simps' updateMDB_def Let_def) + apply (subst if_not_P) + apply clarsimp + apply (simp add: split_def) + apply (rule getCTE_setCTE_rf_sr, simp_all)[1] + apply (simp add: ccte_relation_eq_ccap_relation) + apply (case_tac curThreadReplyCTE, + case_tac "cteMDBNode curThreadReplyCTE", + simp) + apply ceqv + apply (simp add: updateMDB_def) + apply (rule ccorres_split_nothrow_dc) + apply (ctac add: fastpath_copy_mrs_ccorres[unfolded forM_x_def]) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (simp add: setThreadState_runnable_simp) + apply (rule_tac P=\ in threadSet_ccorres_lemma2, vcg) + apply (clarsimp simp: typ_heap_simps' rf_sr_def + cstate_relation_def Let_def) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps' + update_tcb_map_tos map_to_tcbs_upd) + apply (subst 
map_to_ctes_upd_tcb_no_ctes, assumption) + apply (rule ball_tcb_cte_casesI, simp_all)[1] + apply (simp add: cep_relations_drop_fun_upd) + apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) + apply (simp add: ctcb_relation_def cthread_state_relation_def) + apply simp + apply (simp add: carch_state_relation_def cmachine_state_relation_def + typ_heap_simps' map_comp_update projectKO_opt_tcb + cvariable_relation_upd_const ko_at_projectKO_opt) + apply ceqv + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_split_nothrow_novcg_dc) + apply simp + apply (rule ccorres_call, + rule_tac vmid="ucast (the (apVMID (the asid_map_entry_opt)))" + and vsroot="capUntypedPtr (cteCap vs_cap)" + and asid="fst (the (capPTMappedAddress (capCap (cteCap vs_cap))))" + in switchToThread_fp_ccorres, + simp+)[1] + apply (rule_tac P="\s. ksCurThread s = hd (epQueue send_ep)" + in ccorres_cross_over_guard) + apply csymbr + apply csymbr + apply (rule ccorres_call_hSkip) + apply (rule fastpath_restore_ccorres) + apply simp + apply simp + apply (simp add: setCurThread_def) + apply wp + apply (rule_tac P=\ in hoare_triv, simp) + apply simp + apply (simp add: imp_conjL rf_sr_ksCurThread) + apply (clarsimp simp: signed_n_msgRegisters_to_H + messageInfoFromWord_def Let_def + mi_from_H_def + seL4_MessageInfo_lift_def + msgLengthBits_def msgExtraCapBits_def + msgMaxExtraCaps_def shiftL_nat + mask_def msgLabelBits_def + guard_is_UNIV_def) + apply (force simp: size_msgRegisters_def msgMaxLength_def + ccap_relation_ep_helpers + split: if_splits) + apply (wp sts_valid_objs' asid_has_vmid_lift) + apply simp + apply (vcg exspec=thread_state_ptr_set_tsType_np_modifies) + apply (simp add: pred_conj_def) + apply (rule mapM_x_wp'[OF hoare_weaken_pre]) + apply (wp asid_has_vmid_lift) + apply clarsimp + apply simp + apply (vcg exspec=fastpath_copy_mrs_modifies) + apply (simp add: valid_tcb_state'_def) + apply wp + apply (wp updateMDB_weak_cte_wp_at asid_has_vmid_lift) + apply simp + apply (vcg exspec=mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_modifies) + apply simp + apply (wp asid_has_vmid_lift | simp + | wp (once) updateMDB_weak_cte_wp_at + | wp (once) updateMDB_cte_wp_at_other)+ + apply (vcg exspec=mdb_node_ptr_set_mdbPrev_np_modifies) + apply (wp updateCap_cte_wp_at_cteMDBNode + updateCap_cte_wp_at_cases + updateCap_no_0 asid_has_vmid_lift | simp)+ + apply (vcg exspec=cap_reply_cap_ptr_new_np_modifies) + (* imp_disjL causes duplication of conclusions of implications involving + disjunctions which chokes the vcg; we want fewer implications at the + expense of disjunctions on asm side *) + supply imp_disjL[simp del] + supply imp_disjL[symmetric, simp] + apply clarsimp + apply (vcg exspec=thread_state_ptr_get_blockingIPCCanGrant_modifies) + apply (simp add: word_sle_def) + apply vcg + + apply (rule conseqPre, vcg, clarsimp) + apply (simp add: cte_level_bits_def field_simps shiftl_t2n + ctes_of_Some_cte_wp_at + del: all_imp_to_ex cong: imp_cong conj_cong) + apply (wp hoare_vcg_all_lift threadSet_ctes_of + hoare_vcg_imp_lift' threadSet_valid_objs' + threadSet_st_tcb_at_state threadSet_cte_wp_at' + threadSet_cur asid_has_vmid_lift + | simp add: cur_tcb'_def[symmetric] + | strengthen not_obj_at'_strengthen)+ + apply (vcg exspec=thread_state_ptr_set_tsType_np_modifies) + apply (wp hoare_vcg_all_lift threadSet_ctes_of + hoare_vcg_imp_lift' threadSet_valid_objs' + threadSet_st_tcb_at_state threadSet_cte_wp_at' + threadSet_cur + | simp add: cur_tcb'_def[symmetric])+ + apply 
(simp add: valid_tcb'_def tcb_cte_cases_def + valid_tcb_state'_def cteSizeBits_def) + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift + set_ep_valid_objs' asid_has_vmid_lift + setObject_no_0_obj'[where 'a=endpoint, folded setEndpoint_def] + | strengthen not_obj_at'_strengthen)+ + apply (rule_tac Q="\_ s. hd (epQueue send_ep) \ curThread + \ pred_tcb_at' itcbState ((=) (tcbState xa)) (hd (epQueue send_ep)) s" + in hoare_post_imp) + apply fastforce + apply wp+ + apply simp + apply (vcg exspec=endpoint_ptr_mset_epQueue_tail_state_modifies + exspec=endpoint_ptr_set_epQueue_head_np_modifies) + apply simp + apply (vcg exspec=cap_endpoint_cap_get_capCanGrant_modifies + exspec=cap_endpoint_cap_get_capCanGrantReply_modifies) + apply simp + (* throw away results of isHighestPrio and the fastfail shortcut *) + apply (wp (once) hoare_drop_imp, wp) + apply (wp (once) hoare_drop_imp, wp) + +(* FIXME AARCH64 this is frustrating: + - can't use clarsimp because it'll introduce new free variables (\_. _ = Some _) that will + cause schematic unification problems + - for this same reason, can't use wpsimp + So we are restricted to simp. But: + - if there are any unification problems, wp will generate a goal that has a schematic assumption + - the next simp will immediately instantiate that assumption to False + - vcg will happily do the same thing as wp if there's a unification problem, but *also* has a + built-in simp so we won't immediately see anything went wrong. + This results in using rule rather than wp in several cases below, but the proof still remains + fragile and very difficult to get right. *) + apply simp + apply (vcg exspec=isHighestPrio_modifies) + apply simp + apply (rule cd_wp) + apply simp + apply vcg + apply simp + apply vcg + apply simp + apply wpfix + apply vcg + apply simp + apply (rule getASIDPoolEntry_wp) + apply simp + apply (vcg exspec=findMapForASID_modifies) + apply simp + apply (vcg exspec=cap_vspace_cap_get_capVSMappedASID_modifies) + apply simp + (* accessing VSBasePtr without knowing it's a VSpace, can't use default spec *) + apply (rule conseqPre, vcg exspec=cap_vspace_cap_get_capVSBasePtr_spec2) + apply (rule subset_refl) + apply (rule conseqPre, vcg exspec=cap_vspace_cap_get_capVSBasePtr_spec2) + apply clarsimp + apply clarsimp + apply (rule getEndpoint_wp) + apply simp + apply (vcg exspec=endpoint_ptr_get_epQueue_head_modifies + exspec=endpoint_ptr_get_state_modifies) + apply (simp add: getSlotCap_def) + apply (rule valid_isLeft_theRight_split) + apply simp + apply (wp getCTE_wp') + apply (rule validE_R_abstract_rv) + apply wp + apply simp + apply (vcg exspec=lookup_fp_modifies) + apply simp + apply (rule threadGet_wp) + apply clarsimp + apply vcg + apply simp + apply (rule user_getreg_wp) + apply simp + apply (rule user_getreg_wp) + + apply (rule conjI) + (* Haskell precondition *) + apply (clarsimp simp: obj_at_tcbs_of ct_in_state'_def st_tcb_at_tcbs_of + invs_cur' invs_valid_objs' ctes_of_valid' + word_sle_def + tcb_ptr_to_ctcb_ptr_mask[OF tcb_at_invs'] + invs'_bitmapQ_no_L1_orphans) + apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp) + apply (clarsimp simp: isCap_simps valid_cap'_def maskCapRights_def) + apply (clarsimp simp add: obj_at'_def) (* FIXME AARCH64 not a fan of obj_at' expansion *) + apply (frule invs_valid_objs') + apply (erule valid_objsE') + apply simp + apply (clarsimp simp: isRecvEP_def valid_obj'_def valid_ep'_def + split: Structures_H.endpoint.split_asm) + apply (erule not_NilE) + + (* sort out destination being queued in the endpoint, hence not 
the current thread *) + apply (prop_tac "st_tcb_at' isBlockedOnReceive x s") + apply (rule_tac ts="x # xs'" and epptr=v0 in recv_ep_queued_st_tcb_at' + ; clarsimp simp: obj_at'_def invs_sym') + apply (prop_tac "x \ ksCurThread s") + apply (fastforce simp: st_tcb_at_tcbs_of isBlockedOnReceive_def) + apply (drule_tac x = x in bspec) + apply fastforce + apply (clarsimp simp:obj_at_tcbs_of) + apply (frule_tac ptr2 = x in tcbs_of_aligned') + apply (simp add:invs_pspace_aligned') + apply (frule_tac ptr2 = x in tcbs_of_cte_wp_at_vtable) + apply (clarsimp simp: size_of_def field_simps word_sless_def word_sle_def + dest!: ptr_val_tcb_ptr_mask2[unfolded mask_def]) + apply (frule_tac p="x + offs" for offs in ctes_of_valid', clarsimp) + apply (rule conjI, fastforce) (* valid_arch_state' *) + apply (rule conjI) (* asid_wf *) + apply (clarsimp simp: isCap_simps valid_cap'_def + dest!: isValidVTableRootD) + apply (clarsimp simp: wellformed_mapdata'_def) + apply (clarsimp simp: invs_sym' tcbCallerSlot_def + tcbVTableSlot_def tcbReplySlot_def + conj_comms tcb_cnode_index_defs field_simps + obj_at_tcbs_of) + apply (clarsimp simp: cte_level_bits_def isValidVTableRoot_def + cte_wp_at_ctes_of capAligned_def objBits_simps) + apply (simp cong: conj_cong) + apply (clarsimp simp add: invs_ksCurDomain_maxDomain') + apply (rule conjI) + subgoal (* dest thread domain \ maxDomain *) + by (drule (1) tcbs_of_valid_tcb'[OF invs_valid_objs'], solves \clarsimp simp: valid_tcb'_def\) + apply clarsimp + apply (rule conjI) (* isReceive on queued tcb state *) + apply (fastforce simp: st_tcb_at_tcbs_of isBlockedOnReceive_def isReceive_def) + apply clarsimp + apply (rule conjI, solves clarsimp)+ (* a bunch of consequences of invs' *) + apply (frule invs_mdb', clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (case_tac xb, clarsimp, drule(1) nullcapsD') + apply (clarsimp simp: to_bool_def length_msgRegisters word_le_nat_alt[symmetric]) + apply (frule tcb_aligned'[OF obj_at_tcbs_of[THEN iffD2], OF exI, simplified]) + apply (clarsimp simp: objBits_defs signed_n_msgRegisters_to_H) + apply (rule conjI, clarsimp simp: word_bits_def) + apply (prop_tac "capPTBasePtr (capCap (cteCap cteb)) = global.capUntypedPtr (cteCap cteb)") + apply (solves \clarsimp simp add: isVTableRoot_ex\) + apply simp + apply (rule conjI) + apply (clarsimp simp: asid_has_vmid_def asid_has_entry_def split: option.splits) + apply (rule_tac x=pool in exI) + apply clarsimp + apply (case_tac asid_entry, clarsimp) + + apply (safe del: notI disjE)[1] + apply (rule not_sym, clarsimp) + apply (drule Aligned.aligned_sub_aligned[where x="x + 0x20" and y=x for x]) + apply (erule tcbs_of_aligned') + apply (simp add:invs_pspace_aligned') + apply (simp add: objBits_defs) + apply (simp add: objBits_defs is_aligned_def dvd_def) + apply (clarsimp simp: tcbs_of_def obj_at'_def projectKO_opt_tcb + split: if_splits Structures_H.kernel_object.splits) (* slow *) + apply (drule pspace_distinctD') + apply (simp add: invs_pspace_distinct') + apply (simp add: objBits_simps) + + apply (clarsimp simp: obj_at_tcbs_of split: list.split) + apply (erule_tac x = v0 in valid_objsE'[OF invs_valid_objs',rotated]) + apply (clarsimp simp: valid_obj'_def valid_ep'_def isRecvEP_def neq_Nil_conv size_of_def + split: Structures_H.endpoint.split_asm + cong: list.case_cong) + apply (simp add: obj_at_tcbs_of) + apply simp + + (* C precondition *) + apply (clarsimp simp: syscall_from_H_def[split_simps syscall.split] + word_sle_def word_sless_def rf_sr_ksCurThread + size_of_def cte_level_bits_def + 
tcb_cnode_index_defs tcbCTableSlot_def tcbVTableSlot_def + tcbReplySlot_def tcbCallerSlot_def) + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: ccte_relation_eq_ccap_relation of_bl_from_bool + ccap_relation_case_sum_Null_endpoint from_bool_eq_if' + isRight_case_sum typ_heap_simps') + apply (frule (1) cap_get_tag_isCap[THEN iffD2]) + apply (clarsimp simp: typ_heap_simps' ccap_relation_ep_helpers) + apply (erule cmap_relationE1[OF cmap_relation_ep], + erule ko_at_projectKO_opt) + apply (frule (1) ko_at_valid_ep') + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_endpoint_case neq_Nil_conv + tcb_queue_relation'_def valid_ep'_def + mi_from_H_def) + apply (rule conjI; clarsimp?) + apply (rule conjI; clarsimp simp: casid_map_relation_Some_get_tag) + (* cap_get_tag_isCap hides info obtained from isValidVTableRootD' *) + apply (frule (1) capVSBasePtr_CL_capUntypedPtr_helper) + apply (clarsimp simp: cap_get_tag_isCap dest!: isValidVTableRootD') + done +qed + +lemma ccap_relation_reply_helper: + "\ ccap_relation cap cap'; isReplyCap cap \ + \ cap_reply_cap_CL.capTCBPtr_CL (cap_reply_cap_lift cap') + = ptr_val (tcb_ptr_to_ctcb_ptr (capTCBPtr cap))" + by (clarsimp simp: cap_get_tag_isCap[symmetric] + cap_lift_reply_cap cap_to_H_simps + cap_reply_cap_lift_def + elim!: ccap_relationE) + +lemma valid_ep_typ_at_lift': + "\ \p. \typ_at' TCBT p\ f \\rv. typ_at' TCBT p\ \ + \ \\s. valid_ep' ep s\ f \\rv s. valid_ep' ep s\" + apply (cases ep, simp_all add: valid_ep'_def) + apply (wp hoare_vcg_const_Ball_lift typ_at_lifts | assumption)+ + done + +lemma threadSet_tcbState_valid_objs: + "\valid_tcb_state' st and valid_objs'\ + threadSet (tcbState_update (\_. st)) t + \\rv. valid_objs'\" + apply (wp threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemmas array_assertion_abs_tcb_ctes_add + = array_assertion_abs_tcb_ctes_add[where + tcb="\s. Ptr (tcb' s)" for tcb', simplified] + +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] + = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)[where + tcb="\s. Ptr (tcb' s)" for tcb', simplified]] + ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] + ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] + ccorres_move_Guard[OF array_assertion_abs_tcb_ctes_add] + +lemmas ccorres_move_c_guard_tcb_ctes3 + = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes[where + tcb="\s. Ptr (tcb' s)" for tcb', simplified], + unfolded cte_C_numeral_fold] + +lemma fastpath_reply_cap_check_ccorres: + "ccorres (\rv rv'. \cap. ccap_relation cap ccap + \ rv' = from_bool (isReplyCap cap)) + ret__int_' + \ (\ \cap = ccap \) hs + (return ()) (Call fastpath_reply_cap_check_'proc)" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: from_bool_def return_def of_bl_from_bool cap_get_tag_isCap) + done + +lemma fastpath_reply_recv_ccorres: + notes hoare_TrueI[simp] if_cong[cong] option.case_cong[cong] + notes from_bool_0[simp] (* FIXME AARCH64 should go in simpset *) + shows "ccorres dc xfdc + (\s. invs' s \ ct_in_state' ((=) Running) s + \ obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb capRegister = cptr + \ (user_regs o atcbContextGet o tcbArch) tcb msgInfoRegister = msginfo) + (ksCurThread s) s) + ({s. cptr_' s = cptr} \ {s. 
msgInfo_' s = msginfo}) [] + (fastpaths SysReplyRecv) (Call fastpath_reply_recv_'proc)" +proof - + have [simp]: "Kernel_C.tcbCaller = scast tcbCallerSlot" + by (simp add:Kernel_C.tcbCaller_def tcbCallerSlot_def) + have [simp]: "Kernel_C.tcbVTable = scast tcbVTableSlot" + by (simp add:Kernel_C.tcbVTable_def tcbVTableSlot_def) + + have tcbs_of_cte_wp_at_vtable: + "\s tcb ptr. tcbs_of s ptr = Some tcb \ cte_wp_at' \ (ptr + 0x20 * tcbVTableSlot) s" + apply (clarsimp simp: tcbs_of_def cte_at'_obj_at' + split: if_splits) + apply (drule_tac x = "0x20 * tcbVTableSlot" in bspec) + apply (simp add: tcb_cte_cases_def tcbVTableSlot_def cteSizeBits_def) + apply simp + done + + have tcbs_of_cte_wp_at_caller: + "\s tcb ptr. tcbs_of s ptr = Some tcb \ cte_wp_at' \ (ptr + 0x20 * tcbCallerSlot) s" + apply (clarsimp simp: tcbs_of_def cte_at'_obj_at' + split: if_splits) + apply (drule_tac x = "0x20 * tcbCallerSlot" in bspec) + apply (simp add:tcb_cte_cases_def tcbCallerSlot_def cteSizeBits_def) + apply simp + done + + have tcbs_of_aligned': + "\s ptr tcb. \tcbs_of s ptr = Some tcb; pspace_aligned' s\ \ is_aligned ptr tcbBlockSizeBits" + apply (clarsimp simp: tcbs_of_def obj_at'_def split: if_splits) + apply (drule pspace_alignedD') + apply simp+ + apply (simp add: projectKO_opt_tcb objBitsKO_def + split: Structures_H.kernel_object.splits) + done + + (* FIXME indentation is wonky in this proof, fix will come in a future patch, hopefully when + automatic indentation is improved *) + show ?thesis + supply option.case_cong_weak[cong del] + supply if_cong[cong] option.case_cong[cong] Collect_const[simp del] + + apply (cinit lift: cptr_' msgInfo_') + + (* this also lifts out pickFastpath alternative to general alternative, but not clear what + pickFastpath is for *) + apply (simp add: catch_liftE_bindE unlessE_throw_catch_If + unifyFailure_catch_If catch_liftE + getMessageInfo_def alternative_bind + cong: if_cong call_ignore_cong) + apply (rule ccorres_pre_getCurThread) + apply (rename_tac curThread) + apply (rule ccorres_symb_exec_l3[OF _ user_getreg_inv' _ empty_fail_user_getreg])+ + apply (rename_tac msginfo' cptr') + apply (rule_tac P="msginfo' = msginfo \ cptr' = cptr" in ccorres_gen_asm) + (* the call_ignore_cong in this proof is required to prevent corruption of arguments in + endpoint_ptr_mset_epQueue_tail_state_'proc so that eventually fastpath_enqueue_ccorres + can apply *) + apply (simp cong: call_ignore_cong) + apply (simp only:) + apply (csymbr, csymbr) + (* get fault type *) + apply csymbr + apply (rule_tac r'="\ft ft'. (ft' = scast seL4_Fault_NullFault) = (ft = None)" and + xf'=ret__unsigned_longlong_' in ccorres_split_nothrow) + apply (rule_tac P="cur_tcb' and (\s. 
curThread = ksCurThread s)" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread) + apply (drule(1) obj_at_cslift_tcb, clarsimp) + apply (clarsimp simp: typ_heap_simps' ctcb_relation_def cfault_rel_def) + apply (rule rev_bexI, erule threadGet_eq) + apply (clarsimp simp: seL4_Fault_lift_def Let_def split: if_split_asm) + apply ceqv + apply (rename_tac fault fault') + apply (rule ccorres_alternative1) (* pick pickFastpath = True, still not clear what it's for *) + apply csymbr + apply csymbr + apply (simp cong: call_ignore_cong) + apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\, rotated -1]) + (* fault, message info overflows registers, or extra caps present: abort *) + apply (vcg exspec=slowpath_noreturn_spec) + apply (clarsimp simp: signed_n_msgRegisters_to_H messageInfoFromWord_def Let_def mi_from_H_def + seL4_MessageInfo_lift_def msgLengthBits_def msgExtraCapBits_def + msgMaxExtraCaps_def shiftL_nat mask_def msgLabelBits_def) + apply (fastforce simp: size_msgRegisters_def msgMaxLength_def split: if_splits) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres) + apply simp + apply simp + + apply (simp cong: call_ignore_cong) + apply (elim conjE) + (* look up invoked cap *) + apply (rule ccorres_abstract_ksCurThread, ceqv) + apply (rule_tac P="ct = curThread" in ccorres_gen_asm, simp only:, thin_tac "ct = curThread") + apply (simp add: getThreadCSpaceRoot_def locateSlot_conv cong: call_ignore_cong) + apply (rule ccorres_pre_getCTE2) + apply (rule ccorres_move_array_assertion_tcb_ctes + ccorres_move_c_guard_tcb_ctes2 + ccorres_move_const_guard + ccorres_rhs_assoc)+ + apply (ctac add: lookup_fp_ccorres) + apply (rename_tac luRet ep_cap) + apply (rule ccorres_abstract_ksCurThread, ceqv) + apply (rule_tac P="ct = curThread" in ccorres_gen_asm, simp only:, thin_tac "ct = curThread") + apply (rule ccorres_move_array_assertion_tcb_ctes | simp cong: call_ignore_cong)+ + (* check invoked cap *) + apply (csymbr, csymbr) + apply (simp add: ccap_relation_case_sum_Null_endpoint of_bl_from_bool cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* not an endpoint cap *) + apply (simp cong: if_cong) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (erule disjE; simp; rule slowpath_ccorres) + apply simp + apply simp + apply (vcg exspec=slowpath_noreturn_spec) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (simp add: isRight_case_sum cong: call_ignore_cong) + apply (elim conjE) + apply (frule (1) cap_get_tag_isCap[THEN iffD2]) + apply (simp add: ccap_relation_ep_helpers cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* can't receive from ep *) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + (* check there is nothing waiting on the notification *) + apply (rule ccorres_pre_getBoundNotification) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=ret__int_' and r'="\rv rv'. rv' = from_bool rv" + in ccorres_split_nothrow) + apply (rule_tac P="bound_tcb_at' ((=) bound_ntfn) curThread and valid_objs' + and no_0_obj' and (\s. 
curThread = ksCurThread s)" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_ksCurThread pred_tcb_at'_def) + apply (drule(3) obj_at_bound_tcb_grandD, clarsimp simp: typ_heap_simps return_def) + apply (simp add: in_liftM Bex_def getNotification_def getObject_return objBits_simps' + return_def cnotification_relation_isActive + trans [OF eq_commute from_bool_eq_if]) + apply ceqv + apply (simp only:) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_split_throws) + apply simp + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + (* get the endpoint address *) + apply (csymbr, csymbr) + apply (simp add: ccap_relation_ep_helpers cong: call_ignore_cong) + (* consolidate enpoint state \ EPState_Send check on C side with result of getEndpoint *) + apply (rule_tac xf'="ret__unsigned_longlong_'" + and r'="\ep v. (v = scast EPState_Send) = isSendEP ep" + in ccorres_split_nothrow) + apply (rule ccorres_add_return2) + apply (rule ccorres_pre_getEndpoint, rename_tac ep) + apply (rule_tac P="ko_at' ep (capEPPtr (theRight luRet)) and valid_objs'" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (erule cmap_relationE1[OF cmap_relation_ep], erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps') + apply (simp add: cendpoint_relation_def Let_def isSendEP_def endpoint_state_defs + split: endpoint.split_asm) + apply ceqv + apply (rename_tac send_ep send_ep_is_send) + apply (rule_tac P="ko_at' send_ep (capEPPtr (theRight luRet)) and valid_objs'" + in ccorres_cross_over_guard) + apply (simp cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* endpoint has a thread waiting to send *) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + + (* get caller slot of current thread *) + apply (simp add: locateSlot_conv getThreadCallerSlot_def + cong: if_cong call_ignore_cong) + apply (rule ccorres_abstract_ksCurThread, ceqv) + apply (rule_tac P="ct = curThread" in ccorres_gen_asm, simp only:, thin_tac "ct = curThread") + apply (rule ccorres_move_const_guard + ccorres_move_c_guard_tcb_ctes2 + ccorres_move_array_assertion_tcb_ctes)+ + apply csymbr + (* get caller cap *) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac var="callerCap_'" and var_update="callerCap_'_update" + in getCTE_h_val_ccorres_split[where P=\]) + apply simp + apply ceqv + apply (rename_tac caller_cap caller_cap') + apply (rule_tac P="\_. 
capAligned (cteCap caller_cap)" + in ccorres_cross_over_guard) + (* check caller cap is reply cap *) + apply (rule ccorres_add_return, ctac add: fastpath_reply_cap_check_ccorres) + apply (drule spec, drule_tac P="ccap_relation cp caller_cap'" for cp in mp, assumption) + apply (simp cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* not reply cap *) + apply (simp cong: conj_cong) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + (* get caller from cap on C side *) + apply (csymbr, csymbr) + apply (rename_tac caller') + (* get caller fault, reducing relation to whether there is no fault + Note: CONFIG_EXCEPTION_FASTPATH is not set, otherwise we'd need to deal with + generating fault replies for other fault types (currently only VM faults) *) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac r'="\ft ft'. (ft' = scast seL4_Fault_NullFault) = (ft = None)" + and xf'=fault_type_' in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: obj_at_tcbs_of) + apply (clarsimp simp: typ_heap_simps' ctcb_relation_def cfault_rel_def + ccap_relation_reply_helper) + apply (clarsimp simp: seL4_Fault_lift_def Let_def split: if_split_asm) + apply ceqv + apply (rename_tac fault fault') + apply (simp cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* caller has faulted *) + apply (simp flip: not_None_eq) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + + (* get vspace root *) + apply (simp add: getThreadVSpaceRoot_def locateSlot_conv cong: call_ignore_cong) + apply (rule ccorres_move_c_guard_tcb_ctes3 + ccorres_move_array_assertion_tcb_ctes + ccorres_move_const_guard)+ + apply (rule_tac var="newVTable_'" and var_update="newVTable_'_update" + in getCTE_h_val_ccorres_split[where P=\]) + apply simp + apply ceqv + apply (rename_tac vs_cap vs_cap') + (* get capVSBasePtr from the vspace cap on C side (we don't know it's vspace cap yet) *) + apply (rule ccorres_symb_exec_r) (* can't use csymbr, we need to use alternative spec rule *) + apply (rule_tac xf'=ret__unsigned_longlong_' in ccorres_abstract, ceqv) + apply (rename_tac vspace_cap_c_ptr_maybe) + apply csymbr+ + apply (simp add: isValidVTableRoot_conv cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* \ isValidVTableRoot (cteCap vs_cap) *) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + + (* AARCH64+hyp specific *) + apply (drule isValidVTableRootD') + (* we now know base address we read on the C side was from a vspace cap *) + apply (rule_tac P="vspace_cap_c_ptr_maybe = capUntypedPtr (cteCap vs_cap)" + in ccorres_gen_asm2) + apply (simp cong: call_ignore_cong) + (* C: get the asid *) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=asid___unsigned_long_' + and val="fst (the (capPTMappedAddress (capCap (cteCap vs_cap))))" + in ccorres_symb_exec_r_known_rv[where R=\ and R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: cap_get_tag_isCap isArchVSpacePTCap_def2 + ccap_relation_vspace_mapped_asid[symmetric]) + apply ceqv + (* get asid_entry for asid *) + apply (ctac add: findMapForASID_ccorres) + 
apply (rename_tac asid_map_entry_opt asid_map_entry') + apply csymbr + apply csymbr + apply (simp add: casid_map_relation_get_tag_None cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + (* no asid map entry for asid *) + apply simp + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + apply (rule_tac xf'=ret__int_' + and val="from_bool (apVSpace (the asid_map_entry_opt) + \ capPTBasePtr (capCap (cteCap vs_cap)))" + in ccorres_symb_exec_r_known_rv[where R=\ and R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: from_bool_eq_if' vspace_cap_capUntypedPtr_capPTBasePtr + casid_map_relation_Some_get_tag + casid_map_relation_Some_c_lift_eqs) + apply ceqv + apply (simp cong: call_ignore_cong) + (* C does two checks/aborts, but Haskell only one, so direct If-condition + equivalence doesn't match. Step over C side, resolve Haskell check using + resulting non-abort conditions. *) + apply (rule ccorres_Cond_rhs_Seq) + (* vspace in asid map entry doesn't match vspace cap *) + apply clarsimp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + (* we can't directly relate getting stored_vmid_valid_CL to None/Some on the + Haskell side, but we can relate equating the result to 0 (i.e. no valid vmid) *) + apply csymbr + apply (simp cong: call_ignore_cong) + apply (rule_tac C'="{_. apVMID (the asid_map_entry_opt) = None}" + in ccorres_rewrite_cond_sr_Seq[where Q=\ and Q'=UNIV]) + apply (clarsimp simp: casid_map_relation_Some_c_lift_eqs) + apply (rule ccorres_Cond_rhs_Seq) + (* no valid VMID *) + apply clarsimp + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + (* ap_entry/VMID check on Haskell side is now trivially true with clarsimp, but we + have to avoid exploding our free variables to avoid having to wrap them + back up during wp reasoning (e.g. \y. 
asid_map_entry_opt = Some y) *) + apply (simp cong: call_ignore_cong) + apply (rule ccorres_If_True_drop, solves clarsimp) + + (* store VMID in first word of stored_hw_asid, which will be used later on the C side *) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="ucast (the (apVMID (the asid_map_entry_opt)))" + in ccorres_symb_exec_r_known_rv[where R=\ and R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: casid_map_relation_Some_get_tag + casid_map_relation_Some_c_lift_eqs) + apply ceqv + apply (simp cong: call_ignore_cong) + apply csymbr + + apply (ctac add: getCurDomain_maxDom_ccorres_dom_') + apply (rename_tac curDom curDom') + apply (rule_tac P="curDom \ maxDomain" in ccorres_gen_asm) + apply (simp add: prio_and_dom_limit_helpers cong: call_ignore_cong) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_pre_threadGet) + apply simp (* we have to do this simp without the call_ignore_cong, + otherwise isHighestPrio_ccorres's args won't get simplified *) + apply (ctac add: isHighestPrio_ccorres) + apply (rename_tac highest highest') + apply (simp add: to_bool_def cong: call_ignore_cong) + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond2'[where R=\], blast) + (* not highest priority *) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + + (* Note: AArch32 does its vspace checks here *) + + (* get caller domain on Haskell side *) + apply (simp add: ccap_relation_reply_helper cong: call_ignore_cong) + apply (rule ccorres_move_c_guard_tcb ccorres_move_const_guard)+ + apply (rule ccorres_pre_threadGet) + apply (rename_tac destDom) + (* The C does not compare domains when maxDomain is 0, since then both + threads will be in the current domain. Since we can show both threads + must be \ maxDomain, we can rewrite this test to only comparing domains + even when maxDomain is 0, making the check identical to the Haskell. *) + apply (rule_tac C'="{s. destDom \ curDom}" + and Q="obj_at' ((=) destDom \ tcbDomain) + (capTCBPtr (cteCap caller_cap)) + and (\s. ksCurDomain s = curDom \ curDom \ maxDomain + \ destDom \ maxDomain)" + and Q'=UNIV in ccorres_rewrite_cond_sr_Seq) + apply (simp add: from_bool_eq_if from_bool_eq_if' ccorres_IF_True) + apply clarsimp + apply (drule(1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps' rf_sr_ksCurDomain) + apply (drule ctcb_relation_tcbDomain[symmetric]) + apply (case_tac "0 < maxDomain"; + solves \clarsimp simp: maxDom_sgt_0_maxDomain not_less\) + apply (rule ccorres_seq_cond_raise[THEN iffD2]) + apply (rule_tac R=\ in ccorres_cond2', blast) + apply (rule ccorres_split_throws) + apply (rule ccorres_call_hSkip) + apply (rule slowpath_ccorres, simp+) + apply (vcg exspec=slowpath_noreturn_spec) + apply (simp cong: call_ignore_cong) + + (* Now fully committed to fastpath *) + (* set current thread to BlockedOnReceive *) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) + apply (rule_tac P="capAligned (theRight luRet)" in ccorres_gen_asm) + apply (rule_tac P="canonical_address (capEPPtr (projr luRet))" in ccorres_gen_asm) + apply (rule_tac P=\ and P'="\s. 
ksCurThread s = curThread" + in threadSet_ccorres_lemma3) + apply vcg + apply (clarsimp simp: rf_sr_ksCurThread typ_heap_simps' + h_t_valid_clift_Some_iff) + apply (clarsimp simp: capAligned_def isCap_simps objBits_simps + ThreadState_defs mask_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + typ_heap_simps' objBits_defs) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps' + update_tcb_map_tos map_to_tcbs_upd) + apply (subst map_to_ctes_upd_tcb_no_ctes, assumption) + apply (rule ball_tcb_cte_casesI, simp_all)[1] + apply (simp add: cep_relations_drop_fun_upd) + apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) + apply (simp add: ctcb_relation_def cthread_state_relation_def + ThreadState_defs) + apply (clarsimp simp: ccap_relation_ep_helpers) + apply simp + apply (simp add: carch_state_relation_def cmachine_state_relation_def + typ_heap_simps' map_comp_update projectKO_opt_tcb + cvariable_relation_upd_const ko_at_projectKO_opt) + apply ceqv + (* update endpoint queue *) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) + apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) + apply (rule fastpath_enqueue_ccorres[simplified]) + apply simp + apply ceqv + + (* update MDB and reset tcbCallerSlot in current thread *) + apply (simp add: liftM_def) + apply (rule ccorres_move_c_guard_tcb_ctes3) + apply (rule_tac r'="\rv rv'. rv' = mdbPrev (cteMDBNode rv)" + and xf'=ret__unsigned_longlong_' in ccorres_split_nothrow) + apply (rule_tac P="tcb_at' curThread + and (\s. ksCurThread s = curThread)" + in getCTE_ccorres_helper[where P'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: typ_heap_simps' cte_level_bits_def + tcbCallerSlot_def size_of_def + tcb_cnode_index_defs) + apply (clarsimp simp: ccte_relation_def map_option_Some_eq2) + apply ceqv + apply (rule ccorres_assert) + apply (rename_tac mdbPrev_cte mdbPrev_cte_c) + apply (rule ccorres_split_nothrow_dc) + apply (simp add: updateMDB_def Let_def + cong: if_cong) + apply (rule_tac P="cte_wp_at' ((=) mdbPrev_cte) + (curThread + (tcbCallerSlot << cte_level_bits)) + and valid_mdb'" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(2) valid_mdb_ctes_of_prev[rotated]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) + apply (clarsimp simp: typ_heap_simps' split_def) + apply (rule getCTE_setCTE_rf_sr, simp_all)[1] + apply (clarsimp simp: ccte_relation_def map_option_Some_eq2 + cte_to_H_def mdb_node_to_H_def + c_valid_cte_def) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_dc) + apply (rule_tac P="cte_at' (curThread + (tcbCallerSlot << cte_level_bits)) + and tcb_at' curThread" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) + apply (clarsimp simp: typ_heap_simps' split_def tcbCallerSlot_def + tcb_cnode_index_defs + cte_level_bits_def size_of_def + packed_heap_update_collapse_hrs) + apply (rule setCTE_rf_sr, simp_all add: typ_heap_simps')[1] + apply (clarsimp simp: ccte_relation_eq_ccap_relation makeObject_cte + mdb_node_to_H_def nullMDBNode_def + ccap_relation_NullCap_iff) + (* copy message registers *) + apply (simp add: ccap_relation_reply_helper) + apply csymbr + apply (ctac add: fastpath_copy_mrs_ccorres[unfolded 
forM_x_def]) + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (simp add: setThreadState_runnable_simp) + apply (rule_tac P=\ in threadSet_ccorres_lemma2, vcg) + apply (clarsimp simp: typ_heap_simps' rf_sr_def + cstate_relation_def Let_def) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps' + update_tcb_map_tos map_to_tcbs_upd) + apply (subst map_to_ctes_upd_tcb_no_ctes, assumption) + apply (rule ball_tcb_cte_casesI, simp_all)[1] + apply (simp add: cep_relations_drop_fun_upd) + apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) + apply (simp add: ctcb_relation_def cthread_state_relation_def) + apply simp + apply (simp add: carch_state_relation_def cmachine_state_relation_def + typ_heap_simps' map_comp_update projectKO_opt_tcb + cvariable_relation_upd_const ko_at_projectKO_opt) + apply ceqv + (* switch to thread *) + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_split_nothrow_novcg_dc) + apply simp + apply (rule ccorres_call, + rule_tac vmid="ucast (the (apVMID (the asid_map_entry_opt)))" + and vsroot="capUntypedPtr (cteCap vs_cap)" + and asid="fst (the (capPTMappedAddress (capCap (cteCap vs_cap))))" + in switchToThread_fp_ccorres, + simp+)[1] + (* set message info registers and restore *) + apply (rule_tac P="\s. ksCurThread s = capTCBPtr (cteCap caller_cap)" + in ccorres_cross_over_guard) + apply csymbr + apply csymbr + apply (rule ccorres_call_hSkip) + apply (rule fastpath_restore_ccorres) + apply simp + apply simp + (* now WP/VCG reasoning mop-up operation starts *) + apply (simp add: setCurThread_def) + apply wp + apply (rule_tac P=\ in hoare_triv, simp) + apply simp + apply (simp add: imp_conjL rf_sr_ksCurThread del: all_imp_to_ex) + apply (clarsimp simp: ccap_relation_ep_helpers guard_is_UNIV_def + mi_from_H_def signed_n_msgRegisters_to_H + messageInfoFromWord_def Let_def + seL4_MessageInfo_lift_def + msgLengthBits_def msgExtraCapBits_def + msgMaxExtraCaps_def shiftL_nat + mask_def msgLabelBits_def) + apply (force simp: size_msgRegisters_def msgMaxLength_def + split: if_splits) + apply (wp sts_valid_objs' asid_has_vmid_lift) + apply simp + apply (vcg exspec=thread_state_ptr_set_tsType_np_modifies) + apply (simp add: pred_conj_def) + apply (rule mapM_x_wp'[OF hoare_weaken_pre]) + apply (wp asid_has_vmid_lift) + apply clarsimp + apply simp + apply (vcg exspec=fastpath_copy_mrs_modifies) + apply simp + apply wp + apply (wp setCTE_cte_wp_at_other asid_has_vmid_lift) + apply simp + apply vcg + apply simp + apply (wp | simp + | wp (once) updateMDB_weak_cte_wp_at + | wp (once) updateMDB_cte_wp_at_other asid_has_vmid_lift)+ + apply (vcg exspec=mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_modifies) + apply simp + apply (wp getCTE_wp') + apply simp + apply vcg + apply (simp add: shiftl_t2n) + apply (wp hoare_drop_imps setEndpoint_valid_mdb' set_ep_valid_objs' + setObject_no_0_obj'[where 'a=endpoint, folded setEndpoint_def] + asid_has_vmid_lift) + apply simp + apply (vcg exspec=endpoint_ptr_mset_epQueue_tail_state_modifies + exspec=endpoint_ptr_set_epQueue_head_np_modifies + exspec=endpoint_ptr_get_epQueue_tail_modifies) + apply (simp add: valid_pspace'_def pred_conj_def conj_comms + valid_mdb'_def) + apply (wp threadSet_cur threadSet_tcbState_valid_objs + threadSet_state_refs_of' threadSet_ctes_of + valid_ep_typ_at_lift' threadSet_cte_wp_at' asid_has_vmid_lift + | simp)+ + apply (vcg exspec=thread_state_ptr_mset_blockingObject_tsType_modifies) + apply simp + (* throw 
away results of isHighestPrio and the fastfail shortcut *) + apply (wp (once) hoare_drop_imp, wp) + apply (wp (once) hoare_drop_imp, wp) + apply simp + apply (vcg exspec=isHighestPrio_modifies) + apply simp + apply (rule cd_wp) + apply simp + apply vcg + apply simp + apply vcg + apply simp + apply vcg + apply simp + apply (rule getASIDPoolEntry_wp) + apply simp + apply (vcg exspec=findMapForASID_modifies) + apply simp + apply (vcg exspec=cap_vspace_cap_get_capVSMappedASID_modifies) + apply simp + (* accessing VSBasePtr without knowing it's a VSpace, can't use default spec *) + apply (rule conseqPre, vcg exspec=cap_vspace_cap_get_capVSBasePtr_spec2) + apply (rule subset_refl) + apply (rule conseqPre, vcg exspec=cap_vspace_cap_get_capVSBasePtr_spec2) + apply clarsimp + apply simp + apply (rule threadGet_wp) + apply (simp add: syscall_from_H_def ccap_relation_reply_helper) + apply (vcg exspec=seL4_Fault_get_seL4_FaultType_modifies) + apply simp + apply (simp add: ccap_relation_reply_helper) + apply (rule return_wp) + apply simp + apply (vcg exspec=fastpath_reply_cap_check_modifies) + apply simp + apply (rule getEndpoint_wp) + apply (simp add: syscall_from_H_def ccap_relation_reply_helper) + apply (vcg exspec=endpoint_ptr_get_state_modifies) + apply simp + apply (wp option_case_liftM_getNotification_wp[unfolded fun_app_def]) + apply simp + apply vcg + apply (simp add: getSlotCap_def) + apply (rule valid_isLeft_theRight_split) + apply (wp getCTE_wp') + apply (rule validE_R_abstract_rv) + apply wp + apply simp + apply (vcg exspec=lookup_fp_modifies) + apply simp + apply (rule threadGet_wp) + apply simp + apply vcg + apply simp + apply (rule user_getreg_wp) + apply simp + apply (rule user_getreg_wp) + apply (rule conjI) + (* Haskell precondition *) + apply (prop_tac "scast (scast tcbCallerSlot :: int_word) = tcbCallerSlot") + apply (simp add: tcbCallerSlot_def) + apply (clarsimp simp: ct_in_state'_def invs_cur' invs_arch_state' obj_at_tcbs_of word_sle_def) + apply (clarsimp simp: invs_ksCurDomain_maxDomain') + apply (rename_tac cur_tcb cte) + apply (frule invs_valid_objs') + apply (frule tcbs_of_aligned') + apply (simp add: invs_pspace_aligned') + apply (frule tcbs_of_cte_wp_at_caller) + apply (clarsimp simp: size_of_def field_simps + dest!: ptr_val_tcb_ptr_mask2[unfolded mask_def]) + apply (frule st_tcb_at_state_refs_ofD') + apply (frule ctes_of_valid', fastforce) + apply (clarsimp simp: obj_at_tcbs_of ct_in_state'_def st_tcb_at_tcbs_of + invs_valid_objs' ctes_of_valid' + fun_upd_def[symmetric] fun_upd_idem pred_tcb_at'_def invs_no_0_obj' + cong: conj_cong) + apply (rule conjI) (* obj_at' of ep ptr *) + apply (fastforce dest: ctes_of_valid' simp: valid_cap_simps' isCap_simps cte_wp_at_ctes_of) + apply clarsimp + apply (frule_tac p="p + tcbCallerSlot * cte_size" for p cte_size in ctes_of_valid', clarsimp) + apply (clarsimp simp: valid_capAligned) + apply (frule_tac p="p + tcbVTableSlot * cte_size" for p cte_size in ctes_of_valid', clarsimp) + apply (rule conjI) (* asid_wf *) + apply (clarsimp simp: isCap_simps valid_cap'_def dest!: isValidVTableRootD) + apply (solves \clarsimp simp: wellformed_mapdata'_def\) + apply (frule_tac tcb=tcb in tcbs_of_valid_tcb'[OF invs_valid_objs', rotated], simp) + apply (clarsimp simp add: valid_tcb'_def) + apply (frule invs_valid_objs') + apply (frule invs_valid_bitmaps) + apply (frule valid_bitmaps_bitmapQ_no_L1_orphans) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply clarsimp + apply (rule conjI; clarsimp?) 
(* canonical_address (capEPPtr (cteCap ctea)) *)
+  apply (clarsimp simp: obj_at'_is_canonical dest!: invs_pspace_canonical')
+  apply (clarsimp simp: isCap_simps valid_cap'_def[split_simps capability.split]
+                        maskCapRights_def cte_wp_at_ctes_of cte_level_bits_def)
+  apply (frule_tac p=a in ctes_of_valid', clarsimp)
+  apply (frule invs_mdb')
+  apply (simp add: valid_cap_simps')
+  apply (clarsimp simp: cte_wp_at_ctes_of cte_level_bits_def
+                        makeObject_cte isValidVTableRoot_def
+                        to_bool_def
+                        valid_mdb'_def valid_tcb_state'_def)
+  apply (rule conjI; clarsimp?) (* msglength *)
+   apply (simp add: word_le_nat_alt size_msgRegisters_def n_msgRegisters_def length_msgRegisters)
+  apply (prop_tac "capPTBasePtr (capCap (cteCap ctec)) = capUntypedPtr (cteCap ctec)")
+   apply (solves \<open>clarsimp simp add: isVTableRoot_ex\<close>)
+  apply (rule conjI) (* asid_has_vmid *)
+   apply (clarsimp simp: asid_has_vmid_def asid_has_entry_def)
+   apply (case_tac asid_entry, fastforce)
+  apply (frule ko_at_valid_ep', fastforce)
+  apply (safe del: notI disjE)[1]
+    apply (simp add: isSendEP_def valid_ep'_def tcb_at_invs'
+              split: Structures_H.endpoint.split_asm)
+   apply (rule subst[OF endpoint.sel(1)],
+          erule st_tcb_at_not_in_ep_queue[where P="(=) Running", rotated],
+          clarsimp+)
+   apply (simp add: obj_at_tcbs_of st_tcb_at_tcbs_of)
+   apply (drule invs_sym')
+   apply (erule_tac P=sym_refs in subst[rotated])
+   apply (rule fun_upd_idem[symmetric])
+   apply (clarsimp simp: tcb_bound_refs'_def)
+   apply (case_tac ntfnptr, simp_all)[1]
+   apply (clarsimp simp: set_eq_subset)
+  apply (solves \<open>clarsimp simp: capAligned_def isVTableRoot_def field_simps\<close>)
+
+  (* C precondition *)
+  apply (clarsimp simp: syscall_from_H_def[split_simps syscall.split]
+                        word_sle_def word_sless_def rf_sr_ksCurThread
+                        size_of_def cte_level_bits_def
+                        tcb_cnode_index_defs tcbCTableSlot_def tcbVTableSlot_def
+                        tcbReplySlot_def tcbCallerSlot_def from_bool_eq_if' of_bl_from_bool)
+  apply (frule obj_at_bound_tcb_grandD, clarsimp, clarsimp, assumption)
+  apply (clarsimp simp: typ_heap_simps' ccap_relation_ep_helpers)
+  apply (clarsimp simp: ccte_relation_eq_ccap_relation ccap_relation_case_sum_Null_endpoint
+                        typ_heap_simps' cap_get_tag_isCap mi_from_H_def)
+  apply (intro conjI impI allI;
+         clarsimp simp: isCap_simps capAligned_def objBits_simps'
+                        typ_heap_simps
+                        casid_map_relation_Some_get_tag
+                  dest!: ptr_val_tcb_ptr_mask2[unfolded objBits_def mask_def, simplified];
+         (solves \<open>clarsimp simp: ctcb_relation_def\<close>)?)
+  (* cap_get_tag_isCap hides info obtained from isValidVTableRootD' *)
+  apply (frule (1) capVSBasePtr_CL_capUntypedPtr_helper,
+         clarsimp simp: cap_get_tag_isCap isCap_simps dest!: isValidVTableRootD')+
+
+  done
+qed
+
+end
+
+end
diff --git a/proof/crefine/AARCH64/Fastpath_Defs.thy b/proof/crefine/AARCH64/Fastpath_Defs.thy
new file mode 100644
index 0000000000..39399599b0
--- /dev/null
+++ b/proof/crefine/AARCH64/Fastpath_Defs.thy
@@ -0,0 +1,180 @@
+(*
+ * Copyright 2023, Proofcraft Pty Ltd
+ * Copyright 2014, General Dynamics C4 Systems
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *)
+
+(* Contains the design specification of optimised fast paths through the kernel.
+   These paths check for specific circumstances before engaging, otherwise
+   falling back to the full kernel design specification (callKernel).
+   For this reason, fastpath + callKernel is expected to be semantically
+   identical to callKernel. *)
+
+theory Fastpath_Defs
+imports ArchMove_C
+begin
+
+context begin interpretation Arch .
(*FIXME: arch_split*) + +definition + "fastpaths sysc \ case sysc of + SysCall \ doE + curThread \ liftE $ getCurThread; + mi \ liftE $ getMessageInfo curThread; + cptr \ liftE $ asUser curThread $ getRegister capRegister; + + fault \ liftE $ threadGet tcbFault curThread; + pickFastpath \ liftE $ alternative (return True) (return False); + unlessE (fault = None \ msgExtraCaps mi = 0 + \ msgLength mi \ of_nat size_msgRegisters \ pickFastpath) + $ throwError (); + + ctab \ liftE $ getThreadCSpaceRoot curThread >>= getCTE; + epCap \ unifyFailure (doE t \ resolveAddressBits (cteCap ctab) cptr (size cptr); + liftE (getSlotCap (fst t)) odE); + unlessE (isEndpointCap epCap \ capEPCanSend epCap) + $ throwError (); + ep \ liftE $ getEndpoint (capEPPtr epCap); + unlessE (isRecvEP ep) $ throwError (); + dest \ returnOk $ hd $ epQueue ep; + newVTable \ liftE $ getThreadVSpaceRoot dest >>= getCTE; + unlessE (isValidVTableRoot $ cteCap newVTable) $ throwError (); + + vspace_root \ returnOk $ capPTBasePtr $ capCap $ cteCap newVTable; \ \cap_pd in C\ + asid \ returnOk $ fst $ the $ capPTMappedAddress $ capCap $ cteCap newVTable; + ap_entry_opt \ liftE $ getASIDPoolEntry asid; \ \asid_map_t asid_map in C\ + unlessE (\ap_entry. ap_entry_opt = Some ap_entry \ apVSpace ap_entry = vspace_root + \ apVMID ap_entry \ None) \ \C makes VMID check separate\ + $ throwError (); + \ \C code now saves the VMID from the ap_entry to stored_hw_asid.words[0] and the Haskell does + nothing, and these need to sync up in the preconditions of switchToThread_fp_ccorres\ + + curDom \ liftE $ curDomain; + curPrio \ liftE $ threadGet tcbPriority curThread; + destPrio \ liftE $ threadGet tcbPriority dest; + highest \ liftE $ isHighestPrio curDom destPrio; + unlessE (destPrio \ curPrio \ highest) $ throwError (); + unlessE (capEPCanGrant epCap \ capEPCanGrantReply epCap) $ throwError (); + + destDom \ liftE $ threadGet tcbDomain dest; + unlessE (destDom = curDom) $ throwError (); + + liftE $ do + setEndpoint (capEPPtr epCap) + (case tl (epQueue ep) of [] \ IdleEP | _ \ RecvEP (tl (epQueue ep))); + threadSet (tcbState_update (\_. BlockedOnReply)) curThread; + replySlot \ getThreadReplySlot curThread; + callerSlot \ getThreadCallerSlot dest; + replySlotCTE \ getCTE replySlot; + assert (mdbNext (cteMDBNode replySlotCTE) = 0 + \ isReplyCap (cteCap replySlotCTE) + \ capReplyMaster (cteCap replySlotCTE) + \ mdbFirstBadged (cteMDBNode replySlotCTE) + \ mdbRevocable (cteMDBNode replySlotCTE)); + destState \ getThreadState dest; + cteInsert (ReplyCap curThread False (blockingIPCCanGrant destState)) replySlot callerSlot; + + forM_x (take (unat (msgLength mi)) msgRegisters) + (\r. do v \ asUser curThread (getRegister r); + asUser dest (setRegister r v) od); + setThreadState Running dest; + Arch.switchToThread dest; + setCurThread dest; + + asUser dest $ zipWithM_x setRegister + [badgeRegister, msgInfoRegister] + [capEPBadge epCap, wordFromMessageInfo (mi\ msgCapsUnwrapped := 0 \)]; + + stateAssert kernelExitAssertions [] + od + + odE (\_. 
callKernel (SyscallEvent sysc)) + | SysReplyRecv \ doE + curThread \ liftE $ getCurThread; + mi \ liftE $ getMessageInfo curThread; + cptr \ liftE $ asUser curThread $ getRegister capRegister; + + fault \ liftE $ threadGet tcbFault curThread; + pickFastpath \ liftE $ alternative (return True) (return False); + unlessE (fault = None \ msgExtraCaps mi = 0 + \ msgLength mi \ of_nat size_msgRegisters \ pickFastpath) + $ throwError (); + + ctab \ liftE $ getThreadCSpaceRoot curThread >>= getCTE; + epCap \ unifyFailure (doE t \ resolveAddressBits (cteCap ctab) cptr (size cptr); + liftE (getSlotCap (fst t)) odE); + + unlessE (isEndpointCap epCap \ capEPCanReceive epCap) + $ throwError (); + + bound_ntfn \ liftE $ getBoundNotification curThread; + active_ntfn \ liftE $ case bound_ntfn of None \ return False + | Some ntfnptr \ liftM isActive $ getNotification ntfnptr; + unlessE (\ active_ntfn) $ throwError (); + + ep \ liftE $ getEndpoint (capEPPtr epCap); + unlessE (\ isSendEP ep) $ throwError (); + + callerSlot \ liftE $ getThreadCallerSlot curThread; + callerCTE \ liftE $ getCTE callerSlot; + callerCap \ returnOk $ cteCap callerCTE; + \ \(* AArch64 does not check whether the caller cap is a ReplyMaster cap, since slow path + fails in that case. AArch32 C code does perform the redundant check. + See fastpath_reply_cap_check *)\ + unlessE (isReplyCap callerCap) $ throwError (); + + caller \ returnOk $ capTCBPtr callerCap; + callerFault \ liftE $ threadGet tcbFault caller; + unlessE (callerFault = None) $ throwError (); + newVTable \ liftE $ getThreadVSpaceRoot caller >>= getCTE; + unlessE (isValidVTableRoot $ cteCap newVTable) $ throwError (); + + vspace_root \ returnOk $ capPTBasePtr $ capCap $ cteCap newVTable; \ \cap_pd in C\ + asid \ returnOk $ fst $ the $ capPTMappedAddress $ capCap $ cteCap newVTable; + ap_entry_opt \ liftE $ getASIDPoolEntry asid; \ \asid_map_t asid_map in C\ + unlessE (\ap_entry. ap_entry_opt = Some ap_entry \ apVSpace ap_entry = vspace_root + \ apVMID ap_entry \ None) \ \C makes VMID check separate\ + $ throwError (); + \ \C code now saves the VMID from the ap_entry to stored_hw_asid.words[0] and the Haskell does + nothing, and these need to sync up in the preconditions of switchToThread_fp_ccorres\ + + curDom \ liftE $ curDomain; + callerPrio \ liftE $ threadGet tcbPriority caller; + highest \ liftE $ isHighestPrio curDom callerPrio; + unlessE highest $ throwError (); + + callerDom \ liftE $ threadGet tcbDomain caller; + unlessE (callerDom = curDom) $ throwError (); + liftE $ do + epCanGrant \ return $ capEPCanGrant epCap; + threadSet (tcbState_update (\_. BlockedOnReceive (capEPPtr epCap) epCanGrant)) curThread; + setEndpoint (capEPPtr epCap) + (case ep of IdleEP \ RecvEP [curThread] | RecvEP ts \ RecvEP (ts @ [curThread])); + mdbPrev \ liftM (mdbPrev o cteMDBNode) $ getCTE callerSlot; + assert (mdbPrev \ 0); + updateMDB mdbPrev (mdbNext_update (K 0) o mdbFirstBadged_update (K True) + o mdbRevocable_update (K True)); + setCTE callerSlot makeObject; + + forM_x (take (unat (msgLength mi)) msgRegisters) + (\r. do v \ asUser curThread (getRegister r); + asUser caller (setRegister r v) od); + setThreadState Running caller; + Arch.switchToThread caller; + setCurThread caller; + + asUser caller $ zipWithM_x setRegister + [badgeRegister, msgInfoRegister] + [0, wordFromMessageInfo (mi\ msgCapsUnwrapped := 0 \)]; + + stateAssert kernelExitAssertions [] + od + + odE (\_. 
callKernel (SyscallEvent sysc)) + + | _ \ callKernel (SyscallEvent sysc)" + +end + +end diff --git a/proof/crefine/AARCH64/Fastpath_Equiv.thy b/proof/crefine/AARCH64/Fastpath_Equiv.thy new file mode 100644 index 0000000000..0a6da25e76 --- /dev/null +++ b/proof/crefine/AARCH64/Fastpath_Equiv.thy @@ -0,0 +1,1941 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Contains proofs that fastpath + callKernel is semantically identical to + callKernel. *) + +theory Fastpath_Equiv +imports Fastpath_Defs IsolatedThreadAction Refine.RAB_FN +begin + +lemma setCTE_obj_at'_queued: + "\obj_at' (\tcb. P (tcbQueued tcb)) t\ setCTE p v \\rv. obj_at' (\tcb. P (tcbQueued tcb)) t\" + unfolding setCTE_def + by (rule setObject_cte_obj_at_tcb', simp+) + +crunch obj_at'_queued: cteInsert "obj_at' (\tcb. P (tcbQueued tcb)) t" + (wp: setCTE_obj_at'_queued crunch_wps) + +crunch obj_at'_not_queued: emptySlot "obj_at' (\a. \ tcbQueued a) p" + (wp: setCTE_obj_at'_queued) + +lemma getEndpoint_obj_at': + "\obj_at' P ptr\ getEndpoint ptr \\rv s. P rv\" + apply (wp getEndpoint_wp) + apply (clarsimp simp: obj_at'_def ) + done + +lemmas setEndpoint_obj_at_tcb' = setEndpoint_obj_at'_tcb + +crunches tcbSchedEnqueue + for tcbContext[wp]: "obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t" + (simp: tcbQueuePrepend_def) + +lemma setCTE_tcbContext: + "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ + setCTE slot cte + \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma setThreadState_tcbContext: + "setThreadState st tptr \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setThreadState_def rescheduleRequired_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps) + apply (fastforce simp: obj_at'_def objBits_simps projectKOs atcbContext_def ps_clear_upd) + done + +lemma setBoundNotification_tcbContext: + "setBoundNotification ntfnPtr tptr \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setBoundNotification_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps) + apply (fastforce simp: obj_at'_def objBits_simps projectKOs) + done + +declare comp_apply [simp del] +crunch tcbContext[wp]: deleteCallerCap "obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t" + (wp: setEndpoint_obj_at_tcb' setBoundNotification_tcbContext + setNotification_tcb crunch_wps setThreadState_tcbContext + simp: crunch_simps unless_def) +declare comp_apply [simp] + + +crunch ksArch[wp]: asUser "\s. P (ksArchState s)" + (wp: crunch_wps) + +definition + tcbs_of :: "kernel_state => word64 => tcb option" +where + "tcbs_of s = (%x. if tcb_at' x s then projectKO_opt (the (ksPSpace s x)) else None)" + +lemma obj_at_tcbs_of: + "obj_at' P t s = (EX tcb. tcbs_of s t = Some tcb & P tcb)" + apply (simp add: tcbs_of_def split: if_split) + apply (intro conjI impI) + apply (clarsimp simp: obj_at'_def) + apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI]) + done + +lemma st_tcb_at_tcbs_of: + "st_tcb_at' P t s = (EX tcb. 
tcbs_of s t = Some tcb & P (tcbState tcb))" + by (simp add: st_tcb_at'_def obj_at_tcbs_of) + +lemma tcbs_of_ko_at': + "\ tcbs_of s p = Some tcb \ \ ko_at' tcb p s" + by (simp add: obj_at_tcbs_of) + +lemma tcbs_of_valid_tcb': + "\ valid_objs' s; tcbs_of s p = Some tcb \ \ valid_tcb' tcb s" + by (frule tcbs_of_ko_at') + (drule (1) ko_at_valid_objs', auto simp: valid_obj'_def) + +lemma acc_CNodeCap_repr: + "isCNodeCap cap + \ cap = CNodeCap (capCNodePtr cap) (capCNodeBits cap) + (capCNodeGuard cap) (capCNodeGuardSize cap)" + by (clarsimp simp: isCap_simps) + +lemma valid_cnode_cap_cte_at': + "\ s \' c; isCNodeCap c; ptr = capCNodePtr c; v < 2 ^ capCNodeBits c \ + \ cte_at' (ptr + v * 2^cteSizeBits) s" + apply (drule less_mask_eq) + apply (drule(1) valid_cap_cte_at'[where addr=v]) + apply (simp add: mult.commute mult.left_commute) + done + +lemmas valid_cnode_cap_cte_at'' + = valid_cnode_cap_cte_at'[simplified objBits_defs, simplified] + +declare of_int_sint_scast[simp] + +lemma of_bl_from_bool: + "of_bl [x] = from_bool x" + by (cases x, simp_all add: from_bool_def) + +(* FIXME AARCH64 +lemma dmo_clearExMonitor_setCurThread_swap: + "(do _ \ doMachineOp RISCV64.clearExMonitor; + setCurThread thread + od) + = (do _ \ setCurThread thread; + doMachineOp ARM.clearExMonitor od)" + apply (simp add: setCurThread_def doMachineOp_def split_def) + apply (rule oblivious_modify_swap[symmetric]) + apply (intro oblivious_bind, simp_all) + done + +lemma pd_at_asid_inj': + "pd_at_asid' pd asid s \ pd_at_asid' pd' asid s \ pd' = pd" + by (clarsimp simp: pd_at_asid'_def obj_at'_def) +*) + +lemma bind_case_sum_rethrow: + "rethrowFailure fl f >>= case_sum e g + = f >>= case_sum (e \ fl) g" + apply (simp add: rethrowFailure_def handleE'_def + bind_assoc) + apply (rule bind_cong[OF refl]) + apply (simp add: throwError_bind split: sum.split) + done + +declare empty_fail_resolveAddressBits[iff] + +(* FIXME AARCH64 rename and remove *) +lemmas lookupExtraCaps_null = lookupExtraCaps_simple_rewrite + +lemma isRecvEP_endpoint_case: + "isRecvEP ep \ case_endpoint f g h ep = f (epQueue ep)" + by (clarsimp simp: isRecvEP_def split: endpoint.split_asm) + +lemma unifyFailure_catch_If: + "catch (unifyFailure f >>=E g) h + = f >>= (\rv. if isRight rv then catch (g (theRight rv)) h else h ())" + apply (simp add: unifyFailure_def rethrowFailure_def + handleE'_def catch_def bind_assoc + bind_bindE_assoc cong: if_cong) + apply (rule bind_cong[OF refl]) + apply (simp add: throwError_bind isRight_def return_returnOk + split: sum.split) + done + +lemma st_tcb_at_not_in_ep_queue: + "\ st_tcb_at' P t s; ko_at' ep epptr s; sym_refs (state_refs_of' s); + ep \ IdleEP; \ts. P ts \ tcb_st_refs_of' ts = {} \ + \ t \ set (epQueue ep)" + apply clarsimp + apply (drule(1) sym_refs_ko_atD') + apply (cases ep, simp_all add: st_tcb_at_refs_of_rev') + apply (fastforce simp: st_tcb_at'_def obj_at'_def )+ + done + +lemma st_tcb_at_not_in_ntfn_queue: + "\ st_tcb_at' P t s; ko_at' ntfn ntfnptr s; sym_refs (state_refs_of' s); ntfnObj ntfn = WaitingNtfn xs; + \ts. P ts \ (ntfnptr, TCBSignal) \ tcb_st_refs_of' ts \ + \ t \ set xs" + apply (drule(1) sym_refs_ko_atD') + apply (clarsimp simp: st_tcb_at_refs_of_rev') + apply (drule_tac x="(t, NTFNSignal)" in bspec, simp) + apply (fastforce simp: st_tcb_at'_def obj_at'_def ko_wp_at'_def tcb_bound_refs'_def) + done + +lemma sym_refs_upd_sD: + "\ sym_refs ((state_refs_of' s) (p := S)); valid_pspace' s; + ko_at' ko p s; refs_of' (injectKO koEx) = S; + objBits koEx = objBits ko \ + \ \s'. 
sym_refs (state_refs_of' s') + \ (\p' (ko' :: endpoint). ko_at' ko' p' s \ injectKO ko' \ injectKO ko + \ ko_at' ko' p' s') + \ (\p' (ko' :: Structures_H.notification). ko_at' ko' p' s \ injectKO ko' \ injectKO ko + \ ko_at' ko' p' s') + \ (ko_at' koEx p s')" + apply (rule exI, rule conjI) + apply (rule state_refs_of'_upd[where ko'="injectKO koEx" and ptr=p and s=s, + THEN ssubst[where P=sym_refs], rotated 2]) + apply simp+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def ) + apply (clarsimp simp: project_inject objBits_def) + apply (clarsimp simp: obj_at'_def ps_clear_upd + split: if_split) + apply (clarsimp simp: project_inject objBits_def) + apply auto + done + +lemma sym_refs_upd_tcb_sD: + "\ sym_refs ((state_refs_of' s) (p := {r \ state_refs_of' s p. snd r = TCBBound})); valid_pspace' s; + ko_at' (tcb :: tcb) p s \ + \ \s'. sym_refs (state_refs_of' s') + \ (\p' (ko' :: endpoint). + ko_at' ko' p' s \ ko_at' ko' p' s') + \ (\p' (ko' :: Structures_H.notification). + ko_at' ko' p' s \ ko_at' ko' p' s') + \ (st_tcb_at' ((=) Running) p s')" + apply (drule(2) sym_refs_upd_sD[where koEx="makeObject\tcbState := Running, tcbBoundNotification := tcbBoundNotification tcb\"]) + apply (clarsimp dest!: ko_at_state_refs_ofD') + apply (simp add: objBits_simps) + apply (erule exEI) + apply clarsimp + apply (auto simp: st_tcb_at'_def elim!: obj_at'_weakenE) + done + +lemma updateCap_cte_wp_at_cteMDBNode: + "\cte_wp_at' (\cte. P (cteMDBNode cte)) p\ + updateCap ptr cap + \\rv. cte_wp_at' (\cte. P (cteMDBNode cte)) p\" + apply (wp updateCap_cte_wp_at_cases) + apply (simp add: o_def) + done + +lemma ctes_of_Some_cte_wp_at: + "ctes_of s p = Some cte \ cte_wp_at' P p s = P cte" + by (clarsimp simp: cte_wp_at_ctes_of) + +lemma user_getreg_wp: + "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (user_regs \ atcbContextGet o tcbArch) tcb r = rv) t s \ Q rv s)\ + asUser t (getRegister r) \Q\" + apply (rule_tac Q="\rv s. \rv'. rv' = rv \ Q rv' s" in hoare_post_imp) + apply simp + apply (rule hoare_pre, wp hoare_vcg_ex_lift user_getreg_rv) + apply (clarsimp simp: obj_at'_def) + done + +lemma setUntypedCapAsFull_replyCap[simp]: + "setUntypedCapAsFull cap (ReplyCap curThread False cg) slot = return ()" + by (clarsimp simp:setUntypedCapAsFull_def isCap_simps) + +lemma option_case_liftM_getNotification_wp: + "\\s. \rv. (case x of None \ rv = v | Some p \ obj_at' (\ntfn. f ntfn = rv) p s) + \ Q rv s\ case x of None \ return v | Some ptr \ liftM f $ getNotification ptr \ Q \" + apply (rule hoare_pre, (wpc; wp getNotification_wp)) + apply (auto simp: obj_at'_def) + done + +lemma threadSet_st_tcb_at_state: + "\\s. tcb_at' t s \ (if p = t + then obj_at' (\tcb. P (tcbState (f tcb))) t s + else st_tcb_at' P p s)\ + threadSet f t \\_. st_tcb_at' P p\" + apply (rule hoare_chain) + apply (rule threadSet_obj_at'_really_strongest) + prefer 2 + apply (simp add: st_tcb_at'_def) + apply (clarsimp split: if_splits simp: st_tcb_at'_def o_def) + done + +lemma recv_ep_queued_st_tcb_at': + "\ ko_at' (Structures_H.endpoint.RecvEP ts) epptr s ; + t \ set ts; + sym_refs (state_refs_of' s) \ + \ st_tcb_at' isBlockedOnReceive t s" + apply (drule obj_at_ko_at') + apply clarsimp + apply (drule (1) sym_refs_ko_atD') + apply (clarsimp simp: pred_tcb_at'_def obj_at'_real_def refs_of_rev') + apply (erule_tac x=t in ballE; clarsimp?) + apply (erule ko_wp_at'_weakenE) + apply (clarsimp simp: isBlockedOnReceive_def ) + done + +lemma valid_ep_typ_at_lift': + "\ \p. \typ_at' TCBT p\ f \\rv. typ_at' TCBT p\ \ + \ \\s. valid_ep' ep s\ f \\rv s. 
valid_ep' ep s\" + apply (cases ep, simp_all add: valid_ep'_def) + apply (wp hoare_vcg_const_Ball_lift typ_at_lifts | assumption)+ + done + +lemma threadSet_tcbState_valid_objs: + "\valid_tcb_state' st and valid_objs'\ + threadSet (tcbState_update (\_. st)) t + \\rv. valid_objs'\" + apply (wp threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma possibleSwitchTo_rewrite: + "monadic_rewrite True True + (\s. obj_at' (\tcb. tcbPriority tcb = destPrio \ tcbDomain tcb = destDom) t s + \ ksSchedulerAction s = ResumeCurrentThread + \ ksCurThread s = thread + \ ksCurDomain s = curDom + \ destDom = curDom) + (possibleSwitchTo t) (setSchedulerAction (SwitchToThread t))" + supply if_split[split del] + apply (simp add: possibleSwitchTo_def) + (* under current preconditions both branch conditions are false *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: threadGet_wp cd_wp\) + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: threadGet_wp cd_wp\) + (* discard unused getters before setSchedulerAction *) + apply (simp add: getCurThread_def curDomain_def gets_bind_ign getSchedulerAction_def) + apply (monadic_rewrite_symb_exec_l_drop, rule monadic_rewrite_refl) + apply (auto simp: obj_at'_def) + done + +lemma scheduleSwitchThreadFastfail_False_wp: + "\\s. ct \ it \ cprio \ tprio \ + scheduleSwitchThreadFastfail ct it cprio tprio + \\rv s. \ rv \" + unfolding scheduleSwitchThreadFastfail_def + by (wp threadGet_wp) + (auto dest!: obj_at_ko_at' simp: le_def obj_at'_def) + +lemma lookupBitmapPriority_lift: + assumes prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" + and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" + shows "\\s. P (lookupBitmapPriority d s) \ f \\_ s. P (lookupBitmapPriority d s) \" + unfolding lookupBitmapPriority_def + apply (rule hoare_pre) + apply (wps prqL1 prqL2) + apply wpsimp+ + done + +(* slow path additionally requires current thread not idle *) +definition + "fastpathBestSwitchCandidate t \ \s. + ksReadyQueuesL1Bitmap s (ksCurDomain s) = 0 + \ (\tprio. obj_at' (\tcb. tcbPriority tcb = tprio) t s + \ (obj_at' (\tcb. tcbPriority tcb \ tprio) (ksCurThread s) s + \ lookupBitmapPriority (ksCurDomain s) s \ tprio))" + +lemma fastpathBestSwitchCandidateI: + "\ ksReadyQueuesL1Bitmap s (ksCurDomain s) = 0 + \ tcbPriority ctcb \ tcbPriority ttcb + \ lookupBitmapPriority (ksCurDomain s) s \ tcbPriority ttcb; + ko_at' ttcb t s; ko_at' ctcb (ksCurThread s) s\ + \ fastpathBestSwitchCandidate t s" + unfolding fastpathBestSwitchCandidate_def + by normalise_obj_at' + +lemma fastpathBestSwitchCandidate_lift: + assumes ct[wp]: "\P. \\s. P (ksCurThread s) \ f \ \_ s. P (ksCurThread s) \" + assumes cd[wp]: "\P. \\s. P (ksCurDomain s) \ f \ \_ s. P (ksCurDomain s) \" + assumes l1[wp]: "\P. \\s. P (ksReadyQueuesL1Bitmap s) \ f \ \_ s. P (ksReadyQueuesL1Bitmap s) \" + assumes l2[wp]: "\P. \\s. P (ksReadyQueuesL2Bitmap s) \ f \ \_ s. P (ksReadyQueuesL2Bitmap s) \" + assumes p[wp]: "\P t. \ obj_at' (\tcb. P (tcbPriority tcb)) t \ f \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t \" + shows "\ tcb_at' t and fastpathBestSwitchCandidate t \ f \\rv. 
fastpathBestSwitchCandidate t \" + unfolding fastpathBestSwitchCandidate_def lookupBitmapPriority_def l1IndexToPrio_def + apply (rule hoare_pre) + apply (rule hoare_lift_Pf2[where f=ksCurDomain]) + apply (wp hoare_vcg_disj_lift hoare_vcg_all_lift) + apply (rule hoare_lift_Pf2[where f=ksCurThread]) + apply (rule hoare_lift_Pf2[where f=ksReadyQueuesL1Bitmap]) + apply (rule hoare_lift_Pf2[where f=ksReadyQueuesL2Bitmap]) + apply (wp hoare_vcg_imp_lift') + apply (strengthen not_obj_at'_strengthen) + apply (wpsimp simp: comp_def wp: l1 l2 hoare_vcg_disj_lift)+ + apply (drule (1) tcb_at_not_obj_at_elim'[rotated]) + apply (rename_tac tprio, erule_tac x=tprio in allE) + apply clarsimp + apply (drule (1) tcb_at_not_obj_at_elim'[rotated]) + apply (clarsimp simp: obj_at'_def) + done + +lemma fastpathBestSwitchCandidate_ksSchedulerAction_simp[simp]: + "fastpathBestSwitchCandidate t (s\ksSchedulerAction := a\) + = fastpathBestSwitchCandidate t s" + unfolding fastpathBestSwitchCandidate_def lookupBitmapPriority_def + by simp + +lemma sched_act_SwitchToThread_rewrite: + "\ sa = SwitchToThread t \ monadic_rewrite F E Q (m_sw t) f \ + \ monadic_rewrite F E ((\_. sa = SwitchToThread t) and Q) + (case_scheduler_action m_res m_ch (\t. m_sw t) sa) f" + apply (cases sa; simp add: monadic_rewrite_impossible) + apply (rename_tac t') + apply (case_tac "t' = t"; simp add: monadic_rewrite_impossible) + done + +lemma schedule_rewrite_ct_not_runnable': + "monadic_rewrite True True + (\s. ksSchedulerAction s = SwitchToThread t \ ct_in_state' (Not \ runnable') s + \ (ksCurThread s \ ksIdleThread s) + \ fastpathBestSwitchCandidate t s) + (schedule) + (do setSchedulerAction ResumeCurrentThread; switchToThread t od)" + supply subst_all [simp del] + apply (simp add: schedule_def) + (* switching to t *) + apply (monadic_rewrite_l sched_act_SwitchToThread_rewrite[where t=t]) + (* not wasRunnable, skip enqueue *) + apply (simp add: when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + (* fastpath: \ (fastfail \ \ highest) *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + (* fastpath: no scheduleChooseNewThread *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + (* remove no-ops *) + apply (repeat 10 monadic_rewrite_symb_exec_l) (* until switchToThread *) + apply (simp add: setSchedulerAction_def) + apply (subst oblivious_modify_swap[symmetric], + rule oblivious_switchToThread_schact) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: empty_fail_isRunnable simp: isHighestPrio_def')+ + apply (clarsimp simp: ct_in_state'_def not_pred_tcb_at'_strengthen + fastpathBestSwitchCandidate_def) + apply normalise_obj_at' + done + +lemma resolveAddressBits_points_somewhere: + "\\s. \slot. Q slot s\ resolveAddressBits cp cptr bits \Q\,-" + apply (rule_tac Q'="\rv s. \rv. Q rv s" in hoare_strengthen_postE_R) + apply wp + apply clarsimp + done + +lemma foldr_copy_register_tsrs: + "foldr (\r . copy_register_tsrs x y r r (\x. x)) rs s + = (s (y := TCBStateRegs (tsrState (s y)) + (\r. if r \ set rs then tsrContext (s x) r + else tsrContext (s y) r)))" + apply (induct rs) + apply simp + apply (simp add: copy_register_tsrs_def fun_eq_iff + split: if_split) + done + +lemmas cteInsert_obj_at'_not_queued = cteInsert_obj_at'_queued[of "\a. 
\ a"] + +(* FIXME: why did this go away since ARM? *) +lemma empty_fail_getObject_tcb [intro!, wp, simp]: + shows "empty_fail (getObject x :: tcb kernel)" + by (auto intro: empty_fail_getObject) + +lemma monadic_rewrite_threadGet: + "monadic_rewrite E F (obj_at' (\tcb. f tcb = v) t) + (threadGet f t) (return v)" + unfolding getThreadState_def threadGet_def + apply (simp add: liftM_def) + apply monadic_rewrite_symb_exec_l + apply (rule_tac P="\_. f x = v" in monadic_rewrite_pre_imp_eq) + apply blast + apply (wpsimp wp: OMG_getObject_tcb simp: obj_tcb_at')+ + done + +lemma monadic_rewrite_getThreadState: + "monadic_rewrite E F (obj_at' (\tcb. tcbState tcb = v) t) + (getThreadState t) (return v)" + unfolding getThreadState_def + by (rule monadic_rewrite_threadGet) + +lemma setCTE_obj_at'_tcbIPCBuffer: + "\obj_at' (\tcb. P (tcbIPCBuffer tcb)) t\ setCTE p v \\rv. obj_at' (\tcb. P (tcbIPCBuffer tcb)) t\" + unfolding setCTE_def + by (rule setObject_cte_obj_at_tcb', simp+) + +context +notes if_cong[cong] +begin +crunches cteInsert, asUser + for obj_at'_tcbIPCBuffer[wp]: "obj_at' (\tcb. P (tcbIPCBuffer tcb)) t" + (wp: setCTE_obj_at'_queued crunch_wps threadSet_obj_at'_really_strongest) +end + +crunches cteInsert, threadSet, asUser, emptySlot + for ksReadyQueuesL1Bitmap_inv[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap_inv[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (wp: hoare_drop_imps) + +crunch ksReadyQueuesL1Bitmap_inv[wp]: setEndpoint "\s. P (ksReadyQueuesL1Bitmap s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) +crunch ksReadyQueuesL2Bitmap_inv[wp]: setEndpoint "\s. P (ksReadyQueuesL2Bitmap s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) + +lemma setThreadState_runnable_bitmap_inv: + "runnable' ts \ + \ \s. P (ksReadyQueuesL1Bitmap s) \ setThreadState ts t \\rv s. P (ksReadyQueuesL1Bitmap s) \" + "runnable' ts \ + \ \s. Q (ksReadyQueuesL2Bitmap s) \ setThreadState ts t \\rv s. Q (ksReadyQueuesL2Bitmap s) \" + by (simp_all add: setThreadState_runnable_simp, wp+) + +(* FIXME move *) +crunches curDomain + for (no_fail) no_fail[intro!, wp, simp] + +lemma setThreadState_tcbDomain_tcbPriority_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbDomain tcb) (tcbPriority tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps) + done + +lemma setThreadState_tcbQueued_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbQueued tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps) + done + +lemma setThreadState_tcbFault_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbFault tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps) + done + +lemma setThreadState_tcbArch_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. 
P (tcbArch tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps) + done + +(* FIXME AARCH64 MOVE *) +lemma setRegister_simple_modify_registers: + "setRegister r v = (\con. ({((), modify_registers (\f. f(r := v)) con)}, False))" + by (simp add: setRegister_def simpler_modify_def modify_registers_def) + +term "modify_registers (\regs. foldl (\f (r,v). f(r := v)) regs (zip rs vs)) con" + +(* FIXME AARCH64 MOVE *) +lemma zipWithM_setRegister_simple_modify_registers: + "zipWithM_x setRegister rs vs + = (\con. ({((), modify_registers (\regs. foldl (\f (r,v). f(r := v)) regs (zip rs vs)) con)}, + False))" + apply (simp add: zipWithM_x_mapM_x) + apply (induct ("zip rs vs")) + apply (simp add: mapM_x_Nil return_def modify_registers_def) + apply (clarsimp simp: mapM_x_Cons bind_def setRegister_def modify_registers_def + simpler_modify_def fun_upd_def[symmetric]) + done + +(* FIXME AARCH64 move to IsolatedThreadAction where existing setRegister_simple is commented out *) +lemma setRegister_simple: + "setRegister r v = (\con. ({((), UserContext (fpu_state con) ((user_regs con)(r := v)))}, False))" + by (simp add: setRegister_def simpler_modify_def) + +lemma no_fail_getObject_asidpool[wp]: + "no_fail (asid_pool_at' pool_ptr) (getObject pool_ptr :: asidpool kernel)" + (* FIXME AARCH64 no_fail_getObject_tcb and no_fail_getObject_vcpu don't need this at their locations + in Refine; move this lemma next to them and delete this supply *) + supply lookupAround2_same1[simp del] + apply (simp add: getObject_def split_def) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp add: obj_at'_def objBits_simps' typ_at_to_obj_at_arches + cong: conj_cong option.case_cong) + apply (rule ps_clear_lookupAround2; assumption?) + apply simp + apply (erule is_aligned_no_overflow) + apply clarsimp + done + +(* FIXME AARCH64 move *) +crunches getPoolPtr + for (no_fail) no_fail[intro!, wp] + +(* FIXME AARCH64 move? *) +lemma no_fail_getASIDPoolEntry[intro!, wp]: + "no_fail (\s. 0 < asid \ asid_wf asid + \ ((\ap. armKSASIDTable (ksArchState s) (asidHighBitsOf asid) = Some ap + \ asid_pool_at' ap s))) + (getASIDPoolEntry asid)" + unfolding getASIDPoolEntry_def getPoolPtr_def + supply asidHighBitsOf[simp del] + apply (wpsimp simp: if_apply_def2 asid_wf_def) + apply (simp add: mask_def asid_bits_def asidRange_def) + done + +(* FIXME AARCH64 stolen from VSpace_C, both should get moved to Move_C *) +lemma isValidVTableRoot_def2: + "isValidVTableRoot cap = + (\pt asid vref. cap = ArchObjectCap (PageTableCap pt VSRootPT_T (Some (asid,vref))))" + unfolding isValidVTableRoot_def + by (auto simp: isVTableRoot_def + split: capability.splits arch_capability.splits option.splits pt_type.splits) + +(* FIXME AARCH64: make better comment + We are rewriting the slow path (LHS) to the fastpath (RHS) during the Call system call. + We assume non-failure on the slow path side, and need to show that the fastpath side's results + are a subset of slow path results, and that fastpath non-failure implies slow path non-failure. +*) +lemma fastpath_callKernel_SysCall_corres: + "monadic_rewrite True False + (invs' and ct_in_state' ((=) Running) + and (\s. ksSchedulerAction s = ResumeCurrentThread) + and (\s. 
ksDomainTime s \ 0) + and ready_qs_runnable) + (callKernel (SyscallEvent SysCall)) (fastpaths SysCall)" + supply if_cong[cong] option.case_cong[cong] if_split[split del] + supply empty_fail_getMRs[wp] (* FIXME *) + supply empty_fail_getEndpoint[wp] (* FIXME *) + (* rewrite callKernel as callKernel \ callKernel, expanding only the left alternative; when slow + path throws, the catch goes back to slow path, which will correspond to the right alternative *) + apply (rule monadic_rewrite_introduce_alternative[OF callKernel_def[simplified atomize_eq]]) + apply monadic_rewrite_pre + apply (rule monadic_rewrite_bind_alternative_l, wpsimp) + (* lift out fastpathKernelAssertions, we'll need them to show non-failure in fastpath checks *) + apply (rule monadic_rewrite_stateAssert) + apply (simp add: handleEvent_def handleCall_def + handleInvocation_def liftE_bindE_handle + bind_assoc getMessageInfo_def) + apply (simp add: catch_liftE_bindE unlessE_throw_catch_If + unifyFailure_catch_If catch_liftE + getMessageInfo_def alternative_bind + fastpaths_def + cong: if_cong) + (* getters such as getCurThread don't change state, so we can pull their bind out of the alternative *) + apply (rule monadic_rewrite_bind_alternative_l, wp) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_bind_alternative_l, wp) + apply (rule monadic_rewrite_bind_tail) + apply (rename_tac msgInfo) + apply (rule monadic_rewrite_bind_alternative_l, wp) + apply (rule monadic_rewrite_bind_tail) + (* both sides same until we hit syscall on LHS and tcbFault check on RHS *) + apply monadic_rewrite_symb_exec_r + apply (rename_tac tcbFault) + (* clear up alternative on RHS introduced by pickFastpath *) + (* FIXME AARCH64 is pickFastpath needed? it seems a completely arbitrary alternative that we + get rid of here without further use *) + apply (rule monadic_rewrite_alternative_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* any "if P then A else callKernel" on RHS can be rewritten to A since callKernel + is an alternative of LHS, with P becoming an assumption of the goal *) + (* FIXME AARCH64: this pattern is very common, might make sense to make it a rule with a name *) + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* unfold syscall on LHS *) + apply (simp add: split_def Syscall_H.syscall_def + liftE_bindE_handle bind_assoc + capFaultOnFailure_def) + apply (simp only: bindE_bind_linearise[where f="rethrowFailure fn f'" for fn f'] + bind_case_sum_rethrow) + apply (simp add: lookupCapAndSlot_def + lookupSlotForThread_def bindE_assoc + liftE_bind_return_bindE_returnOk split_def + getThreadCSpaceRoot_def locateSlot_conv + returnOk_liftE[symmetric] const_def + getSlotCap_def) + apply (simp only: liftE_bindE_assoc) + apply (rule monadic_rewrite_bind_alternative_l, wp) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_bind_alternative_l, wpsimp) + (* getting endpoint cap *) + (* head of LHS can be rewritten into head of RHS using fn; split off and use fn on tail of LHS *) + apply (rule_tac fn="case_sum Inl (Inr \ fst)" in monadic_rewrite_split_fn) + apply (simp add: liftME_liftM[symmetric] liftME_def bindE_assoc) + apply (rule monadic_rewrite_refl) + (* LHS checks is_Right of the result, so we can throw away Inl case *) + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + apply (simp add: isRight_right_map isRight_case_sum) + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule 
monadic_rewrite_alternative_l) + (* can't do anything with lookupIPCBuffer on LHS right now *) + apply (rule monadic_rewrite_bind_alternative_l[OF lookupIPC_inv]) + apply monadic_rewrite_symb_exec_l + apply (simp add: lookupExtraCaps_null returnOk_bind liftE_bindE_handle + bind_assoc liftE_bindE_assoc + decodeInvocation_def Let_def from_bool_0 + performInvocation_def liftE_handle + liftE_bind) + apply monadic_rewrite_symb_exec_r + apply (rename_tac "send_ep") + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* getThreadVSpaceRoot, check isValidVTableRoot *) + apply (simp add: getThreadVSpaceRoot_def locateSlot_conv) + apply monadic_rewrite_symb_exec_r + apply (rename_tac "pdCapCTE") + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* getASIDPoolEntry, check valid root and VMID *) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* check thread priorities *) + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r + apply (simp add: isHighestPrio_def') + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* check endpoint can grant *) + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* check both threads are in same domain *) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + + (* fastpath checks completed, no further need for callKernel alternative *) + apply (rule monadic_rewrite_trans, + rule monadic_rewrite_pick_alternative_1) + apply monadic_rewrite_symb_exec_l + (* now committed to fastpath *) + (* proof proceeds by repeated rewrites (mostly of the slow path) under the + assumption of that all fastpath checks succeeded *) + apply (rule monadic_rewrite_trans) + (* strengthen rewrite proof to showing equivalence of results *) + apply (rule_tac F=True and E=True in monadic_rewrite_weaken_flags) + apply simp + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l_known thread) + apply (simp add: sendIPC_def bind_assoc) + apply (monadic_rewrite_symb_exec_l_known send_ep) + apply (rule_tac P="epQueue send_ep \ []" in monadic_rewrite_gen_asm) + apply (simp add: isRecvEP_endpoint_case list_case_helper bind_assoc) + apply (rule monadic_rewrite_bind_tail) + apply (elim conjE) + apply (rule monadic_rewrite_bind_tail, rename_tac dest_st) + apply (rule_tac P="\gr. 
dest_st = BlockedOnReceive (capEPPtr (fst (theRight rv))) gr" + in monadic_rewrite_gen_asm) + apply monadic_rewrite_symb_exec_l_drop + apply (rule monadic_rewrite_bind) + apply clarsimp + apply (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_bind) + apply (rule_tac destPrio=destPrio + and curDom=curDom and destDom=destDom and thread=thread + in possibleSwitchTo_rewrite) + apply (rule monadic_rewrite_bind) + apply (rule monadic_rewrite_trans) + apply (rule setupCallerCap_rewrite) + apply (rule monadic_rewrite_bind_head) + apply (rule setThreadState_rewrite_simple, simp) + apply (rule monadic_rewrite_trans) + apply (monadic_rewrite_symb_exec_l_known BlockedOnReply) + apply simp + apply (rule monadic_rewrite_refl) + apply wpsimp + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_bind_head) + apply (rule_tac t="hd (epQueue send_ep)" + in schedule_rewrite_ct_not_runnable') + apply (simp add: bind_assoc) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_bind) + apply (rule switchToThread_rewrite) + apply (rule monadic_rewrite_bind) + apply (rule activateThread_simple_rewrite) + apply (rule monadic_rewrite_refl) + apply wp + apply (wp setCurThread_ct_in_state) + apply (simp only: st_tcb_at'_def[symmetric]) + apply (wp, clarsimp simp: cur_tcb'_def ct_in_state'_def) + apply (simp add: getThreadCallerSlot_def getThreadReplySlot_def + locateSlot_conv ct_in_state'_def cur_tcb'_def) + + apply ((wp assert_inv threadSet_pred_tcb_at_state + cteInsert_obj_at'_not_queued + | wps)+)[1] + + apply (wp fastpathBestSwitchCandidate_lift[where f="cteInsert c w w'" for c w w']) + apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] + apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] + apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] + apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] + apply (wpsimp wp: fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t]) + apply ((wp assert_inv threadSet_pred_tcb_at_state + cteInsert_obj_at'_not_queued + | wps)+)[1] + apply (simp add: setSchedulerAction_def) + apply wp[1] + apply (simp cong: if_cong HOL.conj_cong add: if_bool_simps) + apply (simp_all only:)[5] + apply ((wp asUser_obj_at_unchanged mapM_x_wp' + sts_st_tcb_at'_cases + setThreadState_no_sch_change + setEndpoint_obj_at_tcb' + fastpathBestSwitchCandidate_lift[where f="setThreadState f t" for f t] + fastpathBestSwitchCandidate_lift[where f="asUser t f" for f t] + fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] + lookupBitmapPriority_lift + setThreadState_runnable_bitmap_inv + getEndpoint_obj_at' + | simp add: setMessageInfo_def obj_at'_conj + | wp (once) hoare_vcg_disj_lift)+) + apply (simp add: setThreadState_runnable_simp + getThreadCallerSlot_def getThreadReplySlot_def + locateSlot_conv bind_assoc) + apply (rule_tac P="\v. obj_at' (\tcb. tcbIPCBuffer tcb = v) (hd (epQueue send_ep))" + in monadic_rewrite_exists_v) + apply (rename_tac ipcBuffer) + + apply (rule_tac P="\v. obj_at' (\tcb. 
tcbState tcb = v) (hd (epQueue send_ep))" + in monadic_rewrite_exists_v) + apply (rename_tac destState) + + apply (simp add: AARCH64_H.switchToThread_def getTCB_threadGet bind_assoc) + (* retrieving state or thread registers is not thread_action_isolatable, + translate into return with suitable precondition *) + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + apply (rule_tac v=destState in monadic_rewrite_getThreadState + | rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ + apply (rule_tac v=destState in monadic_rewrite_getThreadState + | rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ + (* rewrite both sides into isolate_thread_actions *) + apply (rule_tac P="inj (case_bool thread (hd (epQueue send_ep)))" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) + apply (rule isolate_thread_actions_rewrite_bind + fastpath_isolate_rewrites fastpath_isolatables + bool.simps setRegister_simple setRegister_simple_modify_registers + threadGet_vcpu_isolatable[THEN thread_actions_isolatableD, simplified o_def] + threadGet_vcpu_isolatable[simplified o_def] + vcpuSwitch_isolatable[THEN thread_actions_isolatableD] vcpuSwitch_isolatable + setVMRoot_isolatable[THEN thread_actions_isolatableD] setVMRoot_isolatable + doMachineOp_isolatable[THEN thread_actions_isolatableD] doMachineOp_isolatable + kernelExitAssertions_isolatable[THEN thread_actions_isolatableD] + kernelExitAssertions_isolatable + zipWithM_setRegister_simple + zipWithM_setRegister_simple_modify_registers + thread_actions_isolatable_bind + | assumption + | wp assert_inv)+ + apply (rule_tac P="\s. 
ksSchedulerAction s = ResumeCurrentThread + \ tcb_at' thread s" + and F=True and E=False in monadic_rewrite_weaken_flags) + apply simp + (* establish RHS is equivalent to LHS on all parts of isolate_thread_actions *) + apply (rule monadic_rewrite_isolate_final) + apply (simp add: isRight_case_sum cong: list.case_cong) + apply (clarsimp simp: fun_eq_iff if_flip + cong: if_cong) + apply (drule obj_at_ko_at', clarsimp) + apply (frule get_tcb_state_regs_ko_at') + apply (clarsimp simp: zip_map2 zip_same_conv_map foldl_map + foldl_fun_upd + foldr_copy_register_tsrs + isRight_case_sum + cong: if_cong) + apply (simp add: upto_enum_def fromEnum_def + enum_register toEnum_def + msgRegisters_unfold + cong: if_cong) + apply (clarsimp split: if_split) + apply (rule ext) + apply (simp add: badgeRegister_def msgInfoRegister_def + AARCH64.badgeRegister_def + AARCH64.msgInfoRegister_def + split: if_split) + apply simp + (* monadic rewriting of RHS to LHS complete *) + (* facts about the result of getASIDPoolEntry aren't used in the remainder of the + postcondition; roughly speaking the slow path and fastpath are equivalent + without this check, but the check is needed to connect to the C which uses the + fetched asid pool entry *) + apply (wp + | simp cong: if_cong bool.case_cong + | rule getCTE_wp' gts_wp' threadGet_wp getEndpoint_wp + | wp (once) hoare_drop_imp[where f="getASIDPoolEntry a" for a])+ + apply (rule validE_cases_valid) + apply (simp add: isRight_def getSlotCap_def) + apply (wp getCTE_wp') + apply (rule resolveAddressBits_points_somewhere) + apply (simp cong: if_cong bool.case_cong) + apply wp + apply simp + apply (wp user_getreg_wp threadGet_wp)+ + + apply clarsimp + apply (prop_tac "ksCurThread s \ ksIdleThread s") + apply (fastforce simp: ct_in_state'_def dest: ct_running_not_idle' elim: pred_tcb'_weakenE) + apply (clarsimp simp: ct_in_state'_def pred_tcb_at') + apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp+) + apply (clarsimp simp: isCap_simps valid_cap'_def maskCapRights_def) + apply (frule ko_at_valid_ep', clarsimp) + apply (frule sym_refs_ko_atD'[where 'a=endpoint], clarsimp) + + apply (prop_tac "cte_wp_at' (\_. 
True) (hd (epQueue ep) + 2 ^ cte_level_bits * tcbVTableSlot) s") + apply (solves \clarsimp simp: valid_ep'_def isRecvEP_endpoint_case tcbVTableSlot_def + cte_level_bits_def cte_at_tcb_at_32'\) + apply clarsimp + apply (frule_tac cte=ctea in ctes_of_valid, fastforce) + apply (clarsimp simp: isValidVTableRoot_def2 valid_cap'_def wellformed_mapdata'_def + fastpathKernelAssertions_def) + apply (clarsimp simp: valid_ep'_def isRecvEP_endpoint_case neq_Nil_conv length_msgRegisters + size_msgRegisters_def ep_q_refs_of'_def st_tcb_at_refs_of_rev' + cong: if_cong) + + apply (rename_tac blockedThread ys) + apply (frule invs_mdb') + apply (thin_tac "Ball S P" for S P)+ + supply imp_disjL[simp del] + apply (subst imp_disjL[symmetric]) + + (* clean up broken up disj implication and excessive references to same tcbs *) + apply normalise_obj_at' + apply (clarsimp simp: invs'_def valid_state'_def) + apply (fold imp_disjL, intro allI impI) + + apply (prop_tac "ksCurThread s \ blockedThread") + apply normalise_obj_at' + apply clarsimp + apply (extract_conjunct \match conclusion in "\ tcbQueued _" \ -\) + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x=blockedThread in spec) + apply (solves \clarsimp simp: obj_at'_def st_tcb_at'_def objBits_simps\) + apply (prop_tac "fastpathBestSwitchCandidate blockedThread s") + apply (rule_tac ttcb=tcbb and ctcb=tcb in fastpathBestSwitchCandidateI) + apply (solves \simp only: disj_ac\) + apply simp+ + apply (clarsimp simp: obj_at'_def objBits_simps valid_mdb'_def + valid_mdb_ctes_def inj_case_bool + split: bool.split) + done + +lemma capability_case_Null_ReplyCap: + "(case cap of NullCap \ f | ReplyCap t b cg \ g t b cg | _ \ h) + = (if isReplyCap cap then g (capTCBPtr cap) (capReplyMaster cap) (capReplyCanGrant cap) + else if isNullCap cap then f else h)" + by (simp add: isCap_simps split: capability.split split del: if_split) + +lemma injection_handler_catch: + "catch (injection_handler f x) y + = catch x (y o f)" + apply (simp add: injection_handler_def catch_def handleE'_def + bind_assoc) + apply (rule bind_cong[OF refl]) + apply (simp add: throwError_bind split: sum.split) + done + +lemma doReplyTransfer_simple: + "monadic_rewrite True False + (obj_at' (\tcb. tcbFault tcb = None) receiver) + (doReplyTransfer sender receiver slot grant) + (do state \ getThreadState receiver; + assert (isReply state); + cte \ getCTE slot; + mdbnode \ return $ cteMDBNode cte; + assert (mdbPrev mdbnode \ 0 \ mdbNext mdbnode = 0); + parentCTE \ getCTE (mdbPrev mdbnode); + assert (isReplyCap (cteCap parentCTE) \ capReplyMaster (cteCap parentCTE)); + doIPCTransfer sender Nothing 0 grant receiver; + cteDeleteOne slot; + setThreadState Running receiver; + possibleSwitchTo receiver + od)" + apply (simp add: doReplyTransfer_def liftM_def nullPointer_def getSlotCap_def) + apply (rule monadic_rewrite_bind_tail)+ + apply (monadic_rewrite_symb_exec_l_known None, simp) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: threadGet_const gts_wp' getCTE_wp' simp: o_def)+ + done + +lemma receiveIPC_simple_rewrite: + "monadic_rewrite True False + ((\_. isEndpointCap ep_cap \ \ isSendEP ep) and (ko_at' ep (capEPPtr ep_cap) and + (\s. \ntfnptr. 
bound_tcb_at' ((=) (Some ntfnptr)) thread s \ obj_at' (Not \ isActive) ntfnptr s))) + (receiveIPC thread ep_cap True) + (do + setThreadState (BlockedOnReceive (capEPPtr ep_cap) (capEPCanGrant ep_cap)) thread; + setEndpoint (capEPPtr ep_cap) (RecvEP (case ep of RecvEP q \ (q @ [thread]) | _ \ [thread])) + od)" + supply empty_fail_getEndpoint[wp] + apply (rule monadic_rewrite_gen_asm) + apply (simp add: receiveIPC_def) + apply (monadic_rewrite_symb_exec_l_known ep) + apply monadic_rewrite_symb_exec_l+ + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + apply (rule monadic_rewrite_is_refl) + apply (cases ep; simp add: isSendEP_def) + apply (wpsimp wp: getNotification_wp gbn_wp' getEndpoint_wp + simp: getBoundNotification_def)+ + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) + done + +lemma empty_fail_isFinalCapability: + "empty_fail (isFinalCapability cte)" + by (wpsimp simp: isFinalCapability_def Let_def split_del: if_split) + +lemma cteDeleteOne_replycap_rewrite: + "monadic_rewrite True False + (cte_wp_at' (\cte. isReplyCap (cteCap cte)) slot) + (cteDeleteOne slot) + (emptySlot slot NullCap)" + supply isFinalCapability_inv[wp] empty_fail_isFinalCapability[wp] + apply (simp add: cteDeleteOne_def) + apply (rule monadic_rewrite_symb_exec_l) + apply (rule_tac P="cteCap cte \ NullCap \ isReplyCap (cteCap cte) + \ \ isEndpointCap (cteCap cte) + \ \ isNotificationCap (cteCap cte)" + in monadic_rewrite_gen_asm) + apply (simp add: finaliseCapTrue_standin_def capRemovable_def) + apply monadic_rewrite_symb_exec_l + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp')+ + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + done + +lemma cteDeleteOne_nullcap_rewrite: + "monadic_rewrite True False + (cte_wp_at' (\cte. cteCap cte = NullCap) slot) + (cteDeleteOne slot) + (return ())" + apply (simp add: cteDeleteOne_def unless_def when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: getCTE_wp'\) + apply (monadic_rewrite_symb_exec_l, rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ + done + +lemma deleteCallerCap_nullcap_rewrite: + "monadic_rewrite True False + (cte_wp_at' (\cte. cteCap cte = NullCap) (thread + 2 ^ cte_level_bits * tcbCallerSlot)) + (deleteCallerCap thread) + (return ())" + apply (simp add: deleteCallerCap_def getThreadCallerSlot_def locateSlot_conv + getSlotCap_def) + apply (monadic_rewrite_l cteDeleteOne_nullcap_rewrite \wpsimp wp: getCTE_wp\) + apply (monadic_rewrite_symb_exec_l+, rule monadic_rewrite_refl) + apply (wpsimp simp: cte_wp_at_ctes_of)+ + done + +lemma emptySlot_cnode_caps: + "\\s. P (only_cnode_caps (ctes_of s)) \ cte_wp_at' (\cte. \ isCNodeCap (cteCap cte)) slot s\ + emptySlot slot NullCap + \\rv s. P (only_cnode_caps (ctes_of s))\" + apply (simp add: only_cnode_caps_def map_option_comp2 + o_assoc[symmetric] cteCaps_of_def[symmetric]) + apply (wp emptySlot_cteCaps_of) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of + elim!: rsubst[where P=P] del: ext intro!: ext + split: if_split) + done + +lemma asUser_obj_at_ep[wp]: + "\obj_at' P p\ asUser t m \\rv. obj_at' (P :: endpoint \ bool) p\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps | simp)+ + done + +lemma setCTE_obj_at_ep[wp]: + "\obj_at' (P :: endpoint \ bool) p\ setCTE ptr cte \\rv. 
obj_at' P p\" + unfolding setCTE_def + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_cte typeError_def in_monad + split: Structures_H.kernel_object.split_asm + if_split_asm) + done + +lemma setCTE_obj_at_ntfn[wp]: + "\obj_at' (P :: Structures_H.notification \ bool) p\ setCTE ptr cte \\rv. obj_at' P p\" + unfolding setCTE_def + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_cte typeError_def in_monad + split: Structures_H.kernel_object.split_asm + if_split_asm) + done + +crunch obj_at_ep[wp]: emptySlot "obj_at' (P :: endpoint \ bool) p" + +crunches emptySlot, asUser + for gsCNodes[wp]: "\s. P (gsCNodes s)" + (wp: crunch_wps) + +crunch tcbContext[wp]: possibleSwitchTo "obj_at' (\tcb. P ( (atcbContextGet o tcbArch) tcb)) t" + (wp: crunch_wps simp_del: comp_apply) + +crunch only_cnode_caps[wp]: doFaultTransfer "\s. P (only_cnode_caps (ctes_of s))" + (wp: crunch_wps simp: crunch_simps) + +(* FIXME: monadic_rewrite_l does not work with stateAssert here *) +lemma tcbSchedDequeue_rewrite_not_queued: + "monadic_rewrite True False (tcb_at' t and obj_at' (Not \ tcbQueued) t) + (tcbSchedDequeue t) (return ())" + apply (simp add: tcbSchedDequeue_def) + apply monadic_rewrite_symb_exec_l + apply (monadic_rewrite_symb_exec_l_known False, simp) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: threadGet_const)+ + done + +lemma schedule_known_rewrite: + "monadic_rewrite True False + (\s. ksSchedulerAction s = SwitchToThread t + \ tcb_at' t s + \ obj_at' (Not \ tcbQueued) t s + \ ksCurThread s = t' + \ st_tcb_at' (Not \ runnable') t' s + \ (ksCurThread s \ ksIdleThread s) + \ fastpathBestSwitchCandidate t s) + (schedule) + (do Arch.switchToThread t; + setCurThread t; + setSchedulerAction ResumeCurrentThread od)" + supply subst_all[simp del] if_split[split del] + apply (simp add: schedule_def) + apply (simp only: Thread_H.switchToThread_def) + (* switching to t *) + apply (monadic_rewrite_l sched_act_SwitchToThread_rewrite[where t=t]) + (* not wasRunnable, skip enqueue *) + apply (simp add: when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + (* fastpath: \ (fastfail \ \ highest) *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + (* fastpath: no scheduleChooseNewThread *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + apply (simp add: bind_assoc) + apply (monadic_rewrite_l tcbSchedDequeue_rewrite_not_queued + \wpsimp wp: Arch_switchToThread_obj_at_pre\) + (* remove no-ops *) + apply simp + apply (repeat 13 \rule monadic_rewrite_symb_exec_l\) (* until switchToThread *) + apply (rule monadic_rewrite_refl) + apply (wpsimp simp: isHighestPrio_def')+ + apply (clarsimp simp: ct_in_state'_def not_pred_tcb_at'_strengthen + fastpathBestSwitchCandidate_def) + apply normalise_obj_at' + done + +lemma tcb_at_cte_at_offset: + "\ tcb_at' t s; 2 ^ cte_level_bits * off \ dom tcb_cte_cases \ + \ cte_at' (t + 2 ^ cte_level_bits * off) s" + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (erule(2) cte_wp_at_tcbI') + apply fastforce + apply simp + done + +lemma emptySlot_cte_wp_at_cteCap: + "\\s. (p = p' \ P NullCap) \ (p \ p' \ cte_wp_at' (\cte. P (cteCap cte)) p s)\ + emptySlot p' irqopt + \\rv s. cte_wp_at' (\cte. 
P (cteCap cte)) p s\" + apply (simp add: tree_cte_cteCap_eq[unfolded o_def]) + apply (wp emptySlot_cteCaps_of) + apply (clarsimp split: if_split) + done + +lemma setEndpoint_getCTE_pivot[unfolded K_bind_def]: + "do setEndpoint p val; v <- getCTE slot; f v od + = do v <- getCTE slot; setEndpoint p val; f v od" + apply (simp add: getCTE_assert_opt setEndpoint_def + setObject_modify_assert + fun_eq_iff bind_assoc) + apply (simp add: exec_gets assert_def assert_opt_def + exec_modify update_ep_map_to_ctes + split: if_split option.split) + done + +lemma setEndpoint_setCTE_pivot[unfolded K_bind_def]: + "do setEndpoint p val; setCTE slot cte; f od = + do setCTE slot cte; setEndpoint p val; f od" + supply if_split[split del] + apply (rule monadic_rewrite_to_eq) + apply simp + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_trans, + rule_tac f="ep_at' p" in monadic_rewrite_add_gets) + apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets, + rule monadic_rewrite_bind_tail) + apply (rename_tac epat) + apply (rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_bind_tail) + apply (simp add: setEndpoint_def setObject_modify_assert bind_assoc) + apply (rule_tac rv=epat in monadic_rewrite_gets_known) + apply (wp setCTE_typ_at'[where T="koType TYPE(endpoint)", unfolded typ_at_to_obj_at'] + | simp)+ + apply (simp add: setCTE_assert_modify bind_assoc) + apply (rule monadic_rewrite_trans, rule monadic_rewrite_add_gets, + rule monadic_rewrite_bind_tail)+ + apply (rename_tac cteat tcbat) + apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_trans) + apply (rule_tac rv=cteat in monadic_rewrite_gets_known) + apply (rule_tac rv=tcbat in monadic_rewrite_gets_known) + apply (wp setEndpoint_typ_at'[where T="koType TYPE(tcb)", unfolded typ_at_to_obj_at'] + setEndpoint_typ_at'[where T="koType TYPE(cte)", unfolded typ_at_to_obj_at'] + | simp)+ + apply (rule_tac P="\s. epat = ep_at' p s \ cteat = real_cte_at' slot s + \ tcbat = (tcb_at' (slot && ~~ mask tcbBlockSizeBits) and (%y. slot && mask tcbBlockSizeBits : dom tcb_cte_cases)) s" + in monadic_rewrite_pre_imp_eq) + apply (simp add: setEndpoint_def setObject_modify_assert bind_assoc + exec_gets assert_def exec_modify + split: if_split) + apply (auto split: if_split simp: obj_at'_def objBits_defs + del: ext + intro!: arg_cong[where f=f] ext kernel_state.fold_congs)[1] + apply wp+ + apply (simp add: objBits_defs) + done + +lemma setEndpoint_updateMDB_pivot[unfolded K_bind_def]: + "do setEndpoint p val; updateMDB slot mf; f od = + do updateMDB slot mf; setEndpoint p val; f od" + by (clarsimp simp: updateMDB_def bind_assoc + setEndpoint_getCTE_pivot + setEndpoint_setCTE_pivot + split: if_split) + +lemma setEndpoint_updateCap_pivot[unfolded K_bind_def]: + "do setEndpoint p val; updateCap slot mf; f od = + do updateCap slot mf; setEndpoint p val; f od" + by (clarsimp simp: updateCap_def bind_assoc + setEndpoint_getCTE_pivot + setEndpoint_setCTE_pivot) + +lemma modify_setEndpoint_pivot[unfolded K_bind_def]: + "\ \ksf s. ksPSpace_update ksf (sf s) = sf (ksPSpace_update ksf s) \ + \ (do modify sf; setEndpoint p val; f od) = + (do setEndpoint p val; modify sf; f od)" + apply (subgoal_tac "\s. ep_at' p (sf s) = ep_at' p s") + apply (simp add: setEndpoint_def setObject_modify_assert + bind_assoc fun_eq_iff + exec_gets exec_modify assert_def + split: if_split) + apply atomize + apply clarsimp + apply (drule_tac x="\_. 
ksPSpace s" in spec) + apply (drule_tac x="s" in spec) + apply (drule_tac f="ksPSpace" in arg_cong) + apply simp + apply (metis obj_at'_pspaceI) + done + +lemma setEndpoint_clearUntypedFreeIndex_pivot[unfolded K_bind_def]: + "do setEndpoint p val; v <- clearUntypedFreeIndex slot; f od + = do v <- clearUntypedFreeIndex slot; setEndpoint p val; f od" + supply option.case_cong_weak[cong del] + by (simp add: clearUntypedFreeIndex_def bind_assoc getSlotCap_def setEndpoint_getCTE_pivot + updateTrackedFreeIndex_def modify_setEndpoint_pivot + split: capability.split + | rule bind_cong[OF refl] allI impI bind_apply_cong[OF refl])+ + +lemma emptySlot_setEndpoint_pivot[unfolded K_bind_def]: + "(do emptySlot slot NullCap; setEndpoint p val; f od) = + (do setEndpoint p val; emptySlot slot NullCap; f od)" + apply (rule ext) + apply (simp add: emptySlot_def bind_assoc + setEndpoint_getCTE_pivot + setEndpoint_updateCap_pivot + setEndpoint_updateMDB_pivot + case_Null_If Retype_H.postCapDeletion_def + setEndpoint_clearUntypedFreeIndex_pivot + split: if_split + | rule bind_apply_cong[OF refl])+ + done + +lemma set_getCTE[unfolded K_bind_def]: + "do setCTE p cte; v <- getCTE p; f v od + = do setCTE p cte; f cte od" + apply (simp add: getCTE_assert_opt bind_assoc) + apply (rule monadic_rewrite_to_eq) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l) + apply (monadic_rewrite_symb_exec_l_known cte, rule monadic_rewrite_refl) + apply (wpsimp simp: assert_opt_def wp: gets_wp)+ + done + +lemma set_setCTE[unfolded K_bind_def]: + "do setCTE p val; setCTE p val' od = setCTE p val'" + supply if_split[split del] + apply simp + apply (rule monadic_rewrite_to_eq) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_trans, + rule_tac f="real_cte_at' p" in monadic_rewrite_add_gets) + apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets, + rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_trans, + rule_tac f="tcb_at' (p && ~~ mask tcbBlockSizeBits) and K (p && mask tcbBlockSizeBits \ dom tcb_cte_cases)" + in monadic_rewrite_add_gets) + apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets, + rule monadic_rewrite_bind_tail) + apply (rename_tac cteat tcbat) + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_bind_tail) + apply (simp add: setCTE_assert_modify) + apply (rule monadic_rewrite_trans, rule_tac rv=cteat in monadic_rewrite_gets_known) + apply (rule_tac rv=tcbat in monadic_rewrite_gets_known) + apply (wp setCTE_typ_at'[where T="koType TYPE(tcb)", unfolded typ_at_to_obj_at'] + setCTE_typ_at'[where T="koType TYPE(cte)", unfolded typ_at_to_obj_at'] + | simp)+ + apply (simp add: setCTE_assert_modify bind_assoc) + apply (rule monadic_rewrite_bind_tail)+ + apply (rule_tac P="c = cteat \ t = tcbat + \ (tcbat \ + (\ getF setF. tcb_cte_cases (p && mask tcbBlockSizeBits) = Some (getF, setF) + \ (\ f g tcb. 
setF f (setF g tcb) = setF (f o g) tcb)))" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_is_refl[OF ext]) + apply (simp add: exec_modify split: if_split) + apply (auto simp: simpler_modify_def projectKO_opt_tcb objBits_defs + del: ext + intro!: kernel_state.fold_congs ext + split: if_split)[1] + apply wp+ + apply (clarsimp simp: objBits_defs intro!: all_tcbI) + apply (auto simp: tcb_cte_cases_def split: if_split_asm) + done + +lemma setCTE_updateCapMDB: + "p \ 0 \ + setCTE p cte = do updateCap p (cteCap cte); updateMDB p (const (cteMDBNode cte)) od" + supply if_split[split del] + apply (simp add: updateCap_def updateMDB_def bind_assoc set_getCTE + cte_overwrite set_setCTE) + apply (simp add: getCTE_assert_opt setCTE_assert_modify bind_assoc) + apply (rule ext, simp add: exec_gets assert_opt_def exec_modify + split: if_split option.split) + apply (cut_tac P=\ and p=p and s=x in cte_wp_at_ctes_of) + apply (cases cte) + apply (simp add: cte_wp_at_obj_cases') + apply (auto simp: mask_out_sub_mask) + done + +lemma clearUntypedFreeIndex_simple_rewrite: + "monadic_rewrite True False + (cte_wp_at' (Not o isUntypedCap o cteCap) slot) + (clearUntypedFreeIndex slot) (return ())" + apply (simp add: clearUntypedFreeIndex_def getSlotCap_def) + apply (rule monadic_rewrite_name_pre) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (monadic_rewrite_symb_exec_l_known cte) + apply (simp split: capability.split, strengthen monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ + done + +lemma emptySlot_replymaster_rewrite[OF refl]: + "mdbn = cteMDBNode cte \ + monadic_rewrite True False + ((\_. mdbNext mdbn = 0 \ mdbPrev mdbn \ 0) + and ((\_. cteCap cte \ NullCap) + and (cte_wp_at' ((=) cte) slot + and cte_wp_at' (\cte. isReplyCap (cteCap cte)) slot + and cte_wp_at' (\cte. isReplyCap (cteCap cte) \ capReplyMaster (cteCap cte)) + (mdbPrev mdbn) + and (\s. reply_masters_rvk_fb (ctes_of s)) + and (\s. no_0 (ctes_of s))))) + (emptySlot slot NullCap) + (do updateMDB (mdbPrev mdbn) (mdbNext_update (K 0) o mdbFirstBadged_update (K True) + o mdbRevocable_update (K True)); + setCTE slot makeObject + od)" + supply if_split[split del] + apply (rule monadic_rewrite_gen_asm)+ + apply (rule monadic_rewrite_guard_imp) + apply (rule_tac P="slot \ 0" in monadic_rewrite_gen_asm) + apply (clarsimp simp: emptySlot_def setCTE_updateCapMDB) + apply (monadic_rewrite_l clearUntypedFreeIndex_simple_rewrite, simp) + apply (monadic_rewrite_symb_exec_l_known cte) + apply (simp add: updateMDB_def Let_def bind_assoc makeObject_cte case_Null_If) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_bind) + apply (rule_tac P="mdbFirstBadged (cteMDBNode ctea) \ mdbRevocable (cteMDBNode ctea)" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_is_refl) + apply (case_tac ctea, rename_tac mdbnode, case_tac mdbnode) + apply simp + apply (simp add: Retype_H.postCapDeletion_def) + apply (rule monadic_rewrite_refl) + apply (solves wp | wp getCTE_wp')+ + apply (clarsimp simp: cte_wp_at_ctes_of reply_masters_rvk_fb_def) + apply (fastforce simp: isCap_simps) + done + +lemma all_prio_not_inQ_not_tcbQueued: "\ obj_at' (\a. (\d p. \ inQ d p a)) t s \ \ obj_at' (\a. 
\ tcbQueued a) t s" + apply (clarsimp simp: obj_at'_def inQ_def) + done + +crunches setThreadState, emptySlot, asUser + for ntfn_obj_at[wp]: "obj_at' (P::(Structures_H.notification \ bool)) ntfnptr" + (wp: obj_at_setObject2 crunch_wps + simp: crunch_simps updateObject_default_def in_monad) + +lemma st_tcb_at_is_Reply_imp_not_tcbQueued: + "\s t. \ ready_qs_runnable s; st_tcb_at' isReply t s\ \ obj_at' (\tcb. \ tcbQueued tcb) t s" + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x=t in spec) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def isReply_def) + apply (case_tac "tcbState obj"; clarsimp) + done + +lemma valid_objs_ntfn_at_tcbBoundNotification: + "ko_at' tcb t s \ valid_objs' s \ tcbBoundNotification tcb \ None + \ ntfn_at' (the (tcbBoundNotification tcb)) s" + apply (drule(1) ko_at_valid_objs', simp add: ) + apply (simp add: valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def) + apply clarsimp + done + +crunch bound_tcb_at'_Q[wp]: setThreadState "\s. Q (bound_tcb_at' P t s)" + (wp: threadSet_pred_tcb_no_state crunch_wps simp: unless_def) + +lemmas emptySlot_pred_tcb_at'_Q[wp] = lift_neg_pred_tcb_at'[OF emptySlot_typ_at' emptySlot_pred_tcb_at'] + +lemma emptySlot_tcb_at'[wp]: + "\\s. Q (tcb_at' t s)\ emptySlot a b \\_ s. Q (tcb_at' t s)\" + by (simp add: tcb_at_typ_at', wp) + +lemmas cnode_caps_gsCNodes_lift + = hoare_lift_Pf2[where P="\gs s. cnode_caps_gsCNodes (f s) gs" and f=gsCNodes for f] + hoare_lift_Pf2[where P="\gs s. Q s \ cnode_caps_gsCNodes (f s) gs" and f=gsCNodes for f Q] + +lemma resolveAddressBitsFn_eq_name_slot: + "monadic_rewrite F E (\s. (isCNodeCap cap \ cte_wp_at' (\cte. cteCap cte = cap) (slot s) s) + \ valid_objs' s \ cnode_caps_gsCNodes' s) + (resolveAddressBits cap capptr bits) + (gets (resolveAddressBitsFn cap capptr bits o only_cnode_caps o ctes_of))" + apply (rule monadic_rewrite_guard_imp, rule resolveAddressBitsFn_eq) + apply auto + done + +crunch bound_tcb_at'_Q[wp]: asUser "\s. Q (bound_tcb_at' P t s)" + (simp: crunch_simps wp: threadSet_pred_tcb_no_state crunch_wps) + + +lemma asUser_tcb_at'_Q[wp]: + "\\s. Q (tcb_at' t s)\ asUser a b \\_ s. Q (tcb_at' t s)\" + by (simp add: tcb_at_typ_at', wp) + +lemma active_ntfn_check_wp: + "\\s. Q (\ntfnptr. bound_tcb_at' ((=) (Some ntfnptr)) thread s + \ \ obj_at' (Not o isActive) ntfnptr s) s \ do bound_ntfn \ getBoundNotification thread; + case bound_ntfn of None \ return False + | Some ntfnptr \ liftM EndpointDecls_H.isActive $ getNotification ntfnptr + od \Q\" + apply (rule hoare_pre) + apply (wp getNotification_wp gbn_wp' | wpc)+ + apply (auto simp: pred_tcb_at'_def obj_at'_def ) + done + +lemma tcbSchedEnqueue_tcbIPCBuffer: + "\obj_at' (\tcb. P (tcbIPCBuffer tcb)) t\ + tcbSchedEnqueue t' + \\_. obj_at' (\tcb. P (tcbIPCBuffer tcb)) t\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_when) + apply (wp threadSet_obj_at' hoare_drop_imps threadGet_wp + |simp split: if_split)+ + done + +crunch obj_at'_tcbIPCBuffer[wp]: rescheduleRequired "obj_at' (\tcb. P (tcbIPCBuffer tcb)) t" + (wp: crunch_wps tcbSchedEnqueue_tcbIPCBuffer simp: rescheduleRequired_def) + +context +notes if_cong[cong] +begin +crunch obj_at'_tcbIPCBuffer[wp]: setThreadState "obj_at' (\tcb. P (tcbIPCBuffer tcb)) t" + (wp: crunch_wps threadSet_obj_at'_really_strongest) + +crunch obj_at'_tcbIPCBuffer[wp]: handleFault "obj_at' (\tcb. 
P (tcbIPCBuffer tcb)) t" + (wp: crunch_wps constOnFailure_wp tcbSchedEnqueue_tcbIPCBuffer threadSet_obj_at'_really_strongest + simp: zipWithM_x_mapM) +end + +crunch obj_at'_tcbIPCBuffer[wp]: emptySlot "obj_at' (\tcb. P (tcbIPCBuffer tcb)) t" + (wp: crunch_wps) + +(* FIXME AARCH64: rename and remove *) +lemmas getCTE_known_cap = getCTE_get + +(* FIXME AARCH64: this was removed since ARM in RAB_FN.thy back in refine *) +lemma resolveAddressBitsFn_real_cte_at': + "resolveAddressBitsFn cap addr depth (only_cnode_caps (ctes_of s)) = Inr rv + \ (isCNodeCap cap \ cte_wp_at' (\cte. cteCap cte = cap) slot s) + \ cnode_caps_gsCNodes (only_cnode_caps (ctes_of s)) (gsCNodes s) + \ valid_objs' s \ valid_cap' cap s + \ real_cte_at' (fst rv) s" + using monadic_rewrite_refine_validE_R[where F=False and P''=\, + OF resolveAddressBitsFn_eq resolveAddressBits_real_cte_at'] + apply (clarsimp simp: valid_def validE_R_def validE_def simpler_gets_def) + apply (cases rv, clarsimp) + apply metis + done + +(* FIXME move *) +crunches getBoundNotification + for (no_fail) no_fail[intro!, wp, simp] + +(* FIXME AARCH64 move to Monadic_Rewrite *) +lemma monadic_rewrite_fail: + "monadic_rewrite True E \ fail g" + by (simp add: monadic_rewrite_def) + +lemma threadSet_tcb_at'[wp]: + "threadSet f t' \\s. P (tcb_at' addr s)\" + apply (wpsimp wp: threadSet_wp) + apply (erule rsubst[where P=P]) + by (clarsimp simp: obj_at'_def ps_clear_upd objBits_simps) + +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for tcb''[wp]: "\s. P (tcb_at' addr s)" + (wp: crunch_wps) + +(* FIXME AARCH64 pick up commentary and cleanup from fastpath_callKernel_SysCall_corres *) +lemma fastpath_callKernel_SysReplyRecv_corres: + "monadic_rewrite True False + (invs' and ct_in_state' ((=) Running) and (\s. 
ksSchedulerAction s = ResumeCurrentThread) + and cnode_caps_gsCNodes' and ready_qs_runnable) + (callKernel (SyscallEvent SysReplyRecv)) (fastpaths SysReplyRecv)" + including classic_wp_pre + supply if_cong[cong] option.case_cong[cong] + supply if_split[split del] + supply user_getreg_inv[wp] (* FIXME *) + apply (rule monadic_rewrite_introduce_alternative[OF callKernel_def[simplified atomize_eq]]) + apply (rule monadic_rewrite_guard_imp) + (* lift out fastpathKernelAssertions, we'll need them to show non-failure in fastpath checks *) + apply (rule monadic_rewrite_bind_alternative_l, wpsimp) + apply (rule monadic_rewrite_stateAssert) + apply (simp add: handleEvent_def handleReply_def + handleRecv_def liftE_bindE_handle liftE_handle + bind_assoc getMessageInfo_def liftE_bind) + apply (simp add: catch_liftE_bindE unlessE_throw_catch_If + unifyFailure_catch_If catch_liftE + getMessageInfo_def alternative_bind + fastpaths_def getThreadCallerSlot_def + locateSlot_conv capability_case_Null_ReplyCap + getThreadCSpaceRoot_def + cong: if_cong) + apply (rule monadic_rewrite_bind_alternative_l, wp) + apply (rule monadic_rewrite_bind_tail) + apply monadic_rewrite_symb_exec_r + apply (rename_tac msgInfo) + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r + apply (rename_tac tcbFault) + apply (rule monadic_rewrite_alternative_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + apply (simp add: lookupCap_def liftME_def lookupCapAndSlot_def + lookupSlotForThread_def bindE_assoc + split_def getThreadCSpaceRoot_def + locateSlot_conv liftE_bindE bindE_bind_linearise + capFaultOnFailure_def rethrowFailure_injection + injection_handler_catch bind_bindE_assoc + getThreadCallerSlot_def bind_assoc + getSlotCap_def + case_bool_If o_def + isRight_def[where x="Inr v" for v] + isRight_def[where x="Inl v" for v] + cong: if_cong) + apply monadic_rewrite_symb_exec_r + apply (rename_tac "cTableCTE") + apply (rule monadic_rewrite_transverse, + monadic_rewrite_l resolveAddressBitsFn_eq wpsimp, rule monadic_rewrite_refl) + apply monadic_rewrite_symb_exec_r + apply (rename_tac "rab_ret") + + apply (rule_tac P="isRight rab_ret" in monadic_rewrite_cases[rotated]) + apply (case_tac rab_ret, simp_all add: isRight_def)[1] + apply (rule monadic_rewrite_alternative_l) + apply clarsimp + apply (simp add: isRight_case_sum liftE_bind + isRight_def[where x="Inr v" for v]) + apply monadic_rewrite_symb_exec_r + apply (rename_tac ep_cap) + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + apply (monadic_rewrite_symb_exec + \rule monadic_rewrite_symb_exec_r_nE[OF _ _ _ active_ntfn_check_wp, unfolded bind_assoc fun_app_def]\ + \wpsimp simp: getBoundNotification_def wp: threadGet_wp\) + apply (rename_tac ep) + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + apply monadic_rewrite_symb_exec_r + apply (rename_tac ep) + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + apply (rule monadic_rewrite_bind_alternative_l, wp) + apply (rule monadic_rewrite_bind_tail) + apply (rename_tac replyCTE) + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + apply (simp add: bind_assoc) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* getThreadVSpaceRoot, check isValidVTableRoot *) + apply (simp add: 
getThreadVSpaceRoot_def locateSlot_conv) + apply monadic_rewrite_symb_exec_r + apply (rename_tac "pdCapCTE") + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* getASIDPoolEntry, check valid root and VMID *) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* check thread priorities *) + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r + apply (simp add: isHighestPrio_def') + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + (* check thread domains *) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + apply (rule monadic_rewrite_trans, + rule monadic_rewrite_pick_alternative_1) + (* now committed to fastpath *) + apply (rule_tac P="\v. obj_at' (%tcb. tcbIPCBuffer tcb = v) (capTCBPtr (cteCap replyCTE))" + in monadic_rewrite_exists_v) + apply (rename_tac ipcBuffer) + + apply (simp add: AARCH64_H.switchToThread_def bind_assoc) + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp mapM_x_wp' getObject_inv | wpc | simp add: + | wp (once) hoare_drop_imps )+ + + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp + | wp (once) hoare_drop_imps )+ + + (* capReplyMaster (cteCap replyCTE) causes fail on slow path, so we can assume + it's not a reply cap *) + apply (rule_tac P="capReplyMaster (cteCap replyCTE)" in monadic_rewrite_cases) + apply simp + apply (rule monadic_rewrite_fail) + apply (simp add: bind_assoc) + apply (rule monadic_rewrite_assert) + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_bind_head) + apply (rule monadic_rewrite_trans) + apply (rule doReplyTransfer_simple) + apply simp + apply (((rule monadic_rewrite_weaken_flags', + (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite + | rule_tac destPrio=callerPrio + and curDom=curDom and destDom=callerDom + and thread=thread in possibleSwitchTo_rewrite)) + | rule cteDeleteOne_replycap_rewrite + | rule monadic_rewrite_bind monadic_rewrite_refl + | wp assert_inv mapM_x_wp' sts_valid_objs' + asUser_obj_at_unchanged + hoare_strengthen_post[OF _ obj_at_conj'[simplified atomize_conjL], rotated] + lookupBitmapPriority_lift + setThreadState_runnable_bitmap_inv + | simp add: setMessageInfo_def setThreadState_runnable_simp + | wp (once) hoare_vcg_disj_lift)+)[1] + apply (simp add: setMessageInfo_def) + apply (rule monadic_rewrite_bind_tail) + apply (rename_tac unblocked) + apply (monadic_rewrite_symb_exec_l_known thread) + apply (monadic_rewrite_symb_exec_l_known cptr) + apply (rule monadic_rewrite_bind) + apply (rule monadic_rewrite_catch[OF _ monadic_rewrite_refl wp_post_tautE_E]) + apply monadic_rewrite_symb_exec_l + apply (rename_tac cTableCTE2, + rule_tac P="cteCap cTableCTE2 = cteCap cTableCTE" + in monadic_rewrite_gen_asm) + apply simp + apply (rule monadic_rewrite_trans, + rule monadic_rewrite_bindE[OF _ monadic_rewrite_refl]) + apply (rule_tac slot="\s. 
ksCurThread s + 2 ^ cte_level_bits * tcbCTableSlot" + in resolveAddressBitsFn_eq_name_slot) + apply wp + apply (rule monadic_rewrite_trans) + apply (rule_tac rv=rab_ret + in monadic_rewrite_gets_known[where m="Nondet_Monad.lift f" + for f, folded bindE_def]) + apply (simp add: Nondet_Monad.lift_def isRight_case_sum) + apply monadic_rewrite_symb_exec_l + apply (rename_tac ep_cap2) + apply (rule_tac P="cteCap ep_cap2 = cteCap ep_cap" in monadic_rewrite_gen_asm) + apply (simp add: cap_case_EndpointCap_NotificationCap) + apply (rule monadic_rewrite_liftE) + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_bind) + apply (rule deleteCallerCap_nullcap_rewrite) + apply (rule_tac ep=ep in receiveIPC_simple_rewrite) + apply (wp, simp) + apply (rule monadic_rewrite_bind_head) + + apply (rule monadic_rewrite_weaken_flags[where E=True and F=True], simp) + apply (rule setThreadState_rewrite_simple) + apply clarsimp + apply (wp getCTE_known_cap)+ + apply (rule monadic_rewrite_bind) + apply (rule_tac t="capTCBPtr (cteCap replyCTE)" + and t'=thread + in schedule_known_rewrite) + apply (rule monadic_rewrite_weaken_flags[where E=True and F=True], simp) + apply (rule monadic_rewrite_bind) + apply (rule activateThread_simple_rewrite) + apply (rule monadic_rewrite_refl) + apply wp + apply wp + apply (simp add: ct_in_state'_def, simp add: ct_in_state'_def[symmetric]) + apply ((wp setCurThread_ct_in_state[folded st_tcb_at'_def] + Arch_switchToThread_pred_tcb')+)[2] + apply (simp add: catch_liftE) + apply ((wpsimp wp: user_getreg_rv setEndpoint_obj_at_tcb' + threadSet_pred_tcb_at_state[unfolded if_bool_eq_conj] + fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] + fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t] + | wps)+)[3] + apply (simp cong: rev_conj_cong) + apply (wpsimp wp: setThreadState_tcbContext[simplified comp_apply] + user_getreg_rv + setThreadState_no_sch_change sts_valid_objs' + sts_st_tcb_at'_cases sts_bound_tcb_at' + fastpathBestSwitchCandidate_lift[where f="setThreadState s t" for s t] + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + cnode_caps_gsCNodes_lift + hoare_vcg_ex_lift + | wps)+ + apply (strengthen imp_consequent[where Q="tcb_at' t s" for t s]) + apply ((wp user_getreg_rv setThreadState_no_sch_change + sts_st_tcb_at'_cases sts_bound_tcb_at' + emptySlot_obj_at'_not_queued emptySlot_obj_at_ep + emptySlot_tcbContext[simplified comp_apply] + emptySlot_cte_wp_at_cteCap + emptySlot_cnode_caps + user_getreg_inv asUser_typ_ats + asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + cnode_caps_gsCNodes_lift + hoare_vcg_ex_lift + fastpathBestSwitchCandidate_lift[where f="emptySlot a b" for a b] + | simp del: comp_apply + | clarsimp simp: obj_at'_weakenE[OF _ TrueI] + | wps)+) + + apply (wpsimp wp: fastpathBestSwitchCandidate_lift[where f="asUser a b" for a b])+ + apply (clarsimp cong: conj_cong) + apply ((wp user_getreg_inv asUser_typ_ats + asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + cnode_caps_gsCNodes_lift + hoare_vcg_ex_lift + | clarsimp simp: obj_at'_weakenE[OF _ TrueI] + | solves \ + wp fastpathBestSwitchCandidate_lift[where f="asUser a b" for a b] + \)+) + + apply (clarsimp | wp getCTE_wp' gts_imp')+ + + apply (simp add: AARCH64_H.switchToThread_def getTCB_threadGet bind_assoc) + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + + apply (rule monadic_rewrite_bind 
monadic_rewrite_refl)+ + apply (wp mapM_x_wp' handleFault_obj_at'_tcbIPCBuffer getObject_inv | wpc | simp + | wp (once) hoare_drop_imps )+ + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp + | wp (once) hoare_drop_imps )+ + + apply (simp add: bind_assoc catch_liftE + receiveIPC_def Let_def liftM_def + setThreadState_runnable_simp) + apply monadic_rewrite_symb_exec_l + apply (rule monadic_rewrite_assert) + + apply (rule_tac P="inj (case_bool thread (capTCBPtr (cteCap replyCTE)))" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) + apply (rule isolate_thread_actions_rewrite_bind + fastpath_isolate_rewrites fastpath_isolatables + bool.simps setRegister_simple setRegister_simple_modify_registers + threadGet_vcpu_isolatable[THEN thread_actions_isolatableD, simplified o_def] + threadGet_vcpu_isolatable[simplified o_def] + vcpuSwitch_isolatable[THEN thread_actions_isolatableD] vcpuSwitch_isolatable + zipWithM_setRegister_simple + zipWithM_setRegister_simple_modify_registers + thread_actions_isolatable_bind + thread_actions_isolatableD[OF setCTE_isolatable] + setCTE_isolatable + setVMRoot_isolatable[THEN thread_actions_isolatableD] setVMRoot_isolatable + doMachineOp_isolatable[THEN thread_actions_isolatableD] doMachineOp_isolatable + kernelExitAssertions_isolatable[THEN thread_actions_isolatableD] + kernelExitAssertions_isolatable + | assumption + | wp assert_inv)+ + apply (simp only: ) + apply (rule_tac P="(\s. ksSchedulerAction s = ResumeCurrentThread) + and tcb_at' thread + and (cte_wp_at' (\cte. isReplyCap (cteCap cte)) + (thread + 2 ^ cte_level_bits * tcbCallerSlot) + and (\s. \x. 
tcb_at' (case_bool thread (capTCBPtr (cteCap replyCTE)) x) s) + and valid_mdb')" + and F=True and E=False in monadic_rewrite_weaken_flags) + apply (rule monadic_rewrite_isolate_final2) + apply simp + apply monadic_rewrite_symb_exec_l + apply (rename_tac callerCTE) + apply (rule monadic_rewrite_assert) + apply monadic_rewrite_symb_exec_l + apply (rule monadic_rewrite_assert) + apply (simp add: emptySlot_setEndpoint_pivot) + apply (rule monadic_rewrite_bind) + apply (rule monadic_rewrite_is_refl) + apply (clarsimp simp: isSendEP_def split: Structures_H.endpoint.split) + apply (monadic_rewrite_symb_exec_r_known callerCTE) + apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_head, + rule_tac cte=callerCTE in emptySlot_replymaster_rewrite) + apply (simp add: bind_assoc o_def) + apply (rule monadic_rewrite_refl) + apply (simp add: cte_wp_at_ctes_of pred_conj_def) + apply (clarsimp | wp getCTE_ctes_wp)+ + apply (clarsimp simp: zip_map2 zip_same_conv_map foldl_map + foldl_fun_upd + foldr_copy_register_tsrs + isRight_case_sum + cong: if_cong) + apply (clarsimp simp: fun_eq_iff if_flip + cong: if_cong) + apply (drule obj_at_ko_at', clarsimp) + apply (frule get_tcb_state_regs_ko_at') + apply (clarsimp simp: zip_map2 zip_same_conv_map foldl_map + foldl_fun_upd + foldr_copy_register_tsrs + isRight_case_sum + cong: if_cong) + apply (simp add: upto_enum_def fromEnum_def + enum_register toEnum_def + msgRegisters_unfold + cong: if_cong) + apply (clarsimp split: if_split) + apply (rule ext) + apply (simp add: badgeRegister_def msgInfoRegister_def + AARCH64.msgInfoRegister_def + AARCH64.badgeRegister_def + cong: if_cong + split: if_split) + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps + map_to_ctes_partial_overwrite) + apply (simp add: valid_mdb'_def valid_mdb_ctes_def) + apply simp + (* monadic rewriting of RHS to LHS complete *) + (* facts about the result of getASIDPoolEntry aren't used in the remainder of the + postcondition; roughly speaking the slow path and fastpath are equivalent + without this check, but the check is needed to connect to the C which uses the + fetched asid pool entry *) + apply (simp cong: if_cong bool.case_cong + | rule getCTE_wp' gts_wp' threadGet_wp + getEndpoint_wp gets_wp + user_getreg_wp + gets_the_wp gct_wp getNotification_wp + return_wp liftM_wp gbn_wp' + | (simp only: curDomain_def, wp)[1] + | wp (once) hoare_drop_imp[where f="getASIDPoolEntry a" for a])+ + + apply clarsimp + apply (subgoal_tac "ksCurThread s \ ksIdleThread s") + prefer 2 + apply (fastforce simp: ct_in_state'_def dest: ct_running_not_idle' elim: pred_tcb'_weakenE) + + apply (clarsimp simp: ct_in_state'_def pred_tcb_at') + apply (subst tcb_at_cte_at_offset, + erule obj_at'_weakenE[OF _ TrueI], + simp add: tcb_cte_cases_def cte_level_bits_def tcbSlots) + apply (clarsimp simp: valid_objs_ntfn_at_tcbBoundNotification + invs_valid_objs' if_apply_def2) + apply (rule conjI[rotated]) + apply (fastforce elim: cte_wp_at_weakenE') + apply (clarsimp simp: isRight_def) + apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp+) + apply (frule resolveAddressBitsFn_real_cte_at', + (clarsimp | erule cte_wp_at_weakenE')+) + apply (frule real_cte_at', clarsimp) + apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp, + clarsimp simp: isCap_simps, simp add: valid_cap_simps') + apply (clarsimp simp: maskCapRights_def isCap_simps) + apply (frule_tac p="p' + 2 ^ cte_level_bits * tcbCallerSlot" for p' + in cte_wp_at_valid_objs_valid_cap', clarsimp+) + apply (clarsimp simp: valid_cap_simps') + apply 
(subst tcb_at_cte_at_offset, + assumption, simp add: tcb_cte_cases_def cte_level_bits_def tcbSlots cteSizeBits_def) + apply clarsimp + apply (frule_tac cte=ctec in ctes_of_valid, fastforce) + apply (clarsimp simp: isValidVTableRoot_def2 valid_cap'_def wellformed_mapdata'_def + fastpathKernelAssertions_def) + apply (clarsimp simp: inj_case_bool cte_wp_at_ctes_of + length_msgRegisters + order_less_imp_le + tcb_at_invs' invs_mdb' + split: bool.split) + apply (subst imp_disjL[symmetric], intro allI impI) + apply (clarsimp simp: inj_case_bool cte_wp_at_ctes_of + length_msgRegisters size_msgRegisters_def order_less_imp_le + tcb_at_invs' invs_mdb' + split: bool.split) + apply (subgoal_tac "fastpathBestSwitchCandidate v0a s") + prefer 2 + apply normalise_obj_at' + apply (rule_tac ttcb=tcba and ctcb=tcb in fastpathBestSwitchCandidateI) + apply (erule disjE, blast, blast) + apply simp+ + apply (clarsimp simp: obj_at_tcbs_of tcbSlots + cte_level_bits_def) + apply (frule(1) st_tcb_at_is_Reply_imp_not_tcbQueued) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (auto simp: obj_at_tcbs_of tcbSlots projectKOs + cte_level_bits_def) + done + +end + +lemma cnode_caps_gsCNodes_from_sr: + "\ valid_objs s; (s, s') \ state_relation \ \ cnode_caps_gsCNodes' s'" + apply (clarsimp simp: cnode_caps_gsCNodes_def only_cnode_caps_def + o_def ran_map_option) + apply (safe, simp_all) + apply (clarsimp elim!: ranE) + apply (frule(1) pspace_relation_cte_wp_atI[rotated]) + apply clarsimp + apply (clarsimp simp: is_cap_simps) + apply (frule(1) cte_wp_at_valid_objs_valid_cap) + apply (clarsimp simp: valid_cap_simps cap_table_at_gsCNodes_eq) + done + +end diff --git a/proof/crefine/AARCH64/Finalise_C.thy b/proof/crefine/AARCH64/Finalise_C.thy new file mode 100644 index 0000000000..4dcda90638 --- /dev/null +++ b/proof/crefine/AARCH64/Finalise_C.thy @@ -0,0 +1,2972 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Finalise_C +imports IpcCancel_C +begin + +context kernel_m +begin + +declare if_split [split del] + +definition + "option_map2 f m = option_map f \ m" + +definition ksReadyQueues_head_end_2 :: "(domain \ priority \ ready_queue) \ bool" where + "ksReadyQueues_head_end_2 qs \ + \d p. 
tcbQueueHead (qs (d, p)) \ None \ tcbQueueEnd (qs (d, p)) \ None" + +abbreviation "ksReadyQueues_head_end s \ ksReadyQueues_head_end_2 (ksReadyQueues s)" + +lemmas ksReadyQueues_head_end_def = ksReadyQueues_head_end_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end: + "ksReadyQueues_asrt s \ ksReadyQueues_head_end s" + by (fastforce dest: tcbQueueHead_iff_tcbQueueEnd + simp: ready_queue_relation_def ksReadyQueues_asrt_def ksReadyQueues_head_end_def) + +lemma tcbSchedEnqueue_ksReadyQueues_head_end[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: tcbQueueEmpty_def obj_at'_def ksReadyQueues_head_end_def split: if_splits) + done + +lemma ksReadyQueues_head_end_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end (s\ksSchedulerAction := ChooseNewThread\) = ksReadyQueues_head_end s" + by (simp add: ksReadyQueues_head_end_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + +lemma setThreadState_ksReadyQueues_head_end[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end\" + unfolding setThreadState_def + by (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + +definition ksReadyQueues_head_end_tcb_at'_2 :: + "(domain \ priority \ ready_queue) \ (obj_ref \ tcb) \ bool" where + "ksReadyQueues_head_end_tcb_at'_2 qs tcbs \ + \d p. (\head. tcbQueueHead (qs (d, p)) = Some head \ tcbs head \ None) + \ (\end. tcbQueueEnd (qs (d, p)) = Some end \ tcbs end \ None)" + +abbreviation "ksReadyQueues_head_end_tcb_at' s \ + ksReadyQueues_head_end_tcb_at'_2 (ksReadyQueues s) (tcbs_of' s)" + +lemmas ksReadyQueues_head_end_tcb_at'_def = ksReadyQueues_head_end_tcb_at'_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at': + "\ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ ksReadyQueues_head_end_tcb_at' s" + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def + ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI allI) + apply (case_tac "ts = []", clarsimp) + apply (fastforce dest!: heap_path_head hd_in_set + simp: opt_pred_def tcbQueueEmpty_def split: option.splits) + apply (fastforce simp: queue_end_valid_def opt_pred_def tcbQueueEmpty_def + split: option.splits) + done + +lemma tcbSchedEnqueue_ksReadyQueues_head_end_tcb_at'[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma ksReadyQueues_head_end_tcb_at'_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end_tcb_at' (s\ksSchedulerAction := ChooseNewThread\) + = ksReadyQueues_head_end_tcb_at' s" + by (simp add: ksReadyQueues_head_end_tcb_at'_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + +lemma setThreadState_ksReadyQueues_head_end_tcb_at'[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding setThreadState_def + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma head_end_ksReadyQueues_': + "\ (s, s') \ rf_sr; ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; 
+ pspace_aligned' s; pspace_distinct' s; + d \ maxDomain; p \ maxPriority \ + \ head_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL + \ end_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL" + apply (frule (2) rf_sr_ctcb_queue_relation[where d=d and p=p]) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: option.splits) + apply (rename_tac "end" head end_tcb head_tcb) + apply (prop_tac "tcb_at' head s \ tcb_at' end s") + apply (fastforce intro!: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (fastforce dest: tcb_at_not_NULL) + done + +lemma tcbSchedEnqueue_cslift_spec: + "\s. \\\<^bsub>/UNIV\<^esub> \s. \d v. option_map2 tcbPriority_C (cslift s) \tcb = Some v + \ unat v \ numPriorities + \ option_map2 tcbDomain_C (cslift s) \tcb = Some d + \ unat d < Kernel_Config.numDomains + \ (end_C (index \ksReadyQueues (unat (d*0x100 + v))) \ NULL + \ option_map2 tcbPriority_C (cslift s) + (head_C (index \ksReadyQueues (unat (d*0x100 + v)))) + \ None + \ option_map2 tcbDomain_C (cslift s) + (head_C (index \ksReadyQueues (unat (d*0x100 + v)))) + \ None) + \ (head_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL + \ end_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL)\ + Call tcbSchedEnqueue_'proc + {s'. option_map2 tcbEPNext_C (cslift s') = option_map2 tcbEPNext_C (cslift s) + \ option_map2 tcbEPPrev_C (cslift s') = option_map2 tcbEPPrev_C (cslift s) + \ option_map2 tcbPriority_C (cslift s') = option_map2 tcbPriority_C (cslift s) + \ option_map2 tcbDomain_C (cslift s') = option_map2 tcbDomain_C (cslift s)}" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: option_map2_def fun_eq_iff h_t_valid_clift + h_t_valid_field[OF h_t_valid_clift]) + apply (rule conjI) + apply (clarsimp simp: typ_heap_simps le_maxDomain_eq_less_numDomains) + apply unat_arith + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: typ_heap_simps cong: if_cong) + apply (simp split: if_split) + by (auto simp: typ_heap_simps' if_Some_helper numPriorities_def + cong: if_cong split: if_splits) + +lemma setThreadState_cslift_spec: + "\s. \\\<^bsub>/UNIV\<^esub> \s. s \\<^sub>c \tptr \ (\x. ksSchedulerAction_' (globals s) = tcb_Ptr x + \ x \ 0 \ x \ 1 + \ (\d v. option_map2 tcbPriority_C (cslift s) (tcb_Ptr x) = Some v + \ unat v \ numPriorities + \ option_map2 tcbDomain_C (cslift s) (tcb_Ptr x) = Some d + \ unat d < Kernel_Config.numDomains + \ (end_C (index \ksReadyQueues (unat (d*0x100 + v))) \ NULL + \ option_map2 tcbPriority_C (cslift s) + (head_C (index \ksReadyQueues (unat (d*0x100 + v)))) + \ None + \ option_map2 tcbDomain_C (cslift s) + (head_C (index \ksReadyQueues (unat (d*0x100 + v)))) + \ None)))\ + Call setThreadState_'proc + {s'. 
option_map2 tcbEPNext_C (cslift s') = option_map2 tcbEPNext_C (cslift s) + \ option_map2 tcbEPPrev_C (cslift s') = option_map2 tcbEPPrev_C (cslift s) + \ option_map2 tcbPriority_C (cslift s') = option_map2 tcbPriority_C (cslift s) + \ option_map2 tcbDomain_C (cslift s') = option_map2 tcbDomain_C (cslift s) + \ ksReadyQueues_' (globals s') = ksReadyQueues_' (globals s)}" + apply (rule allI, rule conseqPre) + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply (vcg exspec=tcbSchedEnqueue_cslift_spec) + apply (vcg_step+)[2] + apply vcg_step + apply (vcg exspec=isRunnable_modifies) + apply vcg + apply vcg_step + apply vcg_step + apply (vcg_step+)[1] + apply vcg + apply vcg_step+ + apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff + fun_eq_iff option_map2_def) + by (simp split: if_split) + +lemma ep_queue_relation_shift: + "(option_map2 tcbEPNext_C (cslift s') + = option_map2 tcbEPNext_C (cslift s) + \ option_map2 tcbEPPrev_C (cslift s') + = option_map2 tcbEPPrev_C (cslift s)) + \ ep_queue_relation (cslift s') ts qPrev qHead + = ep_queue_relation (cslift s) ts qPrev qHead" + apply clarsimp + apply (induct ts arbitrary: qPrev qHead) + apply simp + apply simp + apply (simp add: option_map2_def fun_eq_iff + map_option_case) + apply (drule_tac x=qHead in spec)+ + apply (clarsimp split: option.split_asm) + done + +lemma rf_sr_cscheduler_relation: + "(s, s') \ rf_sr \ cscheduler_action_relation + (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + +lemma obj_at_ko_at2': + "\ obj_at' P p s; ko_at' ko p s \ \ P ko" + apply (drule obj_at_ko_at') + apply clarsimp + apply (drule ko_at_obj_congD', simp+) + done + +lemma ctcb_relation_tcbDomain: + "ctcb_relation tcb tcb' \ ucast (tcbDomain tcb) = tcbDomain_C tcb'" + by (simp add: ctcb_relation_def) + +lemma ctcb_relation_tcbPriority: + "ctcb_relation tcb tcb' \ ucast (tcbPriority tcb) = tcbPriority_C tcb'" + by (simp add: ctcb_relation_def) + +lemma ctcb_relation_tcbDomain_maxDomain_numDomains: + "\ ctcb_relation tcb tcb'; tcbDomain tcb \ maxDomain \ + \ unat (tcbDomain_C tcb') < Kernel_Config.numDomains" + apply (subst ctcb_relation_tcbDomain[symmetric], simp) + apply (simp add: le_maxDomain_eq_less_numDomains) + done + +lemma ctcb_relation_tcbPriority_maxPriority_numPriorities: + "\ ctcb_relation tcb tcb'; tcbPriority tcb \ maxPriority \ + \ unat (tcbPriority_C tcb') < numPriorities" + apply (subst ctcb_relation_tcbPriority[symmetric], simp) + apply (simp add: maxPriority_def numPriorities_def word_le_nat_alt) + done + +lemma tcbSchedEnqueue_cslift_precond_discharge: + "\ (s, s') \ rf_sr; obj_at' (P :: tcb \ bool) x s; valid_objs' s ; + ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; + pspace_aligned' s; pspace_distinct' s\ \ + (\d v. 
option_map2 tcbPriority_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some v + \ unat v < numPriorities + \ option_map2 tcbDomain_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some d + \ unat d < Kernel_Config.numDomains + \ (end_C (index (ksReadyQueues_' (globals s')) (unat (d*0x100 + v))) \ NULL + \ option_map2 tcbPriority_C (cslift s') + (head_C (index (ksReadyQueues_' (globals s')) (unat (d*0x100 + v)))) + \ None + \ option_map2 tcbDomain_C (cslift s') + (head_C (index (ksReadyQueues_' (globals s')) (unat (d*0x100 + v)))) + \ None) + \ (head_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL + \ end_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL))" + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps' option_map2_def) + apply (rename_tac tcb tcb') + apply (frule_tac t=x in valid_objs'_maxPriority, fastforce simp: obj_at'_def) + apply (frule_tac t=x in valid_objs'_maxDomain, fastforce simp: obj_at'_def) + apply (drule_tac P="\tcb. tcbPriority tcb \ maxPriority" in obj_at_ko_at2', simp) + apply (drule_tac P="\tcb. tcbDomain tcb \ maxDomain" in obj_at_ko_at2', simp) + apply (simp add: ctcb_relation_tcbDomain_maxDomain_numDomains + ctcb_relation_tcbPriority_maxPriority_numPriorities) + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_ctcb_queue_relation) + apply (simp add: maxDom_to_H maxPrio_to_H)+ + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in head_end_ksReadyQueues_', fastforce+) + apply (simp add: cready_queues_index_to_C_def2 numPriorities_def le_maxDomain_eq_less_numDomains) + apply (clarsimp simp: ctcb_relation_def) + apply (frule arg_cong[where f=unat], subst(asm) unat_ucast_up_simp, simp) + apply (frule (3) head_end_ksReadyQueues_', fastforce+) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (prop_tac "\ tcbQueueEmpty ((ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)))") + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (clarsimp simp: tcbQueueEmpty_def) + apply (rename_tac head "end" head_tcb end_tcb) + apply (prop_tac "tcb_at' head s") + apply (fastforce intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (frule_tac thread=head in obj_at_cslift_tcb) + apply fastforce + apply (clarsimp dest: obj_at_cslift_tcb simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + done + +lemma cancel_all_ccorres_helper: + "ccorres dc xfdc + (\s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s + \ (\t\set ts. tcb_at' t s \ t \ 0) + \ sch_act_wf (ksSchedulerAction s) s) + {s'. \p. ep_queue_relation (cslift s') ts + p (thread_' s')} hs + (mapM_x (\t. 
do + y \ setThreadState Restart t; + tcbSchedEnqueue t + od) ts) + (WHILE \thread \ tcb_Ptr 0 DO + (CALL setThreadState(\thread, scast ThreadState_Restart));; + (CALL tcbSchedEnqueue(\thread));; + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \thread\ + (\thread :== h_val (hrs_mem \t_hrs) (Ptr &(\thread\[''tcbEPNext_C'']) :: tcb_C ptr ptr)) + OD)" + unfolding whileAnno_def +proof (induct ts) + case Nil + show ?case + apply (simp del: Collect_const) + apply (rule iffD1 [OF ccorres_expand_while_iff]) + apply (rule ccorres_tmp_lift2[where G'=UNIV and G''="\x. UNIV", simplified]) + apply ceqv + apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def) + apply (rule ccorres_guard_imp2, rule ccorres_return_Skip) + apply simp + done +next + case (Cons thread threads) + show ?case + apply (rule iffD1 [OF ccorres_expand_while_iff]) + apply (simp del: Collect_const + add: mapM_x_Cons) + apply (rule ccorres_guard_imp2) + apply (rule_tac xf'=thread_' in ccorres_abstract) + apply ceqv + apply (rule_tac P="rv' = tcb_ptr_to_ctcb_ptr thread" + in ccorres_gen_asm2) + apply (rule_tac P="tcb_ptr_to_ctcb_ptr thread \ Ptr 0" + in ccorres_gen_asm) + apply (clarsimp simp add: Collect_True ccorres_cond_iffs + simp del: Collect_const) + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow[where F=UNIV]) + apply (intro ccorres_rhs_assoc) + apply (ctac(no_vcg) add: setThreadState_ccorres) + apply (rule ccorres_add_return2) + apply (ctac(no_vcg) add: tcbSchedEnqueue_ccorres) + apply (rule_tac P="tcb_at' thread" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (drule obj_at_ko_at', clarsimp) + apply (erule cmap_relationE1 [OF cmap_relation_tcb]) + apply (erule ko_at_projectKO_opt) + apply (fastforce intro: typ_heap_simps) + apply (wp sts_valid_objs' | simp)+ + apply (rule ceqv_refl) + apply (rule "Cons.hyps") + apply (wp sts_valid_objs' sts_sch_act sch_act_wf_lift hoare_vcg_const_Ball_lift + sts_st_tcb' | simp)+ + + apply (vcg exspec=setThreadState_cslift_spec exspec=tcbSchedEnqueue_cslift_spec) + apply (clarsimp simp: tcb_at_not_NULL Collect_const_mem valid_tcb_state'_def + ThreadState_defs mask_def valid_objs'_maxDomain valid_objs'_maxPriority) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + apply (rule conjI) + apply clarsimp + apply (frule rf_sr_cscheduler_relation) + apply (clarsimp simp: cscheduler_action_relation_def + st_tcb_at'_def + split: scheduler_action.split_asm) + apply (rename_tac word) + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; clarsimp?) + apply simp + apply clarsimp + apply (rule conjI) + apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge) + apply clarsimp + apply clarsimp+ + apply (subst ep_queue_relation_shift, fastforce) + apply (drule_tac x="tcb_ptr_to_ctcb_ptr thread" + in fun_cong)+ + apply (clarsimp simp add: option_map2_def typ_heap_simps) + apply fastforce + done +qed + +crunches setEndpoint, setNotification + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + and ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + (simp: updateObject_default_def) + +lemma cancelAllIPC_ccorres: + "ccorres dc xfdc + invs' (UNIV \ {s. 
epptr_' s = Ptr epptr}) [] + (cancelAllIPC epptr) (Call cancelAllIPC_'proc)" + apply (cinit lift: epptr_') + apply (rule ccorres_stateAssert) + apply (rule ccorres_symb_exec_l [OF _ getEndpoint_inv _ empty_fail_getEndpoint]) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="case ep of IdleEP \ scast EPState_Idle + | RecvEP _ \ scast EPState_Recv | SendEP _ \ scast EPState_Send" + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply clarsimp + apply (erule cmap_relationE1 [OF cmap_relation_ep]) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp add: typ_heap_simps) + apply (simp add: cendpoint_relation_def Let_def + split: endpoint.split_asm) + apply ceqv + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ep epptr" + in ccorres_guard_imp2[where A'=UNIV]) + apply wpc + apply (rename_tac list) + apply (simp add: endpoint_state_defs + Collect_False Collect_True + ccorres_cond_iffs + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply csymbr + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) + apply (rule_tac r'=dc and xf'=xfdc + in ccorres_split_nothrow) + apply (rule_tac P="ko_at' (RecvEP list) epptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rule cmap_relationE1 [OF cmap_relation_ep]) + apply assumption + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps setEndpoint_def) + apply (rule rev_bexI) + apply (rule setObject_eq; simp add: objBits_simps')[1] + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def carch_globals_def + cmachine_state_relation_def + packed_heap_update_collapse_hrs) + apply (clarsimp simp: cpspace_relation_def + update_ep_map_tos typ_heap_simps') + apply (erule(2) cpspace_relation_ep_update_ep) + subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) + subgoal by simp + apply (rule ceqv_refl) + apply (simp only: ccorres_seq_skip) + apply (rule ccorres_split_nothrow_novcg) + apply (rule cancel_all_ccorres_helper) + apply ceqv + apply (ctac add: rescheduleRequired_ccorres) + apply (wp weak_sch_act_wf_lift_linear + | simp)+ + apply (rule mapM_x_wp', wp)+ + apply (wp sts_st_tcb') + apply (clarsimp split: if_split) + apply (rule mapM_x_wp', wp sts_valid_objs')+ + apply (clarsimp simp: valid_tcb_state'_def) + apply (simp add: guard_is_UNIV_def) + apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift + weak_sch_act_wf_lift_linear) + apply vcg + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_Skip) + apply (rename_tac list) + apply (simp add: endpoint_state_defs Collect_False Collect_True ccorres_cond_iffs + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply csymbr + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) + apply (rule_tac r'=dc and xf'=xfdc + in ccorres_split_nothrow) + apply (rule_tac P="ko_at' (SendEP list) epptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rule cmap_relationE1 [OF cmap_relation_ep]) + apply assumption + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps setEndpoint_def) + apply (rule rev_bexI) + apply (rule setObject_eq, simp_all add: objBits_simps')[1] + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def carch_globals_def + cmachine_state_relation_def 
+ packed_heap_update_collapse_hrs) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps' + update_ep_map_tos) + apply (erule(2) cpspace_relation_ep_update_ep) + subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) + subgoal by simp + apply (rule ceqv_refl) + apply (simp only: ccorres_seq_skip) + apply (rule ccorres_split_nothrow_novcg) + apply (rule cancel_all_ccorres_helper) + apply ceqv + apply (ctac add: rescheduleRequired_ccorres) + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' + sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ + apply (simp add: guard_is_UNIV_def) + apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift + weak_sch_act_wf_lift_linear) + apply vcg + apply (clarsimp simp: valid_ep'_def invs_valid_objs') + apply (rule cmap_relationE1[OF cmap_relation_ep], assumption) + apply (erule ko_at_projectKO_opt) + apply (frule obj_at_valid_objs', clarsimp+) + apply (clarsimp simp: valid_obj'_def valid_ep'_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + subgoal by (auto simp: typ_heap_simps cendpoint_relation_def + Let_def tcb_queue_relation'_def + invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority + intro!: obj_at_conj') + apply (clarsimp simp: guard_is_UNIV_def) + apply (wp getEndpoint_wp) + apply clarsimp + done + +lemma cancelAllSignals_ccorres: + "ccorres dc xfdc + invs' (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr}) [] + (cancelAllSignals ntfnptr) (Call cancelAllSignals_'proc)" + apply (cinit lift: ntfnPtr_') + apply (rule ccorres_stateAssert) + apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle + | ActiveNtfn _ \ scast NtfnState_Active | WaitingNtfn _ \ scast NtfnState_Waiting" + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply clarsimp + apply (erule cmap_relationE1 [OF cmap_relation_ntfn]) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp add: typ_heap_simps) + apply (simp add: cnotification_relation_def Let_def + split: ntfn.split_asm) + apply ceqv + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ntfn ntfnptr" + in ccorres_guard_imp2[where A'=UNIV]) + apply wpc + apply (simp add: notification_state_defs ccorres_cond_iffs) + apply (rule ccorres_return_Skip) + apply (simp add: notification_state_defs ccorres_cond_iffs) + apply (rule ccorres_return_Skip) + apply (rename_tac list) + apply (simp add: notification_state_defs ccorres_cond_iffs Collect_True + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply csymbr + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rule_tac x=ntfnptr in cmap_relationE1 [OF cmap_relation_ntfn], assumption) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps setNotification_def) + apply (rule rev_bexI) + apply (rule setObject_eq, simp_all add: objBits_simps')[1] + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def carch_globals_def + cmachine_state_relation_def + 
packed_heap_update_collapse_hrs) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps' + update_ntfn_map_tos) + apply (erule(2) cpspace_relation_ntfn_update_ntfn) + subgoal by (simp add: cnotification_relation_def notification_state_defs Let_def) + subgoal by simp + apply (rule ceqv_refl) + apply (simp only: ccorres_seq_skip) + apply (rule ccorres_split_nothrow_novcg) + apply (rule cancel_all_ccorres_helper) + apply ceqv + apply (ctac add: rescheduleRequired_ccorres) + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' + sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ + apply (simp add: guard_is_UNIV_def) + apply (wp set_ntfn_valid_objs' hoare_vcg_const_Ball_lift + weak_sch_act_wf_lift_linear) + apply vcg + apply clarsimp + apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption) + apply (erule ko_at_projectKO_opt) + apply (frule obj_at_valid_objs', clarsimp+) + apply (clarsimp simp add: valid_obj'_def valid_ntfn'_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + subgoal by (auto simp: typ_heap_simps cnotification_relation_def + Let_def tcb_queue_relation'_def + invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority + intro!: obj_at_conj') + apply (clarsimp simp: guard_is_UNIV_def) + apply (wp getNotification_wp) + apply clarsimp + done + +lemma tcb_queue_concat: + "tcb_queue_relation getNext getPrev mp (xs @ z # ys) qprev qhead + \ tcb_queue_relation getNext getPrev mp (z # ys) + (tcb_ptr_to_ctcb_ptr (last ((ctcb_ptr_to_tcb_ptr qprev) # xs))) (tcb_ptr_to_ctcb_ptr z)" + apply (induct xs arbitrary: qprev qhead) + apply clarsimp + apply clarsimp + apply (elim meta_allE, drule(1) meta_mp) + apply (clarsimp cong: if_cong) + done + +lemma tcb_fields_ineq_helper: + "\ tcb_at' (ctcb_ptr_to_tcb_ptr x) s; tcb_at' (ctcb_ptr_to_tcb_ptr y) s \ \ + &(x\[''tcbSchedPrev_C'']) \ &(y\[''tcbSchedNext_C''])" + apply (clarsimp dest!: tcb_aligned'[OF obj_at'_weakenE, OF _ TrueI] + ctcb_ptr_to_tcb_ptr_aligned) + apply (clarsimp simp: field_lvalue_def ctcb_size_bits_def) + apply (subgoal_tac "is_aligned (ptr_val y - ptr_val x) 10" (*ctcb_size_bits*)) + apply (drule sym, fastforce simp: is_aligned_def dvd_def) + apply (erule(1) aligned_sub_aligned) + apply (simp add: word_bits_conv) + done + +end + +primrec + tcb_queue_relation2 :: "(tcb_C \ tcb_C ptr) \ (tcb_C \ tcb_C ptr) + \ (tcb_C ptr \ tcb_C) \ tcb_C ptr list + \ tcb_C ptr \ tcb_C ptr \ bool" +where + "tcb_queue_relation2 getNext getPrev hp [] before after = True" +| "tcb_queue_relation2 getNext getPrev hp (x # xs) before after = + (\tcb. 
hp x = Some tcb \<and> getPrev tcb = before
+                  \<and> getNext tcb = hd (xs @ [after])
+                  \<and> tcb_queue_relation2 getNext getPrev hp xs x after)"
+
+lemma use_tcb_queue_relation2:
+  "tcb_queue_relation getNext getPrev hp xs qprev qhead
+     = (tcb_queue_relation2 getNext getPrev hp
+            (map tcb_ptr_to_ctcb_ptr xs) qprev (tcb_Ptr 0)
+         \<and> qhead = (hd (map tcb_ptr_to_ctcb_ptr xs @ [tcb_Ptr 0])))"
+  apply (induct xs arbitrary: qhead qprev)
+   apply simp
+  apply (simp add: conj_comms cong: conj_cong)
+  done
+
+lemma tcb_queue_relation2_concat:
+  "tcb_queue_relation2 getNext getPrev hp
+      (xs @ ys) before after
+    = (tcb_queue_relation2 getNext getPrev hp
+          xs before (hd (ys @ [after]))
+        \<and> tcb_queue_relation2 getNext getPrev hp
+              ys (last (before # xs)) after)"
+  apply (induct xs arbitrary: before)
+   apply simp
+  apply (rename_tac x xs before)
+  apply (simp split del: if_split)
+  apply (case_tac "hp x")
+   apply simp
+  apply simp
+  done
+
+lemma tcb_queue_relation2_cong:
+  "\<lbrakk>queue = queue'; before = before'; after = after';
+    \<And>p. p \<in> set queue' \<Longrightarrow> mp p = mp' p\<rbrakk>
+      \<Longrightarrow> tcb_queue_relation2 getNext getPrev mp queue before after =
+          tcb_queue_relation2 getNext getPrev mp' queue' before' after'"
+  using [[hypsubst_thin = true]]
+  apply clarsimp
+  apply (induct queue' arbitrary: before')
+   apply simp+
+  done
+
+context kernel_m begin
+
+lemma setThreadState_ccorres_simple:
+  "ccorres dc xfdc (\<lambda>s. tcb_at' thread s \<and> \<not> runnable' st \<and> sch_act_simple s)
+     ({s'. (\<forall>cl fl. cthread_state_relation_lifted st (cl\<lparr>tsType_CL := ts_' s' && mask 4\<rparr>, fl))}
+      \<inter> {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) []
+     (setThreadState st thread) (Call setThreadState_'proc)"
+  apply (cinit lift: tptr_' cong add: call_ignore_cong)
+   apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres)
+    apply (ctac add: scheduleTCB_ccorres_valid_queues'_simple)
+   apply (wp threadSet_tcbState_st_tcb_at')
+  apply (fastforce simp: weak_sch_act_wf_def)
+  done
+
+lemma updateRestartPC_ccorres:
+  "ccorres dc xfdc (tcb_at' thread) \<lbrace>\<acute>tcb = tcb_ptr_to_ctcb_ptr thread \<rbrace> hs
+     (updateRestartPC thread) (Call updateRestartPC_'proc)"
+  apply (cinit lift: tcb_')
+   apply (subst asUser_bind_distrib; (wp add: empty_fail_getRegister)?)
+   apply (ctac (no_vcg) add: getRegister_ccorres)
+    apply (ctac (no_vcg) add: setRegister_ccorres)
+   apply wpsimp+
+  apply (simp add: AARCH64_H.faultRegister_def AARCH64_H.nextInstructionRegister_def
+                   AARCH64.faultRegister_def AARCH64.nextInstructionRegister_def)
+  done
+
+crunches updateRestartPC
+  for sch_act_simple[wp]: sch_act_simple
+  and valid_objs'[wp]: valid_objs'
+  and tcb_at'[wp]: "tcb_at' p"
+
+lemma suspend_ccorres:
+  assumes cteDeleteOne_ccorres:
+  "\<And>w slot. ccorres dc xfdc
+   (invs' and cte_wp_at' (\<lambda>ct. w = -1 \<or> cteCap ct = NullCap
+                                \<or> (\<forall>cap'. ccap_relation (cteCap ct) cap' \<longrightarrow> cap_get_tag cap' = w)) slot)
+   ({s. gs_get_assn cteDeleteOne_'proc (ghost'state_' (globals s)) = w}
+        \<inter> {s. slot_' s = Ptr slot}) []
+   (cteDeleteOne slot) (Call cteDeleteOne_'proc)"
+  shows
+  "ccorres dc xfdc
+   (invs' and sch_act_simple and tcb_at' thread and (\<lambda>s. thread \<noteq> ksIdleThread s))
+   (UNIV \<inter> {s. 
target_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (suspend thread) (Call suspend_'proc)" + apply (cinit lift: target_') + apply (ctac(no_vcg) add: cancelIPC_ccorres1 [OF cteDeleteOne_ccorres]) + apply (rule getThreadState_ccorres_foo) + apply (rename_tac threadState) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="thread_state_to_tsType threadState" + and R="st_tcb_at' ((=) threadState) thread" + and R'=UNIV + in + ccorres_symb_exec_r_known_rv) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: st_tcb_at'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType) + apply ceqv + supply Collect_const[simp del] + apply (rule ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\ and xf=xfdc]) + apply clarsimp + apply (rule iffI) + apply simp + apply (erule thread_state_to_tsType.elims; simp add: ThreadState_defs) + apply (ctac (no_vcg) add: updateRestartPC_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac(no_vcg) add: setThreadState_ccorres_simple) + apply (ctac add: tcbSchedDequeue_ccorres) + apply (rule_tac Q="\_. valid_objs' and tcb_at' thread and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) + apply clarsimp + apply (wp sts_valid_objs')[1] + apply clarsimp + apply (wpsimp simp: valid_tcb_state'_def) + apply clarsimp + apply (rule conseqPre, vcg exspec=updateRestartPC_modifies) + apply (rule subset_refl) + apply clarsimp + apply (rule conseqPre, vcg) + apply (rule subset_refl) + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_vcg_conj_lift) + apply (rule cancelIPC_sch_act_simple) + apply (rule cancelIPC_tcb_at'[where t=thread]) + apply (rule delete_one_conc_fr.cancelIPC_invs) + apply (fastforce simp: invs_valid_objs' valid_tcb_state'_def) + apply (auto simp: ThreadState_defs) + done + +lemma cap_to_H_NTFNCap_tag: + "\ cap_to_H cap = NotificationCap word1 word2 a b; + cap_lift C_cap = Some cap \ \ + cap_get_tag C_cap = scast cap_notification_cap" + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_split_asm) + by (simp_all add: Let_def cap_lift_def split: if_splits) + +lemmas ccorres_pre_getBoundNotification = ccorres_pre_threadGet [where f=tcbBoundNotification, folded getBoundNotification_def] + +lemma option_to_ptr_not_NULL: + "option_to_ptr x \ NULL \ x \ None" + by (auto simp: option_to_ptr_def option_to_0_def split: option.splits) + +lemma doUnbindNotification_ccorres: + "ccorres dc xfdc (invs' and tcb_at' tcb) + (UNIV \ {s. ntfnPtr_' s = ntfn_Ptr ntfnptr} \ {s. 
tcbptr_' s = tcb_ptr_to_ctcb_ptr tcb}) [] + (do ntfn \ getNotification ntfnptr; doUnbindNotification ntfnptr ntfn tcb od) + (Call doUnbindNotification_'proc)" + apply (cinit' lift: ntfnPtr_' tcbptr_') + apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) + apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: option_to_ptr_def option_to_0_def) + apply (frule cmap_relation_ntfn) + apply (erule (1) cmap_relation_ko_atE) + apply (rule conjI) + apply (erule h_t_valid_clift) + apply (clarsimp simp: setNotification_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def init_def + typ_heap_simps' + cpspace_relation_def update_ntfn_map_tos) + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=2] NtfnState_Waiting_def) + apply (case_tac "ntfnObj ntfn", ((simp add: option_to_ctcb_ptr_def)+)[4]) + subgoal by (simp add: carch_state_relation_def) + subgoal by (simp add: cmachine_state_relation_def) + subgoal by (simp add: h_t_valid_clift_Some_iff) + subgoal by (simp add: objBits_simps') + subgoal by (simp add: objBits_simps) + apply assumption + apply ceqv + apply (rule ccorres_move_c_guard_tcb) + apply (simp add: setBoundNotification_def) + apply (rule_tac P'="\" and P="\" + in threadSet_ccorres_lemma3) + apply vcg + apply simp + apply (erule(1) rf_sr_tcb_update_no_queue2) + apply (simp add: typ_heap_simps')+ + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: ctcb_relation_def option_to_ptr_def option_to_0_def) + apply (simp add: invs'_def valid_state'_def) + apply (wp get_ntfn_ko' | simp add: guard_is_UNIV_def)+ + done + +lemma doUnbindNotification_ccorres': + "ccorres dc xfdc (invs' and tcb_at' tcb and ko_at' ntfn ntfnptr) + (UNIV \ {s. ntfnPtr_' s = ntfn_Ptr ntfnptr} \ {s. 
tcbptr_' s = tcb_ptr_to_ctcb_ptr tcb}) [] + (doUnbindNotification ntfnptr ntfn tcb) + (Call doUnbindNotification_'proc)" + apply (cinit' lift: ntfnPtr_' tcbptr_') + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) + apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: option_to_ptr_def option_to_0_def) + apply (frule cmap_relation_ntfn) + apply (erule (1) cmap_relation_ko_atE) + apply (rule conjI) + apply (erule h_t_valid_clift) + apply (clarsimp simp: setNotification_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def init_def + typ_heap_simps' + cpspace_relation_def update_ntfn_map_tos) + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=2] NtfnState_Waiting_def) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems + by (case_tac "ntfnObj ntfn", (simp add: option_to_ctcb_ptr_def)+) + subgoal by (simp add: carch_state_relation_def) + subgoal by (simp add: cmachine_state_relation_def) + subgoal by (simp add: h_t_valid_clift_Some_iff) + subgoal by (simp add: objBits_simps') + subgoal by (simp add: objBits_simps) + apply assumption + apply ceqv + apply (rule ccorres_move_c_guard_tcb) + apply (simp add: setBoundNotification_def) + apply (rule_tac P'="\" and P="\" + in threadSet_ccorres_lemma3) + apply vcg + apply simp + apply (erule(1) rf_sr_tcb_update_no_queue2) + apply (simp add: typ_heap_simps')+ + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: ctcb_relation_def option_to_ptr_def option_to_0_def) + apply (simp add: invs'_def valid_state'_def) + apply (wp get_ntfn_ko' | simp add: guard_is_UNIV_def)+ + done + + +lemma unbindNotification_ccorres: + "ccorres dc xfdc + (invs') (UNIV \ {s. tcb_' s = tcb_ptr_to_ctcb_ptr tcb}) [] + (unbindNotification tcb) (Call unbindNotification_'proc)" + supply option.case_cong[cong] + apply (cinit lift: tcb_') + apply (rule_tac xf'=ntfnPtr_' + and r'="\rv rv'. rv' = option_to_ptr rv \ rv \ Some 0" + in ccorres_split_nothrow) + apply (simp add: getBoundNotification_def) + apply (rule_tac P="no_0_obj' and valid_objs'" in threadGet_vcg_corres_P) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (drule(1) ko_at_valid_objs', simp) + apply (clarsimp simp: option_to_ptr_def option_to_0_def valid_obj'_def valid_tcb'_def) + apply ceqv + apply simp + apply wpc + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (rule ccorres_cond_true) + apply (ctac (no_vcg) add: doUnbindNotification_ccorres[simplified]) + apply (wp gbn_wp') + apply vcg + apply (clarsimp simp: option_to_ptr_def option_to_0_def pred_tcb_at'_def + obj_at'_weakenE[OF _ TrueI] + split: option.splits) + apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def) + done + + +lemma unbindMaybeNotification_ccorres: + "ccorres dc xfdc (invs') (UNIV \ {s. 
ntfnPtr_' s = ntfn_Ptr ntfnptr}) [] + (unbindMaybeNotification ntfnptr) (Call unbindMaybeNotification_'proc)" + supply option.case_cong[cong] + apply (cinit lift: ntfnPtr_') + apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) + apply (rule_tac xf'=boundTCB_' + and val="option_to_ctcb_ptr (ntfnBoundTCB ntfn)" + and R="ko_at' ntfn ntfnptr and valid_bound_tcb' (ntfnBoundTCB ntfn)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply clarsimp + apply (erule cmap_relationE1[OF cmap_relation_ntfn]) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def) + apply ceqv + apply wpc + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (rule ccorres_cond_true) + apply (rule ccorres_call[where xf'=xfdc]) + apply (rule doUnbindNotification_ccorres'[simplified]) + apply simp + apply simp + apply simp + apply (clarsimp simp add: guard_is_UNIV_def option_to_ctcb_ptr_def ) + apply (wp getNotification_wp) + apply (clarsimp ) + apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) + by (auto simp: valid_ntfn'_def obj_at'_def + objBitsKO_def is_aligned_def option_to_ctcb_ptr_def tcb_at_not_NULL + split: ntfn.splits) + +(* TODO: move *) +(* On ARM machines, irqInvalid is 2^16-1 (i.e. -1 in 16-bit), which is greater than maxIRQ *) +definition + irq_opt_relation_def: + "irq_opt_relation (airq :: (irq_len word) option) (cirq :: machine_word) \ + case airq of + Some irq \ (cirq = ucast irq \ + irq \ scast irqInvalid \ + ucast irq \ UCAST(32 signed \ irq_len) Kernel_C.maxIRQ) + | None \ cirq = ucast irqInvalid" + +lemma irq_opt_relation_Some_ucast: + "\ x && mask (LENGTH(irq_len)) = x; ucast x \ irqInvalid; + ucast x \ (scast Kernel_C.maxIRQ :: irq_len word) \ x \ (scast Kernel_C.maxIRQ :: machine_word) \ + \ irq_opt_relation (Some (ucast x)) x" + unfolding irq_opt_relation_def + apply (simp add: and_mask_eq_iff_le_mask) + apply (simp add: mask_def) + apply (subst ucast_ucast_len) + apply (erule order_le_less_trans, simp) + apply (clarsimp simp: irqInvalid_def Kernel_C.maxIRQ_def mask_def unat_arith_simps unat_ucast) + done + +lemma finaliseCap_True_cases_ccorres: + "\final. isEndpointCap cap \ isNotificationCap cap + \ isReplyCap cap \ isDomainCap cap \ cap = NullCap \ + ccorres (\rv rv'. ccap_relation (fst rv) (finaliseCap_ret_C.remainder_C rv') + \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) + ret__struct_finaliseCap_ret_C_' + (invs') (UNIV \ {s. ccap_relation cap (cap_' s)} \ {s. final_' s = from_bool final} + \ {s. 
exposed_' s = from_bool flag}) [] + (finaliseCap cap final flag) (Call finaliseCap_'proc)" + apply (subgoal_tac "\ isArchCap \ cap") + prefer 2 + apply (clarsimp simp: isCap_simps) + apply (cinit lift: cap_' final_' exposed_' cong: call_ignore_cong) + apply csymbr + apply (simp add: cap_get_tag_isCap Collect_False del: Collect_const) + apply (fold case_bool_If) + apply simp + apply csymbr + apply wpc + apply (simp add: cap_get_tag_isCap ccorres_cond_univ_iff Let_def) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_split_nothrow_novcg) + apply (simp add: when_def) + apply (rule ccorres_cond2) + apply (clarsimp simp: Collect_const_mem from_bool_0) + apply csymbr + apply (rule ccorres_call[where xf'=xfdc], rule cancelAllIPC_ccorres) + apply simp + apply simp + apply simp + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (simp add: return_def, vcg) + apply (rule ceqv_refl) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp add: return_def ccap_relation_NullCap_iff + irq_opt_relation_def) + apply vcg + apply wp + apply (simp add: guard_is_UNIV_def) + apply wpc + apply (simp add: cap_get_tag_isCap Let_def + ccorres_cond_empty_iff ccorres_cond_univ_iff) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_split_nothrow_novcg) + apply (simp add: when_def) + apply (rule ccorres_cond2) + apply (clarsimp simp: Collect_const_mem from_bool_0) + apply (subgoal_tac "cap_get_tag capa = scast cap_notification_cap") prefer 2 + apply (clarsimp simp: ccap_relation_def isNotificationCap_def) + apply (case_tac cap, simp_all)[1] + apply (clarsimp simp: option_map_def split: option.splits) + apply (drule (2) cap_to_H_NTFNCap_tag[OF sym]) + apply (rule ccorres_rhs_assoc) + apply (rule ccorres_rhs_assoc) + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: unbindMaybeNotification_ccorres) + apply (rule ccorres_call[where xf'=xfdc], rule cancelAllSignals_ccorres) + apply simp + apply simp + apply simp + apply (wp | wpc | simp add: guard_is_UNIV_def)+ + apply (rule ccorres_return_Skip') + apply (rule ceqv_refl) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp add: return_def ccap_relation_NullCap_iff + irq_opt_relation_def) + apply vcg + apply wp + apply (simp add: guard_is_UNIV_def) + apply wpc + apply (simp add: cap_get_tag_isCap Let_def + ccorres_cond_empty_iff ccorres_cond_univ_iff) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp add: return_def ccap_relation_NullCap_iff + irq_opt_relation_def) + apply vcg + apply wpc + apply (simp add: cap_get_tag_isCap Let_def + ccorres_cond_empty_iff ccorres_cond_univ_iff) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp add: return_def ccap_relation_NullCap_iff) + apply (clarsimp simp add: irq_opt_relation_def) + apply vcg + \ \NullCap case by exhaustion\ + apply (simp add: cap_get_tag_isCap Let_def 
+ ccorres_cond_empty_iff ccorres_cond_univ_iff) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp add: return_def ccap_relation_NullCap_iff + irq_opt_relation_def) + apply vcg + apply (clarsimp simp: Collect_const_mem cap_get_tag_isCap) + apply (rule TrueI conjI impI TrueI)+ + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def isNotificationCap_def + isEndpointCap_def valid_obj'_def valid_ntfn'_def + dest!: obj_at_valid_objs') + apply clarsimp + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) + apply clarsimp + done + +lemma finaliseCap_True_standin_ccorres: + "\final. + ccorres (\rv rv'. ccap_relation (fst rv) (finaliseCap_ret_C.remainder_C rv') + \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) + ret__struct_finaliseCap_ret_C_' + (invs') (UNIV \ {s. ccap_relation cap (cap_' s)} \ {s. final_' s = from_bool final} + \ {s. exposed_' s = from_bool True \ \dave has name wrong\}) [] + (finaliseCapTrue_standin cap final) (Call finaliseCap_'proc)" + unfolding finaliseCapTrue_standin_simple_def + apply (case_tac "P :: bool" for P) + apply (erule finaliseCap_True_cases_ccorres) + apply (simp add: finaliseCap_def ccorres_fail') + done + +lemma invalidateTLBByASID_ccorres: + "ccorres dc xfdc + (valid_arch_state' and (\_. asid_wf asid)) + (\\asid___unsigned_long = asid\) [] + (invalidateTLBByASID asid) + (Call invalidateTLBByASID_'proc)" + apply (cinit lift: asid___unsigned_long_') + apply (ctac(no_vcg) add: findMapForASID_loadVMID_ccorres) + apply csymbr + apply (clarsimp simp: when_def) + apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) + apply (clarsimp simp: casid_map_relation_def to_bool_def asid_map_asid_map_vspace_lift_def + split: option.split_asm asid_map_CL.split_asm if_split) + apply (rule ccorres_return_void_C) + apply csymbr + apply (ctac add: invalidateTranslationASID_ccorres) + apply vcg + apply wpsimp + apply (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def + asid_map_asid_map_vspace_lift_def + split: option.split if_split) + done + +lemma offset_xf_for_sequence: + "\s f. offset___unsigned_long_' (offset___unsigned_long_'_update f s) = f (offset___unsigned_long_' s) + \ globals (offset___unsigned_long_'_update f s) = globals s" + by simp + +lemma invs'_invs_no_cicd': + "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" + by (simp add: invs'_invs_no_cicd) + +(* FIXME AARCH64: vcpu_vppi_masked_C_Ptr is too generic, applies to most bit fields *) + +crunches invalidateTLBByASID, invalidateASIDEntry + for valid_arch_state'[wp]: valid_arch_state' + and cur_tcb'[wp]: cur_tcb' + (wp: crunch_wps) + +lemma updateASIDPoolEntry_dom_ap_inv: + "(\x. f x \ None) \ + updateASIDPoolEntry f asid \\s. obj_at' (\ap. dom (inv asidpool.ASIDPool ap) = + dom (inv asidpool.ASIDPool pool)) p s\" + unfolding updateASIDPoolEntry_def getPoolPtr_def obj_at'_real_def + supply not_None_eq[simp del] + apply wpsimp + apply (rule setObject_ko_wp_at) + apply simp + apply (simp add: objBits_simps) + apply (simp add: bit_simps) + apply (wp getASID_wp haskell_assert_wp)+ + apply (fastforce simp: obj_at'_real_def ko_wp_at'_def objBits_simps split: if_split) + done + +crunches invalidateASIDEntry, invalidateTLBByASID + for dom_ap_inv[wp]: "\s. obj_at' (\ap. 
dom (inv asidpool.ASIDPool ap) = + dom (inv asidpool.ASIDPool pool)) p s" + +lemma deleteASIDPool_ccorres: + "ccorres dc xfdc (invs' and (\_. asid_wf base \ pool \ 0)) + ({s. asid_base_' s = base} \ {s. pool_' s = Ptr pool}) [] + (deleteASIDPool base pool) (Call deleteASIDPool_'proc)" + apply (rule ccorres_gen_asm) + apply (cinit lift: asid_base_' pool_' simp: whileAnno_def) + apply (rule ccorres_assert) + apply (clarsimp simp: liftM_def when_def) + apply (rule ccorres_Guard)+ + apply (rule ccorres_pre_gets_armKSASIDTable_ksArchState) + apply (rule_tac R="\s. rv = armKSASIDTable (ksArchState s)" in ccorres_cond2) + apply clarsimp + apply (subst rf_sr_armKSASIDTable, assumption) + apply (simp add: asid_high_bits_word_bits asid_wf_def mask_def) + apply (rule shiftr_less_t2n) + apply (simp add: asid_low_bits_def asid_high_bits_def asid_bits_def) + apply (subst ucast_asid_high_bits_is_shift, assumption) + apply (simp add: option_to_ptr_def option_to_0_def split: option.split) + apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ + apply (rule ccorres_pre_getObject_asidpool) + apply (rename_tac poolKO) + apply (simp only: mapM_discarded) + apply (rule ccorres_rhs_assoc2, + rule ccorres_split_nothrow_novcg) + apply (rule_tac F="\n. obj_at' (\ap. dom (inv ASIDPool ap) = dom (inv ASIDPool poolKO)) pool + and valid_arch_state'" + in ccorres_mapM_x_while_gen[OF _ _ _ _ _ offset_xf_for_sequence, + where j=1, simplified]) + apply (intro allI impI) + apply (rule ccorres_guard_imp2) + apply (rule_tac xf'="offset___unsigned_long_'" in ccorres_abstract, ceqv) + apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2) + apply (rule ccorres_rhs_assoc) + apply (rule ccorres_Guard_Seq[where F=ArrayBounds]) + apply (rule ccorres_move_c_guard_ap) + apply (rule_tac ccorres_symb_exec_r2) + apply csymbr + apply (rule ccorres_guard_imp) + apply (rule_tac R="\_. 
True" and + R'="\(inv asidpool.ASIDPool poolKO (of_nat n) \ None) \ + (asid_map_get_tag \asid_map = scast asid_map_asid_map_vspace)\ \ + \ret__unsigned_longlong = asid_map_get_tag \asid_map\" + in ccorres_cond_strong) + apply (fastforce simp: upto_enum_word simp del: upt.simps) + apply (ctac (no_vcg) add: invalidateTLBByASID_ccorres) + apply (ctac (no_vcg) add: invalidateASIDEntry_ccorres) + apply wpsimp + apply (rule ccorres_return_Skip) + apply clarsimp + apply assumption + apply (clarsimp split: if_split simp: upto_enum_word simp del: upt.simps) + apply (drule CollectD, assumption) + apply clarsimp + apply vcg + apply vcg + apply (clarsimp simp: meq_def) + apply (clarsimp simp: upto_enum_word simp del: upt.simps simp flip: mask_2pm1) + apply (rule conjI) + apply (clarsimp simp: asid_wf_def) + apply (prop_tac "(of_nat n :: machine_word) \ mask asid_low_bits") + apply (clarsimp simp: less_Suc_eq_le word_le_nat_alt mask_def asid_low_bits_def + unat_of_nat_eq) + apply (subst word_plus_and_or_coroll, word_eqI_solve) + apply (simp add: le_mask_high_bits_len asid_bits_def asid_low_bits_def) + apply (rule conjI) + apply (clarsimp simp : typ_at'_def ko_wp_at'_def obj_at'_def) + apply (rule conjI) + prefer 2 + apply (simp add: mask_def asid_low_bits_def word_less_nat_alt unat_of_nat_eq) + apply (simp split: if_split) + apply normalise_obj_at' + apply (rename_tac ko, case_tac ko, clarsimp, rename_tac pool') + apply (case_tac poolKO, clarsimp, rename_tac orig_pool) + apply (drule (1) asid_pool_at_rf_sr, clarsimp) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: casid_pool_relation_def split: asid_pool_C.splits) + apply (rename_tac c_pool) + apply (clarsimp simp: array_relation_def) + apply (erule_tac x="of_nat n" in allE) + apply (erule impE, fastforce simp: mask_def asid_low_bits_def word_le_nat_alt unat_of_nat_eq) + apply (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def asid_map_tag_defs + split: option.splits asid_map_CL.splits if_splits; + blast) + apply (simp add: asid_low_bits_of_def) + apply (vcg exspec=invalidateASIDEntry_modifies exspec=invalidateTLBByASID_modifies) + apply clarsimp + apply wpsimp + apply (simp add: asid_low_bits_def) + apply ceqv + apply (rule ccorres_Guard_Seq) + apply (rule_tac P="\s. rv= armKSASIDTable (ksArchState s)" and P'=UNIV + in ccorres_split_nothrow_novcg[where xf'=xfdc and r'=dc]) + apply (rule ccorres_from_vcg) + apply (clarsimp simp: simpler_modify_def simp del: fun_upd_apply) + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_def cstate_relation_def cmachine_state_relation_def + carch_state_relation_def carch_globals_def Let_def + ucast_asid_high_bits_is_shift + simp del: fun_upd_apply) + apply (erule array_relation_update, rule refl, simp) + apply (simp add: asid_high_bits_def mask_def) + apply ceqv + apply (rule ccorres_pre_getCurThread) + apply (ctac add: setVMRoot_ccorres) + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply (simp add: pred_conj_def fun_upd_def[symmetric] cur_tcb'_def[symmetric]) + apply (strengthen invs_arch_state' invs_pspace_canonical' invs_valid_objs' invs_asid_update_strg')+ + apply (simp cong: conj_cong) + apply (wpsimp wp: mapM_x_wp') + apply (clarsimp simp: guard_is_UNIV_def asid_wf_table_guard) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: asid_wf_table_guard) + apply normalise_obj_at' + apply fastforce + done + +lemma deleteASID_ccorres: + "ccorres dc xfdc (invs' and K (asid_wf asid \ vs \ 0)) + ({s. asid___unsigned_long_' s = asid} \ {s. 
vspace_' s = Ptr vs}) [] + (deleteASID asid vs) (Call deleteASID_'proc)" + apply (cinit lift: asid___unsigned_long_' vspace_' cong: call_ignore_cong) + apply (rule ccorres_Guard_Seq)+ + apply simp + apply (ctac (no_vcg) add: getPoolPtr_assign_ccorres) + apply wpc + apply ccorres_rewrite + apply (rule ccorres_return_Skip) + apply (rename_tac ap) + apply (rule_tac P="ap \ 0" in ccorres_gen_asm) + apply (clarsimp simp: when_def liftM_def mask_2pm1[symmetric] + cong: conj_cong call_ignore_cong) + apply ccorres_rewrite + apply (rule ccorres_pre_getObject_asidpool) + apply (rename_tac pool) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_move_c_guard_ap) + apply (rule_tac F="\asid_map. casid_map_relation (inv ASIDPool pool (asid && mask asid_low_bits)) asid_map" + and xf'=asid_map_' + and R="ko_at' pool ap" + in ccorres_symb_exec_r_abstract_UNIV[where R'=UNIV]) + apply vcg + apply clarsimp + apply (case_tac pool, clarsimp) + apply (drule (1) asid_pool_at_rf_sr) + apply (clarsimp simp: typ_heap_simps casid_pool_relation_def) + apply (case_tac pool', clarsimp) + apply (clarsimp simp: array_relation_def) + apply (erule allE, erule impE, rule word_and_le1, assumption) + apply ceqv + apply (rename_tac asid_map) + apply csymbr + apply (rule ccorres_if_lhs) + (* ASIDPool entry = Some vs case *) + apply (clarsimp split: option.splits) + apply (rule_tac xf'=ret__int_' and val="1" and R="\" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply (fastforce simp: casid_map_relation_def asid_map_lift_def Let_def asid_map_tag_defs + split: if_splits) + apply ceqv + apply clarsimp + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (prop_tac "vspace_root_CL (asid_map_asid_map_vspace_lift asid_map) = apVSpace x2") + apply (clarsimp simp: casid_map_relation_def Let_def asid_map_asid_map_vspace_lift_def + asid_map_lift_def + split: if_splits) + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply (ctac (no_vcg) add: invalidateTLBByASID_ccorres) + apply (ctac (no_vcg) add: invalidateASIDEntry_ccorres) + apply csymbr + apply (rule ccorres_pre_getObject_asidpool, rename_tac pool) + apply (rule ccorres_move_c_guard_ap) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule_tac P="ko_at' pool ap" in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg, clarsimp) + apply (rule cmap_relationE1[OF rf_sr_cpspace_asidpool_relation], + assumption, erule ko_at_projectKO_opt) + apply (rule bexI [OF _ setObject_eq]; simp add: objBits_simps pageBits_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps' + update_asidpool_map_tos + update_asidpool_map_to_asidpools) + apply (rule cmap_relation_updI, simp_all)[1] + apply (clarsimp simp: casid_pool_relation_def fun_upd_def[symmetric] inv_ASIDPool + split: asidpool.split_asm asid_pool_C.split_asm) + apply (rule conjI) + apply (erule array_relation_update) + apply (simp add: mask_def) + apply (simp add: casid_map_relation_def asid_map_lift_def split: option.splits) + apply (simp add: asid_low_bits_def mask_def) + apply blast + apply (simp add: carch_state_relation_def cmachine_state_relation_def + carch_globals_def update_asidpool_map_tos + typ_heap_simps') + apply (rule ccorres_pre_getCurThread) + apply (ctac add: setVMRoot_ccorres) + apply (simp add: cur_tcb'_def[symmetric]) + apply (strengthen invs_arch_state' invs_pspace_canonical' invs_valid_objs')+ + apply wp + apply (clarsimp simp: 
rf_sr_def guard_is_UNIV_def cstate_relation_def Let_def) + apply (wpsimp wp: hoare_drop_imps) + apply wp + apply (clarsimp simp: guard_is_UNIV_def casid_map_relation_def asid_map_lift_def Let_def + split: if_splits) + (* ASIDPool entry \ Some vs *) + apply (clarsimp split: option.splits) + (* None case *) + apply (rule_tac xf'=ret__int_' and val="0" and R="\" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def asid_map_tag_defs + split: option.splits) + apply ceqv + apply ccorres_rewrite + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def) + (* ASIDPool entry = Some vs', with vs' \ vs *) + apply (rule_tac xf'=ret__int_' and val="1" and R="\" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def asid_map_tag_defs + split: option.splits if_splits) + apply ceqv + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def casid_map_relation_def asid_map_lift_def Let_def + asid_map_tag_defs asid_map_asid_map_vspace_lift_def + split: if_splits) + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp simp: getPoolPtr_def) + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp simp: asid_wf_table_guard) + apply (rule conjI; clarsimp) + apply (clarsimp simp: typ_at'_def) + apply normalise_obj_at' + apply fastforce + apply (drule invs_arch_state') + apply (fastforce simp: valid_arch_state'_def valid_asid_table'_def) + done + +lemma setObject_ccorres_lemma: + fixes val :: "'a :: pspace_storable" shows + "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val) \, s') \ rf_sr},{}; + \s s' val'::'a. \ ko_at' val' ptr s; (s, s') \ rf_sr \ + \ s' \ Q s; + \val :: 'a. updateObject val = updateObject_default val; + \val :: 'a. (1 :: machine_word) < 2 ^ objBits val; + \(val :: 'a) (val' :: 'a). objBits val = objBits val'; + \ \ Q' c UNIV \ + \ ccorres dc xfdc \ Q' hs + (setObject ptr val) c" + apply (rule ccorres_from_vcg_nofail) + apply (rule allI) + apply (case_tac "obj_at' (\x :: 'a. True) ptr \") + apply (rule_tac P'="Q \" in conseqPre, rule conseqPost, assumption) + apply clarsimp + apply (rule bexI [OF _ setObject_eq], simp+) + apply (drule obj_at_ko_at') + apply clarsimp + apply clarsimp + apply (rule conseqPre, erule conseqPost) + apply clarsimp + apply (subgoal_tac "fst (setObject ptr val \) = {}") + apply simp + apply (erule notE, erule_tac s=\ in empty_failD[rotated]) + apply (simp add: setObject_def split_def empty_fail_cond) + apply (rule ccontr) + apply (clarsimp elim!: nonemptyE) + apply (frule use_valid [OF _ obj_at_setObject3[where P=\]], simp_all)[1] + apply (simp add: typ_at_to_obj_at'[symmetric]) + apply (frule(1) use_valid [OF _ setObject_typ_at']) + apply simp + apply simp + apply clarsimp + done + +lemma findVSpaceForASID_nonzero: + "\\\ findVSpaceForASID asid \\rv s. rv \ 0\,-" + unfolding findVSpaceForASID_def + by (wpsimp wp: haskell_assertE_wp) + +lemma ccorres_ul_pre_getObject_pte: + assumes cc: "\rv. ccorres_underlying rf_sr \ (inr_rrel r') xf' (inl_rel r) xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres_underlying rf_sr \ (inr_rrel r') xf' (inl_rel r) xf + (\s. (\pte. ko_at' pte p s \ P pte s)) + {s. \pte pte'. 
cslift s (pte_Ptr p) = Some pte' \ cpte_relation pte pte' \ s \ P' pte} + hs + (getObject p >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_guard_imp2) + apply (rule cc) + apply (rule conjI) + apply (rule_tac Q="ko_at' rv p s" in conjunct1) + apply assumption + apply assumption + apply (wp getPTE_wp empty_fail_getObject | simp)+ + apply clarsimp + apply (erule cmap_relationE1 [OF rf_sr_cpte_relation], + erule ko_at_projectKO_opt) + apply simp + done + +schematic_goal of_nat_maxPTLevel_val: + "of_nat maxPTLevel = numeral ?n" + by (simp add: maxPTLevel_val del: word_eq_numeral_iff_iszero) + +schematic_goal of_nat_ptBitsLeft_maxPTLevel_val: + "of_nat (ptBitsLeft maxPTLevel) = numeral ?n" + by (simp add: maxPTLevel_val ptBitsLeft_def bit_simps del: word_eq_numeral_iff_iszero) + +schematic_goal of_nat_ptTranslationBits_NormalPT_T_val: + "of_nat (ptTranslationBits NormalPT_T) = numeral ?n" + by (simp add: bit_simps del: word_eq_numeral_iff_iszero) + +schematic_goal of_nat_ptTranslationBits_VSRootPT_T_val: + "of_nat (ptTranslationBits VSRootPT_T) = numeral ?n" + by (simp add: bit_simps Kernel_Config.config_ARM_PA_SIZE_BITS_40_def + del: word_eq_numeral_iff_iszero) + +lemmas lookupPTFromLevel_val_helpers = + of_nat_maxPTLevel_val + of_nat_ptBitsLeft_maxPTLevel_val + of_nat_ptTranslationBits_NormalPT_T_val + of_nat_ptTranslationBits_VSRootPT_T_val + +lemma lookupPTFromLevel_ccorres[unfolded lookupPTFromLevel_val_helpers]: + notes Collect_const[simp del] call_ignore_cong[cong] + defines "idx i \ of_nat (ptBitsLeft maxPTLevel) - of_nat (ptTranslationBits NormalPT_T) * i" + defines "levelMask i \ 2 ^ (if i = 0 then unat (of_nat (ptTranslationBits VSRootPT_T)::int_word) + else unat (of_nat (ptTranslationBits NormalPT_T)::int_word)) - 1" + defines "vshift vaddr i \ (vaddr >> unat (idx i)) && levelMask i" + defines "maxPT \ of_nat maxPTLevel" + assumes max: "level \ maxPTLevel" + shows + "ccorres_underlying rf_sr \ + (inr_rrel (\ptSlot ptSlot_C. ptSlot_C = pte_Ptr ptSlot)) ptSlot_' (inl_rrel dc) xfdc + (\s. 
page_table_at' (levelType level) pt s \ + gsPTTypes (ksArchState s) pt = Some (levelType (level))) + (\\i = of_nat (maxPTLevel - level)\ \ \\pt = pte_Ptr pt\) + [SKIP] + (lookupPTFromLevel level pt vaddr target_pt) + (WHILE \i < maxPT \ \pt \ pte_Ptr target_pt DO + Guard ShiftError \idx \i < 0x40\ + (Guard MemorySafety + \vshift vaddr \i = 0 \ + array_assertion \pt (unat (vshift vaddr \i)) (hrs_htd \t_hrs)\ + (\ptSlot :== \pt +\<^sub>p uint (vshift vaddr \i)));; + \ret__unsigned_long :== CALL pte_pte_table_ptr_get_present(\ptSlot);; + IF \ret__unsigned_long = 0 THEN + return_void_C + FI;; + \ret__unsigned_longlong :== CALL pte_pte_table_ptr_get_pt_base_address(\ptSlot);; + \ret__ptr_to_void :== CALL ptrFromPAddr(\ret__unsigned_longlong);; + \pt :== PTR_COERCE(unit \ pte_C) \ret__ptr_to_void;; + \i :== \i + 1 + OD;; + IF \pt \ pte_Ptr target_pt THEN + return_void_C + FI)" +using max +proof (induct level arbitrary: pt) + case 0 + show ?case + apply (subst lookupPTFromLevel.simps) + apply (simp add: 0 whileAnno_def maxPT_def maxPTLevel_def) + apply (rule ccorres_assertE) + apply (rule ccorres_expand_while_iff_Seq[THEN iffD1]) + apply (subst Int_commute) + apply (cinitlift i_') + apply (simp add: throwError_def) + apply ccorres_rewrite + apply (cinitlift pt_') + apply (rule ccorres_guard_imp) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (simp add: return_def) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply simp + apply simp + done +next + case (Suc level) + then + have level: "level < maxPTLevel" by simp + then + have [simp]: "word_of_nat maxPTLevel - (1 + of_nat level) < maxPT" (is "?i < maxPT") + by (cases "config_ARM_PA_SIZE_BITS_40"; + simp add: maxPTLevel_def maxPT_def unat_arith_simps unat_of_nat) + + from level + have [simp]: "idx ?i < 0x40" + by (simp add: idx_def maxPT_def maxPTLevel_def unat_word_ariths unat_arith_simps unat_of_nat + bit_simps ptBitsLeft_def + split: if_splits) + + from level + have [simp]: "pt + vshift vaddr ?i * 8 = ptSlotIndex (Suc level) pt vaddr" + apply (simp add: ptSlotIndex_def ptIndex_def vshift_def levelMask_def ptBitsLeft_def + ptTranslationBits_def levelType_def idx_def pte_bits_def word_size_bits_def + mask_def unat_word_ariths unat_of_nat maxPTLevel_def shiftl_t2n pageBits_def) + apply (cases config_ARM_PA_SIZE_BITS_40; + fastforce dest!: word_unat_eq_iff[THEN iffD1] + simp: unat_of_nat_eq ptTranslationBits_def + split: if_splits) + done + + from level + have level_NormalPT_T: "levelType level = NormalPT_T" + by (simp add: levelType_def) + + show ?case + supply if_cong[cong] option.case_cong[cong] + apply (simp add: Suc(2) lookupPTFromLevel.simps whileAnno_def cong: if_weak_cong) + apply (rule ccorres_assertE) + apply (rule ccorres_expand_while_iff_Seq[THEN iffD1]) + apply (cinitlift i_' pt_') + apply (rename_tac pt' i) + apply ccorres_rewrite + apply (simp add: liftE_bindE) + apply (rule ccorres_guard_imp) + supply ccorres_prog_only_cong[cong] + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_ul_pre_getObject_pte, rename_tac pte) + apply (rule ccorres_move_array_assertion_pt_gen[where pt_t="levelType (Suc level)"]) + apply csymbr + apply (rule_tac xf'=ret__unsigned_long_' and + val="from_bool (isPageTablePTE pte)" and + R'=UNIV and + R="\s. 
ko_at' pte (ptSlotIndex (Suc level) pt vaddr) s" + in ccorres_symb_exec_r_known_rv) + apply (clarsimp, rule conseqPre, vcg, clarsimp) + apply (drule (1) pte_at_rf_sr) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: cpte_relation_def Let_def isPageTablePTE_def pte_pte_table_lift + pte_pte_table_lift_def + split: pte.splits if_splits) + apply ceqv + apply (simp add: from_bool_0) + apply (rule ccorres_Cond_rhs_Seq) + apply ccorres_rewrite + apply simp + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (simp add: return_def throwError_def) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply simp + apply (rule_tac P="getPPtrFromPTE pte = target_pt" in ccorres_cases; simp) + apply (rule_tac xf'=ret__unsigned_longlong_' and + val="ptePPN pte << pageBits" and + R'=UNIV and + R="\s. ko_at' pte (ptSlotIndex (Suc level) pt vaddr) s" + in ccorres_symb_exec_r_known_rv) + apply (clarsimp, rule conseqPre, vcg, clarsimp) + apply (drule (1) pte_at_rf_sr) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: cpte_relation_def Let_def isPageTablePTE_def2 + pte_pte_table_lift pte_pte_table_lift_def) + apply ceqv + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_expand_while_iff_Seq[THEN iffD1]) + apply (rule_tac P'="P' \ \ \pt = pte_Ptr (getPPtrFromPTE pte) \" for P' in ccorres_inst) + apply (cinitlift pt_') + apply ccorres_rewrite + apply (rule ccorres_inst[where P=\ and + P'="\ \ptSlot = pte_Ptr (pt + vshift vaddr ?i * 8) \"]) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg, clarsimp) + apply (clarsimp simp: returnOk_def return_def) + apply vcg + (* getPPtrFromPTE pte \ target_pt *) + apply (rule ccorres_checkPTAt) + apply (rule_tac xf'=ret__unsigned_longlong_' and + val="ptePPN pte << pageBits" and + R'=UNIV and + R="\s. ko_at' pte (ptSlotIndex (Suc level) pt vaddr) s" + in ccorres_symb_exec_r_known_rv) + apply (clarsimp, rule conseqPre, vcg, clarsimp) + apply (drule (1) pte_at_rf_sr) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: cpte_relation_def Let_def isPageTablePTE_def2 + pte_pte_table_lift pte_pte_table_lift_def) + apply ceqv + apply csymbr + apply csymbr + apply csymbr + apply (rule Suc.hyps[unfolded whileAnno_def]) + using level apply simp + apply vcg + apply vcg + apply (clarsimp simp: level_NormalPT_T) + apply (clarsimp simp: typ_heap_simps) + using level + apply (fastforce dest!: word_unat_eq_iff[THEN iffD1] + intro!: word_and_le' unat_le_helper + simp: unat_of_nat_eq levelMask_def levelType_def maxPTLevel_val bit_simps + word_neq_0_conv word_less_nat_alt vshift_def + getPPtrFromPTE_def isPageTablePTE_def2 isPagePTE_def + split: if_split) + done +qed + +(* FIXME AARCH64 future work: wrap UPT_LEVELS and other possible magic numbers in enums in C so + that we can abstract the address space sizes / page table levels *) +lemma unmapPageTable_ccorres: + "ccorres dc xfdc (invs' and page_table_at' NormalPT_T ptPtr and (\s. asid_wf asid)) + (\\asid___unsigned_long = asid\ \ \\vptr = vaddr\ \ \\target_pt = pte_Ptr ptPtr\) + [] (unmapPageTable asid vaddr ptPtr) (Call unmapPageTable_'proc)" + supply ccorres_prog_only_cong[cong] Collect_const[simp del] ccorres_dc_comp[simp] + apply (rule ccorres_gen_asm) + apply (rule ccorres_guard_imp[where Q'="\\vptr = vaddr\ \ \\target_pt = pte_Ptr ptPtr\ \ + \\asid___unsigned_long = asid\ \ \\vptr = vaddr\ \ + \\target_pt = pte_Ptr ptPtr\" and + Q=A and A=A for A]; simp?) 
+ supply if_cancel[simp del] + apply (cinit lift: asid___unsigned_long_' vptr_' target_pt_') + apply (clarsimp simp add: ignoreFailure_liftM) + apply (ctac add: findVSpaceForASID_ccorres,rename_tac vspace find_ret) + prefer 2 + apply ccorres_rewrite + apply (clarsimp simp: throwError_def) + apply (rule ccorres_return_void_C) + (* In C, these come out as either 9 or 10, depending on config_ARM_PA_SIZE_BITS_40 *) + apply (prop_tac "\i::machine_word. 0 \s (if i = 0 then of_nat (ptTranslationBits VSRootPT_T) + else (9::int_word))") + apply (simp add: of_nat_ptTranslationBits_VSRootPT_T_val split: if_split) + apply (prop_tac "\i::machine_word. (if i = 0 then of_nat (ptTranslationBits VSRootPT_T) + else (9::int_word)) UNIV (SKIP # hs) + (return NullCap) (\ret__struct_cap_C :== CALL cap_null_cap_new() + ;; return_C ret__struct_cap_C_'_update ret__struct_cap_C_')" + apply (rule ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp add: ccap_relation_NullCap_iff return_def) + done + +lemma no_0_page_table_at'[elim!]: + "\ page_table_at' pt_t 0 s; no_0_obj' s \ \ P" + apply (clarsimp simp: page_table_at'_def) + apply (drule spec[where x=0], clarsimp simp: bit_simps) + done + +lemma isFinalCapability_ccorres: + "ccorres ((=) \ from_bool) ret__unsigned_long_' + (cte_wp_at' ((=) cte) slot and invs') + (UNIV \ {s. cte_' s = Ptr slot}) [] + (isFinalCapability cte) (Call isFinalCapability_'proc)" + apply (cinit lift: cte_') + apply (rule ccorres_Guard_Seq) + apply (simp add: Let_def del: Collect_const) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'="mdb_'" in ccorres_abstract) + apply ceqv + apply (rule_tac P="mdb_node_to_H (mdb_node_lift rv') = cteMDBNode cte" in ccorres_gen_asm2) + apply csymbr + apply (rule_tac r'="(=) \ from_bool" and xf'="prevIsSameObject_'" + in ccorres_split_nothrow_novcg) + apply (rule ccorres_cond2[where R=\]) + apply (clarsimp simp: Collect_const_mem nullPointer_def) + apply (simp add: mdbPrev_to_H[symmetric]) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (simp add: return_def) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_l[OF _ getCTE_inv getCTE_wp empty_fail_getCTE]) + apply (rule_tac P="cte_wp_at' ((=) cte) slot + and cte_wp_at' ((=) rv) (mdbPrev (cteMDBNode cte)) + and valid_cap' (cteCap rv) + and K (capAligned (cteCap cte) \ capAligned (cteCap rv))" + and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def mdbPrev_to_H[symmetric]) + apply (simp add: rf_sr_cte_at_validD) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule cmap_relationE1 [OF cmap_relation_cte], assumption+, + simp?, simp add: typ_heap_simps)+ + apply (drule ccte_relation_ccap_relation)+ + apply (rule exI, rule conjI, assumption)+ + apply (auto)[1] + apply ceqv + apply (clarsimp simp del: Collect_const) + apply (rule ccorres_cond2[where R=\]) + apply (simp add: from_bool_0 Collect_const_mem) + apply (rule ccorres_return_C, simp+)[1] + apply csymbr + apply (rule ccorres_cond2[where R=\]) + apply (simp add: nullPointer_def Collect_const_mem mdbNext_to_H[symmetric]) + apply (rule ccorres_return_C, simp+)[1] + apply (rule ccorres_symb_exec_l[OF _ getCTE_inv getCTE_wp empty_fail_getCTE]) + apply (rule_tac P="cte_wp_at' ((=) cte) slot + and cte_wp_at' ((=) rva) (mdbNext (cteMDBNode cte)) + and K (capAligned (cteCap rva) \ capAligned (cteCap cte)) + and valid_cap' (cteCap cte)" + and P'=UNIV in ccorres_from_vcg_throws) + apply (rule 
allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def from_bool_eq_if from_bool_0 + mdbNext_to_H[symmetric] rf_sr_cte_at_validD) + apply (clarsimp simp: cte_wp_at_ctes_of split: if_split) + apply (rule cmap_relationE1 [OF cmap_relation_cte], assumption+, + simp?, simp add: typ_heap_simps)+ + apply (drule ccte_relation_ccap_relation)+ + apply (auto simp: from_bool_def split: bool.splits)[1] + apply (wp getCTE_wp') + apply (clarsimp simp add: guard_is_UNIV_def Collect_const_mem) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: Collect_const_mem) + apply (frule(1) rf_sr_cte_at_validD, simp add: typ_heap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) + apply (simp add: typ_heap_simps) + apply (clarsimp simp add: ccte_relation_def map_option_Some_eq2) + by (auto, + auto dest!: ctes_of_valid' [OF _ invs_valid_objs'] + elim!: valid_capAligned) + +lemmas cleanup_info_wf'_simps[simp] = cleanup_info_wf'_def[split_simps capability.split] + +lemma cteDeleteOne_ccorres: + "ccorres dc xfdc + (invs' and cte_wp_at' (\ct. w = -1 \ cteCap ct = NullCap + \ (\cap'. ccap_relation (cteCap ct) cap' \ cap_get_tag cap' = w)) slot) + ({s. gs_get_assn cteDeleteOne_'proc (ghost'state_' (globals s)) = w} + \ {s. slot_' s = Ptr slot}) [] + (cteDeleteOne slot) (Call cteDeleteOne_'proc)" + unfolding cteDeleteOne_def + apply (rule ccorres_symb_exec_l' + [OF _ getCTE_inv getCTE_sp empty_fail_getCTE]) + apply (cinit' lift: slot_' cong: call_ignore_cong) + apply (rule ccorres_move_c_guard_cte) + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply csymbr + apply (rule ccorres_gen_asm2, + erule_tac t="ret__unsigned_longlong = scast cap_null_cap" + and s="cteCap cte = NullCap" + in ssubst) + apply (clarsimp simp only: when_def unless_def) + apply (rule ccorres_cond2[where R=\]) + apply (clarsimp simp: Collect_const_mem) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (rule ccorres_Guard_Seq) + apply (rule ccorres_basic_srnoop) + apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot]) + apply (rule_tac A="invs' and cte_wp_at' ((=) cte) slot" + in ccorres_guard_imp2[where A'=UNIV]) + apply (simp add: split_def + del: Collect_const) + apply (rule ccorres_move_c_guard_cte) + apply (ctac(no_vcg) add: finaliseCap_True_standin_ccorres) + apply (rule ccorres_assert) + apply simp + apply csymbr + apply (ctac add: emptySlot_ccorres) + apply (simp add: pred_conj_def finaliseCapTrue_standin_simple_def) + apply (strengthen invs_mdb_strengthen' invs_urz) + apply (wp typ_at_lifts isFinalCapability_inv + | strengthen invs_valid_objs')+ + apply (clarsimp simp: irq_opt_relation_def invs_pspace_aligned' cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps ccte_relation_ccap_relation ccap_relation_NullCap_iff) + apply (wp isFinalCapability_inv) + apply simp + apply (simp del: Collect_const) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: Collect_const_mem cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap + dest!: ccte_relation_ccap_relation) + apply (auto simp: o_def) + done + +lemma getIRQSlot_ccorres_stuff: + "\ (s, s') \ rf_sr \ \ + CTypesDefs.ptr_add intStateIRQNode_Ptr (uint (irq :: irq_len word)) + = Ptr (irq_node' s + 2 ^ cte_level_bits * ucast irq)" + apply (clarsimp simp add: rf_sr_def cstate_relation_def Let_def + 
cinterrupt_relation_def) + apply (simp add: objBits_simps cte_level_bits_def + size_of_def mult.commute mult.left_commute of_int_uint_ucast ) + done + +lemma deletingIRQHandler_ccorres: + "ccorres dc xfdc (invs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)) + ({s. irq_opt_relation (Some irq) (irq_' s)}) [] + (deletingIRQHandler irq) (Call deletingIRQHandler_'proc)" + apply (cinit lift: irq_' cong: call_ignore_cong) + apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def + cong: call_ignore_cong ) + apply (rule_tac r'="\rv rv'. rv' = Ptr rv" + and xf'="slot_'" in ccorres_split_nothrow) + apply (rule ccorres_Guard_intStateIRQNode_array_Ptr) + apply (rule ccorres_move_array_assertion_irq) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: getIRQSlot_def liftM_def getInterruptState_def + locateSlot_conv) + apply (simp add: bind_def simpler_gets_def return_def getIRQSlot_ccorres_stuff[simplified]) + apply ceqv + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_symb_exec_r) + apply (ctac add: cteDeleteOne_ccorres[where w="scast cap_notification_cap"]) + apply vcg + apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def + gs_set_assn_Delete_cstate_relation[unfolded o_def]) + apply (wp getCTE_wp' | simp add: getSlotCap_def getIRQSlot_def locateSlot_conv + getInterruptState_def)+ + apply vcg + apply (clarsimp simp: cap_get_tag_isCap ghost_assertion_data_get_def + ghost_assertion_data_set_def) + apply (simp add: cap_tag_defs) + apply (clarsimp simp: cte_wp_at_ctes_of Collect_const_mem + irq_opt_relation_def Kernel_C.maxIRQ_def) + apply (drule word_le_nat_alt[THEN iffD1]) + apply (clarsimp simp: uint_0_iff unat_gt_0 uint_up_ucast is_up) + done + +(* 6 = wordRadix, + 5 = tcb_cnode_radix + 1, + 7 = wordRadix+1*) +lemma Zombie_new_spec: + "\s. \\ ({s} \ {s. type_' s = 64 \ type_' s < 63}) Call Zombie_new_'proc + {s'. 
cap_zombie_cap_lift (ret__struct_cap_C_' s') = + \ capZombieID_CL = \<^bsup>s\<^esup>ptr && ~~ mask (if \<^bsup>s\<^esup>type = (1 << 6) then 5 else unat (\<^bsup>s\<^esup>type + 1)) + || \<^bsup>s\<^esup>number___unsigned_long && mask (if \<^bsup>s\<^esup>type = (1 << 6) then 5 else unat (\<^bsup>s\<^esup>type + 1)), + capZombieType_CL = \<^bsup>s\<^esup>type && mask 7 \ + \ cap_get_tag (ret__struct_cap_C_' s') = scast cap_zombie_cap}" + apply vcg + apply (clarsimp simp: word_sle_def) + apply (simp add: mask_def word_log_esimps[where 'a=machine_word_len, simplified]) + apply clarsimp + apply (simp add: word_add_less_mono1[where k=1 and j="0x3F", simplified]) + done + +lemma ccap_relation_IRQHandler_mask: + "\ ccap_relation acap ccap; isIRQHandlerCap acap \ + \ capIRQ_CL (cap_irq_handler_cap_lift ccap) && mask 9 + = capIRQ_CL (cap_irq_handler_cap_lift ccap)" + apply (simp only: cap_get_tag_isCap[symmetric]) + apply (drule ccap_relation_c_valid_cap) + apply (simp add: c_valid_cap_def cap_irq_handler_cap_lift cl_valid_cap_def) + done + +lemma option_to_ctcb_ptr_not_0: + "\ tcb_at' p s; option_to_ctcb_ptr v = tcb_ptr_to_ctcb_ptr p\ \ v = Some p" + apply (clarsimp simp: option_to_ctcb_ptr_def tcb_ptr_to_ctcb_ptr_def + split: option.splits) + apply (frule tcb_aligned') + apply (frule_tac y=ctcb_offset and n=tcbBlockSizeBits in aligned_offset_non_zero) + apply (clarsimp simp: ctcb_offset_defs objBits_defs)+ + done + +lemma update_tcb_map_to_tcb: + "map_to_tcbs ((ksPSpace s)(p \ KOTCB tcb)) = (map_to_tcbs (ksPSpace s))(p \ tcb)" + by (rule ext, clarsimp simp: map_comp_def split: if_split) + +lemma ep_queue_relation_shift2: + "(option_map2 tcbEPNext_C (f (cslift s)) = option_map2 tcbEPNext_C (cslift s) + \ option_map2 tcbEPPrev_C (f (cslift s)) = option_map2 tcbEPPrev_C (cslift s)) + \ ep_queue_relation (f (cslift s)) ts qPrev qHead + = ep_queue_relation (cslift s) ts qPrev qHead" + apply (induct ts arbitrary: qPrev qHead; clarsimp) + apply (simp add: option_map2_def fun_eq_iff map_option_case) + apply (drule_tac x=qHead in spec)+ + apply (clarsimp split: option.split_asm) + done + +lemma cendpoint_relation_udpate_arch: + "\ cslift x p = Some tcb ; cendpoint_relation (cslift x) v v' \ + \ cendpoint_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" + apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def + split: endpoint.splits) + apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) + apply (safe ; case_tac "xa = p" ; clarsimp simp: option_map2_def map_option_case) + apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) + apply (safe ; case_tac "xa = p" ; clarsimp simp: option_map2_def map_option_case) + done + +lemma cnotification_relation_udpate_arch: + "\ cslift x p = Some tcb ; cnotification_relation (cslift x) v v' \ + \ cnotification_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" + apply (clarsimp simp: cnotification_relation_def Let_def tcb_queue_relation'_def + split: notification.splits ntfn.splits) + apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) + apply (safe ; case_tac "xa = p" ; clarsimp simp: option_map2_def map_option_case) + done + +(* FIXME AARCH64 slow, uses unfolding of state relation for typ_heap_simps, the vcpuUpdate or + equivalent approach for TCB would be better *) +lemma archThreadSet_tcbVCPU_Basic_ccorres: + "ccorres dc xfdc \ UNIV hs + (archThreadSet (atcbVCPUPtr_update (\_. vcpuptr)) tptr) + ((Basic (\s. 
globals_update( t_hrs_'_update + (hrs_mem_update (heap_update (Ptr &((atcb_Ptr &(tcb_ptr_to_ctcb_ptr tptr\[''tcbArch_C'']))\[''tcbVCPU_C''])) + (option_to_ptr vcpuptr :: vcpu_C ptr)))) s)))" + apply (simp add: archThreadSet_def) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_pre_getObject_tcb) + apply (rule_tac P="tcb_at' tptr and ko_at' tcb tptr" and P'=UNIV in setObject_ccorres_helper) + apply (simp_all add: objBits_simps' obj_tcb_at') + apply (rule conseqPre, vcg, clarsimp) + apply (rule cmap_relationE1[OF cmap_relation_tcb], assumption, erule ko_at_projectKO_opt) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def update_tcb_map_tos + typ_heap_simps' cpspace_relation_def) + apply (safe ; (clarsimp simp: cpspace_relation_def typ_heap_simps + carch_state_relation_def Let_def + update_tcb_map_to_tcb + cmachine_state_relation_def + update_tcb_map_tos)?) + apply (subst map_to_ctes_upd_tcb_no_ctes; simp add: tcb_cte_cases_def cteSizeBits_def) + apply (erule cmap_relation_updI, erule ko_at_projectKO_opt, simp+) + apply (clarsimp simp: ctcb_relation_def carch_tcb_relation_def ccontext_relation_def + atcbContextGet_def) + apply clarsimp + apply (rule cmap_relation_rel_upd[OF _ cendpoint_relation_udpate_arch], simp+) + apply (rule cmap_relation_rel_upd[OF _ cnotification_relation_udpate_arch], simp+) + apply (clarsimp simp: cvariable_relation_upd_const) + done + +lemma setObject_vcpuTCB_updated_Basic_ccorres: + "ccorres dc xfdc (ko_at' (vcpuTCBPtr_update t vcpu) vcpuptr) UNIV hs + (setObject vcpuptr (vcpuTCBPtr_update (\_. tptr) vcpu)) + ((Basic (\s. globals_update( t_hrs_'_update + (hrs_mem_update (heap_update (Ptr &(vcpu_Ptr vcpuptr\[''vcpuTCB_C''])) + (option_to_ctcb_ptr tptr :: tcb_C ptr)))) s)))" + apply (rule ccorres_guard_imp2) + apply (rule_tac P="ko_at' (vcpuTCBPtr_update t vcpu) vcpuptr" and P'=UNIV in setObject_ccorres_helper) + apply (simp_all add: objBits_simps pageBits_def obj_tcb_at' vcpuBits_def) + apply (rule conseqPre, vcg, clarsimp) + apply (rule cmap_relationE1[OF cmap_relation_vcpu], assumption, erule ko_at_projectKO_opt) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps' + cpspace_relation_def update_vcpu_map_tos) + apply (safe ; (clarsimp simp: cpspace_relation_def typ_heap_simps carch_state_relation_def Let_def + update_vcpu_map_to_vcpu cmachine_state_relation_def update_vcpu_map_tos)?) + apply (erule cmap_relation_updI, erule ko_at_projectKO_opt, simp+) + apply (clarsimp simp add: cvcpu_relation_def cvcpu_vppi_masked_relation_def option_to_ctcb_ptr_def + cvcpu_regs_relation_def Let_def vcpuSCTLR_def) + apply simp + done + +lemma vcpuTCBPtr_update_trivial: + "vcpuTCBPtr_update id vcpu = vcpu" + by (cases vcpu) + clarsimp + +lemmas setObject_vcpuTCB_Basic_ccorres = + setObject_vcpuTCB_updated_Basic_ccorres[where t=id, simplified vcpuTCBPtr_update_trivial] + +lemma modify_armHSCurVCPU_split: + "modifyArchState (armHSCurVCPU_update (\_. p)) = do modifyArchState (armHSCurVCPU_update f); + modifyArchState (armHSCurVCPU_update (\_. p)) + od" + apply (clarsimp simp: modifyArchState_def modify_modify) + apply (rule ext) + apply (rule_tac f="\t. modify t s" for s in arg_cong) + apply (rule ext) + apply (case_tac "ksArchState s") + apply clarsimp + done + +lemma modify_armHSCurVCPU_when_split: + "modifyArchState (armHSCurVCPU_update (\_. p)) = do when P (modifyArchState (armHSCurVCPU_update f)); + modifyArchState (armHSCurVCPU_update (\_. 
p)) + od" + apply (cases P; clarsimp) + apply (subst modify_armHSCurVCPU_split[of _ f]) + apply simp + done + +lemma modifyArchState_armHSCurVCPU_Skip: + "ccorres dc xfdc (\s. armHSCurVCPU (ksArchState s) = curvcpu) UNIV hs + (modifyArchState (armHSCurVCPU_update (\_. curvcpu))) SKIP" + apply (clarsimp simp: modifyArchState_def) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def) + done + +(* FIX ARMHYP MOVE *) +lemma armHSCurVCPU_update_active_false_ccorres: + "ccorres dc xfdc \ UNIV hs + (modifyArchState (armHSCurVCPU_update (case_option None (\(a, _). Some (a, False))))) + (Basic (\s. globals_update (armHSVCPUActive_'_update (\_. scast false)) s))" + apply (clarsimp simp: modifyArchState_def) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: cmachine_state_relation_def) + apply (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def) + apply (case_tac "armHSCurVCPU (ksArchState \)"; clarsimp) + done + +lemma armHSCurVCPU_update_curv_Null_ccorres: + "ccorres dc xfdc (\s. case armHSCurVCPU (ksArchState s) of None \ True | Some (a,b) \ \ b) UNIV hs + (modifyArchState (armHSCurVCPU_update Map.empty)) + (Basic (\s. globals_update (armHSCurVCPU_'_update (\_. NULL)) s))" + apply (clarsimp simp: modifyArchState_def) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def + cmachine_state_relation_def + split: bool.split option.splits) + done + +lemma vcpuInvalidateActive_ccorres: + "ccorres dc xfdc invs' UNIV hs + vcpuInvalidateActive + (Call vcpu_invalidate_active_'proc)" + apply cinit + apply (rule ccorres_pre_getCurVCPU) + apply (subst modify_armHSCurVCPU_when_split) + apply (subst bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply (rule_tac Q="\s. (armHSCurVCPU \ ksArchState) s = hsCurVCPU" + and Q'=UNIV + and C'="{s. \ t. hsCurVCPU = Some t \ snd t}" + in ccorres_rewrite_cond_sr) + apply clarsimp + apply (frule rf_sr_ksArchState_armHSCurVCPU) + apply (case_tac "\ t. (armHSCurVCPU \ ksArchState) s = Some t \ snd t") + apply (clarsimp simp: cur_vcpu_relation_def) + apply (clarsimp simp: cur_vcpu_relation_def) + apply (case_tac "(armHSCurVCPU \ ksArchState) s"; clarsimp) + apply (rule_tac a=" _ >>= (\_. when (hsCurVCPU \ None \ snd (the hsCurVCPU)) + (modifyArchState(armHSCurVCPU_update + (\a. case a of None \ None + | Some (a,_) \ Some (a, False)))))" + in match_ccorres) + apply (wpc; clarsimp ; ccorres_rewrite) + apply (rule ccorres_return_Skip) + apply (rule_tac Q="\s. 
(b \ _ s) \ (\ b \ _ s)" in ccorres_guard_imp) + apply (case_tac b) + apply clarsimp + apply ccorres_rewrite + apply (rule ccorres_guard_imp) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply (ctac add: vcpu_disable_ccorres) + apply ceqv + apply (rule armHSCurVCPU_update_active_false_ccorres) + apply wp + apply (vcg exspec=vcpu_disable_modifies) + apply clarsimp + apply assumption + apply simp + apply simp + apply ccorres_rewrite + apply (rule ccorres_return_Skip) + apply clarsimp + apply assumption + apply simp + apply ceqv + apply clarsimp + apply (rule armHSCurVCPU_update_curv_Null_ccorres) + apply (wpsimp simp: modifyArchState_def if_apply_def2) + apply (vcg exspec=vcpu_disable_modifies) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (rule UNIV_I) + done + +lemma sanitiseSetRegister_ccorres: + "\ val = val'; reg' = register_from_H reg\ \ + ccorres dc xfdc (tcb_at' tptr) UNIV hs + (asUser tptr (setRegister reg (local.sanitiseRegister False reg val))) + (\unsigned_long_eret_2 :== CALL sanitiseRegister(reg',val',scast false);; + CALL setRegister(tcb_ptr_to_ctcb_ptr tptr,reg',\unsigned_long_eret_2))" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_r) + apply (ctac add: setRegister_ccorres) + apply (vcg) + apply (rule conseqPre, vcg) + apply (fastforce simp: sanitiseRegister_def C_register_defs split: register.splits) + by (auto simp: sanitiseRegister_def from_bool_def simp del: Collect_const + split: register.splits bool.splits) + +lemma dissociateVCPUTCB_ccorres: + "ccorres dc xfdc + (invs' and (tcb_at' tptr or vcpu_at' vcpuptr)) + ({s. tcb_' s = tcb_ptr_to_ctcb_ptr tptr} \ {s. vcpu_' s = vcpu_Ptr vcpuptr }) hs + (dissociateVCPUTCB vcpuptr tptr) (Call dissociateVCPUTCB_'proc)" + apply (cinit lift: tcb_' vcpu_') + apply (rule ccorres_pre_archThreadGet, rename_tac tcbVCPU) + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_rhs_assoc2) + apply clarsimp + apply (rule_tac xf'=ret__int_' + and R="(\s. \tcb. ko_at' tcb tptr s \ (atcbVCPUPtr o tcbArch) tcb = tcbVCPU) and + no_0_obj' and + ko_at' vcpu vcpuptr" + and val="from_bool (tcbVCPU = Some vcpuptr \ vcpuTCBPtr vcpu \ Some tptr)" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply vcg + apply clarsimp + apply (frule cmap_relation_vcpu) + apply (frule cmap_relation_tcb) + apply (erule cmap_relationE1) + apply (erule ko_at_projectKO_opt, rename_tac cvcpu) + apply (erule cmap_relationE1) + apply (erule ko_at_projectKO_opt, rename_tac ctcb) + apply (clarsimp simp add: typ_heap_simps) + apply (rule conjI) + apply (case_tac "tcbVCPU_C (tcbArch_C ctcb) \ vcpu_Ptr vcpuptr"; + clarsimp simp add: ctcb_relation_def carch_tcb_relation_def) + apply (case_tac "tcbVCPU_C (tcbArch_C ctcb) \ vcpu_Ptr vcpuptr"; + case_tac "vcpuTCB_C cvcpu \ tcb_ptr_to_ctcb_ptr tptr"; + clarsimp simp add: cvcpu_relation_def + ctcb_relation_def + carch_tcb_relation_def + option_to_ctcb_ptr_def[of "Some tptr"] + from_bool_0) + apply (frule_tac p=vcpuptr in option_to_ptr_not_0[OF ko_at'_not_NULL];simp?) + apply (frule_tac p=tptr in option_to_ctcb_ptr_not_0[OF obj_tcb_at']; simp?) 
+ apply ceqv + apply (case_tac "tcbVCPU = Some vcpuptr \ vcpuTCBPtr vcpu \ Some tptr") + apply simp + apply (rule ccorres_fail') + apply simp + apply ccorres_rewrite + apply (rule ccorres_pre_getCurVCPU) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply wpc + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (simp add: split_def) + apply (rule_tac R="\s. Some x2 = (armHSCurVCPU \ ksArchState) s" in ccorres_when) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cur_vcpu_relation_def + dest!: sym[where s="Some x" for x]) + apply (simp add: eq_commute) + apply (ctac add: vcpuInvalidateActive_ccorres) + apply ceqv + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply (rule archThreadSet_tcbVCPU_Basic_ccorres[of _ None, simplified]) + apply ceqv + apply (rule ccorres_move_c_guard_vcpu) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply (rule setObject_vcpuTCB_Basic_ccorres[of _ _ _ None, simplified option_to_ctcb_ptr_def, simplified]) + apply ceqv + apply (subst asUser_bind_distrib; simp) + apply (rule ccorres_split_nothrow[where r'="(=)" and xf'=ret__unsigned_long_']) + apply (ctac add: getRegister_ccorres) + apply ceqv + apply (erule sanitiseSetRegister_ccorres, simp) + apply wpsimp + apply vcg + apply (wpsimp wp: hoare_drop_imp setObject_vcpu_valid_objs') + apply vcg + apply wpsimp + apply vcg + apply (wpsimp simp: valid_arch_tcb'_def) + apply wpsimp + apply (vcg exspec=vcpu_invalidate_active_modifies) + apply vcg + apply clarsimp + apply (safe ; (clarsimp simp: valid_arch_tcb'_def + typ_at_to_obj_at_arches obj_at'_def + obj_at'_def invs_valid_objs' + split: if_split + | fastforce + | clarsimp simp: ctcb_relation_def carch_tcb_relation_def typ_heap_simps' + cur_vcpu_relation_def invs_no_0_obj' c_typ_rewrs)+) + done + +lemma dissociateVCPUTCB_tcb_ccorres: + "ccorres dc xfdc + (invs' and tcb_at' tptr ) + ({s. tcb_' s = tcb_ptr_to_ctcb_ptr tptr } \ {s. vcpu_' s = vcpu_Ptr vcpuptr }) hs + (dissociateVCPUTCB vcpuptr tptr) (Call dissociateVCPUTCB_'proc)" + by (rule ccorres_guard_imp2[OF dissociateVCPUTCB_ccorres]) clarsimp + +lemma dissociateVCPUTCB_vcpu_ccorres: + "ccorres dc xfdc + (invs' and vcpu_at' vcpuptr ) + ({s. tcb_' s = tcb_ptr_to_ctcb_ptr tptr } \ {s. vcpu_' s = vcpu_Ptr vcpuptr }) hs + (dissociateVCPUTCB vcpuptr tptr) (Call dissociateVCPUTCB_'proc)" + by (rule ccorres_guard_imp2[OF dissociateVCPUTCB_ccorres]) clarsimp + +lemma associateVCPUTCB_ccorres: + "ccorres dc xfdc + (invs' and tcb_at' tptr and vcpu_at' vcpuptr) + ({s. tcb_' s = tcb_ptr_to_ctcb_ptr tptr } \ {s. vcpu_' s = vcpu_Ptr vcpuptr }) hs + (associateVCPUTCB vcpuptr tptr) (Call associateVCPUTCB_'proc)" + apply (cinit lift: tcb_' vcpu_') + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_pre_archThreadGet, rename_tac tcbVCPU) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply (rule_tac Q="(\s. \tcb. ko_at' tcb tptr s \ (atcbVCPUPtr o tcbArch) tcb = tcbVCPU) and + no_0_obj' and + valid_objs'" + and Q'=UNIV + and C'="{s. 
(tcbVCPU \ None) }" + in ccorres_rewrite_cond_sr) + apply clarsimp + apply (rename_tac tcb') + apply (frule cmap_relation_tcb) + apply (erule cmap_relationE1) + apply (erule ko_at_projectKO_opt, rename_tac ctcb) + apply (clarsimp simp: typ_heap_simps') + apply (case_tac "atcbVCPUPtr (tcbArch tcb') \ None") + apply (clarsimp simp add: ctcb_relation_def carch_tcb_relation_def) + apply(frule valid_objs_valid_tcb', simp) + apply (clarsimp simp: valid_tcb'_def valid_arch_tcb'_def) + apply (clarsimp simp: ctcb_relation_def carch_tcb_relation_def) + apply (wpc ; clarsimp ; ccorres_rewrite) + apply (rule ccorres_return_Skip) + apply (ctac add: dissociateVCPUTCB_tcb_ccorres) + apply ceqv + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply (rule ccorres_move_c_guard_vcpu) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply (rule_tac Q="ko_at' vcpu vcpuptr and valid_objs'" + and Q'=UNIV + and C'="{s. vcpuTCBPtr vcpu \ None}" + in ccorres_rewrite_cond_sr) + apply clarsimp + apply (frule (1) ko_at_valid_objs', simp) + apply (clarsimp simp: valid_obj'_def valid_vcpu'_def) + apply (drule (1) vcpu_at_rf_sr) + apply (clarsimp simp: typ_heap_simps' cvcpu_relation_def option_to_ctcb_ptr_def + dest!: aligned_tcb_ctcb_not_NULL + split: if_split) + apply (wpc; clarsimp; ccorres_rewrite) + apply (rule ccorres_return_Skip) + apply (rule ccorres_move_c_guard_vcpu) + apply (ctac add: dissociateVCPUTCB_vcpu_ccorres) + apply ceqv + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply simp + apply (rule archThreadSet_tcbVCPU_Basic_ccorres[of _ "Some vcpuptr", simplified]) + apply ceqv + apply (rule ccorres_move_c_guard_vcpu) + apply clarsimp + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply (rule setObject_vcpuTCB_updated_Basic_ccorres[where tptr="Some tptr" and t="Map.empty", + simplified option_to_ctcb_ptr_def, simplified]) + apply ceqv + apply (rule ccorres_pre_getCurThread, rename_tac curThread) + apply (subst ccorres_seq_skip'[symmetric]) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply (rule_tac R="\s. curThread = ksCurThread s" in ccorres_when) + apply (clarsimp simp: rf_sr_ksCurThread) + apply (ctac add: vcpu_switch_ccorres_Some) + apply ceqv + apply (rule ccorres_return_Skip) + apply (rule wp_post_taut) + apply (vcg exspec=vcpu_switch_modifies) + apply (wpsimp wp: setObject_vcpu_valid_objs' hoare_drop_imps) + apply vcg + apply wpsimp + apply (vcg exspec=dissociateVCPUTCB_modifies) + apply ((wpsimp wp: hoare_vcg_all_lift hoare_drop_imps + | strengthen invs_valid_objs' invs_arch_state')+)[1] + apply (vcg exspec=dissociateVCPUTCB_modifies) + apply (rule_tac Q="\_. invs' and vcpu_at' vcpuptr and tcb_at' tptr" in hoare_post_imp) + apply (clarsimp simp: typ_at_tcb' obj_at'_def) + apply (rename_tac vcpu obj, case_tac vcpu) + apply (fastforce simp: valid_arch_tcb'_def valid_vcpu'_def objBits_simps) + apply wpsimp + apply (vcg exspec=dissociateVCPUTCB_modifies) + apply (fastforce simp: ctcb_relation_def carch_tcb_relation_def typ_heap_simps + cvcpu_relation_def option_to_ctcb_ptr_def) + done + +lemma vcpuFinalise_ccorres: + "ccorres dc xfdc (invs' and vcpu_at' vcpuptr) + ({s. vcpu_' s = Ptr vcpuptr}) [] + (vcpuFinalise vcpuptr) (Call vcpu_finalise_'proc)" + apply (cinit lift: vcpu_') + apply (rule ccorres_move_c_guard_vcpu) + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply (rule_tac Q="ko_at' vcpu vcpuptr and valid_objs'" + and Q'=UNIV + and C'="{s. 
vcpuTCBPtr vcpu \ None}" + in ccorres_rewrite_cond_sr) + apply clarsimp + apply (frule cmap_relation_vcpu) + apply (erule cmap_relationE1) + apply (erule ko_at_projectKO_opt, rename_tac cvcpu) + apply (frule (1) ko_at_valid_objs', simp) + apply (clarsimp simp: valid_obj'_def valid_vcpu'_def) + apply (clarsimp simp: typ_heap_simps' cvcpu_relation_def option_to_ctcb_ptr_def + dest!: aligned_tcb_ctcb_not_NULL + split: if_split) + apply (wpc; clarsimp; ccorres_rewrite) + apply (rule ccorres_return_Skip) + apply (rule ccorres_move_c_guard_vcpu) + apply (ctac add: dissociateVCPUTCB_vcpu_ccorres) + apply (fastforce simp: ctcb_relation_def carch_tcb_relation_def typ_heap_simps + cvcpu_relation_def option_to_ctcb_ptr_def)+ + done + +method return_NullCap_pair_ccorres = + solves \((rule ccorres_rhs_assoc2)+), (rule ccorres_from_vcg_throws), + (rule allI, rule conseqPre, vcg), (clarsimp simp: return_def ccap_relation_NullCap_iff)\ + +lemma ccap_relation_capFMappedASID_CL_0: + "ccap_relation (ArchObjectCap (FrameCap x0 x1 x2 x3 None)) cap \ + capFMappedASID_CL (cap_frame_cap_lift cap) = 0" + apply (clarsimp simp: ccap_relation_def cap_frame_cap_lift_def) + apply (case_tac "cap_lift cap") + apply (fastforce simp: cap_to_H_def Let_def split: cap_CL.splits if_split_asm)+ + done + +lemma Arch_finaliseCap_ccorres: + notes Collect_const[simp del] if_weak_cong[cong] + shows + "ccorres (\rv rv'. ccap_relation (fst rv) (remainder_C rv') \ + ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) + ret__struct_finaliseCap_ret_C_' + (invs' and valid_cap' (ArchObjectCap cp) + and (\s. 2 ^ acapBits cp \ gsMaxObjectSize s)) + (UNIV \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} + \ {s. final_' s = from_bool is_final}) [] + (Arch.finaliseCap cp is_final) (Call Arch_finaliseCap_'proc)" + (is "ccorres _ _ ?abstract_pre ?c_pre _ _ _") + supply if_cong[cong] option.case_cong[cong] + apply (cinit lift: cap_' final_' cong: call_ignore_cong) + apply csymbr + apply (simp add: AARCH64_H.finaliseCap_def cap_get_tag_isCap_ArchObject) + apply (rule ccorres_cases[where P=is_final]; clarsimp cong: arch_capability.case_cong) + prefer 2 + apply (subgoal_tac "isFrameCap cp \ \ isPageTableCap cp \ \ isASIDPoolCap cp \ \ isASIDControlCap cp") + apply (rule ccorres_cases[where P="isFrameCap cp"]; clarsimp) + prefer 2 + apply (rule ccorres_inst[where P="?abstract_pre" and P'=UNIV]) + apply (cases cp; clarsimp simp: isCap_simps; ccorres_rewrite) + apply return_NullCap_pair_ccorres (* ASIDControlCap *) + apply return_NullCap_pair_ccorres (* ASIDPoolCap *) + \ \PageTableCap\ + apply (rule ccorres_guard_imp) + apply (wpc; + (* proof is the same for both cases, but C code is duplicated *) + (clarsimp, + ccorres_rewrite, + (rule ccorres_rhs_assoc)+, + csymbr, + ccorres_rewrite, + rule ccorres_cond_false_seq, + ccorres_rewrite, + rule ccorres_inst[where P=\ and P'=UNIV], + (return_NullCap_pair_ccorres; simp))) + apply simp + apply simp + \ \VCPUCap\ + apply return_NullCap_pair_ccorres + \ \FrameCap\ + apply (clarsimp simp: isCap_simps) + apply (rule ccorres_guard_imp[where A="?abstract_pre" and A'=UNIV]) + apply ccorres_rewrite + apply (rule ccorres_add_return2) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply wpc + apply (prop_tac "ret__unsigned_longlong = 0") + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (drule cap_to_H_Frame_unfold, clarsimp) + apply (simp add: cap_frame_cap_lift_def split: if_split_asm) + apply ccorres_rewrite + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply simp 
+ apply (rule ccorres_return_C; simp) + apply (prop_tac "ret__unsigned_longlong \ 0") + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (drule cap_to_H_Frame_unfold, clarsimp) + apply (simp add: cap_frame_cap_lift_def split: if_split_asm) + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply wpc + apply simp + apply (ctac (no_vcg) add: unmapPage_ccorres) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_return_C; simp) + apply wp + apply (clarsimp simp: valid_cap'_def wellformed_mapdata'_def) + apply (clarsimp simp: ccap_relation_NullCap_iff ccap_relation_frame_tags) + apply (clarsimp simp: ccap_relation_def c_valid_cap_def map_option_Some_eq2 + cl_valid_cap_def + dest!: cap_to_H_Frame_unfold) + apply (simp add: cap_frame_cap_lift_def split: if_splits) + apply (clarsimp simp: isCap_simps) + apply (wpc; simp add: isCap_simps; ccorres_rewrite) + \ \ASIDControlCap\ + apply (rule ccorres_inst[where P="?abstract_pre" and P'=UNIV]) + apply return_NullCap_pair_ccorres + \ \ASIDPoolCap\ + apply (rule ccorres_inst[where P="?abstract_pre" and P'=UNIV]) + apply (rule ccorres_guard_imp) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: deleteASIDPool_ccorres) + apply (csymbr, csymbr, csymbr, csymbr) + apply (rule ccorres_return_C; simp) + apply wp + apply (clarsimp simp: valid_cap'_def) + apply (clarsimp simp: ccap_relation_NullCap_iff cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (clarsimp simp: cap_to_H_def Let_def cap_asid_pool_cap_lift_def + split: cap_CL.splits if_splits) + \ \FrameCap\ + apply (rule ccorres_inst[where P="?abstract_pre" and P'=UNIV]) + apply (rule ccorres_guard_imp) + apply (rule ccorres_add_return2) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply wpc + apply (prop_tac "ret__unsigned_longlong = 0") + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (drule cap_to_H_Frame_unfold, clarsimp) + apply (simp add: cap_frame_cap_lift_def split: if_split_asm) + apply ccorres_rewrite + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply simp + apply (rule ccorres_return_C; simp) + apply (prop_tac "ret__unsigned_longlong \ 0") + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (drule cap_to_H_Frame_unfold, clarsimp) + apply (simp add: cap_frame_cap_lift_def split: if_split_asm) + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply wpc + apply simp + apply (ctac (no_vcg) add: unmapPage_ccorres) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_return_C; simp) + apply wp + apply (clarsimp simp: valid_cap'_def wellformed_mapdata'_def) + apply (clarsimp simp: ccap_relation_NullCap_iff cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 cap_frame_cap_lift_def + c_valid_cap_def cl_valid_cap_def + dest!: cap_to_H_Frame_unfold + split: if_splits) + \ \PageTableCap\ + apply (rule ccorres_inst[where P="?abstract_pre" and P'=UNIV]) + apply (rule ccorres_guard_imp) + apply wpc (* NormalPT_T / VSRootPT_T *) + apply (in_case VSRootPT_T) + apply clarsimp + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply wpc (* mapped None/Some *) + apply (in_case None) + apply 
(rule ccorres_cond_false_seq, ccorres_rewrite) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (return_NullCap_pair_ccorres; simp) + apply (in_case "Some ?m") + apply clarsimp + apply (rename_tac asid vref) + apply (rule ccorres_cond_true_seq, ccorres_rewrite) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac xf'="ret__unsigned_longlong_'" and + F="\asid'. asid' = asid" and + R=\ and + R'=UNIV + in ccorres_symb_exec_r_abstract_UNIV) + apply vcg + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 dest!: cap_to_H_VSCap) + apply (clarsimp simp: cap_vspace_cap_lift_def cap_vspace_cap_lift) + apply ceqv + apply csymbr + apply wpfix + apply (ctac (no_vcg) add: deleteASID_ccorres) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (return_NullCap_pair_ccorres; simp) + apply wp + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def map_option_Some_eq2 + cap_vspace_cap_lift_def cap_vspace_cap_lift + dest!: cap_to_H_VSCap) + apply (in_case NormalPT_T) + apply clarsimp + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply wpc (* mapped None/Some *) + apply (in_case None) + apply (rule ccorres_cond_false_seq, ccorres_rewrite) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (return_NullCap_pair_ccorres; simp) + apply (in_case "Some ?m") + apply clarsimp + apply (rename_tac asid vref) + apply (rule ccorres_cond_true_seq, ccorres_rewrite) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac xf'="ret__unsigned_longlong_'" and + F="\asid'. asid' = asid" and + R=\ and + R'=UNIV + in ccorres_symb_exec_r_abstract_UNIV) + apply vcg + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 dest!: cap_to_H_PTCap) + apply (clarsimp simp: cap_page_table_cap_lift_def cap_page_table_cap_lift) + apply ceqv + apply csymbr + apply csymbr + apply wpfix + apply (ctac (no_vcg) add: unmapPageTable_ccorres) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (return_NullCap_pair_ccorres; simp) + apply wp + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def map_option_Some_eq2 + cap_page_table_cap_lift_def cap_page_table_cap_lift + dest!: cap_to_H_PTCap) + apply (fastforce simp: valid_cap'_def wellformed_mapdata'_def) + apply (fastforce simp: ccap_relation_def map_option_Some_eq2 to_bool_def + cap_vspace_cap_lift_def cap_vspace_cap_lift + cap_page_table_cap_lift_def cap_page_table_cap_lift + dest!: cap_to_H_VSCap cap_to_H_PTCap) + \ \VCPUCap\ + apply (rule ccorres_inst[where P="?abstract_pre" and P'=UNIV]) + apply (rule ccorres_guard_imp) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (ctac (no_vcg) add: vcpuFinalise_ccorres) + apply (csymbr, csymbr, csymbr, csymbr) + apply (rule ccorres_return_C; simp) + apply wpsimp + apply (clarsimp simp: valid_cap'_def) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (frule cap_lift_vcpu_cap) + apply (clarsimp simp: cap_vcpu_cap_lift cap_to_H_def + case_option_over_if + elim!: ccap_relationE) + apply (clarsimp simp: ccap_relation_NullCap_iff cap_get_tag_isCap_unfolded_H_cap) + apply fastforce + done + +lemma prepareThreadDelete_ccorres: + "ccorres dc xfdc + (invs' and tcb_at' thread) + (\\thread = tcb_ptr_to_ctcb_ptr thread\) hs + (prepareThreadDelete thread) (Call Arch_prepareThreadDelete_'proc)" + apply (cinit lift: thread_', rename_tac cthread) + apply (ctac add: fpuThreadDelete_ccorres) + + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_pre_archThreadGet, rename_tac vcpuopt) + 
apply simp + apply (rule_tac Q="valid_objs' and + no_0_obj' and + obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = vcpuopt) thread" + and Q'=UNIV and C'="{s. vcpuopt \ None}" in ccorres_rewrite_cond_sr) + apply clarsimp + apply (drule (1) obj_at_cslift_tcb) + apply (fastforce simp: typ_heap_simps ctcb_relation_def carch_tcb_relation_def + option_to_ptr_NULL_eq + dest: ko_at'_tcb_vcpu_not_NULL + split: option.splits) + apply (wpc; clarsimp; ccorres_rewrite) + apply (rule ccorres_return_Skip) + apply (rule ccorres_move_c_guard_tcb) + apply (ctac add: dissociateVCPUTCB_tcb_ccorres) + apply (clarsimp simp: invs_valid_pspace') + apply (solves \(wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_all_lift | strengthen invs_valid_objs')+\) + apply (vcg exspec=fpuThreadDelete_modifies) + apply clarsimp + apply (rule conjI) + (* haskell *) + apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def obj_at'_def) + apply (rule cguard_UNIV, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def carch_tcb_relation_def) + done + +lemma finaliseCap_ccorres: + "\final. + ccorres (\rv rv'. ccap_relation (fst rv) (finaliseCap_ret_C.remainder_C rv') + \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) + ret__struct_finaliseCap_ret_C_' + (invs' and sch_act_simple and valid_cap' cap and (\s. ksIdleThread s \ capRange cap) + and (\s. 2 ^ capBits cap \ gsMaxObjectSize s)) + (UNIV \ {s. ccap_relation cap (cap_' s)} \ {s. final_' s = from_bool final} + \ {s. exposed_' s = from_bool flag}) [] + (finaliseCap cap final flag) (Call finaliseCap_'proc)" + apply (rule_tac F="capAligned cap" in Corres_UL_C.ccorres_req) + apply (clarsimp simp: valid_capAligned) + apply (case_tac "P :: bool" for P) + apply (rule ccorres_guard_imp2, erule finaliseCap_True_cases_ccorres) + apply simp + apply (subgoal_tac "\acap. 
(0 <=s (-1 :: word8)) \ acap = capCap cap") + prefer 2 apply simp + apply (erule exE) + apply (cinit lift: cap_' final_' exposed_' cong: call_ignore_cong) + apply csymbr + apply (simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps + cong: if_cong) + apply (clarsimp simp: word_sle_def) + apply (rule ccorres_if_lhs) + apply (rule ccorres_fail) + apply (simp add: liftM_def del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_add_return2) + apply (ccorres_rewrite) + apply (ctac add: Arch_finaliseCap_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def Collect_const_mem) + apply wp + apply (vcg exspec=Arch_finaliseCap_modifies) + apply (simp add: cap_get_tag_isCap Collect_False + del: Collect_const) + apply csymbr + apply (simp add: cap_get_tag_isCap Collect_False Collect_True + del: Collect_const) + apply (rule ccorres_if_lhs) + apply (simp, rule ccorres_fail) + apply (simp add: from_bool_0 Collect_True Collect_False + del: Collect_const) + apply csymbr + apply (simp add: cap_get_tag_isCap Collect_False Collect_True + del: Collect_const) + apply (rule ccorres_if_lhs) + apply (simp add: Let_def) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cap_get_tag_isCap word_sle_def + return_def word_mod_less_divisor + less_imp_neq [OF word_mod_less_divisor]) + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) + apply (clarsimp simp: isCap_simps capAligned_def + objBits_simps word_bits_conv + signed_shift_guard_simpler_64) + apply (rule conjI) + apply (simp add: word_less_nat_alt) + apply (rule conjI) + apply (auto simp: word_less_nat_alt word_le_not_less[symmetric] bit_simps objBits_defs)[1] + apply (simp add: ccap_relation_def cap_zombie_cap_lift) + apply (simp add: cap_to_H_def isZombieTCB_C_def ZombieTCB_C_def) + apply (simp add: less_mask_eq word_less_nat_alt less_imp_neq) + apply (simp add: mod_mask_drop[where n=6] mask_def[where n=6] + less_imp_neq [OF word_mod_less_divisor] + less_imp_neq Let_def objBits_simps') + apply (thin_tac "a = b" for a b)+ + apply (subgoal_tac "P" for P) + apply (subst add.commute, subst unatSuc, assumption)+ + apply clarsimp + apply (rule conjI) + apply (rule word_eqI) + apply (simp add: word_size word_ops_nth_size nth_w2p + less_Suc_eq_le is_aligned_nth) + apply (safe, simp_all)[1] + apply (simp add: shiftL_nat ccap_relation_NullCap_iff[symmetric, simplified ccap_relation_def]) + apply (rule trans, rule unat_power_lower64[symmetric]) + apply (simp add: word_bits_conv) + apply (rule unat_cong, rule word_eqI) + apply (simp add: word_size word_ops_nth_size nth_w2p + is_aligned_nth less_Suc_eq_le) + apply (safe, simp_all)[1] + apply (subst add.commute, subst eq_diff_eq[symmetric]) + apply (clarsimp simp: minus_one_norm) + apply (rule ccorres_if_lhs) + apply (simp add: Let_def getThreadCSpaceRoot_def locateSlot_conv + Collect_True Collect_False + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_Guard_Seq)+ + apply csymbr + apply (ctac(no_vcg) add: unbindNotification_ccorres) + apply (ctac(no_vcg) add: suspend_ccorres[OF cteDeleteOne_ccorres]) + apply (ctac(no_vcg) add: prepareThreadDelete_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply 
(clarsimp simp: word_sle_def return_def) + apply (subgoal_tac "cap_get_tag capa = scast cap_thread_cap") + apply (drule(1) cap_get_tag_to_H) + apply (clarsimp simp: isCap_simps capAligned_def ccap_relation_NullCap_iff) + apply (simp add: ccap_relation_def cap_zombie_cap_lift) + apply (simp add: cap_to_H_def isZombieTCB_C_def ZombieTCB_C_def + mask_def) + apply (simp add: cte_level_bits_def tcbCTableSlot_def + Kernel_C.tcbCTable_def tcbCNodeEntries_def + bit.conj_disj_distrib2 + word_bw_assocs) + apply (simp add: objBits_simps ctcb_ptr_to_tcb_ptr_def) + apply (frule is_aligned_add_helper[where p="tcbptr - ctcb_offset" and d=ctcb_offset for tcbptr]) + apply (simp add: ctcb_offset_defs objBits_defs) + apply (simp add: mask_def objBits_defs) + apply (simp add: cap_get_tag_isCap) + apply wp+ + apply (rule ccorres_if_lhs) + apply (simp add: Let_def) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def ccap_relation_NullCap_iff) + apply (simp add: isArchCap_T_isArchObjectCap[symmetric] + del: Collect_const) + apply (rule ccorres_if_lhs) + apply (simp add: Collect_False Collect_True Let_def + del: Collect_const) + apply (rule_tac P="(capIRQ cap) \ AARCH64.maxIRQ" in ccorres_gen_asm) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (rule_tac xf'=irq_' in ccorres_abstract,ceqv) + apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) + apply (ctac(no_vcg) add: deletingIRQHandler_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ ]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) + apply (simp add: ccap_relation_NullCap_iff split: if_split) + apply wp + apply (rule ccorres_if_lhs) + apply simp + apply (rule ccorres_fail) + apply (rule ccorres_add_return, rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) + apply (rule ccorres_Cond_rhs) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_Skip) + apply (rule ccorres_Cond_rhs) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_Skip) + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply simp + apply (rule ccorres_Cond_rhs) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_Skip) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_Skip) + apply (rule ceqv_refl) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def ccap_relation_NullCap_iff + irq_opt_relation_def) + apply wp + apply (simp add: guard_is_UNIV_def) + apply (clarsimp simp: cap_get_tag_isCap word_sle_def Collect_const_mem) + apply (intro impI conjI) + apply (clarsimp split: bool.splits) + apply (clarsimp split: bool.splits) + apply (clarsimp simp: valid_cap'_def isCap_simps) + apply (clarsimp simp: isCap_simps capRange_def capAligned_def) + apply (clarsimp simp: isCap_simps valid_cap'_def) + apply (clarsimp simp: isCap_simps capRange_def capAligned_def) + apply (clarsimp simp: isCap_simps valid_cap'_def ) + apply clarsimp + apply (clarsimp simp: isCap_simps valid_cap'_def ) + apply (clarsimp simp: tcb_ptr_to_ctcb_ptr_def ccap_relation_def isCap_simps + c_valid_cap_def cap_thread_cap_lift_def cap_to_H_def + ctcb_ptr_to_tcb_ptr_def Let_def + split: option.splits cap_CL.splits if_splits) + apply clarsimp + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) + apply 
(clarsimp simp: isCap_simps) + apply (clarsimp simp: tcb_cnode_index_defs ptr_add_assertion_def) + apply clarsimp + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) + apply (frule(1) ccap_relation_IRQHandler_mask) + apply (clarsimp simp: isCap_simps irqInvalid_def valid_cap'_def) + apply (rule irq_opt_relation_Some_ucast) + apply fastforce + apply (simp add: irqInvalid_def and_mask_eq_iff_le_mask) + apply (fastforce simp: mask_def) (* faster when not combined with prev simp *) + apply (simp add: Kernel_C.maxIRQ_def maxIRQ_def) + apply fastforce + apply clarsimp + apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) + apply (frule(1) ccap_relation_IRQHandler_mask) + apply (clarsimp simp add:mask_eq_ucast_eq) + done + +lemma checkIRQ_ret_good: + "\\s. (irq \ scast Kernel_C.maxIRQ \ P s) \ Q s\ checkIRQ irq \\rv. P\, \\rv. Q\" + apply (clarsimp simp: checkIRQ_def rangeCheck_def maxIRQ_def minIRQ_def) + apply (rule hoare_pre,wp) + by (clarsimp simp: Kernel_C.maxIRQ_def split: if_split) + +lemma Arch_checkIRQ_ccorres: + "ccorres (syscall_error_rel \ (\r r'. irq \ scast Kernel_C.maxIRQ)) + (liftxf errstate id undefined ret__unsigned_long_') + \ (UNIV \ \irq = \irq_w___unsigned_long\) [] + (checkIRQ irq) (Call Arch_checkIRQ_'proc)" + apply (cinit lift: irq_w___unsigned_long_' ) + apply (simp add: rangeCheck_def unlessE_def AARCH64.minIRQ_def checkIRQ_def + ucast_nat_def word_le_nat_alt[symmetric] + linorder_not_le[symmetric] maxIRQ_def + length_ineq_not_Nil hd_conv_nth cast_simps + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def Kernel_C.maxIRQ_def + exception_defs syscall_error_rel_def + syscall_error_to_H_cases) + apply (clarsimp simp: Kernel_C.maxIRQ_def) + apply (rule ccorres_return_CE, simp+) + done + +end + +end diff --git a/proof/crefine/AARCH64/Init_C.thy b/proof/crefine/AARCH64/Init_C.thy new file mode 100644 index 0000000000..ef6cd4527f --- /dev/null +++ b/proof/crefine/AARCH64/Init_C.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +chapter "Toplevel Refinement Statement" + +theory Init_C +imports ADT_C +begin + +text \ + Currently, we assume correctness of the init code. +\ + +axiomatization where init_refinement_C: + "Init_C s \ lift_state_relation rf_sr `` Init_H" + +end diff --git a/proof/crefine/AARCH64/Interrupt_C.thy b/proof/crefine/AARCH64/Interrupt_C.thy new file mode 100644 index 0000000000..423709498a --- /dev/null +++ b/proof/crefine/AARCH64/Interrupt_C.thy @@ -0,0 +1,797 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Interrupt_C +imports CSpace_All Finalise_C +begin + +context kernel_m begin + +lemma invokeIRQHandler_AckIRQ_ccorres: + "ccorres dc xfdc + invs' (UNIV \ {s. 
irq_' s = ucast irq}) [] + (InterruptDecls_H.invokeIRQHandler (AckIRQ irq)) (Call invokeIRQHandler_AckIRQ_'proc)" + apply (cinit lift: irq_' simp: Interrupt_H.invokeIRQHandler_def invokeIRQHandler_def) + apply (ctac add: maskInterrupt_ccorres) + apply simp + done + +lemma getIRQSlot_ccorres: + "ccorres ((=) \ Ptr) irqSlot_' + \ UNIV hs + (getIRQSlot irq) + (\irqSlot :== CTypesDefs.ptr_add intStateIRQNode_Ptr (uint irq))" + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: getIRQSlot_def liftM_def getInterruptState_def + locateSlot_conv) + apply (simp add: simpler_gets_def bind_def return_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cinterrupt_relation_def size_of_def + sint_ucast_eq_uint is_down of_int_uint_ucast + cte_level_bits_def mult.commute mult.left_commute ucast_nat_def + ) + done + +lemma ptr_add_assertion_irq_guard: +"ccorres dc xfdc P Q hs a + (Guard F + \uint irq = 0 \ array_assertion intStateIRQNode_Ptr (nat (uint irq)) (hrs_htd \t_hrs)\ + c;;m) + \ ccorres dc xfdc P Q hs a + (Guard F + \ptr_add_assertion intStateIRQNode_Ptr + (sint (ucast (irq :: 8 word)::32 signed word)) False + (hrs_htd \t_hrs)\ c ;; m)" + by (simp add: ptr_add_assertion_def sint_ucast_eq_uint is_down) + +lemma cte_at_irq_node': + "invs' s \ + cte_at' (irq_node' s + 2 ^ cte_level_bits * ucast (irq :: irq_len word)) s" + by (clarsimp simp: invs'_def valid_state'_def valid_irq_node'_def + cte_level_bits_def real_cte_at' cteSizeBits_def shiftl_t2n) + +lemma invokeIRQHandler_SetIRQHandler_ccorres: + "ccorres dc xfdc + (invs' and sch_act_simple + and irq_handler_inv_valid' (SetIRQHandler irq cp slot)) + (UNIV \ {s. irq_' s = ucast irq} \ {s. slot_' s = Ptr slot} + \ {s. ccap_relation cp (cap_' s)}) [] + (InterruptDecls_H.invokeIRQHandler (SetIRQHandler irq cp slot)) + (Call invokeIRQHandler_SetIRQHandler_'proc)" +proof - + have valid_objs_invs'_strg: "\s. invs' s \ valid_objs' s" + by (clarsimp) + show ?thesis + apply (cinit lift: irq_' slot_' cap_' simp: Interrupt_H.invokeIRQHandler_def) + apply (rule ccorres_Guard_intStateIRQNode_array_Ptr) + apply (rule ccorres_move_array_assertion_irq) + apply (simp) + apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) + apply (rule ccorres_symb_exec_r) + apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="-1"]) + apply (rule ccorres_call) + apply (rule cteInsert_ccorres) + apply simp + apply simp + apply simp + apply (simp add: pred_conj_def) + apply (strengthen invs_mdb_strengthen' valid_objs_invs'_strg + invs_pspace_canonical' invs_pspace_aligned') + apply (wp cteDeleteOne_other_cap[unfolded o_def])[1] + apply vcg + apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def + gs_set_assn_Delete_cstate_relation[unfolded o_def]) + apply (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv) + apply wp + apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def + ghost_assertion_data_set_def) + apply (clarsimp simp: cte_at_irq_node' ucast_nat_def) + apply (clarsimp simp: cte_wp_at_ctes_of badge_derived'_def + Collect_const_mem unat_gt_0 valid_cap_simps' AARCH64.maxIRQ_def) + apply (drule word_le_nat_alt[THEN iffD1]) + apply clarsimp + apply (drule valid_globals_ex_cte_cap_irq[where irq=irq]) + apply auto + done +qed + +(* 0x1FF is nearest mask above maxIRQ *) +lemma invokeIRQHandler_ClearIRQHandler_ccorres: + "ccorres dc xfdc + (invs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and K(irq \ 0x1FF)) + (UNIV \ {s. 
irq_' s = ucast irq}) [] + (InterruptDecls_H.invokeIRQHandler (ClearIRQHandler irq)) + (Call invokeIRQHandler_ClearIRQHandler_'proc)" + apply (cinit lift: irq_' simp: Interrupt_H.invokeIRQHandler_def) + apply (rule ccorres_Guard_intStateIRQNode_array_Ptr) + apply (rule ccorres_move_array_assertion_irq) + apply (simp add: ucast_up_ucast is_up) + apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) + apply (rule ccorres_symb_exec_r) + apply (ctac add: cteDeleteOne_ccorres[where w="-1"]) + apply vcg + apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def + gs_set_assn_Delete_cstate_relation[unfolded o_def]) + apply (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv) + apply wp + apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def + ghost_assertion_data_set_def) + apply (clarsimp simp: cte_at_irq_node' ucast_nat_def) + apply (drule word_le_nat_alt[THEN iffD1]) + apply (auto simp add:Word.uint_up_ucast) + apply (case_tac "of_int (uint irq) \ 0 \ 0 < unat irq") + by (auto simp: Collect_const_mem unat_eq_0) + + +lemma ntfn_case_can_send: + "(case cap of NotificationCap x1 x2 x3 x4 \ f x3 + | _ \ v) = (if isNotificationCap cap then f (capNtfnCanSend cap) + else v)" + by (cases cap, simp_all add: isCap_simps) + +lemma list_length_geq_helper[simp]: + "\\ length args < 2\ + \ \y ys. args = y # ys" + by (frule length_ineq_not_Nil(3), simp, metis list.exhaust) + +lemma decodeIRQHandlerInvocation_ccorres: + notes if_cong[cong] gen_invocation_type_eq[simp] + shows + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps o ctes_of) + and (\s. \slot. cte_wp_at' (\cte. cteCap cte = IRQHandlerCap irq) slot s) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s)) + (UNIV + \ {s. invLabel_' s = label} + \ {s. irq_' s = ucast irq} + \ {s. current_extra_caps_' (globals s) = extraCaps'}) [] + (decodeIRQHandlerInvocation label irq extraCaps + >>= invocationCatch thread isBlocking isCall InvokeIRQHandler) + (Call decodeIRQHandlerInvocation_'proc)" + apply (cinit' lift: invLabel_' irq_' current_extra_caps_' + simp: decodeIRQHandlerInvocation_def invocation_eq_use_types) +supply [[goals_limit=20]] + apply (rule ccorres_Cond_rhs) + apply (simp add: returnOk_bind ccorres_invocationCatch_Inr) + apply (rule ccorres_rhs_assoc)+ + apply (simp add: performInvocation_def bindE_assoc, simp add: liftE_bindE) + apply (ctac(no_vcg) add: setThreadState_ccorres) + apply (ctac(no_vcg) add: invokeIRQHandler_AckIRQ_ccorres) + apply (simp add: liftE_alternative returnOk_liftE[symmetric]) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (wp sts_invs_minor')+ + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (simp add: list_case_If2 split_def del: Collect_const) + apply (rule ccorres_if_bind) + apply (rule ccorres_if_lhs[rotated]) + apply (rule ccorres_cond_false_seq) + apply (simp add: Let_def split_def ntfn_case_can_send + del: Collect_const) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply (ctac(no_vcg)) + apply (rule ccorres_assert) + apply (rule_tac P="\s. 
ksCurThread s = thread" + in ccorres_cross_over_guard) + apply (csymbr | rule ccorres_Guard_Seq)+ + apply (simp add: cap_get_tag_isCap del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: hd_conv_nth del: Collect_const) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (simp add: throwError_def return_def) + apply (simp add: syscall_error_rel_def syscall_error_to_H_cases) + apply (simp add: exception_defs) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (subgoal_tac "(capNtfnCanSend_CL (cap_notification_cap_lift ntfnCap) = 0) + = (\ capNtfnCanSend rv)") + apply (simp add: from_bool_0 hd_conv_nth del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (simp add: throwError_def return_def) + apply (simp add: syscall_error_rel_def syscall_error_to_H_cases) + apply (simp add: exception_defs) + apply vcg + apply (simp add: hd_conv_nth liftE_bindE returnOk_bind + invocationCatch_def performInvocation_def + bind_assoc bind_bindE_assoc excaps_map_def + del: Collect_const) + apply (ctac(no_vcg) add: setThreadState_ccorres) + apply (ctac(no_vcg) add: invokeIRQHandler_SetIRQHandler_ccorres) + apply (simp add: liftE_alternative returnOk_liftE[symmetric]) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (wp sts_invs_minor' hoare_vcg_ex_lift + | simp)+ + apply (clarsimp simp: cap_get_tag_isCap[symmetric] + dest!: cap_get_tag_to_H) + apply (simp add: to_bool_def) + apply simp + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp: Collect_const_mem neq_Nil_conv + dest!: interpret_excaps_eq) + apply (simp add: rf_sr_ksCurThread mask_def[where n=4] + ThreadState_defs cap_get_tag_isCap excaps_map_def + word_sless_def word_sle_def) + apply (simp add: invocationCatch_def throwError_bind + interpret_excaps_test_null Collect_True + excaps_map_def + del: Collect_const + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_Cond_rhs) + apply (simp add: invocationCatch_def performInvocation_def + returnOk_bind liftE_bindE bind_assoc bind_bindE_assoc) + apply (rule ccorres_rhs_assoc)+ + apply (ctac(no_vcg) add: setThreadState_ccorres) + apply (ctac(no_vcg) add: invokeIRQHandler_ClearIRQHandler_ccorres) + apply (simp add: liftE_alternative returnOk_liftE[symmetric]) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (wp sts_invs_minor')+ + apply (rule ccorres_equals_throwError) + apply (fastforce simp: invocationCatch_def throwError_bind + split: gen_invocation_labels.split) + apply (simp add: ccorres_cond_iffs cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply simp + apply (clarsimp simp: Collect_const_mem tcb_at_invs') + apply (clarsimp simp: invs_valid_objs' + ct_in_state'_def + ccap_rights_relation_def + mask_def[where n=4] ThreadState_defs) + apply (subst pred_tcb'_weakenE, assumption, fastforce)+ + apply (clarsimp simp: rf_sr_ksCurThread word_sle_def word_sless_def + sysargs_rel_n_def 
word_less_nat_alt) + apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def + excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth + slotcap_in_mem_def valid_tcb_state'_def + dest!: interpret_excaps_eq split: bool.splits) + apply (intro conjI impI allI) + apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def + excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth + slotcap_in_mem_def valid_tcb_state'_def + dest!: interpret_excaps_eq split: bool.splits)+ + apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[6] + apply (drule ctes_of_valid') + apply fastforce + apply (clarsimp simp add:valid_cap_simps' AARCH64.maxIRQ_def) + apply (erule order.trans,simp) + apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid') + done + +declare mask_of_mask[simp] + +(* FIXME AARCH64 unused? +lemma ucast_maxIRQ_le_eq: + "UCAST(irq_len \ 64) irq \ SCAST(32 signed \ 64) Kernel_C.maxIRQ \ + irq \ SCAST(32 signed \ irq_len) Kernel_C.maxIRQ" + apply (subst ucast_le_ucast_6_64[symmetric]) + by (clarsimp simp: ucast_up_ucast is_up Kernel_C.maxIRQ_def) *) + +(* FIXME AARCH64 unused? *) +lemma ucast_maxIRQ_le_eq': + "UCAST(irq_len \ 64) irq \ SCAST(32 signed \ 64) Kernel_C.maxIRQ \ irq \ maxIRQ" + apply (clarsimp simp: Kernel_C.maxIRQ_def maxIRQ_def) + by word_bitwise + +lemma invokeIRQControl_expanded_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and cte_at' parent + and (\_. (ucast irq) \ (ucast Kernel_C.maxIRQ :: machine_word))) + (UNIV \ {s. irq_' s = (ucast irq :: machine_word)} + \ {s. handlerSlot_' s = cte_Ptr slot} + \ {s. controlSlot_' s = cte_Ptr parent}) + hs + (do y <- setIRQState irqstate.IRQSignal irq; + liftE (cteInsert (capability.IRQHandlerCap irq) parent slot) + od) + (Call invokeIRQControl_'proc)" + apply (cinit' lift: irq_' handlerSlot_' controlSlot_') + apply (ctac add: setIRQState_ccorres) + apply csymbr + apply (rule ccorres_add_returnOk) + apply (simp only: liftE_bindE) + apply (ctac add: cteInsert_ccorres) + apply (rule ccorres_return_CE) + apply clarsimp+ + apply wp + apply (vcg exspec=cteInsert_modifies) + apply wp + apply (vcg exspec=setIRQState_modifies) + apply (clarsimp simp: is_simple_cap'_def isCap_simps valid_cap_simps' capAligned_def) + apply (rule conjI, fastforce simp: word_bits_def)+ + apply (rule conjI) + apply (clarsimp simp: word_le_nat_alt Kernel_C.maxIRQ_def maxIRQ_def) + apply (clarsimp simp: Collect_const_mem ccap_relation_def cap_irq_handler_cap_lift + cap_to_H_def c_valid_cap_def cl_valid_cap_def + word_bw_assocs mask_twice Kernel_C.maxIRQ_def ucast_ucast_a + is_up ucast_ucast_b is_down) + apply (subst less_mask_eq) + apply (rule le_m1_iff_lt[THEN iffD1,THEN iffD1]) + apply simp + apply (erule order.trans, simp) + apply (simp add: mask_def) + apply word_bitwise + done + +lemma invokeIRQControl_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and cte_at' parent + and (\_. (ucast irq) \ (ucast Kernel_C.maxIRQ :: machine_word))) + (UNIV \ {s. irq_' s = ucast irq} + \ {s. handlerSlot_' s = cte_Ptr slot} + \ {s. controlSlot_' s = cte_Ptr parent}) [] + (performIRQControl (Invocations_H.irqcontrol_invocation.IssueIRQHandler irq slot parent)) + (Call invokeIRQControl_'proc)" + by (clarsimp simp: performIRQControl_def liftE_def bind_assoc + intro!: invokeIRQControl_expanded_ccorres[simplified liftE_def, simplified]) + +lemma isIRQActive_ccorres: + "ccorres (\rv rv'. 
rv' = from_bool rv) ret__unsigned_long_' + (\s. irq \ scast Kernel_C.maxIRQ) (UNIV \ {s. irq_' s = ucast irq}) [] + (isIRQActive irq) (Call isIRQActive_'proc)" + apply (cinit lift: irq_') + apply (simp add: getIRQState_def getInterruptState_def) + apply (rule_tac P="irq \ ucast Kernel_C.maxIRQ \ unat irq \ unat maxIRQ" in ccorres_gen_asm) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def word_sless_msb_less maxIRQ_def + word_less_nat_alt) + apply (clarsimp simp: order_le_less_trans unat_less_helper Kernel_C.IRQInactive_def + Kernel_C.maxIRQ_def word_0_sle_from_less[OF order_less_le_trans, OF ucast_less]) + apply (clarsimp simp: rf_sr_def cstate_relation_def Kernel_C.maxIRQ_def + Let_def cinterrupt_relation_def) + apply (drule spec, drule(1) mp) + apply (case_tac "intStateIRQTable (ksInterruptState \) irq") + apply (simp add: irq_state_defs Kernel_C.maxIRQ_def maxIRQ_def word_le_nat_alt)+ + done + +lemma Platform_maxIRQ: + "AARCH64.maxIRQ = scast Kernel_C.maxIRQ" + by (simp add: AARCH64.maxIRQ_def Kernel_C.maxIRQ_def) + +lemma Arch_invokeIRQControl_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and cte_at' parent + and (\_. ucast irq \ (scast Kernel_C.maxIRQ :: machine_word))) + (UNIV \ {s. irq_' s = ucast irq} + \ {s. handlerSlot_' s = cte_Ptr slot} + \ {s. controlSlot_' s = cte_Ptr parent} + \ {s. trigger_' s = from_bool trigger} + ) + hs + (AARCH64_H.performIRQControl (IssueIRQHandler irq slot parent trigger)) + (Call Arch_invokeIRQControl_'proc)" + apply (cinit' lift: irq_' handlerSlot_' controlSlot_' trigger_') + apply (clarsimp simp: AARCH64_H.performIRQControl_def simp flip: liftE_liftE) + apply (rule ccorres_liftE_Seq) + apply (ctac (no_vcg) add: setIRQTrigger_ccorres) + apply (rule ccorres_liftE_Seq) + apply (rule ccorres_add_returnOk) + apply (ctac add: invokeIRQControl_expanded_ccorres) + apply (ctac add: ccorres_return_CE) + apply (ctac add: ccorres_inst[where P=\ and P'=UNIV]) + apply wp + apply (vcg exspec=invokeIRQControl_modifies) + apply wpsimp + apply (clarsimp simp: Kernel_C.maxIRQ_def maxIRQ_def IRQ_def) + done + +lemma ucast_ucast_mask_le_64_32: + "n \ 32 \ UCAST (32 \ 64) (UCAST (64 \ 32) x && mask n) = x && mask n" + by (simp add: ucast_and_mask[symmetric], word_bitwise, clarsimp) + +(* Bundle of definitions for minIRQ, maxIRQ, minUserIRQ, etc *) +lemmas c_irq_const_defs = irq_const_defs + +lemma liftME_invocationCatch: + "liftME f m >>= invocationCatch thread isBlocking isCall f' + = m >>= invocationCatch thread isBlocking isCall (f' \ f)" + apply (simp add: liftME_def bindE_def bind_assoc) + apply (rule bind_cong [OF refl]) + apply (simp add: lift_def throwError_bind invocationCatch_def + returnOk_bind + split: sum.split) + done + +lemma maxIRQ_ucast_scast [simp]: + "ucast (scast Kernel_C.maxIRQ :: irq_len word) = scast Kernel_C.maxIRQ" + by (clarsimp simp: Kernel_C.maxIRQ_def) + +lemma decodeIRQ_arch_helper: "x \ IRQIssueIRQHandler \ + (case x of IRQIssueIRQHandler \ f | _ \ g) = g" + by (clarsimp split: gen_invocation_labels.splits) + +lemma checkIRQ_wpE: + "\ \s. irq \ ucast maxIRQ \ P () s \ checkIRQ irq \P\, \\_. 
\\" + unfolding checkIRQ_def rangeCheck_def + by (wpsimp simp: maxIRQ_def minIRQ_def irqInvalid_def not_le split: if_split) + +lemma maxIRQ_ucast_toEnum_eq: + "x \ ucast maxIRQ \ toEnum (unat x) = x" for x::machine_word + by (simp add: word_le_nat_alt maxIRQ_def) + +lemma maxIRQ_ucast_toEnum_irq_t: + "x \ ucast maxIRQ \ (toEnum (unat x)::irq) \ scast Kernel_C.maxIRQ" for x::machine_word + by (simp add: word_le_nat_alt maxIRQ_def Kernel_C.maxIRQ_def ucast_nat_def unat_ucast) + +lemma maxIRQ_ucast_toEnum_irq_t2: + "x \ ucast maxIRQ \ UCAST(_ \ machine_word_len) (toEnum (unat x)::irq) \ ucast Kernel_C.maxIRQ" + for x::machine_word + by (simp add: word_le_nat_alt maxIRQ_def Kernel_C.maxIRQ_def ucast_nat_def unat_ucast) + +lemma maxIRQ_ucast_toEnum_irq_t3: + "x \ ucast maxIRQ \ UCAST(_ \ machine_word_len) (toEnum (unat x)::irq) \ scast Kernel_C.maxIRQ" + for x::machine_word + by (simp add: word_le_nat_alt maxIRQ_def Kernel_C.maxIRQ_def ucast_nat_def unat_ucast) + +lemmas maxIRQ_casts = maxIRQ_ucast_toEnum_irq_t maxIRQ_ucast_toEnum_irq_t2 + maxIRQ_ucast_toEnum_eq maxIRQ_ucast_toEnum_irq_t3 + +lemma Arch_decodeIRQControlInvocation_ccorres: + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and sch_act_simple and ct_active' + and (excaps_in_mem extraCaps o ctes_of) + and (\s. \v \ set extraCaps. + s \' fst v \ cte_at' (snd v) s) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and cte_wp_at' (\cte. cteCap cte = IRQControlCap) srcSlot + and sysargs_rel args buffer) + (UNIV \ {s. invLabel_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. srcSlot_' s = cte_Ptr srcSlot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. 
buffer_' s = option_to_ptr buffer}) + [] + (Arch.decodeIRQControlInvocation label args srcSlot (map fst extraCaps) + >>= invocationCatch thread isBlocking isCall (InvokeIRQControl o ArchIRQControl)) + (Call Arch_decodeIRQControlInvocation_'proc)" + supply maxIRQ_casts[simp] + supply gen_invocation_type_eq[simp] if_cong[cong] Collect_const[simp del] + apply (cinit' lift: invLabel_' length___unsigned_long_' srcSlot_' current_extra_caps_' buffer_' + simp: ArchInterrupt_H.AARCH64_H.decodeIRQControlInvocation_def) + apply (simp add: invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_Cond_rhs) + apply (simp add: list_case_If2 cong: call_ignore_cong) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: word_less_nat_alt throwError_bind invocationCatch_def) + apply (rule ccorres_cond_true_seq) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: interpret_excaps_test_null excaps_map_def + throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: interpret_excaps_test_null excaps_map_def + word_less_nat_alt Let_def + cong: call_ignore_cong) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply csymbr + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply csymbr + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=3 and buffer=buffer]) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply (simp add: rangeCheck_def unlessE_def AARCH64.minIRQ_def + ucast_nat_def word_le_nat_alt[symmetric] + linorder_not_le[symmetric] Platform_maxIRQ + length_ineq_not_Nil hd_conv_nth + cong: call_ignore_cong ccorres_prog_only_cong) + apply (simp add: split_def invocationCatch_use_injection_handler injection_handler_bindE + bindE_assoc + cong: call_ignore_cong ccorres_prog_only_cong) + apply (ctac add: ccorres_injection_handler_csum1[OF Arch_checkIRQ_ccorres]) + apply (simp add: injection_liftE liftE_bindE) + apply ccorres_rewrite + apply (ctac add: isIRQActive_ccorres) + apply (simp only: injection_handler_whenE injection_handler_throwError) + apply (rule ccorres_split_when_throwError_cond[where Q=\ and Q'=\]) + apply simp + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def exception_defs + syscall_error_rel_def syscall_error_to_H_def + syscall_error_type_defs) + apply (ctac add: ccorres_injection_handler_csum1 + [OF lookupTargetSlot_ccorres, + unfolded lookupTargetSlot_def]) + prefer 2 + apply ccorres_rewrite + apply (ctac add: ccorres_return_C_errorE) + apply ccorres_rewrite + apply csymbr + apply (ctac add: ccorres_injection_handler_csum1[OF ensureEmptySlot_ccorres]) + prefer 2 + apply ccorres_rewrite + apply (ctac add: ccorres_return_C_errorE) + apply ccorres_rewrite + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def + liftE_bindE bindE_assoc injection_handler_returnOk + 
cong: ccorres_prog_only_cong) + apply (ctac (no_vcg) add: setThreadState_ccorres) + apply (simp add: performIRQControl_def) + apply (ctac add: Arch_invokeIRQControl_ccorres) + apply (clarsimp simp: liftE_alternative) + apply (rule ccorres_alternative2) + apply (ctac add: ccorres_return_CE) + apply (ctac add: ccorres_inst[where P=\ and P'=UNIV]) + apply wp + apply (vcg exspec=invokeIRQControl_modifies) + apply (wp sts_invs_minor') + apply (rule injection_wp, rule refl, wp) + apply simp + apply vcg + apply (rule injection_wp, rule refl) + apply clarsimp + apply (wp hoare_drop_imps) + apply simp + apply (vcg exspec=lookupTargetSlot_modifies) + apply vcg + apply simp + apply (wp isIRQActive_wp) + apply simp + apply (vcg exspec=isIRQActive_modifies) + apply ccorres_rewrite + apply (ctac add: ccorres_return_C_errorE) + apply (rule injection_wp, rule refl) + apply simp + apply (wp checkIRQ_wpE) + apply (rule_tac P'="\ \ksCurThread = tcb_ptr_to_ctcb_ptr thread\" in conseqPre) + apply (prop_tac "\x::machine_word. \ scast Kernel_C.maxIRQ < x + \ x = ucast (toEnum (unat x)::irq)") + apply (clarsimp simp: Kernel_C.maxIRQ_def not_less word_le_nat_alt ucast_nat_def + ucast_ucast_mask) + apply (rule sym) + apply (simp add: and_mask_eq_iff_le_mask) + apply (simp add: mask_def word_le_nat_alt) + apply (clarsimp simp: numeral_2_eq_2 numeral_3_eq_3 exception_defs + ThreadState_defs mask_def) + apply (rule conseqPre, vcg) + apply (fastforce simp: exception_defs split: if_split) + apply (rule subset_refl) + apply simp + apply (wp getSlotCap_wp) + apply vcg + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply clarsimp + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp, wp) + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp, wp) + apply (vcg exspec=getSyscallArg_modifies) + apply ccorres_rewrite + apply (auto split: invocation_label.split arch_invocation_label.split + intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def] + simp: throwError_def invocationCatch_def syscall_error_to_H_cases invocation_eq_use_types)[1] + apply clarsimp + apply (clarsimp simp: interpret_excaps_test_null excaps_map_def + Collect_const_mem word_sless_def word_sle_def + sysargs_rel_to_n + cong: if_cong) + apply (rule conjI) + apply (cut_tac unat_lt2p[where x="args ! 3"]) + apply clarsimp + apply (clarsimp simp: word_less_nat_alt unat_ucast) + apply (auto simp: ct_in_state'_def neq_Nil_conv word_bits_def + excaps_in_mem_def slotcap_in_mem_def + cte_wp_at_ctes_of numeral_eqs[symmetric] + valid_tcb_state'_def + elim!: pred_tcb'_weakenE cte_wp_at_weakenE' + dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1] + apply (clarsimp simp: neq_Nil_conv) + apply (drule interpret_excaps_eq[rule_format, where n=0], simp) + apply (clarsimp simp: rf_sr_ksCurThread) + done + +lemma decodeIRQControlInvocation_ccorres: + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and sch_act_simple and ct_active' + and (excaps_in_mem extraCaps o ctes_of) + and (\s. \v \ set extraCaps. + s \' fst v \ cte_at' (snd v) s) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and cte_wp_at' (\cte. cteCap cte = IRQControlCap) slot + and sysargs_rel args buffer) + (UNIV + \ {s. invLabel_' s = label} \ {s. srcSlot_' s = cte_Ptr slot} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeIRQControlInvocation label args slot (map fst extraCaps) + >>= invocationCatch thread isBlocking isCall InvokeIRQControl) + (Call decodeIRQControlInvocation_'proc)" + supply gen_invocation_type_eq[simp] if_cong[cong] Collect_const[simp del] + supply maxIRQ_ucast_toEnum_eq[simp] maxIRQ_ucast_toEnum_irq_t[simp] + supply maxIRQ_ucast_toEnum_irq_t2[simp] + apply (cinit' lift: invLabel_' srcSlot_' length___unsigned_long_' current_extra_caps_' buffer_') + apply (simp add: decodeIRQControlInvocation_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_Cond_rhs) + apply (simp add: list_case_If2 cong: call_ignore_cong) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: word_less_nat_alt throwError_bind invocationCatch_def) + apply (rule ccorres_cond_true_seq) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: interpret_excaps_test_null excaps_map_def + throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: interpret_excaps_test_null excaps_map_def + word_less_nat_alt Let_def + cong: call_ignore_cong) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply csymbr + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer]) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply (simp add: rangeCheck_def unlessE_def AARCH64.minIRQ_def + ucast_nat_def word_le_nat_alt[symmetric] + linorder_not_le[symmetric] Platform_maxIRQ + length_ineq_not_Nil hd_conv_nth + cong: call_ignore_cong ccorres_prog_only_cong) + apply (simp add: split_def invocationCatch_use_injection_handler injection_handler_bindE + bindE_assoc + cong: call_ignore_cong ccorres_prog_only_cong) + apply (ctac add: ccorres_injection_handler_csum1[OF Arch_checkIRQ_ccorres]) + apply (simp add: injection_liftE liftE_bindE) + apply ccorres_rewrite + apply (ctac add: isIRQActive_ccorres) + apply (simp only: injection_handler_whenE injection_handler_throwError) + apply (rule ccorres_split_when_throwError_cond[where Q=\ and Q'=\]) + apply simp + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def exception_defs + syscall_error_rel_def syscall_error_to_H_def + syscall_error_type_defs) + apply (ctac add: ccorres_injection_handler_csum1 + [OF lookupTargetSlot_ccorres, + unfolded lookupTargetSlot_def]) + prefer 2 + apply ccorres_rewrite + apply (ctac add: ccorres_return_C_errorE) + apply ccorres_rewrite + apply csymbr + apply (ctac add: ccorres_injection_handler_csum1[OF ensureEmptySlot_ccorres]) + prefer 2 + apply ccorres_rewrite + apply (ctac add: ccorres_return_C_errorE) + apply ccorres_rewrite + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def + liftE_bindE bindE_assoc injection_handler_returnOk + cong: ccorres_prog_only_cong) + apply (ctac (no_vcg) add: setThreadState_ccorres) + apply (ctac add: 
invokeIRQControl_ccorres) + apply (clarsimp simp: liftE_alternative) + apply (rule ccorres_alternative2) + apply (ctac add: ccorres_return_CE) + apply (ctac add: ccorres_inst[where P=\ and P'=UNIV]) + apply wp + apply (vcg exspec=invokeIRQControl_modifies) + apply (wp sts_invs_minor') + apply (rule injection_wp, rule refl, wp) + apply simp + apply vcg + apply (rule injection_wp, rule refl) + apply clarsimp + apply (wp hoare_drop_imps) + apply simp + apply (vcg exspec=lookupTargetSlot_modifies) + apply vcg + apply simp + apply (wp isIRQActive_wp) + apply simp + apply (vcg exspec=isIRQActive_modifies) + apply ccorres_rewrite + apply (ctac add: ccorres_return_C_errorE) + apply (rule injection_wp, rule refl) + apply simp + apply (wp checkIRQ_wpE) + apply (rule_tac P'="\ \ksCurThread = tcb_ptr_to_ctcb_ptr thread\" in conseqPre) + apply (prop_tac "\x::machine_word. \ scast Kernel_C.maxIRQ < x + \ x = ucast (toEnum (unat x)::irq)") + apply (clarsimp simp: Kernel_C.maxIRQ_def not_less word_le_nat_alt ucast_nat_def + ucast_ucast_mask) + apply (rule sym) + apply (simp add: and_mask_eq_iff_le_mask) + apply (simp add: mask_def word_le_nat_alt) + apply (clarsimp simp: numeral_2_eq_2 exception_defs ThreadState_defs mask_def) + apply (rule conseqPre, vcg) + apply (fastforce simp: exception_defs) + apply (rule subset_refl) + apply simp + apply (wp getSlotCap_wp) + apply vcg + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply clarsimp + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp, wp) + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply (clarsimp simp: decodeIRQ_arch_helper) + apply (simp add: liftME_invocationCatch) + apply (rule ccorres_add_returnOk) + apply (ctac add: Arch_decodeIRQControlInvocation_ccorres) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (vcg exspec=Arch_decodeIRQControlInvocation_modifies) + apply clarsimp + apply (clarsimp simp: interpret_excaps_test_null excaps_map_def + Collect_const_mem word_sless_def word_sle_def + sysargs_rel_to_n + cong: if_cong) + apply (rule conjI) + apply (cut_tac unat_lt2p[where x="args ! 2"]) + apply clarsimp + apply (clarsimp simp: word_less_nat_alt unat_ucast) + apply (auto simp: ct_in_state'_def neq_Nil_conv word_bits_def + excaps_in_mem_def slotcap_in_mem_def + cte_wp_at_ctes_of numeral_eqs[symmetric] + valid_tcb_state'_def + elim!: pred_tcb'_weakenE cte_wp_at_weakenE' + dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1] + apply (clarsimp simp: neq_Nil_conv) + apply (drule interpret_excaps_eq[rule_format, where n=0], simp) + apply (clarsimp simp: rf_sr_ksCurThread) + done + +end +end diff --git a/proof/crefine/AARCH64/Invoke_C.thy b/proof/crefine/AARCH64/Invoke_C.thy new file mode 100644 index 0000000000..9a70b33346 --- /dev/null +++ b/proof/crefine/AARCH64/Invoke_C.thy @@ -0,0 +1,3491 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Invoke_C +imports Recycle_C "CLib.MonadicRewrite_C" +begin + +context kernel_m +begin + +(************************************************************************) +(* *) +(* decodeDomainInvocation **********************************************) +(* *) +(************************************************************************) + +lemma slotcap_in_mem_ThreadCap: + "\ slotcap_in_mem cap slot (ctes_of s); (s, s') \ rf_sr \ + \ \v. 
cslift s' (cte_Ptr slot) = Some v + \ (cap_get_tag (cte_C.cap_C v) = scast cap_thread_cap) + = (isThreadCap cap) + \ (isThreadCap cap + \ ccap_relation cap (cte_C.cap_C v))" + apply (clarsimp simp: slotcap_in_mem_def) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (clarsimp dest!: ccte_relation_ccap_relation) + apply (simp add: cap_get_tag_isCap) + done + +lemma cap_case_ThreadCap2: + "(case cap of (ThreadCap ptr) \ f ptr | _ \ g) + = (if isThreadCap cap + then f (capTCBPtr cap) + else g)" + by (simp add: isCap_simps + split: capability.split) + +lemma setDomain_ccorres: + "ccorres dc xfdc + (invs' and tcb_at' t and sch_act_simple + and (\s. d \ maxDomain)) + (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. dom_' s = ucast d}) + [] (setDomain t d) (Call setDomain_'proc)" + apply (rule ccorres_gen_asm) + apply (cinit lift: tptr_' dom_') + apply (rule ccorres_pre_getCurThread) + apply (ctac(no_vcg) add: tcbSchedDequeue_ccorres) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule threadSet_ccorres_lemma2[where P=\]) + apply vcg + apply clarsimp + apply (erule(1) rf_sr_tcb_update_no_queue2, + (simp add: typ_heap_simps')+, simp_all?)[1] + apply (rule ball_tcb_cte_casesI, simp+) + apply (simp add: ctcb_relation_def) + apply (ctac(no_vcg) add: isRunnable_ccorres) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (simp add: when_def to_bool_def del: Collect_const) + apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) + apply (ctac add: tcbSchedEnqueue_ccorres) + apply (rule ccorres_return_Skip) + apply (simp add: when_def) + apply (rule_tac R="\s. curThread = ksCurThread s" + in ccorres_cond2) + apply (clarsimp simp: rf_sr_ksCurThread) + apply (ctac add: rescheduleRequired_ccorres) + apply (rule ccorres_return_Skip') + apply simp + apply (wp hoare_drop_imps weak_sch_act_wf_lift_linear) + apply (simp add: guard_is_UNIV_def) + apply simp + apply wp + apply (rule_tac Q="\_. all_invs_but_sch_extra and tcb_at' t and sch_act_simple + and (\s. curThread = ksCurThread s)" + in hoare_strengthen_post) + apply (wp threadSet_all_invs_but_sch_extra) + apply (fastforce simp: valid_pspace_valid_objs' st_tcb_at_def[symmetric] + sch_act_simple_def st_tcb_at'_def weak_sch_act_wf_def + split: if_splits) + apply (simp add: guard_is_UNIV_def) + apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple and (\s. curThread = ksCurThread s)" + in hoare_strengthen_post) + apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_not_queued + hoare_vcg_imp_lift hoare_vcg_all_lift) + apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def) + apply (fastforce simp: valid_tcb'_def tcb_cte_cases_def + invs'_def valid_state'_def valid_pspace'_def) + done + +lemma active_runnable': + "active' state \ runnable' state" + by (fastforce simp: runnable'_def) + +lemma decodeDomainInvocation_ccorres: + notes Collect_const[simp del] + shows + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and sch_act_simple and ct_active' + and (excaps_in_mem extraCaps \ ctes_of) + and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). + ex_nonz_cap_to' y s) + and (\s. \v \ set extraCaps. + s \' fst v) + and sysargs_rel args buffer) + (UNIV + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. call_' s = from_bool isCall} + \ {s. invLabel_' s = lab} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeDomainInvocation lab args extraCaps + >>= invocationCatch thread isBlocking isCall (uncurry InvokeDomain)) + (Call decodeDomainInvocation_'proc)" + supply gen_invocation_type_eq[simp] + apply (cinit' lift: length___unsigned_long_' current_extra_caps_' call_' invLabel_' buffer_' + simp: decodeDomainInvocation_def list_case_If2 whenE_def) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: invocation_eq_use_types) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: unat_gt_0[symmetric] del:) + apply (rule ccorres_add_return) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (simp add: hd_conv_nth word_le_nat_alt) + apply (simp add: unat_scast_numDomains) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: throwError_bind invocationCatch_def + hd_conv_nth word_le_nat_alt unat_numDomains_to_H + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: null_def excaps_map_def unat_numDomains_to_H) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: throwError_bind invocationCatch_def + interpret_excaps_test_null + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: interpret_excaps_test_null hd_conv_nth cap_case_ThreadCap2) + apply (rule ccorres_add_return) + apply (rule_tac r'="(\rv _ rv'. ((cap_get_tag rv' = scast cap_thread_cap) + = (isThreadCap rv)) + \ (cap_get_tag rv' = scast cap_thread_cap \ ccap_relation rv rv')) + (fst (extraCaps ! 
0))" + and xf'=tcap_' in ccorres_split_nothrow) + apply (rule ccorres_from_vcg[where P="excaps_in_mem extraCaps \ ctes_of" and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: excaps_in_mem_def return_def neq_Nil_conv) + apply (drule(1) slotcap_in_mem_ThreadCap) + apply (frule interpret_excaps_eq[rule_format, where n=0], simp) + apply (clarsimp simp: typ_heap_simps' mask_def word_sless_def word_sle_def) + apply ceqv + apply csymbr + apply clarsimp + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: Collect_const) + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: Collect_const returnOk_def uncurry_def) + apply (simp (no_asm) add: ccorres_invocationCatch_Inr split_def + performInvocation_def liftE_bindE bind_assoc) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (ctac add: setDomain_ccorres) + apply (rule ccorres_alternative2) + apply (ctac add: ccorres_return_CE) + apply wp + apply (vcg exspec=setDomain_modifies) + apply (wp sts_invs_minor') + apply (vcg exspec=setThreadState_modifies) + apply wp + apply simp + apply clarsimp + apply vcg + apply wp + apply simp + apply clarsimp + apply (vcg exspec=getSyscallArg_modifies) + + apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' + invs_sch_act_wf' ct_in_state'_def pred_tcb_at' + rf_sr_ksCurThread word_sle_def word_sless_def sysargs_rel_to_n + mask_eq_iff_w2p mask_eq_iff_w2p word_size ThreadState_defs) + apply (rule conjI) + apply (clarsimp simp: linorder_not_le isCap_simps) + apply (rule conjI, clarsimp simp: unat64_eq_of_nat) + apply clarsimp + apply (drule_tac x="extraCaps ! 0" and P="\v. valid_cap' (fst v) s" in bspec) + apply (clarsimp simp: nth_mem interpret_excaps_test_null excaps_map_def) + apply (clarsimp simp: valid_cap_simps' pred_tcb'_weakenE active_runnable') + apply (intro conjI; fastforce?) + apply (fastforce simp: tcb_st_refs_of'_def elim:pred_tcb'_weakenE) + apply (simp add: word_le_nat_alt unat_ucast unat_numDomains_to_H le_maxDomain_eq_less_numDomains) + apply (clarsimp simp: ccap_relation_def cap_to_H_simps cap_thread_cap_lift) + subgoal (* args ! 0 can be contained in a domain-sized word *) + by (clarsimp simp: not_le unat_numDomains_to_H ucast_ucast_len[simplified word_less_nat_alt] + dest!: less_numDomains_is_domain) + done + +(************************************************************************) +(* *) +(* invokeCNodeDelete_ccorres ********************************************) +(* *) +(************************************************************************) + +lemma invokeCNodeDelete_ccorres: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (\s. invs' s \ sch_act_simple s) + (UNIV \ {s. 
destSlot_' s = Ptr destSlot}) [] + (invokeCNode (Delete destSlot)) + (Call invokeCNodeDelete_'proc)" + apply (cinit lift: destSlot_' simp del: return_bind cong:call_ignore_cong) + apply (rule ccorres_trim_returnE, simp, simp) + apply (rule ccorres_callE) + apply (rule cteDelete_ccorres[simplified]) + apply simp+ +done + + + +(************************************************************************) +(* *) +(* invokeCNodeRevoke_ccorres ********************************************) +(* *) +(************************************************************************) +lemma invokeCNodeRevoke_ccorres: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple) + (UNIV \ {s. destSlot_' s = cte_Ptr destSlot}) [] + (invokeCNode (Revoke destSlot)) + (Call invokeCNodeRevoke_'proc)" + apply (cinit lift: destSlot_' simp del: return_bind cong:call_ignore_cong) + apply (rule ccorres_trim_returnE, simp, simp) + apply (rule ccorres_callE) + apply (rule cteRevoke_ccorres[simplified]) + apply simp+ +done + + +(************************************************************************) +(* *) +(* invokeCNodeCancelBadgedSends_ccorres *********************************) +(* *) +(************************************************************************) + +lemma invokeCNodeCancelBadgedSends_ccorres: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple and valid_cap' cap and K (isEndpointCap cap)) (UNIV \ {s. ccap_relation cap (cap_' s)}) [] + (invokeCNode (CancelBadgedSends cap)) + (Call invokeCNodeCancelBadgedSends_'proc)" + apply (simp) + apply (rule ccorres_gen_asm) + apply (clarsimp simp: isCap_simps) + apply (cinit lift: cap_' simp del: return_bind cong:call_ignore_cong) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (simp add: cap_get_tag_EndpointCap del: Collect_const) + apply csymbr + apply csymbr + apply (simp add: unless_def liftE_def when_def Collect_True del: Collect_const) + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow_novcg) + apply (rule_tac R=\ in ccorres_cond2) + apply (clarsimp simp: cap_get_tag_isCap[symmetric] simp del: Collect_const dest!: cap_get_tag_to_H) + apply (rule ccorres_rhs_assoc | csymbr)+ + apply (ctac add: cancelBadgedSends_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre,vcg) + apply clarsimp + apply (simp add: return_def) + apply wp + apply (simp add: guard_is_UNIV_def) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: valid_cap_simps' cap_get_tag_EndpointCap) + done + + + +(************************************************************************) +(* *) +(* invokeCNodeInsert_ccorres ********************************************) +(* *) +(************************************************************************) + +lemma invokeCNodeInsert_ccorres: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (cte_wp_at' (\scte. capMasterCap (cteCap scte) = capMasterCap cap) src + and valid_mdb' and pspace_aligned' and pspace_canonical' + and valid_objs' and valid_cap' cap) + (UNIV \ {s. destSlot_' s = Ptr dest} \ + {s. srcSlot_' s = Ptr src} \ + {s. 
ccap_relation cap (cap_' s)}) [] + (invokeCNode (Insert cap src dest)) + (Call invokeCNodeInsert_'proc)" + apply (cinit lift: destSlot_' srcSlot_' cap_' simp del: return_bind cong:call_ignore_cong) + apply (simp add: liftE_def) + apply (ctac (no_vcg) add: cteInsert_ccorres) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre,vcg) apply clarsimp apply (simp add: return_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + done + + +(************************************************************************) +(* *) +(* invokeCNodeMove_ccorres *********************************************) +(* *) +(************************************************************************) + +lemma invokeCNodeMove_ccorres: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (valid_mdb' and pspace_aligned' and pspace_canonical') + (UNIV \ {s. destSlot_' s = Ptr dest} \ + {s. srcSlot_' s = Ptr src} \ + {s. ccap_relation cap (cap_' s)}) [] + (invokeCNode (Move cap src dest)) + (Call invokeCNodeMove_'proc)" + apply (cinit lift: destSlot_' srcSlot_' cap_' simp del: return_bind cong:call_ignore_cong) + apply (simp add: liftE_def) + apply (ctac (no_vcg) add: cteMove_ccorres) + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre,vcg) apply clarsimp apply (simp add: return_def) + apply wp + apply clarsimp + done + + +(************************************************************************) +(* *) +(* invokeCNodeRotate_ccorres *******************************************) +(* *) +(************************************************************************) + +lemma invokeCNodeRotate_ccorres: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (\s. cte_at' slot1 s \ cte_at' slot2 s \ slot1 \ slot2 + \ valid_pspace' s \ valid_cap' cap2 s + \ cte_wp_at' (\c. weak_derived' cap2 (cteCap c)) slot2 s + \ cte_wp_at' (\c. isUntypedCap (cteCap c) \ (cteCap c) = cap2) slot2 s + \ cte_wp_at' (\c. cteCap c \ NullCap) slot2 s + \ (slot1 \ slot3 \ cte_wp_at' (\c. cteCap c = capability.NullCap) slot3 s)) + (UNIV \ {s. slot1_' s = Ptr slot1} \ + {s. slot2_' s = Ptr slot2} \ + {s. slot3_' s = Ptr slot3} \ + {s. ccap_relation cap1 (cap1_' s)} \ + {s. 
ccap_relation cap2 (cap2_' s)}) [] + (invokeCNode (Rotate cap1 cap2 slot1 slot2 slot3)) + (Call invokeCNodeRotate_'proc)" + apply (cinit lift: slot1_' slot2_' slot3_' cap1_' cap2_' simp del: return_bind cong:call_ignore_cong) + apply (simp split del: if_split del: Collect_const) + apply (simp only: liftE_def) + apply (rule_tac r'="dc" and xf'="xfdc" in ccorres_split_nothrow_novcg) + apply (rule ccorres_cond [where R = \]) + apply (clarsimp simp: Collect_const_mem) + apply (ctac (no_vcg) add: cteSwap_ccorres) + apply (ctac (no_vcg) add: cteMove_ccorres) + apply (simp only: K_bind_def) + apply (ctac (no_vcg) add: cteMove_ccorres) + apply (rule hoare_strengthen_post) + apply (rule cteMove_valid_pspace') + apply (simp add: valid_pspace'_def) + apply ceqv + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre,vcg) + apply clarsimp + apply (simp add: return_def) + apply wp + apply (simp add: guard_is_UNIV_def) + apply (clarsimp simp: valid_pspace'_def) + apply (rule conjI, clarsimp) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (clarsimp simp: weak_derived'_def isCap_simps) + done + + + +(************************************************************************) +(* *) +(* invokeCNodeSaveCaller ***********************************************) +(* *) +(************************************************************************) + +lemma invokeCNodeSaveCaller_ccorres: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (valid_mdb' and pspace_aligned' and pspace_canonical' and cur_tcb') + (UNIV \ {s. destSlot_' s = Ptr destSlot}) [] + (invokeCNode (SaveCaller destSlot)) + (Call invokeCNodeSaveCaller_'proc)" + apply (cinit lift: destSlot_' simp del: return_bind cong:call_ignore_cong) + apply (simp add: Collect_True split del: if_split del: Collect_const cong:call_ignore_cong) + apply (simp only: liftE_def) + apply (rule ccorres_Guard_Seq)+ + apply (simp only: bind_assoc) + apply (rule ccorres_pre_getCurThread) + + apply (simp only: getThreadCallerSlot_def locateSlot_conv) + + apply (rule_tac P="\s. rv=ksCurThread s \ is_aligned rv tcbBlockSizeBits" and r'="\ a c. 
c = Ptr a" + and xf'="srcSlot_'" and P'=UNIV in ccorres_split_nothrow) + + apply (rule ccorres_return) + apply vcg + apply clarsimp + apply (simp add: cte_level_bits_def size_of_def of_nat_def) + apply (simp add: rf_sr_def cstate_relation_def Kernel_C.tcbCaller_def tcbCallerSlot_def) + apply (clarsimp simp: Let_def objBits_defs) + apply (subst ptr_val_tcb_ptr_mask2[simplified mask_def objBits_defs, simplified]) + apply assumption + apply simp + apply ceqv + + apply (simp del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_getSlotCap_cte_at) + apply (rule ccorres_move_c_guard_cte) + apply (ctac (no_vcg)) + apply csymbr + apply (wpc, simp_all add: cap_get_tag_isCap isCap_simps + Collect_False Collect_True + del: Collect_const)[1] + apply (rule ccorres_fail)+ + \ \NullCap case\ + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (simp add: return_def) + apply (rule ccorres_fail)+ + \ \ReplyCap case\ + apply (rule ccorres_rhs_assoc) + apply csymbr + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (simp add: cap_get_tag_ReplyCap) + apply (case_tac " (capReplyMaster_CL (cap_reply_cap_lift cap))=0", simp_all add: to_bool_def)[1] + apply (ctac (no_vcg) add: cteMove_ccorres) + apply (rule ccorres_return_CE [unfolded returnOk_def,simplified], simp+)[1] + apply wp + apply (rule ccorres_fail') + apply (rule ccorres_fail)+ + apply (wp getSlotCap_wp) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) + apply wp + apply vcg + apply (clarsimp simp: word_sle_def cte_wp_at_ctes_of + tcb_aligned' cur_tcb'_def + tcb_cnode_index_defs ptr_add_assertion_positive) + apply (frule(1) rf_sr_tcb_ctes_array_assertion2[rotated]) + apply (simp add: tcb_cnode_index_defs array_assertion_shrink_right + rf_sr_ksCurThread) + done + + +(************************************************************************) +(* *) +(* decodeCNodeInvocation ***********************************************) +(* *) +(************************************************************************) + +lemma ccorres_basic_srnoop2: + "\ \s. globals (g s) = globals s; + ccorres_underlying rf_sr Gamm r xf arrel axf G (g ` G') hs a c \ + \ ccorres_underlying rf_sr Gamm r xf arrel axf G G' + hs a (Basic g ;; c)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_r) + apply assumption + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply simp + done + +lemma updateCapData_spec2: + "\cap preserve newData. 
\ \ \ ccap_relation cap \cap \ preserve = to_bool (\preserve) \ newData = \newData\ + Call updateCapData_'proc + \ ccap_relation (updateCapData preserve newData cap) \ret__struct_cap_C \" + by (simp add: updateCapData_spec) + +lemma mdbRevocable_CL_cte_to_H: + "(mdbRevocable_CL (cteMDBNode_CL clcte) = 0) + = (\ mdbRevocable (cteMDBNode (cte_to_H clcte)))" + by (simp add: cte_to_H_def mdb_node_to_H_def to_bool_def) + +lemma ccorres_add_specific_return: + "ccorres_underlying sr \ r xf arrel axf P P' hs + (do v \ return val; f v od) c + \ ccorres_underlying sr \ r xf arrel axf P P' hs (f val) c" + by simp + +(* FIXME: also in Tcb_C *) +lemma ccorres_subgoal_tailE: + "\ ccorres rvr xf Q Q' hs (b ()) d; + ccorres rvr xf Q Q' hs (b ()) d \ ccorres rvr xf P P' hs (a >>=E b) (c ;; d) \ + \ ccorres rvr xf P P' hs (a >>=E b) (c ;; d)" + by simp + + +lemmas invocation_eq_use_types_sym = invocation_eq_use_types[TRY [symmetric]] + +lemma label_in_CNodeInv_ranges: + "(label < scast Kernel_C.CNodeRevoke \ scast Kernel_C.CNodeSaveCaller < label) + = (gen_invocation_type label \ set [CNodeRevoke .e. CNodeSaveCaller])" + "(scast Kernel_C.CNodeCopy \ label \ label \ scast Kernel_C.CNodeMutate) + = (gen_invocation_type label \ set [CNodeCopy .e. CNodeMutate])" + apply (simp_all add: upto_enum_def fromEnum_def enum_gen_invocation_labels + del: upt.simps) + apply (simp_all add: atLeastLessThanSuc) + apply (simp_all add: toEnum_def enum_invocation_label enum_gen_invocation_labels) + apply (simp_all flip: gen_invocation_type_eq) + apply (simp_all add: invocation_eq_use_types_sym invocation_label_defs) + apply (simp_all add: unat_arith_simps) + apply arith+ + done + +lemma cnode_invok_case_cleanup2: + "i \ set [CNodeCopy .e. CNodeMutate] \ + (case i of CNodeRevoke \ P | CNodeDelete \ Q | CNodeCancelBadgedSends \ R + | CNodeRotate \ S | CNodeSaveCaller \ T | _ \ U) = U" + apply (rule cnode_invok_case_cleanup) + apply (simp add: upto_enum_def fromEnum_def toEnum_def + enum_invocation_label enum_gen_invocation_labels) + apply auto + done + +lemma hasCancelSendRights_spec: + "\cap. \ \ \ ccap_relation cap \cap \ + Call hasCancelSendRights_'proc + \ \ret__unsigned_long = from_bool (hasCancelSendRights cap) \" + apply vcg + apply (clarsimp simp: if_1_0_0) + apply (rule conjI) + apply clarsimp + apply (drule sym, drule (1) cap_get_tag_to_H) + apply (clarsimp simp: hasCancelSendRights_def to_bool_def + split: if_split bool.splits) + apply (rule impI) + apply (case_tac cap, + auto simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs hasCancelSendRights_def + dest: cap_get_tag_isArchCap_unfolded_H_cap + split: capability.splits bool.splits)[1] + done + +lemma decodeCNodeInvocation_ccorres: + notes gen_invocation_type_eq[simp] + shows + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and sch_act_simple and ct_active' + and valid_cap' cp + and (excaps_in_mem extraCaps \ ctes_of) + and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). + ex_nonz_cap_to' y s) + and (\s. \v \ set extraCaps. + s \' fst v) + and sysargs_rel args buffer) + (UNIV + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. ccap_relation cp (cap_' s)} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. call_' s = from_bool isCall} + \ {s. invLabel_' s = lab} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeCNodeInvocation lab args cp (map fst extraCaps) + >>= invocationCatch thread isBlocking isCall InvokeCNode) + (Call decodeCNodeInvocation_'proc)" + supply if_cong[cong] + apply (cases "\isCNodeCap cp") + apply (simp add: decodeCNodeInvocation_def + cong: conj_cong) + apply (rule ccorres_fail') + apply (cinit' (no_subst_asm) lift: length___unsigned_long_' cap_' current_extra_caps_' + call_' invLabel_' buffer_') + apply (clarsimp simp: word_less_nat_alt decodeCNodeInvocation_def + list_case_If2 invocation_eq_use_types + label_in_CNodeInv_ranges[unfolded word_less_nat_alt] + cnode_invok_case_cleanup2 + simp del: Collect_const + cong: call_ignore_cong globals.fold_congs + StateSpace.state.fold_congs bool.case_cong + cong del: invocation_label.case_cong_weak gen_invocation_labels.case_cong_weak) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: unlessE_def throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: length_ineq_not_Nil unlessE_whenE + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply (simp add: split_def Let_def invocationCatch_use_injection_handler + injection_bindE [OF refl refl] bindE_assoc + del: Collect_const) + (* sigh \ just going to blog this here. i can't use call_ignore_cong + because we need to rewrite within at least one argument in order to + match the rewrite that's happened in the argument to ?R13918 and we + can't apply ctac below. but once we have simplified away + newData = newData below there's no way to get it back. 
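+ (n.b., hedged gloss: call_ignore_cong is the congruence rule supplied via "cong: call_ignore_cong" elsewhere in these decode lemmas; its effect is roughly to stop simp rewriting inside the arguments of the pending call, and that protection is exactly what has to be given up at this point.)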
sigh *) + apply (ctac add: ccorres_injection_handler_csum1 + [OF lookupTargetSlot_ccorres, + unfolded lookupTargetSlot_def]) + apply (simp add: Collect_False del: Collect_const + cong: call_ignore_cong) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: word_less_nat_alt + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_split_throws) + apply (rule ccorres_rhs_assoc | csymbr)+ + apply (simp add: invocationCatch_use_injection_handler[symmetric] + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add:if_P del: Collect_const) + apply (rule ccorres_cond_true_seq) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: linorder_not_less del: Collect_const cong: call_ignore_cong) + apply csymbr + apply (simp add: if_1_0_0 interpret_excaps_test_null + excaps_map_def + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=3 and buffer=buffer]) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply (simp add: split_def invocationCatch_use_injection_handler + injection_bindE [OF refl refl] bindE_assoc + injection_liftE [OF refl] if_not_P + cong: call_ignore_cong) + apply (ctac add: ccorres_injection_handler_csum1 [OF ensureEmptySlot_ccorres]) + prefer 2 + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply simp + apply (ctac add: ccorres_injection_handler_csum1 + [OF lookupSourceSlot_ccorres, + unfolded lookupSourceSlot_def]) + prefer 2 + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply (simp add: liftE_bindE cong: call_ignore_cong) + apply csymbr + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_pre_getCTE) + apply (rule ccorres_add_return) + apply (rule_tac xf'=ret__unsigned_longlong_' + and r'="\_ rv'. (rv' = scast cap_null_cap) + = (cteCap rvb = NullCap)" + in ccorres_split_nothrow) + apply (rule_tac P'="{s. \v. 
cslift s (cte_Ptr rva) = Some v + \ ccte_relation rvb v}" + in ccorres_from_vcg[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (rule subsetI, clarsimp simp: Collect_const_mem return_def) + apply (clarsimp dest!: ccte_relation_ccap_relation + simp: cap_get_tag_NullCap + typ_heap_simps) + apply ceqv + apply (simp del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def injection_handler_throwError) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def hd_conv_nth + syscall_error_rel_def exception_defs + syscall_error_to_H_cases numeral_eqs) + apply (clarsimp simp: lookup_fault_missing_capability_lift + mask_eq_iff_w2p word_size word_less_nat_alt + word_bits_def hd_conv_nth take_bit_Suc) + apply (simp add: whenE_def[where P=False] + injection_handler_returnOk Collect_const[symmetric] + cong: call_ignore_cong del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + \ \CNodeCopy case\ + apply (simp add: Collect_const[symmetric] del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_throwError if_P) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: list_case_helper injection_handler_returnOk + split_def hd_conv_nth numeral_eqs[symmetric] + if_not_P + del: Collect_const) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=4 and buffer=buffer]) + apply csymbr + apply (rule ccorres_move_c_guard_cte) + apply (simp add: split_def del: Collect_const) + apply (rule_tac val="maskCapRights (rightsFromWord (args ! 4)) (cteCap rvb)" + in ccorres_add_specific_return) + apply (ctac add: maskCapRights_ccorres) + apply (ctac add: ccorres_injection_handler_csum1 [OF deriveCap_ccorres]) + prefer 2 + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply simp + apply csymbr + apply csymbr + apply csymbr + apply (simp add: cap_get_tag_NullCap del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_throwError whenE_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: whenE_def injection_handler_returnOk + ccorres_invocationCatch_Inr performInvocation_def + bindE_assoc) + apply (ctac add: setThreadState_ccorres) + apply (simp add: ccorres_cond_iffs) + apply (ctac(no_vcg) add: invokeCNodeInsert_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (wp sts_valid_pspace_hangers) + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply simp + apply (wp injection_wp_E[OF refl]) + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' + and valid_cap' rv and valid_objs' + and tcb_at' thread and (\s. 
sch_act_wf (ksSchedulerAction s) s)" + in hoare_vcg_R_conj) + apply (rule deriveCap_Null_helper[OF deriveCap_derived]) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (fastforce simp: is_derived'_def badge_derived'_def + valid_tcb_state'_def) + apply (simp add: Collect_const_mem all_ex_eq_helper) + apply (vcg exspec=deriveCap_modifies) + apply wp + apply (simp add: Collect_const_mem) + apply (vcg exspec=maskCapRights_modifies) + apply wp + apply (simp add: Collect_const_mem) + apply (vcg exspec=getSyscallArg_modifies) + apply (rule ccorres_Cond_rhs_Seq) + \ \CNodeMint case\ + apply (simp flip: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp split: if_split simp: injection_handler_throwError) + apply (auto simp: throwError_def return_def + syscall_error_to_H_cases syscall_error_rel_def + exception_defs)[1] + apply (simp add: list_case_helper injection_handler_returnOk + split_def linorder_not_less numeral_eqs[symmetric] + hd_conv_nth le_Suc_eq le_imp_diff_is_add if_not_P + del: Collect_const) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=4 and buffer=buffer]) + apply csymbr + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=5 and buffer=buffer]) + apply (rule ccorres_move_c_guard_cte) + apply (simp del: Collect_const) + apply (rule_tac val="maskCapRights (rightsFromWord (args ! 4)) (cteCap rvb)" + in ccorres_add_specific_return) + apply (ctac add: maskCapRights_ccorres) + apply (rule ccorres_symb_exec_r) + apply (ctac add: ccorres_injection_handler_csum1 [OF deriveCap_ccorres]) + prefer 2 + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply simp + apply csymbr + apply csymbr + apply csymbr + apply (simp add: cap_get_tag_NullCap del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def injection_handler_returnOk + invocationCatch_def injection_handler_throwError) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: whenE_def injection_handler_returnOk + ccorres_invocationCatch_Inr + performInvocation_def bindE_assoc) + apply (ctac add: setThreadState_ccorres) + apply (simp add: ccorres_cond_iffs) + apply (ctac(no_vcg) add: invokeCNodeInsert_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (wp sts_valid_pspace_hangers) + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply (simp add: conj_comms valid_tcb_state'_def) + apply (wp injection_wp_E[OF refl]) + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' + and valid_cap' rv and valid_objs' + and tcb_at' thread and (\s. 
sch_act_wf (ksSchedulerAction s) s)" + in hoare_vcg_R_conj) + apply (rule deriveCap_Null_helper [OF deriveCap_derived]) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (fastforce simp: is_derived'_def badge_derived'_def) + apply (simp add: Collect_const_mem all_ex_eq_helper) + apply (vcg exspec=deriveCap_modifies) + apply (simp add: Collect_const_mem) + apply (vcg exspec=updateCapData_spec2) + apply (rule conseqPre, vcg exspec=updateCapData_spec2, clarsimp) + apply fastforce + apply simp + apply wp + apply (simp add: Collect_const_mem hd_drop_conv_nth) + apply (vcg exspec=maskCapRights_modifies) + apply simp + apply wp + apply (simp add: Collect_const_mem) + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply wp + apply (simp add: Collect_const_mem) + apply (vcg exspec=getSyscallArg_modifies) + apply (rule ccorres_Cond_rhs_Seq) + \ \CNodeMove case\ + apply (simp add: Collect_const[symmetric] split_def + injection_handler_returnOk whenE_def + ccorres_invocationCatch_Inr + performInvocation_def bindE_assoc + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=newCap_' in ccorres_abstract, ceqv) + apply (rule_tac P="ccap_relation (cteCap rvb) rv'a" + in ccorres_gen_asm2) + apply csymbr + apply csymbr + apply (simp add: cap_get_tag_NullCap) + apply (ctac add: setThreadState_ccorres) + apply (simp add: ccorres_cond_iffs) + apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (wp sts_valid_pspace_hangers) + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (rule ccorres_Cond_rhs_Seq) + \ \CNodeMutate case\ + apply (rule ccorres_rhs_assoc)+ + apply (simp add: flip: Collect_const + cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_throwError if_P) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: if_not_P del: Collect_const) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=4 and buffer=buffer]) + apply (simp add: list_case_helper split_def hd_conv_nth + Collect_const[symmetric] + injection_handler_returnOk + del: Collect_const) + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'="newCap_'" in ccorres_abstract, ceqv) + apply (rule_tac P="ccap_relation (updateCapData True (args ! 
4) (cteCap rvb)) rv'a" + in ccorres_gen_asm2) + apply csymbr + apply csymbr + apply (simp add: cap_get_tag_isCap del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def injection_handler_throwError numeral_eqs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: whenE_def injection_handler_returnOk + ccorres_invocationCatch_Inr numeral_eqs + performInvocation_def bindE_assoc) + apply (ctac add: setThreadState_ccorres) + apply (simp add: ccorres_cond_iffs) + apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply (wp sts_valid_pspace_hangers)+ + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply (simp add: Collect_const_mem exception_defs) + apply (vcg exspec=updateCapData_spec2) + apply (rule conseqPre, vcg exspec=updateCapData_spec2, clarsimp) + apply fastforce + apply wp + apply (simp add: Collect_const_mem) + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (simp add: upto_enum_def fromEnum_def toEnum_def + enum_gen_invocation_labels) + apply (wp getCTE_wp') + apply (simp add: Collect_const_mem) + apply vcg + apply (simp add: cte_wp_at_ctes_of[where P="(=) cte" for cte] + cte_wp_at_ctes_of[where P="\cte. Q cte \ R cte" for Q R] + badge_derived_updateCapData) + apply (rule validE_R_validE) + apply (rule_tac Q'="\a b. cte_wp_at' (\x. True) a b \ invs' b \ + tcb_at' thread b \ sch_act_wf (ksSchedulerAction b) b \ valid_tcb_state' Restart b + \ Q2 b" for Q2 in hoare_strengthen_postE_R) + prefer 2 + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule ctes_of_valid') + apply (erule invs_valid_objs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (clarsimp simp:valid_updateCapDataI invs_valid_objs' invs_valid_pspace') + apply assumption + apply (wp hoare_vcg_all_liftE_R injection_wp_E[OF refl] + lsfco_cte_at' hoare_vcg_const_imp_lift_R + )+ + apply (simp add: Collect_const_mem word_sle_def word_sless_def + all_ex_eq_helper) + apply (vcg exspec=lookupSourceSlot_modifies) + apply simp + apply (wp injection_wp_E[OF refl]) + apply (simp add: Collect_const_mem) + apply (vcg exspec=ensureEmptySlot_modifies) + apply simp + apply (wp hoare_drop_imps)[1] + apply (simp add: Collect_const_mem) + apply vcg + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply wp + apply (simp add: Collect_const_mem) + apply (vcg exspec=getSyscallArg_modifies) + apply (vcg exspec=getSyscallArg_modifies + exspec=ensureEmptySlot_modifies exspec=lookupSourceSlot_modifies + exspec=maskCapRights_modifies exspec=updateCapData_modifies + exspec=setThreadState_modifies exspec=invokeCNodeMove_modifies + exspec=invokeCNodeInsert_modifies exspec=deriveCap_modifies) + apply (simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_returnOk ccorres_invocationCatch_Inr + performInvocation_def bindE_assoc) + apply (rule ccorres_split_throws) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: setThreadState_ccorres) + apply (ctac(no_vcg) add: invokeCNodeRevoke_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply (wp sts_invs_minor')+ + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply (vcg 
exspec=setThreadState_modifies exspec=invokeCNodeRevoke_modifies) + apply (simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_returnOk ccorres_invocationCatch_Inr + performInvocation_def bindE_assoc) + apply (rule ccorres_split_throws) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: setThreadState_ccorres) + apply (ctac(no_vcg) add: invokeCNodeDelete_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply (wp sts_invs_minor')+ + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply (vcg exspec=setThreadState_modifies exspec=invokeCNodeDelete_modifies) + apply (simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_returnOk bindE_assoc + injection_bindE[OF refl refl] split_def) + apply (rule ccorres_split_throws) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: ccorres_injection_handler_csum1 [OF ensureEmptySlot_ccorres]) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) + apply (ctac add: setThreadState_ccorres) + apply (ctac(no_vcg) add: invokeCNodeSaveCaller_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply (wp sts_valid_pspace_hangers)+ + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply (wp injection_wp_E[OF refl]) + apply (simp add: all_ex_eq_helper) + apply (vcg exspec=ensureEmptySlot_modifies) + apply (vcg exspec=invokeCNodeSaveCaller_modifies exspec=setThreadState_modifies + exspec=ensureEmptySlot_modifies) + apply (simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_returnOk ccorres_invocationCatch_Inr + injection_bindE[OF refl refl] bindE_assoc + injection_liftE [OF refl] split_def + del: Collect_const) + apply (rule ccorres_split_throws) + apply (rule ccorres_rhs_assoc)+ + apply (simp only: liftE_bindE) + apply (rule ccorres_pre_getCTE) + apply csymbr + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=destCap_' in ccorres_abstract, ceqv) + apply (rule_tac P="ccap_relation (cteCap rva) rv'" in ccorres_gen_asm2) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=ret__unsigned_long_' in ccorres_abstract, ceqv) + apply (rule_tac P="rv'a = from_bool (hasCancelSendRights (cteCap rva))" + in ccorres_gen_asm2) + apply (simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: unlessE_def whenE_def injection_handler_throwError from_bool_0) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: unlessE_def whenE_def injection_handler_returnOk + from_bool_neq_0 + del: Collect_const) + apply (simp add: unlessE_def injection_handler_returnOk + ccorres_invocationCatch_Inr + performInvocation_def bindE_assoc) + apply (ctac add: setThreadState_ccorres) + apply (ctac(no_vcg) add: invokeCNodeCancelBadgedSends_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply (wp sts_invs_minor')+ + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply (simp add: Collect_const_mem) + apply vcg + apply (rule conseqPre, 
vcg, clarsimp) + apply fastforce + apply (vcg) + apply (simp del: Collect_const) + apply (rule conseqPre, vcg) + apply (simp del: Collect_const) + apply (vcg exspec=setThreadState_modifies + exspec=invokeCNodeCancelBadgedSends_modifies + exspec=hasCancelSendRights_modifies) + apply (simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (simp add: if_1_0_0 del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: if_P | rule ccorres_cond_true_seq)+ + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: injection_handler_throwError + split: list.split) + apply (simp add: throwError_def return_def exception_defs + syscall_error_rel_def syscall_error_to_H_cases) + apply clarsimp + apply (simp add: invocationCatch_use_injection_handler[symmetric] + del: Collect_const) + apply csymbr + apply (simp add: interpret_excaps_test_null excaps_map_def + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule ccorres_cond_true_seq) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply (simp add: interpret_excaps_test_null Suc_length_not_empty' + if_1_0_0 del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (clarsimp simp: neq_Nil_conv) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: split_def Collect_const[symmetric] del: Collect_const) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=3 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=4 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=5 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=6 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=7 and buffer=buffer]) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=1]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply (simp add: le_Suc_eq if_not_P length_ineq_not_Nil) + apply (simp add: invocationCatch_use_injection_handler + injection_liftE [OF refl] injection_handler_returnOk + injection_bindE [OF refl refl] bindE_assoc) + apply (ctac add: ccorres_injection_handler_csum1 + [OF lookupSourceSlot_ccorres, unfolded lookupSourceSlot_def]) + prefer 2 + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply (simp add: Collect_False del: Collect_const) + apply csymbr + apply (ctac add: ccorres_injection_handler_csum1 + [OF lookupPivotSlot_ccorres, unfolded lookupPivotSlot_def]) + prefer 2 + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply (simp add: Collect_False split_def + 
del: Collect_const) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def injection_handler_throwError) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: whenE_def[where P=False] injection_handler_returnOk + del: Collect_const) + apply (rule ccorres_subgoal_tailE) + apply (rule ccorres_move_c_guard_cte) + apply (simp add: injection_handler_returnOk del: Collect_const) + apply (simp add: liftE_bindE liftM_def del: Collect_const) + apply (rule ccorres_pre_getCTE) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=ret__unsigned_longlong_' in ccorres_abstract, ceqv) + apply (rule_tac P="(rv' = scast cap_null_cap) = (cteCap rvc = NullCap)" + in ccorres_gen_asm2) + apply (simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def injection_handler_throwError) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def hd_conv_nth + syscall_error_rel_def exception_defs + syscall_error_to_H_cases numeral_eqs[symmetric]) + apply (clarsimp simp: lookup_fault_missing_capability_lift + mask_eq_iff_w2p word_size word_less_nat_alt + word_bits_def) + apply (simp add: injection_handler_returnOk whenE_def[where P=False] + del: Collect_const) + apply (rule ccorres_pre_getCTE) + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=ret__unsigned_longlong_' in ccorres_abstract, ceqv) + apply (rule_tac P="(rv'a = scast cap_null_cap) = (cteCap x = NullCap)" + in ccorres_gen_asm2) + apply (simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def injection_handler_throwError) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def hd_conv_nth + exception_defs syscall_error_rel_def + numeral_eqs) + apply (simp add: syscall_error_to_H_cases + lookup_fault_missing_capability_lift) + apply (simp add: mask_eq_iff_w2p word_less_nat_alt + word_size word_bits_def take_bit_Suc) + apply (simp add: whenE_def[where P=False] injection_handler_returnOk + hd_conv_nth numeral_eqs[symmetric]) + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'="newSrcCap_'" in ccorres_abstract, ceqv) + apply (rule_tac P="ccap_relation (updateCapData True (args ! 5) (cteCap rvc)) rv'b" + in ccorres_gen_asm2) + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'="newPivotCap_'" in ccorres_abstract, ceqv) + apply (rule_tac P="ccap_relation (updateCapData True (args ! 
2) (cteCap x)) rv'c" + in ccorres_gen_asm2) + apply csymbr + apply (simp add: cap_get_tag_NullCap del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def injection_handler_throwError) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: whenE_def[where P=False] injection_handler_returnOk + del: Collect_const) + apply csymbr + apply (simp add: cap_get_tag_NullCap del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def injection_handler_throwError) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: whenE_def injection_handler_returnOk + ccorres_invocationCatch_Inr + performInvocation_def bindE_assoc) + apply (ctac add: setThreadState_ccorres) + apply (rule ccorres_rhs_assoc2, rule ccorres_split_throws) + apply (ctac(no_vcg) add: invokeCNodeRotate_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (vcg exspec=invokeCNodeRotate_modifies) + apply (wp hoare_weak_lift_imp)+ + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply (simp add: Collect_const_mem) + apply (vcg exspec=updateCapData_spec2) + apply (rule conseqPre, vcg exspec=updateCapData_spec2) + apply clarsimp + apply fastforce + apply (simp add: Collect_const_mem) + apply (vcg exspec=updateCapData_spec2) + apply (rule conseqPre, vcg exspec=updateCapData_spec2) + apply clarsimp + apply fastforce + apply (simp add: Collect_const_mem) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (simp add: Collect_const_mem) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: unlessE_def) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: ccorres_injection_handler_csum1 + [OF ensureEmptySlot_ccorres]) + apply (simp add: Collect_False del: Collect_const) + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply simp + apply (wp injection_wp_E[OF refl] ensureEmptySlot_stronger) + apply (simp add: Collect_const_mem all_ex_eq_helper) + apply (vcg exspec=ensureEmptySlot_modifies) + apply (simp add: unlessE_def injection_handler_returnOk + del: Collect_const) + apply (wp injection_wp_E[OF refl]) + apply (rule_tac Q'="\rvb. 
invs' + and cte_at' rv and cte_at' rva + and tcb_at' thread" + in hoare_strengthen_postE_R) + apply (wp lsfco_cte_at') + apply (clarsimp simp: cte_wp_at_ctes_of weak_derived_updateCapData + capBadge_updateCapData_True) + apply (simp add: valid_tcb_state'_def) + apply (auto simp:updateCapData_Untyped + elim!: valid_updateCapDataI[OF ctes_of_valid'])[1] + apply (simp add: Collect_const_mem all_ex_eq_helper) + apply (vcg exspec=lookupPivotSlot_modifies) + apply simp + apply (wp injection_wp_E[OF refl] + lsfco_cte_at') + apply (simp add: Collect_const_mem all_ex_eq_helper) + apply (vcg exspec=lookupSourceSlot_modifies) + apply simp + apply (wp | wp (once) hoare_drop_imps)+ + apply simp + apply vcg + apply simp + apply (wp | wp (once) hoare_drop_imps)+ + apply simp + apply vcg + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply (wp hoare_weak_lift_imp) + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply (wp hoare_weak_lift_imp) + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply (wp hoare_weak_lift_imp) + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (simp add: upto_enum_def fromEnum_def toEnum_def + enum_gen_invocation_labels) + apply (rule ccorres_split_throws) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply simp + apply (wp injection_wp_E[OF refl] hoare_vcg_const_imp_lift_R + hoare_vcg_all_liftE_R lsfco_cte_at' hoare_weak_lift_imp + | simp add: hasCancelSendRights_not_Null ctes_of_valid_strengthen + cong: conj_cong + | wp (once) hoare_drop_imps)+ + apply (simp add: all_ex_eq_helper) + apply (vcg exspec=lookupTargetSlot_modifies) + apply simp + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' invs_valid_pspace' + ct_in_state'_def pred_tcb_at' + cur_tcb'_def word_sle_def word_sless_def + unat_lt2p[where 'a=machine_word_len, folded word_bits_def]) + apply (rule conjI) + apply (clarsimp simp: sysargs_rel_n_def n_msgRegisters_def excaps_map_def + cte_wp_at_ctes_of excaps_in_mem_def slotcap_in_mem_def + sysargs_rel_def length_ineq_not_Nil + dest!: interpret_excaps_eq) + apply ((rule conjI | clarsimp simp:split_def neq_Nil_conv + | erule pred_tcb'_weakenE disjE + | drule st_tcb_at_idle_thread')+)[1] + apply (frule interpret_excaps_eq) + apply (clarsimp simp: excaps_map_def mask_def[where n=4] + ccap_rights_relation_def rightsFromWord_wordFromRights + ThreadState_defs map_comp_Some_iff + rf_sr_ksCurThread hd_conv_nth hd_drop_conv_nth) + apply ((rule conjI + | clarsimp simp: rightsFromWord_wordFromRights + ccte_relation_def c_valid_cte_def + cl_valid_cte_def c_valid_cap_def + map_option_Some_eq2 neq_Nil_conv ccap_relation_def + numeral_eqs hasCancelSendRights_not_Null + ccap_relation_NullCap_iff[symmetric] + interpret_excaps_test_null mdbRevocable_CL_cte_to_H + | clarsimp simp: typ_heap_simps' + | frule length_ineq_not_Nil)+) + done + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemmas setCTE_def3 = setCTE_def2[THEN eq_reflection] + +lemma setCTE_sch_act_wf[wp]: + "\ \s. sch_act_wf (ksSchedulerAction s) s \ + setCTE src cte + \\x s. 
sch_act_wf (ksSchedulerAction s) s \" + by (wp sch_act_wf_lift setCTE_pred_tcb_at' setCTE_tcb_in_cur_domain') + +crunch sch_act_wf[wp]: insertNewCap "\s. sch_act_wf (ksSchedulerAction s) s" + (wp: crunch_wps ignore: setCTE) + +crunch ksCurThread[wp]: deleteObjects "\s. P (ksCurThread s)" + (wp: crunch_wps simp: unless_def) + +lemma deleteObjects_gsCNodes_at_pt: + "\(\s. P (gsCNodes s ptr)) + and K (ptr \ {ptr_base .. ptr_base + 2 ^ sz - 1} \ is_aligned ptr_base sz)\ + deleteObjects ptr_base sz + \\rv s. P (gsCNodes s ptr)\" + apply (rule hoare_gen_asm) + apply (simp add: deleteObjects_def2 add_mask_fold) + apply (wpsimp cong: conj_cong + | wp (once) hoare_drop_imps)+ + done + +crunches setThreadState, updateFreeIndex, preemptionPoint + for gsCNodes[wp]: "\s. P (gsCNodes s)" + (simp: unless_def whenE_def ignore_del: preemptionPoint) + +lemma resetUntypedCap_gsCNodes_at_pt: + "\(\s. P (gsCNodes s ptr)) + and cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ ptr \ untypedRange (cteCap cte)) slot + and valid_objs'\ + resetUntypedCap slot + \\rv s. P (gsCNodes s ptr)\, -" + apply (simp add: resetUntypedCap_def unlessE_def) + apply (rule hoare_pre) + apply (wp mapME_x_wp' | simp add: unless_def)+ + apply (wp hoare_vcg_const_imp_lift + deleteObjects_gsCNodes_at_pt + getSlotCap_wp)+ + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply (frule(1) ctes_of_valid') + apply (clarsimp simp: valid_cap_simps' capAligned_def) + done + +end + +context kernel_m begin + +lemma wordFromMessageInfo_spec: + "\s. \ \ {s} Call wordFromMessageInfo_'proc + \\ret__unsigned_long = index (seL4_MessageInfo_C.words_C (mi_' s)) (unat 0)\" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply vcg + apply (simp add: word_sless_def word_sle_def) + done + +lemma seL4_MessageInfo_lift_def2: + "seL4_MessageInfo_lift message_info \ + \label_CL = (index (seL4_MessageInfo_C.words_C message_info) 0 >> 12) && mask 52, + capsUnwrapped_CL = (index (seL4_MessageInfo_C.words_C message_info) 0 >> 9) && mask 3, + extraCaps_CL = (index (seL4_MessageInfo_C.words_C message_info) 0 >> 7) && mask 2, + length_CL = (index (seL4_MessageInfo_C.words_C message_info) 0 >> 0) && mask 7\" + apply (simp add: seL4_MessageInfo_lift_def mask_def) + done + +lemma globals_update_id: + "globals_update (t_hrs_'_update (hrs_htd_update id)) x = x" + by (simp add: hrs_htd_update_def) + +lemma getObjectSize_spec: + "\s. \\\s. \t \ of_nat (length (enum::object_type list) - 1)\ Call getObjectSize_'proc + \\ret__unsigned_long = of_nat (getObjectSize (object_type_to_H (t_' s)) (unat (userObjSize_' s)))\" + apply vcg + apply (clarsimp simp: Kernel_C_defs + bit_simps objBits_simps' framesize_to_H_def pageBitsForSize_def + object_type_to_H_def Kernel_C_defs APIType_capBits_def) + apply (simp add:nAPIObjects_def) + (* FIXME AARCH64 abstraction violation, looks to be not true when config_ARM_PA_SIZE_BITS_40 *) + apply (simp add:enum_object_type enum_apiobject_type frameSizeConstants_defs + Kernel_Config.config_ARM_PA_SIZE_BITS_40_def + split: if_split) + apply unat_arith + done + +lemma object_type_from_H_bound: + "object_type_from_H newType \ of_nat (length (enum::object_type list) - Suc 0)" + apply (simp add:enum_object_type enum_apiobject_type object_type_from_H_def) + apply (case_tac newType) + apply (clarsimp simp: Kernel_C_defs objBits_simps + split:apiobject_type.splits)+ + done + +lemma updateCap_ct_active'[wp]: + "\ct_active'\ updateCap srcSlot cap \\rva. 
ct_active' \" + apply (rule hoare_pre) + apply (simp add:ct_in_state'_def) + apply (wps|wp|simp add:ct_in_state'_def)+ + done + +lemma APIType_capBits_low: + "\ newType = APIObjectType apiobject_type.CapTableObject \ 0 < us; + newType = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits\ + \ 4 \ APIType_capBits newType us" + apply (case_tac newType) + apply (clarsimp simp: invokeUntyped_proofs_def APIType_capBits_def objBits_simps' bit_simps + untypedBits_defs + split: apiobject_type.splits if_split)+ + done + +lemma APIType_capBits_high: + "\ newType = APIObjectType apiobject_type.CapTableObject \ us < 59; + newType = APIObjectType apiobject_type.Untyped \ us \ 61\ + \ APIType_capBits newType us < 64" + apply (case_tac newType) + apply (clarsimp simp: invokeUntyped_proofs_def APIType_capBits_def objBits_simps' bit_simps + split: apiobject_type.splits if_split)+ + done + +lemma typ_clear_region_eq: +notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff +shows + "\ctes_of (s::kernel_state) (ptr_val p) = Some cte; is_aligned ptr bits; bits < word_bits; + {ptr..ptr + 2 ^ bits - 1} \ {ptr_val p..ptr_val p + mask cteSizeBits} = {}; ((clift hp) :: (cte_C ptr \ cte_C)) p = Some to\ \ + (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: (cte_C ptr \ cte_C)) p = Some to" + apply (clarsimp simp:lift_t_def lift_typ_heap_def restrict_map_def split:if_splits) + apply (intro conjI impI) + apply (case_tac hp) + apply (clarsimp simp:typ_clear_region_def hrs_htd_update_def) + apply (rule arg_cong[where f = from_bytes]) + apply (clarsimp simp:heap_list_s_def lift_state_def proj_h_def) + apply (case_tac hp) + apply (clarsimp simp:typ_clear_region_def hrs_htd_update_def) + apply (clarsimp simp:heap_list_s_def lift_state_def proj_h_def) + apply (clarsimp simp:s_valid_def h_t_valid_def) + apply (clarsimp simp:valid_footprint_def Let_def) + apply (drule spec) + apply (erule(1) impE) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp add:map_le_def) + apply (drule_tac x = aa in bspec) + apply simp + apply (clarsimp simp:proj_d_def) + apply (clarsimp simp:hrs_htd_update_def typ_clear_region_def + split:if_splits option.splits) + apply (simp add:intvl_range_conv[where 'a=machine_word_len, folded word_bits_def]) + apply (subgoal_tac "ptr_val p + of_nat y \ {ptr_val p..ptr_val p + mask cteSizeBits}") + apply blast + apply (clarsimp simp:blah) + apply (rule context_conjI) + apply (rule is_aligned_no_wrap') + apply (rule ctes_of_is_aligned[where cte = cte and s = s]) + apply simp + apply (rule of_nat_power; simp add: objBits_simps') + apply (rule word_plus_mono_right) + apply (simp add: word_of_nat_le objBits_defs mask_def) + apply (rule is_aligned_no_wrap') + apply (rule ctes_of_is_aligned[where cte = cte and s = s]) + apply simp + apply (clarsimp simp: objBits_simps' mask_def) + apply (clarsimp simp: proj_d_def) + done + +lemma region_is_typelessI: + "\hrs_htd (t_hrs_' (globals t)) = hrs_htd (hrs_htd_update (typ_clear_region ptr sz) h) \ + \ region_is_typeless ptr (2^sz) t" + apply (case_tac h) + apply (clarsimp simp: typ_clear_region_def region_is_typeless_def + hrs_htd_def hrs_htd_update_def split:if_splits) + done + +lemma rf_sr_cpspace_relation: + "(s,s') \ rf_sr \ cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState s)) (t_hrs_' (globals s'))" + by (clarsimp simp:rf_sr_def cstate_relation_def Let_def) + +lemma cNodeNoOverlap_retype_have_size: + "\ cNodeOverlap cns (\x. 
ptr \ x \ x \ ptr + of_nat num * 2 ^ bits - 1) + \ cnodes_retype_have_size {ptr .. ptr + of_nat num * 2 ^ bits - 1} anysz cns" + apply (clarsimp simp: cnodes_retype_have_size_def cNodeOverlap_def) + apply (elim allE, drule(1) mp, clarsimp simp: upto_intvl_eq[symmetric]) + apply (erule disjoint_subset2[rotated]) + apply clarsimp + done + +lemma pt_bits_cte_level_bits[simp]: + "cte_level_bits + (pt_bits pt_t - cte_level_bits) = pt_bits pt_t" + by (simp add: bit_simps cte_level_bits_def split: if_split) + +(* FIXME AARCH64: could introduce a ptable version of cnodes_retype_have_size, but too much duplication. Generalise instead? *) +lemma archNoOverlap_retype_have_size: + "\ archOverlap s (\x. ptr \ x \ x \ ptr + of_nat num * 2 ^ bits - 1) + \ cnodes_retype_have_size {ptr .. ptr + of_nat num * 2 ^ bits - 1} anysz + (gsPTTypes (ksArchState s) ||> (\pt_t. pt_bits pt_t - cte_level_bits))" + apply (clarsimp simp: cnodes_retype_have_size_def archOverlap_def in_omonad) + apply (elim allE, drule(1) mp, clarsimp simp: upto_intvl_eq[symmetric]) + apply (erule disjoint_subset2[rotated]) + apply clarsimp + done + +lemma range_cover_compare_bound_word: + "range_cover ptr sz sbit n + \ (of_nat n * 2 ^ sbit) + (ptr && mask sz) \ 2 ^ sz" + apply (simp add: word_le_nat_alt range_cover_unat + add.commute) + apply (frule range_cover.range_cover_compare_bound) + apply (simp add: range_cover.sz range_cover.unat_of_nat_shift) + done + +lemma isUntypedCap_ccap_relation_helper: + "ccap_relation cap ccap + \ isUntypedCap cap + \ cap_get_tag ccap = scast cap_untyped_cap + \ cap_lift ccap = Some (Cap_untyped_cap (cap_untyped_cap_lift ccap)) + \ cap_untyped_cap_lift ccap = + \ capFreeIndex_CL = of_nat (capFreeIndex cap) >> 4, + capIsDevice_CL = from_bool (capIsDevice cap), + capBlockSize_CL = of_nat (capBlockSize cap), + capPtr_CL = capPtr cap\" + apply (simp add: cap_get_tag_isCap[symmetric]) + apply (frule(1) cap_get_tag_UntypedCap[THEN iffD1]) + apply (frule cap_lift_untyped_cap) + apply (simp add: cap_untyped_cap_lift_def) + apply (clarsimp simp: shiftl_shiftr1 word_size from_to_bool_last_bit) + apply (simp add: mask_def word_bw_assocs ) + done + +lemma pspace_no_overlap_underlying_zero_update: + "pspace_no_overlap' ptr sz s + \ invs' s + \ S \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} + \ s\ksMachineState := underlying_memory_update + (\m x. if x \ S then 0 else m x) (ksMachineState s)\ + = s" + apply (subgoal_tac "\x \ S. underlying_memory (ksMachineState s) x = 0") + apply (cases "ksMachineState s") + apply (cases s, simp add: fun_eq_iff split: if_split) + apply (clarsimp split: if_split_asm) + apply (erule pspace_no_overlap_underlying_zero) + apply (simp add: invs'_def valid_state'_def) + apply blast + done + +lemma clearMemory_untyped_ccorres: + "ccorres dc xfdc ((\s. invs' s + \ (\cap. cte_wp_at' (\cte. cteCap cte = cap) ut_slot s + \ isUntypedCap cap + \ {ptr ..+ 2 ^ sz} \ untypedRange cap + \ pspace_no_overlap' (capPtr cap) (capBlockSize cap) s) + \ 2 ^ sz \ gsMaxObjectSize s) + and (\_. is_aligned ptr sz \ sz \ 3 \ sz \ resetChunkBits)) + ({s. region_actually_is_bytes ptr (2 ^ sz) s} + \ {s. bits_' s = of_nat sz} + \ {s.
ptr___ptr_to_unsigned_long_' s = Ptr ptr}) + [] + (doMachineOp (clearMemory ptr (2 ^ sz))) (Call clearMemory_'proc)" + (is "ccorres dc xfdc ?P ?P' [] ?m ?c") + apply (rule ccorres_gen_asm) + apply (cinit' lift: bits_' ptr___ptr_to_unsigned_long_') + apply (rule_tac P="ptr \ 0 \ sz < word_bits" in ccorres_gen_asm) + apply (simp add: clearMemory_def) + apply (simp add: doMachineOp_bind storeWord_empty_fail) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule_tac P="?P" and P'="{s. region_actually_is_bytes ptr (2 ^ sz) s}" in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rule conjI; clarsimp) + apply (simp add: word_less_nat_alt unat_of_nat word_bits_def) + apply (clarsimp simp: isCap_simps valid_cap'_def capAligned_def + is_aligned_no_wrap'[OF _ word64_power_less_1] + unat_of_nat_eq word_bits_def) + apply (simp add: is_aligned_weaken is_aligned_triv[THEN is_aligned_weaken]) + apply (clarsimp simp: ghost_assertion_size_logic[unfolded o_def] region_actually_is_bytes_dom_s) + apply (clarsimp simp: field_simps word_size_def mapM_x_storeWord_step + word_bits_def cte_wp_at_ctes_of) + apply (frule ctes_of_valid', clarify+) + apply (simp add: doMachineOp_def split_def exec_gets) + apply (simp add: select_f_def simpler_modify_def bind_def valid_cap_simps' capAligned_def) + apply (subst pspace_no_overlap_underlying_zero_update; simp?) + apply (case_tac sz, simp_all)[1] + apply (case_tac nat, simp_all)[1] + apply (case_tac nata, simp_all)[1] + apply (clarsimp dest!: region_actually_is_bytes) + apply (drule(1) rf_sr_rep0) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply csymbr + apply (ctac add: cleanCacheRange_RAM_ccorres) + apply wp + apply (simp add: guard_is_UNIV_def unat_of_nat + word_bits_def capAligned_def word_of_nat_less) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid'; clarify?) + apply (clarsimp simp: isCap_simps valid_cap_simps' capAligned_def + word_of_nat_less Kernel_Config.resetChunkBits_def + word_bits_def unat_2p_sub_1) + apply (strengthen is_aligned_no_wrap'[where sz=sz] is_aligned_addrFromPPtr_n)+ + apply (simp add: addrFromPPtr_mask_cacheLineSize pptrBaseOffset_alignment_def) + apply (cases "ptr = 0"; simp) + apply (drule subsetD, rule intvl_self, simp) + apply simp + done + +lemma t_hrs_update_use_t_hrs: + "t_hrs_'_update f s + = (t_hrs_'_update (\_. 
f (t_hrs_' s)) $ s)" + by simp + +lemma reset_untyped_inner_offs_helper: + "\ cteCap cte = capability.UntypedCap dev ptr sz idx; + i \ unat ((of_nat idx - 1 :: addr) div 2 ^ sz2); + sz2 \ sz; idx \ 0; + valid_cap' (cteCap cte) s + \ + \ of_nat i * 2 ^ sz2 < (2 ^ sz :: addr)" + apply (clarsimp simp: valid_cap_simps' untypedBits_defs) + apply (rule word_less_power_trans2, simp_all) + apply (rule word_of_nat_less) + apply (erule order_le_less_trans) + apply (simp only: word_less_nat_alt[symmetric]) + apply (simp add: shiftr_div_2n_w[symmetric] word_size) + apply (rule shiftr_less_t2n) + apply (simp add: word_of_nat_le) + apply (rule of_nat_neq_0, simp) + apply (erule order_le_less_trans) + apply (rule power_strict_increasing, simp_all) + done + +lemma typ_region_bytes_dom_s: + "S \ {ptr ..+ 2 ^ bits} + \ S \ {SIndexVal, SIndexTyp 0} \ dom_s (typ_region_bytes ptr bits htd)" + apply (clarsimp simp: typ_region_bytes_def dom_s_def) + apply fastforce + done + +lemma aligned_intvl_offset_subset: + assumes al: "is_aligned (ptr :: 'a :: len word) sz" and al': "is_aligned x sz'" + and szv: "sz' \ sz" and xsz: "x < 2 ^ sz" + shows "{ptr + x ..+ 2 ^ sz'} \ {ptr ..+ 2 ^ sz}" + apply (simp only: upto_intvl_eq al aligned_add_aligned[OF al al' szv]) + apply (rule aligned_range_offset_subset[OF al al' szv xsz]) + done + +lemma aligned_intvl_offset_subset_ran: + assumes al: "is_aligned (ptr :: 'a :: len word) sz" and al': "is_aligned x sz'" + and szv: "sz' \ sz" and xsz: "x < 2 ^ sz" + shows "{ptr + x ..+ 2 ^ sz'} \ {ptr .. ptr + 2 ^ sz - 1}" + apply (simp only: upto_intvl_eq al aligned_add_aligned[OF al al' szv]) + apply (rule aligned_range_offset_subset[OF al al' szv xsz]) + done + +lemma ccorres_req_Ex: + assumes v: "\s s'. \ (s, s') \ sr; P s; s' \ P' \ \ \v. Q v s \ Q' v s' \ V v" + and cc: "\v. V v \ ccorres_underlying sr \ r xf r' xf' (P and Q v) (P' \ {s. Q' v s}) hs H C" + shows "ccorres_underlying sr \ r xf r' xf' P P' hs H C" + apply (rule ccorres_name_pre) + apply (rule ccorres_name_pre_C) + apply (case_tac "(s, sa) \ sr") + apply (drule(2) v, clarsimp) + apply (rule ccorres_guard_imp2, erule cc) + apply auto[1] + apply (rule ccorresI', simp) + done + +lemma region_actually_is_bytes_subset_t_hrs: + "region_actually_is_bytes ptr sz s' + \ {ptr' ..+ sz'} \ {ptr ..+ sz} + \ t_hrs_' (globals s') = t_hrs_' (globals s) + \ region_actually_is_bytes ptr' sz' s" + by (auto simp: region_actually_is_bytes_def) + +lemma eq_UntypedCap_helper: + "isUntypedCap cap \ capIsDevice cap = dev + \ capPtr cap = ptr \ capBlockSize cap = sz + \ capFreeIndex cap = idx + \ cap = UntypedCap dev ptr sz idx" + by (clarsimp simp: isCap_simps) + +lemma byte_regions_unmodified_actually_heap_list: + "byte_regions_unmodified hrs hrs' + \ region_actually_is_bytes' p' n' htd + \ htd = (hrs_htd hrs) + \ heap_list (hrs_mem hrs) n p = v + \ {p ..+ n} \ {p' ..+ n'} + \ heap_list (hrs_mem hrs') n p = v" + apply (erule trans[rotated], rule heap_list_h_eq2) + apply (simp add: byte_regions_unmodified_def region_actually_is_bytes_def) + apply (drule_tac x=x in spec) + apply (drule_tac x=x in bspec) + apply blast + apply (clarsimp split: if_split_asm) + done + +lemma ucast_64_32[simp]: + "UCAST(64 \ 32) (of_nat x) = of_nat x" + by (simp add: ucast_of_nat is_down_def source_size_def target_size_def word_size) + +lemma dsb_preserves_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call dsb_'proc + {t. 
hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s)) + \ byte_regions_unmodified' s t}" + apply (rule allI, rule conseqPost, rule dsb_preserves_kernel_bytes[rule_format]; simp) + apply (clarsimp simp: byte_regions_unmodified_def) + done + +lemma cleanCacheRange_RAM_preserves_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call cleanCacheRange_RAM_'proc + {t. hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s)) + \ byte_regions_unmodified' s t}" + apply (rule allI, rule conseqPost, rule cleanCacheRange_RAM_preserves_kernel_bytes[rule_format]; simp) + apply (clarsimp simp: byte_regions_unmodified_def) + done + +text \ +@{term resetUntypedCap_'proc} retypes the @{term Untyped} region as bytes, and then enters +a loop which progressively zeros the memory, ultimately establishing @{term zero_ranges_are_zero} +for the full @{term Untyped} range. Since @{term zero_ranges_are_zero} also requires +@{term region_actually_is_bytes}, the loop must remember that we retyped the whole range before +entering the loop. It is sufficient to know that @{term hrs_htd} is preserved, and for most +contents of the loop, this is straightforward. On RISCV64, @{term preemptionPoint_'proc} contains +inline assembly, so we must appeal to the axiomatisation of inline assembly to show that +@{term hrs_htd} is preserved. +\ +lemma preemptionPoint_hrs_htd: + "\\. \\\<^bsub>/UNIV\<^esub> {\} Call preemptionPoint_'proc \hrs_htd \t_hrs = hrs_htd \<^bsup>\\<^esup>t_hrs\" + by (rule allI, rule conseqPre, vcg, clarsimp simp: asm_spec_enabled elim!: asm_specE) + +lemma resetUntypedCap_ccorres: + notes upt.simps[simp del] Collect_const[simp del] replicate_numeral[simp del] + shows + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple and ct_active' and cte_wp_at' (isUntypedCap o cteCap) slot + and (\s. descendants_of' slot (ctes_of s) = {})) + (UNIV \ {s. srcSlot_' s = Ptr slot}) + [] + (resetUntypedCap slot) + (Call resetUntypedCap_'proc)" + using [[ceqv_simpl_sequence = true]] + supply if_cong[cong] option.case_cong[cong] + apply (cinit lift: srcSlot_') + apply (simp add: liftE_bindE getSlotCap_def + Collect_True extra_sle_sless_unfolds) + apply (rule ccorres_getCTE, rename_tac cte) + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'="prev_cap_'" in ccorres_abstract, ceqv) + apply (rename_tac prev_cap) + apply (rule_tac P="ccap_relation (cteCap cte) prev_cap" + in ccorres_gen_asm2) + apply (csymbr | rule ccorres_Guard_Seq[where S=UNIV])+ + apply (rule_tac P="isUntypedCap (cteCap cte) + \ capFreeIndex (cteCap cte) < 2 ^ word_bits + \ capFreeIndex (cteCap cte) < 2 ^ (word_bits - 1) + \ is_aligned (of_nat (capFreeIndex (cteCap cte)) :: addr) 4 + \ capBlockSize (cteCap cte) < 2 ^ word_bits" + in ccorres_gen_asm) + apply clarsimp + apply (frule(1) isUntypedCap_ccap_relation_helper) + apply (clarsimp simp: shiftr_shiftl1) + apply (rule ccorres_Cond_rhs_Seq) + apply (frule of_nat_0, simp add: word_bits_def) + apply (simp add: unlessE_def) + apply (rule ccorres_split_throws) + apply (rule ccorres_return_CE, simp+) + apply vcg + apply clarsimp + apply (clarsimp simp: unat_of_nat64) + apply (frule of_nat_gt_0) + apply (simp add: unlessE_def) + apply (simp add: hrs_htd_update) + apply (rule ccorres_Guard_Seq[where S=UNIV])? 
+ apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow) + apply (rule_tac idx="capFreeIndex (cteCap cte)" in deleteObjects_ccorres[where p=slot]) + apply ceqv + apply clarsimp + apply (simp only: ccorres_seq_cond_raise) + apply (rule ccorres_cond[where R="\"]) + apply (clarsimp simp: Kernel_Config.resetChunkBits_def) + apply (simp add: word_less_nat_alt unat_of_nat64 from_bool_0) + apply blast + apply (simp add: liftE_def bind_assoc shiftL_nat unless_def + when_def) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_split_nothrow[where xf'=xfdc and r'=dc]) + apply (rule ccorres_cond2[where R=\]) + apply (simp add: from_bool_0) + apply (ctac add: clearMemory_untyped_ccorres[where ut_slot=slot]) + apply (rule ccorres_return_Skip) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac cap'="cteCap cte" in updateFreeIndex_ccorres) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: typ_heap_simps' cap_get_tag_isCap + dest!: ccte_relation_ccap_relation) + apply (drule(1) isUntypedCap_ccap_relation_helper)+ + apply (rule exI, strengthen refl, simp) + apply (simp only: t_hrs_update_use_t_hrs mex_def meq_def) + apply blast + apply ceqv + apply (rule ccorres_return_CE'[unfolded returnOk_def o_apply], simp+) + apply wp + apply (simp add: guard_is_UNIV_def) + apply wp + apply simp + apply (vcg exspec=cleanCacheRange_RAM_preserves_bytes) + apply (rule_tac P="resetChunkBits \ capBlockSize (cteCap cte) + \ of_nat (capFreeIndex (cteCap cte)) - 1 + < (2 ^ capBlockSize (cteCap cte) :: addr)" + in ccorres_gen_asm) + apply (elim conjE) + apply (simp add: whileAnno_def) + apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ + apply csymbr + apply (simp add: reset_name_seq_bound_helper2 word_sle_def word_sless_def + msb_big linorder_not_le word_bits_def word_of_nat_less + reset_name_seq_bound_helper2[simplified simp_thms] + Collect_True) + apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? + apply (rule ccorres_add_returnOk) + apply (rule ccorres_splitE_novcg) + apply (rule_tac P="capPtr (cteCap cte) \ getFreeRef (capPtr (cteCap cte)) + (capFreeIndex (cteCap cte)) - 1" + in ccorres_gen_asm) + apply (rule_tac P="(\s. valid_cap' (cteCap cte) s) + \ \ capIsDevice (cteCap cte)" in ccorres_gen_asm) + apply (rule_tac yf="\ptr. ptr - (capPtr (cteCap cte))" + and P="\s. 2 ^ resetChunkBits \ gsMaxObjectSize s" + and F="\n b idx. cte_wp_at' (\cte'. \idx'. cteCap cte' + = (cteCap cte)\ capFreeIndex := idx' \ + \ idx = (getFreeRef (capPtr (cteCap cte)) idx') - 1 + && ~~ mask resetChunkBits) slot + and invs' + and (\s. descendants_of' slot (ctes_of s) = {}) + and pspace_no_overlap' (capPtr (cteCap cte)) (capBlockSize (cteCap cte))" + and Q="{s. \ capIsDevice (cteCap cte) + \ region_actually_is_bytes (capPtr (cteCap cte)) + (2 ^ (capBlockSize (cteCap cte))) s}" + in mapME_x_simpl_sequence_fun_related) + apply (rule nth_equalityI) + apply (simp add: length_upto_enum_step) + apply (simp add: getFreeRef_def shiftr_div_2n_w Kernel_Config.resetChunkBits_def + word_size) + apply (simp add: length_upto_enum_step upto_enum_step_nth + less_Suc_eq_le nth_rev getFreeRef_def + Kernel_Config.resetChunkBits_def shiftr_div_2n_w word_size + and_not_mask shiftl_t2n) + apply clarify + apply (rule_tac Q="\v. cte_wp_at' (\cte. capFreeIndex (cteCap cte) = v) slot" + and Q'="\\" and V="\v. 
x = (getFreeRef (capPtr (cteCap cte)) v) - 1 + && ~~ mask resetChunkBits" + in ccorres_req_Ex) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply (clarsimp simp add: shiftL_nat) + apply (rename_tac prior_idx) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: clearMemory_untyped_ccorres[where ut_slot=slot]) + apply csymbr + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac cap'="(cteCap cte)\ capFreeIndex := prior_idx \" + in updateFreeIndex_ccorres) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: typ_heap_simps' cap_get_tag_isCap + dest!: ccte_relation_ccap_relation) + apply (drule(1) isUntypedCap_ccap_relation_helper)+ + apply (drule isUntypedCap_ccap_relation_helper, clarsimp simp: isCap_simps) + apply (rule exI, strengthen refl, simp) + apply (simp only: t_hrs_update_use_t_hrs mex_def meq_def, + simp only: fun_app_def, strengthen exI[mk_strg I], strengthen refl) + apply (clarsimp simp: isCap_simps) + apply (simp add: getFreeIndex_def) + apply (clarsimp simp: in_set_conv_nth + length_upto_enum_step upto_enum_step_nth + less_Suc_eq_le getFreeRef_def) + apply (frule(2) reset_untyped_inner_offs_helper, simp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def + is_aligned_mask_out_add_eq_sub[OF is_aligned_weaken]) + apply (rule less_mask_eq, rule shiftr_less_t2n, + erule order_less_le_trans, rule two_power_increasing, + simp_all add: maxUntypedSizeBits_def)[1] + apply ceqv + apply (rule ccorres_add_returnOk) + apply (ctac add: preemptionPoint_ccorres) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def cintr_def) + apply wp + apply (simp, vcg exspec=preemptionPoint_modifies) + apply (wp updateFreeIndex_clear_invs') + apply (simp add: guard_is_UNIV_def) + apply (wp hoare_vcg_ex_lift doMachineOp_psp_no_overlap) + apply clarsimp + apply (vcg exspec=cleanCacheRange_RAM_preserves_bytes) + apply clarify + apply (rule conjI) + apply (clarsimp simp: invs_valid_objs' cte_wp_at_ctes_of + invs_urz + getFreeIndex_def isCap_simps + invs_pspace_aligned' + invs_pspace_distinct' + simp del: ) + apply (frule valid_global_refsD_with_objSize, clarsimp) + apply (clarsimp simp: conj_comms in_set_conv_nth + length_upto_enum_step upto_enum_step_nth + less_Suc_eq_le getFreeRef_def) + apply (frule(2) reset_untyped_inner_offs_helper, simp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def + aligned_offset_non_zero cteCaps_of_def + is_aligned_mask_out_add_eq_sub[OF is_aligned_weaken] + if_split[where P="\z. 
a \ z" for a]) + apply (strengthen is_aligned_mult_triv2[THEN is_aligned_weaken] + aligned_sub_aligned + aligned_intvl_offset_subset_ran + unat_le_helper Aligned.is_aligned_neg_mask) + apply (simp add: order_less_imp_le Kernel_Config.resetChunkBits_def untypedBits_defs) + + apply (clarsimp simp: in_set_conv_nth isCap_simps + length_upto_enum_step upto_enum_step_nth + less_Suc_eq_le getFreeRef_def + cte_wp_at_ctes_of getFreeIndex_def + hrs_mem_update) + apply (frule valid_global_refsD_with_objSize, clarsimp) + apply (frule(2) reset_untyped_inner_offs_helper, simp+) + apply (frule ctes_of_valid', clarify+) + apply (clarsimp simp: valid_cap_simps') + apply (strengthen ghost_assertion_size_logic[unfolded o_def, rotated, mk_strg I E] + is_aligned_weaken[where y=2 and x=resetChunkBits] + is_aligned_weaken[where y=8 and x=resetChunkBits] + is_aligned_no_overflow'[where n=8, simplified] + power_increasing[where a=2 and n=8, simplified] + region_actually_is_bytes_dom_s[mk_strg I E] + aligned_intvl_offset_subset[where sz'=8, simplified] + is_aligned_mult_triv2[THEN is_aligned_weaken] + region_actually_is_bytes_subset_t_hrs[mk_strg I E] + | simp)+ + apply (clarsimp simp: capAligned_def imp_conjL + aligned_offset_non_zero + is_aligned_add_multI conj_comms + is_aligned_mask_out_add_eq_sub[OF is_aligned_weaken]) + apply (strengthen region_actually_is_bytes_subset[mk_strg I E] + heap_list_is_zero_mono[OF heap_list_update_eq] + order_trans [OF intvl_start_le + aligned_intvl_offset_subset[where sz'=resetChunkBits]] + byte_regions_unmodified_actually_heap_list[mk_strg I E E] + | simp add: is_aligned_mult_triv2 hrs_mem_update)+ + apply (simp add: unat_sub word_le_nat_alt unat_sub[OF word_and_le2] + mask_out_sub_mask word_and_le2 + unat_of_nat64[OF order_le_less_trans, rotated, + OF power_strict_increasing]) + apply (case_tac idx') + apply clarsimp + apply (simp add: addr_card_def card_word Kernel_Config.resetChunkBits_def mask_def) + apply (rule conjI) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp add: minUntypedSizeBits_def) + apply (simp add: is_aligned_def) + apply (simp add: is_aligned_def) + apply clarsimp + apply (simp add: addr_card_def card_word is_aligned_def[of "0x100"] + Kernel_Config.resetChunkBits_def) + apply (simp add: unat_of_nat64[OF order_le_less_trans, rotated, + OF power_strict_increasing]) + apply (simp add: word_mod_2p_is_mask[symmetric] Kernel_Config.resetChunkBits_def + unat_mod unat_of_nat mod_mod_cancel) + apply (strengthen nat_le_Suc_less_imp[OF mod_less_divisor, THEN order_trans]) + apply simp + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp add: minUntypedSizeBits_def) + apply (drule sym[of "a * b" for a b], simp) + apply (cut_tac is_aligned_mult_triv2[of _ 8, simplified]) + apply (erule is_aligned_weaken, simp) + apply clarsimp + apply (rule conseqPre, vcg exspec=preemptionPoint_hrs_htd exspec=cleanCacheRange_RAM_preserves_bytes) + apply (clarsimp simp: in_set_conv_nth isCap_simps + length_upto_enum_step upto_enum_step_nth + less_Suc_eq_le getFreeRef_def + cte_wp_at_ctes_of getFreeIndex_def + hrs_mem_update) + apply (frule(2) reset_untyped_inner_offs_helper, simp+) + apply (clarsimp simp: valid_cap_simps') + apply (strengthen is_aligned_weaken[where y=2 and x=resetChunkBits] + ghost_assertion_size_logic[unfolded o_def, rotated, mk_strg I E] + is_aligned_weaken[where y=8 and x=resetChunkBits] + is_aligned_no_overflow'[where n=8, simplified] + power_increasing[where a=2 and n=8, simplified] + region_actually_is_bytes_dom_s[mk_strg I E] + 
aligned_intvl_offset_subset[where sz'=8, simplified] + is_aligned_mult_triv2[THEN is_aligned_weaken] + | simp)+ + apply (clarsimp simp: capAligned_def + aligned_offset_non_zero + is_aligned_add_multI conj_comms + region_actually_is_bytes_def) + apply (simp add: Kernel_Config.resetChunkBits_def is_aligned_def[of "0x100"]) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp add: minUntypedSizeBits_def) + apply (cut_tac is_aligned_mult_triv2[of _ 8, simplified]) + apply (erule is_aligned_weaken, simp) + apply (rule hoare_pre) + apply (wp updateFreeIndex_cte_wp_at updateFreeIndex_clear_invs' + updateFreeIndex_pspace_no_overlap' + updateFreeIndex_descendants_of2 + doMachineOp_psp_no_overlap + hoare_vcg_ex_lift + | (wp (once) preemptionPoint_inv, simp, simp add: pspace_no_overlap'_def) + | simp)+ + apply (simp add: cte_wp_at_ctes_of isCap_simps | clarify)+ + apply (clarsimp simp: length_upto_enum_step upto_enum_step_nth + less_Suc_eq_le getFreeRef_def + getFreeIndex_def nth_rev + conj_comms invs_pspace_aligned' invs_pspace_distinct' + invs_valid_pspace') + apply (frule(1) reset_untyped_inner_offs_helper[OF _ order_refl], simp+) + apply (frule ctes_of_valid', clarify+) + apply (clarsimp simp: valid_cap_simps' capAligned_def + is_aligned_mask_out_add_eq[OF is_aligned_weaken] + aligned_bump_down Aligned.is_aligned_neg_mask + is_aligned_mask_out_add_eq_sub[OF is_aligned_weaken]) + apply (simp add: field_simps) + apply (strengthen Aligned.is_aligned_neg_mask unat_le_helper) + apply (simp add: minUntypedSizeBits_def Kernel_Config.resetChunkBits_def[unfolded atomize_eq, THEN arg_cong[where f="\x. n \ x" for n]]) + apply (rule order_less_imp_le, erule order_le_less_trans[rotated], + rule olen_add_eqv[THEN iffD2]) + apply (rule order_trans, rule word_mult_le_mono1, rule word_of_nat_le, + erule order_trans[rotated], simp, simp add: Kernel_Config.resetChunkBits_def) + apply (simp only: unat_power_lower64 shiftr_div_2n_w[symmetric] + word_size word_bits_def[symmetric]) + apply (rule nat_less_power_trans2) + apply (rule order_less_le_trans[OF word_shiftr_lt]) + apply (simp add: word_bits_def) + apply (simp add: word_bits_def Kernel_Config.resetChunkBits_def) + apply (simp add: field_simps) + apply ceqv + apply (rule ccorres_return_CE, simp+)[1] + apply wp + apply (simp add: ccHoarePost_def guard_is_UNIV_def) + apply simp + + apply (strengthen invs_valid_objs' invs_urz) + apply ((rule_tac d="capIsDevice (cteCap cte)" and idx="capFreeIndex (cteCap cte)" in + deleteObject_no_overlap + | rule_tac d="capIsDevice (cteCap cte)" and idx="capFreeIndex (cteCap cte)" in + deleteObjects_cte_wp_at' + | wp (once) hoare_vcg_const_imp_lift + hoare_vcg_conj_lift + | wp (once) deleteObjects_invs'[where p=slot] + deleteObjects_descendants[where p=slot] + | strengthen exI[mk_strg I])+)[1] + apply (simp add: word_sle_def) + apply vcg + apply simp + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (rule conjI) + apply clarsimp + apply (frule if_unsafe_then_capD', clarsimp+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply ((strengthen refl eq_UntypedCap_helper + eq_UntypedCap_helper[symmetric] | simp)+)? + apply (frule ctes_of_valid', clarsimp+) + apply (simp add: exI)? 
+ apply (clarsimp simp: isCap_simps valid_cap_simps' capAligned_def + conj_comms invs_valid_pspace' + descendants_range'_def2 + empty_descendants_range_in' + getFreeRef_def upto_intvl_eq) + apply (frule valid_global_refsD_with_objSize, clarsimp+) + apply (strengthen order_le_less_trans[where z="2 ^ n" for n, mk_strg I E] + order_trans[rotated, where z="gsMaxObjectSize s" for s, mk_strg I E]) + apply (strengthen power_strict_increasing + | simp)+ + apply (clarsimp simp: word_bits_def maxUntypedSizeBits_def minUntypedSizeBits_def) + apply (subgoal_tac "capPtr (cteCap cte) \ getFreeRef (capPtr (cteCap cte)) + (capFreeIndex (cteCap cte)) - 1") + apply (case_tac "the (ctes_of s slot)", simp) + apply (frule(3) ex_cte_not_in_untyped_range, clarsimp+) + apply (strengthen is_aligned_no_wrap'[where off="a - b" for a b, + simplified field_simps, mk_strg I E]) + apply (simp add: getFreeRef_def nth_rev length_upto_enum_step + upto_enum_step_nth word_of_nat_le + is_aligned_mask_out_add_eq_sub[OF is_aligned_weaken]) + apply (simp add: neg_mask_is_div' Kernel_Config.resetChunkBits_def word_size) + apply (safe, simp_all)[1] + apply (simp add: getFreeRef_def) + apply (strengthen is_aligned_no_wrap'[where off="a - b" for a b, + simplified field_simps, mk_strg I E]) + apply (simp add: word_of_nat_le) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule(1) rf_sr_ctes_of_clift, clarsimp) + apply (frule(2) rf_sr_cte_relation) + apply (clarsimp simp: typ_heap_simps' + dest!: ccte_relation_ccap_relation) + apply (strengthen typ_region_bytes_actually_is_bytes) + apply (simp add: hrs_htd_update hrs_mem_update exI) + apply (frule(1) isUntypedCap_ccap_relation_helper) + apply (frule ctes_of_valid', clarify+) + apply (frule valid_global_refsD_with_objSize, clarsimp) + apply (clarsimp simp: valid_cap_simps' isCap_simps capAligned_def + from_bool_0 cap_to_H_simps) + apply (frule(1) ghost_assertion_size_logic_no_unat) + apply (simp add: ghost_assertion_data_get_def gs_clear_region_def) + apply (strengthen is_aligned_no_overflow' + typ_region_bytes_dom_s + aligned_intvl_offset_subset + region_is_bytes_typ_region_bytes + intvl_start_le is_aligned_power2 + heap_list_is_zero_mono[OF heap_list_update_eq] + byte_regions_unmodified_actually_heap_list[OF _ _ refl, mk_strg I E] + typ_region_bytes_actually_is_bytes[OF refl] + region_actually_is_bytes_subset[OF typ_region_bytes_actually_is_bytes[OF refl]] + | simp add: unat_of_nat imp_conjL hrs_mem_update hrs_htd_update)+ + apply (simp add: maxUntypedSizeBits_def minUntypedSizeBits_def) + apply (rule conjI; clarsimp) + apply (rule conjI, erule is_aligned_weaken, simp) + by (clarsimp simp: order_trans[OF power_increasing[where a=2]] + addr_card_def card_word + is_aligned_weaken from_bool_0) + +lemma ccorres_cross_retype_zero_bytes_over_guard: + "range_cover ptr sz (APIType_capBits newType userSize) num_ret + \ ccorres_underlying rf_sr Gamm rvr xf arrel axf P' Q hs af cf + \ ccorres_underlying rf_sr Gamm rvr xf arrel axf + ((\s. invs' s + \ cte_wp_at' (\cte. \idx. cteCap cte = UntypedCap dev (ptr && ~~ mask sz) sz idx + \ idx \ unat (ptr && mask sz)) p s) and P') + {s'. (\ dev \ region_actually_is_zero_bytes ptr + (num_ret * 2 ^ APIType_capBits newType userSize) s') + \ (\cte cte' idx. 
cslift s' (cte_Ptr p) = Some cte' + \ ccte_relation cte cte' \ cteCap cte = UntypedCap dev (ptr && ~~ mask sz) sz idx) + \ s' \ Q} hs af cf" + apply (erule ccorres_guard_imp2) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule(1) rf_sr_ctes_of_clift, clarsimp) + apply (frule(2) rf_sr_cte_relation) + apply (case_tac dev) + apply fastforce + apply (frule(1) retype_offs_region_actually_is_zero_bytes, (simp | clarsimp)+) + apply fastforce + done + +lemma zero_bytes_heap_update: + "heap_list_is_zero (hrs_mem hrs) ptr n + \ region_is_bytes' ptr n (hrs_htd hrs) + \ h_t_valid (hrs_htd hrs) c_guard (cptr :: 'a ptr) + \ typ_uinfo_t TYPE ('a :: mem_type) \ typ_uinfo_t TYPE(8 word) + \ heap_list_is_zero (heap_update cptr v (hrs_mem hrs)) ptr n" + apply (frule(2) region_is_bytes_disjoint) + apply (clarsimp simp: heap_update_def) + apply (subst heap_list_update_disjoint_same, simp_all) + apply (simp add: Int_commute) + done + +crunches updateFreeIndex + for ksArchState[wp]: "\s. P (ksArchState s)" + +lemma invokeUntyped_Retype_ccorres: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and ct_active' and ex_cte_cap_to' cnodeptr + and (\s. case gsCNodes s cnodeptr of None \ False + | Some n \ length destSlots + unat start \ 2 ^ n) + and valid_untyped_inv' (Retype cref reset ptr_base ptr newType us destSlots isdev) + and K (isdev \ (newType = APIObjectType ArchTypes_H.apiobject_type.Untyped \ isFrameType newType)) + ) + (UNIV \ {s. retypeBase_' s = Ptr ptr} + \ {s. srcSlot_' s = Ptr cref} + \ {s. reset_' s = from_bool reset} + \ {s. newType_' s = object_type_from_H newType } + \ {s. unat (userSize_' s) = us } + \ {s. deviceMemory_' s = from_bool isdev} + \ {s. destCNode_' s = cte_Ptr cnodeptr} + \ {s. destOffset_' s = start \ (\n < length destSlots. destSlots ! n = cnodeptr + (start + of_nat n) * 2^cteSizeBits)} + \ {s. 
destLength_' s = of_nat (length destSlots)}) + [] + (invokeUntyped (Retype cref reset ptr_base ptr newType us destSlots isdev)) + (Call invokeUntyped_Retype_'proc)" + (is "ccorres _ _ _ ?P' [] _ _") + apply (rule ccorres_name_pre) + apply (clarsimp simp only: valid_untyped_inv_wcap') + proof - + fix s sz idx cte + assume vui1: "valid_untyped_inv_wcap' + (Invocations_H.untyped_invocation.Retype cref reset ptr_base ptr newType us destSlots isdev) + (Some (case Invocations_H.untyped_invocation.Retype cref reset ptr_base ptr newType us destSlots + isdev of + Invocations_H.untyped_invocation.Retype slot reset ptr_base ptr ty us slots d \ + capability.UntypedCap d (ptr && ~~ mask sz) sz idx)) + s" + and misc1[simplified]: "ct_active' s" "invs' s" "ex_cte_cap_to' cnodeptr s" + "case gsCNodes s cnodeptr of None \ False + | Some n \ length destSlots + unat start \ 2 ^ n" + "K (isdev \ (newType = APIObjectType ArchTypes_H.apiobject_type.Untyped \ isFrameType newType)) s" + + have vui: "valid_untyped_inv_wcap' (Retype cref reset ptr_base ptr newType us destSlots isdev) + (Some (UntypedCap isdev (ptr && ~~ mask sz) sz idx)) s" + using vui1 + by (clarsimp simp: cte_wp_at_ctes_of) + + have proofs: "invokeUntyped_proofs s cref reset ptr_base ptr newType us destSlots sz idx isdev" + using vui misc1 + by (clarsimp simp: cte_wp_at_ctes_of invokeUntyped_proofs_def) + + note no_simps[simp del] = untyped_range.simps usable_untyped_range.simps + atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + usableUntypedRange.simps + + have cover: + "range_cover ptr sz (APIType_capBits newType us) (length destSlots)" + using vui + by (clarsimp simp: cte_wp_at_ctes_of) + + have us_misc: + "newType = APIObjectType apiobject_type.CapTableObject \ 0 < us" + "newType = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits" + using vui + by (auto simp: cte_wp_at_ctes_of untypedBits_defs) + + note bits_low = APIType_capBits_low[OF us_misc] + + have misc2: + "destSlots \ []" "ptr_base && ~~ mask sz = ptr_base" + using vui + by (clarsimp simp: cte_wp_at_ctes_of)+ + + note misc = misc1 misc2 + + have us_misc': + "(newType = APIObjectType apiobject_type.CapTableObject \ us < 59)" + using cover + apply - + apply (drule range_cover_sz') + apply (clarsimp simp:APIType_capBits_def objBits_simps' word_bits_conv) + done + + have ptr_base_eq: + "ptr_base = ptr && ~~ mask sz" + using vui + by (clarsimp simp: cte_wp_at_ctes_of)+ + + have sz_bound: + "sz \ 47" + using vui misc + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule Finalise_R.ctes_of_valid', clarsimp) + apply (clarsimp simp: valid_cap_simps' untypedBits_defs) + done + + have APIType_capBits_max: + "APIType_capBits newType us \ maxUntypedSizeBits" + using vui by clarsimp + + have some_range_cover_arithmetic: + "(ptr + (of_nat (length destSlots) << unat (of_nat (APIType_capBits newType us) :: addr)) + - ptr_base >> 4) && mask 48 + = of_nat (getFreeIndex ptr_base + (ptr + of_nat (shiftL (length destSlots) + (APIType_capBits newType us)))) >> 4" + using cover range_cover_sz'[OF cover] + apply (simp add: getFreeIndex_def shiftl_t2n + unat_of_nat_eq shiftL_nat) + apply (rule less_mask_eq) + apply (rule shiftr_less_t2n) + apply (rule order_le_less_trans[where y="2 ^ sz"]) + apply (rule order_trans[OF _ range_cover_compare_bound_word[OF cover]]) + apply (simp add: ptr_base_eq mask_out_sub_mask mult.commute) + apply (simp add: word_less_nat_alt order_le_less_trans[OF 
sz_bound]) + apply (rule order_less_le_trans, rule power_strict_increasing, + rule order_le_less_trans[OF sz_bound lessI], simp+) + done + + show + "ccorres (cintr \ dc) + (liftxf errstate id (K ()) ret__unsigned_long_') (\s'. s' = s) ?P' + [] (invokeUntyped (Retype cref reset ptr_base ptr newType us destSlots isdev)) + (Call invokeUntyped_Retype_'proc)" + apply (cinit lift: retypeBase_' srcSlot_' reset_' newType_' + userSize_' deviceMemory_' destCNode_' destOffset_' destLength_' + simp: when_def) + apply (rule ccorres_move_c_guard_cte) + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply (rename_tac ptr_fetch, + rule_tac P="ptr_fetch = ptr_base" in ccorres_gen_asm2) + apply csymbr + apply csymbr + apply (simp add: from_bool_0 del: Collect_const) + apply (rule_tac xf'=xfdc and r'=dc in ccorres_splitE) + apply (rule ccorres_Cond_rhs) + apply (simp add: whenE_def) + apply (rule ccorres_add_returnOk) + apply (ctac add: resetUntypedCap_ccorres) + apply (simp add: ccorres_cond_empty_iff) + apply (rule ccorres_returnOk_skip) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def cintr_def) + apply wp + apply (vcg exspec=resetUntypedCap_modifies) + apply (simp add: whenE_def) + apply (rule ccorres_returnOk_skip) + apply ceqv + apply (simp add: liftE_def bind_assoc) + apply csymbr + apply (rule ccorres_Guard_Seq) + apply csymbr + apply csymbr + apply (rule ccorres_move_c_guard_cte) + apply (rule ccorres_stateAssert) + apply (rule ccorres_stateAssert) + apply (rule ccorres_assert) + + apply (rule ccorres_cross_retype_zero_bytes_over_guard[where + dev=isdev and p=cref, OF cover]) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) + apply (rule_tac cap'="UntypedCap isdev ptr_base sz (if reset then 0 else idx)" + in updateFreeIndex_ccorres) + apply (rule allI, rule conseqPre, vcg) + apply (rule subsetI, clarsimp simp: typ_heap_simps' isCap_simps) + apply (frule ccte_relation_ccap_relation) + apply clarsimp + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (cut_tac some_range_cover_arithmetic) + apply (case_tac cte', clarsimp simp: modify_map_def fun_eq_iff split: if_split) + apply (simp add: mex_def meq_def ptr_base_eq) + apply (rule exI, strengthen refl, simp) + apply (strengthen globals.fold_congs, simp add: field_simps) + apply ceqv + apply (ctac add: createNewObjects_ccorres[where sz = sz and + start = start and cnodeptr=cnodeptr and + num = "of_nat (length destSlots)" + and idx = "getFreeIndex ptr_base + (ptr + of_nat (shiftL (length destSlots) (APIType_capBits newType us)))"]) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply wp + apply (vcg exspec=createNewObjects_modifies) + apply (simp add: canonicalAddressAssert_def) + (* strengthen canonical_address (ptr && ~~ mask sz) now, while we have canonical ptr *) + apply (drule canonical_address_and[where b="~~mask sz"]) + apply clarsimp + apply (wp updateFreeIndex_forward_invs' sch_act_simple_lift + updateFreeIndex_cte_wp_at hoare_vcg_const_Ball_lift + updateFreeIndex_pspace_no_overlap' + updateFreeIndex_caps_no_overlap'' + updateFreeIndex_caps_overlap_reserved + updateFreeIndex_descendants_range_in' + | simp)+ + apply (clarsimp simp: misc unat_of_nat_eq[OF range_cover.weak, OF cover]) + apply (vcg exspec=cap_untyped_cap_ptr_set_capFreeIndex_modifies) + apply simp + apply (rule validE_validE_R, 
rule hoare_strengthen_postE, + rule hoare_vcg_conj_liftE1[rotated, where Q="\_ s. + case gsCNodes s cnodeptr of None \ False + | Some n \ length destSlots + unat start \ 2 ^ n"], + rule whenE_reset_resetUntypedCap_invs_etc + [where ui="Retype cref reset ptr_base ptr newType us destSlots isdev" + and dev=isdev and ptr="ptr && ~~ mask sz" and ptr'=ptr and sz=sz and idx=idx]) + apply (simp add: whenE_def, wp resetUntypedCap_gsCNodes_at_pt)[1] + prefer 2 + apply simp + apply (clarsimp simp only: ) + apply (frule(2) invokeUntyped_proofs.intro) + apply (cut_tac bits_low us_misc us_misc') + apply (clarsimp simp: cte_wp_at_ctes_of + invokeUntyped_proofs.caps_no_overlap' + invokeUntyped_proofs.ps_no_overlap' + invokeUntyped_proofs.descendants_range + if_split[where P="\v. v \ getFreeIndex x y" for x y] + empty_descendants_range_in' + invs_pspace_aligned' invs_pspace_distinct' + invs_ksCurDomain_maxDomain' + invokeUntyped_proofs.not_0_ptr + atLeastAtMost_iff[where i=0] + cong: if_cong) + apply (frule invokeUntyped_proofs.idx_le_new_offs) + apply (frule invokeUntyped_proofs.szw) + apply (frule invokeUntyped_proofs.descendants_range(2), simp) + apply (simp add: cNodeNoOverlap_retype_have_size shiftL_nat mult.commute + archNoOverlap_retype_have_size[simplified o_def]) + apply (clarsimp simp: getFreeIndex_def conj_comms shiftL_nat + is_aligned_weaken[OF range_cover.funky_aligned] + invs_valid_pspace' isCap_simps + arg_cong[OF mask_out_sub_mask, where f="\y. x - y" for x] + field_simps unat_of_nat_eq[OF range_cover.weak, OF cover] + if_apply_def2 invs_valid_objs' ptr_base_eq sz_bound + invs_urz untypedBits_defs) + + apply (intro conjI; (solves\clarsimp simp: mask_2pm1 field_simps\)?) + (* pspace_no_overlap *) + apply (cases reset, simp_all)[1] + (* is_aligned 4 *) + apply (erule is_aligned_weaken[OF range_cover.aligned]) + apply (clarsimp simp: APIType_capBits_low) + (* new idx le *) + apply (clarsimp split: if_split) + (* cnodeptr not in area *) + apply (rule contra_subsetD[rotated], + rule invokeUntyped_proofs.ex_cte_no_overlap'[OF proofs], rule misc) + apply (simp add: shiftl_t2n mult.commute) + apply (rule order_trans, erule range_cover_subset', simp_all)[1] + apply (clarsimp simp: mask_2pm1 field_simps) + (* descendants_range_in' *) + using invokeUntyped_proofs.descendants_range + apply (clarsimp simp: mask_2pm1 field_simps) + (* gsCNodes *) + apply (clarsimp simp: mask_2pm1 field_simps split: option.split_asm) + (* kernel data refs *) + apply (drule(1) valid_global_refsD'[OF _ invs_valid_global']) + apply clarsimp + apply (clarsimp simp: mask_2pm1 field_simps) + apply (subst Int_commute, erule disjoint_subset2[rotated]) + apply (rule order_trans, erule invokeUntyped_proofs.subset_stuff) + apply (simp add: atLeastatMost_subset_iff word_and_le2) + apply (clarsimp simp: mask_2pm1 field_simps) + (* offset bounds *) + apply (frule range_cover.unat_of_nat_n_shift, rule order_refl) + apply (rule order_trans[rotated], erule range_cover.range_cover_compare_bound) + apply (subst unat_plus_simple[THEN iffD1]) + apply (rule order_trans, erule range_cover.range_cover_base_le, + simp add: shiftl_t2n field_simps) + apply (simp add: shiftl_t2n field_simps) + (* subsets *) + apply (rule order_trans, erule invokeUntyped_proofs.subset_stuff) + apply (simp add: atLeastatMost_subset_iff word_and_le2) + apply (clarsimp simp: mask_2pm1 field_simps) + (* destSlots *) + apply (clarsimp split: if_split) + apply (frule invokeUntyped_proofs.slots_invD[OF proofs]) + apply (simp add: conj_comms) + (* usableUntyped *) + apply 
(drule invokeUntyped_proofs.usableRange_disjoint[where d=isdev]) + apply (clarsimp simp: field_simps mask_out_sub_mask) + + (* clean up the C postcondition before applying VCG *) + apply (rule conseqPost[where Q'=UNIV and A'=UNIV]) + apply (vcg exspec=resetUntypedCap_modifies) + apply (cut_tac range_cover.sz[OF cover] + invokeUntyped_proofs.idx_le_new_offs[OF proofs]) + apply (clarsimp simp: ccHoarePost_def hrs_mem_update + object_type_from_H_bound + typ_heap_simps' word_sle_def + word_of_nat_less zero_bytes_heap_update + region_actually_is_bytes) + apply (frule ccte_relation_ccap_relation) + apply (cut_tac vui) + apply (clarsimp simp: cap_get_tag_isCap getFreeIndex_def + cte_wp_at_ctes_of shiftL_nat + split: if_split) + apply (simp add: mask_out_sub_mask field_simps region_is_bytes'_def objBits_defs) + apply (clarsimp elim!: region_actually_is_bytes_subset) + apply (rule order_refl) + + apply (cut_tac misc us_misc' proofs us_misc bits_low + invokeUntyped_proofs.cref_inv[OF proofs]) + apply (clarsimp simp: cte_wp_at_ctes_of invokeUntyped_proofs_def + descendants_range'_def2 sch_act_simple_def + invs_valid_pspace' range_cover.sz) + apply (frule ctes_of_valid', fastforce) + apply (clarsimp simp: valid_cap'_def capAligned_def ct_in_state'_def + invs_valid_objs' inr_rrel_def) + apply (erule(1) rf_sr_ctes_of_cliftE) + apply (frule(2) rf_sr_cte_relation) + apply (clarsimp simp: cap_get_tag_isCap typ_heap_simps + dest!: ccte_relation_ccap_relation) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + + apply (intro conjI) + apply clarsimp + apply (drule invokeUntyped_proofs.ex_cte_no_overlap'[OF proofs]) + apply simp + apply (clarsimp simp: mask_2pm1 field_simps) + apply (frule(1) cap_get_tag_to_H) + apply (simp add: cap_lift_untyped_cap) + apply clarsimp + done +qed + +lemma ccorres_returnOk_Basic: + "\ \\ s. (\, s) \ sr \ r (Inr v) (xf (f s)) + \ (\, f s) \ sr \ \ + ccorres_underlying sr \ r xf arrel axf \ UNIV hs + (returnOk v) (Basic f)" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def) + done + +lemma injection_handler_sequenceE: + "injection_handler injf (sequenceE xs) + = sequenceE (map (injection_handler injf) xs)" + apply (induct xs, simp_all add: sequenceE_def) + apply (simp add: injection_handler_returnOk) + apply (simp add: injection_bindE[OF refl refl] + injection_handler_returnOk + Let_def) + done + +lemma getSlotCap_capAligned[wp]: + "\valid_objs'\ getSlotCap ptr \\rv s. capAligned rv\" + apply (rule hoare_strengthen_post, rule getSlotCap_valid_cap) + apply (clarsimp simp: valid_capAligned) + done + +lemma ccorres_throwError_inl_rrel: + "ccorres_underlying sr Gamm (inl_rrel r) xf arrel axf hs P P' + (throwError v) c + \ ccorres_underlying sr Gamm r xf (inl_rrel arrel) axf hs P P' + (throwError v) c" + apply (rule ccorresI') + apply (erule(3) ccorresE) + apply (simp add: throwError_def return_def) + apply assumption + apply (simp add: throwError_def return_def + unif_rrel_def split: if_split_asm) + done + +lemmas ccorres_return_C_errorE_inl_rrel + = ccorres_throwError_inl_rrel[OF ccorres_return_C_errorE] + +lemma mapME_ensureEmptySlot': + "\P\ + mapME (\x. injection_handler Inl (ensureEmptySlot (f x))) slots + \\rva s. P s \ (\slot \ set slots. (\cte. cteCap cte = capability.NullCap \ ctes_of s (f slot) = Some cte))\, -" + including no_pre + apply (induct slots arbitrary: P) + apply wpsimp + apply (rename_tac a slots P) + apply (simp add: mapME_def sequenceE_def Let_def) + apply (rule_tac Q="\rv. P and (\s. \cte.
cteCap cte = capability.NullCap \ ctes_of s (f a) = Some cte)" in validE_R_sp) + apply (simp add: ensureEmptySlot_def unlessE_def) + apply (wp injection_wp_E[OF refl] getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule meta_allE) + apply wp + apply (fold validE_R_def) + apply (erule hoare_strengthen_postE_R) + apply clarsimp + done + +lemma mapME_ensureEmptySlot: + "\\\ + mapME (\x. injection_handler Inl (ensureEmptySlot (f x))) [S .e. (E::machine_word)] + \\rva s. \slot. S \ slot \ slot \ E \ + (\cte. cteCap cte = capability.NullCap \ ctes_of s (f slot) = Some cte)\, -" + apply (rule hoare_strengthen_postE_R) + apply (rule mapME_ensureEmptySlot') + apply clarsimp + done + +lemma capCNodeRadix_CL_less_64: + "cap_get_tag ccap = scast cap_cnode_cap \ capCNodeRadix_CL (cap_cnode_cap_lift ccap) < 64" + apply (simp add: cap_cnode_cap_lift_def cap_lift_cnode_cap) + apply (rule order_le_less_trans, rule word_and_le1) + apply (simp add: mask_def) + done + +lemmas unat_capCNodeRadix_CL_less_64 + = capCNodeRadix_CL_less_64[unfolded word_less_nat_alt, simplified] + +lemmas capCNodeRadix_CL_less_64s + = capCNodeRadix_CL_less_64 unat_capCNodeRadix_CL_less_64 + linorder_not_le[THEN iffD2, OF capCNodeRadix_CL_less_64] + linorder_not_le[THEN iffD2, OF unat_capCNodeRadix_CL_less_64] + +lemma TripleSuc: + "Suc (Suc (Suc 0)) = 3" + by simp + +lemma case_sum_distrib: + "case_sum a b x >>= f = case_sum (\x. a x >>= f) (\x. b x >>= f) x" + by (case_tac x,simp+) + +lemma alignUp_spec: + "\s. \\ \s. alignment_' s < 0x40 \ Call alignUp_'proc + \\ret__unsigned_long = alignUp (baseValue_' s) (unat (alignment_' s))\" + apply vcg + apply (simp add:alignUp_def2 mask_def field_simps) + done + +lemma checkFreeIndex_ccorres: + "ccap_relation cp cap \ + ccorresG rf_sr \ (intr_and_se_rel \ (\r (fi, r'). r' = from_bool r + \ (case r of True \ fi = 0 | False \ capFreeIndex cp = unat (fi << 4)))) + (liftxf errstate (K (scast EXCEPTION_NONE)) id (\s. (freeIndex_' s, reset_' s))) + (cte_wp_at' (\cte. 
(cteCap cte = cp \ isUntypedCap cp)) slot and valid_objs' and valid_mdb') UNIV hs + (liftE $ constOnFailure False (doE y \ ensureNoChildren slot; returnOk True odE)) + (\status :== CALL ensureNoChildren(cte_Ptr slot);; + (Cond \\status \ scast EXCEPTION_NONE\ + ( \ret__unsigned_longlong :== CALL cap_untyped_cap_get_capFreeIndex(cap);; + \freeIndex :== \ret__unsigned_longlong;; + \reset :== scast false) + (\freeIndex :== 0 + ;; \reset :== scast true)))" + apply (simp add: constOnFailure_def catch_def liftE_def bindE_bind_linearise bind_assoc case_sum_distrib) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_split_nothrow_case_sum) + apply (ctac add:ensureNoChildren_ccorres) + apply (ceqv) + apply (rule ccorres_from_vcg[where P' = UNIV]) + apply (clarsimp simp add: returnOk_def, simp add: return_def) + apply (rule conseqPre) + apply vcg + apply clarsimp + apply simp + apply (rule ccorres_from_vcg[where P'= UNIV]) + apply (simp, clarsimp simp:return_def) + apply (rule conseqPre) + apply vcg + apply clarsimp + apply (rule context_conjI) + apply (clarsimp simp:cap_get_tag_isCap) + apply assumption + apply (clarsimp simp: ccap_relation_def isCap_simps cap_untyped_cap_lift_def cap_lift_def + cap_to_H_def + split: if_splits + cong: if_cong) + apply (rule ensureNoChildren_wp[where P = dc]) + apply clarsimp + apply (vcg exspec=ensureNoChildren_modifies) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma ccap_relation_untyped_CL_simps: + "\ccap_relation cp cap;isUntypedCap cp\ + \ unat (capBlockSize_CL (cap_untyped_cap_lift cap)) = (capBlockSize cp) + \ (capPtr_CL (cap_untyped_cap_lift cap)) = (capPtr cp)" + apply (frule cap_get_tag_UntypedCap) + apply (simp add:cap_get_tag_isCap) + done + +lemma valid_untyped_capBlockSize_misc: + "\s \' cp; isUntypedCap cp; (z::nat) \ capFreeIndex cp + \ \ 2 ^ capBlockSize cp - z < 2 ^ word_bits + \ of_nat (2^capBlockSize cp - z) = (2::machine_word) ^ capBlockSize cp - of_nat z" + apply (clarsimp simp:valid_cap'_def isCap_simps) + apply (rule le_less_trans[OF diff_le_self]) + apply (rule power_strict_increasing) + apply (simp add:word_bits_def) + apply (simp add: untypedBits_defs)+ + done + +lemma alternative_distrib: + "(do r\c; (a \ b) od) = ((do c; a od) \ (do c ; b od))" + apply (rule ext)+ + apply (clarsimp simp:alternative_def bind_def split_def) + apply force + done + +lemma setThreadStateRestart_ct_active': + "\ct_active'\ setThreadState Restart thread + \\rva s. 
ct_active' s\" + apply (simp add:ct_in_state'_def) + apply (rule hoare_pre) + apply (wps) + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + done + +lemma toEnum_object_type_to_H: + "unat v \ (fromEnum::object_type \ nat) maxBound + \ toEnum (unat v) = (object_type_to_H (v::machine_word))" + apply (simp add:enum_object_type enum_apiobject_type object_type_to_H_def toEnum_def + maxBound_less_length) + apply (clarsimp simp: Kernel_C_defs split:if_splits) + apply unat_arith + done + +lemma unat_of_nat_APIType_capBits: + "b \ word_bits + \ unat (of_nat (APIType_capBits z b) ::machine_word) = APIType_capBits z b" + apply (rule unat_of_nat64) + apply (case_tac z) + apply (clarsimp simp: invokeUntyped_proofs_def word_bits_conv APIType_capBits_def objBits_simps' + bit_simps + split: apiobject_type.splits if_split)+ + done + +lemma valid_untyped_inv'_D: + "valid_untyped_inv' (Retype cref reset ptr_base ptr ty us destSlots isdev) s + \ APIType_capBits ty us < 64" + apply clarsimp + apply (drule range_cover_sz') + apply (simp add:word_bits_def) + done + +lemma object_type_from_to_H: + "unat v \ (fromEnum::object_type \ nat) maxBound + \ v = object_type_from_H (object_type_to_H v)" + apply (simp add:toEnum_object_type_to_H[symmetric]) + apply (rule iffD1[OF word_unat.Rep_inject]) + apply (subst fromEnum_object_type_to_H[symmetric]) + apply (simp add:from_to_enum) + done + +lemma shiftR_gt0_le64: + "\0 < unat (of_nat (shiftR a b ));a < 2 ^ word_bits\ \ b< 64" + apply (rule ccontr) + apply (clarsimp simp:not_less shiftR_nat) + apply (subst (asm) div_less) + apply (erule less_le_trans) + apply (rule power_increasing) + apply (simp add:word_bits_def)+ + done + +lemma shiftr_overflow: + "64\ a \ (b::machine_word) >> a = 0" + apply (word_bitwise) + apply simp + done + +lemma ctes_of_ex_cte_cap_to': + "ctes_of s p = Some cte \ \r \ cte_refs' (cteCap cte) (irq_node' s). ex_cte_cap_to' r s" + by (auto simp add: ex_cte_cap_wp_to'_def cte_wp_at_ctes_of) + + +lemma Arch_isFrameType_spec: + "\s. \ \ \s. unat \type \ fromEnum (maxBound::ArchTypes_H.object_type)\ + Call Arch_isFrameType_'proc + \ \ret__unsigned_long = + from_bool (isFrameType ((toEnum (unat \<^bsup>s\<^esup> type))::ArchTypes_H.object_type))\" + apply vcg + apply (simp add: toEnum_object_type_to_H) + apply (frule object_type_from_to_H) + apply (auto dest!: object_type_from_H_toAPIType_simps[THEN iffD1,OF eq_commute[THEN iffD1]]) + apply (auto simp: object_type_to_H_def isFrameType_def isFrameType_def + split: if_splits object_type.splits) + apply (auto simp: object_type_from_H_def ) + done + +lemma decodeUntypedInvocation_ccorres_helper: + notes TripleSuc[simp] + notes valid_untyped_inv_wcap'.simps[simp del] tl_drop_1[simp] + notes gen_invocation_type_eq[simp] + shows + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and sch_act_simple and ct_active' + and valid_cap' cp and K (isUntypedCap cp) + and cte_wp_at' (\cte. cteCap cte = cp) slot + and (excaps_in_mem extraCaps \ ctes_of) + and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). + ex_nonz_cap_to' y s) + and (\s. \v \ set extraCaps. + s \' fst v) + and sysargs_rel args buffer) + (UNIV + \ {s. invLabel_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. ccap_relation cp (cap_' s)} + \ {s. slot_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. call_' s = from_bool isCall} + \ {s. 
buffer_' s = option_to_ptr buffer}) + [] + ((doE uinv \ decodeUntypedInvocation label args slot cp (map fst extraCaps); + liftE (stateAssert (valid_untyped_inv' uinv) []); returnOk uinv odE) + >>= invocationCatch thread isBlocking isCall InvokeUntyped) + (Call decodeUntypedInvocation_'proc)" + supply if_cong[cong] option.case_cong[cong] + apply (rule ccorres_name_pre) + apply (cinit' lift: invLabel_' length___unsigned_long_' cap_' slot_' current_extra_caps_' call_' buffer_' + simp: decodeUntypedInvocation_def list_case_If2 + invocation_eq_use_types) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: unlessE_def throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp del: Collect_const cong: call_ignore_cong) + apply csymbr + apply (simp add: if_1_0_0 word_less_nat_alt + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply simp + apply (rule ccorres_cond_true_seq) + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply (simp add: interpret_excaps_test_null + excaps_map_def if_1_0_0 + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_bind invocationCatch_def + split: list.split, + simp add: throwError_def return_def + syscall_error_rel_def exception_defs + syscall_error_to_H_cases) + apply (simp add: list_case_helper[OF neq_Nil_lengthI] + list_case_helper + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_add_return) + apply (simp only: ) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (simp only: ) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (simp only: ) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (simp only: ) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=3 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (simp only: ) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=4 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (simp only: ) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=5 and buffer=buffer]) + apply csymbr + apply (simp add: invocationCatch_use_injection_handler + injection_bindE[OF refl refl] + injection_handler_whenE + rangeCheck_def unlessE_whenE + injection_liftE [OF refl] + injection_handler_throwError + bindE_assoc length_ineq_not_Nil + injection_handler_If + cong: StateSpace.state.fold_congs globals.fold_congs + del: Collect_const) + apply (rule ccorres_split_when_throwError_cond + [where Q=\ and Q'=\, rotated -1]) + apply vcg + apply (simp add: seL4_ObjectTypeCount_def maxBound_is_length) + apply (subst hd_conv_nth, clarsimp) + apply (clarsimp simp: enum_object_type enum_apiobject_type + word_le_nat_alt) + apply arith + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply csymbr + apply ((rule ccorres_Guard_Seq)+)? 
+ apply (subst whenE_whenE_body) + apply (rule ccorres_split_when_throwError_cond + [where Q=\ and Q'=\, rotated -1]) + apply vcg + apply (clarsimp simp: word_size Collect_const_mem fromIntegral_def integral_inv + hd_drop_conv_nth2 word_le_nat_alt maxUntypedSizeBits_def + toEnum_object_type_to_H wordBits_def not_less[symmetric]) + apply (subst hd_conv_nth, clarsimp) + apply (simp add: unat_of_nat_APIType_capBits word_bits_def) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def maxUntypedSizeBits_def + syscall_error_rel_def exception_defs + syscall_error_to_H_cases) + apply (simp add: word_size word_sle_def) + apply (simp add: fromIntegral_def integral_inv + hd_drop_conv_nth2 + del: Collect_const) + apply (rule ccorres_split_when_throwError_cond + [where Q=\ and Q'=\, rotated -1]) + apply vcg + apply (clarsimp simp: Collect_const_mem unat_eq_0 + linorder_not_less + hd_conv_nth length_ineq_not_Nil) + apply (simp add: toEnum_eq_to_fromEnum_eq + fromEnum_object_type_to_H + object_type_from_H_def + fromAPIType_def AARCH64_H.fromAPIType_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_split_when_throwError_cond + [where Q=\ and Q'=\, rotated -1]) + apply vcg + apply (clarsimp simp: Collect_const_mem unat_eq_0 + linorder_not_less + hd_conv_nth length_ineq_not_Nil + toEnum_eq_to_fromEnum_eq) + apply (simp add: fromEnum_object_type_to_H + object_type_from_H_def untypedBits_defs + fromAPIType_def AARCH64_H.fromAPIType_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule_tac xf'="nodeCap_'" + and r'="\rv rv'. ccap_relation rv rv' \ unat (args ! 3) \ word_bits" + in ccorres_splitE) + apply (rule ccorres_cond2[where R=\]) + apply (clarsimp simp add: unat_eq_0 ) + apply (rule_tac P="args ! 3 = 0" in ccorres_gen_asm) + apply (rule ccorres_move_c_guard_cte) + apply (simp add: injection_handler_returnOk) + apply (rule ccorres_nohs) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply ctac + apply (rule ccorres_assert2) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap}" in ccorres_from_vcg[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) + apply (wp hoare_drop_imps) + apply vcg + apply (simp add: split_def injection_bindE[OF refl refl] + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply (ctac add: ccorres_injection_handler_csum1 + [OF lookupTargetSlot_ccorres, + unfolded lookupTargetSlot_def]) + apply (simp add: injection_liftE[OF refl]) + apply (simp add: liftE_liftM split_def hd_drop_conv_nth2 + cong: ccorres_all_cong) + apply (rule ccorres_nohs) + apply (rule ccorres_getSlotCap_cte_at) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply simp + apply (rule ccorres_split_throws, simp?, + rule ccorres_return_C_errorE_inl_rrel, simp+) + apply vcg + apply simp + apply wp + apply (simp add: all_ex_eq_helper) + apply (vcg exspec=lookupTargetSlot_modifies) + apply simp + apply (wp hoare_drop_imps) + apply (simp add: hd_conv_nth) + apply vcg + apply ceqv + apply (rule_tac P="\_. 
capAligned rv" in ccorres_cross_over_guard) + apply csymbr + apply (elim conjE) + apply (simp add: if_1_0_0 cap_get_tag_isCap + cap_case_CNodeCap_True_throw + injection_handler_whenE + injection_handler_throwError + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply simp + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def + syscall_error_rel_def exception_defs + syscall_error_to_H_cases) + apply (simp add: lookup_fault_missing_capability_lift + hd_drop_conv_nth2 numeral_eqs[symmetric]) + apply (rule le_64_mask_eq) + apply (simp add: word_bits_def word_le_nat_alt) + apply simp + apply csymbr + apply (rule ccorres_Guard_Seq) + apply csymbr + apply (rule ccorres_split_when_throwError_cond + [where Q=\ and Q'=\, rotated -1]) + apply vcg + apply (clarsimp simp: Collect_const_mem cap_get_tag_isCap[symmetric]) + apply (drule(1) cap_get_tag_to_H) + apply (clarsimp simp: linorder_not_le) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def + syscall_error_rel_def exception_defs + syscall_error_to_H_cases) + apply (simp add: cap_get_tag_isCap[symmetric]) + apply (drule(1) cap_get_tag_to_H) + apply clarsimp + apply (rule ccorres_split_when_throwError_cond + [where Q=\ and Q'=\, rotated -1]) + apply vcg + apply (clarsimp simp:) + apply (simp add: Kernel_Config.retypeFanOutLimit_def word_le_nat_alt + linorder_not_le) + apply (auto simp: linorder_not_le unat_eq_0)[1] + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def + syscall_error_rel_def exception_defs + syscall_error_to_H_cases) + apply (simp add: Kernel_Config.retypeFanOutLimit_def) + apply (rule ccorres_split_when_throwError_cond + [where Q=\ and Q'=\, rotated -1]) + apply vcg + apply (clarsimp simp: numeral_eqs[symmetric] + word_le_nat_alt linorder_not_le + cap_get_tag_isCap[symmetric]) + apply (drule(1) cap_get_tag_to_H) + apply clarsimp + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def + syscall_error_rel_def exception_defs + syscall_error_to_H_cases) + apply (simp add: cap_get_tag_isCap[symmetric]) + apply (drule(1) cap_get_tag_to_H) + apply (clarsimp) + apply csymbr + apply csymbr + apply csymbr + apply (simp add: mapM_locate_eq liftE_bindE + injection_handler_sequenceE mapME_x_sequenceE + whileAnno_def injection_bindE[OF refl refl] + bindE_assoc injection_handler_returnOk) + (* gsCNodes assertion *) + apply (rule ccorres_stateAssert) + apply (simp add: liftE_bindE[symmetric]) + apply (rule_tac P="capAligned rv" in ccorres_gen_asm) + apply (subgoal_tac "args ! 5 \ args ! 4 + args ! 5") + prefer 2 + apply (clarsimp simp: numeral_eqs[symmetric]) + apply (subst field_simps, erule plus_minus_no_overflow_ab) + apply (erule order_trans) + apply (rule order_less_imp_le, rule word_power_less_1) + apply (clarsimp simp: capAligned_def isCap_simps word_bits_def) + apply (rule ccorres_splitE) + apply (rule_tac F="\_ s. case gsCNodes s (RetypeDecls_H.capUntypedPtr rv) of + None \ False | Some n \ args ! 4 + args ! 5 - 1 < 2 ^ n" + in ccorres_sequenceE_while_gen' + [where i="unat (args ! 
4)" and xf'=xfdc + and xf_update="i_'_update" and xf="i_'" + and r'=dc and Q=UNIV]) + apply simp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_returnOk) + apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ + apply (ctac add: ccorres_injection_handler_csum1 + [OF ensureEmptySlot_ccorres]) + apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ + apply (simp add: ccorres_cond_iffs returnOk_def) + apply (rule ccorres_return_Skip') + apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ + apply (simp add: ccorres_cond_iffs inl_rrel_inl_rrel) + apply (rule ccorres_return_C_errorE_inl_rrel, + simp+)[1] + apply wp + apply (simp add: all_ex_eq_helper) + apply (vcg exspec=ensureEmptySlot_modifies) + apply (clarsimp simp: upto_enum_word + split: if_split_asm simp del: upt.simps) + apply (simp add: cte_level_bits_def field_simps size_of_def + numeral_eqs[symmetric]) + apply (simp add: cap_get_tag_isCap[symmetric] + split: option.split_asm) + apply (drule(1) rf_sr_gsCNodes_array_assertion) + apply (drule(1) cap_get_tag_to_H) + apply clarsimp + apply (subgoal_tac P for P) + apply (subst array_assertion_shrink_right, assumption, assumption) + apply (simp add: array_assertion_shrink_right) + apply (rule unat_le_helper, simp) + apply (erule order_trans[rotated]) + apply (subst add.commute, rule word_plus_mono_right) + apply (simp add: unat_plus_simple[THEN iffD1] olen_add_eqv[symmetric]) + apply (simp add: word_le_nat_alt unat_of_nat) + apply (simp add: olen_add_eqv[symmetric]) + apply (clarsimp simp add: upto_enum_word + simp del: upt.simps) + apply (simp add: word_less_nat_alt[symmetric] numeral_eqs[symmetric]) + apply (simp add: Suc_unat_diff_1) + apply (subst iffD1 [OF unat_plus_simple]) + apply (erule iffD2 [OF olen_add_eqv]) + apply simp + apply (rule conseqPre, vcg exspec=ensureEmptySlot_modifies) + apply clarsimp + apply simp + apply (wp injection_wp_E[OF refl]) + apply (simp only: word_bits_def[symmetric]) + apply clarsimp + apply (simp add: upto_enum_word numeral_eqs[symmetric] + del: upt.simps) + apply (subst Suc_unat_diff_1) + apply clarsimp + apply unat_arith + apply (subst(asm) olen_add_eqv[symmetric]) + apply (simp add: iffD1 [OF unat_plus_simple]) + apply (simp add: iffD1 [OF unat_plus_simple, symmetric]) + apply (simp only: word_bits_def) + apply (rule less_le_trans, rule unat_lt2p, simp) + apply simp + apply simp + apply (rule ceqv_refl) + apply (ctac (c_lines 2) add:checkFreeIndex_ccorres[unfolded fun_app_def]) + apply (rename_tac reset reset_fi_tup) + apply (rule_tac xf'=reset_' in ccorres_abstract, ceqv) + apply (rule_tac xf'=freeIndex_' in ccorres_abstract, ceqv) + apply (rename_tac reset' fi', rule_tac P="reset_fi_tup = (fi', reset')" + in ccorres_gen_asm2) + apply csymbr + apply csymbr+ + apply (rule ccorres_Guard_Seq)+ + apply csymbr + apply (rule ccorres_Guard_Seq) + apply (rule_tac ccorres_split_when_throwError_cond[where Q = \ and Q' = \]) + apply (case_tac reset; + clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat + valid_untyped_capBlockSize_misc + valid_untyped_capBlockSize_misc[where z=0, simplified] + of_nat_shiftR toEnum_object_type_to_H) + apply (subst hd_conv_nth, clarsimp) + apply (subst unat_of_nat_APIType_capBits, + clarsimp simp: wordBits_def word_size word_bits_def) + apply simp + apply (subst hd_conv_nth, clarsimp) + apply (subst unat_of_nat_APIType_capBits, + clarsimp simp: wordBits_def word_size word_bits_def) + apply simp + apply (rule syscall_error_throwError_ccorres_n) + apply (case_tac reset; clarsimp simp: syscall_error_rel_def shiftL_nat + 
ccap_relation_untyped_CL_simps syscall_error_to_H_cases + valid_untyped_capBlockSize_misc) + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=ret__int_' in ccorres_abstract, ceqv) + apply (rule_tac P = "rv'b = from_bool (capIsDevice cp \ + \ isFrameType (toEnum (unat (hd args))))" + in ccorres_gen_asm2) + apply (rule_tac ccorres_split_when_throwError_cond[where Q = \ and Q' = \]) + apply (clarsimp simp: toEnum_eq_to_fromEnum_eq length_ineq_not_Nil + fromEnum_object_type_to_H from_bool_0 + object_type_from_H_def hd_conv_nth + fromAPIType_def AARCH64_H.fromAPIType_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (clarsimp simp: syscall_error_rel_def + ccap_relation_untyped_CL_simps shiftL_nat + syscall_error_to_H_cases ) + apply csymbr + apply (simp add:liftE_bindE) + apply (rule ccorres_symb_exec_l) + apply (simp (no_asm) add: ccorres_invocationCatch_Inr split_def + performInvocation_def liftE_bindE bind_assoc) + apply (ctac add: setThreadState_ccorres) + apply (rule ccorres_trim_returnE, (simp (no_asm))+) + apply (simp (no_asm) add: bindE_assoc bind_bindE_assoc) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (ctac(no_vcg) add: invokeUntyped_Retype_ccorres[where start = "args!4"]) + apply (rule ccorres_alternative2) + apply (rule ccorres_returnOk_skip) + apply (simp(no_asm) add: throwError_def, rule ccorres_return_Skip') + apply (rule hoare_vcg_conj_lift + | rule_tac p="capCNodePtr rv" in setThreadState_cap_to' + | wp (once) sts_invs_minor' setThreadStateRestart_ct_active' + sts_valid_untyped_inv')+ + apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat + toEnum_object_type_to_H unat_of_nat_APIType_capBits + word_size valid_untyped_capBlockSize_misc + getFreeRef_def hd_conv_nth length_ineq_not_Nil) + apply (rule_tac conseqPost[where A' = "{}" and Q' = UNIV]) + apply (vcg exspec=setThreadState_modifies) + apply (clarsimp simp: object_type_from_to_H cap_get_tag_isCap + ccap_relation_isDeviceCap) + apply (frule_tac cap = rv in cap_get_tag_to_H(5)) + apply (simp add: cap_get_tag_isCap) + apply (simp add: field_simps Suc_unat_diff_1) + apply (rule conjI) + apply (clarsimp split: bool.split_asm + simp: unat_of_nat_APIType_capBits wordBits_def + word_size word_bits_def) + apply (frule iffD2[OF olen_add_eqv]) + apply (frule(1) isUntypedCap_ccap_relation_helper) + apply (clarsimp simp: unat_plus_simple[THEN iffD1]) + apply (subst upto_enum_word) + apply (subst nth_map_upt) + apply (clarsimp simp: field_simps Suc_unat_diff_1 unat_plus_simple[THEN iffD1]) + apply (clarsimp simp: cte_level_bits_def objBits_defs) + apply simp + apply wp + apply simp + apply (simp (no_asm)) + apply (rule hoare_strengthen_post[OF stateAssert_sp]) + apply clarsimp + apply assumption + apply simp + apply clarsimp + apply vcg + apply clarsimp + apply vcg + apply clarsimp + apply (rule conseqPre,vcg,clarsimp) + apply vcg + apply (rule ccorres_guard_imp + [where Q =\ and Q' = UNIV,rotated],assumption+) + apply simp + apply (simp add: liftE_validE) + apply (rule checkFreeIndex_wp) + apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat cap_get_tag_isCap + toEnum_object_type_to_H unat_of_nat_APIType_capBits word_size + valid_untyped_capBlockSize_misc getFreeRef_def hd_conv_nth length_ineq_not_Nil) + apply (rule_tac Q' ="{sa. 
+ ksCurThread_' (globals sa) = tcb_ptr_to_ctcb_ptr (ksCurThread s)}" in conseqPost[where + A' = "{}"]) + apply (vcg exspec=ensureNoChildren_modifies + exspec=cap_untyped_cap_get_capFreeIndex_modifies) + apply (rule subsetI, + clarsimp simp:toEnum_object_type_to_H not_le word_sle_def + enum_apiobject_type enum_object_type maxBound_is_length + unat_of_nat_APIType_capBits word_size hd_conv_nth length_ineq_not_Nil + not_less word_le_nat_alt isCap_simps valid_cap_simps') + apply (strengthen word_of_nat_less) + apply (clarsimp simp: ThreadState_defs mask_def ccap_relation_isDeviceCap2 + split: if_split) + apply (clarsimp simp: not_less shiftr_overflow maxUntypedSizeBits_def + unat_of_nat_APIType_capBits) + apply (intro conjI impI; + clarsimp simp: not_less shiftr_overflow unat_of_nat_APIType_capBits + wordBits_def word_size word_bits_def) + apply simp + apply simp + apply (rule_tac Q'="\r. cte_wp_at' (\cte. cteCap cte = cp) slot + and invs' and (\s. ksCurThread s = thread) + and ex_cte_cap_to' (capCNodePtr rv) + and (\s. case gsCNodes s (capCNodePtr rv) of None \ False + | Some n \ args ! 4 + args ! 5 - 1 < 2 ^ n) + and sch_act_simple and ct_active'" in hoare_strengthen_postE_R) + prefer 2 + apply (clarsimp simp: invs_valid_objs' invs_mdb' + ct_in_state'_def pred_tcb_at') + apply (subgoal_tac "ksCurThread s \ ksIdleThread sa") + prefer 2 + apply clarsimp + apply (frule st_tcb_at_idle_thread',fastforce) + apply (clarsimp simp: valid_idle'_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def + invs'_def valid_state'_def) + apply (subgoal_tac "tcbState obja \ Inactive \ \ idle' (tcbState obja)") + prefer 2 + apply (rule conjI, clarsimp) + apply (clarsimp dest!:invs_valid_idle') + apply (subgoal_tac "tcb_st_refs_of' (tcbState obja) = {}") + prefer 2 + apply fastforce (* slow fastforce *) + apply (clarsimp split: if_splits simp: not_less toEnum_object_type_to_H + word_size hd_conv_nth length_ineq_not_Nil) + apply (subgoal_tac "tcbQueued obja \ runnable' (tcbState obja)") + apply (simp add: trans [OF olen_add_eqv[symmetric] unat_plus_simple] + fromAPIType_def) + apply (clarsimp simp: word_le_nat_alt unat_2tp_if + valid_tcb_state'_def + split: option.split_asm if_split_asm) + apply blast + apply (case_tac "tcbState obja", + (simp add: runnable'_def valid_tcb_state'_def)+)[1] + apply simp + apply (rule validE_validE_R, rule mapME_wp'[unfolded mapME_def]) + apply (rule hoare_pre) + apply (rule validE_R_validE) + apply (wp injection_wp_E[OF refl]) + apply clarsimp + apply (simp add: ccHoarePost_def) + apply (simp only: whileAnno_def[where I=UNIV and V=UNIV, symmetric]) + apply (rule_tac V=UNIV + in HoarePartial.reannotateWhileNoGuard) + apply (vcg exspec=ensureEmptySlot_modifies) + prefer 2 + apply clarsimp + apply (subst (asm) mem_Collect_eq, assumption) + apply clarsimp + apply (rule_tac Q'="\r s. cte_wp_at' (\cte. cteCap cte = cp) slot s + \ invs' s \ ksCurThread s = thread + \ valid_cap' r s + \ (\rf\cte_refs' r (irq_node' s). ex_cte_cap_to' rf s) + \ sch_act_simple s \ ct_active' s" in hoare_strengthen_postE_R) + apply clarsimp + apply (wp injection_wp_E[OF refl] getSlotCap_cap_to' + getSlotCap_capAligned + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: valid_capAligned isCap_simps) + apply (drule_tac x=0 in bspec, simp+) + apply (frule(1) base_length_minus_one_inequality[where + wbase="args ! 4" and wlength="args ! 
5"], simp_all)[1] + apply (simp add: valid_cap_simps' capAligned_def word_bits_def) + apply (clarsimp simp: upto_enum_def word_le_nat_alt[symmetric] + split: option.split_asm if_split_asm) + apply (drule spec, drule mp, erule conjI, rule order_refl) + apply clarsimp + apply (simp del: Collect_const) + apply (vcg exspec=lookupTargetSlot_modifies) + apply simp + apply wp + apply (simp add: all_ex_eq_helper) + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply clarsimp + apply (clarsimp simp: hd_drop_conv_nth2 hd_conv_nth neq_Nil_lengthI + ct_in_state'_def pred_tcb_at' + rf_sr_ksCurThread mask_eq_iff_w2p + numeral_eqs[symmetric] cap_get_tag_isCap cte_wp_at_ctes_of + unat_eq_0 ccHoarePost_def) + apply (rule conjI) + apply (clarsimp simp: linorder_not_less isCap_simps) + apply (clarsimp simp: sysargs_rel_to_n) + apply (rule conjI, clarsimp) + apply (clarsimp simp: fromAPIType_def) + apply (subgoal_tac "unat (args ! Suc 0) < word_bits") + prefer 2 + apply (simp add: word_size fromIntegral_def fromInteger_nat toInteger_nat word_bits_def + maxUntypedSizeBits_def wordBits_def) + apply (clarsimp simp: excaps_map_def neq_Nil_conv excaps_in_mem_def + slotcap_in_mem_def cte_wp_at_ctes_of + valid_capAligned[OF ctes_of_valid'] invs_valid_objs' + dest!: interpret_excaps_eq) + apply (clarsimp dest!: ctes_of_ex_cte_cap_to') + apply (simp only: word_bits_def unat_lt2p) + apply (frule interpret_excaps_eq) + apply (clarsimp simp: if_1_0_0 word_less_nat_alt neq_Nil_conv + mask_def[where n=4] excaps_map_def + ccap_rights_relation_def word_sle_def + rightsFromWord_wordFromRights + excaps_in_mem_def slotcap_in_mem_def + signed_shift_guard_simpler_64 + extra_sle_sless_unfolds + elim!: inl_inrE + simp del: rf_sr_upd_safe imp_disjL) + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_isCap_unfolded_H_cap + capCNodeRadix_CL_less_64s rf_sr_ksCurThread not_le + elim!: inl_inrE) + apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_isCap_unfolded_H_cap + capCNodeRadix_CL_less_64s rf_sr_ksCurThread not_le + elim!: inl_inrE) + apply (clarsimp simp: enum_object_type enum_apiobject_type word_le_nat_alt seL4_ObjectTypeCount_def) + done + +lemma decodeUntypedInvocation_ccorres: +notes TripleSuc[simp] +notes valid_untyped_inv_wcap'.simps[simp del] +shows + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and sch_act_simple and ct_active' + and valid_cap' cp and K (isUntypedCap cp) + and cte_wp_at' (\cte. cteCap cte = cp) slot + and (excaps_in_mem extraCaps \ ctes_of) + and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). + ex_nonz_cap_to' y s) + and (\s. \v \ set extraCaps. + s \' fst v) + and sysargs_rel args buffer) + (UNIV + \ {s. invLabel_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. ccap_relation cp (cap_' s)} + \ {s. slot_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. call_' s = from_bool isCall} + \ {s. 
buffer_' s = option_to_ptr buffer}) + [] + (decodeUntypedInvocation label args slot cp (map fst extraCaps) + >>= invocationCatch thread isBlocking isCall InvokeUntyped) + (Call decodeUntypedInvocation_'proc)" + apply (rule ccorres_name_pre) + apply (clarsimp simp: isCap_simps) + apply (rule ccorres_guard_imp2) + apply (rule monadic_rewrite_ccorres_assemble) + apply (rule_tac isBlocking=isBlocking and isCall=isCall and buffer=buffer + in decodeUntypedInvocation_ccorres_helper) + apply assumption + apply (rule monadic_rewrite_trans[rotated]) + apply (rule monadic_rewrite_bind_head) + apply (simp add: liftE_bindE stateAssert_def2 bind_assoc) + apply (monadic_rewrite_r monadic_rewrite_if_r_True) + apply (monadic_rewrite_r_method monadic_rewrite_symb_exec_r_drop wpsimp) + apply (rule monadic_rewrite_refl) + apply wpsimp + apply (rule monadic_rewrite_refl) + apply (rule monadic_rewrite_refl) + apply (clarsimp simp: ex_cte_cap_wp_to'_def excaps_in_mem_def) + apply (drule(1) bspec)+ + apply (rule_tac x = b in exI) + apply (clarsimp simp: slotcap_in_mem_def cte_wp_at_ctes_of) + done + +end +end diff --git a/proof/crefine/AARCH64/IpcCancel_C.thy b/proof/crefine/AARCH64/IpcCancel_C.thy new file mode 100644 index 0000000000..eab695a1e4 --- /dev/null +++ b/proof/crefine/AARCH64/IpcCancel_C.thy @@ -0,0 +1,2883 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory IpcCancel_C +imports SyscallArgs_C +begin + +context kernel_m +begin + +lemma cready_queues_index_to_C_in_range': + assumes prems: "qdom \ maxDomain" "prio \ maxPriority" + shows "cready_queues_index_to_C qdom prio < num_tcb_queues" +proof - + have P: "unat prio < numPriorities" + using prems + by (simp add: numPriorities_def Suc_le_lessD unat_le_helper maxDomain_def maxPriority_def) + have Q: "unat qdom < numDomains" + using prems + by (simp add: maxDom_to_H le_maxDomain_eq_less_numDomains word_le_nat_alt) + show ?thesis + using mod_lemma[OF _ P, where q="unat qdom" and c=numDomains] Q + by (clarsimp simp: num_tcb_queues_calculation cready_queues_index_to_C_def field_simps) +qed + +lemmas cready_queues_index_to_C_in_range = + cready_queues_index_to_C_in_range'[simplified num_tcb_queues_val] + +lemma cready_queues_index_to_C_inj: + "\ cready_queues_index_to_C qdom prio = cready_queues_index_to_C qdom' prio'; + prio \ maxPriority; prio' \ maxPriority \ \ prio = prio' \ qdom = qdom'" + apply (rule context_conjI) + apply (auto simp: cready_queues_index_to_C_def numPriorities_def maxPriority_def + seL4_MaxPrio_def word_le_nat_alt dest: arg_cong[where f="\x. x mod 256"]) + done + +lemma cready_queues_index_to_C_distinct: + "\ qdom = qdom' \ prio \ prio'; prio \ maxPriority; prio' \ maxPriority \ + \ cready_queues_index_to_C qdom prio \ cready_queues_index_to_C qdom' prio'" + apply (auto simp: cready_queues_index_to_C_inj) + done + +lemma cmap_relation_drop_fun_upd: + "\ cm x = Some v; \v''. rel v'' v = rel v'' v' \ + \ cmap_relation am (cm (x \ v')) f rel + = cmap_relation am cm f rel" + apply (simp add: cmap_relation_def) + apply (rule conj_cong[OF refl]) + apply (rule ball_cong[OF refl]) + apply (auto split: if_split) + done + +lemma ntfn_ptr_get_queue_spec: + "\s. \ \ {\. 
s = \ \ \ \\<^sub>c \<^bsup>\\<^esup>ntfnPtr} \ret__struct_tcb_queue_C :== PROC ntfn_ptr_get_queue(\ntfnPtr) + \head_C \ret__struct_tcb_queue_C = Ptr (ntfnQueue_head_CL (notification_lift (the (cslift s \<^bsup>s\<^esup>ntfnPtr)))) \ + end_C \ret__struct_tcb_queue_C = Ptr (ntfnQueue_tail_CL (notification_lift (the (cslift s \<^bsup>s\<^esup>ntfnPtr))))\" + apply vcg + apply clarsimp + done + +abbreviation + "cslift_all_but_tcb_C s t \ (cslift s :: cte_C typ_heap) = cslift t + \ (cslift s :: endpoint_C typ_heap) = cslift t + \ (cslift s :: notification_C typ_heap) = cslift t + \ (cslift s :: vcpu_C typ_heap) = cslift t + \ (cslift s :: asid_pool_C typ_heap) = cslift t + \ (cslift s :: pte_C typ_heap) = cslift t + \ (cslift s :: user_data_C typ_heap) = cslift t + \ (cslift s :: user_data_device_C typ_heap) = cslift t" + +lemma tcbEPDequeue_spec: + "\s queue. \ \ \s. \t. (t, s) \ rf_sr + \ (\tcb\set queue. tcb_at' tcb t) \ distinct queue + \ (ctcb_ptr_to_tcb_ptr \tcb \ set queue) + \ ep_queue_relation' (cslift s) queue (head_C \queue) (end_C \queue) \ + Call tcbEPDequeue_'proc + {t. (head_C (ret__struct_tcb_queue_C_' t) = + (if (tcbEPPrev_C (the (cslift s (\<^bsup>s\<^esup>tcb)))) = NULL then + (tcbEPNext_C (the (cslift s (\<^bsup>s\<^esup>tcb)))) + else + (head_C \<^bsup>s\<^esup>queue))) + \ (end_C (ret__struct_tcb_queue_C_' t) = + (if (tcbEPNext_C (the (cslift s (\<^bsup>s\<^esup>tcb)))) = NULL then + (tcbEPPrev_C (the (cslift s (\<^bsup>s\<^esup>tcb)))) + else + (end_C \<^bsup>s\<^esup>queue))) + \ (ep_queue_relation' (cslift t) + (Lib.delete (ctcb_ptr_to_tcb_ptr \<^bsup>s\<^esup>tcb) queue) + (head_C (ret__struct_tcb_queue_C_' t)) + (end_C (ret__struct_tcb_queue_C_' t)) + \ (cslift t |` (- tcb_ptr_to_ctcb_ptr ` set queue)) = + (cslift s |` (- tcb_ptr_to_ctcb_ptr ` set queue)) + \ option_map tcb_null_ep_ptrs \ (cslift t) = + option_map tcb_null_ep_ptrs \ (cslift s)) + \ cslift_all_but_tcb_C t s + \ (\rs. 
zero_ranges_are_zero rs (\<^bsup>t\<^esup>t_hrs) + = zero_ranges_are_zero rs (\<^bsup>s\<^esup>t_hrs)) + \ (hrs_htd \<^bsup>t\<^esup>t_hrs) = (hrs_htd \<^bsup>s\<^esup>t_hrs)}" + apply (intro allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp split del: if_split) + apply (frule (4) tcb_queue_valid_ptrsD [OF _ _ _ _ tcb_queue_relation'_queue_rel]) + apply (elim conjE exE) + apply (frule (3) tcbEPDequeue_update) + apply simp + apply (unfold upd_unless_null_def) + apply (frule (2) tcb_queue_relation_ptr_rel' [OF tcb_queue_relation'_queue_rel]) + prefer 2 + apply assumption + apply simp + apply (frule c_guard_clift) + apply (simp add: typ_heap_simps') + apply (intro allI conjI impI) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff) + apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff + cong: if_weak_cong) + apply (rule ext) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff tcb_null_ep_ptrs_def) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff c_guard_clift) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff) + apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff tcb_null_ep_ptrs_def + cong: if_weak_cong) + apply (rule ext) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff tcb_null_ep_ptrs_def) + apply (rule ext) + apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff tcb_null_ep_ptrs_def + cong: if_weak_cong) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff c_guard_clift) + apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff + cong: if_weak_cong) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff tcb_null_ep_ptrs_def) + apply (rule ext) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff tcb_null_ep_ptrs_def) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff c_guard_clift) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff c_guard_clift) + done + +lemma ntfn_ptr_set_queue_spec: + "\s. \ \ \s. s \\<^sub>c \ntfnPtr\ Call ntfn_ptr_set_queue_'proc + {t. (\ntfn'. notification_lift ntfn' = + (notification_lift (the (cslift s (\<^bsup>s\<^esup>ntfnPtr))))\ + ntfnQueue_head_CL := make_canonical (ptr_val (head_C \<^bsup>s\<^esup>ntfn_queue)), + ntfnQueue_tail_CL := make_canonical (ptr_val (end_C \<^bsup>s\<^esup>ntfn_queue)) \ + \ t_hrs_' (globals t) = hrs_mem_update (heap_update (\<^bsup>s\<^esup>ntfnPtr) ntfn') + (t_hrs_' (globals s)))}" + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: packed_heap_update_collapse_hrs typ_heap_simps' make_canonical_def + canonical_bit_def) + done + +lemma cancelSignal_ccorres_helper: + "ccorres dc xfdc (invs' and st_tcb_at' ((=) (BlockedOnNotification ntfn)) thread and ko_at' ntfn' ntfn) + UNIV + [] + (setNotification ntfn (ntfnObj_update + (\_. if remove1 thread (ntfnQueue (ntfnObj ntfn')) = [] + then ntfn.IdleNtfn + else ntfnQueue_update (\_. 
remove1 thread (ntfnQueue (ntfnObj ntfn'))) (ntfnObj ntfn')) ntfn')) + (\ntfn_queue :== CALL ntfn_ptr_get_queue(Ptr ntfn);; + \ntfn_queue :== CALL tcbEPDequeue(tcb_ptr_to_ctcb_ptr thread,\ntfn_queue);; + CALL ntfn_ptr_set_queue(Ptr ntfn,\ntfn_queue);; + IF head_C \ntfn_queue = NULL THEN + CALL notification_ptr_set_state(Ptr ntfn, + scast NtfnState_Idle) + FI)" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp split del: if_split) + apply (frule (2) ntfn_blocked_in_queueD) + apply (frule (1) ko_at_valid_ntfn' [OF _ invs_valid_objs']) + apply (elim conjE) + apply (frule (1) valid_ntfn_isWaitingNtfnD) + apply (elim conjE) + apply (frule cmap_relation_ntfn) + apply (erule (1) cmap_relation_ko_atE) + apply (rule conjI) + apply (erule h_t_valid_clift) + apply (rule impI) + apply (rule exI) + apply (rule conjI) + apply (rule_tac x = \ in exI) + apply (intro conjI, assumption+) + apply (drule (2) ntfn_to_ep_queue) + apply (simp add: tcb_queue_relation'_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) + apply (frule null_ep_queue [simplified comp_def]) + apply (intro impI conjI allI) + \ \empty case\ + apply clarsimp + apply (frule iffD1 [OF tcb_queue_head_empty_iff [OF tcb_queue_relation'_queue_rel]]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply clarsimp + apply simp + apply (simp add: setNotification_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: remove1_empty rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ntfn_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def) + apply (simp add: carch_state_relation_def carch_globals_def) + apply (clarsimp simp: carch_state_relation_def carch_globals_def + typ_heap_simps' packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + \ \non empty case\ + apply (frule tcb_queue_head_empty_iff [OF tcb_queue_relation'_queue_rel]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply clarsimp + apply (simp add: setNotification_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (frule (1) st_tcb_at_h_t_valid) + apply (simp add: remove1_empty rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ntfn_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue) + apply fastforce + apply assumption+ + apply simp + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def isWaitingNtfn_def + split: ntfn.splits split del: if_split) + apply (erule iffD1 [OF tcb_queue_relation'_cong [OF 
refl _ _ refl], rotated -1]) + apply (clarsimp simp add: h_t_valid_clift_Some_iff) + apply (subst tcb_queue_relation'_next_canonical; assumption?) + apply fastforce + apply (simp add: notification_lift_def make_canonical_def canonical_bit_def) + apply (clarsimp simp: h_t_valid_clift_Some_iff notification_lift_def) + apply (subst tcb_queue_relation'_prev_canonical; assumption?) + apply fastforce + apply (simp add: make_canonical_def canonical_bit_def) + apply simp + subgoal by (clarsimp simp: carch_state_relation_def carch_globals_def) + subgoal by (simp add: cmachine_state_relation_def) + subgoal by (simp add: h_t_valid_clift_Some_iff) + subgoal by (simp add: objBits_simps') + subgoal by (simp add: objBits_simps) + by assumption + +lemmas rf_sr_tcb_update_no_queue_gen + = rf_sr_tcb_update_no_queue[where t="t''\ globals := gs \ t_hrs_' := th \\" for th, simplified] + +lemma threadSet_tcbState_simple_corres: + "ccorres dc xfdc (tcb_at' thread) + {s. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := v64_' s && mask 4\, fl)) \ + thread_state_ptr_' s = Ptr &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C''])} [] + (threadSet (tcbState_update (\_. st)) thread) (Call thread_state_ptr_set_tsType_'proc)" + apply (rule threadSet_corres_lemma) + apply (rule thread_state_ptr_set_tsType_spec) + apply (rule thread_state_ptr_set_tsType_modifies) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps') + apply (rule rf_sr_tcb_update_no_queue_gen, assumption+, simp, simp_all) + apply (rule ball_tcb_cte_casesI, simp_all) + apply (frule cmap_relation_tcb) + apply (frule (1) cmap_relation_ko_atD) + apply clarsimp + apply (simp add: ctcb_relation_def cthread_state_relation_def) + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + done + +lemma ko_at_obj_congD': + "\ko_at' k p s; ko_at' k' p s\ \ k = k'" + apply (erule obj_atE')+ + apply simp + done + +lemma threadGet_vcg_corres_P: + assumes c: "\x. \\. \\ {s. (\, s) \ rf_sr + \ tcb_at' thread \ \ P \ + \ (\tcb. ko_at' tcb thread \ \ (\tcb'. + x = f tcb \ cslift s (tcb_ptr_to_ctcb_ptr thread) = Some tcb' + \ ctcb_relation tcb tcb'))} c {s. (\, s) \ rf_sr \ r x (xf s)}" + shows "ccorres r xf P UNIV hs (threadGet f thread) c" + apply (rule ccorres_add_return2) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_pre_threadGet) + apply (rule_tac P = "\\. \tcb. ko_at' tcb thread \ \ x = f tcb \ P \" + and P' = UNIV in ccorres_from_vcg) + apply (simp add: return_def) + apply (rule allI, rule conseqPre) + apply (rule spec [OF c]) + apply clarsimp + apply (frule cmap_relation_tcb) + apply (frule (1) cmap_relation_ko_atD) + apply clarsimp + apply (rule conjI) + apply (erule obj_at'_weakenE) + apply simp + apply clarsimp + apply (drule (1) ko_at_obj_congD') + apply simp + apply fastforce + done + +lemmas threadGet_vcg_corres = threadGet_vcg_corres_P[where P=\] + +lemma threadGet_specs_corres: + assumes spec: "\s. \ \ {s} Call g {t. xf t = f' s}" + and mod: "modifies_spec g" + and xf: "\f s. xf (globals_update f s) = xf s" + shows "ccorres r xf (ko_at' ko thread) {s'. r (f ko) (f' s')} hs (threadGet f thread) (Call g)" + apply (rule ccorres_Call_call_for_vcg) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return2) + apply (rule ccorres_pre_threadGet) + apply (rule_tac P = "\s. ko_at' ko thread s \ x = f ko" in ccorres_from_vcg [where P' = "{s'. r (f ko) (f' s')}"]) + apply (rule allI) + apply (rule HoarePartial.ProcModifyReturnNoAbr [where return' = "\s t. 
t\ globals := globals s \"]) + apply (rule HoarePartial.ProcSpecNoAbrupt [OF _ _ spec]) + defer + apply vcg + prefer 2 + apply (rule mod) + apply (clarsimp simp: mex_def meq_def) + apply (frule obj_at'_weakenE [OF _ TrueI]) + apply clarsimp + apply (drule (1) ko_at_obj_congD') + apply simp + apply (clarsimp simp: return_def) + apply (rule conjI) + apply (erule iffD1 [OF rf_sr_upd, rotated -1], simp_all)[1] + apply (simp add: xf) + done + +lemma ccorres_exI1: + assumes rl: "\x. ccorres r xf (Q x) (P' x) hs a c" + shows "ccorres r xf (\s. (\x. P x s) \ (\x. P x s \ Q x s)) + {s'. \x s. (s, s') \ rf_sr \ P x s \ s' \ P' x} hs a c" + apply (rule ccorresI') + apply clarsimp + apply (drule spec, drule (1) mp) + apply (rule ccorresE [OF rl], assumption+) + apply fastforce + apply assumption + apply assumption + apply fastforce + done + +lemma isStopped_ccorres [corres]: + "ccorres (\r r'. r = to_bool r') ret__unsigned_long_' + (tcb_at' thread) (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (isStopped thread) (Call isStopped_'proc)" + apply (cinit lift: thread_' simp: getThreadState_def) + apply (rule ccorres_pre_threadGet) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_cond_weak) + apply (rule ccorres_return_C) + apply simp + apply simp + apply simp + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_C) + apply simp + apply simp + apply simp + apply vcg + apply (rule conseqPre) + apply vcg + apply clarsimp + apply clarsimp + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ + done + +lemma isRunnable_ccorres [corres]: + "ccorres (\r r'. r = to_bool r') ret__unsigned_long_' + (tcb_at' thread) (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (isRunnable thread) (Call isRunnable_'proc)" + apply (cinit lift: thread_' simp: getThreadState_def) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_pre_threadGet) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_cond_weak) + apply (rule ccorres_return_C) + apply (simp) + apply (simp) + apply (simp) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_C) + apply (simp) + apply (simp) + apply (simp) + apply (vcg) + apply (rule conseqPre) + apply (vcg) + apply (clarsimp) + apply (clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ + done + +lemma tcb_ptr_to_ctcb_ptr_imageD: + "x \ tcb_ptr_to_ctcb_ptr ` S \ ctcb_ptr_to_tcb_ptr x \ S" + apply (erule imageE) + apply simp + done + +lemma ctcb_ptr_to_tcb_ptr_imageI: + "ctcb_ptr_to_tcb_ptr x \ S \ x \ tcb_ptr_to_ctcb_ptr ` S" + apply (drule imageI [where f = tcb_ptr_to_ctcb_ptr]) + apply simp + done + +lemma ctcb_relation_unat_prio_eq: + "ctcb_relation tcb tcb' \ unat (tcbPriority tcb) = unat (tcbPriority_C tcb')" + apply (clarsimp simp: ctcb_relation_def) + apply (erule_tac t = "tcbPriority_C tcb'" in subst) + apply simp + done + +lemma ctcb_relation_unat_dom_eq: + "ctcb_relation tcb tcb' \ unat (tcbDomain tcb) = unat (tcbDomain_C tcb')" + apply (clarsimp simp: ctcb_relation_def) + apply (erule_tac t = "tcbDomain_C tcb'" in subst) + apply simp + done + +lemma threadSet_queued_ccorres [corres]: + shows "ccorres dc xfdc (tcb_at' thread) + {s. v64_' s = from_bool v \ thread_state_ptr_' s = Ptr &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C''])} [] + (threadSet (tcbQueued_update (\_. 
v)) thread) + (Call thread_state_ptr_set_tcbQueued_'proc)" + apply (rule threadSet_corres_lemma) + apply (rule thread_state_ptr_set_tcbQueued_spec) + apply (rule thread_state_ptr_set_tcbQueued_modifies) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply clarsimp + apply (rule rf_sr_tcb_update_no_queue_gen, assumption+, simp, simp_all) + apply (rule ball_tcb_cte_casesI, simp_all) + apply (simp add: ctcb_relation_def cthread_state_relation_def) + apply (case_tac "tcbState ko"; simp) + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + done + +(* FIXME: move *) +lemma cmap_relation_no_upd: + "\ cmap_relation a c f rel; a p = Some ko; rel ko v; inj f \ \ cmap_relation a (c(f p \ v)) f rel" + apply (clarsimp simp: cmap_relation_def) + apply (subgoal_tac "f p \ dom c") + prefer 2 + apply (drule_tac t="dom c" in sym) + apply fastforce + apply clarsimp + apply (drule (1) injD) + apply simp + done + +(* FIXME: move *) +lemma cmap_relation_rel_upd: + "\ cmap_relation a c f rel; \v v'. rel v v' \ rel' v v' \ \ cmap_relation a c f rel'" + by (simp add: cmap_relation_def) + +declare fun_upd_restrict_conv[simp del] + +lemmas queue_in_range = of_nat_mono_maybe[OF _ cready_queues_index_to_C_in_range, + where 'a=32, unfolded cready_queues_index_to_C_def numPriorities_def, + simplified, unfolded ucast_nat_def] + of_nat_mono_maybe[OF _ cready_queues_index_to_C_in_range, + where 'a="32 signed", unfolded cready_queues_index_to_C_def numPriorities_def, + simplified, unfolded ucast_nat_def] + of_nat_mono_maybe[OF _ cready_queues_index_to_C_in_range, + where 'a=64, unfolded cready_queues_index_to_C_def numPriorities_def, + simplified, unfolded ucast_nat_def] + +lemma cready_queues_index_to_C_def2: + "\ qdom \ maxDomain; prio \ maxPriority \ + \ cready_queues_index_to_C qdom prio + = unat (ucast qdom * of_nat numPriorities + ucast prio :: machine_word)" + using numPriorities_machine_word_safe + apply - + apply (frule (1) cready_queues_index_to_C_in_range[simplified maxDom_to_H maxPrio_to_H]) + apply (subst unat_add_lem[THEN iffD1]) + apply (auto simp: unat_mult_simple cready_queues_index_to_C_def) + done + +lemma ready_queues_index_spec: + "\s. \ \ {s'. s' = s \ (Kernel_Config.numDomains \ 1 \ dom_' s' = 0)} + Call ready_queues_index_'proc + \\ret__unsigned_long = (dom_' s) * word_of_nat numPriorities + (prio_' s)\" + by vcg (simp add: numDomains_sge_1_simp numPriorities_def) + +lemma prio_to_l1index_spec: + "\s. \ \ {s} Call prio_to_l1index_'proc + \\ret__unsigned_long = prio_' s >> wordRadix \" + by vcg (simp add: word_sle_def wordRadix_def') + +lemma invert_l1index_spec: + "\s. \ \ {s} Call invert_l1index_'proc + \\ret__unsigned_long = of_nat l2BitmapSize - 1 - l1index_' s \" + unfolding l2BitmapSize_def' + by vcg + (simp add: word_sle_def sdiv_int_def sdiv_word_def smod_word_def smod_int_def) + +lemma cbitmap_L1_relation_update: + "\ (\, s) \ rf_sr ; cbitmap_L1_relation cupd aupd \ + \ (\\ksReadyQueuesL1Bitmap := aupd \, + globals_update (ksReadyQueuesL1Bitmap_'_update (\_. cupd)) s) + \ rf_sr" + by (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + cmachine_state_relation_def) + +lemma cbitmap_L2_relation_update: + "\ (\, s) \ rf_sr ; cbitmap_L2_relation cupd aupd \ + \ (\\ksReadyQueuesL2Bitmap := aupd \, + globals_update (ksReadyQueuesL2Bitmap_'_update (\_. 
cupd)) s) + \ rf_sr" + by (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + cmachine_state_relation_def) + +lemma cready_queues_index_to_C_ucast_helper: + fixes p :: priority + fixes d :: domain + shows "unat (ucast d * 0x100 + ucast p :: machine_word) = unat d * 256 + unat p" + unfolding tcb_queue_relation'_def maxPriority_def numPriorities_def + using unat_lt2p[where x=p] unat_lt2p[where x=d] + by (clarsimp simp: cready_queues_index_to_C_def word_le_nat_alt unat_word_ariths) + +lemmas prio_and_dom_limit_helpers = + prio_ucast_shiftr_wordRadix_helper + prio_ucast_shiftr_wordRadix_helper' + prio_ucast_shiftr_wordRadix_helper2 + prio_ucast_shiftr_wordRadix_helper3 + prio_unat_shiftr_wordRadix_helper' + cready_queues_index_to_C_ucast_helper + unat_ucast_prio_L1_cmask_simp + machine_word_and_3F_less_40 + +(* FIXME MOVE *) +lemma rf_sr_cbitmap_L1_relation[intro]: + "(\, x) \ rf_sr \ cbitmap_L1_relation (ksReadyQueuesL1Bitmap_' (globals x)) (ksReadyQueuesL1Bitmap \)" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + +(* FIXME MOVE *) +lemma rf_sr_cbitmap_L2_relation[intro]: + "(\, x) \ rf_sr \ cbitmap_L2_relation (ksReadyQueuesL2Bitmap_' (globals x)) (ksReadyQueuesL2Bitmap \)" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + +lemma cbitmap_L1_relation_bit_set: + fixes p :: priority + shows + "\ cbitmap_L1_relation (ksReadyQueuesL1Bitmap_' (globals x)) (ksReadyQueuesL1Bitmap \) ; + d \ maxDomain \ \ + cbitmap_L1_relation + (Arrays.update (ksReadyQueuesL1Bitmap_' (globals x)) (unat d) + (ksReadyQueuesL1Bitmap_' (globals x).[unat d] || 2 ^ unat (p >> wordRadix))) + ((ksReadyQueuesL1Bitmap \)(d := ksReadyQueuesL1Bitmap \ d || 2 ^ prioToL1Index p))" + apply (unfold cbitmap_L1_relation_def) + apply (clarsimp simp: le_maxDomain_eq_less_numDomains word_le_nat_alt prioToL1Index_def + num_domains_index_updates) + done + +lemma cbitmap_L2_relation_bit_set: + fixes p :: priority + fixes d :: domain + shows "\ cbitmap_L2_relation (ksReadyQueuesL2Bitmap_' (globals \')) (ksReadyQueuesL2Bitmap \) ; + d \ maxDomain ; b = b' \ + \ + cbitmap_L2_relation + (Arrays.update (ksReadyQueuesL2Bitmap_' (globals \')) (unat d) + (Arrays.update (ksReadyQueuesL2Bitmap_' (globals \').[unat d]) + (invertL1Index (prioToL1Index p)) + (ksReadyQueuesL2Bitmap_' (globals \').[unat d].[invertL1Index (prioToL1Index p)] || + 2 ^ unat (p && b)))) + ((ksReadyQueuesL2Bitmap \) + ((d, invertL1Index (prioToL1Index p)) := + ksReadyQueuesL2Bitmap \ (d, invertL1Index (prioToL1Index p)) || + 2 ^ unat (p && b')))" + unfolding cbitmap_L2_relation_def numPriorities_def wordBits_def word_size l2BitmapSize_def' + apply (clarsimp simp: word_size prioToL1Index_def wordRadix_def mask_def + invertL1Index_def l2BitmapSize_def' + le_maxDomain_eq_less_numDomains word_le_nat_alt) + apply (case_tac "da = d" ; clarsimp simp: num_domains_index_updates) + done + +lemma invert_prioToL1Index_c_simp: + "p \ maxPriority + \ + unat ((of_nat l2BitmapSize :: machine_word) - 1 - (ucast p >> wordRadix)) + = invertL1Index (prioToL1Index p)" + unfolding maxPriority_def l2BitmapSize_def' invertL1Index_def prioToL1Index_def + numPriorities_def + by (simp add: unat_sub prio_and_dom_limit_helpers) + +lemma c_invert_assist: "3 - (ucast (p :: priority) >> 6 :: machine_word) < 4" + using prio_ucast_shiftr_wordRadix_helper'[simplified wordRadix_def] + by - (rule word_less_imp_diff_less, simp_all) + +lemma addToBitmap_ccorres: + "ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast 
prio\) hs + (addToBitmap tdom prio) (Call addToBitmap_'proc)" + supply prio_and_dom_limit_helpers[simp] invert_prioToL1Index_c_simp[simp] + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (frule maxDomain_le_unat_ucast_explicit) + apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (intro conjI impI allI) + apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (rule conjI) + apply (clarsimp intro!: cbitmap_L1_relation_bit_set) + apply (fastforce dest!: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) + done + +lemma rf_sr_tcb_update_twice: + "h_t_valid (hrs_htd (hrs2 (globals s') (t_hrs_' (gs2 (globals s'))))) c_guard + (ptr (t_hrs_' (gs2 (globals s'))) (globals s')) + \ ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs :: tcb_C ptr) (v ths gs)) + (hrs_mem_update (heap_update (ptr ths gs) (v' ths gs)) (hrs2 gs ths))) (gs2 gs)) s') \ rf_sr) + = ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs) (v ths gs)) (hrs2 gs ths)) (gs2 gs)) s') \ rf_sr)" + by (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def typ_heap_simps' + carch_state_relation_def cmachine_state_relation_def + packed_heap_update_collapse_hrs) + +lemmas rf_sr_tcb_update_no_queue_gen2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue_gen, simplified] + +lemma tcb_queue_prepend_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueuePrepend queue tcbPtr) (Call tcb_queue_prepend_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. queue_' s' = cqueue" + in ccorres_cond_both') + apply fastforce + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. 
(s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueHead queue)) s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply clarsimp + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma tcb_queue_append_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s) + \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueAppend queue tcbPtr) (Call tcb_queue_append_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. queue_' s' = cqueue" + in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. 
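tcb_queue_prepend_ccorres (completed above) and tcb_queue_append_ccorres (in progress here) relate tcbQueuePrepend and tcbQueueAppend to C operations on a doubly-linked TCB list that carries explicit head and end pointers. The sketch below shows the shape of those two operations; field and type names approximate the kernel's, assertions and SMP details are omitted, and both functions assume the inserted TCB is not currently linked into any queue (its prev/next pointers are NULL).

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct tcb tcb_t;
    struct tcb {
        tcb_t *tcbSchedNext;
        tcb_t *tcbSchedPrev;
        /* ... other TCB fields ... */
    };

    typedef struct tcb_queue {
        tcb_t *head;
        tcb_t *end;
    } tcb_queue_t;

    static inline bool tcb_queue_empty(tcb_queue_t queue)
    {
        /* invariant: head is NULL exactly when end is NULL */
        return queue.head == NULL;
    }

    /* Insert tcb at the front; an empty queue gains tcb as both head and end. */
    static tcb_queue_t tcb_queue_prepend(tcb_queue_t queue, tcb_t *tcb)
    {
        if (tcb_queue_empty(queue)) {
            queue.end = tcb;
        } else {
            tcb->tcbSchedNext = queue.head;
            queue.head->tcbSchedPrev = tcb;
        }
        queue.head = tcb;
        return queue;
    }

    /* Insert tcb at the back; symmetric to prepend. */
    static tcb_queue_t tcb_queue_append(tcb_queue_t queue, tcb_t *tcb)
    {
        if (tcb_queue_empty(queue)) {
            queue.head = tcb;
        } else {
            tcb->tcbSchedPrev = queue.end;
            queue.end->tcbSchedNext = tcb;
        }
        queue.end = tcb;
        return queue;
    }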
(s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueEnd queue)) s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma getQueue_ccorres: + "ccorres ctcb_queue_relation queue_' + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx___unsigned_long = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (getQueue tdom prio) (\queue :== \ksReadyQueues.[unat \idx___unsigned_long])" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: getQueue_def gets_def get_def bind_def return_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + done + +lemma setQueue_ccorres: + "ctcb_queue_relation queue cqueue \ + ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx___unsigned_long = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (setQueue tdom prio queue) + (Basic (\s. globals_update + (ksReadyQueues_'_update + (\_. 
Arrays.update (ksReadyQueues_' (globals s)) + (unat (idx___unsigned_long_' s)) cqueue)) s))" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: setQueue_def get_def modify_def put_def bind_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + apply (frule cready_queues_index_to_C_distinct, assumption+) + apply (frule_tac qdom=d and prio=p in cready_queues_index_to_C_in_range) + apply fastforce + apply clarsimp + done + +crunch (empty_fail) empty_fail[wp]: isRunnable + +lemma tcbSchedEnqueue_ccorres: + "ccorres dc xfdc + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedEnqueue t) (Call tcbSchedEnqueue_'proc)" +proof - + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] + + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + note word_less_1[simp del] + + show ?thesis + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac runnable) + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. 
queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_prepend_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2) + done +qed + +lemma tcbSchedAppend_ccorres: + "ccorres dc xfdc + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedAppend t) (Call tcbSchedAppend_'proc)" +proof - + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + note word_less_1[simp del] + show ?thesis + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac "runnable") + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. 
rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply (fastforce dest!: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_append_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply clarsimp + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2 tcbQueueEmpty_def) + done +qed + +(* FIXME 
same proofs as bit_set, maybe can generalise? *) +lemma cbitmap_L1_relation_bit_clear: + fixes p :: priority + shows + "\ cbitmap_L1_relation (ksReadyQueuesL1Bitmap_' (globals x)) (ksReadyQueuesL1Bitmap \) ; + d \ maxDomain \ \ + cbitmap_L1_relation + (Arrays.update (ksReadyQueuesL1Bitmap_' (globals x)) (unat d) + (ksReadyQueuesL1Bitmap_' (globals x).[unat d] && ~~ 2 ^ unat (p >> wordRadix))) + ((ksReadyQueuesL1Bitmap \)(d := ksReadyQueuesL1Bitmap \ d && ~~ 2 ^ prioToL1Index p))" + unfolding cbitmap_L1_relation_def numPriorities_def wordBits_def word_size l2BitmapSize_def' + by (clarsimp simp: word_size prioToL1Index_def wordRadix_def mask_def + invertL1Index_def l2BitmapSize_def' + le_maxDomain_eq_less_numDomains word_le_nat_alt num_domains_index_updates) + +lemma cbitmap_L2_relationD: + "\ cbitmap_L2_relation cbitmap2 abitmap2 ; d \ maxDomain ; i < l2BitmapSize \ \ + cbitmap2.[unat d].[i] = abitmap2 (d, i)" + unfolding cbitmap_L2_relation_def l2BitmapSize_def' + by clarsimp + +lemma cbitmap_L2_relation_bit_clear: + fixes p :: priority + fixes d :: domain + shows "\ cbitmap_L2_relation (ksReadyQueuesL2Bitmap_' (globals \')) (ksReadyQueuesL2Bitmap \) ; + d \ maxDomain \ + \ + cbitmap_L2_relation + (Arrays.update (ksReadyQueuesL2Bitmap_' (globals \')) (unat d) + (Arrays.update (ksReadyQueuesL2Bitmap_' (globals \').[unat d]) + (invertL1Index (prioToL1Index p)) + (ksReadyQueuesL2Bitmap_' (globals \').[unat d].[invertL1Index (prioToL1Index p)] && + ~~ 2 ^ unat (p && 0x3F)))) + ((ksReadyQueuesL2Bitmap \) + ((d, invertL1Index (prioToL1Index p)) := + ksReadyQueuesL2Bitmap \ (d, invertL1Index (prioToL1Index p)) && + ~~ 2 ^ unat (p && mask wordRadix)))" + unfolding cbitmap_L2_relation_def numPriorities_def wordBits_def word_size l2BitmapSize_def' + apply (clarsimp simp: word_size prioToL1Index_def wordRadix_def mask_def + invertL1Index_def l2BitmapSize_def' + le_maxDomain_eq_less_numDomains word_le_nat_alt) + apply (case_tac "da = d" ; clarsimp simp: num_domains_index_updates) + done + +lemma removeFromBitmap_ccorres: + "ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast prio\) hs + (removeFromBitmap tdom prio) (Call removeFromBitmap_'proc)" +proof - + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] + + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + include no_less_1_simps + + have invert_l1_index_limit: "\p. 
invertL1Index (prioToL1Index p) < l2BitmapSize" + unfolding invertL1Index_def l2BitmapSize_def' prioToL1Index_def + by simp + + show ?thesis + supply if_split[split del] + (* pull out static assms *) + apply simp + apply (rule ccorres_grab_asm[where P=\, simplified]) + apply (cinit lift: dom_' prio_') + apply clarsimp + apply csymbr + apply csymbr + (* we can clear up all C guards now *) + apply (clarsimp simp: maxDomain_le_unat_ucast_explicit word_and_less') + apply (simp add: invert_prioToL1Index_c_simp word_less_nat_alt) + apply (simp add: invert_l1_index_limit[simplified l2BitmapSize_def']) + apply ccorres_rewrite + (* handle L2 update *) + apply (rule_tac ccorres_split_nothrow_novcg_dc) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L2_relation) + apply (erule cbitmap_L2_relation_update) + apply (erule (1) cbitmap_L2_relation_bit_clear) + (* the check on the C side is identical to checking the L2 entry, rewrite the condition *) + apply (simp add: getReadyQueuesL2Bitmap_def) + apply (rule ccorres_symb_exec_l3, rename_tac l2) + apply (rule_tac C'="{s. l2 = 0}" + and Q="\s. l2 = ksReadyQueuesL2Bitmap s (tdom, invertL1Index (prioToL1Index prio))" + in ccorres_rewrite_cond_sr[where Q'=UNIV]) + apply clarsimp + apply (frule rf_sr_cbitmap_L2_relation) + apply (clarsimp simp: cbitmap_L2_relationD invert_l1_index_limit split: if_split) + (* unset L1 bit when L2 entry is empty *) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L1_relation) + apply (erule cbitmap_L1_relation_update) + apply (erule (1) cbitmap_L1_relation_bit_clear) + apply wpsimp+ + apply (fastforce simp: guard_is_UNIV_def) + apply clarsimp + done +qed + +lemma ctcb_ptr_to_tcb_ptr_option_to_ctcb_ptr[simp]: + "ctcb_ptr_to_tcb_ptr (option_to_ctcb_ptr (Some ptr)) = ptr" + by (clarsimp simp: option_to_ctcb_ptr_def) + +lemma tcb_queue_remove_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. 
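removeFromBitmap_ccorres (above) covers the inverse bitmap update: clear the thread's bit in the L2 word and, only when that word becomes zero, clear the corresponding L1 bit, so that the L1 bitmap always summarises which L2 words are non-empty. A rough C sketch of that two-step clear, using the same illustrative constants and globals as the earlier addToBitmap sketch:

    #include <stdint.h>

    typedef uint64_t word_t;

    #define WORD_RADIX      6
    #define L2_BITMAP_SIZE  4
    #define NUM_DOMAINS     16
    #define BIT(n)          (1ul << (n))
    #define MASK(n)         (BIT(n) - 1ul)

    static word_t ksReadyQueuesL1Bitmap[NUM_DOMAINS];
    static word_t ksReadyQueuesL2Bitmap[NUM_DOMAINS][L2_BITMAP_SIZE];

    static inline word_t prio_to_l1index(word_t prio) { return prio >> WORD_RADIX; }
    static inline word_t invert_l1index(word_t i)     { return L2_BITMAP_SIZE - 1 - i; }

    static void removeFromBitmap(word_t dom, word_t prio)
    {
        word_t l1index          = prio_to_l1index(prio);
        word_t l1index_inverted = invert_l1index(l1index);

        ksReadyQueuesL2Bitmap[dom][l1index_inverted] &= ~BIT(prio & MASK(WORD_RADIX));
        if (ksReadyQueuesL2Bitmap[dom][l1index_inverted] == 0) {
            /* no priority left in this 64-priority block */
            ksReadyQueuesL1Bitmap[dom] &= ~BIT(l1index);
        }
    }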
tcb_at' tcbPtr s \ valid_objs' s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueRemove queue tcbPtr) (Call tcb_queue_remove_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit' lift: tcb_') + apply (rename_tac tcb') + apply (simp only: tcbQueueRemove_def) + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule ccorres_pre_getObject_tcb, rename_tac tcb) + apply (rule ccorres_symb_exec_l, rename_tac beforePtrOpt) + apply (rule ccorres_symb_exec_l, rename_tac afterPtrOpt) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="before___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr beforePtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedPrev tcb = beforePtrOpt)" + and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="after___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr afterPtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedNext tcb = afterPtrOpt)" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond[where R="?abs"]) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) + apply clarsimp + apply (rule ccorres_assert2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. 
(s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) + apply clarsimp + apply (rule ccorres_assert2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply (rule ccorres_assert2)+ + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. 
(s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (fastforce intro: ccorres_return_C') + apply (wpsimp | vcg)+ + apply (clarsimp split: if_splits) + apply normalise_obj_at' + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + by (intro conjI impI; + clarsimp simp: ctcb_queue_relation_def typ_heap_simps option_to_ctcb_ptr_def + valid_tcb'_def) + +lemma tcbQueueRemove_tcb_at'_head: + "\\s. valid_objs' s \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)\ + tcbQueueRemove queue t + \\rv s. \ tcbQueueEmpty rv \ tcb_at' (the (tcbQueueHead rv)) s\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp haskell_assert_wp hoare_vcg_imp_lift') + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def tcbQueueEmpty_def obj_at'_def) + done + +lemma tcbSchedDequeue_ccorres: + "ccorres dc xfdc + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedDequeue t) (Call tcbSchedDequeue_'proc)" +proof - + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] + + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + include no_less_1_simps + + show ?thesis + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. 
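tcb_queue_remove_ccorres (completed above) handles the four unlinking cases that the Isar proof walks through one by one: the thread is the only element, the head, the end, or an interior node; in every case the removed thread's own prev/next pointers are cleared afterwards. A simplified C sketch of those cases, using the same illustrative tcb_queue_t shape as the earlier prepend/append sketch:

    #include <stddef.h>

    typedef struct tcb tcb_t;
    struct tcb {
        tcb_t *tcbSchedNext;
        tcb_t *tcbSchedPrev;
    };
    typedef struct tcb_queue { tcb_t *head; tcb_t *end; } tcb_queue_t;

    static tcb_queue_t tcb_queue_remove(tcb_queue_t queue, tcb_t *tcb)
    {
        tcb_t *before = tcb->tcbSchedPrev;
        tcb_t *after  = tcb->tcbSchedNext;

        if (tcb == queue.head && tcb == queue.end) {
            /* only element: the queue becomes empty */
            queue.head = NULL;
            queue.end  = NULL;
        } else if (tcb == queue.head) {
            /* removing the head: the successor becomes the new head */
            after->tcbSchedPrev = NULL;
            queue.head = after;
        } else if (tcb == queue.end) {
            /* removing the end: the predecessor becomes the new end */
            before->tcbSchedNext = NULL;
            queue.end = before;
        } else {
            /* interior node: link predecessor and successor together */
            before->tcbSchedNext = after;
            after->tcbSchedPrev  = before;
        }

        tcb->tcbSchedNext = NULL;
        tcb->tcbSchedPrev = NULL;
        return queue;
    }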
rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rule_tac r'=ctcb_queue_relation and xf'=new_queue_' in ccorres_split_nothrow) + apply (ctac add: tcb_queue_remove_ccorres) + apply ceqv + apply (rename_tac queue' newqueue) + apply (rule ccorres_Guard_Seq) + apply (ctac add: setQueue_ccorres) + apply (rule ccorres_split_nothrow_novcg_dc) + apply ctac + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue')" + and R="\s. \ tcbQueueEmpty queue' \ tcb_at' (the (tcbQueueHead queue')) s" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def split: option.splits) + apply ceqv + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: removeFromBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply vcg + apply (wpsimp wp: hoare_vcg_imp_lift') + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: hoare_vcg_imp_lift') + apply vcg + apply ((wpsimp wp: tcbQueueRemove_tcb_at'_head | wp (once) hoare_drop_imps)+)[1] + apply (vcg exspec=tcb_queue_remove_modifies) + apply wpsimp + apply vcg + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) obj_at_cslift_tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + by (fastforce simp: word_less_nat_alt + cready_queues_index_to_C_def2 ctcb_relation_def + typ_heap_simps le_maxDomain_eq_less_numDomains(2) unat_trans_ucast_helper) +qed + +lemma tcb_queue_relation_append: + "\ tcb_queue_relation tn tp' mp queue qprev qhead; queue \ []; + qend' \ tcb_ptr_to_ctcb_ptr ` set queue; mp qend' = Some tcb; + queue = queue' @ [ctcb_ptr_to_tcb_ptr qend]; distinct queue; + \x \ set queue. tcb_ptr_to_ctcb_ptr x \ NULL; qend' \ NULL; + \v f g. tn (tn_update f v) = f (tn v) \ tp' (tp_update g v) = g (tp' v) + \ tn (tp_update f v) = tn v \ tp' (tn_update g v) = tp' v \ + \ tcb_queue_relation tn tp' + (mp (qend \ tn_update (\_. qend') (the (mp qend)), + qend' \ tn_update (\_. NULL) (tp_update (\_. qend) tcb))) + (queue @ [ctcb_ptr_to_tcb_ptr qend']) qprev qhead" + using [[hypsubst_thin = true]] + apply clarsimp + apply (induct queue' arbitrary: qprev qhead) + apply clarsimp + apply clarsimp + done + +lemma isStopped_spec: + "\s. \ \ ({s} \ {s. 
cslift s (thread_' s) \ None}) Call isStopped_'proc + {s'. ret__unsigned_long_' s' = from_bool (tsType_CL (thread_state_lift (tcbState_C (the (cslift s (thread_' s))))) \ + {scast ThreadState_BlockedOnReply, + scast ThreadState_BlockedOnNotification, scast ThreadState_BlockedOnSend, + scast ThreadState_BlockedOnReceive, scast ThreadState_Inactive}) }" + apply vcg + apply (clarsimp simp: typ_heap_simps) +done + +lemma isRunnable_spec: + "\s. \ \ ({s} \ {s. cslift s (thread_' s) \ None}) Call isRunnable_'proc + {s'. ret__unsigned_long_' s' = from_bool (tsType_CL (thread_state_lift (tcbState_C (the (cslift s (thread_' s))))) \ + { scast ThreadState_Running, scast ThreadState_Restart })}" + apply vcg + apply (clarsimp simp: typ_heap_simps) +done + +(* FIXME: move *) +lemma ccorres_setSchedulerAction: + "cscheduler_action_relation a p \ + ccorres dc xfdc \ UNIV hs + (setSchedulerAction a) + (Basic (\s. globals_update (ksSchedulerAction_'_update (\_. p)) s))" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: setSchedulerAction_def modify_def get_def put_def bind_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) + done + +declare ge_0_from_bool [simp] + +lemma scheduler_action_case_switch_to_if: + "(case act of SwitchToThread t \ f t | _ \ g) + = (if \t. act = SwitchToThread t + then f (case act of SwitchToThread t \ t) else g)" + by (simp split: scheduler_action.split) + +lemma tcb_at_1: + "tcb_at' t s \ tcb_ptr_to_ctcb_ptr t \ tcb_Ptr 1" + apply (drule is_aligned_tcb_ptr_to_ctcb_ptr) + apply (clarsimp simp add: is_aligned_def ctcb_size_bits_def) + done + +lemma rescheduleRequired_ccorres: + "ccorres dc xfdc + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' + and pspace_aligned' and pspace_distinct') + UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" + apply cinit + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) + apply (simp add: scheduler_action_case_switch_to_if + cong: if_weak_cong split del: if_split) + apply (rule_tac R="\s. action = ksSchedulerAction s \ weak_sch_act_wf action s" + in ccorres_cond) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cscheduler_action_relation_def) + subgoal by (clarsimp simp: weak_sch_act_wf_def tcb_at_1 tcb_at_not_NULL + split: scheduler_action.split_asm dest!: pred_tcb_at') + apply (ctac add: tcbSchedEnqueue_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: setSchedulerAction_def simpler_modify_def) + subgoal by (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cscheduler_action_relation_def + carch_state_relation_def cmachine_state_relation_def) + apply wp + apply (simp add: guard_is_UNIV_def) + apply wp+ + apply (simp add: getSchedulerAction_def) + apply (clarsimp simp: weak_sch_act_wf_def rf_sr_def cstate_relation_def + Let_def cscheduler_action_relation_def) + by (auto simp: tcb_at_not_NULL tcb_at_1 + tcb_at_not_NULL[THEN not_sym] tcb_at_1[THEN not_sym] + split: scheduler_action.split_asm) + +lemma getReadyQueuesL1Bitmap_sp: + "\\s. P s \ d \ maxDomain \ + getReadyQueuesL1Bitmap d + \\rv s. 
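rescheduleRequired_ccorres (above) uses scheduler_action_case_switch_to_if to turn the case split on the scheduler action into the if-statement the C code performs: a pending SwitchToThread candidate is put back on its ready queue before the action is reset to ChooseNewThread. The sketch below shows that shape in C; the pointer encoding of the two reserved actions (0 and 1) matches what cscheduler_action_relation and the tcb_at_1 lemma suggest, but the declarations are illustrative assumptions.

    typedef struct tcb tcb_t;

    /* ksSchedulerAction is a TCB pointer with two reserved encodings. */
    #define SchedulerAction_ResumeCurrentThread ((tcb_t *)0)
    #define SchedulerAction_ChooseNewThread     ((tcb_t *)1)

    extern tcb_t *ksSchedulerAction;
    extern void tcbSchedEnqueue(tcb_t *tcb);

    void rescheduleRequired(void)
    {
        if (ksSchedulerAction != SchedulerAction_ResumeCurrentThread
            && ksSchedulerAction != SchedulerAction_ChooseNewThread) {
            /* a thread was pencilled in for a fast switch; put it back on its queue */
            tcbSchedEnqueue(ksSchedulerAction);
        }
        ksSchedulerAction = SchedulerAction_ChooseNewThread;
    }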
ksReadyQueuesL1Bitmap s d = rv \ d \ maxDomain \ P s\" + unfolding bitmap_fun_defs + by wp simp + +(* this doesn't actually carry over d \ maxDomain to the rest of the ccorres, + use ccorres_cross_over_guard to do that *) +lemma ccorres_pre_getReadyQueuesL1Bitmap: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. d \ maxDomain \ (\rv. ksReadyQueuesL1Bitmap s d = rv \ P rv s)) + {s. \rv. (ksReadyQueuesL1Bitmap_' (globals s)).[unat d] = ucast rv + \ s \ P' rv } + hs (getReadyQueuesL1Bitmap d >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l2) + defer + defer + apply (rule getReadyQueuesL1Bitmap_sp) + apply blast + apply clarsimp + prefer 3 + apply (clarsimp simp: bitmap_fun_defs gets_exs_valid) + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply blast + apply assumption + apply (drule rf_sr_cbitmap_L1_relation) + apply (clarsimp simp: cbitmap_L1_relation_def) + done + +lemma getReadyQueuesL2Bitmap_sp: + "\\s. P s \ d \ maxDomain \ i < l2BitmapSize \ + getReadyQueuesL2Bitmap d i + \\rv s. ksReadyQueuesL2Bitmap s (d, i) = rv \ d \ maxDomain \ i < l2BitmapSize \ P s\" + unfolding bitmap_fun_defs + by wp simp + +lemma ccorres_pre_getReadyQueuesL2Bitmap: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. d \ maxDomain \ i < l2BitmapSize + \ (\rv. ksReadyQueuesL2Bitmap s (d,i) = rv \ P rv s)) + {s. \rv. (ksReadyQueuesL2Bitmap_' (globals s)).[unat d].[i] = ucast rv + \ s \ P' rv } + hs (getReadyQueuesL2Bitmap d i >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l2) + defer + defer + apply (rule getReadyQueuesL2Bitmap_sp) + apply blast + apply clarsimp + prefer 3 + apply (clarsimp simp: bitmap_fun_defs gets_exs_valid) + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply blast + apply assumption + apply (drule rf_sr_cbitmap_L2_relation) + apply (clarsimp simp: cbitmap_L2_relation_def) + done + +lemma rf_sr_ksReadyQueuesL1Bitmap_simp: + "\ (\, s') \ rf_sr ; d \ maxDomain \ + \ ksReadyQueuesL1Bitmap_' (globals s').[unat d] = ksReadyQueuesL1Bitmap \ d" + apply (drule rf_sr_cbitmap_L1_relation) + apply (simp add: cbitmap_L1_relation_def) + done + +lemma cguard_UNIV: + "P s \ s \ (if P s then UNIV else {})" + by fastforce + +lemma lookupBitmapPriority_le_maxPriority: + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + \d p. 
d > maxDomain \ p > maxPriority \ tcbQueueEmpty (ksReadyQueues s (d, p)); + valid_bitmaps s \ + \ lookupBitmapPriority d s \ maxPriority" + apply (clarsimp simp: valid_bitmaps_def) + by (fastforce dest!: bitmapQ_from_bitmap_lookup bitmapQ_ksReadyQueuesI intro: ccontr) + +lemma rf_sr_ksReadyQueuesL1Bitmap_not_zero: + "\ (\, s') \ rf_sr ; d \ maxDomain ; ksReadyQueuesL1Bitmap_' (globals s').[unat d] \ 0 \ + \ ksReadyQueuesL1Bitmap \ d \ 0" + apply (drule rf_sr_cbitmap_L1_relation) + apply (simp add: cbitmap_L1_relation_def) + done + +lemma ksReadyQueuesL1Bitmap_word_log2_max: + "\valid_bitmaps s; ksReadyQueuesL1Bitmap s d \ 0\ + \ word_log2 (ksReadyQueuesL1Bitmap s d) < l2BitmapSize" + unfolding valid_bitmaps_def + by (fastforce dest: word_log2_nth_same bitmapQ_no_L1_orphansD) + +lemma word_log2_max_word64[simp]: + "word_log2 (w :: 64 word) < 64" + using word_log2_max[where w=w] + by (simp add: word_size) + +lemma rf_sr_ksReadyQueuesL2Bitmap_simp: + "\ (\, s') \ rf_sr ; d \ maxDomain ; valid_bitmaps \ ; ksReadyQueuesL1Bitmap \ d \ 0 \ + \ ksReadyQueuesL2Bitmap_' (globals s').[unat d].[word_log2 (ksReadyQueuesL1Bitmap \ d)] = + ksReadyQueuesL2Bitmap \ (d, word_log2 (ksReadyQueuesL1Bitmap \ d))" + apply (frule rf_sr_cbitmap_L2_relation) + apply (frule (1) ksReadyQueuesL1Bitmap_word_log2_max) + apply (drule (3) cbitmap_L2_relationD) + done + +lemma ksReadyQueuesL2Bitmap_nonzeroI: + "\ d \ maxDomain ; valid_bitmaps s ; ksReadyQueuesL1Bitmap s d \ 0 \ + \ ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d))) \ 0" + unfolding valid_bitmaps_def + apply clarsimp + apply (frule bitmapQ_no_L1_orphansD) + apply (erule word_log2_nth_same) + apply clarsimp + done + +lemma l1index_to_prio_spec: + "\s. \ \ {s} Call l1index_to_prio_'proc + \\ret__unsigned_long = l1index_' s << wordRadix \" + by vcg (simp add: word_sle_def wordRadix_def') + +lemma getHighestPrio_ccorres: + "ccorres (\rv rv'. rv' = ucast rv) ret__unsigned_long_' + ((\s. ksReadyQueuesL1Bitmap s d \ 0 \ bitmapQ_no_L1_orphans s) and K (d \ maxDomain)) + (UNIV \ {s. dom_' s = ucast d}) hs + (getHighestPrio d) (Call getHighestPrio_'proc)" +proof - + + note prio_and_dom_limit_helpers[simp] + note Collect_const_mem[simp] + + have unsigned_word_log2: + "\w. w \ 0 \ (0x3F::64 signed word) - of_nat (word_clz (w::machine_word)) = (of_nat (word_log2 w))" + unfolding word_log2_def + by (clarsimp dest!: word_clz_nonzero_max simp: word_size) + + have word_log2_def64: + "\w. word_log2 (w::machine_word) = 63 - word_clz w" + unfolding word_log2_def by (simp add: word_size) + + have invertL1Index_unat_fold: + "\(w::machine_word). \ w \ 0 ; word_log2 w < l2BitmapSize \ \ + unat (of_nat l2BitmapSize - (1::machine_word) - of_nat (word_log2 w)) + = invertL1Index (word_log2 w)" + apply (subst unat_sub) + apply (clarsimp simp: l2BitmapSize_def') + apply (rule word_of_nat_le) + apply (drule word_log2_nth_same) + apply (clarsimp simp: l2BitmapSize_def') + apply (clarsimp simp: invertL1Index_def l2BitmapSize_def') + apply (simp add: unat_of_nat_eq) + done + + (* annoyingly, inside one of the csymbr commands, we get unwanted minus distribution *) + have word_clz_word_log2_fixup: + "\w::machine_word. 
+ w \ 0 \ (0xFFFFFFFFFFFFFFC0::machine_word) + (of_nat l2BitmapSize + of_nat (word_clz w)) + = of_nat l2BitmapSize - 1 - of_nat (word_log2 w)" + by (frule word_clz_nonzero_max) + (simp add: word_log2_def64 word_size) + + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + include no_less_1_simps + + show ?thesis + apply (rule ccorres_grab_asm) + apply (cinit lift: dom_') + apply (clarsimp split del: if_split) + apply (rule ccorres_pre_getReadyQueuesL1Bitmap) + apply (rule ccorres_pre_getReadyQueuesL2Bitmap) + apply (rename_tac l2) + apply ccorres_rewrite (* UNIV guard *) + apply (rule ccorres_Guard_Seq|csymbr)+ + apply (rule ccorres_abstract_cleanup) + apply (rule ccorres_Guard_Seq|csymbr)+ + apply (rule ccorres_abstract_cleanup) + apply (rule ccorres_Guard_Seq|csymbr)+ + apply (clarsimp simp: word_log2_def word_size) + apply (rename_tac clz_l1index clz_l2index) + apply (rule_tac P="\s. l1 \ 0 \ l2 \ 0 \ word_log2 l1 < l2BitmapSize" + and P'="{s. clz_l1index = of_nat (word_clz l1) \ + clz_l2index = of_nat (word_clz l2) }" + in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + subgoal for l1 l2 _ _ _ + apply (clarsimp simp: return_def l1IndexToPrio_def) + apply (simp add: unsigned_word_log2 word_log2_def64[symmetric] ucast_or_distrib) + apply (rule_tac f="(||)" in arg_cong2) + apply (subst of_nat_shiftl)+ + apply (subst ucast_of_nat_small, simp add: wordRadix_def l2BitmapSize_def') + apply (rule refl) + apply (subst ucast_of_nat_small, simp add: wordRadix_def) + apply (rule word_log2_max_word64[THEN order_less_le_trans], simp) + apply (rule refl) + done + apply clarsimp + apply (frule rf_sr_cbitmap_L1_relation) + apply (prop_tac "ksReadyQueuesL1Bitmap_' (globals s').[unat d] \ 0") + subgoal by (fastforce simp: cbitmap_L1_relation_def) + apply (simp add: word_clz_word_log2_fixup) + apply (clarsimp simp: unsigned_word_log2 cbitmap_L1_relation_def maxDomain_le_unat_ucast_explicit + order_trans[OF word_clz_sint_upper] order_trans[OF word_clz_sint_lower]) + apply (frule bitmapQ_no_L1_orphansD, erule word_log2_nth_same) + apply (rule conjI, fastforce simp: invertL1Index_def l2BitmapSize_def') + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce simp: invertL1Index_unat_fold) + apply (rule conjI) + apply (subst invertL1Index_unat_fold, assumption, fastforce) + apply (frule rf_sr_cbitmap_L2_relation) + apply (fastforce simp: cbitmap_L2_relation_def) + apply (clarsimp simp: l2BitmapSize_def') + apply (fastforce simp: word_less_nat_alt word_le_nat_alt unat_sub unat_of_nat) + done +qed + +lemma ccorres_abstract_ksCurThread: + assumes ceqv: "\rv' t t'. ceqv \ (\s. ksCurThread_' (globals s)) rv' t t' d (d' rv')" + and cc: "\ct. ccorres_underlying rf_sr \ r xf arrel axf (G ct) (G' ct) hs a (d' (tcb_ptr_to_ctcb_ptr ct))" + shows "ccorres_underlying rf_sr \ r xf arrel axf (\s. G (ksCurThread s) s) + {s. s \ G' (ctcb_ptr_to_tcb_ptr (ksCurThread_' (globals s)))} hs a d" + apply (rule ccorres_guard_imp) + prefer 2 + apply assumption + apply (rule ccorres_abstract[OF ceqv, where G'="\ct. \ct = \ksCurThread\ \ G' (ctcb_ptr_to_tcb_ptr ct)"]) + apply (subgoal_tac "\t. 
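getHighestPrio_ccorres (above) is where the two-level bitmap pays off: the C code takes the highest set bit of the domain's L1 word via a count-leading-zeros instruction, inverts that index to address the L2 array, takes the highest set bit there, and recombines the two with l1index_to_prio (a shift by wordRadix). The unsigned_word_log2 and word_clz_word_log2_fixup facts in the proof bridge exactly this clz-based formulation to word_log2. A rough 64-bit, uniprocessor C sketch, using the compiler's clz builtin as a stand-in for the kernel's clzl and illustrative global declarations:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t word_t;

    #define WORD_BITS       64
    #define WORD_RADIX      6
    #define L2_BITMAP_SIZE  4
    #define NUM_DOMAINS     16

    extern word_t ksReadyQueuesL1Bitmap[NUM_DOMAINS];
    extern word_t ksReadyQueuesL2Bitmap[NUM_DOMAINS][L2_BITMAP_SIZE];

    static inline word_t invert_l1index(word_t i)  { return L2_BITMAP_SIZE - 1 - i; }
    static inline word_t l1index_to_prio(word_t i) { return i << WORD_RADIX; }
    static inline word_t clzl(word_t w)            { return (word_t)__builtin_clzll(w); }

    static word_t getHighestPrio(word_t dom)
    {
        /* clzl is undefined on 0: the caller guarantees some thread is runnable */
        assert(ksReadyQueuesL1Bitmap[dom] != 0);

        word_t l1index          = WORD_BITS - 1 - clzl(ksReadyQueuesL1Bitmap[dom]);
        word_t l1index_inverted = invert_l1index(l1index);
        word_t l2index          = WORD_BITS - 1 - clzl(ksReadyQueuesL2Bitmap[dom][l1index_inverted]);

        return l1index_to_prio(l1index) | l2index;
    }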
rv' = tcb_ptr_to_ctcb_ptr t") + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (rule cc) + apply (clarsimp simp: rf_sr_ksCurThread) + apply (metis tcb_ptr_to_tcb_ptr) + apply simp + done + +lemma ctcb_relation_unat_tcbPriority_C: + "ctcb_relation tcb tcb' \ unat (tcbPriority_C tcb') = unat (tcbPriority tcb)" + apply (clarsimp simp: ctcb_relation_def) + apply (rule trans, rule arg_cong[where f=unat], erule sym) + apply (simp(no_asm)) + done + +lemma ctcb_relation_unat_tcbDomain_C: + "ctcb_relation tcb tcb' \ unat (tcbDomain_C tcb') = unat (tcbDomain tcb)" + apply (clarsimp simp: ctcb_relation_def) + apply (rule trans, rule arg_cong[where f=unat], erule sym) + apply (simp(no_asm)) + done + +lemma getCurDomain_ccorres_dom_': + "ccorres (\rv rv'. rv' = ucast rv) dom_' + \ UNIV hs curDomain (\dom :== \ksCurDomain)" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: curDomain_def simpler_gets_def + rf_sr_ksCurDomain) + done + +lemma getCurDomain_maxDom_ccorres_dom_': + "ccorres (\rv rv'. rv' = ucast rv) dom_' + (\s. ksCurDomain s \ maxDomain) UNIV hs + curDomain (\dom :== (if maxDom \ 0 then \ksCurDomain else 0))" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + using maxDom_to_H + apply (clarsimp simp: curDomain_def simpler_gets_def + rf_sr_ksCurDomain) + done + +lemma threadGet_get_obj_at'_has_domain: + "\ tcb_at' t \ threadGet tcbDomain t \\rv. obj_at' (\tcb. rv = tcbDomain tcb) t\" + by (wp threadGet_obj_at') (simp add: obj_at'_def) + +lemma possibleSwitchTo_ccorres: + shows + "ccorres dc xfdc + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) + and st_tcb_at' runnable' t and (\s. ksCurDomain s \ maxDomain) + and valid_objs' and pspace_aligned' and pspace_distinct') + ({s. target_' s = tcb_ptr_to_ctcb_ptr t} + \ UNIV) [] + (possibleSwitchTo t ) + (Call possibleSwitchTo_'proc)" + supply if_split [split del] + supply Collect_const [simp del] + supply prio_and_dom_limit_helpers[simp] + (* FIXME: these should likely be in simpset for CRefine, or even in general *) + supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] + ccorres_IF_True[simp] if_cong[cong] + apply (cinit lift: target_') + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_pre_curDomain, rename_tac curDom) + apply (rule ccorres_symb_exec_l3[OF _ threadGet_inv _ empty_fail_threadGet], rename_tac targetDom) + apply (rule ccorres_symb_exec_l3[OF _ gsa_wp _ empty_fail_getSchedulerAction], rename_tac sact) + apply (rule_tac C'="{s. targetDom \ curDom}" + and Q="\s. curDom = ksCurDomain s \ obj_at' (\tcb. targetDom = tcbDomain tcb) t s" + and Q'=UNIV in ccorres_rewrite_cond_sr) + subgoal + apply clarsimp + apply (drule obj_at_ko_at', clarsimp simp: rf_sr_ksCurDomain) + apply (frule (1) obj_at_cslift_tcb, clarsimp simp: typ_heap_simps') + apply (drule ctcb_relation_unat_tcbDomain_C) + apply unat_arith + apply fastforce + done + apply (rule ccorres_cond2[where R=\], simp) + apply (ctac add: tcbSchedEnqueue_ccorres) + apply (rule_tac R="\s. 
sact = ksSchedulerAction s \ weak_sch_act_wf (ksSchedulerAction s) s" + in ccorres_cond) + apply (fastforce dest!: rf_sr_sched_action_relation pred_tcb_at' tcb_at_not_NULL + simp: cscheduler_action_relation_def weak_sch_act_wf_def + split: scheduler_action.splits) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: tcbSchedEnqueue_ccorres) + apply wp + apply (vcg exspec=rescheduleRequired_modifies) + apply (rule ccorres_setSchedulerAction, simp add: cscheduler_action_relation_def) + apply clarsimp + apply wp + apply clarsimp + apply (wp hoare_drop_imps threadGet_get_obj_at'_has_domain) + apply (clarsimp simp: pred_tcb_at') + done + +lemma scheduleTCB_ccorres': + "ccorres dc xfdc + (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') + (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) + [] + (do (runnable, curThread, action) \ do + runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) od; + when (\ runnable \ + curThread = thread \ action = ResumeCurrentThread) + rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] + apply (cinit' lift: tptr_') + apply (rule ccorres_rhs_assoc2)+ + apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) + defer + apply ceqv + apply (unfold split_def)[1] + apply (rule ccorres_when[where R=\]) + apply (intro allI impI) + apply (unfold mem_simps)[1] + apply assumption + apply (ctac add: rescheduleRequired_ccorres) + prefer 4 + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_pre_getCurThread) + apply (rule ccorres_symb_exec_l) + apply (rule_tac P="\s. st_tcb_at' (\st. runnable' st = runnable) thread s + \ curThread = ksCurThread s + \ action = ksSchedulerAction s + \ (\t. ksSchedulerAction s = SwitchToThread t \ tcb_at' t s)" + and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def split del: if_split) + apply (clarsimp simp: from_bool_0 rf_sr_ksCurThread) + apply (rule conjI) + apply (clarsimp simp: st_tcb_at'_def) + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") + apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cscheduler_action_relation_def tcb_at_not_NULL + split: scheduler_action.split_asm) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cscheduler_action_relation_def) + apply wp+ + apply (simp add: isRunnable_def isStopped_def) + apply (simp add: guard_is_UNIV_def) + apply clarsimp + apply (clarsimp simp: st_tcb_at'_def obj_at'_def weak_sch_act_wf_def) + done + +lemma scheduleTCB_ccorres_valid_queues'_pre: + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread + and (\s. 
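possibleSwitchTo_ccorres (above) distinguishes three cases: a woken thread from another domain is simply enqueued; if a fast switch is already pending, that intention is abandoned via rescheduleRequired and the new candidate is enqueued as well; otherwise the thread itself becomes the pending SwitchToThread action. Roughly, in C, on a uniprocessor and with the same assumed scheduler-action encoding and accessor names as the earlier sketches:

    #include <stdint.h>

    typedef uint64_t word_t;
    typedef struct tcb tcb_t;

    #define SchedulerAction_ResumeCurrentThread ((tcb_t *)0)

    extern word_t  ksCurDomain;
    extern tcb_t  *ksSchedulerAction;
    extern word_t  tcb_domain(tcb_t *tcb);   /* assumed accessor */
    extern void    tcbSchedEnqueue(tcb_t *tcb);
    extern void    rescheduleRequired(void);

    void possibleSwitchTo(tcb_t *target)
    {
        if (ksCurDomain != tcb_domain(target)) {
            /* cross-domain wakeups never pre-empt the current domain */
            tcbSchedEnqueue(target);
        } else if (ksSchedulerAction != SchedulerAction_ResumeCurrentThread) {
            /* more than one candidate: fall back to a full scheduling decision */
            rescheduleRequired();
            tcbSchedEnqueue(target);
        } else {
            ksSchedulerAction = target;      /* SwitchToThread target */
        }
    }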
weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] + apply (cinit' lift: tptr_') + apply (rule ccorres_rhs_assoc2)+ + apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) + defer + apply ceqv + apply (unfold split_def)[1] + apply (rule ccorres_when[where R=\]) + apply (intro allI impI) + apply (unfold mem_simps)[1] + apply assumption + apply (ctac add: rescheduleRequired_ccorres) + prefer 4 + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_pre_getCurThread) + apply (rule ccorres_symb_exec_l) + apply (rule_tac P="\s. st_tcb_at' (\st. runnable' st = runnable) thread s + \ curThread = ksCurThread s + \ action = ksSchedulerAction s + \ weak_sch_act_wf (ksSchedulerAction s) s" + and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (clarsimp simp: from_bool_0 rf_sr_ksCurThread) + apply (rule conjI) + apply (clarsimp simp: st_tcb_at'_def) + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def weak_sch_act_wf_def) + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] + apply (fold_subgoals (prefix))[6] + subgoal premises prems using prems + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cscheduler_action_relation_def + tcb_at_not_NULL[OF obj_tcb_at'] st_tcb_at'_def + split: scheduler_action.split_asm)+ + apply (clarsimp simp: rf_sr_def cstate_relation_def cscheduler_action_relation_def + split: scheduler_action.split_asm) + apply wp+ + apply (simp add: isRunnable_def isStopped_def) + apply (simp add: guard_is_UNIV_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) + done + +lemmas scheduleTCB_ccorres_valid_queues' + = scheduleTCB_ccorres_valid_queues'_pre[unfolded bind_assoc return_bind split_conv] + +lemma rescheduleRequired_ccorres_valid_queues'_simple: + "ccorresG rf_sr \ dc xfdc + sch_act_simple UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" + apply cinit + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) + apply (simp add: scheduler_action_case_switch_to_if + cong: if_weak_cong split del: if_split) + apply (rule_tac R="\s. 
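scheduleTCB_ccorres' and its valid_queues' variant above verify the same guard: a full reschedule is requested only when the given thread is the current thread, the scheduler still intends to resume it, and it is no longer runnable. In C this is a single conditional call, sketched here with the same assumed encoding and helpers as before:

    #include <stdbool.h>

    typedef struct tcb tcb_t;

    #define SchedulerAction_ResumeCurrentThread ((tcb_t *)0)

    extern tcb_t *ksCurThread;
    extern tcb_t *ksSchedulerAction;
    extern bool   isRunnable(tcb_t *tptr);
    extern void   rescheduleRequired(void);

    void scheduleTCB(tcb_t *tptr)
    {
        if (tptr == ksCurThread
            && ksSchedulerAction == SchedulerAction_ResumeCurrentThread
            && !isRunnable(tptr)) {
            rescheduleRequired();
        }
    }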
action = ksSchedulerAction s \ sch_act_simple s" + in ccorres_cond) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cscheduler_action_relation_def) + apply (clarsimp simp: weak_sch_act_wf_def tcb_at_1 tcb_at_not_NULL + split: scheduler_action.split_asm dest!: st_tcb_strg'[rule_format]) + apply (ctac add: tcbSchedEnqueue_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: setSchedulerAction_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cscheduler_action_relation_def + carch_state_relation_def cmachine_state_relation_def + ) + apply wp + apply (simp add: guard_is_UNIV_def) + apply wp+ + apply (simp add: getSchedulerAction_def) + apply (clarsimp simp: weak_sch_act_wf_def rf_sr_def cstate_relation_def + Let_def cscheduler_action_relation_def) + by (auto simp: tcb_at_not_NULL tcb_at_1 + tcb_at_not_NULL[THEN not_sym] tcb_at_1[THEN not_sym] + split: scheduler_action.split_asm) + +lemma scheduleTCB_ccorres_valid_queues'_pre_simple: + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and sch_act_simple) + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] + apply (cinit' lift: tptr_' simp del: word_neq_0_conv) + apply (rule ccorres_rhs_assoc2)+ + apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) + defer + apply ceqv + apply (unfold split_def)[1] + apply (rule ccorres_when[where R=\]) + apply (intro allI impI) + apply (unfold mem_simps)[1] + apply assumption + apply (ctac add: rescheduleRequired_ccorres_valid_queues'_simple) + prefer 4 + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_pre_getCurThread) + apply (rule ccorres_symb_exec_l) + apply (rule_tac P="\s. st_tcb_at' (\st. runnable' st = runnable) thread s + \ curThread = ksCurThread s + \ action = ksSchedulerAction s + \ sch_act_simple s" + and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def if_1_0_0 split del: if_split) + apply (clarsimp simp: from_bool_0 rf_sr_ksCurThread) + apply (rule conjI) + apply (clarsimp simp: st_tcb_at'_def) + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") + apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cscheduler_action_relation_def + tcb_at_not_NULL + split: scheduler_action.split_asm) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cscheduler_action_relation_def) + apply wp+ + apply (simp add: isRunnable_def isStopped_def) + apply (simp add: guard_is_UNIV_def) + apply clarsimp + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) + done + +lemmas scheduleTCB_ccorres_valid_queues'_simple + = scheduleTCB_ccorres_valid_queues'_pre_simple[unfolded bind_assoc return_bind split_conv] + +lemmas scheduleTCB_ccorres[corres] + = scheduleTCB_ccorres'[unfolded bind_assoc return_bind split_conv] + +lemma threadSet_weak_sch_act_wf_runnable': + "\ \s. 
(ksSchedulerAction s = SwitchToThread thread \ runnable' st) \ weak_sch_act_wf (ksSchedulerAction s) s \ + threadSet (tcbState_update (\_. st)) thread + \ \rv s. weak_sch_act_wf (ksSchedulerAction s) s \" + apply (simp add: weak_sch_act_wf_def) + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift threadSet_pred_tcb_at_state + threadSet_tcbDomain_triv) + apply simp + apply (clarsimp) +done + +lemma setThreadState_ccorres[corres]: + "ccorres dc xfdc + (\s. tcb_at' thread s \ valid_objs' s \ valid_tcb_state' st s + \ (ksSchedulerAction s = SwitchToThread thread \ runnable' st) + \ sch_act_wf (ksSchedulerAction s) s \ pspace_aligned' s \ pspace_distinct' s) + ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} + \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) hs + (setThreadState st thread) (Call setThreadState_'proc)" + apply (cinit lift: tptr_' cong add: call_ignore_cong) + apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) + apply (ctac add: scheduleTCB_ccorres) + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs') + apply (clarsimp simp: weak_sch_act_wf_def valid_tcb'_tcbState_update) + done + +lemma setThreadState_ccorres_valid_queues': + "ccorres dc xfdc + (\s. tcb_at' thread s \ \ runnable' st \ weak_sch_act_wf (ksSchedulerAction s) s + \ sch_act_not thread s \ valid_objs' s \ valid_tcb_state' st s + \ pspace_aligned' s \ pspace_distinct' s) + ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} + \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (setThreadState st thread) (Call setThreadState_'proc)" + apply (cinit lift: tptr_' cong add: call_ignore_cong) + apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) + apply (ctac add: scheduleTCB_ccorres_valid_queues') + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs' + threadSet_tcbState_st_tcb_at') + by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + +lemma simp_list_case_return: + "(case x of [] \ return e | y # ys \ return f) = return (if x = [] then e else f)" + by (clarsimp split: list.splits) + +lemma cancelSignal_ccorres [corres]: + "ccorres dc xfdc + (invs' and st_tcb_at' ((=) (Structures_H.thread_state.BlockedOnNotification ntfn)) thread) + (UNIV \ {s. threadPtr_' s = tcb_ptr_to_ctcb_ptr thread} \ {s. 
ntfnPtr_' s = Ptr ntfn}) + [] (cancelSignal thread ntfn) (Call cancelSignal_'proc)" + apply (cinit lift: threadPtr_' ntfnPtr_' simp add: Let_def list_case_return cong add: call_ignore_cong) + apply (unfold fun_app_def) + apply (simp only: simp_list_case_return return_bind ccorres_seq_skip) + apply (rule ccorres_pre_getNotification) + apply (rule ccorres_assert) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (ctac (no_vcg) add: cancelSignal_ccorres_helper) + apply (ctac add: setThreadState_ccorres_valid_queues') + apply ((wp setNotification_nosch hoare_vcg_all_lift set_ntfn_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+)[1] + apply (simp add: ThreadState_defs) + apply (rule conjI, clarsimp, rule conjI, clarsimp) + apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) + subgoal by ((auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def + isTS_defs cte_wp_at_ctes_of + cthread_state_relation_def sch_act_wf_weak valid_ntfn'_def + | clarsimp simp: eq_commute)+) + apply (clarsimp) + apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) + apply (frule (2) ntfn_blocked_in_queueD) + by (auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def + isTS_defs cte_wp_at_ctes_of valid_ntfn'_def + cthread_state_relation_def sch_act_wf_weak isWaitingNtfn_def + split: ntfn.splits option.splits + | clarsimp simp: eq_commute + | drule_tac x=thread in bspec)+ + +(* FIXME: MOVE *) +lemma ccorres_pre_getEndpoint [ccorres_pre]: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (ep_at' p and (\s. \ep. ko_at' ep p s \ P ep s)) + ({s'. \ep. cendpoint_relation (cslift s') ep (the (cslift s' (Ptr p))) \ s' \ P' ep}) + hs (getEndpoint p >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l2) + defer + defer + apply (rule get_ep_sp') + apply assumption + apply clarsimp + prefer 3 + apply (clarsimp simp add: getEndpoint_def exs_getObject objBits_simps') + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply simp + apply assumption + apply (drule spec, erule mp) + apply (drule cmap_relation_ep) + apply (drule (1) cmap_relation_ko_atD) + apply clarsimp + done + +lemma ep_blocked_in_queueD: + "\ st_tcb_at' (\st. (isBlockedOnSend st \ isBlockedOnReceive st) + \ blockingObject st = ep) thread \; + ko_at' ep' ep \; invs' \ \ + \ thread \ set (epQueue ep') \ (isSendEP ep' \ isRecvEP ep')" + apply (drule sym_refs_st_tcb_atD') + apply clarsimp + apply (clarsimp simp: refs_of_rev' obj_at'_def ko_wp_at'_def projectKOs) + apply (clarsimp simp: isTS_defs split: Structures_H.thread_state.split_asm) + apply (cases ep', simp_all add: isSendEP_def isRecvEP_def)[1] + apply (cases ep', simp_all add: isSendEP_def isRecvEP_def)[1] + done + +lemma ep_ptr_get_queue_spec: + "\s. \ \ \s. s \\<^sub>c \epptr\ \ret__struct_tcb_queue_C :== PROC ep_ptr_get_queue(\epptr) + \head_C \ret__struct_tcb_queue_C = Ptr (epQueue_head_CL (endpoint_lift (the (cslift s \<^bsup>s\<^esup>epptr)))) \ + end_C \ret__struct_tcb_queue_C = Ptr (epQueue_tail_CL (endpoint_lift (the (cslift s \<^bsup>s\<^esup>epptr))))\" + apply vcg + apply clarsimp + done + +lemma valid_ep_blockedD: + "\ valid_ep' ep s; (isSendEP ep \ isRecvEP ep) \ \ (epQueue ep) \ [] \ (\t\set (epQueue ep). 
tcb_at' t s) \ distinct (epQueue ep)" + unfolding valid_ep'_def isSendEP_def isRecvEP_def + by (clarsimp split: endpoint.splits) + + +lemma ep_to_ep_queue: + assumes ko: "ko_at' ep' ep s" + and waiting: "(isSendEP ep' \ isRecvEP ep')" + and rf: "(s, s') \ rf_sr" + shows "ep_queue_relation' (cslift s') (epQueue ep') + (Ptr (epQueue_head_CL + (endpoint_lift (the (cslift s' (Ptr ep)))))) + (Ptr (epQueue_tail_CL + (endpoint_lift (the (cslift s' (Ptr ep))))))" +proof - + from rf have + "cmap_relation (map_to_eps (ksPSpace s)) (cslift s') Ptr (cendpoint_relation (cslift s'))" + by (rule cmap_relation_ep) + + thus ?thesis using ko waiting + apply - + apply (erule (1) cmap_relation_ko_atE) + apply (clarsimp simp: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def split: endpoint.splits) + done +qed + +lemma ep_ep_disjoint: + assumes srs: "sym_refs (state_refs_of' s)" + and epat: "ko_at' ep epptr s" + and epat': "ko_at' ep' epptr' s" + and epq: "(isSendEP ep \ isRecvEP ep)" + and epq': "(isSendEP ep' \ isRecvEP ep')" + and neq: "epptr' \ epptr" + shows "set (epQueue ep) \ set (epQueue ep') = {}" + using srs epat epat' epq epq' neq + apply - + apply (subst disjoint_iff_not_equal, intro ballI, rule notI) + apply (drule sym_refs_ko_atD', clarsimp)+ + apply clarsimp + apply (clarsimp simp: isSendEP_def isRecvEP_def split: endpoint.splits) + apply (simp_all add: st_tcb_at_refs_of_rev') + apply (fastforce simp: st_tcb_at'_def obj_at'_def)+ + done + +lemma cendpoint_relation_ep_queue: + fixes ep :: "endpoint" + assumes ep: "cendpoint_relation mp ep' b" + and mpeq: "(mp' |` (- S)) = (mp |` (- S))" + and epq: "ep' \ IdleEP \ + set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" + shows "cendpoint_relation mp' ep' b" +proof - + + have rl: "\p list. \ ctcb_ptr_to_tcb_ptr p \ set list; + ep' = RecvEP list \ ep' = SendEP list \ + \ mp' p = mp p" + using epq + apply (cut_tac x=p in fun_cong[OF mpeq]) + apply (cases ep', auto simp: restrict_map_def split: if_split_asm) + done + + have rl': "\p list. \ p \ tcb_ptr_to_ctcb_ptr ` set list; + ep' = RecvEP list \ ep' = SendEP list \ + \ mp' p = mp p" + by (clarsimp elim!: rl[rotated]) + + show ?thesis using ep rl' mpeq unfolding cendpoint_relation_def + by (simp add: Let_def + cong: Structures_H.endpoint.case_cong tcb_queue_relation'_cong) +qed + +lemma cpspace_relation_ep_update_an_ep: + fixes ep :: "endpoint" + defines "qs \ if (isSendEP ep \ isRecvEP ep) then set (epQueue ep) else {}" + assumes koat: "ko_at' ep epptr s" + and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" + and rel: "cendpoint_relation mp' ep' endpoint" + and mpeq: "(mp' |` (- S)) = (mp |` (- S))" + and pal: "pspace_aligned' s" "pspace_distinct' s" + and others: "\epptr' ep'. 
\ ko_at' ep' epptr' s; epptr' \ epptr; ep' \ IdleEP \ + \ set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + using cp koat pal rel unfolding cmap_relation_def + apply - + apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs) + apply (drule (1) bspec [OF _ domI]) + apply simp + apply (erule cendpoint_relation_ep_queue[OF _ mpeq]) + apply (erule(4) others[OF map_to_ko_atI]) + done + +lemma endpoint_not_idle_cases: + "ep \ IdleEP \ isSendEP ep \ isRecvEP ep" + by (clarsimp simp: isRecvEP_def isSendEP_def split: Structures_H.endpoint.split) + +lemma cpspace_relation_ep_update_ep: + fixes ep :: "endpoint" + defines "qs \ if (isSendEP ep \ isRecvEP ep) then set (epQueue ep) else {}" + assumes koat: "ko_at' ep epptr s" + and invs: "invs' s" + and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" + and rel: "cendpoint_relation mp' ep' endpoint" + and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + using invs + apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) + apply clarsimp+ + apply (clarsimp simp add: qs_def image_image simp del: imp_disjL) + apply (rule ep_ep_disjoint[OF _ _ koat endpoint_not_idle_cases], auto) + done + +lemma cpspace_relation_ep_update_ep': + fixes ep :: "endpoint" and ep' :: "endpoint" + and epptr :: "machine_word" and s :: "kernel_state" + defines "qs \ if (isSendEP ep' \ isRecvEP ep') then set (epQueue ep') else {}" + defines "s' \ s\ksPSpace := (ksPSpace s)(epptr \ KOEndpoint ep')\" + assumes koat: "ko_at' ep epptr s" + and vp: "valid_pspace' s" + and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" + and srs: "sym_refs (state_refs_of' s')" + and rel: "cendpoint_relation mp' ep' endpoint" + and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" +proof - + from koat have koat': "ko_at' ep' epptr s'" + by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) + + from koat have koat'': "\ep'' epptr'. \ ko_at' ep'' epptr' s; epptr' \ epptr \ + \ ko_at' ep'' epptr' s'" + by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) + + show ?thesis using vp ep_ep_disjoint[OF srs koat'' koat' endpoint_not_idle_cases] + apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) + apply clarsimp+ + apply (clarsimp simp add: qs_def image_image simp del: imp_disjL) + done +qed + +lemma cnotification_relation_ep_queue: + assumes srs: "sym_refs (state_refs_of' s)" + and koat: "ko_at' ep epptr s" + and iswaiting: "(isSendEP ep \ isRecvEP ep)" + and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` set (epQueue ep)))) + = (mp |` (- (tcb_ptr_to_ctcb_ptr ` set (epQueue ep))))" + and koat': "ko_at' a ntfnPtr s" + shows "cnotification_relation mp a b = cnotification_relation mp' a b" +proof - + have rl: "\p. 
\ p \ tcb_ptr_to_ctcb_ptr ` set (ntfnQueue (ntfnObj a)); + isWaitingNtfn (ntfnObj a) \ + \ mp p = mp' p" using srs koat' koat iswaiting mpeq + apply - + apply (drule (4) ntfn_ep_disjoint) + apply (erule restrict_map_eqI [symmetric]) + apply (erule imageE) + apply (fastforce simp: disjoint_iff_not_equal inj_eq) + done + + show ?thesis + unfolding cnotification_relation_def using rl + apply (simp add: Let_def) + apply (cases "ntfnObj a") + apply (simp add: isWaitingNtfn_def cong: tcb_queue_relation'_cong)+ + done +qed + +lemma epQueue_tail_mask_2 [simp]: + "epQueue_tail_CL (endpoint_lift ko') && ~~ mask 2 = epQueue_tail_CL (endpoint_lift ko')" + unfolding endpoint_lift_def + by (clarsimp simp: mask_def word_bw_assocs) + +(* FIXME AARCH64 is this used? useful? depends on deployment of make_canonical *) +lemma epQueue_tail_make_canonical[simp]: + "make_canonical (epQueue_tail_CL (endpoint_lift ko)) = epQueue_tail_CL (endpoint_lift ko)" + by (simp add: endpoint_lift_def make_canonical_def canonical_bit_def mask_def + word_bw_assocs) + +(* Clag from cancelSignal_ccorres_helper *) + +lemma cancelIPC_ccorres_helper: + "ccorres dc xfdc (invs' and + st_tcb_at' (\st. (isBlockedOnSend st \ isBlockedOnReceive st) + \ blockingObject st = ep) thread + and ko_at' ep' ep) + {s. epptr_' s = Ptr ep} + [] + (setEndpoint ep (if remove1 thread (epQueue ep') = [] then Structures_H.endpoint.IdleEP + else epQueue_update (\_. remove1 thread (epQueue ep')) ep')) + (\queue :== CALL ep_ptr_get_queue(\epptr);; + \queue :== CALL tcbEPDequeue(tcb_ptr_to_ctcb_ptr thread,\queue);; + CALL ep_ptr_set_queue(\epptr,\queue);; + IF head_C \queue = NULL THEN + CALL endpoint_ptr_set_state(\epptr,scast EPState_Idle) + FI)" + apply (rule ccorres_from_vcg) + apply (rule allI) + apply (rule conseqPre) + apply vcg + apply (clarsimp split del: if_split) + apply (frule (2) ep_blocked_in_queueD) + apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) + apply (elim conjE) + apply (frule (1) valid_ep_blockedD) + apply (elim conjE) + apply (frule cmap_relation_ep) + apply (erule (1) cmap_relation_ko_atE) + apply (intro conjI) + apply (erule h_t_valid_clift) + apply (rule impI) + apply (rule exI) + apply (rule conjI) + apply (rule_tac x = \ in exI) + apply (intro conjI) + apply assumption+ + apply (drule (2) ep_to_ep_queue) + apply (simp add: tcb_queue_relation'_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (intro impI conjI allI) + \ \empty case\ + apply clarsimp + apply (frule iffD1 [OF tcb_queue_head_empty_iff [OF tcb_queue_relation'_queue_rel]]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply clarsimp + subgoal by simp + apply (simp add: setEndpoint_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: remove1_empty rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ep_map_tos typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + subgoal by (simp add: cendpoint_relation_def Let_def EPState_Idle_def) + subgoal by simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + subgoal by simp + apply (erule (1) map_to_ko_atI') + apply (simp add: 
heap_to_user_data_def Let_def) + subgoal by (clarsimp simp: carch_state_relation_def carch_globals_def + packed_heap_update_collapse_hrs) + subgoal by (simp add: cmachine_state_relation_def) + subgoal by (simp add: h_t_valid_clift_Some_iff) + subgoal by (simp add: objBits_simps') + subgoal by (simp add: objBits_simps) + apply assumption + \ \non empty case\ + apply clarsimp + apply (frule tcb_queue_head_empty_iff [OF tcb_queue_relation'_queue_rel]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply clarsimp + apply (simp add: setEndpoint_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (frule (1) st_tcb_at_h_t_valid) + apply (simp add: remove1_empty rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ep_map_tos typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def + split: endpoint.splits split del: if_split) + \ \recv case\ + apply (subgoal_tac "pspace_canonical' \") + prefer 2 + apply fastforce + apply (clarsimp simp: h_t_valid_clift_Some_iff ctcb_offset_defs mask_shiftl_decompose + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask + tcb_queue_relation'_next_canonical tcb_queue_relation'_prev_canonical + simp flip: canonical_bit_def make_canonical_def + cong: tcb_queue_relation'_cong) + subgoal by (intro impI conjI; simp) + \ \send case\ + apply (subgoal_tac "pspace_canonical' \") + prefer 2 + apply fastforce + apply (clarsimp simp: h_t_valid_clift_Some_iff ctcb_offset_defs mask_shiftl_decompose + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask + tcb_queue_relation'_next_canonical tcb_queue_relation'_prev_canonical + simp flip: canonical_bit_def + cong: tcb_queue_relation'_cong) + subgoal by (intro impI conjI; simp) + \ \send case\ + apply (subgoal_tac "pspace_canonical' \") + prefer 2 + apply fastforce + apply (clarsimp simp: h_t_valid_clift_Some_iff ctcb_offset_defs + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask + tcb_queue_relation'_next_canonical tcb_queue_relation'_prev_canonical + simp flip: canonical_bit_def + cong: tcb_queue_relation'_cong) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + subgoal by (clarsimp simp: carch_state_relation_def carch_globals_def + packed_heap_update_collapse_hrs) + subgoal by (simp add: cmachine_state_relation_def) + subgoal by (simp add: h_t_valid_clift_Some_iff) + subgoal by (simp add: objBits_simps') + subgoal by (simp add: objBits_simps) + by assumption + +declare empty_fail_get[iff] + +lemma getThreadState_ccorres_foo: + "(\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c) \ + ccorres r xf (\s. \ts. st_tcb_at' ((=) ts) t s \ P ts s) + {s. \ts tcb'. 
cslift s (tcb_ptr_to_ctcb_ptr t) = Some tcb' + \ cthread_state_relation ts (tcbState_C tcb', tcbFault_C tcb') + \ s \ P' ts} hs + (getThreadState t >>= f) c" + apply (rule ccorres_symb_exec_l' [OF _ gts_inv' gts_sp' empty_fail_getThreadState]) + apply (erule_tac x=rv in meta_allE) + apply (erule ccorres_guard_imp2) + apply (clarsimp simp: st_tcb_at'_def) + apply (drule obj_at_ko_at', clarsimp) + apply (erule cmap_relationE1 [OF cmap_relation_tcb]) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: ctcb_relation_def obj_at'_def) + done + +lemma ep_blocked_in_queueD_recv: + "\st_tcb_at' ((=) (Structures_H.thread_state.BlockedOnReceive x gr)) thread \; ko_at' ep' x \; invs' \\ \ thread \ set (epQueue ep') \ isRecvEP ep'" + apply (frule sym_refs_st_tcb_atD', clarsimp) + apply (clarsimp simp: refs_of_rev' obj_at'_def ko_wp_at'_def projectKOs) + apply (cases ep', simp_all add: isSendEP_def isRecvEP_def)[1] + done + +lemma ep_blocked_in_queueD_send: + "\st_tcb_at' ((=) (Structures_H.thread_state.BlockedOnSend x xa xb xc xd)) thread \; ko_at' ep' x \; invs' \\ \ thread \ set (epQueue ep') \ isSendEP ep'" + apply (frule sym_refs_st_tcb_atD', clarsimp) + apply (clarsimp simp: refs_of_rev' obj_at'_def ko_wp_at'_def projectKOs) + apply (cases ep', simp_all add: isSendEP_def isRecvEP_def)[1] + done + +lemma cancelIPC_ccorres1: + assumes cteDeleteOne_ccorres: + "\w slot. ccorres dc xfdc + (invs' and cte_wp_at' (\ct. w = -1 \ cteCap ct = NullCap + \ (\cap'. ccap_relation (cteCap ct) cap' \ cap_get_tag cap' = w)) slot) + ({s. gs_get_assn cteDeleteOne_'proc (ghost'state_' (globals s)) = w} + \ {s. slot_' s = Ptr slot}) [] + (cteDeleteOne slot) (Call cteDeleteOne_'proc)" + shows + "ccorres dc xfdc (tcb_at' thread and invs') + (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (cancelIPC thread) (Call cancelIPC_'proc)" + apply (cinit lift: tptr_' simp: Let_def cong: call_ignore_cong) + apply (rule ccorres_move_c_guard_tcb) + apply csymbr + apply (rule getThreadState_ccorres_foo) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=ret__unsigned_longlong_' in ccorres_abstract, ceqv) + apply (rule_tac P="rv' = thread_state_to_tsType rv" in ccorres_gen_asm2) + apply wpc + \ \BlockedOnReceive\ + apply (simp add: word_sle_def ccorres_cond_iffs cong: call_ignore_cong) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (rule ccorres_pre_getEndpoint) + apply (rule ccorres_assert) + apply (rule ccorres_symb_exec_r) \ \ptr_get lemmas don't work so well :(\ + apply (rule ccorres_symb_exec_r) + apply (simp only: fun_app_def simp_list_case_return + return_bind ccorres_seq_skip) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) + apply (ctac add: setThreadState_ccorres_valid_queues') + apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+ + apply (simp add: ThreadState_defs) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply clarsimp + apply (rule conseqPre, vcg) + apply (rule subset_refl) + apply (rule conseqPre, vcg) + apply clarsimp + \ \BlockedOnReply case\ + apply (simp add: ThreadState_defs ccorres_cond_iffs + Collect_False Collect_True word_sle_def + cong: call_ignore_cong del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac P=\ in threadSet_ccorres_lemma2) + 
apply vcg + apply (clarsimp simp: typ_heap_simps') + apply (erule(1) rf_sr_tcb_update_no_queue2, + (simp add: typ_heap_simps')+)[1] + apply (rule ball_tcb_cte_casesI, simp_all)[1] + apply (clarsimp simp: ctcb_relation_def seL4_Fault_lift_NullFault + cfault_rel_def cthread_state_relation_def) + apply (case_tac "tcbState tcb", simp_all add: is_cap_fault_def)[1] + apply ceqv + apply ccorres_remove_UNIV_guard + apply (rule ccorres_move_array_assertion_tcb_ctes) + apply (rule_tac P="tcb_at' thread" in ccorres_cross_over_guard) + apply (simp add: getThreadReplySlot_def) + apply ctac + apply (simp only: liftM_def bind_assoc return_bind del: Collect_const) + apply (rule ccorres_pre_getCTE) + apply (rename_tac slot slot' cte) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac xf'=ret__unsigned_longlong_' and val="mdbNext (cteMDBNode cte)" + and R="cte_wp_at' ((=) cte) slot and invs'" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: ccte_relation_def map_option_Some_eq2) + apply ceqv + apply csymbr + apply (rule ccorres_Cond_rhs) + apply (simp add: nullPointer_def when_def) + apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_stateAssert]) + apply (rule ccorres_symb_exec_r) + apply (ctac add: cteDeleteOne_ccorres[where w1="scast cap_reply_cap"]) + apply vcg + apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def + gs_set_assn_Delete_cstate_relation[unfolded o_def]) + apply (wp | simp)+ + apply (rule ccorres_return_Skip) + apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def + ghost_assertion_data_set_def cap_tag_defs) + apply (simp add: locateSlot_conv, wp) + apply vcg + apply (rule_tac Q="\rv. 
tcb_at' thread and invs'" in hoare_post_imp) + apply (clarsimp simp: cte_wp_at_ctes_of capHasProperty_def + cap_get_tag_isCap ucast_id) + apply (wp threadSet_invs_trivial | simp)+ + apply (clarsimp simp add: guard_is_UNIV_def tcbReplySlot_def + Kernel_C.tcbReply_def tcbCNodeEntries_def) + \ \BlockedOnNotification\ + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong) + apply (rule ccorres_symb_exec_r) + apply (ctac (no_vcg)) + apply clarsimp + apply (rule conseqPre, vcg) + apply (rule subset_refl) + apply (rule conseqPre, vcg) + apply clarsimp + \ \Running, Inactive, and Idle\ + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, + rule ccorres_return_Skip)+ + \ \BlockedOnSend\ + apply (simp add: word_sle_def ccorres_cond_iffs + cong: call_ignore_cong) + \ \clag\ + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (rule ccorres_pre_getEndpoint) + apply (rule ccorres_assert) + apply (rule ccorres_symb_exec_r) \ \ptr_get lemmas don't work so well :(\ + apply (rule ccorres_symb_exec_r) + apply (simp only: fun_app_def simp_list_case_return return_bind ccorres_seq_skip) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) + apply (ctac add: setThreadState_ccorres_valid_queues') + apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split del:if_split)+ + apply (simp add: ThreadState_defs) + apply clarsimp + apply (rule conseqPre, vcg, rule subset_refl) + apply (rule conseqPre, vcg) + apply clarsimp + apply clarsimp + apply (rule conseqPre, vcg, rule subset_refl) + apply (rule conseqPre, vcg) + apply clarsimp + \ \Restart\ + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, + rule ccorres_return_Skip) + \ \Post wp proofs\ + apply vcg + apply clarsimp + apply (rule conseqPre, vcg) + apply clarsimp + apply clarsimp + apply (drule(1) obj_at_cslift_tcb) + apply clarsimp + apply (frule obj_at_valid_objs', clarsimp+) + apply (clarsimp simp: projectKOs valid_obj'_def valid_tcb'_def + valid_tcb_state'_def typ_heap_simps + word_sle_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (rule conjI) + subgoal by (auto simp: projectKOs obj_at'_def pred_tcb_at'_def split: thread_state.splits)[1] + apply (clarsimp) + apply (rule conjI) + subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def + isTS_defs cte_wp_at_ctes_of + cthread_state_relation_def sch_act_wf_weak valid_ep'_def + split: thread_state.splits) + apply clarsimp + apply (frule (2) ep_blocked_in_queueD_recv) + apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) + subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def + isTS_defs cte_wp_at_ctes_of isRecvEP_def + cthread_state_relation_def sch_act_wf_weak valid_ep'_def + split: thread_state.splits endpoint.splits) + apply (rule conjI) + apply (clarsimp simp: inQ_def) + apply clarsimp + apply (rule conjI) + subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def + isTS_defs cte_wp_at_ctes_of + cthread_state_relation_def sch_act_wf_weak valid_ep'_def + split: thread_state.splits) + apply clarsimp + apply (rule conjI) + subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def + isTS_defs cte_wp_at_ctes_of + cthread_state_relation_def sch_act_wf_weak valid_ep'_def + split: 
thread_state.splits) + apply clarsimp + apply (frule (2) ep_blocked_in_queueD_send) + apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) + subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def + isTS_defs cte_wp_at_ctes_of isSendEP_def + cthread_state_relation_def sch_act_wf_weak valid_ep'_def + split: thread_state.splits endpoint.splits)[1] + apply (auto simp: isTS_defs cthread_state_relation_def typ_heap_simps weak_sch_act_wf_def) + apply (case_tac ts, + auto simp: isTS_defs cthread_state_relation_def typ_heap_simps) + done + +end +end diff --git a/proof/crefine/AARCH64/Ipc_C.thy b/proof/crefine/AARCH64/Ipc_C.thy new file mode 100644 index 0000000000..6529225916 --- /dev/null +++ b/proof/crefine/AARCH64/Ipc_C.thy @@ -0,0 +1,6812 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Ipc_C +imports + Finalise_C + CSpace_All + SyscallArgs_C + IsolatedThreadAction +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition + "replyFromKernel_success_empty thread \ do + VSpace_H.lookupIPCBuffer True thread; + asUser thread $ setRegister AARCH64_H.badgeRegister 0; + setMessageInfo thread $ (Types_H.MI 0 0 0 0) + od" + +lemma replyFromKernel_success_empty: + "replyFromKernel thread (0, []) = replyFromKernel_success_empty thread" + unfolding replyFromKernel_def replyFromKernel_success_empty_def + by (simp add: setMRs_Nil submonad_asUser.fn_stateAssert) + +crunch sch_act_wf: handleFaultReply "\s. sch_act_wf (ksSchedulerAction s) s" + +crunch valid_ipc_buffer_ptr' [wp]: copyMRs "valid_ipc_buffer_ptr' p" + (rule: hoare_valid_ipc_buffer_ptr_typ_at' wp: crunch_wps) + +lemma threadSet_obj_at'_nontcb: + "koType TYPE('a::pspace_storable) \ koType TYPE(Structures_H.tcb) \ + \obj_at' (P :: 'a \ bool) t'\ threadSet f t \\rv. obj_at' P t'\" + apply (simp add: threadSet_def) + apply (wp obj_at_setObject2 hoare_drop_imps + | clarsimp simp: updateObject_default_def in_monad)+ + done + +lemma setMRs_ntfn_at[wp]: + "\ko_at' (ntfn :: Structures_H.notification) p\ + setMRs badge val thread + \\_. ko_at' ntfn p\" + apply (simp add: setMRs_def + zipWithM_x_mapM_x split_def storeWordUser_def + setThreadState_def asUser_def) + apply (wp threadSet_obj_at'_nontcb mapM_x_wp hoare_drop_imps + | simp | rule subset_refl)+ + done + +lemma asUser_ntfn_at[wp]: + "\ko_at' (ntfn :: Structures_H.notification) p\ + asUser tptr f \\_. ko_at' ntfn p\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_obj_at'_nontcb hoare_drop_imps | simp | rule subset_refl)+ + done + +definition + "lookup_fault_get_lufType luf \ case luf of + InvalidRoot \ 0 + | MissingCapability _ \ 1 + | DepthMismatch _ _ \ 2 + | GuardMismatch _ _ _ \ 3" + +definition + "setMR thread buffer \ \idx value. + if idx < length msgRegisters + then do + asUser thread (setRegister (msgRegisters ! 
idx) value); + return (idx + 1) + od + else case buffer of None \ return (length msgRegisters) + | Some buf \ do + storeWordUser (buf + (of_nat (idx + 1) * word_size)) value; + return (idx + 1) + od" + +lemmas msgMaxLength_unfold + = msgMaxLength_def[where 'a=nat, unfolded msgLengthBits_def, simplified, + unfolded shiftL_nat, simplified] + +lemma registers_less_maxlength: + "length msgRegisters < msgMaxLength" + by (simp add: msgRegisters_unfold msgMaxLength_unfold) + +lemma setMRs_to_setMR': +notes + wordSize_def' [simp] +shows + "setMRs thread buffer xs + = (do + stateAssert (tcb_at' thread) []; + ys \ zipWithM (setMR thread buffer) [0 ..< msgMaxLength] xs; + return (of_nat (min (length xs) (length msgRegisters + + (case buffer of None \ 0 | _ \ Suc (unat (msgMaxLength :: machine_word)) + - unat ((1 :: machine_word) + of_nat (length msgRegisters)))))) + od)" + apply (simp add: setMRs_def setMR_def split_def + zipWithM_x_mapM_x asUser_mapM_x bind_assoc + zipWithM_If_cut) + apply (simp add: zipWithM_mapM) + apply (simp add: split_def mapM_liftM_const[unfolded liftM_def] + mapM_return mapM_Nil mapM_x_Nil asUser_mapM_x + last_append map_replicate_const + split: option.split split del: if_split) + apply (simp add: mapM_discarded mapM_x_def split del: if_split) + apply (intro allI conjI impI bind_cong bind_apply_cong refl + arg_cong2[where f=sequence_x] + map_length_cong, + insert registers_less_maxlength, simp_all) + apply (clarsimp simp: set_zip) + apply (clarsimp simp: set_zip) + apply (simp add: msgRegisters_unfold msgMaxLength_def + msgLengthBits_def shiftL_nat) + apply (clarsimp simp only: set_zip min_less_iff_conj length_zip + length_map nth_zip fst_conv nth_map + snd_conv upto_enum_word length_drop + length_take nth_drop nth_upt) + apply (subst nth_take) + apply (simp add: less_diff_conv) + apply (simp add: word_size word_size_def field_simps) + done + +lemma setMRs_to_setMR: + "setMRs thread buffer xs + = (do + stateAssert (tcb_at' thread) []; + ys \ zipWithM (setMR thread buffer) [0 ..< msgMaxLength] xs; + return (of_nat (last (0 # ys))) + od)" + apply (simp add: setMRs_to_setMR' zipWithM_mapM split_def mapM_discarded + del: last.simps) + apply (subst mapM_last_Cons) + prefer 3 + apply simp + apply (simp add: msgMaxLength_unfold) + apply (simp add: fst_last_zip_upt) + apply (subgoal_tac "msgMaxLength - Suc 0 \ length msgRegisters + \ of_nat (length xs - Suc 0) = of_nat (length xs) - (1 :: machine_word) + \ unat ((1 :: machine_word) + of_nat (length msgRegisters)) = Suc (length msgRegisters)") + apply (simp add: setMR_def split: option.split) + apply (intro impI conjI allI) + apply clarsimp + apply clarsimp + apply (clarsimp simp add: msgRegisters_unfold) + apply (clarsimp simp: linorder_not_less linorder_not_le) + apply (clarsimp simp: msgRegisters_unfold msgMaxLength_def + msgLengthBits_def shiftL_nat) + apply (clarsimp simp: msgRegisters_unfold msgMaxLength_def + msgLengthBits_def shiftL_nat) + apply (simp add: msgRegisters_unfold msgMaxLength_unfold) + apply (case_tac xs, simp_all) + done + +lemma asUser_comm: + assumes neq: "a \ b" + assumes efa: "empty_fail fa" and efb: "empty_fail fb" + shows + "\ra rb. 
do + ra \ asUser a fa; + rb \ asUser b fb; + c ra rb + od = do + rb \ asUser b fb; + ra \ asUser a fa; + c ra rb + od" + apply (rule submonad_comm' [OF submonad_asUser submonad_asUser]) + apply (clarsimp simp: neq asUser_replace_def Let_def fun_upd_twist [OF neq]) + apply (clarsimp simp: neq asUser_replace_def Let_def obj_at'_real_def + ko_wp_at'_def ps_clear_upd_None ps_clear_upd + split: option.split kernel_object.split) + apply (clarsimp simp: neq[symmetric] asUser_replace_def Let_def + obj_at'_real_def ko_wp_at'_def ps_clear_upd_None + ps_clear_upd + split: option.split kernel_object.split) + apply (rule efa efb)+ + done + +crunch inv[wp]: getSanitiseRegisterInfo P + +lemma empty_fail_getSanitiseRegisterInfo[wp, simp]: + "empty_fail (getSanitiseRegisterInfo t)" + (* FIXME AARCH64 why do we need empty_fail_getObject here but not on other arches?! *) + by (wpsimp simp: getSanitiseRegisterInfo_def + wp: ArchMove_C.empty_fail_archThreadGet empty_fail_getObject) + +lemma asUser_getRegister_getSanitiseRegisterInfo_comm: + "do + ra \ asUser a (getRegister r); + rb \ getSanitiseRegisterInfo b; + c ra rb + od = do + rb \ getSanitiseRegisterInfo b; + ra \ asUser a (getRegister r); + c ra rb + od" + by (rule bind_inv_inv_comm; wpsimp) + +lemma asUser_mapMloadWordUser_threadGet_comm: + "do + ra \ mapM loadWordUser xs; + rb \ threadGet fb b; + c ra rb + od = do + rb \ threadGet fb b; + ra \ mapM loadWordUser xs; + c ra rb + od" + by (rule bind_inv_inv_comm, auto; wp mapM_wp') + +lemma asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm: + "do + ra \ mapM loadWordUser xs; + rb \ getSanitiseRegisterInfo b; + c ra rb + od = do + rb \ getSanitiseRegisterInfo b; + ra \ mapM loadWordUser xs; + c ra rb + od" + by (rule bind_inv_inv_comm, auto; wp mapM_wp') + +lemma asUser_loadWordUser_comm: + "empty_fail m \ + do x \ asUser t m; y \ loadWordUser p; n x y od = + do y \ loadWordUser p; x \ asUser t m; n x y od" + apply (rule submonad_comm2 [OF _ loadWordUser_submonad_fn + submonad_asUser, symmetric]) + apply (simp add: submonad_args_def pointerInUserData_def) + apply (simp add: asUser_replace_def Let_def) + apply (clarsimp simp: asUser_replace_def Let_def typ_at'_def ko_wp_at'_def + ps_clear_upd ps_clear_upd_None pointerInUserData_def + split: option.split kernel_object.split) + apply simp+ + done + +lemma asUser_storeWordUser_comm: + "empty_fail m \ + do x \ asUser t m; y \ storeWordUser p v; n x y od = + do y \ storeWordUser p v; x \ asUser t m; n x y od" + apply (rule submonad_comm2 [OF _ storeWordUser_submonad_fn + submonad_asUser, symmetric]) + apply (simp add: submonad_args_def pointerInUserData_def) + apply (simp add: asUser_replace_def Let_def) + apply (clarsimp simp: asUser_replace_def Let_def typ_at'_def ko_wp_at'_def + ps_clear_upd ps_clear_upd_None pointerInUserData_def + split: option.split kernel_object.split) + apply simp+ + done + +lemma length_syscallMessage: + "length AARCH64_H.syscallMessage = unat n_syscallMessage" + apply (simp add: syscallMessage_def AARCH64.syscallMessage_def + msgRegisters_unfold n_syscallMessage_def) + apply (simp add: upto_enum_def) + apply (simp add: fromEnum_def enum_register) + done + +end + +context kernel_m begin + +(* FIXME move *) +lemma ccap_relation_ep_helpers: + "\ ccap_relation cap cap'; cap_get_tag cap' = scast cap_endpoint_cap \ + \ capCanSend_CL (cap_endpoint_cap_lift cap') = from_bool (capEPCanSend cap) + \ capCanReceive_CL (cap_endpoint_cap_lift cap') = from_bool (capEPCanReceive cap) + \ capEPPtr_CL (cap_endpoint_cap_lift cap') = capEPPtr cap + \ 
capEPBadge_CL (cap_endpoint_cap_lift cap') = capEPBadge cap + \ capCanGrant_CL (cap_endpoint_cap_lift cap') = from_bool (capEPCanGrant cap) + \ capCanGrantReply_CL (cap_endpoint_cap_lift cap') = from_bool (capEPCanGrantReply cap)" + by (clarsimp simp: cap_lift_endpoint_cap cap_to_H_simps + cap_endpoint_cap_lift_def word_size + elim!: ccap_relationE) + +(* FIXME move *) +lemma ccap_relation_reply_helpers: + "\ ccap_relation cap cap'; cap_get_tag cap' = scast cap_reply_cap \ + \ capReplyCanGrant_CL (cap_reply_cap_lift cap') = from_bool (capReplyCanGrant cap) + \ capReplyMaster_CL (cap_reply_cap_lift cap') = from_bool (capReplyMaster cap) + \ cap_reply_cap_CL.capTCBPtr_CL (cap_reply_cap_lift cap') + = ptr_val (tcb_ptr_to_ctcb_ptr (capTCBPtr cap))" + by (clarsimp simp: cap_lift_reply_cap cap_to_H_simps + cap_reply_cap_lift_def word_size + elim!: ccap_relationE) + +(*FIXME: arch_split: C kernel names hidden by Haskell names *) +(*FIXME: fupdate simplification issues for 2D arrays *) +abbreviation "syscallMessageC \ kernel_all_global_addresses.fault_messages.[unat MessageID_Syscall]" +lemmas syscallMessageC_def = kernel_all_substitute.fault_messages_def +abbreviation "exceptionMessageC \ kernel_all_substitute.fault_messages.[unat MessageID_Exception]" +lemmas exceptionMessageC_def = kernel_all_substitute.fault_messages_def + +lemma syscallMessage_ccorres: + "n < unat n_syscallMessage + \ register_from_H (AARCH64_H.syscallMessage ! n) + = index syscallMessageC n" + apply (simp add: AARCH64_H.syscallMessage_def syscallMessageC_def + AARCH64.syscallMessage_def + MessageID_Exception_def MessageID_Syscall_def + n_syscallMessage_def msgRegisters_unfold) + apply (simp add: upto_enum_def fromEnum_def enum_register) + apply (simp add: toEnum_def enum_register) + apply (clarsimp simp: fupdate_def + | drule nat_less_cases' | erule disjE)+ + done + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition + "handleArchFaultReply' f sender receiver tag \ + let default_action = (do + sendBuf \ lookupIPCBuffer False sender; + stateAssert (tcb_at' sender) []; + case sendBuf of + None \ return () + | Some bufferPtr \ do + mapM loadWordUser + (map (\i. bufferPtr + PPtr (i * 8)) + [(scast n_msgRegisters :: machine_word) + 1.e. msgMaxLength]); + return () + od; + return True + od) in + (do + label \ return $ msgLabel tag; + mlen \ return $ msgLength tag; + case f of + VMFault _ _ \ default_action + | VCPUFault _ \ default_action + | VGICMaintenance _ \ default_action + | VPPIEvent _ \ default_action + od)" + +definition + "handleFaultReply' f sender receiver \ do + tag \ getMessageInfo sender; + label \ return $ msgLabel tag; + mlen \ return $ msgLength tag; + case f of + CapFault _ _ _ \ return True + | ArchFault af \ handleArchFaultReply' af sender receiver tag + | UnknownSyscallException _ \ do + t \ getSanitiseRegisterInfo receiver; + regs \ return $ take (unat mlen) syscallMessage; + zipWithM_x (\rs rd. do + v \ asUser sender $ getRegister rs; + asUser receiver $ setRegister rd $ sanitiseRegister t rd v + od) msgRegisters regs; + sendBuf \ lookupIPCBuffer False sender; + case sendBuf of + None \ return () + | Some bufferPtr \ + zipWithM_x (\i rd. do + v \ loadWordUser (bufferPtr + PPtr (i * 8)); + asUser receiver $ setRegister rd $ sanitiseRegister t rd v + od) [(scast n_msgRegisters :: machine_word) + 1.e. 
scast n_syscallMessage] (drop (unat (scast n_msgRegisters :: machine_word)) regs); + return (label = 0) + od + | UserException _ _ \ do + t \ getSanitiseRegisterInfo receiver; + regs \ return $ take (unat mlen) exceptionMessage; + zipWithM_x (\rs rd. do + v \ asUser sender $ getRegister rs; + asUser receiver $ setRegister rd $ sanitiseRegister t rd v + od) msgRegisters regs; + return (label = 0) + od + od" + +lemma loadWordUser_discarded: + "loadWordUser p >>= (\_. n) = + stateAssert (pointerInUserData p and K (p && mask 3 = 0)) [] >>= (\_. n)" + apply (rule ext) + apply (clarsimp simp: loadWordUser_def loadWord_def bind_def assert_def + stateAssert_def split_def return_def fail_def + doMachineOp_def gets_def get_def select_f_def + modify_def put_def) + done + +lemma stateAssert_mapM_loadWordUser_comm: + "do x \ stateAssert P []; y \ mapM loadWordUser ptrs; n od = + do y \ mapM loadWordUser ptrs; x \ stateAssert P []; n od" + apply (rule bind_inv_inv_comm) + apply (wp stateAssert_inv) + apply (wp mapM_wp_inv)+ + apply simp + done + +lemmas syscallMessage_unfold + = AARCH64_H.syscallMessage_def + AARCH64.syscallMessage_def + [unfolded upto_enum_def, simplified, + unfolded fromEnum_def enum_register, simplified, + unfolded toEnum_def enum_register, simplified] + +lemma handleArchFaultReply': + notes option.case_cong_weak [cong] wordSize_def'[simp] + shows "(do + sb \ lookupIPCBuffer False s; + msg \ getMRs s sb tag; + handleArchFaultReply f r (msgLabel tag) msg + od) x' = handleArchFaultReply' f s r tag x'" + supply empty_fail_cond[simp] + apply (unfold handleArchFaultReply'_def getMRs_def msgMaxLength_def + bit_def msgLengthBits_def msgRegisters_unfold + fromIntegral_simp1 fromIntegral_simp2 + shiftL_word Let_def) + apply (simp add: bind_assoc) + apply (clarsimp simp: mapM_def sequence_def bind_assoc asUser_bind_distrib + asUser_return submonad_asUser.fn_stateAssert) + apply (case_tac f ; clarsimp) + apply (clarsimp simp: handleArchFaultReply_def asUser_getRegister_discarded + bind_subst_lift [OF stateAssert_stateAssert] + pred_conj_def) + apply (rule bind_apply_cong [OF refl], rename_tac sb s'') + apply (rule bind_apply_cong [OF refl], rename_tac rv r'') + apply (case_tac sb, simp_all add: word_size n_msgRegisters_def)[1] + apply (clarsimp simp: handleArchFaultReply_def asUser_getRegister_discarded + bind_subst_lift [OF stateAssert_stateAssert] + pred_conj_def) + apply (rule bind_apply_cong [OF refl], rename_tac sb s'') + apply (rule bind_apply_cong [OF refl], rename_tac rv r'') + apply (case_tac sb, simp_all add: word_size n_msgRegisters_def)[1] + apply (clarsimp simp: handleArchFaultReply_def asUser_getRegister_discarded + bind_subst_lift [OF stateAssert_stateAssert] + pred_conj_def) + apply (rule bind_apply_cong [OF refl], rename_tac sb s'') + apply (rule bind_apply_cong [OF refl], rename_tac rv r'') + apply (case_tac sb, simp_all add: word_size n_msgRegisters_def)[1] + apply (clarsimp simp: handleArchFaultReply_def asUser_getRegister_discarded + bind_subst_lift [OF stateAssert_stateAssert] + pred_conj_def) + apply (rule bind_apply_cong [OF refl], rename_tac sb s'') + apply (rule bind_apply_cong [OF refl], rename_tac rv r'') + apply (case_tac sb, simp_all add: word_size n_msgRegisters_def)[1] + done + +lemmas lookup_uset_getreg_swap = bind_inv_inv_comm[OF lookupIPCBuffer_inv + user_getreg_inv' + empty_fail_lookupIPCBuffer + empty_fail_asUser[OF empty_fail_getRegister]] + +end + +lemma mapM_x_zip_take_Cons_append: + "n = 0 \ zs = [] + \ mapM_x f (zip (x # xs) (take n (y # ys) @ zs)) + = 
do + when (n > 0) (f (x, y)); + mapM_x f (zip xs (take (n - 1) ys @ zs)) + od" + by (cases n, simp_all add: mapM_x_Cons) + +lemma threadGet_lookupIPCBuffer_comm: + "do + a \ lookupIPCBuffer x y; + t \ threadGet id r; + c a t + od = do + t \ threadGet id r; + a \ lookupIPCBuffer x y; + c a t + od" + by (rule bind_inv_inv_comm; wp?; auto) + +lemma getSanitiseRegisterInfo_lookupIPCBuffer_comm: + "do + a \ lookupIPCBuffer x y; + t \ getSanitiseRegisterInfo r; + c a t + od = do + t \ getSanitiseRegisterInfo r; + a \ lookupIPCBuffer x y; + c a t + od" + by (rule bind_inv_inv_comm; wp?; auto) + +lemma threadGet_moreMapM_comm: + "do + a \ + case sb of None \ return [] + | Some bufferPtr \ return (xs bufferPtr) >>= mapM loadWordUser; + t \ threadGet id r; + c a t + od = do + t \ threadGet id r; + a \ + case sb of None \ return [] + | Some bufferPtr \ return (xs bufferPtr) >>= mapM loadWordUser; + c a t + od" + apply (rule bind_inv_inv_comm) + apply (rule hoare_pre, wpc; (wp mapM_wp')?) + apply simp + apply wp + apply (auto split: option.splits) + done + +lemma getSanitiseRegisterInfo_moreMapM_comm: + "do + a \ + case sb of None \ return [] + | Some bufferPtr \ return (xs bufferPtr) >>= mapM loadWordUser; + t \ getSanitiseRegisterInfo r; + c a t + od = do + t \ getSanitiseRegisterInfo r; + a \ + case sb of None \ return [] + | Some bufferPtr \ return (xs bufferPtr) >>= mapM loadWordUser; + c a t + od" + apply (rule bind_inv_inv_comm) + apply (rule hoare_pre, wpc; (wp mapM_wp')?) + apply simp + apply wp + apply (auto split: option.splits) + done + +lemma monadic_rewrite_threadGet_return: + "monadic_rewrite True False (tcb_at' r) (return x) (do t \ threadGet f r; return x od)" + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) + done + +context begin interpretation Arch . + +lemma no_fail_getSanitiseRegisterInfo[wp, simp]: + "no_fail (tcb_at' r) (getSanitiseRegisterInfo r)" + apply (simp add: getSanitiseRegisterInfo_def) + by wpsimp + +end + + +lemma monadic_rewrite_getSanitiseRegisterInfo_return: + "monadic_rewrite True False (tcb_at' r) (return x) (do t \ getSanitiseRegisterInfo r; return x od)" + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) + done + +lemma monadic_rewrite_getSanitiseRegisterInfo_drop: + "monadic_rewrite True False (tcb_at' r) (d) (do t \ getSanitiseRegisterInfo r; d od)" + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) + done + +context kernel_m begin interpretation Arch . + +lemma threadGet_discarded: + "(threadGet f t >>= (\_. n)) = stateAssert (tcb_at' t) [] >>= (\_. 
n)" + apply (simp add: threadGet_def getObject_get_assert liftM_def bind_assoc stateAssert_def) + apply (rule ext) + apply (simp add: bind_def simpler_gets_def get_def) + done + +lemma handleFaultReply': + notes option.case_cong_weak [cong] wordSize_def'[simp] take_append[simp del] prod.case_cong_weak[cong] + assumes neq: "s \ r" + shows "monadic_rewrite True False (tcb_at' s and tcb_at' r) (do + tag \ getMessageInfo s; + sb \ lookupIPCBuffer False s; + msg \ getMRs s sb tag; + handleFaultReply f r (msgLabel tag) msg + od) (handleFaultReply' f s r)" + supply empty_fail_cond[simp] + supply if_cong[cong] + supply empty_fail_asUser[wp] empty_fail_getRegister[wp] + + apply (unfold handleFaultReply'_def getMRs_def msgMaxLength_def + bit_def msgLengthBits_def msgRegisters_unfold + fromIntegral_simp1 fromIntegral_simp2 + shiftL_word) + apply (simp add: bind_assoc) + apply (rule monadic_rewrite_bind_tail) + apply (clarsimp simp: mapM_def sequence_def bind_assoc asUser_bind_distrib + asUser_return submonad_asUser.fn_stateAssert) + apply (case_tac f, simp_all add: handleFaultReply_def zipWithM_x_mapM_x zip_take) + (* UserException *) + apply (clarsimp simp: handleFaultReply_def zipWithM_x_mapM_x + zip_Cons AARCH64_H.exceptionMessage_def + AARCH64.exceptionMessage_def + mapM_x_Cons mapM_x_Nil) + apply (rule monadic_rewrite_symb_exec_l) + apply (rule_tac P="tcb_at' s and tcb_at' r" in monadic_rewrite_inst) + apply (case_tac sb; (case_tac "msgLength tag < scast n_msgRegisters", + (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm + asUser_getRegister_getSanitiseRegisterInfo_comm + asUser_getRegister_discarded asUser_mapMloadWordUser_threadGet_comm + asUser_comm[OF neq] asUser_getRegister_threadGet_comm + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp)+)+)) + apply wp+ + (* capFault *) + apply (repeat 5 \rule monadic_rewrite_symb_exec_l\) (* until case sb *) + apply (case_tac sb) + apply (clarsimp + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp mapM_x_mapM_valid[OF mapM_x_wp'[OF loadWordUser_inv]] + empty_fail_loadWordUser)+ + (* UnknownSyscallException *) + apply (simp add: zip_append2 mapM_x_append asUser_bind_distrib split_def bind_assoc) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_trans[rotated]) + apply (rule monadic_rewrite_do_flip) + apply (rule monadic_rewrite_bind_tail) + apply (rule_tac P="inj (case_bool s r)" in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) + apply (rule isolate_thread_actions_rewrite_bind + bool.simps setRegister_simple + zipWithM_setRegister_simple + thread_actions_isolatable_bind lookupIPCBuffer_isolatable + lookupIPCBuffer_isolatable[THEN thread_actions_isolatableD] + copy_registers_isolate_general thread_actions_isolatable_return + thread_actions_isolatable_return[THEN thread_actions_isolatableD] + | assumption + | wp assert_inv)+ + apply (rule monadic_rewrite_isolate_final[where P="\"]) + apply 
simp+ + apply wp + (* swap ends *) + apply (clarsimp simp: handleFaultReply_def zipWithM_x_mapM_x + zip_Cons syscallMessage_unfold + n_syscallMessage_def + upto_enum_word mapM_x_Cons mapM_x_Nil) + apply (simp add: getSanitiseRegisterInfo_moreMapM_comm asUser_getRegister_getSanitiseRegisterInfo_comm getSanitiseRegisterInfo_lookupIPCBuffer_comm) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_bind_tail [where Q="\_. tcb_at' r"]) + apply (case_tac sb) + apply (case_tac "msgLength tag < scast n_msgRegisters") + apply (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + | wp asUser_typ_ats)+)+ + apply (case_tac "msgLength tag < scast n_msgRegisters") + apply (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + zipWithM_x_Nil + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_threadGet_return + monadic_rewrite_getSanitiseRegisterInfo_return + | wp asUser_typ_ats mapM_wp')+)+ + apply (simp add: n_msgRegisters_def word_le_nat_alt n_syscallMessage_def + linorder_not_less syscallMessage_unfold) + apply (clarsimp | frule neq0_conv[THEN iffD2, THEN not0_implies_Suc, + OF order_less_le_trans, rotated])+ + apply (subgoal_tac "\n :: machine_word. n \ scast n_syscallMessage \ [n .e. msgMaxLength] + = [n .e. scast n_syscallMessage] + @ [scast n_syscallMessage + 1 .e. 
msgMaxLength]") + apply (simp only: upto_enum_word[where y="scast n_syscallMessage :: machine_word"] + upto_enum_word[where y="scast n_syscallMessage + 1 :: machine_word"]) + apply (clarsimp simp: bind_assoc asUser_bind_distrib asUser_getRegister_threadGet_comm + mapM_x_Cons mapM_x_Nil threadGet_discarded + asUser_comm [OF neq] asUser_getRegister_discarded + submonad_asUser.fn_stateAssert take_zip + bind_subst_lift [OF submonad_asUser.stateAssert_fn] + word_less_nat_alt AARCH64_H.sanitiseRegister_def + split_def n_msgRegisters_def msgMaxLength_def + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_size msgLengthBits_def n_syscallMessage_def Let_def + split del: if_split + cong: if_weak_cong register.case_cong) + + + apply (rule monadic_rewrite_bind_tail)+ + apply (subst (2) upto_enum_word) + apply (case_tac "ma < unat n_syscallMessage - 4") + + apply (erule disjE[OF nat_less_cases'], + ( clarsimp simp: n_syscallMessage_def bind_assoc asUser_bind_distrib + mapM_x_Cons mapM_x_Nil zipWithM_x_mapM_x mapM_Cons + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_loadWordUser_comm loadWordUser_discarded asUser_return + zip_take_triv2 msgMaxLength_def + no_fail_stateAssert + cong: if_weak_cong + | simp + | rule monadic_rewrite_bind_tail + monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + monadic_rewrite_threadGet_return + monadic_rewrite_getSanitiseRegisterInfo_return + monadic_rewrite_getSanitiseRegisterInfo_drop + | wp asUser_typ_ats empty_fail_loadWordUser)+)+ + apply (clarsimp simp: upto_enum_word word_le_nat_alt simp del: upt.simps cong: if_weak_cong) + apply (cut_tac i="unat n" and j="Suc (unat (scast n_syscallMessage :: machine_word))" + and k="Suc msgMaxLength" in upt_add_eq_append') + apply (simp add: n_syscallMessage_def) + apply (simp add: n_syscallMessage_def msgMaxLength_unfold) + apply (simp add: n_syscallMessage_def msgMaxLength_def + msgLengthBits_def shiftL_nat + del: upt.simps upt_rec_numeral) + apply (simp add: upto_enum_word cong: if_weak_cong) + apply wp+ + (* ArchFault *) + apply (simp add: neq inj_case_bool split: bool.split) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_is_refl) + apply (rule ext) + apply (unfold handleArchFaultReply'[symmetric] getMRs_def msgMaxLength_def + bit_def msgLengthBits_def msgRegisters_unfold + fromIntegral_simp1 fromIntegral_simp2 shiftL_word) + apply (clarsimp simp: mapM_def sequence_def bind_assoc asUser_bind_distrib + asUser_return submonad_asUser.fn_stateAssert) + apply wpsimp+ + done + +end + +context kernel_m +begin + +(* FIXME: move *) +lemma ccorres_merge_return: + "ccorres (r \ f) xf P P' hs H C \ + ccorres r xf P P' hs (do x \ H; return (f x) od) C" + by (rule ccorres_return_into_rel) + +(* FIXME: move *) +lemma ccorres_break: + assumes r: "\s s'. \ (s,s') \ rf_sr; P s; s' \ P' \ \ r (Inl e) (xf s')" + assumes xf: "\s. xf (global_exn_var_'_update (\_. 
Break) s) = xf s" + shows "ccorres r xf P P' (catchbrk_C#hs) (throwError e) break_C" + apply (simp add: throwError_def cbreak_def) + apply (clarsimp simp: ccorres_underlying_def return_def split: xstate.splits) + apply (frule (2) r) + apply (rule conjI) + apply clarsimp + apply (erule exec_handlers.cases, simp_all)[1] + apply clarsimp + apply (erule exec_elim_cases, simp_all)[1] + apply (erule exec_elim_cases, simp_all)[1] + apply clarsimp + apply (erule exec_elim_cases, simp_all)[1] + apply clarsimp + apply (erule exec_handlers.cases, simp_all)[1] + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_elim_cases, simp_all)[1] + apply (erule exec_elim_cases, simp_all)[1] + apply clarsimp + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_elim_cases, simp_all)[1] + apply clarsimp + apply (erule exec_elim_cases, simp_all)[1] + apply (clarsimp simp: unif_rrel_def xf) + apply clarsimp + apply clarsimp + apply (erule exec_elim_cases, simp_all)[1] + apply (erule exec_elim_cases, simp_all)[1] + apply (erule exec_elim_cases, simp_all)[1] + apply (rule conjI, clarsimp) + apply (erule exec_handlers.cases, simp_all)[1] + apply clarsimp + apply ((erule exec_elim_cases, simp_all)[1])+ + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_handlers.cases, simp_all)[1] + apply (auto elim: exec_elim_cases)[3] + apply (rule conjI, clarsimp) + apply (erule exec_handlers.cases, simp_all)[1] + apply clarsimp + apply ((erule exec_elim_cases, simp_all)[1])+ + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_handlers.cases, simp_all)[1] + apply (auto elim: exec_elim_cases)[3] + apply clarsimp + apply (erule exec_handlers.cases, simp_all)[1] + apply clarsimp + apply ((erule exec_elim_cases, simp_all)[1])+ + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_handlers.cases, simp_all)[1] + apply (auto elim!: exec_elim_cases) + done + +(* FIXME: move *) +lemma ccorres_break_return: + assumes r: "\s s'. \ (s,s') \ rf_sr; P s; s' \ P' \ \ r n (xf s')" + assumes xf: "\s. xf (global_exn_var_'_update (\_. 
Break) s) = xf s" + shows "ccorres r xf P P' (catchbrk_C#hs) (return n) break_C" + apply (simp add: throwError_def cbreak_def) + apply (clarsimp simp: ccorres_underlying_def return_def split: xstate.splits) + apply (frule (2) r) + apply (rule conjI) + apply clarsimp + apply (erule exec_handlers.cases, simp_all)[1] + apply clarsimp + apply (erule exec_elim_cases, simp_all)[1] + apply (erule exec_elim_cases, simp_all)[1] + apply clarsimp + apply (erule exec_elim_cases, simp_all)[1] + apply clarsimp + apply (erule exec_handlers.cases, simp_all)[1] + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_elim_cases, simp_all)[1] + apply (erule exec_elim_cases, simp_all)[1] + apply clarsimp + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_elim_cases, simp_all)[1] + apply clarsimp + apply (erule exec_elim_cases, simp_all)[1] + apply (clarsimp simp: unif_rrel_def xf) + apply clarsimp + apply clarsimp + apply (erule exec_elim_cases, simp_all)[1] + apply (erule exec_elim_cases, simp_all)[1] + apply (erule exec_elim_cases, simp_all)[1] + apply (rule conjI, clarsimp) + apply (erule exec_handlers.cases, simp_all)[1] + apply clarsimp + apply ((erule exec_elim_cases, simp_all)[1])+ + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_handlers.cases, simp_all)[1] + apply (auto elim: exec_elim_cases)[3] + apply (rule conjI, clarsimp) + apply (erule exec_handlers.cases, simp_all)[1] + apply clarsimp + apply ((erule exec_elim_cases, simp_all)[1])+ + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_handlers.cases, simp_all)[1] + apply (auto elim: exec_elim_cases)[3] + apply clarsimp + apply (erule exec_handlers.cases, simp_all)[1] + apply clarsimp + apply ((erule exec_elim_cases, simp_all)[1])+ + apply (clarsimp simp: catchbrk_C_def) + apply (erule exec_handlers.cases, simp_all)[1] + apply (auto elim!: exec_elim_cases) + done + +lemma messageInfoFromWord_spec: + "\s. \ \ {s} Call messageInfoFromWord_'proc {t. seL4_MessageInfo_lift (ret__struct_seL4_MessageInfo_C_' t) = + \label_CL = (w_' s >> 12) && 0xFFFFFFFFFFFFF, capsUnwrapped_CL = (w_' s >> 9) && 7, + extraCaps_CL = (w_' s >> 7) && 3, length_CL = let v = w_' s && 0x7F in if v > msgMaxLength then msgMaxLength else v\}" + apply vcg + apply (simp add: seL4_MessageInfo_lift_def Let_def msgMaxLength_def mask_def word_sle_def + word_sless_def seL4_MsgMaxLength_def + split: if_split) + done + +(* FIXME x64: msgLabelBits change *) +lemma messageInfoFromWord_ccorres [corres]: + "ccorres (\r r'. r = message_info_to_H r') ret__struct_seL4_MessageInfo_C_' \ {s. w_' s = w} [] + (return (messageInfoFromWord w)) (Call messageInfoFromWord_'proc)" + apply (rule ccorres_from_spec_modifies [where P = \, simplified]) + apply (rule messageInfoFromWord_spec) + apply (rule messageInfoFromWord_modifies) + apply simp + apply simp + apply (simp add: return_def messageInfoFromWord_def Let_def message_info_to_H_def + msgLengthBits_def Types_H.msgExtraCapBits_def msgMaxExtraCaps_def + shiftL_nat msgMaxLength_def msgLabelBits_def) + done + +lemma getMessageInfo_ccorres: + "ccorres (\r r'. 
r = message_info_to_H r') ret__struct_seL4_MessageInfo_C_' + (tcb_at' sender) UNIV hs (getMessageInfo sender) + (\ret__unsigned_long :== CALL getRegister(tcb_ptr_to_ctcb_ptr sender,scast Kernel_C.msgInfoRegister);; + \ret__struct_seL4_MessageInfo_C :== CALL messageInfoFromWord(\ret__unsigned_long))" + unfolding getMessageInfo_def + apply simp + apply (rule ccorres_guard_imp2) + apply ctac + apply ctac + apply wp + apply vcg + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps AARCH64_H.msgInfoRegister_def AARCH64.msgInfoRegister_def + C_register_defs + dest!: c_guard_clift) + done + +lemma getMessageInfo_ccorres': + "ccorres (\r r'. r = message_info_to_H r') tag_' + (tcb_at' sender) UNIV hs (getMessageInfo sender) + (\ret__unsigned_long :== CALL getRegister(tcb_ptr_to_ctcb_ptr sender,scast Kernel_C.msgInfoRegister);; + \tag :== CALL messageInfoFromWord(\ret__unsigned_long))" + unfolding getMessageInfo_def + apply simp + apply (rule ccorres_guard_imp2) + apply ctac + apply ctac + apply wp + apply vcg + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps AARCH64_H.msgInfoRegister_def AARCH64.msgInfoRegister_def + C_register_defs + dest!: c_guard_clift) + done + +lemma replyFromKernel_success_empty_ccorres [corres]: + "ccorres dc xfdc \ (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\) hs + (replyFromKernel thread (0, [])) + (Call replyFromKernel_success_empty_'proc)" + apply (subst replyFromKernel_success_empty) + apply (cinit lift: thread_') + apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_lookupIPCBuffer]) + apply (ctac add: setRegister_ccorres) + apply (unfold setMessageInfo_def) + apply csymbr + apply ctac + apply (simp only: fun_app_def) + apply (ctac add: setRegister_ccorres) + apply wp + apply vcg + apply wp + apply vcg + apply wp+ + apply (simp add: AARCH64_H.msgInfoRegister_def AARCH64.msgInfoRegister_def + AARCH64.capRegister_def + AARCH64_H.badgeRegister_def AARCH64.badgeRegister_def + message_info_to_H_def C_register_defs) + done + +lemma msgRegisters_offset_conv: + "\offset i. \ offset + i < length AARCH64_H.msgRegisters \ \ + index msgRegistersC (unat ((of_nat offset :: machine_word) + of_nat i)) = + register_from_H (AARCH64_H.msgRegisters ! (offset + i))" + apply (simp add: msgRegistersC_def msgRegisters_unfold fupdate_def) + apply (subst of_nat_add [symmetric]) + apply (case_tac "offset + i", simp_all del: of_nat_add) + apply (case_tac nat, simp, rename_tac nat, simp)+ + done + +lemmas ccorres_pre_stateAssert = + ccorres_symb_exec_l [OF _ stateAssert_inv stateAssert_wp + empty_fail_stateAssert] + +declare setRegister_ccorres[corres] + +lemma setMR_ccorres: + notes if_cong[cong] + notes unat_of_nat32 = unat_of_nat_eq[where 'a=32, unfolded word_bits_len_of] + shows + "ccorres (\r r'. r = unat (r' && mask msgLengthBits)) ret__unsigned_' + (valid_pspace' and case_option \ valid_ipc_buffer_ptr' buf + and (\s. offset < msgMaxLength)) + (UNIV \ {s. offset_' s = of_nat offset} \ {s. reg___unsigned_long_' s = v} + \ {s. receiver_' s = tcb_ptr_to_ctcb_ptr thread} + \ {s. 
receiveIPCBuffer_' s = option_to_ptr buf}) [] + (setMR thread buf offset v) (Call setMR_'proc)" + apply (rule ccorres_gen_asm) + apply (cinit lift: offset_' reg___unsigned_long_' receiver_' receiveIPCBuffer_') + apply (rule ccorres_cond2'[where R=\]) + apply (clarsimp simp add: msgRegisters_unfold n_msgRegisters_def Collect_const_mem + linorder_not_less word_le_nat_alt unat_of_nat32 + word_bits_def msgMaxLength_unfold) + apply arith + apply wpc + apply (simp add: option_to_ptr_def option_to_0_def Collect_False + ccorres_cond_iffs + del: Collect_const) + apply (rule ccorres_return_C, simp+)[1] + apply (simp add: option_to_ptr_def option_to_0_def Collect_True + ccorres_cond_iffs + del: Collect_const ptr_add_def') + apply (rule ccorres_cond_true) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ + apply (rule storeWordUser_ccorres) + apply ceqv + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) + apply (simp add: msgLengthBits_def msgMaxLength_def + unat_arith_simps less_mask_eq unat_of_nat + del: Collect_const) + apply ctac + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (simp del: Collect_const) + apply (vcg exspec=setRegister_modifies) + supply Word.of_int_uint[simp del] + apply (simp add: Collect_const_mem option_to_0_def + unat_gt_0 option_to_ptr_def) + apply (intro impI conjI allI; simp?) + apply (clarsimp simp: valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned) + apply (simp only: word_size_def is_aligned_mult_triv2[where n=3, simplified]) + apply (simp add: msg_align_bits_def word_size_bits_def) + apply (simp add: n_msgRegisters_def length_msgRegisters msgLengthBits_def mask_def) + apply (simp add: msg_align_bits word_size_def msgMaxLength_def unat_of_nat + length_msgRegisters n_msgRegisters_def uint_nat unat_word_ariths) + apply (simp add: unat_word_ariths msg_align_bits msgMaxLength_def + word_less_nat_alt unat_of_nat) + apply (simp add: unat_word_ariths msg_align_bits msgMaxLength_def + word_less_nat_alt unat_of_nat) + apply (clarsimp simp: valid_ipc_buffer_ptr'_def) + apply (simp add: unat_of_nat32 word_bits_def msgMaxLength_unfold + word_le_nat_alt msgRegisters_ccorres n_msgRegisters_def) + apply (simp add: unat_of_nat32 msgMaxLength_unfold word_bits_def + unat_add_lem[THEN iffD1] less_mask_eq msgLengthBits_def + word_less_nat_alt) + apply (simp add: linorder_not_le n_msgRegisters_def) + done + +lemma setMR_ccorres_dc: + "ccorres dc xfdc + (valid_pspace' and case_option \ valid_ipc_buffer_ptr' buf + and (\s. offset < msgMaxLength)) + (UNIV \ {s. offset_' s = of_nat offset} \ {s. reg___unsigned_long_' s = v} + \ {s. receiver_' s = tcb_ptr_to_ctcb_ptr thread} + \ {s. receiveIPCBuffer_' s = option_to_ptr buf}) [] + (setMR thread buf offset v) (Call setMR_'proc)" + by (rule ccorres_rel_imp, rule setMR_ccorres, simp) + +end + +(* FIXME: move *) +context begin interpretation Arch . (*FIXME: arch_split*) +crunch valid_pspace'[wp]: setMR "valid_pspace'" +crunch valid_ipc_buffer_ptr'[wp]: setMR "valid_ipc_buffer_ptr' p" +end + +context kernel_m begin + +lemma setMRs_lookup_failure_ccorres: + notes unat_of_nat32 = unat_of_nat_eq[where 'a=32, unfolded word_bits_len_of] + shows + "ccorres (\r r'. r \ [] \ last r = unat (r' && mask msgLengthBits)) + ret__unsigned_' + (valid_pspace' + and (case buf of None \ \ | Some x \ valid_ipc_buffer_ptr' x) + and (\_. 
n + length (msgFromLookupFailure lf) < msgMaxLength)) + (UNIV \ \\receiver = tcb_ptr_to_ctcb_ptr thread\ + \ \\receiveIPCBuffer = option_to_ptr buf\ + \ \map_option lookup_fault_to_H + (lookup_fault_lift \luf) = Some lf\ + \ \n = unat \offset\) hs + (mapM (\(x, y). setMR thread buf x y) + (zip [n ..< msgMaxLength] (msgFromLookupFailure lf))) + (Call setMRs_lookup_failure_'proc)" + apply (rule ccorres_gen_asm)+ + apply (cinit' lift: receiver_' receiveIPCBuffer_' luf_' offset_') + apply csymbr + apply (rule_tac P="valid_pspace' + and (case buf of None \ \ | Some x \ valid_ipc_buffer_ptr' x)" and P'=UNIV + in ccorres_inst) + apply (clarsimp simp: msgFromLookupFailure_def lookup_fault_lift_def + Let_def zip_upt_Cons msgMaxLength_unfold + mapM_Cons mapM_Nil bind_assoc + simp del: Collect_const + split: if_split_asm) + apply (rule ccorres_guard_imp2) + apply csymbr + apply csymbr + apply (ctac add: setMR_ccorres) + apply csymbr + apply (ccorres_rewrite) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (simp del: Collect_const) + apply (vcg exspec=setMR_modifies) + apply (clarsimp simp: msgMaxLength_unfold Collect_const_mem) + apply (simp add: lookup_fault_tag_defs) + apply (rule ccorres_guard_imp2) + apply csymbr + apply csymbr + apply (ctac add: setMR_ccorres) + apply csymbr + apply (simp add: ccorres_cond_iffs) + apply (ccorres_rewrite) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (ctac add: setMR_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply simp + apply (vcg exspec=setMR_modifies) + apply (wp hoare_case_option_wp) + apply (simp add: Collect_const_mem) + apply (vcg exspec=setMR_modifies) + apply (clarsimp simp: msgMaxLength_unfold Collect_const_mem) + apply (simp add: lookup_fault_missing_capability_lift_def + lookup_fault_lift_missing_capability) + apply (simp add: lookup_fault_tag_defs) + apply (rule ccorres_guard_imp2) + apply csymbr + apply csymbr + apply (ctac add: setMR_ccorres) + apply (simp add: ccorres_cond_iffs) + apply csymbr + apply (ccorres_rewrite) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (ctac add: setMR_ccorres_dc) + apply csymbr + apply (ctac add: setMR_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply simp + apply (vcg exspec=setMR_modifies) + apply (wp hoare_case_option_wp) + apply (simp add: Collect_const_mem) + apply (vcg exspec=setMR_modifies) + apply (wp hoare_case_option_wp) + apply (simp add: Collect_const_mem) + apply (vcg exspec=setMR_modifies) + apply (clarsimp simp: msgMaxLength_unfold Collect_const_mem) + apply (simp add: lookup_fault_depth_mismatch_lift_def + lookup_fault_lift_depth_mismatch) + apply (simp add: lookup_fault_tag_defs) + apply (rule ccorres_guard_imp2) + apply csymbr + apply csymbr + apply (ctac add: setMR_ccorres) + apply csymbr + apply (simp add: ccorres_cond_iffs) + apply (ccorres_rewrite) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (ctac add: setMR_ccorres_dc) + apply csymbr + apply (ctac add: setMR_ccorres_dc) + apply csymbr + apply (ctac add: setMR_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply (wp hoare_case_option_wp + | (simp add: Collect_const_mem, vcg exspec=setMR_modifies))+ + apply (clarsimp simp: msgMaxLength_unfold Collect_const_mem) + apply (simp add: lookup_fault_guard_mismatch_lift_def + lookup_fault_lift_guard_mismatch) + apply (simp add: lookup_fault_tag_defs) + apply simp + done + +lemma setMRs_syscall_error_ccorres: + "ccorres (\r r'. 
r = r' && mask msgLengthBits) ret__unsigned_long_' + (valid_pspace' + and (case buf of None \ \ | Some x \ valid_ipc_buffer_ptr' x) + and (\_. msg = snd (msgFromSyscallError err))) + (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ + \ \\receiveIPCBuffer = option_to_ptr buf\ + \ \syscall_error_to_H \current_syscall_error + (lookup_fault_lift \current_lookup_fault) + = Some err\) hs + (setMRs thread buf msg) + (Call setMRs_syscall_error_'proc)" + (is "ccorres ?r ret__unsigned_long_' ?P ?P' hs ?a ?c") + apply (rule ccorres_gen_asm) + apply (cinit') + apply (rule_tac xf' = "\s. current_syscall_error_' (globals s)" + in ccorres_abstract) + apply (rule ceqv_rules, rule rewrite_xfI, simp, rule ceqv_refl)+ + apply (rule ceqv_refl) + apply (rename_tac err') + apply (rule_tac xf' = "\s. current_lookup_fault_' (globals s)" + in ccorres_abstract) + apply (rule ceqv_rules, rule rewrite_xfI, simp, rule ceqv_refl)+ + apply (rule ceqv_refl) + apply (rename_tac luf') + apply (rule_tac P="Some err = syscall_error_to_H err' (lookup_fault_lift luf')" + in ccorres_gen_asm2) + apply (rule_tac A="?P" and A'="?P'" in ccorres_guard_imp2) + apply (simp add: setMRs_to_setMR del: Collect_const) + apply (rule ccorres_stateAssert) + apply (rule ccorres_Cond_rhs[rotated])+ + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply simp + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (simp add: syscall_error_to_H_def) + apply (simp_all add: syscall_error_to_H_def msgFromSyscallError_def + zipWithM_mapM mapM_Nil mapM_Cons + msgMaxLength_unfold zip_upt_Cons bind_assoc) + apply (ctac add:setMR_ccorres) + apply (rule ccorres_return_C,simp+)[1] + apply (wp | (simp add: Collect_const_mem, + vcg exspec=setMR_modifies exspec=setMRs_lookup_failure_modifies))+ + apply (subgoal_tac "msg = []") + apply (simp add: zipWithM_mapM mapM_Nil) + apply (rule ccorres_return_C, simp+)[1] + apply (simp split: if_split_asm) + apply (subgoal_tac "err = FailedLookup (to_bool (failedLookupWasSource_C err')) + (lookup_fault_to_H (the (lookup_fault_lift luf')))") + apply (simp add: zip_upt_Cons mapM_Cons bind_assoc) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: setMR_ccorres_dc) + apply (ctac add: setMRs_lookup_failure_ccorres[unfolded msgMaxLength_unfold]) + apply (rule ccorres_return_C, simp+)[1] + apply (wp hoare_case_option_wp + | (simp add: Collect_const_mem, + vcg exspec=setMR_modifies exspec=setMRs_lookup_failure_modifies))+ + apply (clarsimp simp: map_option_Some_eq2) + apply (rule ccorres_return_C, simp+)[1] + apply (rule ccorres_rhs_assoc + | (rule ccorres_inst, ctac add: setMR_ccorres_dc) + | (rule ccorres_inst, ctac add: setMR_ccorres) + | (rule ccorres_return_C, simp+)[1] + | wp hoare_case_option_wp + | (simp del: Collect_const, vcg exspec=setMR_modifies) + )+ + apply (simp add: msgMaxLength_unfold) + apply (clarsimp split:if_split_asm simp:syscall_error_to_H_def map_option_Some_eq2 ucast_and_mask ucast_nat_def) + apply (simp add: msgFromLookupFailure_def + split: lookup_failure.split + | simp add: to_bool_def split: if_split)+ + done + +lemma lookupIPCBuffer_aligned_option_to_0: + "\valid_objs'\ lookupIPCBuffer b s \\rv s. is_aligned (option_to_0 rv) msg_align_bits\" + apply (rule hoare_strengthen_post, rule lookupIPCBuffer_valid_ipc_buffer) + apply (simp add: option_to_0_def valid_ipc_buffer_ptr'_def split: option.split_asm) + done + +lemma Cond_if_mem: + "(Cond (if P then UNIV else {})) = (Cond {s. 
P})" + by simp + +lemma copyMRs_register_loop_helper: + fixes n + defines regs: "regs \ take (unat n) AARCH64_H.msgRegisters" + shows + "\i. i + ccorres dc xfdc \ \\i = of_nat i\ hs + (do v \ asUser sender (getRegister (regs ! i)); + asUser receiver (setRegister (regs ! i) v) + od) + (Guard ArrayBounds \\i < 4\ + (\ret__unsigned_long :== CALL getRegister(tcb_ptr_to_ctcb_ptr sender, + ucast (index msgRegistersC (unat \i))));; + Guard ArrayBounds \\i < 4\ + (CALL setRegister(tcb_ptr_to_ctcb_ptr receiver, + ucast (index msgRegistersC (unat \i)), + \ret__unsigned_long)))" + apply clarsimp + apply (rule ccorres_guard_imp) + apply ctac + apply ctac + apply wp + apply vcg + apply simp + apply (clarsimp simp: regs msgRegistersC_def msgRegisters_unfold) + apply (simp | + (case_tac i, + clarsimp simp: fupdate_def index_update index_update2 C_register_defs, + rename_tac i))+ + done + + +(* FIXME move *) +lemma copyMRs_ccorres [corres]: +notes + wordSize_def'[simp] +shows + "ccorres (\r r'. r = r' && mask msgLengthBits) ret__unsigned_long_' + (valid_pspace' and tcb_at' sender and tcb_at' receiver + and (case sendBuffer of None \ \ | Some x \ valid_ipc_buffer_ptr' x) + and (case recvBuffer of None \ \ | Some x \ valid_ipc_buffer_ptr' x) + and K (sendBuffer \ Some 0) and K (recvBuffer \ Some 0) + and K (unat n \ msgMaxLength)) + (UNIV \ \\n = \n && mask msgLengthBits \ \n = n\ + \ \\sendBuf = Ptr (option_to_0 sendBuffer)\ + \ \\recvBuf = Ptr (option_to_0 recvBuffer)\ + \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\) [] + (copyMRs sender sendBuffer receiver recvBuffer n) + (Call copyMRs_'proc)" + apply (unfold K_def, intro ccorres_gen_asm) + apply (cinit lift: n_' sendBuf_' recvBuf_' sender_' receiver_' + simp: whileAnno_def) + apply (simp only: mapM_discarded) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac P = "length (take (unat n) AARCH64_H.msgRegisters) < + 2 ^ word_bits" + in ccorres_gen_asm) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_mapM_x_while [OF copyMRs_register_loop_helper[unfolded ucast_id]]) + apply (clarsimp simp: n_msgRegisters_def + length_msgRegisters min_def + split: if_split) + apply unat_arith + apply vcg + apply simp + apply wp + apply assumption + apply ceqv + apply (wpc, simp add: option_to_0_def) + apply (rule ccorres_split_throws, rule ccorres_return_C, simp+) + apply vcg + apply (wpc, simp_all add: option_to_0_def)[1] + apply (rule ccorres_split_throws, rule ccorres_return_C, simp+) + apply vcg + apply (subst mapM_only_length) + apply (rule_tac P="unat n \ length AARCH64_H.msgRegisters" in ccorres_cases) + apply (simp add: upto_enum_def length_msgRegisters n_msgRegisters_def + mapM_x_Nil) + apply (rule ccorres_expand_while_iff_Seq[THEN iffD1]) + apply (rule ccorres_cond_false) + apply (rule ccorres_return_C, simp+) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac i="length AARCH64_H.msgRegisters" + and F="\_. valid_ipc_buffer_ptr' (the sendBuffer) + and valid_ipc_buffer_ptr' (the recvBuffer) + and valid_pspace'" + in ccorres_mapM_x_while') + apply clarsimp + apply (rule ccorres_guard_imp) + apply (rule ccorres_pre_loadWordUser) + apply (unfold storeWordUser_def) + apply (rule ccorres_pre_stateAssert) + apply (unfold K_bind_def) + apply (rule ccorres_move_array_assertion_ipc_buffer + ccorres_Guard[where S="{s. 
h_t_valid (htd s) c_guard (ptr s)}" for ptr htd])+ + apply (ctac add: storeWord_ccorres) + apply (clarsimp simp: word_size valid_ipc_buffer_ptr'_def + msg_align_bits + aligned_add_aligned[OF _ is_aligned_mult_triv2[where n=3, simplified]]) + apply (clarsimp simp: msgRegisters_unfold upto_enum_word word_size + pointerInUserData_h_t_valid pointerInUserData_c_guard + typ_heap_simps' + split: if_split_asm simp del: upt.simps) + apply (simp only: unat_arith_simps unat_of_nat msg_align_bits + msgMaxLength_def, simp split: if_split) + apply arith + apply (simp add: n_msgRegisters_def length_msgRegisters) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rule hoare_pre, wpsimp wp: hoare_valid_ipc_buffer_ptr_typ_at') + apply clarsimp + apply (simp add: length_msgRegisters n_msgRegisters_def msgMaxLength_def + word_bits_def) + apply ceqv + apply simp + apply (rule ccorres_return_C, simp_all)[1] + apply wp + apply clarsimp + apply (clarsimp simp: guard_is_UNIV_def upto_enum_def + min_def word_le_nat_alt + length_msgRegisters n_msgRegisters_def + msgLengthBits_def mask_def + linorder_not_le) + apply simp + apply (wp mapM_x_wp' hoare_vcg_all_lift hoare_vcg_const_imp_lift | simp)+ + apply (clarsimp simp: guard_is_UNIV_def + length_msgRegisters n_msgRegisters_def mask_def + Types_H.msgLengthBits_def min_def word_le_nat_alt + split: if_split) + apply unat_arith + apply (clarsimp simp: length_msgRegisters n_msgRegisters_def + msgLengthBits_def min_def word_bits_def) + apply (auto split: if_split) + done + +lemma getRestartPC_ccorres [corres]: + "ccorres (=) ret__unsigned_long_' \ + (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\) hs + (asUser thread (getRegister register.FaultIP)) + (Call getRestartPC_'proc)" + apply (cinit' lift: thread_') + apply (rule ccorres_trim_return, simp, simp) + apply ctac + apply clarsimp + done + +lemma asUser_tcbFault_obj_at: + "asUser t' m \obj_at' (\tcb. P (tcbFault tcb)) t\" + apply (simp add: asUser_def split_def) + apply (wp threadGet_wp) + apply (simp cong: if_cong) + done + +lemma asUser_atcbContext_obj_at: + "t \ t' \ asUser t' m \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + apply (simp add: asUser_def split_def atcbContextGet_def atcbContextSet_def) + apply (wp threadGet_wp) + apply simp + done + +lemma asUser_tcbFault_inv: + "\\s. \t. ko_at' t p' s \ tcbFault t = f\ asUser p m + \\rv s. \t. ko_at' t p' s \ tcbFault t = f\" + apply (rule_tac Q="\rv. obj_at' (\t. tcbFault t = f) p'" + in hoare_strengthen_post) + apply (wp asUser_tcbFault_obj_at) + apply (clarsimp simp: obj_at'_def)+ + done + +lemma setMR_atcbContext_obj_at: + "t \ t' \ + \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ + setMR t' b r v + \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + apply (simp add: setMR_def) + apply (rule hoare_pre) + apply (wp asUser_atcbContext_obj_at[simplified] | simp | wpc)+ + done + +lemma setMR_tcbFault_obj_at: + "\obj_at' (\tcb. P (tcbFault tcb)) t\ setMR t' b r v + \\rv. obj_at' (\tcb. P (tcbFault tcb)) t\" + apply (simp add: setMR_def) + apply (rule hoare_pre) + apply (wp asUser_tcbFault_obj_at | wpc)+ + apply simp + done + +(* FIXME move to Corres_C and remove from Tcb_C *) +lemma ccorres_abstract_known: + "\ \rv' t t'. ceqv \ xf' rv' t t' g (g' rv'); ccorres rvr xf P P' hs f (g' val) \ + \ ccorres rvr xf P (P' \ {s. 
xf' s = val}) hs f g" + apply (rule ccorres_guard_imp2) + apply (rule_tac xf'=xf' in ccorres_abstract) + apply assumption + apply (rule_tac P="rv' = val" in ccorres_gen_asm2) + apply simp + apply simp + done + +lemma ccorres_add_getRegister: + "ccorres rv xf P P' hs (asUser t (getRegister r) >>= (\_. a)) c + \ ccorres rv xf (P and tcb_at' t) P' hs a c" + apply (simp add: asUser_getRegister_discarded) + apply (simp add: ccorres_underlying_def) + apply (elim ballEI | clarsimp del: allI)+ + apply (drule mp) + apply (simp add: stateAssert_def bind_assoc exec_get) + apply (elim allEI) + apply (clarsimp simp: in_monad stateAssert_def split: xstate.split_asm) + apply fastforce + done + +lemma exceptionMessage_ccorres: + "n < unat n_exceptionMessage + \ register_from_H (AARCH64_H.exceptionMessage ! n) + = index exceptionMessageC n" + apply (simp add: exceptionMessageC_def AARCH64_H.exceptionMessage_def + AARCH64.exceptionMessage_def MessageID_Exception_def) + by (simp add: Arrays.update_def n_exceptionMessage_def fcp_beta nth_Cons' + fupdate_def C_register_defs + split: if_split) (* long *) + +lemma asUser_obj_at_elsewhere: + "\obj_at' (P :: tcb \ bool) t' and (\_. t \ t')\ asUser t m \\rv. obj_at' P t'\" + apply (rule hoare_gen_asm') + apply (simp add: asUser_def split_def) + apply (wp threadGet_wp) + apply clarsimp + done + +lemma exceptionMessage_length_aux : + "\n. n < length AARCH64_H.exceptionMessage \ n < unat n_exceptionMessage" + by (simp add: AARCH64.exceptionMessage_def AARCH64_H.exceptionMessage_def n_exceptionMessage_def) + +lemma copyMRsFault_ccorres_exception: + "ccorres dc xfdc + (valid_pspace' + and obj_at' (\tcb. map (user_regs (atcbContext (tcbArch tcb))) exceptionMessage = msg) sender + and K (length msg = 3) \ \length AARCH64.exceptionMessage\ + and K (recvBuffer \ Some 0) + and K (sender \ receiver)) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\ + \ \\receiveIPCBuffer = Ptr (option_to_0 recvBuffer)\ + \ \ \length___unsigned_long = 3 \ \ \length AARCH64.exceptionMessage\ + \ \ \id___anonymous_enum = MessageID_Exception \) + hs + (mapM_x (\(x, y). setMR receiver recvBuffer x y) (zip [0..<120] msg)) + (Call copyMRsFault_'proc)" + apply (unfold K_def) + apply (intro ccorres_gen_asm) + apply (cinit' lift: sender_' receiver_' receiveIPCBuffer_' + length___unsigned_long_' id___anonymous_enum_' + simp: whileAnno_def) + apply (simp only: mapM_x_append[where xs="take (unat n_msgRegisters) (zip as bs)" + and ys="drop (unat n_msgRegisters) (zip as bs)" + for as bs, simplified] bind_assoc) + apply (rule ccorres_rhs_assoc2, rule ccorres_split_nothrow_novcg) + + apply (rule_tac F="K $ obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) AARCH64_H.exceptionMessage = msg) sender" + in ccorres_mapM_x_while) + apply (clarsimp simp: n_msgRegisters_def) + apply (rule ccorres_guard_imp2) + apply (rule_tac t=sender and r="AARCH64_H.exceptionMessage ! n" + in ccorres_add_getRegister) + apply (ctac(no_vcg)) + apply (rule_tac P="\s. rv = msg ! 
n" in ccorres_cross_over_guard) + apply (simp add: setMR_def length_msgRegisters n_msgRegisters_def + liftM_def[symmetric]) + apply ctac + apply (wp user_getreg_rv) + apply (clarsimp simp: msgRegisters_ccorres n_msgRegisters_def + syscallMessage_ccorres n_syscallMessage_def + obj_at'_def projectKOs + atcbContextGet_def unat_of_nat64[unfolded word_bits_def]) + apply (clarsimp simp: exceptionMessage_ccorres[simplified,symmetric,OF exceptionMessage_length_aux]) + apply (clarsimp simp: word_of_nat_less MessageID_Exception_def) + apply (clarsimp simp: n_msgRegisters_def foo) + apply (rule allI, rule conseqPre, vcg exspec=setRegister_modifies exspec=getRegister_modifies) + apply simp + apply (simp add: setMR_def) + apply (rule hoare_pre) + apply (wp asUser_obj_at_elsewhere | wpc)+ + apply simp + apply (simp add: word_bits_def) + apply ceqv + apply (rule ccorres_Cond_rhs) + apply (simp del: Collect_const) + apply (simp add: n_msgRegisters_def mapM_x_Nil) + apply (subst ccorres_expand_while_iff[symmetric]) + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip') + apply (simp add: n_msgRegisters_def mapM_x_Nil) + apply (rule ccorres_return_Skip') + apply wp + apply (simp add: guard_is_UNIV_def) + apply (clarsimp) + done + +lemma mapM_cong: "\ \x. elem x xs \ f x = g x \ \ mapM_x f xs = mapM_x g xs" + by (induction xs, (simp add: mapM_x_Nil mapM_x_Cons)+) + +lemma copyMRsFault_ccorres_syscall: + "ccorres dc xfdc + (valid_pspace' + and obj_at' (\tcb. map (user_regs (atcbContext (tcbArch tcb))) syscallMessage = msg) sender + and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \) + and K (length msg = 12) \ \length AARCH64.syscallMessage\ + and K (recvBuffer \ Some 0) + and K (sender \ receiver)) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\ + \ \\receiveIPCBuffer = Ptr (option_to_0 recvBuffer)\ + \ \\length___unsigned_long = 12 \ \ \length AARCH64.syscallMessage\ + \ \ \id___anonymous_enum = MessageID_Syscall \) + hs + (mapM_x (\(x, y). setMR receiver recvBuffer x y) (zip [0..<120] msg)) + (Call copyMRsFault_'proc)" +proof - + (* auxiliary lemmas *) + have option_to_0_imp : "\ option_to_0 recvBuffer= 0 ; recvBuffer \ Some 0 \ \ recvBuffer = None" + by (simp add: option_to_0_def; cases recvBuffer; simp+) + have drop_n: "\n m. drop n [0..x y n m. elem (x, y) (zip [n..<120] m) \ \ x < n " + by (simp | erule in_set_zipE)+ + have msg_aux: "\p. elem p (zip [4..<120] (drop 4 msg)) + \ (\(x1,y1). setMR receiver None x1 y1) p = (\_ . return (length msgRegisters)) p" + by (fastforce simp add: numeral_eqs setMR_def less_than_4 n_msgRegisters_def length_msgRegisters + take_bit_Suc + simp del: unsigned_numeral) + have mapM_x_return_gen: "\v w xs. mapM_x (\_. return v) xs = return w" (* FIXME mapM_x_return *) + by (induct_tac xs; simp add: mapM_x_Nil mapM_x_Cons) + show ?thesis + including no_pre + apply (unfold K_def) + apply (intro ccorres_gen_asm) + apply (cinit' lift: sender_' receiver_' receiveIPCBuffer_' + length___unsigned_long_' id___anonymous_enum_' + simp: whileAnno_def) + apply (simp only: mapM_x_append[where xs="take (unat n_msgRegisters) (zip as bs)" + and ys="drop (unat n_msgRegisters) (zip as bs)" + for as bs, simplified] bind_assoc) + apply (rule ccorres_rhs_assoc2, rule ccorres_split_nothrow_novcg) + apply (rule_tac F="K $ obj_at' (\tcb. 
map ((user_regs o atcbContext o tcbArch) tcb) AARCH64_H.syscallMessage = msg) sender" + in ccorres_mapM_x_while) + apply (clarsimp simp: n_msgRegisters_def) + apply (rule ccorres_guard_imp2) + apply (rule_tac t=sender and r="AARCH64_H.syscallMessage ! n" + in ccorres_add_getRegister) + apply (ctac(no_vcg)) + apply (rule_tac P="\s. rv = msg ! n" in ccorres_cross_over_guard) + apply (simp add: setMR_def length_msgRegisters n_msgRegisters_def + liftM_def[symmetric]) + apply ctac + apply (wp user_getreg_rv) + apply (clarsimp simp: msgRegisters_ccorres n_msgRegisters_def + syscallMessage_ccorres n_syscallMessage_def + obj_at'_def projectKOs + atcbContextGet_def unat_of_nat64[unfolded word_bits_def]) + apply (clarsimp simp: word_of_nat_less MessageID_Syscall_def) + apply (simp add: n_msgRegisters_def) + apply (rule allI, rule conseqPre, vcg exspec=setRegister_modifies exspec=getRegister_modifies) + apply simp + apply (simp add: setMR_def) + apply (rule hoare_pre) + apply (wp asUser_obj_at_elsewhere | wpc)+ + apply simp + apply (simp add: word_bits_def) + apply ceqv + apply (rule ccorres_Cond_rhs) + apply (simp del: Collect_const) + apply (rule ccorres_rel_imp[where r = dc, simplified]) + apply (rule_tac F="\_. obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) AARCH64_H.syscallMessage = msg) + sender and valid_pspace' + and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" + in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) + apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters + option_to_0_def liftM_def[symmetric] + split: option.split_asm) + apply (rule ccorres_guard_imp2) + apply (rule_tac t=sender and r="AARCH64_H.syscallMessage ! (n + unat n_msgRegisters)" + in ccorres_add_getRegister) + apply (ctac(no_vcg)) + apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" + in ccorres_cross_over_guard) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ + apply (simp add: storeWordUser_def) + apply (rule ccorres_pre_stateAssert) + apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) + apply (simp add: pred_conj_def) + apply (wp user_getreg_rv) + apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def + syscallMessage_ccorres msgRegisters_ccorres + unat_add_lem[THEN iffD1] unat_of_nat64 + word_bits_def word_size_def) + apply (simp only:field_simps imp_ex imp_conjL) + apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def + pointerInUserData_h_t_valid + atcbContextGet_def + projectKOs objBits_simps word_less_nat_alt + unat_add_lem[THEN iffD1] unat_of_nat) + apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def + MessageID_Syscall_def + msg_align_bits valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned) + apply (rule aligned_add_aligned[where n=3]) + apply (simp add: is_aligned_def) + apply (rule is_aligned_mult_triv2 [where n=3, simplified]) + apply (simp add: wb_gt_2)+ + apply (simp add: n_msgRegisters_def) + apply (vcg exspec=getRegister_modifies) + apply simp + apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) + apply (rule hoare_pre) + apply (wp hoare_case_option_wp | wpc)+ + apply clarsimp + apply (simp add: n_msgRegisters_def word_bits_def) + apply (simp add: n_msgRegisters_def) + apply (frule (1) option_to_0_imp) + apply (subst drop_zip) + apply (subst drop_n) + apply (clarsimp simp: n_msgRegisters_def numeral_eqs + mapM_cong[OF msg_aux, simplified numeral_eqs]) + apply (subst mapM_x_return_gen[where w2="()"]) + apply (rule ccorres_return_Skip) + apply (clarsimp) + apply (rule hoare_impI) + apply (wp mapM_x_wp_inv setMR_atcbContext_obj_at[simplified atcbContextGet_def, simplified] + | clarsimp + | wpc)+ + apply (wp hoare_case_option_wp) + apply (clarsimp simp: guard_is_UNIV_def n_msgRegisters_def msgLengthBits_def + mask_def)+ + done + qed + +lemma Arch_setMRs_fault_ccorres: + "ccorres (\r r'. r = r' && mask msgLengthBits) ret__unsigned_long_' + (valid_pspace' and obj_at' (\tcb. tcbFault tcb = Some ft) sender + and K (ft = ArchFault aft) + and (case buffer of Some x \ valid_ipc_buffer_ptr' x | None \ \) + and K (buffer \ Some 0) + and K (sender \ receiver)) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\ + \ \\faultType = fault_to_fault_tag ft \ + \ \\receiveIPCBuffer = Ptr (option_to_0 buffer)\) hs + (makeArchFaultMessage aft sender >>= (\ms. setMRs receiver buffer (snd ms))) + (Call Arch_setMRs_fault_'proc)" +proof - + let ?obj_at_ft = "obj_at' (\tcb. 
tcbFault tcb = Some ft) sender" + note symb_exec_r_fault = ccorres_symb_exec_r_known_rv_UNIV + [where xf'=ret__unsigned_longlong_' and R="?obj_at_ft" and R'=UNIV] + show ?thesis + apply (unfold K_def) + apply (intro ccorres_gen_asm) + apply (cinit' lift: sender_' receiver_' faultType_' receiveIPCBuffer_') + apply (simp only: makeArchFaultMessage_def setMRs_to_setMR + del: Collect_const split del: if_split) + apply (rule_tac P="ft = ArchFault aft" in ccorres_gen_asm) + apply wpc + (* VMFault *) + apply (rename_tac list) + apply (rule_tac P="zip [Suc (Suc 0) ..< msgMaxLength] list = [(2, hd list), (3, hd (tl list))]" + in ccorres_gen_asm) + apply (simp add: Collect_True Collect_False ccorres_cond_iffs + zip_upt_Cons msgMaxLength_unfold + zipWithM_mapM mapM_Cons bind_assoc + seL4_Fault_tag_defs + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (ctac(no_vcg) add: getRestartPC_ccorres) + apply (rule ccorres_stateAssert) + apply (ctac(no_vcg) add: setMR_ccorres_dc) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="vmFaultAddress aft" in symb_exec_r_fault) + apply (rule conseqPre, vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps + cfault_rel_def seL4_Fault_lift_def + seL4_Fault_VMFault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (ctac(no_vcg) add: setMR_ccorres_dc) + apply (rule_tac val="hd (vmFaultArchData aft)" in symb_exec_r_fault) + apply (rule conseqPre, vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps') + apply (clarsimp simp: ctcb_relation_def + cfault_rel_def seL4_Fault_lift_def + seL4_Fault_VMFault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (ctac(no_vcg) add: setMR_ccorres_dc) + apply (rule_tac val="vmFaultArchData aft ! 
1" in symb_exec_r_fault) + apply (rule conseqPre, vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps') + apply (clarsimp simp: ctcb_relation_def + cfault_rel_def seL4_Fault_lift_def + seL4_Fault_VMFault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (ctac(no_vcg) add: setMR_ccorres) + apply (simp add: mapM_Nil) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (clarsimp simp: option_to_ptr_def) + apply (subgoal_tac "case list of [a, b] \ True | _ \ True") + apply (simp add: zip_upt_Cons guard_is_UNIVI seL4_VMFault_FSR_def + ucast_and_mask ucast_nat_def + split: list.split_asm) + apply (simp split: list.split) + apply (wp setMR_tcbFault_obj_at asUser_inv[OF getRestartPC_inv] + hoare_case_option_wp hoare_weak_lift_imp + | simp add: option_to_ptr_def guard_is_UNIVI + seL4_VMFault_PrefetchFault_def + seL4_VMFault_Addr_def + seL4_VMFault_IP_def + msgMaxLength_def + del: Collect_const + | wp (once) hoare_drop_imp)+ + (* VCPUFault *) + apply (simp add: Collect_True Collect_False ccorres_cond_iffs zip_upt_Cons msgMaxLength_unfold + zipWithM_mapM mapM_Cons bind_assoc seL4_Fault_tag_defs del: Collect_const) + apply (rule ccorres_stateAssert) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="vcpuHSR aft" in symb_exec_r_fault) + apply (rule conseqPre, vcg) + apply clarsimp + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps + cfault_rel_def seL4_Fault_lift_def + seL4_Fault_VCPUFault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (ctac(no_vcg) add: setMR_ccorres) + apply (simp add: mapM_Nil) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (clarsimp simp: option_to_ptr_def seL4_VCPUFault_HSR_def guard_is_UNIV_def ucast_and_mask) + + (* VPPIEvent *) + apply (simp add: Collect_True Collect_False ccorres_cond_iffs zip_upt_Cons msgMaxLength_unfold + zipWithM_mapM mapM_Cons bind_assoc seL4_Fault_tag_defs + del: Collect_const) + apply (rename_tac irq) + apply (rule ccorres_stateAssert) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="ucast (vppiIRQ aft)" in symb_exec_r_fault) + apply (rule conseqPre, vcg) + apply clarsimp + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + apply (rule context_conjI) + apply (clarsimp simp: ctcb_relation_def cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply (subgoal_tac "seL4_Fault_get_tag (tcbFault_C ko') = scast seL4_Fault_VPPIEvent") + apply (frule seL4_Fault_lift_VPPIEvent) + apply (clarsimp simp: seL4_Fault_VPPIEvent_lift_def) + apply (clarsimp simp: ctcb_relation_def is_cap_fault_def word_and_1 cfault_rel_def + split: if_split_asm option.splits) + apply (simp add: ucast_ucast_mask) + apply (clarsimp simp: ctcb_relation_def cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (ctac(no_vcg) add: setMR_ccorres) + apply (simp add: mapM_Nil) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (clarsimp simp: option_to_ptr_def seL4_VPPIEvent_IRQ_def guard_is_UNIV_def ucast_nat_def + ucast_and_mask) + +(* FIXME AARCH64 style issues: copied from ARM_HYP *) + (*VGICMaintenanceFault*) + apply (simp add: Collect_True Collect_False ccorres_cond_iffs zip_upt_Cons msgMaxLength_unfold + zipWithM_mapM mapM_Cons bind_assoc seL4_Fault_tag_defs + del: Collect_const) + apply (subst option.case_distrib[where h="\x. mapM (\(x, y). 
setMR receiver buffer x y) (zip [0..<120] x)"]) + apply (rule ccorres_stateAssert) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="case_option 0 (K 1) (vgicMaintenanceData aft)" in symb_exec_r_fault) + apply (rule conseqPre, vcg) + apply clarsimp + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + apply (rule conjI) + apply (clarsimp simp: ctcb_relation_def cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply (subgoal_tac "seL4_Fault_get_tag (tcbFault_C ko') = scast seL4_Fault_VGICMaintenance") + apply (frule seL4_Fault_lift_VGICMaintenance) + apply (clarsimp simp: seL4_Fault_VGICMaintenance_lift_def) + apply (clarsimp simp: ctcb_relation_def is_cap_fault_def word_and_1 cfault_rel_def + split: if_split_asm option.splits) + apply (clarsimp simp: ctcb_relation_def cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply wpc + (* x4 = None *) + apply (simp add: option_to_0_def zip_upt_Cons mapM_Cons mapM_Nil + del: Collect_const split: option.split_asm) + (* buffer = None *) + apply ccorres_rewrite + apply (rule_tac P="valid_pspace' + and (case buffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" and P'=UNIV + in ccorres_inst) + apply (rule ccorres_guard_imp) + apply (ctac(no_vcg) add: setMR_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (clarsimp simp: msgMaxLength_def) + apply (clarsimp simp: Collect_const_mem seL4_VGICMaintenance_IDX_def ucast_and_mask) + (* buffer = Some x2 *) + apply ccorres_rewrite + apply (rule_tac P="valid_pspace' + and (case buffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" and P'=UNIV + in ccorres_inst) + apply (rule ccorres_guard_imp) + apply (ctac(no_vcg) add: setMR_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (clarsimp simp: msgMaxLength_def) + apply (clarsimp simp: Collect_const_mem seL4_VGICMaintenance_IDX_def ucast_and_mask) + (* x4 = Some a *) + apply (clarsimp simp: option_to_0_def zip_upt_Cons mapM_Cons mapM_Nil + split: option.split_asm) + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="x2" in symb_exec_r_fault) + apply (rule conseqPre, vcg) + apply clarsimp + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + apply (rule conjI) + apply (clarsimp simp: ctcb_relation_def cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply (subgoal_tac "seL4_Fault_get_tag (tcbFault_C ko') = scast seL4_Fault_VGICMaintenance") + apply (frule seL4_Fault_lift_VGICMaintenance) + apply (clarsimp simp: seL4_Fault_VGICMaintenance_lift_def) + apply (clarsimp simp: ctcb_relation_def is_cap_fault_def word_and_1 cfault_rel_def + split: if_split_asm option.splits) + apply (clarsimp simp: ctcb_relation_def cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (rule_tac P="valid_pspace' + and (case buffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" and P'=UNIV + in ccorres_inst) + apply (rule ccorres_guard_imp) + apply (ctac(no_vcg) add: setMR_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (simp add: msgMaxLength_def Collect_const_mem seL4_VGICMaintenance_IDX_def ucast_and_mask)+ + apply (simp add: guard_is_UNIV_def) + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="x2" in symb_exec_r_fault) + apply (rule conseqPre, vcg) + apply clarsimp + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp 
simp: typ_heap_simps) + apply (rule conjI) + apply (clarsimp simp: ctcb_relation_def cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply (subgoal_tac "seL4_Fault_get_tag (tcbFault_C ko') = scast seL4_Fault_VGICMaintenance") + apply (frule seL4_Fault_lift_VGICMaintenance) + apply (clarsimp simp: seL4_Fault_VGICMaintenance_lift_def) + apply (clarsimp simp: ctcb_relation_def is_cap_fault_def word_and_1 cfault_rel_def + split: if_split_asm option.splits) + apply (clarsimp simp: ctcb_relation_def cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (rule ccorres_guard_imp) + apply (ctac(no_vcg) add: setMR_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (clarsimp simp: msgMaxLength_def Collect_const_mem seL4_VGICMaintenance_IDX_def) + apply assumption + apply clarsimp + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem seL4_VGICMaintenance_IDX_def + ucast_and_mask) + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp simp: msgMaxLength_def seL4_VMFault_IP_def option_to_ptr_def + pageBits_def mask_def Collect_const_mem | rule conjI + | (drule(1) obj_at_cslift_tcb, + clarsimp simp: typ_heap_simps ctcb_relation_def, + fastforce simp: typ_at_to_obj_at_arches elim: obj_at'_weakenE))+ + apply (subgoal_tac "[Suc (Suc 0)..<120] = 2#3#[Suc (Suc (Suc (Suc 0)))..<120]") + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: zip_Cons ctcb_relation_def + cfault_rel_def seL4_Fault_lift_def + seL4_Fault_VMFault_lift_def Let_def + split: if_split_asm) + apply (subst upt_rec, simp)+ + apply (clarsimp simp: msgMaxLength_def seL4_VMFault_IP_def option_to_ptr_def + pageBits_def mask_def Collect_const_mem | rule conjI + | (drule(1) obj_at_cslift_tcb, clarsimp simp: typ_heap_simps ctcb_relation_def, + fastforce simp: typ_at_to_obj_at_arches elim: obj_at'_weakenE))+ + done +qed + +lemma setMRs_fault_ccorres [corres]: + "ccorres (\r r'. r = r' && mask msgLengthBits) ret__unsigned_long_' + (valid_pspace' and obj_at' (\tcb. tcbFault tcb = Some ft) sender + and tcb_at' receiver + and (case buffer of Some x \ valid_ipc_buffer_ptr' x | None \ \) + and K (buffer \ Some 0) + and K (sender \ receiver)) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\ + \ \\receiveIPCBuffer = Ptr (option_to_0 buffer)\) hs + (makeFaultMessage ft sender >>= (\ms. setMRs receiver buffer (snd ms))) + (Call setMRs_fault_'proc)" +proof - + let ?obj_at_ft = "obj_at' (\tcb. 
tcbFault tcb = Some ft) sender" + note symb_exec_r_fault = ccorres_symb_exec_r_known_rv_UNIV + [where xf'=ret__unsigned_longlong_' and R="?obj_at_ft" and R'=UNIV] + note empty_fail_cond[simp] + show ?thesis + apply (unfold K_def) + apply (intro ccorres_gen_asm) + apply (cinit' lift: sender_' receiver_' receiveIPCBuffer_' simp: whileAnno_def) + apply (simp add: makeFaultMessage_def setMRs_to_setMR) + apply (rule_tac val="fault_to_fault_tag ft" in symb_exec_r_fault) + apply (vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps' + cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply (simp add: is_cap_fault_def) + apply ceqv + (* UserException *) + apply wpc + apply (simp add: bind_assoc seL4_Fault_tag_defs ccorres_cond_iffs + Collect_True Collect_False + zipWithM_mapM zip_append2 mapM_append) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_stateAssert) + apply (rule_tac P="length msg = unat n_exceptionMessage" + in ccorres_gen_asm) + apply (simp add: n_exceptionMessage_def msgMaxLength_unfold + zip_upt_Cons mapM_Nil mapM_Cons bind_assoc + mapM_discarded + del: Collect_const upt.simps upt_rec_numeral + split del: if_split) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: copyMRsFault_ccorres_exception) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="userExceptionNumber ft" in symb_exec_r_fault) + apply (vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps + cfault_rel_def seL4_Fault_lift_def + seL4_Fault_UserException_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (ctac add: setMR_ccorres_dc) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="userExceptionErrorCode ft" in symb_exec_r_fault) + apply (vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps + cfault_rel_def seL4_Fault_lift_def + seL4_Fault_UserException_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (ctac add: setMR_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (simp del: Collect_const) + apply (vcg exspec=setMR_modifies) + apply (clarsimp simp: option_to_ptr_def guard_is_UNIVI + ucast_and_mask ucast_nat_def) + apply (wp setMR_tcbFault_obj_at + hoare_case_option_wp) + apply simp + apply (vcg exspec=setMR_modifies) + apply (clarsimp simp: option_to_ptr_def guard_is_UNIVI) + apply (simp add: split_def) + apply (wp mapM_x_wp' setMR_tcbFault_obj_at + hoare_case_option_wp + | simp)+ + apply (vcg exspec=copyMRsFault_modifies exspect=setRegister_modifies) + apply (wp asUser_inv mapM_wp' getRegister_inv) + apply simp + apply (wp asUser_inv mapM_wp' getRegister_inv hoare_drop_imps asUser_const_rv + asUser_get_registers[simplified atcbContextGet_def comp_def]) + apply simp + (* CapFault *) + apply (simp add: Collect_True Collect_False ccorres_cond_iffs + zip_upt_Cons msgMaxLength_unfold + zipWithM_mapM mapM_Cons bind_assoc + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (ctac(no_vcg)) + apply (rule ccorres_stateAssert) + apply (ctac(no_vcg) add: setMR_ccorres_dc) + apply (rule_tac val="capFaultAddress ft" in symb_exec_r_fault) + apply (rule conseqPre, vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps' ctcb_relation_def + cfault_rel_def seL4_Fault_lift_def Let_def + seL4_Fault_CapFault_lift_def + split: if_split_asm) + apply ceqv + apply (ctac(no_vcg) add: setMR_ccorres_dc) + apply (rule_tac val="from_bool 
(capFaultInReceivePhase ft)" in symb_exec_r_fault) + apply (rule conseqPre, vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps' ctcb_relation_def + cfault_rel_def seL4_Fault_lift_def Let_def + seL4_Fault_CapFault_lift_def + from_bool_to_bool_and_1 word_size + split: if_split_asm) + apply ceqv + apply (ctac(no_vcg) add: setMR_ccorres_dc) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac P="obj_at' (\tcb. tcbFault tcb = Some ft) sender" + in ccorres_cross_over_guard) + apply (ctac(no_vcg) add: setMRs_lookup_failure_ccorres[unfolded msgMaxLength_unfold]) + apply simp + apply (rule ccorres_return_C, simp+)[1] + apply (wp setMR_tcbFault_obj_at hoare_case_option_wp)+ + apply (clarsimp simp: option_to_ptr_def Collect_const_mem guard_is_UNIV_def) + apply (rule conjI) + apply (simp add: seL4_CapFault_InRecvPhase_def) + apply (rule conjI) + apply (simp add: from_bool_def split: bool.split) + apply clarsimp + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps' ctcb_relation_def + cfault_rel_def seL4_Fault_lift_def Let_def + seL4_Fault_CapFault_lift_def is_cap_fault_def + seL4_CapFault_LookupFailureType_def + ucast_and_mask ucast_nat_def + split: if_split_asm) + apply (wp setMR_tcbFault_obj_at hoare_case_option_wp + asUser_inv[OF getRestartPC_inv] + | (rule guard_is_UNIVI, clarsimp simp: option_to_ptr_def + seL4_CapFault_Addr_def))+ + (* UnknownSyscall *) + apply (rename_tac syscall_number) + apply (simp add: seL4_Fault_tag_defs Collect_True Collect_False + ccorres_cond_iffs zipWithM_mapM mapM_append + zip_append2 bind_assoc + del: Collect_const) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_stateAssert) + apply (rule_tac P="length msg = unat n_syscallMessage" in ccorres_gen_asm) + apply (simp add: msgMaxLength_unfold n_syscallMessage_def zip_upt_Cons + mapM_Cons mapM_Nil mapM_discarded + del: Collect_const upt_rec_numeral) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: copyMRsFault_ccorres_syscall) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="syscall_number" in symb_exec_r_fault) + apply (vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps' + cfault_rel_def seL4_Fault_lift_def Let_def + seL4_Fault_UnknownSyscall_def + seL4_Fault_UnknownSyscall_lift_def + split: if_split_asm) + apply ceqv + apply (ctac add: setMR_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (simp del: Collect_const) + apply (vcg exspec=setMR_modifies) + apply (clarsimp simp: option_to_ptr_def guard_is_UNIVI ucast_and_mask ucast_nat_def) + apply (wp setMR_tcbFault_obj_at mapM_x_wp_inv hoare_case_option_wp + | clarsimp + | wpc)+ + apply (vcg exspec=copyMRsFault_modifies) + apply (wp asUser_inv mapM_wp' getRegister_inv) + apply simp + apply (wp asUser_inv mapM_wp' getRegister_inv hoare_drop_imps asUser_const_rv + asUser_get_registers[simplified atcbContextGet_def comp_def]) + apply simp + (* ArchFault *) + apply clarsimp + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="fault_to_fault_tag ft" in symb_exec_r_fault) + apply (vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps' + cfault_rel_def seL4_Fault_lift_def Let_def + seL4_Fault_UnknownSyscall_def + seL4_Fault_UnknownSyscall_lift_def + split: if_split_asm) + apply ceqv + apply (rule ccorres_add_return2) + apply (ctac add: Arch_setMRs_fault_ccorres[simplified setMRs_to_setMR 
+                                                           last.simps K_bind_def])
+          apply (ctac add: ccorres_return_C)
+         apply wpsimp
+        apply (vcg exspec=Arch_setMRs_fault_modifies)
+       apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem)
+       apply (rule fault_to_fault_tag.simps(2)[symmetric])
+      (* done *)
+      apply (rule guard_is_UNIVI, clarsimp simp: option_to_ptr_def seL4_CapFault_IP_def)
+     apply (clarsimp simp: msgMaxLength_unfold length_syscallMessage msgFromLookupFailure_def
+                           AARCH64_H.exceptionMessage_def
+                           AARCH64.exceptionMessage_def
+                           n_exceptionMessage_def
+                           n_syscallMessage_def)
+     apply (fastforce elim: obj_tcb_at' split: Fault_H.lookup_failure.splits)
+     done
+qed
+
+definition makeArchFaultMessage2 :: "arch_fault \<Rightarrow> machine_word" where
+  "makeArchFaultMessage2 \<equiv>
+     \<lambda>aft. case aft of VMFault _ _ \<Rightarrow> 5
+                     | VCPUFault _ \<Rightarrow> 7
+                     | VGICMaintenance _ \<Rightarrow> 6
+                     | VPPIEvent _ \<Rightarrow> 8"
+
+lemma makeFaultMessage2:
+  "makeFaultMessage ft thread
+   = (do x \<leftarrow> makeFaultMessage ft thread;
+         return (case ft of CapFault _ _ _ \<Rightarrow> 1
+                          | UnknownSyscallException _ \<Rightarrow> 2
+                          | UserException _ _ \<Rightarrow> 3
+                          | ArchFault aft \<Rightarrow> makeArchFaultMessage2 aft, snd x) od)"
+  by (cases ft)
+     (auto simp: makeFaultMessage_def makeArchFaultMessage2_def makeArchFaultMessage_def bind_assoc
+           split: fault.split arch_fault.split)+
+
+lemma doFaultTransfer_ccorres [corres]:
+  "ccorres dc xfdc (valid_pspace' and tcb_at' sender and tcb_at' receiver
+                    and (case buffer of Some x \<Rightarrow> valid_ipc_buffer_ptr' x | None \<Rightarrow> \<top>)
+                    and K (buffer \<noteq> Some 0) and K (receiver \<noteq> sender))
+    (UNIV \<inter> \<lbrace>\<acute>sender = tcb_ptr_to_ctcb_ptr sender\<rbrace>
+          \<inter> \<lbrace>\<acute>receiver = tcb_ptr_to_ctcb_ptr receiver\<rbrace>
+          \<inter> \<lbrace>\<acute>receiverIPCBuffer = Ptr (option_to_0 buffer)\<rbrace>
+          \<inter> \<lbrace>\<acute>badge = badge\<rbrace>) []
+    (doFaultTransfer badge sender receiver buffer)
+    (Call doFaultTransfer_'proc)"
+  apply (unfold K_def)
+  apply (intro ccorres_gen_asm)
+  apply (simp add: doFaultTransfer_def)
+  apply (subst makeFaultMessage2)
+  apply (simp only: makeArchFaultMessage2_def)
+  apply (cinit' lift: sender_' receiver_' receiverIPCBuffer_' badge_')
+   apply (rule ccorres_pre_threadGet)
+   apply (rename_tac ft)
+   apply wpc
+    apply (simp del: Collect_const, rule ccorres_fail)
+   apply (simp add: split_def bind_assoc del: Collect_const)
+   apply (simp only: bind_assoc[symmetric, where m="makeFaultMessage ft t" for ft t])
+   apply (ctac(no_vcg) add: setMRs_fault_ccorres)
+    apply (rule_tac R="obj_at' (\<lambda>tcb.
tcbFault tcb = ft) sender" + and val="case (the ft) of CapFault _ _ _ \ 1 + | ArchFault (VMFault _ _) \ 5 + | ArchFault (VGICMaintenance _) \ 6 + | ArchFault (VCPUFault _) \ 7 + | ArchFault (VPPIEvent _) \ 8 + | UnknownSyscallException _ \ 2 + | UserException _ _ \ 3" + in ccorres_symb_exec_r_known_rv_UNIV + [where xf'=ret__unsigned_longlong_' and R'=UNIV]) + apply (rule conseqPre, vcg, clarsimp) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps' ctcb_relation_def + cfault_rel_def seL4_Fault_lift_def Let_def + seL4_Fault_tag_defs + split: if_split_asm) + apply ceqv + apply csymbr + apply (ctac (no_vcg, c_lines 2) add: setMessageInfo_ccorres) + apply (ctac add: setRegister_ccorres) + apply wp + apply (simp add: badgeRegister_def AARCH64.badgeRegister_def AARCH64.capRegister_def + Kernel_C.badgeRegister_def "StrictC'_register_defs") + apply (clarsimp simp: message_info_to_H_def guard_is_UNIVI + mask_def msgLengthBits_def + split: fault.split arch_fault.split) + apply (wpsimp simp: setMRs_to_setMR zipWithM_mapM split_def wp: mapM_wp' setMR_tcbFault_obj_at)+ + apply assumption + apply (clarsimp simp: obj_at'_def projectKOs) + done + +lemma ccorres_emptyOnFailure: + assumes corr_ac: "ccorres (\f c. case f of Inl _ \ r [] c | Inr xs \ r xs c) + xf + P P' hs a c" + shows "ccorres r xf P P' hs (emptyOnFailure a) c" using corr_ac + apply (simp add: emptyOnFailure_def catch_def const_def bind_def split_def) + apply (clarsimp simp: ccorres_underlying_def return_def split: xstate.splits sum.splits) + apply (drule (1) bspec) + apply (rule conjI, clarsimp) + apply (erule_tac x=n in allE) + apply (rename_tac s) + apply (erule_tac x="Normal s" in allE) + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply clarsimp + apply (rule conjI, clarsimp simp: unif_rrel_def) + apply (clarsimp simp: unif_rrel_def) + apply fastforce + done + +lemma unifyFailure_ccorres: + assumes corr_ac: "ccorres (f \ r) xf P P' hs a c" + shows "ccorres ((\_. dc) \ r) xf P P' hs (unifyFailure a) c" + using corr_ac + apply (simp add: unifyFailure_def rethrowFailure_def const_def + handleE'_def throwError_def) + apply (clarsimp simp: ccorres_underlying_def bind_def split_def return_def + split: xstate.splits sum.splits) + apply (drule (1) bspec) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule_tac x=n in allE) + apply (rename_tac s) + apply (erule_tac x="Normal s" in allE) + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply (clarsimp simp: unif_rrel_def) + apply fastforce + apply fastforce + done + +definition +cct_relation:: "cap_transfer \ cap_transfer_C \ bool" +where +"cct_relation atc ctc \ + case atc of + (CT croot idx dpt) \ ctReceiveRoot_C ctc = croot \ + ctReceiveIndex_C ctc= idx \ + unat (ctReceiveDepth_C ctc) = dpt" + +lemma capTransferFromWords_ccorres [corres]: + "ccorres cct_relation ret__struct_cap_transfer_C_' + (valid_pspace' and K (is_aligned ptr 3)) + (UNIV \ \array_assertion (Ptr ptr :: machine_word ptr) 3 (hrs_htd \t_hrs)\ + \ \\wptr = Ptr ptr\) hs + (capTransferFromWords ptr) + (Call capTransferFromWords_'proc)" + apply (cinit lift: wptr_') + apply (rule ccorres_pre_loadWordUser)+ + apply (rule_tac P=\ + and P'="{s. 
array_assertion (Ptr ptr :: machine_word ptr) + 3 (hrs_htd (t_hrs_' (globals s))) \ + cslift s (Ptr ptr) = Some rv \ + cslift s (Ptr (ptr + 8)) = Some rva \ + cslift s (Ptr (ptr + 16)) = Some rvb}" + in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: typ_heap_simps' ptr_add_assertion_positive + array_assertion_shrink_right) + apply (simp add: return_def) + apply (simp add: cct_relation_def) + apply (clarsimp simp: word_size valid_ipc_buffer_ptr'_def wordSize_def') + apply safe + apply (erule aligned_add_aligned | simp add: is_aligned_def word_bits_conv)+ + done + +lemma array_assertion_shrink_left_add: + "array_assertion (Ptr ptr :: ('a :: wf_type) ptr) j htd + \ n = of_nat (k * size_of TYPE('a)) \ k + i \ j \ 0 < i + \ array_assertion (Ptr (ptr + n) :: 'a ptr) i htd" + apply (cases "k = 0") + apply (simp add: unat_eq_0) + apply (erule array_assertion_shrink_right, simp) + apply (drule_tac j=k in array_assertion_shrink_leftD) + apply simp + apply simp + apply (erule array_assertion_shrink_right) + apply arith + done + +lemma array_assertion_shrink_left_add_div: + "array_assertion (Ptr ptr :: ('a :: wf_type) ptr) j htd + \ n = of_nat (unat n div size_of TYPE('a)) * of_nat (size_of TYPE('a)) + \ i + (unat n div size_of TYPE('a)) \ j \ 0 < i + \ array_assertion (Ptr (ptr + n) :: 'a ptr) i htd" + apply (erule_tac k="unat n div size_of TYPE('a)" + in array_assertion_shrink_left_add) + apply simp + done + +lemma loadCapTransfer_ccorres [corres]: + "ccorres cct_relation ret__struct_cap_transfer_C_' + (valid_pspace' and valid_ipc_buffer_ptr' buffer) + (UNIV \ \\buffer = Ptr buffer\) hs + (loadCapTransfer buffer) + (Call loadCapTransfer_'proc)" + apply (cinit lift: buffer_') + apply (rule ccorres_Guard_Seq)+ + apply csymbr + apply (simp add: seL4_MsgLengthBits_def + seL4_MsgExtraCapBits_def + seL4_MsgMaxLength_def + ptr_add_assertion_positive) + apply (rule ccorres_move_array_assertion_ipc_buffer + ccorres_rhs_assoc)+ + apply (rule ccorres_add_return2) + apply (ctac(no_vcg)) + apply (rule ccorres_move_array_assertion_ipc_buffer) + apply simp + apply (ctac (no_vcg) add: ccorres_return_C) + apply (wp capTransferFromWords_inv | simp)+ + apply (clarsimp simp: word_size) + apply (simp add: seL4_MsgLengthBits_def + seL4_MsgExtraCapBits_def + seL4_MsgMaxLength_def + word_size word_sle_def + msgMaxLength_def msgMaxExtraCaps_def + msgLengthBits_def msgExtraCapBits_def + Collect_const_mem msg_align_bits) + apply (frule(1) valid_ipc_buffer_ptr_array[where p="Ptr p'" for p', simplified], + rule order_refl, simp_all add: msg_align_bits) + apply (clarsimp simp: valid_ipc_buffer_ptr'_def wordSize_def') + apply (subst array_assertion_shrink_left_add_div, assumption) + apply (simp add: msgMaxLength_def msgExtraCaps_def msgMaxExtraCaps_def msgExtraCapBits_def + shiftL_nat) + apply simp + apply (erule aligned_add_aligned, simp_all add: is_aligned_def msg_align_bits_def bit_simps) + done + +lemma loadCapTransfer_ctReceiveDepth: + "\\\ loadCapTransfer buffer \\rv s. ctReceiveDepth rv < 2 ^ word_bits\" + apply (simp add: loadCapTransfer_def capTransferFromWords_def) + apply wp + apply (rule_tac Q'="\_ _. True" in hoare_post_eq) + apply simp + apply (simp only: word_bits_len_of[symmetric]) + apply (subst unat_lt2p, simp) + apply wpsimp+ + done + +lemma getReceiveSlots_ccorres: + "ccorres (\a c. (a = [] \ (\slot. 
a = [slot])) \ + ((a \ []) = (c \ NULL)) \ (a\[] \ c = cte_Ptr (hd a) \ c \ NULL)) + ret__ptr_to_struct_cte_C_' + (valid_ipc_buffer_ptr' buffer and + valid_pspace' and + tcb_at' thread + and (\s. buffer \ 0)) + (UNIV \ \\buffer = Ptr buffer\ \ \\thread = tcb_ptr_to_ctcb_ptr thread\) hs + (getReceiveSlots thread (Some buffer)) + (Call getReceiveSlots_'proc)" + apply (rule ccorres_gen_asm) + apply (cinit lift: buffer_' thread_') + apply (simp add: split_def) + apply (ctac (no_vcg)) + apply (rule ccorres_emptyOnFailure) + apply csymbr + apply (rule ccorres_split_nothrowE) + apply (rule unifyFailure_ccorres) + apply (ctac (no_vcg) add: lookupCap_ccorres') + apply ceqv + apply simp + apply csymbr + apply (fold lookupTargetSlot_def) + apply (rule ccorres_split_nothrow_novcgE) + apply (rule unifyFailure_ccorres) + apply (ctac (no_vcg) add: lookupTargetSlot_ccorres') + apply ceqv + apply (rename_tac slot slot_c) + apply (simp add: liftE_bindE) + apply csymbr + apply (rule_tac P="cte_at' slot and no_0_obj'" + in ccorres_from_vcg_throws[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: in_monad cte_wp_at_ctes_of Bex_def in_getCTE2) + apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) + apply (clarsimp simp: typ_heap_simps' cap_get_tag_isCap in_monad + dest!: ccte_relation_ccap_relation) + apply (erule cte_at_0'[rotated], simp add: cte_wp_at_ctes_of) + apply clarsimp + apply (rule ccorres_split_throws) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def) + apply (rule conseqPre, vcg, clarsimp) + apply clarsimp + apply (wp lsfco_cte_at') + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp simp: guard_is_UNIV_def) + apply (rule UNIV_I) + apply simp + apply clarsimp + apply (rule ccorres_split_throws) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def) + apply (rule conseqPre, vcg, clarsimp) + apply clarsimp + apply wp + apply (vcg exspec=lookupCap_modifies) + apply clarsimp + apply (wp loadCapTransfer_ctReceiveDepth) + apply clarsimp + apply (intro conjI) + apply fastforce + apply fastforce + apply clarsimp + apply (simp add: cct_relation_def) + apply (case_tac rv, clarsimp) + apply (rule UNIV_I) \ \still a schematic here ...\ + done + + +lemma setExtraBadge_ccorres: + "ccorres dc xfdc + (valid_pspace' and valid_ipc_buffer_ptr' buffer and (\_. 
msg_max_length + 2 + n < unat max_ipc_words)) + (UNIV \ \\bufferPtr = Ptr buffer\ \ \\badge = badge\ \ \\i = of_nat n\) + hs + (setExtraBadge buffer badge n) + (Call setExtraBadge_'proc)" + apply (rule ccorres_gen_asm) + apply (cinit lift: bufferPtr_' badge_' i_') + apply (unfold storeWordUser_def) + apply (rule ccorres_pre_stateAssert) + apply (simp only: K_bind_def) + apply (rule ccorres_move_array_assertion_ipc_buffer + ccorres_Guard[where F=C_Guard] + ccorres_Guard[where F=SignedArithmetic] + )+ + apply (ctac add: storeWord_ccorres) + apply (clarsimp simp: bufferCPtrOffset_def word_size msgMaxLength_def wordSize_def' + seL4_MsgLengthBits_def seL4_MsgMaxLength_def Types_H.msgLengthBits_def + field_simps Collect_const_mem) + apply (subgoal_tac " is_aligned (buffer + (of_nat n * 8 + 0x3D0)) 3") + apply clarsimp + prefer 2 + apply (clarsimp simp: valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned, simp_all add: msg_align_bits) + apply (rule_tac n=3 in aligned_add_aligned, simp_all add: word_bits_def) + apply (rule is_aligned_mult_triv2 [where n = 3, simplified]) + apply (simp add: is_aligned_def) + apply (auto simp: pointerInUserData_c_guard pointerInUserData_h_t_valid + msg_align_bits max_ipc_words_def msg_max_length_def + capTransferDataSize_def msgMaxLength_def msgMaxExtraCaps_def + msgExtraCapBits_def unat_word_ariths unat_of_nat) + done + +(* FIXME: move *) +lemma ccorres_constOnFailure: + assumes corr_ac: "ccorres (\f c. case f of Inl x \ r n c | Inr n \ r n c) + xf + P P' hs a c" + shows "ccorres r xf P P' hs (constOnFailure n a) c" using corr_ac + apply (simp add: constOnFailure_def catch_def const_def bind_def split_def) + apply (clarsimp simp: ccorres_underlying_def return_def split: xstate.splits sum.splits) + apply (drule (1) bspec) + apply (rule conjI, clarsimp) + apply (erule_tac x=na in allE) + apply (rename_tac s) + apply (erule_tac x="Normal s" in allE) + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply clarsimp + apply (rule conjI, clarsimp simp: unif_rrel_def) + apply (clarsimp simp: unif_rrel_def) + apply fastforce + done + +(* FIXME: move *) +lemma ccorres_case_sum_liftE: + "ccorres r xf P P' hs H C \ ccorres (\a c. case_sum (\x. r' x c) (\y. r y c) a) xf P P' hs (liftE H) C" + apply (clarsimp simp: ccorres_underlying_def split: xstate.splits) + apply (drule (1) bspec) + apply (clarsimp simp: split_def liftE_def bind_def return_def) + apply (fastforce simp: unif_rrel_def) + done + +(* FIXME: move *) +lemma ccorres_case_bools_rhs: + assumes P: "ccorres r xf P P' hs a c" + assumes Q: "ccorres r xf Q Q' hs a c" + shows "ccorres r xf (P and Q) + ({s. s \ B \ s \ P'} \ {s. s \ B \ s \ Q'}) + hs a c" using P Q + apply (clarsimp simp: ccorres_underlying_def) + apply (drule (1) bspec)+ + apply clarsimp + apply (case_tac "b \ B", auto) + done + +(* FIXME: move *) +lemma ccorres_return_bind_add: + "ccorres r xf P P' hs (do z \ return (f x); H z od) C \ ccorres r xf P P' hs (H (f x)) C" + by simp + + +(* FIXME: move *) +lemma ccorres_if_cond_throws_break: + fixes e :: 'e + assumes abs: "\s s'. (s, s') \ sr \ Q s \ Q' s' \ P = (s' \ P')" + and ac: "P \ ccorres_underlying sr \ r xf arrel axf R R' (catchbrk_C # hs) a c" + and bd: "\ P \ ccorres_underlying sr \ r xf arrel axf U U' (catchbrk_C # hs) b d" + and cthrows: "\ \\<^bsub>/UNIV\<^esub> PT' c {}, UNIV" \ \c always throws\ + shows "ccorres_underlying sr \ r xf arrel axf + (Q and (\s. P \ R s) and (\s. \ P \ U s)) + (Collect Q' \ {s. 
(s \ P' \ s \ R' \ PT') \ (s \ P' \ s \ U')}) + (catchbrk_C # hs) + (if P then a else b) (Cond P' c SKIP ;; d)" + (is "ccorres_underlying sr \ r xf arrel axf ?G ?G' ?hs ?a ?c") +proof (cases P) + case True + + thus ?thesis + apply simp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_split_throws) + apply (rule ccorres_cond_true [OF ac [OF True]]) + apply (rule HoarePartial.Cond [where P = "P' \ PT'", OF _ cthrows]) + apply clarsimp + apply (rule HoarePartial.Skip) + apply (rule subset_refl) + apply (clarsimp simp: abs [rule_format, OF conjI]) + done +next + case False + + thus ?thesis + apply simp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return) + apply (rule ccorres_split_nothrow) + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (rule ceqv_refl) + apply (rule bd [OF False]) + apply wp + apply simp + apply (rule Cond_false) + apply (rule HoarePartial.Skip [OF subset_refl]) + apply (clarsimp simp: abs [rule_format, OF conjI]) + done +qed + + +(* FIXME: move *) +lemma ccorres_if_cond_throws_break2: + fixes e :: 'e + assumes abs: "\s s'. (s, s') \ sr \ Q s \ Q' s' \ (\ P) = (s' \ P')" + and ac: "\ P \ ccorres_underlying sr \ r xf arrel axf R R' (catchbrk_C # hs) a c" + and bd: "P \ ccorres_underlying sr \ r xf arrel axf U U' (catchbrk_C # hs) b d" + and cthrows: "\ \\<^bsub>/UNIV\<^esub> PT' c {}, UNIV" \ \c always throws\ + shows "ccorres_underlying sr \ r xf arrel axf + (Q and (\s. \ P \ R s) and (\s. P \ U s)) + (Collect Q' \ {s. (s \ P' \ s \ R' \ PT') \ (s \ P' \ s \ U')}) + (catchbrk_C # hs) + (if P then b else a) (Cond P' c SKIP ;; d)" + apply (subst if_swap) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_if_cond_throws_break [OF abs ac bd cthrows]) + apply assumption + apply simp + apply clarsimp + done + +declare scast_bit_test[simp] + +(* FIXME: move *) +lemma ccorres_split_when_throwError_cond_break: + fixes e :: 'e + assumes abs: "\s s'. (s, s') \ sr \ Q s \ Q' s' \ P = (s' \ P')" + and cc: "P \ ccorres_underlying sr \ r xf ar axf + R R' (catchbrk_C # hs) (throwError e) c" + and bd: "\ P \ ccorres_underlying sr \ r xf ar axf + U U' (catchbrk_C # hs) b d" + and cthrows: "\ \\<^bsub>/UNIV\<^esub> PT' c {}, UNIV" \ \c always throws\ + shows "ccorres_underlying sr \ r xf ar axf + (Q and (\s. P \ R s) and (\s. \ P \ U s)) + (Collect Q' \ {s. (s \ P' \ s \ R' \ PT') \ (s \ P' \ s \ U')}) + (catchbrk_C # hs) + (whenE P (throwError e) >>=E (\_. 
b)) (Cond P' c SKIP ;; d)" + apply (subst whenE_bindE_throwError_to_if) + apply (rule ccorres_if_cond_throws_break [OF abs cc bd cthrows]) + apply assumption + apply assumption + done + +lemma maskAsFull_isEndpoint[simp]: + "isEndpointCap a \ maskedAsFull a b = a" + by (clarsimp simp:isCap_simps maskedAsFull_def) + +lemma is_derived_capMasterCap: + "is_derived' m slot cap cap' + \ capMasterCap cap = capMasterCap cap'" + by (clarsimp simp:is_derived'_def badge_derived'_def) + +lemma maskedAsFull_misc: + "badge_derived' a (maskedAsFull a b)" + "capASID (maskedAsFull a b) = capASID a" + "cap_asid_base' (maskedAsFull a b) = cap_asid_base' a" + "cap_vptr' (maskedAsFull a b) = cap_vptr' a" + "capMasterCap (maskedAsFull a b) = capMasterCap a" + by (auto simp:maskedAsFull_def isCap_simps badge_derived'_def + split:if_split) + +lemma maskedAsFull_again: + "maskedAsFull (maskedAsFull aa aa) r = maskedAsFull aa aa" + apply (case_tac aa) + apply (simp_all add:maskedAsFull_def isCap_simps split: if_split)+ + done + +lemma ccap_relation_lift: + "ccap_relation cap cap' + \ (cap_to_H (the (cap_lift cap'))) = cap" + apply (case_tac "cap_lift cap'") + apply (auto simp:ccap_relation_def split:option.splits) + done + +lemma ccap_relation_inject: + "\ccap_relation acap cap; ccap_relation bcap cap\ \ acap = bcap" + apply (case_tac "cap_lift cap") + apply (auto simp:ccap_relation_def split:option.splits) + done + +lemma transferCapsLoop_ccorres: + assumes conds: + "rcv_buffer \ 0" + "ep \ Some 0" + defines "check1 \ + Guard ShiftError \0 <=s seL4_MsgExtraCapBits\ + (Guard ShiftError \seL4_MsgExtraCapBits + (\ret__int :== + (if \i < 2 ^ unat seL4_MsgExtraCapBits - 1 then 1 + else 0)))" + and "check2 \ \caps. + IF \ret__int \ 0 THEN + Guard ArrayBounds \\i < 3\ (\ret__int :== + (if index (excaprefs_C caps) (unat \i) \ cte_Ptr 0 then 1 + else 0)) + FI" + defines "W \ \ep caps. 
+ check1;; check2 caps;; + (While \\ret__int \ 0\ + (Guard ArrayBounds \\i < 3\ (\slot :== index (excaprefs_C caps) (unat \i));; + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \slot\ + (\cap :== h_val (hrs_mem \t_hrs) (cap_Ptr &(\slot\[''cap_C''])));; + \ret__unsigned_longlong :== CALL cap_get_capType(\cap);; + \ret__int :== (if \ret__unsigned_longlong = scast cap_endpoint_cap then 1 else 0);; + IF \ret__int \ 0 THEN + \ret__unsigned_longlong :== CALL cap_endpoint_cap_get_capEPPtr(\cap);; + \ret__int :== (if ep_Ptr \ret__unsigned_longlong = option_to_ptr ep then 1 else 0) + FI;; + IF \ret__int \ 0 THEN + \ret__unsigned_longlong :== CALL cap_endpoint_cap_get_capEPBadge(\cap);; + CALL setExtraBadge(Ptr rcv_buffer, ucast \ret__unsigned_longlong,\i);; + \ret__unsigned_longlong :== CALL seL4_MessageInfo_get_capsUnwrapped(\info);; + Guard ShiftError \unat \i < 31\ + (\info :== CALL seL4_MessageInfo_set_capsUnwrapped(\info, + \ret__unsigned_longlong || scast ((1 :: sword32) << unat \i))) + ELSE + lvar_nondet_init dc_ret_' dc_ret_'_update;; + IF \destSlot = cte_Ptr 0 THEN + break_C + FI;; + \dc_ret :== CALL deriveCap(\slot,\cap);; + IF deriveCap_ret_C.status_C \dc_ret \ scast EXCEPTION_NONE THEN + break_C + FI;; + \ret__unsigned_longlong :== CALL cap_get_capType(deriveCap_ret_C.cap_C \dc_ret);; + IF \ret__unsigned_longlong = scast cap_null_cap THEN + break_C + FI;; + CALL cteInsert(deriveCap_ret_C.cap_C \dc_ret,\slot,\destSlot);; + \destSlot :== cte_Ptr 0 + FI;; + \i :== \i + 1;; check1;; check2 caps))" + defines "precond n mi slots \ (UNIV \ \\i = of_nat n\ + \ \mi = message_info_to_H (\info)\ + \ \\destSlot = (if slots = [] then NULL else cte_Ptr (hd slots)) + \ length slots \ 1 \ slots \ [0]\)" + defines "is_the_ep \ \cap. isEndpointCap cap \ ep \ None \ capEPPtr cap = the ep" + defines "stable_masked \ \scap excap. excap \ scap \ excap = maskedAsFull scap scap" + defines "relative_at \ \scap slot s. cte_wp_at' + (\cte. badge_derived' scap (cteCap cte) \ + capASID scap = capASID (cteCap cte) \ + cap_asid_base' scap = cap_asid_base' (cteCap cte) \ + cap_vptr' scap = cap_vptr' (cteCap cte)) slot s" + shows "drop n (interpret_excaps caps') = excaps_map caps + \ n \ length (interpret_excaps caps') + \ ccorresG rf_sr \ + (\r (i, info). r = msgExtraCaps_update (\_. i) (message_info_to_H info) + \ i \ 3) (\s. (i_' s, info_' s)) + (valid_pspace' and valid_ipc_buffer_ptr' rcv_buffer and + (\s. (\x \ set caps. s \' fst x + \ cte_wp_at' (\cte. slots \ [] \ is_the_ep (cteCap cte) + \ (fst x) = (cteCap cte)) (snd x) s + \ cte_wp_at' (\cte. fst x \ NullCap \ stable_masked (fst x) (cteCap cte)) (snd x) s)) and + (\s. \ sl \ (set slots). cte_wp_at' (isNullCap o cteCap) sl s) and + (\_. n + length caps \ 3 \ distinct slots )) + (precond n mi slots) + [catchbrk_C] + (transferCapsToSlots ep rcv_buffer n caps slots mi) + (W ep caps')" +unfolding W_def check1_def check2_def split_def +proof (rule ccorres_gen_asm, induct caps arbitrary: n slots mi) + note if_split[split] + case Nil + thus ?case + apply (simp only: transferCapsToSlots.simps) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ + apply (rule ccorres_rhs_assoc2, rule ccorres_symb_exec_r) + apply (rule ccorres_expand_while_iff [THEN iffD1]) + apply (rule ccorres_cond_false) + apply (rule_tac P="\_. 
n \ 3" and P'="\\i=of_nat n \ mi=message_info_to_H \info\" + in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def msgExtraCapBits_def word_le_nat_alt unat_of_nat) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (simp add: seL4_MsgExtraCapBits_def) + apply (clarsimp simp: excaps_map_def seL4_MsgExtraCapBits_def word_sle_def + precond_def) + apply (subst interpret_excaps_test_null; clarsimp simp: unat_of_nat elim!: le_neq_trans) + done +next + note if_split[split] + case (Cons x xs') + let ?S="\\i=of_nat n \ mi=message_info_to_H \info\" + have n3: "n \ 3" using Cons.prems by simp + hence of_nat_n3[intro!]: "of_nat n \ (3 :: machine_word)" + by (simp add: word_le_nat_alt unat_of_nat) + have drop_n_foo: "\xs n y ys. drop n xs = y # ys + \ \xs'. length xs' = n \ xs = xs' @ (y # ys)" + apply (frule_tac f=length in arg_cong, simp(no_asm_use)) + apply (cut_tac n=n and xs=xs in append_take_drop_id) + apply (rule_tac x="take n xs" in exI) + apply simp + done + + have ep_cap_not_null: "\cap. isEndpointCap cap \ cap \ NullCap" + by (clarsimp simp: isCap_simps) + + have is_the_ep_maskedAsFull[simp]:"\a b. is_the_ep (maskedAsFull a b) = is_the_ep a" + apply (case_tac a) + apply (simp add:maskedAsFull_def is_the_ep_def isCap_simps)+ + done + + have is_the_ep_fold: + "(isEndpointCap (fst x) \ (\y. ep = Some y) \ capEPPtr (fst x) = the ep) + = is_the_ep (fst x)" + by (simp add:is_the_ep_def) + + have relative_fold: + "\scap slot s. (cte_wp_at' + (\cte. badge_derived' scap (cteCap cte) \ + capASID scap = capASID (cteCap cte) \ + cap_asid_base' scap = cap_asid_base' (cteCap cte) \ + cap_vptr' scap = cap_vptr' (cteCap cte)) slot s) = relative_at scap slot s" + by (simp add:relative_at_def) + + have stableD: + "\scap excap. stable_masked scap excap + \ (badge_derived' scap excap \ + capASID scap = capASID excap \ + cap_asid_base' scap = cap_asid_base' excap \ cap_vptr' scap = cap_vptr' excap)" + apply (clarsimp simp:stable_masked_def) + apply (case_tac "excap = scap",simp+) + apply (simp add:maskedAsFull_misc) + done + + have stable_eq: + "\scap excap. \stable_masked scap excap; isEndpointCap excap\ \ scap = excap" + by (simp add:isCap_simps stable_masked_def maskedAsFull_def split:if_splits) + + have is_the_ep_stable: + "\a b. \a \ NullCap \ stable_masked a b; \ is_the_ep b \ \ \ is_the_ep a" + apply (clarsimp simp:stable_masked_def maskedAsFull_def is_the_ep_def isCap_simps split:if_splits) + apply auto + done + + have is_the_ep_maskCapRights: + "\rights cap. is_the_ep (maskCapRights rights cap) = is_the_ep cap" + apply (case_tac "isEndpointCap cap") + apply (simp_all add:is_the_ep_def maskCapRights_def) + apply (clarsimp simp:isCap_simps) + apply (case_tac cap) + apply (simp_all add:isCap_simps) + apply (rename_tac acap) + apply (case_tac acap) + apply (clarsimp simp: AARCH64_H.maskCapRights_def isFrameCap_def)+ + done + + have is_the_ep_deriveCap: + "\slot cap. \\s. \ (is_the_ep cap)\ deriveCap slot cap \\rv s. \ (is_the_ep rv)\, -" + apply (case_tac cap) + apply (simp_all add:deriveCap_def Let_def isCap_simps is_the_ep_def) + apply (wp,clarsimp)+ + defer + apply (wp,clarsimp)+ + apply (rename_tac acap) + apply (case_tac acap) + apply (simp_all add: AARCH64_H.deriveCap_def Let_def isCap_simps is_the_ep_def) + apply (wp |clarsimp|rule conjI)+ + done + + have mask_right_eq_null: + "\r cap. 
(maskCapRights r cap = NullCap) = (cap = NullCap)" + apply (case_tac cap) + apply (simp_all add:maskCapRights_def isCap_simps) + apply (rename_tac acap) + apply (case_tac acap) + apply (simp add: AARCH64_H.maskCapRights_def isFrameCap_def)+ + done + + have scast_2n_eq: + "n \ 2 \ SCAST(32 signed \ 64) (1 << n) = (1 << n)" + apply (case_tac "n=0"; simp) + apply (case_tac "n=1"; simp) + by (case_tac "n=2"; simp) + + note if_split[split del] + note if_cong[cong] + note extra_sle_sless_unfolds [simp del] + from Cons.prems + show ?case + apply (clarsimp simp: Let_def word_sle_def[where b=5] split_def + cong: call_ignore_cong + simp del: Collect_const) + apply (rule ccorres_constOnFailure) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_expand_while_iff [THEN iffD1]) + apply (rule ccorres_cond_true) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac xf'=i_' in ccorres_abstract, ceqv) + apply (rule ccorres_Guard_Seq) + apply csymbr + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=cap_' in ccorres_abstract,ceqv) + apply (rename_tac n' cap') + apply (rule_tac P="\acap. ccap_relation acap cap' + \ (isEndpointCap acap \ capEPPtr acap \ 0)" in ccorres_gen_asm2) + apply csymbr+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_seq_cond_raise[THEN iffD2]) + apply (rule_tac xf'=ret__int_' in ccorres_abstract_known, ceqv) + apply (rule ccorres_cond2[where R=\]) + apply (clarsimp simp: Collect_const_mem) + apply (rule sym, rule from_bool_neq_0) + + \ \case where a badge is sent\ + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (simp only: Let_def liftE_bindE withoutFailure_def fun_app_def) + apply (ctac add: setExtraBadge_ccorres) + apply (simp only: K_bind_def) + apply (rule ccorres_case_sum_liftE) + apply (csymbr, rule ccorres_abstract_cleanup) + apply (rule ccorres_Guard_Seq) + apply (csymbr, rule ccorres_abstract_cleanup) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_rhs_assoc2) + apply (rule Cons.hyps) + apply (clarsimp simp: excaps_map_def dest!: drop_n_foo) + apply simp + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (wp hoare_vcg_ball_lift) + apply (simp split del: if_split) + apply (vcg exspec=setExtraBadge_modifies) + + \ \case where a cap is sent (or rather a send is attempted)\ + apply (simp add: split_def del: Collect_const + cong: call_ignore_cong split del: if_split) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply wpc + apply (rule ccorres_cond_true_seq) + apply (simp add: returnOk_liftE) + apply (rule ccorres_case_sum_liftE) + apply (rule ccorres_split_throws) + apply (rule_tac P=\ and P'="?S" in ccorres_break_return) + apply clarsimp + apply simp + apply vcg + apply (rule ccorres_cond_false_seq) + apply (simp) + + \ \case not diminish\ + apply (rule ccorres_split_nothrowE) + apply (rule unifyFailure_ccorres) + apply (ctac add: deriveCap_ccorres') + apply ceqv + apply (simp only: refl not_True_eq_False Collect_False ccorres_seq_simps) + apply csymbr + apply (rule_tac Q=\ and Q'=\ in ccorres_split_when_throwError_cond_break) + apply (clarsimp simp: cap_get_tag_isCap Collect_const_mem) + apply (rule_tac P=\ and P'="?S" in ccorres_break) + apply clarsimp + apply simp + apply (simp(no_asm) add: liftE_bindE split del: if_split) + apply (ctac add: cteInsert_ccorres) + apply (rule ccorres_case_sum_liftE) + apply csymbr + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac P="ccorresG rf_sr \ r xf Pre Pre' hs a" for r xf Pre Pre' hs a in rsubst) + apply (rule 
Cons.hyps) + apply (clarsimp simp: excaps_map_def dest!: drop_n_foo) + apply simp + apply simp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (rule cteInsert_assume_Null) + apply simp + apply (wp cteInsert_valid_pspace hoare_valid_ipc_buffer_ptr_typ_at' + hoare_vcg_const_Ball_lift cteInsert_cte_wp_at) + apply (vcg exspec=cteInsert_modifies) + apply vcg + apply (simp) + apply (rule ccorres_split_throws) + apply (rule_tac P=\ and P'="?S" in ccorres_break) + apply clarsimp + apply simp + apply vcg + apply wp + apply (simp cong: conj_cong) + apply (rule_tac Q' ="\rcap s. s \' rcap + \ (rcap\ NullCap \ cte_wp_at' (is_derived' (ctes_of s) (snd x) rcap \ cteCap) (snd x) s) + \ valid_pspace' s \ valid_ipc_buffer_ptr' rcv_buffer s \ length slots = 1 + \ \ is_the_ep rcap + \ (\x\set slots. cte_wp_at' (isNullCap \ cteCap) x s) + \ (\x\set xs'. s \' fst x + \ cte_wp_at' (\c. is_the_ep (cteCap c) \ fst x = cteCap c) (snd x) s + \ cte_wp_at' (\c. fst x \ NullCap \ stable_masked (fst x) (cteCap c)) (snd x) s)" + in hoare_strengthen_postE_R) + prefer 2 + apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_splits valid_pspace_canonical' + is_derived_capMasterCap image_def) + apply (clarsimp split:if_splits) + apply (rule conjI) + apply clarsimp+ + apply (rule conjI) + apply (drule(1) bspec)+ + apply (rule conjI | clarsimp)+ + apply (clarsimp simp:is_the_ep_def isCap_simps stable_masked_def) + apply (drule(1) bspec)+ + apply (rule conjI | clarsimp)+ + apply (clarsimp simp:is_the_ep_def stable_masked_def split:if_splits)+ + apply (case_tac "a = cteCap cteb",clarsimp) + apply (simp add:maskedAsFull_def split:if_splits) + apply (simp add:maskedAsFull_again) + apply (wp deriveCap_derived is_the_ep_deriveCap) + apply (vcg exspec=deriveCap_modifies) + + \ \remaining non ccorres subgoals\ + apply (clarsimp simp: Collect_const_mem if_1_0_0 + split del: if_split) + apply (rule_tac Q'="\\ret__int = from_bool (cap_get_tag cap' = scast cap_endpoint_cap + \ ep_Ptr (capEPPtr_CL (cap_endpoint_cap_lift cap')) = option_to_ptr ep) + \ n' = of_nat n \ ((slots \ [] \ isEndpointCap (fst x) \ is_the_ep (cap_to_H (the (cap_lift cap')))) + \ ccap_relation (fst x) cap' ) + \ (isEndpointCap (fst x) \ capEPPtr (fst x) \ 0)\ + \ precond n mi slots" + in conseqPost[OF _ _ order_refl]) + apply vcg + apply (rule subsetI) + apply (clarsimp simp: word_of_nat_less from_bool_0 precond_def + cap_get_tag_isCap unat_of_nat) + apply (rule conjI) + apply (clarsimp simp: cap_get_tag_EndpointCap cap_get_tag_isCap[symmetric] + ep_cap_not_null) + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (simp add: message_info_to_H_def word_ao_dist) + apply (fold shiftl_1)[1] + apply (subst scast_2n_eq, simp) + apply (subst and_mask_eq_iff_shiftr_0[THEN iffD2], + subst shiftl_shiftr2, simp, simp) + apply (simp add: seL4_MessageInfo_lift_def word_bw_assocs + word_sle_def t2n_mask_eq_if) + apply (rule conjI) + apply (clarsimp simp: ccap_rights_relation_def cap_rights_to_H_def + allRights_def excaps_map_def split_def + dest!: drop_n_foo interpret_excaps_eq) + apply (clarsimp simp:from_bool_def split:bool.splits) + apply (case_tac "isEndpointCap (fst x)") + apply (clarsimp simp: cap_get_tag_EndpointCap ep_cap_not_null cap_get_tag_isCap[symmetric]) + apply (clarsimp simp: option_to_ptr_def option_to_0_def split:option.splits) + apply (clarsimp simp: cap_get_tag_EndpointCap ep_cap_not_null cap_get_tag_isCap[symmetric]) + apply (case_tac "ccap_relation (fst x) cap'") + apply (simp add:ccap_relation_lift) + apply (drule(1) 
ccap_relation_inject) + apply (drule sym[where t = "fst x"]) + apply (clarsimp simp:isCap_simps) + apply (clarsimp simp: is_the_ep_def ccap_relation_lift isCap_simps) + apply (clarsimp simp: option_to_ptr_def option_to_0_def split:option.splits) + apply (clarsimp simp:option_to_ptr_def option_to_0_def) + apply (case_tac "isEndpointCap (fst x)") + apply (clarsimp simp: isCap_simps) + apply (drule_tac acap = acap in ccap_relation_inject) + apply assumption + apply clarsimp + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 split:option.splits) + apply (clarsimp simp: cap_endpoint_cap_lift_def option_to_ptr_def + option_to_0_def cap_to_H_def Let_def split:cap_CL.splits split:if_splits) + apply clarsimp + apply (simp only:badge_derived_mask capASID_mask cap_asid_base_mask' + cap_vptr_mask' maskCap_valid mask_right_eq_null) + apply (simp only:is_the_ep_fold relative_fold) + apply (clarsimp simp:Collect_const_mem if_1_0_0 + split del:if_split) + apply (rule conseqPre, vcg, clarsimp simp: Collect_const_mem) + apply (clarsimp simp: if_1_0_0 Collect_const_mem + trans[OF eq_commute from_bool_eq_if] + from_bool_0 + split del: if_split simp del: Collect_const) + apply vcg + apply (simp only:is_the_ep_fold) + apply (clarsimp simp:Collect_const_mem if_1_0_0 + split del:if_split) + apply (rule conseqPre, vcg) + apply (clarsimp split del: if_split) + apply (clarsimp split del: if_split + simp add: Collect_const[symmetric] precond_def + simp del: Collect_const) + apply (rule HoarePartial.Seq[rotated] HoarePartial.Cond[OF order_refl] + HoarePartial.Basic[OF order_refl] HoarePartial.Skip[OF order_refl] + HoarePartial.Guard[OF order_refl])+ + apply (simp only:is_the_ep_fold) + apply (clarsimp simp:Collect_const_mem if_1_0_0 + split del:if_split) + apply (rule conseqPre, vcg, rule subsetI, clarsimp) + apply (simp only:is_the_ep_fold) + apply (clarsimp simp: Collect_const_mem seL4_MsgExtraCapBits_def + word_sle_def if_1_0_0 precond_def + msg_max_length_def max_ipc_words word_of_nat_less + excaps_map_def unat_of_nat valid_pspace'_def + cte_wp_at_ctes_of + dest!: drop_n_foo interpret_excaps_eq) + apply (frule(1) ctes_of_valid') + apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) + apply (clarsimp simp: typ_heap_simps' split_def is_the_ep_maskCapRights) + apply (frule ccte_relation_ccap_relation) + apply (intro conjI impI) + apply (intro allI impI) + apply clarsimp + apply fastforce + apply (subgoal_tac "fst x = cteCap cte",simp) + apply clarsimp + apply (elim disjE) + apply (clarsimp simp:ep_cap_not_null stable_masked_def) + apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) + apply (clarsimp simp:valid_cap_simps' isCap_simps) + apply (subgoal_tac "slots \ []") + apply simp + apply clarsimp + apply (elim disjE) + apply (clarsimp simp:ep_cap_not_null stable_masked_def) + apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) + apply (clarsimp dest!:ccap_relation_lift simp:cap_get_tag_isCap is_the_ep_def) + apply (clarsimp simp:valid_cap_simps' isCap_simps) + apply (intro exI conjI,assumption) + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 + isCap_simps valid_cap_simps')+ + done +qed + +lemma cte_wp_at_imp_consequent': + "cte_wp_at' Q p s \ cte_wp_at' (\cte. P cte \ Q cte) p s" + by (clarsimp simp: cte_wp_at_ctes_of) + +lemma lookupExtraCaps_srcs2: + "\\\ lookupExtraCaps t buf mi \\caps s. \x \ set caps. cte_wp_at' + (\cte. 
cteCap cte = fst x) (snd x) s\,-" + apply (simp add: lookupExtraCaps_def lookupCapAndSlot_def + split_def lookupSlotForThread_def + getSlotCap_def) + apply (wp mapME_set[where R=\] getCTE_wp' + | simp add: cte_wp_at_ctes_of + | wp (once) hoare_drop_imps + | (rule hoare_strengthen_post [OF hoare_TrueI], rule allI, rule impI, rule TrueI))+ + done + +lemma transferCaps_ccorres [corres]: + notes if_cong[cong] + notes extra_sle_sless_unfolds[simp del] + shows + "ccorres (\r r'. r = message_info_to_H r') ret__struct_seL4_MessageInfo_C_' + (valid_pspace' and tcb_at' receiver + and (case_option \ valid_ipc_buffer_ptr') receiveBuffer + and (excaps_in_mem caps \ ctes_of) + and K (length caps \ 3) + and K (ep \ Some 0) + and K (receiveBuffer \ Some 0) + and K (unat (msgExtraCaps mi) \ 3)) + (UNIV \ \interpret_excaps (\current_extra_caps) = excaps_map caps\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\ + \ \ mi = message_info_to_H \info\ + \ \\receiveBuffer = Ptr (option_to_0 receiveBuffer)\ + \ \\endpoint = option_to_ptr ep\) [] + (transferCaps mi caps ep receiver receiveBuffer) + (Call transferCaps_'proc)" (is "ccorres _ _ ?P _ _ _ _") + apply (unfold K_def, intro ccorres_gen_asm) + apply (cinit lift: current_extra_caps_' receiver_' info_' receiveBuffer_' endpoint_' + simp: getThreadCSpaceRoot_def locateSlot_conv whileAnno_def) + apply csymbr+ + apply (rule_tac P="?P" and P'="{s. info_' s = info}" in ccorres_inst) + apply (cases "receiveBuffer = None") + apply (clarsimp simp: option_to_0_def getReceiveSlots_def + simp del: Collect_const) + apply (rule ccorres_guard_imp2) + apply (simp (no_asm)) + apply (rule_tac R'=UNIV in ccorres_split_throws [OF ccorres_return_C], simp_all)[1] + apply vcg + apply simp + apply (simp add: message_info_to_H_def word_sless_def word_sle_def) + apply (cases "caps = []") + apply (clarsimp simp: interpret_excaps_test_null excaps_map_def + simp del: Collect_const not_None_eq) + apply (erule notE, rule ccorres_guard_imp2) + apply (simp (no_asm)) + apply (rule ccorres_symb_exec_l) + apply (rule_tac R'=UNIV in ccorres_split_throws [OF ccorres_return_C], simp_all)[1] + apply vcg + apply simp + apply ((wp empty_fail_getReceiveSlots)+)[3] + apply (simp add: message_info_to_H_def word_sless_def word_sle_def) + apply (simp add: option_to_0_def ccorres_cond_iffs + interpret_excaps_test_null excaps_map_def + del: Collect_const + cong: call_ignore_cong) + apply (elim exE) + apply (clarsimp simp: Collect_const[symmetric] Collect_False + signed_shift_guard_simpler_64 + simp del: Collect_const + cong: call_ignore_cong) + apply (rule ccorres_guard_imp2) + apply (ctac add: getReceiveSlots_ccorres) + apply (elim conjE) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_add_return2) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_Catch) + apply (rule_tac caps=caps and caps'=current_extra_caps in transferCapsLoop_ccorres, simp+) + apply (simp add: excaps_map_def) + apply ceqv + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply (simp add: guard_is_UNIV_def) + apply (clarsimp simp: message_info_to_H_def split: if_split) + apply (erule notE, (rule sym)?, rule less_mask_eq) + apply (simp add: word_leq_minus_one_le) + apply (subgoal_tac "rv \ [0]") + apply simp + apply vcg + apply clarsimp + apply (rule conseqPre, vcg, clarsimp) + apply (simp add: o_def pred_conj_def) + apply (strengthen cte_wp_at_imp_consequent') + apply wp + apply (simp only: mem_simps simp_thms split: if_split) + apply (vcg 
exspec=getReceiveSlots_modifies) + apply (clarsimp simp: message_info_to_H_def excaps_in_mem_def + slotcap_in_mem_def split_def cte_wp_at_ctes_of + word_sless_def word_sle_def) + apply fastforce + apply clarsimp + done + +definition + mi_from_H :: "Types_H.message_info \ seL4_MessageInfo_CL" +where + "mi_from_H mi \ + \ label_CL = msgLabel mi, + capsUnwrapped_CL = msgCapsUnwrapped mi, + extraCaps_CL = msgExtraCaps mi, + length_CL = msgLength mi \" + +lemma ccorres_add_returnOk2: + "ccorres_underlying rf_sr G r xf arrel axf P P' hs (a >>=E returnOk) c + \ ccorres_underlying rf_sr G r xf arrel axf P P' hs a c" + by (rule ccorres_add_returnOk) + +lemma capFaultOnFailure_ccorres: + "ccorres (f \ r) xf P P' hs a c + \ ccorres ((\x y z. \w. x = CapFault addr b w \ f w y z) \ r) + xf P P' hs + (capFaultOnFailure addr b a) c" + apply (simp add: capFault_injection injection_handler_liftM) + apply (erule ccorres_rel_imp) + apply (auto split: sum.split) + done + +definition + "cfault_rel2 \ \ft exnvar err. exnvar = (scast EXCEPTION_FAULT :: machine_word) \ + cfault_rel (Some ft) (errfault err) (errlookup_fault err)" + +lemma takeWhile_eq: + "\ \m. \ m < length ys \ \ P (xs ! m); + length ys < length xs \ \ P (xs ! length ys); + length ys \ length xs; + \m. m < length ys \ xs ! m = ys ! m \ + \ takeWhile P xs = ys" +proof (induct xs arbitrary: n ys) + case Nil + thus ?case by simp +next + case (Cons x xs' ys') + have P: "\v m. \ (x # xs') ! m = v; m < length ys' \ + \ P v" + using Cons.prems by clarsimp + show ?case using Cons.prems(2-3) + apply simp + apply (cases ys') + apply simp + apply (subst P[where m1=0]) + apply simp+ + apply (rule conjI) + apply (cut_tac m1=0 in Cons.prems(4), simp+) + apply (rule Cons.hyps) + apply (rule_tac m1="Suc m" in P, simp+) + apply (cut_tac m1="Suc m" in Cons.prems(4), simp+) + done +qed + +lemma ccorres_sequenceE_while': + fixes axf :: "globals myvars \ 'e" shows + "\\ys. length ys < length xs \ + ccorres_underlying sr \ (inr_rrel (\rv rv'. r' (ys @ [rv]) rv')) xf' + (inl_rrel arrel) axf + (F (length ys)) (Q \ {s. i_' s = of_nat (length ys) \ r' ys (xf' s)}) hs + (xs ! length ys) body; + \n. P n = (n < of_nat (length xs)); + \s. s \ Q \ \\\<^bsub>/UNIV\<^esub> {s} body (Q \ {t. i_' t = i_' s}),UNIV; + \n. n < length xs \ \F n\ xs ! n \\_. F (Suc n)\, -; + length xs < 2 ^ word_bits; + \s f. xf' (i_'_update f s) = xf' s + \ ((i_'_update f s \ Q) = (s \ Q)) + \ (\s'. ((s', i_'_update f s) \ sr) = ((s', s) \ sr)) \ + \ ccorres_underlying sr \ (inr_rrel (\rv (i', rv'). r' rv rv' \ i' = of_nat (length xs))) + (\s. (i_' s, xf' s)) arrel axf + (F 0) (Q \ {s. r' [] (xf' s)}) hs + (sequenceE xs) + (Basic (\s. i_'_update (\_. 0) s) ;; + While {s. P (i_' s)} (body;; + Basic (\s. i_'_update (\_. i_' s + 1) s)))" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply simp + done + +lemma lookupExtraCaps_ccorres: + notes if_cong[cong] nat_min_simps [simp del] + shows + "ccorres + ((\ft _ err. + cfault_rel (Some ft) (errfault err) (errlookup_fault err)) + \ (\xs ys. interpret_excaps ys = excaps_map xs)) + (liftxf errstate fst snd + (\s. (ret__unsigned_long_' s, current_extra_caps_' (globals s)))) + (valid_pspace' and tcb_at' thread + and (case buffer of Some x\ valid_ipc_buffer_ptr' x | _ \ \) + and (\s. 
unat (msgExtraCaps info) <= 3)) + (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread} + \ {s. bufferPtr_' s = option_to_ptr buffer} + \ {s. seL4_MessageInfo_lift (info_' s) = mi_from_H info + }) [] + (lookupExtraCaps thread buffer info) (Call lookupExtraCaps_'proc)" +proof - + let ?curr = "\s. current_extra_caps_' (globals s)" + let ?EXCNONE = "{s. ret__unsigned_long_' s = scast EXCEPTION_NONE}" + let ?interpret = "\v n. take n (array_to_list (excaprefs_C v))" + note empty_fail_cond[simp] + show ?thesis + apply (rule ccorres_gen_asm)+ + apply (cinit(no_subst_asm) lift: thread_' bufferPtr_' info_' simp: whileAnno_def) + apply (clarsimp simp add: getExtraCPtrs_def lookupCapAndSlot_def + capFault_bindE + simp del: Collect_const) + apply (simp add: liftE_bindE del: Collect_const) + apply wpc + apply (rename_tac word1 word2 word3 word4) + apply (simp del: Collect_const) + apply wpc + apply (simp add: option_to_ptr_def option_to_0_def) + apply (rule ccorres_rhs_assoc2, rule ccorres_split_throws) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def) + apply (simp add: excaps_map_def) + apply (subst interpret_excaps_test_null[where n=0, simplified, symmetric]) + apply (simp add: word_sle_def word_sless_def) + apply vcg + apply (simp add: id_def[symmetric] del: Collect_const) + apply (rule ccorres_symb_exec_r) + apply csymbr + apply csymbr + apply (rename_tac "lngth") + apply (unfold mapME_def)[1] + apply (simp add: mi_from_H_def del: Collect_const) + apply (rule ccorres_symb_exec_l) + apply (rule_tac P="length xs = unat word2" in ccorres_gen_asm) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_add_returnOk2, + rule ccorres_splitE_novcg) + apply (rule_tac xf'="?curr" + and r'="\xs v. excaps_map xs = ?interpret v (length xs)" + and Q="UNIV" + and F="\n s. valid_pspace' s \ tcb_at' thread s \ + (case buffer of Some x \ valid_ipc_buffer_ptr' x | _ \ \) s \ + (\m < length xs. user_word_at (xs ! m) + (x2 + (of_nat m + (msgMaxLength + 2)) * 8) s)" + in ccorres_sequenceE_while') + apply (simp add: split_def) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=cptr_' in ccorres_abstract, ceqv) + apply (ctac add: capFaultOnFailure_ccorres + [OF lookupSlotForThread_ccorres']) + apply (rule_tac P="is_aligned rv 5" in ccorres_gen_asm) + apply (simp add: ccorres_cond_iffs liftE_bindE) + apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_getSlotCap]) + apply (rule_tac P'="UNIV \ {s. excaps_map ys + = ?interpret (?curr s) (length ys)} + \ {s. i_' s = of_nat (length ys)}" + in ccorres_from_vcg[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (simp add: returnOk_liftE) + apply (clarsimp simp: Bex_def in_monad) + apply (clarsimp simp: excaps_map_def array_to_list_def + lookupSlot_raw_rel_def) + apply (subgoal_tac "length ys < 3") + apply (simp add: take_Suc_conv_app_nth take_map + unat_of_nat64[unfolded word_bits_conv] + word_of_nat_less) + apply (simp add: word_less_nat_alt) + apply wp+ + apply (clarsimp simp: ccorres_cond_iffs) + apply (rule_tac P= \ + and P'="{x. errstate x= lu_ret___struct_lookupSlot_raw_ret_C \ + rv' = (xs ! 
length ys)}" + in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def) + apply (frule lookup_failure_rel_fault_lift, assumption) + apply (clarsimp simp: cfault_rel2_def) + apply (clarsimp simp: cfault_rel_def) + apply (simp add: seL4_Fault_CapFault_lift) + apply (clarsimp simp: is_cap_fault_def) + apply wp + apply (rule hoare_strengthen_postE_R, rule lsft_real_cte) + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps') + apply (vcg exspec=lookupSlot_modifies) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: valid_pspace'_def) + apply (drule spec, drule(1) mp) + apply (drule(1) user_word_at_cross_over [OF _ _ refl]) + apply (simp add: field_simps msgMaxLength_def + seL4_MsgLengthBits_def + seL4_MsgMaxLength_def + msgLengthBits_def) + apply (subst valid_ipc_buffer_ptr_array, simp+, + simp add: msg_align_bits unat_word_ariths unat_of_nat, + simp add: msg_align_bits unat_word_ariths unat_of_nat)+ + apply clarsimp + apply simp + apply (rule conseqPre) + apply (vcg exspec=lookupSlot_modifies) + apply clarsimp + apply (simp add: split_def) + apply (rule hoare_pre, wp) + apply simp + apply (simp add: word_less_nat_alt word_bits_def) + apply simp + apply (rule ceqv_tuple2) + apply ceqv + apply ceqv + apply (simp del: Collect_const) + apply (rule_tac P'="{s. snd rv'=?curr s}" + and P="\s. length rv = length xs \ (\x \ set rv. snd x \ 0)" + in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def + seL4_MsgExtraCapBits_def) + apply (simp add: word_sle_def interpret_excaps_def + excaps_map_def) + apply (rule conjI) + apply (clarsimp simp: array_to_list_def) + apply (rule takeWhile_eq, simp_all)[1] + apply (drule_tac f="\xs. xs ! m" in arg_cong) + apply (clarsimp simp: split_def NULL_ptr_val[symmetric]) + apply (clarsimp simp: array_to_list_def) + apply (rule takeWhile_eq, simp_all)[1] + apply (drule_tac f="\xs. xs ! m" in arg_cong) + apply (clarsimp simp: split_def NULL_ptr_val[symmetric]) + apply (simp add: word_less_nat_alt) + apply simp + apply (simp add: mapME_def[symmetric] split_def + liftE_bindE[symmetric]) + apply (wp mapME_length mapME_set | simp)+ + apply (rule_tac Q'="\rv. no_0_obj' and real_cte_at' rv" + in hoare_strengthen_postE_R, wp lsft_real_cte) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wpsimp)+ + apply (clarsimp simp: guard_is_UNIV_def + elim!: inl_inrE) + apply (rule hoare_pre, (wp mapM_wp' | simp)+) + apply (rule mapM_loadWordUser_user_words_at) + apply simp + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp add: valid_pspace'_def) + apply (simp add: upto_enum_step_def + split: if_split_asm) + apply (simp add: word_size upto_enum_word field_simps wordSize_def' + del: upt.simps) + apply (clarsimp simp: excaps_map_def option_to_ptr_def option_to_0_def + valid_ipc_buffer_ptr'_def) + done +qed + +lemma interpret_excaps_empty: + "(interpret_excaps caps = []) = (index (excaprefs_C caps) 0 = NULL)" + by (simp add: interpret_excaps_test_null) + +lemma getSlotCap_slotcap_in_mem: + "\\\ getSlotCap slot \\cap s. slotcap_in_mem cap slot (ctes_of s)\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of slotcap_in_mem_def) + done + +lemma lookupExtraCaps_excaps_in_mem[wp]: + "\\\ lookupExtraCaps thread buffer info \\rv s. 
excaps_in_mem rv (ctes_of s)\,-" + apply (simp add: excaps_in_mem_def lookupExtraCaps_def lookupCapAndSlot_def + split_def) + apply (wp mapME_set) + apply (wpsimp wp: getSlotCap_slotcap_in_mem)+ + done + +lemma doNormalTransfer_ccorres [corres]: + "ccorres dc xfdc + (valid_pspace' and cur_tcb' and tcb_at' sender + and tcb_at' receiver + and K (endpoint \ Some 0) + and (case_option \ valid_ipc_buffer_ptr' sendBuffer) + and (case_option \ valid_ipc_buffer_ptr' receiveBuffer)) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\ + \ \\sendBuffer = Ptr (option_to_0 sendBuffer)\ + \ \\receiveBuffer = Ptr (option_to_0 receiveBuffer)\ + \ \canGrant = to_bool \canGrant\ + \ \\badge = badge\ + \ \\endpoint = option_to_ptr endpoint\) [] + (doNormalTransfer sender sendBuffer endpoint badge canGrant + receiver receiveBuffer) + (Call doNormalTransfer_'proc)" +proof - + have word_0_le_helper: + "\i :: sword32. \ i + \ 0 <=s i + 1" + apply (simp add: seL4_MsgExtraCapBits_def word_sle_msb_le + word_sless_msb_less msb_nth) + apply (clarsimp simp: word_eq_iff) + apply (drule bang_is_le) + apply (unat_arith; simp add: take_bit_nat_def) + done + + show ?thesis + apply (cinit lift: sender_' receiver_' sendBuffer_' receiveBuffer_' + canGrant_' badge_' endpoint_' + cong: call_ignore_cong) + apply (clarsimp cong: call_ignore_cong) + apply (ctac(c_lines 2, no_vcg) add: getMessageInfo_ccorres') + apply (rule_tac xf'="\s. current_extra_caps_' (globals s)" + and r'="\c c'. interpret_excaps c' = excaps_map c" + in ccorres_split_nothrow_novcg) + apply (rule ccorres_if_lhs) + apply (simp add: catch_def to_bool_def ccorres_cond_iffs) + apply (rule_tac xf'="\s. (status_' s, current_extra_caps_' (globals s))" + and ef'=fst and vf'=snd and es=errstate + in ccorres_split_nothrow_case_sum) + apply (rule ccorres_call, rule lookupExtraCaps_ccorres, simp+) + apply (rule ceqv_tuple2, ceqv, ceqv) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_Skip') + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def excaps_map_def interpret_excaps_empty + word_sless_def word_sle_def) + apply wp + apply simp + apply (vcg exspec=lookupExtraCaps_modifies) + apply (simp add: to_bool_def ccorres_cond_iffs) + apply (rule ccorres_return[where R=\ and R'=UNIV], vcg) + apply (clarsimp simp: excaps_map_def interpret_excaps_empty) + apply ceqv + apply csymbr + apply (ctac add: copyMRs_ccorres) + apply (ctac add: transferCaps_ccorres) + apply csymbr + apply (ctac(c_lines 2, no_vcg) add: setMessageInfo_ccorres) + apply ctac + apply wp + apply (clarsimp simp: AARCH64_H.badgeRegister_def AARCH64.badgeRegister_def + C_register_defs) + apply wp + apply (simp add: seL4_MessageInfo_lift_def message_info_to_H_def msgLengthBits_def) + apply (vcg exspec=transferCaps_modifies) + apply (wpsimp wp: hoare_case_option_wp) + apply clarsimp + apply (vcg exspec=copyMRs_modifies) + apply (wpsimp wp: lookupExtraCaps_length) + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) + apply (clarsimp simp: seL4_MessageInfo_lift_def message_info_to_H_def mask_def + msgLengthBits_def word_bw_assocs) + apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] hoare_weak_lift_imp + | simp)+ + apply (auto simp: excaps_in_mem_def valid_ipc_buffer_ptr'_def + option_to_0_def option_to_ptr_def + seL4_MessageInfo_lift_def mi_from_H_def message_info_to_H_def + split: option.split) + done +qed + 
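+(* The helper lemmas below record two facts about lookupIPCBuffer that the
+   doIPCTransfer_ccorres proof further down discharges via wp: lookupIPCBuffer
+   never returns Some 0, and, given valid_objs', any buffer it does return is
+   aligned to msg_align_bits. *)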
+lemma lookupIPCBuffer_not_Some_0: + "\\\ lookupIPCBuffer r t \\rv. K (rv \ Some 0)\" + apply (simp add: lookupIPCBuffer_def AARCH64_H.lookupIPCBuffer_def) + apply (wp hoare_TrueI haskell_assert_wp + | simp add: Let_def getThreadBufferSlot_def locateSlotTCB_def + | intro conjI impI | wpc)+ + done + +lemma pageBitsForSize_3 [simp]: + "3 \ pageBitsForSize sz" + by (cases sz, auto simp: bit_simps) + +lemma pbfs_msg_align_bits [simp]: + "msg_align_bits \ pageBitsForSize sz" + by (cases sz, auto simp: msg_align_bits bit_simps) + +lemma lookupIPCBuffer_aligned: + "\valid_objs'\ lookupIPCBuffer r t \\rv. K (case_option True (\x. is_aligned x msg_align_bits) rv)\" + apply (simp add: lookupIPCBuffer_def AARCH64_H.lookupIPCBuffer_def + getThreadBufferSlot_def locateSlot_conv + Let_def getSlotCap_def cong: if_cong) + apply (rule hoare_pre) + apply (wp getCTE_wp' threadGet_wp | wpc)+ + apply clarsimp + apply (drule (1) ctes_of_valid) + apply (drule (1) ko_at_valid_objs', simp add: projectKOs) + apply (clarsimp simp: isCap_simps valid_cap'_def capAligned_def valid_obj'_def valid_tcb'_def) + apply (auto elim: aligned_add_aligned intro: is_aligned_andI1) + done + + +lemma isArchPageCap_def2: + "\cap. isArchFrameCap cap = (isArchObjectCap cap \ isFrameCap (capCap cap))" + by (fastforce simp: isCap_simps) + + +lemma replyFromKernel_error_ccorres [corres]: + "ccorres dc xfdc (valid_pspace' and tcb_at' thread) + (UNIV \ \syscall_error_to_H \current_syscall_error + (lookup_fault_lift \current_lookup_fault) + = Some err\ + \ \\thread = tcb_ptr_to_ctcb_ptr thread\) hs + (replyFromKernel thread (msgFromSyscallError err)) + (Call replyFromKernel_error_'proc)" + apply (cinit lift: thread_') + apply clarsimp + apply wpc + apply (ctac add: lookupIPCBuffer_ccorres) + apply simp + apply ctac + apply (ctac add: setMRs_syscall_error_ccorres[where err=err]) + apply ((rule ccorres_Guard_Seq)+)? 
+ apply csymbr + apply (rule ccorres_abstract_cleanup) + apply (rule setMessageInfo_ccorres) + apply wp + apply (simp add: Collect_const_mem) + apply (vcg exspec=setMRs_syscall_error_modifies) + apply (wp hoare_case_option_wp) + apply (vcg exspec=setRegister_modifies) + apply simp + apply (wp lookupIPCBuffer_aligned_option_to_0) + apply (simp del: Collect_const) + apply (vcg exspec=lookupIPCBuffer_modifies) + apply (simp add: msgInfoRegister_def + Kernel_C.msgInfoRegister_def C_register_defs + AARCH64_H.badgeRegister_def AARCH64.badgeRegister_def + Kernel_C.badgeRegister_def AARCH64.capRegister_def + message_info_to_H_def valid_pspace_valid_objs') + apply (clarsimp simp: msgLengthBits_def msgFromSyscallError_def + syscall_error_to_H_def syscall_error_type_defs + mask_def option_to_ptr_def + split: if_split_asm) + done + +lemma fault_to_fault_tag_nonzero: + "fault_to_fault_tag f \ 0" + apply (case_tac f; simp add: seL4_Fault_tag_defs) + apply (rename_tac af) + apply (case_tac af; simp add: seL4_Fault_tag_defs) + done + +lemma doIPCTransfer_ccorres [corres]: + "ccorres dc xfdc + (valid_pspace' and cur_tcb' and tcb_at' sender and tcb_at' receiver + and K (receiver \ sender \ endpoint \ Some 0)) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\ + \ \canGrant = to_bool \grant\ + \ \\badge = badge\ + \ \\endpoint = option_to_ptr endpoint\) [] + (doIPCTransfer sender endpoint badge canGrant receiver) + (Call doIPCTransfer_'proc)" + apply (cinit lift: sender_' receiver_' grant_' badge_' endpoint_') + apply (rule_tac xf'="ret__ptr_to_unsigned_long_'" + in ccorres_split_nothrow_call_novcg) + apply (rule lookupIPCBuffer_ccorres) + apply simp_all[3] + apply ceqv + apply csymbr + apply (rule ccorres_pre_threadGet) + apply (rename_tac fault) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="case_option (scast seL4_Fault_NullFault) fault_to_fault_tag fault" + and xf'=ret__unsigned_longlong_' + and R="\s. \t. ko_at' t sender s \ tcbFault t = fault" + in ccorres_symb_exec_r_known_rv_UNIV [where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) + apply (fastforce simp: ctcb_relation_def typ_heap_simps + cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm option.split) + apply ceqv + apply wpc + apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_univ_iff) + apply (rule ccorres_rhs_assoc) + apply (rule_tac xf'="ret__ptr_to_unsigned_long_'" + in ccorres_split_nothrow_call_novcg) + apply (rule lookupIPCBuffer_ccorres) + apply simp_all[3] + apply ceqv + apply csymbr + apply ctac + apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) + apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs + fault_to_fault_tag_nonzero) + apply ctac + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def split: option.splits) + apply (rule_tac Q="\rv. valid_pspace' and cur_tcb' and tcb_at' sender + and tcb_at' receiver and K (rv \ Some 0) + and (case_option \ valid_ipc_buffer_ptr' rv) + and K (receiver \ sender \ endpoint \ Some 0)" + in hoare_post_imp) + apply (auto simp: valid_ipc_buffer_ptr'_def option_to_0_def + split: option.splits)[1] + apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) + apply auto + done + +lemma fault_case_absorb_bind: + "(do x \ f; case_fault (p x) (q x) (r x) (s x) ft od) + = case_fault (\a b. f >>= (\x. p x a b)) (\a b c. f >>= (\x. q x a b c)) + (\a. f >>= (\x. r x a)) (\a. f >>= (\x. 
s x a)) ft" + by (simp split: fault.split) + +lemma length_exceptionMessage: + "length AARCH64_H.exceptionMessage = unat n_exceptionMessage" + by (simp add: AARCH64_H.exceptionMessage_def AARCH64.exceptionMessage_def n_exceptionMessage_def) + +lemma Arch_getSanitiseRegisterInfo_ccorres: + "ccorres ((=) \ from_bool) ret__unsigned_long_' + (tcb_at' r and no_0_obj' and valid_objs') + (\ \thread = tcb_ptr_to_ctcb_ptr r\) hs + (getSanitiseRegisterInfo r) + (Call Arch_getSanitiseRegisterInfo_'proc)" + apply (cinit' lift: thread_' simp: getSanitiseRegisterInfo_def[folded archThreadGet_def]) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_pre_archThreadGet) + apply (rule_tac P="\s. v \ Some 0" in ccorres_cross_over_guard) + apply (rule ccorres_return_C, simp+) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def carch_tcb_relation_def) + apply (rule conjI) + apply clarsimp + apply (drule (1) valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def valid_arch_tcb'_def) + apply (clarsimp simp: typ_heap_simps) + apply (case_tac "atcbVCPUPtr (tcbArch tcb) \ None") + apply (clarsimp split: if_splits)+ + done + +lemma copyMRsFaultReply_ccorres_exception: + "ccorres dc xfdc + (valid_pspace' and tcb_at' s and tcb_at' r + and obj_at' (\t. tcbFault t = Some f) r + and K (r \ s) + and K (len \ unat n_exceptionMessage)) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr s\ + \ \\receiver = tcb_ptr_to_ctcb_ptr r\ + \ \\id___anonymous_enum = MessageID_Exception \ + \ \\length___unsigned_long = of_nat len \) hs + (do t \ getSanitiseRegisterInfo r; + zipWithM_x (\rs rd. do v \ asUser s (getRegister rs); + asUser r (setRegister rd (AARCH64_H.sanitiseRegister t rd v)) + od) + AARCH64_H.msgRegisters (take len AARCH64_H.exceptionMessage) + od) + (Call copyMRsFaultReply_'proc)" +proof - + show ?thesis + apply (unfold K_def, rule ccorres_gen_asm) using [[goals_limit=1]] + apply (cinit' lift: sender_' receiver_' + id___anonymous_enum_' + length___unsigned_long_' + simp: whileAnno_def) + apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) + apply (rule ccorres_rhs_assoc2) + apply (simp add: MessageID_Exception_def) + apply ccorres_rewrite + apply (rule ccorres_add_return2) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_zipWithM_x_while) + apply clarsimp + apply (intro ccorres_rhs_assoc) + apply (rule ccorres_symb_exec_r) + apply ctac + apply (rule ccorres_symb_exec_r) + apply ctac + apply (vcg) + apply clarsimp + apply (rule conseqPre, vcg) + apply (auto simp: sanitiseRegister_def)[1] + apply wp + apply clarsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conjI, simp add: AARCH64_H.exceptionMessage_def + AARCH64.exceptionMessage_def word_of_nat_less) + apply (simp add: msgRegisters_ccorres n_msgRegisters_def length_msgRegisters + AARCH64_H.exceptionMessage_def AARCH64.exceptionMessage_def + unat_of_nat exceptionMessage_ccorres[symmetric,simplified MessageID_Exception_def,simplified] + n_exceptionMessage_def length_exceptionMessage sanitiseRegister_def Let_def) + apply (auto simp add: word_less_nat_alt unat_of_nat)[1] + apply (rule conseqPre, vcg) + apply (clarsimp simp: word_of_nat_less AARCH64_H.exceptionMessage_def + AARCH64.exceptionMessage_def) + apply (simp add: min_def length_msgRegisters) + apply (clarsimp simp: min_def n_exceptionMessage_def + AARCH64_H.exceptionMessage_def + AARCH64.exceptionMessage_def + length_msgRegisters n_msgRegisters_def + message_info_to_H_def word_of_nat_less + split: if_split) + apply (fastforce dest!: le_antisym) + apply clarsimp + 
apply (vcg spec=TrueI) + apply clarsimp + apply wp + apply simp+ + apply (clarsimp simp: AARCH64_H.exceptionMessage_def + AARCH64.exceptionMessage_def + word_bits_def) + apply unat_arith + apply ceqv + apply (simp add: length_exceptionMessage + length_msgRegisters + n_exceptionMessage_def + msgMaxLength_def + n_msgRegisters_def + of_nat_less_iff) + apply ccorres_rewrite + apply (rule ccorres_return_Skip) + apply (wp mapM_wp') + apply clarsimp+ + apply (clarsimp simp: guard_is_UNIV_def message_info_to_H_def + Collect_const_mem + split: if_split) + apply wp + apply (auto) + done +qed + +lemma valid_drop_case: "\ \P\ f \\rv s. P' rv s\ \ + \ \P\ f \\rv s. case rv of None \ True | Some x \ P' rv s\" + apply (simp only: valid_def Ball_def split: prod.split) + apply (rule allI impI)+ + apply (case_tac x1) + apply simp+ + done + +lemma copyMRsFaultReply_ccorres_syscall: + fixes word_size :: "'a::len" + shows "ccorres dc xfdc + (valid_pspace' and tcb_at' s + and tcb_at' r + and obj_at' (\t. tcbFault t = Some f) r + and K (r \ s) + and K (len \ unat n_syscallMessage)) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr s\ + \ \\receiver = tcb_ptr_to_ctcb_ptr r\ + \ \\id___anonymous_enum = MessageID_Syscall \ + \ \\length___unsigned_long = of_nat len \) hs + (do t \ getSanitiseRegisterInfo r; + a \ zipWithM_x (\rs rd. do v \ asUser s (getRegister rs); + asUser r (setRegister rd (AARCH64_H.sanitiseRegister t rd v)) + od) + AARCH64_H.msgRegisters (take len AARCH64_H.syscallMessage); + sendBuf \ lookupIPCBuffer False s; + case sendBuf of None \ return () + | Some bufferPtr \ + zipWithM_x (\i rd. do v \ loadWordUser (bufferPtr + i * 8); + asUser r (setRegister rd (AARCH64_H.sanitiseRegister t rd v)) + od) + [scast n_msgRegisters + 1.e.scast n_syscallMessage] + (drop (unat (scast n_msgRegisters :: machine_word)) + (take len AARCH64_H.syscallMessage)) + od) + (Call copyMRsFaultReply_'proc)" + proof - + let ?obj_at_ft = "obj_at' (\tcb. 
tcbFault tcb = Some f) s" + note symb_exec_r_fault = ccorres_symb_exec_r_known_rv_UNIV + [where xf'=ret__unsigned_' and R="?obj_at_ft" and R'=UNIV] + note empty_fail_cond[simp] + show ?thesis + apply (unfold K_def, rule ccorres_gen_asm) using [[goals_limit=1]] + apply (cinit' lift: sender_' receiver_' + id___anonymous_enum_' + length___unsigned_long_' + simp: whileAnno_def) + apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) + apply (rule ccorres_rhs_assoc2) + apply (simp add: MessageID_Syscall_def) + apply ccorres_rewrite + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_zipWithM_x_while) + apply clarsimp + apply (intro ccorres_rhs_assoc) + apply (rule ccorres_symb_exec_r) + apply ctac + apply (rule ccorres_symb_exec_r) + apply ctac + apply (vcg) + apply (rule conseqPre, vcg) + apply fastforce + apply wp + apply vcg + apply vcg + apply (rule conjI, simp add: AARCH64_H.syscallMessage_def + AARCH64.syscallMessage_def word_of_nat_less + unat_of_nat msgRegisters_ccorres n_msgRegisters_def + length_msgRegisters) + apply (simp add: msgRegisters_ccorres n_msgRegisters_def length_msgRegisters unat_of_nat + syscallMessage_ccorres[symmetric,simplified MessageID_Syscall_def,simplified] + n_syscallMessage_def length_syscallMessage sanitiseRegister_def) + apply (auto simp add: word_less_nat_alt unat_of_nat)[1] + apply (rule conseqPre, vcg) + apply (clarsimp simp: word_of_nat_less syscallMessage_unfold length_msgRegisters + n_syscallMessage_def n_msgRegisters_def) + apply (simp add: min_def length_msgRegisters) + apply (clarsimp simp: min_def n_syscallMessage_def + length_msgRegisters n_msgRegisters_def + length_syscallMessage + message_info_to_H_def word_of_nat_less + split: if_split) + apply (simp add: word_less_nat_alt unat_of_nat not_le) + apply clarsimp + apply (vcg spec=TrueI) + apply clarsimp + apply wp + apply simp+ + apply (clarsimp simp: length_syscallMessage + length_msgRegisters + n_msgRegisters_def n_syscallMessage_def + word_bits_def min_def + split: if_split) + apply ceqv + apply (rule_tac P'="if 4 < len then _ else _" in ccorres_inst) + apply (cases "4 < len" ; simp) + apply (clarsimp simp: unat_ucast_less_no_overflow n_syscallMessage_def + length_syscallMessage msgRegisters_unfold + word_of_nat_less unat_of_nat unat_less_helper) + apply ccorres_rewrite + apply (ctac(no_vcg)) + apply (rename_tac sb sb') + apply wpc + apply (simp add: option_to_0_def ccorres_cond_iffs option_to_ptr_def) + apply (rule ccorres_return_Skip') + apply (rule_tac P="sb \ Some 0" in ccorres_gen_asm) + apply (rule_tac P="case_option True (\x. is_aligned x msg_align_bits) sb" + in ccorres_gen_asm) + apply (simp add: option_to_0_def option_to_ptr_def) + apply (subgoal_tac "sb'\ NULL") prefer 2 + apply clarsimp + apply (simp add: ccorres_cond_iffs) + apply (subst ccorres_seq_skip' [symmetric]) + apply (rule_tac r'="\rv rv'. rv' = of_nat (unat n_msgRegisters) + _" in ccorres_rel_imp) + apply (drule_tac s="sb" in sym) + apply (simp only: zipWithM_x_mapM_x) + apply ccorres_rewrite + apply (rule_tac F="\_. valid_pspace' + and (case sb of None \ \ + | Some x \ valid_ipc_buffer_ptr' x) + and tcb_at' r" + in ccorres_mapM_x_while') + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_pre_loadWordUser) + apply (intro ccorres_rhs_assoc) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_move_array_assertion_ipc_buffer + ccorres_Guard_Seq[where S="{s. 
h_t_valid (htd s) c_guard (ptr s)}" for ptr htd])+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_symb_exec_r) + apply ctac + apply (vcg) + apply (rule conseqPre, vcg) + apply fastforce + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply clarsimp + apply (subst aligned_add_aligned, assumption) + apply (rule is_aligned_mult_triv2[where n=3, simplified]) + apply (simp add: msg_align_bits) + apply (simp only: n_msgRegisters_def) + apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def + word_unat.Rep_inverse[of "scast _ :: 'a word"] + msgRegisters_ccorres[symmetric] + length_msgRegisters[symmetric] + syscallMessage_ccorres[symmetric] + length_msgRegisters length_syscallMessage + syscallMessage_ccorres[symmetric, simplified MessageID_Syscall_def, simplified] + unat_of_nat64 word_bits_def + MessageID_Syscall_def + min_def message_info_to_H_def + upto_enum_def typ_heap_simps' + unat_add_lem[THEN iffD1] Let_def + msg_align_bits sanitiseRegister_def + simp del: upt_rec_numeral cong: if_cong register.case_cong, + simp_all add: word_less_nat_alt unat_add_lem[THEN iffD1] unat_of_nat)[1] + apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def + msgRegisters_ccorres + syscallMessage_ccorres + length_syscallMessage length_msgRegisters + message_info_to_H_def min_def + split: if_split) + apply (fastforce dest!: le_antisym) + apply (vcg spec=TrueI) + apply clarsimp + apply (simp add: split_def) + apply (wp hoare_case_option_wp) + apply (fastforce elim: aligned_add_aligned + intro: is_aligned_mult_triv2 [where n=3,simplified] + simp: word_bits_def msg_align_bits) + apply (clarsimp simp: msgRegisters_unfold + n_msgRegisters_def + word_bits_def not_less) + apply (simp add: n_syscallMessage_def) + apply simp + apply (subst option.split[symmetric,where P=id, simplified]) + apply (rule valid_drop_case) + apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified] + lookupIPCBuffer_not_Some_0[simplified]) + apply (simp add: length_syscallMessage + length_msgRegisters + n_syscallMessage_def + msgMaxLength_def + n_msgRegisters_def + of_nat_less_iff) + apply (rule_tac P'="{s. i_' s = of_nat len}" in ccorres_inst) + apply ccorres_rewrite + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + apply (case_tac rva ; clarsimp) + apply (rule ccorres_return_Skip)+ + apply (wp mapM_x_wp_inv user_getreg_inv' + | clarsimp simp: zipWithM_x_mapM_x split: prod.split)+ + apply (cases "4 < len") + apply ((fastforce simp: guard_is_UNIV_def + msgRegisters_unfold + syscallMessage_unfold + n_syscallMessage_def + n_msgRegisters_def + intro: obj_tcb_at')+)[2] + apply wp + apply auto + done +qed + +lemma handleArchFaultReply_corres: + "ccorres (\rv rv'. 
rv = to_bool rv') ret__unsigned_long_' + (valid_pspace' and tcb_at' sender + and tcb_at' receiver + and K (f = ArchFault af) + and K (sender \ receiver)) + (UNIV \ \ \faultType = fault_to_fault_tag f \ + \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\) + hs + (handleArchFaultReply' af sender receiver msg) + (Call Arch_handleFaultReply_'proc)" + apply (unfold K_def) + apply (rule ccorres_gen_asm)+ + apply (cinit lift: faultType_' sender_' receiver_') + apply (clarsimp simp: bind_assoc seL4_Fault_tag_defs ccorres_cond_iffs Let_def + split del: if_split) + apply (wpc ; clarsimp simp: seL4_Fault_tag_defs ; ccorres_rewrite) + (* same thing four times, could probably be cleaner *) + (* VMFault *) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_stateAssert) + apply wpc + apply (clarsimp simp: ccorres_cond_iffs) + apply (rule ccorres_return_C) + apply simp+ + apply (rule ccorres_symb_exec_l) + apply (ctac add: ccorres_return_C) + apply (wpsimp wp: mapM_wp' empty_fail_loadWordUser)+ + (* VCPUFault *) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_stateAssert) + apply wpc + apply (clarsimp simp: ccorres_cond_iffs) + apply (rule ccorres_return_C) + apply simp+ + apply (rule ccorres_symb_exec_l) + apply (ctac add: ccorres_return_C) + apply (wpsimp wp: mapM_wp' empty_fail_loadWordUser)+ + (* VPPIEvent *) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_stateAssert) + apply wpc + apply (clarsimp simp: ccorres_cond_iffs) + apply (rule ccorres_return_C) + apply simp+ + apply (rule ccorres_symb_exec_l) + apply (ctac add: ccorres_return_C) + apply (wpsimp wp: mapM_wp' empty_fail_loadWordUser)+ + (* VGICMaintenance *) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_stateAssert) + apply wpc + apply (clarsimp simp: ccorres_cond_iffs) + apply (rule ccorres_return_C) + apply simp+ + apply (rule ccorres_symb_exec_l) + apply (ctac add: ccorres_return_C) + apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp)+ + done + +(* MOVE *) +lemma monadic_rewrite_ccorres_assemble_nodrop: + assumes cc: "ccorres_underlying sr G r xf ar axf (P and Q) P' hs f c" + assumes mr: "monadic_rewrite True False Q g f" + shows "ccorres_underlying sr G r xf ar axf (P and Q) P' hs g c" +proof - + have snd: "\s. \ Q s; \ snd (g s) \ \ \ snd (f s)" + using mr + by (simp add: monadic_rewrite_def) + + have fst: "\s v. \ Q s; \ snd (g s); v \ fst (f s) \ \ v \ fst (g s)" + using mr + by (auto simp add: monadic_rewrite_def) + + show ?thesis + apply (rule ccorresI') + apply (erule ccorresE[OF cc], (simp add: snd)+) + apply clarsimp + apply (rule rev_bexI[OF fst], assumption+) + apply simp + done +qed + +lemma handleFaultReply_ccorres [corres]: + "ccorres (\rv rv'. rv = to_bool rv') ret__unsigned_long_' + (valid_pspace' and obj_at' (\t. 
tcbFault t = Some f) r + and (tcb_at' s and tcb_at' r) + and K (r \ s)) + (UNIV \ \cfault_rel (Some f) + (seL4_Fault_lift (h_val (hrs_mem \t_hrs) + (Ptr &(tcb_ptr_to_ctcb_ptr r\[''tcbFault_C''])))) + (lookup_fault_lift (h_val (hrs_mem \t_hrs) + (Ptr &(tcb_ptr_to_ctcb_ptr r\[''tcbLookupFailure_C'']))))\ + \ \\sender = tcb_ptr_to_ctcb_ptr s\ + \ \\receiver = tcb_ptr_to_ctcb_ptr r\) hs + (do + tag \ getMessageInfo s; + sb \ lookupIPCBuffer False s; + msg \ getMRs s sb tag; + handleFaultReply f r (msgLabel tag) msg + od) (Call handleFaultReply_'proc)" + supply if_cong[cong] option.case_cong[cong] + apply (unfold K_def, rule ccorres_gen_asm) + apply (rule monadic_rewrite_ccorres_assemble_nodrop[OF _ handleFaultReply',rotated], simp) + apply (cinit lift: sender_' receiver_' simp: whileAnno_def) + apply clarsimp + apply (ctac(c_lines 2) add: getMessageInfo_ccorres') + apply (rename_tac tag tag') + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_symb_exec_r) + apply (rule_tac val="fault_to_fault_tag f" + and xf'=ret__unsigned_longlong_' + and R="\s. \t. ko_at' t r s \ tcbFault t = Some f" + and R'="\cfault_rel (Some f) (seL4_Fault_lift \fault) + (lookup_fault_lift (h_val (hrs_mem \t_hrs) + (Ptr &(tcb_ptr_to_ctcb_ptr r\[''tcbLookupFailure_C'']))))\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps + cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (simp add: handleFaultReply_def fault_case_absorb_bind + del: Collect_const split del: if_split) + apply wpc + (* UserException *) + apply (rename_tac number code) + apply (clarsimp simp: bind_assoc seL4_Fault_tag_defs ccorres_cond_iffs + split del: if_split) + apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) + apply (simp add: bind_assoc[symmetric]) + apply (ctac add: copyMRsFaultReply_ccorres_exception) + apply (ctac add: ccorres_return_C) + apply wp + apply (vcg exspec=copyMRsFaultReply_modifies) + apply (wpsimp wp: threadGet_wp)+ + (* CapFault *) + apply (clarsimp simp: bind_assoc seL4_Fault_tag_defs ccorres_cond_iffs + split del: if_split) + apply (ctac add: ccorres_return_C) + (* UnknowSyscall *) + apply (rename_tac number) + apply (clarsimp simp: seL4_Fault_tag_defs ccorres_cond_iffs + split del: if_split) + apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) + apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) + apply (fold bind_assoc) + apply (ctac add: copyMRsFaultReply_ccorres_syscall[simplified bind_assoc[symmetric]]) + apply (ctac add: ccorres_return_C) + apply wp + apply (vcg exspec=copyMRsFaultReply_modifies) + apply (wpsimp wp: threadGet_wp)+ + + (* ArchFault *) + apply (rename_tac arch_fault) + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc) + (* apply (rule_tac P="\s. \t. ko_at' t r s" in ccorres_cross_over_guard) *) + apply (rule_tac val="fault_to_fault_tag f" + and xf'=ret__unsigned_longlong_' + and R="\s. \t. 
ko_at' t r s \ tcbFault t = Some f" + and R'="\cfault_rel (Some f) (seL4_Fault_lift \fault) + (lookup_fault_lift (h_val (hrs_mem \t_hrs) + (Ptr &(tcb_ptr_to_ctcb_ptr r\[''tcbLookupFailure_C'']))))\" + in ccorres_symb_exec_r_known_rv_UNIV) + apply (rule conseqPre, vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps + cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm) + apply ceqv + apply (rule ccorres_add_return2) + apply (ctac add: handleArchFaultReply_corres) + apply (rule ccorres_return_C) + apply simp+ + apply wp + apply (vcg exspec=Arch_handleFaultReply_modifies) + apply (clarsimp simp: guard_is_UNIV_def) + apply (subst fault_to_fault_tag.simps(2)) + apply (clarsimp split: if_split) + apply simp+ + (* Done *) + apply clarsimp + apply vcg + apply vcg + apply clarsimp + apply vcg_step + apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def + message_info_to_H_def scast_def + length_exceptionMessage length_syscallMessage + min_def word_less_nat_alt + guard_is_UNIV_def seL4_Faults seL4_Arch_Faults + split: if_split) + apply (simp add: length_exceptionMessage length_syscallMessage) + apply wp + apply clarsimp + apply (vcg exspec=getRegister_modifies) + apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def + message_info_to_H_def length_exceptionMessage length_syscallMessage + min_def word_less_nat_alt obj_at'_def + split: if_split) + apply (fastforce simp: seL4_Faults seL4_Arch_Faults) + done + +context +notes if_cong[cong] +begin +crunch tcbFault: emptySlot, tcbSchedEnqueue, rescheduleRequired + "obj_at' (\tcb. P (tcbFault tcb)) t" + (wp: threadSet_obj_at'_strongish crunch_wps + simp: crunch_simps unless_def) + +crunch tcbFault: setThreadState, cancelAllIPC, cancelAllSignals + "obj_at' (\tcb. P (tcbFault tcb)) t" + (wp: threadSet_obj_at'_strongish crunch_wps) +end + +lemma sbn_tcbFault: + "\obj_at' (\tcb. P (tcbFault tcb)) t\ + setBoundNotification st t' + \\_. obj_at' (\tcb. P (tcbFault tcb)) t\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_obj_at' | simp cong: if_cong)+ + done + +crunch tcbFault: unbindNotification, unbindMaybeNotification "obj_at' (\tcb. P (tcbFault tcb)) t" + (ignore: threadSet wp: sbn_tcbFault) + +(* FIXME: move *) +lemma cteDeleteOne_tcbFault: + "\obj_at' (\tcb. P (tcbFault tcb)) t\ + cteDeleteOne slot + \\_. obj_at' (\tcb. P (tcbFault tcb)) t\" + apply (simp add: cteDeleteOne_def unless_when split_def) + apply (simp add: finaliseCapTrue_standin_def Let_def) + apply (rule hoare_pre) + apply (wp emptySlot_tcbFault cancelAllIPC_tcbFault getCTE_wp' + cancelAllSignals_tcbFault unbindNotification_tcbFault + isFinalCapability_inv unbindMaybeNotification_tcbFault + hoare_weak_lift_imp + | wpc | simp add: Let_def)+ + apply (clarsimp split: if_split) + done + +lemma transferCapsToSlots_local_slots: + assumes weak: "\c cap. P (maskedAsFull c cap) = P c" + shows + "\ cte_wp_at' (\cte. P (cteCap cte)) slot and K (slot \ set destSlots) \ + transferCapsToSlots ep rcvBuffer x caps destSlots mi + \\tag'. cte_wp_at' (\cte. P (cteCap cte)) slot\" +proof (rule hoare_gen_asm, induct caps arbitrary: x mi destSlots) + case Nil show ?case by simp +next + case (Cons cp cps) + show ?case using Cons.prems + apply (clarsimp simp: Let_def split del: if_split) + apply (wp Cons.hyps cteInsert_weak_cte_wp_at2 + | wpc | simp add: weak whenE_def split del: if_split split: prod.splits)+ + done +qed + +lemma transferCaps_local_slots: + assumes weak: "\c cap. 
P (maskedAsFull c cap) = P c" + shows "\ valid_objs' and (Not o real_cte_at' slot) and cte_wp_at' (\cte. P (cteCap cte)) slot \ + transferCaps tag caps ep receiver receiveBuffer + \\tag'. cte_wp_at' (\cte. P (cteCap cte)) slot\" + apply (simp add: transferCaps_def pred_conj_def) + apply (rule bind_wp_fwd) + apply (rule hoare_vcg_conj_lift) + apply (rule get_rs_real_cte_at') + apply (rule get_recv_slot_inv') + apply (rule hoare_pre) + apply (wp transferCapsToSlots_local_slots weak | wpc)+ + apply clarsimp + done + +lemma doNormalTransfer_local_slots: + assumes weak: "\c cap. P (maskedAsFull c cap) = P c" + shows "\ valid_objs' and (Not o real_cte_at' slot) + and cte_wp_at' (\cte. P (cteCap cte)) slot \ + doNormalTransfer sender sendBuffer ep badge grant receiver receiveBuffer + \\rv. cte_wp_at' (\cte. P (cteCap cte)) slot\" + apply (simp add: doNormalTransfer_def) + apply (wp transferCaps_local_slots weak copyMRs_typ_at'[where T=CTET, unfolded typ_at_cte] + | simp)+ + done + +lemma doIPCTransfer_local_slots: + assumes weak: "\c cap. P (maskedAsFull c cap) = P c" + shows "\ valid_objs' and (Not o real_cte_at' slot) + and cte_wp_at' (\cte. P (cteCap cte)) slot \ + doIPCTransfer sender ep badge grant receiver + \ \rv. cte_wp_at' (\cte. P (cteCap cte)) slot \" + apply (simp add: doIPCTransfer_def) + apply (wp doNormalTransfer_local_slots weak threadGet_wp | wpc)+ + apply simp + done + +lemma doIPCTransfer_reply_or_replyslot: + "\ cte_wp_at' (\cte. isReplyCap (cteCap cte)) slot + or (valid_objs' and (Not o real_cte_at' slot) + and cte_wp_at' (\cte. cteCap cte = capability.NullCap \ isReplyCap (cteCap cte)) slot)\ + doIPCTransfer sender ep badge grant receiver + \ \rv. cte_wp_at' (\cte. cteCap cte = capability.NullCap \ isReplyCap (cteCap cte)) slot\" + apply (rule hoare_name_pre_state) + apply (case_tac "cte_wp_at' (\cte. isReplyCap (cteCap cte)) slot s") + apply (rule hoare_pre, rule hoare_strengthen_post, + rule_tac P="isReplyCap" and ptr=slot in doIPCTransfer_non_null_cte_wp_at2') + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + apply (wp doIPCTransfer_local_slots) + apply (clarsimp simp: maskedAsFull_def cap_get_tag_isCap isCap_simps + split: if_split) + apply simp + done + +crunch ksCurDomain[wp]: handleFaultReply "\s. P (ksCurDomain s)" + +lemma doReplyTransfer_ccorres [corres]: + "ccorres dc xfdc + (invs' and st_tcb_at' (Not \ isReply) sender + and tcb_at' receiver and sch_act_simple and (\s. ksCurDomain s \ maxDomain) + and ((Not o real_cte_at' slot) or cte_wp_at' (\cte. isReplyCap (cteCap cte)) slot) + and cte_wp_at' (\cte. cteCap cte = capability.NullCap \ isReplyCap (cteCap cte)) + slot) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\ + \ \\slot = Ptr slot\ + \ \\grant = from_bool grant\) hs + (doReplyTransfer sender receiver slot grant) + (Call doReplyTransfer_'proc)" + apply (cinit lift: sender_' receiver_' slot_' grant_') + apply (rule getThreadState_ccorres_foo) + apply (rule ccorres_assert2) + apply (simp add: liftM_def getSlotCap_def + del: Collect_const split del: if_split) + apply (rule ccorres_pre_getCTE) + apply (rule ccorres_assert2) + apply (rule ccorres_pre_getCTE) + apply (rule ccorres_assert2) + apply (rule ccorres_pre_threadGet) + apply (rename_tac fault) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac val="case_option (scast seL4_Fault_NullFault) fault_to_fault_tag fault" + and xf'=ret__unsigned_longlong_' + and R="\s. \t. 
ko_at' t receiver s \ tcbFault t = fault" + in ccorres_symb_exec_r_known_rv_UNIV [where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) + apply (fastforce simp: ctcb_relation_def typ_heap_simps + cfault_rel_def seL4_Fault_lift_def Let_def + split: if_split_asm option.split) + apply ceqv + apply csymbr + apply wpc + apply (clarsimp simp: ccorres_cond_iffs split del: if_split) + apply (rule ccorres_rhs_assoc)+ + apply (ctac(no_vcg)) + apply (rule ccorres_symb_exec_r) + apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) + apply (ctac(no_vcg) add: setThreadState_ccorres) + apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) + apply (wpsimp wp: sts_valid_objs' setThreadState_st_tcb)+ + apply (wp cteDeleteOne_sch_act_wf) + apply vcg + apply (rule conseqPre, vcg) + apply (simp(no_asm_use) add: gs_set_assn_Delete_cstate_relation[unfolded o_def] + subset_iff rf_sr_def) + apply wp + apply (simp add: cap_get_tag_isCap) + apply (strengthen invs_weak_sch_act_wf_strg + cte_wp_at_imp_consequent'[where P="\ct. Ex (ccap_relation (cteCap ct))" for ct]) + apply (simp add: cap_reply_cap_def) + apply (wp doIPCTransfer_reply_or_replyslot) + apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs + fault_to_fault_tag_nonzero + split del: if_split) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (ctac (no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) + apply (rule_tac A'=UNIV in stronger_ccorres_guard_imp) + apply (rule ccorres_split_nothrow_novcg [OF ccorres_call, + OF handleFaultReply_ccorres, + unfolded bind_assoc, + where xf'=restart_']) + apply simp_all[3] + apply ceqv + apply csymbr + apply (rule ccorres_split_nothrow_novcg) + apply (rule threadSet_ccorres_lemma2[where P=\]) + apply vcg + apply (clarsimp simp: typ_heap_simps') + apply (erule(1) rf_sr_tcb_update_no_queue2, + (simp add: typ_heap_simps')+, simp_all?)[1] + apply (rule ball_tcb_cte_casesI, simp+) + apply (clarsimp simp: ctcb_relation_def + seL4_Fault_lift_NullFault + cfault_rel_def is_cap_fault_def + cthread_state_relation_def) + apply (case_tac "tcbState tcb", simp_all add: is_cap_fault_def)[1] + apply ceqv + apply (rule_tac R=\ in ccorres_cond2) + apply (clarsimp simp: to_bool_def Collect_const_mem) + apply (ctac (no_vcg)) + apply (simp only: K_bind_def) + apply (ctac add: possibleSwitchTo_ccorres) + apply (wp sts_valid_objs' setThreadState_st_tcb | simp)+ + apply (ctac add: setThreadState_ccorres_simple) + apply wp + apply ((wp threadSet_sch_act hoare_weak_lift_imp + threadSet_valid_objs' threadSet_weak_sch_act_wf + | simp add: valid_tcb_state'_def)+)[1] + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def option_to_ctcb_ptr_def) + + apply (rule_tac Q="\rv. tcb_at' receiver and + valid_objs' and sch_act_simple and (\s. ksCurDomain s \ maxDomain) and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and + pspace_aligned' and pspace_distinct'" in hoare_post_imp) + apply (clarsimp simp: inQ_def weak_sch_act_wf_def) + apply (wp threadSet_sch_act handleFaultReply_sch_act_wf) + apply (clarsimp simp: guard_is_UNIV_def) + apply assumption + apply clarsimp + apply (drule_tac p=receiver in obj_at_ko_at') + apply clarsimp + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps) + apply wp + apply (strengthen vp_invs_strg') + apply (wp cteDeleteOne_tcbFault cteDeleteOne_sch_act_wf) + apply vcg + apply (rule conseqPre, vcg) + apply (simp(no_asm_use) add: gs_set_assn_Delete_cstate_relation[unfolded o_def] + subset_iff rf_sr_def) + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def + ThreadState_defs mask_def + ghost_assertion_data_get_def ghost_assertion_data_set_def + cap_tag_defs option_to_ctcb_ptr_def + split: option.splits) + apply (clarsimp simp: pred_tcb_at' invs_valid_objs') + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def cte_wp_at_ctes_of + cap_get_tag_isCap) + apply fastforce + done + +lemma ccorres_getCTE_cte_at: + "ccorresG rf_sr \ r xf P P' hs (getCTE p >>= f) c + \ ccorresG rf_sr \ r xf (\s. cte_at' p s \ P s) P' hs + (getCTE p >>= f) c" + apply (rule ccorres_guard_imp) + apply (subst gets_bind_ign[where f="cte_at' p", symmetric], + rule ccorres_symb_exec_l[OF _ _ gets_wp]) + apply (rule_tac b=x in ccorres_case_bools) + apply assumption + apply (rule ccorres_getCTE) + apply (rule ccorres_False[where P'=UNIV]) + apply wp + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + done + +lemma setupCallerCap_ccorres [corres]: + "ccorres dc xfdc (valid_pspace' + and (\s. sch_act_wf (ksSchedulerAction s) s) and sch_act_not sender + and tcb_at' sender and tcb_at' receiver + and tcb_at' sender and tcb_at' receiver) + (UNIV \ \\sender = tcb_ptr_to_ctcb_ptr sender\ + \ \\receiver = tcb_ptr_to_ctcb_ptr receiver\ + \ \\canGrant = from_bool canGrant\) hs + (setupCallerCap sender receiver canGrant) + (Call setupCallerCap_'proc)" + apply (rule ccorres_gen_asm_state, rule ccorres_gen_asm_state) + apply (frule_tac p=sender in is_aligned_tcb_ptr_to_ctcb_ptr) + apply (cinit lift: sender_' receiver_' canGrant_') + apply (clarsimp simp: word_sle_def + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]]) + apply ccorres_remove_UNIV_guard + apply (ctac(no_vcg)) + apply (rule ccorres_move_array_assertion_tcb_ctes) + apply (simp only: getThreadReplySlot_def getThreadCallerSlot_def) + apply (ctac(no_vcg)) + apply (rename_tac replySlot replySlot') + apply (simp del: Collect_const) + apply (rule ccorres_getCTE_cte_at) + apply (rule ccorres_move_c_guard_cte) + apply (ctac(no_vcg) add: getSlotCap_h_val_ccorres[unfolded getSlotCap_def fun_app_def, + folded liftM_def, simplified ccorres_liftM_simp]) + apply (rule ccorres_assert2)+ + apply (simp add: ccorres_seq_skip locateSlot_conv + ) + apply (rule ccorres_move_array_assertion_tcb_ctes) + apply csymbr + apply (rule ccorres_getSlotCap_cte_at) + apply (rule ccorres_move_c_guard_cte) + apply (ctac(no_vcg)) + apply (rule ccorres_assert) + apply (simp only: ccorres_seq_skip) + apply csymbr + apply (ctac add: cteInsert_ccorres) + apply simp + apply (wp getSlotCap_cte_wp_at) + apply (clarsimp simp: ccap_relation_def cap_lift_reply_cap + cap_to_H_simps cap_reply_cap_lift_def + tcbSlots Kernel_C.tcbCaller_def + size_of_def cte_level_bits_def) + apply (simp add: is_aligned_neg_mask) + apply (wp 
getCTE_wp') + apply (simp add: tcbSlots Kernel_C.tcbCaller_def + size_of_def cte_level_bits_def + ptr_add_assertion_positive Collect_const_mem + tcb_cnode_index_defs) + apply simp + apply (rule_tac Q="\rv. valid_pspace' and tcb_at' receiver" in hoare_post_imp) + apply (auto simp: cte_wp_at_ctes_of isCap_simps valid_pspace'_def + tcbSlots Kernel_C.tcbCaller_def size_of_def + cte_level_bits_def)[1] + apply (case_tac cte,clarsimp) + apply (drule ctes_of_valid_cap') + apply fastforce + apply (simp add:valid_cap'_def capAligned_def) + apply (simp add: locateSlot_conv) + apply wp + apply (clarsimp simp: ccap_rights_relation_def allRights_def + mask_def cap_rights_to_H_def tcbCallerSlot_def + Kernel_C.tcbCaller_def) + apply simp + apply wp + apply (clarsimp simp: ThreadState_defs mask_def + valid_pspace'_def tcbReplySlot_def + valid_tcb_state'_def Collect_const_mem + tcb_cnode_index_defs) + done + +lemma sendIPC_dequeue_ccorres_helper: + "ep_ptr = Ptr ep ==> + ccorres (\rv rv'. rv' = tcb_ptr_to_ctcb_ptr dest) dest___ptr_to_struct_tcb_C_' + (invs' and st_tcb_at' (\st. isBlockedOnReceive st \ + blockingObject st = ep) dest + and ko_at' (RecvEP (dest#rest)) ep) UNIV hs + (setEndpoint ep $ case rest of [] \ Structures_H.IdleEP + | (a#list) \ Structures_H.RecvEP rest) + (\queue :== CALL ep_ptr_get_queue(ep_ptr);; + \dest___ptr_to_struct_tcb_C :== head_C \queue;; + \queue :== CALL tcbEPDequeue(\dest___ptr_to_struct_tcb_C,\queue);; + CALL ep_ptr_set_queue(ep_ptr,\queue);; + IF head_C \queue = Ptr 0 THEN + CALL endpoint_ptr_set_state(ep_ptr,scast EPState_Idle) + FI)" + apply (rule ccorres_from_vcg) + apply (rule allI) + apply (rule conseqPre, vcg) + apply (clarsimp split del: if_split) + apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) + apply simp + apply assumption+ + apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) + apply (elim conjE) + apply (frule (1) valid_ep_blockedD) + apply (elim conjE) + apply (frule cmap_relation_ep) + apply (erule (1) cmap_relation_ko_atE) + apply (intro conjI) + apply (erule h_t_valid_clift) + apply (rule impI) + apply (rule exI) + apply (rule conjI) + apply (rule_tac x=\ in exI) + apply (intro conjI) + apply assumption+ + apply (drule (2) ep_to_ep_queue) + apply (simp add: tcb_queue_relation'_def) + apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def + cong: imp_cong split del: if_split) + apply (intro conjI impI allI) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + \ \empty case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (frule iffD1 [OF tcb_queue_head_empty_iff + [OF tcb_queue_relation'_queue_rel]]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def) + apply simp + apply (simp add: setEndpoint_def split_def) + apply (rule conjI) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) + apply simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule 
cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def) + \ \non-empty case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (frule tcb_queue_head_empty_iff [OF tcb_queue_relation'_queue_rel]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def) + apply (simp add: setEndpoint_def split_def) + apply (rule conjI) + apply (rule bexI [OF _ setObject_eq]) + apply (frule(1) st_tcb_at_h_t_valid) + apply (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_def isSendEP_def mask_shiftl_decompose + tcb_queue_relation'_def valid_ep'_def + simp flip: canonical_bit_def + split: endpoint.splits list.splits) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") (* FIXME AARCH64: clean up names *) + apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) + apply (simp (no_asm) add: objBits_simps') + apply (clarsimp split: if_split) + apply simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def) + done + +(* FIXME: this is the old formulation, the above one does not work as expected *) +lemma rf_sr_tcb_update_twice: + "h_t_valid (hrs_htd (hrs2 (globals s') (t_hrs_' (gs2 (globals s'))))) c_guard + (ptr (t_hrs_' (gs2 (globals s'))) (globals s')) + \ ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs :: tcb_C ptr) (v ths gs)) + (hrs_mem_update (heap_update (ptr ths gs) (v' ths gs)) (hrs2 gs ths))) (gs2 gs)) s') \ rf_sr) + = ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs) (v ths gs)) (hrs2 gs ths)) (gs2 gs)) s') \ rf_sr)" + by (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def typ_heap_simps' + carch_state_relation_def cmachine_state_relation_def + packed_heap_update_collapse_hrs) + +lemma sendIPC_block_ccorres_helper: + "ccorres dc xfdc (tcb_at' thread and valid_objs' and pspace_canonical' and + pspace_aligned' and pspace_distinct' and + sch_act_not thread and ep_at' epptr and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and + K (bos = ThreadState_BlockedOnSend + \ epptr' = epptr \ badge' = badge + \ cg = from_bool canGrant \ cgr = from_bool canGrantReply + \ dc' = from_bool do_call) and + K (epptr = epptr && ~~ mask 4) and + K (badge = badge && mask 64)) + UNIV hs + (setThreadState (Structures_H.thread_state.BlockedOnSend + epptr badge canGrant canGrantReply do_call) thread) + (Guard C_Guard + \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr thread\ + (CALL thread_state_ptr_set_tsType(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), + scast bos));; + Guard C_Guard + \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr thread\ + (CALL thread_state_ptr_set_blockingObject(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), epptr'));; + Guard C_Guard + \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr thread\ + (CALL thread_state_ptr_set_blockingIPCBadge(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), badge'));; + Guard C_Guard + \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr thread\ + (CALL thread_state_ptr_set_blockingIPCCanGrant(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), cg));; + Guard C_Guard + \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr thread\ + (CALL thread_state_ptr_set_blockingIPCCanGrantReply(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), cgr));; + Guard C_Guard + \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr thread\ + (CALL thread_state_ptr_set_blockingIPCIsCall(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), dc'));; + CALL scheduleTCB(tcb_ptr_to_ctcb_ptr thread))" + unfolding K_def setThreadState_def + apply (intro ccorres_gen_asm) + apply (rule ccorres_guard_imp) + apply (rule_tac P="canonical_address epptr" in ccorres_gen_asm) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac P=\ and P'="tcb_at' thread" + in threadSet_ccorres_lemma3) + apply vcg + apply clarsimp + apply (frule(1) tcb_at_h_t_valid) + apply (frule h_t_valid_c_guard) + apply (clarsimp simp: typ_heap_simps' rf_sr_tcb_update_twice + simp flip: canonical_bit_def) + apply (erule(1) rf_sr_tcb_update_no_queue_gen, (simp add: typ_heap_simps')+)[1] + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: ctcb_relation_def cthread_state_relation_def Suc_canonical_bit_fold + ThreadState_defs mask_shiftl_decompose + canonical_make_canonical_idem) + apply (clarsimp simp: mask_def) + apply ceqv + apply clarsimp + apply ctac + apply (wp threadSet_weak_sch_act_wf_runnable' + threadSet_valid_objs') + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def + tcb_cte_cases_def cteSizeBits_def) + apply (drule obj_at'_is_canonical, simp, simp) + apply clarsimp + done + +lemma tcb_queue_relation_last_not_NULL: + assumes tq: "tcb_queue_relation getNext getPrev mp queue qprev qhead" + and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" + and tcbs: "queue \ []" + shows "tcb_ptr_to_ctcb_ptr (last queue) \ NULL" +proof - + note last_in_set [where as = queue] + + with tq valid_ep(1) show ?thesis + by (rule tcb_queue_relation_not_NULL') fact+ +qed + +lemma tcb_queue_relation_update_end: + fixes getNext_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" and + getPrev_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" + assumes qr: "tcb_queue_relation getNext getPrev mp queue qprev qhead" + and qe: "qend = (if queue = [] then NULL + else tcb_ptr_to_ctcb_ptr (last queue))" + and qe': "qend' \ tcb_ptr_to_ctcb_ptr ` set queue" + and cs_tcb: "mp qend' = Some tcb" + and valid_ep: "\t\set queue. 
tcb_at' t s" "distinct queue" + and qeN: "qend' \ NULL" + and qpN: "queue = [] \ qprev = NULL" + and fgN: "fg_cons getNext (getNext_update \ (\x _. x))" + and fgP: "fg_cons getPrev (getPrev_update \ (\x _. x))" + and npu: "\f t. getNext (getPrev_update f t) = getNext t" + and pnu: "\f t. getPrev (getNext_update f t) = getPrev t" + shows "tcb_queue_relation getNext getPrev + (upd_unless_null qend (getNext_update (\_. qend') (the (mp qend))) + (mp(qend' := Some (getNext_update (\_. NULL) (getPrev_update (\_. qend) tcb))))) + (queue @ [ctcb_ptr_to_tcb_ptr qend']) + qprev (if qhead = NULL then qend' else qhead)" + using qr qe qe' cs_tcb valid_ep qeN qpN +proof (induct queue arbitrary: qhead qprev qend) + case Nil + thus ?case + by (clarsimp simp: upd_unless_null_def fg_consD1 [OF fgN] + fg_consD1 [OF fgP] pnu npu) +next + case (Cons tcb' tcbs) + have not_NULL[simplified]: "tcb_ptr_to_ctcb_ptr (last (tcb'#tcbs)) \ NULL" + "qhead \ NULL" + using tcb_queue_relation_next_not_NULL tcb_queue_relation_last_not_NULL + Cons tcb_at_not_NULL + by (auto split: if_split) + thus ?case using Cons.prems + apply (clarsimp simp: upd_unless_null_def fg_consD1 [OF fgN] + fg_consD1 [OF fgP] pnu npu + split: if_split) + apply (rule conjI, clarsimp) + apply (rule impI) + apply (subst tcb_queue_relation_cong) + prefer 5 + apply (erule Cons.hyps [OF _ refl], simp_all add: upd_unless_null_def) + apply (frule(3) tcb_queue_relation_next_not_NULL, simp) + done +qed + +lemma tcbEPAppend_update: + assumes sr: "ep_queue_relation' mp queue qhead qend" + and qe': "qend' \ tcb_ptr_to_ctcb_ptr ` set queue" + and cs_tcb: "mp qend' = Some tcb" + and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" + and qeN: "qend' \ NULL" + shows "ep_queue_relation' + (upd_unless_null qend (tcbEPNext_C_update (\_. qend') (the (mp qend))) + (mp(qend' \ tcb\tcbEPPrev_C := qend, tcbEPNext_C := NULL\))) + (queue @ [ctcb_ptr_to_tcb_ptr qend']) + (if qhead = NULL then qend' else qhead) qend'" + using sr qe' cs_tcb valid_ep qeN + apply - + apply (erule tcb_queue_relationE') + apply (rule tcb_queue_relationI') + apply (erule(6) tcb_queue_relation_update_end + [where getNext_update = tcbEPNext_C_update + and getPrev_update = tcbEPPrev_C_update]) + apply simp_all + done + +lemma tcb_queue_relation_qend_mem': + "\ tcb_queue_relation' getNext getPrev mp queue qhead qend; + (\tcb\set queue. tcb_at' tcb t) \ + \ qend \ NULL \ ctcb_ptr_to_tcb_ptr qend \ set queue" + by (clarsimp simp: tcb_queue_head_empty_iff tcb_queue_relation'_def + split: if_split) + +lemma tcb_queue_relation_qend_valid': + "\ tcb_queue_relation' getNext getPrev (cslift s') queue qhead qend; + (s, s') \ rf_sr; (\tcb\set queue. tcb_at' tcb s) \ + \ qend \ NULL \ s' \\<^sub>c qend" + apply (frule (1) tcb_queue_relation_qend_mem') + apply clarsimp + apply (drule (3) tcb_queue_memberD [OF tcb_queue_relation'_queue_rel]) + apply (simp add: h_t_valid_clift_Some_iff) + done + +lemma tcb_queue'_head_end_NULL: + assumes qr: "tcb_queue_relation' getNext getPrev mp queue qhead qend" + and tat: "\t\set queue. tcb_at' t s" + shows "(qend = NULL) = (qhead = NULL)" + using qr tat + apply - + apply (erule tcb_queue_relationE') + apply (simp add: tcb_queue_head_empty_iff split: if_splits) + apply (rule tcb_at_not_NULL) + apply (erule bspec) + apply simp + done + +lemma tcbEPAppend_spec: + "\s queue. \ \ \s. \t. (t, s) \ rf_sr + \ (\tcb\set queue. 
tcb_at' tcb t) \ distinct queue + \ tcb_at' (ctcb_ptr_to_tcb_ptr \tcb) t + \ (ctcb_ptr_to_tcb_ptr \tcb \ set queue) + \ ep_queue_relation' (cslift s) queue + (head_C \queue) (end_C \queue)\ + Call tcbEPAppend_'proc + {t. head_C (ret__struct_tcb_queue_C_' t) = + (if head_C \<^bsup>s\<^esup>queue = tcb_Ptr 0 + then \<^bsup>s\<^esup>tcb + else head_C \<^bsup>s\<^esup>queue) + \ end_C (ret__struct_tcb_queue_C_' t) = \<^bsup>s\<^esup>tcb + \ ep_queue_relation' (cslift t) + (queue @ [ctcb_ptr_to_tcb_ptr \<^bsup>s\<^esup>tcb]) + (head_C (ret__struct_tcb_queue_C_' t)) + (end_C (ret__struct_tcb_queue_C_' t)) + \ (cslift t |` (- tcb_ptr_to_ctcb_ptr ` set + ((ctcb_ptr_to_tcb_ptr \<^bsup>s\<^esup>tcb) # queue))) = + (cslift s |` (- tcb_ptr_to_ctcb_ptr ` set + ((ctcb_ptr_to_tcb_ptr \<^bsup>s\<^esup>tcb) # queue))) + \ option_map tcb_null_ep_ptrs \ (cslift t) = + option_map tcb_null_ep_ptrs \ (cslift s) + \ cslift_all_but_tcb_C t s \ (hrs_htd \<^bsup>t\<^esup>t_hrs) = (hrs_htd \<^bsup>s\<^esup>t_hrs) + \ (\rs. zero_ranges_are_zero rs (\<^bsup>s\<^esup>t_hrs) + \ zero_ranges_are_zero rs (\<^bsup>t\<^esup>t_hrs))}" + apply (intro allI) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule obj_at_ko_at') + apply clarsimp + apply (frule cmap_relation_tcb) + apply (drule (1) cmap_relation_ko_atD) + apply clarsimp + apply (frule c_guard_clift) + apply (frule (1) tcb_queue'_head_end_NULL) + apply (frule (1) tcb_queue_relation_qend_mem') + apply (frule (2) tcb_queue_relation_qend_valid') + apply (subgoal_tac "end_C (queue_' x) \ tcb_' x") + prefer 2 + apply clarsimp + apply (frule tcbEPAppend_update) + apply (erule contrapos_nn, erule tcb_ptr_to_ctcb_ptr_imageD) + apply assumption+ + apply (drule tcb_at_not_NULL, simp) + apply (unfold upd_unless_null_def) + apply (clarsimp split: if_split_asm) + apply (simp add: typ_heap_simps' clift_heap_update_same) + apply (rule ext) + apply (clarsimp simp: typ_heap_simps tcb_null_ep_ptrs_def + split: if_split) + apply (simp add: typ_heap_simps' clift_heap_update_same) + apply (intro conjI) + apply (clarsimp simp add: typ_heap_simps h_t_valid_clift_Some_iff) + apply (erule iffD1 [OF tcb_queue_relation'_cong, OF refl refl refl, rotated -1]) + apply (clarsimp split: if_split) + apply (rule ext) + apply (clarsimp dest!: ctcb_ptr_to_tcb_ptr_imageI simp: typ_heap_simps h_t_valid_clift_Some_iff) + apply (rule ext) + apply (clarsimp simp: tcb_null_ep_ptrs_def typ_heap_simps h_t_valid_clift_Some_iff + split: if_split) + done + +lemma sendIPC_enqueue_ccorres_helper: + "ccorres dc xfdc (valid_pspace' + and (\s. sym_refs ((state_refs_of' s)(epptr := set queue \ {EPSend}))) + and st_tcb_at' (\st. isBlockedOnSend st \ + blockingObject st = epptr) thread + and ko_at' (ep::Structures_H.endpoint) epptr + and K ((ep = IdleEP \ queue = [thread]) \ + (\q. 
ep = SendEP q \ thread \ set q \ + queue = q @ [thread]))) + UNIV hs + (setEndpoint epptr (Structures_H.endpoint.SendEP queue)) + (\queue :== CALL ep_ptr_get_queue(ep_Ptr epptr);; + (\queue :== CALL tcbEPAppend(tcb_ptr_to_ctcb_ptr thread,\queue);; + (CALL endpoint_ptr_set_state(ep_Ptr epptr, scast EPState_Send);; + CALL ep_ptr_set_queue(ep_Ptr epptr,\queue))))" + unfolding K_def + apply (rule ccorres_gen_asm) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp split del: if_split) + apply (frule cmap_relation_ep) + apply (erule (1) cmap_relation_ko_atE) + apply (rule conjI) + apply (erule h_t_valid_clift) + apply (frule(1) st_tcb_at_h_t_valid) + apply (frule pred_tcb_at') + apply (rule impI) + apply (rule_tac x="init queue" in exI) + apply (frule(1) ko_at_valid_ep' [OF _ valid_pspace_valid_objs']) + apply (frule is_aligned_tcb_ptr_to_ctcb_ptr) + apply (rule conjI) + apply (rule_tac x=\ in exI) + apply (simp add: cendpoint_relation_def Let_def) + apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] + apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\))") + prefer 2 + apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def + obj_at'_def projectKOs objBitsKO_def) + apply (subgoal_tac "ko_at' (SendEP queue) epptr (\\ksPSpace := + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\)") + prefer 2 + apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) + apply (intro conjI impI allI) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + apply (case_tac ep, simp_all add: EPState_Idle_def EPState_Send_def)[1] + \ \IdleEP case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (simp add: setEndpoint_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def init_def Let_def + cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Send_def) + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) + apply (rule conjI, simp add: mask_def) + subgoal + apply (clarsimp simp: valid_pspace'_def objBits_simps' mask_shiftl_decompose + simp flip: canonical_bit_def) + apply (erule (1) tcb_and_not_mask_canonical) + by (simp (no_asm) add: tcbBlockSizeBits_def) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) + apply (simp add: isSendEP_def isRecvEP_def) + apply simp + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (simp only:projectKOs injectKO_ep objBits_simps) + apply clarsimp + apply (clarsimp simp: obj_at'_def) + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: typ_heap_simps') + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + \ \SendEP case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (simp add: setEndpoint_def split_def) + 
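+  (* The SendEP case mirrors the IdleEP case above: after unfolding setEndpoint, the
+     state relation is re-established componentwise (tcb, endpoint and notification
+     relations, then arch and machine state) for the endpoint now holding the
+     extended queue. *)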
apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def init_def + cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Send_def + split: if_split) + subgoal + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, clarsimp) + apply (rule conjI, fastforce simp: mask_def) + apply (clarsimp simp: valid_pspace'_def objBits_simps' mask_shiftl_decompose + simp flip: canonical_bit_def) + apply (erule (1) tcb_and_not_mask_canonical) + apply (simp (no_asm) add: tcbBlockSizeBits_def) + apply (clarsimp simp: valid_pspace'_def objBits_simps' mask_shiftl_decompose + simp flip: canonical_bit_def) + apply (rule conjI, solves \simp (no_asm) add: mask_def\) + apply (erule (1) tcb_and_not_mask_canonical) + apply (simp (no_asm) add: tcbBlockSizeBits_def) + done + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) + apply (simp add: isSendEP_def isRecvEP_def) + apply simp + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def) + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + done + +lemma ctcb_relation_blockingIPCCanGrantD: + "\ ctcb_relation ko ko' ; isReceive (tcbState ko) \ isSend (tcbState ko) \ + \ blockingIPCCanGrant_CL (thread_state_lift (tcbState_C ko')) + = from_bool (blockingIPCCanGrant (tcbState ko))" + apply (erule disjE; case_tac "tcbState ko" ; clarsimp simp: isReceive_def isSend_def) + apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def + thread_state_lift_def from_bool_to_bool_iff mask_eq1_nochoice)+ + done + +lemma sendIPC_ccorres [corres]: + "ccorres dc xfdc (invs' and st_tcb_at' simple' thread + and sch_act_not thread and ep_at' epptr) + (UNIV \ \\blocking = from_bool blocking\ + \ \\do_call = from_bool do_call\ + \ \\badge = badge\ + \ \\canGrant = from_bool canGrant\ + \ \\canGrantReply = from_bool canGrantReply\ + \ \\thread = tcb_ptr_to_ctcb_ptr thread\ + \ \\epptr = Ptr epptr\ + \ \badge && mask 64 = badge\) hs + (sendIPC blocking do_call badge canGrant canGrantReply thread epptr) + (Call sendIPC_'proc)" + unfolding K_def + apply (rule ccorres_gen_asm2) + apply (cinit' lift: blocking_' do_call_' badge_' canGrant_' canGrantReply_' thread_' epptr_') + apply (unfold sendIPC_def)[1] + apply (rule ccorres_pre_getEndpoint) + apply (rename_tac ep) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="case ep of IdleEP \ scast EPState_Idle + | RecvEP _ \ scast EPState_Recv + | SendEP _ \ scast EPState_Send" + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule cmap_relationE1 [OF cmap_relation_ep]) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps cendpoint_relation_def 
Let_def + split: endpoint.split_asm) + apply ceqv + apply (rule_tac A="invs' and st_tcb_at' simple' thread + and sch_act_not thread and ko_at' ep epptr + and ep_at' epptr" + in ccorres_guard_imp2 [where A'=UNIV]) + apply wpc + \ \RecvEP case\ + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply (intro ccorres_rhs_assoc) + apply (csymbr, csymbr) + apply wpc + apply (simp only: haskell_fail_def) + apply (rule ccorres_fail) + apply (rename_tac dest rest) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac dest=dest in sendIPC_dequeue_ccorres_helper) + apply simp + apply ceqv + apply (rename_tac dest') + apply (simp only: K_bind_def haskell_assert_def return_bind from_bool_0) + + apply (rule getThreadState_ccorres_foo) + apply (rename_tac recvState) + apply (rule ccorres_assert) + apply (ctac(no_vcg)) + + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=replyCanGrant_' and val="from_bool (blockingIPCCanGrant recvState)" + and R="st_tcb_at' ((=) recvState) dest" and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply (clarsimp, rule conseqPre, vcg) + apply (fastforce simp: pred_tcb_at'_def tcb_at_h_t_valid typ_heap_simps' + dest: obj_at_cslift_tcb ctcb_relation_blockingIPCCanGrantD) + apply ceqv + apply (ctac(no_vcg)) + apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) + apply (clarsimp split del: if_split) + apply (wpc ; ccorres_rewrite) + apply (clarsimp simp: disj_imp[symmetric] split del: if_split) + apply (wpc ; clarsimp) + apply ccorres_rewrite + apply (ctac add: setupCallerCap_ccorres) + apply ccorres_rewrite + apply (ctac add: setThreadState_ccorres) + apply (rule ccorres_return_Skip) + apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift possibleSwitchTo_sch_act_not + possibleSwitchTo_sch_act_not sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def)+ + apply vcg + apply (wpsimp wp: doIPCTransfer_sch_act hoare_vcg_all_lift + set_ep_valid_objs' setEndpoint_valid_mdb' + | wp (once) hoare_drop_imp + | strengthen sch_act_wf_weak)+ + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs Collect_const_mem mask_def + option_to_ptr_def option_to_0_def + split: bool.split_asm) + + \ \IdleEP case\ + apply (rule ccorres_cond_true) + apply (rule ccorres_cond) + apply (clarsimp simp: from_bool_def split: bool.split) + \ \blocking case\ + apply (intro ccorres_rhs_assoc) + apply csymbr + apply (simp only:) + \ \apply (ctac (trace, no_vcg,c_lines 6) add: sendIPC_block_ccorres_helper)\ + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + apply (rule sendIPC_block_ccorres_helper) + apply ceqv + apply (simp only: K_bind_def fun_app_def) + apply (rule_tac ep=IdleEP in sendIPC_enqueue_ccorres_helper) + apply (simp add: valid_ep'_def) + apply (wp sts_st_tcb') + apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) + apply (clarsimp simp: guard_is_UNIV_def) + apply (rule ccorres_return_Skip) + \ \SendEP case\ + apply (rule ccorres_cond_true) + apply (rule ccorres_cond) + apply (clarsimp simp: from_bool_def split: bool.split) + \ \blocking case\ + apply (intro ccorres_rhs_assoc) + apply csymbr + \ \apply (ctac (no_vcg,c_lines 6) add: sendIPC_block_ccorres_helper)\ + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + 
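+  (* As in the IdleEP case, the repeated ccorres_rhs_assoc2 steps regroup the six C
+     statements of the blocking branch so that sendIPC_block_ccorres_helper and
+     sendIPC_enqueue_ccorres_helper can be applied to the two halves. *)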
apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rename_tac list) + apply (rule ccorres_split_nothrow_novcg) + apply (simp only: ) + apply (rule sendIPC_block_ccorres_helper) + apply ceqv + apply (simp only: K_bind_def fun_app_def) + apply (rule_tac ep="SendEP list" in sendIPC_enqueue_ccorres_helper) + apply (simp add: valid_ep'_def) + apply (wp sts_st_tcb') + apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) + apply (clarsimp simp: guard_is_UNIV_def) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: EPState_Recv_def EPState_Send_def EPState_Idle_def + split: if_split) + apply (frule(1) ko_at_valid_objs'[OF _ invs_valid_objs']) + apply (clarsimp simp: projectKO_opt_ep split: kernel_object.split_asm) + apply (subgoal_tac "epptr \ thread \ bound_tcb_at' (\x. tcb_bound_refs' x = + state_refs_of' s thread) thread s \ (\r. (thread, r) \ + ep_q_refs_of' ep)") + apply (clarsimp simp: valid_obj'_def valid_ep'_def refs_of'_def + split: endpoint.splits) + apply (frule(1) sym_refs_obj_atD'[OF _ invs_sym']) + apply (clarsimp simp: st_tcb_at_refs_of_rev' isBlockedOnReceive_def) + apply (auto split: list.splits elim!: pred_tcb'_weakenE)[1] + apply (subgoal_tac "state_refs_of' s epptr = {}") + apply (clarsimp simp: obj_at'_def is_aligned_neg_mask objBitsKO_def + projectKOs invs'_def valid_state'_def objBits_simps' + st_tcb_at'_def valid_tcb_state'_def ko_wp_at'_def + isBlockedOnSend_def projectKO_opt_tcb + split: if_split_asm if_split) + apply (rule conjI, simp, rule impI, clarsimp simp: valid_pspace'_def) + apply (erule delta_sym_refs) + apply (clarsimp split: if_split_asm + dest!: symreftype_inverse')+ + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def tcb_bound_refs'_def + projectKOs eq_sym_conv + dest!: symreftype_inverse' + split: if_split_asm) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def projectKOs + tcb_bound_refs'_def eq_sym_conv) + apply (clarsimp simp: obj_at'_def state_refs_of'_def projectKOs) + apply (frule(1) sym_refs_obj_atD'[OF _ invs_sym']) + apply clarsimp + apply (rule conjI, assumption) + apply (clarsimp dest!: st_tcb_strg'[rule_format] + simp: invs'_def valid_state'_def obj_at'_def objBits_simps' + projectKOs valid_tcb_state'_def) + apply (rule conjI[rotated]) + apply (rule conjI[rotated]) + apply (clarsimp simp: isBlockedOnSend_def ko_wp_at'_def obj_at'_def + projectKOs projectKO_opt_tcb objBits_simps') + apply (fastforce split: if_split_asm + elim: delta_sym_refs + simp: pred_tcb_at'_def obj_at'_def projectKOs + tcb_bound_refs'_def eq_sym_conv symreftype_def) + apply (clarsimp simp: valid_pspace'_def) + apply clarsimp + apply (frule(1) sym_refs_obj_atD'[OF _ invs_sym']) + apply (frule simple_st_tcb_at_state_refs_ofD') + apply (case_tac ep, auto simp: st_tcb_at_refs_of_rev' st_tcb_at'_def + obj_at'_def projectKOs)[1] + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp simp: guard_is_UNIV_def) + done + +lemma ctcb_relation_blockingIPCCanGrantReplyD: + "\ ctcb_relation ko ko' ; isSend (tcbState ko) \ + \ blockingIPCCanGrantReply_CL (thread_state_lift (tcbState_C ko')) + = from_bool (blockingIPCCanGrantReply (tcbState ko))" + apply ( case_tac "tcbState ko" ; clarsimp simp: isReceive_def isSend_def) + apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def + thread_state_lift_def from_bool_to_bool_iff mask_eq1_nochoice)+ + done + +lemma receiveIPC_block_ccorres_helper: + "ccorres dc xfdc (tcb_at' thread and valid_objs' and + pspace_canonical' and 
pspace_aligned' and pspace_distinct' and + sch_act_not thread and ep_at' epptr and + (\s. sch_act_wf (ksSchedulerAction s) s) and + K (epptr = epptr && ~~ mask 4) and + K (isEndpointCap cap \ ccap_relation cap cap')) + UNIV hs + (setThreadState (Structures_H.thread_state.BlockedOnReceive + epptr (capEPCanGrant cap)) thread) + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr thread\ + (CALL thread_state_ptr_set_tsType(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), + scast ThreadState_BlockedOnReceive));; + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr thread\ + (CALL thread_state_ptr_set_blockingObject(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), + ucast (ptr_val (ep_Ptr epptr))));; + \ret__unsigned_longlong :== CALL cap_endpoint_cap_get_capCanGrant(cap');; + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr thread\ + (CALL thread_state_ptr_set_blockingIPCCanGrant(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), + \ret__unsigned_longlong)));; + CALL scheduleTCB(tcb_ptr_to_ctcb_ptr thread))" + unfolding K_def setThreadState_def + apply (intro ccorres_gen_asm) + apply (rule ccorres_guard_imp) + apply (rule_tac P="canonical_address epptr" in ccorres_gen_asm) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac P=\ and P'="tcb_at' thread" + in threadSet_ccorres_lemma3) + apply vcg + apply clarsimp + apply (frule(1) tcb_at_h_t_valid) + apply (frule h_t_valid_c_guard) + apply (clarsimp simp: typ_heap_simps' rf_sr_tcb_update_twice cap_get_tag_isCap + simp flip: canonical_bit_def) + apply (erule(1) rf_sr_tcb_update_no_queue_gen, (simp add: typ_heap_simps)+) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: ctcb_relation_def cthread_state_relation_def ccap_relation_ep_helpers + ThreadState_defs cap_get_tag_isCap mask_shiftl_decompose + Suc_canonical_bit_fold canonical_make_canonical_idem) + apply (clarsimp simp: mask_def) + apply ceqv + apply clarsimp + apply ctac + apply (wp hoare_vcg_all_lift threadSet_valid_objs' + threadSet_weak_sch_act_wf_runnable') + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def + tcb_cte_cases_def obj_at'_is_canonical cteSizeBits_def) + apply clarsimp + done + +lemma receiveIPC_enqueue_ccorres_helper: + "ccorres dc xfdc (valid_pspace' + and (\s. sym_refs ((state_refs_of' s)(epptr := set queue \ {EPRecv}))) + and st_tcb_at' (\st. isBlockedOnReceive st \ + blockingObject st = epptr) thread + and ko_at' (ep::Structures_H.endpoint) epptr + and K ((ep = IdleEP \ queue = [thread]) \ + (\q. 
ep = RecvEP q \ thread \ set q \ + queue = q @ [thread]))) + UNIV hs + (setEndpoint epptr (Structures_H.endpoint.RecvEP queue)) + (\queue :== CALL ep_ptr_get_queue(ep_Ptr epptr);; + (\queue :== CALL tcbEPAppend(tcb_ptr_to_ctcb_ptr thread,\queue);; + (CALL endpoint_ptr_set_state(ep_Ptr epptr, scast EPState_Recv);; + CALL ep_ptr_set_queue(ep_Ptr epptr,\queue))))" + unfolding K_def + apply (rule ccorres_gen_asm) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp split del: if_split) + apply (frule cmap_relation_ep) + apply (erule (1) cmap_relation_ko_atE) + apply (rule conjI) + apply (erule h_t_valid_clift) + apply (frule(1) st_tcb_at_h_t_valid) + apply (frule pred_tcb_at') + apply (rule impI) + apply (rule_tac x="init queue" in exI) + apply (frule(1) ko_at_valid_ep' [OF _ valid_pspace_valid_objs']) + apply (frule is_aligned_tcb_ptr_to_ctcb_ptr) + apply (rule conjI) + apply (rule_tac x=\ in exI) + apply (simp add: cendpoint_relation_def Let_def) + apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] + apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\))") + prefer 2 + apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def + obj_at'_def projectKOs objBitsKO_def) + apply (subgoal_tac "ko_at' (RecvEP queue) epptr (\\ksPSpace := + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\)") + prefer 2 + apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) + apply (intro conjI impI allI) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + apply (case_tac ep, simp_all add: EPState_Idle_def EPState_Recv_def)[1] + \ \RecvEP case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (simp add: setEndpoint_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def init_def + cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Recv_def + split: if_split) + subgoal + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, clarsimp) + apply (rule conjI, fastforce simp: mask_def) + apply (clarsimp simp: valid_pspace'_def objBits_simps' mask_shiftl_decompose + simp flip: canonical_bit_def) + apply (erule (1) tcb_and_not_mask_canonical) + apply (simp (no_asm) add: tcbBlockSizeBits_def) + apply (clarsimp simp: valid_pspace'_def objBits_simps' mask_shiftl_decompose + simp flip: canonical_bit_def) + apply (rule conjI, solves \simp (no_asm) add: mask_def\) + apply (erule (1) tcb_and_not_mask_canonical) + apply (simp (no_asm) add: tcbBlockSizeBits_def) + done + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) + apply (simp add: isSendEP_def isRecvEP_def) + apply simp + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) + apply (clarsimp simp: 
carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + \ \IdleEP case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (simp add: setEndpoint_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def init_def Let_def + cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Recv_def) + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + simp flip: canonical_bit_def) + subgoal + apply (rule conjI, solves\simp (no_asm) add: mask_def\) + apply (clarsimp simp: valid_pspace'_def mask_shiftl_decompose + simp flip: canonical_bit_def) + apply (erule (1) tcb_and_not_mask_canonical, simp (no_asm) add: tcbBlockSizeBits_def) + done + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) + apply (simp add: isSendEP_def isRecvEP_def) + apply simp + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def) + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: typ_heap_simps') + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + done + +lemma receiveIPC_dequeue_ccorres_helper: + "ccorres (\rv rv'. rv' = tcb_ptr_to_ctcb_ptr sender) sender_' + (invs' and st_tcb_at' (\st. 
isBlockedOnSend st \ + blockingObject st = ep) sender + and ko_at' (SendEP (sender#rest)) ep) UNIV hs + (setEndpoint ep (case rest of [] \ Structures_H.IdleEP + | (a#list) \ Structures_H.SendEP rest)) + (\queue :== CALL ep_ptr_get_queue(Ptr ep);; + \sender :== head_C \queue;; + \queue :== CALL tcbEPDequeue(\sender,\queue);; + CALL ep_ptr_set_queue(Ptr ep,\queue);; + IF head_C \queue = Ptr 0 THEN + CALL endpoint_ptr_set_state(Ptr ep,scast EPState_Idle) + FI)" + apply (rule ccorres_from_vcg) + apply (rule allI) + apply (rule conseqPre, vcg) + apply (clarsimp split del: if_split) + apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) + apply simp + apply assumption+ + apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) + apply (elim conjE) + apply (frule (1) valid_ep_blockedD) + apply (elim conjE) + apply (frule cmap_relation_ep) + apply (erule (1) cmap_relation_ko_atE) + apply (intro conjI) + apply (erule h_t_valid_clift) + apply (rule impI) + apply (rule exI) + apply (rule conjI) + apply (rule_tac x=\ in exI) + apply (intro conjI) + apply assumption+ + apply (drule (2) ep_to_ep_queue) + apply (simp add: tcb_queue_relation'_def) + apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def + cong: imp_cong split del: if_split) + apply (intro conjI impI allI) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + \ \empty case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (frule iffD1 [OF tcb_queue_head_empty_iff + [OF tcb_queue_relation'_queue_rel]]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def) + apply simp + apply (simp add: setEndpoint_def split_def) + apply (rule conjI) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) + apply simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: typ_heap_simps') + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def) + \ \non-empty case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (frule tcb_queue_head_empty_iff [OF tcb_queue_relation'_queue_rel]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def) + apply (simp add: setEndpoint_def split_def) + apply (rule conjI) + apply (rule bexI [OF _ setObject_eq]) + apply (frule(1) st_tcb_at_h_t_valid) + apply (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ep_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule 
ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def mask_shiftl_decompose + isRecvEP_def isSendEP_def + tcb_queue_relation'_def valid_ep'_def + simp flip: canonical_bit_def + split: endpoint.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") (* FIXME AARCH64: clean up names *) + apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) + apply (simp (no_asm) add: objBits_simps') + apply (clarsimp split: if_split) + apply simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: typ_heap_simps') + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def) + done + +lemmas ccorres_pre_getBoundNotification = ccorres_pre_threadGet[where f=tcbBoundNotification, folded getBoundNotification_def] + +lemma completeSignal_ccorres: + notes if_split[split del] + shows + "ccorres dc xfdc (invs' and tcb_at' thread) + (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr thread\ + \ \\ntfnPtr = Ptr ntfnptr\) hs + (completeSignal ntfnptr thread) + (Call completeSignal_'proc)" + apply (cinit lift: tcb_' ntfnPtr_') + apply (rule_tac P="invs' and tcb_at' thread" in ccorres_gen_asm_state) + apply clarsimp + apply (subgoal_tac "tcb_ptr_to_ctcb_ptr thread \ NULL") + prefer 2 + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + tcb_ptr_to_ctcb_ptr_def objBits_simps') + apply (drule sum_to_zero) + apply (clarsimp simp: obj_at'_def ctcb_offset_defs projectKOs objBitsKO_def + is_aligned_def objBits_simps') + apply clarsimp + apply csymbr + apply simp + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_pre_getNotification, rename_tac ntfn) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle + | WaitingNtfn _ \ scast NtfnState_Waiting + | ActiveNtfn _ \ scast NtfnState_Active" + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply clarsimp + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_ntfn]) + apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps + split: Structures_H.ntfn.splits) + apply ceqv + apply wpc + \ \IdleNtfn case\ + apply (clarsimp simp: NtfnState_Idle_def NtfnState_Active_def) + apply csymbr + apply (rule ccorres_cond_false) + apply (rule ccorres_fail) + \ \ActiveNtfn case\ + apply (clarsimp, csymbr, rule ccorres_cond_true) + apply (rule ccorres_rhs_assoc)+ + apply (rename_tac word) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac val=word and xf'=badge_' and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE[OF cmap_relation_ntfn]) + apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps) + apply ceqv + apply (ctac(no_vcg)) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp) + apply (frule(1) cmap_relation_ko_atD [OF 
cmap_relation_ntfn]) + apply (clarsimp simp: typ_heap_simps setNotification_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def update_ntfn_map_tos + cpspace_relation_def typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def mask_def) + apply simp + apply (clarsimp simp: carch_state_relation_def) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + apply wp + apply (clarsimp simp: guard_is_UNIV_def AARCH64_H.badgeRegister_def + AARCH64.badgeRegister_def C_register_defs + AARCH64.capRegister_def) + \ \WaitingNtfn case\ + apply (clarsimp simp: NtfnState_Active_def NtfnState_Waiting_def) + apply csymbr + apply (rule ccorres_cond_false) + apply (rule ccorres_fail) + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp) + done + +lemma doNBRecvFailedTransfer_ccorres[corres]: + "ccorres dc xfdc + \ + (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}) + [] (doNBRecvFailedTransfer thread) + (Call doNBRecvFailedTransfer_'proc)" + apply (cinit lift: thread_') + apply (ctac add: setRegister_ccorres) + by (clarsimp simp: C_register_defs AARCH64_H.badgeRegister_def + AARCH64.badgeRegister_def AARCH64.capRegister_def) + +lemma st_tcb_at'_ko_at': + "st_tcb_at' ((=) st) t s = (\tcb. tcbState tcb = st \ ko_at' tcb t s)" + unfolding pred_tcb_at'_def + by (auto dest: obj_at_ko_at' elim: obj_at'_weakenE) + +lemma receiveIPC_ccorres [corres]: + notes option.case_cong_weak [cong] + shows + "ccorres dc xfdc (invs' and st_tcb_at' simple' thread and sch_act_not thread + and valid_cap' cap and K (isEndpointCap cap)) + (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ + \ \ccap_relation cap \cap\ + \ \\isBlocking = from_bool isBlocking\) hs + (receiveIPC thread cap isBlocking) + (Call receiveIPC_'proc)" + unfolding K_def + apply (rule ccorres_gen_asm) + apply (cinit lift: thread_' cap_' isBlocking_') + apply (rule ccorres_pre_getEndpoint) + apply (rename_tac ep) + apply (simp only: ccorres_seq_skip) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="capEPPtr cap" + and R=\ + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (simp add: cap_endpoint_cap_lift ccap_relation_def cap_to_H_def) + apply ceqv + apply csymbr + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'=ntfnPtr_' + and r'="\rv rv'. rv' = option_to_ptr rv \ rv \ Some 0" + in ccorres_split_nothrow_novcg) + apply (simp add: getBoundNotification_def) + apply (rule_tac P="no_0_obj' and valid_objs'" in threadGet_vcg_corres_P) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (drule(1) ko_at_valid_objs', simp add: projectKOs) + apply (clarsimp simp: option_to_ptr_def option_to_0_def projectKOs + valid_obj'_def valid_tcb'_def) + apply ceqv + apply (rename_tac ntfnptr ntfnptr') + apply (simp del: Collect_const split del: if_split cong: call_ignore_cong) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=ret__int_' + and r'="\rv rv'. 
(rv' = 0) = (ntfnptr = None \ \ isActive rv)" + in ccorres_split_nothrow_novcg) + apply wpc + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: option_to_ptr_def option_to_0_def in_monad Bex_def) + apply (rule ccorres_pre_getNotification[where f=return, simplified]) + apply (rule_tac P="\s. ko_at' rv (the ntfnptr) s" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: option_to_ptr_def option_to_0_def in_monad Bex_def) + apply (erule cmap_relationE1[OF cmap_relation_ntfn]) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def + notification_state_defs isActive_def + split: Structures_H.ntfn.split_asm Structures_H.notification.splits) + apply ceqv + apply (rule ccorres_cond[where R=\]) + apply (simp add: Collect_const_mem) + apply (ctac add: completeSignal_ccorres) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="case ep of IdleEP \ scast EPState_Idle + | RecvEP _ \ scast EPState_Recv + | SendEP _ \ scast EPState_Send" + and R="ko_at' ep (capEPPtr cap)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule cmap_relationE1 [OF cmap_relation_ep]) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def + split: endpoint.split_asm) + apply ceqv + apply (rule_tac A="invs' and st_tcb_at' simple' thread + and sch_act_not thread + and ko_at' ep (capEPPtr cap)" + in ccorres_guard_imp2 [where A'=UNIV]) + apply wpc + \ \RecvEP case\ + apply (rule ccorres_cond_true) + apply csymbr + apply (simp only: case_bool_If from_bool_neq_0) + apply (rule ccorres_Cond_rhs, simp cong: Collect_cong split del: if_split) + apply (intro ccorres_rhs_assoc) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) + apply ceqv + apply simp + apply (rename_tac list NOo) + apply (rule_tac ep="RecvEP list" in receiveIPC_enqueue_ccorres_helper[simplified]) + apply (simp add: valid_ep'_def) + apply (wp sts_st_tcb') + apply (rename_tac list) + apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) + apply (clarsimp simp: guard_is_UNIV_def) + apply simp + apply (ctac add: doNBRecvFailedTransfer_ccorres) + \ \IdleEP case\ + apply (rule ccorres_cond_true) + apply csymbr + apply (simp only: case_bool_If from_bool_neq_0) + apply (rule ccorres_Cond_rhs, simp cong: Collect_cong split del: if_split) + apply (intro ccorres_rhs_assoc) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) + apply ceqv + apply simp + apply (rule_tac ep=IdleEP in receiveIPC_enqueue_ccorres_helper[simplified]) + apply (simp add: valid_ep'_def) + apply (wp sts_st_tcb') + apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) + apply (clarsimp simp: guard_is_UNIV_def) + apply simp + apply (ctac add: doNBRecvFailedTransfer_ccorres) + \ \SendEP case\ + apply (thin_tac "isBlockinga = from_bool P" for P) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply (intro ccorres_rhs_assoc) + apply (csymbr, csymbr, csymbr, csymbr, csymbr) + apply wpc + apply (simp only: 
haskell_fail_def) + apply (rule ccorres_fail) + apply (rename_tac sender rest) + apply csymbr + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + apply simp + apply (rule_tac sender=sender in receiveIPC_dequeue_ccorres_helper[simplified]) + apply ceqv + apply (rename_tac sender') + apply (simp only: K_bind_def haskell_assert_def return_bind) + apply (rule ccorres_move_c_guard_tcb) + apply (rule getThreadState_ccorres_foo) + apply (rename_tac sendState) + apply (rule ccorres_assert) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac val="blockingIPCBadge sendState" + and xf'=badge_' + and R="\s. \t. ko_at' t sender s \ tcbState t = sendState" + in ccorres_symb_exec_r_known_rv_UNIV [where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps + cthread_state_relation_def word_size + isSend_def thread_state_lift_def + split: Structures_H.thread_state.splits) + apply ceqv + apply (simp split del: if_split) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac val="from_bool (blockingIPCCanGrant sendState)" + and xf'=canGrant_' + and R="\s. \t. ko_at' t sender s \ tcbState t = sendState" + in ccorres_symb_exec_r_known_rv_UNIV [where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps + cthread_state_relation_def word_size + isSend_def thread_state_lift_def + split: Structures_H.thread_state.splits) + apply ceqv + + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=canGrantReply_' + and val="from_bool (blockingIPCCanGrantReply sendState)" + and R="st_tcb_at' ((=) sendState) sender" and R'=UNIV + in ccorres_symb_exec_r_known_rv_UNIV) + apply (clarsimp, rule conseqPre, vcg) + apply (fastforce simp: pred_tcb_at'_def tcb_at_h_t_valid typ_heap_simps' + dest: obj_at_cslift_tcb ctcb_relation_blockingIPCCanGrantReplyD) + apply ceqv + + apply (ctac(no_vcg)) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac val="from_bool (blockingIPCIsCall sendState)" + and xf'=do_call_' + and R="\s. \t. ko_at' t sender s \ tcbState t = sendState" + in ccorres_symb_exec_r_known_rv_UNIV [where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) + apply (clarsimp simp: ctcb_relation_def typ_heap_simps + cthread_state_relation_def word_size + isSend_def thread_state_lift_def + split: Structures_H.thread_state.splits) + apply ceqv + + apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) + apply wpc + (* blocking ipc call *) + apply (clarsimp split del: if_split simp del: Collect_const) + apply ccorres_rewrite + apply (wpc ; clarsimp ; ccorres_rewrite) + apply csymbr + apply clarsimp + apply ctac + apply ctac + (* non-blocking ipc call *) + apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) + apply ccorres_rewrite + apply ctac + apply (ctac add: possibleSwitchTo_ccorres) + apply (wpsimp wp: sts_st_tcb' sts_valid_objs') + apply (vcg exspec=setThreadState_modifies) + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs mask_def + cap_get_tag_isCap ccap_relation_ep_helpers) + apply (clarsimp simp: valid_tcb_state'_def) + apply (rule_tac Q="\_. 
valid_pspace' + and st_tcb_at' ((=) sendState) sender and tcb_at' thread + and (\s. sch_act_wf (ksSchedulerAction s) s) + and sch_act_not sender and K (thread \ sender) + and (\s. ksCurDomain s \ maxDomain)" in hoare_post_imp) + apply (fastforce simp: valid_pspace_valid_objs' pred_tcb_at'_def sch_act_wf_weak + obj_at'_def) + apply (wpsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def conj_ac)+ + apply (rule_tac Q="\rv. valid_pspace' + and cur_tcb' and tcb_at' sender and tcb_at' thread + and sch_act_not sender and K (thread \ sender) + and ep_at' (capEPPtr cap) + and (\s. ksCurDomain s \ maxDomain) + and (\s. sch_act_wf (ksSchedulerAction s) s)" + in hoare_post_imp) + subgoal by (auto, auto simp: st_tcb_at'_def obj_at'_def) + apply (wp hoare_vcg_all_lift set_ep_valid_objs') + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp simp: EPState_Recv_def EPState_Send_def EPState_Idle_def) + apply (frule(1) ko_at_valid_objs' [OF _ invs_valid_objs']) + apply (clarsimp simp: projectKO_opt_ep split: kernel_object.split_asm) + apply (subgoal_tac "(capEPPtr cap) \ thread + \ state_refs_of' s thread = {r \ state_refs_of' s thread. snd r = TCBBound}") + apply (clarsimp simp: valid_obj'_def valid_ep'_def refs_of'_def + split: endpoint.splits) + apply (rename_tac list) + apply (subgoal_tac "state_refs_of' s (capEPPtr cap) = (set list) \ {EPRecv} + \ thread \ (set list)") + subgoal by (fastforce simp: obj_at'_def is_aligned_neg_mask objBitsKO_def + projectKOs invs'_def valid_state'_def st_tcb_at'_def + valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' + isBlockedOnReceive_def projectKO_opt_tcb + objBits_simps' + elim!: delta_sym_refs + split: if_split_asm bool.splits) (*very long*) + apply (frule(1) sym_refs_obj_atD' [OF _ invs_sym']) + apply (clarsimp simp: st_tcb_at'_def ko_wp_at'_def obj_at'_def projectKOs + split: if_split_asm) + apply (drule(1) bspec)+ + apply (case_tac "tcbState obj", simp_all add: tcb_bound_refs'_def)[1] + apply (subgoal_tac "state_refs_of' s (capEPPtr cap) = {}") + subgoal by (fastforce simp: obj_at'_def is_aligned_neg_mask objBitsKO_def + projectKOs invs'_def valid_state'_def st_tcb_at'_def + valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' + isBlockedOnReceive_def projectKO_opt_tcb objBits_simps' + elim: delta_sym_refs + split: if_split_asm bool.splits) (*very long *) + apply (clarsimp simp: obj_at'_def state_refs_of'_def projectKOs) + apply (frule(1) sym_refs_ko_atD' [OF _ invs_sym']) + apply clarsimp + apply (rename_tac list x xa) + apply (rule_tac P="x\set list" in case_split) + apply (clarsimp simp:st_tcb_at_refs_of_rev') + apply (erule_tac x=x and P="\x. st_tcb_at' P x s" for P in ballE) + apply (subgoal_tac "sch_act_not x s") + prefer 2 + apply (frule invs_sch_act_wf') + apply (clarsimp simp:sch_act_wf_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) + apply (clarsimp simp: obj_at'_def st_tcb_at'_def + projectKOs isBlockedOnSend_def + split: list.split | rule conjI)+ + apply (clarsimp simp: state_refs_of'_def ) + apply (case_tac "tcbState obj", clarsimp+)[1] + apply (clarsimp simp: guard_is_UNIV_def)+ + apply (wp getNotification_wp | wpc)+ + apply (clarsimp simp add: guard_is_UNIV_def option_to_ptr_def option_to_0_def + split: if_split) + apply (wp gbn_wp' | simp add: guard_is_UNIV_def)+ + apply (auto simp: isCap_simps valid_cap'_def) + done + +lemma sendSignal_dequeue_ccorres_helper: + "ccorres (\rv rv'. 
rv' = tcb_ptr_to_ctcb_ptr dest) dest___ptr_to_struct_tcb_C_' + (invs' and st_tcb_at' ((=) (BlockedOnNotification ntfn)) dest + and ko_at' nTFN ntfn + and K (ntfnObj nTFN = WaitingNtfn (dest # rest))) UNIV hs + (setNotification ntfn $ ntfnObj_update (\_. case rest of [] \ Structures_H.ntfn.IdleNtfn + | (a#list) \ Structures_H.ntfn.WaitingNtfn rest) nTFN) + (\ntfn_queue :== CALL ntfn_ptr_get_queue(Ptr ntfn);; + \dest___ptr_to_struct_tcb_C :== head_C \ntfn_queue;; + \ntfn_queue :== CALL tcbEPDequeue(\dest___ptr_to_struct_tcb_C,\ntfn_queue);; + CALL ntfn_ptr_set_queue(Ptr ntfn,\ntfn_queue);; + IF head_C \ntfn_queue = Ptr 0 THEN + CALL notification_ptr_set_state(Ptr ntfn,scast NtfnState_Idle) + FI)" + apply (rule ccorres_from_vcg) + apply (rule allI) + apply (rule conseqPre, vcg) + apply (clarsimp split del: if_split) + apply (frule (2) ntfn_blocked_in_queueD) + apply (frule (1) ko_at_valid_ntfn' [OF _ invs_valid_objs']) + apply (elim conjE) + apply (frule (1) valid_ntfn_isWaitingNtfnD) + apply (elim conjE) + apply (frule cmap_relation_ntfn) + apply (erule (1) cmap_relation_ko_atE) + apply (intro conjI) + apply (erule h_t_valid_clift) + apply (rule impI) + apply (rule exI) + apply (rule conjI) + apply (rule_tac x=\ in exI) + apply (intro conjI) + apply assumption+ + apply clarsimp + apply (drule ntfn_to_ep_queue, (simp add: isWaitingNtfn_def)+) + apply (simp add: tcb_queue_relation'_def) + apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def + cong: imp_cong split del: if_split) + apply (intro conjI impI allI) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + \ \empty case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (frule iffD1 [OF tcb_queue_head_empty_iff + [OF tcb_queue_relation'_queue_rel]]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply (clarsimp simp: cnotification_relation_def Let_def + tcb_queue_relation'_def) + apply simp + apply (simp add: setNotification_def split_def) + apply (rule conjI) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ntfn_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def + tcb_queue_relation'_def make_canonical_def canonical_bit_def) + apply simp + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + apply (clarsimp simp: cnotification_relation_def Let_def + tcb_queue_relation'_def) + \ \non-empty case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (frule tcb_queue_head_empty_iff [OF tcb_queue_relation'_queue_rel]) + apply (rule ballI, erule bspec) + apply (erule subsetD [rotated]) + apply (clarsimp simp: cnotification_relation_def Let_def + tcb_queue_relation'_def) + apply (simp add: setNotification_def split_def) + 
apply (rule conjI) + apply (rule bexI [OF _ setObject_eq]) + apply (frule(1) st_tcb_at_h_t_valid) + apply (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ntfn_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (clarsimp simp: cnotification_relation_def Let_def mask_shiftl_decompose + isWaitingNtfn_def + tcb_queue_relation'_def valid_ntfn'_def + split: Structures_H.notification.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") (* FIXME AARCH64: names *) + apply (rule conjI) + subgoal by (erule (1) tcb_ptr_canonical[OF invs_pspace_canonical']) + apply (rule context_conjI) + subgoal by (erule (1) tcb_ptr_canonical[OF invs_pspace_canonical']) + apply clarsimp + apply (clarsimp split: if_split) + apply simp + apply (clarsimp simp: carch_state_relation_def) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + apply (clarsimp simp: cnotification_relation_def Let_def + tcb_queue_relation'_def) + done + +lemma ntfn_set_active_ccorres: + "ccorres dc xfdc (invs' and ko_at' ntfn ntfnptr + and (\_. \ isWaitingNtfn (ntfnObj ntfn))) + (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr} \ {s. badge_' s = badge}) hs + (setNotification ntfnptr + (ntfnObj_update (\_. Structures_H.ntfn.ActiveNtfn badge) ntfn)) + (Call ntfn_set_active_'proc)" + apply (rule ccorres_gen_asm) + apply (cinit lift: ntfnPtr_' badge_') + apply (simp only: setNotification_def) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rule cmap_relation_ko_atE [OF cmap_relation_ntfn], assumption+) + apply (clarsimp simp: typ_heap_simps) + apply (rule bexI[OF _ setObject_eq], simp_all) + apply (clarsimp simp: typ_heap_simps' rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ntfn_map_tos + carch_state_relation_def packed_heap_update_collapse_hrs + cmachine_state_relation_def) + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Active_def + isWaitingNtfn_def mask_def + split: Structures_H.ntfn.split_asm) + apply (simp add: objBits_simps')+ + done + +lemma sts_runnable: + "\if t \ dest then st_tcb_at' runnable' t else st_tcb_at' \ t\ + setThreadState Structures_H.thread_state.Running dest + \\rv. 
st_tcb_at' runnable' t\" + apply (rule hoare_pre) + apply (wp sts_st_tcb_at'_cases) + apply auto + done + +lemma st_tcb'_iff: + "st_tcb_at' \ t = tcb_at' t" + by (auto simp:st_tcb_at'_def) + +lemma sendSignal_ccorres [corres]: + "ccorres dc xfdc (invs') + (UNIV \ \\ntfnPtr = Ptr ntfnptr\ \ \\badge = badge\) hs + (sendSignal ntfnptr badge) + (Call sendSignal_'proc)" + apply (cinit lift: ntfnPtr_' badge_') + apply (rule ccorres_pre_getNotification, rename_tac ntfn) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle + | WaitingNtfn _ \ scast NtfnState_Waiting + | ActiveNtfn _ \ scast NtfnState_Active" + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_ntfn]) + apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps + split: Structures_H.ntfn.splits) + apply ceqv + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) + apply wpc + \ \IdleNtfn case\ + apply (rule ccorres_cond_true) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="ptr_val (option_to_ctcb_ptr (ntfnBoundTCB ntfn))" + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_ntfn]) + apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps + split: Structures_H.ntfn.splits) + apply ceqv + apply csymbr + apply wpc + apply (simp add: option_to_ctcb_ptr_def split del: if_split) + apply (rule ccorres_cond_false) + apply (ctac add: ntfn_set_active_ccorres) + apply (rule ccorres_cond_true) + apply (rule getThreadState_ccorres_foo) + apply (rule ccorres_Guard_Seq) + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply (rule_tac P="(ret__unsigned_longlong = scast ThreadState_BlockedOnReceive) + = receiveBlocked rv" in ccorres_gen_asm2) + apply (rule ccorres_cond[where R=\]) + apply (simp add: Collect_const_mem) + apply (rule ccorres_rhs_assoc)+ + apply simp + apply (ctac(no_vcg) add: cancelIPC_ccorres1[OF cteDeleteOne_ccorres]) + apply (ctac(no_vcg) add: setThreadState_ccorres) + apply (ctac(no_vcg) add: setRegister_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) + apply (wp sts_valid_objs' sts_st_tcb_at'_cases + | simp add: option_to_ctcb_ptr_def split del: if_split)+ + apply (rule_tac Q="\_. 
tcb_at' (the (ntfnBoundTCB ntfn)) and invs'" + in hoare_post_imp) + apply auto[1] + apply wp + apply simp + apply (ctac add: ntfn_set_active_ccorres) + apply (clarsimp simp: guard_is_UNIV_def option_to_ctcb_ptr_def + AARCH64_H.badgeRegister_def C_register_defs + AARCH64.badgeRegister_def AARCH64.capRegister_def + ThreadState_defs less_mask_eq Collect_const_mem) + apply (case_tac ts, simp_all add: receiveBlocked_def typ_heap_simps + cthread_state_relation_def ThreadState_defs)[1] + \ \ActiveNtfn case\ + apply (rename_tac old_badge) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply (clarsimp simp: setNotification_def) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" + and P'=UNIV + in ccorres_from_vcg) + apply (rule allI) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule(1) cmap_relation_ko_atD [OF cmap_relation_ntfn]) + apply (clarsimp simp: typ_heap_simps) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ntfn_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def + NtfnState_Active_def mask_def word_bw_comms) + apply simp + apply (clarsimp simp: carch_state_relation_def) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + \ \WaitingNtfn case\ + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply wpc + apply (simp only: haskell_fail_def) + apply (rule ccorres_fail) + apply (rename_tac dest rest) + apply (intro ccorres_rhs_assoc) + apply (csymbr, csymbr) + apply (intro ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc) + apply (rule ccorres_rhs_assoc) + apply (rule ccorres_split_nothrow_novcg) + apply (simp only: ) + apply (rule_tac dest=dest in sendSignal_dequeue_ccorres_helper) + apply ceqv + apply (simp only: K_bind_def) + apply (ctac (no_vcg)) + apply simp + apply (ctac (no_vcg)) + apply (ctac add: possibleSwitchTo_ccorres) + apply (simp) + apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift)[1] + apply (wp sts_valid_objs' sts_runnable) + apply (wp setThreadState_st_tcb set_ntfn_valid_objs' | clarsimp)+ + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def + badgeRegister_def C_register_defs + AARCH64.badgeRegister_def AARCH64.capRegister_def) + apply (clarsimp simp: guard_is_UNIV_def NtfnState_Idle_def + NtfnState_Active_def NtfnState_Waiting_def) + apply clarsimp + apply (simp only: conj_assoc[symmetric]) + apply (rule conjI) + apply (frule ko_at_valid_objs', (clarsimp simp: projectKOs)+) + apply (clarsimp simp: valid_obj'_def) + apply (auto simp: isWaitingNtfn_def st_tcb_at_refs_of_rev' valid_obj'_def valid_ntfn'_def + dest!: sym_refs_obj_atD' [OF _ invs_sym'] + elim: pred_tcb'_weakenE + split: list.splits option.splits)[1] + apply (clarsimp simp: option_to_ctcb_ptr_def tcb_ptr_to_ctcb_ptr_def + ctcb_offset_defs + dest!: sum_to_zero) + apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) + apply (auto simp: valid_ntfn'_def valid_bound_tcb'_def obj_at'_def is_aligned_def objBits_simps') + done + +lemma receiveSignal_block_ccorres_helper: + "ccorres dc xfdc (tcb_at' thread and sch_act_not thread and valid_objs' and ntfn_at' ntfnptr and + pspace_canonical' and pspace_aligned' and pspace_distinct' and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and + K (ntfnptr = ntfnptr && ~~ mask 4)) + UNIV hs + (setThreadState (Structures_H.thread_state.BlockedOnNotification + ntfnptr) thread) + (Guard C_Guard {s. s \\<^sub>c tcb_ptr_to_ctcb_ptr thread} + (CALL thread_state_ptr_set_tsType(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), + scast ThreadState_BlockedOnNotification));; + Guard C_Guard {s. s \\<^sub>c tcb_ptr_to_ctcb_ptr thread} + (CALL thread_state_ptr_set_blockingObject(Ptr + &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C'']), + ucast (ptr_val (ntfn_Ptr ntfnptr))));; + CALL scheduleTCB(tcb_ptr_to_ctcb_ptr thread))" + unfolding K_def setThreadState_def + apply (intro ccorres_gen_asm) + apply (rule ccorres_guard_imp) + apply (rule_tac P="canonical_address ntfnptr" in ccorres_gen_asm) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac P=\ and P'="tcb_at' thread" + in threadSet_ccorres_lemma3) + apply vcg + apply clarsimp + apply (frule(1) tcb_at_h_t_valid) + apply (frule h_t_valid_c_guard) + apply (clarsimp simp: typ_heap_simps' rf_sr_tcb_update_twice) + apply (erule(1) rf_sr_tcb_update_no_queue_gen, + (simp add: typ_heap_simps')+) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: ctcb_relation_def cthread_state_relation_def mask_shiftl_decompose + ThreadState_defs Suc_canonical_bit_fold + canonical_make_canonical_idem + flip: canonical_bit_def) + apply (simp add: mask_def) + apply ceqv + apply clarsimp + apply ctac + apply (wp hoare_vcg_all_lift threadSet_valid_objs' + threadSet_weak_sch_act_wf_runnable') + apply (clarsimp simp: guard_is_UNIV_def) + apply (auto simp: weak_sch_act_wf_def valid_tcb'_def tcb_cte_cases_def + valid_tcb_state'_def obj_at'_is_canonical cteSizeBits_def) + done + +lemma cpspace_relation_ntfn_update_ntfn': + fixes ntfn :: "Structures_H.notification" and ntfn' :: "Structures_H.notification" + and ntfnptr :: "machine_word" and s :: "kernel_state" + defines "qs \ if isWaitingNtfn (ntfnObj ntfn') then set (ntfnQueue (ntfnObj ntfn')) else {}" + defines "s' \ s\ksPSpace := (ksPSpace s)(ntfnptr \ KONotification ntfn')\" + assumes koat: "ko_at' ntfn ntfnptr s" + and vp: "valid_pspace' s" + and cp: "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift t) Ptr (cnotification_relation (cslift t))" + and srs: "sym_refs (state_refs_of' s')" + and rel: "cnotification_relation (cslift t') ntfn' notification" + and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) + Ptr + (cnotification_relation (cslift t'))" +proof - + from koat have koat': "ko_at' ntfn' ntfnptr s'" + by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) + + have ntfns': + "\x. x \ ntfnptr \ map_to_ntfns (ksPSpace s) x = + map_to_ntfns (ksPSpace s') x" + unfolding s'_def + by (fastforce intro: ssubst [OF map_comp_update] simp: projectKO_opt_ntfn) + + from koat have map_to_ko_atI'': + "\x y. 
\ x \ ntfnptr; map_to_ntfns (ksPSpace s) x = Some y \ \ ko_at' y x s'" + using vp unfolding s'_def + by (simp add: map_to_ko_at_updI' injectKO_ntfn objBitsKO_def) + + thus ?thesis using vp srs cp rel mpeq unfolding cmap_relation_def + apply - + apply (elim conjE) + apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs + split: if_split) + apply (drule (1) bspec [OF _ domI]) + apply simp + apply (erule(1) cnotification_relation_ntfn_queue [OF _ _ koat']) + apply (erule(1) map_to_ko_atI'') + apply (fold qs_def, rule mpeq) + apply assumption + done +qed + +lemma ntfn_q_refs_of_no_NTFNBound': + "(x, NTFNBound) \ ntfn_q_refs_of' ntfn" + by (auto simp: ntfn_q_refs_of'_def split: Structures_H.ntfn.splits) + +lemma ntfnBound_state_refs_equivalence: + "ko_at' ntfn ntfnptr s \ {r \ state_refs_of' s ntfnptr. snd r = NTFNBound} = ntfn_bound_refs' (ntfnBoundTCB ntfn)" + by (auto simp: ko_at_state_refs_ofD' set_eq_subset ntfn_q_refs_of_no_NTFNBound' ntfn_bound_refs'_def) + + +lemma receiveSignal_enqueue_ccorres_helper: + notes option.case_cong_weak [cong] + shows + "ccorres dc xfdc (valid_pspace' + and (\s. sym_refs ((state_refs_of' s)(ntfnptr := set queue \ {NTFNSignal} \ {r \ state_refs_of' s ntfnptr. snd r = NTFNBound}))) + and st_tcb_at' (\st. isBlockedOnNotification st \ + waitingOnNotification st = ntfnptr) thread + and ko_at' (ntfn::Structures_H.notification) ntfnptr + and K ((ntfnObj ntfn = IdleNtfn \ queue = [thread]) \ + (\q. ntfnObj ntfn = WaitingNtfn q \ thread \ set q \ + queue = q @ [thread]))) + UNIV hs + (setNotification ntfnptr $ ntfnObj_update (\_. Structures_H.WaitingNtfn queue) ntfn) + (\ntfn_queue :== CALL ntfn_ptr_get_queue(ntfn_Ptr ntfnptr);; + (\ntfn_queue :== CALL tcbEPAppend(tcb_ptr_to_ctcb_ptr thread,\ntfn_queue);; + (CALL notification_ptr_set_state(ntfn_Ptr ntfnptr, scast NtfnState_Waiting);; + CALL ntfn_ptr_set_queue(ntfn_Ptr ntfnptr,\ntfn_queue))))" + unfolding K_def + apply (rule ccorres_gen_asm) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp split del: if_split) + apply (frule cmap_relation_ntfn) + apply (erule (1) cmap_relation_ko_atE) + apply (rule conjI) + apply (erule h_t_valid_clift) + apply (frule(1) st_tcb_at_h_t_valid) + apply (frule pred_tcb_at') + apply (rule impI) + apply (rule_tac x="init queue" in exI) + apply (frule(1) ko_at_valid_ntfn' [OF _ valid_pspace_valid_objs']) + apply (frule is_aligned_tcb_ptr_to_ctcb_ptr) + apply (rule conjI) + apply (rule_tac x=\ in exI) + apply (simp add: cnotification_relation_def Let_def) + apply (case_tac "ntfnObj ntfn", simp_all add: init_def valid_ntfn'_def)[1] + apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") + prefer 2 + apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def ntfnBound_state_refs_equivalence + obj_at'_def objBitsKO_def) + apply (subgoal_tac "ko_at' (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)) ntfnptr (\\ksPSpace := + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") + prefer 2 + apply (clarsimp simp: obj_at'_def objBitsKO_def ps_clear_upd) + apply (intro conjI impI allI) + apply (fastforce simp: h_t_valid_clift) + apply (fastforce simp: h_t_valid_clift) + apply (case_tac "ntfnObj ntfn", simp_all add: NtfnState_Idle_def NtfnState_Waiting_def)[1] + \ \IdleNtfn case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (simp add: setNotification_def split_def) + 
apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def init_def + cpspace_relation_def update_ntfn_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) + apply simp + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=3] NtfnState_Waiting_def) + subgoal + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask valid_ntfn'_def + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, fastforce simp: mask_def) + apply (rule context_conjI) + subgoal by (fastforce simp: valid_pspace'_def objBits_simps' + intro!: tcb_ptr_canonical + dest!: st_tcb_strg'[rule_format]) + by clarsimp + apply (simp add: isWaitingNtfn_def) + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + \ \WaitingNtfn case\ + apply clarsimp + apply (frule null_ep_queue [simplified comp_def] null_ep_queue) + apply (simp add: setNotification_def split_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def init_def Let_def + cpspace_relation_def update_ntfn_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) + apply simp + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=3] NtfnState_Waiting_def + split: if_split) + subgoal for _ _ ko' + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, clarsimp) + apply (rule conjI, fastforce simp: mask_def) + apply (rule context_conjI) + subgoal by (fastforce intro!: tcb_ptr_canonical + dest!: st_tcb_strg'[rule_format]) + apply clarsimp + apply clarsimp + apply (rule conjI, fastforce simp: mask_def) + apply (rule conjI) + subgoal by (fastforce intro!: tcb_ptr_canonical + dest!: st_tcb_strg'[rule_format]) + apply (subgoal_tac "canonical_address (ntfnQueue_head_CL (notification_lift ko'))") + apply (clarsimp simp: canonical_make_canonical_idem) + apply (clarsimp simp: notification_lift_def canonical_address_mask_eq canonical_bit_def) + done + apply (simp add: isWaitingNtfn_def) + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) + apply (simp 
add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + done + +lemma receiveSignal_ccorres [corres]: + "ccorres dc xfdc (invs' and valid_cap' cap and st_tcb_at' simple' thread + and sch_act_not thread + and K (isNotificationCap cap)) + (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ + \ \ccap_relation cap \cap\ + \ {s. isBlocking_' s = from_bool is_blocking}) hs + (receiveSignal thread cap is_blocking) + (Call receiveSignal_'proc)" + unfolding K_def + supply if_cong[cong] option.case_cong[cong] + apply (rule ccorres_gen_asm) + apply (cinit lift: thread_' cap_' isBlocking_') + apply (rule ccorres_pre_getNotification, rename_tac ntfn) + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="capNtfnPtr cap" + and R=\ + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (simp add: cap_notification_cap_lift ccap_relation_def cap_to_H_def) + apply ceqv + apply csymbr + apply (rule_tac xf'=ret__unsigned_longlong_' + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle + | WaitingNtfn _ \ scast NtfnState_Waiting + | ActiveNtfn _ \ scast NtfnState_Active" + and R="ko_at' ntfn (capNtfnPtr cap)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_ntfn]) + apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps + split: Structures_H.ntfn.splits) + + apply ceqv + apply wpc + \ \IdleNtfn case\ + apply (rule ccorres_cond_true) + apply csymbr + apply (simp only: case_bool_If from_bool_neq_0) + apply (rule ccorres_Cond_rhs, simp cong: Collect_cong) + apply (intro ccorres_rhs_assoc) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + apply (rule receiveSignal_block_ccorres_helper[simplified]) + apply ceqv + apply (simp only: K_bind_def) + apply (rule receiveSignal_enqueue_ccorres_helper[simplified]) + apply (simp add: valid_ntfn'_def) + apply (wp sts_st_tcb') + apply (rule_tac Q="\rv. ko_wp_at' (\x. 
projectKO_opt x = Some ntfn + \ projectKO_opt x = (None::tcb option)) + (capNtfnPtr cap)" + in hoare_post_imp) + apply (clarsimp simp: obj_at'_def ko_wp_at'_def) + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply simp + apply (ctac add: doNBRecvFailedTransfer_ccorres) + \ \ActiveNtfn case\ + apply (rename_tac badge) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply (intro ccorres_rhs_assoc) + apply (rule_tac val=badge + and xf'=ret__unsigned_longlong_' + and R="ko_at' ntfn (capNtfnPtr cap)" + in ccorres_symb_exec_r_known_rv_UNIV [where R'=UNIV]) + apply (vcg, clarsimp) + apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_ntfn]) + apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps + split: Structures_H.notification.splits) + apply ceqv + apply (clarsimp simp: badgeRegister_def Kernel_C.badgeRegister_def, ctac(no_vcg)) + apply (rule_tac P="invs' and ko_at' ntfn (capNtfnPtr cap)" + and P'=UNIV + in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (frule(1) cmap_relation_ko_atD [OF cmap_relation_ntfn]) + apply (clarsimp simp: typ_heap_simps setNotification_def) + apply (rule bexI [OF _ setObject_eq]) + apply (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def update_ntfn_map_tos + typ_heap_simps') + apply (elim conjE) + apply (intro conjI) + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def + NtfnState_Idle_def mask_def) + apply simp + apply (simp add: carch_state_relation_def) + apply (simp add: cmachine_state_relation_def) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply assumption + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp simp: AARCH64.badgeRegister_def AARCH64.capRegister_def C_register_defs) + \ \WaitingNtfn case\ + apply (rename_tac list) + apply (rule ccorres_cond_true) + apply csymbr + apply (simp only: case_bool_If from_bool_neq_0) + apply (rule ccorres_Cond_rhs, simp cong: Collect_cong) + apply (intro ccorres_rhs_assoc) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + apply (simp only: ) + apply (rule receiveSignal_block_ccorres_helper[simplified]) + apply ceqv + apply (simp only: K_bind_def) + apply (rule_tac ntfn="ntfn" in receiveSignal_enqueue_ccorres_helper[simplified]) + apply (simp add: valid_ntfn'_def) + apply (wp sts_st_tcb') + apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn + \ projectKO_opt x = (None::tcb option)) + (capNtfnPtr cap) + and K (thread \ set list)" + in hoare_post_imp) + apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply simp + apply (ctac add: doNBRecvFailedTransfer_ccorres) + apply (clarsimp simp: guard_is_UNIV_def NtfnState_Active_def + NtfnState_Waiting_def NtfnState_Idle_def) + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp split: if_split) + apply (rule conjI) + apply (clarsimp dest!: st_tcb_strg'[rule_format] + simp: isNotificationCap_def valid_cap'_def obj_at'_def projectKOs + split: capability.splits split del: if_split) + apply clarsimp + apply (frule(1) ko_at_valid_objs' [OF _ invs_valid_objs']) + apply (clarsimp simp: projectKO_opt_ntfn split: kernel_object.split_asm) + apply (subgoal_tac "state_refs_of' s thread = {r \ state_refs_of' s thread. 
snd r = TCBBound}") + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def refs_of'_def + split: ntfn.splits) + apply (subgoal_tac "state_refs_of' s (capNtfnPtr cap) = + {r \ state_refs_of' s (capNtfnPtr cap). snd r = NTFNBound}") + subgoal by (fastforce simp: obj_at'_def is_aligned_neg_mask objBits_simps' + projectKOs invs'_def valid_state'_def st_tcb_at'_def + valid_tcb_state'_def ko_wp_at'_def valid_pspace'_def + isBlockedOnNotification_def projectKO_opt_tcb + elim: delta_sym_refs + split: if_split_asm if_split) + apply (auto simp: obj_at'_def state_refs_of'_def projectKOs ntfn_bound_refs'_def)[1] + apply (rename_tac list) + apply (subgoal_tac "state_refs_of' s (capNtfnPtr cap) = (set list) \ {NTFNSignal} + \ {r \ state_refs_of' s (capNtfnPtr cap). snd r = NTFNBound} + \ thread \ (set list)") + subgoal by (fastforce simp: obj_at'_def is_aligned_neg_mask objBits_simps' + projectKOs invs'_def valid_state'_def st_tcb_at'_def + valid_tcb_state'_def ko_wp_at'_def valid_pspace'_def + isBlockedOnNotification_def projectKO_opt_tcb + elim: delta_sym_refs + split: if_split_asm if_split) + apply (frule(1) sym_refs_obj_atD' [OF _ invs_sym']) + apply (rule conjI, clarsimp simp: ko_wp_at'_def dest!: ntfnBound_state_refs_equivalence) + apply (clarsimp simp: st_tcb_at'_def ko_wp_at'_def obj_at'_def + split: if_split_asm) + apply (drule(1) bspec)+ + apply (drule_tac x="(thread, NTFNSignal)" in bspec, clarsimp) + apply (clarsimp simp: tcb_bound_refs'_def) + apply (case_tac "tcbState obj", simp_all)[1] + apply (frule(1) st_tcb_idle' [OF invs_valid_idle'], simp) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def state_refs_of'_def + split: if_split_asm) + apply (case_tac "tcbState obj", clarsimp+)[1] + done + +end +end diff --git a/proof/crefine/AARCH64/IsolatedThreadAction.thy b/proof/crefine/AARCH64/IsolatedThreadAction.thy new file mode 100644 index 0000000000..9bcadc156e --- /dev/null +++ b/proof/crefine/AARCH64/IsolatedThreadAction.thy @@ -0,0 +1,1939 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory IsolatedThreadAction +imports ArchMove_C +begin + +context begin interpretation Arch . + +datatype tcb_state_regs = + TCBStateRegs (tsrState : thread_state) (tsrContext : "MachineTypes.register \ machine_word") + +definition + get_tcb_state_regs :: "kernel_object option \ tcb_state_regs" +where + "get_tcb_state_regs oko \ case oko of + Some (KOTCB tcb) \ TCBStateRegs (tcbState tcb) ((user_regs o atcbContextGet o tcbArch) tcb)" + +definition + put_tcb_state_regs_tcb :: "tcb_state_regs \ tcb \ tcb" +where + "put_tcb_state_regs_tcb tsr tcb \ case tsr of + TCBStateRegs st regs \ + tcb \ tcbState := st, + tcbArch := atcbContextSet (UserContext (fpu_state (atcbContext (tcbArch tcb))) regs) + (tcbArch tcb) \" + +definition + put_tcb_state_regs :: "tcb_state_regs \ kernel_object option \ kernel_object option" +where + "put_tcb_state_regs tsr oko = Some (KOTCB (put_tcb_state_regs_tcb tsr + (case oko of + Some (KOTCB tcb) \ tcb | _ \ makeObject)))" + +definition + "partial_overwrite idx tcbs ps \ + \x. if x \ range idx + then put_tcb_state_regs (tcbs (inv idx x)) (ps x) + else ps x" + +definition + isolate_thread_actions :: "('x \ machine_word) \ 'a kernel + \ (('x \ tcb_state_regs) \ ('x \ tcb_state_regs)) + \ (scheduler_action \ scheduler_action) + \ 'a kernel" + where + "isolate_thread_actions idx m t f \ do + s \ gets (ksSchedulerAction_update (\_. 
ResumeCurrentThread) + o ksPSpace_update (partial_overwrite idx (K undefined))); + tcbs \ gets (\s. get_tcb_state_regs o ksPSpace s o idx); + sa \ getSchedulerAction; + (rv, s') \ select_f (m s); + modify (\s. ksPSpace_update (partial_overwrite idx (t tcbs)) + (s' \ ksSchedulerAction := f sa \)); + return rv + od" + +lemma UserContextGet[simp]: + "UserContext (fpu_state (atcbContext t)) (user_regs (atcbContextGet t)) = atcbContextGet t" + by (cases t, simp add: atcbContextGet_def) + +lemma put_tcb_state_regs_twice[simp]: + "put_tcb_state_regs tsr (put_tcb_state_regs tsr' tcb) + = put_tcb_state_regs tsr tcb" + apply (simp add: put_tcb_state_regs_def put_tcb_state_regs_tcb_def + makeObject_tcb newArchTCB_def + split: tcb_state_regs.split option.split + Structures_H.kernel_object.split) + using atcbContextSet_def atcbContext_set_set + apply (intro all_tcbI impI allI conjI; simp) + done + +lemma partial_overwrite_twice[simp]: + "partial_overwrite idx f (partial_overwrite idx g ps) + = partial_overwrite idx f ps" + by (rule ext, simp add: partial_overwrite_def) + +lemma get_tcb_state_regs_partial_overwrite[simp]: + "inj idx \ + get_tcb_state_regs (partial_overwrite idx tcbs f (idx x)) + = tcbs x" + apply (simp add: partial_overwrite_def) + apply (simp add: put_tcb_state_regs_def + get_tcb_state_regs_def + put_tcb_state_regs_tcb_def + split: tcb_state_regs.split) + done + +lemma isolate_thread_actions_bind: + "inj idx \ + isolate_thread_actions idx a b c >>= + (\x. isolate_thread_actions idx (d x) e f) + = isolate_thread_actions idx a id id + >>= (\x. isolate_thread_actions idx (d x) (e o b) (f o c))" + apply (rule ext) + apply (clarsimp simp: isolate_thread_actions_def bind_assoc split_def + bind_select_f_bind[symmetric]) + apply (clarsimp simp: exec_gets getSchedulerAction_def) + apply (rule select_bind_eq) + apply (simp add: exec_gets exec_modify o_def) + apply (rule select_bind_eq) + apply (simp add: exec_gets exec_modify) + done + +lemmas setNotification_tcb = set_ntfn_tcb_obj_at' + +lemma setObject_modify: + fixes v :: "'a :: pspace_storable" shows + "\ obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v \ + \ setObject p v s + = modify (ksPSpace_update (\ps. ps (p \ injectKO v))) s" + apply (clarsimp simp: setObject_def split_def exec_gets obj_at'_def lookupAround2_known1 + assert_opt_def updateObject_default_def bind_assoc) + apply (simp add: projectKO_def alignCheck_assert) + apply (simp add: project_inject objBits_def) + apply (clarsimp simp only: objBitsT_koTypeOf[symmetric] koTypeOf_injectKO) + apply (frule(2) in_magnitude_check[where s'=s]) + apply (simp add: magnitudeCheck_assert in_monad) + apply (simp add: simpler_modify_def) + done + +lemma getObject_return: + fixes v :: "'a :: pspace_storable" shows + "\ \a b c d. 
(loadObject a b c d :: 'a kernel) = loadObject_default a b c d; + ko_at' v p s; (1 :: machine_word) < 2 ^ objBits v \ \ getObject p s = return v s" + apply (clarsimp simp: getObject_def split_def exec_gets + obj_at'_def projectKOs lookupAround2_known1 + assert_opt_def loadObject_default_def) + apply (simp add: projectKO_def alignCheck_assert) + apply (simp add: project_inject objBits_def) + apply (frule(2) in_magnitude_check[where s'=s]) + apply (simp add: magnitudeCheck_assert in_monad) + done + +end + +lemmas getObject_return_tcb + = getObject_return[OF meta_eq_to_obj_eq, OF loadObject_tcb, + unfolded objBits_simps, simplified] + +lemmas setObject_modify_tcb + = setObject_modify[OF _ meta_eq_to_obj_eq, OF _ updateObject_tcb, + unfolded objBits_simps, simplified] + +lemma partial_overwrite_fun_upd: + "inj idx \ + partial_overwrite idx (tsrs (x := y)) + = (\ps. (partial_overwrite idx tsrs ps) (idx x := put_tcb_state_regs y (ps (idx x))))" + apply (intro ext, simp add: partial_overwrite_def) + apply (clarsimp split: if_split) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma get_tcb_state_regs_ko_at': + "ko_at' ko p s \ get_tcb_state_regs (ksPSpace s p) + = TCBStateRegs (tcbState ko) ((user_regs o atcbContextGet o tcbArch) ko)" + by (clarsimp simp: obj_at'_def projectKOs get_tcb_state_regs_def) + +lemma put_tcb_state_regs_ko_at': + "ko_at' ko p s \ put_tcb_state_regs tsr (ksPSpace s p) + = Some (KOTCB (ko \ tcbState := tsrState tsr + , tcbArch := atcbContextSet (UserContext (fpu_state (atcbContext (tcbArch ko))) + (tsrContext tsr)) (tcbArch ko)\))" + by (clarsimp simp: obj_at'_def put_tcb_state_regs_def + put_tcb_state_regs_tcb_def + split: tcb_state_regs.split) + +lemma partial_overwrite_get_tcb_state_regs: + "\ \x. tcb_at' (idx x) s; inj idx \ \ + partial_overwrite idx (\x. get_tcb_state_regs (ksPSpace s (idx x))) + (ksPSpace s) = ksPSpace s" + apply (rule ext, simp add: partial_overwrite_def + split: if_split) + apply clarsimp + apply (drule_tac x=xa in spec) + apply (clarsimp simp: obj_at'_def projectKOs put_tcb_state_regs_def + get_tcb_state_regs_def put_tcb_state_regs_tcb_def) + apply (case_tac obj, simp) + done + +lemma ksPSpace_update_partial_id: + "\ \ps x. f ps x = ps (idx x) \ f ps x = ksPSpace s (idx x); + \x. tcb_at' (idx x) s; inj idx \ \ + ksPSpace_update (\ps. partial_overwrite idx (\x. get_tcb_state_regs (f ps x)) ps) s + = s" + apply (rule trans, rule kernel_state.fold_congs[OF refl refl]) + apply (erule_tac x="ksPSpace s" in meta_allE) + apply (clarsimp simp: partial_overwrite_get_tcb_state_regs) + apply (rule refl) + apply simp + done + +lemma isolate_thread_actions_asUser: + "\ idx t' = t; inj idx; f = (\s. ({(v, modify_registers g s)}, False)) \ \ + monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) + (asUser t f) + (isolate_thread_actions idx (return v) + (\tsrs. 
(tsrs (t' := TCBStateRegs (tsrState (tsrs t')) + (g (tsrContext (tsrs t')))))) + id)" + apply (simp add: asUser_def liftM_def isolate_thread_actions_def split_def + select_f_returns bind_assoc select_f_singleton_return + threadGet_def threadSet_def) + apply (clarsimp simp: monadic_rewrite_def) + apply (frule_tac x=t' in spec) + apply (drule obj_at_ko_at', clarsimp) + apply (simp add: exec_gets getSchedulerAction_def exec_modify objBits_defs + getObject_return_tcb setObject_modify_tcb o_def + cong: bind_apply_cong)+ + apply (simp add: partial_overwrite_fun_upd return_def get_tcb_state_regs_ko_at') + apply (rule kernel_state.fold_congs[OF refl refl]) + apply (clarsimp simp: partial_overwrite_get_tcb_state_regs + put_tcb_state_regs_ko_at') + apply (case_tac ko, simp) + apply (rename_tac uc) + apply (case_tac uc, simp add: modify_registers_def atcbContextGet_def atcbContextSet_def) + done + +lemma getRegister_simple: + "getRegister r = (\con. ({(user_regs con r, con)}, False))" + by (simp add: getRegister_def simpler_gets_def) + +lemma mapM_getRegister_simple: + "mapM getRegister rs = (\con. ({(map (user_regs con) rs, con)}, False))" + apply (induct rs) + apply (simp add: mapM_Nil return_def) + apply (simp add: mapM_Cons getRegister_def simpler_gets_def + bind_def return_def) + done + +lemma setRegister_simple: + "setRegister r v = (\con. ({((), UserContext (fpu_state con) ((user_regs con)(r := v)))}, False))" + by (simp add: setRegister_def simpler_modify_def) + +lemma zipWithM_setRegister_simple: + "zipWithM_x setRegister rs vs + = (\con. ({((), + UserContext (fpu_state con) + (foldl (\regs (r, v). ((regs)(r := v))) (user_regs con) (zip rs vs)))}, False))" + apply (simp add: zipWithM_x_mapM_x) + apply (induct ("zip rs vs")) + apply (simp add: mapM_x_Nil return_def) + apply (clarsimp simp add: mapM_x_Cons bind_def setRegister_def + simpler_modify_def fun_upd_def[symmetric]) + done + +lemma dom_partial_overwrite: + "\x. tcb_at' (idx x) s \ dom (partial_overwrite idx tsrs (ksPSpace s)) + = dom (ksPSpace s)" + apply (rule set_eqI) + apply (clarsimp simp: dom_def partial_overwrite_def put_tcb_state_regs_def + split: if_split) + apply (fastforce elim!: obj_atE') + done + +lemma map_to_ctes_partial_overwrite: + "\x. 
tcb_at' (idx x) s \ + map_to_ctes (partial_overwrite idx tsrs (ksPSpace s)) + = ctes_of s" + supply if_split[split del] + apply (rule ext) + apply (frule dom_partial_overwrite[where tsrs=tsrs]) + apply (simp add: map_to_ctes_def partial_overwrite_def + Let_def) + apply (case_tac "x \ range idx") + apply (clarsimp simp: put_tcb_state_regs_def) + apply (drule_tac x=xa in spec) + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps' + cong: if_cong) + apply (simp add: put_tcb_state_regs_def put_tcb_state_regs_tcb_def + objBits_simps' + cong: if_cong option.case_cong) + apply (case_tac obj, simp split: tcb_state_regs.split if_split) + apply simp + apply (rule if_cong[OF refl]) + apply simp + apply (case_tac "x && ~~ mask (objBitsKO (KOTCB undefined)) \ range idx") + apply (clarsimp simp: put_tcb_state_regs_def) + apply (drule_tac x=xa in spec) + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps' + cong: if_cong) + apply (simp add: put_tcb_state_regs_def put_tcb_state_regs_tcb_def + objBits_simps' + cong: if_cong option.case_cong) + apply (case_tac obj, simp split: tcb_state_regs.split if_split) + apply (intro impI allI) + apply (subgoal_tac "x - idx xa = x && mask tcbBlockSizeBits") + apply (clarsimp simp: tcb_cte_cases_def objBits_defs split: if_split) + apply (drule_tac t = "idx xa" in sym) + apply (simp add: objBits_defs) + apply (simp cong: if_cong) + done + +definition + "thread_actions_isolatable idx f = + (inj idx \ monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) + f (isolate_thread_actions idx f id id))" + +lemma getCTE_assert_opt: + "getCTE p = gets (\s. ctes_of s p) >>= assert_opt" + apply (intro ext) + apply (simp add: exec_gets assert_opt_def prod_eq_iff + fail_def return_def + split: option.split) + apply (rule conjI) + apply clarsimp + apply (rule context_conjI) + apply (rule ccontr, clarsimp elim!: nonemptyE) + apply (frule use_valid[OF _ getCTE_sp], rule TrueI) + apply (frule in_inv_by_hoareD[OF getCTE_inv]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (simp add: empty_failD[OF empty_fail_getCTE]) + apply clarsimp + apply (simp add: no_failD[OF no_fail_getCTE, OF ctes_of_cte_at]) + apply (subgoal_tac "cte_wp_at' ((=) x2) p x") + apply (clarsimp simp: cte_wp_at'_def getCTE_def) + apply (simp add: cte_wp_at_ctes_of) + done + +lemma getCTE_isolatable: + "thread_actions_isolatable idx (getCTE p)" + apply (clarsimp simp: thread_actions_isolatable_def) + apply (simp add: isolate_thread_actions_def bind_assoc split_def) + apply (simp add: getCTE_assert_opt bind_select_f_bind[symmetric] + bind_assoc select_f_returns) + apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def + map_to_ctes_partial_overwrite) + apply (simp add: assert_opt_def select_f_returns select_f_asserts + split: option.split) + apply (clarsimp simp: exec_modify o_def return_def) + apply (simp add: ksPSpace_update_partial_id) + done + +lemma obj_at_partial_overwrite_If: + "\ \x. tcb_at' (idx x) s \ + \ obj_at' P p (ksPSpace_update (partial_overwrite idx f) s) + = (if p \ range idx + then obj_at' (\tcb. P (put_tcb_state_regs_tcb (f (inv idx p)) tcb)) p s + else obj_at' P p s)" + apply (frule dom_partial_overwrite[where tsrs=f]) + apply (simp add: obj_at'_def ps_clear_def partial_overwrite_def + projectKOs split: if_split) + apply clarsimp + apply (drule_tac x=x in spec) + apply (clarsimp simp: put_tcb_state_regs_def objBits_simps) + done + +lemma obj_at_partial_overwrite_id1: + "\ p \ range idx; \x. 
tcb_at' (idx x) s \ + \ obj_at' P p (ksPSpace_update (partial_overwrite idx f) s) + = obj_at' P p s" + apply (drule dom_partial_overwrite[where tsrs=f]) + apply (simp add: obj_at'_def ps_clear_def partial_overwrite_def + projectKOs) + done + +lemma obj_at_partial_overwrite_id2: + "\ \x. tcb_at' (idx x) s; \v tcb. P v \ True \ injectKO v \ KOTCB tcb \ + \ obj_at' P p (ksPSpace_update (partial_overwrite idx f) s) + = obj_at' P p s" + apply (frule dom_partial_overwrite[where tsrs=f]) + apply (simp add: obj_at'_def ps_clear_def partial_overwrite_def + projectKOs split: if_split) + apply clarsimp + apply (drule_tac x=x in spec) + apply (clarsimp simp: put_tcb_state_regs_def objBits_simps + project_inject) + done + +lemma objBits_2n: + "(1 :: machine_word) < 2 ^ objBits obj" + by (simp add: objBits_def objBitsKO_def archObjSize_def pageBits_def objBits_simps' + split: kernel_object.split arch_kernel_object.split) + +lemma getObject_get_assert: + assumes deflt: "\a b c d. (loadObject a b c d :: ('a :: pspace_storable) kernel) + = loadObject_default a b c d" + shows + "(getObject p :: ('a :: pspace_storable) kernel) + = do v \ gets (obj_at' (\x :: 'a. True) p); + assert v; + gets (the o projectKO_opt o the o swp fun_app p o ksPSpace) + od" + apply (rule ext) + apply (simp add: exec_get getObject_def split_def exec_gets + deflt loadObject_default_def projectKO_def2 + alignCheck_assert) + apply (case_tac "ksPSpace x p") + apply (simp add: obj_at'_def assert_opt_def assert_def + split: option.split if_split) + apply (simp add: lookupAround2_known1 assert_opt_def + obj_at'_def projectKO_def2 + split: option.split) + apply (clarsimp simp: fail_set fst_return conj_comms project_inject + objBits_def) + apply (simp only: assert2[symmetric], + rule bind_apply_cong[OF refl]) + apply (clarsimp simp: in_monad) + apply (fold objBits_def) + apply (simp add: magnitudeCheck_assert2[OF _ objBits_2n]) + apply (rule bind_apply_cong[OF refl]) + apply (clarsimp simp: in_monad return_def simpler_gets_def) + apply (simp add: iffD2[OF project_inject refl]) + done + + +lemma getObject_isolatable: + "\ \a b c d. (loadObject a b c d :: 'a kernel) = loadObject_default a b c d; + \tcb. projectKO_opt (KOTCB tcb) = (None :: 'a option) \ \ + thread_actions_isolatable idx (getObject p :: ('a :: pspace_storable) kernel)" + apply (clarsimp simp: thread_actions_isolatable_def) + apply (simp add: getObject_get_assert split_def + isolate_thread_actions_def bind_select_f_bind[symmetric] + bind_assoc select_f_asserts select_f_returns) + apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def) + apply (case_tac "p \ range idx") + apply clarsimp + apply (drule_tac x=x in spec) + apply (clarsimp simp: obj_at'_def projectKOs partial_overwrite_def + put_tcb_state_regs_def) + apply (simp add: obj_at_partial_overwrite_id1) + apply (simp add: partial_overwrite_def) + apply (rule bind_apply_cong[OF refl]) + apply (simp add: exec_modify return_def o_def simpler_gets_def + ksPSpace_update_partial_id in_monad) + done + +lemma gets_isolatable: + "\\g s. \x. tcb_at' (idx x) s \ + f (ksSchedulerAction_update g + (ksPSpace_update (partial_overwrite idx (\_. 
undefined)) s)) = f s \ \ + thread_actions_isolatable idx (gets f)" + apply (clarsimp simp: thread_actions_isolatable_def) + apply (simp add: isolate_thread_actions_def select_f_returns + liftM_def bind_assoc) + apply (clarsimp simp: monadic_rewrite_def exec_gets + getSchedulerAction_def exec_modify) + apply (simp add: simpler_gets_def return_def + ksPSpace_update_partial_id o_def) + done + +lemma modify_isolatable: + assumes swap:"\tsrs act s. \x. tcb_at' (idx x) s \ + (ksPSpace_update (partial_overwrite idx tsrs) ((f s)\ ksSchedulerAction := act \)) + = f (ksPSpace_update (partial_overwrite idx tsrs) + (s \ ksSchedulerAction := act\))" + shows + "thread_actions_isolatable idx (modify f)" + apply (clarsimp simp: thread_actions_isolatable_def) + apply (simp add: isolate_thread_actions_def select_f_returns + liftM_def bind_assoc) + apply (clarsimp simp: monadic_rewrite_def exec_gets + getSchedulerAction_def) + apply (simp add: simpler_modify_def) + apply (subst swap) + apply (simp add: obj_at_partial_overwrite_If) + apply (simp add: ksPSpace_update_partial_id o_def) + done + +lemma kernelExitAssertions_isolatable: + "thread_actions_isolatable idx (stateAssert kernelExitAssertions [])" + unfolding stateAssert_def kernelExitAssertions_def + apply (clarsimp simp: thread_actions_isolatable_def get_def assert_def bind_def) + apply (simp add: isolate_thread_actions_def select_f_returns liftM_def bind_assoc) + apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def exec_modify + split: if_split) + apply (simp add: simpler_gets_def return_def fail_def modify_def get_def put_def + ksPSpace_update_partial_id o_def bind_def select_f_def) + done + +lemma isolate_thread_actions_wrap_bind: + "inj idx \ + do x \ isolate_thread_actions idx a b c; + isolate_thread_actions idx (d x) e f + od = + isolate_thread_actions idx + (do x \ isolate_thread_actions idx a id id; + isolate_thread_actions idx (d x) id id + od) (e o b) (f o c) + " + apply (rule ext) + apply (clarsimp simp: isolate_thread_actions_def bind_assoc split_def + bind_select_f_bind[symmetric] liftM_def + select_f_returns select_f_selects + getSchedulerAction_def) + apply (clarsimp simp: exec_gets getSchedulerAction_def o_def) + apply (rule select_bind_eq) + apply (simp add: exec_gets exec_modify o_def) + apply (rule select_bind_eq) + apply (simp add: exec_modify) + done + +lemma monadic_rewrite_in_isolate_thread_actions: + "\ inj idx; monadic_rewrite F True P a d \ \ + monadic_rewrite F True (\s. P (ksSchedulerAction_update (\_. ResumeCurrentThread) + (ksPSpace_update (partial_overwrite idx (\_. undefined)) s))) + (isolate_thread_actions idx a b c) (isolate_thread_actions idx d b c)" + apply (clarsimp simp: isolate_thread_actions_def split_def) + apply (rule monadic_rewrite_bind_tail)+ + apply (rule_tac P="\_. P s" in monadic_rewrite_bind_head) + apply (simp add: monadic_rewrite_def select_f_def) + apply wp+ + apply simp + done + +lemma thread_actions_isolatable_bind: + "\ thread_actions_isolatable idx f; \x. thread_actions_isolatable idx (g x); + \t. \tcb_at' t\ f \\rv. 
tcb_at' t\ \ + \ thread_actions_isolatable idx (f >>= g)" + apply (clarsimp simp: thread_actions_isolatable_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_trans) + apply (erule monadic_rewrite_bind_l, assumption) + apply (rule hoare_vcg_all_lift, assumption) + apply (subst isolate_thread_actions_wrap_bind, simp) + apply simp + apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) + apply (rule monadic_rewrite_transverse) + apply (erule monadic_rewrite_bind_l, assumption) + apply (rule hoare_vcg_all_lift, assumption) + apply (simp add: bind_assoc id_def) + apply (rule monadic_rewrite_refl) + apply (simp add: obj_at_partial_overwrite_If) + done + +lemma thread_actions_isolatable_return[simp]: + "thread_actions_isolatable idx (return v)" + apply (clarsimp simp: thread_actions_isolatable_def + monadic_rewrite_def liftM_def + isolate_thread_actions_def + split_def bind_assoc select_f_returns + exec_gets getSchedulerAction_def) + apply (simp add: exec_modify return_def o_def + ksPSpace_update_partial_id) + done + +lemma thread_actions_isolatable_fail[simp]: + "thread_actions_isolatable idx fail" + by (simp add: thread_actions_isolatable_def + isolate_thread_actions_def select_f_asserts + liftM_def bind_assoc getSchedulerAction_def + monadic_rewrite_def exec_gets) + +lemma thread_actions_isolatable_assert[simp]: + "thread_actions_isolatable idx (assert P)" + by (auto simp: assert_def) + +lemma thread_actions_isolatable_returns[simp]: + "thread_actions_isolatable idx (return v)" + "thread_actions_isolatable idx (returnOk v)" + "thread_actions_isolatable idx (throwError v)" + by (simp add: returnOk_def throwError_def)+ + +lemma thread_actions_isolatable_bindE: + "\ thread_actions_isolatable idx f; \x. thread_actions_isolatable idx (g x); + \t. \tcb_at' t\ f \\rv. tcb_at' t\ \ + \ thread_actions_isolatable idx (f >>=E g)" + apply (simp add: bindE_def) + apply (erule thread_actions_isolatable_bind) + apply (simp add: lift_def thread_actions_isolatable_returns + split: sum.split) + apply assumption + done + +lemma thread_actions_isolatable_catch: + "\ thread_actions_isolatable idx f; \x. thread_actions_isolatable idx (g x); + \t. \tcb_at' t\ f \\rv. tcb_at' t\ \ + \ thread_actions_isolatable idx (f g)" + apply (simp add: catch_def) + apply (erule thread_actions_isolatable_bind) + apply (simp add: thread_actions_isolatable_returns + split: sum.split) + apply assumption + done + +lemma thread_actions_isolatable_if: + "\ P \ thread_actions_isolatable idx a; + \ P \ thread_actions_isolatable idx b \ + \ thread_actions_isolatable idx (if P then a else b)" + by (cases P, simp_all) + +lemma select_f_isolatable: + "thread_actions_isolatable idx (select_f v)" + apply (clarsimp simp: thread_actions_isolatable_def + isolate_thread_actions_def + split_def select_f_selects liftM_def bind_assoc) + apply (rule monadic_rewrite_guard_imp, rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_drop_modify monadic_rewrite_bind_tail)+ + apply wp+ + apply (simp add: gets_bind_ign getSchedulerAction_def) + apply (rule monadic_rewrite_refl) + apply (simp add: ksPSpace_update_partial_id o_def) + done + +lemma doMachineOp_isolatable: + "thread_actions_isolatable idx (doMachineOp m)" + apply (simp add: doMachineOp_def split_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + gets_isolatable thread_actions_isolatable_returns + modify_isolatable select_f_isolatable) + apply (simp | wp)+ + done + +lemma page_table_at_partial_overwrite: + "\x. 
tcb_at' (idx x) s \ + page_table_at' pt_t p (ksPSpace_update (partial_overwrite idx f) s) + = page_table_at' pt_t p s" + by (simp add: page_table_at'_def typ_at_to_obj_at_arches + obj_at_partial_overwrite_id2) + +lemma getASIDPoolEntry_isolatable: + "thread_actions_isolatable idx (getASIDPoolEntry asid)" + supply if_split[split del] + apply (simp add: getASIDPoolEntry_def getPoolPtr_def liftE_bindE liftME_def bindE_assoc + case_option_If2 assertE_def liftE_def checkPTAt_def + stateAssert_def2 assert_def liftM_def + cong: if_cong) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_bindE[OF _ _ hoare_pre(1)] + thread_actions_isolatable_if thread_actions_isolatable_returns + thread_actions_isolatable_fail + gets_isolatable getObject_isolatable) + apply (simp add: projectKO_opt_asidpool page_table_at_partial_overwrite split: asidpool.split + | wpsimp wp: getASID_wp)+ + done + +lemma findVSpaceForASID_isolatable: + "thread_actions_isolatable idx (findVSpaceForASID asid)" + supply if_split[split del] + apply (simp add: findVSpaceForASID_def liftE_bindE liftME_def bindE_assoc + case_option_If2 assertE_def liftE_def checkPTAt_def + stateAssert_def2 + cong: if_cong) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_bindE[OF _ _ hoare_pre(1)] + thread_actions_isolatable_if thread_actions_isolatable_returns + thread_actions_isolatable_fail + gets_isolatable getObject_isolatable getASIDPoolEntry_isolatable + | simp add: projectKO_opt_asidpool page_table_at_partial_overwrite + findVSpaceForASID_def liftE_bindE liftME_def bindE_assoc + case_option_If2 assertE_def liftE_def checkPTAt_def + stateAssert_def2 split: asidpool_entry.split + | wpsimp wp: getASID_wp)+ + done + +lemma modifyArchState_isolatable: + "thread_actions_isolatable idx (modifyArchState f)" + by (clarsimp simp: modifyArchState_def modify_isolatable) + +lemma getVCPU_isolatable: + "thread_actions_isolatable idx (getObject v :: vcpu kernel)" + by (rule getObject_isolatable; simp add: projectKO_opt_vcpu) + +lemma getASIDPool_isolatable: + "thread_actions_isolatable idx (getObject v :: asidpool kernel)" + by (rule getObject_isolatable; simp add: projectKO_opt_asidpool) + +lemma modify_not_fail[simp]: + "modify f s \ fail s" + by (simp add: simpler_modify_def fail_def) + +lemma setObject_assert_modify: + "\ updateObject v = updateObject_default v; (1::machine_word) < 2 ^ objBits v; + \ko v'. projectKO_opt ko = Some (v'::'a) \ objBitsKO ko = objBits v \ \ + setObject p (v::'a::pspace_storable) s = (do + assert (obj_at' (\_::'a. True) p s); + modify (ksPSpace_update (\ps. 
ps(p \ injectKOS v))) + od) s" + apply (clarsimp simp: assert_def setObject_modify split: if_split) + apply (clarsimp simp: setObject_def updateObject_default_def exec_gets split_def bind_assoc) + apply (clarsimp simp: assert_opt_def assert_def alignCheck_assert projectKO_def + split: option.splits if_splits) + apply (clarsimp simp: magnitudeCheck_assert2 assert_def split: if_splits) + apply (clarsimp simp: obj_at'_def projectKOs) + done + +lemma setVCPU_isolatable: + "thread_actions_isolatable idx (setObject p (v::vcpu))" + apply (clarsimp simp: thread_actions_isolatable_def monadic_rewrite_def isolate_thread_actions_def) + apply (clarsimp simp: exec_gets getSchedulerAction_def) + apply (subst setObject_assert_modify; + simp add: projectKOs objBits_simps archObjSize_def vcpuBits_def vcpuBits_def pageBits_def)+ + apply (clarsimp simp: select_f_asserts assert_def obj_at_partial_overwrite_id2 split: if_splits) + apply (clarsimp simp: select_f_def simpler_modify_def bind_def) + apply (case_tac s) + apply simp + apply (rule ext) + apply (clarsimp simp: partial_overwrite_def put_tcb_state_regs_def split: if_split) + apply (rule conjI) + apply clarsimp + apply (rename_tac p') + apply (erule_tac x=p' in allE) + apply (clarsimp simp: obj_at'_def projectKOs) + apply (clarsimp simp: put_tcb_state_regs_tcb_def get_tcb_state_regs_def) + apply (rename_tac p') + apply (erule_tac x=p' in allE) + apply (clarsimp simp: obj_at'_def projectKOs atcbContextSet_def atcbContextGet_def split: tcb_state_regs.splits) + apply (rename_tac tcb) + apply (case_tac tcb, simp) + apply (rename_tac arch_tcb) + apply (case_tac arch_tcb, simp) + done + +lemma setASIDPool_isolatable: + "thread_actions_isolatable idx (setObject p (v::asidpool))" + apply (clarsimp simp: thread_actions_isolatable_def monadic_rewrite_def isolate_thread_actions_def) + apply (clarsimp simp: exec_gets getSchedulerAction_def) + apply (subst setObject_assert_modify; + simp add: projectKOs objBits_simps archObjSize_def vcpuBits_def vcpuBits_def pageBits_def)+ + apply (clarsimp simp: select_f_asserts assert_def obj_at_partial_overwrite_id2 split: if_splits) + apply (clarsimp simp: select_f_def simpler_modify_def bind_def) + apply (case_tac s) + apply simp + apply (rule ext) + apply (clarsimp simp: partial_overwrite_def put_tcb_state_regs_def split: if_split) + apply (rule conjI) + apply clarsimp + apply (rename_tac p') + apply (erule_tac x=p' in allE) + apply (clarsimp simp: obj_at'_def projectKOs) + apply (clarsimp simp: put_tcb_state_regs_tcb_def get_tcb_state_regs_def) + apply (rename_tac p') + apply (erule_tac x=p' in allE) + apply (clarsimp simp: obj_at'_def projectKOs atcbContextSet_def atcbContextGet_def split: tcb_state_regs.splits) + apply (rename_tac tcb) + apply (case_tac tcb, simp) + apply (rename_tac arch_tcb) + apply (case_tac arch_tcb, simp) + done + +lemma vcpuUpdate_isolatable: + "thread_actions_isolatable idx (vcpuUpdate p f)" + apply (clarsimp simp: vcpuUpdate_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + getVCPU_isolatable setVCPU_isolatable + |wp|assumption|clarsimp)+ + done + +lemma vgicUpdate_isolatable: + "thread_actions_isolatable idx (vgicUpdate p f)" + by (clarsimp simp: vgicUpdate_def vcpuUpdate_isolatable) + +lemma vgicUpdateLR_isolatable: + "thread_actions_isolatable idx (vgicUpdateLR p i virq)" + by (clarsimp simp: vgicUpdateLR_def vgicUpdate_isolatable) + +lemma vcpuWriteReg_isolatable: + "thread_actions_isolatable idx (vcpuWriteReg v p val)" + apply (clarsimp simp: vcpuWriteReg_def) + apply 
(intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + vcpuUpdate_isolatable doMachineOp_isolatable + | wpsimp)+ + done + +lemma vcpuReadReg_isolatable: + "thread_actions_isolatable idx (vcpuReadReg v p)" + apply (clarsimp simp: vcpuReadReg_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + vcpuUpdate_isolatable getVCPU_isolatable thread_actions_isolatable_return + | wpsimp)+ + done + +lemma vcpuSaveReg_isolatable: + "thread_actions_isolatable idx (vcpuSaveReg p v)" + apply (clarsimp simp: vcpuSaveReg_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + vcpuUpdate_isolatable doMachineOp_isolatable + | wpsimp)+ + done + +lemma vcpuRestoreReg_isolatable: + "thread_actions_isolatable idx (vcpuRestoreReg p v)" + apply (clarsimp simp: vcpuRestoreReg_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + vcpuUpdate_isolatable doMachineOp_isolatable getVCPU_isolatable + | wpsimp)+ + done + +lemma thread_actions_isolatable_mapM_x: + "\ \x. thread_actions_isolatable idx (f x); + \x t. f x \tcb_at' t\ \ \ thread_actions_isolatable idx (mapM_x f xs)" + apply (induct xs; clarsimp simp: mapM_x_Nil mapM_x_Cons thread_actions_isolatable_returns) + apply (rule thread_actions_isolatable_bind[OF _ _ hoare_pre(1)]; clarsimp?) + apply assumption+ + done + +lemma vcpuSaveRegRange_isolatable: + "thread_actions_isolatable idx (vcpuSaveRegRange p r rt)" + apply (clarsimp simp: vcpuSaveRegRange_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + vcpuSaveReg_isolatable thread_actions_isolatable_mapM_x + | wpsimp)+ + done + +lemma vcpuRestoreRegRange_isolatable: + "thread_actions_isolatable idx (vcpuRestoreRegRange p r rt)" + apply (clarsimp simp: vcpuRestoreRegRange_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + vcpuRestoreReg_isolatable thread_actions_isolatable_mapM_x + | wpsimp)+ + done + +lemma saveVirtTimer_isolatable: + "thread_actions_isolatable idx (saveVirtTimer v)" + apply (clarsimp simp: saveVirtTimer_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_if thread_actions_isolatable_returns + thread_actions_isolatable_fail + gets_isolatable doMachineOp_isolatable vcpuSaveReg_isolatable + vcpuWriteReg_isolatable vcpuUpdate_isolatable + | wpsimp | fastforce)+ + done + +lemma getIRQState_isolatable: + "thread_actions_isolatable idx (getIRQState irq)" + apply (clarsimp simp: getIRQState_def liftM_def getInterruptState_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_returns gets_isolatable + | wpsimp | fastforce)+ + done + +lemma restoreVirtTimer_isolatable: + "thread_actions_isolatable idx (restoreVirtTimer v)" + apply (clarsimp simp: restoreVirtTimer_def when_def isIRQActive_def liftM_bind) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_if thread_actions_isolatable_returns + thread_actions_isolatable_fail + gets_isolatable doMachineOp_isolatable vcpuSaveReg_isolatable + vcpuReadReg_isolatable vcpuWriteReg_isolatable vcpuUpdate_isolatable + getVCPU_isolatable getIRQState_isolatable vcpuRestoreReg_isolatable + | wpsimp | fastforce)+ + done + +lemma vcpuSave_isolatable: + "thread_actions_isolatable idx (vcpuSave v)" + supply if_split[split del] + apply (clarsimp simp: vcpuSave_def armvVCPUSave_def thread_actions_isolatable_fail when_def + split: option.splits) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + 
thread_actions_isolatable_if thread_actions_isolatable_returns + thread_actions_isolatable_fail + gets_isolatable doMachineOp_isolatable vcpuSaveReg_isolatable + vgicUpdateLR_isolatable vgicUpdate_isolatable vcpuSaveRegRange_isolatable + saveVirtTimer_isolatable + thread_actions_isolatable_mapM_x + | wpsimp wp: mapM_x_wp|fastforce)+ + done + +lemma vcpuEnable_isolatable: + "thread_actions_isolatable idx (vcpuEnable v)" + apply (clarsimp simp: vcpuEnable_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + vcpuRestoreReg_isolatable doMachineOp_isolatable getVCPU_isolatable + restoreVirtTimer_isolatable + | wpsimp)+ + done + +lemma vcpuRestore_isolatable: + "thread_actions_isolatable idx (vcpuRestore v)" + apply (clarsimp simp: vcpuRestore_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + getVCPU_isolatable gets_isolatable doMachineOp_isolatable vcpuEnable_isolatable + vcpuRestoreRegRange_isolatable + | wpsimp)+ + done + +lemma vcpuDisable_isolatable: + "thread_actions_isolatable idx (vcpuDisable v)" + apply (clarsimp simp: vcpuDisable_def split: option.splits, intro conjI) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + doMachineOp_isolatable vcpuEnable_isolatable + vgicUpdate_isolatable vcpuSaveReg_isolatable saveVirtTimer_isolatable + | wpsimp)+ + done + +lemma vcpuSwitch_isolatable: + "thread_actions_isolatable idx (vcpuSwitch v)" + supply if_cong[cong] option.case_cong[cong] + apply (clarsimp simp: vcpuSwitch_def when_def split: option.splits) + apply (safe intro!: + thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_bindE[OF _ _ hoare_pre(1)] + thread_actions_isolatable_catch[OF _ _ hoare_pre(1)] + thread_actions_isolatable_if thread_actions_isolatable_returns + thread_actions_isolatable_fail + gets_isolatable) + apply (clarsimp simp: thread_actions_isolatable_returns + split: option.splits + |intro thread_actions_isolatable_if thread_actions_isolatable_returns + thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + vcpuSave_isolatable vcpuRestore_isolatable + vcpuDisable_isolatable vcpuEnable_isolatable + modifyArchState_isolatable conjI doMachineOp_isolatable + |wp + |assumption)+ + done + +lemma liftM_getObject_return_tcb: + "ko_at' v p s \ liftM f (getObject p) s = return (f (v::tcb)) s" + by (simp add: liftM_def bind_def getObject_return_tcb return_def objBits_defs) + +lemma threadGet_vcpu_isolatable: + "thread_actions_isolatable idx (threadGet (atcbVCPUPtr o tcbArch) t)" + supply if_split[split del] + apply (clarsimp simp: threadGet_def thread_actions_isolatable_def) + apply (clarsimp simp: isolate_thread_actions_def) + apply (clarsimp simp: monadic_rewrite_def) + apply (cases "t \ range idx") + apply clarsimp + apply (frule_tac x=x in spec) + apply (drule tcb_ko_at') + apply (clarsimp simp: exec_gets getSchedulerAction_def liftM_getObject_return_tcb) + apply (rename_tac tcb) + apply (subgoal_tac "\tcb'. ko_at' tcb' t (ksPSpace_update (partial_overwrite idx (\y. 
undefined)) + s\ksSchedulerAction := ResumeCurrentThread\) \ + atcbVCPUPtr (tcbArch tcb') = atcbVCPUPtr (tcbArch tcb)") + apply (clarsimp simp: liftM_getObject_return_tcb) + apply (clarsimp simp: return_def select_f_def simpler_modify_def bind_def) + apply (clarsimp simp: o_def partial_overwrite_get_tcb_state_regs) + apply (clarsimp simp: obj_at_partial_overwrite_If) + apply (rule_tac x="put_tcb_state_regs_tcb undefined tcb" in exI) + apply (clarsimp simp: put_tcb_state_regs_tcb_def atcbContextSet_def obj_at'_def projectKOs + split: tcb_state_regs.splits) + apply (clarsimp simp: getObject_get_assert exec_gets liftM_def bind_assoc getSchedulerAction_def + obj_at_partial_overwrite_id1) + apply (clarsimp simp: assert_def exec_gets select_f_asserts split: if_split) + apply (clarsimp simp: select_f_def return_def simpler_modify_def bind_def + partial_overwrite_get_tcb_state_regs o_def) + apply (clarsimp simp: partial_overwrite_def) + done + +lemma getTCB_threadGet: + "do + tcbobj \ getObject t; + x \ f (atcbVCPUPtr (tcbArch tcbobj)); + g x + od = do + vcpu_ptr \ threadGet (atcbVCPUPtr o tcbArch) t; + x \ f vcpu_ptr; + g x + od" + by (simp add: threadGet_def liftM_def) + +lemma cap_case_isPageTableCap: + "(case cap of ArchObjectCap (PageTableCap pm pt_t (Some asid)) \ fn pm asid | _ => g) + = (if (if isArchObjectCap cap + then if isPageTableCap (capCap cap) then capPTMappedAddress (capCap cap) \ None else False + else False) + then fn (capPTBasePtr (capCap cap)) (the (capPTMappedAddress (capCap cap))) else g)" + apply (cases cap; simp add: isArchObjectCap_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, simp_all add: isPageTableCap_def) + apply (rename_tac option) + apply (case_tac option; simp) + done + +lemma armContextSwitch_isolatable: + "thread_actions_isolatable idx (armContextSwitch p asid)" + supply if_split[split del] + apply (simp add: armContextSwitch_def getVMID_def loadVMID_def getASIDPoolEntry_def getPoolPtr_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_bindE[OF _ _ hoare_pre(1)] + thread_actions_isolatable_catch[OF _ _ hoare_pre(1)] + thread_actions_isolatable_if thread_actions_isolatable_returns + thread_actions_isolatable_fail gets_isolatable getASIDPool_isolatable + setASIDPool_isolatable doMachineOp_isolatable + modify_isolatable + | wpsimp wp: hoare_vcg_all_lift + | split option.split asidpool.split asidpool_entry.split + | simp add: liftM_def comp_def findFreeVMID_def invalidateASID_def updateASIDPoolEntry_def + getPoolPtr_def invalidateVMIDEntry_def storeVMID_def + | wp hoare_drop_imps)+ + done + +lemma setVMRoot_isolatable: + "thread_actions_isolatable idx (setVMRoot t)" + supply if_split[split del] haskell_assertE_inv[wp] + apply (simp add: setVMRoot_def getThreadVSpaceRoot_def + locateSlot_conv getSlotCap_def + if_bool_simps cap_case_isPageTableCap + whenE_def liftE_def + stateAssert_def2 + cong: if_cong) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_bindE[OF _ _ hoare_pre(1)] + thread_actions_isolatable_catch[OF _ _ hoare_pre(1)] + thread_actions_isolatable_if thread_actions_isolatable_returns + thread_actions_isolatable_fail + gets_isolatable getCTE_isolatable + findVSpaceForASID_isolatable doMachineOp_isolatable armContextSwitch_isolatable + | clarsimp simp: projectKO_opt_asidpool whenE_def liftE_def setGlobalUserVSpace_def + if_apply_def2 assertE_liftE + | wpsimp wp: getASID_wp typ_at_lifts + | split capability.split arch_capability.split 
pt_type.split option.split + | safe)+ + done + +lemma transferCaps_simple: + "transferCaps mi [] ep receiver rcvrBuf = + do + getReceiveSlots receiver rcvrBuf; + return (mi\msgExtraCaps := 0, msgCapsUnwrapped := 0\) + od" + apply (cases mi) + apply (clarsimp simp: transferCaps_def getThreadCSpaceRoot_def locateSlot_conv) + apply (rule ext bind_apply_cong[OF refl])+ + apply (simp add: upto_enum_def + split: option.split) + done + +lemma transferCaps_simple_rewrite: + "monadic_rewrite True True ((\_. caps = []) and \) + (transferCaps mi caps ep r rBuf) + (return (mi \ msgExtraCaps := 0, msgCapsUnwrapped := 0 \))" + including no_pre + supply empty_fail_getReceiveSlots[wp] (* FIXME *) + apply (rule monadic_rewrite_gen_asm) + apply (simp add: transferCaps_simple) + apply (monadic_rewrite_symb_exec_l_drop, rule monadic_rewrite_refl) + apply simp + done + +lemma lookupExtraCaps_simple_rewrite: + "msgExtraCaps mi = 0 \ + (lookupExtraCaps thread rcvBuf mi = returnOk [])" + by (cases mi, simp add: lookupExtraCaps_def getExtraCPtrs_def + liftE_bindE upto_enum_step_def mapM_Nil + split: option.split) + +lemma lookupIPC_inv: "\P\ lookupIPCBuffer f t \\rv. P\" + by wp + +(* FIXME move *) +lemmas empty_fail_user_getreg[intro!, wp, simp] = empty_fail_asUser[OF empty_fail_getRegister] + +lemma copyMRs_simple: + "msglen \ of_nat (length msgRegisters) \ + copyMRs sender sbuf receiver rbuf msglen + = forM_x (take (unat msglen) msgRegisters) + (\r. do v \ asUser sender (getRegister r); + asUser receiver (setRegister r v) od) + >>= (\rv. return msglen)" + apply (clarsimp simp: copyMRs_def mapM_discarded) + apply (rule bind_cong[OF refl]) + apply (simp add: length_msgRegisters min_def + word_le_nat_alt + split: option.split) + apply (simp add: upto_enum_def mapM_Nil) + done + +lemma doIPCTransfer_simple_rewrite: + "monadic_rewrite True True + ((\_. msgExtraCaps (messageInfoFromWord msgInfo) = 0 + \ msgLength (messageInfoFromWord msgInfo) + \ of_nat (length msgRegisters)) + and obj_at' (\tcb. tcbFault tcb = None + \ (user_regs o atcbContextGet o tcbArch) tcb msgInfoRegister = msgInfo) sender) + (doIPCTransfer sender ep badge grant rcvr) + (do rv \ mapM_x (\r. 
do v \ asUser sender (getRegister r); + asUser rcvr (setRegister r v) + od) + (take (unat (msgLength (messageInfoFromWord msgInfo))) msgRegisters); + y \ setMessageInfo rcvr ((messageInfoFromWord msgInfo) \msgCapsUnwrapped := 0\); + asUser rcvr (setRegister badgeRegister badge) + od)" + supply if_cong[cong] + apply (rule monadic_rewrite_gen_asm) + apply (simp add: doIPCTransfer_def bind_assoc doNormalTransfer_def + getMessageInfo_def + cong: option.case_cong) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l_known None, simp) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l_known msgInfo) + apply (simp add: lookupExtraCaps_simple_rewrite returnOk_catch_bind) + apply (rule monadic_rewrite_bind) + apply (rule monadic_rewrite_from_simple, rule copyMRs_simple) + apply (rule monadic_rewrite_bind_head) + apply (rule transferCaps_simple_rewrite) + apply (wp threadGet_const user_getreg_rv asUser_inv)+ + apply (simp add: bind_assoc) + apply (rule monadic_rewrite_symb_exec_l_drop[OF _ lookupIPC_inv empty_fail_lookupIPCBuffer] + monadic_rewrite_symb_exec_l_drop[OF _ threadGet_inv empty_fail_threadGet] + monadic_rewrite_symb_exec_l_drop[OF _ user_getreg_inv' empty_fail_user_getreg] + monadic_rewrite_bind_head monadic_rewrite_bind_tail)+ + apply (case_tac "messageInfoFromWord msgInfo") + apply simp + apply (rule monadic_rewrite_refl) + apply wp + apply clarsimp + apply (auto elim!: obj_at'_weakenE) + done + +lemma monadic_rewrite_setSchedulerAction_noop: + "monadic_rewrite F E (\s. ksSchedulerAction s = act) (setSchedulerAction act) (return ())" + unfolding setSchedulerAction_def + apply (rule monadic_rewrite_guard_imp, rule monadic_rewrite_modify_noop) + apply simp + done + +lemma rescheduleRequired_simple_rewrite: + "monadic_rewrite F E + (sch_act_simple) + rescheduleRequired + (setSchedulerAction ChooseNewThread)" + apply (simp add: rescheduleRequired_def getSchedulerAction_def) + apply (simp add: monadic_rewrite_def exec_gets sch_act_simple_def) + apply auto + done + +(* FIXME move *) +lemma empty_fail_isRunnable[intro!, wp, simp]: + "empty_fail (isRunnable t)" + by (simp add: isRunnable_def isStopped_def empty_fail_cond) + +lemma setupCallerCap_rewrite: + "monadic_rewrite True True (\s. 
reply_masters_rvk_fb (ctes_of s)) + (setupCallerCap send rcv canGrant) + (do setThreadState BlockedOnReply send; + replySlot \ getThreadReplySlot send; + callerSlot \ getThreadCallerSlot rcv; + replySlotCTE \ getCTE replySlot; + assert (mdbNext (cteMDBNode replySlotCTE) = 0 + \ isReplyCap (cteCap replySlotCTE) + \ capReplyMaster (cteCap replySlotCTE) + \ mdbFirstBadged (cteMDBNode replySlotCTE) + \ mdbRevocable (cteMDBNode replySlotCTE)); + cteInsert (ReplyCap send False canGrant) replySlot callerSlot + od)" + apply (simp add: setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv + getSlotCap_def) + apply (rule monadic_rewrite_bind_tail)+ + apply (rule monadic_rewrite_assert)+ + apply (rule_tac P="mdbFirstBadged (cteMDBNode masterCTE) + \ mdbRevocable (cteMDBNode masterCTE)" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_trans) + apply monadic_rewrite_symb_exec_l + apply monadic_rewrite_symb_exec_l_drop + apply (rule monadic_rewrite_refl) + apply wpsimp+ + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ + apply (fastforce simp: reply_masters_rvk_fb_def) + done + +lemma oblivious_getObject_ksPSpace_default: + "\ \s. ksPSpace (f s) = ksPSpace s; + \a b c ko. (loadObject a b c ko :: 'a kernel) \ loadObject_default a b c ko \ \ + oblivious f (getObject p :: ('a :: pspace_storable) kernel)" + apply (simp add: getObject_def split_def loadObject_default_def + projectKO_def2 alignCheck_assert magnitudeCheck_assert) + apply (intro oblivious_bind, simp_all) + done + +lemmas oblivious_getObject_ksPSpace_tcb[simp] + = oblivious_getObject_ksPSpace_default[OF _ loadObject_tcb] + +lemma oblivious_setObject_ksPSpace_tcb[simp]: + "\ \s. ksPSpace (f s) = ksPSpace s; + \s g. ksPSpace_update g (f s) = f (ksPSpace_update g s) \ \ + oblivious f (setObject p (v :: tcb))" + apply (simp add: setObject_def split_def updateObject_default_def + projectKO_def2 alignCheck_assert magnitudeCheck_assert) + apply (intro oblivious_bind, simp_all) + done + +lemma oblivious_getObject_ksPSpace_cte[simp]: + "\ \s. ksPSpace (f s) = ksPSpace s \ \ + oblivious f (getObject p :: cte kernel)" + apply (simp add: getObject_def split_def loadObject_cte + projectKO_def2 alignCheck_assert magnitudeCheck_assert + typeError_def unless_when + cong: Structures_H.kernel_object.case_cong) + apply (intro oblivious_bind, + simp_all split: Structures_H.kernel_object.split if_split) + by (safe intro!: oblivious_bind, simp_all) + +lemma oblivious_setObject_ksPSpace_vcpu[simp]: + "\ \s. ksPSpace (f s) = ksPSpace s; + \s g. ksPSpace_update g (f s) = f (ksPSpace_update g s) \ \ + oblivious f (setObject p (v :: vcpu))" + apply (simp add: setObject_def split_def updateObject_default_def + projectKO_def2 alignCheck_assert magnitudeCheck_assert) + apply (intro oblivious_bind, simp_all) + done + +lemma oblivious_setObject_ksPSpace_asidpool[simp]: + "\ \s. ksPSpace (f s) = ksPSpace s; + \s g. ksPSpace_update g (f s) = f (ksPSpace_update g s) \ \ + oblivious f (setObject p (v :: asidpool))" + apply (simp add: setObject_def split_def updateObject_default_def + projectKO_def2 alignCheck_assert magnitudeCheck_assert) + apply (intro oblivious_bind, simp_all) + done + +lemma oblivious_doMachineOp[simp]: + "\ \s. ksMachineState (f s) = ksMachineState s; + \g s. 
ksMachineState_update g (f s) = f (ksMachineState_update g s) \ + \ oblivious f (doMachineOp oper)" + apply (simp add: doMachineOp_def split_def) + apply (intro oblivious_bind, simp_all) + done + +lemmas oblivious_getObject_ksPSpace_asidpool[simp] + = oblivious_getObject_ksPSpace_default[OF _ loadObject_asidpool] + +lemma oblivious_modifyArchState_schact[simp]: + "oblivious (ksSchedulerAction_update f) (modifyArchState f')" + by (simp add: oblivious_def modifyArchState_def simpler_modify_def) + +lemmas oblivious_getObject_ksPSpace_vcpu[simp] + = oblivious_getObject_ksPSpace_default [OF _ loadObject_vcpu] + +lemma oblivious_getIRQState_schact: + "oblivious (ksSchedulerAction_update f) (getIRQState irq)" + by (simp add: getIRQState_def liftM_def getInterruptState_def) + (safe intro!: oblivious_bind oblivious_bindE; simp) + +lemma oblivious_updateASIDPoolEntry_schact: + "oblivious (ksSchedulerAction_update f) (updateASIDPoolEntry f' asid)" + by (safe intro!: oblivious_bind + | simp add: findVSpaceForASID_def getPoolPtr_def oblivious_liftM updateASIDPoolEntry_def)+ + +lemma oblivious_setVMRoot_schact: + "oblivious (ksSchedulerAction_update f) (setVMRoot t)" + apply (simp add: setVMRoot_def getThreadVSpaceRoot_def locateSlot_conv + getSlotCap_def getCTE_def armContextSwitch_def) + by (safe intro!: oblivious_bind oblivious_bindE oblivious_catch oblivious_mapM_x + oblivious_getIRQState_schact + | simp add: liftE_def liftME_def invalidateASID_def findVSpaceForASID_def + getASIDPoolEntry_def getPoolPtr_def oblivious_liftM checkPTAt_def + armContextSwitch_def getVMID_def loadVMID_def findFreeVMID_def + updateASIDPoolEntry_def + invalidateVMIDEntry_def storeVMID_def setGlobalUserVSpace_def + split: if_split capability.split arch_capability.split option.split pt_type.split + asidpool.split asidpool_entry.split)+ + +lemma oblivious_vcpuSwitch_schact: + "oblivious (ksSchedulerAction_update f) (vcpuSwitch v)" + apply (simp add: vcpuSwitch_def) + apply (safe intro!: oblivious_bind oblivious_mapM_x + | simp_all add: vcpuSwitch_def vcpuDisable_def vcpuRestore_def + vcpuEnable_def vcpuSave_def + vcpuUpdate_def vgicUpdate_def vgicUpdateLR_def + vcpuSaveReg_def vcpuSaveRegRange_def + vcpuRestoreReg_def vcpuRestoreRegRange_def + saveVirtTimer_def vcpuWriteReg_def restoreVirtTimer_def vcpuReadReg_def + armvVCPUSave_def isIRQActive_def liftM_bind getIRQState_def + getInterruptState_def + split: if_split option.split)+ + done + +lemma oblivious_switchToThread_schact: + "oblivious (ksSchedulerAction_update f) (ThreadDecls_H.switchToThread t)" + apply (simp add: Thread_H.switchToThread_def switchToThread_def bind_assoc + getCurThread_def setCurThread_def threadGet_def liftM_def + threadSet_def tcbSchedEnqueue_def unless_when asUser_def + getQueue_def setQueue_def storeWordUser_def setRegister_def + pointerInUserData_def isRunnable_def isStopped_def + getThreadState_def tcbSchedDequeue_def bitmap_fun_defs + getThreadState_def tcbSchedDequeue_def tcbQueueRemove_def bitmap_fun_defs + ksReadyQueues_asrt_def) + by (safe intro!: oblivious_bind + | simp_all add: ready_qs_runnable_def idleThreadNotQueued_def + oblivious_setVMRoot_schact oblivious_vcpuSwitch_schact)+ + +(* FIXME move *) +lemma empty_fail_getCurThread[intro!, wp, simp]: + "empty_fail getCurThread" by (simp add: getCurThread_def) + +lemma activateThread_simple_rewrite: + "monadic_rewrite True True (ct_in_state' ((=) Running)) + (activateThread) (return ())" + apply (simp add: activateThread_def) + apply wp_pre + apply (monadic_rewrite_symb_exec_l) + apply 
(monadic_rewrite_symb_exec_l_known Running, simp) + apply (rule monadic_rewrite_refl) + apply wpsimp+ + apply (clarsimp simp: ct_in_state'_def elim!: pred_tcb'_weakenE) + done + +end + +lemma setCTE_obj_at_prio[wp]: + "\obj_at' (\tcb. P (tcbPriority tcb)) t\ setCTE p v \\rv. obj_at' (\tcb. P (tcbPriority tcb)) t\" + unfolding setCTE_def + by (rule setObject_cte_obj_at_tcb', simp+) + +crunch obj_at_prio[wp]: cteInsert "obj_at' (\tcb. P (tcbPriority tcb)) t" + (wp: crunch_wps) + +crunch ctes_of[wp]: asUser "\s. P (ctes_of s)" + (wp: crunch_wps) + +lemma tcbSchedEnqueue_tcbPriority[wp]: + "\obj_at' (\tcb. P (tcbPriority tcb)) t\ + tcbSchedEnqueue t' + \\rv. obj_at' (\tcb. P (tcbPriority tcb)) t\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wp | simp cong: if_cong)+ + done + +crunch obj_at_prio[wp]: cteDeleteOne "obj_at' (\tcb. P (tcbPriority tcb)) t" + (wp: crunch_wps setEndpoint_obj_at'_tcb setNotification_tcb simp: crunch_simps unless_def) + +lemma setThreadState_no_sch_change: + "\\s. P (ksSchedulerAction s) \ (runnable' st \ t \ ksCurThread s)\ + setThreadState st t + \\rv s. P (ksSchedulerAction s)\" + (is "Nondet_VCG.valid ?P ?f ?Q") + apply (simp add: setThreadState_def setSchedulerAction_def) + apply (wp hoare_pre_cont[where f=rescheduleRequired]) + apply (rule_tac Q="\_. ?P and st_tcb_at' ((=) st) t" in hoare_post_imp) + apply (clarsimp split: if_split) + apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs) + apply (wp threadSet_pred_tcb_at_state) + apply simp + done + +lemma asUser_obj_at_unchangedT: + assumes x: "\tcb con con'. con' \ fst (m con) + \ P (tcbArch_update (\_. atcbContextSet (snd con') (tcbArch tcb)) tcb) = P tcb" shows + "\obj_at' P t\ asUser t' m \\rv. obj_at' P t\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_obj_at' threadGet_wp) + apply (clarsimp simp: obj_at'_def projectKOs x cong: if_cong) + done + +lemmas asUser_obj_at_unchanged + = asUser_obj_at_unchangedT[OF all_tcbI, rule_format] + +lemma bind_assoc: + "do y \ do x \ m; f x od; g y od + = do x \ m; y \ f x; g y od" + by (rule bind_assoc) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma setObject_modify_assert: + "\ updateObject v = updateObject_default v \ + \ setObject p v = do f \ gets (obj_at' (\v'. v = v' \ True) p); + assert f; modify (ksPSpace_update (\ps. ps(p \ injectKO v))) od" + using objBits_2n[where obj=v] + apply (simp add: setObject_def split_def updateObject_default_def + bind_assoc projectKO_def2 alignCheck_assert) + apply (rule ext, simp add: exec_gets) + apply (case_tac "obj_at' (\v'. 
v = v' \ True) p x") + apply (clarsimp simp: obj_at'_def projectKOs lookupAround2_known1 + assert_opt_def) + apply (clarsimp simp: project_inject) + apply (simp only: objBits_def objBitsT_koTypeOf[symmetric] koTypeOf_injectKO) + apply (simp add: magnitudeCheck_assert2 simpler_modify_def) + apply (clarsimp simp: assert_opt_def assert_def magnitudeCheck_assert2 + split: option.split if_split) + apply (clarsimp simp: obj_at'_def projectKOs) + apply (clarsimp simp: project_inject) + apply (simp only: objBits_def objBitsT_koTypeOf[symmetric] + koTypeOf_injectKO simp_thms) + done + +lemma setEndpoint_isolatable: + "thread_actions_isolatable idx (setEndpoint p e)" + supply if_split[split del] + apply (simp add: setEndpoint_def setObject_modify_assert + assert_def) + apply (case_tac "p \ range idx") + apply (clarsimp simp: thread_actions_isolatable_def + monadic_rewrite_def fun_eq_iff + liftM_def isolate_thread_actions_def + bind_assoc exec_gets getSchedulerAction_def + bind_select_f_bind[symmetric]) + apply (simp add: obj_at_partial_overwrite_id2) + apply (drule_tac x=x in spec) + apply (clarsimp simp: obj_at'_def projectKOs select_f_asserts) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_if + thread_actions_isolatable_return + thread_actions_isolatable_fail) + apply (rule gets_isolatable) + apply (simp add: obj_at_partial_overwrite_id2) + apply (rule modify_isolatable) + apply (clarsimp simp: o_def partial_overwrite_def) + apply (rule kernel_state.fold_congs[OF refl refl]) + apply (clarsimp simp: fun_eq_iff + split: if_split) + apply (wp | simp)+ + done + +(* FIXME x64: tcb bits *) +lemma setCTE_assert_modify: + "setCTE p v = do c \ gets (real_cte_at' p); + t \ gets (tcb_at' (p && ~~ mask tcbBlockSizeBits) + and K ((p && mask tcbBlockSizeBits) \ dom tcb_cte_cases)); + if c then modify (ksPSpace_update (\ps. ps(p \ KOCTE v))) + else if t then + modify (ksPSpace_update + (\ps. 
ps (p && ~~ mask tcbBlockSizeBits \ + KOTCB (snd (the (tcb_cte_cases (p && mask tcbBlockSizeBits))) (K v) + (the (projectKO_opt (the (ps (p && ~~ mask tcbBlockSizeBits))))))))) + else fail od" + apply (clarsimp simp: setCTE_def setObject_def split_def + fun_eq_iff exec_gets) + apply (case_tac "real_cte_at' p x") + apply (clarsimp simp: obj_at'_def projectKOs lookupAround2_known1 + assert_opt_def alignCheck_assert objBits_simps' + magnitudeCheck_assert2 updateObject_cte) + apply (simp add: simpler_modify_def) + apply (simp split: if_split, intro conjI impI) + apply (clarsimp simp: obj_at'_def projectKOs) + apply (subgoal_tac "p \ (p && ~~ mask tcbBlockSizeBits) + 2 ^ tcbBlockSizeBits - 1") + apply (subgoal_tac "fst (lookupAround2 p (ksPSpace x)) + = Some (p && ~~ mask tcbBlockSizeBits, KOTCB obj)") + apply (simp add: assert_opt_def) + apply (subst updateObject_cte_tcb) + apply (fastforce simp add: subtract_mask) + apply (simp add: assert_opt_def alignCheck_assert bind_assoc + magnitudeCheck_assert + is_aligned_neg_mask2 objBits_def) + apply (rule ps_clear_lookupAround2, assumption+) + apply (rule word_and_le2) + apply (simp add: objBits_simps mask_def field_simps) + apply (simp add: simpler_modify_def cong: option.case_cong if_cong) + apply (clarsimp simp: lookupAround2_char1 word_and_le2) + apply (rule ccontr, clarsimp) + apply (erule(2) ps_clearD) + apply (simp add: objBits_simps mask_def field_simps) + apply (rule tcb_cte_cases_in_range2) + apply (simp add: subtract_mask) + apply simp + apply (clarsimp simp: assert_opt_def split: option.split) + apply (rule trans [OF bind_apply_cong[OF _ refl] fun_cong[OF fail_bind]]) + apply (simp add: fail_def prod_eq_iff) + apply (rule context_conjI) + apply (rule ccontr, clarsimp elim!: nonemptyE) + apply (frule(1) updateObject_cte_is_tcb_or_cte[OF _ refl]) + apply (erule disjE) + apply clarsimp + apply (frule(1) tcb_cte_cases_aligned_helpers) + apply (clarsimp simp: domI field_simps) + apply (clarsimp simp: lookupAround2_char1 obj_at'_def projectKOs + objBits_simps) + apply (clarsimp simp: obj_at'_def lookupAround2_char1 + objBits_simps' projectKOs cte_level_bits_def) + apply (erule empty_failD[OF empty_fail_updateObject_cte]) + done + +lemma partial_overwrite_fun_upd2: + "partial_overwrite idx tsrs (f (x := y)) + = (partial_overwrite idx tsrs f) + (x := if x \ range idx then put_tcb_state_regs (tsrs (inv idx x)) y + else y)" + by (simp add: fun_eq_iff partial_overwrite_def split: if_split) + +lemma atcbContextSetSetGet_eq[simp]: + "atcbContextSet (UserContext (fpu_state (atcbContext + (atcbContextSet (UserContext (fpu_state (atcbContext t)) r) t))) + (user_regs (atcbContextGet t))) t = t" + by (cases t, simp add: atcbContextSet_def atcbContextGet_def) + +lemma setCTE_isolatable: + "thread_actions_isolatable idx (setCTE p v)" + supply if_split[split del] + apply (simp add: setCTE_assert_modify) + apply (clarsimp simp: thread_actions_isolatable_def + monadic_rewrite_def fun_eq_iff + liftM_def exec_gets + isolate_thread_actions_def + bind_assoc exec_gets getSchedulerAction_def + bind_select_f_bind[symmetric] + obj_at_partial_overwrite_If + obj_at_partial_overwrite_id2 + cong: if_cong) + apply (case_tac "p && ~~ mask tcbBlockSizeBits \ range idx \ p && mask tcbBlockSizeBits \ dom tcb_cte_cases") + apply clarsimp + apply (frule_tac x=x in spec, erule obj_atE') + apply (subgoal_tac "\ real_cte_at' p s") + apply (clarsimp simp: select_f_returns select_f_asserts split: if_split) + apply (clarsimp simp: o_def simpler_modify_def partial_overwrite_fun_upd2) + 
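+      (* Case: the slot lies inside one of the isolated TCBs, so the single heap update is
+         pushed through partial_overwrite; the remaining goals handle the ordinary CTE and
+         out-of-range cases. *)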
apply (rule kernel_state.fold_congs[OF refl refl]) + apply (rule ext) + apply (clarsimp simp: partial_overwrite_get_tcb_state_regs + split: if_split) + apply (clarsimp simp: projectKOs get_tcb_state_regs_def + put_tcb_state_regs_def put_tcb_state_regs_tcb_def + partial_overwrite_def + split: tcb_state_regs.split) + apply (case_tac obj, simp add: projectKO_opt_tcb) + apply (simp add: tcb_cte_cases_def split: if_split_asm) + apply (drule_tac x=x in spec) + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps subtract_mask(2) [symmetric]) + apply (erule notE[rotated], erule (3) tcb_ctes_clear[rotated]) + apply (simp add: select_f_returns select_f_asserts split: if_split) + apply (intro conjI impI) + apply (clarsimp simp: simpler_modify_def fun_eq_iff partial_overwrite_fun_upd2 + intro!: kernel_state.fold_congs[OF refl refl]) + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) + apply (erule notE[rotated], rule tcb_ctes_clear[rotated 2], assumption+) + apply (fastforce simp add: subtract_mask) + apply simp + apply (clarsimp simp: simpler_modify_def + partial_overwrite_fun_upd2 o_def + partial_overwrite_get_tcb_state_regs + intro!: kernel_state.fold_congs[OF refl refl] + split: if_split) + apply (simp add: partial_overwrite_def) + apply (subgoal_tac "p \ range idx") + apply (clarsimp simp: simpler_modify_def + partial_overwrite_fun_upd2 o_def + partial_overwrite_get_tcb_state_regs + intro!: kernel_state.fold_congs[OF refl refl]) + apply clarsimp + apply (drule_tac x=x in spec) + apply (clarsimp simp: obj_at'_def projectKOs) + done + +lemma cteInsert_isolatable: + "thread_actions_isolatable idx (cteInsert cap src dest)" + supply if_split[split del] if_cong[cong] + apply (simp add: cteInsert_def updateCap_def updateMDB_def + Let_def setUntypedCapAsFull_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + thread_actions_isolatable_if + thread_actions_isolatable_returns + getCTE_isolatable setCTE_isolatable) + apply (wp | simp)+ + done + +lemma isolate_thread_actions_threadSet_tcbState: + "\ inj idx; idx t' = t \ \ + monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) + (threadSet (tcbState_update (\_. st)) t) + (isolate_thread_actions idx (return ()) + (\tsrs. (tsrs (t' := TCBStateRegs st (tsrContext (tsrs t'))))) + id)" + apply (simp add: isolate_thread_actions_def bind_assoc split_def + select_f_returns getSchedulerAction_def) + apply (clarsimp simp: monadic_rewrite_def exec_gets threadSet_def + getObject_get_assert bind_assoc liftM_def + setObject_modify_assert) + apply (frule_tac x=t' in spec, drule obj_at_ko_at') + apply (clarsimp simp: exec_gets simpler_modify_def o_def + intro!: kernel_state.fold_congs[OF refl refl]) + apply (simp add: partial_overwrite_fun_upd + partial_overwrite_get_tcb_state_regs) + apply (clarsimp simp: put_tcb_state_regs_def put_tcb_state_regs_tcb_def + projectKOs get_tcb_state_regs_def + elim!: obj_atE') + apply (case_tac ko) + apply (simp add: projectKO_opt_tcb) + done + +lemma thread_actions_isolatableD: + "\ thread_actions_isolatable idx f; inj idx \ + \ monadic_rewrite False True (\s. (\x. 
tcb_at' (idx x) s)) + f (isolate_thread_actions idx f id id)" + by (clarsimp simp: thread_actions_isolatable_def) + +lemma tcbSchedDequeue_rewrite: + "monadic_rewrite True True (obj_at' (Not \ tcbQueued) t) (tcbSchedDequeue t) (return ())" + apply (simp add: tcbSchedDequeue_def) + apply wp_pre + apply monadic_rewrite_symb_exec_l + apply (monadic_rewrite_symb_exec_l_known False, simp) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: threadGet_const)+ + done + +(* FIXME: improve automation here *) +lemma switchToThread_rewrite: + "monadic_rewrite True True + (ct_in_state' (Not \ runnable') and cur_tcb' and obj_at' (Not \ tcbQueued) t) + (switchToThread t) + (do Arch.switchToThread t; setCurThread t od)" + apply (simp add: switchToThread_def Thread_H.switchToThread_def) + apply (monadic_rewrite_l tcbSchedDequeue_rewrite, simp) + (* strip LHS of getters and asserts until LHS and RHS are the same *) + apply (repeat_unless \rule monadic_rewrite_refl\ monadic_rewrite_symb_exec_l) + apply wpsimp+ + apply (clarsimp simp: comp_def) + done + +lemma threadGet_isolatable: + assumes v: "\tsr. \tcb. f (put_tcb_state_regs_tcb tsr tcb) = f tcb" + shows "thread_actions_isolatable idx (threadGet f t)" + apply (clarsimp simp: threadGet_def thread_actions_isolatable_def + isolate_thread_actions_def split_def + getObject_get_assert liftM_def + bind_select_f_bind[symmetric] + select_f_returns select_f_asserts bind_assoc) + apply (clarsimp simp: monadic_rewrite_def exec_gets + getSchedulerAction_def) + apply (simp add: obj_at_partial_overwrite_If) + apply (rule bind_apply_cong[OF refl]) + apply (clarsimp simp: exec_gets exec_modify o_def + ksPSpace_update_partial_id in_monad) + apply (erule obj_atE') + apply (clarsimp simp: projectKOs + partial_overwrite_def put_tcb_state_regs_def + cong: if_cong) + apply (simp add: projectKO_opt_tcb v split: if_split) + done + +lemma switchToThread_isolatable: + "thread_actions_isolatable idx (Arch.switchToThread t)" + apply (simp add: switchToThread_def getTCB_threadGet + storeWordUser_def stateAssert_def2) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + gets_isolatable setVMRoot_isolatable + thread_actions_isolatable_if + doMachineOp_isolatable + threadGet_isolatable [OF all_tcbI] + thread_actions_isolatable_returns + thread_actions_isolatable_fail + threadGet_vcpu_isolatable + vcpuSwitch_isolatable) + apply (wpsimp simp: put_tcb_state_regs_tcb_def atcbContextSet_def + split: tcb_state_regs.split)+ + done + +lemma tcbQueued_put_tcb_state_regs_tcb: + "tcbQueued (put_tcb_state_regs_tcb tsr tcb) = tcbQueued tcb" + apply (clarsimp simp: put_tcb_state_regs_tcb_def) + by (cases tsr; clarsimp) + +lemma idleThreadNotQueued_isolatable: + "thread_actions_isolatable idx (stateAssert idleThreadNotQueued [])" + apply (simp add: stateAssert_def2 stateAssert_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + gets_isolatable + thread_actions_isolatable_if + thread_actions_isolatable_returns + thread_actions_isolatable_fail) + unfolding idleThreadNotQueued_def + apply (clarsimp simp: obj_at_partial_overwrite_If) + apply (clarsimp simp: obj_at'_def tcbQueued_put_tcb_state_regs_tcb) + apply wpsimp+ + done + +lemma setCurThread_isolatable: + "thread_actions_isolatable idx (setCurThread t)" + unfolding setCurThread_def + apply (rule thread_actions_isolatable_bind) + apply (rule idleThreadNotQueued_isolatable) + apply (fastforce intro: modify_isolatable) + apply wpsimp + done + +lemma isolate_thread_actions_tcbs_at: + assumes f: "\x. 
\tcb_at' (idx x)\ f \\rv. tcb_at' (idx x)\" shows + "\\s. \x. tcb_at' (idx x) s\ + isolate_thread_actions idx f f' f'' \\p s. \x. tcb_at' (idx x) s\" + apply (simp add: isolate_thread_actions_def split_def) + apply wp + apply clarsimp + apply (simp add: obj_at_partial_overwrite_If use_valid[OF _ f]) + done + +lemma isolate_thread_actions_rewrite_bind: + "\ inj idx; monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) + f (isolate_thread_actions idx f' f'' f'''); + \x. monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) + (g x) + (isolate_thread_actions idx (g' x) g'' g'''); + thread_actions_isolatable idx f'; \x. thread_actions_isolatable idx (g' x); + \x. \tcb_at' (idx x)\ f' \\rv. tcb_at' (idx x)\ \ + \ monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) + (f >>= g) (isolate_thread_actions idx + (f' >>= g') (g'' o f'') (g''' o f'''))" + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_bind, assumption+) + apply (wp isolate_thread_actions_tcbs_at) + apply simp + apply (subst isolate_thread_actions_wrap_bind, assumption) + apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) + apply (rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_bind_l) + apply (erule(1) thread_actions_isolatableD) + apply (rule thread_actions_isolatableD, assumption+) + apply (rule hoare_vcg_all_lift, assumption) + apply (simp add: liftM_def id_def) + apply (rule monadic_rewrite_refl) + apply (simp add: obj_at_partial_overwrite_If) + done + +definition + "copy_register_tsrs src dest r r' rf tsrs + = tsrs (dest := TCBStateRegs (tsrState (tsrs dest)) + ((tsrContext (tsrs dest)) (r' := rf (tsrContext (tsrs src) r))))" + +lemma tcb_at_KOTCB_upd: + "tcb_at' (idx x) s \ + tcb_at' p (ksPSpace_update (\ps. ps(idx x \ KOTCB tcb)) s) + = tcb_at' p s" + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps + split: if_split) + apply (fastforce simp add: ps_clear_def) + done + +definition + "set_register_tsrs dest r v tsrs + = tsrs (dest := TCBStateRegs (tsrState (tsrs dest)) + ((tsrContext (tsrs dest)) (r := v)))" + + +lemma set_register_isolate: + "\ inj idx; idx y = dest \ \ + monadic_rewrite False True + (\s. \x. tcb_at' (idx x) s) + (asUser dest (setRegister r v)) + (isolate_thread_actions idx (return ()) + (set_register_tsrs y r v) id)" + apply (simp add: asUser_def split_def bind_assoc + getRegister_def setRegister_def + select_f_returns isolate_thread_actions_def + getSchedulerAction_def) + apply (simp add: threadGet_def liftM_def getObject_get_assert + bind_assoc threadSet_def + setObject_modify_assert) + apply (clarsimp simp: monadic_rewrite_def exec_gets + exec_modify tcb_at_KOTCB_upd) + apply (clarsimp simp: simpler_modify_def + intro!: kernel_state.fold_congs[OF refl refl]) + apply (clarsimp simp: set_register_tsrs_def o_def + partial_overwrite_fun_upd + partial_overwrite_get_tcb_state_regs) + apply (drule_tac x=y in spec) + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps + cong: if_cong) + apply (case_tac obj) + apply (simp add: projectKO_opt_tcb put_tcb_state_regs_def + put_tcb_state_regs_tcb_def get_tcb_state_regs_def + atcbContextGet_def + cong: if_cong) + done + +lemma copy_register_isolate: + "\ inj idx; idx x = src; idx y = dest \ \ + monadic_rewrite False True + (\s. \x. 
tcb_at' (idx x) s) + (do v \ asUser src (getRegister r); + asUser dest (setRegister r' (rf v)) od) + (isolate_thread_actions idx (return ()) + (copy_register_tsrs x y r r' rf) id)" + supply if_split[split del] + apply (simp add: asUser_def split_def bind_assoc + getRegister_def setRegister_def + select_f_returns isolate_thread_actions_def + getSchedulerAction_def) + apply (simp add: threadGet_def liftM_def getObject_get_assert + bind_assoc threadSet_def + setObject_modify_assert) + apply (clarsimp simp: monadic_rewrite_def exec_gets + exec_modify tcb_at_KOTCB_upd) + apply (clarsimp simp: simpler_modify_def + intro!: kernel_state.fold_congs[OF refl refl]) + apply (clarsimp simp: copy_register_tsrs_def o_def + partial_overwrite_fun_upd + partial_overwrite_get_tcb_state_regs) + apply (frule_tac x=x in spec, drule_tac x=y in spec) + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps + cong: if_cong) + apply (case_tac obj, case_tac obja) + apply (simp add: projectKO_opt_tcb put_tcb_state_regs_def + put_tcb_state_regs_tcb_def get_tcb_state_regs_def + atcbContextGet_def + cong: if_cong) + apply (auto simp: fun_eq_iff split: if_split) + done + +lemma monadic_rewrite_isolate_final2: + assumes mr: "monadic_rewrite F E Q f g" + and eqs: "\s tsrs. \ P s; tsrs = get_tcb_state_regs o ksPSpace s o idx \ + \ f' tsrs = g' tsrs" + "\s. P s \ f'' (ksSchedulerAction s) = g'' (ksSchedulerAction s)" + "\s tsrs sa. R s \ + Q ((ksPSpace_update (partial_overwrite idx tsrs) + s) (| ksSchedulerAction := sa |))" + shows + "monadic_rewrite F E (P and R) + (isolate_thread_actions idx f f' f'') + (isolate_thread_actions idx g g' g'')" + apply (simp add: isolate_thread_actions_def split_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_bind_tail)+ + apply (rule_tac P="\ s'. Q s" in monadic_rewrite_bind) + apply (insert mr)[1] + apply (simp add: monadic_rewrite_def select_f_def) + apply auto[1] + apply (rule_tac P="P and (\s. tcbs = get_tcb_state_regs o ksPSpace s o idx + \ sa = ksSchedulerAction s)" + in monadic_rewrite_pre_imp_eq) + apply (clarsimp simp: exec_modify eqs return_def) + apply wp+ + apply (clarsimp simp: o_def eqs) + done + +lemmas monadic_rewrite_isolate_final + = monadic_rewrite_isolate_final2[where R=\, OF monadic_rewrite_is_refl, simplified] + +lemma copy_registers_isolate_general: + "\ inj idx; idx x = t; idx y = t' \ \ + monadic_rewrite False True + (\s. \x. tcb_at' (idx x) s) + (mapM_x (\r. do v \ asUser t (getRegister (f r)); + asUser t' (setRegister (f' r) (rf r v)) + od) + regs) + (isolate_thread_actions idx + (return ()) (foldr (\r. copy_register_tsrs x y (f r) (f' r) (rf r)) (rev regs)) id)" + apply (induct regs) + apply (simp add: mapM_x_Nil) + apply (clarsimp simp: monadic_rewrite_def liftM_def bind_assoc + isolate_thread_actions_def + split_def exec_gets getSchedulerAction_def + select_f_returns o_def ksPSpace_update_partial_id) + apply (simp add: return_def simpler_modify_def) + apply (simp add: mapM_x_Cons) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_trans) + apply (rule isolate_thread_actions_rewrite_bind, assumption) + apply (rule copy_register_isolate, assumption+) + apply (rule thread_actions_isolatable_returns)+ + apply wp + apply (rule monadic_rewrite_isolate_final[where P=\], simp+) + done + +lemmas copy_registers_isolate = copy_registers_isolate_general[where f="\x. x" and f'="\x. x" and rf="\_ x. x"] + +lemma setSchedulerAction_isolate: + "inj idx \ + monadic_rewrite False True (\s. \x. 
tcb_at' (idx x) s) + (setSchedulerAction sa) + (isolate_thread_actions idx (return ()) id (\_. sa))" + apply (clarsimp simp: monadic_rewrite_def liftM_def bind_assoc + isolate_thread_actions_def select_f_returns + exec_gets getSchedulerAction_def o_def + ksPSpace_update_partial_id setSchedulerAction_def) + apply (simp add: simpler_modify_def) + done + +lemma updateMDB_isolatable: + "thread_actions_isolatable idx (updateMDB slot f)" + apply (simp add: updateMDB_def thread_actions_isolatable_return + split: if_split) + apply (intro impI thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + getCTE_isolatable setCTE_isolatable, + (wp | simp)+) + done + +lemma clearUntypedFreeIndex_isolatable: + "thread_actions_isolatable idx (clearUntypedFreeIndex slot)" + supply option.case_cong[cong] + apply (simp add: clearUntypedFreeIndex_def getSlotCap_def) + apply (rule thread_actions_isolatable_bind) + apply (rule getCTE_isolatable) + apply (simp split: capability.split, safe intro!: thread_actions_isolatable_return) + apply (simp add: updateTrackedFreeIndex_def getSlotCap_def) + apply (intro thread_actions_isolatable_bind getCTE_isolatable + modify_isolatable) + apply (wp | simp)+ + done + +lemma emptySlot_isolatable: + "thread_actions_isolatable idx (emptySlot slot NullCap)" + apply (simp add: emptySlot_def updateCap_def case_Null_If Retype_H.postCapDeletion_def + cong: if_cong) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + clearUntypedFreeIndex_isolatable + thread_actions_isolatable_if + getCTE_isolatable setCTE_isolatable + thread_actions_isolatable_return + updateMDB_isolatable, + (wp | simp)+) + done + +lemmas fastpath_isolatables + = setEndpoint_isolatable getCTE_isolatable + thread_actions_isolatable_assert cteInsert_isolatable + switchToThread_isolatable setCurThread_isolatable + emptySlot_isolatable updateMDB_isolatable + thread_actions_isolatable_returns + +lemmas fastpath_isolate_rewrites + = isolate_thread_actions_threadSet_tcbState isolate_thread_actions_asUser + copy_registers_isolate setSchedulerAction_isolate + fastpath_isolatables[THEN thread_actions_isolatableD] + +lemma lookupIPCBuffer_isolatable: + "thread_actions_isolatable idx (lookupIPCBuffer w t)" + supply if_cong[cong] if_split[split del] + apply (simp add: lookupIPCBuffer_def) + apply (rule thread_actions_isolatable_bind) + apply (clarsimp simp: put_tcb_state_regs_tcb_def threadGet_isolatable + getThreadBufferSlot_def locateSlot_conv getSlotCap_def + split: tcb_state_regs.split)+ + apply (rule thread_actions_isolatable_bind) + apply (clarsimp simp: thread_actions_isolatable_return + getCTE_isolatable + split: capability.split arch_capability.split bool.split)+ + apply (rule thread_actions_isolatable_if) + apply (rule thread_actions_isolatable_bind) + apply (simp add: thread_actions_isolatable_return | wp)+ + done + +lemma setThreadState_rewrite_simple: + "monadic_rewrite True True + (\s. (runnable' st \ ksSchedulerAction s \ ResumeCurrentThread \ t \ ksCurThread s) \ tcb_at' t s) + (setThreadState st t) + (threadSet (tcbState_update (\_. 
st)) t)" + supply if_split[split del] + apply (simp add: setThreadState_def when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at'\) + (* take the threadSet, drop everything until return () *) + apply (rule monadic_rewrite_trans[OF monadic_rewrite_bind_tail]) + apply (rule monadic_rewrite_symb_exec_l_drop)+ + apply (rule monadic_rewrite_refl) + apply (wpsimp simp: getCurThread_def + wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at')+ + apply (rule monadic_rewrite_refl) + apply (clarsimp simp: obj_at'_def sch_act_simple_def st_tcb_at'_def) + done + +end + +end diff --git a/proof/crefine/AARCH64/Machine_C.thy b/proof/crefine/AARCH64/Machine_C.thy new file mode 100644 index 0000000000..1e02ad82ad --- /dev/null +++ b/proof/crefine/AARCH64/Machine_C.thy @@ -0,0 +1,591 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Assumptions and lemmas on machine operations. +*) + +theory Machine_C +imports Ctac_lemmas_C +begin + +(* FIXME: somewhere automation has failed, resulting in virq_C arrays not being in packed_type! *) +instance virq_C :: array_inner_packed + apply intro_classes + by (simp add: size_of_def) + +locale kernel_m = kernel + + +(* timer and IRQ common machine ops (function names exist on other platforms *) + +assumes resetTimer_ccorres: + "ccorres dc xfdc \ UNIV [] + (doMachineOp resetTimer) + (Call resetTimer_'proc)" + +(* This is not very correct, however our current implementation of Hardware in haskell is stateless *) +assumes isIRQPending_ccorres: + "\in_kernel. + ccorres (\rv rv'. rv' = from_bool (rv \ None)) ret__unsigned_long_' + \ UNIV [] + (doMachineOp (getActiveIRQ in_kernel)) (Call isIRQPending_'proc)" + +assumes getActiveIRQ_Normal: + "\ \ \Call getActiveIRQ_'proc, Normal s\ \ s' \ isNormal s'" + +assumes getActiveIRQ_ccorres: + "\in_kernel. + ccorres + (\irq c_irq. 
+ case irq of None \ c_irq = irqInvalid + | Some x \ c_irq = ucast x \ c_irq \ irqInvalid) + ret__unsigned_long_' \ UNIV hs + (doMachineOp (getActiveIRQ in_kernel)) (Call getActiveIRQ_'proc)" + +assumes setIRQTrigger_ccorres: + "ccorres dc xfdc \ (\\irq = ucast irq\ \ \\trigger = from_bool trigger\) [] + (doMachineOp (AARCH64.setIRQTrigger irq trigger)) + (Call setIRQTrigger_'proc)" + +assumes ackInterrupt_ccorres: + "ccorres dc xfdc \ (\\irq = ucast irq\) hs + (doMachineOp (ackInterrupt irq)) + (Call ackInterrupt_'proc)" + +assumes maskInterrupt_ccorres: + "ccorres dc xfdc \ (\\disable = from_bool m\ \ \\irq = ucast irq\) [] + (doMachineOp (maskInterrupt m irq)) + (Call maskInterrupt_'proc)" + +(* This is a simplification until complete FPU handling is added at a future date *) +assumes fpuThreadDelete_ccorres: + "ccorres dc xfdc (tcb_at' thread) (\\thread = tcb_ptr_to_ctcb_ptr thread\) hs + (fpuThreadDelete thread) + (Call fpuThreadDelete_'proc)" + +assumes setVSpaceRoot_ccorres: + "ccorres dc xfdc + \ (\ base_address_CL (ttbr_lift \ttbr) = pt\ \ \ asid_CL (ttbr_lift \ttbr) = asid\) [] + (doMachineOp (AARCH64.setVSpaceRoot pt asid)) + (Call setCurrentUserVSpaceRoot_'proc)" + +(* AArch64-specific machine ops (function names don't exist on other architectures) *) + +assumes getFAR_ccorres: + "ccorres (=) ret__unsigned_long_' \ UNIV [] + (doMachineOp getFAR) + (Call getFAR_'proc)" + +assumes getESR_ccorres: + "ccorres (=) ret__unsigned_long_' \ UNIV [] + (doMachineOp getESR) + (Call getESR_'proc)" + +assumes setHCR_ccorres: + "ccorres dc xfdc \ (\\reg = r \) [] + (doMachineOp (setHCR r)) + (Call setHCR_'proc)" + +assumes getSCTLR_ccorres: + "ccorres (=) ret__unsigned_long_' \ UNIV [] + (doMachineOp getSCTLR) + (Call getSCTLR_'proc)" + +assumes setSCTLR_ccorres: + "ccorres dc xfdc \ (\\sctlr = sctlr \) [] + (doMachineOp (setSCTLR sctlr)) + (Call setSCTLR_'proc)" + +assumes isb_ccorres: + "ccorres dc xfdc \ UNIV [] + (doMachineOp isb) + (Call isb_'proc)" + +assumes dsb_ccorres: + "ccorres dc xfdc \ UNIV [] + (doMachineOp dsb) + (Call dsb_'proc)" + +assumes dsb_preserves_kernel_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call dsb_'proc + {t. hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s)) + \ (\x. snd (hrs_htd (t_hrs_' (globals s)) x) 0 \ None + \ hrs_mem (t_hrs_' (globals t)) x = hrs_mem (t_hrs_' (globals s)) x)}" + +assumes enableFpuEL01_ccorres: + "ccorres dc xfdc \ UNIV [] + (doMachineOp enableFpuEL01) + (Call enableFpuEL01_'proc)" + +assumes check_export_arch_timer_ccorres: + "ccorres dc xfdc \ UNIV [] + (doMachineOp check_export_arch_timer) + (Call check_export_arch_timer_'proc)" + +assumes read_cntpct_ccorres: + "ccorres (=) ret__unsigned_longlong_' \ UNIV [] + (doMachineOp read_cntpct) + (Call read_cntpct_'proc)" + +(* TLB and Cache ops *) + +assumes addressTranslateS1_ccorres: + "ccorres (=) ret__unsigned_long_' \ (\\vaddr___unsigned_long = vaddr \) [] + (doMachineOp (addressTranslateS1 vaddr)) + (Call addressTranslateS1_'proc)" + +assumes invalidateTranslationASID_ccorres: + "\hw_asid. ccorres dc xfdc \ (\\hw_asid___unsigned_char = ucast hw_asid\) [] + (doMachineOp (invalidateTranslationASID hw_asid)) + (Call invalidateTranslationASID_'proc)" + +assumes invalidateTranslationSingle_ccorres: + "\vptr. 
ccorres dc xfdc \ (\\vptr = vptr\) [] + (doMachineOp (invalidateTranslationSingle vptr)) + (Call invalidateTranslationSingle_'proc)" + +assumes cleanByVA_PoU_ccorres: + "ccorres dc xfdc \ (\\vaddr = w1\ \ \\paddr = w2\) [] + (doMachineOp (cleanByVA_PoU w1 w2)) + (Call cleanByVA_PoU_'proc)" + +assumes cleanCacheRange_RAM_ccorres: + "ccorres dc xfdc (\s. w1 \ w2 \ w3 \ w3 + (w2 - w1) + \ w1 && mask cacheLineSize = w3 && mask cacheLineSize + \ unat (w2 - w1) \ gsMaxObjectSize s) + (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] + (doMachineOp (cleanCacheRange_RAM w1 w2 w3)) + (Call cleanCacheRange_RAM_'proc)" + +assumes cleanCacheRange_PoU_ccorres: + "ccorres dc xfdc (\s. unat (w2 - w1) \ gsMaxObjectSize s + \ w1 \ w2 \ w3 \ w3 + (w2 - w1) + \ w1 && mask cacheLineSize = w3 && mask cacheLineSize) + (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] + (doMachineOp (cleanCacheRange_PoU w1 w2 w3)) + (Call cleanCacheRange_PoU_'proc)" + +assumes cleanInvalidateCacheRange_RAM_ccorres: + "ccorres dc xfdc (\s. unat (w2 - w1) \ gsMaxObjectSize s + \ w1 \ w2 \ w3 \ w3 + (w2 - w1) + \ w1 && mask cacheLineSize = w3 && mask cacheLineSize) + (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] + (doMachineOp (cleanInvalidateCacheRange_RAM w1 w2 w3)) + (Call cleanInvalidateCacheRange_RAM_'proc)" + +assumes invalidateCacheRange_RAM_ccorres: + "ccorres dc xfdc ((\s. unat (w2 - w1) \ gsMaxObjectSize s) + and (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) + \ w1 && mask cacheLineSize = w3 && mask cacheLineSize)) + (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] + (doMachineOp (invalidateCacheRange_RAM w1 w2 w3)) + (Call invalidateCacheRange_RAM_'proc)" + +assumes invalidateCacheRange_I_ccorres: + "ccorres dc xfdc (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) + \ w1 && mask cacheLineSize = w3 && mask cacheLineSize) + (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] + (doMachineOp (invalidateCacheRange_I w1 w2 w3)) + (Call invalidateCacheRange_I_'proc)" + +assumes cleanCacheRange_RAM_preserves_kernel_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call cleanCacheRange_RAM_'proc + {t. hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s)) + \ (\x. 
snd (hrs_htd (t_hrs_' (globals s)) x) 0 \ None + \ hrs_mem (t_hrs_' (globals t)) x = hrs_mem (t_hrs_' (globals s)) x)}" + +(* Hypervisor-related machine ops *) + +(* ARM Hypervisor hardware register getters and setters *) + +assumes get_gic_vcpu_ctrl_hcr_ccorres: + "ccorres (=) ret__unsigned_' \ UNIV [] + (doMachineOp get_gic_vcpu_ctrl_hcr) (Call get_gic_vcpu_ctrl_hcr_'proc)" +assumes get_gic_vcpu_ctrl_vmcr_ccorres: + "ccorres (=) ret__unsigned_' \ UNIV [] + (doMachineOp get_gic_vcpu_ctrl_vmcr) (Call get_gic_vcpu_ctrl_vmcr_'proc)" +assumes get_gic_vcpu_ctrl_apr_ccorres: + "ccorres (=) ret__unsigned_' \ UNIV [] + (doMachineOp get_gic_vcpu_ctrl_apr) (Call get_gic_vcpu_ctrl_apr_'proc)" +assumes get_gic_vcpu_ctrl_vtr_ccorres: + "ccorres (=) ret__unsigned_' \ UNIV [] + (doMachineOp get_gic_vcpu_ctrl_vtr) (Call get_gic_vcpu_ctrl_vtr_'proc)" +assumes get_gic_vcpu_ctrl_eisr0_ccorres: + "ccorres (=) ret__unsigned_' \ UNIV [] + (doMachineOp get_gic_vcpu_ctrl_eisr0) (Call get_gic_vcpu_ctrl_eisr0_'proc)" +assumes get_gic_vcpu_ctrl_eisr1_ccorres: + "ccorres (=) ret__unsigned_' \ UNIV [] + (doMachineOp get_gic_vcpu_ctrl_eisr1) (Call get_gic_vcpu_ctrl_eisr1_'proc)" +assumes get_gic_vcpu_ctrl_misr_ccorres: + "ccorres (=) ret__unsigned_' \ UNIV [] + (doMachineOp get_gic_vcpu_ctrl_misr) (Call get_gic_vcpu_ctrl_misr_'proc)" + +assumes set_gic_vcpu_ctrl_hcr_ccorres: + "ccorres dc xfdc \ (\\hcr = v \) [] + (doMachineOp (set_gic_vcpu_ctrl_hcr v)) (Call set_gic_vcpu_ctrl_hcr_'proc)" +assumes set_gic_vcpu_ctrl_vmcr_ccorres: + "ccorres dc xfdc \ (\\vmcr = v \) [] + (doMachineOp (set_gic_vcpu_ctrl_vmcr v)) (Call set_gic_vcpu_ctrl_vmcr_'proc)" +assumes set_gic_vcpu_ctrl_apr_ccorres: + "ccorres dc xfdc \ (\\apr = v \) [] + (doMachineOp (set_gic_vcpu_ctrl_apr v)) (Call set_gic_vcpu_ctrl_apr_'proc)" + +assumes set_gic_vcpu_ctrl_lr_ccorres: + "ccorres dc xfdc \ (\\num = scast n \ \ \virq_to_H \lr = lr \) [] + (doMachineOp (set_gic_vcpu_ctrl_lr n lr)) (Call set_gic_vcpu_ctrl_lr_'proc)" + +assumes get_gic_vcpu_ctrl_lr_ccorres: + "ccorres (\v virq. virq = virq_C (FCP (\_. v))) ret__struct_virq_C_' \ (\\num = scast n \) hs + (doMachineOp (get_gic_vcpu_ctrl_lr n)) (Call get_gic_vcpu_ctrl_lr_'proc)" + +(* Lazy FPU switching is not in current verification scope. We abstract this by acting as if + FPU is always enabled. When the FPU switching implementation is updated, this assumption + should be removed. *) +assumes isFpuEnable_ccorres: + "ccorres (\rv rv'. rv' = from_bool rv) ret__unsigned_long_' \ UNIV [] + (doMachineOp isFpuEnable) + (Call isFpuEnable_'proc)" + +(* ARM Hypervisor banked register save/restoring *) + +assumes vcpu_hw_read_reg_ccorres: + "\r. ccorres (=) ret__unsigned_long_' + \ (\ unat \reg_index = fromEnum r \) hs + (doMachineOp (readVCPUHardwareReg r)) + (Call vcpu_hw_read_reg_'proc)" + +assumes vcpu_hw_write_reg_ccorres: + "\r v. ccorres dc xfdc + \ (\ unat \reg_index = fromEnum r \ \ \ \reg = v \) hs + (doMachineOp (writeVCPUHardwareReg r v)) + (Call vcpu_hw_write_reg_'proc)" + +(* The following are fastpath specific assumptions. + We might want to move them somewhere else. *) + +(* + @{text slowpath} is an assembly stub that switches execution + from the fastpath to the slowpath. Its contract is equivalence + to the toplevel slowpath function @{term callKernel} for the + @{text SyscallEvent} case. +*) +assumes slowpath_ccorres: + "ccorres dc xfdc + (\s. invs' s \ ct_in_state' ((=) Running) s) + ({s. 
syscall_' s = syscall_from_H ev}) + [SKIP] + (callKernel (SyscallEvent ev)) (Call slowpath_'proc)" + +(* + @{text slowpath} does not return, but uses the regular + slowpath kernel exit instead. +*) +assumes slowpath_noreturn_spec: + "\ \ UNIV Call slowpath_'proc {},UNIV" + +(* + @{text fastpath_restore} updates badge and msgInfo registers + and returns to the user. +*) +assumes fastpath_restore_ccorres: + "ccorres dc xfdc + (\s. t = ksCurThread s) + ({s. badge_' s = bdg} \ {s. msgInfo_' s = msginfo} + \ {s. cur_thread_' s = tcb_ptr_to_ctcb_ptr t}) + [SKIP] + (asUser t (zipWithM_x setRegister + [AARCH64_H.badgeRegister, AARCH64_H.msgInfoRegister] + [bdg, msginfo])) + (Call fastpath_restore_'proc)" + +context kernel_m begin + +lemma index_xf_for_sequence: + "\s f. index_' (index_'_update f s) = f (index_' s) + \ globals (index_'_update f s) = globals s" + by simp + +lemma dmo_if: + "(doMachineOp (if a then b else c)) = (if a then (doMachineOp b) else (doMachineOp c))" + by (simp split: if_split) + +(* Count leading and trailing zeros. *) + +(* FIXME AARCH64 clzl and ctzl use builtin compiler versions, while clz32/64 and ctz32/64 are + software implementations that are provided BUT NOT USED, hence this whole chunk except for + clzl_spec and ctzl_spec can be removed. *) + +definition clz32_step where + "clz32_step i \ + \mask___unsigned :== \mask___unsigned >> unat ((1::32 sword) << unat i);; + \bits___unsigned :== SCAST(32 signed \ 32) (if \mask___unsigned < \x___unsigned then 1 else 0) << unat i;; + Guard ShiftError \\bits___unsigned < SCAST(32 signed \ 32) 0x20\ + (\x___unsigned :== \x___unsigned >> unat \bits___unsigned);; + \count :== \count - \bits___unsigned" + +definition clz32_invariant where + "clz32_invariant i s \ {s'. + mask___unsigned_' s' \ x___unsigned_' s' + \ of_nat (word_clz (x___unsigned_' s')) + count_' s' = of_nat (word_clz (x___unsigned_' s)) + 32 + \ mask___unsigned_' s' = mask (2 ^ unat i)}" + +lemma clz32_step: + "unat (i :: 32 sword) < 5 \ + \ \ (clz32_invariant (i+1) s) clz32_step i (clz32_invariant i s)" + unfolding clz32_step_def + apply (vcg, clarsimp simp: clz32_invariant_def) + \ \Introduce some trivial but useful facts so that most later goals are solved with simp\ + apply (prop_tac "i \ -1", clarsimp simp: unat_minus_one_word) + apply (frule unat_Suc2) + apply (prop_tac "(2 :: nat) ^ unat i < (32 :: nat)", + clarsimp simp: power_strict_increasing_iff[where b=2 and y=5, simplified]) + apply (prop_tac "(2 :: nat) ^ unat (i + 1) \ (32 :: nat)", + clarsimp simp: unat_Suc2 power_increasing_iff[where b=2 and y=4, simplified]) + apply (intro conjI impI; clarsimp) + apply (clarsimp simp: word_less_nat_alt) + apply (erule le_shiftr) + apply (clarsimp simp: word_size shiftr_mask2 word_clz_shiftr) + apply (clarsimp simp: shiftr_mask2) + apply fastforce + apply (clarsimp simp: shiftr_mask2) + done + +lemma clz32_spec: + "\s. 
\ \ {s} Call clz32_'proc \\ret__unsigned = of_nat (word_clz (x___unsigned_' s))\" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (hoarep_rewrite, fold clz32_step_def) + apply (intro allI hoarep.Catch[OF _ hoarep.Skip]) + apply (rule_tac Q="clz32_invariant 0 s" in hoarep_Seq_nothrow[OF _ creturn_wp]) + apply (rule HoarePartial.SeqSwap[OF clz32_step], simp, simp)+ + apply (rule conseqPre, vcg) + apply (all \clarsimp simp: clz32_invariant_def mask_def word_less_max_simp\) + by (fastforce simp: word_le_1) + +definition clz64_step where + "clz64_step i \ + \mask___unsigned_longlong :== \mask___unsigned_longlong >> unat ((1::32 sword) << unat i);; + \bits___unsigned :== SCAST(32 signed \ 32) (if \mask___unsigned_longlong < \x___unsigned_longlong then 1 else 0) << unat i;; + Guard ShiftError \\bits___unsigned < SCAST(32 signed \ 32) 0x40\ + (\x___unsigned_longlong :== \x___unsigned_longlong >> unat \bits___unsigned);; + \count :== \count - \bits___unsigned" + +definition clz64_invariant where + "clz64_invariant i s \ {s'. + mask___unsigned_longlong_' s' \ x___unsigned_longlong_' s' + \ of_nat (word_clz (x___unsigned_longlong_' s')) + count_' s' = of_nat (word_clz (x___unsigned_longlong_' s)) + 64 + \ mask___unsigned_longlong_' s' = mask (2 ^ unat i)}" + +lemma clz64_step: + "unat (i :: 32 sword) < 6 \ + \ \ (clz64_invariant (i+1) s) clz64_step i (clz64_invariant i s)" + unfolding clz64_step_def + apply (vcg, clarsimp simp: clz64_invariant_def) + \ \Introduce some trivial but useful facts so that most later goals are solved with simp\ + apply (prop_tac "i \ -1", clarsimp simp: unat_minus_one_word) + apply (frule unat_Suc2) + apply (prop_tac "(2 :: nat) ^ unat i < (64 :: nat)", + clarsimp simp: power_strict_increasing_iff[where b=2 and y=6, simplified]) + apply (prop_tac "(2 :: nat) ^ unat (i + 1) \ (64 :: nat)", + clarsimp simp: unat_Suc2 power_increasing_iff[where b=2 and y=5, simplified]) + apply (intro conjI impI; clarsimp) + apply (clarsimp simp: word_less_nat_alt) + apply (erule le_shiftr) + apply (clarsimp simp: word_size shiftr_mask2 word_clz_shiftr) + apply (clarsimp simp: shiftr_mask2) + apply fastforce + apply (clarsimp simp: shiftr_mask2) + done + +lemma clz64_spec: + "\s. \ \ {s} Call clz64_'proc \\ret__unsigned = of_nat (word_clz (x___unsigned_longlong_' s))\" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (hoarep_rewrite, fold clz64_step_def) + apply (intro allI hoarep.Catch[OF _ hoarep.Skip]) + apply (rule_tac Q="clz64_invariant 0 s" in hoarep_Seq_nothrow[OF _ creturn_wp]) + apply (rule HoarePartial.SeqSwap[OF clz64_step], simp, simp)+ + apply (rule conseqPre, vcg) + apply (all \clarsimp simp: clz64_invariant_def mask_def word_less_max_simp\) + apply (clarsimp simp: word_le_1) + apply (erule disjE; clarsimp) + apply (subst add.commute) + apply (subst ucast_increment[symmetric]) + apply (simp add: not_max_word_iff_less) + apply (rule word_of_nat_less) + apply (rule le_less_trans[OF word_clz_max]) + apply (simp add: word_size unat_max_word) + apply clarsimp + done + +definition ctz32_step where + "ctz32_step i \ \mask___unsigned :== \mask___unsigned >> unat ((1::32 sword) << unat i);; + \bits___unsigned :== SCAST(32 signed \ 32) (if \x___unsigned && \mask___unsigned = SCAST(32 signed \ 32) 0 then 1 else 0) << unat i;; + Guard ShiftError \\bits___unsigned < SCAST(32 signed \ 32) 0x20\ (\x___unsigned :== \x___unsigned >> unat \bits___unsigned);; + \count :== \count + \bits___unsigned" + +definition ctz32_invariant where + "ctz32_invariant (i :: 32 sword) s \ {s'. 
+ (x___unsigned_' s' \ 0 \ (of_nat (word_ctz (x___unsigned_' s')) + count_' s' = of_nat (word_ctz (x___unsigned_' s)) + \ (word_ctz (x___unsigned_' s') < 2 ^ unat i))) + \ (x___unsigned_' s' = 0 \ (count_' s' + (0x1 << (unat i)) = 33 \ x___unsigned_' s = 0)) + \ mask___unsigned_' s' = mask (2 ^ unat i)}" + +lemma ctz32_step: + "unat (i :: 32 sword) < 5 \ + \ \ (ctz32_invariant (i+1) s) ctz32_step i (ctz32_invariant i s)" + supply word_neq_0_conv [simp del] + unfolding ctz32_step_def + apply (vcg, clarsimp simp: ctz32_invariant_def) + apply (prop_tac "i \ -1", clarsimp simp: unat_minus_one_word) + apply (frule unat_Suc2) + apply (prop_tac "(2 :: nat) ^ unat i < (32 :: nat)", + clarsimp simp: power_strict_increasing_iff[where b=2 and y=5, simplified]) + apply (prop_tac "(2 :: nat) ^ unat (i + 1) \ (32 :: nat)", + clarsimp simp: unat_Suc2 power_increasing_iff[where b=2 and y=4, simplified]) + apply (intro conjI; intro impI) + apply (intro conjI) + apply (clarsimp simp: word_less_nat_alt) + apply (intro impI) + apply (subgoal_tac "x___unsigned_' x \ 0") + apply (intro conjI, clarsimp) + apply (subst word_ctz_shiftr, clarsimp, clarsimp) + apply (rule word_ctz_bound_below, clarsimp simp: shiftr_mask2) + apply (clarsimp simp: shiftr_mask2 is_aligned_mask[symmetric]) + apply (subst of_nat_diff) + apply (rule word_ctz_bound_below, clarsimp simp: shiftr_mask2) + apply (clarsimp simp: shiftr_mask2) + apply fastforce + apply (subst word_ctz_shiftr, clarsimp, clarsimp) + apply (rule word_ctz_bound_below, clarsimp simp: shiftr_mask2) + apply (clarsimp simp: shiftr_mask2 is_aligned_mask[symmetric]) + apply (fastforce elim: is_aligned_weaken) + apply fastforce + apply (intro impI conjI; clarsimp simp: shiftr_mask2) + apply (subgoal_tac "x___unsigned_' x = 0", clarsimp) + apply (subst add.commute, simp) + apply (fastforce simp: shiftr_mask2 word_neq_0_conv and_mask_eq_iff_shiftr_0[symmetric]) + apply (simp add: and_mask_eq_iff_shiftr_0[symmetric]) + apply (clarsimp simp: shiftr_mask2) + by (fastforce simp: shiftr_mask2 intro: word_ctz_bound_above) + +lemma ctz32_spec: + "\s. \ \ {s} Call ctz32_'proc \\ret__unsigned = of_nat (word_ctz (x___unsigned_' s))\" + supply word_neq_0_conv [simp del] + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (hoarep_rewrite, fold ctz32_step_def) + apply (intro allI hoarep.Catch[OF _ hoarep.Skip]) + apply (rule_tac Q="ctz32_invariant 0 s" in hoarep_Seq_nothrow[OF _ creturn_wp]) + apply (rule HoarePartial.SeqSwap[OF ctz32_step], simp, simp)+ + apply (rule conseqPre, vcg) + apply (clarsimp simp: ctz32_invariant_def) + apply (clarsimp simp: mask_def) + apply (subgoal_tac "word_ctz (x___unsigned_' s) \ size (x___unsigned_' s)") + apply (clarsimp simp: word_size) + using word_ctz_len_word_and_mask_zero apply force + apply (rule word_ctz_max) + apply (clarsimp simp: ctz32_invariant_def) + apply (case_tac "x___unsigned_' x = 0"; clarsimp) + done + +definition ctz64_step where + "ctz64_step i \ \mask___unsigned_longlong :== \mask___unsigned_longlong >> unat ((1::32 sword) << unat i);; + \bits___unsigned :== SCAST(32 signed \ 32) (if \x___unsigned_longlong && \mask___unsigned_longlong = SCAST(32 signed \ 64) 0 then 1 else 0) << unat i;; + Guard ShiftError \\bits___unsigned < SCAST(32 signed \ 32) 0x40\ (\x___unsigned_longlong :== \x___unsigned_longlong >> unat \bits___unsigned);; + \count :== \count + \bits___unsigned" + +definition ctz64_invariant where + "ctz64_invariant i s \ {s'. 
+ (x___unsigned_longlong_' s' \ 0 \ (of_nat (word_ctz (x___unsigned_longlong_' s')) + count_' s' = of_nat (word_ctz (x___unsigned_longlong_' s)) + \ (word_ctz (x___unsigned_longlong_' s') < 2 ^ unat i))) + \ (x___unsigned_longlong_' s' = 0 \ (count_' s' + (0x1 << (unat i)) = 65 \ x___unsigned_longlong_' s = 0)) + \ mask___unsigned_longlong_' s' = mask (2 ^ unat i)}" + +lemma ctz64_step: + "unat (i :: 32 sword) < 6 \ + \ \ (ctz64_invariant (i+1) s) ctz64_step i (ctz64_invariant i s)" +supply word_neq_0_conv [simp del] + unfolding ctz64_step_def + apply (vcg, clarsimp simp: ctz64_invariant_def) + apply (prop_tac "i \ -1", clarsimp simp: unat_minus_one_word) + apply (frule unat_Suc2) + apply (prop_tac "(2 :: nat) ^ unat i < (64 :: nat)", + clarsimp simp: power_strict_increasing_iff[where b=2 and y=6, simplified]) + apply (prop_tac "(2 :: nat) ^ unat (i + 1) \ (64 :: nat)", + clarsimp simp: unat_Suc2 power_increasing_iff[where b=2 and y=5, simplified]) + apply (intro conjI; intro impI) + apply (intro conjI) + apply (clarsimp simp: word_less_nat_alt) + apply (intro impI) + apply (subgoal_tac "x___unsigned_longlong_' x \ 0") + apply (intro conjI, clarsimp) + apply (subst word_ctz_shiftr, clarsimp, clarsimp) + apply (rule word_ctz_bound_below, clarsimp simp: shiftr_mask2) + apply (clarsimp simp: shiftr_mask2 is_aligned_mask[symmetric]) + apply (subst of_nat_diff) + apply (rule word_ctz_bound_below, clarsimp simp: shiftr_mask2) + apply (clarsimp simp: shiftr_mask2) + apply fastforce + apply (subst word_ctz_shiftr, clarsimp, clarsimp) + apply (rule word_ctz_bound_below, clarsimp simp: shiftr_mask2) + apply (clarsimp simp: shiftr_mask2 is_aligned_mask[symmetric]) + apply (fastforce elim: is_aligned_weaken) + apply fastforce + apply (intro impI conjI; clarsimp simp: shiftr_mask2) + apply (subgoal_tac "x___unsigned_longlong_' x = 0", clarsimp) + apply (subst add.commute, simp) + apply (fastforce simp: shiftr_mask2 word_neq_0_conv and_mask_eq_iff_shiftr_0[symmetric]) + apply (simp add: and_mask_eq_iff_shiftr_0[symmetric]) + apply (clarsimp simp: shiftr_mask2) + by (fastforce simp: shiftr_mask2 intro: word_ctz_bound_above) + +lemma ctz64_spec: + "\s. \ \ {s} Call ctz64_'proc \\ret__unsigned = of_nat (word_ctz (x___unsigned_longlong_' s))\" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (hoarep_rewrite, fold ctz64_step_def) + apply (intro allI hoarep.Catch[OF _ hoarep.Skip]) + apply (rule_tac Q="ctz64_invariant 0 s" in hoarep_Seq_nothrow[OF _ creturn_wp]) + apply (rule HoarePartial.SeqSwap[OF ctz64_step], simp, simp)+ + apply (rule conseqPre, vcg) + apply (clarsimp simp: ctz64_invariant_def) + apply (clarsimp simp: mask_def) + apply (subgoal_tac "word_ctz (x___unsigned_longlong_' s) \ size (x___unsigned_longlong_' s)") + apply (clarsimp simp: word_size) + apply (erule le_neq_trans, clarsimp) + using word_ctz_len_word_and_mask_zero[where 'a=64] apply force + apply (rule word_ctz_max) + apply (clarsimp simp: ctz64_invariant_def) + apply (case_tac "x___unsigned_longlong_' x = 0"; clarsimp) + done + +(* On AArch64, clzl and ctzl use compiler builtins and hence these are rephrasings of + Kernel_C.clzl_spec.clzl_spec and Kernel_C.ctzl_spec.ctzl_spec to omit "symbol_table" *) + +lemma clzl_spec: + "\s. \ \ {\. s = \ \ x___unsigned_long_' s \ 0} Call clzl_'proc + \\ret__long = of_nat (word_clz (x___unsigned_long_' s))\" + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rule_tac x="ret__long_'_update f x" for f in exI) + apply (simp add: mex_def meq_def) + done + +lemma ctzl_spec: + "\s. 
\ \ {\. s = \ \ x___unsigned_long_' s \ 0} Call ctzl_'proc + \\ret__long = of_nat (word_ctz (x___unsigned_long_' s))\" + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rule_tac x="ret__long_'_update f x" for f in exI) + apply (simp add: mex_def meq_def) + done + +(* FIXME AARCH64 there are a whole lot of cache op lemmas on ARM_HYP, e.g. + cleanCaches_PoU_ccorres, branchFlushRange_ccorres, invalidateCacheRange_I_ccorres, + invalidateCacheRange_RAM_ccorres, cleanCacheRange_PoU_ccorres, etc. + We'll probably need some of these. *) + +end +end diff --git a/proof/crefine/AARCH64/PSpace_C.thy b/proof/crefine/AARCH64/PSpace_C.thy new file mode 100644 index 0000000000..a499d77aa6 --- /dev/null +++ b/proof/crefine/AARCH64/PSpace_C.thy @@ -0,0 +1,136 @@ +(* + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory PSpace_C +imports Ctac_lemmas_C +begin + +context kernel begin + +lemma setObject_obj_at_pre: + "\ updateObject ko = updateObject_default ko; + (1 :: machine_word) < 2 ^ objBits ko \ + \ + setObject p ko + = (stateAssert (typ_at' (koTypeOf (injectKO ko)) p) [] + >>= (\_. setObject p ko))" + apply (rule ext) + apply (case_tac "typ_at' (koTypeOf (injectKO ko)) p x") + apply (simp add: stateAssert_def bind_def get_def return_def) + apply (simp add: stateAssert_def bind_def get_def assert_def fail_def) + apply (simp add: setObject_def exec_gets split_def assert_opt_def split: option.split) + apply (clarsimp simp add: fail_def) + apply (simp add: bind_def simpler_modify_def split_def) + apply (rule context_conjI) + apply (clarsimp simp: updateObject_default_def in_monad simp del: projectKOs) + apply (clarsimp simp: in_magnitude_check) + apply (frule iffD1[OF project_koType, OF exI]) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def) + apply (simp only: objBitsT_koTypeOf[symmetric] objBits_def) + apply (simp add: koTypeOf_injectKO) + apply (rule empty_failD[OF empty_fail_updateObject_default]) + apply (rule ccontr, erule nonemptyE) + apply clarsimp + done + + + +lemma setObject_ccorres_helper: + fixes ko :: "'a :: pspace_storable" + assumes valid: "\\ (ko' :: 'a). + \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ko_at' ko' p \} + c {s. (\\ksPSpace := (ksPSpace \)(p \ injectKO ko)\, s) \ rf_sr}" + shows "\ \ko :: 'a. updateObject ko = updateObject_default ko; + \ko :: 'a. (1 :: machine_word) < 2 ^ objBits ko \ + \ ccorres dc xfdc P P' hs (setObject p ko) c" + apply (rule ccorres_guard_imp2) + apply (subst setObject_obj_at_pre) + apply simp+ + apply (rule ccorres_symb_exec_l[where Q'="\_. 
P'"]) + defer + apply (rule stateAssert_inv) + apply (rule stateAssert_sp[where P=P]) + apply (rule empty_fail_stateAssert) + apply simp + apply (rule ccorres_from_vcg) + apply (rule allI) + apply (rule hoare_complete) + apply (clarsimp simp: HoarePartialDef.valid_def) + apply (simp add: typ_at_to_obj_at' koTypeOf_injectKO) + apply (drule obj_at_ko_at', clarsimp) + apply (cut_tac \1=\ and ko'1=koa in valid) + apply (drule hoare_sound, + clarsimp simp: cvalid_def HoarePartialDef.valid_def) + apply (elim allE, drule(1) mp) + apply (drule mp, simp) + apply clarsimp + apply (rule imageI[OF CollectI]) + apply (rule rev_bexI) + apply (rule setObject_eq, simp+) + apply (simp add: objBits_def) + apply (simp only: objBitsT_koTypeOf[symmetric] + koTypeOf_injectKO) + apply assumption + apply simp + done + + +lemma carray_map_relation_upd_triv: + "f x = Some (v :: 'a :: pspace_storable) + \ carray_map_relation n (f (x \ y)) hp ptrf = carray_map_relation n f hp ptrf" + by (simp add: carray_map_relation_def objBits_def objBitsT_koTypeOf[symmetric] + koTypeOf_injectKO + del: objBitsT_koTypeOf) + + +lemma storePTE_Basic_ccorres': + "\ cpte_relation pte pte' \ \ + ccorres dc xfdc \ {s. ptr_val (f s) = p} hs + (storePTE p pte) + (Guard C_Guard {s. s \\<^sub>c f s} + (Basic (\s. globals_update( t_hrs_'_update + (hrs_mem_update (heap_update (f s) pte'))) s)))" + apply (simp add: storePTE_def) + apply (rule setObject_ccorres_helper) + apply (simp_all add: objBits_simps bit_simps) + apply (rule conseqPre, vcg) + apply (rule subsetI, clarsimp simp: Collect_const_mem) + apply (rule cmap_relationE1, erule rf_sr_cpte_relation, + erule ko_at_projectKO_opt) + apply (rule conjI, fastforce intro: typ_heap_simps) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps + update_pte_map_to_ptes + update_pte_map_tos + carray_map_relation_upd_triv) + + apply (case_tac "f x", simp) + + apply (erule cmap_relation_updI, + erule ko_at_projectKO_opt, simp+) + apply (simp add: cready_queues_relation_def + carch_state_relation_def + cmachine_state_relation_def + Let_def typ_heap_simps + cteCaps_of_def update_pte_map_tos bit_simps) + done + +lemma storePTE_Basic_ccorres: + "\ cpte_relation pte pte' \ \ + ccorres dc xfdc \ {s. f s = p} hs + (storePTE p pte) + (Guard C_Guard {s. s \\<^sub>c pte_Ptr (f s)} + (Basic (\s. globals_update( t_hrs_'_update + (hrs_mem_update (heap_update (pte_Ptr (f s)) pte'))) s)))" + apply (rule ccorres_guard_imp2) + apply (erule storePTE_Basic_ccorres') + apply simp + done + +end +end diff --git a/proof/crefine/AARCH64/Recycle_C.thy b/proof/crefine/AARCH64/Recycle_C.thy new file mode 100644 index 0000000000..ce0e81ca3d --- /dev/null +++ b/proof/crefine/AARCH64/Recycle_C.thy @@ -0,0 +1,1223 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Recycle_C +imports Delete_C Retype_C +begin + +context kernel_m +begin + +lemma isArchFrameCap_ArchObjectCap: + "isArchFrameCap (ArchObjectCap acap) + = isFrameCap acap" + by (simp add: isArchFrameCap_def isFrameCap_def) + +definition + "replicateHider \ replicate" + +lemma collapse_foldl_replicate: + "replicate (length xs) v = xs \ + foldl (@) [] (map (\_. 
xs) ys) + = replicateHider (length xs * length ys) v" + apply (induct ys rule: rev_induct) + apply (simp add: replicateHider_def) + apply (simp add: replicateHider_def) + apply (subst add.commute, simp add: replicate_add) + done + +lemma coerce_memset_to_heap_update_user_data: + "heap_update_list x (replicateHider 4096 0) + = heap_update (Ptr x :: user_data_C ptr) + (user_data_C (FCP (\_. 0)))" + apply (intro ext, simp add: heap_update_def) + apply (rule_tac f="\xs. heap_update_list x xs a b" for a b in arg_cong) + apply (simp add: to_bytes_def size_of_def typ_info_simps user_data_C_tag_def) + apply (simp add: ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td align_of_def padup_def + final_pad_def size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: typ_info_simps + user_context_C_tag_def thread_state_C_tag_def seL4_Fault_C_tag_def + lookup_fault_C_tag_def update_ti_t_ptr_0s + ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td + ti_typ_combine_empty_ti ti_typ_combine_td + align_of_def padup_def + final_pad_def size_td_lt_ti_typ_pad_combine Let_def size_of_def + align_td_array' size_td_array) + apply (simp add: typ_info_array') + apply (subst access_ti_list_array) + apply simp + apply simp + apply (simp add: typ_info_word typ_info_ptr word_rsplit_0) + apply fastforce + apply (simp add: collapse_foldl_replicate word_bits_def) + done + +lemma clift_foldl_hrs_mem_update: + "\ \x \ set xs. hrs_htd s \\<^sub>t f x; + \x s. hrs_htd s \\<^sub>t f x \ clift (hrs_mem_update (heap_update (f x) v) s) + = g (clift s :: ('a :: c_type) ptr \ 'a) x \ + \ + clift (hrs_mem_update (\s. foldl (\s x. heap_update (f x) v s) s xs) s) + = foldl g (clift s :: 'a ptr \ 'a) xs" + using [[hypsubst_thin]] + apply (cases s, clarsimp) + apply (induct xs arbitrary: a b) + apply (simp add: hrs_mem_update_def) + apply (clarsimp simp add: hrs_mem_update_def split_def hrs_htd_def) + done + +lemma map_to_user_data_aligned: + "\ map_to_user_data (ksPSpace s) x = Some y; pspace_aligned' s \ + \ is_aligned x pageBits" + apply (clarsimp simp: map_comp_eq projectKOs split: option.split_asm) + apply (drule(1) pspace_alignedD') + apply (simp add: objBits_simps) + done + +lemma help_force_intvl_range_conv: + "\ is_aligned (p::machine_word) n; v = 2 ^ n; n < word_bits \ + \ {p ..+ v} = {p .. p + 2 ^ n - 1}" + by (simp add: intvl_range_conv word_bits_def) + +lemma cmap_relation_If_upd: + "\ cmap_relation f g ptrfun rel; rel v v'; ptrfun ` S = S'; inj ptrfun \ + \ cmap_relation (\x. if x \ S then Some v else f x) + (\y. if y \ S' then Some v' else g y) + ptrfun rel" + apply (simp add: cmap_relation_def dom_If_Some) + apply (rule context_conjI) + apply blast + apply clarsimp + apply (case_tac "x \ S") + apply simp + apply clarsimp + apply (subst if_not_P) + apply (clarsimp simp: inj_eq) + apply (drule bspec, erule domI) + apply simp + done + +lemma length_replicateHider [simp]: + "length (replicateHider n x) = n" + by (simp add: replicateHider_def) + +lemma coerce_heap_update_to_heap_updates': + "n = chunk * m \ + heap_update_list x (replicateHider n 0) + = (\s. foldl (\s x. heap_update_list x (replicateHider chunk 0) s) s + (map (\n. 
x + (of_nat n * of_nat chunk)) [0 ..< m]))" + using [[hypsubst_thin]] + apply clarsimp + apply (induct m arbitrary: x) + apply (rule ext, simp) + apply (simp add: replicateHider_def) + apply (rule ext) + apply (simp only: map_upt_unfold map_Suc_upt[symmetric]) + apply (simp add: replicate_add[folded replicateHider_def] + heap_update_list_concat_unfold + o_def field_simps + length_replicate[folded replicateHider_def]) + done + +lemma h_t_valid_dom_s: + "\ h_t_valid htd c_guard p; x = ptr_val (p :: ('a :: mem_type) ptr); + n = size_of TYPE ('a) \ + \ {x ..+ n} \ {SIndexVal, SIndexTyp 0} \ dom_s htd" + apply (clarsimp simp: h_t_valid_def valid_footprint_def Let_def + intvl_def) + apply (drule_tac x=k in spec, simp add: size_of_def) + apply (clarsimp simp: dom_s_def) + apply (drule_tac x=0 in map_leD, simp_all) + done + +lemma user_data_at_rf_sr_dom_s: + "\ typ_at' UserDataT x s; (s, s') \ rf_sr \ + \ {x ..+ 2 ^ pageBits} \ {SIndexVal, SIndexTyp 0} + \ dom_s (hrs_htd (t_hrs_' (globals s')))" + apply (drule rf_sr_heap_user_data_relation) + apply (drule user_data_at_ko) + apply (erule_tac x=x in cmap_relationE1) + apply (simp only: heap_to_user_data_def Let_def ko_at_projectKO_opt) + apply simp + apply (drule h_t_valid_clift) + apply (simp add: h_t_valid_dom_s pageBits_def) + done + +lemma device_data_at_rf_sr_dom_s: + "\ typ_at' UserDataDeviceT x s; (s, s') \ rf_sr \ + \ {x ..+ 2 ^ pageBits} \ {SIndexVal, SIndexTyp 0} + \ dom_s (hrs_htd (t_hrs_' (globals s')))" + apply (drule rf_sr_heap_device_data_relation) + apply (drule device_data_at_ko) + apply (erule_tac x=x in cmap_relationE1) + apply (simp only: heap_to_device_data_def Let_def ko_at_projectKO_opt) + apply simp + apply (drule h_t_valid_clift) + apply (simp add: h_t_valid_dom_s pageBits_def) + done + +lemma intvl_2_power_times_decomp: + "\y < 2 ^ (n - m). {x + y * 2 ^ m ..+ 2 ^ m} \ S \ T + \ m \ n \ n < word_bits + \ {(x :: machine_word) ..+ 2 ^ n} \ S \ T" + apply (clarsimp simp: intvl_def) + apply (drule_tac x="of_nat k >> m" in spec) + apply (drule mp) + apply (rule shiftr_less_t2n) + apply (rule word_of_nat_less) + apply (simp add: word_of_nat_less) + apply (erule subsetD) + apply (clarsimp simp: shiftl_t2n[simplified mult.commute mult.left_commute, symmetric] + shiftr_shiftl1) + apply (rule_tac x="unat (of_nat k && mask m :: machine_word)" in exI) + apply (simp add: field_simps word_plus_and_or_coroll2) + apply (simp add: word_bits_def unat_less_power and_mask_less') + done + +lemma flex_user_data_at_rf_sr_dom_s: + "\ (\p<2 ^ (pageBitsForSize sz - pageBits). + typ_at' UserDataT (x + (p << pageBits)) s); (s, s') \ rf_sr \ + \ {x ..+ 2 ^ pageBitsForSize sz} \ {SIndexVal, SIndexTyp 0} + \ dom_s (hrs_htd (t_hrs_' (globals s')))" + apply (subst (asm) shiftl_t2n, subst (asm) mult.commute) + apply (rule_tac m=pageBits in intvl_2_power_times_decomp, + simp_all add: pbfs_atleast_pageBits pbfs_less_wb') + apply (erule allEI, clarsimp) + apply (drule(1) user_data_at_rf_sr_dom_s) + apply (erule subsetD) + apply (simp add: mult.assoc) + done + +lemma hrs_mem_update_fold_eq: + "hrs_mem_update (fold f xs) + = fold (hrs_mem_update o f) xs" + apply (rule sym, induct xs) + apply (simp add: hrs_mem_update_def) + apply (simp add: hrs_mem_update_def fun_eq_iff) + done + +lemma power_user_page_foldl_zero_ranges: + " \p<2 ^ (pageBitsForSize sz - pageBits). + hrs_htd hrs \\<^sub>t (Ptr (ptr + of_nat p * 0x1000) :: user_data_C ptr) + \ zero_ranges_are_zero rngs hrs + \ zero_ranges_are_zero rngs + (hrs_mem_update (\s. foldl (\s x. 
heap_update (Ptr x) (user_data_C (arr x)) s) s + (map (\n. ptr + of_nat n * 0x1000) [0..<2 ^ (pageBitsForSize sz - pageBits)])) + hrs)" + apply (simp add: foldl_conv_fold hrs_mem_update_fold_eq) + apply (rule conjunct1) + apply (rule fold_invariant[where P="\hrs'. zero_ranges_are_zero rngs hrs' + \ hrs_htd hrs' = hrs_htd hrs" + and xs=xs and Q="\x. x \ set xs" for xs], simp_all) + apply (subst zero_ranges_are_zero_update, simp_all) + apply clarsimp + done + +lemma heap_to_device_data_disj_mdf': + "\is_aligned ptr (pageBitsForSize sz); ksPSpace \ a = Some obj; objBitsKO obj = pageBits; pspace_aligned' \; + pspace_distinct' \; pspace_no_overlap' ptr (pageBitsForSize sz) \\ +\ heap_to_device_data (ksPSpace \) + (\x. if x \ {ptr..+2 ^ (pageBitsForSize sz)} then 0 + else underlying_memory (ksMachineState \) x) + a = + heap_to_device_data (ksPSpace \) (underlying_memory (ksMachineState \)) a" + apply (cut_tac heap_to_device_data_disj_mdf[where ptr = ptr + and gbits = "pageBitsForSize sz - pageBits" and n = 1 + and sz = "pageBitsForSize sz",simplified]) + apply (simp add: pbfs_atleast_pageBits pbfs_less_wb' field_simps| intro range_cover_full )+ + done + +(* FIXME AARCH64 not clear what the 9 is here and whether it needs to be reconsidered for AARCH64 *) +lemma range_cover_nca_neg: "\x p (off :: 9 word). + \(x::machine_word) < 8; {p..+2 ^pageBits } \ {ptr..ptr + (of_nat n * 2 ^ bits - 1)} = {}; + range_cover ptr sz bits n\ + \ p + ucast off * 8 + x \ {ptr..+n * 2 ^ bits}" + apply (case_tac "n = 0") + apply simp + apply (subst range_cover_intvl,simp) + apply simp + apply (subgoal_tac " p + ucast off * 8 + x \ {p..+2 ^ pageBits}") + apply blast + apply (clarsimp simp: intvl_def) + apply (rule_tac x = "unat off * 8 + unat x" in exI) + apply (simp add: ucast_nat_def) + apply (rule nat_add_offset_less [where n = 3, simplified]) + apply (simp add: word_less_nat_alt) + apply (rule unat_lt2p) + apply (simp add: pageBits_def objBits_simps) + done + +lemmas unat_of_nat32' = unat_of_nat_eq[where 'a=32] + +lemma clearMemory_PageCap_ccorres: + "ccorres dc xfdc (invs' and valid_cap' (ArchObjectCap (FrameCap ptr undefined sz False None)) + and (\s. 2 ^ pageBitsForSize sz \ gsMaxObjectSize s) + and K ({ptr .. ptr + 2 ^ (pageBitsForSize sz) - 1} \ kernel_data_refs = {}) + ) + ({s. bits_' s = of_nat (pageBitsForSize sz)} + \ {s. 
ptr___ptr_to_unsigned_long_' s = Ptr ptr}) + [] + (doMachineOp (clearMemory ptr (2 ^ pageBitsForSize sz))) (Call clearMemory_'proc)" + (is "ccorres dc xfdc ?P ?P' [] ?m ?c") + supply pageBitsForSize_bounded[simp del] + apply (cinit' lift: bits_' ptr___ptr_to_unsigned_long_') + apply (rule_tac P="capAligned (ArchObjectCap (FrameCap ptr undefined sz False None))" + in ccorres_gen_asm) + apply (rule ccorres_Guard_Seq) + apply (simp add: clearMemory_def) + apply (simp add: doMachineOp_bind) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule_tac P="?P" in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: valid_cap'_def capAligned_def + is_aligned_no_wrap'[OF _ word64_power_less_1]) + apply (prop_tac "ptr \ 0") + subgoal + apply (simp add: frame_at'_def) + apply (drule_tac x=0 in spec) + apply (clarsimp simp: pageBitsForSize_def bit_simps split: vmpage_size.splits) + done + apply simp + apply (prop_tac "3 \ pageBitsForSize sz") + apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.split) + apply (rule conjI) + apply (erule is_aligned_weaken) + apply (clarsimp simp: pageBitsForSize_def split: vmpage_size.splits) + apply (rule conjI) + apply (rule is_aligned_power2) + apply (clarsimp simp: pageBitsForSize_def split: vmpage_size.splits) + apply (clarsimp simp: ghost_assertion_size_logic[unfolded o_def]) + apply (clarsimp simp: ghost_assertion_size_logic[unfolded o_def] frame_at'_def) + apply (simp add: flex_user_data_at_rf_sr_dom_s bit_simps) + apply (clarsimp simp: field_simps word_size_def mapM_x_storeWord_step) + apply (simp add: doMachineOp_def split_def exec_gets) + apply (simp add: select_f_def simpler_modify_def bind_def) + apply (fold replicateHider_def)[1] + apply (subst coerce_heap_update_to_heap_updates' + [where chunk=4096 and m="2 ^ (pageBitsForSize sz - pageBits)"]) + apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.split) + apply (subst coerce_memset_to_heap_update_user_data) + apply (subgoal_tac "\p<2 ^ (pageBitsForSize sz - pageBits). 
+ x \\<^sub>c (Ptr (ptr + (of_nat p << pageBits)) :: user_data_C ptr)") + prefer 2 + apply (erule allfEI[where f=of_nat]) + apply (clarsimp simp: bit_simps) + apply (subst(asm) of_nat_power, assumption) + apply simp + apply (insert pageBitsForSize_64 [of sz])[1] + apply (erule order_le_less_trans [rotated]) + apply simp + apply (simp, drule ko_at_projectKO_opt[OF user_data_at_ko]) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) + apply (erule cmap_relationE1, simp(no_asm) add: heap_to_user_data_def Let_def) + apply fastforce + subgoal by (simp add: typ_heap_simps) + apply (simp add: shiftl_t2n' pageBits_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps + clift_foldl_hrs_mem_update foldl_id + carch_state_relation_def + cmachine_state_relation_def + foldl_fun_upd_const[unfolded fun_upd_def] + power_user_page_foldl_zero_ranges[simplified pageBits_def] + dom_heap_to_device_data) + apply (rule conjI[rotated]) + apply (simp add:pageBitsForSize_mess_multi) + apply (rule cmap_relationI) + apply (clarsimp simp: dom_heap_to_device_data cmap_relation_def) + apply (simp add:cuser_user_data_device_relation_def) + apply (subst help_force_intvl_range_conv, assumption) + subgoal by (simp add: pageBitsForSize_def bit_simps split: vmpage_size.split) + apply simp + apply (subst heap_to_user_data_update_region) + apply (drule map_to_user_data_aligned, clarsimp) + apply (rule aligned_range_offset_mem[where m=pageBits], simp_all)[1] + apply (rule pbfs_atleast_pageBits) + apply (erule cmap_relation_If_upd) + apply (clarsimp simp: cuser_user_data_relation_def order_less_le_trans[OF unat_lt2p]) + apply (fold word_rsplit_0, simp add: word_rcat_rsplit)[1] + apply (rule image_cong[OF _ refl]) + apply (rule set_eqI, rule iffI) + apply (clarsimp simp del: atLeastAtMost_iff) + apply (drule map_to_user_data_aligned, clarsimp) + apply (simp only: mask_in_range[symmetric]) + apply (rule_tac x="unat ((xa && mask (pageBitsForSize sz)) >> pageBits)" in image_eqI) + apply (simp add: subtract_mask(2)[symmetric]) + apply (cut_tac w="xa - ptr" and n=pageBits in and_not_mask[symmetric]) + apply (simp add: shiftl_t2n field_simps pageBits_def) + apply (subst is_aligned_neg_mask_eq, simp_all)[1] + apply (erule aligned_sub_aligned, simp_all add: word_bits_def)[1] + apply (erule is_aligned_weaken) + apply (rule pbfs_atleast_pageBits[unfolded pageBits_def]) + apply (simp add: pageBits_def) + apply (rule unat_less_power) + apply (fold word_bits_def, simp) + apply (rule shiftr_less_t2n) + apply (simp add: pbfs_atleast_pageBits[simplified pageBits_def]) + apply (rule and_mask_less_size) + apply (simp add: word_bits_def word_size) + apply (rule IntI) + apply (clarsimp simp del: atLeastAtMost_iff) + apply (subst aligned_range_offset_mem, assumption, simp_all)[1] + apply (rule order_le_less_trans[rotated], erule shiftl_less_t2n [OF of_nat_power], + simp_all add: word_bits_def)[1] + apply (insert pageBitsForSize_64 [of sz])[1] + apply (erule order_le_less_trans [rotated]) + subgoal by simp + subgoal by (simp add: pageBits_def shiftl_t2n field_simps) + apply (clarsimp simp: image_iff) + apply (rename_tac n) + apply (drule_tac x="of_nat n" in spec) + apply (simp add: bit_simps) + apply (simp add: of_nat_power[where 'a=64, folded word_bits_def]) + apply (simp add: pageBits_def ko_at_projectKO_opt[OF user_data_at_ko]) + (* FIXME AARCH64 indentation *) + apply (rule inj_Ptr) + apply csymbr + apply (ctac add: cleanCacheRange_RAM_ccorres) 
+ apply wp + apply (simp add: guard_is_UNIV_def unat_of_nat + word_bits_def capAligned_def word_of_nat_less) + apply (clarsimp simp: word_bits_def valid_cap'_def + capAligned_def word_of_nat_less) + apply (frule is_aligned_addrFromPPtr_n, simp add: pageBitsForSize_def split: vmpage_size.splits) + apply (simp add: bit_simps pptrBaseOffset_alignment_def)+ + apply (simp add: is_aligned_no_overflow') + apply (rule conjI) + subgoal + apply (prop_tac "cacheLineSize \ pageBitsForSize sz") + apply (simp add: pageBitsForSize_def bit_simps cacheLineSize_def split: vmpage_size.splits) + apply (simp add: is_aligned_mask[THEN iffD1] is_aligned_weaken) + done + apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.splits) + done + +declare replicate_numeral [simp] + +lemma coerce_memset_to_heap_update_pte: + "heap_update_list x (replicateHider 8 0) + = heap_update (Ptr x :: pte_C ptr) + (pte_C.pte_C (FCP (\x. 0)))" + apply (intro ext, simp add: heap_update_def) + apply (rule_tac f="\xs. heap_update_list x xs a b" for a b in arg_cong) + apply (simp add: to_bytes_def size_of_def typ_info_simps pte_C_tag_def) + apply (simp add: ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td align_of_def padup_def + final_pad_def size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: typ_info_simps align_td_array' size_td_array) + apply (simp add: typ_info_array' typ_info_word word_rsplit_0) + apply (simp add: eval_nat_numeral) + apply (simp add: replicateHider_def word_rsplit_0 word_bits_def) + done + +lemma objBits_eq_by_type: + fixes x :: "'a :: pspace_storable" and y :: 'a + shows "objBits x = objBits y" + apply (simp add: objBits_def) + apply (rule objBits_type) + apply (simp add: koTypeOf_injectKO) + done + +lemma mapM_x_store_memset_ccorres_assist: + fixes val :: "'a :: pspace_storable" + assumes nofail: "\ snd (mapM_x (\slot. setObject slot val) slots \)" + assumes slots1: "\n < length slots. slots ! n = hd slots + (of_nat n << objBits val)" + assumes slots2: "n = length slots * (2 ^ objBits val)" + assumes ptr: "ptr = hd slots" + assumes ko: "\ko :: 'a. updateObject ko = updateObject_default ko" + "\ko :: 'a. (1 :: machine_word) < 2 ^ objBits ko" + assumes restr: "set slots \ S" + assumes worker: "\ptr s s' (ko :: 'a). \ (s, s') \ rf_sr; ko_at' ko ptr s; ptr \ S \ + \ (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val)\, + globals_update (t_hrs_'_update (hrs_mem_update + (heap_update_list ptr + (replicateHider (2 ^ objBits val) (ucast c))))) s') \ rf_sr" + assumes rf_sr: "(\, s) \ rf_sr" + shows + "\(rv, \') \ fst (mapM_x (\slot. setObject slot val) slots \). + (\', globals_update (t_hrs_'_update (hrs_mem_update + (heap_update_list ptr (replicateHider n c)))) s) \ rf_sr" + unfolding slots2 ptr using rf_sr slots1 nofail restr +proof (induct slots arbitrary: s \) + case Nil + show ?case + using Nil.prems + apply (simp add: mapM_x_def sequence_x_def return_def replicateHider_def) + apply (simp add: rf_sr_def hrs_mem_update_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def + h_t_valid_clift_Some_iff) + done +next + case (Cons x xs tPre sPre) + + note nofail_bind = Cons.prems(3)[unfolded mapM_x_Cons K_bind_def] + + have obj_at: "obj_at' (\x :: 'a. 
True) x sPre" + using not_snd_bindI1[OF nofail_bind] + apply (subst(asm) setObject_obj_at_pre, simp_all add: ko snd_bind) + apply (clarsimp simp: stateAssert_def exec_get return_def) + apply (simp add: koTypeOf_injectKO typ_at_to_obj_at') + done + + note in_setObject = setObject_eq[OF _ _ objBits_eq_by_type obj_at, + where ko=val, simplified ko, simplified] + + note nofail_mapM = not_snd_bindI2[OF nofail_bind, OF in_setObject] + + have hd_xs: "xs \ [] \ hd xs = x + (2 ^ objBits val)" + using Cons.prems(2)[rule_format, where n=1] + by (simp add: hd_conv_nth) + + show ?case + using obj_at_ko_at'[OF obj_at] Cons.prems(4) + apply (clarsimp simp add: mapM_x_Cons bind_def split_def) + apply (rule rev_bexI, rule in_setObject) + apply (cut_tac Cons.hyps[OF _ _ nofail_mapM]) + defer + apply (rule worker, rule Cons.prems, assumption+) + apply clarsimp + apply (case_tac "xs = []", simp_all)[1] + apply (insert Cons.prems, simp)[1] + apply (frule_tac x="Suc n" in spec) + apply (simp add: hd_xs shiftl_t2n field_simps) + apply assumption + apply clarsimp + apply (rule rev_bexI, assumption) + apply (simp add: o_def) + apply (case_tac "xs = []") + apply (simp add: hrs_mem_update_def split_def replicateHider_def) + apply (subst(asm) heap_update_list_concat_fold_hrs_mem) + apply (simp add: hd_xs replicateHider_def) + apply (simp add: replicateHider_def replicate_add) + done +qed + +end + +lemma option_to_0_user_mem': + "option_to_0 \ user_mem' as =(\x. if x \ {y. \ pointerInUserData y as} then 0 + else underlying_memory (ksMachineState as) x) " + apply (rule ext) + apply (simp add:user_mem'_def option_to_0_def split:if_splits) + done + +lemma heap_to_user_data_in_user_mem'[simp]: + "\pspace_aligned' as;pspace_distinct' as\ \ heap_to_user_data (ksPSpace as) (option_to_0 \ user_mem' as) = + heap_to_user_data (ksPSpace as)(underlying_memory (ksMachineState as))" + apply (rule ext)+ + apply (clarsimp simp: heap_to_user_data_def option_map_def + split: option.splits) + apply (subst option_to_0_user_mem') + apply (subst map_option_byte_to_word_heap) + apply (clarsimp simp: projectKO_opt_user_data map_comp_def + split: option.split_asm kernel_object.split_asm) + apply (frule(1) pspace_alignedD') + apply (frule(1) pspace_distinctD') + apply (subgoal_tac "x + ucast off * 8 + xa && ~~ mask pageBits = x" ) + apply (clarsimp simp: pointerInUserData_def typ_at'_def ko_wp_at'_def) + apply (simp add: AARCH64.pageBits_def) + apply (subst mask_lower_twice2[where n = 3 and m = 12,simplified,symmetric]) + apply (subst is_aligned_add_helper[THEN conjunct2,where n1 = 3]) + apply (erule aligned_add_aligned) + apply (simp add: is_aligned_mult_triv2[where n = 3,simplified]) + apply (clarsimp simp: objBits_simps AARCH64.pageBits_def) + apply simp + apply (rule is_aligned_add_helper[THEN conjunct2]) + apply (simp add: AARCH64.pageBits_def objBits_simps) + apply (rule word_less_power_trans2[where k = 3,simplified]) + apply (rule less_le_trans[OF ucast_less]) + apply simp+ + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma setObject_asidpool_gs[wp]: + "setObject ptr (vcpu::asidpool) \\s. P (gsMaxObjectSize s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def crunch_simps) + +crunch gsMaxObjectSize[wp]: deleteASIDPool "\s. 
P (gsMaxObjectSize s)" + (wp: crunch_wps getObject_inv loadObject_default_inv + simp: crunch_simps) + +end + +context kernel_m begin + +lemma page_table_at_rf_sr_dom_s: + "\ page_table_at' pt_t x s; (s, s') \ rf_sr \ + \ {x ..+ 2 ^ (ptBits pt_t) } \ {SIndexVal, SIndexTyp 0} + \ dom_s (hrs_htd (t_hrs_' (globals s')))" + apply (rule_tac m=pte_bits in intvl_2_power_times_decomp) + defer + apply (simp add: bit_simps) + apply (simp add: bit_simps word_bits_def split: if_split) + apply (clarsimp simp: pt_bits_def table_size_def page_table_at'_def) + apply (erule_tac x="ucast y" in allE) + apply (simp add: shiftl_t2n') + apply (subst (asm) le_mask_iff_lt_2n[THEN iffD1]) + apply (simp add: bit_simps split: if_split) + apply simp + apply (simp add: typ_at_to_obj_at_arches) + apply (drule obj_at_ko_at', clarsimp) + apply (erule cmap_relationE1[OF rf_sr_cpte_relation]) + apply (erule ko_at_projectKO_opt) + apply (drule h_t_valid_clift) + apply (drule h_t_valid_dom_s[OF _ refl refl]) + apply (erule subsetD) + apply (auto simp add: intvl_def shiftl_t2n pte_bits_def word_size_bits_def)[1] + done + +lemma ccorres_make_xfdc: + "ccorresG rf_sr \ r xf P P' h a c \ ccorresG rf_sr \ dc xfdc P P' h a c" + apply (erule ccorres_rel_imp) + apply simp + done + +lemma ccorres_if_True_False_simps: + "ccorres r xf P P' hs a (IF True THEN c ELSE c' FI) = ccorres r xf P P' hs a c" + "ccorres r xf P P' hs a (IF False THEN c ELSE c' FI) = ccorres r xf P P' hs a c'" + "ccorres r xf P P' hs a (IF True THEN c ELSE c' FI ;; d) = ccorres r xf P P' hs a (c ;; d)" + "ccorres r xf P P' hs a (IF False THEN c ELSE c' FI ;; d) = ccorres r xf P P' hs a (c' ;; d)" + by (simp_all add: ccorres_cond_iffs ccorres_seq_simps) + +lemmas cap_tag_values = + cap_untyped_cap_def + cap_endpoint_cap_def + cap_notification_cap_def + cap_reply_cap_def + cap_cnode_cap_def + cap_thread_cap_def + cap_irq_handler_cap_def + cap_null_cap_def + cap_irq_control_cap_def + cap_zombie_cap_def + cap_frame_cap_def + cap_vspace_cap_def + cap_page_table_cap_def + cap_asid_pool_cap_def + cap_vcpu_cap_def + +lemma ccorres_return_C_seq: + "\\s f. xf (global_exn_var_'_update f (xfu (\_. v s) s)) = v s; \s f. globals (xfu f s) = globals s; wfhandlers hs\ + \ ccorres_underlying rf_sr \ r rvxf arrel xf (\_. True) {s. arrel rv (v s)} hs (return rv) (return_C xfu v ;; d)" + apply (rule ccorres_guard_imp) + apply (rule ccorres_split_throws, rule ccorres_return_C, simp+) + apply vcg + apply simp_all + done + + +lemma ccap_relation_get_capZombiePtr_CL: + "\ ccap_relation cap cap'; isZombie cap; capAligned cap \ + \ get_capZombiePtr_CL (cap_zombie_cap_lift cap') = capZombiePtr cap" + apply (simp only: cap_get_tag_isCap[symmetric]) + apply (drule(1) cap_get_tag_to_H) + apply (clarsimp simp: get_capZombiePtr_CL_def get_capZombieBits_CL_def Let_def split: if_split) + apply (subst less_mask_eq) + apply (clarsimp simp add: capAligned_def objBits_simps word_bits_conv) + apply unat_arith + apply simp + done + +lemma modify_gets_helper: + "do y \ modify (ksPSpace_update (\_. ps)); ps' \ gets ksPSpace; f ps' od + = do y \ modify (ksPSpace_update (\_. 
ps)); f ps od" + by (simp add: bind_def simpler_modify_def simpler_gets_def) + +lemma snd_lookupAround2_update: + "ps y \ None \ + snd (lookupAround2 x (ps (y \ v'))) = snd (lookupAround2 x ps)" + apply (clarsimp simp: lookupAround2_def lookupAround_def Let_def + dom_fun_upd2 + simp del: dom_fun_upd cong: if_cong option.case_cong) + apply (clarsimp split: option.split if_split cong: if_cong) + apply auto + done + +lemma double_setEndpoint: + "do y \ setEndpoint epptr v1; setEndpoint epptr v2 od + = setEndpoint epptr v2" + apply (simp add: setEndpoint_def setObject_def bind_assoc split_def + modify_gets_helper) + apply (simp add: updateObject_default_def bind_assoc objBits_simps) + apply (rule ext) + apply (rule bind_apply_cong, rule refl)+ + apply (clarsimp simp add: in_monad projectKOs magnitudeCheck_assert + snd_lookupAround2_update) + apply (simp add: lookupAround2_known1 assert_opt_def projectKO_def projectKO_opt_ep + alignCheck_assert) + apply (simp add: bind_def simpler_modify_def) + done + +lemma filterM_setEndpoint_adjustment: + "\ \v. do setEndpoint epptr IdleEP; body v od + = do v' \ body v; setEndpoint epptr IdleEP; return v' od \ + \ + (do q' \ filterM body q; setEndpoint epptr (f q') od) + = (do setEndpoint epptr IdleEP; q' \ filterM body q; setEndpoint epptr (f q') od)" + apply (rule sym) + apply (induct q arbitrary: f) + apply (simp add: double_setEndpoint) + apply (simp add: bind_assoc) + apply (subst bind_assoc[symmetric], simp, simp add: bind_assoc) + done + +lemma ccorres_inst_voodoo: + "\x. ccorres r xf (P x) (P' x) hs (h x) (c x) + \ \x. ccorres r xf (P x) (P' x) hs (h x) (c x)" + by simp + +lemma cpspace_relation_ep_update_ep2: + "\ ko_at' (ep :: endpoint) epptr s; + cmap_relation (map_to_eps (ksPSpace s)) + (cslift t) ep_Ptr (cendpoint_relation (cslift t)); + cendpoint_relation (cslift t') ep' endpoint; + (cslift t' :: tcb_C ptr \ tcb_C) = cslift t \ + \ cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(ep_Ptr epptr \ endpoint)) + ep_Ptr (cendpoint_relation (cslift t'))" + apply (rule cmap_relationE1, assumption, erule ko_at_projectKO_opt) + apply (rule_tac P="\a. cmap_relation a b c d" for b c d in rsubst, + erule cmap_relation_upd_relI, assumption+) + apply simp+ + apply (rule ext, simp add: map_comp_def projectKO_opt_ep split: if_split) + done + +end + +context kernel_m begin + +lemma ccorres_abstract_h_val: + "(\rv. P rv \ ccorres r xf G (G' rv) hs a c) \ + ccorres r xf G ({s. P (h_val (hrs_mem (t_hrs_' (globals s))) p) + \ s \ G' (h_val (hrs_mem (t_hrs_' (globals s))) + p)} + \ {s. P (h_val (hrs_mem (t_hrs_' (globals s))) p)}) hs a c" + apply (rule ccorres_tmp_lift1 [where P = P]) + apply (clarsimp simp: Collect_conj_eq [symmetric]) + apply (fastforce intro: ccorres_guard_imp) + done + +lemma ccorres_subst_basic_helper: + "\ \s s'. \ P s; s' \ P'; (s, s') \ rf_sr \ \ f s' = f' s'; + \s s'. \ P s; s' \ P'; (s, s') \ rf_sr \ \ (s, f' s') \ rf_sr; + \s'. xf' (f' s') = v; \rv' t t'. ceqv \ xf' rv' t t' c (c' rv'); + ccorres rrel xf Q Q' hs a (c' v) \ + \ ccorres rrel xf (P and Q) {s. s \ P' \ f' s \ Q'} hs a (Basic f ;; c)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return) + apply (rule ccorres_split_nothrow[where xf'=xf' and r'="\rv rv'. 
rv' = v"]) + apply (rule ccorres_from_vcg[where P=P and P'=P']) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply assumption + apply simp + apply wp + apply vcg + apply clarsimp + done + +lemma ctcb_relation_blocking_ipc_badge: + "\ ctcb_relation tcb ctcb; isBlockedOnSend (tcbState tcb) \ \ + tsType_CL (thread_state_lift (tcbState_C ctcb)) = scast ThreadState_BlockedOnSend" + "\ ctcb_relation tcb ctcb; + tsType_CL (thread_state_lift (tcbState_C ctcb)) = scast ThreadState_BlockedOnSend \ + \ blockingIPCBadge (tcbState tcb) + = blockingIPCBadge_CL (thread_state_lift (tcbState_C ctcb))" + apply (clarsimp simp add: ctcb_relation_def) + apply (simp add: isBlockedOnSend_def split: Structures_H.thread_state.split_asm) + apply (clarsimp simp: cthread_state_relation_def) + apply (clarsimp simp add: ctcb_relation_def cthread_state_relation_def) + apply (cases "tcbState tcb", simp_all add: ThreadState_defs) + done + +lemma cendpoint_relation_q_cong: + "\ \t rf. (t, rf) \ ep_q_refs_of' ep \ hp (tcb_ptr_to_ctcb_ptr t) = hp' (tcb_ptr_to_ctcb_ptr t) \ + \ cendpoint_relation hp ep ep' = cendpoint_relation hp' ep ep'" + apply (cases ep, simp_all add: cendpoint_relation_def Let_def) + apply (rule conj_cong [OF refl]) + apply (rule tcb_queue_relation'_cong[OF refl refl refl]) + apply clarsimp + apply (rule conj_cong [OF refl]) + apply (rule tcb_queue_relation'_cong[OF refl refl refl]) + apply clarsimp + done + +lemma cnotification_relation_q_cong: + "\\t rf. (t, rf) \ ntfn_q_refs_of' (ntfnObj ntfn) \ hp (tcb_ptr_to_ctcb_ptr t) = hp' (tcb_ptr_to_ctcb_ptr t)\ + \ cnotification_relation hp ntfn ntfn' = cnotification_relation hp' ntfn ntfn'" + apply (cases "ntfnObj ntfn", simp_all add: cnotification_relation_def Let_def) + apply (auto intro: iffD1[OF tcb_queue_relation'_cong[OF refl refl refl]]) + done + +lemma ccorres_duplicate_guard: + "ccorres r xf (P and P) Q hs f f' \ ccorres r xf P Q hs f f'" + by (erule ccorres_guard_imp, auto) + + +lemma ep_q_refs'_no_NTFNBound[simp]: + "(x, NTFNBound) \ ep_q_refs_of' ep" + by (auto simp: ep_q_refs_of'_def split: endpoint.splits) + + +lemma ntfn_q_refs'_no_NTFNBound[simp]: + "(x, NTFNBound) \ ntfn_q_refs_of' ntfn" + by (auto simp: ntfn_q_refs_of'_def split: ntfn.splits) + +crunches setThreadState + for pspace_canonical'[wp]: pspace_canonical' + +lemma cancelBadgedSends_ccorres: + "ccorres dc xfdc (invs' and ep_at' ptr) + (UNIV \ {s. epptr_' s = Ptr ptr} \ {s. 
badge_' s = bdg}) [] + (cancelBadgedSends ptr bdg) (Call cancelBadgedSends_'proc)" + apply (cinit lift: epptr_' badge_' simp: whileAnno_def) + apply (rule ccorres_stateAssert) + apply (simp add: list_case_return + cong: list.case_cong Structures_H.endpoint.case_cong call_ignore_cong + del: Collect_const) + apply (rule ccorres_pre_getEndpoint, rename_tac ep) + apply (rule_tac R="ko_at' ep ptr" and xf'="ret__unsigned_longlong_'" + and val="case ep of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle + | SendEP q \ scast EPState_Send" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply clarsimp + apply (erule cmap_relationE1 [OF cmap_relation_ep], erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def + split: Structures_H.endpoint.split_asm) + apply ceqv + apply wpc + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_Skip) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_Skip) + apply (rename_tac list) + apply (simp add: Collect_True Collect_False endpoint_state_defs + ccorres_cond_iffs + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_rhs_assoc)+ + apply (csymbr, csymbr) + apply (drule_tac s = ep in sym, simp only:) + apply (rule_tac P="ko_at' ep ptr and invs'" in ccorres_cross_over_guard) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl]) + apply (rule_tac P="ko_at' ep ptr" + in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rule cmap_relationE1[OF cmap_relation_ep], assumption) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps setEndpoint_def) + apply (rule rev_bexI) + apply (rule setObject_eq; simp add: objBits_simps')[1] + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def + packed_heap_update_collapse_hrs) + apply (clarsimp simp: cpspace_relation_def update_ep_map_tos typ_heap_simps') + apply (erule(1) cpspace_relation_ep_update_ep2) + apply (simp add: cendpoint_relation_def endpoint_state_defs) + subgoal by simp + apply (rule ccorres_symb_exec_r) + apply (rule_tac xs=list in filterM_voodoo) + apply (rule_tac P="\xs s. (\x \ set xs \ set list. + st_tcb_at' (\st. isBlockedOnSend st \ blockingObject st = ptr) x s) + \ distinct (xs @ list) \ ko_at' IdleEP ptr s + \ (\p. \x \ set (xs @ list). \rf. (x, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) + \ pspace_aligned' s \ pspace_distinct' s \ pspace_canonical' s + \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" + and P'="\xs. {s. ep_queue_relation' (cslift s) (xs @ list) + (head_C (queue_' s)) (end_C (queue_' s))} + \ {s. 
thread_' s = (case list of [] \ tcb_Ptr 0 + | x # xs \ tcb_ptr_to_ctcb_ptr x)}" + in ccorres_inst_voodoo) + apply (induct_tac list) + apply (rule allI) + apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) + apply (rule ccorres_tmp_lift2 [OF _ _ Int_lower1]) + apply ceqv + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_duplicate_guard, rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_from_vcg, rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (rule cmap_relationE1[OF cmap_relation_ep], assumption) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps tcb_queue_relation'_def) + apply (case_tac x) + apply (clarsimp simp: setEndpoint_def) + apply (rule rev_bexI, rule setObject_eq, + (simp add: objBits_simps')+) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + packed_heap_update_collapse_hrs + carch_state_relation_def + cmachine_state_relation_def) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps' + update_ep_map_tos) + apply (erule(1) cpspace_relation_ep_update_ep2) + subgoal by (simp add: cendpoint_relation_def Let_def) + subgoal by simp + apply (clarsimp simp: tcb_at_not_NULL[OF pred_tcb_at'] + setEndpoint_def) + apply (rule rev_bexI, rule setObject_eq, + (simp add: objBits_simps')+) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + packed_heap_update_collapse_hrs + carch_state_relation_def + cmachine_state_relation_def) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps' + update_ep_map_tos) + apply (erule(1) cpspace_relation_ep_update_ep2) + apply (simp add: cendpoint_relation_def Let_def) + apply (subgoal_tac "tcb_at' (last (a # list)) \ \ tcb_at' a \") + apply (clarsimp simp: is_aligned_neg_mask_eq[OF is_aligned_tcb_ptr_to_ctcb_ptr[where P=\]]) + apply (simp add: tcb_queue_relation'_def EPState_Send_def mask_shiftl_decompose + flip: canonical_bit_def) + apply (drule (1) tcb_and_not_mask_canonical[where n=2]) + apply (simp (no_asm) add: tcbBlockSizeBits_def) + subgoal by (simp add: mask_def canonical_bit_def) + subgoal by (auto split: if_split) + subgoal by simp + apply (ctac add: rescheduleRequired_ccorres) + apply (rule hoare_pre, wp weak_sch_act_wf_lift_linear set_ep_valid_objs') + apply (clarsimp simp: weak_sch_act_wf_def sch_act_wf_def) + apply (fastforce simp: valid_ep'_def pred_tcb_at' split: list.splits) + apply (simp add: guard_is_UNIV_def) + apply (rule allI) + apply (rename_tac a lista x) + apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) + apply (rule ccorres_init_tmp_lift2, ceqv) + apply (rule ccorres_guard_imp2) + apply (simp add: bind_assoc + del: Collect_const) + apply (rule ccorres_cond_true) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_pre_threadGet[where f=tcbState, folded getThreadState_def]) + apply (rule ccorres_move_c_guard_tcb) + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply csymbr + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac P=\ + and P'="{s. ep_queue_relation' (cslift s) (x @ a # lista) + (head_C (queue_' s)) (end_C (queue_' s))}" + and f'="\s. s \ next___ptr_to_struct_tcb_C_' := (case lista of [] \ tcb_Ptr 0 + | v # vs \ tcb_ptr_to_ctcb_ptr v) \" + and xf'="next___ptr_to_struct_tcb_C_'" + in ccorres_subst_basic_helper) + apply (thin_tac "\x. 
P x" for P) + apply (rule myvars.fold_congs, (rule refl)+) + apply (clarsimp simp: tcb_queue_relation'_def use_tcb_queue_relation2 + tcb_queue_relation2_concat) + apply (clarsimp simp: typ_heap_simps split: list.split) + subgoal by (simp add: rf_sr_def) + apply simp + apply ceqv + apply (rule_tac P="ret__unsigned_longlong=blockingIPCBadge rv" in ccorres_gen_asm2) + apply (rule ccorres_if_bind, rule ccorres_if_lhs) + apply (simp add: bind_assoc) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: setThreadState_ccorres) + apply (ctac add: tcbSchedEnqueue_ccorres) + apply (rule_tac P="\s. \t \ set (x @ a # lista). tcb_at' t s" + in ccorres_cross_over_guard) + apply (rule ccorres_add_return, rule ccorres_split_nothrow[OF _ ceqv_refl]) + apply (rule_tac rrel=dc and xf=xfdc + and P="\s. (\t \ set (x @ a # lista). tcb_at' t s) + \ (\p. \t \ set (x @ a # lista). \rf. (t, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) + \ distinct (x @ a # lista) + \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" + and P'="{s. ep_queue_relation' (cslift s) (x @ a # lista) + (head_C (queue_' s)) (end_C (queue_' s))}" + in ccorres_from_vcg) + apply (thin_tac "\x. P x" for P) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: ball_Un) + apply (rule exI, rule conjI) + apply (rule exI, erule conjI) + apply (intro conjI[rotated]) + apply (assumption) + apply (fold_subgoals (prefix))[3] + subgoal premises prems using prems by (fastforce intro: pred_tcb_at')+ + apply (clarsimp simp: return_def rf_sr_def cstate_relation_def Let_def) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def) + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + subgoal by (simp add: o_def) + apply (rule conjI) + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply clarsimp + apply (rule cendpoint_relation_q_cong) + apply (rule sym, erule restrict_map_eqI) + apply (clarsimp simp: image_iff) + apply (drule(2) map_to_ko_atI) + apply (drule ko_at_state_refs_ofD') + apply clarsimp + apply (drule_tac x=p in spec) + subgoal by fastforce + + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply clarsimp + apply (drule(2) map_to_ko_atI, drule ko_at_state_refs_ofD') + + apply (rule cnotification_relation_q_cong) + apply (rule sym, erule restrict_map_eqI) + apply (clarsimp simp: image_iff) + apply (drule_tac x=p in spec) + subgoal by fastforce + apply (clarsimp simp: carch_state_relation_def cmachine_state_relation_def) + apply (rule ccorres_symb_exec_r2) + apply (erule spec) + apply vcg + apply (vcg spec=modifies) + apply wp + apply simp + apply vcg + apply (wp hoare_vcg_const_Ball_lift sch_act_wf_lift) + apply simp + apply (vcg exspec=tcbSchedEnqueue_cslift_spec) + apply (wp hoare_vcg_const_Ball_lift sts_st_tcb_at'_cases + sts_sch_act sts_valid_objs') + apply (vcg exspec=setThreadState_cslift_spec) + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_symb_exec_r2) + apply (drule_tac x="x @ [a]" in spec, simp) + apply vcg + apply (vcg spec=modifies) + apply (thin_tac "\x. 
P x" for P) + apply (clarsimp simp: pred_tcb_at' ball_Un) + apply (rule conjI) + apply (clarsimp split: if_split) + subgoal by (fastforce simp: valid_tcb_state'_def valid_objs'_maxDomain + valid_objs'_maxPriority dest: pred_tcb_at') + apply (clarsimp simp: tcb_at_not_NULL [OF pred_tcb_at']) + apply (clarsimp simp: typ_heap_simps st_tcb_at'_def) + apply (drule(1) obj_at_cslift_tcb) + apply (clarsimp simp: ctcb_relation_blocking_ipc_badge) + apply (rule conjI) + apply clarsimp + apply (frule rf_sr_cscheduler_relation) + apply (clarsimp simp: cscheduler_action_relation_def st_tcb_at'_def + split: scheduler_action.split_asm) + apply (rename_tac word) + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; simp?) + subgoal by clarsimp + apply clarsimp + apply (rule conjI) + apply (frule tcbSchedEnqueue_cslift_precond_discharge; simp?) + subgoal by clarsimp + apply clarsimp + apply (rule context_conjI) + apply (clarsimp simp: tcb_queue_relation'_def) + apply (erule iffD2[OF ep_queue_relation_shift[rule_format], rotated -1]) + subgoal by simp + apply (rule_tac x="x @ a # lista" in exI) + apply (clarsimp simp: ball_Un) + apply (rule conjI, fastforce) + subgoal by (clarsimp simp: remove1_append) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (wp hoare_vcg_const_Ball_lift) + apply (wp obj_at_setObject3[where 'a=endpoint, folded setEndpoint_def]) + apply (simp add: objBits_simps')+ + apply (wp set_ep_valid_objs') + apply vcg + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (erule cmap_relationE1[OF cmap_relation_ep], erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: cendpoint_relation_def Let_def) + subgoal by (clarsimp simp: tcb_queue_relation'_def neq_Nil_conv + split: if_split_asm) + apply clarsimp + apply (frule ko_at_valid_objs', clarsimp) + apply simp + apply (clarsimp simp: valid_obj'_def valid_ep'_def) + apply (frule sym_refs_ko_atD', clarsimp) + apply (clarsimp simp: st_tcb_at_refs_of_rev') + apply (rule conjI) + subgoal by (auto simp: isBlockedOnSend_def elim!: pred_tcb'_weakenE) + apply (rule conjI) + apply (clarsimp split: if_split) + apply (drule sym_refsD, clarsimp) + apply (drule(1) bspec)+ + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply (fastforce simp: obj_at'_def projectKOs state_refs_of'_def pred_tcb_at'_def + tcb_bound_refs'_def + dest!: symreftype_inverse') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply fastforce + done + +lemma tcb_ptr_to_ctcb_ptr_force_fold: + "x + 2 ^ ctcb_size_bits = ptr_val (tcb_ptr_to_ctcb_ptr x)" + by (simp add: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) + + +lemma access_ti_list_word8_array: + "N \ CARD('a::finite) \ + access_ti_list (map (\n. DTPair (adjust_ti (typ_info_t TYPE(8 word)) (\x. x.[n]) + (\x f. Arrays.update f n x)) (replicate n CHR ''1'')) [0..x. 0)) + (user_fpu_state_C (FCP (\x. 0)) 0 0)) + NULL) + (thread_state_C (FCP (\x. 0))) + (NULL) + (seL4_Fault_C (FCP (\x. 0))) + (lookup_fault_C (FCP (\x. 0))) + 0 0 0 0 0 0 NULL NULL NULL NULL)" + apply (intro ext, simp add: heap_update_def) + apply (rule_tac f="\xs. 
heap_update_list x xs a b" for a b in arg_cong) + apply (simp add: to_bytes_def size_of_def typ_info_simps tcb_C_tag_def) + apply (simp add: ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td align_of_def padup_def + final_pad_def size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: update_ti_adjust_ti update_ti_t_machine_word_0s update_ti_t_word32_0s + typ_info_simps thread_state_C_tag_def seL4_Fault_C_tag_def + lookup_fault_C_tag_def update_ti_t_ptr_0s arch_tcb_C_tag_def + ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td + ti_typ_combine_empty_ti ti_typ_combine_td + align_of_def padup_def user_fpu_state_C_tag_def user_context_C_tag_def + final_pad_def size_td_lt_ti_typ_pad_combine Let_def size_of_def + align_td_array' size_td_array) + apply (simp add: typ_info_array' access_ti_list_word8_array) + apply (simp add: typ_info_word word_rsplit_0 word_rsplit_0_32 upt_conv_Cons) + apply (simp add: typ_info_word typ_info_ptr word_rsplit_0 word_rsplit_0_32 word_bits_def + replicateHider_def) + done + +lemma isArchObjectCap_capBits: + "isArchObjectCap cap \ capBits cap = acapBits (capCap cap)" + by (clarsimp simp: isCap_simps) + +declare Kernel_C.tcb_C_size [simp del] + +lemma cte_lift_ccte_relation: + "cte_lift cte' = Some ctel' + \ c_valid_cte cte' + \ ccte_relation (cte_to_H ctel') cte'" + by (simp add: ccte_relation_def) + +lemma updateFreeIndex_ccorres: + "\s. \ \ ({s} \ {s. \cte cte'. cslift s (cte_Ptr srcSlot) = Some cte' + \ cteCap cte = cap' \ ccte_relation cte cte'}) + c + {t. \cap. cap_untyped_cap_lift cap = (cap_untyped_cap_lift + (cte_C.cap_C (the (cslift s (cte_Ptr srcSlot))))) + \ cap_untyped_cap_CL.capFreeIndex_CL := ((of_nat idx') >> 4) \ + \ cap_get_tag cap = scast cap_untyped_cap + \ t_hrs_' (globals t) = hrs_mem_update (heap_update (cte_Ptr srcSlot) + (cte_C.cap_C_update (\_. cap) (the (cslift s (cte_Ptr srcSlot))))) + (t_hrs_' (globals s)) + \ t may_only_modify_globals s in [t_hrs] + } + \ ccorres dc xfdc + (valid_objs' and cte_wp_at' (\cte. isUntypedCap (cteCap cte) + \ cap' = (cteCap cte)) srcSlot + and untyped_ranges_zero' + and (\_. is_aligned (of_nat idx' :: machine_word) 4 \ idx' \ 2 ^ (capBlockSize cap'))) + {s. \ capIsDevice cap' + \ region_actually_is_zero_bytes (capPtr cap' + of_nat idx') (capFreeIndex cap' - idx') s} hs + (updateFreeIndex srcSlot idx') c" + (is "_ \ ccorres dc xfdc (valid_objs' and ?cte_wp_at' and _ and _) ?P' hs ?a c") + apply (rule ccorres_gen_asm) + apply (simp add: updateFreeIndex_def getSlotCap_def updateCap_def) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_split_noop_lhs, rule_tac cap'=cap' in updateTrackedFreeIndex_noop_ccorres) + apply (rule ccorres_pre_getCTE)+ + apply (rename_tac cte cte2) + apply (rule_tac P = "\s. ?cte_wp_at' s \ cte2 = cte \ cte_wp_at' ((=) cte) srcSlot s" + and P'="{s. \cte cte'. 
cslift s (cte_Ptr srcSlot) = Some cte' + \ cteCap cte = cap' \ ccte_relation cte cte'} \ ?P'" in ccorres_from_vcg) + apply (rule allI, rule HoarePartial.conseq_exploit_pre, clarify) + apply (drule_tac x=s in spec, rule conseqPre, erule conseqPost) + defer + apply clarsimp + apply clarsimp + apply (simp add: cte_wp_at_ctes_of) + apply wp + apply (clarsimp simp: isCap_simps cte_wp_at_ctes_of) + apply (frule(1) rf_sr_ctes_of_clift) + apply clarsimp + apply (frule(1) cte_lift_ccte_relation) + apply (rule exI, intro conjI[rotated], assumption, simp_all)[1] + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) rf_sr_ctes_of_cliftE) + apply (frule(1) rf_sr_ctes_of_clift) + apply clarsimp + apply (subgoal_tac "ccap_relation (capFreeIndex_update (\_. idx') + (cteCap (the (ctes_of \ srcSlot)))) cap") + apply (rule fst_setCTE [OF ctes_of_cte_at], assumption) + apply (erule bexI [rotated]) + apply (clarsimp simp add: rf_sr_def cstate_relation_def Let_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"] + isCap_simps) + apply (simp add:cpspace_relation_def) + apply (clarsimp simp:typ_heap_simps' modify_map_def mex_def meq_def) + apply (rule conjI) + apply (rule cpspace_cte_relation_upd_capI, assumption+) + apply (rule conjI) + apply (rule setCTE_tcb_case, assumption+) + apply (case_tac s', clarsimp) + subgoal by (simp add: carch_state_relation_def cmachine_state_relation_def) + + apply (clarsimp simp: isCap_simps) + apply (drule(1) cte_lift_ccte_relation, + drule ccte_relation_ccap_relation) + apply (simp add: cte_to_H_def) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: ccap_relation_def cap_lift_untyped_cap + cap_to_H_simps cap_untyped_cap_lift_def + is_aligned_shiftr_shiftl + dest!: ccte_relation_ccap_relation) + apply (rule unat_of_nat_eq unat_of_nat_eq[symmetric], + erule order_le_less_trans, + rule power_strict_increasing, simp_all) + apply (rule unat_less_helper, rule order_le_less_trans[OF word_and_le1], simp add: mask_def) + done + +end + +(* FIXME: Move *) +lemma ccap_relation_isDeviceCap: + "\ccap_relation cp cap; isUntypedCap cp + \ \ to_bool (capIsDevice_CL (cap_untyped_cap_lift cap)) = (capIsDevice cp)" + apply (frule cap_get_tag_UntypedCap) + apply (simp add:cap_get_tag_isCap ) + done + +lemma ccap_relation_isDeviceCap2: + "\ccap_relation cp cap; isUntypedCap cp + \ \ (capIsDevice_CL (cap_untyped_cap_lift cap) = 0) = (\ (capIsDevice cp))" + apply (frule cap_get_tag_UntypedCap) + apply (simp add:cap_get_tag_isCap to_bool_def) + done + +end diff --git a/proof/crefine/AARCH64/Refine_C.thy b/proof/crefine/AARCH64/Refine_C.thy new file mode 100644 index 0000000000..49b2a9321d --- /dev/null +++ b/proof/crefine/AARCH64/Refine_C.thy @@ -0,0 +1,1455 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +chapter "Toplevel Refinement Statement" + +theory Refine_C +imports + Init_C + Fastpath_Equiv + Fastpath_C + CToCRefine +begin + +context begin interpretation Arch . (*FIXME: arch_split*) +crunch ksQ[wp]: handleVMFault "\s. 
P (ksReadyQueues s)" +end + +context kernel_m +begin + +text \Assemble fastpaths\ + +lemmas fastpath_call_ccorres_callKernel + = monadic_rewrite_ccorres_assemble[OF fastpath_call_ccorres fastpath_callKernel_SysCall_corres] + +lemmas fastpath_reply_recv_ccorres_callKernel + = monadic_rewrite_ccorres_assemble[OF fastpath_reply_recv_ccorres fastpath_callKernel_SysReplyRecv_corres] + +declare liftE_handle [simp] + +lemma schedule_sch_act_wf: + "\invs'\ schedule \\_ s. sch_act_wf (ksSchedulerAction s) s\" +apply (rule hoare_post_imp) + apply (erule invs_sch_act_wf') +apply (rule schedule_invs') +done + +lemma ucast_8_32_neq: + "x \ 0xFF \ UCAST(8 \ 32 signed) x \ 0xFF" + by uint_arith (clarsimp simp: uint_up_ucast is_up) + +(* FIXME AARCH64 move, wrong name, eliminate magic number *) +lemma getActiveIRQ_neq_Some0xFF': + "\\\ getActiveIRQ in_kernel \\rv s. rv \ Some 0x1FF\" + apply (simp add: getActiveIRQ_def) + apply wpsimp + using irq_oracle_max_irq + apply (simp add: maxIRQ_def) + apply (drule_tac x="Suc (irq_state s)" in spec) + apply clarsimp + done + +(* FIXME: follows already from getActiveIRQ_le_maxIRQ *) +(* FIXME AARCH64 copied from Machine_R but absent on AARCH64, name is wrong *) +lemma getActiveIRQ_neq_Some0xFF: + "\\\ doMachineOp (getActiveIRQ in_kernel) \\rv s. rv \ Some 0x1FF\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (drule use_valid, rule getActiveIRQ_neq_Some0xFF') + apply auto + done + +lemma handleInterruptEntry_ccorres: + "ccorres dc xfdc + (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread)) + UNIV [] + (callKernel Interrupt) (Call handleInterruptEntry_'proc)" +proof - + show ?thesis + apply (cinit') + apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) + apply (simp add: liftE_bind bind_assoc) + apply (ctac (no_vcg) add: getActiveIRQ_ccorres) + apply (rule ccorres_Guard_Seq)? + apply (rule_tac P="rv \ Some 0xFFFF" in ccorres_gen_asm) + apply wpc + apply (simp add: irqInvalid_def mask_def) + apply (rule ccorres_symb_exec_r) + apply (ctac (no_vcg) add: schedule_ccorres) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: activateThread_ccorres) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wp schedule_sch_act_wf schedule_invs' + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply simp + apply vcg + apply vcg + apply (clarsimp simp: irqInvalid_def ucast_ucast_b is_up mask_def) + apply (ctac (no_vcg) add: handleInterrupt_ccorres) + apply (ctac (no_vcg) add: schedule_ccorres) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: activateThread_ccorres) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wp schedule_sch_act_wf schedule_invs' + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply simp + apply (rule_tac Q="\rv s. invs' s \ (\x. 
rv = Some x \ x \ AARCH64.maxIRQ) \ rv \ Some 0x3FF \ + sch_act_not (ksCurThread s) s" in hoare_post_imp) + apply (clarsimp simp: Kernel_C.maxIRQ_def AARCH64.maxIRQ_def) + apply (wp getActiveIRQ_le_maxIRQ getActiveIRQ_neq_Some0xFF | simp)+ + apply (clarsimp simp: invs'_def valid_state'_def) + done +qed + +lemma handleUnknownSyscall_ccorres: + "ccorres dc xfdc + (invs' and ct_running' and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (UNIV \ {s. of_nat n = w_' s}) [] + (callKernel (UnknownSyscall n)) (Call handleUnknownSyscall_'proc)" + apply (cinit' lift: w_') + apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) + apply (simp add: liftE_bind bind_assoc) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_pre_getCurThread) + apply (ctac (no_vcg) add: handleFault_ccorres) + apply (ctac (no_vcg) add: schedule_ccorres) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: activateThread_ccorres) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wp schedule_sch_act_wf schedule_invs' + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply (clarsimp, vcg) + apply (clarsimp, rule conseqPre, vcg, clarsimp) + apply clarsimp + apply (intro impI conjI allI) + apply fastforce + apply (rule active_ex_cap') + apply (erule active_from_running') + apply (erule invs_iflive') + apply (clarsimp simp: ct_in_state'_def) + apply (frule st_tcb_idle'[rotated]) + apply (erule invs_valid_idle') + apply (clarsimp simp: cfault_rel_def seL4_Fault_UnknownSyscall_lift is_cap_fault_def) + done + +lemma handleVMFaultEvent_ccorres: + "ccorres dc xfdc + (invs' and sch_act_simple and ct_running' and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (UNIV \ {s. vm_faultType_' s = vm_fault_type_from_H vmfault_type}) [] + (callKernel (VMFaultEvent vmfault_type)) (Call handleVMFaultEvent_'proc)" + apply (cinit' lift:vm_faultType_') + apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) + apply (simp add: liftE_bind bind_assoc) + apply (rule ccorres_pre_getCurThread) + apply (rename_tac thread) + apply (simp add: catch_def) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_split_nothrow_case_sum) + apply (ctac (no_vcg) add: handleVMFault_ccorres) + apply ceqv + apply clarsimp + apply clarsimp + apply (rule ccorres_cond_univ) + apply (rule_tac P="\s. 
ksCurThread s = thread" in ccorres_cross_over_guard) + apply (rule_tac xf'=xfdc in ccorres_call) + apply (ctac (no_vcg) add: handleFault_ccorres) + apply simp + apply simp + apply simp + apply (rule hvmf_invs_lift) + apply (simp add: invs'_machine valid_machine_state'_def) + apply (simp add: guard_is_UNIV_def) + apply (vcg exspec=handleVMFault_modifies) + apply ceqv + apply clarsimp + apply (ctac (no_vcg) add: schedule_ccorres) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: activateThread_ccorres) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wp schedule_sch_act_wf schedule_invs' + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply (case_tac rv, clarsimp, wp) + apply (clarsimp, wp, simp) + apply wp + apply (simp add: guard_is_UNIV_def) + apply (clarsimp simp: simple_sane_strg[unfolded sch_act_sane_not]) + apply (auto simp: ct_in_state'_def cfault_rel_def is_cap_fault_def + elim: pred_tcb'_weakenE st_tcb_ex_cap'' + dest: st_tcb_at_idle_thread' rf_sr_ksCurThread) + done + +lemma handleUserLevelFault_ccorres: + "ccorres dc xfdc + (invs' and sch_act_simple and ct_running' and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (UNIV \ {s. w_a_' s = word1} \ {s. w_b_' s = word2 }) [] + (callKernel (UserLevelFault word1 word2)) (Call handleUserLevelFault_'proc)" + apply (cinit' lift:w_a_' w_b_') + apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) + apply (simp add: liftE_bind bind_assoc) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_pre_getCurThread) + apply (ctac (no_vcg) add: handleFault_ccorres) + apply (ctac (no_vcg) add: schedule_ccorres) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: activateThread_ccorres) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wp schedule_sch_act_wf schedule_invs' + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply (clarsimp, vcg) + apply (clarsimp, rule conseqPre, vcg, clarsimp) + apply clarsimp + apply (intro impI conjI allI) + apply (simp add: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply simp + apply (rule active_ex_cap') + apply (erule active_from_running') + apply (erule invs_iflive') + apply (clarsimp simp: ct_in_state'_def) + apply (frule st_tcb_idle'[rotated]) + apply (erule invs_valid_idle') + apply simp + apply (clarsimp simp: cfault_rel_def seL4_Fault_UserException_lift) + apply (simp add: is_cap_fault_def) + done + +lemmas syscall_defs = + Kernel_C.SysSend_def Kernel_C.SysNBSend_def + Kernel_C.SysCall_def Kernel_C.SysRecv_def Kernel_C.SysNBRecv_def + Kernel_C.SysReply_def Kernel_C.SysReplyRecv_def Kernel_C.SysYield_def + +lemma ct_active_not_idle'_strengthen: + "invs' s \ ct_active' s \ ksCurThread s \ ksIdleThread s" + by clarsimp + +lemma invs'_irq_strg: + "invs' s \ (\y. rv = Some y \ P y s) \ \y. rv = Some y \ invs' s \ P y s" + by simp + +lemma dmo'_getActiveIRQ_non_kernel[wp]: + "\\\ doMachineOp (getActiveIRQ True) + \\rv s. \irq. 
rv = Some irq \ irq \ non_kernel_IRQs \ P irq s\" + unfolding doMachineOp_def + apply wpsimp + apply (drule use_valid, rule getActiveIRQ_neq_non_kernel, rule TrueI) + apply clarsimp + done + +lemma handleSyscall_ccorres: + "ccorres dc xfdc + (invs' and + sch_act_simple and ct_running' and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (UNIV \ {s. syscall_' s = syscall_from_H sysc }) [] + (callKernel (SyscallEvent sysc)) (Call handleSyscall_'proc)" + supply if_cong[cong] option.case_cong[cong] + apply (cinit' lift: syscall_') + apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) + apply (simp add: handleE_def handleE'_def) + apply (rule ccorres_split_nothrow_novcg) + apply wpc + prefer 3 + \ \SysSend\ + apply (clarsimp simp: syscall_from_H_def syscall_defs) + apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ + apply (simp add: handleSend_def) + apply (rule ccorres_split_nothrow_case_sum) + apply (ctac (no_vcg) add: handleInvocation_ccorres) + apply ceqv + apply clarsimp + apply (rule ccorres_cond_empty) + apply (rule ccorres_returnOk_skip[unfolded returnOk_def,simplified]) + apply clarsimp + apply (rule ccorres_cond_univ) + apply (simp add: liftE_def bind_assoc) + apply (ctac (no_vcg) add: getActiveIRQ_ccorres) + apply (rule ccorres_Guard)? + apply (simp only: irqInvalid_def)? + apply (rule_tac P="rv \ Some 0xFFFF" in ccorres_gen_asm) + apply (subst ccorres_seq_skip'[symmetric]) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac R=\ and xf=xfdc in ccorres_when) + apply (case_tac rv; clarsimp simp: irqInvalid_def mask_def) + apply (ctac (no_vcg) add: handleInterrupt_ccorres) + apply ceqv + apply (rule_tac r=dc and xf=xfdc in ccorres_returnOk_skip[unfolded returnOk_def,simplified]) + apply wp + apply (simp add: guard_is_UNIV_def) + apply clarsimp + apply (subst Ex_Some_conv | strengthen invs'_irq_strg | simp + | wp dmo'_getActiveIRQ_non_kernel getActiveIRQ_neq_Some0xFF)+ + apply (vcg exspec=handleInvocation_modifies) + prefer 3 + \ \SysNBSend\ + apply (clarsimp simp: syscall_from_H_def syscall_defs) + apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ + apply (simp add: handleSend_def) + apply (rule ccorres_split_nothrow_case_sum) + apply (ctac (no_vcg) add: handleInvocation_ccorres) + apply ceqv + apply clarsimp + apply (rule ccorres_cond_empty) + apply (rule ccorres_returnOk_skip[unfolded returnOk_def,simplified]) + apply clarsimp + apply (rule ccorres_cond_univ) + apply (simp add: liftE_def bind_assoc irqInvalid_def) + apply (ctac (no_vcg) add: getActiveIRQ_ccorres) + apply (rule_tac P="rv \ Some 0xFFFF" in ccorres_gen_asm) + apply (subst ccorres_seq_skip'[symmetric]) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_Guard)? 
+ apply (rule_tac R=\ and xf=xfdc in ccorres_when) + apply (case_tac rv; clarsimp simp: irqInvalid_def irqInvalid_def mask_def) + apply (ctac (no_vcg) add: handleInterrupt_ccorres) + apply ceqv + apply (rule_tac ccorres_returnOk_skip[unfolded returnOk_def,simplified]) + apply wp + apply (simp add: guard_is_UNIV_def) + apply clarsimp + apply (subst Ex_Some_conv | strengthen invs'_irq_strg | simp + | wp dmo'_getActiveIRQ_non_kernel getActiveIRQ_neq_Some0xFF)+ + apply (vcg exspec=handleInvocation_modifies) + \ \SysCall\ + apply (clarsimp simp: syscall_from_H_def syscall_defs) + apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ + apply (simp add: handleCall_def) + apply (rule ccorres_split_nothrow_case_sum) + apply (ctac (no_vcg) add: handleInvocation_ccorres) + apply ceqv + apply clarsimp + apply (rule ccorres_cond_empty) + apply (rule ccorres_returnOk_skip[unfolded returnOk_def,simplified]) + apply clarsimp + apply (rule ccorres_cond_univ) + apply (simp add: liftE_def bind_assoc irqInvalid_def) + apply (ctac (no_vcg) add: getActiveIRQ_ccorres) + apply (rule_tac P="rv \ Some 0xFFFF" in ccorres_gen_asm) + apply (subst ccorres_seq_skip'[symmetric]) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_Guard)? + apply (rule_tac R=\ and xf=xfdc in ccorres_when) + apply (case_tac rv; clarsimp simp: irqInvalid_def irqInvalid_def mask_def) + apply (ctac (no_vcg) add: handleInterrupt_ccorres) + apply ceqv + apply (rule_tac ccorres_returnOk_skip[unfolded returnOk_def,simplified]) + apply wp + apply (simp add: guard_is_UNIV_def) + apply clarsimp + apply (subst Ex_Some_conv | strengthen invs'_irq_strg | simp + | wp dmo'_getActiveIRQ_non_kernel getActiveIRQ_neq_Some0xFF)+ + apply (vcg exspec=handleInvocation_modifies) + prefer 2 + \ \SysRecv\ + apply (clarsimp simp: syscall_from_H_def syscall_defs) + apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ + apply (simp add: liftE_bind) + apply (subst ccorres_seq_skip'[symmetric]) + apply (ctac (no_vcg) add: handleRecv_ccorres) + apply (rule ccorres_returnOk_skip[unfolded returnOk_def, simplified]) + apply wp + prefer 2 + \ \SysReply\ + apply (clarsimp simp: syscall_from_H_def syscall_defs) + apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ + apply (simp add: liftE_bind) + apply (subst ccorres_seq_skip'[symmetric]) + apply (ctac (no_vcg) add: handleReply_ccorres) + apply (rule ccorres_returnOk_skip[unfolded returnOk_def, simplified]) + apply wp + \ \SysReplyRecv\ + apply (clarsimp simp: syscall_from_H_def syscall_defs) + apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ + apply (simp add: liftE_bind bind_assoc) + apply (ctac (no_vcg) add: handleReply_ccorres) + apply (subst ccorres_seq_skip'[symmetric]) + apply (ctac (no_vcg) add: handleRecv_ccorres) + apply (rule ccorres_returnOk_skip[unfolded returnOk_def, simplified]) + apply wp[1] + apply clarsimp + apply wp + apply (rule_tac Q="\rv s. 
ct_in_state' simple' s \ sch_act_sane s" + in hoare_post_imp) + apply (simp add: ct_in_state'_def) + apply (wp handleReply_sane) + \ \SysYield\ + apply (clarsimp simp: syscall_from_H_def syscall_defs) + apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ + apply (simp add: liftE_bind) + apply (subst ccorres_seq_skip'[symmetric]) + apply (ctac (no_vcg) add: handleYield_ccorres) + apply (rule ccorres_returnOk_skip[unfolded returnOk_def, simplified]) + apply wp + \ \SysNBRecv\ + apply (clarsimp simp: syscall_from_H_def syscall_defs) + apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ + apply (simp add: liftE_bind) + apply (subst ccorres_seq_skip'[symmetric]) + apply (ctac (no_vcg) add: handleRecv_ccorres) + apply (rule ccorres_returnOk_skip[unfolded returnOk_def, simplified]) + apply wp + \ \rest of body\ + apply ceqv + apply (ctac (no_vcg) add: schedule_ccorres) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: activateThread_ccorres) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wp schedule_invs' schedule_sch_act_wf + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply (wpsimp wp: hoare_vcg_if_lift3) + apply (strengthen non_kernel_IRQs_strg[where Q=True, simplified]) + apply (wpsimp wp: hoare_drop_imps) + apply (simp + | wpc + | wp hoare_drop_imp handleReply_sane handleReply_nonz_cap_to_ct schedule_invs' + | strengthen ct_active_not_idle'_strengthen invs_valid_objs_strengthen)+ + apply (rule_tac Q="\rv. invs' and ct_active'" in hoare_post_imp, simp) + apply (wp hy_invs') + apply (clarsimp simp add: liftE_def) + apply wp + apply (rule_tac Q="\rv. invs' and ct_active'" in hoare_post_imp, simp) + apply (wp hy_invs') + apply (clarsimp simp: liftE_def) + apply (wp) + apply (rule_tac Q="\_. invs'" in hoare_post_imp, simp) + apply (wp hw_invs') + apply (simp add: guard_is_UNIV_def) + apply clarsimp + apply (drule active_from_running') + apply (frule active_ex_cap') + apply (clarsimp simp: invs'_def valid_state'_def) + apply (clarsimp simp: simple_sane_strg ct_in_state'_def st_tcb_at'_def obj_at'_def + isReply_def irqInvalid_def) + apply (auto simp: syscall_from_H_def Kernel_C.SysSend_def mask_def + split: option.split_asm) + done + +lemma ccorres_corres_u: + "\ ccorres dc xfdc P (Collect P') [] H C; no_fail P H \ \ + corres_underlying rf_sr nf nf' dc P P' H (exec_C \ C)" + apply (clarsimp simp: ccorres_underlying_def corres_underlying_def) + apply (drule (1) bspec) + apply (clarsimp simp: exec_C_def no_fail_def) + apply (rule conjI) + apply clarsimp + apply (erule_tac x=0 in allE) + apply (erule_tac x="Normal y" in allE) + apply simp + apply (erule impE) + apply (drule EHOther [where hs="[]"], simp) + apply simp + apply fastforce + apply clarsimp + apply (case_tac xs, simp_all) + apply (fastforce intro: EHAbrupt EHEmpty) + apply (fastforce intro: EHOther)+ + done + +lemma ccorres_corres_u_xf: + "\ ccorres rel xf P (Collect P') [] H C; no_fail P H \ \ + corres_underlying rf_sr nf nf' rel P P' H ((exec_C \ C) >>= (\_. 
gets xf))" + apply (clarsimp simp: ccorres_underlying_def corres_underlying_def) + apply (drule (1) bspec) + apply (clarsimp simp: exec_C_def no_fail_def) + apply (drule_tac x = a in spec) + apply (clarsimp simp:gets_def Nondet_Monad.bind_def get_def return_def) + apply (rule conjI) + apply clarsimp + apply (erule_tac x=0 in allE) + apply (erule_tac x="Normal y" in allE) + apply simp + apply (erule impE) + apply (drule EHOther [where hs="[]"], simp) + apply simp + apply (simp add: unif_rrel_def) + apply (clarsimp simp:image_def) + apply (case_tac xs, simp_all) + apply (fastforce intro: EHAbrupt EHEmpty) + apply (fastforce intro: EHOther)+ + done + +definition + "all_invs' e \ \s'. \s :: det_state. + (s,s') \ state_relation \ + (einvs s \ (e \ Interrupt \ ct_running s) \ (ct_running s \ ct_idle s) \ + scheduler_action s = resume_cur_thread \ domain_time s \ 0 \ valid_domain_list s) \ + (invs' s' \ + (e \ Interrupt \ ct_running' s') \ (ct_running' s' \ ct_idle' s') \ + ksSchedulerAction s' = ResumeCurrentThread \ ksDomainTime s' \ 0)" + +lemma no_fail_callKernel: + "no_fail (all_invs' e) (callKernel e)" + unfolding all_invs'_def + apply (rule corres_nofail) + apply (rule corres_guard_imp) + apply (rule kernel_corres) + apply (force simp: word_neq_0_conv schact_is_rct_def) + apply (simp add: sch_act_simple_def) + apply metis + done + +(* this variant only used for armv_handleVCPUFault_ccorres *) +lemma handleUserLevelFault_ccorres': + "ccorres dc xfdc + (invs' and sch_act_simple and ct_running' and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + ({s. w_a_' s = word1} \ {s. w_b_' s = word2 }) [] + (do thread <- getCurThread; + rv <- handleFault thread (Fault_H.fault.UserException (word1 AND mask 32) (word2 AND mask 28)); + y <- ThreadDecls_H.schedule; + y <- activateThread; + stateAssert kernelExitAssertions [] + od) + (Call handleUserLevelFault_'proc)" + apply (cinit' lift:w_a_' w_b_') + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_pre_getCurThread) + apply (ctac (no_vcg) add: handleFault_ccorres) + apply (ctac (no_vcg) add: schedule_ccorres) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: activateThread_ccorres) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wp schedule_sch_act_wf schedule_invs' + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply (clarsimp, vcg) + apply (clarsimp, rule conseqPre, vcg, clarsimp) + apply clarsimp + apply (intro impI conjI allI) + apply (simp add: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply simp + apply (rule active_ex_cap') + apply (erule active_from_running') + apply (erule invs_iflive') + apply (clarsimp simp: ct_in_state'_def) + apply (frule st_tcb_idle'[rotated]) + apply (erule invs_valid_idle') + apply (clarsimp simp: cfault_rel_def seL4_Fault_UserException_lift) + apply (simp add: is_cap_fault_def) + done + +(* ignore FPU status, but deal with UNKNOWN_FAULT *) +lemma armv_handleVCPUFault_unknown_fault_ccorres: + "ccorres (\rv rv'. rv' = from_bool True) ret__unsigned_long_' + (invs' and sch_act_simple and ct_running' and + (\s. 
ksSchedulerAction s = ResumeCurrentThread) and K (hsr = 0x2000000)) + (\ \hsr___unsigned_long = hsr \) hs + (do fpu_enabled <- doMachineOp isFpuEnable; + (do esr <- doMachineOp getESR; + curThread <- getCurThread; + handleFault curThread (Fault_H.fault.UserException (esr AND mask 32) 0); + ThreadDecls_H.schedule; + activateThread; + stateAssert kernelExitAssertions [] + od) + od) + (Call armv_handleVCPUFault_'proc)" + supply Collect_const[simp del] + apply (rule ccorres_grab_asm) + apply (cinit' lift: hsr___unsigned_long_') + apply simp + apply ccorres_rewrite + (* we are discarding the entire FPU-fault handling IF calculation because we know isFpuEnable + is abstracted to return True until the FPU model is updated, and so the Haskell side does not + even feature the HSR calculation for the FPU fault *) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=ret__int_' and r'="\rv rv'. rv = True \ rv' = from_bool (\rv)" + in ccorres_split_nothrow[where P=\ and P'=UNIV]) + (* now we have to go through all the IF branches on the C side, because some will result in + false without calling isFpuEnable *) + apply (rule ccorres_cond_seq2[THEN iffD1]) + apply ccorres_rewrite + apply (rule ccorres_guard_imp) + apply csymbr + apply simp + apply ccorres_rewrite + apply csymbr + apply simp + apply ccorres_rewrite + apply (simp add: isFpuEnable_def) + apply (rule ccorres_inst[where P=\ and P'="\\ret__int = false\"]) + apply (rule_tac ccorres_from_vcg) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply clarsimp + apply clarsimp + apply ceqv + apply simp + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc) + apply (ctac (no_vcg) add: getESR_ccorres) + apply clarsimp + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: handleUserLevelFault_ccorres'[where ?word2.0=0, simplified]) + apply simp + apply (rule ccorres_return_C) + apply clarsimp + apply clarsimp + apply clarsimp + apply wpsimp+ + apply (simp add: isFpuEnable_def) + apply wpsimp + apply clarsimp + apply (vcg exspec=isFpuEnable_modifies) + apply clarsimp + done + +(* ignore FPU status, no UNKNOWN_FAULT *) +lemma armv_handleVCPUFault_no_op_ccorres: + "ccorres (\rv rv'. rv' = from_bool False) ret__unsigned_long_' + (invs' and sch_act_simple and ct_running' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and K (hsr \ 0x2000000)) + (\ \hsr___unsigned_long = hsr \) hs + (do fpu_enabled <- doMachineOp isFpuEnable; + return () + od) + (Call armv_handleVCPUFault_'proc)" + supply Collect_const[simp del] + apply (rule ccorres_grab_asm) + apply (cinit' lift: hsr___unsigned_long_') + apply simp + apply ccorres_rewrite + (* we are discarding the entire FPU-fault handling IF calculation because we know isFpuEnable + is abstracted to return True until the FPU model is updated, and so the Haskell side does not + even feature the HSR calculation for the FPU fault *) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=ret__int_' and r'="\rv rv'. 
rv = True \ rv' = from_bool (\rv)" + in ccorres_split_nothrow[where P=\ and P'=UNIV]) + (* now we have to go through all the IF branches on the C side, because some will result in + false without calling isFpuEnable *) + apply (rule ccorres_cond_seq2[THEN iffD1]) + apply ccorres_rewrite + apply (rule ccorres_guard_imp) + apply csymbr + apply simp + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: isFpuEnable_ccorres) + apply (rule_tac P="rv" in ccorres_gen_asm) + apply (simp add: from_bool_0 true_def) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wpsimp simp: isFpuEnable_def) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (simp add: isFpuEnable_def) + apply csymbr + apply simp + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: isFpuEnable_ccorres) + apply (rule_tac P="rv" in ccorres_gen_asm) + apply (simp add: from_bool_0 true_def) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wpsimp simp: isFpuEnable_def) + apply (rule ccorres_inst[where P=\ and P'="\\ret__int = false\"]) + apply (simp add: isFpuEnable_def) + apply (rule ccorres_inst[where P=\ and P'="\\ret__int = false\"]) + apply (rule_tac ccorres_from_vcg) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply clarsimp + apply clarsimp + apply ceqv + apply simp + apply ccorres_rewrite + apply (rule ccorres_return_C; clarsimp) + apply wpsimp+ + apply (wpsimp simp: isFpuEnable_def) + apply clarsimp + apply (vcg exspec=isFpuEnable_modifies) + apply clarsimp + done + +(* getCurThread is shared between branches in handleVCPUFault, placing it in wrong order for + specific cases *) +lemma handleVCPUFault_ccorres_getCurThread_helper: + "monadic_rewrite F E \ + (do thread <- getCurThread; + esr <- doMachineOp getESR; + f esr thread + od) + (do esr <- doMachineOp getESR; + thread <- getCurThread; + f esr thread + od)" + apply (simp add: getCurThread_def) + apply monadic_rewrite_pre + apply monadic_rewrite_symb_exec_l + apply (rule monadic_rewrite_bind_tail) + apply monadic_rewrite_symb_exec_r + apply (rule_tac P="threada = thread" in monadic_rewrite_gen_asm, simp) + apply (rule monadic_rewrite_refl) + apply wpsimp+ + done + +lemma handleVCPUFault_ccorres: + "ccorres dc xfdc + (invs' and ct_running' and (\s. 
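The monadic_rewrite helper above (handleVCPUFault_ccorres_getCurThread_helper) swaps getCurThread and getESR; this is sound because both are read-only queries, so swapping them changes neither the values passed to the continuation nor the state. A minimal Haskell sketch of this commuting-reads argument, with a hypothetical two-field record standing in for the real kernel state (KState, getCurThreadQ and getESRQ are inventions for illustration):

import Control.Monad.State

-- Hypothetical stand-in for the kernel state: only the two values being read.
data KState = KState { curThread :: Int, esr :: Int } deriving (Eq, Show)

getCurThreadQ :: State KState Int
getCurThreadQ = gets curThread

getESRQ :: State KState Int
getESRQ = gets esr

-- Both orderings feed the same values to the continuation f, and neither read
-- modifies the state, so the two programs denote the same state function.
threadFirst, esrFirst :: (Int -> Int -> State KState a) -> State KState a
threadFirst f = do { t <- getCurThreadQ; e <- getESRQ; f e t }
esrFirst    f = do { e <- getESRQ; t <- getCurThreadQ; f e t }

main :: IO ()
main = print (runState (threadFirst k) s0 == runState (esrFirst k) s0)
  where k e t = return (e * 1000 + t)
        s0    = KState 7 0x2000000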
ksSchedulerAction s = ResumeCurrentThread)) + (\ ucast hsr = \hsr___unsigned_long \) hs + (callKernel (HypervisorEvent (ARMVCPUFault hsr))) (Call handleVCPUFault_'proc)" + apply (cinit' lift: hsr___unsigned_long_') + apply (simp add: callKernel_def handleEvent_def handleHypervisorFault_def) + apply (rule ccorres_stateAssert) + apply (simp add: liftE_def bind_assoc) + apply (rule ccorres_cases[where P="hsr = 0x2000000"]; simp cong: if_cong) + (* UNKNOWN_FAULT case, armv_handleVCPUFault handles fault and returns true, ending operations *) + apply (simp add: isFpuEnable_def bind_assoc) (* isFpuEnable always true *) + apply (rule monadic_rewrite_ccorres_assemble[OF _ handleVCPUFault_ccorres_getCurThread_helper]) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: armv_handleVCPUFault_unknown_fault_ccorres[simplified isFpuEnable_def, + simplified, + where hsr="ucast hsr"]) + apply simp + apply ccorres_rewrite + apply (rule ccorres_return_void_C) + apply wpsimp + (* not UNKNOWN_FAULT case, armv_handleVCPUFault won't do anything *) + apply (simp add: isFpuEnable_def bind_assoc) + apply (rule ccorres_add_return) + apply (ctac (no_vcg) add: armv_handleVCPUFault_no_op_ccorres[simplified isFpuEnable_def, + simplified, + where hsr="ucast hsr"]) + apply simp + apply (rule ccorres_pre_getCurThread, rename_tac curThread) + apply (rule ccorres_symb_exec_r) + apply (ctac (no_vcg) add: handleFault_ccorres) + apply (ctac (no_vcg) add: schedule_ccorres) + apply (rule ccorres_stateAssert_after) + apply (rule ccorres_guard_imp) + apply (ctac (no_vcg) add: activateThread_ccorres) + apply (clarsimp, assumption) + apply assumption + apply (wp schedule_sch_act_wf schedule_invs' + | strengthen invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct')+ + apply vcg + apply (clarsimp, rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: ct_running_imp_simple' ucast_and_mask_drop) + apply (clarsimp simp: cfault_rel_def seL4_Fault_VCPUFault_lift is_cap_fault_def) + apply (subst (asm) up_ucast_inj_eq[symmetric, where 'b=machine_word_len]; simp) + apply (rule active_ex_cap', erule active_from_running', fastforce) + done + +lemma handleHypervisorEvent_ccorres: + "ccorres dc xfdc + ((all_invs' (HypervisorEvent e)) and sch_act_simple) + UNIV [] + (callKernel (HypervisorEvent t)) (handleHypervisorEvent_C t)" + apply (simp add: handleHypervisorEvent_C_def) + apply (cases t, clarsimp) + apply (rule ccorres_guard_imp) + apply (rule ccorres_call) + apply (rule handleVCPUFault_ccorres, (simp+)) + apply (clarsimp simp: all_invs'_def) + apply fastforce + done + +lemma callKernel_corres_C: + "corres_underlying rf_sr False True dc + (all_invs' e) + \ + (callKernel e) (callKernel_C e)" + using no_fail_callKernel [of e] + apply (clarsimp simp: callKernel_C_def) + apply (cases e, simp_all) + prefer 4 + apply (rule ccorres_corres_u) + apply simp + apply (rule ccorres_guard_imp) + apply (rule handleInterruptEntry_ccorres) + apply (clarsimp simp: all_invs'_def sch_act_simple_def) + apply simp + apply assumption + prefer 2 + apply (rule ccorres_corres_u [rotated], assumption) + apply simp + apply (rule ccorres_guard_imp) + apply (rule ccorres_call) + apply (rule handleUnknownSyscall_ccorres) + apply (clarsimp simp: all_invs'_def sch_act_simple_def)+ + prefer 3 + apply (rule ccorres_corres_u [rotated], assumption) + apply (rule ccorres_guard_imp) + apply (rule ccorres_call) + apply (rule handleVMFaultEvent_ccorres) + apply (clarsimp simp: all_invs'_def sch_act_simple_def)+ + prefer 2 + apply (rule 
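The case split in handleVCPUFault_ccorres above follows the shape of the C entry point: the architecture hook armv_handleVCPUFault is consulted first, and only an HSR of 0x2000000 (the value the proofs treat as UNKNOWN_FAULT) is consumed by it; every other HSR falls through to a VCPU fault for the current thread. A rough Haskell sketch of that dispatch, where hsrUnknownFault, armvHandleVCPUFaultStub and deliverVCPUFaultStub are hypothetical stand-ins for the real fault-delivery paths:

import Data.Word (Word64)

-- Hypothetical constant: the HSR value the proofs above treat as UNKNOWN_FAULT.
hsrUnknownFault :: Word64
hsrUnknownFault = 0x2000000

-- Stand-in for armv_handleVCPUFault: handles only the unknown-fault case itself
-- (as a user exception) and reports whether it did so.
armvHandleVCPUFaultStub :: Word64 -> IO Bool
armvHandleVCPUFaultStub hsr
  | hsr == hsrUnknownFault = putStrLn "deliver UserException to current thread" >> return True
  | otherwise              = return False

-- Stand-in for the generic path: deliver a VCPU fault to the current thread.
deliverVCPUFaultStub :: Word64 -> IO ()
deliverVCPUFaultStub hsr = putStrLn ("deliver VCPUFault, hsr = " ++ show hsr)

-- Dispatch shape mirrored from handleVCPUFault above: if the arch hook already
-- handled the fault, stop; otherwise raise a VCPU fault.
handleVCPUFaultSketch :: Word64 -> IO ()
handleVCPUFaultSketch hsr = do
  handled <- armvHandleVCPUFaultStub hsr
  if handled then return () else deliverVCPUFaultStub hsr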
ccorres_corres_u [rotated], assumption) + apply (rule ccorres_guard_imp) + apply (rule ccorres_call) + apply (rule handleUserLevelFault_ccorres) + apply (clarsimp simp: all_invs'_def sch_act_simple_def)+ + apply (rule ccorres_corres_u [rotated], assumption) + apply (rule ccorres_guard_imp) + apply (rule ccorres_call) + apply (rule handleSyscall_ccorres) + apply (clarsimp simp: all_invs'_def sch_act_simple_def)+ + apply (rule ccorres_corres_u [rotated], assumption) + apply (rule ccorres_guard_imp) + apply (rule handleHypervisorEvent_ccorres) + apply (clarsimp simp: all_invs'_def sch_act_simple_def) + apply simp + done + +lemma ccorres_add_gets: + "ccorresG rf_sr \ rv xf P P' hs (do v \ gets f; m od) c + \ ccorresG rf_sr \ rv xf P P' hs m c" + by (simp add: gets_bind_ign) + +lemma ccorres_get_registers: + "\ \cptr msgInfo. ccorres dc xfdc + ((\s. P s \ Q s \ + obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb AARCH64_H.capRegister = cptr + \ (user_regs o atcbContextGet o tcbArch) tcb AARCH64_H.msgInfoRegister = msgInfo) + (ksCurThread s) s) and R) + (\\cptr = cptr\ \ \\msgInfo = msgInfo\) [] m c \ + \ + ccorres dc xfdc + (P and Q and ct_in_state' \ and R) + {s. \v. cslift s (ksCurThread_' (globals s)) = Some v + \ cptr_' s = index (registers_C (tcbContext_C (tcbArch_C v))) (unat Kernel_C.capRegister) + \ msgInfo_' s = index (registers_C (tcbContext_C (tcbArch_C v))) (unat Kernel_C.msgInfoRegister)} [] + m c" + apply (rule ccorres_assume_pre) + apply (clarsimp simp: ct_in_state'_def st_tcb_at'_def) + apply (drule obj_at_ko_at', clarsimp) + apply (erule_tac x="(user_regs o atcbContextGet o tcbArch) ko AARCH64.capRegister" in meta_allE) + apply (erule_tac x="(user_regs o atcbContextGet o tcbArch) ko AARCH64.msgInfoRegister" in meta_allE) + apply (erule ccorres_guard_imp2) + apply (clarsimp simp: rf_sr_ksCurThread) + apply (drule(1) obj_at_cslift_tcb, clarsimp simp: obj_at'_def) + apply (clarsimp simp: ctcb_relation_def ccontext_relation_def cregs_relation_def + C_register_defs AARCH64.capRegister_def AARCH64_H.capRegister_def + AARCH64.msgInfoRegister_def AARCH64_H.msgInfoRegister_def + carch_tcb_relation_def) + done + +lemma callKernel_withFastpath_corres_C: + "corres_underlying rf_sr False True dc + (all_invs' e) + \ + (callKernel e) (callKernel_withFastpath_C e)" + using no_fail_callKernel [of e] callKernel_corres_C [of e] + apply (cases "e = SyscallEvent syscall.SysCall \ + e = SyscallEvent syscall.SysReplyRecv") + apply (simp_all add: callKernel_withFastpath_C_def + del: Collect_const cong: call_ignore_cong) + apply (erule ccorres_corres_u[rotated]) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r)+ + apply (rule ccorres_Cond_rhs) + apply simp + apply (ctac add: ccorres_get_registers[OF fastpath_call_ccorres_callKernel]) + apply simp + apply (ctac add: ccorres_get_registers[OF fastpath_reply_recv_ccorres_callKernel]) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (clarsimp simp: all_invs'_def rf_sr_ksCurThread) + apply (frule(1) obj_at_cslift_tcb[OF tcb_at_invs']) + apply (clarsimp simp: typ_heap_simps' ct_in_state'_def + "StrictC'_register_defs" word_sle_def word_sless_def + st_tcb_at'_opeq_simp) + apply (frule ready_qs_runnable_cross, (fastforce simp: valid_sched_def)+) + apply (rule conjI, fastforce simp: st_tcb_at'_def) + apply (auto elim!: pred_tcb'_weakenE cnode_caps_gsCNodes_from_sr[rotated]) + done + +lemma tcb_vcpu_context_set_id[simp]: + "tcb_vcpu 
(arch_tcb_context_set f (tcb_arch tcb)) = tcb_vcpu (tcb_arch tcb)" + unfolding arch_tcb_context_set_def + by simp + +lemma threadSet_all_invs_triv': + "\all_invs' e and (\s. t = ksCurThread s)\ + threadSet (\tcb. tcbArch_update (\_. atcbContextSet f (tcbArch tcb)) tcb) t \\_. all_invs' e\" + unfolding all_invs'_def + apply (rule hoare_pre) + apply (rule wp_from_corres_unit) + apply (rule threadset_corresT [where f="tcb_arch_update (arch_tcb_context_set f)"]) + apply (simp add: tcb_relation_def arch_tcb_context_set_def + atcbContextSet_def arch_tcb_relation_def) + apply (simp add: tcb_cap_cases_def) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: exst_same_def) + apply (wp thread_set_invs_trivial thread_set_ct_running thread_set_not_state_valid_sched + threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp + thread_set_ct_in_state + | simp add: tcb_cap_cases_def tcb_arch_ref_def exst_same_def + | rule threadSet_ct_in_state' + | wp (once) hoare_vcg_disj_lift)+ + apply clarsimp + apply (rename_tac s s') + apply (rule exI, rule conjI, assumption) + apply (prop_tac "invs s'") + apply (clarsimp simp: invs_def) + apply (clarsimp simp: invs_psp_aligned invs_distinct) + apply (clarsimp simp: invs_def cur_tcb_def cur_tcb'_def state_relation_def) + done + +lemma getContext_corres: + "t' = tcb_ptr_to_ctcb_ptr t \ + corres_underlying rf_sr False True (=) (tcb_at' t) \ + (threadGet (atcbContextGet o tcbArch) t) (gets (getContext_C t'))" + apply (clarsimp simp: corres_underlying_def simpler_gets_def) + apply (drule obj_at_ko_at') + apply clarsimp + apply (frule threadGet_eq) + apply (rule bexI) + prefer 2 + apply assumption + apply clarsimp + apply (clarsimp simp: getContext_C_def) + apply (drule cmap_relation_ko_atD [rotated]) + apply fastforce + apply (clarsimp simp: typ_heap_simps ctcb_relation_def carch_tcb_relation_def from_user_context_C) + done + +lemma callKernel_cur: + "\all_invs' e\ callKernel e \\rv s. tcb_at' (ksCurThread s) s\" + apply (rule hoare_chain) + apply (rule ckernel_invs) + apply (clarsimp simp: all_invs'_def sch_act_simple_def) + apply clarsimp + done + +lemma entry_corres_C: + "corres_underlying rf_sr False True (=) + (all_invs' e) + \ + (kernelEntry e uc) (kernelEntry_C fp e uc)" + apply (simp add: kernelEntry_C_def kernelEntry_def getCurThread_def) + apply (rule corres_guard_imp) + apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. t' = tcb_ptr_to_ctcb_ptr t"]) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (rule corres_split) + apply (rule setTCBContext_C_corres, rule ccontext_rel_to_C, simp) + apply simp + apply (rule corres_split) + apply (rule corres_cases[where R=fp]; simp) + apply (rule callKernel_withFastpath_corres_C) + apply (rule callKernel_corres_C) + apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. t' = tcb_ptr_to_ctcb_ptr t"]) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (rule getContext_corres, simp) + apply (wp threadSet_all_invs_triv' callKernel_cur)+ + apply (clarsimp simp: all_invs'_def invs'_def cur_tcb'_def valid_state'_def) + apply simp + done + +lemma entry_refinement_C: + "\all_invs' e s; (s, t) \ rf_sr \ + \ \ snd (kernelEntry_C fp e tc t) + \ (\tc' t'. (tc',t') \ fst (kernelEntry_C fp e tc t) + \ (\s'. 
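entry_corres_C above relates the two kernel entry functions, whose common shape is: write the incoming user context into the current thread's TCB, run the kernel on the event (via the fastpath or the slow path), then read back the context of whichever thread is current afterwards. A small Haskell sketch of that shape, with a hypothetical map-based TCB store (KSt, callKernelStub and kernelEntrySketch are illustrative names only):

import qualified Data.Map as M
import Control.Monad.State

type TPtr        = Int
type UserContext = [Int]               -- placeholder register file

data KSt = KSt { ksCurThread :: TPtr
               , ksContexts  :: M.Map TPtr UserContext }

-- Stand-in for callKernel / the fastpath: any kernel step fits here.
callKernelStub :: String -> State KSt ()
callKernelStub _ = return ()

-- Shape of kernelEntry: store the context, run the kernel, read a context back.
kernelEntrySketch :: String -> UserContext -> State KSt UserContext
kernelEntrySketch event uc = do
  t <- gets ksCurThread
  modify (\s -> s { ksContexts = M.insert t uc (ksContexts s) })
  callKernelStub event
  t' <- gets ksCurThread
  gets (M.findWithDefault uc t' . ksContexts)

main :: IO ()
main = print (evalState (kernelEntrySketch "Interrupt" [1, 2, 3]) (KSt 0 M.empty))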
(tc', s') \ fst (kernelEntry e tc s) \ (s',t') \ rf_sr))" + using entry_corres_C [of e] + by (fastforce simp add: corres_underlying_def) + +lemma ct_running'_C: + "\ (s, t) \ rf_sr; invs' s \ \ ct_running' s = ct_running_C t" + apply (simp add: ct_running_C_def Let_def ct_in_state'_def st_tcb_at'_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def cpspace_relation_def Let_def) + apply (rule iffI) + apply (drule obj_at_ko_at') + apply clarsimp + apply (erule (1) cmap_relation_ko_atE) + apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) + apply clarsimp + apply (drule (1) cmap_relation_cs_atD [where addr_fun=tcb_ptr_to_ctcb_ptr]) + apply simp + apply clarsimp + apply (frule (1) map_to_ko_atI') + apply (erule obj_at'_weakenE) + apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) + apply (case_tac "tcbState ko"; simp add: ThreadState_defs) + done + +lemma full_invs_both: + "ADT_H uop \ + {s'. \s. (s,s') \ lift_state_relation state_relation \ + s \ full_invs \ s' \ full_invs'}" + apply (rule fw_inv_transport) + apply (rule akernel_invariant) + apply (rule ckernel_invariant) + apply (rule fw_sim_A_H) + done +end + +context kernel_m +begin + +lemma user_memory_update_corres_C_helper: + "\(a, b) \ rf_sr; pspace_aligned' a; pspace_distinct' a; + dom um \ dom (user_mem' a)\ + \ (ksMachineState_update + (underlying_memory_update + (\m. foldl (\f p. f(p := the (um p))) m [p\e. p \ dom um])) a, + b\globals := globals b + \t_hrs_' := + (foldl (\f p. f(p := the (um p))) (fst (t_hrs_' (globals b))) + [p\e. p \ dom um], + snd (t_hrs_' (globals b)))\\) + \ rf_sr" +apply (induct e) + apply simp + apply (subgoal_tac + "ksMachineState_update (underlying_memory_update (\m. m)) a = a") + apply (simp (no_asm_simp)) + apply simp +apply (rename_tac x xs) +apply (simp add: foldl_fun_upd_eq_foldr) +apply (case_tac "x \ dom um", simp_all) +apply (frule_tac ptr=x and b="the (um x)" in storeByteUser_rf_sr_upd) + apply simp + apply simp + apply (thin_tac "(x,y) : rf_sr" for x y)+ + apply (fastforce simp add: pointerInUserData_def dom_user_mem') +apply (simp add: o_def hrs_mem_update_def) +done + +lemma user_memory_update_corres_C: + "corres_underlying rf_sr False nf (%_ _. True) + (\s. pspace_aligned' s \ pspace_distinct' s \ dom um \ dom (user_mem' s)) + \ + (doMachineOp (user_memory_update um)) (setUserMem_C um)" + supply if_cong[cong] option.case_cong[cong] + apply (clarsimp simp: corres_underlying_def) + apply (rule conjI) + prefer 2 + apply (clarsimp simp add: setUserMem_C_def simpler_modify_def) + apply (subgoal_tac + "doMachineOp (user_memory_update um) a = + modify (ksMachineState_update (underlying_memory_update + (\m. foldl (\f p. f(p := the (um p))) m [p\enum. 
p \ dom um]))) + a") + prefer 2 + apply (clarsimp simp add: doMachineOp_def user_memory_update_def + simpler_modify_def simpler_gets_def select_f_def + Nondet_Monad.bind_def return_def) + apply (thin_tac P for P)+ + apply (case_tac a, clarsimp) + apply (case_tac ksMachineState, clarsimp) + apply (rule ext) + apply (simp add: foldl_fun_upd_value dom_def split: option.splits) + apply clarsimp + apply (cut_tac s'=a and s="globals b" in user_mem_C_relation[symmetric]) + apply (simp add: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) + apply simp+ + apply (simp add: setUserMem_C_def_foldl) + apply (clarsimp simp add: simpler_modify_def) + apply (thin_tac "doMachineOp p s = x" for p s x) + apply (drule sym, simp) + apply (rule user_memory_update_corres_C_helper,auto)[1] + done + +lemma device_update_corres_C: + "corres_underlying rf_sr False nf (=) (\_. True) (\_. True) + (doMachineOp (device_memory_update ms)) + (setDeviceState_C ms)" + apply (clarsimp simp: corres_underlying_def) + apply (rule conjI) + prefer 2 + apply (clarsimp simp add: setDeviceState_C_def simpler_modify_def) + apply (rule ballI) + apply (clarsimp simp: simpler_modify_def setDeviceState_C_def) + apply (clarsimp simp: doMachineOp_def device_memory_update_def Nondet_Monad.bind_def in_monad + gets_def get_def return_def simpler_modify_def select_f_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + cmachine_state_relation_def) + done + +lemma mem_dom_split: + "(dom um \ dom (user_mem' s) \ dom (device_mem' s)) + \ um = restrict_map um (dom (user_mem' s)) ++ restrict_map um (dom (device_mem' s))" + apply (rule ext) + apply (auto simp: map_add_def restrict_map_def split:if_splits option.splits) + done + +lemma dom_if_rewrite: + "dom (\x. if P x then Some (f x) else None) = dom (\x. if P x then Some () else None)" + by (auto split:if_splits) + +crunch dmo_typ_at_pre_dom[wp]: doMachineOp "\s. P (dom (\x. if typ_at' T (x && ~~ mask pageBits) s then Some () else None))" + (wp: crunch_wps simp: crunch_simps device_mem'_def) + +lemma dmo_domain_device_mem'[wp]: + "\\s. P (dom (device_mem' s))\ doMachineOp opfun \\rv sa. P (dom (device_mem' sa))\" + apply (simp add:device_mem'_def pointerInDeviceData_def) + apply (rule hoare_pre) + apply (subst dom_if_rewrite) + apply (wp doMachineOp_typ_at') + apply (erule arg_cong[where f = P,THEN iffD1,rotated]) + apply (auto split:if_splits) + done + +lemma dmo_domain_user_mem'[wp]: + "\\s. P (dom (user_mem' s))\ doMachineOp opfun \\rv sa. 
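The helper above pushes the partial user-memory update um into the byte heap one write at a time, as a foldl of single-byte updates over the domain of um; the net effect on the heap is just um overriding the old contents. A Haskell rendering of that fold and of the one-step override it computes, with both the memory and the update as maps (applyByByte and applyAtOnce are illustrative names):

import qualified Data.Map as M
import Data.Word (Word8, Word64)

type Mem = M.Map Word64 Word8

-- One byte at a time, mirroring the foldl in the helper lemma.
applyByByte :: Mem -> M.Map Word64 Word8 -> Mem
applyByByte mem um = foldl (\m p -> M.insert p (um M.! p) m) mem (M.keys um)

-- ... which is the same as overriding mem with um in a single step.
applyAtOnce :: Mem -> M.Map Word64 Word8 -> Mem
applyAtOnce mem um = M.union um mem    -- left-biased: um wins on its domain

main :: IO ()
main = print (applyByByte mem um == applyAtOnce mem um)
  where
    mem = M.fromList [(p, fromIntegral p) | p <- [0 .. 7]]
    um  = M.fromList [(1, 0xAA), (5, 0xBB)]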
P (dom (user_mem' sa))\" + apply (simp add:user_mem'_def pointerInUserData_def) + apply (rule hoare_pre) + apply (subst dom_if_rewrite) + apply (wp doMachineOp_typ_at') + apply (erule arg_cong[where f = P,THEN iffD1,rotated]) + apply (auto split:if_splits) + done + +lemma do_user_op_corres_C: + "corres_underlying rf_sr False False (=) + (invs' and ksReadyQueues_asrt and ex_abs einvs) \ + (doUserOp f tc) (doUserOp_C f tc)" + apply (simp only: doUserOp_C_def doUserOp_def split_def) + apply (rule corres_guard_imp) + apply (rule_tac P=\ and P'=\ and r'="(=)" in corres_split) + apply (clarsimp simp: simpler_gets_def getCurThread_def + corres_underlying_def rf_sr_def cstate_relation_def Let_def) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) + apply (clarsimp simp: cstate_to_A_def absKState_def + rf_sr_def cstate_to_H_correct ptable_lift_def) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) + apply (clarsimp simp: cstate_to_A_def absKState_def + rf_sr_def cstate_to_H_correct ptable_rights_def) + apply (rule_tac P=pspace_distinct' and P'=\ and r'="(=)" + in corres_split) + apply clarsimp + apply (rule fun_cong[where x=ptrFromPAddr]) + apply (rule_tac f=comp in arg_cong) + apply (rule user_mem_C_relation[symmetric]) + apply (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def) + apply assumption + apply (rule_tac P=pspace_distinct' and P'=\ and r'="(=)" + in corres_split) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def) + apply (drule(1) device_mem_C_relation[symmetric]) + apply simp + apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) + apply (clarsimp simp: cstate_relation_def rf_sr_def + Let_def cmachine_state_relation_def) + apply (rule_tac P=\ and P'=\ and r'="(=)" in corres_split) + apply (clarsimp simp add: corres_underlying_def fail_def + assert_def return_def + split:if_splits) + apply simp + apply (rule_tac P=\ and P'=\ and r'="(=)" in corres_split) + apply (clarsimp simp add: corres_underlying_def fail_def + assert_def return_def + split:if_splits) + apply simp + apply (rule_tac r'="(=)" in corres_split[OF corres_select]) + apply clarsimp + apply simp + apply (rule corres_split[OF user_memory_update_corres_C]) + apply (rule corres_split[OF device_update_corres_C, + where R="\\" and R'="\\"]) + apply (wp | simp)+ + apply (intro conjI allI ballI impI) + apply ((clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def)+)[5] + apply (clarsimp simp: ex_abs_def restrict_map_def + split: if_splits) + apply (drule ptable_rights_imp_UserData[rotated -1]) + apply fastforce+ + apply (clarsimp simp: invs'_def valid_state'_def user_mem'_def device_mem'_def + split: if_splits) + apply (drule_tac c = x in subsetD[where B = "dom S" for S]) + apply (simp add:dom_def) + apply fastforce + apply clarsimp + done + +lemma check_active_irq_corres_C: + "corres_underlying rf_sr False True (=) + (invs' and (\s. 
ksSchedulerAction s = ResumeCurrentThread) and ex_abs valid_state) \ + (checkActiveIRQ) (checkActiveIRQ_C)" + apply (simp add: checkActiveIRQ_C_def checkActiveIRQ_def getActiveIRQ_C_def) + apply (rule corres_guard_imp) + apply (subst bind_assoc[symmetric]) + apply (rule corres_split) + apply (rule ccorres_corres_u_xf) + apply (rule ccorres_rel_imp, rule ccorres_guard_imp) + apply (ctac add:getActiveIRQ_ccorres) + apply (rule TrueI) + apply (simp (no_asm)) + apply assumption + apply (rule no_fail_dmo') + apply (rule no_fail_getActiveIRQ) + apply (clarsimp simp: irqInvalid_def ucast_up_ucast_id + is_up_def source_size_def target_size_def word_size mask_def + split: option.splits) + apply (rule hoare_TrueI)+ + apply (wp|simp)+ + done + +lemma refinement2_both: + " + \ Init = Init_C, Fin = Fin_C, + Step = (\u. global_automaton check_active_irq_C (do_user_op_C uop) (kernel_call_C fp)) \ + \ ADT_H uop" + supply word_neq_0_conv[simp] + apply (rule sim_imp_refines) + apply (rule L_invariantI [where I\<^sub>c=UNIV and r="lift_state_relation rf_sr"]) + apply (rule full_invs_both) + apply simp + apply (unfold LI_def) + apply (rule conjI) + apply (simp add: ADT_H_def) + apply (blast intro!: init_refinement_C) + apply (rule conjI) + prefer 2 + apply (simp add: ADT_H_def) + apply (clarsimp simp: Fin_C_def) + apply (drule lift_state_relationD) + apply (clarsimp simp: cstate_to_A_def) + apply (subst cstate_to_H_correct) + apply (fastforce simp: full_invs'_def invs'_def) + apply (clarsimp simp: rf_sr_def) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (simp add:absKState_def observable_memory_def absExst_def) + apply (rule MachineTypes.machine_state.equality,simp_all)[1] + apply (rule ext) + apply (clarsimp simp: user_mem'_def option_to_0_def split:if_splits) + apply (simp add: ADT_H_def) + supply subst_all [simp del] + apply (clarsimp simp: rel_semi_def global_automaton_def relcomp_unfold + in_lift_state_relation_eq) + + apply (erule_tac P="a \ (\x. b x)" for a b in disjE) + apply (clarsimp simp add: kernel_call_C_def kernel_call_H_def) + apply (subgoal_tac "all_invs' x b") + apply (drule_tac fp=fp and tc=af in entry_refinement_C, simp+) + apply clarsimp + apply (drule spec, drule spec, drule(1) mp) + apply (clarsimp simp: full_invs'_def) + apply (frule use_valid, rule kernelEntry_invs', + simp add: sch_act_simple_def) + apply (fastforce simp: ct_running'_C) + apply (clarsimp simp: full_invs_def full_invs'_def all_invs'_def) + apply fastforce + + apply (erule_tac P="a \ b \ c \ d \ e" for a b c d e in disjE) + apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) + apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) + apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce + + apply (erule_tac P="a \ b \ c \ (\x. 
e x)" for a b c d e in disjE) + apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) + apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) + apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce + + apply (clarsimp simp: check_active_irq_C_def check_active_irq_H_def) + apply (rule rev_mp, rule check_active_irq_corres_C) + apply (fastforce simp: corres_underlying_def full_invs'_def ex_abs_def) + done + +theorem refinement2: + "ADT_C uop \ ADT_H uop" + unfolding ADT_C_def + by (rule refinement2_both) + +theorem fp_refinement: + "ADT_FP_C uop \ ADT_H uop" + unfolding ADT_FP_C_def + by (rule refinement2_both) + +theorem seL4_refinement: + "ADT_C uop \ ADT_A uop" + by (blast intro: refinement refinement2 refinement_trans) + +theorem seL4_fastpath_refinement: + "ADT_FP_C uop \ ADT_A uop" + by (blast intro: refinement fp_refinement refinement_trans) + +lemma exec_C_Basic: + "exec_C Gamma (Basic f) = (modify f)" + apply (rule ext) + apply (simp add: exec_C_def simpler_modify_def) + apply (auto elim: exec.cases intro: exec.intros) + done + +lemma in_monad_imp_rewriteE: + "\ (a, b) \ fst (f' s); monadic_rewrite F False \ f f'; F \ \ snd (f s) \ + \ (a, b) \ fst (f s)" + by (auto simp add: monadic_rewrite_def) + +lemma ccorres_underlying_Fault: + "\ ccorres_underlying srel Gamma rrefl xf arrel axf G G' hs m c; + \s. 
(s, s') \ srel \ G s \ s' \ G' \ \ snd (m s) \ + \ \ Gamma \ \c, Normal s'\ \ Fault ft" + apply clarsimp + apply (erule(4) ccorresE) + apply (erule exec_handlers.EHOther) + apply simp + apply simp + done + +lemma monadic_rewrite_\: + "monadic_rewrite True False \ + (exec_C \ c) + (exec_C (kernel_all_global_addresses.\ symbol_table) c)" + using spec_refine [of symbol_table domain] + using spec_simulates_to_exec_simulates + apply (clarsimp simp: spec_statefn_simulates_via_statefn + o_def map_option_case monadic_rewrite_def exec_C_def + split: option.splits + cong: option.case_cong) + apply blast + done + +lemma no_fail_getActiveIRQ_C: + "\snd (getActiveIRQ_C s)" + apply (clarsimp simp: getActiveIRQ_C_def exec_C_def) + apply (drule getActiveIRQ_Normal) + apply (clarsimp simp: isNormal_def) + done + +lemma kernel_all_subset_kernel: + "global_automaton (kernel_global.check_active_irq_C symbol_table) (do_user_op_C uop) + (kernel_global.kernel_call_C symbol_table fp) + \ global_automaton check_active_irq_C (do_user_op_C uop) (kernel_call_C fp)" + apply (clarsimp simp: fw_sim_def rel_semi_def global_automaton_def + relcomp_unfold in_lift_state_relation_eq) + apply (intro conjI) + apply (simp_all add: kernel_global.kernel_call_C_def + kernel_call_C_def kernelEntry_C_def + setTCBContext_C_def + kernel_global.kernelEntry_C_def + exec_C_Basic + kernel_global.setTCBContext_C_def + kernel_call_H_def kernelEntry_def + getContext_C_def + check_active_irq_C_def checkActiveIRQ_C_def + kernel_global.check_active_irq_C_def kernel_global.checkActiveIRQ_C_def + check_active_irq_H_def checkActiveIRQ_def) + apply clarsimp + apply (erule in_monad_imp_rewriteE[where F=True]) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_bind_tail)+ + apply (rule monadic_rewrite_bind_head[where P=\]) + apply (simp add: callKernel_C_def callKernel_withFastpath_C_def + kernel_global.callKernel_C_def + kernel_global.callKernel_withFastpath_C_def + handleHypervisorEvent_C_def kernel_global.handleHypervisorEvent_C_def + split: event.split if_split) + apply (intro allI impI conjI monadic_rewrite_\)[1] + apply ((wp | simp)+)[3] + apply (clarsimp simp: snd_bind snd_modify in_monad gets_def) + apply clarsimp + apply clarsimp + apply clarsimp + apply (clarsimp simp: in_monad) + apply (erule (1) notE[OF _ in_monad_imp_rewriteE[where F=True]]) + apply (simp add: kernel_global.getActiveIRQ_C_def getActiveIRQ_C_def) + apply (rule monadic_rewrite_\) + apply (simp add: no_fail_getActiveIRQ_C) + apply (clarsimp simp: in_monad) + apply (erule (1) notE[OF _ in_monad_imp_rewriteE[where F=True]]) + apply (simp add: kernel_global.getActiveIRQ_C_def getActiveIRQ_C_def) + apply (rule monadic_rewrite_\) + apply (simp add: no_fail_getActiveIRQ_C) + apply (clarsimp simp: in_monad) + apply (erule (1) notE[OF _ in_monad_imp_rewriteE[where F=True]]) + apply (simp add: kernel_global.getActiveIRQ_C_def getActiveIRQ_C_def) + apply (rule monadic_rewrite_\) + apply (simp add: no_fail_getActiveIRQ_C) + done + +theorem true_refinement: + "kernel_global.ADT_C symbol_table armKSKernelVSpace_C uop + \ ADT_H uop" + apply (rule refinement_trans[OF _ refinement2]) + apply (simp add: kernel_global.ADT_C_def ADT_C_def) + apply (rule sim_imp_refines) + apply (clarsimp simp: fw_simulates_def) + apply (rule_tac x=Id in exI) + using kernel_all_subset_kernel + apply (simp add: fw_sim_def rel_semi_def) + done + +theorem true_fp_refinement: + "kernel_global.ADT_FP_C symbol_table armKSKernelVSpace_C uop + \ ADT_H uop" + apply (rule refinement_trans[OF _ 
fp_refinement]) + apply (simp add: kernel_global.ADT_FP_C_def ADT_FP_C_def) + apply (rule sim_imp_refines) + apply (clarsimp simp: fw_simulates_def) + apply (rule_tac x=Id in exI) + using kernel_all_subset_kernel + apply (simp add: fw_sim_def rel_semi_def) + done + +end + +end diff --git a/proof/crefine/AARCH64/Retype_C.thy b/proof/crefine/AARCH64/Retype_C.thy new file mode 100644 index 0000000000..c87b58ccef --- /dev/null +++ b/proof/crefine/AARCH64/Retype_C.thy @@ -0,0 +1,8669 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Retype_C +imports + Detype_C + CSpace_All + StoreWord_C +begin + +declare word_neq_0_conv [simp del] + +instance cte_C :: array_outer_max_size + by intro_classes simp + +(* FIXME AARCH64 these signed 64-bit word lemmas are probably not needed without sign-extended + canonical addresses *) + +lemma sint_eq_uint: + "unat (a::machine_word) < 2 ^ 63 \ sint a = uint a" + by (fastforce simp: unat_ucast_less_no_overflow_simp sint_eq_uint not_msb_from_less) + +lemma sle_positive: "\ b < 0x8000000000000000; (a :: machine_word) \ b \ \ a <=s b" + apply (simp add:word_sle_def) + apply (subst sint_eq_uint) + apply (rule unat_less_helper) + apply simp + apply (subst sint_eq_uint) + apply (rule unat_less_helper) + apply simp + apply (clarsimp simp:word_le_def) + done + +lemma zero_le_sint: "\ 0 \ (a :: machine_word); a < 0x8000000000000000 \ \ 0 \ sint a" + apply (subst sint_eq_uint) + apply (simp add:unat_less_helper) + apply simp + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma map_option_byte_to_word_heap: + assumes disj: "\(off :: 9 word) x. x<8 \ p + ucast off * 8 + x \ S " (*9=page table index*) + shows "byte_to_word_heap (\x. if x \ S then 0 else mem x) p + = byte_to_word_heap mem p" + by (clarsimp simp: option_map_def byte_to_word_heap_def[abs_def] + Let_def disj disj[where x = 0,simplified] + split: option.splits) + +text \Generalise the different kinds of retypes to allow more general proofs +about what they might change.\ +definition + ptr_retyps_gen :: "nat \ ('a :: c_type) ptr \ bool \ heap_typ_desc \ heap_typ_desc" +where + "ptr_retyps_gen n p mk_array + = (if mk_array then ptr_arr_retyps n p else ptr_retyps n p)" + +lemma ptr_retyp_gen_one: + "ptr_retyps_gen 1 p False = ptr_retyp p" + unfolding ptr_retyps_gen_def + by (rule ext, clarsimp) + +end + +context kernel_m +begin + +(* Ensure that the given region of memory does not contain any typed memory. *) +definition + region_is_typeless :: "machine_word \ nat \ ('a globals_scheme, 'b) StateSpace.state_scheme \ bool" +where + "region_is_typeless ptr sz s \ + \z\{ptr ..+ sz}. 
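byte_to_word_heap above reads machine words out of the byte heap: the word at table index off is assembled from the eight bytes starting at p + off * 8, which is why zeroing a set S disjoint from those bytes (the premise of map_option_byte_to_word_heap) leaves the result unchanged. A simplified Haskell sketch of the assembly of a single word, assuming the little-endian byte order of the AArch64 memory model and ignoring the table indexing (byteToWordSketch is an illustrative name):

import Data.Bits (shiftL, (.|.))
import Data.Word (Word8, Word64)

-- Assemble the eight bytes at p, p+1, ..., p+7 into one 64-bit word,
-- least significant byte first (little-endian).
byteToWordSketch :: (Word64 -> Word8) -> Word64 -> Word64
byteToWordSketch mem p =
  foldr (\i acc -> (acc `shiftL` 8) .|. fromIntegral (mem (p + i))) 0 [0 .. 7]

main :: IO ()
main = print (byteToWordSketch fromIntegral 0)  -- 0x0706050403020100, printed in decimal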
snd (snd (t_hrs_' (globals s)) z) = Map.empty" + +lemma c_guard_word8: + "c_guard (p :: word8 ptr) = (ptr_val p \ 0)" + unfolding c_guard_def ptr_aligned_def c_null_guard_def + apply simp + apply (rule iffI) + apply (drule intvlD) + apply clarsimp + apply simp + apply (rule intvl_self) + apply simp + done + +lemma + "(x \ {x ..+ n}) = (n \ 0)" + apply (rule iffI) + apply (drule intvlD) + apply clarsimp + apply (rule intvl_self) + apply simp + done + +lemma heap_update_list_append3: + "\ s' = s + of_nat (length xs) \ \ heap_update_list s (xs @ ys) H = heap_update_list s' ys (heap_update_list s xs H)" + apply simp + apply (subst heap_update_list_append [symmetric]) + apply clarsimp + done + +lemma ptr_aligned_machine_word: + "\ is_aligned p 3 \ \ ptr_aligned ((Ptr p) :: machine_word ptr)" + apply (clarsimp simp: is_aligned_def ptr_aligned_def) + done + +lemma c_guard_machine_word: + "\ is_aligned (ptr_val p) 3; p \ NULL \ \ c_guard (p :: (machine_word ptr))" + apply (clarsimp simp: c_guard_def) + apply (rule conjI) + apply (case_tac p, clarsimp simp: ptr_aligned_machine_word) + apply (case_tac p, simp add: c_null_guard_def) + apply (subst intvl_aligned_bottom_eq [where n=3 and bits=3], auto simp: word_bits_def) + done + +lemma is_aligned_and_not_zero: "\ is_aligned n k; n \ 0 \ \ 2^k \ n" + apply (metis aligned_small_is_0 word_not_le) + done + +lemma replicate_append [rule_format]: "\xs. replicate n x @ (x # xs) = replicate (n + 1) x @ xs" + apply (induct n) + apply clarsimp + apply clarsimp + done + +lemmas unat_add_simple = + iffD1 [OF unat_add_lem [where 'a = 32, folded word_bits_def]] + +lemma replicate_append_list [rule_format]: + "\n. set L \ {0::word8} \ (replicate n 0 @ L = replicate (n + length L) 0)" + apply (rule rev_induct) + apply clarsimp + apply (rule allI) + apply (erule_tac x="n+1" in allE) + apply clarsimp + apply (subst append_assoc[symmetric]) + apply clarsimp + apply (subgoal_tac "\n. (replicate n 0 @ [0]) = (0 # replicate n (0 :: word8))") + apply clarsimp + apply (induct_tac na) + apply clarsimp + apply clarsimp + done + +lemma heap_update_list_replicate: + "\ set L = {0}; n' = n + length L \ \ heap_update_list s ((replicate n 0) @ L) H = heap_update_list s (replicate n' 0) H" + apply (subst replicate_append_list) + apply clarsimp + apply clarsimp + done + +lemma heap_update_machine_word_is_heap_update_list: + "heap_update p (x :: machine_word) = heap_update_list (ptr_val p) (to_bytes x a)" + apply (rule ext)+ + apply (clarsimp simp: heap_update_def) + apply (clarsimp simp: to_bytes_def typ_info_word) + done + +lemma to_bytes_machine_word_0: + "to_bytes (0 :: machine_word) xs = [0, 0, 0, 0,0,0,0,0 :: word8]" + apply (simp add: to_bytes_def typ_info_word word_rsplit_same word_rsplit_0 word_bits_def) + done + +lemma globals_list_distinct_subset: + "\ globals_list_distinct D symtab xs; D' \ D \ + \ globals_list_distinct D' symtab xs" + by (simp add: globals_list_distinct_def disjoint_subset) + +lemma fst_s_footprint: + "(fst ` s_footprint p) = {ptr_val (p :: 'a ptr) + ..+ size_of TYPE('a :: c_type)}" + apply (simp add: s_footprint_def s_footprint_untyped_def) + apply (auto simp: intvl_def size_of_def image_def) + done + +lemma memzero_spec: + "\s. \ \ \s. ptr_val \s \ 0 \ ptr_val \s \ ptr_val \s + (\n - 1) + \ (is_aligned (ptr_val \s) 3) \ (is_aligned (\n) 3) + \ {ptr_val \s ..+ unat \n} \ {SIndexVal, SIndexTyp 0} \ dom_s (hrs_htd \t_hrs) + \ gs_get_assn cap_get_capSizeBits_'proc \ghost'state \ insert 0 {\n ..}\ + Call memzero_'proc {t. 
+ t_hrs_' (globals t) = hrs_mem_update (heap_update_list (ptr_val (s_' s)) + (replicate (unat (n_' s)) (ucast (0)))) (t_hrs_' (globals s))}" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (clarsimp simp: whileAnno_def) + apply (rule_tac I1="{t. (ptr_val (s_' s) \ ptr_val (s_' s) + ((n_' s) - 1) \ ptr_val (s_' s) \ 0) \ + ptr_val (s_' s) + (n_' s - n_' t) = ptr_val (p___ptr_to_unsigned_char_' t) \ + n_' t \ n_' s \ + (is_aligned (n_' t) 3) \ + (is_aligned (n_' s) 3) \ + (is_aligned (ptr_val (s_' t)) 3) \ + (is_aligned (ptr_val (s_' s)) 3) \ + (is_aligned (ptr_val (p___ptr_to_unsigned_char_' t)) 3) \ + {ptr_val (p___ptr_to_unsigned_char_' t) ..+ unat (n_' t)} \ {SIndexVal, SIndexTyp 0} + \ dom_s (hrs_htd (t_hrs_' (globals t))) \ + globals t = (globals s)\ t_hrs_' := + hrs_mem_update (heap_update_list (ptr_val (s_' s)) + (replicate (unat (n_' s - n_' t)) 0)) + (t_hrs_' (globals s))\ }" + and V1=undefined in subst [OF whileAnno_def]) + apply vcg + apply (clarsimp simp add: hrs_mem_update_def) + + apply clarsimp + apply (case_tac s, case_tac p___ptr_to_unsigned_char) + + apply (subgoal_tac "8 \ unat na") + apply (intro conjI) + apply (simp add: ptr_safe_def s_footprint_def s_footprint_untyped_def + typ_uinfo_t_def typ_info_word) + apply (erule order_trans[rotated]) + apply (auto intro!: intvlI)[1] + apply (subst c_guard_machine_word, simp_all)[1] + apply (clarsimp simp: field_simps) + apply (metis le_minus' word_leq_minus_one_le olen_add_eqv diff_self word_le_0_iff word_le_less_eq) + apply (clarsimp simp: field_simps) + apply (frule is_aligned_and_not_zero) + apply clarsimp + apply (rule word_le_imp_diff_le, auto)[1] + apply clarsimp + apply (rule aligned_sub_aligned [where n=3], simp_all add: is_aligned_def word_bits_def)[1] + apply clarsimp + apply (rule is_aligned_add, simp_all add: is_aligned_def word_bits_def)[1] + apply (erule order_trans[rotated]) + apply (clarsimp simp: subset_iff) + apply (erule subsetD[OF intvl_sub_offset, rotated]) + apply (simp add: unat_sub word_le_nat_alt) + apply (clarsimp simp: word_bits_def hrs_mem_update_def) + apply (subst heap_update_machine_word_is_heap_update_list [where a="[]"]) + apply (subst heap_update_list_append3[symmetric]) + apply clarsimp + apply (subst to_bytes_machine_word_0) + apply (rule heap_update_list_replicate) + apply clarsimp + apply (rule_tac s="unat ((n - na) + 8)" in trans) + apply (simp add: field_simps) + apply (subst Word.unat_plus_simple[THEN iffD1]) + apply (rule is_aligned_no_overflow''[where n=3, simplified]) + apply (erule(1) aligned_sub_aligned, simp) + apply (clarsimp simp: field_simps) + apply (frule_tac x=n in is_aligned_no_overflow'', simp) + apply simp + apply simp + apply (rule dvd_imp_le) + apply (simp add: is_aligned_def) + apply (simp add: unat_eq_0[symmetric]) + apply clarsimp + done + +lemma is_aligned_and_2_to_k: + assumes mask_2_k: "(n && 2 ^ k - 1) = 0" + shows "is_aligned (n :: machine_word) k" +proof (subst is_aligned_mask) + have "mask k = (2 :: machine_word) ^ k - 1" + by (clarsimp simp: mask_def) + thus "n && mask k = 0" using mask_2_k + by simp +qed + +lemma memset_spec: + "\s. \ \ \s. ptr_val \s \ 0 \ ptr_val \s \ ptr_val \s + (\n - 1) + \ {ptr_val \s ..+ unat \n} \ {SIndexVal, SIndexTyp 0} \ dom_s (hrs_htd \t_hrs) + \ gs_get_assn cap_get_capSizeBits_'proc \ghost'state \ insert 0 {\n ..}\ + Call memset_'proc + {t. 
t_hrs_' (globals t) = hrs_mem_update (heap_update_list (ptr_val (s_' s)) + (replicate (unat (n_' s)) (ucast (c_' s)))) (t_hrs_' (globals s))}" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (clarsimp simp: whileAnno_def) + apply (rule_tac I1="{t. (ptr_val (s_' s) \ ptr_val (s_' s) + ((n_' s) - 1) \ ptr_val (s_' s) \ 0) \ + c_' t = c_' s \ + ptr_val (s_' s) + (n_' s - n_' t) = ptr_val (p___ptr_to_unsigned_char_' t) \ + n_' t \ n_' s \ + {ptr_val (p___ptr_to_unsigned_char_' t) ..+ unat (n_' t)} \ {SIndexVal, SIndexTyp 0} + \ dom_s (hrs_htd (t_hrs_' (globals t))) \ + globals t = (globals s)\ t_hrs_' := + hrs_mem_update (heap_update_list (ptr_val (s_' s)) + (replicate (unat (n_' s - n_' t)) (ucast (c_' t)))) + (t_hrs_' (globals s))\}" + and V1=undefined in subst [OF whileAnno_def]) + apply vcg + apply (clarsimp simp add: hrs_mem_update_def del: mod_0_imp_dvd split: if_split_asm) + apply (subst (asm) word_mod_2p_is_mask [where n=3, simplified], simp) + apply (subst (asm) word_mod_2p_is_mask [where n=3, simplified], simp) + apply (rule conjI) + apply (rule is_aligned_and_2_to_k, clarsimp simp: mask_def) + apply (rule is_aligned_and_2_to_k, clarsimp simp: mask_def) + apply clarsimp + apply (intro conjI) + apply (simp add: ptr_safe_def s_footprint_def s_footprint_untyped_def + typ_uinfo_t_def typ_info_word) + apply (erule order_trans[rotated]) + apply (auto simp: intvl_self unat_gt_0 intro!: intvlI)[1] + apply (simp add: c_guard_word8) + apply (erule subst) + apply (subst lt1_neq0 [symmetric]) + apply (rule order_trans) + apply (subst lt1_neq0, assumption) + apply (erule word_random) + apply (rule word_le_minus_mono_right) + apply (simp add: lt1_neq0) + apply assumption + apply (erule order_trans [rotated]) + apply (simp add: lt1_neq0) + apply (case_tac p___ptr_to_unsigned_char, simp add: CTypesDefs.ptr_add_def unat_minus_one field_simps) + apply (metis word_must_wrap word_not_simps(1) linear) + apply (erule order_trans[rotated]) + apply (clarsimp simp: ptr_val_case split: ptr.splits) + apply (erule subsetD[OF intvl_sub_offset, rotated]) + apply (simp add: unat_sub word_le_nat_alt word_less_nat_alt) + apply (clarsimp simp: ptr_val_case unat_minus_one hrs_mem_update_def split: ptr.splits) + apply (subgoal_tac "unat (n - (na - 1)) = Suc (unat (n - na))") + apply (erule ssubst, subst replicate_Suc_append) + apply (subst heap_update_list_append) + apply (simp add: heap_update_word8) + apply (subst unatSuc [symmetric]) + apply (subst add.commute) + apply (metis word_neq_0_conv word_sub_plus_one_nonzero) + apply (simp add: field_simps) + apply (clarsimp) + apply (metis diff_0_right word_gt_0) + done + +declare snd_get[simp] + +declare snd_gets[simp] + +lemma snd_when_aligneError[simp]: + shows "(snd ((when P (alignError sz)) s)) = P" + by (simp add: when_def alignError_def fail_def split: if_split) + +lemma snd_unless_aligneError[simp]: + shows "(snd ((unless P (alignError sz)) s)) = (\ P)" + by (simp add: unless_def) + +lemma lift_t_retyp_heap_same: + fixes p :: "'a :: mem_type ptr" + assumes gp: "g p" + shows "lift_t g (hp, ptr_retyp p td) p = Some (from_bytes (heap_list hp (size_of TYPE('a)) (ptr_val p)))" + apply (simp add: lift_t_def lift_typ_heap_if s_valid_def hrs_htd_def) + apply (subst ptr_retyp_h_t_valid) + apply (rule gp) + apply simp + apply (subst heap_list_s_heap_list_dom) + apply (clarsimp simp: s_footprint_intvl) + apply simp + done + +lemma lift_t_retyp_heap_same_rep0: + fixes p :: "'a :: mem_type ptr" + assumes gp: "g p" + shows "lift_t g (heap_update_list (ptr_val p) (replicate 
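The Hoare triples for memzero and memset above specify the two C functions purely by their effect on the heap: on exit, the n bytes starting at s hold 0, respectively the (truncated) fill byte c, i.e. the heap equals heap_update_list applied to a replicate list. A small Haskell model of that postcondition, with memory as a map from addresses to bytes (heapUpdateList and the *Model names are illustrative):

import qualified Data.Map as M
import Data.Word (Word8, Word64)

type Mem = M.Map Word64 Word8

-- heap_update_list: write a list of bytes at consecutive addresses starting at p.
heapUpdateList :: Word64 -> [Word8] -> Mem -> Mem
heapUpdateList p bs mem = foldl (\m (i, b) -> M.insert (p + i) b m) mem (zip [0 ..] bs)

-- The postconditions of memzero and memset, read as functions on this model.
memzeroModel :: Word64 -> Int -> Mem -> Mem
memzeroModel p n = heapUpdateList p (replicate n 0)

memsetModel :: Word64 -> Int -> Word8 -> Mem -> Mem
memsetModel p n c = heapUpdateList p (replicate n c)

main :: IO ()
main = print (M.toList (memsetModel 0x100 4 0xAB (memzeroModel 0x100 2 M.empty)))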
(size_of TYPE('a)) 0) hp, ptr_retyp p td) p = + Some (from_bytes (replicate (size_of TYPE('a)) 0))" + apply (subst lift_t_retyp_heap_same) + apply (rule gp) + apply (subst heap_list_update [where v = "replicate (size_of TYPE('a)) 0", simplified]) + apply (rule order_less_imp_le) + apply simp + apply simp + done + +lemma lift_t_retyp_heap_other2: + fixes p :: "'a :: mem_type ptr" and p' :: "'b :: mem_type ptr" + assumes orth: "{ptr_val p..+size_of TYPE('a)} \ {ptr_val p'..+size_of TYPE('b)} = {}" + shows "lift_t g (hp, ptr_retyp p td) p' = lift_t g (hp, td) p'" + apply (simp add: lift_t_def lift_typ_heap_if s_valid_def hrs_htd_def ptr_retyp_disjoint_iff [OF orth]) + apply (cases "td, g \\<^sub>t p'") + apply simp + apply (simp add: h_t_valid_taut heap_list_s_heap_list heap_list_update_disjoint_same + ptr_retyp_disjoint_iff orth) + apply (simp add: h_t_valid_taut heap_list_s_heap_list heap_list_update_disjoint_same + ptr_retyp_disjoint_iff orth) + done + +lemma dom_s_SindexValD: + "(x, SIndexVal) \ dom_s td \ fst (td x)" + unfolding dom_s_def by clarsimp + +lemma typ_slice_t_self_nth: + "\n < length (typ_slice_t td m). \b. typ_slice_t td m ! n = (td, b)" + using typ_slice_t_self [where td = td and m = m] + by (fastforce simp add: in_set_conv_nth) + +lemma ptr_retyp_other_cleared_region: + fixes p :: "'a :: mem_type ptr" and p' :: "'b :: mem_type ptr" + assumes ht: "ptr_retyp p td, g \\<^sub>t p'" + and tdisj: "typ_uinfo_t TYPE('a) \\<^sub>t typ_uinfo_t TYPE('b :: mem_type)" + and clear: "\x \ {ptr_val p ..+ size_of TYPE('a)}. \n b. snd (td x) n \ Some (typ_uinfo_t TYPE('b), b)" + shows "{ptr_val p'..+ size_of TYPE('b)} \ {ptr_val p ..+ size_of TYPE('a)} = {}" +proof (rule classical) + assume asm: "{ptr_val p'..+ size_of TYPE('b)} \ {ptr_val p ..+ size_of TYPE('a)} \ {}" + then obtain mv where mvp: "mv \ {ptr_val p..+size_of TYPE('a)}" + and mvp': "mv \ {ptr_val p'..+size_of TYPE('b)}" + by blast + + then obtain k' where mv: "mv = ptr_val p' + of_nat k'" and klt: "k' < size_td (typ_info_t TYPE('b))" + by (clarsimp dest!: intvlD simp: size_of_def typ_uinfo_size) + + let ?mv = "ptr_val p' + of_nat k'" + + obtain n b where nl: "n < length (typ_slice_t (typ_uinfo_t TYPE('b)) k')" + and tseq: "typ_slice_t (typ_uinfo_t TYPE('b)) k' ! 
n = (typ_uinfo_t TYPE('b), b)" + using typ_slice_t_self_nth [where td = "typ_uinfo_t TYPE('b)" and m = k'] + by clarsimp + + with ht have "snd (ptr_retyp p td ?mv) n = Some (typ_uinfo_t TYPE('b), b)" + unfolding h_t_valid_def + apply - + apply (clarsimp simp: valid_footprint_def Let_def) + apply (drule spec, drule mp [OF _ klt]) + apply (clarsimp simp: map_le_def) + apply (drule bspec) + apply simp + apply simp + done + + moreover { + assume "snd (ptr_retyp p empty_htd ?mv) n = Some (typ_uinfo_t TYPE('b), b)" + hence "(typ_uinfo_t TYPE('b)) \ fst ` set (typ_slice_t (typ_uinfo_t TYPE('a)) + (unat (ptr_val p' + of_nat k' - ptr_val p)))" + using asm mv mvp + apply - + apply (rule_tac x = "(typ_uinfo_t TYPE('b), b)" in image_eqI) + apply simp + apply (fastforce simp add: ptr_retyp_footprint list_map_eq in_set_conv_nth split: if_split_asm) + done + + with typ_slice_set have "(typ_uinfo_t TYPE('b)) \ fst ` td_set (typ_uinfo_t TYPE('a)) 0" + by (rule subsetD) + + hence False using tdisj by (clarsimp simp: tag_disj_def typ_tag_le_def) + } ultimately show ?thesis using mvp mvp' mv unfolding h_t_valid_def valid_footprint_def + apply - + apply (subst (asm) ptr_retyp_d_eq_snd) + apply (auto simp add: map_add_Some_iff clear) + done +qed + +lemma h_t_valid_not_empty: + fixes p :: "'a :: c_type ptr" + shows "\ d,g \\<^sub>t p; x \ {ptr_val p..+size_of TYPE('a)} \ \ snd (d x) \ Map.empty" + apply (drule intvlD) + apply (clarsimp simp: h_t_valid_def size_of_def) + apply (drule valid_footprintD) + apply (simp add: typ_uinfo_size) + apply clarsimp + done + +lemma ptr_retyps_out: + fixes p :: "'a :: mem_type ptr" + shows "x \ {ptr_val p..+n * size_of TYPE('a)} \ ptr_retyps n p td x = td x" +proof (induct n arbitrary: p) + case 0 thus ?case by simp +next + case (Suc m) + + have ih: "ptr_retyps m (CTypesDefs.ptr_add p 1) td x = td x" + proof (rule Suc.hyps) + from Suc.prems show "x \ {ptr_val (CTypesDefs.ptr_add p 1)..+m * size_of TYPE('a)}" + apply (rule contrapos_nn) + apply (erule subsetD [rotated]) + apply (simp add: CTypesDefs.ptr_add_def) + apply (rule intvl_sub_offset) + apply (simp add: unat_of_nat) + done + qed + + from Suc.prems have "x \ {ptr_val p..+size_of TYPE('a)}" + apply (rule contrapos_nn) + apply (erule subsetD [rotated]) + apply (rule intvl_start_le) + apply simp + done + + thus ?case + by (simp add: ptr_retyp_d ih) +qed + +lemma image_add_intvl: + "((+) x) ` {p ..+ n} = {p + x ..+ n}" + by (auto simp add: intvl_def) + +lemma intvl_sum: + "{p..+ i + j} + = {p ..+ i} \ {(p :: ('a :: len) word) + of_nat i ..+ j}" + apply (simp add: intvl_def, safe) + apply clarsimp + apply (case_tac "k < i") + apply auto[1] + apply (drule_tac x="k - i" in spec) + apply simp + apply fastforce + apply (rule_tac x="k + i" in exI) + apply simp + done + +lemma intvl_Suc_right: + "{p ..+ Suc n} = {p} \ {(p :: ('a :: len) word) + 1 ..+ n}" + apply (simp add: intvl_sum[where p=p and i=1 and j=n, simplified]) + apply (auto dest: intvl_Suc simp: intvl_self) + done + +lemma htd_update_list_same2: + "x \ {p ..+ length xs} \ + htd_update_list p xs htd x = htd x" + by (induct xs arbitrary: p htd, simp_all add: intvl_Suc_right) + +lemma ptr_retyps_gen_out: + fixes p :: "'a :: mem_type ptr" + shows "x \ {ptr_val p..+n * size_of TYPE('a)} \ ptr_retyps_gen n p arr td x = td x" + apply (simp add: ptr_retyps_gen_def ptr_retyps_out split: if_split) + apply (clarsimp simp: ptr_arr_retyps_def htd_update_list_same2) + done + +lemma h_t_valid_intvl_htd_contains_uinfo_t: + "h_t_valid d g (p :: ('a :: c_type) ptr) \ x \ {ptr_val p ..+ 
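Several of the lemmas above (image_add_intvl, intvl_sum, intvl_Suc_right) are bookkeeping facts about the interval {p ..+ n}, the set of n consecutive addresses starting at p, computed with machine-word wrap-around. The splitting law intvl_sum can be spelled out concretely in Haskell, with Word64 standing in for machine words (intvl and intvlSumHolds are illustrative names):

import qualified Data.Set as S
import Data.Word (Word64)

-- {p ..+ n}: the addresses p, p+1, ..., p+(n-1), with wrap-around.
intvl :: Word64 -> Int -> S.Set Word64
intvl p n = S.fromList [p + fromIntegral k | k <- [0 .. n - 1]]

-- intvl_sum: {p ..+ i+j} = {p ..+ i} ∪ {p + of_nat i ..+ j}
intvlSumHolds :: Word64 -> Int -> Int -> Bool
intvlSumHolds p i j =
  intvl p (i + j) == intvl p i `S.union` intvl (p + fromIntegral i) j

main :: IO ()
main = print (and [ intvlSumHolds p i j
                  | p <- [0, 1, maxBound - 2], i <- [0 .. 4], j <- [0 .. 4] ])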
size_of TYPE('a)} \ + (\n. snd (d x) n \ None \ fst (the (snd (d x) n)) = typ_uinfo_t TYPE ('a))" + apply (clarsimp simp: h_t_valid_def valid_footprint_def Let_def intvl_def size_of_def) + apply (drule spec, drule(1) mp) + apply (cut_tac m=k in typ_slice_t_self[where td="typ_uinfo_t TYPE ('a)"]) + apply (clarsimp simp: in_set_conv_nth) + apply (drule_tac x=i in map_leD) + apply simp + apply fastforce + done + +lemma list_map_override_comono: + "list_map xs \\<^sub>m m ++ list_map ys + \ xs \ ys \ ys \ xs" + apply (simp add: map_le_def list_map_eq map_add_def) + apply (cases "length xs \ length ys") + apply (simp add: prefix_eq_nth) + apply (simp split: if_split_asm add: prefix_eq_nth) + done + +lemma list_map_plus_le_not_tag_disj: + "list_map (typ_slice_t td y) \\<^sub>m m ++ list_map (typ_slice_t td' y') + \ \ td \\<^sub>t td'" + apply (drule list_map_override_comono) + apply (auto dest: typ_slice_sub) + done + +lemma htd_update_list_not_tag_disj: + "list_map (typ_slice_t td y) + \\<^sub>m snd (htd_update_list p xs htd x) + \ x \ {p ..+ length xs} + \ y < size_td td + \ length xs < addr_card + \ set xs \ list_map ` typ_slice_t td' ` {..< size_td td'} + \ \ td \\<^sub>t td'" + apply (induct xs arbitrary: p htd) + apply simp + apply (clarsimp simp: intvl_Suc_right) + apply (erule disjE) + apply clarsimp + apply (subst(asm) htd_update_list_same2, + rule intvl_Suc_nmem'[where n="Suc m" for m, simplified]) + apply (simp add: addr_card_def card_word) + apply (simp add: list_map_plus_le_not_tag_disj) + apply blast + done + +(* Sigh *) +lemma td_set_offset_ind: + "\j. td_set t (Suc j) = (apsnd Suc :: ('a typ_desc \ nat) \ _) ` td_set t j" + "\j. td_set_struct ts (Suc j) = (apsnd Suc :: ('a typ_desc \ nat) \ _) ` td_set_struct ts j" + "\j. td_set_list xs (Suc j) = (apsnd Suc :: ('a typ_desc \ nat) \ _) ` td_set_list xs j" + "\j. td_set_pair x (Suc j) = (apsnd Suc :: ('a typ_desc \ nat) \ _) ` td_set_pair x j" + apply (induct t and ts and xs and x) + apply (simp_all add: image_Un) + done + +lemma td_set_offset: + "(td, i) \ td_set td' j \ (td, i - j) \ td_set td' 0" + by (induct j arbitrary: i, auto simp: td_set_offset_ind) + +lemma typ_le_uinfo_array_tag_n_m: + "0 < n \ td \ uinfo_array_tag_n_m TYPE('a :: c_type) n m + = (td \ typ_uinfo_t TYPE('a) \ td = uinfo_array_tag_n_m TYPE('a) n m)" +proof - + have ind: "\xs cs. \n'. td_set_list (map (\i. DTPair (typ_uinfo_t TYPE('a)) (cs i)) xs) n' + \ (fst ` (\i. td_set (typ_uinfo_t TYPE('a)) i)) \ UNIV" + apply (induct_tac xs) + apply clarsimp + apply clarsimp + apply (fastforce intro: image_eqI[rotated]) + done + assume "0 < n" + thus ?thesis + apply (simp add: uinfo_array_tag_n_m_def typ_tag_le_def upt_conv_Cons) + apply (auto dest!: ind[rule_format, THEN subsetD], (blast dest: td_set_offset)+) + done +qed + +lemma h_t_array_valid_retyp: + "0 < n \ n * size_of TYPE('a) < addr_card + \ h_t_array_valid (ptr_arr_retyps n p htd) (p :: ('a :: wf_type) ptr) n" + apply (clarsimp simp: ptr_arr_retyps_def h_t_array_valid_def + valid_footprint_def) + apply (simp add: htd_update_list_index intvlI mult.commute) + apply (simp add: addr_card_wb unat_of_nat64) + done + +lemma valid_call_Spec_eq_subset: +"\' procname = Some (Spec R) +\ (\x. \'\\<^bsub>/NF\<^esub> (P x) Call procname (Q x),(A x)) + = ((\x. P x \ fst ` R) \ (R \ (\x. 
(- P x) \ UNIV \ UNIV \ Q x)))" + supply image_cong_simp [cong del] + apply (safe, simp_all) + apply (clarsimp simp: HoarePartialDef.valid_def) + apply (rule ccontr) + apply (elim allE, subst(asm) imageI, assumption) + apply (drule mp, erule exec.Call, rule exec.SpecStuck) + apply (auto simp: image_def)[2] + apply (clarsimp simp: HoarePartialDef.valid_def) + apply (elim allE, drule mp, erule exec.Call, erule exec.Spec) + apply auto[1] + apply (clarsimp simp: HoarePartialDef.valid_def) + apply (erule exec_Normal_elim_cases, simp_all) + apply (erule exec_Normal_elim_cases, auto simp: image_def) + apply blast + apply (thin_tac "R \ _", fastforce) + done + +lemma field_of_t_refl: + "field_of_t p p' = (p = p')" + apply (safe, simp_all add: field_of_t_def field_of_self) + apply (simp add: field_of_def) + apply (drule td_set_size_lte) + apply (simp add: unat_eq_0) + done + +lemma h_t_valid_ptr_retyps_gen: + assumes sz: "nptrs * size_of TYPE('a :: mem_type) < addr_card" + and gd: "gd p'" + shows + "(p' \ ((+\<^sub>p) (Ptr p :: 'a ptr) \ int) ` {k. k < nptrs}) + \ h_t_valid (ptr_retyps_gen nptrs (Ptr p :: 'a ptr) arr htd) gd p'" + using gd sz + apply (cases arr, simp_all add: ptr_retyps_gen_def) + apply (cases "nptrs = 0") + apply simp + apply (cut_tac h_t_array_valid_retyp[where p="Ptr p" and htd=htd, OF _ sz], simp_all) + apply clarsimp + apply (drule_tac k=x in h_t_array_valid_field, simp_all) + apply (induct nptrs arbitrary: p htd) + apply simp + apply clarsimp + apply (case_tac x, simp_all add: ptr_retyp_h_t_valid) + apply (rule ptr_retyp_disjoint) + apply (elim meta_allE, erule meta_mp, rule image_eqI[rotated], simp) + apply (simp add: field_simps) + apply simp + apply (cut_tac p=p and z="size_of TYPE('a)" + and k="Suc nat * size_of TYPE('a)" in init_intvl_disj) + apply (erule order_le_less_trans[rotated]) + apply (simp del: mult_Suc) + apply (simp add: field_simps Int_ac) + apply (erule disjoint_subset[rotated] disjoint_subset2[rotated]) + apply (rule intvl_start_le, simp) + done + +lemma ptr_retyps_gen_not_tag_disj: + "x \ {p ..+ n * size_of TYPE('a :: mem_type)} + \ list_map (typ_slice_t td y) + \\<^sub>m snd (ptr_retyps_gen n (Ptr p :: 'a ptr) arr htd x) + \ y < size_td td + \ n * size_of TYPE('a) < addr_card + \ 0 < n + \ \ td \\<^sub>t typ_uinfo_t TYPE('a)" + apply (simp add: ptr_retyps_gen_def ptr_arr_retyps_def + split: if_split_asm) + apply (drule_tac td'="uinfo_array_tag_n_m TYPE('a) n n" + in htd_update_list_not_tag_disj, simp+) + apply (clarsimp simp: mult.commute) + apply (clarsimp simp: tag_disj_def) + apply (erule disjE) + apply (metis order_refl typ_le_uinfo_array_tag_n_m) + apply (erule notE, erule order_trans[rotated]) + apply (simp add: typ_le_uinfo_array_tag_n_m) + apply clarsimp + apply (induct n arbitrary: p htd, simp_all) + apply (case_tac "x \ {p ..+ size_of TYPE('a)}") + apply (simp add: intvl_sum ptr_retyp_def) + apply (drule_tac td'="typ_uinfo_t TYPE('a)" + in htd_update_list_not_tag_disj, simp+) + apply (clarsimp simp add: typ_slices_def size_of_def) + apply simp + apply (simp add: intvl_sum) + apply (case_tac "n = 0") + apply simp + apply (simp add: ptr_retyps_out[where n=1, simplified]) + apply blast + done + +lemma ptr_retyps_gen_valid_footprint: + assumes cleared: "region_is_bytes' p (n * size_of TYPE('a)) htd" + and distinct: "td \\<^sub>t typ_uinfo_t TYPE('a)" + and not_byte: "td \ typ_uinfo_t TYPE(word8)" + and sz: "n * size_of TYPE('a) < addr_card" + shows + "valid_footprint (ptr_retyps_gen n (Ptr p :: 'a :: mem_type ptr) arr htd) p' td + = (valid_footprint 
htd p' td)" + apply (cases "n = 0") + apply (simp add: ptr_retyps_gen_def ptr_arr_retyps_def split: if_split) + apply (simp add: valid_footprint_def Let_def) + apply (intro conj_cong refl, rule all_cong) + apply (case_tac "p' + of_nat y \ {p ..+ n * size_of TYPE('a)}") + apply (simp_all add: ptr_retyps_gen_out) + apply (rule iffI; clarsimp) + apply (frule(1) ptr_retyps_gen_not_tag_disj, (simp add: sz)+) + apply (simp add: distinct) + apply (cut_tac m=y in typ_slice_t_self[where td=td]) + apply (clarsimp simp: in_set_conv_nth) + apply (drule_tac x=i in map_leD) + apply simp + apply (simp add: cleared[unfolded region_is_bytes'_def] not_byte) + done + +lemma list_map_length_is_None [simp]: + "list_map xs (length xs) = None" + apply (induct xs) + apply (simp add: list_map_def) + apply (simp add: list_map_def) + done + +lemma list_map_append_one: + "list_map (xs @ [x]) = [length xs \ x] ++ list_map xs" + by (simp add: list_map_def) + +lemma ptr_retyp_same_cleared_region: + fixes p :: "'a :: mem_type ptr" and p' :: "'a :: mem_type ptr" + assumes ht: "ptr_retyp p td, g \\<^sub>t p'" + shows "p = p' \ {ptr_val p..+ size_of TYPE('a)} \ {ptr_val p' ..+ size_of TYPE('a)} = {}" + using ht + by (simp add: h_t_valid_ptr_retyp_eq[where p=p and p'=p'] field_of_t_refl + split: if_split_asm) + +lemma h_t_valid_ptr_retyp_inside_eq: + fixes p :: "'a :: mem_type ptr" and p' :: "'a :: mem_type ptr" + assumes inside: "ptr_val p' \ {ptr_val p ..+ size_of TYPE('a)}" + and ht: "ptr_retyp p td, g \\<^sub>t p'" + shows "p = p'" + using ptr_retyp_same_cleared_region[OF ht] inside mem_type_self[where p=p'] + by blast + +lemma ptr_add_orth: + fixes p :: "'a :: mem_type ptr" + assumes lt: "Suc n * size_of TYPE('a) < 2 ^ word_bits" + shows "{ptr_val p..+size_of TYPE('a)} \ {ptr_val (CTypesDefs.ptr_add p 1)..+n * size_of TYPE('a)} = {}" + using lt + apply - + apply (rule disjointI) + apply clarsimp + apply (drule intvlD)+ + apply (clarsimp simp: CTypesDefs.ptr_add_def) + apply (simp only: Abs_fnat_hom_add) + apply (drule unat_cong) + apply (simp only: unat_of_nat) + apply (unfold word_bits_len_of) + apply (simp add: addr_card_wb [symmetric]) + done + +lemma h_t_valid_ptr_retyps_gen_same: + assumes guard: "\n' < nptrs. gd (CTypesDefs.ptr_add (Ptr p :: 'a ptr) (of_nat n'))" + assumes cleared: "region_is_bytes' p (nptrs * size_of TYPE('a :: mem_type)) htd" + and not_byte: "typ_uinfo_t TYPE('a) \ typ_uinfo_t TYPE(word8)" + assumes sz: "nptrs * size_of TYPE('a) < addr_card" + shows + "h_t_valid (ptr_retyps_gen nptrs (Ptr p :: 'a ptr) arr htd) gd p' + = ((p' \ ((+\<^sub>p) (Ptr p :: 'a ptr) \ int) ` {k. k < nptrs}) \ h_t_valid htd gd p')" + (is "h_t_valid ?htd' gd p' = (p' \ ?S \ h_t_valid htd gd p')") +proof (cases "{ptr_val p' ..+ size_of TYPE('a)} \ {p ..+ nptrs * size_of TYPE('a)} = {}") + case True + + from True have notin: + "p' \ ?S" + apply clarsimp + apply (drule_tac x="p + of_nat (x * size_of TYPE('a))" in eqset_imp_iff) + apply (simp only: Int_iff empty_iff simp_thms) + apply (subst(asm) intvlI, simp) + apply (simp add: intvl_self) + done + + from True have same: "\y < size_of TYPE('a). 
?htd' (ptr_val p' + of_nat y) + = htd (ptr_val p' + of_nat y)" + apply clarsimp + apply (rule ptr_retyps_gen_out) + apply simp + apply (blast intro: intvlI) + done + + show ?thesis + by (clarsimp simp: h_t_valid_def valid_footprint_def Let_def + notin same size_of_def[symmetric, where t="TYPE('a)"] + cong del: image_cong_simp) +next + case False + + from False have nvalid: "\ h_t_valid htd gd p'" + apply (clarsimp simp: h_t_valid_def valid_footprint_def set_eq_iff + Let_def size_of_def[symmetric, where t="TYPE('a)"] + intvl_def[where x="(ptr_val p', a)" for a]) + apply (drule cleared[unfolded region_is_bytes'_def, THEN bspec]) + apply (drule spec, drule(1) mp, clarsimp) + apply (cut_tac m=k in typ_slice_t_self[where td="typ_uinfo_t TYPE ('a)"]) + apply (clarsimp simp: in_set_conv_nth) + apply (drule_tac x=i in map_leD, simp_all) + apply (simp add: not_byte) + done + + have mod_split: "\k. k < nptrs * size_of TYPE('a) + \ \quot rem. k = quot * size_of TYPE('a) + rem \ rem < size_of TYPE('a) \ quot < nptrs" + apply (intro exI conjI, rule div_mult_mod_eq[symmetric]) + apply simp + apply (simp add: More_Divides.td_gal_lt) + done + + have gd: "\p'. p' \ ?S \ gd p'" + using guard by auto + + note htv = h_t_valid_ptr_retyps_gen[where gd=gd, OF sz gd] + + show ?thesis using False + apply (simp add: nvalid) + apply (rule iffI, simp_all add: htv) + apply (clarsimp simp: set_eq_iff intvl_def[where x="(p, a)" for a]) + apply (drule mod_split, clarsimp) + apply (frule_tac htv[OF imageI, simplified]) + apply fastforce + apply (rule ccontr) + apply (drule(1) h_t_valid_neq_disjoint) + apply simp + apply (clarsimp simp: field_of_t_refl) + apply (simp add: set_eq_iff) + apply (drule spec, drule(1) mp) + apply (subst(asm) add.assoc[symmetric], subst(asm) intvlI, assumption) + apply simp + done +qed + +lemma clift_ptr_retyps_gen_memset_same: + assumes guard: "\n' < n. c_guard (CTypesDefs.ptr_add (Ptr p :: 'a :: mem_type ptr) (of_nat n'))" + assumes cleared: "region_is_bytes' p (n * size_of TYPE('a :: mem_type)) (hrs_htd hrs)" + and not_byte: "typ_uinfo_t TYPE('a :: mem_type) \ typ_uinfo_t TYPE(word8)" + and nb: "nb = n * size_of TYPE ('a)" + and sz: "n * size_of TYPE('a) < 2 ^ word_bits" + shows "(clift (hrs_htd_update (ptr_retyps_gen n (Ptr p :: 'a :: mem_type ptr) arr) + (hrs_mem_update (heap_update_list p (replicate nb 0)) + hrs)) :: 'a :: mem_type typ_heap) + = (\y. if y \ (CTypesDefs.ptr_add (Ptr p :: 'a :: mem_type ptr) o of_nat) ` {k. k < n} + then Some (from_bytes (replicate (size_of TYPE('a :: mem_type)) 0)) else clift hrs y)" + using sz + supply if_cong[cong] + apply (simp add: nb liftt_if[folded hrs_mem_def hrs_htd_def] + hrs_htd_update hrs_mem_update + h_t_valid_ptr_retyps_gen_same[OF guard cleared not_byte] + addr_card_wb) + apply (rule ext, rename_tac p') + apply (case_tac "p' \ ((+\<^sub>p) (Ptr p) \ int) ` {k. k < n}") + apply (clarsimp simp: h_val_def) + apply (simp only: Word.Abs_fnat_hom_mult hrs_mem_update) + apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) + apply (subst heap_list_update_list; simp?) + apply (simp add: addr_card_def card_word word_bits_def) + apply (clarsimp split: if_split) + apply (simp add: h_val_def) + apply (subst heap_list_update_disjoint_same, simp_all) + apply (simp add: region_is_bytes_disjoint[OF cleared not_byte]) + done + +lemma clift_ptr_retyps_gen_prev_memset_same: + assumes guard: "\n' < n. 
c_guard (CTypesDefs.ptr_add (Ptr p :: 'a :: mem_type ptr) (of_nat n'))" + assumes cleared: "region_is_bytes' p (n * size_of TYPE('a :: mem_type)) (hrs_htd hrs)" + and not_byte: "typ_uinfo_t TYPE('a :: mem_type) \ typ_uinfo_t TYPE(word8)" + and nb: "nb = n * size_of TYPE ('a)" + and sz: "n * size_of TYPE('a) < 2 ^ word_bits" + and rep0: "heap_list (hrs_mem hrs) nb p = replicate nb 0" + shows "(clift (hrs_htd_update (ptr_retyps_gen n (Ptr p :: 'a :: mem_type ptr) arr) hrs) :: 'a :: mem_type typ_heap) + = (\y. if y \ (CTypesDefs.ptr_add (Ptr p :: 'a :: mem_type ptr) o of_nat) ` {k. k < n} + then Some (from_bytes (replicate (size_of TYPE('a :: mem_type)) 0)) else clift hrs y)" + using rep0 + apply (subst clift_ptr_retyps_gen_memset_same[symmetric, OF guard cleared not_byte nb sz]) + apply (rule arg_cong[where f=clift]) + apply (rule_tac f="hrs_htd_update f" for f in arg_cong) + apply (cases hrs, simp add: hrs_mem_update_def) + apply (simp add: heap_update_list_id hrs_mem_def) + done + +lemma clift_ptr_retyps_gen_other: + assumes cleared: "region_is_bytes' (ptr_val p) (nptrs * size_of TYPE('a :: mem_type)) (hrs_htd hrs)" + and sz: "nptrs * size_of TYPE('a) < 2 ^ word_bits" + and other: "typ_uinfo_t TYPE('b) \\<^sub>t typ_uinfo_t TYPE('a)" + and not_byte: "typ_uinfo_t TYPE('b :: mem_type) \ typ_uinfo_t TYPE(word8)" + shows "(clift (hrs_htd_update (ptr_retyps_gen nptrs (p :: 'a ptr) arr) hrs) :: 'b :: mem_type typ_heap) + = clift hrs" + using sz cleared + apply (cases p) + apply (simp add: liftt_if[folded hrs_mem_def hrs_htd_def] + h_t_valid_def hrs_htd_update + ptr_retyps_gen_valid_footprint[simplified addr_card_wb, OF _ other not_byte sz] + cong: if_cong) + done + +lemma clift_heap_list_update_no_heap_other: + assumes cleared: "region_is_bytes' p (length xs) (hrs_htd hrs)" + and not_byte: "typ_uinfo_t TYPE('a :: c_type) \ typ_uinfo_t TYPE(word8)" + shows "clift (hrs_mem_update (heap_update_list p xs) hrs) = (clift hrs :: 'a typ_heap)" + apply (clarsimp simp: liftt_if[folded hrs_mem_def hrs_htd_def] hrs_mem_update + fun_eq_iff h_val_def split: if_split) + apply (subst heap_list_update_disjoint_same, simp_all) + apply (clarsimp simp: set_eq_iff h_t_valid_def valid_footprint_def Let_def + dest!: intvlD[where n="size_of TYPE('a)"]) + apply (drule_tac x="of_nat k" in spec, clarsimp simp: size_of_def) + apply (cut_tac m=k in typ_slice_t_self[where td="typ_uinfo_t TYPE('a)"]) + apply (clarsimp simp: in_set_conv_nth) + apply (drule_tac x=i in map_leD, simp) + apply (simp add: cleared[unfolded region_is_bytes'_def] not_byte size_of_def) + done + +lemma add_is_injective_ring: + "inj ((+) (x :: 'a :: ring))" + by (rule inj_onI, clarsimp) + +(* assumes that y & elements are n-aligned but not that the compound + interval is aligned to a higher power of two. needed for cte arrays. *) +lemma ptr_span_disjoint_ptr_set_span: + fixes y :: "('a :: mem_type) ptr" + assumes align: "is_aligned p n" + and size_of: "size_of TYPE('a) = 2 ^ n" + and al: "is_aligned (ptr_val y) n" + and card: "b * 2 ^ n < addr_card" + and b: "b \ 0" + shows "y \ ((+\<^sub>p) (Ptr p) \ int) ` {k. 
k < b} + \ ptr_span y \ {p ..+ b * 2 ^ n} = {}" +proof - + from card b have word_bits: "n < word_bits" + using power_increasing[where n=word_bits and N=n and a=2] + apply (simp add: word_bits_def addr_card) + apply (rule ccontr, simp) + apply (cases b, simp_all) + apply (drule(1) order_less_le_trans) + apply simp + done + + note al_sub = aligned_sub_aligned_simple[OF al align] + + have yuck: "of_nat b * 2 ^ n \ (0 :: machine_word)" + using of_nat_neq_0[where k="b * 2 ^ n" and 'a=machine_word_len] b card + by (clarsimp simp: addr_card_def card_word) + + show ?thesis + supply image_cong_simp [cong del] + apply (clarsimp simp add: size_of) + apply (rule inj_image_eq_iff[OF add_is_injective_ring[where x="- p"], THEN iffD1]) + apply (subst image_Int[OF add_is_injective_ring]) + apply (simp add: image_add_intvl upto_intvl_eq al_sub) + apply (subst upto_intvl_eq', simp, simp add: b) + apply (cut_tac card, simp add: addr_card_def card_word) + apply safe + apply (simp only: mask_in_range[symmetric] al_sub) + apply simp + apply (drule_tac f="(+) p" in arg_cong, simp) + apply (erule notE, rule_tac x="unat (x >> n)" in image_eqI) + apply (simp add: size_of) + apply (cases y, clarsimp simp: and_not_mask shiftl_t2n) + apply (simp add: shiftr_div_2n') + apply (rule More_Divides.td_gal_lt[THEN iffD1], simp) + apply (drule word_leq_minus_one_le[OF yuck]) + apply (rule unat_less_helper, simp) + done +qed + +lemma ptr_retyp_to_array: + "ptr_retyps_gen 1 (p :: (('a :: wf_type)['b :: finite]) ptr) False + = ptr_retyps_gen CARD('b) (ptr_coerce p :: 'a ptr) True" + by (intro ext, simp add: ptr_retyps_gen_def ptr_arr_retyps_to_retyp) + +lemma projectKO_opt_retyp_other: + assumes cover: "range_cover ptr sz (objBitsKO ko) n" + assumes pal: "pspace_aligned' \" + assumes pno: "pspace_no_overlap' ptr sz \" + and ko_def: "ko \ x" + and pko: "\v. (projectKO_opt x :: ('a :: pre_storable) option) \ Some v" + shows "projectKO_opt \\<^sub>m + (\x. if x \ set (new_cap_addrs n ptr ko) then Some ko else ksPSpace \ x) + = (projectKO_opt \\<^sub>m (ksPSpace \) :: machine_word \ ('a :: pre_storable) option)" (is "?LHS = ?RHS") +proof (rule ext) + fix x + show "?LHS x = ?RHS x" + proof (cases "x \ set (new_cap_addrs n ptr ko)") + case False + thus ?thesis by (simp add: map_comp_def) + next + case True + hence "ksPSpace \ x = None" + apply - + apply (cut_tac no_overlap_new_cap_addrs_disjoint [OF cover pal pno]) + apply (rule ccontr) + apply (clarsimp,drule domI[where a = x]) + apply blast + done + thus ?thesis using True pko ko_def by simp + qed +qed + +lemma pspace_aligned_to_C: + fixes v :: "'a :: pre_storable" + assumes pal: "pspace_aligned' s" + and cmap: "cmap_relation (projectKO_opt \\<^sub>m (ksPSpace s) :: machine_word \ 'a) + (cslift x :: 'b :: mem_type typ_heap) Ptr rel" + and pko: "projectKO_opt ko = Some v" + and pkorl: "\ko' (v' :: 'a). projectKO_opt ko' = Some v' \ objBitsKO ko = objBitsKO ko'" + shows "\x\dom (cslift x :: 'b :: mem_type typ_heap). is_aligned (ptr_val x) (objBitsKO ko)" + (is "\x\dom ?CS. 
is_aligned (ptr_val x) (objBitsKO ko)") +proof + fix z + assume "z \ dom ?CS" + hence "z \ Ptr ` dom (projectKO_opt \\<^sub>m (ksPSpace s) :: machine_word \ 'a)" using cmap + by (simp add: cmap_relation_def) + hence pvz: "ptr_val z \ dom (projectKO_opt \\<^sub>m (ksPSpace s) :: machine_word \ 'a)" + by clarsimp + then obtain v' :: 'a where "projectKO_opt (the (ksPSpace s (ptr_val z))) = Some v'" + and pvz: "ptr_val z \ dom (ksPSpace s)" + apply - + apply (frule map_comp_subset_domD) + apply (clarsimp simp: dom_def) + done + + thus "is_aligned (ptr_val z) (objBitsKO ko)" using pal + unfolding pspace_aligned'_def + apply - + apply (drule (1) bspec) + apply (simp add: pkorl) + done +qed + +lemma pspace_aligned_to_C_cte: + fixes v :: "cte" + assumes pal: "pspace_aligned' s" + and cmap: "cmap_relation (ctes_of s) (cslift x :: cte_C typ_heap) Ptr ccte_relation" + and pko: "projectKO_opt ko = Some v" + shows "\x\dom (cslift x :: cte_C typ_heap). is_aligned (ptr_val x) (objBitsKO ko)" + (is "\x\dom ?CS. is_aligned (ptr_val x) (objBitsKO ko)") +proof + fix z + assume "z \ dom ?CS" + hence "z \ Ptr ` dom (ctes_of s)" using cmap + by (simp add: cmap_relation_def) + hence pvz: "ptr_val z \ dom (ctes_of s)" + by clarsimp + thus "is_aligned (ptr_val z) (objBitsKO ko)" using pal pko + unfolding pspace_aligned'_def + apply - + apply clarsimp + apply (drule ctes_of_is_aligned) + apply (cases ko, simp_all add: projectKOs) + apply (simp add: objBits_simps) + done +qed + +lemma pspace_aligned_to_C_tcb: + fixes v :: "tcb" + assumes pal: "pspace_aligned' s" + and cmap: "cpspace_tcb_relation (ksPSpace s) (t_hrs_' (globals x))" + shows "\x\dom (cslift x :: tcb_C typ_heap). is_aligned (ptr_val x) ctcb_size_bits" + (is "\x\dom ?CS. is_aligned (ptr_val x) ctcb_size_bits") +proof + fix z + assume "z \ dom ?CS" + hence "z \ tcb_ptr_to_ctcb_ptr ` dom (map_to_tcbs (ksPSpace s))" using cmap + by (simp add: cmap_relation_def) + hence pvz: "ctcb_ptr_to_tcb_ptr z \ dom (map_to_tcbs (ksPSpace s))" + by clarsimp + then obtain v' :: tcb where "projectKO_opt (the (ksPSpace s (ctcb_ptr_to_tcb_ptr z))) = Some v'" + and pvz: "ctcb_ptr_to_tcb_ptr z \ dom (ksPSpace s)" + apply - + apply (frule map_comp_subset_domD) + apply (clarsimp simp: dom_def) + done + + thus "is_aligned (ptr_val z) ctcb_size_bits" using pal + unfolding pspace_aligned'_def + apply - + apply (drule (1) bspec) + apply (clarsimp simp add: projectKOs objBits_simps) + apply (erule ctcb_ptr_to_tcb_ptr_aligned) + done +qed + +lemma ptr_add_to_new_cap_addrs: + assumes size_of_m: "size_of TYPE('a :: mem_type) = 2 ^ objBitsKO ko" + shows "(CTypesDefs.ptr_add (Ptr ptr :: 'a :: mem_type ptr) \ of_nat) ` {k. k < n} + = Ptr ` set (new_cap_addrs n ptr ko)" + unfolding new_cap_addrs_def + apply (simp add: image_image shiftl_t2n size_of_m field_simps) + apply (clarsimp simp: atLeastLessThan_def lessThan_def) + done + +lemma cmap_relation_retype: + assumes cm: "cmap_relation mp mp' Ptr rel" + and rel: "rel (makeObject :: 'a :: pspace_storable) ko'" + shows "cmap_relation + (\x. if x \ addrs then Some (makeObject :: 'a :: pspace_storable) else mp x) + (\y. 
if y \ Ptr ` addrs then Some ko' else mp' y) + Ptr rel" + using cm rel + apply - + apply (rule cmap_relationI) + apply (simp add: dom_if cmap_relation_def image_Un) + apply (case_tac "x \ addrs") + apply simp + apply simp + apply (subst (asm) if_not_P) + apply clarsimp + apply (erule (2) cmap_relation_relI) + done + +lemma word_rcat_single[simp]: + "word_rcat [x] = x" + by (simp add: word_rcat_def bin_rcat_def) + +lemma update_ti_t_machine_word_0s: + "update_ti_t (typ_info_t TYPE(machine_word)) [0,0,0,0,0,0,0,0] X = 0" + "word_rcat [0, 0, 0, 0,0,0,0,(0 :: word8)] = (0 :: machine_word)" + by (simp_all add: typ_info_word word_rcat_def bin_rcat_def) + +(* 32-bit fields *) +lemma update_ti_t_word32_0s: + "update_ti_t (typ_info_t TYPE(32 word)) [0,0,0,0] X = 0" + "word_rcat [0, 0, 0, (0 :: word8)] = (0 :: 32 word)" + by (simp_all add: typ_info_word word_rcat_def bin_rcat_def) + +lemma retype_guard_helper: + assumes cover: "range_cover p sz (objBitsKO ko) n" + and ptr0: "p \ 0" + and szo: "size_of TYPE('a :: c_type) = 2 ^ objBitsKO ko" + and lt2: "m \ objBitsKO ko" + and ala: "align_of TYPE('a :: c_type) = 2 ^ m" + shows "\b < n. c_guard (CTypesDefs.ptr_add (Ptr p :: 'a ptr) (of_nat b))" +proof (rule allI, rule impI) + fix b :: nat + assume nv: "b < n" + let ?p = "(Ptr p :: 'a ptr)" + + have "of_nat b * of_nat (size_of TYPE('a)) = (of_nat (b * 2 ^ objBitsKO ko) :: machine_word)" + by (simp add: szo) + + also have "\ < (2 :: machine_word) ^ sz" using nv cover + apply simp + apply (rule word_less_power_trans_ofnat) + apply (erule less_le_trans) + apply (erule range_cover.range_cover_n_le(2)) + apply (erule range_cover.sz)+ + done + + finally have ofn: "of_nat b * of_nat (size_of TYPE('a)) < (2 :: machine_word) ^ sz" . + + have le: "p \ p + of_nat b * 2 ^ objBitsKO ko" + using ofn szo nv + apply - + apply (cases b,clarsimp+) + apply (cut_tac n = nat in range_cover_ptr_le) + apply (rule range_cover_le[OF cover]) + apply simp + apply (simp add:ptr0) + apply (simp add:shiftl_t2n field_simps) + done + + show "c_guard (CTypesDefs.ptr_add ?p (of_nat b))" + apply (rule is_aligned_c_guard[OF _ _ ala _ lt2]) + apply (simp add: szo) + apply (rule is_aligned_add) + apply (rule range_cover.aligned, rule cover) + apply (rule is_aligned_mult_triv2) + apply (simp add: szo neq_0_no_wrap[OF le ptr0]) + apply (simp add: szo) + done +qed + +lemma retype_guard_helper2: + assumes cover: "range_cover p sz (objBitsKO ko) n" + and ptr0: "p \ 0" + and szo: "size_of TYPE('a :: c_type) = 2 ^ objBitsKO ko" + and ala: "align_of TYPE('a :: c_type) \ set (map (\x. 2 ^ x) [0 ..< Suc (objBitsKO ko)])" + shows "\b < n. c_guard (CTypesDefs.ptr_add (Ptr p :: 'a ptr) (of_nat b))" + using ala retype_guard_helper[OF cover ptr0 szo] + by (clarsimp simp del: upt.simps) + +(* When we are retyping, CTEs in the system do not change, + * unless we happen to be retyping into a CNode or a TCB, + * in which case new CTEs only pop up in the new object. *) +lemma retype_ctes_helper: + assumes pal: "pspace_aligned' s" + and pdst: "pspace_distinct' s" + and pno: "pspace_no_overlap' ptr sz s" + and al: "is_aligned ptr (objBitsKO ko)" + and sz: "objBitsKO ko \ sz" + and szb: "sz < word_bits" + and mko: "makeObjectKO dev tp' = Some ko" + and rc: "range_cover ptr sz (objBitsKO ko) n" + shows "map_to_ctes (\xa. if xa \ set (new_cap_addrs n ptr ko) then Some ko else ksPSpace s xa) = + (\x. 
if tp' = Inr (APIObjectType ArchTypes_H.apiobject_type.CapTableObject) \ x \ set (new_cap_addrs n ptr ko) \ + tp' = Inr (APIObjectType ArchTypes_H.apiobject_type.TCBObject) \ + x && ~~ mask tcbBlockSizeBits \ set (new_cap_addrs n ptr ko) \ x && mask tcbBlockSizeBits \ dom tcb_cte_cases + then Some (CTE capability.NullCap nullMDBNode) else ctes_of s x)" + using mko pal pdst +proof (rule ctes_of_retype) + show "pspace_aligned' (s\ksPSpace := \xa. if xa \ set (new_cap_addrs n ptr ko) then Some ko else ksPSpace s xa\)" + using pal pdst pno szb al sz rc + apply - + apply (rule retype_aligned_distinct'', simp_all) + done + + show "pspace_distinct' (s\ksPSpace := \xa. if xa \ set (new_cap_addrs n ptr ko) then Some ko else ksPSpace s xa\)" + using pal pdst pno szb al sz rc + apply - + apply (rule retype_aligned_distinct'', simp_all) + done + + show "\x\set (new_cap_addrs n ptr ko). is_aligned x (objBitsKO ko)" + using al szb + apply - + apply (rule new_cap_addrs_aligned, simp_all) + done + + show "\x\set (new_cap_addrs n ptr ko). ksPSpace s x = None" + using al szb pno pal rc sz + apply - + apply (drule(1) pspace_no_overlap_disjoint') + apply (frule new_cap_addrs_subset) + apply (clarsimp simp: More_Word_Operations.ptr_add_def field_simps) + apply fastforce + done +qed + +lemma ptr_retyps_htd_safe: + "\ htd_safe D htd; + {ptr_val ptr ..+ n * size_of TYPE('a :: mem_type)} + \ D \ + \ htd_safe D (ptr_retyps_gen n (ptr :: 'a ptr) arr htd)" + apply (clarsimp simp: htd_safe_def) + apply (case_tac "a \ {ptr_val ptr..+n * size_of TYPE('a)}") + apply blast + apply (case_tac "(a, b) \ dom_s htd") + apply blast + apply (clarsimp simp: dom_s_def ptr_retyps_gen_out) + done + +lemma ptr_retyps_htd_safe_neg: + "\ htd_safe D htd; {ptr_val ptr ..+ n * size_of TYPE('a :: mem_type)} \ D' = {}; -D \ D' \ + \ htd_safe D (ptr_retyps_gen n (ptr :: 'a ptr) arr htd)" + using ptr_retyps_htd_safe by blast + +lemmas ptr_retyps_htd_safe_neg' = ptr_retyps_htd_safe_neg[OF _ _ subset_refl] + +lemma region_is_bytes_subset: + "region_is_bytes' ptr sz htd + \ {ptr' ..+ sz'} \ {ptr ..+ sz} + \ region_is_bytes' ptr' sz' htd" + by (auto simp: region_is_bytes'_def) + +lemma (in range_cover) strong_times_64: + "len_of TYPE('a) = len_of TYPE(64) \ n * 2 ^ sbit < 2 ^ word_bits" + apply (simp add: nat_mult_power_less_eq) + apply (rule order_less_le_trans, rule string) + apply (simp add: word_bits_def) + done + +(* Helper for use in the many proofs below. *) +lemma cslift_ptr_retyp_other_inst: + assumes bytes: "region_is_bytes' p (n * (2 ^ bits)) (hrs_htd hp)" + and cover: "range_cover p sz bits n" + and sz: "region_sz = n * size_of TYPE('a :: mem_type)" + and sz2: "size_of TYPE('a :: mem_type) = 2 ^ bits" + and tdisj: "typ_uinfo_t TYPE('b) \\<^sub>t typ_uinfo_t TYPE('a)" + and not_byte: "typ_uinfo_t TYPE('b :: mem_type) \ typ_uinfo_t TYPE(word8)" + shows "(clift (hrs_htd_update (ptr_retyps_gen n (Ptr p :: 'a :: mem_type ptr) arr) + hp) :: 'b :: mem_type typ_heap) + = clift hp" + using bytes + apply (subst clift_ptr_retyps_gen_other[OF _ _ tdisj not_byte], simp_all) + apply (simp add: sz2) + apply (simp add: sz2 range_cover.strong_times_64[OF cover]) + done + +(* Helper for use in the many proofs below. 
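This variant additionally overwrites the retyped region with zero bytes via heap_update_list; because the region is assumed to contain only bytes beforehand, the lifted heap at any tag-disjoint, non-byte type 'b is unchanged.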
*) +lemma cslift_ptr_retyp_memset_other_inst: + assumes bytes: "region_is_bytes p (n * (2 ^ bits)) x" + and cover: "range_cover p sz bits n" + and sz: "region_sz = n * size_of TYPE('a :: mem_type)" + and sz2: "size_of TYPE('a :: mem_type) = 2 ^ bits" + and tdisj: "typ_uinfo_t TYPE('b) \\<^sub>t typ_uinfo_t TYPE('a)" + and not_byte: "typ_uinfo_t TYPE('b :: mem_type) \ typ_uinfo_t TYPE(word8)" + shows "(clift (hrs_htd_update (ptr_retyps_gen n (Ptr p :: 'a :: mem_type ptr) arr) + (hrs_mem_update (heap_update_list p (replicate (region_sz) 0)) + (t_hrs_' (globals x)))) :: 'b :: mem_type typ_heap) + = cslift x" + using bytes + apply (subst cslift_ptr_retyp_other_inst[OF _ cover sz sz2 tdisj not_byte]) + apply simp + apply (rule clift_heap_list_update_no_heap_other[OF _ not_byte]) + apply (simp add: hrs_htd_def sz sz2) + done + +lemma ptr_retyps_one: + "ptr_retyps (Suc 0) = ptr_retyp" + apply (rule ext)+ + apply simp + done + +lemma uinfo_array_tag_n_m_not_le_typ_name: + "typ_name (typ_info_t TYPE('b)) @ ''_array_'' @ nat_to_bin_string m + \ td_names (typ_info_t TYPE('a)) + \ \ uinfo_array_tag_n_m TYPE('b :: c_type) n m \ typ_uinfo_t TYPE('a :: c_type)" + apply (clarsimp simp: typ_tag_le_def typ_uinfo_t_def) + apply (drule td_set_td_names) + apply (clarsimp simp: uinfo_array_tag_n_m_def typ_uinfo_t_def) + apply (drule arg_cong[where f="\xs. set ''r'' \ set xs"], simp) + apply (simp add: uinfo_array_tag_n_m_def typ_uinfo_t_def) + done + +lemma tag_not_le_via_td_name: + "typ_name (typ_info_t TYPE('a)) \ td_names (typ_info_t TYPE('b)) + \ typ_name (typ_info_t TYPE('a)) \ pad_typ_name + \ \ typ_uinfo_t TYPE('a :: c_type) \ typ_uinfo_t TYPE ('b :: c_type)" + apply (clarsimp simp: typ_tag_le_def typ_uinfo_t_def) + apply (drule td_set_td_names, simp+) + done + +lemma in_set_list_map: + "x \ set xs \ \n. 
[n \ x] \\<^sub>m list_map xs" + apply (clarsimp simp: in_set_conv_nth) + apply (rule_tac x=i in exI) + apply (simp add: map_le_def) + done + +lemma h_t_valid_eq_array_valid: + "h_t_valid htd gd (p :: (('a :: wf_type)['b :: finite]) ptr) + = (gd p \ h_t_array_valid htd (ptr_coerce p :: 'a ptr) CARD('b))" + by (auto simp: h_t_array_valid_def h_t_valid_def + typ_uinfo_array_tag_n_m_eq) + +lemma h_t_array_valid_ptr_retyps_gen: + assumes sz2: "size_of TYPE('a :: mem_type) = sz" + assumes bytes: "region_is_bytes' (ptr_val p) (n * sz) htd" + shows "h_t_array_valid htd p' n' + \ h_t_array_valid (ptr_retyps_gen n (p :: 'a :: mem_type ptr) arr htd) p' n'" + apply (clarsimp simp: h_t_array_valid_def valid_footprint_def) + apply (drule spec, drule(1) mp, clarsimp) + apply (case_tac "ptr_val p' + of_nat y \ {ptr_val p ..+ n * size_of TYPE('a)}") + apply (cut_tac s="uinfo_array_tag_n_m TYPE('b) n' n'" and n=y in ladder_set_self) + apply (clarsimp dest!: in_set_list_map) + apply (drule(1) map_le_trans) + apply (simp add: map_le_def) + apply (subst(asm) bytes[unfolded region_is_bytes'_def, rule_format, symmetric]) + apply (simp add: sz2) + apply (simp add: uinfo_array_tag_n_m_def typ_uinfo_t_def typ_info_word) + apply simp + apply (simp add: ptr_retyps_gen_out) + done + +lemma cvariable_array_ptr_retyps: + assumes sz2: "size_of TYPE('a :: mem_type) = sz" + assumes bytes: "region_is_bytes' (ptr_val p) (n * sz) htd" + shows "cvariable_array_map_relation m ns ptrfun htd + \ cvariable_array_map_relation m ns (ptrfun :: _ \ ('b :: mem_type) ptr) + (ptr_retyps_gen n (p :: 'a :: mem_type ptr) arr htd)" + by (clarsimp simp: cvariable_array_map_relation_def + h_t_array_valid_ptr_retyps_gen[OF sz2 bytes]) + +lemma cvariable_array_ptr_upd: + assumes at: "h_t_array_valid htd (ptrfun x) (ns y)" + shows "cvariable_array_map_relation m ns ptrfun htd + \ cvariable_array_map_relation (m(x \ y)) + ns (ptrfun :: _ \ ('b :: mem_type) ptr) htd" + by (clarsimp simp: cvariable_array_map_relation_def at + split: if_split) + +lemma clift_eq_h_t_valid_eq: + "clift hp = (clift hp' :: ('a :: c_type) ptr \ _) + \ (h_t_valid (hrs_htd hp) c_guard :: 'a ptr \ _) + = h_t_valid (hrs_htd hp') c_guard" + by (rule ext, simp add: h_t_valid_clift_Some_iff) + +lemma region_is_bytes_typ_region_bytes: + "{ptr ..+ len} \ {ptr' ..+ 2 ^ bits} + \ region_is_bytes' ptr len (typ_region_bytes ptr' bits htd)" + apply (clarsimp simp: region_is_bytes'_def typ_region_bytes_def hrs_htd_update) + apply (simp add: subsetD split: if_split_asm) + done + +lemma region_actually_is_bytes_retyp_disjoint: + "{ptr ..+ sz} \ {ptr_val (p :: 'a ptr)..+n * size_of TYPE('a :: mem_type)} = {} + \ region_actually_is_bytes' ptr sz htd + \ region_actually_is_bytes' ptr sz (ptr_retyps_gen n p arr htd)" + apply (clarsimp simp: region_actually_is_bytes'_def del: impI) + apply (subst ptr_retyps_gen_out) + apply blast + apply simp + done + +lemma intvl_plus_unat_eq: + "p \ p + x - 1 \ x \ 0 + \ {p ..+ unat x} = {p .. 
p + x - 1}" + apply (subst upto_intvl_eq', simp_all add: unat_eq_0 field_simps) + apply (rule order_less_imp_le, simp) + done + +lemma zero_ranges_ptr_retyps: + "\ zero_ranges_are_zero (gsUntypedZeroRanges s) hrs; + caps_overlap_reserved' {ptr_val (p :: 'a ptr) ..+ n * size_of TYPE ('a :: mem_type)} s; + untyped_ranges_zero' s; valid_objs' s \ + \ zero_ranges_are_zero (gsUntypedZeroRanges s) + (hrs_htd_update (ptr_retyps_gen n p arr) hrs)" + apply (clarsimp simp: zero_ranges_are_zero_def untyped_ranges_zero_inv_def + hrs_htd_update) + apply (drule(1) bspec, clarsimp) + apply (rule region_actually_is_bytes_retyp_disjoint, simp_all) + apply (clarsimp simp: map_comp_Some_iff cteCaps_of_def + elim!: ranE) + apply (frule(1) ctes_of_valid') + apply (simp add: caps_overlap_reserved'_def, drule bspec, erule ranI) + apply (frule(1) untypedZeroRange_to_usableCapRange) + apply (clarsimp simp: isCap_simps untypedZeroRange_def + getFreeRef_def max_free_index_def + split: if_split_asm + cong: if_cong) + apply (erule disjoint_subset[rotated]) + apply (subst intvl_plus_unat_eq; clarsimp) + apply (clarsimp simp: word_unat.Rep_inject[symmetric] + valid_cap_simps' capAligned_def + unat_of_nat + simp del: word_unat.Rep_inject) + done + +abbreviation + "ret_zero ptr sz + \ valid_objs' and untyped_ranges_zero' and caps_overlap_reserved' {ptr ..+ sz}" + +lemma createObjects_ccorres_ep: + defines "ko \ (KOEndpoint (makeObject :: endpoint))" + shows "\\ x. (\, x) \ rf_sr + \ ptr \ 0 + \ pspace_aligned' \ \ pspace_distinct' \ + \ pspace_no_overlap' ptr sz \ + \ ret_zero ptr (n * (2 ^ objBitsKO ko)) \ + \ region_is_zero_bytes ptr (n * (2 ^ objBitsKO ko)) x + \ range_cover ptr sz (objBitsKO ko) n + \ {ptr ..+ n * (2 ^ objBitsKO ko)} \ kernel_data_refs = {} + \ + (\\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs n ptr ko) (ksPSpace \)\, + x\globals := globals x + \t_hrs_' := hrs_htd_update (ptr_retyps_gen n (Ptr ptr :: endpoint_C ptr) False) + (t_hrs_' (globals x))\\) \ rf_sr" + (is "\\ x. ?P \ x \ + (\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr") +proof (intro impI allI) + fix \ x + let ?thesis = "(\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr" + let ?ks = "?ks \" + let ?ks' = "?ks' x" + let ?ptr = "Ptr ptr :: endpoint_C ptr" + + assume "?P \ x" + hence rf: "(\, x) \ rf_sr" + and cover: "range_cover ptr sz (objBitsKO ko) n" + and al: "is_aligned ptr (objBitsKO ko)" and ptr0: "ptr \ 0" + and sz: "objBitsKO ko \ sz" + and szb: "sz < word_bits" + and pal: "pspace_aligned' \" and pdst: "pspace_distinct' \" + and pno: "pspace_no_overlap' ptr sz \" + and rzo: "ret_zero ptr (n * (2 ^ objBitsKO ko)) \" + and empty: "region_is_bytes ptr (n * (2 ^ objBitsKO ko)) x" + and zero: "heap_list_is_zero (hrs_mem (t_hrs_' (globals x))) ptr (n * (2 ^ objBitsKO ko))" + and rc: "range_cover ptr sz (objBitsKO ko) n" + and kdr: "{ptr..+n * (2 ^ objBitsKO ko)} \ kernel_data_refs = {}" + by (clarsimp simp:range_cover_def[where 'a=machine_word_len, folded word_bits_def])+ + + (* obj specific *) + have mko: "\dev. 
makeObjectKO dev (Inr (APIObjectType ArchTypes_H.apiobject_type.EndpointObject)) = Some ko" + by (simp add: ko_def makeObjectKO_def) + + have relrl: + "cendpoint_relation (cslift x) makeObject (from_bytes (replicate (size_of TYPE(endpoint_C)) 0))" + unfolding cendpoint_relation_def + apply (simp add: Let_def makeObject_endpoint size_of_def endpoint_lift_def) + apply (simp add: from_bytes_def) + apply (simp add: typ_info_simps endpoint_C_tag_def endpoint_lift_def + size_td_lt_final_pad size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine Let_def + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def Let_def ti_typ_combine_def empty_typ_info_def) + apply (simp add: typ_info_array array_tag_def eval_nat_numeral) + apply (simp add: array_tag_n_eq) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def) + apply (simp add: EPState_Idle_def update_ti_t_machine_word_0s) + done + + (* /obj specific *) + + (* s/obj/obj'/ *) + have szo: "size_of TYPE(endpoint_C) = 2 ^ objBitsKO ko" by (simp add: size_of_def objBits_simps' ko_def) + have szo': "n * (2 ^ objBitsKO ko) = n * size_of TYPE(endpoint_C)" + by (metis szo) + + note rl' = cslift_ptr_retyp_other_inst[OF empty cover[simplified] szo' szo] + + note rl = projectKO_opt_retyp_other [OF rc pal pno ko_def] + note cterl = retype_ctes_helper [OF pal pdst pno al sz szb mko rc, simplified] + note ht_rl = clift_eq_h_t_valid_eq[OF rl', OF tag_disj_via_td_name, simplified] + uinfo_array_tag_n_m_not_le_typ_name + + have guard: + "\b < n. c_guard (CTypesDefs.ptr_add ?ptr (of_nat b))" + apply (rule retype_guard_helper [where m = 3, OF cover ptr0 szo]) + apply (simp add: ko_def objBits_simps') + apply (simp add: align_of_def) + done + + from rf have "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals x))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + hence "cpspace_relation ?ks (underlying_memory (ksMachineState \)) ?ks'" + unfolding cpspace_relation_def + apply - + supply image_cong_simp[cong del] + apply (clarsimp simp: rl' cterl tag_disj_via_td_name foldr_upd_app_if [folded data_map_insert_def] + heap_to_user_data_def cte_C_size heap_to_device_data_def) + apply (subst clift_ptr_retyps_gen_prev_memset_same[OF guard _ _ szo' _ zero], + simp_all only: szo empty, simp_all) + apply (rule range_cover.strong_times_64[OF cover refl]) + apply (simp add: ptr_add_to_new_cap_addrs [OF szo] ht_rl) + apply (simp add: rl projectKO_opt_retyp_same projectKOs) + apply (simp add: ko_def projectKO_opt_retyp_same projectKOs cong: if_cong) + apply (erule cmap_relation_retype) + apply (rule relrl[simplified szo ko_def]) + done + + thus ?thesis using rf empty kdr rzo + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' + tag_disj_via_td_name) + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (simp add: rl' cterl tag_disj_via_td_name h_t_valid_clift_Some_iff) + apply (clarsimp simp: hrs_htd_update ptr_retyps_htd_safe_neg szo + kernel_data_refs_domain_eq_rotate + ht_rl foldr_upd_app_if [folded data_map_insert_def] + rl projectKOs cvariable_array_ptr_retyps[OF szo] + zero_ranges_ptr_retyps + simp del: endpoint_C_size) + done +qed + +lemma createObjects_ccorres_ntfn: + defines "ko \ (KONotification (makeObject :: 
Structures_H.notification))" + shows "\\ x. (\, x) \ rf_sr \ ptr \ 0 + \ pspace_aligned' \ \ pspace_distinct' \ + \ pspace_no_overlap' ptr sz \ + \ ret_zero ptr (n * (2 ^ objBitsKO ko)) \ + \ region_is_zero_bytes ptr (n * 2 ^ objBitsKO ko) x + \ range_cover ptr sz (objBitsKO ko) n + \ {ptr ..+ n * (2 ^ objBitsKO ko)} \ kernel_data_refs = {} + \ + (\\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs n ptr ko) (ksPSpace \)\, + x\globals := globals x + \t_hrs_' := hrs_htd_update (ptr_retyps_gen n (Ptr ptr :: notification_C ptr) False) + (t_hrs_' (globals x))\\) \ rf_sr" + (is "\\ x. ?P \ x \ + (\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr") + +proof (intro impI allI) + fix \ x + let ?thesis = "(\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr" + let ?ks = "?ks \" + let ?ks' = "?ks' x" + let ?ptr = "Ptr ptr :: notification_C ptr" + + assume "?P \ x" + hence rf: "(\, x) \ rf_sr" + and cover: "range_cover ptr sz (objBitsKO ko) n" + and al: "is_aligned ptr (objBitsKO ko)" and ptr0: "ptr \ 0" + and sz: "objBitsKO ko \ sz" + and szb: "sz < word_bits" + and pal: "pspace_aligned' \" and pdst: "pspace_distinct' \" + and pno: "pspace_no_overlap' ptr sz \" + and rzo: "ret_zero ptr (n * (2 ^ objBitsKO ko)) \" + and empty: "region_is_bytes ptr (n * (2 ^ objBitsKO ko)) x" + and zero: "heap_list_is_zero (hrs_mem (t_hrs_' (globals x))) ptr (n * (2 ^ objBitsKO ko))" + and rc: "range_cover ptr sz (objBitsKO ko) n" + and kdr: "{ptr..+n * 2 ^ objBitsKO ko} \ kernel_data_refs = {}" + by (clarsimp simp:range_cover_def[where 'a=machine_word_len, folded word_bits_def])+ + + (* obj specific *) + have mko: "\ dev. makeObjectKO dev (Inr (APIObjectType ArchTypes_H.apiobject_type.NotificationObject)) = Some ko" by (simp add: ko_def makeObjectKO_def) + + have relrl: + "cnotification_relation (cslift x) makeObject (from_bytes (replicate (size_of TYPE(notification_C)) 0))" + unfolding cnotification_relation_def + apply (simp add: Let_def makeObject_notification size_of_def notification_lift_def) + apply (simp add: from_bytes_def) + apply (simp add: typ_info_simps notification_C_tag_def notification_lift_def + size_td_lt_final_pad size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine Let_def + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def Let_def ti_typ_combine_def empty_typ_info_def) + apply (simp add: typ_info_array array_tag_def eval_nat_numeral) + apply (simp add: array_tag_n.simps) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine Let_def + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def Let_def ti_typ_combine_def empty_typ_info_def) + apply (simp add: update_ti_t_machine_word_0s NtfnState_Idle_def option_to_ctcb_ptr_def) + done + + (* /obj specific *) + + (* s/obj/obj'/ *) + have szo: "size_of TYPE(notification_C) = 2 ^ objBitsKO ko" by (simp add: size_of_def objBits_simps' ko_def) + have szo': "n * (2 ^ objBitsKO ko) = n * size_of TYPE(notification_C)" using sz + apply (subst szo) + apply (simp add: power_add [symmetric]) + done + + note rl' = cslift_ptr_retyp_other_inst[OF empty cover[simplified] szo' szo] + + (* rest is generic *) + note rl = projectKO_opt_retyp_other [OF rc pal pno ko_def] + note cterl = retype_ctes_helper [OF pal pdst pno al sz szb mko rc, simplified] + note ht_rl = clift_eq_h_t_valid_eq[OF rl', OF tag_disj_via_td_name, simplified] + 
uinfo_array_tag_n_m_not_le_typ_name + + have guard: + "\b < n. c_guard (CTypesDefs.ptr_add ?ptr (of_nat b))" + apply (rule retype_guard_helper [where m = 3, OF cover ptr0 szo]) + apply (simp add: ko_def objBits_simps') + apply (simp add: align_of_def) + done + + from rf have "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals x))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + hence "cpspace_relation ?ks (underlying_memory (ksMachineState \)) ?ks'" + unfolding cpspace_relation_def + apply - + supply image_cong_simp [cong del] + apply (clarsimp simp: rl' cterl tag_disj_via_td_name foldr_upd_app_if [folded data_map_insert_def] + heap_to_user_data_def cte_C_size) + apply (subst clift_ptr_retyps_gen_prev_memset_same[OF guard _ _ szo' _ zero], + simp_all only: szo empty, simp_all) + apply (rule range_cover.strong_times_64[OF cover refl]) + apply (simp add: ptr_add_to_new_cap_addrs [OF szo] ht_rl) + apply (simp add: rl projectKO_opt_retyp_same projectKOs) + apply (simp add: ko_def projectKO_opt_retyp_same projectKOs cong: if_cong) + apply (erule cmap_relation_retype) + apply (rule relrl[simplified szo ko_def]) + done + + thus ?thesis using rf empty kdr rzo + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' tag_disj_via_td_name) + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (simp add: rl' cterl tag_disj_via_td_name h_t_valid_clift_Some_iff ) + apply (clarsimp simp: hrs_htd_update ptr_retyps_htd_safe_neg szo + kernel_data_refs_domain_eq_rotate + ht_rl foldr_upd_app_if [folded data_map_insert_def] + rl projectKOs cvariable_array_ptr_retyps[OF szo] + zero_ranges_ptr_retyps + simp del: notification_C_size) + done +qed + + +lemma ccte_relation_makeObject: + notes option.case_cong_weak [cong] + shows "ccte_relation makeObject (from_bytes (replicate (size_of TYPE(cte_C)) 0))" + apply (simp add: Let_def makeObject_cte size_of_def ccte_relation_def map_option_Some_eq2) + apply (simp add: from_bytes_def) + apply (simp add: typ_info_simps cte_C_tag_def cte_lift_def + size_td_lt_final_pad size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def align_of_def + typ_info_simps cap_C_tag_def mdb_node_C_tag_def split: option.splits) + apply (simp add: typ_info_array array_tag_def eval_nat_numeral array_tag_n.simps) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def update_ti_t_machine_word_0s) + apply (simp add: cap_lift_def Let_def cap_get_tag_def cap_tag_defs cte_to_H_def cap_to_H_def mdb_node_to_H_def + mdb_node_lift_def nullMDBNode_def c_valid_cte_def) + done + +lemma ccte_relation_nullCap: + notes option.case_cong_weak [cong] + shows "ccte_relation (CTE NullCap (MDB 0 0 False False)) (from_bytes (replicate (size_of TYPE(cte_C)) 0))" + apply (simp add: Let_def makeObject_cte size_of_def ccte_relation_def map_option_Some_eq2) + apply (simp add: from_bytes_def) + apply (simp add: typ_info_simps cte_C_tag_def cte_lift_def + size_td_lt_final_pad size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def align_of_def + typ_info_simps cap_C_tag_def mdb_node_C_tag_def split: option.splits) + apply (simp add: typ_info_array array_tag_def eval_nat_numeral array_tag_n.simps) + apply (simp add: final_pad_def Let_def 
size_td_lt_ti_typ_pad_combine + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def update_ti_t_machine_word_0s) + apply (simp add: cap_lift_def Let_def cap_get_tag_def cap_tag_defs cte_to_H_def cap_to_H_def mdb_node_to_H_def + mdb_node_lift_def nullMDBNode_def c_valid_cte_def) + done + +lemma createObjects_ccorres_cte: + defines "ko \ (KOCTE (makeObject :: cte))" + shows "\\ x. (\, x) \ rf_sr \ ptr \ 0 + \ pspace_aligned' \ \ pspace_distinct' \ + \ pspace_no_overlap' ptr sz \ + \ ret_zero ptr (n * 2 ^ objBitsKO ko) \ + \ region_is_zero_bytes ptr (n * 2 ^ objBitsKO ko) x + \ range_cover ptr sz (objBitsKO ko) n + \ {ptr ..+ n * (2 ^ objBitsKO ko)} \ kernel_data_refs = {} + \ + (\\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs n ptr ko) (ksPSpace \)\, + x\globals := globals x + \t_hrs_' := hrs_htd_update (ptr_retyps_gen n (Ptr ptr :: cte_C ptr) True) + (t_hrs_' (globals x))\\) \ rf_sr" + (is "\\ x. ?P \ x \ + (\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr") +proof (intro impI allI) + fix \ x + let ?thesis = "(\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr" + let ?ks = "?ks \" + let ?ks' = "?ks' x" + let ?ptr = "Ptr ptr :: cte_C ptr" + + assume "?P \ x" + hence rf: "(\, x) \ rf_sr" + and cover: "range_cover ptr sz (objBitsKO ko) n" + and al: "is_aligned ptr (objBitsKO ko)" and ptr0: "ptr \ 0" + and sz: "objBitsKO ko \ sz" + and szb: "sz < word_bits" + and pal: "pspace_aligned' \" and pdst: "pspace_distinct' \" + and pno: "pspace_no_overlap' ptr sz \" + and rzo: "ret_zero ptr (n * 2 ^ objBitsKO ko) \" + and empty: "region_is_bytes ptr (n * (2 ^ objBitsKO ko)) x" + and zero: "heap_list_is_zero (hrs_mem (t_hrs_' (globals x))) ptr (n * (2 ^ objBitsKO ko))" + and rc: "range_cover ptr sz (objBitsKO ko) n" + and kdr: "{ptr..+n * 2 ^ objBitsKO ko} \ kernel_data_refs = {}" + by (clarsimp simp:range_cover_def[where 'a=machine_word_len, folded word_bits_def])+ + + (* obj specific *) + have mko: "\dev. makeObjectKO dev (Inr (APIObjectType ArchTypes_H.apiobject_type.CapTableObject)) = Some ko" + by (simp add: ko_def makeObjectKO_def) + + note relrl = ccte_relation_makeObject + + (* /obj specific *) + + (* s/obj/obj'/ *) + have szo: "size_of TYPE(cte_C) = 2 ^ objBitsKO ko" by (simp add: size_of_def objBits_simps' ko_def) + have szo': "n * 2 ^ objBitsKO ko = n * size_of TYPE(cte_C)" using sz + apply (subst szo) + apply (simp add: power_add [symmetric]) + done + + note rl' = cslift_ptr_retyp_other_inst[OF empty cover szo' szo] + + (* rest is generic *) + note rl = projectKO_opt_retyp_other [OF rc pal pno ko_def] + note cterl = retype_ctes_helper [OF pal pdst pno al sz szb mko rc, simplified] + note ht_rl = clift_eq_h_t_valid_eq[OF rl', OF tag_disj_via_td_name, simplified] + uinfo_array_tag_n_m_not_le_typ_name + + have guard: + "\b< n. 
c_guard (CTypesDefs.ptr_add ?ptr (of_nat b))" + apply (rule retype_guard_helper[where m=3, OF cover ptr0 szo]) + apply (simp add: ko_def objBits_simps' align_of_def)+ + done + + note irq = h_t_valid_eq_array_valid[where 'a=cte_C] + h_t_array_valid_ptr_retyps_gen[where p="Ptr ptr", simplified, OF szo empty] + + with rf have irq: "h_t_valid (hrs_htd ?ks') c_guard intStateIRQNode_array_Ptr" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (simp add: hrs_htd_update h_t_valid_eq_array_valid) + apply (simp add: h_t_array_valid_ptr_retyps_gen[OF szo] empty) + done + + note if_cong[cong] (* needed by some of the [simplified]'s below. *) + from rf have "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals x))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + hence "cpspace_relation ?ks (underlying_memory (ksMachineState \)) ?ks'" + unfolding cpspace_relation_def + apply - + supply image_cong_simp [cong del] + apply (clarsimp simp: rl' cterl tag_disj_via_td_name foldr_upd_app_if [folded data_map_insert_def]) + apply (subst clift_ptr_retyps_gen_prev_memset_same[OF guard _ _ szo' _ zero], + simp_all only: szo empty, simp_all) + apply (rule range_cover.strong_times_64[OF cover refl]) + apply (simp add: ptr_add_to_new_cap_addrs [OF szo] ht_rl) + apply (simp add: rl projectKO_opt_retyp_same projectKOs) + apply (simp add: ko_def projectKO_opt_retyp_same projectKOs cong: if_cong) + apply (subst makeObject_cte[symmetric]) + apply (erule cmap_relation_retype) + apply (rule relrl[simplified szo ko_def]) + done + + thus ?thesis using rf empty kdr irq rzo + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' tag_disj_via_td_name) + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (simp add: rl' cterl tag_disj_via_td_name h_t_valid_clift_Some_iff) + apply (clarsimp simp: hrs_htd_update ptr_retyps_htd_safe_neg szo + kernel_data_refs_domain_eq_rotate + rl foldr_upd_app_if [folded data_map_insert_def] projectKOs + zero_ranges_ptr_retyps + ht_rl cvariable_array_ptr_retyps[OF szo]) + done +qed + +lemma h_t_valid_ptr_retyps_gen_disjoint_iff: + "\ {ptr_val p..+ size_of TYPE('b)} \ {ptr_val ptr..+n * size_of TYPE('a)} = {} \ \ + ptr_retyps_gen n (ptr::'a::mem_type ptr) arr d \\<^sub>t (p::'b::c_type ptr) = d \\<^sub>t p" + apply (cases "0 < size_td (typ_info_t TYPE('b))"; cases "c_guard p"; + clarsimp simp: h_t_valid_def valid_footprint_def Let_def) + apply (rule forall_eq[rule_format], rule iff_impI) + apply (subgoal_tac "ptr_val p + of_nat y \ {ptr_val ptr..+n * size_of TYPE('a)}") + apply (simp add: ptr_retyps_gen_out) + apply clarsimp + apply (drule intvlD) + apply (clarsimp simp: disjoint_iff_not_equal ) + apply (drule_tac x = "ptr_val p + of_nat y" in bspec) + apply (rule intvlI) + apply (simp add: size_of_def) + apply (drule_tac x = "ptr_val ptr + of_nat k" in bspec) + apply (erule intvlI) + apply simp + done + +lemma h_t_valid_ptr_retyps_gen_disjoint: + "\ d \\<^sub>t p; {ptr_val p..+ size_of TYPE('b)} \ {ptr_val ptr..+n * size_of TYPE('a)} = {} \ \ + ptr_retyps_gen n (ptr::'a::mem_type ptr) arr d \\<^sub>t (p::'b::mem_type ptr)" + using h_t_valid_ptr_retyps_gen_disjoint_iff[where arr=arr and d=d] by auto + +lemma range_cover_intvl: +assumes cover: "range_cover (ptr :: 'a :: len word) sz us n" +assumes not0 : "n \ 0" +shows "{ptr..+n * 2 ^ us} = {ptr..ptr + (of_nat n * 2 ^ us - 1)}" + proof + have not0' : "(0 :: 'a word) < of_nat n * (2 :: 'a word) ^ us" + using range_cover_not_zero_shift[OF _ 
cover,where gbits = "us"] + apply (simp add:not0 shiftl_t2n field_simps) + apply unat_arith + done + + show "{ptr..+n * 2 ^ us} \ {ptr..ptr + (of_nat n* 2 ^ us - 1)}" + using not0 not0' + apply (clarsimp simp:intvl_def) + apply (intro conjI) + apply (rule word_plus_mono_right2[rotated,where b = "of_nat n * 2^us - 1"]) + apply (subst le_m1_iff_lt[THEN iffD1]) + apply (simp add:not0') + apply (rule word_of_nat_less) + apply (clarsimp simp: range_cover.unat_of_nat_shift[OF cover] field_simps) + apply (clarsimp simp: field_simps) + apply (erule range_cover_bound[OF cover]) + apply (rule word_plus_mono_right) + apply (subst le_m1_iff_lt[THEN iffD1]) + apply (simp add:not0') + apply (rule word_of_nat_less) + apply (clarsimp simp: range_cover.unat_of_nat_shift[OF cover] field_simps) + apply (clarsimp simp: field_simps) + apply (erule range_cover_bound[OF cover]) + done + show "{ptr..ptr + (of_nat n * 2 ^ us - 1)} \ {ptr..+n * 2 ^ us}" + using not0 not0' + apply (clarsimp simp:intvl_def) + apply (rule_tac x = "unat (x - ptr)" in exI) + apply simp + apply (simp add:field_simps) + apply (rule unat_less_helper) + apply (subst le_m1_iff_lt[THEN iffD1,symmetric]) + apply (simp add:field_simps not0 range_cover_not_zero_shift[unfolded shiftl_t2n,OF _ _ le_refl]) + apply (rule word_diff_ls') + apply (simp add:field_simps) + apply simp + done + qed + +lemma aligned_new_cap_addrs_eq_base: + "is_aligned p bits \ is_aligned ptr bits + \ n = 2 ^ (bits - objBitsKO ko) + \ objBitsKO ko = shft + \ y < of_nat n + \ (p + (y << shft) \ set (new_cap_addrs n ptr ko)) = (p = ptr)" + apply (erule is_aligned_get_word_bits) + apply (rule iffI) + apply (clarsimp simp: new_cap_addrs_def) + apply (rule ccontr, drule(2) aligned_neq_into_no_overlap) + apply (simp only: field_simps upto_intvl_eq[symmetric]) + apply (drule equals0D, erule notE, rule_tac c="p + (y << shft)" in IntI) + apply (simp(no_asm) add: offs_in_intvl_iff) + apply (rule unat_less_helper, simp, rule shiftl_less_t2n; simp) + apply (simp add: offs_in_intvl_iff) + apply (rule unat_less_helper, simp, rule shiftl_less_t2n; simp add: word_of_nat_less) + apply (simp add: new_cap_addrs_def) + apply (rule_tac x="unat y" in image_eqI; simp add: unat_less_helper) + apply (erule is_aligned_get_word_bits; simp) + apply (simp add: new_cap_addrs_def) + apply (rule_tac x="unat y" in image_eqI; simp add: unat_less_helper) + done + +lemma cmap_relation_array_add_array[OF refl]: + "ptrf = Ptr \ carray_map_relation n ahp chp ptrf + \ is_aligned p n + \ ahp' = (\x. if x \ set (new_cap_addrs sz p ko) then Some v else ahp x) + \ (\x. chp x \ is_aligned (ptr_val x) n \ \y. 
chp' y = (y = ptrf p | chp y)) + \ sz = 2 ^ (n - objBits v) + \ objBitsKO ko = objBitsKO (injectKOS v) + \ objBits v \ n \ n < word_bits + \ carray_map_relation n ahp' chp' ptrf" + apply (clarsimp simp: carray_map_relation_def objBits_koTypeOf + objBitsT_koTypeOf[symmetric] + koTypeOf_injectKO + simp del: objBitsT_koTypeOf) + apply (drule meta_mp) + apply auto[1] + apply (case_tac "pa = p"; clarsimp) + apply (subst if_P; simp add: new_cap_addrs_def) + apply (rule_tac x="unat ((p' && mask n) >> objBitsKO ko)" in image_eqI) + apply (simp add: shiftr_shiftl1 is_aligned_andI1 add.commute + word_plus_and_or_coroll2) + apply (simp, rule unat_less_helper, simp, rule shiftr_less_t2n) + apply (simp add: and_mask_less_size word_size word_bits_def) + apply (case_tac "chp (ptrf pa)", simp_all) + apply (drule spec, drule(1) iffD2) + apply (auto split: if_split)[1] + apply (drule_tac x=pa in spec, clarsimp) + apply (drule_tac x=p' in spec, clarsimp split: if_split_asm) + apply (clarsimp simp: new_cap_addrs_def) + apply (subst(asm) is_aligned_add_helper, simp_all) + apply (rule shiftl_less_t2n, rule word_of_nat_less, simp_all add: word_bits_def) + done + +lemma createObjects_ccorres_pte_pt: + defines "ko \ (KOArch (KOPTE (makeObject :: pte)))" + (* unfortunately, because of the pt_Ptr in ptr_retyps below, we can't make this lemma generic *) + defines "pt_t \ NormalPT_T" + shows "\\ x. (\, x) \ rf_sr \ ptr \ 0 + \ pspace_aligned' \ \ pspace_distinct' \ + \ pspace_no_overlap' ptr sz \ + \ ret_zero ptr (2 ^ (ptBits pt_t)) \ + \ region_is_zero_bytes ptr (2 ^ (ptBits pt_t)) x + \ range_cover ptr sz (ptBits pt_t) 1 + \ valid_global_refs' s + \ kernel_data_refs \ {ptr..+ 2 ^ (ptBits pt_t) } = {} \ + (\\ksPSpace := foldr (\addr. data_map_insert addr ko) + (new_cap_addrs (2 ^ (ptTranslationBits pt_t)) ptr ko) (ksPSpace \)\, + x\globals := globals x + \t_hrs_' := hrs_htd_update (ptr_retyps_gen 1 (pt_Ptr ptr) False) + (t_hrs_' (globals x))\\) \ rf_sr" + (is "\\ x. ?P \ x \ + (\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr") +proof (intro impI allI) + define array_len where "array_len \ pt_array_len" + note array_len_def = pt_array_len_val array_len_def + + fix \ x + let ?thesis = "(\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr" + let ?ks = "?ks \" + let ?ks' = "?ks' x" + let ?ptr = "pt_Ptr ptr" + assume "?P \ x" + hence rf: "(\, x) \ rf_sr" + and cover: "range_cover ptr sz (ptBits pt_t) 1" + and al: "is_aligned ptr (ptBits pt_t)" + and ptr0: "ptr \ 0" + and sz: "(ptBits pt_t) \ sz" + and szb: "sz < word_bits" + and pal: "pspace_aligned' \" + and pdst: "pspace_distinct' \" + and pno: "pspace_no_overlap' ptr sz \" + and rzo: "ret_zero ptr (2 ^ ptBits pt_t) \" + and empty: "region_is_bytes ptr (2 ^ ptBits pt_t) x" + and zero: "heap_list_is_zero (hrs_mem (t_hrs_' (globals x))) ptr (2 ^ ptBits pt_t)" + and kernel_data_refs_disj : "kernel_data_refs \ {ptr..+ 2 ^ (ptBits pt_t)} = {}" + by (clarsimp simp:range_cover_def[where 'a=machine_word_len, folded word_bits_def])+ + + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + (* obj specific *) + have mko: "\dev. 
makeObjectKO dev (Inr AARCH64_H.PageTableObject) = Some ko" + by (simp add: ko_def makeObjectKO_def) + + have relrl: + "cpte_relation makeObject (from_bytes (replicate (size_of TYPE(pte_C)) 0))" + unfolding cpte_relation_def + supply if_cong[cong] + apply (simp add: Let_def makeObject_pte size_of_def pte_lift_def) + apply (simp add: from_bytes_def) + apply (simp add: typ_info_simps pte_C_tag_def pte_lift_def + size_td_lt_final_pad size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def) + apply (simp add: typ_info_array array_tag_def eval_nat_numeral) + apply (simp add: array_tag_n.simps) + apply (simp add: final_pad_def size_td_lt_ti_typ_pad_combine Let_def + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def) + apply (simp add: update_ti_t_machine_word_0s pte_get_tag_def pte_tag_defs) + done + + (* /obj specific *) + + (* s/obj/obj'/ *) + have szo: "size_of TYPE(pte_C[pt_array_len]) = 2 ^ ptBits pt_t" + by (simp add: size_of_def size_td_array bit_simps pt_t_def) + have szo2[unfolded array_len_def]: "array_len * size_of TYPE(pte_C) = 2 ^ ptBits pt_t" + by (simp add: szo[symmetric] bit_simps pt_t_def array_len_def) + have szo': "size_of TYPE(pte_C) = 2 ^ objBitsKO ko" + by (simp add: objBits_simps ko_def bit_simps) + + note rl' = cslift_ptr_retyp_other_inst[where n=1, + simplified, OF empty, simplified, OF cover[simplified] + szo[symmetric] szo[simplified bit_simps_corres]] + + have sz_weaken: "objBitsKO ko \ ptBits pt_t" + by (simp add: objBits_simps ko_def bit_simps) + have cover'[unfolded array_len_def]: "range_cover ptr sz (objBitsKO ko) array_len" + apply (rule range_cover_rel[OF cover sz_weaken]) + apply (simp add: ptBits_def objBits_simps ko_def bit_simps pt_t_def array_len_def) + done + from sz sz_weaken have sz': "objBitsKO ko \ sz" by simp + note al' = is_aligned_weaken[OF al sz_weaken] + + have koT: "koTypeOf ko = ArchT PTET" + by (simp add: ko_def) + + note rl = projectKO_opt_retyp_other [OF cover' pal pno ko_def] + note cterl = retype_ctes_helper [OF pal pdst pno al' sz' szb mko cover'] + + have guard: "c_guard ?ptr" + using al[simplified bit_simps] + apply - + apply (rule is_aligned_c_guard[where n="ptBits pt_t" and m=3]) + apply (simp_all add: align_td_array align_of_def bit_simps ptr0 split: if_split) + done + + have guard'[unfolded array_len_def]: "\n < array_len. 
c_guard (pte_Ptr ptr +\<^sub>p int n)" + unfolding array_len_def + using al[simplified bit_simps] + apply - + apply (rule retype_guard_helper [OF cover' ptr0 szo', where m=3]) + apply (simp_all add: objBits_simps ko_def align_of_def bit_simps) + done + + note ptr_retyps.simps[simp del] + + from rf have pterl: "cmap_relation (map_to_ptes (ksPSpace \)) (cslift x) Ptr cpte_relation" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def cpspace_relation_def) + + note ht_rl = clift_eq_h_t_valid_eq[OF rl', OF tag_disj_via_td_name, simplified] + uinfo_array_tag_n_m_not_le_typ_name + + have ptTranslationBits_num[unfolded array_len_def]: + "2 ^ ptTranslationBits pt_t = array_len" + by (simp add: bit_simps array_len_def pt_t_def) + + from rf have "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals x))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + hence "cpspace_relation ?ks (underlying_memory (ksMachineState \)) ?ks'" + unfolding cpspace_relation_def + supply image_cong_simp [cong del] + apply (clarsimp simp: rl' cterl cte_C_size tag_disj_via_td_name ptTranslationBits_num + foldr_upd_app_if [folded data_map_insert_def]) + apply (simp add: ptr_retyp_to_array[simplified]) + apply (subst clift_ptr_retyps_gen_prev_memset_same[OF guard'], simp_all only: szo2 empty) + apply simp + apply (simp(no_asm) add: bit_simps word_bits_def pt_t_def) + apply (simp add: zero[simplified]) + apply (simp add: rl del: pte_C_size) + apply (simp add: rl projectKO_opt_retyp_same ko_def Let_def + ptr_add_to_new_cap_addrs [OF szo'] + cong: if_cong del: pte_C_size) + apply (erule cmap_relation_retype) + apply (insert relrl, auto) + done + + with rf empty kernel_data_refs_disj rzo + show ?thesis + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' tag_disj_via_td_name) + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (clarsimp simp: rl' cterl tag_disj_via_td_name pt_t_def bit_simps + hrs_htd_update ht_rl foldr_upd_app_if [folded data_map_insert_def] rl + cvariable_array_ptr_retyps[OF szo] + zero_ranges_ptr_retyps[where p="pt_Ptr ptr", simplified szo]) + apply (subst h_t_valid_ptr_retyps_gen_disjoint, assumption) + apply (simp add:szo cte_C_size cte_level_bits_def) + apply (erule disjoint_subset) + apply (simp add: pt_t_def bit_simps del: replicate_numeral) + apply (subst h_t_valid_ptr_retyps_gen_disjoint, assumption) + apply (simp add:szo cte_C_size cte_level_bits_def) + apply (erule disjoint_subset) + apply (simp add: pt_t_def bit_simps del: replicate_numeral) + apply (simp add: szo ptr_retyps_htd_safe_neg hrs_htd_def Int_ac + kernel_data_refs_domain_eq_rotate bit_simps + del: replicate_numeral) + done + +qed + +lemma createObjects_ccorres_pte_vs: + defines "ko \ (KOArch (KOPTE (makeObject :: pte)))" + (* see createObjects_ccorres_pte_pt why this can't be generic *) + defines "pt_t \ VSRootPT_T" + shows "\\ x. (\, x) \ rf_sr \ ptr \ 0 + \ pspace_aligned' \ \ pspace_distinct' \ + \ pspace_no_overlap' ptr sz \ + \ ret_zero ptr (2 ^ (ptBits pt_t)) \ + \ region_is_zero_bytes ptr (2 ^ (ptBits pt_t)) x + \ range_cover ptr sz (ptBits pt_t) 1 + \ valid_global_refs' s + \ kernel_data_refs \ {ptr..+ 2 ^ (ptBits pt_t) } = {} \ + (\\ksPSpace := foldr (\addr. data_map_insert addr ko) + (new_cap_addrs (2 ^ (ptTranslationBits pt_t)) ptr ko) (ksPSpace \)\, + x\globals := globals x + \t_hrs_' := hrs_htd_update (ptr_retyps_gen 1 (vs_Ptr ptr) False) + (t_hrs_' (globals x))\\) \ rf_sr" + (is "\\ x. 
?P \ x \ + (\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr") +proof (intro impI allI) + define array_len where "array_len \ vs_array_len" + note array_len_def = vs_array_len_val array_len_def + + fix \ x + let ?thesis = "(\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr" + let ?ks = "?ks \" + let ?ks' = "?ks' x" + let ?ptr = "vs_Ptr ptr" + assume "?P \ x" + hence rf: "(\, x) \ rf_sr" + and cover: "range_cover ptr sz (ptBits pt_t) 1" + and al: "is_aligned ptr (ptBits pt_t)" + and ptr0: "ptr \ 0" + and sz: "(ptBits pt_t) \ sz" + and szb: "sz < word_bits" + and pal: "pspace_aligned' \" + and pdst: "pspace_distinct' \" + and pno: "pspace_no_overlap' ptr sz \" + and rzo: "ret_zero ptr (2 ^ ptBits pt_t) \" + and empty: "region_is_bytes ptr (2 ^ ptBits pt_t) x" + and zero: "heap_list_is_zero (hrs_mem (t_hrs_' (globals x))) ptr (2 ^ ptBits pt_t)" + and kernel_data_refs_disj : "kernel_data_refs \ {ptr..+ 2 ^ (ptBits pt_t)} = {}" + by (clarsimp simp:range_cover_def[where 'a=machine_word_len, folded word_bits_def])+ + + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + (* obj specific *) + have mko: "\dev. makeObjectKO dev (Inr AARCH64_H.PageTableObject) = Some ko" + by (simp add: ko_def makeObjectKO_def) + + have relrl: + "cpte_relation makeObject (from_bytes (replicate (size_of TYPE(pte_C)) 0))" + unfolding cpte_relation_def + supply if_cong[cong] + apply (simp add: Let_def makeObject_pte size_of_def pte_lift_def) + apply (simp add: from_bytes_def) + apply (simp add: typ_info_simps pte_C_tag_def pte_lift_def + size_td_lt_final_pad size_td_lt_ti_typ_pad_combine Let_def size_of_def) + apply (simp add: final_pad_def Let_def size_td_lt_ti_typ_pad_combine + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def) + apply (simp add: typ_info_array array_tag_def eval_nat_numeral) + apply (simp add: array_tag_n.simps) + apply (simp add: final_pad_def size_td_lt_ti_typ_pad_combine Let_def + size_of_def padup_def align_td_array' size_td_array update_ti_adjust_ti + ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def) + apply (simp add: update_ti_t_machine_word_0s pte_get_tag_def pte_tag_defs) + done + + (* /obj specific *) + + (* s/obj/obj'/ *) + have szo: "size_of TYPE(pte_C[vs_array_len]) = 2 ^ ptBits pt_t" + by (simp add: size_of_def size_td_array pt_bits_def table_size_def pt_t_def + ptTranslationBits_vs_array_len power_add pte_bits_def word_size_bits_def) + have szo2[unfolded array_len_def]: "array_len * size_of TYPE(pte_C) = 2 ^ ptBits pt_t" + by (simp add: szo[symmetric] pt_bits_def table_size_def pt_t_def array_len_def + ptTranslationBits_vs_array_len power_add pte_bits_def word_size_bits_def) + have szo': "size_of TYPE(pte_C) = 2 ^ objBitsKO ko" + by (simp add: objBits_simps ko_def bit_simps) + + note rl' = cslift_ptr_retyp_other_inst[where n=1, + simplified, OF empty, simplified, OF cover[simplified] + szo[symmetric] szo[simplified bit_simps_corres]] + + have sz_weaken: "objBitsKO ko \ ptBits pt_t" + by (simp add: objBits_simps ko_def bit_simps) + have cover'[unfolded array_len_def]: "range_cover ptr sz (objBitsKO ko) array_len" + apply (rule range_cover_rel[OF cover sz_weaken]) + apply (simp add: ptBits_def objBits_simps ko_def pt_bits_def table_size_def pt_t_def + array_len_def ptTranslationBits_vs_array_len) + done + from sz sz_weaken have sz': "objBitsKO ko \ 
sz" by simp + note al' = is_aligned_weaken[OF al sz_weaken] + + have koT: "koTypeOf ko = ArchT PTET" + by (simp add: ko_def) + + note rl = projectKO_opt_retyp_other [OF cover' pal pno ko_def] + note cterl = retype_ctes_helper [OF pal pdst pno al' sz' szb mko cover'] + + have guard: "c_guard ?ptr" + using al[simplified bit_simps] + apply - + apply (rule is_aligned_c_guard[where n="ptBits pt_t" and m=3]) + apply (simp_all add: align_td_array align_of_def bit_simps ptr0 split: if_split) + done + + have guard'[unfolded array_len_def]: "\n < array_len. c_guard (pte_Ptr ptr +\<^sub>p int n)" + unfolding array_len_def + using al[simplified bit_simps] + apply - + apply (rule retype_guard_helper [OF cover' ptr0 szo', where m=3]) + apply (simp_all add: objBits_simps ko_def align_of_def bit_simps) + done + + note ptr_retyps.simps[simp del] + + from rf have pterl: "cmap_relation (map_to_ptes (ksPSpace \)) (cslift x) Ptr cpte_relation" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def cpspace_relation_def) + + note ht_rl = clift_eq_h_t_valid_eq[OF rl', OF tag_disj_via_td_name, simplified] + uinfo_array_tag_n_m_not_le_typ_name + + have ptTranslationBits_num[unfolded array_len_def]: + "2 ^ ptTranslationBits pt_t = array_len" + by (simp add: array_len_def pt_t_def ptTranslationBits_vs_array_len) + + from rf have "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals x))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + hence "cpspace_relation ?ks (underlying_memory (ksMachineState \)) ?ks'" + unfolding cpspace_relation_def + supply image_cong_simp [cong del] + apply (clarsimp simp: rl' cterl cte_C_size tag_disj_via_td_name ptTranslationBits_num + foldr_upd_app_if [folded data_map_insert_def]) + apply (simp add: ptr_retyp_to_array[simplified]) + apply (subst clift_ptr_retyps_gen_prev_memset_same[OF guard'], simp_all only: szo2 empty) + apply simp + apply (simp(no_asm) add: bit_simps word_bits_def pt_t_def split: if_split) + apply (simp add: zero[simplified]) + apply (simp add: rl del: pte_C_size) + apply (simp add: rl projectKO_opt_retyp_same ko_def Let_def + ptr_add_to_new_cap_addrs [OF szo'] + cong: if_cong del: pte_C_size) + apply (erule cmap_relation_retype) + apply (insert relrl, auto) + done + + with rf empty kernel_data_refs_disj rzo + show ?thesis + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' tag_disj_via_td_name) + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (clarsimp simp: rl' cterl tag_disj_via_td_name pt_t_def ptTranslationBits_vs_array_len + pt_bits_def table_size_def power_add pte_bits_def word_size_bits_def + hrs_htd_update ht_rl foldr_upd_app_if [folded data_map_insert_def] rl + cvariable_array_ptr_retyps[OF szo] + zero_ranges_ptr_retyps[where p="pt_Ptr ptr", simplified szo]) + apply (subst h_t_valid_ptr_retyps_gen_disjoint, assumption) + apply (simp add:szo cte_C_size cte_level_bits_def) + apply (erule disjoint_subset) + apply (simp del: replicate_numeral) + apply (subst h_t_valid_ptr_retyps_gen_disjoint, assumption) + apply (simp add:szo cte_C_size cte_level_bits_def) + apply (erule disjoint_subset) + apply (simp del: replicate_numeral) + apply (simp add: szo ptr_retyps_htd_safe_neg hrs_htd_def Int_ac + kernel_data_refs_domain_eq_rotate + del: replicate_numeral) + done + +qed + +definition object_type_from_H :: "object_type \ machine_word" where + "object_type_from_H tp' \ case tp' of + APIObjectType x \ + (case x of ArchTypes_H.apiobject_type.Untyped \ scast 
seL4_UntypedObject + | ArchTypes_H.apiobject_type.TCBObject \ scast seL4_TCBObject + | ArchTypes_H.apiobject_type.EndpointObject \ scast seL4_EndpointObject + | ArchTypes_H.apiobject_type.NotificationObject \ scast seL4_NotificationObject + | ArchTypes_H.apiobject_type.CapTableObject \ scast seL4_CapTableObject) + | AARCH64_H.SmallPageObject \ scast seL4_ARM_SmallPageObject + | AARCH64_H.LargePageObject \ scast seL4_ARM_LargePageObject + | AARCH64_H.HugePageObject \ scast seL4_ARM_HugePageObject + | AARCH64_H.VSpaceObject \ scast seL4_ARM_VSpaceObject + | AARCH64_H.PageTableObject \ scast seL4_ARM_PageTableObject + | AARCH64_H.VCPUObject \ scast seL4_ARM_VCPUObject" + +lemmas nAPIObjects_def = seL4_NonArchObjectTypeCount_def + +lemma nAPIOBjects_object_type_from_H: + "(scast nAPIObjects <=s object_type_from_H tp') = (toAPIType tp' = None)" + by (simp add: toAPIType_def nAPIObjects_def + object_type_from_H_def word_sle_def api_object_defs StrictC'_object_defs + seL4_ARM_HugePageObject_def seL4_ARM_VSpaceObject_def + split: AARCH64_H.object_type.splits ArchTypes_H.apiobject_type.splits) + +definition + object_type_to_H :: "machine_word \ object_type" + where + "object_type_to_H x \ + (if (x = scast seL4_UntypedObject) then APIObjectType ArchTypes_H.apiobject_type.Untyped else ( + if (x = scast seL4_TCBObject) then APIObjectType ArchTypes_H.apiobject_type.TCBObject else ( + if (x = scast seL4_EndpointObject) then APIObjectType ArchTypes_H.apiobject_type.EndpointObject else ( + if (x = scast seL4_NotificationObject) then APIObjectType ArchTypes_H.apiobject_type.NotificationObject else ( + if (x = scast seL4_CapTableObject) then APIObjectType ArchTypes_H.apiobject_type.CapTableObject else ( + if (x = scast seL4_ARM_SmallPageObject) then AARCH64_H.SmallPageObject else ( + if (x = scast seL4_ARM_LargePageObject) then AARCH64_H.LargePageObject else ( + if (x = scast seL4_ARM_HugePageObject) then AARCH64_H.HugePageObject else ( + if (x = scast seL4_ARM_VSpaceObject) then AARCH64_H.VSpaceObject else ( + if (x = scast seL4_ARM_PageTableObject) then AARCH64_H.PageTableObject else ( + if (x = scast seL4_ARM_VCPUObject) then AARCH64_H.VCPUObject else + undefined)))))))))))" + +lemmas Kernel_C_defs = + seL4_UntypedObject_def + seL4_TCBObject_def + seL4_EndpointObject_def + seL4_NotificationObject_def + seL4_CapTableObject_def + seL4_ARM_SmallPageObject_def + seL4_ARM_LargePageObject_def + seL4_ARM_HugePageObject_def + seL4_ARM_VSpaceObject_def + seL4_ARM_PageTableObject_def + seL4_ARM_VCPUObject_def + Kernel_C.asidLowBits_def + Kernel_C.asidHighBits_def + +abbreviation(input) + "Basic_htd_update f == + (Basic (globals_update (t_hrs_'_update (hrs_htd_update f))))" + +lemma object_type_to_from_H [simp]: "object_type_to_H (object_type_from_H x) = x" + apply (clarsimp simp: object_type_from_H_def object_type_to_H_def Kernel_C_defs) + by (clarsimp split: object_type.splits apiobject_type.splits simp: Kernel_C_defs) + +lemma fromEnum_object_type_to_H: + "fromEnum x = unat (object_type_from_H x)" + apply (cut_tac eqset_imp_iff[where x=x, OF enum_surj]) + apply (simp add: fromEnum_def enum_object_type + enum_apiobject_type + object_type_from_H_def + "StrictC'_object_defs" "api_object_defs" + Kernel_C_defs + split: if_split) + apply (auto simp: "api_object_defs") + done + +declare ptr_retyps_one[simp] + +(* FIXME: move *) +lemma ccorres_return_C_Seq: + "ccorres_underlying sr \ r rvxf arrel xf P P' hs X (return_C xfu v) \ + ccorres_underlying sr \ r rvxf arrel xf P P' hs X (return_C xfu v ;; Z)" + apply (clarsimp 
simp: return_C_def) + apply (erule ccorres_semantic_equiv0[rotated]) + apply (rule semantic_equivI) + apply (clarsimp simp: exec_assoc[symmetric]) + apply (rule exec_Seq_cong, simp) + apply (clarsimp simp: exec_assoc[symmetric]) + apply (rule exec_Seq_cong, simp) + apply (rule iffI) + apply (auto elim!:exec_Normal_elim_cases intro: exec.Throw exec.Seq)[1] + apply (auto elim!:exec_Normal_elim_cases intro: exec.Throw) + done + +(* FIXME: move *) +lemma ccorres_rewrite_while_guard: + assumes rl: "\s. s \ R \ (s \ P) = (s \ P')" + and cc: "ccorres r xf G G' hs a (While P' b)" + shows "ccorres r xf G (G' \ R) hs a (While P' b)" +proof (rule iffD1 [OF ccorres_semantic_equiv]) + show "ccorres r xf G (G' \ R) hs a (While P' b)" + by (rule ccorres_guard_imp2 [OF cc]) simp +next + fix s s' + assume "s \ G' \ R" + hence sin: "(s \ P) = (s \ P')" using rl by simp + + show "semantic_equiv \ s s' (While P' b) (While P' b)" + apply (rule semantic_equivI) + apply (simp add: sin) + done +qed + +lemma mdb_node_get_mdbNext_heap_ccorres: + "ccorres (=) ret__unsigned_longlong_' \ UNIV hs + (liftM (mdbNext \ cteMDBNode) (getCTE parent)) + (\ret__unsigned_longlong :== CALL mdb_node_get_mdbNext(h_val + (hrs_mem \t_hrs) + (Ptr &((Ptr parent :: cte_C ptr) \[''cteMDBNode_C'']))))" + apply (simp add: ccorres_liftM_simp) + apply (rule ccorres_add_return2) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_getCTE) + apply (rule_tac P = "\s. ctes_of s parent = Some x" in ccorres_from_vcg [where P' = UNIV]) + apply (rule allI, rule conseqPre) + apply vcg + apply (clarsimp simp: return_def) + apply (drule cmap_relation_cte) + apply (erule (1) cmap_relationE1) + apply (simp add: typ_heap_simps) + apply (drule ccte_relation_cmdbnode_relation) + apply (erule mdbNext_CL_mdb_node_lift_eq_mdbNext [symmetric]) + apply simp + done + +lemma getCTE_pre_cte_at: + "\\s. \ cte_at' p s \ getCTE p \ \_ _. False \" + apply (wp getCTE_wp) + apply clarsimp + done + +lemmas ccorres_getCTE_cte_at = ccorres_guard_from_wp [OF getCTE_pre_cte_at empty_fail_getCTE] + ccorres_guard_from_wp_bind [OF getCTE_pre_cte_at empty_fail_getCTE] + +lemmas ccorres_guard_from_wp_liftM = ccorres_guard_from_wp [OF liftM_pre empty_fail_liftM] +lemmas ccorres_guard_from_wp_bind_liftM = ccorres_guard_from_wp_bind [OF liftM_pre empty_fail_liftM] + +lemmas ccorres_liftM_getCTE_cte_at = ccorres_guard_from_wp_liftM [OF getCTE_pre_cte_at empty_fail_getCTE] + ccorres_guard_from_wp_bind_liftM [OF getCTE_pre_cte_at empty_fail_getCTE] + +lemma insertNewCap_ccorres_helper: + notes option.case_cong_weak [cong] + shows "ccap_relation cap rv'b + \ ccorres dc xfdc (cte_at' slot and K (is_aligned next cteSizeBits \ canonical_address next \ is_aligned parent cteSizeBits)) + UNIV hs (setCTE slot (CTE cap (MDB next parent True True))) + (Basic (\s. globals_update (t_hrs_'_update (hrs_mem_update (heap_update + (Ptr &(Ptr slot :: cte_C ptr\[''cap_C'']) :: cap_C ptr) rv'b))) s);; + \ret__struct_mdb_node_C :== CALL mdb_node_new(ptr_val (Ptr next),scast true,scast true,ptr_val (Ptr parent));; + Guard C_Guard \hrs_htd \t_hrs \\<^sub>t (Ptr slot :: cte_C ptr)\ + (Basic (\s. 
globals_update (t_hrs_'_update (hrs_mem_update (heap_update + (Ptr &(Ptr slot :: cte_C ptr\[''cteMDBNode_C'']) :: mdb_node_C ptr) + (ret__struct_mdb_node_C_' s)))) s)))" + apply simp + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre) + apply vcg + apply (clarsimp simp: Collect_const_mem cte_wp_at_ctes_of) + apply (frule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp: typ_heap_simps) + apply (rule fst_setCTE [OF ctes_of_cte_at], assumption) + apply (erule bexI [rotated]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp add: rf_sr_def cstate_relation_def typ_heap_simps Let_def cpspace_relation_def) + apply (rule conjI) + apply (erule (2) cmap_relation_updI) + apply (simp add: ccap_relation_def ccte_relation_def cte_lift_def) + subgoal by (simp add: cte_to_H_def map_option_Some_eq2 mdb_node_to_H_def to_bool_mask_to_bool_bf + c_valid_cte_def mask_shiftl_decompose canonical_make_canonical_idem + cteSizeBits_def + split: option.splits + flip: canonical_bit_def) + subgoal by simp + apply (erule_tac t = s' in ssubst) + apply (simp cong: lifth_update) + apply (rule conjI) + apply (erule (1) setCTE_tcb_case) + apply (simp add: carch_state_relation_def cmachine_state_relation_def + typ_heap_simps + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"]) + done + +definition + byte_regions_unmodified :: "heap_raw_state \ heap_raw_state \ bool" +where + "byte_regions_unmodified hrs hrs' \ \x. (\n td b. snd (hrs_htd hrs x) n = Some (td, b) + \ td = typ_uinfo_t TYPE (word8)) + \ snd (hrs_htd hrs x) 0 \ None + \ hrs_mem hrs' x = hrs_mem hrs x" + +abbreviation + byte_regions_unmodified' :: "globals myvars \ globals myvars \ bool" +where + "byte_regions_unmodified' s t \ byte_regions_unmodified (t_hrs_' (globals s)) + (t_hrs_' (globals t))" + +lemma byte_regions_unmodified_refl[iff]: + "byte_regions_unmodified hrs hrs" + by (simp add: byte_regions_unmodified_def) + +lemma byte_regions_unmodified_trans: + "byte_regions_unmodified hrs hrs' + \ byte_regions_unmodified hrs' hrs'' + \ hrs_htd hrs' = hrs_htd hrs + \ byte_regions_unmodified hrs hrs''" + by (simp add: byte_regions_unmodified_def) + +lemma byte_regions_unmodified_hrs_mem_update1: + "byte_regions_unmodified hrs hrs' + \ hrs_htd hrs \\<^sub>t (p :: ('a :: wf_type) ptr) + \ hrs_htd hrs' = hrs_htd hrs + \ typ_uinfo_t TYPE ('a) \ typ_uinfo_t TYPE (word8) + \ byte_regions_unmodified hrs + (hrs_mem_update (heap_update p v) hrs')" + apply (erule byte_regions_unmodified_trans, simp_all) + apply (clarsimp simp: byte_regions_unmodified_def hrs_mem_update + heap_update_def h_t_valid_def + valid_footprint_def Let_def) + apply (rule heap_update_nmem_same) + apply (clarsimp simp: size_of_def intvl_def) + apply (drule spec, drule(1) mp, clarsimp) + apply (cut_tac s="(typ_uinfo_t TYPE('a))" and n=k in ladder_set_self) + apply (clarsimp dest!: in_set_list_map) + apply (drule(1) map_le_trans) + apply (simp add: map_le_def) + apply metis + done + +lemma byte_regions_unmodified_hrs_mem_update2: + "byte_regions_unmodified hrs hrs' + \ hrs_htd hrs \\<^sub>t (p :: ('a :: wf_type) ptr) + \ typ_uinfo_t TYPE ('a) \ typ_uinfo_t TYPE (word8) + \ byte_regions_unmodified (hrs_mem_update (heap_update p v) hrs) hrs'" + apply (erule byte_regions_unmodified_trans[rotated], simp_all) + apply (clarsimp simp: byte_regions_unmodified_def hrs_mem_update + heap_update_def h_t_valid_def + valid_footprint_def Let_def) + apply (rule sym, rule heap_update_nmem_same) + apply (clarsimp simp: size_of_def intvl_def) + apply (drule spec, drule(1) mp, 
clarsimp) + apply (cut_tac s="(typ_uinfo_t TYPE('a))" and n=k in ladder_set_self) + apply (clarsimp dest!: in_set_list_map) + apply (drule(1) map_le_trans) + apply (simp add: map_le_def) + apply metis + done + +lemmas byte_regions_unmodified_hrs_mem_update + = byte_regions_unmodified_hrs_mem_update1 + byte_regions_unmodified_hrs_mem_update2 + +lemma byte_regions_unmodified_hrs_htd_update[iff]: + "byte_regions_unmodified + (hrs_htd_update h hrs) hrs" + by (clarsimp simp: byte_regions_unmodified_def) + +lemma byte_regions_unmodified_flip: + "byte_regions_unmodified (hrs_htd_update (\_. hrs_htd hrs) hrs') hrs + \ byte_regions_unmodified hrs hrs'" + by (simp add: byte_regions_unmodified_def hrs_htd_update) + +lemma mdb_node_ptr_set_mdbPrev_preserves_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call mdb_node_ptr_set_mdbPrev_'proc + {t. hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s)) + \ byte_regions_unmodified' s t}" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: ) + apply (intro byte_regions_unmodified_hrs_mem_update byte_regions_unmodified_refl, + simp_all add: typ_heap_simps) + done + +lemma mdb_node_ptr_set_mdbNext_preserves_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call mdb_node_ptr_set_mdbNext_'proc + {t. hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s)) + \ byte_regions_unmodified' s t}" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: ) + apply (intro byte_regions_unmodified_hrs_mem_update byte_regions_unmodified_refl, + simp_all add: typ_heap_simps) + done + +lemma updateNewFreeIndex_noop_ccorres: + "ccorres dc xfdc (valid_objs' and cte_wp_at' (\cte. cteCap cte = cap) slot) + {s. (case untypedZeroRange cap of None \ True + | Some (a, b) \ region_actually_is_zero_bytes a (unat ((b + 1) - a)) s)} hs + (updateNewFreeIndex slot) Skip" + (is "ccorres _ _ ?P ?P' hs _ _") + apply (simp add: updateNewFreeIndex_def getSlotCap_def) + apply (rule ccorres_guard_imp) + apply (rule ccorres_pre_getCTE[where P="\rv. cte_wp_at' ((=) rv) slot and ?P" + and P'="K ?P'"]) + apply (case_tac "cteCap cte", simp_all add: ccorres_guard_imp[OF ccorres_return_Skip])[1] + defer + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + apply (simp add: updateTrackedFreeIndex_def getSlotCap_def) + apply (rule ccorres_guard_imp) + apply (rule_tac P="\rv. cte_wp_at' ((=) rv) slot and K (rv = cte) and ?P" + in ccorres_pre_getCTE[where P'="K ?P'"]) + defer + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: bind_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: zero_ranges_are_zero_def + cte_wp_at_ctes_of + split: option.split) + done + +lemma byte_regions_unmodified_region_is_bytes: + "byte_regions_unmodified hrs hrs' + \ region_actually_is_bytes' y n (hrs_htd hrs) + \ x \ {y ..+ n} + \ hrs_mem hrs' x = hrs_mem hrs x" + apply (clarsimp simp: byte_regions_unmodified_def imp_conjL[symmetric]) + apply (drule spec, erule mp) + apply (clarsimp simp: region_actually_is_bytes'_def) + apply (drule(1) bspec, simp split: if_split_asm) + done + + +lemma insertNewCap_ccorres1: + "ccorres dc xfdc (pspace_aligned' and pspace_canonical' and valid_mdb' and valid_objs' and valid_cap' cap) + ({s. (case untypedZeroRange cap of None \ True + | Some (a, b) \ region_actually_is_zero_bytes a (unat ((b + 1) - a)) s)} + \ {s. 
ccap_relation cap (cap_' s)} \ {s. parent_' s = Ptr parent} + \ {s. slot_' s = Ptr slot}) [] + (insertNewCap parent slot cap) + (Call insertNewCap_'proc)" + supply if_cong[cong] option.case_cong[cong] + apply (cinit (no_ignore_call) lift: cap_' parent_' slot_') + apply (rule ccorres_liftM_getCTE_cte_at) + apply (rule ccorres_move_c_guard_cte) + apply (simp only: ) + apply (rule ccorres_split_nothrow [OF mdb_node_get_mdbNext_heap_ccorres]) + apply ceqv + apply (erule_tac s = "next" in subst) + apply csymbr + apply (ctac (c_lines 3) pre: ccorres_pre_getCTE ccorres_assert add: insertNewCap_ccorres_helper) + apply (simp only: Ptr_not_null_pointer_not_zero) + apply (ctac add: updateMDB_set_mdbPrev) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (ctac add: updateMDB_set_mdbNext) + apply (rule updateNewFreeIndex_noop_ccorres[where cap=cap]) + apply (wp updateMDB_weak_cte_wp_at) + apply simp + apply (vcg exspec=mdb_node_ptr_set_mdbNext_preserves_bytes) + apply (wp updateMDB_weak_cte_wp_at) + apply clarsimp + apply (vcg exspec=mdb_node_ptr_set_mdbPrev_preserves_bytes) + apply (wp setCTE_weak_cte_wp_at) + apply (clarsimp simp: hrs_mem_update Collect_const_mem + simp del: imp_disjL) + apply vcg + apply simp + apply (wp getCTE_wp') + apply (clarsimp simp: hrs_mem_update) + apply vcg + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_ctes_of is_aligned_3_next ctes_of_aligned_bits + canonical_address_mdbNext ctes_of_canonical) + apply (clarsimp split: option.split) + apply (intro allI conjI impI; simp; clarsimp simp: region_actually_is_bytes) + apply (erule trans[OF heap_list_h_eq2, rotated]) + apply (rule byte_regions_unmodified_region_is_bytes) + apply (erule byte_regions_unmodified_trans[rotated] + | simp + | rule byte_regions_unmodified_hrs_mem_update + | simp add: typ_heap_simps')+ + apply (erule trans[OF heap_list_h_eq2, rotated]) + apply (rule byte_regions_unmodified_region_is_bytes) + apply (erule byte_regions_unmodified_trans[rotated] + | simp + | rule byte_regions_unmodified_hrs_mem_update + | simp add: typ_heap_simps')+ + done + +lemma insertNewCap_pre_cte_at: + "\\s. \ (cte_at' p s \ cte_at' p' s) \ insertNewCap p p' cap \ \_ _. False \" + unfolding insertNewCap_def + apply simp + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemmas createNewCaps_guard_helper = createNewCaps_guard[where 'a=64, folded word_bits_def] + +end + +locale insertNewCap_i_locale = kernel +begin + +lemma mdb_node_get_mdbNext_spec: + "\s. \ \\<^bsub>/UNIV\<^esub> {s} Call mdb_node_get_mdbNext_'proc {t. i_' t = i_' s}" + apply (rule allI) + apply (hoare_rule HoarePartial.ProcNoRec1) + apply vcg + apply simp + done + +lemma mdb_node_new_spec: + "\s. \ \\<^bsub>/UNIV\<^esub> {s} Call mdb_node_new_'proc {t. i_' t = i_' s}" + apply (rule allI) + apply (hoare_rule HoarePartial.ProcNoRec1) + apply vcg + apply simp + done + +lemma mdb_node_ptr_set_mdbPrev_spec: + "\s. \ \\<^bsub>/UNIV\<^esub> {s} Call mdb_node_ptr_set_mdbPrev_'proc {t. i_' t = i_' s}" + apply (rule allI) + apply (hoare_rule HoarePartial.ProcNoRec1) + apply vcg + apply simp + done + +lemma mdb_node_ptr_set_mdbNext_spec: + "\s. \ \\<^bsub>/UNIV\<^esub> {s} Call mdb_node_ptr_set_mdbNext_'proc {t. i_' t = i_' s}" + apply (rule allI) + apply (hoare_rule HoarePartial.ProcNoRec1) + apply vcg + apply simp + done + +lemma insertNewCap_spec: + "\s. \ \\<^bsub>/UNIV\<^esub> {s} Call insertNewCap_'proc {t. i_' t = i_' s}" + apply vcg + apply clarsimp + done +end + +context kernel_m +begin + +lemma insertNewCap_spec: + "\s. 
\ \\<^bsub>/UNIV\<^esub> {s} Call insertNewCap_'proc {t. i_' t = i_' s}" + apply (rule insertNewCap_i_locale.insertNewCap_spec) + apply (intro_locales) + done + +lemma ccorres_fail: + "ccorres r xf \ UNIV hs fail c" + apply (rule ccorresI') + apply (simp add: fail_def) + done + +lemma hoarep_Cond_UNIV: + "\\\<^bsub>/UNIV\<^esub> P c P', A \ + \\\<^bsub>/UNIV\<^esub> P (Cond UNIV c d) P', A" + apply (rule HoarePartial.Cond [where P\<^sub>1 = P and P\<^sub>2 = "{}"]) + apply simp + apply assumption + apply (rule HoarePartial.conseq_exploit_pre) + apply simp + done + +lemma object_type_from_H_toAPIType_simps: + "(object_type_from_H tp' = scast seL4_UntypedObject) = (toAPIType tp' = Some ArchTypes_H.apiobject_type.Untyped)" + "(object_type_from_H tp' = scast seL4_TCBObject) = (toAPIType tp' = Some ArchTypes_H.apiobject_type.TCBObject)" + "(object_type_from_H tp' = scast seL4_EndpointObject) = (toAPIType tp' = Some ArchTypes_H.apiobject_type.EndpointObject)" + "(object_type_from_H tp' = scast seL4_NotificationObject) = (toAPIType tp' = Some ArchTypes_H.apiobject_type.NotificationObject)" + "(object_type_from_H tp' = scast seL4_CapTableObject) = (toAPIType tp' = Some ArchTypes_H.apiobject_type.CapTableObject)" + "(object_type_from_H tp' = scast seL4_ARM_SmallPageObject) = (tp' = AARCH64_H.SmallPageObject)" + "(object_type_from_H tp' = scast seL4_ARM_LargePageObject) = (tp' = AARCH64_H.LargePageObject)" + "(object_type_from_H tp' = scast seL4_ARM_HugePageObject) = (tp' = AARCH64_H.HugePageObject)" + "(object_type_from_H tp' = scast seL4_ARM_VSpaceObject) = (tp' = AARCH64_H.VSpaceObject)" + "(object_type_from_H tp' = scast seL4_ARM_PageTableObject) = (tp' = AARCH64_H.PageTableObject)" + "(object_type_from_H tp' = scast seL4_ARM_VCPUObject) = (tp' = AARCH64_H.VCPUObject)" + by (auto simp: toAPIType_def Kernel_C_defs + object_type_from_H_def "StrictC'_object_defs" api_object_defs + split: object_type.splits ArchTypes_H.apiobject_type.splits) + +declare Collect_const_mem [simp] + +lemma createNewCaps_untyped_if_helper: + "\s s'. (s, s') \ rf_sr \ (sz < word_bits \ gbits < word_bits) \ True \ + (\ gbits \ sz) = (s' \ \of_nat sz < (of_nat gbits :: machine_word)\)" + by (clarsimp simp: not_le unat_of_nat64 word_less_nat_alt lt_word_bits_lt_pow) + +lemma heap_list_update': + "\ n = length v; length v \ 2 ^ word_bits \ \ heap_list (heap_update_list p v h) n p = v" + by (simp add: heap_list_update addr_card_wb) + +lemma h_t_valid_clift_Some_iff': + "td \\<^sub>t p = (clift (hp, td) p = Some (h_val hp p))" + by (simp add: lift_t_if split: if_split) + +lemma option_noneI: "\ \x. a = Some x \ False \ \ a = None" + apply (case_tac a) + apply clarsimp + apply atomize + apply clarsimp + done + +lemma projectKO_opt_retyp_other': + assumes pko: "\v. 
(projectKO_opt ko :: 'a :: pre_storable option) \ Some v" + and pno: "pspace_no_overlap' ptr (objBitsKO ko) (\ :: kernel_state)" + and pal: "pspace_aligned' (\ :: kernel_state)" + and al: "is_aligned ptr (objBitsKO ko)" + shows "projectKO_opt \\<^sub>m ((ksPSpace \)(ptr \ ko)) + = (projectKO_opt \\<^sub>m (ksPSpace \) :: machine_word \ 'a :: pre_storable option)" (is "?LHS = ?RHS") +proof (rule ext) + fix x + show "?LHS x = ?RHS x" + proof (cases "x = ptr") + case True + hence "x \ {ptr..(ptr && ~~ mask (objBitsKO ko)) + 2 ^ objBitsKO ko - 1}" + apply (rule ssubst) + apply (insert al) + apply (clarsimp simp: is_aligned_def) + done + hence "ksPSpace \ x = None" using pno + apply - + apply (rule option_noneI) + apply (frule pspace_no_overlap_disjoint'[rotated]) + apply (rule pal) + apply (drule domI[where a = x]) + apply blast + done + thus ?thesis using True pko by simp + next + case False + thus ?thesis by (simp add: map_comp_def) + qed +qed + +lemma dom_tcb_cte_cases_iff: + "(x \ dom tcb_cte_cases) = (\y < 5. unat x = y * (2^cteSizeBits))" + unfolding tcb_cte_cases_def + by (auto simp: unat_arith_simps objBits_simps') + +lemma cmap_relation_retype2: + assumes cm: "cmap_relation mp mp' Ptr rel" + and rel: "rel (mobj :: 'a :: pre_storable) ko'" + shows "cmap_relation + (\x. if x \ ptr_val ` addrs then Some (mobj :: 'a :: pre_storable) else mp x) + (\y. if y \ addrs then Some ko' else mp' y) + Ptr rel" + using cm rel + apply - + apply (rule cmap_relationI) + apply (simp add: dom_if cmap_relation_def image_Un) + apply (case_tac "x \ addrs") + apply (simp add: image_image) + apply (simp add: image_image) + apply (clarsimp split: if_split_asm) + apply (erule contrapos_np) + apply (erule image_eqI [rotated]) + apply simp + apply (erule (2) cmap_relation_relI) + done + +lemma ti_typ_pad_combine_empty_ti: + fixes tp :: "'b :: c_type itself" + shows "ti_typ_pad_combine tp lu upd fld (empty_typ_info n) = + TypDesc (TypAggregate [DTPair (adjust_ti (typ_info_t TYPE('b)) lu upd) fld]) n" + by (simp add: ti_typ_pad_combine_def ti_typ_combine_def empty_typ_info_def Let_def) + +lemma ti_typ_combine_empty_ti: + fixes tp :: "'b :: c_type itself" + shows "ti_typ_combine tp lu upd fld (empty_typ_info n) = + TypDesc (TypAggregate [DTPair (adjust_ti (typ_info_t TYPE('b)) lu upd) fld]) n" + by (simp add: ti_typ_combine_def empty_typ_info_def Let_def) + +lemma ti_typ_pad_combine_td: + fixes tp :: "'b :: c_type itself" + shows "padup (align_of TYPE('b)) (size_td_struct st) = 0 \ + ti_typ_pad_combine tp lu upd fld (TypDesc st n) = + TypDesc (extend_ti_struct st (adjust_ti (typ_info_t TYPE('b)) lu upd) fld) n" + by (simp add: ti_typ_pad_combine_def ti_typ_combine_def Let_def) + +lemma ti_typ_combine_td: + fixes tp :: "'b :: c_type itself" + shows "padup (align_of TYPE('b)) (size_td_struct st) = 0 \ + ti_typ_combine tp lu upd fld (TypDesc st n) = + TypDesc (extend_ti_struct st (adjust_ti (typ_info_t TYPE('b)) lu upd) fld) n" + by (simp add: ti_typ_combine_def Let_def) + +lemma update_ti_t_pad_combine: + assumes std: "size_td td' mod 2 ^ align_td (typ_info_t TYPE('a :: c_type)) = 0" + shows "update_ti_t (ti_typ_pad_combine TYPE('a :: c_type) lu upd fld td') bs v = + update_ti_t (ti_typ_combine TYPE('a :: c_type) lu upd fld td') bs v" + using std + by (simp add: ti_typ_pad_combine_def size_td_simps Let_def) + + +lemma update_ti_t_ptr_0s: + "update_ti_t (typ_info_t TYPE('a :: c_type ptr)) [0,0,0,0,0,0,0,0] X = NULL" + apply (simp add: typ_info_ptr word_rcat_def bin_rcat_def) + done + +lemma size_td_map_list: + 
"size_td_list (map (\n. DTPair + (adjust_ti (typ_info_t TYPE('a :: c_type)) + (\x. index x n) + (\x f. Arrays.update f n x)) + (replicate n CHR ''1'')) + [0.. bs = replicate (n * size_td (typ_info_t TYPE('a))) v; n \ card (UNIV :: 'b set) \ \ + update_ti_t (array_tag_n n) bs x = + foldr (\n arr. Arrays.update arr n + (update_ti_t (typ_info_t TYPE('a)) (replicate (size_td (typ_info_t TYPE('a))) v) (index arr n))) + [0.. + update_ti_t (typ_info_t TYPE('a :: c_type['b :: finite])) bs x = + foldr (\n arr. Arrays.update arr n + (update_ti_t (typ_info_t TYPE('a)) (replicate (size_td (typ_info_t TYPE('a))) v) (index arr n))) + [0..<(card (UNIV :: 'b :: finite set))] x" + unfolding typ_info_array array_tag_def + apply (rule update_ti_t_array_tag_n_rep) + apply simp + apply simp + done + +lemma update_ti_t_array_rep_word0: + "bs = replicate ((card (UNIV :: 'b :: finite set)) * 8) 0 \ + update_ti_t (typ_info_t TYPE(machine_word['b :: finite])) bs x = + foldr (\n arr. Arrays.update arr n 0) + [0..<(card (UNIV :: 'b :: finite set))] x" + apply (subst update_ti_t_array_rep) + apply simp + apply (simp add: update_ti_t_machine_word_0s) + done + +lemma update_ti_t_array_rep_byte0: + "bs = replicate (CARD('b)) 0 \ + update_ti_t (typ_info_t TYPE(8 word['b :: finite])) bs x = + foldr (\n arr. Arrays.update arr n 0) + [0.. UserContext (FPUState (\y. 0) 0 0) (\x. if x = register.SPSR_EL1 then 0x140 else 0)" + by (rule newContext_def[simplified newFPUState_def initContext_def pstateUser_def, simplified, + simplified fun_upd_def]) + +lemma tcb_queue_update_other: + "\ ctcb_ptr_to_tcb_ptr p \ set tcbs \ \ + tcb_queue_relation next prev (mp(p \ v)) tcbs qe qh = + tcb_queue_relation next prev mp tcbs qe qh" + apply (induct tcbs arbitrary: qh qe) + apply simp + apply (rename_tac a tcbs qh qe) + apply simp + apply (subgoal_tac "p \ tcb_ptr_to_ctcb_ptr a") + apply (simp cong: conj_cong) + apply clarsimp + done + +lemma cmap_relation_cong': + "\am = am'; cm = cm'; + \p a a' b b'. 
+ \am p = Some a; am' p = Some a'; cm (f p) = Some b; cm' (f p) = Some b'\ + \ rel a b = rel' a' b'\ + \ cmap_relation am cm f rel = cmap_relation am' cm' f rel'" + by (rule cmap_relation_cong, simp_all) + +lemma tcb_queue_update_other': + "\ ctcb_ptr_to_tcb_ptr p \ set tcbs \ \ + tcb_queue_relation' next prev (mp(p \ v)) tcbs qe qh = + tcb_queue_relation' next prev mp tcbs qe qh" + unfolding tcb_queue_relation'_def + by (simp add: tcb_queue_update_other) + +lemma c_guard_tcb: + assumes al: "is_aligned (ctcb_ptr_to_tcb_ptr p) tcbBlockSizeBits" + and ptr0: "ctcb_ptr_to_tcb_ptr p \ 0" + shows "c_guard p" + unfolding c_guard_def +proof (rule conjI) + show "ptr_aligned p" using al + apply - + apply (rule is_aligned_ptr_aligned [where n = word_size_bits]) + apply (rule is_aligned_weaken) + apply (erule ctcb_ptr_to_tcb_ptr_aligned) + apply (simp add: ctcb_size_bits_def word_size_bits_def) + apply (simp add: align_of_def word_size_bits_def) + done + + show "c_null_guard p" using ptr0 al + unfolding c_null_guard_def + apply - + apply (rule intvl_nowrap [where x = 0, simplified]) + apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs tcbBlockSizeBits_def is_aligned_def) + apply (drule ctcb_ptr_to_tcb_ptr_aligned) + apply (erule is_aligned_no_wrap_le) + apply (simp add: word_bits_conv ctcb_size_bits_def) + apply (simp add: size_of_def ctcb_size_bits_def) + done +qed + +lemma tcb_ptr_orth_cte_ptrs: + "{ptr_val p..+size_of TYPE(tcb_C)} \ {ctcb_ptr_to_tcb_ptr p..+5 * size_of TYPE(cte_C)} = {}" + apply (rule disjointI) + apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def intvl_def field_simps size_of_def ctcb_offset_defs) + apply unat_arith + apply (simp add: unat_of_nat64 word_bits_conv) + apply (simp add: unat_of_nat64 word_bits_conv) + done + +(* see TCB_OFFSET in C *) +lemma tcb_ptr_orth_cte_ptrs': + "ptr_span (tcb_Ptr (regionBase + 0x400)) \ ptr_span (Ptr regionBase :: (cte_C[5]) ptr) = {}" + apply (rule disjointI) + apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def size_td_array + intvl_def field_simps size_of_def ctcb_offset_def) + apply (simp add: unat_arith_simps unat_of_nat) + done + +lemma region_is_typeless_weaken: + "\ region_is_typeless a b s'; (t_hrs_' (globals s)) = (t_hrs_' (globals s')); a \ x; unat x + y \ unat a + b \ \ region_is_typeless x y s" + by (clarsimp simp: region_is_typeless_def subsetD[OF intvl_both_le]) + +lemmas ptr_retyp_htd_safe_neg + = ptr_retyps_htd_safe_neg[where n="Suc 0" and arr=False, unfolded ptr_retyps_gen_def, simplified] + +lemmas ptr_retyp_htd_safe_neg' = ptr_retyp_htd_safe_neg[OF _ _ subset_refl] + +abbreviation + tcbContext_of_tcb_Ptr :: "tcb_C ptr \ user_context_C ptr" +where + "tcbContext_of_tcb_Ptr p \ Ptr &(atcb_Ptr &(p\[''tcbArch_C''])\[''tcbContext_C''])" + +abbreviation + registers_of_tcb_Ptr :: "tcb_C ptr \ (machine_word[registers_count]) ptr" +where + "registers_of_tcb_Ptr p \ Ptr &(tcbContext_of_tcb_Ptr p \[''registers_C''])" + +definition + array_updates :: "'a::c_type['b::finite] \ (nat \ 'a) list \ 'a['b]" +where + "array_updates \ foldl (\a (i,v). Arrays.update a i v)" + +definition + heap_updates :: "heap_raw_state \ (heap_mem \ heap_mem) list \ heap_raw_state" +where + "heap_updates \ foldl (\h upd. hrs_mem_update upd h)" + +lemmas heap_updates_defs = + heap_updates_def heap_modify_def array_updates_def + +(* FIXME: move up to TypHeapLib? *) +lemma clift_heap_update_same': + fixes p :: "'a :: mem_type ptr" + shows "\ hrs_htd hp \\<^sub>t p; typ_uinfo_t TYPE('a) \\<^sub>t typ_uinfo_t TYPE('b) \ + \ clift (hrs_mem_update (\h. 
heap_update p (v h) h) hp) = (clift hp :: 'b :: mem_type typ_heap)" + unfolding hrs_mem_update_def + apply (cases hp) + apply (simp add: split_def hrs_htd_def) + apply (erule lift_t_heap_update_same) + apply simp + done + +(* FIXME: move up to TypHeapLib? *) +lemmas clift_heap_update_same_td_name' = + clift_heap_update_same'[OF _ tag_disj_via_td_name, unfolded pad_typ_name_def] + +definition initContext_registers :: "(nat \ machine_word) list" where + "initContext_registers \ [(unat Kernel_C.SPSR_EL1, 0x140)]" + +(* FIXME: move *) +lemma field_tag_sub': + fixes p :: "'a::mem_type ptr" + assumes fl: "field_lookup (typ_info_t TYPE('a)) f 0 = Some (t,n)" + assumes sz: "size_of TYPE('b) = size_td t" + shows "ptr_span (Ptr &(p\f)::'b::mem_type ptr) \ ptr_span p" + by (clarsimp simp: sz field_tag_sub[OF fl]) + +lemmas field_tag_sub_trans = + subset_trans[OF field_tag_sub', rotated -1] + +lemmas field_tag_subs = + field_tag_sub_trans[OF field_tag_sub_trans[OF field_tag_sub_trans[OF field_tag_sub']]] + field_tag_sub_trans[OF field_tag_sub_trans[OF field_tag_sub']] + field_tag_sub_trans[OF field_tag_sub'] + field_tag_sub' + +context + fixes p:: "'a::mem_type ptr" and n :: nat + assumes nkr: "{ptr_val p ..+ n * size_of TYPE('a)} \ kernel_data_refs = {}" +begin + +lemma retyp_non_kernel_data_ref: + fixes q :: "'b::mem_type ptr" + assumes "ptr_span q \ kernel_data_refs" + shows "ptr_retyps_gen n p foo (hrs_htd h) \\<^sub>t q \ hrs_htd h \\<^sub>t q" + apply (rule h_t_valid_ptr_retyps_gen_disjoint_iff) + apply (subst Int_commute) + apply (rule disjoint_subset2[OF assms nkr]) + done + +end + +lemma cnc_tcb_helper: + fixes p :: "tcb_C ptr" + defines "kotcb \ (KOTCB (makeObject :: tcb))" + assumes rfsr: "(\\ksPSpace := ks\, x) \ rf_sr" + assumes al: "is_aligned (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb)" + assumes ptr0: "ctcb_ptr_to_tcb_ptr p \ 0" + assumes pal: "pspace_aligned' (\\ksPSpace := ks\)" + assumes pno: "pspace_no_overlap' (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb) (\\ksPSpace := ks\)" + assumes pds: "pspace_distinct' (\\ksPSpace := ks\)" + assumes symref: "sym_refs (state_refs_of' (\\ksPSpace := ks\))" + assumes kssub: "dom (ksPSpace \) \ dom ks" + assumes rzo: "ret_zero (ctcb_ptr_to_tcb_ptr p) (2 ^ objBitsKO kotcb) \" + assumes empty: "region_is_bytes (ctcb_ptr_to_tcb_ptr p) (2 ^ tcbBlockSizeBits) x" + assumes rep0: "heap_list (fst (t_hrs_' (globals x))) (2 ^ tcbBlockSizeBits) (ctcb_ptr_to_tcb_ptr p) = replicate (2 ^ tcbBlockSizeBits) 0" + assumes kdr: "{ctcb_ptr_to_tcb_ptr p..+2 ^ tcbBlockSizeBits} \ kernel_data_refs = {}" + shows "(\\ksPSpace := ks(ctcb_ptr_to_tcb_ptr p \ kotcb)\, + globals_update + (t_hrs_'_update + (\hrs. (heap_updates (hrs_htd_update (\htd. 
ptr_retyps_gen 1 (tcb_cnode_Ptr (ctcb_ptr_to_tcb_ptr p)) False + (ptr_retyps_gen 1 p False htd)) hrs) + [heap_update (registers_of_tcb_Ptr p) + (array_updates (h_val (hrs_mem hrs) (registers_of_tcb_Ptr p)) + initContext_registers), + heap_update (machine_word_Ptr &(p\[''tcbTimeSlice_C''])) 5]) + )) x) + \ rf_sr" + (is "(\\ksPSpace := ?ks\, globals_update ?gs' x) \ rf_sr") +proof - + define ko where "ko \ (KOCTE (makeObject :: cte))" + let ?ptr = "cte_Ptr (ctcb_ptr_to_tcb_ptr p)" + let ?arr_ptr = "Ptr (ctcb_ptr_to_tcb_ptr p) :: (cte_C[5]) ptr" + let ?sp = "\\ksPSpace := ks\" + let ?s = "\\ksPSpace := ?ks\" + let ?gs = "?gs' (globals x)" + let ?hp = "(fst (t_hrs_' ?gs), (ptr_retyps_gen 1 p False (snd (t_hrs_' (globals x)))))" + + note tcb_C_size[simp del] + + from al have cover: "range_cover (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb) + (objBitsKO kotcb) (Suc 0)" + by (rule range_cover_full, simp_all add: al) + + have "\n<2 ^ (objBitsKO kotcb - objBitsKO ko). c_guard (CTypesDefs.ptr_add ?ptr (of_nat n))" + apply (rule retype_guard_helper [where m = 3]) + apply (rule range_cover_rel[OF cover, rotated]) + apply simp + apply (simp add: ko_def objBits_simps' kotcb_def) + apply (rule ptr0) + apply (simp add: ko_def objBits_simps' size_of_def) + apply (simp add: ko_def objBits_simps') + apply (simp add: ko_def objBits_simps align_of_def) + done + hence guard: "\n<5. c_guard (CTypesDefs.ptr_add ?ptr (of_nat n))" + by (simp add: ko_def kotcb_def objBits_simps' align_of_def) + + have arr_guard: "c_guard ?arr_ptr" + apply (rule is_aligned_c_guard[where m=3], simp, rule al) + apply (simp add: ptr0) + apply (simp add: align_of_def align_td_array) + apply (simp add: cte_C_size objBits_simps' kotcb_def) + apply (simp add: kotcb_def objBits_simps') + done + + have heap_update_to_hrs_mem_update: + "\p x hp ht. (heap_update p x hp, ht) = hrs_mem_update (heap_update p x) (hp, ht)" + by (simp add: hrs_mem_update_def split_def) + + have empty_smaller: + "region_is_bytes (ptr_val p) (size_of TYPE(tcb_C)) x" + "region_is_bytes' (ctcb_ptr_to_tcb_ptr p) (5 * size_of TYPE(cte_C)) + (ptr_retyps_gen 1 p False (hrs_htd (t_hrs_' (globals x))))" + using al region_is_bytes_subset[OF empty] tcb_ptr_to_ctcb_ptr_in_range' + apply (simp add: objBits_simps kotcb_def) + apply (clarsimp simp: region_is_bytes'_def) + apply (subst(asm) ptr_retyps_gen_out) + apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs intvl_def) + apply (simp add: unat_arith_simps unat_of_nat cte_C_size tcb_C_size + split: if_split_asm) + apply (subst(asm) empty[unfolded region_is_bytes'_def], simp_all) + apply (erule subsetD[rotated], rule intvl_start_le) + apply (simp add: cte_C_size objBits_simps') + done + + note htd[simp] = hrs_htd_update_htd_update[unfolded o_def, + where d="ptr_retyps_gen n p a" and d'="ptr_retyps_gen n' p' a'" + for n p a n' p' a', symmetric] + + have cgp: "c_guard p" using al + apply - + apply (rule c_guard_tcb [OF _ ptr0]) + apply (simp add: kotcb_def objBits_simps) + done + + from pal rfsr have "\x\dom (cslift x :: cte_C typ_heap). is_aligned (ptr_val x) (objBitsKO ko)" + apply (rule pspace_aligned_to_C_cte [OF _ cmap_relation_cte]) + apply (simp add: projectKOs ko_def) + done + + have "ptr_val p = ctcb_ptr_to_tcb_ptr p + ctcb_offset" + by (simp add: ctcb_ptr_to_tcb_ptr_def) + + have cte_tcb_disjoint: "\y. y \ (CTypesDefs.ptr_add (cte_Ptr (ctcb_ptr_to_tcb_ptr p)) \ of_nat) ` {k. 
k < 5} + \ {ptr_val p..+size_of TYPE(tcb_C)} \ {ptr_val y..+size_of TYPE(cte_C)} = {}" + apply (rule disjoint_subset2 [OF _ tcb_ptr_orth_cte_ptrs]) + apply (clarsimp simp: intvl_def size_of_def) + apply (rule_tac x = "x * (2^cteSizeBits) + k" in exI) + apply (simp add: objBits_simps') + done + + have cl_cte: "(cslift (x\globals := ?gs\) :: cte_C typ_heap) = + (\y. if y \ (CTypesDefs.ptr_add (cte_Ptr (ctcb_ptr_to_tcb_ptr p)) \ + of_nat) ` + {k. k < 5} + then Some (from_bytes (replicate (size_of TYPE(cte_C)) 0)) else cslift x y)" + using cgp unfolding heap_updates_defs + apply (simp add: ptr_retyp_to_array[simplified] hrs_comm[symmetric] Let_def) + apply (subst clift_ptr_retyps_gen_prev_memset_same[OF guard], + simp_all add: hrs_htd_update empty_smaller[simplified]) + apply (simp add: cte_C_size word_bits_def) + apply (simp add: hrs_mem_update typ_heap_simps + packed_heap_update_collapse) + apply (simp add: heap_update_def) + apply (subst heap_list_update_disjoint_same) + apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs intvl_def + set_eq_iff) + apply (simp add: unat_arith_simps unat_of_nat cte_C_size tcb_C_size) + apply (subst take_heap_list_le[symmetric]) + prefer 2 + apply (simp add: hrs_mem_def, subst rep0) + apply (simp only: take_replicate, simp add: cte_C_size objBits_simps') + apply (simp add: cte_C_size objBits_simps') + apply (simp add: fun_eq_iff split: if_split) + apply (simp add: hrs_comm packed_heap_update_collapse + typ_heap_simps) + apply (subst clift_heap_update_same_td_name', simp_all, + simp add: hrs_htd_update ptr_retyps_gen_def ptr_retyp_h_t_valid)+ + apply (subst clift_ptr_retyps_gen_other, + simp_all add: empty_smaller tag_disj_via_td_name) + apply (simp add: tcb_C_size word_bits_def) + done + + have tcb0: "heap_list (fst (t_hrs_' (globals x))) (size_of TYPE(tcb_C)) (ptr_val p) = replicate (size_of TYPE(tcb_C)) 0" + proof - + have "heap_list (fst (t_hrs_' (globals x))) (size_of TYPE(tcb_C)) (ptr_val p) + = take (size_of TYPE(tcb_C)) (drop (unat (ptr_val p - ctcb_ptr_to_tcb_ptr p)) + (heap_list (fst (t_hrs_' (globals x))) (2 ^ tcbBlockSizeBits) (ctcb_ptr_to_tcb_ptr p)))" + by (simp add: drop_heap_list_le take_heap_list_le size_of_def ctcb_ptr_to_tcb_ptr_def + ctcb_offset_defs objBits_simps') + also have "\ = replicate (size_of TYPE(tcb_C)) 0" + apply (subst rep0) + apply (simp only: take_replicate drop_replicate) + apply (rule arg_cong [where f = "\x. replicate x 0"]) + apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs size_of_def objBits_simps') + done + finally show "heap_list (fst (t_hrs_' (globals x))) (size_of TYPE(tcb_C)) (ptr_val p) = replicate (size_of TYPE(tcb_C)) 0" . + qed + + note alrl = pspace_aligned_to_C_tcb [OF pal cmap_relation_tcb [OF rfsr]] + + have tdisj: + "\xa\dom (cslift x) \ {p}. \y\dom (cslift x). 
{ptr_val xa..+size_of TYPE(tcb_C)} \ {ptr_val y..+size_of TYPE(tcb_C)} \ {} + \ xa = y" + using al + apply (intro ballI impI) + apply (erule contrapos_np) + apply (subgoal_tac "is_aligned (ptr_val xa) ctcb_size_bits") + apply (subgoal_tac "is_aligned (ptr_val y) ctcb_size_bits") + apply (subgoal_tac "ctcb_size_bits < word_bits") + apply (rule_tac A = "{ptr_val xa..+2 ^ ctcb_size_bits}" in disjoint_subset) + apply (rule intvl_start_le) + apply (simp add: size_of_def ctcb_size_bits_def) + apply (rule_tac B = "{ptr_val y..+2 ^ ctcb_size_bits}" in disjoint_subset2) + apply (rule intvl_start_le) + apply (simp add: size_of_def ctcb_size_bits_def) + apply (simp only: upto_intvl_eq) + apply (rule aligned_neq_into_no_overlap [simplified field_simps]) + apply simp + apply assumption+ + apply (simp add: word_bits_conv ctcb_size_bits_def) + apply (erule bspec [OF alrl]) + apply (clarsimp) + apply (erule disjE) + apply (simp add: objBits_simps kotcb_def) + apply (erule ctcb_ptr_to_tcb_ptr_aligned) + apply (erule bspec [OF alrl]) + done + + let ?tcbArch_C = "tcbArch_C (from_bytes (replicate (size_of TYPE(tcb_C)) 0))" + + let ?new_tcb = "(from_bytes (replicate (size_of TYPE(tcb_C)) 0) + \tcbArch_C := ?tcbArch_C + \tcbContext_C := tcbContext_C ?tcbArch_C + \registers_C := array_updates (registers_C (tcbContext_C ?tcbArch_C)) + initContext_registers + \\, + tcbTimeSlice_C := 5\)" + + have tdisj': + "\y. hrs_htd (t_hrs_' (globals x)) \\<^sub>t y \ ptr_span p \ ptr_span y \ {} \ y = p" + using tdisj by (auto simp: h_t_valid_clift_Some_iff) + + have "ptr_retyp p (snd (t_hrs_' (globals x))) \\<^sub>t p" using cgp + by (rule ptr_retyp_h_t_valid) + hence "clift (hrs_mem (t_hrs_' (globals x)), ptr_retyp p (snd (t_hrs_' (globals x)))) p + = Some (from_bytes (replicate (size_of TYPE(tcb_C)) 0))" + by (simp add: lift_t_if h_val_def tcb0 hrs_mem_def) + hence cl_tcb: "(cslift (x\globals := ?gs\) :: tcb_C typ_heap) = (cslift x)(p \ ?new_tcb)" + using cgp + apply (clarsimp simp add: typ_heap_simps heap_updates_def + hrs_mem_update packed_heap_update_collapse_hrs) + apply (simp add: hrs_comm[symmetric]) + apply (subst clift_ptr_retyps_gen_other, + simp_all add: hrs_htd_update empty_smaller[simplified] tag_disj_via_td_name) + apply (simp add: cte_C_size word_bits_def) + apply (simp add: hrs_comm typ_heap_simps ptr_retyps_gen_def + hrs_htd_update ptr_retyp_h_t_valid + h_val_heap_update h_val_field_from_bytes') + apply (simp add: h_val_def tcb0[folded hrs_mem_def]) + apply (rule ext, rename_tac p') + apply (case_tac "p' = p", simp_all) + apply (cut_tac clift_ptr_retyps_gen_prev_memset_same + [where n=1 and arr=False, simplified, OF _ empty_smaller(1) _ refl]) + apply (simp_all add: tcb0[folded hrs_mem_def] ptr_retyps_gen_def) + apply (simp add: tcb_C_size word_bits_def) + done + + have cl_rest: + "\typ_uinfo_t TYPE(tcb_C) \\<^sub>t typ_uinfo_t TYPE('a :: mem_type); + typ_uinfo_t TYPE(cte_C[5]) \\<^sub>t typ_uinfo_t TYPE('a :: mem_type); + typ_uinfo_t TYPE('a) \ typ_uinfo_t TYPE(word8) \ \ + cslift (x\globals := ?gs\) = (cslift x :: 'a :: mem_type typ_heap)" + using cgp + apply (clarsimp simp: hrs_comm[symmetric] heap_updates_def) + apply (subst clift_ptr_retyps_gen_other, + simp_all add: hrs_htd_update empty_smaller[simplified], + simp_all add: cte_C_size tcb_C_size word_bits_def) + apply (simp add: hrs_comm ptr_retyps_gen_def) + apply (simp add: clift_heap_update_same hrs_htd_update ptr_retyp_h_t_valid typ_heap_simps) + apply (rule trans[OF _ clift_ptr_retyps_gen_other[where nptrs=1 and arr=False, + simplified, OF 
empty_smaller(1)]], simp_all) + apply (simp add: ptr_retyps_gen_def) + apply (simp add: tcb_C_size word_bits_def) + done + + have rl: + "(\v :: 'a :: pre_storable. projectKO_opt kotcb \ Some v) \ + (projectKO_opt \\<^sub>m (ks(ctcb_ptr_to_tcb_ptr p \ KOTCB makeObject)) :: machine_word \ 'a option) + = projectKO_opt \\<^sub>m ks" using pno al + apply - + apply (drule(2) projectKO_opt_retyp_other'[OF _ _ pal]) + apply (simp add: kotcb_def) + done + + have rl_tcb: "(projectKO_opt \\<^sub>m (ks(ctcb_ptr_to_tcb_ptr p \ KOTCB makeObject)) :: machine_word \ tcb option) + = (projectKO_opt \\<^sub>m ks)(ctcb_ptr_to_tcb_ptr p \ makeObject)" + apply (rule ext) + apply (clarsimp simp: projectKOs map_comp_def split: if_split) + done + + have mko: "\dev. makeObjectKO dev (Inr (APIObjectType ArchTypes_H.apiobject_type.TCBObject)) = Some kotcb" + by (simp add: makeObjectKO_def kotcb_def) + note hacky_cte = retype_ctes_helper [where sz = "objBitsKO kotcb" and ko = kotcb and ptr = "ctcb_ptr_to_tcb_ptr p", + OF pal pds pno al _ _ mko, simplified new_cap_addrs_def, simplified] + + \ \Ugh\ + moreover have + "\y. y \ ptr_val ` (CTypesDefs.ptr_add (cte_Ptr (ctcb_ptr_to_tcb_ptr p)) \ of_nat) ` {k. k < 5} + = (y && ~~ mask tcbBlockSizeBits = ctcb_ptr_to_tcb_ptr p \ y && mask tcbBlockSizeBits \ dom tcb_cte_cases)" (is "\y. ?LHS y = ?RHS y") + proof - + fix y + + have al_rl: "\k. k < 5 \ + ctcb_ptr_to_tcb_ptr p + of_nat k * of_nat (size_of TYPE(cte_C)) && mask tcbBlockSizeBits = of_nat k * of_nat (size_of TYPE(cte_C)) + \ ctcb_ptr_to_tcb_ptr p + of_nat k * of_nat (size_of TYPE(cte_C)) && ~~ mask tcbBlockSizeBits = ctcb_ptr_to_tcb_ptr p" using al + apply - + apply (rule is_aligned_add_helper) + apply (simp add: objBits_simps kotcb_def) + apply (subst Abs_fnat_hom_mult) + apply (subst word_less_nat_alt) + apply (subst unat_of_nat64) + apply (simp add: size_of_def word_bits_conv objBits_simps')+ + done + + have al_rl2: "\k. k < 5 \ unat (of_nat k * of_nat (size_of TYPE(cte_C)) :: machine_word) = k * (2^cteSizeBits)" + apply (subst Abs_fnat_hom_mult) + apply (subst unat_of_nat64) + apply (simp add: size_of_def word_bits_conv objBits_simps')+ + done + + show "?LHS y = ?RHS y" using al + apply (simp add: image_image kotcb_def objBits_simps) + apply rule + apply (clarsimp simp: dom_tcb_cte_cases_iff al_rl al_rl2) + apply (clarsimp simp: dom_tcb_cte_cases_iff al_rl al_rl2) + apply (rule_tac x = ya in image_eqI) + apply (rule mask_eqI [where n = tcbBlockSizeBits]) + apply (subst unat_arith_simps(3)) + apply (simp add: al_rl al_rl2)+ + done + qed + + ultimately have rl_cte: "(map_to_ctes (ks(ctcb_ptr_to_tcb_ptr p \ KOTCB makeObject)) :: machine_word \ cte option) + = (\x. if x \ ptr_val ` (CTypesDefs.ptr_add (cte_Ptr (ctcb_ptr_to_tcb_ptr p)) \ of_nat) ` {k. k < 5} + then Some (CTE NullCap nullMDBNode) + else map_to_ctes ks x)" + apply simp + apply (drule_tac x = "Suc 0" in meta_spec) + apply clarsimp + apply (erule impE[OF impI]) + apply (rule range_cover_full[OF al]) + apply (simp add: objBits_simps' word_bits_conv bit_simps archObjSize_def + split:kernel_object.splits arch_kernel_object.splits) + apply (simp add: fun_upd_def kotcb_def cong: if_cong) + done + + let ?tcb_arch = "tcbArch_C undefined + \ tcbContext_C := tcbContext_C (tcbArch_C undefined) + \registers_C := + foldr (\n arr. Arrays.update arr n 0) [0..<37] \ \n_contextRegisters\ + (registers_C (tcbContext_C (tcbArch_C undefined))), + fpuState_C := fpuState_C (tcbContext_C (tcbArch_C undefined)) \ + vregs_C := foldr (\n arr. 
Arrays.update arr n 0) [0..<64] + (vregs_C (fpuState_C (tcbContext_C (tcbArch_C undefined)))), + fpsr_C := 0, + fpcr_C := 0 + \ + \, + tcbVCPU_C := vcpu_Ptr 0 + \" + + (* this needs to match what comes out of the LHS of fbtcb below *) + let ?tcb = "undefined + \ tcbArch_C := ?tcb_arch, + tcbState_C := + thread_state_C.words_C_update + (\_. foldr (\n arr. Arrays.update arr n 0) [0..<3] + (thread_state_C.words_C (tcbState_C undefined))) + (tcbState_C undefined), + tcbFault_C := + seL4_Fault_C.words_C_update + (\_. foldr (\n arr. Arrays.update arr n 0) [0..<2] + (seL4_Fault_C.words_C (tcbFault_C undefined))) + (tcbFault_C undefined), + tcbLookupFailure_C := + lookup_fault_C.words_C_update + (\_. foldr (\n arr. Arrays.update arr n 0) [0..<2] + (lookup_fault_C.words_C (tcbLookupFailure_C undefined))) + (tcbLookupFailure_C undefined), + tcbPriority_C := 0, tcbMCP_C := 0, tcbDomain_C := 0, tcbTimeSlice_C := 0, + tcbFaultHandler_C := 0, tcbIPCBuffer_C := 0, + tcbSchedNext_C := tcb_Ptr 0, tcbSchedPrev_C := tcb_Ptr 0, + tcbEPNext_C := tcb_Ptr 0, tcbEPPrev_C := tcb_Ptr 0, + tcbBoundNotification_C := ntfn_Ptr 0\" + + have fbtcb: "from_bytes (replicate (size_of TYPE(tcb_C)) 0) = ?tcb" + apply (simp add: from_bytes_def) + apply (simp add: typ_info_simps tcb_C_tag_def) + apply (simp add: ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td align_of_def padup_def + final_pad_def size_td_lt_ti_typ_pad_combine Let_def size_of_def)(* takes ages *) + apply (simp add: update_ti_adjust_ti update_ti_t_machine_word_0s update_ti_t_word32_0s + typ_info_simps thread_state_C_tag_def seL4_Fault_C_tag_def + lookup_fault_C_tag_def update_ti_t_ptr_0s arch_tcb_C_tag_def + ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td + ti_typ_combine_empty_ti ti_typ_combine_td + align_of_def padup_def user_fpu_state_C_tag_def user_context_C_tag_def + final_pad_def size_td_lt_ti_typ_pad_combine Let_def size_of_def + align_td_array' size_td_array) + apply (simp add: update_ti_t_array_rep_word0 update_ti_t_array_rep_byte0) + done + + have tcb_rel: + "ctcb_relation makeObject ?new_tcb" + unfolding ctcb_relation_def makeObject_tcb heap_updates_defs initContext_registers_def + apply (simp add: fbtcb minBound_word) + apply (intro conjI) + apply (simp add: cthread_state_relation_def thread_state_lift_def + eval_nat_numeral ThreadState_defs) + apply (clarsimp simp: ccontext_relation_def newContext_def2 carch_tcb_relation_def + newArchTCB_def fpu_relation_def cregs_relation_def atcbContextGet_def + index_foldr_update) + apply (case_tac r; simp add: C_register_defs index_foldr_update + atcbContext_def newArchTCB_def newContext_def + initContext_def) + apply (simp add: thread_state_lift_def index_foldr_update atcbContextGet_def) + apply (simp add: Kernel_Config.timeSlice_def) + apply (simp add: cfault_rel_def seL4_Fault_lift_def seL4_Fault_get_tag_def Let_def + lookup_fault_lift_def lookup_fault_get_tag_def lookup_fault_invalid_root_def + index_foldr_update seL4_Fault_NullFault_def option_to_ptr_def option_to_0_def + split: if_split)+ + apply (simp add: option_to_ctcb_ptr_def) + done + + have pks: "ks (ctcb_ptr_to_tcb_ptr p) = None" + by (rule pspace_no_overlap_base' [OF pal pno al, simplified]) + + have ep1 [simplified]: "\p' list. 
map_to_eps (ksPSpace ?sp) p' = Some (Structures_H.endpoint.RecvEP list) + \ ctcb_ptr_to_tcb_ptr p \ set list" + using symref pks pal pds + apply - + apply (frule map_to_ko_atI) + apply simp + apply simp + apply (drule (1) sym_refs_ko_atD') + apply clarsimp + apply (drule (1) bspec) + apply (simp add: ko_wp_at'_def) + done + + have ep2 [simplified]: "\p' list. map_to_eps (ksPSpace ?sp) p' = Some (Structures_H.endpoint.SendEP list) + \ ctcb_ptr_to_tcb_ptr p \ set list" + using symref pks pal pds + apply - + apply (frule map_to_ko_atI) + apply simp + apply simp + apply (drule (1) sym_refs_ko_atD') + apply clarsimp + apply (drule (1) bspec) + apply (simp add: ko_wp_at'_def) + done + + have ep3 [simplified]: "\p' list boundTCB. map_to_ntfns (ksPSpace ?sp) p' = Some (Structures_H.notification.NTFN (Structures_H.ntfn.WaitingNtfn list) boundTCB) + \ ctcb_ptr_to_tcb_ptr p \ set list" + using symref pks pal pds + apply - + apply (frule map_to_ko_atI) + apply simp + apply simp + apply (drule (1) sym_refs_ko_atD') + apply clarsimp + apply (drule_tac x="(ctcb_ptr_to_tcb_ptr p, NTFNSignal)" in bspec, simp) + apply (simp add: ko_wp_at'_def) + done + + have pks': "ksPSpace \ (ctcb_ptr_to_tcb_ptr p) = None" using pks kssub + apply - + apply (erule contrapos_pp) + apply (fastforce simp: dom_def) + done + + have ball_subsetE: + "\P S R. \ \x \ S. P x; R \ S \ \ \x \ R. P x" + by blast + + have domain_kdr: + "-domain \ kernel_data_refs" + using rfsr unfolding rf_sr_def cstate_relation_def Let_def by simp + + have htd_safe: + "htd_safe domain (hrs_htd (t_hrs_' (globals x))) + \ htd_safe domain (hrs_htd (t_hrs_' ?gs))" + using kdr + apply (simp add: hrs_htd_update heap_updates_def) + apply (intro ptr_retyps_htd_safe_neg[OF _ _ domain_kdr], simp_all) + apply (erule disjoint_subset[rotated]) + apply (simp add: ctcb_ptr_to_tcb_ptr_def size_of_def) + apply (rule intvl_sub_offset[where k="ptr_val p - ctcb_offset" and x="ctcb_offset", simplified]) + apply (simp add: ctcb_offset_defs objBits_simps') + apply (erule disjoint_subset[rotated]) + apply (rule intvl_start_le) + apply (simp add: size_of_def objBits_simps') + done + + have zro: + "zero_ranges_are_zero (gsUntypedZeroRanges \) (t_hrs_' (globals x))" + using rfsr + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + + have h_t_valid_p: + "h_t_valid (hrs_htd (t_hrs_' ?gs)) c_guard p" + using fun_cong[OF cl_tcb, where x=p] + by (clarsimp dest!: h_t_valid_clift) + + have zro': + "zero_ranges_are_zero (gsUntypedZeroRanges \) (t_hrs_' ?gs)" + using zro h_t_valid_p rzo al + apply clarsimp + apply (simp add: heap_updates_def hrs_htd_update typ_heap_simps') + apply (intro zero_ranges_ptr_retyps, simp_all) + apply (erule caps_overlap_reserved'_subseteq) + apply (rule order_trans, rule tcb_ptr_to_ctcb_ptr_in_range') + apply (simp add: objBits_simps kotcb_def) + apply (simp add: objBits_simps kotcb_def) + apply (erule caps_overlap_reserved'_subseteq) + apply (rule intvl_start_le) + apply (simp add: cte_C_size kotcb_def objBits_simps') + done + + note al' = al[simplified objBits_simps kotcb_def, simplified] + + have p_nkr: "ptr_span p \ kernel_data_refs = {}" + apply (rule disjoint_subset[OF _ kdr]) + using ptr_span_ctcb_subset[OF al'] + apply (simp add: upto_intvl_eq[OF al']) + done + + note ht_rest = clift_eq_h_t_valid_eq[OF cl_rest, simplified] + + note irq = h_t_valid_eq_array_valid[where p=intStateIRQNode_array_Ptr] + h_t_array_valid_ptr_retyps_gen[where n=1, simplified, OF refl empty_smaller(1)] + h_t_array_valid_ptr_retyps_gen[where p="Ptr x" for x, 
simplified, OF refl empty_smaller(2)] + + from rfsr have "cpspace_relation ks (underlying_memory (ksMachineState \)) (t_hrs_' (globals x))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + hence "cpspace_relation ?ks (underlying_memory (ksMachineState \)) (t_hrs_' ?gs)" + unfolding cpspace_relation_def + apply - + apply (simp add: cl_cte [simplified] cl_tcb [simplified] cl_rest [simplified] tag_disj_via_td_name + ht_rest) + apply (simp add: rl kotcb_def projectKOs rl_tcb rl_cte) + apply (elim conjE) + apply (intro conjI) + \ \cte\ + apply (erule cmap_relation_retype2) + apply (simp add:ccte_relation_nullCap nullMDBNode_def nullPointer_def) + \ \tcb\ + apply (erule cmap_relation_updI2 [where dest = "ctcb_ptr_to_tcb_ptr p" and f = "tcb_ptr_to_ctcb_ptr", simplified]) + apply (rule map_comp_simps) + apply (rule pks) + apply (rule tcb_rel[simplified FLAGS_default_eq, simplified]) + \ \ep\ + apply (erule iffD2 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply (simp add: cendpoint_relation_def Let_def) + apply (subst endpoint.case_cong) + apply (rule refl) + apply (simp add: tcb_queue_update_other' ep1) + apply (simp add: tcb_queue_update_other' del: tcb_queue_relation'_empty) + apply (simp add: tcb_queue_update_other' ep2) + apply clarsimp + \ \ntfn\ + apply (erule iffD2 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply (simp add: cnotification_relation_def Let_def) + apply (subst ntfn.case_cong) + apply (rule refl) + apply (simp add: tcb_queue_update_other' del: tcb_queue_relation'_empty) + apply (simp add: tcb_queue_update_other' del: tcb_queue_relation'_empty) + apply (case_tac a, simp add: tcb_queue_update_other' ep3) + apply (clarsimp simp: typ_heap_simps) + done + + moreover have "cte_array_relation \ ?gs + \ tcb_cte_array_relation ?s ?gs" + using rfsr + apply (clarsimp simp: heap_updates_def + rf_sr_def cstate_relation_def Let_def + hrs_htd_update map_comp_update + kotcb_def projectKO_opt_tcb) + apply (intro cvariable_array_ptr_upd conjI + cvariable_array_ptr_retyps[OF refl, where n=1, simplified], + simp_all add: empty_smaller[simplified]) + apply (simp add: ptr_retyps_gen_def) + apply (rule ptr_retyp_h_t_valid[where g=c_guard, OF arr_guard, + THEN h_t_array_valid, simplified]) + done + + moreover + from rfsr + have "pt_array_relation \ (globals x)" + by (simp add: rf_sr_def cstate_relation_def Let_def) + hence "cvariable_array_map_relation (gsPTTypes (ksArchState \)) + (\pt_t. 
2 ^ ptTranslationBits pt_t) + pte_Ptr + (hrs_htd (t_hrs_' ?gs))" + apply (clarsimp simp: heap_updates_def) + apply (drule cvariable_array_ptr_retyps[OF refl, where n=1 and arr=False, simplified, rotated]) + apply (rule empty_smaller) + apply (simp add: hrs_htd_update) + apply (erule cvariable_array_ptr_retyps[where p="tcb_cnode_Ptr (ctcb_ptr_to_tcb_ptr p)", rotated -1]) + apply (rule refl) + apply simp + apply (rule empty_smaller[simplified]) + done + + ultimately show ?thesis + using rfsr zro' + apply (simp add: rf_sr_def cstate_relation_def Let_def h_t_valid_clift_Some_iff + tag_disj_via_td_name carch_state_relation_def + cmachine_state_relation_def irq) + apply (simp add: cl_cte [simplified] cl_tcb [simplified] cl_rest [simplified] tag_disj_via_td_name) + apply (clarsimp simp: cready_queues_relation_def Let_def + htd_safe[simplified] kernel_data_refs_domain_eq_rotate) + apply (simp add: heap_updates_def tcb_queue_update_other' hrs_htd_update + ptr_retyp_to_array[simplified] irq[simplified]) + done +qed + +lemma cnc_foldl_foldr: + defines "ko \ (KOTCB makeObject)" + shows "foldl (\v addr. v(addr \ ko)) mp + (map (\n. ptr + (of_nat n << tcbBlockSizeBits)) [0..< n]) = + foldr (\addr. data_map_insert addr ko) (new_cap_addrs n ptr ko) mp" + by (simp add: foldr_upd_app_if foldl_conv_foldr + new_cap_addrs_def objBits_simps ko_def power_minus_is_div + cong: foldr_cong) + +lemma objBitsKO_gt_1: + "(1 :: machine_word) < 2 ^ objBitsKO ko" + by (simp add: objBits_simps' archObjSize_def bit_simps + split: kernel_object.splits arch_kernel_object.splits) + +lemma ps_clear_subset: + assumes pd: "ps_clear x (objBitsKO ko) (s' \ksPSpace := (\x. if x \ as then Some (f x) else ksPSpace s' x) \)" + and sub: "as' \ as" + and al: "is_aligned x (objBitsKO ko)" + shows "ps_clear x (objBitsKO ko) (s' \ksPSpace := (\x. if x \ as' then Some (f x) else ksPSpace s' x) \)" + using al pd sub + apply - + apply (simp add: ps_clear_def3 [OF al objBitsKO_gt_0] dom_if_Some) + apply (erule disjoint_subset2 [rotated]) + apply fastforce + done + +lemma pspace_distinct_subset: + assumes pd: "pspace_distinct' (s' \ksPSpace := (\x. if x \ as then Some (f x) else ksPSpace s' x) \)" + and pal: "pspace_aligned' (s' \ksPSpace := (\x. if x \ as then Some (f x) else ksPSpace s' x) \)" + and sub: "as' \ as" + and doms: "as \ dom (ksPSpace s') = {}" + shows "pspace_distinct' (s' \ksPSpace := (\x. if x \ as' then Some (f x) else ksPSpace s' x) \)" + using pd sub doms pal + unfolding pspace_distinct'_def pspace_aligned'_def + apply - + apply (rule ballI) + apply (simp add: pspace_distinct'_def dom_if_Some) + apply (drule_tac x = x in bspec) + apply fastforce + apply (drule_tac x = x in bspec) + apply fastforce + apply (erule disjE) + apply (frule (1) subsetD) + apply simp + apply (erule (2) ps_clear_subset) + apply (subgoal_tac "x \ as") + apply (frule (1) contra_subsetD) + apply simp + apply (erule (2) ps_clear_subset) + apply fastforce + done + +lemma pspace_aligned_subset: + assumes pal: "pspace_aligned' (s' \ksPSpace := (\x. if x \ as then Some (f x) else ksPSpace s' x) \)" + and sub: "as' \ as" + and doms: "as \ dom (ksPSpace s') = {}" + shows "pspace_aligned' (s' \ksPSpace := (\x. 
if x \ as' then Some (f x) else ksPSpace s' x) \)" + using pal sub doms unfolding pspace_aligned'_def + apply - + apply (rule ballI) + apply (simp add: dom_if_Some) + apply (drule_tac x = x in bspec) + apply fastforce + apply (erule disjE) + apply simp + apply (frule (1) subsetD) + apply simp + apply (subgoal_tac "x \ as") + apply (frule (1) contra_subsetD) + apply simp + apply fastforce + done + + +lemma cslift_empty_mem_update: + fixes x :: cstate and sz and ptr + defines "x' \ x\globals := globals x + \t_hrs_' := hrs_mem_update (heap_update_list ptr (replicate sz 0)) (t_hrs_' (globals x))\\" + assumes empty: "region_is_typeless ptr sz x" + shows "cslift x' = clift (fst (t_hrs_' (globals x)), snd (t_hrs_' (globals x)))" + using empty + apply - + apply (unfold region_is_typeless_def) + apply (rule ext) + apply (simp only: lift_t_if hrs_mem_update_def split_def x'_def) + apply (simp add: lift_t_if hrs_mem_update_def split_def) + apply (clarsimp simp: h_val_def split: if_split) + apply (subst heap_list_update_disjoint_same) + apply simp + apply (rule disjointI) + apply clarsimp + apply (drule (1) bspec) + apply (frule (1) h_t_valid_not_empty) + apply simp + apply simp + done + +lemma cslift_bytes_mem_update: + fixes x :: cstate and sz and ptr + defines "x' \ x\globals := globals x + \t_hrs_' := hrs_mem_update (heap_update_list ptr (replicate sz 0)) (t_hrs_' (globals x))\\" + assumes bytes: "region_is_bytes ptr sz x" + assumes not_byte: "typ_uinfo_t TYPE ('a) \ typ_uinfo_t TYPE (word8)" + shows "(cslift x' :: ('a :: mem_type) ptr \ _) + = clift (fst (t_hrs_' (globals x)), snd (t_hrs_' (globals x)))" + using bytes + apply (unfold region_is_bytes'_def) + apply (rule ext) + apply (simp only: lift_t_if hrs_mem_update_def split_def x'_def) + apply (simp add: lift_t_if hrs_mem_update_def split_def) + apply (clarsimp simp: h_val_def split: if_split) + apply (subst heap_list_update_disjoint_same) + apply simp + apply (rule disjointI) + apply clarsimp + apply (drule (1) bspec) + apply (frule (1) h_t_valid_intvl_htd_contains_uinfo_t) + apply (clarsimp simp: hrs_htd_def not_byte) + apply simp + done + +lemma heap_list_eq_replicate_eq_eq: + "(heap_list hp n ptr = replicate n v) + = (\p \ {ptr ..+ n}. hp p = v)" + by (induct n arbitrary: ptr, simp_all add: intvl_Suc_right) + +lemma heap_update_list_replicate_eq: + "(heap_update_list x (replicate n v) hp y) + = (if y \ {x ..+ n} then v else hp y)" + apply (induct n arbitrary: x hp, simp_all add: intvl_Suc_right) + apply (simp split: if_split) + done + +lemma zero_ranges_are_zero_update_zero[simp]: + "zero_ranges_are_zero rs hrs + \ zero_ranges_are_zero rs (hrs_mem_update (heap_update_list ptr (replicate n 0)) hrs)" + apply (clarsimp simp: zero_ranges_are_zero_def hrs_mem_update) + apply (drule(1) bspec) + apply (clarsimp simp: heap_list_eq_replicate_eq_eq heap_update_list_replicate_eq cong: if_cong) + done + +lemma rf_sr_rep0: + assumes sr: "(\, x) \ rf_sr" + assumes empty: "region_is_bytes ptr sz x" + shows "(\, globals_update (t_hrs_'_update (hrs_mem_update (heap_update_list ptr (replicate sz 0)))) x) \ rf_sr" + using sr + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def + carch_state_relation_def + cmachine_state_relation_def hrs_mem_update + cslift_bytes_mem_update[OF empty, simplified] cte_C_size) + +(* FIXME: generalise *) +lemma ccorres_already_have_rrel: + "\ ccorres dc xfdc P P' hs a c; \s. \ \\<^bsub>/UNIV\<^esub> {s} c {t. xf t = xf s} \ + \ + ccorres r xf P (P' \ {s. r v (xf s)}) hs (a >>= (\_. 
return v)) c" + apply (rule ccorres_return_into_rel) + apply (rule ccorresI') + apply (erule (2) ccorresE) + apply simp + apply assumption+ + apply (clarsimp elim!: rev_bexI) + apply (simp add: unif_rrel_def) + apply (drule_tac x = s' in spec) + apply (drule (1) exec_handlers_use_hoare_nothrow) + apply simp + apply fastforce + done + +lemma mapM_x_storeWord: + assumes al: "is_aligned ptr 3" + shows "mapM_x (\x. storeWord (ptr + of_nat x * 8) 0) [0..m x. if x \ {ptr..+ n * 8} then 0 else m x))" +proof (induct n) + case 0 + thus ?case + apply (rule ext) + apply (simp add: mapM_x_mapM mapM_def sequence_def + modify_def get_def put_def bind_def return_def) + done +next + case (Suc n') + + have funs_eq: + "\m x. (if x \ {ptr..+8 + n' * 8} then 0 else (m x :: word8)) = + ((\xa. if xa \ {ptr..+n' * 8} then 0 else m xa) + (ptr + of_nat n' * 8 := word_rsplit (0 :: machine_word) ! 7, + ptr + of_nat n' * 8 + 1 := word_rsplit (0 :: machine_word) ! 6, + ptr + of_nat n' * 8 + 2 := word_rsplit (0 :: machine_word) ! 5, + ptr + of_nat n' * 8 + 3 := word_rsplit (0 :: machine_word) ! 4, + ptr + of_nat n' * 8 + 4:= word_rsplit (0 :: machine_word) ! 3, + ptr + of_nat n' * 8 + 5 := word_rsplit (0 :: machine_word) ! 2, + ptr + of_nat n' * 8 + 6 := word_rsplit (0 :: machine_word) ! Suc 0, + ptr + of_nat n' * 8 + 7 := word_rsplit (0 :: machine_word) ! 0)) x" + proof - + fix m x + + have xin': "\x. (x < 8 + n' * 8) = (x < n' * 8 \ x = n' * 8 + \ x = (n' * 8) + 1 \ x = (n' * 8) + 2 \ x = (n' * 8) + 3 + \ x = (n' * 8) + 4 \ x = (n' * 8) + 5 \ x = (n' * 8) + 6 + \ x = (n' * 8) + 7)" + by (safe, simp_all) + + have xin: "x \ {ptr..+8 + n' * 8} = (x \ {ptr..+n' * 8} \ x = ptr + of_nat n' * 8 \ + x = ptr + of_nat n' * 8 + 1 \ x = ptr + of_nat n' * 8 + 2 \ x = ptr + of_nat n' * 8 + 3 + \ x = ptr + of_nat n' * 8 + 4 \ x = ptr + of_nat n' * 8 + 5 \ x = ptr + of_nat n' * 8 + 6 + \ x = ptr + of_nat n' * 8 + 7)" + by (simp add: intvl_def xin' conj_disj_distribL + ex_disj_distrib field_simps) + + show "?thesis m x" + apply (simp add: xin word_rsplit_0 word_bits_def cong: if_cong) + apply (simp split: if_split) + done + qed + + from al have "is_aligned (ptr + of_nat n' * 8) 3" + apply (rule aligned_add_aligned) + apply (rule is_aligned_mult_triv2 [where n = 3, simplified]) + apply (simp add: word_bits_conv)+ + done + + thus ?case + apply (simp add: mapM_x_append bind_assoc Suc.hyps mapM_x_singleton) + apply (simp add: storeWord_def assert_def is_aligned_mask modify_modify comp_def) + apply (simp only: funs_eq upto0_7_def) + apply (rule arg_cong[where f=modify]) + apply (rule arg_cong[where f=underlying_memory_update]) + apply (simp add: fold_def del: fun_upd_apply) + done +qed + +lemma mapM_x_storeWord_step: + assumes al: "is_aligned ptr sz" + and sz2: "3 \ sz" + and sz: "sz < word_bits" + shows "mapM_x (\p. storeWord p 0) [ptr , ptr + 8 .e. ptr + 2 ^ sz - 1] = + modify (underlying_memory_update (\m x. 
if x \ {ptr..+2 ^ (sz - 3) * 8} then 0 else m x))" + using al sz + apply (simp only: upto_enum_step_def field_simps cong: if_cong) + apply (subst if_not_P) + apply (subst not_less) + apply (erule is_aligned_no_overflow) + apply (simp add: mapM_x_map upto_enum_word del: upt.simps) + apply (subst div_power_helper_64 [OF sz2, simplified]) + apply assumption + apply (simp add: word_bits_def unat_minus_one del: upt.simps) + apply (subst mapM_x_storeWord) + apply (erule is_aligned_weaken [OF _ sz2]) + apply (simp add: field_simps) + done + + +lemma pspace_aligned_to_C_user_data: + fixes v :: "user_data" + assumes pal: "pspace_aligned' s" + and cmap: "cpspace_user_data_relation (ksPSpace s) (underlying_memory (ksMachineState s)) (t_hrs_' (globals x))" + shows "\x\dom (cslift x :: user_data_C typ_heap). is_aligned (ptr_val x) (objBitsKO KOUserData)" + (is "\x\dom ?CS. is_aligned (ptr_val x) (objBitsKO KOUserData)") +proof + fix z + assume "z \ dom ?CS" + hence "z \ Ptr ` dom (map_to_user_data (ksPSpace s))" using cmap + by (simp add: cmap_relation_def dom_heap_to_user_data) + hence pvz: "ptr_val z \ dom (map_to_user_data (ksPSpace s))" + by clarsimp + hence "projectKO_opt (the (ksPSpace s (ptr_val z))) = Some UserData" + apply - + apply (frule map_comp_subset_domD) + apply (clarsimp simp: dom_def)+ + done + moreover have pvz: "ptr_val z \ dom (ksPSpace s)" using pvz + by (rule map_comp_subset_domD) + ultimately show "is_aligned (ptr_val z) (objBitsKO KOUserData)" using pal + unfolding pspace_aligned'_def + apply - + apply (drule (1) bspec) + apply (simp add: projectKOs) + done +qed + +lemma range_cover_bound_weak: + "\ n \ 0; range_cover ptr sz us n \ \ + ptr + (of_nat n * 2 ^ us - 1) \ (ptr && ~~ mask sz) + 2 ^ sz - 1" + apply (frule range_cover_cell_subset[where x = "of_nat (n - 1)"]) + apply (simp add:range_cover_not_zero) + apply (frule range_cover_subset_not_empty[rotated,where x = "of_nat (n - 1)"]) + apply (simp add:range_cover_not_zero) + apply (clarsimp simp: field_simps) + done + +lemma pspace_no_overlap_underlying_zero: + "pspace_no_overlap' ptr sz \ + \ valid_machine_state' \ + \ x \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} + \ underlying_memory (ksMachineState \) x = 0" + using mask_in_range[where ptr'=x and bits=pageBits and ptr="x && ~~ mask pageBits"] + apply (clarsimp simp: valid_machine_state'_def) + apply (drule_tac x=x in spec, clarsimp simp: pointerInUserData_def) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def koTypeOf_eq_UserDataT) + apply (case_tac "pointerInDeviceData x \") + apply (clarsimp simp: pointerInDeviceData_def + ko_wp_at'_def obj_at'_def projectKOs + dest!: device_data_at_ko) + apply (drule(1) pspace_no_overlapD') + apply (drule_tac x=x in eqset_imp_iff) + apply (simp add: objBits_simps) + apply clarsimp + apply (drule(1) pspace_no_overlapD') + apply (drule_tac x=x in eqset_imp_iff, simp) + apply (simp add: objBits_simps) + done + +lemma range_cover_nca_neg: "\x p (off :: 9 word). 
+ \(x::machine_word) < 8; {p..+2 ^pageBits } \ {ptr..ptr + (of_nat n * 2 ^ (gbits + pageBits) - 1)} = {}; + range_cover ptr sz (gbits + pageBits) n\ + \ p + ucast off * 8 + x \ {ptr..+n * 2 ^ (gbits + pageBits)}" + apply (case_tac "n = 0") + apply simp + apply (subst range_cover_intvl,simp) + apply simp + apply (subgoal_tac "p + ucast off * 8 + x \ {p..+2 ^ pageBits}") + apply blast + apply (clarsimp simp: intvl_def) + apply (rule_tac x = "unat off * 8 + unat x" in exI) + apply (simp add: ucast_nat_def) + apply (rule nat_add_offset_less [where n = 3, simplified]) + apply (simp add: word_less_nat_alt) + apply (rule unat_lt2p) + apply (simp add: pageBits_def objBits_simps) + done + +lemma heap_to_device_data_disj_mdf: + assumes rc: "range_cover ptr sz (gbits + pageBits) n" + and ko_at: "ksPSpace \ a = Some obj" + and obj_size: "objBitsKO obj = pageBits" + and pal: "pspace_aligned' \" and pdst: "pspace_distinct' \" + and pno: "pspace_no_overlap' ptr sz \" + and sz: "gbits + pageBits \ sz" + and szb: "sz < word_bits" + shows "(heap_to_device_data (ksPSpace \) + (\x. if x \ {ptr..+n * 2 ^ (gbits + pageBits)} then 0 else underlying_memory (ksMachineState \) x) a) + = (heap_to_device_data (ksPSpace \) (underlying_memory (ksMachineState \)) a)" + proof - + from sz have "3 \ sz" by (simp add: objBits_simps pageBits_def) + + hence sz2: "2 ^ (sz - 3) * 8 = (2 :: nat) ^ sz" + apply (subgoal_tac "(8 :: nat) = 2 ^ 3") + apply (erule ssubst) + apply (subst power_add [symmetric]) + apply (rule arg_cong [where f = "\n. 2 ^ n"]) + apply simp + apply simp + done + have p2dist: "n * (2::nat) ^ (gbits + pageBits) = n * 2 ^ gbits * 2 ^ pageBits" (is "?lhs = ?rhs") + by (simp add: monoid_mult_class.power_add) + show ?thesis + apply (simp add: heap_to_device_data_def) + apply (case_tac "n = 0") + apply simp + apply (subst map_option_byte_to_word_heap) + apply (erule range_cover_nca_neg[OF _ _ rc]) + using range_cover_intvl[OF rc] + apply (clarsimp simp add: heap_to_user_data_def Let_def + byte_to_word_heap_def[abs_def] map_comp_Some_iff projectKOs) + apply (cut_tac pspace_no_overlapD' [OF ko_at pno]) + apply (subst (asm) upto_intvl_eq [symmetric]) + apply (rule pspace_alignedD' [OF ko_at pal]) + apply (simp add: obj_size p2dist) + apply (drule_tac B' = "{ptr..ptr + (of_nat n * 2 ^ (gbits + pageBits) - 1)}" in disjoint_subset2[rotated]) + apply (clarsimp simp: p2dist ) + apply (rule range_cover_bound_weak) + apply simp + apply (rule rc) + apply simp + apply simp + done +qed + +lemma pageBitsForSize_mess_multi: + "8 * (2::nat) ^ (pageBitsForSize sz - 3) = 2^(pageBitsForSize sz)" + apply (subgoal_tac "(8 :: nat) = 2 ^ 3") + apply (erule ssubst) + apply (subst power_add [symmetric]) + apply (rule arg_cong [where f = "\n. 2 ^ n"]) + apply (case_tac sz,(simp add: bit_simps)+) + done + +lemma createObjects_ccorres_user_data: + defines "ko \ KOUserData" + shows "\\ x. (\, x) \ rf_sr \ range_cover ptr sz (gbits + pageBits) n + \ ptr \ 0 + \ pspace_aligned' \ \ pspace_distinct' \ + \ valid_machine_state' \ + \ ret_zero ptr (n * 2 ^ (gbits + pageBits)) \ + \ pspace_no_overlap' ptr sz \ + \ region_is_zero_bytes ptr (n * 2 ^ (gbits + pageBits)) x + \ {ptr ..+ n * (2 ^ (gbits + pageBits))} \ kernel_data_refs = {} + \ + (\\ksPSpace := + foldr (\addr. data_map_insert addr KOUserData) + (new_cap_addrs (n * 2^gbits) ptr KOUserData) (ksPSpace \)\, + x\globals := globals x\t_hrs_' := + hrs_htd_update + (ptr_retyps_gen (n * 2 ^ gbits) (Ptr ptr :: user_data_C ptr) arr) + ((t_hrs_' (globals x)))\ \) \ rf_sr" + (is "\\ x. 
?P \ x \ + (\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr") +proof (intro impI allI) + fix \ x + let ?thesis = "(\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr" + let ?ks = "?ks \" + let ?ks' = "?ks' x" + let ?ptr = "Ptr ptr :: user_data_C ptr" + + note Kernel_C.user_data_C_size [simp del] + + assume "?P \ x" + hence rf: "(\, x) \ rf_sr" and al: "is_aligned ptr (gbits + pageBits)" + and ptr0: "ptr \ 0" + and sz: "gbits + pageBits \ sz" + and szb: "sz < word_bits" + and pal: "pspace_aligned' \" and pdst: "pspace_distinct' \" + and pno: "pspace_no_overlap' ptr sz \" + and vms: "valid_machine_state' \" + and rzo: "ret_zero ptr (n * 2 ^ (gbits + pageBits)) \" + and empty: "region_is_bytes ptr (n * 2 ^ (gbits + pageBits)) x" + and zero: "heap_list_is_zero (hrs_mem (t_hrs_' (globals x))) ptr (n * 2 ^ (gbits + pageBits))" + and rc: "range_cover ptr sz (gbits + pageBits) n" + and rc': "range_cover ptr sz (objBitsKO ko) (n * 2^ gbits)" + and kdr: "{ptr..+n * 2 ^ (gbits + pageBits)} \ kernel_data_refs = {}" + by (auto simp: range_cover.aligned objBits_simps ko_def + range_cover_rel[where sbit' = pageBits] + range_cover.sz[where 'a=machine_word_len, folded word_bits_def]) + + hence al': "is_aligned ptr (objBitsKO ko)" + by (clarsimp dest!: is_aligned_weaken range_cover.aligned) + + (* This is a hack *) + have mko: "\dev. makeObjectKO False (Inr object_type.SmallPageObject) = Some ko" + by (simp add: makeObjectKO_def ko_def) + + from sz have "3 \ sz" by (simp add: objBits_simps pageBits_def ko_def) + + hence sz2: "2 ^ (sz - 3) * 8 = (2 :: nat) ^ sz" + apply (subgoal_tac "(8 :: nat) = 2 ^ 3") + apply (erule ssubst) + apply (subst power_add [symmetric]) + apply (rule arg_cong [where f = "\n. 2 ^ n"]) + apply simp + apply simp + done + + define big_0s where "big_0s \ (replicate (2^pageBits) 0) :: word8 list" + + have "length big_0s = 4096" unfolding big_0s_def + by simp (simp add: bit_simps) + + hence i1: "\off :: 9 word. index (user_data_C.words_C (from_bytes big_0s)) (unat off) = 0" + apply (simp add: from_bytes_def) + apply (simp add: typ_info_simps user_data_C_tag_def) + apply (simp add: ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td align_of_def padup_def + final_pad_def size_td_lt_ti_typ_pad_combine Let_def align_td_array' size_td_array size_of_def + cong: if_cong) + apply (simp add: update_ti_adjust_ti update_ti_t_machine_word_0s + typ_info_simps update_ti_t_ptr_0s + ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td + ti_typ_combine_empty_ti ti_typ_combine_td + align_of_def padup_def + final_pad_def size_td_lt_ti_typ_pad_combine Let_def + align_td_array' size_td_array cong: if_cong) + apply (subst update_ti_t_array_rep_word0) + apply (unfold big_0s_def)[1] + apply (rule arg_cong [where f = "\x. replicate x 0"]) + apply (simp (no_asm) add: size_of_def pageBits_def) + apply (subst index_foldr_update) + apply (rule order_less_le_trans [OF unat_lt2p]) + apply simp + apply simp + apply simp + done + + have p2dist: "n * (2::nat) ^ (gbits + pageBits) = n * 2 ^ gbits * 2 ^ pageBits" (is "?lhs = ?rhs") + by (simp add:monoid_mult_class.power_add) + + have nca: "\x p (off :: 9 word). 
\ p \ set (new_cap_addrs (n*2^gbits) ptr KOUserData); x < 8 \ + \ p + ucast off * 8 + x \ {ptr..+ n * 2 ^ (gbits + pageBits) }" + using sz + apply (clarsimp simp: new_cap_addrs_def objBits_simps shiftl_t2n intvl_def) + apply (rename_tac x off pa) + apply (rule_tac x = "2 ^ pageBits * pa + unat off * 8 + unat x" in exI) + apply (simp add: ucast_nat_def power_add) + apply (subst mult.commute, subst add.assoc) + apply (rule_tac y = "(pa + 1) * 2 ^ pageBits " in less_le_trans) + apply (simp add:word_less_nat_alt) + apply (rule_tac y="unat off * 8 + 8" in less_le_trans) + apply simp + apply (simp add:pageBits_def) + apply (cut_tac x = off in unat_lt2p) + apply simp + apply (subst mult.assoc[symmetric]) + apply (rule mult_right_mono) + apply simp+ + done + + have nca_neg: "\x p (off :: 9 word). + \x < 4; {p..+2 ^ objBitsKO KOUserData } \ {ptr..ptr + (of_nat n * 2 ^ (gbits + pageBits) - 1)} = {}\ + \ p + ucast off * 8 + x \ {ptr..+n * 2 ^ (gbits + pageBits)}" + apply (case_tac "n = 0") + apply simp + apply (subst range_cover_intvl[OF rc]) + apply simp + apply (subgoal_tac " p + ucast off * 8 + x \ {p..+2 ^ objBitsKO KOUserData}") + apply blast + apply (clarsimp simp:intvl_def) + apply (rule_tac x = "unat off * 8 + unat x" in exI) + apply (simp add: ucast_nat_def) + apply (rule nat_add_offset_less [where n = 3, simplified]) + apply (simp add: word_less_nat_alt) + apply (rule unat_lt2p) + apply (simp add: pageBits_def objBits_simps) + done + + have zero_app: "\x. x \ {ptr..+ n * 2 ^ (gbits + pageBits) } + \ underlying_memory (ksMachineState \) x = 0" + apply (cases "n = 0") + apply simp + apply (rule pspace_no_overlap_underlying_zero[OF pno vms]) + apply (erule subsetD[rotated]) + apply (cases "n = 0") + apply simp + apply (subst range_cover_intvl[OF rc], simp) + apply (rule order_trans[rotated], erule range_cover_subset'[OF rc]) + apply (simp add: field_simps) + done + + have cud: "\p. p \ set (new_cap_addrs (n * 2^ gbits) ptr KOUserData) \ + cuser_user_data_relation + (byte_to_word_heap + (underlying_memory (ksMachineState \)) p) + (from_bytes big_0s)" + unfolding cuser_user_data_relation_def + apply - + apply (rule allI) + apply (subst i1) + apply (simp add: byte_to_word_heap_def Let_def + zero_app nca nca [where x3 = 0, simplified]) + apply (simp add: word_rcat_bl) + done + + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + have cud2: "\xa v y. + \ heap_to_user_data + (\x. if x \ set (new_cap_addrs (n*2^gbits) ptr KOUserData) + then Some KOUserData else ksPSpace \ x) + (underlying_memory (ksMachineState \)) xa = + Some v; xa \ set (new_cap_addrs (n*2^gbits) ptr KOUserData); + heap_to_user_data (ksPSpace \) (underlying_memory (ksMachineState \)) xa = Some y \ \ y = v" + using range_cover_intvl[OF rc] + by (clarsimp simp add: heap_to_user_data_def Let_def sz2 + byte_to_word_heap_def[abs_def] map_comp_Some_iff projectKOs) + + have relrl: "cmap_relation (heap_to_user_data (ksPSpace \) (underlying_memory (ksMachineState \))) + (cslift x) Ptr cuser_user_data_relation + \ cmap_relation + (heap_to_user_data + (\x. if x \ set (new_cap_addrs (n * 2 ^ gbits) ptr KOUserData) + then Some KOUserData else ksPSpace \ x) + (underlying_memory (ksMachineState \))) + (\y. 
if y \ Ptr ` set (new_cap_addrs (n*2^gbits) ptr KOUserData) + then Some + (from_bytes (replicate (2 ^ pageBits) 0)) + else cslift x y) + Ptr cuser_user_data_relation" + apply (rule cmap_relationI) + apply (clarsimp simp: dom_heap_to_user_data cmap_relation_def dom_if image_Un + projectKO_opt_retyp_same projectKOs) + apply (case_tac "xa \ set (new_cap_addrs (n*2^gbits) ptr KOUserData)") + apply (clarsimp simp: heap_to_user_data_def sz2) + apply (erule cud [unfolded big_0s_def]) + apply (subgoal_tac "(Ptr xa :: user_data_C ptr) \ Ptr ` set (new_cap_addrs (n*2^gbits) ptr KOUserData)") + apply simp + apply (erule (1) cmap_relationE2) + apply (drule (1) cud2) + apply simp + apply simp + apply clarsimp + done + + (* /obj specific *) + + (* s/obj/obj'/ *) + + have szo: "size_of TYPE(user_data_C) = 2 ^ objBitsKO ko" by (simp add: size_of_def objBits_simps archObjSize_def ko_def pageBits_def) + have szo': "n * 2 ^ (gbits + pageBits) = n * 2 ^ gbits * size_of TYPE(user_data_C)" using sz + apply (subst szo) + apply (clarsimp simp: power_add[symmetric] objBits_simps ko_def) + done + + have rb': "region_is_bytes ptr (n * 2 ^ gbits * 2 ^ objBitsKO ko) x" + using empty + by (simp add: mult.commute mult.left_commute power_add objBits_simps ko_def) + + note rl' = cslift_ptr_retyp_other_inst[OF rb' rc' szo' szo, simplified] + + (* rest is generic *) + + note rl = projectKO_opt_retyp_other [OF rc' pal pno,unfolded ko_def] + note cterl = retype_ctes_helper[OF pal pdst pno al' range_cover.sz(2)[OF rc'] range_cover.sz(1)[OF rc', folded word_bits_def] mko rc'] + note ht_rl = clift_eq_h_t_valid_eq[OF rl', OF tag_disj_via_td_name, simplified] + + have guard: + "\t) (underlying_memory (ksMachineState \)) (t_hrs_' (globals x))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + + hence "cpspace_relation ?ks (underlying_memory (ksMachineState \)) ?ks'" + unfolding cpspace_relation_def + using empty rc' szo + apply - + supply image_cong_simp [cong del] + apply (clarsimp simp: rl' tag_disj_via_td_name cte_C_size ht_rl + foldr_upd_app_if [folded data_map_insert_def]) + apply (simp add: rl ko_def projectKOs p2dist + cterl[unfolded ko_def]) + apply (subst clift_ptr_retyps_gen_prev_memset_same[OF guard]) + apply (simp add: pageBits_def objBits_simps) + apply simp + apply (simp add: pageBits_def objBits_simps) + apply (cut_tac range_cover.strong_times_64[OF rc], simp_all)[1] + apply (simp add: p2dist objBits_simps) + apply (cut_tac zero) + apply (simp add: pageBits_def power_add field_simps) + apply (simp add: objBits_simps ptr_add_to_new_cap_addrs[OF szo] ko_def + cong: if_cong) + apply (simp add: p2dist[symmetric]) + apply (erule relrl[simplified]) + done + + thus ?thesis using rf empty kdr rzo + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' tag_disj_via_td_name ) + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (simp add: tag_disj_via_td_name rl' tcb_C_size h_t_valid_clift_Some_iff) + apply (clarsimp simp: hrs_htd_update szo'[symmetric]) + apply (simp add:szo hrs_htd_def p2dist objBits_simps ko_def ptr_retyps_htd_safe_neg + kernel_data_refs_domain_eq_rotate + rl foldr_upd_app_if [folded data_map_insert_def] + projectKOs cvariable_array_ptr_retyps + zero_ranges_ptr_retyps) + done +qed + +lemma t_hrs_update_hrs_htd_id: + "t_hrs_'_update id = id" + "hrs_htd_update id = id" + by (simp_all add: fun_eq_iff hrs_htd_update_def) + +lemmas clift_array_assertionE + = clift_array_assertion_imp[where p="Ptr q" and p'="Ptr q" for q, + OF _ refl _ exI[where x=0], simplified] 
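+
+(* Descriptive note: the next lemma states that the C-level object size computation agrees with
+   the Haskell-level one — for every object type, Call getObjectSize_'proc returns
+   of_nat (getObjectSize newType (unat sz)). The proof below proceeds by case analysis on newType. *)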
+ +lemma getObjectSize_symb: + "\s. \ \ {s. t_' s = object_type_from_H newType \ userObjSize_' s = sz} Call getObjectSize_'proc + {s'. ret__unsigned_long_' s' = of_nat (getObjectSize newType (unat sz))}" + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: nAPIObjects_def Kernel_C_defs framesize_to_H_def) + apply (case_tac newType) + apply simp_all + apply (simp_all add: object_type_from_H_def Kernel_C_defs + APIType_capBits_def objBits_simps') + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (simp_all add: object_type_from_H_def Kernel_C_defs frameSizeConstants_defs + APIType_capBits_def objBits_simps' bit_simps + split: if_split) + apply unat_arith + (* FIXME AARCH64 abstraction violation *) + apply (simp add: Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) + done + +(* If we only change local variables on the C side, nothing need be done on the abstract side. *) +lemma ccorres_only_change_locals: + "\ \s. \ \ {s} C {t. globals s = globals t} \ \ ccorresG rf_sr \ dc xfdc \ UNIV hs (return x) C" + apply (rule ccorres_from_vcg) + apply (clarsimp simp: return_def) + apply (clarsimp simp: rf_sr_def) + apply (rule hoare_complete) + apply (clarsimp simp: HoarePartialDef.valid_def) + apply (erule_tac x=x in meta_allE) + apply (drule hoare_sound) + apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def) + apply auto + done + +lemmas upt_enum_offset_trivial = upt_enum_offset_trivial[where 'a=64, folded word_bits_def] + +lemma getObjectSize_max_size: + "\ newType = APIObjectType apiobject_type.Untyped \ x < 64; + newType = APIObjectType apiobject_type.CapTableObject \ x < 59 \ \ getObjectSize newType x < word_bits" + apply (clarsimp simp only: getObjectSize_def apiGetObjectSize_def word_bits_def + split: AARCH64_H.object_type.splits apiobject_type.splits) + apply (clarsimp simp: tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def cteSizeBits_def + bit_simps + split: if_split) + done + +lemma getObjectSize_min_size: + "\ newType = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ x; + newType = APIObjectType apiobject_type.CapTableObject \ 2 \ x \ \ + 4 \ getObjectSize newType x" + apply (clarsimp simp only: getObjectSize_def apiGetObjectSize_def word_bits_def + split: AARCH64_H.object_type.splits apiobject_type.splits) + apply (clarsimp simp: tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def cteSizeBits_def + bit_simps untypedBits_defs + split: if_split) + done + +(* + * Assuming "placeNewObject" doesn't fail, it is equivalent + * to placing a number of objects into the PSpace. + *) +lemma placeNewObject_eq: + notes option.case_cong_weak [cong] + shows + "\ groupSizeBits < word_bits; is_aligned ptr (groupSizeBits + objBitsKO (injectKOS object)); + no_fail ((=) s) (placeNewObject ptr object groupSizeBits) \ \ + ((), (s\ksPSpace := foldr (\addr. 
data_map_insert addr (injectKOS object)) (new_cap_addrs (2 ^ groupSizeBits) ptr (injectKOS object)) (ksPSpace s)\)) + \ fst (placeNewObject ptr object groupSizeBits s)" + apply (clarsimp simp: placeNewObject_def placeNewObject'_def) + apply (clarsimp simp: split_def field_simps split del: if_split) + apply (clarsimp simp: no_fail_def) + apply (subst lookupAround2_pspace_no) + apply assumption + apply (subst (asm) lookupAround2_pspace_no) + apply assumption + apply (clarsimp simp add: in_monad' split_def bind_assoc field_simps + snd_bind ball_to_all unless_def split: option.splits if_split_asm) + apply (clarsimp simp: data_map_insert_def new_cap_addrs_def) + apply (subst upto_enum_red2) + apply (fold word_bits_def, assumption) + apply (clarsimp simp: field_simps shiftl_t2n power_add mult.commute mult.left_commute + cong: foldr_cong map_cong) + done + +lemma globals_list_distinct_rf_sr: + "\ (s, s') \ rf_sr; S \ kernel_data_refs = {} \ + \ globals_list_distinct S symbol_table globals_list" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (erule globals_list_distinct_subset) + apply blast + done + +lemma rf_sr_htd_safe: + "(s, s') \ rf_sr \ htd_safe domain (hrs_htd (t_hrs_' (globals s')))" + by (simp add: rf_sr_def cstate_relation_def Let_def) + +lemma region_actually_is_bytes_dom_s: + "region_actually_is_bytes' ptr len htd + \ S \ {ptr ..+ len} + \ S \ {SIndexVal, SIndexTyp 0} \ dom_s htd" + apply (clarsimp simp: region_actually_is_bytes'_def dom_s_def) + apply fastforce + done + +lemma typ_region_bytes_actually_is_bytes: + "htd = typ_region_bytes ptr bits htd' + \ region_actually_is_bytes' ptr (2 ^ bits) htd" + by (clarsimp simp: region_actually_is_bytes'_def typ_region_bytes_def) + +(* FIXME: need a way to avoid overruling the parser on this, it's ugly *) +lemma memzero_modifies: + "\\. \\\<^bsub>/UNIV\<^esub> {\} Call memzero_'proc {t. t may_only_modify_globals \ in [t_hrs]}" + apply (rule allI, rule conseqPre) + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (tactic \HoarePackage.vcg_tac "_modifies" "false" [] @{context} 1\) + apply (clarsimp simp: mex_def meq_def simp del: split_paired_Ex) + apply (intro exI globals.equality, simp_all) + done + +lemma ghost_assertion_size_logic_no_unat: + "sz \ gsMaxObjectSize s + \ (s, \) \ rf_sr + \ gs_get_assn cap_get_capSizeBits_'proc (ghost'state_' (globals \)) = 0 \ + of_nat sz \ gs_get_assn cap_get_capSizeBits_'proc (ghost'state_' (globals \))" + apply (rule ghost_assertion_size_logic'[rotated]) + apply (simp add: rf_sr_def) + apply (simp add: unat_of_nat) + done + +lemma ccorres_placeNewObject_endpoint: + "ko = (makeObject :: endpoint) + \ ccorresG rf_sr \ dc xfdc + (pspace_aligned' and pspace_distinct' + and pspace_no_overlap' regionBase (objBits ko) + and ret_zero regionBase (2 ^ objBits ko) + and (\s. 2 ^ (objBits ko) \ gsMaxObjectSize s) + and K (regionBase \ 0 \ range_cover regionBase (objBits ko) (objBits ko) 1 + \ {regionBase..+ 2 ^ (objBits ko)} \ kernel_data_refs = {})) + ({s. region_actually_is_zero_bytes regionBase (2 ^ objBits ko) s}) + hs + (placeNewObject regionBase ko 0) + (global_htd_update (\_. 
(ptr_retyp (ep_Ptr regionBase))))" + apply (rule ccorres_from_vcg_nofail) + apply clarsimp + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: rf_sr_htd_safe) + apply (intro conjI allI impI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate + objBits_simps' ptr_retyp_htd_safe_neg) + apply (rule bexI [OF _ placeNewObject_eq]) + apply (clarsimp simp: split_def) + apply (clarsimp simp: new_cap_addrs_def) + apply (cut_tac createObjects_ccorres_ep [where ptr=regionBase and n="1" and sz="objBitsKO (KOEndpoint makeObject)"]) + apply (erule_tac x=\ in allE, erule_tac x=x in allE) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps + elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp simp: word_bits_conv) + apply (clarsimp simp: range_cover.aligned objBits_simps) + apply (clarsimp simp: no_fail_def) + done + +lemma ccorres_placeNewObject_notification: + "ko = (makeObject :: notification) + \ ccorresG rf_sr \ dc xfdc + (pspace_aligned' and pspace_distinct' + and pspace_no_overlap' regionBase (objBits ko) + and ret_zero regionBase (2 ^ objBits ko) + and (\s. 2 ^ (objBits ko) \ gsMaxObjectSize s) + and K (regionBase \ 0 \ range_cover regionBase (objBits ko) (objBits ko) 1 + \ {regionBase..+ 2 ^ (objBits ko)} \ kernel_data_refs = {})) + ({s. region_actually_is_zero_bytes regionBase (2 ^ objBits ko) s}) + hs + (placeNewObject regionBase ko 0) + (global_htd_update (\_. (ptr_retyp (ntfn_Ptr regionBase))))" + apply (rule ccorres_from_vcg_nofail) + apply clarsimp + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: rf_sr_htd_safe) + apply (intro conjI allI impI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate objBits_simps' + ptr_retyp_htd_safe_neg) + apply (rule bexI [OF _ placeNewObject_eq]) + apply (clarsimp simp: split_def) + apply (clarsimp simp: new_cap_addrs_def) + apply (cut_tac createObjects_ccorres_ntfn [where ptr=regionBase and n="1" and sz="objBitsKO (KONotification makeObject)"]) + apply (erule_tac x=\ in allE, erule_tac x=x in allE) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps' + elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp simp: word_bits_conv) + apply (clarsimp simp: range_cover.aligned objBits_simps) + apply (clarsimp simp: no_fail_def) + done + + +lemma htd_update_list_dom_better [rule_format]: + "(\p d. 
dom_s (htd_update_list p xs d) = + (dom_s d) \ dom_tll p xs)" + apply(induct_tac xs) + apply simp + apply clarsimp + apply(auto split: if_split_asm) + apply(erule notE) + apply(clarsimp simp: dom_s_def) + apply(case_tac y) + apply clarsimp+ + apply(clarsimp simp: dom_s_def) + done + +lemma ptr_array_retyps_htd_safe_neg: + "\ htd_safe D htd; {ptr_val ptr ..+ n * size_of TYPE('a :: mem_type)} \ D' = {}; -D \ D' \ + \ htd_safe D (ptr_arr_retyps n (ptr :: 'a ptr) htd)" + apply (simp add: htd_safe_def ptr_arr_retyps_def htd_update_list_dom_better) + apply (auto simp: dom_tll_def intvl_def) + done + +lemmas ptr_array_retyps_htd_safe_neg' = ptr_array_retyps_htd_safe_neg[OF _ _ subset_refl] + +lemma ccorres_placeNewObject_captable: + "ccorresG rf_sr \ dc xfdc + (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase (unat userSize + 5) + and (\s. 2 ^ (unat userSize + 5) \ gsMaxObjectSize s) + and ret_zero regionBase (2 ^ (unat userSize + 5)) + and K (regionBase \ 0 \ range_cover regionBase (unat userSize + 5) (unat userSize + 5) 1 + \ ({regionBase..+2 ^ (unat userSize + 5)} \ kernel_data_refs = {}))) + ({s. region_actually_is_zero_bytes regionBase (2 ^ (unat userSize + 5)) s}) + hs + (placeNewObject regionBase (makeObject :: cte) (unat (userSize::machine_word))) + (global_htd_update (\_. (ptr_arr_retyps (2 ^ (unat userSize)) (cte_Ptr regionBase))))" + apply (rule ccorres_from_vcg_nofail) + apply clarsimp + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: rf_sr_htd_safe) + apply (intro conjI allI impI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate + ptr_array_retyps_htd_safe_neg size_of_def power_add) + apply (frule range_cover_rel[where sbit' = 5]) + apply simp + apply simp + apply (frule range_cover.unat_of_nat_shift[where gbits = 5 , OF _ le_refl le_refl ]) + apply (subgoal_tac "region_is_bytes regionBase (2 ^ (unat userSize + 5)) x") + apply (rule bexI [OF _ placeNewObject_eq]) + apply (clarsimp simp: split_def new_cap_addrs_def) + apply (cut_tac createObjects_ccorres_cte [where ptr=regionBase and n="2 ^ unat userSize" and sz="unat userSize + objBitsKO (KOCTE makeObject)"]) + apply (erule_tac x=\ in allE, erule_tac x=x in allE) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def cteSizeBits_def)+ + apply (clarsimp simp: split_def objBitsKO_def rf_sr_def split_def Let_def cteSizeBits_def + new_cap_addrs_def field_simps power_add ptr_retyps_gen_def + elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp simp: word_bits_conv range_cover_def) + apply (clarsimp simp: objBitsKO_def objBits_simps' range_cover.aligned) + apply (clarsimp simp: no_fail_def) + apply (simp add: region_actually_is_bytes) + done + +lemma rf_sr_helper: + "\a b P X. ((a, globals_update P (b\tcb_' := X\)) \ rf_sr) = ((a, globals_update P b) \ rf_sr)" + apply (clarsimp simp: rf_sr_def) + done + +declare replicate_numeral [simp del] + +definition + array_updates_rev :: "(nat \ 'a) list \ 'a::c_type['b::finite] \ 'a['b]" +where + "array_updates_rev \ foldr (\(i,v) a. 
Arrays.update a i v)" + +lemma array_updates_rev: + "array_updates_rev upds arr = array_updates arr (fold (#) upds [])" + by (auto simp: array_updates_rev_def array_updates_def rev_conv_fold[symmetric] + foldl_conv_foldr + intro: foldr_cong[OF refl refl]) + +lemma array_updates_rev': + "array_updates arr upds = array_updates_rev (fold (#) upds []) arr" + by (auto simp: array_updates_rev_def array_updates_def rev_conv_fold[symmetric] + foldl_conv_foldr + intro: foldr_cong[OF refl refl]) + +lemma Arrays_udpate_array_updates_rev: + "Arrays.update a i v = array_updates_rev [(i,v)] a" + by (simp add: array_updates_rev_def) + +lemma array_updates_rev_app: + "array_updates_rev upds1 (array_updates_rev upds2 a) = array_updates_rev (upds1 @ upds2) a" + by (simp add: array_updates_rev_def) + +lemma Arch_initContext_spec': + shows + "\s\<^sub>0. \ \ + {t. t = s\<^sub>0 \ t \\<^sub>c context_' t } + Call Arch_initContext_'proc + {t. t = globals_update + (t_hrs_'_update + (hrs_mem_update + (heap_update (registers_Ptr &(context_' s\<^sub>0\[''registers_C''])) + (array_updates (h_val (hrs_mem (t_hrs_' (globals s\<^sub>0))) + (registers_Ptr &(context_' s\<^sub>0\[''registers_C'']))) + initContext_registers)))) s\<^sub>0}" + unfolding initContext_registers_def + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (simp add: C_register_defs Arrays_udpate_array_updates_rev) + apply (rule allI, rule conseqPre) + apply (rule hoarep.Catch[rotated], vcg) + apply (rule conseqPost[where A'="{}" and Q'=Q and Q=Q for Q, simplified]) + apply ((vcg, + clarsimp simp: hrs_mem_update_compose h_val_id packed_heap_update_collapse + array_updates_rev_app))+ + apply (auto simp: h_val_heap_same_hrs_mem_update_typ_disj[OF h_t_valid_c_guard_field _ tag_disj_via_td_name] + export_tag_adjust_ti typ_uinfo_t_def array_updates_rev + cong: Kernel_C.globals.unfold_congs StateSpace.state.unfold_congs) + done + +lemma ccorres_placeNewObject_tcb: + "ccorresG rf_sr \ dc xfdc + (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase tcbBlockSizeBits + and (\s. sym_refs (state_refs_of' s)) + and (\s. 2 ^ tcbBlockSizeBits \ gsMaxObjectSize s) + and ret_zero regionBase (2 ^ tcbBlockSizeBits) + and K (regionBase \ 0 \ range_cover regionBase tcbBlockSizeBits tcbBlockSizeBits 1 + \ {regionBase..+2^tcbBlockSizeBits} \ kernel_data_refs = {})) + ({s. region_actually_is_zero_bytes regionBase (2^tcbBlockSizeBits) s}) + hs + (placeNewObject regionBase (makeObject :: tcb) 0) + (\tcb :== tcb_Ptr (regionBase + 0x400);; + (global_htd_update (\s. ptr_retyp (Ptr (ptr_val (tcb_' s) - ctcb_offset) :: (cte_C[5]) ptr) + \ ptr_retyp (tcb_' s)));; + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \tcb\ + (call (\s. s\context_' := Ptr &((Ptr &(tcb_' s\[''tcbArch_C'']) :: arch_tcb_C ptr)\[''tcbContext_C''])\) Arch_initContext_'proc (\s t. s\globals := globals t\) (\s' s''. Basic (\s. s))));; + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \tcb\ + (Basic (\s. 
globals_update (t_hrs_'_update (hrs_mem_update (heap_update (Ptr &((tcb_' s)\[''tcbTimeSlice_C''])) (5::machine_word)))) s))))" +proof - + let ?offs = "0x400" \ \2 ^ (tcbBlockSizeBits - 1)\ + + show ?thesis + apply (simp add: placeNewObject_eq) + apply (rule ccorres_from_vcg_nofail) + apply clarsimp + apply (rule conseqPre, vcg exspec=Arch_initContext_spec') + apply (clarsimp simp: rf_sr_htd_safe ctcb_offset_defs cong: conj_cong) + apply (subgoal_tac "c_guard (tcb_Ptr (regionBase + ?offs))") + prefer 2 + apply (rule c_guard_tcb; + clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs range_cover.aligned) + apply (clarsimp simp: tcbBlockSizeBits_def) + apply (subgoal_tac "hrs_htd (hrs_htd_update (ptr_retyp (Ptr regionBase :: (cte_C[5]) ptr) + \ ptr_retyp (tcb_Ptr (regionBase + ?offs))) + (t_hrs_' (globals x))) \\<^sub>t tcb_Ptr (regionBase + ?offs)") + prefer 2 + apply (clarsimp simp: hrs_htd_update) + apply (rule h_t_valid_ptr_retyps_gen_disjoint + [where n=1 and arr=False, unfolded ptr_retyps_gen_def, simplified]) + apply (rule ptr_retyp_h_t_valid) + apply simp + apply (rule tcb_ptr_orth_cte_ptrs') + apply (intro conjI allI impI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate) + apply (intro ptr_retyps_htd_safe_neg ptr_retyp_htd_safe_neg, simp_all add: size_of_def)[1] + apply (erule disjoint_subset[rotated]) + apply (rule intvl_sub_offset, simp add: objBits_defs) + apply (erule disjoint_subset[rotated], + simp add: intvl_start_le size_td_array cte_C_size objBits_defs) + apply (clarsimp simp: hrs_htd_update) + apply (rule h_t_valid_field[rotated], simp+)+ + apply (clarsimp simp: hrs_htd_update) + apply (rule bexI[OF _ placeNewObject_eq]; + clarsimp simp: hrs_htd_update word_bits_def no_fail_def objBitsKO_def + range_cover.aligned new_cap_addrs_def tcbBlockSizeBits_def) + apply (cut_tac \=\ and x=x and ks="ksPSpace \" and p="tcb_Ptr (regionBase + ctcb_offset)" + in cnc_tcb_helper; + clarsimp simp: ctcb_ptr_to_tcb_ptr_def objBitsKO_def range_cover.aligned tcbBlockSizeBits_def) + apply (frule region_actually_is_bytes; clarsimp simp: region_is_bytes'_def) + apply (clarsimp simp: hrs_mem_def) + by (clarsimp simp: ctcb_offset_defs rf_sr_def ptr_retyps_gen_def heap_updates_def + hrs_mem_update_compose + cong: Kernel_C.globals.unfold_congs StateSpace.state.unfold_congs + kernel_state.unfold_congs) +qed + +lemma placeNewObject_pte_pt: + "ccorresG rf_sr \ dc xfdc + (valid_global_refs' and pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase pageBits + and (\s. 2 ^ pageBits \ gsMaxObjectSize s) + and ret_zero regionBase (2 ^ pageBits) + and K (regionBase \ 0 \ range_cover regionBase pageBits pageBits 1 + \ ({regionBase..+2 ^ pageBits} \ kernel_data_refs = {}))) + ({s. region_actually_is_zero_bytes regionBase (2 ^ pageBits) s}) + hs + (placeNewObject regionBase (makeObject :: pte) (ptTranslationBits NormalPT_T)) + (global_htd_update (\_. 
(ptr_retyp (pt_Ptr regionBase))))" + apply (rule ccorres_from_vcg_nofail) + apply clarsimp + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: rf_sr_htd_safe) + apply (intro conjI allI impI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate + ptr_retyp_htd_safe_neg pageBits_def) + apply (frule range_cover_rel[where sbit' = 3]) + apply ((simp add: pageBits_def)+)[3] + apply (frule range_cover.unat_of_nat_shift[where gbits = 3 ]) + apply (simp add: pageBits_def)+ + apply (rule le_refl) + apply (subgoal_tac "region_is_bytes regionBase 4096 x") + apply (rule bexI [OF _ placeNewObject_eq]) + apply (clarsimp simp: split_def new_cap_addrs_def) + apply (cut_tac s=\ in createObjects_ccorres_pte_pt [where ptr=regionBase and sz=pageBits]) + apply (erule_tac x=\ in allE, erule_tac x=x in allE) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: objBitsKO_def archObjSize_def + rf_sr_def split_def Let_def ptr_retyps_gen_def + new_cap_addrs_def field_simps power_add + cong: globals.unfold_congs) + apply (simp add: Int_ac bit_simps) + apply (clarsimp simp: word_bits_conv range_cover_def archObjSize_def bit_simps) + apply (clarsimp simp: objBitsKO_def range_cover.aligned archObjSize_def bit_simps) + apply (clarsimp simp: no_fail_def) + apply (simp add: region_actually_is_bytes bit_simps) + done + +lemma placeNewObject_pte_vs: + "ccorresG rf_sr \ dc xfdc + (valid_global_refs' and pspace_aligned' and pspace_distinct' + and pspace_no_overlap' regionBase (pt_bits VSRootPT_T) + and (\s. 2 ^ pt_bits VSRootPT_T \ gsMaxObjectSize s) + and ret_zero regionBase (2 ^ pt_bits VSRootPT_T) + and K (regionBase \ 0 \ range_cover regionBase (pt_bits VSRootPT_T) (pt_bits VSRootPT_T) 1 + \ ({regionBase..+2 ^ pt_bits VSRootPT_T} \ kernel_data_refs = {}))) + ({s. region_actually_is_zero_bytes regionBase (2 ^ pt_bits VSRootPT_T) s}) + hs + (placeNewObject regionBase (makeObject :: pte) (ptTranslationBits VSRootPT_T)) + (global_htd_update (\_. 
(ptr_retyp (vs_Ptr regionBase))))" + apply (rule ccorres_from_vcg_nofail) + apply clarsimp + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: rf_sr_htd_safe) + apply (intro conjI allI impI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate) + apply (erule ptr_retyp_htd_safe_neg) + apply (simp add: pt_bits_def table_size_def power_add ptTranslationBits_vs_array_len) + apply (simp add: bit_simps) + apply simp + apply (frule range_cover_rel[where sbit' = pte_bits]) + apply (simp add: bit_simps split: if_split) + apply (rule refl) + apply (frule range_cover.unat_of_nat_shift[where gbits = 3 ]) + apply (simp add: bit_simps) + apply (rule le_refl) + apply (subgoal_tac "region_is_bytes regionBase (2 ^ pt_bits VSRootPT_T) x") + apply (rule bexI [OF _ placeNewObject_eq]) + apply (clarsimp simp: split_def new_cap_addrs_def) + apply (cut_tac s=\ in createObjects_ccorres_pte_vs[where ptr=regionBase and sz="pt_bits VSRootPT_T"]) + apply (erule_tac x=\ in allE, erule_tac x=x in allE) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: objBitsKO_def archObjSize_def pt_bits_minus_pte_bits + rf_sr_def split_def Let_def ptr_retyps_gen_def + new_cap_addrs_def field_simps power_add + cong: globals.unfold_congs) + apply (simp add: Int_ac) + apply (clarsimp simp: word_bits_conv range_cover_def archObjSize_def) + apply (clarsimp simp: objBitsKO_def range_cover.aligned archObjSize_def + ptTranslationBits_plus_pte_bits) + apply (clarsimp simp: no_fail_def) + apply (simp add: region_actually_is_bytes bit_simps) + done + +(* FIXME move *) +lemma dom_disj_union: + "dom (\x. if P x \ Q x then Some (G x) else None) = dom (\x. if P x then Some (G x) else None) + \ dom (\x. if Q x then Some (G x) else None)" + by (auto split:if_splits) + +lemma createObjects_ccorres_user_data_device: + defines "ko \ KOUserDataDevice" + shows "\\ x. (\, x) \ rf_sr \ range_cover ptr sz (gbits + pageBits) n + \ ptr \ 0 + \ pspace_aligned' \ \ pspace_distinct' \ + \ pspace_no_overlap' ptr sz \ + \ ret_zero ptr (n * 2 ^ (gbits + pageBits)) \ + \ region_is_bytes ptr (n * 2 ^ (gbits + pageBits)) x + \ {ptr ..+ n * (2 ^ (gbits + pageBits))} \ kernel_data_refs = {} + \ + (\\ksPSpace := + foldr (\addr. data_map_insert addr KOUserDataDevice) (new_cap_addrs (n * 2^gbits) ptr KOUserDataDevice) (ksPSpace \)\, + x\globals := globals x\t_hrs_' := + hrs_htd_update + (ptr_retyps_gen (n * 2 ^ gbits) (Ptr ptr :: user_data_device_C ptr) arr) + (t_hrs_' (globals x))\ \) \ rf_sr" + (is "\\ x. 
?P \ x \ + (\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr") +proof (intro impI allI) + fix \ x + let ?thesis = "(\\ksPSpace := ?ks \\, x\globals := globals x\t_hrs_' := ?ks' x\\) \ rf_sr" + let ?ks = "?ks \" + let ?ks' = "?ks' x" + let ?ptr = "Ptr ptr :: user_data_device_C ptr" + + note Kernel_C.user_data_C_size [simp del] + + assume "?P \ x" + hence rf: "(\, x) \ rf_sr" and al: "is_aligned ptr (gbits + pageBits)" + and ptr0: "ptr \ 0" + and sz: "gbits + pageBits \ sz" + and szb: "sz < word_bits" + and pal: "pspace_aligned' \" and pdst: "pspace_distinct' \" + and pno: "pspace_no_overlap' ptr sz \" + and rzo: "ret_zero ptr (n * 2 ^ (gbits + pageBits)) \" + and empty: "region_is_bytes ptr (n * 2 ^ (gbits + pageBits)) x" + and rc: "range_cover ptr sz (gbits + pageBits) n" + and rc': "range_cover ptr sz (objBitsKO ko) (n * 2^ gbits)" + and kdr: "{ptr..+n * 2 ^ (gbits + pageBits)} \ kernel_data_refs = {}" + by (auto simp: range_cover.aligned objBits_simps ko_def + range_cover_rel[where sbit' = pageBits] + range_cover.sz[where 'a=machine_word_len, folded word_bits_def]) + + + hence al': "is_aligned ptr (objBitsKO ko)" + by (clarsimp dest!:is_aligned_weaken range_cover.aligned) + + note range_cover.no_overflow_n[OF rc'] + hence sz_word_bits: + "n * 2 ^ gbits * size_of TYPE(user_data_device_C) < 2 ^ word_bits" + by (simp add:word_bits_def objBits_simps ko_def pageBits_def) + + (* This is a hack *) + have mko: "\dev. makeObjectKO True (Inr object_type.SmallPageObject) = Some ko" + by (simp add: makeObjectKO_def ko_def) + + from sz have "3 \ sz" by (simp add: objBits_simps pageBits_def ko_def) + + hence sz2: "2 ^ (sz - 3) * 8 = (2 :: nat) ^ sz" + apply (subgoal_tac "(8 :: nat) = 2 ^ 3") + apply (erule ssubst) + apply (subst power_add [symmetric]) + apply (rule arg_cong [where f = "\n. 2 ^ n"]) + apply simp + apply simp + done + + have p2dist: "n * (2::nat) ^ (gbits + pageBits) = n * 2 ^ gbits * 2 ^ pageBits" (is "?lhs = ?rhs") + by (simp add:monoid_mult_class.power_add) + + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + (* /obj specific *) + + (* s/obj/obj'/ *) + + have szo: "size_of TYPE(user_data_device_C) = 2 ^ objBitsKO ko" + by (simp add: size_of_def objBits_simps archObjSize_def ko_def pageBits_def) + have szo': "n * 2 ^ (gbits + pageBits) = n * 2 ^ gbits * size_of TYPE(user_data_device_C)" using sz + apply (subst szo) + apply (clarsimp simp: power_add[symmetric] objBits_simps ko_def) + done + + have rb': "region_is_bytes ptr (n * 2 ^ gbits * 2 ^ objBitsKO ko) x" + using empty + by (simp add: mult.commute mult.left_commute power_add objBits_simps ko_def) + + from rb' have rbu: "region_is_bytes ptr (n * 2 ^ gbits * size_of TYPE(user_data_device_C)) x" + by (simp add:szo[symmetric]) + + note rl' = clift_ptr_retyps_gen_other[where p = "Ptr ptr",simplified, OF rbu sz_word_bits] + + (* rest is generic *) + + note rl = projectKO_opt_retyp_other [OF rc' pal pno,unfolded ko_def] + note cterl = retype_ctes_helper[OF pal pdst pno al' range_cover.sz(2)[OF rc'] range_cover.sz(1)[OF rc', folded word_bits_def] mko rc'] + note ht_rl = clift_eq_h_t_valid_eq[OF rl', OF tag_disj_via_td_name, simplified] + + have guard: + "\txa v y. + \ heap_to_device_data + (\x. 
if x \ set (new_cap_addrs (n*2^gbits) ptr KOUserDataDevice) + then Some KOUserData else ksPSpace \ x) + (underlying_memory (ksMachineState \)) xa = + Some v; xa \ set (new_cap_addrs (n*2^gbits) ptr KOUserDataDevice); + heap_to_device_data (ksPSpace \) (underlying_memory (ksMachineState \)) xa = Some y \ \ y = v" + using range_cover_intvl[OF rc] + by (clarsimp simp add: heap_to_device_data_def Let_def sz2 + byte_to_word_heap_def[abs_def] map_comp_Some_iff projectKOs) + + note ptr_retyps_valid = h_t_valid_ptr_retyps_gen_same[OF guard rbu,unfolded addr_card_wb,OF _ sz_word_bits,simplified] + + from rf have "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals x))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + + hence "cpspace_relation ?ks (underlying_memory (ksMachineState \)) ?ks'" + unfolding cpspace_relation_def + using empty rc' szo + supply if_cong[cong] + apply - + apply (clarsimp simp: rl' tag_disj_via_td_name cte_C_size ht_rl + clift_ptr_retyps_gen_other + foldr_upd_app_if [folded data_map_insert_def]) + apply (simp add: rl ko_def projectKOs p2dist + cterl[unfolded ko_def]) + apply (rule cmap_relationI) + apply (clarsimp simp: dom_heap_to_device_data cmap_relation_def dom_if image_Un + projectKO_opt_retyp_same projectKOs liftt_if[folded hrs_mem_def hrs_htd_def] + hrs_htd_update hrs_mem_update ptr_retyps_valid dom_disj_union + simp flip: ptr_add_to_new_cap_addrs) + apply (simp add: heap_to_device_data_def cuser_user_data_device_relation_def) + done (* dont need to track all the device memory *) + + thus ?thesis using rf empty kdr rzo + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' tag_disj_via_td_name ) + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (simp add: tag_disj_via_td_name rl' tcb_C_size h_t_valid_clift_Some_iff) + apply (clarsimp simp: hrs_htd_update szo'[symmetric] cvariable_array_ptr_retyps[OF szo] rb') + apply (subst zero_ranges_ptr_retyps, simp_all only: szo'[symmetric] power_add, simp) + apply (simp add:szo p2dist objBits_simps ko_def ptr_retyps_htd_safe_neg + kernel_data_refs_domain_eq_rotate + rl foldr_upd_app_if [folded data_map_insert_def] + projectKOs cvariable_array_ptr_retyps) + apply (subst cvariable_array_ptr_retyps[OF szo]) + apply (simp add: rb' ptr_retyps_htd_safe_neg)+ + apply (erule ptr_retyps_htd_safe_neg; simp add: pageBits_def field_simps) + done +qed + +lemma placeNewObject_user_data: + "ccorresG rf_sr \ dc xfdc + (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase (pageBits+us) + and valid_machine_state' + and ret_zero regionBase (2 ^ (pageBits+us)) + and (\s. sym_refs (state_refs_of' s)) + and (\s. 2^(pageBits + us) \ gsMaxObjectSize s) + and K (regionBase \ 0 \ range_cover regionBase (pageBits + us) (pageBits+us) (Suc 0) + \ us < word_bits + \ {regionBase..+2^(pageBits + us)} \ kernel_data_refs = {})) + ({s. region_actually_is_zero_bytes regionBase (2^(pageBits+us)) s}) + hs + (placeNewObject regionBase UserData us) + (global_htd_update (\s. 
(ptr_retyps (2^us) (Ptr regionBase :: user_data_C ptr))))" + apply (rule ccorres_from_vcg_nofail) + apply (clarsimp simp:) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: rf_sr_htd_safe) + apply (intro conjI allI impI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate + ptr_retyps_htd_safe_neg[where arr=False, unfolded ptr_retyps_gen_def, simplified] + size_of_def pageBits_def power_add mult.commute mult.left_commute) + apply (frule range_cover.unat_of_nat_shift[where gbits = "pageBits + us"]) + apply simp + apply (clarsimp simp:size_of_def power_add pageBits_def + rf_sr_def cstate_relation_def Let_def field_simps) + apply blast + apply (frule range_cover.aligned) + apply (rule bexI [OF _ placeNewObject_eq], simp_all) + apply (cut_tac ptr=regionBase and sz="pageBits + us" and gbits=us and arr=False + in createObjects_ccorres_user_data[rule_format]) + apply (rule conjI, assumption, clarsimp) + apply (fastforce simp: pageBits_def field_simps region_actually_is_bytes) + apply (clarsimp elim!: rsubst[where P="\x. (\, x) \ rf_sr" for \] + simp: field_simps objBitsKO_def ptr_retyps_gen_def) + apply (simp add: objBitsKO_def field_simps) + apply (rule no_fail_pre, rule no_fail_placeNewObject) + apply (clarsimp simp: objBitsKO_def) + done + +definition + createObject_hs_preconds :: "machine_word \ ArchTypes_H.object_type \ nat \ bool \ kernel_state \ bool" +where + "createObject_hs_preconds regionBase newType userSize d \ + (invs' and pspace_no_overlap' regionBase (getObjectSize newType userSize) + and caps_overlap_reserved' {regionBase ..+ 2 ^ (getObjectSize newType userSize)} + and (\s. 2 ^ (getObjectSize newType userSize) \ gsMaxObjectSize s) + and K(regionBase \ 0 \ canonical_address regionBase + \ ({regionBase..+2 ^ (getObjectSize newType userSize)} \ kernel_data_refs = {}) + \ range_cover regionBase (getObjectSize newType userSize) (getObjectSize newType userSize) (Suc 0) + \ (newType = APIObjectType apiobject_type.Untyped \ userSize \ maxUntypedSizeBits) + \ (newType = APIObjectType apiobject_type.CapTableObject \ userSize < 59) + \ (newType = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ userSize) + \ (newType = APIObjectType apiobject_type.CapTableObject \ 0 < userSize) + \ (d \ newType = APIObjectType apiobject_type.Untyped \ isFrameType newType) + ))" + +abbreviation + "region_actually_is_dev_bytes ptr len devMem s + \ region_actually_is_bytes ptr len s + \ (\ devMem \ heap_list_is_zero (hrs_mem (t_hrs_' (globals s))) ptr len)" + +(* these preconds actually used throughout the proof *) +abbreviation(input) + createObject_c_preconds1 :: "machine_word \ ArchTypes_H.object_type \ nat \ bool \ (globals myvars) set" +where + "createObject_c_preconds1 regionBase newType userSize deviceMemory \ + {s. region_actually_is_dev_bytes regionBase (2 ^ getObjectSize newType userSize) deviceMemory s}" + +(* these preconds used at start of proof *) +definition + createObject_c_preconds :: "machine_word \ ArchTypes_H.object_type \ nat \ bool \ (globals myvars) set" +where + "createObject_c_preconds regionBase newType userSize deviceMemory \ + (createObject_c_preconds1 regionBase newType userSize deviceMemory + \ {s. object_type_from_H newType = t_' s} + \ {s. Ptr regionBase = regionBase_' s} + \ {s. unat (scast (userSize_' s) :: machine_word) = userSize} + \ {s. 
deviceMemory_' s = from_bool deviceMemory} + )" + +lemma ccorres_apiType_split: + "\ apiType = apiobject_type.Untyped \ ccorres rr xf P1 P1' hs X Y; + apiType = apiobject_type.TCBObject \ ccorres rr xf P2 P2' hs X Y; + apiType = apiobject_type.EndpointObject \ ccorres rr xf P3 P3' hs X Y; + apiType = apiobject_type.NotificationObject \ ccorres rr xf P4 P4' hs X Y; + apiType = apiobject_type.CapTableObject \ ccorres rr xf P5 P5' hs X Y + \ \ ccorres rr xf + ((\s. apiType = apiobject_type.Untyped \ P1 s) + and (\s. apiType = apiobject_type.TCBObject \ P2 s) + and (\s. apiType = apiobject_type.EndpointObject \ P3 s) + and (\s. apiType = apiobject_type.NotificationObject \ P4 s) + and (\s. apiType = apiobject_type.CapTableObject \ P5 s)) + ({s. apiType = apiobject_type.Untyped \ s \ P1'} + \ {s. apiType = apiobject_type.TCBObject \ s \ P2'} + \ {s. apiType = apiobject_type.EndpointObject \ s \ P3'} + \ {s. apiType = apiobject_type.NotificationObject \ s \ P4'} + \ {s. apiType = apiobject_type.CapTableObject \ s \ P5'}) + hs X Y" + apply (case_tac apiType, simp_all) + done + +lemma range_cover_simpleI: + "\ is_aligned (ptr :: 'a :: len word) a; a < len_of TYPE('a); c = Suc 0 \ + \ range_cover ptr a a c" + apply (clarsimp simp: range_cover_def) + apply (metis shiftr_0 is_aligned_mask unat_0) + done + +lemma range_coverI: + "\is_aligned (ptr :: 'a :: len word) a; b \ a; a < len_of TYPE('a); + c \ 2 ^ (a - b)\ + \ range_cover ptr a b c" + apply (clarsimp simp: range_cover_def field_simps) + apply (rule conjI) + apply (erule(1) is_aligned_weaken) + apply (subst mask_zero, simp) + apply simp + done + +(* FIXME: with the current state of affairs, we could simplify gs_new_frames *) +lemma gsUserPages_update_ccorres: + "ccorresG rf_sr G dc xf (\_. sz = pageBitsForSize pgsz) UNIV hs + (modify (gsUserPages_update (\m a. if a = ptr then Some pgsz else m a))) + (Basic (globals_update (ghost'state_'_update + (gs_new_frames pgsz ptr sz))))" + apply (rule ccorres_from_vcg) + apply vcg_step + apply (clarsimp simp: split_def simpler_modify_def gs_new_frames_def) + apply (case_tac "ghost'state_' (globals x)") + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def fun_upd_def + carch_state_relation_def cmachine_state_relation_def + ghost_size_rel_def ghost_assertion_data_get_def + cong: if_cong) + done + +lemma placeNewObject_user_data_device: + "ccorresG rf_sr \ dc xfdc + (pspace_aligned' and pspace_distinct' + and ret_zero regionBase (2 ^ (pageBits + us)) + and pspace_no_overlap' regionBase (pageBits+us) + and (\s. sym_refs (state_refs_of' s)) + and (\s. 2^(pageBits + us) \ gsMaxObjectSize s) + and K (regionBase \ 0 \ range_cover regionBase (pageBits + us) (pageBits+us) (Suc 0) + \ {regionBase..+2^(pageBits + us)} \ kernel_data_refs = {})) + ({s. region_actually_is_bytes regionBase (2^(pageBits+us)) s}) + hs + (placeNewObject regionBase UserDataDevice us ) + (global_htd_update (\s. 
(ptr_retyps (2^us) (Ptr regionBase :: user_data_device_C ptr))))" + apply (rule ccorres_from_vcg_nofail) + apply (clarsimp simp:) + apply (rule conseqPre) + apply vcg + apply (clarsimp simp: rf_sr_htd_safe) + apply (intro conjI allI impI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + kernel_data_refs_domain_eq_rotate + ptr_retyps_htd_safe_neg[where arr=False, unfolded ptr_retyps_gen_def, simplified] + size_of_def pageBits_def power_add mult.commute mult.left_commute) + apply (frule range_cover.unat_of_nat_shift[where gbits = "pageBits + us"]) + apply simp + apply (clarsimp simp:size_of_def power_add pageBits_def + rf_sr_def cstate_relation_def Let_def field_simps) + apply blast + apply (frule range_cover.aligned) + apply (frule range_cover.sz(1), fold word_bits_def) + apply (rule bexI [OF _ placeNewObject_eq], simp_all) + apply (cut_tac ptr=regionBase and sz="pageBits + us" and gbits=us and arr=False + in createObjects_ccorres_user_data_device[rule_format]) + apply (rule conjI, assumption, clarsimp) + apply (fastforce simp: pageBits_def field_simps region_actually_is_bytes) + apply (clarsimp elim!: rsubst[where P="\x. (\, x) \ rf_sr" for \] + simp: field_simps objBitsKO_def ptr_retyps_gen_def) + apply (simp add: objBitsKO_def field_simps) + apply (rule no_fail_pre, rule no_fail_placeNewObject) + apply (clarsimp simp: objBitsKO_def) + done + +lemma gsUserPages_update: + "\f. (\s. s\gsUserPages := f(gsUserPages s)\) = gsUserPages_update f" + by (rule ext) simp + +lemma modify_gsUserPages_update: + "modify (\s. s\gsUserPages := f(gsUserPages s)\) = modify (gsUserPages_update f)" + by (simp only: gsUserPages_update) + +method arch_create_data_obj_corres_helper = + (match conclusion in "ccorres ?rel ?var ?P ?P' ?hs + (AARCH64_H.createObject object_type.SmallPageObject ?regionBase sz ?deviceMemory) + (Call Arch_createObject_'proc) + " for sz \ \(simp add: toAPIType_def AARCH64_H.createObject_def + placeNewDataObject_def bind_assoc + ),subst gsUserPages_update,((rule ccorres_gen_asm)+) \) + +lemma placeNewDataObject_ccorres: + "ccorresG rf_sr \ dc xfdc + (createObject_hs_preconds regionBase newType us devMem + and K (APIType_capBits newType us = pageBits + us)) + ({s. region_actually_is_bytes regionBase (2 ^ (pageBits + us)) s + \ (\ devMem \ heap_list_is_zero (hrs_mem (t_hrs_' (globals s))) regionBase + (2 ^ (pageBits + us)))}) + hs + (placeNewDataObject regionBase us devMem) + (Cond {s. devMem} + (global_htd_update (\s. (ptr_retyps (2^us) (Ptr regionBase :: user_data_device_C ptr)))) + (global_htd_update (\s. 
(ptr_retyps (2^us) (Ptr regionBase :: user_data_C ptr)))) + )" + apply (cases devMem) + apply (simp add: placeNewDataObject_def ccorres_cond_univ_iff) + apply (rule ccorres_guard_imp, rule placeNewObject_user_data_device, simp_all) + apply (clarsimp simp: createObject_hs_preconds_def invs'_def + valid_state'_def valid_pspace'_def) + apply (simp add: placeNewDataObject_def ccorres_cond_empty_iff) + apply (rule ccorres_guard_imp, rule placeNewObject_user_data, simp_all) + apply (clarsimp simp: createObject_hs_preconds_def invs'_def + valid_state'_def valid_pspace'_def) + apply (frule range_cover.sz(1), simp add: word_bits_def) + done + +lemma cond_second_eq_seq_ccorres: + "ccorres_underlying sr Gamm r xf arrel axf G G' hs m + (Cond P (a ;; c) (b ;; c) ;; d) + = ccorres_underlying sr Gamm r xf arrel axf G G' hs m + (Cond P a b ;; c ;; d)" + apply (rule ccorres_semantic_equiv) + apply (rule semantic_equivI) + apply (auto elim!: exec_Normal_elim_cases intro: exec.Seq exec.CondTrue exec.CondFalse) + done + +lemma cvariable_array_ptr_kill: + "cvariable_array_map_relation m ns ptrfun htd + \ cvariable_array_map_relation (m(x := None)) + ns (ptrfun :: _ \ ('b :: mem_type) ptr) htd" + by (clarsimp simp: cvariable_array_map_relation_def + split: if_split) + +(* FIXME AARCH64 copied from IsolatedThreadAction: should Retype import IsolatedThreadAction? *) +lemma setObject_modify: + fixes v :: "'a :: pspace_storable" shows + "\ obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v \ + \ setObject p v s + = modify (ksPSpace_update (\ps. ps (p \ injectKO v))) s" + apply (clarsimp simp: setObject_def split_def exec_gets obj_at'_def lookupAround2_known1 + assert_opt_def updateObject_default_def bind_assoc) + apply (simp add: projectKO_def alignCheck_assert) + apply (simp add: project_inject objBits_def) + apply (clarsimp simp only: objBitsT_koTypeOf[symmetric] koTypeOf_injectKO) + apply (frule(2) in_magnitude_check[where s'=s]) + apply (simp add: magnitudeCheck_assert in_monad) + apply (simp add: simpler_modify_def) + done + +(* FIXME move, depends on setObject_modify which lives in IsolatedThreadAction *) +(* FIXME would be interesting to generalise this kind of lemma *) +lemma monadic_rewrite_setObject_vcpu_modify: + fixes vcpu vcpupre :: vcpu + shows "monadic_rewrite True False (vcpu_at' v) + (setObject v vcpu) + (modify (ksPSpace_update (\ps. ps(v \ injectKOS vcpu))))" + by (clarsimp simp: monadic_rewrite_def setObject_modify objBits_simps + vcpuBits_def typ_at_to_obj_at_arches) + +(* FIXME move, depends on setObject_modify which lives in IsolatedThreadAction *) +lemma monadic_rewrite_modify_setObject_vcpu: + fixes vcpu vcpupre :: vcpu + shows "monadic_rewrite True False (vcpu_at' v) + (modify (ksPSpace_update (\ps. 
ps(v \ injectKOS vcpu)))) + (setObject v vcpu)" + by (clarsimp simp: monadic_rewrite_def setObject_modify objBits_simps + vcpuBits_def typ_at_to_obj_at_arches) + +lemma monadic_rewrite_placeNewObject_vcpu_decompose: + fixes vcpu vcpupre :: vcpu + shows "monadic_rewrite True False \ + (placeNewObject v vcpu 0) + (do placeNewObject v vcpupre 0; + setObject v vcpu + od)" + apply clarsimp + apply (monadic_rewrite_r monadic_rewrite_modify_setObject_vcpu + \wpsimp wp: placeNewObject_object_at_vcpu\) + apply (clarsimp simp: placeNewObject_def placeNewObject'_def bind_assoc split_def) + apply (clarsimp simp: objBits_simps' archObjSize_def) + apply (rule monadic_rewrite_bind_tail)+ + apply (rule monadic_rewrite_is_refl) + apply (rule ext) + apply (clarsimp simp: exec_modify) + apply (fastforce simp: simpler_modify_def comp_def) + apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift)+ + done + +(* FIXME would be interesting to generalise this *) +lemma monadic_rewrite_setObject_vcpu_twice: + fixes vcpu vcpupre :: vcpu + shows "monadic_rewrite True False (vcpu_at' v) + (setObject v vcpu) + (do setObject v vcpupre; + setObject v vcpu + od)" + supply fun_upd_apply[simp del] + apply simp + apply (monadic_rewrite_r monadic_rewrite_modify_setObject_vcpu) + apply (monadic_rewrite_r monadic_rewrite_modify_setObject_vcpu) + apply (monadic_rewrite_l monadic_rewrite_setObject_vcpu_modify) + apply (rule monadic_rewrite_is_refl) + apply (rule ext) + apply (clarsimp simp: exec_modify) + apply (fastforce simp: simpler_modify_def comp_def) + apply clarsimp + apply (clarsimp simp: vcpu_at_ko'_eq[symmetric] obj_at'_def fun_upd_apply + objBits_simps vcpuBits_def ps_clear_def + split: if_splits) + done + +(* The usual way we deal with this is in terms of default object construction, but when you + put in an arbitrary object that's not cte/tcb related, we don't need to care about putting + it in memory for cte_wp_at' *) +lemma cte_wp_at_retype'_not_cte_tcb: + fixes obj :: kernel_object + fixes s :: kernel_state + fixes addrs :: "addr list" + defines "s' \ s\ksPSpace := (\x. if x \ set addrs then Some obj else ksPSpace s x) \" + assumes pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' s'" "pspace_distinct' s'" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + and pn: "\x \ set addrs. ksPSpace s x = None" + and irrelko: "koTypeOf obj \ CTET" "koTypeOf obj \ TCBT" + shows "cte_wp_at' P p s' = cte_wp_at' P p s" +proof - + have not_tcb_cte: "\ko. obj \ KOTCB ko" "\ko. obj \ KOCTE ko" + using irrelko by (clarsimp simp: irrelko)+ + + show ?thesis + apply (subgoal_tac "\p \ set addrs. \(P :: cte \ bool). \ obj_at' P p s") + prefer 2 + apply (fastforce elim!: obj_atE' simp: pn) + apply (subgoal_tac "\p \ set addrs. \(P :: tcb \ bool). \ obj_at' P p s") + prefer 2 + apply (fastforce elim!: obj_atE' simp: pn) + apply (simp only: cte_wp_at_obj_cases_mask) + apply (clarsimp simp: s'_def) + apply (clarsimp simp: foldr_update_obj_at'[OF pv pv'[simplified s'_def]] al) + apply (clarsimp cong: if_cong split: if_split) + apply (clarsimp simp: projectKOs dom_def) + apply (case_tac obj; clarsimp simp: not_tcb_cte) + done +qed + +lemma ctes_of_retype_not_cte_tcb: + fixes obj :: kernel_object + fixes s :: kernel_state + fixes addrs :: "addr list" + defines "s' \ s\ksPSpace := (\x. if x \ set addrs then Some obj else ksPSpace s x) \" + assumes pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' s'" "pspace_distinct' s'" + and al: "\x \ set addrs. 
is_aligned x (objBitsKO obj)" + and pn: "\x \ set addrs. ksPSpace s x = None" + and irrelko: "koTypeOf obj \ CTET" "koTypeOf obj \ TCBT" + shows "map_to_ctes (\x. if x \ set addrs then Some obj else ksPSpace s x) + = (\x. map_to_ctes (ksPSpace s) x)" + (is "map_to_ctes ?ps' = ?map'") + using cte_wp_at_retype'_not_cte_tcb[where P="(=) cte" for cte, OF pv _ _ al pn] pv' irrelko + arg_cong [where f=Not, OF cte_wp_at_retype'_not_cte_tcb [OF pv _ _ al pn, where P="\"]] + apply simp + apply (simp add: s'_def) + apply (simp(no_asm_use) add: cte_wp_at_ctes_of cong: if_cong) + apply (rule ext) + apply (case_tac "map_to_ctes ?ps' x") + apply (simp(no_asm_simp)) + apply (drule_tac x=x in meta_spec)+ + apply fastforce + apply (simp split: if_splits) + done + +(* for when we do not have direct access to s *) +lemma magnitudeCheck_assert2': + "\ is_aligned x n; (1 :: machine_word) < 2 ^ n; ksPSpace s x = Some v ; + ksPSpace (s::kernel_state) = ps \ \ + magnitudeCheck x (snd (lookupAround2 x ps)) n + = assert (ps_clear x n s)" + by (clarsimp simp: magnitudeCheck_assert2) + +(* VCPU when recast from memset 0 *) +abbreviation (input) fromzeroVCPU :: vcpu where + "fromzeroVCPU \ vcpu.VCPUObj None (VGICInterface 0 0 0 (\_. 0)) (const 0) (const False) + (VirtTimer 0)" + +lemma monadic_rewrite_setObject_vcpu_as_init: + defines "vcpu0 \ fromzeroVCPU" + defines "vcpu1 \ (vcpuRegs_update (\_. (vcpuRegs vcpu0)(VCPURegSCTLR := sctlrEL1VM)) vcpu0)" + defines "vcpu2 \ (vcpuVGIC_update (\_. vgicHCR_update (\_. vgicHCREN) (vcpuVGIC vcpu1)) vcpu1)" + shows + "monadic_rewrite True False (K (v \ 0) and ko_at' fromzeroVCPU v) + (setObject v makeVCPUObject) + (do vcpuWriteReg v VCPURegSCTLR sctlrEL1VM; + vgicUpdate v (vgicHCR_update (\_. vgicHCREN)); + vcpuUpdate v (\vcpu. vcpu\ vcpuVTimer := VirtTimer 0 \) + od)" + supply fun_upd_apply[simp del] + apply simp + apply (rule monadic_rewrite_gen_asm) + apply monadic_rewrite_pre + apply (simp add: vcpuWriteReg_def vgicUpdate_def bind_assoc) + apply (clarsimp simp: vcpuUpdate_def bind_assoc) + (* explicitly state the vcpu we are setting for each setObject by rewriting the getObject to a return *) + apply (rule monadic_rewrite_trans[rotated]) + apply (monadic_rewrite_symb_exec_r_known vcpu0) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_r_known vcpu1) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_r_known vcpu2) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getObject_vcpu_prop simp: vcpu2_def vcpu1_def vcpu0_def)+ + apply (wp setObject_sets_object_vcpu) + apply (wpsimp wp: getObject_vcpu_prop)+ + apply (wpsimp wp: getObject_vcpu_prop simp: vcpu2_def vcpu1_def vcpu0_def)+ + apply (wp setObject_sets_object_vcpu) + apply (wpsimp wp: getObject_vcpu_prop)+ + (* now we have four setObjects in a row, fold them up using setObject-combining *) + apply (monadic_rewrite_r_method \rule monadic_rewrite_setObject_vcpu_twice[simplified]\ wpsimp) + apply (monadic_rewrite_r_method \rule monadic_rewrite_setObject_vcpu_twice[simplified]\ wpsimp) + apply (rule monadic_rewrite_is_refl) + apply (fastforce simp: vcpu2_def vcpu1_def vcpu0_def makeVCPUObject_def) + apply (fastforce simp: vcpu0_def ko_at_vcpu_at'D) + done + +lemma ptr_retyp_fromzeroVCPU: + fixes p' :: "vcpu_C ptr" + defines "vcpu0 \ fromzeroVCPU" + defines "ko_vcpu \ KOArch (KOVCPU vcpu0)" + assumes "valid_global_refs' \" + assumes pal: "pspace_aligned' \" + assumes pdst: "pspace_distinct' \" + assumes pno: "pspace_no_overlap' p vcpuBits \" + assumes "2 ^ vcpuBits \ 
gsMaxObjectSize \" + assumes vo: "valid_objs' \" + assumes urz: "untyped_ranges_zero' \" + assumes cor: "caps_overlap_reserved' {p ..+ 2 ^ vcpuBits} \" + assumes ptr0: "p \ 0" + assumes kdr: "{p ..+ 2 ^ vcpuBits} \ kernel_data_refs = {}" + assumes subr: "{p ..+ 752} \ {p ..+ 2 ^ vcpuBits}" (is "{_ ..+ ?vcpusz} \ _") + assumes act_bytes: "region_actually_is_bytes p (2 ^ vcpuBits) \'" + assumes rep0: "heap_list (hrs_mem (t_hrs_' (globals \'))) (2 ^ vcpuBits) p = replicate (2 ^ vcpuBits) 0" + assumes "\ snd (placeNewObject p vcpu0 0 \)" + assumes cover: "range_cover p vcpuBits vcpuBits 1" + assumes al: "is_aligned p vcpuBits" + assumes sr: "(\, \') \ rf_sr" + shows "(\\ksPSpace := (ksPSpace \)(p \ ko_vcpu)\, + globals_update (t_hrs_'_update (hrs_htd_update (ptr_retyp (vcpu_Ptr p)))) \') + \ rf_sr" + (is "(\\ksPSpace := ?ks\, globals_update ?gs' \') \ rf_sr") +proof - + let ?gs = "?gs' (globals \')" + let ?s' = "\\ksPSpace := ?ks\" + let ?htdret = "(hrs_htd_update (ptr_retyp (vcpu_Ptr p)) (t_hrs_' (globals \')))" + let ?zeros = "from_bytes (replicate (size_of TYPE(vcpu_C)) 0) :: vcpu_C" + + (* sanity check for the value of ?vcpusz *) + have "size_of TYPE(vcpu_C) = ?vcpusz" + by simp + + have ptr_al: + "ptr_aligned (vcpu_Ptr p)" using al + by (auto simp: align_of_def vcpuBits_def pageBits_def + intro!: is_aligned_ptr_aligned[of _ 3] + elim!: is_aligned_weaken) + + have "c_null_guard (vcpu_Ptr p)" using ptr0 al + by (auto simp: c_null_guard_def vcpuBits_def pageBits_def vcpu_C_size + elim!: intvl_nowrap[where x = 0, simplified] is_aligned_no_wrap_le) + + have cguard: "c_guard (vcpu_Ptr p)" + using \ptr_aligned (vcpu_Ptr p)\ \c_null_guard (vcpu_Ptr p)\ + by (simp add: c_guard_def) + + have zro: + "zero_ranges_are_zero (gsUntypedZeroRanges \) (t_hrs_' (globals \'))" + using sr + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + + have retyp_p': "ptr_retyp (vcpu_Ptr p) (snd (t_hrs_' (globals \'))) \\<^sub>t (vcpu_Ptr p)" + using cguard by (rule ptr_retyp_h_t_valid) + + have clift_retyp_p': + "clift (hrs_mem (t_hrs_' (globals \')), ptr_retyp (vcpu_Ptr p) (snd (t_hrs_' (globals \')))) (vcpu_Ptr p) + = Some (from_bytes (replicate (size_of TYPE(vcpu_C)) 0))" + (is "?hl = ?rep0") + using retyp_p' rep0 + apply (simp add: lift_t_if h_val_def) + apply (subst take_heap_list_le[where k="?vcpusz" and n="2^vcpuBits", symmetric]) + apply (simp add: vcpuBits_def)+ + done + + have domain_kdr: + "-domain \ kernel_data_refs" + using sr unfolding rf_sr_def cstate_relation_def Let_def by simp + + have htd_safe: + "htd_safe domain (hrs_htd (t_hrs_' (globals \'))) + \ htd_safe domain (hrs_htd (t_hrs_' ?gs))" + using kdr + apply (simp add: hrs_htd_update) + apply (intro ptr_retyp_htd_safe_neg[OF _ _ domain_kdr], simp_all) + apply (erule disjoint_subset[rotated]) + apply (clarsimp; rule intvl_mem_weaken[where n="2 ^ vcpuBits - size_of (TYPE (vcpu_C))"]) + by (clarsimp simp: vcpuBits_def) + + have pks: "map_to_vcpus (ksPSpace \) p = None" + apply (rule map_comp_simps) + apply (rule pspace_no_overlap_base'[OF pal pno al, simplified]) + done + + note cmap_vcpus = cmap_relation_updI2[where am="map_to_vcpus (ksPSpace \)" + and cm="cslift \'" + and dest=p and f=vcpu_Ptr, + OF _ pks, simplified] + + have map_vcpus: + "cmap_relation (map_to_vcpus (ksPSpace \)) (cslift \') vcpu_Ptr cvcpu_relation + \ cmap_relation ((map_to_vcpus (ksPSpace \))(p \ vcpu0)) + ((cslift \')(vcpu_Ptr p \ ?zeros)) vcpu_Ptr cvcpu_relation" + apply (erule cmap_vcpus) + apply (simp add: vcpu0_def from_bytes_def) + apply (simp add: typ_info_simps 
vcpu_C_tag_def) + apply (simp add: ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td align_of_def padup_def + final_pad_def typ_info_simps align_td_array' Let_def size_of_def) + (* about a minute *) + apply (timeit \simp add: update_ti_adjust_ti update_ti_t_word32_0s update_ti_t_machine_word_0s + typ_info_simps gicVCpuIface_C_tag_def virq_C_tag_def + update_ti_t_ptr_0s ti_typ_pad_combine_empty_ti ti_typ_pad_combine_td + ti_typ_combine_empty_ti ti_typ_combine_td + align_of_def padup_def replicate_def update_ti_t_array_rep + final_pad_def size_td_lt_ti_typ_pad_combine Let_def size_of_def + align_td_array' size_td_array vTimer_C_tag_def\) + apply (clarsimp simp: cvcpu_relation_def cvcpu_regs_relation_def option_to_ctcb_ptr_def + cvgic_relation_def cvcpu_vppi_masked_relation_def virq_to_H_def) + apply (rule conjI, clarsimp) + (* regs_C array initialisation *) + using le_imp_less_Suc[OF maxBound_is_bound[where 'a=vcpureg, + simplified fromEnum_maxBound_vcpureg_def]] + apply (case_tac r; clarsimp simp: index_foldr_update) + apply (rule conjI, clarsimp) + (* vgic_C array initialisation *) + apply (subst index_fold_update; clarsimp) + (* vppi array initialisation *) + apply clarsimp + apply (case_tac vppi; clarsimp) + (* only one vppievent_irq constructor, safe to unfold *) + apply (clarsimp simp: fromEnum_def enum_vppievent_irq) + done + + have is_bytes: "region_is_bytes p (size_of TYPE(vcpu_C)) \'" + using region_actually_is_bytes[OF act_bytes] region_is_bytes_subset[OF _ subr] + by (simp add: vcpu_C_size) + + have zero_bytes [simplified]: + "heap_list (hrs_mem (t_hrs_' (globals \'))) (size_of TYPE(vcpu_C)) p + = replicate (size_of TYPE(vcpu_C)) 0" + apply (subst take_heap_list_le[where n="2^vcpuBits", symmetric]) + prefer 2 + apply (subst rep0) + apply (simp add: vcpuBits_def)+ + done + + have clift_retyp_p': + "clift (?htdret) (vcpu_Ptr p) = Some (from_bytes (replicate (size_of TYPE(vcpu_C)) 0))" + using cguard zero_bytes + by (case_tac "t_hrs_' (globals \')") + (auto simp: lift_t_if hrs_htd_update_def h_val_def hrs_mem_def ptr_retyp_h_t_valid) + + have cl_vcpu: + "(clift (?htdret) :: vcpu_C typ_heap) = (cslift \')(vcpu_Ptr p \ ?zeros)" + using cguard clift_retyp_p' + apply clarsimp + apply (rule ext, rename_tac p') + apply (case_tac "p' = vcpu_Ptr p", fastforce) + using clift_ptr_retyps_gen_prev_memset_same[ + where 'a=vcpu_C and arr=False and n=1 and hrs="(t_hrs_' (globals \'))", + simplified, OF cguard _ refl] is_bytes[simplified] + apply (simp add: word_bits_def zero_bytes ptr_retyps_gen_def) + done + + have zro': + "zero_ranges_are_zero (gsUntypedZeroRanges \) (t_hrs_' ?gs)" + using zro vo urz cor al + apply (clarsimp simp: hrs_htd_update typ_heap_simps') + apply (intro zero_ranges_ptr_retyps[where n=1 and arr=False, + simplified ptr_retyps_gen_def, simplified]) + apply simp_all + apply (erule caps_overlap_reserved'_subseteq; clarsimp) + apply (rule intvl_mem_weaken[where n="2 ^ vcpuBits - size_of (TYPE (vcpu_C))"]) + apply (clarsimp simp: vcpuBits_def) + done + + have cl_rest: + "\ typ_uinfo_t TYPE(vcpu_C) \\<^sub>t typ_uinfo_t TYPE('z); typ_uinfo_t TYPE('z) \ typ_uinfo_t TYPE(word8) \ + \ (clift ?htdret) = (cslift \' :: 'z::mem_type typ_heap)" + using cguard + apply (clarsimp simp: clift_heap_update_same hrs_htd_update + ptr_retyp_h_t_valid typ_heap_simps) + apply (subst ptr_retyp_gen_one[symmetric]) + apply (rule clift_ptr_retyps_gen_other) + using region_actually_is_bytes[OF act_bytes] region_is_bytes_subset[OF _ subr] + apply (auto simp: word_bits_def vcpuBits_def) + done + + have 
arel: "cte_array_relation \ ?gs \ tcb_cte_array_relation ?s' ?gs" + using sr + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + hrs_htd_update ko_vcpu_def map_comp_update) + apply (intro conjI cvariable_array_ptr_kill + cvariable_array_ptr_retyps[OF refl, where n=1 and arr=False, + simplified ptr_retyps_gen_def, simplified]) + using region_actually_is_bytes[OF act_bytes] region_is_bytes_subset[OF _ subr] + by auto + + note ht_rest = clift_eq_h_t_valid_eq[OF cl_rest, simplified ko_vcpu_def, simplified] + + have objBitsKO_vcpu: + "\v. objBitsKO (KOArch (KOVCPU v)) = vcpuBits" + by (simp add: objBitsKO_def archObjSize_def vcpuBits_def) + + have rl_vcpu: + "(projectKO_opt \\<^sub>m ((ksPSpace \)(p \ KOArch (KOVCPU vcpu0))) :: machine_word \ vcpu option) + = (projectKO_opt \\<^sub>m ksPSpace \)(p \ vcpu0)" + by (rule ext) + (clarsimp simp: projectKOs map_comp_def vcpu0_def split: if_split) + + have ctes: + "map_to_ctes ((ksPSpace \)(p \ KOArch (KOVCPU vcpu0))) = ctes_of \" + using pal pdst al pno + apply (clarsimp simp: fun_upd_def) + apply (frule (2) pspace_no_overlap_base') + apply (rule ctes_of_retype_not_cte_tcb[where addrs="[p]", simplified] + ; simp?) + apply (simp add: pspace_aligned'_def) + apply (clarsimp split: if_splits simp: objBitsKO_vcpu) + apply (erule_tac x=x in ballE; (simp add: dom_def)?) + apply (simp add: pspace_distinct'_def) + apply (clarsimp split: if_splits simp: objBitsKO_vcpu) + apply (subst ps_clear_ksPSpace_upd_same[simplified fun_upd_def]) + apply (rule ps_clear_entire_slotI) + apply (drule (1) pspace_no_overlap_disjoint'[where n=vcpuBits]) + apply fastforce + apply (clarsimp simp: ps_clear_def) + apply (subst dom_fun_upd[simplified fun_upd_def]) + apply (simp only: option.distinct if_False) + apply (subgoal_tac "({x..x + mask (objBitsKO y)} - {x}) \ {p} = {}") + apply fastforce + apply (frule (1) pspace_no_overlapD') + apply (erule disjoint_subset_both[rotated 2]) + apply (simp add: mask_def p_assoc_help) + apply fastforce + apply (clarsimp simp: objBitsKO_def archObjSize_def) + done + + have csrel: "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals \'))" + using sr unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + + have csrel': "cpspace_relation ?ks (underlying_memory (ksMachineState \)) (t_hrs_' ?gs)" + using csrel map_vcpus pno pal al unfolding cpspace_relation_def + apply (clarsimp simp: cl_rest ht_rest tag_disj_via_td_name) + apply (simp add: ctes rl ko_vcpu_def projectKOs rl_vcpu objBitsKO_vcpu) + apply (simp add: projectKO_opt_retyp_other' objBitsKO_vcpu cl_vcpu) + done + + from sr + have "pt_array_relation \ (globals \')" + by (simp add: rf_sr_def cstate_relation_def Let_def) + hence pt_array: + "cvariable_array_map_relation (gsPTTypes (ksArchState \)) + (\pt_t. 
2 ^ ptTranslationBits pt_t) + pte_Ptr + (hrs_htd ?htdret)" + using is_bytes cvariable_array_ptr_retyps[where n=1 and p="vcpu_Ptr p" and arr=False] + by (fastforce simp add: hrs_htd_update ptr_retyps_gen_def) + + show ?thesis + using assms zro' csrel' arel csrel map_vcpus pt_array + apply (clarsimp simp: ko_vcpu_def vcpu0_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def + cmachine_state_relation_def Let_def h_t_valid_clift_Some_iff) + apply (subgoal_tac "region_is_bytes p ?vcpusz \'") + prefer 2 + apply (fastforce simp: region_actually_is_bytes[OF act_bytes] + region_is_bytes_subset[OF _ subr]) + apply (simp add: projectKO_opt_retyp_other' objBitsKO_vcpu cl_vcpu + htd_safe[simplified] kernel_data_refs_domain_eq_rotate) + apply (subst ptr_retyp_gen_one[symmetric])+ + apply clarsimp + apply (subst clift_ptr_retyps_gen_other[where 'a=vcpu_C and arr=False and nptrs=1, + simplified word_bits_def, simplified] + ; simp add: tag_disj_via_td_name)+ + done +qed + +(* retyping to a vcpu from after a memset zero *) +lemma placeNewObject_vcpu_fromzero_ccorres: + "ccorres dc xfdc + (valid_global_refs' and pspace_aligned' and pspace_distinct' + and pspace_no_overlap' regionBase vcpuBits + and (\s. 2 ^ vcpuBits \ gsMaxObjectSize s) + and ret_zero regionBase (2 ^ vcpuBits) + and K (regionBase \ 0 \ range_cover regionBase vcpuBits vcpuBits 1 + \ ({regionBase..+2 ^ vcpuBits} \ kernel_data_refs = {}))) + ({s. region_actually_is_zero_bytes regionBase (2 ^ vcpuBits) s}) + hs + (placeNewObject regionBase fromzeroVCPU 0) + (global_htd_update (\_. (ptr_retyp (vcpu_Ptr regionBase))))" + apply (rule ccorres_from_vcg_nofail, clarsimp) + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_htd_safe) + apply (subgoal_tac "{regionBase..+752} \ {regionBase..+2^vcpuBits}") + prefer 2 + apply clarsimp + apply (drule intvlD, clarsimp) + apply (rule intvlI, simp add: vcpuBits_def pageBits_def) + apply (intro conjI allI impI) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (erule ptr_retyp_htd_safe_neg[where D'=kernel_data_refs]; simp add: vcpuBits_def pageBits_def) + apply blast + apply (rule bexI [OF _ placeNewObject_eq]) + apply (clarsimp simp: split_def new_cap_addrs_def) + apply (rule ptr_retyp_fromzeroVCPU[simplified] ; simp?) + apply (clarsimp simp: range_cover_def) + apply (clarsimp simp: word_bits_conv) + apply (clarsimp simp: objBits_simps range_cover.aligned archObjSize_def vcpuBits_def vcpuBits_def) + apply (clarsimp simp: no_fail_def) + done + +lemma vcpu_init_ccorres: + "ccorres dc xfdc + (ko_at' fromzeroVCPU vcpuptr and K (vcpuptr \ 0)) + (UNIV \ \ \vcpu = vcpu_Ptr vcpuptr \) hs + (setObject vcpuptr (makeObject :: vcpu)) + (Call vcpu_init_'proc)" +proof - + have bind_assoc_rev: + "\a b c. do a; b; c od = (do a; b od) >>_ c" + by (simp add: bind_assoc) + + have armv_vcpu_init_ccorres: + "\hs. 
ccorres dc xfdc (vcpu_at' vcpuptr) \\vcpu = vcpu_Ptr vcpuptr\ hs + (vcpuWriteReg vcpuptr VCPURegSCTLR sctlrEL1VM) (Call armv_vcpu_init_'proc)" + apply (cinit') + apply (ctac (no_vcg) add: vcpu_write_reg_ccorres) + apply (clarsimp simp: sctlrEL1VM_def fromEnum_def enum_vcpureg seL4_VCPUReg_defs) + done + + show ?thesis + apply (cinit' lift: vcpu_' simp: makeObject_vcpu) + apply clarsimp + apply (rule monadic_rewrite_ccorres_assemble[OF _ monadic_rewrite_setObject_vcpu_as_init]) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_call[OF armv_vcpu_init_ccorres]; solves simp) + apply (clarsimp simp: vgicHCREN_def) + apply (ctac (no_vcg) pre: ccorres_move_c_guard_vcpu add: vgicUpdate_HCR_ccorres) + apply (ctac pre: ccorres_move_c_guard_vcpu add: vgicUpdate_virtTimer_pcount_ccorres) + apply (wpsimp simp: guard_is_UNIV_def)+ + apply (fastforce simp: const_def ko_at_vcpu_at'D) + done +qed + +lemma placeNewObject_vcpu_ccorres: + "ccorres dc xfdc + (valid_global_refs' and pspace_aligned' and pspace_distinct' + and pspace_no_overlap' regionBase vcpuBits + and (\s. 2 ^ vcpuBits \ gsMaxObjectSize s) + and ret_zero regionBase (2 ^ vcpuBits) + and K (regionBase \ 0 \ range_cover regionBase vcpuBits vcpuBits 1 + \ ({regionBase..+2 ^ vcpuBits} \ kernel_data_refs = {}))) + (UNIV \ {s. region_actually_is_zero_bytes regionBase (2 ^ vcpuBits) s}) + hs + (placeNewObject regionBase (makeObject :: vcpu) 0) + (global_htd_update (\_. (ptr_retyp (vcpu_Ptr regionBase)));; CALL vcpu_init(vcpu_Ptr regionBase))" + apply (rule ccorres_guard_imp) + apply (rule monadic_rewrite_ccorres_assemble[OF _ + monadic_rewrite_placeNewObject_vcpu_decompose[where vcpupre=fromzeroVCPU]]) + apply (rule ccorres_split_nothrow) + apply (rule placeNewObject_vcpu_fromzero_ccorres) + apply ceqv + apply clarsimp + apply (ctac (no_vcg) add: vcpu_init_ccorres) + apply (wp placeNewObject_creates_object_vcpu) + apply clarsimp + apply vcg + apply clarsimp + apply fastforce + done + +lemma updatePTType_ccorres: + "ccorres dc xfdc + \ + {s. h_t_array_valid (hrs_htd (t_hrs_' (globals s))) + (pte_Ptr regionBase) + (2 ^ ptTranslationBits pt_t)} + hs + (updatePTType regionBase pt_t) + (Basic (globals_update (ghost'state_'_update (gs_new_pt_t pt_t regionBase))))" + apply (rule ccorres_from_vcg) + apply vcg_step + apply (clarsimp simp: updatePTType_def simpler_gets_def simpler_modify_def bind_def return_def) + apply (case_tac "ghost'state_' (globals x)") + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def fun_upd_def + carch_state_relation_def cmachine_state_relation_def + gs_new_pt_t_def ghost_size_rel_def carch_globals_def + ghost_assertion_data_get_def + cong: if_cong) + apply (clarsimp simp: cvariable_array_map_relation_def split: if_splits) + done + +lemma Arch_createObject_ccorres: + assumes t: "toAPIType newType = None" + shows "ccorres (\a b. 
ccap_relation (ArchObjectCap a) b) ret__struct_cap_C_' + (createObject_hs_preconds regionBase newType userSize deviceMemory) + (createObject_c_preconds regionBase newType userSize deviceMemory) + [] + (Arch.createObject newType regionBase userSize deviceMemory) + (Call Arch_createObject_'proc)" +proof - + note if_cong[cong] + + show ?thesis + apply (clarsimp simp: createObject_c_preconds_def + createObject_hs_preconds_def) + apply (rule ccorres_gen_asm) + apply clarsimp + apply (frule range_cover.aligned) + apply (cut_tac t) + apply (case_tac newType; simp add: toAPIType_def bind_assoc) + + (* FIXME AARCH64 cleanup and fix indentation later, once vspace parts are fixed *) + \ \VCPU\ + prefer 6 + apply (in_case "VCPUObject") + subgoal + apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') + apply (simp add: object_type_from_H_def Kernel_C_defs) + apply ccorres_rewrite + apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff + asidInvalid_def sle_positive APIType_capBits_def shiftL_nat + objBits_simps word_sle_def word_sless_def) + apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps + AARCH64_H.createObject_def pageBits_def) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_rhs_assoc2) + apply (ctac add: placeNewObject_vcpu_ccorres) + apply csymbr + apply (rule ccorres_return_C; simp) + apply wp + apply (vcg exspec=vcpu_init_modifies) + apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' + invs_valid_global' range_cover.aligned APIType_capBits_def + invs_valid_objs' isFrameType_def invs_urz) + apply (frule range_cover.aligned) + apply (clarsimp simp: ccap_relation_def cap_vcpu_cap_lift cap_to_H_def + canonical_address_and_maskD) + done + + apply (in_case "HugePageObject") +subgoal + apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') + apply (simp add: object_type_from_H_def Kernel_C_defs) + apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff + asidInvalid_def APIType_capBits_def shiftL_nat objBits_simps + ptBits_def pageBits_def word_sle_def word_sless_def fold_eq_0_to_bool) + apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps + AARCH64_H.createObject_def pageBits_def ptTranslationBits_def + cond_second_eq_seq_ccorres modify_gsUserPages_update + intro!: ccorres_rhs_assoc) + apply ((rule ccorres_return_C | simp | wp | vcg + | (rule match_ccorres, ctac add: + placeNewDataObject_ccorres[where us=18 and newType=newType, simplified] + gsUserPages_update_ccorres[folded modify_gsUserPages_update]) + | (rule match_ccorres, csymbr))+)[1] + apply (intro conjI) + apply (clarsimp simp: createObject_hs_preconds_def frameSizeConstants_defs ptTranslationBits_def + APIType_capBits_def pageBits_def) + apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def + framesize_to_H_def cap_to_H_simps cap_frame_cap_lift + vm_page_size_defs ptTranslationBits_def + canonical_address_and_maskD[unfolded mask_def, simplified] + vmrights_to_H_def mask_def vm_rights_defs c_valid_cap_def cl_valid_cap_def) + done + apply (in_case "VSpaceObject") + subgoal + apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') + apply (simp add: object_type_from_H_def Kernel_C_defs) + apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff asidInvalid_def + sle_positive APIType_capBits_def shiftL_nat objBits_simps + ptBits_def pageBits_def word_sle_def word_sless_def) + apply (rule ccorres_rhs_assoc)+ + apply (clarsimp simp: hrs_htd_update bitSimps objBits_simps + AARCH64_H.createObject_def pt_bits_minus_pte_bits) + apply (ctac pre 
only: add: placeNewObject_pte_vs[simplified]) + apply (ctac only: add: updatePTType_ccorres) + apply csymbr + apply (rule ccorres_return_C) + apply simp + apply simp + apply simp + apply wp + apply vcg + apply wp + apply vcg + apply clarify + apply (intro conjI) + apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' invs_valid_global' + APIType_capBits_def invs_valid_objs' + invs_urz) + apply clarsimp + apply (clarsimp simp: ccap_relation_def APIType_capBits_def + framesize_to_H_def cap_to_H_simps cap_vspace_cap_lift + vmrights_to_H_def isFrameType_def canonical_address_and_maskD) + apply (prop_tac "c_guard (vs_Ptr regionBase)") + apply (rule is_aligned_c_guard[where m=pte_bits], simp, simp) + apply (simp add: align_of_array) + apply (simp add: align_of_def bit_simps) + apply (simp add: bit_simps split: if_split) + apply (simp add: bit_simps) + apply (drule_tac p="vs_Ptr regionBase" and + d="hrs_htd (t_hrs_' (globals s'))" and + g="c_guard" in ptr_retyp_h_t_valid) + apply (drule h_t_array_valid) + apply (simp add: hrs_htd_update ptTranslationBits_vs_array_len) + done + + apply (in_case "SmallPageObject") +subgoal + apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') + apply (simp add: object_type_from_H_def Kernel_C_defs) + apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff + asidInvalid_def APIType_capBits_def shiftL_nat objBits_simps + ptBits_def pageBits_def word_sle_def word_sless_def fold_eq_0_to_bool) + apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps + AARCH64_H.createObject_def pageBits_def + cond_second_eq_seq_ccorres modify_gsUserPages_update + intro!: ccorres_rhs_assoc) + apply ((rule ccorres_return_C | simp | wp | vcg + | (rule match_ccorres, ctac add: + placeNewDataObject_ccorres[where us=0 and newType=newType, simplified] + gsUserPages_update_ccorres[folded modify_gsUserPages_update]) + | (rule match_ccorres, csymbr))+)[1] + apply (intro conjI) + apply (clarsimp simp: createObject_hs_preconds_def frameSizeConstants_defs + APIType_capBits_def pageBits_def) + apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def cl_valid_cap_def + framesize_to_H_def cap_to_H_simps cap_frame_cap_lift vm_page_size_defs + canonical_address_and_maskD[unfolded mask_def, simplified] + vmrights_to_H_def mask_def vm_rights_defs c_valid_cap_def) + done + apply (in_case "LargePageObject") +subgoal + apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') + apply (simp add: object_type_from_H_def Kernel_C_defs) + apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff asidInvalid_def + APIType_capBits_def shiftL_nat objBits_simps ptBits_def pageBits_def + word_sle_def word_sless_def fold_eq_0_to_bool) + apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps AARCH64_H.createObject_def + pageBits_def ptTranslationBits_def cond_second_eq_seq_ccorres + modify_gsUserPages_update + intro!: ccorres_rhs_assoc) + apply ((rule ccorres_return_C | simp | wp | vcg + | (rule match_ccorres, ctac add: + placeNewDataObject_ccorres[where us=9 and newType=newType, simplified] + gsUserPages_update_ccorres[folded modify_gsUserPages_update]) + | (rule match_ccorres, csymbr))+)[1] + apply (intro conjI) + apply (clarsimp simp: createObject_hs_preconds_def frameSizeConstants_defs ptTranslationBits_def + APIType_capBits_def pageBits_def) + apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def + framesize_to_H_def cap_to_H_simps cap_frame_cap_lift + ptTranslationBits_def vm_page_size_defs vmrights_to_H_def + 
canonical_address_and_maskD[unfolded mask_def, simplified] + mask_def vm_rights_defs c_valid_cap_def cl_valid_cap_def) + done + + apply (in_case "PageTableObject") + (* FIXME AARCH64: goal here shows a vs_Ptr, but that is only because pt_Ptr and vs_Ptr are the + same type in this config. Probably should get a comment at def of vs_Ptr *) +subgoal + apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') + apply (simp add: object_type_from_H_def Kernel_C_defs) + apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff asidInvalid_def + sle_positive APIType_capBits_def shiftL_nat objBits_simps + ptBits_def pageBits_def word_sle_def word_sless_def) + apply (rule ccorres_rhs_assoc)+ + apply (clarsimp simp: hrs_htd_update bitSimps objBits_simps word_size_bits_def + AARCH64_H.createObject_def pageBits_def pt_bits_def table_size + pte_bits_def) + apply (ctac pre only: add: placeNewObject_pte_pt[simplified ptTranslationBits_def, simplified]) + apply (ctac only: add: updatePTType_ccorres) + apply csymbr + apply (rule ccorres_return_C) + apply simp + apply simp + apply simp + apply wp + apply vcg + apply wp + apply vcg + apply clarify + apply (intro conjI) + apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' invs_valid_global' + APIType_capBits_def invs_valid_objs' + invs_urz bit_simps) + apply clarsimp + apply (clarsimp simp: bit_simps ccap_relation_def APIType_capBits_def + framesize_to_H_def cap_to_H_simps cap_page_table_cap_lift + vmrights_to_H_def isFrameType_def canonical_address_and_maskD) + apply (prop_tac "c_guard (pt_Ptr regionBase)") + apply (rule is_aligned_c_guard[where m=pte_bits], simp, simp) + apply (simp add: align_of_array) + apply (simp add: align_of_def bit_simps) + apply simp + apply (simp add: bit_simps) + apply (drule_tac p="pt_Ptr regionBase" and + d="hrs_htd (t_hrs_' (globals s'))" and + g="c_guard" in ptr_retyp_h_t_valid) + apply (drule h_t_array_valid) + apply (simp add: hrs_htd_update) + done + done +qed + +(* FIXME: with the current state of affairs, we could simplify gs_new_cnodes *) +lemma gsCNodes_update_ccorres: + "ccorresG rf_sr G dc xf (\_. bits = sz + 4) + \ h_t_array_valid (hrs_htd \t_hrs) (cte_Ptr ptr) (2 ^ sz) \ hs + (modify (gsCNodes_update (\m a. 
if a = ptr then Some sz else m a))) + (Basic (globals_update (ghost'state_'_update + (gs_new_cnodes sz ptr bits))))" + apply (rule ccorres_from_vcg) + apply vcg_step + apply (clarsimp simp: split_def simpler_modify_def gs_new_cnodes_def) + apply (case_tac "ghost'state_' (globals x)") + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def fun_upd_def + carch_state_relation_def cmachine_state_relation_def + ghost_size_rel_def ghost_assertion_data_get_def + cong: if_cong) + apply (rule cvariable_array_ptr_upd[unfolded fun_upd_def], simp_all) + done + +(* FIXME: move *) +lemma map_to_tcbs_upd: + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" + apply (rule ext) + apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) + done + +(* FIXME: move *) +lemma cmap_relation_updI: + "\cmap_relation am cm f rel; am dest = Some ov; rel nv nv'; inj f\ \ cmap_relation (am(dest \ nv)) (cm(f dest \ nv')) f rel" + apply (clarsimp simp: cmap_relation_def) + apply (rule conjI) + apply (drule_tac t="dom cm" in sym) + apply fastforce + apply clarsimp + apply (case_tac "x = dest") + apply simp + apply clarsimp + apply (subgoal_tac "f x \ f dest") + apply simp + apply force + apply clarsimp + apply (drule (1) injD) + apply simp + done + +lemma cep_relations_drop_fun_upd: + "\ f x = Some v; tcbEPNext_C v' = tcbEPNext_C v; tcbEPPrev_C v' = tcbEPPrev_C v \ + \ cendpoint_relation (f (x \ v')) = cendpoint_relation f" + "\ f x = Some v; tcbEPNext_C v' = tcbEPNext_C v; tcbEPPrev_C v' = tcbEPPrev_C v \ + \ cnotification_relation (f (x \ v')) = cnotification_relation f" + by (intro ext cendpoint_relation_upd_tcb_no_queues[where thread=x] + cnotification_relation_upd_tcb_no_queues[where thread=x] + | simp split: if_split)+ + +lemma threadSet_domain_ccorres [corres]: + "ccorres dc xfdc + (tcb_at' thread) + {s. thread' s = tcb_ptr_to_ctcb_ptr thread \ d' s = ucast d} hs + (threadSet (tcbDomain_update (\_. d)) thread) + (Basic (\s. 
globals_update (t_hrs_'_update (hrs_mem_update (heap_update (Ptr &(thread' s\[''tcbDomain_C''])::machine_word ptr) (d' s)))) s))" + apply (rule ccorres_guard_imp2) + apply (rule threadSet_ccorres_lemma4 [where P=\ and P'=\]) + apply vcg + prefer 2 + apply (rule conjI, simp) + apply assumption + apply clarsimp + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: cmachine_state_relation_def carch_state_relation_def cpspace_relation_def) + apply (clarsimp simp: update_tcb_map_tos typ_heap_simps') + apply (simp add: map_to_ctes_upd_tcb_no_ctes map_to_tcbs_upd tcb_cte_cases_def cteSizeBits_def) + apply (simp add: cep_relations_drop_fun_upd + cvariable_relation_upd_const ko_at_projectKO_opt) + apply (drule ko_at_projectKO_opt) + apply (erule (2) cmap_relation_upd_relI) + subgoal by (simp add: ctcb_relation_def) + apply assumption + apply simp + done + +lemma createObject_ccorres: + notes APITypecapBits_simps[simp] = + APIType_capBits_def[split_simps + object_type.split apiobject_type.split] + shows + "ccorres ccap_relation ret__struct_cap_C_' + (createObject_hs_preconds regionBase newType userSize isdev) + (createObject_c_preconds regionBase newType userSize isdev) + [] + (createObject newType regionBase userSize isdev) + (Call createObject_'proc)" +proof - + note if_cong[cong] + + (* FIXME AARCH64 bit of abstraction leakage *) + have canonical_tcb_offset[simp]: + "\ canonical_address regionBase; is_aligned regionBase 11 \ + \ (regionBase + 0x400 && mask 48) - 0x400 = regionBase" + apply (subst and_mask_plus, assumption) + apply (simp add: canonical_address_and_maskD)+ + done + + have aligned_and: "\p. is_aligned p 1 \ p && 0xFFFFFFFFFFFFFFFE = (p::machine_word)" + by word_bitwise (simp add: is_aligned_nth) + + have gsCNodes_update: + "\f. (\ks. ks \gsCNodes := f (gsCNodes ks)\) = gsCNodes_update f" + by (rule ext) simp + + show ?thesis + apply (clarsimp simp: createObject_c_preconds_def + createObject_hs_preconds_def) + apply (rule ccorres_gen_asm_state) + apply (cinit lift: t_' regionBase_' userSize_' deviceMemory_') + apply (rule ccorres_cond_seq) + (* Architecture specific objects. *) + apply (rule_tac + Q="createObject_hs_preconds regionBase newType userSize isdev" and + S="createObject_c_preconds1 regionBase newType userSize isdev" and + R="createObject_hs_preconds regionBase newType userSize isdev" and + T="createObject_c_preconds1 regionBase newType userSize isdev" + in ccorres_Cond_rhs) + apply (subgoal_tac "toAPIType newType = None") + apply clarsimp + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_guard_imp) + apply (ctac (no_vcg) add: Arch_createObject_ccorres) + apply (rule ccorres_return_C_Seq) + apply (rule ccorres_return_C) + apply clarsimp + apply clarsimp + apply clarsimp + apply wp[1] + apply clarsimp + apply (clarsimp simp: createObject_c_preconds_def + region_actually_is_bytes + region_actually_is_bytes_def) + apply (clarsimp simp: object_type_from_H_def + AARCH64_H.toAPIType_def Kernel_C_defs + nAPIObjects_def word_sle_def createObject_c_preconds_def + word_le_nat_alt split: + apiobject_type.splits object_type.splits) + apply (subgoal_tac "\apiType. 
newType = APIObjectType apiType") + apply clarsimp + apply (rule ccorres_guard_imp) + apply (rule_tac apiType=apiType in ccorres_apiType_split) + + (* Untyped *) + apply (clarsimp simp: Kernel_C_defs object_type_from_H_def + AARCH64_H.toAPIType_def nAPIObjects_def + word_sle_def + intro!: Corres_UL_C.ccorres_cond_empty + Corres_UL_C.ccorres_cond_univ ccorres_rhs_assoc) + apply (rule_tac + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.Untyped) + (unat (userSizea :: machine_word)) isdev" and + A'=UNIV in + ccorres_guard_imp) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_return_C, simp, simp, simp) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply simp + apply (clarsimp simp: ccap_relation_def cap_to_H_def + getObjectSize_def apiGetObjectSize_def cap_untyped_cap_lift + aligned_add_aligned canonical_address_and_maskD + split: option.splits) + apply (subst word_le_mask_eq, clarsimp simp: mask_def, unat_arith, + auto simp: word_bits_conv untypedBits_defs)[1] + + (* TCB *) + apply (clarsimp simp: Kernel_C_defs object_type_from_H_def + toAPIType_def nAPIObjects_def word_sle_def + intro!: Corres_UL_C.ccorres_cond_empty + Corres_UL_C.ccorres_cond_univ ccorres_rhs_assoc) + apply (rule_tac + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" in + ccorres_guard_imp2) + apply (rule ccorres_symb_exec_r) + apply (ccorres_remove_UNIV_guard) + apply (simp add: hrs_htd_update) + apply (ctac (c_lines 4) add: ccorres_placeNewObject_tcb[simplified]) + apply simp + apply (rule ccorres_pre_curDomain) + apply ctac + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_return_C, simp, simp, simp) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply wp + apply vcg + apply (simp add: obj_at'_real_def) + apply (wp placeNewObject_ko_wp_at') + apply (vcg exspec=Arch_initContext_modifies) + apply clarsimp + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (clarsimp simp: createObject_hs_preconds_def + createObject_c_preconds_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule invs_sym') + apply (simp add: getObjectSize_def objBits_simps word_bits_conv + apiGetObjectSize_def + tcbBlockSizeBits_def new_cap_addrs_def projectKO_opt_tcb) + apply (clarsimp simp: range_cover.aligned + region_actually_is_bytes_def APIType_capBits_def) + apply (frule(1) ghost_assertion_size_logic_no_unat) + apply (clarsimp simp: ccap_relation_def cap_to_H_def + getObjectSize_def apiGetObjectSize_def + cap_thread_cap_lift aligned_add_aligned + split: option.splits) + apply (frule range_cover.aligned) + apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs + tcb_ptr_to_ctcb_ptr_def + invs_valid_objs' invs_urz isFrameType_def + simp flip: canonical_bit_def) + + (* Endpoint *) + apply (clarsimp simp: Kernel_C_defs object_type_from_H_def + toAPIType_def nAPIObjects_def word_sle_def + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc) + apply (rule_tac + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.EndpointObject) + (unat (userSizea :: machine_word)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.EndpointObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) + apply (simp add: hrs_htd_update) + apply (ctac (no_vcg) pre only: add: ccorres_placeNewObject_endpoint) + apply (rule 
ccorres_symb_exec_r) + apply (rule ccorres_return_C, simp, simp, simp) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply wp + apply (clarsimp simp: ccap_relation_def cap_to_H_def getObjectSize_def + objBits_simps apiGetObjectSize_def epSizeBits_def + cap_endpoint_cap_lift canonical_address_and_maskD + split: option.splits + dest!: range_cover.aligned) + apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule invs_sym') + apply (auto simp: getObjectSize_def objBits_simps apiGetObjectSize_def + epSizeBits_def word_bits_conv + elim!: is_aligned_no_wrap' + intro!: range_cover_simpleI)[1] + + (* Notification *) + apply (clarsimp simp: createObject_c_preconds_def) + apply (clarsimp simp: getObjectSize_def objBits_simps apiGetObjectSize_def + epSizeBits_def word_bits_conv word_sle_def word_sless_def) + apply (clarsimp simp: Kernel_C_defs object_type_from_H_def + toAPIType_def nAPIObjects_def word_sle_def + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc) + apply (rule_tac + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.NotificationObject) + (unat (userSizea :: machine_word)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.NotificationObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) + apply (simp add: hrs_htd_update) + apply (ctac (no_vcg) pre only: add: ccorres_placeNewObject_notification) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_return_C, simp, simp, simp) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply wp + apply (clarsimp simp: ccap_relation_def cap_to_H_def + getObjectSize_def canonical_address_and_maskD + apiGetObjectSize_def ntfnSizeBits_def objBits_simps + cap_notification_cap_lift + dest!: range_cover.aligned + split: option.splits) + apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule invs_sym') + apply (auto simp: getObjectSize_def objBits_simps + apiGetObjectSize_def + ntfnSizeBits_def word_bits_conv + elim!: is_aligned_no_wrap' intro!: range_cover_simpleI)[1] + + (* CapTable *) + apply (clarsimp simp: createObject_c_preconds_def) + apply (clarsimp simp: getObjectSize_def objBits_simps + apiGetObjectSize_def + ntfnSizeBits_def word_bits_conv) + apply (clarsimp simp: Kernel_C_defs object_type_from_H_def + toAPIType_def nAPIObjects_def + word_sle_def word_sless_def zero_le_sint + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc + ccorres_move_c_guards ccorres_Guard_Seq) + apply (rule_tac + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.CapTableObject) + (unat (userSizea :: machine_word)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.CapTableObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) + apply (simp add:field_simps hrs_htd_update) + apply (ctac pre only: add: ccorres_placeNewObject_captable) + apply (subst gsCNodes_update) + apply (ctac add: gsCNodes_update_ccorres) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_return_C, simp, simp, simp) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (rule hoare_triv[of \], simp add:hoare_TrueI) + apply vcg + apply wp + apply vcg + apply (rule conjI) + apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule 
invs_sym') + apply (frule(1) ghost_assertion_size_logic_no_unat) + apply (clarsimp simp: getObjectSize_def objBits_simps + apiGetObjectSize_def + cteSizeBits_def word_bits_conv add.commute createObject_c_preconds_def + region_actually_is_bytes_def + invs_valid_objs' invs_urz + elim!: is_aligned_no_wrap' + dest: word_of_nat_le intro!: range_coverI)[1] + apply (clarsimp simp: createObject_hs_preconds_def hrs_htd_update isFrameType_def) + apply (frule range_cover.strong_times_64[folded addr_card_wb], simp+) + apply (subst h_t_array_valid_retyp, simp+) + apply (simp add: power_add cte_C_size cteSizeBits_def) + apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_cnode_cap_lift + getObjectSize_def apiGetObjectSize_def cteSizeBits_def + objBits_simps field_simps is_aligned_power2 + addr_card_wb is_aligned_weaken[where y=2] + split: option.splits + simp flip: canonical_bit_def) + apply (rule conjI) + apply (frule range_cover.aligned) + apply (simp add: mask_shiftl_decompose[where m=canonical_bit and n=1, + simplified shiftl1_is_mult] + canonical_make_canonical_idem aligned_and is_aligned_weaken) + apply (subst word_le_mask_eq[symmetric, THEN eqTrueI]) + apply (clarsimp simp: mask_def untypedBits_defs) + apply unat_arith + apply (clarsimp simp: word_bits_conv) + apply simp + apply auto[1] + apply (clarsimp simp: createObject_c_preconds_def) + apply (clarsimp simp:nAPIOBjects_object_type_from_H)? + apply (intro impI conjI, simp_all)[1] + apply (clarsimp simp: nAPIObjects_def object_type_from_H_def Kernel_C_defs + split: object_type.splits) + apply (clarsimp simp: createObject_c_preconds_def + createObject_hs_preconds_def) + done +qed + +lemma ccorres_guard_impR: + "\ccorres_underlying sr \ r xf arrel axf W Q' hs f g; (\s s'. \(s, s') \ sr; s' \ A'\ \ s' \ Q')\ + \ ccorres_underlying sr \ r xf arrel axf W A' hs f g" + by (rule ccorres_guard_imp2,simp+) + +lemma typ_clear_region_dom: + "dom (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: 'b :: mem_type typ_heap) + \ dom ((clift hp) :: 'b :: mem_type typ_heap)" + apply (clarsimp simp:lift_t_def lift_typ_heap_def comp_def) + apply (clarsimp simp:lift_state_def) + apply (case_tac hp) + apply (clarsimp simp:) + apply (case_tac x) + apply (clarsimp simp:s_valid_def h_t_valid_def) + apply (clarsimp simp:valid_footprint_def Let_def) + apply (drule spec) + apply (erule(1) impE) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp add:map_le_def) + apply (drule_tac x = aa in bspec) + apply simp + apply (drule sym) + apply simp + apply (clarsimp simp:proj_d_def) + apply (clarsimp simp:hrs_htd_update_def typ_clear_region_def + split:if_splits option.splits) + apply (clarsimp simp:proj_d_def) + apply (clarsimp simp:hrs_htd_update_def typ_clear_region_def + split:if_splits option.splits) + done + +lemma tcb_range_subseteq: + "is_aligned x (objBitsKO (KOTCB ko)) + \ {ptr_val (tcb_ptr_to_ctcb_ptr x)..+size_of TYPE(tcb_C)} \ {x..x + 2 ^ objBitsKO (KOTCB ko) - 1}" + apply (simp add:ptr_val_def tcb_ptr_to_ctcb_ptr_def) + apply (rule subset_trans) + apply (rule intvl_sub_offset[where z = "2^objBitsKO (KOTCB ko)"]) + apply (simp add:ctcb_offset_defs size_of_def objBits_simps') + apply (subst intvl_range_conv) + apply simp + apply (simp add:objBits_simps' word_bits_conv) + apply simp + done + +lemma pspace_no_overlap_induce_tcb: + "\cpspace_relation (ksPSpace (s::kernel_state)) + (underlying_memory (ksMachineState s)) hp; + pspace_aligned' s; clift hp xa = Some (v::tcb_C); + is_aligned ptr bits; bits < word_bits; + pspace_no_overlap' ptr bits s\ + \ 
{ptr_val xa..+size_of TYPE(tcb_C)} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp:cpspace_relation_def) + apply (clarsimp simp:cmap_relation_def) + apply (subgoal_tac "xa\tcb_ptr_to_ctcb_ptr ` dom (map_to_tcbs (ksPSpace s))") + prefer 2 + apply (simp add:domI) + apply (thin_tac "S = dom K" for S K)+ + apply (thin_tac "\x\ S. K x" for S K)+ + apply (clarsimp simp: image_def projectKO_opt_tcb map_comp_def + split: option.splits kernel_object.split_asm) + apply (frule(1) pspace_no_overlapD') + apply (rule disjoint_subset[OF tcb_range_subseteq[simplified]]) + apply (erule(1) pspace_alignedD') + apply (subst intvl_range_conv) + apply (simp add: word_bits_def)+ + done + +lemma pspace_no_overlap_induce_endpoint: + "\cpspace_relation (ksPSpace (s::kernel_state)) + (underlying_memory (ksMachineState s)) hp; + pspace_aligned' s; clift hp xa = Some (v::endpoint_C); + is_aligned ptr bits; bits < word_bits; + pspace_no_overlap' ptr bits s\ + \ {ptr_val xa..+size_of TYPE(endpoint_C)} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp: cpspace_relation_def) + apply (clarsimp simp: cmap_relation_def) + apply (subgoal_tac "xa\ep_Ptr ` dom (map_to_eps (ksPSpace s))") + prefer 2 + subgoal by (simp add: domI) + apply (thin_tac "S = dom K" for S K)+ + apply (thin_tac "\x\ S. K x" for S K)+ + apply (clarsimp simp: image_def projectKO_opt_ep map_comp_def + split: option.splits kernel_object.split_asm) + apply (frule(1) pspace_no_overlapD') + apply (subst intvl_range_conv) + apply simp + apply (simp add: word_bits_def) + apply (simp add: size_of_def) + apply (subst intvl_range_conv[where bits = epSizeBits,simplified epSizeBits_def, simplified]) + apply (drule(1) pspace_alignedD') + apply (simp add: objBits_simps' archObjSize_def + split: arch_kernel_object.split_asm) + apply (simp add: word_bits_conv) + apply (simp add: objBits_simps' archObjSize_def + split: arch_kernel_object.split_asm) + done + +lemma pspace_no_overlap_induce_notification: + "\cpspace_relation (ksPSpace (s::kernel_state)) + (underlying_memory (ksMachineState s)) hp; + pspace_aligned' s; clift hp xa = Some (v::notification_C); + is_aligned ptr bits; bits < word_bits; + pspace_no_overlap' ptr bits s\ + \ {ptr_val xa..+size_of TYPE(notification_C)} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp: cpspace_relation_def) + apply (clarsimp simp: cmap_relation_def size_of_def) + apply (subgoal_tac "xa\ntfn_Ptr ` dom (map_to_ntfns (ksPSpace s))") + prefer 2 + apply (simp add: domI) + apply (thin_tac "S = dom K" for S K)+ + apply (thin_tac "\x\ S. 
K x" for S K)+ + apply (clarsimp simp: image_def projectKO_opt_ntfn map_comp_def + split: option.splits kernel_object.split_asm) + apply (frule(1) pspace_no_overlapD') + apply (subst intvl_range_conv) + apply simp + apply (simp add: word_bits_def) + apply (subst intvl_range_conv[where bits = ntfnSizeBits,simplified ntfnSizeBits_def, simplified]) + apply (drule(1) pspace_alignedD') + apply (simp add: objBits_simps' archObjSize_def + split: arch_kernel_object.split_asm) + apply (simp add: word_bits_conv) + apply (simp add: objBits_simps' archObjSize_def + split: arch_kernel_object.split_asm) + done + +lemma vcpu_range_subseteq: + "is_aligned x (objBitsKO (KOArch (KOVCPU ko))) + \ {(x::machine_word)..+size_of TYPE(vcpu_C)} \ {x..x + 2 ^ objBitsKO (KOArch (KOVCPU ko)) - 1}" + apply simp + apply (rule subset_trans) + apply (rule intvl_start_le[where y = "2^objBitsKO ((KOArch (KOVCPU ko)))"]) + apply (simp add: objBits_simps vcpuBits_def) + apply (subst intvl_range_conv) + apply simp + apply (simp add:objBits_simps word_bits_conv vcpuBits_def)+ + done + +lemma pspace_no_overlap_induce_vcpu: + "\cpspace_relation (ksPSpace (s::kernel_state)) + (underlying_memory (ksMachineState s)) hp; + pspace_aligned' s; clift hp xa = Some (v::vcpu_C); + is_aligned ptr bits; bits < word_bits; + pspace_no_overlap' ptr bits s\ + \ {ptr_val xa..+size_of TYPE(vcpu_C)} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp:cpspace_relation_def) + apply (clarsimp simp:cmap_relation_def) + apply (subgoal_tac "xa\ vcpu_Ptr ` dom (map_to_vcpus (ksPSpace s))") + prefer 2 + apply (simp add:domI) + apply (thin_tac "S = dom K" for S K)+ + apply (thin_tac "\x\ S. K x" for S K)+ + apply (clarsimp simp: image_def projectKO_opt_tcb map_comp_def + split: option.splits kernel_object.split_asm) + apply (subst intvl_range_conv) + apply (simp add: word_bits_def)+ + (* prevent intersection in assumption becoming predicates *) + supply Int_atLeastAtMost[simp del] + apply (frule(1) pspace_no_overlapD') + apply (drule(1) pspace_alignedD') + apply (clarsimp simp: image_def projectKO_opt_vcpu map_comp_def + split: option.splits kernel_object.split_asm arch_kernel_object.split_asm) + apply (erule (1) disjoint_subset[OF vcpu_range_subseteq[simplified]]) + done + +lemma ctes_of_ko_at_strong: + "\ctes_of s p = Some a;is_aligned p cteSizeBits\ \ + (\ptr ko. 
(ksPSpace s ptr = Some ko \ {p ..+ 2^cteSizeBits} \ obj_range' ptr ko))" + apply (clarsimp simp: map_to_ctes_def Let_def split:if_split_asm) + apply (intro exI conjI,assumption) + apply (simp add:obj_range'_def objBits_simps is_aligned_no_wrap' field_simps) + apply (subst intvl_range_conv[where bits = cteSizeBits,simplified]) + apply simp + apply (simp add:word_bits_def objBits_simps') + apply (simp add:field_simps mask_def) + apply (intro exI conjI,assumption) + apply (clarsimp simp:objBits_simps obj_range'_def word_and_le2) + apply (cut_tac intvl_range_conv[where bits = cteSizeBits and ptr = p, simplified]) + defer + apply simp + apply (simp add:word_bits_conv objBits_simps') + apply (intro conjI) + apply (rule order_trans[OF word_and_le2]) + apply clarsimp + apply clarsimp + apply (thin_tac "P \ Q" for P Q) + apply (erule order_trans) + apply (subst word_plus_and_or_coroll2[where x = p and w = "mask tcbBlockSizeBits",symmetric]) + apply (clarsimp simp: tcb_cte_cases_def field_simps cteSizeBits_def tcbBlockSizeBits_def + split:if_split_asm) + apply (subst add.commute) + apply (rule word_plus_mono_left[OF _ is_aligned_no_wrap'] + , simp add: mask_def, rule is_aligned_neg_mask2, simp add: mask_def)+ + done + +lemma pspace_no_overlap_induce_cte: + "\cpspace_relation (ksPSpace (s::kernel_state)) + (underlying_memory (ksMachineState s)) hp; + pspace_aligned' s; clift hp xa = Some (v::cte_C); + is_aligned ptr bits; bits < word_bits; + pspace_no_overlap' ptr bits s\ + \ {ptr_val xa..+size_of TYPE(cte_C)} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp: cpspace_relation_def) + apply (clarsimp simp: cmap_relation_def size_of_def) + apply (subgoal_tac "xa\cte_Ptr ` dom (ctes_of s)") + prefer 2 + apply (simp add:domI) + apply (thin_tac "S = dom K" for S K)+ + apply (thin_tac "\x\ S. K x" for S K)+ + apply (clarsimp simp: image_def projectKO_opt_cte map_comp_def + split: option.splits kernel_object.split_asm) + apply (frule ctes_of_is_aligned) + apply (simp add: objBits_simps) + apply (drule ctes_of_ko_at_strong) + apply simp + apply (clarsimp simp: objBits_simps') + apply (erule disjoint_subset) + apply (frule(1) pspace_no_overlapD') + apply (subst intvl_range_conv) + apply simp + apply (simp add: word_bits_def) + apply (simp add: obj_range'_def ptr_range_mask_range del: Int_atLeastAtMost) + done + +lemma pspace_no_overlap_induce_asidpool: + "\cpspace_relation (ksPSpace (s::kernel_state)) (underlying_memory (ksMachineState s)) hp; + pspace_aligned' s; clift hp xa = Some (v::asid_pool_C); + is_aligned ptr bits; bits < word_bits; + pspace_no_overlap' ptr bits s\ + \ {ptr_val xa..+size_of TYPE(asid_pool_C)} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp:cpspace_relation_def) + apply (clarsimp simp:cmap_relation_def size_of_def) + apply (subgoal_tac "xa\ap_Ptr ` dom (map_to_asidpools (ksPSpace s))") + prefer 2 + apply (simp add:domI) + apply (thin_tac "S = dom K" for S K)+ + apply (thin_tac "\x\ S. 
K x" for S K)+ + apply (clarsimp simp:image_def projectKO_opt_asidpool + map_comp_def split:option.splits kernel_object.split_asm) + apply (frule(1) pspace_no_overlapD') + apply (subst intvl_range_conv) + apply simp + apply (simp add: word_bits_def) + apply (subst intvl_range_conv[where bits = 12,simplified]) + apply (drule(1) pspace_alignedD') + apply (simp add: objBits_simps archObjSize_def pageBits_def split:arch_kernel_object.split_asm) + apply (clarsimp elim!:is_aligned_weaken) + apply (simp only: is_aligned_neg_mask_eq) + apply (erule disjoint_subset[rotated]) + apply (clarsimp simp: field_simps) + apply (simp add: p_assoc_help) + apply (rule word_plus_mono_right) + apply (clarsimp simp:objBits_simps archObjSize_def pageBits_def split:arch_kernel_object.split_asm)+ + done + +lemma pspace_no_overlap_induce_user_data: + "\cpspace_relation (ksPSpace (s::kernel_state)) (underlying_memory (ksMachineState s)) hp; + pspace_aligned' s; clift hp xa = Some (v::user_data_C); + is_aligned ptr bits; bits < word_bits; + pspace_no_overlap' ptr bits s\ + \ {ptr_val xa..+size_of TYPE(user_data_C)} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp:cpspace_relation_def) + apply (clarsimp simp:cmap_relation_def size_of_def) + apply (subgoal_tac "xa\Ptr ` dom (heap_to_user_data (ksPSpace s) (underlying_memory (ksMachineState s)))") + prefer 2 + apply (simp add:domI) + apply (thin_tac "S = dom K" for S K)+ + apply (thin_tac "\x\ S. K x" for S K)+ + apply (clarsimp simp: image_def heap_to_user_data_def projectKO_opt_user_data map_comp_def + split: option.splits kernel_object.splits) + apply (frule(1) pspace_no_overlapD') + apply (clarsimp simp: word_bits_def) + apply (subst intvl_range_conv[where bits = 12,simplified]) + apply (drule(1) pspace_alignedD') + apply (simp add:objBits_simps archObjSize_def pageBits_def split:arch_kernel_object.split_asm) + apply (clarsimp elim!:is_aligned_weaken) + apply (subst intvl_range_conv, simp, simp) + apply (clarsimp simp: field_simps) + apply (simp add: p_assoc_help) + apply (clarsimp simp: objBits_simps archObjSize_def pageBits_def split:arch_kernel_object.split_asm)+ + done + +lemma pspace_no_overlap_induce_device_data: + "\cpspace_relation (ksPSpace (s::kernel_state)) (underlying_memory (ksMachineState s)) hp; + pspace_aligned' s; clift hp xa = Some (v::user_data_device_C); + is_aligned ptr bits; bits < word_bits; + pspace_no_overlap' ptr bits s\ + \ {ptr_val xa..+size_of TYPE(user_data_C)} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp: cpspace_relation_def) + apply (clarsimp simp: cmap_relation_def size_of_def) + apply (subgoal_tac "xa\Ptr ` dom (heap_to_device_data (ksPSpace s) (underlying_memory (ksMachineState s)))") + prefer 2 + apply (simp add: domI) + apply (thin_tac "S = dom K" for S K)+ + apply (thin_tac "\x\ S. 
K x" for S K)+ + apply (clarsimp simp: image_def heap_to_device_data_def projectKO_opt_user_data_device map_comp_def + split: option.splits kernel_object.splits) + apply (frule(1) pspace_no_overlapD') + apply (clarsimp simp: word_bits_def) + apply (subst intvl_range_conv[where bits = 12,simplified]) + apply (drule(1) pspace_alignedD') + apply (simp add: objBits_simps archObjSize_def pageBits_def split: arch_kernel_object.split_asm) + apply (clarsimp elim!: is_aligned_weaken) + apply (subst intvl_range_conv, simp, simp) + apply (clarsimp simp: field_simps) + apply (simp add: p_assoc_help) + apply (clarsimp simp: objBits_simps archObjSize_def pageBits_def split:arch_kernel_object.split_asm)+ + done + +lemma typ_region_bytes_dom: + "typ_uinfo_t TYPE('b) \ typ_uinfo_t TYPE (word8) + \ dom (clift (hrs_htd_update (typ_region_bytes ptr bits) hp) :: 'b :: mem_type typ_heap) + \ dom ((clift hp) :: 'b :: mem_type typ_heap)" + apply (clarsimp simp: liftt_if split: if_splits) + apply (case_tac "{ptr_val x ..+ size_of TYPE('b)} \ {ptr ..+ 2 ^ bits} = {}") + apply (clarsimp simp: h_t_valid_def valid_footprint_def Let_def + hrs_htd_update_def split_def typ_region_bytes_def) + apply (drule spec, drule(1) mp) + apply (simp add: size_of_def split: if_split_asm) + apply (drule subsetD[OF equalityD1], rule IntI, erule intvlI, simp) + apply simp + apply (clarsimp simp: set_eq_iff) + apply (drule(1) h_t_valid_intvl_htd_contains_uinfo_t) + apply (clarsimp simp: hrs_htd_update_def typ_region_bytes_def split_def + split: if_split_asm) + done + +lemma lift_t_typ_region_bytes_none: + "\ \x (v :: 'a). lift_t g hp x = Some v + \ {ptr_val x ..+ size_of TYPE('a)} \ {ptr ..+ 2 ^ bits} = {}; + typ_uinfo_t TYPE('a) \ typ_uinfo_t TYPE(8 word) \ \ + lift_t g (hrs_htd_update (typ_region_bytes ptr bits) hp) + = (lift_t g hp :: (('a :: mem_type) ptr) \ _)" + apply atomize + apply (subst lift_t_typ_region_bytes, simp_all) + apply (clarsimp simp: liftt_if hrs_htd_def split: if_splits) + apply (rule ext, simp add: restrict_map_def) + apply (rule ccontr, clarsimp split: if_splits) + apply (clarsimp simp: liftt_if hrs_htd_def split: if_splits) + apply (clarsimp simp: set_eq_iff intvl_self) + done + +lemma typ_bytes_cpspace_relation_clift_userdata: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "pspace_no_overlap' ptr bits s" +shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) = ((clift hp) :: (user_data_C ptr \ user_data_C))" + (is "?lhs = ?rhs") + using assms + apply - + apply (rule lift_t_typ_region_bytes_none, simp_all) + apply (rule pspace_no_overlap_induce_user_data[simplified], auto) + done + + +lemma typ_bytes_cpspace_relation_clift_devicedata: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "pspace_no_overlap' ptr bits s" + shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) = ((clift hp) :: (user_data_device_C ptr \ user_data_device_C))" + (is "?lhs = ?rhs") + using assms + apply - + apply (rule lift_t_typ_region_bytes_none, simp_all) + apply (rule pspace_no_overlap_induce_device_data[simplified], auto) + done + + +lemma pspace_no_overlap_induce_pte: + "\cpspace_relation (ksPSpace (s::kernel_state)) (underlying_memory (ksMachineState s)) hp; + pspace_aligned' s; clift hp xa = Some (v::pte_C); + is_aligned ptr bits; bits < word_bits; + 
pspace_no_overlap' ptr bits s\ + \ {ptr_val xa..+size_of TYPE(pte_C)} \ {ptr..+2 ^ bits} = {}" + apply (clarsimp simp:cpspace_relation_def) + apply (clarsimp simp:cmap_relation_def) + apply (subgoal_tac "xa\pte_Ptr ` dom (map_to_ptes (ksPSpace s))") + prefer 2 + apply (simp add:domI) + apply (thin_tac "S = dom K" for S K)+ + apply (thin_tac "\x\ S. K x" for S K)+ + apply (clarsimp simp: image_def projectKO_opt_pte map_comp_def + split: option.splits kernel_object.split_asm) + apply (frule(1) pspace_no_overlapD') + apply (subst intvl_range_conv) + apply simp + apply (simp add: word_bits_def) + apply (subst intvl_range_conv[where bits = 3,simplified]) + apply (drule(1) pspace_alignedD') + apply (simp add: objBits_simps archObjSize_def bit_simps split:arch_kernel_object.split_asm) + apply (simp add: word_bits_conv) + apply (simp add: objBits_simps archObjSize_def bit_simps split:arch_kernel_object.split_asm) + done + +lemma typ_bytes_cpspace_relation_clift_tcb: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "pspace_no_overlap' ptr bits s" + shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) = ((clift hp) :: (tcb_C ptr \ tcb_C))" + (is "?lhs = ?rhs") + using assms + apply - + apply (rule lift_t_typ_region_bytes_none, simp_all) + apply (erule(5) pspace_no_overlap_induce_tcb[simplified]) + done + +lemma typ_bytes_cpspace_relation_clift_pte: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "pspace_no_overlap' ptr bits s" + shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) = ((clift hp) :: (pte_C ptr \ pte_C))" + (is "?lhs = ?rhs") + using assms + apply - + apply (rule lift_t_typ_region_bytes_none, simp_all) + apply (erule(5) pspace_no_overlap_induce_pte[unfolded size_of_def,simplified]) + done + +lemma typ_bytes_cpspace_relation_clift_endpoint: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "pspace_no_overlap' ptr bits s" + shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) = ((clift hp) :: (endpoint_C ptr \ endpoint_C))" + (is "?lhs = ?rhs") + using assms + apply - + apply (rule lift_t_typ_region_bytes_none, simp_all) + apply (erule(5) pspace_no_overlap_induce_endpoint[simplified]) + done + +lemma typ_bytes_cpspace_relation_clift_notification: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "pspace_no_overlap' ptr bits s" + shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) = ((clift hp) :: (notification_C ptr \ notification_C))" + (is "?lhs = ?rhs") + using assms + apply - + apply (rule lift_t_typ_region_bytes_none, simp_all) + apply (erule(5) pspace_no_overlap_induce_notification[simplified]) + done + +lemma typ_bytes_cpspace_relation_clift_vcpu: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "pspace_no_overlap' ptr bits s" + shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) = ((clift hp) :: (vcpu_C ptr \ vcpu_C))" + (is "?lhs = ?rhs") + using assms + apply - + apply (rule lift_t_typ_region_bytes_none, 
simp_all) + apply (erule(5) pspace_no_overlap_induce_vcpu[simplified]) + done + +lemma typ_bytes_cpspace_relation_clift_asid_pool: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "pspace_no_overlap' ptr bits s" + shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) = ((clift hp) :: (asid_pool_C ptr \ asid_pool_C))" + (is "?lhs = ?rhs") + using assms + apply - + apply (rule lift_t_typ_region_bytes_none, simp_all) + apply (erule(5) pspace_no_overlap_induce_asidpool[simplified]) + done + +lemma typ_bytes_cpspace_relation_clift_cte: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "pspace_no_overlap' ptr bits s" + shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) = ((clift hp) :: (cte_C ptr \ cte_C))" + (is "?lhs = ?rhs") + using assms + apply - + apply (rule lift_t_typ_region_bytes_none) + apply (erule(5) pspace_no_overlap_induce_cte) + apply (simp add: cte_C_size) + done + +lemma pspace_no_overlap_obj_atD': + "obj_at' P p s \ pspace_no_overlap' ptr bits s + \ \ko. P ko \ is_aligned p (objBitsKO (injectKOS ko)) + \ {p .. p + (2 ^ objBitsKO (injectKOS ko)) - 1} + \ {ptr .. (ptr && ~~ mask bits) + 2 ^ bits - 1} = {}" + apply (clarsimp simp: obj_at'_def) + apply (drule(1) pspace_no_overlapD') + apply (clarsimp simp: projectKOs project_inject) + apply auto + done + +lemma typ_bytes_cpspace_relation_clift_gptr: +assumes "cpspace_relation (ksPSpace s) (underlying_memory (ksMachineState (s::kernel_state))) hp" + and "is_aligned ptr bits" "bits < word_bits" + and "pspace_aligned' s" + and "kernel_data_refs \ {ptr ..+ 2^bits} = {}" + and "ptr_span (ptr' :: 'a ptr) \ kernel_data_refs" + and "typ_uinfo_t TYPE('a :: mem_type) \ typ_uinfo_t TYPE(8 word)" + shows "clift (hrs_htd_update (typ_region_bytes ptr bits) hp) + ptr' + = (clift hp) ptr'" + (is "?lhs = ?rhs ptr'") + using assms + apply - + apply (case_tac "ptr' \ dom ?rhs") + apply (frule contra_subsetD[OF typ_region_bytes_dom[where ptr = ptr and bits = bits], rotated]) + apply simp + apply fastforce + apply (clarsimp simp: liftt_if hrs_htd_update_def split_def split: if_splits) + apply (simp add: h_t_valid_typ_region_bytes) + apply blast + done + +lemma cmap_array_typ_region_bytes_triv[OF refl]: + "ptrf = (Ptr :: _ \ 'b ptr) + \ carray_map_relation bits' (map_comp f (ksPSpace s)) (h_t_valid htd c_guard) ptrf + \ is_aligned ptr bits + \ pspace_no_overlap' ptr bits s + \ pspace_aligned' s + \ typ_uinfo_t TYPE('b :: c_type) \ typ_uinfo_t TYPE(8 word) + \ size_of TYPE('b) = 2 ^ bits' + \ objBitsT (koType TYPE('a :: pspace_storable)) \ bits + \ objBitsT (koType TYPE('a :: pspace_storable)) \ bits' + \ bits' < word_bits + \ carray_map_relation bits' (map_comp (f :: _ \ 'a option) (ksPSpace s)) + (h_t_valid (typ_region_bytes ptr bits htd) c_guard) ptrf" + apply (frule(7) cmap_array_typ_region_bytes[where ptrf=ptrf]) + apply (subst(asm) restrict_map_subdom, simp_all) + apply (drule(1) pspace_no_overlap_disjoint') + apply (simp add: upto_intvl_eq) + apply (rule order_trans[OF map_comp_subset_dom]) + apply auto + done + +lemma intvl_mult_is_union: + "{p..+n * m} = (\i < m. 
{p + of_nat (i * n)..+ n})" + apply (cases "n = 0") + apply simp + apply (simp add: intvl_def, safe, simp_all) + apply (rule_tac x="k div n" in bexI) + apply (rule_tac x="k mod n" in exI) + apply (simp only: Abs_fnat_hom_mult Abs_fnat_hom_add, simp) + apply (simp add: More_Divides.td_gal_lt[symmetric] mult.commute) + apply (rule_tac x="xa * n + k" in exI, simp) + apply (subst add.commute, rule order_less_le_trans, erule add_less_mono1) + apply (case_tac m, simp_all) + done + +lemma h_t_array_first_element_at: + "h_t_array_valid htd p n + \ 0 < n + \ gd p + \ h_t_valid htd gd (p :: ('a :: wf_type) ptr)" + apply (clarsimp simp: h_t_array_valid_def h_t_valid_def valid_footprint_def + Let_def CTypes.sz_nzero[unfolded size_of_def]) + apply(drule_tac x="y" in spec, erule impE) + apply (erule order_less_le_trans, simp add: size_of_def) + apply (clarsimp simp: uinfo_array_tag_n_m_def upt_conv_Cons) + apply (erule map_le_trans[rotated]) + apply (simp add: list_map_mono split: if_split) + done + +lemma aligned_intvl_disjointI: + "is_aligned p sz \ is_aligned q sz' + \ p \ {q ..+ 2 ^ sz'} + \ q \ {p ..+ 2 ^ sz} + \ {p..+2 ^ sz} \ {q..+2 ^ sz'} = {}" + apply (frule(1) aligned_ranges_subset_or_disjoint[where p=p and p'=q]) + apply (simp add: upto_intvl_eq[symmetric]) + apply (elim disjE, simp_all) + apply (erule notE, erule subsetD, simp add: intvl_self) + apply (erule notE, erule subsetD, simp add: intvl_self) + done + +end + +definition + "cnodes_retype_have_size R bits cns + = (\ptr' sz'. cns ptr' = Some sz' + \ is_aligned ptr' (cte_level_bits + sz') + \ ({ptr' ..+ 2 ^ (cte_level_bits + sz')} \ R = {} + \ cte_level_bits + sz' = bits))" + +lemma cnodes_retype_have_size_mono: + "cnodes_retype_have_size T bits cns \ S \ T + \ cnodes_retype_have_size S bits cns" + by (auto simp add: cnodes_retype_have_size_def) + +context kernel_m begin + +lemma gsCNodes_typ_region_bytes: + "cvariable_array_map_relation (gsCNodes \) ((^) 2) cte_Ptr (hrs_htd hrs) + \ cnodes_retype_have_size {ptr..+2 ^ bits} bits (gsCNodes \) + \ 0 \ {ptr..+2 ^ bits} \ is_aligned ptr bits + \ clift (hrs_htd_update (typ_region_bytes ptr bits) hrs) + = (clift hrs :: cte_C ptr \ _) + \ cvariable_array_map_relation (gsCNodes \) ((^) 2) cte_Ptr + (typ_region_bytes ptr bits (hrs_htd hrs))" + apply (clarsimp simp: cvariable_array_map_relation_def + h_t_array_valid_def) + apply (elim allE, drule(1) mp) + apply (subst valid_footprint_typ_region_bytes) + apply (simp add: uinfo_array_tag_n_m_def typ_uinfo_t_def typ_info_word) + apply (clarsimp simp: cnodes_retype_have_size_def field_simps) + apply (elim allE, drule(1) mp) + apply (subgoal_tac "size_of TYPE(cte_C) * 2 ^ v = 2 ^ (cte_level_bits + v)") + prefer 2 + apply (simp add: cte_C_size cte_level_bits_def power_add) + apply (clarsimp simp add: upto_intvl_eq[symmetric] field_simps) + apply (case_tac "p \ {ptr ..+ 2 ^ bits}") + apply (drule h_t_array_first_element_at[where p="Ptr p" and gd=c_guard for p, + unfolded h_t_array_valid_def, simplified]) + apply simp + apply (rule is_aligned_c_guard[where m=3], simp+) + apply clarsimp + apply (simp add: align_of_def) + apply (simp add: size_of_def cte_level_bits_def power_add) + apply (simp add: cte_level_bits_def) + apply (drule_tac x="cte_Ptr p" in fun_cong) + apply (simp add: liftt_if[folded hrs_htd_def] hrs_htd_update + h_t_valid_def valid_footprint_typ_region_bytes + split: if_split_asm) + apply (subgoal_tac "p \ {p ..+ size_of TYPE(cte_C)}") + apply (simp add: cte_C_size) + apply blast + apply (simp add: intvl_self) + apply (simp only: 
upto_intvl_eq mask_in_range[symmetric]) + apply (rule aligned_ranges_subset_or_disjoint_coroll, simp_all) + done + +lemma tcb_ctes_typ_region_bytes: + "cvariable_array_map_relation (map_to_tcbs (ksPSpace \)) + (\x. 5) cte_Ptr (hrs_htd hrs) + \ pspace_no_overlap' ptr bits \ + \ pspace_aligned' \ + \ is_aligned ptr bits + \ cpspace_tcb_relation (ksPSpace \) hrs + \ cvariable_array_map_relation (map_to_tcbs (ksPSpace \)) (\x. 5) + cte_Ptr (typ_region_bytes ptr bits (hrs_htd hrs))" + supply Int_atLeastAtMost[simp del] + apply (clarsimp simp: cvariable_array_map_relation_def + h_t_array_valid_def) + apply (drule spec, drule mp, erule exI) + apply (subst valid_footprint_typ_region_bytes) + apply (simp add: uinfo_array_tag_n_m_def typ_uinfo_t_def typ_info_word) + apply (clarsimp simp only: map_comp_Some_iff projectKOs + pspace_no_overlap'_def is_aligned_neg_mask_weaken + field_simps upto_intvl_eq[symmetric]) + apply (elim allE, drule(1) mp) + apply (drule(1) pspace_alignedD') + apply (simp add: ptr_range_mask_range[symmetric]) + apply (simp add: upto_intvl_eq[symmetric]) + apply (erule disjoint_subset[rotated]) + apply (rule intvl_start_le) + apply (simp add: objBits_simps' cte_C_size) + done + + +context begin + +private definition "pte_bits2 = pte_bits" +private lemma pte_bits2_num[simplified pte_bits_def bit_simps]: + "pte_bits2 = pte_bits" by (rule pte_bits2_def) + +lemma pte_bits_power_add[unfolded pte_bits2_num, simplified]: + "(2^pte_bits2) * 2 ^ x = (2::nat) ^ (pte_bits + x)" + by (simp add: power_add pte_bits2_def) + +end + +lemma pte_typ_region_bytes: (* FIXME AARCH64: should rename cnodes_retype_have_size into something more general *) + "\ cvariable_array_map_relation (gsPTTypes (ksArchState \)) (\x. 2^ptTranslationBits x) pte_Ptr + (hrs_htd hrs); + cnodes_retype_have_size {ptr..+2 ^ bits} bits + (gsPTTypes (ksArchState \) ||> (\pt_t. ptBits pt_t - cte_level_bits)); + 0 \ {ptr..+2 ^ bits}; is_aligned ptr bits; + clift (hrs_htd_update (typ_region_bytes ptr bits) hrs) = (clift hrs :: pte_C ptr \ _) \ + \ cvariable_array_map_relation (gsPTTypes (ksArchState \)) (\x. 
2^ptTranslationBits x) + pte_Ptr (typ_region_bytes ptr bits (hrs_htd hrs))" + apply (clarsimp simp: cvariable_array_map_relation_def h_t_array_valid_def) + apply (elim allE, erule impE, fastforce) + apply (subst valid_footprint_typ_region_bytes) + apply (simp add: uinfo_array_tag_n_m_def typ_uinfo_t_def typ_info_word) + apply (clarsimp simp: cnodes_retype_have_size_def field_simps pte_bits_power_add) + apply (erule allE, erule allE, erule impE, fastforce simp: in_omonad) + apply (clarsimp simp: pt_bits_def table_size_def) + apply (subst (asm) le_add_diff_inverse2, simp add: bit_simps cte_level_bits_def split: if_splits)+ + apply (clarsimp simp add: upto_intvl_eq[symmetric] field_simps) + apply (case_tac "p \ {ptr ..+ 2 ^ bits}") + apply (drule h_t_array_first_element_at[where p="Ptr p" and gd=c_guard for p, + unfolded h_t_array_valid_def, simplified]) + apply simp + apply (rule is_aligned_c_guard[where m=3], simp+) + apply clarsimp + apply (simp add: align_of_def) + apply (simp add: size_of_def power_add bit_simps split: if_split) + apply (simp add: bit_simps split: if_split) + apply (drule_tac x="pte_Ptr p" in fun_cong) + apply (simp add: liftt_if[folded hrs_htd_def] hrs_htd_update + h_t_valid_def valid_footprint_typ_region_bytes + split: if_split_asm) + apply (subgoal_tac "p \ {p ..+ size_of TYPE(pte_C)}") + apply simp + apply blast + apply (simp add: intvl_self) + apply (simp only: upto_intvl_eq mask_in_range[symmetric]) + apply (rule aligned_ranges_subset_or_disjoint_coroll; simp) + done + +lemma ccorres_typ_region_bytes_dummy: + "ccorresG rf_sr + AnyGamma dc xfdc + (invs' and ct_active' and sch_act_simple and + pspace_no_overlap' ptr bits and + (cnodes_retype_have_size S bits o gsCNodes) and + (\\. cnodes_retype_have_size S bits + (gsPTTypes (ksArchState \) ||> (\pt_t. ptBits pt_t-cte_level_bits))) + and K (bits < word_bits \ is_aligned ptr bits \ 4 \ bits + \ 0 \ {ptr..+2 ^ bits} + \ {ptr ..+ 2 ^ bits} \ S + \ kernel_data_refs \ {ptr..+2 ^ bits} = {})) + UNIV hs + (return ()) + (global_htd_update (\_. 
(typ_region_bytes ptr bits)))" + apply (rule ccorres_from_vcg) + apply (clarsimp simp: return_def) + apply (simp add: rf_sr_def) + apply vcg + apply (clarsimp simp: cstate_relation_def Let_def) + apply (frule typ_bytes_cpspace_relation_clift_tcb) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_pte) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_endpoint) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_notification) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_asid_pool) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_cte) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_userdata) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_devicedata) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_vcpu) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_gptr[where ptr'="armKSGlobalUserVSpace_Ptr"]) + apply (simp add: invs_pspace_aligned')+ + apply (frule typ_bytes_cpspace_relation_clift_gptr[where ptr'="intStateIRQNode_array_Ptr"]) + apply (simp add: invs_pspace_aligned')+ + apply (simp add: carch_state_relation_def cmachine_state_relation_def) + apply (simp add: cpspace_relation_def htd_safe_typ_region_bytes) + apply (simp add: h_t_valid_clift_Some_iff) + apply (simp add: hrs_htd_update gsCNodes_typ_region_bytes + cnodes_retype_have_size_mono[where T=S] + tcb_ctes_typ_region_bytes[OF _ _ invs_pspace_aligned'] + pte_typ_region_bytes o_def) + (* FIXME AARCH64 abstraction violation, need to know config_ARM_PA_SIZE_BITS_40 is False *) + apply (simp add: cmap_array_typ_region_bytes_triv invs_pspace_aligned' bit_simps + objBitsT_simps word_bits_def zero_ranges_are_zero_typ_region_bytes + Kernel_Config.config_ARM_PA_SIZE_BITS_40_def + cong: conj_cong) + apply (rule htd_safe_typ_region_bytes, simp (no_asm_simp), blast) + done + +lemma region_is_typeless_cong: + "t_hrs_' (globals t) = t_hrs_' (globals s) + \ region_is_typeless ptr sz s = region_is_typeless ptr sz t" + by (simp add:region_is_typeless_def) + +lemma region_is_bytes_cong: + "t_hrs_' (globals t) = t_hrs_' (globals s) + \ region_is_bytes ptr sz s = region_is_bytes ptr sz t" + by (simp add:region_is_bytes'_def) + +lemma insertNewCap_sch_act_simple[wp]: + "\sch_act_simple\insertNewCap a b c\\_. sch_act_simple\" + by (simp add:sch_act_simple_def,wp) + +lemma insertNewCap_ct_active'[wp]: + "\ct_active'\insertNewCap a b c\\_. ct_active'\" + apply (simp add:ct_in_state'_def) + apply (rule hoare_pre) + apply wps + apply (wp insertNewCap_ksCurThread | simp)+ + done + +lemma updateMDB_ctes_of_cap: + "\\s. (\x\ran(ctes_of s). P (cteCap x)) \ no_0 (ctes_of s)\ + updateMDB srcSlot t + \\r s. \x\ran (ctes_of s). P (cteCap x)\" + apply (rule hoare_pre) + apply wp + apply (clarsimp) + apply (erule ranE) + apply (clarsimp simp:modify_map_def split:if_splits) + apply (drule_tac x = z in bspec) + apply fastforce + apply simp + apply (drule_tac x = x in bspec) + apply fastforce + apply simp + done + +lemma insertNewCap_caps_no_overlap'': +notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex +shows "\cte_wp_at' (\_. 
True) cptr and valid_pspace' + and caps_no_overlap'' ptr us + and K (cptr \ (0::machine_word)) and K (untypedRange x \ {ptr..(ptr && ~~ mask us) + 2 ^ us - 1} = {})\ + insertNewCap srcSlot cptr x + \\rv s. caps_no_overlap'' ptr us s\" + apply (clarsimp simp:insertNewCap_def caps_no_overlap''_def) + apply (rule hoare_pre) + apply (wp getCTE_wp updateMDB_ctes_of_cap) + apply (clarsimp simp:cte_wp_at_ctes_of valid_pspace'_def + valid_mdb'_def valid_mdb_ctes_def no_0_def split:if_splits) + apply (erule ranE) + apply (clarsimp split:if_splits) + apply (frule_tac c= "(cteCap xa)" and q = xb in caps_no_overlapD''[rotated]) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply clarsimp + apply blast + done + +lemma insertNewCap_caps_overlap_reserved': +notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex +shows "\cte_wp_at' (\_. True) cptr and valid_pspace' and caps_overlap_reserved' S + and valid_cap' x and K (cptr \ (0::machine_word)) and K (untypedRange x \ S = {})\ + insertNewCap srcSlot cptr x + \\rv s. caps_overlap_reserved' S s\" + apply (clarsimp simp:insertNewCap_def caps_overlap_reserved'_def) + apply (rule hoare_pre) + apply (wp getCTE_wp updateMDB_ctes_of_cap) + apply (clarsimp simp:cte_wp_at_ctes_of valid_pspace'_def + valid_mdb'_def valid_mdb_ctes_def no_0_def split:if_splits) + apply (erule ranE) + apply (clarsimp split:if_splits) + apply (drule usableRange_subseteq[rotated]) + apply (simp add:valid_cap'_def) + apply blast + apply (drule_tac p = xaa in caps_overlap_reserved'_D) + apply simp + apply simp + apply blast + done + +lemma insertNewCap_pspace_no_overlap': +notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex +shows "\pspace_no_overlap' ptr sz and pspace_aligned' + and pspace_distinct' and cte_wp_at' (\_. True) cptr\ + insertNewCap srcSlot cptr x + \\rv s. pspace_no_overlap' ptr sz s\" + apply (clarsimp simp:insertNewCap_def) + apply (rule hoare_pre) + apply (wp updateMDB_pspace_no_overlap' + setCTE_pspace_no_overlap' getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma insertNewCap_cte_at: + "\cte_at' p\ insertNewCap srcSlot q cap + \\rv. cte_at' p\" + apply (clarsimp simp:insertNewCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma createObject_invs': + "\\s. invs' s \ ct_active' s \ pspace_no_overlap' ptr (APIType_capBits ty us) s + \ caps_no_overlap'' ptr (APIType_capBits ty us) s \ ptr \ 0 \ + caps_overlap_reserved' {ptr..ptr + 2 ^ APIType_capBits ty us - 1} s \ + (ty = APIObjectType apiobject_type.CapTableObject \ 0 < us) \ + is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us \ maxUntypedSizeBits \ + canonical_address ptr \ + {ptr..ptr + 2 ^ APIType_capBits ty us - 1} \ kernel_data_refs = {} \ + 0 < gsMaxObjectSize s + \ createObject ty ptr us dev\\r s. invs' s \" + apply (simp add:createObject_def3) + apply (rule hoare_pre) + apply (wp createNewCaps_invs'[where sz = "APIType_capBits ty us"]) + apply (subgoal_tac "APIType_capBits ty us < word_bits") + apply (clarsimp simp: range_cover_full) + apply (fastforce simp: untypedBits_defs word_bits_def) + done + +lemma createObject_sch_act_simple[wp]: + "\\s. sch_act_simple s + \createObject ty ptr us dev\\r s. sch_act_simple s \" + apply (simp add:sch_act_simple_def) + apply wp + done + +lemma createObject_ct_active'[wp]: + "\\s. 
ct_active' s \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr (APIType_capBits ty us) s + \ is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us < word_bits + \createObject ty ptr us dev\\r s. ct_active' s \" + apply (simp add:ct_in_state'_def createObject_def3) + apply (rule hoare_pre) + apply wp + apply wps + apply (wp createNewCaps_pred_tcb_at') + apply (intro conjI) + apply (auto simp:range_cover_full) + done + +lemma createObject_notZombie[wp]: + "\\\createObject ty ptr us dev \\r s. \ isZombie r\" + apply (rule hoare_pre) + apply (simp add:createObject_def) + apply wpc + apply (wp| clarsimp simp add:isCap_simps)+ + apply wpc + apply (wp| clarsimp simp add:isCap_simps)+ + done + +lemma createObject_valid_cap': + "\\s. pspace_no_overlap' ptr (APIType_capBits ty us) s \ + valid_pspace' s \ + is_aligned ptr (APIType_capBits ty us) \ canonical_address ptr \ + APIType_capBits ty us < word_bits \ + (ty = APIObjectType apiobject_type.CapTableObject \ 0 < us \ us \ 42) \ + (ty = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits) \ ptr \ 0\ + createObject ty ptr us dev \\r s. s \' r\" + apply (simp add:createObject_def3) + apply (rule hoare_pre) + apply wp + apply (rule_tac Q = "\r s. r \ [] \ Q r s" for Q in hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post[OF createNewCaps_ret_len]) + apply clarsimp + apply (rule hoare_strengthen_post[OF createNewCaps_valid_cap'[where sz = "APIType_capBits ty us"]]) + apply assumption + apply clarsimp + apply (clarsimp simp add:word_bits_conv range_cover_full) + apply (cases ty; simp add: APIType_capBits_def maxUntypedSizeBits_def bit_simps split: if_split) + apply (rename_tac t, case_tac t; simp add: objBits_simps') + done + +lemma createObject_untypedRange: + assumes split: + "\P\ createObject ty ptr us dev + \\m s. (toAPIType ty = Some apiobject_type.Untyped \ + Q {ptr..ptr + 2 ^ us - 1} s) \ + (toAPIType ty \ Some apiobject_type.Untyped \ Q {} s)\" + shows "\P\ createObject ty ptr us dev\\m s. Q (untypedRange m) s\" + including no_pre + using split + apply (simp add: createObject_def) + apply (case_tac "toAPIType ty") + apply (simp add: split | wp)+ + apply (simp add: valid_def return_def bind_def split_def) + apply (case_tac a, simp_all) + apply (simp add: valid_def return_def simpler_gets_def simpler_modify_def + bind_def split_def curDomain_def)+ + done + +lemma createObject_capRange: +shows "\P\createObject ty ptr us dev \\m s. capRange m = {ptr.. ptr + 2 ^ (APIType_capBits ty us) - 1}\" + apply (simp add:createObject_def) + apply (case_tac "ty") + apply (simp_all add:toAPIType_def AARCH64_H.toAPIType_def) + apply (rule hoare_pre) + apply wpc + apply wp + apply (simp add:split untypedRange.simps objBits_simps capRange_def APIType_capBits_def | wp)+ + apply (wpsimp simp: AARCH64_H.createObject_def capRange_def APIType_capBits_def + bit_simps acapClass.simps + | solves \simp split: if_split\)+ + done + +lemma createObject_capRange_helper: +assumes static: "\P\createObject ty ptr us dev \\m s. Q {ptr.. ptr + 2 ^ (APIType_capBits ty us) - 1} s\" +shows "\P\createObject ty ptr us dev \\m s. Q (capRange m) s\" + apply (rule hoare_pre) + apply (rule hoare_strengthen_post[OF hoare_vcg_conj_lift]) + apply (rule static) + apply (rule createObject_capRange) + apply simp + apply simp + done + +lemma createObject_caps_overlap_reserved': + "\\s. 
caps_overlap_reserved' S s \ + pspace_aligned' s \ + pspace_distinct' s \ pspace_no_overlap' ptr (APIType_capBits ty us) s \ + is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us < word_bits + \createObject ty ptr us dev \\rv. caps_overlap_reserved' S\" + apply (simp add:createObject_def3) + apply (wp createNewCaps_caps_overlap_reserved'[where sz = "APIType_capBits ty us"]) + apply (clarsimp simp:range_cover_full) + done + +lemma createObject_caps_overlap_reserved_ret': + "\\s. caps_overlap_reserved' {ptr..ptr + 2 ^ APIType_capBits ty us - 1} s \ + pspace_aligned' s \ + pspace_distinct' s \ pspace_no_overlap' ptr (APIType_capBits ty us) s \ + is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us < word_bits + \createObject ty ptr us dev \\rv. caps_overlap_reserved' (untypedRange rv)\" + apply (simp add:createObject_def3) + apply (rule hoare_pre) + apply wp + apply (rule_tac Q = "\r s. r \ [] \ Q r s" for Q in hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post[OF createNewCaps_ret_len]) + apply clarsimp + apply (rule hoare_strengthen_post[OF createNewCaps_caps_overlap_reserved_ret'[where sz = "APIType_capBits ty us"]]) + apply assumption + apply (rename_tac rv s) + apply (case_tac rv,simp) + apply clarsimp + apply (erule caps_overlap_reserved'_subseteq) + apply (rule untypedRange_in_capRange) + apply (clarsimp simp add:word_bits_conv range_cover_full) + done + +lemma createObject_descendants_range': + "\\s. descendants_range_in' {ptr..ptr + 2 ^ APIType_capBits ty us - 1} q (ctes_of s) \ + pspace_aligned' s \ + pspace_distinct' s \ pspace_no_overlap' ptr (APIType_capBits ty us) s \ + is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us < word_bits + \createObject ty ptr us dev \\rv s. descendants_range' rv q (ctes_of s)\" + apply (simp add:createObject_def3) + apply (rule hoare_pre) + apply wp + apply (rule_tac Q = "\r s. r \ [] \ Q r s" for Q in hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post[OF createNewCaps_ret_len]) + apply clarsimp + apply (rule hoare_strengthen_post[OF createNewCaps_descendants_range_ret'[where sz = "APIType_capBits ty us"]]) + apply assumption + apply fastforce + apply (clarsimp simp add:word_bits_conv range_cover_full) + done + +lemma createObject_descendants_range_in': + "\\s. descendants_range_in' S q (ctes_of s) \ + pspace_aligned' s \ + pspace_distinct' s \ pspace_no_overlap' ptr (APIType_capBits ty us) s \ + is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us < word_bits + \createObject ty ptr us dev \\rv s. descendants_range_in' S q (ctes_of s)\" + apply (simp add:createObject_def3 descendants_range_in'_def2) + apply (wp createNewCaps_null_filter') + apply clarsimp + apply (intro conjI) + apply simp + apply (simp add:range_cover_full) + done + +lemma createObject_idlethread_range: + "\\s. is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us < word_bits + \ ksIdleThread s \ {ptr..ptr + 2 ^ (APIType_capBits ty us) - 1}\ + createObject ty ptr us dev \\cap s. ksIdleThread s \ capRange cap\" + apply (simp add:createObject_def3) + apply (rule hoare_pre) + apply wp + apply (rule_tac Q = "\r s. 
r \ [] \ Q r s" for Q in hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post[OF createNewCaps_ret_len]) + apply clarsimp + apply (rule hoare_strengthen_post[OF createNewCaps_idlethread_ranges[where sz = "APIType_capBits ty us"]]) + apply assumption + apply clarsimp + apply (clarsimp simp:word_bits_conv range_cover_full) + done + +lemma caps_overlap_reserved_empty'[simp]: + "caps_overlap_reserved' {} s = True" + by (simp add:caps_overlap_reserved'_def) + +lemma createObject_IRQHandler: + "\\\ createObject ty ptr us dev + \\rv s. rv = IRQHandlerCap x \ P rv s x\" + apply (simp add:createObject_def3) + apply (rule hoare_pre) + apply wp + apply (rule_tac Q = "\r s. r \ [] \ Q r s" for Q in hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post[OF createNewCaps_ret_len]) + apply clarsimp + apply (rule hoare_strengthen_post[OF createNewCaps_IRQHandler[where irq = x and P = "\_ _. False"]]) + apply assumption + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) + apply (clarsimp simp:word_bits_conv) + done + +lemma createObject_capClass[wp]: + "\ \s. is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us < word_bits + \ createObject ty ptr us dev + \\rv s. capClass rv = PhysicalClass\" + apply (simp add:createObject_def3) + apply (rule hoare_pre) + apply wp + apply (rule_tac Q = "\r s. r \ [] \ Q r s" for Q in hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post[OF createNewCaps_ret_len]) + apply clarsimp + apply (rule hoare_strengthen_post[OF createNewCaps_range_helper]) + apply assumption + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) + apply (clarsimp simp:word_bits_conv ) + apply (rule range_cover_full) + apply (simp add:word_bits_conv)+ + done + +lemma createObject_child: + "\\s. + is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us < word_bits \ + {ptr .. ptr + (2^APIType_capBits ty us) - 1} \ (untypedRange cap) \ isUntypedCap cap + \ createObject ty ptr us dev + \\rv s. sameRegionAs cap rv\" + apply (rule hoare_assume_pre) + apply (simp add:createObject_def3) + apply wp + apply (rule hoare_chain [OF createNewCaps_range_helper[where sz = "APIType_capBits ty us"]]) + apply (fastforce simp:range_cover_full) + apply clarsimp + apply (drule_tac x = ptr in spec) + apply (case_tac "(capfn ptr)") + apply (simp_all add: capUntypedPtr_def sameRegionAs_def Let_def isCap_simps)+ + apply clarsimp+ + apply (rename_tac arch_capability d v0 v1 f) + apply (simp add: AARCH64_H.capUntypedSize_def bit_simps)+ + apply (auto simp: AARCH64_H.capUntypedSize_def add.commute[where b=ptr] + is_aligned_no_wrap' APIType_capBits_def bit_simps' + split: arch_capability.split if_split)+ + done + +lemma createObject_parent_helper: + "\\s. cte_wp_at' (\cte. isUntypedCap (cteCap cte) + \ {ptr .. ptr + (2^APIType_capBits ty us) - 1} \ untypedRange (cteCap cte)) p s \ + pspace_aligned' s \ + pspace_distinct' s \ + pspace_no_overlap' ptr (APIType_capBits ty us) s \ + is_aligned ptr (APIType_capBits ty us) \ APIType_capBits ty us < word_bits \ + (ty = APIObjectType apiobject_type.CapTableObject \ 0 < us) + \ + createObject ty ptr us dev + \\rv. cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ (sameRegionAs (cteCap cte) rv)) p\" + apply (rule hoare_post_imp [where Q="\rv s. \cte. 
cte_wp_at' ((=) cte) p s + \ isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) rv"]) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (wp hoare_vcg_ex_lift) + apply (rule hoare_vcg_conj_lift) + apply (simp add:createObject_def3) + apply (wp createNewCaps_cte_wp_at') + apply (wp createObject_child) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (intro conjI) + apply (erule range_cover_full) + apply simp + apply simp + done + +lemma insertNewCap_untypedRange: + "\\s. cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ P untypedRange (cteCap cte)) srcSlot s\ + insertNewCap srcSlot destSlot x + \\rv s. cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ P untypedRange (cteCap cte)) srcSlot s\" + apply (simp add:insertNewCap_def) + apply (wp updateMDB_weak_cte_wp_at setCTE_cte_wp_at_other getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma createObject_caps_no_overlap'': + " \\s. caps_no_overlap'' (ptr + (1 + of_nat n << APIType_capBits newType userSize)) + sz s \ + pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' (ptr + (of_nat n << APIType_capBits newType userSize)) (APIType_capBits newType userSize) s + \ is_aligned ptr (APIType_capBits newType userSize) + \ APIType_capBits newType userSize < word_bits\ + createObject newType (ptr + (of_nat n << APIType_capBits newType userSize)) userSize dev + \\rv s. caps_no_overlap'' (ptr + (1 + of_nat n << APIType_capBits newType userSize)) + sz s \" + apply (clarsimp simp:createObject_def3 caps_no_overlap''_def2) + apply (wp createNewCaps_null_filter') + apply clarsimp + apply (intro conjI) + apply simp + apply (rule range_cover_full) + apply (erule aligned_add_aligned) + apply (rule is_aligned_shiftl_self) + apply simp + apply simp + done + +lemma createObject_ex_cte_cap_wp_to: + "\\s. ex_cte_cap_wp_to' P p s \ is_aligned ptr (APIType_capBits ty us) \ pspace_aligned' s + \ pspace_distinct' s \ (APIType_capBits ty us) < word_bits \ pspace_no_overlap' ptr (APIType_capBits ty us) s \ + createObject ty ptr us dev + \\rv s. ex_cte_cap_wp_to' P p s \" + apply (clarsimp simp:ex_cte_cap_wp_to'_def createObject_def3) + apply (rule hoare_pre) + apply (wp hoare_vcg_ex_lift) + apply wps + apply (wp createNewCaps_cte_wp_at') + apply clarsimp + apply (intro exI conjI) + apply assumption + apply (rule range_cover_full) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply simp + apply simp + done + +lemma range_cover_one: + "\is_aligned (ptr :: 'a :: len word) us; us\ sz;sz < len_of TYPE('a)\ + \ range_cover ptr sz us (Suc 0)" + apply (clarsimp simp:range_cover_def) + apply (rule Suc_leI) + apply (rule unat_less_power) + apply simp + apply (rule shiftr_less_t2n) + apply simp + apply (rule le_less_trans[OF word_and_le1]) + apply (simp add:mask_def) + done + +lemma createObject_no_inter: +notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex +shows + "\\s. range_cover ptr sz (APIType_capBits newType userSize) (n + 2) \ ptr \ 0\ + createObject newType (ptr + (of_nat n << APIType_capBits newType userSize)) userSize dev + \\rv s. untypedRange rv \ + {ptr + (1 + of_nat n << APIType_capBits newType userSize) .. 
+ ptrend } = + {}\" + apply (rule createObject_untypedRange) + apply (clarsimp | wp)+ + apply (clarsimp simp: blah toAPIType_def APIType_capBits_def + AARCH64_H.toAPIType_def split: object_type.splits) + apply (clarsimp simp:shiftl_t2n field_simps) + apply (drule word_eq_zeroI) + apply (drule(1) range_cover_no_0[where p = "Suc n"]) + apply simp + apply (simp add:field_simps) + done + +lemma range_cover_bound'': + "\range_cover ptr sz us n; x < of_nat n\ + \ ptr + x * 2 ^ us + 2 ^ us - 1 \ (ptr && ~~ mask sz) + 2 ^ sz - 1" + apply (frule range_cover_cell_subset) + apply assumption + apply (drule(1) range_cover_subset_not_empty) + apply (clarsimp simp: field_simps) + done + +lemma caps_no_overlap''_cell: + "\range_cover ptr sz us n;caps_no_overlap'' ptr sz s;p < n\ + \ caps_no_overlap'' (ptr + (of_nat p << us)) us s" + apply (clarsimp simp:caps_no_overlap''_def) + apply (drule(1) bspec) + apply (subgoal_tac "{ptr + (of_nat p << us)..(ptr + (of_nat p << us) && ~~ mask us) + 2 ^ us - 1} + \ {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1}") + apply (erule impE) + apply (rule ccontr) + apply clarify + apply (drule(1) disjoint_subset2[rotated -1]) + apply simp + apply (erule subsetD)+ + apply simp + apply (subst is_aligned_neg_mask_eq) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (simp add:is_aligned_shiftl_self) + apply (simp add:range_cover_sz') + apply simp + apply (frule range_cover_cell_subset[where x = "of_nat p"]) + apply (rule word_of_nat_less) + apply (simp add:range_cover.unat_of_nat_n) + apply (simp add:shiftl_t2n field_simps) + done + +lemma caps_no_overlap''_le: + "\caps_no_overlap'' ptr sz s;us \ sz;sz < word_bits\ + \ caps_no_overlap'' ptr us s" + apply (clarsimp simp:caps_no_overlap''_def) + apply (drule(1) bspec) + apply (subgoal_tac "{ptr..(ptr && ~~ mask us) + 2 ^ us - 1} + \ {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1}") + apply (erule impE) + apply (rule ccontr) + apply clarify + apply (drule(1) disjoint_subset2[rotated -1]) + apply simp + apply (erule subsetD)+ + apply simp + apply clarsimp + apply (frule neg_mask_diff_bound[where ptr = ptr]) + apply (simp add:p_assoc_help) + apply (rule word_plus_mcs[where x = "2 ^ us - 1 + (ptr && ~~ mask sz)"]) + apply (simp add:field_simps) + apply (simp add:field_simps) + apply (simp add:p_assoc_help) + apply (rule word_plus_mono_right) + apply (simp add: word_bits_def) + apply (erule two_power_increasing) + apply simp + apply (rule is_aligned_no_overflow') + apply (simp add:is_aligned_neg_mask) + done + +lemma caps_no_overlap''_le2: + "\caps_no_overlap'' ptr sz s;ptr \ ptr'; ptr' && ~~ mask sz = ptr && ~~ mask sz\ + \ caps_no_overlap'' ptr' sz s" + apply (clarsimp simp:caps_no_overlap''_def) + apply (drule(1) bspec) + apply (subgoal_tac "{ptr'..(ptr' && ~~ mask sz) + 2 ^ sz - 1} + \ {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1}") + apply (erule impE) + apply (rule ccontr) + apply clarify + apply (drule(1) disjoint_subset2[rotated -1]) + apply simp + apply (erule subsetD)+ + apply simp + apply clarsimp + done + +lemma range_cover_head_mask: + "\range_cover (ptr :: machine_word) sz us (Suc n); ptr \ 0\ + \ ptr + (of_nat n << us) && ~~ mask sz = ptr && ~~ mask sz" + apply (case_tac n) + apply clarsimp + apply (clarsimp simp:range_cover_tail_mask) + done + +lemma pspace_no_overlap'_strg: + "pspace_no_overlap' ptr sz s \ sz' \ sz \ sz < word_bits \ pspace_no_overlap' ptr sz' s" + apply clarsimp + apply (erule(2) pspace_no_overlap'_le) + done + +lemma cte_wp_at_no_0: + "\invs' s; cte_wp_at' (\_. 
True) ptr s\ \ ptr \ 0" + by (clarsimp dest!:invs_mdb' simp:valid_mdb'_def valid_mdb_ctes_def no_0_def cte_wp_at_ctes_of) + +lemma insertNewCap_descendants_range_in': + "\\s. valid_pspace' s \ descendants_range_in' S p (ctes_of s) + \ capRange x \ S = {} + \ cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ sameRegionAs (cteCap cte) x) p s + \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) dslot s + \ descendants_range' x p (ctes_of s) \ capClass x = PhysicalClass + \ insertNewCap p dslot x + \\rv s. descendants_range_in' S p (ctes_of s)\" + apply (clarsimp simp:insertNewCap_def descendants_range_in'_def) + apply (wp getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (intro conjI allI) + apply (clarsimp simp:valid_pspace'_def valid_mdb'_def + valid_mdb_ctes_def no_0_def split:if_splits) + apply (clarsimp simp: descendants_of'_mdbPrev split:if_splits) + apply (cut_tac p = p and m = "ctes_of s" and parent = p and s = s + and parent_cap = "cteCap cte" and parent_node = "cteMDBNode cte" + and site = dslot and site_cap = capability.NullCap and site_node = "cteMDBNode ctea" + and c' = x + in mdb_insert_again_child.descendants) + apply (case_tac cte ,case_tac ctea) + apply (rule mdb_insert_again_child.intro[OF mdb_insert_again.intro]) + apply (simp add:mdb_ptr_def vmdb_def valid_pspace'_def valid_mdb'_def + mdb_ptr_axioms_def mdb_insert_again_axioms_def )+ + apply (intro conjI allI impI) + apply clarsimp + apply (erule(1) ctes_of_valid_cap') + apply (clarsimp simp:valid_mdb_ctes_def) + apply clarsimp + apply (rule mdb_insert_again_child_axioms.intro) + apply (clarsimp simp: nullPointer_def)+ + apply (clarsimp simp:isMDBParentOf_def valid_pspace'_def + valid_mdb'_def valid_mdb_ctes_def) + apply (frule(2) ut_revocableD'[rotated 1]) + apply (clarsimp simp:isCap_simps) + apply (clarsimp cong: if_cong) + done + +lemma insertNewCap_cte_wp_at_other: + "\cte_wp_at' (\cte. P (cteCap cte)) p and K (slot \ p)\ insertNewCap srcSlot slot x + \\rv. cte_wp_at' (\cte. P (cteCap cte)) p \" + apply (clarsimp simp:insertNewCap_def) + apply (wp updateMDB_weak_cte_wp_at setCTE_cte_wp_at_other getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma range_cover_bound3: + "\range_cover ptr sz us n; x < of_nat n\ + \ ptr + x * 2 ^ us + 2 ^ us - 1 \ ptr + (of_nat n) * 2 ^ us - 1" + apply (frule range_cover_subset[where p = "unat x"]) + apply (simp add:unat_less_helper) + apply (rule ccontr,simp) + apply (drule(1) range_cover_subset_not_empty) + apply (clarsimp simp: field_simps) + done + +lemma range_cover_gsMaxObjectSize: + "cte_wp_at' (\cte. 
cteCap cte = UntypedCap dev (ptr &&~~ mask sz) sz idx) srcSlot s + \ range_cover ptr sz (APIType_capBits newType userSize) (length destSlots) + \ valid_global_refs' s + \ unat num = length destSlots + \ unat (num << (APIType_capBits newType userSize) :: machine_word) \ gsMaxObjectSize s + \ 2 ^ APIType_capBits newType userSize \ gsMaxObjectSize s" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_global_refsD_with_objSize) + apply clarsimp + apply (rule conjI) + apply (frule range_cover.range_cover_compare_bound) + apply (drule range_cover.unat_of_nat_n_shift, rule order_refl) + apply (drule_tac s="unat num" in sym) + apply simp + apply (clarsimp simp: range_cover_def) + apply (erule order_trans[rotated]) + apply simp + done + +lemma APIType_capBits_min: + "(tp' = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ userSize) + \ 4 \ APIType_capBits tp' userSize" + by (simp add: APIType_capBits_def objBits_simps' bit_simps untypedBits_defs + split: object_type.split ArchTypes_H.apiobject_type.split if_split) + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma createNewCaps_1_gsCNodes_p: + "\\s. P (gsCNodes s p) \ p \ ptr\ createNewCaps newType ptr 1 n dev\\rv s. P (gsCNodes s p)\" + apply (simp add: createNewCaps_def) + apply (rule hoare_pre) + apply (wp mapM_x_wp' | wpc | simp add: createObjects_def)+ + done + +lemma createObject_gsCNodes_p: + "\\s. P (gsCNodes s p) \ p \ ptr\ createObject t ptr sz dev\\rv s. P (gsCNodes s p)\" + apply (simp add: createObject_def) + apply (rule hoare_pre) + apply (wp mapM_x_wp' | wpc | simp add: createObjects_def)+ + done + +lemma createObject_cnodes_have_size: + "\\s. is_aligned ptr (APIType_capBits newType userSize) + \ cnodes_retype_have_size R (APIType_capBits newType userSize) (gsCNodes s)\ + createObject newType ptr userSize dev + \\rv s. cnodes_retype_have_size R (APIType_capBits newType userSize) (gsCNodes s)\" + apply (simp add: createObject_def) + apply (rule hoare_pre) + apply (wp mapM_x_wp' | wpc | simp add: createObjects_def)+ + apply (cases newType, simp_all add: AARCH64_H.toAPIType_def) + apply (clarsimp simp: APIType_capBits_def objBits_simps' + cnodes_retype_have_size_def cte_level_bits_def + split: if_split_asm) + done + +crunches placeNewDataObject + for ksArchState[wp]: "\s. P (ksArchState s)" + (simp: crunch_simps) + +lemma createObject_cnodes_have_size_pt[unfolded o_def]: + "\\s. is_aligned ptr (APIType_capBits newType userSize) + \ cnodes_retype_have_size R (APIType_capBits newType userSize) (gsPTTypes (ksArchState s) ||> (\pt_t. pt_bits pt_t - cte_level_bits))\ + createObject newType ptr userSize dev + \\rv s. cnodes_retype_have_size R (APIType_capBits newType userSize) (gsPTTypes (ksArchState s) ||> (\pt_t. 
pt_bits pt_t - cte_level_bits))\" + supply fun_upd_apply[simp del] fun_upd_def[symmetric, simp] + apply (simp add: createObject_def) + apply (rule hoare_pre) + apply (wp mapM_x_wp' | wpc | simp add: createObjects_def AARCH64_H.createObject_def updatePTType_def)+ + apply (cases newType, simp_all add: AARCH64_H.toAPIType_def o_def) + apply (rule conjI, clarsimp)+ + apply clarsimp + supply fun_upd_def[symmetric, simp del] fun_upd_apply[simp] + apply (clarsimp simp: APIType_capBits_def cnodes_retype_have_size_def bit_simps cte_level_bits_def + split: if_split_asm)+ + done + +lemma range_cover_not_in_neqD: + "\ x \ {ptr..ptr + (of_nat n << APIType_capBits newType userSize) - 1}; + range_cover ptr sz (APIType_capBits newType userSize) n; n' < n \ + \ x \ ptr + (of_nat n' << APIType_capBits newType userSize)" + apply (clarsimp simp only: shiftl_t2n mult.commute) + apply (erule notE, rule subsetD, erule_tac p=n' in range_cover_subset) + apply simp+ + apply (rule is_aligned_no_overflow) + apply (rule aligned_add_aligned) + apply (erule range_cover.aligned) + apply (simp add: is_aligned_mult_triv2) + apply simp + done + +crunch gsMaxObjectSize[wp]: createObject "\s. P (gsMaxObjectSize s)" + (simp: crunch_simps unless_def wp: crunch_wps) + +end + +context kernel_m begin + +lemma ceqv_restore_as_guard: + "ceqv Gamma xf' rv' t t' d (Guard C_Guard {s. xf' s = rv'} d)" + apply (simp add: ceqv_def) + apply (auto elim!: exec_Normal_elim_cases intro: exec.Guard) + done + +lemma insertNewCap_preserves_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call insertNewCap_'proc + {t. hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s)) + \ byte_regions_unmodified' s t}" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply (rule allI, rule conseqPre, vcg exspec=mdb_node_ptr_set_mdbPrev_preserves_bytes + exspec=mdb_node_ptr_set_mdbNext_preserves_bytes + exspec=mdb_node_get_mdbNext_modifies exspec=mdb_node_new_modifies) + apply (safe intro!: byte_regions_unmodified_hrs_mem_update + elim!: byte_regions_unmodified_trans byte_regions_unmodified_trans[rotated], + simp_all add: h_t_valid_field) + done + +lemma byte_regions_unmodified_flip_eq: + "byte_regions_unmodified hrs' hrs + \ hrs_htd hrs' = hrs_htd hrs + \ byte_regions_unmodified hrs hrs'" + by (simp add: byte_regions_unmodified_def) + +lemma insertNewCap_preserves_bytes_flip: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call insertNewCap_'proc + {t. hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s)) + \ byte_regions_unmodified' t s}" + by (rule allI, rule conseqPost, + rule insertNewCap_preserves_bytes[rule_format], + auto elim: byte_regions_unmodified_flip_eq) + +lemma hrs_htd_update_canon: + "hrs_htd_update (\_. f (hrs_htd hrs)) hrs = hrs_htd_update f hrs" + by (cases hrs, simp add: hrs_htd_update_def hrs_htd_def) + +lemma Arch_createObject_preserves_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call Arch_createObject_'proc + {t. \nt. t_' s = object_type_from_H nt + \ (\x \ - {ptr_val (regionBase_' s) ..+ 2 ^ getObjectSize nt (unat (userSize_' s))}. 
+ hrs_htd (t_hrs_' (globals t)) x = hrs_htd (t_hrs_' (globals s)) x) + \ byte_regions_unmodified' t s}" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply clarsimp + apply (rule conseqPre, + vcg exspec=cap_frame_cap_new_modifies + exspec=cap_page_table_cap_new_modifies + exspec=addrFromPPtr_modifies + exspec=cap_vcpu_cap_new_modifies + ) + apply (safe intro!: byte_regions_unmodified_hrs_mem_update, + (simp_all add: h_t_valid_field hrs_htd_update)+) + apply (safe intro!: ptr_retyp_d ptr_retyps_out) + apply (simp_all add: object_type_from_H_def Kernel_C_defs APIType_capBits_def + bit_simps + split: object_type.split_asm ArchTypes_H.apiobject_type.split_asm) + apply (simp add: Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) (* FIXME AARCH64: from bit_simps above *) + apply (drule intvlD) + apply clarsimp + apply (erule notE, rule intvlI) + apply (simp add: vcpuBits_def pageBits_def) + done + +lemma ptr_arr_retyps_eq_outside_dom: + "x \ {ptr_val (p :: 'a ptr) ..+ n * size_of TYPE ('a :: wf_type)} + \ ptr_arr_retyps n p htd x = htd x" + by (simp add: ptr_arr_retyps_def htd_update_list_same2) + +context begin + +private abbreviation + "preserves_bytes_inv P s \ + {t. P s \ P t \ hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s)) + \ byte_regions_unmodified' t s}" + +private lemma preserves_bytes_modifies_inv_prop: + "modifies_inv_prop (preserves_bytes_inv P)" + by (clarsimp simp: modifies_inv_prop_def modifies_inv_refl_def modifies_inv_incl_def + byte_regions_unmodified_def) + +private abbreviation + "registers_Ptr_valid \ \s. s \\<^sub>c registers_Ptr &(context_' s\[''registers_C''])" + +private lemmas registers_modifies_inv_intros = + modifies_inv_intros[OF preserves_bytes_modifies_inv_prop[where P="registers_Ptr_valid"]] + +private method preserves_bytes_inv methods vcg = + (hoare_rule HoarePartial.ProcNoRec1; + intro allI registers_modifies_inv_intros; + clarsimp; + (rule conseqPre, vcg); + clarsimp; + rule byte_regions_unmodified_hrs_mem_update; + clarsimp simp: typ_heap_simps) + +lemma Arch_initContext_preserves_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call Arch_initContext_'proc (preserves_bytes_inv registers_Ptr_valid s)" + by (preserves_bytes_inv \vcg\) + +end + +lemma createObject_preserves_bytes: + "\s. \\\<^bsub>/UNIV\<^esub> {s} Call createObject_'proc + {t. \nt. t_' s = object_type_from_H nt + \ (\x \ - {ptr_val (regionBase_' s) ..+ 2 ^ getObjectSize nt (unat (userSize_' s))}. 
+ hrs_htd (t_hrs_' (globals t)) x = hrs_htd (t_hrs_' (globals s)) x) + \ byte_regions_unmodified' t s}" + apply (hoare_rule HoarePartial.ProcNoRec1) + apply clarsimp + apply (rule conseqPre, + vcg exspec=Arch_createObject_preserves_bytes + exspec=cap_thread_cap_new_modifies + exspec=cap_endpoint_cap_new_modifies + exspec=cap_notification_cap_new_modifies + exspec=cap_cnode_cap_new_modifies + exspec=cap_untyped_cap_new_modifies + exspec=Arch_initContext_preserves_bytes) + apply (safe intro!: byte_regions_unmodified_hrs_mem_update, + simp_all add: h_t_valid_field hrs_htd_update) + apply (safe intro!: ptr_retyp_d ptr_retyps_out trans[OF ptr_retyp_d ptr_retyp_d] + ptr_arr_retyps_eq_outside_dom) + apply (simp_all add: object_type_from_H_def Kernel_C_defs APIType_capBits_def + objBits_simps' cte_C_size power_add ctcb_offset_def ctcb_size_bits_def + byte_regions_unmodified_def + split: object_type.split_asm ArchTypes_H.apiobject_type.split_asm) + apply (erule notE, erule subsetD[rotated], rule intvl_start_le intvl_sub_offset, simp)+ + done + +lemma offset_intvl_first_chunk_subsets: + "range_cover (p :: addr) sz bits n + \ i < of_nat n + \ {p + (i << bits) ..+ 2 ^ bits} \ {p + (i << bits) ..+ (n - unat i) * 2 ^ bits} + \ {p + ((i + 1) << bits) ..+ (n - unat (i + 1)) * 2 ^ bits} + \ {p + (i << bits) ..+ (n - unat i) * 2 ^ bits} + \ {p + (i << bits) ..+ 2 ^ bits} + \ {p + ((i + 1) << bits) ..+ (n - unat (i + 1)) * 2 ^ bits} + = {}" + apply (strengthen intvl_start_le) + apply (strengthen order_trans[OF _ + intvl_sub_offset[where x="2 ^ bits" and y="(n - unat (i + 1)) * 2 ^ bits"]]) + apply (frule range_cover_sz') + apply (cut_tac n=i in unatSuc) + apply unat_arith + apply (simp add: word_shiftl_add_distrib field_simps TWO) + apply (simp add: mult_Suc[symmetric] del: mult_Suc) + apply (frule unat_less_helper) + apply (cut_tac p="p + (i << bits)" and k="2 ^ bits" + and z="(n - unat (i + 1)) * 2 ^ bits" in init_intvl_disj) + apply (simp add: field_simps) + apply (drule range_cover.strong_times_64, simp) + apply (simp add: addr_card_def word_bits_def card_word) + apply (erule order_le_less_trans[rotated]) + apply (simp add: mult_Suc[symmetric] del: mult_Suc) + apply (simp add: Int_commute field_simps) + apply unat_arith + done + +lemma offset_intvl_first_chunk_subsets_unat: + "range_cover (p :: addr) sz bits n + \ unat n' = n + \ i < of_nat n + \ {p + (i << bits) ..+ 2 ^ bits} \ {p + (i << bits) ..+ unat (n' - i) * 2 ^ bits} + \ {p + ((i + 1) << bits) ..+ unat (n' - (i + 1)) * 2 ^ bits} + \ {p + (i << bits) ..+ unat (n' - i) * 2 ^ bits} + \ {p + (i << bits) ..+ 2 ^ bits} + \ {p + ((i + 1) << bits) ..+ unat (n' - (i + 1)) * 2 ^ bits} + = {}" + apply (subgoal_tac "unat (n' - (i + 1)) = unat n' - unat (i + 1) + \ unat (n' - i) = unat n' - unat i") + apply (frule(1) offset_intvl_first_chunk_subsets) + apply simp + apply (intro conjI unat_sub) + apply (rule word_minus_one_le_leq, simp) + apply (simp add: word_less_nat_alt unat_of_nat) + apply (simp add: word_le_nat_alt word_less_nat_alt unat_of_nat) + done + +lemma retype_offs_region_actually_is_zero_bytes: + "\ ctes_of s p = Some cte; (s, s') \ rf_sr; untyped_ranges_zero' s; + cteCap cte = UntypedCap False (ptr &&~~ mask sz) sz idx; + idx \ unat (ptr && mask sz); + range_cover ptr sz (getObjectSize newType userSize) num_ret \ + \ region_actually_is_zero_bytes ptr + (num_ret * 2 ^ APIType_capBits newType userSize) s'" + using word_unat_mask_lt[where w=ptr and m=sz] + apply - + apply (frule range_cover.sz(1)) + apply (drule(2) ctes_of_untyped_zero_rf_sr) + 
apply (simp add: untypedZeroRange_def max_free_index_def word_size) + apply clarify + apply (strengthen heap_list_is_zero_mono2[mk_strg I E] + region_actually_is_bytes_subset[mk_strg I E]) + apply (simp add: getFreeRef_def word_size) + apply (rule intvl_both_le) + apply (rule order_trans, rule word_plus_mono_right, erule word_of_nat_le) + apply (simp add: word_plus_and_or_coroll2 add.commute word_and_le2) + apply (simp add: word_plus_and_or_coroll2 add.commute) + apply (subst unat_plus_simple[THEN iffD1], rule is_aligned_no_wrap', + rule is_aligned_neg_mask2) + apply (rule word_of_nat_less, simp) + apply (simp add: unat_of_nat_eq[OF order_less_trans, OF _ power_strict_increasing[where n=sz]] + unat_sub[OF word_of_nat_le]) + apply (subst word_plus_and_or_coroll2[where x=ptr and w="mask sz", symmetric]) + apply (subst unat_plus_simple[THEN iffD1], + simp add: word_plus_and_or_coroll2 add.commute word_and_le2) + apply simp + apply (rule order_trans[rotated], erule range_cover.range_cover_compare_bound) + apply simp + done + +lemma createNewCaps_valid_cap_hd: + "\\s. pspace_no_overlap' ptr sz s \ + valid_pspace' s \ n \ 0 \ + sz \ maxUntypedSizeBits \ canonical_address ptr \ + range_cover ptr sz (APIType_capBits ty us) n \ + (ty = APIObjectType ArchTypes_H.CapTableObject \ 0 < us) \ + (ty = APIObjectType ArchTypes_H.apiobject_type.Untyped \ + minUntypedSizeBits \ us \ us \ maxUntypedSizeBits) \ + ptr \ 0 \ + createNewCaps ty ptr n us dev + \\r s. s \' hd r\" + apply (cases "n = 0") + apply simp + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule createNewCaps_ret_len) + apply (rule createNewCaps_valid_cap'[where sz=sz]) + apply (clarsimp simp: range_cover_n_wb canonical_address_and) + apply simp + done + +lemma insertNewCap_ccorres: + "ccorres dc xfdc + (pspace_aligned' and pspace_canonical' and valid_mdb' and cte_wp_at' (\_. True) slot + and valid_objs' and valid_cap' cap) + ({s. cap_get_tag (cap_' s) = scast cap_untyped_cap + \ (case untypedZeroRange (cap_to_H (the (cap_lift (cap_' s)))) of None \ True + | Some (a, b) \ region_actually_is_zero_bytes a (unat ((b + 1) - a)) s)} + \ {s. ccap_relation cap (cap_' s)} \ {s. parent_' s = Ptr parent} + \ {s. slot_' s = Ptr slot}) [] + (insertNewCap parent slot cap) + (Call insertNewCap_'proc)" + (is "ccorres _ _ ?P ?P' _ _ _") + apply (rule ccorres_guard_imp2, rule insertNewCap_ccorres1) + apply (clarsimp simp: cap_get_tag_isCap) + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (simp add: untypedZeroRange_def Let_def) + done + +lemma createObject_untyped_region_is_zero_bytes: + "\\. \\\<^bsub>/UNIV\<^esub> {s. let tp = (object_type_to_H (t_' s)); + sz = APIType_capBits tp (unat (userSize_' s)) + in (\ to_bool (deviceMemory_' s) + \ region_actually_is_zero_bytes (ptr_val (regionBase_' s)) (2 ^ sz) s) + \ canonical_address (ptr_val (regionBase_' s)) + \ sz < 64 \ (tp = APIObjectType ArchTypes_H.apiobject_type.Untyped \ sz \ minUntypedSizeBits)} + Call createObject_'proc + {t. 
cap_get_tag (ret__struct_cap_C_' t) = scast cap_untyped_cap + \ (case untypedZeroRange (cap_to_H (the (cap_lift (ret__struct_cap_C_' t)))) of None \ True + | Some (a, b) \ region_actually_is_zero_bytes a (unat ((b + 1) - a)) t)}" + apply (rule allI, rule conseqPre, vcg exspec=Arch_initContext_modifies) + apply (clarsimp simp: cap_tag_defs Let_def) + apply (simp add: cap_lift_untyped_cap cap_tag_defs cap_to_H_simps + cap_untyped_cap_lift_def object_type_from_H_def) + apply (simp add: untypedZeroRange_def split: if_split) + apply (clarsimp simp: getFreeRef_def Let_def object_type_to_H_def APIType_capBits_def + less_mask_eq word_less_nat_alt canonical_address_and_maskD) + done + +lemma range_cover_n_le': + "range_cover ptr sz sbit n \ 2 ^ sbit * n \ 2 ^ sz" + "range_cover ptr sz sbit n \ n \ 2 ^ sz" + unfolding atomize_conj atomize_imp + apply (rule context_conjI, rule impI) + apply (rule nat_le_power_trans, erule range_cover.range_cover_n_le, erule range_cover.sz) + apply (erule tfl_imp_trans, rule impI) + apply (erule le_trans[rotated]) + apply (rule rsubst[of "\r. r \ 2 ^ sbit * n", OF _ nat_mult_1]) + apply (rule mult_le_mono1, rule one_le_power, simp) + done + +lemma createNewObjects_ccorres: +notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex +and hoare_TrueI[simp add] +defines "unat_eq a b \ unat a = b" +shows "ccorres dc xfdc + (invs' and sch_act_simple and ct_active' + and (cte_wp_at' (\cte. cteCap cte = UntypedCap isdev (ptr && ~~ mask sz) sz idx) srcSlot) + and (\s. \slot\set destSlots. cte_wp_at' (\c. cteCap c = NullCap) slot s) + and (\s. \slot\set destSlots. ex_cte_cap_wp_to' (\_. True) slot s) + and (\s. \n. gsCNodes s cnodeptr = Some n \ unat start + length destSlots \ 2 ^ n) + and (pspace_no_overlap' ptr sz) + and caps_no_overlap'' ptr sz + and caps_overlap_reserved' {ptr .. ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1} + and (\s. descendants_range_in' {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1} srcSlot (ctes_of s)) + and cnodes_retype_have_size {ptr .. ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1} + (APIType_capBits newType userSize) o gsCNodes + and (\s. cnodes_retype_have_size {ptr .. ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1} + (APIType_capBits newType userSize) (gsPTTypes (ksArchState s) ||> (\pt_t. ptBits pt_t - cte_level_bits))) + and (K (srcSlot \ set destSlots + \ destSlots \ [] + \ range_cover ptr sz (getObjectSize newType userSize) (length destSlots) + \ ptr \ 0 + \ sz \ maxUntypedSizeBits + \ APIType_capBits newType userSize \ maxUntypedSizeBits + \ canonical_address (ptr && ~~ mask sz) + \ {ptr .. ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1} + \ kernel_data_refs = {} + \ cnodeptr \ {ptr .. ptr + (of_nat (length destSlots) << APIType_capBits newType userSize) - 1} + \ 0 \ {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1} + \ is_aligned ptr 4 + \ (newType = APIObjectType apiobject_type.Untyped \ userSize \ maxUntypedSizeBits) + \ (newType = APIObjectType apiobject_type.CapTableObject \ userSize < 59) + \ (newType = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ userSize) + \ (newType = APIObjectType apiobject_type.CapTableObject \ 0 < userSize) + \ (isdev \ newType = APIObjectType ArchTypes_H.apiobject_type.Untyped \ + isFrameType newType) + \ (unat num = length destSlots) + ))) + ({s. 
(\ isdev \ region_actually_is_zero_bytes ptr + (length destSlots * 2 ^ APIType_capBits newType userSize) s)} + \ {s. t_' s = object_type_from_H newType} + \ {s. parent_' s = cte_Ptr srcSlot} + \ {s. destCNode_' s = cte_Ptr cnodeptr} + \ {s. destOffset_' s = start \ (\n. n < length destSlots \ destSlots ! n = cnodeptr + ((start + of_nat n) * 0x20))} + \ {s. destLength_' s = num \ unat num \ 0} + \ {s. regionBase_' s = Ptr ptr } + \ {s. unat_eq (userSize_' s) userSize} + \ {s. deviceMemory_' s = from_bool isdev} + ) [] + (createNewObjects newType srcSlot destSlots ptr userSize isdev) + (Call createNewObjects_'proc)" + unfolding from_bool_to_bool_iff + supply if_cong[cong] + apply (rule ccorres_gen_asm_state) + apply clarsimp + apply (subgoal_tac "unat (of_nat (getObjectSize newType userSize)) = getObjectSize newType userSize") + prefer 2 + apply (rule unat_of_nat64) + apply (rule less_le_trans[OF getObjectSize_max_size]; clarsimp simp: word_bits_def untypedBits_defs) + apply (subgoal_tac "\n < length destSlots. canonical_address (ptr + (of_nat n << APIType_capBits newType userSize))") + prefer 2 subgoal by (simp add: shiftl_t2n field_simps range_cover_canonical_address) + apply (cinit lift: t_' parent_' destCNode_' destOffset_' destLength_' regionBase_' userSize_' deviceMemory_') + apply (rule ccorres_rhs_assoc2)+ + apply (rule ccorres_rhs_assoc) + apply (rule_tac Q' = "Q' + \ {s. objectSize_' s = of_nat (APIType_capBits newType userSize)} + \ {s. nextFreeArea_' s = Ptr ptr } " + and R="(\s. unat (num << (APIType_capBits newType userSize) :: machine_word) + \ gsMaxObjectSize s) and R''" + for Q' R'' in ccorres_symb_exec_r) + apply (rule ccorres_guard_imp[where A="X and Q" + and A'=Q' and Q=Q and Q'=Q' for X Q Q', rotated] + (* this moves the gsMaxObjectSize bit into the ccorres_symb_exec_r + vcg proof *)) + apply clarsimp + apply clarsimp + apply (cinitlift objectSize_' nextFreeArea_') + apply simp + apply (clarsimp simp: whileAnno_def) + apply (rule ccorres_rel_imp) + apply (rule_tac Q="{s. \ isdev \ region_actually_is_zero_bytes + (ptr + (i_' s << APIType_capBits newType userSize)) + (unat (num - i_' s) * 2 ^ APIType_capBits newType userSize) s}" + in ccorres_zipWithM_x_while_genQ[where j=1, OF _ _ _ _ _ i_xf_for_sequence, simplified]) + apply clarsimp + apply (subst upt_enum_offset_trivial) + apply (rule word_leq_le_minus_one) + apply (rule word_of_nat_le) + apply (drule range_cover.range_cover_n_less) + apply (simp add:word_bits_def minus_one_norm) + apply (erule range_cover_not_zero[rotated],simp) + apply simp + apply (rule ccorres_guard_impR) + apply (rule_tac xf'=i_' in ccorres_abstract, ceqv) + apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2, simp) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_add_return) + apply (simp only: hrs_htd_update) + apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? + apply (rule ccorres_split_nothrow, + rule_tac S="{ptr .. 
ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1}" + in ccorres_typ_region_bytes_dummy, ceqv) + apply (rule ccorres_Guard_Seq)+ + apply (ctac add:createObject_ccorres) + apply (rule ccorres_move_array_assertion_cnode_ctes + ccorres_move_c_guard_cte)+ + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: insertNewCap_ccorres) + apply (rule ccorres_move_array_assertion_cnode_ctes + ccorres_return_Skip')+ + apply wp + apply (clarsimp simp: createObject_def3 conj_ac) + apply (wp createNewCaps_valid_pspace_extras[where sz = sz] + createNewCaps_cte_wp_at[where sz = sz] + createNewCaps_valid_cap_hd[where sz = sz]) + apply (rule range_cover_one) + apply (rule aligned_add_aligned[OF is_aligned_shiftl_self]) + apply (simp add:range_cover.aligned) + apply (simp add:range_cover_def) + apply (simp add:range_cover_def) + apply (simp add:range_cover_def) + apply (simp add:range_cover.sz) + apply (wp createNewCaps_1_gsCNodes_p[simplified] + createNewCaps_cte_wp_at'[where sz=sz])[1] + apply clarsimp + apply (vcg exspec=createObject_untyped_region_is_zero_bytes) + apply (simp add:size_of_def) + apply (rule_tac P = "\s. cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + {ptr .. ptr + (of_nat (length destSlots)<< APIType_capBits newType userSize) - 1} \ untypedRange (cteCap cte)) srcSlot s + \ pspace_no_overlap' ((of_nat n << APIType_capBits newType userSize) + ptr) sz s + \ caps_no_overlap'' ((of_nat n << APIType_capBits newType userSize) + ptr) sz s + \ caps_overlap_reserved' {(of_nat n << APIType_capBits newType userSize) + + ptr.. ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1 } s + \ kernel_data_refs \ {ptr .. ptr + (of_nat (length destSlots) << APIType_capBits newType userSize) - 1} = {} + \ (\n < length destSlots. cte_at' (cnodeptr + (start * 0x20 + of_nat n * 0x20)) s + \ ex_cte_cap_wp_to' (\_. True) (cnodeptr + (start * 0x20 + of_nat n * 0x20)) s) + \ invs' s + \ 2 ^ APIType_capBits newType userSize \ gsMaxObjectSize s + \ (\cn. gsCNodes s cnodeptr = Some cn \ unat start + length destSlots \ 2 ^ cn) + \ cnodeptr \ {ptr .. ptr + (of_nat (length destSlots)<< APIType_capBits newType userSize) - 1} + \ (\k < length destSlots - n. + cte_wp_at' (\c. cteCap c = NullCap) + (cnodeptr + (of_nat k * 0x20 + start * 0x20 + of_nat n * 0x20)) s) + \ descendants_range_in' {(of_nat n << APIType_capBits newType userSize) + + ptr.. 
(ptr && ~~ mask sz) + 2 ^ sz - 1} srcSlot (ctes_of s)" + in hoare_pre(1)) + apply wp + apply (clarsimp simp:createObject_hs_preconds_def conj_comms add.commute[where b=ptr] + invs_valid_pspace' invs_pspace_distinct' invs_pspace_aligned' + invs_ksCurDomain_maxDomain') + apply (subst intvl_range_conv) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + subgoal by (simp add:is_aligned_shiftl_self) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems + by (simp_all add: range_cover_sz'[where 'a=machine_word_len, folded word_bits_def] + word_bits_def range_cover_def) + apply (simp add: range_cover_not_in_neqD canonical_address_and) + apply (intro conjI) + apply (drule_tac p = n in range_cover_no_0) + apply (simp add:shiftl_t2n mult.commute)+ + apply (cut_tac x=num in unat_lt2p, simp) + apply (simp add: unat_arith_simps unat_of_nat, simp split: if_split) + apply (intro impI, erule order_trans[rotated], simp) + apply (erule pspace_no_overlap'_le) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems + by (simp add:range_cover.sz[where 'a=machine_word_len, folded word_bits_def])+ + subgoal + apply (rule range_cover_one) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (simp add: is_aligned_shiftl_self) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems + by (simp add: range_cover_sz'[where 'a=machine_word_len, folded word_bits_def] + range_cover.sz[where 'a=machine_word_len, folded word_bits_def])+ + apply (simp add: word_bits_def range_cover_def) + done + apply (rule range_cover_one) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (simp add: is_aligned_shiftl_self) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems + by (simp add: range_cover_sz'[where 'a=machine_word_len, folded word_bits_def] + range_cover.sz[where 'a=machine_word_len, folded word_bits_def])+ + apply (simp add: word_bits_def range_cover_def) +(* + apply (rule range_cover_full) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (simp add:is_aligned_shiftl_self) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems + by (simp add: range_cover_sz'[where 'a=machine_word_len, folded word_bits_def] + range_cover.sz[where 'a=machine_word_len, folded word_bits_def])+ +*) + apply (erule caps_overlap_reserved'_subseteq) + apply (frule_tac x="of_nat n" in range_cover_bound3) + apply (rule word_of_nat_less) + apply (simp add: range_cover.unat_of_nat_n) + apply (clarsimp simp: shiftl_t2n blah mult.commute) + apply (erule disjoint_subset[rotated]) + apply (rule_tac p1 = n in subset_trans[OF _ range_cover_subset]) + apply (simp add: upto_intvl_eq is_aligned_add range_cover.aligned is_aligned_shiftl) + apply (simp add: shiftl_t2n mult.commute) + apply simp+ + apply (erule caps_overlap_reserved'_subseteq) + apply (frule_tac x = "of_nat n" in range_cover_bound3) + apply (rule word_of_nat_less) + apply (simp add: range_cover.unat_of_nat_n) + apply (clarsimp simp: shiftl_t2n blah mult.commute) + apply (clarsimp simp: createObject_c_preconds_def add.commute[where b=ptr] from_bool_to_bool_iff + cong: region_is_bytes_cong) + apply vcg + apply (clarsimp simp: cte_C_size conj_comms untypedBits_defs) + apply (simp cong: conj_cong) + apply (intro conjI impI) + apply (simp add: unat_eq_def) + apply (drule range_cover_sz') + apply (simp add: unat_eq_def word_less_nat_alt) + apply (simp add: hrs_htd_update typ_region_bytes_actually_is_bytes) + apply 
clarsimp + apply (erule heap_list_is_zero_mono) + apply (subgoal_tac "unat (num - of_nat n) \ 0") + apply simp + apply (simp only: unat_eq_0, clarsimp simp: unat_of_nat) + apply (frule range_cover_sz') + apply (clarsimp simp: Let_def hrs_htd_update + APIType_capBits_def[where ty="APIObjectType ArchTypes_H.apiobject_type.Untyped"]) + apply (simp, subst range_cover.unat_of_nat_n) + apply (erule range_cover_le) + subgoal by simp + subgoal by (simp add:word_unat.Rep_inverse') + apply clarsimp + apply (rule conseqPre, vcg exspec=insertNewCap_preserves_bytes_flip + exspec=createObject_preserves_bytes) + apply (clarsimp simp del: imp_disjL) + apply (frule(1) offset_intvl_first_chunk_subsets_unat, + erule order_less_le_trans) + apply (drule range_cover.weak) + apply (simp add: word_le_nat_alt unat_of_nat) + + apply (drule spec, drule mp, rule refl[where t="object_type_from_H newType"]) + apply clarsimp + apply (rule context_conjI) + apply (simp add: hrs_htd_update) + apply (simp add: region_actually_is_bytes'_def, rule ballI) + apply (drule bspec, erule(1) subsetD) + apply (drule(1) orthD2) + apply (simp add: Ball_def unat_eq_def typ_bytes_region_out) + apply (erule trans[OF heap_list_h_eq2 heap_list_is_zero_mono2, rotated]) + apply (simp add: word_shiftl_add_distrib field_simps) + apply (rule sym, rule byte_regions_unmodified_region_is_bytes) + apply (erule byte_regions_unmodified_trans, simp_all)[1] + apply (simp add: byte_regions_unmodified_def) + apply simp + apply assumption + + apply (clarsimp simp:conj_comms field_simps + createObject_hs_preconds_def range_cover_sz') + apply (subgoal_tac "is_aligned (ptr + (1 + of_nat n << APIType_capBits newType userSize)) + (APIType_capBits newType userSize)") + prefer 2 + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (rule is_aligned_shiftl_self) + apply (simp) + apply (simp add: range_cover_one[OF _ range_cover.sz(2) range_cover.sz(1)]) + including no_pre + apply (wp insertNewCap_invs' insertNewCap_valid_pspace' insertNewCap_caps_overlap_reserved' + insertNewCap_pspace_no_overlap' insertNewCap_caps_no_overlap'' insertNewCap_descendants_range_in' + insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at hoare_weak_lift_imp) + apply (wp insertNewCap_cte_wp_at_other) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp insertNewCap_cte_at) + apply (clarsimp simp:conj_comms | + strengthen invs_valid_pspace' invs_pspace_aligned' + invs_pspace_distinct')+ + apply (frule range_cover.range_cover_n_less) + apply (subst upt_enum_offset_trivial) + apply (rule word_leq_le_minus_one[OF word_of_nat_le]) + apply (fold_subgoals (prefix))[3] + subgoal premises prems using prems + by (simp add:word_bits_conv minus_one_norm range_cover_not_zero[rotated])+ + apply (simp add: intvl_range_conv aligned_add_aligned[OF range_cover.aligned] + is_aligned_shiftl_self range_cover_sz') + apply (subst intvl_range_conv) + apply (erule aligned_add_aligned[OF range_cover.aligned]) + apply (rule is_aligned_shiftl_self, rule le_refl) + apply (erule range_cover_sz') + apply (subst intvl_range_conv) + apply (erule aligned_add_aligned[OF range_cover.aligned]) + apply (rule is_aligned_shiftl_self, rule le_refl) + apply (erule range_cover_sz') + apply (rule hoare_pre) + apply (strengthen pspace_no_overlap'_strg[where sz = sz]) + apply (clarsimp simp:range_cover.sz conj_comms) + apply (wp createObject_invs' + createObject_caps_overlap_reserved_ret' createObject_valid_cap' + createObject_descendants_range' createObject_idlethread_range + hoare_vcg_all_lift 
createObject_IRQHandler createObject_parent_helper + createObject_caps_overlap_reserved' createObject_caps_no_overlap'' + createObject_pspace_no_overlap' createObject_cte_wp_at' + createObject_ex_cte_cap_wp_to createObject_descendants_range_in' + createObject_caps_overlap_reserved' + hoare_vcg_prop createObject_gsCNodes_p createObject_cnodes_have_size) + apply (rule hoare_vcg_conj_lift[OF createObject_capRange_helper]) + apply (wp createObject_cte_wp_at' createObject_ex_cte_cap_wp_to + createObject_cnodes_have_size_pt + createObject_no_inter[where sz = sz] hoare_vcg_all_lift hoare_weak_lift_imp)+ + apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace' + field_simps range_cover.sz conj_comms range_cover.aligned range_cover_sz' + is_aligned_shiftl_self aligned_add_aligned[OF range_cover.aligned]) + apply (drule_tac x = n and P = "\x. x< length destSlots \ Q x" for Q in spec)+ + apply clarsimp + apply (simp add: range_cover_not_in_neqD) + apply (intro conjI) + subgoal by (simp add: word_bits_def range_cover_def) + subgoal by (clarsimp simp: cte_wp_at_ctes_of invs'_def valid_state'_def + valid_global_refs'_def cte_at_valid_cap_sizes_0) + subgoal by (erule range_cover_le, simp) + subgoal by (simp add: range_cover_no_0 shiftl_t2n field_simps) + apply (erule caps_no_overlap''_le) + apply (simp add:range_cover.sz[where 'a=machine_word_len, folded word_bits_def])+ + apply (erule caps_no_overlap''_le2) + apply (erule range_cover_compare_offset,simp+) + apply (simp add: range_cover_tail_mask[OF range_cover_le] + range_cover_head_mask[OF range_cover_le]) + subgoal by (clarsimp simp: APIType_capBits_def objBits_simps' untypedBits_defs) + apply (rule contra_subsetD) + apply (rule order_trans[rotated], erule range_cover_cell_subset, + erule of_nat_mono_maybe[rotated], simp) + apply (simp add: upto_intvl_eq shiftl_t2n mult.commute + aligned_add_aligned[OF range_cover.aligned is_aligned_mult_triv2]) + subgoal by simp + apply (rule disjoint_subset2[where B="{ptr .. 
foo}" for foo, rotated], simp add: Int_commute) + apply (rule order_trans[rotated], erule_tac p="Suc n" in range_cover_subset, simp+) + subgoal by (simp add: upto_intvl_eq shiftl_t2n mult.commute + aligned_add_aligned[OF range_cover.aligned is_aligned_mult_triv2]) + apply (simp add:cte_wp_at_no_0) + apply (erule caps_overlap_reserved'_subseteq) + subgoal by (clarsimp simp:range_cover_compare_offset blah) + apply (erule descendants_range_in_subseteq') + subgoal by (clarsimp simp:range_cover_compare_offset blah) + apply (drule_tac x = 0 in spec) + subgoal by simp + apply (erule caps_overlap_reserved'_subseteq) + apply (clarsimp simp:range_cover_compare_offset blah) + apply (frule_tac x = "of_nat n" in range_cover_bound3) + subgoal by (simp add:word_of_nat_less range_cover.unat_of_nat_n blah) + subgoal by (simp add:field_simps shiftl_t2n blah) + apply (simp add:shiftl_t2n field_simps) + apply (rule contra_subsetD) + apply (rule_tac x1 = 0 in subset_trans[OF _ range_cover_cell_subset,rotated ]) + apply (erule_tac p = n in range_cover_offset[rotated]) + subgoal by simp + apply simp + apply (rule less_diff_gt0) + subgoal by (simp add:word_of_nat_less range_cover.unat_of_nat_n blah) + apply (clarsimp simp: field_simps) + apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def + dest!:invs_valid_idle' elim!: obj_atE') + apply (drule(1) pspace_no_overlapD') + apply (rule_tac x = "ksIdleThread s" in in_empty_interE[rotated], simp) + prefer 2 + apply (simp add:Int_ac) + subgoal by (clarsimp simp: blah) + subgoal by blast + apply (erule descendants_range_in_subseteq') + apply (clarsimp simp: blah) + apply (rule order_trans[rotated], erule_tac x="of_nat n" in range_cover_bound'') + subgoal by (simp add: word_less_nat_alt unat_of_nat) + subgoal by (simp add: shiftl_t2n field_simps) + apply (rule order_trans[rotated], + erule_tac p="Suc n" in range_cover_subset, simp_all)[1] + subgoal by (simp add: upto_intvl_eq shiftl_t2n mult.commute + aligned_add_aligned[OF range_cover.aligned is_aligned_mult_triv2]) + apply (erule cte_wp_at_weakenE') + apply (clarsimp simp:shiftl_t2n field_simps) + apply (erule subsetD) + apply (erule subsetD[rotated]) + apply (rule_tac p1 = n in subset_trans[OF _ range_cover_subset]) + prefer 2 + apply (simp add:field_simps ) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems by (simp add:field_simps )+ + apply (clarsimp simp: word_shiftl_add_distrib) + apply (clarsimp simp:blah field_simps shiftl_t2n) + apply (drule word_eq_zeroI) + apply (drule_tac p = "Suc n" in range_cover_no_0) + apply (simp add:field_simps)+ + apply clarsimp + apply (rule conjI) + apply (subgoal_tac "of_nat (x + 1) << 5 \ (0::machine_word)") + apply (simp add: word_of_nat_plus field_simps shiftl_t2n) + apply (frule range_cover_n_le'(2)) + apply (subgoal_tac "x < 2 ^ sz") + prefer 2 apply simp + apply (drule (1) less_le_trans[OF _ power_increasing], simp) + apply (match premises in H: \x < 2 ^ maxUntypedSizeBits\ for x \ + \match premises in K[thin]: _ (multi) \ \insert H\\) + apply (rule word_shift_nonzero[where m=maxUntypedSizeBits]) + apply (simp add: less_eq_Suc_le) + apply (drule PackedTypes.of_nat_mono_maybe_le + [where X="2 ^ maxUntypedSizeBits" and 'a=machine_word_len, rotated]; + simp add: untypedBits_defs) + apply (simp add: untypedBits_defs) + apply (rule notI, erule Word.of_nat_0[THEN iffD1, THEN exE]) + apply (rename_tac q; case_tac q; clarsimp simp: untypedBits_defs) + apply (drule_tac x = "Suc x" in spec) + subgoal by (clarsimp simp: field_simps) + apply clarsimp + apply 
(subst range_cover.unat_of_nat_n) + apply (erule range_cover_le) + apply simp + apply (simp add:word_unat.Rep_inverse') + subgoal by (clarsimp simp:range_cover.range_cover_n_less[where 'a=machine_word_len, simplified]) + subgoal by clarsimp + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (frule(1) ghost_assertion_size_logic) + apply (drule range_cover_sz') + subgoal by (intro conjI impI; simp add: o_def word_of_nat_less) + apply (rule conjI) + apply (frule range_cover.aligned) + apply (frule range_cover_full[OF range_cover.aligned]) + apply (simp add:range_cover_def word_bits_def) + apply (clarsimp simp: invs_valid_pspace' conj_comms intvl_range_conv + createObject_hs_preconds_def range_cover.aligned range_cover_full) + apply (frule(1) range_cover_gsMaxObjectSize, fastforce, assumption) + apply (simp add: intvl_range_conv[OF range_cover.aligned range_cover_sz'] + order_trans[OF _ APIType_capBits_min] o_def) + apply (intro conjI) + subgoal by (simp add: word_bits_def range_cover_def) + apply (clarsimp simp:rf_sr_def cstate_relation_def Let_def) + apply (erule pspace_no_overlap'_le) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems + by (simp add:range_cover.sz[where 'a=machine_word_len, simplified] word_bits_def)+ + apply (erule contra_subsetD[rotated]) + subgoal by (rule order_trans[rotated], rule range_cover_subset'[where n=1], + erule range_cover_le, simp_all, (clarsimp simp: neq_Nil_conv)+) + apply (rule disjoint_subset2[rotated]) + apply (simp add:Int_ac) + apply (erule range_cover_subset[where p = 0,simplified]) + subgoal by simp + subgoal by simp + subgoal by (simp add: Int_commute shiftl_t2n mult.commute) + apply (erule cte_wp_at_weakenE') + apply (clarsimp simp:blah word_and_le2 shiftl_t2n field_simps) + apply (frule range_cover_bound''[where x = "of_nat (length destSlots) - 1"]) + subgoal by (simp add: range_cover_not_zero[rotated]) + subgoal by (simp add:field_simps) + subgoal by (erule range_cover_subset[where p=0, simplified]; simp) + apply clarsimp + apply (drule_tac x = k in spec) + apply simp + apply (drule(1) bspec[OF _ nth_mem])+ + subgoal by (clarsimp simp: field_simps) + apply clarsimp + apply (drule(1) bspec[OF _ nth_mem])+ + subgoal by (clarsimp simp:cte_wp_at_ctes_of) + apply clarsimp + apply (frule range_cover_sz') + apply (frule(1) range_cover_gsMaxObjectSize, fastforce, assumption) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) ghost_assertion_size_logic)+ + apply (simp add: o_def) + apply (case_tac newType, + simp_all add: object_type_from_H_def Kernel_C_defs + nAPIObjects_def APIType_capBits_def split:apiobject_type.splits)[1] + subgoal by (simp add:unat_eq_def word_unat.Rep_inverse' word_less_nat_alt) + subgoal by (clarsimp simp:objBits_simps', unat_arith) + apply (fold_subgoals (prefix))[3] + subgoal premises prems using prems + by (clarsimp simp: objBits_simps' unat_eq_def word_unat.Rep_inverse' + word_less_nat_alt)+ + (* FIXME AARCH64 abstraction violation: for some of these we need to know config_ARM_PA_SIZE_BITS_40_def *) + by (clarsimp simp: bit_simps pageBitsForSize_def framesize_to_H_def frameSizeConstants_defs + Kernel_Config.config_ARM_PA_SIZE_BITS_40_def)+ + +end + +end diff --git a/proof/crefine/AARCH64/SR_lemmas_C.thy b/proof/crefine/AARCH64/SR_lemmas_C.thy new file mode 100644 index 0000000000..6dabdabd1a --- /dev/null +++ b/proof/crefine/AARCH64/SR_lemmas_C.thy @@ -0,0 +1,2548 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, 
CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory SR_lemmas_C +imports + StateRelation_C + "Refine.Invariants_H" +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +section "vm rights" + +lemma vmRightsToBits_bounded: + "vmRightsToBits rights < 4" + by (cases rights; clarsimp simp: vmRightsToBits_def) + +lemma vmRightsToBits_not_2: + "vmRightsToBits rights \ 2" + by (cases rights; clarsimp simp: vmRightsToBits_def) + +lemma vmRightsToBits_vmrights_to_H: + "\ rights < 4; rights \ 2 \ \ vmRightsToBits (vmrights_to_H rights) = rights" + apply (clarsimp simp add: vmrights_to_H_def vm_rights_defs vmRightsToBits_def split: if_splits) + apply (drule word_less_cases, erule disjE, simp, simp)+ + done + +section "ctes" + +subsection "capabilities" + +lemma cteMDBNode_cte_to_H [simp]: + "cteMDBNode (cte_to_H cte) = mdb_node_to_H (cteMDBNode_CL cte)" + unfolding cte_to_H_def + by simp + +lemma cteMDBNode_CL_lift [simp]: + "cte_lift cte' = Some ctel \ + mdb_node_lift (cteMDBNode_C cte') = cteMDBNode_CL ctel" + unfolding cte_lift_def + by (fastforce split: option.splits) + +lemma cteCap_cte_to_H [simp]: + "cteCap (cte_to_H cte) = cap_to_H (cap_CL cte)" + unfolding cte_to_H_def + by simp + +lemma cap_CL_lift [simp]: + "cte_lift cte' = Some ctel \ cap_lift (cte_C.cap_C cte') = Some (cap_CL ctel)" + unfolding cte_lift_def + by (fastforce split: option.splits) + +lemma cteCap_update_cte_to_H [simp]: + "\ cte_lift cte' = Some z; cap_lift cap' = Some capl\ + \ map_option cte_to_H (cte_lift (cte_C.cap_C_update (\_. cap') cte')) = + Some (cteCap_update (\_. cap_to_H capl) (cte_to_H z))" + unfolding cte_lift_def + by (clarsimp simp: cte_to_H_def split: option.splits) + +lemma cteMDBNode_C_update_lift [simp]: + "cte_lift cte' = Some ctel \ + (cte_lift (cteMDBNode_C_update (\_. m) cte') = Some x) + = (cteMDBNode_CL_update (\_. mdb_node_lift m) ctel = x)" + unfolding cte_lift_def + by (fastforce split: option.splits) + +lemma ccap_relationE: + "\ccap_relation c v; \vl. \ cap_lift v = Some vl; c = cap_to_H vl; c_valid_cap v\ \ P \ \ P" + unfolding ccap_relation_def map_option_case + apply clarsimp + apply (drule sym) + apply (clarsimp split: option.splits) + done + +definition + "frameSize cap \ case cap of ArchObjectCap (FrameCap _ _ sz _ _) \ sz" + +definition + "isArchCap_tag (n :: machine_word) \ n mod 2 = 1" + +lemma isArchCap_tag_def2: + "isArchCap_tag n \ n && 1 = 1" + by (simp add: isArchCap_tag_def word_mod_2p_is_mask[where n=1, simplified] mask_def) + +(* On AARCH64 we cannot have isArchPageTableCap as at the abstract level there is only one cap, + while in C there are separate page table and vspace caps. 
*) + +definition isArchNormalPTCap :: "capability \ bool" where + "isArchNormalPTCap cap \ case cap of ArchObjectCap (PageTableCap _ NormalPT_T _) \ True | _ \ False" + +definition isArchVSpacePTCap :: "capability \ bool" where + "isArchVSpacePTCap cap \ case cap of ArchObjectCap (PageTableCap _ VSRootPT_T _) \ True | _ \ False" + +(* FIXME AARCH64 overrides version in Bits_R *) +lemmas isCap_simps = isCap_simps isArchNormalPTCap_def isArchVSpacePTCap_def + +lemma cap_get_tag_isCap0: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_thread_cap) = isThreadCap cap + \ (cap_get_tag cap' = scast cap_null_cap) = (cap = NullCap) + \ (cap_get_tag cap' = scast cap_notification_cap) = isNotificationCap cap + \ (cap_get_tag cap' = scast cap_endpoint_cap) = isEndpointCap cap + \ (cap_get_tag cap' = scast cap_irq_handler_cap) = isIRQHandlerCap cap + \ (cap_get_tag cap' = scast cap_irq_control_cap) = isIRQControlCap cap + \ (cap_get_tag cap' = scast cap_zombie_cap) = isZombie cap + \ (cap_get_tag cap' = scast cap_reply_cap) = isReplyCap cap + \ (cap_get_tag cap' = scast cap_untyped_cap) = isUntypedCap cap + \ (cap_get_tag cap' = scast cap_cnode_cap) = isCNodeCap cap + \ (cap_get_tag cap' = scast cap_domain_cap) = isDomainCap cap + \ isArchCap_tag (cap_get_tag cap') = isArchCap \ cap + \ (cap_get_tag cap' = scast cap_asid_control_cap) = isArchCap isASIDControlCap cap + \ (cap_get_tag cap' = scast cap_asid_pool_cap) = isArchCap isASIDPoolCap cap + \ (cap_get_tag cap' = scast cap_frame_cap) = isArchFrameCap cap + \ (cap_get_tag cap' = scast cap_page_table_cap) = isArchNormalPTCap cap + \ (cap_get_tag cap' = scast cap_vspace_cap) = isArchVSpacePTCap cap + \ (cap_get_tag cap' = scast cap_vcpu_cap) = isArchCap isVCPUCap cap" + using cr + apply - + apply (erule ccap_relationE) + apply (simp add: cap_to_H_def cap_lift_def Let_def isArchCap_tag_def2 isArchCap_def) + by (timeit \clarsimp simp: isCap_simps cap_tag_defs word_le_nat_alt Let_def + split: if_split_asm\) \ \takes a while: ~2.5min\ + +lemma cap_get_tag_isCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_thread_cap) = (isThreadCap cap)" + and "(cap_get_tag cap' = scast cap_null_cap) = (cap = NullCap)" + and "(cap_get_tag cap' = scast cap_notification_cap) = (isNotificationCap cap)" + and "(cap_get_tag cap' = scast cap_endpoint_cap) = (isEndpointCap cap)" + and "(cap_get_tag cap' = scast cap_irq_handler_cap) = (isIRQHandlerCap cap)" + and "(cap_get_tag cap' = scast cap_irq_control_cap) = (isIRQControlCap cap)" + and "(cap_get_tag cap' = scast cap_zombie_cap) = (isZombie cap)" + and "(cap_get_tag cap' = scast cap_reply_cap) = isReplyCap cap" + and "(cap_get_tag cap' = scast cap_untyped_cap) = (isUntypedCap cap)" + and "(cap_get_tag cap' = scast cap_cnode_cap) = (isCNodeCap cap)" + and "(cap_get_tag cap' = scast cap_domain_cap) = isDomainCap cap" + and "isArchCap_tag (cap_get_tag cap') = isArchCap \ cap" + and "(cap_get_tag cap' = scast cap_asid_control_cap) = isArchCap isASIDControlCap cap" + and "(cap_get_tag cap' = scast cap_asid_pool_cap) = isArchCap isASIDPoolCap cap" + and "(cap_get_tag cap' = scast cap_frame_cap) = isArchFrameCap cap" + and "(cap_get_tag cap' = scast cap_page_table_cap) = isArchNormalPTCap cap" + and "(cap_get_tag cap' = scast cap_vspace_cap) = isArchVSpacePTCap cap" + and "(cap_get_tag cap' = scast cap_vcpu_cap) = isArchCap isVCPUCap cap" + using cap_get_tag_isCap0 [OF cr] by auto + +lemmas cap_get_tag_NullCap = cap_get_tag_isCap(2) + +lemma cap_get_tag_ThreadCap: + 
assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_thread_cap) = + (cap = ThreadCap (ctcb_ptr_to_tcb_ptr (Ptr (cap_thread_cap_CL.capTCBPtr_CL (cap_thread_cap_lift cap')))))" + using cr + apply - + apply (rule iffI) + apply (erule ccap_relationE) + apply (clarsimp simp add: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_NotificationCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_notification_cap) = + (cap = NotificationCap + (capNtfnPtr_CL (cap_notification_cap_lift cap')) + (capNtfnBadge_CL (cap_notification_cap_lift cap')) + (to_bool (capNtfnCanSend_CL (cap_notification_cap_lift cap'))) + (to_bool (capNtfnCanReceive_CL (cap_notification_cap_lift cap'))))" + using cr + apply - + apply (rule iffI) + apply (erule ccap_relationE) + apply (clarsimp simp add: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_EndpointCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_endpoint_cap) = + (cap = EndpointCap (capEPPtr_CL (cap_endpoint_cap_lift cap')) + (capEPBadge_CL (cap_endpoint_cap_lift cap')) + (to_bool (capCanSend_CL (cap_endpoint_cap_lift cap'))) + (to_bool (capCanReceive_CL (cap_endpoint_cap_lift cap'))) + (to_bool (capCanGrant_CL (cap_endpoint_cap_lift cap'))) + (to_bool (capCanGrantReply_CL (cap_endpoint_cap_lift cap'))))" + using cr + apply - + apply (rule iffI) + apply (erule ccap_relationE) + apply (clarsimp simp add: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_CNodeCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_cnode_cap) = + (cap = capability.CNodeCap (capCNodePtr_CL (cap_cnode_cap_lift cap')) + (unat (capCNodeRadix_CL (cap_cnode_cap_lift cap'))) + (capCNodeGuard_CL (cap_cnode_cap_lift cap')) + (unat (capCNodeGuardSize_CL (cap_cnode_cap_lift cap'))))" + using cr + apply - + apply (rule iffI) + apply (erule ccap_relationE) + apply (clarsimp simp add: cap_lifts cap_to_H_def Let_def) + apply (simp add: cap_get_tag_isCap isCap_simps Let_def) + done + +lemma cap_get_tag_IRQHandlerCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_irq_handler_cap) = + (cap = capability.IRQHandlerCap (ucast (capIRQ_CL (cap_irq_handler_cap_lift cap'))))" + using cr + apply - + apply (rule iffI) + apply (erule ccap_relationE) + apply (clarsimp simp add: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_IRQControlCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_irq_control_cap) = + (cap = capability.IRQControlCap)" + using cr + apply - + apply (rule iffI) + apply (clarsimp simp add: cap_lifts cap_get_tag_isCap isCap_simps cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_ZombieCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_zombie_cap) = + (cap = + (if isZombieTCB_C (capZombieType_CL (cap_zombie_cap_lift cap')) + then capability.Zombie (capZombieID_CL (cap_zombie_cap_lift cap') && ~~ mask 5) ZombieTCB + (unat (capZombieID_CL (cap_zombie_cap_lift cap') && mask 5)) + else let radix = unat (capZombieType_CL (cap_zombie_cap_lift cap')) + in capability.Zombie (capZombieID_CL (cap_zombie_cap_lift cap') && ~~ mask (radix + 1)) + (ZombieCNode radix) + (unat (capZombieID_CL (cap_zombie_cap_lift cap') && mask (radix + 1)))))" + using cr + apply 
- + apply (rule iffI) + apply (erule ccap_relationE) + apply (clarsimp simp add: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps Let_def + split: if_split_asm) + done + + + +lemma cap_get_tag_ReplyCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_reply_cap) = + (cap = + ReplyCap (ctcb_ptr_to_tcb_ptr (Ptr (cap_reply_cap_CL.capTCBPtr_CL (cap_reply_cap_lift cap')))) + (to_bool (capReplyMaster_CL (cap_reply_cap_lift cap'))) + (to_bool (capReplyCanGrant_CL (cap_reply_cap_lift cap'))))" + using cr + apply - + apply (rule iffI) + apply (erule ccap_relationE) + apply (clarsimp simp add: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_UntypedCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_untyped_cap) = + (cap = UntypedCap (to_bool (capIsDevice_CL (cap_untyped_cap_lift cap'))) + (capPtr_CL (cap_untyped_cap_lift cap')) + (unat (capBlockSize_CL (cap_untyped_cap_lift cap'))) + (unat (capFreeIndex_CL (cap_untyped_cap_lift cap') << 4)))" + using cr + apply - + apply (rule iffI) + apply (erule ccap_relationE) + apply (clarsimp simp add: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_DomainCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_domain_cap) = (cap = DomainCap)" + using cr + apply - + apply (rule iffI) + apply (clarsimp simp add: cap_lifts cap_get_tag_isCap cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_ASIDControlCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_asid_control_cap) + = (cap = ArchObjectCap ASIDControlCap)" + using cr + apply - + apply (rule iffI) + (* cap_lift_asid_control_cap not part of cap_lifts *) + apply (clarsimp elim!: ccap_relationE simp: cap_lift_asid_control_cap cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_ASIDPoolCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_asid_pool_cap) + = (cap = ArchObjectCap (ASIDPoolCap (capASIDPool_CL (cap_asid_pool_cap_lift cap')) + (capASIDBase_CL (cap_asid_pool_cap_lift cap'))))" + using cr + apply - + apply (rule iffI) + apply (clarsimp elim!: ccap_relationE simp: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_FrameCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_frame_cap) + = (cap = ArchObjectCap ( + FrameCap (capFBasePtr_CL (cap_frame_cap_lift cap')) + (vmrights_to_H (capFVMRights_CL (cap_frame_cap_lift cap'))) + (framesize_to_H (capFSize_CL (cap_frame_cap_lift cap'))) + (to_bool (capFIsDevice_CL (cap_frame_cap_lift cap'))) + (if to_bool (capFMappedASID_CL (cap_frame_cap_lift cap')) + then Some (capFMappedASID_CL (cap_frame_cap_lift cap'), + capFMappedAddress_CL (cap_frame_cap_lift cap')) + else None)))" + using cr + apply - + apply (rule iffI) + apply (clarsimp elim!: ccap_relationE simp: cap_lifts cap_to_H_def to_bool_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + + +lemma cap_get_tag_PageTableCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_page_table_cap) + = (cap = ArchObjectCap ( + PageTableCap (capPTBasePtr_CL (cap_page_table_cap_lift cap')) + NormalPT_T + (if to_bool (capPTIsMapped_CL (cap_page_table_cap_lift cap')) + then Some (capPTMappedASID_CL (cap_page_table_cap_lift cap'), + capPTMappedAddress_CL 
(cap_page_table_cap_lift cap')) + else None)))" + using cr + apply - + apply (rule iffI) + apply (clarsimp elim!: ccap_relationE simp: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_VSpaceCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_vspace_cap) + = (cap = ArchObjectCap ( + PageTableCap (capVSBasePtr_CL (cap_vspace_cap_lift cap')) + VSRootPT_T + (if to_bool (capVSIsMapped_CL (cap_vspace_cap_lift cap')) + then Some (capVSMappedASID_CL (cap_vspace_cap_lift cap'), 0) + else None)))" + using cr + apply - + apply (rule iffI) + apply (clarsimp elim!: ccap_relationE simp: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemma cap_get_tag_VCPUCap: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_vcpu_cap) + = (cap = ArchObjectCap (VCPUCap (capVCPUPtr_CL (cap_vcpu_cap_lift cap'))))" + using cr + apply - + apply (rule iffI) + apply (clarsimp elim!: ccap_relationE simp: cap_lifts cap_to_H_def) + apply (simp add: cap_get_tag_isCap isCap_simps) + done + +lemmas cap_get_tag_to_H_iffs = + cap_get_tag_NullCap + cap_get_tag_ThreadCap + cap_get_tag_NotificationCap + cap_get_tag_EndpointCap + cap_get_tag_CNodeCap + cap_get_tag_IRQHandlerCap + cap_get_tag_IRQControlCap + cap_get_tag_ZombieCap + cap_get_tag_UntypedCap + cap_get_tag_DomainCap + cap_get_tag_ASIDControlCap + cap_get_tag_ASIDPoolCap + cap_get_tag_FrameCap + cap_get_tag_PageTableCap + cap_get_tag_VSpaceCap + cap_get_tag_VCPUCap + +lemmas cap_get_tag_to_H = cap_get_tag_to_H_iffs [THEN iffD1] + +subsection "mdb" + +lemma cmdbnode_relation_mdb_node_to_H [simp]: + "cte_lift cte' = Some y + \ cmdbnode_relation (mdb_node_to_H (cteMDBNode_CL y)) (cteMDBNode_C cte')" + unfolding cmdbnode_relation_def mdb_node_to_H_def mdb_node_lift_def cte_lift_def + by (fastforce split: option.splits) + +definition tcb_no_ctes_proj :: + "tcb \ Structures_H.thread_state \ machine_word \ machine_word \ arch_tcb \ bool \ word8 + \ word8 \ word8 \ nat \ fault option \ machine_word option + \ machine_word option \ machine_word option" + where + "tcb_no_ctes_proj t \ + (tcbState t, tcbFaultHandler t, tcbIPCBuffer t, tcbArch t, tcbQueued t, + tcbMCP t, tcbPriority t, tcbDomain t, tcbTimeSlice t, tcbFault t, tcbBoundNotification t, + tcbSchedNext t, tcbSchedPrev t)" + +lemma tcb_cte_cases_proj_eq [simp]: + "tcb_cte_cases p = Some (getF, setF) \ + tcb_no_ctes_proj tcb = tcb_no_ctes_proj (setF f tcb)" + unfolding tcb_no_ctes_proj_def tcb_cte_cases_def + by (auto split: if_split_asm) + +(* NOTE: 5 = cte_level_bits *) +lemma map_to_ctes_upd_cte': + "\ ksPSpace s p = Some (KOCTE cte'); is_aligned p cte_level_bits; ps_clear p cte_level_bits s \ + \ map_to_ctes ((ksPSpace s)(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" + apply (erule (1) map_to_ctes_upd_cte) + apply (simp add: field_simps ps_clear_def3 cte_level_bits_def mask_def) + done + +lemma map_to_ctes_upd_tcb': + "[| ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; + ps_clear p tcbBlockSizeBits s |] +==> map_to_ctes ((ksPSpace s)(p |-> KOTCB tcb)) = + (%x. if EX getF setF. + tcb_cte_cases (x - p) = Some (getF, setF) & + getF tcb ~= getF tcb' + then case tcb_cte_cases (x - p) of + Some (getF, setF) => Some (getF tcb) + else ctes_of s x)" + apply (erule (1) map_to_ctes_upd_tcb) + apply (simp add: field_simps ps_clear_def3 mask_def objBits_defs) + done + + +lemma tcb_cte_cases_inv [simp]: + "tcb_cte_cases p = Some (getF, setF) \ getF (setF (\_. 
v) tcb) = v" + unfolding tcb_cte_cases_def + by (simp split: if_split_asm) + +declare insert_dom [simp] + +lemma in_alignCheck': + "(z \ fst (alignCheck x n s)) = (snd z = s \ is_aligned x n)" + by (cases z, simp) + +lemma fst_alignCheck_empty [simp]: + "(fst (alignCheck x n s) = {}) = (\ is_aligned x n)" + apply (subst all_not_in_conv [symmetric]) + apply (clarsimp simp: in_alignCheck') + done + +lemma fst_setCTE0: + notes option.case_cong_weak [cong] + assumes ct: "cte_at' dest s" + shows "\(v, s') \ fst (setCTE dest cte s). + (s' = s \ ksPSpace := ksPSpace s' \) + \ (dom (ksPSpace s) = dom (ksPSpace s')) + \ (\x \ dom (ksPSpace s). + case (the (ksPSpace s x)) of + KOCTE _ \ (\cte. ksPSpace s' x = Some (KOCTE cte)) + | KOTCB t \ (\t'. ksPSpace s' x = Some (KOTCB t') \ tcb_no_ctes_proj t = tcb_no_ctes_proj t') + | _ \ ksPSpace s' x = ksPSpace s x)" + using ct + apply - + apply (clarsimp simp: setCTE_def setObject_def + bind_def return_def assert_opt_def gets_def split_beta get_def + modify_def put_def) + apply (erule cte_wp_atE') + apply (rule ps_clear_lookupAround2, assumption+) + apply simp + apply (erule is_aligned_no_overflow) + apply (simp (no_asm_simp) del: fun_upd_apply cong: option.case_cong) + apply (simp add: return_def updateObject_cte + bind_def assert_opt_def gets_def split_beta get_def + modify_def put_def unless_def when_def + objBits_simps + cong: bex_cong) + apply (rule bexI [where x = "((), s)"]) + apply (frule_tac s' = s in in_magnitude_check [where v = "()"]) + apply (simp add: cte_level_bits_def) + apply assumption + apply (simp add: objBits_defs cte_level_bits_def) + apply (erule bexI [rotated]) + apply (simp cong: if_cong) + apply rule + apply (simp split: kernel_object.splits) + apply (fastforce simp: tcb_no_ctes_proj_def) + apply (simp add: cte_level_bits_def objBits_defs) + (* clag *) + apply (rule ps_clear_lookupAround2, assumption+) + apply (erule (1) tcb_cte_cases_in_range1) + apply (erule (1) tcb_cte_cases_in_range2) + apply (simp add: return_def del: fun_upd_apply cong: bex_cong option.case_cong) + apply (subst updateObject_cte_tcb) + apply assumption + apply (simp add: bind_def return_def assert_opt_def gets_def split_beta get_def when_def + modify_def put_def unless_def when_def in_alignCheck') + apply (simp add: objBits_simps) + apply (simp add: magnitudeCheck_def return_def split: option.splits + cong: bex_cong if_cong) + apply (simp split: kernel_object.splits) + apply (fastforce simp: tcb_no_ctes_proj_def) + apply (simp add: magnitudeCheck_def when_def return_def fail_def + linorder_not_less + split: option.splits + cong: bex_cong if_cong) + apply rule + apply (simp split: kernel_object.splits) + apply (fastforce simp: tcb_no_ctes_proj_def) + done + + +(* duplicates *) +lemma pspace_alignedD' [intro?]: + assumes lu: "ksPSpace s x = Some v" + and al: "pspace_aligned' s" + shows "is_aligned x (objBitsKO v)" + using al lu unfolding pspace_aligned'_def + apply - + apply (drule (1) bspec [OF _ domI]) + apply simp + done + +declare pspace_distinctD' [intro?] + +lemma ctes_of_ksI [intro?]: + fixes s :: "kernel_state" + assumes ks: "ksPSpace s x = Some (KOCTE cte)" + and pa: "pspace_aligned' s" + and pd: "pspace_distinct' s" + shows "ctes_of s x = Some cte" +proof (rule ctes_of_eq_cte_wp_at') + from ks show "cte_wp_at' ((=) cte) x s" + proof (rule cte_wp_at_cteI' [OF _ _ _ refl]) + from ks pa have "is_aligned x (objBitsKO (KOCTE cte))" .. 
+ thus "is_aligned x cte_level_bits" + unfolding cte_level_bits_def by (simp add: objBits_simps') + + from ks pd have "ps_clear x (objBitsKO (KOCTE cte)) s" .. + thus "ps_clear x cte_level_bits s" + unfolding cte_level_bits_def by (simp add: objBits_simps') + qed +qed + +lemma fst_setCTE: + assumes ct: "cte_at' dest s" + and rl: "\s'. \ ((), s') \ fst (setCTE dest cte s); + s' = s \ ksPSpace := ksPSpace s' \; + ctes_of s' = (ctes_of s)(dest \ cte); + map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s'); + map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s'); + map_to_ptes (ksPSpace s) = map_to_ptes (ksPSpace s'); + map_to_asidpools (ksPSpace s) = map_to_asidpools (ksPSpace s'); + map_to_user_data (ksPSpace s) = map_to_user_data (ksPSpace s'); + map_to_user_data_device (ksPSpace s) = map_to_user_data_device (ksPSpace s'); + map_to_vcpus (ksPSpace s) = map_to_vcpus (ksPSpace s'); + map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s) + = map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s'); + \T p. typ_at' T p s = typ_at' T p s'\ \ P" + shows "P" +proof - + from fst_setCTE0 [where cte = cte, OF ct] + obtain s' where + "((), s')\fst (setCTE dest cte s)" + "s' = s\ksPSpace := ksPSpace s'\" + "dom (ksPSpace s) = dom (ksPSpace s')" + "(\p \ dom (ksPSpace s'). + case the (ksPSpace s p) of + KOTCB t \ \t'. ksPSpace s' p = Some (KOTCB t') \ tcb_no_ctes_proj t = tcb_no_ctes_proj t' + | KOCTE _ \ \cte. ksPSpace s' p = Some (KOCTE cte) + | _ \ ksPSpace s' p = ksPSpace s p)" + by clarsimp + note thms = this + + have ceq: "ctes_of s' = (ctes_of s)(dest \ cte)" + by (rule use_valid [OF thms(1) setCTE_ctes_of_wp]) simp + + show ?thesis + proof (rule rl) + show "map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s')" + proof (rule map_comp_eqI) + fix x + assume xin: "x \ dom (ksPSpace s')" + then obtain ko where ko: "ksPSpace s x = Some ko" by (clarsimp simp: thms(3)[symmetric]) + moreover from xin obtain ko' where ko': "ksPSpace s' x = Some ko'" by clarsimp + ultimately have "(projectKO_opt ko' :: endpoint option) = projectKO_opt ko" using xin thms(4) ceq + by - (drule (1) bspec, cases ko, auto simp: projectKO_opt_ep) + thus "(projectKO_opt (the (ksPSpace s' x)) :: endpoint option) = projectKO_opt (the (ksPSpace s x))" using ko ko' + by simp + qed fact + + (* clag \ *) + show "map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s')" + proof (rule map_comp_eqI) + fix x + assume xin: "x \ dom (ksPSpace s')" + then obtain ko where ko: "ksPSpace s x = Some ko" by (clarsimp simp: thms(3)[symmetric]) + moreover from xin obtain ko' where ko': "ksPSpace s' x = Some ko'" by clarsimp + ultimately have "(projectKO_opt ko' :: Structures_H.notification option) = projectKO_opt ko" using xin thms(4) ceq + by - (drule (1) bspec, cases ko, auto simp: projectKO_opt_ntfn) + thus "(projectKO_opt (the (ksPSpace s' x)) :: Structures_H.notification option) = projectKO_opt (the (ksPSpace s x))" using ko ko' + by simp + qed fact + + show "map_to_ptes (ksPSpace s) = map_to_ptes (ksPSpace s')" + proof (rule map_comp_eqI) + fix x + assume xin: "x \ dom (ksPSpace s')" + then obtain ko where ko: "ksPSpace s x = Some ko" by (clarsimp simp: thms(3)[symmetric]) + moreover from xin obtain ko' where ko': "ksPSpace s' x = Some ko'" by clarsimp + ultimately have "(projectKO_opt ko' :: pte option) = projectKO_opt ko" using xin thms(4) ceq + by - (drule (1) bspec, cases ko, auto simp: projectKO_opt_pte) + thus "(projectKO_opt (the (ksPSpace s' x)) :: pte option) = projectKO_opt (the (ksPSpace s x))" using ko ko' + by simp + qed fact + + show 
"map_to_asidpools (ksPSpace s) = map_to_asidpools (ksPSpace s')" + proof (rule map_comp_eqI) + fix x + assume xin: "x \ dom (ksPSpace s')" + then obtain ko where ko: "ksPSpace s x = Some ko" by (clarsimp simp: thms(3)[symmetric]) + moreover from xin obtain ko' where ko': "ksPSpace s' x = Some ko'" by clarsimp + ultimately have "(projectKO_opt ko' :: asidpool option) = projectKO_opt ko" using xin thms(4) ceq + by - (drule (1) bspec, cases ko, auto simp: projectKO_opt_asidpool) + thus "(projectKO_opt (the (ksPSpace s' x)) :: asidpool option) = projectKO_opt (the (ksPSpace s x))" using ko ko' + by simp + qed fact + + show "map_to_vcpus (ksPSpace s) = map_to_vcpus (ksPSpace s')" + proof (rule map_comp_eqI) + fix x + assume xin: "x \ dom (ksPSpace s')" + then obtain ko where ko: "ksPSpace s x = Some ko" by (clarsimp simp: thms(3)[symmetric]) + moreover from xin obtain ko' where ko': "ksPSpace s' x = Some ko'" by clarsimp + ultimately have "(projectKO_opt ko' :: vcpu option) = projectKO_opt ko" using xin thms(4) ceq + by - (drule (1) bspec, cases ko, auto simp: projectKO_opt_vcpu) + thus "(projectKO_opt (the (ksPSpace s' x)) :: vcpu option) = projectKO_opt (the (ksPSpace s x))" using ko ko' + by simp + qed fact + + show "map_to_user_data (ksPSpace s) = map_to_user_data (ksPSpace s')" + proof (rule map_comp_eqI) + fix x + assume xin: "x \ dom (ksPSpace s')" + then obtain ko where ko: "ksPSpace s x = Some ko" by (clarsimp simp: thms(3)[symmetric]) + moreover from xin obtain ko' where ko': "ksPSpace s' x = Some ko'" by clarsimp + ultimately have "(projectKO_opt ko' :: user_data option) = projectKO_opt ko" using xin thms(4) ceq + by - (drule (1) bspec, cases ko, auto simp: projectKO_opt_user_data) + thus "(projectKO_opt (the (ksPSpace s' x)) :: user_data option) = projectKO_opt (the (ksPSpace s x))" using ko ko' + by simp + qed fact + + show "map_to_user_data_device (ksPSpace s) = map_to_user_data_device (ksPSpace s')" + proof (rule map_comp_eqI) + fix x + assume xin: "x \ dom (ksPSpace s')" + then obtain ko where ko: "ksPSpace s x = Some ko" by (clarsimp simp: thms(3)[symmetric]) + moreover from xin obtain ko' where ko': "ksPSpace s' x = Some ko'" by clarsimp + ultimately have "(projectKO_opt ko' :: user_data_device option) = projectKO_opt ko" using xin thms(4) ceq + by - (drule (1) bspec, cases ko, auto simp: projectKO_opt_user_data_device) + thus "(projectKO_opt (the (ksPSpace s' x)) :: user_data_device option) = projectKO_opt (the (ksPSpace s x))" using ko ko' + by simp + qed fact + + + note sta = setCTE_typ_at'[where P="\x. x = y" for y] + show typ_at: "\T p. 
typ_at' T p s = typ_at' T p s'" + using use_valid[OF _ sta, OF thms(1), OF refl] + by auto + + show "map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s) = + map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s')" + proof (rule ext) + fix x + + have dm: "dom (map_to_tcbs (ksPSpace s)) = dom (map_to_tcbs (ksPSpace s'))" + using thms(3) thms(4) + apply - + apply (rule set_eqI) + apply rule + apply (frule map_comp_subset_domD) + apply simp + apply (drule (1) bspec) + apply (clarsimp simp: projectKOs dom_map_comp) + apply (frule map_comp_subset_domD) + apply (drule (1) bspec) + apply (auto simp: dom_map_comp projectKOs split: kernel_object.splits) + apply fastforce + done + + { + assume "x \ dom (map_to_tcbs (ksPSpace s))" + hence "map_option tcb_no_ctes_proj (map_to_tcbs (ksPSpace s) x) + = map_option tcb_no_ctes_proj (map_to_tcbs (ksPSpace s') x)" + using thms(3) thms(4) + apply - + apply (frule map_comp_subset_domD) + apply simp + apply (drule (1) bspec) + apply (clarsimp simp: dom_map_comp projectKOs projectKO_opt_tcb) + apply (case_tac y) + apply simp_all + apply clarsimp + done + } moreover + { + assume "x \ dom (map_to_tcbs (ksPSpace s))" + hence "map_option tcb_no_ctes_proj (map_to_tcbs (ksPSpace s) x) + = map_option tcb_no_ctes_proj (map_to_tcbs (ksPSpace s') x)" + apply - + apply (frule subst [OF dm]) + apply (simp add: dom_def) + done + } ultimately show "(map_option tcb_no_ctes_proj \ (map_to_tcbs (ksPSpace s))) x + = (map_option tcb_no_ctes_proj \ (map_to_tcbs (ksPSpace s'))) x" + by auto + qed + qed fact+ +qed + +lemma cor_map_relI: + assumes dm: "dom am = dom am'" + and rl: "\x y y' z. \ am x = Some y; am' x = Some y'; + rel y z \ \ rel y' z" + shows "cmap_relation am cm sz rel \ cmap_relation am' cm sz rel" + unfolding cmap_relation_def + apply - + apply clarsimp + apply rule + apply (simp add: dm) + apply rule + apply (frule_tac P = "\s. x \ s" in ssubst [OF dm]) + apply (drule (1) bspec) + apply (erule domD [where m = am, THEN exE]) + apply (rule rl, assumption+) + apply (clarsimp simp add: dom_def) + apply simp + done + +lemma setCTE_tcb_case: + assumes om: "map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s) = + map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s')" + and rel: "cmap_relation (map_to_tcbs (ksPSpace s)) (clift (t_hrs_' (globals x))) tcb_ptr_to_ctcb_ptr ctcb_relation" + shows "cmap_relation (map_to_tcbs (ksPSpace s')) (clift (t_hrs_' (globals x))) tcb_ptr_to_ctcb_ptr ctcb_relation" + using om +proof (rule cor_map_relI [OF map_option_eq_dom_eq]) + fix x tcb tcb' z + assume y: "map_to_tcbs (ksPSpace s) x = Some tcb" + and y': "map_to_tcbs (ksPSpace s') x = Some tcb'" and rel: "ctcb_relation tcb z" + + hence "tcb_no_ctes_proj tcb = tcb_no_ctes_proj tcb'" using om + apply - + apply (drule fun_cong [where x = x]) + apply simp + done + + thus "ctcb_relation tcb' z" using rel + unfolding tcb_no_ctes_proj_def ctcb_relation_def cfault_rel_def + by auto +qed fact+ + +lemma lifth_update: + "clift (t_hrs_' s) ptr = clift (t_hrs_' s') ptr + \ lifth ptr s = lifth ptr s'" + unfolding lifth_def + by simp + +lemma getCTE_exs_valid: + "cte_at' dest s \ \(=) s\ getCTE dest \\\r. (=) s\" + unfolding exs_valid_def getCTE_def cte_wp_at'_def + by clarsimp + +lemma cmap_domE1: + "\ f ` dom am = dom cm; am x = Some v; \v'. cm (f x) = Some v' \ P \ \ P" + apply (drule equalityD1) + apply (drule subsetD) + apply (erule imageI [OF domI]) + apply (clarsimp simp: dom_def) + done + +lemma cmap_domE2: + "\ f ` dom am = dom cm; cm x = Some v'; \x' v. 
\ x = f x'; am x' = Some v \ \ P \ \ P" + apply (drule equalityD2) + apply (drule subsetD) + apply (erule domI) + apply (clarsimp simp: dom_def) + done + +lemma cmap_relationE1: + "\ cmap_relation am cm f rel; am x = Some y; + \y'. \am x = Some y; rel y y'; cm (f x) = Some y'\ \ P \ \ P" + unfolding cmap_relation_def + apply clarsimp + apply (erule (1) cmap_domE1) + apply (drule (1) bspec [OF _ domI]) + apply clarsimp + done + +lemma cmap_relationE2: + "\ cmap_relation am cm f rel; cm x = Some y'; + \x' y. \x = f x'; rel y y'; am x' = Some y\ \ P \ \ P" + unfolding cmap_relation_def + apply clarsimp + apply (erule (1) cmap_domE2) + apply (drule (1) bspec [OF _ domI]) + apply clarsimp + done + +lemma cmap_relationI: + assumes doms: "f ` dom am = dom cm" + and rel: "\x v v'. \am x = Some v; cm (f x) = Some v' \ \ rel v v'" + shows "cmap_relation am cm f rel" + unfolding cmap_relation_def using doms +proof (rule conjI) + show "\x\dom am. rel (the (am x)) (the (cm (f x)))" + proof + fix x + assume "x \ dom am" + then obtain v where "am x = Some v" .. + moreover with doms obtain v' where "cm (f x) = Some v'" by (rule cmap_domE1) + ultimately show "rel (the (am x)) (the (cm (f x)))" + by (simp add: rel) + qed +qed + +lemma cmap_relation_relI: + assumes "cmap_relation am cm f rel" + and "am x = Some v" + and "cm (f x) = Some v'" + shows "rel v v'" + using assms + by (fastforce elim!: cmap_relationE1) + +lemma cspace_cte_relationE: + "\ cmap_relation am cm Ptr ccte_relation; am x = Some y; + \z k'. \cm (Ptr x) = Some k'; cte_lift k' = Some z; cte_to_H z = y; c_valid_cte k' \ \ P + \ \ P" + apply (erule (1) cmap_relationE1) + apply (clarsimp simp: ccte_relation_def map_option_Some_eq2) + done + +lemma cmdbnode_relationE: + "\cmdbnode_relation m v; m = mdb_node_to_H (mdb_node_lift v) \ P \ \ P" + unfolding cmdbnode_relation_def + apply (drule sym) + apply clarsimp + done + +(* Used when the rel changes as well *) +lemma cmap_relation_upd_relI: + fixes am :: "machine_word \ 'a" and cm :: "'b typ_heap" + assumes cr: "cmap_relation am cm f rel" + and cof: "am dest = Some v" + and cl: "cm (f dest) = Some v'" + and cc: "rel' nv nv'" + and rel: "\x ov ov'. \ x \ dest; am x = Some ov; cm (f x) = Some ov'; rel ov ov' \ \ rel' ov ov'" + and inj: "inj f" + shows "cmap_relation (am(dest \ nv)) (cm(f dest \ nv')) f rel'" + using assms + apply - + apply (rule cmap_relationE1, assumption+) + apply clarsimp + apply (rule cmap_relationI) + apply (simp add: cmap_relation_def) + apply (case_tac "x = dest") + apply simp + apply (simp add: inj_eq split: if_split_asm) + apply (erule (2) rel) + apply (erule (2) cmap_relation_relI) + done + +lemma cmap_relation_updI: + fixes am :: "machine_word \ 'a" and cm :: "'b typ_heap" + assumes cr: "cmap_relation am cm f rel" + and cof: "am dest = Some v" + and cl: "cm (f dest) = Some v'" + and cc: "rel nv nv'" + and inj: "inj f" + shows "cmap_relation (am(dest \ nv)) (cm(f dest \ nv')) f rel" + using cr cof cl cc + apply (rule cmap_relation_upd_relI) + apply simp + apply fact + done + +declare inj_Ptr[simp] + +(* Ugh *) +lemma cpspace_cte_relation_upd_capI: + assumes cr: "cmap_relation (map_to_ctes am) (clift cm) Ptr ccte_relation" + and cof: "map_to_ctes am dest = Some cte" + and cl: "clift cm (Ptr dest) = Some cte'" + and cc: "ccap_relation capl cap" + shows "cmap_relation ((map_to_ctes am)(dest \ (cteCap_update (\_. capl) cte))) + ((clift cm)(Ptr dest \ cte_C.cap_C_update (\_. 
cap) cte')) Ptr ccte_relation" + using cr cof cl cc + apply - + apply (frule (2) cmap_relation_relI) + apply (erule (2) cmap_relation_updI) + apply (clarsimp elim!: ccap_relationE simp: map_comp_Some_iff ccte_relation_def) + apply (subst (asm) map_option_Some_eq2) + apply clarsimp + apply (simp add: c_valid_cte_def cl_valid_cte_def) + apply simp +done + +lemma cte_to_H_mdb_node_update [simp]: + "cte_to_H (cteMDBNode_CL_update (\_. m) cte) = + cteMDBNode_update (\_. mdb_node_to_H m) (cte_to_H cte)" + unfolding cte_to_H_def + by simp + +lemma cspace_cte_relation_upd_mdbI: + assumes cr: "cmap_relation (map_to_ctes am) (clift cm) Ptr ccte_relation" + and cof: "map_to_ctes am dest = Some cte" + and cl: "clift cm (Ptr dest) = Some cte'" + and cc: "cmdbnode_relation mdbl m" + shows "cmap_relation ((map_to_ctes am)(dest \ cteMDBNode_update (\_. mdbl) cte)) + ((clift cm)(Ptr dest \ cte_C.cteMDBNode_C_update (\_. m) cte')) Ptr ccte_relation" + using cr cof cl cc + apply - + apply (frule (2) cmap_relation_relI) + apply (erule (2) cmap_relation_updI) + apply (clarsimp elim!: cmdbnode_relationE + simp: map_comp_Some_iff ccte_relation_def c_valid_cte_def cl_valid_cte_def map_option_Some_eq2) + apply simp +done + +lemma mdb_node_to_H_mdbPrev_update[simp]: + "mdb_node_to_H (mdbPrev_CL_update (\_. x) m) + = mdbPrev_update (\_. x) (mdb_node_to_H m)" + unfolding mdb_node_to_H_def by simp + +lemma mdb_node_to_H_mdbNext_update[simp]: + "mdb_node_to_H (mdbNext_CL_update (\_. x) m) + = mdbNext_update (\_. x) (mdb_node_to_H m)" + unfolding mdb_node_to_H_def by simp + +lemma mdb_node_to_H_mdbRevocable_update[simp]: + "mdb_node_to_H (mdbRevocable_CL_update (\_. x) m) + = mdbRevocable_update (\_. to_bool x) (mdb_node_to_H m)" + unfolding mdb_node_to_H_def by simp + +lemma mdb_node_to_H_mdbFirstBadged_update[simp]: + "mdb_node_to_H (mdbFirstBadged_CL_update (\_. x) m) + = mdbFirstBadged_update (\_. to_bool x) (mdb_node_to_H m)" + unfolding mdb_node_to_H_def by simp + +declare to_bool_from_bool [simp] + +lemma mdbNext_to_H [simp]: + "mdbNext (mdb_node_to_H n) = mdbNext_CL n" + unfolding mdb_node_to_H_def + by simp + +lemma mdbPrev_to_H [simp]: + "mdbPrev (mdb_node_to_H n) = mdbPrev_CL n" + unfolding mdb_node_to_H_def + by simp + +lemmas ctes_of_not_0 [simp] = valid_mdbD3' [of s, rotated] for s + +(* For getting rid of the generated guards -- will probably break with c_guard*) +lemma cte_bits_le_3 [simp]: "3 \ cte_level_bits" + by (simp add: objBits_defs cte_level_bits_def) + +lemma cte_bits_le_tcb_bits: "cte_level_bits \ tcbBlockSizeBits" + by (simp add: cte_level_bits_def objBits_defs) + +lemma ctes_of_aligned_bits: + assumes pa: "pspace_aligned' s" + and cof: "ctes_of s p = Some cte" + and bits: "bits \ cte_level_bits" + shows "is_aligned p bits" +proof - + from cof have "cte_wp_at' ((=) cte) p s" + by (simp add: cte_wp_at_ctes_of) + thus ?thesis + apply - + apply (rule is_aligned_weaken[OF _ bits]) + apply (erule cte_wp_atE') + apply assumption + apply (simp add: tcb_cte_cases_def field_simps cteSizeBits_def split: if_split_asm) + apply (fastforce elim: aligned_add_aligned[OF _ _ cte_bits_le_tcb_bits] + simp: is_aligned_def cte_level_bits_def)+ + apply (erule is_aligned_weaken[OF _ cte_bits_le_tcb_bits]) + done +qed + +lemma mdbNext_not_zero_eq: + "cmdbnode_relation n n' \ \s s'. (s, s') \ rf_sr \ \ja \ (is_aligned (mdbNext n) 3)\ + \ (mdbNext n \ 0) = (s' \ {_. mdbNext_CL (mdb_node_lift n') \ 0})" + by (fastforce elim: cmdbnode_relationE) + +lemma mdbPrev_not_zero_eq: + "cmdbnode_relation n n' \ \s s'. 
(s, s') \ rf_sr \ \ja\ (is_aligned (mdbPrev n) 3)\ + \ (mdbPrev n \ 0) = (s' \ {_. mdbPrev_CL (mdb_node_lift n') \ 0})" + by (fastforce elim: cmdbnode_relationE) + +abbreviation + "nullCapPointers cte \ cteCap cte = NullCap \ mdbNext (cteMDBNode cte) = nullPointer \ mdbPrev (cteMDBNode cte) = nullPointer" + +lemma nullCapPointers_def: + "is_an_abbreviation" unfolding is_an_abbreviation_def by simp + +lemma valid_mdb_ctes_of_next: + "\ valid_mdb' s; ctes_of s p = Some cte; mdbNext (cteMDBNode cte) \ 0 \ \ cte_at' (mdbNext (cteMDBNode cte)) s" + unfolding valid_mdb'_def valid_mdb_ctes_def + apply (erule conjE) + apply (erule (2) valid_dlistE) + apply (simp add: cte_wp_at_ctes_of) + done + +lemma valid_mdb_ctes_of_prev: + "\ valid_mdb' s; ctes_of s p = Some cte; mdbPrev (cteMDBNode cte) \ 0 \ \ cte_at' (mdbPrev (cteMDBNode cte)) s" + unfolding valid_mdb'_def valid_mdb_ctes_def + apply (erule conjE) + apply (erule (2) valid_dlistE) + apply (simp add: cte_wp_at_ctes_of) + done + +end + +context kernel +begin + +lemma cmap_relation_tcb [intro]: + "(s, s') \ rf_sr \ cpspace_tcb_relation (ksPSpace s) (t_hrs_' (globals s'))" + unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def + by (simp add: Let_def) + +lemma cmap_relation_ep [intro]: + "(s, s') \ rf_sr \ cpspace_ep_relation (ksPSpace s) (t_hrs_' (globals s'))" + unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def + by (simp add: Let_def) + +lemma cmap_relation_ntfn [intro]: + "(s, s') \ rf_sr \ cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals s'))" + unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def + by (simp add: Let_def) + +lemma cmap_relation_cte [intro]: + "(s, s') \ rf_sr \ cpspace_cte_relation (ksPSpace s) (t_hrs_' (globals s'))" + unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def + by (simp add: Let_def) + +lemma rf_sr_cpspace_asidpool_relation: + "(s, s') \ rf_sr + \ cpspace_asidpool_relation (ksPSpace s) (t_hrs_' (globals s'))" + by (clarsimp simp: rf_sr_def cstate_relation_def + cpspace_relation_def Let_def) + +lemma rf_sr_cpte_relation: + "(s, s') \ rf_sr \ cmap_relation (map_to_ptes (ksPSpace s)) + (cslift s') pte_Ptr cpte_relation" + by (clarsimp simp: rf_sr_def cstate_relation_def + Let_def cpspace_relation_def) + +lemma cmap_relation_vcpu[intro]: + "(s, s') \ rf_sr \ cpspace_vcpu_relation (ksPSpace s) (t_hrs_' (globals s'))" + unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def + by (simp add: Let_def) + +lemma rf_sr_cte_relation: + "\ (s, s') \ rf_sr; ctes_of s src = Some cte; cslift s' (Ptr src) = Some cte' \ \ ccte_relation cte cte'" + apply (drule cmap_relation_cte) + apply (erule (2) cmap_relation_relI) + done + +lemma ccte_relation_ccap_relation: + "ccte_relation cte cte' \ ccap_relation (cteCap cte) (cte_C.cap_C cte')" + unfolding ccte_relation_def ccap_relation_def c_valid_cte_def cl_valid_cte_def c_valid_cap_def cl_valid_cap_def + by (clarsimp simp: map_option_Some_eq2 if_bool_eq_conj) + +lemma ccte_relation_cmdbnode_relation: + "ccte_relation cte cte' \ cmdbnode_relation (cteMDBNode cte) (cte_C.cteMDBNode_C cte')" + unfolding ccte_relation_def ccap_relation_def + by (clarsimp simp: map_option_Some_eq2) + +lemma rf_sr_ctes_of_clift: + assumes sr: "(s, s') \ rf_sr" + and cof: "ctes_of s p = Some cte" + shows "\cte'. 
cslift s' (Ptr p) = Some cte' \ cte_lift cte' \ None \ cte = cte_to_H (the (cte_lift cte')) + \ c_valid_cte cte'" +proof - + from sr have "cpspace_cte_relation (ksPSpace s) (t_hrs_' (globals s'))" .. + thus ?thesis using cof + apply (rule cspace_cte_relationE) + apply clarsimp + done +qed + +lemma c_valid_cte_eq: + "c_valid_cte c = case_option True cl_valid_cte (cte_lift c)" + apply (clarsimp simp: c_valid_cte_def cl_valid_cte_def c_valid_cap_def split: option.splits) + apply (unfold cte_lift_def) + apply simp +done + +lemma rf_sr_ctes_of_cliftE: + assumes cof: "ctes_of s p = Some cte" + assumes sr: "(s, s') \ rf_sr" + and rl: "\cte' ctel'. \ctes_of s p = Some (cte_to_H ctel'); + cslift s' (Ptr p) = Some cte'; + cte_lift cte' = Some ctel'; + cte = cte_to_H ctel' ; + cl_valid_cte ctel'\ \ R" + shows "R" + using sr cof + apply - + apply (frule (1) rf_sr_ctes_of_clift) + apply (elim conjE exE) + apply (rule rl) + apply simp + apply assumption + apply clarsimp + apply clarsimp + apply (clarsimp simp: c_valid_cte_eq) + done + +lemma cstate_relation_only_t_hrs: + "\ t_hrs_' s = t_hrs_' t; + ksReadyQueues_' s = ksReadyQueues_' t; + ksReadyQueuesL1Bitmap_' s = ksReadyQueuesL1Bitmap_' t; + ksReadyQueuesL2Bitmap_' s = ksReadyQueuesL2Bitmap_' t; + ksSchedulerAction_' s = ksSchedulerAction_' t; + ksCurThread_' s = ksCurThread_' t; + ksIdleThread_' s = ksIdleThread_' t; + ksWorkUnitsCompleted_' s = ksWorkUnitsCompleted_' t; + intStateIRQTable_' s = intStateIRQTable_' t; + armKSASIDTable_' s = armKSASIDTable_' t; + armKSNextASID_' s = armKSNextASID_' t; + armKSHWASIDTable_' s = armKSHWASIDTable_' t; + phantom_machine_state_' s = phantom_machine_state_' t; + ghost'state_' s = ghost'state_' t; + ksDomScheduleIdx_' s = ksDomScheduleIdx_' t; + ksCurDomain_' s = ksCurDomain_' t; + ksDomainTime_' s = ksDomainTime_' t; + gic_vcpu_num_list_regs_' s = gic_vcpu_num_list_regs_' t; + armHSCurVCPU_' s = armHSCurVCPU_' t; + armHSVCPUActive_' s = armHSVCPUActive_' t + \ + \ cstate_relation a s = cstate_relation a t" + unfolding cstate_relation_def + by (clarsimp simp: Let_def carch_state_relation_def cmachine_state_relation_def) + +lemma rf_sr_upd: + assumes + "(t_hrs_' (globals x)) = (t_hrs_' (globals y))" + "(ksReadyQueues_' (globals x)) = (ksReadyQueues_' (globals y))" + "(ksReadyQueuesL1Bitmap_' (globals x)) = (ksReadyQueuesL1Bitmap_' (globals y))" + "(ksReadyQueuesL2Bitmap_' (globals x)) = (ksReadyQueuesL2Bitmap_' (globals y))" + "(ksSchedulerAction_' (globals x)) = (ksSchedulerAction_' (globals y))" + "(ksCurThread_' (globals x)) = (ksCurThread_' (globals y))" + "(ksIdleThread_' (globals x)) = (ksIdleThread_' (globals y))" + "(ksWorkUnitsCompleted_' (globals x)) = (ksWorkUnitsCompleted_' (globals y))" + "intStateIRQTable_'(globals x) = intStateIRQTable_' (globals y)" + "armKSASIDTable_' (globals x) = armKSASIDTable_' (globals y)" + "armKSNextASID_' (globals x) = armKSNextASID_' (globals y)" + "armKSHWASIDTable_' (globals x) = armKSHWASIDTable_' (globals y)" + "phantom_machine_state_' (globals x) = phantom_machine_state_' (globals y)" + "ghost'state_' (globals x) = ghost'state_' (globals y)" + "ksDomScheduleIdx_' (globals x) = ksDomScheduleIdx_' (globals y)" + "ksCurDomain_' (globals x) = ksCurDomain_' (globals y)" + "ksDomainTime_' (globals x) = ksDomainTime_' (globals y)" + "gic_vcpu_num_list_regs_' (globals x) = gic_vcpu_num_list_regs_' (globals y)" + "armHSCurVCPU_' (globals x) = armHSCurVCPU_' (globals y)" + "armHSVCPUActive_' (globals x) = armHSVCPUActive_' (globals y)" + shows "((a, x) \ rf_sr) = ((a, y) \ 
rf_sr)" + unfolding rf_sr_def using assms + by simp (rule cstate_relation_only_t_hrs, auto) + +lemma rf_sr_upd_safe[simp]: + assumes rl: "(t_hrs_' (globals (g y))) = (t_hrs_' (globals y))" + and rq: "(ksReadyQueues_' (globals (g y))) = (ksReadyQueues_' (globals y))" + and rqL1: "(ksReadyQueuesL1Bitmap_' (globals (g y))) = (ksReadyQueuesL1Bitmap_' (globals y))" + and rqL2: "(ksReadyQueuesL2Bitmap_' (globals (g y))) = (ksReadyQueuesL2Bitmap_' (globals y))" + and sa: "(ksSchedulerAction_' (globals (g y))) = (ksSchedulerAction_' (globals y))" + and ct: "(ksCurThread_' (globals (g y))) = (ksCurThread_' (globals y))" + and it: "(ksIdleThread_' (globals (g y))) = (ksIdleThread_' (globals y))" + and ist: "intStateIRQTable_'(globals (g y)) = intStateIRQTable_' (globals y)" + and dsi: "ksDomScheduleIdx_' (globals (g y)) = ksDomScheduleIdx_' (globals y)" + and cdom: "ksCurDomain_' (globals (g y)) = ksCurDomain_' (globals y)" + and dt: "ksDomainTime_' (globals (g y)) = ksDomainTime_' (globals y)" + and arch: + "armKSASIDTable_' (globals (g y)) = armKSASIDTable_' (globals y)" + "armKSNextASID_' (globals (g y)) = armKSNextASID_' (globals y)" + "armKSHWASIDTable_' (globals (g y)) = armKSHWASIDTable_' (globals y)" + "phantom_machine_state_' (globals (g y)) = phantom_machine_state_' (globals y)" + "gic_vcpu_num_list_regs_' (globals (g y)) = gic_vcpu_num_list_regs_' (globals y)" + "armHSCurVCPU_' (globals (g y)) = armHSCurVCPU_' (globals y)" + "armHSVCPUActive_' (globals (g y)) = armHSVCPUActive_' (globals y)" + "phantom_machine_state_' (globals (g y)) = phantom_machine_state_' (globals y)" + and gs: "ghost'state_' (globals (g y)) = ghost'state_' (globals y)" + and wu: "(ksWorkUnitsCompleted_' (globals (g y))) = (ksWorkUnitsCompleted_' (globals y))" + shows "((a, (g y)) \ rf_sr) = ((a, y) \ rf_sr)" + using rl rq rqL1 rqL2 sa ct it ist arch wu gs dsi cdom dt by - (rule rf_sr_upd) + +(* More of a well-formed lemma, but \ *) +lemma valid_mdb_cslift_next: + assumes vmdb: "valid_mdb' s" + and sr: "(s, s') \ rf_sr" + and cof: "ctes_of s p = Some cte" + and nz: "mdbNext (cteMDBNode cte) \ 0" + shows "cslift s' (Ptr (mdbNext (cteMDBNode cte)) :: cte_C ptr) \ None" +proof - + from vmdb cof nz obtain cten where + "ctes_of s (mdbNext (cteMDBNode cte)) = Some cten" + by (auto simp: cte_wp_at_ctes_of dest!: valid_mdb_ctes_of_next) + + with sr show ?thesis + apply - + apply (drule (1) rf_sr_ctes_of_clift) + apply clarsimp + done +qed + +lemma valid_mdb_cslift_prev: + assumes vmdb: "valid_mdb' s" + and sr: "(s, s') \ rf_sr" + and cof: "ctes_of s p = Some cte" + and nz: "mdbPrev (cteMDBNode cte) \ 0" + shows "cslift s' (Ptr (mdbPrev (cteMDBNode cte)) :: cte_C ptr) \ None" +proof - + from vmdb cof nz obtain cten where + "ctes_of s (mdbPrev (cteMDBNode cte)) = Some cten" + by (auto simp: cte_wp_at_ctes_of dest!: valid_mdb_ctes_of_prev) + + with sr show ?thesis + apply - + apply (drule (1) rf_sr_ctes_of_clift) + apply clarsimp + done +qed + +lemma rf_sr_cte_at_valid: + "\ cte_wp_at' P (ptr_val p) s; (s,s') \ rf_sr \ \ s' \\<^sub>c (p :: cte_C ptr)" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) rf_sr_ctes_of_clift) + apply (clarsimp simp add: typ_heap_simps) + done + +lemma rf_sr_cte_at_validD: + "\ cte_wp_at' P p s; (s,s') \ rf_sr \ \ s' \\<^sub>c (Ptr p :: cte_C ptr)" + by (simp add: rf_sr_cte_at_valid) + +(* MOVE *) +lemma ccap_relation_NullCap_iff: + "(ccap_relation NullCap cap') = (cap_get_tag cap' = scast cap_null_cap)" + unfolding ccap_relation_def + by (clarsimp simp: map_option_Some_eq2 c_valid_cap_def 
cl_valid_cap_def + cap_to_H_def cap_lift_def Let_def cap_tag_defs + split: if_split) + +(* MOVE *) +lemma ko_at_valid_ntfn': + "\ ko_at' ntfn p s; valid_objs' s \ \ valid_ntfn' ntfn s" + apply (erule obj_atE') + apply (erule (1) valid_objsE') + apply (simp add: projectKOs valid_obj'_def) + done + +(* MOVE *) +lemma ntfn_blocked_in_queueD: + "\ st_tcb_at' ((=) (Structures_H.thread_state.BlockedOnNotification ntfn)) thread \; ko_at' ntfn' ntfn \; invs' \ \ + \ thread \ set (ntfnQueue (ntfnObj ntfn')) \ isWaitingNtfn (ntfnObj ntfn')" + apply (drule sym_refs_st_tcb_atD') + apply clarsimp + apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs + refs_of_rev'[where ko = "KONotification ntfn'", simplified]) + apply (cases "ntfnObj ntfn'") + apply (simp_all add: isWaitingNtfn_def) + done + +(* MOVE *) +lemma valid_ntfn_isWaitingNtfnD: + "\ valid_ntfn' ntfn s; isWaitingNtfn (ntfnObj ntfn) \ + \ (ntfnQueue (ntfnObj ntfn)) \ [] \ (\t\set (ntfnQueue (ntfnObj ntfn)). tcb_at' t s) + \ distinct (ntfnQueue (ntfnObj ntfn))" + unfolding valid_ntfn'_def isWaitingNtfn_def + by (clarsimp split: Structures_H.notification.splits ntfn.splits) + +lemma cmap_relation_ko_atD: + fixes ko :: "'a :: pspace_storable" and mp :: "word32 \ 'a" + assumes ps: "cmap_relation (projectKO_opt \\<^sub>m (ksPSpace s)) (cslift s') f rel" + and ko: "ko_at' ko ptr s" + shows "\ko'. cslift s' (f ptr) = Some ko' \ rel ko ko'" + using ps ko unfolding cmap_relation_def + apply clarsimp + apply (drule bspec [where x = "ptr"]) + apply (clarsimp simp: obj_at'_def projectKOs) + apply (clarsimp simp: obj_at'_def projectKOs) + apply (drule equalityD1) + apply (drule subsetD [where c = "f ptr"]) + apply (rule imageI) + apply clarsimp + apply clarsimp + done + +lemma cmap_relation_ko_atE: + fixes ko :: "'a :: pspace_storable" and mp :: "word32 \ 'a" + assumes ps: "cmap_relation (projectKO_opt \\<^sub>m (ksPSpace s)) (cslift s') f rel" + and ko: "ko_at' ko ptr s" + and rl: "\ko'. 
\cslift s' (f ptr) = Some ko'; rel ko ko'\ \ P" + shows P + using ps ko + apply - + apply (drule (1) cmap_relation_ko_atD) + apply (clarsimp) + apply (erule (1) rl) + done + +lemma ntfn_to_ep_queue: + assumes ko: "ko_at' ntfn' ntfn s" + and waiting: "isWaitingNtfn (ntfnObj ntfn')" + and rf: "(s, s') \ rf_sr" + shows "ep_queue_relation' (cslift s') (ntfnQueue (ntfnObj ntfn')) + (Ptr (ntfnQueue_head_CL + (notification_lift (the (cslift s' (Ptr ntfn)))))) + (Ptr (ntfnQueue_tail_CL + (notification_lift (the (cslift s' (Ptr ntfn))))))" +proof - + from rf have + "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift s') Ptr (cnotification_relation (cslift s'))" + by (rule cmap_relation_ntfn) + + thus ?thesis using ko waiting + apply - + apply (erule (1) cmap_relation_ko_atE) + apply (clarsimp simp: cnotification_relation_def Let_def isWaitingNtfn_def + split: Structures_H.notification.splits ntfn.splits) + done +qed + +lemma map_to_tcbs_from_tcb_at: + "tcb_at' thread s \ map_to_tcbs (ksPSpace s) thread \ None" + unfolding obj_at'_def + by (clarsimp simp: projectKOs) + +lemma tcb_at_h_t_valid: + "\ tcb_at' thread s; (s, s') \ rf_sr \ \ s' \\<^sub>c tcb_ptr_to_ctcb_ptr thread" + apply (drule cmap_relation_tcb) + apply (drule map_to_tcbs_from_tcb_at) + apply (clarsimp simp add: cmap_relation_def) + apply (drule (1) bspec [OF _ domI]) + apply (clarsimp simp add: dom_def tcb_ptr_to_ctcb_ptr_def image_def) + apply (drule equalityD1) + apply (drule subsetD) + apply simp + apply (rule exI [where x = thread]) + apply simp + apply (clarsimp simp: typ_heap_simps) + done + +lemma st_tcb_at_h_t_valid: + "\ st_tcb_at' P thread s; (s, s') \ rf_sr \ \ s' \\<^sub>c tcb_ptr_to_ctcb_ptr thread" + apply (drule pred_tcb_at') + apply (erule (1) tcb_at_h_t_valid) + done + +(* MOVE *) +lemma exs_getObject: + assumes x: "\q n ko. loadObject p q n ko = + (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" + assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" + and objat: "obj_at' (P :: ('a::pspace_storable \ bool)) p s" + shows "\(=) s\ getObject p \\\r :: ('a :: pspace_storable). (=) s\" + using objat unfolding exs_valid_def obj_at'_def + apply clarsimp + apply (rule_tac x = "(the (projectKO_opt ko), s)" in bexI) + apply (clarsimp simp: split_def) + apply (simp add: projectKO_def fail_def split: option.splits) + apply (clarsimp simp: loadObject_default_def getObject_def in_monad return_def lookupAround2_char1 + split_def x P lookupAround2_char1 projectKOs + objBits_def[symmetric] in_magnitude_check project_inject) + done + + +lemma setObject_eq: + fixes ko :: "('a :: pspace_storable)" + assumes x: "\(val :: 'a) old ptr ptr' next. updateObject val old ptr ptr' next = + (updateObject_default val old ptr ptr' next :: kernel_object kernel)" + assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" + and ob: "\(v :: 'a) (v' :: 'a). 
objBits v = objBits v'" + and objat: "obj_at' (P :: ('a::pspace_storable \ bool)) p s" + shows "((), s\ ksPSpace := (ksPSpace s)(p \ injectKO ko)\) \ fst (setObject p ko s)" + using objat unfolding setObject_def obj_at'_def + apply (clarsimp simp: updateObject_default_def in_monad return_def lookupAround2_char1 + split_def x P lookupAround2_char1 projectKOs + objBits_def[symmetric] in_magnitude_check project_inject) + apply (frule ssubst [OF ob, where P = "is_aligned p" and v1 = ko]) + apply (simp add: P in_magnitude_check) + apply (rule conjI) + apply (rule_tac x = obj in exI) + apply simp + apply (erule ssubst [OF ob]) + done + +lemma getObject_eq: + fixes ko :: "'a :: pspace_storable" + assumes x: "\q n ko. loadObject p q n ko = + (loadObject_default p q n ko :: 'a kernel)" + assumes P: "\(v::'a). (1 :: machine_word) < 2 ^ (objBits v)" + and objat: "ko_at' ko p s" + shows "(ko, s) \ fst (getObject p s)" + using objat unfolding exs_valid_def obj_at'_def + apply clarsimp + apply (clarsimp simp: loadObject_default_def getObject_def in_monad return_def lookupAround2_char1 + split_def x P lookupAround2_char2 projectKOs + objBits_def[symmetric] in_magnitude_check project_inject) + done + +lemma threadSet_eq: + "ko_at' tcb thread s \ + ((), s\ ksPSpace := (ksPSpace s)(thread \ injectKO (f tcb))\) \ fst (threadSet f thread s)" + unfolding threadSet_def + apply (clarsimp simp add: in_monad) + apply (rule exI) + apply (rule exI) + apply (rule conjI) + apply (rule getObject_eq) + apply simp + apply (simp add: objBits_simps') + apply assumption + apply (drule setObject_eq [rotated -1]) + apply simp + apply (simp add: objBits_simps') + apply (simp add: objBits_simps) + apply simp + done + +definition + "tcb_null_ep_ptrs a \ a \ tcbEPNext_C := NULL, tcbEPPrev_C := NULL \" + +definition + "tcb_null_sched_ptrs a \ a \ tcbSchedNext_C := NULL, tcbSchedPrev_C := NULL \" + +definition + "tcb_null_queue_ptrs a \ a \ tcbSchedNext_C := NULL, tcbSchedPrev_C := NULL, tcbEPNext_C := NULL, tcbEPPrev_C := NULL\" + +lemma null_sched_queue: + "map_option tcb_null_sched_ptrs \ mp = map_option tcb_null_sched_ptrs \ mp' + \ map_option tcb_null_queue_ptrs \ mp = map_option tcb_null_queue_ptrs \ mp'" + apply (rule ext) + apply (erule_tac x = x in map_option_comp_eqE) + apply simp + apply (clarsimp simp: tcb_null_queue_ptrs_def tcb_null_sched_ptrs_def) + done + +lemma null_ep_queue: + "map_option tcb_null_ep_ptrs \ mp = map_option tcb_null_ep_ptrs \ mp' + \ map_option tcb_null_queue_ptrs \ mp = map_option tcb_null_queue_ptrs \ mp'" + apply (rule ext) + apply (erule_tac x = x in map_option_comp_eqE) + apply simp + apply (case_tac v, case_tac v') + apply (clarsimp simp: tcb_null_queue_ptrs_def tcb_null_ep_ptrs_def) + done + +lemma null_sched_epD: + assumes om: "map_option tcb_null_sched_ptrs \ mp = map_option tcb_null_sched_ptrs \ mp'" + shows "map_option tcbEPNext_C \ mp = map_option tcbEPNext_C \ mp' \ + map_option tcbEPPrev_C \ mp = map_option tcbEPPrev_C \ mp'" + using om + apply - + apply (rule conjI) + apply (rule ext) + apply (erule_tac x = x in map_option_comp_eqE ) + apply simp + apply (case_tac v, case_tac v') + apply (clarsimp simp: tcb_null_sched_ptrs_def) + apply (rule ext) + apply (erule_tac x = x in map_option_comp_eqE ) + apply simp + apply (case_tac v, case_tac v') + apply (clarsimp simp: tcb_null_sched_ptrs_def) + done + +lemma null_ep_schedD: + assumes om: "map_option tcb_null_ep_ptrs \ mp = map_option tcb_null_ep_ptrs \ mp'" + shows "map_option tcbSchedNext_C \ mp = map_option tcbSchedNext_C \ mp' \ + 
map_option tcbSchedPrev_C \ mp = map_option tcbSchedPrev_C \ mp'" + using om + apply - + apply (rule conjI) + apply (rule ext) + apply (erule_tac x = x in map_option_comp_eqE ) + apply simp + apply (case_tac v, case_tac v') + apply (clarsimp simp: tcb_null_ep_ptrs_def) + apply (rule ext) + apply (erule_tac x = x in map_option_comp_eqE ) + apply simp + apply (case_tac v, case_tac v') + apply (clarsimp simp: tcb_null_ep_ptrs_def) + done + +lemma cmap_relation_cong: + assumes adom: "dom am = dom am'" + and cdom: "dom cm = dom cm'" + and rel: "\p a a' b b'. + \ am p = Some a; am' p = Some a'; cm (f p) = Some b; cm' (f p) = Some b' \ \ rel a b = rel' a' b'" + shows "cmap_relation am cm f rel = cmap_relation am' cm' f rel'" + unfolding cmap_relation_def + apply (clarsimp simp: adom cdom) + apply (rule iffI) + apply simp + apply (erule conjE) + apply (drule equalityD1) + apply (rule ballI) + apply (drule (1) bspec) + apply (erule iffD1 [OF rel, rotated -1]) + apply (rule Some_the, erule ssubst [OF adom]) + apply (erule Some_the) + apply (rule Some_the [where f = cm]) + apply (drule subsetD) + apply (erule imageI) + apply (simp add: cdom) + apply (rule Some_the [where f = cm']) + apply (erule subsetD) + apply (erule imageI) + \ \clag\ + apply simp + apply (erule conjE) + apply (drule equalityD1) + apply (rule ballI) + apply (drule (1) bspec) + apply (erule iffD2 [OF rel, rotated -1]) + apply (rule Some_the, erule ssubst [OF adom]) + apply (erule Some_the) + apply (rule Some_the [where f = cm]) + apply (drule subsetD) + apply (erule imageI) + apply (simp add: cdom) + apply (rule Some_the [where f = cm']) + apply (erule subsetD) + apply (erule imageI) + done + +lemma ctcb_relation_null_ep_ptrs: + assumes rel: "cmap_relation mp mp' tcb_ptr_to_ctcb_ptr ctcb_relation" + and same: "map_option tcb_null_ep_ptrs \ mp'' = map_option tcb_null_ep_ptrs \ mp'" + shows "cmap_relation mp mp'' tcb_ptr_to_ctcb_ptr ctcb_relation" + using rel + apply (rule iffD1 [OF cmap_relation_cong, OF _ map_option_eq_dom_eq, rotated -1]) + apply simp + apply (rule same [symmetric]) + apply (drule compD [OF same]) + apply (case_tac b, case_tac b') + apply (simp add: ctcb_relation_def tcb_null_ep_ptrs_def) + done + +lemma map_to_ctes_upd_tcb_no_ctes: + "\ko_at' tcb thread s ; \x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x \ + \ map_to_ctes ((ksPSpace s)(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + apply (erule obj_atE') + apply (simp add: projectKOs objBits_simps) + apply (subst map_to_ctes_upd_tcb') + apply assumption+ + apply (rule ext) + apply (clarsimp split: if_split) + apply (drule (1) bspec [OF _ ranI]) + apply simp + done + +lemma update_ntfn_map_tos: + fixes P :: "Structures_H.notification \ bool" + assumes at: "obj_at' P p s" + shows "map_to_eps ((ksPSpace s)(p \ KONotification ko)) = map_to_eps (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KONotification ko)) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" + using at + by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI + simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ + +lemma update_ep_map_tos: + fixes P :: "endpoint \ bool" + assumes at: "obj_at' P p s" + shows "map_to_ntfns ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" + using at + by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI + simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ + +lemma update_tcb_map_tos: + fixes P :: "tcb \ bool" + assumes at: "obj_at' P p s" + shows "map_to_eps ((ksPSpace s)(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" + and "map_to_ntfns ((ksPSpace s)(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KOTCB ko)) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" + using at + by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI + simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ + +lemma update_asidpool_map_tos: + fixes P :: "asidpool \ bool" + assumes at: "obj_at' P p s" + shows "map_to_ntfns ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = 
map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" + using at + by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI + simp: projectKOs projectKO_opts_defs + split: if_split if_split_asm Structures_H.kernel_object.split_asm + arch_kernel_object.split_asm) + +lemma update_asidpool_map_to_asidpools: + "map_to_asidpools ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) + = (map_to_asidpools (ksPSpace s))(p \ ap)" + by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) + +lemma update_pte_map_to_ptes: + "map_to_ptes ((ksPSpace s)(p \ KOArch (KOPTE pte))) + = (map_to_ptes (ksPSpace s))(p \ pte)" + by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) + +lemma update_pte_map_tos: + fixes P :: "pte \ bool" + assumes at: "obj_at' P p s" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" + using at + by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other + split: if_split_asm if_split + simp: projectKOs, + auto simp: projectKO_opts_defs) + +lemma update_vcpu_map_tos: + fixes P :: "vcpu \ bool" + assumes at: "obj_at' P p s" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data_device (ksPSpace s)" + using at + by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other + split: if_split_asm if_split + simp: projectKOs) + +lemma heap_to_page_data_cong [cong]: + "\ map_to_user_data ks = map_to_user_data ks'; bhp = bhp' \ + \ heap_to_user_data ks bhp = heap_to_user_data ks' bhp'" + unfolding heap_to_user_data_def by simp + +lemma heap_to_device_data_cong [cong]: + "\ map_to_user_data_device ks = map_to_user_data_device ks'; bhp = bhp' \ + \ heap_to_device_data ks bhp = heap_to_device_data ks' bhp'" + unfolding heap_to_device_data_def by simp + +lemma map_leD: + "\ map_le m m'; m x = Some y \ \ m' x = Some y" + by 
(simp add: map_le_def dom_def) + +lemma region_is_bytes_disjoint: + assumes cleared: "region_is_bytes' p n (hrs_htd hrs)" + and not_byte: "typ_uinfo_t TYPE('a :: wf_type) \ typ_uinfo_t TYPE(word8)" + shows "hrs_htd hrs \\<^sub>t (p' :: 'a ptr) + \ {p ..+ n} \ {ptr_val p' ..+ size_of TYPE('a)} = {}" + apply (clarsimp simp: h_t_valid_def valid_footprint_def Let_def) + apply (clarsimp simp: set_eq_iff dest!: intvlD[where p="ptr_val p'"]) + apply (drule_tac x="of_nat k" in spec, clarsimp simp: size_of_def) + apply (cut_tac m=k in typ_slice_t_self[where td="typ_uinfo_t TYPE('a)"]) + apply (clarsimp simp: in_set_conv_nth) + apply (drule_tac x=i in map_leD, simp) + apply (simp add: cleared[unfolded region_is_bytes'_def] not_byte size_of_def) + done + +lemma region_actually_is_bytes: + "region_actually_is_bytes' ptr len htd + \ region_is_bytes' ptr len htd" + by (simp add: region_is_bytes'_def region_actually_is_bytes'_def + split: if_split) + +lemma zero_ranges_are_zero_update[simp]: + "h_t_valid (hrs_htd hrs) c_guard (ptr :: 'a ptr) + \ typ_uinfo_t TYPE('a :: wf_type) \ typ_uinfo_t TYPE(word8) + \ zero_ranges_are_zero rs (hrs_mem_update (heap_update ptr v) hrs) + = zero_ranges_are_zero rs hrs" + apply (clarsimp simp: zero_ranges_are_zero_def hrs_mem_update + intro!: ball_cong[OF refl] conj_cong[OF refl]) + apply (drule region_actually_is_bytes) + apply (drule(2) region_is_bytes_disjoint) + apply (simp add: heap_update_def heap_list_update_disjoint_same Int_commute) + done + +lemma inj_tcb_ptr_to_ctcb_ptr [simp]: + "inj tcb_ptr_to_ctcb_ptr" + apply (rule injI) + apply (simp add: tcb_ptr_to_ctcb_ptr_def) + done + +lemmas tcb_ptr_to_ctcb_ptr_eq [simp] = inj_eq [OF inj_tcb_ptr_to_ctcb_ptr] + +lemma obj_at_cslift_tcb: + fixes P :: "tcb \ bool" + shows "\obj_at' P thread s; (s, s') \ rf_sr\ \ + \ko ko'. 
ko_at' ko thread s \ P ko \ + cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some ko' \ + ctcb_relation ko ko'" + apply (frule obj_at_ko_at') + apply clarsimp + apply (frule cmap_relation_tcb) + apply (drule (1) cmap_relation_ko_atD) + apply fastforce + done + +fun + thread_state_to_tsType :: "thread_state \ machine_word" +where + "thread_state_to_tsType (Structures_H.Running) = scast ThreadState_Running" + | "thread_state_to_tsType (Structures_H.Restart) = scast ThreadState_Restart" + | "thread_state_to_tsType (Structures_H.Inactive) = scast ThreadState_Inactive" + | "thread_state_to_tsType (Structures_H.IdleThreadState) = scast ThreadState_IdleThreadState" + | "thread_state_to_tsType (Structures_H.BlockedOnReply) = scast ThreadState_BlockedOnReply" + | "thread_state_to_tsType (Structures_H.BlockedOnReceive oref cg) = scast ThreadState_BlockedOnReceive" + | "thread_state_to_tsType (Structures_H.BlockedOnSend oref badge cg cgr isc) = scast ThreadState_BlockedOnSend" + | "thread_state_to_tsType (Structures_H.BlockedOnNotification oref) = scast ThreadState_BlockedOnNotification" + +lemma ctcb_relation_thread_state_to_tsType: + "ctcb_relation tcb ctcb \ tsType_CL (thread_state_lift (tcbState_C ctcb)) = thread_state_to_tsType (tcbState tcb)" + unfolding ctcb_relation_def cthread_state_relation_def + by (cases "(tcbState tcb)", simp_all) + + +lemma tcb_ptr_to_tcb_ptr [simp]: + "tcb_ptr_to_ctcb_ptr (ctcb_ptr_to_tcb_ptr x) = x" + unfolding tcb_ptr_to_ctcb_ptr_def ctcb_ptr_to_tcb_ptr_def + by simp + +lemma ctcb_ptr_to_ctcb_ptr [simp]: + "ctcb_ptr_to_tcb_ptr (tcb_ptr_to_ctcb_ptr x) = x" + unfolding ctcb_ptr_to_tcb_ptr_def tcb_ptr_to_ctcb_ptr_def + by simp + +declare ucast_id [simp] + +definition + cap_rights_from_word_canon :: "machine_word \ seL4_CapRights_CL" + where + "cap_rights_from_word_canon wd \ + \ capAllowGrantReply_CL = from_bool (wd !! 3), + capAllowGrant_CL = from_bool (wd !! 2), + capAllowRead_CL = from_bool (wd !! 1), + capAllowWrite_CL = from_bool (wd !! 0)\" + +definition + cap_rights_from_word :: "machine_word \ seL4_CapRights_CL" + where + "cap_rights_from_word wd \ SOME cr. + to_bool (capAllowGrantReply_CL cr) = wd !! 3 \ + to_bool (capAllowGrant_CL cr) = wd !! 2 \ + to_bool (capAllowRead_CL cr) = wd !! 1 \ + to_bool (capAllowWrite_CL cr) = wd !! 
0" + +lemma cap_rights_to_H_from_word [simp]: + "cap_rights_to_H (cap_rights_from_word wd) = rightsFromWord wd" + unfolding cap_rights_from_word_def rightsFromWord_def + apply (rule someI2_ex) + apply (rule exI [where x = "cap_rights_from_word_canon wd"]) + apply (simp add: cap_rights_from_word_canon_def) + apply (simp add: cap_rights_to_H_def) + done + +lemma cmap_relation_updI2: + fixes am :: "machine_word \ 'a" and cm :: "'b typ_heap" + assumes cr: "cmap_relation am cm f rel" + and cof: "am dest = None" + and cc: "rel nv nv'" + and inj: "inj f" + shows "cmap_relation (am(dest \ nv)) (cm(f dest \ nv')) f rel" + using cr cof cc inj + by (clarsimp simp add: cmap_relation_def inj_eq split: if_split) + +lemma rf_sr_heap_user_data_relation: + "(s, s') \ rf_sr \ cmap_relation + (heap_to_user_data (ksPSpace s) (underlying_memory (ksMachineState s))) + (cslift s') Ptr cuser_user_data_relation" + by (clarsimp simp: user_word_at_def rf_sr_def + cstate_relation_def Let_def + cpspace_relation_def) + +lemma rf_sr_heap_device_data_relation: + "(s, s') \ rf_sr \ cmap_relation + (heap_to_device_data (ksPSpace s) (underlying_memory (ksMachineState s))) + (cslift s') Ptr cuser_user_data_device_relation" + by (clarsimp simp: user_word_at_def rf_sr_def + cstate_relation_def Let_def + cpspace_relation_def) + +lemma user_word_at_cross_over: + "\ user_word_at x p s; (s, s') \ rf_sr; p' = Ptr p \ + \ c_guard p' \ hrs_htd (t_hrs_' (globals s')) \\<^sub>t p' + \ h_val (hrs_mem (t_hrs_' (globals s'))) p' = x" + apply (drule rf_sr_heap_user_data_relation) + apply (erule cmap_relationE1) + apply (clarsimp simp: heap_to_user_data_def Let_def + user_word_at_def pointerInUserData_def + typ_at_to_obj_at'[where 'a=user_data, simplified]) + apply (drule obj_at_ko_at', clarsimp) + apply (rule conjI, rule exI, erule ko_at_projectKO_opt) + apply (rule refl) + apply (thin_tac "heap_to_user_data a b c = d" for a b c d) + apply (cut_tac x=p and w="~~ mask pageBits" in word_plus_and_or_coroll2) + apply (rule conjI) + apply (clarsimp simp: user_word_at_def pointerInUserData_def) + apply (simp add: c_guard_def c_null_guard_def ptr_aligned_def) + apply (drule lift_t_g) + apply (clarsimp simp: ) + apply (simp add: align_of_def size_of_def) + apply (fold is_aligned_def[where n=3, simplified], simp) + apply (erule contra_subsetD[rotated]) + apply (rule order_trans[rotated]) + apply (rule_tac x="p && mask pageBits" and y=8 in intvl_sub_offset) + apply (cut_tac y=p and a="mask pageBits && (~~ mask 3)" in word_and_le1) + apply (subst(asm) word_bw_assocs[symmetric], subst(asm) is_aligned_neg_mask_eq, + erule is_aligned_andI1) + apply (simp add: word_le_nat_alt mask_def pageBits_def) + apply simp + apply (clarsimp simp: cuser_user_data_relation_def user_word_at_def) + apply (frule_tac f="[''words_C'']" in h_t_valid_field[OF h_t_valid_clift], + simp+) + apply (drule_tac n="uint (p && mask pageBits >> 3)" in h_t_valid_Array_element) + apply simp + apply (simp add: shiftr_over_and_dist mask_def pageBits_def uint_and) + apply (insert int_and_leR [where a="uint (p >> 3)" and b=511], clarsimp)[1] + apply (simp add: field_lvalue_def + field_lookup_offset_eq[OF trans, OF _ arg_cong[where f=Some, symmetric], OF _ prod.collapse] + word_shift_by_3 shiftr_shiftl1 is_aligned_andI1) + apply (drule_tac x="ucast (p >> 3)" in spec) + apply (simp add: byte_to_word_heap_def Let_def ucast_ucast_mask) + apply (fold shiftl_t2n[where n=3, simplified, simplified mult.commute mult.left_commute]) + apply (simp add: aligned_shiftr_mask_shiftl pageBits_def) + apply 
(rule trans[rotated], rule_tac hp="hrs_mem (t_hrs_' (globals s'))" + and x="Ptr &(Ptr (p && ~~ mask 12) \ [''words_C''])" + in access_in_array) + apply (rule trans) + apply (erule typ_heap_simps) + apply simp+ + apply (rule order_less_le_trans, rule unat_lt2p) + apply simp + apply (fastforce simp add: typ_info_word) + apply simp + apply (rule_tac f="h_val hp" for hp in arg_cong) + apply simp + apply (simp add: field_lvalue_def) + apply (simp add: ucast_nat_def ucast_ucast_mask) + apply (fold shiftl_t2n[where n=3, simplified, simplified mult.commute mult.left_commute]) + apply (simp add: aligned_shiftr_mask_shiftl) + done + +lemma memory_cross_over: + "\(\, s) \ rf_sr; pspace_aligned' \; pspace_distinct' \; + pointerInUserData ptr \\ + \ fst (t_hrs_' (globals s)) ptr = underlying_memory (ksMachineState \) ptr" + apply (subgoal_tac " c_guard (Ptr (ptr && ~~ mask 3)::machine_word ptr) \ + s \\<^sub>c (Ptr (ptr && ~~ mask 3)::machine_word ptr) \ h_val (hrs_mem (t_hrs_' (globals s))) (Ptr (ptr && ~~ mask 3)) = x" for x) + prefer 2 + apply (drule_tac p="ptr && ~~ mask 3" in user_word_at_cross_over[rotated]) + apply simp + apply (simp add: user_word_at_def Aligned.is_aligned_neg_mask + pointerInUserData_def pageBits_def mask_lower_twice) + apply assumption + apply (clarsimp simp: h_val_def from_bytes_def typ_info_word) + apply (drule_tac f="word_rsplit :: machine_word \ word8 list" in arg_cong) + apply (simp add: word_rsplit_rcat_size word_size) + apply (drule_tac f="\xs. xs ! unat (ptr && mask 3)" in arg_cong) + apply (simp add: heap_list_nth unat_mask_3_less_8 + word_plus_and_or_coroll2 add.commute + hrs_mem_def) + apply (cut_tac p=ptr in unat_mask_3_less_8) + apply (subgoal_tac "(ptr && ~~ mask 3) + (ptr && mask 3) = ptr") + apply (subgoal_tac "!n x. n < 8 \ (unat (x::machine_word) = n) = (x = of_nat n)") + apply (clarsimp simp: eval_nat_numeral) + apply (fastforce simp: add.commute elim!: less_SucE) + apply (clarsimp simp: unat64_eq_of_nat word_bits_def) + apply (simp add: add.commute word_plus_and_or_coroll2) + done + +lemma cap_get_tag_isCap_ArchObject0: + assumes cr: "ccap_relation (capability.ArchObjectCap cap) cap'" + shows "(cap_get_tag cap' = scast cap_asid_control_cap) = isASIDControlCap cap + \ (cap_get_tag cap' = scast cap_asid_pool_cap) = isASIDPoolCap cap + \ (cap_get_tag cap' = scast cap_vspace_cap) = (isPageTableCap cap \ capPTType cap = VSRootPT_T) + \ (cap_get_tag cap' = scast cap_page_table_cap) = (isPageTableCap cap \ capPTType cap = NormalPT_T) + \ (cap_get_tag cap' = scast cap_frame_cap) = (isFrameCap cap) + \ (cap_get_tag cap' = scast cap_vcpu_cap) = isVCPUCap cap" + using cr + apply - + apply (erule ccap_relationE) + apply (simp add: cap_to_H_def cap_lift_def Let_def isArchCap_def) + by (clarsimp simp: isCap_simps cap_tag_defs word_le_nat_alt Let_def split: if_split_asm) \ \takes a while\ + +lemma cap_get_tag_isCap_ArchObject: + assumes cr: "ccap_relation (capability.ArchObjectCap cap) cap'" + shows "(cap_get_tag cap' = scast cap_asid_control_cap) = isASIDControlCap cap" + and "(cap_get_tag cap' = scast cap_asid_pool_cap) = isASIDPoolCap cap" + and "(cap_get_tag cap' = scast cap_vspace_cap) = (isPageTableCap cap \ capPTType cap = VSRootPT_T)" + and "(cap_get_tag cap' = scast cap_page_table_cap) = (isPageTableCap cap \ capPTType cap = NormalPT_T)" + and "(cap_get_tag cap' = scast cap_frame_cap) = (isFrameCap cap)" + and "(cap_get_tag cap' = scast cap_vcpu_cap) = isVCPUCap cap" + using cap_get_tag_isCap_ArchObject0 [OF cr] by auto + +lemma cap_get_tag_isCap_unfolded_H_cap: 
+ shows "ccap_relation (capability.ThreadCap v0) cap' \ (cap_get_tag cap' = scast cap_thread_cap)" + and "ccap_relation (capability.NullCap) cap' \ (cap_get_tag cap' = scast cap_null_cap)" + and "ccap_relation (capability.NotificationCap v4 v5 v6 v7) cap' \ (cap_get_tag cap' = scast cap_notification_cap) " + and "ccap_relation (capability.EndpointCap v8 v9 v10 v10b v11 v12) cap' \ (cap_get_tag cap' = scast cap_endpoint_cap)" + and "ccap_relation (capability.IRQHandlerCap v13) cap' \ (cap_get_tag cap' = scast cap_irq_handler_cap)" + and "ccap_relation (capability.IRQControlCap) cap' \ (cap_get_tag cap' = scast cap_irq_control_cap)" + and "ccap_relation (capability.Zombie v14 v15 v16) cap' \ (cap_get_tag cap' = scast cap_zombie_cap)" + and "ccap_relation (capability.ReplyCap v17 v18 vr18b) cap' \ (cap_get_tag cap' = scast cap_reply_cap)" + and "ccap_relation (capability.UntypedCap v100 v19 v20 v20b) cap' \ (cap_get_tag cap' = scast cap_untyped_cap)" + and "ccap_relation (capability.CNodeCap v21 v22 v23 v24) cap' \ (cap_get_tag cap' = scast cap_cnode_cap)" + and "ccap_relation (capability.DomainCap) cap' \ (cap_get_tag cap' = scast cap_domain_cap)" + + and "ccap_relation (capability.ArchObjectCap arch_capability.ASIDControlCap) cap' \ (cap_get_tag cap' = scast cap_asid_control_cap)" + and "ccap_relation (capability.ArchObjectCap (arch_capability.ASIDPoolCap v28 v29)) cap' \ (cap_get_tag cap' = scast cap_asid_pool_cap)" + and "ccap_relation (capability.ArchObjectCap (arch_capability.PageTableCap v30 VSRootPT_T v31)) cap' + \ (cap_get_tag cap' = scast cap_vspace_cap)" + and "ccap_relation (capability.ArchObjectCap (arch_capability.PageTableCap v30 NormalPT_T v31)) cap' + \ (cap_get_tag cap' = scast cap_page_table_cap)" + and "ccap_relation (capability.ArchObjectCap (arch_capability.FrameCap v101 v44 v45 v46 v47)) cap' \ (cap_get_tag cap' = scast cap_frame_cap)" + and "ccap_relation (capability.ArchObjectCap (arch_capability.VCPUCap v48)) cap' \ (cap_get_tag cap' = scast cap_vcpu_cap)" + apply (simp add: cap_get_tag_isCap cap_get_tag_isCap_ArchObject isCap_simps) + apply (frule cap_get_tag_isCap(2), simp) + apply (simp add: cap_get_tag_isCap cap_get_tag_isCap_ArchObject isCap_simps)+ + done + +lemma cap_get_tag_isCap_ArchObject2_worker: + "\ \cap''. 
ccap_relation (ArchObjectCap cap'') cap' \ (cap_get_tag cap' = n) = P cap''; + ccap_relation cap cap'; isArchCap_tag n \ + \ (cap_get_tag cap' = n) + = (isArchObjectCap cap \ P (capCap cap))" + apply (rule iffI) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) + apply (clarsimp simp: isCap_simps) + done + +lemma cap_get_tag_isCap_ArchObject2: + assumes cr: "ccap_relation cap cap'" + shows "(cap_get_tag cap' = scast cap_asid_control_cap) + = (isArchObjectCap cap \ isASIDControlCap (capCap cap))" + and "(cap_get_tag cap' = scast cap_asid_pool_cap) + = (isArchObjectCap cap \ isASIDPoolCap (capCap cap))" + and "(cap_get_tag cap' = scast cap_vspace_cap) + = (isArchObjectCap cap \ isPageTableCap (capCap cap) \ capPTType (capCap cap) = VSRootPT_T)" + and "(cap_get_tag cap' = scast cap_page_table_cap) + = (isArchObjectCap cap \ isPageTableCap (capCap cap) \ capPTType (capCap cap) = NormalPT_T)" + and "(cap_get_tag cap' = scast cap_frame_cap) + = (isArchObjectCap cap \ isFrameCap (capCap cap))" + and "(cap_get_tag cap' = scast cap_vcpu_cap) + = (isArchObjectCap cap \ isVCPUCap (capCap cap))" + by (rule cap_get_tag_isCap_ArchObject2_worker [OF _ cr], + simp add: cap_get_tag_isCap_ArchObject, + simp add: isArchCap_tag_def2 cap_tag_defs)+ + +schematic_goal cap_frame_cap_lift_def': + "cap_get_tag cap = SCAST(32 signed \ 64) cap_frame_cap + \ cap_frame_cap_lift cap = + \capFMappedASID_CL = ?mapped_asid, + capFBasePtr_CL = ?base_ptr, + capFSize_CL = ?frame_size, + capFMappedAddress_CL = ?mapped_address, + capFVMRights_CL = ?vm_rights, + capFIsDevice_CL = ?is_device \" + by (simp add: cap_frame_cap_lift_def cap_lift_def cap_tag_defs) + +lemmas ccap_rel_cap_get_tag_cases_generic = + cap_get_tag_isCap_unfolded_H_cap(1-11) + [OF back_subst[of "\cap. ccap_relation cap cap'" for cap']] + +lemmas ccap_rel_cap_get_tag_cases_arch = + cap_get_tag_isCap_unfolded_H_cap(12-17) + [OF back_subst[of "\cap. ccap_relation (ArchObjectCap cap) cap'" for cap'], + OF back_subst[of "\cap. ccap_relation cap cap'" for cap']] + +lemmas ccap_rel_cap_get_tag_cases_arch' = + ccap_rel_cap_get_tag_cases_arch[OF _ refl] + +(* Same as cap_get_tag_isCap_unfolded_H_cap, but with an "if" for PageTableCap, so that both cases + match. Replacing cap_get_tag_isCap_unfolded_H_cap would be nice, but some existing proofs break + if we do that. Might be possible with additional work. 
*) +lemma cap_get_tag_isCap_unfolded_H_cap2: + shows "ccap_relation (capability.ThreadCap v0) cap' \ (cap_get_tag cap' = scast cap_thread_cap)" + and "ccap_relation (capability.NullCap) cap' \ (cap_get_tag cap' = scast cap_null_cap)" + and "ccap_relation (capability.NotificationCap v4 v5 v6 v7) cap' \ (cap_get_tag cap' = scast cap_notification_cap) " + and "ccap_relation (capability.EndpointCap v8 v9 v10 v10b v11 v12) cap' \ (cap_get_tag cap' = scast cap_endpoint_cap)" + and "ccap_relation (capability.IRQHandlerCap v13) cap' \ (cap_get_tag cap' = scast cap_irq_handler_cap)" + and "ccap_relation (capability.IRQControlCap) cap' \ (cap_get_tag cap' = scast cap_irq_control_cap)" + and "ccap_relation (capability.Zombie v14 v15 v16) cap' \ (cap_get_tag cap' = scast cap_zombie_cap)" + and "ccap_relation (capability.ReplyCap v17 v18 vr18b) cap' \ (cap_get_tag cap' = scast cap_reply_cap)" + and "ccap_relation (capability.UntypedCap v100 v19 v20 v20b) cap' \ (cap_get_tag cap' = scast cap_untyped_cap)" + and "ccap_relation (capability.CNodeCap v21 v22 v23 v24) cap' \ (cap_get_tag cap' = scast cap_cnode_cap)" + and "ccap_relation (capability.DomainCap) cap' \ (cap_get_tag cap' = scast cap_domain_cap)" + + and "ccap_relation (capability.ArchObjectCap arch_capability.ASIDControlCap) cap' \ (cap_get_tag cap' = scast cap_asid_control_cap)" + and "ccap_relation (capability.ArchObjectCap (arch_capability.ASIDPoolCap v28 v29)) cap' \ (cap_get_tag cap' = scast cap_asid_pool_cap)" + and "ccap_relation (capability.ArchObjectCap (arch_capability.PageTableCap v30 v32 v31)) cap' + \ if v32 = VSRootPT_T + then cap_get_tag cap' = scast cap_vspace_cap + else cap_get_tag cap' = scast cap_page_table_cap" + and "ccap_relation (capability.ArchObjectCap (arch_capability.FrameCap v101 v44 v45 v46 v47)) cap' \ (cap_get_tag cap' = scast cap_frame_cap)" + and "ccap_relation (capability.ArchObjectCap (arch_capability.VCPUCap v48)) cap' \ (cap_get_tag cap' = scast cap_vcpu_cap)" + apply (simp add: cap_get_tag_isCap cap_get_tag_isCap_ArchObject isCap_simps) + apply (frule cap_get_tag_isCap(2), simp) + apply (clarsimp simp: cap_get_tag_isCap cap_get_tag_isCap_ArchObject isCap_simps + split: if_splits pt_type.splits)+ + done + +lemmas ccap_rel_cap_get_tag_cases_arch2 = + cap_get_tag_isCap_unfolded_H_cap2(12-16) + [OF back_subst[of "\cap. ccap_relation (ArchObjectCap cap) cap'" for cap'], + OF back_subst[of "\cap. ccap_relation cap cap'" for cap']] + +lemmas ccap_rel_cap_get_tag_cases_arch2' = + ccap_rel_cap_get_tag_cases_arch2[OF _ refl] + +lemmas cap_lift_defs = + cap_untyped_cap_lift_def + cap_endpoint_cap_lift_def + cap_notification_cap_lift_def + cap_reply_cap_lift_def + cap_cnode_cap_lift_def + cap_thread_cap_lift_def + cap_irq_handler_cap_lift_def + cap_zombie_cap_lift_def + cap_frame_cap_lift_def + cap_vspace_cap_lift_def + cap_page_table_cap_lift_def + cap_asid_pool_cap_lift_def + cap_vcpu_cap_lift_def + +lemma cap_lift_Some_CapD: + "\c'. cap_lift c = Some (Cap_untyped_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_untyped_cap" + "\c'. cap_lift c = Some (Cap_endpoint_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_endpoint_cap" + "\c'. cap_lift c = Some (Cap_notification_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_notification_cap" + "\c'. cap_lift c = Some (Cap_reply_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_reply_cap" + "\c'. cap_lift c = Some (Cap_cnode_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_cnode_cap" + "\c'. 
cap_lift c = Some (Cap_thread_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_thread_cap" + "\c'. cap_lift c = Some (Cap_irq_handler_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_irq_handler_cap" + "\c'. cap_lift c = Some (Cap_zombie_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_zombie_cap" + "\c'. cap_lift c = Some (Cap_frame_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_frame_cap" + "\c'. cap_lift c = Some (Cap_vspace_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_vspace_cap" + "\c'. cap_lift c = Some (Cap_page_table_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_page_table_cap" + "\c'. cap_lift c = Some (Cap_asid_pool_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_asid_pool_cap" + "\c'. cap_lift c = Some (Cap_vcpu_cap c') \ cap_get_tag c = SCAST(32 signed \ 64) cap_vcpu_cap" + by (auto simp: cap_lifts cap_lift_defs) + +lemma rf_sr_armKSGlobalUserVSpace: + "(s, s') \ rf_sr \ armKSGlobalUserVSpace (ksArchState s) = ptr_val armKSGlobalUserVSpace_Ptr" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + carch_globals_armKSGlobalUserVSpace) + +lemma ghost_assertion_size_logic': + "unat (sz :: machine_word) \ gsMaxObjectSize s + \ cstate_relation s gs + \ gs_get_assn cap_get_capSizeBits_'proc (ghost'state_' gs) = 0 \ + sz \ gs_get_assn cap_get_capSizeBits_'proc (ghost'state_' gs)" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def ghost_size_rel_def + linorder_not_le word_less_nat_alt) + +lemma ghost_assertion_size_logic: + "unat (sz :: machine_word) \ gsMaxObjectSize s + \ (s, \) \ rf_sr + \ gs_get_assn cap_get_capSizeBits_'proc (ghost'state_' (globals \)) = 0 \ + sz \ gs_get_assn cap_get_capSizeBits_'proc (ghost'state_' (globals \))" + by (clarsimp simp: rf_sr_def ghost_assertion_size_logic') + +lemma gs_set_assn_Delete_cstate_relation: + "cstate_relation s (ghost'state_'_update (gs_set_assn cteDeleteOne_'proc v) gs) + = cstate_relation s gs" + apply (cases "ghost'state_' gs") + by (auto simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + cmachine_state_relation_def ghost_assertion_data_set_def + ghost_size_rel_def ghost_assertion_data_get_def + cteDeleteOne_'proc_def cap_get_capSizeBits_'proc_def) + +lemma update_typ_at: + assumes at: "obj_at' P p s" + and tp: "\obj. 
P obj \ koTypeOf (injectKOS obj) = koTypeOf ko" + shows "typ_at' T p' (s \ksPSpace := (ksPSpace s)(p \ ko)\) = typ_at' T p' s" + using at + by (auto elim!: obj_atE' simp: typ_at'_def ko_wp_at'_def + dest!: tp[rule_format] + simp: project_inject projectKO_eq split: kernel_object.splits if_split_asm, + simp_all add: objBits_def objBitsT_koTypeOf[symmetric] ps_clear_upd + del: objBitsT_koTypeOf) + +lemma ptr_val_tcb_ptr_mask: + "obj_at' (P :: tcb \ bool) thread s + \ ptr_val (tcb_ptr_to_ctcb_ptr thread) && (~~ mask tcbBlockSizeBits) + = thread" + apply (clarsimp simp: obj_at'_def tcb_ptr_to_ctcb_ptr_def projectKOs) + apply (simp add: is_aligned_add_helper ctcb_offset_defs objBits_simps') + done + +lemmas ptr_val_tcb_ptr_mask'[simp] + = ptr_val_tcb_ptr_mask[unfolded mask_def tcbBlockSizeBits_def, simplified] + +lemma typ_uinfo_t_diff_from_typ_name: + "typ_name (typ_info_t TYPE ('a :: c_type)) \ typ_name (typ_info_t TYPE('b :: c_type)) + \ typ_uinfo_t (aty :: 'a itself) \ typ_uinfo_t (bty :: 'b itself)" + by (clarsimp simp: typ_uinfo_t_def td_diff_from_typ_name) + +declare ptr_add_assertion'[simp] typ_uinfo_t_diff_from_typ_name[simp] + +lemma clift_array_assertion_imp: + "clift hrs (p :: (('a :: wf_type)['b :: finite]) ptr) = Some v + \ htd = hrs_htd hrs + \ n \ 0 + \ \i. p' = ptr_add (ptr_coerce p) (int i) + \ i + n \ CARD('b) + \ array_assertion (p' :: 'a ptr) n htd" + apply clarsimp + apply (drule h_t_valid_clift) + apply (drule array_ptr_valid_array_assertionD) + apply (drule_tac j=i in array_assertion_shrink_leftD, simp) + apply (erule array_assertion_shrink_right) + apply simp + done + +lemma pt_array_map_relation_vs: + "\ gsPTTypes (ksArchState s) pt = Some VSRootPT_T; pt_array_relation s \; c_guard (vs_Ptr pt) \ + \ clift (t_hrs_' \) (vs_Ptr pt) \ None" + apply (clarsimp simp: cvariable_array_map_relation_def simp flip: h_t_valid_clift_Some_iff) + apply (erule allE, erule allE, erule (1) impE) + apply (clarsimp simp: h_t_valid_def h_t_array_valid_def typ_uinfo_array_tag_n_m_eq + ptTranslationBits_vs_array_len) + done + +lemma pt_array_map_relation_pt: + "\ gsPTTypes (ksArchState s) pt = Some NormalPT_T; pt_array_relation s \; c_guard (pt_Ptr pt) \ + \ clift (t_hrs_' \) (pt_Ptr pt) \ None" + apply (clarsimp simp: cvariable_array_map_relation_def simp flip: h_t_valid_clift_Some_iff) + apply (erule allE, erule allE, erule (1) impE) + apply (clarsimp simp: h_t_valid_def h_t_array_valid_def typ_uinfo_array_tag_n_m_eq bit_simps) + done + +lemma vspace_at_rf_sr: + "\ page_table_at' VSRootPT_T pt s; gsPTTypes (ksArchState s) pt = Some VSRootPT_T; + (s, s') \ rf_sr \ + \ cslift s' (vs_Ptr pt) \ None" + apply (frule rf_sr_cpte_relation) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (drule (1) pt_array_map_relation_vs) + apply (frule page_table_pte_at') + apply (clarsimp dest!: pte_at_ko') + apply (drule (1) cmap_relation_ko_atD) + apply (clarsimp simp: page_table_at'_def) + apply (drule c_guard_clift) + apply (clarsimp simp: c_guard_def c_null_guard_def ptr_aligned_def) + apply (simp add: align_of_def typ_info_array array_tag_def align_td_array_tag) + apply clarsimp + apply (drule aligned_intvl_0, simp) + apply (clarsimp simp: bit_simps Kernel_Config.config_ARM_PA_SIZE_BITS_40_def intvl_self) + apply simp + done + +lemma ptable_at_rf_sr: + "\ page_table_at' NormalPT_T pt s; gsPTTypes (ksArchState s) pt = Some NormalPT_T; + (s, s') \ rf_sr \ + \ cslift s' (pt_Ptr pt) \ None" + apply (frule rf_sr_cpte_relation) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) 
+ apply (drule (1) pt_array_map_relation_pt) + apply (frule page_table_pte_at') + apply (clarsimp dest!: pte_at_ko') + apply (drule (1) cmap_relation_ko_atD) + apply (clarsimp simp: page_table_at'_def) + apply (drule c_guard_clift) + apply (clarsimp simp: c_guard_def c_null_guard_def ptr_aligned_def) + apply (simp add: align_of_def typ_info_array array_tag_def align_td_array_tag) + apply clarsimp + apply (drule aligned_intvl_0, simp) + apply (clarsimp simp: bit_simps intvl_self) + apply simp + done + +lemma asid_pool_at_rf_sr: + "\ko_at' (ASIDPool pool) p s; (s, s') \ rf_sr\ \ + \pool'. cslift s' (ap_Ptr p) = Some pool' \ + casid_pool_relation (ASIDPool pool) pool'" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) + apply (erule (1) cmap_relation_ko_atE) + apply clarsimp + done + +lemma asid_pool_at_c_guard: + "\asid_pool_at' p s; (s, s') \ rf_sr\ \ c_guard (ap_Ptr p)" + by (fastforce intro: typ_heap_simps dest!: asid_pool_at_ko' asid_pool_at_rf_sr) + +lemma gsUntypedZeroRanges_rf_sr: + "\ (start, end) \ gsUntypedZeroRanges s; (s, s') \ rf_sr \ + \ region_actually_is_zero_bytes start (unat ((end + 1) - start)) s'" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + zero_ranges_are_zero_def) + apply (drule(1) bspec) + apply clarsimp + done + +lemma ctes_of_untyped_zero_rf_sr: + "\ ctes_of s p = Some cte; (s, s') \ rf_sr; + untyped_ranges_zero' s; + untypedZeroRange (cteCap cte) = Some (start, end) \ + \ region_actually_is_zero_bytes start (unat ((end + 1) - start)) s'" + apply (erule gsUntypedZeroRanges_rf_sr[rotated]) + apply (clarsimp simp: untyped_ranges_zero_inv_def) + apply (rule_tac a=p in ranI) + apply (simp add: map_comp_def cteCaps_of_def) + done + +lemma heap_list_is_zero_mono: + "heap_list_is_zero hmem p n \ n' \ n + \ heap_list_is_zero hmem p n'" + apply (induct n arbitrary: n' p) + apply simp + apply clarsimp + apply (case_tac n', simp_all) + done + +lemma heap_list_is_zero_mono2: + "heap_list_is_zero hmem p n + \ {p' ..+ n'} \ {p ..+ n} + \ heap_list_is_zero hmem p' n'" + using heap_list_h_eq2[where h'="\_. 0"] + heap_list_h_eq_better[where h'="\_. 0"] + apply (simp(no_asm_use) add: heap_list_rpbs) + apply blast + done + +lemma invs_urz[elim!]: + "invs' s \ untyped_ranges_zero' s" + by (clarsimp simp: invs'_def valid_state'_def) + +lemma arch_fault_tag_not_fault_tag_simps [simp]: + "(arch_fault_to_fault_tag arch_fault = scast seL4_Fault_CapFault) = False" + "(arch_fault_to_fault_tag arch_fault = scast seL4_Fault_UserException) = False" + "(arch_fault_to_fault_tag arch_fault = scast seL4_Fault_UnknownSyscall) = False" + by (cases arch_fault ; simp add: seL4_Faults seL4_Arch_Faults)+ + +lemma pte_at_rf_sr: + "\ko_at' pte p s; (s, s') \ rf_sr\ \ + \pte'. cslift s' (pte_Ptr p) = Some pte' \ cpte_relation pte pte'" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) + apply (erule (1) cmap_relation_ko_atE) + apply clarsimp + done + +lemma vcpu_at_rf_sr: + "\ko_at' vcpu p s; (s, s') \ rf_sr\ \ + \vcpu'. 
cslift s' (vcpu_Ptr p) = Some vcpu' \ cvcpu_relation vcpu vcpu'" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) + apply (erule (1) cmap_relation_ko_atE) + apply clarsimp + done + +(* all definitions of seL4_VCPUReg enum - these don't get autogenerated *) +lemmas seL4_VCPUReg_defs = + seL4_VCPUReg_SCTLR_def + seL4_VCPUReg_TTBR0_def + seL4_VCPUReg_TTBR1_def + seL4_VCPUReg_TCR_def + seL4_VCPUReg_MAIR_def + seL4_VCPUReg_AMAIR_def + seL4_VCPUReg_CIDR_def + seL4_VCPUReg_ACTLR_def + seL4_VCPUReg_CPACR_def + seL4_VCPUReg_AFSR0_def + seL4_VCPUReg_AFSR1_def + seL4_VCPUReg_ESR_def + seL4_VCPUReg_FAR_def + seL4_VCPUReg_ISR_def + seL4_VCPUReg_VBAR_def + seL4_VCPUReg_TPIDR_EL1_def + seL4_VCPUReg_VMPIDR_EL2_def + seL4_VCPUReg_SP_EL1_def + seL4_VCPUReg_ELR_EL1_def + seL4_VCPUReg_SPSR_EL1_def + seL4_VCPUReg_CNTV_CTL_def + seL4_VCPUReg_CNTV_CVAL_def + seL4_VCPUReg_CNTVOFF_def + seL4_VCPUReg_CNTKCTL_EL1_def + +(* rewrite a definition from a C enum into a vcpureg enumeration lookup *) +lemma vcpureg_eq_use_type: + fixes value' :: int_word + assumes e[simp]: "value \ value'" + assumes len: "unat value' < length (enum :: vcpureg list)" + shows "(of_nat (fromEnum (reg :: vcpureg)) = (scast value :: machine_word)) + = (reg = enum ! unat (scast value' :: machine_word))" +proof - + + note local_simps[simp] = unat_ucast_upcast is_up + + from len have "0 \s value" + by (intro word_0_sle_from_less) + (clarsimp simp: word_less_nat_alt fromEnum_maxBound_vcpureg_def simp flip: maxBound_less_length) + + then have [simp]: "(scast value' :: machine_word) = (ucast value' :: machine_word)" + by (simp add: signed_ge_zero_scast_eq_ucast) + + from len have toEnum: "enum ! unat value' = (toEnum (unat value') :: vcpureg)" + by (simp add: toEnum_def) + + have toEnum': "enum ! fromEnum reg = (reg :: vcpureg)" + using toEnum_def[symmetric, of "fromEnum reg", where 'a=vcpureg] maxBound_is_bound[of reg] + by (simp add: maxBound_is_length nat_le_Suc_less) + + have "unat (of_nat (fromEnum reg) :: machine_word) = fromEnum reg" + by (-, subst unat_of_nat_eq, simp_all) + (fastforce intro: order_le_less_trans[OF maxBound_is_bound] + simp: maxBound_is_length enum_vcpureg) + + thus ?thesis + by (fastforce simp: toEnum' toEnum maxBound_is_length len nat_le_Suc_less ucast_nat_def) + +qed + +(* e.g. 
(of_nat (fromEnum reg) = ucast seL4_VCPUReg_SCTLR) = (reg = VCPURegSCTLR) *) +lemmas vcpureg_eq_use_types = + seL4_VCPUReg_defs[THEN vcpureg_eq_use_type, simplified, + unfolded enum_vcpureg, simplified] + +(* C parser will generate terms like SCAST(32 signed \ 64) seL4_VCPUReg_SCTLR, which we want to + simplify to their Haskell equivalent under word_unat_eq_iff *) +lemma unat_ucast_seL4_VCPUReg_SCTLR_simp[simp]: + "unat (scast seL4_VCPUReg_SCTLR :: machine_word) = fromEnum VCPURegSCTLR" + by (simp add: vcpureg_eq_use_types[where reg=VCPURegSCTLR, simplified, symmetric]) + +lemma unat_scast_seL4_VCPUReg_ACTLR_simp[simp]: + "unat (scast seL4_VCPUReg_ACTLR :: machine_word) = fromEnum VCPURegACTLR" + by (simp add: vcpureg_eq_use_types[where reg=VCPURegACTLR, simplified, symmetric]) + +lemmas cvcpu_relation_regs_def = + cvcpu_relation_def[simplified cvcpu_regs_relation_def Let_def vcpuSCTLR_def, simplified] + +lemmas cvcpu_relation_vppi_def = + cvcpu_relation_def[simplified cvcpu_vppi_masked_relation_def, simplified] + +lemma capVCPUPtr_eq: + "\ ccap_relation (ArchObjectCap cap) cap'; isArchCap isVCPUCap (ArchObjectCap cap) \ + \ capVCPUPtr_CL (cap_vcpu_cap_lift cap') + = capVCPUPtr cap" + apply (simp only: cap_get_tag_isCap[symmetric]) + apply (drule (1) cap_get_tag_to_H) + apply clarsimp + done + +lemma rf_sr_armKSGICVCPUNumListRegs: + "(s, s') \ rf_sr + \ gic_vcpu_num_list_regs_' (globals s') = of_nat (armKSGICVCPUNumListRegs (ksArchState s))" + by (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def Let_def) + +lemma update_vcpu_map_to_vcpu: + "map_to_vcpus ((ksPSpace s)(p \ KOArch (KOVCPU vcpu))) + = (map_to_vcpus (ksPSpace s))(p \ vcpu)" + by (rule ext, clarsimp simp: map_comp_def split: if_split) + +lemma capTCBPtr_eq: + "\ ccap_relation cap cap'; isThreadCap cap \ + \ cap_thread_cap_CL.capTCBPtr_CL (cap_thread_cap_lift cap') + = ptr_val (tcb_ptr_to_ctcb_ptr (capTCBPtr cap))" + apply (simp add: cap_get_tag_isCap[symmetric]) + apply (drule(1) cap_get_tag_to_H) + apply clarsimp + done + +lemma rf_sr_ctcb_queue_relation: + "\ (s, s') \ rf_sr; d \ maxDomain; p \ maxPriority \ + \ ctcb_queue_relation (ksReadyQueues s (d, p)) + (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p))" + unfolding rf_sr_def cstate_relation_def cready_queues_relation_def + apply (clarsimp simp: Let_def seL4_MinPrio_def minDom_def maxDom_to_H maxPrio_to_H) + done + +lemma rf_sr_sched_action_relation: + "(s, s') \ rf_sr + \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + +lemma canonical_address_tcb_ptr: + "\ canonical_address t; is_aligned t tcbBlockSizeBits \ + \ canonical_address (ptr_val (tcb_ptr_to_ctcb_ptr t))" + apply (rule canonical_address_and_maskI) + apply (drule canonical_address_and_maskD) + apply (clarsimp simp: tcb_ptr_to_ctcb_ptr_def canonical_address_range tcbBlockSizeBits_def + ctcb_offset_defs and_mask_plus) + done + +lemma canonical_address_ctcb_ptr: + assumes "canonical_address (ctcb_ptr_to_tcb_ptr t)" "is_aligned (ctcb_ptr_to_tcb_ptr t) tcbBlockSizeBits" + shows "canonical_address (ptr_val t)" +proof - + from assms(2)[unfolded ctcb_ptr_to_tcb_ptr_def] + have "canonical_address ((ptr_val t - ctcb_offset) + ctcb_offset)" + apply (rule canonical_address_add; simp add: objBits_simps' ctcb_offset_defs canonical_bit_def) + using assms(1) + by (simp add: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs) + thus ?thesis by simp +qed + +lemma tcb_and_not_mask_canonical: + "\ 
pspace_canonical' s; tcb_at' t s; n < tcbBlockSizeBits\ \ + tcb_Ptr (make_canonical (ptr_val (tcb_ptr_to_ctcb_ptr t)) && ~~ mask n) = tcb_ptr_to_ctcb_ptr t" + apply (frule (1) obj_at'_is_canonical) + apply (drule canonical_address_tcb_ptr) + apply (clarsimp simp: obj_at'_def objBits_simps' split: if_splits) + apply (clarsimp simp: canonical_make_canonical_idem) + apply (prop_tac "ptr_val (tcb_ptr_to_ctcb_ptr t) && ~~ mask n = ptr_val (tcb_ptr_to_ctcb_ptr t)") + apply (simp add: tcb_ptr_to_ctcb_ptr_def ctcb_offset_defs) + apply (rule is_aligned_neg_mask_eq) + apply (clarsimp simp: obj_at'_def objBits_simps') + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp) + apply (rule is_aligned_weaken[where x="tcbBlockSizeBits - 1"]) + apply (simp add: is_aligned_def objBits_simps') + apply (simp add: objBits_simps') + apply simp + done + +lemma tcb_ptr_canonical: + "\ pspace_canonical' s; tcb_at' t s \ \ + tcb_Ptr (make_canonical (ptr_val (tcb_ptr_to_ctcb_ptr t))) = tcb_ptr_to_ctcb_ptr t" + apply (frule (1) obj_at'_is_canonical) + apply (drule canonical_address_tcb_ptr) + apply (clarsimp simp: obj_at'_def objBits_simps' split: if_splits) + apply (clarsimp simp: canonical_make_canonical_idem) + done + +lemma ccap_relation_capASIDBase: + "\ ccap_relation (ArchObjectCap (ASIDPoolCap p asid)) cap \ \ + capASIDBase_CL (cap_asid_pool_cap_lift cap) = asid" + by (clarsimp simp: cap_to_H_def Let_def cap_asid_pool_cap_lift_def + elim!: ccap_relationE + split: cap_CL.splits if_splits) + +lemma ccap_relation_capASIDPool: + "\ ccap_relation (ArchObjectCap (ASIDPoolCap p asid)) cap \ \ + capASIDPool_CL (cap_asid_pool_cap_lift cap) = p" + by (clarsimp simp: cap_to_H_def Let_def cap_asid_pool_cap_lift_def + elim!: ccap_relationE + split: cap_CL.splits if_splits) + +lemma asid_map_get_tag_neq_none[simp]: + "(asid_map_get_tag amap \ scast asid_map_asid_map_none) = + (asid_map_get_tag amap = scast asid_map_asid_map_vspace)" + by (simp add: asid_map_get_tag_def asid_map_tag_defs) + +lemma asid_map_get_tag_neq_vspace[simp]: + "(asid_map_get_tag amap \ scast asid_map_asid_map_vspace) = + (asid_map_get_tag amap = scast asid_map_asid_map_none)" + by (simp add: asid_map_get_tag_def asid_map_tag_defs) + +lemma asid_map_tags_neq[simp]: + "(scast asid_map_asid_map_vspace :: machine_word) \ scast asid_map_asid_map_none" + "(scast asid_map_asid_map_none :: machine_word) \ scast asid_map_asid_map_vspace" + by (auto simp: asid_map_tag_defs) + +lemma casid_map_relation_None[simp]: + "casid_map_relation None amap = (asid_map_get_tag amap = scast asid_map_asid_map_none)" + by (simp add: casid_map_relation_def asid_map_lift_def Let_def split: option.splits if_splits) + +lemma casid_map_relation_None_lift: + "casid_map_relation None v = (asid_map_lift v = Some Asid_map_asid_map_none)" + by (clarsimp simp: casid_map_relation_def split: option.splits asid_map_CL.splits) + + +(* FIXME move and share with other architectures (note: needs locale from C parse) *) +abbreviation Basic_heap_update :: + "(globals myvars \ ('a::c_type) ptr) \ (globals myvars \ 'a) + \ (globals myvars, int, strictc_errortype) com" + where + "Basic_heap_update p f \ + (Basic (\s. 
globals_update (t_hrs_'_update (hrs_mem_update (heap_update (p s) (f s)))) s))" + +lemma numDomains_sge_1_simp: + "1 Suc 0 < Kernel_Config.numDomains" + apply (simp add: word_sless_alt sint_numDomains_to_H) + apply (subst nat_less_as_int, simp) + done + +lemma unat_scast_numDomains: + "unat (SCAST(32 signed \ machine_word_len) Kernel_C.numDomains) = unat Kernel_C.numDomains" + by (simp add: scast_eq sint_numDomains_to_H unat_numDomains_to_H numDomains_machine_word_safe) + +(* link up Kernel_Config loaded from the seL4 build system with physBase in C code *) +lemma physBase_spec: + "\s. \\ {s} Call physBase_'proc {t. ret__unsigned_long_' t = Kernel_Config.physBase }" + apply (rule allI, rule conseqPre, vcg) + apply (simp add: Kernel_Config.physBase_def) + done + +lemma rf_sr_obj_update_helper: + "(s, s'\ globals := globals s' \ t_hrs_' := t_hrs_' (globals (undefined + \ globals := (undefined \ t_hrs_' := f (globals s') (t_hrs_' (globals s')) \)\))\\) \ rf_sr + \ (s, globals_update (\v. t_hrs_'_update (f v) v) s') \ rf_sr" + by (simp cong: StateSpace.state.fold_congs globals.fold_congs) + +end +end diff --git a/proof/crefine/AARCH64/Schedule_C.thy b/proof/crefine/AARCH64/Schedule_C.thy new file mode 100644 index 0000000000..4f4e7cb8e2 --- /dev/null +++ b/proof/crefine/AARCH64/Schedule_C.thy @@ -0,0 +1,893 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Schedule_C +imports Tcb_C Detype_C +begin + +instance tcb :: no_vcpu by intro_classes auto + +(*FIXME: arch_split: move up?*) +context Arch begin +context begin global_naming global +requalify_facts + Thread_H.switchToIdleThread_def + Thread_H.switchToThread_def +end +end + +context kernel_m begin + +lemma Arch_switchToIdleThread_ccorres: + "ccorres dc xfdc invs_no_cicd' UNIV [] + Arch.switchToIdleThread (Call Arch_switchToIdleThread_'proc)" + apply (cinit simp: AARCH64_H.switchToIdleThread_def) + apply (ctac (no_vcg) add: vcpu_switch_ccorres_None) + apply (simp add: setGlobalUserVSpace_def) + apply (rule ccorres_symb_exec_l) + apply (rename_tac globalUserVSpace) + apply (rule ccorres_gen_asm_state[where P="valid_arch_state'"]) + apply (rule ccorres_h_t_valid_armKSGlobalUserVSpace) + apply (rule_tac xf'=ret__unsigned_long_' and R'=UNIV and + R="\s. globalUserVSpace = (armKSGlobalUserVSpace \ ksArchState) s" and + val="addrFromKPPtr globalUserVSpace" + in ccorres_symb_exec_r_known_rv) + apply clarsimp + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def Let_def + carch_globals_def) + apply ceqv + apply csymbr + apply (ctac add: setVSpaceRoot_ccorres) + apply clarsimp + apply vcg + apply wpsimp+ + apply (clarsimp simp: invs_no_cicd'_def valid_pspace'_def valid_idle'_tcb_at'_ksIdleThread + canonical_address_and_maskD valid_arch_state_armKSGlobalUserVSpace) + done + +lemma switchToIdleThread_ccorres: + "ccorres dc xfdc invs_no_cicd' UNIV hs + switchToIdleThread (Call switchToIdleThread_'proc)" + apply (cinit) + apply (rule ccorres_stateAssert) + apply (rule ccorres_symb_exec_l) + apply (ctac (no_vcg) add: Arch_switchToIdleThread_ccorres) + apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) + apply (rule_tac P="\s. 
thread = ksIdleThread s" and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (wpsimp simp: AARCH64_H.switchToIdleThread_def wp: hoare_drop_imps)+ + done + +crunches vcpuSwitch + for pspace_canonical'[wp]: pspace_canonical' + (wp: crunch_wps) + +lemma Arch_switchToThread_ccorres: + "ccorres dc xfdc + (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) + (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) + [] + (Arch.switchToThread t) (Call Arch_switchToThread_'proc)" + apply (cinit lift: tcb_') + apply (unfold AARCH64_H.switchToThread_def)[1] + apply (rule ccorres_symb_exec_l3) + apply (rule_tac P="ko_at' rv t" in ccorres_cross_over_guard) + apply (ctac add: vcpu_switch_ccorres) (* c *) + apply simp + apply (ctac (no_vcg) add: setVMRoot_ccorres) + apply (simp (no_asm) del: Collect_const) + apply wpsimp + apply (vcg exspec=vcpu_switch_modifies) + apply wpsimp+ + apply (rule_tac Q="\rv s. all_invs_but_ct_idle_or_in_cur_domain' s + \ case_option \ (ko_wp_at' (is_vcpu' and hyp_live')) (atcbVCPUPtr (tcbArch rv)) s + \ obj_at' (\t::tcb. True) t s" in hoare_strengthen_post[rotated]) + apply (clarsimp simp: vcpu_at_is_vcpu' invs_no_cicd'_def valid_state'_def valid_pspace'_def + elim!: ko_wp_at'_weakenE + split: option.splits) + apply (wpsimp wp: getObject_tcb_hyp_sym_refs simp: empty_fail_getObject)+ + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + apply (frule cmap_relation_tcb, frule (1) cmap_relation_ko_atD) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def carch_tcb_relation_def) + done + +lemma invs_no_cicd'_pspace_aligned': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_aligned' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + +lemma invs_no_cicd'_pspace_distinct': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_distinct' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + +lemma threadGet_exs_valid[wp]: + "tcb_at' t s \ \(=) s\ threadGet f t \\\r. (=) s\" + unfolding threadGet_def liftM_def + apply (wpsimp wp: exs_getObject) + apply (fastforce simp: obj_at'_def objBits_simps')+ + done + +lemma isRunnable_exs_valid[wp]: + "tcb_at' t s \ \(=) s\ isRunnable t \\\r. (=) s\" + unfolding isRunnable_def getThreadState_def + by (wpsimp wp: exs_getObject) + +(* FIXME: move *) +lemma switchToThread_ccorres: + "ccorres dc xfdc + (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) + (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) + hs + (switchToThread t) + (Call switchToThread_'proc)" + apply (clarsimp simp: switchToThread_def) + apply (rule ccorres_symb_exec_l'[OF _ _ isRunnable_sp]; (solves wpsimp)?) + apply (rule ccorres_symb_exec_l'[OF _ _ assert_sp]; (solves wpsimp)?) 
+ apply (rule ccorres_stateAssert_fwd)+ + apply (cinit' lift: thread_') + apply (ctac (no_vcg) add: Arch_switchToThread_ccorres) + apply (ctac (no_vcg) add: tcbSchedDequeue_ccorres) + apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: setCurThread_def simpler_modify_def rf_sr_def cstate_relation_def + Let_def carch_state_relation_def cmachine_state_relation_def) + apply (wpsimp wp: Arch_switchToThread_invs_no_cicd' hoare_drop_imps + | strengthen invs_no_cicd'_pspace_aligned' invs_no_cicd'_pspace_distinct')+ + done + +lemma activateThread_ccorres: + "ccorres dc xfdc + (ct_in_state' activatable' and (\s. sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') + UNIV [] + activateThread + (Call activateThread_'proc)" + apply (cinit) + apply (rule ccorres_pre_getCurThread) + apply (ctac add: get_tsType_ccorres [where f="\s. ksCurThread_' (globals s)"]) + apply (rule_tac P="activatable' rv" in ccorres_gen_asm) + apply (wpc) + apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) + apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) + apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) + apply simp + apply (rule ccorres_cond_true) + apply (rule ccorres_return_Skip) + apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) + apply (simp add: ThreadState_defs del: Collect_const) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: activateIdleThread_def return_def) + apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) + apply (simp add: ThreadState_defs del: Collect_const) + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (ctac) + apply (ctac add: setNextPC_ccorres) + apply ctac + apply (wp | simp add: valid_tcb_state'_def)+ + apply vcg + apply wp + apply vcg + apply (wp gts_wp') + apply vcg + apply (clarsimp simp: ct_in_state'_def) + apply (rule conjI, clarsimp) + apply (clarsimp simp: st_tcb_at'_def) + apply (rule conjI, clarsimp simp: obj_at'_def) + apply clarsimp + apply (drule (1) obj_at_cslift_tcb) + apply (subgoal_tac "ksCurThread_' (globals s') = tcb_ptr_to_ctcb_ptr (ksCurThread s)") + prefer 2 + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: typ_heap_simps ThreadState_defs mask_def) + done + +lemma ceqv_Guard_UNIV_Skip: + "ceqv Gamma xf v s s' (a ;; Guard F UNIV Skip) a" + apply (rule ceqvI) + apply (safe elim!: exec_Normal_elim_cases) + apply (case_tac s'a, auto intro: exec.intros elim!: exec_Normal_elim_cases)[1] + apply (cases s', auto intro: exec.intros) + done + +lemma ceqv_tail_Guard_onto_Skip: + "ceqv Gamma xf v s s' + (a ;; Guard F G b) ((a ;; Guard F G Skip) ;; b)" + apply (rule ceqvI) + apply (safe elim!: exec_Normal_elim_cases) + apply (case_tac s'a, auto intro: exec.intros elim!: exec_Normal_elim_cases)[1] + apply (case_tac s'aa, auto intro: exec.intros elim!: exec_Normal_elim_cases)[1] + done + +lemma ceqv_remove_tail_Guard_Skip: + "\ \s. 
s \ G \ \ ceqv Gamma xf v s s' (a ;; Guard F G Skip) a" + apply (rule ceqvI) + apply (safe elim!: exec_Normal_elim_cases) + apply (case_tac s'a, auto intro: exec.intros elim!: exec_Normal_elim_cases)[1] + apply (case_tac s', auto intro: exec.intros elim!: exec_Normal_elim_cases)[1] + done + +lemmas ccorres_remove_tail_Guard_Skip + = ccorres_abstract[where xf'="\_. ()", OF ceqv_remove_tail_Guard_Skip] + +lemma switchToThread_ccorres': + "ccorres dc xfdc + (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) + (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) + hs + (switchToThread t) + (Call switchToThread_'proc)" + apply (rule ccorres_guard_imp2) + apply (ctac (no_vcg) add: switchToThread_ccorres) + apply auto + done + +lemmas word_log2_max_word_word_size = word_log2_max[where 'a=machine_word_len, simplified word_size, simplified] + +lemma ccorres_pre_getQueue: + assumes cc: "\queue. ccorres r xf (P queue) (P' queue) hs (f queue) c" + shows "ccorres r xf (\s. P (ksReadyQueues s (d, p)) s \ d \ maxDomain \ p \ maxPriority) + {s'. \queue. (let cqueue = index (ksReadyQueues_' (globals s')) + (cready_queues_index_to_C d p) in + ctcb_queue_relation queue cqueue) \ s' \ P' queue} + hs (getQueue d p >>= (\queue. f queue)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l2) + defer + defer + apply (rule gq_sp) + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply assumption + apply (clarsimp simp: getQueue_def gets_exs_valid) + apply clarsimp + apply (drule spec, erule mp) + apply (erule rf_sr_ctcb_queue_relation) + apply (simp add: maxDom_to_H maxPrio_to_H)+ + done + +lemma chooseThread_ccorres: + "ccorres dc xfdc all_invs_but_ct_idle_or_in_cur_domain' UNIV [] + chooseThread (Call chooseThread_'proc)" +proof - + + note prio_and_dom_limit_helpers [simp] + note ksReadyQueuesL2Bitmap_nonzeroI [simp] + note Collect_const_mem [simp] + + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + include no_less_1_simps + + have invs_no_cicd'_max_CurDomain[intro]: + "\s. invs_no_cicd' s \ ksCurDomain s \ maxDomain" + by (simp add: invs_no_cicd'_def) + + have invs_no_cicd'_valid_bitmaps: + "\s. invs_no_cicd' s \ valid_bitmaps s" + by (simp add: invs_no_cicd'_def) + + have invs_no_cicd'_pspace_aligned': + "\s. invs_no_cicd' s \ pspace_aligned' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + + have invs_no_cicd'_pspace_distinct': + "\s. invs_no_cicd' s \ pspace_distinct' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + + show ?thesis + supply if_split[split del] + apply (cinit) + apply (rule ccorres_stateAssert)+ + apply (simp add: numDomains_sge_1_simp) + apply (rule_tac xf'=dom_' and r'="\rv rv'. rv' = ucast rv" in ccorres_split_nothrow_novcg) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply clarsimp + apply (rule conseqPre, vcg) + apply (rule Collect_mono) + apply (clarsimp split: prod.split) + apply (clarsimp simp: curDomain_def simpler_gets_def return_def rf_sr_ksCurDomain) + apply ceqv + apply clarsimp + apply (rename_tac curdom) + apply (rule_tac P="curdom \ maxDomain" in ccorres_cross_over_guard_no_st) + apply (rule ccorres_Guard) + apply (rule ccorres_pre_getReadyQueuesL1Bitmap) + apply (rename_tac l1) + apply (rule_tac R="\s. 
l1 = ksReadyQueuesL1Bitmap s curdom \ curdom \ maxDomain" + in ccorres_cond) + subgoal by (fastforce dest!: rf_sr_cbitmap_L1_relation simp: cbitmap_L1_relation_def) + prefer 2 \ \switchToIdleThread\ + apply (ctac(no_vcg) add: switchToIdleThread_ccorres) + apply clarsimp + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac xf'=prio_' in ccorres_call) + apply (rule getHighestPrio_ccorres[simplified getHighestPrio_def']) + apply simp+ + apply ceqv + apply clarsimp + apply (rename_tac prio) + apply (rule_tac P="curdom \ maxDomain" in ccorres_cross_over_guard_no_st) + apply (rule_tac P="prio \ maxPriority" in ccorres_cross_over_guard_no_st) + apply (rule ccorres_pre_getQueue) + apply (rule_tac P="\ tcbQueueEmpty queue" in ccorres_cross_over_guard_no_st) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq)+ + apply (rule ccorres_symb_exec_r) + apply (simp only: ccorres_seq_skip) + apply (rule ccorres_call[OF switchToThread_ccorres']; simp) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply clarsimp + apply (rule conseqPre, vcg) + apply (rule Collect_mono) + apply clarsimp + apply assumption + apply clarsimp + apply (rule conseqPre, vcg) + apply clarsimp + apply (wp isRunnable_wp)+ + apply (clarsimp simp: Let_def guard_is_UNIV_def) + apply (rule conjI) + apply (clarsimp simp: le_maxDomain_eq_less_numDomains unat_trans_ucast_helper) + apply (intro conjI impI) + apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def ctcb_queue_relation_def + tcbQueueEmpty_def option_to_ctcb_ptr_def) + apply (frule_tac qdom=curdom and prio=rv in cready_queues_index_to_C_in_range) + apply fastforce + apply (clarsimp simp: num_tcb_queues_def word_less_nat_alt cready_queues_index_to_C_def2) + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def le_maxDomain_eq_less_numDomains word_less_nat_alt + numDomains_less_numeric_explicit) + apply clarsimp + apply (frule invs_no_cicd'_max_CurDomain) + apply (frule invs_no_cicd'_pspace_aligned') + apply (frule invs_no_cicd'_pspace_distinct') + apply (frule invs_no_cicd'_valid_bitmaps) + apply (frule valid_bitmaps_bitmapQ_no_L1_orphans) + apply (frule valid_bitmaps_valid_bitmapQ) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def cong: conj_cong) + apply (intro conjI impI) + apply (fastforce intro: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) + apply (fastforce dest: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) + done +qed + +lemma ksDomSched_length_relation[simp]: + "\cstate_relation s s'\ \ length (kernel_state.ksDomSchedule s) = unat (ksDomScheduleLength)" + apply (auto simp: cstate_relation_def cdom_schedule_relation_def Let_def ksDomScheduleLength_def) + done + +lemma ksDomSched_length_dom_relation[simp]: + "\cdom_schedule_relation (kernel_state.ksDomSchedule s) kernel_all_global_addresses.ksDomSchedule \ \ length (kernel_state.ksDomSchedule s) = unat (ksDomScheduleLength)" + apply (auto simp: cstate_relation_def cdom_schedule_relation_def Let_def ksDomScheduleLength_def) + done + +lemma 
nextDomain_ccorres: + "ccorres dc xfdc invs' UNIV [] nextDomain (Call nextDomain_'proc)" + apply (cinit) + apply (simp add: ksDomScheduleLength_def sdiv_word_def sdiv_int_def) + apply (rule_tac P=invs' and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_modify_def Let_def + rf_sr_def cstate_relation_def + carch_state_relation_def cmachine_state_relation_def) + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "ksDomScheduleIdx \ = unat (ksDomScheduleLength - 1)") + apply (fastforce simp add: cdom_schedule_relation_def dom_schedule_entry_relation_def dschDomain_def dschLength_def ksDomScheduleLength_def sdiv_word_def sdiv_int_def simp del: ksDomSched_length_dom_relation) + apply (simp add: ksDomScheduleLength_def) + apply (frule invs'_ksDomScheduleIdx) + apply (simp add: invs'_ksDomSchedule newKernelState_def) + apply (simp only: Abs_fnat_hom_1 Abs_fnat_hom_add) + apply (drule unat_le_helper) + apply (simp add: sdiv_int_def sdiv_word_def) + apply (clarsimp simp: cdom_schedule_relation_def) + apply (simp only: Abs_fnat_hom_1 Abs_fnat_hom_add word_not_le) + apply clarsimp + apply (subst (asm) of_nat_Suc[symmetric]) + apply (drule iffD1[OF of_nat_mono_maybe'[where x=3, simplified, symmetric], rotated 2]) + apply simp + apply (frule invs'_ksDomScheduleIdx) + apply (simp add: invs'_ksDomSchedule newKernelState_def) + apply (clarsimp simp: cdom_schedule_relation_def) + apply (clarsimp simp: ksDomScheduleLength_def) + apply (subst of_nat_Suc[symmetric])+ + apply (subst unat_of_nat64) + apply (simp add: word_bits_def) + apply (subst unat_of_nat64) + apply (simp add: word_bits_def) + apply (fastforce simp add: cdom_schedule_relation_def dom_schedule_entry_relation_def dschDomain_def dschLength_def simp del: ksDomSched_length_dom_relation) + apply simp + done + +lemma scheduleChooseNewThread_ccorres: + "ccorres dc xfdc + (\s. invs' s \ ksSchedulerAction s = ChooseNewThread) UNIV hs + (do domainTime \ getDomainTime; + y \ when (domainTime = 0) nextDomain; + chooseThread + od) + (Call scheduleChooseNewThread_'proc)" + apply (cinit') + apply (rule ccorres_pre_getDomainTime) + apply (rule ccorres_split_nothrow) + apply (rule_tac R="\s. ksDomainTime s = domainTime" in ccorres_when) + apply (fastforce simp: rf_sr_ksDomainTime) + apply (rule_tac xf'=xfdc in ccorres_call[OF nextDomain_ccorres] ; simp) + apply ceqv + apply (ctac (no_vcg) add: chooseThread_ccorres) + apply (wp nextDomain_invs_no_cicd') + apply clarsimp + apply (vcg exspec=nextDomain_modifies) + apply (clarsimp simp: if_apply_def2 invs'_invs_no_cicd') + done + +lemma isHighestPrio_ccorres: + "ccorres (\rv rv'. rv = to_bool rv') ret__unsigned_long_' + (\s. 
d \ maxDomain \ bitmapQ_no_L1_orphans s) + (UNIV \ \\dom = ucast d\ \ \\prio = ucast p\) hs + (isHighestPrio d p) + (Call isHighestPrio_'proc)" + supply Collect_const [simp del] + supply prio_and_dom_limit_helpers[simp] + supply Collect_const_mem [simp] + (* FIXME: these should likely be in simpset for CRefine, or even in general *) + supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] + ccorres_IF_True[simp] if_cong[cong] + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + including no_less_1_simps + apply (cinit lift: dom_' prio_') + apply clarsimp + apply (rule ccorres_move_const_guard) + apply (rule ccorres_pre_getReadyQueuesL1Bitmap, rename_tac l1) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_cond_seq2[THEN iffD1]) + apply ccorres_rewrite + apply (rule_tac xf'=ret__int_' and val="from_bool (l1 = 0)" + and R="\s. l1 = ksReadyQueuesL1Bitmap s d \ d \ maxDomain" and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply vcg + apply clarsimp + apply (fastforce simp: rf_sr_ksReadyQueuesL1Bitmap_simp) + apply ceqv + apply clarsimp + apply (rule ccorres_cond[where R=\], blast) + apply (rule_tac P="l1 = 0" in ccorres_gen_asm, clarsimp) + apply (rule ccorres_return_C; clarsimp) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: getHighestPrio_ccorres[simplified]) + apply (rename_tac hprio hprio') + apply csymbr + apply (rule ccorres_return_C, simp, simp, simp) + apply (rule wp_post_taut) + apply (vcg exspec=getHighestPrio_modifies)+ + apply (clarsimp simp: word_le_nat_alt maxDomain_le_unat_ucast_explicit + split: if_splits) + done + +lemma schedule_ccorres: + "ccorres dc xfdc invs' UNIV [] schedule (Call schedule_'proc)" + supply Collect_const [simp del] + supply prio_and_dom_limit_helpers[simp] + supply Collect_const_mem [simp] + (* FIXME: these should likely be in simpset for CRefine, or even in general *) + supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] + ccorres_IF_True[simp] if_cong[cong] + apply (cinit) + apply (rule ccorres_pre_getCurThread) + apply (rule ccorres_pre_getSchedulerAction) + apply wpc + (* toplevel case: action is resume current thread *) + apply (rule ccorres_cond_false_seq) + apply simp + apply (rule_tac P=\ and P'="{s. ksSchedulerAction_' (globals s) = NULL }" in ccorres_from_vcg) + apply (clarsimp simp: return_def split: prod.splits) + apply (rule conseqPre, vcg, clarsimp) + (* toplevel case: action is choose new thread *) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + (* ct runnable? 
*) + apply (ctac add: isRunnable_ccorres, rename_tac runnable) + apply (clarsimp simp: to_bool_def) + apply (rule ccorres_split_nothrow_dc) + (* enqueue if runnable *) + apply (simp add: when_def) + apply (rule ccorres_cond[where R=\], clarsimp) + apply csymbr + apply (ctac add: tcbSchedEnqueue_ccorres) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (rule ccorres_cond_true_seq) + (* isolate haskell part before setting thread action *) + apply (simp add: scheduleChooseNewThread_def) + apply (rule ccorres_lhs_assoc)+ + apply (rule ccorres_split_nothrow_dc) + apply (simp add: bind_assoc) + apply (ctac add: scheduleChooseNewThread_ccorres) + apply (ctac(no_simp) add: ccorres_setSchedulerAction) + apply (wpsimp simp: cscheduler_action_relation_def + | vcg exspec=scheduleChooseNewThread_modifies exspec=tcbSchedEnqueue_modifies)+ + (* toplevel case: action is switch to candidate *) + apply (rename_tac candidate) + apply (rule_tac P="\s. ksSchedulerAction s = SwitchToThread candidate \ invs' s" in ccorres_cross_over_guard) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + (* ct runnable? *) + apply (ctac add: isRunnable_ccorres, rename_tac runnable runnable') + apply (clarsimp simp: to_bool_def) + apply (rule ccorres_split_nothrow_dc) + (* enqueue if runnable *) + apply (simp add: when_def) + apply (rule ccorres_cond[where R=\], clarsimp) + apply csymbr + apply (ctac add: tcbSchedEnqueue_ccorres) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (rule ccorres_cond_false_seq) + + apply (rule_tac xf'=was_runnable_' in ccorres_abstract, ceqv) + apply (rename_tac was_runnable') + apply (rule_tac P="was_runnable' = runnable'" in ccorres_gen_asm2, clarsimp) + apply (rule ccorres_symb_exec_l3[OF _ git_wp _ empty_fail_getIdleThread], rename_tac it) + apply (rule ccorres_pre_threadGet, rename_tac targetPrio) + apply (rule ccorres_pre_threadGet, rename_tac curPrio) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac xf'=candidate_' and val="tcb_ptr_to_ctcb_ptr candidate" + and R="\s. ksSchedulerAction s = SwitchToThread candidate" and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg, clarsimp) + apply (fastforce dest!: rf_sr_cscheduler_relation simp: cscheduler_action_relation_def) + apply ceqv + (* split off fastfail calculation *) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'=fastfail_' in ccorres_split_nothrow) + apply (clarsimp simp: scheduleSwitchThreadFastfail_def) + apply (rule ccorres_cond_seq2[THEN iffD1]) + apply (rule_tac xf'=ret__int_' and val="from_bool (curThread = it)" + and R="\s. it = ksIdleThread s \ curThread = ksCurThread s" and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg, fastforce simp: rf_sr_ksCurThread rf_sr_ksIdleThread) + apply ceqv + apply clarsimp + apply (rule ccorres_cond2'[where R=\], fastforce) + apply clarsimp + apply (rule ccorres_return[where R'=UNIV], clarsimp, vcg) + apply (rule_tac P="\s. obj_at' (\tcb. tcbPriority tcb = curPrio) curThread s + \ curThread = ksCurThread s + \ obj_at' (\tcb. 
tcbPriority tcb = targetPrio) candidate s" + and P'=UNIV in ccorres_from_vcg) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: return_def cur_tcb'_def rf_sr_ksCurThread) + apply (drule (1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) + apply unat_arith + apply clarsimp + apply vcg + apply ceqv + (* fastfail calculation complete *) + apply (rename_tac fastfail fastfail') + apply (rule ccorres_pre_curDomain) + (* rest of the calculation: fastfail \ \ highest *) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac r'="\hprio rv'. to_bool rv' = (fastfail \ \hprio)" and xf'=ret__int_' + in ccorres_split_nothrow) + apply (csymbr) + apply (clarsimp simp: to_bool_def) + apply (rule ccorres_Cond_rhs ; clarsimp) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_add_return2) + apply (ctac add: isHighestPrio_ccorres, clarsimp) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (rule ccorres_return) + apply (rule conseqPre, vcg) + apply (clarsimp simp: to_bool_def) + apply (rule wp_post_taut) + apply (vcg exspec=isHighestPrio_modifies) + apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) + apply clarsimp + apply (rule conseqPre, vcg) + apply (fastforce simp: isHighestPrio_def' gets_def return_def get_def + Nondet_Monad.bind_def + split: prod.split) + apply ceqv + apply (clarsimp simp: to_bool_def) + (* done with calculation of main acceptance criterion for candidate *) + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond[where R=\], blast) + (* candidate is not the best one, enqueue and choose new thread *) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: tcbSchedEnqueue_ccorres) + apply clarsimp + apply (ctac(no_simp) add: ccorres_setSchedulerAction) + apply (clarsimp simp: cscheduler_action_relation_def) + (* isolate haskell part before setting thread action *) + apply (simp add: scheduleChooseNewThread_def) + apply (rule ccorres_lhs_assoc)+ + apply (rule ccorres_split_nothrow_dc) + apply (simp add: bind_assoc) + apply (ctac add: scheduleChooseNewThread_ccorres) + apply (ctac(no_simp) add: ccorres_setSchedulerAction) + apply (wpsimp simp: cscheduler_action_relation_def)+ + apply (vcg exspec=scheduleChooseNewThread_modifies) + apply (wp add: setSchedulerAction_invs' setSchedulerAction_direct del: ssa_wp) + apply (clarsimp | vcg exspec=tcbSchedEnqueue_modifies | wp wp_post_taut)+ + (* secondary check, when on equal prio and ct was running, prefer ct *) + apply (rule ccorres_rhs_assoc) + apply (rule_tac xf'=ret__int_' and val="from_bool (runnable' \ 0 \ curPrio = targetPrio)" + and R="\s. curThread = ksCurThread s + \ obj_at' (\tcb. tcbPriority tcb = curPrio) (ksCurThread s) s + \ obj_at' (\tcb. 
tcbPriority tcb = targetPrio) candidate s" + and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread) + + apply (drule (1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) + apply (solves \unat_arith, rule iffI; simp\) + apply ceqv + apply clarsimp + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond[where R=\], blast) + (* candidate does not beat running ct, append and choose new thread *) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: tcbSchedAppend_ccorres) + apply clarsimp + apply (ctac(no_simp) add: ccorres_setSchedulerAction) + apply (clarsimp simp: cscheduler_action_relation_def) + (* isolate haskell part before setting thread action *) + apply (simp add: scheduleChooseNewThread_def) + apply (rule ccorres_lhs_assoc)+ + apply (rule ccorres_split_nothrow_dc) + apply (simp add: bind_assoc) + apply (ctac add: scheduleChooseNewThread_ccorres) + apply (ctac(no_simp) add: ccorres_setSchedulerAction) + apply (wpsimp simp: cscheduler_action_relation_def)+ + apply (vcg exspec=scheduleChooseNewThread_modifies) + apply (wp add: setSchedulerAction_invs' setSchedulerAction_direct del: ssa_wp) + apply (clarsimp | vcg exspec=tcbSchedAppend_modifies | wp wp_post_taut)+ + (* candidate is best, switch to it *) + apply (ctac add: switchToThread_ccorres) + apply clarsimp + apply (ctac(no_simp) add: ccorres_setSchedulerAction) + apply (clarsimp simp: cscheduler_action_relation_def) + apply (wpsimp wp: wp_post_taut) + apply (vcg exspec=switchToThread_modifies) + apply clarsimp + apply vcg + apply clarsimp + apply (strengthen invs'_invs_no_cicd') + apply (wp | wp (once) hoare_drop_imp)+ + apply clarsimp + apply (vcg exspec=isHighestPrio_modifies) + apply clarsimp + apply (wp (once) hoare_drop_imps) + apply wp + apply (strengthen strenghten_False_imp[where P="a = ResumeCurrentThread" for a]) + apply (clarsimp simp: conj_ac invs_valid_objs' cong: conj_cong) + apply wp + apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) + apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) + apply clarsimp + apply (strengthen ko_at'_obj_at'_field) + apply (clarsimp cong: imp_cong simp: ko_at'_obj_at'_field) + apply wp + apply clarsimp + (* when runnable tcbSchedEnqueue curThread *) + apply (rule_tac Q="\rv s. 
invs' s \ ksCurThread s = curThread + \ ksSchedulerAction s = SwitchToThread candidate" in hoare_post_imp) + apply (clarsimp simp: invs'_bitmapQ_no_L1_orphans invs_ksCurDomain_maxDomain') + apply (fastforce dest: invs_sch_act_wf') + + apply wpsimp+ + apply (vcg exspec=tcbSchedEnqueue_modifies) + apply wp + apply vcg + + apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_valid_objs') + apply (frule invs_sch_act_wf') + apply (frule tcb_at_invs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (rule conjI) + apply (clarsimp dest!: rf_sr_cscheduler_relation simp: cscheduler_action_relation_def) + apply (rule conjI; clarsimp) + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp simp: cscheduler_action_relation_def typ_heap_simps + split: scheduler_action.splits) + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp dest!: rf_sr_cscheduler_relation invs_sch_act_wf' + simp: cscheduler_action_relation_def) + apply (intro conjI impI allI; clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (fastforce simp: tcb_at_not_NULL tcb_at_1 dest: pred_tcb_at')+ + done + +(* FIXME: move *) +lemma map_to_tcbs_upd: + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" + apply (rule ext) + apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) + done + +(* FIXME: move *) +lemma cep_relations_drop_fun_upd: + "\ f x = Some v; tcbEPNext_C v' = tcbEPNext_C v; tcbEPPrev_C v' = tcbEPPrev_C v \ + \ cendpoint_relation (f (x \ v')) = cendpoint_relation f" + "\ f x = Some v; tcbEPNext_C v' = tcbEPNext_C v; tcbEPPrev_C v' = tcbEPPrev_C v \ + \ cnotification_relation (f (x \ v')) = cnotification_relation f" + by (intro ext cendpoint_relation_upd_tcb_no_queues[where thread=x] + cnotification_relation_upd_tcb_no_queues[where thread=x] + | simp split: if_split)+ + +lemma threadSet_timeSlice_ccorres [corres]: + "ccorres dc xfdc (tcb_at' thread) {s. thread' s = tcb_ptr_to_ctcb_ptr thread \ unat (v' s) = v} hs + (threadSet (tcbTimeSlice_update (\_. v)) thread) + (Basic (\s. globals_update (t_hrs_'_update (hrs_mem_update (heap_update (Ptr &(thread' s\[''tcbTimeSlice_C''])::machine_word ptr) (v' s)))) s))" + apply (rule ccorres_guard_imp2) + apply (rule threadSet_ccorres_lemma4 [where P=\ and P'=\]) + apply vcg + prefer 2 + apply (rule conjI, simp) + apply assumption + apply clarsimp + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: cmachine_state_relation_def carch_state_relation_def cpspace_relation_def) + apply (clarsimp simp: update_tcb_map_tos typ_heap_simps') + apply (simp add: map_to_ctes_upd_tcb_no_ctes tcb_cte_cases_def + map_to_tcbs_upd cteSizeBits_def) + + apply (simp add: cep_relations_drop_fun_upd cvariable_relation_upd_const + ko_at_projectKO_opt) + defer + apply (drule ko_at_projectKO_opt) + apply (erule (2) cmap_relation_upd_relI) + apply (simp add: ctcb_relation_def) + apply assumption + apply simp + done + +lemma timerTick_ccorres: + "ccorres dc xfdc invs' UNIV [] timerTick (Call timerTick_'proc)" + supply subst_all [simp del] + apply (cinit) + apply (rule ccorres_pre_getCurThread) + apply (ctac add: get_tsType_ccorres [where f="\s. 
ksCurThread_' (globals s)"]) + apply (rule ccorres_split_nothrow_novcg) + apply wpc + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ + (* thread_state.Running *) + apply simp + apply (rule ccorres_cond_true) + apply (rule ccorres_pre_threadGet) + apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) + apply (clarsimp simp: cur_tcb'_def) + apply (drule (1) tcb_at_h_t_valid) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (rule_tac Q="\s. obj_at' (\tcb. tcbTimeSlice tcb = rva) (ksCurThread s) s" + and Q'=\ in ccorres_cond_both') + apply clarsimp + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: ctcb_relation_def word_less_nat_alt) + apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) + apply (clarsimp simp: cur_tcb'_def) + apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) + apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) + apply (clarsimp simp: cur_tcb'_def) + apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) + apply (ctac add: threadSet_timeSlice_ccorres) + apply (rule ccorres_rhs_assoc)+ + apply (ctac) + apply simp + apply (ctac (no_vcg) add: tcbSchedAppend_ccorres) + apply (ctac add: rescheduleRequired_ccorres) + apply (wp weak_sch_act_wf_lift_linear + threadSet_pred_tcb_at_state tcbSchedAppend_valid_objs' threadSet_valid_objs' threadSet_tcbDomain_triv + | clarsimp simp: st_tcb_at'_def o_def split: if_splits)+ + apply (vcg exspec=tcbSchedDequeue_modifies) + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ + apply ceqv + apply (clarsimp simp: decDomainTime_def numDomains_sge_1_simp) + apply (rule ccorres_when[where R=\]) + apply (solves \clarsimp split: if_split\) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac rrel=dc and xf=xfdc and P=\ and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply ceqv + apply (rule ccorres_pre_getDomainTime) + apply (rename_tac rva rv'a rvb) + apply (rule_tac P'="{s. 
ksDomainTime_' (globals s) = rvb}" in ccorres_inst, simp) + apply (case_tac "rvb = 0") + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_cond_true) + apply (ctac add: rescheduleRequired_ccorres) + apply clarsimp + apply assumption + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply clarsimp + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wp hoare_vcg_conj_lift hoare_vcg_all_lift hoare_drop_imps) + apply (wpc | wp threadSet_weak_sch_act_wf threadSet_valid_objs' rescheduleRequired_weak_sch_act_wf + tcbSchedAppend_valid_objs' weak_sch_act_wf_lift_linear threadSet_st_tcb_at2 threadGet_wp + | simp split del: if_splits)+ + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem word_sle_def word_sless_def) + apply (wp gts_wp') + apply vcg + apply (clarsimp simp: invs_weak_sch_act_wf) + apply (fold cur_tcb'_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (rule conjI) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (auto simp: obj_at'_def inQ_def weak_sch_act_wf_def st_tcb_at'_def + valid_pspace'_def ct_idle_or_in_cur_domain'_def valid_tcb'_def valid_idle'_def projectKOs)[1] + apply (rule conjI, clarsimp simp: invs'_def valid_state'_def valid_tcb'_def)+ + apply (auto simp: obj_at'_def inQ_def weak_sch_act_wf_def st_tcb_at'_def + valid_pspace'_def ct_idle_or_in_cur_domain'_def valid_tcb'_def valid_idle'_def projectKOs)[1] + apply (auto simp: invs'_def valid_state'_def valid_tcb'_def tcb_cte_cases_def cur_tcb'_def + obj_at'_def cteSizeBits_def)[1] + + apply (frule invs_cur') + apply (clarsimp simp: cur_tcb'_def) + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps' Kernel_Config.timeSlice_def) + apply (subst unat_sub) + apply simp + apply (clarsimp simp: ctcb_relation_def) + done + +end + +end diff --git a/proof/crefine/AARCH64/StateRelation_C.thy b/proof/crefine/AARCH64/StateRelation_C.thy new file mode 100644 index 0000000000..a92b71fda0 --- /dev/null +++ b/proof/crefine/AARCH64/StateRelation_C.thy @@ -0,0 +1,983 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory StateRelation_C +imports Wellformed_C +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition + "lifth p s \ the (clift (t_hrs_' s) p)" + +definition + "array_relation r n a c \ \i \ n. r (a i) (index c (unat i))" + +definition option_to_ctcb_ptr :: "machine_word option \ tcb_C ptr" where + "option_to_ctcb_ptr x \ case x of None \ NULL | Some t \ tcb_ptr_to_ctcb_ptr t" + + +definition + byte_to_word_heap :: "(machine_word \ word8) \ (machine_word \ 9 word \ machine_word)" + where + "byte_to_word_heap m base off \ let (ptr :: machine_word) = base + (ucast off * 8) in + word_rcat [m (ptr + 7), m (ptr + 6), m (ptr + 5), m (ptr + 4), + m (ptr + 3), m (ptr + 2), m (ptr + 1), m ptr]" + +definition + heap_to_user_data :: "(machine_word \ kernel_object option) \ (machine_word \ word8) \ (machine_word \ (9 word \ machine_word) option)" + where + "heap_to_user_data hp bhp \ \p. let (uhp :: machine_word \ user_data option) = (projectKO_opt \\<^sub>m hp) in + option_map (\_. 
byte_to_word_heap bhp p) (uhp p)" + +definition + heap_to_device_data :: "(machine_word \ kernel_object option) \ (machine_word \ word8) \ (machine_word \ (9 word \ machine_word) option)" + where + "heap_to_device_data hp bhp \ \p. let (uhp :: machine_word \ user_data_device option) = (projectKO_opt \\<^sub>m hp) in + option_map (\_. byte_to_word_heap bhp p) (uhp p)" + + +definition + cmap_relation :: "(machine_word \ 'a) \ 'b typ_heap \ (machine_word \ 'b ptr) \ ('a \ 'b \ bool) \ bool" + where + "cmap_relation as cs addr_fun rel \ + (addr_fun ` (dom as) = dom cs) \ + (\x \ dom as. rel (the (as x)) (the (cs (addr_fun x))))" + +definition + carray_map_relation :: "nat \ (machine_word \ ('a :: pre_storable)) + \ ('b ptr \ bool) \ (machine_word \ 'b ptr) \ bool" +where + "carray_map_relation bits as cs addr_fun \ + (\p. (is_aligned p bits \ (\p'. p' && ~~ mask bits = p \ is_aligned p' (objBits (the (as p'))) + \ p' \ dom as)) \ cs (addr_fun p))" + +definition + cvariable_array_map_relation :: "(machine_word \ 'a) \ ('a \ nat) + \ (machine_word \ ('c :: c_type) ptr) \ heap_typ_desc \ bool" +where + "cvariable_array_map_relation amap szs ptrfun htd + \ \p v. amap p = Some v \ h_t_array_valid htd (ptrfun p) (szs v)" + +end + +text \ + Conceptually, the constant armKSKernelVSpace_C resembles ghost state. + The constant specifies the use of certain address ranges, or ``windows''. + It is the very nature of these ranges is that they remain fixed + after initialization. + Hence, it is not necessary to carry this value around + as part of the actual state. + Rather, we simply fix it in a locale for the state relation. + + Note that this locale does not build on @{text kernel} + but @{text substitute_pre}. + Hence, we can later base definitions for the ADT on it, + which can subsequently be instantiated for + @{text kernel_all_global_addresses} as well as @{text kernel_all_substitute}. +\ +locale state_rel = Arch + substitute_pre + (*FIXME: arch_split*) + fixes armKSKernelVSpace_C :: "machine_word \ arm_vspace_region_use" + +locale kernel = kernel_all_substitute + state_rel + +context state_rel +begin + +(* FIXME AARCH64 change/expand the comment in C for armKSGlobalUserVSpace to include that it's +"the default page table for when the user does not have a page table" rather than the currently +confusing +"the temporary userspace page table in kernel. +It is required before running user thread to avoid speculative page table walking with the +wrong page table." 
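+Note that armKSGlobalUserVSpace_Ptr and carch_globals below only pin the Haskell-level
+armKSGlobalUserVSpace pointer to the address of the C symbol of the same name; carch_globals
+itself says nothing about the contents of that table.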
*) + +abbreviation armKSGlobalUserVSpace_Ptr :: vs_ptr where + "armKSGlobalUserVSpace_Ptr \ vs_Ptr (symbol_table ''armKSGlobalUserVSpace'')" + +(* relates fixed addresses *) +definition carch_globals :: "Arch.kernel_state \ bool" where + "carch_globals s \ armKSGlobalUserVSpace s = ptr_val armKSGlobalUserVSpace_Ptr" + +lemma carch_globals_armKSGlobalUserVSpace: + "carch_globals s \ armKSGlobalUserVSpace s = symbol_table ''armKSGlobalUserVSpace''" + by (simp add: carch_globals_def armKSGlobalUserVSpace_def) + +definition cur_vcpu_relation :: + "(machine_word \ bool) option \ vcpu_C ptr \ machine_word \ bool" where + "cur_vcpu_relation akscurvcpu cvcpu cactive \ + case akscurvcpu of + Some acurvcpu \ cvcpu = Ptr (fst acurvcpu) \ cvcpu \ NULL \ cactive = from_bool (snd acurvcpu) + | None \ cvcpu = NULL \ cactive = 0" + +(* FIXME AARCH64: the naming in C is confusing (do we want to change it to talk about VMIDs instead + of HW ASIDs?: + armKSNextVMID in Haskell is armKSNextASID in C (note this is a hw_asid_t, so should have "HW" in it + at least) + armKSVMIDTable in Haskell is armKSHWASIDTable in C *) + +definition carch_state_relation :: "Arch.kernel_state \ globals \ bool" where + "carch_state_relation astate cstate \ + armKSKernelVSpace astate = armKSKernelVSpace_C \ + array_relation ((=) \ option_to_ptr) (mask asid_high_bits) (armKSASIDTable astate) (armKSASIDTable_' cstate) \ + armKSNextVMID astate = armKSNextASID_' cstate \ + array_relation ((=) \ option_to_0) (mask vmid_bits) + (armKSVMIDTable astate) (armKSHWASIDTable_' cstate) \ + carch_globals astate \ + gic_vcpu_num_list_regs_' cstate = of_nat (armKSGICVCPUNumListRegs astate) \ + cur_vcpu_relation (armHSCurVCPU astate) (armHSCurVCPU_' cstate) (armHSVCPUActive_' cstate)" + +end + +context begin interpretation Arch . 
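+(* The concrete machine state lives in the ghost variable phantom_machine_state_' on the C side.
+   The relation below constrains only the components the refinement proofs currently depend on
+   (interrupt masks, interrupt state, device state and the unmodelled machine_state_rest);
+   underlying_memory is related separately, via the user and device data heaps in
+   cpspace_relation. *)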
(*FIXME: arch_split*) + +definition + cmachine_state_relation :: "machine_state \ globals \ bool" +where + "cmachine_state_relation s s' \ + irq_masks s = irq_masks (phantom_machine_state_' s') \ + irq_state s = irq_state (phantom_machine_state_' s') \ + device_state s = device_state (phantom_machine_state_' s') \ + \ \exclusive_state s = exclusive_state (phantom_machine_state_' s') \\ \ \FIXME: this is needed for infoflow so we'll leave it commented\ + machine_state_rest s = machine_state_rest (phantom_machine_state_' s')" + + +definition + "globals_list_id_fudge = id" + +type_synonym ('a, 'b) ltyp_heap = "'a ptr \ 'b" + +abbreviation + map_to_tcbs :: "(machine_word \ Structures_H.kernel_object) \ machine_word \ tcb" + where + "map_to_tcbs hp \ projectKO_opt \\<^sub>m hp" + +abbreviation + map_to_eps :: "(machine_word \ Structures_H.kernel_object) \ machine_word \ endpoint" + where + "map_to_eps hp \ projectKO_opt \\<^sub>m hp" + +abbreviation + map_to_ntfns :: "(machine_word \ Structures_H.kernel_object) \ machine_word \ notification" + where + "map_to_ntfns hp \ projectKO_opt \\<^sub>m hp" + +abbreviation + map_to_ptes :: "(machine_word \ Structures_H.kernel_object) \ machine_word \ pte" + where + "map_to_ptes hp \ projectKO_opt \\<^sub>m hp" + +abbreviation + map_to_asidpools :: "(machine_word \ Structures_H.kernel_object) \ machine_word \ asidpool" + where + "map_to_asidpools hp \ projectKO_opt \\<^sub>m hp" + +abbreviation + map_to_user_data :: "(machine_word \ Structures_H.kernel_object) \ machine_word \ user_data" + where + "map_to_user_data hp \ projectKO_opt \\<^sub>m hp" + +abbreviation + map_to_user_data_device :: "(machine_word \ Structures_H.kernel_object) \ machine_word \ user_data_device" + where + "map_to_user_data_device hp \ projectKO_opt \\<^sub>m hp" + +abbreviation + map_to_vcpus :: "(machine_word \ Structures_H.kernel_object) \ machine_word \ vcpu" + where + "map_to_vcpus hp \ projectKO_opt \\<^sub>m hp" + + +definition + cmdbnode_relation :: "Structures_H.mdbnode \ mdb_node_C \ bool" +where + "cmdbnode_relation amdb cmdb \ amdb = mdb_node_to_H (mdb_node_lift cmdb)" + +definition + ccte_relation :: "Structures_H.cte \ cte_C \ bool" +where + "ccte_relation acte ccte \ Some acte = option_map cte_to_H (cte_lift ccte) + \ c_valid_cte ccte" + +lemma ccte_relation_c_valid_cte: "ccte_relation c c' \ c_valid_cte c'" + by (simp add: ccte_relation_def) + + +definition + tcb_queue_relation' :: "(tcb_C \ tcb_C ptr) \ (tcb_C \ tcb_C ptr) \ (tcb_C ptr \ tcb_C option) \ machine_word list \ tcb_C ptr \ tcb_C ptr \ bool" + where + "tcb_queue_relation' getNext getPrev hp queue qhead end \ + (end = (if queue = [] then NULL else (tcb_ptr_to_ctcb_ptr (last queue)))) + \ tcb_queue_relation getNext getPrev hp queue NULL qhead" + +fun register_from_H :: "register \ register_idx" where + "register_from_H AARCH64.X0 = scast Kernel_C.X0" +| "register_from_H AARCH64.X1 = scast Kernel_C.X1" +| "register_from_H AARCH64.X2 = scast Kernel_C.X2" +| "register_from_H AARCH64.X3 = scast Kernel_C.X3" +| "register_from_H AARCH64.X4 = scast Kernel_C.X4" +| "register_from_H AARCH64.X5 = scast Kernel_C.X5" +| "register_from_H AARCH64.X6 = scast Kernel_C.X6" +| "register_from_H AARCH64.X7 = scast Kernel_C.X7" +| "register_from_H AARCH64.X8 = scast Kernel_C.X8" +| "register_from_H AARCH64.X9 = scast Kernel_C.X9" +| "register_from_H AARCH64.X10 = scast Kernel_C.X10" +| "register_from_H AARCH64.X11 = scast Kernel_C.X11" +| "register_from_H AARCH64.X12 = scast Kernel_C.X12" +| "register_from_H AARCH64.X13 = 
scast Kernel_C.X13" +| "register_from_H AARCH64.X14 = scast Kernel_C.X14" +| "register_from_H AARCH64.X15 = scast Kernel_C.X15" +| "register_from_H AARCH64.X16 = scast Kernel_C.X16" +| "register_from_H AARCH64.X17 = scast Kernel_C.X17" +| "register_from_H AARCH64.X18 = scast Kernel_C.X18" +| "register_from_H AARCH64.X19 = scast Kernel_C.X19" +| "register_from_H AARCH64.X20 = scast Kernel_C.X20" +| "register_from_H AARCH64.X21 = scast Kernel_C.X21" +| "register_from_H AARCH64.X22 = scast Kernel_C.X22" +| "register_from_H AARCH64.X23 = scast Kernel_C.X23" +| "register_from_H AARCH64.X24 = scast Kernel_C.X24" +| "register_from_H AARCH64.X25 = scast Kernel_C.X25" +| "register_from_H AARCH64.X26 = scast Kernel_C.X26" +| "register_from_H AARCH64.X27 = scast Kernel_C.X27" +| "register_from_H AARCH64.X28 = scast Kernel_C.X28" +| "register_from_H AARCH64.X29 = scast Kernel_C.X29" +| "register_from_H AARCH64.X30 = scast Kernel_C.X30" +| "register_from_H AARCH64.SP_EL0 = scast Kernel_C.SP_EL0" +| "register_from_H AARCH64.NextIP = scast Kernel_C.NextIP" +| "register_from_H AARCH64.SPSR_EL1 = scast Kernel_C.SPSR_EL1" +| "register_from_H AARCH64.FaultIP = scast Kernel_C.FaultIP" +| "register_from_H AARCH64.TPIDR_EL0 = scast Kernel_C.TPIDR_EL0" +| "register_from_H AARCH64.TPIDRRO_EL0 = scast Kernel_C.TPIDRRO_EL0" + +lemma ELR_EL1_is_NextIP[simp]: + "Kernel_C.ELR_EL1 = Kernel_C.NextIP" + by (simp add: C_register_defs) + +definition + cregs_relation :: "(MachineTypes.register \ machine_word) \ machine_word[registers_count] \ bool" +where + "cregs_relation Hregs Cregs \ \r. Hregs r = Cregs.[unat (register_from_H r)]" + +definition fpu_relation :: "fpu_state \ user_fpu_state_C \ bool" where + "fpu_relation fpu_H fpu_C \ + (\r < CARD(fpu_regs). (fpuRegs fpu_H) (of_nat r) = (vregs_C fpu_C).[r]) + \ fpuSr fpu_H = fpsr_C fpu_C + \ fpuCr fpu_H = fpcr_C fpu_C" + +definition + ccontext_relation :: "user_context \ user_context_C \ bool" +where + "ccontext_relation uc_H uc_C \ cregs_relation (user_regs uc_H) (registers_C uc_C) \ + fpu_relation (fpu_state uc_H) (fpuState_C uc_C)" + +primrec + cthread_state_relation_lifted :: "Structures_H.thread_state \ + (thread_state_CL \ seL4_Fault_CL option) \ bool" +where + "cthread_state_relation_lifted (Structures_H.Running) ts' + = (tsType_CL (fst ts') = scast ThreadState_Running)" +| "cthread_state_relation_lifted (Structures_H.Restart) ts' + = (tsType_CL (fst ts') = scast ThreadState_Restart)" +| "cthread_state_relation_lifted (Structures_H.Inactive) ts' + = (tsType_CL (fst ts') = scast ThreadState_Inactive)" +| "cthread_state_relation_lifted (Structures_H.IdleThreadState) ts' + = (tsType_CL (fst ts') = scast ThreadState_IdleThreadState)" +| "cthread_state_relation_lifted (Structures_H.BlockedOnReply) ts' + = (tsType_CL (fst ts') = scast ThreadState_BlockedOnReply)" +| "cthread_state_relation_lifted (Structures_H.BlockedOnReceive oref cg) ts' + = (tsType_CL (fst ts') = scast ThreadState_BlockedOnReceive + \ oref = blockingObject_CL (fst ts') + \ cg = to_bool (blockingIPCCanGrant_CL (fst ts')))" +| "cthread_state_relation_lifted (Structures_H.BlockedOnSend oref badge cg cgr isc) ts' + = (tsType_CL (fst ts') = scast ThreadState_BlockedOnSend + \ oref = blockingObject_CL (fst ts') + \ badge = blockingIPCBadge_CL (fst ts') + \ cg = to_bool (blockingIPCCanGrant_CL (fst ts')) + \ cgr = to_bool (blockingIPCCanGrantReply_CL (fst ts')) + \ isc = to_bool (blockingIPCIsCall_CL (fst ts')))" +| "cthread_state_relation_lifted (Structures_H.BlockedOnNotification oref) ts' + = (tsType_CL 
(fst ts') = scast ThreadState_BlockedOnNotification + \ oref = blockingObject_CL (fst ts'))" + + +definition + cthread_state_relation :: "Structures_H.thread_state \ + (thread_state_C \ seL4_Fault_C) \ bool" +where + "cthread_state_relation \ \a (cs, cf). + cthread_state_relation_lifted a (thread_state_lift cs, seL4_Fault_lift cf)" + +definition "is_cap_fault cf \ + (case cf of (SeL4_Fault_CapFault _) \ True + | _ \ False)" + +lemma is_cap_fault_simp: "is_cap_fault cf = (\ x. cf=SeL4_Fault_CapFault x)" + by (simp add: is_cap_fault_def split:seL4_Fault_CL.splits) + + +definition + message_info_to_H :: "seL4_MessageInfo_C \ Types_H.message_info" + where + "message_info_to_H mi \ Types_H.message_info.MI (length_CL (seL4_MessageInfo_lift mi)) + (extraCaps_CL (seL4_MessageInfo_lift mi)) + (capsUnwrapped_CL (seL4_MessageInfo_lift mi)) + (label_CL (seL4_MessageInfo_lift mi))" + + +fun + lookup_fault_to_H :: "lookup_fault_CL \ lookup_failure" + where + "lookup_fault_to_H Lookup_fault_invalid_root = InvalidRoot" + | "lookup_fault_to_H (Lookup_fault_guard_mismatch lf) = + (GuardMismatch (unat (bitsLeft_CL lf)) (guardFound_CL lf) (unat (bitsFound_CL lf)))" + | "lookup_fault_to_H (Lookup_fault_depth_mismatch lf) = + (DepthMismatch (unat (lookup_fault_depth_mismatch_CL.bitsLeft_CL lf)) + (unat (lookup_fault_depth_mismatch_CL.bitsFound_CL lf)))" + | "lookup_fault_to_H (Lookup_fault_missing_capability lf) = + (MissingCapability (unat (lookup_fault_missing_capability_CL.bitsLeft_CL lf)))" + +fun + fault_to_H :: "seL4_Fault_CL \ lookup_fault_CL \ fault option" +where + "fault_to_H SeL4_Fault_NullFault lf = None" + | "fault_to_H (SeL4_Fault_CapFault cf) lf + = Some (CapFault (seL4_Fault_CapFault_CL.address_CL cf) (to_bool (inReceivePhase_CL cf)) (lookup_fault_to_H lf))" + | "fault_to_H (SeL4_Fault_VMFault vf) lf + = Some (ArchFault (VMFault (seL4_Fault_VMFault_CL.address_CL vf) [instructionFault_CL vf, FSR_CL vf]))" + | "fault_to_H (SeL4_Fault_UnknownSyscall us) lf + = Some (UnknownSyscallException (syscallNumber_CL us))" + | "fault_to_H (SeL4_Fault_UserException ue) lf + = Some (UserException (number_CL ue) (code_CL ue))" + | "fault_to_H (SeL4_Fault_VCPUFault vf) lf + = Some (ArchFault (VCPUFault (seL4_Fault_VCPUFault_CL.hsr_CL vf)))" + | "fault_to_H (SeL4_Fault_VGICMaintenance vf) lf + = Some (ArchFault (VGICMaintenance (if seL4_Fault_VGICMaintenance_CL.idxValid_CL vf = 1 + then Some (seL4_Fault_VGICMaintenance_CL.idx_CL vf) + else None)))" + | "fault_to_H (SeL4_Fault_VPPIEvent irq) lf + = Some (ArchFault (VPPIEvent (ucast (seL4_Fault_VPPIEvent_CL.irq_w_CL irq))))" + +definition + cfault_rel :: "Fault_H.fault option \ seL4_Fault_CL option \ lookup_fault_CL option \ bool" +where + "cfault_rel af cf lf \ \cf'. cf = Some cf' \ + (if (is_cap_fault cf') then (\lf'. 
lf = Some lf' \ fault_to_H cf' lf' = af) + else (fault_to_H cf' undefined = af))" + +definition + carch_tcb_relation :: "Structures_H.arch_tcb \ arch_tcb_C \ bool" +where + "carch_tcb_relation aarch_tcb carch_tcb \ + ccontext_relation (atcbContextGet aarch_tcb) (tcbContext_C carch_tcb) + \ option_to_ptr (atcbVCPUPtr aarch_tcb) = tcbVCPU_C carch_tcb" + +definition + ctcb_relation :: "Structures_H.tcb \ tcb_C \ bool" +where + "ctcb_relation atcb ctcb \ + tcbFaultHandler atcb = tcbFaultHandler_C ctcb + \ cthread_state_relation (tcbState atcb) (tcbState_C ctcb, tcbFault_C ctcb) + \ tcbIPCBuffer atcb = tcbIPCBuffer_C ctcb + \ carch_tcb_relation (tcbArch atcb) (tcbArch_C ctcb) + \ tcbQueued atcb = to_bool (tcbQueued_CL (thread_state_lift (tcbState_C ctcb))) + \ ucast (tcbDomain atcb) = tcbDomain_C ctcb + \ ucast (tcbPriority atcb) = tcbPriority_C ctcb + \ ucast (tcbMCP atcb) = tcbMCP_C ctcb + \ tcbTimeSlice atcb = unat (tcbTimeSlice_C ctcb) + \ cfault_rel (tcbFault atcb) (seL4_Fault_lift (tcbFault_C ctcb)) + (lookup_fault_lift (tcbLookupFailure_C ctcb)) + \ option_to_ptr (tcbBoundNotification atcb) = tcbBoundNotification_C ctcb + \ option_to_ctcb_ptr (tcbSchedPrev atcb) = tcbSchedPrev_C ctcb + \ option_to_ctcb_ptr (tcbSchedNext atcb) = tcbSchedNext_C ctcb" + +abbreviation + "ep_queue_relation' \ tcb_queue_relation' tcbEPNext_C tcbEPPrev_C" + +definition + cendpoint_relation :: "tcb_C typ_heap \ Structures_H.endpoint \ endpoint_C \ bool" +where + "cendpoint_relation h ntfn cep \ + let cstate = endpoint_CL.state_CL (endpoint_lift cep); + chead = (Ptr o epQueue_head_CL o endpoint_lift) cep; + cend = (Ptr o epQueue_tail_CL o endpoint_lift) cep in + case ntfn of + IdleEP \ cstate = scast EPState_Idle \ ep_queue_relation' h [] chead cend + | SendEP q \ cstate = scast EPState_Send \ ep_queue_relation' h q chead cend + | RecvEP q \ cstate = scast EPState_Recv \ ep_queue_relation' h q chead cend" + +definition + cnotification_relation :: "tcb_C typ_heap \ Structures_H.notification \ + notification_C \ bool" +where + "cnotification_relation h antfn cntfn \ + let cntfn' = notification_lift cntfn; + cstate = notification_CL.state_CL cntfn'; + chead = (Ptr o ntfnQueue_head_CL) cntfn'; + cend = (Ptr o ntfnQueue_tail_CL) cntfn'; + cbound = ((Ptr o ntfnBoundTCB_CL) cntfn' :: tcb_C ptr) + in + (case ntfnObj antfn of + IdleNtfn \ cstate = scast NtfnState_Idle \ ep_queue_relation' h [] chead cend + | WaitingNtfn q \ cstate = scast NtfnState_Waiting \ ep_queue_relation' h q chead cend + | ActiveNtfn msgid \ cstate = scast NtfnState_Active \ + msgid = ntfnMsgIdentifier_CL cntfn' \ + ep_queue_relation' h [] chead cend) + \ option_to_ctcb_ptr (ntfnBoundTCB antfn) = cbound" + +lemmas pte_tag_defs = + pte_pte_table_def + pte_pte_page_def + pte_pte_4k_page_def + pte_pte_invalid_def + +definition ap_from_vm_rights :: "vmrights \ machine_word" where + "ap_from_vm_rights R \ case R of + VMKernelOnly \ 0 + | VMReadWrite \ 3 + | VMReadOnly \ 1" \ \note: hyp vs non-hyp swap VMReadWrite and VMReadOnly AP values\ + +(* Invalid PTEs map to invalid PTEs (sanity check) *) +lemma pte_0: + "index (pte_C.words_C cpte) 0 = 0 \ + pte_lift cpte = Some (Pte_pte_invalid)" + by (simp add: pte_lift_def pte_get_tag_def pte_tag_defs) + +(* with hypervisor support enabled, use stage-2 translation format for PTE *) +(* see makeUserPage in C *) +definition cpte_relation :: "pte \ pte_C \ bool" where + "cpte_relation pte cpte \ + (let cpte' = pte_lift cpte in + case pte of + InvalidPTE \ + cpte' = Some Pte_pte_invalid + | PageTablePTE ppn \ + 
cpte' = Some (Pte_pte_table \ pt_base_address_CL = ppn << pageBits \) + | PagePTE baseaddr small global xn device vmrights \ + \ \Other than their type tag, the fields of page PTEs are identical\ + if small + then cpte' = Some (Pte_pte_4k_page + \ pte_pte_4k_page_CL.UXN_CL = of_bool xn, + page_base_address_CL = baseaddr, + nG_CL = from_bool global, \ \flipped in hyp mode\ + AF_CL = 1, + SH_CL = 0, + AP_CL = ap_from_vm_rights vmrights, + AttrIndx_CL = attridx_from_vmattributes (VMAttributes xn (\device)) + \ \device means non-cacheable\ + \) + else cpte' = Some (Pte_pte_page + \ pte_pte_page_CL.UXN_CL = of_bool xn, + page_base_address_CL = baseaddr, + nG_CL = from_bool global, \ \flipped in hyp mode\ + AF_CL = 1, + SH_CL = 0, + AP_CL = ap_from_vm_rights vmrights, + AttrIndx_CL = attridx_from_vmattributes (VMAttributes xn (\device)) + \ \device means non-cacheable\ + \))" + +(* note: asid_map_C is a historical name, on AARCH64 it refers to a single ASID pool entry *) +definition casid_map_relation :: "asidpool_entry option \ asid_map_C \ bool" where + "casid_map_relation ap_entry_opt c_asid_map \ + case asid_map_lift c_asid_map of + None \ False \ \bad tag, should never happen\ + | Some Asid_map_asid_map_none \ ap_entry_opt = None + | Some (Asid_map_asid_map_vspace vsroot) \ + ap_entry_opt = Some (ASIDPoolVSpace (if to_bool (stored_vmid_valid_CL vsroot) + then Some (ucast (stored_hw_vmid_CL vsroot)) else None) + (vspace_root_CL vsroot))" + +definition casid_pool_relation :: "asidpool \ asid_pool_C \ bool" where + "casid_pool_relation asid_pool casid_pool \ + case asid_pool of ASIDPool pool \ + case casid_pool of asid_pool_C.asid_pool_C cpool \ + array_relation casid_map_relation (mask asid_low_bits) pool cpool + \ dom pool \ {0 .. mask asid_low_bits}" + +definition cvcpu_regs_relation :: "vcpu \ vcpu_C \ bool" where + "cvcpu_regs_relation vcpu cvcpu \ + \r. regs_C cvcpu.[fromEnum r] = vcpuRegs vcpu r" + +definition cvcpu_vppi_masked_relation :: "vcpu \ vcpu_C \ bool" where + "cvcpu_vppi_masked_relation vcpu cvcpu \ + \vppi. (vppi_masked_C cvcpu).[fromEnum vppi] + = from_bool (vcpuVPPIMasked vcpu vppi)" + +definition virq_to_H :: "virq_C \ virq" where + "virq_to_H virq = (virq_C.words_C virq).[0]" + +definition cvgic_relation :: "gicvcpuinterface \ gicVCpuIface_C \ bool" where + "cvgic_relation vgic cvgic \ + gicVCpuIface_C.hcr_C cvgic = vgicHCR vgic + \ gicVCpuIface_C.vmcr_C cvgic = vgicVMCR vgic + \ gicVCpuIface_C.apr_C cvgic = vgicAPR vgic + \ (\i\63. vgicLR vgic i = virq_to_H ((gicVCpuIface_C.lr_C cvgic).[i])) + \ (\i\64. vgicLR vgic i = 0)" + +definition cvcpu_relation :: "vcpu \ vcpu_C \ bool" where + "cvcpu_relation vcpu cvcpu \ + vcpuTCB_C cvcpu = option_to_ctcb_ptr (vcpuTCBPtr vcpu) + \ cvcpu_regs_relation vcpu cvcpu + \ cvgic_relation (vcpuVGIC vcpu) (vgic_C cvcpu) + \ cvcpu_vppi_masked_relation vcpu cvcpu + \ last_pcount_C (virtTimer_C cvcpu) = vtimerLastPCount (vcpuVTimer vcpu)" + +definition + cuser_user_data_relation :: "(9 word \ machine_word) \ user_data_C \ bool" +where + "cuser_user_data_relation f ud \ \off. 
f off = index (user_data_C.words_C ud) (unat off)" + +definition + cuser_user_data_device_relation :: "(9 word \ machine_word) \ user_data_device_C \ bool" +where + "cuser_user_data_device_relation f ud \ True" + +abbreviation + "cpspace_cte_relation ah ch \ cmap_relation (map_to_ctes ah) (clift ch) Ptr ccte_relation" + +abbreviation + "cpspace_tcb_relation ah ch \ cmap_relation (map_to_tcbs ah) (clift ch) tcb_ptr_to_ctcb_ptr ctcb_relation" + +abbreviation + "cpspace_ep_relation ah ch \ cmap_relation (map_to_eps ah) (clift ch) Ptr (cendpoint_relation (clift ch))" + +abbreviation + "cpspace_ntfn_relation ah ch \ cmap_relation (map_to_ntfns ah) (clift ch) Ptr (cnotification_relation (clift ch))" + +abbreviation + "cpspace_pte_relation ah ch \ cmap_relation (map_to_ptes ah) (clift ch) Ptr cpte_relation" + +abbreviation + "cpspace_asidpool_relation ah ch \ cmap_relation (map_to_asidpools ah) (clift ch) Ptr casid_pool_relation" + +abbreviation + "cpspace_vcpu_relation ah ch \ cmap_relation (map_to_vcpus ah) (clift ch) Ptr cvcpu_relation" + +abbreviation + "cpspace_user_data_relation ah bh ch \ cmap_relation (heap_to_user_data ah bh) (clift ch) Ptr cuser_user_data_relation" + +abbreviation + "cpspace_device_data_relation ah bh ch \ cmap_relation (heap_to_device_data ah bh) (clift ch) Ptr cuser_user_data_device_relation" + +definition + cpspace_relation :: "(machine_word \ Structures_H.kernel_object) \ (machine_word \ word8) \ heap_raw_state \ bool" +where + "cpspace_relation ah bh ch \ + cpspace_cte_relation ah ch \ cpspace_tcb_relation ah ch \ cpspace_ep_relation ah ch \ cpspace_ntfn_relation ah ch \ + cpspace_pte_relation ah ch \ cpspace_asidpool_relation ah ch \ + cpspace_user_data_relation ah bh ch \ cpspace_device_data_relation ah bh ch \ + cpspace_vcpu_relation ah ch" + +(* + We sometimes want to treat page tables as arrays, e.g. in Retype. Also, pointer addition + as in lookupPTSlot creates array_assertion guards in the C parser, which require an array type + tag in the heap, so we can't escape the array in Retype by creating single PTEs instead. + + It is Ok to represent the same area of memory as a set of independent PTEs and as an array at + the same time. See thms h_t_array_valid and h_t_array_valid_field that show both types being + valid at the same time -- this means cpspace_pte_relation does not create a contradiction with + the below. + + It is *not* Ok to represent the same (overlapping) area of memory as arrays of different + lengths (VSRootPT/NormalPT), because the array type tag contains the length, leading to a + contradiction. This means we can't use carray_map_relation for both types. + + We can use ghost state to say which arrays should have which type to avoid overlaps. The + CNode cte_array_relation relation already provides a mechanism for that (as opposed to + cpspace_cte_relation which handles single ctes). + +*) +abbreviation pt_array_relation :: "kernel_state \ globals \ bool" where + "pt_array_relation astate cstate \ + cvariable_array_map_relation (gsPTTypes (ksArchState astate)) + (\pt_t. 
2 ^ ptTranslationBits pt_t) + pte_Ptr + (hrs_htd (t_hrs_' cstate))" + +abbreviation + "sched_queue_relation' \ tcb_queue_relation' tcbSchedNext_C tcbSchedPrev_C" + +abbreviation + end_C :: "tcb_queue_C \ tcb_C ptr" +where + "end_C == tcb_queue_C.end_C" + +definition + cready_queues_index_to_C :: "domain \ priority \ nat" +where + "cready_queues_index_to_C qdom prio \ (unat qdom) * numPriorities + (unat prio)" + +definition ctcb_queue_relation :: "tcb_queue \ tcb_queue_C \ bool" where + "ctcb_queue_relation aqueue cqueue \ + head_C cqueue = option_to_ctcb_ptr (tcbQueueHead aqueue) + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd aqueue)" + +definition cready_queues_relation :: + "(domain \ priority \ ready_queue) \ (tcb_queue_C[num_tcb_queues]) \ bool" + where + "cready_queues_relation aqueues cqueues \ + \d p. d \ maxDomain \ p \ maxPriority + \ ctcb_queue_relation (aqueues (d, p)) (index cqueues (cready_queues_index_to_C d p))" + +abbreviation + "cte_array_relation astate cstate + \ cvariable_array_map_relation (gsCNodes astate) (\n. 2 ^ n) + cte_Ptr (hrs_htd (t_hrs_' cstate))" + +(* There are 5 elements in the TCB CTE *) +abbreviation + "tcb_cte_array_relation astate cstate + \ cvariable_array_map_relation (map_to_tcbs (ksPSpace astate)) + (\x. 5) cte_Ptr (hrs_htd (t_hrs_' cstate))" + +fun + irqstate_to_C :: "irqstate \ machine_word" + where + "irqstate_to_C IRQInactive = scast Kernel_C.IRQInactive" + | "irqstate_to_C IRQSignal = scast Kernel_C.IRQSignal" + | "irqstate_to_C IRQTimer = scast Kernel_C.IRQTimer" + | "irqstate_to_C irqstate.IRQReserved = scast Kernel_C.IRQReserved" + +definition + cinterrupt_relation :: "interrupt_state \ 'a ptr \ (machine_word[384]) \ bool" +where + "cinterrupt_relation airqs cnode cirqs \ + cnode = Ptr (intStateIRQNode airqs) \ + (\irq \ (ucast Kernel_C.maxIRQ). + irqstate_to_C (intStateIRQTable airqs irq) = index cirqs (unat irq))" + +definition + cscheduler_action_relation :: "Structures_H.scheduler_action \ tcb_C ptr \ bool" +where + "cscheduler_action_relation a p \ case a of + ResumeCurrentThread \ p = NULL + | ChooseNewThread \ p = Ptr 1 + | SwitchToThread p' \ p = tcb_ptr_to_ctcb_ptr p'" + +definition + dom_schedule_entry_relation :: "8 word \ machine_word \ dschedule_C \ bool" +where + "dom_schedule_entry_relation adomSched cdomSched \ + ucast (fst adomSched) = dschedule_C.domain_C cdomSched \ + (snd adomSched) = dschedule_C.length_C cdomSched" + +definition + cdom_schedule_relation :: "(8 word \ machine_word) list \ (dschedule_C['b :: finite]) \ bool" +where + "cdom_schedule_relation adomSched cdomSched \ + length adomSched = card (UNIV :: 'b set) \ + (\n \ length adomSched. dom_schedule_entry_relation (adomSched ! n) (index cdomSched n))" + +definition + ghost_size_rel :: "cghost_state \ nat \ bool" +where + "ghost_size_rel gs maxSize = ((gs_get_assn cap_get_capSizeBits_'proc gs = 0 + \ maxSize = card (UNIV :: machine_word set)) + \ (maxSize > 0 \ maxSize = unat (gs_get_assn cap_get_capSizeBits_'proc gs)))" + +definition + cbitmap_L1_relation :: "machine_word['dom::finite] \ (domain \ machine_word) \ bool" +where + "cbitmap_L1_relation cbitmap1 abitmap1 \ + \d. (d \ maxDomain \ cbitmap1.[unat d] = abitmap1 d) \ + (\ d \ maxDomain \ abitmap1 d = 0)" + +definition + cbitmap_L2_relation :: "machine_word['i::finite]['dom::finite] + \ ((domain \ nat) \ machine_word) \ bool" +where + "cbitmap_L2_relation cbitmap2 abitmap2 \ + \d i. 
((d \ maxDomain \ i < l2BitmapSize) + \ cbitmap2.[unat d].[i] = abitmap2 (d, i)) \ + ((\ (d \ maxDomain \ i < l2BitmapSize)) + \ abitmap2 (d, i) = 0)" + +end (* interpretation Arch . (*FIXME: arch_split*) *) + +definition + region_is_bytes' :: "machine_word \ nat \ heap_typ_desc \ bool" +where + "region_is_bytes' ptr sz htd \ \z\{ptr ..+ sz}. \ td. td \ typ_uinfo_t TYPE (word8) \ + (\n b. snd (htd z) n \ Some (td, b))" + +abbreviation + region_is_bytes :: "machine_word \ nat \ globals myvars \ bool" +where + "region_is_bytes ptr sz s \ region_is_bytes' ptr sz (hrs_htd (t_hrs_' (globals s)))" + +abbreviation(input) + "heap_list_is_zero hp ptr n \ heap_list hp n ptr = replicate n 0" + +abbreviation + "region_is_zero_bytes ptr n x \ region_is_bytes ptr n x + \ heap_list_is_zero (hrs_mem (t_hrs_' (globals x))) ptr n" + +definition + region_actually_is_bytes' :: "addr \ nat \ heap_typ_desc \ bool" +where + "region_actually_is_bytes' ptr len htd + = (\x \ {ptr ..+ len}. htd x + = (True, [0 \ (typ_uinfo_t TYPE(8 word), True)]))" + +abbreviation + "region_actually_is_bytes ptr len s + \ region_actually_is_bytes' ptr len (hrs_htd (t_hrs_' (globals s)))" + +lemmas region_actually_is_bytes_def = region_actually_is_bytes'_def + +abbreviation + "region_actually_is_zero_bytes ptr len s + \ region_actually_is_bytes ptr len s + \ heap_list_is_zero (hrs_mem (t_hrs_' (globals s))) ptr len" + +definition + zero_ranges_are_zero +where + "zero_ranges_are_zero rs hrs + = (\(start, end) \ rs. region_actually_is_bytes' start (unat ((end + 1) - start)) (hrs_htd hrs) + \ heap_list_is_zero (hrs_mem hrs) start (unat ((end + 1) - start)))" + +context state_rel begin + +\ \The IRQ node is a global array of CTEs.\ +abbreviation intStateIRQNode_array_Ptr :: "(cte_C[512]) ptr" where + "intStateIRQNode_array_Ptr \ Ptr (symbol_table ''intStateIRQNode'')" + +\ \But for compatibility with older proofs (written when the IRQ Node was a global pointer + initialised during boot), it is sometimes convenient to treat the IRQ node pointer as + a pointer to a CTE.\ +abbreviation intStateIRQNode_Ptr :: "cte_C ptr" where + "intStateIRQNode_Ptr \ Ptr (symbol_table ''intStateIRQNode'')" + +definition + cstate_relation :: "KernelStateData_H.kernel_state \ globals \ bool" +where + cstate_relation_def: + "cstate_relation astate cstate \ + let cheap = t_hrs_' cstate in + cpspace_relation (ksPSpace astate) (underlying_memory (ksMachineState astate)) cheap \ + cready_queues_relation (ksReadyQueues astate) (ksReadyQueues_' cstate) \ + zero_ranges_are_zero (gsUntypedZeroRanges astate) cheap \ + cbitmap_L1_relation (ksReadyQueuesL1Bitmap_' cstate) (ksReadyQueuesL1Bitmap astate) \ + cbitmap_L2_relation (ksReadyQueuesL2Bitmap_' cstate) (ksReadyQueuesL2Bitmap astate) \ + ksCurThread_' cstate = (tcb_ptr_to_ctcb_ptr (ksCurThread astate)) \ + ksIdleThread_' cstate = (tcb_ptr_to_ctcb_ptr (ksIdleThread astate)) \ + cinterrupt_relation (ksInterruptState astate) intStateIRQNode_array_Ptr (intStateIRQTable_' cstate) \ + cscheduler_action_relation (ksSchedulerAction astate) + (ksSchedulerAction_' cstate) \ + carch_state_relation (ksArchState astate) cstate \ + cmachine_state_relation (ksMachineState astate) cstate \ + cte_array_relation astate cstate \ + tcb_cte_array_relation astate cstate \ + pt_array_relation astate cstate \ + apsnd fst (ghost'state_' cstate) = (gsUserPages astate, gsCNodes astate) \ + fst (snd (snd (ghost'state_' cstate))) = gsPTTypes (ksArchState astate) \ + ghost_size_rel (ghost'state_' cstate) (gsMaxObjectSize astate) \ + 
ksWorkUnitsCompleted_' cstate = ksWorkUnitsCompleted astate \ + h_t_valid (hrs_htd (t_hrs_' cstate)) c_guard intStateIRQNode_array_Ptr \ + ptr_span intStateIRQNode_array_Ptr \ kernel_data_refs \ + h_t_valid (hrs_htd (t_hrs_' cstate)) c_guard armKSGlobalUserVSpace_Ptr \ + ptr_span armKSGlobalUserVSpace_Ptr \ kernel_data_refs \ + htd_safe domain (hrs_htd (t_hrs_' cstate)) \ + -domain \ kernel_data_refs \ + globals_list_distinct (- kernel_data_refs) symbol_table globals_list \ + cdom_schedule_relation (ksDomSchedule astate) + Kernel_C.kernel_all_global_addresses.ksDomSchedule \ + ksDomScheduleIdx_' cstate = of_nat (ksDomScheduleIdx astate) \ + ksCurDomain_' cstate = ucast (ksCurDomain astate) \ + ksDomainTime_' cstate = ksDomainTime astate" + +end + +definition + ccap_relation :: "capability \ cap_C \ bool" +where + "ccap_relation acap ccap \ (Some acap = option_map cap_to_H (cap_lift ccap)) + \ (c_valid_cap ccap)" + +lemma ccap_relation_c_valid_cap: "ccap_relation c c' \ c_valid_cap c'" + by (simp add: ccap_relation_def) + +context begin interpretation Arch . +fun + arch_fault_to_fault_tag :: "arch_fault \ machine_word" + where + "arch_fault_to_fault_tag (VMFault a b) = scast seL4_Fault_VMFault" +| "arch_fault_to_fault_tag (VCPUFault a) = scast seL4_Fault_VCPUFault" +| "arch_fault_to_fault_tag (VGICMaintenance a) = scast seL4_Fault_VGICMaintenance" +| "arch_fault_to_fault_tag (VPPIEvent a) = scast seL4_Fault_VPPIEvent" +end + +fun + fault_to_fault_tag :: "fault \ machine_word" +where + " fault_to_fault_tag (CapFault a b c) = scast seL4_Fault_CapFault" + | "fault_to_fault_tag (ArchFault f) = arch_fault_to_fault_tag f" + | "fault_to_fault_tag (UnknownSyscallException a) = scast seL4_Fault_UnknownSyscall" + | "fault_to_fault_tag (UserException a b) = scast seL4_Fault_UserException" + +lemmas seL4_Faults = seL4_Fault_UserException_def + seL4_Fault_UnknownSyscall_def + seL4_Fault_CapFault_def + +lemmas seL4_Arch_Faults = seL4_Fault_VMFault_def + seL4_Fault_VCPUFault_def + seL4_Fault_VGICMaintenance_def + seL4_Fault_VPPIEvent_def + +(* Return relations *) + +record errtype = + errfault :: "seL4_Fault_CL option" + errlookup_fault :: "lookup_fault_CL option" + errsyscall :: syscall_error_C + +primrec + lookup_failure_rel :: "lookup_failure \ machine_word \ errtype \ bool" +where + "lookup_failure_rel InvalidRoot fl es = (fl = scast EXCEPTION_LOOKUP_FAULT \ errlookup_fault es = Some Lookup_fault_invalid_root)" +| "lookup_failure_rel (GuardMismatch bl gf sz) fl es = (fl = scast EXCEPTION_LOOKUP_FAULT \ + (\lf. errlookup_fault es = Some (Lookup_fault_guard_mismatch lf) \ + guardFound_CL lf = gf \ unat (bitsLeft_CL lf) = bl \ unat (bitsFound_CL lf) = sz))" +| "lookup_failure_rel (DepthMismatch bl bf) fl es = (fl = scast EXCEPTION_LOOKUP_FAULT \ + (\lf. errlookup_fault es = Some (Lookup_fault_depth_mismatch lf) \ + unat (lookup_fault_depth_mismatch_CL.bitsLeft_CL lf) = bl + \ unat (lookup_fault_depth_mismatch_CL.bitsFound_CL lf) = bf))" +| "lookup_failure_rel (MissingCapability bl) fl es = (fl = scast EXCEPTION_LOOKUP_FAULT \ + (\lf. 
errlookup_fault es = Some (Lookup_fault_missing_capability lf) \ + unat (lookup_fault_missing_capability_CL.bitsLeft_CL lf) = bl))" + + +definition + syscall_error_to_H :: "syscall_error_C \ lookup_fault_CL option \ syscall_error option" +where + "syscall_error_to_H se lf \ + if syscall_error_C.type_C se = scast seL4_InvalidArgument + then Some (InvalidArgument (unat (invalidArgumentNumber_C se))) + else if syscall_error_C.type_C se = scast seL4_InvalidCapability + then Some (InvalidCapability (unat (invalidCapNumber_C se))) + else if syscall_error_C.type_C se = scast seL4_IllegalOperation then Some IllegalOperation + else if syscall_error_C.type_C se = scast seL4_RangeError + then Some (RangeError (rangeErrorMin_C se) (rangeErrorMax_C se)) + else if syscall_error_C.type_C se = scast seL4_AlignmentError then Some AlignmentError + else if syscall_error_C.type_C se = scast seL4_FailedLookup + then option_map (FailedLookup (to_bool (failedLookupWasSource_C se)) + o lookup_fault_to_H) lf + else if syscall_error_C.type_C se = scast seL4_TruncatedMessage then Some TruncatedMessage + else if syscall_error_C.type_C se = scast seL4_DeleteFirst then Some DeleteFirst + else if syscall_error_C.type_C se = scast seL4_RevokeFirst then Some RevokeFirst + else if syscall_error_C.type_C se = scast seL4_NotEnoughMemory then Some (NotEnoughMemory (memoryLeft_C se)) + else None" + +lemmas syscall_error_type_defs + = seL4_AlignmentError_def seL4_DeleteFirst_def seL4_FailedLookup_def + seL4_IllegalOperation_def seL4_InvalidArgument_def seL4_InvalidCapability_def + seL4_NotEnoughMemory_def seL4_RangeError_def seL4_RevokeFirst_def + seL4_TruncatedMessage_def + +lemma + syscall_error_to_H_cases: + "syscall_error_C.type_C se = scast seL4_InvalidArgument + \ syscall_error_to_H se lf = Some (InvalidArgument (unat (invalidArgumentNumber_C se)))" + "syscall_error_C.type_C se = scast seL4_InvalidCapability + \ syscall_error_to_H se lf = Some (InvalidCapability (unat (invalidCapNumber_C se)))" + "syscall_error_C.type_C se = scast seL4_IllegalOperation + \ syscall_error_to_H se lf = Some IllegalOperation" + "syscall_error_C.type_C se = scast seL4_RangeError + \ syscall_error_to_H se lf = Some (RangeError (rangeErrorMin_C se) (rangeErrorMax_C se))" + "syscall_error_C.type_C se = scast seL4_AlignmentError + \ syscall_error_to_H se lf = Some AlignmentError" + "syscall_error_C.type_C se = scast seL4_FailedLookup + \ syscall_error_to_H se lf = option_map (FailedLookup (to_bool (failedLookupWasSource_C se)) + o lookup_fault_to_H) lf" + "syscall_error_C.type_C se = scast seL4_TruncatedMessage + \ syscall_error_to_H se lf = Some TruncatedMessage" + "syscall_error_C.type_C se = scast seL4_DeleteFirst + \ syscall_error_to_H se lf = Some DeleteFirst" + "syscall_error_C.type_C se = scast seL4_RevokeFirst + \ syscall_error_to_H se lf = Some RevokeFirst" + "syscall_error_C.type_C se = scast seL4_NotEnoughMemory + \ syscall_error_to_H se lf = Some (NotEnoughMemory (memoryLeft_C se))" + by (simp add: syscall_error_to_H_def syscall_error_type_defs)+ + +definition + syscall_error_rel :: "syscall_error \ machine_word \ errtype \ bool" where + "syscall_error_rel se fl es \ fl = scast EXCEPTION_SYSCALL_ERROR + \ syscall_error_to_H (errsyscall es) (errlookup_fault es) + = Some se" + +(* cap rights *) +definition + "cap_rights_to_H rs \ CapRights (to_bool (capAllowWrite_CL rs)) + (to_bool (capAllowRead_CL rs)) + (to_bool (capAllowGrant_CL rs)) + (to_bool (capAllowGrantReply_CL rs))" + +definition + "ccap_rights_relation cr cr' \ cr = 
cap_rights_to_H (seL4_CapRights_lift cr')" + +definition + syscall_from_H :: "syscall \ machine_word" +where + "syscall_from_H c \ case c of + SysSend \ scast Kernel_C.SysSend + | SysNBSend \ scast Kernel_C.SysNBSend + | SysCall \ scast Kernel_C.SysCall + | SysRecv \ scast Kernel_C.SysRecv + | SysReply \ scast Kernel_C.SysReply + | SysReplyRecv \ scast Kernel_C.SysReplyRecv + | SysNBRecv \ scast Kernel_C.SysNBRecv + | SysYield \ scast Kernel_C.SysYield" + +context kernel +begin + +lemma syscall_error_to_H_cases_rev: + "\n. syscall_error_to_H e lf = Some (InvalidArgument n) \ + syscall_error_C.type_C e = scast seL4_InvalidArgument" + "\n. syscall_error_to_H e lf = Some (InvalidCapability n) \ + syscall_error_C.type_C e = scast seL4_InvalidCapability" + "syscall_error_to_H e lf = Some IllegalOperation \ + syscall_error_C.type_C e = scast seL4_IllegalOperation" + "\w1 w2. syscall_error_to_H e lf = Some (RangeError w1 w2) \ + syscall_error_C.type_C e = scast seL4_RangeError" + "syscall_error_to_H e lf = Some AlignmentError \ + syscall_error_C.type_C e = scast seL4_AlignmentError" + "\b lf'. syscall_error_to_H e lf = Some (FailedLookup b lf') \ + syscall_error_C.type_C e = scast seL4_FailedLookup" + "syscall_error_to_H e lf = Some TruncatedMessage \ + syscall_error_C.type_C e = scast seL4_TruncatedMessage" + "syscall_error_to_H e lf = Some DeleteFirst \ + syscall_error_C.type_C e = scast seL4_DeleteFirst" + "syscall_error_to_H e lf = Some RevokeFirst \ + syscall_error_C.type_C e = scast seL4_RevokeFirst" + by (clarsimp simp: syscall_error_to_H_def syscall_error_type_defs + split: if_split_asm)+ + +lemma cmap_relation_cs_atD: + "\ cmap_relation as cs addr_fun rel; cs (addr_fun x) = Some y; inj addr_fun \ \ + \ko. as x = Some ko \ rel ko y" + apply (clarsimp simp: cmap_relation_def) + apply (subgoal_tac "x \ dom as") + apply (drule (1) bspec) + apply (clarsimp simp: dom_def) + apply (subgoal_tac "addr_fun x \ addr_fun ` dom as") + prefer 2 + apply fastforce + apply (erule imageE) + apply (drule (1) injD) + apply simp + done + +definition + rf_sr :: "(KernelStateData_H.kernel_state \ cstate) set" + where + "rf_sr \ {(s, s'). cstate_relation s (globals s')}" + +end + +end diff --git a/proof/crefine/AARCH64/StoreWord_C.thy b/proof/crefine/AARCH64/StoreWord_C.thy new file mode 100644 index 0000000000..305001dc68 --- /dev/null +++ b/proof/crefine/AARCH64/StoreWord_C.thy @@ -0,0 +1,1160 @@ +(* + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory StoreWord_C +imports VSpace_C +begin + +context kernel_m +begin + +lemma in_doMachineOp: + "(a, s) \ fst (doMachineOp f s') = (\b. (a, b) \ fst (f (ksMachineState s')) \ s = s'\ksMachineState := b\)" + unfolding doMachineOp_def + by (simp add: in_monad select_f_def) + +lemma dom_heap_to_user_data: + "dom (heap_to_user_data hp uhp) = dom (map_to_user_data hp)" + unfolding heap_to_user_data_def by (simp add: Let_def dom_def) + +lemma dom_heap_to_device_data: + "dom (heap_to_device_data hp uhp) = dom (map_to_user_data_device hp)" + unfolding heap_to_device_data_def by (simp add: Let_def dom_def) + +lemma projectKO_opt_retyp_same: + assumes pko: "projectKO_opt ko = Some v" + shows "projectKO_opt \\<^sub>m + (\x. if x \ set (new_cap_addrs sz ptr ko) then Some ko else ksPSpace \ x) + = + (\x. 
if x \ set (new_cap_addrs sz ptr ko) then Some v else (projectKO_opt \\<^sub>m (ksPSpace \)) x)" + (is "?LHS = ?RHS") +proof (rule ext) + fix x + + show "?LHS x = ?RHS x" + proof (cases "x \ set (new_cap_addrs sz ptr ko)") + case True + thus ?thesis using pko by simp + next + case False + thus ?thesis by (simp add: map_comp_def) + qed +qed + + +lemma byte_to_word_heap_upd_outside_range: + "p \ {(base + ucast off * 8)..+8} \ + byte_to_word_heap (m (p := v')) base off = byte_to_word_heap m base off" + apply (simp add: byte_to_word_heap_def Let_def) + apply (erule contrapos_np) + by (clarsimp intro!: intvl_inter_le [where k=0 and ka=7, simplified, OF refl] + intvl_inter_le [where k=0 and ka=6, simplified, OF refl] + intvl_inter_le [where k=0 and ka=5, simplified, OF refl] + intvl_inter_le [where k=0 and ka=4, simplified, OF refl] + intvl_inter_le [where k=0 and ka=3, simplified, OF refl] + intvl_inter_le [where k=0 and ka=2, simplified, OF refl] + intvl_inter_le [where k=0 and ka=1, simplified, OF refl] + intvl_inter_le [where k=0 and ka=0, simplified, OF refl] + split: if_split_asm) (* long, many if_asm splits *) + +lemma intvl_range_conv: + "\ is_aligned (ptr :: 'a :: len word) bits; bits < len_of TYPE('a) \ \ + {ptr ..+ 2 ^ bits} = {ptr .. ptr + 2 ^ bits - 1}" + by (rule upto_intvl_eq) + +lemma byte_to_word_heap_upd_neq: + assumes alb: "is_aligned base 3" + and alp: "is_aligned p 3" + and neq: "base + ucast off * 8 \ p" + and word_byte: "n < 8" + shows "byte_to_word_heap (m (p + n := v')) base off = byte_to_word_heap m base off" +proof - + from alb have alw: "is_aligned (base + ucast off * 8) 3" + by (fastforce elim: aligned_add_aligned + intro: is_aligned_mult_triv2 [where n=3, simplified] + simp: word_bits_def) + + from alp have p_intvl: "p + n \ {p .. p + 7}" + apply (clarsimp simp: word_byte) + apply (rule conjI) + apply (fastforce elim: is_aligned_no_wrap' simp: word_byte) + apply (subst word_plus_mono_right) + apply (clarsimp simp: word_byte word_le_make_less) + apply (simp add: word_bits_def is_aligned_no_overflow'[OF alp, simplified]) + apply simp + done + + hence not_in_range: "p + n \ {(base + ucast off * 8)..+8}" + apply (subst intvl_range_conv [OF alw, simplified]) + apply (simp add: word_bits_def) + apply (cut_tac aligned_neq_into_no_overlap [OF neq alw alp]) + apply (auto simp: field_simps range_inter)[1] + done + + thus ?thesis + (* when this is "by ..", it waits for byte_to_word_heap_upd_outside_range to complete *) + apply (rule byte_to_word_heap_upd_outside_range) + done +qed + +lemma update_ti_t_acc_foo: + "\acc f v. \ \a ys v. \ a \ set adjs; length ys = size_td_pair a \ + \ acc (update_ti_pair a ys v) = update_ti_pair (f a) ys (acc v); + \a. size_td_pair (f a) = size_td_pair a \ \ + \xs. acc (update_ti_list_t adjs xs v) = update_ti_list_t (map f adjs) xs (acc v)" + apply (simp add: update_ti_list_t_def size_td_list_map2 split: if_split) + apply (induct adjs) + apply simp + apply clarsimp + done + +lemma nat_less_8_cases: + "i < (8::nat) ==> i = 0 | i = 1 | i = 2 | i = 3 | i = 4 | i = 5 | i = 6 | i = 7" + by auto + +lemma user_data_relation_upd: + assumes al: "is_aligned ptr 3" + shows "cuser_user_data_relation + (byte_to_word_heap + (underlying_memory (ksMachineState \)) (ptr && ~~ mask pageBits)) + (the (cslift s (Ptr (ptr && ~~ mask pageBits)))) \ + cuser_user_data_relation + (byte_to_word_heap + ((underlying_memory (ksMachineState \)) + (ptr := word_rsplit w ! 7, ptr + 1 := word_rsplit w ! 6, + ptr + 2 := word_rsplit w ! 5, ptr + 3 := word_rsplit w ! 
4, + ptr + 4 := word_rsplit w ! 3, ptr + 5 := word_rsplit w ! 2, + ptr + 6 := word_rsplit w ! Suc 0, ptr + 7 := word_rsplit w ! 0)) + (ptr && ~~ mask pageBits)) + (user_data_C.words_C_update + (\ws. Arrays.update ws (unat (ucast ((ptr && mask pageBits) >> 3):: 9 word)) w) + (the (cslift s (Ptr (ptr && ~~ mask pageBits)))))" + unfolding cuser_user_data_relation_def + apply - + apply (erule allEI) + apply (case_tac "off = ucast ((ptr && mask pageBits) >> 3)") + apply (clarsimp simp: mask_pageBits_inner_beauty [OF al] byte_to_word_heap_def) + apply (subst index_update) + apply (simp, unat_arith, simp) + apply (subgoal_tac "map ((!) (word_rsplit w)) [0,1,2,3,4,5,6,7] + = (word_rsplit w :: word8 list)") + apply (clarsimp simp: word_rcat_rsplit) + apply (cut_tac w=w and m=8 and 'a=8 + in length_word_rsplit_even_size [OF refl]) + apply (simp add: word_size) + apply (rule nth_equalityI[symmetric]) + apply simp + apply (subgoal_tac "[0,1,2,3,4,5,6,7] = [0..<8]") + apply clarsimp + apply (rule nth_equalityI[symmetric]) + apply simp + apply (auto dest: nat_less_8_cases)[1] + apply (frule more_pageBits_inner_beauty) + apply (simp add: byte_to_word_heap_upd_neq aligned_already_mask al + byte_to_word_heap_upd_neq [where n=0, simplified]) + apply (subst index_update2) + apply (cut_tac x=off in unat_lt2p, simp) + apply simp + apply simp + done + +(* This lemma is true for trivial reason. + But it might become non-trivial if we change our way of modeling device memory *) +lemma user_data_device_relation_upd: + assumes al: "is_aligned ptr 3" + shows "cuser_user_data_device_relation + (byte_to_word_heap + (underlying_memory (ksMachineState \)) (ptr && ~~ mask pageBits)) + (the (cslift s (Ptr (ptr && ~~ mask pageBits)))) \ + cuser_user_data_device_relation + (byte_to_word_heap + ((underlying_memory (ksMachineState \)) + (ptr := word_rsplit w ! 7, ptr + 1 := word_rsplit w ! 6, + ptr + 2 := word_rsplit w ! 5, ptr + 3 := word_rsplit w ! 4, + ptr + 4 := word_rsplit w ! 3, ptr + 5 := word_rsplit w ! 2, + ptr + 6 := word_rsplit w ! Suc 0, ptr + 7 := word_rsplit w ! 0)) + (ptr && ~~ mask pageBits)) + (user_data_device_C.words_C_update + (\ws. 
Arrays.update ws (unat (ucast ((ptr && mask pageBits) >> 3):: 9 word)) w) + (the (cslift s (Ptr (ptr && ~~ mask pageBits)))))" + by (simp add:cuser_user_data_device_relation_def ) + +lemma deviceDataSeperate: + "\\ pointerInDeviceData ptr \; pspace_distinct' \; pspace_aligned' \; ksPSpace \ x = Some KOUserDataDevice\ + \ ptr \ x" + apply (rule ccontr,clarsimp) + apply (frule(1) pspace_alignedD') + apply (clarsimp simp: pointerInDeviceData_def objBits_simps typ_at'_def ko_wp_at'_def) + apply (frule(1) pspace_distinctD') + apply (clarsimp simp: objBits_simps) + done + +lemma userDataSeperate: + "\\ pointerInUserData ptr \; pspace_distinct' \; pspace_aligned' \; ksPSpace \ x = Some KOUserData\ + \ ptr \ x" + apply (rule ccontr,clarsimp) + apply (frule(1) pspace_alignedD') + apply (clarsimp simp: pointerInUserData_def objBits_simps typ_at'_def ko_wp_at'_def) + apply (frule(1) pspace_distinctD') + apply (clarsimp simp: objBits_simps) + done + +lemma pointerInUserData_whole_word[simp]: + "\is_aligned ptr 3; n < 8\ \ pointerInUserData (ptr + n) \ = pointerInUserData ptr \" + apply (simp add:pointerInUserData_def pageBits_def) + apply (subst and_not_mask_twice[symmetric,where m = 12 and n =3,simplified]) + apply (simp add: neg_mask_add_aligned[where n=3,simplified]) + done + +lemma pointerInDeviceData_whole_word[simp]: + "\is_aligned ptr 3; n < 8\ \ pointerInDeviceData (ptr + n) \ = pointerInDeviceData ptr \" + apply (simp add:pointerInDeviceData_def pageBits_def) + apply (subst and_not_mask_twice[symmetric,where m = 12 and n =3,simplified]) + apply (simp add: neg_mask_add_aligned[where n=3,simplified]) + done + +lemma du_ptr_disjoint: + "pointerInDeviceData ptr \ \ \ pointerInUserData ptr \" + "pointerInUserData ptr \ \ \ pointerInDeviceData ptr \" + by (auto simp: pointerInDeviceData_def pointerInUserData_def typ_at'_def ko_wp_at'_def) + +lemma heap_to_device_data_seperate: + "\ \ pointerInDeviceData ptr \; pspace_distinct' \; pspace_aligned' \\ + \ heap_to_device_data (ksPSpace \) (fun_upd ms ptr a) x + = heap_to_device_data (ksPSpace \) ms x" + apply (simp add : heap_to_device_data_def) + apply (case_tac "map_to_user_data_device (ksPSpace \) x") + apply simp + apply simp + apply (clarsimp simp add: projectKO_opt_user_data_device map_comp_def + split: option.split_asm kernel_object.splits) + apply (frule deviceDataSeperate) + apply simp+ + apply (frule(1) pspace_alignedD') + apply (simp add: objBits_simps) + apply (rule ext) + apply (subst AND_NOT_mask_plus_AND_mask_eq[symmetric,where n =3]) + apply (subst byte_to_word_heap_upd_neq[where n = "ptr && mask 3",simplified]) + apply (erule is_aligned_weaken,simp add:pageBits_def) + apply simp+ + apply (clarsimp simp: pointerInDeviceData_def pageBits_def) + apply (subst(asm) and_not_mask_twice[symmetric,where m = 12 and n =3,simplified]) + apply (drule sym[where t=" ptr && ~~ mask 3"]) + apply simp + apply (subst(asm) neg_mask_add_aligned,assumption) + apply (rule word_less_power_trans2[where k = 3,simplified]) + apply (simp add: pageBits_def) + apply (rule less_le_trans[OF ucast_less],simp+) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def pageBits_def objBits_simps + dest!: pspace_distinctD') + apply (rule word_and_less') + apply (simp add:mask_def) + apply simp + done + +lemma heap_to_user_data_seperate: + "\ \ pointerInUserData ptr \; pspace_distinct' \; pspace_aligned' \\ + \ heap_to_user_data (ksPSpace \) (fun_upd ms ptr a) x + = heap_to_user_data (ksPSpace \) ms x" + apply (simp add : heap_to_user_data_def) + apply (case_tac "map_to_user_data 
(ksPSpace \) x") + apply simp + apply simp + apply (clarsimp simp add: projectKO_opt_user_data map_comp_def + split: option.split_asm kernel_object.splits) + apply (frule userDataSeperate) + apply simp+ + apply (frule(1) pspace_alignedD') + apply (simp add:objBits_simps) + apply (rule ext) + apply (subst AND_NOT_mask_plus_AND_mask_eq[symmetric,where n =3]) + apply (subst byte_to_word_heap_upd_neq[where n = "ptr && mask 3",simplified]) + apply (erule is_aligned_weaken, simp add: pageBits_def) + apply simp+ + apply (clarsimp simp: pointerInUserData_def pageBits_def) + apply (subst(asm) and_not_mask_twice[symmetric,where m = 12 and n =3,simplified]) + apply (drule sym[where t=" ptr && ~~ mask 3"]) + apply simp + apply (subst(asm) neg_mask_add_aligned,assumption) + apply (rule word_less_power_trans2[where k = 3,simplified]) + apply (simp add: pageBits_def) + apply (rule less_le_trans[OF ucast_less],simp+) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def pageBits_def objBits_simps + dest!: pspace_distinctD') + apply (rule word_and_less') + apply (simp add:mask_def) + apply simp + done + +lemma storeWordUser_rf_sr_upd': + shows "\\ s. + (\, s) \ rf_sr \ pspace_aligned' \ \ pspace_distinct' \ + \ pointerInUserData ptr \ \ is_aligned ptr 3 \ + (\\ksMachineState := underlying_memory_update (\m. + m(ptr := word_rsplit (w::machine_word) ! 7, ptr + 1 := word_rsplit w ! 6, + ptr + 2 := word_rsplit w ! 5, ptr + 3 := word_rsplit w ! 4, + ptr + 4 := word_rsplit (w::machine_word) ! 3, ptr + 5 := word_rsplit w ! 2, + ptr + 6 := word_rsplit w ! 1, ptr + 7 := word_rsplit w ! 0)) + (ksMachineState \)\, + s\globals := globals s\t_hrs_' := hrs_mem_update (heap_update (Ptr ptr) w) (t_hrs_' (globals s))\\) \ rf_sr" + (is "\\ s. ?P \ s \ + (\\ksMachineState := ?ms \\, + s\globals := globals s\t_hrs_' := ?ks' s\\) \ rf_sr") +proof (intro allI impI) + fix \ s + let ?thesis = "(\\ksMachineState := ?ms \\, s\globals := globals s\t_hrs_' := ?ks' s\\) \ rf_sr" + let ?ms = "?ms \" + let ?ks' = "?ks' s" + let ?ptr = "Ptr ptr :: machine_word ptr" + let ?hp = "t_hrs_' (globals s)" + + assume "?P \ s" + hence rf: "(\, s) \ rf_sr" and al: "is_aligned ptr 3" + and pal: "pspace_aligned' \" and pdst: "pspace_distinct' \" + and piud: "pointerInUserData ptr \" + by simp_all + + define offset where "offset \ ucast ((ptr && mask pageBits) >> 3) :: 9 word" + define base where "base \ Ptr (ptr && ~~ mask pageBits) :: user_data_C ptr" + + from piud + obtain old_w where + old_w: "heap_to_user_data (ksPSpace \) (underlying_memory (ksMachineState \)) (ptr_val base) = Some old_w" + apply (clarsimp simp: heap_to_user_data_def pointerInUserData_def Let_def) + apply (drule user_data_at_ko) + apply (drule ko_at_projectKO_opt) + apply (simp add: base_def) + done + + from rf + obtain page :: user_data_C + where page: "cslift s base = Some page" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) + apply (erule cmap_relationE1, rule old_w) + apply simp + done + + from page + have page_def: "page = the (cslift s base)" by simp + + have size_td_list_map[rule_format, OF order_refl]: + "\f xs v S. set xs \ S \ (\x. x \ S \ size_td_pair (f x) = v) + \ size_td_list (map f xs) = v * length xs" + apply (induct_tac xs) + apply simp_all + done + + have user_data_upd: + "\A f v. 
heap_update base (user_data_C.words_C_update f v) = + heap_update (ptr_coerce base) (f (user_data_C.words_C v))" + apply (rule ext) + apply (simp add: heap_update_def to_bytes_def) + apply (simp add: user_data_C_typ_tag user_data_C_tag_def) + apply (simp add: final_pad_def Let_def) + apply (simp add: align_td_array' cong: if_cong) + apply (simp add: ti_typ_pad_combine_def Let_def ti_typ_combine_def adjust_ti_def empty_typ_info_def size_td_array cong: if_cong) + apply (simp add: padup_def) + apply (simp add: typ_info_array') + apply (simp add: size_of_def size_td_list_map) + done + + have ud_split: "\x z. user_data_C.words_C_update (\_. x) z = user_data_C x" + by (case_tac z, simp) + + have map_td_list_map: + "\f xs. map_td_list f xs = map (map_td_pair f) xs" + by (induct_tac xs, simp_all) + + have update_ti_t_Cons_foo: + "\Cons upd adjs f v v'. \ v = Cons v'; \a ys v. length ys = size_td_pair a + \ update_ti_pair (map_td_pair f a) ys (Cons v) = Cons (update_ti_pair a ys v) \ + \ \xs. update_ti_list_t (map_td_list f adjs) xs v + = Cons (update_ti_list_t adjs xs v')" + apply (simp add: update_ti_list_t_def split: if_split) + apply (induct_tac adjs) + apply simp + apply clarsimp + done + + note if_cong[cong] + have hval: + "\hp. h_val hp base = user_data_C (h_val hp (ptr_coerce base))" + apply (simp add: h_val_def base_def from_bytes_def) + apply (simp add: user_data_C_typ_tag user_data_C_tag_def) + apply (simp add: final_pad_def Let_def) + apply (simp add: align_td_array' cong: if_cong) + apply (simp add: ti_typ_pad_combine_def Let_def ti_typ_combine_def adjust_ti_def empty_typ_info_def size_td_array) + apply (simp add: padup_def size_of_def typ_info_array' size_td_list_map) + apply (simp add: map_td_list_map) + apply (rule injD[where f=user_data_C.words_C]) + apply (rule injI) + apply (case_tac x, case_tac y, simp) + apply (simp add: map_td_list_map del: map_map) + apply (rule trans, rule_tac acc=user_data_C.words_C + and f="map_td_pair (K (K (update_desc user_data_C (\a b. user_data_C.words_C a))))" + in update_ti_t_acc_foo[rule_format]) + apply (clarsimp simp: map_td_list_map typ_info_word + adjust_ti_def update_desc_def) + apply simp + apply simp + apply (simp add: update_ti_list_array'[where g="\n. typ_info_t TYPE(machine_word)", OF refl] + typ_info_word adjust_ti_def update_desc_def) + apply (rule Arrays.cart_eq[THEN iffD2], clarsimp) + apply (subst index_fold_update | clarsimp)+ + apply (subst if_P, arith)+ + apply simp + done + + from and_mask_less_size [of pageBits ptr] + have ptr_mask_less: "ptr && mask pageBits >> 3 < 2^9" + apply - + apply (rule shiftr_less_t2n) + apply (simp add: pageBits_def word_size) + done + hence uoffset: + "unat offset = unat (ptr && mask pageBits >> 3)" + apply (simp add: offset_def) + apply (simp add: unat_ucast) + apply (simp add: word_less_nat_alt) + done + + have heap_upd: + "heap_update ?ptr w = + (\hp. heap_update base (user_data_C.words_C_update (\ws. Arrays.update ws (unat offset) w) (h_val hp base)) hp)" + apply (rule ext) + apply (subst user_data_upd) + apply (subst hval) + apply (unfold base_def uoffset) + apply simp + apply (subst heap_update_Array_element) + apply (insert ptr_mask_less)[1] + apply (simp add: word_less_nat_alt) + apply (simp add: ptr_add_def word_shift_by_3 shiftr_shiftl1) + apply (simp add: al is_aligned_andI1) + apply (simp add: word_plus_and_or_coroll2 add.commute) + done + + have x': "\x::9 word. 
(ucast x * 8::machine_word) && ~~ mask pageBits = 0" + proof - + fix x::"9 word" + have "ucast x * 8 = (ucast x << 3 :: machine_word)" + by (simp add: shiftl_t2n) + thus "?thesis x" + apply simp + apply (rule word_eqI) + apply (clarsimp simp: word_size nth_shiftl word_ops_nth_size nth_ucast) + apply (drule test_bit_size) + apply (clarsimp simp: word_size pageBits_def) + apply arith + done + qed + + have x: "\(x::machine_word) (y::9 word). + is_aligned x pageBits \ x + ucast y * 8 && ~~ mask pageBits = x" + apply (subst mask_out_add_aligned [symmetric], assumption) + apply (clarsimp simp: x') + done + + from piud al + have relrl: "cmap_relation (heap_to_user_data (ksPSpace \) + (underlying_memory (ksMachineState \))) + (cslift s) Ptr cuser_user_data_relation + \ cmap_relation + (heap_to_user_data (ksPSpace \) + ((underlying_memory (ksMachineState \))( + ptr := word_rsplit (w::machine_word) ! 7, ptr + 1 := word_rsplit w ! 6, + ptr + 2 := word_rsplit w ! 5, ptr + 3 := word_rsplit w ! 4, + ptr + 4 := word_rsplit (w::machine_word) ! 3, ptr + 5 := word_rsplit w ! 2, + ptr + 6 := word_rsplit w ! 1, ptr + 7 := word_rsplit w ! 0))) + (\y. if ptr_val y = (ptr_val ?ptr) && ~~ mask pageBits + then Some (user_data_C.words_C_update + (\ws. Arrays.update ws (unat (ucast ((ptr && mask pageBits) >> 3) :: 9 word)) w) + (the (cslift s y))) + else cslift s y) + Ptr cuser_user_data_relation" + apply - + apply (rule cmap_relationI) + apply (clarsimp simp: dom_heap_to_user_data cmap_relation_def dom_if_Some + intro!: Un_absorb1 [symmetric]) + apply (clarsimp simp: pointerInUserData_def) + apply (drule user_data_at_ko) + apply (drule ko_at_projectKO_opt) + apply (case_tac x) + apply clarsimp + apply fastforce + apply clarsimp + apply (case_tac "x = ptr && ~~ mask pageBits") + apply (fastforce simp: heap_to_user_data_def Let_def user_data_relation_upd cmap_relation_def + dest: bspec) + apply clarsimp + apply (subgoal_tac "Some v = heap_to_user_data (ksPSpace \) + (underlying_memory (ksMachineState \)) x") + apply (clarsimp simp: heap_to_user_data_def Let_def map_option_case + split: option.split_asm) + apply (fastforce simp: cmap_relation_def dest: bspec) + apply (clarsimp simp: heap_to_user_data_def Let_def) + apply (frule (1) cmap_relation_cs_atD) + apply simp + apply clarsimp + apply (drule map_to_ko_atI) + apply (rule pal) + apply (rule pdst) + apply (subgoal_tac "is_aligned x pageBits") + prefer 2 + apply (clarsimp simp: obj_at'_def objBits_simps simp: projectKOs) + apply (subgoal_tac "is_aligned x 3") + prefer 2 + apply (erule is_aligned_weaken) + apply (simp add: pageBits_def) + apply (rule ext) + apply (subst byte_to_word_heap_upd_neq, assumption+, clarsimp simp: x, simp)+ + apply (subst byte_to_word_heap_upd_neq [where n=0, simplified], assumption+) + apply (clarsimp simp: x) + apply simp + done + + have hrs_mem: + "\f hp'. + hrs_mem_update (\hp. 
heap_update base (f (h_val hp base)) hp) hp' + = hrs_mem_update (heap_update base (f (h_val (hrs_mem hp') base))) hp'" + by (simp add: hrs_mem_update_def split_def hrs_mem_def) + + from page + have rl': "typ_uinfo_t TYPE(user_data_C) \\<^sub>t typ_uinfo_t TYPE('t :: mem_type) \ + (clift (hrs_mem_update (heap_update ?ptr w) (t_hrs_' (globals s))) :: ('t :: mem_type) typ_heap) + = cslift s" + apply (subst heap_upd) + apply (subst hrs_mem) + apply (simp add: typ_heap_simps clift_heap_update_same) + done + + have subset: "{ptr..+ 2 ^ 3} \ {ptr && ~~ mask pageBits ..+ 2 ^ pageBits}" + apply (simp only: upto_intvl_eq al is_aligned_neg_mask2) + apply (cut_tac ptr="ptr && ~~ mask pageBits" and x="ptr && mask pageBits" + in aligned_range_offset_subset, rule is_aligned_neg_mask2) + apply (rule is_aligned_andI1[OF al]) + apply (simp add: pageBits_def) + apply (rule and_mask_less', simp add: pageBits_def) + apply (erule order_trans[rotated]) + apply (simp add: mask_out_sub_mask) + done + + hence zr: "\rs. zero_ranges_are_zero rs (hrs_mem_update (heap_update ?ptr w) (t_hrs_' (globals s))) + = zero_ranges_are_zero rs (t_hrs_' (globals s))" + using page + apply (clarsimp simp: zero_ranges_are_zero_def hrs_mem_update base_def + heap_update_def + intro!: ball_cong[OF refl] conj_cong[OF refl]) + apply (drule region_actually_is_bytes) + apply (frule(1) region_is_bytes_disjoint[rotated 2, OF h_t_valid_clift]) + apply simp + apply (subst heap_list_update_disjoint_same, simp_all) + apply ((subst Int_commute)?, erule disjoint_subset2[rotated]) + apply (simp add: pageBits_def) + done + + have cmap_relation_heap_cong: + "\as cs cs' f rel. \ cmap_relation as cs f rel; cs = cs' \ \ cmap_relation as cs' f rel" + by simp + + from rf have "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals s))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + hence "cpspace_relation (ksPSpace \) (underlying_memory ?ms) ?ks'" + unfolding cpspace_relation_def using page + apply - + apply (clarsimp simp: rl' tag_disj_via_td_name) + apply (drule relrl) + apply (simp add: heap_upd) + apply (subst hrs_mem) + apply (simp add: base_def offset_def) + apply (rule conjI) + apply (erule cmap_relation_heap_cong) + apply (simp add: typ_heap_simps') + apply (rule ext) + apply clarsimp + apply (case_tac y) + apply (clarsimp split: if_split) + apply (rule cmap_relationI) + apply (clarsimp simp: dom_heap_to_device_data cmap_relation_def dom_if_Some + intro!: Un_absorb1 [symmetric]) + using pal + apply (subst(asm) heap_to_device_data_seperate) + apply (simp add:piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_device_data_seperate) + apply (simp add:piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_device_data_seperate) + apply (simp add:piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_device_data_seperate) + apply (simp add:piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_device_data_seperate) + apply (simp add:piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_device_data_seperate) + apply (simp add:piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_device_data_seperate) + apply (simp add:piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_device_data_seperate) + apply (simp add:piud al du_ptr_disjoint pal pdst)+ + apply (erule cmap_relation_relI[where rel = cuser_user_data_device_relation]) + apply simp+ + done + + thus ?thesis using rf + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' 
tag_disj_via_td_name) + apply (simp add: carch_state_relation_def + cmachine_state_relation_def carch_globals_def) + apply (simp add: rl' tag_disj_via_td_name zr) + done +qed + + +lemma storeWordDevice_rf_sr_upd': + shows "\\ s. + (\, s) \ rf_sr \ pspace_aligned' \ \ pspace_distinct' \ + \ pointerInDeviceData ptr \ \ is_aligned ptr 3 \ + (\\ksMachineState := underlying_memory_update (\m. + m(ptr := word_rsplit (w::machine_word) ! 7, ptr + 1 := word_rsplit w ! 6, + ptr + 2 := word_rsplit w ! 5, ptr + 3 := word_rsplit w ! 4, + ptr + 4 := word_rsplit (w::machine_word) ! 3, ptr + 5 := word_rsplit w ! 2, + ptr + 6 := word_rsplit w ! 1, ptr + 7 := word_rsplit w ! 0)) + (ksMachineState \)\, + s\globals := globals s\t_hrs_' := hrs_mem_update (heap_update (Ptr ptr) w) (t_hrs_' (globals s))\\) \ rf_sr" + (is "\\ s. ?P \ s \ + (\\ksMachineState := ?ms \\, + s\globals := globals s\t_hrs_' := ?ks' s\\) \ rf_sr") +proof (intro allI impI) + fix \ s + let ?thesis = "(\\ksMachineState := ?ms \\, s\globals := globals s\t_hrs_' := ?ks' s\\) \ rf_sr" + let ?ms = "?ms \" + let ?ks' = "?ks' s" + let ?ptr = "Ptr ptr :: machine_word ptr" + let ?hp = "t_hrs_' (globals s)" + + assume "?P \ s" + hence rf: "(\, s) \ rf_sr" and al: "is_aligned ptr 3" + and pal: "pspace_aligned' \" and pdst: "pspace_distinct' \" + and piud: "pointerInDeviceData ptr \" + by simp_all + + define offset where "offset \ ucast ((ptr && mask pageBits) >> 3) :: 9 word" + define base where "base \ Ptr (ptr && ~~ mask pageBits) :: user_data_device_C ptr" + + from piud + obtain old_w where + old_w: "heap_to_device_data (ksPSpace \) (underlying_memory (ksMachineState \)) (ptr_val base) = Some old_w" + apply (clarsimp simp: heap_to_device_data_def pointerInDeviceData_def Let_def) + apply (drule device_data_at_ko) + apply (drule ko_at_projectKO_opt) + apply (simp add: base_def) + done + + from rf + obtain page :: user_data_device_C + where page: "cslift s base = Some page" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) + apply (erule cmap_relationE1, rule old_w) + apply simp + done + + from page + have page_def: "page = the (cslift s base)" by simp + + have size_td_list_map[rule_format, OF order_refl]: + "\f xs v S. set xs \ S \ (\x. x \ S \ size_td_pair (f x) = v) + \ size_td_list (map f xs) = v * length xs" + apply (induct_tac xs) + apply simp_all + done + + have user_data_upd: + "\A f v. heap_update base (user_data_device_C.words_C_update f v) = + heap_update (ptr_coerce base) (f (user_data_device_C.words_C v))" + apply (rule ext) + apply (simp add: heap_update_def to_bytes_def) + apply (simp add: user_data_device_C_typ_tag user_data_device_C_tag_def) + apply (simp add: final_pad_def Let_def) + apply (simp add: align_td_array' cong: if_cong) + apply (simp add: ti_typ_pad_combine_def Let_def ti_typ_combine_def adjust_ti_def empty_typ_info_def size_td_array cong: if_cong) + apply (simp add: padup_def) + apply (simp add: typ_info_array') + apply (simp add: size_of_def size_td_list_map) + done + + have ud_split: "\x z. user_data_device_C.words_C_update (\_. x) z = user_data_device_C x" + by (case_tac z, simp) + + have map_td_list_map: + "\f xs. map_td_list f xs = map (map_td_pair f) xs" + by (induct_tac xs, simp_all) + + have update_ti_t_Cons_foo: + "\Cons upd adjs f v v'. \ v = Cons v'; \a ys v. length ys = size_td_pair a + \ update_ti_pair (map_td_pair f a) ys (Cons v) = Cons (update_ti_pair a ys v) \ + \ \xs. 
update_ti_list_t (map_td_list f adjs) xs v + = Cons (update_ti_list_t adjs xs v')" + apply (simp add: update_ti_list_t_def split: if_split) + apply (induct_tac adjs) + apply simp + apply clarsimp + done + + note if_cong[cong] + have hval: + "\hp. h_val hp base = user_data_device_C (h_val hp (ptr_coerce base))" + apply (simp add: h_val_def base_def from_bytes_def) + apply (simp add: user_data_device_C_typ_tag user_data_device_C_tag_def) + apply (simp add: final_pad_def Let_def) + apply (simp add: align_td_array' cong: if_cong) + apply (simp add: ti_typ_pad_combine_def Let_def ti_typ_combine_def adjust_ti_def empty_typ_info_def size_td_array) + apply (simp add: padup_def size_of_def typ_info_array' size_td_list_map) + apply (simp add: map_td_list_map) + apply (rule injD[where f=user_data_device_C.words_C]) + apply (rule injI) + apply (case_tac x, case_tac y, simp) + apply (simp add: map_td_list_map del: map_map) + apply (rule trans, rule_tac acc=user_data_device_C.words_C + and f="map_td_pair (K (K (update_desc user_data_device_C (\a b. user_data_device_C.words_C a))))" + in update_ti_t_acc_foo[rule_format]) + apply (clarsimp simp: map_td_list_map typ_info_word + adjust_ti_def update_desc_def) + apply simp + apply simp + apply (simp add: update_ti_list_array'[where g="\n. typ_info_t TYPE(machine_word)", OF refl] + typ_info_word adjust_ti_def update_desc_def) + apply (rule Arrays.cart_eq[THEN iffD2], clarsimp) + apply (subst index_fold_update | clarsimp)+ + apply (subst if_P, arith)+ + apply simp + done + + from and_mask_less_size [of pageBits ptr] + have ptr_mask_less: "ptr && mask pageBits >> 3 < 2^9" + apply - + apply (rule shiftr_less_t2n) + apply (simp add: pageBits_def word_size) + done + hence uoffset: + "unat offset = unat (ptr && mask pageBits >> 3)" + apply (simp add: offset_def) + apply (simp add: unat_ucast) + apply (simp add: word_less_nat_alt) + done + + have heap_upd: + "heap_update ?ptr w = + (\hp. heap_update base (user_data_device_C.words_C_update (\ws. Arrays.update ws (unat offset) w) (h_val hp base)) hp)" + apply (rule ext) + apply (subst user_data_upd) + apply (subst hval) + apply (unfold base_def uoffset) + apply simp + apply (subst heap_update_Array_element) + apply (insert ptr_mask_less)[1] + apply (simp add: word_less_nat_alt) + apply (simp add: ptr_add_def word_shift_by_3 shiftr_shiftl1) + apply (simp add: al is_aligned_andI1) + apply (simp add: word_plus_and_or_coroll2 add.commute) + done + + have x': "\x::9 word. (ucast x * 8::machine_word) && ~~ mask pageBits = 0" + proof - + fix x::"9 word" + have "ucast x * 8 = (ucast x << 3 :: machine_word)" + by (simp add: shiftl_t2n) + thus "?thesis x" + apply simp + apply (rule word_eqI) + apply (clarsimp simp: word_size nth_shiftl word_ops_nth_size nth_ucast) + apply (drule test_bit_size) + apply (clarsimp simp: word_size pageBits_def) + apply arith + done + qed + + have x: "\(x::machine_word) (y::9 word). + is_aligned x pageBits \ x + ucast y * 8 && ~~ mask pageBits = x" + apply (subst mask_out_add_aligned [symmetric], assumption) + apply (clarsimp simp: x') + done + + from piud al + have relrl: "cmap_relation (heap_to_device_data (ksPSpace \) + (underlying_memory (ksMachineState \))) + (cslift s) Ptr cuser_user_data_device_relation + \ cmap_relation + (heap_to_device_data (ksPSpace \) + ((underlying_memory (ksMachineState \))( + ptr := word_rsplit (w::machine_word) ! 7, ptr + 1 := word_rsplit w ! 6, + ptr + 2 := word_rsplit w ! 5, ptr + 3 := word_rsplit w ! 4, + ptr + 4 := word_rsplit (w::machine_word) ! 
3, ptr + 5 := word_rsplit w ! 2, + ptr + 6 := word_rsplit w ! 1, ptr + 7 := word_rsplit w ! 0))) + (\y. if ptr_val y = (ptr_val ?ptr) && ~~ mask pageBits + then Some (user_data_device_C.words_C_update + (\ws. Arrays.update ws (unat (ucast ((ptr && mask pageBits) >> 3) :: 9 word)) w) + (the (cslift s y))) + else cslift s y) + Ptr cuser_user_data_device_relation" + apply - + apply (rule cmap_relationI) + apply (clarsimp simp: dom_heap_to_device_data cmap_relation_def dom_if_Some + intro!: Un_absorb1 [symmetric]) + apply (clarsimp simp: pointerInDeviceData_def) + apply (drule device_data_at_ko) + apply (drule ko_at_projectKO_opt) + apply (case_tac x) + apply clarsimp + apply fastforce + apply clarsimp + apply (case_tac "x = ptr && ~~ mask pageBits") + apply (fastforce simp: heap_to_device_data_def Let_def user_data_device_relation_upd cmap_relation_def + dest: bspec) + apply clarsimp + apply (subgoal_tac "Some v = heap_to_device_data (ksPSpace \) + (underlying_memory (ksMachineState \)) x") + apply (clarsimp simp: heap_to_device_data_def Let_def map_option_case + split: option.split_asm) + apply (fastforce simp: cmap_relation_def dest: bspec) + apply (clarsimp simp: heap_to_device_data_def Let_def) + apply (frule (1) cmap_relation_cs_atD) + apply simp + apply clarsimp + apply (drule map_to_ko_atI) + apply (rule pal) + apply (rule pdst) + apply (subgoal_tac "is_aligned x pageBits") + prefer 2 + apply (clarsimp simp: obj_at'_def objBits_simps simp: projectKOs) + apply (subgoal_tac "is_aligned x 3") + prefer 2 + apply (erule is_aligned_weaken) + apply (simp add: pageBits_def) + apply (rule ext) + apply (subst byte_to_word_heap_upd_neq, assumption+, clarsimp simp: x, simp)+ + apply (subst byte_to_word_heap_upd_neq [where n=0, simplified], assumption+) + apply (clarsimp simp: x) + apply simp + done + + have hrs_mem: + "\f hp'. + hrs_mem_update (\hp. heap_update base (f (h_val hp base)) hp) hp' + = hrs_mem_update (heap_update base (f (h_val (hrs_mem hp') base))) hp'" + by (simp add: hrs_mem_update_def split_def hrs_mem_def) + + from page + have rl': "typ_uinfo_t TYPE(user_data_device_C) \\<^sub>t typ_uinfo_t TYPE('t :: mem_type) \ + (clift (hrs_mem_update (heap_update ?ptr w) (t_hrs_' (globals s))) :: ('t :: mem_type) typ_heap) + = cslift s" + apply (subst heap_upd) + apply (subst hrs_mem) + apply (simp add: typ_heap_simps clift_heap_update_same) + done + + have subset: "{ptr..+ 2 ^ 3} \ {ptr && ~~ mask pageBits ..+ 2 ^ pageBits}" + apply (simp only: upto_intvl_eq al is_aligned_neg_mask2) + apply (cut_tac ptr="ptr && ~~ mask pageBits" and x="ptr && mask pageBits" + in aligned_range_offset_subset, rule is_aligned_neg_mask2) + apply (rule is_aligned_andI1[OF al]) + apply (simp add: pageBits_def) + apply (rule and_mask_less', simp add: pageBits_def) + apply (erule order_trans[rotated]) + apply (simp add: mask_out_sub_mask) + done + + hence zr: "\rs. zero_ranges_are_zero rs (hrs_mem_update (heap_update ?ptr w) (t_hrs_' (globals s))) + = zero_ranges_are_zero rs (t_hrs_' (globals s))" + using page + apply (clarsimp simp: zero_ranges_are_zero_def hrs_mem_update base_def + heap_update_def + intro!: ball_cong[OF refl] conj_cong[OF refl]) + apply (drule region_actually_is_bytes) + apply (frule(1) region_is_bytes_disjoint[rotated 2, OF h_t_valid_clift]) + apply simp + apply (subst heap_list_update_disjoint_same, simp_all) + apply ((subst Int_commute)?, erule disjoint_subset2[rotated]) + apply (simp add: pageBits_def) + done + + have cmap_relation_heap_cong: + "\as cs cs' f rel. 
\ cmap_relation as cs f rel; cs = cs' \ \ cmap_relation as cs' f rel" + by simp + + from rf have "cpspace_relation (ksPSpace \) (underlying_memory (ksMachineState \)) (t_hrs_' (globals s))" + unfolding rf_sr_def cstate_relation_def by (simp add: Let_def) + hence "cpspace_relation (ksPSpace \) (underlying_memory ?ms) ?ks'" + unfolding cpspace_relation_def using page + apply - + apply (clarsimp simp: rl' tag_disj_via_td_name) + apply (drule relrl) + apply (simp add: heap_upd) + apply (subst hrs_mem) + apply (simp add: base_def offset_def) + apply (rule conjI[rotated]) + apply (erule cmap_relation_heap_cong) + apply (simp add: typ_heap_simps') + apply (rule ext) + apply clarsimp + apply (case_tac y) + apply (clarsimp split: if_split) + apply (rule cmap_relationI) + apply (clarsimp simp: dom_heap_to_user_data cmap_relation_def dom_if_Some + intro!: Un_absorb1 [symmetric]) + using pal + apply (subst(asm) heap_to_user_data_seperate) + apply (simp add: piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_user_data_seperate) + apply (simp add: piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_user_data_seperate) + apply (simp add: piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_user_data_seperate) + apply (simp add: piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_user_data_seperate) + apply (simp add: piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_user_data_seperate) + apply (simp add: piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_user_data_seperate) + apply (simp add: piud al du_ptr_disjoint pal pdst)+ + apply (subst(asm) heap_to_user_data_seperate) + apply (simp add: piud al du_ptr_disjoint pal pdst)+ + apply (erule cmap_relation_relI[where rel = cuser_user_data_relation]) + apply simp+ + done + + thus ?thesis using rf + apply (simp add: rf_sr_def cstate_relation_def Let_def rl' tag_disj_via_td_name) + apply (simp add: carch_state_relation_def + cmachine_state_relation_def carch_globals_def) + apply (simp add: rl' tag_disj_via_td_name zr) + done +qed + +lemma storeWord_rf_sr_upd: + "\ (\, s) \ rf_sr; pspace_aligned' \; pspace_distinct' \; + pointerInUserData ptr \ \ pointerInDeviceData ptr \; is_aligned ptr 3\ \ + (\\ksMachineState := underlying_memory_update (\m. + m(ptr := word_rsplit (w::machine_word) ! 7, ptr + 1 := word_rsplit w ! 6, + ptr + 2 := word_rsplit w ! 5, ptr + 3 := word_rsplit w ! 4, + ptr + 4 := word_rsplit (w::machine_word) ! 3, ptr + 5 := word_rsplit w ! 2, + ptr + 6 := word_rsplit w ! 1, ptr + 7 := word_rsplit w ! 0)) + (ksMachineState \)\, + globals_update (t_hrs_'_update (hrs_mem_update + (heap_update (Ptr ptr) w))) s) \ rf_sr" + apply (elim disjE) + apply (cut_tac storeWordUser_rf_sr_upd' [rule_format, where s=s and \=\]) + prefer 2 + apply fastforce + apply simp + apply (erule iffD1 [OF rf_sr_upd, rotated -1], simp_all)[1] + apply (cut_tac storeWordDevice_rf_sr_upd' [rule_format, where s=s and \=\]) + prefer 2 + apply fastforce + apply simp + apply (erule iffD1 [OF rf_sr_upd, rotated -1], simp_all)[1] + done + +(* The following should be also true for pointerInDeviceData, + but the reason why it is true is different *) +lemma storeByteUser_rf_sr_upd: + assumes asms: "(\, s) \ rf_sr" "pspace_aligned' \" "pspace_distinct' \" + "pointerInUserData ptr \" + shows "(ksMachineState_update (underlying_memory_update (\m. m(ptr := b))) \, + globals_update (t_hrs_'_update (hrs_mem_update (\m. m(ptr := b)))) s) + \ rf_sr" +proof - + have horrible_helper: + "\v p. 
v \ 7 \ (7 - unat (p && mask 3 :: machine_word) = v) = + (p && mask 3 = 7 - of_nat v)" + apply (simp add: unat_arith_simps unat_of_nat) + apply (cut_tac p=p in unat_mask_3_less_8) + apply arith + done + + have horrible_helper2: + "\n x p. n < 8 \ (unat (x - p :: machine_word) = n) = (x = (p + of_nat n))" + apply (subst unat64_eq_of_nat) + apply (simp add:word_bits_def) + apply (simp only:field_simps) + done + + from asms + show ?thesis + apply (frule_tac ptr="ptr && ~~ mask 3" + and w="word_rcat (list_update + (map (underlying_memory (ksMachineState \)) + [(ptr && ~~ mask 3) + 7, + (ptr && ~~ mask 3) + 6, + (ptr && ~~ mask 3) + 5, + (ptr && ~~ mask 3) + 4, + (ptr && ~~ mask 3) + 3, + (ptr && ~~ mask 3) + 2, + (ptr && ~~ mask 3) + 1, + (ptr && ~~ mask 3)]) + (7 - unat (ptr && mask 3)) b)" + in storeWord_rf_sr_upd) + apply simp+ + apply (simp add: pointerInUserData_def pointerInDeviceData_def mask_lower_twice pageBits_def) + apply (simp add: Aligned.is_aligned_neg_mask) + apply (erule iffD1[rotated], + rule_tac f="\a b. (a, b) \ rf_sr" and c="globals_update f s" + for f s in arg_cong2) + apply (rule kernel_state.fold_congs[OF refl refl], simp only:) + apply (rule machine_state.fold_congs[OF refl refl], simp only:) + apply (cut_tac p=ptr in unat_mask_3_less_8) + apply (simp del: list_update.simps split del: if_split + add: word_rsplit_rcat_size word_size nth_list_update + horrible_helper) + apply (subgoal_tac "(ptr && ~~ mask 3) + (ptr && mask 3) = ptr") + apply (subgoal_tac "(ptr && mask 3) \ {0, 1, 2, 3,4,5,6,7}") + subgoal by (auto split: if_split simp: fun_upd_idem) (* long *) + apply (simp add: word_unat.Rep_inject[symmetric] + del: word_unat.Rep_inject) + apply arith + apply (subst add.commute, rule word_plus_and_or_coroll2) + apply (rule StateSpace.state.fold_congs[OF refl refl]) + apply (rule globals.fold_congs[OF refl refl]) + apply (clarsimp simp: hrs_mem_update_def simp del: list_update.simps) + apply (rule ext) + apply (simp add: heap_update_def to_bytes_def typ_info_word + word_rsplit_rcat_size word_size heap_update_list_value' + nth_list_update nth_rev TWO + del: list_update.simps) + apply (subgoal_tac "length (rev ([underlying_memory (ksMachineState \) + ((ptr && ~~ mask 2) + 7), + underlying_memory (ksMachineState \) + ((ptr && ~~ mask 2) + 6), + underlying_memory (ksMachineState \) + ((ptr && ~~ mask 2) + 5), + underlying_memory (ksMachineState \) + ((ptr && ~~ mask 2) + 4), + underlying_memory (ksMachineState \) + ((ptr && ~~ mask 2) + 3), + underlying_memory (ksMachineState \) + ((ptr && ~~ mask 2) + 2), + underlying_memory (ksMachineState \) + ((ptr && ~~ mask 2) + 1), + underlying_memory (ksMachineState \) + (ptr && ~~ mask 2)] + [3 - unat (ptr && mask 2) := b])) + < addr_card") + prefer 2 + apply (simp add: addr_card del: list_update.simps) + apply (simp add: heap_update_def to_bytes_def typ_info_word + word_rsplit_rcat_size word_size heap_update_list_value' + nth_list_update nth_rev TWO + del: list_update.simps cong: if_cong) + apply (simp only: If_rearrage) + apply (subgoal_tac "P" for P) + apply (rule if_cong) + apply assumption + apply simp + apply (clarsimp simp: nth_list_update split: if_split) + apply (frule_tac ptr=x in memory_cross_over, simp+) + apply (clarsimp simp: pointerInUserData_def pointerInDeviceData_def) + apply (cut_tac p="ptr && ~~ mask 3" and n=3 and d="x - (ptr && ~~ mask 3)" + in is_aligned_add_helper) + apply (simp add: Aligned.is_aligned_neg_mask) + apply (simp add: word_less_nat_alt) + apply clarsimp + apply (cut_tac x=x in 
mask_lower_twice[where n=3 and m=pageBits]) + apply (simp add: pageBits_def) + apply (cut_tac x=ptr in mask_lower_twice[where n=3 and m=pageBits]) + apply (simp add: pageBits_def) + apply simp + apply (auto simp add: eval_nat_numeral horrible_helper2 take_bit_Suc simp del: unsigned_numeral + elim!: less_SucE)[1] + apply (rule iffI) + apply clarsimp + apply (cut_tac p=ptr in unat_mask_3_less_8) + apply (subgoal_tac "unat (x - (ptr && ~~ mask 3)) = unat (ptr && mask 3)") + prefer 2 + apply arith + apply (simp add: unat_mask_3_less_8 field_simps word_plus_and_or_coroll2) + apply (simp add: subtract_mask TWO unat_mask_3_less_8) + done +qed + +lemma storeWord_ccorres': + "ccorres dc xfdc + (pspace_aligned' and pspace_distinct' and + K (is_aligned ptr 3) and (\s. pointerInUserData ptr s \ pointerInDeviceData ptr s)) + (UNIV \ {s. ptr' s = Ptr ptr} \ {s. c_guard (ptr' s)} \ {s. val' s = val}) hs + (doMachineOp $ storeWord ptr val) + (Basic (\s. globals_update (t_hrs_'_update + (hrs_mem_update (heap_update (ptr' s) (val' s)))) s))" + supply if_cong[cong] + apply (clarsimp simp: storeWordUser_def simp del: Collect_const) + apply (rule ccorres_from_vcg_nofail) + apply (rule allI) + apply (rule conseqPre, vcg) + apply (clarsimp split: if_split_asm) + apply (rule bexI[rotated]) + apply (subst in_doMachineOp) + apply (fastforce simp: storeWord_def in_monad is_aligned_mask) + apply (simp add: upto0_7_def) + apply (fold fun_upd_def One_nat_def)+ + apply (fastforce elim: storeWord_rf_sr_upd) + done + +lemma storeWord_ccorres: + "ccorres dc xfdc + (valid_pspace' and K (is_aligned ptr 3) and pointerInUserData ptr) + (UNIV \ {s. ptr' s = Ptr ptr} \ {s. c_guard (ptr' s)} \ {s. val' s = val}) hs + (doMachineOp $ storeWord ptr val) + (Basic (\s. globals_update (t_hrs_'_update + (hrs_mem_update (heap_update (ptr' s) (val' s)))) s))" + apply (rule ccorres_guard_imp2, rule storeWord_ccorres') + apply fastforce + done + +lemma pointerInUserData_c_guard: + "\ valid_pspace' s; pointerInUserData ptr s \ pointerInDeviceData ptr s ; is_aligned ptr 3 \ + \ c_guard (Ptr ptr :: machine_word ptr)" + apply (simp add: pointerInUserData_def pointerInDeviceData_def) + apply (simp add: c_guard_def ptr_aligned_def is_aligned_def c_null_guard_def) + apply (fold is_aligned_def [where n=3, simplified])[1] + apply (rule contra_subsetD) + apply (rule order_trans [rotated]) + apply (rule_tac x="ptr && mask pageBits" and y=8 and z=4096 in intvl_sub_offset) + apply (cut_tac y=ptr and a="mask pageBits && (~~ mask 3)" in word_and_le1) + apply (subst(asm) word_bw_assocs[symmetric], subst(asm) is_aligned_neg_mask_eq, + erule is_aligned_andI1) + apply (simp add: word_le_nat_alt mask_def pageBits_def) + apply (subst word_plus_and_or_coroll2 [where w="~~ mask pageBits", simplified]) + apply simp + apply (fastforce dest: intvl_le_lower + intro: is_aligned_no_overflow' [where n=12, simplified] + is_aligned_andI2 + simp: mask_def pageBits_def is_aligned_def word_bits_def) + done + +lemma pointerInUserData_h_t_valid: + "\ valid_pspace' s; pointerInUserData ptr s ; + is_aligned ptr 3; (s, s') \ rf_sr \ + \ hrs_htd (t_hrs_' (globals s')) \\<^sub>t (Ptr ptr :: machine_word ptr)" + apply (frule_tac p=ptr in + user_word_at_cross_over[rotated, OF _ refl]) + apply (simp add: user_word_at_def) + apply simp + done + +lemma storeWordUser_ccorres: + "ccorres dc xfdc (valid_pspace' and (\_. is_aligned ptr 3)) + (UNIV \ {s. ptr' s = Ptr ptr} \ {s. w' s = w}) hs + (storeWordUser ptr w) + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t \(\s. ptr' s)\ + (Basic (\s. 
globals_update (t_hrs_'_update + (hrs_mem_update (heap_update (ptr' s) (w' s)))) s)))" + apply (simp add: storeWordUser_def) + apply (rule ccorres_symb_exec_l'[OF _ stateAssert_inv stateAssert_sp empty_fail_stateAssert]) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_Guard) + apply (rule storeWord_ccorres[unfolded fun_app_def]) + apply (clarsimp simp: pointerInUserData_c_guard pointerInUserData_h_t_valid) + done + +end + +end diff --git a/proof/crefine/AARCH64/SyscallArgs_C.thy b/proof/crefine/AARCH64/SyscallArgs_C.thy new file mode 100644 index 0000000000..a2244a0130 --- /dev/null +++ b/proof/crefine/AARCH64/SyscallArgs_C.thy @@ -0,0 +1,1234 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory SyscallArgs_C +imports + TcbQueue_C + CSpace_RAB_C + StoreWord_C DetWP +begin + +(*FIXME: arch_split: C kernel names hidden by Haskell names *) +context kernel_m begin +abbreviation "msgRegistersC \ kernel_all_substitute.msgRegisters" +lemmas msgRegistersC_def = kernel_all_substitute.msgRegisters_def +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare word_neq_0_conv[simp del] + +definition + cintr :: "irq \ machine_word \ errtype \ bool" +where + "cintr a x err \ x = scast EXCEPTION_PREEMPTED" + +definition + replyOnRestart :: "machine_word \ machine_word list \ bool \ unit kernel" +where + "replyOnRestart thread reply isCall \ + do state \ getThreadState thread; + when (state = Restart) (do + _ \ when isCall (replyFromKernel thread (0, reply)); + setThreadState Running thread + od) + od" + +crunch typ_at'[wp]: replyOnRestart "\s. P (typ_at' T p s)" + (wp: crunch_wps simp: crunch_simps) + +lemmas replyOnRestart_typ_ats[wp] = typ_at_lifts [OF replyOnRestart_typ_at'] + +lemma replyOnRestart_invs'[wp]: + "\invs'\ replyOnRestart thread reply isCall \\rv. invs'\" + including no_pre + apply (simp add: replyOnRestart_def) + apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_weak_lift_imp) + apply (rule hoare_strengthen_post, rule gts_sp') + apply (clarsimp simp: pred_tcb_at') + apply (auto elim!: pred_tcb'_weakenE st_tcb_ex_cap'' + dest: st_tcb_at_idle_thread') + done + + +declare psubset_singleton[simp] + +lemma gts_eq: + "st_tcb_at' (\st. 
st = state) t s + \ (getThreadState t s = return state s)" + apply (simp add: prod_eq_iff return_def) + apply (subst conj_commute, rule context_conjI) + apply (rule no_failD[OF no_fail_getThreadState]) + apply (erule pred_tcb_at') + apply (rule not_psubset_eq) + apply clarsimp + apply (drule empty_failD [OF empty_fail_getThreadState]) + apply simp + apply clarsimp + apply (frule in_inv_by_hoareD[OF gts_inv']) + apply (drule use_valid [OF _ gts_sp', OF _ TrueI]) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def projectKOs objBits_simps) + done + +lemma replyOnRestart_twice': + "((), s') \ fst (replyOnRestart t reply isCall s) + \ replyOnRestart t reply' isCall' s' + = return () s'" + apply (clarsimp simp add: replyOnRestart_def in_monad) + apply (drule use_valid [OF _ gts_sp', OF _ TrueI]) + apply (case_tac "state = Restart") + apply clarsimp + apply (drule use_valid [OF _ setThreadState_st_tcb'], simp) + apply (simp add: gts_eq when_def cong: bind_apply_cong) + apply (simp add: gts_eq when_def cong: bind_apply_cong) + done + +lemma replyOnRestart_twice[simplified]: + "do replyOnRestart t reply isCall; replyOnRestart t reply' isCall'; m od + = do replyOnRestart t reply isCall; m od" + apply (rule ext) + apply (rule bind_apply_cong[OF refl]) + apply simp + apply (subst bind_apply_cong [OF _ refl]) + apply (erule replyOnRestart_twice') + apply simp + done + +end + +context kernel_m begin + +lemma ccorres_pre_getWorkUnits: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. \rv. ksWorkUnitsCompleted s = rv \ P rv s) + {s. \rv. s \ P' rv} hs (getWorkUnits >>= f) c" + apply (simp add: getWorkUnits_def) + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule gets_sp) + apply (clarsimp simp: empty_fail_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply clarsimp + done + +lemma preemptionPoint_ccorres: + "ccorres (cintr \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + invs' UNIV [] + preemptionPoint (Call preemptionPoint_'proc)" + apply (cinit simp: workUnitsLimit_def whenE_def) + apply (rule ccorres_liftE_Seq) + apply (rule ccorres_split_nothrow + [where P=\ and P'=UNIV and r'=dc and xf'=xfdc]) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: modifyWorkUnits_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def + cmachine_state_relation_def) + apply ceqv + apply (rule ccorres_liftE_Seq) + apply (rule ccorres_pre_getWorkUnits) + apply (rule ccorres_cond_seq) + apply (rule_tac R="\s. 
rv = ksWorkUnitsCompleted s" in ccorres_cond2) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + prefer 2 + apply simp + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (simp add: returnOk_def return_def) + apply (rule ccorres_liftE_Seq) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_split_nothrow + [where P=\ and P'=UNIV and r'=dc and xf'=xfdc]) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (thin_tac "P" for P)+ + apply (clarsimp simp: setWorkUnits_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def + cmachine_state_relation_def) + apply ceqv + apply (rule ccorres_liftE_Seq) + apply (ctac (no_vcg) add: isIRQPending_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (simp add: from_bool_0 whenE_def returnOk_def throwError_def + return_def split: option.splits) + apply (clarsimp simp: cintr_def exception_defs) + apply wp+ + apply vcg + apply (unfold modifyWorkUnits_def)[1] + apply wp + apply vcg + apply simp + done + +definition + "invocationCatch thread isBlocking isCall inject + \ + sum.case_sum (throwError \ Inl) + (\oper. doE y \ liftE (setThreadState Structures_H.thread_state.Restart thread); + reply \ RetypeDecls_H.performInvocation isBlocking isCall (inject oper) + >>= sum.case_sum (throwError \ Inr) returnOk; + liftE (if reply = [] then replyOnRestart thread [] isCall \ return () + else replyOnRestart thread reply isCall) + odE)" + +definition + "intr_and_se_rel seOrIRQ status err + \ case seOrIRQ of Inl se \ syscall_error_rel se status err + | Inr irq \ cintr irq status err" + +lemma intr_and_se_rel_simps[simp]: + "intr_and_se_rel (Inl se) = syscall_error_rel se" + "intr_and_se_rel (Inr irq) = cintr irq" + by (rule ext | simp add: intr_and_se_rel_def)+ + +lemma errstate_globals_overwrite[simp]: + "errstate (s \ globals := globals t \) = errstate t" + by (simp add: errstate_def) + +definition + array_to_list :: "('a['b :: finite]) \ 'a list" +where + "array_to_list arr \ map (index arr) ([0 ..< card (UNIV :: 'b set)])" + +definition + interpret_excaps :: "extra_caps_C \ cte_C ptr list" +where + "interpret_excaps excps \ + (takeWhile (\x. ptr_val x \ 0) + (array_to_list (excaprefs_C excps)))" + +lemma interpret_excaps_test_null[unfolded array_to_list_def, simplified]: + "\ length (interpret_excaps excps) \ n; + n < length (array_to_list (excaprefs_C excps)) \ + \ (index (excaprefs_C excps) n = NULL) = (length (interpret_excaps excps) = n)" + apply (simp add: interpret_excaps_def) + apply (rule iffI) + apply (erule order_antisym[rotated]) + apply (rule length_takeWhile_le) + apply (simp add: array_to_list_def) + apply simp + apply (drule length_takeWhile_ge) + apply (simp add: array_to_list_def NULL_ptr_val) + done + +definition + excaps_map :: "(capability \ machine_word) list + \ cte_C ptr list" +where + "excaps_map \ map (\(cp, slot). cte_Ptr slot)" + +definition + slotcap_in_mem :: "capability \ machine_word + \ cte_heap \ bool" +where + "slotcap_in_mem cap slot + \ \cte_heap. \cte. cte_heap slot = Some cte \ cap = cteCap cte" + +lemma slotcap_in_mem_def2: + "slotcap_in_mem cap slot (ctes_of s) + = cte_wp_at' (\cte. cap = cteCap cte) slot s" + by (simp add: slotcap_in_mem_def cte_wp_at_ctes_of) + +definition + excaps_in_mem :: "(capability \ machine_word) list + \ cte_heap \ bool" +where + "excaps_in_mem cps \ \cte_heap. 
+ \(cp, slot) \ set cps. slotcap_in_mem cp slot cte_heap" + + +lemma ccorres_alternative1: + "ccorres rvr xf P P' hs f c + \ ccorres rvr xf P P' hs (f \ g) c" + apply (simp add: ccorres_underlying_def) + apply (erule ballEI, clarsimp del: allI) + apply (simp add: alternative_def) + apply (elim allEI) + apply (auto simp: alternative_def split: xstate.split_asm) + done + +lemma ccorres_alternative2: + "ccorres rvr xf P P' hs g c + \ ccorres rvr xf P P' hs (f \ g) c" + apply (simp add: ccorres_underlying_def) + apply (erule ballEI, clarsimp del: allI) + apply (simp add: alternative_def) + apply (elim allEI) + apply (auto simp: alternative_def split: xstate.split_asm) + done + +lemma o_xo_injector: + "((f o f') \ r) = ((f \ r) o case_sum (Inl o f') Inr)" + by (intro ext, simp split: sum.split) + +lemma ccorres_invocationCatch_Inr: + "ccorres (f \ r) xf P P' hs + (invocationCatch thread isBlocking isCall injector (Inr v)) c + = + ccorres ((f \ Inr) \ r) xf P P' hs + (do _ \ setThreadState Restart thread; + doE reply \ performInvocation isBlocking isCall (injector v); + if reply = [] then liftE (replyOnRestart thread [] isCall) \ returnOk () + else liftE (replyOnRestart thread reply isCall) + odE od) c" + apply (simp add: invocationCatch_def liftE_bindE o_xo_injector cong: ccorres_all_cong) + apply (subst ccorres_liftM_simp[symmetric]) + apply (simp add: liftM_def bind_assoc bindE_def) + apply (rule_tac f="\f. ccorres rvr xs P P' hs f c" for rvr xs in arg_cong) + apply (rule ext) + apply (rule bind_apply_cong [OF refl])+ + apply (simp add: throwError_bind returnOk_bind lift_def liftE_def + alternative_bind + split: sum.split if_split) + apply (simp add: throwError_def) + done + +lemma getSlotCap_eq: + "slotcap_in_mem cap slot (ctes_of s) + \ getSlotCap slot s = return cap s" + apply (clarsimp simp: slotcap_in_mem_def2 getSlotCap_def) + apply (frule cte_wp_at_weakenE'[OF _ TrueI]) + apply (drule no_failD[OF no_fail_getCTE]) + apply (clarsimp simp: cte_wp_at'_def getCTE_def[symmetric] + bind_def return_def) + done + +lemma getSlotCap_ccorres_fudge: + "ccorres_underlying sr Gamm rvr xf ar axf P Q hs (do rv \ getSlotCap slot; _ \ assert (rv = cp); a rv od) c + \ ccorres_underlying sr Gamm rvr xf ar axf + (P and (slotcap_in_mem cp slot o ctes_of)) + Q hs (a cp) c" + apply (simp add: ccorres_underlying_def) + apply (erule ballEI, clarsimp del: allI) + apply (simp add: bind_apply_cong [OF getSlotCap_eq refl] + cong: xstate.case_cong) + done + +lemma getSlotCap_ccorres_fudge_n: + "ccorres_underlying sr Gamm rvr xf ar axf P Q hs + (do rv \ getSlotCap (snd (vals ! n)); + _ \ assert (rv = fst (vals ! n)); a od) c + \ ccorres_underlying sr Gamm rvr xf ar axf + ((\s. cte_wp_at' (\cte. cteCap cte = fst (vals ! n)) + (snd (vals ! n)) s \ P s) + and (excaps_in_mem vals \ ctes_of) and K (n < length vals)) Q + hs a c" + apply (rule ccorres_guard_imp2) + apply (erule getSlotCap_ccorres_fudge) + apply (clarsimp simp: excaps_in_mem_def) + apply (drule bspec, erule nth_mem) + apply (clarsimp simp: slotcap_in_mem_def cte_wp_at_ctes_of) + done + +definition + "is_syscall_error_code f code + = (\Q. (\ \ {s. global_exn_var_'_update (\_. Return) + (ret__unsigned_long_'_update (\_. 
scast EXCEPTION_SYSCALL_ERROR) + (globals_update (current_syscall_error_'_update f) s)) \ Q} + code {}, Q))" + +abbreviation(input) + (* no longer needed *) + "Basic_with_globals f == (Basic f)" + +lemma is_syscall_error_codes: + "is_syscall_error_code f + (Basic (globals_update (current_syscall_error_'_update f));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code (f' o f) + (Basic (globals_update (current_syscall_error_'_update f));; + Basic (globals_update (current_syscall_error_'_update f'));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code (f'' o f' o f) + (Basic (globals_update (current_syscall_error_'_update f));; + Basic (globals_update (current_syscall_error_'_update f'));; + Basic (globals_update (current_syscall_error_'_update f''));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code f + (SKIP;; + Basic (globals_update (current_syscall_error_'_update f));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code (f' o f) + (SKIP;; + Basic (globals_update (current_syscall_error_'_update f));; + Basic (globals_update (current_syscall_error_'_update f'));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code (f'' o f' o f) + (SKIP;; + Basic (globals_update (current_syscall_error_'_update f));; + Basic (globals_update (current_syscall_error_'_update f'));; + Basic (globals_update (current_syscall_error_'_update f''));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code f + (Basic_with_globals (globals_update (current_syscall_error_'_update f));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code (f' o f) + (Basic_with_globals (globals_update (current_syscall_error_'_update f));; + Basic_with_globals (globals_update (current_syscall_error_'_update f'));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code (f'' o f' o f) + (Basic_with_globals (globals_update (current_syscall_error_'_update f));; + Basic_with_globals (globals_update (current_syscall_error_'_update f'));; + Basic_with_globals (globals_update (current_syscall_error_'_update f''));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code (f'''' \ f''' \ f'' o f' o f) + ( + Basic_with_globals (globals_update (current_syscall_error_'_update f));; + Basic_with_globals (globals_update (current_syscall_error_'_update f'));; + Basic_with_globals (globals_update (current_syscall_error_'_update f''));; + Basic_with_globals (globals_update (current_syscall_error_'_update f'''));; + Basic_with_globals (globals_update (current_syscall_error_'_update f''''));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code f + (SKIP;; + Basic_with_globals (globals_update (current_syscall_error_'_update f));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code (f' o f) + (SKIP;; + Basic_with_globals (globals_update (current_syscall_error_'_update f));; + Basic_with_globals (globals_update (current_syscall_error_'_update f'));; + return_C ret__unsigned_long_'_update (\_. 
scast EXCEPTION_SYSCALL_ERROR))" + "is_syscall_error_code (f'' o f' o f) + (SKIP;; + Basic_with_globals (globals_update (current_syscall_error_'_update f));; + Basic_with_globals (globals_update (current_syscall_error_'_update f'));; + Basic_with_globals (globals_update (current_syscall_error_'_update f''));; + return_C ret__unsigned_long_'_update (\_. scast EXCEPTION_SYSCALL_ERROR))" + by ((rule iffD2[OF is_syscall_error_code_def], intro allI, + rule conseqPre, vcg, safe, (simp_all add: o_def)?)+) + +lemma syscall_error_throwError_ccorres_direct_gen: + "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; + \err' ft'. syscall_error_to_H (f err') ft' = Some err \ + \ + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') + \ (UNIV) (SKIP # hs) + (throwError (Inl err)) code" + apply (rule ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre) + apply (erule iffD1[OF is_syscall_error_code_def, THEN spec]) + apply (clarsimp simp: throwError_def return_def) + apply (simp add: syscall_error_rel_def exception_defs) + done + +lemma syscall_error_throwError_ccorres_succs_gen: + "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; + \err' ft'. syscall_error_to_H (f err') ft' = Some err \ + \ + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') + \ (UNIV) (SKIP # hs) + (throwError (Inl err)) (code ;; remainder)" + apply (rule ccorres_guard_imp2, + rule ccorres_split_throws) + apply (erule syscall_error_throwError_ccorres_direct_gen; assumption) + apply (rule HoarePartialProps.augment_Faults) + apply (erule iffD1[OF is_syscall_error_code_def, THEN spec]) + apply simp+ + done + +lemmas syscall_error_throwError_ccorres_n_gen = + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_direct_gen, + simplified o_apply] + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_succs_gen, + simplified o_apply] + +lemmas syscall_error_throwError_ccorres_n = + syscall_error_throwError_ccorres_n_gen[where arrel="intr_and_se_rel \ dc", simplified] + +lemmas syscall_error_throwError_ccorres_n_inl_rrel = + syscall_error_throwError_ccorres_n_gen[where arrel="inl_rrel (intr_and_se_rel \ dc)", simplified] + +definition idButNot :: "'a \ 'a" +where "idButNot x = x" + +lemma interpret_excaps_test_null2: + "n < 3 \ + (index (excaprefs_C excps) n = NULL) + = (length (interpret_excaps excps) \ n + \ index (idButNot excaprefs_C excps) n = NULL)" + unfolding idButNot_def + apply safe + apply (rule ccontr, simp only: linorder_not_le) + apply (frule(1) interpret_excaps_test_null [OF order_less_imp_le]) + apply simp + done + +lemma interpret_excaps_eq[unfolded array_to_list_def, simplified]: + "interpret_excaps excps = xs \ + \n < length xs. (index (excaprefs_C excps) n) = (xs ! 
n) + \ (length xs < length (array_to_list (excaprefs_C excps)) + \ index (excaprefs_C excps) (length xs) = NULL)" + apply (clarsimp simp: interpret_excaps_def) + apply (drule length_takeWhile_gt) + apply (clarsimp simp: nth_append) + apply (clarsimp simp: array_to_list_def) + apply (frule_tac f=length in arg_cong, subst(asm) length_map, + simp(no_asm_use)) + apply (rule conjI) + apply (rule trans, erule map_upt_eq_vals_D, simp) + apply (simp add: nth_append) + apply clarsimp + apply (frule nth_length_takeWhile) + apply (rule trans, erule map_upt_eq_vals_D, simp) + apply (simp add: nth_append NULL_ptr_val) + done + +lemma ctes_of_0_contr[elim]: + "\ ctes_of s 0 = Some cte; valid_mdb' s \ \ P" + by (drule(1) ctes_of_not_0, simp) + +lemma invocationCatch_use_injection_handler: + "(v >>= invocationCatch thread isBlocking isCall injector) + = (injection_handler Inl v >>=E + (invocationCatch thread isBlocking isCall injector o Inr))" + apply (simp add: injection_handler_def handleE'_def + bind_bindE_assoc) + apply (rule ext, rule bind_apply_cong [OF refl]) + apply (simp add: invocationCatch_def return_returnOk + split: sum.split) + done + +lemma ccorres_injection_handler_csum1: + "ccorres (f \ r) xf P P' hs a c + \ ccorres + ((\rv a b. \rv'. rv = injector rv' \ f rv' a b) \ r) xf P P' hs + (injection_handler injector a) c" + apply (simp add: injection_handler_liftM) + apply (erule ccorres_rel_imp) + apply (auto split: sum.split) + done + +lemma ccorres_injection_handler_csum2: + "ccorres ((f o injector) \ r) xf P P' hs a c + \ ccorres (f \ r) xf P P' hs + (injection_handler injector a) c" + apply (simp add: injection_handler_liftM) + apply (erule ccorres_rel_imp) + apply (auto split: sum.split) + done + +definition + is_nondet_refinement :: "('a, 's) nondet_monad + \ ('a, 's) nondet_monad \ bool" +where + "is_nondet_refinement f g \ \s. (snd (f s) \ snd (g s)) \ fst (f s) \ fst (g s)" + +lemma is_nondet_refinement_refl[simp]: + "is_nondet_refinement a a" + by (simp add: is_nondet_refinement_def) + +lemma is_nondet_refinement_bind: + "\ is_nondet_refinement a c; \rv. is_nondet_refinement (b rv) (d rv) \ + \ is_nondet_refinement (a >>= b) (c >>= d)" + apply (clarsimp simp: is_nondet_refinement_def bind_def split_def) + apply fast + done + +lemma is_nondet_refinement_bindE: + "\ is_nondet_refinement a c; \rv. is_nondet_refinement (b rv) (d rv) \ + \ is_nondet_refinement (a >>=E b) (c >>=E d)" + apply (simp add: bindE_def) + apply (erule is_nondet_refinement_bind) + apply (simp add: lift_def split: sum.split) + done + +lemma ccorres_nondet_refinement: + "\ is_nondet_refinement a b; + ccorres_underlying sr Gamm rvr xf arrel axf G G' hs a c \ + \ ccorres_underlying sr Gamm rvr xf arrel axf G G' hs b c" + apply (simp add: ccorres_underlying_def is_nondet_refinement_def + split_def) + apply (rule ballI, drule(1) bspec) + apply (intro impI) + apply (drule mp, blast) + apply (elim allEI) + apply (clarsimp split: xstate.split_asm) + apply blast + done + +lemma is_nondet_refinement_alternative1: + "is_nondet_refinement a (a \ b)" + by (clarsimp simp add: is_nondet_refinement_def alternative_def) + +lemma ccorres_defer: + assumes c: "ccorres r xf P P' hs H C" + assumes f: "no_fail Q H" + shows "ccorres (\_. r rv) xf (\s. 
P s \ Q s \ (P s \ Q s \ fst (H s) = {(rv,s)})) P' hs (return ()) C" + using c + apply (clarsimp simp: ccorres_underlying_def split: xstate.splits) + apply (drule (1) bspec) + apply clarsimp + apply (erule impE) + apply (insert f)[1] + apply (clarsimp simp: no_fail_def) + apply (clarsimp simp: return_def) + apply (rule conjI) + apply clarsimp + apply (rename_tac s) + apply (erule_tac x=n in allE) + apply (erule_tac x="Normal s" in allE) + apply (clarsimp simp: unif_rrel_def) + apply fastforce + done + +lemma no_fail_loadWordUser: + "no_fail (pointerInUserData x and K (is_aligned x 3)) (loadWordUser x)" + apply (simp add: loadWordUser_def) + apply (rule no_fail_pre, wp no_fail_stateAssert) + apply simp + done + +lemma no_fail_getMRs: + "no_fail (tcb_at' thread and case_option \ valid_ipc_buffer_ptr' buffer) + (getMRs thread buffer info)" + apply (rule det_wp_no_fail) + apply (rule det_wp_getMRs) + done + +lemma nat_less_4_cases: + "(x::nat) < 4 \ x=0 \ x=1 \ x=2 \ x=3" + by clarsimp + +lemma asUser_cur_obj_at': + assumes f: "\P\ f \Q\" + shows "\\s. obj_at' (\tcb. P (atcbContextGet (tcbArch tcb))) (ksCurThread s) s \ t = ksCurThread s\ + asUser t f \\rv s. obj_at' (\tcb. Q rv (atcbContextGet (tcbArch tcb))) (ksCurThread s) s\" + apply (simp add: asUser_def split_def) + apply (wp) + apply (rule hoare_lift_Pf2 [where f=ksCurThread]) + apply (wp threadSet_obj_at'_really_strongest)+ + apply (clarsimp simp: threadGet_def) + apply (wp getObject_tcb_wp) + apply clarsimp + apply (drule obj_at_ko_at') + apply clarsimp + apply (rename_tac tcb) + apply (rule_tac x=tcb in exI) + apply clarsimp + apply (drule use_valid, rule f, assumption) + apply clarsimp + done + +lemma asUser_const_rv: + assumes f: "\\s. P\ f \\rv s. Q rv\" + shows "\\s. P\ asUser t f \\rv s. Q rv\" + apply (simp add: asUser_def split_def) + apply (wp) + apply (clarsimp simp: threadGet_def) + apply (wp getObject_tcb_wp) + apply clarsimp + apply (drule obj_at_ko_at') + apply clarsimp + apply (rename_tac tcb) + apply (rule_tac x=tcb in exI) + apply clarsimp + apply (drule use_valid, rule f, assumption) + apply clarsimp + done + +lemma getMRs_tcbContext: + "\\s. n < unat n_msgRegisters \ n < unat (msgLength info) \ thread = ksCurThread s \ cur_tcb' s\ + getMRs thread buffer info + \\rv s. obj_at' (\tcb. user_regs (atcbContextGet (tcbArch tcb)) (AARCH64_H.msgRegisters ! n) = rv ! 
n) (ksCurThread s) s\" + apply (rule hoare_assume_pre) + apply (elim conjE) + apply (thin_tac "thread = t" for t) + apply (clarsimp simp add: getMRs_def) + apply (wp|wpc)+ + apply (rule_tac P="n < length rv" in hoare_gen_asm) + apply (clarsimp simp: nth_append) + apply (wp mapM_wp' hoare_weak_lift_imp)+ + apply simp + apply (rule asUser_cur_obj_at') + apply (simp add: getRegister_def msgRegisters_unfold) + apply (simp add: mapM_Cons bind_assoc mapM_empty) + apply wp + apply (wp hoare_drop_imps hoare_vcg_all_lift) + apply (wp asUser_cur_obj_at') + apply (simp add: getRegister_def msgRegisters_unfold) + apply (simp add: mapM_Cons bind_assoc mapM_empty) + apply (wp asUser_const_rv) + apply clarsimp + apply (wp asUser_const_rv) + apply (clarsimp simp: n_msgRegisters_def msgRegisters_unfold) + apply (simp add: nth_Cons' cur_tcb'_def split: if_split) + done + +lemma threadGet_tcbIpcBuffer_ccorres [corres]: + "ccorres (=) w_bufferPtr_' (tcb_at' tptr) UNIV hs + (threadGet tcbIPCBuffer tptr) + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t tcb_ptr_to_ctcb_ptr tptr\ + (\w_bufferPtr :== + h_val (hrs_mem \t_hrs) + (Ptr &(tcb_ptr_to_ctcb_ptr tptr\[''tcbIPCBuffer_C''])::machine_word ptr)))" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return2) + apply (rule ccorres_pre_threadGet) + apply (rule_tac P = "obj_at' (\tcb. tcbIPCBuffer tcb = x) tptr" and + P'="{s'. \ctcb. + cslift s' (tcb_ptr_to_ctcb_ptr tptr) = Some ctcb \ + tcbIPCBuffer_C ctcb = x }" in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: return_def typ_heap_simps') + apply (clarsimp simp: obj_at'_def ctcb_relation_def) + done + +(* FIXME: move *) +lemma ccorres_case_bools: + assumes P: "ccorres r xf P P' hs (a True) (c True)" + assumes Q: "ccorres r xf Q Q' hs (a False) (c False)" + shows "ccorres r xf (\s. (b \ P s) \ (\b \ Q s)) + ({s. b \ s \ P'} \ {s. \b \ s \ Q'}) + hs (a b) (c b)" + apply (cases b) + apply (auto simp: P Q) + done + +lemma ccorres_cond_both': + assumes abs: "\s s'. (s, s') \ sr \ Q s \ Q' s' \ P = (s' \ P')" + and ac: "P \ ccorres_underlying sr G r xf arrel axf R R' hs a c" + and bd: "\ P \ ccorres_underlying sr G r xf arrel axf U U' hs b d" + shows "ccorres_underlying sr G r xf arrel axf + (Q and (\s. P \ R s) and (\s. \ P \ U s)) + (Collect Q' \ {s. (s \ P' \ s \ R') \ (s \ P' \ s \ U')}) + hs + (if P then a else b) (Cond P' c d)" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_if_lhs) + apply (rule ccorres_cond_true) + apply (erule ac) + apply (rule ccorres_cond_false) + apply (erule bd) + apply clarsimp + apply (frule abs[rule_format, OF conjI], simp+) + done + +lemma pageBitsForSize_64 [simp]: + "pageBitsForSize sz < 64" + by (cases sz, auto simp: bit_simps) + +lemma ccap_relation_frame_tags: + "ccap_relation (ArchObjectCap (FrameCap v0 v1 v2 dev v3)) cap \ + cap_get_tag cap = scast cap_frame_cap" + by (auto simp: cap_get_tag_isCap_unfolded_H_cap) + +(* FIXME: move *) +lemma ccorres_case_bools': + assumes P: "b \ ccorres r xf P P' hs (a True) (c True)" + assumes Q: "\ b \ ccorres r xf Q Q' hs (a False) (c False)" + shows "ccorres r xf (\s. (b \ P s) \ (\b \ Q s)) + ({s. b \ s \ P'} \ {s. \b \ s \ Q'}) + hs (a b) (c b)" + apply (cases b) + apply (auto simp: P Q) + done + +(* FIXME x64: does this need vmrights \ 0 *) +lemma capFVMRights_range: + "\cap. 
cap_get_tag cap = scast cap_frame_cap \ + cap_frame_cap_CL.capFVMRights_CL (cap_frame_cap_lift cap) \ 3" + by (simp add: cap_frame_cap_lift_def + cap_lift_def cap_tag_defs word_and_le1 mask_def)+ + +lemma dumb_bool_split_for_vcg: + "\d \ \ret__unsigned_long \ 0\ \ \\ d \ \ret__unsigned_long = 0\ + = \d = to_bool \ret__unsigned_long \" + by (auto simp: to_bool_def) + +lemma ccap_relation_page_is_device: + "ccap_relation (capability.ArchObjectCap (arch_capability.FrameCap v0a v1 v2 d v3)) c + \ (cap_frame_cap_CL.capFIsDevice_CL (cap_frame_cap_lift c) \ 0) = d" + apply (clarsimp simp: ccap_relation_def Let_def map_option_Some_eq2 cap_to_H_def) + apply (case_tac z) + apply (auto split: if_splits simp: to_bool_def Let_def cap_frame_cap_lift_def) + done + +(* FIXME AARCH64 unat_of_nat_pageBitsForSize exists, but it's a 64-bit version *) +lemma unat_of_nat_32_pageBitsForSize[simp]: + "unat (of_nat (pageBitsForSize x)::32 word) = pageBitsForSize x" + apply (subst unat_of_nat_eq; simp) + apply (rule order_le_less_trans, rule pageBitsForSize_le) + apply simp + done + +lemma lookupIPCBuffer_ccorres[corres]: + "ccorres ((=) \ option_to_ptr) ret__ptr_to_unsigned_long_' + (tcb_at' t) + (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr t} + \ {s. isReceiver_' s = from_bool isReceiver}) [] + (lookupIPCBuffer isReceiver t) (Call lookupIPCBuffer_'proc)" + apply (cinit lift: thread_' isReceiver_') + apply (rule ccorres_split_nothrow) + apply simp + apply (rule threadGet_tcbIpcBuffer_ccorres) + apply ceqv + apply (simp add: getThreadBufferSlot_def locateSlot_conv + cte_C_size word_sle_def Collect_True + del: Collect_const) + apply (rule ccorres_getSlotCap_cte_at) + apply (rule ccorres_move_array_assertion_tcb_ctes) + apply (ctac (no_vcg)) + apply csymbr + apply (rule_tac b="isArchObjectCap rv \ isFrameCap (capCap rv)" in ccorres_case_bools') + apply simp + apply (rule ccorres_cond_false_seq) + apply (simp(no_asm)) + apply csymbr + apply (rule_tac b="isDeviceCap rv" in ccorres_case_bools') + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg, + clarsimp simp: isCap_simps return_def option_to_ptr_def option_to_0_def) + apply (rule ccorres_cond_false_seq) + apply simp + apply csymbr + apply csymbr + apply (clarsimp simp: isCap_simps) + apply (rule ccorres_guard_imp[where A=\ and A'=UNIV], + rule ccorres_cond [where R=\]) + apply (clarsimp simp: from_bool_0 isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: cap_frame_cap_lift cap_to_H_def elim!: ccap_relationE) + apply (clarsimp simp: vmrights_to_H_def) + apply (simp add: Kernel_C.VMReadOnly_def Kernel_C.VMKernelOnly_def + Kernel_C.VMReadWrite_def + split: if_split) + apply (frule cap_get_tag_isCap_unfolded_H_cap(16),simp) + apply (frule capFVMRights_range) + apply (simp add: cap_frame_cap_lift) + apply (clarsimp simp: cap_to_H_def vmrights_to_H_def word_le_make_less + Kernel_C.VMReadWrite_def Kernel_C.VMReadOnly_def + Kernel_C.VMKernelOnly_def + dest: word_less_cases) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_Guard) + apply simp + apply (rule ccorres_assert)+ + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def option_to_ptr_def option_to_0_def isCap_simps + is_down ucast_of_nat) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply 
(clarsimp simp: cap_frame_cap_lift cap_to_H_def mask_def elim!: ccap_relationE) + apply (ctac add: ccorres_return_C) + apply clarsimp + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: Collect_const_mem isCap_simps word_less_nat_alt + option_to_ptr_def from_bool_0 option_to_0_def ccap_relation_def + c_valid_cap_def cl_valid_cap_def cap_frame_cap_lift + is_down ucast_of_nat) + apply (rule ccorres_cond_true_seq) + apply simp + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def option_to_ptr_def option_to_0_def isCap_simps + dumb_bool_for_all + split: capability.splits arch_capability.splits bool.splits) + apply wpsimp + apply (clarsimp simp: Collect_const_mem) + apply (rule conjI) + apply (clarsimp simp: isCap_simps word_less_nat_alt ) + apply (frule ccap_relation_page_is_device) + apply (frule ccap_relation_frame_tags) + apply clarsimp + apply (rule ccontr) + apply clarsimp + apply (fastforce simp: cap_get_tag_PageCap_frame + isCap_simps) + apply wp + apply vcg + apply (simp add: word_sle_def Collect_const_mem + tcb_cnode_index_defs tcbSlots cte_level_bits_def + size_of_def) + done + + +lemma doMachineOp_pointerInUserData: + "\pointerInUserData p\ doMachineOp m \\rv. pointerInUserData p\" + by (simp add: pointerInUserData_def) wp + +lemma loadWordUser_wp: + "\\s. is_aligned p 3 \ (\v. user_word_at v p s \ P v s)\ + loadWordUser p + \P\" + apply (simp add: loadWordUser_def loadWord_def stateAssert_def + user_word_at_def valid_def upto0_7_def) + apply (clarsimp simp: in_monad in_doMachineOp) + done + +lemma ccorres_pre_loadWordUser: + "(\rv. ccorres r xf (P rv) (Q rv) hs (a rv) c) \ + ccorres r xf (valid_pspace' and K (is_aligned ptr 3) and + (\s. \v. user_word_at v ptr s \ P v s)) + {s. \v. cslift s (Ptr ptr :: machine_word ptr) = Some v \ + s \ Q v} hs + (loadWordUser ptr >>= a) c" + apply (rule ccorres_guard_imp) + apply (rule_tac Q="\rv. P rv and user_word_at rv ptr" and Q'=Q in + ccorres_symb_exec_l [OF _ loadWordUser_inv loadWordUser_wp]) + apply (fastforce intro: ccorres_guard_imp) + apply simp + apply simp + apply clarsimp + apply (drule(1) user_word_at_cross_over, simp) + apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff) + done + +lemma loadWordUser_user_word_at: + "\\s. \rv. user_word_at rv x s \ Q rv s\ loadWordUser x \Q\" + apply (simp add: loadWordUser_def user_word_at_def + doMachineOp_def split_def) + apply wp + apply (clarsimp simp: pointerInUserData_def + loadWord_def in_monad + is_aligned_mask upto0_7_def) + done + +lemma mapM_loadWordUser_user_words_at: + "\\s. \rv. (\x < length xs. user_word_at (rv ! x) (xs ! x) s) + \ length rv = length xs \ Q rv s\ + mapM loadWordUser xs \Q\" + apply (induct xs arbitrary: Q) + apply (simp add: mapM_def sequence_def) + apply wp + apply (simp add: mapM_Cons) + apply wp + apply assumption + apply (wp loadWordUser_user_word_at) + apply clarsimp + apply (drule spec, erule mp) + apply clarsimp + apply (case_tac x) + apply simp + apply simp + done + + +lemma getMRs_user_word: + "\\s. valid_ipc_buffer_ptr' buffer s \ i < msgLength info + \ msgLength info \ msgMaxLength \ i >= scast n_msgRegisters\ + getMRs thread (Some buffer) info + \\xs. user_word_at (xs ! 
unat i) (buffer + (i * 8 + 8))\" + supply if_cong[cong] + apply (rule hoare_assume_pre) + apply (elim conjE) + apply (thin_tac "valid_ipc_buffer_ptr' x y" for x y) + apply (simp add: getMRs_def) + apply wp + apply (rule_tac P="length hardwareMRValues = unat n_msgRegisters" in hoare_gen_asm) + apply (clarsimp simp: nth_append word_le_nat_alt + word_less_nat_alt word_size + linorder_not_less [symmetric]) + apply (wp mapM_loadWordUser_user_words_at) + apply (wp hoare_vcg_all_lift) + apply (rule_tac Q="\_. \" in hoare_strengthen_post) + apply wp + apply clarsimp + defer + apply simp + apply (wp asUser_const_rv) + apply (simp add: msgRegisters_unfold n_msgRegisters_def) + apply (erule_tac x="unat i - unat n_msgRegisters" in allE) + apply (erule impE) + apply (simp add: msgRegisters_unfold + msgMaxLength_def msgLengthBits_def n_msgRegisters_def) + apply (drule (1) order_less_le_trans) + apply (simp add: word_less_nat_alt word_le_nat_alt) + apply (simp add: msgRegisters_unfold + msgMaxLength_def msgLengthBits_def n_msgRegisters_def) + apply (simp add: upto_enum_word del: upt_rec_numeral) + apply (subst (asm) nth_map) + apply (simp del: upt_rec_numeral) + apply (drule (1) order_less_le_trans) + apply (simp add: word_less_nat_alt word_le_nat_alt) + apply (subst (asm) nth_upt) + apply simp + apply (drule (1) order_less_le_trans) + apply (simp add: word_less_nat_alt word_le_nat_alt) + apply (simp add: word_le_nat_alt add.commute add.left_commute mult.commute mult.left_commute + wordSize_def' take_bit_Suc) + done + +declare if_split [split] + +definition + "getMRs_rel args buffer \ \s. \mi. msgLength mi \ msgMaxLength \ fst (getMRs (ksCurThread s) buffer mi s) = {(args, s)}" + +definition + "sysargs_rel args buffer \ + cur_tcb' + and case_option \ valid_ipc_buffer_ptr' buffer + and getMRs_rel args buffer + and (\_. length args > unat (scast n_msgRegisters :: machine_word) \ buffer \ None)" + +definition + "sysargs_rel_n args buffer n \ \s. n < length args \ (unat (scast n_msgRegisters :: machine_word) \ n \ buffer \ None)" + +lemma sysargs_rel_to_n: + "sysargs_rel args buffer s \ sysargs_rel_n args buffer n s = (n < length args)" + by (auto simp add: sysargs_rel_def sysargs_rel_n_def) + +lemma getMRs_rel: + "\\s. msgLength mi \ msgMaxLength \ thread = ksCurThread s \ + case_option \ valid_ipc_buffer_ptr' buffer s \ + cur_tcb' s\ + getMRs thread buffer mi \\args. getMRs_rel args buffer\" + apply (simp add: getMRs_rel_def) + apply (rule hoare_pre) + apply (rule_tac x=mi in hoare_exI) + apply wp + apply (rule_tac Q="\rv s. thread = ksCurThread s \ fst (getMRs thread buffer mi s) = {(rv,s)}" in hoare_strengthen_post) + apply (wp det_result det_wp_getMRs) + apply clarsimp + apply (clarsimp simp: cur_tcb'_def) + done + +lemma length_msgRegisters: + "length AARCH64_H.msgRegisters = unat (scast n_msgRegisters :: machine_word)" + by (simp add: msgRegisters_unfold n_msgRegisters_def) + +lemma getMRs_len[simplified]: + "\\\ getMRs thread buffer mi \\args s. length args > unat (scast n_msgRegisters :: machine_word) \ buffer \ None\" + apply (simp add: getMRs_def) + apply (cases buffer, simp_all add:hoare_TrueI) + apply (wp asUser_const_rv | simp)+ + apply (simp add: length_msgRegisters) + done + +lemma getMRs_sysargs_rel: + "\(\s. thread = ksCurThread s) and cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer and K (msgLength mi \ msgMaxLength)\ + getMRs thread buffer mi \\args. 
sysargs_rel args buffer\" + apply (simp add: sysargs_rel_def) + apply (wp getMRs_rel getMRs_len|simp)+ + done + +lemma ccorres_assume_pre: + assumes "\s. P s \ ccorres r xf (P and (\s'. s' = s)) P' hs H C" + shows "ccorres r xf P P' hs H C" + apply (clarsimp simp: ccorres_underlying_def) + apply (frule assms) + apply (simp add: ccorres_underlying_def) + apply blast + done + +lemma getMRs_length: + "\\s. msgLength mi \ msgMaxLength\ getMRs thread buffer mi + \\args s. if buffer = None then length args = min (unat (scast n_msgRegisters :: machine_word)) (unat (msgLength mi)) + else length args = unat (msgLength mi)\" + apply (cases buffer) + apply (simp add: getMRs_def) + apply (rule hoare_pre, wp) + apply (rule asUser_const_rv) + apply simp + apply (wp mapM_length) + apply (simp add: min_def length_msgRegisters) + apply clarsimp + apply (simp add: getMRs_def) + apply (rule hoare_pre, wp) + apply simp + apply (wp mapM_length asUser_const_rv mapM_length)+ + apply (clarsimp simp: length_msgRegisters) + apply (simp add: min_def split: if_splits) + apply (clarsimp simp: word_le_nat_alt) + apply (simp add: msgMaxLength_def msgLengthBits_def n_msgRegisters_def) + done + +lemma index_msgRegisters_less': + "n < 4 \ index msgRegistersC n < 37" + by (simp add: msgRegistersC_def fupdate_def Arrays.update_def + fcp_beta "StrictC'_register_defs") + +lemma index_msgRegisters_less: + "n < 4 \ index msgRegistersC n (s, s') \ rf_sr + \ n \ 2 ^ (msg_align_bits - 3) + \ n \ 0 + \ array_assertion (p :: machine_word ptr) n (hrs_htd (t_hrs_' (globals s')))" + apply (clarsimp simp: valid_ipc_buffer_ptr'_def typ_at_to_obj_at_arches) + apply (drule obj_at_ko_at', clarsimp) + apply (drule rf_sr_heap_user_data_relation) + apply (erule cmap_relationE1) + apply (clarsimp simp: heap_to_user_data_def Let_def) + apply (rule conjI, rule exI, erule ko_at_projectKO_opt) + apply (rule refl) + apply (drule clift_field, rule user_data_C_words_C_fl_ti, simp) + apply (erule clift_array_assertion_imp, simp+) + apply (simp add: field_lvalue_def msg_align_bits) + apply (rule_tac x="unat (ptr_val p && mask pageBits >> 3)" in exI, + simp add: word_shift_by_3 shiftr_shiftl1 + is_aligned_andI1[OF is_aligned_weaken]) + apply (simp add: add.commute word_plus_and_or_coroll2) + apply (cut_tac a="(ptr_val p && mask pageBits ) >> 3" + and b="2 ^ (pageBits - 3) - 2 ^ (msg_align_bits - 3)" in unat_le_helper) + apply (simp add: pageBits_def msg_align_bits mask_def is_aligned_mask) + apply word_bitwise + apply simp + apply (simp add: msg_align_bits pageBits_def) + done + +lemma array_assertion_valid_ipc_buffer_ptr_abs: + "\s s'. (s, s') \ rf_sr \ (valid_ipc_buffer_ptr' (ptr_val (p s)) s) + \ (n s' \ 2 ^ (msg_align_bits - 3) \ (x s' \ 0 \ n s' \ 0)) + \ (x s' = 0 \ array_assertion (p s :: machine_word ptr) (n s') (hrs_htd (t_hrs_' (globals s'))))" + apply (intro allI impI disjCI2, clarsimp) + apply (erule(1) valid_ipc_buffer_ptr_array, simp_all) + done + +lemmas ccorres_move_array_assertion_ipc_buffer + = ccorres_move_array_assertions [OF array_assertion_valid_ipc_buffer_ptr_abs] + +lemma getSyscallArg_ccorres_foo: + "ccorres (\a rv. rv = args ! 
n) ret__unsigned_long_' + (sysargs_rel args buffer and sysargs_rel_n args buffer n) + (UNIV \ \unat \i = n\ \ \\ipc_buffer = option_to_ptr buffer\) [] + (return ()) (Call getSyscallArg_'proc)" + apply (rule ccorres_assume_pre) + apply (subst (asm) sysargs_rel_def) + apply (subst (asm) getMRs_rel_def) + apply (subst (asm) pred_conj_def)+ + apply (elim conjE exE) + apply (cinit lift: i_' ipc_buffer_') + apply (fold return_def) + apply (rule_tac H="do thread \ gets ksCurThread; getMRs thread buffer mi od" in ccorres_defer) + prefer 2 + apply (rule no_fail_pre, wp no_fail_getMRs) + apply assumption + apply (rule ccorres_cond_seq) + apply (rule_tac R=\ and P="\_. n < unat (scast n_msgRegisters :: machine_word)" in ccorres_cond_both) + apply (simp add: word_less_nat_alt split: if_split) + apply (rule ccorres_add_return2) + apply (rule ccorres_symb_exec_l) + apply (rule_tac P="\s. n < unat (scast n_msgRegisters :: machine_word) \ + obj_at' (\tcb. user_regs (atcbContextGet (tcbArch tcb)) + (AARCH64_H.msgRegisters!n) = x!n) (ksCurThread s) s" + and P' = UNIV + in ccorres_from_vcg_split_throws) + apply vcg + apply (simp add: return_def del: Collect_const) + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_ksCurThread) + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps') + apply (clarsimp simp: ctcb_relation_def ccontext_relation_def + msgRegisters_ccorres atcbContextGet_def + carch_tcb_relation_def cregs_relation_def) + apply (subst (asm) msgRegisters_ccorres) + apply (clarsimp simp: n_msgRegisters_def) + apply (clarsimp simp: n_msgRegisters_def) + apply (clarsimp simp: word_less_nat_alt word_upcast_0_sle) + apply (simp add: index_msgRegisters_less'[simplified word_less_nat_alt, simplified]) + apply wp[1] + apply (wp getMRs_tcbContext) + apply fastforce + apply (rule ccorres_seq_skip [THEN iffD2]) + apply (rule ccorres_add_return2) + apply (rule ccorres_symb_exec_l) + apply (rule_tac P="\s. user_word_at (x!n) (ptr_val (CTypesDefs.ptr_add ipc_buffer (of_nat n + 1))) s + \ valid_ipc_buffer_ptr' (ptr_val ipc_buffer) s \ n < msgMaxLength" + and P'=UNIV + in ccorres_from_vcg_throws) + apply (simp add: return_def split del: if_split) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp split del: if_split) + apply (frule(1) user_word_at_cross_over, rule refl) + apply (clarsimp simp: ptr_add_def mult.commute + msgMaxLength_def) + apply (safe intro!: disjCI2 elim!: valid_ipc_buffer_ptr_array, + simp_all add: unatSuc2 add.commute msg_align_bits)[1] + apply wp[1] + apply (rule_tac P="\b. buffer = Some b" in hoare_gen_asm) + apply (clarsimp simp: option_to_ptr_def option_to_0_def) + apply (rule_tac P="\s. 
valid_ipc_buffer_ptr' (ptr_val (Ptr b)) s \ i < msgLength mi \ + msgLength mi \ msgMaxLength \ scast n_msgRegisters \ i" + in hoare_pre(1)) + apply (wp getMRs_user_word) + apply (clarsimp simp: msgMaxLength_def unat_less_helper) + apply fastforce + apply (clarsimp simp: sysargs_rel_def sysargs_rel_n_def) + apply (rule conjI, clarsimp simp: unat_of_nat64 word_bits_def) + apply (drule equalityD2) + apply clarsimp + apply (drule use_valid, rule getMRs_length, assumption) + apply (simp add: n_msgRegisters_def split: if_split_asm) + apply (rule conjI) + apply (clarsimp simp: option_to_ptr_def option_to_0_def + word_less_nat_alt word_le_nat_alt unat_of_nat64 word_bits_def + n_msgRegisters_def not_less msgMaxLength_def) + apply (drule equalityD2) + apply clarsimp + apply (drule use_valid, rule getMRs_length) + apply (simp add: word_le_nat_alt msgMaxLength_def) + apply (simp split: if_split_asm) + apply (rule conjI, clarsimp simp: cur_tcb'_def) + apply clarsimp + apply (clarsimp simp: bind_def gets_def return_def split_def get_def) + done + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma invocation_eq_use_type: + "\ value \ (value' :: 32 signed word); + unat (scast value' :: machine_word) < length (enum :: invocation_label list); (scast value' :: machine_word) \ 0 \ + \ (label = (scast value)) = (invocation_type label = enum ! unat (scast value' :: machine_word))" + apply (fold invocationType_eq, unfold invocationType_def) + apply (simp add: maxBound_is_length Let_def toEnum_def + nth_eq_iff_index_eq nat_le_Suc_less_imp + split: if_split) + apply (intro impI conjI) + apply (simp add: enum_invocation_label) + apply (subgoal_tac "GenInvocationLabel InvalidInvocation = enum ! 0") + apply (erule ssubst, subst nth_eq_iff_index_eq, simp+) + apply (clarsimp simp add: unat_eq_0) + apply (simp add: enum_invocation_label enum_gen_invocation_labels) + done + +lemmas all_invocation_label_defs = invocation_label_defs arch_invocation_label_defs sel4_arch_invocation_label_defs + +lemmas invocation_eq_use_types + = all_invocation_label_defs[THEN invocation_eq_use_type, simplified, + unfolded enum_invocation_label enum_gen_invocation_labels enum_arch_invocation_label, simplified] + +lemma ccorres_equals_throwError: + "\ f = throwError v; ccorres_underlying sr Gamm rr xf arr axf P P' hs (throwError v) c \ + \ ccorres_underlying sr Gamm rr xf arr axf P P' hs f c" + by simp + +end + +end diff --git a/proof/crefine/AARCH64/Syscall_C.thy b/proof/crefine/AARCH64/Syscall_C.thy new file mode 100644 index 0000000000..b16028a967 --- /dev/null +++ b/proof/crefine/AARCH64/Syscall_C.thy @@ -0,0 +1,2362 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Syscall_C +imports + Interrupt_C + Ipc_C + Invoke_C + Schedule_C + Arch_C +begin + +context begin interpretation Arch . (*FIXME: arch_split*) +crunch sch_act_wf [wp]: replyFromKernel "\s. sch_act_wf (ksSchedulerAction s) s" +end + +context kernel_m begin + +lemma ccorres_If_False: + "ccorres_underlying sr Gamm r xf arrel axf R R' hs b c + \ ccorres_underlying sr Gamm r xf arrel axf + (R and (\_. 
\ P)) R' hs (If P a b) c" + by (rule ccorres_gen_asm, simp) + +definition + one_on_true :: "bool \ nat" +where + "one_on_true P \ if P then 1 else 0" + +lemma one_on_true_True[simp]: "one_on_true True = 1" + by (simp add: one_on_true_def) + +lemma one_on_true_eq_0[simp]: "(one_on_true P = 0) = (\ P)" + by (simp add: one_on_true_def split: if_split) + +lemma cap_cases_one_on_true_sum: + "one_on_true (isZombie cap) + one_on_true (isArchObjectCap cap) + + one_on_true (isThreadCap cap) + one_on_true (isCNodeCap cap) + + one_on_true (isNotificationCap cap) + one_on_true (isEndpointCap cap) + + one_on_true (isUntypedCap cap) + one_on_true (isReplyCap cap) + + one_on_true (isIRQControlCap cap) + one_on_true (isIRQHandlerCap cap) + + one_on_true (isNullCap cap) + one_on_true (isDomainCap cap) = 1" + by (cases cap, simp_all add: isCap_simps) + +lemma performInvocation_Endpoint_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and st_tcb_at' simple' thread and ep_at' epptr + and sch_act_sane and (\s. thread = ksCurThread s)) + (UNIV \ {s. block_' s = from_bool blocking} + \ {s. call_' s = from_bool do_call} + \ {s. badge_' s = badge} + \ {s. canGrant_' s = from_bool canGrant} + \ {s. canGrantReply_' s = from_bool canGrantReply} + \ {s. ep_' s = ep_Ptr epptr}) [] + (liftE (sendIPC blocking do_call badge canGrant canGrantReply thread epptr)) + (Call performInvocation_Endpoint_'proc)" + apply cinit + apply (ctac add: sendIPC_ccorres) + apply (simp add: return_returnOk) + apply (rule ccorres_return_CE, simp+)[1] + apply wp + apply simp + apply (vcg exspec=sendIPC_modifies) + apply (clarsimp simp add: rf_sr_ksCurThread sch_act_sane_not) + done + +(* This lemma now assumes 'weak_sch_act_wf (ksSchedulerAction s) s' in place of 'sch_act_simple'. *) + +lemma performInvocation_Notification_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)) + (UNIV \ {s. ntfn_' s = ntfn_Ptr ntfnptr} + \ {s. badge_' s = badge} + \ {s. message_' s = message}) [] + (liftE (sendSignal ntfnptr badge)) + (Call performInvocation_Notification_'proc)" + apply cinit + apply (ctac add: sendSignal_ccorres) + apply (simp add: return_returnOk) + apply (rule ccorres_return_CE, simp+)[1] + apply wp + apply simp + apply (vcg exspec=sendSignal_modifies) + apply simp + done + +lemma performInvocation_Reply_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and tcb_at' receiver and st_tcb_at' active' sender and sch_act_simple + and ((Not o real_cte_at' slot) or cte_wp_at' (\cte. isReplyCap (cteCap cte)) slot) + and cte_wp_at' (\cte. cteCap cte = capability.NullCap \ isReplyCap (cteCap cte)) + slot and (\s. ksCurThread s = sender)) + (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr receiver} + \ {s. slot_' s = cte_Ptr slot} + \ {s. canGrant_' s = from_bool grant}) [] + (liftE (doReplyTransfer sender receiver slot grant)) + (Call performInvocation_Reply_'proc)" + apply cinit + apply (ctac add: doReplyTransfer_ccorres) + apply (simp add: return_returnOk) + apply (rule ccorres_return_CE, simp+)[1] + apply wp + apply simp + apply (vcg exspec=doReplyTransfer_modifies) + apply (simp add: rf_sr_ksCurThread) + apply (auto simp: isReply_def elim!: pred_tcb'_weakenE) + done + +lemma decodeInvocation_ccorres: + "interpret_excaps extraCaps' = excaps_map extraCaps + \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. 
ksCurThread s = thread) and ct_active' and sch_act_simple + and valid_cap' cp and (\s. \x \ zobj_refs' cp. ex_nonz_cap_to' x s) + and (excaps_in_mem extraCaps \ ctes_of) + and cte_wp_at' ((=) cp \ cteCap) slot + and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and (\s. \v \ set extraCaps. s \' fst v \ cte_at' (snd v) s) + and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). ex_nonz_cap_to' y s) + and sysargs_rel args buffer) + (UNIV \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. call_' s = from_bool isCall} + \ {s. block_' s = from_bool isBlocking} + \ {s. call_' s = from_bool isCall} + \ {s. block_' s = from_bool isBlocking} + \ {s. invLabel_' s = label} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. capIndex_' s = cptr} + \ {s. slot_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. ccap_relation cp (cap_' s)} + \ {s. buffer_' s = option_to_ptr buffer}) [] + (decodeInvocation label args cptr slot cp extraCaps + >>= invocationCatch thread isBlocking isCall id) + (Call decodeInvocation_'proc)" + apply (cinit' lift: call_' block_' invLabel_' length___unsigned_long_' + capIndex_' slot_' current_extra_caps_' cap_' buffer_') + apply csymbr + apply (simp add: cap_get_tag_isCap decodeInvocation_def + cong: if_cong StateSpace.state.fold_congs + globals.fold_congs + del: Collect_const) + apply (cut_tac cap=cp in cap_cases_one_on_true_sum) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: Let_def isArchCap_T_isArchObjectCap liftME_invocationCatch) + apply (rule ccorres_split_throws) + apply (rule ccorres_trim_returnE) + apply simp + apply simp + apply (rule ccorres_call, rule Arch_decodeInvocation_ccorres [where buffer=buffer]) + apply assumption + apply simp+ + apply (vcg exspec=Arch_decodeInvocation_modifies) + apply simp + apply csymbr + apply (simp add: cap_get_tag_isCap del: Collect_const) + apply (rule ccorres_Cond_rhs) + apply (simp add: invocationCatch_def throwError_bind) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_Cond_rhs) + apply (simp add: invocationCatch_def throwError_bind) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_Cond_rhs) + apply (simp add: if_to_top_of_bind) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) + apply (clarsimp simp: isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (drule(1) cap_get_tag_to_H) + apply (clarsimp simp: to_bool_def) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: returnOk_bind ccorres_invocationCatch_Inr + performInvocation_def bind_assoc liftE_bindE) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_pre_getCurThread) + apply (simp only: liftE_bindE[symmetric]) + apply (ctac add: performInvocation_Endpoint_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply simp + apply (vcg exspec=performInvocation_Endpoint_modifies) + apply simp + apply (rule hoare_use_eq[where f=ksCurThread]) + apply (wp sts_invs_minor' sts_st_tcb_at'_cases + setThreadState_ct' hoare_vcg_all_lift)+ + apply simp + apply (vcg exspec=setThreadState_modifies) + apply vcg + apply 
(rule ccorres_Cond_rhs) + apply (rule ccorres_rhs_assoc)+ + apply (csymbr) + apply (simp add: if_to_top_of_bind Collect_const[symmetric] + del: Collect_const) + apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) + apply (clarsimp simp: isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (drule(1) cap_get_tag_to_H) + apply (clarsimp simp: to_bool_def) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: returnOk_bind ccorres_invocationCatch_Inr + performInvocation_def bindE_assoc) + apply (simp add: liftE_bindE) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply csymbr + apply (simp only: liftE_bindE[symmetric]) + apply (ctac(no_vcg) add: performInvocation_Notification_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (wp sts_invs_minor') + apply simp + apply (vcg exspec=setThreadState_modifies) + apply vcg + apply (rule ccorres_Cond_rhs) + apply (simp add: if_to_top_of_bind) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) + apply (clarsimp simp: isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (clarsimp simp: cap_get_tag_ReplyCap to_bool_def) + apply (simp add: throwError_bind invocationCatch_def) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: returnOk_bind ccorres_invocationCatch_Inr + performInvocation_def liftE_bindE + bind_assoc) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply csymbr + apply (rule ccorres_pre_getCurThread) + apply (simp only: liftE_bindE[symmetric]) + apply (ctac add: performInvocation_Reply_ccorres) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply simp + apply (vcg exspec=performInvocation_Reply_modifies) + apply (simp add: cur_tcb'_def[symmetric]) + apply (rule_tac R="\rv s. 
ksCurThread s = thread" in hoare_post_add) + apply (simp cong: conj_cong) + apply (strengthen imp_consequent) + apply (wp sts_invs_minor' sts_st_tcb_at'_cases) + apply simp + apply (vcg exspec=setThreadState_modifies) + apply vcg + apply (rule ccorres_Cond_rhs) + apply (simp add: if_to_top_of_bind) + apply (rule ccorres_trim_returnE, simp+) + apply (simp add: liftME_invocationCatch) + apply (rule ccorres_call, rule decodeTCBInvocation_ccorres) + apply assumption + apply (simp+)[3] + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_trim_returnE, simp+) + apply (simp add: liftME_invocationCatch) + apply (rule ccorres_call, + erule decodeDomainInvocation_ccorres, + simp+)[1] + apply (rule ccorres_Cond_rhs) + apply (simp add: if_to_top_of_bind) + apply (rule ccorres_trim_returnE, simp+) + apply (simp add: liftME_invocationCatch) + apply (rule ccorres_call, + erule decodeCNodeInvocation_ccorres, + simp+)[1] + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_trim_returnE, simp+) + apply (simp add: liftME_invocationCatch) + apply (rule ccorres_call, + erule decodeUntypedInvocation_ccorres, simp+)[1] + apply (rule ccorres_Cond_rhs) + apply (simp add: liftME_invocationCatch) + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call, erule decodeIRQControlInvocation_ccorres, + simp+)[1] + apply (rule ccorres_Cond_rhs) + apply (simp add: Let_def liftME_invocationCatch) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_trim_returnE, simp+) + apply (rule ccorres_call, + erule decodeIRQHandlerInvocation_ccorres, simp+) + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply (simp add: isArchCap_T_isArchObjectCap one_on_true_def from_bool_0) + apply (rule conjI) + apply (clarsimp simp: tcb_at_invs' ct_in_state'_def + simple_sane_strg) + apply (clarsimp simp: cte_wp_at_ctes_of valid_cap'_def isCap_simps + unat_eq_0 sysargs_rel_n_def n_msgRegisters_def valid_tcb_state'_def + | rule conjI | erule pred_tcb'_weakenE disjE + | drule st_tcb_at_idle_thread')+ + apply fastforce + apply (simp add: cap_lift_capEPBadge_mask_eq) + apply (clarsimp simp: rf_sr_ksCurThread Collect_const_mem + cap_get_tag_isCap ThreadState_defs) + apply (frule word_unat.Rep_inverse') + apply (simp add: cap_get_tag_isCap[symmetric] cap_get_tag_ReplyCap) + apply (rule conjI) + apply (simp add: cap_get_tag_isCap) + apply (clarsimp simp: isCap_simps cap_get_tag_to_H from_bool_neq_0) + apply (insert ccap_relation_IRQHandler_mask, elim meta_allE, drule(1) meta_mp) + apply (clarsimp simp: word_size) + apply (clarsimp simp: cap_get_tag_isCap) + apply (cases cp ; clarsimp simp: isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap, drule (1) cap_get_tag_to_H) + apply fastforce + apply (frule cap_get_tag_isCap_unfolded_H_cap, drule (1) cap_get_tag_to_H) + apply (fastforce simp: cap_endpoint_cap_lift_def mask_eq_ucast_eq) + apply (frule ccap_relation_ep_helpers) + apply (clarsimp simp: cap_get_tag_isCap isEndpointCap_def) + apply clarsimp + apply (frule ccap_relation_reply_helpers) + apply (clarsimp simp: cap_get_tag_isCap isReplyCap_def) + apply clarsimp + done + +lemma ccorres_Call_Seq: + "\ \ f = Some v; ccorres r xf P P' hs a (v ;; c) \ + \ ccorres r xf P P' hs a (Call f ;; c)" + apply (erule ccorres_semantic_equivD1) + apply (rule semantic_equivI) + apply (auto elim!: exec_elim_cases intro: exec.intros) + done + +lemma wordFromRights_mask_0: + "wordFromRights rghts && ~~ mask 4 = 0" + apply (simp add: wordFromRights_def word_ao_dist word_or_zero + split: cap_rights.split) + apply 
(simp add: mask_def split: if_split) + done + +lemma wordFromRights_mask_eq: + "wordFromRights rghts && mask 4 = wordFromRights rghts" + apply (cut_tac x="wordFromRights rghts" and y="mask 4" and z="~~ mask 4" + in bit.conj_disj_distrib) + apply (simp add: wordFromRights_mask_0) + done + +lemma mapM_loadWordUser_user_words_at: + "\\s. \rv. (\x < length xs. user_word_at (rv ! x) (xs ! x) s) + \ length rv = length xs \ Q rv s\ + mapM loadWordUser xs \Q\" + apply (induct xs arbitrary: Q) + apply (simp add: mapM_def sequence_def) + apply wp + apply (simp add: mapM_Cons) + apply wp + apply assumption + apply (wp loadWordUser_user_word_at) + apply clarsimp + apply (drule spec, erule mp) + apply clarsimp + apply (case_tac x) + apply simp + apply simp + done + +lemma getSlotCap_slotcap_in_mem: + "\\\ getSlotCap slot \\cap s. slotcap_in_mem cap slot (ctes_of s)\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of slotcap_in_mem_def) + done + +lemma lookupExtraCaps_excaps_in_mem[wp]: + "\\\ lookupExtraCaps thread buffer info \\rv s. excaps_in_mem rv (ctes_of s)\,-" + apply (simp add: excaps_in_mem_def lookupExtraCaps_def lookupCapAndSlot_def + split_def) + apply (wp mapME_set) + apply (wp getSlotCap_slotcap_in_mem | simp)+ + done + +lemma getCurThread_ccorres: + "ccorres ((=) \ tcb_ptr_to_ctcb_ptr) thread_' + \ UNIV hs getCurThread (\thread :== \ksCurThread)" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: getCurThread_def simpler_gets_def + rf_sr_ksCurThread) + done + +lemma getMessageInfo_ccorres: + "ccorres (\rv rv'. rv = messageInfoFromWord rv') ret__unsigned_long_' \ + (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread} + \ {s. reg_' s = register_from_H AARCH64_H.msgInfoRegister}) [] + (getMessageInfo thread) (Call getRegister_'proc)" + apply (simp add: getMessageInfo_def liftM_def[symmetric]) + apply (rule ccorres_rel_imp, rule ccorres_guard_imp2, rule getRegister_ccorres) + apply simp + apply simp + done + +lemma messageInfoFromWord_spec: + "\s. \ \ {s} Call messageInfoFromWord_'proc + {s'. seL4_MessageInfo_lift (ret__struct_seL4_MessageInfo_C_' s') + = mi_from_H (messageInfoFromWord (w_' s))}" + apply (rule allI, rule conseqPost, rule messageInfoFromWord_spec[rule_format]) + apply simp_all + apply (clarsimp simp: seL4_MessageInfo_lift_def mi_from_H_def + messageInfoFromWord_def Let_def + msgLengthBits_def msgExtraCapBits_def + msgMaxExtraCaps_def msgLabelBits_def shiftL_nat) + done + +lemma threadGet_tcbIpcBuffer_ccorres [corres]: + "ccorres (=) w_bufferPtr_' (tcb_at' tptr) UNIV hs + (threadGet tcbIPCBuffer tptr) + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t (Ptr &(tcb_ptr_to_ctcb_ptr tptr\ + [''tcbIPCBuffer_C''])::machine_word ptr)\ + (\w_bufferPtr :== + h_val (hrs_mem \t_hrs) + (Ptr &(tcb_ptr_to_ctcb_ptr tptr\[''tcbIPCBuffer_C''])::machine_word ptr)))" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return2) + apply (rule ccorres_pre_threadGet) + apply (rule_tac P = "obj_at' (\tcb. tcbIPCBuffer tcb = x) tptr" and + P'="{s'. \ctcb. 
+ cslift s' (tcb_ptr_to_ctcb_ptr tptr) = Some ctcb \ + tcbIPCBuffer_C ctcb = x }" in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: return_def typ_heap_simps') + apply (clarsimp simp: obj_at'_def ctcb_relation_def) + done + +lemma handleInvocation_def2: + "handleInvocation isCall isBlocking = + do thread \ getCurThread; + info \ getMessageInfo thread; + ptr \ asUser thread (getRegister AARCH64_H.capRegister); + v \ (doE (cap, slot) \ capFaultOnFailure ptr False (lookupCapAndSlot thread ptr); + buffer \ withoutFailure (VSpace_H.lookupIPCBuffer False thread); + extracaps \ lookupExtraCaps thread buffer info; + returnOk (slot, cap, extracaps, buffer) + odE); + case v of Inl f \ liftE (when isBlocking (handleFault thread f)) + | Inr (slot, cap, extracaps, buffer) \ + do args \ getMRs thread buffer info; + v' \ do v \ RetypeDecls_H.decodeInvocation (msgLabel info) args ptr slot cap extracaps; + invocationCatch thread isBlocking isCall id v od; + case v' of Inr _ \ liftE (replyOnRestart thread [] isCall) + | Inl (Inl syserr) \ liftE (when isCall (replyFromKernel thread + (msgFromSyscallError syserr))) + | Inl (Inr preempt) \ throwError preempt + od + od" + apply (simp add: handleInvocation_def Syscall_H.syscall_def runExceptT_def + liftE_bindE cong: sum.case_cong) + apply (rule ext, (rule bind_apply_cong [OF refl])+) + apply (clarsimp simp: bind_assoc split: sum.split) + apply (rule bind_apply_cong [OF refl])+ + apply (clarsimp simp: invocationCatch_def throwError_bind + liftE_bindE bind_assoc + split: sum.split) + apply (rule bind_apply_cong [OF refl])+ + apply (simp add: bindE_def bind_assoc) + apply (rule bind_apply_cong [OF refl])+ + apply (clarsimp simp: lift_def throwError_bind returnOk_bind split: sum.split) + apply (simp cong: bind_cong add: ts_Restart_case_helper') + apply (simp add: when_def[symmetric] replyOnRestart_def[symmetric]) + apply (simp add: liftE_def replyOnRestart_twice alternative_bind + alternative_refl split: if_split) + done + +lemma thread_state_to_tsType_eq_Restart: + "(thread_state_to_tsType ts = scast ThreadState_Restart) + = (ts = Restart)" + by (cases ts, simp_all add: ThreadState_defs) + +lemma wordFromMessageInfo_spec: + "\s. \\ {s} Call wordFromMessageInfo_'proc + {s'. \mi. seL4_MessageInfo_lift (mi_' s) = mi_from_H mi + \ ret__unsigned_long_' s' = wordFromMessageInfo mi}" + apply (rule allI, rule conseqPost[OF wordFromMessageInfo_spec[rule_format] _ subset_refl]) + apply (clarsimp simp: wordFromMessageInfo_def + msgLengthBits_def msgExtraCapBits_def msgMaxExtraCaps_def shiftL_nat) + apply (drule sym[where t="mi_from_H mi" for mi]) + apply (clarsimp simp: seL4_MessageInfo_lift_def mi_from_H_def mask_def) + apply (thin_tac _)+ + apply word_bitwise + done + +lemma handleDoubleFault_ccorres: + "ccorres dc xfdc (invs' and tcb_at' tptr and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and + sch_act_not tptr) + (UNIV \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) + [] (handleDoubleFault tptr ex1 ex2) + (Call handleDoubleFault_'proc)" + apply (cinit lift: tptr_') + apply (subst ccorres_seq_skip'[symmetric]) + apply (ctac (no_vcg)) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_return_Skip) + apply (wp asUser_inv getRestartPC_inv)+ + apply (rule empty_fail_asUser) + apply (simp add: getRestartPC_def) + apply wp + apply clarsimp + apply (simp add: ThreadState_defs) + apply (fastforce simp: valid_tcb_state'_def) + done + +lemma cap_case_EndpointCap: + "(case cap of EndpointCap ep b cs cr cg cgr \ f ep b cs cr cg cgr | _ \ g) + = (if isEndpointCap cap then f (capEPPtr cap) (capEPBadge cap) (capEPCanSend cap) + (capEPCanReceive cap) (capEPCanGrant cap) (capEPCanGrantReply cap) + else g)" + by (cases cap; clarsimp simp: isCap_simps) + +lemma cap_case_EndpointCap_CanSend_CanGrant: + "(case cap of EndpointCap v0 v1 True v3 True v4 \ f v0 v1 v3 v4 + | _ \ g) + = (if (isEndpointCap cap \ capEPCanSend cap \ capEPCanGrant cap) + then f (capEPPtr cap) (capEPBadge cap) (capEPCanReceive cap) (capEPCanGrantReply cap) + else g)" + by (simp add: isCap_simps + split: capability.split bool.split) + +lemma threadGet_tcbFaultHandler_ccorres [corres]: + "ccorres (=) handlerCPtr_' (tcb_at' tptr) UNIV hs + (threadGet tcbFaultHandler tptr) + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t (tcb_ptr_to_ctcb_ptr tptr)\ + (\handlerCPtr :== + h_val (hrs_mem \t_hrs) + (Ptr &(tcb_ptr_to_ctcb_ptr tptr\[''tcbFaultHandler_C''])::machine_word ptr)))" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return2) + apply (rule ccorres_pre_threadGet) + apply (rule_tac P = "obj_at' (\tcb. tcbFaultHandler tcb = x) tptr" and + P'="{s'. \ ctcb. + cslift s' (tcb_ptr_to_ctcb_ptr tptr) = Some ctcb \ + tcbFaultHandler_C ctcb = x }" in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: return_def typ_heap_simps') + apply (clarsimp simp: obj_at'_def ctcb_relation_def) +done + +lemma tcb_cte_cases_tcbFault_update_simp: + "(f, u) \ ran tcb_cte_cases \ f (tcbFault_update (\_. Some fault) tcb) = f tcb" + unfolding tcb_cte_cases_def cteSizeBits_def + by auto + +lemma hrs_mem_update_use_hrs_mem: + "hrs_mem_update f = (\hrs. (hrs_mem_update $ (\_. f (hrs_mem hrs))) hrs)" + by (simp add: hrs_mem_update_def hrs_mem_def fun_eq_iff) + +lemma sendFaultIPC_ccorres: + "ccorres (cfault_rel2 \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and st_tcb_at' simple' tptr and sch_act_not tptr) + (UNIV \ {s. (cfault_rel (Some fault) (seL4_Fault_lift(current_fault_' (globals s))) + (lookup_fault_lift(current_lookup_fault_' (globals s))))} + \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) + [] (sendFaultIPC tptr fault) + (Call sendFaultIPC_'proc)" + supply if_cong[cong] option.case_cong[cong] + supply Collect_const[simp del] + apply (cinit lift: tptr_' cong: call_ignore_cong) + apply (simp add: liftE_bindE del:Collect_const cong:call_ignore_cong) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_split_nothrow) + apply (rule threadGet_tcbFaultHandler_ccorres) + apply ceqv + apply (rule_tac xf'=lu_ret___struct_lookupCap_ret_C_' + in ccorres_split_nothrow_callE) + apply (rule capFaultOnFailure_ccorres) + apply (rule lookupCap_ccorres) + apply simp+ + apply ceqv + apply (rename_tac epcap epcap') + apply clarsimp + apply ccorres_rewrite + apply csymbr + apply (simp add: cap_case_EndpointCap) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac val="from_bool (isEndpointCap epcap \ capEPCanSend epcap + \ (capEPCanGrant epcap \ capEPCanGrantReply epcap))" + and xf'=ret__int_' and R=\ and R'=UNIV in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce simp: from_bool_eq_if' cap_get_tag_isCap isCap_simps + ccap_relation_ep_helpers) + apply ceqv + apply clarsimp + apply (rule ccorres_Cond_rhs) + (* case: we send the IPC *) + apply clarsimp + apply (simp add: liftE_def bind_assoc) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac ccorres_split_nothrow_novcg) + apply (rule_tac P=\ and P'=invs' + and R="{s. + (cfault_rel (Some fault) + (seL4_Fault_lift(current_fault_' (globals s))) + (lookup_fault_lift(original_lookup_fault_' s)))}" + in threadSet_ccorres_lemma4) + apply vcg + apply (clarsimp simp: typ_heap_simps' rf_sr_tcb_update_twice) + apply (intro conjI allI impI) + apply (simp add: typ_heap_simps' rf_sr_def) + apply (rule rf_sr_tcb_update_no_queue2[unfolded rf_sr_def, simplified], + assumption+, (simp add: typ_heap_simps')+) + apply (rule ball_tcb_cte_casesI, simp+) + apply (simp add: ctcb_relation_def cthread_state_relation_def) + apply (case_tac "tcbState tcb", simp+) + apply (simp add: rf_sr_def) + apply (rule rf_sr_tcb_update_no_queue2[unfolded rf_sr_def, simplified], + assumption+, (simp add: typ_heap_simps' | simp only: hrs_mem_update_use_hrs_mem)+) + apply (rule ball_tcb_cte_casesI, simp+) + apply (clarsimp simp: typ_heap_simps') + apply (simp add: ctcb_relation_def cthread_state_relation_def) + apply (rule conjI) + apply (case_tac "tcbState tcb", simp+) + apply (simp add: cfault_rel_def) + apply (clarsimp) + apply (clarsimp simp: seL4_Fault_lift_def Let_def is_cap_fault_def + split: if_split_asm) + apply ceqv + apply csymbr + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: sendIPC_ccorres) + apply (ctac (no_vcg) add: ccorres_return_CE [unfolded returnOk_def comp_def]) + apply wp + apply (wpsimp wp: threadSet_invs_trivial) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_typ_at_lifts) + + apply (clarsimp simp: guard_is_UNIV_def) + apply (subgoal_tac "capEPBadge epcap && mask 64 = capEPBadge epcap") + apply (clarsimp simp: cap_get_tag_isCap isEndpointCap_def isCap_simps + ccap_relation_ep_helpers) + apply (drule cap_get_tag_isCap(4)[symmetric]) + apply (clarsimp simp: cap_get_tag_EndpointCap) + apply (clarsimp simp: case_bool_If) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply clarsimp + apply (clarsimp simp add: throwError_def return_def) + apply (rule conseqPre, vcg) + apply (clarsimp simp: EXCEPTION_FAULT_def EXCEPTION_NONE_def) 
+ apply (simp add: cfault_rel2_def cfault_rel_def EXCEPTION_FAULT_def) + apply (simp add: seL4_Fault_CapFault_lift) + apply (simp add: lookup_fault_missing_capability_lift is_cap_fault_def) + apply clarsimp + apply vcg + apply clarsimp + apply (rule ccorres_split_throws) + apply (rule_tac P=\ and P'="{x. errstate x= err'}" in ccorres_from_vcg_throws) + apply clarsimp + apply (clarsimp simp add: throwError_def return_def) + apply (rule conseqPre, vcg) + apply (clarsimp simp: EXCEPTION_FAULT_def EXCEPTION_NONE_def) + apply (simp add: cfault_rel2_def cfault_rel_def EXCEPTION_FAULT_def) + apply (simp add: seL4_Fault_CapFault_lift is_cap_fault_def) + apply (erule lookup_failure_rel_fault_lift [rotated, unfolded EXCEPTION_NONE_def, simplified] + , assumption) + apply vcg + apply (clarsimp simp: inQ_def) + apply (rule_tac Q="\a b. invs' b \ st_tcb_at' simple' tptr b + \ sch_act_not tptr b \ valid_cap' a b" + and E="\ _. \" + in hoare_strengthen_postE) + apply (wp) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: valid_cap'_def pred_tcb_at')+ + apply (vcg exspec=lookupCap_modifies) + apply wpsimp+ + apply vcg + apply (clarsimp, vcg) + apply (rule conseqPre, vcg) + apply fastforce+ + done + +lemma handleFault_ccorres: + "ccorres dc xfdc (invs' and st_tcb_at' simple' t and sch_act_not t) + (UNIV \ {s. (cfault_rel (Some flt) (seL4_Fault_lift(current_fault_' (globals s))) + (lookup_fault_lift(current_lookup_fault_' (globals s))) )} + \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t}) + hs (handleFault t flt) + (Call handleFault_'proc)" + apply (cinit lift: tptr_') + apply (simp add: catch_def) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_split_nothrow_novcg_case_sum) + apply (ctac (no_vcg) add: sendFaultIPC_ccorres) + apply ceqv + apply clarsimp + apply (rule ccorres_cond_empty) + apply (rule ccorres_return_Skip') + apply clarsimp + apply (rule ccorres_cond_univ) + apply (ctac (no_vcg) add: handleDoubleFault_ccorres) + apply (simp add: sendFaultIPC_def) + apply wp + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply clarsimp + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply (wp) + apply (simp add: guard_is_UNIV_def) + apply (simp add: guard_is_UNIV_def) + apply clarsimp + apply vcg + apply clarsimp + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: pred_tcb_at') + done + +(* FIXME: move *) +lemma length_CL_from_H [simp]: + "length_CL (mi_from_H mi) = msgLength mi" + by (simp add: mi_from_H_def) + +lemma getMRs_length: + "\\s. msgLength mi \ 120\ getMRs thread buffer mi + \\args s. if buffer = None then length args = min (unat n_msgRegisters) (unat (msgLength mi)) + else length args = unat (msgLength mi)\" + supply if_cong[cong] + apply (cases buffer) + apply (simp add: getMRs_def) + apply (rule hoare_pre, wp) + apply (rule asUser_const_rv) + apply simp + apply (wp mapM_length) + apply (simp add: min_def length_msgRegisters) + apply (clarsimp simp: n_msgRegisters_def) + apply (simp add: getMRs_def) + apply (rule hoare_pre, wp) + apply simp + apply (wp mapM_length asUser_const_rv mapM_length)+ + apply (clarsimp simp: length_msgRegisters) + apply (simp add: min_def split: if_splits) + apply (clarsimp simp: word_le_nat_alt) + apply (simp add: msgMaxLength_def msgLengthBits_def n_msgRegisters_def) + done + +lemma handleInvocation_ccorres: + "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and ct_active' and sch_act_simple) + (UNIV \ {s. 
isCall_' s = from_bool isCall} + \ {s. isBlocking_' s = from_bool isBlocking}) [] + (handleInvocation isCall isBlocking) (Call handleInvocation_'proc)" + apply (cinit' lift: isCall_' isBlocking_' + simp: whileAnno_def handleInvocation_def2) + apply (simp add: liftE_bindE del: Collect_const cong: call_ignore_cong) + apply (ctac(no_vcg) add: getCurThread_ccorres) + apply (ctac(no_vcg) add: getMessageInfo_ccorres) + apply (simp del: Collect_const cong: call_ignore_cong) + apply csymbr + apply (ctac(no_vcg) add: getRegister_ccorres) + apply (simp add: Syscall_H.syscall_def + liftE_bindE split_def bindE_bind_linearise + cong: call_ignore_cong + del: Collect_const) + apply (rule_tac ccorres_split_nothrow_case_sum) + apply (ctac add: capFaultOnFailure_ccorres + [OF lookupCapAndSlot_ccorres]) + apply ceqv + apply (simp add: ccorres_cond_iffs Collect_False + cong: call_ignore_cong + del: Collect_const) + apply (simp only: bind_assoc) + apply (ctac(no_vcg) add: lookupIPCBuffer_ccorres) + apply (simp add: liftME_def bindE_assoc del: Collect_const) + apply (simp add: bindE_bind_linearise del: Collect_const) + apply (rule_tac xf'="\s. (status_' s, + current_extra_caps_' (globals s))" + and ef'=fst and vf'=snd and es=errstate + in ccorres_split_nothrow_novcg_case_sum) + apply (rule ccorres_call, rule lookupExtraCaps_ccorres, simp+) + apply (rule ceqv_tuple2, ceqv, ceqv) + apply (simp add: returnOk_bind liftE_bindE + Collect_False + ccorres_cond_iffs ts_Restart_case_helper' + del: Collect_const cong: bind_cong) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, + rule_tac xf'="length___unsigned_long_'" + and r'="\rv rv'. unat rv' = length rv" + in ccorres_split_nothrow) + apply (rule ccorres_add_return2) + apply (rule ccorres_symb_exec_l) + apply (rule_tac P="\s. 
rvd \ Some 0 \ (if rvd = None then + length x = min (unat (n_msgRegisters)) + (unat (msgLength (messageInfoFromWord ret__unsigned_long))) + else + length x = (unat (msgLength (messageInfoFromWord ret__unsigned_long))))" + and P'=UNIV + in ccorres_from_vcg) + apply (clarsimp simp: return_def) + apply (rule conseqPre, vcg) + apply (clarsimp simp: word_less_nat_alt) + apply (rule conjI) + apply clarsimp + apply (case_tac rvd, clarsimp simp: option_to_ptr_def option_to_0_def min_def n_msgRegisters_def) + apply (clarsimp simp: option_to_0_def option_to_ptr_def) + apply clarsimp + apply (case_tac rvd, + clarsimp simp: option_to_0_def min_def option_to_ptr_def + n_msgRegisters_def + split: if_splits) + apply (clarsimp simp: option_to_0_def option_to_ptr_def) + apply wp + apply (wp getMRs_length) + apply simp + apply ceqv + apply csymbr + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_case_sum) + apply (ctac add: decodeInvocation_ccorres) + apply ceqv + apply (simp add: Collect_False exception_defs + replyOnRestart_def liftE_def bind_assoc + del: Collect_const) + apply (rule ccorres_move_c_guard_tcb) + apply (rule getThreadState_ccorres_foo) + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply (rule_tac P="ret__unsigned_longlong = thread_state_to_tsType rvg" + in ccorres_gen_asm2) + apply (simp add: thread_state_to_tsType_eq_Restart from_bool_0 + del: Collect_const add: Collect_const[symmetric]) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: bind_assoc) + apply (ctac(no_vcg) add: replyFromKernel_success_empty_ccorres) + apply (ctac(no_vcg) add: setThreadState_ccorres) + apply (rule ccorres_return_CE[folded return_returnOk], simp+)[1] + apply wp+ + apply (rule hoare_strengthen_post, rule rfk_invs') + apply auto[1] + apply simp + apply (ctac(no_vcg) add: setThreadState_ccorres) + apply (rule ccorres_return_CE[folded return_returnOk], simp+)[1] + apply wp + apply simp + apply (rule ccorres_return_CE[folded return_returnOk], simp+)[1] + apply wpc + apply (simp add: syscall_error_rel_def from_bool_0 exception_defs + Collect_False ccorres_cond_iffs Collect_True + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (simp add: liftE_def Collect_const[symmetric] + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply simp + apply (ctac(no_vcg) add: replyFromKernel_error_ccorres) + apply (rule ccorres_split_throws) + apply (rule ccorres_return_CE[folded return_returnOk], simp+)[1] + apply vcg + apply wp + apply simp + apply (rule ccorres_split_throws) + apply (rule ccorres_return_CE[folded return_returnOk], simp+)[1] + apply vcg + apply (simp add: cintr_def) + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply (simp add: invocationCatch_def o_def) + apply (rule_tac Q="\rv'. invs' and tcb_at' rv" + and E="\ft. 
invs' and tcb_at' rv" + in hoare_strengthen_postE) + apply (wp hoare_split_bind_case_sumE hoare_drop_imps + setThreadState_nonqueued_state_update + ct_in_state'_set setThreadState_st_tcb + hoare_vcg_all_lift + | wpc | wps)+ + apply auto[1] + apply clarsimp + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) + apply (simp add: ThreadState_defs mask_def) + apply (simp add: typ_heap_simps) + apply (case_tac ts, simp_all add: cthread_state_relation_def)[1] + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) + apply (clarsimp simp add: intr_and_se_rel_def exception_defs + syscall_error_rel_def cintr_def + split: sum.split_asm) + apply (simp add: conj_comms) + apply (wp getMRs_sysargs_rel) + apply (simp add: ) + apply vcg + apply (simp add: ccorres_cond_iffs ccorres_seq_cond_raise + Collect_True Collect_False + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (simp add: ccorres_cond_iffs Collect_const[symmetric] + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: from_bool_0 liftE_def) + apply (ctac(no_vcg) add: handleFault_ccorres) + apply (rule ccorres_split_throws) + apply (rule ccorres_return_CE[folded return_returnOk], simp+)[1] + apply vcg + apply wp + apply (simp add: from_bool_0 liftE_def) + apply (rule ccorres_split_throws) + apply (rule ccorres_return_CE[folded return_returnOk], simp+)[1] + apply vcg + apply (simp add: ball_conj_distrib) + apply (wp lookupExtraCaps_excaps_in_mem + lec_eq[unfolded o_def] lec_derived'[unfolded o_def]) + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def + mi_from_H_def) + apply (clarsimp simp: guard_is_UNIV_def) + apply simp + apply (wp lookupIPCBuffer_Some_0) + apply (simp add: Collect_True liftE_def return_returnOk + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac P=\ in ccorres_cross_over_guard) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_when[where R=\]) + apply (simp add: from_bool_0 Collect_const_mem) + apply (ctac add: handleFault_ccorres) + apply (rule ccorres_split_throws) + apply (rule ccorres_return_CE, simp+)[1] + apply vcg + apply wp + apply (simp add: guard_is_UNIV_def) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (simp, wp lcs_eq[unfolded o_def]) + apply clarsimp + apply (vcg exspec= lookupCapAndSlot_modifies) + apply simp + apply (wp getMessageInfo_less_4 getMessageInfo_le3 getMessageInfo_msgLength')+ + apply (simp add: msgMaxLength_def, wp getMessageInfo_msgLength')[1] + apply simp + apply wp + apply (clarsimp simp: Collect_const_mem) + apply (simp add: Kernel_C.msgInfoRegister_def AARCH64_H.msgInfoRegister_def + AARCH64.msgInfoRegister_def C_register_defs + Kernel_C.capRegister_def AARCH64_H.capRegister_def + AARCH64.capRegister_def) + apply (clarsimp simp: cfault_rel_def option_to_ptr_def) + apply (simp add: seL4_Fault_CapFault_lift is_cap_fault_def) + apply (frule lookup_failure_rel_fault_lift, assumption) + apply clarsimp + apply (clarsimp simp: ct_in_state'_def pred_tcb_at') + apply (auto simp: ct_in_state'_def sch_act_simple_def intro!: active_ex_cap' + elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] + done + +lemma ccorres_return_void_catchbrk: + "ccorres_underlying sr G r xf ar axf P P' hs + f return_void_C + \ ccorres_underlying sr G r xf ar axf P P' (catchbrk_C # hs) + f return_void_C" + apply (simp add: return_void_C_def catchbrk_C_def) + apply (rule ccorresI') + apply clarsimp + apply (erule exec_handlers_Seq_cases') + prefer 2 + apply (clarsimp 
elim!: exec_Normal_elim_cases) + apply (clarsimp elim!: exec_Normal_elim_cases) + apply (erule exec_handlers.cases, simp_all) + prefer 2 + apply (auto elim!: exec_Normal_elim_cases)[1] + apply (clarsimp elim!: exec_Normal_elim_cases) + apply (erule exec_Normal_elim_cases, simp_all) + apply (clarsimp elim!: exec_Normal_elim_cases) + apply (erule(4) ccorresE) + apply (rule EHAbrupt) + apply (fastforce intro: exec.intros) + apply assumption + apply clarsimp + apply (frule exec_handlers_less) + apply clarsimp + apply fastforce + done + +lemma real_cte_tcbCallerSlot: + "tcb_at' t s \ \ real_cte_at' (t + 2 ^ cte_level_bits * tcbCallerSlot) s" + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps' + cte_level_bits_def tcbCallerSlot_def) + apply (drule_tac x=t and y="t + a" for a in ps_clearD, assumption) + apply (rule le_neq_trans, simp_all)[1] + apply (erule is_aligned_no_wrap') + apply simp + apply (subst field_simps[symmetric], rule is_aligned_no_overflow3, assumption, simp_all) + done + +lemma handleReply_ccorres: + "ccorres dc xfdc + (\s. invs' s \ st_tcb_at' (\a. \ isReply a) (ksCurThread s) s \ sch_act_simple s) + UNIV + [] + (handleReply) + (Call handleReply_'proc)" + apply cinit + apply (rule ccorres_pre_getCurThread) + + apply (simp only: getThreadCallerSlot_def locateSlot_conv) + + + apply (rule_tac P="\s. thread=ksCurThread s \ invs' s \ is_aligned thread tcbBlockSizeBits" + and r'="\ a c. c = cte_Ptr a" + and xf'="callerSlot_'" and P'=UNIV in ccorres_split_nothrow) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def word_sle_def) + apply (frule is_aligned_neg_mask_eq) + apply (frule tcb_at_invs') + apply (simp add: mask_def tcbCallerSlot_def + cte_level_bits_def size_of_def + ptr_add_assertion_positive + tcb_cnode_index_defs rf_sr_ksCurThread + rf_sr_tcb_ctes_array_assertion2[THEN array_assertion_shrink_right]) + apply ceqv + + apply (simp del: Collect_const) + apply (rule ccorres_getSlotCap_cte_at) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (wpc, simp_all) + apply (rule ccorres_fail) + apply (simp add: ccap_relation_NullCap_iff cap_tag_defs) + apply (rule ccorres_split_throws) + apply (rule ccorres_Catch) + apply csymbr + apply (rule ccorres_cond_false) + apply (rule ccorres_cond_true) + apply simp + apply (rule ccorres_return_void_catchbrk) + apply (rule ccorres_return_void_C) + apply (vcg exspec=doReplyTransfer_modifies) + apply (rule ccorres_fail)+ + apply (wpc, simp_all) + apply (rule ccorres_fail) + apply (rule ccorres_split_throws) + apply (rule ccorres_Catch) + apply csymbr + apply (rule ccorres_cond_true) + apply (frule cap_get_tag_isCap_unfolded_H_cap(8)) + apply simp + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (frule cap_get_tag_ReplyCap) + apply (clarsimp simp: to_bool_def) + apply (csymbr, csymbr, csymbr) + apply simp + apply (rule ccorres_assert2) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg)) + apply (rule ccorres_return_void_catchbrk) + apply (rule ccorres_return_void_C) + apply wp + apply (vcg exspec=doReplyTransfer_modifies) + apply (rule ccorres_fail)+ + apply simp_all + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp')[1] + apply vcg + apply wp + apply vcg + apply clarsimp + apply (intro allI conjI impI, + simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs) + apply (rule tcb_aligned', rule tcb_at_invs', simp) + apply (auto simp: cte_wp_at_ctes_of valid_cap'_def + dest!: ctes_of_valid')[1] + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) 
+ apply (simp add: real_cte_tcbCallerSlot[OF pred_tcb_at']) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply (fastforce simp: ccap_relation_reply_helpers cap_get_tag_isCap isCap_simps)+ + done + +lemma deleteCallerCap_ccorres [corres]: + "ccorres dc xfdc + (\s. invs' s \ tcb_at' receiver s) + (UNIV \ {s. receiver_' s = tcb_ptr_to_ctcb_ptr receiver}) + [] + (deleteCallerCap receiver) + (Call deleteCallerCap_'proc)" + apply (cinit lift: receiver_') + apply (simp only: getThreadCallerSlot_def locateSlot_conv) + apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ + apply (rule_tac P="\_. is_aligned receiver tcbBlockSizeBits" and r'="\ a c. cte_Ptr a = c" + and xf'="callerSlot_'" and P'=UNIV in ccorres_split_nothrow_novcg) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def word_sle_def) + apply (frule is_aligned_neg_mask_eq) + apply (simp add: mask_def tcbCallerSlot_def Kernel_C.tcbCaller_def + cte_level_bits_def size_of_def) + apply (drule ptr_val_tcb_ptr_mask2) + apply (simp add: mask_def objBits_defs) + apply ceqv + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_symb_exec_r) + apply (ctac add: cteDeleteOne_ccorres[where w="ucast cap_reply_cap"]) + apply vcg + apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def + gs_set_assn_Delete_cstate_relation[unfolded o_def]) + apply (wp | simp)+ + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp)+ + apply clarsimp + apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def + ghost_assertion_data_set_def) + apply (clarsimp simp: cte_wp_at_ctes_of cap_get_tag_isCap[symmetric] + cap_tag_defs tcb_cnode_index_defs word_sle_def + tcb_aligned') + done + +(* FIXME: MOVE to Corres_C.thy *) +lemma ccorres_trim_redundant_throw_break: + "\ccorres_underlying rf_sr \ arrel axf arrel axf G G' (SKIP # hs) a c; + \s f. axf (global_exn_var_'_update f s) = axf s \ + \ ccorres_underlying rf_sr \ r xf arrel axf G G' (SKIP # hs) + a (c;; Basic (global_exn_var_'_update (\_. Break));; THROW)" + apply - + apply (rule ccorres_trim_redundant_throw') + apply simp + apply simp + apply simp + done + +lemma invs_valid_objs_strengthen: + "invs' s \ valid_objs' s" by fastforce + +lemma option_to_ctcb_ptr_valid_ntfn: + "valid_ntfn' ntfn s ==> (option_to_ctcb_ptr (ntfnBoundTCB ntfn) = NULL) = (ntfnBoundTCB ntfn = None)" + apply (cases "ntfnBoundTCB ntfn", simp_all add: option_to_ctcb_ptr_def) + apply (clarsimp simp: valid_ntfn'_def tcb_at_not_NULL) + done + + +lemma deleteCallerCap_valid_ntfn'[wp]: + "\\s. valid_ntfn' x s\ deleteCallerCap c \\rv s. valid_ntfn' x s\" + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift hoare_vcg_ball_lift hoare_vcg_imp_lift + | simp add: valid_ntfn'_def split: ntfn.splits)+ + apply auto + done + +lemma hoare_vcg_imp_liftE: + "\\P'\ f \\rv s. \ P rv s\, \E\; \Q'\ f \Q\, \E\\ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, \E\" + apply (simp add: validE_def valid_def split_def split: sum.splits) + done + + +lemma not_obj_at'_ntfn: + "(\obj_at' (P::Structures_H.notification \ bool) t s) = (\ typ_at' NotificationT t s \ obj_at' (Not \ P) t s)" + apply (simp add: obj_at'_real_def projectKOs typ_at'_def ko_wp_at'_def objBits_simps) + apply (rule iffI) + apply (clarsimp) + apply (case_tac ko) + apply (clarsimp)+ + done + +lemma handleRecv_ccorres: + notes rf_sr_upd_safe[simp del] + shows + "ccorres dc xfdc + (\s. invs' s \ st_tcb_at' simple' (ksCurThread s) s \ sch_act_sane s) + {s. 
isBlocking_' s = from_bool isBlocking} + [] + (handleRecv isBlocking) + (Call handleRecv_'proc)" + supply if_cong[cong] option.case_cong[cong] + apply (cinit lift: isBlocking_') + apply (rule ccorres_pre_getCurThread) + apply (ctac) + apply (simp add: catch_def) + apply (simp add: capFault_bindE) + apply (simp add: bindE_bind_linearise) + apply (rule_tac xf'=lu_ret___struct_lookupCap_ret_C_' + in ccorres_split_nothrow_case_sum) + apply (rule capFaultOnFailure_ccorres) + apply (ctac add: lookupCap_ccorres) + apply ceqv + apply clarsimp + apply (rule ccorres_Catch) + apply csymbr + apply (simp add: cap_get_tag_isCap del: Collect_const) + apply (clarsimp simp: cap_case_EndpointCap_NotificationCap + capFaultOnFailure_if_case_sum) + apply (rule ccorres_cond_both' [where Q=\ and Q'=\]) + apply clarsimp + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (simp add: case_bool_If capFaultOnFailure_if_case_sum) + apply (rule ccorres_if_cond_throws_break2 [where Q=\ and Q'=\]) + apply clarsimp + apply (simp add: cap_get_tag_isCap[symmetric] cap_get_tag_EndpointCap + del: Collect_const) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply (simp add: capFaultOnFailure_def rethrowFailure_def + handleE'_def throwError_def) + apply (rule ccorres_cross_over_guard[where P=\]) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_cross_over_guard[where P=\]) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_add_return2) + apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" + and Q="\_ _. True" and Q'="\_ _. UNIV"]) + apply (ctac add: handleFault_ccorres) + apply simp+ + apply ceqv + apply (rule ccorres_break_return) + apply simp+ + apply wp + apply (vcg exspec=handleFault_modifies) + + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_upd_safe) + + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_upd_safe) + + apply (simp add: liftE_bind) + apply (ctac) + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: receiveIPC_ccorres) + + apply (wp hoare_vcg_all_lift) + apply (rule conseqPost[where Q'=UNIV and A'="{}"], vcg exspec=deleteCallerCap_modifies) + apply (clarsimp dest!: rf_sr_ksCurThread) + apply simp + apply clarsimp + apply (vcg exspec=handleFault_modifies) + + apply (clarsimp simp: case_bool_If capFaultOnFailure_if_case_sum capFault_bindE) + apply (simp add: liftE_bindE bind_bindE_assoc bind_assoc) + apply (rule ccorres_cond_both' [where Q=\ and Q'=\]) + apply clarsimp + + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rename_tac thread epCPtr rv rva ntfnptr) + apply (rule_tac P="valid_cap' rv" in ccorres_cross_over_guard) + apply (simp only: capFault_injection injection_handler_If injection_liftE + injection_handler_throwError if_to_top_of_bind) + apply csymbr + apply (rule ccorres_abstract_cleanup) + apply csymbr + apply csymbr + apply (rule ccorres_if_lhs) + + apply (rule ccorres_pre_getNotification) + apply (rename_tac ntfn) + apply (rule_tac Q="valid_ntfn' ntfn and (\s. thread = ksCurThread s)" + and Q'="\s. 
ret__unsigned_longlonga = ptr_val (option_to_ctcb_ptr (ntfnBoundTCB ntfn))" + in ccorres_if_cond_throws_break2) + apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_NotificationCap + option_to_ctcb_ptr_valid_ntfn rf_sr_ksCurThread) + apply (auto simp: option_to_ctcb_ptr_def)[1] + apply (rule ccorres_rhs_assoc)+ + + apply (simp add: capFaultOnFailure_def rethrowFailure_def + handleE'_def throwError_def) + apply (rule ccorres_cross_over_guard[where P=\]) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_cross_over_guard[where P=\]) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_add_return2) + apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" + and Q="\_ _. True" and Q'="\_ _. UNIV"]) + apply (ctac add: handleFault_ccorres) + apply simp+ + apply ceqv + apply (rule ccorres_break_return) + apply simp+ + apply wp + apply (vcg exspec=handleFault_modifies) + + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_upd_safe) + + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_upd_safe) + + apply (simp add: liftE_bind) + apply (ctac add: receiveSignal_ccorres) + apply clarsimp + apply (vcg exspec=handleFault_modifies) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_split_throws) + apply (rule ccorres_rhs_assoc)+ + apply (simp add: capFaultOnFailure_def rethrowFailure_def + handleE'_def throwError_def) + apply (rule ccorres_cross_over_guard[where P=\]) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_cross_over_guard[where P=\]) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_add_return2) + apply (ctac add: handleFault_ccorres) + apply (rule ccorres_break_return[where P=\ and P'=UNIV]) + apply simp+ + apply wp + apply (vcg exspec=handleFault_modifies) + + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_upd_safe) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_upd_safe) + apply (vcg exspec=handleFault_modifies) + apply (simp add: capFaultOnFailure_def rethrowFailure_def + handleE'_def throwError_def) + + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_cross_over_guard[where P=\]) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_cross_over_guard[where P=\]) + apply (rule ccorres_symb_exec_r) + apply (ctac add: handleFault_ccorres) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_upd_safe) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_upd_safe) + + apply clarsimp + apply (rule ccorres_add_return2) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_cross_over_guard[where P=\]) + apply (rule ccorres_symb_exec_r) + apply (ctac add: handleFault_ccorres) + apply (rule ccorres_split_throws) + apply (rule ccorres_return_void_C) + apply vcg + apply wp + apply (vcg exspec=handleFault_modifies) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_upd_safe) + apply (wp) + apply clarsimp + apply (rename_tac thread epCPtr) + apply (rule_tac Q'="(\rv s. 
invs' s \ st_tcb_at' simple' thread s + \ sch_act_sane s \ thread = ksCurThread s + \ valid_cap' rv s)" in hoare_strengthen_postE_R[rotated]) + apply (intro conjI impI allI; clarsimp simp: sch_act_sane_def) + apply (fastforce dest: obj_at_valid_objs'[OF _ invs_valid_objs'] ko_at_valid_ntfn') + apply wp + apply clarsimp + apply (vcg exspec=isStopped_modifies exspec=lookupCap_modifies) + + apply wp + apply clarsimp + apply vcg + + apply (clarsimp simp add: sch_act_sane_def) + apply (simp add: cap_get_tag_isCap[symmetric] del: rf_sr_upd_safe) + apply (simp add: Kernel_C.capRegister_def AARCH64_H.capRegister_def ct_in_state'_def + AARCH64.capRegister_def C_register_defs + tcb_at_invs') + apply (frule invs_valid_objs') + apply (frule tcb_aligned'[OF tcb_at_invs']) + apply clarsimp + apply (intro conjI impI allI) + apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift + lookup_fault_missing_capability_lift is_cap_fault_def)+ + apply (clarsimp simp: cap_get_tag_NotificationCap) + apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption, erule ko_at_projectKO_opt) + apply (clarsimp simp: cnotification_relation_def Let_def) + apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift + lookup_fault_missing_capability_lift is_cap_fault_def)+ + apply (clarsimp simp: cap_get_tag_NotificationCap) + apply (simp add: ccap_relation_def to_bool_def) + apply (clarsimp simp: cap_get_tag_NotificationCap valid_cap'_def) + apply (drule obj_at_ko_at', clarsimp) + apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption, erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift + lookup_fault_missing_capability_lift is_cap_fault_def)+ + apply (case_tac w, clarsimp+) + done + +lemma handleYield_ccorres: + "ccorres dc xfdc + (invs' and ct_active') + UNIV + [] + (handleYield) + (Call handleYield_'proc)" + apply cinit + apply (rule ccorres_pre_getCurThread) + apply (ctac add: tcbSchedDequeue_ccorres) + apply (ctac add: tcbSchedAppend_ccorres) + apply (ctac add: rescheduleRequired_ccorres) + apply (wp weak_sch_act_wf_lift_linear tcbSchedAppend_valid_objs') + apply (vcg exspec= tcbSchedAppend_modifies) + apply (wp weak_sch_act_wf_lift_linear) + apply (vcg exspec= tcbSchedDequeue_modifies) + apply (clarsimp simp: tcb_at_invs' invs_valid_objs' + valid_objs'_maxPriority valid_objs'_maxDomain) + apply (auto simp: obj_at'_def st_tcb_at'_def ct_in_state'_def valid_objs'_maxDomain) + done + + +lemma getIRQState_sp: + "\P\ getIRQState irq \\rv s. rv = intStateIRQTable (ksInterruptState s) irq \ P s\" + apply (simp add: getIRQState_def getInterruptState_def) + apply wp + apply simp + done + +lemma ccorres_pre_getIRQState: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. irq \ ucast Kernel_C.maxIRQ \ P (intStateIRQTable (ksInterruptState s) irq) s) + {s. \rv. index (intStateIRQTable_' (globals s)) (unat irq) = irqstate_to_C rv \ s \ P' rv } + hs (getIRQState irq >>= (\rv. 
f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply (simp add: getIRQState_def getInterruptState_def) + apply wp + apply simp + apply (rule getIRQState_sp) + apply (simp add: getIRQState_def getInterruptState_def) + apply assumption + prefer 2 + apply (rule ccorres_guard_imp) + apply (rule cc) + apply simp + apply assumption + apply clarsimp + apply (erule allE) + apply (erule impE) + prefer 2 + apply assumption + apply (clarsimp simp: rf_sr_def cstate_relation_def + Let_def cinterrupt_relation_def) + done + +(* FIXME: move *) +lemma ccorres_ntfn_cases: + assumes P: "\p b send d. cap = NotificationCap p b send d \ ccorres r xf (P p b send d) (P' p b send d) hs (a cap) (c cap)" + assumes Q: "\isNotificationCap cap \ ccorres r xf (Q cap) (Q' cap) hs (a cap) (c cap)" + shows + "ccorres r xf (\s. (\p b send d. cap = NotificationCap p b send d \ P p b send d s) \ + (\isNotificationCap cap \ Q cap s)) + ({s. \p b send d. cap = NotificationCap p b send d \ s \ P' p b send d} \ + {s. \isNotificationCap cap \ s \ Q' cap}) + hs (a cap) (c cap)" + apply (cases "isNotificationCap cap") + apply (simp add: isCap_simps) + apply (elim exE) + apply (rule ccorres_guard_imp) + apply (erule P) + apply simp + apply simp + apply (rule ccorres_guard_imp) + apply (erule Q) + apply clarsimp + apply clarsimp + done + +(* FIXME: generalise the one in Interrupt_C *) +lemma getIRQSlot_ccorres2: + "ccorres ((=) \ Ptr) slot_' + \ UNIV hs + (getIRQSlot irq) (\slot :== CTypesDefs.ptr_add intStateIRQNode_Ptr (uint (ucast irq :: machine_word)))" + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: getIRQSlot_def liftM_def getInterruptState_def + locateSlot_conv) + apply (simp add: simpler_gets_def bind_def return_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cinterrupt_relation_def size_of_def + cte_level_bits_def mult.commute mult.left_commute ucast_nat_def) + done + +lemma getIRQSlot_ccorres3: + "(\rv. ccorresG rf_sr \ r xf (P rv) (P' rv) hs (f rv) c) \ + ccorresG rf_sr \ r xf + (\s. P (intStateIRQNode (ksInterruptState s) + 2 ^ cte_level_bits * of_nat (unat irq)) s) + {s. s \ P' (ptr_val (CTypesDefs.ptr_add intStateIRQNode_Ptr (uint (ucast irq :: machine_word))))} hs + (getIRQSlot irq >>= f) c" + apply (simp add: getIRQSlot_def locateSlot_conv liftM_def getInterruptState_def) + apply (intro ccorres_symb_exec_l'[OF _ _gets_sp] empty_fail_gets gets_inv) + apply (rule ccorres_guard_imp2, assumption) + by (clarsimp simp: cte_C_size cte_level_bits_def ucast_nat_def + rf_sr_def cstate_relation_def Let_def cinterrupt_relation_def + elim!: rsubst[of "\t. 
_ \ P' t"]) + +lemma scast_maxIRQ_less_eq: + fixes b :: irq + shows "(Kernel_C.maxIRQ 64) Kernel_C.maxIRQ < UCAST(irq_len \ 64) irq \ scast Kernel_C.maxIRQ < irq" + apply (clarsimp simp: scast_def Kernel_C.maxIRQ_def) + apply (subgoal_tac "LENGTH(irq_len) \ LENGTH(64)") + apply (drule less_ucast_ucast_less[where x= "0x17F" and y="irq"]) + by (simp)+ + +lemma validIRQcastingLess: + "Kernel_C.maxIRQ AARCH64.maxIRQ < b" + by (simp add: Platform_maxIRQ scast_maxIRQ_is_less is_up_def target_size source_size) + +lemma scast_maxIRQ_is_not_less: + fixes b :: irq + shows "\ Kernel_C.maxIRQ \ (scast Kernel_C.maxIRQ < b)" + by (simp add: scast_maxIRQ_less_eq) + +lemma ucast_maxIRQ_is_not_less: + "\ (SCAST(32 signed \ 64) Kernel_C.maxIRQ < UCAST(irq_len \ 64) irq) \ \ (scast Kernel_C.maxIRQ < irq)" + apply (clarsimp simp: scast_def Kernel_C.maxIRQ_def) + apply (subgoal_tac "LENGTH(irq_len) \ LENGTH(64)") + prefer 2 + apply simp + apply (erule notE) + using ucast_up_mono by fastforce + +lemma get_gic_vcpu_ctrl_misr_invs'[wp]: + "valid invs' (doMachineOp get_gic_vcpu_ctrl_misr) (\_. invs')" + by (simp add: get_gic_vcpu_ctrl_misr_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma get_gic_vcpu_ctrl_eisr1_invs'[wp]: + "valid invs' (doMachineOp get_gic_vcpu_ctrl_eisr1) (\_. invs')" + by (simp add: get_gic_vcpu_ctrl_eisr1_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma get_gic_vcpu_ctrl_eisr0_invs'[wp]: + "valid invs' (doMachineOp get_gic_vcpu_ctrl_eisr0) (\_. invs')" + by (simp add: get_gic_vcpu_ctrl_eisr0_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma virq_to_H_arrayp[simp]: + "virq_to_H (virq_C (ARRAY _. v)) = v" + by (simp add: virq_to_H_def) + +lemma virq_virq_active_set_virqEOIIRQEN_spec': + "\s. \ \ \s. virq_get_tag \virq = scast virq_virq_active\ + Call virq_virq_active_set_virqEOIIRQEN_'proc + \ \ret__struct_virq_C = virq_C (ARRAY _. virqSetEOIIRQEN (virq_to_H \<^bsup>s\<^esup>virq) \<^bsup>s\<^esup>v64) \" + apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) + apply vcg + apply (clarsimp simp: virq_to_H_def AARCH64_A.virqSetEOIIRQEN_def) + apply (case_tac virq) + apply clarsimp + apply (rule array_ext) + apply (clarsimp simp: virq_get_tag_def virq_tag_defs mask_def virq_type_def split: if_split) + done + +lemma virq_virq_invalid_set_virqEOIIRQEN_spec': + "\s. \ \ \s. virq_get_tag \virq = scast virq_virq_invalid\ + Call virq_virq_invalid_set_virqEOIIRQEN_'proc + \ \ret__struct_virq_C = virq_C (ARRAY _. virqSetEOIIRQEN (virq_to_H \<^bsup>s\<^esup>virq) \<^bsup>s\<^esup>v64) \" + apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) + apply vcg + apply (clarsimp simp: virq_to_H_def AARCH64_A.virqSetEOIIRQEN_def) + apply (case_tac virq) + apply clarsimp + apply (rule array_ext) + apply (clarsimp simp: virq_get_tag_def virq_tag_defs mask_def virq_type_def split: if_split) + done + +lemma virq_virq_pending_set_virqEOIIRQEN_spec': + "\s. \ \ \s. virq_get_tag \virq = scast virq_virq_pending\ + Call virq_virq_pending_set_virqEOIIRQEN_'proc + \ \ret__struct_virq_C = virq_C (ARRAY _. 
virqSetEOIIRQEN (virq_to_H \<^bsup>s\<^esup>virq) \<^bsup>s\<^esup>v64) \" + apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) + apply vcg + apply (clarsimp simp: virq_to_H_def AARCH64_A.virqSetEOIIRQEN_def) + apply (case_tac virq) + apply clarsimp + apply (rule array_ext) + apply (clarsimp simp: virq_get_tag_def virq_tag_defs mask_def virq_type_def split: if_split) + done + +lemma gic_vcpu_num_list_regs_cross_over: + "\ of_nat (armKSGICVCPUNumListRegs (ksArchState s)) = gic_vcpu_num_list_regs_' t; + valid_arch_state' s \ + \ gic_vcpu_num_list_regs_' t \ 0x3F" + apply (drule sym, simp) + apply (clarsimp simp: valid_arch_state'_def max_armKSGICVCPUNumListRegs_def) + apply (clarsimp simp: word_le_nat_alt unat_of_nat) + done + +lemma virqSetEOIIRQEN_id: + "\ virq_get_tag (virq_C (ARRAY _. idx)) \ scast virq_virq_active; + virq_get_tag (virq_C (ARRAY _. idx)) \ scast virq_virq_pending; + virq_get_tag (virq_C (ARRAY _. idx)) \ scast virq_virq_invalid \ + \ virqSetEOIIRQEN idx 0 = idx" + apply (clarsimp simp: AARCH64_A.virqSetEOIIRQEN_def virq_get_tag_def virq_tag_defs mask_def + virq_type_def + split: if_split) + apply (rule_tac x="idx >> 28" in two_bits_cases; simp) + done + +lemma vgicUpdateLR_ccorres_armHSCurVCPU: + "\ v' = v ; n' = n ; n \ 63 \ \ + ccorres dc xfdc + (\s. (\active. armHSCurVCPU (ksArchState s) = Some (vcpuptr,active) \ + vcpu_at' vcpuptr s)) + UNIV hs + (vgicUpdateLR vcpuptr n v) + (Guard C_Guard {s. s \\<^sub>c armHSCurVCPU_' (globals s)} + (Basic_heap_update + (\s. vgic_lr_C_Ptr &(vgic_C_Ptr &((armHSCurVCPU_' (globals s))\[''vgic_C''])\[''lr_C''])) + (\s. Arrays.update + (h_val (hrs_mem (t_hrs_' (globals s))) + (vgic_lr_C_Ptr &(vgic_C_Ptr &((armHSCurVCPU_' (globals s))\[''vgic_C''])\[''lr_C'']))) + n' (virq_C (ARRAY _. v')))))" + supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] + apply (rule ccorres_guard_imp) + apply (rule_tac P="\s. armHSCurVCPU (ksArchState s) \ None + \ vcpu_at' (fst (the (armHSCurVCPU (ksArchState s)))) s" + and P'=\ in ccorres_move_Guard) + apply clarsimp + apply (frule rf_sr_ksArchState_armHSCurVCPU) + apply (clarsimp simp: cur_vcpu_relation_def split: option.splits) + apply (fastforce simp: move_c_guard_vcpu[rule_format, simplified]) + apply (simp add: vgicUpdate_def vcpuUpdate_def vgicUpdateLR_def) + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply (rule_tac P="\s. armHSCurVCPU (ksArchState s) \ None + \ (fst (the (armHSCurVCPU (ksArchState s)))) = vcpuptr" + in ccorres_cross_over_guard) + apply (rule_tac P="ko_at' vcpu vcpuptr" + and P'="{s. armHSCurVCPU_' (globals s) = vcpu_Ptr vcpuptr }" in setObject_ccorres_helper + , rule conseqPre, vcg) + apply clarsimp + apply (rule cmap_relationE1[OF cmap_relation_vcpu] + ; (clarsimp simp: objBits_simps bit_simps)?) + apply (assumption, erule ko_at_projectKO_opt) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + cmachine_state_relation_def update_vcpu_map_to_vcpu + typ_heap_simps' cpspace_relation_def update_vcpu_map_tos) + apply (erule (1) cmap_relation_updI + ; clarsimp simp: cvcpu_relation_regs_def cvgic_relation_def cvcpu_vppi_masked_relation_def + ; (rule refl)?) 
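+  (* The leftover vgic LR goal presumably concerns only the updated entry: virq_to_H on a
+     constant-array virq_C is the identity on the stored value (cf. virq_to_H_arrayp), so
+     fastforce with virq_to_H_def and an if_split should close it. *)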
+ apply (fastforce simp: virq_to_H_def split: if_split) + apply (clarsimp simp add: objBits_simps bit_simps)+ + apply (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def split: option.splits) + done + +(* folded calculation of eisr used in vgicMaintenance *) +definition + eisr_calc :: "32 word \ 32 word \ nat" +where + "eisr_calc eisr0 eisr1 \ if eisr0 \ 0 then word_ctz eisr0 else word_ctz eisr1 + 32" + +lemma ccorres_vgicMaintenance: + notes Collect_const[simp del] + notes virq_virq_active_set_virqEOIIRQEN_spec = virq_virq_active_set_virqEOIIRQEN_spec' + notes virq_virq_invalid_set_virqEOIIRQEN_spec = virq_virq_invalid_set_virqEOIIRQEN_spec' + notes virq_virq_pending_set_virqEOIIRQEN_spec = virq_virq_pending_set_virqEOIIRQEN_spec' + shows + "ccorres dc xfdc + (\s. invs' s \ sch_act_not (ksCurThread s) s) + UNIV hs + vgicMaintenance (Call VGICMaintenance_'proc)" + (is "ccorres _ _ ?PRE _ _ _ _") +proof - + + (* There are multiple helper lemmas to cover all the different word casts in C. The "32 word" + comes from actual machine register sizes. "32 signed word" comes from integer constants, + "64 word" is machine_word, and "64 signed word" comes from the return value of ctzl. + The recurring 0x20/32 usually refer to the number of bits in a 32-bit word. *) + + have scast_mw_int_ctz_eq: + "SCAST(machine_word_len \ int_literal_len) (of_nat (word_ctz x)) = of_nat (word_ctz x)" + for x :: "32 word" + using word_ctz_le[of x] + by (subst scast_of_nat_small; simp add: word_ctz_le) + + have scast_int_mw_ctz_eq: + "SCAST(int_literal_len \ machine_word_len) (of_nat (word_ctz x)) = of_nat (word_ctz x)" + for x :: "32 word" + using word_ctz_le[of x] + by (subst scast_of_nat_small; simp add: word_ctz_le) + + have scast_s64_int_ctz_eq: + "SCAST(64 signed \ int_literal_len) (word_of_nat (word_ctz x)) = word_of_nat (word_ctz x)" + for x :: "32 word" + using word_ctz_le[of x] + by (subst scast_of_nat_small; simp add: word_ctz_le) + + have ctz_add_0x20_mw_int_eq: + "SCAST(machine_word_len \ int_literal_len) (word_of_nat (word_ctz x) + 0x20) + = word_of_nat (word_ctz x) + 0x20" for x :: "32 word" + proof - + have n32: "(0x20 :: machine_word) = of_nat 32" by simp + + show ?thesis + using word_ctz_le[of x] + apply (subst n32, subst of_nat_add[symmetric]) + apply (subst scast_of_nat_small; simp) + done + qed + + have ctz_add_0x20_unat_mw_eq: + "unat (word_of_nat (word_ctz x) + 0x20 :: machine_word) = word_ctz x + 0x20" + for x :: "32 word" + proof - + have n32: "(0x20 :: 64 word) = of_nat 32" by simp + + show ?thesis + using word_ctz_le[of x] + apply (subst n32, subst of_nat_add[symmetric]) + apply (subst unat_of_nat_eq; simp) + done + qed + + have ctz_add_0x20_int_mw_eq: + "SCAST(int_literal_len \ machine_word_len) (word_of_nat (word_ctz x) + 0x20) + = word_of_nat (word_ctz x) + 0x20" for x :: "32 word" + proof - + have n32: "(0x20 :: int_word) = of_nat 32" by simp + + show ?thesis + using word_ctz_le[of x] + apply (subst n32, subst of_nat_add[symmetric]) + apply (subst scast_of_nat_small; simp) + done + qed + + have ctz_add_0x20_s64_int_eq: + "SCAST(64 signed \ int_literal_len) (word_of_nat (word_ctz x) + 0x20) + = word_of_nat (word_ctz x) + 0x20" for x :: "32 word" + proof - + have n32: "(0x20 :: 64 signed word) = of_nat 32" by simp + + show ?thesis + using word_ctz_le[of x] + apply (subst n32, subst of_nat_add[symmetric]) + apply (subst scast_of_nat_small; simp) + done + qed + + have sint_s64_ctz_ge_0: + "0 \ sint (word_of_nat (word_ctz x) :: 64 signed word)" for x :: "32 word" + using 
word_ctz_le[of x] + by (simp add: signed_of_nat bit_iff_odd) + + have sint_s64_ctz_le_32: + "sint (word_of_nat (word_ctz x) :: 64 signed word) \ 32" for x :: "32 word" + using word_ctz_le[of x] + by (simp add: signed_of_nat signed_take_bit_int_eq_self) + + have sint_int_ctz_ge_0: + "0 \ sint (word_of_nat (word_ctz x) :: int_word)" for x :: "32 word" + using word_ctz_le[of x] + by (simp add: signed_of_nat bit_iff_odd) + + have sint_int_ctz_less_32: + "x \ 0 \ sint (word_of_nat (word_ctz x) :: int_word) < 32" for x :: "32 word" + by (drule word_ctz_less, simp add: signed_of_nat signed_take_bit_int_eq_self) + + have unat_of_nat_ctz_plus_32s: + "unat (of_nat (word_ctz w) + (0x20 :: int_word)) = word_ctz w + 32" for w :: "32 word" + apply (subst unat_add_lem' ; clarsimp simp: unat_of_nat_ctz_smw) + using word_ctz_le[where w=w, simplified] by (auto simp: unat_of_nat_eq) + + have unat_of_nat_ctz_plus_32: + "unat (of_nat (word_ctz w) + (0x20 :: 32 word)) = word_ctz w + 32" for w :: "32 word" + apply (subst unat_add_lem' ; clarsimp simp: unat_of_nat_ctz_mw) + using word_ctz_le[where w=w, simplified] by (auto simp: unat_of_nat_eq) + + have eisr_calc_le: + "eisr0 = 0 \ eisr1 \ 0 + \ eisr_calc eisr0 eisr1 \ 63" + for eisr0 and eisr1 + using word_ctz_le[where w=eisr0] word_ctz_less[where w=eisr1] + by (clarsimp simp: eisr_calc_def split: if_splits) + + have of_nat_word_ctz_0x21helper: + "0x21 + word_of_nat (word_ctz w) \ (0 :: int_word)" for w :: "32 word" + apply (subst unat_arith_simps, simp) + apply (subst unat_add_lem'; clarsimp simp: unat_of_nat_ctz_smw) + using word_ctz_le[where w=w, simplified] + by simp + + show ?thesis + supply if_cong[cong] + apply (cinit) + apply (rule ccorres_pre_getCurVCPU, rename_tac vcpuPtr_opt) + apply wpc + (* no current vcpu *) + apply (rule ccorres_cond_true_seq) + apply ccorres_rewrite + apply (rule ccorres_return_void_C) + (* have current vcpu *) + apply clarsimp + apply (rename_tac vcpuPtr active) + apply wpfix + apply wpc + (* handle inactive current vcpu first *) + prefer 2 + apply clarsimp + apply (rule ccorres_cond_true_seq) + apply ccorres_rewrite + apply (rule ccorres_return_void_C) + (* have active current vcpu, handle VGICMaintenance *) + apply clarsimp + apply (rule ccorres_cond_false_seq) + apply ccorres_rewrite + + apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_eisr0_ccorres) + apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_eisr1_ccorres) + apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_misr_ccorres) + apply (rule ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState[simplified comp_def], + rename_tac num_list_regs) + apply clarsimp + apply (rule ccorres_Cond_rhs_Seq ; (clarsimp simp: vgicHCREN_def)) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (fold eisr_calc_def) + apply (rule_tac xf'=irq_idx_' + and val="if eisr0 \ 0 + then of_nat (word_ctz eisr0) + else (if eisr1 \ 0 then of_nat (word_ctz eisr1 + 32) else -1)" + and R=\ and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply clarsimp + apply (rule conseqPre, vcg) + + + subgoal by (clarsimp simp: ucast_up_preserves_not0 word_ctz_upcast_id_32_64 scast_s64_int_ctz_eq + ctz_add_0x20_s64_int_eq sint_s64_ctz_ge_0 + order.trans[OF sint_s64_ctz_le_32]) + + apply ceqv + apply clarsimp + apply (simp add: if_to_top_of_bind) + apply (rule_tac C'="{s. eisr0 = 0 \ eisr1 = 0 + \ num_list_regs \ eisr_calc eisr0 eisr1}" + and Q="\s. num_list_regs \ 63" + and Q'="{s. 
gic_vcpu_num_list_regs_' (globals s) = of_nat num_list_regs}" + in ccorres_rewrite_cond_sr_Seq) + apply clarsimp + subgoal for _ eisr0 eisr1 + using word_ctz_not_minus_1[where w=eisr1] word_ctz_not_minus_1[where w=eisr0] + by (clarsimp split: if_splits simp: eisr_calc_def word_le_nat_alt unat_of_nat_eq + of_nat_eq_signed_scast of_nat_word_ctz_0x21helper + ctz_add_0x20_int_mw_eq scast_int_mw_ctz_eq + ctz_add_0x20_unat_mw_eq less_trans[OF word_ctz_less]) + apply (rule ccorres_Cond_rhs_Seq) + + (* check if current thread is runnable, if so handle fault *) + (* NOTE: this block repeats three times in the proof of ccorres_vgicMaintenance *) + apply clarsimp + apply (rule ccorres_inst[where P="?PRE" and P'=UNIV]) + subgoal for _ _ eisr0 eisr1 + apply (rule ccorres_guard_imp) + apply (rule ccorres_pre_getCurThread, rename_tac cthread) + apply (rule_tac P="tcb_at' cthread" in ccorres_cross_over_guard) + apply (rule ccorres_symb_exec_r) + apply (ctac) (* isRunnable *) + apply (clarsimp simp: when_def to_bool_def) + apply (rule ccorres_cond[where R=\], simp) + apply (ctac add: handleFault_ccorres) + apply (rule ccorres_return_Skip) + apply wpsimp + apply vcg + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply clarsimp + apply (clarsimp simp: pred_tcb_at'_def tcb_runnable_imp_simple tcb_at_invs') + apply clarsimp + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: cfault_rel_def seL4_Fault_VGICMaintenance_lift_def + seL4_Fault_lift_def seL4_Fault_tag_defs is_cap_fault_def Let_def + eisr_calc_def mask_eq_iff + split: if_splits + ; fastforce simp: uint_nat unat_of_nat_ctz_mw + dest: word_ctz_less[where w=eisr1] word_ctz_less[where w=eisr0]) + done + + apply (simp only:) (* rewrite if condition to False *) + apply (clarsimp simp: bind_assoc) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: get_gic_vcpu_ctrl_lr_ccorres) + apply (rename_tac virq virq') + apply clarsimp + + apply (prop_tac "(if eisr0 \ 0 then of_nat (word_ctz eisr0) + else if eisr1 \ 0 then of_nat (word_ctz eisr1) + 32 else - 1) + = (of_nat (eisr_calc eisr0 eisr1) :: (machine_word_len sword))") + apply (fastforce split: if_splits simp: eisr_calc_def) + + (* getting type and setting EOIIRQEN for valid virq types is captured by virqSetEOIIRQEN *) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=virq_' + and val="virq_C (ARRAY _. AARCH64_H.virqSetEOIIRQEN virq 0)" + and R=\ and R'="{s. virq_' s = virq_C (ARRAY _. virq)}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce simp: virqSetEOIIRQEN_id[simplified]) + apply ceqv + apply (ctac add: set_gic_vcpu_ctrl_lr_ccorres) + apply clarsimp + + (* we already know we have a current active vcpu from the haskell *) + (* FIXME: if we already know we have an active vcpu, there's no point having the C + code check for it *) + apply (rule_tac Q="\s. 
(armHSCurVCPU \ ksArchState) s = Some (vcpuPtr, True)" + and Q'=UNIV and C'=UNIV + in ccorres_rewrite_cond_sr_Seq) + apply (solves \clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU + simp: cur_vcpu_relation_def\) + apply ccorres_rewrite + apply (rule ccorres_split_nothrow) (* updating vgic LR of current vcpu *) + (* active *) + apply (rule ccorres_move_const_guards)+ + apply (rule vgicUpdateLR_ccorres_armHSCurVCPU ; clarsimp simp: word_ctz_le) + apply (fastforce dest: word_ctz_less + simp: eisr_calc_def unat_of_nat_ctz_smw unat_of_nat_ctz_plus_32s) + apply (erule eisr_calc_le) + apply ceqv + + (* check if current thread is runnable, if so handle fault *) + apply clarsimp + apply (rule ccorres_inst[where P="?PRE" and P'=UNIV]) + subgoal for _ _ eisr0 eisr1 + apply (rule ccorres_guard_imp) + apply (rule ccorres_pre_getCurThread, rename_tac cthread) + apply (rule_tac P="tcb_at' cthread" in ccorres_cross_over_guard) + apply (rule ccorres_symb_exec_r) + apply (ctac) (* isRunnable *) + apply (clarsimp simp: when_def to_bool_def) + apply (rule ccorres_cond[where R=\], simp) + apply (ctac add: handleFault_ccorres) + apply (rule ccorres_return_Skip) + apply wpsimp + apply vcg + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply clarsimp + apply (clarsimp simp: pred_tcb_at'_def tcb_runnable_imp_simple tcb_at_invs') + apply clarsimp + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: cfault_rel_def seL4_Fault_VGICMaintenance_lift_def + seL4_Fault_lift_def seL4_Fault_tag_defs is_cap_fault_def Let_def + eisr_calc_def and_mask_eq_iff_le_mask word_le_nat_alt + ctz_add_0x20_int_mw_eq scast_int_mw_ctz_eq ctz_add_0x20_unat_mw_eq + split: if_splits; + fastforce simp: unat_of_nat_eq mask_def + dest!: word_ctz_less[where w=eisr0] word_ctz_less[where w=eisr1]) + done + apply wpsimp + apply wps + apply wpsimp + apply clarsimp + apply vcg + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_all_lift hoare_vcg_imp_lift' + | assumption (* schematic asm *) + | clarsimp simp: conj_comms cong: conj_cong + | wps)+ + apply (vcg exspec=set_gic_vcpu_ctrl_lr_modifies) + apply clarsimp + apply (vcg exspec=set_gic_vcpu_ctrl_lr_modifies) + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_all_lift hoare_vcg_imp_lift' + | wps)+ + apply (vcg exspec=get_gic_vcpu_ctrl_lr_modifies) + apply clarsimp + apply vcg + + (* check if current thread is runnable, if so handle fault *) + apply (rule ccorres_inst[where P="?PRE" and P'=UNIV]) + subgoal for _ _ eisr0 eisr1 + apply (rule ccorres_guard_imp) + apply (rule ccorres_pre_getCurThread, rename_tac cthread) + apply (rule_tac P="tcb_at' cthread" in ccorres_cross_over_guard) + apply (rule ccorres_symb_exec_r) + apply (ctac) (* isRunnable *) + apply (clarsimp simp: when_def to_bool_def) + apply (rule ccorres_cond[where R=\], simp) + apply (ctac add: handleFault_ccorres) + apply (rule ccorres_return_Skip) + apply wpsimp + apply vcg + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply clarsimp + apply (clarsimp simp: pred_tcb_at'_def tcb_runnable_imp_simple tcb_at_invs') + apply clarsimp + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: cfault_rel_def seL4_Fault_VGICMaintenance_lift_def + seL4_Fault_lift_def seL4_Fault_tag_defs is_cap_fault_def Let_def + eisr_calc_def mask_eq_iff + split: if_splits + ; fastforce simp: uint_nat unat_of_nat_ctz_mw + dest: word_ctz_less[where w=eisr1] word_ctz_less[where w=eisr0]) + done + + (* clean up get_gic_vcpu_ctrl_misr postcondition *) + apply (wp hoare_vcg_all_lift) + apply (rule_tac Q="\_ s. 
?PRE s \ armHSCurVCPU (ksArchState s) = Some (vcpuPtr, active)" in hoare_post_imp) + apply clarsimp + subgoal for _ _ eisr0 eisr1 + apply (clarsimp simp: invs'_HScurVCPU_vcpu_at' valid_arch_state'_def max_armKSGICVCPUNumListRegs_def dest!: invs_arch_state') + using sint_ctz[where x=eisr0, simplified] + apply (clarsimp simp: word_sless_alt word_sle_eq sint_int_ctz_ge_0 split: if_split) + using sint_ctz[where x=eisr1, simplified] + apply (subst signed_arith_sint; clarsimp simp: word_size; simp add: sint_int_ctz_ge_0) + using sint_ctz[where x=eisr1, simplified] + apply (subst signed_arith_sint; clarsimp simp: word_size; simp add: sint_int_ctz_less_32) + done + apply clarsimp + apply wpsimp+ + apply (clarsimp simp: cur_vcpu_relation_def eisr_calc_def ucast_up_preserves_not0 + ctz_add_0x20_mw_int_eq scast_mw_int_ctz_eq + split: option.splits) + done +qed + +lemma vcpuUpdate_vppi_masked_ccorres_armHSCurVCPU: + "\ v' = from_bool v ; n' = fromEnum n \ \ + ccorres dc xfdc + (\s. (armHSCurVCPU (ksArchState s) \ None) \ + (fst (the (armHSCurVCPU (ksArchState s)))) = vcpuptr \ + vcpu_at' (fst (the (armHSCurVCPU (ksArchState s)))) s) + UNIV hs + (vcpuUpdate vcpuptr + (\vcpu. + vcpuVPPIMasked_update + (\_ a. if a = n then v else vcpuVPPIMasked vcpu a) vcpu)) + (Guard C_Guard {s. s \\<^sub>c armHSCurVCPU_' (globals s)} + (Basic_heap_update + (\s. vcpu_vppi_masked_C_Ptr &((armHSCurVCPU_' (globals s))\[''vppi_masked_C''])) + (\s. Arrays.update + (h_val (hrs_mem (t_hrs_' (globals s))) + ((vcpu_vppi_masked_C_Ptr &((armHSCurVCPU_' (globals s))\[''vppi_masked_C''])))) + n' v')))" + supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] + apply (rule ccorres_guard_imp) + apply (rule_tac P="\s. armHSCurVCPU (ksArchState s) \ None + \ vcpu_at' (fst (the (armHSCurVCPU (ksArchState s)))) s" + and P'=\ in ccorres_move_Guard) + apply clarsimp + apply (frule rf_sr_ksArchState_armHSCurVCPU) + apply (clarsimp simp: cur_vcpu_relation_def split: option.splits) + apply (fastforce simp: move_c_guard_vcpu[rule_format, simplified]) + apply (simp add: vgicUpdate_def vcpuUpdate_def vgicUpdateLR_def) + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply (rule_tac P="\s. armHSCurVCPU (ksArchState s) \ None + \ (fst (the (armHSCurVCPU (ksArchState s)))) = vcpuptr" + in ccorres_cross_over_guard) + apply (rule_tac P="ko_at' vcpu vcpuptr" + and P'="{s. armHSCurVCPU_' (globals s) = vcpu_Ptr vcpuptr }" in setObject_ccorres_helper + , rule conseqPre, vcg) + apply clarsimp + apply (rule cmap_relationE1[OF cmap_relation_vcpu] + ; (clarsimp simp: objBits_simps archObjSize_def bit_simps)?) + apply (assumption, erule ko_at_projectKO_opt) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + cmachine_state_relation_def update_vcpu_map_to_vcpu + typ_heap_simps' cpspace_relation_def update_vcpu_map_tos) + apply (erule (1) cmap_relation_updI + ; clarsimp simp: cvcpu_relation_regs_def cvgic_relation_def cvcpu_vppi_masked_relation_def + ; (rule refl)?) 
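+  (* The cvcpu_vppi_masked_relation goal presumably splits on whether the array index equals
+     fromEnum n: the updated index matches the new value directly, while every other index is
+     unchanged by Arrays.index_update2 together with vppievent_irq_noteq_fromEnum_mono. *)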
+ + apply (split if_split) + apply (rule conjI) + apply clarsimp + using maxBound_is_bound[where 'a=vppievent_irq, simplified fromEnum_maxBound_vppievent_irq_def] + apply - + apply (clarsimp simp: fromEnum_eq_iff less_eq_Suc_le fromEnum_eq_iff split: if_split) + apply (rule impI) + apply (subst Arrays.index_update2, simp) + apply (rule vppievent_irq_noteq_fromEnum_mono) + apply simp + apply blast + apply (clarsimp simp add: objBits_simps archObjSize_def bit_simps)+ + apply (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def split: option.splits) + done + +lemma ccorres_VPPIEvent: + notes Collect_const[simp del] + shows + "ccorres dc xfdc + (\s. invs' s \ sch_act_not (ksCurThread s) s \ irqVPPIEventIndex irq \ None) + \\irq = ucast irq\ hs + (vppiEvent irq) (Call VPPIEvent_'proc)" + (is "ccorres _ _ ?PRE _ _ _ _") +proof - + show ?thesis + apply (cinit lift: irq_') + apply (rule_tac P="irqVPPIEventIndex irq \ None" in ccorres_gen_asm) + apply (rule ccorres_pre_getCurVCPU, rename_tac vcpuPtr_opt) + apply wpc + (* no current vcpu *) + apply clarsimp + apply (rule ccorres_cond_false) + apply ccorres_rewrite + apply (rule ccorres_return_Skip) + (* have current vcpu *) + apply clarsimp + apply (rename_tac vcpuPtr active) + apply wpfix + apply wpc + (* handle inactive current vcpu first *) + prefer 2 + apply clarsimp + apply (rule ccorres_cond_false) + apply ccorres_rewrite + apply (rule ccorres_return_Skip) + (* have active current vcpu, handle VGICMaintenance *) + apply clarsimp + apply (rule ccorres_cond_true) + apply ccorres_rewrite + apply (rule ccorres_rhs_assoc)+ + apply (ctac (no_vcg) add: maskInterrupt_ccorres) + apply csymbr + (* updating vppi mask of current vcpu *) + apply (rule ccorres_split_nothrow) + apply clarsimp + apply (rule ccorres_move_const_guards)+ + apply (rule vcpuUpdate_vppi_masked_ccorres_armHSCurVCPU; clarsimp) + apply ceqv + + (* check if current thread is runnable, if so handle fault *) + apply (rule ccorres_inst[where P="?PRE" and P'=UNIV]) + subgoal for _ _ eisr0 eisr1 + apply (rule ccorres_guard_imp) + apply (rule ccorres_pre_getCurThread, rename_tac cthread) + apply (rule_tac P="tcb_at' cthread" in ccorres_cross_over_guard) + apply (rule ccorres_symb_exec_r) + apply (ctac) (* isRunnable *) + apply (clarsimp simp: when_def to_bool_def) + apply (rule ccorres_cond[where R=\], simp) + apply (ctac add: handleFault_ccorres) + apply (rule ccorres_return_Skip) + apply wpsimp + apply vcg + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply clarsimp + apply (clarsimp simp: pred_tcb_at'_def tcb_runnable_imp_simple tcb_at_invs') + apply clarsimp + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: cfault_rel_def seL4_Fault_VPPIEvent_lift_def + seL4_Fault_lift_def seL4_Fault_tag_defs is_cap_fault_def Let_def + mask_eq_iff + split: if_splits) + apply word_eqI_solve + done + + apply (wpsimp simp: vcpuUpdate_def wp: setVCPU_VPPIMasked_invs' | wps)+ + apply vcg + apply (wpsimp wp: dmo_maskInterrupt_True + simp: irqVPPIEventIndex_def IRQ_def fromEnum_def enum_vppievent_irq + split: if_splits + | wps)+ + apply (clarsimp simp: cur_vcpu_relation_def invs'_HScurVCPU_vcpu_at' irqVTimerEvent_def + word_le_nat_alt mask_def + split: option.splits) + done +qed + +lemma ccorres_handleReservedIRQ: + "ccorres dc xfdc + (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) + (UNIV \ {s. 
irq_' s = ucast irq}) hs + (handleReservedIRQ irq) (Call handleReservedIRQ_'proc)" + supply Collect_const[simp del] + apply (cinit lift: irq_') + apply (clarsimp simp: ucast_up_ucast is_up) + apply (rule ccorres_Cond_rhs_Seq) + apply (clarsimp simp: ucast_up_ucast is_up) + apply ccorres_rewrite + apply (prop_tac "irq = irqVGICMaintenance") + apply (clarsimp simp: ucast_up_ucast is_up irqVGICMaintenance_def) + apply (simp flip: word_unat.Rep_inject) + apply (simp add: irqVGICMaintenance_def irqVPPIEventIndex_def IRQ_def irqVTimerEvent_def) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: ccorres_vgicMaintenance) + apply (rule ccorres_return_void_C) + apply wpsimp + (* potentially handling VPPIEvent *) + apply (simp flip: word_unat.Rep_inject add: irqVGICMaintenance_def) + apply csymbr + apply (rule ccorres_when[where R=\]) + subgoal + by (clarsimp simp: irqVPPIEventIndex_def IRQ_def fromEnum_def enum_vppievent_irq + VPPIEventIRQ_invalid_def + simp flip: word_unat.Rep_inject split: if_splits) + apply clarsimp + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: ccorres_VPPIEvent) + apply (rule ccorres_return_void_C) + apply wpsimp + apply clarsimp + apply (clarsimp simp: irqVPPIEventIndex_def IRQ_def non_kernel_IRQs_def irqVGICMaintenance_def + ge_mask_eq + simp flip: word_unat.Rep_inject split: if_splits) + done + +lemma handleInterrupt_ccorres: + "ccorres dc xfdc + (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) + (UNIV \ \\irq = ucast irq\) + hs + (handleInterrupt irq) + (Call handleInterrupt_'proc)" + apply (cinit lift: irq_' cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: Platform_maxIRQ del: Collect_const) + apply (drule ucast_maxIRQ_is_less[simplified]) + apply (simp del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (subst doMachineOp_bind) + apply (rule maskInterrupt_empty_fail) + apply (rule ackInterrupt_empty_fail) + apply (ctac add: maskInterrupt_ccorres) + apply (subst bind_return_unit[where f="doMachineOp (ackInterrupt irq)"]) + apply (ctac add: ackInterrupt_ccorres) + apply (rule ccorres_split_throws) + apply (rule ccorres_return_void_C) + apply vcg + apply wp + apply (vcg exspec=ackInterrupt_modifies) + apply wp + apply (vcg exspec=maskInterrupt_modifies) + apply (simp add: ucast_maxIRQ_is_not_less Platform_maxIRQ del: Collect_const) + apply (rule ccorres_pre_getIRQState) + apply wpc + apply simp + apply (rule ccorres_fail) + apply (simp add: bind_assoc cong: call_ignore_cong) + apply (rule ccorres_move_const_guards)+ + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule getIRQSlot_ccorres3) + apply (rule ccorres_getSlotCap_cte_at) + apply (rule_tac P="cte_at' rv" in ccorres_cross_over_guard) + apply (rule ccorres_Guard_Seq) + apply (rule ccorres_Guard_intStateIRQNode_array_Ptr) + apply (rule ccorres_move_array_assertion_irq ccorres_move_c_guard_cte)+ + apply ctac + apply csymbr + apply csymbr + apply (rule ccorres_ntfn_cases) + apply (clarsimp cong: call_ignore_cong simp del: Collect_const) + apply (rule_tac b=send in ccorres_case_bools) + apply simp + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: sendSignal_ccorres) + apply (simp add: maskIrqSignal_def) + apply (ctac (no_vcg) add: maskInterrupt_ccorres) + apply (ctac add: ackInterrupt_ccorres) + apply wp+ + 
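+  (* The next block appears to cover the IRQSignal case where the notification cap lacks the
+     send right, and then the remaining non-notification caps: on both sides this reduces to
+     maskIrqSignal, i.e. masking the interrupt and acknowledging it without signalling. *)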
apply (simp del: Collect_const) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_rhs_assoc)+ + apply csymbr+ + apply (rule ccorres_cond_false_seq) + apply (simp add: maskIrqSignal_def) + apply (ctac (no_vcg) add: maskInterrupt_ccorres) + apply (ctac add: ackInterrupt_ccorres) + apply wp + apply (rule_tac P=\ and P'="{s. ret__int_' s = 0 \ cap_get_tag cap \ scast cap_notification_cap}" in ccorres_inst) + apply (clarsimp simp: isCap_simps simp del: Collect_const) + apply (case_tac rva, simp_all del: Collect_const)[1] + prefer 3 + apply metis + apply ((simp add: maskIrqSignal_def, + rule ccorres_guard_imp2, + rule ccorres_cond_false_seq, simp, + rule ccorres_cond_false_seq, simp, + ctac (no_vcg) add: maskInterrupt_ccorres, + ctac (no_vcg) add: ackInterrupt_ccorres, + wp, simp)+) + apply (wpsimp wp: getSlotCap_wp) + apply simp + apply vcg + apply (simp add: bind_assoc) + apply (rule ccorres_move_const_guards)+ + apply (rule ccorres_cond_false_seq) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_rhs_assoc)+ + apply (ctac (no_vcg) add: timerTick_ccorres) + apply (ctac (no_vcg) add: resetTimer_ccorres) + apply (ctac add: ackInterrupt_ccorres ) + apply wp+ + apply (simp add: Platform_maxIRQ maxIRQ_def del: Collect_const) + apply (rule ccorres_move_const_guards)+ + apply (rule ccorres_cond_false_seq) + apply (rule ccorres_cond_false_seq) + apply (rule ccorres_cond_true_seq) + apply (ctac add: ccorres_handleReservedIRQ) + apply (ctac (no_vcg) add: ackInterrupt_ccorres) + apply wp + apply (vcg exspec=handleReservedIRQ_modifies) + apply (simp add: sint_ucast_eq_uint is_down uint_up_ucast is_up ) + apply (clarsimp simp: word_sless_alt word_less_alt word_le_def Kernel_C.maxIRQ_def + uint_up_ucast is_up_def + source_size_def target_size_def word_size + sint_ucast_eq_uint is_down is_up word_0_sle_from_less) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_ctes_of non_kernel_IRQs_def) + apply (clarsimp) + apply (clarsimp simp: Kernel_C.IRQTimer_def Kernel_C.IRQSignal_def + cte_wp_at_ctes_of ucast_ucast_b is_up) + apply (intro conjI impI) + apply clarsimp + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps') + apply (simp add: cap_get_tag_isCap) + apply (clarsimp simp: isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (frule cap_get_tag_to_H, assumption) + apply (clarsimp simp: to_bool_def) + apply (cut_tac un_ui_le[where b = "383::machine_word" and a = irq, + simplified word_size]) + apply (simp add: ucast_eq_0 is_up_def source_size_def + target_size_def word_size unat_gt_0 not_less + | subst array_assertion_abs_irq[rule_format, OF conjI])+ + apply (clarsimp simp: nat_le_iff) + apply (clarsimp simp: IRQReserved_def)+ + done +end + +end diff --git a/proof/crefine/AARCH64/TcbAcc_C.thy b/proof/crefine/AARCH64/TcbAcc_C.thy new file mode 100644 index 0000000000..5e517d69d8 --- /dev/null +++ b/proof/crefine/AARCH64/TcbAcc_C.thy @@ -0,0 +1,462 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory TcbAcc_C +imports Ctac_lemmas_C +begin + +context kernel +begin + +lemma ccorres_pre_threadGet: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (g rv) c" + shows "ccorres r xf + (\s. \tcb. ko_at' tcb p s \ P (f tcb) s) + ({s'. \tcb ctcb. cslift s' (tcb_ptr_to_ctcb_ptr p) = Some ctcb \ ctcb_relation tcb ctcb \ s' \ P' (f tcb)}) + hs (threadGet f p >>= (\rv. 
g rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule tg_sp') + apply simp + apply assumption + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply (frule obj_at_ko_at') + apply clarsimp + apply assumption + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply clarsimp + done + +lemma ccorres_pre_archThreadGet: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (g rv) c" + shows "ccorres r xf + (\s. \tcb. ko_at' tcb p s \ P (f (tcbArch tcb)) s) + ({s'. \tcb ctcb. cslift s' (tcb_ptr_to_ctcb_ptr p) = Some ctcb + \ ctcb_relation tcb ctcb \ s' \ P' (f (tcbArch tcb))}) + hs (archThreadGet f p >>= (\rv. g rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule atg_sp') + apply simp + apply assumption + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply (frule obj_at_ko_at') + apply clarsimp + apply assumption + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply clarsimp + done + +lemma threadGet_eq: + "ko_at' tcb thread s \ (f tcb, s) \ fst (threadGet f thread s)" + unfolding threadGet_def + apply (simp add: liftM_def in_monad) + apply (rule exI [where x = tcb]) + apply simp + apply (subst getObject_eq) + apply simp + apply (simp add: objBits_simps') + apply assumption + apply simp + done + +lemma archThreadGet_eq: + "ko_at' tcb thread s \ (f (tcbArch tcb), s) \ fst (archThreadGet f thread s)" + unfolding archThreadGet_def + apply (simp add: liftM_def in_monad) + apply (rule exI [where x = tcb]) + apply simp + apply (subst getObject_eq) + apply simp + apply (simp add: objBits_simps') + apply assumption + apply simp + done + +lemma get_tsType_ccorres[corres]: + "ccorres (\r r'. r' = thread_state_to_tsType r) ret__unsigned_longlong_' (tcb_at' thread) + ({s. f s = tcb_ptr_to_ctcb_ptr thread} \ + {s. cslift s (Ptr &(f s\[''tcbState_C''])) = Some (thread_state_' s)}) [] + (getThreadState thread) (Call thread_state_get_tsType_'proc)" + unfolding getThreadState_def + apply (rule ccorres_from_spec_modifies [where P=\, simplified]) + apply (rule thread_state_get_tsType_spec) + apply (rule thread_state_get_tsType_modifies) + apply simp + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + apply (rule bexI [rotated, OF threadGet_eq], assumption) + apply simp + apply (drule ctcb_relation_thread_state_to_tsType) + apply simp + done + +lemma threadGet_obj_at2: + "\\\ threadGet f thread \\v. obj_at' (\t. f t = v) thread\" + apply (rule hoare_post_imp) + prefer 2 + apply (rule tg_sp') + apply simp + done + +lemma register_from_H_less: + "register_from_H hr < 37" + by (cases hr, simp_all add: "StrictC'_register_defs") + +lemma register_from_H_sless: + "UCAST(register_idx_len \ int_literal_len) (register_from_H hr) int_literal_len) (register_from_H hr)" + by (cases hr, simp_all add: "StrictC'_register_defs" word_sless_def word_sle_def) + +lemma getRegister_ccorres [corres]: + "ccorres (=) ret__unsigned_long_' \ + ({s. thread_' s = tcb_ptr_to_ctcb_ptr thread} \ {s. reg_' s = register_from_H reg}) [] + (asUser thread (getRegister reg)) (Call getRegister_'proc)" + apply (unfold asUser_def) + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l [where Q="\u. obj_at' (\t. (atcbContextGet o tcbArch) t = u) thread" and + Q'="\rv. {s. thread_' s = tcb_ptr_to_ctcb_ptr thread} \ {s. 
reg_' s = register_from_H reg}"]) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre) + apply vcg + apply clarsimp + apply (drule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps register_from_H_less register_from_H_sless) + apply (clarsimp simp: getRegister_def typ_heap_simps) + apply (rule_tac x = "((user_regs o atcbContextGet o tcbArch) ko reg, \)" in bexI [rotated]) + apply (simp add: in_monad' asUser_def select_f_def split_def) + apply (subst arg_cong2 [where f = "(\)"]) + defer + apply (rule refl) + apply (erule threadSet_eq) + apply (clarsimp simp: ctcb_relation_def ccontext_relation_def cregs_relation_def carch_tcb_relation_def) + apply (wp threadGet_obj_at2)+ + apply simp + apply simp + apply (erule obj_atE') + apply (clarsimp simp: projectKOs ) + apply (subst fun_upd_idem) + apply (case_tac ko) + apply clarsimp + apply simp + done + +lemma getRestartPC_ccorres [corres]: + "ccorres (=) ret__unsigned_long_' \ \\thread = tcb_ptr_to_ctcb_ptr thread\ [] + (asUser thread getRestartPC) (Call getRestartPC_'proc)" + unfolding getRestartPC_def + apply (cinit') + apply (rule ccorres_add_return2, ctac) + apply (rule ccorres_return_C, simp+)[1] + apply wp + apply vcg + apply (simp add: scast_id) + done + +lemma threadSet_corres_lemma: + assumes spec: "\s. \\ \s. P s\ Call f {t. Q s t}" + and mod: "modifies_heap_spec f" + and rl: "\\ x t ko. \(\, x) \ rf_sr; Q x t; x \ P'; ko_at' ko thread \\ + \ (\\ksPSpace := (ksPSpace \)(thread \ KOTCB (g ko))\, + t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" + and g: "\s x. \tcb_at' thread s; x \ P'; (s, x) \ rf_sr\ \ P x" + shows "ccorres dc xfdc (tcb_at' thread) P' [] (threadSet g thread) (Call f)" + apply (rule ccorres_Call_call_for_vcg) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre) + apply (rule HoarePartial.ProcModifyReturnNoAbr [where return' = "\s t. t\ globals := globals s\t_hrs_' := t_hrs_' (globals t) \\"]) + apply (rule HoarePartial.ProcSpecNoAbrupt [OF _ _ spec]) + apply (rule subset_refl) + apply vcg + prefer 2 + apply (rule mod) + apply (clarsimp simp: mex_def meq_def) + apply clarsimp + apply (rule conjI) + apply (erule (2) g) + apply clarsimp + apply (frule obj_at_ko_at') + apply clarsimp + apply (rule bexI [rotated]) + apply (erule threadSet_eq) + apply simp + apply (rule_tac x1 = "t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\" in iffD1 [OF rf_sr_upd], simp_all)[1] + apply (erule (3) rl) + done + + +lemma threadSet_ccorres_lemma4: + "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := (ksPSpace s)(thread \ injectKOS (F tcb))\, s') \ rf_sr}; + \s s' tcb tcb'. 
\ (s, s') \ rf_sr; P tcb; ko_at' tcb thread s; + cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some tcb'; + ctcb_relation tcb tcb'; P' s ; s' \ R\ \ s' \ Q s tcb \ + \ ccorres dc xfdc (obj_at' (P :: tcb \ bool) thread and P') R hs (threadSet F thread) c" + apply (rule ccorres_from_vcg) + apply (rule allI) + apply (case_tac "obj_at' P thread \") + apply (drule obj_at_ko_at', clarsimp) + apply (rule conseqPre, rule conseqPost) + apply assumption + apply clarsimp + apply (rule rev_bexI, rule threadSet_eq) + apply assumption + apply simp + apply simp + apply clarsimp + apply (drule(1) obj_at_cslift_tcb, clarsimp) + apply simp + apply (rule hoare_complete') + apply (simp add: cnvalid_def nvalid_def) (* pretty *) + done + +lemmas threadSet_ccorres_lemma3 = threadSet_ccorres_lemma4[where R=UNIV] + +lemmas threadSet_ccorres_lemma2 + = threadSet_ccorres_lemma3[where P'=\] + +lemma is_aligned_tcb_ptr_to_ctcb_ptr: + "obj_at' (P :: tcb \ bool) p s + \ is_aligned (ptr_val (tcb_ptr_to_ctcb_ptr p)) ctcb_size_bits" + apply (clarsimp simp: obj_at'_def objBits_simps' projectKOs + tcb_ptr_to_ctcb_ptr_def ctcb_offset_defs) + apply (erule aligned_add_aligned, simp_all add: word_bits_conv) + apply (simp add: is_aligned_def) + done + +lemma sanitiseRegister_spec: + "\s t v r. \ \ ({s} \ \\v = v\ \ \\reg = register_from_H r\ \ \\archInfo = from_bool t\) + Call sanitiseRegister_'proc + \\ret__unsigned_long = sanitiseRegister t r v\" + apply vcg + by (case_tac r; simp add: C_register_defs sanitiseRegister_def) + +lemma ctcb_relation_tcbVCPU: + "ctcb_relation t ko' \ tcbVCPU_C (tcbArch_C ko') = option_to_ptr (atcbVCPUPtr (tcbArch t))" + unfolding ctcb_relation_def carch_tcb_relation_def ccontext_relation_def + by clarsimp + +lemma valid_tcb'_vcpuE [elim_format]: + "valid_tcb' t s \ atcbVCPUPtr (tcbArch t) = Some v \ vcpu_at' v s" + unfolding valid_tcb'_def valid_arch_tcb'_def + by auto + +lemma rf_sr_ksArchState_armHSCurVCPU: + "(s, s') \ rf_sr \ cur_vcpu_relation (armHSCurVCPU (ksArchState s)) (armHSCurVCPU_' (globals s')) (armHSVCPUActive_' (globals s'))" + by (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def Let_def) + +lemma ccorres_pre_getCurVCPU: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. (armHSCurVCPU \ ksArchState) s = rv \ P rv s)) + {s. \rv vcpu' act'. armHSCurVCPU_' (globals s) = vcpu' \ + armHSVCPUActive_' (globals s) = act' \ + cur_vcpu_relation rv vcpu' act' + \ s \ P' rv } + hs (gets (armHSCurVCPU \ ksArchState) >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp + apply (rule hoare_gets_sp) + apply (clarsimp simp: empty_fail_def getCurThread_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply (clarsimp simp: rf_sr_ksArchState_armHSCurVCPU) + done + +lemma getObject_tcb_wp': + "\\s. \t. ko_at' (t :: tcb) p s \ Q t s\ getObject p \Q\" + by (clarsimp simp: getObject_def valid_def in_monad + split_def objBits_simps' loadObject_default_def + projectKOs obj_at'_def in_magnitude_check) + +lemma ccorres_pre_getObject_tcb: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\tcb. ko_at' tcb p s \ P tcb s)) + {s. \ tcb tcb'. cslift s (tcb_ptr_to_ctcb_ptr p) = Some tcb' \ ctcb_relation tcb tcb' + \ s \ P' tcb} + hs (getObject p >>= (\rv :: tcb. 
f rv)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_guard_imp2) + apply (rule cc) + apply (rule conjI) + apply (rule_tac Q="ko_at' rv p s" in conjunct1) + apply assumption + apply assumption + apply (wpsimp wp: empty_fail_getObject getObject_tcb_wp')+ + apply (erule cmap_relationE1[OF cmap_relation_tcb], + erule ko_at_projectKO_opt) + apply simp + done + +lemma ccorres_pre_getObject_vcpu: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\vcpu. ko_at' vcpu p s \ P vcpu s)) + {s. \ vcpu vcpu'. cslift s (vcpu_Ptr p) = Some vcpu' \ cvcpu_relation vcpu vcpu' + \ s \ P' vcpu} + hs (getObject p >>= (\rv :: vcpu. f rv)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_guard_imp2) + apply (rule cc) + apply (rule conjI) + apply (rule_tac Q="ko_at' rv p s" in conjunct1) + apply assumption + apply assumption + apply (wpsimp wp: getVCPU_wp empty_fail_getObject)+ + apply (erule cmap_relationE1 [OF cmap_relation_vcpu], + erule ko_at_projectKO_opt) + apply simp + done + +lemma armHSCurVCPU_update_active_ccorres: + "b' = from_bool b \ v' = fst v \ + ccorres dc xfdc (\s. armHSCurVCPU (ksArchState s) = Some v) UNIV hs + (modifyArchState (armHSCurVCPU_update (\_. Some (v', b)))) + (Basic (\s. globals_update (armHSVCPUActive_'_update (\_. b')) s))" + apply (clarsimp simp: modifyArchState_def) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + by (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def + cmachine_state_relation_def + split: bool.split) + +lemma armHSCurVCPU_update_curv_ccorres: + "ccorres dc xfdc (\s. ((armHSCurVCPU (ksArchState s)) = Some (v, a)) \ v \ 0 \ new \ 0) UNIV hs + (modifyArchState (armHSCurVCPU_update (\_. Some (new, a)))) + (Basic (\s. globals_update (armHSCurVCPU_'_update (\_. Ptr new)) s))" + apply (clarsimp simp: modifyArchState_def) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + by (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def + cmachine_state_relation_def + split: bool.split) + +lemma armHSCurVCPU_update_ccorres: + "ccorres dc xfdc (\_. cur_vcpu_relation curv vcpu act) UNIV hs + (modifyArchState (armHSCurVCPU_update (\_. curv))) + (Basic (\s. globals_update (armHSCurVCPU_'_update (\_. vcpu)) s);; + Basic (\s. globals_update (armHSVCPUActive_'_update (\_. act)) s))" + apply (clarsimp simp: modifyArchState_def) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + by (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def + cmachine_state_relation_def + split: bool.split) + +lemmas armHSCurVCPU_update_active_ccorres2 = armHSCurVCPU_update_ccorres[where curv="Some (v, b)" for v b] + +lemma invs'_HScurVCPU_vcpu_at': + "\invs' s; armHSCurVCPU (ksArchState s) = Some (a, b) \ \ vcpu_at' a s" +by (fastforce dest: invs_arch_state' simp: valid_arch_state'_def vcpu_at_is_vcpu' ko_wp_at'_def split: option.splits) + +crunches vcpuDisable, vcpuRestore, vcpuSave, vcpuEnable + for ksArch[wp]: "\s. 
P (ksArchState s)" + (wp: crunch_wps) + +(* FIXME: move *) +lemma vcpu_at_c_guard: + "\vcpu_at' p s; (s, s') \ rf_sr\ \ c_guard (vcpu_Ptr p)" + by (fastforce intro: typ_heap_simps dest!: vcpu_at_ko vcpu_at_rf_sr) + +(* FIXME move *) +lemma ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. (armKSGICVCPUNumListRegs \ ksArchState) s = rv \ P rv s)) + {s. \rv vcpu' act'. of_nat rv = gic_vcpu_num_list_regs_' (globals s) + \ s \ P' rv } + hs (gets (armKSGICVCPUNumListRegs \ ksArchState) >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule gets_sp) + apply (clarsimp simp: empty_fail_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply (simp add: rf_sr_armKSGICVCPUNumListRegs) + done + +(* FIXME: MOVE, probably to CSpace_RAB *) +lemma ccorres_gen_asm2_state: + assumes rl: "\s. P s \ ccorres r xf G G' hs a c" + shows "ccorres r xf G (G' \ {s. P s}) hs a c" +proof (rule ccorres_guard_imp2) + show "ccorres r xf G (G' \ {_. \s. P s}) hs a c" + apply (rule ccorres_gen_asm2) + apply (erule exE) + apply (erule rl) + done +next + fix s s' + assume "(s, s') \ rf_sr" and "G s" and "s' \ G' \ {s. P s}" + thus "G s \ s' \ (G' \ {_. \s. P s})" + by (clarsimp elim!: exI simp: Collect_const_mem) +qed + +lemma cap_case_TCBCap2: + "(case cap of ThreadCap pd + \ f pd | _ \ g) + = (if isThreadCap cap + then f (capTCBPtr cap) + else g)" + by (simp add: isCap_simps + split: capability.split arch_capability.split) + +lemma length_of_msgRegisters: + "length AARCH64_H.msgRegisters = 4" + by (auto simp: msgRegisters_unfold) + +lemma setMRs_single: + "setMRs thread buffer [val] = do + _ \ asUser thread (setRegister register.X2 val); + return 1 + od" + apply (clarsimp simp: setMRs_def length_of_msgRegisters zipWithM_x_def zipWith_def split: option.splits) + apply (subst zip_commute, subst zip_singleton) + apply (simp add: length_of_msgRegisters length_0_conv[symmetric]) + apply (clarsimp simp: msgRegisters_unfold sequence_x_def) + done + +end +end diff --git a/proof/crefine/AARCH64/TcbQueue_C.thy b/proof/crefine/AARCH64/TcbQueue_C.thy new file mode 100644 index 0000000000..660712b7d7 --- /dev/null +++ b/proof/crefine/AARCH64/TcbQueue_C.thy @@ -0,0 +1,1398 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory TcbQueue_C +imports Ctac_lemmas_C +begin + +context kernel +begin + +lemma tcb_queueD: + assumes queue_rel: "tcb_queue_relation getNext getPrev mp queue qprev qhead" + and valid_ntfn: "distinct queue" + and in_queue: "tcbp \ set queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + shows "(if tcbp = hd queue then getPrev tcb = qprev + else (\n < (length queue) - 1. getPrev tcb = tcb_ptr_to_ctcb_ptr (queue ! n) + \ tcbp = queue ! Suc n)) + \ (if tcbp = last queue then getNext tcb = NULL + else (\n < (length queue) - 1. tcbp = queue ! n + \ getNext tcb = tcb_ptr_to_ctcb_ptr (queue ! 
Suc n)))" + (is "?prev tcb queue qprev \ ?next tcb queue") + using queue_rel in_queue valid_ntfn +proof (induct queue arbitrary: qprev qhead) + case Nil + thus ?case by simp +next + case (Cons tcb' tcbs qprev' qhead') + have "tcbp = tcb' \ tcbp \ set tcbs" using Cons.prems by simp + thus ?case + proof + assume tcbp: "tcbp = tcb'" + hence "?prev tcb (tcb' # tcbs) qprev'" + using Cons.prems cs_tcb by clarsimp + moreover + have "?next tcb (tcb' # tcbs)" + proof (cases "tcbs = []") + case True + thus ?thesis using tcbp Cons.prems cs_tcb by clarsimp + next + case False + hence "tcbp \ last tcbs" using tcbp Cons.prems by clarsimp + thus ?thesis using False tcbp Cons.prems cs_tcb + apply clarsimp + apply (rule exI [where x = 0]) + apply simp + apply (cases tcbs) + apply simp_all + done + qed + ultimately show ?thesis .. + next + assume tcbp: "tcbp \ set tcbs" + obtain tcb2 where cs_tcb2: "mp (tcb_ptr_to_ctcb_ptr tcb') = Some tcb2" + and rel2: "tcb_queue_relation getNext getPrev mp tcbs (tcb_ptr_to_ctcb_ptr tcb') (getNext tcb2)" + using Cons.prems + by clarsimp + + have ih: "?prev tcb tcbs (tcb_ptr_to_ctcb_ptr tcb') \ ?next tcb tcbs" + proof (rule Cons.hyps) + from Cons.prems show (* "\t\set tcbs. tcb_at' t s" + and *) "distinct tcbs" by simp_all + qed fact+ + + from tcbp Cons.prems have tcbp_not_tcb': "tcbp \ tcb'" by clarsimp + from tcbp have tcbsnz: "tcbs \ []" by clarsimp + hence hd_tcbs: "hd tcbs = tcbs ! 0" by (simp add: hd_conv_nth) + + show ?case + proof (rule conjI) + show "?prev tcb (tcb' # tcbs) qprev'" + using ih [THEN conjunct1] tcbp_not_tcb' hd_tcbs tcbsnz + apply (clarsimp split: if_split_asm) + apply fastforce + apply (rule_tac x = "Suc n" in exI) + apply simp + done + next + show "?next tcb (tcb' # tcbs)" + using ih [THEN conjunct2] tcbp_not_tcb' hd_tcbs tcbsnz + apply (clarsimp split: if_split_asm) + apply (rule_tac x = "Suc n" in exI) + apply simp + done + qed + qed +qed + +lemma tcb_queue_memberD: + assumes queue_rel: "tcb_queue_relation getNext getPrev (cslift s') queue qprev qhead" + and in_queue: "tcbp \ set queue" + and valid_ntfn: "\t\set queue. tcb_at' t s" + and rf_sr: "(s, s') \ rf_sr" + shows "\tcb. cslift s' (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + using assms + apply - + apply (drule (1) bspec) + apply (drule (1) tcb_at_h_t_valid) + apply (clarsimp simp add: h_t_valid_clift_Some_iff) + done + +lemma tcb_queue_valid_ptrsD: + assumes in_queue: "tcbp \ set queue" + and rf_sr: "(s, s') \ rf_sr" + and valid_ntfn: "\t\set queue. tcb_at' t s" "distinct queue" + and queue_rel: "tcb_queue_relation getNext getPrev (cslift s') queue NULL qhead" + shows "\tcb. 
cslift s' (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb + \ (getPrev tcb \ NULL \ s' \\<^sub>c (getPrev tcb) + \ getPrev tcb \ tcb_ptr_to_ctcb_ptr ` set queue) + \ (getNext tcb \ NULL \ s' \\<^sub>c (getNext tcb) + \ getNext tcb \ tcb_ptr_to_ctcb_ptr ` set queue)" + using assms + apply - + apply (frule (3) tcb_queue_memberD) + apply (elim exE) + apply (frule (3) tcb_queueD) + apply (auto intro!: tcb_at_h_t_valid elim!: bspec split: if_split_asm) + done + +lemma tcb_queue_relation_restrict0: + "set queue \ S \ tcb_queue_relation getNext getPrev mp queue qprev qhead = + tcb_queue_relation getNext getPrev (restrict_map mp (tcb_ptr_to_ctcb_ptr ` S)) queue qprev qhead" +proof (induct queue arbitrary: S qprev qhead) + case Nil thus ?case by simp +next + case (Cons tcb tcbs S' qprev' qhead') + thus ?case + using Cons by auto +qed + +lemma tcb_queue_relation_restrict: + "tcb_queue_relation getNext getPrev mp queue qprev qhead = + tcb_queue_relation getNext getPrev (restrict_map mp (tcb_ptr_to_ctcb_ptr ` set queue)) queue qprev qhead" + apply (rule tcb_queue_relation_restrict0) + apply simp + done + +lemma tcb_queue_relation_only_next_prev: + assumes mapeq: "option_map getNext \ mp = option_map getNext \ mp'" "option_map getPrev \ mp = option_map getPrev \ mp'" + shows "tcb_queue_relation getNext getPrev mp queue qprev qhead = tcb_queue_relation getNext getPrev mp' queue qprev qhead" +proof (induct queue arbitrary: qprev qhead) + case Nil thus ?case by simp +next + case (Cons tcb tcbs qprev' qhead') + thus ?case + apply clarsimp + apply (rule iffI) + apply clarsimp + apply (frule compD [OF mapeq(1)]) + apply clarsimp + apply (frule compD [OF mapeq(2)]) + apply clarsimp + apply clarsimp + apply (frule compD [OF mapeq(1) [symmetric]]) + apply clarsimp + apply (frule compD [OF mapeq(2) [symmetric]]) + apply clarsimp + done +qed + + +lemma tcb_queue_relation_cong: + assumes queuec: "queue = queue'" + and qpc: "qprev = qprev'" + and qhc: "qhead = qhead'" + and mpc: "\p. p \ tcb_ptr_to_ctcb_ptr ` set queue' \ mp p = mp' p" + shows "tcb_queue_relation getNext getPrev mp queue qprev qhead = + tcb_queue_relation getNext getPrev mp' queue' qprev' qhead'" (is "?LHS = ?RHS") +proof - + have "?LHS = tcb_queue_relation getNext getPrev (mp |` (tcb_ptr_to_ctcb_ptr ` set queue')) queue' qprev' qhead'" + by (simp add: queuec qpc qhc, subst tcb_queue_relation_restrict, rule refl) + + also have "\ = tcb_queue_relation getNext getPrev (mp' |` (tcb_ptr_to_ctcb_ptr ` set queue')) queue' qprev' qhead'" + by (simp add: mpc cong: restrict_map_cong) + + also have "\ = ?RHS" + by (simp add: tcb_queue_relation_restrict [symmetric]) + + finally show ?thesis . +qed + +lemma tcb_queue_relation'_cong: + assumes queuec: "queue = queue'" + and qhc: "qhead = qhead'" + and qpc: "qend = qend'" + and mpc: "\p. p \ tcb_ptr_to_ctcb_ptr ` set queue' \ mp p = mp' p" + shows "tcb_queue_relation' getNext getPrev mp queue qhead qend = + tcb_queue_relation' getNext getPrev mp' queue' qhead' qend'" (is "?LHS = ?RHS") +proof - + have "?LHS = tcb_queue_relation' getNext getPrev (mp |` (tcb_ptr_to_ctcb_ptr ` set queue')) queue' qhead' qend'" + by (clarsimp simp add: queuec qpc qhc tcb_queue_relation'_def , subst tcb_queue_relation_restrict, rule refl) + + also have "\ = tcb_queue_relation' getNext getPrev (mp' |` (tcb_ptr_to_ctcb_ptr ` set queue')) queue' qhead' qend'" + by (simp add: mpc cong: restrict_map_cong) + + also have "\ = ?RHS" + by (simp add: tcb_queue_relation'_def tcb_queue_relation_restrict [symmetric]) + + finally show ?thesis . 
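+  (* Descriptive note on the calculation above: tcb_queue_relation' only inspects the
+     heap map at the TCB pointers of the queue itself, so restricting both maps to
+     tcb_ptr_to_ctcb_ptr ` set queue' via tcb_queue_relation_restrict reduces the goal
+     to the pointwise agreement assumption mpc. *)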
+qed + +lemma tcb_queue_relation_not_NULL: + assumes tq: "tcb_queue_relation getNext getPrev mp queue qprev qhead" + and valid_ep: "\t\set queue. tcb_at' t s" + shows "\t \ set queue. tcb_ptr_to_ctcb_ptr t \ NULL" +proof (cases "queue = []") + case True thus ?thesis by simp +next + case False + show ?thesis + proof (rule ballI, rule notI) + fix t + assume tq: "t \ set queue" and "tcb_ptr_to_ctcb_ptr t = NULL" + hence "ctcb_ptr_to_tcb_ptr NULL \ set queue" + apply - + apply (erule subst) + apply simp + done + + with valid_ep(1) have "tcb_at' (ctcb_ptr_to_tcb_ptr NULL) s" .. + thus False + apply - + apply (drule tcb_at_not_NULL) + apply simp + done + qed +qed + +lemmas tcb_queue_relation_not_NULL' = bspec [OF tcb_queue_relation_not_NULL] + +lemma tcb_queue_relation_head_hd: + assumes tq: "tcb_queue_relation getNext getPrev mp queue qprev qhead" + and tcbs: "queue \ []" + shows "ctcb_ptr_to_tcb_ptr qhead = hd queue" + using assms + apply (cases queue) + apply simp + apply simp + done + +lemma tcb_queue_relation_next_not_NULL: + assumes tq: "tcb_queue_relation getNext getPrev mp queue qprev qhead" + and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" + and tcbs: "queue \ []" + shows "qhead \ NULL" +proof - + have "ctcb_ptr_to_tcb_ptr qhead \ set queue" using tq tcbs + by (simp add: tcb_queue_relation_head_hd) + + with tq valid_ep(1) have "tcb_ptr_to_ctcb_ptr (ctcb_ptr_to_tcb_ptr qhead) \ NULL" + by (rule tcb_queue_relation_not_NULL') + + thus ?thesis by simp +qed + +lemma tcb_queue_relation_ptr_rel: + assumes tq: "tcb_queue_relation getNext getPrev mp queue qprev qhead" + and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + and prev_not_queue: "ctcb_ptr_to_tcb_ptr qprev \ set queue" + and in_queue: "tcbp \ set queue" + shows "tcb_ptr_to_ctcb_ptr tcbp \ getNext tcb \ tcb_ptr_to_ctcb_ptr tcbp \ getPrev tcb + \ (getNext tcb \ NULL \ getNext tcb \ getPrev tcb)" + using tq valid_ep in_queue cs_tcb prev_not_queue + apply - + apply (frule (3) tcb_queueD) + apply (frule (2) tcb_queue_relation_not_NULL') + apply (simp split: if_split_asm) + apply (rule not_sym) + apply (rule notI) + apply simp + apply (clarsimp simp: inj_eq distinct_conv_nth) + apply (intro conjI impI) + apply (clarsimp simp: inj_eq distinct_conv_nth) + apply (rule not_sym) + apply clarsimp + apply clarsimp + apply (clarsimp simp: inj_eq) + apply (intro conjI impI) + apply (clarsimp simp: distinct_conv_nth) + apply (erule_tac s = "queue ! Suc n" in subst) + apply (clarsimp simp: distinct_conv_nth) + apply clarsimp + apply (case_tac "na = Suc n") + apply hypsubst + apply (clarsimp simp: distinct_conv_nth) + apply (clarsimp simp: distinct_conv_nth) + done + +lemma distinct_cons_nth: + assumes dxs: "distinct xs" + and ln: "n < length xs" + and x: "x \ set xs" + shows "(x # xs) ! n \ xs ! n" +proof + assume n: "(x # xs) ! n = xs ! n" + have ln': "n < length (x # xs)" using ln by simp + have sln: "Suc n < length (x # xs)" using ln by simp + + from n have "(x # xs) ! n = (x # xs) ! Suc n" by simp + moreover have "distinct (x # xs)" using dxs x by simp + ultimately show False + unfolding distinct_conv_nth + apply - + apply (drule spec, drule mp [OF _ ln']) + apply (drule spec, drule mp [OF _ sln]) + apply simp + done +qed + +lemma distinct_nth: + assumes dist: "distinct xs" + and ln: "n < length xs" + and lm: "m < length xs" + shows "(xs ! n = xs ! 
m) = (n = m)" + using dist ln lm + apply (cases "n = m") + apply simp + apply (clarsimp simp: distinct_conv_nth) + done + +lemma distinct_nth_cons: + assumes dist: "distinct xs" + and xxs: "x \ set xs" + and ln: "n < length xs" + and lm: "m < length xs" + shows "((x # xs) ! n = xs ! m) = (n = Suc m)" +proof (cases "n = Suc m") + case True + thus ?thesis by simp +next + case False + + have ln': "n < length (x # xs)" using ln by simp + have lm': "Suc m < length (x # xs)" using lm by simp + + have "distinct (x # xs)" using dist xxs by simp + thus ?thesis using False + unfolding distinct_conv_nth + apply - + apply (drule spec, drule mp [OF _ ln']) + apply (drule spec, drule mp [OF _ lm']) + apply clarsimp + done +qed + +lemma distinct_nth_cons': + assumes dist: "distinct xs" + and xxs: "x \ set xs" + and ln: "n < length xs" + and lm: "m < length xs" + shows "(xs ! n = (x # xs) ! m) = (m = Suc n)" +proof (cases "m = Suc n") + case True + thus ?thesis by simp +next + case False + + have ln': "Suc n < length (x # xs)" using ln by simp + have lm': "m < length (x # xs)" using lm by simp + + have "distinct (x # xs)" using dist xxs by simp + thus ?thesis using False + unfolding distinct_conv_nth + apply - + apply (drule spec, drule mp [OF _ ln']) + apply (drule spec, drule mp [OF _ lm']) + apply clarsimp + done +qed + +lemma nth_first_not_member: + assumes xxs: "x \ set xs" + and ln: "n < length xs" + shows "((x # xs) ! n = x) = (n = 0)" + using xxs ln + apply (cases n) + apply simp + apply clarsimp + done + +lemma tcb_queue_next_prev: + assumes qr: "tcb_queue_relation getNext getPrev mp queue qprev qhead" + and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" + and tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + and tcb': "mp (tcb_ptr_to_ctcb_ptr tcbp') = Some tcb'" + and tq: "tcbp \ set queue" "tcbp' \ set queue" + and prev_not_queue: "ctcb_ptr_to_tcb_ptr qprev \ set queue" + and tcbs: "tcbp \ tcbp'" + shows "(getNext tcb = tcb_ptr_to_ctcb_ptr tcbp') = + (getPrev tcb' = tcb_ptr_to_ctcb_ptr tcbp)" + using qr valid_ep prev_not_queue tq tcb tcb' tcbs + apply - + apply (frule (1) tcb_queueD) + apply (rule tq(1)) + apply (rule tcb) + apply (frule (1) tcb_queueD) + apply (rule tq(2)) + apply (rule tcb') + apply (cases queue) + apply simp + apply (cut_tac bspec [OF tcb_queue_relation_not_NULL, OF qr valid_ep(1) tq(1)]) + apply (cut_tac bspec [OF tcb_queue_relation_not_NULL, OF qr valid_ep(1) tq(2)]) + apply (simp add: inj_eq split: if_split_asm) + apply clarsimp + apply clarsimp + subgoal by (clarsimp simp: last_conv_nth distinct_nth distinct_nth_cons) + apply (clarsimp simp: last_conv_nth distinct_nth distinct_nth_cons) + apply (subgoal_tac "list ! Suc na \ tcbp'") + apply clarsimp + apply clarsimp + subgoal by (clarsimp simp: last_conv_nth distinct_nth distinct_nth_cons nth_first_not_member) + subgoal by (fastforce simp: last_conv_nth distinct_nth distinct_nth_cons nth_first_not_member) + subgoal by (clarsimp simp: last_conv_nth distinct_nth distinct_nth_cons distinct_nth_cons' nth_first_not_member) + by (fastforce simp: last_conv_nth distinct_nth distinct_nth_cons distinct_nth_cons' nth_first_not_member) + + +lemma null_not_in: + "\tcb_queue_relation getNext getPrev mp queue qprev qhead; \t\set queue. 
tcb_at' t s; distinct queue\ + \ ctcb_ptr_to_tcb_ptr NULL \ set queue" + apply - + apply (rule notI) + apply (drule (2) tcb_queue_relation_not_NULL') + apply simp + done + +lemma tcb_queue_relationI': + "\ tcb_queue_relation getNext getPrev hp queue NULL qhead; + qend = (if queue = [] then NULL else (tcb_ptr_to_ctcb_ptr (last queue))) \ + \ tcb_queue_relation' getNext getPrev hp queue qhead qend" + unfolding tcb_queue_relation'_def + by simp + +lemma tcb_queue_relationE': + "\ tcb_queue_relation' getNext getPrev hp queue qhead qend; + \ tcb_queue_relation getNext getPrev hp queue NULL qhead; + qend = (if queue = [] then NULL else (tcb_ptr_to_ctcb_ptr (last queue))) \ \ P \ \ P" + unfolding tcb_queue_relation'_def + by simp + +lemma tcb_queue_relation'_queue_rel: + "tcb_queue_relation' getNext getPrev hp queue qhead qend + \ tcb_queue_relation getNext getPrev hp queue NULL qhead" + unfolding tcb_queue_relation'_def + by simp + +lemma tcb_queue_singleton_iff: + assumes queue_rel: "tcb_queue_relation getNext getPrev mp queue NULL qhead" + and in_queue: "tcbp \ set queue" + and valid_ntfn: "\t\set queue. tcb_at' t s" "distinct queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + shows "(queue = [tcbp]) = (getNext tcb = NULL \ getPrev tcb = NULL)" +proof (rule iffI) + assume "queue = [tcbp]" + thus "(getNext tcb = NULL \ getPrev tcb = NULL)" using queue_rel cs_tcb + by clarsimp +next + assume asms: "getNext tcb = NULL \ getPrev tcb = NULL" + hence "hd queue = tcbp" using queue_rel valid_ntfn in_queue cs_tcb + apply - + apply (drule (3) tcb_queueD) + apply (rule classical) + apply clarsimp + apply (cut_tac x = "queue ! n" in bspec [OF tcb_queue_relation_not_NULL [OF queue_rel valid_ntfn(1)]]) + apply clarsimp + apply simp + done + moreover have "last queue = tcbp" using queue_rel valid_ntfn in_queue cs_tcb asms + apply - + apply (drule (3) tcb_queueD) + apply (rule classical) + apply clarsimp + apply (cut_tac x = "queue ! Suc n" in bspec [OF tcb_queue_relation_not_NULL [OF queue_rel valid_ntfn(1)]]) + apply clarsimp + apply simp + done + moreover have "queue \ []" using in_queue by clarsimp + ultimately show "queue = [tcbp]" using valid_ntfn in_queue + apply clarsimp + apply (simp add: hd_conv_nth last_conv_nth nth_eq_iff_index_eq) + apply (cases queue) + apply simp + apply simp + done +qed + + +lemma distinct_remove1_take_drop: + "\ distinct ls; n < length ls \ \ remove1 (ls ! n) ls = (take n ls) @ drop (Suc n) ls" +proof (induct ls arbitrary: n) + case Nil thus ?case by simp +next + case (Cons x xs n) + + show ?case + proof (cases n) + case 0 + thus ?thesis by simp + next + case (Suc m) + + hence "((x # xs) ! n) \ x" using Cons.prems by clarsimp + thus ?thesis using Suc Cons.prems by (clarsimp simp add: Cons.hyps) + qed +qed + + +definition + "upd_unless_null \ \p v f. if p = NULL then f else fun_upd f p (Some v)" + +lemma upd_unless_null_cong_helper: + "\p p' v mp S. \ p' \ tcb_ptr_to_ctcb_ptr ` S; ctcb_ptr_to_tcb_ptr p \ S \ \ (upd_unless_null p v mp) p' = mp p'" + unfolding upd_unless_null_def + apply simp + apply (intro impI conjI) + apply (erule imageE) + apply hypsubst + apply (simp only: ctcb_ptr_to_ctcb_ptr) + apply blast + done + +(* RISCV: tp renamed to tp' due to clash with register with same name *) +lemma tcbDequeue_update0: + assumes in_queue: "tcbp \ set queue" + and valid_ntfn: "\t\set queue. 
tcb_at' t s" "distinct queue" + and queue_rel: "tcb_queue_relation tn tp' mp queue qprev qhead" + and prev_not_queue: "ctcb_ptr_to_tcb_ptr qprev \ set queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + and f: "\v f g. tn (tn_update f v) = f (tn v) \ tp' (tp_update g v) = g (tp' v) + \ tn (tp_update f v) = tn v \ tp' (tn_update g v) = tp' v" + shows "tcb_queue_relation tn tp' + (upd_unless_null (tn tcb) (tp_update (\_. tp' tcb) (the (mp (tn tcb)))) + (upd_unless_null (tp' tcb) (tn_update (\_. tn tcb) (the (mp (tp' tcb)))) mp)) + (remove1 tcbp queue) + (if tcb_ptr_to_ctcb_ptr tcbp = qhead then tp' tcb else qprev) + (if tcb_ptr_to_ctcb_ptr tcbp = qhead then tn tcb else qhead)" + (is "tcb_queue_relation tn tp' ?mp (remove1 tcbp queue) (?qprev qprev qhead) (?qhead qhead)") + using queue_rel valid_ntfn prev_not_queue in_queue +proof (induct queue arbitrary: qprev qhead) + case Nil + thus ?case by simp +next + case (Cons tcb' tcbs qprev' qhead') + + have "tcbp = tcb' \ tcbp \ set tcbs" using Cons.prems by simp + thus ?case + proof + assume tcbp: "tcbp = tcb'" + hence qp: "qprev' = tp' tcb" and qh: "qhead' = tcb_ptr_to_ctcb_ptr tcb'" + using Cons.prems cs_tcb by auto + + from Cons.prems have tq: "tcb_queue_relation tn tp' mp tcbs (tcb_ptr_to_ctcb_ptr tcb') (tn tcb)" + using Cons.prems cs_tcb tcbp by clarsimp + + note ind_prems = Cons.prems + note ind_hyp = Cons.hyps + + show ?thesis + proof (cases tcbs) + case Nil thus ?thesis using Cons.prems tcbp cs_tcb by clarsimp + next + case (Cons tcbs_hd tcbss) + + have nnull: "tn tcb \ NULL" using tq + proof (rule tcb_queue_relation_next_not_NULL) + from ind_prems show "\t\set tcbs. tcb_at' t s" + and "distinct tcbs" by simp_all + show "tcbs \ []" using Cons by simp + qed + + from Cons ind_prems have "tcbs_hd \ set tcbss" by simp + hence mpeq: "\p. p \ tcb_ptr_to_ctcb_ptr ` set tcbss \ ?mp p = mp p" + using tq cs_tcb tcbp Cons nnull ind_prems + apply - + apply (subst upd_unless_null_cong_helper, assumption, clarsimp)+ + apply simp + done + + have "tcb_ptr_to_ctcb_ptr tcbp \ tn tcb \ tcb_ptr_to_ctcb_ptr tcbp \ tp' tcb + \ tn tcb \ tp' tcb" using tq cs_tcb ind_prems nnull + apply - + apply (drule (5) tcb_queue_relation_ptr_rel) + apply clarsimp + done + + hence "?mp (tcb_ptr_to_ctcb_ptr tcbs_hd) = Some (tp_update (\_. tp' tcb) (the (mp (tn tcb))))" + using qp qh tq cs_tcb tcbp Cons nnull + by (simp add: upd_unless_null_def) + + thus ?thesis using qp qh tq cs_tcb tcbp Cons nnull + apply (simp (no_asm) add: tcbp Cons split del: if_split) + apply (subst tcb_queue_relation_cong [OF refl refl refl mpeq]) + apply assumption + apply (clarsimp simp: f) + done + qed + next + assume inset: "tcbp \ set tcbs" + hence tcbp: "tcbp \ tcb'" using Cons.prems by clarsimp + + obtain tcb2 where cs_tcb2: "mp (tcb_ptr_to_ctcb_ptr tcb') = Some tcb2" + and rel2: "tcb_queue_relation tn tp' mp tcbs (tcb_ptr_to_ctcb_ptr tcb') (tn tcb2)" + and qh: "qhead' = tcb_ptr_to_ctcb_ptr tcb'" + and qp: "qprev' = tp' tcb2" + using Cons.prems + by clarsimp + + have nnull: "tcb_ptr_to_ctcb_ptr tcb' \ NULL" using Cons.prems + apply - + apply (erule (1) tcb_queue_relation_not_NULL') + apply simp + done + + have ih: "tcb_queue_relation tn tp' ?mp (remove1 tcbp tcbs) + (?qprev (tcb_ptr_to_ctcb_ptr tcb') (tn tcb2)) + (?qhead (tn tcb2))" using rel2 + proof (rule Cons.hyps) + from Cons.prems show "\t\set tcbs. 
tcb_at' t s" + and "distinct tcbs" + and "ctcb_ptr_to_tcb_ptr (tcb_ptr_to_ctcb_ptr tcb') \ set tcbs" by simp_all + qed fact + + have tcb_next: "tn tcb \ tcb_ptr_to_ctcb_ptr tcb'" + using Cons.prems tcb_queue_next_prev[OF Cons.prems(1), OF _ _ cs_tcb cs_tcb2] + tcbp qp[symmetric] + by auto + + show ?thesis using tcbp + proof (cases "tn tcb2 = tcb_ptr_to_ctcb_ptr tcbp") + case True + hence tcb_prev: "tp' tcb = tcb_ptr_to_ctcb_ptr tcb'" using Cons.prems cs_tcb2 cs_tcb not_sym [OF tcbp] + apply - + apply (subst tcb_queue_next_prev [symmetric], assumption+) + apply simp + apply simp + apply simp + apply (rule not_sym [OF tcbp]) + apply simp + done + + hence "?mp (tcb_ptr_to_ctcb_ptr tcb') = Some (tn_update (\_. tn tcb) tcb2)" + using tcb_next nnull cs_tcb2 unfolding upd_unless_null_def by simp + + thus ?thesis using tcbp cs_tcb qh qp True ih tcb_prev + by (simp add: inj_eq f) + next + case False + hence tcb_prev: "tp' tcb \ tcb_ptr_to_ctcb_ptr tcb'" + using Cons.prems cs_tcb2 cs_tcb not_sym [OF tcbp] + apply - + apply (subst tcb_queue_next_prev [symmetric], assumption+) + apply simp + apply simp + apply simp + apply (rule not_sym [OF tcbp]) + apply simp + done + hence "?mp (tcb_ptr_to_ctcb_ptr tcb') = Some tcb2" + using tcb_next nnull cs_tcb2 unfolding upd_unless_null_def by simp + + thus ?thesis using tcbp cs_tcb qh qp False ih tcb_prev + by (simp add: inj_eq) + qed + qed +qed + +(* RISCV: tp renamed to tp' due to clash with register with same name *) +lemma tcbDequeue_update: + assumes queue_rel: "tcb_queue_relation' tn tp' mp queue qhead qend" + and in_queue: "tcbp \ set queue" + and valid_ntfn: "\t\set queue. tcb_at' t s" "distinct queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + and f: "\v f g. tn (tn_update f v) = f (tn v) \ tp' (tp_update g v) = g (tp' v) + \ tn (tp_update f v) = tn v \ tp' (tn_update g v) = tp' v" + shows "tcb_queue_relation' tn tp' + (upd_unless_null (tn tcb) (tp_update (\_. tp' tcb) (the (mp (tn tcb)))) + (upd_unless_null (tp' tcb) (tn_update (\_. tn tcb) (the (mp (tp' tcb)))) mp)) + (remove1 tcbp queue) + (if tp' tcb = NULL then tn tcb else qhead) + (if tn tcb = NULL then tp' tcb else qend)" +proof - + have ne: "NULL = (if tcb_ptr_to_ctcb_ptr tcbp = qhead then tp' tcb else NULL)" + using queue_rel in_queue cs_tcb + apply - + apply (drule tcb_queue_relation'_queue_rel) + apply (clarsimp split: if_split) + apply (cases queue) + apply simp + apply clarsimp + done + + have if2: "(if tp' tcb = NULL then tn tcb else qhead) = + (if tcb_ptr_to_ctcb_ptr tcbp = qhead then tn tcb else qhead)" + using tcb_queue_relation'_queue_rel [OF queue_rel] in_queue cs_tcb valid_ntfn + apply - + apply (cases queue) + apply simp + apply (frule (3) tcb_queueD) + apply (simp add: inj_eq) + apply (intro impI) + apply simp + apply (elim conjE exE) + apply (cut_tac x = "queue ! n" + in bspec [OF tcb_queue_relation_not_NULL [OF tcb_queue_relation'_queue_rel [OF queue_rel] valid_ntfn(1)]]) + apply (rule nth_mem) + apply clarsimp + apply clarsimp + done + + note null_not_in' = null_not_in [OF tcb_queue_relation'_queue_rel [OF queue_rel] valid_ntfn(1) valid_ntfn(2)] + + show ?thesis + proof (rule tcb_queue_relationI') + show "tcb_queue_relation tn tp' + (upd_unless_null (tn tcb) + (tp_update (\_. tp' tcb) (the (mp (tn tcb)))) + (upd_unless_null (tp' tcb) + (tn_update (\_. 
tn tcb) (the (mp (tp' tcb)))) mp)) + (remove1 tcbp queue) NULL + (if tp' tcb = NULL then tn tcb else qhead)" + using in_queue valid_ntfn tcb_queue_relation'_queue_rel [OF queue_rel] null_not_in' cs_tcb + by (subst ne, subst if2, rule tcbDequeue_update0[rotated -1, OF f]) + next + have r1: "(remove1 tcbp queue = []) = (tn tcb = NULL \ tp' tcb = NULL)" + using in_queue tcb_queue_relation'_queue_rel [OF queue_rel] cs_tcb valid_ntfn null_not_in' + apply - + apply (subst tcb_queue_singleton_iff [symmetric], assumption+) + apply (fastforce simp: remove1_empty) + done + have "queue \ []" using in_queue by clarsimp + thus "(if tn tcb = NULL then tp' tcb else qend) = + (if remove1 tcbp queue = [] then NULL else tcb_ptr_to_ctcb_ptr (last (remove1 tcbp queue)))" + using queue_rel in_queue cs_tcb valid_ntfn + tcb_queue_relation_not_NULL [OF tcb_queue_relation'_queue_rel [OF queue_rel] valid_ntfn(1)] + apply - + apply (erule tcb_queue_relationE') + apply (frule (3) tcb_queueD) + apply (subst r1) + apply simp + apply (intro impI conjI) + apply (subgoal_tac "tcbp = last queue") + apply simp + apply (subgoal_tac "(remove1 (last queue) queue) \ []") + apply (clarsimp simp: inj_eq last_conv_nth nth_eq_iff_index_eq length_remove1 + distinct_remove1_take_drop split: if_split_asm) + apply arith + apply (clarsimp simp: remove1_empty last_conv_nth hd_conv_nth nth_eq_iff_index_eq not_le split: if_split_asm) + apply (cases queue) + apply simp + apply simp + apply (fastforce simp: inj_eq split: if_split_asm) + apply (clarsimp simp: last_conv_nth distinct_remove1_take_drop nth_eq_iff_index_eq inj_eq split: if_split_asm) + apply arith + apply (simp add: nth_append min_def nth_eq_iff_index_eq) + apply clarsimp + apply arith + done + qed +qed + +lemmas tcbEPDequeue_update + = tcbDequeue_update[where tn=tcbEPNext_C and tn_update=tcbEPNext_C_update + and tp'=tcbEPPrev_C and tp_update=tcbEPPrev_C_update, + simplified] + +lemma tcb_queue_relation_ptr_rel': + assumes tq: "tcb_queue_relation getNext getPrev mp queue NULL qhead" + and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + and in_queue: "tcbp \ set queue" + shows "tcb_ptr_to_ctcb_ptr tcbp \ getNext tcb \ tcb_ptr_to_ctcb_ptr tcbp \ getPrev tcb + \ (getNext tcb \ NULL \ getNext tcb \ getPrev tcb)" + using tq valid_ep cs_tcb null_not_in [OF tq valid_ep(1) valid_ep(2)] in_queue + by (rule tcb_queue_relation_ptr_rel) + +lemma tcb_queue_head_empty_iff: + "\ tcb_queue_relation getNext getPrev mp queue NULL qhead; \t \ set queue. tcb_at' t s \ \ + (qhead = NULL) = (queue = [])" + apply (rule classical) + apply (cases queue) + apply simp + apply (frule (1) tcb_queue_relation_not_NULL) + apply clarsimp + done + +lemma ctcb_ptr_to_tcb_ptr_aligned: + assumes al: "is_aligned (ctcb_ptr_to_tcb_ptr ptr) tcbBlockSizeBits" + shows "is_aligned (ptr_val ptr) ctcb_size_bits" +proof - + have "is_aligned (ptr_val (tcb_ptr_to_ctcb_ptr (ctcb_ptr_to_tcb_ptr ptr))) ctcb_size_bits" + unfolding tcb_ptr_to_ctcb_ptr_def using al + apply simp + apply (erule aligned_add_aligned) + apply (unfold ctcb_offset_defs, rule is_aligned_triv) + apply (simp add: word_bits_conv objBits_defs)+ + done + thus ?thesis by simp +qed + +lemma ctcb_size_bits_ge_4: "4 \ ctcb_size_bits" + by (simp add: ctcb_size_bits_def) + +lemma tcb_queue_relation_next_mask: + assumes tq: "tcb_queue_relation getNext getPrev mp queue NULL qhead" + and valid_ep: "\t\set queue. 
tcb_at' t s" "distinct queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + and in_queue: "tcbp \ set queue" + and bits: "bits \ ctcb_size_bits" + shows "ptr_val (getNext tcb) && ~~ mask bits = ptr_val (getNext tcb)" +proof (cases "(getNext tcb) = NULL") + case True + thus ?thesis by simp +next + case False + + hence "ctcb_ptr_to_tcb_ptr (getNext tcb) \ set queue" using assms + apply - + apply (drule (3) tcb_queueD) + apply (clarsimp split: if_split_asm) + done + + with valid_ep(1) have "tcb_at' (ctcb_ptr_to_tcb_ptr (getNext tcb)) s" .. + hence "is_aligned (ctcb_ptr_to_tcb_ptr (getNext tcb)) tcbBlockSizeBits" by (rule tcb_aligned') + hence "is_aligned (ptr_val (getNext tcb)) ctcb_size_bits" by (rule ctcb_ptr_to_tcb_ptr_aligned) + thus ?thesis using bits by (simp add: is_aligned_neg_mask) +qed + +lemma tcb_queue_relation_prev_mask: + assumes tq: "tcb_queue_relation getNext getPrev mp queue NULL qhead" + and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + and in_queue: "tcbp \ set queue" + and bits: "bits \ ctcb_size_bits" + shows "ptr_val (getPrev tcb) && ~~ mask bits = ptr_val (getPrev tcb)" +proof (cases "(getPrev tcb) = NULL") + case True + thus ?thesis by simp +next + case False + + hence "ctcb_ptr_to_tcb_ptr (getPrev tcb) \ set queue" using assms + apply - + apply (drule (3) tcb_queueD) + apply (clarsimp split: if_split_asm) + done + + with valid_ep(1) have "tcb_at' (ctcb_ptr_to_tcb_ptr (getPrev tcb)) s" .. + hence "is_aligned (ctcb_ptr_to_tcb_ptr (getPrev tcb)) tcbBlockSizeBits" by (rule tcb_aligned') + hence "is_aligned (ptr_val (getPrev tcb)) ctcb_size_bits" by (rule ctcb_ptr_to_tcb_ptr_aligned) + thus ?thesis using bits by (simp add: is_aligned_neg_mask) +qed + +lemma tcb_queue_relation'_next_mask: + assumes tq: "tcb_queue_relation' getNext getPrev mp queue qhead qend" + and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + and in_queue: "tcbp \ set queue" + and bits: "bits \ ctcb_size_bits" + shows "ptr_val (getNext tcb) && ~~ mask bits = ptr_val (getNext tcb)" + by (rule tcb_queue_relation_next_mask [OF tcb_queue_relation'_queue_rel], fact+) + +lemma tcb_queue_relation'_prev_mask: + assumes tq: "tcb_queue_relation' getNext getPrev mp queue qhead qend" + and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" + and cs_tcb: "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + and in_queue: "tcbp \ set queue" + and bits: "bits \ ctcb_size_bits" + shows "ptr_val (getPrev tcb) && ~~ mask bits = ptr_val (getPrev tcb)" + by (rule tcb_queue_relation_prev_mask [OF tcb_queue_relation'_queue_rel], fact+) + +lemma tcb_queue_relation_next_canonical: + assumes "tcb_queue_relation getNext getPrev mp queue NULL qhead" + assumes valid_ep: "\t\set queue. tcb_at' t s" + "distinct queue" + "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + "tcbp \ set queue" + assumes canon: "pspace_canonical' s" + shows "make_canonical (ptr_val (getNext tcb)) = ptr_val (getNext tcb)" +proof (cases "getNext tcb = NULL") + case True + thus ?thesis by simp +next + case False + hence "ctcb_ptr_to_tcb_ptr (getNext tcb) \ set queue" using assms + by (fastforce dest: tcb_queueD split: if_split_asm) + with valid_ep(1) + have tcb: "tcb_at' (ctcb_ptr_to_tcb_ptr (getNext tcb)) s" .. 
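+    (* Descriptive note: pspace_canonical' gives a canonical address for the TCB pointer,
+       and tcb_aligned' gives tcbBlockSizeBits alignment; canonical_address_ctcb_ptr then
+       transfers canonicity to the C-level pointer (TCB pointer plus ctcb_offset), so
+       make_canonical is the identity on it. *)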
+ with canon + have "canonical_address (ctcb_ptr_to_tcb_ptr (getNext tcb))" + by (simp add: obj_at'_is_canonical) + moreover + have "is_aligned (ctcb_ptr_to_tcb_ptr (getNext tcb)) tcbBlockSizeBits" + using tcb by (rule tcb_aligned') + ultimately + have "canonical_address (ptr_val (getNext tcb))" + by (rule canonical_address_ctcb_ptr) + thus ?thesis + by (simp add: canonical_make_canonical_idem) +qed + +lemma tcb_queue_relation'_next_canonical: + "\ tcb_queue_relation' getNext getPrev mp queue qhead qend; \t\set queue. tcb_at' t s; + distinct queue; mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb; tcbp \ set queue; + pspace_canonical' s\ + \ make_canonical (ptr_val (getNext tcb)) = ptr_val (getNext tcb)" + by (rule tcb_queue_relation_next_canonical [OF tcb_queue_relation'_queue_rel]) + +lemma tcb_queue_relation_prev_canonical: + assumes "tcb_queue_relation getNext getPrev mp queue NULL qhead" + assumes valid_ep: "\t\set queue. tcb_at' t s" + "distinct queue" + "mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb" + "tcbp \ set queue" + assumes canon: "pspace_canonical' s" + shows "make_canonical (ptr_val (getPrev tcb)) = ptr_val (getPrev tcb)" +proof (cases "getPrev tcb = NULL") + case True + thus ?thesis by simp +next + case False + hence "ctcb_ptr_to_tcb_ptr (getPrev tcb) \ set queue" using assms + by (fastforce dest: tcb_queueD split: if_split_asm) + with valid_ep(1) + have tcb: "tcb_at' (ctcb_ptr_to_tcb_ptr (getPrev tcb)) s" .. + with canon + have "canonical_address (ctcb_ptr_to_tcb_ptr (getPrev tcb))" + by (simp add: obj_at'_is_canonical) + moreover + have "is_aligned (ctcb_ptr_to_tcb_ptr (getPrev tcb)) tcbBlockSizeBits" + using tcb by (rule tcb_aligned') + ultimately + have "canonical_address (ptr_val (getPrev tcb))" + by (rule canonical_address_ctcb_ptr) + thus ?thesis + by (simp add: canonical_make_canonical_idem) +qed + +lemma tcb_queue_relation'_prev_canonical: + "\ tcb_queue_relation' getNext getPrev mp queue qhead qend; \t\set queue. 
tcb_at' t s; + distinct queue; mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb; tcbp \ set queue; + pspace_canonical' s\ + \ make_canonical (ptr_val (getPrev tcb)) = ptr_val (getPrev tcb)" + by (rule tcb_queue_relation_prev_canonical [OF tcb_queue_relation'_queue_rel]) + +lemma ntfn_ep_disjoint: + assumes srs: "sym_refs (state_refs_of' s)" + and epat: "ko_at' ep epptr s" + and ntfnat: "ko_at' ntfn ntfnptr s" + and ntfnq: "isWaitingNtfn (ntfnObj ntfn)" + and epq: "isSendEP ep \ isRecvEP ep" + shows "set (epQueue ep) \ set (ntfnQueue (ntfnObj ntfn)) = {}" + using srs epat ntfnat ntfnq epq + apply - + apply (subst disjoint_iff_not_equal, intro ballI, rule notI) + apply (drule sym_refs_ko_atD', clarsimp)+ + apply clarsimp + apply (clarsimp simp: isWaitingNtfn_def isSendEP_def isRecvEP_def + split: ntfn.splits endpoint.splits) + apply (drule bspec, fastforce simp: ko_wp_at'_def)+ + apply (fastforce simp: ko_wp_at'_def refs_of_rev') + apply (drule bspec, fastforce simp: ko_wp_at'_def)+ + apply (fastforce simp: ko_wp_at'_def refs_of_rev') + done + +lemma ntfn_ntfn_disjoint: + assumes srs: "sym_refs (state_refs_of' s)" + and ntfnat: "ko_at' ntfn ntfnptr s" + and ntfnat': "ko_at' ntfn' ntfnptr' s" + and ntfnq: "isWaitingNtfn (ntfnObj ntfn)" + and ntfnq': "isWaitingNtfn (ntfnObj ntfn')" + and neq: "ntfnptr' \ ntfnptr" + shows "set (ntfnQueue (ntfnObj ntfn)) \ set (ntfnQueue (ntfnObj ntfn')) = {}" + using srs ntfnat ntfnat' ntfnq ntfnq' neq + apply - + apply (subst disjoint_iff_not_equal, intro ballI, rule notI) + apply (drule sym_refs_ko_atD', clarsimp)+ + apply clarsimp + apply (clarsimp simp: isWaitingNtfn_def split: ntfn.splits) + apply (drule bspec, fastforce simp: ko_wp_at'_def)+ + apply (clarsimp simp: ko_wp_at'_def refs_of_rev') + done + +lemma tcb_queue_relation'_empty[simp]: + "tcb_queue_relation' getNext getPrev mp [] qhead qend = + (qend = tcb_Ptr 0 \ qhead = tcb_Ptr 0)" + by (simp add: tcb_queue_relation'_def) + +lemma cnotification_relation_ntfn_queue: + fixes ntfn :: "Structures_H.notification" + defines "qs \ if isWaitingNtfn (ntfnObj ntfn) then set (ntfnQueue (ntfnObj ntfn)) else {}" + assumes ntfn: "cnotification_relation (cslift t) ntfn' b" + and srs: "sym_refs (state_refs_of' s)" + and koat: "ko_at' ntfn ntfnptr s" + and koat': "ko_at' ntfn' ntfnptr' s" + and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" + and neq: "ntfnptr' \ ntfnptr" + shows "cnotification_relation (cslift t') ntfn' b" +proof - + have rl: "\p. 
\ p \ tcb_ptr_to_ctcb_ptr ` set (ntfnQueue (ntfnObj ntfn')); + isWaitingNtfn (ntfnObj ntfn); isWaitingNtfn (ntfnObj ntfn')\ + \ cslift t p = cslift t' p" using srs koat' koat mpeq neq + apply - + apply (drule (3) ntfn_ntfn_disjoint [OF _ koat koat']) + apply (erule restrict_map_eqI [symmetric]) + apply (erule imageE) + apply (fastforce simp: disjoint_iff_not_equal inj_eq qs_def) + done + + show ?thesis using ntfn rl mpeq unfolding cnotification_relation_def + apply (simp add: Let_def) + apply (cases "ntfnObj ntfn'") + apply simp + apply simp + apply (cases "isWaitingNtfn (ntfnObj ntfn)") + apply (simp add: isWaitingNtfn_def cong: tcb_queue_relation'_cong) + apply (simp add: qs_def) + done +qed + +lemma cpspace_relation_ntfn_update_ntfn: + fixes ntfn :: "Structures_H.notification" + defines "qs \ if isWaitingNtfn (ntfnObj ntfn) then set (ntfnQueue (ntfnObj ntfn)) else {}" + assumes koat: "ko_at' ntfn ntfnptr s" + and invs: "invs' s" + and cp: "cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals t))" + and rel: "cnotification_relation (cslift t') ntfn' notification" + and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" + using koat invs cp rel + apply - + apply (subst map_comp_update) + apply (simp add: projectKO_opts_defs) + apply (frule ko_at_projectKO_opt) + apply (rule cmap_relationE1, assumption+) + apply (erule (3) cmap_relation_upd_relI) + apply (erule (1) cnotification_relation_ntfn_queue [OF _ invs_sym' koat]) + apply (erule (1) map_to_ko_atI') + apply (fold qs_def, rule mpeq) + apply assumption + apply simp + done + +lemma cendpoint_relation_upd_tcb_no_queues: + assumes cs: "mp thread = Some tcb" + and next_pres: "option_map tcbEPNext_C \ mp = option_map tcbEPNext_C \ mp'" + and prev_pres: "option_map tcbEPPrev_C \ mp = option_map tcbEPPrev_C \ mp'" + shows "cendpoint_relation mp a b = cendpoint_relation mp' a b" +proof - + show ?thesis + unfolding cendpoint_relation_def + apply (simp add: Let_def) + apply (cases a) + apply (simp add: tcb_queue_relation'_def tcb_queue_relation_only_next_prev [OF next_pres prev_pres, symmetric])+ + done +qed + +lemma cnotification_relation_upd_tcb_no_queues: + assumes cs: "mp thread = Some tcb" + and next_pres: "option_map tcbEPNext_C \ mp = option_map tcbEPNext_C \ mp'" + and prev_pres: "option_map tcbEPPrev_C \ mp = option_map tcbEPPrev_C \ mp'" + shows "cnotification_relation mp a b = cnotification_relation mp' a b" +proof - + show ?thesis + unfolding cnotification_relation_def + apply (simp add: Let_def) + apply (cases "ntfnObj a") + apply (simp add: tcb_queue_relation'_def tcb_queue_relation_only_next_prev [OF next_pres prev_pres, symmetric])+ + done +qed + +lemma cendpoint_relation_ntfn_queue: + assumes srs: "sym_refs (state_refs_of' s)" + and koat: "ko_at' ntfn ntfnptr s" + and iswaiting: "isWaitingNtfn (ntfnObj ntfn)" + and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` set (ntfnQueue (ntfnObj ntfn))))) + = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` set (ntfnQueue (ntfnObj ntfn)))))" + and koat': "ko_at' a epptr s" + shows "cendpoint_relation (cslift t) a b = cendpoint_relation (cslift t') a b" +proof - + have rl: "\p. 
\ p \ tcb_ptr_to_ctcb_ptr ` set (epQueue a); isSendEP a \ isRecvEP a \ + \ cslift t p = cslift t' p" using srs koat' koat iswaiting mpeq + apply - + apply (drule (4) ntfn_ep_disjoint) + apply (erule restrict_map_eqI [symmetric]) + apply (erule imageE) + apply (clarsimp simp: disjoint_iff_not_equal inj_eq) + done + + show ?thesis + unfolding cendpoint_relation_def using rl + apply (simp add: Let_def) + apply (cases a) + apply (simp add: isRecvEP_def cong: tcb_queue_relation'_cong) + apply simp + apply (simp add: isSendEP_def isRecvEP_def cong: tcb_queue_relation'_cong) + done +qed + +lemma cvariable_relation_upd_const: + "m x \ None + \ cvariable_array_map_relation (m (x \ y)) (\x. n) + = cvariable_array_map_relation m (\x. n)" + by (auto simp: fun_eq_iff cvariable_array_map_relation_def) + +lemma ptr_span_ctcb_subset: + "is_aligned p tcbBlockSizeBits \ ptr_span (tcb_ptr_to_ctcb_ptr p) \ {p .. p + 2^tcbBlockSizeBits-1}" + apply (simp add: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) + apply (frule aligned_add_aligned[where m=ctcb_size_bits, OF _ is_aligned_triv], + simp add: objBits_defs ctcb_size_bits_def) + apply (subst upto_intvl_eq'; clarsimp) + apply (erule is_aligned_no_wrap', simp add: ctcb_size_bits_def) + apply (rule conjI) + apply (erule is_aligned_no_wrap', simp add: objBits_defs ctcb_size_bits_def) + apply (cut_tac word_add_le_mono1[where k=p and j="2^tcbBlockSizeBits-1"]) + apply (simp add: field_simps) + apply (simp add: objBits_defs ctcb_size_bits_def) + apply (subst field_simps, subst unat_plus_simple[where x=p, THEN iffD1, symmetric]) + apply (erule is_aligned_no_overflow') + apply (rule unat_lt2p) + done + +(* FIXME: move *) +lemma tcb_at'_non_kernel_data_ref: + "pspace_domain_valid s \ tcb_at' p s \ ptr_span (tcb_ptr_to_ctcb_ptr p) \ kernel_data_refs = {}" + apply (rule disjoint_subset[OF ptr_span_ctcb_subset]) + apply (erule tcb_aligned') + apply (drule map_to_tcbs_from_tcb_at) + apply (clarsimp simp: pspace_domain_valid_def map_comp_def split: option.splits) + apply (drule spec, drule spec, drule (1) mp) + apply (simp add: objBits_simps add_mask_fold) + done + +lemmas tcb_at'_non_kernel_data_ref' + = tcb_at'_non_kernel_data_ref[OF invs'_pspace_domain_valid] + +(* FIXME: move near tag_disj_via_td_name *) +lemma tag_not_less_via_td_name: + assumes ta: "typ_name (typ_info_t TYPE('a)) \ pad_typ_name" + assumes tina: "typ_name (typ_info_t TYPE('a)) \ td_names (typ_info_t TYPE('b))" + shows "\ typ_uinfo_t TYPE('a::c_type) < typ_uinfo_t TYPE('b::c_type)" + using assms + by (auto simp: sub_typ_proper_def typ_tag_lt_def typ_simps dest: td_set_td_names) + +(* FIXME: move *) +lemma td_set_map_td_commute[rule_format]: + "\i. td_set (map_td f t) i = apfst (map_td f) ` td_set t i" + "\i. td_set_struct (map_td_struct f st) i = apfst (map_td f) ` td_set_struct st i" + "\i. td_set_list (map_td_list f ts) i = apfst (map_td f) ` td_set_list ts i" + "\i. td_set_pair (map_td_pair f tp') i = apfst (map_td f) ` td_set_pair tp' i" + apply (induct t and st and ts and tp') + apply simp_all + apply (case_tac dt_pair; clarsimp simp: image_Un) + done + +(* FIXME: move *) +lemma td_set_export_uinfo_eq: + "td_set (export_uinfo t) i = apfst export_uinfo ` td_set t i" + unfolding export_uinfo_def by (rule td_set_map_td_commute) + +(* FIXME: move *) +lemma td_set_adjust_ti_eq: + "td_set (adjust_ti t a b) i = apfst (\t. 
adjust_ti t a b) ` td_set t i" + unfolding adjust_ti_def by (rule td_set_map_td_commute) + +(* FIXME: move *) +lemma td_set_list_app: + "td_set_list (ts @ ts') i = td_set_list ts i \ td_set_list ts' (i + size_td_list ts)" + apply (induct ts arbitrary: i, simp) + apply (rename_tac p ps i, case_tac p, simp add: Un_assoc field_simps) + done + +(* FIXME: move *) +lemma apfst_comp: + "apfst f \ apfst g = apfst (f \ g)" + by auto + +lemma td_set_offset_wf[rule_format]: + fixes td :: "'a typ_desc" + and st :: "'a typ_struct" + and ts :: "('a typ_desc, char list) dt_pair list" + and tp :: "('a typ_desc, char list) dt_pair" + shows "\s n m. (s, n) \ td_set td m \ m \ n" + "\s n m. (s, n) \ td_set_struct st m \ m \ n" + "\s n m. (s, n) \ td_set_list ts m \ m \ n" + "\s n m. (s, n) \ td_set_pair tp m \ m \ n" + apply (induct td and st and ts and tp) + apply simp_all + apply (case_tac dt_pair; fastforce) + done + +lemma field_lookup_offset_wf[rule_format]: + fixes td :: "'a typ_desc" + and st :: "'a typ_struct" + and ts :: "('a typ_desc, char list) dt_pair list" + and tp :: "('a typ_desc, char list) dt_pair" + shows "\s n m f. field_lookup td f m = Some (s, n) \ m \ n" + "\s n m f. field_lookup_struct st f m = Some (s, n) \ m \ n" + "\s n m f. field_lookup_list ts f m = Some (s, n) \ m \ n" + "\s n m f. field_lookup_pair tp f m = Some (s, n) \ m \ n" + apply (induct td and st and ts and tp) + apply simp_all + apply (fastforce split: option.splits)+ + done + +lemma td_set_field_lookup_wf[rule_format]: + fixes td :: "'a typ_desc" + and st :: "'a typ_struct" + and ts :: "('a typ_desc, char list) dt_pair list" + and tp :: "('a typ_desc, char list) dt_pair" + shows "\k m. wf_desc td \ k \ td_set td m \ (\f. field_lookup td f m = Some k)" + "\k m. wf_desc_struct st \ k \ td_set_struct st m \ (\f. field_lookup_struct st f m = Some k)" + "\k m. wf_desc_list ts \ k \ td_set_list ts m \ (\f. field_lookup_list ts f m = Some k)" + "\k m. wf_desc_pair tp \ k \ td_set_pair tp m \ (\f. field_lookup_pair tp f m = Some k)" + using td_set_field_lookup'[of td st ts tp] + apply - + apply (clarsimp, frule td_set_offset_wf, drule spec, drule spec, drule spec, drule mp, + erule rsubst[where P="\n. (s,n) \ td_set" for s td_set], subst add_diff_inverse_nat, + simp add: not_less, simp, simp)+ + done + +lemma td_set_image_field_lookup: + "wf_desc td \ k \ f ` td_set td m \ (\fn. option_map f (field_lookup td fn m) = Some k)" + "wf_desc_struct st \ k \ f ` td_set_struct st m \ (\fn. option_map f (field_lookup_struct st fn m) = Some k)" + "wf_desc_list ts \ k \ f ` td_set_list ts m \ (\fn. option_map f (field_lookup_list ts fn m) = Some k)" + "wf_desc_pair tp' \ k \ f ` td_set_pair tp' m \ (\fn. option_map f (field_lookup_pair tp' fn m) = Some k)" + by (fastforce simp: image_def dest: td_set_field_lookup_wf)+ + +lemma field_lookup_td_set[rule_format]: + fixes td :: "'a typ_desc" + and st :: "'a typ_struct" + and ts :: "('a typ_desc, char list) dt_pair list" + and tp :: "('a typ_desc, char list) dt_pair" + shows "\k m f. field_lookup td f m = Some k \ k \ td_set td m" + "\k m f. field_lookup_struct st f m = Some k \ k \ td_set_struct st m" + "\k m f. field_lookup_list ts f m = Some k \ k \ td_set_list ts m" + "\k m f. field_lookup_pair tp f m = Some k \ k \ td_set_pair tp m" + using td_set_field_lookup_rev'[of td st ts tp] + apply - + apply (clarsimp, frule field_lookup_offset_wf, drule spec, drule spec, drule spec, drule mp, + rule exI, erule rsubst[where P="\n. 
f = Some (s,n)" for f s], subst add_diff_inverse_nat, + simp add: not_less, simp, simp)+ + done + +lemma field_lookup_list_Some: + assumes "wf_desc_list ts" + assumes "field_lookup_list ts (fn # fns') m = Some (s, n)" + shows "\td' m'. field_lookup_list ts [fn] m = Some (td', m') \ field_lookup td' fns' m' = Some (s, n)" + using assms + apply (induct ts arbitrary: m, simp) + apply (rename_tac tp ts m, case_tac tp) + apply (clarsimp split: if_splits option.splits simp: field_lookup_list_None) + done + +lemma field_lookup_Some_cases: + assumes "wf_desc td" + assumes "field_lookup td fns m = Some (s,n)" + shows "case fns of + [] \ s = td \ m = n + | fn # fns' \ \td' m'. field_lookup td [fn] m = Some (td',m') + \ field_lookup td' fns' m' = Some (s,n)" + using assms + apply (cases fns; simp) + apply (cases td, rename_tac fn fns' st tn, clarsimp) + apply (case_tac st; clarsimp simp: field_lookup_list_Some) + done + +lemma field_lookup_SomeE: + assumes lookup: "field_lookup td fns m = Some (s,n)" + assumes wf: "wf_desc td" + assumes nil: "\ fns = []; s = td; m = n \ \ P" + assumes some: "\fn fns' td' m'. \ fns = fn # fns'; field_lookup td [fn] m = Some (td',m'); + field_lookup td' fns' m' = Some (s,n) \ \ P" + shows P + using field_lookup_Some_cases[OF wf lookup] + by (cases fns) (auto simp add: nil some) + +lemmas typ_combine_simps = + ti_typ_pad_combine_def[where tag="TypDesc st tn" for st tn] + ti_typ_combine_def[where tag="TypDesc st tn" for st tn] + ti_pad_combine_def[where tag="TypDesc st tn" for st tn] + align_td_array' size_td_array + CompoundCTypes.field_names_list_def + empty_typ_info_def + final_pad_def padup_def + align_of_def + +bundle typ_combine_bundle = + typ_combine_simps[simp] + if_weak_cong[cong] + +schematic_goal tcb_C_typ_info_unfold: + "typ_info_t (?t :: tcb_C itself) = TypDesc ?st ?tn" + including typ_combine_bundle by (simp add: tcb_C_typ_info tcb_C_tag_def) + +schematic_goal arch_tcb_C_typ_info_unfold: + "typ_info_t (?t :: arch_tcb_C itself) = TypDesc ?st ?tn" + including typ_combine_bundle by (simp add: arch_tcb_C_typ_info arch_tcb_C_tag_def) + +schematic_goal user_context_C_typ_info_unfold: + "typ_info_t (?t :: user_context_C itself) = TypDesc ?st ?tn" + including typ_combine_bundle by (simp add: user_context_C_typ_info user_context_C_tag_def) + +lemma rf_sr_tcb_update_no_queue: + "\ (s, s') \ rf_sr; + ko_at' tcb thread s; + t_hrs_' (globals t) = hrs_mem_update (heap_update (tcb_ptr_to_ctcb_ptr thread) ctcb) + (t_hrs_' (globals s')); + tcbEPNext_C ctcb = tcbEPNext_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); + tcbEPPrev_C ctcb = tcbEPPrev_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); + (\x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x); + ctcb_relation tcb' ctcb + \ + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, + x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" + unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def + apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes + heap_to_user_data_def) + apply (frule (1) cmap_relation_ko_atD) + apply (erule obj_atE') + apply clarsimp + apply (clarsimp simp: map_comp_update projectKO_opt_tcb cvariable_relation_upd_const + typ_heap_simps') + apply (intro conjI) + subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_upd_tcb_no_queues, assumption+) + subgoal by fastforce + subgoal by fastforce + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_upd_tcb_no_queues, assumption+) + subgoal by fastforce + subgoal by fastforce + subgoal by (clarsimp simp: carch_state_relation_def typ_heap_simps') + by (simp add: cmachine_state_relation_def) + +lemmas rf_sr_tcb_update_no_queue2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue, simplified] + +lemma tcb_queue_relation_not_in_q: + "ctcb_ptr_to_tcb_ptr x \ set xs \ + tcb_queue_relation' nxtFn prvFn (hp(x := v)) xs start end + = tcb_queue_relation' nxtFn prvFn hp xs start end" + by (rule tcb_queue_relation'_cong, auto) + +lemma rf_sr_tcb_update_not_in_queue: + "\ (s, s') \ rf_sr; ko_at' tcb thread s; + t_hrs_' (globals t) = hrs_mem_update (heap_update + (tcb_ptr_to_ctcb_ptr thread) ctcb) (t_hrs_' (globals s')); + \ live' (KOTCB tcb); invs' s; + (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); + ctcb_relation tcb' ctcb \ + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, + x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" + unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def + apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes + heap_to_user_data_def live'_def) + apply (frule (1) cmap_relation_ko_atD) + apply (erule obj_atE') + apply (clarsimp) + apply (clarsimp simp: map_comp_update projectKO_opt_tcb cvariable_relation_upd_const + typ_heap_simps') + apply (subgoal_tac "\rf. \ ko_wp_at' (\ko. 
rf \ refs_of' ko) thread s") + prefer 2 + apply clarsimp + apply (auto simp: obj_at'_def ko_wp_at'_def)[1] + apply (intro conjI) + subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply clarsimp + apply (subgoal_tac "thread \ (fst ` ep_q_refs_of' a)") + apply (clarsimp simp: cendpoint_relation_def Let_def split: Structures_H.endpoint.split) + subgoal by (intro conjI impI allI, simp_all add: image_def tcb_queue_relation_not_in_q)[1] + apply (drule(1) map_to_ko_atI') + apply (drule sym_refs_ko_atD', clarsimp+) + subgoal by blast + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply clarsimp + apply (subgoal_tac "thread \ (fst ` ntfn_q_refs_of' (ntfnObj a))") + apply (clarsimp simp: cnotification_relation_def Let_def + split: ntfn.splits) + subgoal by (simp add: image_def tcb_queue_relation_not_in_q)[1] + apply (drule(1) map_to_ko_atI') + apply (drule sym_refs_ko_atD', clarsimp+) + subgoal by blast + apply (clarsimp simp: Let_def) + apply (simp add: carch_state_relation_def) + by (simp add: cmachine_state_relation_def) + +end +end diff --git a/proof/crefine/AARCH64/Tcb_C.thy b/proof/crefine/AARCH64/Tcb_C.thy new file mode 100644 index 0000000000..6b1bdbcd8d --- /dev/null +++ b/proof/crefine/AARCH64/Tcb_C.thy @@ -0,0 +1,4658 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Tcb_C +imports Delete_C Ipc_C +begin + +lemma getObject_sched: + "(x::tcb, s') \ fst (getObject t s) \ + (x,s'\ksSchedulerAction := ChooseNewThread\) \ fst (getObject t (s\ksSchedulerAction := ChooseNewThread\))" + apply (clarsimp simp: in_monad getObject_def split_def loadObject_default_def + magnitudeCheck_def projectKOs + split: option.splits) + done + +lemma threadGet_sched: + "(x, s') \ fst (threadGet t f s) \ + (x,s'\ksSchedulerAction := ChooseNewThread\) \ fst (threadGet t f (s\ksSchedulerAction := ChooseNewThread\))" + apply (clarsimp simp: in_monad threadGet_def liftM_def) + apply (drule getObject_sched) + apply fastforce + done + +lemma setObject_sched: + "(x, s') \ fst (setObject t (v::tcb) s) \ + (x, s'\ksSchedulerAction := ChooseNewThread\) \ fst (setObject t v (s\ksSchedulerAction := ChooseNewThread\))" + apply (clarsimp simp: in_monad setObject_def split_def updateObject_default_def + magnitudeCheck_def projectKOs + split: option.splits) + done + +lemma threadSet_sched: + "(x, s') \ fst (threadSet f t s) \ + (x,s'\ksSchedulerAction := ChooseNewThread\) \ fst (threadSet f t (s\ksSchedulerAction := ChooseNewThread\))" + apply (clarsimp simp: in_monad threadSet_def) + apply (drule getObject_sched) + apply (drule setObject_sched) + apply fastforce + done + +lemma asUser_sched: + "(rv,s') \ fst (asUser t f s) \ + (rv,s'\ksSchedulerAction := ChooseNewThread\) \ fst (asUser t f (s\ksSchedulerAction := ChooseNewThread\))" + apply (clarsimp simp: asUser_def split_def in_monad select_f_def) + apply (drule threadGet_sched) + apply (drule threadSet_sched) + apply fastforce + done + +lemma doMachineOp_sched: + "(rv,s') \ fst (doMachineOp f s) \ + (rv,s'\ksSchedulerAction := ChooseNewThread\) \ fst (doMachineOp f (s\ksSchedulerAction := ChooseNewThread\))" + apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) + apply fastforce + done + +context begin interpretation Arch . 
(*FIXME: arch_split*) +crunch curThread [wp]: restart "\s. P (ksCurThread s)" + (wp: crunch_wps simp: crunch_simps) +end + +context kernel_m +begin + +lemma getMRs_rel_sched: + "\ getMRs_rel args buffer s; + (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s \ + \ getMRs_rel args buffer (s\ksSchedulerAction := ChooseNewThread\)" + apply (clarsimp simp: getMRs_rel_def) + apply (rule exI, rule conjI, assumption) + apply (subst det_wp_use, rule det_wp_getMRs) + apply (simp add: cur_tcb'_def split: option.splits) + apply (simp add: valid_ipc_buffer_ptr'_def) + apply (subst (asm) det_wp_use, rule det_wp_getMRs) + apply (simp add: cur_tcb'_def) + apply (clarsimp simp: getMRs_def in_monad) + apply (drule asUser_sched) + apply (intro exI) + apply (erule conjI) + apply (cases buffer) + apply (simp add: return_def) + apply clarsimp + apply (drule mapM_upd [rotated]) + prefer 2 + apply fastforce + apply (clarsimp simp: loadWordUser_def in_monad stateAssert_def word_size) + apply (erule doMachineOp_sched) + done + +lemma getObject_state: + " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ + \ (if t = t' then tcbState_update (\_. st) x else x, + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" + apply (simp split: if_split) + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: getObject_def split_def loadObject_default_def in_monad + Corres_C.in_magnitude_check' projectKOs objBits_simps') + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs objBits_simps) + apply (simp add: magnitudeCheck_def in_monad split: option.splits) + apply clarsimp + apply (simp add: lookupAround2_char2) + apply (clarsimp split: if_split_asm) + apply (erule_tac x=x2 in allE) + apply (clarsimp simp: ps_clear_def) + apply (drule_tac x=x2 in orthD2) + apply fastforce + apply clarsimp + apply (erule impE) + apply simp + apply (simp flip: add_mask_fold) + apply (erule notE, rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow, simp add: word_bits_def) + apply clarsimp + apply (clarsimp simp: getObject_def split_def loadObject_default_def in_monad + Corres_C.in_magnitude_check' projectKOs objBits_simps') + apply (simp add: magnitudeCheck_def in_monad split: option.splits) + apply clarsimp + apply (simp add: lookupAround2_char2) + apply (clarsimp split: if_split_asm) + apply (erule_tac x=t in allE) + apply simp + apply (clarsimp simp: obj_at'_real_def projectKOs + ko_wp_at'_def objBits_simps) + apply (simp add: ps_clear_def) + apply (drule_tac x=t in orthD2) + apply fastforce + apply clarsimp + apply (erule impE) + apply simp + apply (simp flip: add_mask_fold) + apply (erule notE, rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply (erule_tac x=x2 in allE) + apply (clarsimp simp: ps_clear_def) + apply (drule_tac x=x2 in orthD2) + apply fastforce + apply clarsimp + apply (simp flip: add_mask_fold) + apply (erule impE) + apply (rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply fastforce + done + + +lemma threadGet_state: + "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) t' s); ko_at' ko t s \ \ + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. 
st) ko))\))" + apply (clarsimp simp: threadGet_def liftM_def in_monad) + apply (drule (1) getObject_state [where st=st]) + apply (rule exI) + apply (erule conjI) + apply (simp split: if_splits) + done + +lemma asUser_state: + "\(x,s) \ fst (asUser t' f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ \ \ + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (asUser t' f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" + apply (clarsimp simp: asUser_def in_monad select_f_def) + apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) + apply (frule use_valid, assumption, rule refl) + apply clarsimp + apply (frule (1) threadGet_state) + apply (intro exI) + apply (erule conjI) + apply (rule exI, erule conjI) + apply (clarsimp simp: threadSet_def in_monad) + apply (frule use_valid, rule getObject_inv [where P="(=) s"]) + apply (simp add: loadObject_default_def) + apply wp + apply simp + apply (rule refl) + apply clarsimp + apply (frule (1) getObject_state) + apply (intro exI) + apply (erule conjI) + apply (clarsimp simp: setObject_def split_def updateObject_default_def threadGet_def + in_magnitude_check' getObject_def loadObject_default_def liftM_def + objBits_simps' projectKOs in_monad) + apply (simp split: if_split) + apply (rule conjI) + apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) + apply (clarsimp simp: magnitudeCheck_def in_monad split: option.splits) + apply (rule conjI) + apply clarsimp + apply (cases s, simp) + apply (rule ext) + apply (clarsimp split: if_split) + apply (cases ko) + apply clarsimp + apply clarsimp + apply (rule conjI) + apply (clarsimp simp add: lookupAround2_char2 split: if_split_asm) + apply (erule_tac x=x2 in allE) + apply simp + apply (simp add: ps_clear_def) + apply (drule_tac x=x2 in orthD2) + apply fastforce + apply clarsimp + apply (erule impE, simp) + apply (simp flip: add_mask_fold) + apply (erule notE, rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply (rule exI) + apply (rule conjI, fastforce) + apply clarsimp + apply (cases s, clarsimp) + apply (rule ext, clarsimp split: if_split) + apply (cases ko, clarsimp) + apply (clarsimp simp: magnitudeCheck_def in_monad split: option.splits) + apply (rule conjI) + apply clarsimp + apply (cases s, clarsimp) + apply (rule ext, clarsimp split: if_split) + apply (case_tac tcb, clarsimp) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp add: lookupAround2_char2 split: if_split_asm) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs objBits_simps) + apply (erule_tac x=t in allE) + apply simp + apply (simp add: ps_clear_def) + apply (drule_tac x=t in orthD2) + apply fastforce + apply clarsimp + apply (erule impE, simp) + apply (simp flip: add_mask_fold) + apply (erule notE, rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply (erule_tac x=x2 in allE) + apply simp + apply (simp add: ps_clear_def) + apply (drule_tac x=x2 in orthD2) + apply fastforce + apply clarsimp + apply (erule impE) + apply (simp flip: add_mask_fold) + apply (rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply (erule impE, simp) + apply simp + apply (rule exI) + apply (rule conjI, fastforce) + apply clarsimp + apply (cases s, clarsimp) + apply (rule ext, clarsimp split: if_split) + apply (case_tac tcb, clarsimp) + done + +lemma doMachineOp_state: + "(rv,s') \ fst (doMachineOp f s) \ + (rv,s'\ksPSpace 
:= (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" + apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) + apply fastforce + done + +lemma mapM_upd_inv: + assumes f: "\x rv. (rv,s) \ fst (f x s) \ x \ set xs \ (rv, g s) \ fst (f x (g s))" + assumes inv: "\x. \(=) s\ f x \\_. (=) s\" + shows "(rv,s) \ fst (mapM f xs s) \ (rv, g s) \ fst (mapM f xs (g s))" + using f inv +proof (induct xs arbitrary: rv s) + case Nil + thus ?case by (simp add: mapM_Nil return_def) +next + case (Cons z zs) + from Cons.prems + show ?case + apply (clarsimp simp: mapM_Cons in_monad) + apply (frule use_valid, assumption, rule refl) + apply clarsimp + apply (drule Cons.prems, simp) + apply (rule exI, erule conjI) + apply (drule Cons.hyps) + apply simp + apply assumption + apply simp + done +qed + +lemma getMRs_rel_state: + "\getMRs_rel args buffer s; + (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; + ko_at' ko t s \ \ + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\)" + apply (clarsimp simp: getMRs_rel_def) + apply (rule exI, erule conjI) + apply (subst (asm) det_wp_use, rule det_wp_getMRs) + apply (simp add: cur_tcb'_def) + apply (subst det_wp_use, rule det_wp_getMRs) + apply (simp add: cur_tcb'_def) + apply (rule conjI) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs + objBits_simps ps_clear_def split: if_split) + apply (clarsimp simp: valid_ipc_buffer_ptr'_def split: option.splits) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def projectKOs obj_at'_real_def + objBits_simps ps_clear_def split: if_split) + apply (clarsimp simp: getMRs_def in_monad) + apply (frule use_valid, rule asUser_inv [where P="(=) s"]) + apply (wp mapM_wp' getRegister_inv)[1] + apply simp + apply clarsimp + apply (drule (1) asUser_state) + apply (wp mapM_wp' getRegister_inv)[1] + apply (intro exI) + apply (erule conjI) + apply (cases buffer) + apply (clarsimp simp: return_def) + apply clarsimp + apply (drule mapM_upd_inv [rotated -1]) + prefer 3 + apply fastforce + prefer 2 + apply wp + apply (clarsimp simp: loadWordUser_def in_monad stateAssert_def word_size + simp del: fun_upd_apply) + apply (rule conjI) + apply (clarsimp simp: pointerInUserData_def typ_at'_def ko_wp_at'_def + projectKOs ps_clear_def obj_at'_real_def + split: if_split) + apply (erule doMachineOp_state) + done + +lemma setThreadState_getMRs_rel: + "\getMRs_rel args buffer and cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer + and (\_. runnable' st)\ + setThreadState st t \\_. getMRs_rel args buffer\" + apply (rule hoare_gen_asm') + apply (simp add: setThreadState_runnable_simp) + apply (simp add: threadSet_def) + apply wp + apply (simp add: setObject_def split_def updateObject_default_def) + apply wp + apply (simp del: fun_upd_apply) + apply (wp getObject_tcb_wp) + apply (clarsimp simp del: fun_upd_apply) + apply (drule obj_at_ko_at')+ + apply (clarsimp simp del: fun_upd_apply) + apply (rule exI, rule conjI, assumption) + apply (clarsimp split: if_split simp del: fun_upd_apply) + apply (simp add: getMRs_rel_state) + done + +lemma setThreadState_sysargs_rel: + "\sysargs_rel args buffer and (\_. runnable' st)\ setThreadState st t \\_. sysargs_rel args buffer\" + apply (cases buffer, simp_all add: sysargs_rel_def) + apply (rule hoare_pre) + apply (wp setThreadState_getMRs_rel hoare_valid_ipc_buffer_ptr_typ_at'|simp)+ + done + +lemma ccorres_abstract_known: + "\ \rv' t t'. 
ceqv \ xf' rv' t t' g (g' rv'); ccorres rvr xf P P' hs f (g' val) \ + \ ccorres rvr xf P (P' \ {s. xf' s = val}) hs f g" + apply (rule ccorres_guard_imp2) + apply (rule_tac xf'=xf' in ccorres_abstract) + apply assumption + apply (rule_tac P="rv' = val" in ccorres_gen_asm2) + apply simp + apply simp + done + +lemma distinct_remove1_filter: + "distinct xs \ remove1 v xs = [x\xs. x \ v]" + apply (induct xs) + apply simp + apply (clarsimp split: if_split) + apply (rule sym, simp add: filter_id_conv) + apply clarsimp + done + +lemma hrs_mem_update_cong: + "\ \x. f x = f' x \ \ hrs_mem_update f = hrs_mem_update f'" + by (simp add: hrs_mem_update_def) + +lemma setPriority_ccorres: + "ccorres dc xfdc + (\s. tcb_at' t s \ ksCurDomain s \ maxDomain \ + valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ (priority \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s) + ({s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. prio_' s = ucast priority}) + [] (setPriority t priority) (Call setPriority_'proc)" + apply (cinit lift: tptr_' prio_') + apply (ctac(no_vcg) add: tcbSchedDequeue_ccorres) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule threadSet_ccorres_lemma2[where P=\]) + apply vcg + apply clarsimp + apply (erule(1) rf_sr_tcb_update_no_queue2, + (simp add: typ_heap_simps')+, simp_all?)[1] + apply (rule ball_tcb_cte_casesI, simp+) + apply (simp add: ctcb_relation_def) + apply (ctac(no_vcg) add: isRunnable_ccorres) + apply (simp add: when_def to_bool_def del: Collect_const) + apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) + apply (rule ccorres_pre_getCurThread) + apply (rule_tac R = "\s. rv = ksCurThread s" in ccorres_cond2) + apply (clarsimp simp: rf_sr_ksCurThread) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) + apply (rule ccorres_return_Skip') + apply (wp isRunnable_wp) + apply (wpsimp wp: hoare_drop_imps threadSet_valid_objs' + weak_sch_act_wf_lift_linear threadSet_pred_tcb_at_state + threadSet_tcbDomain_triv + simp: st_tcb_at'_def o_def split: if_splits) + apply (simp add: guard_is_UNIV_def) + apply (rule hoare_strengthen_post[ + where Q="\rv s. + obj_at' (\_. True) t s \ + priority \ maxPriority \ + ksCurDomain s \ maxDomain \ + valid_objs' s \ + weak_sch_act_wf (ksSchedulerAction s) s \ + pspace_aligned' s \ pspace_distinct' s"]) + apply (wp weak_sch_act_wf_lift_linear valid_tcb'_def) + apply (clarsimp simp: valid_tcb'_tcbPriority_update) + apply clarsimp + done + +lemma setMCPriority_ccorres: + "ccorres dc xfdc + (invs' and tcb_at' t and (\s. priority \ maxPriority)) + (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. mcp_' s = ucast priority}) + [] (setMCPriority t priority) (Call setMCPriority_'proc)" + apply (rule ccorres_gen_asm) + apply (cinit lift: tptr_' mcp_') + apply (rule ccorres_move_c_guard_tcb) + apply (rule threadSet_ccorres_lemma2[where P=\]) + apply vcg + apply clarsimp + apply (erule(1) rf_sr_tcb_update_no_queue2, + (simp add: typ_heap_simps')+)[1] + apply (rule ball_tcb_cte_casesI, simp+) + apply (simp add: ctcb_relation_def cast_simps) + apply (clarsimp simp: down_cast_same [symmetric] ucast_up_ucast is_up is_down) + done + +lemma ccorres_subgoal_tailE: + "\ ccorres rvr xf Q Q' hs (b ()) d; + ccorres rvr xf Q Q' hs (b ()) d \ ccorres rvr xf P P' hs (a >>=E b) (c ;; d) \ + \ ccorres rvr xf P P' hs (a >>=E b) (c ;; d)" + by simp + +lemma checkCapAt_ccorres: + "\ \rv' t t'. 
ceqv \ ret__unsigned_long_' rv' t t' c (c' rv'); + ccorres rvr xf P P' hs (f >>= g) (c' (scast true)); + ccorres rvr xf Q Q' hs (g ()) (c' (scast false)); + guard_is_UNIV dc xfdc (\_ _. P' \ Q') \ + \ ccorres rvr xf (invs' and valid_cap' cap and P and Q) + (UNIV \ {s. ccap_relation cap cap'} \ {s. slot' = cte_Ptr slot}) hs + (checkCapAt cap slot f >>= g) + (Guard C_Guard \hrs_htd \t_hrs \\<^sub>t slot'\ + (\ret__unsigned_long :== CALL sameObjectAs(cap', + h_val (hrs_mem \t_hrs) (cap_Ptr &(slot'\[''cap_C'']))));;c)" + apply (rule ccorres_gen_asm2)+ + apply (simp add: checkCapAt_def liftM_def bind_assoc del: Collect_const) + apply (rule ccorres_symb_exec_l' [OF _ getCTE_inv getCTE_sp empty_fail_getCTE]) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac xf'=ret__unsigned_long_' and val="from_bool (sameObjectAs cap (cteCap x))" + and R="cte_wp_at' ((=) x) slot and valid_cap' cap and invs'" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (rule exI, rule conjI, assumption) + apply (clarsimp simp: typ_heap_simps dest!: ccte_relation_ccap_relation) + apply (rule exI, rule conjI, assumption) + apply (auto intro: valid_capAligned dest: ctes_of_valid')[1] + apply assumption + apply (simp only: when_def if_to_top_of_bind) + apply (rule ccorres_if_lhs) + apply simp + apply simp + apply (simp add: guard_is_UNIV_def) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemmas checkCapAt_ccorres2 + = checkCapAt_ccorres[where g=return, simplified bind_return] + +lemma invs_psp_aligned_strg': + "invs' s \ pspace_aligned' s" + by clarsimp + +lemma cte_is_derived_capMasterCap_strg: + "cte_wp_at' (is_derived' (ctes_of s) ptr cap \ cteCap) ptr s + \ cte_wp_at' (\scte. capMasterCap (cteCap scte) = capMasterCap cap \ P) ptr s" + by (clarsimp simp: cte_wp_at_ctes_of is_derived'_def + badge_derived'_def) + +lemma cteInsert_cap_to'2: + "\ex_nonz_cap_to' p\ + cteInsert newCap srcSlot destSlot + \\_. ex_nonz_cap_to' p\" + apply (simp add: cteInsert_def ex_nonz_cap_to'_def setUntypedCapAsFull_def) + apply (rule hoare_vcg_ex_lift) + apply (wp updateMDB_weak_cte_wp_at + updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply auto + done + +lemma threadSet_ipcbuffer_invs: + "is_aligned a msg_align_bits \ + \invs' and tcb_at' t\ threadSet (tcbIPCBuffer_update (\_. a)) t \\rv. invs'\" + apply (wp threadSet_invs_trivial, simp_all add: inQ_def cong: conj_cong) + done + +lemma invokeTCB_ThreadControl_ccorres: + notes prod.case_cong_weak[cong] + shows + "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple + and tcb_inv_wf' (ThreadControl target slot faultep mcp priority cRoot vRoot buf) + and (\_. (faultep = None) = (cRoot = None) \ (cRoot = None) = (vRoot = None) + \ (case buf of Some (ptr, Some (cap, slot)) \ slot \ 0 | _ \ True))) + (UNIV \ {s. target_' s = tcb_ptr_to_ctcb_ptr target} + \ {s. (cRoot \ None \ buf \ None) \ slot_' s = cte_Ptr slot} + \ {s. faultep_' s = option_to_0 faultep} + \ {s. mcp_' s = case_option 0 (ucast o fst) mcp} + \ {s. priority_' s = case_option 0 (ucast o fst) priority} + \ {s. case cRoot of None \ True | Some (cRootCap, cRootSlot) \ ccap_relation cRootCap (cRoot_newCap_' s)} + \ {s. cRoot_srcSlot_' s = cte_Ptr (option_to_0 (option_map snd cRoot))} + \ {s. 
case vRoot of None \ True | Some (vRootCap, vRootSlot) \ ccap_relation vRootCap (vRoot_newCap_' s)} + \ {s. vRoot_srcSlot_' s = cte_Ptr (option_to_0 (option_map snd vRoot))} + \ {s. bufferAddr_' s = option_to_0 (option_map fst buf)} + \ {s. bufferSrcSlot_' s = cte_Ptr (case buf of Some (ptr, Some (cap, slot)) \ slot | _ \ 0)} + \ {s. case buf of Some (ptr, Some (cap, slot)) \ ccap_relation cap (bufferCap_' s) | _ \ True} + \ {s. updateFlags_' s = (if mcp \ None then scast thread_control_update_mcp else 0) + || (if priority \ None then scast thread_control_update_priority else 0) + || (if buf \ None then scast thread_control_update_ipc_buffer else 0) + || (if cRoot \ None then scast thread_control_update_space else 0)}) + [] + (invokeTCB (ThreadControl target slot faultep mcp priority cRoot vRoot buf)) + (Call invokeTCB_ThreadControl_'proc)" + (is "ccorres ?rvr ?xf (?P and (\_. ?P')) ?Q [] ?af ?cf") + apply (rule ccorres_gen_asm) + apply (cinit lift: target_' slot_' faultep_' mcp_' priority_' cRoot_newCap_' cRoot_srcSlot_' + vRoot_newCap_' vRoot_srcSlot_' bufferAddr_' bufferSrcSlot_' bufferCap_' + updateFlags_') + apply csymbr + apply (simp add: liftE_bindE case_option_If2 thread_control_flag_defs + word_ao_dist if_and_helper if_n_0_0 fun_app_def + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] + del: Collect_const cong add: call_ignore_cong if_cong) + apply (rule_tac P="ptr_val (tcb_ptr_to_ctcb_ptr target) && ~~ mask 5 + = ptr_val (tcb_ptr_to_ctcb_ptr target) + \ ptr_val (tcb_ptr_to_ctcb_ptr target) && ~~ mask tcbBlockSizeBits = target + \ canonical_address target \ is_aligned target tcbBlockSizeBits" + in ccorres_gen_asm) + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow_novcg) + apply (rule ccorres_cond_both'[where Q=\ and Q'=\]) + apply (simp add: Collect_const_mem) + apply (rule ccorres_move_c_guard_tcb) + apply (rule threadSet_ccorres_lemma2[where P=\]) + apply vcg + apply clarsimp + apply (subst StateSpace.state.fold_congs[OF refl refl]) + apply (rule globals.fold_congs[OF refl refl]) + apply (rule heap_update_field_hrs) + apply (simp add: typ_heap_simps) + apply (fastforce intro: typ_heap_simps) + apply simp + apply (erule(1) rf_sr_tcb_update_no_queue2, + (simp add: typ_heap_simps)+) + apply (rule ball_tcb_cte_casesI, simp+) + apply (clarsimp simp: ctcb_relation_def option_to_0_def) + apply (rule ccorres_return_Skip) + apply (rule ceqv_refl) + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow_novcg) + apply (rule ccorres_cond_both'[where Q=\ and Q'=\]) + apply (simp add: Collect_const_mem) + apply (ctac add: setMCPriority_ccorres) + apply (rule ccorres_return_Skip) + apply (rule ceqv_refl) + apply (rule ccorres_subgoal_tailE) + apply (rule ccorres_subgoal_tailE) + apply (rule_tac A="invs' and sch_act_simple and tcb_at' target + and (\(s::kernel_state). (case priority of None \ True | Some x \ ((\y. 
fst y \ maxPriority)) x)) + and case_option \ (case_option \ (valid_cap' \ fst) \ snd) buf + and case_option \ (case_option \ (cte_at' \ snd) \ snd) buf + and K (case_option True (swp is_aligned msg_align_bits \ fst) buf) + and K (case_option True (case_option True (isArchObjectCap \ fst) \ snd) buf)" + (* bits of tcb_inv_wf' *) + in ccorres_guard_imp2[where A'=UNIV]) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp only: if_True Collect_True split_def bindE_assoc) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ + apply csymbr + apply (simp add: liftE_bindE[symmetric] bindE_assoc getThreadBufferSlot_def + locateSlot_conv + del: Collect_const) + apply (simp add: liftE_bindE del: Collect_const) + apply (ctac(no_vcg) add: cteDelete_ccorres) + apply (simp del: Collect_const add: Collect_False) + apply (rule ccorres_move_c_guard_tcb) + apply (rule ccorres_split_nothrow_novcg) + apply (rule threadSet_ccorres_lemma2[where P=\]) + apply vcg + apply clarsimp + apply (erule(1) rf_sr_tcb_update_no_queue2, + (simp add: typ_heap_simps')+, simp_all?)[1] + apply (rule ball_tcb_cte_casesI, simp+) + apply (clarsimp simp: ctcb_relation_def option_to_0_def) + apply (rule ceqv_refl) + apply csymbr + apply (simp add: ccorres_cond_iffs Collect_False split_def + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + (* P *) + apply (rule ccorres_rhs_assoc)+ + apply (simp add: case_option_If2 if_n_0_0 split_def + del: Collect_const) + apply (rule checkCapAt_ccorres) + apply ceqv + apply csymbr + apply (simp add: Collect_True + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (rule checkCapAt_ccorres) + apply ceqv + apply csymbr + apply (simp add: Collect_True + del: Collect_const) + apply (simp add: assertDerived_def bind_assoc del: Collect_const) + apply (rule ccorres_symb_exec_l) + apply (ctac(no_vcg) add: cteInsert_ccorres) + apply (rule ccorres_pre_getCurThread) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (simp add: when_def) + apply (rename_tac curThread) + apply (rule_tac C'="{s. target = curThread}" + and Q="\s. ksCurThread s = curThread" + and Q'=UNIV in ccorres_rewrite_cond_sr) + apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) + apply (rule ccorres_Cond_rhs; clarsimp) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) + apply (rule ccorres_return_Skip') + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) + apply (ctac add: setPriority_ccorres) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_CE, simp+)[1] + apply (wp (once)) + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: when_def hoare_weak_lift_imp) + apply (strengthen sch_act_wf_weak, wp) + apply clarsimp + apply wp + apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) + apply (rule hoare_strengthen_post[ + where Q= "\rv s. + valid_objs' s \ + weak_sch_act_wf (ksSchedulerAction s) s \ + ((\a b. 
priority = Some (a, b)) \ + tcb_at' target s \ ksCurDomain s \ maxDomain \ + fst (the priority) \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s"]) + apply (strengthen sch_act_wf_weak) + apply (wp hoare_weak_lift_imp) + apply (clarsimp split: if_splits) + apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ + apply csymbr + apply (simp add: Collect_False ccorres_cond_iffs + del: Collect_const) + apply (rule ccorres_pre_getCurThread) + apply (rename_tac curThread) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (simp add: when_def) + apply (rule_tac C'="{s. target = curThread}" + and Q="\s. ksCurThread s = curThread" + and Q'=UNIV in ccorres_rewrite_cond_sr) + apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) + apply (rule ccorres_Cond_rhs; clarsimp) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) + apply (rule ccorres_return_Skip') + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) + apply (ctac add: setPriority_ccorres) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_CE, simp+) + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply (simp add: when_def) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) + apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem + tcbBuffer_def size_of_def cte_level_bits_def + tcbIPCBufferSlot_def + mask_def objBits_defs) + apply csymbr + apply (simp add: Collect_False ccorres_cond_iffs + del: Collect_const) + apply (rule ccorres_cond_false_seq, simp) + apply (rule ccorres_pre_getCurThread) + apply (rename_tac curThread) + apply (simp add: when_def) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule_tac C'="{s. target = curThread}" + and Q="\s. ksCurThread s = curThread" + and Q'=UNIV in ccorres_rewrite_cond_sr) + apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) + apply (rule ccorres_Cond_rhs; clarsimp) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) + apply (rule ccorres_return_Skip') + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) + apply (ctac add: setPriority_ccorres) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_CE, simp+) + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) + apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) + apply (simp add: guard_is_UNIV_def Collect_const_mem + flip: canonical_bit_def) + apply (clarsimp simp: ccap_relation_def cap_thread_cap_lift cap_to_H_def + canonical_address_tcb_ptr[THEN canonical_address_and_maskD]) + (* \ P *) + apply simp + apply (rule ccorres_cond_false_seq, simp) + apply (rule ccorres_cond_false_seq, simp) + apply (simp split: option.split_asm) + apply (rule ccorres_pre_getCurThread) + apply (rename_tac curThread) + apply (simp add: when_def) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule_tac C'="{s. target = curThread}" + and Q="\s. 
ksCurThread s = curThread" + and Q'=UNIV in ccorres_rewrite_cond_sr) + apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) + apply (rule ccorres_Cond_rhs; clarsimp) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) + apply (rule ccorres_return_Skip') + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) + apply (ctac add: setPriority_ccorres) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_CE, simp+) + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply wpsimp + apply (wp hoare_weak_lift_imp, strengthen sch_act_wf_weak, wp ) + apply wp + apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) + apply (simp cong: conj_cong) + apply (rule hoare_strengthen_post[ + where Q="\a b. (valid_objs' b \ + sch_act_wf (ksSchedulerAction b) b \ + pspace_aligned' b \ pspace_distinct' b \ + ((\a b. priority = Some (a, b)) \ + tcb_at' target b \ + ksCurDomain b \ maxDomain \ + fst (the priority) \ maxPriority)) \ + ((case snd (the buf) + of None \ 0 + | Some x \ snd x) \ 0 \ + invs' b \ + valid_cap' (capability.ThreadCap target) b \ + valid_cap' (fst (the (snd (the buf)))) b \ + (cte_wp_at' (\a. is_derived' (map_to_ctes (ksPSpace b)) + (snd (the (snd (the buf)))) + (fst (the (snd (the buf)))) + (cteCap a)) (snd (the (snd (the buf)))) b \ + cte_wp_at' (\scte. capMasterCap (cteCap scte) + = capMasterCap (fst (the (snd (the buf)))) + \ is_simple_cap' (fst (the (snd (the buf))))) + (snd (the (snd (the buf)))) b \ + valid_mdb' b \ + pspace_aligned' b \ + cte_wp_at' (\c. True) (snd (the (snd (the buf)))) b))"]) + prefer 2 + apply fastforce + apply (strengthen cte_is_derived_capMasterCap_strg + invs_weak_sch_act_wf invs_sch_act_wf' + invs_valid_objs' invs_mdb' invs_pspace_aligned', + simp add: o_def) + apply (rule_tac P="is_aligned (fst (the buf)) msg_align_bits" + in hoare_gen_asm) + apply (wp threadSet_ipcbuffer_trivial hoare_weak_lift_imp + | simp + | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf + | wp hoare_drop_imps)+ + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem + option_to_0_def + split: option.split_asm) + apply (clarsimp simp: ccap_relation_def cap_thread_cap_lift cap_to_H_def) + apply (rule ccorres_split_throws) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply vcg + apply (simp add: conj_comms cong: conj_cong) + apply (strengthen invs_ksCurDomain_maxDomain' invs_pspace_distinct') + apply (wp hoare_vcg_const_imp_lift_R cteDelete_invs') + apply simp + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) + apply (ctac add: setPriority_ccorres) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_CE, simp+) + apply wp + apply (clarsimp simp: guard_is_UNIV_def) + apply (clarsimp simp: inQ_def Collect_const_mem cintr_def + exception_defs tcb_cnode_index_defs) + apply (simp add: tcbBuffer_def tcbIPCBufferSlot_def word_sle_def + cte_level_bits_def size_of_def case_option_If2 ) + apply (rule conjI) + apply (clarsimp simp: objBits_simps' word_bits_conv case_option_If2 if_n_0_0 valid_cap'_def + capAligned_def obj_at'_def projectKOs) + apply (fastforce simp: invs_valid_objs' invs_ksCurDomain_maxDomain') + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ + apply (simp add: split_def getThreadVSpaceRoot_def locateSlot_conv + bindE_assoc liftE_bindE + del: Collect_const) + apply csymbr + apply 
(ctac(no_vcg) add: cteDelete_ccorres) + apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs + del: Collect_const) + apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ + apply (simp add: conj_comms pred_conj_def) + apply (simp cong: conj_cong option.case_cong) + apply (wp checked_insert_tcb_invs' hoare_case_option_wp + checkCap_inv [where P="tcb_at' p0" for p0] + checkCap_inv [where P="cte_at' p0" for p0] + checkCap_inv [where P="valid_cap' c" for c] + checkCap_inv [where P="sch_act_simple"] + | simp)+ + apply (simp add: guard_is_UNIV_def) + apply (thin_tac "ccorres a1 a2 a3 a4 a5 a6 a7" for a1 a2 a3 a4 a5 a6 a7) + apply (rule ccorres_rhs_assoc)+ + apply (rule checkCapAt_ccorres2) + apply ceqv + apply csymbr + apply (simp add: Collect_True + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (rule checkCapAt_ccorres2) + apply ceqv + apply csymbr + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs + del: Collect_const) + apply (rule ccorres_symb_exec_l) + apply (ctac add: cteInsert_ccorres) + apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ + apply csymbr + apply (simp add: Collect_False ccorres_cond_iffs + del: Collect_const) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem + tcbVTable_def tcbVTableSlot_def Kernel_C.tcbVTable_def + cte_level_bits_def size_of_def option_to_0_def objBits_defs mask_def) + apply csymbr + apply (simp add: Collect_False + del: Collect_const) + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift + cap_to_H_def Collect_const_mem + canonical_address_tcb_ptr[THEN canonical_address_and_maskD]) + apply simp + apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) + apply vcg + apply (simp add: conj_comms, simp cong: conj_cong add: invs_mdb' invs_pspace_aligned') + apply (simp add: cte_is_derived_capMasterCap_strg o_def) + apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes + cteDelete_sch_act_simple + | strengthen invs_valid_objs')+ + apply (rule hoare_strengthen_postE_R[where Q' = "\r. 
invs'"]) + apply (wp cteDelete_invs') + apply (clarsimp simp:cte_wp_at_ctes_of) + apply simp + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ + apply (simp add: split_def getThreadCSpaceRoot_def locateSlot_conv + bindE_assoc liftE_bindE + del: Collect_const) + apply csymbr + apply (ctac(no_vcg) add: cteDelete_ccorres) + apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs + del: Collect_const) + apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) + | rule ccorres_rhs_assoc2)+ + apply (simp add: conj_comms pred_conj_def) + apply (simp cong: conj_cong option.case_cong) + apply (wp checked_insert_tcb_invs' hoare_case_option_wp + checkCap_inv [where P="tcb_at' p0" for p0] + checkCap_inv [where P="cte_at' p0" for p0] + checkCap_inv [where P="valid_cap' c" for c] + checkCap_inv [where P="sch_act_simple"] + | simp)+ + apply (clarsimp simp: guard_is_UNIV_def word_sle_def Collect_const_mem + option_to_0_def Kernel_C.tcbVTable_def tcbVTableSlot_def + cte_level_bits_def size_of_def cintr_def + tcb_cnode_index_defs objBits_defs mask_def) + apply (thin_tac "ccorres a1 a2 a3 a4 a5 a6 a7" for a1 a2 a3 a4 a5 a6 a7) + apply (rule ccorres_rhs_assoc)+ + apply (rule checkCapAt_ccorres2) + apply ceqv + apply csymbr + apply (simp add: Collect_True + del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (rule checkCapAt_ccorres2) + apply ceqv + apply csymbr + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs + del: Collect_const) + apply (rule ccorres_symb_exec_l) + apply (ctac add: cteInsert_ccorres) + apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ + apply csymbr + apply (simp add: Collect_False ccorres_cond_iffs + del: Collect_const) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem + Kernel_C.tcbCTable_def tcbCTableSlot_def + cte_level_bits_def size_of_def option_to_0_def mask_def objBits_defs) + apply csymbr + apply (simp add: Collect_False + del: Collect_const) + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift + cap_to_H_def Collect_const_mem + canonical_address_tcb_ptr[THEN canonical_address_and_maskD]) + apply simp + apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) + apply vcg + apply (simp add: conj_comms, simp cong: conj_cong add: invs_mdb' invs_pspace_aligned') + apply (simp add: cte_is_derived_capMasterCap_strg o_def) + apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes cteDelete_sch_act_simple + | strengthen invs_valid_objs')+ + apply (rule hoare_strengthen_postE_R[where Q' = "\r. 
invs'"]) + apply (wp cteDelete_invs') + apply (clarsimp simp:cte_wp_at_ctes_of) + apply simp + apply (simp add: conj_comms) + apply (wp hoare_case_option_wp threadSet_invs_trivial setMCPriority_invs' + typ_at_lifts[OF setMCPriority_typ_at'] + threadSet_cap_to' hoare_weak_lift_imp | simp)+ + apply (clarsimp simp: guard_is_UNIV_def tcbCTableSlot_def Kernel_C.tcbCTable_def + cte_level_bits_def size_of_def word_sle_def option_to_0_def + cintr_def objBits_defs mask_def) + apply (simp add: conj_comms) + apply (wp hoare_case_option_wp threadSet_invs_trivial + threadSet_cap_to' hoare_weak_lift_imp | simp)+ + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) + apply (clarsimp simp: inQ_def) + apply (subst is_aligned_neg_mask_eq) + apply (simp add: tcb_ptr_to_ctcb_ptr_def) + apply (rule aligned_add_aligned) + apply (fastforce simp add: obj_at'_def objBits_simps') + apply (simp add: ctcb_offset_defs is_aligned_def) + apply (simp add: word_bits_conv) + apply simp + apply (subgoal_tac "s \' capability.ThreadCap target") + apply (clarsimp simp: cte_level_bits_def Kernel_C.tcbCTable_def Kernel_C.tcbVTable_def + tcbCTableSlot_def tcbVTableSlot_def size_of_def + tcb_cte_cases_def isCap_simps tcb_aligned' obj_at'_is_canonical + cteSizeBits_def + split: option.split_asm + dest!: isValidVTableRootD invs_pspace_canonical') + apply (clarsimp simp: valid_cap'_def capAligned_def word_bits_conv + obj_at'_def objBits_simps') + done + +lemma setupReplyMaster_ccorres: + "ccorres dc xfdc (tcb_at' t) + (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr t}) [] + (setupReplyMaster t) (Call setupReplyMaster_'proc)" + apply (cinit lift: thread_') + apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ + apply ctac + apply (simp del: Collect_const) + apply (rule ccorres_pre_getCTE) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac F="\rv'. 
(rv' = scast cap_null_cap) = (cteCap oldCTE = NullCap)" + and R="cte_wp_at' ((=) oldCTE) rv" + and xf'=ret__unsigned_longlong_' + in ccorres_symb_exec_r_abstract_UNIV[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1[OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap + dest!: ccte_relation_ccap_relation) + apply ceqv + apply (simp only:) + apply (rule ccorres_when[where R=\]) + apply (simp add: Collect_const_mem) + apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_stateAssert]) + apply (rule_tac P="cte_at' rv and tcb_at' t" in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) + apply (rule conjI, fastforce intro: typ_heap_simps) + apply (clarsimp simp: typ_heap_simps) + apply (rule fst_setCTE[OF ctes_of_cte_at], assumption) + apply (rule rev_bexI, assumption) + apply (clarsimp simp: rf_sr_def cstate_relation_def + cpspace_relation_def Let_def + typ_heap_simps') + apply (subst setCTE_tcb_case, assumption+) + apply (rule_tac r="s'" in KernelStateData_H.kernel_state.cases) + apply clarsimp + apply (rule conjI) + apply (erule(2) cmap_relation_updI) + apply (clarsimp simp: ccte_relation_def cap_reply_cap_lift cte_lift_def + cong: option.case_cong_weak) + apply (simp add: cte_to_H_def cap_to_H_def mdb_node_to_H_def + nullMDBNode_def c_valid_cte_def) + apply (simp add: cap_reply_cap_lift) + apply simp + apply (simp add: cmachine_state_relation_def packed_heap_update_collapse_hrs + carch_state_relation_def carch_globals_def + cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"]) + apply (wp | simp)+ + apply (clarsimp simp: guard_is_UNIV_def) + apply (wp | simp add: locateSlot_conv)+ + apply vcg + apply (clarsimp simp: word_sle_def cte_wp_at_ctes_of + tcb_cnode_index_defs tcbReplySlot_def) + done + +lemma restart_ccorres: + "ccorres dc xfdc (invs' and tcb_at' thread and sch_act_simple) + (UNIV \ {s. target_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (restart thread) (Call restart_'proc)" + apply (cinit lift: target_') + apply (ctac(no_vcg) add: isStopped_ccorres) + apply (simp only: when_def) + apply (rule ccorres_cond2[where R=\]) + apply (simp add: to_bool_def Collect_const_mem) + apply (rule ccorres_rhs_assoc)+ + apply (ctac(no_vcg) add: cancelIPC_ccorres1[OF cteDeleteOne_ccorres]) + apply (ctac(no_vcg) add: setupReplyMaster_ccorres) + apply (ctac(no_vcg)) + apply (ctac(no_vcg) add: tcbSchedEnqueue_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) + apply (wp weak_sch_act_wf_lift)[1] + apply (wp sts_valid_objs' setThreadState_st_tcb)[1] + apply (simp add: valid_tcb_state'_def) + apply wp + apply (wp (once) sch_act_wf_lift, (wp tcb_in_cur_domain'_lift)+) + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule delete_one_conc_fr.cancelIPC_invs) + + apply (rule cancelIPC_tcb_at'[where t=thread]) + apply fastforce + apply (rule ccorres_return_Skip) + apply (wp hoare_drop_imps) + apply (auto simp: Collect_const_mem mask_def ThreadState_defs) + done + +lemma setNextPC_ccorres: + "ccorres dc xfdc \ + (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ + \ {s. 
v_' s = val}) [] + (asUser thread (setNextPC val)) + (Call setNextPC_'proc)" + apply (cinit') + apply (simp add: setNextPC_def) + apply (ctac add: setRegister_ccorres) + apply simp + done + +lemma Arch_performTransfer_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + \ UNIV [] + (liftE (performTransfer a b c)) + (Call Arch_performTransfer_'proc)" + apply (cinit' simp: performTransfer_def) + apply (fold returnOk_liftE) + apply (rule ccorres_return_CE) + apply simp+ + done + +(*FIXME: arch_split: C kernel names hidden by Haskell names *) +abbreviation "frameRegistersC \ kernel_all_substitute.frameRegisters" +lemmas frameRegistersC_def = kernel_all_substitute.frameRegisters_def +abbreviation "gpRegistersC \ kernel_all_substitute.gpRegisters" +lemmas gpRegistersC_def = kernel_all_substitute.gpRegisters_def + +lemma frame_gp_registers_convs: + "length AARCH64_H.frameRegisters = unat n_frameRegisters" + "length AARCH64_H.gpRegisters = unat n_gpRegisters" + "n < length AARCH64_H.frameRegisters \ + index frameRegistersC n = register_from_H (AARCH64_H.frameRegisters ! n)" + "n < length AARCH64_H.gpRegisters \ + index gpRegistersC n = register_from_H (AARCH64_H.gpRegisters ! n)" + apply (simp_all add: AARCH64_H.gpRegisters_def AARCH64_H.frameRegisters_def + AARCH64.gpRegisters_def n_gpRegisters_def + AARCH64.frameRegisters_def n_frameRegisters_def + frameRegistersC_def gpRegistersC_def msgRegisters_unfold + fupdate_def Arrays.update_def toEnum_def + upto_enum_def fromEnum_def enum_register) + apply (auto simp: less_Suc_eq fcp_beta C_register_defs) + done + + +lemma postModifyRegisters_ccorres: + "ccorres dc xfdc + (\s. ct = ksCurThread s) + \\tptr = tcb_ptr_to_ctcb_ptr dest\ hs + (asUser dest (postModifyRegisters ct dest)) + (Call Arch_postModifyRegisters_'proc)" + apply (cinit' lift: tptr_' simp: postModifyRegisters_def when_def) + apply (simp add: if_distrib[where f="asUser t" for t] asUser_return) + apply (rule ccorres_add_return2) + apply (rule ccorres_stateAssert) + apply (rule ccorres_return_Skip') + by simp+ + +lemma invokeTCB_CopyRegisters_ccorres: + "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple and tcb_at' destn and tcb_at' source + and ex_nonz_cap_to' destn and ex_nonz_cap_to' source) + (UNIV \ {s. dest___ptr_to_struct_tcb_C_' s = tcb_ptr_to_ctcb_ptr destn} + \ {s. tcb_src_' s = tcb_ptr_to_ctcb_ptr source} + \ {s. to_bool (resumeTarget_' s) = resume} + \ {s. to_bool (suspendSource_' s) = susp} + \ {s. to_bool (transferFrame_' s) = frames} + \ {s. 
to_bool (transferInteger_' s) = ints}) [] + (invokeTCB (CopyRegisters destn source susp resume frames ints arch)) + (Call invokeTCB_CopyRegisters_'proc)" + apply (cinit lift: dest___ptr_to_struct_tcb_C_' tcb_src_' resumeTarget_' + suspendSource_' transferFrame_' transferInteger_' + simp: whileAnno_def) + apply (simp add: liftE_def bind_assoc whileAnno_def + del: Collect_const) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_when[where R=\]) + apply (simp add: to_bool_def del: Collect_const) + apply (ctac add: suspend_ccorres[OF cteDeleteOne_ccorres]) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_when[where R=\]) + apply (simp add: to_bool_def del: Collect_const) + apply (ctac add: restart_ccorres) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_when[where R=\]) + apply (simp add: to_bool_def Collect_const_mem) + apply (rule ccorres_rhs_assoc)+ + apply (csymbr, csymbr, csymbr) + apply (rule ccorres_rhs_assoc2, rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_rel_imp) + apply (rule ccorres_mapM_x_while[where F="\x. tcb_at' destn and tcb_at' source"]) + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (ctac(no_vcg) add: getRegister_ccorres) + apply (ctac add: setRegister_ccorres) + apply wp + apply (clarsimp simp: frame_gp_registers_convs + n_frameRegisters_def unat_of_nat64 + word_bits_def word_of_nat_less) + apply (simp add: frame_gp_registers_convs n_frameRegisters_def) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (wp | simp)+ + apply (simp add: frame_gp_registers_convs n_frameRegisters_def + word_bits_def) + apply simp + apply (ctac(no_vcg) add: getRestartPC_ccorres) + apply (ctac add: setNextPC_ccorres) + apply wp+ + apply (clarsimp simp: guard_is_UNIV_def) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule ccorres_when[where R=\]) + apply (simp add: to_bool_def Collect_const_mem) + apply (rule ccorres_rhs_assoc)+ + apply (csymbr, csymbr) + apply (rule ccorres_rel_imp) + apply (rule ccorres_mapM_x_while[where F="\x. tcb_at' destn and tcb_at' source"]) + apply clarsimp + apply (rule ccorres_guard_imp2) + apply ((wp | ctac(no_vcg) add: getRegister_ccorres setRegister_ccorres)+)[1] + apply (clarsimp simp: frame_gp_registers_convs n_gpRegisters_def + unat_of_nat64 word_bits_def word_of_nat_less) + apply (simp add: frame_gp_registers_convs n_gpRegisters_def) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (wp | simp)+ + apply (simp add: word_bits_def frame_gp_registers_convs n_gpRegisters_def) + apply simp + apply (rule ccorres_pre_getCurThread) + apply (ctac add: postModifyRegisters_ccorres) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule_tac R="\s. rvc = ksCurThread s" + in ccorres_when) + apply (clarsimp simp: rf_sr_ksCurThread) + apply clarsimp + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) + apply (simp only: liftE_bindE[symmetric] return_returnOk) + apply (ctac(no_vcg) add: Arch_performTransfer_ccorres) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply simp + apply wp+ + apply (clarsimp simp: guard_is_UNIV_def) + apply simp + apply (wpsimp simp: postModifyRegisters_def pred_conj_def + cong: if_cong + wp: hoare_drop_imp) + apply vcg + apply (simp add: pred_conj_def guard_is_UNIV_def cong: if_cong + | wp mapM_x_wp_inv hoare_drop_imp)+ + apply clarsimp + apply (rule_tac Q="\rv. 
invs' and tcb_at' destn" in hoare_strengthen_post[rotated]) + apply (fastforce simp: sch_act_wf_weak) + apply (wpsimp wp: hoare_drop_imp restart_invs')+ + apply (clarsimp simp add: guard_is_UNIV_def) + apply (wp hoare_drop_imp hoare_vcg_if_lift)+ + apply simp + apply (rule_tac Q="\rv. invs' and tcb_at' destn" in hoare_strengthen_post[rotated]) + apply (fastforce simp: sch_act_wf_weak) + apply (wpsimp wp: hoare_drop_imp)+ + apply (clarsimp simp add: guard_is_UNIV_def) + apply (clarsimp simp: invs_weak_sch_act_wf invs_valid_objs' + split: if_split cong: if_cong | rule conjI)+ + apply (clarsimp dest!: global'_no_ex_cap simp: invs'_def valid_state'_def | rule conjI)+ + done + +lemma invokeTCB_WriteRegisters_ccorres_helper: + "\ unat (f (of_nat n)) = incn + \ g (of_nat n) = register_from_H reg \ n'=incn + \ of_nat n < bnd \ of_nat n < bnd2 \ \ + ccorres dc xfdc (sysargs_rel args buffer and sysargs_rel_n args buffer n' and + tcb_at' dst and P) + (\\i = of_nat n\) hs + (asUser dst (setRegister reg + (sanitiseRegister t reg (args ! incn)))) + (\ret__unsigned_long :== CALL getSyscallArg(f (\i),option_to_ptr buffer);; + Guard ArrayBounds \\i < bnd\ + (\unsigned_long_eret_2 :== CALL sanitiseRegister(g (\i),\ret__unsigned_long,from_bool t));; + Guard ArrayBounds \\i < bnd2\ + (CALL setRegister(tcb_ptr_to_ctcb_ptr dst,g (\i),\unsigned_long_eret_2)))" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n="incn" and buffer=buffer]) + apply (rule ccorres_symb_exec_r) + apply (ctac add: setRegister_ccorres) + apply (vcg) + apply clarsimp + apply (rule conseqPre, vcg, clarsimp) + apply wp + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply fastforce + done + +lemma doMachineOp_context: + "(rv,s') \ fst (doMachineOp f s) \ + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) + apply fastforce + done + + +lemma getObject_context: + " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ + \ (if t = t' then tcbContext_update (\_. st) x else x, + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. 
st) ko))\))" + apply (simp split: if_split) + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: getObject_def split_def loadObject_default_def in_monad + Corres_C.in_magnitude_check' projectKOs objBits_simps') + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs objBits_simps) + apply (simp add: magnitudeCheck_def in_monad split: option.splits) + apply clarsimp + apply (simp add: lookupAround2_char2) + apply (clarsimp split: if_split_asm) + apply (erule_tac x=x2 in allE) + apply (clarsimp simp: ps_clear_def) + apply (drule_tac x=x2 in orthD2) + apply fastforce + apply clarsimp + apply (erule impE) + apply simp + apply (simp flip: add_mask_fold) + apply (erule notE, rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply clarsimp + apply (clarsimp simp: getObject_def split_def loadObject_default_def in_monad + Corres_C.in_magnitude_check' projectKOs objBits_simps') + apply (simp add: magnitudeCheck_def in_monad split: option.splits) + apply clarsimp + apply (simp add: lookupAround2_char2) + apply (clarsimp split: if_split_asm) + apply (erule_tac x=t in allE) + apply simp + apply (clarsimp simp: obj_at'_real_def projectKOs + ko_wp_at'_def objBits_simps) + apply (simp add: ps_clear_def) + apply (drule_tac x=t in orthD2) + apply fastforce + apply clarsimp + apply (erule impE) + apply simp + apply (simp flip: add_mask_fold) + apply (erule notE, rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply (erule_tac x=x2 in allE) + apply (clarsimp simp: ps_clear_def) + apply (drule_tac x=x2 in orthD2) + apply fastforce + apply clarsimp + apply (erule impE) + apply (simp flip: add_mask_fold) + apply (rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply fastforce + done + +lemma threadGet_context: + "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) s); ko_at' ko t s; + t \ ksCurThread s \ \ + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + apply (clarsimp simp: threadGet_def liftM_def in_monad) + apply (drule (1) getObject_context [where st=st]) + apply (rule exI) + apply (erule conjI) + apply (simp split: if_splits) +done + + +lemma asUser_context: + "\(x,s) \ fst (asUser (ksCurThread s) f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ ; + t \ ksCurThread s\ \ + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (asUser (ksCurThread s) f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. 
atcbContextSet st (tcbArch ko)) ko))\))" + apply (clarsimp simp: asUser_def in_monad select_f_def) + apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) + apply (frule use_valid, assumption, rule refl) + apply clarsimp + apply (frule (2) threadGet_context) + apply (intro exI) + apply (erule conjI) + apply (rule exI, erule conjI) + apply (clarsimp simp: threadSet_def in_monad) + apply (frule use_valid, rule getObject_inv [where P="(=) s"]) + apply (simp add: loadObject_default_def) + apply wp + apply simp + apply (rule refl) + apply clarsimp + apply (frule (1) getObject_context) + apply (intro exI) + apply (erule conjI) + apply (clarsimp simp: setObject_def split_def updateObject_default_def threadGet_def + Corres_C.in_magnitude_check' getObject_def loadObject_default_def liftM_def + objBits_simps' projectKOs in_monad) + + apply (clarsimp simp: magnitudeCheck_def in_monad split: option.splits) + apply (rule conjI) + apply clarsimp + apply (cases s, simp) + apply (rule ext, clarsimp split: if_split) + apply (case_tac tcb, simp) + + apply clarsimp + apply (rule conjI) + apply (clarsimp simp add: lookupAround2_char2 split: if_split_asm) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs objBits_simps) + apply (erule_tac x=t in allE) + apply simp + apply (simp add: ps_clear_def) + apply (drule_tac x=t in orthD2) + apply fastforce + apply clarsimp + apply (erule impE, simp) + apply (simp flip: add_mask_fold) + apply (erule notE, rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply (erule_tac x=x2 in allE) + apply simp + apply (simp add: ps_clear_def) + apply (drule_tac x=x2 in orthD2) + apply fastforce + apply clarsimp + apply (erule impE) + apply (simp flip: add_mask_fold) + apply (rule word_diff_ls'(3)) + apply unat_arith + apply (drule is_aligned_no_overflow) + apply simp + apply (erule impE, simp) + apply simp + apply (rule exI) + apply (rule conjI, fastforce) + apply clarsimp + apply (cases s, clarsimp) + apply (rule ext, clarsimp split: if_split) + apply (case_tac tcb, clarsimp) +done + + +lemma getMRs_rel_context: + "\getMRs_rel args buffer s; + (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; + ko_at' ko t s ; t \ ksCurThread s\ \ + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. 
atcbContextSet st (tcbArch ko)) ko))\)" + apply (clarsimp simp: getMRs_rel_def) + apply (rule exI, erule conjI) + apply (subst (asm) det_wp_use, rule det_wp_getMRs) + apply (simp add: cur_tcb'_def) + apply (subst det_wp_use, rule det_wp_getMRs) + apply (simp add: cur_tcb'_def) + apply (rule conjI) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs + objBits_simps ps_clear_def split: if_split) + apply (clarsimp simp: valid_ipc_buffer_ptr'_def split: option.splits) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def projectKOs obj_at'_real_def + objBits_simps ps_clear_def split: if_split) + apply (clarsimp simp: getMRs_def in_monad) + apply (frule use_valid, rule asUser_inv [where P="(=) s"]) + apply (wp mapM_wp' getRegister_inv)[1] + apply simp + apply clarsimp + apply (drule (1) asUser_context) + apply (wp mapM_wp' getRegister_inv)[1] + apply assumption + apply (intro exI) + apply (erule conjI) + apply (cases buffer) + apply (clarsimp simp: return_def) + apply clarsimp + apply (drule mapM_upd_inv [rotated -1]) + prefer 3 + apply fastforce + prefer 2 + apply wp + apply (clarsimp simp: loadWordUser_def in_monad stateAssert_def word_size + simp del: fun_upd_apply) + apply (rule conjI) + apply (clarsimp simp: pointerInUserData_def typ_at'_def ko_wp_at'_def + projectKOs ps_clear_def obj_at'_real_def + split: if_split) + apply (erule doMachineOp_context) +done + +lemma asUser_getMRs_rel: + "\(\s. t \ ksCurThread s) and getMRs_rel args buffer and cur_tcb' + and case_option \ valid_ipc_buffer_ptr' buffer \ + asUser t f \\_. getMRs_rel args buffer\" + apply (simp add: asUser_def) + apply (rule hoare_pre, wp) + apply (simp add: threadSet_def) + apply (simp add: setObject_def split_def updateObject_default_def) + apply wp + apply (simp del: fun_upd_apply) + apply (wp getObject_tcb_wp) + apply (wp threadGet_wp)+ + apply (clarsimp simp del: fun_upd_apply) + apply (drule obj_at_ko_at')+ + apply (clarsimp simp del: fun_upd_apply) + apply (rule exI, rule conjI, assumption) + apply (clarsimp split: if_split simp del: fun_upd_apply) + apply (erule getMRs_rel_context, simp) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs) + apply simp + done + + +lemma asUser_sysargs_rel: + "\\s. t \ ksCurThread s \ sysargs_rel args buffer s\ + asUser t f \\_. sysargs_rel args buffer\" + apply (cases buffer, simp_all add: sysargs_rel_def) + apply (rule hoare_pre) + apply (wp asUser_getMRs_rel hoare_valid_ipc_buffer_ptr_typ_at'|simp)+ +done + +lemma threadSet_same: + "\\s. \tcb'. ko_at' tcb' t s \ tcb = f tcb'\ threadSet f t \\rv. ko_at' tcb t\" + unfolding threadSet_def + by (wpsimp wp: setObject_tcb_strongest getObject_tcb_wp) fastforce + +lemma asUser_setRegister_ko_at': + "\obj_at' (\tcb'. tcb = tcbArch_update (\_. atcbContextSet (modify_registers (\regs. regs(r := v)) (atcbContextGet (tcbArch tcb'))) (tcbArch tcb')) tcb') dst\ + asUser dst (setRegister r v) \\rv. ko_at' (tcb::tcb) dst\" + unfolding asUser_def + apply (wpsimp wp: threadSet_same threadGet_wp) + apply (clarsimp simp: setRegister_def simpler_modify_def obj_at'_def modify_registers_def) + done + +lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: + notes hoare_weak_lift_imp [wp] word_less_1[simp del] + shows + "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and tcb_at' dst and ex_nonz_cap_to' dst and sch_act_simple + and sysargs_rel args buffer + and (\s. dst \ ksCurThread s) + and (\_. values = take someNum (drop 2 args) + \ someNum + 2 \ length args)) + ({s. 
unat (n_' s) = length values} \ S + \ {s. unat (n_' s) = length values} + \ {s. dest___ptr_to_struct_tcb_C_' s = tcb_ptr_to_ctcb_ptr dst} + \ {s. resumeTarget_' s = from_bool resume} + \ {s. buffer_' s = option_to_ptr buffer}) [] + (invokeTCB (WriteRegisters dst resume values arch)) + (Call invokeTCB_WriteRegisters_'proc)" + supply empty_fail_cond[simp] + apply (rule ccorres_gen_asm) + apply (erule conjE) + apply (cinit lift: n_' dest___ptr_to_struct_tcb_C_' resumeTarget_' buffer_' + simp: whileAnno_def) + (* using S not univ seems to stop cinit doing this? *) + apply (csymbr, csymbr, csymbr, csymbr) + apply (simp add: liftE_def bind_assoc + del: Collect_const) + apply (rule ccorres_pre_getCurThread) + apply (rule_tac P="\a. ccorres_underlying rf_sr \ r' xf arrel axf P P' hs a c" for r' xf arrel axf P P' hs c in subst) + apply (rule liftE_bindE) + + apply (ctac add: Arch_performTransfer_ccorres) + apply (simp add: Collect_False whileAnno_def del: Collect_const) + apply (rule ccorres_add_return) + apply (rule_tac xf'=n_' and r'="\rv rv'. rv' = min n (scast n_frameRegisters + scast n_gpRegisters)" + in ccorres_split_nothrow) + apply (rule_tac P'="{s. n_' s = n}" in ccorres_from_vcg[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def min_def) + apply (simp add: linorder_not_less[symmetric] n_gpRegisters_def n_frameRegisters_def) + apply ceqv + apply (ctac add: Arch_getSanitiseRegisterInfo_ccorres) + apply (simp add: zipWithM_mapM split_def zip_append1 mapM_discarded mapM_x_append + del: Collect_const) + apply (simp add: asUser_bind_distrib getRestartPC_def setNextPC_def bind_assoc + del: Collect_const) + apply (simp only: getRestartPC_def[symmetric] setNextPC_def[symmetric]) + apply (simp add: asUser_mapM_x bind_assoc) + apply (rule ccorres_stateAssert) + apply (rule ccorres_rhs_assoc2, rule ccorres_split_nothrow_novcg) + apply (drule_tac t="archInfo" in sym, simp only:) + apply (rule_tac F="\n. sysargs_rel args buffer and sysargs_rel_n args buffer (n + 2) and + tcb_at' dst and (\s. dst \ ksCurThread s)" + and Q=UNIV in ccorres_mapM_x_whileQ) + apply clarsimp + apply (rule invokeTCB_WriteRegisters_ccorres_helper [where args=args]) + apply (simp add: unat_word_ariths frame_gp_registers_convs n_frameRegisters_def + unat_of_nat64 word_bits_def word_of_nat_less) + apply (simp add: n_frameRegisters_def n_gpRegisters_def + frame_gp_registers_convs word_less_nat_alt) + apply (simp add: unat_of_nat64 word_bits_def) + apply arith + apply clarsimp + apply (vcg exspec=setRegister_modifies exspec=getSyscallArg_modifies + exspec=sanitiseRegister_modifies) + apply clarsimp + apply (simp add: sysargs_rel_n_def) + apply (rule hoare_pre, wp asUser_sysargs_rel asUser_setRegister_ko_at') + apply (clarsimp simp: n_msgRegisters_def sysargs_rel_def) + apply (simp add: frame_gp_registers_convs n_frameRegisters_def word_bits_def) + apply simp + apply (rule ceqv_refl) + apply (rule ccorres_stateAssert) + apply (rule ccorres_rhs_assoc2, rule ccorres_split_nothrow_novcg) + apply (drule_tac t="archInfo" in sym, simp only:) + apply (rule_tac F="\n. sysargs_rel args buffer + and sysargs_rel_n args buffer (n + length AARCH64_H.frameRegisters + 2) + and tcb_at' dst and (\s. 
dst \ ksCurThread s)" + and Q=UNIV in ccorres_mapM_x_whileQ) + apply clarsimp + apply (rule invokeTCB_WriteRegisters_ccorres_helper [where args=args]) + apply (simp add: n_gpRegisters_def unat_word_ariths + frame_gp_registers_convs unat_of_nat64 + word_bits_def n_frameRegisters_def + word_of_nat_less word_less_1) + apply (simp add: n_frameRegisters_def n_gpRegisters_def + frame_gp_registers_convs unat_of_nat64 + word_less_nat_alt word_bits_def + less_diff_conv) + apply (simp add: unat_word_ariths cong: conj_cong) + apply clarsimp + apply (vcg exspec=setRegister_modifies exspec=getSyscallArg_modifies + exspec=sanitiseRegister_modifies) + apply clarsimp + apply (simp add: sysargs_rel_n_def) + apply (rule hoare_pre, wp asUser_sysargs_rel) + apply (clarsimp simp: n_msgRegisters_def frame_gp_registers_convs + n_frameRegisters_def) + apply arith + apply (simp add: AARCH64_H.gpRegisters_def size_gpRegisters word_bits_def + AARCH64_H.frameRegisters_def size_frameRegisters) + apply simp + apply (rule ceqv_refl) + apply (ctac(no_vcg) add: getRestartPC_ccorres) + apply simp + apply (ctac(no_vcg) add: setNextPC_ccorres) + apply (ctac (no_vcg) add: postModifyRegisters_ccorres) + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_when[where R=\]) + apply (simp add: from_bool_0 Collect_const_mem) + apply (rule_tac xf'=Corres_C.xfdc in ccorres_call) + apply (rule restart_ccorres) + apply simp + apply simp + apply simp + apply (rule ceqv_refl) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule_tac R="\s. self = ksCurThread s" + in ccorres_when) + apply (clarsimp simp: rf_sr_ksCurThread) + apply clarsimp + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) + apply (unfold return_returnOk)[1] + apply (rule ccorres_return_CE, simp+)[1] + apply wp + apply (simp add: guard_is_UNIV_def) + apply (wp hoare_drop_imp) + apply (rule_tac Q="\rv. invs' and tcb_at' dst" in hoare_strengthen_post[rotated]) + apply (fastforce simp: sch_act_wf_weak) + apply (wpsimp wp: restart_invs')+ + apply (clarsimp simp add: guard_is_UNIV_def) + apply (wp hoare_drop_imp hoare_vcg_if_lift)+ + apply simp + apply (rule mapM_x_wp') + apply (wpsimp) + apply (simp add: guard_is_UNIV_def) + apply (rule hoare_drop_imps) + apply (simp add: sysargs_rel_n_def) + apply (wp mapM_x_wp') + apply (rule hoare_pre, wp asUser_sysargs_rel) + apply clarsimp + apply wpsimp + apply (simp add: guard_is_UNIV_def) + apply (wp) + apply vcg + apply (wp threadGet_wp) + apply vcg + apply (rule ccorres_inst[where P=\ and P'=UNIV]) + apply simp + apply (simp add: performTransfer_def) + apply wp + apply clarsimp + apply vcg + apply (clarsimp simp: n_msgRegisters_def sysargs_rel_n_def invs_valid_objs' invs_no_0_obj' split: if_split) + apply (rule conjI) + apply (cases args, simp) + apply (case_tac list, simp) + apply (case_tac lista, clarsimp simp: unat_eq_0) + apply fastforce + apply (clarsimp simp: frame_gp_registers_convs word_less_nat_alt + sysargs_rel_def n_frameRegisters_def n_msgRegisters_def + split: if_split_asm) + apply (simp add: invs_weak_sch_act_wf invs_valid_objs') + apply (fastforce dest!: global'_no_ex_cap simp: invs'_def valid_state'_def) + done + +lemma invokeTCB_Suspend_ccorres: + "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple and tcb_at' t and ex_nonz_cap_to' t) + (UNIV \ {s. 
thread_' s = tcb_ptr_to_ctcb_ptr t}) [] + (invokeTCB (Suspend t)) (Call invokeTCB_Suspend_'proc)" + apply (cinit lift: thread_') + apply (simp add: liftE_def return_returnOk) + apply (ctac(no_vcg) add: suspend_ccorres[OF cteDeleteOne_ccorres]) + apply (rule ccorres_return_CE, simp+)[1] + apply wp + apply clarsimp + apply (auto simp: invs'_def valid_state'_def global'_no_ex_cap) + done + +lemma invokeTCB_Resume_ccorres: + "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and tcb_at' t and ex_nonz_cap_to' t and sch_act_simple) + (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr t}) [] + (invokeTCB (Resume t)) (Call invokeTCB_Resume_'proc)" + apply (cinit lift: thread_') + apply (simp add: liftE_def return_returnOk) + apply (ctac(no_vcg) add: restart_ccorres) + apply (rule ccorres_return_CE, simp+)[1] + apply wp + apply clarsimp + done + +lemma Arch_decodeTransfer_spec: + "\s. \ \ {s} Call Arch_decodeTransfer_'proc {s'. ret__unsigned_long_' s' = 0}" + by (vcg, simp) + +lemmas ccorres_split_nothrow_dc + = ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl] + +lemmas getRegister_ccorres_defer + = ccorres_defer[OF getRegister_ccorres, OF no_fail_asUser [OF no_fail_getRegister]] + +lemma msg_registers_convs: + "length AARCH64_H.msgRegisters = unat n_msgRegisters" + "n < length AARCH64_H.msgRegisters \ + index msgRegistersC n = register_from_H (AARCH64_H.msgRegisters ! n)" + apply (simp_all add: msgRegisters_unfold + AARCH64.msgRegisters_def n_msgRegisters_def + msgRegistersC_def fupdate_def Arrays.update_def) + apply (auto simp: less_Suc_eq fcp_beta) + done + +lemma mapM_x_split_append: + "mapM_x f xs = do _ \ mapM_x f (take n xs); mapM_x f (drop n xs) od" + using mapM_x_append[where f=f and xs="take n xs" and ys="drop n xs"] + by simp + +lemma ccorres_abstract_cong: + "\ \s s'. \ P s ; s' \ P'; (s, s') \ sr \ \ a s = b s \ \ + ccorres_underlying sr G r xf ar axf P P' hs a c + = ccorres_underlying sr G r xf ar axf P P' hs b c" + by (simp add: ccorres_underlying_def split_paired_Ball imp_conjL + cong: conj_cong xstate.case_cong) + +lemma is_aligned_the_x_strengthen: + "x \ None \ case_option \ valid_ipc_buffer_ptr' x s \ is_aligned (the x) msg_align_bits" + by (clarsimp simp: valid_ipc_buffer_ptr'_def) + +lemma valid_ipc_buffer_ptr_the_strengthen: + "x \ None \ case_option \ valid_ipc_buffer_ptr' x s \ valid_ipc_buffer_ptr' (the x) s" + by clarsimp + + +lemma lookupIPCBuffer_Some_0: + "\\\ lookupIPCBuffer w t \\rv s. rv \ Some 0\" + apply (simp add: lookupIPCBuffer_def + AARCH64_H.lookupIPCBuffer_def + Let_def getThreadBufferSlot_def + locateSlot_conv + cong: if_cong) + apply (wp haskell_assert_wp | wpc | simp)+ + done + +lemma asUser_valid_ipc_buffer_ptr': + "\ valid_ipc_buffer_ptr' p \ asUser t m \ \rv s. valid_ipc_buffer_ptr' p s \" + by (simp add: valid_ipc_buffer_ptr'_def, wp, auto simp: valid_ipc_buffer_ptr'_def) + +lemma invokeTCB_ReadRegisters_ccorres: +notes + nat_min_simps [simp del] + wordSize_def' [simp] + option.case_cong_weak [cong] + prod.case_cong_weak [cong] +shows + "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart) + and tcb_at' target and sch_act_simple and (\s. target \ ksIdleThread s) + and (\_. target \ thread)) + (UNIV + \ {s. tcb_src_' s = tcb_ptr_to_ctcb_ptr target} + \ {s. suspendSource_' s = from_bool susp} + \ {s. n_' s = n} + \ {s. 
call_' s = from_bool isCall}) [] + (doE reply \ invokeTCB (ReadRegisters target susp n archCp); + liftE (replyOnRestart thread reply isCall) odE) + (Call invokeTCB_ReadRegisters_'proc)" + supply empty_fail_cond[simp] + apply (rule ccorres_gen_asm) + apply (cinit' lift: tcb_src_' suspendSource_' n_' call_' + simp: invokeTCB_def liftE_bindE bind_assoc) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=thread_' in ccorres_abstract, ceqv) + apply (rename_tac cthread, + rule_tac P="cthread = tcb_ptr_to_ctcb_ptr thread" in ccorres_gen_asm2) + apply (rule ccorres_split_nothrow_dc) + apply (simp add: when_def del: Collect_const split del: if_split) + apply (rule ccorres_cond2[where R=\], simp add: from_bool_0 Collect_const_mem) + apply (ctac add: suspend_ccorres[OF cteDeleteOne_ccorres]) + apply (rule ccorres_return_Skip) + apply (rule ccorres_pre_getCurThread) + apply (simp only: liftE_bindE[symmetric]) + apply (ctac add: Arch_performTransfer_ccorres) + apply (simp add: liftE_bindE Collect_False + del: Collect_const) + apply (simp add: replyOnRestart_def liftE_def bind_assoc when_def + replyFromKernel_def if_to_top_of_bind setMRs_def + zipWithM_x_mapM_x asUser_mapM_x split_def + del: Collect_const cong: if_cong) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_getThreadState]) + apply (rule ccorres_if_lhs[OF _ ccorres_False[where P'=UNIV]]) + apply (rule ccorres_if_lhs) + apply (simp add: Collect_True whileAnno_def del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (ctac add: lookupIPCBuffer_ccorres) + apply (rename_tac state destIPCBuffer ipcBuffer) + apply (ctac add: setRegister_ccorres) + apply (rule ccorres_stateAssert) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac P="length reply + = min (unat n) (unat n_frameRegisters + unat n_gpRegisters)" + in ccorres_gen_asm) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac F="\m s. obj_at' (\tcb. 
map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n + (AARCH64_H.frameRegisters @ AARCH64_H.gpRegisters)) + = reply) target s" + in ccorres_mapM_x_while) + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return, + ctac add: getRegister_ccorres_defer[where thread=target]) + apply (ctac add: setRegister_ccorres) + apply wp + apply simp + apply (vcg exspec=getRegister_modifies) + apply (clarsimp simp: getRegister_def submonad_asUser.guarded_gets) + apply (clarsimp simp: simpler_gets_def obj_at'_weakenE[OF _ TrueI] + msg_registers_convs) + apply (cut_tac x=na in unat_of_nat64) + apply (simp add: word_bits_def n_msgRegisters_def) + apply (simp add: msg_registers_convs n_msgRegisters_def + word_of_nat_less) + apply (subgoal_tac "na < unat n_frameRegisters") + apply (intro conjI[rotated] allI impI) + apply (assumption | erule sym) + apply (rule frame_gp_registers_convs) + apply (simp add: frame_gp_registers_convs) + apply (drule obj_at_ko_at') + apply (clarsimp simp: obj_at'_def projectKOs asUser_fetch_def + frame_gp_registers_convs genericTake_def + nth_append + split: if_split) + apply (simp add: n_frameRegisters_def n_msgRegisters_def) + apply (simp add: frame_gp_registers_convs msg_registers_convs + n_msgRegisters_def n_frameRegisters_def + n_gpRegisters_def msgMaxLength_def msgLengthBits_def + split: option.split) + apply (simp add: min_def word_less_nat_alt split: if_split)[1] + apply arith + apply (rule allI, rule conseqPre, vcg exspec=setRegister_modifies + exspec=getRegister_modifies) + apply clarsimp + apply (wp asUser_obj_at') + apply simp + apply (simp add: word_bits_def msgMaxLength_def + msg_registers_convs n_msgRegisters_def) + apply ceqv + (* got to split reply into frameregisters part and gpregisters + part remaining, match against 2nd and 4th loop. 3rd loop + never executes with current configuration *) + apply (simp add: msg_registers_convs del: Collect_const) + apply (rule iffD2 [OF ccorres_abstract_cong]) + apply (rule bind_apply_cong[OF _ refl]) + apply (rule_tac n1="min (unat n_frameRegisters - unat n_msgRegisters) (unat n)" + in fun_cong [OF mapM_x_split_append]) + apply (rule_tac P="destIPCBuffer \ Some 0" in ccorres_gen_asm) + apply (subgoal_tac "(ipcBuffer = NULL) = (destIPCBuffer = None)") + prefer 2 + apply (clarsimp simp: option_to_ptr_def option_to_0_def + split: option.split_asm) + apply (simp add: bind_assoc del: Collect_const) + apply (rule_tac xf'=i_' and r'="\_ rv. unat rv = min (unat n_frameRegisters) + (min (unat n) + (case destIPCBuffer of None \ unat n_msgRegisters + | _ \ unat n_frameRegisters))" + in ccorres_split_nothrow_novcg) + apply (rule ccorres_Cond_rhs) + apply (rule ccorres_rel_imp, + rule_tac F="\m s. obj_at' (\tcb. 
map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n + (AARCH64_H.frameRegisters @ AARCH64_H.gpRegisters)) + = reply) target s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s + \ valid_pspace' s" + and i="unat n_msgRegisters" + in ccorres_mapM_x_while') + apply (intro allI impI, elim conjE exE, hypsubst, simp) + apply (simp add: less_diff_conv) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return, + ctac add: getRegister_ccorres_defer[where thread=target]) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, + rule ccorres_move_array_assertion_ipc_buffer))+ + apply (rule storeWordUser_ccorres) + apply wp + apply (vcg exspec=getRegister_modifies) + apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI] + word_size unat_gt_0 option_to_ptr_def + option_to_0_def) + apply (intro conjI[rotated] impI allI) + apply (simp add: n_msgRegisters_def n_frameRegisters_def + word_less_nat_alt) + apply (subst unat_add_lem[THEN iffD1], simp_all add: unat_of_nat)[1] + prefer 3 + apply (erule sym) + apply (simp add: n_msgRegisters_def msg_registers_convs + msg_align_bits msgMaxLength_def) + apply (simp(no_asm) add: unat_arith_simps unat_of_nat + cong: if_cong, simp) + apply (simp add: n_msgRegisters_def msg_registers_convs + msg_align_bits msgMaxLength_def) + apply (simp(no_asm) add: unat_arith_simps unat_of_nat + cong: if_cong, simp) + apply (simp add: option_to_ptr_def option_to_0_def + msg_registers_convs upto_enum_word wordSize_def' + del: upt.simps) + apply (rule frame_gp_registers_convs) + apply (simp add: frame_gp_registers_convs less_diff_conv) + apply (subst iffD1 [OF unat_add_lem]) + apply (simp add: n_msgRegisters_def n_frameRegisters_def + word_le_nat_alt unat_of_nat) + apply (simp add: n_frameRegisters_def n_msgRegisters_def + unat_of_nat) + apply (clarsimp simp: valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned) + apply (rule is_aligned_mult_triv2[where n=3, simplified]) + apply (simp add: msg_align_bits_def word_size_bits_def) + apply (clarsimp simp: getRegister_def submonad_asUser.guarded_gets + obj_at'_weakenE[OF _ TrueI]) + apply (clarsimp simp: asUser_fetch_def simpler_gets_def + obj_at'_def projectKOs genericTake_def + nth_append frame_gp_registers_convs) + apply (simp add: n_msgRegisters_def unat_of_nat n_frameRegisters_def) + apply (subst iffD1 [OF unat_add_lem]) + apply (simp add: unat_of_nat)+ + apply (clarsimp simp: less_diff_conv) + apply (simp add: frame_gp_registers_convs msg_registers_convs + n_msgRegisters_def n_frameRegisters_def + n_gpRegisters_def Types_H.msgMaxLength_def + Types_H.msgLengthBits_def + split: option.split) + apply (simp add: min_def word_less_nat_alt split: if_split)[1] + apply (simp split: if_split_asm, arith+)[1] + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (wp) + apply (clarsimp simp: less_diff_conv) + apply (simp add: word_bits_def n_msgRegisters_def n_frameRegisters_def + Types_H.msgLengthBits_def Types_H.msgMaxLength_def + msg_registers_convs) + apply (clarsimp simp: n_msgRegisters_def n_frameRegisters_def + msgMaxLength_def Types_H.msgLengthBits_def + n_gpRegisters_def msg_registers_convs + split: option.split_asm) + apply (simp add: min_def split: if_split_asm if_split) + apply (drule_tac s=rv'a in sym, simp) + apply (rule_tac P=\ and P'="{s. 
i_' s = rv'a}" in ccorres_inst) + apply clarsimp + apply (elim disjE impCE) + apply (clarsimp simp: word_le_nat_alt linorder_not_less) + apply (subst (asm) unat_of_nat64) + apply (simp add: n_msgRegisters_def word_bits_def) + apply (clarsimp simp: mapM_x_Nil) + apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') + apply (simp add: n_msgRegisters_def n_frameRegisters_def + n_gpRegisters_def cong: option.case_cong) + apply (simp add: min_def split: if_split option.split) + apply (simp add: mapM_x_Nil) + apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') + apply (simp add: n_msgRegisters_def n_frameRegisters_def + n_gpRegisters_def cong: option.case_cong) + apply (simp add: min_def split: if_split option.split) + apply (clarsimp simp only: unat_arith_simps, simp) + apply (clarsimp simp: less_diff_conv word_le_nat_alt linorder_not_less) + apply (subst(asm) unat_of_nat64) + apply (simp add: word_bits_def n_msgRegisters_def) + apply (simp add: mapM_x_Nil n_frameRegisters_def n_gpRegisters_def) + apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') + apply (simp add: n_msgRegisters_def n_frameRegisters_def + n_gpRegisters_def cong: option.case_cong) + apply ceqv + apply csymbr + apply csymbr + apply (rule iffD1[OF ccorres_expand_while_iff_Seq]) + apply (rule ccorres_cond_false) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac xf'=i_' in ccorres_abstract, ceqv) + apply (rename_tac i_c, rule_tac P="i_c = 0" in ccorres_gen_asm2) + apply (simp add: drop_zip del: Collect_const) + apply (rule ccorres_Cond_rhs) + apply (rule_tac F="\m s. obj_at' (\tcb. map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n + (AARCH64_H.frameRegisters @ AARCH64_H.gpRegisters)) + = reply) target s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" + and i="0" in ccorres_mapM_x_while') + apply (clarsimp simp: less_diff_conv drop_zip) + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return, + ctac add: getRegister_ccorres_defer[where thread=target]) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, + rule ccorres_move_array_assertion_ipc_buffer))+ + apply (rule storeWordUser_ccorres) + apply wp + apply (vcg exspec=getRegister_modifies) + apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI] + word_size unat_gt_0 option_to_ptr_def + option_to_0_def) + apply (intro conjI[rotated] impI allI) + apply (simp add: n_frameRegisters_def n_msgRegisters_def + length_msgRegisters word_of_nat_less + n_gpRegisters_def msgMaxLength_def) + prefer 3 + apply (erule sym) + apply (simp add: n_frameRegisters_def n_msgRegisters_def msg_registers_convs + msg_align_bits msgMaxLength_def) + apply (simp(no_asm) add: unat_arith_simps unat_of_nat + cong: if_cong, simp) + apply (simp add: n_frameRegisters_def n_msgRegisters_def msg_registers_convs + msg_align_bits msgMaxLength_def) + apply (simp(no_asm) add: unat_arith_simps unat_of_nat + cong: if_cong, simp) + apply (simp add: option_to_ptr_def option_to_0_def + msg_registers_convs upto_enum_word + n_msgRegisters_def n_frameRegisters_def + n_gpRegisters_def msgMaxLength_def msgLengthBits_def + del: upt.simps upt_rec_numeral) + apply (rule frame_gp_registers_convs) + apply (simp add: frame_gp_registers_convs n_msgRegisters_def n_frameRegisters_def + n_gpRegisters_def msgMaxLength_def msgLengthBits_def + unat_of_nat) + apply (clarsimp simp: valid_ipc_buffer_ptr'_def, + erule aligned_add_aligned) + apply (rule is_aligned_mult_triv2[where n=3, simplified]) + apply (simp add: msg_align_bits_def 
word_size_bits_def) + apply (clarsimp simp: getRegister_def submonad_asUser.guarded_gets + obj_at'_weakenE[OF _ TrueI]) + apply (clarsimp simp: asUser_fetch_def simpler_gets_def + obj_at'_def projectKOs genericTake_def + nth_append frame_gp_registers_convs + n_frameRegisters_def n_gpRegisters_def + n_msgRegisters_def frame_gp_registers_convs + cong: if_cong split: if_split) + apply (clarsimp simp: frame_gp_registers_convs n_gpRegisters_def + min.absorb1 unat_of_nat) + apply (clarsimp simp: less_diff_conv) + apply (clarsimp simp: nth_append frame_gp_registers_convs + n_frameRegisters_def n_gpRegisters_def + n_msgRegisters_def frame_gp_registers_convs + Types_H.msgMaxLength_def Types_H.msgLengthBits_def + msg_registers_convs + cong: if_cong split: if_split) + apply (simp add: word_less_nat_alt unat_of_nat) + apply (simp add: iffD1[OF unat_add_lem] cong: conj_cong) + apply (simp add: min_def + split: if_split if_split_asm, + unat_arith, + fastforce simp: unat_eq_0)[1] + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply wp + apply (simp add: word_bits_def n_frameRegisters_def n_gpRegisters_def + n_msgRegisters_def) + apply (simp add: min_less_iff_disj less_imp_diff_less) + apply (simp add: drop_zip n_gpRegisters_def) + apply (elim disjE impCE) + apply (clarsimp simp: mapM_x_Nil cong: ccorres_all_cong) + apply (rule ccorres_return_Skip') + apply (simp add: linorder_not_less word_le_nat_alt drop_zip + mapM_x_Nil n_frameRegisters_def n_msgRegisters_def + cong: ccorres_all_cong) + apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') + apply simp + apply ceqv + apply csymbr + apply (rule ccorres_rhs_assoc2) + apply (ctac (no_vcg) add: setMessageInfo_ccorres) + apply (ctac (no_vcg)) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wp | simp add: valid_tcb_state'_def)+ + apply (clarsimp simp: ThreadState_defs mask_def) + apply (rule mapM_x_wp') + apply (rule hoare_pre) + apply (wp sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift) + apply clarsimp + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) + apply (simp add: message_info_to_H_def) + apply (clarsimp simp: n_frameRegisters_def n_msgRegisters_def + n_gpRegisters_def field_simps upto_enum_word + word_less_nat_alt Types_H.msgMaxLength_def + Types_H.msgLengthBits_def + simp del: upt.simps + split: option.split_asm) + apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size + word_less_nat_alt + split: if_split_asm dest!: word_unat.Rep_inverse') + apply (clarsimp simp: length_msgRegisters n_msgRegisters_def) + apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size + word_less_nat_alt + split: if_split_asm dest!: word_unat.Rep_inverse') + apply (simp add: pred_conj_def) + apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift hoare_weak_lift_imp + tcb_in_cur_domain'_lift) + apply (simp add: n_frameRegisters_def n_msgRegisters_def + guard_is_UNIV_def) + apply simp + apply (rule mapM_x_wp') + apply (rule hoare_pre) + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp + asUser_valid_ipc_buffer_ptr') + apply clarsimp + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem + msg_registers_convs n_msgRegisters_def + n_frameRegisters_def n_gpRegisters_def + msgMaxLength_def msgLengthBits_def + word_less_nat_alt unat_of_nat) + apply (wp (once) hoare_drop_imps) + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp + asUser_valid_ipc_buffer_ptr') + apply (vcg 
exspec=setRegister_modifies) + apply simp + apply (strengthen valid_ipc_buffer_ptr_the_strengthen) + apply simp + apply (wp lookupIPCBuffer_Some_0 | wp (once) hoare_drop_imps)+ + apply (simp add: Collect_const_mem AARCH64_H.badgeRegister_def + AARCH64.badgeRegister_def AARCH64.capRegister_def + "StrictC'_register_defs") + apply (vcg exspec=lookupIPCBuffer_modifies) + apply simp + apply (ctac(no_vcg) add: setThreadState_ccorres) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (simp add: return_def) + apply wp+ + apply (simp cong: rev_conj_cong) + apply wp + apply (wp asUser_inv mapM_wp' getRegister_inv + asUser_get_registers[simplified] hoare_weak_lift_imp)+ + apply (rule hoare_strengthen_post, rule asUser_get_registers) + apply (clarsimp simp: obj_at'_def genericTake_def + frame_gp_registers_convs) + apply arith + apply (wp hoare_weak_lift_imp) + apply simp + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply (simp add: performTransfer_def) + apply wp + apply (simp add: Collect_const_mem ThreadState_defs mask_def) + apply vcg + apply (rule_tac Q="\rv. invs' and st_tcb_at' ((=) Restart) thread + and tcb_at' target" in hoare_post_imp) + apply (clarsimp simp: pred_tcb_at') + apply (auto elim!: pred_tcb'_weakenE)[1] + apply (wp suspend_st_tcb_at') + apply (vcg exspec=suspend_modifies) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def + split: if_split) + done + +lemma decodeReadRegisters_ccorres: + "ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple and (\s. ksCurThread s = thread) and ct_active' + and sysargs_rel args buffer + and tcb_at' (capTCBPtr cp) and ex_nonz_cap_to' (capTCBPtr cp) + and K (isThreadCap cp)) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. call_' s = from_bool isCall} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeReadRegisters args cp + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeReadRegisters_'proc)" + apply (cinit' lift: cap_' length___unsigned_long_' call_' buffer_') + apply (simp add: decodeReadRegisters_def decodeTransfer_def + del: Collect_const cong: list.case_cong) + apply wpc + apply (drule word_unat.Rep_inverse') + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply wpc + apply (drule word_unat.Rep_inverse') + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: word_less_nat_alt Collect_False del: Collect_const) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply (rule ccorres_move_const_guards)+ + apply (simp add: rangeCheck_def unlessE_whenE whenE_def if_to_top_of_bindE + ccorres_seq_cond_raise if_to_top_of_bind + del: Collect_const) + apply (rule ccorres_cond2[where R=\]) + apply (simp add: frame_gp_registers_convs n_frameRegisters_def + n_gpRegisters_def Collect_const_mem) + apply unat_arith + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def intr_and_se_rel_def + syscall_error_rel_def syscall_error_to_H_cases + exception_defs Collect_const_mem) + apply (simp add: frame_gp_registers_convs n_frameRegisters_def + n_gpRegisters_def) + apply (simp add: ccorres_invocationCatch_Inr returnOk_bind + performInvocation_def + del: Collect_const) + apply csymbr + apply csymbr + apply csymbr + apply (simp add: liftE_bindE bind_assoc) + apply (rule ccorres_pre_getCurThread) + apply (rule ccorres_cond_seq) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. 
capTCBPtr cp = self" + in ccorres_cond_both) + apply clarsimp + apply (frule rf_sr_ksCurThread) + apply clarsimp + apply (frule (1) cap_get_tag_isCap[symmetric, THEN iffD1]) + apply (drule (1) cap_get_tag_to_H) + apply clarsimp + apply (rule iffI) + apply (drule_tac t="ksCurThread s" in sym) + apply simp + apply simp + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) + apply simp + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) + apply (simp add: returnOk_bind) + apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (rule ccorres_Guard_Seq)+ + apply (rule ccorres_nondet_refinement) + apply (rule is_nondet_refinement_bindE) + apply (rule is_nondet_refinement_refl) + apply (simp split: if_split, rule conjI[rotated]) + apply (rule impI, rule is_nondet_refinement_refl) + apply (rule impI, rule is_nondet_refinement_alternative1) + apply (simp add: performInvocation_def) + apply (rule ccorres_add_returnOk) + apply (ctac(no_vcg) add: invokeTCB_ReadRegisters_ccorres) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (wp ct_in_state'_set sts_invs_minor') + apply (simp add: Collect_const_mem intr_and_se_rel_def + cintr_def exception_defs) + apply (vcg exspec=setThreadState_modifies) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread + ThreadState_defs word_sless_def word_sle_def + mask_eq_iff_w2p word_size isCap_simps + ReadRegistersFlags_defs tcb_at_invs' + cap_get_tag_isCap capTCBPtr_eq) + apply (frule global'_no_ex_cap[OF invs_valid_global'], clarsimp) + apply (rule conjI) + apply clarsimp + apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def) + apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def) + apply (auto simp: ct_in_state'_def n_frameRegisters_def n_gpRegisters_def + valid_tcb_state'_def + elim!: pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread')[1] + apply (clarsimp simp: word_and_1 split: if_split) + done + +lemma decodeWriteRegisters_ccorres: + "ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple + and (\s. ksCurThread s = thread) and ct_active' and K (isThreadCap cp) + and valid_cap' cp and (\s. \x \ zobj_refs' cp. ex_nonz_cap_to' x s) + and sysargs_rel args buffer) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeWriteRegisters args cp + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeWriteRegisters_'proc)" + supply unsigned_numeral[simp del] + apply (cinit' lift: cap_' length___unsigned_long_' buffer_' simp: decodeWriteRegisters_def) + apply (rename_tac length' cap') + apply (rule ccorres_Cond_rhs_Seq) + apply wpc + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: word_less_nat_alt) + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: word_less_nat_alt del: Collect_const) + apply (rule_tac P="\a. ccorres rvr xf P P' hs a c" for rvr xf P P' hs c in ssubst, + rule bind_cong [OF _ refl], rule list_case_helper, + clarsimp)+ + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return) + apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply (rule_tac P="unat (of_nat (length args) :: machine_word) = length args" + in ccorres_gen_asm) + apply (simp add: unat_sub word_le_nat_alt genericLength_def + word_less_nat_alt hd_drop_conv_nth2 + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: whenE_def decodeTransfer_def returnOk_bind del: Collect_const) + apply csymbr + apply csymbr + apply csymbr + apply (simp add: liftE_bindE bind_assoc) + apply (rule ccorres_pre_getCurThread) + apply (rule ccorres_cond_seq) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) + apply clarsimp + apply (frule rf_sr_ksCurThread) + apply clarsimp + apply (frule (1) cap_get_tag_isCap[symmetric, THEN iffD1]) + apply (drule (1) cap_get_tag_to_H) + apply clarsimp + apply (rule iffI) + apply (drule_tac t="ksCurThread s" in sym) + apply simp + apply simp + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) + apply simp + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) + apply (simp add: returnOk_bind) + apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) + apply (ctac add: setThreadState_ccorres) + apply (rule ccorres_Guard_Seq)+ + apply (simp add: performInvocation_def) + apply (ctac(no_vcg) add: invokeTCB_WriteRegisters_ccorres + [where args=args and someNum="unat (args ! 
1)"]) + apply simp + apply (rule ccorres_alternative2, rule ccorres_return_CE, simp+) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp[1] + apply simp + apply (wp sts_invs_minor' setThreadState_sysargs_rel) + apply (simp add: Collect_const_mem cintr_def intr_and_se_rel_def + exception_defs) + apply (vcg exspec=setThreadState_modifies) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp simp: Collect_const_mem ct_in_state'_def pred_tcb_at') + apply (simp add: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) + apply (clarsimp simp: valid_cap'_def ThreadState_defs + mask_eq_iff_w2p word_size rf_sr_ksCurThread + WriteRegisters_resume_def word_sle_def word_sless_def + numeral_eqs) + apply (frule arg_cong[where f="\x. unat (of_nat x :: machine_word)"], + simp(no_asm_use) only: word_unat.Rep_inverse, + simp) + apply (rule conjI) + apply clarsimp + apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def word_less_nat_alt) + apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def word_less_nat_alt) + apply (auto simp: genericTake_def cur_tcb'_def linorder_not_less word_le_nat_alt + valid_tcb_state'_def + elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] + apply (intro allI impI) + apply (rule disjCI2) + apply (clarsimp simp: genericTake_def linorder_not_less) + apply (subst hd_conv_nth, clarsimp simp: unat_eq_0) + apply (clarsimp simp: word_and_1 split: if_split) + done + +lemma excaps_map_Nil: "(excaps_map caps = []) = (caps = [])" + by (simp add: excaps_map_def) + +lemma decodeCopyRegisters_ccorres: + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple + and (\s. ksCurThread s = thread) and ct_active' + and (excaps_in_mem extraCaps o ctes_of) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and tcb_at' (capTCBPtr cp) and ex_nonz_cap_to' (capTCBPtr cp) + and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). + ex_nonz_cap_to' y s) + and sysargs_rel args buffer + and K (isThreadCap cp)) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeCopyRegisters args cp (map fst extraCaps) + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeCopyRegisters_'proc)" + apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeCopyRegisters_def) + apply csymbr + apply wpc + apply (simp add: if_1_0_0 unat_eq_0) + apply (rule ccorres_cond_true_seq) + apply (simp add: invocationCatch_def throwError_bind + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp del: Collect_const) + apply (subst unat_eq_0[symmetric], simp add: Collect_False del: Collect_const) + apply csymbr + apply (simp add: interpret_excaps_test_null decodeTransfer_def + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: excaps_map_def invocationCatch_def throwError_bind null_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: excaps_map_def null_def del: Collect_const) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_symb_exec_r) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply csymbr + apply (simp add: cap_get_tag_isCap del: Collect_const) + apply (rule ccorres_assert2) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule_tac P="Q' (capTCBPtr rva) rva" for Q' + in ccorres_inst) + apply (rule ccorres_rhs_assoc)+ + apply (csymbr, csymbr) + apply (simp add: hd_map del: Collect_const, + simp add: hd_conv_nth del: Collect_const) + apply (simp only: cap_get_tag_isCap[symmetric], + drule(1) cap_get_tag_to_H) + apply (simp add: case_bool_If if_to_top_of_bindE + if_to_top_of_bind + del: Collect_const cong: if_cong) + apply (simp add: returnOk_bind Collect_True + ccorres_invocationCatch_Inr performInvocation_def + del: Collect_const) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (rule ccorres_Guard_Seq)+ + apply (ctac add: invokeTCB_CopyRegisters_ccorres) + apply simp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (simp add: cintr_def intr_and_se_rel_def exception_defs) + apply (vcg exspec=invokeTCB_CopyRegisters_modifies) + apply (wp sts_invs_minor') + apply (simp add: Collect_const_mem) + apply (vcg exspec=setThreadState_modifies) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (simp add: hd_map isCap_simps hd_conv_nth) + apply (clarsimp simp: invocationCatch_def throwError_bind + split: capability.split, + auto simp: throwError_def return_def intr_and_se_rel_def + syscall_error_rel_def syscall_error_to_H_cases + exception_defs)[1] + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (simp add: Collect_const_mem if_1_0_0 cap_get_tag_isCap) + apply vcg + apply (simp add: Collect_const_mem) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wp + apply (simp add: Collect_const_mem) + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp simp: Collect_const_mem excaps_map_Nil) + apply (rule conjI) + apply (clarsimp simp: excaps_in_mem_def neq_Nil_conv + slotcap_in_mem_def cte_wp_at_ctes_of + ct_in_state'_def pred_tcb_at') + apply (rule conjI) + 
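+    (* hedged note: this step appears to discharge the sysargs_rel_n side condition from getSyscallArg on argument 0, which lies below n_msgRegisters and so needs no IPC buffer *)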
apply (clarsimp simp: sysargs_rel_n_def n_msgRegisters_def) + apply (clarsimp simp: isCap_simps simp del: capMasterCap_maskCapRights) + apply (frule ctes_of_valid', clarsimp+) + apply (auto simp: valid_cap'_def excaps_map_def valid_tcb_state'_def + elim!: pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1] + apply (clarsimp simp: word_sle_def CopyRegistersFlags_defs word_sless_def + ThreadState_defs rf_sr_ksCurThread + split: if_split) + apply (drule interpret_excaps_eq) + apply (clarsimp simp: mask_def excaps_map_def split_def ccap_rights_relation_def + rightsFromWord_wordFromRights excaps_map_Nil) + apply (simp only: cap_get_tag_isCap[symmetric], + drule(1) cap_get_tag_to_H) + apply (clarsimp simp: cap_get_tag_isCap to_bool_def) + apply (auto simp: unat_eq_of_nat word_and_1_shiftls + word_and_1_shiftl [where n=3,simplified] cap_get_tag_isCap[symmetric] split: if_split_asm) + done + +method wrong_cap_throwError_ccorres = solves \ + (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + , vcg + , (rule conseqPre, vcg) + , (auto simp: syscall_error_rel_def syscall_error_to_H_cases isCap_simps + exception_defs throwError_def return_def if_1_0_0 + split: capability.split arch_capability.split if_split_asm)[1] + \ + +add_try_method wrong_cap_throwError_ccorres + +lemma checkValidIPCBuffer_ccorres: + "ccorres (syscall_error_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs') (UNIV \ {s. vptr_' s = vptr} \ {s. ccap_relation cp (cap_' s)}) [] + (checkValidIPCBuffer vptr cp) + (Call checkValidIPCBuffer_'proc)" + apply (simp add:checkValidIPCBuffer_def AARCH64_H.checkValidIPCBuffer_def) + apply (cases "isArchFrameCap cp \ \ isDeviceCap cp") + apply (simp only: isCap_simps isDeviceCap.simps, safe)[1] + apply (cinit lift: vptr_' cap_') + apply (simp add: AARCH64_H.checkValidIPCBuffer_def if_1_0_0 ipcBufferSizeBits_def + del: Collect_const) + apply csymbr + apply (rule ccorres_cond_false_seq) + apply simp + apply csymbr + apply (rule ccorres_cond_false_seq) + apply clarsimp + apply (simp only:Cond_if_mem) + apply (rule ccorres_Cond_rhs_Seq) + apply (clarsimp simp add: msgAlignBits_def mask_def whenE_def) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def exception_defs + syscall_error_rel_def syscall_error_to_H_cases) + apply (simp add: whenE_def msgAlignBits_def mask_def) + apply (rule ccorres_return_CE, simp+)[1] + apply (clarsimp simp: cap_get_tag_isCap isCap_simps + Collect_const_mem ccap_relation_page_is_device + word_sle_def) + apply (cases "isArchFrameCap cp") + apply (simp only: isCap_simps isDeviceCap.simps)[1] + apply clarsimp + apply (cinit lift: vptr_' cap_') + apply (simp add: AARCH64_H.checkValidIPCBuffer_def del: Collect_const) + apply csymbr + apply (rule ccorres_cond_false_seq) + apply simp + apply csymbr + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def exception_defs + syscall_error_rel_def syscall_error_to_H_cases) + apply (simp add: cap_get_tag_isCap isCap_simps ccap_relation_page_is_device) + apply (cinit' lift: vptr_' cap_') + apply csymbr + apply (simp add: AARCH64_H.checkValidIPCBuffer_def cap_get_tag_isCap) + apply (wpc; wrong_cap_throwError_ccorres) + apply clarsimp + done + +lemma slotCapLongRunningDelete_ccorres: + "ccorres ((=) \ from_bool) ret__unsigned_long_' invs' + (UNIV 
\ {s. slot_' s = cte_Ptr slot}) [] + (slotCapLongRunningDelete slot) (Call slotCapLongRunningDelete_'proc)" + supply subst_all [simp del] + apply (cinit lift: slot_') + apply (simp add: case_Null_If del: Collect_const) + apply (rule ccorres_pre_getCTE) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac P="cte_wp_at' ((=) cte) slot" + in ccorres_cross_over_guard) + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_if_lhs) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_split_throws, rule ccorres_return_C, simp+) + apply vcg + apply (rule ccorres_cond_false_seq) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac xf'=ret__unsigned_long_' in ccorres_split_nothrow_novcg) + apply (ctac add: isFinalCapability_ccorres[where slot=slot]) + apply (rule Seq_weak_ceqv) + apply (rule Cond_ceqv [OF _ ceqv_refl ceqv_refl]) + apply simp + apply (rule impI, rule sym, rule mem_simps) + apply (clarsimp simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_split_throws, rule ccorres_return_C, simp+) + apply vcg + apply (simp del: Collect_const) + apply (rule ccorres_move_c_guard_cte) + apply (rule_tac P="cte_wp_at' ((=) cte) slot" + in ccorres_from_vcg_throws[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: cte_wp_at_ctes_of return_def) + apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap + from_bool_0 + dest!: ccte_relation_ccap_relation) + apply (simp add: from_bool_def + split: bool.split) + apply (auto simp add: longRunningDelete_def isCap_simps + split: capability.split)[1] + apply simp + apply (wp hoare_drop_imps isFinalCapability_inv) + apply (clarsimp simp: Collect_const_mem guard_is_UNIV_def) + apply (rename_tac rv') + apply (case_tac rv'; clarsimp simp: false_def) + apply vcg + apply (rule conseqPre, vcg, clarsimp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap map_comp_Some_iff + dest!: ccte_relation_ccap_relation) + done + +definition + isValidVTableRoot_C :: "cap_C \ bool" +where + "isValidVTableRoot_C cap \ cap_get_tag cap = scast cap_vspace_cap + \ to_bool (capVSIsMapped_CL (cap_vspace_cap_lift cap))" + +lemma isVTableRoot_spec: + "\s. \ \ {s} Call isVTableRoot_'proc + {s'. ret__unsigned_long_' s' = from_bool (cap_get_tag (cap_' s) = scast cap_vspace_cap)}" + by vcg (clarsimp simp: from_bool_def split: if_split) + +(* Needs workaround to avoid the existing spec rule with ccap_relation *) +lemma isValidNativeRoot_spec': + "\s. \ \ {s} Call isValidNativeRoot_'proc + {s'. ret__unsigned_long_' s' = from_bool (isValidVTableRoot_C (cap_' s))}" + apply (rule HoarePartial.ProcNoRec1, simp add: isValidNativeRoot_impl) + apply (simp add: isValidNativeRoot_body_def) + apply vcg + apply (clarsimp simp: isValidVTableRoot_C_def from_bool_0 to_bool_def split: if_split) + apply (simp add: isValidNativeRoot_impl dom_def) + done + +lemma isValidVTableRoot_spec: + "\s. \ \ {s} Call isValidVTableRoot_'proc + {s'. 
ret__unsigned_long_' s' = from_bool (isValidVTableRoot_C (cap_' s))}" + by (vcg exspec=isValidNativeRoot_spec') simp + +lemma isValidVTableRoot_conv: + "\ ccap_relation cap cap' \ + \ isValidVTableRoot_C cap' = isValidVTableRoot cap" + apply (clarsimp simp: isValidVTableRoot_C_def + if_1_0_0 from_bool_0 isValidVTableRoot_def + AARCH64_H.isValidVTableRoot_def isVTableRoot_def) + apply (case_tac "isArchCap_tag (cap_get_tag cap')") + apply (clarsimp simp: cap_get_tag_isCap cap_get_tag_isCap_ArchObject) + apply (case_tac "cap_get_tag cap' = scast cap_vspace_cap") + apply (clarsimp split: arch_capability.split simp: isCap_simps) + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 + cap_vspace_cap_lift cap_to_H_def) + apply (clarsimp split: if_split) + apply (clarsimp simp: cap_get_tag_isCap cap_get_tag_isCap_ArchObject) + apply (simp split: arch_capability.split_asm add: isCap_simps) + apply (case_tac "cap_get_tag cap' = scast cap_vspace_cap") + apply (clarsimp simp: cap_vspace_cap_def isArchCap_tag_def2) + apply (clarsimp simp: cap_get_tag_isCap split: capability.split_asm) + done + +lemma updateCapData_spec: + "\cap preserve newData. \\ \ccap_relation cap \cap \ preserve = to_bool \preserve \ newData = \newData\ + Call updateCapData_'proc + \ccap_relation (RetypeDecls_H.updateCapData preserve newData cap) \ret__struct_cap_C\" + by (simp add: updateCapData_spec) + +lemma length_excaps_map: + "length (excaps_map xcs) = length xcs" + by (simp add: excaps_map_def) + +lemma getSyscallArg_ccorres_foo': + "ccorres (\a rv. rv = ucast (args ! n)) (\x. ucast (ret__unsigned_long_' x)) + (sysargs_rel args buffer and sysargs_rel_n args buffer n) + (UNIV \ \unat \i = n\ \ \\ipc_buffer = option_to_ptr buffer\) [] + (return ()) (Call getSyscallArg_'proc)" + apply (insert getSyscallArg_ccorres_foo + [where args=args and n=n and buffer=buffer]) + apply (clarsimp simp: ccorres_underlying_def) + apply (erule (1) my_BallE) + apply clarsimp + apply (erule allE, erule allE, erule (1) impE) + apply (clarsimp simp: return_def unif_rrel_def split: xstate.splits) + done + +lemma scast_mask_8: + "scast (mask 8 :: sword32) = (mask 8 :: word32)" + by (clarsimp simp: mask_def) + +lemma tcb_at_capTCBPtr_CL: + "ccap_relation cp cap \ valid_cap' cp s + \ isThreadCap cp + \ tcb_at' (cap_thread_cap_CL.capTCBPtr_CL + (cap_thread_cap_lift cap) && ~~mask tcbBlockSizeBits) s" + apply (clarsimp simp: cap_get_tag_isCap[symmetric] + valid_cap_simps' + dest!: cap_get_tag_to_H) + apply (frule ctcb_ptr_to_tcb_ptr_mask[OF tcb_aligned'], simp) + done + +lemma checkPrio_ccorres: + "ccorres (syscall_error_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (tcb_at' auth) + (UNIV \ {s. prio_' s = prio} \ {s. auth_' s = tcb_ptr_to_ctcb_ptr auth}) [] + (checkPrio prio auth) + (Call checkPrio_'proc)" + apply (cinit lift: prio_' auth_') + apply (clarsimp simp: liftE_bindE) + apply (rule ccorres_split_nothrow_novcg[where r'="\rv rv'. 
rv' = ucast rv" and xf'=mcp_']) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: rf_sr_ksCurThread obj_at'_def projectKOs + typ_heap_simps' ctcb_relation_def) + apply ceqv + apply (simp add: whenE_def del: Collect_const split: if_split) + apply (rule conjI; clarsimp) + apply (rule ccorres_from_vcg_split_throws) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def syscall_error_rel_def syscall_error_to_H_cases + exception_defs Collect_const_mem rf_sr_ksCurThread return_def + seL4_MinPrio_def minPriority_def) + apply clarsimp + apply (rule ccorres_return_CE) + apply clarsimp+ + apply wp + apply (simp add: guard_is_UNIV_def)+ + done + +lemma mcpriority_tcb_at'_prio_bounded': + assumes "mcpriority_tcb_at' (\mcp. prio \ ucast mcp) t s" + "priorityBits \ len_of TYPE('a)" + shows "(prio::'a::len word) \ ucast (max_word :: priority)" + using assms + by (clarsimp simp: pred_tcb_at'_def obj_at'_def priorityBits_def ucast_le_ucast + simp del: unsigned_uminus1 + elim!: order.trans) + +lemmas mcpriority_tcb_at'_prio_bounded + = mcpriority_tcb_at'_prio_bounded'[simplified priorityBits_def] + +lemma decodeTCBConfigure_ccorres: + notes tl_drop_1[simp] scast_mask_8 [simp] + shows + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple + and (\s. ksCurThread s = thread) and ct_active' + and (excaps_in_mem extraCaps o ctes_of) + and valid_cap' cp and cte_at' slot and K (isThreadCap cp) + and ex_nonz_cap_to' (capTCBPtr cp) and tcb_at' (capTCBPtr cp) + and (\s. \v \ set extraCaps. + s \' fst v \ cte_at' (snd v) s) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. slot_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeTCBConfigure args cp slot extraCaps + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeTCBConfigure_'proc)" + apply (cinit' lift: cap_' length___unsigned_long_' slot_' current_extra_caps_' buffer_' + simp: decodeTCBConfigure_def) + apply csymbr + apply (clarsimp cong: StateSpace.state.fold_congs globals.fold_congs + simp del: Collect_const + simp add: interpret_excaps_test_null2 excaps_map_Nil) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_cond_true_seq | simp)+ + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: if_1_0_0 word_less_nat_alt) + apply (clarsimp split: list.split + simp: throwError_def invocationCatch_def fst_return + intr_and_se_rel_def + Collect_const_mem syscall_error_rel_def + exception_defs syscall_error_to_H_cases) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_cond_true_seq | simp)+ + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: if_1_0_0 word_less_nat_alt) + apply (clarsimp split: list.split + simp: throwError_def invocationCatch_def fst_return + intr_and_se_rel_def + Collect_const_mem syscall_error_rel_def + exception_defs syscall_error_to_H_cases) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_cond_true_seq | simp)+ + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: if_1_0_0 word_less_nat_alt) + apply (clarsimp split: list.split + simp: throwError_def invocationCatch_def fst_return + intr_and_se_rel_def excaps_map_def + Collect_const_mem syscall_error_rel_def + exception_defs syscall_error_to_H_cases) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_cond_true_seq | simp)+ + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: if_1_0_0 word_less_nat_alt) + apply (clarsimp split: list.split + simp: throwError_def invocationCatch_def fst_return + intr_and_se_rel_def excaps_map_def + Collect_const_mem syscall_error_rel_def + exception_defs syscall_error_to_H_cases) + apply (simp add: if_1_0_0 word_less_nat_alt linorder_not_less + del: Collect_const) + apply (rename_tac length' cap') + apply (subgoal_tac "length extraCaps \ 3") + prefer 2 + apply (clarsimp simp: idButNot_def interpret_excaps_test_null + excaps_map_def neq_Nil_conv) + apply (thin_tac "P \ index exc n \ NULL" for P exc n)+ + apply (rule_tac P="\a. 
ccorres rvr xf P P' hs a c" for rvr xf P P' hs c in ssubst, + rule bind_cong [OF _ refl], rule list_case_helper, clarsimp)+ + apply (simp add: hd_drop_conv_nth2 del: Collect_const) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo'[where args=args and n=1 and buffer=buffer]) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer]) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo[where args=args and n=3 and buffer=buffer]) + apply csymbr + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply csymbr + apply (rule ccorres_move_c_guard_cte) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=1]) + apply ctac + apply (rule ccorres_assert2) + apply csymbr + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=2]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply (simp add: decodeSetIPCBuffer_def split_def + bindE_assoc invocationCatch_use_injection_handler + injection_bindE[OF refl refl] injection_handler_returnOk + injection_handler_If + del: Collect_const cong: if_cong) + apply (rule_tac xf'="\s. (bufferCap_' s, bufferSlot_' s)" and + r'="\v (cp', sl'). case v of None \ args ! 3 = 0 \ sl' = cte_Ptr 0 + | Some (cp, sl) \ ccap_relation cp cp' + \ args ! 3 \ 0 + \ sl' = cte_Ptr sl" + in ccorres_splitE) + apply (rule ccorres_cond2[where R=\]) + apply (clarsimp simp add: Collect_const_mem numeral_eqs) + apply (rule_tac P="\s. args ! 3 = 0" in ccorres_from_vcg[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_split_nothrowE) + apply (simp add: numeral_eqs) + apply (ctac add: ccorres_injection_handler_csum1[OF deriveCap_ccorres]) + apply ceqv + apply simp + apply (clarsimp simp: numeral_eqs) + apply csymbr + apply (rule ccorres_split_nothrowE) + apply (ctac add: ccorres_injection_handler_csum1[OF checkValidIPCBuffer_ccorres]) + apply ceqv + apply (match premises in "ccap_relation _ (deriveCap_ret_C.cap_C ccap)" for ccap + \ \rule ccorres_from_vcg + [where P'="{s. bufferCap_' s = (deriveCap_ret_C.cap_C ccap) + \ bufferSlot_' s = cte_Ptr (snd (extraCaps ! 2))}" + and P="\s. args ! 3 \ 0"]\) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def numeral_eqs) + apply (rule_tac P'="{s. err' = errstate s}" + in ccorres_from_vcg_throws[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def + syscall_error_rel_def + intr_and_se_rel_def) + apply wp + apply simp + apply (vcg exspec=checkValidIPCBuffer_modifies) + apply simp + apply (rule_tac P'="{s. 
err' = errstate s}" + in ccorres_from_vcg_split_throws[where P=\]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def + intr_and_se_rel_def syscall_error_rel_def) + apply simp + apply (wp injection_wp_E [OF refl]) + apply (simp add: all_ex_eq_helper Collect_const_mem numeral_eqs) + apply (vcg exspec=deriveCap_modifies) + apply (rule ceqv_tuple2, ceqv, ceqv) + apply (rename_tac rv'dc) + apply (rule_tac P="P (fst rv'dc) (snd rv'dc)" + and P'="P' (fst rv'dc) (snd rv'dc)" + for P P' in ccorres_inst) + apply (clarsimp simp: tcb_cnode_index_defs + [THEN ptr_add_assertion_positive + [OF ptr_add_assertion_positive_helper]] + simp del: Collect_const) + apply csymbr + apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ + apply (simp add: decodeSetSpace_def injection_bindE[OF refl] split_def + del: Collect_const) + apply (simp add: injection_liftE[OF refl] bindE_assoc + liftM_def getThreadCSpaceRoot + getThreadVSpaceRoot del: Collect_const) + apply (simp add: liftE_bindE del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: slotCapLongRunningDelete_ccorres) + apply (rule ccorres_move_array_assertion_tcb_ctes) + apply (simp del: Collect_const) + apply csymbr + apply (clarsimp simp add: if_1_0_0 from_bool_0 + simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_symb_exec_l3 + [OF _ _ _ empty_fail_slotCapLongRunningDelete]) + apply (simp add: unlessE_def injection_handler_throwError + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_cond_true_seq) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply wp+ + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (simp del: Collect_const) + apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq + ccorres_rhs_assoc)+ + apply (ctac add: slotCapLongRunningDelete_ccorres) + apply (rule ccorres_move_array_assertion_tcb_ctes) + apply (simp del: Collect_const) + apply csymbr + apply (clarsimp simp add: if_1_0_0 from_bool_0 + simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: unlessE_def injection_handler_throwError + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: unlessE_def injection_handler_returnOk + del: Collect_const) + apply (rule ccorres_add_return, + rule_tac r'="\rv rv'. ccap_relation + (if args ! 1 = 0 + then fst (hd extraCaps) + else updateCapData False (args ! 1) (fst (hd extraCaps))) rv'" + and xf'="cRootCap_'" + in ccorres_split_nothrow) + apply (rule_tac P'="{s. cRootCap = cRootCap_' s}" + in ccorres_from_vcg[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (subgoal_tac "extraCaps \ []") + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) + apply fastforce + apply clarsimp + apply ceqv + apply (ctac add: ccorres_injection_handler_csum1 + [OF deriveCap_ccorres]) + apply (simp add: Collect_False del: Collect_const) + apply (csymbr, csymbr) + apply (simp add: cap_get_tag_isCap cnode_cap_case_if + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_throwError + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: injection_handler_returnOk del: Collect_const) + apply (rule ccorres_add_return, + rule_tac r'="\rv rv'. 
ccap_relation + (if args ! 2 = 0 + then fst (extraCaps ! Suc 0) + else updateCapData False (args ! 2) (fst (extraCaps ! Suc 0))) rv'" + and xf'="vRootCap_'" + in ccorres_split_nothrow) + apply (rule_tac P'="{s. vRootCap = vRootCap_' s}" + in ccorres_from_vcg[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def + hd_drop_conv_nth2) + apply fastforce + apply ceqv + apply (ctac add: ccorres_injection_handler_csum1 + [OF deriveCap_ccorres]) + apply (simp add: Collect_False del: Collect_const) + apply csymbr + apply csymbr + apply (simp add: from_bool_0 isValidVTableRoot_conv del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_throwError + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: injection_handler_returnOk ccorres_invocationCatch_Inr + performInvocation_def) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (ctac(no_vcg) add: invokeTCB_ThreadControl_ccorres) + apply (simp, rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+) + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (simp add: o_def) + apply (wp sts_invs_minor' hoare_case_option_wp) + apply (simp add: Collect_const_mem cintr_def intr_and_se_rel_def + exception_defs + cong: option.case_cong) + apply (vcg exspec=setThreadState_modifies) + apply simp + apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) + apply vcg + apply simp + apply (wp injection_wp_E[OF refl] hoare_drop_imps) + apply (vcg exspec=deriveCap_modifies) + apply (simp add: pred_conj_def cong: if_cong) + apply wp + apply (simp add: Collect_const_mem) + apply (vcg) + apply simp + apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) + apply vcg + apply (simp cong: if_cong) + apply (wp injection_wp_E[OF refl] hoare_drop_imps) + apply (simp add: Collect_const_mem intr_and_se_rel_def + syscall_error_rel_def exception_defs + cong: option.case_cong sum.case_cong) + apply (simp add: all_ex_eq_helper numeral_eqs) + apply (vcg exspec=deriveCap_modifies) + apply (simp cong: if_cong) + apply wp + apply (simp add: Collect_const_mem del: Collect_const) + apply vcg + apply (simp cong: if_cong) + apply (wp | wp (once) hoare_drop_imps)+ + apply (simp add: Collect_const_mem all_ex_eq_helper + cong: option.case_cong) + apply (vcg exspec=slotCapLongRunningDelete_modifies) + apply (simp cong: if_cong) + apply (wp | wp (once) hoare_drop_imps)+ + apply (simp add: Collect_const_mem) + apply (vcg exspec=slotCapLongRunningDelete_modifies) + apply (simp add: pred_conj_def cong: if_cong) + apply (wp injection_wp_E[OF refl] checkValidIPCBuffer_ArchObject_wp) + apply simp + apply (wp | wp (once) hoare_drop_imps)+ + apply (simp add: Collect_const_mem all_ex_eq_helper) + apply (rule_tac P="{s. cRootCap_' s = cRootCap \ vRootCap_' s = vRootCap + \ bufferAddr_' s = args ! 
3 + \ ccap_relation cp cap' \ isThreadCap cp + \ is_aligned (capTCBPtr cp) tcbBlockSizeBits + \ ksCurThread_' (globals s) = tcb_ptr_to_ctcb_ptr thread}" + in conseqPre) + apply (simp add: cong: option.case_cong) + apply (vcg exspec=deriveCap_modifies exspec=checkValidIPCBuffer_modifies) + apply (clarsimp simp: excaps_map_def Collect_const_mem ccHoarePost_def + numeral_eqs + cong: option.case_cong) + apply (frule interpret_excaps_eq[rule_format, where n=0], clarsimp) + apply (frule interpret_excaps_eq[rule_format, where n=1], clarsimp) + apply (frule interpret_excaps_eq[rule_format, where n=2], clarsimp) + apply (clarsimp simp: mask_def[where n=4] ccap_rights_relation_def + rightsFromWord_wordFromRights capTCBPtr_eq + ptr_val_tcb_ptr_mask2[unfolded mask_def objBits_defs, simplified] + tcb_cnode_index_defs size_of_def + option_to_0_def rf_sr_ksCurThread + ThreadState_defs mask_eq_iff_w2p word_size + from_bool_all_helper all_ex_eq_helper + ucast_ucast_mask objBits_defs) + apply (subgoal_tac "args \ [] \ extraCaps \ []") + apply (simp add: word_sle_def cap_get_tag_isCap numeral_eqs + hd_conv_nth hd_drop_conv_nth2 + word_FF_is_mask split_def + thread_control_update_priority_def + thread_control_update_mcp_def + thread_control_update_space_def + thread_control_update_ipc_buffer_def) + apply (auto split: option.split elim!: inl_inrE)[1] + apply (fastforce+)[2] + apply clarsimp + apply (strengthen if_n_updateCapData_valid_strg) + apply (wp | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: Collect_const_mem all_ex_eq_helper + cong: option.case_cong) + apply vcg + apply simp + apply (wp | wp (once) hoare_drop_imps)+ + apply (simp add: Collect_const_mem all_ex_eq_helper) + apply vcg + apply simp + apply (wp | wp (once) hoare_drop_imps)+ + apply (wpsimp | vcg exspec=getSyscallArg_modifies)+ + apply (clarsimp simp: Collect_const_mem all_ex_eq_helper) + apply (rule conjI) + apply (clarsimp simp: idButNot_def interpret_excaps_test_null + excaps_map_def neq_Nil_conv) + apply (clarsimp simp: sysargs_rel_to_n word_less_nat_alt) + apply (frule invs_mdb') + apply (frule(2) tcb_at_capTCBPtr_CL) + apply (rule conjI, fastforce) + apply (drule interpret_excaps_eq) + apply (clarsimp simp: cte_wp_at_ctes_of valid_tcb_state'_def numeral_eqs le_ucast_ucast_le + tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + invs_pspace_aligned' invs_pspace_distinct' + ct_in_state'_def pred_tcb_at'_def obj_at'_def tcb_st_refs_of'_def) + apply (erule disjE; simp add: objBits_defs mask_def) + apply (clarsimp simp: idButNot_def interpret_excaps_test_null + excaps_map_def neq_Nil_conv word_sle_def word_sless_def) + apply (frule interpret_excaps_eq[rule_format, where n=0], simp) + apply (frule interpret_excaps_eq[rule_format, where n=1], simp) + apply (frule interpret_excaps_eq[rule_format, where n=2], simp) + apply (clarsimp simp: mask_def[where n=4] ccap_rights_relation_def + rightsFromWord_wordFromRights + capTCBPtr_eq tcb_ptr_to_ctcb_ptr_mask + tcb_cnode_index_defs size_of_def + option_to_0_def rf_sr_ksCurThread + ThreadState_defs mask_eq_iff_w2p word_size + from_bool_all_helper) + apply (frule(1) tcb_at_h_t_valid [OF tcb_at_invs']) + apply (clarsimp simp: typ_heap_simps numeral_eqs isCap_simps valid_cap'_def capAligned_def + objBits_simps) + done + +lemma not_isThreadCap_case: + "\\isThreadCap cap\ \ + (case cap of ThreadCap x \ f x | _ \ g) = g" + by (clarsimp simp: isThreadCap_def split: capability.splits) + +lemma decodeSetMCPriority_ccorres: + "\interpret_excaps extraCaps' = excaps_map extraCaps\ \ + ccorres (intr_and_se_rel 
\ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps \ ctes_of) + and (\s. \rf \ zobj_refs' cp. ex_nonz_cap_to' rf s) + and valid_cap' cp and K (isThreadCap cp) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and (\s. \v \ set extraCaps. + s \' fst v \ cte_at' (snd v) s) + and sysargs_rel args buffer) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. buffer_' s = option_to_ptr buffer}) [] + (decodeSetMCPriority args cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeSetMCPriority_'proc)" + supply Collect_const[simp del] + apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetMCPriority_def) + apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=ret__int_' and R'=UNIV and R=\ and + val="from_bool (length args = 0 \ length extraCaps = 0)" in + ccorres_symb_exec_r_known_rv) + apply vcg + apply (force simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 + split: bool.splits) + apply ceqv + apply clarsimp + apply wpc + (* Case args = [] *) + apply (rule ccorres_cond_true_seq) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (rule ccorres_rhs_assoc) + apply (ccorres_rewrite) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* Case args is Cons *) + apply wpc + (* Sub-case extraCaps = [] *) + apply (rule ccorres_cond_true_seq) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (rule ccorres_rhs_assoc) + apply (ccorres_rewrite) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* Main case where args and extraCaps are both Cons *) + apply (rule ccorres_cond_false_seq) + apply (simp add: split_def) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo'[where args=args and n=0 and buffer=buffer]) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply csymbr + apply (simp add: cap_get_tag_isCap cong: call_ignore_cong) + apply (rule ccorres_assert2) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_rhs_assoc) + apply ccorres_rewrite + apply (clarsimp simp: not_isThreadCap_case throwError_bind invocationCatch_def + simp del: id_simps) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: isCap_simps) + apply csymbr + apply csymbr + (* Pre-conditions need to depend on the inner value of the thread cap *) + apply (rule ccorres_inst[where P="Q (capTCBPtr (fst (extraCaps ! 0)))" and + P'="Q' (capTCBPtr (fst (extraCaps ! 
0)))" for Q Q']) + apply (clarsimp simp: capTCBPtr_eq isCap_simps invocationCatch_use_injection_handler + injection_bindE[OF refl refl] bindE_assoc injection_handler_returnOk) + apply (ctac add: ccorres_injection_handler_csum1[OF checkPrio_ccorres]) + apply (rule_tac P="hd args \ ucast (max_word :: priority)" + in ccorres_cross_over_guard_no_st) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def) + apply ccorres_rewrite + apply (ctac add: setThreadState_ccorres) + apply (simp add: invocationCatch_def) + apply ccorres_rewrite + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) + apply clarsimp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE; simp) + apply (rule ccorres_return_C_errorE; simp) + apply wp + apply (wpsimp wp: sts_invs_minor') + apply simp + apply (vcg exspec=setThreadState_modifies) + apply simp + apply (rename_tac err_c) + apply (rule_tac P'="{s. err_c = errstate s}" in ccorres_from_vcg_split_throws) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def intr_and_se_rel_def syscall_error_rel_def) + apply simp + apply (rule injection_handler_wp) + apply (rule checkPrio_wp[simplified validE_R_def]) + apply vcg + apply (wp | simp | wpc | wp (once) hoare_drop_imps)+ + apply vcg + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp simp: EXCEPTION_SYSCALL_ERROR_def EXCEPTION_NONE_def) + apply vcg + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: ct_in_state'_def pred_tcb_at' + valid_cap'_def isCap_simps) + apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def) + apply (clarsimp simp: maxPriority_def numPriorities_def FF_eq_minus_1) + apply (rule conjI, clarsimp) + apply (frule mcpriority_tcb_at'_prio_bounded, simp) + apply (auto simp: valid_tcb_state'_def le_ucast_ucast_le + elim!: obj_at'_weakenE pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread')[1] + apply (clarsimp simp: interpret_excaps_eq excaps_map_def) + apply (frule rf_sr_ksCurThread) + apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) + apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) + apply (intro conjI impI allI) + apply (clarsimp simp: unat_eq_0 le_max_word_ucast_id cap_get_tag_isCap_unfolded_H_cap isCap_simps)+ + done + +lemma decodeSetPriority_ccorres: + "\interpret_excaps extraCaps' = excaps_map extraCaps\ \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps \ ctes_of) + and (\s. \rf \ zobj_refs' cp. ex_nonz_cap_to' rf s) + and valid_cap' cp and K (isThreadCap cp) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and (\s. \v \ set extraCaps. + s \' fst v \ cte_at' (snd v) s) + and sysargs_rel args buffer) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeSetPriority args cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeSetPriority_'proc)" + supply Collect_const[simp del] + apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetPriority_def) + apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=ret__int_' and R'=UNIV and R=\ and + val="from_bool (length args = 0 \ length extraCaps = 0)" in + ccorres_symb_exec_r_known_rv) + apply vcg + apply (force simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 + split: bool.splits) + apply ceqv + apply clarsimp + apply wpc + (* Case args = [] *) + apply (rule ccorres_cond_true_seq) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (rule ccorres_rhs_assoc) + apply (ccorres_rewrite) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* Case args is Cons *) + apply wpc + (* Sub-case extraCaps = [] *) + apply (rule ccorres_cond_true_seq) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (rule ccorres_rhs_assoc) + apply (ccorres_rewrite) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + (* Main case where args and extraCaps are both Cons *) + apply (rule ccorres_cond_false_seq) + apply (simp add: split_def) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo'[where args=args and n=0 and buffer=buffer]) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply csymbr + apply (simp add: cap_get_tag_isCap cong: call_ignore_cong) + apply (rule ccorres_assert2) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_rhs_assoc) + apply ccorres_rewrite + apply (clarsimp simp: not_isThreadCap_case throwError_bind invocationCatch_def + simp del: id_simps) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: isCap_simps) + apply csymbr + apply csymbr + (* Pre-conditions need to depend on the inner value of the thread cap *) + apply (rule ccorres_inst[where P="Q (capTCBPtr (fst (extraCaps ! 0)))" and + P'="Q' (capTCBPtr (fst (extraCaps ! 0)))" for Q Q']) + apply (clarsimp simp: capTCBPtr_eq isCap_simps invocationCatch_use_injection_handler + injection_bindE[OF refl refl] bindE_assoc injection_handler_returnOk) + apply (ctac add: ccorres_injection_handler_csum1[OF checkPrio_ccorres]) + apply (rule_tac P="hd args \ ucast (max_word :: priority)" + in ccorres_cross_over_guard_no_st) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def) + apply ccorres_rewrite + apply (ctac add: setThreadState_ccorres) + apply (simp add: invocationCatch_def) + apply ccorres_rewrite + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) + apply clarsimp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE; simp) + apply (rule ccorres_return_C_errorE; simp) + apply wp + apply (wpsimp wp: sts_invs_minor') + apply simp + apply (vcg exspec=setThreadState_modifies) + apply simp + apply (rename_tac err_c) + apply (rule_tac P'="{s. 
err_c = errstate s}" in ccorres_from_vcg_split_throws) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def intr_and_se_rel_def syscall_error_rel_def) + apply simp + apply (rule injection_handler_wp) + apply (rule checkPrio_wp[simplified validE_R_def]) + apply vcg + apply (wp | simp | wpc | wp (once) hoare_drop_imps)+ + apply vcg + apply wp + apply (vcg exspec=getSyscallArg_modifies) + apply (clarsimp simp: EXCEPTION_SYSCALL_ERROR_def EXCEPTION_NONE_def) + apply vcg + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: ct_in_state'_def pred_tcb_at' + valid_cap'_def isCap_simps) + apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def) + apply (clarsimp simp: maxPriority_def numPriorities_def FF_eq_minus_1) + apply (rule conjI, clarsimp) + apply (frule mcpriority_tcb_at'_prio_bounded, simp) + apply (auto simp: valid_tcb_state'_def le_ucast_ucast_le + elim!: obj_at'_weakenE pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread')[1] + apply (clarsimp simp: interpret_excaps_eq excaps_map_def) + apply (frule rf_sr_ksCurThread) + apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) + apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) + apply (intro conjI impI allI) + apply (clarsimp simp: unat_eq_0 le_max_word_ucast_id cap_get_tag_isCap_unfolded_H_cap isCap_simps)+ + done + +lemma ucast_le_8_64_equiv: + "x \ UCAST (8 \ 64) max_word \ + (UCAST (64 \ 8) x \ y) = (x \ UCAST (8 \ 64) y)" + apply (rule iffI) + apply (word_bitwise; simp) + apply (simp add: le_ucast_ucast_le) + done + +lemma mcpriority_tcb_at'_le_ucast: + "pred_tcb_at' itcbMCP (\mcp. x \ UCAST(8 \ 64) mcp) v s \ + pred_tcb_at' itcbMCP (\mcp. UCAST(64 \ 8) x \ mcp) v s" + by (clarsimp simp: ucast_le_8_64_equiv mcpriority_tcb_at'_prio_bounded simp del: unsigned_uminus1) + +lemma decodeSetSchedParams_ccorres: + "\interpret_excaps extraCaps' = excaps_map extraCaps\ \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) + and ct_active' and sch_act_simple + and (excaps_in_mem extraCaps \ ctes_of) + and (\s. \rf \ zobj_refs' cp. ex_nonz_cap_to' rf s) + and valid_cap' cp and K (isThreadCap cp) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and (\s. \v \ set extraCaps. + s \' fst v \ cte_at' (snd v) s) + and sysargs_rel args buffer) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeSetSchedParams args cp extraCaps + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeSetSchedParams_'proc)" + supply Collect_const[simp del] + apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetSchedParams_def) + apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=ret__int_' and R'=UNIV and R=\ and + val="from_bool (length args < 2 \ length extraCaps = 0)" in + ccorres_symb_exec_r_known_rv) + apply vcg + apply (force simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 + unat_arith_simps + split: bool.splits if_splits) + apply ceqv + apply clarsimp +(* + apply (wpc) + apply (rule ccorres_cond_true_seq) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (rule ccorres_rhs_assoc) + apply (ccorres_rewrite) + apply (fastforce intro: syscall_error_throwError_ccorres_n simp: syscall_error_to_H_cases) + apply (wpc) + apply (rule ccorres_cond_true_seq) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (rule ccorres_rhs_assoc) + apply (ccorres_rewrite) + apply (fastforce intro: syscall_error_throwError_ccorres_n simp: syscall_error_to_H_cases) + apply (wpc) + apply (rule ccorres_cond_true_seq) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (rule ccorres_rhs_assoc) + apply (ccorres_rewrite) + apply (fastforce intro: syscall_error_throwError_ccorres_n simp: syscall_error_to_H_cases) +*) + apply (wpc, + rule ccorres_cond_true_seq, + clarsimp simp: throwError_bind invocationCatch_def, + rule ccorres_rhs_assoc, + ccorres_rewrite, + fastforce intro: syscall_error_throwError_ccorres_n simp: syscall_error_to_H_cases)+ + (* Main case where args and extraCaps are both well-formed *) + apply (rule ccorres_cond_false_seq) + apply (simp add: split_def) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo'[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo'[where args=args and n=1 and buffer=buffer]) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply csymbr + apply (simp add: cap_get_tag_isCap cong: call_ignore_cong) + apply (rule ccorres_assert2) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_rhs_assoc) + apply ccorres_rewrite + apply (clarsimp simp: not_isThreadCap_case throwError_bind invocationCatch_def + simp del: id_simps) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: isCap_simps) + apply csymbr + apply csymbr + (* Pre-conditions need to depend on the inner value of the thread cap *) + apply (rule ccorres_inst[where P="Q (capTCBPtr (fst (extraCaps ! 0)))" and + P'="Q' (capTCBPtr (fst (extraCaps ! 0)))" for Q Q']) + apply (clarsimp simp: capTCBPtr_eq isCap_simps invocationCatch_use_injection_handler + injection_bindE[OF refl refl] bindE_assoc injection_handler_returnOk) + apply (ctac add: ccorres_injection_handler_csum1[OF checkPrio_ccorres]) + apply (rule_tac P="args ! 
0 \ ucast (max_word :: priority)" + in ccorres_cross_over_guard_no_st) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def) + apply ccorres_rewrite + apply (clarsimp simp: capTCBPtr_eq isCap_simps invocationCatch_use_injection_handler + injection_bindE[OF refl refl] bindE_assoc injection_handler_returnOk) + apply (ctac add: ccorres_injection_handler_csum1[OF checkPrio_ccorres]) + apply (rule_tac P="args ! 1 \ ucast (max_word :: priority)" + in ccorres_cross_over_guard_no_st) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def) + apply ccorres_rewrite + apply (ctac add: setThreadState_ccorres) + apply (simp add: invocationCatch_def) + apply ccorres_rewrite + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) + apply clarsimp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE; simp) + apply (rule ccorres_return_C_errorE; simp) + apply wp + apply (wpsimp wp: sts_invs_minor') + apply simp + apply (vcg exspec=setThreadState_modifies) + apply simp + apply (rename_tac err_c) + apply (rule_tac P'="{s. err_c = errstate s}" in ccorres_from_vcg_split_throws) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def intr_and_se_rel_def syscall_error_rel_def) + apply simp + apply (rule injection_handler_wp) + apply (rule checkPrio_wp[simplified validE_R_def]) + apply vcg + apply clarsimp + apply ccorres_rewrite + apply (rule ccorres_return_C_errorE; simp) + apply simp + apply (rule injection_handler_wp) + apply (rule checkPrio_wp[simplified validE_R_def]) + apply vcg + apply (wp | simp | wpc | wp (once) hoare_drop_imps)+ + apply vcg + apply simp + apply (rule return_wp) + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply (rule return_wp) + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply simp + apply (vcg exspec=getSyscallArg_modifies) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: ct_in_state'_def pred_tcb_at' + valid_cap'_def isCap_simps) + apply (rule conjI; clarsimp simp: sysargs_rel_to_n n_msgRegisters_def) + apply (clarsimp simp: maxPriority_def numPriorities_def FF_eq_minus_1) + apply (rule conjI, clarsimp) + apply (insert mcpriority_tcb_at'_prio_bounded[where prio="args ! 0"]) + apply (insert mcpriority_tcb_at'_prio_bounded[where prio="args ! 1"]) + apply (auto simp: valid_tcb_state'_def mcpriority_tcb_at'_le_ucast + elim!: obj_at'_weakenE pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread')[1] + apply (clarsimp simp: interpret_excaps_eq excaps_map_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (frule rf_sr_ksCurThread) + apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) + apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) + apply (intro conjI impI allI) + by (clarsimp simp: unat_eq_0 le_max_word_ucast_id + thread_control_update_mcp_def thread_control_update_priority_def + cap_get_tag_isCap_unfolded_H_cap isCap_simps + interpret_excaps_eq excaps_map_def)+ + +lemma decodeSetIPCBuffer_ccorres: + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and valid_cap' cp and cte_at' slot and K (isThreadCap cp) + and (excaps_in_mem extraCaps o ctes_of) + and (\s. \rf \ zobj_refs' cp. ex_nonz_cap_to' rf s) + and (\s. \v \ set extraCaps. 
+ s \' fst v \ cte_at' (snd v) s) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. slot_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. buffer_' s = option_to_ptr buffer}) [] + (decodeSetIPCBuffer args cp slot extraCaps + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeSetIPCBuffer_'proc)" + apply (cinit' lift: cap_' length___unsigned_long_' slot_' current_extra_caps_' buffer_' + simp: decodeSetIPCBuffer_def) + apply wpc + apply (simp add: unat_eq_0) + apply csymbr + apply simp + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (simp add: throwError_bind invocationCatch_def) + apply (clarsimp simp: throwError_def return_def + intr_and_se_rel_def exception_defs + syscall_error_rel_def syscall_error_to_H_cases) + apply csymbr + apply (rule ccorres_cond_false_seq) + apply csymbr + apply (simp del: Collect_const) + apply (simp add: interpret_excaps_test_null excaps_map_Nil if_1_0_0 + del: Collect_const) + apply wpc + apply (simp add: throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (rule ccorres_cond_false_seq) + apply (simp add: split_def + del: Collect_const) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo [where args=args and n=0 and buffer=buffer]) + apply csymbr + apply (rule ccorres_move_c_guard_cte) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply ctac + apply (rule ccorres_assert2) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: returnOk_bind ccorres_invocationCatch_Inr performInvocation_def) + apply csymbr + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply csymbr + apply csymbr + apply (ctac add: invokeTCB_ThreadControl_ccorres) + apply simp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (vcg exspec=invokeTCB_ThreadControl_modifies) + apply simp + apply (wp sts_invs_minor') + apply (vcg exspec=setThreadState_modifies) + apply (simp add: bindE_assoc del: Collect_const) + apply (rule ccorres_rhs_assoc)+ + apply (csymbr, csymbr) + apply (simp add: bindE_bind_linearise) + apply (rule ccorres_split_nothrow_case_sum) + apply (ctac add: deriveCap_ccorres) + apply ceqv + apply (simp add: Collect_False del: Collect_const) + apply csymbr + apply (rule ccorres_split_nothrow_case_sum) + apply (ctac add: checkValidIPCBuffer_ccorres) + apply ceqv + apply (simp add: Collect_False returnOk_bind + ccorres_invocationCatch_Inr + del: Collect_const) + apply (ctac add: setThreadState_ccorres) + apply (simp add: performInvocation_def) + apply (csymbr, csymbr, csymbr) + apply (ctac add: invokeTCB_ThreadControl_ccorres) + apply simp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (vcg exspec=invokeTCB_ThreadControl_modifies) + apply simp + apply (wp sts_invs_minor') + apply (simp add: Collect_const_mem cintr_def intr_and_se_rel_def) + apply (vcg exspec=setThreadState_modifies) + apply (simp add: invocationCatch_def) + apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) + apply vcg + apply simp 
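+  (* Descriptive note: the remaining goals below discharge the wp/vcg side conditions of the
+     checkValidIPCBuffer, deriveCap and getSyscallArg steps, and then establish the abstract
+     and concrete preconditions of decodeSetIPCBuffer_ccorres. *)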
+  apply (wp checkValidIPCBuffer_ArchObject_wp)
+  apply (simp add: intr_and_se_rel_def syscall_error_rel_def
+                   exception_defs)
+  apply (vcg exspec=checkValidIPCBuffer_modifies)
+  apply (simp add: invocationCatch_def)
+  apply (rule ccorres_split_throws)
+  apply (rule ccorres_return_C_errorE, simp+)[1]
+  apply vcg
+  apply simp
+  apply (wp | wp (once) hoare_drop_imps)+
+  apply (simp add: Collect_const_mem)
+  apply (vcg exspec=deriveCap_modifies)
+  apply simp
+  apply (wp | wp (once) hoare_drop_imps)+
+  apply simp
+  apply vcg
+  apply wp
+  apply simp
+  apply (vcg exspec=getSyscallArg_modifies)
+  apply (clarsimp simp: Collect_const_mem if_1_0_0 ct_in_state'_def
+                        pred_tcb_at' cintr_def intr_and_se_rel_def
+                        exception_defs syscall_error_rel_def)
+  apply (rule conjI)
+  apply (clarsimp simp: excaps_in_mem_def slotcap_in_mem_def)
+  apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def)
+  apply (frule invs_mdb')
+  apply (frule(2) tcb_at_capTCBPtr_CL)
+  apply (auto simp: isCap_simps valid_cap'_def valid_mdb'_def valid_tcb_state'_def
+                    valid_mdb_ctes_def no_0_def excaps_map_def
+              elim: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread'
+              dest!: interpret_excaps_eq)[1]
+  apply (clarsimp simp: option_to_0_def rf_sr_ksCurThread word_sless_def word_sle_def mask_def)
+  apply (rule conjI[rotated], clarsimp+)
+  apply (drule interpret_excaps_eq[rule_format, where n=0], simp add: excaps_map_Nil)
+  apply (simp add: mask_def ThreadState_defs excaps_map_def)
+  apply (clarsimp simp: ccap_rights_relation_def rightsFromWord_wordFromRights
+                        cap_get_tag_isCap)
+  apply (frule cap_get_tag_to_H, subst cap_get_tag_isCap, assumption, assumption)
+  apply clarsimp
+  done
+
+lemma bindNTFN_alignment_junk:
+  "\<lbrakk> is_aligned tcb tcbBlockSizeBits; bits \<le> ctcb_size_bits \<rbrakk>
+   \<Longrightarrow> ptr_val (tcb_ptr_to_ctcb_ptr tcb) && ~~ mask bits = ptr_val (tcb_ptr_to_ctcb_ptr tcb)"
+  apply (clarsimp simp: tcb_ptr_to_ctcb_ptr_def projectKOs)
+  apply (rule is_aligned_neg_mask_eq)
+  apply (erule aligned_add_aligned)
+  apply (erule is_aligned_weaken[rotated])
+  by (auto simp add: is_aligned_def objBits_defs ctcb_offset_defs)
+
+lemma bindNotification_ccorres:
+  "ccorres dc xfdc (invs' and tcb_at' tcb)
+     (UNIV \<inter> {s. tcb_' s = tcb_ptr_to_ctcb_ptr tcb}
+           \<inter> {s. ntfnPtr_' s = ntfn_Ptr ntfnptr}) []
+     (bindNotification tcb ntfnptr)
+     (Call bindNotification_'proc)"
+  apply (cinit lift: tcb_' ntfnPtr_' simp: bindNotification_def)
+  apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification])
+  apply (rule_tac P="invs' and ko_at' ntfn ntfnptr and tcb_at' tcb" and P'=UNIV
+                  in ccorres_split_nothrow_novcg)
+  apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc])
+  apply (rule allI, rule conseqPre, vcg)
+  apply (clarsimp)
+  apply (frule cmap_relation_ntfn)
+  apply (erule (1) cmap_relation_ko_atE)
+  apply (rule conjI)
+  apply (erule h_t_valid_clift)
+  apply (clarsimp simp: setNotification_def split_def)
+  apply (rule bexI [OF _ setObject_eq])
+  apply (simp add: rf_sr_def cstate_relation_def Let_def init_def
+                   cpspace_relation_def update_ntfn_map_tos
+                   typ_heap_simps')
+  apply (elim conjE)
+  apply (intro conjI)
+  \<comment> \<open>tcb relation\<close>
+  apply (rule cpspace_relation_ntfn_update_ntfn, assumption+)
+  apply (clarsimp simp: cnotification_relation_def Let_def
+                        mask_def [where n=2] NtfnState_Waiting_def)
+  apply (case_tac "ntfnObj ntfn")
+  apply ((clarsimp simp: Suc_canonical_bit_fold option_to_ctcb_ptr_def
+                         tcb_ptr_canonical[OF invs_pspace_canonical']
+              simp flip: make_canonical_def)+)[3]
+  apply (auto simp: option_to_ctcb_ptr_def objBits_simps'
+                    bindNTFN_alignment_junk canonical_bit_def)[1]
+  apply (simp add: carch_state_relation_def)
+  apply (simp add: cmachine_state_relation_def)
+  apply (simp add: h_t_valid_clift_Some_iff)
+  apply (simp add: objBits_simps')
+  apply (simp add: objBits_simps)
+  apply assumption
+  apply ceqv
+  apply (rule ccorres_move_c_guard_tcb)
+  apply (simp add: setBoundNotification_def)
+  apply (rule_tac P'=\<top> and P=\<top> in threadSet_ccorres_lemma3)
+  apply vcg
+  apply simp
+  apply (erule (1) rf_sr_tcb_update_no_queue2, (simp add: typ_heap_simps')+, simp_all?)[1]
+  apply (simp add: ctcb_relation_def option_to_ptr_def option_to_0_def)
+  apply simp
+  apply (wp get_ntfn_ko'| simp add: guard_is_UNIV_def)+
+  done
+
+lemma invokeTCB_NotificationControl_bind_ccorres:
+  "ccorres (cintr \<currency> (\<lambda>rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_')
+     (invs' and tcb_inv_wf' (tcbinvocation.NotificationControl t (Some a)))
+     (UNIV \<inter> {s. tcb_' s = tcb_ptr_to_ctcb_ptr t} \<inter> {s. ntfnPtr_' s = ntfn_Ptr a}) []
+     (invokeTCB (tcbinvocation.NotificationControl t (Some a)))
+     (Call invokeTCB_NotificationControl_'proc)"
+  apply (cinit lift: tcb_' ntfnPtr_')
+  apply (clarsimp simp: option_to_0_def liftE_def)
+  apply (rule ccorres_cond_true_seq)
+  apply (ctac(no_vcg) add: bindNotification_ccorres)
+  apply (rule ccorres_return_CE[unfolded returnOk_def, simplified])
+  apply simp
+  apply simp
+  apply simp
+  apply wp
+  apply (case_tac "a = 0", auto)
+  done
+
+lemma invokeTCB_NotificationControl_unbind_ccorres:
+  "ccorres (cintr \<currency> (\<lambda>rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_')
+     (invs' and tcb_inv_wf' (tcbinvocation.NotificationControl t None))
+     (UNIV \<inter> {s. tcb_' s = tcb_ptr_to_ctcb_ptr t} \<inter> {s. ntfnPtr_' s = NULL}) []
+     (invokeTCB (tcbinvocation.NotificationControl t None))
+     (Call invokeTCB_NotificationControl_'proc)"
+  apply (cinit lift: tcb_' ntfnPtr_')
+  apply (clarsimp simp add: option_to_0_def liftE_def)
+  apply (ctac(no_vcg) add: unbindNotification_ccorres)
+  apply (rule ccorres_return_CE[unfolded returnOk_def, simplified])
+  apply simp
+  apply simp
+  apply simp
+  apply wp
+  apply (clarsimp simp: option_to_0_def)
+  done
+
+lemma valid_objs_boundNTFN_NULL:
+  "ko_at' tcb p s ==> valid_objs' s \<Longrightarrow> no_0_obj' s \<Longrightarrow> tcbBoundNotification tcb \<noteq> Some 0"
+  apply (drule(1) obj_at_valid_objs')
+  apply (clarsimp simp: valid_tcb'_def projectKOs valid_obj'_def)
+  done
+
+lemma decodeUnbindNotification_ccorres:
+  "ccorres (intr_and_se_rel \<currency> dc) (liftxf errstate id (K ()) ret__unsigned_long_')
+     (invs' and (\<lambda>s. ksCurThread s = thread) and ct_active' and sch_act_simple
+            and valid_cap' cp and K (isThreadCap cp)
+            and tcb_at' (capTCBPtr cp)
+            and (\<lambda>s. \<forall>rf \<in> zobj_refs' cp. ex_nonz_cap_to' rf s))
+     (UNIV \<inter> {s. ccap_relation cp (cap_' s)}) []
+     (decodeUnbindNotification cp >>= invocationCatch thread isBlocking isCall InvokeTCB)
+     (Call decodeUnbindNotification_'proc)"
+  apply (cinit' lift: cap_' simp: decodeUnbindNotification_def)
+  apply csymbr
+  apply csymbr
+  apply (rule ccorres_Guard_Seq)
+  apply (simp add: liftE_bindE bind_assoc)
+  apply (rule ccorres_pre_getBoundNotification)
+  apply (rule_tac P="\<lambda>s. ntfn \<noteq> Some 0" in ccorres_cross_over_guard)
+  apply (simp add: bindE_bind_linearise)
+  apply wpc
+  apply (simp add: bindE_bind_linearise[symmetric]
+                   injection_handler_throwError
+                   invocationCatch_use_injection_handler)
+  apply (rule ccorres_cond_true_seq)
+  apply (rule ccorres_from_vcg_split_throws[where P=\<top> and P'=UNIV])
+  apply vcg
+  apply (rule conseqPre, vcg)
+  apply (clarsimp simp: throwError_def return_def syscall_error_rel_def
+                        syscall_error_to_H_cases exception_defs)
+  apply (simp add: returnOk_bind ccorres_invocationCatch_Inr
+                   performInvocation_def)
+  apply (rule ccorres_cond_false_seq)
+  apply simp
+  apply (ctac add: setThreadState_ccorres)
+  apply (ctac add: invokeTCB_NotificationControl_unbind_ccorres)
+  apply simp
+  apply (rule ccorres_alternative2)
+  apply (rule ccorres_return_CE, simp+)[1]
+  apply (rule ccorres_return_C_errorE, simp+)[1]
+  apply wp
+  apply (vcg exspec=invokeTCB_NotificationControl_modifies)
+  apply simp
+  apply (wp sts_invs_minor' hoare_case_option_wp sts_bound_tcb_at' | wpc | simp)+
+  apply (vcg exspec=setThreadState_modifies)
+  apply (clarsimp, frule obj_at_ko_at', clarsimp)
+  apply (rule cmap_relationE1[OF cmap_relation_tcb], assumption)
+  apply (erule ko_at_projectKO_opt)
+  apply (clarsimp simp: isCap_simps)
+  apply (frule cap_get_tag_isCap_unfolded_H_cap)
+  apply (auto simp: ctcb_relation_def typ_heap_simps cap_get_tag_ThreadCap ct_in_state'_def
+                    option_to_ptr_def option_to_0_def ThreadState_defs
+                    mask_def rf_sr_ksCurThread valid_tcb_state'_def
+              elim!: pred_tcb'_weakenE
+              dest!: valid_objs_boundNTFN_NULL)
+  done
+
+lemma nTFN_case_If_ptr:
+  "(case x of capability.NotificationCap a b c d \<Rightarrow> P a d | _ \<Rightarrow> Q) = (if (isNotificationCap x) then P (capNtfnPtr x) (capNtfnCanReceive x) else Q)"
+  by (auto simp: isNotificationCap_def split: capability.splits)
+
+lemma decodeBindNotification_ccorres:
+  notes prod.case_cong_weak[cong]
+        option.case_cong_weak[cong]
+  shows
+  "interpret_excaps extraCaps' = excaps_map extraCaps \<Longrightarrow>
+   ccorres (intr_and_se_rel \<currency> dc) (liftxf errstate id (K ()) ret__unsigned_long_')
+     (invs' and (\<lambda>s.
ksCurThread s = thread) and ct_active' and sch_act_simple + and valid_cap' cp + and tcb_at' (capTCBPtr cp) and ex_nonz_cap_to' (capTCBPtr cp) + and (\s. \rf \ zobj_refs' cp. ex_nonz_cap_to' rf s) + and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). ex_nonz_cap_to' y s ) + and (excaps_in_mem extraCaps o ctes_of) + and K (isThreadCap cp)) + (UNIV \ {s. ccap_relation cp (cap_' s)} + \ {s. current_extra_caps_' (globals s) = extraCaps'}) [] + (decodeBindNotification cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeBindNotification_'proc)" + using [[goals_limit=1]] + apply (simp, rule ccorres_gen_asm) + apply (cinit' lift: cap_' current_extra_caps_' simp: decodeBindNotification_def) + apply (simp add: bind_assoc whenE_def bind_bindE_assoc interpret_excaps_test_null + del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: excaps_map_def invocationCatch_def throwError_bind null_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: excaps_map_def null_def del: Collect_const cong: call_ignore_cong) + apply csymbr + apply csymbr + apply (rule ccorres_Guard_Seq) + apply (simp add: liftE_bindE bind_assoc cong: call_ignore_cong) + apply (rule ccorres_pre_getBoundNotification) + apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (simp add: bindE_bind_linearise cong: call_ignore_cong) + apply wpc + prefer 2 + apply (simp add: bindE_bind_linearise[symmetric] injection_handler_throwError + invocationCatch_use_injection_handler throwError_bind invocationCatch_def) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def syscall_error_to_H_cases + exception_defs) + apply (simp add: returnOk_bind ccorres_invocationCatch_Inr performInvocation_def + bindE_bind_linearise[symmetric] cong: call_ignore_cong) + apply (rule ccorres_cond_false_seq) + apply (simp cong: call_ignore_cong) + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply csymbr + apply (simp add: cap_get_tag_isCap if_1_0_0 del: Collect_const cong: call_ignore_cong) + apply (rule ccorres_assert2) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule_tac P="Q (capNtfnPtr rva) (capNtfnCanReceive rva) rva"for Q in ccorres_inst) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (simp add: hd_conv_nth del: Collect_const cong: call_ignore_cong) + apply (simp only: cap_get_tag_isCap(3)[symmetric] cong: call_ignore_cong, frule(1) cap_get_tag_to_H(3) ) + apply (simp add: case_bool_If if_to_top_of_bindE if_to_top_of_bind bind_assoc + del: Collect_const cong: if_cong call_ignore_cong) + apply csymbr + apply (clarsimp simp add: if_to_top_of_bind to_bool_eq_0[symmetric] simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (clarsimp simp: throwError_bind invocationCatch_def) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def syscall_error_to_H_cases exception_defs) + apply (clarsimp simp: to_bool_def) + apply (rule ccorres_pre_getNotification) + apply (rename_tac ntfn) + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_rhs_assoc2) + apply 
(rule_tac xf'="ret__int_'" and val="from_bool (ntfnBoundTCB ntfn \ None \ + isWaitingNtfn (ntfnObj ntfn))" + and R="ko_at' ntfn (capNtfnPtr_CL (cap_notification_cap_lift ntfn_cap)) + and valid_ntfn' ntfn" + and R'=UNIV + in ccorres_symb_exec_r_known_rv_UNIV) + apply (rule conseqPre, vcg) + apply (clarsimp simp: if_1_0_0) + + apply (erule cmap_relationE1[OF cmap_relation_ntfn], erule ko_at_projectKO_opt) + apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def + valid_ntfn'_def) + apply (case_tac "ntfnObj ntfn", simp_all add: isWaitingNtfn_def option_to_ctcb_ptr_def + split: option.split_asm if_split, + auto simp: neq_Nil_conv tcb_queue_relation'_def tcb_at_not_NULL[symmetric] + tcb_at_not_NULL)[1] + apply ceqv + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (simp add: bindE_bind_linearise del: Collect_const) + apply wpc + \ \IdleNtfn\ + apply (simp add: case_option_If del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (clarsimp simp: isWaitingNtfn_def from_bool_neq_0) + apply (simp add: bindE_bind_linearise[symmetric] injection_handler_throwError + invocationCatch_use_injection_handler throwError_bind invocationCatch_def) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) + apply (clarsimp simp: isWaitingNtfn_def from_bool_neq_0 returnOk_bind) + apply (clarsimp simp: isWaitingNtfn_def from_bool_neq_0 returnOk_bind + ccorres_invocationCatch_Inr performInvocation_def) + apply (ctac add: setThreadState_ccorres) + apply (ctac add: invokeTCB_NotificationControl_bind_ccorres) + apply simp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (vcg exspec=invokeTCB_NotificationControl_modifies) + apply simp + apply (wp sts_invs_minor' hoare_case_option_wp sts_bound_tcb_at' | wpc | simp)+ + apply (vcg exspec=setThreadState_modifies) + apply (simp add: case_option_If del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (clarsimp simp: isWaitingNtfn_def from_bool_neq_0) + apply (simp add: bindE_bind_linearise[symmetric] injection_handler_throwError + invocationCatch_use_injection_handler throwError_bind invocationCatch_def) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) + apply (clarsimp simp: isWaitingNtfn_def from_bool_neq_0 returnOk_bind) + apply (clarsimp simp: isWaitingNtfn_def from_bool_neq_0 returnOk_bind + ccorres_invocationCatch_Inr performInvocation_def) + apply (ctac add: setThreadState_ccorres) + apply (ctac add: invokeTCB_NotificationControl_bind_ccorres) + apply simp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (vcg exspec=invokeTCB_NotificationControl_modifies) + apply simp + apply (wp sts_invs_minor' hoare_case_option_wp sts_bound_tcb_at' | wpc | simp)+ + apply (vcg exspec=setThreadState_modifies) + apply (simp add: bindE_bind_linearise[symmetric] injection_handler_throwError + invocationCatch_use_injection_handler throwError_bind invocationCatch_def) + apply (rule ccorres_cond_true_seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + 
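+       (* Descriptive note: in this branch the bind must fail; the guard value computed
+          above is nonzero, so both the specification and the C code throw the same
+          syscall error, which the following vcg/clarsimp steps establish. *)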
apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) + apply (clarsimp simp add: guard_is_UNIV_def isWaitingNtfn_def + ThreadState_defs mask_def + rf_sr_ksCurThread capTCBPtr_eq) + apply (simp add: hd_conv_nth bindE_bind_linearise nTFN_case_If_ptr throwError_bind invocationCatch_def) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def syscall_error_to_H_cases + exception_defs) + apply clarsimp + apply (wp | simp | wpc | wp (once) hoare_drop_imps)+ + apply vcg + apply clarsimp + apply (rule conjI) + apply safe[1] + apply (fastforce simp: invs'_def valid_state'_def valid_pspace'_def + dest!: valid_objs_boundNTFN_NULL) + apply ((fastforce elim!: pred_tcb'_weakenE obj_at'_weakenE + simp: ct_in_state'_def from_bool_0 isCap_simps excaps_map_def + neq_Nil_conv obj_at'_def pred_tcb_at'_def valid_tcb_state'_def)+)[12] + apply (clarsimp dest!: obj_at_valid_objs'[OF _ invs_valid_objs'] + simp: projectKOs valid_obj'_def) + apply (clarsimp simp: excaps_map_Nil cte_wp_at_ctes_of excaps_map_def neq_Nil_conv + dest!: interpret_excaps_eq ) + apply (clarsimp simp: excaps_map_Nil) + apply (frule obj_at_ko_at', clarsimp) + apply (rule cmap_relationE1[OF cmap_relation_tcb], assumption) + apply (erule ko_at_projectKO_opt) + apply (clarsimp simp: isCap_simps) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply safe[1] + apply (clarsimp simp: typ_heap_simps excaps_map_def neq_Nil_conv + dest!: interpret_excaps_eq) + apply clarsimp + apply (frule cap_get_tag_isCap_unfolded_H_cap(3)) + apply (clarsimp simp: typ_heap_simps cap_get_tag_ThreadCap ccap_relation_def) + apply (auto simp: word_sless_alt typ_heap_simps cap_get_tag_ThreadCap ctcb_relation_def + option_to_ptr_def option_to_0_def + split: if_split) + done + + +lemma decodeSetSpace_ccorres: + notes tl_drop_1[simp] scast_mask_8 [simp] + shows + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and valid_cap' cp and cte_at' slot and K (isThreadCap cp) + and tcb_at' (capTCBPtr cp) + and (\s. \rf \ zobj_refs' cp. ex_nonz_cap_to' rf s) + and (excaps_in_mem extraCaps o ctes_of) + and (\s. \v \ set extraCaps. + s \' fst v \ cte_at' (snd v) s) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. slot_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeSetSpace args cp slot extraCaps + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeSetSpace_'proc)" + supply unsigned_numeral[simp del] + apply (cinit' lift: cap_' length___unsigned_long_' slot_' current_extra_caps_' buffer_' + simp: decodeSetSpace_def) + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: if_1_0_0) + apply (rule ccorres_cond_true_seq | simp)+ + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (subgoal_tac "unat length___unsigned_long < 3") + apply (clarsimp simp: throwError_def invocationCatch_def fst_return + intr_and_se_rel_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs + subset_iff + split: list.split) + apply unat_arith + apply csymbr + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: if_1_0_0 interpret_excaps_test_null excaps_map_Nil) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: throwError_def invocationCatch_def fst_return + intr_and_se_rel_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs + split: list.split) + apply csymbr + apply (simp add: if_1_0_0 interpret_excaps_test_null del: Collect_const) + apply (rule_tac P="\c. ccorres rvr xf P P' hs a (Cond c c1 c2 ;; c3)" for rvr xf P P' hs a c1 c2 c3 in ssubst) + apply (rule Collect_cong) + apply (rule interpret_excaps_test_null) + apply (clarsimp simp: neq_Nil_conv) + apply simp + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (clarsimp simp: throwError_def invocationCatch_def fst_return + intr_and_se_rel_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs + excaps_map_def + split: list.split) + apply (clarsimp simp add: linorder_not_less word_le_nat_alt + excaps_map_Nil length_excaps_map + simp del: Collect_const) + apply (drule_tac a="Suc 0" in neq_le_trans [OF not_sym]) + apply (clarsimp simp: neq_Nil_conv) + apply (rule_tac P="\a. 
ccorres rvr xf P P' hs a c" for rvr xf P P' hs c in ssubst, + rule bind_cong [OF _ refl], rule list_case_helper, + clarsimp)+ + apply (simp add: hd_drop_conv_nth2 del: Collect_const) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer]) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer]) + apply csymbr + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply csymbr + apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=1]) + apply (rule ccorres_move_c_guard_cte) + apply ctac + apply (rule ccorres_assert2) + apply csymbr + apply (simp add: decodeSetSpace_def injection_bindE[OF refl] + split_def + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] + del: Collect_const) + apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq + ccorres_rhs_assoc)+ + apply (simp add: injection_liftE[OF refl] bindE_assoc + liftM_def getThreadCSpaceRoot + getThreadVSpaceRoot del: Collect_const) + apply (simp add: liftE_bindE bind_assoc del: Collect_const) + apply (ctac add: slotCapLongRunningDelete_ccorres) + apply (rule ccorres_move_array_assertion_tcb_ctes) + apply (simp del: Collect_const) + apply csymbr + apply (clarsimp simp add: if_1_0_0 from_bool_0 + simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_slotCapLongRunningDelete]) + apply (simp add: unlessE_def throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule ccorres_cond_true_seq) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply wp+ + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply (simp add: tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] + del: Collect_const) + apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq + ccorres_rhs_assoc)+ + apply (ctac add: slotCapLongRunningDelete_ccorres) + apply (rule ccorres_move_array_assertion_tcb_ctes) + apply (simp del: Collect_const) + apply csymbr + apply (clarsimp simp add: if_1_0_0 from_bool_0 + simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: unlessE_def throwError_bind invocationCatch_def + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: unlessE_def + del: Collect_const) + apply (rule ccorres_add_return, + rule_tac r'="\rv rv'. ccap_relation (if args ! Suc 0 = 0 then fst (hd extraCaps) + else updateCapData False (args ! Suc 0) (fst (hd extraCaps))) rv'" + and xf'="cRootCap_'" in ccorres_split_nothrow) + apply (rule_tac P'="{s. 
cRootCap = cRootCap_' s}" + in ccorres_from_vcg[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (subgoal_tac "extraCaps \ []") + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) + apply fastforce + apply clarsimp + apply ceqv + apply (simp add: invocationCatch_use_injection_handler + injection_bindE [OF refl refl] bindE_assoc + del: Collect_const) + apply (ctac add: ccorres_injection_handler_csum1 + [OF deriveCap_ccorres]) + apply (simp add: Collect_False del: Collect_const) + apply csymbr + apply csymbr + apply (simp add: cnode_cap_case_if cap_get_tag_isCap + del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_throwError + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: injection_handler_returnOk del: Collect_const) + apply (rule ccorres_add_return, + rule_tac r'="\rv rv'. ccap_relation (if args ! 2 = 0 then fst (extraCaps ! Suc 0) + else updateCapData False (args ! 2) (fst (extraCaps ! Suc 0))) rv'" + and xf'="vRootCap_'" in ccorres_split_nothrow) + apply (rule_tac P'="{s. vRootCap = vRootCap_' s}" + in ccorres_from_vcg[where P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: returnOk_def return_def hd_drop_conv_nth2) + apply fastforce + apply ceqv + apply (ctac add: ccorres_injection_handler_csum1 + [OF deriveCap_ccorres]) + apply (simp add: Collect_False del: Collect_const) + apply csymbr + apply csymbr + apply (simp add: from_bool_0 isValidVTableRoot_conv del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: injection_handler_throwError + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: injection_handler_returnOk ccorres_invocationCatch_Inr + performInvocation_def) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply csymbr + apply (ctac(no_vcg) add: invokeTCB_ThreadControl_ccorres) + apply simp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply simp + apply (wp sts_invs_minor') + apply (vcg exspec=setThreadState_modifies) + apply simp + apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) + apply vcg + apply simp + apply (wp hoare_drop_imps) + apply (wp injection_wp_E [OF refl]) + apply (simp add: Collect_const_mem cintr_def intr_and_se_rel_def + all_ex_eq_helper syscall_error_rel_def + exception_defs) + apply (vcg exspec=deriveCap_modifies) + apply (simp cong: if_cong) + apply wp + apply (simp add: Collect_const_mem all_ex_eq_helper) + apply vcg + apply simp + apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) + apply vcg + apply (simp cong: if_cong) + apply (wp hoare_drop_imps injection_wp_E[OF refl]) + apply (simp add: Collect_const_mem all_ex_eq_helper + numeral_eqs syscall_error_rel_def + exception_defs cintr_def intr_and_se_rel_def) + apply (vcg exspec=deriveCap_modifies) + apply (simp cong: if_cong) + apply wp + apply (simp add: Collect_const_mem all_ex_eq_helper + numeral_eqs syscall_error_rel_def + exception_defs cintr_def intr_and_se_rel_def + hd_drop_conv_nth2 + cong: if_cong) + apply vcg + apply (simp cong: if_cong) + apply (wp hoare_drop_imps) + apply (simp add: Collect_const_mem) + apply (vcg exspec=slotCapLongRunningDelete_modifies) + apply (simp cong: if_cong) + apply (wp 
hoare_drop_imps) + apply (simp add: Collect_const_mem all_ex_eq_helper + numeral_eqs syscall_error_rel_def + exception_defs cintr_def intr_and_se_rel_def) + apply (vcg exspec=slotCapLongRunningDelete_modifies) + apply (simp add: pred_conj_def cong: if_cong) + apply (strengthen if_n_updateCapData_valid_strg) + apply (wp hoare_drop_imps) + apply (simp add: Collect_const_mem all_ex_eq_helper + numeral_eqs syscall_error_rel_def + exception_defs cintr_def intr_and_se_rel_def) + apply vcg + apply simp + apply (wp hoare_drop_imps) + apply (simp add: Collect_const_mem all_ex_eq_helper + numeral_eqs syscall_error_rel_def + exception_defs cintr_def intr_and_se_rel_def) + apply vcg + apply simp + apply (wp hoare_drop_imps) + apply (simp add: Collect_const_mem all_ex_eq_helper + numeral_eqs syscall_error_rel_def + exception_defs cintr_def intr_and_se_rel_def + cong: if_cong + | vcg exspec=getSyscallArg_modifies + | wp)+ + apply (clarsimp simp: word_less_nat_alt) + apply (rule conjI) + apply (clarsimp simp: ct_in_state'_def interpret_excaps_test_null + excaps_map_def neq_Nil_conv) + apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def) + apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def) + apply (rule conjI, clarsimp simp: sysargs_rel_n_def n_msgRegisters_def) + apply (frule(2) tcb_at_capTCBPtr_CL) + apply (auto simp: isCap_simps valid_tcb_state'_def objBits_defs mask_def + elim!: pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1] + apply (clarsimp simp: linorder_not_le interpret_excaps_test_null + excaps_map_def neq_Nil_conv word_sle_def + word_sless_def) + apply (frule interpret_excaps_eq[rule_format, where n=0], simp) + apply (frule interpret_excaps_eq[rule_format, where n=1], simp) + apply (clarsimp simp: mask_def[where n=4] ccap_rights_relation_def + rightsFromWord_wordFromRights + capTCBPtr_eq tcb_cnode_index_defs size_of_def + option_to_0_def rf_sr_ksCurThread + ThreadState_defs mask_eq_iff_w2p word_size) + apply (simp add: word_sle_def cap_get_tag_isCap) + apply (subgoal_tac "args \ []") + apply (clarsimp simp: hd_conv_nth) + apply (clarsimp simp: objBits_simps') + apply fastforce + apply clarsimp + done + +lemma invokeTCB_SetTLSBase_ccorres: + notes hoare_weak_lift_imp [wp] + shows + "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs') + ({s. thread_' s = tcb_ptr_to_ctcb_ptr tcb} + \ {s. tls_base_' s = tls_base}) [] + (invokeTCB (SetTLSBase tcb tls_base)) + (Call invokeSetTLSBase_'proc)" + apply (cinit lift: thread_' tls_base_') + apply (simp add: liftE_def bind_assoc + del: Collect_const) + apply (ctac add: setRegister_ccorres) + apply (rule ccorres_pre_getCurThread) + apply (rename_tac cur_thr) + apply (rule ccorres_split_nothrow_novcg_dc) + apply (rule_tac R="\s. cur_thr = ksCurThread s" in ccorres_when) + apply (clarsimp simp: rf_sr_ksCurThread) + apply clarsimp + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) + apply (unfold return_returnOk)[1] + apply (rule ccorres_return_CE, simp+)[1] + apply (wpsimp wp: hoare_drop_imp simp: guard_is_UNIV_def)+ + apply vcg + apply (fastforce simp: tlsBaseRegister_def AARCH64.tlsBaseRegister_def + invs_weak_sch_act_wf C_register_defs + split: if_split) + done + +lemma decodeSetTLSBase_ccorres: + "ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and sch_act_simple + and (\s. ksCurThread s = thread) and ct_active' and K (isThreadCap cp) + and valid_cap' cp and (\s. \x \ zobj_refs' cp. 
ex_nonz_cap_to' x s) + and sysargs_rel args buffer) + (UNIV + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. buffer_' s = option_to_ptr buffer}) [] + (decodeSetTLSBase args cp + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeSetTLSBase_'proc)" + apply (cinit' lift: cap_' length___unsigned_long_' buffer_' + simp: decodeSetTLSBase_def) + apply wpc + apply (simp add: throwError_bind invocationCatch_def) + apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) + apply vcg + apply (rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def + exception_defs syscall_error_rel_def + syscall_error_to_H_cases) + apply (rule ccorres_cond_false_seq; simp) + apply (rule ccorres_add_return, + ctac add: getSyscallArg_ccorres_foo'[where args=args and n=0 and buffer=buffer]) + apply (simp add: invocationCatch_use_injection_handler + bindE_assoc injection_handler_returnOk + ccorres_invocationCatch_Inr performInvocation_def) + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (ctac (no_vcg) add: invokeTCB_SetTLSBase_ccorres) + apply simp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply (wpsimp wp: sts_invs_minor')+ + apply (vcg exspec=setThreadState_modifies) + apply wp + apply vcg + apply (clarsimp simp: Collect_const_mem) + apply (rule conjI) + apply (clarsimp simp: ct_in_state'_def sysargs_rel_n_def n_msgRegisters_def) + apply (auto simp: valid_tcb_state'_def + elim!: pred_tcb'_weakenE)[1] + apply (frule rf_sr_ksCurThread) + apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) + apply (auto simp: unat_eq_0 le_max_word_ucast_id)+ + done + +lemma decodeTCBInvocation_ccorres: + "interpret_excaps extraCaps' = excaps_map extraCaps \ + ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple + and valid_cap' cp and cte_at' slot and K (isThreadCap cp) + and (excaps_in_mem extraCaps o ctes_of) + and tcb_at' (capTCBPtr cp) and ex_nonz_cap_to' (capTCBPtr cp) + and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). + ex_nonz_cap_to' y s) + and (\s. \v \ set extraCaps. + s \' fst v \ cte_at' (snd v) s) + and (\s. \v \ set extraCaps. + ex_cte_cap_wp_to' isCNodeCap (snd v) s) + and sysargs_rel args buffer) + (UNIV + \ {s. invLabel_' s = label} + \ {s. ccap_relation cp (cap_' s)} + \ {s. unat (length___unsigned_long_' s) = length args} + \ {s. slot_' s = cte_Ptr slot} + \ {s. current_extra_caps_' (globals s) = extraCaps'} + \ {s. call_' s = from_bool isCall} + \ {s. 
buffer_' s = option_to_ptr buffer}) [] + (decodeTCBInvocation label args cp slot extraCaps + >>= invocationCatch thread isBlocking isCall InvokeTCB) + (Call decodeTCBInvocation_'proc)" + apply (cinit' lift: invLabel_' cap_' length___unsigned_long_' slot_' current_extra_caps_' call_' buffer_') + apply (simp add: decodeTCBInvocation_def invocation_eq_use_types gen_invocation_type_eq + del: Collect_const) + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeReadRegisters_ccorres [where buffer=buffer]) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeWriteRegisters_ccorres [where buffer=buffer]) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeCopyRegisters_ccorres [where buffer=buffer]) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply (simp add: returnOk_bind ccorres_invocationCatch_Inr) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (simp add: performInvocation_def) + apply (ctac add: invokeTCB_Suspend_ccorres) + apply simp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (vcg exspec=invokeTCB_Suspend_modifies) + apply (wp sts_invs_minor') + apply (vcg exspec=setThreadState_modifies) + apply (rule ccorres_Cond_rhs) + apply (simp add: returnOk_bind ccorres_invocationCatch_Inr) + apply (rule ccorres_rhs_assoc)+ + apply (ctac add: setThreadState_ccorres) + apply csymbr + apply (simp add: performInvocation_def) + apply (ctac add: invokeTCB_Resume_ccorres) + apply simp + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (vcg exspec=invokeTCB_Resume_modifies) + apply (wp sts_invs_minor') + apply (vcg exspec=setThreadState_modifies) + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeTCBConfigure_ccorres [where buffer=buffer]) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeSetPriority_ccorres [where buffer=buffer]) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeSetMCPriority_ccorres [where buffer=buffer]) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeSetSchedParams_ccorres [where buffer=buffer]) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeSetIPCBuffer_ccorres [where buffer=buffer]) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) 
+ apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeSetSpace_ccorres [where buffer=buffer]) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeBindNotification_ccorres) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply simp + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeUnbindNotification_ccorres) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_Cond_rhs) + apply (simp add: gen_invocation_type_eq) + apply (rule ccorres_add_returnOk, ctac(no_vcg) add: decodeSetTLSBase_ccorres) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_return_C_errorE, simp+)[1] + apply wp + apply (rule ccorres_equals_throwError) + apply (fastforce simp: throwError_bind invocationCatch_def + split: invocation_label.split gen_invocation_labels.split) + apply (simp add: ccorres_cond_iffs + cong: StateSpace.state.fold_congs globals.fold_congs) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (clarsimp simp: cintr_def intr_and_se_rel_def + exception_defs rf_sr_ksCurThread + Collect_const_mem) + apply (rule conjI) + apply (auto simp: ct_in_state'_def isCap_simps valid_tcb_state'_def + elim!: pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread')[1] + apply (simp split: sum.split add: cintr_def intr_and_se_rel_def + exception_defs syscall_error_rel_def) + apply (simp add: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) + apply clarsimp + done + +end +end diff --git a/proof/crefine/AARCH64/VSpace_C.thy b/proof/crefine/AARCH64/VSpace_C.thy new file mode 100644 index 0000000000..39e009032d --- /dev/null +++ b/proof/crefine/AARCH64/VSpace_C.thy @@ -0,0 +1,3072 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2014, General Dynamics C4 Systems + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory VSpace_C +imports TcbAcc_C CSpace_C PSpace_C TcbQueue_C +begin + +unbundle l4v_word_context + +autocorres + [ skip_heap_abs, skip_word_abs, + scope = handleVMFault, + scope_depth = 0, + c_locale = kernel_all_substitute + ] "../c/build/$L4V_ARCH/kernel_all.c_pp" + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma ccorres_name_pre_C: + "(\s. 
s \ P' \ ccorres_underlying sr \ r xf arrel axf P {s} hs f g) + \ ccorres_underlying sr \ r xf arrel axf P P' hs f g" + apply (rule ccorres_guard_imp) + apply (rule_tac xf'=id in ccorres_abstract, rule ceqv_refl) + apply (rule_tac P="rv' \ P'" in ccorres_gen_asm2) + apply assumption + apply simp + apply simp + done + +lemma ccorres_flip_Guard: + assumes cc: "ccorres_underlying sr \ r xf arrel axf A C hs a (Guard F S (Guard F' S' c))" + shows "ccorres_underlying sr \ r xf arrel axf A C hs a (Guard F' S' (Guard F S c))" + apply (rule ccorres_name_pre_C) + using cc + apply (case_tac "s \ (S' \ S)") + apply (clarsimp simp: ccorres_underlying_def) + apply (erule exec_handlers.cases; + fastforce elim!: exec_Normal_elim_cases intro: exec_handlers.intros exec.Guard) + apply (clarsimp simp: ccorres_underlying_def) + apply (case_tac "s \ S") + apply (fastforce intro: exec.Guard exec.GuardFault exec_handlers.intros) + apply (fastforce intro: exec.Guard exec.GuardFault exec_handlers.intros) + done + +end + +context kernel_m begin + +lemma pageBitsForSize_le: + "pageBitsForSize x \ 30" + by (simp add: pageBitsForSize_def bit_simps split: vmpage_size.splits) + +lemma unat_of_nat_pageBitsForSize[simp]: + "unat (of_nat (pageBitsForSize x)::machine_word) = pageBitsForSize x" + apply (subst unat_of_nat64) + apply (rule order_le_less_trans, rule pageBitsForSize_le) + apply (simp add: word_bits_def) + apply simp + done + +lemma rf_asidTable: + "\ (\, x) \ rf_sr; valid_arch_state' \; idx \ mask asid_high_bits \ + \ case armKSASIDTable (ksArchState \) + idx of + None \ + index (armKSASIDTable_' (globals x)) (unat idx) = + NULL + | Some v \ + index (armKSASIDTable_' (globals x)) (unat idx) = Ptr v \ + index (armKSASIDTable_' (globals x)) (unat idx) \ NULL" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def + array_relation_def) + apply (drule_tac x=idx in spec)+ + apply (clarsimp simp: mask_def split: option.split) + apply (drule sym, simp) + apply (simp add: option_to_ptr_def option_to_0_def) + apply (fastforce simp: invs'_def valid_state'_def valid_arch_state'_def + valid_asid_table'_def ran_def) + done + +lemma getKSASIDTable_ccorres_stuff: + "\ invs' \; (\, x) \ rf_sr; idx' = unat idx; + idx < 2 ^ asid_high_bits \ + \ case armKSASIDTable (ksArchState \) + idx of + None \ + index (armKSASIDTable_' (globals x)) + idx' = + NULL + | Some v \ + index (armKSASIDTable_' (globals x)) + idx' = Ptr v \ + index (armKSASIDTable_' (globals x)) + idx' \ NULL" + apply (drule rf_asidTable [where idx=idx]) + apply fastforce + apply (simp add: mask_def) + apply (simp add: word_le_minus_one_leq) + apply (clarsimp split: option.splits) + done + +lemma asidLowBits_handy_convs: + "sint Kernel_C.asidLowBits = 9" + "Kernel_C.asidLowBits \ 0x20" + "unat Kernel_C.asidLowBits = asid_low_bits" + by (simp add: Kernel_C.asidLowBits_def asid_low_bits_def)+ + +lemma rf_sr_armKSASIDTable: + "\ (s, s') \ rf_sr; n \ mask asid_high_bits \ + \ index (armKSASIDTable_' (globals s')) (unat n) + = option_to_ptr (armKSASIDTable (ksArchState s) n)" + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def array_relation_def) + +lemma asid_high_bits_word_bits: + "asid_high_bits < word_bits" + by (simp add: asid_high_bits_def word_bits_def) + +lemma array_relation_update: + "\ array_relation R bnd table (arr :: 'a['b :: finite]); + x' = unat (x :: ('td :: len) word); R v v'; + unat bnd < card (UNIV :: 'b set) \ + \ array_relation R bnd (table (x := v)) + (Arrays.update arr x' v')" + by 
(simp add: array_relation_def word_le_nat_alt split: if_split) + +definition vm_fault_type_from_H :: "vmfault_type \ machine_word" where + "vm_fault_type_from_H fault \ + case fault of + vmfault_type.ARMDataAbort \ scast Kernel_C.ARMDataAbort + | vmfault_type.ARMPrefetchAbort \ scast Kernel_C.ARMPrefetchAbort" + +lemmas vm_fault_defs_C = + Kernel_C.ARMDataAbort_def + Kernel_C.ARMPrefetchAbort_def + +(* FIXME: automate this *) +lemma seL4_Fault_VMFault_new'_spec: + "\ \s. s = \ \ seL4_Fault_VMFault_new' addr FSR i + \ \r s. s = \ + \ seL4_Fault_VMFault_lift r = \address_CL = addr, FSR_CL = FSR && mask 32, instructionFault_CL = i && mask 1\ + \ seL4_Fault_get_tag r = scast seL4_Fault_VMFault \" + apply (rule hoare_weaken_pre, rule hoare_strengthen_post) + apply (rule autocorres_transfer_spec_no_modifies + [where cs="undefined\globals := \, address_' := addr, + FSR_' := FSR, instructionFault_' := i\", + OF seL4_Fault_VMFault_new'_def seL4_Fault_VMFault_new_spec + seL4_Fault_VMFault_new_modifies]) + by auto + +lemma no_fail_seL4_Fault_VMFault_new': + "no_fail \ (seL4_Fault_VMFault_new' addr fault i)" + apply (rule terminates_spec_no_fail'[OF seL4_Fault_VMFault_new'_def seL4_Fault_VMFault_new_spec]) + apply clarsimp + apply terminates_trivial + done + +lemma handleVMFault_ccorres: + "ccorres ((\f ex v. ex = scast EXCEPTION_FAULT + \ (\vf. f = ArchFault (VMFault (address_CL vf) + [instructionFault_CL vf, FSR_CL vf]) + \ errfault v = Some (SeL4_Fault_VMFault vf))) \ \\) + (liftxf errstate id (K ()) ret__unsigned_long_') + \ + (\\thread = tcb_ptr_to_ctcb_ptr thread\ + \ \\vm_faultType = vm_fault_type_from_H vm_fault\) + [] + (handleVMFault thread vm_fault) + (Call handleVMFault_'proc)" + apply (cinit lift: thread_' vm_faultType_') + apply wpc + apply (simp add: vm_fault_type_from_H_def Kernel_C.ARMDataAbort_def Kernel_C.ARMPrefetchAbort_def) + apply (simp add: ccorres_cond_univ_iff) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: getFAR_ccorres pre: ccorres_liftE_Seq) + apply (ctac (no_vcg) add: getESR_ccorres pre: ccorres_liftE_Seq) + apply (clarsimp simp: curVCPUActive_def liftE_bindE bind_assoc) + apply (rule ccorres_pre_getCurVCPU) + apply (rule ccorres_if_bindE) + apply (rule ccorres_cond_seq) + apply (rule_tac R="\s. 
vcpu = armHSCurVCPU (ksArchState s)" and R'=UNIV in ccorres_cond_strong) + apply (fastforce simp: cur_vcpu_relation_def + dest!: rf_sr_ksArchState_armHSCurVCPU + split: option.splits) + apply (clarsimp simp: bindE_assoc) + apply (rule ccorres_rhs_assoc)+ + apply (ctac (no_vcg) add: addressTranslateS1_ccorres pre: ccorres_liftE_Seq) + apply csymbr + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (clarsimp simp add: throwError_def return_def) + apply (rule conseqPre, vcg) + apply (clarsimp simp: errstate_def EXCEPTION_FAULT_def EXCEPTION_NONE_def + seL4_Fault_VMFault_lift mask_def pageBits_def) + apply wp + apply ccorres_rewrite + apply (rule_tac P=\ and P'="\\addr = addr\" in ccorres_from_vcg_throws) + apply (clarsimp simp add: throwError_def return_def) + apply (rule conseqPre, vcg) + apply (clarsimp simp: errstate_def EXCEPTION_FAULT_def EXCEPTION_NONE_def + seL4_Fault_VMFault_lift mask_def pageBits_def) + apply wpsimp + apply wp + apply (simp add: vm_fault_type_from_H_def Kernel_C.ARMDataAbort_def Kernel_C.ARMPrefetchAbort_def) + apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: getRestartPC_ccorres pre: ccorres_liftE_Seq) + apply (ctac (no_vcg) add: getESR_ccorres pre: ccorres_liftE_Seq) + apply (clarsimp simp: curVCPUActive_def liftE_bindE bind_assoc) + apply (rule ccorres_pre_getCurVCPU) + apply (rule ccorres_if_bindE) + apply (rule ccorres_cond_seq) + apply (rule_tac R="\s. vcpu = armHSCurVCPU (ksArchState s)" and R'=UNIV in ccorres_cond_strong) + apply (fastforce simp: cur_vcpu_relation_def + dest!: rf_sr_ksArchState_armHSCurVCPU + split: option.splits) + apply (clarsimp simp: bindE_assoc) + apply (rule ccorres_rhs_assoc)+ + apply (ctac (no_vcg) add: addressTranslateS1_ccorres pre: ccorres_liftE_Seq) + apply csymbr + apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) + apply (clarsimp simp add: throwError_def return_def) + apply (rule conseqPre, vcg) + apply (clarsimp simp: errstate_def EXCEPTION_FAULT_def EXCEPTION_NONE_def + seL4_Fault_VMFault_lift mask_def pageBits_def) + apply wp + apply ccorres_rewrite + apply (rule_tac P=\ and P'="\\pc = pc\" in ccorres_from_vcg_throws) + apply (clarsimp simp add: throwError_def return_def) + apply (rule conseqPre, vcg) + apply (clarsimp simp: errstate_def EXCEPTION_FAULT_def EXCEPTION_NONE_def + seL4_Fault_VMFault_lift mask_def pageBits_def) + apply wpsimp+ + done + +lemma unat_asidLowBits[simp]: + "unat Kernel_C.asidLowBits = asidLowBits" + by (simp add: asidLowBits_def Kernel_C.asidLowBits_def asid_low_bits_def) + +lemma asid_wf_eq_mask_eq: + "asid_wf asid = (asid && mask asid_bits = asid)" + by (simp add: asid_wf_def and_mask_eq_iff_le_mask) + +lemma leq_asid_bits_shift: + "asid_wf x \ (x::machine_word) >> asid_low_bits \ mask asid_high_bits" + unfolding asid_wf_def + apply (rule word_leI) + apply (simp add: nth_shiftr word_size) + apply (rule ccontr) + apply (clarsimp simp: linorder_not_less asid_high_bits_def asid_low_bits_def) + apply (simp add: mask_def) + apply (simp add: upper_bits_unset_is_l2p_64 [symmetric]) + apply (simp add: asid_bits_def word_bits_def) + apply (erule_tac x="asid_low_bits+n" in allE) (*asid_low_bits*) + apply (simp add: linorder_not_less asid_low_bits_def) + apply (drule test_bit_size) + apply (simp add: word_size) + done + +lemma ucast_asid_high_bits_is_shift: + "asid_wf asid \ ucast (asid_high_bits_of (ucast asid)) = asid >> asid_low_bits" + unfolding asid_wf_def + apply (simp 
add: mask_def upper_bits_unset_is_l2p_64[symmetric]) + apply (simp add: asid_high_bits_of_def mask_2pm1[symmetric] ucast_ucast_mask) + using shiftr_mask_eq[where n=asid_low_bits and x=asid, simplified] + apply (simp add: asid_low_bits_def word_size asid_bits_def word_bits_def mask_def) + apply word_bitwise + apply simp + done + +lemma rf_sr_asidTable_None: + "\ (\, x) \ rf_sr; asid_wf asid; valid_arch_state' \ \ \ + (index (armKSASIDTable_' (globals x)) (unat (asid >> asid_low_bits)) = ap_Ptr 0) = + (armKSASIDTable (ksArchState \) (ucast (asid_high_bits_of (ucast asid))) = None)" + apply (simp add: ucast_asid_high_bits_is_shift) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def) + apply (simp add: array_relation_def option_to_0_def) + apply (erule_tac x="asid >> asid_low_bits" in allE) + apply (erule impE) + apply (simp add: leq_asid_bits_shift flip: mask_2pm1) + apply (drule sym [where t="index a b" for a b]) + apply (simp add: option_to_0_def option_to_ptr_def split: option.splits) + apply (clarsimp simp: valid_arch_state'_def valid_asid_table'_def ran_def) + done + +lemma clift_ptr_safe: + "clift (h, x) ptr = Some a + \ ptr_safe ptr x" + by (erule lift_t_ptr_safe[where g = c_guard]) + +lemma clift_ptr_safe2: + "clift htd ptr = Some a + \ ptr_safe ptr (hrs_htd htd)" + by (cases htd, simp add: hrs_htd_def clift_ptr_safe) + +lemma ptTranslationBits_mask_le: "(x::machine_word) && 0x1FF < 0x200" + by (word_bitwise) + +lemma ptrFromPAddr_spec: + "\s. \ \ {s} + Call ptrFromPAddr_'proc + \ \ret__ptr_to_void = Ptr (ptrFromPAddr (paddr_' s) ) \" + apply vcg + apply (simp add: AARCH64.ptrFromPAddr_def AARCH64.pptrBase_def pptrBaseOffset_def paddrBase_def) + done + +lemma addrFromPPtr_spec: + "\s. \ \ {s} + Call addrFromPPtr_'proc + \ \ret__unsigned_long = (addrFromPPtr (ptr_val (pptr_' s)) ) \" + apply vcg + apply (simp add: addrFromPPtr_def AARCH64.pptrBase_def pptrBaseOffset_def paddrBase_def) + done + +lemma addrFromKPPtr_spec: + "\s. \ \ {s} + Call addrFromKPPtr_'proc + \\ret__unsigned_long = addrFromKPPtr (ptr_val (pptr_' s))\" + apply vcg + apply (simp add: addrFromKPPtr_def kernelELFBaseOffset_def kernelELFPAddrBase_def + kernelELFBase_def pptrBase_def mask_def) + done + +(* FIXME: move *) +lemma corres_symb_exec_unknown_r: + assumes "\rv. corres_underlying sr nf nf' r P P' a (c rv)" + shows "corres_underlying sr nf nf' r P P' a (unknown >>= c)" + apply (simp add: unknown_def) + apply (rule corres_symb_exec_r[OF assms]; wp select_inv) + done + +lemma isPageTablePTE_def2: + "isPageTablePTE pte = (\ppn. pte = PageTablePTE ppn)" + by (simp add: isPageTablePTE_def split: pte.splits) + +lemma ccorres_checkPTAt: + "ccorres_underlying srel Ga rrel xf arrel axf P P' hs (a ()) c \ + ccorres_underlying srel Ga rrel xf arrel axf + (\s. page_table_at' pt_t pt s \ gsPTTypes (ksArchState s) pt = Some pt_t \ P s) + P' + hs + (checkPTAt pt_t pt >>= a) c" + unfolding checkPTAt_def by (rule ccorres_stateAssert) + +lemma pteAtIndex_ko[wp]: + "\\\ pteAtIndex level pt vptr \\pte. 
ko_at' pte (ptSlotIndex level pt vptr)\" + unfolding pteAtIndex_def by (wpsimp wp: getPTE_wp) + +lemma ptBitsLeft_bound: + "level \ maxPTLevel \ ptBitsLeft level \ canonical_bit" + by (simp add: ptBitsLeft_def bit_simps maxPTLevel_def canonical_bit_def split: if_splits) + +lemma unat_of_nat_ptBitsLeft[simp]: + "level \ maxPTLevel \ unat (of_nat (ptBitsLeft level)::machine_word) = ptBitsLeft level" + apply (subst unat_of_nat64) + apply (rule order_le_less_trans, erule ptBitsLeft_bound) + apply (simp add: word_bits_def canonical_bit_def) + apply simp + done + +lemma pte_at'_ptSlotIndex: + "\ page_table_at' pt_t pt s; levelType level = pt_t \ \ pte_at' (ptSlotIndex level pt vptr) s" + apply (simp add: ptSlotIndex_def ptIndex_def) + apply (drule page_table_pte_atI'[where i="ucast (vptr >> ptBitsLeft level) && mask (ptTranslationBits pt_t)"]) + apply (simp add: word_bool_le_funs) + apply simp + done + +lemma pte_pte_table_ptr_get_present_ccorres: + "ccorres (\_ isPTE. isPTE = from_bool (isPageTablePTE pte)) + ret__unsigned_long_' + (ko_at' pte ptePtr) + \ \pt = pte_Ptr ptePtr \ + hs + (return ()) + (Call pte_pte_table_ptr_get_present_'proc)" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre) + apply vcg + apply (clarsimp simp: return_def) + apply (drule rf_sr_cpte_relation) + apply (drule (1) cmap_relation_ko_atD) + apply (clarsimp simp: typ_heap_simps) + apply (cases pte; simp add: isPageTablePTE_def cpte_relation_def pte_lift_def Let_def + split: if_splits) + done + +lemma pte_ptr_get_valid_spec: + "\s. \\ \s. s \\<^sub>c \<^bsup>s\<^esup>pt\ Call pte_ptr_get_valid_'proc + \\ret__unsigned_long = + from_bool (pte_get_tag (the (cslift s \<^bsup>s\<^esup>pt)) \ scast pte_pte_invalid)\" + by (rule allI, rule conseqPre, vcg) (clarsimp simp: from_bool_def split: if_split) + +lemma pte_pte_table_ptr_get_present_spec: + "\s. \\ \s. s \\<^sub>c \<^bsup>s\<^esup>pt\ Call pte_pte_table_ptr_get_present_'proc + \\ret__unsigned_long = + from_bool (pte_get_tag (the (cslift s \<^bsup>s\<^esup>pt)) = scast pte_pte_table)\" + by (rule allI, rule conseqPre, vcg) (clarsimp simp: from_bool_def split: if_split) + +lemma pte_is_page_type_spec: + "\s. \\ {s} Call pte_is_page_type_'proc + \\ret__unsigned_long = from_bool (pte_get_tag \<^bsup>s\<^esup>pte = scast pte_pte_4k_page \ + pte_get_tag \<^bsup>s\<^esup>pte = scast pte_pte_page) \" + by (rule allI, rule conseqPre, vcg) (clarsimp simp: from_bool_def split: if_split) + +lemma pte_get_page_base_address_spec: + "\s. \\ {s} + Call pte_get_page_base_address_'proc + \ \pte. cpte_relation pte (\<^bsup>s\<^esup>pte) \ isPagePTE pte \ + \ret__unsigned_longlong = pteBaseAddress pte \" + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: isPagePTE_def split: pte.splits) + apply (clarsimp simp: cpte_relation_def Let_def pte_lift_def mask_def split: if_splits) + done + +lemma ccorres_pre_getObject_pte: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\pte. ko_at' pte p s \ P pte s)) + {s. \pte pte'. cslift s (pte_Ptr p) = Some pte' \ cpte_relation pte pte' + \ s \ P' pte} + hs (getObject p >>= (\rv. 
f rv)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_guard_imp2) + apply (rule cc) + apply (rule conjI) + apply (rule_tac Q="ko_at' rv p s" in conjunct1) + apply assumption + apply assumption + apply (wp getPTE_wp empty_fail_getObject | simp)+ + apply clarsimp + apply (erule cmap_relationE1 [OF rf_sr_cpte_relation], + erule ko_at_projectKO_opt) + apply simp + done + +lemma getObject_pte_ccorres: + "p' = pte_Ptr p \ + ccorres cpte_relation pte_' \ UNIV hs + (getObject p) + (Guard C_Guard {s. s \\<^sub>c p'} (\pte :== h_val (hrs_mem \t_hrs) p'))" + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (rule ccorres_add_return2) + apply (rule ccorres_pre_getObject_pte) + apply (rule ccorres_move_c_guard_pte) + apply (rename_tac pte) + apply (rule_tac P="ko_at' pte p" and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (drule rf_sr_cpte_relation) + apply (drule (1) cmap_relation_ko_atD) + apply (clarsimp simp: typ_heap_simps) + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + done + +lemmas unat_and_mask_le_ptTrans = unat_and_mask_le[OF AARCH64.ptTranslationBits_le_machine_word] + +definition mask_ptTranslationBits :: "pt_type \ machine_word" where + "mask_ptTranslationBits pt_t \ mask (ptTranslationBits pt_t)" + +schematic_goal mask_ptTranslationBits_pt: + "mask_ptTranslationBits NormalPT_T = numeral ?n" + by (simp add: mask_ptTranslationBits_def bit_simps mask_def del: word_eq_numeral_iff_iszero) + +schematic_goal mask_ptTranslationBits_vs: + "mask_ptTranslationBits VSRootPT_T = numeral ?n" + by (simp add: mask_ptTranslationBits_def bit_simps mask_def + Kernel_Config.config_ARM_PA_SIZE_BITS_40_def + del: word_eq_numeral_iff_iszero) + +lemma lookupPTSlotFromLevel_ccorres: + defines + "ptSlot_upd pt_t \ + Guard ShiftError \ptBitsLeft_C \ret___struct_lookupPTSlot_ret_C < 0x40\ + (Guard MemorySafety + \(\vptr >> unat (ptBitsLeft_C \ret___struct_lookupPTSlot_ret_C)) && mask_ptTranslationBits pt_t = 0 \ + array_assertion \pt + (unat ((\vptr >> unat (ptBitsLeft_C \ret___struct_lookupPTSlot_ret_C)) && mask_ptTranslationBits pt_t)) + (hrs_htd \t_hrs)\ + (\ret___struct_lookupPTSlot_ret_C :== + ptSlot_C_update + (\_. \pt +\<^sub>p + uint ((\vptr >> unat (ptBitsLeft_C \ret___struct_lookupPTSlot_ret_C)) && mask_ptTranslationBits pt_t)) + \ret___struct_lookupPTSlot_ret_C))" + shows + "ccorres (\(bitsLeft, ptSlot) cr. bitsLeft = unat (ptBitsLeft_C cr) \ ptSlot_C cr = pte_Ptr ptSlot) + ret__struct_lookupPTSlot_ret_C_' + (page_table_at' (levelType level) pt and + (\s. gsPTTypes (ksArchState s) pt = Some (levelType level)) and + (\_. level \ maxPTLevel)) + (\ ptBitsLeft_C \ret___struct_lookupPTSlot_ret_C = of_nat (ptBitsLeft level) \ + \ \ \level = of_nat level \ \ \ \pt = Ptr pt \ \ \ \vptr = vptr \) + (SKIP # hs) + (lookupPTSlotFromLevel level pt vptr) + (ptSlot_upd (levelType level);; + \ret__unsigned_long :== CALL pte_pte_table_ptr_get_present(ptSlot_C + \ret___struct_lookupPTSlot_ret_C);; + WHILE \ret__unsigned_long \ 0 \ 0 < \level DO + \level :== \level - 1;; + \ret___struct_lookupPTSlot_ret_C :== + ptBitsLeft_C_update (\_. 
ptBitsLeft_C \ret___struct_lookupPTSlot_ret_C - 9) + \ret___struct_lookupPTSlot_ret_C;; + \ret__unsigned_longlong :== CALL pte_pte_table_ptr_get_pt_base_address(ptSlot_C + \ret___struct_lookupPTSlot_ret_C);; + \ret__ptr_to_void :== CALL ptrFromPAddr(UCAST(64 \ 64) \ret__unsigned_longlong);; + \pt :== PTR_COERCE(unit \ pte_C) \ret__ptr_to_void;; + ptSlot_upd NormalPT_T;; + \ret__unsigned_long :== CALL pte_pte_table_ptr_get_present(ptSlot_C + \ret___struct_lookupPTSlot_ret_C) + OD;; + return_C ret__struct_lookupPTSlot_ret_C_'_update ret___struct_lookupPTSlot_ret_C_')" +proof (induct level arbitrary: pt) + note unat_and_mask_le_ptTrans[simp] neq_0_unat[simp] + + case 0 + show ?case + apply (simp only: ptSlot_upd_def lookupPTSlotFromLevel.simps(1)) + apply (cinitlift pt_' vptr_', simp only:) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_guard_imp2) + apply (rule ccorres_Guard_Seq) + apply (rule ccorres_move_array_assertion_pt) + apply (rule ccorres_symb_exec_r2) + apply (rule ccorres_symb_exec_r2) + apply (simp add: whileAnno_def) + apply (rule ccorres_expand_while_iff_Seq[THEN iffD1]) + apply (rule ccorres_cond_false[where R="\" and + R'="\ \level = 0 \ + ptBitsLeft_C \ret___struct_lookupPTSlot_ret_C = of_nat pageBits \ + ptSlot_C \ret___struct_lookupPTSlot_ret_C = + pte_Ptr pt +\<^sub>p uint ((vptr >> pageBits) && mask (ptTranslationBits NormalPT_T)) \"]) + apply (rule ccorres_guard_imp) + apply (rule ccorres_return_C) + apply clarsimp + apply clarsimp + apply clarsimp + apply (rule TrueI) + apply (clarsimp simp: bit_simps pt_slot_offset_def pt_index_def pt_bits_left_def shiftl_t2n) + apply vcg + apply clarsimp + apply (vcg spec=modifies) + apply clarsimp + apply vcg + apply (vcg spec=modifies) + apply (clarsimp simp: mask_ptTranslationBits_pt) + apply (drule pte_at'_ptSlotIndex[where level=0 and vptr=vptr], simp) + apply (clarsimp simp: pt_slot_offset_def pt_index_def pt_bits_left_def field_simps) + apply (clarsimp simp: bit_simps mask_def unat_le_fold shiftl_t2n c_guard_abs_pte) + apply (rule order_trans, rule word_bool_le_funs, solves simp) + done + + case (Suc level) + + have ptSlot_upd_levelType: + "Suc level \ maxPTLevel \ ptSlot_upd NormalPT_T = ptSlot_upd (levelType level)" + by (simp add: levelType_def) + + show ?case + apply (simp only: lookupPTSlotFromLevel.simps) + apply (subst ptSlot_upd_def) + \ \cinitlift will not fully eliminate pt and vptr, + so we double the precondition to remember the connection\ + apply (rule ccorres_guard_imp[where Q=Q and A=Q and + Q'="A' \ \\pt = pte_Ptr pt\ \ \\vptr = vptr\" and + A'=A' for Q A']; simp) + apply (cinitlift pt_' vptr_', simp only:) \ \Warns about ptSlot_upd, which is fine\ + apply (rename_tac vptrC ptC) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_guard_imp2) + apply (rule ccorres_gen_asm[where P="Suc level \ maxPTLevel"]) + apply (rule ccorres_Guard_Seq) + apply (rule ccorres_move_array_assertion_pt_gen[where pt_t="levelType (Suc level)"]) + apply (rule ccorres_symb_exec_r2) + apply (rule_tac G'="\ ptSlot_C \ret___struct_lookupPTSlot_ret_C = + pte_Ptr (ptSlotIndex (Suc level) pt vptr) \ + ptBitsLeft_C \ret___struct_lookupPTSlot_ret_C = + of_nat (ptBitsLeft (Suc level)) \ + \level = of_nat (Suc level) \ + \vptr = vptr \ + \pt = ptC \ + hrs_htd \t_hrs,c_guard \\<^sub>t pte_Ptr (ptSlotIndex (Suc level) pt vptr) + \" + in ccorres_symb_exec_l') + apply (rename_tac pte) + apply (rule ccorres_add_return) + apply (rule ccorres_guard_imp) + apply (rule_tac xf'=ret__unsigned_long_' in ccorres_split_nothrow_call) + apply 
(rule_tac pte=pte in pte_pte_table_ptr_get_present_ccorres) + apply simp + apply simp + apply simp + apply (simp only: ptSlot_upd_def) + apply ceqv + apply (rename_tac from_bl) + apply (fold ptSlot_upd_def) + apply (unfold whileAnno_def)[1] + apply (rule ccorres_expand_while_iff_Seq[THEN iffD1]) + apply (rule_tac R="\" and + R'="\\ret__unsigned_long = from_bl \ \level = of_nat (Suc level)\" + in ccorres_cond_strong) + apply (clarsimp simp: maxPTLevel_def word_less_nat_alt unat_word_ariths + unat_of_nat_eq + split: if_split_asm) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r2) + apply (rule ccorres_symb_exec_r2) + apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) + apply (rule_tac xf'=pt_' and + R="ko_at' pte (ptSlotIndex (Suc level) pt vptr) and + K (isPageTablePTE pte)" and + R'="{s. ptSlot_C (ret___struct_lookupPTSlot_ret_C_' s) = + pte_Ptr (ptSlotIndex (Suc level) pt vptr)}" and + val="pte_Ptr (getPPtrFromPTE pte)" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg, clarsimp) + apply (frule (1) pte_at_rf_sr) + apply (clarsimp simp: typ_heap_simps isPageTablePTE_def2 cpte_relation_def + pte_pte_table_lift_def pte_pte_table_lift + getPPtrFromPTE_def isPagePTE_def) + apply ceqv + apply (rule ccorres_checkPTAt) + apply simp + apply (rule ccorres_rhs_assoc2)+ + apply (subst ptSlot_upd_levelType, assumption) + apply (rule Suc[unfolded whileAnno_def, simplified]) + apply vcg + apply vcg + apply (vcg spec=modifies) + apply vcg + apply (vcg spec=modifies) + apply (rule ccorres_return_C; simp) + apply simp + apply wp + prefer 2 + apply assumption + prefer 4 + apply (wpsimp wp: getPTE_wp simp: pteAtIndex_def) + apply simp + apply vcg + apply clarsimp + apply (clarsimp simp: ptBitsLeft_def bit_simps false_def true_def) + apply (wpsimp simp: pteAtIndex_def) + apply (wpsimp simp: pteAtIndex_def wp: empty_fail_getObject) + apply vcg + apply (vcg spec=modifies) + apply clarsimp + apply (prop_tac "levelType level = NormalPT_T", clarsimp simp: levelType_def) + apply clarsimp + apply (drule pte_at'_ptSlotIndex[where vptr=vptr and level="Suc level"], simp) + apply (simp add: c_guard_abs_pte) + apply (simp add: ptSlotIndex_def ptIndex_def ptBitsLeft_def mask_ptTranslationBits_def) + apply (simp add: pte_bits_def word_size_bits_def shiftl_t2n) + apply (simp add: bit_simps word_less_nat_alt maxPTLevel_def unat_word_ariths unat_of_nat_eq + split: if_splits) + done +qed + +lemma lookupPTSlot_ccorres: + "ccorres (\(bitsLeft,ptSlot) cr. bitsLeft = unat (ptBitsLeft_C cr) \ ptSlot_C cr = Ptr ptSlot) + ret__struct_lookupPTSlot_ret_C_' + (page_table_at' VSRootPT_T pt) + (\\vptr = vptr \ \ \\vspace = Ptr pt \) + hs + (lookupPTSlot pt vptr) + (Call lookupPTSlot_'proc)" + apply (cinit lift: vspace_') + apply (rule ccorres_symb_exec_r2) + apply (rule ccorres_symb_exec_r2) + apply (rule ccorres_symb_exec_r2) + apply (rule ccorres_rhs_assoc2)+ + apply (rule ccorres_checkPTAt) + apply simp + apply (rule lookupPTSlotFromLevel_ccorres[where level=maxPTLevel, simplified, + simplified mask_ptTranslationBits_pt + mask_ptTranslationBits_vs]) + apply vcg + apply (vcg spec=modifies) + apply vcg + apply (vcg spec=modifies) + apply vcg + apply (vcg spec=modifies) + apply (simp add: bit_simps ptBitsLeft_def maxPTLevel_val split: if_split) + done + +abbreviation + "findVSpaceForASID_xf \ + liftxf errstate findVSpaceForASID_ret_C.status_C + findVSpaceForASID_ret_C.vspace_root_C + ret__struct_findVSpaceForASID_ret_C_'" + +lemma ccorres_pre_getObject_asidpool: + assumes cc: "\rv. 
ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\asidpool. ko_at' asidpool p s \ P asidpool s)) + {s. \ asidpool asidpool'. cslift s (ap_Ptr p) = Some asidpool' \ casid_pool_relation asidpool asidpool' + \ s \ P' asidpool} + hs (getObject p >>= (\rv :: asidpool. f rv)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_guard_imp2) + apply (rule cc) + apply (rule conjI) + apply (rule_tac Q="ko_at' rv p s" in conjunct1) + apply assumption + apply assumption + apply (wpsimp wp: getASID_wp empty_fail_getObject)+ + apply (erule cmap_relationE1 [OF rf_sr_cpspace_asidpool_relation], + erule ko_at_projectKO_opt) + apply simp + done + +lemma asid_wf_table_guard[unfolded asid_high_bits_def, simplified]: + "asid_wf asid \ asid >> asid_low_bits < 2^asid_high_bits" + apply (simp add: asid_wf_def) + apply (simp add: mask_def asid_bits_def asid_low_bits_def asid_high_bits_def) + apply word_bitwise + done + +lemma asidLowBits_guard0[simp]: + "0 <=s Kernel_C.asidLowBits" + by (simp add: Kernel_C.asidLowBits_def) + +lemma asidLowBits_shift_guard[unfolded word_bits_def, simplified, simp]: + "Kernel_C.asidLowBits snd asidRange) = asid_wf asid" + by (simp add: asid_wf_def mask_def asidRange_def del: word64_less_sub_le) + +lemma getPoolPtr_assign_ccorres: + "ccorres ((=) \ option_to_ptr) poolPtr_' \ UNIV hs + (getPoolPtr asid) + (\poolPtr :== \armKSASIDTable.[unat (asid >> asid_low_bits)])" + unfolding getPoolPtr_def + apply simp + apply (rule ccorres_assert)+ + apply (rule ccorres_from_vcg_nofail) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def return_def bind_def asidRange_asid_wf) + apply (simp add: ucast_asid_high_bits_is_shift) + apply (fastforce dest!: rf_sr_armKSASIDTable intro!: leq_asid_bits_shift) + done + +lemma getPoolPtr_ccorres: + "ccorres ((=) \ option_to_ptr) ret__ptr_to_struct_asid_pool_C_' + \ \ \asid___unsigned_long = asid \ hs + (getPoolPtr asid) + (Call getPoolPtr_'proc)" + (* getPoolPtr_assign_ccorres above does not apply to the body, because everything is in the + return_C statement *) + apply (cinit lift: asid___unsigned_long_') + apply (rule ccorres_assert)+ + apply (clarsimp simp: asidRange_asid_wf asid_wf_table_guard gets_return_gets_eq) + apply (rule ccorres_Guard) + apply (rule ccorres_from_vcg_throws_nofail[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: asidRange_asid_wf ucast_asid_high_bits_is_shift simpler_gets_def) + apply (fastforce dest!: rf_sr_armKSASIDTable intro!: leq_asid_bits_shift) + apply simp + done + +lemma findMapForASID_ccorres: + "ccorres casid_map_relation ret__struct_asid_map_C_' + (valid_arch_state' and K (asid_wf asid)) + (\\asid___unsigned_long = asid\) hs + (getASIDPoolEntry asid) + (Call findMapForASID_'proc)" + apply (cinit lift: asid___unsigned_long_') + apply (simp add: bind_assoc) + apply (rule ccorres_move_const_guards) + apply (ctac (no_vcg) add: getPoolPtr_assign_ccorres) + apply (clarsimp cong: option.case_cong) + apply (rename_tac ap_opt) + apply (wpc; clarsimp) + apply ccorres_rewrite + apply csymbr + apply (rule ccorres_return_C; simp) + apply (rename_tac ap_ptr) + apply (rule_tac P="ap_ptr \ 0" in ccorres_gen_asm) + apply (clarsimp simp: liftM_def) + apply (rule ccorres_pre_getObject_asidpool) + apply (rename_tac ap) + apply (rule ccorres_move_c_guard_ap) + apply wpc + apply (rename_tac pool) + apply (rule ccorres_return_C; clarsimp) + apply (wpsimp simp: asid_pool_at_ko'_eq getPoolPtr_def) + apply (clarsimp 
simp: casid_map_relation_None_lift typ_heap_simps asid_map_lift_def + simp del: casid_map_relation_None) + apply (clarsimp simp: casid_pool_relation_def split: asid_pool_C.splits) + apply (fastforce simp: word_and_le1 array_relation_def mask_2pm1[symmetric]) + apply (clarsimp simp: asid_wf_table_guard valid_arch_state'_def valid_asid_table'_def ran_def) + done + +lemma findVSpaceForASID_ccorres: + "ccorres + (lookup_failure_rel \ (\pteptrc pteptr. pteptr = pte_Ptr pteptrc)) + findVSpaceForASID_xf + (valid_arch_state' and (\_. asid_wf asid)) + (\\asid___unsigned_long = asid\) + [] + (findVSpaceForASID asid) + (Call findVSpaceForASID_'proc)" + apply (rule ccorres_gen_asm) + apply (cinit lift: asid___unsigned_long_') + apply (simp add: liftE_bindE) + apply (ctac (no_vcg) add: findMapForASID_ccorres) + apply csymbr + apply wpc + apply (rule ccorres_cond_true_seq) + apply ccorres_rewrite + apply (rule_tac P="\" and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: throwError_def return_def bindE_def Nondet_Monad.lift_def + EXCEPTION_NONE_def EXCEPTION_LOOKUP_FAULT_def + lookup_fault_lift_invalid_root asid_wf_table_guard) + apply (rule ccorres_cond_false_seq) + apply ccorres_rewrite + apply wpc + apply (clarsimp simp: checkPTAt_def liftE_bindE) + apply (rule ccorres_assertE) + apply (rule ccorres_stateAssert) + apply csymbr + apply csymbr + apply csymbr + apply (rule ccorres_return_CE; simp) + apply clarsimp + apply wp + apply (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def asid_map_tag_defs + asid_map_asid_map_vspace_lift_def + split: if_splits) + done + +lemma ccorres_pre_gets_armKSGlobalUserVSpace_ksArchState: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. armKSGlobalUserVSpace (ksArchState s) = rv \ P rv s)) + (P' (ptr_val armKSGlobalUserVSpace_Ptr)) + hs (gets (armKSGlobalUserVSpace \ ksArchState) >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp + apply (rule gets_sp) + apply wp + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply (drule rf_sr_armKSGlobalUserVSpace) + apply simp + done + +(* FIXME move *) +lemma ccorres_from_vcg_might_throw: + "(\\. Gamm \ {s. P \ \ s \ P' \ (\, s) \ sr} c + {s. \(rv, \') \ fst (a \). (\', s) \ sr \ r rv (xf s)}, + {s. \(rv, \') \ fst (a \). (\', s) \ sr \ arrel rv (axf s)}) + \ ccorres_underlying sr Gamm r xf arrel axf P P' (SKIP # hs) a c" + apply (rule ccorresI') + apply (drule_tac x=s in spec) + apply (erule exec_handlers.cases, simp_all) + apply clarsimp + apply (erule exec_handlers.cases, simp_all)[1] + apply (auto elim!: exec_Normal_elim_cases)[1] + apply (drule(1) exec_abrupt[rotated]) + apply simp + apply (clarsimp simp: unif_rrel_simps elim!: exec_Normal_elim_cases) + apply fastforce + apply (clarsimp simp: unif_rrel_simps) + apply (drule hoare_sound) + apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def) + apply fastforce + done + +end + +context kernel_m begin + +lemma isValidVTableRoot_def2: + "isValidVTableRoot cap = + (\pt asid vref. cap = ArchObjectCap (PageTableCap pt VSRootPT_T (Some (asid,vref))))" + unfolding isValidVTableRoot_def + by (auto simp: isVTableRoot_def + split: capability.splits arch_capability.splits option.splits pt_type.splits) + +lemma isValidNativeRoot_spec: + "\s. + \ \ {\. s = \ \ True} + Call isValidNativeRoot_'proc + {t. \cap. 
ccap_relation cap (cap_' s) \ ret__unsigned_long_' t = from_bool (isValidVTableRoot cap)}" + apply (vcg, clarsimp simp: isValidVTableRoot_def2) + apply (rule conjI, clarsimp simp: case_bool_If split: if_split) + apply (rule conjI; clarsimp simp: cap_vspace_cap_lift) + apply (erule ccap_relationE, clarsimp simp: cap_to_H_def isCap_simps to_bool_def + split: if_split_asm) + apply (erule ccap_relationE, clarsimp simp: isCap_simps cap_to_H_def) + by (clarsimp simp: from_bool_def case_bool_If isCap_simps + dest!: cap_get_tag_isCap_unfolded_H_cap + split: if_split) + +lemma findMapForASID_loadVMID_ccorres: + "ccorres (\vmid rv'. \vspace. casid_map_relation (Some (ASIDPoolVSpace vmid vspace)) rv') + ret__struct_asid_map_C_' + (valid_arch_state' and K (asid_wf asid)) (\\asid___unsigned_long = asid\) hs + (loadVMID asid) + (Call findMapForASID_'proc)" + apply (cinit lift: asid___unsigned_long_') + apply (simp add: getASIDPoolEntry_def bind_assoc) + apply (rule ccorres_move_const_guards) + apply (ctac (no_vcg) add: getPoolPtr_assign_ccorres) + apply (clarsimp cong: option.case_cong) + apply (rename_tac ap_opt) + apply (wpc; clarsimp) + apply (rule ccorres_fail) + apply (rename_tac ap_ptr) + apply (rule_tac P="ap_ptr \ 0" in ccorres_gen_asm) + apply (clarsimp simp: liftM_def) + apply (rule ccorres_pre_getObject_asidpool) + apply (rename_tac ap) + apply (rule ccorres_move_c_guard_ap) + apply wpc + apply (rename_tac pool) + apply clarsimp + apply wpc + apply (rename_tac entry) + apply (rule ccorres_fail) + apply (wpc; clarsimp) + apply (rename_tac vmid) + apply (rule ccorres_return_C; clarsimp) + apply (wpsimp simp: asid_pool_at_ko'_eq getPoolPtr_def) + apply clarsimp + apply (simp add: typ_heap_simps) + apply (rename_tac pool vmid vspace) + apply (clarsimp simp: casid_pool_relation_def split: asid_pool_C.splits) + apply (rename_tac cpool) + apply (fold mask_2pm1) + apply (simp add: array_relation_def) + apply (drule_tac x="asid && mask asid_low_bits" in spec) + apply (clarsimp simp: word_and_le1) + apply fastforce + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: valid_arch_state'_def valid_asid_table'_def ran_def) + apply (drule leq_asid_bits_shift) + apply (simp add: mask_def bit_simps') + apply unat_arith + done + +lemma ccorres_pre_gets_armKSVMIDTable_ksArchState: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. armKSVMIDTable (ksArchState s) = rv \ P rv s)) + {s. \rv. s \ P' rv } + hs (gets (armKSVMIDTable \ ksArchState) >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule gets_sp) + apply (clarsimp simp: empty_fail_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply clarsimp + done + +lemma ccorres_pre_gets_armKSNextVMID_ksArchState: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. armKSNextVMID (ksArchState s) = rv \ P rv s)) + {s. \rv. s \ P' rv } + hs (gets (armKSNextVMID \ ksArchState) >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule gets_sp) + apply (clarsimp simp: empty_fail_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply clarsimp + done + +lemma setObjectASID_Basic_ccorres: + "ccorres dc xfdc \ {s. 
f s = p \ casid_pool_relation pool (asid_pool_C.asid_pool_C (pool' s))} hs + (setObject p pool) + ((Basic (\s. globals_update( t_hrs_'_update + (hrs_mem_update (heap_update (Ptr &(ap_Ptr (f s)\[''array_C''])) (pool' s)))) s)))" + apply (rule setObject_ccorres_helper) + apply (simp_all add: objBits_simps pageBits_def) + apply (rule conseqPre, vcg) + apply (rule subsetI, clarsimp simp: Collect_const_mem) + apply (rule cmap_relationE1, erule rf_sr_cpspace_asidpool_relation, + erule ko_at_projectKO_opt) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps + update_asidpool_map_to_asidpools + update_asidpool_map_tos) + apply (case_tac y') + apply clarsimp + apply (erule cmap_relation_updI, + erule ko_at_projectKO_opt, simp+) + apply (simp add: cready_queues_relation_def + carch_state_relation_def + cmachine_state_relation_def + Let_def typ_heap_simps + update_asidpool_map_tos) + done + +lemma getASIDMap_ccorres: (* "asid" needs to instantiated when this rule is used *) + "ccorres (\pool asid_map. casid_map_relation (pool (asid && mask asid_low_bits)) asid_map) + ret__struct_asid_map_C_' + \ + (\\asid___unsigned_long = asid\ \ \\poolPtr = ap_Ptr ap\) hs + (liftM (inv ASIDPool) (getObject ap)) + (Call getASIDMap_'proc)" + apply (cinit lift: poolPtr_') + apply (rule ccorres_pre_getObject_asidpool) + apply (rename_tac pool) + apply (rule ccorres_Guard) + apply (rule ccorres_return_C; simp) + apply (clarsimp simp: typ_heap_simps casid_pool_relation_def mask_2pm1[symmetric] + array_relation_def word_bool_le_funs + split: asidpool.splits asid_pool_C.splits) + done + +lemma setASIDMap_ccorres: (* "asid_map_entry'" needs to instantiated when this rule is used *) + "casid_map_relation entry asid_map_entry' \ + ccorres dc xfdc + (ko_at' (ASIDPool pool) ap) + (\ \poolPtr = ap_Ptr ap \ \ \ \asid___unsigned_long = asid \ \ \ \asid_map = asid_map_entry' \) + hs + (setObject ap (ASIDPool (pool(asid && mask asid_low_bits := entry)))) + (Call setASIDMap_'proc)" + apply (cinit' lift: poolPtr_' asid___unsigned_long_' asid_map_') + apply (rule ccorres_Guard) + apply (rule setObject_ccorres_helper[where P="ko_at' (ASIDPool pool) ap" and P'=UNIV]; + simp add: objBits_simps pageBits_def) + apply (rule conseqPre, vcg) + apply normalise_obj_at' + apply (rule cmap_relationE1, erule rf_sr_cpspace_asidpool_relation, erule ko_at_projectKO_opt) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (rule conjI) + apply (clarsimp simp: cpspace_relation_def typ_heap_simps + update_asidpool_map_to_asidpools + update_asidpool_map_tos) + apply (case_tac y') + apply clarsimp + apply (erule cmap_relation_updI, erule ko_at_projectKO_opt, simp) + apply (clarsimp simp flip: mask_2pm1 simp: casid_pool_relation_def) + apply (rule conjI) + apply (erule array_relation_update, rule refl, assumption) + apply (simp add: mask_def asid_low_bits_def) + apply (simp add: word_bool_le_funs split: if_split) + apply blast + apply simp + apply (simp add: cready_queues_relation_def carch_state_relation_def + cmachine_state_relation_def Let_def typ_heap_simps update_asidpool_map_tos) + apply (fastforce dest: asid_pool_at_rf_sr simp: typ_heap_simps) + done + +lemma invalidateASID_ccorres: + "ccorres dc xfdc \ \\asid___unsigned_long = asid\ hs + (invalidateASID asid) (Call invalidateASID_'proc)" + apply (cinit lift: asid___unsigned_long_') + apply (clarsimp simp: updateASIDPoolEntry_def) + apply (ctac add: getPoolPtr_ccorres) + apply (rule 
ccorres_assert2, clarsimp) + apply (ctac add: getASIDMap_ccorres[where asid=asid]) + apply (rule ccorres_assert2) + apply csymbr + apply csymbr + apply (rule ccorres_call, rule_tac asid_map_entry'=asid_map in setASIDMap_ccorres; simp?) + apply (fastforce simp: casid_map_relation_def asid_map_lift_def Let_def + asid_map_asid_map_vspace_lift_def + split: option.splits if_splits) + apply (wp getASID_wp) + apply (vcg exspec=getASIDMap_modifies) + apply (wpsimp simp: getPoolPtr_def) + apply (vcg exspec=getPoolPtr_modifies) + apply (clarsimp simp: inv_ASIDPool casid_map_relation_def asid_map_lift_def Let_def + split: if_splits asidpool.splits) + done + +lemma rf_sr_armKSNextVMID: + "(s, s') \ rf_sr \ armKSNextVMID (ksArchState s) = armKSNextASID_' (globals s')" + by (simp add: rf_sr_def cstate_relation_def carch_state_relation_def Let_def) + +lemma rf_sr_armKSVMIDTable_rel': + "(s, s') \ rf_sr \ + array_relation ((=) \ option_to_0) (mask vmid_bits) + (armKSVMIDTable (ksArchState s)) + (armKSHWASIDTable_' (globals s'))" + by (simp add: rf_sr_def cstate_relation_def carch_state_relation_def Let_def) + +lemma invalidateASIDEntry_ccorres: + "ccorres dc xfdc (valid_arch_state' and K (asid_wf asid)) \ \asid___unsigned_long = asid \ hs + (invalidateASIDEntry asid) (Call invalidateASIDEntry_'proc)" + apply (cinit lift: asid___unsigned_long_') + apply (ctac add: findMapForASID_loadVMID_ccorres) + apply csymbr + apply (clarsimp simp: when_def) + apply (rule ccorres_split_nothrow[where xf'=xfdc and r'=dc]) + apply (rule_tac R="\" in ccorres_cond2) + apply (clarsimp simp: casid_map_relation_def asid_map_asid_map_vspace_lift_def + asid_map_lift_def Let_def + split: option.splits if_splits) + apply (rule_tac P="\_. rv \ None" and P'=UNIV in ccorres_from_vcg) + apply (clarsimp, rule conseqPre, vcg) + apply (clarsimp simp: invalidateVMIDEntry_def simpler_gets_def simpler_modify_def return_def + bind_def) + apply (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def + split: option.splits if_splits) + apply (clarsimp simp: asid_map_asid_map_vspace_lift_def asid_map_lift_def) + apply (clarsimp simp: asid_map_get_tag_def to_bool_def) + apply (rule conjI) + apply (rule le_less_trans, rule word_and_mask_le_2pm1, simp) + apply (clarsimp simp: rf_sr_def cstate_relation_def cmachine_state_relation_def Let_def + carch_state_relation_def carch_globals_def + simp del: fun_upd_apply) + apply (erule array_relation_update) + apply word_eqI_solve + apply (clarsimp simp: asidInvalid_def) + apply (simp add: mask_def vmid_bits_val unat_max_word) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: invalidateASID_ccorres) + apply wp + apply vcg + apply wpsimp + apply clarsimp + apply (vcg exspec=findMapForASID_modifies) + apply (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def + split: if_splits) + done + +lemma invalidateVMIDEntry_ccorres: + "vmid' = unat vmid \ + ccorres dc xfdc \ UNIV [] + (invalidateVMIDEntry vmid) + (Basic (\s. globals_update + (armKSHWASIDTable_'_update + (\_. 
Arrays.update (armKSHWASIDTable_' (globals s)) vmid' (scast asidInvalid))) + s))" + apply (clarsimp simp: invalidateVMIDEntry_def) + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + carch_globals_def cmachine_state_relation_def) + apply (simp flip: fun_upd_apply) + apply (erule array_relation_update, rule refl) + apply (simp (no_asm) add: asidInvalid_def) + apply (simp (no_asm) add: mask_def vmid_bits_val unat_max_word) + done + +crunches invalidateVMIDEntry, invalidateASID + for nextVMID[wp]: "\s. P (armKSNextVMID (ksArchState s))" + (wp: crunch_wps getASID_wp) + +lemma findFreeHWASID_ccorres: + "ccorres (=) ret__unsigned_char_' + (valid_arch_state') UNIV [] + (findFreeVMID) (Call findFreeHWASID_'proc)" + apply (cinit) + apply csymbr + apply (rule ccorres_pre_gets_armKSVMIDTable_ksArchState) + apply (rule ccorres_pre_gets_armKSNextVMID_ksArchState) + apply (simp add: whileAnno_def case_option_find_give_me_a_map + mapME_def + del: Collect_const map_append) + apply (rule ccorres_splitE_novcg) + apply (subgoal_tac "[nextVMID .e. maxBound] @ init [minBound .e. nextVMID] + = map (\x. nextVMID + (of_nat x)) [0 ..< 256]") (* FIXME AARCH64: vmid array size *) + apply clarsimp + apply (rule_tac xf=hw_asid_offset_' and i=0 + and xf_update=hw_asid_offset_'_update + and r'=dc and xf'=xfdc and Q=UNIV + and F="\n s. vmidTable = armKSVMIDTable (ksArchState s) + \ nextVMID = armKSNextVMID (ksArchState s) + \ valid_arch_state' s" + in ccorres_sequenceE_while_gen') + apply (rule ccorres_from_vcg_might_throw) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (rename_tac ys \ s) + apply (subst down_cast_same [symmetric, where 'b=8], (* FIXME AARCH64: vmid_len, but vmid just word8 *) + simp add: is_down_def target_size_def source_size_def word_size)+ + apply (simp add: ucast_ucast_mask + ucast_ucast_add ucast_and_mask + ucast_of_nat_small asidInvalid_def + word_sless_msb_less ucast_less[THEN order_less_le_trans] + word_0_sle_from_less) + apply (simp add: word_sint_msb_eq not_msb_from_less word_of_nat_less + trans[OF msb_nth nth_ucast] bang_big word_size + uint_up_ucast is_up_def source_size_def + target_size_def rf_sr_armKSNextVMID) + apply (rule conjI, rule order_trans[OF _ uint_add_ge0], simp) + apply (simp add: throwError_def return_def split: if_split) + apply (clarsimp simp: returnOk_def return_def inr_rrel_def rf_sr_armKSNextVMID) + apply (drule rf_sr_armKSVMIDTable_rel') + apply (clarsimp simp: array_relation_def vmid_bits_val mask_def) + apply (erule_tac x="armKSNextASID_' (globals s) + word_of_nat (length ys)" in allE) + apply (clarsimp simp: valid_arch_state'_def ran_def) + apply ((rule conjI, uint_arith, simp add: take_bit_nat_def unsigned_of_nat, clarsimp)+)[1] + apply (simp add: mask_def) + apply unat_arith + apply (rule conseqPre, vcg) + apply clarsimp + apply simp + apply (rule hoare_pre, wp) + apply simp + apply simp + apply simp + apply simp + + apply (cut_tac x=nextVMID in leq_maxBound[unfolded word_le_nat_alt]) + apply (simp add: minBound_word init_def maxBound_word minus_one_norm) + apply (simp add: upto_enum_word) + apply (rule nth_equalityI) + apply (simp del: upt.simps) + apply (simp del: upt.simps) + apply (simp add: nth_append + split: if_split) + + apply ceqv + apply (rule ccorres_assert) + apply (rule_tac A="\s. 
nextVMID = armKSNextVMID (ksArchState s) + \ vmidTable = armKSVMIDTable (ksArchState s) + \ valid_arch_state' s" + in ccorres_guard_imp2[where A'=UNIV]) + apply (simp add: split_def) + apply (rule ccorres_symb_exec_r) + apply (rule_tac xf'=hw_asid_' in ccorres_abstract, ceqv) + apply (rename_tac hw_asid) + apply (rule_tac P="hw_asid = nextVMID" in ccorres_gen_asm2) + apply (simp del: Collect_const) + apply ((rule ccorres_move_const_guard )+)? + apply (ctac(no_vcg) add: invalidateASID_ccorres) + apply ((rule ccorres_move_const_guard + | simp only: ccorres_seq_simps)+)? + apply (ctac(no_vcg) add: invalidateTranslationASID_ccorres) + apply (rule ccorres_split_nothrow) + apply (rule ccorres_move_const_guard )+ + apply (rule ccorres_handlers_weaken) + apply (rule invalidateVMIDEntry_ccorres[OF refl]) + apply ceqv + apply (rule_tac P="\s. nextVMID = armKSNextVMID (ksArchState s)" + in ccorres_from_vcg_throws[where P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp del: rf_sr_upd_safe) + apply (clarsimp simp: rf_sr_def bind_def simpler_modify_def + return_def cstate_relation_def Let_def) + apply (simp add: carch_state_relation_def carch_globals_def + cmachine_state_relation_def) + apply (subst down_cast_same [symmetric], + simp add: is_down_def target_size_def source_size_def word_size)+ + apply (clarsimp simp: maxBound_word minBound_word + ucast_ucast_add minus_one_norm + split: if_split) + apply (simp add: word_sint_msb_eq uint_up_ucast word_size + msb_nth nth_ucast bang_big is_up_def source_size_def + target_size_def) + apply uint_arith + subgoal by simp + apply wp + apply vcg + apply simp + apply wp[1] + apply simp + apply wp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply (drule_tac x=nextVMID in bspec, simp) + apply clarsimp + apply (clarsimp simp: rf_sr_armKSNextVMID + valid_arch_state'_def + Collect_const_mem word_sless_msb_less + ucast_less[THEN order_less_le_trans] + word_0_sle_from_less asid_bits_def) + apply (drule rf_sr_armKSVMIDTable_rel') + apply (clarsimp simp: array_relation_def) + apply (erule_tac x="armKSNextASID_' (globals s')" in allE, erule impE) + apply (simp add: vmid_bits_val mask_def) + apply simp + apply (fold mapME_def) + apply (wp mapME_wp') + apply (rule hoare_pre, wp) + apply simp + apply (clarsimp simp: guard_is_UNIV_def) + apply simp + done + +lemma storeHWASID_ccorres: + "ccorres dc xfdc \ (\\asid___unsigned_long = asid\ \ \\hw_asid = vmid\) [] + (storeVMID asid vmid) (Call storeHWASID_'proc)" + supply Collect_const[simp del] + apply (cinit lift: asid___unsigned_long_' hw_asid_' simp: updateASIDPoolEntry_def bind_assoc) + apply (ctac (no_vcg) add: getPoolPtr_ccorres) + apply (rule ccorres_assert) + apply clarsimp + apply (ctac add: getASIDMap_ccorres[where asid=asid]) + apply (rule ccorres_assert2) + apply csymbr + apply csymbr + apply (rule ccorres_split_nothrow_novcg) + apply (rule ccorres_call[where xf'=xfdc]) + apply (rule_tac asid_map_entry'=asid_map in setASIDMap_ccorres) + apply (clarsimp simp: casid_map_relation_def asid_map_lift_def ucast_and_mask_drop + asid_map_asid_map_vspace_lift_def asid_map_tag_defs Let_def + split: if_splits) + apply simp + apply simp + apply simp + apply ceqv + apply (rule ccorres_Guard)+ + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: simpler_modify_def simpler_gets_def bind_def + rf_sr_def cstate_relation_def Let_def + cmachine_state_relation_def carch_state_relation_def carch_globals_def + simp del: 
fun_upd_apply) + apply (erule array_relation_update, rule refl, simp) + apply (simp add: mask_def vmid_bits_val unat_max_word) + apply wp + apply (clarsimp simp: guard_is_UNIV_def split: if_splits) + apply (clarsimp simp: zero_sle_ucast_up is_down word_sless_alt sint_ucast_eq_uint) + apply (uint_arith, fastforce) + apply (wp getASID_wp) + apply clarsimp + apply (vcg exspec=getASIDMap_modifies) + apply (wpsimp simp: getPoolPtr_def) + apply (clarsimp simp: inv_ASIDPool split: if_split asidpool.splits) + apply (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def split: if_splits) + done + +lemma getHWASID_ccorres: + "ccorres (=) ret__unsigned_char_' + (valid_arch_state' and K (asid_wf asid)) (\\asid___unsigned_long = asid\) [] + (getVMID asid) (Call getHWASID_'proc)" + apply (cinit lift: asid___unsigned_long_') + apply (ctac(no_vcg) add: findMapForASID_loadVMID_ccorres) + apply csymbr + apply wpc + apply (rule ccorres_cond_false) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply simp + apply (ctac(no_vcg) add: findFreeHWASID_ccorres) + apply (ctac(no_vcg) add: storeHWASID_ccorres) + apply (rule ccorres_return_C, simp+)[1] + apply wp+ + apply (rule ccorres_cond_true) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (clarsimp simp: to_bool_def casid_map_relation_def asid_map_lift_def Let_def + asid_map_asid_map_vspace_lift_def + split: if_split_asm) + apply (wpsimp wp: hoare_drop_imp) + apply (clarsimp simp: to_bool_def casid_map_relation_def asid_map_lift_def Let_def + asid_map_asid_map_vspace_lift_def + split: if_split) + done + +lemma armv_contextSwitch_ccorres: + "ccorres dc xfdc + (valid_arch_state' and + K (asid_wf asid \ canonical_address vspace \ pptrBase \ vspace \ vspace < pptrTop)) + (\\vspace = pte_Ptr vspace\ \ \ \asid___unsigned_long = asid \) [] + (armContextSwitch vspace asid) (Call armv_contextSwitch_'proc)" + apply (cinit lift: vspace_' asid___unsigned_long_') + apply simp + apply (ctac(no_vcg) add: getHWASID_ccorres) + apply csymbr + apply csymbr + apply csymbr + apply (ctac (no_vcg)add: setVSpaceRoot_ccorres) + apply wp + apply (clarsimp simp: ucast_and_mask_drop canonical_address_and_maskD + addrFromPPtr_canonical_in_kernel_window split: if_split) + done + +lemma canonical_address_page_table_at': + "\page_table_at' pt_t p s; pspace_canonical' s\ \ canonical_address p" + apply (clarsimp simp: page_table_at'_def) + apply (drule_tac x=0 in spec, clarsimp simp: bit_simps typ_at_to_obj_at_arches) + apply (erule (1) obj_at'_is_canonical) + done + +lemma setVMRoot_ccorres: + "ccorres dc xfdc + (valid_arch_state' and valid_objs' and pspace_canonical' and tcb_at' thread) + ({s. 
tcb_' s = tcb_ptr_to_ctcb_ptr thread}) hs + (setVMRoot thread) (Call setVMRoot_'proc)" + supply Collect_const[simp del] + apply (cinit lift: tcb_') + apply (rule ccorres_move_array_assertion_tcb_ctes) + apply (rule ccorres_move_c_guard_tcb_ctes) + apply (simp add: getThreadVSpaceRoot_def locateSlot_conv bit_simps asid_bits_def) + apply (ctac, rename_tac vRootCap vRootCap') + apply (rule ccorres_assert2) + apply (csymbr, rename_tac vRootTag) + apply (simp add: cap_get_tag_isCap_ArchObject2 isValidVTableRoot_def2) + apply (rule ccorres_Cond_rhs_Seq) + apply (subst will_throw_and_catch) + apply (simp split: capability.split arch_capability.split option.split) + apply (fastforce simp: isCap_simps) + apply (clarsimp simp: setGlobalUserVSpace_def) + apply (rule ccorres_pre_gets_armKSGlobalUserVSpace_ksArchState) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_h_t_valid_armKSGlobalUserVSpace) + apply csymbr + apply csymbr + apply ccorres_rewrite + apply (subst bind_return_unit) + apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) + apply (rule ccorres_return_void_C) + apply (rule wp_post_taut) + apply (simp add: catch_def bindE_bind_linearise bind_assoc liftE_def) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply simp + apply ((wpc; (solves \rule ccorres_inst[where P=\ and P'=UNIV], clarsimp simp: isCap_simps isValidVTableRoot_def\)?), simp)+ + apply (simp add: catch_def bindE_bind_linearise bind_assoc liftE_def) + apply (rule_tac f'=lookup_failure_rel + and r'="\pte_ptr pte_ptr'. pte_ptr' = pte_Ptr pte_ptr" + and xf'=find_ret_' + in ccorres_split_nothrow_case_sum) + apply (ctac (no_vcg) add: findVSpaceForASID_ccorres) + apply ceqv + apply (rename_tac vspace vspace') + apply (rule_tac P="capVSBasePtr_CL (cap_vspace_cap_lift vRootCap') + = capPTBasePtr (capCap vRootCap)" + in ccorres_gen_asm2) + apply simp + apply (rule ccorres_Cond_rhs_Seq) + apply (simp add: whenE_def throwError_def setGlobalUserVSpace_def, ccorres_rewrite) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_h_t_valid_armKSGlobalUserVSpace) + apply csymbr + apply csymbr + apply (rule ccorres_pre_gets_armKSGlobalUserVSpace_ksArchState) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) + apply (rule ccorres_return_void_C) + apply (rule wp_post_taut) + apply (simp add: whenE_def returnOk_def assertE_liftE liftE_bind) + apply (rule ccorres_assert2) + apply (ctac (no_vcg) add: armv_contextSwitch_ccorres) + apply (clarsimp simp: setGlobalUserVSpace_def) + apply (rule ccorres_cond_true_seq, ccorres_rewrite) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_h_t_valid_armKSGlobalUserVSpace) + apply csymbr + apply csymbr + apply (rule ccorres_pre_gets_armKSGlobalUserVSpace_ksArchState) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) + apply (rule ccorres_return_void_C) + apply wp + apply wpsimp + apply (wp hoare_drop_imps) + apply clarsimp + apply (vcg exspec=findVSpaceForASID_modifies) + apply (simp add: isCap_simps) + apply (wpsimp wp: getSlotCap_wp) + apply vcg + apply (clarsimp simp: Collect_const_mem) + apply (rule conjI) + apply (frule cte_at_tcb_at_32', drule cte_at_cte_wp_atD) + apply (clarsimp simp: cte_level_bits_def tcbVTableSlot_def) + apply (rule_tac x="cteCap cte" in exI) + apply (rule conjI, erule cte_wp_at_weakenE', simp) + apply (clarsimp simp: isCap_simps isValidVTableRoot_def2) + apply (frule cte_wp_at_valid_objs_valid_cap', assumption) + apply (clarsimp simp: valid_cap'_def wellformed_mapdata'_def 
canonical_address_page_table_at') + apply (frule valid_arch_state_armKSGlobalUserVSpace) + apply (frule rf_sr_armKSGlobalUserVSpace) + apply (clarsimp simp: tcb_cnode_index_defs cte_level_bits_def tcbVTableSlot_def) + apply (clarsimp simp: isCap_simps isValidVTableRoot_def2 canonical_address_and_maskD) + apply (erule allE, erule (1) impE) + apply (clarsimp simp: cap_get_tag_isCap_ArchObject2) + by (clarsimp simp: cap_get_tag_isCap_ArchObject[symmetric] + cap_lift_vspace_cap cap_to_H_def + cap_vspace_cap_lift_def isCap_simps isZombieTCB_C_def Let_def + elim!: ccap_relationE + split: if_split_asm cap_CL.splits) + +(* FIXME: move *) +lemma register_from_H_bound[simp]: + "unat (register_from_H v) < 37" + by (cases v, simp_all add: "StrictC'_register_defs") + +(* FIXME: move *) +lemma register_from_H_inj: + "inj register_from_H" + apply (rule inj_onI) + apply (case_tac x) + by (case_tac y, simp_all add: "StrictC'_register_defs")+ + +(* FIXME: move *) +lemmas register_from_H_eq_iff[simp] + = inj_on_eq_iff [OF register_from_H_inj, simplified] + +lemma setRegister_ccorres: + "ccorres dc xfdc \ + (\\thread = tcb_ptr_to_ctcb_ptr thread\ \ \\reg = register_from_H reg\ + \ {s. w_' s = val}) [] + (asUser thread (setRegister reg val)) + (Call setRegister_'proc)" + apply (cinit' lift: thread_' reg_' w_') + apply (simp add: asUser_def split_def) + apply (rule ccorres_pre_threadGet) + apply (rule ccorres_Guard) + apply (simp add: setRegister_def simpler_modify_def exec_select_f_singleton) + apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = uc" + in threadSet_ccorres_lemma2) + apply vcg + apply (clarsimp simp: setRegister_def HaskellLib_H.runState_def + simpler_modify_def typ_heap_simps) + apply (subst StateSpace.state.fold_congs[OF refl refl]) + apply (rule globals.fold_congs[OF refl refl]) + apply (rule heap_update_field_hrs, simp) + apply (fastforce intro: typ_heap_simps) + apply simp + apply (erule(1) rf_sr_tcb_update_no_queue2, + (simp add: typ_heap_simps')+) + apply (rule ball_tcb_cte_casesI, simp+) + apply (clarsimp simp: ctcb_relation_def ccontext_relation_def cregs_relation_def + atcbContextSet_def atcbContextGet_def + carch_tcb_relation_def + split: if_split) + apply (clarsimp simp: Collect_const_mem register_from_H_sless + register_from_H_less) + apply (auto intro: typ_heap_simps elim: obj_at'_weakenE) + done + +lemma msgRegisters_ccorres: + "n < unat n_msgRegisters \ + register_from_H (AARCH64_H.msgRegisters ! n) = (index kernel_all_substitute.msgRegisters n)" + apply (simp add: kernel_all_substitute.msgRegisters_def msgRegisters_unfold fupdate_def) + apply (simp add: Arrays.update_def n_msgRegisters_def nth_Cons' split: if_split) + done + +(* usually when we call setMR directly, we mean to set only registers, which will + fit in actual registers *) +lemma setMR_as_setRegister_ccorres: + "ccorres (\rv rv'. rv' = of_nat offset + 1) ret__unsigned_' + (tcb_at' thread and K (TCB_H.msgRegisters ! 
offset = reg \ offset < length msgRegisters)) + (\\reg___unsigned_long = val\ + \ \\offset = of_nat offset\ + \ \\receiver = tcb_ptr_to_ctcb_ptr thread\) hs + (asUser thread (setRegister reg val)) + (Call setMR_'proc)" + apply (rule ccorres_grab_asm) + apply (cinit' lift: reg___unsigned_long_' offset_' receiver_') + apply (clarsimp simp: n_msgRegisters_def length_of_msgRegisters) + apply (rule ccorres_cond_false) + apply (rule ccorres_move_const_guards) + apply (rule ccorres_add_return2) + apply (ctac add: setRegister_ccorres) + apply (rule ccorres_from_vcg_throws[where P'=UNIV and P=\]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) + apply (vcg exspec=setRegister_modifies) + apply (clarsimp simp: n_msgRegisters_def length_of_msgRegisters not_le conj_commute) + apply (subst msgRegisters_ccorres[symmetric]) + apply (clarsimp simp: n_msgRegisters_def length_of_msgRegisters unat_of_nat_eq) + apply (clarsimp simp: word_less_nat_alt word_le_nat_alt unat_of_nat_eq not_le[symmetric]) + done + +lemma wordFromMessageInfo_spec: + defines "mil s \ seL4_MessageInfo_lift (mi_' s)" + shows "\s. \ \ {s} Call wordFromMessageInfo_'proc + \\ret__unsigned_long = (label_CL (mil s) << 12) + || (capsUnwrapped_CL (mil s) << 9) + || (extraCaps_CL (mil s) << 7) + || length_CL (mil s)\" + unfolding mil_def + apply vcg + apply (simp del: scast_id add: seL4_MessageInfo_lift_def mask_shift_simps) + apply word_bitwise + done + +lemma wordFromMessageInfo_ccorres [corres]: + "ccorres (=) ret__unsigned_long_' + \ {s. mi = message_info_to_H (mi_' s)} [] + (return (wordFromMessageInfo mi)) (Call wordFromMessageInfo_'proc)" + apply (rule ccorres_from_spec_modifies [where P = \, simplified]) + apply (rule wordFromMessageInfo_spec) + apply (rule wordFromMessageInfo_modifies) + apply simp + apply clarsimp + apply (simp add: return_def wordFromMessageInfo_def Let_def message_info_to_H_def + msgLengthBits_def msgExtraCapBits_def + msgMaxExtraCaps_def shiftL_nat word_bw_assocs word_bw_comms word_bw_lcs) + done + +(* FIXME move *) +lemma register_from_H_eq: + "(r = r') = (register_from_H r = register_from_H r')" + apply (case_tac r, simp_all add: C_register_defs) + by (case_tac r', simp_all add: C_register_defs)+ + +lemma setMessageInfo_ccorres: + "ccorres dc xfdc (tcb_at' thread) + (\mi = message_info_to_H mi'\) hs + (setMessageInfo thread mi) + (\ret__unsigned_long :== CALL wordFromMessageInfo(mi');; + CALL setRegister(tcb_ptr_to_ctcb_ptr thread, + scast Kernel_C.msgInfoRegister, + \ret__unsigned_long))" + unfolding setMessageInfo_def + apply (rule ccorres_guard_imp2) + apply ctac + apply simp + apply (ctac add: setRegister_ccorres) + apply wp + apply vcg + apply (simp add: AARCH64_H.msgInfoRegister_def AARCH64.msgInfoRegister_def C_register_defs) + done + +lemmas unfold_checkMapping_return + = from_bool_0[where 'a=machine_word_len, folded exception_defs] + to_bool_def + +lemma ccorres_return_void_C': + "ccorres_underlying rf_sr \ (inr_rrel dc) xfdc (inl_rrel dc) xfdc (\_. 
True) UNIV (SKIP # hs) (return (Inl rv)) return_void_C" + apply (rule ccorres_from_vcg_throws) + apply (simp add: return_def) + apply (rule allI, rule conseqPre, vcg) + apply auto + done + +lemma findVSpaceForASID_page_table_at'_simple[wp]: + notes checkPTAt_inv[wp del] + shows "\\\ findVSpaceForASID asid \ page_table_at' VSRootPT_T \,-" + unfolding findVSpaceForASID_def + by (wpsimp wp: getASID_wp simp: checkPTAt_def) + +lemma findVSpaceForASID_gsPTTypes[wp]: + "\\\ findVSpaceForASID asid \\vspace s. gsPTTypes (ksArchState s) vspace = Some VSRootPT_T\,-" + unfolding findVSpaceForASID_def + by (wpsimp wp: getASID_wp simp: checkPTAt_def wp_del: checkPTAt_inv) + +lemmas ccorres_name_ksCurThread = ccorres_pre_getCurThread[where f="\_. f'" for f', + unfolded getCurThread_def, simplified gets_bind_ign] + +lemma of_nat_pageBitsForSize: + "unat x = pageBitsForSize sz \ x = of_nat (pageBitsForSize sz)" for x::machine_word + by (drule sym, simp) + +lemma checkMappingPPtr_def2: + "checkMappingPPtr p pte = + (if isPagePTE pte \ ptrFromPAddr (pteBaseAddress pte) = p + then returnOk() + else throw InvalidRoot)" + unfolding checkMappingPPtr_def + apply (cases pte; simp add: isPagePTE_def unlessE_def cong: if_cong split: if_splits) + apply auto + done + +lemma pte_pte_invalid_new_spec: + "\s. \ \ {s} + \ret__struct_pte_C :== PROC pte_pte_invalid_new() + \ pte_lift \ret__struct_pte_C = Some Pte_pte_invalid \" + apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) + apply vcg + apply (clarsimp simp: pte_pte_invalid_new_body_def pte_pte_invalid_new_impl + pte_lift_def Let_def pte_get_tag_def pte_tag_defs) + done + +lemma casid_map_relation_vspace_tag: + "casid_map_relation (Some (ASIDPoolVSpace vmid vspace)) casid_map \ + asid_map_get_tag casid_map = scast asid_map_asid_map_vspace" + by (clarsimp simp: casid_map_relation_def asid_map_lift_def Let_def split: if_splits) + +lemma invalidateTLBByASIDVA_ccorres: + "ccorres dc xfdc + (valid_arch_state' and K (asid_wf asid)) + (\\asid___unsigned_long = asid\ \ \\vaddr = vaddr\) hs + (invalidateTLBByASIDVA asid vaddr) + (Call invalidateTLBByASIDVA_'proc)" + apply (cinit lift: asid___unsigned_long_' vaddr_') + apply (ctac (no_vcg) add: findMapForASID_loadVMID_ccorres) + apply (rename_tac maybe_vmid asid_map) + apply csymbr + apply (clarsimp simp: when_def simp del: Collect_const) + apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) + apply (clarsimp simp: casid_map_relation_def asid_map_asid_map_vspace_lift_def + asid_map_lift_def Let_def + split: if_splits option.splits) + apply (rule ccorres_return_void_C) + apply csymbr + apply (ctac (no_vcg) add: invalidateTranslationSingle_ccorres) + apply vcg + apply wpsimp + apply (clarsimp simp: casid_map_relation_vspace_tag split: if_splits) + apply (rule conjI; clarsimp simp: casid_map_relation_vspace_tag)+ + apply (simp add: bit_simps wordBits_def word_size asid_bits_def) + apply (clarsimp simp: casid_map_relation_def asid_map_asid_map_vspace_lift_def asid_map_lift_def + Let_def ucast_ucast_mask_id + split: if_splits) + done + +lemma unmapPage_ccorres: + "ccorres dc xfdc (invs' and (\_. 
asid_wf asid)) + (\ framesize_to_H \page_size = sz \ \page_size < 3 \ \ + \ \asid___unsigned_long = asid \ \ \ \vptr = vptr \ \ \ \pptr___unsigned_long = pptr \) + hs + (unmapPage sz asid vptr pptr) (Call unmapPage_'proc)" + supply Collect_const[simp del] + apply (rule ccorres_gen_asm) + apply (cinit lift: page_size_' asid___unsigned_long_' vptr_' pptr___unsigned_long_') + apply (simp add: ignoreFailure_liftM) + apply (ctac add: findVSpaceForASID_ccorres) + apply (rename_tac vspace find_ret) + apply (rule ccorres_liftE_Seq) + apply (simp add: Collect_False) + apply (ctac add: lookupPTSlot_ccorres) + apply csymbr + apply (simp (no_asm) add: split_def) + apply (rule ccorres_split_unless_throwError_cond[where Q=\ and Q'=\]) + apply (clarsimp simp: of_nat_pageBitsForSize Collect_const split: if_split) + apply (simp add: throwError_def) + apply (rule ccorres_return_void_C) + apply (simp add: liftE_bindE) + apply (rule ccorres_split_nothrow_novcg) (* FIXME AARCH64: check why ctac isn't working here *) + apply (rule getObject_pte_ccorres) + apply clarsimp + apply ceqv + apply (rename_tac pte') + apply (simp add: checkMappingPPtr_def2) + apply csymbr + apply (rule ccorres_cond_seq) + apply ccorres_rewrite + apply clarsimp + (* Haskell condition matches multiple steps of C conditions. Proof follows C structure. *) + apply (rule_tac C'="\\isPagePTE pte\" in ccorres_rewrite_cond_sr[where Q=\ and Q'=UNIV]) + apply (auto dest!: pte_lift_pte_4k_page pte_lift_pte_page + simp: cpte_relation_def Let_def isPagePTE_eq pte_lift_def from_bool_0 + split: pte.splits if_splits)[1] + apply (rule ccorres_Cond_rhs) + apply clarsimp + apply (simp add: throwError_def) + apply (rule ccorres_return_void_C) + apply clarsimp + apply csymbr + apply csymbr + apply (clarsimp simp: if_to_top_of_bindE) + apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) + apply (clarsimp simp: Collect_const split: if_splits) + apply clarsimp + apply (simp add: throwError_def) + apply (rule ccorres_return_void_C) + apply clarsimp + apply csymbr + apply (rule ccorres_split_nothrow_novcg) + apply wpfix + apply (rule storePTE_Basic_ccorres) + apply (simp add: cpte_relation_def Let_def) + apply ceqv + apply csymbr + apply (ctac (no_vcg) add: cleanByVA_PoU_ccorres) + apply (rule ccorres_liftE') + apply (clarsimp cong: ccorres_all_cong) + apply (ctac (no_vcg) add: invalidateTLBByASIDVA_ccorres) + apply wp+ + apply (clarsimp simp: guard_is_UNIV_def) + apply vcg + apply clarsimp + apply (wpsimp wp: hoare_drop_imps) + apply (clarsimp simp: guard_is_UNIV_def) + apply vcg + apply (wpsimp wp: hoare_drop_imps lookupPTSlot_inv) + apply clarsimp + apply (vcg exspec=lookupPTSlot_modifies) + apply ccorres_rewrite + apply (simp add: throwError_def) + apply (rule ccorres_return_void_C) + apply wpsimp + apply clarsimp + apply (vcg exspec=findVSpaceForASID_modifies) + apply clarsimp + done + +(* FIXME: move *) +lemma cap_to_H_PageCap_tag: + "\ cap_to_H cap = ArchObjectCap (FrameCap p R sz d A); + cap_lift C_cap = Some cap \ \ + cap_get_tag C_cap = scast cap_frame_cap" + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_split_asm) + by (simp_all add: Let_def cap_lift_def split_def split: if_splits) + +lemma ccap_relation_mapped_asid_0: + "\ccap_relation (ArchObjectCap (FrameCap d v0 v1 v2 v3)) cap\ + \ (capFMappedASID_CL (cap_frame_cap_lift cap) \ 0 \ v3 \ None) \ + (capFMappedASID_CL (cap_frame_cap_lift cap) = 0 \ v3 = None)" + apply (frule cap_get_tag_PageCap_frame) + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply simp + done + 
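+(* On the C side an unmapped frame cap is encoded with capFMappedASID_CL = 0: cap_to_H_Frame_unfold + below turns a zero ASID field into a None mapping, and ccap_relation_mapped_asid_0 above states + the same correspondence at the ccap_relation level. *) + 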
+lemma framesize_from_H_bounded: + "framesize_from_H x < 3" + by (clarsimp simp: framesize_from_H_def framesize_defs + split: vmpage_size.split) + +lemma cap_to_H_Frame_unfold: + "cap_to_H capC = ArchObjectCap (FrameCap p R sz d m) \ + \asid_C sz_C vmrights_C device_C mappedAddr_C. + capC = Cap_frame_cap \capFMappedASID_CL = asid_C, capFBasePtr_CL = p, capFSize_CL = sz_C, + capFMappedAddress_CL = mappedAddr_C, capFVMRights_CL = vmrights_C, + capFIsDevice_CL = device_C \ \ + sz = framesize_to_H sz_C \ + d = to_bool device_C \ + R = vmrights_to_H vmrights_C \ + m = (if asid_C = 0 then None else Some (asid_C, mappedAddr_C))" + supply if_cong[cong] + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits) + apply (simp split: if_split_asm) + apply (rename_tac fcap, case_tac fcap, simp) + done + +lemma performPageInvocationUnmap_ccorres: + notes Collect_const[simp del] + shows + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and cte_wp_at' ((=) (ArchObjectCap cap) o cteCap) ctSlot and K (isFrameCap cap)) + (\ccap_relation (ArchObjectCap cap) \cap\ \ \\ctSlot = Ptr ctSlot\) + hs + (liftE (performPageInvocation (PageUnmap cap ctSlot))) + (Call performPageInvocationUnmap_'proc)" + apply (simp only: liftE_liftM ccorres_liftM_simp K_def) + apply (rule ccorres_gen_asm) + apply (clarsimp simp: isCap_simps) + apply (cinit' lift: cap_' ctSlot_' simp: performPageInvocation_def) + apply (rename_tac ctSlotC capC) + apply csymbr + apply (simp only: ) + apply (frule ccap_relation_mapped_asid_0) + apply (rule_tac R'="\ cap_get_tag capC = SCAST(32 signed \ 64) cap_frame_cap \" + in ccorres_split_nothrow) + apply (rule ccorres_Cond_rhs) + (* ASID present, unmap *) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply clarsimp + apply (frule cap_get_tag_isCap_unfolded_H_cap) + apply (rule ccorres_call[where xf'=xfdc]) + apply datatype_schem + apply (rule unmapPage_ccorres) + apply simp + apply simp + apply simp + apply (simp add: asidInvalid_def) + apply (rule ccorres_return_Skip) + apply ceqv + apply (simp add: liftM_def) + apply (rule_tac Q="\slotCap. cte_wp_at' ((=) slotCap o cteCap) ctSlot and (\_. isArchFrameCap slotCap)" and + Q'="\slotCap slotCap_C. UNIV" + in ccorres_split_nothrow) + apply (ctac add: getSlotCap_h_val_ccorres) + apply ceqv + apply (rename_tac slotCap slotCap_C) + apply (rule ccorres_gen_asm) + apply (rule ccorres_guard_imp) + apply csymbr + apply csymbr + apply (rule ccorres_move_c_guard_cte) + apply (ctac add: ccorres_updateCap) + apply (rule ccorres_rel_imp[where xf'=ret__unsigned_long_' and + r'="\_ x. 
x = SCAST(32 signed \ 64) EXCEPTION_NONE"]) + apply (rule ccorres_return_C; simp) + apply simp + apply wp + apply vcg + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: cap_get_tag_isCap asidInvalid_def) + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 c_valid_cap_def) + apply (clarsimp simp: cap_frame_cap_lift) + apply (rename_tac slotCap_CL) + apply (clarsimp simp: isCap_simps) + apply (simp (no_asm) add: cap_to_H_def cap_frame_cap_lift_def) + apply simp + apply (drule cap_to_H_Frame_unfold)+ + apply (clarsimp simp: cl_valid_cap_def) + apply wp + apply (wpsimp simp: getSlotCap_def wp: getCTE_wp) + apply vcg + apply simp + apply (wpsimp wp: hoare_drop_imps hoare_vcg_ex_lift unmapPage_cte_wp_at') + apply (rule conseqPre, vcg exspec=unmapPage_modifies) + apply clarsimp + apply (clarsimp simp: asidInvalid_def cap_get_tag_isCap cte_wp_at_ctes_of) + apply (rename_tac p R sz d m cap' s s' cte) + apply (frule ctes_of_valid', fastforce) + apply (drule_tac t="cteCap cte" in sym) + apply (clarsimp simp: valid_cap'_def) + apply (clarsimp simp: ccap_relation_def map_option_Some_eq2) + apply (drule cap_to_H_Frame_unfold) + apply (clarsimp simp: cap_frame_cap_lift_def + c_valid_cap_def cl_valid_cap_def wellformed_mapdata'_def) + done + +lemma APFromVMRights_spec: + "\s. \ \ \s. \vm_rights < 4 \ \vm_rights \ 2 \ Call APFromVMRights_'proc + \ \ret__unsigned_long = ap_from_vm_rights (vmrights_to_H \<^bsup>s\<^esup>vm_rights) \" + apply vcg + apply (simp add: vmrights_to_H_def ap_from_vm_rights_def + Kernel_C.VMKernelOnly_def + Kernel_C.VMReadOnly_def Kernel_C.VMReadWrite_def) + apply clarsimp + apply (drule word_less_cases, auto)+ + done + +lemma armExecuteNever_CL_limit: + "armExecuteNever_CL (vm_attributes_lift attrs) \ 1" + by (simp add: vm_attributes_lift_def word_and_le1) + +lemmas vm_attributes_helpers = + armExecuteNever_CL_limit word_le_1_and_idem from_to_bool_le_1_idem + +(* FIXME AARCH64 rename/cleanup/generalise to not mention 12, maybe also not 36 (these are from + the bitfield generator) *) +lemma makeUserPagePTE_spec_helper: + "\ canonical_address p; is_aligned p pageBits \ \ p && (mask 36 << 12) = p" + apply (simp add: word_and_mask_shiftl pageBits_def canonical_address_range canonical_bit_def) + apply word_eqI + apply (clarsimp simp: le_def) + apply (rule iffI, clarsimp) + apply (subst add_diff_inverse_nat; fastforce) + done + +lemma makeUserPagePTE_spec: + "\s. \ \ + \s. 
\vm_rights < 4 \ \vm_rights \ 2 \ canonical_address \paddr \ is_aligned \paddr pageBits \ + Call makeUserPagePTE_'proc + \ let uxn = uxn_from_vmattributes (vm_attributes_to_H \<^bsup>s\<^esup>attributes); + ap = ap_from_vm_rights (vmrights_to_H \<^bsup>s\<^esup>vm_rights); + attridx = attridx_from_vmattributes (vm_attributes_to_H \<^bsup>s\<^esup>attributes); + nG = 0 \ \hyp 0, non-hyp 1\ + in + if \<^bsup>s\<^esup>page_size = scast Kernel_C.ARMSmallPage + then + pte_lift \ret__struct_pte_C = Some (Pte_pte_4k_page \ + pte_pte_4k_page_CL.UXN_CL = uxn, + page_base_address_CL = \<^bsup>s\<^esup>paddr, + nG_CL = nG, + AF_CL = 1, + SH_CL = 0, + AP_CL = ap, + AttrIndx_CL = attridx + \) + else + pte_lift \ret__struct_pte_C = Some (Pte_pte_page \ + pte_pte_page_CL.UXN_CL = uxn, + page_base_address_CL = \<^bsup>s\<^esup>paddr, + nG_CL = nG, + AF_CL = 1, + SH_CL = 0, + AP_CL = ap, + AttrIndx_CL = attridx + \) \" + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: Let_def) + (* these simps don't want to be combined *) + apply (clarsimp simp: pte_pte_page_lift pte_pte_4k_page_lift makeUserPagePTE_spec_helper) + apply (clarsimp simp: uxn_from_vmattributes_def vm_attributes_to_H_def Let_def vm_attributes_helpers + attridx_from_vmattributes_def S2_NORMAL_def S2_DEVICE_nGnRnE_def mask_def + ap_from_vm_rights_def vmrights_to_H_def + split: if_split vmrights.split) + apply (simp add: to_bool_def) + done + +lemma cap_to_H_PTCap: + "cap_to_H cap = ArchObjectCap (PageTableCap p NormalPT_T asid) \ + \cap_CL. cap = Cap_page_table_cap cap_CL \ + to_bool (capPTIsMapped_CL cap_CL) = (asid \ None) \ + (asid \ None \ capPTMappedASID_CL cap_CL = fst (the asid) \ + capPTMappedAddress_CL cap_CL = snd (the asid)) \ + cap_page_table_cap_CL.capPTBasePtr_CL cap_CL = p" + by (auto simp add: cap_to_H_def Let_def split: cap_CL.splits if_splits) + +(* FIXME AARCH64 might not be needed *) +lemma cap_to_H_VSCap: + "cap_to_H cap = ArchObjectCap (PageTableCap p VSRootPT_T asid) \ + \cap_CL. cap = Cap_vspace_cap cap_CL \ + to_bool (capVSIsMapped_CL cap_CL) = (asid \ None) \ + (asid \ None \ capVSMappedASID_CL cap_CL = fst (the asid)) \ + cap_vspace_cap_CL.capVSBasePtr_CL cap_CL = p" + by (auto simp add: cap_to_H_def Let_def split: cap_CL.splits if_splits) + +lemma cap_lift_PTCap_Base: + "\ cap_to_H cap_cl = ArchObjectCap (PageTableCap p NormalPT_T asid); + cap_lift cap_c = Some cap_cl \ + \ p = cap_page_table_cap_CL.capPTBasePtr_CL (cap_page_table_cap_lift cap_c)" + apply (simp add: cap_page_table_cap_lift_def) + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_splits) + done + +declare mask_Suc_0[simp] + +(* FIXME: move *) +lemma setCTE_asidpool': + "\ ko_at' (ASIDPool pool) p \ setCTE c p' \\_. 
ko_at' (ASIDPool pool) p\" + apply (clarsimp simp: setCTE_def) + apply (simp add: setObject_def split_def) + apply (rule bind_wp [OF _ hoare_gets_sp]) + apply (clarsimp simp: valid_def in_monad) + apply (frule updateObject_type) + apply (clarsimp simp: obj_at'_def) + apply (rule conjI) + apply (clarsimp simp: lookupAround2_char1) + apply (clarsimp split: if_split) + apply (case_tac obj', auto)[1] + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, auto)[1] + apply (simp add: updateObject_cte) + apply (clarsimp simp: updateObject_cte typeError_def magnitudeCheck_def in_monad + split: kernel_object.splits if_splits option.splits) + apply (clarsimp simp: ps_clear_upd lookupAround2_char1) + done + +lemmas udpateCap_asidpool' = updateCap_ko_at_ap_inv' + +lemma getObject_ap_inv [wp]: "\P\ (getObject addr :: asidpool kernel) \\rv. P\" + apply (rule getObject_inv) + apply simp + apply (rule loadObject_default_inv) + done + +lemma getObject_ko_at_ap [wp]: + "\\\ getObject p \\rv::asidpool. ko_at' rv p\" + by (rule getObject_ko_at | simp add: objBits_simps bit_simps)+ + +(* FIXME AARCH64 assuming the only array use of page tables are root PTs (vspace) + these might need to be renamed or gain comments to explain why it's only root PTs *) +lemma page_table_at'_array_assertion: + assumes "(s,s') \ rf_sr" + assumes "page_table_at' VSRootPT_T pt s" + assumes "gsPTTypes (ksArchState s) pt = Some VSRootPT_T" + assumes "n \ 2^(ptTranslationBits VSRootPT_T)" "0 < n" + shows "array_assertion (pte_Ptr pt) n (hrs_htd (t_hrs_' (globals s')))" + using assms + by (fastforce intro: array_assertion_abs_vspace[where x="\_. (1::nat)", simplified, rule_format]) + +lemma cap_lift_VSCap_Base: + "\ cap_to_H cap_cl = ArchObjectCap (PageTableCap p VSRootPT_T asid); + cap_lift cap_c = Some cap_cl \ + \ cap_vspace_cap_CL.capVSBasePtr_CL (cap_vspace_cap_lift cap_c) = p" + apply (simp add: cap_vspace_cap_lift_def) + apply (clarsimp simp: cap_to_H_def Let_def split: cap_CL.splits if_splits) + done + +lemma performASIDPoolInvocation_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and cte_wp_at' (isVTableRoot o cteCap) ctSlot and K (asid_wf asid)) + (\\poolPtr = Ptr poolPtr\ \ \\asid___unsigned_long = asid\ \ \\vspaceCapSlot = Ptr ctSlot\) + [] + (liftE (performASIDPoolInvocation (Assign asid poolPtr ctSlot))) + (Call performASIDPoolInvocation_'proc)" + apply (simp only: liftE_liftM ccorres_liftM_simp K_def) + apply (rule ccorres_gen_asm) + apply (cinit lift: poolPtr_' asid___unsigned_long_' vspaceCapSlot_') + apply (rule_tac Q="\slotCap. valid_arch_state' and valid_objs' and + cte_wp_at' ((=) slotCap o cteCap) ctSlot and + (\_. isVTableRoot slotCap \ + canonical_address (capPTBasePtr (capCap slotCap)) \ + is_aligned (capPTBasePtr (capCap slotCap)) pageBits)" and + Q'="\slotCap slotCap_C. UNIV" + in ccorres_split_nothrow) + apply (ctac add: getSlotCap_h_val_ccorres) + apply ceqv + apply (rule ccorres_gen_asm) + apply (rule ccorres_guard_imp) + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (ctac add: ccorres_updateCap) + apply (simp add: liftM_def) + apply (rule ccorres_pre_getObject_asidpool) + apply (rule ccorres_move_c_guard_ap) + apply (rule ccorres_add_return2) + apply (ctac add: setObjectASID_Basic_ccorres) + apply (rule ccorres_rel_imp[where xf'=ret__unsigned_long_' and + r'="\_ x. 
x = SCAST(32 signed \ 64) EXCEPTION_NONE"]) + apply (rule ccorres_return_C; simp) + apply simp + apply wp + apply simp + apply vcg + apply (rule hoare_strengthen_post[where Q="\_. \"], wp) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def obj_at'_def) + apply simp + apply vcg + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: isVTableRoot_ex cap_get_tag_isCap_unfolded_H_cap asidInvalid_def) + apply (clarsimp split: if_split) + apply (erule ccap_relationE) + apply (rename_tac cap_CL) + apply (drule_tac t="cap_to_H cap_CL" in sym) + apply (clarsimp simp: cap_lift_VSCap_Base) + apply (frule cap_to_H_VSCap, clarsimp) + apply (rule conjI; clarsimp) + apply (clarsimp simp: typ_heap_simps simp flip: fun_upd_def) + apply (clarsimp simp: casid_pool_relation_def + split: asidpool.splits asid_pool_C.splits) + apply (rule conjI) + apply (rule array_relation_update) + apply (simp add: inv_def) + apply (simp add: mask_2pm1) + apply (clarsimp simp: casid_map_relation_def asid_map_asid_map_vspace_lift_def + asid_map_lift_def Let_def asid_map_tag_defs makeUserPagePTE_spec_helper + split: if_splits) + apply (simp add: asid_low_bits_def mask_def) + apply (clarsimp simp: word_and_le1) + apply (clarsimp simp: cap_vspace_cap_lift) + apply (clarsimp simp: ccap_relation_def cap_to_H_def) + apply (simp (no_asm) add: cap_vspace_cap_lift_def) + apply (clarsimp simp: asid_wf_eq_mask_eq asid_bits_def) + apply (simp add: c_valid_cap_def cl_valid_cap_def) + apply (wpsimp simp: getSlotCap_def wp: getCTE_wp) + apply simp + apply vcg + apply (clarsimp simp: cte_wp_at_ctes_of isVTableRoot_ex) + apply (fastforce simp: bit_simps valid_cap'_def capAligned_def + elim!: is_aligned_weaken + dest!: ctes_of_valid' + intro: canonical_address_page_table_at' + split: if_split) + done + +lemma pte_case_isInvalidPTE: + "(case pte of InvalidPTE \ P | _ \ Q) + = (if isInvalidPTE pte then P else Q)" + by (cases pte, simp_all add: isInvalidPTE_def) + +lemma ccap_relation_vspace_mapped_asid: + "ccap_relation (ArchObjectCap (PageTableCap p VSRootPT_T (Some (asid, vspace)))) cap + \ asid = capVSMappedASID_CL (cap_vspace_cap_lift cap)" + by (frule cap_get_tag_isCap_unfolded_H_cap) + (clarsimp simp: cap_vspace_cap_lift ccap_relation_def cap_to_H_def split: if_splits) + +lemma ccap_relation_vspace_base: + "ccap_relation (ArchObjectCap (PageTableCap p VSRootPT_T m)) cap + \ capVSBasePtr_CL (cap_vspace_cap_lift cap) = p" + by (frule cap_get_tag_isCap_unfolded_H_cap) + (clarsimp simp: cap_vspace_cap_lift ccap_relation_def cap_to_H_def split: if_splits) + +lemma performPageTableInvocationMap_ccorres: + "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (cte_at' ctSlot) + (\ccap_relation cap \cap\ \ \\ctSlot = Ptr ctSlot\ + \ \cpte_relation pte \pte\ \ \\ptSlot = Ptr ptSlot\) + [] + (liftE (performPageTableInvocation (PageTableMap cap ctSlot pte ptSlot))) + (Call performPageTableInvocationMap_'proc)" + apply (simp only: liftE_liftM ccorres_liftM_simp) + apply (cinit lift: cap_' ctSlot_' pte_' ptSlot_') + apply (ctac (no_vcg)) + apply (rule ccorres_split_nothrow_novcg) + apply simp + apply (erule storePTE_Basic_ccorres) + apply ceqv + apply csymbr + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: cleanByVA_PoU_ccorres) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (wpsimp simp: guard_is_UNIV_def)+ + done + + + +(* FIXME AARCH64 VCPU/HYP related block (everything from VSpace_C on ARM_HYP) adapted to 
AARCH64 + some of these might be needed earlier *) + +(* FIXME AARCH64: potentially put this into a VCPU theory + if we try to move most of the VCPU lemmas into a VCPU theory, some might need items in this or later + theories, meaning we'd need a VCPU low (maybe put into ArchAcc?) and VCPU high theory + *) +(* When updating fields (or fields of fields) inside VCPUs, typ_heap_simps can resolve the + hrs_mem_update of a field into a specific C vcpu update when considered on its own. Then if we can + show that specific C vcpu is related to the Haskell one, there is no need to unfold + rf_sr anymore. *) +lemma vcpu_hrs_mem_update_helper: + "\ (s, s') \ rf_sr; ko_at' (vcpu'::vcpu) vcpuptr s; + hrs_mem_update (f s') (t_hrs_' (globals s')) + = hrs_mem_update (heap_update (vcpu_Ptr vcpuptr) cvcpu) (t_hrs_' (globals s')); + cvcpu_relation vcpu cvcpu \ + \ (s\ksPSpace := (ksPSpace s)(vcpuptr \ KOArch (KOVCPU vcpu))\, + globals_update (t_hrs_'_update (hrs_mem_update (f s'))) s') \ rf_sr" + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + cmachine_state_relation_def update_vcpu_map_to_vcpu + cpspace_relation_def update_vcpu_map_tos) + apply (frule (1) cmap_relation_ko_atD) + apply (clarsimp simp: typ_heap_simps') + apply (erule cmap_relation_upd_relI) + apply (erule (3) ko_at_projectKO_opt) + apply simp + done + +lemmas setObject_ccorres_helper_vcpu = + setObject_ccorres_helper[where 'a=vcpu, simplified objBits_simps vcpuBits_def, simplified] + +lemma vcpuUpdate_ccorres: + (* We depend on the simplifier and typ_heap_simps to resolve what the updated VCPU on the C side + looks like for specific updates f and heap_upd, and need to ensure they maintain the VCPU + relation. *) + assumes update_rel: + "\s s' vcpu cvcpu. + \ (s, s') \ rf_sr; ko_at' vcpu vcpuptr s; cslift s' (vcpu_Ptr vcpuptr) = Some cvcpu; + cvcpu_relation vcpu cvcpu \ + \ \cvcpu'. + hrs_mem_update (heap_upd s') (t_hrs_' (globals s')) + = hrs_mem_update (heap_update (vcpu_Ptr vcpuptr) cvcpu') (t_hrs_' (globals s')) + \ cvcpu_relation (f vcpu) cvcpu'" + shows "ccorres dc xfdc \ UNIV hs + (vcpuUpdate vcpuptr f) + (Basic (\s. globals_update (t_hrs_'_update (hrs_mem_update (heap_upd s))) s))" + apply (rule ccorres_guard_imp) + apply (simp add: vcpuUpdate_def) + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply (rule_tac P="ko_at' vcpu vcpuptr" in setObject_ccorres_helper_vcpu[where P'=UNIV]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) cmap_relation_ko_atD[OF cmap_relation_vcpu]) + apply clarsimp + apply (frule (3) update_rel) + apply clarsimp + apply (rule vcpu_hrs_mem_update_helper) + apply (fastforce dest!: vcpu_at_ko simp: obj_at'_ko_at'_prop)+ + done + +method vcpuUpdate_ccorres + \ccorres of vcpuUpdate and a Basic heap update on vcpu fields\ = + rule vcpuUpdate_ccorres, + rule exI, + rule conjI, \ \need to resolve schematic cvcpu before showing it's in the relation\ + solves \simp add: typ_heap_simps'\, \ \calculate updated vcpu object\ + \ \unfold VCPU and VGIC relations (will solve for simple relations)\ + simp add: cvcpu_relation_regs_def cvgic_relation_def cvcpu_vppi_masked_relation_def + +lemma vcpuUpdate_vcpuRegs_ccorres: + "ccorres dc xfdc \ UNIV hs + (vcpuUpdate vcpuptr (\vcpu. vcpuRegs_update (\_. (vcpuRegs vcpu)(r := v)) vcpu)) + (Basic_heap_update + (\_. vcpuregs_C_Ptr &(vcpu_Ptr vcpuptr\[''regs_C''])) + (\s. 
Arrays.update (h_val (hrs_mem (t_hrs_' (globals s))) + (vcpuregs_C_Ptr &(vcpu_Ptr vcpuptr\[''regs_C'']))) (fromEnum r) v))" + apply vcpuUpdate_ccorres + using maxBound_is_bound[where 'a=vcpureg, simplified fromEnum_maxBound_vcpureg_def] + apply (clarsimp simp: fromEnum_eq_iff less_eq_Suc_le split: if_split) + done + +(* FIXME AARCH64 consider moving this inline *) +lemma vgicUpdate_HCR_ccorres: + "ccorres dc xfdc \ UNIV hs + (vgicUpdate vcpuptr (vgicHCR_update (\_. hcr))) + (Basic_heap_update + (\_. PTR(32 word) &(vgic_C_Ptr &(vcpu_Ptr vcpuptr\[''vgic_C''])\[''hcr_C''])) (\_. hcr))" + by (simp add: vgicUpdate_def) + vcpuUpdate_ccorres + +(* FIXME AARCH64 consider moving this inline *) +lemma vgicUpdate_virtTimer_pcount_ccorres: + "ccorres dc xfdc \ UNIV hs + (vcpuUpdate vcpuptr (vcpuVTimer_update (\_. VirtTimer pcount))) + (Basic_heap_update + (\_. PTR(64 word) &(PTR(vTimer_C) &(vcpu_Ptr vcpuptr\[''virtTimer_C''])\[''last_pcount_C''])) + (\_. pcount))" + by vcpuUpdate_ccorres + +(* FIXME AARCH64 consider moving this inline *) +lemma vgicUpdate_APR_ccorres: + "ccorres dc xfdc \ UNIV hs + (vgicUpdate vcpuptr (vgicAPR_update (\_. hcr))) + (Basic_heap_update + (\_. PTR(32 word) &(vgic_C_Ptr &(vcpu_Ptr vcpuptr\[''vgic_C''])\[''apr_C''])) (\_. hcr))" + by (simp add: vgicUpdate_def) + vcpuUpdate_ccorres + +(* FIXME AARCH64 consider moving this inline *) +lemma vgicUpdate_VMCR_ccorres: + "ccorres dc xfdc \ UNIV hs + (vgicUpdate vcpuptr (vgicVMCR_update (\_. hcr))) + (Basic_heap_update + (\_. PTR(32 word) &(vgic_C_Ptr &(vcpu_Ptr vcpuptr\[''vgic_C''])\[''vmcr_C''])) (\_. hcr))" + by (simp add: vgicUpdate_def) + vcpuUpdate_ccorres + +lemma vppievent_irq_noteq_fromEnum_mono: + "vppi \ (k :: vppievent_irq) \ fromEnum vppi \ fromEnum k" + apply (cases vppi, clarsimp) + apply (cases k, clarsimp) + done + +lemma vcpuVPPIMasked_update_ccorres: + "ccorres dc xfdc \ UNIV hs + (vcpuUpdate vcpuptr (\vcpu. vcpuVPPIMasked_update (\_. (vcpuVPPIMasked vcpu)(k := v)) vcpu)) + ((Basic_heap_update + (\s. vcpu_vppi_masked_C_Ptr &(vcpu_Ptr vcpuptr\[''vppi_masked_C''])) + (\s. 
Arrays.update (h_val (hrs_mem (t_hrs_' (globals s))) + (vcpu_vppi_masked_C_Ptr &(vcpu_Ptr vcpuptr\[''vppi_masked_C'']))) + (fromEnum k) (from_bool v))))" + apply vcpuUpdate_ccorres + using maxBound_is_bound[where 'a=vppievent_irq, simplified fromEnum_maxBound_vppievent_irq_def] + apply (split if_split) + apply (rule allI) + apply (rule conjI) + apply clarsimp + apply (rule impI) + apply (subst Arrays.index_update2, simp) + apply (rule vppievent_irq_noteq_fromEnum_mono) + apply simp + apply blast + done + +lemma vcpu_write_reg_ccorres: + "ccorres dc xfdc + (vcpu_at' vcpuptr) + (\ \vcpu = vcpu_Ptr vcpuptr \ \ \ \reg = of_nat (fromEnum reg) \ \ \ \value = v \) hs + (vcpuWriteReg vcpuptr reg v) + (Call vcpu_write_reg_'proc)" + supply Collect_const[simp del] + apply (cinit lift: vcpu_' reg_' value_') + apply (rule ccorres_assert) + apply clarsimp + apply (rule ccorres_cond_false_seq, simp) + apply (rule ccorres_move_const_guards) + apply ccorres_rewrite + apply (rule ccorres_move_c_guard_vcpu, rule vcpuUpdate_vcpuRegs_ccorres) + using maxBound_is_bound[of reg, simplified fromEnum_maxBound_vcpureg_def] + apply (clarsimp simp: seL4_VCPUReg_Num_def not_le word_less_nat_alt) + done + +lemma vcpu_save_reg_ccorres: + "ccorres dc xfdc (vcpu_at' vcpuptr) (\unat \reg = fromEnum r\ \ \ \vcpu = vcpu_Ptr vcpuptr \) hs + (vcpuSaveReg vcpuptr r) (Call vcpu_save_reg_'proc)" + supply Collect_const[simp del] + apply (cinit lift: reg_' vcpu_') + apply (rule ccorres_assert2) + apply (rule ccorres_cond_false_seq, simp) + apply (ctac add: vcpu_hw_read_reg_ccorres) + apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu)+ + apply (simp del: fun_upd_apply) + apply (ctac add: vcpuUpdate_vcpuRegs_ccorres) + apply wpsimp + apply (vcg exspec=vcpu_hw_read_reg_modifies) + apply (fastforce dest: maxBound_is_bound' + simp: fromEnum_maxBound_vcpureg_def seL4_VCPUReg_Num_def unat_arith_simps) + done + +lemma vcpu_restore_reg_ccorres: + "ccorres dc xfdc + (vcpu_at' vcpuptr) (\unat \reg = fromEnum r\ \ \ \vcpu = vcpu_Ptr vcpuptr \) hs + (vcpuRestoreReg vcpuptr r) (Call vcpu_restore_reg_'proc)" + supply Collect_const[simp del] + apply (cinit lift: reg_' vcpu_') + apply (rule ccorres_assert2) + apply (rule ccorres_cond_false_seq, simp) + apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu)+ + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply (ctac add: vcpu_hw_write_reg_ccorres) + apply (frule maxBound_is_bound') + apply (clarsimp simp: word_le_nat_alt word_less_nat_alt + fromEnum_maxBound_vcpureg_def seL4_VCPUReg_Num_def) + apply (frule cmap_relation_vcpu) + apply (clarsimp simp: typ_heap_simps cvcpu_relation_def cvcpu_regs_relation_def) + done + +lemma ccorres_dc_from_rrel: + "ccorres r xf P P' hs a c \ ccorres dc xf' P P' hs a c" + unfolding ccorres_underlying_def + by (fastforce simp: unif_rrel_def split: xstate.splits) + +lemma vcpu_restore_reg_range_ccorres: + "ccorres dc xfdc + (vcpu_at' vcpuptr and K (fromEnum (start::vcpureg) \ fromEnum end)) + (\unat \start = fromEnum start\ \ \unat \end = fromEnum end\ + \ \ \vcpu = vcpu_Ptr vcpuptr \) hs + (vcpuRestoreRegRange vcpuptr start end) (Call vcpu_restore_reg_range_'proc)" + using maxBound_is_bound[of start, simplified fromEnum_maxBound_vcpureg_def] + length_upto_enum_le_maxBound[of start "end", simplified fromEnum_maxBound_vcpureg_def] + apply - + apply (rule ccorres_grab_asm) + apply (cinit lift: start_' end_' vcpu_' simp: whileAnno_def) + apply csymbr + apply (rule ccorres_dc_from_rrel) + apply (rule ccorres_mapM_x_while'[where i="fromEnum 
start" and F="\n s. vcpu_at' vcpuptr s"]) + apply clarsimp + apply (rule ccorres_guard_imp) + apply (ctac add: vcpu_restore_reg_ccorres) + apply assumption + subgoal + apply (clarsimp simp: fromEnum_upto_nth dest!: less_length_upto_enum_maxBoundD) + by (subst unat_add_lem'; clarsimp simp: fromEnum_maxBound_vcpureg_def unat_of_nat_eq) + subgoal + apply (simp add: word_less_nat_alt word_le_nat_alt) + apply (subst unat_add_lem'; clarsimp simp: unat_of_nat_eq) + apply (fastforce simp add: upto_enum_red split: if_splits) + done + apply (rule allI, rule conseqPre, vcg exspec=vcpu_restore_reg_modifies) + apply fastforce + apply wpsimp + apply (fastforce simp: word_bits_def) + apply (clarsimp simp: Collect_const_mem) + apply (subst unat_eq_of_nat[symmetric]; clarsimp) + done + +lemma vcpu_save_reg_range_ccorres: + "ccorres dc xfdc + (vcpu_at' vcpuptr and K (fromEnum start \ fromEnum end)) + (\unat \start = fromEnum start\ \ \unat \end = fromEnum end\ + \ \ \vcpu = vcpu_Ptr vcpuptr \) hs + (vcpuSaveRegRange vcpuptr start end) (Call vcpu_save_reg_range_'proc)" + using maxBound_is_bound[of start, simplified fromEnum_maxBound_vcpureg_def] + length_upto_enum_le_maxBound[of start "end", simplified fromEnum_maxBound_vcpureg_def] + apply - + apply (rule ccorres_grab_asm) + apply (cinit lift: start_' end_' vcpu_' simp: whileAnno_def) + apply csymbr + apply (rule ccorres_dc_from_rrel) + apply (rule ccorres_mapM_x_while'[where i="fromEnum start" and F="\n s. vcpu_at' vcpuptr s"]) + apply clarsimp + apply (rule ccorres_guard_imp) + apply (ctac add: vcpu_save_reg_ccorres) + apply assumption + subgoal + apply (clarsimp simp: fromEnum_upto_nth dest!: less_length_upto_enum_maxBoundD) + by (subst unat_add_lem'; clarsimp simp: fromEnum_maxBound_vcpureg_def unat_of_nat_eq) + subgoal + apply (simp add: word_less_nat_alt word_le_nat_alt) + apply (subst unat_add_lem'; clarsimp simp: unat_of_nat_eq) + apply (fastforce simp add: upto_enum_red split: if_splits) + done + apply (rule allI, rule conseqPre, vcg exspec=vcpu_save_reg_modifies) + apply fastforce + apply wpsimp + apply (fastforce simp: word_bits_def) + apply (clarsimp simp: Collect_const_mem) + apply (subst unat_eq_of_nat[symmetric]; clarsimp) + done + +lemma vcpu_read_reg_ccorres: + "ccorres (=) ret__unsigned_long_' \ + (\ \vcpu = vcpu_Ptr vcpuptr \ \ \ \reg = of_nat (fromEnum reg) \) hs + (vcpuReadReg vcpuptr reg) + (Call vcpu_read_reg_'proc)" + supply Collect_const[simp del] + apply (cinit lift: vcpu_' reg_') + apply (rule ccorres_assert) + apply clarsimp + apply (rule ccorres_cond_false_seq, simp) + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply (rule ccorres_move_const_guards) + apply ccorres_rewrite + apply (rule ccorres_move_c_guard_vcpu) + apply (rule ccorres_return_C; clarsimp) + apply (clarsimp simp: vcpu_at_ko'_eq) + + using maxBound_is_bound[of reg, simplified fromEnum_maxBound_vcpureg_def] + apply (clarsimp simp: seL4_VCPUReg_Num_def not_le word_less_nat_alt) + apply (fastforce elim: allE[where x=reg] + simp: cvcpu_relation_def cvcpu_regs_relation_def typ_heap_simps' ) + done + +lemma irqVPPIEventIndex_spec: + "\s. \ \ \s. 
\irq && mask LENGTH(irq_len) = \irq \ + Call irqVPPIEventIndex_'proc + \ \ret__unsigned_long + = case_option (ucast VPPIEventIRQ_invalid) (of_nat \ fromEnum) (irqVPPIEventIndex (ucast \<^bsup>s\<^esup>irq)) \" + apply vcg + apply (clarsimp simp: irqVPPIEventIndex_def IRQ_def irqVTimerEvent_def + Kernel_C.VPPIEventIRQ_VTimer_def + split: if_splits) + apply (auto dest!: word_unat.Rep_inject[THEN iffD2] + simp: VPPIEventIRQ_invalid_def unat_ucast_eq_unat_and_mask and_mask_eq_iff_le_mask + fromEnum_def enum_vppievent_irq mask_def word_le_nat_alt word_less_nat_alt + simp flip: word_unat.Rep_inject) + done + +lemma vcpuWriteReg_obj_at'_vcpuVPPIMasked: + "vcpuWriteReg vcpuptr r v + \\s. obj_at' (\vcpu. P (vcpuVPPIMasked vcpu)) vcpuptr s \" + apply (simp add: vcpuWriteReg_def vcpuUpdate_def obj_at'_real_def) + apply (wp setObject_ko_wp_at[where n="objBits (undefined :: vcpu)"], simp) + apply (simp add: objBits_simps vcpuBits_def)+ + apply (wpsimp wp: getVCPU_wp)+ + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +lemma isIRQActive_ccorres: + "ccorres (\rv rv'. rv' = from_bool rv) ret__unsigned_long_' + (\s. irq \ scast Kernel_C.maxIRQ) ({s. irq_' s = ucast irq}) [] + (isIRQActive irq) (Call isIRQActive_'proc)" + apply (cinit lift: irq_') + apply (simp add: getIRQState_def getInterruptState_def) + apply (rule_tac P="irq \ ucast Kernel_C.maxIRQ \ unat irq \ (unat maxIRQ)" in ccorres_gen_asm) + apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def word_sless_msb_less maxIRQ_def + word_less_nat_alt) + apply (clarsimp simp: order_le_less_trans unat_less_helper Kernel_C.IRQInactive_def + Kernel_C.maxIRQ_def word_0_sle_from_less[OF order_less_le_trans, OF ucast_less]) + apply (clarsimp simp: rf_sr_def cstate_relation_def Kernel_C.maxIRQ_def + Let_def cinterrupt_relation_def) + apply (drule spec, drule(1) mp) + apply (case_tac "intStateIRQTable (ksInterruptState \) irq") + apply (simp add: irq_state_defs Kernel_C.maxIRQ_def word_le_nat_alt maxIRQ_def)+ + done + +crunches isIRQActive, vcpuRestoreReg + for ko_at_vcpu'[wp]: "\s. P (ko_at' (v::vcpu) p s)" + +lemma restore_virt_timer_ccorres: + "ccorres dc xfdc + (vcpu_at' vcpuptr) + (\ \vcpu = vcpu_Ptr vcpuptr \) hs + (restoreVirtTimer vcpuptr) (Call restore_virt_timer_'proc)" + apply (cinit lift: vcpu_') + apply (rename_tac vcpu) + apply (ctac (no_vcg) add: vcpu_restore_reg_ccorres) + apply (ctac (no_vcg) add: vcpu_restore_reg_ccorres) + apply csymbr + apply (ctac (no_vcg) add: read_cntpct_ccorres) + apply (rename_tac current_cntpct) + apply (rule ccorres_pre_getObject_vcpu) + apply (rename_tac vcpu_obj) + apply (rule ccorres_move_c_guard_vcpu) + apply (rule_tac xf'=pcount_delta_' and + R=\ and + R'="{s. \vcpu'. cslift s vcpu = Some vcpu' \ cvcpu_relation vcpu_obj vcpu'}" and + F="\s. 
s = current_cntpct - vtimerLastPCount (vcpuVTimer vcpu_obj)" + in ccorres_symb_exec_r_rv_abstract) + apply (rule conseqPre, vcg) + apply (clarsimp simp: typ_heap_simps cvcpu_relation_def) + apply ceqv + apply (ctac (no_vcg) add: vcpu_read_reg_ccorres) + apply csymbr + apply csymbr + apply (ctac (no_vcg) add: vcpu_write_reg_ccorres) + apply (ctac (no_vcg) add: vcpu_restore_reg_ccorres) + apply (rule ccorres_pre_getObject_vcpu) + apply (ctac add: isIRQActive_ccorres) + apply (clarsimp simp: when_def simp del: Collect_const) + apply (rule ccorres_Cond_rhs_Seq; clarsimp) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply clarsimp + apply (rule ccorres_move_const_guards) + apply (rule ccorres_move_c_guard_vcpu) + apply (ctac (no_vcg) add: maskInterrupt_ccorres) + apply (ctac (no_vcg) add: vcpu_restore_reg_ccorres) + apply wp + apply (ctac (no_vcg) add: vcpu_restore_reg_ccorres) + apply (wpsimp simp: irqVPPIEventIndex_def IRQ_def irqVTimerEvent_def fromEnum_def + enum_vppievent_irq) + apply (vcg exspec=isIRQActive_modifies) + apply (wpsimp wp: hoare_drop_imp) + apply wp+ + apply vcg + apply (wpsimp wp: hoare_drop_imp) + apply wp+ + apply (clarsimp simp: seL4_VCPUReg_defs enum_vcpureg irqVPPIEventIndex_def IRQ_def + irqVTimerEvent_def mask_def typ_heap_simps + fromEnum_def enum_vppievent_irq Kernel_C.maxIRQ_def + cvcpu_relation_def cvcpu_vppi_masked_relation_def + split: if_split) + apply (erule_tac x=VPPIEventIRQ_VTimer in allE)+ + apply (clarsimp simp: irqVPPIEventIndex_def fromEnum_def enum_vppievent_irq) + done + +lemma vcpuUpdate_vTimer_pcount_ccorres: + "ccorres dc xfdc (vcpu_at' vcpuptr) UNIV hs + (vcpuUpdate vcpuptr (vcpuVTimer_update (\_. VirtTimer v))) + (Guard C_Guard {s. s \\<^sub>c vcpu_Ptr vcpuptr} + (Basic_heap_update + (\_. PTR(64 word) &(PTR(vTimer_C) &(vcpu_Ptr vcpuptr\[''virtTimer_C''])\[''last_pcount_C''])) (\_. 
v)))" + apply (rule ccorres_guard_imp) + apply (rule ccorres_move_c_guard_vcpu) + apply vcpuUpdate_ccorres + apply simp+ + done + +lemma save_virt_timer_ccorres: + "ccorres dc xfdc + (vcpu_at' vcpuptr) + (\ \vcpu = vcpu_Ptr vcpuptr \) hs + (saveVirtTimer vcpuptr) (Call save_virt_timer_'proc)" + apply (cinit lift: vcpu_') + apply (ctac (no_vcg) add: vcpu_save_reg_ccorres) + apply (ctac (no_vcg) add: vcpu_hw_write_reg_ccorres) + apply (ctac (no_vcg) add: vcpu_save_reg_ccorres) + apply (ctac (no_vcg) add: vcpu_save_reg_ccorres) + apply (ctac (no_vcg) add: vcpu_save_reg_ccorres) + apply (ctac (no_vcg) add: check_export_arch_timer_ccorres) + apply (ctac (no_vcg) add: read_cntpct_ccorres) + apply clarsimp + apply (rule vcpuUpdate_vTimer_pcount_ccorres) + apply wpsimp+ + apply (simp add: vcpureg_eq_use_types[where reg=VCPURegCNTV_CVAL, simplified, symmetric] + vcpureg_eq_use_types[where reg=VCPURegCNTV_CTL, simplified, symmetric] + vcpureg_eq_use_types[where reg=VCPURegCNTVOFF, simplified, symmetric] + vcpureg_eq_use_types[where reg=VCPURegCNTKCTL_EL1, simplified, symmetric]) + done + +lemma armv_vcpu_save_ccorres: + "ccorres dc xfdc + (vcpu_at' vcpuptr) + (\ \vcpu = vcpu_Ptr vcpuptr \ \ \ \active = from_bool act \) hs + (armvVCPUSave vcpuptr act) (Call armv_vcpu_save_'proc)" + apply (cinit lift: vcpu_' active_') + apply (ctac (no_vcg) add: vcpu_save_reg_range_ccorres) + apply wpsimp + apply (clarsimp split: if_splits simp: seL4_VCPUReg_defs fromEnum_def enum_vcpureg) + done + +lemma vcpu_disable_ccorres: + "ccorres dc xfdc + (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' and valid_arch_state' + and (case v of None \ \ | Some new \ vcpu_at' new)) + ({s. vcpu_' s = option_to_ptr v}) hs + (vcpuDisable v) (Call vcpu_disable_'proc)" + supply if_cong[cong] option.case_cong[cong] empty_fail_cond[simp] + apply (cinit lift: vcpu_') + apply (ctac (no_vcg) add: dsb_ccorres) + apply (rule ccorres_split_nothrow_novcg) + apply wpc + (* v=None *) + apply simp + apply ccorres_rewrite + apply (rule ccorres_return_Skip) + (* v=Some x2 *) + apply (rule ccorres_cond_true) + apply (rule ccorres_rhs_assoc)+ + apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_hcr_ccorres) + apply (rule ccorres_split_nothrow_novcg[of _ _ dc xfdc]) + apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu, simp) + apply clarsimp + apply (ctac (no_vcg) add: vgicUpdate_HCR_ccorres) + apply ceqv + apply (ctac (no_vcg) add: vcpu_save_reg_ccorres) + apply (ctac (no_vcg) add: vcpu_save_reg_ccorres) + apply (ctac (no_vcg) pre: ccorres_call[where r=dc and xf'=xfdc] add: isb_ccorres) + apply (wpsimp simp: guard_is_UNIV_def)+ + apply (clarsimp split: if_splits simp: seL4_VCPUReg_CPACR_def fromEnum_def enum_vcpureg) + apply wpsimp + apply ceqv + apply (clarsimp simp: doMachineOp_bind bind_assoc) + apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_hcr_ccorres) + apply (ctac (no_vcg) add: isb_ccorres) + apply (ctac (no_vcg) add: setSCTLR_ccorres) + apply (ctac (no_vcg) add: isb_ccorres) + apply (ctac (no_vcg) add: setHCR_ccorres) + apply (ctac (no_vcg) add: isb_ccorres) + apply (ctac (no_vcg) add: enableFpuEL01_ccorres) + apply (wpc; ccorres_rewrite) + apply (rule ccorres_return_Skip) + apply (rename_tac vcpu_ptr) + apply (rule_tac P="the v \ 0" in ccorres_gen_asm) + apply ccorres_rewrite + apply (ctac (no_vcg) add: save_virt_timer_ccorres) + apply (ctac (no_vcg) add: maskInterrupt_ccorres) + apply (wpsimp wp: hoare_vcg_all_lift)+ + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem hcrNative_def sctlrDefault_def + 
irqVTimerEvent_def IRQ_def) + apply (wpsimp wp: hoare_vcg_all_lift)+ + apply (clarsimp simp: Collect_const_mem ko_at'_not_NULL dest!: vcpu_at_ko split: option.splits) + done + +lemma vcpu_enable_ccorres: + "ccorres dc xfdc + (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' + and valid_arch_state' and vcpu_at' v) + ({s. vcpu_' s = vcpu_Ptr v}) hs + (vcpuEnable v) (Call vcpu_enable_'proc)" + supply empty_fail_cond[simp] + apply (cinit lift: vcpu_') + apply (ctac (no_vcg) add: vcpu_restore_reg_ccorres)+ + apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) + apply (clarsimp simp: doMachineOp_bind bind_assoc) + apply (ctac (no_vcg) add: setHCR_ccorres) + apply (ctac (no_vcg) add: isb_ccorres) + apply (rule_tac P="ko_at' vcpu v" in ccorres_cross_over_guard) + apply (ctac pre: ccorres_move_c_guard_vcpu add: set_gic_vcpu_ctrl_hcr_ccorres) + apply wpsimp+ + apply (ctac (no_vcg) add: vcpu_restore_reg_ccorres) + apply (ctac (no_vcg) add: restore_virt_timer_ccorres) + apply wpsimp + apply simp + apply wpsimp + apply (vcg exspec=set_gic_vcpu_ctrl_hcr_modifies) + apply wpsimp+ + apply (rule_tac Q="\_. vcpu_at' v" in hoare_post_imp, fastforce) + apply wpsimp + apply (clarsimp simp: typ_heap_simps' Collect_const_mem cvcpu_relation_def + cvcpu_regs_relation_def Let_def cvgic_relation_def hcrVCPU_def + | rule conjI | simp)+ + apply (drule (1) vcpu_at_rf_sr) + apply (clarsimp simp: typ_heap_simps' cvcpu_relation_def cvgic_relation_def) + apply (clarsimp split: if_splits simp: seL4_VCPUReg_CPACR_def fromEnum_def enum_vcpureg) + done + +lemma vcpu_restore_ccorres: + notes upt_Suc[simp del] Collect_const[simp del] + shows + "ccorres dc xfdc + (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' and valid_arch_state' + and vcpu_at' vcpuPtr) + ({s. vcpu_' s = vcpu_Ptr vcpuPtr}) hs + (vcpuRestore vcpuPtr) (Call vcpu_restore_'proc)" + supply empty_fail_cond[simp] + apply (cinit lift: vcpu_' simp: whileAnno_def) + apply (simp add: doMachineOp_bind uncurry_def split_def doMachineOp_mapM_x)+ + apply (clarsimp simp: bind_assoc) + apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_hcr_ccorres) + apply (ctac (no_vcg) add: isb_ccorres) + apply (rule ccorres_pre_getObject_vcpu) + apply (rule ccorres_move_c_guard_vcpu, rename_tac vcpu) + apply (rule ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState, rename_tac lr_num) + apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_vmcr_ccorres) + apply (rule_tac P="ko_at' vcpu vcpuPtr" in ccorres_cross_over_guard) + apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_apr_ccorres) + apply (rule_tac xf'=lr_num_' and R="\s. lr_num = (armKSGICVCPUNumListRegs \ ksArchState) s" + and val="of_nat lr_num" in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + apply vcg + apply (fastforce intro!: rf_sr_armKSGICVCPUNumListRegs) + apply ceqv + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + (* the loop *) + apply (rule_tac P="lr_num \ 63" in ccorres_gen_asm) + apply (rule_tac F="\_ s. lr_num \ 63 \ ko_at' vcpu vcpuPtr s" in ccorres_mapM_x_while) + apply (intro allI impI) + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (rule_tac P="\s. 
lr_num \ 63" in ccorres_cross_over_guard) + apply (rule ccorres_Guard) + apply (rule_tac val="of_nat n" in ccorres_abstract_known[where xf'=i_'], ceqv) + apply (rule_tac P="n \ 63" in ccorres_gen_asm) + apply (rule ccorres_move_c_guard_vcpu) + apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_lr_ccorres) + apply (clarsimp simp: virq_to_H_def ko_at_vcpu_at'D upt_Suc) + apply (rule conjI) + apply (subst scast_eq_ucast; (rule refl)?) + apply (fastforce intro!: not_msb_from_less simp: word_less_nat_alt unat_of_nat) + apply (frule (1) vcpu_at_rf_sr) + apply (clarsimp simp: typ_heap_simps cvcpu_relation_regs_def cvgic_relation_def virq_to_H_def unat_of_nat) + apply (simp add: word_less_nat_alt upt_Suc) + subgoal (* FIXME extract into separate lemma *) + by (fastforce simp: word_less_nat_alt unat_of_nat_eq elim: order_less_le_trans) + apply clarsimp + apply (simp add: upt_Suc) + apply vcg + apply (fastforce simp: word_less_nat_alt unat_of_nat_eq word_bits_def elim: order_less_le_trans) + apply wpsimp + apply (simp add: upt_Suc word_bits_def) + apply ceqv + apply (ctac add: vcpu_restore_reg_range_ccorres) + apply (ctac add: vcpu_enable_ccorres) + apply wpsimp + apply (vcg exspec=vcpu_restore_reg_range_modifies) + apply (wpsimp wp: crunch_wps) + apply (wpsimp simp: guard_is_UNIV_def upt_Suc ko_at_vcpu_at'D wp: mapM_x_wp_inv + | rule UNIV_I + | wp hoare_vcg_imp_lift hoare_vcg_all_lift hoare_vcg_disj_lift)+ + apply (fastforce simp: fromEnum_def enum_vcpureg seL4_VCPUReg_defs) + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp simp: vcpu_at_ko'_eq wp: hoare_vcg_imp_lift')+ + apply (rule conjI) + apply (fastforce simp: invs_no_cicd'_def valid_arch_state'_def max_armKSGICVCPUNumListRegs_def) + apply (rule conjI) + apply (fastforce simp: fromEnum_def enum_vcpureg) + apply (fastforce dest!: vcpu_at_rf_sr + simp: typ_heap_simps' cvcpu_relation_def cvgic_relation_def) + done + +(* FIXME AARCH64 unused +lemma ccorres_pre_getsNumListRegs: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. (armKSGICVCPUNumListRegs \ ksArchState) s = rv \ P rv s)) + {s. \rv num'. gic_vcpu_num_list_regs_' (globals s) = num' + \ s \ P' rv } + hs (gets (armKSGICVCPUNumListRegs \ ksArchState) >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp + apply (rule hoare_gets_sp) + apply (clarsimp simp: empty_fail_def getCurThread_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply (clarsimp simp: rf_sr_ksArchState_armHSCurVCPU) + done *) + +lemma ccorres_gets_armKSGICVCPUNumListRegs: + "ccorres ((=) \ of_nat) lr_num_' \ UNIV hs + (gets (armKSGICVCPUNumListRegs \ ksArchState)) (\lr_num :== \gic_vcpu_num_list_regs)" + apply (rule ccorres_from_vcg_nofail) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def) + done + +lemma vgicUpdateLR_ccorres: + "ccorres dc xfdc (\ and K (n \ 63 \ n' = n \ virq_to_H v' = v)) UNIV hs + (vgicUpdateLR vcpuptr n v) + (Basic_heap_update + (\_. vgic_lr_C_Ptr &(vgic_C_Ptr &(vcpu_Ptr vcpuptr\[''vgic_C''])\[''lr_C''])) + (\s. 
Arrays.update + (h_val (hrs_mem (t_hrs_' (globals s))) + (vgic_lr_C_Ptr &(vgic_C_Ptr &(vcpu_Ptr vcpuptr\[''vgic_C''])\[''lr_C'']))) + n' v'))" + apply (rule ccorres_grab_asm) + apply (simp add: vgicUpdateLR_def vgicUpdate_def) + apply vcpuUpdate_ccorres + supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] + apply (fastforce simp: virq_to_H_def cvcpu_vppi_masked_relation_def split: if_split) + done + +lemma vcpu_save_ccorres: + notes Collect_const[simp del] + shows + "ccorres dc xfdc + (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' and valid_arch_state' + and case_option \ (vcpu_at' \ fst) v) + ({s. vcpu_' s = case_option NULL (vcpu_Ptr \ fst) v} + \ {s. active_' s = case_option 0 (from_bool \ snd) v}) hs + (vcpuSave v) (Call vcpu_save_'proc)" + supply if_cong[cong] option.case_cong[cong] + apply (cinit lift: vcpu_' active_' simp: whileAnno_def) + apply wpc + (* v = None *) + apply (rule ccorres_fail) + (* v = Some (vcpuPtr, active) *) + apply wpc + apply (rename_tac vcpuPtr act) + apply (ctac (no_vcg) add: dsb_ccorres) + apply (rule ccorres_split_nothrow_novcg) + apply (rule_tac R=\ in ccorres_when) + apply clarsimp + apply (rule ccorres_rhs_assoc)+ + apply (ctac (no_vcg) add: vcpu_save_reg_ccorres) + apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_hcr_ccorres) + apply (rule ccorres_move_c_guard_vcpu) + apply (clarsimp) + apply (ctac (no_vcg) add: vgicUpdate_HCR_ccorres) + apply (rule ccorres_call[where xf'=xfdc], rule save_virt_timer_ccorres) + apply wpsimp+ + apply ceqv + apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_vmcr_ccorres) + apply clarsimp + apply (rule ccorres_move_c_guard_vcpu) + apply (ctac (no_vcg) add: vgicUpdate_VMCR_ccorres) + apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_apr_ccorres) + apply (rule ccorres_move_c_guard_vcpu) + apply clarsimp + apply (ctac (no_vcg) add: vgicUpdate_APR_ccorres) + apply (ctac (no_vcg) add: ccorres_gets_armKSGICVCPUNumListRegs) + apply (rename_tac lr_num lr_num') + apply (rule ccorres_rhs_assoc2) + apply (rule ccorres_split_nothrow_novcg) + (* the loop *) + apply (rule_tac P="lr_num \ 63" in ccorres_gen_asm) + apply (rule_tac F="\_ s. lr_num \ 63 \ vcpu_at' vcpuPtr s" in ccorres_mapM_x_while) + apply (intro allI impI) + apply clarsimp + apply (rule ccorres_guard_imp2) + apply (rule_tac P="\s. lr_num \ 63" in ccorres_cross_over_guard) + apply (ctac (no_vcg) add: get_gic_vcpu_ctrl_lr_ccorres) + apply (rule ccorres_Guard) + apply (rule_tac val="of_nat n" in ccorres_abstract_known[where xf'=i_'], ceqv) + apply (rule_tac P="n \ 63" in ccorres_gen_asm) + apply (rule ccorres_move_c_guard_vcpu) + apply (clarsimp simp: unat_of_nat_eq) + apply (ctac (no_vcg) add: vgicUpdateLR_ccorres) + apply (wpsimp simp: virq_to_H_def)+ + apply (subst scast_eq_ucast; (rule refl)?) + apply (fastforce intro!: not_msb_from_less simp: word_less_nat_alt unat_of_nat) + apply (fastforce intro: word_of_nat_less) + apply (fastforce simp: word_less_nat_alt unat_of_nat) + apply clarsimp + apply (rule conseqPre, vcg exspec=get_gic_vcpu_ctrl_lr_modifies) + apply fastforce + apply wpsimp + apply (fastforce simp: word_bits_def) + apply ceqv + apply (ctac (no_vcg) add: armv_vcpu_save_ccorres) + apply (wpsimp simp: guard_is_UNIV_def wp: mapM_x_wp_inv)+ + apply (simp add: invs_no_cicd'_def valid_arch_state'_def max_armKSGICVCPUNumListRegs_def) + done + +lemma vcpu_switch_ccorres_None: + "ccorres dc xfdc + (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' + and valid_arch_state') + ({s. 
new_' s = NULL}) hs + (vcpuSwitch None) (Call vcpu_switch_'proc)" + apply (cinit lift: new_') + (* v = None *) + apply ccorres_rewrite + apply (simp add: when_def) + apply (rule ccorres_pre_getCurVCPU) + apply wpc + (* v = None & CurVCPU = None *) + apply (rule ccorres_cond_false) + apply (rule ccorres_return_Skip) + (* v = None & CurVCPU \ None *) + apply ccorres_rewrite + apply wpc + apply (rename_tac ccurv cactive) + apply simp + apply (rule ccorres_cond_true) + apply (rule_tac R="\s. armHSCurVCPU (ksArchState s) = Some (ccurv, cactive)" in ccorres_cond) + apply (clarsimp simp: cur_vcpu_relation_def dest!: rf_sr_ksArchState_armHSCurVCPU) + apply (ctac add: vcpu_disable_ccorres) + apply (rule_tac v=x2 in armHSCurVCPU_update_active_ccorres) + apply simp + apply simp + apply wp + apply clarsimp + apply assumption + apply clarsimp + apply (vcg exspec=vcpu_disable_modifies) + apply (rule ccorres_return_Skip) + apply (clarsimp, rule conjI) + apply (fastforce dest: invs_cicd_arch_state' simp: valid_arch_state'_def vcpu_at_is_vcpu' ko_wp_at'_def split: option.splits) + by (auto dest!: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def)+ + +lemma vcpu_switch_ccorres_Some: + "ccorres dc xfdc + (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' + and valid_arch_state' and vcpu_at' v) + ({s. new_' s = vcpu_Ptr v}) hs + (vcpuSwitch (Some v)) (Call vcpu_switch_'proc)" + supply if_cong[cong] option.case_cong[cong] + apply (cinit lift: new_') + (* v \ None *) + apply simp + apply (rule ccorres_pre_getCurVCPU) + apply wpc + (* v \ None & CurVCPU = None *) + apply (rule ccorres_cond_true) + apply (rule ccorres_cond_true) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_cond_false_seq) + apply ccorres_rewrite + apply (ctac add: vcpu_restore_ccorres) + apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres) + apply wp + apply clarsimp + apply (vcg exspec=vcpu_restore_modifies) + (* v \ None & CurVCPU \ None *) + apply wpc + apply (rename_tac ccurv cactive) + apply (rule_tac R="\s. (armHSCurVCPU \ ksArchState) s = Some (ccurv, cactive)" in ccorres_cond) + apply (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU + simp: Collect_const_mem cur_vcpu_relation_def + split: option.splits) + (* new \ CurVCPU or equivalently v \ ccurv *) + apply (rule ccorres_cond_true) + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_cond_true_seq) + apply (ctac add: vcpu_save_ccorres) + apply (ctac add: vcpu_restore_ccorres) + apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres) + apply wp + apply clarsimp + apply (vcg exspec=vcpu_restore_modifies) + apply (wpsimp wp: hoare_vcg_conj_lift vcpuSave_invs_no_cicd' vcpuSave_typ_at') + apply clarsimp + apply (vcg exspec=vcpu_save_modifies) + (* new = CurVCPU or equivalently v = ccurv *) + apply (unfold when_def) + apply (rule_tac R="\s. 
(ccurv = v) \ (armHSCurVCPU \ ksArchState) s = Some (ccurv, cactive)" + in ccorres_cond) + apply (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU + simp: Collect_const_mem cur_vcpu_relation_def from_bool_def + split: option.splits bool.splits) + (* ccactive = false *) + apply (rule ccorres_rhs_assoc) + apply (ctac (no_vcg) add: isb_ccorres) + apply (ctac (no_vcg) add: vcpu_enable_ccorres) + apply (rule_tac v="(v, cactive)" in armHSCurVCPU_update_active_ccorres) + apply simp + apply simp + apply wp + apply (wpsimp wp: hoare_vcg_conj_lift vcpuSave_invs_no_cicd' vcpuSave_typ_at') + (* ccactive =true *) + apply (rule ccorres_return_Skip) + (* last goal *) + apply simp + apply (rule conjI + | clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU + simp: Collect_const_mem cur_vcpu_relation_def + | fastforce dest: invs_cicd_arch_state' split: option.splits + simp: valid_arch_state'_def vcpu_at_is_vcpu' ko_wp_at'_def Collect_const_mem)+ + done + +lemma vcpu_switch_ccorres: + "ccorres dc xfdc + (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' + and valid_arch_state' + and (case v of None \ \ | Some new \ vcpu_at' new)) + ({s. new_' s = option_to_ptr v \ \(case v of None \ NULL | Some new \ vcpu_Ptr new)\ }) hs + (vcpuSwitch v) (Call vcpu_switch_'proc)" + by (cases v; clarsimp simp: vcpu_switch_ccorres_None[simplified] vcpu_switch_ccorres_Some[simplified]) + + +lemma invs_no_cicd_sym_hyp'[elim!]: + "invs_no_cicd' s \ sym_refs (state_hyp_refs_of' s)" + by (simp add: invs_no_cicd'_def valid_state'_def) + +(* FIXME AARCH64 the above was above setVMRoot_ccorres on ARM_HYP, so things might be needed earlier *) + +end + +end diff --git a/proof/crefine/AARCH64/Wellformed_C.thy b/proof/crefine/AARCH64/Wellformed_C.thy new file mode 100644 index 0000000000..9bbe8b1e99 --- /dev/null +++ b/proof/crefine/AARCH64/Wellformed_C.thy @@ -0,0 +1,704 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Wellformedness of caps, kernel objects, states on the C level *) + +theory Wellformed_C +imports + "CLib.CTranslationNICTA" + CLevityCatch + "CSpec.Substitute" +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +(* Takes an address and ensures it can be given to a function expecting a canonical address. + Canonical addresses on 64-bit machines aren't really 64-bit, due to bus sizes. Hence, structures + used by the bitfield generator will use packed addresses, resulting in this mask in the C code + on AARCH64 (which would be a cast plus sign-extension on X64 and RISCV64). + For our spec rules, it's better to wrap the magic numbers if possible. + + Dependency-wise this could also go into Invariants_H, but we want to limit its use to CRefine. 
*) +definition make_canonical :: "machine_word \ machine_word" where + "make_canonical p \ p && mask (Suc canonical_bit)" + +lemma make_canonical_0[simp]: + "make_canonical 0 = 0" + by (simp add: make_canonical_def) + +lemma canonical_make_canonical_idem: + "canonical_address p \ make_canonical p = p" + unfolding make_canonical_def + by (simp add: canonical_address_mask_eq) + +lemma make_canonical_is_canonical: + "canonical_address (make_canonical p)" + unfolding make_canonical_def + by (simp add: canonical_address_mask_eq) + +(* This is [simp] because if we see this pattern, it's very likely that we want to use the + other make_canonical rules *) +lemma make_canonical_and_fold[simp]: + "p && mask (Suc canonical_bit) && n = make_canonical p && n" for p :: machine_word + by (simp flip: make_canonical_def word_bw_assocs) + +lemmas make_canonical_fold = make_canonical_def[symmetric, unfolded canonical_bit_def, simplified] + +schematic_goal Suc_canonical_bit_fold: + "numeral ?n = Suc canonical_bit" + by (simp add: canonical_bit_def) + +lemma make_canonical_aligned: + "is_aligned p n \ is_aligned (make_canonical p) n" + by (simp add: is_aligned_mask make_canonical_def) word_eqI_solve + +abbreviation + cte_Ptr :: "addr \ cte_C ptr" where "cte_Ptr == Ptr" +abbreviation + mdb_Ptr :: "addr \ mdb_node_C ptr" where "mdb_Ptr == Ptr" +abbreviation + cap_Ptr :: "addr \ cap_C ptr" where "cap_Ptr == Ptr" +abbreviation + tcb_Ptr :: "addr \ tcb_C ptr" where "tcb_Ptr == Ptr" +abbreviation + atcb_Ptr :: "addr \ arch_tcb_C ptr" where "atcb_Ptr == Ptr" +abbreviation + vcpu_Ptr :: "addr \ vcpu_C ptr" where "vcpu_Ptr == Ptr" +abbreviation + ep_Ptr :: "addr \ endpoint_C ptr" where "ep_Ptr == Ptr" +abbreviation + ntfn_Ptr :: "addr \ notification_C ptr" where "ntfn_Ptr == Ptr" +abbreviation + ap_Ptr :: "addr \ asid_pool_C ptr" where "ap_Ptr == Ptr" + +type_synonym pt_ptr = "(pte_C[pt_array_len]) ptr" +type_synonym vs_ptr = "(pte_C[vs_array_len]) ptr" + +abbreviation + pte_Ptr :: "addr \ pte_C ptr" where "pte_Ptr == Ptr" +abbreviation + pt_Ptr :: "machine_word \ pt_ptr" where "pt_Ptr == Ptr" +abbreviation + vs_Ptr :: "machine_word \ vs_ptr" where "vs_Ptr == Ptr" + +abbreviation + vgic_lr_C_Ptr :: "addr \ (virq_C[64]) ptr" where "vgic_lr_C_Ptr \ Ptr" +abbreviation + vgic_C_Ptr :: "addr \ gicVCpuIface_C ptr" where "vgic_C_Ptr \ Ptr" +abbreviation + vcpu_vppi_masked_C_Ptr :: "addr \ (machine_word[1]) ptr" where "vcpu_vppi_masked_C_Ptr \ Ptr" + +declare seL4_VCPUReg_Num_def[code] +value_type num_vcpu_regs = "unat seL4_VCPUReg_Num" + +abbreviation + vcpuregs_C_Ptr :: "addr \ (machine_word[num_vcpu_regs]) ptr" where "vcpuregs_C_Ptr \ Ptr" + +type_synonym tcb_cnode_array = "cte_C[5]" +type_synonym registers_count = 37 (* length enum_register *) +type_synonym registers_array = "machine_word[registers_count]" + +(* typedef word_t register_t; *) +type_synonym register_idx_len = machine_word_len +type_synonym register_idx = "register_idx_len word" + +(* representation of C int literals, the default for any unadorned numeral *) +type_synonym int_literal_len = "32 signed" +type_synonym int_word = "int_literal_len word" + +abbreviation "user_context_Ptr \ Ptr :: addr \ user_context_C ptr" +abbreviation "machine_word_Ptr \ Ptr :: addr \ machine_word ptr" +abbreviation "tcb_cnode_Ptr \ Ptr :: addr \ tcb_cnode_array ptr" +abbreviation "registers_Ptr \ Ptr :: addr \ registers_array ptr" + +lemma halt_spec: + "Gamma \ {} Call halt_'proc {}" + apply (rule hoare_complete) + apply (simp add: HoarePartialDef.valid_def) + done + +definition 
+ isUntypedCap_C :: "cap_CL \ bool" where + "isUntypedCap_C c \ + case c of + Cap_untyped_cap q \ True + | _ \ False" + +definition + isNullCap_C :: "cap_CL \ bool" where + "isNullCap_C c \ + case c of + Cap_null_cap \ True + | _ \ False" + +definition + isEndpointCap_C :: "cap_CL \ bool" where + "isEndpointCap_C v \ case v of + Cap_endpoint_cap ec \ True + | _ \ False" + +definition + isCNodeCap_C :: "cap_CL \ bool" where + "isCNodeCap_C c \ case c of + Cap_cnode_cap a \ True + | _ \ False" + +definition + isThreadCap_C :: "cap_CL \ bool" where + "isThreadCap_C c \ case c of + Cap_thread_cap a \ True + | _ \ False" + +definition + isIRQControlCap_C :: "cap_CL \ bool" where + "isIRQControlCap_C c \ case c of + Cap_irq_control_cap \ True + | _ \ False" + +definition + isIRQHandlerCap_C :: "cap_CL \ bool" where + "isIRQHandlerCap_C c \ case c of + Cap_irq_handler_cap a \ True + | _ \ False" + +definition + isNotificationCap_C :: "cap_CL \ bool" where + "isNotificationCap_C v \ case v of + Cap_notification_cap aec \ True + | _ \ False" + +definition + ep_at_C' :: "word64 \ heap_raw_state \ bool" +where + "ep_at_C' p h \ Ptr p \ dom (clift h :: endpoint_C typ_heap)" \ \endpoint_lift is total\ + +definition + ntfn_at_C' :: "word64 \ heap_raw_state \ bool" + where \ \notification_lift is total\ + "ntfn_at_C' p h \ Ptr p \ dom (clift h :: notification_C typ_heap)" + +definition + tcb_at_C' :: "word64 \ heap_raw_state \ bool" + where + "tcb_at_C' p h \ Ptr p \ dom (clift h :: tcb_C typ_heap)" + +definition + cte_at_C' :: "word64 \ heap_raw_state \ bool" + where + "cte_at_C' p h \ Ptr p \ dom (clift h :: cte_C typ_heap)" + +definition + ctcb_ptr_to_tcb_ptr :: "tcb_C ptr \ word64" + where + "ctcb_ptr_to_tcb_ptr p \ ptr_val p - ctcb_offset" + +definition + tcb_ptr_to_ctcb_ptr :: "word64 \ tcb_C ptr" + where + "tcb_ptr_to_ctcb_ptr p \ Ptr (p + ctcb_offset)" + +primrec + tcb_queue_relation :: "(tcb_C \ tcb_C ptr) \ (tcb_C \ tcb_C ptr) \ + (tcb_C ptr \ tcb_C option) \ word64 list \ + tcb_C ptr \ tcb_C ptr \ bool" +where + "tcb_queue_relation getNext getPrev hp [] qprev qhead = (qhead = NULL)" +| "tcb_queue_relation getNext getPrev hp (x#xs) qprev qhead = + (qhead = tcb_ptr_to_ctcb_ptr x \ + (\tcb. 
(hp qhead = Some tcb \ getPrev tcb = qprev \ tcb_queue_relation getNext getPrev hp xs qhead (getNext tcb))))" + +abbreviation + "ep_queue_relation \ tcb_queue_relation tcbEPNext_C tcbEPPrev_C" + +definition +capUntypedPtr_C :: "cap_CL \ word64" where + "capUntypedPtr_C cap \ case cap of + (Cap_untyped_cap uc) \ capBlockSize_CL uc + | Cap_endpoint_cap ep \ capEPPtr_CL ep + | Cap_notification_cap ntfn \ capNtfnPtr_CL ntfn + | Cap_cnode_cap ccap \ capCNodePtr_CL ccap + | Cap_reply_cap rc \ cap_reply_cap_CL.capTCBPtr_CL rc + | Cap_thread_cap tc \ cap_thread_cap_CL.capTCBPtr_CL tc + | Cap_frame_cap fc \ cap_frame_cap_CL.capFBasePtr_CL fc + | Cap_vspace_cap vsc \ cap_vspace_cap_CL.capVSBasePtr_CL vsc + | Cap_page_table_cap ptc \ cap_page_table_cap_CL.capPTBasePtr_CL ptc + | Cap_vcpu_cap tc \ cap_vcpu_cap_CL.capVCPUPtr_CL tc + | _ \ error []" + +definition ZombieTCB_C_def: +"ZombieTCB_C \ bit 6" (*wordRadix*) + +definition + isZombieTCB_C :: "word64 \ bool" where + "isZombieTCB_C v \ v = ZombieTCB_C" + +(* FIXME AARCH64 vmrights_to_H should be renamed vm_rights_to_H on all platforms, as there is no + "vmrights" anywhere, and follow that up with renaming "vmrights" lemmas *) + +definition +vmrights_to_H :: "word64 \ vmrights" where +"vmrights_to_H c \ + if c = scast Kernel_C.VMReadWrite then VMReadWrite + else if c = scast Kernel_C.VMReadOnly then VMReadOnly + else VMKernelOnly" + +definition vm_attributes_to_H :: "vm_attributes_C \ vmattributes" where + "vm_attributes_to_H attrs_raw \ + let attrs = vm_attributes_lift attrs_raw in + VMAttributes (to_bool (armExecuteNever_CL attrs)) + (to_bool (armPageCacheable_CL attrs))" + +definition attridx_from_vmattributes :: "vmattributes \ machine_word" where + "attridx_from_vmattributes attrs \ + if armPageCacheable attrs + then ucast Kernel_C.S2_NORMAL + else ucast Kernel_C.S2_DEVICE_nGnRnE" + +definition uxn_from_vmattributes :: "vmattributes \ machine_word" where + "uxn_from_vmattributes attrs \ from_bool (armExecuteNever attrs)" + +(* Force clarity over name collisions *) +abbreviation + ARMSmallPage :: "vmpage_size" where + "ARMSmallPage == AARCH64.ARMSmallPage" +abbreviation + ARMLargePage :: "vmpage_size" where + "ARMLargePage == AARCH64.ARMLargePage" +abbreviation + ARMHugePage :: "vmpage_size" where + "ARMHugePage == AARCH64.ARMHugePage" + +definition framesize_to_H :: "machine_word \ vmpage_size" where + "framesize_to_H c \ + if c = scast Kernel_C.ARMSmallPage then ARMSmallPage + else if c = scast Kernel_C.ARMLargePage then ARMLargePage + else ARMHugePage" + +definition + framesize_from_H :: "vmpage_size \ machine_word" +where + "framesize_from_H sz \ + case sz of + ARMSmallPage \ scast Kernel_C.ARMSmallPage + | ARMLargePage \ scast Kernel_C.ARMLargePage + | ARMHugePage \ scast Kernel_C.ARMHugePage" + +lemmas framesize_defs = Kernel_C.ARMSmallPage_def Kernel_C.ARMLargePage_def + Kernel_C.ARMHugePage_def + +lemma framesize_from_to_H: + "framesize_to_H (framesize_from_H sz) = sz" + by (simp add: framesize_to_H_def framesize_from_H_def framesize_defs + split: if_split vmpage_size.splits) + +lemma framesize_from_H_eq: + "(framesize_from_H sz = framesize_from_H sz') = (sz = sz')" + by (cases sz; cases sz'; + simp add: framesize_from_H_def framesize_defs) + +end + +record cte_CL = + cap_CL :: cap_CL + cteMDBNode_CL :: mdb_node_CL + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +definition + cte_lift :: "cte_C \ cte_CL" + where + "cte_lift c \ case cap_lift (cte_C.cap_C c) of + None \ None + | Some cap \ Some \ cap_CL = cap, + cteMDBNode_CL = mdb_node_lift (cteMDBNode_C c) \" + +definition + mdb_node_to_H :: "mdb_node_CL \ mdbnode" + where + "mdb_node_to_H n \ MDB (mdbNext_CL n) + (mdbPrev_CL n) + (to_bool (mdbRevocable_CL n)) + (to_bool (mdbFirstBadged_CL n))" + +definition +cap_to_H :: "cap_CL \ capability" +where +"cap_to_H c \ case c of + Cap_null_cap \ NullCap + | Cap_zombie_cap zc \ (if isZombieTCB_C(capZombieType_CL zc) + then + (Zombie ((capZombieID_CL zc) && ~~(mask(5))) + (ZombieTCB) + (unat ((capZombieID_CL zc) && mask(5)))) + else let radix = unat (capZombieType_CL zc) in + (Zombie ((capZombieID_CL zc) && ~~(mask (radix+1))) + (ZombieCNode radix) + (unat ((capZombieID_CL zc) && mask(radix+1))))) + | Cap_cnode_cap ccap \ + CNodeCap (capCNodePtr_CL ccap) (unat (capCNodeRadix_CL ccap)) + (capCNodeGuard_CL ccap) + (unat (capCNodeGuardSize_CL ccap)) + | Cap_untyped_cap uc \ UntypedCap (to_bool(capIsDevice_CL uc)) (capPtr_CL uc) (unat (capBlockSize_CL uc)) (unat (capFreeIndex_CL uc << 4)) + | Cap_endpoint_cap ec \ + EndpointCap (capEPPtr_CL ec) (capEPBadge_CL ec) (to_bool(capCanSend_CL ec)) (to_bool(capCanReceive_CL ec)) + (to_bool(capCanGrant_CL ec)) (to_bool(capCanGrantReply_CL ec)) + | Cap_notification_cap ntfn \ + NotificationCap (capNtfnPtr_CL ntfn)(capNtfnBadge_CL ntfn)(to_bool(capNtfnCanSend_CL ntfn)) + (to_bool(capNtfnCanReceive_CL ntfn)) + | Cap_reply_cap rc \ ReplyCap (ctcb_ptr_to_tcb_ptr (Ptr (cap_reply_cap_CL.capTCBPtr_CL rc))) + (to_bool (capReplyMaster_CL rc)) (to_bool (capReplyCanGrant_CL rc)) + | Cap_thread_cap tc \ ThreadCap(ctcb_ptr_to_tcb_ptr (Ptr (cap_thread_cap_CL.capTCBPtr_CL tc))) + | Cap_irq_handler_cap ihc \ IRQHandlerCap (ucast(capIRQ_CL ihc)) + | Cap_irq_control_cap \ IRQControlCap + | Cap_asid_control_cap \ ArchObjectCap ASIDControlCap + | Cap_asid_pool_cap apc \ ArchObjectCap (ASIDPoolCap (capASIDPool_CL apc) (capASIDBase_CL apc)) + | Cap_frame_cap fc \ ArchObjectCap (FrameCap (capFBasePtr_CL fc) + (vmrights_to_H(capFVMRights_CL fc)) + (framesize_to_H(capFSize_CL fc)) + (to_bool(capFIsDevice_CL fc)) + (if capFMappedASID_CL fc = 0 + then None else + Some(capFMappedASID_CL fc, capFMappedAddress_CL fc))) + | Cap_vspace_cap vsc \ ArchObjectCap + (PageTableCap (capVSBasePtr_CL vsc) VSRootPT_T + (if to_bool (capVSIsMapped_CL vsc) + then Some (capVSMappedASID_CL vsc, 0) + else None)) + \ \cap_vspace_cap_CL does not have a mapped address field, and the vaddr for mapped VSRoot_T caps + is always 0 due to alignment constraint\ + | Cap_page_table_cap ptc \ ArchObjectCap + (PageTableCap (cap_page_table_cap_CL.capPTBasePtr_CL ptc) NormalPT_T + (if to_bool (capPTIsMapped_CL ptc) + then Some (capPTMappedASID_CL ptc, capPTMappedAddress_CL ptc) + else None)) + | Cap_domain_cap \ DomainCap + | Cap_vcpu_cap vcpu \ ArchObjectCap (VCPUCap (capVCPUPtr_CL vcpu))" + +lemmas cap_to_H_simps = cap_to_H_def[split_simps cap_CL.split] + +definition + cte_to_H :: "cte_CL \ cte" + where + "cte_to_H cte \ CTE (cap_to_H (cap_CL cte)) (mdb_node_to_H (cteMDBNode_CL cte))" + +(* FIXME AARCH64 the "9" here is irq size, do we have a better abbreviation for irq bits? 
*) +definition +cl_valid_cap :: "cap_CL \ bool" +where +"cl_valid_cap c \ + case c of + Cap_irq_handler_cap fc \ ((capIRQ_CL fc) && mask 9 = capIRQ_CL fc) + | Cap_frame_cap fc \ capFSize_CL fc < 3 \ capFVMRights_CL fc < 4 \ capFVMRights_CL fc \ 2 + | x \ True" + +definition +c_valid_cap :: "cap_C \ bool" +where +"c_valid_cap c \ case_option True cl_valid_cap (cap_lift c)" + +definition +cl_valid_cte :: "cte_CL \ bool" +where +"cl_valid_cte c \ cl_valid_cap (cap_CL c)" + +definition +c_valid_cte :: "cte_C \ bool" +where +"c_valid_cte c \ c_valid_cap (cte_C.cap_C c)" + +(* all uninteresting cases can be deduced from the cap tag *) +lemma c_valid_cap_simps [simp]: + "cap_get_tag c = scast cap_thread_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_notification_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_endpoint_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_cnode_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_asid_control_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_irq_control_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_vspace_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_page_table_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_asid_pool_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_untyped_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_zombie_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_reply_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_vcpu_cap \ c_valid_cap c" + "cap_get_tag c = scast cap_null_cap \ c_valid_cap c" + unfolding c_valid_cap_def cap_lift_def cap_tag_defs + by (simp add: cl_valid_cap_def)+ + +lemma ptr_val_tcb_ptr_mask2: + "is_aligned thread tcbBlockSizeBits + \ ptr_val (tcb_ptr_to_ctcb_ptr thread) && (~~ mask tcbBlockSizeBits) + = thread" + apply (clarsimp simp: tcb_ptr_to_ctcb_ptr_def) + apply (simp add: is_aligned_add_helper ctcb_offset_defs objBits_simps') + done + +section \Domains\ + +text \ + seL4's build system allows configuration of the number of domains. This means the proofs have to + work for any number of domains provided it fits into the hard limit of an 8-bit word. + + In the C code, we have the enumerated constant numDomains, one greater than maxDom. In the + abstract specs, we have the corresponding Platform_Config.numDomains and maxDomain. + + To keep the proofs as general as possible, we avoid unfolding definitions of: + maxDom, maxDomain, numDomains except in this theory where we need to establish basic properties. + + Unfortunately, array bounds checks coming from the C code use numerical values, meaning we might + get 0x10 instead of the number of domains, or 0x1000 for numDomains * numPriorities. To solve + these, the "explicit" lemmas expose direct numbers. They are more risky to deploy, as one could + prove that 0x5 is less than the number of domains when that's the case, and then the proof will + break upon reconfiguration. +\ + +text \The @{text num_domains} enumerated type and constant represent the number of domains.\ + +value_type num_domains = "numDomains" + +context includes no_less_1_simps begin + +(* The proofs expect the minimum priority and minimum domain to be zero. + Note that minDom is unused in the C code. 
*) +lemma min_prio_dom_sanity: + "seL4_MinPrio = 0" + "Kernel_C.minDom = 0" + by (auto simp: seL4_MinPrio_def minDom_def) + +lemma less_numDomains_is_domain[simplified word_size, simplified]: + "x < numDomains \ x < 2 ^ size (y::domain)" + unfolding Kernel_Config.numDomains_def + by (simp add: word_size) + +lemma sint_numDomains_to_H: + "sint Kernel_C.numDomains = int Kernel_Config.numDomains" + by (clarsimp simp: Kernel_C.numDomains_def Kernel_Config.numDomains_def) + +lemma unat_numDomains_to_H: + "unat Kernel_C.numDomains = Kernel_Config.numDomains" + by (clarsimp simp: Kernel_C.numDomains_def Kernel_Config.numDomains_def) + +lemma maxDom_to_H: + "ucast maxDom = maxDomain" + by (simp add: maxDomain_def Kernel_C.maxDom_def Kernel_Config.numDomains_def) + +lemma maxDom_sgt_0_maxDomain: + "0 0 < maxDomain" + unfolding Kernel_C.maxDom_def maxDomain_def Kernel_Config.numDomains_def + by clarsimp + +lemma num_domains_calculation: + "num_domains = numDomains" + unfolding num_domains_val by eval + +private lemma num_domains_card_explicit: + "num_domains = CARD(num_domains)" + by (simp add: num_domains_val) + +lemmas num_domains_index_updates = + index_update[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, + simplified num_domains_calculation] + index_update2[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, + simplified num_domains_calculation] + +(* C ArrayGuards will throw these at us and there is no way to avoid a proof of being less than a + specific number expressed as a word, so we must introduce these. However, being explicit means + lack of discipline can lead to a violation. *) +lemma numDomains_less_numeric_explicit[simplified num_domains_val One_nat_def]: + "x < Kernel_Config.numDomains \ x < num_domains" + by (simp add: num_domains_calculation) + +lemma numDomains_less_unat_ucast_explicit[simplified num_domains_val]: + "unat x < Kernel_Config.numDomains \ (ucast (x::domain) :: machine_word) < of_nat num_domains" + apply (rule word_less_nat_alt[THEN iffD2]) + apply transfer + apply simp + apply (drule numDomains_less_numeric_explicit, simp add: num_domains_val) + done + +lemmas maxDomain_le_unat_ucast_explicit = + numDomains_less_unat_ucast_explicit[simplified le_maxDomain_eq_less_numDomains(2)[symmetric], + simplified] + +end (* numDomain abstraction definitions and lemmas *) + + +text \Priorities - not expected to be configurable\ + +lemma maxPrio_to_H: + "ucast seL4_MaxPrio = maxPriority" + by (simp add: maxPriority_def seL4_MaxPrio_def numPriorities_def) + + +text \TCB scheduling queues\ + +(* establish and sanity-check relationship between the calculation of the number of TCB queues and + the size of the array in C *) +value_type num_tcb_queues = "numDomains * numPriorities" + +lemma num_tcb_queues_calculation: + "num_tcb_queues = numDomains * numPriorities" + unfolding num_tcb_queues_val by eval + + +(* Input abbreviations for API object types *) +(* disambiguates names *) + +abbreviation(input) + NotificationObject :: sword32 +where + "NotificationObject == seL4_NotificationObject" + +abbreviation(input) + CapTableObject :: sword32 +where + "CapTableObject == seL4_CapTableObject" + +abbreviation(input) + EndpointObject :: sword32 +where + "EndpointObject == seL4_EndpointObject" + +abbreviation(input) + VSpaceObject :: sword32 +where + "VSpaceObject == seL4_ARM_VSpaceObject" + +abbreviation(input) + PageTableObject :: sword32 +where + "PageTableObject == seL4_ARM_PageTableObject" + +abbreviation(input) + SmallPageObject :: sword32 
+where + "SmallPageObject == seL4_ARM_SmallPageObject" + +abbreviation(input) + LargePageObject :: sword32 +where + "LargePageObject == seL4_ARM_LargePageObject" + +abbreviation(input) + HugePageObject :: sword32 +where + "HugePageObject == seL4_ARM_HugePageObject" + +abbreviation(input) + VCPUObject :: sword32 +where + "VCPUObject == seL4_ARM_VCPUObject" + +abbreviation(input) + TCBObject :: sword32 +where + "TCBObject == seL4_TCBObject" + +abbreviation(input) + UntypedObject :: sword32 +where + "UntypedObject == seL4_UntypedObject" + +abbreviation(input) + maxPrio :: sword32 +where + "maxPrio == seL4_MaxPrio" + +abbreviation(input) + minPrio :: sword32 +where + "minPrio == seL4_MinPrio" + +abbreviation(input) + nAPIObjects :: sword32 +where + "nAPIObjects == seL4_NonArchObjectTypeCount" + +abbreviation(input) + nObjects :: sword32 +where + "nObjects == seL4_ObjectTypeCount" + +abbreviation(input) + prioInvalid :: sword32 +where + "prioInvalid == seL4_InvalidPrio" + +(* caches *) + +definition cacheLineSize :: nat where + "cacheLineSize \ 6" + +lemma addrFromPPtr_mask_cacheLineSize: + "addrFromPPtr ptr && mask cacheLineSize = ptr && mask cacheLineSize" + apply (simp add: addrFromPPtr_def AARCH64.pptrBase_def pptrBaseOffset_def canonical_bit_def + paddrBase_def cacheLineSize_def mask_def) + apply word_bitwise + done + +lemma pptrBaseOffset_cacheLineSize_aligned[simp]: + "pptrBaseOffset && mask cacheLineSize = 0" + by (simp add: pptrBaseOffset_def paddrBase_def pptrBase_def cacheLineSize_def mask_def) + +lemma ptrFromPAddr_mask_cacheLineSize[simp]: + "ptrFromPAddr v && mask cacheLineSize = v && mask cacheLineSize" + by (simp add: ptrFromPAddr_def add_mask_ignore) + +(* The magic 4 comes out of the bitfield generator -- this applies to all versions of the kernel. *) +lemma ThreadState_Restart_mask[simp]: + "(scast ThreadState_Restart::machine_word) && mask 4 = scast ThreadState_Restart" + by (simp add: ThreadState_Restart_def mask_def) + +lemma aligned_tcb_ctcb_not_NULL: + assumes "is_aligned p tcbBlockSizeBits" + shows "tcb_ptr_to_ctcb_ptr p \ NULL" +proof + assume "tcb_ptr_to_ctcb_ptr p = NULL" + hence "p + ctcb_offset = 0" + by (simp add: tcb_ptr_to_ctcb_ptr_def) + moreover + from `is_aligned p tcbBlockSizeBits` + have "p + ctcb_offset = p || ctcb_offset" + by (rule word_and_or_mask_aligned) (simp add: ctcb_offset_defs objBits_defs mask_def) + moreover + have "ctcb_offset !! ctcb_size_bits" + by (simp add: ctcb_offset_defs objBits_defs) + ultimately + show False + by (simp add: bang_eq) +qed + +lemma tcb_at_not_NULL: + "tcb_at' t s \ tcb_ptr_to_ctcb_ptr t \ NULL" + by (rule aligned_tcb_ctcb_not_NULL) (rule tcb_aligned') + +(* generic lemmas with arch-specific consequences *) + +schematic_goal size_gpRegisters: + "size AARCH64.gpRegisters = numeral ?x" + supply Suc_eq_numeral[simp del] One_nat_def[simp del] + by (simp add: upto_enum_def fromEnum_def enum_register + AARCH64.gpRegisters_def) + (simp add: Suc_eq_plus1) + +schematic_goal size_frameRegisters: + "size AARCH64.frameRegisters = numeral ?x" + supply Suc_eq_numeral[simp del] One_nat_def[simp del] + by (simp add: upto_enum_def fromEnum_def enum_register + AARCH64.frameRegisters_def) + (simp add: Suc_eq_plus1) + +(* Could live in Refine, but we want to make sure this is only used in CRefine. Before CRefine + the numeral value should never be stated explicitly. 
*) +schematic_goal maxPTLevel_val: + "maxPTLevel = numeral ?n" + by (simp add: maxPTLevel_def Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) + +end + +end diff --git a/proof/crefine/ARM/ADT_C.thy b/proof/crefine/ARM/ADT_C.thy index dd69714fb6..f41e08f478 100644 --- a/proof/crefine/ARM/ADT_C.thy +++ b/proof/crefine/ARM/ADT_C.thy @@ -75,8 +75,8 @@ lemma Basic_sem_eq: lemma setTCBContext_C_corres: "\ ccontext_relation tc tc'; t' = tcb_ptr_to_ctcb_ptr t \ \ - corres_underlying rf_sr nf nf' dc (pspace_domain_valid and tcb_at' t) \ - (threadSet (\tcb. tcb \ tcbArch := atcbContextSet tc (tcbArch tcb)\) t) (setTCBContext_C tc' t')" + corres_underlying rf_sr nf nf' dc (pspace_domain_valid and tcb_at' t) \ + (threadSet (\tcb. tcb \ tcbArch := atcbContextSet tc (tcbArch tcb)\) t) (setTCBContext_C tc' t')" apply (simp add: setTCBContext_C_def exec_C_def Basic_sem_eq corres_underlying_def) apply clarsimp apply (simp add: threadSet_def bind_assoc split_def exec_gets) @@ -84,7 +84,7 @@ lemma setTCBContext_C_corres: apply clarsimp apply (frule getObject_eq [rotated -1], simp) apply (simp add: objBits_simps') - apply (simp add: NonDetMonad.bind_def split_def) + apply (simp add: Nondet_Monad.bind_def split_def) apply (rule bexI) prefer 2 apply assumption @@ -105,8 +105,6 @@ lemma setTCBContext_C_corres: apply (simp add: map_to_ctes_upd_tcb_no_ctes map_to_tcbs_upd tcb_cte_cases_def cvariable_relation_upd_const ko_at_projectKO_opt cteSizeBits_def) apply (simp add: cep_relations_drop_fun_upd) - apply (apply_conjunct \match conclusion in \cready_queues_relation _ _ _\ \ - \erule cready_queues_relation_not_queue_ptrs; rule ext; simp split: if_split\\) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) apply (simp add: ctcb_relation_def carch_tcb_relation_def) @@ -118,26 +116,24 @@ end definition "register_to_H \ inv register_from_H" +context state_rel begin + definition to_user_context_C :: "user_context \ user_context_C" where - "to_user_context_C uc \ user_context_C (FCP (\r. uc (register_to_H (of_nat r))))" - -context kernel_m begin - -lemma ccontext_rel_to_C: - "ccontext_relation uc (to_user_context_C uc)" - apply (clarsimp simp: ccontext_relation_def to_user_context_C_def) - apply (rule arg_cong [where f=uc]) - apply (simp add: register_to_H_def inv_def) - done - -end + "to_user_context_C uc \ + user_context_C (ARRAY r. user_regs uc (register_to_H (of_nat r)))" definition from_user_context_C :: "user_context_C \ user_context" where - "from_user_context_C uc \ \r. index (registers_C uc) (unat (register_from_H r))" + "from_user_context_C uc \ + UserContext (\r. 
(registers_C uc).[unat (register_from_H r)])" + +lemma (in kernel_m) ccontext_rel_to_C: + "ccontext_relation uc (to_user_context_C uc)" + unfolding ccontext_relation_def to_user_context_C_def cregs_relation_def + by (clarsimp simp: register_to_H_def inv_def) definition getContext_C :: "tcb_C ptr \ cstate \ user_context" @@ -147,7 +143,12 @@ where lemma from_user_context_C: "ccontext_relation uc uc' \ from_user_context_C uc' = uc" - by (auto simp: ccontext_relation_def from_user_context_C_def) + unfolding ccontext_relation_def cregs_relation_def + apply (cases uc) + apply (auto simp: from_user_context_C_def) + done + +end context kernel_m begin @@ -639,25 +640,50 @@ lemma tcb_queue_rel'_unique: apply (erule(2) tcb_queue_rel_unique) done -definition - cready_queues_to_H - :: "(tcb_C ptr \ tcb_C) \ (tcb_queue_C[num_tcb_queues]) \ word8 \ word8 \ word32 list" +definition tcb_queue_C_to_tcb_queue :: "tcb_queue_C \ tcb_queue" where + "tcb_queue_C_to_tcb_queue q \ + TcbQueue (if head_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (head_C q))) + (if end_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (end_C q)))" + +definition cready_queues_to_H :: + "tcb_queue_C[num_tcb_queues] \ (domain \ priority \ ready_queue)" where - "cready_queues_to_H h_tcb cs \ \(qdom, prio). if ucast minDom \ qdom \ qdom \ ucast maxDom - \ ucast seL4_MinPrio \ prio \ prio \ ucast seL4_MaxPrio - then THE aq. let cqueue = index cs (cready_queues_index_to_C qdom prio) - in sched_queue_relation' h_tcb aq (head_C cqueue) (StateRelation_C.end_C cqueue) - else []" + "cready_queues_to_H cs \ + \(qdom, prio). + if qdom \ maxDomain \ prio \ maxPriority + then let cqueue = index cs (cready_queues_index_to_C qdom prio) + in tcb_queue_C_to_tcb_queue cqueue + else TcbQueue None None" lemma cready_queues_to_H_correct: - "cready_queues_relation (clift s) cs as \ - cready_queues_to_H (clift s) cs = as" - apply (clarsimp simp: cready_queues_to_H_def cready_queues_relation_def - fun_eq_iff) - apply (rule the_equality) - apply simp - apply (clarsimp simp: Let_def) - apply (rule_tac hp="clift s" in tcb_queue_rel'_unique, simp_all add: lift_t_NULL) + "\cready_queues_relation (ksReadyQueues s) (ksReadyQueues_' ch); + no_0_obj' s; ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ cready_queues_to_H (ksReadyQueues_' ch) = ksReadyQueues s" + apply (clarsimp simp: cready_queues_to_H_def cready_queues_relation_def Let_def) + apply (clarsimp simp: fun_eq_iff) + apply (rename_tac d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (rule conjI) + apply (clarsimp simp: tcb_queue_C_to_tcb_queue_def ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (case_tac "tcbQueueHead (ksReadyQueues s (d, p)) = None") + apply (clarsimp simp: tcb_queue.expand) + apply clarsimp + apply (rename_tac queue_head queue_end) + apply (prop_tac "tcb_at' queue_head s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (prop_tac "tcb_at' queue_end s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (drule kernel.tcb_at_not_NULL)+ + apply (fastforce simp: tcb_queue.expand kernel.ctcb_ptr_to_ctcb_ptr) + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits; 
+ metis tcb_queue.exhaust_sel word_not_le) done (* showing that cpspace_relation is actually unique >>>*) @@ -688,9 +714,16 @@ lemma cpspace_cte_relation_unique: lemma inj_tcb_ptr_to_ctcb_ptr: "inj tcb_ptr_to_ctcb_ptr" by (simp add: inj_on_def tcb_ptr_to_ctcb_ptr_def) +lemma cregs_relation_imp_eq: + "cregs_relation f x \ cregs_relation g x \ f=g" + by (auto simp: cregs_relation_def) + lemma ccontext_relation_imp_eq: "ccontext_relation f x \ ccontext_relation g x \ f=g" - by (rule ext) (simp add: ccontext_relation_def) + unfolding ccontext_relation_def + apply (cases f, cases g) + apply (auto dest: cregs_relation_imp_eq) + done lemma carch_tcb_relation_imp_eq: "carch_tcb_relation f x \ carch_tcb_relation g x \ f = g" @@ -766,49 +799,93 @@ lemma cthread_state_rel_imp_eq: "cthread_state_relation x z \ cthread_state_relation y z \ x=y" apply (simp add: cthread_state_relation_def split_def) apply (cases x) - apply (cases y, simp_all add: ThreadState_BlockedOnReceive_def - ThreadState_BlockedOnReply_def ThreadState_BlockedOnNotification_def - ThreadState_Running_def ThreadState_Inactive_def - ThreadState_IdleThreadState_def ThreadState_BlockedOnSend_def - ThreadState_Restart_def)+ + apply (cases y, simp_all add: ThreadState_defs)+ done -lemma ksPSpace_valid_objs_tcbBoundNotification_nonzero: - "\s. ksPSpace s = ah \ no_0_obj' s \ valid_objs' s - \ map_to_tcbs ah p = Some tcb \ tcbBoundNotification tcb \ Some 0" +lemma map_to_tcbs_Some_refs_nonzero: + "\map_to_tcbs (ksPSpace s) p = Some tcb; no_0_obj' s; valid_objs' s\ + \ tcbBoundNotification tcb \ Some 0 + \ tcbSchedPrev tcb \ Some 0 + \ tcbSchedNext tcb \ Some 0" + supply word_neq_0_conv[simp del] apply (clarsimp simp: map_comp_def split: option.splits) - apply (erule(1) valid_objsE') - apply (clarsimp simp: projectKOs valid_obj'_def valid_tcb'_def) + apply (erule (1) valid_objsE') + apply (fastforce simp: projectKOs valid_obj'_def valid_tcb'_def) done +lemma ccontext_relation_imp_eq2: + "\ccontext_relation (atcbContextGet t) x; ccontext_relation (atcbContextGet t') x\ \ t = t'" + by (fastforce simp: atcbContextGet_def arch_tcb.expand ccontext_relation_imp_eq) + +lemma tcb_ptr_to_ctcb_ptr_inj: + "tcb_ptr_to_ctcb_ptr x = tcb_ptr_to_ctcb_ptr y \ x = y" + by (auto simp: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) + +lemma + assumes "pspace_aligned' as" "pspace_distinct' as" "valid_tcb' atcb as" + shows tcb_at'_tcbBoundNotification: + "bound (tcbBoundNotification atcb) \ ntfn_at' (the (tcbBoundNotification atcb)) as" + and tcb_at'_tcbSchedPrev: + "tcbSchedPrev atcb \ None \ tcb_at' (the (tcbSchedPrev atcb)) as" + and tcb_at'_tcbSchedNext: + "tcbSchedNext atcb \ None \ tcb_at' (the (tcbSchedNext atcb)) as" + using assms + by (clarsimp simp: valid_tcb'_def obj_at'_def)+ + lemma cpspace_tcb_relation_unique: - assumes tcbs: "cpspace_tcb_relation ah ch" "cpspace_tcb_relation ah' ch" - and vs: "\s. ksPSpace s = ah \ no_0_obj' s \ valid_objs' s" - and vs': "\s. ksPSpace s = ah' \ no_0_obj' s \ valid_objs' s" - assumes ctes: " \tcb tcb'. (\p. map_to_tcbs ah p = Some tcb \ - map_to_tcbs ah' p = Some tcb') \ - (\x\ran tcb_cte_cases. fst x tcb' = fst x tcb)" - shows "map_to_tcbs ah' = map_to_tcbs ah" + assumes tcbs: "cpspace_tcb_relation (ksPSpace as) ch" "cpspace_tcb_relation (ksPSpace as') ch" + assumes vs: "no_0_obj' as" "valid_objs' as" + assumes vs': "no_0_obj' as'" "valid_objs' as'" + assumes ad: "pspace_aligned' as" "pspace_distinct' as" + assumes ad': "pspace_aligned' as'" "pspace_distinct' as'" + assumes ctes: "\tcb tcb'. (\p. 
map_to_tcbs (ksPSpace as) p = Some tcb \ + map_to_tcbs (ksPSpace as') p = Some tcb') \ + (\x\ran tcb_cte_cases. fst x tcb' = fst x tcb)" + shows "map_to_tcbs (ksPSpace as') = map_to_tcbs (ksPSpace as)" using tcbs(2) tcbs(1) apply (clarsimp simp add: cmap_relation_def) apply (drule inj_image_inv[OF inj_tcb_ptr_to_ctcb_ptr])+ apply (simp add: tcb_ptr_to_ctcb_ptr_def[abs_def] ctcb_offset_def) apply (rule ext) - apply (case_tac "x:dom (map_to_tcbs ah)") + apply (case_tac "x \ dom (map_to_tcbs (ksPSpace as))") apply (drule bspec, assumption)+ apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) apply clarsimp apply (rename_tac p x y) apply (cut_tac ctes) apply (drule_tac x=x in spec, drule_tac x=y in spec, erule impE, fastforce) - apply (frule ksPSpace_valid_objs_tcbBoundNotification_nonzero[OF vs]) - apply (frule ksPSpace_valid_objs_tcbBoundNotification_nonzero[OF vs']) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs]) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs']) + apply (rename_tac atcb atcb') + apply (prop_tac "valid_tcb' atcb as") + apply (fastforce intro: vs ad map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (prop_tac "valid_tcb' atcb' as'") + apply (fastforce intro: vs' ad' map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (frule tcb_at'_tcbSchedPrev[OF ad]) + apply (frule tcb_at'_tcbSchedPrev[OF ad']) + apply (frule tcb_at'_tcbSchedNext[OF ad]) + apply (frule tcb_at'_tcbSchedNext[OF ad']) apply (thin_tac "map_to_tcbs x y = Some z" for x y z)+ - apply (case_tac x, case_tac y, case_tac "the (clift ch (tcb_Ptr (p+0x100)))") + apply (case_tac "the (clift ch (tcb_Ptr (p + 2 ^ ctcb_size_bits)))") apply (clarsimp simp: ctcb_relation_def ran_tcb_cte_cases) - apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.splits) - apply (auto simp: cfault_rel_imp_eq cthread_state_rel_imp_eq carch_tcb_relation_imp_eq - ccontext_relation_imp_eq up_ucast_inj_eq ctcb_size_bits_def) + apply (clarsimp simp: option_to_ctcb_ptr_def option_to_ptr_def option_to_0_def) + apply (rule tcb.expand) + apply clarsimp + apply (intro conjI) + apply (simp add: cthread_state_rel_imp_eq) + apply (simp add: cfault_rel_imp_eq) + apply (case_tac "tcbBoundNotification atcb'", case_tac "tcbBoundNotification atcb"; clarsimp) + apply (clarsimp split: option.splits) + apply (case_tac "tcbSchedPrev atcb'"; case_tac "tcbSchedPrev atcb"; clarsimp) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (case_tac "tcbSchedNext atcb'"; case_tac "tcbSchedNext atcb"; clarsimp) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (force simp: carch_tcb_relation_def ccontext_relation_imp_eq2) + apply auto done lemma tcb_queue_rel_clift_unique: @@ -839,10 +916,6 @@ lemma ksPSpace_valid_pspace_ntfnBoundTCB_nonzero: apply (clarsimp simp: projectKOs valid_obj'_def valid_ntfn'_def) done -lemma tcb_ptr_to_ctcb_ptr_inj: - "tcb_ptr_to_ctcb_ptr x = tcb_ptr_to_ctcb_ptr y \ x = y" - by (auto simp: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) - lemma cpspace_ntfn_relation_unique: assumes ntfns: "cpspace_ntfn_relation ah ch" "cpspace_ntfn_relation ah' ch" and vs: "\s. 
ksPSpace s = ah \ valid_pspace' s" @@ -1094,8 +1167,8 @@ proof - OF valid_objs'_imp_wf_asid_pool'[OF valid_objs] valid_objs'_imp_wf_asid_pool'[OF valid_objs']]) apply (drule (1) cpspace_tcb_relation_unique) - apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs') - apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs') + apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs')+ + apply (fastforce intro: aligned distinct aligned' distinct')+ apply (intro allI impI,elim exE conjE) apply (rule_tac p=p in map_to_ctes_tcb_ctes, assumption) apply (frule (1) map_to_ko_atI[OF _ aligned distinct]) @@ -1303,7 +1376,7 @@ where ksDomSchedule = cDomSchedule_to_H kernel_all_global_addresses.ksDomSchedule, ksCurDomain = ucast (ksCurDomain_' s), ksDomainTime = ksDomainTime_' s, - ksReadyQueues = cready_queues_to_H (clift (t_hrs_' s)) (ksReadyQueues_' s), + ksReadyQueues = cready_queues_to_H (ksReadyQueues_' s), ksReadyQueuesL1Bitmap = cbitmap_L1_to_H (ksReadyQueuesL1Bitmap_' s), ksReadyQueuesL2Bitmap = cbitmap_L2_to_H (ksReadyQueuesL2Bitmap_' s), ksCurThread = ctcb_ptr_to_tcb_ptr (ksCurThread_' s), @@ -1319,16 +1392,16 @@ where lemma (in kernel_m) cstate_to_H_correct: assumes valid: "valid_state' as" assumes cstate_rel: "cstate_relation as cs" + assumes rdyqs: "ksReadyQueues_asrt as" shows "cstate_to_H cs = as \ksMachineState:= observable_memory (ksMachineState as) (user_mem' as)\" apply (subgoal_tac "cstate_to_machine_H cs = observable_memory (ksMachineState as) (user_mem' as)") apply (rule kernel_state.equality, simp_all add: cstate_to_H_def) - apply (rule cstate_to_pspace_H_correct) + apply (rule cstate_to_pspace_H_correct) using valid apply (simp add: valid_state'_def) using cstate_rel valid apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def - observable_memory_def valid_state'_def - valid_pspace'_def) + observable_memory_def valid_state'_def valid_pspace'_def) using cstate_rel apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def prod_eq_iff) using cstate_rel @@ -1352,8 +1425,13 @@ lemma (in kernel_m) cstate_to_H_correct: using cstate_rel apply (clarsimp simp: cstate_relation_def Let_def) apply (rule cready_queues_to_H_correct) - using cstate_rel - apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel rdyqs + apply (fastforce intro!: cready_queues_to_H_correct + simp: cstate_relation_def Let_def) + using valid apply (fastforce simp: valid_state'_def) + using rdyqs apply fastforce + using valid apply (fastforce simp: valid_state'_def) + using valid apply (fastforce simp: valid_state'_def) using cstate_rel apply (clarsimp simp: cstate_relation_def Let_def) using cstate_rel diff --git a/proof/crefine/ARM/ArchMove_C.thy b/proof/crefine/ARM/ArchMove_C.thy index 6dcd98e180..dd325534e7 100644 --- a/proof/crefine/ARM/ArchMove_C.thy +++ b/proof/crefine/ARM/ArchMove_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * Copyright 2014, General Dynamics C4 Systems * @@ -65,7 +66,7 @@ lemma setCTE_asidpool': "\ ko_at' (ASIDPool pool) p \ setCTE c p' \\_. 
ko_at' (ASIDPool pool) p\" apply (clarsimp simp: setCTE_def) apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad) apply (frule updateObject_type) apply (clarsimp simp: obj_at'_def projectKOs) @@ -82,30 +83,17 @@ lemma setCTE_asidpool': lemma empty_fail_findPDForASID[iff]: "empty_fail (findPDForASID asid)" - apply (simp add: findPDForASID_def liftME_def) - apply (intro empty_fail_bindE, simp_all split: option.split) - apply (simp add: assertE_def split: if_split) - apply (simp add: assertE_def split: if_split) - apply (simp add: empty_fail_getObject) - apply (simp add: assertE_def liftE_bindE checkPDAt_def split: if_split) - done + unfolding findPDForASID_def checkPDAt_def + by (wpsimp wp: empty_fail_getObject) lemma empty_fail_findPDForASIDAssert[iff]: "empty_fail (findPDForASIDAssert asid)" - apply (simp add: findPDForASIDAssert_def catch_def - checkPDAt_def checkPDUniqueToASID_def - checkPDASIDMapMembership_def) - apply (intro empty_fail_bind, simp_all split: sum.split) - done + unfolding findPDForASIDAssert_def checkPDAt_def checkPDUniqueToASID_def checkPDASIDMapMembership_def + by (wpsimp wp: empty_fail_getObject) -crunches Arch.switchToThread - for valid_queues'[wp]: valid_queues' - (simp: crunch_simps ignore: clearExMonitor) crunches switchToIdleThread for ksCurDomain[wp]: "\s. P (ksCurDomain s)" -crunches switchToIdleThread, switchToThread - for valid_pspace'[wp]: valid_pspace' - (simp: crunch_simps) + crunches switchToThread for valid_arch_state'[wp]: valid_arch_state' @@ -209,7 +197,7 @@ lemma cap_case_isPageDirectoryCap: lemma empty_fail_loadWordUser[intro!, simp]: "empty_fail (loadWordUser x)" - by (simp add: loadWordUser_def ef_loadWord ef_dmo') + by (fastforce simp: loadWordUser_def ef_loadWord ef_dmo') lemma empty_fail_getMRs[iff]: "empty_fail (getMRs t buf mi)" @@ -219,30 +207,20 @@ lemma empty_fail_getReceiveSlots: "empty_fail (getReceiveSlots r rbuf)" proof - note - empty_fail_assertE[iff] - empty_fail_resolveAddressBits[iff] + empty_fail_resolveAddressBits[wp] + empty_fail_rethrowFailure[wp] + empty_fail_rethrowFailure[wp] show ?thesis - apply (clarsimp simp: getReceiveSlots_def loadCapTransfer_def split_def - split: option.split) - apply (rule empty_fail_bind) - apply (simp add: capTransferFromWords_def) - apply (simp add: emptyOnFailure_def unifyFailure_def) - apply (intro empty_fail_catch empty_fail_bindE empty_fail_rethrowFailure, - simp_all add: empty_fail_whenEs) - apply (simp_all add: lookupCap_def split_def lookupCapAndSlot_def - lookupSlotForThread_def liftME_def - getThreadCSpaceRoot_def locateSlot_conv bindE_assoc - lookupSlotForCNodeOp_def lookupErrorOnFailure_def - cong: if_cong) - apply (intro empty_fail_bindE, - simp_all add: getSlotCap_def) - apply (intro empty_fail_If empty_fail_bindE empty_fail_rethrowFailure impI, - simp_all add: empty_fail_whenEs rangeCheck_def) - done + unfolding getReceiveSlots_def loadCapTransfer_def lookupCap_def lookupCapAndSlot_def + by (wpsimp simp: emptyOnFailure_def unifyFailure_def lookupSlotForThread_def + capTransferFromWords_def getThreadCSpaceRoot_def locateSlot_conv bindE_assoc + lookupSlotForCNodeOp_def lookupErrorOnFailure_def rangeCheck_def) qed lemma user_getreg_rv: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb r)) t\ asUser t (getRegister r) \\rv s. P rv\" + "\obj_at' (\tcb. P ((user_regs \ atcbContextGet \ tcbArch) tcb r)) t\ + asUser t (getRegister r) + \\rv s. 
P rv\" apply (simp add: asUser_def split_def) apply (wp threadGet_wp) apply (clarsimp simp: obj_at'_def projectKOs getRegister_def in_monad atcbContextGet_def) @@ -255,6 +233,22 @@ crunches insertNewCap, Arch_createNewCaps, threadSet, Arch.createObject, setThre simp: unless_def updateObject_default_def crunch_simps ignore_del: preemptionPoint) +lemma addrFromPPtr_mask[simplified ARM.pageBitsForSize_simps]: + "n \ pageBitsForSize ARMSuperSection + \ addrFromPPtr ptr && mask n = ptr && mask n" + apply (simp add: addrFromPPtr_def) + apply (prop_tac "pptrBaseOffset AND mask n = 0") + apply (rule mask_zero[OF is_aligned_weaken[OF pptrBaseOffset_aligned]], simp) + apply (simp flip: mask_eqs(8)) + done + +(* this could be done as + lemmas addrFromPPtr_mask_5 = addrFromPPtr_mask[where n=5, simplified] + but that wouldn't give a sanity check of the n \ ... assumption disappearing *) +lemma addrFromPPtr_mask_5: + "addrFromPPtr ptr && mask 5 = ptr && mask 5" + by (rule addrFromPPtr_mask[where n=5, simplified]) + end end diff --git a/proof/crefine/ARM/Arch_C.thy b/proof/crefine/ARM/Arch_C.thy index b925d72fb7..e7f165b36f 100644 --- a/proof/crefine/ARM/Arch_C.thy +++ b/proof/crefine/ARM/Arch_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -49,11 +50,11 @@ lemma performPageTableInvocationUnmap_ccorres: apply (ctac add: unmapPageTable_ccorres) apply csymbr apply (simp add: storePTE_def swp_def) - apply (ctac add: clearMemory_PT_setObject_PTE_ccorres[unfolded dc_def]) + apply (ctac add: clearMemory_PT_setObject_PTE_ccorres) apply wp apply (simp del: Collect_const) apply (vcg exspec=unmapPageTable_modifies) - apply (simp add: to_bool_def) + apply simp apply (rule ccorres_return_Skip') apply (simp add: cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_lift_page_table_cap cap_to_H_def @@ -396,7 +397,9 @@ shows apply (rule ccorres_rhs_assoc2) apply (rule ccorres_abstract_cleanup) apply (rule ccorres_symb_exec_l) - apply (rule_tac P = "rva = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) + apply (rename_tac pcap) + apply (rule_tac P = "pcap = (capability.UntypedCap isdev frame pageBits idx)" + in ccorres_gen_asm) apply (simp add: hrs_htd_update del:fun_upd_apply) apply (rule ccorres_split_nothrow) @@ -531,10 +534,10 @@ shows pageBits_def split: if_split) apply (clarsimp simp: ARMSmallPageBits_def word_sle_def is_aligned_mask[symmetric] - ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + ghost_assertion_data_get_gs_clear_region) apply (subst ghost_assertion_size_logic_flex[unfolded o_def, rotated]) apply assumption - apply (simp add: ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + apply (simp add: ghost_assertion_data_get_gs_clear_region) apply (drule valid_global_refsD_with_objSize, clarsimp)+ apply (clarsimp simp: isCap_simps dest!: ccte_relation_ccap_relation) apply (cut_tac ptr=frame and bits=12 @@ -795,7 +798,7 @@ lemma decodeARMPageTableInvocation_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply simp @@ -837,7 +840,7 @@ lemma decodeARMPageTableInvocation_ccorres: slotcap_in_mem_def) apply (auto dest: ctes_of_valid')[1] apply (rule conjI) - apply (clarsimp simp: rf_sr_ksCurThread 
"StrictC'_thread_state_defs" + apply (clarsimp simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size ct_in_state'_def st_tcb_at'_def word_sle_def word_sless_def @@ -863,7 +866,7 @@ lemma decodeARMPageTableInvocation_ccorres: apply (subst array_assertion_abs_pd, erule conjI, simp add: unat_eq_0 unat_shiftr_le_bound pdBits_def pageBits_def pdeBits_def) apply (clarsimp simp: rf_sr_ksCurThread mask_def[where n=4] - "StrictC'_thread_state_defs" + ThreadState_defs ccap_relation_def cap_to_H_def cap_lift_page_table_cap word_bw_assocs shiftr_shiftl1 mask_def[where n=18]) @@ -888,8 +891,8 @@ lemma checkVPAlignment_spec: apply (clarsimp simp: mask_eq_iff_w2p word_size) apply (rule conjI) apply (simp add: pageBitsForSize_def split: vmpage_size.split) - apply (simp add: from_bool_def vmsz_aligned'_def is_aligned_mask - mask_def split: if_split) + apply (simp add: vmsz_aligned'_def is_aligned_mask mask_def + split: if_split) done definition @@ -1031,9 +1034,9 @@ lemma createSafeMappingEntries_PDE_ccorres: apply (clarsimp simp: pde_get_tag_alt cpde_relation_pde_case pde_tag_defs fst_throwError_returnOk pde_range_relation_def ptr_range_to_list_def - exception_defs isRight_def from_bool_def[where b=True] + exception_defs isRight_def syscall_error_rel_def syscall_error_to_H_cases) - apply (clarsimp simp: cpde_relation_def true_def false_def) + apply (clarsimp simp: cpde_relation_def) apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) apply (rule ccorres_rhs_assoc)+ @@ -1073,7 +1076,7 @@ lemma createSafeMappingEntries_PDE_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: if_1_0_0 return_def typ_heap_simps Let_def) apply (simp add: isPageTablePDE_def isSectionPDE_def - cpde_relation_pde_case from_bool_def) + cpde_relation_pde_case) apply (intro impI conjI disjCI2, simp_all add: array_assertion_shrink_right)[1] apply (clarsimp simp: pde_tag_defs split: if_split bool.split) apply (frule pde_pde_section_size_0_1[simplified pde_tag_defs, simplified], simp) @@ -1132,8 +1135,7 @@ lemma createSafeMappingEntries_PDE_ccorres: vm_attribs_relation_def superSectionPDEOffsets_def pdeBits_def from_bool_mask_simp[unfolded mask_def, simplified] - ptr_range_to_list_def upto_enum_step_def - o_def upto_enum_word + ptr_range_to_list_def upto_enum_step_def upto_enum_word cong: if_cong) apply (frule(1) page_directory_at_rf_sr, clarsimp) apply (frule array_ptr_valid_array_assertionD[OF h_t_valid_clift]) @@ -1141,7 +1143,7 @@ lemma createSafeMappingEntries_PDE_ccorres: ARMSectionBits_def word_0_sle_from_less pageBits_def) apply (rule conjI) - apply (simp add: cpde_relation_def true_def false_def) + apply (simp add: cpde_relation_def) apply (simp add: upt_def split: if_split) done @@ -1186,11 +1188,9 @@ lemma lookupPTSlot_le_0x3C: apply clarsimp apply (simp add: word_bits_def) apply simp - apply (simp add: ARM.ptrFromPAddr_def pptrBaseOffset_def) - apply (erule aligned_add_aligned) - apply (simp add: pptrBase_def ARM.physBase_def - physBase_def is_aligned_def) - apply (simp add: word_bits_def pteBits_def) + apply (rule is_aligned_ptrFromPAddr_n[rotated], simp) + apply (erule is_aligned_weaken) + apply (simp add: pteBits_def) done lemma pte_get_tag_exhaust: @@ -1275,7 +1275,7 @@ lemma createSafeMappingEntries_PTE_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk syscall_error_to_H_cases - syscall_error_rel_def exception_defs false_def) + syscall_error_rel_def exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: 
exception_defs) apply simp @@ -1373,7 +1373,7 @@ lemma createSafeMappingEntries_PTE_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply (wp injection_wp[OF refl]) @@ -1392,7 +1392,7 @@ lemma createSafeMappingEntries_PTE_ccorres: from_bool_mask_simp[unfolded mask_def, simplified]) apply (clarsimp simp: typ_heap_simps pte_range_relation_def ptr_range_to_list_def upto_enum_word) - apply (simp add: cpte_relation_def true_def false_def pte_tag_defs) + apply (simp add: cpte_relation_def pte_tag_defs) using pte_get_tag_exhaust apply (auto simp: vmsz_aligned'_def largePagePTEOffsets_def pteBits_def upt_def upto_enum_def upto_enum_step_def)[1] @@ -1413,7 +1413,7 @@ definition lemma valid_pte_slots_lift2: "\ \pt. \ page_table_at' pt \ f \ \_. page_table_at' pt \ \ \ \ valid_pte_slots'2 slots \ f \ \_. valid_pte_slots'2 slots \" - apply (cases slots, simp_all add: valid_pte_slots'2_def hoare_post_taut) + apply (cases slots, simp_all add: valid_pte_slots'2_def hoare_TrueI) apply clarsimp apply (wp hoare_vcg_ex_lift hoare_vcg_conj_lift | assumption)+ done @@ -1428,19 +1428,11 @@ definition lemma valid_pde_slots_lift2: "\ \pd. \ page_directory_at' pd \ f \ \_. page_directory_at' pd \ \ \ \ valid_pde_slots'2 slots \ f \ \_. valid_pde_slots'2 slots \" - apply (cases slots, simp_all add: valid_pde_slots'2_def hoare_post_taut) + apply (cases slots, simp_all add: valid_pde_slots'2_def hoare_TrueI) apply clarsimp apply (wp hoare_vcg_ex_lift hoare_vcg_conj_lift | assumption)+ done -lemma addrFromPPtr_mask_5: - "addrFromPPtr ptr && mask (5::nat) = ptr && mask (5::nat)" - apply (simp add:addrFromPPtr_def pptrBaseOffset_def - pptrBase_def physBase_def ARM.physBase_def) - apply word_bitwise - apply (simp add:mask_def) - done - lemma pteCheckIfMapped_ccorres: "ccorres (\rv rv'. rv = to_bool rv') ret__unsigned_long_' \ (UNIV \ {s. pte___ptr_to_struct_pte_C_' s = Ptr slot}) [] @@ -1454,7 +1446,7 @@ lemma pteCheckIfMapped_ccorres: apply clarsimp apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps' return_def) - apply (case_tac rv, simp_all add: to_bool_def isInvalidPTE_def pte_tag_defs pte_pte_invalid_def + apply (case_tac rv, simp_all add: isInvalidPTE_def pte_tag_defs pte_pte_invalid_def cpte_relation_def pte_pte_large_lift_def pte_get_tag_def pte_lift_def Let_def split: if_split_asm) @@ -1476,13 +1468,13 @@ lemma pdeCheckIfMapped_ccorres: (Call pdeCheckIfMapped_'proc)" apply (cinit lift: pde___ptr_to_struct_pde_C_') apply (rule ccorres_pre_getObject_pde) - apply (rule_tac P'="{s. \pde'. cslift s (pde_Ptr slot) = Some pde' \ cpde_relation rv pde'}" + apply (rule_tac P'="{s. \pde'. cslift s (pde_Ptr slot) = Some pde' \ cpde_relation pd pde'}" in ccorres_from_vcg_throws[where P="\s. 
True"]) apply simp_all apply clarsimp apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps' return_def) - apply (case_tac rv, simp_all add: to_bool_def cpde_relation_invalid isInvalidPDE_def + apply (case_tac pd, simp_all add: cpde_relation_invalid isInvalidPDE_def split: if_split) done @@ -1566,7 +1558,7 @@ lemma performPageInvocationMapPTE_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp:return_def) apply (rule wp_post_taut) - apply (simp add: to_bool_def) + apply simp apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp:return_def) @@ -1803,7 +1795,7 @@ lemma performPageInvocationMapPDE_ccorres: apply (clarsimp simp: pde_range_relation_def ptr_range_to_list_def) apply vcg apply simp - apply (wp valid_pde_slots_lift2) + apply (wpsimp wp: valid_pde_slots_lift2) apply clarsimp apply (clarsimp simp: pde_range_relation_def ptr_range_to_list_def) apply (rule order_less_le_trans) @@ -1827,12 +1819,12 @@ lemma performPageInvocationMapPDE_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp:return_def) apply wp [1] - apply (simp add: to_bool_def) + apply simp apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp:return_def) apply (wp hoare_vcg_const_imp_lift) [1] - apply (clarsimp simp: to_bool_def) + apply clarsimp apply (rule hoare_strengthen_post) apply (rule_tac Q'="\rv s. valid_pde_mappings' s \ valid_pde_slots'2 mapping s @@ -1898,8 +1890,6 @@ lemma setMRs_single: (* usually when we call setMR directly, we mean to only set a single message register which will fit in actual registers *) lemma setMR_as_setRegister_ccorres: - notes dc_simp[simp del] - shows "ccorres (\rv rv'. rv' = of_nat offset + 1) ret__unsigned_' (tcb_at' thread and K (TCB_H.msgRegisters ! offset = reg \ offset < length msgRegisters)) (UNIV \ \\reg = val\ @@ -1916,8 +1906,8 @@ lemma setMR_as_setRegister_ccorres: apply (ctac add: setRegister_ccorres) apply (rule ccorres_from_vcg_throws[where P'=UNIV and P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setRegister_modifies) apply (clarsimp simp: length_msgRegisters n_msgRegisters_def not_le conj_commute) apply (subst msgRegisters_ccorres[symmetric]) @@ -1926,7 +1916,7 @@ lemma setMR_as_setRegister_ccorres: done lemma performPageGetAddress_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart)) @@ -1952,8 +1942,8 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) apply (rule_tac P="\s. 
ksCurThread s = thread" in ccorres_cross_over_guard) @@ -1975,8 +1965,8 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp apply (vcg exspec=setRegister_modifies) @@ -1988,40 +1978,35 @@ lemma performPageGetAddress_ccorres: Kernel_C.msgInfoRegister_def Kernel_C.R1_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply clarsimp apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply vcg apply clarsimp apply (rule conseqPre, vcg) apply clarsimp - apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' rf_sr_ksCurThread msgRegisters_unfold seL4_MessageInfo_lift_def message_info_to_H_def mask_def) apply (cases isCall) apply (auto simp: ARM.badgeRegister_def ARM_H.badgeRegister_def Kernel_C.badgeRegister_def - Kernel_C.R0_def fromPAddr_def ThreadState_Running_def + Kernel_C.R0_def fromPAddr_def ThreadState_defs pred_tcb_at'_def obj_at'_def projectKOs ct_in_state'_def) done lemma vmsz_aligned_addrFromPPtr': - "vmsz_aligned' (addrFromPPtr p) sz - = vmsz_aligned' p sz" - apply (simp add: vmsz_aligned'_def addrFromPPtr_def - ARM.addrFromPPtr_def) - apply (subgoal_tac "is_aligned pptrBaseOffset (pageBitsForSize sz)") - apply (rule iffI) - apply (drule(1) aligned_add_aligned) - apply (simp add: pageBitsForSize_def word_bits_def split: vmpage_size.split) - apply simp - apply (erule(1) aligned_sub_aligned) - apply (simp add: pageBitsForSize_def word_bits_def split: vmpage_size.split) - apply (simp add: pageBitsForSize_def pptrBaseOffset_def pptrBase_def - physBase_def ARM.physBase_def is_aligned_def - split: vmpage_size.split) + "vmsz_aligned' (addrFromPPtr p) sz = vmsz_aligned' p sz" + apply (simp add: vmsz_aligned'_def) + apply (rule iffI) + apply (simp add: addrFromPPtr_def is_aligned_mask) + apply (prop_tac "pptrBaseOffset AND mask (pageBitsForSize sz) = 0") + apply (rule mask_zero[OF is_aligned_weaken[OF pptrBaseOffset_aligned]], simp) + apply (simp flip: mask_eqs(8)) + apply (erule is_aligned_addrFromPPtr_n) + apply (cases sz; clarsimp) done lemmas vmsz_aligned_addrFromPPtr @@ -2172,7 +2157,7 @@ lemma pte_get_tag_alt: by (auto simp add: pte_lift_def Let_def split: if_split_asm) definition - to_option :: "('a \ bool) \ 'a \ 'a option" + to_option :: "('a \ bool) \ 'a \ 'a option" (* FIXME: consider moving to Lib *) where "to_option f x \ if f x then Some x else None" @@ -2187,12 +2172,12 @@ where lemma resolve_ret_rel_None[simp]: "resolve_ret_rel None y = (valid_C y = scast false)" - by (clarsimp simp: resolve_ret_rel_def o_def to_option_def false_def to_bool_def split: if_splits) + by (clarsimp simp: resolve_ret_rel_def to_option_def to_bool_def split: if_splits) lemma resolve_ret_rel_Some: "\valid_C y = scast true; frameSize_C y = framesize_from_H (fst x); snd x = frameBase_C y\ \ resolve_ret_rel (Some x) y" - by (clarsimp simp: resolve_ret_rel_def o_def to_option_def true_def) + by (clarsimp simp: resolve_ret_rel_def to_option_def) lemma resolveVAddr_ccorres: 
"ccorres resolve_ret_rel ret__struct_resolve_ret_C_' @@ -2462,7 +2447,7 @@ lemma decodeARMFrameInvocation_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply (wp injection_wp[OF refl]) @@ -2619,7 +2604,7 @@ lemma decodeARMFrameInvocation_ccorres: apply csymbr apply (simp add: ARM.pptrBase_def hd_conv_nth length_ineq_not_Nil) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[unfolded id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* Doesn't throw case *) apply (drule_tac s="Some y" in sym, @@ -2635,7 +2620,7 @@ lemma decodeARMFrameInvocation_ccorres: apply (prop_tac "generic_frame_cap_get_capFMappedASID_CL (cap_lift cap) = capPDMappedASID_CL (cap_page_directory_cap_lift rv')") apply (clarsimp simp: cap_lift_page_directory_cap cap_to_H_def - to_bool_def cap_page_directory_cap_lift_def + cap_page_directory_cap_lift_def elim!: ccap_relationE split: if_splits) apply (clarsimp, ccorres_rewrite) apply (csymbr, clarsimp simp: hd_conv_nth length_ineq_not_Nil, ccorres_rewrite) @@ -2646,7 +2631,6 @@ lemma decodeARMFrameInvocation_ccorres: simp add: ARM.pptrBase_def ARM.pptrBase_def hd_conv_nth length_ineq_not_Nil, ccorres_rewrite) - apply (fold dc_def) apply (rule ccorres_return_Skip, clarsimp) apply (subgoal_tac "cap_get_tag cap = SCAST(32 signed \ 32) cap_frame_cap \ cap_get_tag cap = SCAST(32 signed \ 32) cap_small_frame_cap", @@ -2667,7 +2651,7 @@ lemma decodeARMFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (clarsimp simp: cap_lift_page_directory_cap cap_to_H_def - to_bool_def cap_page_directory_cap_lift_def + cap_page_directory_cap_lift_def elim!: ccap_relationE split: if_split) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) @@ -2744,7 +2728,7 @@ lemma decodeARMFrameInvocation_ccorres: apply (rule_tac P'="{s. 
find_ret = errstate s}" in ccorres_from_vcg_throws[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk exception_defs syscall_error_rel_def - syscall_error_to_H_cases false_def) + syscall_error_to_H_cases) apply (erule lookup_failure_rel_fault_lift[rotated], simp add: exception_defs) apply simp apply (wp injection_wp[OF refl] | wp (once) hoare_drop_imps)+ @@ -2804,7 +2788,7 @@ lemma decodeARMFrameInvocation_ccorres: done (* C side *) - apply (clarsimp simp: rf_sr_ksCurThread "StrictC'_thread_state_defs" mask_eq_iff_w2p + apply (clarsimp simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size word_less_nat_alt from_bool_0 excaps_map_def cte_wp_at_ctes_of) apply (frule ctes_of_valid', clarsimp) apply (drule_tac t="cteCap ctea" in sym) @@ -3041,11 +3025,10 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply (simp add:if_to_top_of_bind if_to_top_of_bindE) apply (rule ccorres_if_cond_throws[rotated -1,where Q=\ and Q'=\]) apply vcg - apply (clarsimp dest!:cap_lift_page_directory_cap - simp : cap_page_directory_cap_lift_def - cap_to_H_def to_bool_def Let_def - elim!: ccap_relationE - split: cap_CL.splits if_splits) + apply (clarsimp dest!: cap_lift_page_directory_cap + simp : cap_page_directory_cap_lift_def cap_to_H_def to_bool_def Let_def + elim!: ccap_relationE + split: cap_CL.splits if_splits) apply (simp add:injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add:syscall_error_to_H_cases) @@ -3081,16 +3064,17 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply simp apply simp apply ceqv - apply (simp add:injection_handler_If - injection_handler_returnOk if_to_top_of_bind if_to_top_of_bindE) + apply (simp add: injection_handler_If injection_handler_returnOk if_to_top_of_bind + if_to_top_of_bindE) apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws[rotated -1]) apply vcg - apply (clarsimp simp:resolve_ret_rel_def to_bool_def to_option_def - rel_option_alt_def not_le split:option.splits if_splits) - apply (simp add:invocationCatch_def ARM_H.performInvocation_def - performInvocation_def performARMMMUInvocation_def) - apply (simp add:performPageDirectoryInvocation_def - liftE_case_sum liftE_bindE liftE_alternative) + apply (clarsimp simp: resolve_ret_rel_def to_bool_def to_option_def + rel_option_alt_def not_le + split: option.splits if_splits) + apply (simp add: invocationCatch_def ARM_H.performInvocation_def + performInvocation_def performARMMMUInvocation_def) + apply (simp add: performPageDirectoryInvocation_def + liftE_case_sum liftE_bindE liftE_alternative) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_alternative2) apply (simp add:returnOk_liftE[symmetric]) @@ -3115,7 +3099,7 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def exception_defs - syscall_error_to_H_cases false_def) + syscall_error_to_H_cases) apply (clarsimp simp: page_base_def resolve_ret_rel_def rel_option_alt_def to_option_def mask_def[unfolded shiftl_1,symmetric] split: option.splits if_splits) @@ -3150,8 +3134,8 @@ lemma decodeARMPageDirectoryInvocation_ccorres: st' \ Structures_H.thread_state.Inactive \ st' \ Structures_H.thread_state.IdleThreadState) thread and (\s. 
thread \ ksIdleThread s \ (obj_at' tcbQueued thread s \ st_tcb_at' runnable' thread s))"]]) - apply (clarsimp simp: invs_valid_objs' invs_sch_act_wf' - valid_tcb_state'_def invs_queues) + apply (clarsimp simp: invs_valid_objs' invs_sch_act_wf' valid_tcb_state'_def + invs_pspace_aligned' invs_pspace_distinct') apply (rule conjI) apply (erule flush_range_le) apply (simp add:linorder_not_le) @@ -3171,7 +3155,7 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply simp @@ -3221,67 +3205,65 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply (case_tac st,simp+) apply (frule cap_get_tag_isCap_unfolded_H_cap(15)) apply (clarsimp simp: cap_lift_page_directory_cap hd_conv_nth - cap_lift_page_table_cap typ_heap_simps' - cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def cap_page_table_cap_lift_def - typ_heap_simps' shiftl_t2n[where n=2] field_simps - elim!: ccap_relationE) + cap_lift_page_table_cap typ_heap_simps' + cap_to_H_def cap_page_directory_cap_lift_def + cap_page_table_cap_lift_def + typ_heap_simps' shiftl_t2n[where n=2] field_simps + elim!: ccap_relationE) apply (intro conjI impI allI) - apply (clarsimp simp:ThreadState_Restart_def less_mask_eq rf_sr_ksCurThread - resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 - to_option_def rel_option_alt_def to_bool_def typ_heap_simps' - split:option.splits if_splits - | fastforce simp: mask_def - | rule flushtype_relation_triv,simp add:isPageFlush_def isPDFlushLabel_def - | rule word_of_nat_less,simp add: pbfs_less)+ + apply (clarsimp simp: ThreadState_defs less_mask_eq rf_sr_ksCurThread + resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 + to_option_def rel_option_alt_def to_bool_def typ_heap_simps' + split: option.splits if_splits + | fastforce simp: mask_def + | rule flushtype_relation_triv,simp add:isPageFlush_def isPDFlushLabel_def + | rule word_of_nat_less,simp add: pbfs_less)+ apply (frule cap_get_tag_isCap_unfolded_H_cap(15)) apply (clarsimp simp: cap_lift_page_directory_cap hd_conv_nth - cap_lift_page_table_cap - cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def cap_page_table_cap_lift_def - typ_heap_simps' shiftl_t2n[where n=2] field_simps - elim!: ccap_relationE) + cap_lift_page_table_cap + cap_to_H_def cap_page_directory_cap_lift_def + cap_page_table_cap_lift_def + typ_heap_simps' shiftl_t2n[where n=2] field_simps + elim!: ccap_relationE) apply (intro conjI impI allI) - apply (clarsimp simp:ThreadState_Restart_def less_mask_eq rf_sr_ksCurThread - resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 - to_option_def rel_option_alt_def to_bool_def - typ_heap_simps' - split:option.splits if_splits - | fastforce simp: mask_def - | rule flushtype_relation_triv,simp add:isPageFlush_def isPDFlushLabel_def - | rule word_of_nat_less,simp add: pbfs_less)+ + apply (clarsimp simp: less_mask_eq rf_sr_ksCurThread + resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 + to_option_def rel_option_alt_def to_bool_def + typ_heap_simps' + split: option.splits if_splits + | fastforce simp: mask_def + | rule flushtype_relation_triv, simp add:isPageFlush_def isPDFlushLabel_def + | rule word_of_nat_less, simp add: pbfs_less)+ apply (frule cap_get_tag_isCap_unfolded_H_cap(15)) apply (clarsimp simp: 
cap_lift_page_directory_cap hd_conv_nth - cap_lift_page_table_cap - cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def cap_page_table_cap_lift_def - typ_heap_simps' shiftl_t2n[where n=2] field_simps - elim!: ccap_relationE) + cap_lift_page_table_cap + cap_to_H_def cap_page_directory_cap_lift_def + to_bool_def cap_page_table_cap_lift_def + typ_heap_simps' shiftl_t2n[where n=2] field_simps + elim!: ccap_relationE) apply (intro conjI impI allI) - apply (clarsimp simp:ThreadState_Restart_def less_mask_eq rf_sr_ksCurThread - resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 - to_option_def rel_option_alt_def to_bool_def - typ_heap_simps' - split:option.splits if_splits - | fastforce simp: mask_def - | rule flushtype_relation_triv,simp add:isPageFlush_def isPDFlushLabel_def - | rule word_of_nat_less,simp add: pbfs_less)+ (* slow 20 secs *) + apply (clarsimp simp: ThreadState_defs less_mask_eq rf_sr_ksCurThread + resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 + to_option_def rel_option_alt_def typ_heap_simps' + split: option.splits if_splits + | fastforce simp: mask_def + | rule flushtype_relation_triv, simp add:isPageFlush_def isPDFlushLabel_def + | rule word_of_nat_less, simp add: pbfs_less)+ (* slow 20 secs *) apply (frule cap_get_tag_isCap_unfolded_H_cap(15)) apply (clarsimp simp: cap_lift_page_directory_cap hd_conv_nth - cap_lift_page_table_cap - cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def cap_page_table_cap_lift_def - typ_heap_simps' shiftl_t2n[where n=2] field_simps - elim!: ccap_relationE) + cap_lift_page_table_cap + cap_to_H_def cap_page_directory_cap_lift_def + to_bool_def cap_page_table_cap_lift_def + typ_heap_simps' shiftl_t2n[where n=2] field_simps + elim!: ccap_relationE) apply (intro conjI impI allI) - by (clarsimp simp:ThreadState_Restart_def less_mask_eq rf_sr_ksCurThread - resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 - to_option_def rel_option_alt_def to_bool_def - typ_heap_simps' - split:option.splits if_splits - | fastforce simp: mask_def - | rule flushtype_relation_triv,simp add:isPageFlush_def isPDFlushLabel_def - | rule word_of_nat_less,simp add: pbfs_less)+ + by (clarsimp simp: less_mask_eq rf_sr_ksCurThread + resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 + to_option_def rel_option_alt_def typ_heap_simps' + split: option.splits if_splits + | fastforce simp: mask_def + | rule flushtype_relation_triv, simp add:isPageFlush_def isPDFlushLabel_def + | rule word_of_nat_less, simp add: pbfs_less)+ lemma Arch_decodeInvocation_ccorres: "interpret_excaps extraCaps' = excaps_map extraCaps @@ -3433,14 +3415,13 @@ lemma Arch_decodeInvocation_ccorres: apply (cut_tac P="\y. 
y < i_' x + 1 = rhs y" for rhs in allI, rule less_x_plus_1) apply (clarsimp simp: asid_high_bits_def) - apply (clarsimp simp: rf_sr_armKSASIDTable from_bool_def + apply (clarsimp simp: rf_sr_armKSASIDTable asid_high_bits_word_bits option_to_ptr_def option_to_0_def order_less_imp_le linorder_not_less order_antisym[OF inc_le]) - apply (clarsimp simp: true_def false_def - split: option.split if_split) + apply (clarsimp split: option.split if_split) apply (simp add: asid_high_bits_def word_le_nat_alt word_less_nat_alt unat_add_lem[THEN iffD1]) apply auto[1] @@ -3460,7 +3441,6 @@ lemma Arch_decodeInvocation_ccorres: word_sless_def if_1_0_0 from_bool_0 rf_sr_armKSASIDTable[where n=0, simplified]) apply (simp add: asid_high_bits_def option_to_ptr_def option_to_0_def - from_bool_def split: option.split if_split) apply fastforce apply ceqv @@ -3573,8 +3553,7 @@ lemma Arch_decodeInvocation_ccorres: del: Collect_const) apply (simp add: if_1_0_0 from_bool_0 hd_conv_nth length_ineq_not_Nil del: Collect_const) - apply (clarsimp simp: eq_Nil_null[symmetric] asid_high_bits_word_bits hd_conv_nth - ThreadState_Restart_def mask_def) + apply (clarsimp simp: eq_Nil_null[symmetric] asid_high_bits_word_bits hd_conv_nth mask_def) apply wp+ apply (simp add: cap_get_tag_isCap) apply (rule HoarePartial.SeqSwap) @@ -3695,7 +3674,7 @@ lemma Arch_decodeInvocation_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def exception_defs - syscall_error_to_H_cases false_def) + syscall_error_to_H_cases) apply (simp add: lookup_fault_lift_invalid_root) apply csymbr apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws[rotated -1]) @@ -3744,9 +3723,7 @@ lemma Arch_decodeInvocation_ccorres: = capASIDBase cp") apply (subgoal_tac "\x. 
(x < (i_' xb + 1)) = (x < i_' xb \ x = i_' xb)") - apply (clarsimp simp: inc_le from_bool_def typ_heap_simps - asid_low_bits_def not_less field_simps - false_def + apply (clarsimp simp: inc_le typ_heap_simps asid_low_bits_def not_less field_simps split: if_split bool.splits) apply unat_arith apply (rule iffI) @@ -3797,11 +3774,10 @@ lemma Arch_decodeInvocation_ccorres: word_sless_def word_sle_def) apply (erule cmap_relationE1[OF rf_sr_cpspace_asidpool_relation], erule ko_at_projectKO_opt) - apply (clarsimp simp: typ_heap_simps from_bool_def split: if_split) + apply (clarsimp simp: typ_heap_simps split: if_split) apply (simp add: cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_lift_asid_pool_cap cap_to_H_def - cap_asid_pool_cap_lift_def false_def - ucast_minus ucast_nat_def + cap_asid_pool_cap_lift_def ucast_minus ucast_nat_def elim!: ccap_relationE) apply ceqv apply (rule ccorres_Guard_Seq)+ @@ -3885,9 +3861,12 @@ lemma Arch_decodeInvocation_ccorres: apply (clarsimp simp: ex_cte_cap_wp_to'_def cte_wp_at_ctes_of invs_sch_act_wf' dest!: isCapDs(1)) apply (intro conjI) - apply (simp add: Invariants_H.invs_queues) - apply (simp add: valid_tcb_state'_def) - apply (fastforce elim!: pred_tcb'_weakenE dest!:st_tcb_at_idle_thread') + apply (simp add: valid_tcb_state'_def) + apply (fastforce elim!: pred_tcb'_weakenE dest!:st_tcb_at_idle_thread') + apply fastforce + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) + apply (rename_tac obj) + apply (case_tac "tcbState obj", (simp add: runnable'_def)+)[1] apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (rename_tac obj) apply (case_tac "tcbState obj", (simp add: runnable'_def)+)[1] @@ -3924,13 +3903,11 @@ lemma Arch_decodeInvocation_ccorres: apply (auto simp: ct_in_state'_def valid_tcb_state'_def dest!: st_tcb_at_idle_thread' elim!: pred_tcb'_weakenE)[1] - apply (clarsimp simp: if_1_0_0 cte_wp_at_ctes_of asidHighBits_handy_convs + apply (clarsimp simp: cte_wp_at_ctes_of asidHighBits_handy_convs word_sle_def word_sless_def asidLowBits_handy_convs - rf_sr_ksCurThread "StrictC'_thread_state_defs" - mask_def[where n=4] + rf_sr_ksCurThread ThreadState_defs mask_def[where n=4] cong: if_cong) - apply (clarsimp simp: if_1_0_0 to_bool_def ccap_relation_isDeviceCap2 - objBits_simps archObjSize_def pageBits_def from_bool_def case_bool_If) + apply (clarsimp simp: ccap_relation_isDeviceCap2 objBits_simps archObjSize_def pageBits_def) apply (rule conjI) (* Is Asid Control Cap *) apply (clarsimp simp: neq_Nil_conv excaps_in_mem_def excaps_map_def) @@ -3940,11 +3917,10 @@ lemma Arch_decodeInvocation_ccorres: ccap_rights_relation_def rightsFromWord_wordFromRights) apply (clarsimp simp: asid_high_bits_word_bits split: list.split_asm) apply (clarsimp simp: cap_untyped_cap_lift_def cap_lift_untyped_cap - cap_to_H_def[split_simps cap_CL.split] - hd_conv_nth length_ineq_not_Nil - elim!: ccap_relationE) - apply (clarsimp simp: if_1_0_0 to_bool_def unat_eq_of_nat - objBits_simps archObjSize_def pageBits_def from_bool_def case_bool_If + cap_to_H_def[split_simps cap_CL.split] + hd_conv_nth length_ineq_not_Nil + elim!: ccap_relationE) + apply (clarsimp simp: to_bool_def unat_eq_of_nat objBits_simps archObjSize_def pageBits_def split: if_splits) apply (clarsimp simp: asid_low_bits_word_bits isCap_simps neq_Nil_conv excaps_map_def excaps_in_mem_def @@ -3962,10 +3938,10 @@ lemma Arch_decodeInvocation_ccorres: elim!: ccap_relationE split: if_split_asm) apply (clarsimp split: list.split) apply (clarsimp simp: cap_lift_asid_pool_cap 
cap_lift_page_directory_cap - cap_to_H_def to_bool_def - cap_page_directory_cap_lift_def + cap_to_H_def cap_page_directory_cap_lift_def to_bool_def elim!: ccap_relationE split: if_split_asm) done + end end diff --git a/proof/crefine/ARM/BuildRefineCache_C.thy b/proof/crefine/ARM/BuildRefineCache_C.thy deleted file mode 100644 index fb44f0481e..0000000000 --- a/proof/crefine/ARM/BuildRefineCache_C.thy +++ /dev/null @@ -1,39 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory BuildRefineCache_C (* FIXME: broken *) -imports Main -begin - -ML \ - -(* needed to generate a proof cache *) -proofs := 1; -DupSkip.record_proofs := true; - -tracing "Building crefinement image using Refine_C"; - -time_use_thy "Refine_C"; - -\ - -ML \ - -tracing "Synching proof cache"; - -DupSkip.sync_cache @{theory Refine_C}; - -tracing "Dumping proof cache"; - -let - val xml = XML_Syntax.xml_forest_of_cache (! DupSkip.the_cache); -in - File.open_output (XML_Syntax.output_forest xml) (Path.basic "proof_cache.xml") -end; - -\ - -end; diff --git a/proof/crefine/ARM/CACHE.ML b/proof/crefine/ARM/CACHE.ML deleted file mode 100644 index 2c551dadbd..0000000000 --- a/proof/crefine/ARM/CACHE.ML +++ /dev/null @@ -1,8 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -with_quick_and_dirty_use_thy "BuildRefineCache_C"; - diff --git a/proof/crefine/ARM/CLevityCatch.thy b/proof/crefine/ARM/CLevityCatch.thy index cb74756fcc..fbff4b2e49 100644 --- a/proof/crefine/ARM/CLevityCatch.thy +++ b/proof/crefine/ARM/CLevityCatch.thy @@ -8,8 +8,9 @@ theory CLevityCatch imports "CBaseRefine.Include_C" ArchMove_C - "CLib.LemmaBucket_C" + "CParser.LemmaBucket_C" "Lib.LemmaBucket" + Boolean_C begin context begin interpretation Arch . (*FIXME: arch_split*) @@ -61,12 +62,12 @@ declare empty_fail_doMachineOp [simp] lemma asUser_get_registers: "\tcb_at' target\ asUser target (mapM getRegister xs) - \\rv s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) xs = rv) target s\" + \\rv s. obj_at' (\tcb. map ((user_regs \ atcbContextGet \ tcbArch) tcb) xs = rv) target s\" apply (induct xs) apply (simp add: mapM_empty asUser_return) apply wp apply simp - apply (simp add: mapM_Cons asUser_bind_distrib asUser_return) + apply (simp add: mapM_Cons asUser_bind_distrib asUser_return empty_fail_cond) apply wp apply simp apply (rule hoare_strengthen_post) diff --git a/proof/crefine/ARM/CSpace_All.thy b/proof/crefine/ARM/CSpace_All.thy index 966310106e..ab54a3a670 100644 --- a/proof/crefine/ARM/CSpace_All.thy +++ b/proof/crefine/ARM/CSpace_All.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -24,9 +25,9 @@ abbreviation (* FIXME: move *) lemma ccorres_return_into_rel: - "ccorres (\rv rv'. r (f rv) rv') xf G G' hs a c + "ccorres (r \ f) xf G G' hs a c \ ccorres r xf G G' hs (a >>= (\rv. 
return (f rv))) c" - by (simp add: liftM_def[symmetric] o_def) + by (simp add: liftM_def[symmetric]) lemma lookupCap_ccorres': "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf @@ -248,8 +249,7 @@ lemma lookupSlotForCNodeOp_ccorres': apply vcg \ \last subgoal\ - apply (clarsimp simp: if_1_0_0 to_bool_def true_def word_size - fromIntegral_def integral_inv) + apply (clarsimp simp: word_size fromIntegral_def integral_inv) apply (case_tac "cap_get_tag root = scast cap_cnode_cap") prefer 2 apply clarsimp apply (clarsimp simp: unat_of_nat32 word_sle_def) @@ -285,7 +285,7 @@ lemma lookupSourceSlot_ccorres': apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres') - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done lemma lookupSourceSlot_ccorres: @@ -315,7 +315,7 @@ lemma lookupTargetSlot_ccorres': apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres') - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done lemma lookupTargetSlot_ccorres: @@ -345,7 +345,7 @@ lemma lookupPivotSlot_ccorres: apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres) - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done end diff --git a/proof/crefine/ARM/CSpace_C.thy b/proof/crefine/ARM/CSpace_C.thy index 91d7df7c0a..607f475755 100644 --- a/proof/crefine/ARM/CSpace_C.thy +++ b/proof/crefine/ARM/CSpace_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -60,7 +61,7 @@ lemma maskVMRights_spec: apply clarsimp apply (rule conjI) apply ((auto simp: vmrights_to_H_def maskVMRights_def vmrights_defs - cap_rights_to_H_def to_bool_def + cap_rights_to_H_def split: bool.split | simp add: mask_def | word_bitwise)+)[1] @@ -152,10 +153,6 @@ lemma Arch_maskCapRights_ccorres [corres]: apply (cases arch_cap) by (fastforce simp add: cap_get_tag_isCap isCap_simps simp del: not_ex simp_thms(44))+ -lemma to_bool_mask_to_bool_bf: - "to_bool (x && 1) = to_bool_bf (x::word32)" - by (simp add: to_bool_bf_def to_bool_def) - lemma to_bool_cap_rights_bf: "to_bool (capAllowRead_CL (seL4_CapRights_lift R)) = to_bool_bf (capAllowRead_CL (seL4_CapRights_lift R))" @@ -216,7 +213,7 @@ lemma maskCapRights_ccorres [corres]: apply csymbr apply (simp add: maskCapRights_cap_cases cap_get_tag_isCap del: Collect_const) apply wpc - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -226,7 +223,7 @@ lemma maskCapRights_ccorres [corres]: apply vcg apply clarsimp apply (simp add: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -235,7 +232,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -261,7 +258,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_ntfn_cap_bf to_bool_mask_to_bool_bf to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply 
(simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -269,7 +266,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -297,7 +294,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_ep_cap_bf to_bool_mask_to_bool_bf to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -306,7 +303,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -314,7 +311,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply (subst bind_return [symmetric]) apply (rule ccorres_split_throws) apply ctac @@ -327,7 +324,7 @@ lemma maskCapRights_ccorres [corres]: apply wp apply vcg apply vcg - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply ccorres_rewrite @@ -347,7 +344,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_reply_cap_bf to_bool_mask_to_bool_bf[simplified] to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -356,7 +353,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -366,7 +363,7 @@ lemma maskCapRights_ccorres [corres]: apply vcg apply clarsimp apply (simp add: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -521,9 +518,9 @@ lemma Arch_isCapRevocable_spec: {t. \c c'. 
ccap_relation c (derivedCap_' s) \ ccap_relation c' (srcCap_' s) \ ret__unsigned_long_' t = from_bool (Arch.isCapRevocable c c')}" apply vcg - by (auto simp: false_def from_bool_def) + by auto -method revokable'_hammer = solves \(simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def, +method revokable'_hammer = solves \(simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs, rule ccorres_guard_imp, rule ccorres_return_C; clarsimp)\ lemma revokable_ccorres: @@ -550,7 +547,7 @@ lemma revokable_ccorres: \ \Uninteresting caps\ apply revokable'_hammer+ \ \NotificationCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (frule_tac cap'1=srcCap in cap_get_tag_NotificationCap[THEN iffD1]) @@ -559,12 +556,12 @@ lemma revokable_ccorres: apply (clarsimp simp: cap_get_tag_isCap isCap_simps) apply (fastforce simp: cap_get_tag_isCap isCap_simps) \ \IRQHandlerCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (fastforce simp: cap_get_tag_isCap isCap_simps) \ \EndpointCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (frule_tac cap'1=srcCap in cap_get_tag_EndpointCap[THEN iffD1]) @@ -722,50 +719,6 @@ definition then (if cap_get_tag srcCap = scast cap_irq_control_cap then 1 else 0) else if (cap_get_tag newCap = scast cap_untyped_cap) then 1 else 0)" -lemma cteInsert_if_helper: - assumes cgt: "rv = cap_get_tag newCap" - and rul: "\s g. (s \ Q) = (s\ ret__unsigned_' := undefined, - unsigned_eret_2_':= undefined \ \ Q')" - shows "\ \\<^bsub>/UNIV\<^esub> {s. 
(cap_get_tag srcCap = cap_get_tag newCap - \ is_simple_cap_tag (cap_get_tag newCap)) \ - (s\newCapIsRevocable_' := cteInsert_newCapIsRevocable_if newCap srcCap\ \ Q)} - (IF rv = scast cap_endpoint_cap THEN - \ret__unsigned :== CALL cap_endpoint_cap_get_capEPBadge(newCap);; - \unsigned_eret_2 :== CALL cap_endpoint_cap_get_capEPBadge(srcCap);; - \newCapIsRevocable :== (if \ret__unsigned \ \unsigned_eret_2 then 1 else 0) - ELSE - IF rv = scast cap_notification_cap THEN - \ret__unsigned :== CALL cap_notification_cap_get_capNtfnBadge(newCap);; - \unsigned_eret_2 :== CALL cap_notification_cap_get_capNtfnBadge(srcCap);; - \newCapIsRevocable :== (if \ret__unsigned \ \unsigned_eret_2 then 1 else 0) - ELSE - IF rv = scast cap_irq_handler_cap THEN - \ret__unsigned :== CALL cap_get_capType(srcCap);; - \newCapIsRevocable :== (if \ret__unsigned = scast cap_irq_control_cap then 1 else 0) - ELSE - IF rv = scast cap_untyped_cap THEN - \newCapIsRevocable :== scast true - ELSE - \newCapIsRevocable :== scast false - FI - FI - FI - FI) Q" - unfolding cteInsert_newCapIsRevocable_if_def - apply (unfold cgt) - apply (rule conseqPre) - apply vcg - apply (clarsimp simp: true_def false_def - is_simple_cap_tag_def - cong: if_cong) - apply (simp add: cap_tag_defs) - apply (intro allI conjI impI) - apply (clarsimp simp: rul)+ - done - -lemma forget_Q': - "(x \ Q) = (y \ Q) \ (x \ Q) = (y \ Q)" . - (* Useful: apply (tactic {* let val _ = reset CtacImpl.trace_ceqv; val _ = reset CtacImpl.trace_ctac in all_tac end; *}) *) @@ -817,7 +770,7 @@ lemma update_freeIndex': supply if_cong[cong] apply (cinit lift: cap_ptr_' v32_') apply (rule ccorres_pre_getCTE) - apply (rule_tac P="\s. ctes_of s srcSlot = Some rv \ (\i. cteCap rv = UntypedCap d p sz i)" + apply (rule_tac P="\s. ctes_of s srcSlot = Some cte \ (\i. 
cteCap cte = UntypedCap d p sz i)" in ccorres_from_vcg[where P' = UNIV]) apply (rule allI) apply (rule conseqPre) @@ -939,7 +892,7 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (rule ccorres_Guard) apply (rule ccorres_call) - apply (rule update_freeIndex [unfolded dc_def]) + apply (rule update_freeIndex) apply simp apply simp apply simp @@ -965,14 +918,14 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply csymbr apply (clarsimp simp: cap_get_tag_to_H cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_Skip) apply clarsimp apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap) apply (frule(1) cte_wp_at_valid_objs_valid_cap') apply (clarsimp simp: untypedBits_defs) @@ -1078,19 +1031,17 @@ lemma cteInsert_ccorres: apply csymbr apply simp apply (rule ccorres_move_c_guard_cte) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres:ccorres_updateMDB_set_mdbPrev) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres: ccorres_updateMDB_skip) - apply (wp static_imp_wp)+ - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp)+ + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg - apply (wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg apply (clarsimp simp:cmdb_node_relation_mdbNext) - apply (wp setUntypedCapAsFull_cte_at_wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp setUntypedCapAsFull_cte_at_wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply (vcg exspec=setUntypedCapAsFull_modifies) apply wp apply vcg @@ -1261,11 +1212,9 @@ lemma cteMove_ccorres: apply (intro conjI, simp+) apply (erule (2) is_aligned_3_prev) apply (erule (2) is_aligned_3_next) - apply (clarsimp simp: dc_def split del: if_split) + apply (clarsimp split del: if_split) apply (simp add: ccap_relation_NullCap_iff) - apply (clarsimp simp add: cmdbnode_relation_def - mdb_node_to_H_def nullMDBNode_def - false_def to_bool_def) + apply (clarsimp simp: cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def) done (************************************************************************) @@ -1613,8 +1562,8 @@ lemma emptySlot_helper: mdbFirstBadged_CL (cteMDBNode_CL y)") prefer 2 apply (drule cteMDBNode_CL_lift [symmetric]) - subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) - subgoal by (simp add: to_bool_def mask_def) + subgoal by (simp add: mdb_node_lift_def word_bw_assocs) + subgoal by (simp add: to_bool_def) \ \\ \x\fst \\ apply clarsimp apply (rule fst_setCTE [OF ctes_of_cte_at], assumption ) @@ -1644,7 +1593,7 @@ lemma emptySlot_helper: prefer 2 apply (drule cteMDBNode_CL_lift [symmetric]) subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) - apply (simp add: to_bool_def mask_def split: if_split) + apply (simp add: 
to_bool_def split: if_split) \ \trivial case where mdbNext rva = 0\ apply (simp add:ccorres_cond_empty_iff) @@ -1743,7 +1692,6 @@ lemma setIRQState_ccorres: apply (simp add: empty_fail_def getInterruptState_def simpler_gets_def) apply clarsimp - apply (simp add: from_bool_def) apply (cases irqState, simp_all) apply (simp add: Kernel_C.IRQSignal_def Kernel_C.IRQInactive_def) apply (simp add: Kernel_C.IRQTimer_def Kernel_C.IRQInactive_def) @@ -2006,7 +1954,6 @@ lemma postCapDeletion_ccorres: apply (rule ccorres_symb_exec_r) apply (rule_tac xf'=irq_' in ccorres_abstract, ceqv) apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) - apply (fold dc_def) apply (frule cap_get_tag_to_H, solves \clarsimp simp: cap_get_tag_isCap_unfolded_H_cap\) apply (clarsimp simp: cap_irq_handler_cap_lift) apply (ctac(no_vcg) add: deletedIRQHandler_ccorres) @@ -2017,9 +1964,9 @@ lemma postCapDeletion_ccorres: apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_Cond_rhs) apply (wpc; clarsimp simp: isCap_simps) - apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres) apply (simp add: not_irq_or_arch_cap_case) - apply (rule ccorres_return_Skip[unfolded dc_def])+ + apply (rule ccorres_return_Skip) apply clarsimp apply (rule conjI, clarsimp simp: isCap_simps Kernel_C.maxIRQ_def) apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) @@ -2068,7 +2015,7 @@ lemma emptySlot_ccorres: \ \*** proof for the 'else' branch (return () and SKIP) ***\ prefer 2 - apply (ctac add: ccorres_return_Skip[unfolded dc_def]) + apply (ctac add: ccorres_return_Skip) \ \*** proof for the 'then' branch ***\ @@ -2108,12 +2055,11 @@ lemma emptySlot_ccorres: apply csymbr apply (rule ccorres_move_c_guard_cte) \ \--- instruction y \ updateMDB slot (\a. nullMDBNode);\ - apply (ctac (no_vcg) - add: ccorres_updateMDB_const [unfolded const_def]) + apply (ctac (no_vcg) add: ccorres_updateMDB_const) \ \the post_cap_deletion case\ - apply (ctac(no_vcg) add: postCapDeletion_ccorres [unfolded dc_def]) + apply (ctac(no_vcg) add: postCapDeletion_ccorres) \ \Haskell pre/post for y \ updateMDB slot (\a. 
nullMDBNode);\ apply wp @@ -2123,7 +2069,7 @@ lemma emptySlot_ccorres: \ \Haskell pre/post for y \ updateCap slot capability.NullCap;\ apply wp \ \C pre/post for y \ updateCap slot capability.NullCap;\ - apply (simp add: Collect_const_mem cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def false_def) + apply (simp add: Collect_const_mem cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def) \ \Haskell pre/post for the two nested updates\ apply wp \ \C pre/post for the two nested updates\ @@ -2185,8 +2131,8 @@ lemma capSwapForDelete_ccorres: \ \--- instruction: when (slot1 \ slot2) \ / IF Ptr slot1 = Ptr slot2 THEN \\ apply (simp add:when_def) apply (rule ccorres_if_cond_throws2 [where Q = \ and Q' = \]) - apply (case_tac "slot1=slot2", simp+) - apply (rule ccorres_return_void_C [simplified dc_def]) + apply (case_tac "slot1=slot2"; simp) + apply (rule ccorres_return_void_C) \ \***Main goal***\ \ \--- ccorres goal with 2 affectations (cap1 and cap2) on both on Haskell and C\ @@ -2195,7 +2141,7 @@ lemma capSwapForDelete_ccorres: apply (rule ccorres_pre_getCTE)+ apply (rule ccorres_move_c_guard_cte, rule ccorres_symb_exec_r)+ \ \***Main goal***\ - apply (ctac (no_vcg) add: cteSwap_ccorres [unfolded dc_def] ) + apply (ctac (no_vcg) add: cteSwap_ccorres) \ \C Hoare triple for \cap2 :== \\ apply vcg \ \C existential Hoare triple for \cap2 :== \\ @@ -2289,8 +2235,8 @@ lemma Arch_sameRegionAs_spec: apply (cases capa; simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps) \ \capa is ASIDPoolCap\ - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs) \ \capb is also ASIDPoolCap\ apply (frule cap_get_tag_isCap_unfolded_H_cap(13)[where cap'=cap_a]) apply (frule cap_get_tag_isCap_unfolded_H_cap(13)[where cap'=cap_b]) @@ -2312,8 +2258,8 @@ lemma Arch_sameRegionAs_spec: done \ \capa is ASIDControlCap\ - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs) \ \capb is PageCap\ subgoal for \ vmpage_size option apply (case_tac "vmpage_size=ARMSmallPage") @@ -2328,8 +2274,8 @@ lemma Arch_sameRegionAs_spec: apply (cases "vmpage_size=ARMSmallPage") \ \capa is a small frame\ apply (frule cap_get_tag_isCap_unfolded_H_cap(16)[where cap' = cap_a], assumption) - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs from_bool_def) \ \capb is PageCap\ subgoal for \ vmpage_sizea optiona @@ -2404,8 +2350,8 @@ lemma Arch_sameRegionAs_spec: apply (simp add: cap_frame_cap_lift) apply (simp add: c_valid_cap_def cl_valid_cap_def) - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs from_bool_def) \ \capb is PageCap\ subgoal for \ vmpage_sizea optiona @@ -2482,8 +2428,8 @@ lemma Arch_sameRegionAs_spec: done \ \capa is PageTableCap\ - apply (cases capb; simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs) \ \capb is PageCap\ subgoal for \ 
vmpage_size option apply (cases "vmpage_size=ARMSmallPage") @@ -2503,8 +2449,8 @@ lemma Arch_sameRegionAs_spec: capPTBasePtr_CL (cap_page_table_cap_lift cap_b)"; simp) \ \capa is PageDirectoryCap\ - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs) \ \capb is PageCap\ subgoal for \ vmpage_size option apply (cases "vmpage_size=ARMSmallPage") @@ -2848,8 +2794,7 @@ lemma cap_get_capIsPhysical_spec: cap_lift_asid_control_cap word_sle_def cap_lift_irq_control_cap cap_lift_null_cap mask_def objBits_simps cap_lift_domain_cap - ptr_add_assertion_positive from_bool_def - true_def false_def + ptr_add_assertion_positive dest!: sym [where t = "cap_get_tag cap" for cap] split: vmpage_size.splits)+ (* XXX: slow. there should be a rule for this *) @@ -2943,22 +2888,23 @@ lemma sameRegionAs_spec: apply (simp add: sameRegionAs_def isArchCap_tag_def2) apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps) \ \capa is a ThreadCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + apply (case_tac capb, + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(1)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(1)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_thread_cap_lift) apply (simp add: cap_to_H_def) + apply (clarsimp simp: from_bool_0 split: if_split) apply (clarsimp simp: case_bool_If ctcb_ptr_to_tcb_ptr_def if_distrib cong: if_cong) apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is a NullCap\ - apply (simp add: cap_tag_defs from_bool_def false_def) + apply (simp add: cap_tag_defs) \ \capa is an NotificationCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + apply (case_tac capb, + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(3)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(3)) apply (simp add: ccap_relation_def map_option_case) @@ -2968,15 +2914,15 @@ lemma sameRegionAs_spec: apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is an IRQHandlerCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + apply (case_tac capb, + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(5)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(5)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_irq_handler_cap_lift) apply (simp add: cap_to_H_def) apply (clarsimp simp: up_ucast_inj_eq c_valid_cap_def - cl_valid_cap_def mask_twice + cl_valid_cap_def mask_twice from_bool_0 split: if_split bool.split | intro impI conjI | simp )+ @@ -2986,34 +2932,34 @@ lemma sameRegionAs_spec: apply (clarsimp simp: isArchCap_tag_def2) \ \capa is an EndpointCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply 
(frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(4)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(4)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_endpoint_cap_lift) apply (simp add: cap_to_H_def) - apply (clarsimp split: if_split) + apply (clarsimp simp: from_bool_0 split: if_split) apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is a DomainCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 split: if_split) \ \capa is a Zombie\ - apply (simp add: cap_tag_defs from_bool_def false_def) + apply (simp add: cap_tag_defs) \ \capa is an Arch object cap\ apply (frule_tac cap'=cap_a in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) apply (rule conjI, clarsimp, rule impI)+ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] \ \capb is an Arch object cap\ apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) \ \capa is a ReplyCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(8)) @@ -3021,7 +2967,7 @@ lemma sameRegionAs_spec: apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_reply_cap_lift) apply (simp add: cap_to_H_def ctcb_ptr_to_tcb_ptr_def) - apply (clarsimp split: if_split) + apply (clarsimp simp: from_bool_0 split: if_split) \ \capa is an UntypedCap\ apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(9)) apply (intro conjI) @@ -3029,8 +2975,7 @@ lemma sameRegionAs_spec: apply (rule impI, drule(1) cap_get_tag_to_H)+ apply (clarsimp simp: capAligned_def word_bits_conv objBits_simps' get_capZombieBits_CL_def - Let_def word_less_nat_alt - less_mask_eq true_def + Let_def word_less_nat_alt less_mask_eq split: if_split_asm) apply (subgoal_tac "capBlockSize_CL (cap_untyped_cap_lift cap_a) \ 0x1F") apply (simp add: word_le_make_less) @@ -3049,10 +2994,9 @@ lemma sameRegionAs_spec: cap_untyped_cap_lift cap_to_H_def field_simps valid_cap'_def)+)[4] apply (rule impI, simp add: from_bool_0 ccap_relation_get_capIsPhysical[symmetric]) - apply (simp add: from_bool_def false_def) \ \capa is a CNodeCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(10)) @@ -3060,10 +3004,9 @@ lemma sameRegionAs_spec: apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_cnode_cap_lift) apply (simp add: cap_to_H_def) - apply (clarsimp split: if_split bool.split) + apply (clarsimp simp: from_bool_0 split: if_split bool.split) \ \capa is an IRQControlCap\ - apply (case_tac capb, simp_all add: 
cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def)[1] + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 split: if_split) done @@ -3104,23 +3047,21 @@ lemma Arch_sameObjectAs_spec: simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs)[1] apply (rename_tac vmpage_sizea optiona) apply (case_tac "vmpage_sizea = ARMSmallPage", - simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs - false_def from_bool_def)[1] + simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(16), simp) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(16), simp) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_small_frame_cap_lift) - apply (clarsimp simp: cap_to_H_def capAligned_def to_bool_def from_bool_def + apply (clarsimp simp: cap_to_H_def capAligned_def to_bool_def split: if_split bool.split dest!: is_aligned_no_overflow) apply (case_tac "vmpage_sizea = ARMSmallPage", - simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs - false_def from_bool_def)[1] + simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(17), simp) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(17), simp) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_frame_cap_lift) - apply (clarsimp simp: cap_to_H_def capAligned_def from_bool_def + apply (clarsimp simp: cap_to_H_def capAligned_def c_valid_cap_def cl_valid_cap_def Kernel_C.ARMSmallPage_def split: if_split bool.split vmpage_size.split_asm @@ -3139,8 +3080,7 @@ lemma sameObjectAs_spec: apply vcg apply (clarsimp simp: sameObjectAs_def isArchCap_tag_def2) apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs - from_bool_def false_def) + isCap_simps cap_tag_defs) apply fastforce+ \ \capa is an arch cap\ apply (frule cap_get_tag_isArchCap_unfolded_H_cap) @@ -3222,7 +3162,7 @@ lemma isMDBParentOf_spec: apply (simp add: ccte_relation_def map_option_case) apply (simp add: cte_lift_def) apply (clarsimp simp: cte_to_H_def mdb_node_to_H_def split: option.split_asm) - apply (clarsimp simp: Let_def false_def from_bool_def to_bool_def + apply (clarsimp simp: Let_def to_bool_def split: if_split bool.splits) apply ((clarsimp simp: typ_heap_simps dest!: lift_t_g)+)[3] apply (rule_tac x="cteCap ctea" in exI, rule conjI) @@ -3241,11 +3181,11 @@ lemma isMDBParentOf_spec: apply (rule impI, rule conjI) \ \sameRegionAs = 0\ apply (rule impI) - apply (clarsimp simp: from_bool_def false_def + apply (clarsimp simp: from_bool_def split: if_split bool.splits) \ \sameRegionAs \ 0\ - apply (clarsimp simp: from_bool_def false_def) + apply (clarsimp simp: from_bool_def) apply (case_tac "RetypeDecls_H.sameRegionAs (cap_to_H x2b) (cap_to_H x2c)") prefer 2 apply clarsimp apply (clarsimp cong:bool.case_cong if_cong simp: typ_heap_simps) @@ -3255,8 +3195,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (frule cap_get_tag_EndpointCap) apply simp - apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def true_def) \ \badge of A is not 0 now\ - + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ apply (subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_endpoint_cap") \ 
\needed also after\ prefer 2 @@ -3271,8 +3210,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (frule cap_get_tag_NotificationCap) apply simp - apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def true_def) \ \badge of A is not 0 now\ - + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ apply (subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_notification_cap") \ \needed also after\ prefer 2 @@ -3288,11 +3226,9 @@ lemma isMDBParentOf_spec: apply clarsimp apply (simp add: to_bool_def) apply (subgoal_tac "(\ (isEndpointCap (cap_to_H x2b))) \ ( \ (isNotificationCap (cap_to_H x2b)))") - apply (clarsimp simp: true_def) - apply (rule conjI) - apply (clarsimp simp: cap_get_tag_isCap [symmetric])+ -done - + apply clarsimp + apply (clarsimp simp: cap_get_tag_isCap[symmetric]) + done lemma updateCapData_spec: "\cap. \ \ \ ccap_relation cap \cap \ preserve = to_bool (\preserve) \ newData = \newData\ @@ -3306,7 +3242,7 @@ lemma updateCapData_spec: apply (simp add: updateCapData_def) apply (case_tac cap, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps from_bool_def isArchCap_tag_def2 cap_tag_defs Let_def) + isCap_simps isArchCap_tag_def2 cap_tag_defs Let_def) \ \NotificationCap\ apply clarsimp apply (frule cap_get_tag_isCap_unfolded_H_cap(3)) @@ -3435,7 +3371,6 @@ lemma ensureNoChildren_ccorres: apply (rule conjI) \ \isMDBParentOf is not zero\ apply clarsimp - apply (simp add: from_bool_def) apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] apply (simp add: bind_def) @@ -3446,7 +3381,6 @@ lemma ensureNoChildren_ccorres: apply (simp add: syscall_error_to_H_cases(9)) \ \isMDBParentOf is zero\ apply clarsimp - apply (simp add: from_bool_def) apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] apply (simp add: bind_def) apply (simp add: split_paired_Bex) @@ -3561,9 +3495,8 @@ lemma Arch_deriveCap_ccorres: apply (rule context_conjI) apply (simp add: cap_get_tag_isCap_ArchObject) apply (clarsimp simp: returnOk_def return_def isCap_simps) - subgoal by (simp add: ccap_relation_def cap_lift_def Let_def - cap_tag_defs cap_to_H_def to_bool_def - cap_small_frame_cap_lift_def asidInvalid_def) + subgoal by (simp add: ccap_relation_def cap_lift_def Let_def cap_tag_defs cap_to_H_def + cap_small_frame_cap_lift_def asidInvalid_def) apply (clarsimp simp: ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) @@ -3571,8 +3504,7 @@ lemma Arch_deriveCap_ccorres: apply (rule context_conjI) apply (simp add: cap_get_tag_isCap_ArchObject) apply (clarsimp simp: returnOk_def return_def isCap_simps) - subgoal by (simp add: ccap_relation_def cap_lift_def Let_def - cap_tag_defs cap_to_H_def to_bool_def + subgoal by (simp add: ccap_relation_def cap_lift_def Let_def cap_tag_defs cap_to_H_def cap_frame_cap_lift_def asidInvalid_def c_valid_cap_def cl_valid_cap_def) apply (simp add: cap_get_tag_isCap_ArchObject ccorres_cond_iffs) @@ -3596,7 +3528,7 @@ lemma deriveCap_ccorres': apply csymbr apply (fold case_bool_If) apply wpc - apply (clarsimp simp: cap_get_tag_isCap isCap_simps from_bool_def) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) apply csymbr apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws [where P=\ and P' = UNIV]) @@ -3605,7 +3537,7 @@ lemma deriveCap_ccorres': apply vcg apply clarsimp apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: 
isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -3613,7 +3545,7 @@ lemma deriveCap_ccorres': apply (clarsimp simp: returnOk_def return_def ccap_relation_NullCap_iff) apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_rhs_assoc)+ @@ -3638,7 +3570,7 @@ lemma deriveCap_ccorres': errstate_def) apply wp apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -3670,7 +3602,7 @@ lemma deriveCap_ccorres': apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def throwError_def) apply wp - apply (simp add: cap_get_tag_isCap isArchCap_T_isArchObjectCap from_bool_def) + apply (simp add: cap_get_tag_isCap isArchCap_T_isArchObjectCap) apply csymbr apply (simp add: cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -3681,7 +3613,6 @@ lemma deriveCap_ccorres': cap_get_tag_isArchCap_unfolded_H_cap) done - lemma deriveCap_ccorres: "ccorres (syscall_error_rel \ ccap_relation) deriveCap_xf (invs') (UNIV \ {s. ccap_relation cap (cap_' s)} \ {s. slot_' s = Ptr slot}) [] diff --git a/proof/crefine/ARM/CSpace_RAB_C.thy b/proof/crefine/ARM/CSpace_RAB_C.thy index e7c9ac0c2f..f4e4f2987f 100644 --- a/proof/crefine/ARM/CSpace_RAB_C.thy +++ b/proof/crefine/ARM/CSpace_RAB_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -101,7 +102,8 @@ lemma ccorres_locateSlotCap_push: apply (rule monadic_rewrite_bindE[OF monadic_rewrite_refl]) apply (rule monadic_rewrite_transverse) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_stateAssert) + apply (rule monadic_rewrite_stateAssert[where f="return", simplified]) + apply (rule monadic_rewrite_refl) apply simp apply (rule monadic_rewrite_refl) apply assumption @@ -167,10 +169,8 @@ next apply (simp add: cap_get_tag_isCap split del: if_split) apply (thin_tac "ret__unsigned = X" for X) apply (rule ccorres_split_throws [where P = "?P"]) - apply (rule_tac G' = "\w_rightsMask. ({s. nodeCap_' s = nodeCap} - \ {s. unat (n_bits_' s) = guard'})" - in ccorres_abstract [where xf' = w_rightsMask_']) - apply (rule ceqv_refl) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap} \ {s. 
unat (n_bits_' s) = guard'}" + in ccorres_inst) apply (rule_tac r' = "?rvr" in ccorres_rel_imp [where xf' = rab_xf]) defer @@ -182,7 +182,7 @@ next apply (vcg strip_guards=true) \ \takes a while\ apply clarsimp apply simp - apply (clarsimp simp: cap_get_tag_isCap to_bool_def) + apply (clarsimp simp: cap_get_tag_isCap) \ \Main thm\ proof (induct cap' cptr' guard' rule: resolveAddressBits.induct [case_names ind]) case (ind cap cptr guard) @@ -523,8 +523,8 @@ lemma rightsFromWord_spec: \seL4_CapRights_lift \ret__struct_seL4_CapRights_C = cap_rights_from_word_canon \<^bsup>s\<^esup>w \" apply vcg apply (simp add: seL4_CapRights_lift_def nth_shiftr mask_shift_simps nth_shiftr - cap_rights_from_word_canon_def from_bool_def word_and_1 eval_nat_numeral - word_sless_def word_sle_def) + cap_rights_from_word_canon_def word_and_1 eval_nat_numeral + word_sless_def word_sle_def) done diff --git a/proof/crefine/ARM/Cache.thy b/proof/crefine/ARM/Cache.thy deleted file mode 100644 index 0a50ec6813..0000000000 --- a/proof/crefine/ARM/Cache.thy +++ /dev/null @@ -1,37 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory Cache (* FIXME: broken *) -imports Main -begin - -text \Enable the proof cache, both skipping from it - and recording to it.\ -ML \DupSkip.record_proofs := true\ -ML \proofs := 1\ - -ML \DupSkip.skip_dup_proofs := true\ - -text \If executed in reverse order, save the cache\ -ML \val cache_thy_save_cache = ref false;\ -ML \ -if (! cache_thy_save_cache) -then File.open_output (XML_Syntax.output_forest - (XML_Syntax.xml_forest_of_cache (! DupSkip.the_cache))) - (Path.basic "proof_cache.xml") -else ()\ -ML \cache_thy_save_cache := true\ -ML \cache_thy_save_cache := false\ - -text \Load the proof cache - - can take up to a minute\ - -ML \ -DupSkip.the_cache := XML_Syntax.cache_of_xml_forest ( - File.open_input (XML_Syntax.input_forest) - (Path.basic "proof_cache.xml"))\ - -end diff --git a/proof/crefine/ARM/Ctac_lemmas_C.thy b/proof/crefine/ARM/Ctac_lemmas_C.thy index 7389fdaca3..2b42ab3a0c 100644 --- a/proof/crefine/ARM/Ctac_lemmas_C.thy +++ b/proof/crefine/ARM/Ctac_lemmas_C.thy @@ -23,7 +23,7 @@ lemma c_guard_abs_cte: apply (simp add: typ_heap_simps') done -lemmas ccorres_move_c_guard_cte [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] +lemmas ccorres_move_c_guard_cte [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] lemma c_guard_abs_tcb: fixes p :: "tcb_C ptr" @@ -33,7 +33,7 @@ lemma c_guard_abs_tcb: apply simp done -lemmas ccorres_move_c_guard_tcb [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] +lemmas ccorres_move_c_guard_tcb [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] lemma cte_array_relation_array_assertion: "gsCNodes s p = Some n \ cte_array_relation s cstate @@ -96,7 +96,7 @@ lemma array_assertion_abs_tcb_ctes_add': lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] @@ -119,7 +119,7 @@ lemma c_guard_abs_tcb_ctes': done lemmas c_guard_abs_tcb_ctes = c_guard_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_c_guard_tcb_ctes [corres_pre] = ccorres_move_c_guards [OF 
c_guard_abs_tcb_ctes] +lemmas ccorres_move_c_guard_tcb_ctes [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] lemma c_guard_abs_pte: "\s s'. (s, s') \ rf_sr \ pte_at' (ptr_val p) s \ True diff --git a/proof/crefine/ARM/Delete_C.thy b/proof/crefine/ARM/Delete_C.thy index d761d9be45..ecac0c3300 100644 --- a/proof/crefine/ARM/Delete_C.thy +++ b/proof/crefine/ARM/Delete_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -141,7 +142,7 @@ lemma capRemovable_spec: supply if_cong[cong] apply vcg apply (clarsimp simp: cap_get_tag_isCap(1-8)[THEN trans[OF eq_commute]]) - apply (simp add: capRemovable_def from_bool_def[where b=True] true_def) + apply (simp add: capRemovable_def) apply (clarsimp simp: ccap_zombie_radix_less4) apply (subst eq_commute, subst from_bool_eq_if) apply (rule exI, rule conjI, assumption) @@ -222,7 +223,7 @@ lemma cteDelete_ccorres1: apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg apply wp - apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R) + apply (rule_tac Q'="\rv. invs'" in hoare_strengthen_postE_R) apply (wp cutMon_validE_drop finaliseSlot_invs) apply fastforce apply (auto simp: cintr_def) @@ -299,7 +300,7 @@ lemma cteDelete_invs'': "\invs' and sch_act_simple and (\s. ex \ ex_cte_cap_to' ptr s)\ cteDelete ptr ex \\rv. invs'\" apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (unfold validE_R_def) apply (rule use_spec) apply (rule spec_valid_conj_liftE1) @@ -594,14 +595,14 @@ lemma reduceZombie_ccorres1: apply (clarsimp simp: throwError_def return_def cintr_def) apply vcg apply (wp cutMon_validE_drop) - apply (rule_tac Q'="\rv. invs' and cte_at' slot and valid_cap' cap" in hoare_post_imp_R) + apply (rule_tac Q'="\rv. 
invs' and cte_at' slot and valid_cap' cap" in hoare_strengthen_postE_R) apply (wp cteDelete_invs'') apply (clarsimp simp: cte_wp_at_ctes_of) apply (fastforce dest: ctes_of_valid') apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (simp add: guard_is_UNIV_def Collect_const_mem) - apply (clarsimp simp: from_bool_def false_def isCap_simps size_of_def cte_level_bits_def) + apply (clarsimp simp: isCap_simps size_of_def cte_level_bits_def) apply (simp only: word_bits_def unat_of_nat unat_arith_simps, simp) apply (simp add: guard_is_UNIV_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) @@ -676,8 +677,7 @@ lemma finaliseSlot_ccorres: apply (rule ccorres_drop_cutMon) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - from_bool_def true_def ccap_relation_NullCap_iff) + apply (clarsimp simp: returnOk_def return_def ccap_relation_NullCap_iff) apply (simp add: Collect_True liftE_bindE split_def ccorres_cond_iffs cutMon_walk_bind del: Collect_const cong: call_ignore_cong) @@ -716,8 +716,7 @@ lemma finaliseSlot_ccorres: | _ \ True" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - from_bool_def true_def) + apply (clarsimp simp: returnOk_def return_def) apply (clarsimp simp: cleanup_info_wf'_def arch_cleanup_info_wf'_def split: if_split capability.splits) apply vcg @@ -754,11 +753,11 @@ lemma finaliseSlot_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def) apply (drule use_valid [OF _ finaliseCap_cases, OF _ TrueI]) - apply (simp add: from_bool_def false_def irq_opt_relation_def true_def + apply (simp add: irq_opt_relation_def split: if_split_asm) apply vcg apply wp - apply (simp add: guard_is_UNIV_def true_def) + apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) apply (simp only: liftE_bindE cutMon_walk_bind Let_def @@ -783,7 +782,6 @@ lemma finaliseSlot_ccorres: in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) - apply (simp add: from_bool_def false_def) apply fastforce apply ceqv apply (simp only: from_bool_0 simp_thms Collect_False @@ -806,7 +804,7 @@ lemma finaliseSlot_ccorres: ccorres_seq_skip) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) apply (rule hyps[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def], - unfolded split_def, unfolded K_def], + unfolded split_def], (simp add: in_monad)+) apply (simp add: from_bool_0) apply simp @@ -828,7 +826,7 @@ lemma finaliseSlot_ccorres: apply (simp add: guard_is_UNIV_def) apply (simp add: conj_comms) apply (wp make_zombie_invs' updateCap_cte_wp_at_cases - updateCap_cap_to' hoare_vcg_disj_lift static_imp_wp)+ + updateCap_cap_to' hoare_vcg_disj_lift hoare_weak_lift_imp)+ apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) @@ -853,11 +851,11 @@ lemma finaliseSlot_ccorres: simp: isCap_simps final_matters'_def o_def) apply clarsimp apply (frule valid_globals_cte_wpD'[rotated], clarsimp) - apply (clarsimp simp: cte_wp_at_ctes_of false_def from_bool_def) + apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (frule valid_global_refsD_with_objSize, clarsimp) apply (auto simp: typ_heap_simps dest!: ccte_relation_ccap_relation)[1] - apply (wp isFinalCapability_inv static_imp_wp | wp (once) isFinal[where x=slot'])+ + apply (wp 
isFinalCapability_inv hoare_weak_lift_imp | wp (once) isFinal[where x=slot'])+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -952,26 +950,23 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_drop_cutMon_bindE) apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg) add: cteDelete_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE) apply (rule ccorres_drop_cutMon_bindE) apply (ctac(no_vcg) add: preemptionPoint_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) - apply (rule hyps[unfolded K_def], - (fastforce simp: in_monad)+)[1] + apply (rule hyps; fastforce simp: in_monad) apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp preemptionPoint_invR) apply simp apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp cteDelete_invs' cteDelete_sch_act_simple) apply (rule ccorres_cond_false) @@ -979,9 +974,8 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def) - apply (simp add: guard_is_UNIV_def from_bool_def true_def cintr_def - Collect_const_mem exception_defs) - apply (simp add: guard_is_UNIV_def from_bool_def true_def) + apply (simp add: guard_is_UNIV_def cintr_def Collect_const_mem exception_defs) + apply (simp add: guard_is_UNIV_def) apply (rule getCTE_wp) apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def) apply (drule invs_mdb') diff --git a/proof/crefine/ARM/Detype_C.thy b/proof/crefine/ARM/Detype_C.thy index 4e3ffb5648..783bcaf7e8 100644 --- a/proof/crefine/ARM/Detype_C.thy +++ b/proof/crefine/ARM/Detype_C.thy @@ -123,16 +123,6 @@ lemma h_t_valid_typ_region_bytes: by (simp add: valid_footprint_typ_region_bytes[OF neq_byte] size_of_def) -lemma proj_d_lift_state_hrs_htd_update [simp]: - "proj_d (lift_state (hrs_htd_update f hp)) = f (hrs_htd hp)" - by (cases hp) (simp add: hrs_htd_update_def proj_d_lift_state hrs_htd_def) - -lemma proj_d_lift_state_hrs_htd [simp]: - "proj_d (lift_state hp), g \\<^sub>t x = hrs_htd hp, g \\<^sub>t x" - apply (cases hp) - apply (simp add: proj_d_lift_state hrs_htd_def) - done - lemma heap_list_s_heap_list': fixes p :: "'a :: c_type ptr" shows "hrs_htd hp,\ \\<^sub>t p \ @@ -1371,14 +1361,6 @@ lemma map_comp_restrict_map: "(f \\<^sub>m (restrict_map m S)) = (restrict_map (f \\<^sub>m m) S)" by (rule ext, simp add: restrict_map_def map_comp_def) -lemma size_td_uinfo_array_tag_n_m[simp]: - "size_td (uinfo_array_tag_n_m (ta :: ('a :: c_type) itself) n m) - = size_of (TYPE('a)) * n" - apply (induct n) - apply (simp add: uinfo_array_tag_n_m_def) - apply (simp add: uinfo_array_tag_n_m_def size_of_def) - done - lemma modify_machinestate_assert_cnodes_swap: "do x \ modify (ksMachineState_update f); y \ stateAssert (\s. 
\ cNodePartialOverlap (gsCNodes s) S) []; g od @@ -1446,13 +1428,13 @@ lemma deleteObjects_ccorres': doMachineOp_modify modify_modify o_def ksPSpace_ksMSu_comm bind_assoc modify_machinestate_assert_cnodes_swap modify_modify_bind) - apply (rule ccorres_stateAssert_fwd) + apply (rule ccorres_stateAssert_fwd)+ apply (rule ccorres_stateAssert_after) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: in_monad) apply (rule bexI [rotated]) - apply (rule iffD2 [OF in_monad(20)]) + apply (rule iffD2 [OF in_monad(21)]) apply (rule conjI [OF refl refl]) apply (clarsimp simp: simpler_modify_def) proof - @@ -1586,35 +1568,11 @@ proof - apply (rule cmap_array; simp add: pteBits_def) done moreover - from invs have "valid_queues s" .. - hence "\p. \t \ set (ksReadyQueues s p). tcb_at' t s \ ko_wp_at' live' t s" - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec, drule spec) - apply clarsimp - apply (drule (1) bspec) - apply (rule conjI) - apply (erule obj_at'_weakenE) - apply simp - apply (simp add: obj_at'_real_def) - apply (erule ko_wp_at'_weakenE) - apply (clarsimp simp: projectKOs inQ_def) - done - hence tat: "\p. \t \ set (ksReadyQueues s p). tcb_at' t s" - and tlive: "\p. \t \ set (ksReadyQueues s p). ko_wp_at' live' t s" - by auto from sr have - "cready_queues_relation (clift ?th_s) - (ksReadyQueues_' (globals s')) (ksReadyQueues s)" + "cready_queues_relation (ksReadyQueues s) (ksReadyQueues_' (globals s'))" unfolding cready_queues_relation_def rf_sr_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp) - apply fastforce - apply ((subst lift_t_typ_region_bytes, rule cm_disj_tcb, assumption+, - simp_all add: objBits_simps archObjSize_def pageBits_def projectKOs)[1])+ - \ \waiting ...\ - apply (simp add: tcb_queue_relation_live_restrict - [OF D.valid_untyped tat tlive rl]) done moreover diff --git a/proof/crefine/ARM/Fastpath_C.thy b/proof/crefine/ARM/Fastpath_C.thy index 4b2625c25b..b2a461cf97 100644 --- a/proof/crefine/ARM/Fastpath_C.thy +++ b/proof/crefine/ARM/Fastpath_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -38,11 +39,10 @@ lemma getEndpoint_obj_at': lemmas setEndpoint_obj_at_tcb' = setEndpoint_obj_at'_tcb lemma tcbSchedEnqueue_tcbContext[wp]: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - tcbSchedEnqueue t' - \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule tcbSchedEnqueue_obj_at_unchangedT[OF all_tcbI]) - apply simp + "tcbSchedEnqueue t' \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_when) + apply (wp threadSet_obj_at' hoare_drop_imps threadGet_wp + | simp split: if_split)+ done lemma setCTE_tcbContext: @@ -53,26 +53,22 @@ lemma setCTE_tcbContext: apply (rule setObject_cte_obj_at_tcb', simp_all) done -lemma seThreadState_tcbContext: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setThreadState a b - \\_. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule setThreadState_obj_at_unchanged) - apply (clarsimp simp: atcbContext_def)+ - done +lemma setThreadState_tcbContext: + "setThreadState a b \obj_at' (\tcb. 
P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def + tcbQueuePrepend_def rescheduleRequired_def + by (wp threadSet_obj_at' hoare_drop_imps threadGet_wp | wpc + | simp split: if_split)+ lemma setBoundNotification_tcbContext: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setBoundNotification a b - \\_. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule setBoundNotification_obj_at_unchanged) - apply (clarsimp simp: atcbContext_def)+ - done + "setBoundNotification a b \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setBoundNotification_def + by wpsimp declare comp_apply [simp del] crunch tcbContext[wp]: deleteCallerCap "obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t" (wp: setEndpoint_obj_at_tcb' setBoundNotification_tcbContext - setNotification_tcb crunch_wps seThreadState_tcbContext + setNotification_tcb crunch_wps setThreadState_tcbContext simp: crunch_simps unless_def) declare comp_apply [simp] @@ -630,10 +626,10 @@ lemma dmo_clearExMonitor_setCurThread_swap: od) = (do _ \ setCurThread thread; doMachineOp ARM.clearExMonitor od)" - apply (simp add: setCurThread_def doMachineOp_def split_def) - apply (rule oblivious_modify_swap[symmetric]) - apply (intro oblivious_bind, - simp_all add: select_f_oblivious) + apply (clarsimp simp: ARM.clearExMonitor_def) + apply (simp add: doMachineOp_modify) + apply (rule oblivious_modify_swap) + apply (fastforce intro: oblivious_bind simp: setCurThread_def idleThreadNotQueued_def) done lemma pd_at_asid_inj': @@ -653,18 +649,15 @@ lemma armv_contextSwitch_HWASID_fp_rewrite: checkPDAt_def checkPDUniqueToASID_def checkPDASIDMapMembership_def stateAssert_def2[folded assert_def]) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_gets_l) apply (rule monadic_rewrite_symb_exec_l) - apply (wpsimp)+ - apply (simp add: empty_fail_findPDForASID empty_fail_catch) - apply (rule monadic_rewrite_assert monadic_rewrite_gets_l)+ - apply (rule_tac P="asidMap asid \ None \ fst (the (asidMap asid)) = the (pde_stored_asid v)" - in monadic_rewrite_gen_asm) - apply (simp only: case_option_If2 simp_thms if_True if_False - split_def, simp) - apply (rule monadic_rewrite_refl) - apply (wp findPDForASID_pd_at_wp | simp only: const_def)+ + apply (rule monadic_rewrite_assert monadic_rewrite_gets_l)+ + apply (rule_tac P="asidMap asid \ None \ fst (the (asidMap asid)) = the (pde_stored_asid v)" + in monadic_rewrite_gen_asm) + apply (simp add: case_option_If2 split_def) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: findPDForASID_pd_at_wp simp: empty_fail_catch)+ apply (clarsimp simp: pd_has_hwasid_def cte_level_bits_def field_simps cte_wp_at_ctes_of word_0_sle_from_less @@ -734,9 +727,10 @@ lemma switchToThread_fp_ccorres: apply (simp add: storeWordUser_def bind_assoc case_option_If2 split_def del: Collect_const) - apply (simp only: dmo_clearExMonitor_setCurThread_swap - dc_def[symmetric]) + apply (simp only: dmo_clearExMonitor_setCurThread_swap) apply (rule ccorres_split_nothrow_novcg_dc) + apply (clarsimp simp: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp del: rf_sr_upd_safe) @@ -746,7 +740,7 @@ lemma switchToThread_fp_ccorres: apply (ctac add: clearExMonitor_fp_ccorres) apply wp apply (simp add: guard_is_UNIV_def) - apply wp + apply (wp hoare_drop_imps) apply (simp add: bind_assoc 
checkPDNotInASIDMap_def checkPDASIDMapMembership_def) apply (rule ccorres_stateAssert) @@ -799,7 +793,7 @@ lemma thread_state_ptr_set_tsType_np_spec: apply (clarsimp simp: typ_heap_simps') apply (rule exI, rule conjI[OF _ conjI [OF _ refl]]) apply (simp_all add: thread_state_lift_def) - apply (auto simp: "StrictC'_thread_state_defs" mask_def) + apply (auto simp: ThreadState_defs mask_def) done lemma thread_state_ptr_mset_blockingObject_tsType_spec: @@ -987,10 +981,7 @@ lemma ccorres_call_hSkip: apply - apply (rule ccorres_call_hSkip') apply (erule ccorres_guard_imp) - apply simp - apply clarsimp - apply (simp_all add: ggl xfdc_def) - apply (clarsimp simp: igl) + apply (clarsimp simp: ggl igl xfdc_def)+ done lemma bind_case_sum_rethrow: @@ -1118,7 +1109,7 @@ lemma isValidVTableRoot_fp_spec: {t. ret__unsigned_long_' t = from_bool (isValidVTableRoot_C (pd_cap_' s))}" apply vcg apply (clarsimp simp: word_sle_def word_sless_def isValidVTableRoot_fp_lemma) - apply (simp add: from_bool_def split: if_split) + apply (simp split: if_split) done lemma isRecvEP_endpoint_case: @@ -1206,8 +1197,8 @@ lemma fastpath_dequeue_ccorres: apply (rule conjI) apply (clarsimp simp: cpspace_relation_def update_ep_map_tos update_tcb_map_tos typ_heap_simps') - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_queue_ptrs_def + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) apply (rule conjI) apply (rule cpspace_relation_ep_update_ep, assumption+) @@ -1223,8 +1214,6 @@ lemma fastpath_dequeue_ccorres: apply (simp add: carch_state_relation_def typ_heap_simps' cmachine_state_relation_def h_t_valid_clift_Some_iff update_ep_map_tos) - apply (erule cready_queues_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) done lemma st_tcb_at_not_in_ep_queue: @@ -1362,8 +1351,8 @@ lemma fastpath_enqueue_ccorres: apply (rule conjI) apply (clarsimp simp: cpspace_relation_def update_ep_map_tos typ_heap_simps') - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_queue_ptrs_def + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) apply (rule conjI) apply (rule_tac S="tcb_ptr_to_ctcb_ptr ` set (ksCurThread \ # list)" @@ -1402,8 +1391,6 @@ lemma fastpath_enqueue_ccorres: auto dest!: map_to_ko_atI)[1] apply (simp add: carch_state_relation_def typ_heap_simps' update_ep_map_tos cmachine_state_relation_def h_t_valid_clift_Some_iff) - apply (erule cready_queues_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) apply (clarsimp simp: typ_heap_simps' EPState_Recv_def mask_def is_aligned_weaken[OF is_aligned_tcb_ptr_to_ctcb_ptr]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) @@ -1411,8 +1398,8 @@ lemma fastpath_enqueue_ccorres: apply (rule conjI) apply (clarsimp simp: cpspace_relation_def update_ep_map_tos typ_heap_simps' ct_in_state'_def) - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_queue_ptrs_def + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) apply (rule conjI) apply (rule_tac S="{tcb_ptr_to_ctcb_ptr (ksCurThread \)}" @@ -1432,8 +1419,6 @@ lemma fastpath_enqueue_ccorres: assumption+, auto dest!: map_to_ko_atI)[1] apply (simp add: carch_state_relation_def typ_heap_simps' update_ep_map_tos 
cmachine_state_relation_def h_t_valid_clift_Some_iff) - apply (erule cready_queues_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) done lemma setCTE_rf_sr: @@ -1528,8 +1513,8 @@ lemma cap_reply_cap_ptr_new_np_updateCap_ccorres: limited_and_simps cap_reply_cap_def limited_and_simps1[OF lshift_limited_and, OF limited_and_from_bool] shiftr_over_or_dist word_bw_assocs mask_def shiftl_shiftr3 word_size) - apply (cases m ; clarsimp) - apply (cases canGrant ; clarsimp) + apply (cases m ; clarsimp simp: true_def) + apply (cases canGrant ; clarsimp simp: true_def false_def) done lemma fastpath_copy_mrs_ccorres: @@ -1580,7 +1565,7 @@ lemma ctes_of_Some_cte_wp_at: by (clarsimp simp: cte_wp_at_ctes_of) lemma user_getreg_wp: - "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (atcbContextGet o tcbArch) tcb r = rv) t s \ Q rv s)\ + "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb r = rv) t s \ Q rv s)\ asUser t (getRegister r) \Q\" apply (rule_tac Q="\rv s. \rv'. rv' = rv \ Q rv' s" in hoare_post_imp) apply simp @@ -1704,8 +1689,8 @@ lemma fastpath_call_ccorres: notes hoare_TrueI[simp] shows "ccorres dc xfdc (\s. invs' s \ ct_in_state' ((=) Running) s - \ obj_at' (\tcb. (atcbContextGet o tcbArch) tcb ARM_H.capRegister = cptr - \ (atcbContextGet o tcbArch) tcb ARM_H.msgInfoRegister = msginfo) + \ obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb ARM_H.capRegister = cptr + \ (user_regs o atcbContextGet o tcbArch) tcb ARM_H.msgInfoRegister = msginfo) (ksCurThread s) s) (UNIV \ {s. cptr_' s = cptr} \ {s. msgInfo_' s = msginfo}) [] (fastpaths SysCall) (Call fastpath_call_'proc)" @@ -1779,7 +1764,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_alternative2) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -1813,9 +1797,10 @@ proof - apply (simp add: from_bool_0 if_1_0_0 cong: if_cong) apply (rule ccorres_cond_true_seq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) - apply (rule slowpath_ccorres, simp+) + apply (erule disjE; simp; rule slowpath_ccorres) + apply simp + apply simp apply (vcg exspec=slowpath_noreturn_spec) apply (rule ccorres_rhs_assoc)+ apply csymbr+ @@ -1828,7 +1813,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1865,7 +1849,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1888,7 +1871,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1946,29 +1928,25 @@ proof - apply (simp add: ctcb_relation_unat_tcbPriority_C word_less_nat_alt linorder_not_le) apply ceqv - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) - apply (simp add: if_1_0_0 ccap_relation_ep_helpers from_bool_0 word_le_not_less - del: Collect_const cong: call_ignore_cong) + apply (simp add: from_bool_eq_if from_bool_eq_if' from_bool_0 ccorres_IF_True del: Collect_const) apply (rule 
ccorres_Cond_rhs) - apply (simp add: bindE_assoc del: Collect_const) apply (rule ccorres_Guard_Seq) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres) - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) + apply (simp add: from_bool_eq_if from_bool_eq_if' from_bool_0 ccorres_IF_True del: Collect_const) apply (clarsimp simp: to_bool_def) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0) + apply (clarsimp simp: from_bool_eq_if' word_le_not_less from_bool_0) apply (clarsimp simp: return_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) apply (clarsimp simp: isHighestPrio_def' simpler_gets_def) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: from_bool_0) apply clarsimp apply vcg apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) @@ -1982,7 +1960,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (simp add: bindE_assoc from_bool_0 catch_throwError del: Collect_const) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2001,7 +1978,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2016,7 +1992,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2045,7 +2020,6 @@ proof - apply (rule ccorres_seq_cond_raise[THEN iffD2]) apply (rule_tac R=\ in ccorres_cond2', blast) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2077,9 +2051,6 @@ proof - apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) apply (simp add: ctcb_relation_def cthread_state_relation_def) apply simp - apply (rule conjI, erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split add: typ_heap_simps') - apply (rule ext, simp split: if_split add: typ_heap_simps') apply (simp add: carch_state_relation_def cmachine_state_relation_def typ_heap_simps' map_comp_update projectKO_opt_tcb cvariable_relation_upd_const ko_at_projectKO_opt) @@ -2101,7 +2072,7 @@ proof - ccorres_move_array_assertion_tcb_ctes ccorres_move_c_guard_tcb_ctes)+ apply csymbr - apply (simp add: cteInsert_def bind_assoc dc_def[symmetric] + apply (simp add: cteInsert_def bind_assoc del: Collect_const cong: call_ignore_cong) apply (rule ccorres_pre_getCTE2, rename_tac curThreadReplyCTE) apply (simp only: getThreadState_def) @@ -2204,9 +2175,6 @@ proof - apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) apply (simp add: ctcb_relation_def cthread_state_relation_def) apply simp - apply (rule conjI, erule cready_queues_relation_not_queue_ptrs) - apply (rule 
ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (simp add: carch_state_relation_def cmachine_state_relation_def typ_heap_simps' map_comp_update projectKO_opt_tcb cvariable_relation_upd_const ko_at_projectKO_opt) @@ -2224,7 +2192,6 @@ proof - apply csymbr apply csymbr apply (rule ccorres_call_hSkip) - apply (fold dc_def)[1] apply (rule fastpath_restore_ccorres) apply simp apply simp @@ -2331,7 +2298,7 @@ proof - apply (vcg exspec=endpoint_ptr_get_epQueue_head_modifies exspec=endpoint_ptr_get_state_modifies) apply (simp add: if_1_0_0 getSlotCap_def) - apply (rule valid_isRight_theRight_split) + apply (rule valid_isLeft_theRight_split) apply simp apply (wp getCTE_wp') apply (rule validE_R_abstract_rv) @@ -2399,7 +2366,7 @@ proof - apply (rule conjI) (* isReceive on queued tcb state *) apply (fastforce simp: st_tcb_at_tcbs_of isBlockedOnReceive_def isReceive_def) apply clarsimp - apply (rule conjI, fastforce dest!: invs_queues simp: valid_queues_def) + apply (rule conjI, fastforce dest!: simp: valid_queues_def) apply (frule invs_mdb', clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (case_tac xb, clarsimp, drule(1) nullcapsD') apply (clarsimp simp: pde_stored_asid_def to_bool_def @@ -2504,7 +2471,7 @@ lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add[where tcb="\s. Ptr (tcb' s)" for tcb', simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)[where tcb="\s. Ptr (tcb' s)" for tcb', simplified]] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] @@ -2532,8 +2499,8 @@ lemma fastpath_reply_recv_ccorres: notes hoare_TrueI[simp] shows "ccorres dc xfdc (\s. invs' s \ ct_in_state' ((=) Running) s - \ obj_at' (\tcb. (atcbContextGet o tcbArch) tcb capRegister = cptr - \ (atcbContextGet o tcbArch) tcb msgInfoRegister = msginfo) + \ obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb capRegister = cptr + \ (user_regs o atcbContextGet o tcbArch) tcb msgInfoRegister = msginfo) (ksCurThread s) s) (UNIV \ {s. cptr_' s = cptr} \ {s. 
msgInfo_' s = msginfo}) [] (fastpaths SysReplyRecv) (Call fastpath_reply_recv_'proc)" @@ -2609,7 +2576,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_alternative2) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -2642,9 +2608,8 @@ lemma fastpath_reply_recv_ccorres: apply (simp add: if_1_0_0 cong: if_cong) apply (rule ccorres_cond_true_seq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) - apply (rule slowpath_ccorres) + apply (erule disjE; simp; rule slowpath_ccorres) apply simp apply simp apply (vcg exspec=slowpath_noreturn_spec) @@ -2659,7 +2624,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -2684,7 +2648,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_split_throws) apply simp - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2714,7 +2677,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp del: Collect_const not_None_eq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2748,7 +2710,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp cong: conj_cong) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2768,7 +2729,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp del: Collect_const not_None_eq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2790,7 +2750,6 @@ lemma fastpath_reply_recv_ccorres: apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2822,7 +2781,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_cond2'[where R=\], blast) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2837,7 +2795,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2868,12 +2825,10 @@ lemma fastpath_reply_recv_ccorres: apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def - to_bool_def del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc2) @@ -2886,7 +2841,7 @@ lemma fastpath_reply_recv_ccorres: apply (clarsimp simp: rf_sr_ksCurThread typ_heap_simps' h_t_valid_clift_Some_iff) apply (clarsimp simp: capAligned_def isCap_simps objBits_simps - 
"StrictC'_thread_state_defs" mask_def) + ThreadState_defs mask_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps' objBits_defs) apply (rule conjI) @@ -2897,20 +2852,16 @@ lemma fastpath_reply_recv_ccorres: apply (simp add: cep_relations_drop_fun_upd) apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) apply (simp add: ctcb_relation_def cthread_state_relation_def - "StrictC'_thread_state_defs" from_bool_0 - to_bool_def if_1_0_0) + ThreadState_defs) apply (clarsimp simp: ccap_relation_ep_helpers) apply simp - apply (rule conjI, erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (simp add: carch_state_relation_def cmachine_state_relation_def typ_heap_simps' map_comp_update projectKO_opt_tcb cvariable_relation_upd_const ko_at_projectKO_opt) apply ceqv apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) - apply (rule fastpath_enqueue_ccorres[unfolded o_def,simplified]) + apply (rule fastpath_enqueue_ccorres[simplified]) apply simp apply ceqv apply (simp add: liftM_def del: Collect_const cong: call_ignore_cong) @@ -2980,9 +2931,6 @@ lemma fastpath_reply_recv_ccorres: apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) apply (simp add: ctcb_relation_def cthread_state_relation_def) apply simp - apply (rule conjI, erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (simp add: carch_state_relation_def cmachine_state_relation_def typ_heap_simps' map_comp_update projectKO_opt_tcb cvariable_relation_upd_const ko_at_projectKO_opt) @@ -2999,7 +2947,6 @@ lemma fastpath_reply_recv_ccorres: apply csymbr apply csymbr apply (rule ccorres_call_hSkip) - apply (fold dc_def)[1] apply (rule fastpath_restore_ccorres) apply simp apply simp @@ -3024,7 +2971,7 @@ lemma fastpath_reply_recv_ccorres: apply (wp setCTE_cte_wp_at_other) apply (simp del: Collect_const) apply vcg - apply (simp add: o_def) + apply simp apply (wp | simp | wp (once) updateMDB_weak_cte_wp_at | wp (once) updateMDB_cte_wp_at_other)+ @@ -3086,7 +3033,7 @@ lemma fastpath_reply_recv_ccorres: apply (simp del: Collect_const) apply vcg apply (simp add: if_1_0_0 getSlotCap_def) - apply (rule valid_isRight_theRight_split) + apply (rule valid_isLeft_theRight_split) apply (wp getCTE_wp') apply (rule validE_R_abstract_rv) apply wp @@ -3104,8 +3051,6 @@ lemma fastpath_reply_recv_ccorres: apply (clarsimp simp: ct_in_state'_def obj_at_tcbs_of word_sle_def) apply (clarsimp simp add: invs_ksCurDomain_maxDomain') apply (rule conjI, fastforce) - apply (frule invs_queues) - apply (simp add: valid_queues_def) apply (frule tcbs_of_aligned') apply (simp add:invs_pspace_aligned') apply (frule tcbs_of_cte_wp_at_caller) @@ -3135,6 +3080,11 @@ lemma fastpath_reply_recv_ccorres: invs_valid_pde_mappings' obj_at_tcbs_of dest!: isValidVTableRootD) apply (frule invs_mdb') + apply (frule invs_valid_objs') + apply (frule invs_valid_bitmaps) + apply (frule valid_bitmaps_bitmapQ_no_L1_orphans) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') apply (clarsimp simp: cte_wp_at_ctes_of tcbSlots cte_level_bits_def makeObject_cte isValidVTableRoot_def @@ -3142,10 +3092,10 @@ lemma fastpath_reply_recv_ccorres: pde_stored_asid_def to_bool_def valid_mdb'_def valid_tcb_state'_def word_le_nat_alt[symmetric] length_msgRegisters) - apply (frule ko_at_valid_ep', fastforce) apply (rule conjI) - subgoal 
(* dest thread domain \ maxDomain *) - by (drule (1) tcbs_of_valid_tcb'[OF invs_valid_objs'], solves \clarsimp simp: valid_tcb'_def\) + apply (fastforce dest: tcbs_of_valid_tcb' simp: valid_tcb'_def opt_map_def + split: option.splits) + apply (frule ko_at_valid_ep', fastforce) apply clarsimp apply (safe del: notI disjE)[1] apply (simp add: isSendEP_def valid_ep'_def tcb_at_invs' diff --git a/proof/crefine/ARM/Fastpath_Equiv.thy b/proof/crefine/ARM/Fastpath_Equiv.thy index 9858ec9beb..13ab694ea3 100644 --- a/proof/crefine/ARM/Fastpath_Equiv.thy +++ b/proof/crefine/ARM/Fastpath_Equiv.thy @@ -1,6 +1,6 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems - * Copyright 2020, Proofcraft Pty Ltd * * SPDX-License-Identifier: GPL-2.0-only *) @@ -31,44 +31,38 @@ lemma getEndpoint_obj_at': lemmas setEndpoint_obj_at_tcb' = setEndpoint_obj_at'_tcb -lemma tcbSchedEnqueue_tcbContext[wp]: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - tcbSchedEnqueue t' - \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule tcbSchedEnqueue_obj_at_unchangedT[OF all_tcbI]) - apply simp - done +crunches tcbSchedEnqueue + for tcbContext[wp]: "obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t" + (simp: tcbQueuePrepend_def) lemma setCTE_tcbContext: "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setCTE slot cte - \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + setCTE slot cte + \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" apply (simp add: setCTE_def) apply (rule setObject_cte_obj_at_tcb', simp_all) done context begin interpretation Arch . (*FIXME: arch_split*) -lemma seThreadState_tcbContext: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setThreadState a b - \\_. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule setThreadState_obj_at_unchanged) - apply (clarsimp simp: atcbContext_def)+ +lemma setThreadState_tcbContext: + "setThreadState st tptr \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setThreadState_def rescheduleRequired_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps) + apply (fastforce simp: obj_at'_def objBits_simps projectKOs atcbContext_def ps_clear_upd) done lemma setBoundNotification_tcbContext: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setBoundNotification a b - \\_. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule setBoundNotification_obj_at_unchanged) - apply (clarsimp simp: atcbContext_def)+ + "setBoundNotification ntfnPtr tptr \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setBoundNotification_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps) + apply (fastforce simp: obj_at'_def objBits_simps projectKOs) done declare comp_apply [simp del] crunch tcbContext[wp]: deleteCallerCap "obj_at' (\tcb. 
P ((atcbContextGet o tcbArch) tcb)) t" (wp: setEndpoint_obj_at_tcb' setBoundNotification_tcbContext - setNotification_tcb crunch_wps seThreadState_tcbContext + setNotification_tcb crunch_wps setThreadState_tcbContext simp: crunch_simps unless_def) declare comp_apply [simp] @@ -121,24 +115,21 @@ lemmas valid_cnode_cap_cte_at'' declare of_int_sint_scast[simp] -lemma isCNodeCap_capUntypedPtr_capCNodePtr: - "isCNodeCap c \ capUntypedPtr c = capCNodePtr c" - by (clarsimp simp: isCap_simps) - lemma of_bl_from_bool: "of_bl [x] = from_bool x" by (cases x, simp_all add: from_bool_def) lemma dmo_clearExMonitor_setCurThread_swap: "(do _ \ doMachineOp ARM.clearExMonitor; - setCurThread thread - od) - = (do _ \ setCurThread thread; - doMachineOp ARM.clearExMonitor od)" - apply (simp add: setCurThread_def doMachineOp_def split_def) - apply (rule oblivious_modify_swap[symmetric]) - apply (intro oblivious_bind, - simp_all add: select_f_oblivious) + setCurThread thread + od) + = (do _ \ setCurThread thread; + doMachineOp ARM.clearExMonitor + od)" + apply (clarsimp simp: ARM.clearExMonitor_def) + apply (simp add: doMachineOp_modify) + apply (rule oblivious_modify_swap) + apply (fastforce intro: oblivious_bind simp: setCurThread_def idleThreadNotQueued_def) done lemma pd_at_asid_inj': @@ -253,7 +244,7 @@ lemma ctes_of_Some_cte_wp_at: by (clarsimp simp: cte_wp_at_ctes_of) lemma user_getreg_wp: - "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (atcbContextGet o tcbArch) tcb r = rv) t s \ Q rv s)\ + "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb r = rv) t s \ Q rv s)\ asUser t (getRegister r) \Q\" apply (rule_tac Q="\rv s. \rv'. rv' = rv \ Q rv' s" in hoare_post_imp) apply simp @@ -313,8 +304,6 @@ lemma threadSet_tcbState_valid_objs: apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def) done -lemmas monadic_rewrite_symb_exec_l' = monadic_rewrite_symb_exec_l'_preserve_names - lemma possibleSwitchTo_rewrite: "monadic_rewrite True True (\s. obj_at' (\tcb. 
tcbPriority tcb = destPrio \ tcbDomain tcb = destDom) t s @@ -325,21 +314,12 @@ lemma possibleSwitchTo_rewrite: (possibleSwitchTo t) (setSchedulerAction (SwitchToThread t))" supply if_split[split del] apply (simp add: possibleSwitchTo_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_l'[OF threadGet_inv empty_fail_threadGet, - where P'=\], simp) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="targetDom = curDom" in monadic_rewrite_gen_asm) - apply simp - apply (rule_tac P="action = ResumeCurrentThread" in monadic_rewrite_gen_asm) - apply simp - apply (rule monadic_rewrite_refl) - apply (wp threadGet_wp cd_wp |simp add: bitmap_fun_defs)+ + (* under current preconditions both branch conditions are false *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: threadGet_wp cd_wp\) + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: threadGet_wp cd_wp\) + (* discard unused getters before setSchedulerAction *) apply (simp add: getCurThread_def curDomain_def gets_bind_ign getSchedulerAction_def) - apply (rule monadic_rewrite_refl) - apply clarsimp + apply (monadic_rewrite_symb_exec_l_drop, rule monadic_rewrite_refl) apply (auto simp: obj_at'_def) done @@ -358,7 +338,7 @@ lemma lookupBitmapPriority_lift: unfolding lookupBitmapPriority_def apply (rule hoare_pre) apply (wps prqL1 prqL2) - apply wpsimp+ + apply wpsimp+ done (* slow path additionally requires current thread not idle *) @@ -408,6 +388,15 @@ lemma fastpathBestSwitchCandidate_ksSchedulerAction_simp[simp]: unfolding fastpathBestSwitchCandidate_def lookupBitmapPriority_def by simp +lemma sched_act_SwitchToThread_rewrite: + "\ sa = SwitchToThread t \ monadic_rewrite F E Q (m_sw t) f \ + \ monadic_rewrite F E ((\_. sa = SwitchToThread t) and Q) + (case_scheduler_action m_res m_ch (\t. m_sw t) sa) f" + apply (cases sa; simp add: monadic_rewrite_impossible) + apply (rename_tac t') + apply (case_tac "t' = t"; simp add: monadic_rewrite_impossible) + done + lemma schedule_rewrite_ct_not_runnable': "monadic_rewrite True True (\s. 
ksSchedulerAction s = SwitchToThread t \ ct_in_state' (Not \ runnable') s @@ -417,51 +406,36 @@ lemma schedule_rewrite_ct_not_runnable': (do setSchedulerAction ResumeCurrentThread; switchToThread t od)" supply subst_all [simp del] apply (simp add: schedule_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="action = SwitchToThread t" in monadic_rewrite_gen_asm, simp) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ wasRunnable \ action = SwitchToThread t" - in monadic_rewrite_gen_asm,simp) - apply (rule monadic_rewrite_bind_tail, rename_tac idleThread) - apply (rule monadic_rewrite_bind_tail, rename_tac targetPrio) - apply (rule monadic_rewrite_bind_tail, rename_tac curPrio) - apply (rule monadic_rewrite_bind_tail, rename_tac fastfail) - apply (rule monadic_rewrite_bind_tail, rename_tac curDom) - apply (rule monadic_rewrite_bind_tail, rename_tac highest) - apply (rule_tac P="\ (fastfail \ \ highest)" in monadic_rewrite_gen_asm, simp only:) - apply simp - apply (rule monadic_rewrite_refl) - apply (wpsimp wp: hoare_vcg_imp_lift) - apply (simp add: isHighestPrio_def') - apply wp+ - apply (wp hoare_vcg_disj_lift) - apply (wp scheduleSwitchThreadFastfail_False_wp) - apply (wp hoare_vcg_disj_lift threadGet_wp'' | simp add: comp_def)+ - (* remove no-ops, somewhat by magic *) - apply (rule monadic_rewrite_symb_exec_l'_TT, solves wp, - wpsimp wp: empty_fail_isRunnable simp: isHighestPrio_def')+ - apply (simp add: setSchedulerAction_def) - apply (subst oblivious_modify_swap[symmetric], rule oblivious_switchToThread_schact) - apply (rule monadic_rewrite_refl) - apply wp+ - apply (clarsimp simp: ct_in_state'_def) - apply (strengthen not_pred_tcb_at'_strengthen, simp) - supply word_neq_0_conv[simp del] + (* switching to t *) + apply (monadic_rewrite_l sched_act_SwitchToThread_rewrite[where t=t]) + (* not wasRunnable, skip enqueue *) + apply (simp add: when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + (* fastpath: \ (fastfail \ \ highest) *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + (* fastpath: no scheduleChooseNewThread *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + (* remove no-ops *) + apply (repeat 10 monadic_rewrite_symb_exec_l) (* until switchToThread *) + apply (simp add: setSchedulerAction_def) + apply (subst oblivious_modify_swap[symmetric], + rule oblivious_switchToThread_schact) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: empty_fail_isRunnable simp: isHighestPrio_def')+ + apply (clarsimp simp: ct_in_state'_def not_pred_tcb_at'_strengthen + fastpathBestSwitchCandidate_def) apply normalise_obj_at' - apply (simp add: fastpathBestSwitchCandidate_def) - apply (erule_tac x="tcbPriority ko" in allE) - apply (erule impE, normalise_obj_at'+) done -crunch tcb2[wp]: "Arch.switchToThread" "tcb_at' t" - (ignore: ARM.clearExMonitor) - lemma resolveAddressBits_points_somewhere: "\\s. \slot. Q slot s\ resolveAddressBits cp cptr bits \Q\,-" - apply (rule_tac Q'="\rv s. \rv. Q rv s" in hoare_post_imp_R) + apply (rule_tac Q'="\rv s. \rv. 
Q rv s" in hoare_strengthen_postE_R) apply wp apply clarsimp done @@ -482,18 +456,12 @@ lemmas cteInsert_obj_at'_not_queued = cteInsert_obj_at'_queued[of "\a. lemma monadic_rewrite_threadGet: "monadic_rewrite E F (obj_at' (\tcb. f tcb = v) t) (threadGet f t) (return v)" - unfolding getThreadState_def - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans[rotated]) - apply (rule monadic_rewrite_gets_known) - apply (unfold threadGet_def liftM_def fun_app_def) - apply (rule monadic_rewrite_symb_exec_l' | wp | rule empty_fail_getObject getObject_inv)+ - apply (clarsimp; rule no_fail_getObject_tcb) - apply (simp only: exec_gets) - apply (rule_tac P = "(\s. (f x)=v) and tcb_at' t" in monadic_rewrite_refl3) - apply (simp add:) - apply (wp OMG_getObject_tcb | wpc)+ - apply (auto intro: obj_tcb_at') + unfolding getThreadState_def threadGet_def + apply (simp add: liftM_def) + apply monadic_rewrite_symb_exec_l + apply (rule_tac P="\_. f x = v" in monadic_rewrite_pre_imp_eq) + apply blast + apply (wpsimp wp: OMG_getObject_tcb simp: obj_tcb_at')+ done lemma monadic_rewrite_getThreadState: @@ -515,9 +483,6 @@ crunches cteInsert, asUser (wp: setCTE_obj_at'_queued crunch_wps threadSet_obj_at'_really_strongest) end -crunch ksReadyQueues_inv[wp]: cteInsert "\s. P (ksReadyQueues s)" - (wp: hoare_drop_imps) - crunches cteInsert, threadSet, asUser, emptySlot for ksReadyQueuesL1Bitmap_inv[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" and ksReadyQueuesL2Bitmap_inv[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" @@ -535,16 +500,51 @@ lemma setThreadState_runnable_bitmap_inv: \ \s. Q (ksReadyQueuesL2Bitmap s) \ setThreadState ts t \\rv s. Q (ksReadyQueuesL2Bitmap s) \" by (simp_all add: setThreadState_runnable_simp, wp+) +(* FIXME move *) +crunches curDomain + for (no_fail) no_fail[intro!, wp, simp] + +lemma setThreadState_tcbDomain_tcbPriority_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbDomain tcb) (tcbPriority tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps projectKOs) + done + +lemma setThreadState_tcbQueued_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbQueued tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps projectKOs) + done + +lemma setThreadState_tcbFault_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbFault tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps projectKOs) + done + +lemma setThreadState_tcbArch_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbArch tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps projectKOs) + done + lemma fastpath_callKernel_SysCall_corres: "monadic_rewrite True False (invs' and ct_in_state' ((=) Running) and (\s. ksSchedulerAction s = ResumeCurrentThread) - and (\s. 
ksDomainTime s \ 0)) + and (\s. ksDomainTime s \ 0) and ready_qs_runnable) (callKernel (SyscallEvent SysCall)) (fastpaths SysCall)" - supply if_split[split del] if_cong[cong] - apply (rule monadic_rewrite_introduce_alternative) - apply (simp add: callKernel_def) - apply (rule monadic_rewrite_imp) + supply if_cong[cong] option.case_cong[cong] if_split[split del] + supply empty_fail_getMRs[wp] (* FIXME *) + supply empty_fail_getEndpoint[wp] (* FIXME *) + apply (rule monadic_rewrite_introduce_alternative[OF callKernel_def[simplified atomize_eq]]) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_bind_alternative_l, wpsimp) + apply (rule monadic_rewrite_stateAssert) apply (simp add: handleEvent_def handleCall_def handleInvocation_def liftE_bindE_handle bind_assoc getMessageInfo_def) @@ -553,236 +553,224 @@ lemma fastpath_callKernel_SysCall_corres: getMessageInfo_def alternative_bind fastpaths_def cong: if_cong) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) apply (rename_tac msgInfo) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_r - [OF threadGet_inv no_fail_threadGet]) - apply (rename_tac thread msgInfo ptr tcbFault) - apply (rule monadic_rewrite_alternative_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rename_tac tcbFault) + apply (rule monadic_rewrite_alternative_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: split_def Syscall_H.syscall_def liftE_bindE_handle bind_assoc capFaultOnFailure_def) apply (simp only: bindE_bind_linearise[where f="rethrowFailure fn f'" for fn f'] bind_case_sum_rethrow) - apply (simp add: lookupCapAndSlot_def lookupSlotForThread_def + apply (simp add: lookupCapAndSlot_def lookupSlotForThread_def bindE_assoc liftE_bind_return_bindE_returnOk split_def getThreadCSpaceRoot_def locateSlot_conv returnOk_liftE[symmetric] const_def getSlotCap_def) apply (simp only: liftE_bindE_assoc) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_rdonly_bind_l) + apply (rule monadic_rewrite_bind_alternative_l) apply (wp | simp)+ apply (rule_tac fn="case_sum Inl (Inr \ fst)" in monadic_rewrite_split_fn) apply (simp add: liftME_liftM[symmetric] liftME_def bindE_assoc) apply (rule monadic_rewrite_refl) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: isRight_right_map isRight_case_sum) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_rdonly_bind_l[OF lookupIPC_inv]) - apply (rule monadic_rewrite_symb_exec_l[OF lookupIPC_inv empty_fail_lookupIPCBuffer]) + apply (rule monadic_rewrite_bind_alternative_l[OF lookupIPC_inv]) + apply monadic_rewrite_symb_exec_l apply (simp add: lookupExtraCaps_null returnOk_bind liftE_bindE_handle bind_assoc liftE_bindE_assoc decodeInvocation_def Let_def from_bool_0 
performInvocation_def liftE_handle liftE_bind) - apply (rule monadic_rewrite_symb_exec_r [OF getEndpoint_inv no_fail_getEndpoint]) + apply monadic_rewrite_symb_exec_r apply (rename_tac "send_ep") - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: getThreadVSpaceRoot_def locateSlot_conv) - apply (rule monadic_rewrite_symb_exec_r [OF getCTE_inv no_fail_getCTE]) + apply monadic_rewrite_symb_exec_r apply (rename_tac "pdCapCTE") - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r[OF curDomain_inv], - simp only: curDomain_def, rule non_fail_gets) - apply (rename_tac "curDom") - apply (rule monadic_rewrite_symb_exec_r [OF threadGet_inv no_fail_threadGet])+ - apply (rename_tac curPrio destPrio) + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r apply (simp add: isHighestPrio_def') - apply (rule monadic_rewrite_symb_exec_r [OF gets_inv non_fail_gets]) - apply (rename_tac highest) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r [OF gets_inv non_fail_gets]) - apply (rename_tac asidMap) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - - apply (rule monadic_rewrite_symb_exec_r[OF threadGet_inv no_fail_threadGet]) - apply (rename_tac "destDom") - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (rule monadic_rewrite_trans, rule monadic_rewrite_pick_alternative_1) - apply (rule monadic_rewrite_symb_exec_l[OF get_mrs_inv' empty_fail_getMRs]) + apply monadic_rewrite_symb_exec_l (* now committed to fastpath *) apply (rule monadic_rewrite_trans) - apply (rule_tac F=True and E=True in monadic_rewrite_weaken) + apply (rule_tac F=True and E=True in monadic_rewrite_weaken_flags) apply simp apply (rule monadic_rewrite_bind_tail) - apply (rule_tac x=thread in monadic_rewrite_symb_exec, - (wp empty_fail_getCurThread)+) - apply (simp add: sendIPC_def bind_assoc) - apply (rule_tac x=send_ep in monadic_rewrite_symb_exec, - (wp empty_fail_getEndpoint getEndpoint_obj_at')+) - apply (rule_tac P="epQueue send_ep \ []" in monadic_rewrite_gen_asm) - apply (simp add: isRecvEP_endpoint_case list_case_helper bind_assoc) - apply (rule monadic_rewrite_bind_tail) - apply (elim conjE) - apply (rule monadic_rewrite_bind_tail, rename_tac dest_st) - apply (rule_tac P="\gr. 
dest_st = BlockedOnReceive (capEPPtr (fst (theRight rv))) gr" - in monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_symb_exec2, (wp | simp)+) - apply (rule monadic_rewrite_bind) - apply clarsimp - apply (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite) + apply (monadic_rewrite_symb_exec_l_known thread) + apply (simp add: sendIPC_def bind_assoc) + apply (monadic_rewrite_symb_exec_l_known send_ep) + apply (rule_tac P="epQueue send_ep \ []" in monadic_rewrite_gen_asm) + apply (simp add: isRecvEP_endpoint_case list_case_helper bind_assoc) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind) - apply (rule_tac destPrio=destPrio - and curDom=curDom and destDom=destDom and thread=thread - in possibleSwitchTo_rewrite) + apply (elim conjE) + apply (rule monadic_rewrite_bind_tail, rename_tac dest_st) + apply (rule_tac P="\gr. dest_st = BlockedOnReceive (capEPPtr (fst (theRight rv))) gr" + in monadic_rewrite_gen_asm) + apply monadic_rewrite_symb_exec_l_drop apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_trans) - apply (rule setupCallerCap_rewrite) - apply (rule monadic_rewrite_bind_head) - apply (rule setThreadState_rewrite_simple, simp) - apply (rule monadic_rewrite_trans) - apply (rule_tac x=BlockedOnReply in monadic_rewrite_symb_exec, - (wp empty_fail_getThreadState)+) - apply simp - apply (rule monadic_rewrite_refl) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_head) - apply (rule_tac t="hd (epQueue send_ep)" - in schedule_rewrite_ct_not_runnable') - apply (simp add: bind_assoc) + apply clarsimp + apply (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite) apply (rule monadic_rewrite_bind_tail) apply (rule monadic_rewrite_bind) - apply (rule switchToThread_rewrite) + apply (rule_tac destPrio=destPrio + and curDom=curDom and destDom=destDom and thread=thread + in possibleSwitchTo_rewrite) apply (rule monadic_rewrite_bind) - apply (rule activateThread_simple_rewrite) - apply (rule monadic_rewrite_refl) - apply wp - apply (wp setCurThread_ct_in_state) - apply (simp only: st_tcb_at'_def[symmetric]) - apply (wp, clarsimp simp: cur_tcb'_def ct_in_state'_def) - apply (simp add: getThreadCallerSlot_def getThreadReplySlot_def - locateSlot_conv ct_in_state'_def cur_tcb'_def) - - apply ((wp assert_inv threadSet_pred_tcb_at_state - cteInsert_obj_at'_not_queued - | wps)+)[1] - - apply (wp fastpathBestSwitchCandidate_lift[where f="cteInsert c w w'" for c w w']) + apply (rule monadic_rewrite_trans) + apply (rule setupCallerCap_rewrite) + apply (rule monadic_rewrite_bind_head) + apply (rule setThreadState_rewrite_simple, simp) + apply (rule monadic_rewrite_trans) + apply (monadic_rewrite_symb_exec_l_known BlockedOnReply) + apply simp + apply (rule monadic_rewrite_refl) + apply wpsimp + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_bind_head) + apply (rule_tac t="hd (epQueue send_ep)" + in schedule_rewrite_ct_not_runnable') + apply (simp add: bind_assoc) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_bind) + apply (rule switchToThread_rewrite) + apply (rule monadic_rewrite_bind) + apply (rule activateThread_simple_rewrite) + apply (rule monadic_rewrite_refl) + apply wp + apply (wp setCurThread_ct_in_state) + apply (simp only: st_tcb_at'_def[symmetric]) + apply (wp, clarsimp simp: cur_tcb'_def ct_in_state'_def) + apply (simp add: getThreadCallerSlot_def getThreadReplySlot_def + locateSlot_conv ct_in_state'_def cur_tcb'_def) + + apply ((wp assert_inv threadSet_pred_tcb_at_state + 
cteInsert_obj_at'_not_queued + | wps)+)[1] + + apply (wp fastpathBestSwitchCandidate_lift[where f="cteInsert c w w'" for c w w']) + apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] + apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] + apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] - apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] - apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] - apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] - apply (wp fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t]) - apply simp - apply ((wp assert_inv threadSet_pred_tcb_at_state - cteInsert_obj_at'_not_queued - | wps)+)[1] - apply (simp add: setSchedulerAction_def) - apply wp[1] - apply (simp cong: if_cong HOL.conj_cong add: if_bool_simps) - apply (simp_all only:)[5] - apply ((wp setThreadState_oa_queued[of _ "\a _ _. \ a"] - setThreadState_obj_at_unchanged - asUser_obj_at_unchanged mapM_x_wp' - sts_st_tcb_at'_cases - setThreadState_no_sch_change - setEndpoint_obj_at_tcb' - fastpathBestSwitchCandidate_lift[where f="setThreadState f t" for f t] - setThreadState_oa_queued - fastpathBestSwitchCandidate_lift[where f="asUser t f" for f t] - fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] - lookupBitmapPriority_lift - setThreadState_runnable_bitmap_inv - | simp add: setMessageInfo_def - | wp (once) hoare_vcg_disj_lift)+) - + apply (wpsimp wp: fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t]) + apply ((wp assert_inv threadSet_pred_tcb_at_state + cteInsert_obj_at'_not_queued + | wps)+)[1] + apply (simp add: setSchedulerAction_def) + apply wp[1] + apply (simp cong: if_cong HOL.conj_cong add: if_bool_simps) + apply (simp_all only:)[5] + apply ((wp asUser_obj_at_unchanged mapM_x_wp' + sts_st_tcb_at'_cases + setThreadState_no_sch_change + setEndpoint_obj_at_tcb' + fastpathBestSwitchCandidate_lift[where f="setThreadState f t" for f t] + fastpathBestSwitchCandidate_lift[where f="asUser t f" for f t] + fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] + lookupBitmapPriority_lift + setThreadState_runnable_bitmap_inv + getEndpoint_obj_at' + | simp add: setMessageInfo_def obj_at'_conj + | wp (once) hoare_vcg_disj_lift)+) apply (simp add: setThreadState_runnable_simp getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv bind_assoc) - apply (rule_tac P="\v. obj_at' (%tcb. tcbIPCBuffer tcb = v) (hd (epQueue send_ep))" - in monadic_rewrite_exists_v) - apply (rename_tac ipcBuffer) - apply (rule_tac P="\v. obj_at' (\tcb. tcbState tcb = v) (hd (epQueue send_ep))" - in monadic_rewrite_exists_v) - apply (rename_tac destState) - - apply (simp add: switchToThread_def bind_assoc) - (* retrieving state or thread registers is not thread_action_isolatable, + apply (rule_tac P="\v. obj_at' (%tcb. tcbIPCBuffer tcb = v) (hd (epQueue send_ep))" + in monadic_rewrite_exists_v) + apply (rename_tac ipcBuffer) + + apply (rule_tac P="\v. obj_at' (\tcb. 
tcbState tcb = v) (hd (epQueue send_ep))" + in monadic_rewrite_exists_v) + apply (rename_tac destState) + + apply (simp add: ARM_H.switchToThread_def bind_assoc) + (* retrieving state or thread registers is not thread_action_isolatable, translate into return with suitable precondition *) - apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule_tac v=destState in monadic_rewrite_getThreadState - | rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ - apply (rule_tac v=destState in monadic_rewrite_getThreadState - | rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ - - apply (rule_tac P="inj (case_bool thread (hd (epQueue send_ep)))" - in monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_weaken[where F=False and E=True], simp) - apply (rule isolate_thread_actions_rewrite_bind - fastpath_isolate_rewrites fastpath_isolatables - bool.simps setRegister_simple - setVMRoot_isolatable[THEN thread_actions_isolatableD] setVMRoot_isolatable - doMachineOp_isolatable[THEN thread_actions_isolatableD] doMachineOp_isolatable - kernelExitAssertions_isolatable[THEN thread_actions_isolatableD] - kernelExitAssertions_isolatable - zipWithM_setRegister_simple - thread_actions_isolatable_bind + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + apply (rule_tac v=destState in monadic_rewrite_getThreadState + | rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ + apply (rule_tac v=destState in monadic_rewrite_getThreadState + | rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ + + apply (rule_tac P="inj (case_bool thread (hd (epQueue send_ep)))" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) + apply (rule isolate_thread_actions_rewrite_bind + fastpath_isolate_rewrites fastpath_isolatables + bool.simps setRegister_simple_modify_registers + zipWithM_setRegister_simple_modify_registers + setVMRoot_isolatable[THEN thread_actions_isolatableD] setVMRoot_isolatable + doMachineOp_isolatable[THEN thread_actions_isolatableD] doMachineOp_isolatable + kernelExitAssertions_isolatable[THEN thread_actions_isolatableD] + kernelExitAssertions_isolatable + thread_actions_isolatable_bind | assumption | wp assert_inv)+ - apply (rule_tac P="\s. 
ksSchedulerAction s = ResumeCurrentThread - \ tcb_at' thread s" - and F=True and E=False in monadic_rewrite_weaken) - apply simp - apply (rule monadic_rewrite_isolate_final) - apply (simp add: isRight_case_sum cong: list.case_cong) - apply (clarsimp simp: fun_eq_iff if_flip - cong: if_cong) - apply (drule obj_at_ko_at', clarsimp) - apply (frule get_tcb_state_regs_ko_at') - apply (clarsimp simp: zip_map2 zip_same_conv_map foldl_map - foldl_fun_upd - foldr_copy_register_tsrs - isRight_case_sum - cong: if_cong) - apply (simp add: upto_enum_def fromEnum_def - enum_register toEnum_def - msgRegisters_unfold - cong: if_cong) - apply (clarsimp split: if_split) - apply (rule ext) - apply (simp add: badgeRegister_def msgInfoRegister_def - ARM.badgeRegister_def - ARM.msgInfoRegister_def - split: if_split) - apply simp - apply (wp | simp cong: if_cong bool.case_cong - | rule getCTE_wp' gts_wp' threadGet_wp - getEndpoint_wp)+ + apply (rule_tac P="\s. ksSchedulerAction s = ResumeCurrentThread + \ tcb_at' thread s" + and F=True and E=False in monadic_rewrite_weaken_flags) + apply simp + apply (rule monadic_rewrite_isolate_final) + apply (simp add: isRight_case_sum cong: list.case_cong) + apply (clarsimp simp: fun_eq_iff if_flip + cong: if_cong) + apply (drule obj_at_ko_at', clarsimp) + apply (frule get_tcb_state_regs_ko_at') + apply (clarsimp simp: zip_map2 zip_same_conv_map foldl_map + foldl_fun_upd + foldr_copy_register_tsrs + isRight_case_sum + cong: if_cong) + apply (simp add: upto_enum_def fromEnum_def + enum_register toEnum_def + msgRegisters_unfold + cong: if_cong) + apply (clarsimp split: if_split) + apply (rule ext) + apply (simp add: badgeRegister_def msgInfoRegister_def + ARM.badgeRegister_def + ARM.msgInfoRegister_def + split: if_split) + apply simp + apply (wp | simp cong: if_cong bool.case_cong + | rule getCTE_wp' gts_wp' threadGet_wp + getEndpoint_wp)+ apply (rule validE_cases_valid) apply (simp add: isRight_def getSlotCap_def) apply (wp getCTE_wp') @@ -823,16 +811,17 @@ lemma fastpath_callKernel_SysCall_corres: prefer 2 apply normalise_obj_at' apply clarsimp - apply (frule_tac t="blockedThread" in valid_queues_not_runnable_not_queued, assumption) - subgoal by (fastforce simp: st_tcb_at'_def elim: obj_at'_weakenE) apply (subgoal_tac "fastpathBestSwitchCandidate blockedThread s") prefer 2 apply (rule_tac ttcb=tcbb and ctcb=tcb in fastpathBestSwitchCandidateI) apply (solves \simp only: disj_ac\) apply simp+ - apply (clarsimp simp: st_tcb_at'_def obj_at'_def objBits_simps projectKOs - valid_mdb'_def valid_mdb_ctes_def inj_case_bool - split: bool.split)+ + apply (clarsimp simp: st_tcb_at'_def obj_at'_def objBits_simps projectKOs valid_mdb'_def + valid_mdb_ctes_def inj_case_bool + split: bool.split)+ + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x=blockedThread in spec) + apply (clarsimp simp: obj_at'_def projectKOs st_tcb_at'_def objBits_simps) done lemma capability_case_Null_ReplyCap: @@ -868,11 +857,9 @@ lemma doReplyTransfer_simple: od)" apply (simp add: doReplyTransfer_def liftM_def nullPointer_def getSlotCap_def) apply (rule monadic_rewrite_bind_tail)+ - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_threadGet)+) - apply (rule_tac P="rv = None" in monadic_rewrite_gen_asm, simp) + apply (monadic_rewrite_symb_exec_l_known None, simp) apply (rule monadic_rewrite_refl) - apply (wp threadGet_const gts_wp' getCTE_wp')+ - apply (simp add: o_def) + apply (wpsimp wp: threadGet_const gts_wp' getCTE_wp' simp: o_def)+ done lemma receiveIPC_simple_rewrite: @@ -884,44 
+871,39 @@ lemma receiveIPC_simple_rewrite: setThreadState (BlockedOnReceive (capEPPtr ep_cap) (capEPCanGrant ep_cap)) thread; setEndpoint (capEPPtr ep_cap) (RecvEP (case ep of RecvEP q \ (q @ [thread]) | _ \ [thread])) od)" + supply empty_fail_getEndpoint[wp] apply (rule monadic_rewrite_gen_asm) apply (simp add: receiveIPC_def) - apply (rule monadic_rewrite_imp) - apply (rule_tac rv=ep in monadic_rewrite_symb_exec_l_known, - (wp empty_fail_getEndpoint)+) - apply (rule monadic_rewrite_symb_exec_l, (wp | simp add: getBoundNotification_def)+) - apply (rule monadic_rewrite_symb_exec_l) - apply (rule hoare_pre, wpc, wp+, simp) - apply (simp split: option.split) - apply (rule monadic_rewrite_trans, rule monadic_rewrite_if_known[where X=False], simp) - apply (rule monadic_rewrite_refl3[where P=\]) - apply (cases ep, simp_all add: isSendEP_def)[1] - apply (wp getNotification_wp gbn_wp' getEndpoint_wp | wpc)+ + apply (monadic_rewrite_symb_exec_l_known ep) + apply monadic_rewrite_symb_exec_l+ + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + apply (rule monadic_rewrite_is_refl) + apply (cases ep; simp add: isSendEP_def) + apply (wpsimp wp: getNotification_wp gbn_wp' getEndpoint_wp + simp: getBoundNotification_def)+ apply (clarsimp simp: obj_at'_def projectKOs pred_tcb_at'_def) done lemma empty_fail_isFinalCapability: "empty_fail (isFinalCapability cte)" - by (simp add: isFinalCapability_def Let_def split: if_split) + by (simp add: isFinalCapability_def Let_def empty_fail_cond split: if_split) lemma cteDeleteOne_replycap_rewrite: "monadic_rewrite True False (cte_wp_at' (\cte. isReplyCap (cteCap cte)) slot) (cteDeleteOne slot) (emptySlot slot NullCap)" + supply isFinalCapability_inv[wp] empty_fail_isFinalCapability[wp] (* FIXME *) apply (simp add: cteDeleteOne_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) - apply (rule_tac P="cteCap rv \ NullCap \ isReplyCap (cteCap rv) - \ \ isEndpointCap (cteCap rv) - \ \ isNotificationCap (cteCap rv)" - in monadic_rewrite_gen_asm) - apply (simp add: finaliseCapTrue_standin_def - capRemovable_def) - apply (rule monadic_rewrite_symb_exec_l, - (wp isFinalCapability_inv empty_fail_isFinalCapability)+) - apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp')+ + apply (rule monadic_rewrite_symb_exec_l) + apply (rule_tac P="cteCap cte \ NullCap \ isReplyCap (cteCap cte) + \ \ isEndpointCap (cteCap cte) + \ \ isNotificationCap (cteCap cte)" + in monadic_rewrite_gen_asm) + apply (simp add: finaliseCapTrue_standin_def capRemovable_def) + apply monadic_rewrite_symb_exec_l + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp')+ apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) done @@ -930,14 +912,10 @@ lemma cteDeleteOne_nullcap_rewrite: (cte_wp_at' (\cte. 
cteCap cte = NullCap) slot) (cteDeleteOne slot) (return ())" - apply (simp add: cteDeleteOne_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) - apply (rule_tac P="cteCap rv = NullCap" in monadic_rewrite_gen_asm) - apply simp - apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp') - apply (clarsimp simp: cte_wp_at_ctes_of) + apply (simp add: cteDeleteOne_def unless_def when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: getCTE_wp'\) + apply (monadic_rewrite_symb_exec_l, rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ done lemma deleteCallerCap_nullcap_rewrite: @@ -947,12 +925,9 @@ lemma deleteCallerCap_nullcap_rewrite: (return ())" apply (simp add: deleteCallerCap_def getThreadCallerSlot_def locateSlot_conv getSlotCap_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) - apply (rule monadic_rewrite_assert) - apply (rule cteDeleteOne_nullcap_rewrite) - apply (wp getCTE_wp) - apply (clarsimp simp: cte_wp_at_ctes_of) + apply (monadic_rewrite_l cteDeleteOne_nullcap_rewrite \wpsimp wp: getCTE_wp\) + apply (monadic_rewrite_symb_exec_l+, rule monadic_rewrite_refl) + apply (wpsimp simp: cte_wp_at_ctes_of)+ done lemma emptySlot_cnode_caps: @@ -963,7 +938,7 @@ lemma emptySlot_cnode_caps: o_assoc[symmetric] cteCaps_of_def[symmetric]) apply (wp emptySlot_cteCaps_of) apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of - elim!: rsubst[where P=P] intro!: ext + elim!: rsubst[where P=P] del: ext intro!: ext split: if_split) done @@ -993,36 +968,26 @@ lemma setCTE_obj_at_ntfn[wp]: crunch obj_at_ep[wp]: emptySlot "obj_at' (P :: endpoint \ bool) p" -crunch nosch[wp]: emptySlot "\s. P (ksSchedulerAction s)" - crunches emptySlot, asUser for gsCNodes[wp]: "\s. P (gsCNodes s)" (wp: crunch_wps) -crunch cte_wp_at'[wp]: possibleSwitchTo "cte_wp_at' P p" - (wp: hoare_drop_imps) - crunch tcbContext[wp]: possibleSwitchTo "obj_at' (\tcb. P ( (atcbContextGet o tcbArch) tcb)) t" (wp: crunch_wps simp_del: comp_apply) crunch only_cnode_caps[wp]: doFaultTransfer "\s. 
P (only_cnode_caps (ctes_of s))" (wp: crunch_wps simp: crunch_simps) -lemma tcbSchedDequeue_rewrite_not_queued: "monadic_rewrite True False (tcb_at' t and obj_at' (Not \ tcbQueued) t) (tcbSchedDequeue t) (return ())" +(* FIXME: monadic_rewrite_l does not work with stateAssert here *) +lemma tcbSchedDequeue_rewrite_not_queued: + "monadic_rewrite True False (tcb_at' t and obj_at' (Not \ tcbQueued) t) + (tcbSchedDequeue t) (return ())" apply (simp add: tcbSchedDequeue_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ queued" in monadic_rewrite_gen_asm) - apply (simp add: when_def) + apply wp_pre + apply monadic_rewrite_symb_exec_l + apply (monadic_rewrite_symb_exec_l_known False, simp) apply (rule monadic_rewrite_refl) - apply (wp threadGet_const) - - apply (rule monadic_rewrite_symb_exec_l) - apply wp+ - apply (rule monadic_rewrite_refl) - apply (wp) - apply (clarsimp simp: o_def obj_at'_def) + apply (wpsimp wp: threadGet_const)+ done lemma schedule_known_rewrite: @@ -1041,60 +1006,31 @@ lemma schedule_known_rewrite: supply subst_all[simp del] if_split[split del] apply (simp add: schedule_def) apply (simp only: Thread_H.switchToThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="action = SwitchToThread t" in monadic_rewrite_gen_asm, simp) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ wasRunnable \ action = SwitchToThread t" in monadic_rewrite_gen_asm,simp) - apply (rule monadic_rewrite_bind_tail, rename_tac idleThread) - apply (rule monadic_rewrite_bind_tail, rename_tac targetPrio) - apply (rule monadic_rewrite_bind_tail, rename_tac curPrio) - apply (rule monadic_rewrite_bind_tail, rename_tac fastfail) - apply (rule monadic_rewrite_bind_tail, rename_tac curDom) - apply (rule monadic_rewrite_bind_tail, rename_tac highest) - apply (rule_tac P="\ (fastfail \ \ highest)" in monadic_rewrite_gen_asm, simp only:) - apply simp - apply (simp add: bind_assoc) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_trans) - apply (rule tcbSchedDequeue_rewrite_not_queued) - apply (rule monadic_rewrite_refl) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_refl) - apply (wpsimp wp: Arch_switchToThread_obj_at_pre)+ - apply (wp hoare_vcg_imp_lift)+ - apply (simp add: isHighestPrio_def') - apply wp+ - apply (wp hoare_vcg_disj_lift) - apply (wp scheduleSwitchThreadFastfail_False_wp) - apply wp+ - apply (wp hoare_vcg_disj_lift threadGet_wp'') - apply (wp hoare_vcg_disj_lift threadGet_wp'') - apply clarsimp - apply wp - apply (simp add: comp_def) - apply wp - apply wp - apply wp - (* remove no-ops, somewhat by magic *) - apply (rule monadic_rewrite_symb_exec_l'_TT, solves wp, - wpsimp wp: empty_fail_isRunnable simp: isHighestPrio_def')+ - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_l) - apply simp+ - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp+ - apply (clarsimp simp: ct_in_state'_def) - apply (rule conjI) - apply (rule not_pred_tcb_at'_strengthen, assumption) - apply normalise_obj_at' - apply (simp add: fastpathBestSwitchCandidate_def) + (* switching to t *) + apply (monadic_rewrite_l sched_act_SwitchToThread_rewrite[where t=t]) + (* not wasRunnable, skip enqueue *) + apply (simp 
add: when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + (* fastpath: \ (fastfail \ \ highest) *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + (* fastpath: no scheduleChooseNewThread *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + apply (simp add: bind_assoc) + apply (monadic_rewrite_l tcbSchedDequeue_rewrite_not_queued + \wpsimp wp: Arch_switchToThread_obj_at_pre\) + (* remove no-ops *) + apply simp + apply (repeat 13 \rule monadic_rewrite_symb_exec_l\) (* until switchToThread *) + apply (rule monadic_rewrite_refl) + apply (wpsimp simp: isHighestPrio_def')+ + apply (clarsimp simp: ct_in_state'_def not_pred_tcb_at'_strengthen + fastpathBestSwitchCandidate_def) apply normalise_obj_at' done @@ -1119,7 +1055,6 @@ lemma emptySlot_cte_wp_at_cteCap: lemma setEndpoint_getCTE_pivot[unfolded K_bind_def]: "do setEndpoint p val; v <- getCTE slot; f v od = do v <- getCTE slot; setEndpoint p val; f v od" - supply word_neq_0_conv[simp del] apply (simp add: getCTE_assert_opt setEndpoint_def setObject_modify_assert fun_eq_iff bind_assoc) @@ -1134,7 +1069,7 @@ lemma setEndpoint_setCTE_pivot[unfolded K_bind_def]: supply if_split[split del] apply (rule monadic_rewrite_to_eq) apply simp - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans, rule_tac f="ep_at' p" in monadic_rewrite_add_gets) apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets, @@ -1159,11 +1094,12 @@ lemma setEndpoint_setCTE_pivot[unfolded K_bind_def]: | simp)+ apply (rule_tac P="\s. epat = ep_at' p s \ cteat = real_cte_at' slot s \ tcbat = (tcb_at' (slot && ~~ mask 9) and (%y. 
slot && mask 9 : dom tcb_cte_cases)) s" - in monadic_rewrite_refl3) + in monadic_rewrite_pre_imp_eq) apply (simp add: setEndpoint_def setObject_modify_assert bind_assoc exec_gets assert_def exec_modify split: if_split) apply (auto split: if_split simp: obj_at'_def projectKOs objBits_defs + del: ext intro!: arg_cong[where f=f] ext kernel_state.fold_congs)[1] apply wp+ apply (simp add: objBits_defs) @@ -1222,23 +1158,18 @@ lemma emptySlot_setEndpoint_pivot[unfolded K_bind_def]: case_Null_If Retype_H.postCapDeletion_def setEndpoint_clearUntypedFreeIndex_pivot split: if_split - | rule bind_apply_cong[OF refl])+ + | rule bind_apply_cong[OF refl])+ done lemma set_getCTE[unfolded K_bind_def]: "do setCTE p cte; v <- getCTE p; f v od = do setCTE p cte; f cte od" - apply simp + apply (simp add: getCTE_assert_opt bind_assoc) apply (rule monadic_rewrite_to_eq) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_bind_tail) - apply (simp add: getCTE_assert_opt bind_assoc) - apply (rule monadic_rewrite_trans, - rule_tac rv="Some cte" in monadic_rewrite_gets_known) - apply (simp add: assert_opt_def) - apply (rule monadic_rewrite_refl) - apply wp - apply simp + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l) + apply (monadic_rewrite_symb_exec_l_known cte, rule monadic_rewrite_refl) + apply (wpsimp simp: assert_opt_def wp: gets_wp)+ done lemma set_setCTE[unfolded K_bind_def]: @@ -1246,7 +1177,7 @@ lemma set_setCTE[unfolded K_bind_def]: supply if_split[split del] apply simp apply (rule monadic_rewrite_to_eq) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans, rule_tac f="real_cte_at' p" in monadic_rewrite_add_gets) apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets, @@ -1272,9 +1203,10 @@ lemma set_setCTE[unfolded K_bind_def]: (\ getF setF. tcb_cte_cases (p && mask 9) = Some (getF, setF) \ (\ f g tcb. 
setF f (setF g tcb) = setF (f o g) tcb)))" in monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_refl2) + apply (rule monadic_rewrite_is_refl[OF ext]) apply (simp add: exec_modify split: if_split) apply (auto simp: simpler_modify_def projectKO_opt_tcb objBits_defs + del: ext intro!: kernel_state.fold_congs ext split: if_split)[1] apply wp+ @@ -1285,7 +1217,7 @@ lemma set_setCTE[unfolded K_bind_def]: lemma setCTE_updateCapMDB: "p \ 0 \ setCTE p cte = do updateCap p (cteCap cte); updateMDB p (const (cteMDBNode cte)) od" - supply if_split[split del] word_neq_0_conv[simp del] + supply if_split[split del] apply (simp add: updateCap_def updateMDB_def bind_assoc set_getCTE cte_overwrite set_setCTE) apply (simp add: getCTE_assert_opt setCTE_assert_modify bind_assoc) @@ -1304,13 +1236,9 @@ lemma clearUntypedFreeIndex_simple_rewrite: apply (simp add: clearUntypedFreeIndex_def getSlotCap_def) apply (rule monadic_rewrite_name_pre) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (rule monadic_rewrite_imp) - apply (rule_tac rv=cte in monadic_rewrite_symb_exec_l_known, wp+) - apply (simp split: capability.split, - strengthen monadic_rewrite_refl, simp) - apply clarsimp - apply (wp getCTE_wp') - apply (clarsimp simp: cte_wp_at_ctes_of) + apply (monadic_rewrite_symb_exec_l_known cte) + apply (simp split: capability.split, strengthen monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ done lemma emptySlot_replymaster_rewrite[OF refl]: @@ -1329,57 +1257,48 @@ lemma emptySlot_replymaster_rewrite[OF refl]: o mdbRevocable_update (K True)); setCTE slot makeObject od)" - supply if_split[split del] word_neq_0_conv[simp del] + supply if_split[split del] apply (rule monadic_rewrite_gen_asm)+ - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule_tac P="slot \ 0" in monadic_rewrite_gen_asm) apply (clarsimp simp: emptySlot_def setCTE_updateCapMDB) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_head) - apply (rule clearUntypedFreeIndex_simple_rewrite) - apply simp - apply (rule_tac rv=cte in monadic_rewrite_symb_exec_l_known, (wp empty_fail_getCTE)+) + apply (monadic_rewrite_l clearUntypedFreeIndex_simple_rewrite, simp) + apply (monadic_rewrite_symb_exec_l_known cte) apply (simp add: updateMDB_def Let_def bind_assoc makeObject_cte case_Null_If) apply (rule monadic_rewrite_bind_tail) apply (rule monadic_rewrite_bind) apply (rule_tac P="mdbFirstBadged (cteMDBNode ctea) \ mdbRevocable (cteMDBNode ctea)" - in monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_refl2) + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_is_refl) apply (case_tac ctea, rename_tac mdbnode, case_tac mdbnode) apply simp apply (simp add: Retype_H.postCapDeletion_def) apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp')+ + apply (solves wp | wp getCTE_wp')+ apply (clarsimp simp: cte_wp_at_ctes_of reply_masters_rvk_fb_def) apply (fastforce simp: isCap_simps) done lemma all_prio_not_inQ_not_tcbQueued: "\ obj_at' (\a. (\d p. \ inQ d p a)) t s \ \ obj_at' (\a. \ tcbQueued a) t s" apply (clarsimp simp: obj_at'_def inQ_def) -done + done crunches setThreadState, emptySlot, asUser for ntfn_obj_at[wp]: "obj_at' (P::(Structures_H.notification \ bool)) ntfnptr" (wp: obj_at_setObject2 crunch_wps simp: crunch_simps updateObject_default_def in_monad) -lemma st_tcb_at_is_Reply_imp_not_tcbQueued: "\s t.\ invs' s; st_tcb_at' isReply t s\ \ obj_at' (\a. 
\ tcbQueued a) t s" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def st_tcb_at'_def valid_queues_no_bitmap_def) - apply (rule all_prio_not_inQ_not_tcbQueued) - apply (clarsimp simp: obj_at'_def) - apply (erule_tac x="d" in allE) - apply (erule_tac x="p" in allE) - apply (erule conjE) - apply (erule_tac x="t" in ballE) - apply (clarsimp simp: obj_at'_def runnable'_def isReply_def) - apply (case_tac "tcbState obj") - apply ((clarsimp simp: inQ_def)+)[8] - apply (clarsimp simp: valid_queues'_def obj_at'_def) -done +lemma st_tcb_at_is_Reply_imp_not_tcbQueued: + "\s t. \ ready_qs_runnable s; st_tcb_at' isReply t s\ \ obj_at' (\tcb. \ tcbQueued tcb) t s" + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x=t in spec) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def isReply_def) + apply (case_tac "tcbState obj"; clarsimp) + done lemma valid_objs_ntfn_at_tcbBoundNotification: "ko_at' tcb t s \ valid_objs' s \ tcbBoundNotification tcb \ None - \ ntfn_at' (the (tcbBoundNotification tcb)) s" + \ ntfn_at' (the (tcbBoundNotification tcb)) s" apply (drule(1) ko_at_valid_objs', simp add: projectKOs) apply (simp add: valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def) apply clarsimp @@ -1403,7 +1322,7 @@ lemma resolveAddressBitsFn_eq_name_slot: \ valid_objs' s \ cnode_caps_gsCNodes' s) (resolveAddressBits cap capptr bits) (gets (resolveAddressBitsFn cap capptr bits o only_cnode_caps o ctes_of))" - apply (rule monadic_rewrite_imp, rule resolveAddressBitsFn_eq) + apply (rule monadic_rewrite_guard_imp, rule resolveAddressBitsFn_eq) apply auto done @@ -1430,7 +1349,7 @@ lemma tcbSchedEnqueue_tcbIPCBuffer: "\obj_at' (\tcb. P (tcbIPCBuffer tcb)) t\ tcbSchedEnqueue t' \\_. obj_at' (\tcb. P (tcbIPCBuffer tcb)) t\" - apply (simp add: tcbSchedEnqueue_def unless_when) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_when) apply (wp threadSet_obj_at' hoare_drop_imps threadGet_wp |simp split: if_split)+ done @@ -1452,19 +1371,33 @@ end crunch obj_at'_tcbIPCBuffer[wp]: emptySlot "obj_at' (\tcb. P (tcbIPCBuffer tcb)) t" (wp: crunch_wps) +(* FIXME move *) +crunches getBoundNotification + for (no_fail) no_fail[intro!, wp, simp] + +lemma threadSet_tcb_at'[wp]: + "threadSet f t' \\s. P (tcb_at' addr s)\" + apply (wpsimp wp: threadSet_wp) + apply (erule rsubst[where P=P]) + by (clarsimp simp: obj_at'_def projectKOs ps_clear_upd objBits_simps) + +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for tcb''[wp]: "\s. P (tcb_at' addr s)" + (wp: crunch_wps) + lemma fastpath_callKernel_SysReplyRecv_corres: "monadic_rewrite True False (invs' and ct_in_state' ((=) Running) and (\s. 
ksSchedulerAction s = ResumeCurrentThread) - and cnode_caps_gsCNodes') + and cnode_caps_gsCNodes' and ready_qs_runnable) (callKernel (SyscallEvent SysReplyRecv)) (fastpaths SysReplyRecv)" - including no_pre - supply option.case_cong_weak[cong del] - supply if_cong[cong] - supply word_neq_0_conv[simp del] + including classic_wp_pre + supply if_cong[cong] option.case_cong[cong] supply if_split[split del] - apply (rule monadic_rewrite_introduce_alternative) - apply (simp add: callKernel_def) - apply (rule monadic_rewrite_imp) + supply user_getreg_inv[wp] (* FIXME *) + apply (rule monadic_rewrite_introduce_alternative[OF callKernel_def[simplified atomize_eq]]) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_bind_alternative_l, wpsimp) + apply (rule monadic_rewrite_stateAssert) apply (simp add: handleEvent_def handleReply_def handleRecv_def liftE_bindE_handle liftE_handle bind_assoc getMessageInfo_def liftE_bind) @@ -1475,17 +1408,16 @@ lemma fastpath_callKernel_SysReplyRecv_corres: locateSlot_conv capability_case_Null_ReplyCap getThreadCSpaceRoot_def cong: if_cong) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac thread msgInfo) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac cptr) - apply (rule monadic_rewrite_symb_exec_r[OF threadGet_inv no_fail_threadGet]) + apply monadic_rewrite_symb_exec_r + apply (rename_tac msgInfo) + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r apply (rename_tac tcbFault) - apply (rule monadic_rewrite_alternative_rhs[rotated]) + apply (rule monadic_rewrite_alternative_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: lookupCap_def liftME_def lookupCapAndSlot_def lookupSlotForThread_def bindE_assoc @@ -1494,18 +1426,15 @@ lemma fastpath_callKernel_SysReplyRecv_corres: capFaultOnFailure_def rethrowFailure_injection injection_handler_catch bind_bindE_assoc getThreadCallerSlot_def bind_assoc - getSlotCap_def - case_bool_If o_def + getSlotCap_def case_bool_If isRight_def[where x="Inr v" for v] isRight_def[where x="Inl v" for v] cong: if_cong) - apply (rule monadic_rewrite_symb_exec_r, wp+) + apply monadic_rewrite_symb_exec_r apply (rename_tac "cTableCTE") - apply (rule monadic_rewrite_transverse, - rule monadic_rewrite_bind_head, - rule resolveAddressBitsFn_eq) - apply (rule monadic_rewrite_symb_exec_r, (wp | simp)+) + monadic_rewrite_l resolveAddressBitsFn_eq wpsimp, rule monadic_rewrite_refl) + apply monadic_rewrite_symb_exec_r apply (rename_tac "rab_ret") apply (rule_tac P="isRight rab_ret" in monadic_rewrite_cases[rotated]) @@ -1514,75 +1443,67 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply clarsimp apply (simp add: isRight_case_sum liftE_bind isRight_def[where x="Inr v" for v]) - apply (rule monadic_rewrite_symb_exec_r, wp+) + apply monadic_rewrite_symb_exec_r apply (rename_tac ep_cap) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r[OF _ _ _ active_ntfn_check_wp, unfolded bind_assoc fun_app_def]) - apply (rule hoare_pre, (wp | wpc | simp)+)[1] - apply (unfold getBoundNotification_def)[1] - apply (wp threadGet_wp) + apply (monadic_rewrite_symb_exec + \rule 
monadic_rewrite_symb_exec_r_nE[OF _ _ _ active_ntfn_check_wp, unfolded bind_assoc fun_app_def]\ + \wpsimp simp: getBoundNotification_def wp: threadGet_wp\) apply (rename_tac ep) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r, wp+) + apply monadic_rewrite_symb_exec_r apply (rename_tac ep) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) apply (rename_tac replyCTE) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: bind_assoc) - apply (rule monadic_rewrite_rdonly_bind_l, wp assert_inv) + apply (rule monadic_rewrite_bind_alternative_l, wp assert_inv) apply (rule monadic_rewrite_assert) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac callerFault) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: getThreadVSpaceRoot_def locateSlot_conv) - apply (rule monadic_rewrite_symb_exec_r, wp+) + apply monadic_rewrite_symb_exec_r apply (rename_tac vTableCTE) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r[OF curDomain_inv], - simp only: curDomain_def, rule non_fail_gets) - apply (rename_tac "curDom") - apply (rule monadic_rewrite_symb_exec_r - [OF threadGet_inv no_fail_threadGet]) - apply (rename_tac callerPrio) + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r apply (simp add: isHighestPrio_def') - apply (rule monadic_rewrite_symb_exec_r [OF gets_inv non_fail_gets]) - apply (rename_tac highest) - apply (rule monadic_rewrite_if_rhs[rotated]) - apply (rule monadic_rewrite_alternative_l) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac asidMap) - apply (rule monadic_rewrite_if_rhs[rotated]) - apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r[OF threadGet_inv no_fail_threadGet]) - apply (rename_tac "callerDom") - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) + apply (rule monadic_rewrite_alternative_l) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (rule monadic_rewrite_trans, rule monadic_rewrite_pick_alternative_1) - apply (rule_tac P="\v. obj_at' (%tcb. tcbIPCBuffer tcb = v) (capTCBPtr (cteCap replyCTE))" - in monadic_rewrite_exists_v) - apply (rename_tac ipcBuffer) + (* now committed to fastpath *) + apply (rule_tac P="\v. obj_at' (%tcb. 
tcbIPCBuffer tcb = v) (capTCBPtr (cteCap replyCTE))" + in monadic_rewrite_exists_v) + apply (rename_tac ipcBuffer) - apply (simp add: switchToThread_def bind_assoc) - apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + apply (simp add: ARM_H.switchToThread_def bind_assoc) + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp mapM_x_wp' getObject_inv | wpc | simp add: - | wp (once) hoare_drop_imps )+ + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp mapM_x_wp' getObject_inv | wpc | simp add: + | wp (once) hoare_drop_imps )+ - apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp - | wp (once) hoare_drop_imps )+ + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp + | wp (once) hoare_drop_imps )+ apply (rule monadic_rewrite_trans) apply (rule monadic_rewrite_trans) @@ -1590,46 +1511,43 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (rule monadic_rewrite_trans) apply (rule doReplyTransfer_simple) apply simp - apply (((rule monadic_rewrite_weaken2, + apply (((rule monadic_rewrite_weaken_flags', (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite | rule_tac destPrio=callerPrio and curDom=curDom and destDom=callerDom and thread=thread in possibleSwitchTo_rewrite)) | rule cteDeleteOne_replycap_rewrite | rule monadic_rewrite_bind monadic_rewrite_refl - | wp assert_inv mapM_x_wp' - setThreadState_obj_at_unchanged + | wp assert_inv mapM_x_wp' sts_valid_objs' asUser_obj_at_unchanged hoare_strengthen_post[OF _ obj_at_conj'[simplified atomize_conjL], rotated] - lookupBitmapPriority_lift - setThreadState_runnable_bitmap_inv + lookupBitmapPriority_lift + setThreadState_runnable_bitmap_inv | simp add: setMessageInfo_def setThreadState_runnable_simp | wp (once) hoare_vcg_disj_lift)+)[1] apply (simp add: setMessageInfo_def) apply (rule monadic_rewrite_bind_tail) - apply (rename_tac unblocked) - apply (rule_tac rv=thread in monadic_rewrite_symb_exec_l_known, - (wp empty_fail_getCurThread)+) - apply (rule_tac rv=cptr in monadic_rewrite_symb_exec_l_known, - (wp empty_fail_asUser empty_fail_getRegister)+) + apply (rename_tac unblocked) + apply (monadic_rewrite_symb_exec_l_known thread) + apply (monadic_rewrite_symb_exec_l_known cptr) apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_catch[OF _ monadic_rewrite_refl True_E_E]) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) + apply (rule monadic_rewrite_catch[OF _ monadic_rewrite_refl wp_post_tautE_E]) + apply monadic_rewrite_symb_exec_l apply (rename_tac cTableCTE2, rule_tac P="cteCap cTableCTE2 = cteCap cTableCTE" - in monadic_rewrite_gen_asm) + in monadic_rewrite_gen_asm) apply simp apply (rule monadic_rewrite_trans, rule monadic_rewrite_bindE[OF _ monadic_rewrite_refl]) apply (rule_tac slot="\s. 
ksCurThread s + 2 ^ cte_level_bits * tcbCTableSlot" - in resolveAddressBitsFn_eq_name_slot) + in resolveAddressBitsFn_eq_name_slot) apply wp apply (rule monadic_rewrite_trans) apply (rule_tac rv=rab_ret - in monadic_rewrite_gets_known[where m="NonDetMonad.lift f" + in monadic_rewrite_gets_known[where m="Nondet_Monad.lift f" for f, folded bindE_def]) - apply (simp add: NonDetMonad.lift_def isRight_case_sum) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) + apply (simp add: Nondet_Monad.lift_def isRight_case_sum) + apply monadic_rewrite_symb_exec_l apply (rename_tac ep_cap2) apply (rule_tac P="cteCap ep_cap2 = cteCap ep_cap" in monadic_rewrite_gen_asm) apply (simp add: cap_case_EndpointCap_NotificationCap) @@ -1641,7 +1559,7 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (wp, simp) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_weaken[where E=True and F=True], simp) + apply (rule monadic_rewrite_weaken_flags[where E=True and F=True], simp) apply (rule setThreadState_rewrite_simple) apply clarsimp apply (wp getCTE_known_cap)+ @@ -1649,7 +1567,7 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (rule_tac t="capTCBPtr (cteCap replyCTE)" and t'=thread in schedule_known_rewrite) - apply (rule monadic_rewrite_weaken[where E=True and F=True], simp) + apply (rule monadic_rewrite_weaken_flags[where E=True and F=True], simp) apply (rule monadic_rewrite_bind) apply (rule activateThread_simple_rewrite) apply (rule monadic_rewrite_refl) @@ -1659,88 +1577,76 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply ((wp setCurThread_ct_in_state[folded st_tcb_at'_def] Arch_switchToThread_pred_tcb')+)[2] apply (simp add: catch_liftE) - apply (wp setEndpoint_obj_at_tcb' threadSet_pred_tcb_at_state[unfolded if_bool_eq_conj]) - - apply (wp setEndpoint_obj_at_tcb' - threadSet_pred_tcb_at_state[unfolded if_bool_eq_conj] - fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] - fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t] - | simp - | rule hoare_lift_Pf2[where f=ksCurThread, OF _ setEndpoint_ct'] - hoare_lift_Pf2[where f=ksCurThread, OF _ threadSet_ct])+ - - apply (simp cong: rev_conj_cong) - apply (strengthen imp_consequent[where Q="tcb_at' t s" for t s]) - apply (unfold setSchedulerAction_def)[3] - apply ((wp setThreadState_oa_queued user_getreg_rv setThreadState_no_sch_change - setThreadState_obj_at_unchanged - sts_st_tcb_at'_cases sts_bound_tcb_at' - emptySlot_obj_at'_not_queued - emptySlot_cte_wp_at_cteCap - emptySlot_cnode_caps - user_getreg_inv asUser_typ_ats - asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' - static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift - hoare_vcg_ex_lift - | simp del: comp_apply - | clarsimp simp: obj_at'_weakenE[OF _ TrueI])+) - - apply (rule hoare_lift_Pf2[where f=ksCurThread, OF _ setThreadState_ct']) - apply (wp setThreadState_oa_queued - fastpathBestSwitchCandidate_lift[where f="setThreadState f t" for f t]) - apply (simp add: setThreadState_runnable_simp) - apply (wp threadSet_tcbState_st_tcb_at') - apply (clarsimp simp del: comp_apply) - apply (wp emptySlot_obj_at_ep)+ - - apply ((wp setThreadState_oa_queued user_getreg_rv - setThreadState_no_sch_change - setThreadState_obj_at_unchanged - sts_st_tcb_at'_cases sts_bound_tcb_at' - emptySlot_obj_at'_not_queued - emptySlot_cte_wp_at_cteCap - emptySlot_cnode_caps - user_getreg_inv asUser_typ_ats - asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' - static_imp_wp hoare_vcg_all_lift 
hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift - hoare_vcg_ex_lift - | simp del: comp_apply - | clarsimp simp: obj_at'_weakenE[OF _ TrueI] - | solves \ - rule hoare_lift_Pf2[where f=ksCurThread, OF _ emptySlot_ct] - hoare_lift_Pf2[where f=ksCurThread, OF _ asUser_ct], - wp fastpathBestSwitchCandidate_lift[where f="emptySlot a b" for a b] - fastpathBestSwitchCandidate_lift[where f="asUser a b" for a b] - user_getreg_inv asUser_typ_ats\)+) - - apply (clarsimp | wp getCTE_wp' gts_imp')+ - - apply (simp add: switchToThread_def bind_assoc) - apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - - apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp mapM_x_wp' handleFault_obj_at'_tcbIPCBuffer getObject_inv | wpc | simp - | wp (once) hoare_drop_imps )+ - apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp - | wp (once) hoare_drop_imps )+ + apply ((wpsimp wp: user_getreg_rv setEndpoint_obj_at_tcb' + threadSet_pred_tcb_at_state[unfolded if_bool_eq_conj] + fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] + fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t] + | wps)+)[3] + apply (simp cong: rev_conj_cong) + apply (wpsimp wp: setThreadState_tcbContext[simplified comp_apply] + user_getreg_rv + setThreadState_no_sch_change sts_valid_objs' + sts_st_tcb_at'_cases sts_bound_tcb_at' + fastpathBestSwitchCandidate_lift[where f="setThreadState s t" for s t] + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift + hoare_vcg_ex_lift + | wps)+ + apply (strengthen imp_consequent[where Q="tcb_at' t s" for t s]) + apply ((wp user_getreg_rv setThreadState_no_sch_change + sts_st_tcb_at'_cases sts_bound_tcb_at' + emptySlot_obj_at'_not_queued emptySlot_obj_at_ep + emptySlot_tcbContext[simplified comp_apply] + emptySlot_cte_wp_at_cteCap + emptySlot_cnode_caps + user_getreg_inv asUser_typ_ats + asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift + hoare_vcg_ex_lift + fastpathBestSwitchCandidate_lift[where f="emptySlot a b" for a b] + | simp del: comp_apply + | clarsimp simp: obj_at'_weakenE[OF _ TrueI] + | wps)+) + + apply (wpsimp wp: fastpathBestSwitchCandidate_lift[where f="asUser a b" for a b])+ + apply (clarsimp cong: conj_cong) + apply ((wp user_getreg_inv asUser_typ_ats + asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift + hoare_vcg_ex_lift + | clarsimp simp: obj_at'_weakenE[OF _ TrueI] + | solves \ + wp fastpathBestSwitchCandidate_lift[where f="asUser a b" for a b] + \)+) + + apply (clarsimp | wp getCTE_wp' gts_imp')+ + + apply (simp add: ARM_H.switchToThread_def bind_assoc) + apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) + + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp mapM_x_wp' handleFault_obj_at'_tcbIPCBuffer getObject_inv | wpc | simp + | wp (once) hoare_drop_imps )+ + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp + | wp (once) hoare_drop_imps )+ apply (simp add: bind_assoc catch_liftE receiveIPC_def Let_def liftM_def setThreadState_runnable_simp) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getThreadState)+) + apply 
monadic_rewrite_symb_exec_l apply (rule monadic_rewrite_assert) apply (rule_tac P="inj (case_bool thread (capTCBPtr (cteCap replyCTE)))" in monadic_rewrite_gen_asm) apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_weaken[where F=False and E=True], simp) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) apply (rule isolate_thread_actions_rewrite_bind fastpath_isolate_rewrites fastpath_isolatables - bool.simps setRegister_simple - zipWithM_setRegister_simple + bool.simps setRegister_simple_modify_registers + zipWithM_setRegister_simple_modify_registers thread_actions_isolatable_bind thread_actions_isolatableD[OF setCTE_isolatable] setCTE_isolatable @@ -1757,21 +1663,19 @@ lemma fastpath_callKernel_SysReplyRecv_corres: (thread + 2 ^ cte_level_bits * tcbCallerSlot) and (\s. \x. tcb_at' (case_bool thread (capTCBPtr (cteCap replyCTE)) x) s) and valid_mdb')" - and F=True and E=False in monadic_rewrite_weaken) + and F=True and E=False in monadic_rewrite_weaken_flags) apply (rule monadic_rewrite_isolate_final2) apply simp - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) + apply monadic_rewrite_symb_exec_l apply (rename_tac callerCTE) apply (rule monadic_rewrite_assert) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) + apply monadic_rewrite_symb_exec_l apply (rule monadic_rewrite_assert) apply (simp add: emptySlot_setEndpoint_pivot) apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_refl2) + apply (rule monadic_rewrite_is_refl) apply (clarsimp simp: isSendEP_def split: Structures_H.endpoint.split) - apply (rule_tac Q="\rv. (\_. rv = callerCTE) and Q'" for Q' - in monadic_rewrite_symb_exec_r, wp+) - apply (rule monadic_rewrite_gen_asm, simp) + apply (monadic_rewrite_symb_exec_r_known callerCTE) apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_head, rule_tac cte=callerCTE in emptySlot_replymaster_rewrite) apply (simp add: bind_assoc o_def) @@ -1808,13 +1712,13 @@ lemma fastpath_callKernel_SysReplyRecv_corres: map_to_ctes_partial_overwrite) apply (simp add: valid_mdb'_def valid_mdb_ctes_def) apply simp - apply (simp cong: if_cong bool.case_cong - | rule getCTE_wp' gts_wp' threadGet_wp - getEndpoint_wp gets_wp - user_getreg_wp - gets_the_wp gct_wp getNotification_wp - return_wp liftM_wp gbn_wp' - | (simp only: curDomain_def, wp)[1])+ + apply (simp cong: if_cong bool.case_cong + | rule getCTE_wp' gts_wp' threadGet_wp + getEndpoint_wp gets_wp + user_getreg_wp + gets_the_wp gct_wp getNotification_wp + return_wp liftM_wp gbn_wp' + | (simp only: curDomain_def, wp)[1])+ apply clarsimp apply (subgoal_tac "ksCurThread s \ ksIdleThread s") @@ -1843,6 +1747,7 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (subst tcb_at_cte_at_offset, assumption, simp add: tcb_cte_cases_def cte_level_bits_def tcbSlots) apply (clarsimp simp: inj_case_bool cte_wp_at_ctes_of + length_msgRegisters order_less_imp_le tcb_at_invs' invs_mdb' split: bool.split) @@ -1856,13 +1761,15 @@ lemma fastpath_callKernel_SysReplyRecv_corres: prefer 2 apply normalise_obj_at' apply (rule_tac ttcb=tcba and ctcb=tcb in fastpathBestSwitchCandidateI) - apply (erule disjE, blast, blast) - apply simp+ + apply (erule disjE, blast, blast) + apply simp+ apply (clarsimp simp: obj_at_tcbs_of tcbSlots cte_level_bits_def) apply (frule(1) st_tcb_at_is_Reply_imp_not_tcbQueued) - apply (auto simp: obj_at_tcbs_of tcbSlots + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (auto simp: 
obj_at_tcbs_of tcbSlots projectKOs cte_level_bits_def) done diff --git a/proof/crefine/ARM/Finalise_C.thy b/proof/crefine/ARM/Finalise_C.thy index d81e57dd83..3e02d3c46d 100644 --- a/proof/crefine/ARM/Finalise_C.thy +++ b/proof/crefine/ARM/Finalise_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -16,6 +17,108 @@ declare if_split [split del] definition "option_map2 f m = option_map f \ m" +definition ksReadyQueues_head_end_2 :: "(domain \ priority \ ready_queue) \ bool" where + "ksReadyQueues_head_end_2 qs \ + \d p. tcbQueueHead (qs (d, p)) \ None \ tcbQueueEnd (qs (d, p)) \ None" + +abbreviation "ksReadyQueues_head_end s \ ksReadyQueues_head_end_2 (ksReadyQueues s)" + +lemmas ksReadyQueues_head_end_def = ksReadyQueues_head_end_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end: + "ksReadyQueues_asrt s \ ksReadyQueues_head_end s" + by (fastforce dest: tcbQueueHead_iff_tcbQueueEnd + simp: ready_queue_relation_def ksReadyQueues_asrt_def ksReadyQueues_head_end_def) + +lemma tcbSchedEnqueue_ksReadyQueues_head_end[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: tcbQueueEmpty_def obj_at'_def ksReadyQueues_head_end_def split: if_splits) + done + +lemma ksReadyQueues_head_end_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end (s\ksSchedulerAction := ChooseNewThread\) = ksReadyQueues_head_end s" + by (simp add: ksReadyQueues_head_end_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + +lemma setThreadState_ksReadyQueues_head_end[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end\" + unfolding setThreadState_def + by (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + +definition ksReadyQueues_head_end_tcb_at'_2 :: + "(domain \ priority \ ready_queue) \ (obj_ref \ tcb) \ bool" where + "ksReadyQueues_head_end_tcb_at'_2 qs tcbs \ + \d p. (\head. tcbQueueHead (qs (d, p)) = Some head \ tcbs head \ None) + \ (\end. 
tcbQueueEnd (qs (d, p)) = Some end \ tcbs end \ None)" + +abbreviation "ksReadyQueues_head_end_tcb_at' s \ + ksReadyQueues_head_end_tcb_at'_2 (ksReadyQueues s) (tcbs_of' s)" + +lemmas ksReadyQueues_head_end_tcb_at'_def = ksReadyQueues_head_end_tcb_at'_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at': + "\ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ ksReadyQueues_head_end_tcb_at' s" + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def + ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI allI) + apply (case_tac "ts = []", clarsimp) + apply (fastforce dest!: heap_path_head hd_in_set + simp: opt_pred_def tcbQueueEmpty_def split: option.splits) + apply (fastforce simp: queue_end_valid_def opt_pred_def tcbQueueEmpty_def + split: option.splits) + done + +lemma tcbSchedEnqueue_ksReadyQueues_head_end_tcb_at'[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma ksReadyQueues_head_end_tcb_at'_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end_tcb_at' (s\ksSchedulerAction := ChooseNewThread\) + = ksReadyQueues_head_end_tcb_at' s" + by (simp add: ksReadyQueues_head_end_tcb_at'_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + +lemma setThreadState_ksReadyQueues_head_end_tcb_at'[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding setThreadState_def + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma head_end_ksReadyQueues_': + "\ (s, s') \ rf_sr; ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; + pspace_aligned' s; pspace_distinct' s; + d \ maxDomain; p \ maxPriority \ + \ head_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL + \ end_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL" + apply (frule (2) rf_sr_ctcb_queue_relation[where d=d and p=p]) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: option.splits) + apply (rename_tac "end" head end_tcb head_tcb) + apply (prop_tac "tcb_at' head s \ tcb_at' end s") + apply (fastforce intro!: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (fastforce dest: tcb_at_not_NULL) + done + lemma tcbSchedEnqueue_cslift_spec: "\s. \\\<^bsub>/UNIV\<^esub> \s. \d v. option_map2 tcbPriority_C (cslift s) \tcb = Some v \ unat v \ numPriorities @@ -27,7 +130,9 @@ lemma tcbSchedEnqueue_cslift_spec: \ None \ option_map2 tcbDomain_C (cslift s) (head_C (index \ksReadyQueues (unat (d*0x100 + v)))) - \ None)\ + \ None) + \ (head_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL + \ end_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL)\ Call tcbSchedEnqueue_'proc {s'. 
option_map2 tcbEPNext_C (cslift s') = option_map2 tcbEPNext_C (cslift s) \ option_map2 tcbEPPrev_C (cslift s') = option_map2 tcbEPPrev_C (cslift s) @@ -44,8 +149,8 @@ lemma tcbSchedEnqueue_cslift_spec: apply (rule conjI) apply (clarsimp simp: typ_heap_simps cong: if_cong) apply (simp split: if_split) - apply (clarsimp simp: typ_heap_simps if_Some_helper cong: if_cong) - by (simp split: if_split) + by (auto simp: typ_heap_simps' if_Some_helper numPriorities_def + cong: if_cong split: if_splits) lemma setThreadState_cslift_spec: "\s. \\\<^bsub>/UNIV\<^esub> \s. s \\<^sub>c \tptr \ (\x. ksSchedulerAction_' (globals s) = tcb_Ptr x @@ -140,8 +245,9 @@ lemma ctcb_relation_tcbPriority_maxPriority_numPriorities: done lemma tcbSchedEnqueue_cslift_precond_discharge: - "\ (s, s') \ rf_sr; obj_at' (P :: tcb \ bool) x s; - valid_queues s; valid_objs' s \ \ + "\ (s, s') \ rf_sr; obj_at' (P :: tcb \ bool) x s; valid_objs' s ; + ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; + pspace_aligned' s; pspace_distinct' s\ \ (\d v. option_map2 tcbPriority_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some v \ unat v < numPriorities \ option_map2 tcbDomain_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some d @@ -152,31 +258,49 @@ lemma tcbSchedEnqueue_cslift_precond_discharge: \ None \ option_map2 tcbDomain_C (cslift s') (head_C (index (ksReadyQueues_' (globals s')) (unat (d*0x100 + v)))) - \ None))" + \ None) + \ (head_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL + \ end_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL))" apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps' option_map2_def) + apply (rename_tac tcb tcb') apply (frule_tac t=x in valid_objs'_maxPriority, fastforce simp: obj_at'_def) apply (frule_tac t=x in valid_objs'_maxDomain, fastforce simp: obj_at'_def) apply (drule_tac P="\tcb. tcbPriority tcb \ maxPriority" in obj_at_ko_at2', simp) apply (drule_tac P="\tcb. 
tcbDomain tcb \ maxDomain" in obj_at_ko_at2', simp) apply (simp add: ctcb_relation_tcbDomain_maxDomain_numDomains ctcb_relation_tcbPriority_maxPriority_numPriorities) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_ctcb_queue_relation) apply (simp add: maxDom_to_H maxPrio_to_H)+ + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in head_end_ksReadyQueues_', fastforce+) apply (simp add: cready_queues_index_to_C_def2 numPriorities_def le_maxDomain_eq_less_numDomains) apply (clarsimp simp: ctcb_relation_def) - apply (frule arg_cong[where f=unat], subst(asm) unat_ucast_8_32) - apply (frule tcb_queue'_head_end_NULL) - apply (erule conjunct1[OF valid_queues_valid_q]) - apply (frule(1) tcb_queue_relation_qhead_valid') - apply (simp add: valid_queues_valid_q) - apply (clarsimp simp: h_t_valid_clift_Some_iff) + apply (frule arg_cong[where f=unat], subst(asm) unat_ucast_up_simp, simp) + apply (frule (3) head_end_ksReadyQueues_', fastforce+) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (prop_tac "\ tcbQueueEmpty ((ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)))") + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (clarsimp simp: tcbQueueEmpty_def) + apply (rename_tac head "end" head_tcb end_tcb) + apply (prop_tac "tcb_at' head s") + apply (fastforce intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (frule_tac thread=head in obj_at_cslift_tcb) + apply fastforce + apply (clarsimp dest: obj_at_cslift_tcb simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) done lemma cancel_all_ccorres_helper: "ccorres dc xfdc - (\s. valid_objs' s \ valid_queues s + (\s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s \ (\t\set ts. tcb_at' t s \ t \ 0) \ sch_act_wf (ksSchedulerAction s) s) {s'. \p. ep_queue_relation (cslift s') ts @@ -199,8 +323,7 @@ proof (induct ts) apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (rule ccorres_tmp_lift2[where G'=UNIV and G''="\x. 
UNIV", simplified]) apply ceqv - apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def - dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip) apply simp done @@ -209,7 +332,7 @@ next show ?case apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (simp del: Collect_const - add: dc_def[symmetric] mapM_x_Cons) + add: mapM_x_Cons) apply (rule ccorres_guard_imp2) apply (rule_tac xf'=thread_' in ccorres_abstract) apply ceqv @@ -232,17 +355,15 @@ next apply (erule cmap_relationE1 [OF cmap_relation_tcb]) apply (erule ko_at_projectKO_opt) apply (fastforce intro: typ_heap_simps) - apply (wp sts_running_valid_queues | simp)+ + apply (wp sts_valid_objs' | simp)+ apply (rule ceqv_refl) apply (rule "Cons.hyps") apply (wp sts_valid_objs' sts_sch_act sch_act_wf_lift hoare_vcg_const_Ball_lift - sts_running_valid_queues sts_st_tcb' setThreadState_oa_queued | simp)+ + sts_st_tcb' | simp)+ apply (vcg exspec=setThreadState_cslift_spec exspec=tcbSchedEnqueue_cslift_spec) - apply (clarsimp simp: tcb_at_not_NULL - Collect_const_mem valid_tcb_state'_def - ThreadState_Restart_def mask_def - valid_objs'_maxDomain valid_objs'_maxPriority) + apply (clarsimp simp: tcb_at_not_NULL Collect_const_mem valid_tcb_state'_def + ThreadState_defs mask_def valid_objs'_maxDomain valid_objs'_maxPriority) apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (rule conjI) @@ -252,16 +373,13 @@ next st_tcb_at'_def split: scheduler_action.split_asm) apply (rename_tac word) - apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge) - apply simp - apply clarsimp - apply clarsimp - apply clarsimp + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; clarsimp?) + apply simp apply clarsimp apply (rule conjI) apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge) apply clarsimp - apply clarsimp + apply clarsimp+ apply (subst ep_queue_relation_shift, fastforce) apply (drule_tac x="tcb_ptr_to_ctcb_ptr thread" in fun_cong)+ @@ -270,17 +388,23 @@ next done qed +crunches setEndpoint, setNotification + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + and ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + (simp: updateObject_default_def) + lemma cancelAllIPC_ccorres: "ccorres dc xfdc - (invs') (UNIV \ {s. epptr_' s = Ptr epptr}) [] + invs' (UNIV \ {s. 
epptr_' s = Ptr epptr}) [] (cancelAllIPC epptr) (Call cancelAllIPC_'proc)" apply (cinit lift: epptr_') + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l [OF _ getEndpoint_inv _ empty_fail_getEndpoint]) apply (rule_tac xf'=ret__unsigned_' - and val="case rv of IdleEP \ scast EPState_Idle + and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv | SendEP _ \ scast EPState_Send" - and R="ko_at' rv epptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ep]) @@ -289,8 +413,8 @@ lemma cancelAllIPC_ccorres: apply (simp add: cendpoint_relation_def Let_def split: endpoint.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv epptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ep epptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc apply (rename_tac list) apply (simp add: endpoint_state_defs @@ -323,29 +447,26 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear - cancelAllIPC_mapM_x_valid_queues | simp)+ apply (rule mapM_x_wp', wp)+ apply (wp sts_st_tcb') apply (clarsimp split: if_split) - apply (rule mapM_x_wp', wp)+ + apply (rule mapM_x_wp', wp sts_valid_objs')+ apply (clarsimp simp: valid_tcb_state'_def) apply (simp add: guard_is_UNIV_def) apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: endpoint_state_defs - Collect_False Collect_True - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: endpoint_state_defs Collect_False Collect_True ccorres_cond_iffs del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -373,48 +494,48 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) - apply (wp cancelAllIPC_mapM_x_valid_queues) - apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ apply (simp add: guard_is_UNIV_def) apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (clarsimp simp: valid_ep'_def invs_valid_objs' invs_queues) + apply (clarsimp simp: valid_ep'_def invs_valid_objs') apply (rule cmap_relationE1[OF cmap_relation_ep], assumption) apply (erule ko_at_projectKO_opt) apply (frule obj_at_valid_objs', clarsimp+) - apply (clarsimp simp: projectKOs valid_obj'_def valid_ep'_def) - subgoal by (auto simp: typ_heap_simps cendpoint_relation_def - Let_def tcb_queue_relation'_def - invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority - intro!: obj_at_conj') + apply (clarsimp 
simp: valid_obj'_def valid_ep'_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + subgoal + by (auto simp: typ_heap_simps cendpoint_relation_def + Let_def tcb_queue_relation'_def projectKOs + invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority + intro!: obj_at_conj') apply (clarsimp simp: guard_is_UNIV_def) apply (wp getEndpoint_wp) apply clarsimp done -lemma empty_fail_getNotification: - "empty_fail (getNotification ep)" - unfolding getNotification_def - by (auto intro: empty_fail_getObject) - lemma cancelAllSignals_ccorres: "ccorres dc xfdc - (invs') (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr}) [] + invs' (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr}) [] (cancelAllSignals ntfnptr) (Call cancelAllSignals_'proc)" apply (cinit lift: ntfnPtr_') + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule_tac xf'=ret__unsigned_' - and val="case ntfnObj rv of IdleNtfn \ scast NtfnState_Idle + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle | ActiveNtfn _ \ scast NtfnState_Active | WaitingNtfn _ \ scast NtfnState_Waiting" - and R="ko_at' rv ntfnptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ntfn]) @@ -423,18 +544,15 @@ lemma cancelAllSignals_ccorres: apply (simp add: cnotification_relation_def Let_def split: ntfn.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv ntfnptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ntfn ntfnptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric] Collect_True + apply (simp add: notification_state_defs ccorres_cond_iffs Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -442,8 +560,8 @@ lemma cancelAllSignals_ccorres: apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) - apply (rule_tac P="ko_at' rv ntfnptr and invs'" - in ccorres_from_vcg[where P'=UNIV]) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp apply (rule_tac x=ntfnptr in cmap_relationE1 [OF cmap_relation_ntfn], assumption) @@ -460,13 +578,12 @@ lemma cancelAllSignals_ccorres: subgoal by (simp add: cnotification_relation_def notification_state_defs Let_def) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) - apply (wp cancelAllIPC_mapM_x_valid_queues) - apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' 
sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ apply (simp add: guard_is_UNIV_def) apply (wp set_ntfn_valid_objs' hoare_vcg_const_Ball_lift @@ -476,11 +593,16 @@ lemma cancelAllSignals_ccorres: apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption) apply (erule ko_at_projectKO_opt) apply (frule obj_at_valid_objs', clarsimp+) - apply (clarsimp simp add: valid_obj'_def valid_ntfn'_def projectKOs) - subgoal by (auto simp: typ_heap_simps cnotification_relation_def - Let_def tcb_queue_relation'_def - invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority - intro!: obj_at_conj') + apply (clarsimp simp add: valid_obj'_def valid_ntfn'_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + subgoal + by (auto simp: typ_heap_simps cnotification_relation_def + Let_def tcb_queue_relation'_def projectKOs + invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority + intro!: obj_at_conj') apply (clarsimp simp: guard_is_UNIV_def) apply (wp getNotification_wp) apply clarsimp @@ -527,16 +649,16 @@ lemma tcb_queue_relation2_concat: context kernel_m begin -lemma setThreadState_ccorres_valid_queues'_simple: - "ccorres dc xfdc (\s. tcb_at' thread s \ valid_queues' s \ \ runnable' st \ sch_act_simple s) +lemma setThreadState_ccorres_simple: + "ccorres dc xfdc (\s. tcb_at' thread s \ \ runnable' st \ sch_act_simple s) ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres_valid_queues'_simple) - apply (wp threadSet_valid_queues'_and_not_runnable') - apply (clarsimp simp: weak_sch_act_wf_def valid_queues'_def) + apply (wp threadSet_tcbState_st_tcb_at') + apply (fastforce simp: weak_sch_act_wf_def) done lemma updateRestartPC_ccorres: @@ -552,9 +674,7 @@ lemma updateRestartPC_ccorres: done crunches updateRestartPC - for valid_queues'[wp]: valid_queues' - and sch_act_simple[wp]: sch_act_simple - and valid_queues[wp]: Invariants_H.valid_queues + for sch_act_simple[wp]: sch_act_simple and valid_objs'[wp]: valid_objs' and tcb_at'[wp]: "tcb_at' p" @@ -594,25 +714,16 @@ lemma suspend_ccorres: apply clarsimp apply (rule iffI) apply simp - apply (erule thread_state_to_tsType.elims; simp add: StrictC'_thread_state_defs) + apply (erule thread_state_to_tsType.elims; simp add: ThreadState_defs) apply (ctac (no_vcg) add: updateRestartPC_ccorres) apply (rule ccorres_return_Skip) apply ceqv - apply (ctac(no_vcg) add: setThreadState_ccorres_valid_queues'_simple) - apply (ctac add: tcbSchedDequeue_ccorres') - apply (rule_tac Q="\_. - (\s. \t' d p. (t' \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d - \ tcbPriority tcb = p) t' s \ - (t' \ thread \ st_tcb_at' runnable' t' s)) \ - distinct (ksReadyQueues s (d, p))) and valid_queues' and valid_objs' and tcb_at' thread" - in hoare_post_imp) + apply (ctac(no_vcg) add: setThreadState_ccorres_simple) + apply (ctac add: tcbSchedDequeue_ccorres) + apply (rule_tac Q="\_. 
valid_objs' and tcb_at' thread and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) apply clarsimp - apply (drule_tac x="t" in spec) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp elim!: obj_at'_weakenE simp: inQ_def) - apply (wp sts_valid_queues_partial)[1] + apply (wp sts_valid_objs')[1] apply clarsimp apply (wpsimp simp: valid_tcb_state'_def) apply clarsimp @@ -621,16 +732,14 @@ lemma suspend_ccorres: apply clarsimp apply (rule conseqPre, vcg) apply (rule subset_refl) - apply (rule hoare_strengthen_post) + apply (rule hoare_strengthen_post) apply (rule hoare_vcg_conj_lift) apply (rule hoare_vcg_conj_lift) apply (rule cancelIPC_sch_act_simple) apply (rule cancelIPC_tcb_at'[where t=thread]) apply (rule delete_one_conc_fr.cancelIPC_invs) - apply (fastforce simp: invs_valid_queues' invs_queues invs_valid_objs' - valid_tcb_state'_def) - apply clarsimp - apply (auto simp: "StrictC'_thread_state_defs") + apply (fastforce simp: invs_valid_objs' valid_tcb_state'_def) + apply (auto simp: ThreadState_defs) done lemma cap_to_H_NTFNCap_tag: @@ -653,8 +762,8 @@ lemma doUnbindNotification_ccorres: (Call doUnbindNotification_'proc)" apply (cinit' lift: ntfnPtr_' tcbptr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr" and P'=UNIV - in ccorres_split_nothrow_novcg) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: option_to_ptr_def option_to_0_def) @@ -673,7 +782,7 @@ lemma doUnbindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv", ((simp add: option_to_ctcb_ptr_def)+)[4]) + apply (case_tac "ntfnObj ntfn", ((simp add: option_to_ctcb_ptr_def)+)[4]) subgoal by (simp add: carch_state_relation_def typ_heap_simps') subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: h_t_valid_clift_Some_iff) @@ -684,7 +793,7 @@ lemma doUnbindNotification_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -734,7 +843,7 @@ lemma doUnbindNotification_ccorres': apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -769,9 +878,9 @@ lemma unbindNotification_ccorres: apply simp apply wpc apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (rule ccorres_cond_true) - apply (ctac (no_vcg) add: doUnbindNotification_ccorres[unfolded dc_def, simplified]) + apply (ctac (no_vcg) add: doUnbindNotification_ccorres[simplified]) apply (wp gbn_wp') apply vcg apply (clarsimp simp: option_to_ptr_def option_to_0_def pred_tcb_at'_def @@ -788,13 +897,13 @@ lemma unbindMaybeNotification_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule ccorres_rhs_assoc2) - apply (rule_tac P="ntfnBoundTCB rv \ None \ - 
option_to_ctcb_ptr (ntfnBoundTCB rv) \ NULL" - in ccorres_gen_asm) + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) apply (rule_tac xf'=boundTCB_' - and val="option_to_ctcb_ptr (ntfnBoundTCB rv)" - and R="ko_at' rv ntfnptr and valid_bound_tcb' (ntfnBoundTCB rv)" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and val="option_to_ctcb_ptr (ntfnBoundTCB ntfn)" + and R="ko_at' ntfn ntfnptr and valid_bound_tcb' (ntfnBoundTCB ntfn)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1[OF cmap_relation_ntfn]) @@ -834,7 +943,7 @@ lemma finaliseCap_True_cases_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap Collect_False del: Collect_const) apply (fold case_bool_If) - apply (simp add: false_def) + apply simp apply csymbr apply wpc apply (simp add: cap_get_tag_isCap ccorres_cond_univ_iff Let_def) @@ -991,7 +1100,6 @@ lemma invalidateASIDEntry_ccorres: apply (rule order_le_less_trans, rule word_and_le1) apply (simp add: mask_def) apply (rule ccorres_return_Skip) - apply (fold dc_def) apply (ctac add: invalidateASID_ccorres) apply wp apply (simp add: guard_is_UNIV_def) @@ -1024,8 +1132,7 @@ lemma deleteASIDPool_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: asid_base_' pool_' simp: whileAnno_def) apply (rule ccorres_assert) - apply (clarsimp simp: liftM_def dc_def[symmetric] fun_upd_def[symmetric] - when_def + apply (clarsimp simp: liftM_def fun_upd_def[symmetric] when_def simp del: Collect_const) apply (rule ccorres_Guard)+ apply (rule ccorres_pre_gets_armKSASIDTable_ksArchState) @@ -1172,14 +1279,12 @@ lemma deleteASID_ccorres: apply ceqv apply csymbr apply wpc - apply (simp add: ccorres_cond_iffs dc_def[symmetric] - Collect_False + apply (simp add: ccorres_cond_iffs Collect_False del: Collect_const cong: call_ignore_cong) apply (rule ccorres_cond_false) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] when_def - Collect_True liftM_def + apply (simp add: when_def Collect_True liftM_def cong: conj_cong call_ignore_cong del: Collect_const) apply (rule ccorres_pre_getObject_asidpool) @@ -1202,8 +1307,7 @@ lemma deleteASID_ccorres: apply (simp add: asid_low_bits_def Kernel_C.asidLowBits_def mask_def word_and_le1) apply (drule sym, simp) - apply (simp add: option_to_ptr_def option_to_0_def - from_bool_def inv_ASIDPool + apply (simp add: option_to_ptr_def option_to_0_def inv_ASIDPool split: option.split if_split bool.split) apply ceqv apply (rule ccorres_cond2[where R=\]) @@ -1319,12 +1423,9 @@ lemma pageTableMapped_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cpde_relation_def Let_def return_def addrFromPPtr_def pde_pde_coarse_lift_def) - apply (rule conjI) - apply (simp add: pde_lift_def Let_def split: if_split_asm) - apply (clarsimp simp: option_to_0_def option_to_ptr_def split: if_split) - apply (clarsimp simp: ARM.addrFromPPtr_def ARM.ptrFromPAddr_def) - apply (auto simp: Let_def pde_lift_def addrFromPPtr_def ptrFromPAddr_def option_to_ptr_def - split: if_splits)[1] + apply (fastforce simp: Let_def pde_lift_def addrFromPPtr_def ptrFromPAddr_def + option_to_ptr_def + split: if_splits) apply ((rule ccorres_cond_false_seq ccorres_cond_false ccorres_return_C | simp)+)[3] apply (simp only: simp_thms) @@ -1349,8 +1450,8 @@ lemma pageTableMapped_pd: \\rv s. 
case rv of Some x \ page_directory_at' x s | _ \ True\" apply (simp add: pageTableMapped_def) apply (rule hoare_pre) - apply (wp getPDE_wp hoare_vcg_all_lift_R | wpc)+ - apply (rule hoare_post_imp_R, rule findPDForASID_page_directory_at'_simple) + apply (wp getPDE_wp hoare_vcg_all_liftE_R | wpc)+ + apply (rule hoare_strengthen_postE_R, rule findPDForASID_page_directory_at'_simple) apply (clarsimp split: if_split) apply simp done @@ -1364,7 +1465,7 @@ lemma unmapPageTable_ccorres: apply (ctac(no_vcg) add: pageTableMapped_ccorres) apply wpc apply (simp add: option_to_ptr_def option_to_0_def ccorres_cond_iffs) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (simp add: option_to_ptr_def option_to_0_def ccorres_cond_iffs) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -1374,7 +1475,6 @@ lemma unmapPageTable_ccorres: apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def pde_lift_pde_invalid) - apply (fold dc_def) apply csymbr apply (ctac add: cleanByVA_PoU_ccorres) apply (ctac(no_vcg) add:flushTable_ccorres) @@ -1420,7 +1520,7 @@ method return_NullCap_pair_ccorres = (rule allI, rule conseqPre, vcg), (clarsimp simp: return_def ccap_relation_NullCap_iff)\ lemma Arch_finaliseCap_ccorres: - notes dc_simp[simp del] Collect_const[simp del] + notes Collect_const[simp del] shows "ccorres (\rv rv'. ccap_relation (fst rv) (remainder_C rv') \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) @@ -1571,7 +1671,7 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (frule cap_lift_page_directory_cap) apply (clarsimp simp: ccap_relation_def cap_to_H_def capAligned_def - to_bool_def cap_page_directory_cap_lift_def + cap_page_directory_cap_lift_def asid_bits_def split: if_split_asm) apply simp @@ -1613,9 +1713,8 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (frule cap_lift_page_table_cap) apply (clarsimp simp: ccap_relation_def cap_to_H_def capAligned_def - to_bool_def cap_page_table_cap_lift_def - asid_bits_def - split: if_split_asm) + cap_page_table_cap_lift_def asid_bits_def + split: if_split_asm) apply simp apply return_NullCap_pair_ccorres apply (clarsimp simp: isCap_simps) @@ -1779,7 +1878,7 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (frule cap_lift_page_directory_cap) apply (clarsimp simp: ccap_relation_def cap_to_H_def capAligned_def - to_bool_def cap_page_directory_cap_lift_def + cap_page_directory_cap_lift_def asid_bits_def split: if_split_asm) apply (frule cap_get_tag_isCap_unfolded_H_cap) @@ -1791,12 +1890,6 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap, simp) done -lemma ccte_relation_ccap_relation: - "ccte_relation cte cte' \ ccap_relation (cteCap cte) (cte_C.cap_C cte')" - by (clarsimp simp: ccte_relation_def ccap_relation_def - cte_to_H_def map_option_Some_eq2 - c_valid_cte_def) - lemma isFinalCapability_ccorres: "ccorres ((=) \ from_bool) ret__unsigned_long_' (cte_wp_at' ((=) cte) slot and invs') @@ -1817,7 +1910,7 @@ lemma isFinalCapability_ccorres: apply (simp add: mdbPrev_to_H[symmetric]) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (simp add: return_def from_bool_def false_def) + apply (simp add: return_def) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_l[OF _ getCTE_inv getCTE_wp empty_fail_getCTE]) apply (rule_tac P="cte_wp_at' ((=) cte) 
slot @@ -1856,10 +1949,9 @@ lemma isFinalCapability_ccorres: apply (rule cmap_relationE1 [OF cmap_relation_cte], assumption+, simp?, simp add: typ_heap_simps)+ apply (drule ccte_relation_ccap_relation)+ - apply (auto simp: false_def true_def from_bool_def split: bool.splits)[1] + apply (auto simp: from_bool_def split: bool.splits)[1] apply (wp getCTE_wp') - apply (clarsimp simp add: guard_is_UNIV_def Collect_const_mem false_def - from_bool_0 true_def from_bool_def) + apply (clarsimp simp add: guard_is_UNIV_def Collect_const_mem) apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -1894,7 +1986,7 @@ lemma cteDeleteOne_ccorres: erule_tac t="ret__unsigned = scast cap_null_cap" and s="cteCap cte = NullCap" in ssubst) - apply (clarsimp simp only: when_def unless_def dc_def[symmetric]) + apply (clarsimp simp only: when_def unless_def) apply (rule ccorres_cond2[where R=\]) apply (clarsimp simp: Collect_const_mem) apply (rule ccorres_rhs_assoc)+ @@ -1905,25 +1997,24 @@ lemma cteDeleteOne_ccorres: apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot]) apply (rule_tac A="invs' and cte_wp_at' ((=) cte) slot" in ccorres_guard_imp2[where A'=UNIV]) - apply (simp add: split_def dc_def[symmetric] + apply (simp add: split_def del: Collect_const) apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg) add: finaliseCap_True_standin_ccorres) apply (rule ccorres_assert) - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (ctac add: emptySlot_ccorres) apply (simp add: pred_conj_def finaliseCapTrue_standin_simple_def) apply (strengthen invs_mdb_strengthen' invs_urz) apply (wp typ_at_lifts isFinalCapability_inv | strengthen invs_valid_objs')+ - apply (clarsimp simp: from_bool_def true_def irq_opt_relation_def - invs_pspace_aligned' cte_wp_at_ctes_of) + apply (clarsimp simp: irq_opt_relation_def invs_pspace_aligned' cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (clarsimp simp: typ_heap_simps ccte_relation_ccap_relation ccap_relation_NullCap_iff) apply (wp isFinalCapability_inv) apply simp - apply (simp del: Collect_const add: false_def) + apply (simp del: Collect_const) apply (rule ccorres_return_Skip) apply (clarsimp simp: Collect_const_mem cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) @@ -1947,7 +2038,7 @@ lemma deletingIRQHandler_ccorres: (UNIV \ {s. irq_opt_relation (Some irq) (irq_' s)}) [] (deletingIRQHandler irq) (Call deletingIRQHandler_'proc)" apply (cinit lift: irq_' cong: call_ignore_cong) - apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def dc_def[symmetric] + apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def cong: call_ignore_cong ) apply (rule_tac r'="\rv rv'. 
rv' = Ptr rv" and xf'="slot_'" in ccorres_split_nothrow) apply (rule ccorres_Guard_intStateIRQNode_array_Ptr) @@ -2084,7 +2175,7 @@ lemma finaliseCap_ccorres: del: Collect_const) apply (rule ccorres_if_lhs) apply (simp, rule ccorres_fail) - apply (simp add: from_bool_0 Collect_True Collect_False false_def + apply (simp add: from_bool_0 Collect_True Collect_False del: Collect_const) apply csymbr apply (simp add: cap_get_tag_isCap Collect_False Collect_True @@ -2168,7 +2259,7 @@ lemma finaliseCap_ccorres: apply (simp add: isArchCap_T_isArchObjectCap[symmetric] del: Collect_const) apply (rule ccorres_if_lhs) - apply (simp add: Collect_False Collect_True Let_def true_def + apply (simp add: Collect_False Collect_True Let_def del: Collect_const) apply (rule_tac P="(capIRQ cap) \ ARM.maxIRQ" in ccorres_gen_asm) apply (rule ccorres_rhs_assoc)+ @@ -2188,18 +2279,18 @@ lemma finaliseCap_ccorres: apply (rule ccorres_fail) apply (rule ccorres_add_return, rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ceqv_refl) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -2208,8 +2299,7 @@ lemma finaliseCap_ccorres: irq_opt_relation_def) apply wp apply (simp add: guard_is_UNIV_def) - apply (clarsimp simp: cap_get_tag_isCap word_sle_def Collect_const_mem - false_def from_bool_def) + apply (clarsimp simp: cap_get_tag_isCap word_sle_def Collect_const_mem) apply (intro impI conjI) apply (clarsimp split: bool.splits) apply (clarsimp split: bool.splits) @@ -2225,7 +2315,7 @@ lemma finaliseCap_ccorres: split: option.splits cap_CL.splits if_splits) apply clarsimp apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) - apply (clarsimp simp: isCap_simps from_bool_def false_def) + apply (clarsimp simp: isCap_simps) apply (clarsimp simp: tcb_cnode_index_defs ptr_add_assertion_def) apply clarsimp apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) diff --git a/proof/crefine/ARM/Interrupt_C.thy b/proof/crefine/ARM/Interrupt_C.thy index 195f6bec20..1295d8af50 100644 --- a/proof/crefine/ARM/Interrupt_C.thy +++ b/proof/crefine/ARM/Interrupt_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -16,7 +17,7 @@ lemma invokeIRQHandler_AckIRQ_ccorres: (InterruptDecls_H.invokeIRQHandler (AckIRQ irq)) (Call invokeIRQHandler_AckIRQ_'proc)" apply (cinit lift: irq_' simp: Interrupt_H.invokeIRQHandler_def invokeIRQHandler_def) apply (ctac add: maskInterrupt_ccorres) - apply (simp add: from_bool_def false_def) + apply simp done lemma getIRQSlot_ccorres: @@ -73,11 +74,11 @@ proof - apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="-1"]) apply (rule ccorres_call) - apply (rule cteInsert_ccorres[simplified dc_def]) + apply (rule cteInsert_ccorres) apply 
(simp add: pred_conj_def)+ - apply (strengthen ntfn_badge_derived_enough_strg[unfolded o_def] + apply (strengthen ntfn_badge_derived_enough_strg invs_mdb_strengthen' valid_objs_invs'_strg) - apply (wp cteDeleteOne_other_cap[unfolded o_def])[1] + apply (wp cteDeleteOne_other_cap[unfolded o_def]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) @@ -107,7 +108,7 @@ lemma invokeIRQHandler_ClearIRQHandler_ccorres: apply simp apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) apply (rule ccorres_symb_exec_r) - apply (ctac add: cteDeleteOne_ccorres[where w="-1",simplified dc_def]) + apply (ctac add: cteDeleteOne_ccorres[where w="-1"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) @@ -220,7 +221,7 @@ lemma decodeIRQHandlerInvocation_ccorres: apply (clarsimp simp: Collect_const_mem neq_Nil_conv dest!: interpret_excaps_eq) apply (simp add: rf_sr_ksCurThread if_1_0_0 mask_def[where n=4] - "StrictC'_thread_state_defs" cap_get_tag_isCap excaps_map_def + ThreadState_defs cap_get_tag_isCap excaps_map_def word_sless_def word_sle_def) apply (simp add: invocationCatch_def throwError_bind interpret_excaps_test_null Collect_True @@ -248,24 +249,23 @@ lemma decodeIRQHandlerInvocation_ccorres: apply (simp add: syscall_error_to_H_cases) apply simp apply (clarsimp simp: Collect_const_mem tcb_at_invs') - apply (clarsimp simp: invs_queues invs_valid_objs' + apply (clarsimp simp: invs_valid_objs' ct_in_state'_def ccap_rights_relation_def - mask_def[where n=4] - "StrictC'_thread_state_defs") + mask_def[where n=4] ThreadState_defs) apply (subst pred_tcb'_weakenE, assumption, fastforce)+ apply (clarsimp simp: rf_sr_ksCurThread word_sle_def word_sless_def sysargs_rel_n_def word_less_nat_alt) apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth - slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def + slotcap_in_mem_def valid_tcb_state'_def dest!: interpret_excaps_eq split: bool.splits) apply (intro conjI impI allI) apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth - slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def + slotcap_in_mem_def valid_tcb_state'_def dest!: interpret_excaps_eq split: bool.splits)+ - apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[4] + apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[6] apply (drule ctes_of_valid') apply fastforce apply (clarsimp simp add:valid_cap_simps' ARM.maxIRQ_def) @@ -381,8 +381,7 @@ lemma isIRQActive_ccorres: Let_def cinterrupt_relation_def) apply (drule spec, drule(1) mp) apply (case_tac "intStateIRQTable (ksInterruptState \) irq") - apply (simp add: from_bool_def irq_state_defs Kernel_C.maxIRQ_def - word_le_nat_alt)+ + apply (simp add: irq_state_defs Kernel_C.maxIRQ_def word_le_nat_alt)+ done lemma Platform_maxIRQ: @@ -586,8 +585,7 @@ lemma Arch_decodeIRQControlInvocation_ccorres: apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: interpret_excaps_test_null excaps_map_def - Collect_const_mem word_sless_def word_sle_def - ThreadState_Restart_def unat_of_nat mask_def) + Collect_const_mem word_sless_def word_sle_def unat_of_nat mask_def) apply (rule conjI) apply (simp add: Kernel_C.maxIRQ_def word_le_nat_alt ucast_nat_def unat_ucast) 
apply (cut_tac unat_lt2p[where x="args ! 3"]) @@ -602,14 +600,14 @@ lemma Arch_decodeIRQControlInvocation_ccorres: dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1] apply (clarsimp simp: neq_Nil_conv numeral_eqs[symmetric] word_sle_def word_sless_def) apply (drule interpret_excaps_eq[rule_format, where n=0], simp) - apply (clarsimp simp: mask_def[where n=4] "StrictC'_thread_state_defs" + apply (clarsimp simp: mask_def[where n=4] ThreadState_defs rf_sr_ksCurThread ccap_rights_relation_def rightsFromWord_wordFromRights) apply (simp cong: conj_cong) apply (clarsimp simp: Kernel_C.maxIRQ_def word_le_nat_alt ucast_nat_def ucast_ucast_mask mask_eq_ucast_eq unat_ucast_mask less_mask_eq[unfolded word_less_nat_alt]) - apply (cases "args ! Suc 0 = 0"; clarsimp simp: true_def false_def) + apply (cases "args ! Suc 0 = 0"; clarsimp) done lemma decodeIRQControlInvocation_ccorres: @@ -759,7 +757,7 @@ lemma decodeIRQControlInvocation_ccorres: apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: interpret_excaps_test_null excaps_map_def Collect_const_mem word_sless_def word_sle_def - ThreadState_Restart_def unat_of_nat mask_def) + unat_of_nat mask_def) apply (rule conjI) apply (simp add: Kernel_C.maxIRQ_def word_le_nat_alt ucast_nat_def unat_ucast) @@ -776,7 +774,7 @@ lemma decodeIRQControlInvocation_ccorres: apply (clarsimp simp: neq_Nil_conv numeral_eqs[symmetric] word_sle_def word_sless_def) apply (drule interpret_excaps_eq[rule_format, where n=0], simp) - apply (clarsimp simp: mask_def[where n=4] "StrictC'_thread_state_defs" + apply (clarsimp simp: mask_def[where n=4] ThreadState_defs rf_sr_ksCurThread ccap_rights_relation_def rightsFromWord_wordFromRights) diff --git a/proof/crefine/ARM/Invoke_C.thy b/proof/crefine/ARM/Invoke_C.thy index 8b41734685..dd8f5f6a98 100644 --- a/proof/crefine/ARM/Invoke_C.thy +++ b/proof/crefine/ARM/Invoke_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -63,11 +64,11 @@ lemma setDomain_ccorres: apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_return_Skip) - apply (simp add: when_def to_bool_def) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (simp add: when_def) + apply (rule_tac R="\s. curThread = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply simp apply (wp hoare_drop_imps weak_sch_act_wf_lift_linear) @@ -75,15 +76,17 @@ lemma setDomain_ccorres: apply simp apply wp apply (rule_tac Q="\_. all_invs_but_sch_extra and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s)" in hoare_strengthen_post) + and (\s. curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp threadSet_all_invs_but_sch_extra) - apply (clarsimp simp:valid_pspace_valid_objs' st_tcb_at_def[symmetric] - sch_act_simple_def st_tcb_at'_def o_def weak_sch_act_wf_def split:if_splits) + apply (fastforce simp: valid_pspace_valid_objs' st_tcb_at_def[symmetric] + sch_act_simple_def st_tcb_at'_def weak_sch_act_wf_def + split: if_splits) apply (simp add: guard_is_UNIV_def) - apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s \ (\p. t \ set (ksReadyQueues s p)))" in hoare_strengthen_post) + apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple and (\s. 
curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_not_queued - tcbSchedDequeue_not_in_queue hoare_vcg_imp_lift hoare_vcg_all_lift) + hoare_vcg_imp_lift hoare_vcg_all_lift) apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def) apply (fastforce simp: valid_tcb'_def tcb_cte_cases_def invs'_def valid_state'_def valid_pspace'_def) @@ -191,10 +194,10 @@ lemma decodeDomainInvocation_ccorres: apply clarsimp apply (vcg exspec=getSyscallArg_modifies) - apply (clarsimp simp: valid_tcb_state'_def invs_valid_queues' invs_valid_objs' - invs_queues invs_sch_act_wf' ct_in_state'_def pred_tcb_at' + apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' + invs_sch_act_wf' ct_in_state'_def pred_tcb_at' rf_sr_ksCurThread word_sle_def word_sless_def sysargs_rel_to_n - mask_eq_iff_w2p mask_eq_iff_w2p word_size "StrictC'_thread_state_defs") + mask_eq_iff_w2p mask_eq_iff_w2p word_size ThreadState_defs) apply (rule conjI) apply (clarsimp simp: linorder_not_le isCap_simps) apply (rule conjI, clarsimp simp: unat32_eq_of_nat) @@ -202,7 +205,7 @@ lemma decodeDomainInvocation_ccorres: apply (drule_tac x="extraCaps ! 0" and P="\v. valid_cap' (fst v) s" in bspec) apply (clarsimp simp: nth_mem interpret_excaps_test_null excaps_map_def) apply (clarsimp simp: valid_cap_simps' pred_tcb'_weakenE active_runnable') - apply (rule conjI) + apply (intro conjI; fastforce?) apply (fastforce simp: tcb_st_refs_of'_def elim:pred_tcb'_weakenE) apply (simp add: word_le_nat_alt unat_ucast unat_numDomains_to_H le_maxDomain_eq_less_numDomains) apply (clarsimp simp: ccap_relation_def cap_to_H_simps cap_thread_cap_lift) @@ -227,7 +230,7 @@ lemma invokeCNodeDelete_ccorres: apply (rule ccorres_trim_returnE, simp, simp) apply (rule ccorres_callE) apply (rule cteDelete_ccorres[simplified]) - apply (simp add: from_bool_def true_def)+ + apply simp+ done @@ -247,7 +250,7 @@ lemma invokeCNodeRevoke_ccorres: apply (rule ccorres_trim_returnE, simp, simp) apply (rule ccorres_callE) apply (rule cteRevoke_ccorres[simplified]) - apply (simp add: from_bool_def true_def)+ + apply simp+ done @@ -387,7 +390,7 @@ lemma invokeCNodeRotate_ccorres: apply clarsimp apply (simp add: return_def) apply wp - apply (simp add: guard_is_UNIV_def dc_def xfdc_def) + apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp) apply (clarsimp simp:cte_wp_at_ctes_of) @@ -536,12 +539,10 @@ lemma hasCancelSendRights_spec: apply clarsimp apply (drule sym, drule (1) cap_get_tag_to_H) apply (clarsimp simp: hasCancelSendRights_def to_bool_def - true_def false_def split: if_split bool.splits) apply (rule impI) apply (case_tac cap, - auto simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs - from_bool_def false_def true_def hasCancelSendRights_def + auto simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs hasCancelSendRights_def dest: cap_get_tag_isArchCap_unfolded_H_cap split: capability.splits bool.splits)[1] done @@ -619,9 +620,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const cong: call_ignore_cong) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc | csymbr)+ - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] - if_1_0_0 dc_def[symmetric] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) apply (simp add:if_P del: Collect_const) @@ -704,8 +703,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: 
Collect_const[symmetric] del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] - if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: list_case_helper injection_handler_returnOk @@ -732,13 +730,12 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError whenE_def - dc_def[symmetric]) + apply (simp add: injection_handler_throwError whenE_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk ccorres_invocationCatch_Inr performInvocation_def - bindE_assoc false_def) + bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeInsert_ccorres) @@ -751,16 +748,16 @@ lemma decodeCNodeInvocation_ccorres: apply (vcg exspec=setThreadState_modifies) apply simp apply (wp injection_wp_E[OF refl]) - apply (rule hoare_post_imp_R) - apply (rule_tac Q'="\rv. valid_pspace' and valid_queues + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' and valid_cap' rv and valid_objs' and tcb_at' thread and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_vcg_R_conj) apply (rule deriveCap_Null_helper[OF deriveCap_derived]) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) - apply (simp add: is_derived'_def badge_derived'_def - valid_tcb_state'_def) + apply (fastforce simp: is_derived'_def badge_derived'_def + valid_tcb_state'_def) apply (simp add: Collect_const_mem all_ex_eq_helper) apply (vcg exspec=deriveCap_modifies) apply wp @@ -809,12 +806,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: whenE_def injection_handler_returnOk - invocationCatch_def injection_handler_throwError - dc_def[symmetric]) + invocationCatch_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk - ccorres_invocationCatch_Inr false_def + ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) @@ -828,15 +824,15 @@ lemma decodeCNodeInvocation_ccorres: apply (vcg exspec=setThreadState_modifies) apply (simp add: conj_comms valid_tcb_state'_def) apply (wp injection_wp_E[OF refl]) - apply (rule hoare_post_imp_R) - apply (rule_tac Q'="\rv. valid_pspace' and valid_queues + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' and valid_cap' rv and valid_objs' and tcb_at' thread and (\s. 
sch_act_wf (ksSchedulerAction s) s)" in hoare_vcg_R_conj) apply (rule deriveCap_Null_helper [OF deriveCap_derived]) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) - apply (simp add: is_derived'_def badge_derived'_def) + apply (fastforce simp: is_derived'_def badge_derived'_def) apply (simp add: Collect_const_mem all_ex_eq_helper) apply (vcg exspec=deriveCap_modifies) apply (simp add: Collect_const_mem) @@ -870,7 +866,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply csymbr apply csymbr - apply (simp add: cap_get_tag_NullCap true_def) + apply (simp add: cap_get_tag_NullCap) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) @@ -889,7 +885,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: flip: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: if_not_P del: Collect_const) @@ -908,15 +904,14 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric] numeral_eqs) + apply (simp add: whenE_def injection_handler_throwError numeral_eqs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk ccorres_invocationCatch_Inr numeral_eqs performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) - apply (simp add: true_def ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) apply (rule ccorres_alternative2) apply (rule ccorres_return_CE, simp+)[1] @@ -944,14 +939,16 @@ lemma decodeCNodeInvocation_ccorres: apply (rule validE_R_validE) apply (rule_tac Q'="\a b. cte_wp_at' (\x. 
True) a b \ invs' b \ tcb_at' thread b \ sch_act_wf (ksSchedulerAction b) b \ valid_tcb_state' Restart b - \ Q2 b" for Q2 in hoare_post_imp_R) - prefer 2 - apply (clarsimp simp:cte_wp_at_ctes_of) - apply (drule ctes_of_valid') - apply (erule invs_valid_objs') - apply (clarsimp simp:valid_updateCapDataI invs_queues invs_valid_objs' invs_valid_pspace') - apply (assumption) - apply (wp hoare_vcg_all_lift_R injection_wp_E[OF refl] + \ Q2 b" for Q2 in hoare_strengthen_postE_R) + prefer 2 + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule ctes_of_valid') + apply (erule invs_valid_objs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (clarsimp simp:valid_updateCapDataI invs_valid_objs' invs_valid_pspace') + apply assumption + apply (wp hoare_vcg_all_liftE_R injection_wp_E[OF refl] lsfco_cte_at' hoare_vcg_const_imp_lift_R )+ apply (simp add: Collect_const_mem word_sle_def word_sless_def @@ -1008,13 +1005,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_returnOk bindE_assoc - injection_bindE[OF refl refl] split_def - dc_def[symmetric]) + injection_bindE[OF refl refl] split_def) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc)+ apply (ctac add: ccorres_injection_handler_csum1 [OF ensureEmptySlot_ccorres]) - apply (simp add: ccorres_invocationCatch_Inr performInvocation_def - dc_def[symmetric] bindE_assoc) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (ctac(no_vcg) add: invokeCNodeSaveCaller_ccorres) apply (rule ccorres_alternative2) @@ -1023,7 +1018,7 @@ lemma decodeCNodeInvocation_ccorres: apply (wp sts_valid_pspace_hangers)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_split_throws) apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg @@ -1053,8 +1048,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: unlessE_def whenE_def injection_handler_throwError - dc_def[symmetric] from_bool_0) + apply (simp add: unlessE_def whenE_def injection_handler_throwError from_bool_0) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: unlessE_def whenE_def injection_handler_returnOk @@ -1098,12 +1092,10 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: throwError_def return_def exception_defs syscall_error_rel_def syscall_error_to_H_cases) apply clarsimp - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const) apply csymbr apply (simp add: interpret_excaps_test_null excaps_map_def - if_1_0_0 dc_def[symmetric] del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: throwError_bind invocationCatch_def) @@ -1163,8 +1155,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const) apply csymbr apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1226,8 +1217,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr 
apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1235,8 +1225,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1250,7 +1239,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply wp apply (vcg exspec=invokeCNodeRotate_modifies) - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) apply (simp add: Collect_const_mem) @@ -1289,7 +1278,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule_tac Q'="\rvb. invs' and cte_at' rv and cte_at' rva and tcb_at' thread" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (clarsimp simp: cte_wp_at_ctes_of weak_derived_updateCapData capBadge_updateCapData_True) @@ -1314,16 +1303,16 @@ lemma decodeCNodeInvocation_ccorres: apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp @@ -1338,7 +1327,7 @@ lemma decodeCNodeInvocation_ccorres: apply vcg apply simp apply (wp injection_wp_E[OF refl] hoare_vcg_const_imp_lift_R - hoare_vcg_all_lift_R lsfco_cte_at' static_imp_wp + hoare_vcg_all_liftE_R lsfco_cte_at' hoare_weak_lift_imp | simp add: hasCancelSendRights_not_Null ctes_of_valid_strengthen cong: conj_cong | wp (once) hoare_drop_imps)+ @@ -1353,7 +1342,7 @@ lemma decodeCNodeInvocation_ccorres: apply simp apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' invs_valid_pspace' - ct_in_state'_def pred_tcb_at' invs_queues + ct_in_state'_def pred_tcb_at' cur_tcb'_def word_sle_def word_sless_def unat_lt2p[where 'a=32, folded word_bits_def]) apply (rule conjI) @@ -1367,7 +1356,7 @@ lemma decodeCNodeInvocation_ccorres: apply (frule interpret_excaps_eq) apply (clarsimp simp: excaps_map_def mask_def[where n=4] ccap_rights_relation_def rightsFromWord_wordFromRights - "StrictC'_thread_state_defs" map_comp_Some_iff + ThreadState_defs map_comp_Some_iff rf_sr_ksCurThread hd_conv_nth hd_drop_conv_nth) apply ((rule conjI | clarsimp simp: rightsFromWord_wordFromRights @@ -1375,9 +1364,7 @@ lemma decodeCNodeInvocation_ccorres: cl_valid_cte_def c_valid_cap_def map_option_Some_eq2 neq_Nil_conv ccap_relation_def numeral_eqs hasCancelSendRights_not_Null - ccap_relation_NullCap_iff[symmetric] - if_1_0_0 interpret_excaps_test_null - false_def true_def + ccap_relation_NullCap_iff[symmetric] interpret_excaps_test_null | clarsimp simp: typ_heap_simps' | frule 
length_ineq_not_Nil)+) done @@ -1386,9 +1373,6 @@ end context begin interpretation Arch . (*FIXME: arch_split*) -crunch valid_queues[wp]: insertNewCap "valid_queues" - (wp: crunch_wps) - lemma setCTE_sch_act_wf[wp]: "\ \s. sch_act_wf (ksSchedulerAction s) s \ setCTE src cte @@ -1513,15 +1497,6 @@ lemma pspace_no_overlap_underlying_zero_update: apply blast done -lemma addrFromPPtr_mask: - "n \ 28 - \ addrFromPPtr ptr && mask n = ptr && mask n" - apply (simp add: addrFromPPtr_def pptrBaseOffset_def pptrBase_def - ARM.physBase_def) - apply word_bitwise - apply simp - done - lemma clearMemory_untyped_ccorres: "ccorres dc xfdc ((\s. invs' s \ (\cap. cte_wp_at' (\cte. cteCap cte = cap) ut_slot s @@ -1568,7 +1543,7 @@ lemma clearMemory_untyped_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply csymbr - apply (ctac add: cleanCacheRange_RAM_ccorres[unfolded dc_def]) + apply (ctac add: cleanCacheRange_RAM_ccorres) apply wp apply (simp add: guard_is_UNIV_def unat_of_nat word_bits_def capAligned_def word_of_nat_less) @@ -1773,8 +1748,7 @@ lemma resetUntypedCap_ccorres: apply (rule ccorres_Guard_Seq[where S=UNIV])? apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow) - apply (rule_tac idx="capFreeIndex (cteCap cte)" - in deleteObjects_ccorres[where p=slot, unfolded o_def]) + apply (rule_tac idx="capFreeIndex (cteCap cte)" in deleteObjects_ccorres[where p=slot]) apply ceqv apply clarsimp apply (simp only: ccorres_seq_cond_raise) @@ -2268,7 +2242,7 @@ lemma invokeUntyped_Retype_ccorres: (Call invokeUntyped_Retype_'proc)" apply (cinit lift: retypeBase_' srcSlot_' reset_' newType_' userSize_' deviceMemory_' destCNode_' destOffset_' destLength_' - simp: when_def) + simp: when_def archOverlap_def) apply (rule ccorres_move_c_guard_cte) apply csymbr apply (rule ccorres_abstract_cleanup) @@ -2337,7 +2311,7 @@ lemma invokeUntyped_Retype_ccorres: apply (clarsimp simp: misc unat_of_nat_eq[OF range_cover.weak, OF cover]) apply (vcg exspec=cap_untyped_cap_ptr_set_capFreeIndex_modifies) apply simp - apply (rule validE_validE_R, rule hoare_post_impErr, + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule hoare_vcg_conj_liftE1[rotated, where Q="\_ s. 
case gsCNodes s cnodeptr of None \ False | Some n \ length destSlots + unat start \ 2 ^ n"], @@ -2508,7 +2482,7 @@ lemma mapME_ensureEmptySlot': apply (erule meta_allE) apply wp apply (fold validE_R_def) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply clarsimp done @@ -2660,7 +2634,6 @@ lemma Arch_isFrameType_spec: apply (auto simp: object_type_from_H_def ) done - lemma decodeUntypedInvocation_ccorres_helper: notes TripleSuc[simp] untypedBits_defs[simp] notes valid_untyped_inv_wcap'.simps[simp del] tl_drop_1[simp] @@ -2838,8 +2811,8 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (ctac add: ccorres_injection_handler_csum1 [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def]) apply (simp add: injection_liftE[OF refl]) - apply (simp add: liftE_liftM o_def split_def withoutFailure_def - hd_drop_conv_nth2 numeral_eqs[symmetric]) + apply (simp add: liftE_liftM o_def split_def hd_drop_conv_nth2 + cong: ccorres_all_cong) apply (rule ccorres_nohs) apply (rule ccorres_getSlotCap_cte_at) apply (rule ccorres_move_c_guard_cte) @@ -2951,8 +2924,8 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (simp add: ccorres_cond_iffs returnOk_def) apply (rule ccorres_return_Skip') apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ - apply (simp add: ccorres_cond_iffs inl_rrel_inl_rrel) - apply (rule ccorres_return_C_errorE_inl_rrel, simp+)[1] + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_C_errorE_inl_rrel; simp) apply wp apply (simp add: all_ex_eq_helper) apply (vcg exspec=ensureEmptySlot_modifies) @@ -3047,8 +3020,7 @@ lemma decodeUntypedInvocation_ccorres_helper: performInvocation_def liftE_bindE bind_assoc) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_trim_returnE, (simp (no_asm))+) - apply (simp (no_asm) add: o_def dc_def[symmetric] bindE_assoc - id_def[symmetric] bind_bindE_assoc) + apply (simp (no_asm) add: bindE_assoc bind_bindE_assoc) apply (rule ccorres_seq_skip'[THEN iffD1]) apply (ctac(no_vcg) add: invokeUntyped_Retype_ccorres[where start = "args!4"]) apply (rule ccorres_alternative2) @@ -3093,7 +3065,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule conseqPre,vcg,clarsimp) apply vcg apply (rule ccorres_guard_imp[where Q =\ and Q' = UNIV,rotated], assumption+) - apply (simp add: o_def) + apply simp apply simp apply (rule checkFreeIndex_wp) apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat cap_get_tag_isCap @@ -3111,8 +3083,7 @@ lemma decodeUntypedInvocation_ccorres_helper: unat_of_nat_APIType_capBits word_size length_ineq_not_Nil not_less word_le_nat_alt isCap_simps valid_cap_simps') apply (strengthen word_of_nat_less) - apply (clarsimp simp: StrictC'_thread_state_defs mask_def true_def false_def - from_bool_0 ccap_relation_isDeviceCap2 + apply (clarsimp simp: ThreadState_defs mask_def ccap_relation_isDeviceCap2 split: if_split) apply (intro conjI impI; clarsimp simp: not_less shiftr_eq_0 unat_of_nat_APIType_capBits @@ -3124,10 +3095,9 @@ lemma decodeUntypedInvocation_ccorres_helper: and ex_cte_cap_to' (capCNodePtr rv) and (\s. case gsCNodes s (capCNodePtr rv) of None \ False | Some n \ args ! 4 + args ! 
5 - 1 < 2 ^ n) - and sch_act_simple and ct_active'" in hoare_post_imp_R) + and sch_act_simple and ct_active'" in hoare_strengthen_postE_R) prefer 2 - apply (clarsimp simp: invs_valid_objs' invs_mdb' - invs_queues ct_in_state'_def pred_tcb_at') + apply (clarsimp simp: invs_valid_objs' invs_mdb' ct_in_state'_def pred_tcb_at') apply (subgoal_tac "ksCurThread s \ ksIdleThread sa") prefer 2 apply clarsimp @@ -3158,7 +3128,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule validE_R_validE) apply (wp injection_wp_E[OF refl]) apply clarsimp - apply (simp add: ccHoarePost_def xfdc_def) + apply (simp add: ccHoarePost_def) apply (simp only: whileAnno_def[where I=UNIV and V=UNIV, symmetric]) apply (rule_tac V=UNIV in HoarePartial.reannotateWhileNoGuard) apply (vcg exspec=ensureEmptySlot_modifies) @@ -3170,7 +3140,7 @@ lemma decodeUntypedInvocation_ccorres_helper: \ invs' s \ ksCurThread s = thread \ valid_cap' r s \ (\rf\cte_refs' r (irq_node' s). ex_cte_cap_to' rf s) - \ sch_act_simple s \ ct_active' s" in hoare_post_imp_R) + \ sch_act_simple s \ ct_active' s" in hoare_strengthen_postE_R) apply clarsimp apply (wp injection_wp_E[OF refl] getSlotCap_cap_to' | wp (once) hoare_drop_imps)+ @@ -3213,8 +3183,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (clarsimp simp: hd_drop_conv_nth2 hd_conv_nth neq_Nil_lengthI ct_in_state'_def pred_tcb_at' rf_sr_ksCurThread mask_eq_iff_w2p - "StrictC'_thread_state_defs" numeral_eqs[symmetric] - cap_get_tag_isCap cte_wp_at_ctes_of + numeral_eqs[symmetric] cap_get_tag_isCap cte_wp_at_ctes_of unat_eq_0 ccHoarePost_def) apply (rule conjI) apply (clarsimp simp: linorder_not_less isCap_simps) @@ -3285,18 +3254,16 @@ shows apply (rule ccorres_guard_imp2) apply (rule monadic_rewrite_ccorres_assemble) apply (rule_tac isBlocking=isBlocking and isCall=isCall and buffer=buffer - in decodeUntypedInvocation_ccorres_helper[unfolded K_def]) + in decodeUntypedInvocation_ccorres_helper) apply assumption - apply (rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_bindE[OF monadic_rewrite_refl]) - apply (simp add: liftE_bindE stateAssert_def2 bind_assoc) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P=x in monadic_rewrite_gen_asm) - apply simp + apply (simp add: liftE_bindE stateAssert_def2 bind_assoc) + apply (monadic_rewrite_r monadic_rewrite_if_r_True) + apply (monadic_rewrite_r_method monadic_rewrite_symb_exec_r_drop wpsimp) apply (rule monadic_rewrite_refl) - apply (wp | simp)+ - apply (simp add: gets_bind_ign) + apply wpsimp + apply (rule monadic_rewrite_refl) apply (rule monadic_rewrite_refl) apply (clarsimp simp: ex_cte_cap_wp_to'_def excaps_in_mem_def) apply (drule(1) bspec)+ diff --git a/proof/crefine/ARM/IpcCancel_C.thy b/proof/crefine/ARM/IpcCancel_C.thy index aed779b54f..b872f9e13a 100644 --- a/proof/crefine/ARM/IpcCancel_C.thy +++ b/proof/crefine/ARM/IpcCancel_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -14,12 +15,12 @@ begin declare ctcb_size_bits_ge_4[simp] lemma cready_queues_index_to_C_in_range': - assumes prems: "qdom \ ucast maxDom" "prio \ ucast maxPrio" + assumes prems: "qdom \ maxDomain" "prio \ maxPriority" shows "cready_queues_index_to_C qdom prio < num_tcb_queues" proof - have P: "unat prio < numPriorities" using prems - by (simp add: numPriorities_def seL4_MaxPrio_def Suc_le_lessD unat_le_helper) + by (simp add: 
numPriorities_def Suc_le_lessD unat_le_helper maxDomain_def maxPriority_def) have Q: "unat qdom < numDomains" using prems by (simp add: maxDom_to_H le_maxDomain_eq_less_numDomains word_le_nat_alt) @@ -29,32 +30,22 @@ proof - qed lemmas cready_queues_index_to_C_in_range = - cready_queues_index_to_C_in_range'[simplified num_tcb_queues_def] + cready_queues_index_to_C_in_range'[simplified num_tcb_queues_val] lemma cready_queues_index_to_C_inj: "\ cready_queues_index_to_C qdom prio = cready_queues_index_to_C qdom' prio'; - prio \ ucast maxPrio; prio' \ ucast maxPrio \ \ prio = prio' \ qdom = qdom'" + prio \ maxPriority; prio' \ maxPriority \ \ prio = prio' \ qdom = qdom'" apply (rule context_conjI) - apply (auto simp: cready_queues_index_to_C_def numPriorities_def + apply (auto simp: cready_queues_index_to_C_def numPriorities_def maxPriority_def seL4_MaxPrio_def word_le_nat_alt dest: arg_cong[where f="\x. x mod 256"]) done lemma cready_queues_index_to_C_distinct: - "\ qdom = qdom' \ prio \ prio'; prio \ ucast maxPrio; prio' \ ucast maxPrio \ + "\ qdom = qdom' \ prio \ prio'; prio \ maxPriority; prio' \ maxPriority \ \ cready_queues_index_to_C qdom prio \ cready_queues_index_to_C qdom' prio'" apply (auto simp: cready_queues_index_to_C_inj) done -lemma valid_queuesD': - "\ obj_at' (inQ d p) t s; valid_queues' s \ - \ t \ set (ksReadyQueues s (d, p))" - by (simp add: valid_queues'_def) - -lemma invs_valid_queues'[elim!]: - "invs' s \ valid_queues' s" - by (simp add: invs'_def valid_state'_def) - - lemma ntfn_ptr_get_queue_spec: "\s. \ \ {\. s = \ \ \ \\<^sub>c \<^bsup>\\<^esup>ntfnPtr} \ret__struct_tcb_queue_C :== PROC ntfn_ptr_get_queue(\ntfnPtr) \head_C \ret__struct_tcb_queue_C = Ptr (ntfnQueue_head_CL (notification_lift (the (cslift s \<^bsup>s\<^esup>ntfnPtr)))) \ @@ -199,22 +190,19 @@ lemma cancelSignal_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) apply simp - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def) - apply (simp add: carch_state_relation_def carch_globals_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def) + apply (simp add: carch_state_relation_def carch_globals_def) apply (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) @@ -237,30 +225,27 @@ lemma cancelSignal_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule 
cendpoint_relation_ntfn_queue) - apply fastforce - apply assumption+ - apply simp - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def isWaitingNtfn_def - split: ntfn.splits split del: if_split) - apply (erule iffD1 [OF tcb_queue_relation'_cong [OF refl _ _ refl], rotated -1]) - apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff) - apply (simp add: tcb_queue_relation'_next_mask) - apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff) - apply (simp add: tcb_queue_relation'_prev_mask) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue) + apply fastforce + apply assumption+ + apply simp + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def isWaitingNtfn_def + split: ntfn.splits split del: if_split) + apply (erule iffD1 [OF tcb_queue_relation'_cong [OF refl _ _ refl], rotated -1]) + apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff) + apply (simp add: tcb_queue_relation'_next_mask) + apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff) + apply (simp add: tcb_queue_relation'_prev_mask) + apply simp apply (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) @@ -353,9 +338,9 @@ lemma isStopped_ccorres [corres]: apply vcg apply clarsimp apply clarsimp - apply (clarsimp simp: to_bool_def true_def false_def typ_heap_simps - ctcb_relation_thread_state_to_tsType split: thread_state.splits) - apply (simp add: "StrictC'_thread_state_defs")+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ done lemma isRunnable_ccorres [corres]: @@ -381,71 +366,9 @@ lemma isRunnable_ccorres [corres]: apply (vcg) apply (clarsimp) apply (clarsimp) - apply (clarsimp simp: to_bool_def true_def false_def typ_heap_simps - ctcb_relation_thread_state_to_tsType split: thread_state.splits) - apply (simp add: "StrictC'_thread_state_defs")+ -done - - - -lemma tcb_queue_relation_update_head: - fixes getNext_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" and - getPrev_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" - assumes qr: "tcb_queue_relation getNext getPrev mp queue NULL qhead" - and qh': "qhead' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qhead' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qhead' \ NULL" - and fgN: "fg_cons getNext (getNext_update \ (\x _. x))" - and fgP: "fg_cons getPrev (getPrev_update \ (\x _. x))" - and npu: "\f t. getNext (getPrev_update f t) = getNext t" - and pnu: "\f t. getPrev (getNext_update f t) = getPrev t" - shows "tcb_queue_relation getNext getPrev - (upd_unless_null qhead (getPrev_update (\_. qhead') (the (mp qhead))) - (mp(qhead' := Some (getPrev_update (\_. NULL) (getNext_update (\_. 
qhead) tcb))))) - (ctcb_ptr_to_tcb_ptr qhead' # queue) NULL qhead'" - using qr qh' cs_tcb valid_ep qhN - apply (subgoal_tac "qhead \ qhead'") - apply (clarsimp simp: pnu upd_unless_null_def fg_consD1 [OF fgN] fg_consD1 [OF fgP] pnu npu) - apply (cases queue) - apply simp - apply (frule (2) tcb_queue_relation_next_not_NULL) - apply simp - apply (clarsimp simp: fg_consD1 [OF fgN] fg_consD1 [OF fgP] pnu npu) - apply (subst tcb_queue_relation_cong [OF refl refl refl, where mp' = mp]) - apply (clarsimp simp: inj_eq) - apply (intro impI conjI) - apply (frule_tac x = x in imageI [where f = tcb_ptr_to_ctcb_ptr]) - apply simp - apply simp - apply simp - apply clarsimp - apply (cases queue) - apply simp - apply simp - done - -lemma tcbSchedEnqueue_update: - assumes sr: "sched_queue_relation' mp queue qhead qend" - and qh': "qhead' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qhead' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qhead' \ NULL" - shows - "sched_queue_relation' - (upd_unless_null qhead (tcbSchedPrev_C_update (\_. qhead') (the (mp qhead))) - (mp(qhead' \ tcb\tcbSchedNext_C := qhead, tcbSchedPrev_C := NULL\))) - (ctcb_ptr_to_tcb_ptr qhead' # queue) qhead' (if qend = NULL then qhead' else qend)" - using sr qh' cs_tcb valid_ep qhN - apply - - apply (erule tcb_queue_relationE') - apply (rule tcb_queue_relationI') - apply (erule (5) tcb_queue_relation_update_head - [where getNext_update = tcbSchedNext_C_update and getPrev_update = tcbSchedPrev_C_update], simp_all)[1] - apply simp - apply (intro impI) - apply (erule (1) tcb_queue_relation_not_NULL') - apply simp + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ done lemma tcb_ptr_to_ctcb_ptr_imageD: @@ -460,63 +383,8 @@ lemma ctcb_ptr_to_tcb_ptr_imageI: apply simp done -lemma tcb_queue'_head_end_NULL: - assumes qr: "tcb_queue_relation' getNext getPrev mp queue qhead qend" - and tat: "\t\set queue. tcb_at' t s" - shows "(qend = NULL) = (qhead = NULL)" - using qr tat - apply - - apply (erule tcb_queue_relationE') - apply (simp add: tcb_queue_head_empty_iff) - apply (rule impI) - apply (rule tcb_at_not_NULL) - apply (erule bspec) - apply simp - done - -lemma tcb_queue_relation_qhead_mem: - "\ tcb_queue_relation getNext getPrev mp queue NULL qhead; - (\tcb\set queue. tcb_at' tcb t) \ - \ qhead \ NULL \ ctcb_ptr_to_tcb_ptr qhead \ set queue" - by (clarsimp simp: tcb_queue_head_empty_iff tcb_queue_relation_head_hd) - -lemma tcb_queue_relation_qhead_valid: - "\ tcb_queue_relation getNext getPrev (cslift s') queue NULL qhead; - (s, s') \ rf_sr; (\tcb\set queue. tcb_at' tcb s) \ - \ qhead \ NULL \ s' \\<^sub>c qhead" - apply (frule (1) tcb_queue_relation_qhead_mem) - apply clarsimp - apply(drule (3) tcb_queue_memberD) - apply (simp add: h_t_valid_clift_Some_iff) - done - -lemmas tcb_queue_relation_qhead_mem' = tcb_queue_relation_qhead_mem [OF tcb_queue_relation'_queue_rel] -lemmas tcb_queue_relation_qhead_valid' = tcb_queue_relation_qhead_valid [OF tcb_queue_relation'_queue_rel] - - -lemma valid_queues_valid_q: - "valid_queues s \ (\tcb\set (ksReadyQueues s (qdom, prio)). 
tcb_at' tcb s) \ distinct (ksReadyQueues s (qdom, prio))" - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec [where x = qdom]) - apply (drule spec [where x = prio]) - apply clarsimp - apply (drule (1) bspec, erule obj_at'_weakenE) - apply simp - done - declare unat_ucast_8_32[simp] -lemma rf_sr_sched_queue_relation: - "\ (s, s') \ rf_sr; d \ ucast maxDom; p \ ucast maxPrio \ - \ sched_queue_relation' (cslift s') (ksReadyQueues s (d, p)) - (head_C (index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p))) - (end_C (index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p)))" - unfolding rf_sr_def cstate_relation_def cready_queues_relation_def - apply (clarsimp simp: Let_def seL4_MinPrio_def minDom_def) - done - lemma threadSet_queued_ccorres [corres]: shows "ccorres dc xfdc (tcb_at' thread) {s. v32_' s = from_bool v \ thread_state_ptr_' s = Ptr &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C''])} [] @@ -536,146 +404,6 @@ lemma threadSet_queued_ccorres [corres]: apply (clarsimp simp: typ_heap_simps) done -lemma ccorres_pre_getQueue: - assumes cc: "\queue. ccorres r xf (P queue) (P' queue) hs (f queue) c" - shows "ccorres r xf (\s. P (ksReadyQueues s (d, p)) s \ d \ maxDomain \ p \ maxPriority) - {s'. \queue. (let cqueue = index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p) in - sched_queue_relation' (cslift s') queue (head_C cqueue) (end_C cqueue)) \ s' \ P' queue} - hs (getQueue d p >>= (\queue. f queue)) c" - apply (rule ccorres_guard_imp2) - apply (rule ccorres_symb_exec_l2) - defer - defer - apply (rule gq_sp) - defer - apply (rule ccorres_guard_imp) - apply (rule cc) - apply clarsimp - apply assumption - apply assumption - apply (clarsimp simp: getQueue_def gets_exs_valid) - apply clarsimp - apply (drule spec, erule mp) - apply (simp add: Let_def) - apply (erule rf_sr_sched_queue_relation) - apply (simp add: maxDom_to_H maxPrio_to_H)+ - done - -lemma state_relation_queue_update_helper': - "\ (s, s') \ rf_sr; - (\d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p))); - globals t = ksReadyQueues_'_update - (\_. Arrays.update (ksReadyQueues_' (globals s')) prio' q') - (t_hrs_'_update f (globals s')); - sched_queue_relation' (cslift t) q (head_C q') (end_C q'); - cslift t |` ( - tcb_ptr_to_ctcb_ptr ` S ) - = cslift s' |` ( - tcb_ptr_to_ctcb_ptr ` S ); - option_map tcb_null_sched_ptrs \ cslift t - = option_map tcb_null_sched_ptrs \ cslift s'; - cslift_all_but_tcb_C t s'; - zero_ranges_are_zero (gsUntypedZeroRanges s) (f (t_hrs_' (globals s'))) - = zero_ranges_are_zero (gsUntypedZeroRanges s) (t_hrs_' (globals s')); - hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s')); - prio' = cready_queues_index_to_C qdom prio; - \x \ S. obj_at' (inQ qdom prio) x s - \ (obj_at' (\tcb. tcbPriority tcb = prio) x s - \ obj_at' (\tcb. tcbDomain tcb = qdom) x s) - \ (tcb_at' x s \ (\d' p'. 
(d' \ qdom \ p' \ prio) - \ x \ set (ksReadyQueues s (d', p')))); - S \ {}; qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ (s \ksReadyQueues := (ksReadyQueues s)((qdom, prio) := q)\, t) \ rf_sr" - apply (subst(asm) disj_imp_rhs) - apply (subst obj_at'_and[symmetric]) - apply (rule disjI1, erule obj_at'_weakenE, simp add: inQ_def) - apply (subst(asm) disj_imp_rhs) - apply (subst(asm) obj_at'_and[symmetric]) - apply (rule conjI, erule obj_at'_weakenE, simp) - apply (rule allI, rule allI) - apply (drule_tac x=d' in spec) - apply (drule_tac x=p' in spec) - apply clarsimp - apply (drule(1) bspec) - apply (clarsimp simp: inQ_def obj_at'_def) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (intro conjI) - \ \cpspace_relation\ - apply (erule nonemptyE, drule(1) bspec) - apply (clarsimp simp: cpspace_relation_def) - apply (drule obj_at_ko_at', clarsimp) - apply (rule cmap_relationE1, assumption, - erule ko_at_projectKO_opt) - apply (frule null_sched_queue) - apply (frule null_sched_epD) - apply (intro conjI) - \ \tcb relation\ - apply (drule ctcb_relation_null_queue_ptrs, - simp_all)[1] - \ \endpoint relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (erule cendpoint_relation_upd_tcb_no_queues, simp+) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (erule cnotification_relation_upd_tcb_no_queues, simp+) - \ \ready queues\ - apply (simp add: cready_queues_relation_def Let_def cready_queues_index_to_C_in_range - seL4_MinPrio_def minDom_def) - apply clarsimp - apply (frule cready_queues_index_to_C_distinct, assumption+) - apply (clarsimp simp: cready_queues_index_to_C_in_range all_conj_distrib) - apply (rule iffD1 [OF tcb_queue_relation'_cong[OF refl], rotated -1], - drule spec, drule spec, erule mp, simp+) - apply clarsimp - apply (drule_tac x="tcb_ptr_to_ctcb_ptr x" in fun_cong)+ - apply (clarsimp simp: restrict_map_def - split: if_split_asm) - apply (simp_all add: carch_state_relation_def cmachine_state_relation_def - h_t_valid_clift_Some_iff) - done - -lemma state_relation_queue_update_helper: - "\ (s, s') \ rf_sr; valid_queues s; - globals t = ksReadyQueues_'_update - (\_. Arrays.update (ksReadyQueues_' (globals s')) prio' q') - (t_hrs_'_update f (globals s')); - sched_queue_relation' (cslift t) q (head_C q') (end_C q'); - cslift t |` ( - tcb_ptr_to_ctcb_ptr ` S ) - = cslift s' |` ( - tcb_ptr_to_ctcb_ptr ` S ); - option_map tcb_null_sched_ptrs \ cslift t - = option_map tcb_null_sched_ptrs \ cslift s'; - cslift_all_but_tcb_C t s'; - zero_ranges_are_zero (gsUntypedZeroRanges s) (f (t_hrs_' (globals s'))) - = zero_ranges_are_zero (gsUntypedZeroRanges s) (t_hrs_' (globals s')); - hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s')); - prio' = cready_queues_index_to_C qdom prio; - \x \ S. obj_at' (inQ qdom prio) x s - \ (obj_at' (\tcb. tcbPriority tcb = prio) x s - \ obj_at' (\tcb. tcbDomain tcb = qdom) x s) - \ (tcb_at' x s \ (\d' p'. (d' \ qdom \ p' \ prio) - \ x \ set (ksReadyQueues s (d', p')))); - S \ {}; qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ (s \ksReadyQueues := (ksReadyQueues s)((qdom, prio) := q)\, t) \ rf_sr" - apply (subgoal_tac "\d p. (\t\set (ksReadyQueues s (d, p)). 
obj_at' (inQ d p) t s) - \ distinct(ksReadyQueues s (d, p))") - apply (erule(5) state_relation_queue_update_helper', simp_all) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE, clarsimp) - done - -(* FIXME: move *) -lemma from_bool_vals [simp]: - "from_bool True = scast true" - "from_bool False = scast false" - "scast true \ scast false" - by (auto simp add: from_bool_def true_def false_def) - declare fun_upd_restrict_conv[simp del] lemmas queue_in_range = of_nat_mono_maybe[OF _ cready_queues_index_to_C_in_range, @@ -699,8 +427,8 @@ lemma cready_queues_index_to_C_def2: lemma ready_queues_index_spec: "\s. \ \ {s'. s' = s \ (Kernel_Config.numDomains \ 1 \ dom_' s' = 0)} Call ready_queues_index_'proc - \\ret__unsigned_long = (dom_' s) * 0x100 + (prio_' s)\" - by vcg (simp add: numDomains_sge_1_simp) + \\ret__unsigned_long = (dom_' s) * word_of_nat numPriorities + (prio_' s)\" + by vcg (simp add: numDomains_sge_1_simp numPriorities_def) lemma prio_to_l1index_spec: "\s. \ \ {s} Call prio_to_l1index_'proc @@ -714,6 +442,22 @@ lemma invert_l1index_spec: by vcg (simp add: word_sle_def sdiv_int_def sdiv_word_def smod_word_def smod_int_def) +lemma cbitmap_L1_relation_update: + "\ (\, s) \ rf_sr ; cbitmap_L1_relation cupd aupd \ + \ (\\ksReadyQueuesL1Bitmap := aupd \, + globals_update (ksReadyQueuesL1Bitmap_'_update (\_. cupd)) s) + \ rf_sr" + by (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + cmachine_state_relation_def) + +lemma cbitmap_L2_relation_update: + "\ (\, s) \ rf_sr ; cbitmap_L2_relation cupd aupd \ + \ (\\ksReadyQueuesL2Bitmap := aupd \, + globals_update (ksReadyQueuesL2Bitmap_'_update (\_. cupd)) s) + \ rf_sr" + by (simp add: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + cmachine_state_relation_def) + lemma unat_ucast_prio_L1_cmask_simp: "unat (ucast (p::priority) && 0x1F :: machine_word) = unat (p && 0x1F)" using unat_ucast_prio_mask_simp[where m=5] @@ -839,15 +583,6 @@ lemma rf_sr_drop_bitmaps_enqueue_helper: carch_state_relation_def cmachine_state_relation_def by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) -lemma tcb_queue_relation'_empty_ksReadyQueues: - "\ sched_queue_relation' (cslift x) (q s) NULL NULL ; \t\ set (q s). 
tcb_at' t s \ \ q s = []" - apply (clarsimp simp add: tcb_queue_relation'_def) - apply (subst (asm) eq_commute) - apply (cases "q s" rule: rev_cases, simp) - apply (clarsimp simp: tcb_at_not_NULL) - done - - lemma invert_prioToL1Index_c_simp: "p \ maxPriority \ @@ -861,13 +596,247 @@ lemma c_invert_assist: "7 - (ucast (p :: priority) >> 5 :: machine_word) < 8" using prio_ucast_shiftr_wordRadix_helper'[simplified wordRadix_def] by - (rule word_less_imp_diff_less, simp_all) +lemma addToBitmap_ccorres: + "ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast prio\) hs + (addToBitmap tdom prio) (Call addToBitmap_'proc)" + supply prio_and_dom_limit_helpers[simp] invert_prioToL1Index_c_simp[simp] + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (frule maxDomain_le_unat_ucast_explicit) + apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (intro conjI impI allI) + apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (rule conjI) + apply (clarsimp intro!: cbitmap_L1_relation_bit_set) + apply (fastforce dest!: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) + done + +lemma rf_sr_tcb_update_twice: + "h_t_valid (hrs_htd (hrs2 (globals s') (t_hrs_' (gs2 (globals s'))))) c_guard + (ptr (t_hrs_' (gs2 (globals s'))) (globals s')) + \ ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs :: tcb_C ptr) (v ths gs)) + (hrs_mem_update (heap_update (ptr ths gs) (v' ths gs)) (hrs2 gs ths))) (gs2 gs)) s') \ rf_sr) + = ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs) (v ths gs)) (hrs2 gs ths)) (gs2 gs)) s') \ rf_sr)" + by (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def typ_heap_simps' + carch_state_relation_def cmachine_state_relation_def + packed_heap_update_collapse_hrs) + +lemmas rf_sr_tcb_update_no_queue_gen2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue_gen, simplified] + +lemma tcb_queue_prepend_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueuePrepend queue tcbPtr) (Call tcb_queue_prepend_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. 
queue_' s' = cqueue" + in ccorres_cond_both') + apply fastforce + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueHead queue)) s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply clarsimp + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma tcb_queue_append_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s) + \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueAppend queue tcbPtr) (Call tcb_queue_append_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. 
queue_' s' = cqueue" + in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueEnd queue)) s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma getQueue_ccorres: + "ccorres ctcb_queue_relation queue_' + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (getQueue tdom prio) (\queue :== \ksReadyQueues.[unat \idx])" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: getQueue_def gets_def get_def bind_def return_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + done + +lemma setQueue_ccorres: + "ctcb_queue_relation queue cqueue \ + ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (setQueue tdom prio queue) + (Basic (\s. globals_update + (ksReadyQueues_'_update + (\_. 
Arrays.update (ksReadyQueues_' (globals s)) (unat (idx_' s)) cqueue)) s))" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: setQueue_def get_def modify_def put_def bind_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + apply (frule cready_queues_index_to_C_distinct, assumption+) + apply (frule_tac qdom=d and prio=p in cready_queues_index_to_C_in_range) + apply fastforce + apply clarsimp + done + +crunch (empty_fail) empty_fail[wp]: isRunnable + lemma tcbSchedEnqueue_ccorres: "ccorres dc xfdc - (valid_queues and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - hs - (tcbSchedEnqueue t) - (Call tcbSchedEnqueue_'proc)" + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedEnqueue t) (Call tcbSchedEnqueue_'proc)" proof - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] note invert_prioToL1Index_c_simp[simp] @@ -877,35 +846,13 @@ proof - note word_less_1[simp del] show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" - and xf'="ret__unsigned_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def unless_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="prio_'" in ccorres_split_nothrow) + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac runnable) + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_'" + in ccorres_split_nothrow) apply (rule threadGet_vcg_corres) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -913,236 +860,246 @@ proof - apply (drule spec, drule(1) mp, clarsimp) apply (clarsimp simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="\s. valid_queues s \ (\p. t \ set (ksReadyQueues s p)) - \ (\tcb. 
ko_at' tcb t s \ tcbDomain tcb =rva - \ tcbPriority tcb = rvb \ valid_tcb' tcb s)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs null_def) - apply (clarsimp simp: queue_in_range valid_tcb'_def) - apply (rule conjI; clarsimp simp: queue_in_range) - (* queue is empty, set t to be new queue *) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (subgoal_tac - "head_C (ksReadyQueues_' - (globals x).[cready_queues_index_to_C (tcbDomain tcb) (tcbPriority tcb)]) = NULL") - prefer 2 - apply (frule_tac s=\ in tcb_queue'_head_end_NULL; simp add: valid_queues_valid_q) - apply (subgoal_tac - "end_C (ksReadyQueues_' - (globals x).[cready_queues_index_to_C (tcbDomain tcb) (tcbPriority tcb)]) = NULL") - prefer 2 - apply (frule_tac s=\ in tcb_queue'_head_end_NULL[symmetric]; simp add: valid_queues_valid_q) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (frule maxDomain_le_unat_ucast_explicit) - apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (simp add: t_hrs_ksReadyQueues_upd_absorb) - apply (rule conjI) - apply (clarsimp simp: l2BitmapSize_def' wordRadix_def c_invert_assist) - - apply (subst rf_sr_drop_bitmaps_enqueue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_set) - apply (fastforce intro: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) - - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (drule_tac qhead'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedEnqueue_update, - simp_all add: valid_queues_valid_q)[1] - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - - apply (erule(1) state_relation_queue_update_helper[where S="{t}"], - (simp | rule globals.equality)+, - simp_all add: cready_queues_index_to_C_def2 numPriorities_def - t_hrs_ksReadyQueues_upd_absorb upd_unless_null_def - typ_heap_simps)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def elim: obj_at'_weaken)+ - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply clarsimp - apply (rule conjI; clarsimp simp: queue_in_range) - (* invalid, disagreement between C and Haskell on emptiness of queue *) - apply (drule (1) obj_at_cslift_tcb) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def) - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply clarsimp - apply (drule tcb_queue_relation'_empty_ksReadyQueues; simp add: valid_queues_valid_q) - (* queue was not empty, add t to queue and leave bitmaps alone *) - apply (drule (1) obj_at_cslift_tcb) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (clarsimp simp: cready_queues_index_to_C_def 
numPriorities_def) - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply clarsimp - apply (frule_tac t=\ in tcb_queue_relation_qhead_mem') - apply (simp add: valid_queues_valid_q) - apply (frule(1) tcb_queue_relation_qhead_valid') - apply (simp add: valid_queues_valid_q) - apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff numPriorities_def - cready_queues_index_to_C_def2) - apply (drule_tac qhead'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedEnqueue_update, - simp_all add: valid_queues_valid_q)[1] - apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (frule(2) obj_at_cslift_tcb[OF valid_queues_obj_at'D]) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="{t, v}" for v in state_relation_queue_update_helper, - (simp | rule globals.equality)+, - simp_all add: typ_heap_simps if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 upd_unless_null_def - cong: if_cong split del: if_split - del: fun_upd_restrict_conv)[1] - apply simp - apply (rule conjI) + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) apply clarsimp - apply (drule_tac s="tcb_ptr_to_ctcb_ptr t" in sym, simp) - apply (clarsimp simp add: fun_upd_twist) - prefer 3 - apply (simp add: obj_at'_weakenE[OF _ TrueI]) - apply (rule disjI1, erule valid_queues_obj_at'D) - apply simp+ - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) - apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - apply (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def inQ_def - dest!: valid_queues_obj_at'D) - done + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. 
queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_prepend_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2) + done qed -lemmas tcbSchedDequeue_update - = tcbDequeue_update[where tn=tcbSchedNext_C and tn_update=tcbSchedNext_C_update - and tp=tcbSchedPrev_C and tp_update=tcbSchedPrev_C_update, - simplified] - -lemma tcb_queue_relation_prev_next: - "\ tcb_queue_relation tn tp mp queue qprev qhead; - tcbp \ set queue; distinct (ctcb_ptr_to_tcb_ptr qprev # queue); - \t \ set queue. tcb_at' t s; qprev \ tcb_Ptr 0 \ mp qprev \ None; - mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb \ - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tn tcb) \ None \ tn tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tp tcb \ tcb_Ptr 0 \ (tp tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ tp tcb = qprev) - \ mp (tp tcb) \ None \ tp tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tp tcb)" - apply (induct queue arbitrary: qprev qhead) - apply simp - apply simp - apply (erule disjE) - apply clarsimp - apply (case_tac "queue") - apply clarsimp - apply clarsimp - apply (rule conjI) - apply clarsimp - apply clarsimp - apply (drule_tac f=ctcb_ptr_to_tcb_ptr in arg_cong[where y="tp tcb"], simp) - apply clarsimp - apply fastforce - done - -lemma tcb_queue_relation_prev_next': - "\ tcb_queue_relation' tn tp mp queue qhead qend; tcbp \ set queue; distinct queue; - \t \ set queue. 
tcb_at' t s; mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb \ - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tn tcb) \ None \ tn tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tp tcb \ tcb_Ptr 0 \ tp tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tp tcb) \ None \ tp tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tp tcb)" - apply (clarsimp simp: tcb_queue_relation'_def split: if_split_asm) - apply (drule(1) tcb_queue_relation_prev_next, simp_all) - apply (fastforce dest: tcb_at_not_NULL) - apply clarsimp - done +lemma tcbSchedAppend_ccorres: + "ccorres dc xfdc + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedAppend t) (Call tcbSchedAppend_'proc)" +proof - + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] -(* L1 bitmap only updated if L2 entry bits end up all zero *) -lemma rf_sr_drop_bitmaps_dequeue_helper_L2: - "\ (\,\') \ rf_sr ; - cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ -((\\ksReadyQueues := ksqupd, - ksReadyQueuesL2Bitmap := ksqL2upd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueues_' := ksqupd'\\) - \ rf_sr) - = -((\\ksReadyQueues := ksqupd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueues_' := ksqupd'\\) \ rf_sr) -" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + note word_less_1[simp del] -lemma rf_sr_drop_bitmaps_dequeue_helper: - "\ (\,\') \ rf_sr ; - cbitmap_L1_relation ksqL1upd' ksqL1upd ; cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ -((\\ksReadyQueues := ksqupd, - ksReadyQueuesL2Bitmap := ksqL2upd, - ksReadyQueuesL1Bitmap := ksqL1upd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueuesL1Bitmap_' := ksqL1upd', - ksReadyQueues_' := ksqupd'\\) - \ rf_sr) - = -((\\ksReadyQueues := ksqupd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueues_' := ksqupd'\\) \ rf_sr) -" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) + show ?thesis + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac "runnable") + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. 
rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply (fastforce dest!: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_append_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply clarsimp + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2 tcbQueueEmpty_def) + done +qed (* FIXME 
same proofs as bit_set, maybe can generalise? *) lemma cbitmap_L1_relation_bit_clear: @@ -1159,27 +1116,6 @@ lemma cbitmap_L1_relation_bit_clear: invertL1Index_def l2BitmapSize_def' le_maxDomain_eq_less_numDomains word_le_nat_alt num_domains_index_updates) -lemma cready_queues_relation_empty_queue_helper: - "\ tcbDomain ko \ maxDomain ; tcbPriority ko \ maxPriority ; - cready_queues_relation (cslift \') (ksReadyQueues_' (globals \')) (ksReadyQueues \)\ - \ - cready_queues_relation (cslift \') - (Arrays.update (ksReadyQueues_' (globals \')) (unat (tcbDomain ko) * 256 + unat (tcbPriority ko)) - (tcb_queue_C.end_C_update (\_. NULL) - (head_C_update (\_. NULL) - (ksReadyQueues_' (globals \').[unat (tcbDomain ko) * 256 + unat (tcbPriority ko)])))) - ((ksReadyQueues \)((tcbDomain ko, tcbPriority ko) := []))" - unfolding cready_queues_relation_def Let_def - using maxPrio_to_H[simp] maxDom_to_H[simp] - apply clarsimp - apply (frule (1) cready_queues_index_to_C_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (fold cready_queues_index_to_C_def[simplified numPriorities_def]) - apply (case_tac "qdom = tcbDomain ko", - simp_all add: prio_and_dom_limit_helpers seL4_MinPrio_def - minDom_def) - apply (fastforce simp: cready_queues_index_to_C_in_range simp: cready_queues_index_to_C_distinct)+ - done - lemma cbitmap_L2_relationD: "\ cbitmap_L2_relation cbitmap2 abitmap2 ; d \ maxDomain ; i < l2BitmapSize \ \ cbitmap2.[unat d].[i] = abitmap2 (d, i)" @@ -1209,465 +1145,301 @@ lemma cbitmap_L2_relation_bit_clear: apply (case_tac "da = d" ; clarsimp simp: num_domains_index_updates) done -lemma tcbSchedDequeue_ccorres': +lemma removeFromBitmap_ccorres: "ccorres dc xfdc - ((\s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p))) - and valid_queues' and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedDequeue t) - (Call tcbSchedDequeue_'proc)" + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast prio\) hs + (removeFromBitmap tdom prio) (Call removeFromBitmap_'proc)" proof - - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the shape of the proof compared to when numDomains > 1 *) include no_less_1_simps - have ksQ_tcb_at': "\s ko d p. - \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p)) \ - \t\set (ksReadyQueues s (d, p)). tcb_at' t s" - by (fastforce dest: spec elim: obj_at'_weakenE) - - have invert_l1_index_limit: "\p. invertL1Index (prioToL1Index p) < 8" + have invert_l1_index_limit: "\p. invertL1Index (prioToL1Index p) < l2BitmapSize" unfolding invertL1Index_def l2BitmapSize_def' prioToL1Index_def by simp show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" - and xf'="ret__unsigned_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def - del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. 
rv' = ucast rv" - and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) + supply if_split[split del] + (* pull out static assms *) + apply simp + apply (rule ccorres_grab_asm[where P=\, simplified]) + apply (cinit lift: dom_' prio_') + apply clarsimp + apply csymbr + apply csymbr + (* we can clear up all C guards now *) + apply (clarsimp simp: maxDomain_le_unat_ucast_explicit word_and_less') + apply (simp add: invert_prioToL1Index_c_simp word_less_nat_alt) + apply (simp add: invert_l1_index_limit[simplified l2BitmapSize_def']) + apply ccorres_rewrite + (* handle L2 update *) + apply (rule_tac ccorres_split_nothrow_novcg_dc) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L2_relation) + apply (erule cbitmap_L2_relation_update) + apply (erule (1) cbitmap_L2_relation_bit_clear) + (* the check on the C side is identical to checking the L2 entry, rewrite the condition *) + apply (simp add: getReadyQueuesL2Bitmap_def) + apply (rule ccorres_symb_exec_l3, rename_tac l2) + apply (rule_tac C'="{s. l2 = 0}" + and Q="\s. l2 = ksReadyQueuesL2Bitmap s (tdom, invertL1Index (prioToL1Index prio))" + in ccorres_rewrite_cond_sr[where Q'=UNIV]) + apply clarsimp + apply (frule rf_sr_cbitmap_L2_relation) + apply (clarsimp simp: cbitmap_L2_relationD invert_l1_index_limit split: if_split) + (* unset L1 bit when L2 entry is empty *) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="prio_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="(\s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct(ksReadyQueues s (d, p))) - and valid_queues' and obj_at' (inQ rva rvb) t - and (\s. rva \ maxDomain \ rvb \ maxPriority)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs when_def - null_def) - - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (frule(1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (frule_tac s=\ in tcb_queue_relation_prev_next'; (fastforce simp: ksQ_tcb_at')?) 
- apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (intro conjI ; - clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift)+ - apply (drule(2) filter_empty_unfiltered_contr, simp)+ - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - apply (subst rf_sr_drop_bitmaps_dequeue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_clear) - apply (simp add: invert_prioToL1Index_c_simp) - apply (frule rf_sr_cbitmap_L2_relation) - apply (clarsimp simp: cbitmap_L2_relation_def - word_size prioToL1Index_def wordRadix_def mask_def - word_le_nat_alt - numPriorities_def wordBits_def l2BitmapSize_def' - invertL1Index_def numDomains_less_numeric_explicit) - apply (case_tac "d = tcbDomain ko" - ; fastforce simp: le_maxDomain_eq_less_numDomains - numDomains_less_numeric_explicit) - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - - apply (frule_tac s=\ in tcb_queue_relation_prev_next', assumption) - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by ((fastforce simp: ksQ_tcb_at')+) - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - (* trivial case, setting queue to empty *) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - cmachine_state_relation_def) - apply (erule (2) cready_queues_relation_empty_queue_helper) - (* impossible case, C L2 update disagrees with Haskell update *) - apply (simp add: invert_prioToL1Index_c_simp) - apply (subst (asm) num_domains_index_updates) - subgoal by (simp add: le_maxDomain_eq_less_numDomains word_le_nat_alt) - apply (subst (asm) Arrays.index_update) - apply (simp add: invert_l1_index_limit) - - apply (frule rf_sr_cbitmap_L2_relation) - apply (drule_tac i="invertL1Index (prioToL1Index (tcbPriority ko))" - in cbitmap_L2_relationD, assumption) - apply (fastforce simp: l2BitmapSize_def' invert_l1_index_limit) - apply (fastforce simp: prioToL1Index_def invertL1Index_def mask_def wordRadix_def) - (* impossible case *) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (drule(2) filter_empty_unfiltered_contr, fastforce) - - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply fold_subgoals[2] - apply (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (frule_tac s=\ in tcb_queue_relation_prev_next', assumption) - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: 
le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI, clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (rule conjI; clarsimp) - apply (simp add: typ_heap_simps) - apply (clarsimp simp: h_t_valid_c_guard [OF h_t_valid_field, OF h_t_valid_clift] - h_t_valid_field[OF h_t_valid_clift] h_t_valid_clift) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 typ_heap_simps - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: tcb_null_sched_ptrs_def)+ - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split, - simp_all add: typ_heap_simps')[1] - subgoal by (fastforce simp: tcb_null_sched_ptrs_def) - subgoal by fastforce + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L1_relation) + apply (erule cbitmap_L1_relation_update) + apply (erule (1) cbitmap_L1_relation_bit_clear) + apply wpsimp+ + apply (fastforce simp: guard_is_UNIV_def) + apply clarsimp + done +qed +lemma ctcb_ptr_to_tcb_ptr_option_to_ctcb_ptr[simp]: + "ctcb_ptr_to_tcb_ptr (option_to_ctcb_ptr (Some ptr)) = ptr" + by (clarsimp simp: option_to_ctcb_ptr_def) + +lemma tcb_queue_remove_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. 
tcb_at' tcbPtr s \ valid_objs' s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueRemove queue tcbPtr) (Call tcb_queue_remove_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit' lift: tcb_') + apply (rename_tac tcb') + apply (simp only: tcbQueueRemove_def) + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule ccorres_pre_getObject_tcb, rename_tac tcb) + apply (rule ccorres_symb_exec_l, rename_tac beforePtrOpt) + apply (rule ccorres_symb_exec_l, rename_tac afterPtrOpt) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="before___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr beforePtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedPrev tcb = beforePtrOpt)" + and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="after___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr afterPtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedNext tcb = afterPtrOpt)" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond[where R="?abs"]) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) apply clarsimp - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* invalid, missing bitmap updates on haskell side *) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems - by (fastforce dest!: tcb_queue_relation'_empty_ksReadyQueues - elim: obj_at'_weaken)+ - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[4] - subgoal premises prems using prems - by (fastforce simp: typ_heap_simps tcb_null_sched_ptrs_def)+ - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (frule_tac s=\ in tcb_queue_relation_prev_next') + apply (rule ccorres_assert2) + 
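+ (* For orientation: a minimal C-level sketch of the queue surgery this lemma verifies,
+    reconstructed from the case analysis in this proof (singleton, head, end, interior node).
+    It is illustrative only, not a verbatim copy of the kernel's tcb_queue_remove; the struct
+    and field names (tcb_queue_t with head/end, tcbSchedPrev/tcbSchedNext) are assumed from
+    the tcb_queue_C and ctcb_relation usage in this file.
+
+      tcb_queue_t tcb_queue_remove(tcb_queue_t queue, tcb_t *tcb)
+      {
+          tcb_t *before = tcb->tcbSchedPrev;
+          tcb_t *after = tcb->tcbSchedNext;
+
+          if (tcb == queue.head && tcb == queue.end) {
+              /* only element: the resulting queue is empty */
+              queue.head = NULL;
+              queue.end = NULL;
+          } else if (tcb == queue.head) {
+              /* remove from the front */
+              after->tcbSchedPrev = NULL;
+              tcb->tcbSchedNext = NULL;
+              queue.head = after;
+          } else if (tcb == queue.end) {
+              /* remove from the back */
+              before->tcbSchedNext = NULL;
+              tcb->tcbSchedPrev = NULL;
+              queue.end = before;
+          } else {
+              /* interior node: link the two neighbours together */
+              before->tcbSchedNext = after;
+              after->tcbSchedPrev = before;
+              tcb->tcbSchedPrev = NULL;
+              tcb->tcbSchedNext = NULL;
+          }
+          return queue;
+      }
+ *)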
apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) apply fastforce - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (clarsimp simp: typ_heap_simps) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (drule(2) filter_empty_unfiltered_contr[simplified filter_noteq_op], simp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* impossible case, C L2 update disagrees with Haskell update *) - apply (subst (asm) num_domains_index_updates) - apply (simp add: le_maxDomain_eq_less_numDomains word_le_nat_alt) - apply (subst (asm) Arrays.index_update) - subgoal using invert_l1_index_limit - by (fastforce simp add: invert_prioToL1Index_c_simp intro: nat_Suc_less_le_imp) - apply (frule rf_sr_cbitmap_L2_relation) - apply (simp add: invert_prioToL1Index_c_simp) - apply (drule_tac i="invertL1Index (prioToL1Index (tcbPriority ko))" - in cbitmap_L2_relationD, assumption) - subgoal by (simp add: invert_l1_index_limit l2BitmapSize_def') - apply (fastforce simp: prioToL1Index_def invertL1Index_def mask_def wordRadix_def) - - apply (simp add: invert_prioToL1Index_c_simp) - apply (subst rf_sr_drop_bitmaps_dequeue_helper_L2, assumption) - subgoal by (fastforce dest: rf_sr_cbitmap_L2_relation elim!: cbitmap_L2_relation_bit_clear) - - (* trivial case, setting queue to empty *) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - cmachine_state_relation_def) - apply (erule (2) cready_queues_relation_empty_queue_helper) - - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (simp add: invert_prioToL1Index_c_simp) - apply (frule_tac s=\ in tcb_queue_relation_prev_next') - apply (fastforce simp add: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI, clarsimp) - apply (clarsimp simp: h_val_field_clift' - 
h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (clarsimp simp: typ_heap_simps) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fastforce simp: typ_heap_simps tcb_null_sched_ptrs_def)+ - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[4] - subgoal premises prems using prems - by (fastforce simp: typ_heap_simps tcb_null_sched_ptrs_def)+ - apply (clarsimp) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* invalid, missing bitmap updates on haskell side *) - apply (drule tcb_queue_relation'_empty_ksReadyQueues) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce elim: obj_at'_weaken)+ - (* invalid, missing bitmap updates on haskell side *) - apply (drule tcb_queue_relation'_empty_ksReadyQueues) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce elim: obj_at'_weaken)+ - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 typ_heap_simps - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems - by (fastforce simp: typ_heap_simps tcb_null_sched_ptrs_def)+ - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) - apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - by (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def valid_tcb'_def inQ_def) -qed - -lemma tcbSchedDequeue_ccorres: - "ccorres dc xfdc - (valid_queues and valid_queues' and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedDequeue t) - (Call tcbSchedDequeue_'proc)" - apply (rule ccorres_guard_imp [OF tcbSchedDequeue_ccorres']) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp)+ - done - -lemma tcb_queue_relation_append: - "\ tcb_queue_relation tn tp mp queue qprev qhead; queue \ []; - qend' \ tcb_ptr_to_ctcb_ptr ` set queue; mp qend' = Some tcb; - queue = queue' @ [ctcb_ptr_to_tcb_ptr qend]; distinct queue; - \x \ set queue. tcb_ptr_to_ctcb_ptr x \ NULL; qend' \ NULL; - \v f g. tn (tn_update f v) = f (tn v) \ tp (tp_update g v) = g (tp v) - \ tn (tp_update f v) = tn v \ tp (tn_update g v) = tp v \ - \ tcb_queue_relation tn tp - (mp (qend \ tn_update (\_. qend') (the (mp qend)), - qend' \ tn_update (\_. NULL) (tp_update (\_. 
qend) tcb))) - (queue @ [ctcb_ptr_to_tcb_ptr qend']) qprev qhead" - using [[hypsubst_thin = true]] - apply clarsimp - apply (induct queue' arbitrary: qprev qhead) - apply clarsimp - apply clarsimp - done - -lemma tcbSchedAppend_update: - assumes sr: "sched_queue_relation' mp queue qhead qend" - and qh': "qend' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qend' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qend' \ NULL" - shows - "sched_queue_relation' - (upd_unless_null qend (tcbSchedNext_C_update (\_. qend') (the (mp qend))) - (mp(qend' \ tcb\tcbSchedNext_C := NULL, tcbSchedPrev_C := qend\))) - (queue @ [ctcb_ptr_to_tcb_ptr qend']) (if queue = [] then qend' else qhead) qend'" - using sr qh' valid_ep cs_tcb qhN - apply - - apply (rule rev_cases[where xs=queue]) - apply (simp add: tcb_queue_relation'_def upd_unless_null_def) - apply (clarsimp simp: tcb_queue_relation'_def upd_unless_null_def tcb_at_not_NULL) - apply (drule_tac qend'=qend' and tn_update=tcbSchedNext_C_update - and tp_update=tcbSchedPrev_C_update and qend="tcb_ptr_to_ctcb_ptr y" - in tcb_queue_relation_append, simp_all) - apply (fastforce simp add: tcb_at_not_NULL) - apply (simp add: fun_upd_twist) - done - -lemma tcb_queue_relation_qend_mems: - "\ tcb_queue_relation' getNext getPrev mp queue qhead qend; - \x \ set queue. tcb_at' x s \ - \ (qend = NULL \ queue = []) - \ (qend \ NULL \ ctcb_ptr_to_tcb_ptr qend \ set queue)" - apply (clarsimp simp: tcb_queue_relation'_def) - apply (drule bspec, erule last_in_set) - apply (simp add: tcb_at_not_NULL) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) + apply clarsimp + apply (rule ccorres_assert2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. 
(s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply (rule ccorres_assert2)+ + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (fastforce intro: ccorres_return_C') + apply (wpsimp | vcg)+ + apply (clarsimp split: if_splits) + apply normalise_obj_at' + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + by (intro conjI impI; + clarsimp simp: ctcb_queue_relation_def typ_heap_simps option_to_ctcb_ptr_def + valid_tcb'_def valid_bound_tcb'_def) + +lemma tcbQueueRemove_tcb_at'_head: + "\\s. valid_objs' s \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)\ + tcbQueueRemove queue t + \\rv s. 
\ tcbQueueEmpty rv \ tcb_at' (the (tcbQueueHead rv)) s\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp haskell_assert_wp hoare_vcg_imp_lift') + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def valid_bound_tcb'_def tcbQueueEmpty_def obj_at'_def) done -lemma tcbSchedAppend_ccorres: +lemma tcbSchedDequeue_ccorres: "ccorres dc xfdc - (valid_queues and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedAppend t) - (Call tcbSchedAppend_'proc)" + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedDequeue t) (Call tcbSchedDequeue_'proc)" proof - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the shape of the proof compared to when numDomains > 1 *) - include no_less_1_simps + note word_less_1[simp del] show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" - and xf'="ret__unsigned_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def unless_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="prio_'" in ccorres_split_nothrow) + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) apply (rule threadGet_vcg_corres) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -1675,128 +1447,97 @@ proof - apply (drule spec, drule(1) mp, clarsimp) apply (clarsimp simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="\s. valid_queues s \ (\p. t \ set (ksReadyQueues s p)) - \ (\tcb. 
ko_at' tcb t s \ tcbDomain tcb =rva - \ tcbPriority tcb = rvb \ valid_tcb' tcb s)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs null_def) - apply (clarsimp simp: queue_in_range valid_tcb'_def) - apply (rule conjI; clarsimp simp: queue_in_range) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (simp add: invert_prioToL1Index_c_simp) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (simp add: t_hrs_ksReadyQueues_upd_absorb) - apply (subst rf_sr_drop_bitmaps_enqueue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_set) - subgoal by (fastforce intro: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) - apply (erule(1) state_relation_queue_update_helper[where S="{t}"], - (simp | rule globals.equality)+, - simp_all add: cready_queues_index_to_C_def2 numPriorities_def - t_hrs_ksReadyQueues_upd_absorb upd_unless_null_def - typ_heap_simps)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def elim: obj_at'_weaken)+ - apply (clarsimp simp: upd_unless_null_def cready_queues_index_to_C_def numPriorities_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp simp: queue_in_range) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) + apply (rule_tac r'="\rv rv'. 
rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, - simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] - apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (clarsimp simp: upd_unless_null_def cready_queues_index_to_C_def numPriorities_def) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rule_tac r'=ctcb_queue_relation and xf'=new_queue_' in ccorres_split_nothrow) + apply (ctac add: tcb_queue_remove_ccorres) + apply ceqv + apply (rename_tac queue' newqueue) + apply (rule ccorres_Guard_Seq) + apply (ctac add: setQueue_ccorres) + apply (rule ccorres_split_nothrow_novcg_dc) + apply ctac + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue')" + and R="\s. \ tcbQueueEmpty queue' \ tcb_at' (the (tcbQueueHead queue')) s" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def split: option.splits) + apply ceqv + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: removeFromBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply vcg + apply (wpsimp wp: hoare_vcg_imp_lift') + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: hoare_vcg_imp_lift') + apply vcg + apply ((wpsimp wp: tcbQueueRemove_tcb_at'_head | wp (once) hoare_drop_imps)+)[1] + apply (vcg exspec=tcb_queue_remove_modifies) + apply wpsimp + apply vcg + apply vcg + apply (rule conseqPre, vcg) apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, - simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] - apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: cready_queues_index_to_C_def2 numPriorities_def) - apply (frule(2) obj_at_cslift_tcb[OF valid_queues_obj_at'D]) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="{t, v}" for v in state_relation_queue_update_helper, - (simp | rule globals.equality)+, - simp_all add: typ_heap_simps if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 upd_unless_null_def - cong: if_cong split del: if_split - del: fun_upd_restrict_conv)[1] - apply simp - apply (rule conjI) - apply clarsimp - apply (drule_tac s="tcb_ptr_to_ctcb_ptr t" in sym, simp) - apply (clarsimp simp add: fun_upd_twist) - prefer 3 - apply (simp add: obj_at'_weakenE[OF _ TrueI]) - apply (rule disjI1, erule valid_queues_obj_at'D) - subgoal by simp - subgoal by simp - subgoal by 
(fastforce simp: tcb_null_sched_ptrs_def) - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - by (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def inQ_def - dest!: valid_queues_obj_at'D) + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) obj_at_cslift_tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + by (fastforce simp: word_less_nat_alt + cready_queues_index_to_C_def2 ctcb_relation_def + typ_heap_simps le_maxDomain_eq_less_numDomains(2) unat_trans_ucast_helper) qed -lemma true_eq_from_bool [simp]: - "(scast true = from_bool P) = P" - by (simp add: true_def from_bool_def split: bool.splits) +lemma tcb_queue_relation_append: + "\ tcb_queue_relation tn tp' mp queue qprev qhead; queue \ []; + qend' \ tcb_ptr_to_ctcb_ptr ` set queue; mp qend' = Some tcb; + queue = queue' @ [ctcb_ptr_to_tcb_ptr qend]; distinct queue; + \x \ set queue. tcb_ptr_to_ctcb_ptr x \ NULL; qend' \ NULL; + \v f g. tn (tn_update f v) = f (tn v) \ tp' (tp_update g v) = g (tp' v) + \ tn (tp_update f v) = tn v \ tp' (tn_update g v) = tp' v \ + \ tcb_queue_relation tn tp' + (mp (qend \ tn_update (\_. qend') (the (mp qend)), + qend' \ tn_update (\_. NULL) (tp_update (\_. qend) tcb))) + (queue @ [ctcb_ptr_to_tcb_ptr qend']) qprev qhead" + using [[hypsubst_thin = true]] + apply clarsimp + apply (induct queue' arbitrary: qprev qhead) + apply clarsimp + apply clarsimp + done lemma isRunnable_spec: "\s. \ \ ({s} \ {s. cslift s (thread_' s) \ None}) Call isRunnable_'proc @@ -1833,8 +1574,11 @@ lemma tcb_at_1: done lemma rescheduleRequired_ccorres: - "ccorres dc xfdc (valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs') - UNIV [] rescheduleRequired (Call rescheduleRequired_'proc)" + "ccorres dc xfdc + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' + and pspace_aligned' and pspace_distinct') + UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" apply cinit apply (rule ccorres_symb_exec_l) apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) @@ -1940,16 +1684,18 @@ lemma rf_sr_ksReadyQueuesL1Bitmap_simp: done lemma lookupBitmapPriority_le_maxPriority: - "\ ksReadyQueuesL1Bitmap s d \ 0 ; valid_queues s \ + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + \d p. 
d > maxDomain \ p > maxPriority \ tcbQueueEmpty (ksReadyQueues s (d, p)); + valid_bitmaps s \ \ lookupBitmapPriority d s \ maxPriority" - unfolding valid_queues_def valid_queues_no_bitmap_def - by (fastforce dest!: bitmapQ_from_bitmap_lookup bitmapQ_ksReadyQueuesI intro: ccontr) + apply (clarsimp simp: valid_bitmaps_def) + by (fastforce dest!: bitmapQ_from_bitmap_lookup bitmapQ_ksReadyQueuesI intro: ccontr) lemma ksReadyQueuesL1Bitmap_word_log2_max: - "\valid_queues s; ksReadyQueuesL1Bitmap s d \ 0\ - \ word_log2 (ksReadyQueuesL1Bitmap s d) < l2BitmapSize" - unfolding valid_queues_def - by (fastforce dest: word_log2_nth_same bitmapQ_no_L1_orphansD) + "\valid_bitmaps s; ksReadyQueuesL1Bitmap s d \ 0\ + \ word_log2 (ksReadyQueuesL1Bitmap s d) < l2BitmapSize" + unfolding valid_bitmaps_def + by (fastforce dest: word_log2_nth_same bitmapQ_no_L1_orphansD) lemma clzl_spec: "\s. \ \ {\. s = \ \ x___unsigned_long_' s \ 0} Call clzl_'proc @@ -2139,11 +1885,6 @@ lemma getCurDomain_maxDom_ccorres_dom_': rf_sr_ksCurDomain) done -lemma rf_sr_cscheduler_action_relation: - "(s, s') \ rf_sr - \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" - by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - lemma threadGet_get_obj_at'_has_domain: "\ tcb_at' t \ threadGet tcbDomain t \\rv. obj_at' (\tcb. rv = tcbDomain tcb) t\" by (wp threadGet_obj_at') (simp add: obj_at'_def) @@ -2151,16 +1892,15 @@ lemma threadGet_get_obj_at'_has_domain: lemma possibleSwitchTo_ccorres: shows "ccorres dc xfdc - (valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t and (\s. ksCurDomain s \ maxDomain) - and valid_objs') + and valid_objs' and pspace_aligned' and pspace_distinct') ({s. target_' s = tcb_ptr_to_ctcb_ptr t} \ UNIV) [] (possibleSwitchTo t ) (Call possibleSwitchTo_'proc)" supply if_split [split del] supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] @@ -2185,7 +1925,7 @@ lemma possibleSwitchTo_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule_tac R="\s. sact = ksSchedulerAction s \ weak_sch_act_wf (ksSchedulerAction s) s" in ccorres_cond) - apply (fastforce dest!: rf_sr_cscheduler_action_relation pred_tcb_at' tcb_at_not_NULL + apply (fastforce dest!: rf_sr_sched_action_relation pred_tcb_at' tcb_at_not_NULL simp: cscheduler_action_relation_def weak_sch_act_wf_def split: scheduler_action.splits) apply (ctac add: rescheduleRequired_ccorres) @@ -2202,8 +1942,8 @@ lemma possibleSwitchTo_ccorres: lemma scheduleTCB_ccorres': "ccorres dc xfdc - (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_queues - and valid_objs') + (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') (UNIV \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] (do (runnable, curThread, action) \ do @@ -2216,6 +1956,7 @@ lemma scheduleTCB_ccorres': rescheduleRequired od) (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] apply (cinit' lift: tptr_') apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) @@ -2245,31 +1986,34 @@ lemma scheduleTCB_ccorres': apply (clarsimp simp: typ_heap_simps) apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def tcb_at_not_NULL split: scheduler_action.split_asm) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def) apply wp+ - apply (simp add: isRunnable_def isStopped_def) - apply wp + apply (simp add: isRunnable_def isStopped_def) apply (simp add: guard_is_UNIV_def) apply clarsimp apply (clarsimp simp: st_tcb_at'_def obj_at'_def weak_sch_act_wf_def) done lemma scheduleTCB_ccorres_valid_queues'_pre: - "ccorresG rf_sr \ dc xfdc (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs') - (UNIV \ \\tptr = tcb_ptr_to_ctcb_ptr thread\) [] - (do (runnable, curThread, action) \ do - runnable \ isRunnable thread; - curThread \ getCurThread; - action \ getSchedulerAction; - return (runnable, curThread, action) od; - when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired - od) - (Call scheduleTCB_'proc)" + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread + and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] apply (cinit' lift: tptr_' simp del: word_neq_0_conv) apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) @@ -2298,7 +2042,7 @@ lemma scheduleTCB_ccorres_valid_queues'_pre: apply (drule (1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def weak_sch_act_wf_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] apply (fold_subgoals (prefix))[6] subgoal premises prems using prems by (clarsimp simp: rf_sr_def cstate_relation_def Let_def @@ -2309,17 +2053,17 @@ lemma scheduleTCB_ccorres_valid_queues'_pre: split: scheduler_action.split_asm) apply wp+ apply (simp add: isRunnable_def isStopped_def) - apply wp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: st_tcb_at'_def obj_at'_def) done - lemmas scheduleTCB_ccorres_valid_queues' = scheduleTCB_ccorres_valid_queues'_pre[unfolded bind_assoc return_bind split_conv] lemma rescheduleRequired_ccorres_valid_queues'_simple: - "ccorresG rf_sr \ dc xfdc (valid_queues' and sch_act_simple) UNIV [] rescheduleRequired (Call rescheduleRequired_'proc)" + "ccorresG rf_sr \ dc xfdc + sch_act_simple UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" apply cinit apply (rule ccorres_symb_exec_l) apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) @@ -2352,16 +2096,18 @@ lemma rescheduleRequired_ccorres_valid_queues'_simple: split: scheduler_action.split_asm) lemma scheduleTCB_ccorres_valid_queues'_pre_simple: - "ccorresG rf_sr \ dc xfdc (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and sch_act_simple) - (UNIV \ \\tptr = tcb_ptr_to_ctcb_ptr thread\) [] - (do (runnable, curThread, action) \ do - runnable \ isRunnable thread; - curThread \ getCurThread; - action \ getSchedulerAction; - return (runnable, curThread, action) od; - when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired - od) - (Call scheduleTCB_'proc)" + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and sch_act_simple) + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] apply (cinit' lift: tptr_' simp del: word_neq_0_conv) apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) @@ -2391,7 +2137,7 @@ lemma scheduleTCB_ccorres_valid_queues'_pre_simple: apply (clarsimp simp: typ_heap_simps) apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState 
ko", simp_all add: ThreadState_defs)[1] apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def tcb_at_not_NULL @@ -2399,11 +2145,10 @@ lemma scheduleTCB_ccorres_valid_queues'_pre_simple: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def) apply wp+ - apply (simp add: isRunnable_def isStopped_def) - apply wp + apply (simp add: isRunnable_def isStopped_def) apply (simp add: guard_is_UNIV_def) apply clarsimp - apply (clarsimp simp: st_tcb_at'_def obj_at'_def valid_queues'_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) done lemmas scheduleTCB_ccorres_valid_queues'_simple @@ -2423,48 +2168,35 @@ lemma threadSet_weak_sch_act_wf_runnable': apply (clarsimp) done -lemma threadSet_valid_queues_and_runnable': "\\s. valid_queues s \ (\p. thread \ set (ksReadyQueues s p) \ runnable' st)\ - threadSet (tcbState_update (\_. st)) thread - \\rv s. valid_queues s\" - apply (wp threadSet_valid_queues) - apply (clarsimp simp: inQ_def) -done - lemma setThreadState_ccorres[corres]: "ccorres dc xfdc - (\s. tcb_at' thread s \ valid_queues s \ valid_objs' s \ valid_tcb_state' st s \ - (ksSchedulerAction s = SwitchToThread thread \ runnable' st) \ - (\p. thread \ set (ksReadyQueues s p) \ runnable' st) \ - sch_act_wf (ksSchedulerAction s) s) - ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} - \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] - (setThreadState st thread) (Call setThreadState_'proc)" + (\s. tcb_at' thread s \ valid_objs' s \ valid_tcb_state' st s + \ (ksSchedulerAction s = SwitchToThread thread \ runnable' st) + \ sch_act_wf (ksSchedulerAction s) s \ pspace_aligned' s \ pspace_distinct' s) + ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} + \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) hs + (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres) - apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues_and_runnable' - threadSet_valid_objs') - by (clarsimp simp: weak_sch_act_wf_def valid_queues_def valid_tcb'_tcbState_update) - -lemma threadSet_valid_queues'_and_not_runnable': "\tcb_at' thread and valid_queues' and (\s. (\ runnable' st))\ - threadSet (tcbState_update (\_. st)) thread - \\rv. tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' \" - - apply (wp threadSet_valid_queues' threadSet_tcbState_st_tcb_at') - apply (clarsimp simp: pred_neg_def valid_queues'_def inQ_def)+ -done + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs') + apply (clarsimp simp: weak_sch_act_wf_def valid_tcb'_tcbState_update) + done lemma setThreadState_ccorres_valid_queues': - "ccorres dc xfdc - (\s. tcb_at' thread s \ valid_queues' s \ \ runnable' st \ weak_sch_act_wf (ksSchedulerAction s) s \ Invariants_H.valid_queues s \ (\p. thread \ set (ksReadyQueues s p)) \ sch_act_not thread s \ valid_objs' s \ valid_tcb_state' st s) + "ccorres dc xfdc + (\s. tcb_at' thread s \ \ runnable' st \ weak_sch_act_wf (ksSchedulerAction s) s + \ sch_act_not thread s \ valid_objs' s \ valid_tcb_state' st s + \ pspace_aligned' s \ pspace_distinct' s) ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} - \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] - (setThreadState st thread) (Call setThreadState_'proc)" + \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres_valid_queues') - apply (wp threadSet_valid_queues'_and_not_runnable' threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues_and_runnable' threadSet_valid_objs') - by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def) + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs' + threadSet_tcbState_st_tcb_at') + by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) lemma simp_list_case_return: "(case x of [] \ return e | y # ys \ return f) = return (if x = [] then e else f)" @@ -2485,24 +2217,22 @@ lemma cancelSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (ctac (no_vcg) add: cancelSignal_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') - apply ((wp setNotification_ksQ hoare_vcg_all_lift set_ntfn_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+)[1] + apply ((wp hoare_vcg_all_lift set_ntfn_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+)[1] apply (simp add: "StrictC'_thread_state_defs") apply (rule conjI, clarsimp, rule conjI, clarsimp) apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) subgoal by ((auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def - isTS_defs cte_wp_at_ctes_of "StrictC'_thread_state_defs" - cthread_state_relation_def sch_act_wf_weak valid_ntfn'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] | + isTS_defs cte_wp_at_ctes_of + cthread_state_relation_def sch_act_wf_weak valid_ntfn'_def | clarsimp simp: eq_commute)+) apply (clarsimp) apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) apply (frule (2) ntfn_blocked_in_queueD) by (auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def - isTS_defs cte_wp_at_ctes_of "StrictC'_thread_state_defs" valid_ntfn'_def + isTS_defs cte_wp_at_ctes_of valid_ntfn'_def cthread_state_relation_def sch_act_wf_weak isWaitingNtfn_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] - split: ntfn.splits option.splits - | clarsimp simp: eq_commute + split: ntfn.splits option.splits + | clarsimp simp: eq_commute | drule_tac x=thread in bspec)+ lemma cmap_relation_ep: @@ -2512,7 +2242,7 @@ lemma cmap_relation_ep: by (simp add: Let_def) (* FIXME: MOVE *) -lemma ccorres_pre_getEndpoint [corres_pre]: +lemma ccorres_pre_getEndpoint [ccorres_pre]: assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" shows "ccorres r xf (ep_at' p and (\s. \ep. ko_at' ep p s \ P ep s)) @@ -2653,8 +2383,8 @@ lemma cpspace_relation_ep_update_an_ep: and pal: "pspace_aligned' s" "pspace_distinct' s" and others: "\epptr' ep'. 
\ ko_at' ep' epptr' s; epptr' \ epptr; ep' \ IdleEP \ \ set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using cp koat pal rel unfolding cmap_relation_def apply - apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs) @@ -2676,8 +2406,8 @@ lemma cpspace_relation_ep_update_ep: and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using invs apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) apply clarsimp+ @@ -2689,15 +2419,15 @@ lemma cpspace_relation_ep_update_ep': fixes ep :: "endpoint" and ep' :: "endpoint" and epptr :: "word32" and s :: "kernel_state" defines "qs \ if (isSendEP ep' \ isRecvEP ep') then set (epQueue ep') else {}" - defines "s' \ s\ksPSpace := ksPSpace s(epptr \ KOEndpoint ep')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(epptr \ KOEndpoint ep')\" assumes koat: "ko_at' ep epptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and srs: "sym_refs (state_refs_of' s')" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" proof - from koat have koat': "ko_at' ep' epptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -2770,7 +2500,7 @@ lemma cancelIPC_ccorres_helper: apply (rule allI) apply (rule conseqPre) apply vcg - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ep_blocked_in_queueD) apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) apply (elim conjE) @@ -2788,7 +2518,7 @@ lemma cancelIPC_ccorres_helper: apply assumption+ apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) - apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split simp del: comp_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) apply (frule null_ep_queue [simplified comp_def] null_ep_queue) apply (intro impI conjI allI) \ \empty case\ @@ -2804,23 +2534,20 @@ lemma cancelIPC_ccorres_helper: cpspace_relation_def update_ep_map_tos typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - subgoal by (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - subgoal by (simp add: cendpoint_relation_def Let_def EPState_Idle_def) - subgoal by simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply 
simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - subgoal by simp - apply (erule (1) map_to_ko_atI') - apply (simp add: heap_to_user_data_def Let_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + subgoal by (simp add: cendpoint_relation_def Let_def EPState_Idle_def) + subgoal by simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + subgoal by simp + apply (erule (1) map_to_ko_atI') + apply (simp add: heap_to_user_data_def Let_def) subgoal by (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') subgoal by (simp add: cmachine_state_relation_def) @@ -2841,38 +2568,36 @@ lemma cancelIPC_ccorres_helper: cpspace_relation_def update_ep_map_tos typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - subgoal by (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def split: endpoint.splits split del: if_split) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def + split: endpoint.splits split del: if_split) \ \recv case\ - apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff - tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask cong: tcb_queue_relation'_cong) - subgoal by (intro impI conjI; simp) - \ \send case\ - apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff - tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask cong: tcb_queue_relation'_cong) + apply (clarsimp simp: Ptr_ptr_val h_t_valid_clift_Some_iff + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask + cong: tcb_queue_relation'_cong) subgoal by (intro impI conjI; simp) - subgoal by simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) - subgoal by (simp add: carch_state_relation_def carch_globals_def - typ_heap_simps') + \ \send case\ + apply (clarsimp simp: Ptr_ptr_val h_t_valid_clift_Some_iff + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask cong: tcb_queue_relation'_cong) + subgoal by (intro impI conjI; simp) + subgoal by simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + subgoal by (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: h_t_valid_clift_Some_iff) subgoal by (simp add: objBits_simps') subgoal by (simp add: objBits_simps) apply assumption - done + done declare empty_fail_get[iff] 
@@ -2928,8 +2653,7 @@ lemma cancelIPC_ccorres1: apply (rule_tac P="rv' = thread_state_to_tsType rv" in ccorres_gen_asm2) apply wpc \ \BlockedOnReceive\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs cong: call_ignore_cong) - apply (fold dc_def) + apply (simp add: word_sle_def ccorres_cond_iffs cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -2945,7 +2669,7 @@ lemma cancelIPC_ccorres1: apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+ - apply (simp add: "StrictC'_thread_state_defs") + apply (simp add: ThreadState_defs) apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -2955,10 +2679,9 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \BlockedOnReply case\ - apply (simp add: "StrictC'_thread_state_defs" ccorres_cond_iffs + apply (simp add: ThreadState_defs ccorres_cond_iffs Collect_False Collect_True word_sle_def cong: call_ignore_cong del: Collect_const) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -2998,14 +2721,12 @@ lemma cancelIPC_ccorres1: apply (rule ccorres_Cond_rhs) apply (simp add: nullPointer_def when_def) apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_stateAssert]) - apply (simp only: dc_def[symmetric]) apply (rule ccorres_symb_exec_r) apply (ctac add: cteDeleteOne_ccorres[where w1="scast cap_reply_cap"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) apply (wp | simp)+ - apply (simp add: when_def nullPointer_def dc_def[symmetric]) apply (rule ccorres_return_Skip) apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs) @@ -3018,7 +2739,8 @@ lemma cancelIPC_ccorres1: apply (clarsimp simp add: guard_is_UNIV_def tcbReplySlot_def Kernel_C.tcbReply_def tcbCNodeEntries_def) \ \BlockedOnNotification\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong) apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg)) apply clarsimp @@ -3027,10 +2749,12 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Running, Inactive, and Idle\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip)+ \ \BlockedOnSend\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def ccorres_cond_iffs + cong: call_ignore_cong) \ \clag\ apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -3046,7 +2770,7 @@ lemma cancelIPC_ccorres1: apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split del:if_split)+ - apply (simp add: "StrictC'_thread_state_defs") + apply (simp add: ThreadState_defs) apply clarsimp apply (rule conseqPre, vcg, rule subset_refl) apply (rule conseqPre, vcg) @@ -3056,7 +2780,8 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Restart\ - apply (simp add: word_sle_def 
"StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip) \ \Post wp proofs\ apply vcg @@ -3079,37 +2804,35 @@ lemma cancelIPC_ccorres1: subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (frule (2) ep_blocked_in_queueD_recv) apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of isRecvEP_def cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits endpoint.splits) + split: thread_state.splits endpoint.splits) apply (rule conjI) apply (clarsimp simp: inQ_def) - apply (rule conjI) - apply clarsimp apply clarsimp apply (rule conjI) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (rule conjI) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (frule (2) ep_blocked_in_queueD_send) apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of isSendEP_def cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits endpoint.splits)[1] + split: thread_state.splits endpoint.splits)[1] apply (auto simp: isTS_defs cthread_state_relation_def typ_heap_simps weak_sch_act_wf_def) apply (case_tac ts, auto simp: isTS_defs cthread_state_relation_def typ_heap_simps) diff --git a/proof/crefine/ARM/Ipc_C.thy b/proof/crefine/ARM/Ipc_C.thy index 2f7b4002b5..b8fed68184 100644 --- a/proof/crefine/ARM/Ipc_C.thy +++ b/proof/crefine/ARM/Ipc_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -376,6 +377,7 @@ lemma handleArchFaultReply': msg \ getMRs s sb tag; handleArchFaultReply f r (msgLabel tag) msg od) x' = handleArchFaultReply' f s r tag x'" + supply empty_fail_cond[simp] apply (unfold handleArchFaultReply'_def getMRs_def msgMaxLength_def bit_def msgLengthBits_def msgRegisters_unfold fromIntegral_simp1 fromIntegral_simp2 @@ -438,6 +440,19 @@ end context kernel_m begin interpretation Arch . 
+lemma asUser_mapMloadWordUser_threadGet_comm: + "do + ra \ mapM loadWordUser xs; + rb \ threadGet fb b; + c ra rb + od = do + rb \ threadGet fb b; + ra \ mapM loadWordUser xs; + c ra rb + od" + by (rule bind_inv_inv_comm, auto; wp mapM_wp') + + lemma handleFaultReply': notes option.case_cong_weak [cong] wordSize_def'[simp] take_append[simp del] assumes neq: "s \ r" @@ -447,6 +462,7 @@ lemma handleFaultReply': msg \ getMRs s sb tag; handleFaultReply f r (msgLabel tag) msg od) (handleFaultReply' f s r)" + supply empty_fail_cond[simp] apply (unfold handleFaultReply'_def getMRs_def msgMaxLength_def bit_def msgLengthBits_def msgRegisters_unfold fromIntegral_simp1 fromIntegral_simp2 @@ -461,41 +477,41 @@ lemma handleFaultReply': zip_Cons ARM_H.exceptionMessage_def ARM.exceptionMessage_def mapM_x_Cons mapM_x_Nil) - apply (rule monadic_rewrite_symb_exec_l, wp+) - apply (rule_tac P="tcb_at' s and tcb_at' r" in monadic_rewrite_inst) - apply (case_tac rv; (case_tac "msgLength tag < scast n_msgRegisters", - (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - asUser_getRegister_discarded - asUser_getRegister_getSanitiseRegisterInfo_comm - asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm - asUser_comm[OF neq] - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - | wp lookupIPCBuffer_inv )+)+)) - apply wp + apply (rule monadic_rewrite_symb_exec_l) + apply (rule_tac P="tcb_at' s and tcb_at' r" in monadic_rewrite_inst) + apply (case_tac sb; (case_tac "msgLength tag < scast n_msgRegisters", + (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm + asUser_getRegister_getSanitiseRegisterInfo_comm + asUser_getRegister_discarded asUser_mapMloadWordUser_threadGet_comm + asUser_comm[OF neq] asUser_getRegister_threadGet_comm + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp)+)+)) + apply wp+ (* capFault *) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_asUser empty_fail_getRegister)+)+ - apply(case_tac rv) - apply (clarsimp - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - | wp mapM_x_mapM_valid[OF mapM_x_wp'[OF loadWordUser_inv]] - empty_fail_loadWordUser)+ + apply (repeat 5 \rule monadic_rewrite_symb_exec_l\) (* until case sb *) + apply (case_tac sb) + apply (clarsimp + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp mapM_x_mapM_valid[OF mapM_x_wp'[OF loadWordUser_inv]] + empty_fail_loadWordUser)+ (* UnknownSyscallExceptio *) apply (simp add: zip_append2 mapM_x_append asUser_bind_distrib split_def bind_assoc) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans[rotated]) apply 
(rule monadic_rewrite_do_flip) apply (rule monadic_rewrite_bind_tail) apply (rule_tac P="inj (case_bool s r)" in monadic_rewrite_gen_asm) apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_weaken[where F=False and E=True], simp) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) apply (rule isolate_thread_actions_rewrite_bind bool.simps setRegister_simple zipWithM_setRegister_simple @@ -515,81 +531,81 @@ lemma handleFaultReply': upto_enum_word mapM_x_Cons mapM_x_Nil) apply (simp add: getSanitiseRegisterInfo_moreMapM_comm asUser_getRegister_getSanitiseRegisterInfo_comm getSanitiseRegisterInfo_lookupIPCBuffer_comm) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind_tail [where Q="\_. tcb_at' r"]) - apply (case_tac sb) + apply (rule monadic_rewrite_bind_tail [where Q="\_. tcb_at' r"]) + apply (case_tac sb) + apply (case_tac "msgLength tag < scast n_msgRegisters") + apply (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + | wp)+)+ apply (case_tac "msgLength tag < scast n_msgRegisters") apply (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - asUser_getRegister_discarded - asUser_comm[OF neq] take_zip - word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - | wp)+)+ - apply (case_tac "msgLength tag < scast n_msgRegisters") - apply (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - zipWithM_x_Nil - asUser_getRegister_discarded - asUser_comm[OF neq] take_zip - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - | wp mapM_wp')+)+ - apply (simp add: n_msgRegisters_def word_le_nat_alt n_syscallMessage_def - linorder_not_less syscallMessage_unfold) - apply (clarsimp | frule neq0_conv[THEN iffD2, THEN not0_implies_Suc, - OF order_less_le_trans, rotated])+ - apply (subgoal_tac "\n :: word32. n \ scast n_syscallMessage \ [n .e. msgMaxLength] - = [n .e. scast n_syscallMessage] - @ [scast n_syscallMessage + 1 .e. 
msgMaxLength]") - apply (simp only: upto_enum_word[where y="scast n_syscallMessage :: word32"] - upto_enum_word[where y="scast n_syscallMessage + 1 :: word32"]) - apply (clarsimp simp: bind_assoc asUser_bind_distrib - mapM_x_Cons mapM_x_Nil - asUser_comm [OF neq] asUser_getRegister_discarded - submonad_asUser.fn_stateAssert take_zip - bind_subst_lift [OF submonad_asUser.stateAssert_fn] - word_less_nat_alt ARM_H.sanitiseRegister_def - split_def n_msgRegisters_def msgMaxLength_def - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - word_size msgLengthBits_def n_syscallMessage_def - split del: if_split - cong: if_weak_cong) - apply (rule monadic_rewrite_bind_tail)+ - apply (subst (2) upto_enum_word) - apply (case_tac "ma < unat n_syscallMessage - 4") - apply (erule disjE[OF nat_less_cases'], - ( clarsimp simp: n_syscallMessage_def bind_assoc asUser_bind_distrib - mapM_x_Cons mapM_x_Nil zipWithM_x_mapM_x mapM_Cons - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - asUser_loadWordUser_comm loadWordUser_discarded asUser_return - zip_take_triv2 msgMaxLength_def - cong: if_weak_cong - | simp - | rule monadic_rewrite_bind_tail - monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - | wp empty_fail_loadWordUser)+)+ - apply (clarsimp simp: upto_enum_word word_le_nat_alt simp del: upt.simps cong: if_weak_cong) - apply (cut_tac i="unat n" and j="Suc (unat (scast n_syscallMessage :: word32))" - and k="Suc msgMaxLength" in upt_add_eq_append') - apply (simp add: n_syscallMessage_def) - apply (simp add: n_syscallMessage_def msgMaxLength_unfold) - apply (simp add: n_syscallMessage_def msgMaxLength_def - msgLengthBits_def shiftL_nat - del: upt.simps upt_rec_numeral) - apply (simp add: upto_enum_word cong: if_weak_cong) - apply wp+ + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + zipWithM_x_Nil + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + | wp mapM_wp')+)+ + apply (simp add: n_msgRegisters_def word_le_nat_alt n_syscallMessage_def + linorder_not_less syscallMessage_unfold) + apply (clarsimp | frule neq0_conv[THEN iffD2, THEN not0_implies_Suc, + OF order_less_le_trans, rotated])+ + apply (subgoal_tac "\n :: word32. n \ scast n_syscallMessage \ [n .e. msgMaxLength] + = [n .e. scast n_syscallMessage] + @ [scast n_syscallMessage + 1 .e. 
msgMaxLength]") + apply (simp only: upto_enum_word[where y="scast n_syscallMessage :: word32"] + upto_enum_word[where y="scast n_syscallMessage + 1 :: word32"]) + apply (clarsimp simp: bind_assoc asUser_bind_distrib + mapM_x_Cons mapM_x_Nil + asUser_comm [OF neq] asUser_getRegister_discarded + submonad_asUser.fn_stateAssert take_zip + bind_subst_lift [OF submonad_asUser.stateAssert_fn] + word_less_nat_alt ARM_H.sanitiseRegister_def + split_def n_msgRegisters_def msgMaxLength_def + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_size msgLengthBits_def n_syscallMessage_def + split del: if_split + cong: if_weak_cong) + apply (rule monadic_rewrite_bind_tail)+ + apply (subst (2) upto_enum_word) + apply (case_tac "ma < unat n_syscallMessage - 4") + apply (erule disjE[OF nat_less_cases'], + ( clarsimp simp: n_syscallMessage_def bind_assoc asUser_bind_distrib + mapM_x_Cons mapM_x_Nil zipWithM_x_mapM_x mapM_Cons + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_loadWordUser_comm loadWordUser_discarded asUser_return + zip_take_triv2 msgMaxLength_def + cong: if_weak_cong + | simp + | rule monadic_rewrite_bind_tail + monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp empty_fail_loadWordUser)+)+ + apply (clarsimp simp: upto_enum_word word_le_nat_alt simp del: upt.simps cong: if_weak_cong) + apply (cut_tac i="unat n" and j="Suc (unat (scast n_syscallMessage :: word32))" + and k="Suc msgMaxLength" in upt_add_eq_append') + apply (simp add: n_syscallMessage_def) + apply (simp add: n_syscallMessage_def msgMaxLength_unfold) + apply (simp add: n_syscallMessage_def msgMaxLength_def + msgLengthBits_def shiftL_nat + del: upt.simps upt_rec_numeral) + apply (simp add: upto_enum_word cong: if_weak_cong) + apply wp+ (* ArchFault *) apply (simp add: neq inj_case_bool split: bool.split) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_is_refl) apply (rule ext) apply (unfold handleArchFaultReply'[symmetric] getMRs_def msgMaxLength_def @@ -1043,7 +1059,7 @@ lemma setMRs_syscall_error_ccorres: | wp hoare_case_option_wp | (simp del: Collect_const, vcg exspec=setMR_modifies) )+ - apply (simp add: msgMaxLength_unfold true_def false_def) + apply (simp add: msgMaxLength_unfold) apply (clarsimp split:if_split_asm simp:syscall_error_to_H_def map_option_Some_eq2) apply (simp add: msgFromLookupFailure_def split: lookup_failure.split @@ -1190,23 +1206,15 @@ shows apply (auto split: if_split) done -declare zipWith_Nil2[simp] - -declare zipWithM_x_Nil2[simp] - lemma asUser_tcbFault_obj_at: - "\obj_at' (\tcb. P (tcbFault tcb)) t\ asUser t' m - \\rv. obj_at' (\tcb. P (tcbFault tcb)) t\" + "asUser t' m \obj_at' (\tcb. P (tcbFault tcb)) t\" apply (simp add: asUser_def split_def) apply (wp threadGet_wp) apply (simp cong: if_cong) done lemma asUser_atcbContext_obj_at: - "t \ t' \ - \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - asUser t' m - \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + "t \ t' \ asUser t' m \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" apply (simp add: asUser_def split_def atcbContextGet_def atcbContextSet_def) apply (wp threadGet_wp) apply simp @@ -1281,7 +1289,7 @@ lemma exceptionMessage_length_aux : lemma copyMRsFault_ccorres_exception: "ccorres dc xfdc (valid_pspace' - and obj_at' (\tcb. map (atcbContext (tcbArch tcb)) ARM_H.exceptionMessage = msg) sender + and obj_at' (\tcb. 
map (user_regs (atcbContext (tcbArch tcb))) ARM_H.exceptionMessage = msg) sender and K (length msg = 3) and K (recvBuffer \ Some 0) and K (sender \ receiver)) @@ -1303,7 +1311,7 @@ lemma copyMRsFault_ccorres_exception: for as bs, simplified] bind_assoc) apply (rule ccorres_rhs_assoc2, rule ccorres_split_nothrow_novcg) - apply (rule_tac F="K $ obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_H.exceptionMessage = msg) sender" + apply (rule_tac F="K $ obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) ARM_H.exceptionMessage = msg) sender" in ccorres_mapM_x_while) apply (clarsimp simp: n_msgRegisters_def) apply (rule ccorres_guard_imp2) @@ -1349,7 +1357,7 @@ lemma mapM_cong: "\ \x. elem x xs \ f x = g x \< lemma copyMRsFault_ccorres_syscall: "ccorres dc xfdc (valid_pspace' - and obj_at' (\tcb. map (atcbContext (tcbArch tcb)) ARM_H.syscallMessage = msg) sender + and obj_at' (\tcb. map (user_regs (atcbContext (tcbArch tcb))) ARM_H.syscallMessage = msg) sender and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \) and K (length msg = 12) and K (recvBuffer \ Some 0) @@ -1388,7 +1396,7 @@ proof - and ys="drop (unat n_msgRegisters) (zip as bs)" for as bs, simplified] bind_assoc) apply (rule ccorres_rhs_assoc2, rule ccorres_split_nothrow_novcg) - apply (rule_tac F="K $ obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_H.syscallMessage = msg) sender" + apply (rule_tac F="K $ obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) ARM_H.syscallMessage = msg) sender" in ccorres_mapM_x_while) apply (clarsimp simp: n_msgRegisters_def) apply (rule ccorres_guard_imp2) @@ -1416,61 +1424,62 @@ proof - apply ceqv apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) - apply (rule ccorres_rel_imp[where r = "\rv rv'. True", simplified]) - apply (rule_tac F="\_. obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_H.syscallMessage = msg) - sender and valid_pspace' - and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" - in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) - apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters - option_to_0_def liftM_def[symmetric] - split: option.split_asm) - apply (rule ccorres_guard_imp2) - apply (rule_tac t=sender and r="ARM_H.syscallMessage ! (n + unat n_msgRegisters)" - in ccorres_add_getRegister) - apply (ctac(no_vcg)) - apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" - in ccorres_cross_over_guard) - apply (rule ccorres_move_array_assertion_ipc_buffer - | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ - apply (simp add: storeWordUser_def) - apply (rule ccorres_pre_stateAssert) - apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) - apply (simp add: pred_conj_def) - apply (wp user_getreg_rv) - apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def - syscallMessage_ccorres msgRegisters_ccorres - unat_add_lem[THEN iffD1] unat_of_nat32 - word_bits_def word_size_def) - apply (simp only:field_simps imp_ex imp_conjL) - apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def - pointerInUserData_h_t_valid - atcbContextGet_def - projectKOs objBits_simps word_less_nat_alt - unat_add_lem[THEN iffD1] unat_of_nat) - apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def - MessageID_Syscall_def - msg_align_bits valid_ipc_buffer_ptr'_def) - apply (erule aligned_add_aligned) - apply (rule aligned_add_aligned[where n=2]) - apply (simp add: is_aligned_def) - apply (rule is_aligned_mult_triv2 [where n=2, simplified]) - apply (simp)+ - apply (simp add: n_msgRegisters_def) - apply (vcg exspec=getRegister_modifies) - apply simp - apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) - apply (rule hoare_pre) - apply (wp hoare_case_option_wp | wpc)+ - apply clarsimp - apply (simp add: n_msgRegisters_def word_bits_def) - apply (simp add: n_msgRegisters_def) + apply (rule ccorres_rel_imp) + apply (rule_tac F="\_. obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) ARM_H.syscallMessage = msg) + sender and valid_pspace' + and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" + in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) + apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters + option_to_0_def liftM_def[symmetric] + split: option.split_asm) + apply (rule ccorres_guard_imp2) + apply (rule_tac t=sender and r="ARM_H.syscallMessage ! (n + unat n_msgRegisters)" + in ccorres_add_getRegister) + apply (ctac(no_vcg)) + apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" + in ccorres_cross_over_guard) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ + apply (simp add: storeWordUser_def) + apply (rule ccorres_pre_stateAssert) + apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) + apply (simp add: pred_conj_def) + apply (wp user_getreg_rv) + apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def + syscallMessage_ccorres msgRegisters_ccorres + unat_add_lem[THEN iffD1] unat_of_nat32 + word_bits_def word_size_def) + apply (simp only:field_simps imp_ex imp_conjL) + apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def + pointerInUserData_h_t_valid + atcbContextGet_def + projectKOs objBits_simps word_less_nat_alt + unat_add_lem[THEN iffD1] unat_of_nat) + apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def + MessageID_Syscall_def + msg_align_bits valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned) + apply (rule aligned_add_aligned[where n=2]) + apply (simp add: is_aligned_def) + apply (rule is_aligned_mult_triv2 [where n=2, simplified]) + apply (simp)+ + apply (simp add: n_msgRegisters_def) + apply (vcg exspec=getRegister_modifies) + apply simp + apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) + apply (rule hoare_pre) + apply (wp hoare_case_option_wp | wpc)+ + apply clarsimp + apply (simp add: n_msgRegisters_def word_bits_def) + apply (simp add: n_msgRegisters_def) + apply simp apply (frule (1) option_to_0_imp) apply (subst drop_zip) apply (subst drop_n) apply (clarsimp simp: n_msgRegisters_def numeral_eqs mapM_cong[OF msg_aux, simplified numeral_eqs]) apply (subst mapM_x_return_gen[where w2="()"]) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp) apply (rule hoare_impI) apply (rule mapM_x_wp_inv) @@ -1597,6 +1606,7 @@ proof - let ?obj_at_ft = "obj_at' (\tcb. tcbFault tcb = Some ft) sender" note symb_exec_r_fault = ccorres_symb_exec_r_known_rv_UNIV [where xf'=ret__unsigned_' and R="?obj_at_ft" and R'=UNIV] + note empty_fail_cond[simp] show ?thesis apply (unfold K_def) apply (intro ccorres_gen_asm) @@ -1853,7 +1863,7 @@ lemma doFaultTransfer_ccorres [corres]: apply ceqv apply csymbr apply (ctac (no_vcg, c_lines 2) add: setMessageInfo_ccorres) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply wp apply (simp add: badgeRegister_def ARM.badgeRegister_def Kernel_C.badgeRegister_def "StrictC'_register_defs") @@ -1892,7 +1902,7 @@ lemma unifyFailure_ccorres: assumes corr_ac: "ccorres (f \ r) xf P P' hs a c" shows "ccorres ((\_. dc) \ r) xf P P' hs (unifyFailure a) c" using corr_ac - apply (simp add: unifyFailure_def rethrowFailure_def const_def o_def + apply (simp add: unifyFailure_def rethrowFailure_def const_def handleE'_def throwError_def) apply (clarsimp simp: ccorres_underlying_def bind_def split_def return_def split: xstate.splits sum.splits) @@ -2335,7 +2345,7 @@ lemma transferCapsLoop_ccorres: \ \\destSlot = (if slots = [] then NULL else cte_Ptr (hd slots)) \ length slots \ 1 \ slots \ [0]\)" defines "is_the_ep \ \cap. isEndpointCap cap \ ep \ None \ capEPPtr cap = the ep" - defines "stable \ \scap excap. excap \ scap \ excap = maskedAsFull scap scap" + defines "stable_masked \ \scap excap. excap \ scap \ excap = maskedAsFull scap scap" defines "relative_at \ \scap slot s. cte_wp_at' (\cte. 
badge_derived' scap (cteCap cte) \ capASID scap = capASID (cteCap cte) \ @@ -2350,7 +2360,7 @@ lemma transferCapsLoop_ccorres: (\s. (\x \ set caps. s \' fst x \ cte_wp_at' (\cte. slots \ [] \ is_the_ep (cteCap cte) \ (fst x) = (cteCap cte)) (snd x) s - \ cte_wp_at' (\cte. fst x \ NullCap \ stable (fst x) (cteCap cte)) (snd x) s)) and + \ cte_wp_at' (\cte. fst x \ NullCap \ stable_masked (fst x) (cteCap cte)) (snd x) s)) and (\s. \ sl \ (set slots). cte_wp_at' (isNullCap o cteCap) sl s) and (\_. n + length caps \ 3 \ distinct slots )) (precond n mi slots) @@ -2415,12 +2425,12 @@ next by (simp add:relative_at_def) have stable_eq: - "\scap excap. \stable scap excap; isEndpointCap excap\ \ scap = excap" - by (simp add:isCap_simps stable_def maskedAsFull_def split:if_splits) + "\scap excap. \stable_masked scap excap; isEndpointCap excap\ \ scap = excap" + by (simp add:isCap_simps stable_masked_def maskedAsFull_def split:if_splits) have is_the_ep_stable: - "\a b. \a \ NullCap \ stable a b; \ is_the_ep b \ \ \ is_the_ep a" - apply (clarsimp simp:stable_def maskedAsFull_def is_the_ep_def isCap_simps split:if_splits) + "\a b. \a \ NullCap \ stable_masked a b; \ is_the_ep b \ \ \ is_the_ep a" + apply (clarsimp simp:stable_masked_def maskedAsFull_def is_the_ep_def isCap_simps split:if_splits) apply auto done @@ -2573,8 +2583,8 @@ next \ (\x\set slots. cte_wp_at' (isNullCap \ cteCap) x s) \ (\x\set xs'. s \' fst x \ cte_wp_at' (\c. is_the_ep (cteCap c) \ fst x = cteCap c) (snd x) s - \ cte_wp_at' (\c. fst x \ NullCap \ stable (fst x) (cteCap c)) (snd x) s)" - in hoare_post_imp_R) + \ cte_wp_at' (\c. fst x \ NullCap \ stable_masked (fst x) (cteCap c)) (snd x) s)" + in hoare_strengthen_postE_R) prefer 2 apply (clarsimp simp:cte_wp_at_ctes_of valid_pspace_mdb' valid_pspace'_splits valid_pspace_valid_objs' is_derived_capMasterCap image_def) @@ -2584,10 +2594,10 @@ next apply (rule conjI) apply (drule(1) bspec)+ apply (rule conjI | clarsimp)+ - apply (clarsimp simp:is_the_ep_def isCap_simps stable_def) + apply (clarsimp simp:is_the_ep_def isCap_simps stable_masked_def) apply (drule(1) bspec)+ apply (rule conjI | clarsimp)+ - apply (clarsimp simp:is_the_ep_def stable_def split:if_splits)+ + apply (clarsimp simp:is_the_ep_def stable_masked_def split:if_splits)+ apply (case_tac "a = cteCap cteb",clarsimp) apply (simp add:maskedAsFull_def split:if_splits) apply (simp add:maskedAsFull_again) @@ -2620,9 +2630,8 @@ next word_sle_def t2n_mask_eq_if) apply (rule conjI) apply (clarsimp simp: ccap_rights_relation_def cap_rights_to_H_def - false_def true_def to_bool_def allRights_def - excaps_map_def split_def - dest!: drop_n_foo interpret_excaps_eq) + allRights_def excaps_map_def split_def + dest!: drop_n_foo interpret_excaps_eq) apply (clarsimp simp:from_bool_def split:bool.splits) apply (case_tac "isEndpointCap (fst x)") apply (clarsimp simp: cap_get_tag_EndpointCap ep_cap_not_null cap_get_tag_isCap[symmetric]) @@ -2662,7 +2671,7 @@ next apply (rule conseqPre, vcg) apply (clarsimp split del: if_split) apply (clarsimp split del: if_split - simp add: Collect_const[symmetric] precond_def true_def false_def + simp add: Collect_const[symmetric] precond_def simp del: Collect_const) apply (rule HoarePartial.Seq[rotated] HoarePartial.Cond[OF order_refl] HoarePartial.Basic[OF order_refl] HoarePartial.Skip[OF order_refl] @@ -2689,14 +2698,14 @@ next apply (subgoal_tac "fst x = cteCap cte",simp) apply clarsimp apply (elim disjE) - apply (clarsimp simp:ep_cap_not_null stable_def) + apply (clarsimp simp:ep_cap_not_null 
stable_masked_def) apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) apply (clarsimp simp:valid_cap_simps' isCap_simps) apply (subgoal_tac "slots \ []") apply simp apply clarsimp apply (elim disjE) - apply (clarsimp simp:ep_cap_not_null stable_def) + apply (clarsimp simp:ep_cap_not_null stable_masked_def) apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) apply (clarsimp dest!:ccap_relation_lift simp:cap_get_tag_isCap is_the_ep_def) apply (clarsimp simp:valid_cap_simps' isCap_simps) @@ -2878,10 +2887,11 @@ lemma ccorres_sequenceE_while': Basic (\s. i_'_update (\_. i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -2909,6 +2919,7 @@ proof - let ?curr = "\s. current_extra_caps_' (globals s)" let ?EXCNONE = "{s. ret__unsigned_long_' s = scast EXCEPTION_NONE}" let ?interpret = "\v n. take n (array_to_list (excaprefs_C v))" + note empty_fail_cond[simp] show ?thesis apply (rule ccorres_gen_asm)+ apply (cinit(no_subst_asm) lift: thread_' bufferPtr_' info_' simp: whileAnno_def) @@ -2936,9 +2947,10 @@ proof - del: Collect_const) apply csymbr apply (rename_tac "lngth") - apply (simp add: mi_from_H_def mapME_def del: Collect_const cong: bind_apply_cong) + apply (unfold mapME_def)[1] + apply (simp add: mi_from_H_def del: Collect_const) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="length rv = unat word2" in ccorres_gen_asm) + apply (rule_tac P="length xs = unat word2" in ccorres_gen_asm) apply csymbr apply (rule ccorres_rhs_assoc2) apply (rule ccorres_add_returnOk2, @@ -2948,7 +2960,7 @@ proof - and Q="UNIV" and F="\n s. valid_pspace' s \ tcb_at' thread s \ (case buffer of Some x \ valid_ipc_buffer_ptr' x | _ \ \) s \ - (\m < length rv. user_word_at (rv ! m) + (\m < length xs. user_word_at (xs ! m) (x2 + (of_nat m + (msgMaxLength + 2)) * 4) s)" in ccorres_sequenceE_while') apply (simp add: split_def) @@ -2958,7 +2970,7 @@ proof - apply (rule_tac xf'=cptr_' in ccorres_abstract, ceqv) apply (ctac add: capFaultOnFailure_ccorres [OF lookupSlotForThread_ccorres']) - apply (rule_tac P="is_aligned rva 4" in ccorres_gen_asm) + apply (rule_tac P="is_aligned rv 4" in ccorres_gen_asm) apply (simp add: ccorres_cond_iffs liftE_bindE) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_getSlotCap]) apply (rule_tac P'="UNIV \ {s. excaps_map ys @@ -2979,7 +2991,7 @@ proof - apply (clarsimp simp: ccorres_cond_iffs) apply (rule_tac P= \ and P'="{x. errstate x= lu_ret___struct_lookupSlot_raw_ret_C \ - rv' = (rv ! length ys)}" + rv' = (xs ! 
length ys)}" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def) @@ -2987,9 +2999,9 @@ proof - apply (clarsimp simp: cfault_rel2_def) apply (clarsimp simp: cfault_rel_def) apply (simp add: seL4_Fault_CapFault_lift) - apply (clarsimp simp: is_cap_fault_def to_bool_def false_def) + apply (clarsimp simp: is_cap_fault_def) apply wp - apply (rule hoare_post_imp_R, rule lsft_real_cte) + apply (rule hoare_strengthen_postE_R, rule lsft_real_cte) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps') apply (vcg exspec=lookupSlot_modifies) apply vcg @@ -3020,7 +3032,7 @@ proof - apply ceqv apply (simp del: Collect_const) apply (rule_tac P'="{s. snd rv'=?curr s}" - and P="\s. length rva = length rv \ (\x \ set rva. snd x \ 0)" + and P="\s. length rv = length xs \ (\x \ set rv. snd x \ 0)" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def @@ -3042,7 +3054,7 @@ proof - liftE_bindE[symmetric]) apply (wp mapME_length mapME_set | simp)+ apply (rule_tac Q'="\rv. no_0_obj' and real_cte_at' rv" - in hoare_post_imp_R, wp lsft_real_cte) + in hoare_strengthen_postE_R, wp lsft_real_cte) apply (clarsimp simp: cte_wp_at_ctes_of) apply (wpsimp)+ apply (clarsimp simp: guard_is_UNIV_def @@ -3115,7 +3127,7 @@ proof - apply (cinit lift: sender_' receiver_' sendBuffer_' receiveBuffer_' canGrant_' badge_' endpoint_' cong: call_ignore_cong) - apply (clarsimp cong: call_ignore_cong simp del: dc_simp) + apply (clarsimp cong: call_ignore_cong) apply (ctac(c_lines 2, no_vcg) add: getMessageInfo_ccorres') apply (rule_tac xf'="\s. current_extra_caps_' (globals s)" and r'="\c c'. interpret_excaps c' = excaps_map c" @@ -3160,7 +3172,7 @@ proof - apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: seL4_MessageInfo_lift_def message_info_to_H_def mask_def msgLengthBits_def word_bw_assocs) - apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] static_imp_wp + apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] hoare_weak_lift_imp | simp)+ apply (simp add: Collect_const_mem) apply (auto simp: excaps_in_mem_def valid_ipc_buffer_ptr'_def @@ -3173,7 +3185,7 @@ qed lemma lookupIPCBuffer_not_Some_0: "\\\ lookupIPCBuffer r t \\rv. K (rv \ Some 0)\" apply (simp add: lookupIPCBuffer_def ARM_H.lookupIPCBuffer_def) - apply (wp hoare_post_taut haskell_assert_wp + apply (wp hoare_TrueI haskell_assert_wp | simp add: Let_def getThreadBufferSlot_def locateSlotTCB_def | intro conjI impI | wpc)+ done @@ -3214,7 +3226,6 @@ lemma replyFromKernel_error_ccorres [corres]: apply ((rule ccorres_Guard_Seq)+)? 
apply csymbr apply (rule ccorres_abstract_cleanup) - apply (fold dc_def)[1] apply (rule setMessageInfo_ccorres) apply wp apply (simp add: Collect_const_mem) @@ -3232,7 +3243,7 @@ lemma replyFromKernel_error_ccorres [corres]: message_info_to_H_def valid_pspace_valid_objs') apply (clarsimp simp: msgLengthBits_def msgFromSyscallError_def syscall_error_to_H_def syscall_error_type_defs - mask_def true_def option_to_ptr_def + mask_def option_to_ptr_def split: if_split_asm) done @@ -3283,14 +3294,12 @@ lemma doIPCTransfer_ccorres [corres]: apply simp_all[3] apply ceqv apply csymbr - apply (fold dc_def)[1] apply ctac apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero) - apply (fold dc_def)[1] apply ctac - apply (clarsimp simp: guard_is_UNIV_def false_def option_to_ptr_def split: option.splits) + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def split: option.splits) apply (rule_tac Q="\rv. valid_pspace' and cur_tcb' and tcb_at' sender and tcb_at' receiver and K (rv \ Some 0) and (case_option \ valid_ipc_buffer_ptr' rv) @@ -3299,7 +3308,7 @@ lemma doIPCTransfer_ccorres [corres]: apply (auto simp: valid_ipc_buffer_ptr'_def option_to_0_def split: option.splits)[1] apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) - apply (auto simp: to_bool_def true_def) + apply auto done lemma length_exceptionMessage: @@ -3314,7 +3323,6 @@ lemma Arch_getSanitiseRegisterInfo_ccorres: (Call Arch_getSanitiseRegisterInfo_'proc)" apply (cinit' lift: thread_' simp: getSanitiseRegisterInfo_def) apply (rule ccorres_return_C, simp+) - apply (clarsimp simp: false_def) done lemma copyMRsFaultReply_ccorres_exception: @@ -3346,7 +3354,7 @@ proof - apply (rule ccorres_rhs_assoc2) apply (simp add: MessageID_Exception_def) apply ccorres_rewrite - apply (subst bind_return_unit) + apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_zipWithM_x_while) apply clarsimp @@ -3395,7 +3403,7 @@ proof - n_msgRegisters_def of_nat_less_iff) apply ccorres_rewrite - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (wp mapM_wp') apply clarsimp+ apply (clarsimp simp: guard_is_UNIV_def message_info_to_H_def @@ -3549,7 +3557,6 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (subst aligned_add_aligned, assumption) apply (rule is_aligned_mult_triv2[where n=2, simplified]) apply (simp add: msg_align_bits) - apply (simp add: of_nat_unat[simplified comp_def]) apply (simp only: n_msgRegisters_def) apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def word_unat.Rep_inverse[of "scast _ :: 'a word"] @@ -3587,8 +3594,8 @@ lemma copyMRsFaultReply_ccorres_syscall: apply simp apply (subst option.split[symmetric,where P=id, simplified]) apply (rule valid_drop_case) - apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified K_def] - lookupIPCBuffer_not_Some_0[simplified K_def]) + apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified] + lookupIPCBuffer_not_Some_0[simplified]) apply (simp add: length_syscallMessage length_msgRegisters n_syscallMessage_def @@ -3600,7 +3607,7 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (rule ccorres_guard_imp) apply (rule ccorres_symb_exec_l) apply (case_tac rva; clarsimp) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (rule ccorres_return_Skip)+ apply (wp mapM_x_wp_inv user_getreg_inv' | clarsimp simp: zipWithM_x_mapM_x split: prod.split)+ apply 
(cases "4 < len") @@ -3639,7 +3646,7 @@ lemma handleArchFaultReply_corres: apply simp+ apply (rule ccorres_symb_exec_l) apply (ctac add: ccorres_return_C) - apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp simp: to_bool_def true_def)+ + apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp)+ done (* MOVE *) @@ -3691,7 +3698,7 @@ lemma handleFaultReply_ccorres [corres]: apply (unfold K_def, rule ccorres_gen_asm) apply (rule monadic_rewrite_ccorres_assemble_nodrop[OF _ handleFaultReply',rotated], simp) apply (cinit lift: sender_' receiver_' simp: whileAnno_def) - apply (clarsimp simp del: dc_simp) + apply clarsimp apply (ctac(c_lines 2) add: getMessageInfo_ccorres') apply (rename_tac tag tag') apply csymbr @@ -3737,7 +3744,7 @@ lemma handleFaultReply_ccorres [corres]: split del: if_split) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) - apply (fold bind_assoc id_def) + apply (fold bind_assoc) apply (ctac add: copyMRsFaultReply_ccorres_syscall[simplified bind_assoc[symmetric]]) apply (ctac add: ccorres_return_C) apply wp @@ -3780,9 +3787,9 @@ lemma handleFaultReply_ccorres [corres]: apply clarsimp apply vcg_step apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def - message_info_to_H_def to_bool_def scast_def + message_info_to_H_def scast_def length_exceptionMessage length_syscallMessage - min_def word_less_nat_alt true_def + min_def word_less_nat_alt guard_is_UNIV_def seL4_Faults seL4_Arch_Faults split: if_split) apply (simp add: length_exceptionMessage length_syscallMessage) @@ -3790,10 +3797,8 @@ lemma handleFaultReply_ccorres [corres]: apply clarsimp apply (vcg exspec=getRegister_modifies) apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def - message_info_to_H_def to_bool_def - length_exceptionMessage length_syscallMessage - min_def word_less_nat_alt true_def - obj_at'_def + message_info_to_H_def length_exceptionMessage length_syscallMessage + min_def word_less_nat_alt obj_at'_def split: if_split) using arch_fault_to_fault_tag_range apply (fastforce simp: seL4_Faults seL4_Arch_Faults) @@ -3834,7 +3839,7 @@ lemma cteDeleteOne_tcbFault: apply (wp emptySlot_tcbFault cancelAllIPC_tcbFault getCTE_wp' cancelAllSignals_tcbFault unbindNotification_tcbFault isFinalCapability_inv unbindMaybeNotification_tcbFault - static_imp_wp + hoare_weak_lift_imp | wpc | simp add: Let_def)+ apply (clarsimp split: if_split) done @@ -3859,7 +3864,7 @@ lemma transferCaps_local_slots: transferCaps tag caps ep receiver receiveBuffer \\tag'. cte_wp_at' (\cte. P (cteCap cte)) slot\" apply (simp add: transferCaps_def pred_conj_def) - apply (rule hoare_seq_ext[rotated]) + apply (rule bind_wp_fwd) apply (rule hoare_vcg_conj_lift) apply (rule get_rs_real_cte_at') apply (rule get_recv_slot_inv') @@ -3925,10 +3930,6 @@ lemma doReplyTransfer_ccorres [corres]: \ \\grant = from_bool grant\) hs (doReplyTransfer sender receiver slot grant) (Call doReplyTransfer_'proc)" -proof - - have invs_valid_queues_strg: "\s. 
invs' s \ valid_queues s" - by clarsimp - show ?thesis apply (cinit lift: sender_' receiver_' slot_' grant_') apply (rule getThreadState_ccorres_foo) apply (rule ccorres_assert2) @@ -3954,14 +3955,13 @@ proof - apply csymbr apply wpc apply (clarsimp simp: ccorres_cond_iffs split del: if_split) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg)) apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) - apply (wpsimp wp: sts_running_valid_queues setThreadState_st_tcb)+ + apply (wpsimp wp: sts_valid_objs' setThreadState_st_tcb)+ apply (wp cteDeleteOne_sch_act_wf) apply vcg apply (rule conseqPre, vcg) @@ -3970,15 +3970,13 @@ proof - apply wp apply (simp add: cap_get_tag_isCap) apply (strengthen invs_weak_sch_act_wf_strg - cte_wp_at_imp_consequent'[where P="\ct. Ex (ccap_relation (cteCap ct))" for ct] - invs_valid_queues_strg) + cte_wp_at_imp_consequent'[where P="\ct. Ex (ccap_relation (cteCap ct))" for ct]) apply (simp add: cap_reply_cap_def) apply (wp doIPCTransfer_reply_or_replyslot) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero split del: if_split) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def)[1] apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (rule_tac A'=UNIV in stronger_ccorres_guard_imp) @@ -4007,22 +4005,20 @@ proof - apply (ctac (no_vcg)) apply (simp only: K_bind_def) apply (ctac add: possibleSwitchTo_ccorres) - apply (wp sts_running_valid_queues setThreadState_st_tcb | simp)+ - apply (fold dc_def)[1] - apply (ctac add: setThreadState_ccorres_valid_queues'_simple) + apply (wp sts_valid_objs' setThreadState_st_tcb | simp)+ + apply (ctac add: setThreadState_ccorres_simple) apply wp - apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' static_imp_wp + apply ((wp threadSet_sch_act hoare_weak_lift_imp threadSet_valid_objs' threadSet_weak_sch_act_wf | simp add: valid_tcb_state'_def)+)[1] - apply (clarsimp simp: guard_is_UNIV_def ThreadState_Restart_def - ThreadState_Inactive_def mask_def to_bool_def - option_to_ctcb_ptr_def) + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def option_to_ctcb_ptr_def) - apply (rule_tac Q="\rv. valid_queues and tcb_at' receiver and valid_queues' and + apply (rule_tac Q="\rv. tcb_at' receiver and valid_objs' and sch_act_simple and (\s. ksCurDomain s \ maxDomain) and - (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) + (\s. 
sch_act_wf (ksSchedulerAction s) s) and + pspace_aligned' and pspace_distinct'" in hoare_post_imp) apply (clarsimp simp: inQ_def weak_sch_act_wf_def) - apply (wp threadSet_valid_queues threadSet_sch_act handleFaultReply_sch_act_wf) + apply (wp threadSet_sch_act handleFaultReply_sch_act_wf) apply (clarsimp simp: guard_is_UNIV_def) apply assumption apply clarsimp @@ -4031,15 +4027,14 @@ proof - apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) apply (clarsimp simp: ctcb_relation_def typ_heap_simps) apply wp - apply (strengthen vp_invs_strg' invs_valid_queues') + apply (strengthen vp_invs_strg') apply (wp cteDeleteOne_tcbFault cteDeleteOne_sch_act_wf) apply vcg apply (rule conseqPre, vcg) apply (simp(no_asm_use) add: gs_set_assn_Delete_cstate_relation[unfolded o_def] subset_iff rf_sr_def) - apply (clarsimp simp: guard_is_UNIV_def to_bool_def true_def - option_to_ptr_def option_to_0_def false_def - ThreadState_Running_def mask_def + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def + ThreadState_defs mask_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs option_to_ctcb_ptr_def split: option.splits) @@ -4048,7 +4043,6 @@ proof - cap_get_tag_isCap) apply fastforce done -qed lemma ccorres_getCTE_cte_at: "ccorresG rf_sr \ r xf P P' hs (getCTE p >>= f) c @@ -4068,7 +4062,7 @@ lemma ccorres_getCTE_cte_at: done lemma setupCallerCap_ccorres [corres]: - "ccorres dc xfdc (valid_queues and valid_pspace' and (\s. \d p. sender \ set (ksReadyQueues s (d, p))) + "ccorres dc xfdc (valid_pspace' and (\s. sch_act_wf (ksSchedulerAction s) s) and sch_act_not sender and tcb_at' sender and tcb_at' receiver and tcb_at' sender and tcb_at' receiver) @@ -4081,8 +4075,7 @@ lemma setupCallerCap_ccorres [corres]: apply (frule_tac p=sender in is_aligned_tcb_ptr_to_ctcb_ptr) apply (cinit lift: sender_' receiver_' canGrant_') apply (clarsimp simp: word_sle_def - tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] - , fold dc_def)[1] + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]]) apply ccorres_remove_UNIV_guard apply (ctac(no_vcg)) apply (rule ccorres_move_array_assertion_tcb_ctes) @@ -4103,14 +4096,14 @@ lemma setupCallerCap_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg)) apply (rule ccorres_assert) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply csymbr apply (ctac add: cteInsert_ccorres) apply simp apply (wp getSlotCap_cte_wp_at) apply (clarsimp simp: ccap_relation_def cap_lift_reply_cap cap_to_H_simps cap_reply_cap_lift_def - false_def tcbSlots Kernel_C.tcbCaller_def + tcbSlots Kernel_C.tcbCaller_def size_of_def cte_level_bits_def ctcb_size_bits_def) apply (simp add: is_aligned_neg_mask) apply (wp getCTE_wp') @@ -4130,11 +4123,11 @@ lemma setupCallerCap_ccorres [corres]: apply (simp add: locateSlot_conv) apply wp apply (clarsimp simp: ccap_rights_relation_def allRights_def - mask_def true_def cap_rights_to_H_def tcbCallerSlot_def + mask_def cap_rights_to_H_def tcbCallerSlot_def Kernel_C.tcbCaller_def) apply simp apply wp - apply (clarsimp simp: Kernel_C.ThreadState_BlockedOnReply_def mask_def + apply (clarsimp simp: ThreadState_defs mask_def valid_pspace'_def tcbReplySlot_def valid_tcb_state'_def Collect_const_mem tcb_cnode_index_defs) @@ -4158,7 +4151,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: 
if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -4179,7 +4172,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -4200,23 +4193,20 @@ lemma sendIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def - tcb_queue_relation'_def) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -4240,31 +4230,28 @@ lemma sendIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - isRecvEP_def isSendEP_def - tcb_queue_relation'_def valid_ep'_def - split: endpoint.splits list.splits + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_def isSendEP_def + tcb_queue_relation'_def valid_ep'_def + split: endpoint.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (clarsimp simp: is_aligned_neg_mask + dest!: is_aligned_tcb_ptr_to_ctcb_ptr split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (clarsimp simp: is_aligned_neg_mask - dest!: is_aligned_tcb_ptr_to_ctcb_ptr - split del: if_split) - apply (clarsimp split: if_split) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply (clarsimp split: if_split) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ 
\ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -4287,10 +4274,9 @@ lemma rf_sr_tcb_update_twice: cmachine_state_relation_def) lemma sendIPC_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and valid_objs' and + "ccorres dc xfdc (tcb_at' thread and valid_objs' and pspace_aligned' and pspace_distinct' and sch_act_not thread and ep_at' epptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + (\s. sch_act_wf (ksSchedulerAction s) s) and K (bos = ThreadState_BlockedOnSend \ epptr' = epptr \ badge' = badge \ cg = from_bool canGrant \ cgr = from_bool canGrantReply @@ -4342,13 +4328,11 @@ lemma sendIPC_block_ccorres_helper: (simp add: typ_heap_simps')+)[1] apply (simp add: tcb_cte_cases_def) apply (simp add: ctcb_relation_def cthread_state_relation_def - ThreadState_BlockedOnSend_def mask_def - from_bool_def to_bool_def) - apply (clarsimp split: bool.split) + ThreadState_defs mask_def) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs') apply (clarsimp simp: guard_is_UNIV_def) apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def @@ -4452,6 +4436,19 @@ lemma tcb_queue_relation_qend_valid': apply (simp add: h_t_valid_clift_Some_iff) done +lemma tcb_queue'_head_end_NULL: + assumes qr: "tcb_queue_relation' getNext getPrev mp queue qhead qend" + and tat: "\t\set queue. tcb_at' t s" + shows "(qend = NULL) = (qhead = NULL)" + using qr tat + apply - + apply (erule tcb_queue_relationE') + apply (simp add: tcb_queue_head_empty_iff split: if_splits) + apply (rule tcb_at_not_NULL) + apply (erule bspec) + apply simp + done + lemma tcbEPAppend_spec: "\s queue. \ \ \s. \t. (t, s) \ rf_sr \ (\tcb\set queue. 
tcb_at' tcb t) \ distinct queue @@ -4534,7 +4531,7 @@ lemma sendIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -4550,12 +4547,12 @@ lemma sendIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (SendEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -4572,29 +4569,26 @@ lemma sendIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=2] EPState_Send_def) - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=2] EPState_Send_def) + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (simp only:projectKOs injectKO_ep objBits_simps) - apply clarsimp - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (simp only:projectKOs injectKO_ep objBits_simps) + apply clarsimp + apply (clarsimp simp: obj_at'_def projectKOs) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -4611,31 +4605,28 @@ lemma sendIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=2] EPState_Send_def - split: if_split) - apply (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask - 
valid_ep'_def - dest: tcb_queue_relation_next_not_NULL) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=2] EPState_Send_def + split: if_split) + apply (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask + valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -4655,8 +4646,7 @@ lemma ctcb_relation_blockingIPCCanGrantD: lemma sendIPC_ccorres [corres]: "ccorres dc xfdc (invs' and st_tcb_at' simple' thread - and sch_act_not thread and ep_at' epptr and - (\s. \d p. thread \ set (ksReadyQueues s (d, p)))) + and sch_act_not thread and ep_at' epptr) (UNIV \ \\blocking = from_bool blocking\ \ \\do_call = from_bool do_call\ \ \\badge = badge\ @@ -4687,8 +4677,7 @@ lemma sendIPC_ccorres [corres]: apply ceqv apply (rule_tac A="invs' and st_tcb_at' simple' thread and sch_act_not thread and ko_at' ep epptr - and ep_at' epptr - and (\s. \d p. 
thread \ set (ksReadyQueues s (d, p)))" + and ep_at' epptr" in ccorres_guard_imp2 [where A'=UNIV]) apply wpc \ \RecvEP case\ @@ -4728,29 +4717,24 @@ lemma sendIPC_ccorres [corres]: apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) apply (clarsimp split del: if_split) apply (wpc ; ccorres_rewrite) - apply (clarsimp simp: from_bool_def disj_imp[symmetric] split del: if_split) + apply (clarsimp simp: disj_imp[symmetric] split del: if_split) apply (wpc ; clarsimp) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setupCallerCap_ccorres) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres) - apply (fold dc_def)[1] apply (rule ccorres_return_Skip) apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift possibleSwitchTo_sch_act_not - possibleSwitchTo_sch_act_not sts_st_tcb' - possibleSwitchTo_ksQ' sts_valid_queues sts_ksQ' + possibleSwitchTo_sch_act_not sts_st_tcb' sts_valid_objs' simp: valid_tcb_state'_def)+ apply vcg - apply (wpsimp wp: doIPCTransfer_sch_act setEndpoint_ksQ hoare_vcg_all_lift - set_ep_valid_objs' setEndpoint_valid_mdb' + apply (wpsimp wp: doIPCTransfer_sch_act hoare_vcg_all_lift + set_ep_valid_objs' setEndpoint_valid_mdb' | wp (once) hoare_drop_imp | strengthen sch_act_wf_weak)+ - apply (fastforce simp: guard_is_UNIV_def ThreadState_Inactive_def Collect_const_mem - ThreadState_Running_def mask_def from_bool_def - option_to_ptr_def option_to_0_def - split: bool.split_asm) + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs Collect_const_mem + mask_def option_to_ptr_def option_to_0_def + split: bool.split_asm) \ \IdleEP case\ apply (rule ccorres_cond_true) @@ -4821,7 +4805,7 @@ lemma sendIPC_ccorres [corres]: st_tcb_at'_def valid_tcb_state'_def ko_wp_at'_def isBlockedOnSend_def projectKO_opt_tcb split: if_split_asm if_split) - apply (rule conjI, simp, rule impI, clarsimp simp: valid_pspace_valid_objs') + apply (rule conjI, simp, rule impI, clarsimp simp: valid_pspace'_def) apply (erule delta_sym_refs) apply (clarsimp split: if_split_asm dest!: symreftype_inverse')+ @@ -4865,10 +4849,9 @@ lemma ctcb_relation_blockingIPCCanGrantReplyD: done lemma receiveIPC_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and valid_objs' and + "ccorres dc xfdc (tcb_at' thread and valid_objs' and pspace_aligned' and pspace_distinct' and sch_act_not thread and ep_at' epptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and K (epptr = epptr && ~~ mask 4) and K (isEndpointCap cap \ ccap_relation cap cap')) UNIV hs @@ -4902,11 +4885,11 @@ lemma receiveIPC_block_ccorres_helper: apply (erule(1) rf_sr_tcb_update_no_queue_gen, (simp add: typ_heap_simps)+) apply (simp add: tcb_cte_cases_def) apply (simp add: ctcb_relation_def cthread_state_relation_def ccap_relation_ep_helpers - ThreadState_BlockedOnReceive_def mask_def cap_get_tag_isCap) + ThreadState_defs mask_def cap_get_tag_isCap) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_valid_queues hoare_vcg_all_lift threadSet_valid_objs' + apply (wp hoare_vcg_all_lift threadSet_valid_objs' threadSet_weak_sch_act_wf_runnable') apply (clarsimp simp: guard_is_UNIV_def) apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def @@ -4933,7 +4916,7 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -4949,12 +4932,12 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (RecvEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -4971,31 +4954,28 @@ lemma receiveIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=2] EPState_Recv_def - split: if_split) - apply (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask - valid_ep'_def - dest: tcb_queue_relation_next_not_NULL) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=2] EPState_Recv_def + split: if_split) + apply (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask + valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule 
cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -5012,28 +4992,25 @@ lemma receiveIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=2] EPState_Recv_def) - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=2] EPState_Recv_def) + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5059,7 +5036,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -5080,7 +5057,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5101,23 +5078,20 @@ lemma receiveIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def - tcb_queue_relation'_def) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated 
-1]) - apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) apply simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5141,31 +5115,28 @@ lemma receiveIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - isRecvEP_def isSendEP_def - tcb_queue_relation'_def valid_ep'_def - split: endpoint.splits list.splits + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_def isSendEP_def + tcb_queue_relation'_def valid_ep'_def + split: endpoint.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (clarsimp simp: is_aligned_neg_mask + dest!: is_aligned_tcb_ptr_to_ctcb_ptr split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (clarsimp simp: is_aligned_neg_mask - dest!: is_aligned_tcb_ptr_to_ctcb_ptr - split del: if_split) - apply (clarsimp split: if_split) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply (clarsimp split: if_split) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5229,7 +5200,7 @@ lemma completeSignal_ccorres: apply (erule(1) cmap_relation_ko_atE[OF cmap_relation_ntfn]) apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps) apply ceqv - apply (fold dc_def, ctac(no_vcg)) + apply (ctac(no_vcg)) apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp) @@ -5282,7 +5253,6 @@ lemma receiveIPC_ccorres [corres]: notes option.case_cong_weak [cong] shows "ccorres dc xfdc (invs' and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. 
thread \ set (ksReadyQueues s (d, p))) and valid_cap' cap and K (isEndpointCap cap)) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ \ \ccap_relation cap \cap\ @@ -5343,7 +5313,7 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule ccorres_cond[where R=\]) apply (simp add: Collect_const_mem) - apply (ctac add: completeSignal_ccorres[unfolded dc_def]) + apply (ctac add: completeSignal_ccorres) apply (rule_tac xf'=ret__unsigned_' and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv @@ -5358,7 +5328,6 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule_tac A="invs' and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. thread \ set (ksReadyQueues s (d, p))) and ko_at' ep (capEPPtr cap)" in ccorres_guard_imp2 [where A'=UNIV]) apply wpc @@ -5373,20 +5342,18 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp apply (rename_tac list NOo) - apply (rule_tac ep="RecvEP list" - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep="RecvEP list" in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (rename_tac list) apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \IdleEP case\ apply (rule ccorres_cond_true) apply csymbr @@ -5398,18 +5365,16 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp - apply (rule_tac ep=IdleEP - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep=IdleEP in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \SendEP case\ apply (thin_tac "isBlockinga = from_bool P" for P) apply (rule ccorres_cond_false) @@ -5487,12 +5452,10 @@ lemma receiveIPC_ccorres [corres]: split: Structures_H.thread_state.splits) apply ceqv - apply (fold dc_def) - supply dc_simp[simp del] apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) apply wpc (* blocking ipc call *) - apply (clarsimp simp: from_bool_def split del: if_split simp del: Collect_const) + apply (clarsimp split del: if_split simp del: Collect_const) apply ccorres_rewrite apply (wpc ; clarsimp ; ccorres_rewrite) apply csymbr @@ -5504,28 +5467,25 @@ lemma receiveIPC_ccorres [corres]: apply ccorres_rewrite apply ctac apply (ctac add: possibleSwitchTo_ccorres) - apply (wpsimp wp: sts_st_tcb' sts_valid_queues) + apply (wpsimp wp: sts_st_tcb' sts_valid_objs') apply (vcg exspec=setThreadState_modifies) - apply (fastforce simp: guard_is_UNIV_def ThreadState_Inactive_def - mask_def ThreadState_Running_def cap_get_tag_isCap - ccap_relation_ep_helpers) + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs mask_def + 
cap_get_tag_isCap ccap_relation_ep_helpers) apply (clarsimp simp: valid_tcb_state'_def) - apply (rule_tac Q="\_. valid_pspace' and valid_queues + apply (rule_tac Q="\_. valid_pspace' and st_tcb_at' ((=) sendState) sender and tcb_at' thread and (\s. sch_act_wf (ksSchedulerAction s) s) - and (\s. (\a b. sender \ set (ksReadyQueues s (a, b)))) and sch_act_not sender and K (thread \ sender) and (\s. ksCurDomain s \ maxDomain)" in hoare_post_imp) - apply (clarsimp simp: valid_pspace_valid_objs' pred_tcb_at'_def sch_act_wf_weak - obj_at'_def) + apply (fastforce simp: valid_pspace_valid_objs' pred_tcb_at'_def sch_act_wf_weak + obj_at'_def) apply (wpsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def conj_ac)+ - apply (rule_tac Q="\rv. valid_queues and valid_pspace' + apply (rule_tac Q="\rv. valid_pspace' and cur_tcb' and tcb_at' sender and tcb_at' thread and sch_act_not sender and K (thread \ sender) and ep_at' (capEPPtr cap) and (\s. ksCurDomain s \ maxDomain) - and (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. sender \ set (ksReadyQueues s (d, p))))" + and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) subgoal by (auto, auto simp: st_tcb_at'_def obj_at'_def) apply (wp hoare_vcg_all_lift set_ep_valid_objs') @@ -5541,12 +5501,11 @@ lemma receiveIPC_ccorres [corres]: apply (subgoal_tac "state_refs_of' s (capEPPtr cap) = (set list) \ {EPRecv} \ thread \ (set list)") subgoal by (fastforce simp: obj_at'_def is_aligned_neg_mask objBits_simps' - projectKOs invs'_def valid_state'_def st_tcb_at'_def - valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' - isBlockedOnReceive_def projectKO_opt_tcb - from_bool_def to_bool_def - elim!: delta_sym_refs - split: if_split_asm bool.splits) (*very long*) + projectKOs invs'_def valid_state'_def st_tcb_at'_def + valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' + isBlockedOnReceive_def projectKO_opt_tcb + elim!: delta_sym_refs + split: if_split_asm bool.splits) (*very long*) apply (frule(1) sym_refs_obj_atD' [OF _ invs_sym']) apply (clarsimp simp: st_tcb_at'_def ko_wp_at'_def obj_at'_def projectKOs split: if_split_asm) @@ -5554,27 +5513,24 @@ lemma receiveIPC_ccorres [corres]: apply (case_tac "tcbState obj", simp_all add: tcb_bound_refs'_def)[1] apply (subgoal_tac "state_refs_of' s (capEPPtr cap) = {}") subgoal by (fastforce simp: obj_at'_def is_aligned_neg_mask objBits_simps' - projectKOs invs'_def valid_state'_def st_tcb_at'_def - valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' - isBlockedOnReceive_def projectKO_opt_tcb - from_bool_def to_bool_def - elim: delta_sym_refs - split: if_split_asm bool.splits) (*very long *) + projectKOs invs'_def valid_state'_def st_tcb_at'_def + valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' + isBlockedOnReceive_def projectKO_opt_tcb + elim!: delta_sym_refs + split: if_split_asm bool.splits) (*very long *) apply (clarsimp simp: obj_at'_def state_refs_of'_def projectKOs) apply (frule(1) sym_refs_ko_atD' [OF _ invs_sym']) - apply (frule invs_queues) apply clarsimp apply (rename_tac list x xa) apply (rule_tac P="x\set list" in case_split) apply (clarsimp simp:st_tcb_at_refs_of_rev') apply (erule_tac x=x and P="\x. 
st_tcb_at' P x s" for P in ballE) - apply (drule_tac t=x in valid_queues_not_runnable'_not_ksQ) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (subgoal_tac "sch_act_not x s") prefer 2 apply (frule invs_sch_act_wf') apply (clarsimp simp:sch_act_wf_def) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs isBlockedOnSend_def split: list.split | rule conjI)+ @@ -5602,11 +5558,10 @@ lemma sendSignal_dequeue_ccorres_helper: IF head_C \ntfn_queue = Ptr 0 THEN CALL notification_ptr_set_state(Ptr ntfn,scast NtfnState_Idle) FI)" - apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ntfn_blocked_in_queueD) apply (frule (1) ko_at_valid_ntfn' [OF _ invs_valid_objs']) apply (elim conjE) @@ -5626,7 +5581,7 @@ lemma sendSignal_dequeue_ccorres_helper: apply (drule ntfn_to_ep_queue, (simp add: isWaitingNtfn_def)+) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5648,23 +5603,20 @@ lemma sendSignal_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp+ - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def - tcb_queue_relation'_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def + tcb_queue_relation'_def) + apply simp apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -5690,31 +5642,28 @@ lemma sendSignal_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp+ - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (clarsimp simp: cnotification_relation_def Let_def - isWaitingNtfn_def - tcb_queue_relation'_def valid_ntfn'_def - split: Structures_H.notification.splits 
list.splits - split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (clarsimp simp: is_aligned_neg_mask - dest!: is_aligned_tcb_ptr_to_ctcb_ptr - split del: if_split) - apply (clarsimp split: if_split) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (clarsimp simp: cnotification_relation_def Let_def + isWaitingNtfn_def + tcb_queue_relation'_def valid_ntfn'_def + split: Structures_H.notification.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (clarsimp simp: is_aligned_neg_mask + dest!: is_aligned_tcb_ptr_to_ctcb_ptr + split del: if_split) + apply (clarsimp split: if_split) + apply simp apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -5800,7 +5749,7 @@ lemma sendSignal_ccorres [corres]: apply wpc apply (simp add: option_to_ctcb_ptr_def split del: if_split) apply (rule ccorres_cond_false) - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (rule ccorres_cond_true) apply (rule getThreadState_ccorres_foo) apply (rule ccorres_Guard_Seq) @@ -5815,22 +5764,21 @@ lemma sendSignal_ccorres [corres]: apply (ctac(no_vcg) add: cancelIPC_ccorres1[OF cteDeleteOne_ccorres]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: setRegister_ccorres) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) - apply (wp sts_running_valid_queues sts_st_tcb_at'_cases + apply (ctac add: possibleSwitchTo_ccorres) + apply (wp sts_valid_objs' sts_st_tcb_at'_cases | simp add: option_to_ctcb_ptr_def split del: if_split)+ apply (rule_tac Q="\_. 
tcb_at' (the (ntfnBoundTCB ntfn)) and invs'" in hoare_post_imp) apply auto[1] apply wp apply simp - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (clarsimp simp: guard_is_UNIV_def option_to_ctcb_ptr_def ARM_H.badgeRegister_def Kernel_C.badgeRegister_def ARM.badgeRegister_def Kernel_C.R0_def - "StrictC'_thread_state_defs"less_mask_eq - Collect_const_mem) + ThreadState_defs less_mask_eq Collect_const_mem) apply (case_tac ts, simp_all add: receiveBlocked_def typ_heap_simps - cthread_state_relation_def "StrictC'_thread_state_defs")[1] + cthread_state_relation_def ThreadState_defs)[1] \ \ActiveNtfn case\ apply (rename_tac old_badge) apply (rule ccorres_cond_false) @@ -5879,16 +5827,14 @@ lemma sendSignal_ccorres [corres]: apply ceqv apply (simp only: K_bind_def) apply (ctac (no_vcg)) - apply (simp, fold dc_def) + apply simp apply (ctac (no_vcg)) apply (ctac add: possibleSwitchTo_ccorres) apply (simp) - apply (wp weak_sch_act_wf_lift_linear - setThreadState_oa_queued - sts_valid_queues tcb_in_cur_domain'_lift)[1] - apply (wp sts_valid_queues sts_runnable) + apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift)[1] + apply (wp sts_valid_objs' sts_runnable) apply (wp setThreadState_st_tcb set_ntfn_valid_objs' | clarsimp)+ - apply (clarsimp simp: guard_is_UNIV_def ThreadState_Running_def mask_def + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def badgeRegister_def Kernel_C.badgeRegister_def ARM.badgeRegister_def Kernel_C.R0_def) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Idle_def @@ -5911,10 +5857,9 @@ lemma sendSignal_ccorres [corres]: done lemma receiveSignal_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and sch_act_not thread and - valid_objs' and ntfn_at' ntfnptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + "ccorres dc xfdc (tcb_at' thread and sch_act_not thread and + valid_objs' and ntfn_at' ntfnptr and pspace_aligned' and pspace_distinct' and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and K (ntfnptr = ntfnptr && ~~ mask 4)) UNIV hs (setThreadState (Structures_H.thread_state.BlockedOnNotification @@ -5943,12 +5888,11 @@ lemma receiveSignal_block_ccorres_helper: (simp add: typ_heap_simps')+) apply (simp add: tcb_cte_cases_def) apply (simp add: ctcb_relation_def cthread_state_relation_def - ThreadState_BlockedOnNotification_def mask_def - from_bool_def to_bool_def) + ThreadState_defs mask_def) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_valid_queues hoare_vcg_all_lift threadSet_valid_objs' + apply (wp hoare_vcg_all_lift threadSet_valid_objs' threadSet_weak_sch_act_wf_runnable') apply (clarsimp simp: guard_is_UNIV_def) apply (auto simp: weak_sch_act_wf_def valid_tcb'_def tcb_cte_cases_def @@ -5959,16 +5903,17 @@ lemma cpspace_relation_ntfn_update_ntfn': fixes ntfn :: "Structures_H.notification" and ntfn' :: "Structures_H.notification" and ntfnptr :: "word32" and s :: "kernel_state" defines "qs \ if isWaitingNtfn (ntfnObj ntfn') then set (ntfnQueue (ntfnObj ntfn')) else {}" - defines "s' \ s\ksPSpace := ksPSpace s(ntfnptr \ KONotification ntfn')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(ntfnptr \ KONotification ntfn')\" assumes koat: "ko_at' ntfn ntfnptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift t) Ptr (cnotification_relation (cslift t))" and srs: "sym_refs (state_refs_of' s')" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr - (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) + Ptr + (cnotification_relation (cslift t'))" proof - from koat have koat': "ko_at' ntfn' ntfnptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -6030,7 +5975,7 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ntfn) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -6046,12 +5991,12 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (simp add: cnotification_relation_def Let_def) apply (case_tac "ntfnObj ntfn", simp_all add: init_def valid_ntfn'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def ntfnBound_state_refs_equivalence obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)) ntfnptr (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -6068,31 +6013,28 @@ lemma receiveSignal_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule 
ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) apply simp - apply (rule cendpoint_relation_ntfn_queue, assumption+) - apply (simp add: isWaitingNtfn_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) - apply (case_tac "ntfn", simp_all)[1] - apply (clarsimp simp: cnotification_relation_def Let_def - mask_def [where n=2] NtfnState_Waiting_def) - subgoal by (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask_weaken - valid_ntfn'_def - dest: tcb_queue_relation_next_not_NULL) - apply (simp add: isWaitingNtfn_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=2] NtfnState_Waiting_def) + subgoal by (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask_weaken + valid_ntfn'_def + dest: tcb_queue_relation_next_not_NULL) + apply (simp add: isWaitingNtfn_def) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -6109,30 +6051,27 @@ lemma receiveSignal_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) apply simp - apply (rule cendpoint_relation_ntfn_queue, assumption+) - apply (simp add: isWaitingNtfn_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) - apply (case_tac "ntfn", simp_all)[1] - apply (clarsimp simp: cnotification_relation_def Let_def - mask_def [where n=2] NtfnState_Waiting_def - split: if_split) - apply (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask_weaken) - apply (simp add: isWaitingNtfn_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: 
objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=2] NtfnState_Waiting_def + split: if_split) + apply (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask_weaken) + apply (simp add: isWaitingNtfn_def) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -6144,7 +6083,6 @@ lemma receiveSignal_enqueue_ccorres_helper: lemma receiveSignal_ccorres [corres]: "ccorres dc xfdc (invs' and valid_cap' cap and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. thread \ set (ksReadyQueues s (d, p))) and K (isNotificationCap cap)) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ \ \ccap_relation cap \cap\ @@ -6188,11 +6126,10 @@ lemma receiveSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp) apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6203,7 +6140,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \ActiveNtfn case\ apply (rename_tac badge) apply (rule ccorres_cond_false) @@ -6259,8 +6196,7 @@ lemma receiveSignal_ccorres [corres]: apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule_tac ntfn="ntfn" - in receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule_tac ntfn="ntfn" in receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. 
projectKO_opt x = Some ntfn @@ -6272,7 +6208,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Active_def NtfnState_Waiting_def NtfnState_Idle_def) apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/ARM/IsolatedThreadAction.thy b/proof/crefine/ARM/IsolatedThreadAction.thy index 12001da3a6..40156e314b 100644 --- a/proof/crefine/ARM/IsolatedThreadAction.thy +++ b/proof/crefine/ARM/IsolatedThreadAction.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -8,37 +9,25 @@ theory IsolatedThreadAction imports ArchMove_C begin -datatype tcb_state_regs = TCBStateRegs "thread_state" "MachineTypes.register \ machine_word" - -definition - "tsrContext tsr \ case tsr of TCBStateRegs ts regs \ regs" - -definition - "tsrState tsr \ case tsr of TCBStateRegs ts regs \ ts" - -lemma accessors_TCBStateRegs[simp]: - "TCBStateRegs (tsrState v) (tsrContext v) = v" - by (cases v, simp add: tsrState_def tsrContext_def) - -lemma tsrContext_simp[simp]: - "tsrContext (TCBStateRegs st con) = con" - by (simp add: tsrContext_def) +context begin interpretation Arch . (*FIXME: arch_split*) -lemma tsrState_simp[simp]: - "tsrState (TCBStateRegs st con) = st" - by (simp add: tsrState_def) +datatype tcb_state_regs = + TCBStateRegs (tsrState : thread_state) (tsrContext : "MachineTypes.register \ machine_word") definition get_tcb_state_regs :: "kernel_object option \ tcb_state_regs" where "get_tcb_state_regs oko \ case oko of - Some (KOTCB tcb) \ TCBStateRegs (tcbState tcb) ((atcbContextGet o tcbArch) tcb)" + Some (KOTCB tcb) \ TCBStateRegs (tcbState tcb) ((user_regs o atcbContextGet o tcbArch) tcb)" definition put_tcb_state_regs_tcb :: "tcb_state_regs \ tcb \ tcb" where "put_tcb_state_regs_tcb tsr tcb \ case tsr of - TCBStateRegs st regs \ tcb \ tcbState := st, tcbArch := atcbContextSet regs (tcbArch tcb) \" + TCBStateRegs st regs \ + tcb \ tcbState := st, + tcbArch := atcbContextSet (UserContext regs) + (tcbArch tcb) \" definition put_tcb_state_regs :: "tcb_state_regs \ kernel_object option \ kernel_object option" @@ -119,8 +108,6 @@ lemmas setEndpoint_obj_at_tcb' = setEndpoint_obj_at'_tcb lemmas setNotification_tcb = set_ntfn_tcb_obj_at' -context begin interpretation Arch . 
(*FIXME: arch_split*) - lemma setObject_modify: fixes v :: "'a :: pspace_storable" shows "\ obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; @@ -152,8 +139,6 @@ lemma getObject_return: apply (simp add: magnitudeCheck_assert in_monad) done -end - lemmas getObject_return_tcb = getObject_return[OF meta_eq_to_obj_eq, OF loadObject_tcb, unfolded objBits_simps', simplified] @@ -172,13 +157,13 @@ lemma partial_overwrite_fun_upd: lemma get_tcb_state_regs_ko_at': "ko_at' ko p s \ get_tcb_state_regs (ksPSpace s p) - = TCBStateRegs (tcbState ko) ((atcbContextGet o tcbArch) ko)" + = TCBStateRegs (tcbState ko) ((user_regs o atcbContextGet o tcbArch) ko)" by (clarsimp simp: obj_at'_def projectKOs get_tcb_state_regs_def) lemma put_tcb_state_regs_ko_at': "ko_at' ko p s \ put_tcb_state_regs tsr (ksPSpace s p) = Some (KOTCB (ko \ tcbState := tsrState tsr - , tcbArch := atcbContextSet (tsrContext tsr) (tcbArch ko)\))" + , tcbArch := atcbContextSet (UserContext (tsrContext tsr)) (tcbArch ko)\))" by (clarsimp simp: obj_at'_def projectKOs put_tcb_state_regs_def put_tcb_state_regs_tcb_def split: tcb_state_regs.split) @@ -209,7 +194,7 @@ lemma ksPSpace_update_partial_id: done lemma isolate_thread_actions_asUser: - "\ idx t' = t; inj idx; f = (\s. ({(v, g s)}, False)) \ \ + "\ idx t' = t; inj idx; f = (\s. ({(v, modify_registers g s)}, False)) \ \ monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) (asUser t f) (isolate_thread_actions idx (return v) @@ -230,17 +215,30 @@ lemma isolate_thread_actions_asUser: apply (clarsimp simp: partial_overwrite_get_tcb_state_regs put_tcb_state_regs_ko_at') apply (case_tac ko, simp) + apply (rename_tac uc) + apply (case_tac uc, simp add: modify_registers_def atcbContextGet_def atcbContextSet_def) done -context begin interpretation Arch . (*FIXME: arch_split*) +lemma getRegister_simple: + "getRegister r = (\con. ({(user_regs con r, con)}, False))" + by (simp add: getRegister_def simpler_gets_def) + +lemma mapM_getRegister_simple: + "mapM getRegister rs = (\con. ({(map (user_regs con) rs, con)}, False))" + apply (induct rs) + apply (simp add: mapM_Nil return_def) + apply (simp add: mapM_Cons getRegister_def simpler_gets_def + bind_def return_def) + done lemma setRegister_simple: - "setRegister r v = (\con. ({((), con (r := v))}, False))" + "setRegister r v = (\con. ({((), UserContext ((user_regs con)(r := v)))}, False))" by (simp add: setRegister_def simpler_modify_def) lemma zipWithM_setRegister_simple: "zipWithM_x setRegister rs vs - = (\con. ({((), foldl (\con (r, v). con (r := v)) con (zip rs vs))}, False))" + = (\con. ({((), + UserContext (foldl (\regs (r, v). ((regs)(r := v))) (user_regs con) (zip rs vs)))}, False))" supply if_split[split del] apply (simp add: zipWithM_x_mapM_x) apply (induct ("zip rs vs")) @@ -249,6 +247,18 @@ lemma zipWithM_setRegister_simple: simpler_modify_def fun_upd_def[symmetric]) done +(* this variant used in fastpath rewrite proof *) +lemma setRegister_simple_modify_registers: + "setRegister r v = (\con. ({((), modify_registers (\f. f(r := v)) con)}, False))" + by (simp add: modify_registers_def setRegister_simple) + +(* this variant used in fastpath rewrite proof *) +lemma zipWithM_setRegister_simple_modify_registers: + "zipWithM_x setRegister rs vs + = (\con. ({((), modify_registers (\regs. foldl (\f (r,v). f(r := v)) regs (zip rs vs)) con)}, + False))" + by (simp add: modify_registers_def zipWithM_setRegister_simple) + lemma dom_partial_overwrite: "\x. 
tcb_at' (idx x) s \ dom (partial_overwrite idx tsrs (ksPSpace s)) = dom (ksPSpace s)" @@ -361,6 +371,9 @@ lemma getObject_get_assert: apply (simp add: lookupAround2_known1 assert_opt_def obj_at'_def projectKO_def2 split: option.split) + apply (rule conjI) + apply (clarsimp simp: fail_def fst_return conj_comms project_inject + objBits_def bind_def simpler_gets_def) apply (clarsimp simp: fail_def fst_return conj_comms project_inject objBits_def) apply (simp only: assert2[symmetric], @@ -456,7 +469,7 @@ lemma modify_isolatable: liftM_def bind_assoc) apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def) - apply (simp add: simpler_modify_def o_def) + apply (simp add: simpler_modify_def) apply (subst swap) apply (simp add: obj_at_partial_overwrite_If) apply (simp add: ksPSpace_update_partial_id o_def) @@ -513,15 +526,15 @@ lemma thread_actions_isolatable_bind: \t. \tcb_at' t\ f \\rv. tcb_at' t\ \ \ thread_actions_isolatable idx (f >>= g)" apply (clarsimp simp: thread_actions_isolatable_def) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) - apply (erule monadic_rewrite_bind2, assumption) + apply (erule monadic_rewrite_bind_l, assumption) apply (rule hoare_vcg_all_lift, assumption) apply (subst isolate_thread_actions_wrap_bind, simp) apply simp apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) apply (rule monadic_rewrite_transverse) - apply (erule monadic_rewrite_bind2, assumption) + apply (erule monadic_rewrite_bind_l, assumption) apply (rule hoare_vcg_all_lift, assumption) apply (simp add: bind_assoc id_def) apply (rule monadic_rewrite_refl) @@ -586,7 +599,7 @@ lemma select_f_isolatable: apply (clarsimp simp: thread_actions_isolatable_def isolate_thread_actions_def split_def select_f_selects liftM_def bind_assoc) - apply (rule monadic_rewrite_imp, rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_guard_imp, rule monadic_rewrite_transverse) apply (rule monadic_rewrite_drop_modify monadic_rewrite_bind_tail)+ apply wp+ apply (simp add: gets_bind_ign getSchedulerAction_def) @@ -690,12 +703,10 @@ lemma transferCaps_simple_rewrite: (transferCaps mi caps ep r rBuf) (return (mi \ msgExtraCaps := 0, msgCapsUnwrapped := 0 \))" including no_pre + supply empty_fail_getReceiveSlots[wp] (* FIXME *) apply (rule monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (simp add: transferCaps_simple, rule monadic_rewrite_refl) - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getReceiveSlots)+) - apply (rule monadic_rewrite_refl) + apply (simp add: transferCaps_simple) + apply (monadic_rewrite_symb_exec_l_drop, rule monadic_rewrite_refl) apply simp done @@ -709,7 +720,8 @@ lemma lookupExtraCaps_simple_rewrite: lemma lookupIPC_inv: "\P\ lookupIPCBuffer f t \\rv. P\" by wp -lemmas empty_fail_user_getreg = empty_fail_asUser[OF empty_fail_getRegister] +(* FIXME move *) +lemmas empty_fail_user_getreg[intro!, wp, simp] = empty_fail_asUser[OF empty_fail_getRegister] lemma copyMRs_simple: "msglen \ of_nat (length msgRegisters) \ @@ -732,7 +744,7 @@ lemma doIPCTransfer_simple_rewrite: \ msgLength (messageInfoFromWord msgInfo) \ of_nat (length msgRegisters)) and obj_at' (\tcb. tcbFault tcb = None - \ (atcbContextGet o tcbArch) tcb msgInfoRegister = msgInfo) sender) + \ (user_regs o atcbContextGet o tcbArch) tcb msgInfoRegister = msgInfo) sender) (doIPCTransfer sender ep badge grant rcvr) (do rv \ mapM_x (\r. 
do v \ asUser sender (getRegister r); asUser rcvr (setRegister r v) @@ -746,25 +758,23 @@ lemma doIPCTransfer_simple_rewrite: apply (simp add: doIPCTransfer_def bind_assoc doNormalTransfer_def getMessageInfo_def cong: option.case_cong) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail)+ - apply (rule_tac P="fault = None" in monadic_rewrite_gen_asm, simp) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l_known None, simp) apply (rule monadic_rewrite_bind_tail) - apply (rule_tac x=msgInfo in monadic_rewrite_symb_exec, - (wp empty_fail_user_getreg user_getreg_rv)+) - apply (simp add: lookupExtraCaps_simple_rewrite returnOk_catch_bind) - apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_from_simple, rule copyMRs_simple) - apply (rule monadic_rewrite_bind_head) - apply (rule transferCaps_simple_rewrite) - apply (wp threadGet_const)+ + apply (monadic_rewrite_symb_exec_l_known msgInfo) + apply (simp add: lookupExtraCaps_simple_rewrite returnOk_catch_bind) + apply (rule monadic_rewrite_bind) + apply (rule monadic_rewrite_from_simple, rule copyMRs_simple) + apply (rule monadic_rewrite_bind_head) + apply (rule transferCaps_simple_rewrite) + apply (wp threadGet_const user_getreg_rv asUser_inv)+ apply (simp add: bind_assoc) - apply (rule monadic_rewrite_symb_exec2[OF lookupIPC_inv empty_fail_lookupIPCBuffer] - monadic_rewrite_symb_exec2[OF threadGet_inv empty_fail_threadGet] - monadic_rewrite_symb_exec2[OF user_getreg_inv' empty_fail_user_getreg] - monadic_rewrite_bind_head monadic_rewrite_bind_tail - | wp)+ + apply (rule monadic_rewrite_symb_exec_l_drop[OF _ lookupIPC_inv empty_fail_lookupIPCBuffer] + monadic_rewrite_symb_exec_l_drop[OF _ threadGet_inv empty_fail_threadGet] + monadic_rewrite_symb_exec_l_drop[OF _ user_getreg_inv' empty_fail_user_getreg] + monadic_rewrite_bind_head monadic_rewrite_bind_tail)+ apply (case_tac "messageInfoFromWord msgInfo") apply simp apply (rule monadic_rewrite_refl) @@ -773,9 +783,10 @@ lemma doIPCTransfer_simple_rewrite: apply (auto elim!: obj_at'_weakenE) done -lemma empty_fail_isRunnable: +(* FIXME move *) +lemma empty_fail_isRunnable[intro!, wp, simp]: "empty_fail (isRunnable t)" - by (simp add: isRunnable_def isStopped_def) + by (simp add: isRunnable_def isStopped_def empty_fail_cond) lemma setupCallerCap_rewrite: "monadic_rewrite True True (\s. 
reply_masters_rvk_fb (ctes_of s)) @@ -794,23 +805,19 @@ lemma setupCallerCap_rewrite: apply (simp add: setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv getSlotCap_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_bind_tail)+ - apply (rule monadic_rewrite_assert)+ - apply (rule_tac P="mdbFirstBadged (cteMDBNode masterCTE) - \ mdbRevocable (cteMDBNode masterCTE)" - in monadic_rewrite_gen_asm) - apply simp - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec2, (wp | simp)+)+ - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getCTE)+)+ - apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp' | simp add: cte_wp_at_ctes_of)+ - apply (clarsimp simp: reply_masters_rvk_fb_def) - apply fastforce + apply (rule monadic_rewrite_bind_tail)+ + apply (rule monadic_rewrite_assert)+ + apply (rule_tac P="mdbFirstBadged (cteMDBNode masterCTE) + \ mdbRevocable (cteMDBNode masterCTE)" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_trans) + apply monadic_rewrite_symb_exec_l + apply monadic_rewrite_symb_exec_l_drop + apply (rule monadic_rewrite_refl) + apply wpsimp+ + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ + apply (fastforce simp: reply_masters_rvk_fb_def) done lemma oblivious_getObject_ksPSpace_default: @@ -878,44 +885,30 @@ lemma oblivious_switchToThread_schact: threadSet_def tcbSchedEnqueue_def unless_when asUser_def getQueue_def setQueue_def storeWordUser_def setRegister_def pointerInUserData_def isRunnable_def isStopped_def - getThreadState_def tcbSchedDequeue_def bitmap_fun_defs) + getThreadState_def tcbSchedDequeue_def tcbQueueRemove_def bitmap_fun_defs + ksReadyQueues_asrt_def) by (safe intro!: oblivious_bind - | simp_all add: oblivious_setVMRoot_schact)+ + | simp_all add: ready_qs_runnable_def idleThreadNotQueued_def + oblivious_setVMRoot_schact)+ -lemma empty_fail_getCurThread[iff]: +(* FIXME move *) +lemma empty_fail_getCurThread[intro!, wp, simp]: "empty_fail getCurThread" by (simp add: getCurThread_def) + lemma activateThread_simple_rewrite: "monadic_rewrite True True (ct_in_state' ((=) Running)) (activateThread) (return ())" apply (simp add: activateThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_tail)+ - apply (rule_tac P="state = Running" in monadic_rewrite_gen_asm) - apply simp + apply wp_pre + apply (monadic_rewrite_symb_exec_l) + apply (monadic_rewrite_symb_exec_l_known Running, simp) apply (rule monadic_rewrite_refl) - apply wp - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getThreadState)+) - apply (rule monadic_rewrite_refl) - apply wp - apply (rule monadic_rewrite_symb_exec2, - simp_all add: getCurThread_def) - apply (rule monadic_rewrite_refl) + apply wpsimp+ apply (clarsimp simp: ct_in_state'_def elim!: pred_tcb'_weakenE) done end -lemma setCTE_obj_at_prio[wp]: - "\obj_at' (\tcb. P (tcbPriority tcb)) t\ setCTE p v \\rv. obj_at' (\tcb. P (tcbPriority tcb)) t\" - unfolding setCTE_def - by (rule setObject_cte_obj_at_tcb', simp+) - -crunch obj_at_prio[wp]: cteInsert "obj_at' (\tcb. P (tcbPriority tcb)) t" - (wp: crunch_wps) - -crunch ctes_of[wp]: asUser "\s. P (ctes_of s)" - (wp: crunch_wps) - lemma tcbSchedEnqueue_tcbPriority[wp]: "\obj_at' (\tcb. 
P (tcbPriority tcb)) t\ tcbSchedEnqueue t' @@ -925,9 +918,8 @@ lemma tcbSchedEnqueue_tcbPriority[wp]: done crunch obj_at_prio[wp]: cteDeleteOne "obj_at' (\tcb. P (tcbPriority tcb)) t" - (wp: crunch_wps setEndpoint_obj_at_tcb' - setThreadState_obj_at_unchanged setNotification_tcb setBoundNotification_obj_at_unchanged - simp: crunch_simps unless_def) + (wp: crunch_wps setEndpoint_obj_at'_tcb setNotification_tcb + simp: crunch_simps unless_def setBoundNotification_def) context notes if_cong[cong] @@ -939,9 +931,9 @@ lemma setThreadState_no_sch_change: "\\s. P (ksSchedulerAction s) \ (runnable' st \ t \ ksCurThread s)\ setThreadState st t \\rv s. P (ksSchedulerAction s)\" - (is "NonDetMonad.valid ?P ?f ?Q") + (is "Nondet_VCG.valid ?P ?f ?Q") apply (simp add: setThreadState_def setSchedulerAction_def) - apply (wp hoare_pre_cont[where a=rescheduleRequired]) + apply (wp hoare_pre_cont[where f=rescheduleRequired]) apply (rule_tac Q="\_. ?P and st_tcb_at' ((=) st) t" in hoare_post_imp) apply (clarsimp split: if_split) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs) @@ -1045,14 +1037,11 @@ lemma setCTE_assert_modify: apply (subst updateObject_cte_tcb) apply (fastforce simp add: subtract_mask) apply (simp add: assert_opt_def alignCheck_assert bind_assoc - magnitudeCheck_assert - is_aligned_neg_mask2 objBits_def) + magnitudeCheck_assert objBits_def) apply (rule ps_clear_lookupAround2, assumption+) apply (rule word_and_le2) apply (simp add: objBits_simps mask_def field_simps) apply (simp add: simpler_modify_def cong: option.case_cong if_cong) - apply (rule kernel_state.fold_congs[OF refl refl]) - apply (clarsimp simp: projectKO_opt_tcb cong: if_cong) apply (clarsimp simp: lookupAround2_char1 word_and_le2) apply (rule ccontr, clarsimp) apply (erule(2) ps_clearD) @@ -1084,13 +1073,17 @@ lemma partial_overwrite_fun_upd2: else y)" by (simp add: fun_eq_iff partial_overwrite_def split: if_split) +lemma atcbContextSetSetGet_eq[simp]: + "atcbContextSet (UserContext (user_regs (atcbContextGet t))) t = t" + by (cases t, simp add: atcbContextSet_def atcbContextGet_def) + lemma setCTE_isolatable: "thread_actions_isolatable idx (setCTE p v)" supply if_split[split del] apply (simp add: setCTE_assert_modify) apply (clarsimp simp: thread_actions_isolatable_def monadic_rewrite_def fun_eq_iff - liftM_def exec_gets + liftM_def isolate_thread_actions_def bind_assoc exec_gets getSchedulerAction_def bind_select_f_bind[symmetric] @@ -1118,8 +1111,7 @@ lemma setCTE_isolatable: apply (erule notE[rotated], erule (3) tcb_ctes_clear[rotated]) apply (simp add: select_f_returns select_f_asserts split: if_split) apply (intro conjI impI) - apply (clarsimp simp: simpler_modify_def fun_eq_iff - partial_overwrite_fun_upd2 o_def + apply (clarsimp simp: simpler_modify_def fun_eq_iff partial_overwrite_fun_upd2 intro!: kernel_state.fold_congs[OF refl refl]) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) apply (erule notE[rotated], rule tcb_ctes_clear[rotated 2], assumption+) @@ -1192,37 +1184,24 @@ lemma thread_actions_isolatableD: lemma tcbSchedDequeue_rewrite: "monadic_rewrite True True (obj_at' (Not \ tcbQueued) t) (tcbSchedDequeue t) (return ())" apply (simp add: tcbSchedDequeue_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ queued" in monadic_rewrite_gen_asm) - apply (simp add: when_def) + apply wp_pre + apply monadic_rewrite_symb_exec_l + apply (monadic_rewrite_symb_exec_l_known False, simp) apply (rule 
monadic_rewrite_refl) - apply (wp threadGet_const) - apply (rule monadic_rewrite_symb_exec2) - apply wp+ - apply (rule monadic_rewrite_refl) - apply (clarsimp) + apply (wpsimp wp: threadGet_const)+ done +(* FIXME: improve automation here *) lemma switchToThread_rewrite: "monadic_rewrite True True (ct_in_state' (Not \ runnable') and cur_tcb' and obj_at' (Not \ tcbQueued) t) (switchToThread t) (do Arch.switchToThread t; setCurThread t od)" apply (simp add: switchToThread_def Thread_H.switchToThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind) - apply (rule tcbSchedDequeue_rewrite) - apply (rule monadic_rewrite_refl) - apply (wp Arch_switchToThread_obj_at_pre)+ - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec) - apply (wp+, simp) - apply (rule monadic_rewrite_refl) - apply (wp) + apply (monadic_rewrite_l tcbSchedDequeue_rewrite, simp) + (* strip LHS of getters and asserts until LHS and RHS are the same *) + apply (repeat_unless \rule monadic_rewrite_refl\ monadic_rewrite_symb_exec_l) + apply wpsimp+ apply (clarsimp simp: comp_def) done @@ -1266,9 +1245,33 @@ lemma threadGet_isolatable: split: tcb_state_regs.split)+ done +lemma tcbQueued_put_tcb_state_regs_tcb: + "tcbQueued (put_tcb_state_regs_tcb tsr tcb) = tcbQueued tcb" + apply (clarsimp simp: put_tcb_state_regs_tcb_def) + by (cases tsr; clarsimp) + +lemma idleThreadNotQueued_isolatable: + "thread_actions_isolatable idx (stateAssert idleThreadNotQueued [])" + apply (simp add: stateAssert_def2 stateAssert_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + gets_isolatable + thread_actions_isolatable_if + thread_actions_isolatable_returns + thread_actions_isolatable_fail) + unfolding idleThreadNotQueued_def + apply (clarsimp simp: obj_at_partial_overwrite_If) + apply (clarsimp simp: obj_at'_def tcbQueued_put_tcb_state_regs_tcb) + apply wpsimp+ + done + lemma setCurThread_isolatable: "thread_actions_isolatable idx (setCurThread t)" - by (simp add: setCurThread_def modify_isolatable) + unfolding setCurThread_def + apply (rule thread_actions_isolatable_bind) + apply (rule idleThreadNotQueued_isolatable) + apply (fastforce intro: modify_isolatable) + apply wpsimp + done lemma isolate_thread_actions_tcbs_at: assumes f: "\x. \tcb_at' (idx x)\ f \\rv. tcb_at' (idx x)\" shows @@ -1291,7 +1294,7 @@ lemma isolate_thread_actions_rewrite_bind: \ monadic_rewrite False True (\s. \x. 
tcb_at' (idx x) s) (f >>= g) (isolate_thread_actions idx (f' >>= g') (g'' o f'') (g''' o f'''))" - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) apply (rule monadic_rewrite_bind, assumption+) apply (wp isolate_thread_actions_tcbs_at) @@ -1299,7 +1302,7 @@ lemma isolate_thread_actions_rewrite_bind: apply (subst isolate_thread_actions_wrap_bind, assumption) apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) apply (rule monadic_rewrite_transverse) - apply (rule monadic_rewrite_bind2) + apply (rule monadic_rewrite_bind_l) apply (erule(1) thread_actions_isolatableD) apply (rule thread_actions_isolatableD, assumption+) apply (rule hoare_vcg_all_lift, assumption) @@ -1356,6 +1359,7 @@ lemma copy_register_isolate: apply (case_tac obj, case_tac obja) apply (simp add: projectKO_opt_tcb put_tcb_state_regs_def put_tcb_state_regs_tcb_def get_tcb_state_regs_def + atcbContextGet_def cong: if_cong) apply (auto simp: fun_eq_iff split: if_split) done @@ -1373,7 +1377,7 @@ lemma monadic_rewrite_isolate_final2: (isolate_thread_actions idx f f' f'') (isolate_thread_actions idx g g' g'')" apply (simp add: isolate_thread_actions_def split_def) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_bind_tail)+ apply (rule_tac P="\ s'. Q s" in monadic_rewrite_bind) apply (insert mr)[1] @@ -1381,14 +1385,14 @@ lemma monadic_rewrite_isolate_final2: apply auto[1] apply (rule_tac P="P and (\s. tcbs = get_tcb_state_regs o ksPSpace s o idx \ sa = ksSchedulerAction s)" - in monadic_rewrite_refl3) + in monadic_rewrite_pre_imp_eq) apply (clarsimp simp: exec_modify eqs return_def) apply wp+ apply (clarsimp simp: o_def eqs) done lemmas monadic_rewrite_isolate_final - = monadic_rewrite_isolate_final2[where R=\, OF monadic_rewrite_refl2, simplified] + = monadic_rewrite_isolate_final2[where R=\, OF monadic_rewrite_is_refl, simplified] lemma copy_registers_isolate_general: "\ inj idx; idx x = t; idx y = t' \ \ @@ -1408,7 +1412,7 @@ lemma copy_registers_isolate_general: select_f_returns o_def ksPSpace_update_partial_id) apply (simp add: return_def simpler_modify_def) apply (simp add: mapM_x_Cons) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) apply (rule isolate_thread_actions_rewrite_bind, assumption) apply (rule copy_register_isolate, assumption+) @@ -1474,7 +1478,8 @@ lemmas fastpath_isolatables thread_actions_isolatable_returns lemmas fastpath_isolate_rewrites - = isolate_thread_actions_threadSet_tcbState isolate_thread_actions_asUser + = isolate_thread_actions_threadSet_tcbState + isolate_thread_actions_asUser copy_registers_isolate setSchedulerAction_isolate fastpath_isolatables[THEN thread_actions_isolatableD] @@ -1502,27 +1507,17 @@ lemma setThreadState_rewrite_simple: (setThreadState st t) (threadSet (tcbState_update (\_. 
st)) t)" supply if_split[split del] - apply (simp add: setThreadState_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail)+ - apply (simp add: when_def) - apply (rule monadic_rewrite_gen_asm) - apply (subst if_not_P) - apply assumption - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_symb_exec2, - (wp empty_fail_isRunnable - | (simp only: getCurThread_def getSchedulerAction_def - , rule empty_fail_gets))+)+ - apply (rule monadic_rewrite_refl) - apply (simp add: conj_comms, wp hoare_vcg_imp_lift threadSet_tcbState_st_tcb_at') - apply (clarsimp simp: obj_at'_def sch_act_simple_def st_tcb_at'_def) + apply (simp add: setThreadState_def when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at'\) + (* take the threadSet, drop everything until return () *) + apply (rule monadic_rewrite_trans[OF monadic_rewrite_bind_tail]) + apply (rule monadic_rewrite_symb_exec_l_drop)+ + apply (rule monadic_rewrite_refl) + apply (wpsimp simp: getCurThread_def + wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at')+ apply (rule monadic_rewrite_refl) - apply clarsimp + apply (clarsimp simp: obj_at'_def sch_act_simple_def st_tcb_at'_def) done end diff --git a/proof/crefine/ARM/Machine_C.thy b/proof/crefine/ARM/Machine_C.thy index 10bd73d19c..edd276229a 100644 --- a/proof/crefine/ARM/Machine_C.thy +++ b/proof/crefine/ARM/Machine_C.thy @@ -413,13 +413,13 @@ lemma cleanCacheRange_PoC_ccorres: apply (clarsimp simp: cleanCacheRange_PoC_def word_sle_def whileAnno_def) apply (ccorres_remove_UNIV_guard) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_cleanByVA) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: cleanByVA_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) @@ -442,7 +442,7 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: apply (rule ccorres_basic_srnoop) apply (simp add: cleanInvalidateCacheRange_RAM_def doMachineOp_bind empty_fail_dsb empty_fail_cleanInvalidateL2Range - empty_fail_cleanInvalByVA) + empty_fail_cleanInvalByVA empty_fail_cond) apply (ctac (no_vcg) add: cleanCacheRange_PoC_ccorres) apply (ctac (no_vcg) add: dsb_ccorres) apply (ctac (no_vcg) add: cleanInvalidateL2Range_ccorres) @@ -460,8 +460,8 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanInvalByVA_modifies) apply (rule ceqv_refl) - apply (ctac (no_vcg) add: dsb_ccorres[simplified dc_def]) - apply (wp | clarsimp simp: guard_is_UNIVI o_def)+ + apply (ctac (no_vcg) add: dsb_ccorres) + apply (wp | clarsimp simp: guard_is_UNIVI)+ apply (frule(1) ghost_assertion_size_logic) apply (clarsimp simp: o_def) done @@ -474,7 +474,8 @@ lemma cleanCacheRange_RAM_ccorres: (doMachineOp (cleanCacheRange_RAM w1 w2 w3)) (Call cleanCacheRange_RAM_'proc)" apply (cinit' lift: start_' end_' pstart_') - apply (simp add: cleanCacheRange_RAM_def doMachineOp_bind empty_fail_dsb empty_fail_cleanL2Range) + apply (simp add: cleanCacheRange_RAM_def doMachineOp_bind empty_fail_dsb 
empty_fail_cleanL2Range + empty_fail_cond) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop2, simp) apply (ctac (no_vcg) add: cleanCacheRange_PoC_ccorres) @@ -483,7 +484,7 @@ lemma cleanCacheRange_RAM_ccorres: in ccorres_cross_over_guard) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop2, simp) - apply (ctac (no_vcg) add: cleanL2Range_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: cleanL2Range_ccorres) apply wp+ apply clarsimp apply (auto dest: ghost_assertion_size_logic simp: o_def) @@ -504,13 +505,13 @@ lemma cleanCacheRange_PoU_ccorres: apply (rule ccorres_basic_srnoop2, simp) apply (simp add: cleanCacheRange_PoU_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_cleanByVA_PoU) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) @@ -536,21 +537,21 @@ lemma invalidateCacheRange_RAM_ccorres: apply (clarsimp simp: word_sle_def whileAnno_def split del: if_split) apply (ccorres_remove_UNIV_guard) apply (simp add: invalidateCacheRange_RAM_def doMachineOp_bind when_def - if_split_empty_fail empty_fail_invalidateL2Range empty_fail_invalidateByVA - empty_fail_dsb dmo_if + empty_fail_invalidateL2Range empty_fail_invalidateByVA + empty_fail_dsb dmo_if empty_fail_cond split del: if_split) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) apply (clarsimp simp: lineStart_def cacheLineBits_def) apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply ceqv apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) apply (clarsimp simp: lineStart_def cacheLineBits_def) apply csymbr apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply ceqv apply (rule_tac P="\s. 
unat (w2 - w1) \ gsMaxObjectSize s" in ccorres_cross_over_guard) @@ -573,7 +574,7 @@ lemma invalidateCacheRange_RAM_ccorres: apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=invalidateByVA_modifies) apply ceqv - apply (ctac add: dsb_ccorres[unfolded dc_def]) + apply (ctac add: dsb_ccorres) apply wp apply (simp add: guard_is_UNIV_def) apply wp @@ -601,13 +602,13 @@ lemma invalidateCacheRange_I_ccorres: apply (ccorres_remove_UNIV_guard) apply (simp add: invalidateCacheRange_I_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_invalidateByVA_I) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: invalidateByVA_I_ccorres[unfolded dc_def]) + apply (ctac add: invalidateByVA_I_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) @@ -627,13 +628,13 @@ lemma branchFlushRange_ccorres: apply (ccorres_remove_UNIV_guard) apply (simp add: branchFlushRange_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_branchFlush) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: branchFlush_ccorres[unfolded dc_def]) + apply (ctac add: branchFlush_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) @@ -646,7 +647,7 @@ lemma cleanCaches_PoU_ccorres: (doMachineOp cleanCaches_PoU) (Call cleanCaches_PoU_'proc)" apply cinit' - apply (simp add: cleanCaches_PoU_def doMachineOp_bind + apply (simp add: cleanCaches_PoU_def doMachineOp_bind empty_fail_cond empty_fail_dsb empty_fail_clean_D_PoU empty_fail_invalidate_I_PoU) apply (ctac (no_vcg) add: dsb_ccorres) apply (ctac (no_vcg) add: clean_D_PoU_ccorres) @@ -672,7 +673,7 @@ lemma setCurrentPD_ccorres: (Call setCurrentPD_'proc)" apply cinit' apply (clarsimp simp: setCurrentPD_def doMachineOp_bind empty_fail_dsb empty_fail_isb - writeTTBR0_empty_fail + writeTTBR0_empty_fail empty_fail_cond intro!: ccorres_cond_empty) apply (rule ccorres_rhs_assoc)+ apply (ctac (no_vcg) add: dsb_ccorres) diff --git a/proof/crefine/ARM/PSpace_C.thy b/proof/crefine/ARM/PSpace_C.thy index b08ffdecc2..3cb5beee00 100644 --- a/proof/crefine/ARM/PSpace_C.thy +++ b/proof/crefine/ARM/PSpace_C.thy @@ -49,7 +49,7 @@ lemma setObject_ccorres_helper: fixes ko :: "'a :: pspace_storable" assumes valid: "\\ (ko' :: 'a). \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ko_at' ko' p \} - c {s. (\\ksPSpace := ksPSpace \ (p \ injectKO ko)\, s) \ rf_sr}" + c {s. (\\ksPSpace := (ksPSpace \)(p \ injectKO ko)\, s) \ rf_sr}" shows "\ \ko :: 'a. updateObject ko = updateObject_default ko; \ko :: 'a. (1 :: word32) < 2 ^ objBits ko \ \ ccorres dc xfdc P P' hs (setObject p ko) c" diff --git a/proof/crefine/ARM/Recycle_C.thy b/proof/crefine/ARM/Recycle_C.thy index 6b8582dc8b..bd9f5452c6 100644 --- a/proof/crefine/ARM/Recycle_C.thy +++ b/proof/crefine/ARM/Recycle_C.thy @@ -230,7 +230,7 @@ lemma mapM_x_store_memset_ccorres_assist: "\ko :: 'a. (1 :: word32) < 2 ^ objBits ko" assumes restr: "set slots \ S" assumes worker: "\ptr s s' (ko :: 'a). 
\ (s, s') \ rf_sr; ko_at' ko ptr s; ptr \ S \ - \ (s \ ksPSpace := ksPSpace s (ptr \ injectKO val)\, + \ (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val)\, globals_update (t_hrs_'_update (hrs_mem_update (heap_update_list ptr (replicateHider (2 ^ objBits val) (ucast c))))) s') \ rf_sr" @@ -304,8 +304,8 @@ lemma invalidateTLBByASID_ccorres: apply (simp add: case_option_If2 del: Collect_const) apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) apply (clarsimp simp: pde_stored_asid_def to_bool_def split: if_split) - apply (rule ccorres_return_void_C[unfolded dc_def]) - apply (simp add: dc_def[symmetric]) + apply (rule ccorres_return_void_C) + apply simp apply csymbr apply (ctac add: invalidateTranslationASID_ccorres) apply vcg @@ -484,8 +484,8 @@ lemma cpspace_relation_ep_update_ep2: (cslift t) ep_Ptr (cendpoint_relation (cslift t)); cendpoint_relation (cslift t') ep' endpoint; (cslift t' :: tcb_C ptr \ tcb_C) = cslift t \ - \ cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(ep_Ptr epptr \ endpoint)) + \ cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(ep_Ptr epptr \ endpoint)) ep_Ptr (cendpoint_relation (cslift t'))" apply (rule cmap_relationE1, assumption, erule ko_at_projectKO_opt) apply (rule_tac P="\a. cmap_relation a b c d" for b c d in rsubst, @@ -528,7 +528,7 @@ lemma ctcb_relation_blocking_ipc_badge: apply (simp add: isBlockedOnSend_def split: Structures_H.thread_state.split_asm) apply (clarsimp simp: cthread_state_relation_def) apply (clarsimp simp add: ctcb_relation_def cthread_state_relation_def) - apply (cases "tcbState tcb", simp_all add: "StrictC'_thread_state_defs") + apply (cases "tcbState tcb", simp_all add: ThreadState_defs) done lemma cendpoint_relation_q_cong: @@ -550,16 +550,6 @@ lemma cnotification_relation_q_cong: apply (auto intro: iffD1[OF tcb_queue_relation'_cong[OF refl refl refl]]) done -lemma tcbSchedEnqueue_ep_at: - "\obj_at' (P :: endpoint \ bool) ep\ - tcbSchedEnqueue t - \\rv. obj_at' P ep\" - including no_pre - apply (simp add: tcbSchedEnqueue_def unless_def null_def) - apply (wp threadGet_wp, clarsimp, wp+) - apply (clarsimp split: if_split, wp) - done - lemma ccorres_duplicate_guard: "ccorres r xf (P and P) Q hs f f' \ ccorres r xf P Q hs f f'" by (erule ccorres_guard_imp, auto) @@ -579,12 +569,13 @@ lemma cancelBadgedSends_ccorres: (UNIV \ {s. epptr_' s = Ptr ptr} \ {s. 
badge_' s = bdg}) [] (cancelBadgedSends ptr bdg) (Call cancelBadgedSends_'proc)" apply (cinit lift: epptr_' badge_' simp: whileAnno_def) - apply (simp add: list_case_return2 + apply (rule ccorres_stateAssert) + apply (simp add: list_case_return cong: list.case_cong Structures_H.endpoint.case_cong call_ignore_cong del: Collect_const) - apply (rule ccorres_pre_getEndpoint) - apply (rule_tac R="ko_at' rv ptr" and xf'="ret__unsigned_'" - and val="case rv of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle + apply (rule ccorres_pre_getEndpoint, rename_tac ep) + apply (rule_tac R="ko_at' ep ptr" and xf'="ret__unsigned_'" + and val="case ep of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle | SendEP q \ scast EPState_Send" in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg @@ -594,22 +585,22 @@ lemma cancelBadgedSends_ccorres: split: Structures_H.endpoint.split_asm) apply ceqv apply wpc - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) apply (simp add: Collect_True Collect_False endpoint_state_defs - ccorres_cond_iffs dc_def[symmetric] + ccorres_cond_iffs del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (drule_tac s = rv in sym, simp only:) - apply (rule_tac P="ko_at' rv ptr and invs'" in ccorres_cross_over_guard) + apply (drule_tac s = ep in sym, simp only:) + apply (rule_tac P="ko_at' ep ptr and invs'" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl]) - apply (rule_tac P="ko_at' rv ptr" + apply (rule_tac P="ko_at' ep ptr" in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -634,8 +625,9 @@ lemma cancelBadgedSends_ccorres: st_tcb_at' (\st. isBlockedOnSend st \ blockingObject st = ptr) x s) \ distinct (xs @ list) \ ko_at' IdleEP ptr s \ (\p. \x \ set (xs @ list). \rf. (x, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) - \ valid_queues s \ pspace_aligned' s \ pspace_distinct' s - \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s" + \ pspace_aligned' s \ pspace_distinct' s + \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" and P'="\xs. {s. ep_queue_relation' (cslift s) (xs @ list) (head_C (queue_' s)) (end_C (queue_' s))} \ {s. 
thread_' s = (case list of [] \ tcb_Ptr 0 @@ -684,7 +676,7 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: tcb_queue_relation'_def EPState_Send_def mask_def) subgoal by (auto split: if_split) subgoal by simp - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule hoare_pre, wp weak_sch_act_wf_lift_linear set_ep_valid_objs') apply (clarsimp simp: weak_sch_act_wf_def sch_act_wf_def) apply (fastforce simp: valid_ep'_def pred_tcb_at' split: list.splits) @@ -694,7 +686,7 @@ lemma cancelBadgedSends_ccorres: apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) apply (rule ccorres_init_tmp_lift2, ceqv) apply (rule ccorres_guard_imp2) - apply (simp add: bind_assoc dc_def[symmetric] + apply (simp add: bind_assoc del: Collect_const) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -719,9 +711,9 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: rf_sr_def) apply simp apply ceqv - apply (rule_tac P="ret__unsigned=blockingIPCBadge rva" in ccorres_gen_asm2) + apply (rule_tac P="ret__unsigned=blockingIPCBadge rv" in ccorres_gen_asm2) apply (rule ccorres_if_bind, rule ccorres_if_lhs) - apply (simp add: bind_assoc dc_def[symmetric]) + apply (simp add: bind_assoc) apply (rule ccorres_rhs_assoc)+ apply (ctac add: setThreadState_ccorres) apply (ctac add: tcbSchedEnqueue_ccorres) @@ -731,8 +723,9 @@ lemma cancelBadgedSends_ccorres: apply (rule_tac rrel=dc and xf=xfdc and P="\s. (\t \ set (x @ a # lista). tcb_at' t s) \ (\p. \t \ set (x @ a # lista). \rf. (t, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) - \ valid_queues s \ distinct (x @ a # lista) - \ pspace_aligned' s \ pspace_distinct' s" + \ distinct (x @ a # lista) + \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" and P'="{s. ep_queue_relation' (cslift s) (x @ a # lista) (head_C (queue_' s)) (end_C (queue_' s))}" in ccorres_from_vcg) @@ -748,8 +741,7 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: return_def rf_sr_def cstate_relation_def Let_def) apply (rule conjI) apply (clarsimp simp: cpspace_relation_def) - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule null_ep_queue) + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) subgoal by (simp add: o_def) apply (rule conjI) apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) @@ -771,9 +763,6 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: image_iff) apply (drule_tac x=p in spec) subgoal by fastforce - apply (rule conjI) - apply (erule cready_queues_relation_not_queue_ptrs, - auto dest: null_ep_schedD[unfolded o_def] simp: o_def)[1] apply (simp add: carch_state_relation_def cmachine_state_relation_def h_t_valid_clift_Some_iff) @@ -784,16 +773,15 @@ lemma cancelBadgedSends_ccorres: apply wp apply simp apply vcg - apply (wp hoare_vcg_const_Ball_lift tcbSchedEnqueue_ep_at - sch_act_wf_lift) + apply (wp hoare_vcg_const_Ball_lift sch_act_wf_lift) apply simp apply (vcg exspec=tcbSchedEnqueue_cslift_spec) apply (wp hoare_vcg_const_Ball_lift sts_st_tcb_at'_cases - sts_sch_act sts_valid_queues setThreadState_oa_queued) + sts_sch_act sts_valid_objs') apply (vcg exspec=setThreadState_cslift_spec) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_symb_exec_r2) - apply (drule_tac x="x @ [a]" in spec, simp add: dc_def[symmetric]) + apply (drule_tac x="x @ [a]" in spec, simp) apply vcg apply (vcg spec=modifies) apply (thin_tac "\x. 
P x" for P) @@ -806,21 +794,18 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: typ_heap_simps st_tcb_at'_def) apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: ctcb_relation_blocking_ipc_badge) - apply (rule conjI, simp add: "StrictC'_thread_state_defs" mask_def) + apply (rule conjI, simp add: ThreadState_defs mask_def) apply (rule conjI) apply clarsimp apply (frule rf_sr_cscheduler_relation) apply (clarsimp simp: cscheduler_action_relation_def st_tcb_at'_def split: scheduler_action.split_asm) apply (rename_tac word) - apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge) - apply simp - subgoal by clarsimp - subgoal by clarsimp + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; simp?) subgoal by clarsimp apply clarsimp apply (rule conjI) - apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge) + apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge; simp?) subgoal by clarsimp apply clarsimp apply (rule context_conjI) @@ -860,8 +845,19 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp split: if_split) apply (drule sym_refsD, clarsimp) apply (drule(1) bspec)+ - by (auto simp: obj_at'_def projectKOs state_refs_of'_def pred_tcb_at'_def tcb_bound_refs'_def - dest!: symreftype_inverse') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply (fastforce simp: obj_at'_def projectKOs state_refs_of'_def pred_tcb_at'_def + tcb_bound_refs'_def + dest!: symreftype_inverse') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply fastforce + done declare Kernel_C.tcb_C_size [simp del] diff --git a/proof/crefine/ARM/Refine_C.thy b/proof/crefine/ARM/Refine_C.thy index 6782f35ecb..bb8c9cf30f 100644 --- a/proof/crefine/ARM/Refine_C.thy +++ b/proof/crefine/ARM/Refine_C.thy @@ -57,6 +57,7 @@ proof - show ?thesis apply (cinit') apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (ctac (no_vcg) add: getActiveIRQ_ccorres) apply (rule_tac P="rv \ Some 0xFFFF" in ccorres_gen_asm) @@ -71,7 +72,7 @@ proof - apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply simp apply vcg apply vcg @@ -85,7 +86,7 @@ proof - apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (rule_tac Q="\rv s. invs' s \ (\x. 
rv = Some x \ x \ ARM.maxIRQ) \ rv \ Some 0x3FF" in hoare_post_imp) apply (clarsimp simp: Kernel_C.maxIRQ_def ARM.maxIRQ_def) apply (wp getActiveIRQ_le_maxIRQ getActiveIRQ_neq_Some0xFF | simp)+ @@ -101,6 +102,7 @@ lemma handleUnknownSyscall_ccorres: (callKernel (UnknownSyscall n)) (Call handleUnknownSyscall_'proc)" apply (cinit' lift: w_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_symb_exec_r) apply (rule ccorres_pre_getCurThread) @@ -113,14 +115,12 @@ lemma handleUnknownSyscall_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (clarsimp, vcg) apply (clarsimp, rule conseqPre, vcg, clarsimp) apply clarsimp apply (intro impI conjI allI) - apply fastforce - apply (clarsimp simp: ct_not_ksQ) - apply (clarsimp simp add: sch_act_simple_def split: scheduler_action.split) + apply fastforce apply (rule active_ex_cap') apply (erule active_from_running') apply (erule invs_iflive') @@ -138,8 +138,10 @@ lemma handleVMFaultEvent_ccorres: (callKernel (VMFaultEvent vmfault_type)) (Call handleVMFaultEvent_'proc)" apply (cinit' lift:vm_faultType_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_pre_getCurThread) + apply (rename_tac thread) apply (simp add: catch_def) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) @@ -168,13 +170,13 @@ lemma handleVMFaultEvent_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ - apply (case_tac x, clarsimp, wp) + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply (case_tac rv, clarsimp, wp) apply (clarsimp, wp, simp) apply wp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: simple_sane_strg[unfolded sch_act_sane_not]) - by (auto simp: ct_in_state'_def cfault_rel_def is_cap_fault_def ct_not_ksQ + by (auto simp: ct_in_state'_def cfault_rel_def is_cap_fault_def elim: pred_tcb'_weakenE st_tcb_ex_cap'' dest: st_tcb_at_idle_thread' rf_sr_ksCurThread) @@ -187,6 +189,7 @@ lemma handleUserLevelFault_ccorres: (callKernel (UserLevelFault word1 word2)) (Call handleUserLevelFault_'proc)" apply (cinit' lift:w_a_' w_b_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_symb_exec_r) apply (rule ccorres_pre_getCurThread) @@ -199,16 +202,14 @@ lemma handleUserLevelFault_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (clarsimp, vcg) apply (clarsimp, rule conseqPre, vcg, clarsimp) apply clarsimp apply (intro impI conjI allI) - apply (simp add: ct_in_state'_def) - apply (erule pred_tcb'_weakenE) - apply simp - apply (clarsimp simp: ct_not_ksQ) - apply (clarsimp simp add: sch_act_simple_def split: scheduler_action.split) + apply (simp add: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply simp apply (rule active_ex_cap') apply (erule 
active_from_running') apply (erule invs_iflive') @@ -240,6 +241,7 @@ lemma handleSyscall_ccorres: (callKernel (SyscallEvent sysc)) (Call handleSyscall_'proc)" apply (cinit' lift: syscall_') apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) apply (simp add: handleE_def handleE'_def) apply (rule ccorres_split_nothrow_novcg) apply wpc @@ -392,11 +394,10 @@ lemma handleSyscall_ccorres: apply wp[1] apply clarsimp apply wp - apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s \ - (\p. ksCurThread s \ set (ksReadyQueues s p))" + apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s" in hoare_post_imp) apply (simp add: ct_in_state'_def) - apply (wp handleReply_sane handleReply_ct_not_ksQ) + apply (wp handleReply_sane) \ \SysYield\ apply (clarsimp simp: syscall_from_H_def syscall_defs) apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ @@ -422,11 +423,11 @@ lemma handleSyscall_ccorres: apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) - apply (wp schedule_invs' schedule_sch_act_wf | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + apply (wp schedule_invs' schedule_sch_act_wf + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (simp | wpc | wp hoare_drop_imp handleReply_sane handleReply_nonz_cap_to_ct schedule_invs' - handleReply_ct_not_ksQ[simplified] | strengthen ct_active_not_idle'_strengthen invs_valid_objs_strengthen)+ apply (rule_tac Q="\rv. invs' and ct_active'" in hoare_post_imp, simp) apply (wp hy_invs') @@ -444,7 +445,7 @@ lemma handleSyscall_ccorres: apply (frule active_ex_cap') apply (clarsimp simp: invs'_def valid_state'_def) apply (clarsimp simp: simple_sane_strg ct_in_state'_def st_tcb_at'_def obj_at'_def - isReply_def ct_not_ksQ) + isReply_def) apply (rule conjI, fastforce) prefer 2 apply (cut_tac 'b=32 and x=a and n=10 and 'a=10 in ucast_leq_mask) @@ -479,7 +480,7 @@ lemma ccorres_corres_u_xf: apply (drule (1) bspec) apply (clarsimp simp: exec_C_def no_fail_def) apply (drule_tac x = a in spec) - apply (clarsimp simp:gets_def NonDetMonad.bind_def get_def return_def) + apply (clarsimp simp:gets_def Nondet_Monad.bind_def get_def return_def) apply (rule conjI) apply clarsimp apply (erule_tac x=0 in allE) @@ -510,7 +511,7 @@ lemma no_fail_callKernel: apply (rule corres_nofail) apply (rule corres_guard_imp) apply (rule kernel_corres) - apply force + apply (force simp: schact_is_rct_def) apply (simp add: sch_act_simple_def) apply metis done @@ -523,6 +524,7 @@ lemma handleHypervisorEvent_ccorres: apply (simp add: callKernel_def handleEvent_def handleHypervisorEvent_C_def) apply (simp add: liftE_def bind_assoc) apply (rule ccorres_guard_imp) + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l) apply (cases t; simp add: handleHypervisorFault_def) apply (ctac (no_vcg) add: schedule_ccorres) @@ -532,7 +534,7 @@ lemma handleHypervisorEvent_ccorres: apply simp apply assumption apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply clarsimp+ done @@ -591,8 +593,8 @@ lemma ccorres_add_gets: lemma ccorres_get_registers: "\ \cptr msgInfo. ccorres dc xfdc ((\s. P s \ Q s \ - obj_at' (\tcb. (atcbContextGet o tcbArch) tcb ARM_H.capRegister = cptr - \ (atcbContextGet o tcbArch) tcb ARM_H.msgInfoRegister = msgInfo) + obj_at' (\tcb. 
(user_regs o atcbContextGet o tcbArch) tcb ARM_H.capRegister = cptr + \ (user_regs o atcbContextGet o tcbArch) tcb ARM_H.msgInfoRegister = msgInfo) (ksCurThread s) s) and R) (UNIV \ \\cptr = cptr\ \ \\msgInfo = msgInfo\) [] m c \ \ @@ -605,15 +607,15 @@ lemma ccorres_get_registers: apply (rule ccorres_assume_pre) apply (clarsimp simp: ct_in_state'_def st_tcb_at'_def) apply (drule obj_at_ko_at', clarsimp) - apply (erule_tac x="(atcbContextGet o tcbArch) ko ARM_H.capRegister" in meta_allE) - apply (erule_tac x="(atcbContextGet o tcbArch) ko ARM_H.msgInfoRegister" in meta_allE) + apply (erule_tac x="(user_regs o atcbContextGet o tcbArch) ko ARM_H.capRegister" in meta_allE) + apply (erule_tac x="(user_regs o atcbContextGet o tcbArch) ko ARM_H.msgInfoRegister" in meta_allE) apply (erule ccorres_guard_imp2) apply (clarsimp simp: rf_sr_ksCurThread) apply (drule(1) obj_at_cslift_tcb, clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: ctcb_relation_def ccontext_relation_def ARM_H.msgInfoRegister_def ARM_H.capRegister_def ARM.msgInfoRegister_def ARM.capRegister_def - carch_tcb_relation_def + carch_tcb_relation_def cregs_relation_def "StrictC'_register_defs") done @@ -633,9 +635,9 @@ lemma callKernel_withFastpath_corres_C: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_r)+ apply (rule ccorres_Cond_rhs) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_call_ccorres_callKernel]) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_reply_recv_ccorres_callKernel]) apply vcg apply (rule conseqPre, vcg, clarsimp) @@ -646,6 +648,7 @@ lemma callKernel_withFastpath_corres_C: apply (clarsimp simp: typ_heap_simps' ct_in_state'_def "StrictC'_register_defs" word_sle_def word_sless_def st_tcb_at'_opeq_simp) + apply (frule ready_qs_runnable_cross, (fastforce simp: valid_sched_def)+) apply (rule conjI, fastforce simp: st_tcb_at'_def) apply (auto elim!: pred_tcb'_weakenE cnode_caps_gsCNodes_from_sr[rotated]) done @@ -663,14 +666,14 @@ lemma threadSet_all_invs_triv': apply (simp add: tcb_cte_cases_def) apply (simp add: exst_same_def) apply (wp thread_set_invs_trivial thread_set_ct_running thread_set_not_state_valid_sched - threadSet_invs_trivial threadSet_ct_running' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp thread_set_ct_in_state - | simp add: tcb_cap_cases_def tcb_arch_ref_def + | simp add: tcb_cap_cases_def tcb_arch_ref_def exst_same_def | rule threadSet_ct_in_state' | wp (once) hoare_vcg_disj_lift)+ apply clarsimp apply (rule exI, rule conjI, assumption) - apply (clarsimp simp: invs_def invs'_def cur_tcb_def cur_tcb'_def) + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def invs'_def cur_tcb_def cur_tcb'_def) apply (simp add: state_relation_def) done @@ -713,12 +716,12 @@ lemma entry_corres_C: apply (rule setTCBContext_C_corres, rule ccontext_rel_to_C, simp) apply simp apply (rule corres_split) - apply (rule corres_cases[where R=fp], simp_all add: dc_def[symmetric])[1] - apply (rule callKernel_withFastpath_corres_C, simp) - apply (rule callKernel_corres_C[unfolded dc_def], simp) + apply (rule corres_cases[where R=fp]; simp) + apply (rule callKernel_withFastpath_corres_C) + apply (rule callKernel_corres_C) apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. 
t' = tcb_ptr_to_ctcb_ptr t"]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (rule getContext_corres[unfolded o_def], simp) + apply (rule getContext_corres, simp) apply (wp threadSet_all_invs_triv' callKernel_cur)+ apply (clarsimp simp: all_invs'_def invs'_def cur_tcb'_def valid_state'_def) apply simp @@ -748,15 +751,7 @@ lemma ct_running'_C: apply (frule (1) map_to_ko_atI') apply (erule obj_at'_weakenE) apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: - ThreadState_Running_def - ThreadState_BlockedOnReceive_def - ThreadState_BlockedOnSend_def - ThreadState_BlockedOnReply_def - ThreadState_BlockedOnNotification_def - ThreadState_Inactive_def - ThreadState_IdleThreadState_def - ThreadState_Restart_def) + apply (case_tac "tcbState ko"; simp add: ThreadState_defs) done lemma full_invs_both: @@ -820,7 +815,7 @@ lemma user_memory_update_corres_C: prefer 2 apply (clarsimp simp add: doMachineOp_def user_memory_update_def simpler_modify_def simpler_gets_def select_f_def - NonDetMonad.bind_def return_def) + Nondet_Monad.bind_def return_def) apply (thin_tac P for P)+ apply (case_tac a, clarsimp) apply (case_tac ksMachineStatea, clarsimp) @@ -847,7 +842,7 @@ lemma device_update_corres_C: apply (clarsimp simp add: setDeviceState_C_def simpler_modify_def) apply (rule ballI) apply (clarsimp simp: simpler_modify_def setDeviceState_C_def) - apply (clarsimp simp: doMachineOp_def device_memory_update_def NonDetMonad.bind_def in_monad + apply (clarsimp simp: doMachineOp_def device_memory_update_def Nondet_Monad.bind_def in_monad gets_def get_def return_def simpler_modify_def select_f_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) @@ -888,17 +883,22 @@ lemma dmo_domain_user_mem'[wp]: done lemma do_user_op_corres_C: - "corres_underlying rf_sr False False (=) (invs' and ex_abs einvs) \ - (doUserOp f tc) (doUserOp_C f tc)" + "corres_underlying rf_sr False False (=) + (invs' and ksReadyQueues_asrt and ex_abs einvs) \ + (doUserOp f tc) (doUserOp_C f tc)" apply (simp only: doUserOp_C_def doUserOp_def split_def) apply (rule corres_guard_imp) apply (rule_tac P=\ and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: simpler_gets_def getCurThread_def corres_underlying_def rf_sr_def cstate_relation_def Let_def) - apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) apply (clarsimp simp: cstate_to_A_def absKState_def rf_sr_def cstate_to_H_correct ptable_lift_def) - apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) apply (clarsimp simp: cstate_to_A_def absKState_def rf_sr_def cstate_to_H_correct ptable_rights_def) apply (rule_tac P=pspace_distinct' and P'=\ and r'="(=)" @@ -915,7 +915,7 @@ lemma do_user_op_corres_C: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) apply (drule(1) device_mem_C_relation[symmetric]) - apply (simp add: comp_def) + apply simp apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: cstate_relation_def rf_sr_def Let_def cmachine_state_relation_def) @@ -935,7 +935,7 @@ lemma do_user_op_corres_C: apply (rule corres_split[OF user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C, where R="\\" and R'="\\"]) 
- apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (intro conjI allI ballI impI) apply ((clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def)+)[5] apply (clarsimp simp: ex_abs_def restrict_map_def @@ -995,6 +995,9 @@ lemma refinement2_both: apply (subst cstate_to_H_correct) apply (fastforce simp: full_invs'_def invs'_def) apply (clarsimp simp: rf_sr_def) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) apply (simp add:absKState_def observable_memory_def absExst_def) apply (rule MachineTypes.machine_state.equality,simp_all)[1] apply (rule ext) @@ -1021,13 +1024,35 @@ lemma refinement2_both: apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) - apply (fastforce simp: full_invs'_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce apply (erule_tac P="a \ b \ c \ (\x. e x)" for a b c d e in disjE) apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) - apply (fastforce simp: full_invs'_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce apply (clarsimp simp: check_active_irq_C_def check_active_irq_H_def) apply (rule rev_mp, rule check_active_irq_corres_C) @@ -1115,7 +1140,7 @@ lemma kernel_all_subset_kernel: check_active_irq_H_def checkActiveIRQ_def) apply clarsimp apply (erule in_monad_imp_rewriteE[where F=True]) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_bind_tail)+ apply (rule monadic_rewrite_bind_head[where P=\]) apply (simp add: callKernel_C_def callKernel_withFastpath_C_def diff --git a/proof/crefine/ARM/Retype_C.thy b/proof/crefine/ARM/Retype_C.thy index 55d50220d6..a3b36e7e3c 100644 --- a/proof/crefine/ARM/Retype_C.thy +++ b/proof/crefine/ARM/Retype_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -498,46 +499,6 @@ lemma h_t_array_valid_retyp: apply (simp add: addr_card_wb unat_of_nat32) done -lemma typ_slice_list_array: - "x < size_td td * n - \ typ_slice_list (map (\i. DTPair td (nm i)) [0.. 
k < n - \ gd (p +\<^sub>p int k) - \ h_t_valid htd gd (p +\<^sub>p int k)" - apply (clarsimp simp: h_t_array_valid_def h_t_valid_def valid_footprint_def - size_of_def[symmetric, where t="TYPE('a)"]) - apply (drule_tac x="k * size_of TYPE('a) + y" in spec) - apply (drule mp) - apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) - apply (simp add: mult.commute) - apply (clarsimp simp: ptr_add_def add.assoc) - apply (erule map_le_trans[rotated]) - apply (clarsimp simp: uinfo_array_tag_n_m_def) - apply (subst typ_slice_list_array) - apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) - apply (simp add: mult.commute size_of_def) - apply (simp add: size_of_def list_map_mono) - done - lemma h_t_valid_ptr_retyps_gen: assumes sz: "nptrs * size_of TYPE('a :: mem_type) < addr_card" and gd: "gd p'" @@ -625,14 +586,6 @@ lemma ptr_retyps_gen_valid_footprint: apply (simp add: cleared[unfolded region_is_bytes'_def] not_byte) done -(* FIXME: Move to LemmaBucket_C. Stopped by: simp rules. *) -(* This is currently unused, but might be useful. - it might be worth fixing if it breaks, but ask around first. *) -lemma dom_lift_t_heap_update: - "dom (lift_t g (hrs_mem_update v hp)) = dom (lift_t g hp)" - by (clarsimp simp add: lift_t_def lift_typ_heap_if s_valid_def hrs_htd_def hrs_mem_update_def split_def dom_def - intro!: Collect_cong split: if_split) - lemma h_t_valid_ptr_retyps_gen_same: assumes guard: "\n' < nptrs. gd (CTypesDefs.ptr_add (Ptr p :: 'a ptr) (of_nat n'))" assumes cleared: "region_is_bytes' p (nptrs * size_of TYPE('a :: mem_type)) htd" @@ -863,7 +816,7 @@ lemma ptr_add_to_new_cap_addrs: shows "(CTypesDefs.ptr_add (Ptr ptr :: 'a :: mem_type ptr) \ of_nat) ` {k. k < n} = Ptr ` set (new_cap_addrs n ptr ko)" unfolding new_cap_addrs_def - apply (simp add: comp_def image_image shiftl_t2n size_of_m field_simps) + apply (simp add: image_image shiftl_t2n size_of_m field_simps) apply (clarsimp simp: atLeastLessThan_def lessThan_def) done @@ -891,29 +844,6 @@ lemma update_ti_t_word32_0s: "word_rcat [0, 0, 0, (0 :: word8)] = (0 :: word32)" by (simp_all add: typ_info_word word_rcat_def bin_rcat_def) -lemma is_aligned_ptr_aligned: - fixes p :: "'a :: c_type ptr" - assumes al: "is_aligned (ptr_val p) n" - and alignof: "align_of TYPE('a) = 2 ^ n" - shows "ptr_aligned p" - using al unfolding is_aligned_def ptr_aligned_def - by (simp add: alignof) - -lemma is_aligned_c_guard: - "is_aligned (ptr_val p) n - \ ptr_val p \ 0 - \ align_of TYPE('a) = 2 ^ m - \ size_of TYPE('a) \ 2 ^ n - \ m \ n - \ c_guard (p :: ('a :: c_type) ptr)" - apply (clarsimp simp: c_guard_def c_null_guard_def) - apply (rule conjI) - apply (rule is_aligned_ptr_aligned, erule(1) is_aligned_weaken, simp) - apply (erule is_aligned_get_word_bits, simp_all) - apply (rule intvl_nowrap[where x=0, simplified], simp) - apply (erule is_aligned_no_wrap_le, simp+) - done - lemma retype_guard_helper: assumes cover: "range_cover p sz (objBitsKO ko) n" and ptr0: "p \ 0" @@ -2166,8 +2096,8 @@ lemma getCTE_pre_cte_at: apply clarsimp done -lemmas ccorres_guard_from_wp_liftM = ccorres_guard_from_wp [OF liftM_pre iffD2 [OF empty_fail_liftM]] -lemmas ccorres_guard_from_wp_bind_liftM = ccorres_guard_from_wp_bind [OF liftM_pre iffD2 [OF empty_fail_liftM]] +lemmas ccorres_guard_from_wp_liftM = ccorres_guard_from_wp [OF liftM_pre empty_fail_liftM] +lemmas ccorres_guard_from_wp_bind_liftM = ccorres_guard_from_wp_bind [OF liftM_pre empty_fail_liftM] lemmas ccorres_liftM_getCTE_cte_at = ccorres_guard_from_wp_liftM [OF 
getCTE_pre_cte_at empty_fail_getCTE] ccorres_guard_from_wp_bind_liftM [OF getCTE_pre_cte_at empty_fail_getCTE] @@ -2199,9 +2129,9 @@ lemma insertNewCap_ccorres_helper: apply (rule conjI) apply (erule (2) cmap_relation_updI) apply (simp add: ccap_relation_def ccte_relation_def cte_lift_def) - subgoal by (simp add: cte_to_H_def map_option_Some_eq2 mdb_node_to_H_def to_bool_mask_to_bool_bf is_aligned_neg_mask - c_valid_cte_def true_def - split: option.splits) + subgoal by (simp add: cte_to_H_def map_option_Some_eq2 mdb_node_to_H_def to_bool_mask_to_bool_bf + is_aligned_neg_mask c_valid_cte_def + split: option.splits) subgoal by simp apply (erule_tac t = s' in ssubst) apply simp @@ -2461,6 +2391,9 @@ lemma ccorres_fail: apply (simp add: fail_def) done +(* always unfold StrictC'_mode_object_defs together with api_object_defs *) +lemmas api_object_defs = api_object_defs StrictC'_mode_object_defs + lemma object_type_from_H_toAPIType_simps: "(object_type_from_H tp = scast seL4_UntypedObject) = (toAPIType tp = Some ArchTypes_H.apiobject_type.Untyped)" "(object_type_from_H tp = scast seL4_TCBObject) = (toAPIType tp = Some ArchTypes_H.apiobject_type.TCBObject)" @@ -2664,7 +2597,6 @@ lemma cnc_tcb_helper: and al: "is_aligned (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb)" and ptr0: "ctcb_ptr_to_tcb_ptr p \ 0" and ptrlb: "2^ctcb_size_bits \ ptr_val p" - and vq: "valid_queues \" and pal: "pspace_aligned' (\\ksPSpace := ks\)" and pno: "pspace_no_overlap' (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb) (\\ksPSpace := ks\)" and pds: "pspace_distinct' (\\ksPSpace := ks\)" @@ -2978,21 +2910,23 @@ proof - unfolding ctcb_relation_def makeObject_tcb apply (simp add: fbtcb minBound_word) apply (intro conjI) - apply (simp add: cthread_state_relation_def thread_state_lift_def - eval_nat_numeral ThreadState_Inactive_def) - apply (simp add: ccontext_relation_def carch_tcb_relation_def) - apply (rule allI) - subgoal for r - by (case_tac r; - simp add: "StrictC'_register_defs" eval_nat_numeral atcbContext_def atcbContextGet_def - newArchTCB_def newContext_def initContext_def take_bit_Suc - del: unsigned_numeral) - apply (simp add: thread_state_lift_def eval_nat_numeral atcbContextGet_def)+ - apply (simp add: Kernel_Config.timeSlice_def) - apply (simp add: cfault_rel_def seL4_Fault_lift_def seL4_Fault_get_tag_def Let_def - lookup_fault_lift_def lookup_fault_get_tag_def lookup_fault_invalid_root_def - eval_nat_numeral seL4_Fault_NullFault_def option_to_ptr_def option_to_0_def + apply (simp add: cthread_state_relation_def thread_state_lift_def + eval_nat_numeral ThreadState_Inactive_def) + apply (clarsimp simp: ccontext_relation_def carch_tcb_relation_def) + (* C regs relation *) + apply (clarsimp simp: cregs_relation_def) + subgoal for r + by (case_tac r; + simp add: "StrictC'_register_defs" eval_nat_numeral atcbContext_def atcbContextGet_def + newArchTCB_def newContext_def initContext_def take_bit_Suc + del: unsigned_numeral) + apply (simp add: thread_state_lift_def index_foldr_update atcbContextGet_def) + apply (simp add: Kernel_Config.timeSlice_def) + apply (simp add: cfault_rel_def seL4_Fault_lift_def seL4_Fault_get_tag_def Let_def + lookup_fault_lift_def lookup_fault_get_tag_def lookup_fault_invalid_root_def + index_foldr_update seL4_Fault_NullFault_def option_to_ptr_def option_to_0_def split: if_split)+ + apply (simp add: option_to_ctcb_ptr_def) done have pks: "ks (ctcb_ptr_to_tcb_ptr p) = None" @@ -3043,15 +2977,6 @@ proof - apply (fastforce simp: dom_def) done - hence kstcb: "\qdom prio. 
ctcb_ptr_to_tcb_ptr p \ set (ksReadyQueues \ (qdom, prio))" using vq - apply (clarsimp simp add: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x = qdom in spec) - apply (drule_tac x = prio in spec) - apply clarsimp - apply (drule (1) bspec) - apply (simp add: obj_at'_def) - done - have ball_subsetE: "\P S R. \ \x \ S. P x; R \ S \ \ \x \ R. P x" by blast @@ -3165,7 +3090,7 @@ proof - apply (simp add: cl_cte [simplified] cl_tcb [simplified] cl_rest [simplified] tag_disj_via_td_name) apply (clarsimp simp add: cready_queues_relation_def Let_def htd_safe[simplified] kernel_data_refs_domain_eq_rotate) - apply (simp add: kstcb tcb_queue_update_other' hrs_htd_update + apply (simp add: tcb_queue_update_other' hrs_htd_update ptr_retyp_to_array[simplified] irq[simplified]) done qed @@ -3708,17 +3633,17 @@ lemma copyGlobalMappings_ccorres: apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply csymbr apply (rule ccorres_rel_imp) - apply (rule_tac F="\_ s. rv = armKSGlobalPD (ksArchState s) - \ is_aligned rv pdBits \ valid_pde_mappings' s + apply (rule_tac F="\_ s. globalPD = armKSGlobalPD (ksArchState s) + \ is_aligned globalPD pdBits \ valid_pde_mappings' s \ page_directory_at' pd s \ page_directory_at' (armKSGlobalPD (ksArchState s)) s" - and i="0xE00" - in ccorres_mapM_x_while') + and i="0xE00" + in ccorres_mapM_x_while') apply (clarsimp simp del: Collect_const) apply (rule ccorres_guard_imp2) apply (rule ccorres_pre_getObject_pde) apply (simp add: storePDE_def del: Collect_const) - apply (rule_tac P="\s. ko_at' rva (armKSGlobalPD (ksArchState s) + apply (rule_tac P="\s. ko_at' rv (armKSGlobalPD (ksArchState s) + ((0xE00 + of_nat n) << 2)) s \ page_directory_at' pd s \ valid_pde_mappings' s \ page_directory_at' (armKSGlobalPD (ksArchState s)) s" @@ -3733,7 +3658,7 @@ lemma copyGlobalMappings_ccorres: apply (rule cmap_relationE1[OF rf_sr_cpde_relation], assumption, erule_tac ko=ko' in ko_at_projectKO_opt) apply (rule cmap_relationE1[OF rf_sr_cpde_relation], - assumption, erule_tac ko=rva in ko_at_projectKO_opt) + assumption, erule_tac ko=rv in ko_at_projectKO_opt) apply (clarsimp simp: typ_heap_simps') apply (drule(1) page_directory_at_rf_sr)+ apply clarsimp @@ -3929,12 +3854,10 @@ lemma ccorres_placeNewObject_endpoint: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ep [where ptr=regionBase and n="1" and sz="objBitsKO (KOEndpoint makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -4047,7 +3970,7 @@ declare replicate_numeral [simp del] lemma ccorres_placeNewObject_tcb: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase tcbBlockSizeBits - and valid_queues and (\s. sym_refs (state_refs_of' s)) + and (\s. sym_refs (state_refs_of' s)) and (\s. 
2 ^ tcbBlockSizeBits \ gsMaxObjectSize s) and ret_zero regionBase (2 ^ tcbBlockSizeBits) and K (regionBase \ 0 \ range_cover regionBase tcbBlockSizeBits tcbBlockSizeBits 1 @@ -4386,7 +4309,7 @@ qed lemma placeNewObject_user_data: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase (pageBits+us) - and valid_queues and valid_machine_state' + and valid_machine_state' and ret_zero regionBase (2 ^ (pageBits+us)) and (\s. sym_refs (state_refs_of' s)) and (\s. 2^(pageBits + us) \ gsMaxObjectSize s) @@ -4508,7 +4431,7 @@ lemma placeNewObject_user_data_device: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and ret_zero regionBase (2 ^ (pageBits + us)) - and pspace_no_overlap' regionBase (pageBits+us) and valid_queues + and pspace_no_overlap' regionBase (pageBits+us) and (\s. sym_refs (state_refs_of' s)) and (\s. 2^(pageBits + us) \ gsMaxObjectSize s) and K (regionBase \ 0 \ range_cover regionBase (pageBits + us) (pageBits+us) (Suc 0) @@ -4763,7 +4686,7 @@ proof - apply clarify apply (intro conjI) apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' invs_valid_global' - APIType_capBits_def invs_queues invs_valid_objs' + APIType_capBits_def invs_valid_objs' invs_urz) apply clarsimp apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def @@ -4771,7 +4694,7 @@ proof - is_aligned_neg_mask_eq vmrights_to_H_def Kernel_C.VMReadWrite_def Kernel_C.VMNoAccess_def Kernel_C.VMKernelOnly_def Kernel_C.VMReadOnly_def) - apply (simp add: to_bool_def false_def isFrameType_def) + apply (simp add: isFrameType_def) \ \PageDirectoryObject\ apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') @@ -4794,7 +4717,7 @@ proof - apply simp apply simp apply wp - apply (clarsimp simp: false_def) + apply clarsimp apply vcg apply wp apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def @@ -4846,7 +4769,7 @@ lemma gsCNodes_update_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -4896,11 +4819,6 @@ lemma threadSet_domain_ccorres [corres]: apply (simp add: map_to_ctes_upd_tcb_no_ctes map_to_tcbs_upd tcb_cte_cases_def) apply (simp add: cep_relations_drop_fun_upd cvariable_relation_upd_const ko_at_projectKO_opt) - apply (rule conjI) - defer - apply (erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) subgoal by (simp add: ctcb_relation_def) @@ -4981,8 +4899,7 @@ proof - apply simp apply (clarsimp simp: ccap_relation_def cap_to_H_def getObjectSize_def apiGetObjectSize_def - cap_untyped_cap_lift to_bool_eq_0 true_def - aligned_add_aligned + cap_untyped_cap_lift to_bool_eq_0 aligned_add_aligned split: option.splits) apply (subst is_aligned_neg_mask_eq [OF is_aligned_weaken]) apply (erule range_cover.aligned) @@ -5018,7 +4935,7 @@ proof - apply (simp add: obj_at'_real_def) apply (wp placeNewObject_ko_wp_at') apply vcg - apply (clarsimp simp: dc_def) + apply clarsimp apply vcg apply (clarsimp simp: CPSR_def) apply (rule conseqPre, vcg, clarsimp) @@ -5026,7 +4943,6 @@ proof - createObject_c_preconds_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply 
(frule invs_sym') apply (simp add: getObjectSize_def objBits_simps' word_bits_conv apiGetObjectSize_def new_cap_addrs_def projectKO_opt_tcb) @@ -5034,8 +4950,7 @@ proof - region_actually_is_bytes_def APIType_capBits_def) apply (frule(1) ghost_assertion_size_logic_no_unat) apply (clarsimp simp: ccap_relation_def cap_to_H_def getObjectSize_def - apiGetObjectSize_def cap_thread_cap_lift to_bool_def true_def - aligned_add_aligned + apiGetObjectSize_def cap_thread_cap_lift aligned_add_aligned split: option.splits) apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs tcb_ptr_to_ctcb_ptr_def @@ -5072,13 +4987,12 @@ proof - apply wp apply (clarsimp simp: ccap_relation_def cap_to_H_def getObjectSize_def objBits_simps apiGetObjectSize_def epSizeBits_def - cap_endpoint_cap_lift to_bool_def true_def + cap_endpoint_cap_lift split: option.splits dest!: range_cover.aligned) apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (auto simp: getObjectSize_def objBits_simps apiGetObjectSize_def @@ -5111,13 +5025,12 @@ proof - apply wp apply (clarsimp simp: ccap_relation_def cap_to_H_def getObjectSize_def apiGetObjectSize_def ntfnSizeBits_def objBits_simps - cap_notification_cap_lift to_bool_def true_def + cap_notification_cap_lift dest!: range_cover.aligned split: option.splits) apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (auto simp: getObjectSize_def objBits_simps apiGetObjectSize_def ntfnSizeBits_def word_bits_conv @@ -5156,7 +5069,6 @@ proof - apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (frule(1) ghost_assertion_size_logic_no_unat) apply (clarsimp simp: getObjectSize_def objBits_simps apiGetObjectSize_def @@ -5170,8 +5082,7 @@ proof - apply (simp add: power_add cte_C_size objBits_defs) apply (frule range_cover.aligned) apply (clarsimp simp: ccap_relation_def cap_to_H_def - cap_cnode_cap_lift to_bool_def true_def - getObjectSize_def + cap_cnode_cap_lift getObjectSize_def apiGetObjectSize_def cteSizeBits_def objBits_simps field_simps is_aligned_power2 addr_card_wb is_aligned_weaken[where y=word_size_bits] @@ -6061,7 +5972,8 @@ lemma createObject_caps_overlap_reserved_ret': apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_caps_overlap_reserved_ret'[where sz = "APIType_capBits ty us"]]) apply assumption - apply (case_tac r,simp) + apply (rename_tac rv s) + apply (case_tac rv,simp) apply clarsimp apply (erule caps_overlap_reserved'_subseteq) apply (rule untypedRange_in_capRange) @@ -6130,7 +6042,8 @@ lemma createObject_IRQHandler: apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_IRQHandler[where irq = x and P = "\_ _. 
False"]]) apply assumption - apply (case_tac r,clarsimp+) + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) apply (clarsimp simp:word_bits_conv) done @@ -6147,7 +6060,8 @@ lemma createObject_capClass[wp]: apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_range_helper]) apply assumption - apply (case_tac r,clarsimp+) + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) apply (clarsimp simp:word_bits_conv ) apply (rule range_cover_full) apply (simp add:word_bits_conv)+ @@ -6845,7 +6759,7 @@ shows "ccorres dc xfdc apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2, simp) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_add_return) - apply (simp only: dc_def[symmetric] hrs_htd_update) + apply (simp only: hrs_htd_update) apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? apply (rule ccorres_split_nothrow, rule_tac S="{ptr .. ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1}" @@ -7016,9 +6930,9 @@ shows "ccorres dc xfdc including no_pre apply (wp insertNewCap_invs' insertNewCap_valid_pspace' insertNewCap_caps_overlap_reserved' insertNewCap_pspace_no_overlap' insertNewCap_caps_no_overlap'' insertNewCap_descendants_range_in' - insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at static_imp_wp) + insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at hoare_weak_lift_imp) apply (wp insertNewCap_cte_wp_at_other) - apply (wp hoare_vcg_all_lift static_imp_wp insertNewCap_cte_at) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp insertNewCap_cte_at) apply (clarsimp simp:conj_comms | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_distinct')+ @@ -7052,7 +6966,7 @@ shows "ccorres dc xfdc hoare_vcg_prop createObject_gsCNodes_p createObject_cnodes_have_size) apply (rule hoare_vcg_conj_lift[OF createObject_capRange_helper]) apply (wp createObject_cte_wp_at' createObject_ex_cte_cap_wp_to - createObject_no_inter[where sz = sz] hoare_vcg_all_lift static_imp_wp)+ + createObject_no_inter[where sz = sz] hoare_vcg_all_lift hoare_weak_lift_imp)+ apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace' field_simps range_cover.sz conj_comms range_cover.aligned range_cover_sz' is_aligned_shiftl_self aligned_add_aligned[OF range_cover.aligned]) @@ -7199,9 +7113,9 @@ shows "ccorres dc xfdc apply (frule(1) range_cover_gsMaxObjectSize, fastforce, assumption) apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) ghost_assertion_size_logic)+ - apply (simp add: o_def) - apply (case_tac newType,simp_all add:object_type_from_H_def Kernel_C_defs - nAPIObjects_def APIType_capBits_def o_def split:apiobject_type.splits)[1] + apply (case_tac newType, + simp_all add: object_type_from_H_def Kernel_C_defs nAPIObjects_def APIType_capBits_def o_def + split: apiobject_type.splits)[1] subgoal by (simp add:unat_eq_def word_unat.Rep_inverse' word_less_nat_alt) subgoal by (clarsimp simp: objBits_simps', unat_arith) apply (fold_subgoals (prefix))[3] diff --git a/proof/crefine/ARM/SR_lemmas_C.thy b/proof/crefine/ARM/SR_lemmas_C.thy index 0abe850951..7857a68d4c 100644 --- a/proof/crefine/ARM/SR_lemmas_C.thy +++ b/proof/crefine/ARM/SR_lemmas_C.thy @@ -296,11 +296,15 @@ lemma cmdbnode_relation_mdb_node_to_H [simp]: unfolding cmdbnode_relation_def mdb_node_to_H_def mdb_node_lift_def cte_lift_def by (fastforce split: option.splits) -definition - tcb_no_ctes_proj :: "tcb \ Structures_H.thread_state \ word32 \ word32 \ arch_tcb \ bool \ word8 \ word8 \ word8 \ nat \ fault option \ word32 option" +definition tcb_no_ctes_proj :: 
+ "tcb \ Structures_H.thread_state \ machine_word \ machine_word \ arch_tcb \ bool \ word8 + \ word8 \ word8 \ nat \ fault option \ machine_word option + \ machine_word option \ machine_word option" where - "tcb_no_ctes_proj t \ (tcbState t, tcbFaultHandler t, tcbIPCBuffer t, tcbArch t, tcbQueued t, - tcbMCP t, tcbPriority t, tcbDomain t, tcbTimeSlice t, tcbFault t, tcbBoundNotification t)" + "tcb_no_ctes_proj t \ + (tcbState t, tcbFaultHandler t, tcbIPCBuffer t, tcbArch t, tcbQueued t, + tcbMCP t, tcbPriority t, tcbDomain t, tcbTimeSlice t, tcbFault t, tcbBoundNotification t, + tcbSchedNext t, tcbSchedPrev t)" lemma tcb_cte_cases_proj_eq [simp]: "tcb_cte_cases p = Some (getF, setF) \ @@ -309,15 +313,15 @@ lemma tcb_cte_cases_proj_eq [simp]: by (auto split: if_split_asm) lemma map_to_ctes_upd_tcb': - "[| ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; - ps_clear p tcbBlockSizeBits s |] -==> map_to_ctes (ksPSpace s(p |-> KOTCB tcb)) = - (%x. if EX getF setF. + "\ ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; + ps_clear p tcbBlockSizeBits s \ + \ map_to_ctes ((ksPSpace s)(p \ KOTCB tcb)) = + (\x. if EX getF setF. tcb_cte_cases (x - p) = Some (getF, setF) & - getF tcb ~= getF tcb' - then case tcb_cte_cases (x - p) of - Some (getF, setF) => Some (getF tcb) - else ctes_of s x)" + getF tcb \ getF tcb' + then case tcb_cte_cases (x - p) of + Some (getF, setF) \ Some (getF tcb) + else ctes_of s x)" apply (erule (1) map_to_ctes_upd_tcb) apply (simp add: field_simps ps_clear_def3 mask_def objBits_defs) done @@ -431,18 +435,19 @@ qed lemma fst_setCTE: assumes ct: "cte_at' dest s" and rl: "\s'. \ ((), s') \ fst (setCTE dest cte s); - (s' = s \ ksPSpace := ksPSpace s' \); - (ctes_of s' = ctes_of s(dest \ cte)); - (map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s')); - (map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s')); - (map_to_pdes (ksPSpace s) = map_to_pdes (ksPSpace s')); - (map_to_ptes (ksPSpace s) = map_to_ptes (ksPSpace s')); - (map_to_asidpools (ksPSpace s) = map_to_asidpools (ksPSpace s')); - (map_to_user_data (ksPSpace s) = map_to_user_data (ksPSpace s')); - (map_to_user_data_device (ksPSpace s) = map_to_user_data_device (ksPSpace s')); - (map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s) - = map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s')); - \T p. typ_at' T p s = typ_at' T p s'\ \ P" + s' = s \ ksPSpace := ksPSpace s' \; + ctes_of s' = (ctes_of s)(dest \ cte); + map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s'); + map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s'); + map_to_pdes (ksPSpace s) = map_to_pdes (ksPSpace s'); + map_to_ptes (ksPSpace s) = map_to_ptes (ksPSpace s'); + map_to_asidpools (ksPSpace s) = map_to_asidpools (ksPSpace s'); + map_to_user_data (ksPSpace s) = map_to_user_data (ksPSpace s'); + map_to_user_data_device (ksPSpace s) = map_to_user_data_device (ksPSpace s'); + map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s) + = map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s'); + \T p. 
typ_at' T p s = typ_at' T p s'\ + \ P" shows "P" proof - from fst_setCTE0 [where cte = cte, OF ct] @@ -458,7 +463,7 @@ proof - by clarsimp note thms = this - have ceq: "ctes_of s' = ctes_of s(dest \ cte)" + have ceq: "ctes_of s' = (ctes_of s)(dest \ cte)" by (rule use_valid [OF thms(1) setCTE_ctes_of_wp]) simp show ?thesis @@ -636,7 +641,6 @@ proof (rule cor_map_relI [OF map_option_eq_dom_eq]) hence "tcb_no_ctes_proj tcb = tcb_no_ctes_proj tcb'" using om apply - - apply (simp add: o_def) apply (drule fun_cong [where x = x]) apply simp done @@ -1384,9 +1388,9 @@ lemma cmap_relation_cong: apply (erule imageI) done -lemma ctcb_relation_null_queue_ptrs: +lemma ctcb_relation_null_ep_ptrs: assumes rel: "cmap_relation mp mp' tcb_ptr_to_ctcb_ptr ctcb_relation" - and same: "map_option tcb_null_queue_ptrs \ mp'' = map_option tcb_null_queue_ptrs \ mp'" + and same: "map_option tcb_null_ep_ptrs \ mp'' = map_option tcb_null_ep_ptrs \ mp'" shows "cmap_relation mp mp'' tcb_ptr_to_ctcb_ptr ctcb_relation" using rel apply (rule iffD1 [OF cmap_relation_cong, OF _ map_option_eq_dom_eq, rotated -1]) @@ -1394,7 +1398,7 @@ lemma ctcb_relation_null_queue_ptrs: apply (rule same [symmetric]) apply (drule compD [OF same]) apply (case_tac b, case_tac b') - apply (simp add: ctcb_relation_def tcb_null_queue_ptrs_def) + apply (simp add: ctcb_relation_def tcb_null_ep_ptrs_def) done (* Levity: added (20090419 09:44:27) *) @@ -1407,7 +1411,7 @@ lemma ntfnQueue_tail_mask_4 [simp]: lemma map_to_ctes_upd_tcb_no_ctes: "\ko_at' tcb thread s ; \x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x \ - \ map_to_ctes (ksPSpace s(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" apply (erule obj_atE') apply (simp add: projectKOs objBits_simps) apply (subst map_to_ctes_upd_tcb') @@ -1421,14 +1425,14 @@ lemma map_to_ctes_upd_tcb_no_ctes: lemma update_ntfn_map_tos: fixes P :: "Structures_H.notification \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KONotification ko)) = map_to_eps (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KONotification ko)) = map_to_eps (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs 
projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1436,14 +1440,14 @@ lemma update_ntfn_map_tos: lemma update_ep_map_tos: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1451,13 +1455,13 @@ lemma update_ep_map_tos: lemma update_tcb_map_tos: fixes P :: "tcb \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" - and "map_to_ntfns (ksPSpace s(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" + and "map_to_ntfns ((ksPSpace s)(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1465,14 +1469,14 @@ lemma update_tcb_map_tos: lemma update_asidpool_map_tos: fixes P :: "asidpool \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ctes 
(ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI @@ -1481,26 +1485,26 @@ lemma update_asidpool_map_tos: arch_kernel_object.split_asm) lemma update_asidpool_map_to_asidpools: - "map_to_asidpools (ksPSpace s(p \ KOArch (KOASIDPool ap))) + "map_to_asidpools ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = (map_to_asidpools (ksPSpace s))(p \ ap)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_to_ptes: - "map_to_ptes (ksPSpace s(p \ KOArch (KOPTE pte))) + "map_to_ptes ((ksPSpace s)(p \ KOArch (KOPTE pte))) = (map_to_ptes (ksPSpace s))(p \ pte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_tos: fixes P :: "pte \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI 
map_to_ctes_upd_other split: if_split_asm if_split @@ -1508,21 +1512,21 @@ lemma update_pte_map_tos: auto simp: projectKO_opts_defs) lemma update_pde_map_to_pdes: - "map_to_pdes (ksPSpace s(p \ KOArch (KOPDE pde))) + "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPDE pde))) = (map_to_pdes (ksPSpace s))(p \ pde)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pde_map_tos: fixes P :: "pde \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1609,7 +1613,6 @@ where | "thread_state_to_tsType (Structures_H.BlockedOnSend oref badge cg cgr isc) = scast ThreadState_BlockedOnSend" | "thread_state_to_tsType (Structures_H.BlockedOnNotification oref) = scast ThreadState_BlockedOnNotification" - lemma ctcb_relation_thread_state_to_tsType: "ctcb_relation tcb ctcb \ tsType_CL (thread_state_lift (tcbState_C ctcb)) = thread_state_to_tsType (tcbState tcb)" unfolding ctcb_relation_def cthread_state_relation_def @@ -1877,9 +1880,9 @@ lemma memory_cross_over: apply (cut_tac p=ptr in unat_mask_2_less_4) apply (subgoal_tac "(ptr && ~~ mask 2) + (ptr && mask 2) = ptr") apply (subgoal_tac "!n x. 
n < 4 \ (unat (x::word32) = n) = (x = of_nat n)") - apply (auto simp add: eval_nat_numeral unat_eq_0 add.commute take_bit_Suc - elim!: less_SucE)[1] - apply (clarsimp simp add: unat32_eq_of_nat word_bits_def) + apply (clarsimp simp: eval_nat_numeral) + apply (fastforce simp: add.commute elim!: less_SucE) + apply (clarsimp simp: unat32_eq_of_nat word_bits_def) apply (simp add: add.commute word_plus_and_or_coroll2) done @@ -2118,6 +2121,14 @@ lemma invs_urz[elim!]: "invs' s \ untyped_ranges_zero' s" by (clarsimp simp: invs'_def valid_state'_def) +lemma rf_sr_ctcb_queue_relation: + "\ (s, s') \ rf_sr; d \ maxDomain; p \ maxPriority \ + \ ctcb_queue_relation (ksReadyQueues s (d, p)) + (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p))" + unfolding rf_sr_def cstate_relation_def cready_queues_relation_def + apply (clarsimp simp: Let_def seL4_MinPrio_def minDom_def maxDom_to_H maxPrio_to_H) + done + lemma rf_sr_sched_action_relation: "(s, s') \ rf_sr \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" @@ -2137,5 +2148,18 @@ lemma msgRegisters_size_sanity: "size_msgRegisters = unat (n_msgRegisters)" by (simp add: n_msgRegisters_def size_msgRegisters_def) +(* link up Kernel_Config loaded from the seL4 build system with physBase in C code *) +lemma physBase_spec: + "\s. \\ {s} Call physBase_'proc {t. ret__unsigned_long_' t = Kernel_Config.physBase }" + apply (rule allI, rule conseqPre, vcg) + apply (simp add: Kernel_Config.physBase_def) + done + +lemma rf_sr_obj_update_helper: + "(s, s'\ globals := globals s' \ t_hrs_' := t_hrs_' (globals (undefined + \ globals := (undefined \ t_hrs_' := f (globals s') (t_hrs_' (globals s')) \)\))\\) \ rf_sr + \ (s, globals_update (\v. t_hrs_'_update (f v) v) s') \ rf_sr" + by (simp cong: StateSpace.state.fold_congs globals.fold_congs) + end end diff --git a/proof/crefine/ARM/Schedule_C.thy b/proof/crefine/ARM/Schedule_C.thy index 2072f05e74..c9a827f206 100644 --- a/proof/crefine/ARM/Schedule_C.thy +++ b/proof/crefine/ARM/Schedule_C.thy @@ -1,11 +1,12 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only *) theory Schedule_C -imports Tcb_C +imports Tcb_C Detype_C begin (*FIXME: arch_split: move up?*) @@ -32,18 +33,17 @@ lemma switchToIdleThread_ccorres: "ccorres dc xfdc invs_no_cicd' UNIV [] switchToIdleThread (Call switchToIdleThread_'proc)" apply (cinit) + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l) apply (ctac (no_vcg) add: Arch_switchToIdleThread_ccorres) apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule_tac P="\s. 
thread = ksIdleThread s" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: simpler_modify_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) - apply (simp add: ARM_H.switchToIdleThread_def) - apply wp+ - apply simp - apply simp + apply (wpsimp simp: ARM_H.switchToIdleThread_def wp: hoare_drop_imps)+ done lemma Arch_switchToThread_ccorres: @@ -57,13 +57,19 @@ lemma Arch_switchToThread_ccorres: apply (ctac (no_vcg) add: setVMRoot_ccorres) apply (simp (no_asm) del: Collect_const) apply (rule_tac A'=UNIV in ccorres_guard_imp2) - apply (fold dc_def)[1] apply (ctac add: clearExMonitor_ccorres) apply clarsimp apply wp apply clarsimp done +lemma invs_no_cicd'_pspace_aligned': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_aligned' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + +lemma invs_no_cicd'_pspace_distinct': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_distinct' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) (* FIXME: move *) lemma switchToThread_ccorres: @@ -73,47 +79,34 @@ lemma switchToThread_ccorres: hs (switchToThread t) (Call switchToThread_'proc)" - apply (cinit lift: thread_') + apply (clarsimp simp: switchToThread_def) + apply (rule ccorres_symb_exec_l'[OF _ _ isRunnable_sp]; (solves wpsimp)?) + apply (rule ccorres_symb_exec_l'[OF _ _ assert_sp]; (solves wpsimp)?) + apply (rule ccorres_stateAssert_fwd)+ + apply (cinit' lift: thread_') apply (ctac (no_vcg) add: Arch_switchToThread_ccorres) apply (ctac (no_vcg) add: tcbSchedDequeue_ccorres) + apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: setCurThread_def simpler_modify_def) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def) - apply wp+ - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def) - done - -lemma get_tsType_ccorres2: - "ccorres (\r r'. r' = thread_state_to_tsType r) ret__unsigned_' (tcb_at' thread) - (UNIV \ {s. f s = tcb_ptr_to_ctcb_ptr thread} \ - {s. cslift s (Ptr &(f s\[''tcbState_C''])) = Some (thread_state_' s)}) [] - (getThreadState thread) (Call thread_state_get_tsType_'proc)" - unfolding getThreadState_def - apply (rule ccorres_from_spec_modifies [where P=\, simplified]) - apply (rule thread_state_get_tsType_spec) - apply (rule thread_state_get_tsType_modifies) - apply simp - apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: typ_heap_simps) - apply (rule bexI [rotated, OF threadGet_eq], assumption) - apply simp - apply (drule ctcb_relation_thread_state_to_tsType) - apply simp + apply (clarsimp simp: setCurThread_def simpler_modify_def rf_sr_def cstate_relation_def + Let_def carch_state_relation_def cmachine_state_relation_def) + apply (wpsimp wp: Arch_switchToThread_invs_no_cicd' hoare_drop_imps + | strengthen invs_no_cicd'_pspace_aligned' invs_no_cicd'_pspace_distinct')+ done lemma activateThread_ccorres: "ccorres dc xfdc (ct_in_state' activatable' and (\s. sch_act_wf (ksSchedulerAction s) s) - and valid_queues and valid_objs') + and valid_objs' and pspace_aligned' and pspace_distinct') UNIV [] activateThread (Call activateThread_'proc)" apply (cinit) apply (rule ccorres_pre_getCurThread) - apply (ctac add: get_tsType_ccorres2 [where f="\s. 
ksCurThread_' (globals s)"]) + apply (ctac add: get_tsType_ccorres [where f="\s. ksCurThread_' (globals s)"]) apply (rule_tac P="activatable' rv" in ccorres_gen_asm) apply (wpc) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) @@ -123,7 +116,7 @@ lemma activateThread_ccorres: apply (rule ccorres_cond_true) apply (rule ccorres_return_Skip) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) - apply (simp add: "StrictC'_thread_state_defs" del: Collect_const) + apply (simp add: ThreadState_defs del: Collect_const) apply (rule ccorres_cond_false) apply (rule ccorres_cond_false) apply (rule ccorres_cond_true) @@ -131,7 +124,7 @@ lemma activateThread_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: activateIdleThread_def return_def) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) - apply (simp add: "StrictC'_thread_state_defs" del: Collect_const) + apply (simp add: ThreadState_defs del: Collect_const) apply (rule ccorres_cond_false) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -154,7 +147,7 @@ lemma activateThread_ccorres: apply (subgoal_tac "ksCurThread_' (globals s') = tcb_ptr_to_ctcb_ptr (ksCurThread s)") prefer 2 apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (clarsimp simp: typ_heap_simps ThreadState_Running_def mask_def) + apply (clarsimp simp: typ_heap_simps ThreadState_defs mask_def) done lemma ceqv_remove_tail_Guard_Skip: @@ -166,17 +159,42 @@ lemma ceqv_remove_tail_Guard_Skip: done lemma switchToThread_ccorres': - "ccorres (\_ _. True) xfdc + "ccorres dc xfdc (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) hs (switchToThread t) (Call switchToThread_'proc)" apply (rule ccorres_guard_imp2) - apply (ctac (no_vcg) add: switchToThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: switchToThread_ccorres) apply auto done +lemma ccorres_pre_getQueue: + assumes cc: "\queue. ccorres r xf (P queue) (P' queue) hs (f queue) c" + shows "ccorres r xf (\s. P (ksReadyQueues s (d, p)) s \ d \ maxDomain \ p \ maxPriority) + {s'. \queue. (let cqueue = index (ksReadyQueues_' (globals s')) + (cready_queues_index_to_C d p) in + ctcb_queue_relation queue cqueue) \ s' \ P' queue} + hs (getQueue d p >>= (\queue. f queue)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l2) + defer + defer + apply (rule gq_sp) + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply assumption + apply (clarsimp simp: getQueue_def gets_exs_valid) + apply clarsimp + apply (drule spec, erule mp) + apply (erule rf_sr_ctcb_queue_relation) + apply (simp add: maxDom_to_H maxPrio_to_H)+ + done + lemma chooseThread_ccorres: "ccorres dc xfdc all_invs_but_ct_idle_or_in_cur_domain' UNIV [] chooseThread (Call chooseThread_'proc)" proof - @@ -192,9 +210,22 @@ proof - "\s. invs_no_cicd' s \ ksCurDomain s \ maxDomain" by (simp add: invs_no_cicd'_def) + have invs_no_cicd'_valid_bitmaps: + "\s. invs_no_cicd' s \ valid_bitmaps s" + by (simp add: invs_no_cicd'_def) + + have invs_no_cicd'_pspace_aligned': + "\s. invs_no_cicd' s \ pspace_aligned' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + + have invs_no_cicd'_pspace_distinct': + "\s. invs_no_cicd' s \ pspace_distinct' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + show ?thesis supply if_split[split del] apply (cinit) + apply (rule ccorres_stateAssert)+ apply (simp add: numDomains_sge_1_simp) apply (rule_tac xf'=dom_' and r'="\rv rv'. 
rv' = ucast rv" in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) @@ -227,7 +258,7 @@ proof - apply (rule_tac P="curdom \ maxDomain" in ccorres_cross_over_guard_no_st) apply (rule_tac P="prio \ maxPriority" in ccorres_cross_over_guard_no_st) apply (rule ccorres_pre_getQueue) - apply (rule_tac P="queue \ []" in ccorres_cross_over_guard_no_st) + apply (rule_tac P="\ tcbQueueEmpty queue" in ccorres_cross_over_guard_no_st) apply (rule ccorres_symb_exec_l) apply (rule ccorres_assert) apply (rule ccorres_symb_exec_r) @@ -242,39 +273,40 @@ proof - apply (rule conseqPre, vcg) apply (rule Collect_mono) apply clarsimp - apply (strengthen queue_in_range) apply assumption apply clarsimp apply (rule conseqPre, vcg) apply clarsimp apply (wp isRunnable_wp)+ - apply (simp add: isRunnable_def) - apply wp apply (clarsimp simp: Let_def guard_is_UNIV_def) - apply (drule invs_no_cicd'_queues) - apply (case_tac queue, simp) - apply (clarsimp simp: tcb_queue_relation'_def cready_queues_index_to_C_def numPriorities_def) - apply (clarsimp simp add: maxDom_to_H maxPrio_to_H - queue_in_range[where qdom=0, simplified, simplified maxPrio_to_H]) - apply (clarsimp simp: le_maxDomain_eq_less_numDomains unat_trans_ucast_helper ) + apply (rule conjI) + apply (clarsimp simp: le_maxDomain_eq_less_numDomains unat_trans_ucast_helper) + apply (intro conjI impI) + apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def ctcb_queue_relation_def + tcbQueueEmpty_def option_to_ctcb_ptr_def) + apply (frule_tac qdom=curdom and prio=rv in cready_queues_index_to_C_in_range') + apply fastforce + apply (clarsimp simp: num_tcb_queues_val word_less_nat_alt cready_queues_index_to_C_def2) apply wpsimp apply (clarsimp simp: guard_is_UNIV_def le_maxDomain_eq_less_numDomains word_less_nat_alt numDomains_less_numeric_explicit) - apply (frule invs_no_cicd'_queues) + apply clarsimp apply (frule invs_no_cicd'_max_CurDomain) - apply (frule invs_no_cicd'_queues) - apply (clarsimp simp: valid_queues_def lookupBitmapPriority_le_maxPriority) + apply (frule invs_no_cicd'_pspace_aligned') + apply (frule invs_no_cicd'_pspace_distinct') + apply (frule invs_no_cicd'_valid_bitmaps) + apply (frule valid_bitmaps_bitmapQ_no_L1_orphans) + apply (frule valid_bitmaps_valid_bitmapQ) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def cong: conj_cong) apply (intro conjI impI) - apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (fastforce dest: lookupBitmapPriority_obj_at' - simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) - apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) - apply (clarsimp simp: not_less le_maxDomain_eq_less_numDomains) - apply (prop_tac "ksCurDomain s = 0") - using unsigned_eq_0_iff apply force - apply (cut_tac s=s in lookupBitmapPriority_obj_at'; simp?) 
- apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (fastforce intro: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) + apply (fastforce dest: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) done qed @@ -350,7 +382,6 @@ lemma isHighestPrio_ccorres: (isHighestPrio d p) (Call isHighestPrio_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -385,14 +416,13 @@ lemma isHighestPrio_ccorres: apply (rule ccorres_return_C, simp, simp, simp) apply (rule wp_post_taut) apply (vcg exspec=getHighestPrio_modifies)+ - apply (clarsimp simp: word_le_nat_alt true_def to_bool_def maxDomain_le_unat_ucast_explicit + apply (clarsimp simp: word_le_nat_alt maxDomain_le_unat_ucast_explicit split: if_splits) done lemma schedule_ccorres: "ccorres dc xfdc invs' UNIV [] schedule (Call schedule_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -406,7 +436,7 @@ lemma schedule_ccorres: apply (rule ccorres_cond_false_seq) apply simp apply (rule_tac P=\ and P'="{s. ksSchedulerAction_' (globals s) = NULL }" in ccorres_from_vcg) - apply (clarsimp simp: dc_def return_def split: prod.splits) + apply (clarsimp simp: return_def split: prod.splits) apply (rule conseqPre, vcg, clarsimp) (* toplevel case: action is choose new thread *) apply (rule ccorres_cond_true_seq) @@ -423,7 +453,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_true_seq) (* isolate haskell part before setting thread action *) apply (simp add: scheduleChooseNewThread_def) @@ -451,7 +481,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_false_seq) apply (rule_tac xf'=was_runnable_' in ccorres_abstract, ceqv) @@ -471,7 +501,7 @@ lemma schedule_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'=fastfail_' in ccorres_split_nothrow) - apply (clarsimp simp: scheduleSwitchThreadFastfail_def dc_simp) + apply (clarsimp simp: scheduleSwitchThreadFastfail_def) apply (rule ccorres_cond_seq2[THEN iffD1]) apply (rule_tac xf'=ret__int_' and val="from_bool (curThread = it)" and R="\s. it = ksIdleThread s \ curThread = ksCurThread s" and R'=UNIV @@ -482,17 +512,17 @@ lemma schedule_ccorres: apply (rule ccorres_cond2'[where R=\], fastforce) apply clarsimp apply (rule ccorres_return[where R'=UNIV], clarsimp, vcg) - apply (rule_tac P="\s. obj_at' (\tcb. 
tcbPriority tcb = curPrio) curThread s - \ curThread = ksCurThread s - \ obj_at' (\tcb. tcbPriority tcb = targetPrio) candidate s" - and P'=UNIV in ccorres_from_vcg) - apply clarsimp - apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def cur_tcb'_def rf_sr_ksCurThread) - apply (drule (1) obj_at_cslift_tcb)+ - apply (clarsimp simp: typ_heap_simps ctcb_relation_def to_bool_def split: if_split) - apply unat_arith - apply (wpsimp wp: threadGet_obj_at2) + apply (rule_tac P="\s. obj_at' (\tcb. tcbPriority tcb = curPrio) curThread s + \ curThread = ksCurThread s + \ obj_at' (\tcb. tcbPriority tcb = targetPrio) candidate s" + and P'=UNIV in ccorres_from_vcg) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: return_def cur_tcb'_def rf_sr_ksCurThread) + apply (drule (1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) + apply unat_arith + apply clarsimp apply vcg apply ceqv (* fastfail calculation complete *) @@ -508,18 +538,17 @@ lemma schedule_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres, clarsimp) - apply (clarsimp simp: to_bool_def) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_return) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: to_bool_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) apply (fastforce simp: isHighestPrio_def' gets_def return_def get_def - NonDetMonad.bind_def + Nondet_Monad.bind_def split: prod.split) apply ceqv apply (clarsimp simp: to_bool_def) @@ -553,10 +582,10 @@ lemma schedule_ccorres: in ccorres_symb_exec_r_known_rv) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: false_def cur_tcb'_def rf_sr_ksCurThread) + apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread) apply (drule (1) obj_at_cslift_tcb)+ - apply (clarsimp simp: typ_heap_simps ctcb_relation_def to_bool_def split: if_split) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) apply (solves \unat_arith, rule iffI; simp\) apply ceqv apply clarsimp @@ -597,13 +626,13 @@ lemma schedule_ccorres: apply (wp (once) hoare_drop_imps) apply wp apply (strengthen strenghten_False_imp[where P="a = ResumeCurrentThread" for a]) - apply (clarsimp simp: conj_ac invs_queues invs_valid_objs' cong: conj_cong) + apply (clarsimp simp: conj_ac invs_valid_objs' cong: conj_cong) apply wp apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) - apply (clarsimp simp: to_bool_def true_def) + apply clarsimp apply (strengthen ko_at'_obj_at'_field) - apply (clarsimp cong: imp_cong simp: ko_at'_obj_at'_field to_bool_def true_def) + apply (clarsimp cong: imp_cong simp: ko_at'_obj_at'_field) apply wp apply clarsimp (* when runnable tcbSchedEnqueue curThread *) @@ -612,21 +641,21 @@ lemma schedule_ccorres: apply (clarsimp simp: invs'_bitmapQ_no_L1_orphans invs_ksCurDomain_maxDomain') apply (fastforce dest: invs_sch_act_wf') - apply (wp | clarsimp simp: dc_def)+ + apply wpsimp+ apply (vcg exspec=tcbSchedEnqueue_modifies) apply wp - apply (clarsimp simp: to_bool_def false_def) apply vcg - apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs' - dc_def)+ + apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_valid_objs') apply (frule invs_sch_act_wf') apply (frule 
tcb_at_invs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') apply (rule conjI) apply (clarsimp dest!: rf_sr_cscheduler_relation simp: cscheduler_action_relation_def) apply (rule conjI; clarsimp) apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: cscheduler_action_relation_def typ_heap_simps max_word_not_0 + apply (clarsimp simp: cscheduler_action_relation_def typ_heap_simps split: scheduler_action.splits) apply (frule (1) obj_at_cslift_tcb) apply (clarsimp dest!: rf_sr_cscheduler_relation invs_sch_act_wf' @@ -637,7 +666,7 @@ lemma schedule_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -670,11 +699,7 @@ lemma threadSet_timeSlice_ccorres [corres]: map_to_tcbs_upd) apply (simp add: cep_relations_drop_fun_upd cvariable_relation_upd_const ko_at_projectKO_opt) - apply (rule conjI) defer - apply (erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) apply (simp add: ctcb_relation_def) @@ -687,10 +712,10 @@ lemma timerTick_ccorres: supply subst_all [simp del] apply (cinit) apply (rule ccorres_pre_getCurThread) - apply (ctac add: get_tsType_ccorres2 [where f="\s. ksCurThread_' (globals s)"]) + apply (ctac add: get_tsType_ccorres [where f="\s. ksCurThread_' (globals s)"]) apply (rule ccorres_split_nothrow_novcg) apply wpc - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ (* thread_state.Running *) apply simp apply (rule ccorres_cond_true) @@ -712,17 +737,17 @@ lemma timerTick_ccorres: apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) apply (clarsimp simp: cur_tcb'_def) apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) - apply (ctac add: threadSet_timeSlice_ccorres[unfolded dc_def]) + apply (ctac add: threadSet_timeSlice_ccorres) apply (rule ccorres_rhs_assoc)+ apply (ctac) apply simp apply (ctac (no_vcg) add: tcbSchedAppend_ccorres) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (wp weak_sch_act_wf_lift_linear threadSet_valid_queues + apply (ctac add: rescheduleRequired_ccorres) + apply (wp weak_sch_act_wf_lift_linear threadSet_pred_tcb_at_state tcbSchedAppend_valid_objs' threadSet_valid_objs' threadSet_tcbDomain_triv | clarsimp simp: st_tcb_at'_def o_def split: if_splits)+ apply (vcg exspec=tcbSchedDequeue_modifies) - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ apply ceqv apply (clarsimp simp: decDomainTime_def numDomains_sge_1_simp) apply (rule ccorres_when[where R=\]) @@ -734,7 +759,6 @@ lemma timerTick_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply ceqv - apply (fold dc_def) apply (rule ccorres_pre_getDomainTime) apply (rename_tac rva rv'a rvb) apply (rule_tac P'="{s. 
ksDomainTime_' (globals s) = rvb}" in ccorres_inst, simp)
@@ -742,13 +766,13 @@ lemma timerTick_ccorres:
              apply clarsimp
              apply (rule ccorres_guard_imp2)
               apply (rule ccorres_cond_true)
-              apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def])
+              apply (ctac add: rescheduleRequired_ccorres)
              apply clarsimp
              apply assumption
             apply clarsimp
             apply (rule ccorres_guard_imp2)
              apply (rule ccorres_cond_false)
-             apply (rule ccorres_return_Skip[unfolded dc_def])
+             apply (rule ccorres_return_Skip)
             apply clarsimp
            apply wp
           apply (clarsimp simp: guard_is_UNIV_def)
diff --git a/proof/crefine/ARM/StateRelation_C.thy b/proof/crefine/ARM/StateRelation_C.thy
index 1f68d18a5b..b026a1d1df 100644
--- a/proof/crefine/ARM/StateRelation_C.thy
+++ b/proof/crefine/ARM/StateRelation_C.thy
@@ -20,8 +20,7 @@ definition
 definition
   "option_to_ptr \<equiv> Ptr o option_to_0"

-(* used for bound ntfn/tcb *)
-definition
+definition
   option_to_ctcb_ptr :: "machine_word option \<Rightarrow> tcb_C ptr" where
   "option_to_ctcb_ptr x \<equiv> case x of None \<Rightarrow> NULL | Some t \<Rightarrow> tcb_ptr_to_ctcb_ptr t"

@@ -237,9 +236,14 @@ fun
 | "register_from_H ARM.FaultIP = scast Kernel_C.FaultIP"

 definition
-  ccontext_relation :: "(MachineTypes.register \<Rightarrow> word32) \<Rightarrow> user_context_C \<Rightarrow> bool"
+  cregs_relation :: "(MachineTypes.register \<Rightarrow> machine_word) \<Rightarrow> machine_word[registers_count] \<Rightarrow> bool"
+where
+  "cregs_relation Hregs Cregs \<equiv> \<forall>r. Hregs r = Cregs.[unat (register_from_H r)]"
+
+definition
+  ccontext_relation :: "user_context \<Rightarrow> user_context_C \<Rightarrow> bool"
 where
-  "ccontext_relation regs uc \<equiv> \<forall>r. regs r = index (registers_C uc) (unat (register_from_H r))"
+  "ccontext_relation uc_H uc_C \<equiv> cregs_relation (user_regs uc_H) (registers_C uc_C)"

 primrec
   cthread_state_relation_lifted :: "Structures_H.thread_state \<Rightarrow>
@@ -344,7 +348,9 @@ where
      \<and> tcbTimeSlice atcb = unat (tcbTimeSlice_C ctcb)
      \<and> cfault_rel (tcbFault atcb) (seL4_Fault_lift (tcbFault_C ctcb))
                   (lookup_fault_lift (tcbLookupFailure_C ctcb))
-     \<and> option_to_ptr (tcbBoundNotification atcb) = tcbBoundNotification_C ctcb"
+     \<and> option_to_ptr (tcbBoundNotification atcb) = tcbBoundNotification_C ctcb
+     \<and> option_to_ctcb_ptr (tcbSchedPrev atcb) = tcbSchedPrev_C ctcb
+     \<and> option_to_ctcb_ptr (tcbSchedNext atcb) = tcbSchedNext_C ctcb"

 abbreviation
   "ep_queue_relation' \<equiv> tcb_queue_relation' tcbEPNext_C tcbEPPrev_C"
@@ -567,17 +573,17 @@ definition
 where
   "cready_queues_index_to_C qdom prio \<equiv> (unat qdom) * numPriorities + (unat prio)"

-definition cready_queues_relation ::
-  "tcb_C typ_heap \<Rightarrow> (tcb_queue_C[num_tcb_queues]) \<Rightarrow> (domain \<times> priority \<Rightarrow> ready_queue) \<Rightarrow> bool"
-where
-  "cready_queues_relation h_tcb queues aqueues \<equiv>
-     \<forall>qdom prio. ((qdom \<ge> ucast minDom \<and> qdom \<le> ucast maxDom \<and>
-                   prio \<ge> ucast minPrio \<and> prio \<le> ucast maxPrio) \<longrightarrow>
-                  (let cqueue = index queues (cready_queues_index_to_C qdom prio) in
-                   sched_queue_relation' h_tcb (aqueues (qdom, prio)) (head_C cqueue) (end_C cqueue)))
-               \<and> (\<not> (qdom \<ge> ucast minDom \<and> qdom \<le> ucast maxDom \<and>
-                     prio \<ge> ucast minPrio \<and> prio \<le> ucast maxPrio) \<longrightarrow> aqueues (qdom, prio) = [])"
+definition ctcb_queue_relation :: "tcb_queue \<Rightarrow> tcb_queue_C \<Rightarrow> bool" where
+  "ctcb_queue_relation aqueue cqueue \<equiv>
+     head_C cqueue = option_to_ctcb_ptr (tcbQueueHead aqueue)
+     \<and> end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd aqueue)"
+definition cready_queues_relation ::
+  "(domain \<times> priority \<Rightarrow> ready_queue) \<Rightarrow> (tcb_queue_C[num_tcb_queues]) \<Rightarrow> bool"
+  where
+  "cready_queues_relation aqueues cqueues \<equiv>
+     \<forall>d p.
d \ maxDomain \ p \ maxPriority + \ ctcb_queue_relation (aqueues (d, p)) (index cqueues (cready_queues_index_to_C d p))" abbreviation "cte_array_relation astate cstate @@ -715,9 +721,7 @@ where "cstate_relation astate cstate \ let cheap = t_hrs_' cstate in cpspace_relation (ksPSpace astate) (underlying_memory (ksMachineState astate)) cheap \ - cready_queues_relation (clift cheap) - (ksReadyQueues_' cstate) - (ksReadyQueues astate) \ + cready_queues_relation (ksReadyQueues astate) (ksReadyQueues_' cstate) \ zero_ranges_are_zero (gsUntypedZeroRanges astate) cheap \ cbitmap_L1_relation (ksReadyQueuesL1Bitmap_' cstate) (ksReadyQueuesL1Bitmap astate) \ cbitmap_L2_relation (ksReadyQueuesL2Bitmap_' cstate) (ksReadyQueuesL2Bitmap astate) \ diff --git a/proof/crefine/ARM/SyscallArgs_C.thy b/proof/crefine/ARM/SyscallArgs_C.thy index 1e79117b7e..51e037db57 100644 --- a/proof/crefine/ARM/SyscallArgs_C.thy +++ b/proof/crefine/ARM/SyscallArgs_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -46,10 +47,8 @@ lemma replyOnRestart_invs'[wp]: "\invs'\ replyOnRestart thread reply isCall \\rv. invs'\" including no_pre apply (simp add: replyOnRestart_def) - apply (wp setThreadState_nonqueued_state_update rfk_invs' static_imp_wp) - apply (rule hoare_vcg_all_lift) - apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_vcg_all_lift rfk_ksQ) - apply (rule hoare_strengthen_post, rule gts_sp') + apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_weak_lift_imp) + apply (rule hoare_strengthen_post, rule gts_sp') apply (clarsimp simp: pred_tcb_at') apply (auto elim!: pred_tcb'_weakenE st_tcb_ex_cap'' dest: st_tcb_at_idle_thread') @@ -288,7 +287,7 @@ lemma ccorres_invocationCatch_Inr: if reply = [] then liftE (replyOnRestart thread [] isCall) \ returnOk () else liftE (replyOnRestart thread reply isCall) odE od) c" - apply (simp add: invocationCatch_def liftE_bindE o_xo_injector) + apply (simp add: invocationCatch_def liftE_bindE o_xo_injector cong: ccorres_all_cong) apply (subst ccorres_liftM_simp[symmetric]) apply (simp add: liftM_def bind_assoc bindE_def) apply (rule_tac f="\f. ccorres rvr xs P P' hs f c" for rvr xs in arg_cong) @@ -406,11 +405,13 @@ lemma is_syscall_error_codes: by ((rule iffD2[OF is_syscall_error_code_def], intro allI, rule conseqPre, vcg, safe, (simp_all add: o_def)?)+) -lemma syscall_error_throwError_ccorres_direct: +lemma syscall_error_throwError_ccorres_direct_gen: "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; \err' ft'. syscall_error_to_H (f err') ft' = Some err \ \ - ccorres (intr_and_se_rel \ dc) (liftxf errstate id v' ret__unsigned_long_') + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') \ (UNIV) (SKIP # hs) (throwError (Inl err)) code" apply (rule ccorres_from_vcg_throws) @@ -420,28 +421,35 @@ lemma syscall_error_throwError_ccorres_direct: apply (simp add: syscall_error_rel_def exception_defs) done -lemma syscall_error_throwError_ccorres_succs: +lemma syscall_error_throwError_ccorres_succs_gen: "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; \err' ft'. 
syscall_error_to_H (f err') ft' = Some err \ \ - ccorres (intr_and_se_rel \ dc) (liftxf errstate id v' ret__unsigned_long_') + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') \ (UNIV) (SKIP # hs) (throwError (Inl err)) (code ;; remainder)" apply (rule ccorres_guard_imp2, rule ccorres_split_throws) - apply (erule syscall_error_throwError_ccorres_direct) - apply simp + apply (erule syscall_error_throwError_ccorres_direct_gen; assumption) apply (rule HoarePartialProps.augment_Faults) apply (erule iffD1[OF is_syscall_error_code_def, THEN spec]) apply simp+ done -lemmas syscall_error_throwError_ccorres_n = - is_syscall_error_codes[THEN syscall_error_throwError_ccorres_direct, +lemmas syscall_error_throwError_ccorres_n_gen = + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_direct_gen, simplified o_apply] - is_syscall_error_codes[THEN syscall_error_throwError_ccorres_succs, + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_succs_gen, simplified o_apply] +lemmas syscall_error_throwError_ccorres_n = + syscall_error_throwError_ccorres_n_gen[where arrel="intr_and_se_rel \ dc", simplified] + +lemmas syscall_error_throwError_ccorres_n_inl_rrel = + syscall_error_throwError_ccorres_n_gen[where arrel="inl_rrel (intr_and_se_rel \ dc)", simplified] + definition idButNot :: "'a \ 'a" where "idButNot x = x" @@ -622,15 +630,16 @@ lemma asUser_const_rv: lemma getMRs_tcbContext: "\\s. n < unat n_msgRegisters \ n < unat (msgLength info) \ thread = ksCurThread s \ cur_tcb' s\ getMRs thread buffer info - \\rv s. obj_at' (\tcb. atcbContextGet (tcbArch tcb) (ARM_H.msgRegisters ! n) = rv ! n) (ksCurThread s) s\" + \\rv s. obj_at' (\tcb. user_regs (atcbContextGet (tcbArch tcb)) (ARM_H.msgRegisters ! n) = rv ! n) + (ksCurThread s) s\" apply (rule hoare_assume_pre) apply (elim conjE) apply (thin_tac "thread = t" for t) apply (clarsimp simp add: getMRs_def) apply (wp|wpc)+ - apply (rule_tac P="n < length x" in hoare_gen_asm) + apply (rule_tac P="n < length rv" in hoare_gen_asm) apply (clarsimp simp: nth_append) - apply (wp mapM_wp' static_imp_wp)+ + apply (wp mapM_wp' hoare_weak_lift_imp)+ apply simp apply (rule asUser_cur_obj_at') apply (simp add: getRegister_def msgRegisters_unfold) @@ -754,11 +763,13 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_move_c_guard_tcb_ctes)+ apply (ctac (no_vcg)) + apply (rename_tac bufferCap bufferCap') apply csymbr - apply (rule_tac b="isArchObjectCap rva \ isPageCap (capCap rva)" in ccorres_case_bools') + apply (rule_tac b="isArchObjectCap bufferCap \ isPageCap (capCap bufferCap)" + in ccorres_case_bools') apply simp apply (rule ccorres_symb_exec_r) - apply (rule_tac b="capVPSize (capCap rva) \ ARMSmallPage" in ccorres_case_bools') + apply (rule_tac b="capVPSize (capCap bufferCap) \ ARMSmallPage" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -766,7 +777,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap bufferCap" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -792,7 +803,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (frule capFVMRights_range) apply (simp add: cap_frame_cap_lift generic_frame_cap_get_capFVMRights_CL_def) - apply (clarsimp simp: cap_to_H_def 
vmrights_to_H_def to_bool_def + apply (clarsimp simp: cap_to_H_def vmrights_to_H_def word_le_make_less Kernel_C.VMNoAccess_def Kernel_C.VMReadWrite_def Kernel_C.VMReadOnly_def Kernel_C.VMKernelOnly_def @@ -820,7 +831,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap bufferCap" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -847,7 +858,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (frule capFVMRights_range) apply (simp add: cap_frame_cap_lift generic_frame_cap_get_capFVMRights_CL_def) - apply (clarsimp simp: cap_to_H_def vmrights_to_H_def to_bool_def + apply (clarsimp simp: cap_to_H_def vmrights_to_H_def word_le_make_less Kernel_C.VMNoAccess_def Kernel_C.VMReadWrite_def Kernel_C.VMReadOnly_def Kernel_C.VMKernelOnly_def @@ -1048,7 +1059,7 @@ lemma getMRs_rel: getMRs thread buffer mi \\args. getMRs_rel args buffer\" apply (simp add: getMRs_rel_def) apply (rule hoare_pre) - apply (rule_tac x=mi in hoare_vcg_exI) + apply (rule_tac x=mi in hoare_exI) apply wp apply (rule_tac Q="\rv s. thread = ksCurThread s \ fst (getMRs thread buffer mi s) = {(rv,s)}" in hoare_strengthen_post) apply (wp det_result det_wp_getMRs) @@ -1176,7 +1187,10 @@ lemma getSyscallArg_ccorres_foo: apply (simp add: word_less_nat_alt split: if_split) apply (rule ccorres_add_return2) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="\s. n < unat (scast n_msgRegisters :: word32) \ obj_at' (\tcb. atcbContextGet (tcbArch tcb) (ARM_H.msgRegisters!n) = x!n) (ksCurThread s) s" + apply (rule_tac P="\s. n < unat (scast n_msgRegisters :: word32) + \ obj_at' (\tcb. 
user_regs (atcbContextGet (tcbArch tcb)) + (ARM_H.msgRegisters!n) = x!n) + (ksCurThread s) s" and P' = UNIV in ccorres_from_vcg_split_throws) apply vcg @@ -1187,14 +1201,14 @@ lemma getSyscallArg_ccorres_foo: apply (clarsimp simp: typ_heap_simps') apply (clarsimp simp: ctcb_relation_def ccontext_relation_def msgRegisters_ccorres atcbContextGet_def - carch_tcb_relation_def) + carch_tcb_relation_def cregs_relation_def) apply (subst (asm) msgRegisters_ccorres) apply (clarsimp simp: n_msgRegisters_def) apply (simp add: n_msgRegisters_def word_less_nat_alt) apply (simp add: index_msgRegisters_less unat_less_helper) apply wp[1] apply (wp getMRs_tcbContext) - apply simp + apply fastforce apply (rule ccorres_seq_skip [THEN iffD2]) apply (rule ccorres_add_return2) apply (rule ccorres_symb_exec_l) @@ -1218,7 +1232,7 @@ lemma getSyscallArg_ccorres_foo: in hoare_pre(1)) apply (wp getMRs_user_word) apply (clarsimp simp: msgMaxLength_def unat_less_helper) - apply simp + apply fastforce apply (clarsimp simp: sysargs_rel_def sysargs_rel_n_def) apply (rule conjI, clarsimp simp: unat_of_nat32 word_bits_def) apply (drule equalityD2) diff --git a/proof/crefine/ARM/Syscall_C.thy b/proof/crefine/ARM/Syscall_C.thy index 79bd843bb8..f46bb5918d 100644 --- a/proof/crefine/ARM/Syscall_C.thy +++ b/proof/crefine/ARM/Syscall_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -19,9 +20,6 @@ end context kernel_m begin -(* FIXME: should do this from the beginning *) -declare true_def [simp] false_def [simp] - definition one_on_true :: "bool \ nat" where @@ -45,8 +43,7 @@ lemma cap_cases_one_on_true_sum: lemma performInvocation_Endpoint_ccorres: "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and st_tcb_at' simple' thread and ep_at' epptr - and sch_act_sane and (\s. thread = ksCurThread s - \ (\p. ksCurThread s \ set (ksReadyQueues s p)))) + and sch_act_sane and (\s. thread = ksCurThread s)) (UNIV \ {s. block_' s = from_bool blocking} \ {s. call_' s = from_bool do_call} \ {s. badge_' s = badge} @@ -119,7 +116,6 @@ lemma decodeInvocation_ccorres: and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) and (\s. \v \ set extraCaps. s \' fst v \ cte_at' (snd v) s) and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). ex_nonz_cap_to' y s) - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) and sysargs_rel args buffer) (UNIV \ {s. call_' s = from_bool isCall} \ {s. 
block_' s = from_bool isBlocking} @@ -196,7 +192,7 @@ lemma decodeInvocation_ccorres: apply simp apply (rule hoare_use_eq[where f=ksCurThread]) apply (wp sts_invs_minor' sts_st_tcb_at'_cases - setThreadState_ct' hoare_vcg_all_lift sts_ksQ')+ + setThreadState_ct' hoare_vcg_all_lift)+ apply simp apply (vcg exspec=setThreadState_modifies) apply vcg @@ -266,22 +262,22 @@ lemma decodeInvocation_ccorres: apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, rule decodeTCBInvocation_ccorres) apply assumption apply (simp+)[3] apply (rule ccorres_Cond_rhs) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeDomainInvocation_ccorres[unfolded o_def], + erule decodeDomainInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeCNodeInvocation_ccorres[unfolded o_def], + erule decodeCNodeInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply simp @@ -313,7 +309,7 @@ lemma decodeInvocation_ccorres: apply fastforce apply (simp add: cap_lift_capEPBadge_mask_eq) apply (clarsimp simp: rf_sr_ksCurThread Collect_const_mem - cap_get_tag_isCap "StrictC'_thread_state_defs") + cap_get_tag_isCap ThreadState_defs) apply (frule word_unat.Rep_inverse') apply (simp add: cap_get_tag_isCap[symmetric] cap_get_tag_ReplyCap) apply (rule conjI) @@ -445,7 +441,7 @@ lemma handleInvocation_def2: lemma thread_state_to_tsType_eq_Restart: "(thread_state_to_tsType ts = scast ThreadState_Restart) = (ts = Restart)" - by (cases ts, simp_all add: "StrictC'_thread_state_defs") + by (cases ts, simp_all add: ThreadState_defs) lemma wordFromMessageInfo_spec: "\s. \\ {s} Call wordFromMessageInfo_'proc @@ -463,7 +459,7 @@ lemma wordFromMessageInfo_spec: lemma handleDoubleFault_ccorres: "ccorres dc xfdc (invs' and tcb_at' tptr and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and - sch_act_not tptr and (\s. \p. tptr \ set (ksReadyQueues s p))) + sch_act_not tptr) (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) [] (handleDoubleFault tptr ex1 ex2) (Call handleDoubleFault_'proc)" @@ -477,7 +473,7 @@ lemma handleDoubleFault_ccorres: apply (simp add: getRestartPC_def) apply wp apply clarsimp - apply (simp add: ThreadState_Inactive_def) + apply (simp add: ThreadState_defs) apply (fastforce simp: valid_tcb_state'_def) done @@ -541,8 +537,7 @@ lemma hrs_mem_update_use_hrs_mem: lemma sendFaultIPC_ccorres: "ccorres (cfault_rel2 \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and st_tcb_at' simple' tptr and sch_act_not tptr and - (\s. \p. tptr \ set (ksReadyQueues s p))) + (invs' and st_tcb_at' simple' tptr and sch_act_not tptr) (UNIV \ {s. (cfault_rel (Some fault) (seL4_Fault_lift(current_fault_' (globals s))) (lookup_fault_lift(current_lookup_fault_' (globals s))))} \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) @@ -621,15 +616,15 @@ lemma sendFaultIPC_ccorres: apply (ctac (no_vcg) add: sendIPC_ccorres) apply (ctac (no_vcg) add: ccorres_return_CE [unfolded returnOk_def comp_def]) apply wp - apply (wp threadSet_pred_tcb_no_state threadSet_invs_trivial threadSet_typ_at_lifts - | simp)+ + apply (wpsimp wp: threadSet_invs_trivial) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_typ_at_lifts) apply (clarsimp simp: guard_is_UNIV_def) apply (subgoal_tac "capEPBadge epcap && mask 28 = capEPBadge epcap") apply (clarsimp simp: cap_get_tag_isCap isEndpointCap_def isCap_simps ccap_relation_ep_helpers) apply (frule cap_get_tag_isCap(4)[symmetric]) - apply (clarsimp simp: cap_get_tag_EndpointCap to_bool_def) + apply (clarsimp simp: cap_get_tag_EndpointCap) apply (drule cap_get_tag_isCap(4) [symmetric]) apply (clarsimp simp: isCap_simps cap_endpoint_cap_lift cap_lift_capEPBadge_mask_eq) apply (clarsimp simp: case_bool_If) @@ -657,10 +652,9 @@ lemma sendFaultIPC_ccorres: apply vcg apply (clarsimp simp: inQ_def) apply (rule_tac Q="\a b. invs' b \ st_tcb_at' simple' tptr b - \ sch_act_not tptr b \ valid_cap' a b - \ (\p. tptr \ set (ksReadyQueues b p))" + \ sch_act_not tptr b \ valid_cap' a b" and E="\ _. \" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (wp) apply (clarsimp simp: isCap_simps) apply (clarsimp simp: valid_cap'_def pred_tcb_at') @@ -679,8 +673,7 @@ lemma sendFaultIPC_ccorres: done lemma handleFault_ccorres: - "ccorres dc xfdc (invs' and st_tcb_at' simple' t and - sch_act_not t and (\s. \p. t \ set (ksReadyQueues s p))) + "ccorres dc xfdc (invs' and st_tcb_at' simple' t and sch_act_not t) (UNIV \ {s. (cfault_rel (Some flt) (seL4_Fault_lift(current_fault_' (globals s))) (lookup_fault_lift(current_lookup_fault_' (globals s))) )} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t}) @@ -697,12 +690,12 @@ lemma handleFault_ccorres: apply (rule ccorres_return_Skip') apply clarsimp apply (rule ccorres_cond_univ) - apply (ctac (no_vcg) add: handleDoubleFault_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: handleDoubleFault_ccorres) apply (simp add: sendFaultIPC_def) apply wp - apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] apply clarsimp - apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] apply (wp) apply (simp add: guard_is_UNIV_def) apply (simp add: guard_is_UNIV_def) @@ -755,8 +748,7 @@ lemma getMessageInfo_msgLength': lemma handleInvocation_ccorres: "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and - ct_active' and sch_act_simple and - (\s. \x. ksCurThread s \ set (ksReadyQueues s x))) + ct_active' and sch_act_simple) (UNIV \ {s. isCall_' s = from_bool isCall} \ {s. isBlocking_' s = from_bool isBlocking}) [] (handleInvocation isCall isBlocking) (Call handleInvocation_'proc)" @@ -881,17 +873,16 @@ lemma handleInvocation_ccorres: apply (simp add: invocationCatch_def o_def) apply (rule_tac Q="\rv'. invs' and tcb_at' rv" and E="\ft. 
invs' and tcb_at' rv" - in hoare_post_impErr) - apply (wp hoare_split_bind_case_sumE - alternative_wp hoare_drop_imps + in hoare_strengthen_postE) + apply (wp hoare_split_bind_case_sumE hoare_drop_imps setThreadState_nonqueued_state_update ct_in_state'_set setThreadState_st_tcb - hoare_vcg_all_lift sts_ksQ' + hoare_vcg_all_lift | wpc | wps)+ apply auto[1] apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) - apply (simp add: "StrictC'_thread_state_defs" mask_def) + apply (simp add: ThreadState_defs mask_def) apply (simp add: typ_heap_simps) apply (case_tac ts, simp_all add: cthread_state_relation_def)[1] apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) @@ -1048,7 +1039,7 @@ lemma handleReply_ccorres: apply (rule ccorres_cond_true) apply simp apply (rule ccorres_return_void_catchbrk) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply (vcg exspec=doReplyTransfer_modifies) apply (rule ccorres_fail)+ apply (wpc, simp_all) @@ -1066,7 +1057,6 @@ lemma handleReply_ccorres: apply (csymbr, csymbr, csymbr) apply simp apply (rule ccorres_assert2) - apply (fold dc_def) apply (rule ccorres_add_return2) apply (ctac (no_vcg)) apply (rule ccorres_return_void_catchbrk) @@ -1144,8 +1134,7 @@ lemma handleRecv_ccorres: notes rf_sr_upd_safe[simp del] shows "ccorres dc xfdc - (\s. invs' s \ st_tcb_at' simple' (ksCurThread s) s - \ sch_act_sane s \ (\p. ksCurThread s \ set (ksReadyQueues s p))) + (\s. invs' s \ st_tcb_at' simple' (ksCurThread s) s \ sch_act_sane s) {s. isBlocking_' s = from_bool isBlocking} [] (handleRecv isBlocking) @@ -1188,7 +1177,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1206,10 +1195,10 @@ lemma handleRecv_ccorres: apply (simp add: liftE_bind) apply (ctac) - apply (rule_tac P="\s. ksCurThread s = rv" in ccorres_cross_over_guard) - apply (ctac add: receiveIPC_ccorres[unfolded dc_def]) + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: receiveIPC_ccorres) - apply (wp deleteCallerCap_ksQ_ct' hoare_vcg_all_lift) + apply (wp hoare_vcg_all_lift) apply (rule conseqPost[where Q'=UNIV and A'="{}"], vcg exspec=deleteCallerCap_modifies) apply (clarsimp dest!: rf_sr_ksCurThread) apply simp @@ -1255,7 +1244,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. 
UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1272,7 +1261,7 @@ lemma handleRecv_ccorres: apply (clarsimp simp: rf_sr_upd_safe) apply (simp add: liftE_bind) - apply (ctac add: receiveSignal_ccorres[unfolded dc_def]) + apply (ctac add: receiveSignal_ccorres) apply clarsimp apply (vcg exspec=handleFault_modifies) apply (rule ccorres_cond_true_seq) @@ -1285,7 +1274,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) apply (rule ccorres_add_return2) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_break_return[where P=\ and P'=UNIV]) apply simp+ apply wp @@ -1306,7 +1295,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_symb_exec_r) apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_upd_safe) @@ -1319,9 +1308,9 @@ lemma handleRecv_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=handleFault_modifies) @@ -1332,13 +1321,11 @@ lemma handleRecv_ccorres: apply clarsimp apply (rename_tac thread epCPtr) apply (rule_tac Q'="(\rv s. invs' s \ st_tcb_at' simple' thread s - \ sch_act_sane s \ (\p. thread \ set (ksReadyQueues s p)) \ thread = ksCurThread s - \ valid_cap' rv s)" in hoare_post_imp_R[rotated]) - apply (clarsimp simp: sch_act_sane_def) - apply (auto dest!: obj_at_valid_objs'[OF _ invs_valid_objs'] - simp: projectKOs valid_obj'_def, - auto simp: pred_tcb_at'_def obj_at'_def objBits_simps projectKOs ct_in_state'_def)[1] - apply wp + \ sch_act_sane s \ thread = ksCurThread s + \ valid_cap' rv s)" in hoare_strengthen_postE_R[rotated]) + apply (intro conjI impI allI; clarsimp simp: sch_act_sane_def) + apply (fastforce dest: obj_at_valid_objs'[OF _ invs_valid_objs'] ko_at_valid_ntfn') + apply wp apply clarsimp apply (vcg exspec=isStopped_modifies exspec=lookupCap_modifies) @@ -1355,8 +1342,8 @@ lemma handleRecv_ccorres: apply (frule tcb_aligned'[OF tcb_at_invs']) apply clarsimp apply (intro conjI impI allI) - apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift - lookup_fault_missing_capability_lift is_cap_fault_def)+ + apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift + lookup_fault_missing_capability_lift is_cap_fault_def)+ apply (clarsimp simp: cap_get_tag_NotificationCap) apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption, erule ko_at_projectKO_opt) apply (clarsimp simp: cnotification_relation_def Let_def) @@ -1387,7 +1374,7 @@ lemma handleYield_ccorres: apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear tcbSchedAppend_valid_objs') apply (vcg exspec= tcbSchedAppend_modifies) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues) + apply (wp weak_sch_act_wf_lift_linear) apply (vcg exspec= tcbSchedDequeue_modifies) apply (clarsimp simp: tcb_at_invs' invs_valid_objs' valid_objs'_maxPriority valid_objs'_maxDomain) @@ -1533,11 +1520,11 @@ lemma 
handleInterrupt_ccorres: apply (subst doMachineOp_bind) apply (rule maskInterrupt_empty_fail) apply (rule ackInterrupt_empty_fail) - apply (ctac add: maskInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: maskInterrupt_ccorres) apply (subst bind_return_unit[where f="doMachineOp (ackInterrupt irq)"]) - apply (ctac add: ackInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=ackInterrupt_modifies) @@ -1556,7 +1543,7 @@ lemma handleInterrupt_ccorres: apply (rule getIRQSlot_ccorres3) apply (rule ccorres_getSlotCap_cte_at) apply (rule_tac P="cte_at' rv" in ccorres_cross_over_guard) - supply ccorres_move_array_assertion_tcb_ctes [corres_pre del] + supply ccorres_move_array_assertion_tcb_ctes [ccorres_pre del] apply ctac apply csymbr apply csymbr @@ -1575,7 +1562,7 @@ lemma handleInterrupt_ccorres: apply (ctac (no_vcg) add: sendSignal_ccorres) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp+ apply (simp del: Collect_const) apply (rule ccorres_cond_true_seq) @@ -1584,7 +1571,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp apply (rule_tac P=\ and P'="{s. ret__int_' s = 0 \ cap_get_tag cap \ scast cap_notification_cap}" in ccorres_inst) apply (clarsimp simp: isCap_simps simp del: Collect_const) @@ -1596,7 +1583,7 @@ lemma handleInterrupt_ccorres: rule ccorres_cond_false_seq, simp, rule ccorres_cond_false_seq, simp, ctac (no_vcg) add: maskInterrupt_ccorres, - ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def], + ctac (no_vcg) add: ackInterrupt_ccorres, wp, simp)+) apply (wp getSlotCap_wp) apply simp @@ -1605,7 +1592,6 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_move_const_guards)+ apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac (no_vcg) add: timerTick_ccorres) apply (ctac (no_vcg) add: resetTimer_ccorres) @@ -1617,7 +1603,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) apply (ctac add: ccorres_handleReserveIRQ) - apply (ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: ackInterrupt_ccorres) apply wp apply vcg apply (simp add: sint_ucast_eq_uint is_down uint_up_ucast is_up) diff --git a/proof/crefine/ARM/TcbAcc_C.thy b/proof/crefine/ARM/TcbAcc_C.thy index 4fd5f9ddda..dc3f316a50 100644 --- a/proof/crefine/ARM/TcbAcc_C.thy +++ b/proof/crefine/ARM/TcbAcc_C.thy @@ -49,6 +49,24 @@ lemma threadGet_eq: apply simp done +lemma get_tsType_ccorres[corres]: + "ccorres (\r r'. r' = thread_state_to_tsType r) ret__unsigned_' (tcb_at' thread) + ({s. f s = tcb_ptr_to_ctcb_ptr thread} \ + {s. 
cslift s (Ptr &(f s\[''tcbState_C''])) = Some (thread_state_' s)}) [] + (getThreadState thread) (Call thread_state_get_tsType_'proc)" + unfolding getThreadState_def + apply (rule ccorres_from_spec_modifies [where P=\, simplified]) + apply (rule thread_state_get_tsType_spec) + apply (rule thread_state_get_tsType_modifies) + apply simp + apply (frule (1) obj_at_cslift_tcb) + apply (clarsimp simp: typ_heap_simps) + apply (rule bexI [rotated, OF threadGet_eq], assumption) + apply simp + apply (drule ctcb_relation_thread_state_to_tsType) + apply simp + done + lemma threadGet_obj_at2: "\\\ threadGet f thread \\v. obj_at' (\t. f t = v) thread\" apply (rule hoare_post_imp) @@ -76,13 +94,14 @@ lemma getRegister_ccorres [corres]: apply (drule (1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps register_from_H_less) apply (clarsimp simp: getRegister_def typ_heap_simps) - apply (rule_tac x = "((atcbContextGet o tcbArch) ko reg, \)" in bexI [rotated]) + apply (rule_tac x = "((user_regs o atcbContextGet o tcbArch) ko reg, \)" in bexI[rotated]) apply (simp add: in_monad' asUser_def select_f_def split_def) apply (subst arg_cong2 [where f = "(\)"]) defer apply (rule refl) apply (erule threadSet_eq) - apply (clarsimp simp: ctcb_relation_def ccontext_relation_def carch_tcb_relation_def) + apply (clarsimp simp: ctcb_relation_def ccontext_relation_def cregs_relation_def + carch_tcb_relation_def) apply (wp threadGet_obj_at2)+ apply simp apply simp @@ -110,7 +129,7 @@ lemma threadSet_corres_lemma: assumes spec: "\s. \\ \s. P s\ Call f {t. Q s t}" and mod: "modifies_heap_spec f" and rl: "\\ x t ko. \(\, x) \ rf_sr; Q x t; x \ P'; ko_at' ko thread \\ - \ (\\ksPSpace := ksPSpace \(thread \ KOTCB (g ko))\, + \ (\\ksPSpace := (ksPSpace \)(thread \ KOTCB (g ko))\, t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" and g: "\s x. \tcb_at' thread s; x \ P'; (s, x) \ rf_sr\ \ P x" shows "ccorres dc xfdc (tcb_at' thread) P' [] (threadSet g thread) (Call f)" @@ -139,7 +158,7 @@ lemma threadSet_corres_lemma: lemma threadSet_ccorres_lemma4: - "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := ksPSpace s(thread \ injectKOS (F tcb))\, s') \ rf_sr}; + "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := (ksPSpace s)(thread \ injectKOS (F tcb))\, s') \ rf_sr}; \s s' tcb tcb'. \ (s, s') \ rf_sr; P tcb; ko_at' tcb thread s; cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some tcb'; ctcb_relation tcb tcb'; P' s ; s' \ R\ \ s' \ Q s tcb \ @@ -185,6 +204,27 @@ lemma sanitiseRegister_spec: split: register.split) done +lemma ccorres_pre_getObject_tcb: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\tcb. ko_at' tcb p s \ P tcb s)) + {s. \ tcb tcb'. cslift s (tcb_ptr_to_ctcb_ptr p) = Some tcb' \ ctcb_relation tcb tcb' + \ s \ P' tcb} + hs (getObject p >>= (\rv :: tcb. 
f rv)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_guard_imp2) + apply (rule cc) + apply (rule conjI) + apply (rule_tac Q="ko_at' rv p s" in conjunct1) + apply assumption + apply assumption + apply (wpsimp wp: empty_fail_getObject getTCB_wp)+ + apply (erule cmap_relationE1[OF cmap_relation_tcb], + erule ko_at_projectKO_opt) + apply simp + done + end end diff --git a/proof/crefine/ARM/TcbQueue_C.thy b/proof/crefine/ARM/TcbQueue_C.thy index ccc1d1bfe7..6bbacbab10 100644 --- a/proof/crefine/ARM/TcbQueue_C.thy +++ b/proof/crefine/ARM/TcbQueue_C.thy @@ -844,49 +844,6 @@ lemma tcb_queue_relation'_prev_mask: shows "ptr_val (getPrev tcb) && ~~ mask bits = ptr_val (getPrev tcb)" by (rule tcb_queue_relation_prev_mask [OF tcb_queue_relation'_queue_rel], fact+) - -lemma cready_queues_relation_null_queue_ptrs: - assumes rel: "cready_queues_relation mp cq aq" - and same: "option_map tcb_null_ep_ptrs \ mp' = option_map tcb_null_ep_ptrs \ mp" - shows "cready_queues_relation mp' cq aq" - using rel - apply (clarsimp simp: cready_queues_relation_def Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp, (erule conjI)+, assumption) - apply (clarsimp simp: tcb_queue_relation'_def) - apply (erule iffD2 [OF tcb_queue_relation_only_next_prev, rotated -1]) - apply (rule ext) - apply (case_tac "mp' x") - apply (frule compD [OF same]) - apply simp - apply (frule compD [OF same]) - apply (clarsimp simp: tcb_null_ep_ptrs_def) - apply (case_tac z, case_tac a) - apply simp - \ \clag\ - apply (rule ext) - apply (case_tac "mp' x") - apply (frule compD [OF same]) - apply simp - apply (frule compD [OF same]) - apply (clarsimp simp: tcb_null_ep_ptrs_def) - apply (case_tac z, case_tac a) - apply simp - done - -lemma cready_queues_relation_not_queue_ptrs: - assumes rel: "cready_queues_relation mp cq aq" - and same: "option_map tcbSchedNext_C \ mp' = option_map tcbSchedNext_C \ mp" - "option_map tcbSchedPrev_C \ mp' = option_map tcbSchedPrev_C \ mp" - shows "cready_queues_relation mp' cq aq" - using rel - apply (clarsimp simp: cready_queues_relation_def tcb_queue_relation'_def Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp, (erule conjI)+, assumption) - apply clarsimp - apply (erule iffD2 [OF tcb_queue_relation_only_next_prev, rotated -1]) - apply (rule same) - apply (rule same) - done - lemma ntfn_ep_disjoint: assumes srs: "sym_refs (state_refs_of' s)" and epat: "ko_at' ep epptr s" @@ -970,8 +927,8 @@ lemma cpspace_relation_ntfn_update_ntfn: and cp: "cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals t))" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" using koat invs cp rel apply - apply (subst map_comp_update) @@ -1054,12 +1011,10 @@ lemma rf_sr_tcb_update_no_queue: (tcb_ptr_to_ctcb_ptr thread) ctcb) (t_hrs_' (globals s')); tcbEPNext_C ctcb = tcbEPNext_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); tcbEPPrev_C ctcb = tcbEPPrev_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); - tcbSchedNext_C ctcb = tcbSchedNext_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); - tcbSchedPrev_C ctcb = 
tcbSchedPrev_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes heap_to_user_data_def) @@ -1069,31 +1024,22 @@ lemma rf_sr_tcb_update_no_queue: apply (clarsimp simp: map_comp_update projectKO_opt_tcb cvariable_relation_upd_const typ_heap_simps') apply (intro conjI) - subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_upd_tcb_no_queues, assumption+) - subgoal by (clarsimp intro!: ext) - subgoal by (clarsimp intro!: ext) + subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_upd_tcb_no_queues, assumption+) + apply (rule cendpoint_relation_upd_tcb_no_queues, assumption+) subgoal by (clarsimp intro!: ext) subgoal by (clarsimp intro!: ext) - apply (erule cready_queues_relation_not_queue_ptrs) + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_upd_tcb_no_queues, assumption+) subgoal by (clarsimp intro!: ext) subgoal by (clarsimp intro!: ext) subgoal by (simp add: carch_state_relation_def typ_heap_simps') by (simp add: cmachine_state_relation_def) -lemma rf_sr_tcb_update_no_queue_helper: - "(s, s'\ globals := globals s' \ t_hrs_' := t_hrs_' (globals (undefined - \ globals := (undefined \ t_hrs_' := f (globals s') (t_hrs_' (globals s')) \)\))\\) \ rf_sr - \ (s, globals_update (\v. t_hrs_'_update (f v) v) s') \ rf_sr" - by (simp cong: StateSpace.state.fold_congs globals.fold_congs) - -lemmas rf_sr_tcb_update_no_queue2 - = rf_sr_tcb_update_no_queue_helper [OF rf_sr_tcb_update_no_queue, simplified] +lemmas rf_sr_tcb_update_no_queue2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue, simplified] lemma tcb_queue_relation_not_in_q: "ctcb_ptr_to_tcb_ptr x \ set xs \ @@ -1108,7 +1054,7 @@ lemma rf_sr_tcb_update_not_in_queue: \ live' (KOTCB tcb); invs' s; (\x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes @@ -1122,31 +1068,24 @@ lemma rf_sr_tcb_update_not_in_queue: prefer 2 apply (auto simp: obj_at'_def ko_wp_at'_def)[1] apply (intro conjI) - subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply clarsimp - apply (subgoal_tac "thread \ (fst ` ep_q_refs_of' a)") - apply (clarsimp simp: cendpoint_relation_def Let_def split: Structures_H.endpoint.split) - subgoal by (intro conjI impI allI, simp_all add: image_def tcb_queue_relation_not_in_q)[1] - apply (drule(1) map_to_ko_atI') - apply (drule sym_refs_ko_atD', clarsimp+) - subgoal by blast + subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply clarsimp - apply (subgoal_tac "thread \ (fst ` ntfn_q_refs_of' (ntfnObj a))") - apply (clarsimp simp: cnotification_relation_def Let_def - split: ntfn.splits) - subgoal by (simp add: image_def tcb_queue_relation_not_in_q)[1] + apply (subgoal_tac "thread \ (fst ` ep_q_refs_of' a)") + apply (clarsimp simp: cendpoint_relation_def Let_def split: Structures_H.endpoint.split) + subgoal by (intro conjI impI allI, simp_all add: image_def tcb_queue_relation_not_in_q)[1] apply (drule(1) map_to_ko_atI') apply (drule sym_refs_ko_atD', clarsimp+) subgoal by blast - apply (simp add: cready_queues_relation_def, erule allEI) - apply (clarsimp simp: Let_def) - apply (subst tcb_queue_relation_not_in_q) - apply clarsimp - apply (drule valid_queues_obj_at'D, clarsimp) - apply (clarsimp simp: obj_at'_def projectKOs inQ_def) - subgoal by simp + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply clarsimp + apply (subgoal_tac "thread \ (fst ` ntfn_q_refs_of' (ntfnObj a))") + apply (clarsimp simp: cnotification_relation_def Let_def + split: ntfn.splits) + subgoal by (simp add: image_def tcb_queue_relation_not_in_q)[1] + apply (drule(1) map_to_ko_atI') + apply (drule sym_refs_ko_atD', clarsimp+) + subgoal by blast subgoal by (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') by (simp add: cmachine_state_relation_def) diff --git a/proof/crefine/ARM/Tcb_C.thy b/proof/crefine/ARM/Tcb_C.thy index d2383932ac..73be35101d 100644 --- a/proof/crefine/ARM/Tcb_C.thy +++ b/proof/crefine/ARM/Tcb_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -58,8 +59,6 @@ lemma doMachineOp_sched: done context begin interpretation Arch . (*FIXME: arch_split*) -crunch queues[wp]: setupReplyMaster "valid_queues" - (simp: crunch_simps wp: crunch_wps) crunch curThread [wp]: restart "\s. P (ksCurThread s)" (wp: crunch_wps simp: crunch_simps) @@ -71,8 +70,8 @@ begin lemma getObject_state: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbState_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. 
st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -130,8 +129,8 @@ lemma getObject_state: lemma threadGet_state: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) t' s); ko_at' ko t s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_state [where st=st]) apply (rule exI) @@ -141,8 +140,8 @@ lemma threadGet_state: lemma asUser_state: "\(x,s) \ fst (asUser t' f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ \ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (asUser t' f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (asUser t' f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -239,8 +238,8 @@ lemma asUser_state: lemma doMachineOp_state: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -273,7 +272,7 @@ lemma getMRs_rel_state: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s \ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -352,9 +351,10 @@ lemma ccorres_abstract_known: lemma setPriority_ccorres: "ccorres dc xfdc - (\s. tcb_at' t s \ Invariants_H.valid_queues s \ ksCurDomain s \ maxDomain \ - valid_queues' s \ valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ (priority \ maxPriority)) - (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. prio_' s = ucast priority}) + (\s. tcb_at' t s \ ksCurDomain s \ maxDomain \ + valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ (priority \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s) + ({s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. prio_' s = ucast priority}) [] (setPriority t priority) (Call setPriority_'proc)" apply (cinit lift: tptr_' prio_') apply (ctac(no_vcg) add: tcbSchedDequeue_ccorres) @@ -373,11 +373,11 @@ lemma setPriority_ccorres: apply (rule ccorres_pre_getCurThread) apply (rule_tac R = "\s. 
rv = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) apply (rule ccorres_return_Skip') apply (wp isRunnable_wp) - apply (wpsimp wp: hoare_drop_imps threadSet_valid_queues threadSet_valid_objs' + apply (wpsimp wp: hoare_drop_imps threadSet_valid_objs' weak_sch_act_wf_lift_linear threadSet_pred_tcb_at_state threadSet_tcbDomain_triv simp: st_tcb_at'_def o_def split: if_splits) @@ -386,19 +386,14 @@ lemma setPriority_ccorres: where Q="\rv s. obj_at' (\_. True) t s \ priority \ maxPriority \ - Invariants_H.valid_queues s \ ksCurDomain s \ maxDomain \ valid_objs' s \ - valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s \ - (\d p. \ t \ set (ksReadyQueues s (d, p)))"]) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues tcbSchedDequeue_nonq) + pspace_aligned' s \ pspace_distinct' s"]) + apply (wp weak_sch_act_wf_lift_linear valid_tcb'_def) apply (clarsimp simp: valid_tcb'_tcbPriority_update) apply clarsimp - apply (frule (1) valid_objs'_maxDomain[where t=t]) - apply (frule (1) valid_objs'_maxPriority[where t=t]) - apply simp -done + done lemma setMCPriority_ccorres: "ccorres dc xfdc @@ -453,8 +448,8 @@ lemma checkCapAt_ccorres: apply assumption apply (simp only: when_def if_to_top_of_bind) apply (rule ccorres_if_lhs) - apply (simp add: from_bool_def true_def) - apply (simp add: from_bool_def false_def) + apply simp + apply simp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -550,7 +545,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply csymbr apply (simp add: liftE_bindE[symmetric] bindE_assoc getThreadBufferSlot_def - locateSlot_conv o_def + locateSlot_conv del: Collect_const) apply (simp add: liftE_bindE del: Collect_const) apply (ctac(no_vcg) add: cteDelete_ccorres) @@ -576,13 +571,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (simp add: assertDerived_def bind_assoc del: Collect_const) apply (rule ccorres_symb_exec_l) @@ -596,7 +591,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -605,36 +600,36 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wp (once)) apply (clarsimp simp: guard_is_UNIV_def) - apply (wpsimp wp: when_def static_imp_wp) + apply (wpsimp wp: when_def hoare_weak_lift_imp) apply (strengthen sch_act_wf_weak, wp) apply clarsimp apply wp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (rule hoare_strengthen_post[ where Q= "\rv s. - Invariants_H.valid_queues s \ valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ ((\a b. 
priority = Some (a, b)) \ tcb_at' target s \ ksCurDomain s \ maxDomain \ - valid_queues' s \ fst (the priority) \ maxPriority)"]) + fst (the priority) \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s"]) apply (strengthen sch_act_wf_weak) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (clarsimp split: if_splits) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) apply (rule ccorres_split_nothrow_novcg_dc) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule_tac C'="{s. target = curThread}" and Q="\s. ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -644,25 +639,24 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply (simp add: when_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbBuffer_def size_of_def cte_level_bits_def tcbIPCBufferSlot_def) apply csymbr - apply (simp add: Collect_False false_def - del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false_seq, simp) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule_tac C'="{s. target = curThread}" and Q="\s. ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply(rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -671,9 +665,9 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+) apply wp apply (clarsimp simp: guard_is_UNIV_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) - apply (simp add: guard_is_UNIV_def false_def Collect_const_mem) + apply (simp add: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_cond_false_seq, simp) @@ -681,14 +675,14 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp split: option.split_asm) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule_tac C'="{s. target = curThread}" and Q="\s. 
ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -698,17 +692,17 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply wpsimp - apply (wp static_imp_wp, strengthen sch_act_wf_weak, wp ) + apply (wp hoare_weak_lift_imp, strengthen sch_act_wf_weak, wp ) apply wp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (simp cong: conj_cong) apply (rule hoare_strengthen_post[ - where Q="\a b. (Invariants_H.valid_queues b \ - valid_objs' b \ + where Q="\a b. (valid_objs' b \ sch_act_wf (ksSchedulerAction b) b \ + pspace_aligned' b \ pspace_distinct' b \ ((\a b. priority = Some (a, b)) \ tcb_at' target b \ - ksCurDomain b \ maxDomain \ valid_queues' b \ + ksCurDomain b \ maxDomain \ fst (the priority) \ maxPriority)) \ ((case snd (the buf) of None \ 0 @@ -731,15 +725,15 @@ lemma invokeTCB_ThreadControl_ccorres: prefer 2 apply fastforce apply (strengthen cte_is_derived_capMasterCap_strg - invs_queues invs_weak_sch_act_wf invs_sch_act_wf' + invs_weak_sch_act_wf invs_sch_act_wf' invs_valid_objs' invs_mdb' invs_pspace_aligned', simp add: o_def) apply (rule_tac P="is_aligned (fst (the buf)) msg_align_bits" in hoare_gen_asm) - apply (wp threadSet_ipcbuffer_trivial static_imp_wp + apply (wp threadSet_ipcbuffer_trivial hoare_weak_lift_imp | simp - | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf invs_queues - invs_valid_queues' | wp hoare_drop_imps)+ + | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf + | wp hoare_drop_imps)+ (* \ P *) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem option_to_0_def @@ -749,7 +743,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg apply (simp add: conj_comms cong: conj_cong) - apply (strengthen invs_ksCurDomain_maxDomain') + apply (strengthen invs_ksCurDomain_maxDomain' invs_pspace_distinct') apply (wp hoare_vcg_const_imp_lift_R cteDelete_invs') apply simp apply (rule ccorres_split_nothrow_novcg_dc) @@ -762,12 +756,11 @@ lemma invokeTCB_ThreadControl_ccorres: apply (clarsimp simp: inQ_def Collect_const_mem cintr_def exception_defs tcb_cnode_index_defs) apply (simp add: tcbBuffer_def tcbIPCBufferSlot_def word_sle_def - cte_level_bits_def from_bool_def true_def size_of_def case_option_If2 ) + cte_level_bits_def size_of_def case_option_If2 ) apply (rule conjI) apply (clarsimp simp: case_option_If2 if_n_0_0 objBits_simps' valid_cap'_def capAligned_def word_bits_conv obj_at'_def projectKOs) - apply (clarsimp simp: invs_valid_objs' invs_valid_queues' - Invariants_H.invs_queues invs_ksCurDomain_maxDomain') + apply (fastforce simp: invs_valid_objs' invs_ksCurDomain_maxDomain') apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -778,7 +771,6 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) @@ -795,33 +787,27 @@ lemma 
invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - del: Collect_const) + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (fastforce simp: guard_is_UNIV_def Kernel_C.tcbVTable_def tcbVTableSlot_def cte_level_bits_def size_of_def) apply csymbr - apply (simp add: false_def Collect_False - del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (clarsimp simp: guard_is_UNIV_def false_def - ccap_relation_def cap_thread_cap_lift - cap_to_H_def) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) apply vcg @@ -830,7 +816,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes cteDelete_sch_act_simple | strengthen invs_valid_objs')+ - apply (rule hoare_post_imp_R[where Q' = "\r. invs'"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r. 
invs'"]) apply (wp cteDelete_invs') apply (clarsimp simp:cte_wp_at_ctes_of) apply simp @@ -844,7 +830,6 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ @@ -865,34 +850,28 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - del: Collect_const) + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem Kernel_C.tcbCTable_def tcbCTableSlot_def cte_level_bits_def size_of_def option_to_0_def) apply csymbr - apply (simp add: false_def Collect_False - del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (clarsimp simp: guard_is_UNIV_def false_def - ccap_relation_def cap_thread_cap_lift - cap_to_H_def) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) apply vcg @@ -900,20 +879,20 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: cte_is_derived_capMasterCap_strg o_def) apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes cteDelete_sch_act_simple | strengthen invs_valid_objs')+ - apply (rule hoare_post_imp_R[where Q' = "\r. invs'"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r. invs'"]) apply (wp cteDelete_invs') apply (clarsimp simp:cte_wp_at_ctes_of) apply simp apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def tcbCTableSlot_def Kernel_C.tcbCTable_def cte_level_bits_def size_of_def word_sle_def option_to_0_def - true_def from_bool_def cintr_def Collect_const_mem) + cintr_def Collect_const_mem) apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: inQ_def) apply (subst is_aligned_neg_mask_eq) @@ -940,7 +919,7 @@ lemma setupReplyMaster_ccorres: apply (cinit lift: thread_') apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply ctac - apply (simp del: Collect_const add: dc_def[symmetric]) + apply (simp del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) apply (rule_tac F="\rv'. 
(rv' = scast cap_null_cap) = (cteCap oldCTE = NullCap)" @@ -981,7 +960,7 @@ lemma setupReplyMaster_ccorres: apply (subst is_aligned_neg_mask_weaken) apply (erule is_aligned_tcb_ptr_to_ctcb_ptr) apply (simp add: ctcb_size_bits_def) - apply (simp add: true_def mask_def to_bool_def) + apply simp apply simp apply (simp add: cmachine_state_relation_def typ_heap_simps' @@ -1011,7 +990,7 @@ lemma restart_ccorres: apply (ctac(no_vcg) add: tcbSchedEnqueue_ccorres) apply (ctac add: possibleSwitchTo_ccorres) apply (wp weak_sch_act_wf_lift)[1] - apply (wp sts_valid_queues setThreadState_st_tcb)[1] + apply (wp sts_valid_objs' setThreadState_st_tcb)[1] apply (simp add: valid_tcb_state'_def) apply wp apply (wp (once) sch_act_wf_lift, (wp tcb_in_cur_domain'_lift)+) @@ -1023,7 +1002,7 @@ lemma restart_ccorres: apply fastforce apply (rule ccorres_return_Skip) apply (wp hoare_drop_imps) - apply (auto simp: Collect_const_mem mask_def "StrictC'_thread_state_defs") + apply (auto simp: Collect_const_mem mask_def ThreadState_defs) done lemma setNextPC_ccorres: @@ -1153,10 +1132,10 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (simp add: word_bits_def frame_gp_registers_convs n_gpRegisters_def) apply simp apply (rule ccorres_pre_getCurThread) + apply (rename_tac thread) apply (ctac add: postModifyRegisters_ccorres[simplified]) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rvd = ksCurThread s" - in ccorres_when) + apply (rule_tac R="\s. thread = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp apply (ctac (no_vcg) add: rescheduleRequired_ccorres) @@ -1186,9 +1165,8 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (fastforce simp: sch_act_wf_weak) apply (wpsimp wp: hoare_drop_imp)+ apply (clarsimp simp add: guard_is_UNIV_def) - apply (clarsimp simp: to_bool_def invs_weak_sch_act_wf invs_valid_objs' + apply (clarsimp simp: invs_weak_sch_act_wf invs_valid_objs' split: if_split - cong: if_cong | rule conjI)+ apply (clarsimp dest!: global'_no_ex_cap simp: invs'_def valid_state'_def | rule conjI)+ done @@ -1222,8 +1200,8 @@ lemma invokeTCB_WriteRegisters_ccorres_helper: lemma doMachineOp_context: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -1232,8 +1210,8 @@ lemma doMachineOp_context: lemma getObject_context: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbContext_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -1292,8 +1270,8 @@ lemma getObject_context: lemma threadGet_context: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) s); ko_at' ko t s; t \ ksCurThread s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. 
atcbContextSet st (tcbArch ko)) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_context [where st=st]) apply (rule exI) @@ -1305,8 +1283,8 @@ done lemma asUser_context: "\(x,s) \ fst (asUser (ksCurThread s) f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ ; t \ ksCurThread s\ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (asUser (ksCurThread s) f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (asUser (ksCurThread s) f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -1377,7 +1355,7 @@ lemma getMRs_rel_context: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s ; t \ ksCurThread s\ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -1454,7 +1432,7 @@ lemma threadSet_same: by (wpsimp wp: setObject_tcb_strongest getObject_tcb_wp) fastforce lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' dst and ex_nonz_cap_to' dst and sch_act_simple @@ -1469,6 +1447,7 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: \ {s. buffer_' s = option_to_ptr buffer}) [] (invokeTCB (WriteRegisters dst resume values arch)) (Call invokeTCB_WriteRegisters_'proc)" + supply empty_fail_cond[simp] apply (rule ccorres_gen_asm) apply (erule conjE) apply (cinit lift: n_' dest_' resumeTarget_' buffer_' @@ -1557,15 +1536,14 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_when[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) - apply (rule_tac xf'="\_. 0" in ccorres_call) - apply (rule restart_ccorres) + apply (rule_tac xf'=Corres_C.xfdc in ccorres_call) + apply (rule restart_ccorres) + apply simp apply simp - apply (simp add: xfdc_def) apply simp apply (rule ceqv_refl) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rv = ksCurThread s" - in ccorres_when) + apply (rule_tac R="\s. 
self = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp apply (ctac (no_vcg) add: rescheduleRequired_ccorres) @@ -1608,7 +1586,7 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (clarsimp simp: frame_gp_registers_convs word_less_nat_alt sysargs_rel_def n_frameRegisters_def n_msgRegisters_def split: if_split_asm) - apply (simp add: invs_weak_sch_act_wf invs_valid_objs' invs_queues) + apply (simp add: invs_weak_sch_act_wf invs_valid_objs') apply (fastforce dest!: global'_no_ex_cap simp: invs'_def valid_state'_def) done @@ -1622,7 +1600,7 @@ lemma invokeTCB_Suspend_ccorres: apply (ctac(no_vcg) add: suspend_ccorres[OF cteDeleteOne_ccorres]) apply (rule ccorres_return_CE, simp+)[1] apply wp - apply (clarsimp simp: from_bool_def true_def) + apply clarsimp apply (auto simp: invs'_def valid_state'_def global'_no_ex_cap) done @@ -1636,7 +1614,7 @@ lemma invokeTCB_Resume_ccorres: apply (ctac(no_vcg) add: restart_ccorres) apply (rule ccorres_return_CE, simp+)[1] apply wp - apply (clarsimp simp: from_bool_def true_def) + apply clarsimp done lemma Arch_decodeTransfer_spec: @@ -1707,7 +1685,7 @@ shows (doE reply \ invokeTCB (ReadRegisters target susp n archCp); liftE (replyOnRestart thread reply isCall) odE) (Call invokeTCB_ReadRegisters_'proc)" - supply option.case_cong_weak[cong] + supply option.case_cong_weak[cong] empty_fail_cond[simp] apply (rule ccorres_gen_asm) apply (cinit' lift: tcb_src_' suspendSource_' n_' call_' simp: invokeTCB_def liftE_bindE bind_assoc) @@ -1733,10 +1711,11 @@ shows apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_getThreadState]) apply (rule ccorres_if_lhs[OF _ ccorres_False[where P'=UNIV]]) apply (rule ccorres_if_lhs) - apply (simp add: Collect_True true_def whileAnno_def del: Collect_const) + apply (simp add: Collect_True whileAnno_def del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupIPCBuffer_ccorres) + apply (rename_tac state destIPCBuffer ipcBuffer) apply (ctac add: setRegister_ccorres) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc2) @@ -1744,7 +1723,7 @@ shows = min (unat n) (unat n_frameRegisters + unat n_gpRegisters)" in ccorres_gen_asm) apply (rule ccorres_split_nothrow_novcg) - apply (rule_tac F="\m s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) (genericTake n + apply (rule_tac F="\m s. obj_at' (\tcb. map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (ARM_H.frameRegisters @ ARM_H.gpRegisters)) = reply) target s" in ccorres_mapM_x_while) @@ -1797,23 +1776,23 @@ shows apply (rule bind_apply_cong[OF _ refl]) apply (rule_tac n1="min (unat n_frameRegisters - unat n_msgRegisters) (unat n)" in fun_cong [OF mapM_x_split_append]) - apply (rule_tac P="rva \ Some 0" in ccorres_gen_asm) - apply (subgoal_tac "(ipcBuffer = NULL) = (rva = None)") + apply (rule_tac P="destIPCBuffer \ Some 0" in ccorres_gen_asm) + apply (subgoal_tac "(ipcBuffer = NULL) = (destIPCBuffer = None)") prefer 2 apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.split_asm) apply (simp add: bind_assoc del: Collect_const) apply (rule_tac xf'=i_' and r'="\_ rv. unat rv = min (unat n_frameRegisters) (min (unat n) - (case rva of None \ unat n_msgRegisters + (case destIPCBuffer of None \ unat n_msgRegisters | _ \ unat n_frameRegisters))" in ccorres_split_nothrow_novcg) apply (rule ccorres_Cond_rhs) apply (rule ccorres_rel_imp, - rule_tac F="\m s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) (genericTake n + rule_tac F="\m s. obj_at' (\tcb. 
map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (ARM_H.frameRegisters @ ARM_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="unat n_msgRegisters" in ccorres_mapM_x_while') @@ -1923,11 +1902,10 @@ shows apply (rename_tac i_c, rule_tac P="i_c = 0" in ccorres_gen_asm2) apply (simp add: drop_zip del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp del: Collect_const) - apply (rule_tac F="\m s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) (genericTake n + apply (rule_tac F="\m s. obj_at' (\tcb. map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (ARM_H.frameRegisters @ ARM_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s \ valid_pspace' s" + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="0" in ccorres_mapM_x_while') apply (clarsimp simp: less_diff_conv drop_zip) apply (rule ccorres_guard_imp2) @@ -1998,11 +1976,11 @@ shows apply (simp add: min_less_iff_disj less_imp_diff_less) apply (simp add: drop_zip n_gpRegisters_def) apply (elim disjE impCE) - apply (clarsimp simp: mapM_x_Nil) + apply (clarsimp simp: mapM_x_Nil cong: ccorres_all_cong) apply (rule ccorres_return_Skip') - apply (simp add: linorder_not_less word_le_nat_alt - drop_zip mapM_x_Nil n_frameRegisters_def - min.absorb1 n_msgRegisters_def) + apply (simp add: linorder_not_less word_le_nat_alt drop_zip + mapM_x_Nil n_frameRegisters_def n_msgRegisters_def + cong: ccorres_all_cong) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') apply simp apply ceqv @@ -2014,7 +1992,7 @@ shows apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp | simp add: valid_tcb_state'_def)+ - apply (clarsimp simp: ThreadState_Running_def mask_def) + apply (clarsimp simp: ThreadState_defs mask_def) apply (rule mapM_x_wp') apply (rule hoare_pre) apply (wp sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift) @@ -2034,15 +2012,15 @@ shows apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size word_less_nat_alt split: if_split_asm dest!: word_unat.Rep_inverse') - apply simp - apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift static_imp_wp + apply (simp add: pred_conj_def) + apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift hoare_weak_lift_imp tcb_in_cur_domain'_lift) apply (simp add: n_frameRegisters_def n_msgRegisters_def guard_is_UNIV_def) apply simp apply (rule mapM_x_wp') apply (rule hoare_pre) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem @@ -2051,7 +2029,7 @@ shows msgMaxLength_def msgLengthBits_def word_less_nat_alt unat_of_nat) apply (wp (once) hoare_drop_imps) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply (vcg exspec=setRegister_modifies) apply simp @@ -2062,7 +2040,7 @@ shows ARM.badgeRegister_def "StrictC'_register_defs") apply (vcg exspec=lookupIPCBuffer_modifies) - apply (simp add: false_def) + apply simp apply (ctac(no_vcg) add: setThreadState_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) @@ -2071,18 +2049,17 @@ shows apply (simp cong: rev_conj_cong) apply wp apply (wp asUser_inv mapM_wp' getRegister_inv - asUser_get_registers[simplified] static_imp_wp)+ 
+ asUser_get_registers[simplified] hoare_weak_lift_imp)+ apply (rule hoare_strengthen_post, rule asUser_get_registers) apply (clarsimp simp: obj_at'_def genericTake_def frame_gp_registers_convs) apply arith - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply (simp add: performTransfer_def) apply wp - apply (simp add: Collect_const_mem "StrictC'_thread_state_defs" - mask_def) + apply (simp add: Collect_const_mem ThreadState_defs mask_def) apply vcg apply (rule_tac Q="\rv. invs' and st_tcb_at' ((=) Restart) thread and tcb_at' target" in hoare_post_imp) @@ -2092,7 +2069,7 @@ shows apply (vcg exspec=suspend_modifies) apply vcg apply (rule conseqPre, vcg, clarsimp) - apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def true_def + apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def split: if_split) done @@ -2166,7 +2143,8 @@ lemma decodeReadRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2177,13 +2155,13 @@ lemma decodeReadRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2210,7 +2188,7 @@ lemma decodeReadRegisters_ccorres: apply wp apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread - "StrictC'_thread_state_defs" word_sless_def word_sle_def + ThreadState_defs word_sless_def word_sle_def mask_eq_iff_w2p word_size isCap_simps ReadRegistersFlags_defs tcb_at_invs' cap_get_tag_isCap capTCBPtr_eq) @@ -2223,7 +2201,7 @@ lemma decodeReadRegisters_ccorres: valid_tcb_state'_def elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] - apply (clarsimp simp: from_bool_def word_and_1 split: if_split) + apply (clarsimp simp: word_and_1 split: if_split) done lemma decodeWriteRegisters_ccorres: @@ -2277,7 +2255,8 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. 
capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2288,13 +2267,13 @@ lemma decodeWriteRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2302,7 +2281,7 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: performInvocation_def) apply (ctac(no_vcg) add: invokeTCB_WriteRegisters_ccorres [where args=args and someNum="unat (args ! 1)"]) - apply (simp add: dc_def[symmetric] o_def) + apply simp apply (rule ccorres_alternative2, rule ccorres_return_CE, simp+) apply (rule ccorres_return_C_errorE, simp+)[1] apply wp[1] @@ -2317,13 +2296,13 @@ lemma decodeWriteRegisters_ccorres: apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: Collect_const_mem ct_in_state'_def pred_tcb_at') apply (simp add: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) - apply (clarsimp simp: valid_cap'_def "StrictC'_thread_state_defs" + apply (clarsimp simp: valid_cap'_def ThreadState_defs mask_eq_iff_w2p word_size rf_sr_ksCurThread WriteRegisters_resume_def word_sle_def word_sless_def numeral_eqs simp del: unsigned_numeral) apply (frule arg_cong[where f="\x. unat (of_nat x :: word32)"], - simp(no_asm_use) only: word_unat.Rep_inverse o_def, + simp(no_asm_use) only: word_unat.Rep_inverse, simp) apply (rule conjI) apply clarsimp @@ -2336,8 +2315,7 @@ lemma decodeWriteRegisters_ccorres: apply (rule disjCI2) apply (clarsimp simp: genericTake_def linorder_not_less) apply (subst hd_conv_nth, clarsimp simp: unat_eq_0) - apply (clarsimp simp: from_bool_def word_and_1 - split: if_split) + apply (clarsimp simp: word_and_1 split: if_split) done lemma excaps_map_Nil: "(excaps_map caps = []) = (caps = [])" @@ -2405,7 +2383,7 @@ lemma decodeCopyRegisters_ccorres: apply (simp add: case_bool_If if_to_top_of_bindE if_to_top_of_bind del: Collect_const cong: if_cong) - apply (simp add: to_bool_def returnOk_bind Collect_True + apply (simp add: returnOk_bind Collect_True ccorres_invocationCatch_Inr performInvocation_def del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2456,7 +2434,7 @@ lemma decodeCopyRegisters_ccorres: elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1] apply (clarsimp simp: word_sle_def CopyRegistersFlags_defs word_sless_def - "StrictC'_thread_state_defs" rf_sr_ksCurThread + ThreadState_defs rf_sr_ksCurThread split: if_split) apply (drule interpret_excaps_eq) apply (clarsimp simp: mask_def excaps_map_def split_def ccap_rights_relation_def @@ -2581,7 +2559,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (simp add: case_Null_If del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_if_lhs) @@ -2602,7 +2580,7 @@ lemma slotCapLongRunningDelete_ccorres: apply vcg apply (simp del: 
Collect_const) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of return_def) @@ -2610,7 +2588,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap from_bool_0 dest!: ccte_relation_ccap_relation) - apply (simp add: from_bool_def false_def true_def + apply (simp add: from_bool_def split: bool.split) apply (auto simp add: longRunningDelete_def isCap_simps split: capability.split)[1] @@ -2618,13 +2596,12 @@ lemma slotCapLongRunningDelete_ccorres: apply (wp hoare_drop_imps isFinalCapability_inv) apply (clarsimp simp: Collect_const_mem guard_is_UNIV_def) apply (rename_tac rv') - apply (case_tac rv'; clarsimp simp: false_def true_def) + apply (case_tac rv'; clarsimp simp: false_def) apply vcg apply (rule conseqPre, vcg, clarsimp) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) - apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap - from_bool_def false_def map_comp_Some_iff + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap map_comp_Some_iff dest!: ccte_relation_ccap_relation) done @@ -2639,7 +2616,7 @@ lemma isValidVTableRoot_spec: {s'. ret__unsigned_long_' s' = from_bool (isValidVTableRoot_C (cap_' s))}" apply vcg apply (clarsimp simp: isValidVTableRoot_C_def if_1_0_0 from_bool_0) - apply (simp add: from_bool_def to_bool_def false_def split: if_split) + apply (simp add: to_bool_def split: if_split) done lemma isValidVTableRoot_conv: @@ -2653,9 +2630,8 @@ lemma isValidVTableRoot_conv: apply (case_tac "cap_get_tag cap' = scast cap_page_directory_cap") apply (clarsimp split: arch_capability.split simp: isCap_simps) apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 - cap_page_directory_cap_lift cap_to_H_def - from_bool_def) - apply (clarsimp simp: to_bool_def split: if_split) + cap_page_directory_cap_lift cap_to_H_def) + apply (clarsimp split: if_split) apply (clarsimp simp: cap_get_tag_isCap cap_get_tag_isCap_ArchObject) apply (simp split: arch_capability.split_asm add: isCap_simps) apply (case_tac "cap_get_tag cap' = scast cap_page_directory_cap") @@ -2966,7 +2942,7 @@ lemma decodeTCBConfigure_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (subgoal_tac "extraCaps \ []") - apply (clarsimp simp: returnOk_def return_def hd_conv_nth false_def) + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) apply fastforce apply clarsimp apply ceqv @@ -2993,7 +2969,7 @@ lemma decodeTCBConfigure_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def - hd_drop_conv_nth2 false_def) + hd_drop_conv_nth2) apply fastforce apply ceqv apply (ctac add: ccorres_injection_handler_csum1 @@ -3079,7 +3055,7 @@ lemma decodeTCBConfigure_ccorres: ptr_val_tcb_ptr_mask2[unfolded mask_def objBits_defs, simplified] tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - StrictC'_thread_state_defs mask_eq_iff_w2p word_size + ThreadState_defs mask_eq_iff_w2p word_size from_bool_all_helper all_ex_eq_helper ucast_ucast_mask objBits_defs) apply (subgoal_tac "args \ [] \ extraCaps \ []") @@ -3115,7 +3091,8 @@ lemma decodeTCBConfigure_ccorres: apply (rule conjI, fastforce) apply (drule interpret_excaps_eq) apply (clarsimp simp: cte_wp_at_ctes_of valid_tcb_state'_def numeral_eqs le_ucast_ucast_le - 
tcb_at_invs' invs_valid_objs' invs_queues invs_sch_act_wf' + tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + invs_pspace_aligned' invs_pspace_distinct' ct_in_state'_def pred_tcb_at'_def obj_at'_def tcb_st_refs_of'_def) apply (erule disjE; simp add: objBits_defs mask_def) apply (clarsimp simp: idButNot_def interpret_excaps_test_null @@ -3128,7 +3105,7 @@ lemma decodeTCBConfigure_ccorres: capTCBPtr_eq tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - StrictC'_thread_state_defs mask_eq_iff_w2p word_size + ThreadState_defs mask_eq_iff_w2p word_size from_bool_all_helper) apply (frule(1) tcb_at_h_t_valid [OF tcb_at_invs']) apply (clarsimp simp: typ_heap_simps numeral_eqs isCap_simps valid_cap'_def capAligned_def @@ -3163,7 +3140,6 @@ lemma decodeSetMCPriority_ccorres: >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetMCPriority_'proc)" supply Collect_const[simp del] - supply dc_simp[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetMCPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3231,8 +3207,7 @@ lemma decodeSetMCPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3268,7 +3243,7 @@ lemma decodeSetMCPriority_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) @@ -3297,7 +3272,7 @@ lemma decodeSetPriority_ccorres: (decodeSetPriority args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetPriority_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3365,8 +3340,7 @@ lemma decodeSetPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3402,7 +3376,7 @@ lemma decodeSetPriority_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def 
interpret_excaps_eq excaps_map_def) @@ -3444,7 +3418,7 @@ lemma decodeSetSchedParams_ccorres: (decodeSetSchedParams args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetSchedParams_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetSchedParams_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3452,9 +3426,9 @@ lemma decodeSetSchedParams_ccorres: val="from_bool (length args < 2 \ length extraCaps = 0)" in ccorres_symb_exec_r_known_rv) apply vcg - apply (auto simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 - split: bool.splits)[1] - apply (unat_arith+)[2] + apply (force simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 + unat_arith_simps + split: bool.splits if_splits) apply ceqv apply clarsimp apply (wpc, @@ -3511,8 +3485,7 @@ lemma decodeSetSchedParams_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3561,16 +3534,15 @@ lemma decodeSetSchedParams_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) apply (intro conjI impI allI) - apply (clarsimp simp: unat_eq_0 le_max_word_ucast_id - thread_control_update_mcp_def thread_control_update_priority_def - cap_get_tag_isCap_unfolded_H_cap isCap_simps - interpret_excaps_eq excaps_map_def)+ - done + by (clarsimp simp: unat_eq_0 le_max_word_ucast_id + thread_control_update_mcp_def thread_control_update_priority_def + cap_get_tag_isCap_unfolded_H_cap isCap_simps + interpret_excaps_eq excaps_map_def)+ lemma decodeSetIPCBuffer_ccorres: "interpret_excaps extraCaps' = excaps_map extraCaps \ @@ -3708,11 +3680,10 @@ lemma decodeSetIPCBuffer_ccorres: valid_mdb_ctes_def no_0_def excaps_map_def elim: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread' dest!: interpret_excaps_eq)[1] - apply (clarsimp simp: option_to_0_def rf_sr_ksCurThread word_sless_def - word_sle_def ThreadState_Restart_def mask_def) + apply (clarsimp simp: option_to_0_def rf_sr_ksCurThread word_sless_def word_sle_def mask_def) apply (rule conjI[rotated], clarsimp+) apply (drule interpret_excaps_eq[rule_format, where n=0], simp add: excaps_map_Nil) - apply (simp add: mask_def "StrictC'_thread_state_defs" excaps_map_def) + apply (simp add: mask_def ThreadState_defs excaps_map_def) apply (clarsimp simp: ccap_rights_relation_def rightsFromWord_wordFromRights cap_get_tag_isCap) apply (frule cap_get_tag_to_H, subst cap_get_tag_isCap, assumption, assumption) @@ -3736,7 +3707,7 @@ lemma bindNotification_ccorres: (Call bindNotification_'proc)" apply (cinit lift: tcb_' ntfnPtr_' simp: bindNotification_def) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ 
empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr and tcb_at' tcb" and P'=UNIV + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr and tcb_at' tcb" and P'=UNIV in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) @@ -3756,7 +3727,7 @@ lemma bindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv") + apply (case_tac "ntfnObj ntfn") apply (auto simp: option_to_ctcb_ptr_def obj_at'_def objBits_simps projectKOs bindNTFN_alignment_junk)[4] apply (simp add: carch_state_relation_def typ_heap_simps') @@ -3768,7 +3739,7 @@ lemma bindNotification_ccorres: apply ceqv apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) - apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3[unfolded dc_def]) + apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule (1) rf_sr_tcb_update_no_queue2, @@ -3834,7 +3805,7 @@ lemma decodeUnbindNotification_ccorres: apply (rule ccorres_Guard_Seq) apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getBoundNotification) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. ntfn \ Some 0" in ccorres_cross_over_guard) apply (simp add: bindE_bind_linearise) apply wpc apply (simp add: bindE_bind_linearise[symmetric] @@ -3867,10 +3838,10 @@ lemma decodeUnbindNotification_ccorres: apply (clarsimp simp: isCap_simps) apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (auto simp: ctcb_relation_def typ_heap_simps cap_get_tag_ThreadCap ct_in_state'_def - option_to_ptr_def option_to_0_def ThreadState_Restart_def - mask_def rf_sr_ksCurThread valid_tcb_state'_def - elim!: pred_tcb'_weakenE - dest!: valid_objs_boundNTFN_NULL) + option_to_ptr_def option_to_0_def ThreadState_defs + mask_def rf_sr_ksCurThread valid_tcb_state'_def + elim!: pred_tcb'_weakenE + dest!: valid_objs_boundNTFN_NULL) done lemma nTFN_case_If_ptr: @@ -3940,7 +3911,7 @@ lemma decodeBindNotification_ccorres: apply csymbr apply (clarsimp simp add: if_to_top_of_bind to_bool_eq_0[symmetric] simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (clarsimp simp: to_bool_def throwError_bind invocationCatch_def) + apply (clarsimp simp: throwError_bind invocationCatch_def) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg apply (rule conseqPre, vcg) @@ -3963,7 +3934,7 @@ lemma decodeBindNotification_ccorres: apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def valid_ntfn'_def) apply (case_tac "ntfnObj ntfn", simp_all add: isWaitingNtfn_def option_to_ctcb_ptr_def - false_def true_def split: option.split_asm if_split, + split: option.split_asm if_split, auto simp: neq_Nil_conv tcb_queue_relation'_def tcb_at_not_NULL[symmetric] tcb_at_not_NULL)[1] apply ceqv @@ -4027,8 +3998,8 @@ lemma decodeBindNotification_ccorres: apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def syscall_error_to_H_cases exception_defs) - apply (clarsimp simp add: guard_is_UNIV_def isWaitingNtfn_def from_bool_0 - ThreadState_Restart_def mask_def true_def + apply (clarsimp simp add: guard_is_UNIV_def isWaitingNtfn_def + ThreadState_defs mask_def rf_sr_ksCurThread capTCBPtr_eq) apply (simp add: hd_conv_nth bindE_bind_linearise nTFN_case_If_ptr throwError_bind invocationCatch_def) apply 
(rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) @@ -4214,7 +4185,7 @@ lemma decodeSetSpace_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (subgoal_tac "extraCaps \ []") - apply (clarsimp simp: returnOk_def return_def hd_conv_nth false_def) + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) apply fastforce apply clarsimp apply ceqv @@ -4226,7 +4197,7 @@ lemma decodeSetSpace_ccorres: apply (simp add: Collect_False del: Collect_const) apply csymbr apply csymbr - apply (simp add: cnode_cap_case_if cap_get_tag_isCap dc_def[symmetric] + apply (simp add: cnode_cap_case_if cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_throwError @@ -4241,8 +4212,7 @@ lemma decodeSetSpace_ccorres: apply (rule_tac P'="{s. vRootCap = vRootCap_' s}" in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - hd_drop_conv_nth2 false_def) + apply (clarsimp simp: returnOk_def return_def hd_drop_conv_nth2) apply fastforce apply ceqv apply (ctac add: ccorres_injection_handler_csum1 @@ -4352,18 +4322,15 @@ lemma decodeSetSpace_ccorres: rightsFromWord_wordFromRights capTCBPtr_eq tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - "StrictC'_thread_state_defs" mask_eq_iff_w2p word_size) + ThreadState_defs mask_eq_iff_w2p word_size) apply (simp add: word_sle_def cap_get_tag_isCap) apply (subgoal_tac "args \ []") - apply (clarsimp simp: hd_conv_nth) - apply (drule sym, simp, simp add: true_def from_bool_0) - apply (clarsimp simp: objBits_defs) - apply fastforce + apply (fastforce simp: hd_conv_nth objBits_defs) apply clarsimp done lemma invokeTCB_SetTLSBase_ccorres: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. 
rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs') @@ -4374,7 +4341,7 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (cinit lift: thread_' tls_base_') apply (simp add: liftE_def bind_assoc del: Collect_const) - apply (ctac add: setRegister_ccorres[simplified dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_pre_getCurThread) apply (rename_tac cur_thr) apply (rule ccorres_split_nothrow_novcg_dc) @@ -4386,9 +4353,9 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wpsimp wp: hoare_drop_imp simp: guard_is_UNIV_def)+ apply vcg - apply (clarsimp simp: tlsBaseRegister_def ARM.tlsBaseRegister_def - invs_weak_sch_act_wf invs_queues TLS_BASE_def TPIDRURW_def - split: if_split) + apply (fastforce simp: tlsBaseRegister_def ARM.tlsBaseRegister_def + invs_weak_sch_act_wf TLS_BASE_def TPIDRURW_def + split: if_split) done lemma decodeSetTLSBase_ccorres: @@ -4436,7 +4403,7 @@ lemma decodeSetTLSBase_ccorres: apply (clarsimp simp: ct_in_state'_def sysargs_rel_n_def n_msgRegisters_def) apply (auto simp: valid_tcb_state'_def elim!: pred_tcb'_weakenE)[1] - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (auto simp: unat_eq_0 le_max_word_ucast_id)+ @@ -4588,8 +4555,7 @@ lemma decodeTCBInvocation_ccorres: dest!: st_tcb_at_idle_thread')[1] apply (simp split: sum.split add: cintr_def intr_and_se_rel_def exception_defs syscall_error_rel_def) - apply (simp add: "StrictC'_thread_state_defs" mask_eq_iff_w2p word_size - cap_get_tag_isCap) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size) apply (simp add: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply clarsimp done diff --git a/proof/crefine/ARM/VSpace_C.thy b/proof/crefine/ARM/VSpace_C.thy index 54068f3380..bf71421f3d 100644 --- a/proof/crefine/ARM/VSpace_C.thy +++ b/proof/crefine/ARM/VSpace_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -170,7 +171,7 @@ lemma loadHWASID_ccorres: apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_gets]) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_findPDForASIDAssert]) apply (rename_tac pd) - apply (rule_tac P="\s. pd_at_asid' pd asid s \ rv = armKSASIDMap (ksArchState s) + apply (rule_tac P="\s. 
pd_at_asid' pd asid s \ asidMap = armKSASIDMap (ksArchState s) \ pd \ ran (option_map snd o armKSASIDMap (ksArchState s) |` (- {asid})) \ option_map snd (armKSASIDMap (ksArchState s) asid) \ {None, Some pd} @@ -284,7 +285,7 @@ lemma storeHWASID_ccorres: apply (subst asid_map_pd_to_hwasids_update, assumption) subgoal by clarsimp apply (rule ext, simp add: pd_pointer_to_asid_slot_def map_comp_def split: if_split) - apply (clarsimp simp: pde_stored_asid_def true_def mask_def[where n="Suc 0"]) + apply (clarsimp simp: pde_stored_asid_def) apply (subst less_mask_eq) apply (rule order_less_le_trans, rule ucast_less) subgoal by simp @@ -376,7 +377,7 @@ lemma invalidateASID_ccorres: apply (subst asid_map_pd_to_hwasids_clear, assumption) subgoal by clarsimp apply (rule ext, simp add: pd_pointer_to_asid_slot_def map_comp_def split: if_split) - subgoal by (clarsimp simp: pde_stored_asid_def false_def mask_def[where n="Suc 0"]) + subgoal by (clarsimp simp: pde_stored_asid_def) apply wp[1] apply (wp findPDForASIDAssert_pd_at_wp2) apply (clarsimp simp: asidLowBits_handy_convs word_sle_def word_sless_def @@ -416,7 +417,7 @@ lemma handleVMFault_ccorres: apply vcg apply (clarsimp simp: errstate_def) apply (clarsimp simp: EXCEPTION_FAULT_def EXCEPTION_NONE_def) - apply (simp add: seL4_Fault_VMFault_lift false_def) + apply (simp add: seL4_Fault_VMFault_lift) apply wp+ apply (simp add: vm_fault_type_from_H_def Kernel_C.ARMDataAbort_def Kernel_C.ARMPrefetchAbort_def) apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff) @@ -431,7 +432,7 @@ lemma handleVMFault_ccorres: apply vcg apply (clarsimp simp: errstate_def) apply (clarsimp simp: EXCEPTION_FAULT_def EXCEPTION_NONE_def) - apply (simp add: seL4_Fault_VMFault_lift true_def mask_def) + apply (simp add: seL4_Fault_VMFault_lift) apply wp+ apply simp done @@ -709,7 +710,7 @@ lemma ptrFromPAddr_spec: Call ptrFromPAddr_'proc \\ret__ptr_to_void = Ptr (ptrFromPAddr (paddr_' s))\" apply vcg - apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def physBase_def) + apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def) done lemma addrFromPPtr_spec: @@ -717,7 +718,7 @@ lemma addrFromPPtr_spec: Call addrFromPPtr_'proc \\ret__unsigned_long = addrFromPPtr (ptr_val (pptr_' s))\" apply vcg - apply (simp add: addrFromPPtr_def pptrBaseOffset_def pptrBase_def physBase_def) + apply (simp add: addrFromPPtr_def pptrBaseOffset_def pptrBase_def) done lemma addrFromKPPtr_spec: @@ -726,7 +727,7 @@ lemma addrFromKPPtr_spec: \\ret__unsigned_long = addrFromKPPtr (ptr_val (pptr_' s))\" apply vcg apply (simp add: addrFromKPPtr_def kernelELFBaseOffset_def kernelELFPAddrBase_def - kernelELFBase_def physBase_def pptrBase_def mask_def) + kernelELFBase_def pptrBase_def mask_def) done abbreviation @@ -745,7 +746,7 @@ lemma lookupPTSlot_ccorres: apply csymbr apply csymbr apply (rule ccorres_abstract_cleanup) - apply (rule_tac P="(ret__unsigned = scast pde_pde_coarse) = (isPageTablePDE rv)" + apply (rule_tac P="(ret__unsigned = scast pde_pde_coarse) = (isPageTablePDE pde)" in ccorres_gen_asm2) apply (rule ccorres_cond2'[where R=\]) apply (clarsimp simp: Collect_const_mem) @@ -760,9 +761,10 @@ lemma lookupPTSlot_ccorres: apply (simp add: checkPTAt_def bind_liftE_distrib liftE_bindE returnOk_liftE[symmetric]) apply (rule ccorres_stateAssert) - apply (rule_tac P="page_table_at' (ptrFromPAddr (pdeTable rv)) - and ko_at' rv (lookup_pd_slot pd vptr) - and K (isPageTablePDE rv)" and P'=UNIV in ccorres_from_vcg_throws) + apply (rule_tac P="page_table_at' (ptrFromPAddr 
(pdeTable pde)) + and ko_at' pde (lookup_pd_slot pd vptr) and K (isPageTablePDE pde)" + and P'=UNIV + in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def Collect_const_mem lookup_pd_slot_def word_sle_def) @@ -770,7 +772,7 @@ lemma lookupPTSlot_ccorres: apply (erule cmap_relationE1[OF rf_sr_cpde_relation], erule ko_at_projectKO_opt) apply (clarsimp simp: typ_heap_simps cpde_relation_def Let_def isPageTablePDE_def pde_pde_coarse_lift_def pde_pde_coarse_lift - split: pde.split_asm) + split: pde.split_asm split_of_bool_asm) apply (subst array_ptr_valid_array_assertionI, erule h_t_valid_clift; simp) apply (rule unat_le_helper, rule order_trans[OF word_and_le1], simp) apply (simp add: word_shift_by_2 lookup_pt_slot_no_fail_def) @@ -912,7 +914,7 @@ lemma findPDForASID_ccorres: apply (rule_tac P=\ and P' =UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def bind_def NonDetMonad.lift_def) + apply (clarsimp simp: throwError_def return_def bindE_def bind_def Nondet_Monad.lift_def) apply (clarsimp simp: EXCEPTION_NONE_def EXCEPTION_LOOKUP_FAULT_def) apply (simp add: lookup_fault_lift_invalid_root) @@ -1017,7 +1019,7 @@ lemma flushSpace_ccorres: apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws2) apply (clarsimp simp: Collect_const_mem pde_stored_asid_def) apply (simp add: if_split_eq1 to_bool_def) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply csymbr apply (clarsimp simp: pde_stored_asid_def) apply (case_tac "to_bool (stored_asid_valid_CL (pde_pde_invalid_lift stored_hw_asid___struct_pde_C))") @@ -1029,7 +1031,7 @@ lemma flushSpace_ccorres: apply clarsimp apply clarsimp apply (rule ccorres_call, - rule invalidateTranslationASID_ccorres [simplified dc_def xfdc_def], + rule invalidateTranslationASID_ccorres, simp+)[1] apply vcg apply wp+ @@ -1167,15 +1169,15 @@ lemma findFreeHWASID_ccorres: apply (rule_tac xf=hw_asid_offset_' and i=0 and xf_update=hw_asid_offset_'_update and r'=dc and xf'=xfdc and Q=UNIV - and F="\n s. rv = armKSHWASIDTable (ksArchState s) - \ nextASID = armKSNextASID (ksArchState s) - \ valid_arch_state' s" + and F="\n s. hwASIDTable = armKSHWASIDTable (ksArchState s) + \ nextASID = armKSNextASID (ksArchState s) + \ valid_arch_state' s" in ccorres_sequenceE_while_gen') apply (rule ccorres_from_vcg_might_throw) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: rf_sr_armKSNextASID) apply (subst down_cast_same [symmetric], - simp add: is_down_def target_size_def source_size_def word_size)+ + simp add: is_down_def target_size_def source_size_def word_size)+ apply (simp add: ucast_ucast_mask ucast_ucast_add ucast_and_mask ucast_of_nat_small asidInvalid_def @@ -1213,7 +1215,7 @@ lemma findFreeHWASID_ccorres: apply ceqv apply (rule ccorres_assert) apply (rule_tac A="\s. 
nextASID = armKSNextASID (ksArchState s) - \ rv = armKSHWASIDTable (ksArchState s) + \ hwASIDTable = armKSHWASIDTable (ksArchState s) \ valid_arch_state' s \ valid_pde_mappings' s" in ccorres_guard_imp2[where A'=UNIV]) apply (simp add: split_def) @@ -1324,7 +1326,6 @@ lemma armv_contextSwitch_ccorres: apply (cinit lift: cap_pd_' asid_') apply simp apply (ctac(no_vcg) add: getHWASID_ccorres) - apply (fold dc_def) apply (ctac (no_vcg)add: armv_contextSwitch_HWASID_ccorres) apply wp apply clarsimp @@ -1356,11 +1357,11 @@ lemma setVMRoot_ccorres: apply (simp add: cap_case_isPageDirectoryCap cong: if_cong) apply (rule ccorres_cond_true_seq) apply (rule ccorres_rhs_assoc) - apply (simp add: throwError_def catch_def dc_def[symmetric]) + apply (simp add: throwError_def catch_def) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armKSGlobalPD) apply csymbr - apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) @@ -1380,11 +1381,11 @@ lemma setVMRoot_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armKSGlobalPD) apply csymbr - apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (simp add: cap_case_isPageDirectoryCap) @@ -1409,28 +1410,28 @@ lemma setVMRoot_ccorres: apply (simp add: whenE_def throwError_def checkPDNotInASIDMap_def checkPDASIDMapMembership_def) apply (rule ccorres_stateAssert) - apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armKSGlobalPD) apply csymbr apply (rule ccorres_add_return2) apply (ctac(no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (simp add: whenE_def returnOk_def) - apply (ctac (no_vcg) add: armv_contextSwitch_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: armv_contextSwitch_ccorres) apply (simp add: checkPDNotInASIDMap_def checkPDASIDMapMembership_def) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc)+ - apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply (rule ccorres_h_t_valid_armKSGlobalPD) apply csymbr apply (rule ccorres_add_return2) apply (ctac(no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply simp @@ -1448,17 +1449,16 @@ lemma setVMRoot_ccorres: apply (auto simp: isCap_simps valid_cap'_def mask_def)[1] apply (clarsimp simp: size_of_def cte_level_bits_def tcbVTableSlot_def tcb_cnode_index_defs - ccap_rights_relation_def cap_rights_to_H_def - to_bool_def true_def allRights_def + ccap_rights_relation_def cap_rights_to_H_def allRights_def mask_def[where n="Suc 0"] cte_at_tcb_at_16' addrFromPPtr_def) apply (clarsimp simp: cap_get_tag_isCap_ArchObject2 dest!: isCapDs) - by (clarsimp simp: 
cap_get_tag_isCap_ArchObject[symmetric] + apply (clarsimp simp: cap_get_tag_isCap_ArchObject[symmetric] cap_lift_page_directory_cap cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def elim!: ccap_relationE split: if_split_asm) + done lemma setVMRootForFlush_ccorres: "ccorres (\rv rv'. rv' = from_bool rv) ret__unsigned_long_' @@ -1477,9 +1477,9 @@ lemma setVMRootForFlush_ccorres: del: Collect_const) apply (rule ccorres_if_lhs) apply (rule_tac P="(capPDIsMapped_CL (cap_page_directory_cap_lift threadRoot) = 0) - = (capPDMappedASID (capCap rva) = None) + = (capPDMappedASID (capCap rv) = None) \ capPDBasePtr_CL (cap_page_directory_cap_lift threadRoot) - = capPDBasePtr (capCap rva)" in ccorres_gen_asm2) + = capPDBasePtr (capCap rv)" in ccorres_gen_asm2) apply (rule ccorres_rhs_assoc | csymbr | simp add: Collect_True del: Collect_const)+ apply (rule ccorres_split_throws) apply (rule ccorres_return_C, simp+) @@ -1491,7 +1491,7 @@ lemma setVMRootForFlush_ccorres: apply (ctac (no_vcg)add: armv_contextSwitch_ccorres) apply (ctac add: ccorres_return_C) apply wp - apply (simp add: true_def from_bool_def) + apply simp apply vcg apply (rule conseqPre, vcg) apply (simp add: Collect_const_mem) @@ -1501,7 +1501,7 @@ lemma setVMRootForFlush_ccorres: apply vcg apply (clarsimp simp: Collect_const_mem word_sle_def ccap_rights_relation_def cap_rights_to_H_def - mask_def[where n="Suc 0"] true_def to_bool_def + mask_def[where n="Suc 0"] allRights_def size_of_def cte_level_bits_def tcbVTableSlot_def Kernel_C.tcbVTable_def invs'_invs_no_cicd) apply (clarsimp simp: rf_sr_ksCurThread ptr_add_assertion_positive) @@ -1510,8 +1510,7 @@ lemma setVMRootForFlush_ccorres: apply (clarsimp simp: rf_sr_ksCurThread ptr_val_tcb_ptr_mask' [OF tcb_at_invs']) apply (frule cte_at_tcb_at_16'[OF tcb_at_invs'], clarsimp simp: cte_wp_at_ctes_of) apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) - apply (clarsimp simp: false_def true_def from_bool_def - typ_heap_simps') + apply (clarsimp simp: typ_heap_simps') apply (case_tac "isArchObjectCap rv \ isPageDirectoryCap (capCap rv)") apply (clarsimp simp: isCap_simps(2) cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_page_directory_cap_lift cap_to_H_def @@ -1594,9 +1593,10 @@ lemma doFlush_ccorres: apply (rule ccorres_cond_false) apply (rule ccorres_cond_false) apply (rule ccorres_cond_true) - apply (simp add: empty_fail_cleanCacheRange_PoU empty_fail_dsb empty_fail_invalidateCacheRange_I empty_fail_branchFlushRange empty_fail_isb doMachineOp_bind) + apply (simp add: empty_fail_cond empty_fail_cleanCacheRange_PoU empty_fail_dsb + empty_fail_invalidateCacheRange_I empty_fail_branchFlushRange empty_fail_isb + doMachineOp_bind) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def) apply (ctac (no_vcg) add: cleanCacheRange_PoU_ccorres) apply (ctac (no_vcg) add: dsb_ccorres) apply (ctac (no_vcg) add: invalidateCacheRange_I_ccorres) @@ -1605,13 +1605,13 @@ lemma doFlush_ccorres: apply wp+ apply simp apply (clarsimp simp: Collect_const_mem) - apply (auto simp: flushtype_relation_def o_def - Kernel_C.ARMPageClean_Data_def Kernel_C.ARMPDClean_Data_def - Kernel_C.ARMPageInvalidate_Data_def Kernel_C.ARMPDInvalidate_Data_def - Kernel_C.ARMPageCleanInvalidate_Data_def Kernel_C.ARMPDCleanInvalidate_Data_def - Kernel_C.ARMPageUnify_Instruction_def Kernel_C.ARMPDUnify_Instruction_def - dest: ghost_assertion_size_logic[rotated] - split: ARM_H.flush_type.splits) + apply (auto simp: flushtype_relation_def + Kernel_C.ARMPageClean_Data_def Kernel_C.ARMPDClean_Data_def + 
Kernel_C.ARMPageInvalidate_Data_def Kernel_C.ARMPDInvalidate_Data_def + Kernel_C.ARMPageCleanInvalidate_Data_def Kernel_C.ARMPDCleanInvalidate_Data_def + Kernel_C.ARMPageUnify_Instruction_def Kernel_C.ARMPDUnify_Instruction_def + dest: ghost_assertion_size_logic[rotated] + split: ARM_H.flush_type.splits) done end @@ -1642,7 +1642,7 @@ lemma performPageFlush_ccorres: apply (ctac (no_vcg) add: setVMRootForFlush_ccorres) apply (ctac (no_vcg) add: doFlush_ccorres) apply (rule ccorres_cond2[where R=\]) - apply (simp add: from_bool_def split: if_split bool.splits) + apply (simp split: if_split bool.splits) apply (rule ccorres_pre_getCurThread) apply (ctac add: setVMRoot_ccorres) apply (rule ccorres_return_Skip) @@ -1653,7 +1653,7 @@ lemma performPageFlush_ccorres: apply (rule ccorres_return_Skip) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply wpsimp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: order_less_imp_le) @@ -1682,12 +1682,12 @@ lemma setRegister_ccorres: (asUser thread (setRegister reg val)) (Call setRegister_'proc)" apply (cinit' lift: thread_' reg_' w_') - apply (simp add: asUser_def dc_def[symmetric] split_def split del: if_split) + apply (simp add: asUser_def split_def) apply (rule ccorres_pre_threadGet) apply (rule ccorres_Guard) apply (simp add: setRegister_def simpler_modify_def exec_select_f_singleton) - apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = rv" - in threadSet_ccorres_lemma2 [unfolded dc_def]) + apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = uc" + in threadSet_ccorres_lemma2) apply vcg apply (clarsimp simp: setRegister_def HaskellLib_H.runState_def simpler_modify_def typ_heap_simps) @@ -1701,7 +1701,7 @@ lemma setRegister_ccorres: apply (rule ball_tcb_cte_casesI, simp+) apply (clarsimp simp: ctcb_relation_def ccontext_relation_def atcbContextSet_def atcbContextGet_def - carch_tcb_relation_def + carch_tcb_relation_def cregs_relation_def split: if_split) apply (clarsimp simp: Collect_const_mem register_from_H_less) @@ -1775,7 +1775,7 @@ lemma performPageDirectoryInvocationFlush_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\]) - apply (simp add: from_bool_def split: if_split bool.splits) + apply (simp split: if_split bool.splits) apply (rule ccorres_pre_getCurThread) apply (ctac add: setVMRoot_ccorres) apply (rule ccorres_return_Skip) @@ -1836,7 +1836,6 @@ lemma flushPage_ccorres: apply (rule ccorres_cond2[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) apply (rule ccorres_pre_getCurThread) - apply (fold dc_def) apply (ctac add: setVMRoot_ccorres) apply (rule ccorres_return_Skip) apply (wp | simp add: cur_tcb'_def[symmetric])+ @@ -1850,8 +1849,8 @@ lemma flushPage_ccorres: apply (wp hoare_drop_imps setVMRootForFlush_invs') apply (clarsimp simp: Collect_const_mem word_sle_def) apply (rule conjI, clarsimp+) - apply (clarsimp simp: pde_stored_asid_def to_bool_def cong: conj_cong - ucast_ucast_mask) + apply (clarsimp simp: pde_stored_asid_def to_bool_def ucast_ucast_mask + cong: conj_cong) apply (drule is_aligned_neg_mask_eq) apply (simp add: pde_pde_invalid_lift_def pde_lift_def mask_def[where n=8] word_bw_assocs mask_def[where n=pageBits]) @@ -2093,8 +2092,7 @@ lemma unmapPage_ccorres: (unmapPage sz asid vptr pptr) (Call unmapPage_'proc)" apply (rule ccorres_gen_asm) apply (cinit lift: page_size_' asid_' vptr_' pptr_') - 
apply (simp add: ignoreFailure_liftM ptr_add_assertion_positive - Collect_True + apply (simp add: ignoreFailure_liftM ptr_add_assertion_positive Collect_True del: Collect_const) apply ccorres_remove_UNIV_guard apply csymbr @@ -2106,16 +2104,16 @@ lemma unmapPage_ccorres: apply (rule ccorres_splitE_novcg[where r'=dc and xf'=xfdc]) \ \ARMSmallPage\ apply (rule ccorres_Cond_rhs) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric]) + apply (simp add: gen_framesize_to_H_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac pt_slot pt_slot') - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_splitE_novcg) - apply (simp only: inl_rrel_inl_rrel) + apply simp apply (rule checkMappingPPtr_pte_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') @@ -2124,7 +2122,7 @@ lemma unmapPage_ccorres: pte_pte_small_lift_def pte_pte_invalid_def split: if_split_asm pte.split_asm) apply (rule ceqv_refl) - apply (simp add: liftE_liftM Collect_const[symmetric] dc_def[symmetric] + apply (simp add: liftE_liftM Collect_const[symmetric] del: Collect_const) apply (rule ccorres_handlers_weaken2) apply csymbr @@ -2132,8 +2130,7 @@ lemma unmapPage_ccorres: apply (rule storePTE_Basic_ccorres) apply (simp add: cpte_relation_def Let_def) apply csymbr - apply simp - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply wp apply (simp add: guard_is_UNIV_def) apply wp @@ -2147,18 +2144,17 @@ lemma unmapPage_ccorres: apply (vcg exspec=lookupPTSlot_modifies) \ \ARMLargePage\ apply (rule ccorres_Cond_rhs) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] + apply (simp add: gen_framesize_to_H_def largePagePTEOffsets_def pteBits_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac ptSlot lookupPTSlot_ret) - apply (simp add: Collect_False dc_def[symmetric] del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, - rule checkMappingPPtr_pte_ccorres) + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pte_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') subgoal by (simp add: cpte_relation_def Let_def pte_lift_def @@ -2166,7 +2162,7 @@ lemma unmapPage_ccorres: pte_pte_large_lift_def pte_pte_invalid_def split: if_split_asm pte.split_asm) apply (rule ceqv_refl) - apply (simp add: liftE_liftM dc_def[symmetric] + apply (simp add: liftE_liftM mapM_discarded whileAnno_def ARMLargePageBits_def ARMSmallPageBits_def Collect_False word_sle_def del: Collect_const) @@ -2197,7 +2193,7 @@ lemma unmapPage_ccorres: apply csymbr apply (rule ccorres_move_c_guard_pte ccorres_move_array_assertion_pte_16)+ apply (rule ccorres_add_return2, - ctac(no_vcg) add: cleanCacheRange_PoU_ccorres[unfolded dc_def]) + ctac(no_vcg) add: cleanCacheRange_PoU_ccorres) apply (rule ccorres_move_array_assertion_pte_16, rule ccorres_return_Skip') apply wp apply (rule_tac P="is_aligned ptSlot 6" in hoare_gen_asm) @@ -2239,32 +2235,29 @@ lemma unmapPage_ccorres: apply (rule ccorres_Cond_rhs) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] - liftE_liftM + apply (simp add: gen_framesize_to_H_def liftE_liftM 
del: Collect_const) apply (simp split: if_split, rule conjI[rotated], rule impI, rule ccorres_empty, rule impI) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, - rule checkMappingPPtr_pde_ccorres) + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pde_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') subgoal by (simp add: pde_pde_section_lift_def cpde_relation_def pde_lift_def Let_def pde_tag_defs isSectionPDE_def split: pde.split_asm if_split_asm) apply (rule ceqv_refl) - apply (simp add: Collect_False dc_def[symmetric] - del: Collect_const) - apply (rule ccorres_handlers_weaken2, simp) + apply (simp add: Collect_False del: Collect_const) + apply (rule ccorres_handlers_weaken2) apply csymbr apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def pde_lift_pde_invalid) apply csymbr - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp @@ -2279,15 +2272,13 @@ lemma unmapPage_ccorres: apply (case_tac "pd = pde_Ptr (lookup_pd_slot pdPtr vptr)") prefer 2 apply (simp, rule ccorres_empty) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] - liftE_liftM mapM_discarded whileAnno_def - superSectionPDEOffsets_def pdeBits_def - del: Collect_const) + apply (simp add: gen_framesize_to_H_def liftE_liftM mapM_discarded whileAnno_def + superSectionPDEOffsets_def pdeBits_def + del: Collect_const) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, - rule checkMappingPPtr_pde_ccorres) + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pde_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') subgoal by (simp add: cpde_relation_def Let_def pde_lift_def @@ -2331,7 +2322,7 @@ lemma unmapPage_ccorres: apply csymbr apply (rule ccorres_move_c_guard_pde ccorres_move_array_assertion_pde_16)+ apply (rule ccorres_add_return2) - apply (ctac(no_vcg) add: cleanCacheRange_PoU_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: cleanCacheRange_PoU_ccorres) apply (rule ccorres_move_array_assertion_pde_16, rule ccorres_return_Skip') apply wp apply (rule_tac P="is_aligned pdPtr pdBits" in hoare_gen_asm) @@ -2368,14 +2359,14 @@ lemma unmapPage_ccorres: apply (rule ccorres_empty[where P=\]) apply ceqv apply (simp add: liftE_liftM) - apply (ctac add: flushPage_ccorres[unfolded dc_def]) + apply (ctac add: flushPage_ccorres) apply ((wp lookupPTSlot_inv mapM_storePTE_invs[unfolded swp_def] mapM_storePDE_invs[unfolded swp_def] | wpc | simp)+)[1] apply (simp add: guard_is_UNIV_def) apply (simp add: throwError_def) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply (simp add: lookup_pd_slot_def Let_def) apply (wp hoare_vcg_const_imp_lift_R) @@ -2890,13 +2881,13 @@ lemma performASIDPoolInvocation_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule_tac ccorres_split_nothrow [where r'=dc and xf'=xfdc]) apply (simp add: updateCap_def) - apply (rule_tac A="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPDCap rv \ asid \ mask asid_bits)" + apply (rule_tac A="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K 
(isPDCap oldcap \ asid \ mask asid_bits)" and A'=UNIV in ccorres_guard_imp2) apply (rule ccorres_pre_getCTE) - apply (rule_tac P="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPDCap rv \ asid \ mask asid_bits) - and cte_wp_at' ((=) rva) ctSlot" + apply (rule_tac P="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPDCap oldcap \ asid \ mask asid_bits) + and cte_wp_at' ((=) rv) ctSlot" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of) @@ -2929,7 +2920,7 @@ lemma performASIDPoolInvocation_ccorres: apply (simp add: cte_to_H_def c_valid_cte_def) apply (simp add: cap_page_directory_cap_lift) apply (simp (no_asm) add: cap_to_H_def) - apply (simp add: to_bool_def asid_bits_def le_mask_imp_and_mask word_bits_def) + apply (simp add: asid_bits_def le_mask_imp_and_mask word_bits_def) apply (erule (1) cap_lift_PDCap_Base) apply simp apply (erule_tac t = s' in ssubst) @@ -2964,7 +2955,7 @@ lemma performASIDPoolInvocation_ccorres: apply (wp getASID_wp) apply simp apply wp - apply (simp add: o_def inv_def) + apply (simp add: inv_def) apply (wp getASID_wp) apply simp apply (rule empty_fail_getObject) @@ -3019,14 +3010,14 @@ lemma flushTable_ccorres: apply (rule_tac R=\ in ccorres_cond2) apply (clarsimp simp: from_bool_0 Collect_const_mem) apply (rule ccorres_pre_getCurThread) - apply (ctac (no_vcg) add: setVMRoot_ccorres [unfolded dc_def]) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (wp static_imp_wp) + apply (ctac (no_vcg) add: setVMRoot_ccorres) + apply (rule ccorres_return_Skip) + apply (wp hoare_weak_lift_imp) apply clarsimp apply (rule_tac Q="\_ s. invs' s \ cur_tcb' s" in hoare_post_imp) apply (simp add: invs'_invs_no_cicd cur_tcb'_def) apply (wp mapM_x_wp_inv getPTE_wp | wpc)+ - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply wp apply clarsimp apply (strengthen invs_valid_pde_mappings') diff --git a/proof/crefine/ARM/Wellformed_C.thy b/proof/crefine/ARM/Wellformed_C.thy index 7668d14bd7..3dc1810608 100644 --- a/proof/crefine/ARM/Wellformed_C.thy +++ b/proof/crefine/ARM/Wellformed_C.thy @@ -39,6 +39,9 @@ abbreviation abbreviation pd_Ptr :: "32 word \ (pde_C[4096]) ptr" where "pd_Ptr == Ptr" +type_synonym registers_count = 20 +type_synonym registers_array = "machine_word[registers_count]" + lemma halt_spec: "Gamma \ {} Call halt_'proc {}" apply (rule hoare_complete) @@ -139,10 +142,6 @@ where abbreviation "ep_queue_relation \ tcb_queue_relation tcbEPNext_C tcbEPPrev_C" -abbreviation - "sched_queue_relation \ tcb_queue_relation tcbSchedNext_C tcbSchedPrev_C" - - definition wordSizeCase :: "'a \ 'a \ 'a" where "wordSizeCase a b \ (if bitSize (undefined::word32) = 32 @@ -243,40 +242,6 @@ definition | Some cap \ Some \ cap_CL = cap, cteMDBNode_CL = mdb_node_lift (cteMDBNode_C c) \" -(* this is slightly weird, but the bitfield generator - masks everything with the expected bit length. - So we do that here too. 
*) -definition - to_bool_bf :: "'a::len word \ bool" where - "to_bool_bf w \ (w && mask 1) = 1" - -lemma to_bool_bf_0 [simp]: "\to_bool_bf 0" - by (simp add: to_bool_bf_def) - -lemma to_bool_bf_1 [simp]: "to_bool_bf 1" - by (simp add: to_bool_bf_def mask_def) - -lemma to_bool_bf_and [simp]: - "to_bool_bf (a && b) = (to_bool_bf a \ to_bool_bf (b::word32))" - apply (clarsimp simp: to_bool_bf_def) - apply (rule iffI) - apply (subst (asm) bang_eq) - apply (simp add: word_size) - apply (rule conjI) - apply (rule word_eqI) - apply (auto simp add: word_size)[1] - apply (rule word_eqI) - apply (auto simp add: word_size)[1] - apply clarsimp - apply (rule word_eqI) - apply (subst (asm) bang_eq)+ - apply (auto simp add: word_size)[1] - done - -lemma to_bool_bf_to_bool_mask: - "w && mask (Suc 0) = w \ to_bool_bf w = to_bool (w::word32)" - by (metis One_nat_def mask_eq1_nochoice fold_eq_0_to_bool mask_1 to_bool_bf_0 to_bool_bf_def) - definition mdb_node_to_H :: "mdb_node_CL \ mdbnode" where @@ -460,31 +425,31 @@ lemma maxDom_sgt_0_maxDomain: lemma num_domains_calculation: "num_domains = numDomains" - unfolding num_domains_def by eval + unfolding num_domains_val by eval private lemma num_domains_card_explicit: "num_domains = CARD(num_domains)" - by (simp add: num_domains_def) + by (simp add: num_domains_val) lemmas num_domains_index_updates = - index_update[where 'b=num_domains, folded num_domains_card_explicit num_domains_def, + index_update[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, simplified num_domains_calculation] - index_update2[where 'b=num_domains, folded num_domains_card_explicit num_domains_def, + index_update2[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, simplified num_domains_calculation] (* C ArrayGuards will throw these at us and there is no way to avoid a proof of being less than a specific number expressed as a word, so we must introduce these. However, being explicit means lack of discipline can lead to a violation. 
*) -lemma numDomains_less_numeric_explicit[simplified num_domains_def One_nat_def]: +lemma numDomains_less_numeric_explicit[simplified num_domains_val One_nat_def]: "x < Kernel_Config.numDomains \ x < num_domains" by (simp add: num_domains_calculation) -lemma numDomains_less_unat_ucast_explicit[simplified num_domains_def]: +lemma numDomains_less_unat_ucast_explicit[simplified num_domains_val]: "unat x < Kernel_Config.numDomains \ (ucast (x::domain) :: machine_word) < of_nat num_domains" apply (rule word_less_nat_alt[THEN iffD2]) apply transfer apply simp - apply (drule numDomains_less_numeric_explicit, simp add: num_domains_def) + apply (drule numDomains_less_numeric_explicit, simp add: num_domains_val) done lemmas maxDomain_le_unat_ucast_explicit = @@ -509,7 +474,7 @@ value_type num_tcb_queues = "numDomains * numPriorities" lemma num_tcb_queues_calculation: "num_tcb_queues = numDomains * numPriorities" - unfolding num_tcb_queues_def by eval + unfolding num_tcb_queues_val by eval abbreviation(input) diff --git a/proof/crefine/ARM_HYP/ADT_C.thy b/proof/crefine/ARM_HYP/ADT_C.thy index 1fed61071a..a5f581dddf 100644 --- a/proof/crefine/ARM_HYP/ADT_C.thy +++ b/proof/crefine/ARM_HYP/ADT_C.thy @@ -93,7 +93,7 @@ lemma setTCBContext_C_corres: apply clarsimp apply (frule getObject_eq [rotated -1], simp) apply (simp add: objBits_simps') - apply (simp add: NonDetMonad.bind_def split_def) + apply (simp add: Nondet_Monad.bind_def split_def) apply (rule bexI) prefer 2 apply assumption @@ -114,11 +114,6 @@ lemma setTCBContext_C_corres: apply (simp add: map_to_ctes_upd_tcb_no_ctes map_to_tcbs_upd tcb_cte_cases_def cvariable_relation_upd_const ko_at_projectKO_opt) apply (simp add: cep_relations_drop_fun_upd) - apply (rule conjI) - defer - apply (erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) apply (simp add: ctcb_relation_def carch_tcb_relation_def) @@ -131,11 +126,13 @@ end definition "register_to_H \ inv register_from_H" +context state_rel begin + definition to_user_context_C :: "user_context \ user_context_C" where - "to_user_context_C uc \ user_context_C (FCP (\r. uc (register_to_H (of_nat r))))" - + "to_user_context_C uc \ + user_context_C (ARRAY r. user_regs uc (register_to_H (of_nat r)))" (* FIXME ARMHYP is this useful in any other file? *) (* Note: depends on vcpuactive being false when vcpuptr is NULL! *) @@ -147,21 +144,16 @@ where then None else Some (ptr_val vcpuptr, to_bool vcpuactive)" -context kernel_m begin - -lemma ccontext_rel_to_C: - "ccontext_relation uc (to_user_context_C uc)" - apply (clarsimp simp: ccontext_relation_def to_user_context_C_def) - apply (rule arg_cong [where f=uc]) - apply (simp add: register_to_H_def inv_def) - done - -end - definition from_user_context_C :: "user_context_C \ user_context" where - "from_user_context_C uc \ \r. index (registers_C uc) (unat (register_from_H r))" + "from_user_context_C uc \ + UserContext (\r. 
(registers_C uc).[unat (register_from_H r)])" + +lemma (in kernel_m) ccontext_rel_to_C: + "ccontext_relation uc (to_user_context_C uc)" + unfolding ccontext_relation_def to_user_context_C_def cregs_relation_def + by (clarsimp simp: register_to_H_def inv_def) definition getContext_C :: "tcb_C ptr \ cstate \ user_context" @@ -171,7 +163,12 @@ where lemma from_user_context_C: "ccontext_relation uc uc' \ from_user_context_C uc' = uc" - by (auto simp: ccontext_relation_def from_user_context_C_def) + unfolding ccontext_relation_def cregs_relation_def + apply (cases uc) + apply (auto simp: from_user_context_C_def) + done + +end context kernel_m begin @@ -661,25 +658,50 @@ lemma tcb_queue_rel'_unique: apply (erule(2) tcb_queue_rel_unique) done -definition - cready_queues_to_H - :: "(tcb_C ptr \ tcb_C) \ (tcb_queue_C[num_tcb_queues]) \ word8 \ word8 \ word32 list" +definition tcb_queue_C_to_tcb_queue :: "tcb_queue_C \ tcb_queue" where + "tcb_queue_C_to_tcb_queue q \ + TcbQueue (if head_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (head_C q))) + (if end_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (end_C q)))" + +definition cready_queues_to_H :: + "tcb_queue_C[num_tcb_queues] \ (domain \ priority \ ready_queue)" where - "cready_queues_to_H h_tcb cs \ \(qdom, prio). if ucast minDom \ qdom \ qdom \ ucast maxDom - \ ucast seL4_MinPrio \ prio \ prio \ ucast seL4_MaxPrio - then THE aq. let cqueue = index cs (cready_queues_index_to_C qdom prio) - in sched_queue_relation' h_tcb aq (head_C cqueue) (StateRelation_C.end_C cqueue) - else []" + "cready_queues_to_H cs \ + \(qdom, prio). + if qdom \ maxDomain \ prio \ maxPriority + then let cqueue = index cs (cready_queues_index_to_C qdom prio) + in tcb_queue_C_to_tcb_queue cqueue + else TcbQueue None None" lemma cready_queues_to_H_correct: - "cready_queues_relation (clift s) cs as \ - cready_queues_to_H (clift s) cs = as" - apply (clarsimp simp: cready_queues_to_H_def cready_queues_relation_def - fun_eq_iff) - apply (rule the_equality) - apply simp - apply (clarsimp simp: Let_def) - apply (rule_tac hp="clift s" in tcb_queue_rel'_unique, simp_all add: lift_t_NULL) + "\cready_queues_relation (ksReadyQueues s) (ksReadyQueues_' ch); + no_0_obj' s; ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ cready_queues_to_H (ksReadyQueues_' ch) = ksReadyQueues s" + apply (clarsimp simp: cready_queues_to_H_def cready_queues_relation_def Let_def) + apply (clarsimp simp: fun_eq_iff) + apply (rename_tac d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (rule conjI) + apply (clarsimp simp: tcb_queue_C_to_tcb_queue_def ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (case_tac "tcbQueueHead (ksReadyQueues s (d, p)) = None") + apply (clarsimp simp: tcb_queue.expand) + apply clarsimp + apply (rename_tac queue_head queue_end) + apply (prop_tac "tcb_at' queue_head s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (prop_tac "tcb_at' queue_end s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (drule kernel.tcb_at_not_NULL)+ + apply (fastforce simp: tcb_queue.expand kernel.ctcb_ptr_to_ctcb_ptr) + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits; 
+ metis tcb_queue.exhaust_sel word_not_le) done (* showing that cpspace_relation is actually unique >>>*) @@ -742,9 +764,16 @@ lemma cpspace_cte_relation_unique: lemma inj_tcb_ptr_to_ctcb_ptr: "inj tcb_ptr_to_ctcb_ptr" by (simp add: inj_on_def tcb_ptr_to_ctcb_ptr_def) +lemma cregs_relation_imp_eq: + "cregs_relation f x \ cregs_relation g x \ f=g" + by (auto simp: cregs_relation_def) + lemma ccontext_relation_imp_eq: "ccontext_relation f x \ ccontext_relation g x \ f=g" - by (rule ext) (simp add: ccontext_relation_def) + unfolding ccontext_relation_def + apply (cases f, cases g) + apply (auto dest: cregs_relation_imp_eq) + done lemma map_to_ctes_tcb_ctes: notes if_cong[cong] @@ -813,24 +842,23 @@ lemma cthread_state_rel_imp_eq: "cthread_state_relation x z \ cthread_state_relation y z \ x=y" apply (simp add: cthread_state_relation_def split_def) apply (cases x) - apply (cases y, simp_all add: ThreadState_BlockedOnReceive_def - ThreadState_BlockedOnReply_def ThreadState_BlockedOnNotification_def - ThreadState_Running_def ThreadState_Inactive_def - ThreadState_IdleThreadState_def ThreadState_BlockedOnSend_def - ThreadState_Restart_def)+ + apply (cases y, simp_all add: ThreadState_defs)+ done -lemma ksPSpace_valid_objs_tcbBoundNotification_nonzero: - "\s. ksPSpace s = ah \ no_0_obj' s \ valid_objs' s - \ map_to_tcbs ah p = Some tcb \ tcbBoundNotification tcb \ Some 0" +lemma map_to_tcbs_Some_refs_nonzero: + "\map_to_tcbs (ksPSpace s) p = Some tcb; no_0_obj' s; valid_objs' s\ + \ tcbBoundNotification tcb \ Some 0 + \ tcbSchedPrev tcb \ Some 0 + \ tcbSchedNext tcb \ Some 0" + supply word_neq_0_conv[simp del] apply (clarsimp simp: map_comp_def split: option.splits) - apply (erule(1) valid_objsE') - apply (clarsimp simp: projectKOs valid_obj'_def valid_tcb'_def) + apply (erule (1) valid_objsE') + apply (fastforce simp: projectKOs valid_obj'_def valid_tcb'_def) done lemma ksPSpace_valid_objs_atcbVCPUPtr_nonzero: - "\s. ksPSpace s = ah \ no_0_obj' s \ valid_objs' s - \ map_to_tcbs ah p = Some tcb \ atcbVCPUPtr (tcbArch tcb) \ Some 0" + "\no_0_obj' s; valid_objs' s; map_to_tcbs (ksPSpace s) p = Some tcb\ + \ atcbVCPUPtr (tcbArch tcb) \ Some 0" apply (clarsimp simp: map_comp_def split: option.splits) apply (erule(1) valid_objsE') apply (clarsimp simp: projectKOs valid_obj'_def valid_tcb'_def valid_arch_tcb'_def) @@ -855,36 +883,77 @@ lemma carch_tcb_relation_imp_eq: apply (case_tac vcpuptr2 ; simp) done +lemma tcb_ptr_to_ctcb_ptr_inj: + "tcb_ptr_to_ctcb_ptr x = tcb_ptr_to_ctcb_ptr y \ x = y" + by (auto simp: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) + +lemma + assumes "pspace_aligned' as" "pspace_distinct' as" "valid_tcb' atcb as" + shows tcb_at'_tcbBoundNotification: + "bound (tcbBoundNotification atcb) \ ntfn_at' (the (tcbBoundNotification atcb)) as" + and tcb_at'_tcbSchedPrev: + "tcbSchedPrev atcb \ None \ tcb_at' (the (tcbSchedPrev atcb)) as" + and tcb_at'_tcbSchedNext: + "tcbSchedNext atcb \ None \ tcb_at' (the (tcbSchedNext atcb)) as" + using assms + by (clarsimp simp: valid_tcb'_def obj_at'_def)+ + lemma cpspace_tcb_relation_unique: - assumes tcbs: "cpspace_tcb_relation ah ch" "cpspace_tcb_relation ah' ch" - and vs: "\s. ksPSpace s = ah \ no_0_obj' s \ valid_objs' s" - and vs': "\s. ksPSpace s = ah' \ no_0_obj' s \ valid_objs' s" - assumes ctes: " \tcb tcb'. (\p. map_to_tcbs ah p = Some tcb \ - map_to_tcbs ah' p = Some tcb') \ - (\x\ran tcb_cte_cases. 
fst x tcb' = fst x tcb)" - shows "map_to_tcbs ah' = map_to_tcbs ah" + assumes tcbs: "cpspace_tcb_relation (ksPSpace as) ch" "cpspace_tcb_relation (ksPSpace as') ch" + assumes vs: "no_0_obj' as" "valid_objs' as" + assumes vs': "no_0_obj' as'" "valid_objs' as'" + assumes ad: "pspace_aligned' as" "pspace_distinct' as" + assumes ad': "pspace_aligned' as'" "pspace_distinct' as'" + assumes ctes: "\tcb tcb'. (\p. map_to_tcbs (ksPSpace as) p = Some tcb \ + map_to_tcbs (ksPSpace as') p = Some tcb') \ + (\x\ran tcb_cte_cases. fst x tcb' = fst x tcb)" + shows "map_to_tcbs (ksPSpace as') = map_to_tcbs (ksPSpace as)" using tcbs(2) tcbs(1) apply (clarsimp simp add: cmap_relation_def) apply (drule inj_image_inv[OF inj_tcb_ptr_to_ctcb_ptr])+ apply (simp add: tcb_ptr_to_ctcb_ptr_def[abs_def] ctcb_offset_def) apply (rule ext) - apply (case_tac "x:dom (map_to_tcbs ah)") + apply (case_tac "x \ dom (map_to_tcbs (ksPSpace as))") apply (drule bspec, assumption)+ apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) apply clarsimp apply (rename_tac p x y) apply (cut_tac ctes) apply (drule_tac x=x in spec, drule_tac x=y in spec, erule impE, fastforce) - apply (frule ksPSpace_valid_objs_tcbBoundNotification_nonzero[OF vs]) - apply (frule ksPSpace_valid_objs_tcbBoundNotification_nonzero[OF vs']) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs]) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs']) apply (frule ksPSpace_valid_objs_atcbVCPUPtr_nonzero[OF vs]) apply (frule ksPSpace_valid_objs_atcbVCPUPtr_nonzero[OF vs']) + apply (rename_tac atcb atcb') + apply (prop_tac "valid_tcb' atcb as") + apply (fastforce intro: vs ad map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (prop_tac "valid_tcb' atcb' as'") + apply (fastforce intro: vs' ad' map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (frule tcb_at'_tcbSchedPrev[OF ad]) + apply (frule tcb_at'_tcbSchedPrev[OF ad']) + apply (frule tcb_at'_tcbSchedNext[OF ad]) + apply (frule tcb_at'_tcbSchedNext[OF ad']) apply (thin_tac "map_to_tcbs x y = Some z" for x y z)+ - apply (case_tac x, case_tac y, case_tac "the (clift ch (tcb_Ptr (p+0x100)))") + apply (case_tac "the (clift ch (tcb_Ptr (p + 2 ^ ctcb_size_bits)))") apply (clarsimp simp: ctcb_relation_def ran_tcb_cte_cases) - apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.splits) - apply (auto simp: cfault_rel_imp_eq cthread_state_rel_imp_eq carch_tcb_relation_imp_eq - ccontext_relation_imp_eq up_ucast_inj_eq ctcb_size_bits_def) + apply (clarsimp simp: option_to_ctcb_ptr_def option_to_ptr_def option_to_0_def) + apply (rule tcb.expand) + apply clarsimp + apply (intro conjI) + apply (simp add: cthread_state_rel_imp_eq) + apply (simp add: cfault_rel_imp_eq) + apply (case_tac "tcbBoundNotification atcb'", case_tac "tcbBoundNotification atcb"; clarsimp) + apply (clarsimp split: option.splits) + apply (case_tac "tcbSchedPrev atcb'"; case_tac "tcbSchedPrev atcb"; clarsimp) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (case_tac "tcbSchedNext atcb'"; case_tac "tcbSchedNext atcb"; clarsimp) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (force simp: carch_tcb_relation_imp_eq) + apply (force simp: carch_tcb_relation_def ) done lemma tcb_queue_rel_clift_unique: @@ -915,10 +984,6 @@ lemma ksPSpace_valid_pspace_ntfnBoundTCB_nonzero: apply (clarsimp simp: projectKOs valid_obj'_def valid_ntfn'_def) done 
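(Editorial sketch, not part of the patch.) The reworked cpspace_tcb_relation_unique proof above transfers equality of the C tcbSchedNext_C/tcbSchedPrev_C fields back to the abstract option-valued fields by combining tcb_ptr_to_ctcb_ptr_inj with the kernel.tcb_at_not_NULL facts, so that the NULL encoding of None can never collide with a Some case. A minimal illustration of that step, assuming the usual l4v definition of option_to_ctcb_ptr (None maps to NULL, Some t maps to tcb_ptr_to_ctcb_ptr t) and with a guessed proof script:

(* Illustrative only, not part of the patch: if neither option is encoded as NULL,
   equal option_to_ctcb_ptr images give equal options. *)
lemma option_to_ctcb_ptr_inj_on_nonNULL:
  "⟦ option_to_ctcb_ptr p = option_to_ctcb_ptr q;
     ∀t. p = Some t ⟶ tcb_ptr_to_ctcb_ptr t ≠ NULL;
     ∀t. q = Some t ⟶ tcb_ptr_to_ctcb_ptr t ≠ NULL ⟧
   ⟹ p = q"
  (* case split on both options; the Some/Some case reduces to injectivity of
     adding the constant ctcb_offset, the mixed cases contradict the non-NULL
     assumptions *)
  by (cases p; cases q;
      clarsimp simp: option_to_ctcb_ptr_def tcb_ptr_to_ctcb_ptr_def ctcb_offset_def)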
-lemma tcb_ptr_to_ctcb_ptr_inj: - "tcb_ptr_to_ctcb_ptr x = tcb_ptr_to_ctcb_ptr y \ x = y" - by (auto simp: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) - lemma cpspace_ntfn_relation_unique: assumes ntfns: "cpspace_ntfn_relation ah ch" "cpspace_ntfn_relation ah' ch" and vs: "\s. ksPSpace s = ah \ valid_pspace' s" @@ -1014,9 +1079,7 @@ lemma cpspace_vcpu_relation_unique: apply (case_tac "64 \ r"; simp) apply (rule conjI) apply (rule ext, blast) - apply (rule conjI) - apply (rule ext, rename_tac vppi) - apply (rule from_bool_eqI, blast) + apply (rule conjI, blast) apply (case_tac vtimer, case_tac vtimer') apply clarsimp done @@ -1264,8 +1327,8 @@ proof - OF valid_objs'_imp_wf_asid_pool'[OF valid_objs] valid_objs'_imp_wf_asid_pool'[OF valid_objs']]) apply (drule (1) cpspace_tcb_relation_unique) - apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs') - apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs') + apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs' + aligned aligned' distinct distinct')+ apply (intro allI impI,elim exE conjE) apply (rule_tac p=p in map_to_ctes_tcb_ctes, assumption) apply (frule (1) map_to_ko_atI[OF _ aligned distinct]) @@ -1484,7 +1547,7 @@ where ksDomSchedule = cDomSchedule_to_H kernel_all_global_addresses.ksDomSchedule, ksCurDomain = ucast (ksCurDomain_' s), ksDomainTime = ksDomainTime_' s, - ksReadyQueues = cready_queues_to_H (clift (t_hrs_' s)) (ksReadyQueues_' s), + ksReadyQueues = cready_queues_to_H (ksReadyQueues_' s), ksReadyQueuesL1Bitmap = cbitmap_L1_to_H (ksReadyQueuesL1Bitmap_' s), ksReadyQueuesL2Bitmap = cbitmap_L2_to_H (ksReadyQueuesL2Bitmap_' s), ksCurThread = ctcb_ptr_to_tcb_ptr (ksCurThread_' s), @@ -1503,16 +1566,16 @@ lemma trivial_eq_conj: "B = C \ (A \ B) = (A \ C)" lemma (in kernel_m) cstate_to_H_correct: assumes valid: "valid_state' as" assumes cstate_rel: "cstate_relation as cs" + assumes rdyqs: "ksReadyQueues_asrt as" shows "cstate_to_H cs = as \ksMachineState:= observable_memory (ksMachineState as) (user_mem' as)\" apply (subgoal_tac "cstate_to_machine_H cs = observable_memory (ksMachineState as) (user_mem' as)") apply (rule kernel_state.equality, simp_all add: cstate_to_H_def) - apply (rule cstate_to_pspace_H_correct) + apply (rule cstate_to_pspace_H_correct) using valid apply (simp add: valid_state'_def) using cstate_rel valid apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def - observable_memory_def valid_state'_def - valid_pspace'_def) + observable_memory_def valid_state'_def valid_pspace'_def) using cstate_rel apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def prod_eq_iff) using cstate_rel @@ -1536,8 +1599,13 @@ lemma (in kernel_m) cstate_to_H_correct: using cstate_rel apply (clarsimp simp: cstate_relation_def Let_def) apply (rule cready_queues_to_H_correct) - using cstate_rel - apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel rdyqs + apply (fastforce intro!: cready_queues_to_H_correct + simp: cstate_relation_def Let_def) + using valid apply (fastforce simp: valid_state'_def) + using rdyqs apply fastforce + using valid apply (fastforce simp: valid_state'_def) + using valid apply (fastforce simp: valid_state'_def) using cstate_rel apply (clarsimp simp: cstate_relation_def Let_def) using cstate_rel diff --git a/proof/crefine/ARM_HYP/ArchMove_C.thy b/proof/crefine/ARM_HYP/ArchMove_C.thy index b690e7d5bd..a2a025aa00 100644 --- a/proof/crefine/ARM_HYP/ArchMove_C.thy +++ b/proof/crefine/ARM_HYP/ArchMove_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 
2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * Copyright 2014, General Dynamics C4 Systems * @@ -146,7 +147,7 @@ lemma setCTE_asidpool': "\ ko_at' (ASIDPool pool) p \ setCTE c p' \\_. ko_at' (ASIDPool pool) p\" apply (clarsimp simp: setCTE_def) apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad) apply (frule updateObject_type) apply (clarsimp simp: obj_at'_def projectKOs) @@ -187,21 +188,13 @@ lemma dmo_invalidateCacheRange_RAM_invs'[wp]: lemma empty_fail_findPDForASID[iff]: "empty_fail (findPDForASID asid)" - apply (simp add: findPDForASID_def liftME_def) - apply (intro empty_fail_bindE, simp_all split: option.split) - apply (simp add: assertE_def split: if_split) - apply (simp add: assertE_def split: if_split) - apply (simp add: empty_fail_getObject) - apply (simp add: assertE_def liftE_bindE checkPDAt_def split: if_split) - done + unfolding findPDForASID_def checkPDAt_def + by (wpsimp wp: empty_fail_getObject) lemma empty_fail_findPDForASIDAssert[iff]: "empty_fail (findPDForASIDAssert asid)" - apply (simp add: findPDForASIDAssert_def catch_def - checkPDAt_def checkPDUniqueToASID_def - checkPDASIDMapMembership_def) - apply (intro empty_fail_bind, simp_all split: sum.split) - done + unfolding findPDForASIDAssert_def checkPDAt_def checkPDUniqueToASID_def checkPDASIDMapMembership_def + by (wpsimp wp: empty_fail_getObject) lemma vcpu_at_ko: "vcpu_at' p s \ \vcpu. ko_at' (vcpu::vcpu) p s" @@ -229,7 +222,7 @@ lemma atg_sp': (* FIXME: MOVE to EmptyFail *) lemma empty_fail_archThreadGet [intro!, wp, simp]: "empty_fail (archThreadGet f p)" - by (simp add: archThreadGet_def getObject_def split_def) + by (fastforce simp: archThreadGet_def getObject_def split_def) lemma mab_gt_2 [simp]: "2 \ msg_align_bits" by (simp add: msg_align_bits) @@ -415,12 +408,6 @@ lemma ko_at'_tcb_vcpu_not_NULL: by (fastforce simp: valid_tcb'_def valid_arch_tcb'_def word_gt_0 typ_at'_no_0_objD dest: valid_objs_valid_tcb') - -(* FIXME move *) -lemma setVMRoot_valid_queues': - "\ valid_queues' \ setVMRoot a \ \_. valid_queues' \" - by (rule valid_queues_lift'; wp) - lemma vcpuEnable_valid_pspace' [wp]: "\ valid_pspace' \ vcpuEnable a \\_. valid_pspace' \" by (wpsimp simp: valid_pspace'_def valid_mdb'_def) @@ -450,8 +437,6 @@ crunch ko_at'2[wp]: doMachineOp "\s. P (ko_at' p t s)" crunch pred_tcb_at'2[wp]: doMachineOp "\s. P (pred_tcb_at' a b p s)" (simp: crunch_simps) -crunch valid_queues'[wp]: readVCPUReg "\s. valid_queues s" - crunch valid_objs'[wp]: readVCPUReg "\s. valid_objs' s" crunch sch_act_wf'[wp]: readVCPUReg "\s. P (sch_act_wf (ksSchedulerAction s) s)" @@ -464,8 +449,9 @@ crunch pred_tcb_at'[wp]: readVCPUReg "\s. P (pred_tcb_at' a b p s)" crunch ksCurThread[wp]: readVCPUReg "\s. P (ksCurThread s)" +(* schematic_goal leads to Suc (Suc ..) 
form only *) lemma fromEnum_maxBound_vcpureg_def: - "fromEnum (maxBound :: vcpureg) = 41" + "fromEnum (maxBound :: vcpureg) = 42" by (clarsimp simp: fromEnum_def maxBound_def enum_vcpureg) lemma unat_of_nat_mword_fromEnum_vcpureg[simp]: @@ -493,7 +479,7 @@ lemma ps_clear_entire_slotI: by (fastforce simp: ps_clear_def) lemma ps_clear_ksPSpace_upd_same[simp]: - "ps_clear p n (s\ksPSpace := ksPSpace s(p \ v)\) = ps_clear p n s" + "ps_clear p n (s\ksPSpace := (ksPSpace s)(p \ v)\) = ps_clear p n s" by (fastforce simp: ps_clear_def) lemma getObject_vcpu_prop: @@ -530,6 +516,12 @@ lemma placeNewObject_creates_object_vcpu: apply (fastforce intro: ps_clear_entire_slotI simp add: field_simps) done +(* FIXME would be interesting to generalise these kinds of lemmas to other KOs *) +lemma placeNewObject_object_at_vcpu: + "\ \ \ placeNewObject v (vcpu::vcpu) 0 \ \_. vcpu_at' v \" + by (rule hoare_post_imp[OF _ placeNewObject_creates_object_vcpu]) + (fastforce simp: ko_at_vcpu_at'D) + lemma valid_untyped': notes usableUntypedRange.simps[simp del] assumes pspace_distinct': "pspace_distinct' s" and @@ -602,7 +594,7 @@ lemma cap_case_isPageDirectoryCap: lemma empty_fail_loadWordUser[intro!, simp]: "empty_fail (loadWordUser x)" - by (simp add: loadWordUser_def ef_loadWord ef_dmo') + by (fastforce simp: loadWordUser_def ef_loadWord ef_dmo') lemma empty_fail_getMRs[iff]: "empty_fail (getMRs t buf mi)" @@ -612,30 +604,20 @@ lemma empty_fail_getReceiveSlots: "empty_fail (getReceiveSlots r rbuf)" proof - note - empty_fail_assertE[iff] - empty_fail_resolveAddressBits[iff] + empty_fail_resolveAddressBits[wp] + empty_fail_rethrowFailure[wp] + empty_fail_rethrowFailure[wp] show ?thesis - apply (clarsimp simp: getReceiveSlots_def loadCapTransfer_def split_def - split: option.split) - apply (rule empty_fail_bind) - apply (simp add: capTransferFromWords_def) - apply (simp add: emptyOnFailure_def unifyFailure_def) - apply (intro empty_fail_catch empty_fail_bindE empty_fail_rethrowFailure, - simp_all add: empty_fail_whenEs) - apply (simp_all add: lookupCap_def split_def lookupCapAndSlot_def - lookupSlotForThread_def liftME_def - getThreadCSpaceRoot_def locateSlot_conv bindE_assoc - lookupSlotForCNodeOp_def lookupErrorOnFailure_def - cong: if_cong) - apply (intro empty_fail_bindE, - simp_all add: getSlotCap_def) - apply (intro empty_fail_If empty_fail_bindE empty_fail_rethrowFailure impI, - simp_all add: empty_fail_whenEs rangeCheck_def) - done + unfolding getReceiveSlots_def loadCapTransfer_def lookupCap_def lookupCapAndSlot_def + by (wpsimp simp: emptyOnFailure_def unifyFailure_def lookupSlotForThread_def + capTransferFromWords_def getThreadCSpaceRoot_def locateSlot_conv bindE_assoc + lookupSlotForCNodeOp_def lookupErrorOnFailure_def rangeCheck_def) qed lemma user_getreg_rv: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb r)) t\ asUser t (getRegister r) \\rv s. P rv\" + "\obj_at' (\tcb. P ((user_regs \ atcbContextGet \ tcbArch) tcb r)) t\ + asUser t (getRegister r) + \\rv s. P rv\" apply (simp add: asUser_def split_def) apply (wp threadGet_wp) apply (clarsimp simp: obj_at'_def projectKOs getRegister_def in_monad atcbContextGet_def) @@ -648,6 +630,17 @@ crunches insertNewCap, Arch_createNewCaps, threadSet, Arch.createObject, setThre simp: unless_def updateObject_default_def crunch_simps ignore_del: preemptionPoint) +(* this could be done as + lemmas addrFromPPtr_mask_6 = addrFromPPtr_mask[where n=6, simplified] + but that wouldn't give a sanity check of the n \ ... 
assumption disappearing *) +lemma addrFromPPtr_mask_6: + "addrFromPPtr ptr && mask 6 = ptr && mask 6" + by (rule addrFromPPtr_mask[where n=6, simplified]) + +lemma ptrFromPAddr_mask_6: + "ptrFromPAddr ps && mask 6 = ps && mask 6" + by (rule ptrFromPAddr_mask[where n=6, simplified]) + end end diff --git a/proof/crefine/ARM_HYP/Arch_C.thy b/proof/crefine/ARM_HYP/Arch_C.thy index abe6c67c95..a07830f949 100644 --- a/proof/crefine/ARM_HYP/Arch_C.thy +++ b/proof/crefine/ARM_HYP/Arch_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -70,12 +71,11 @@ lemma performPageTableInvocationUnmap_ccorres: apply (ctac add: unmapPageTable_ccorres) apply csymbr apply (simp add: storePTE_def' swp_def) - apply (ctac add: clearMemory_PT_setObject_PTE_ccorres[simplified objBits_InvalidPTE, - unfolded dc_def, simplified]) + apply (ctac add: clearMemory_PT_setObject_PTE_ccorres[unfolded objBits_InvalidPTE, simplified]) apply wp apply (simp del: Collect_const) apply (vcg exspec=unmapPageTable_modifies) - apply (simp add: to_bool_def) + apply simp apply (rule ccorres_return_Skip') apply (simp add: cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_lift_page_table_cap cap_to_H_def @@ -438,7 +438,9 @@ shows apply (rule ccorres_rhs_assoc2) apply (rule ccorres_abstract_cleanup) apply (rule ccorres_symb_exec_l) - apply (rule_tac P = "rva = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) + apply (rename_tac pcap) + apply (rule_tac P = "pcap = (capability.UntypedCap isdev frame pageBits idx)" + in ccorres_gen_asm) apply (simp add: hrs_htd_update del:fun_upd_apply) apply (rule ccorres_split_nothrow) @@ -574,10 +576,10 @@ shows pageBits_def split: if_split) apply (clarsimp simp: ARMSmallPageBits_def word_sle_def is_aligned_mask[symmetric] - ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + ghost_assertion_data_get_gs_clear_region) apply (subst ghost_assertion_size_logic_flex[unfolded o_def, rotated]) apply assumption - apply (simp add: ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + apply (simp add: ghost_assertion_data_get_gs_clear_region) apply (drule valid_global_refsD_with_objSize, clarsimp)+ apply (clarsimp simp: isCap_simps dest!: ccte_relation_ccap_relation) apply (cut_tac ptr=frame and bits=12 @@ -834,7 +836,7 @@ lemma decodeARMPageTableInvocation_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply simp @@ -871,7 +873,7 @@ lemma decodeARMPageTableInvocation_ccorres: slotcap_in_mem_def) apply (auto dest: ctes_of_valid')[1] apply (rule conjI) - apply (clarsimp simp: rf_sr_ksCurThread "StrictC'_thread_state_defs" + apply (clarsimp simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size ct_in_state'_def st_tcb_at'_def word_sle_def word_sless_def @@ -897,7 +899,7 @@ lemma decodeARMPageTableInvocation_ccorres: apply (subst array_assertion_abs_pd, erule conjI, simp add: unat_eq_0 unat_shiftr_le_bound table_bits_defs) apply (clarsimp simp: rf_sr_ksCurThread mask_def[where n=4] - "StrictC'_thread_state_defs" + ThreadState_defs ccap_relation_def cap_to_H_def cap_lift_page_table_cap word_bw_assocs shiftr_shiftl1 mask_def[where n=17]) @@ -915,8 +917,8 @@ lemma checkVPAlignment_spec: apply (clarsimp simp: 
mask_eq_iff_w2p word_size) apply (rule conjI) apply (simp add: pageBitsForSize_def split: vmpage_size.split) - apply (simp add: from_bool_def vmsz_aligned'_def is_aligned_mask - mask_def split: if_split) + apply (simp add: vmsz_aligned'_def is_aligned_mask mask_def + split: if_split) done definition @@ -1054,9 +1056,9 @@ lemma createSafeMappingEntries_PDE_ccorres: apply (clarsimp simp: pde_get_tag_alt cpde_relation_pde_case pde_tag_defs fst_throwError_returnOk pde_range_relation_def ptr_range_to_list_def - exception_defs isRight_def from_bool_def[where b=True] + exception_defs isRight_def syscall_error_rel_def syscall_error_to_H_cases) - apply (clarsimp simp: cpde_relation_def true_def false_def) + apply (clarsimp simp: cpde_relation_def) apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) apply (rule ccorres_rhs_assoc)+ @@ -1096,7 +1098,7 @@ lemma createSafeMappingEntries_PDE_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: if_1_0_0 return_def typ_heap_simps Let_def) apply (simp add: isPageTablePDE_def isSectionPDE_def - cpde_relation_pde_case from_bool_def) + cpde_relation_pde_case) apply (intro impI conjI disjCI2, simp_all add: array_assertion_shrink_right)[1] apply (clarsimp simp: pde_tag_defs split: if_split bool.split) apply (frule pde_pde_section_size_0_1[simplified pde_tag_defs, simplified], simp) @@ -1152,8 +1154,7 @@ lemma createSafeMappingEntries_PDE_ccorres: apply (clarsimp simp: vmsz_aligned'_def gen_framesize_to_H_def vm_page_size_defs vm_attribs_relation_def from_bool_mask_simp[unfolded mask_def, simplified] - ptr_range_to_list_def upto_enum_step_def - o_def upto_enum_word + ptr_range_to_list_def upto_enum_step_def upto_enum_word cong: if_cong) apply (frule(1) page_directory_at_rf_sr, clarsimp) apply (frule array_ptr_valid_array_assertionD[OF h_t_valid_clift]) @@ -1161,7 +1162,7 @@ lemma createSafeMappingEntries_PDE_ccorres: ARMSectionBits_def word_0_sle_from_less table_bits_defs) apply (rule conjI) - apply (simp add: cpde_relation_def true_def false_def) + apply (simp add: cpde_relation_def) apply (simp add: superSectionPDEOffsets_def table_bits_defs upto_enum_step_def upto_enum_def comp_def del: upt_Suc split: if_split) done @@ -1207,11 +1208,9 @@ lemma lookupPTSlot_le_0x3C: apply clarsimp apply simp apply simp - apply (simp add: ARM_HYP.ptrFromPAddr_def pptrBaseOffset_def) - apply (erule aligned_add_aligned) - apply (simp add: pptrBase_def ARM_HYP.physBase_def - physBase_def is_aligned_def) - apply (simp add: word_bits_def) + apply (rule is_aligned_ptrFromPAddr_n[rotated], simp) + apply (erule is_aligned_weaken) + apply (simp add: pteBits_def) done lemma createSafeMappingEntries_PTE_ccorres: @@ -1268,8 +1267,7 @@ lemma createSafeMappingEntries_PTE_ccorres: apply (erule cmap_relationE1[OF rf_sr_cpte_relation], erule ko_at_projectKO_opt) apply (clarsimp simp: typ_heap_simps cpte_relation_def Let_def) apply (case_tac rva - ; fastforce simp: if_1_0_0 pte_lifts isLargePagePTE_def false_def true_def - pte_pte_small_lift_def) + ; fastforce simp: pte_lifts isLargePagePTE_def pte_pte_small_lift_def) apply ceqv apply (clarsimp simp del: Collect_const) (* the if/IF condition is now the same on both sides *) @@ -1296,7 +1294,7 @@ lemma createSafeMappingEntries_PTE_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk syscall_error_to_H_cases - syscall_error_rel_def exception_defs false_def) + syscall_error_rel_def exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) 
apply simp @@ -1392,7 +1390,7 @@ lemma createSafeMappingEntries_PTE_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply (wp injection_wp[OF refl]) @@ -1411,8 +1409,7 @@ lemma createSafeMappingEntries_PTE_ccorres: from_bool_mask_simp[unfolded mask_def, simplified]) apply (clarsimp simp: typ_heap_simps pte_range_relation_def ptr_range_to_list_def upto_enum_word) - apply (simp add: cpte_relation_def true_def false_def pte_tag_defs if_1_0_0 table_bits_defs - largePagePTEOffsets_def) + apply (simp add: cpte_relation_def pte_tag_defs table_bits_defs largePagePTEOffsets_def) apply (auto simp: vmsz_aligned'_def upto_enum_step_def upto_enum_def)[1] done @@ -1436,7 +1433,7 @@ definition lemma valid_pte_slots_lift2: "\ \pt. \ page_table_at' pt \ f \ \_. page_table_at' pt \ \ \ \ valid_pte_slots'2 slots \ f \ \_. valid_pte_slots'2 slots \" - apply (cases slots, simp_all add: valid_pte_slots'2_def hoare_post_taut) + apply (cases slots, simp_all add: valid_pte_slots'2_def hoare_TrueI) apply clarsimp apply (wp hoare_vcg_ex_lift hoare_vcg_conj_lift | assumption)+ done @@ -1455,7 +1452,7 @@ definition lemma valid_pde_slots_lift2: "\ \pd. \ page_directory_at' pd \ f \ \_. page_directory_at' pd \ \ \ \ valid_pde_slots'2 slots \ f \ \_. valid_pde_slots'2 slots \" - apply (cases slots, simp_all add: valid_pde_slots'2_def hoare_post_taut) + apply (cases slots, simp_all add: valid_pde_slots'2_def hoare_TrueI) apply clarsimp apply (wp hoare_vcg_ex_lift hoare_vcg_conj_lift | assumption)+ done @@ -1468,23 +1465,6 @@ lemma obj_at_pte_aligned: elim!: is_aligned_weaken) done -lemma addrFromPPtr_mask_5: - "addrFromPPtr ptr && mask (5::nat) = ptr && mask (5::nat)" - apply (simp add:addrFromPPtr_def pptrBaseOffset_def - pptrBase_def physBase_def ARM_HYP.physBase_def) - apply word_bitwise - apply (simp add:mask_def) - done - -lemma addrFromPPtr_mask_6: - "addrFromPPtr ptr && mask (6::nat) = ptr && mask (6::nat)" - apply (simp add:addrFromPPtr_def pptrBaseOffset_def - pptrBase_def physBase_def ARM_HYP.physBase_def) - apply word_bitwise - apply (simp add:mask_def) - done - - lemma pteCheckIfMapped_ccorres: "ccorres (\rv rv'. rv = to_bool rv') ret__unsigned_long_' \ (UNIV \ {s. pte___ptr_to_struct_pte_C_' s = Ptr slot}) [] @@ -1498,9 +1478,8 @@ lemma pteCheckIfMapped_ccorres: apply clarsimp apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps' return_def) - apply (case_tac rv, simp_all add: to_bool_def isInvalidPTE_def pte_tag_defs pte_pte_invalid_def - cpte_relation_def pte_get_tag_def - pte_lift_def Let_def + apply (case_tac rv, simp_all add: isInvalidPTE_def pte_tag_defs pte_pte_invalid_def + cpte_relation_def pte_get_tag_def pte_lift_def Let_def split: if_split_asm) done @@ -1520,13 +1499,13 @@ lemma pdeCheckIfMapped_ccorres: (Call pdeCheckIfMapped_'proc)" apply (cinit lift: pde___ptr_to_struct_pde_C_') apply (rule ccorres_pre_getObject_pde) - apply (rule_tac P'="{s. \pde'. cslift s (pde_Ptr slot) = Some pde' \ cpde_relation rv pde'}" + apply (rule_tac P'="{s. \pde'. cslift s (pde_Ptr slot) = Some pde' \ cpde_relation pd pde'}" in ccorres_from_vcg_throws[where P="\s. 
True"]) apply simp_all apply clarsimp apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps' return_def) - apply (case_tac rv, simp_all add: to_bool_def cpde_relation_invalid isInvalidPDE_def + apply (case_tac pd, simp_all add: cpde_relation_invalid isInvalidPDE_def split: if_split) done @@ -1720,7 +1699,7 @@ lemma performPageInvocationMapPTE_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp:return_def) apply (rule wp_post_taut) - apply (simp add: to_bool_def) + apply simp apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp:return_def) @@ -2098,7 +2077,7 @@ lemma performPageInvocationMapPDE_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp:return_def) apply (rule wp_post_taut) - apply (simp add: to_bool_def) + apply simp apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp:return_def) @@ -2179,7 +2158,7 @@ lemma performPageInvocationMapPDE_ccorres: done lemma performPageGetAddress_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart)) @@ -2205,8 +2184,8 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) @@ -2228,8 +2207,8 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp apply (vcg exspec=setRegister_modifies) @@ -2241,40 +2220,35 @@ lemma performPageGetAddress_ccorres: Kernel_C.msgInfoRegister_def Kernel_C.R1_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply clarsimp apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply vcg apply clarsimp apply (rule conseqPre, vcg) apply clarsimp - apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' rf_sr_ksCurThread msgRegisters_unfold seL4_MessageInfo_lift_def message_info_to_H_def mask_def) apply (cases isCall) apply (auto simp: ARM_HYP.badgeRegister_def ARM_HYP_H.badgeRegister_def Kernel_C.badgeRegister_def - Kernel_C.R0_def fromPAddr_def ThreadState_Running_def + Kernel_C.R0_def fromPAddr_def ThreadState_defs pred_tcb_at'_def obj_at'_def projectKOs ct_in_state'_def) done lemma vmsz_aligned_addrFromPPtr': - "vmsz_aligned' (addrFromPPtr p) sz - = vmsz_aligned' p sz" - apply (simp add: vmsz_aligned'_def addrFromPPtr_def - ARM_HYP.addrFromPPtr_def) - apply (subgoal_tac "is_aligned pptrBaseOffset (pageBitsForSize sz)") - apply 
(rule iffI) - apply (drule(1) aligned_add_aligned) - apply (simp add: pageBitsForSize_def word_bits_def split: vmpage_size.split) - apply simp - apply (erule(1) aligned_sub_aligned) - apply (simp add: pageBitsForSize_def word_bits_def split: vmpage_size.split) - apply (simp add: pageBitsForSize_def pptrBaseOffset_def pptrBase_def - physBase_def ARM_HYP.physBase_def is_aligned_def - split: vmpage_size.split) + "vmsz_aligned' (addrFromPPtr p) sz = vmsz_aligned' p sz" + apply (simp add: vmsz_aligned'_def) + apply (rule iffI) + apply (simp add: addrFromPPtr_def is_aligned_mask) + apply (prop_tac "pptrBaseOffset AND mask (pageBitsForSize sz) = 0") + apply (rule mask_zero[OF is_aligned_weaken[OF pptrBaseOffset_aligned]], simp) + apply (simp flip: mask_eqs(8)) + apply (erule is_aligned_addrFromPPtr_n) + apply (cases sz; clarsimp) done lemmas vmsz_aligned_addrFromPPtr @@ -2407,9 +2381,9 @@ lemma setVMRootForFlush_ccorres2: del: Collect_const) apply (rule ccorres_if_lhs) apply (rule_tac P="(capPDIsMapped_CL (cap_page_directory_cap_lift threadRoot) = 0) - = (capPDMappedASID (capCap rva) = None) + = (capPDMappedASID (capCap rv) = None) \ capPDBasePtr_CL (cap_page_directory_cap_lift threadRoot) - = capPDBasePtr (capCap rva)" in ccorres_gen_asm2) + = capPDBasePtr (capCap rv)" in ccorres_gen_asm2) apply (rule ccorres_rhs_assoc | csymbr | simp add: Collect_True del: Collect_const)+ apply (rule ccorres_split_throws) apply (rule ccorres_return_C, simp+) @@ -2421,7 +2395,7 @@ lemma setVMRootForFlush_ccorres2: apply (ctac (no_vcg) add: armv_contextSwitch_ccorres) apply (ctac add: ccorres_return_C) apply wp - apply (simp add: true_def from_bool_def) + apply simp apply vcg apply (rule conseqPre, vcg) apply (simp add: Collect_const_mem) @@ -2431,14 +2405,12 @@ lemma setVMRootForFlush_ccorres2: apply vcg apply (clarsimp simp: Collect_const_mem if_1_0_0 word_sle_def ccap_rights_relation_def cap_rights_to_H_def - mask_def[where n="Suc 0"] true_def to_bool_def - allRights_def size_of_def cte_level_bits_def + mask_def[where n="Suc 0"] allRights_def size_of_def cte_level_bits_def tcbVTableSlot_def Kernel_C.tcbVTable_def invs'_invs_no_cicd) apply (clarsimp simp: rf_sr_ksCurThread ptr_val_tcb_ptr_mask' [OF tcb_at_invs']) apply (frule cte_at_tcb_at_16'[OF tcb_at_invs'], clarsimp simp: cte_wp_at_ctes_of) apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) - apply (clarsimp simp: false_def true_def from_bool_def - typ_heap_simps' ptr_add_assertion_positive) + apply (clarsimp simp: typ_heap_simps' ptr_add_assertion_positive) apply (clarsimp simp: tcb_cnode_index_defs rf_sr_tcb_ctes_array_assertion2[OF _ tcb_at_invs', THEN array_assertion_shrink_right]) @@ -2467,12 +2439,12 @@ where lemma resolve_ret_rel_None[simp]: "resolve_ret_rel None y = (valid_C y = scast false)" - by (clarsimp simp: resolve_ret_rel_def o_def to_option_def false_def to_bool_def split: if_splits) + by (clarsimp simp: resolve_ret_rel_def to_option_def to_bool_def split: if_splits) lemma resolve_ret_rel_Some: "\valid_C y = scast true; frameSize_C y = framesize_from_H (fst x); snd x = frameBase_C y\ \ resolve_ret_rel (Some x) y" - by (clarsimp simp: resolve_ret_rel_def o_def to_option_def true_def) + by (clarsimp simp: resolve_ret_rel_def to_option_def) lemma pte_get_tag_exhaust: "pte_get_tag pte = 0 \ pte_get_tag pte = 1 \ pte_get_tag pte = 2 \ pte_get_tag pte = 3" @@ -2563,12 +2535,12 @@ lemma resolveVAddr_ccorres: prefer 2 apply (simp add: mask_def ARMLargePage_def) \ \reduce to resolve_ret_rel goals first\ - apply (clarsimp simp: fst_return 
pte_get_tag_alt true_def false_def pt_bits_def pte_bits_def + apply (clarsimp simp: fst_return pte_get_tag_alt pt_bits_def pte_bits_def split: pte.splits) apply (safe ; clarsimp simp: cpte_relation_get_tag_simps c_pages_noteq) (* 4 subgoals *) apply (fastforce simp: cpte_relation_def pte_pte_small_lift_def pte_lift_def Let_def mask_def - valid_mapping'_def true_def framesize_from_H_simps page_base_def + valid_mapping'_def framesize_from_H_simps page_base_def split: if_splits intro!: resolve_ret_rel_Some dest!: is_aligned_neg_mask_eq)+ done @@ -2585,12 +2557,12 @@ lemma resolveVAddr_ccorres: apply clarsimp apply (clarsimp simp: isPageTablePDE_def pde_get_tag_alt pde_tag_defs cpde_relation_def fst_return typ_heap_simps framesize_from_H_simps - pde_pde_section_lift_def true_def + pde_pde_section_lift_def intro: resolve_ret_rel_Some split: pde.splits) subgoal apply (fastforce simp: cpte_relation_def pte_pte_small_lift_def pte_lift_def Let_def mask_def - valid_mapping'_def true_def framesize_from_H_simps + valid_mapping'_def framesize_from_H_simps split: if_splits intro!: resolve_ret_rel_Some dest!: is_aligned_neg_mask_eq)+ done @@ -2599,7 +2571,7 @@ lemma resolveVAddr_ccorres: apply (rule conjI) apply (clarsimp simp: gen_framesize_to_H_def split: if_splits) apply (rule resolve_ret_rel_Some; - clarsimp simp: true_def framesize_from_H_simps ARMSuperSection_def) + clarsimp simp: framesize_from_H_simps ARMSuperSection_def) apply (clarsimp simp: page_base_def gen_framesize_to_H_def ARMSmallPage_def ARMLargePage_def ARMSection_def mask_def) done @@ -2787,32 +2759,48 @@ lemma decodeARMFrameInvocation_ccorres: apply csymbr apply csymbr apply csymbr + apply csymbr + (* can't csymbr the IF calculation because of the function calls, but we can show + it's the same as the condition on the Haskell side*) + apply (rule ccorres_rhs_assoc2) + apply (rule_tac xf'=ret__int_' and R'=UNIV and R=\ and + val="from_bool ( + addrFromPPtr v0 + hd args < physBase \ + ARM_HYP_H.fromPAddr paddrTop + < hd (drop (Suc 0) args) - hd args + + ARM_HYP_H.fromPAddr (addrFromPPtr v0 + hd args))" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (clarsimp dest!: ccap_relation_PageCap_generics) + apply (clarsimp simp: hd_drop_conv_nth hd_conv_nth) + (* sync up preprocessor-defined number sources coming from C *) + apply (clarsimp simp: fromPAddr_def paddrTop_def pptrBase_def pptrTop_def + pptrBaseOffset_def add.commute from_bool_eq_if') + apply ceqv - apply (rule ccorres_if_cond_throws[rotated -1,where Q = \ and Q' = \]) - apply vcg - apply (clarsimp simp: paddrTop_def ARM_HYP.paddrTop_def pptrTop_def fromPAddr_def - physBase_def ARM_HYP.physBase_def ARM_HYP.pptrBase_def - hd_drop_conv_nth hd_conv_nth) - apply (clarsimp dest!: ccap_relation_PageCap_generics) - apply (simp add:injection_handler_throwError) - apply (rule syscall_error_throwError_ccorres_n) - apply (simp add: syscall_error_to_H_cases) - apply (simp add: performARMMMUInvocations bindE_assoc) - apply (ctac add: setThreadState_ccorres) - apply (ctac(no_vcg) add: performPageFlush_ccorres) - apply (rule ccorres_gen_asm) - apply (erule ssubst[OF if_P, where P="\x. 
ccorres _ _ _ _ _ x _"]) - apply (rule ccorres_alternative2) - apply (rule ccorres_return_CE, simp+)[1] - apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) - apply (wpsimp simp: performPageInvocation_def) + apply (rule ccorres_if_cond_throws[rotated -1,where Q = \ and Q' = \]) + apply vcg + apply (solves clarsimp) + apply (simp add:injection_handler_throwError) + apply (rule syscall_error_throwError_ccorres_n) + apply (simp add: syscall_error_to_H_cases) + apply (simp add: performARMMMUInvocations bindE_assoc) + apply (ctac add: setThreadState_ccorres) + apply (ctac(no_vcg) add: performPageFlush_ccorres) + apply (rule ccorres_gen_asm) + apply (erule ssubst[OF if_P, where P="\x. ccorres _ _ _ _ _ x _"]) + apply (rule ccorres_alternative2) + apply (rule ccorres_return_CE, simp+)[1] + apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) + apply (wpsimp simp: performPageInvocation_def) + apply simp + apply (strengthen unat_sub_le_strg[where v="2 ^ pageBitsForSize (capVPSize cp)"]) + apply (simp add: linorder_not_less linorder_not_le order_less_imp_le) + apply (wp sts_invs_minor') apply simp - apply (strengthen unat_sub_le_strg[where v="2 ^ pageBitsForSize (capVPSize cp)"]) - apply (simp add: linorder_not_less linorder_not_le order_less_imp_le) - apply (wp sts_invs_minor') + apply (vcg exspec=setThreadState_modifies) apply simp - apply (vcg exspec=setThreadState_modifies) - apply simp + apply vcg apply wp apply vcg apply wp @@ -2824,7 +2812,7 @@ lemma decodeARMFrameInvocation_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply (wp injection_wp[OF refl]) @@ -2982,7 +2970,7 @@ lemma decodeARMFrameInvocation_ccorres: apply csymbr apply (simp add: ARM_HYP.pptrBase_def ARM_HYP.pptrBase_def hd_conv_nth length_ineq_not_Nil) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[unfolded id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* Doesn't throw case *) apply (drule_tac s="Some y" in sym, @@ -3008,7 +2996,6 @@ lemma decodeARMFrameInvocation_ccorres: simp add: ARM_HYP.pptrBase_def ARM_HYP.pptrBase_def hd_conv_nth length_ineq_not_Nil, ccorres_rewrite) - apply (fold dc_def) apply (rule ccorres_return_Skip, clarsimp) apply clarsimp apply (subgoal_tac "cap_get_tag cap = SCAST(32 signed \ 32) cap_frame_cap @@ -3030,7 +3017,7 @@ lemma decodeARMFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (clarsimp simp: cap_lift_page_directory_cap cap_to_H_def - to_bool_def cap_page_directory_cap_lift_def + cap_page_directory_cap_lift_def elim!: ccap_relationE split: if_split) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) @@ -3106,7 +3093,7 @@ lemma decodeARMFrameInvocation_ccorres: apply (rule_tac P'="{s. 
find_ret = errstate s}" in ccorres_from_vcg_throws[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk exception_defs syscall_error_rel_def - syscall_error_to_H_cases false_def) + syscall_error_to_H_cases) apply (erule lookup_failure_rel_fault_lift[rotated], simp add: exception_defs) apply simp apply (wp injection_wp[OF refl] | wp (once) hoare_drop_imps)+ @@ -3165,7 +3152,7 @@ lemma decodeARMFrameInvocation_ccorres: done (* C side *) - apply (clarsimp simp: rf_sr_ksCurThread "StrictC'_thread_state_defs" mask_eq_iff_w2p + apply (clarsimp simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size word_less_nat_alt from_bool_0 excaps_map_def cte_wp_at_ctes_of) apply (frule ctes_of_valid', clarsimp) apply (drule_tac t="cteCap ctea" in sym) @@ -3382,9 +3369,9 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply (rule ccorres_add_return) apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer]) apply (simp add: invocationCatch_use_injection_handler - injection_bindE[OF refl refl] bindE_assoc - injection_handler_returnOk injection_handler_whenE - lookupError_injection) + injection_bindE[OF refl refl] bindE_assoc + injection_handler_returnOk injection_handler_whenE + lookupError_injection) apply (simp add:if_to_top_of_bindE) apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg @@ -3409,11 +3396,11 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply (simp add:if_to_top_of_bind if_to_top_of_bindE) apply (rule ccorres_if_cond_throws[rotated -1,where Q=\ and Q'=\]) apply vcg - apply (clarsimp dest!:cap_lift_page_directory_cap - simp : cap_page_directory_cap_lift_def - cap_to_H_def to_bool_def Let_def - elim!: ccap_relationE - split: cap_CL.splits if_splits) + apply (clarsimp simp: cap_page_directory_cap_lift_def + cap_to_H_def to_bool_def Let_def + dest!: cap_lift_page_directory_cap + elim!: ccap_relationE + split: cap_CL.splits if_splits) apply (simp add:injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add:syscall_error_to_H_cases) @@ -3447,16 +3434,17 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply simp apply simp apply ceqv - apply (simp add:injection_handler_If - injection_handler_returnOk if_to_top_of_bind if_to_top_of_bindE) + apply (simp add: injection_handler_If + injection_handler_returnOk if_to_top_of_bind if_to_top_of_bindE) apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws[rotated -1]) apply vcg - apply (clarsimp simp:resolve_ret_rel_def to_bool_def to_option_def - rel_option_alt_def not_le split:option.splits if_splits) - apply (simp add:invocationCatch_def ARM_HYP_H.performInvocation_def - performInvocation_def performARMMMUInvocation_def) - apply (simp add:performPageDirectoryInvocation_def - liftE_case_sum liftE_bindE liftE_alternative) + apply (clarsimp simp: resolve_ret_rel_def to_bool_def to_option_def + rel_option_alt_def not_le + split: option.splits if_splits) + apply (simp add: invocationCatch_def ARM_HYP_H.performInvocation_def + performInvocation_def performARMMMUInvocation_def) + apply (simp add: performPageDirectoryInvocation_def + liftE_case_sum liftE_bindE liftE_alternative) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_alternative2) apply (simp add:returnOk_liftE[symmetric]) @@ -3465,12 +3453,11 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply (vcg exspec=setThreadState_modifies) apply csymbr apply csymbr - apply (simp add:injection_handler_whenE - injection_bindE[OF refl refl] 
bindE_assoc - if_to_top_of_bindE injection_handler_throwError - injection_handler_returnOk injection_handler_stateAssert_relocate - checkValidMappingSize_def - ) + apply (simp add: injection_handler_whenE + injection_bindE[OF refl refl] bindE_assoc + if_to_top_of_bindE injection_handler_throwError + injection_handler_returnOk injection_handler_stateAssert_relocate + checkValidMappingSize_def) apply (rule ccorres_stateAssert) apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws[rotated -1]) apply vcg @@ -3481,7 +3468,7 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def exception_defs - syscall_error_to_H_cases false_def) + syscall_error_to_H_cases) apply (clarsimp simp: page_base_def resolve_ret_rel_def rel_option_alt_def to_option_def mask_def[unfolded shiftl_1,symmetric] split: option.splits if_splits) @@ -3516,8 +3503,8 @@ lemma decodeARMPageDirectoryInvocation_ccorres: st' \ Structures_H.thread_state.Inactive \ st' \ Structures_H.thread_state.IdleThreadState) thread and (\s. thread \ ksIdleThread s \ (obj_at' tcbQueued thread s \ st_tcb_at' runnable' thread s))"]]) - apply (clarsimp simp: invs_valid_objs' invs_sch_act_wf' - valid_tcb_state'_def invs_queues) + apply (clarsimp simp: invs_valid_objs' invs_sch_act_wf' invs_pspace_distinct' + invs_pspace_aligned' valid_tcb_state'_def) \ \cache flush constraints\ subgoal for _ _ _ _ _ _ sz p @@ -3547,7 +3534,7 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply simp @@ -3565,99 +3552,96 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply (simp add:isPDFlush_fold throwError_invocationCatch) apply (rule syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_to_H_cases) - apply (clarsimp simp:ex_cte_cap_wp_to'_def invs_arch_state' - invs_valid_objs' invs_sch_act_wf' tcb_at_invs') + apply (clarsimp simp: ex_cte_cap_wp_to'_def invs_arch_state' + invs_valid_objs' invs_sch_act_wf' tcb_at_invs') apply (clarsimp simp: isCap_simps cte_wp_at_ctes_of invs_no_0_obj') apply (frule ctes_of_valid', clarsimp) apply (drule_tac t="cteCap cte" in sym, simp) apply (intro conjI) apply (clarsimp simp: sysargs_rel_to_n word_le_nat_alt mask_def - linorder_not_less linorder_not_le valid_cap_simps') + linorder_not_less linorder_not_le valid_cap_simps') apply (clarsimp dest!:ct_active_runnable') apply (simp add:ct_in_state'_def) apply (erule pred_tcb'_weakenE) apply (case_tac st,simp+) apply (clarsimp simp: sysargs_rel_to_n word_le_nat_alt mask_def - linorder_not_less linorder_not_le valid_cap_simps') + linorder_not_less linorder_not_le valid_cap_simps') apply (clarsimp dest!:ct_active_runnable') apply (simp add:ct_in_state'_def) apply (erule pred_tcb'_weakenE) apply (case_tac st,simp+) apply (clarsimp simp: sysargs_rel_to_n word_le_nat_alt mask_def - linorder_not_less linorder_not_le valid_cap_simps') + linorder_not_less linorder_not_le valid_cap_simps') apply (clarsimp dest!:ct_active_runnable') apply (simp add:ct_in_state'_def) apply (erule pred_tcb'_weakenE) apply (case_tac st,simp+) apply (clarsimp simp: sysargs_rel_to_n word_le_nat_alt mask_def - linorder_not_less linorder_not_le 
valid_cap_simps') + linorder_not_less linorder_not_le valid_cap_simps') apply (clarsimp dest!:ct_active_runnable') apply (simp add:ct_in_state'_def) apply (erule pred_tcb'_weakenE) apply (case_tac st,simp+) apply (frule cap_get_tag_isCap_unfolded_H_cap(15)) apply (clarsimp simp: cap_lift_page_directory_cap hd_conv_nth - cap_lift_page_table_cap typ_heap_simps' - cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def cap_page_table_cap_lift_def - typ_heap_simps' shiftl_t2n[where n=2] field_simps - elim!: ccap_relationE) + cap_lift_page_table_cap typ_heap_simps' + cap_to_H_def cap_page_directory_cap_lift_def + to_bool_def cap_page_table_cap_lift_def + typ_heap_simps' shiftl_t2n[where n=2] field_simps + elim!: ccap_relationE) apply (intro conjI impI allI) - apply (clarsimp simp:ThreadState_Restart_def less_mask_eq rf_sr_ksCurThread - resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 - to_option_def rel_option_alt_def to_bool_def typ_heap_simps' - split:option.splits if_splits - | fastforce simp: mask_def - | rule flushtype_relation_triv,simp add:isPageFlush_def isPDFlushLabel_def - | rule word_of_nat_less,simp add: pbfs_less)+ + apply (clarsimp simp: ThreadState_defs less_mask_eq rf_sr_ksCurThread + resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 + to_option_def rel_option_alt_def to_bool_def typ_heap_simps' + split: option.splits if_splits + | fastforce simp: mask_def + | rule flushtype_relation_triv, simp add: isPageFlush_def isPDFlushLabel_def + | rule word_of_nat_less, simp add: pbfs_less)+ apply (frule cap_get_tag_isCap_unfolded_H_cap(15)) apply (clarsimp simp: cap_lift_page_directory_cap hd_conv_nth - cap_lift_page_table_cap - cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def cap_page_table_cap_lift_def - typ_heap_simps' shiftl_t2n[where n=2] field_simps - elim!: ccap_relationE) + cap_lift_page_table_cap + cap_to_H_def cap_page_directory_cap_lift_def + cap_page_table_cap_lift_def + typ_heap_simps' shiftl_t2n[where n=2] field_simps + elim!: ccap_relationE) apply (intro conjI impI allI) - apply (clarsimp simp:ThreadState_Restart_def less_mask_eq rf_sr_ksCurThread - resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 - to_option_def rel_option_alt_def to_bool_def - typ_heap_simps' - split:option.splits if_splits - | fastforce simp: mask_def - | rule flushtype_relation_triv,simp add:isPageFlush_def isPDFlushLabel_def - | rule word_of_nat_less,simp add: pbfs_less)+ + apply (clarsimp simp: less_mask_eq rf_sr_ksCurThread + resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 + to_option_def rel_option_alt_def to_bool_def typ_heap_simps' + split: option.splits if_splits + | fastforce simp: mask_def + | rule flushtype_relation_triv, simp add: isPageFlush_def isPDFlushLabel_def + | rule word_of_nat_less, simp add: pbfs_less)+ apply (frule cap_get_tag_isCap_unfolded_H_cap(15)) apply (clarsimp simp: cap_lift_page_directory_cap hd_conv_nth - cap_lift_page_table_cap - cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def cap_page_table_cap_lift_def - typ_heap_simps' shiftl_t2n[where n=2] field_simps - elim!: ccap_relationE) + cap_lift_page_table_cap + cap_to_H_def cap_page_directory_cap_lift_def + cap_page_table_cap_lift_def + typ_heap_simps' shiftl_t2n[where n=2] field_simps + elim!: ccap_relationE) apply (intro conjI impI allI) - apply (clarsimp simp:ThreadState_Restart_def less_mask_eq rf_sr_ksCurThread - resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 - to_option_def rel_option_alt_def to_bool_def - typ_heap_simps' - 
split:option.splits if_splits - | fastforce simp: mask_def - | rule flushtype_relation_triv,simp add:isPageFlush_def isPDFlushLabel_def - | rule word_of_nat_less,simp add: pbfs_less)+ (* slow 20 secs *) + apply (clarsimp simp: ThreadState_defs less_mask_eq rf_sr_ksCurThread + resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 + to_option_def rel_option_alt_def to_bool_def typ_heap_simps' + split: option.splits if_splits + | fastforce simp: mask_def + | rule flushtype_relation_triv, simp add: isPageFlush_def isPDFlushLabel_def + | rule word_of_nat_less, simp add: pbfs_less)+ (* slow 20 secs *) apply (frule cap_get_tag_isCap_unfolded_H_cap(15)) apply (clarsimp simp: cap_lift_page_directory_cap hd_conv_nth - cap_lift_page_table_cap - cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def cap_page_table_cap_lift_def - typ_heap_simps' shiftl_t2n[where n=2] field_simps - elim!: ccap_relationE) + cap_lift_page_table_cap + cap_to_H_def cap_page_directory_cap_lift_def + to_bool_def cap_page_table_cap_lift_def + typ_heap_simps' shiftl_t2n[where n=2] field_simps + elim!: ccap_relationE) apply (intro conjI impI allI) - by (clarsimp simp:ThreadState_Restart_def less_mask_eq rf_sr_ksCurThread - resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 - to_option_def rel_option_alt_def to_bool_def - typ_heap_simps' - split:option.splits if_splits - | fastforce simp: mask_def - | rule flushtype_relation_triv,simp add:isPageFlush_def isPDFlushLabel_def - | rule word_of_nat_less,simp add: pbfs_less)+ + by (clarsimp simp: less_mask_eq rf_sr_ksCurThread + resolve_ret_rel_def framesize_from_to_H framesize_from_H_mask2 + to_option_def rel_option_alt_def typ_heap_simps' + split: option.splits if_splits + | fastforce simp: mask_def + | rule flushtype_relation_triv, simp add: isPageFlush_def isPDFlushLabel_def + | rule word_of_nat_less, simp add: pbfs_less)+ lemma decodeARMMMUInvocation_ccorres: "\ interpret_excaps extraCaps' = excaps_map extraCaps ; \ isVCPUCap cp \ @@ -3803,14 +3787,13 @@ lemma decodeARMMMUInvocation_ccorres: apply (cut_tac P="\y. 
y < i_' x + 1 = rhs y" for rhs in allI, rule less_x_plus_1) apply (clarsimp simp: asid_high_bits_def) - apply (clarsimp simp: rf_sr_armKSASIDTable from_bool_def + apply (clarsimp simp: rf_sr_armKSASIDTable asid_high_bits_word_bits option_to_ptr_def option_to_0_def order_less_imp_le linorder_not_less order_antisym[OF inc_le]) - apply (clarsimp simp: true_def false_def - split: option.split if_split) + apply (clarsimp split: option.split if_split) apply (simp add: asid_high_bits_def word_le_nat_alt word_less_nat_alt unat_add_lem[THEN iffD1]) apply auto[1] @@ -3830,7 +3813,6 @@ lemma decodeARMMMUInvocation_ccorres: word_sless_def if_1_0_0 from_bool_0 rf_sr_armKSASIDTable[where n=0, simplified]) apply (simp add: asid_high_bits_def option_to_ptr_def option_to_0_def - from_bool_def split: option.split if_split) apply fastforce apply ceqv @@ -3943,8 +3925,7 @@ lemma decodeARMMMUInvocation_ccorres: del: Collect_const) apply (simp add: if_1_0_0 from_bool_0 hd_conv_nth length_ineq_not_Nil del: Collect_const) - apply (clarsimp simp: eq_Nil_null[symmetric] asid_high_bits_word_bits hd_conv_nth - ThreadState_Restart_def mask_def) + apply (clarsimp simp: eq_Nil_null[symmetric] asid_high_bits_word_bits hd_conv_nth mask_def) apply wp+ apply (simp add: cap_get_tag_isCap) apply (rule HoarePartial.SeqSwap) @@ -4065,7 +4046,7 @@ lemma decodeARMMMUInvocation_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def exception_defs - syscall_error_to_H_cases false_def) + syscall_error_to_H_cases) apply (simp add: lookup_fault_lift_invalid_root) apply csymbr apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws[rotated -1]) @@ -4114,9 +4095,7 @@ lemma decodeARMMMUInvocation_ccorres: = capASIDBase cp") apply (subgoal_tac "\x. 
(x < (i_' xb + 1)) = (x < i_' xb \ x = i_' xb)") - apply (clarsimp simp: inc_le from_bool_def typ_heap_simps - asid_low_bits_def not_less field_simps - false_def + apply (clarsimp simp: inc_le typ_heap_simps asid_low_bits_def not_less field_simps split: if_split bool.splits) apply unat_arith apply (rule iffI) @@ -4167,11 +4146,10 @@ lemma decodeARMMMUInvocation_ccorres: word_sless_def word_sle_def) apply (erule cmap_relationE1[OF rf_sr_cpspace_asidpool_relation], erule ko_at_projectKO_opt) - apply (clarsimp simp: typ_heap_simps from_bool_def split: if_split) + apply (clarsimp simp: typ_heap_simps split: if_split) apply (simp add: cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_lift_asid_pool_cap cap_to_H_def - cap_asid_pool_cap_lift_def false_def - ucast_minus ucast_nat_def + cap_asid_pool_cap_lift_def ucast_minus ucast_nat_def elim!: ccap_relationE) apply ceqv apply (rule ccorres_Guard_Seq)+ @@ -4255,9 +4233,12 @@ lemma decodeARMMMUInvocation_ccorres: apply (clarsimp simp: ex_cte_cap_wp_to'_def cte_wp_at_ctes_of invs_sch_act_wf' dest!: isCapDs(1)) apply (intro conjI) - apply (simp add: Invariants_H.invs_queues) - apply (simp add: valid_tcb_state'_def) - apply (fastforce elim!: pred_tcb'_weakenE dest!:st_tcb_at_idle_thread') + apply (simp add: valid_tcb_state'_def) + apply (fastforce elim!: pred_tcb'_weakenE dest!:st_tcb_at_idle_thread') + apply fastforce + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) + apply (rename_tac obj) + apply (case_tac "tcbState obj", (simp add: runnable'_def)+)[1] apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (rename_tac obj) apply (case_tac "tcbState obj", (simp add: runnable'_def)+)[1] @@ -4294,13 +4275,11 @@ lemma decodeARMMMUInvocation_ccorres: apply (auto simp: ct_in_state'_def valid_tcb_state'_def dest!: st_tcb_at_idle_thread' elim!: pred_tcb'_weakenE)[1] - apply (clarsimp simp: if_1_0_0 cte_wp_at_ctes_of asidHighBits_handy_convs + apply (clarsimp simp: cte_wp_at_ctes_of asidHighBits_handy_convs word_sle_def word_sless_def asidLowBits_handy_convs - rf_sr_ksCurThread "StrictC'_thread_state_defs" - mask_def[where n=4] + rf_sr_ksCurThread ThreadState_defs mask_def[where n=4] cong: if_cong) - apply (clarsimp simp: if_1_0_0 to_bool_def ccap_relation_isDeviceCap2 - objBits_simps archObjSize_def pageBits_def from_bool_def case_bool_If) + apply (clarsimp simp: ccap_relation_isDeviceCap2 objBits_simps archObjSize_def pageBits_def) apply (rule conjI) (* Is Asid Control Cap *) apply (clarsimp simp: neq_Nil_conv excaps_in_mem_def excaps_map_def) @@ -4310,11 +4289,10 @@ lemma decodeARMMMUInvocation_ccorres: ccap_rights_relation_def rightsFromWord_wordFromRights) apply (clarsimp simp: asid_high_bits_word_bits split: list.split_asm) apply (clarsimp simp: cap_untyped_cap_lift_def cap_lift_untyped_cap - cap_to_H_def[split_simps cap_CL.split] - hd_conv_nth length_ineq_not_Nil - elim!: ccap_relationE) - apply (clarsimp simp: if_1_0_0 to_bool_def unat_eq_of_nat - objBits_simps archObjSize_def pageBits_def from_bool_def case_bool_If + cap_to_H_def[split_simps cap_CL.split] + hd_conv_nth length_ineq_not_Nil + elim!: ccap_relationE) + apply (clarsimp simp: to_bool_def unat_eq_of_nat objBits_simps archObjSize_def pageBits_def split: if_splits) apply (clarsimp simp: asid_low_bits_word_bits isCap_simps neq_Nil_conv excaps_map_def excaps_in_mem_def @@ -4332,8 +4310,7 @@ lemma decodeARMMMUInvocation_ccorres: elim!: ccap_relationE split: if_split_asm) apply (clarsimp split: list.split) apply (clarsimp simp: cap_lift_asid_pool_cap 
cap_lift_page_directory_cap - cap_to_H_def to_bool_def - cap_page_directory_cap_lift_def + cap_to_H_def cap_page_directory_cap_lift_def to_bool_def elim!: ccap_relationE split: if_split_asm) done @@ -4349,7 +4326,7 @@ lemma vcpuRegSavedWhenDisabled_spec[simp]: by (simp add: vcpuRegSavedWhenDisabled_def split: vcpureg.splits) lemma writeVCPUReg_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres dc xfdc (vcpu_at' vcpuptr and no_0_obj') @@ -4395,7 +4372,7 @@ lemma writeVCPUReg_ccorres: done lemma readVCPUReg_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((=)) ret__unsigned_long_' (vcpu_at' vcpuptr and no_0_obj') @@ -4447,9 +4424,12 @@ lemma readVCPUReg_ccorres: apply fastforce done +crunches readVCPUReg + for pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres *) - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart) @@ -4483,8 +4463,8 @@ lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) \ \now if we are part of a call\ apply (rule ccorres_rhs_assoc)+ @@ -4510,8 +4490,8 @@ lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp apply (vcg exspec=setRegister_modifies) @@ -4519,13 +4499,13 @@ lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres apply clarsimp apply (vcg) apply wpsimp - apply (clarsimp simp: dc_def msgInfoRegister_def ARM_HYP.msgInfoRegister_def Kernel_C.msgInfoRegister_def Kernel_C.R1_def) + apply (clarsimp simp: msgInfoRegister_def ARM_HYP.msgInfoRegister_def Kernel_C.msgInfoRegister_def Kernel_C.R1_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply clarsimp apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_all_lift hoare_vcg_imp_lift) @@ -4535,15 +4515,15 @@ lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres apply clarsimp apply (rule conseqPre, vcg) apply clarsimp - apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' - rf_sr_ksCurThread msgRegisters_unfold - seL4_MessageInfo_lift_def message_info_to_H_def mask_def) + apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + invs_pspace_aligned' invs_pspace_distinct' + rf_sr_ksCurThread msgRegisters_unfold ThreadState_defs + seL4_MessageInfo_lift_def message_info_to_H_def mask_def) apply (cases isCall; clarsimp) 
apply (rule conjI, clarsimp simp: ct_in_state'_def st_tcb_at'_def comp_def) apply (fastforce simp: obj_at'_def projectKOs) apply (clarsimp simp: Kernel_C.badgeRegister_def ARM_HYP.badgeRegister_def ARM_HYP_H.badgeRegister_def Kernel_C.R0_def) apply (simp add: rf_sr_def cstate_relation_def Let_def) - apply (clarsimp simp: ThreadState_Running_def) apply (rule conjI, clarsimp simp: pred_tcb_at'_def obj_at'_def projectKOs ct_in_state'_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) done @@ -4555,7 +4535,7 @@ lemma liftE_invokeVCPUWriteReg_empty_return: by (clarsimp simp: liftE_bindE bind_assoc) lemma invokeVCPUWriteReg_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and vcpu_at' vcpuptr) @@ -4571,7 +4551,7 @@ lemma invokeVCPUWriteReg_ccorres: apply (ctac (no_vcg) add: writeVCPUReg_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) by (wpsimp simp: invs_no_0_obj')+ lemma decodeVCPUWriteReg_ccorres: @@ -4581,14 +4561,14 @@ lemma decodeVCPUWriteReg_ccorres: (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple and sysargs_rel args buffer and (valid_cap' (ArchObjectCap cp)) and K (isVCPUCap cp)) - (UNIV \ {s. unat (length_' s) = length args} + (UNIV \ {s. unat (length___unsigned_long_' s) = length args} \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} \ {s. buffer_' s = option_to_ptr buffer}) hs (decodeVCPUWriteReg args cp >>= invocationCatch thread isBlocking isCall InvokeArchObject) (Call decodeVCPUWriteReg_'proc)" apply (rule ccorres_grab_asm) - apply (cinit' lift: length_' cap_' buffer_' simp: decodeVCPUWriteReg_def Let_def) + apply (cinit' lift: length___unsigned_long_' cap_' buffer_' simp: decodeVCPUWriteReg_def Let_def) apply (rule ccorres_Cond_rhs_Seq ; clarsimp) apply (rule_tac ccorres_gen_asm[where P="length args < 2"]) apply clarsimp @@ -4630,9 +4610,9 @@ lemma decodeVCPUWriteReg_ccorres: apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: word_less_nat_alt word_le_nat_alt conj_commute - invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' rf_sr_ksCurThread msgRegisters_unfold - valid_tcb_state'_def ThreadState_Restart_def mask_def) + valid_tcb_state'_def ThreadState_defs mask_def) apply (rule conjI; clarsimp) \ \not enough args\ apply (clarsimp simp: isCap_simps cap_get_tag_isCap capVCPUPtr_eq) apply (subst from_to_enum; clarsimp simp: fromEnum_maxBound_vcpureg_def) @@ -4650,7 +4630,7 @@ lemma liftE_invokeVCPUInjectIRQ_empty_return: by (clarsimp simp: liftE_bindE bind_assoc) lemma invokeVCPUInjectIRQ_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and vcpu_at' vcpuptr and K (idx < 64)) @@ -4678,7 +4658,7 @@ lemma invokeVCPUInjectIRQ_ccorres: apply clarsimp apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_lr_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) - apply (rule allI, rule conseqPre, vcg, clarsimp simp: dc_def return_def) + apply (rule allI, rule conseqPre, vcg, clarsimp simp: return_def) apply (rule wp_post_taut) apply (simp only:) apply (clarsimp simp: bind_assoc) @@ -4686,7 +4666,7 @@ lemma invokeVCPUInjectIRQ_ccorres: apply (rule 
ccorres_move_c_guard_vcpu) apply (ctac (no_vcg) add: vgicUpdateLR_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) - apply (rule allI, rule conseqPre, vcg, clarsimp simp: dc_def return_def) + apply (rule allI, rule conseqPre, vcg, clarsimp simp: return_def) apply wpsimp+ apply (clarsimp simp: unat_of_nat_eq word_of_nat_less) done @@ -4712,7 +4692,7 @@ lemma decodeVCPUInjectIRQ_ccorres: and sysargs_rel args buffer and (valid_cap' (ArchObjectCap cp)) and K (isVCPUCap cp)) - (UNIV \ {s. unat (length_' s) = length args} + (UNIV \ {s. unat (length___unsigned_long_' s) = length args} \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} \ {s. buffer_' s = option_to_ptr buffer} ) hs @@ -4720,7 +4700,7 @@ lemma decodeVCPUInjectIRQ_ccorres: >>= invocationCatch thread isBlocking isCall InvokeArchObject) (Call decodeVCPUInjectIRQ_'proc)" apply (rule ccorres_grab_asm) - apply (cinit' lift: length_' cap_' buffer_' + apply (cinit' lift: length___unsigned_long_' cap_' buffer_' simp: decodeVCPUInjectIRQ_def Let_def shiftL_nat ) apply csymbr apply csymbr @@ -4771,7 +4751,7 @@ lemma decodeVCPUInjectIRQ_ccorres: liftE_liftM[symmetric] liftE_bindE_assoc) (* symbolically execute the gets on LHS *) - apply (rule_tac ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState[simplified comp_def], + apply (rule_tac ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState, rename_tac nregs) (* unfortunately directly looking at \gic_vcpu_num_list_regs means we need to abstract the IF condition*) @@ -4859,7 +4839,7 @@ lemma decodeVCPUInjectIRQ_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply wp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=invokeVCPUInjectIRQ_modifies) apply (wpsimp wp: sts_invs_minor' ct_in_state'_set)+ apply (vcg exspec=setThreadState_modifies) @@ -4873,9 +4853,9 @@ lemma decodeVCPUInjectIRQ_ccorres: apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: word_less_nat_alt word_le_nat_alt conj_commute - invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' rf_sr_ksCurThread msgRegisters_unfold - valid_tcb_state'_def ThreadState_Restart_def mask_def) + valid_tcb_state'_def ThreadState_defs mask_def) apply (frule invs_arch_state') apply (clarsimp simp: valid_arch_state'_def max_armKSGICVCPUNumListRegs_def rf_sr_armKSGICVCPUNumListRegs) @@ -4928,14 +4908,14 @@ lemma decodeVCPUReadReg_ccorres: (invs' and (\s. ksCurThread s = thread) and ct_active' and sch_act_simple and sysargs_rel args buffer and (valid_cap' (ArchObjectCap cp))) - (UNIV \ {s. unat (length_' s) = length args} + (UNIV \ {s. unat (length___unsigned_long_' s) = length args} \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} \ {s. 
buffer_' s = option_to_ptr buffer} \ \\call = from_bool isCall \) hs (decodeVCPUReadReg args cp >>= invocationCatch thread isBlocking isCall InvokeArchObject) (Call decodeVCPUReadReg_'proc)" - apply (cinit' lift: length_' cap_' buffer_' call_') + apply (cinit' lift: length___unsigned_long_' cap_' buffer_' call_') apply (clarsimp simp: decodeVCPUReadReg_def Let_def) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types @@ -4980,9 +4960,9 @@ lemma decodeVCPUReadReg_ccorres: apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: word_le_nat_alt conj_commute - invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' rf_sr_ksCurThread msgRegisters_unfold - valid_tcb_state'_def ThreadState_Restart_def mask_def) + valid_tcb_state'_def ThreadState_defs mask_def) apply (rule conjI; clarsimp) \ \no args\ subgoal by (clarsimp simp: isCap_simps cap_get_tag_isCap capVCPUPtr_eq) @@ -4996,7 +4976,7 @@ lemma decodeVCPUReadReg_ccorres: done lemma invokeVCPUSetTCB_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' tptr and vcpu_at' vcpuptr) @@ -5008,10 +4988,10 @@ lemma invokeVCPUSetTCB_ccorres: apply clarsimp apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: associateVCPUTCB_ccorres) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) by (wpsimp simp: invs_no_0_obj')+ lemma liftE_associateVCPUTCB_empty_return: @@ -5084,9 +5064,10 @@ lemma decodeVCPUSetTCB_ccorres: apply vcg apply (clarsimp simp: word_less_nat_alt word_le_nat_alt conj_commute - invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + invs_pspace_aligned' invs_pspace_distinct' rf_sr_ksCurThread msgRegisters_unfold - valid_tcb_state'_def ThreadState_Restart_def mask_def) + valid_tcb_state'_def ThreadState_defs mask_def) apply (clarsimp simp: idButNot_def interpret_excaps_test_null excaps_map_def neq_Nil_conv) apply (rule conjI; clarsimp) @@ -5104,7 +5085,7 @@ lemma decodeVCPUSetTCB_ccorres: done lemma invokeVCPUAckVPPI_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and vcpu_at' vcpuptr) @@ -5117,13 +5098,11 @@ lemma invokeVCPUAckVPPI_ccorres: apply (simp add: invokeVCPUAckVPPI_def) apply (rule ccorres_move_const_guards) apply (rule ccorres_move_c_guard_vcpu) - apply (simp add: false_def) apply (ctac (no_vcg) add: vcpuVPPIMasked_update_ccorres[ - where v=False, simplified false_def from_bool_def, - simplified]) + where v=False, simplified from_bool_vals]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply wpsimp+ apply (case_tac vppi, simp add: fromEnum_def enum_vppievent_irq flip: word_unat.Rep_inject) done @@ -5146,7 +5125,7 @@ lemma decodeVCPUAckVPPI_ccorres: and sysargs_rel args buffer and (valid_cap' (ArchObjectCap cp)) and K (isVCPUCap cp)) - (UNIV \ {s. unat (length_' s) = length args} + (UNIV \ {s. 
unat (length___unsigned_long_' s) = length args} \ {s. ccap_relation (ArchObjectCap cp) (cap_' s)} \ {s. buffer_' s = option_to_ptr buffer} ) hs @@ -5168,7 +5147,7 @@ proof - show ?thesis apply (rule ccorres_grab_asm) - apply (cinit' lift: length_' cap_' buffer_') + apply (cinit' lift: length___unsigned_long_' cap_' buffer_') apply (clarsimp simp: decodeVCPUAckVPPI_def) apply (csymbr, rename_tac cp') apply csymbr @@ -5203,7 +5182,7 @@ proof - apply (simp add: throwError_bind invocationCatch_def whenE_def injection_handler_throwError) apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types cong: StateSpace.state.fold_congs globals.fold_congs) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (solves \simp add: syscall_error_to_H_cases\) apply (clarsimp simp: irqVPPIEventIndex_not_invalid; ccorres_rewrite) @@ -5237,17 +5216,17 @@ proof - (* Haskell side *) apply (clarsimp simp: excaps_in_mem_def slotcap_in_mem_def isCap_simps ctes_of_cte_at) apply (clarsimp simp: word_le_nat_alt conj_commute - invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' rf_sr_ksCurThread msgRegisters_unfold - valid_tcb_state'_def ThreadState_Restart_def mask_def + valid_tcb_state'_def mask_def valid_cap'_def ct_in_state'_def sysargs_rel_to_n st_tcb_at'_def comp_def runnable'_eq) apply (fastforce elim: obj_at'_weakenE) (* C side *) apply (clarsimp simp: word_le_nat_alt conj_commute - invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' rf_sr_ksCurThread msgRegisters_unfold - valid_tcb_state'_def ThreadState_Restart_def Kernel_C.maxIRQ_def + valid_tcb_state'_def ThreadState_defs Kernel_C.maxIRQ_def and_mask_eq_iff_le_mask capVCPUPtr_eq) apply (clarsimp simp: mask_def) done @@ -5267,7 +5246,7 @@ lemma decodeARMVCPUInvocation_ccorres: and (valid_cap' (ArchObjectCap cp))) (UNIV \ \whoever wrote the C code decided to name this arbitrarily differently from other functions\ \ {s. label___unsigned_long_' s = label} - \ {s. unat (length_' s) = length args} + \ {s. unat (length___unsigned_long_' s) = length args} \ {s. slot_' s = cte_Ptr slot} \ {s. current_extra_caps_' (globals s) = extraCaps'} \ {s. 
ccap_relation (ArchObjectCap cp) (cap_' s)} @@ -5276,7 +5255,7 @@ lemma decodeARMVCPUInvocation_ccorres: (decodeARMVCPUInvocation label args cptr slot cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeArchObject) (Call decodeARMVCPUInvocation_'proc)" - apply (cinit' lift: label___unsigned_long_' length_' slot_' current_extra_caps_' + apply (cinit' lift: label___unsigned_long_' length___unsigned_long_' slot_' current_extra_caps_' cap_' buffer_' call_') apply (clarsimp simp: decodeARMVCPUInvocation_def) diff --git a/proof/crefine/ARM_HYP/BuildRefineCache_C.thy b/proof/crefine/ARM_HYP/BuildRefineCache_C.thy deleted file mode 100644 index fb44f0481e..0000000000 --- a/proof/crefine/ARM_HYP/BuildRefineCache_C.thy +++ /dev/null @@ -1,39 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory BuildRefineCache_C (* FIXME: broken *) -imports Main -begin - -ML \ - -(* needed to generate a proof cache *) -proofs := 1; -DupSkip.record_proofs := true; - -tracing "Building crefinement image using Refine_C"; - -time_use_thy "Refine_C"; - -\ - -ML \ - -tracing "Synching proof cache"; - -DupSkip.sync_cache @{theory Refine_C}; - -tracing "Dumping proof cache"; - -let - val xml = XML_Syntax.xml_forest_of_cache (! DupSkip.the_cache); -in - File.open_output (XML_Syntax.output_forest xml) (Path.basic "proof_cache.xml") -end; - -\ - -end; diff --git a/proof/crefine/ARM_HYP/CACHE.ML b/proof/crefine/ARM_HYP/CACHE.ML deleted file mode 100644 index 2c551dadbd..0000000000 --- a/proof/crefine/ARM_HYP/CACHE.ML +++ /dev/null @@ -1,8 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -with_quick_and_dirty_use_thy "BuildRefineCache_C"; - diff --git a/proof/crefine/ARM_HYP/CLevityCatch.thy b/proof/crefine/ARM_HYP/CLevityCatch.thy index 37a25da200..de1d3a09a2 100644 --- a/proof/crefine/ARM_HYP/CLevityCatch.thy +++ b/proof/crefine/ARM_HYP/CLevityCatch.thy @@ -8,8 +8,9 @@ theory CLevityCatch imports "CBaseRefine.Include_C" ArchMove_C - "CLib.LemmaBucket_C" + "CParser.LemmaBucket_C" "Lib.LemmaBucket" + Boolean_C begin context begin interpretation Arch . (*FIXME: arch_split*) @@ -64,12 +65,12 @@ lemma empty_fail_getExtraCPtrs [intro!, simp]: "empty_fail (getExtraCPtrs sendBuffer info)" apply (simp add: getExtraCPtrs_def) apply (cases info, simp) - apply (cases sendBuffer, simp_all) + apply (cases sendBuffer; fastforce) done lemma empty_fail_loadCapTransfer [intro!, simp]: "empty_fail (loadCapTransfer a)" - by (simp add: loadCapTransfer_def capTransferFromWords_def) + by (fastforce simp: loadCapTransfer_def capTransferFromWords_def) lemma empty_fail_emptyOnFailure [intro!, simp]: "empty_fail m \ empty_fail (emptyOnFailure m)" @@ -84,12 +85,12 @@ lemma empty_fail_unifyFailure [intro!, simp]: lemma asUser_get_registers: "\tcb_at' target\ asUser target (mapM getRegister xs) - \\rv s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) xs = rv) target s\" + \\rv s. obj_at' (\tcb. 
map ((user_regs \ atcbContextGet \ tcbArch) tcb) xs = rv) target s\" apply (induct xs) apply (simp add: mapM_empty asUser_return) apply wp apply simp - apply (simp add: mapM_Cons asUser_bind_distrib asUser_return) + apply (simp add: mapM_Cons asUser_bind_distrib asUser_return empty_fail_cond) apply wp apply simp apply (rule hoare_strengthen_post) diff --git a/proof/crefine/ARM_HYP/CSpace_All.thy b/proof/crefine/ARM_HYP/CSpace_All.thy index 966310106e..ab54a3a670 100644 --- a/proof/crefine/ARM_HYP/CSpace_All.thy +++ b/proof/crefine/ARM_HYP/CSpace_All.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -24,9 +25,9 @@ abbreviation (* FIXME: move *) lemma ccorres_return_into_rel: - "ccorres (\rv rv'. r (f rv) rv') xf G G' hs a c + "ccorres (r \ f) xf G G' hs a c \ ccorres r xf G G' hs (a >>= (\rv. return (f rv))) c" - by (simp add: liftM_def[symmetric] o_def) + by (simp add: liftM_def[symmetric]) lemma lookupCap_ccorres': "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf @@ -248,8 +249,7 @@ lemma lookupSlotForCNodeOp_ccorres': apply vcg \ \last subgoal\ - apply (clarsimp simp: if_1_0_0 to_bool_def true_def word_size - fromIntegral_def integral_inv) + apply (clarsimp simp: word_size fromIntegral_def integral_inv) apply (case_tac "cap_get_tag root = scast cap_cnode_cap") prefer 2 apply clarsimp apply (clarsimp simp: unat_of_nat32 word_sle_def) @@ -285,7 +285,7 @@ lemma lookupSourceSlot_ccorres': apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres') - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done lemma lookupSourceSlot_ccorres: @@ -315,7 +315,7 @@ lemma lookupTargetSlot_ccorres': apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres') - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done lemma lookupTargetSlot_ccorres: @@ -345,7 +345,7 @@ lemma lookupPivotSlot_ccorres: apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres) - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done end diff --git a/proof/crefine/ARM_HYP/CSpace_C.thy b/proof/crefine/ARM_HYP/CSpace_C.thy index 02dbcf52f9..7e5f0ed9ec 100644 --- a/proof/crefine/ARM_HYP/CSpace_C.thy +++ b/proof/crefine/ARM_HYP/CSpace_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -60,7 +61,7 @@ lemma maskVMRights_spec: apply clarsimp apply (rule conjI) apply ((auto simp: vmrights_to_H_def maskVMRights_def vmrights_defs - cap_rights_to_H_def to_bool_def + cap_rights_to_H_def split: bool.split | simp add: mask_def | word_bitwise)+)[1] @@ -152,11 +153,7 @@ lemma Arch_maskCapRights_ccorres [corres]: apply (cases arch_cap) by (fastforce simp add: cap_get_tag_isCap isCap_simps simp del: not_ex simp_thms(44))+ -(* FIXME: move to Wellformed_C (or move to_bool_bf out of Wellformed_C) *) -lemma to_bool_mask_to_bool_bf: - "to_bool (x && 1) = to_bool_bf (x::word32)" - by (simp add: to_bool_bf_def to_bool_def) - +(* FIXME: move to Wellformed_C *) lemma to_bool_cap_rights_bf: "to_bool (capAllowRead_CL (seL4_CapRights_lift R)) = to_bool_bf (capAllowRead_CL (seL4_CapRights_lift R))" @@ -217,7 +214,7 @@ lemma maskCapRights_ccorres [corres]: apply csymbr apply (simp add: maskCapRights_cap_cases cap_get_tag_isCap del: Collect_const) apply wpc - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply 
csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -227,7 +224,7 @@ lemma maskCapRights_ccorres [corres]: apply vcg apply clarsimp apply (simp add: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -236,7 +233,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -262,7 +259,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_ntfn_cap_bf to_bool_mask_to_bool_bf to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -270,7 +267,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -298,7 +295,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_ep_cap_bf to_bool_mask_to_bool_bf to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -307,7 +304,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -315,7 +312,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply (subst bind_return [symmetric]) apply (rule ccorres_split_throws) apply ctac @@ -328,7 +325,7 @@ lemma maskCapRights_ccorres [corres]: apply wp apply vcg apply vcg - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply ccorres_rewrite @@ -348,7 +345,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_reply_cap_bf to_bool_mask_to_bool_bf[simplified] to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -357,7 +354,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: 
Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -367,7 +364,7 @@ lemma maskCapRights_ccorres [corres]: apply vcg apply clarsimp apply (simp add: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -528,9 +525,9 @@ lemma Arch_isCapRevocable_spec: {t. \c c'. ccap_relation c (derivedCap_' s) \ ccap_relation c' (srcCap_' s) \ ret__unsigned_long_' t = from_bool (Arch.isCapRevocable c c')}" apply vcg - by (auto simp: false_def from_bool_def) + by auto -method revokable'_hammer = solves \(simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def, +method revokable'_hammer = solves \(simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs, rule ccorres_guard_imp, rule ccorres_return_C; clarsimp)\ lemma revokable_ccorres: @@ -557,7 +554,7 @@ lemma revokable_ccorres: \ \Uninteresting caps\ apply revokable'_hammer+ \ \NotificationCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (frule_tac cap'1=srcCap in cap_get_tag_NotificationCap[THEN iffD1]) @@ -566,12 +563,12 @@ lemma revokable_ccorres: apply (clarsimp simp: cap_get_tag_isCap isCap_simps) apply (fastforce simp: cap_get_tag_isCap isCap_simps) \ \IRQHandlerCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (fastforce simp: cap_get_tag_isCap isCap_simps) \ \EndpointCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (frule_tac cap'1=srcCap in cap_get_tag_EndpointCap[THEN iffD1]) @@ -729,52 +726,6 @@ definition then (if cap_get_tag srcCap = scast cap_irq_control_cap then 1 else 0) else if (cap_get_tag newCap = scast cap_untyped_cap) then 1 else 0)" -lemma cteInsert_if_helper: - assumes cgt: "rv = cap_get_tag newCap" - and rul: "\s g. (s \ Q) = (s\ ret__unsigned_' := undefined, - unsigned_eret_2_':= undefined \ \ Q')" - shows "\ \\<^bsub>/UNIV\<^esub> {s. 
(cap_get_tag srcCap = cap_get_tag newCap - \ is_simple_cap_tag (cap_get_tag newCap)) \ - (s\newCapIsRevocable_' := cteInsert_newCapIsRevocable_if newCap srcCap\ \ Q)} - (IF rv = scast cap_endpoint_cap THEN - \ret__unsigned :== CALL cap_endpoint_cap_get_capEPBadge(newCap);; - \unsigned_eret_2 :== CALL cap_endpoint_cap_get_capEPBadge(srcCap);; - \newCapIsRevocable :== (if \ret__unsigned \ \unsigned_eret_2 then 1 else 0) - ELSE - IF rv = scast cap_notification_cap THEN - \ret__unsigned :== CALL cap_notification_cap_get_capNtfnBadge(newCap);; - \unsigned_eret_2 :== CALL cap_notification_cap_get_capNtfnBadge(srcCap);; - \newCapIsRevocable :== (if \ret__unsigned \ \unsigned_eret_2 then 1 else 0) - ELSE - IF rv = scast cap_irq_handler_cap THEN - \ret__unsigned :== CALL cap_get_capType(srcCap);; - \newCapIsRevocable :== (if \ret__unsigned = scast cap_irq_control_cap then 1 else 0) - ELSE - IF rv = scast cap_untyped_cap THEN - \newCapIsRevocable :== scast true - ELSE - \newCapIsRevocable :== scast false - FI - FI - FI - FI) Q" - unfolding cteInsert_newCapIsRevocable_if_def - apply (unfold cgt) - apply (rule conseqPre) - apply vcg - apply (clarsimp simp: true_def false_def - is_simple_cap_tag_def - cong: if_cong) - apply (simp add: cap_tag_defs) - apply (intro allI conjI impI) - apply (clarsimp simp: rul)+ - done - -lemma forget_Q': - "(x \ Q) = (y \ Q) \ (x \ Q) = (y \ Q)" . - -lemmas cteInsert_if_helper' = cteInsert_if_helper [OF _ forget_Q'] - (* Useful: apply (tactic {* let val _ = reset CtacImpl.trace_ceqv; val _ = reset CtacImpl.trace_ctac in all_tac end; *}) *) @@ -845,7 +796,7 @@ lemma update_freeIndex': show ?thesis apply (cinit lift: cap_ptr_' v32_') apply (rule ccorres_pre_getCTE) - apply (rule_tac P="\s. ctes_of s srcSlot = Some rv \ (\i. cteCap rv = UntypedCap d p sz i)" + apply (rule_tac P="\s. ctes_of s srcSlot = Some cte \ (\i. 
cteCap cte = UntypedCap d p sz i)" in ccorres_from_vcg[where P' = UNIV]) apply (rule allI) apply (rule conseqPre) @@ -968,7 +919,7 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (rule ccorres_Guard) apply (rule ccorres_call) - apply (rule update_freeIndex [unfolded dc_def]) + apply (rule update_freeIndex) apply simp apply simp apply simp @@ -994,14 +945,14 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply csymbr apply (clarsimp simp: cap_get_tag_to_H cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_Skip) apply clarsimp apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap) apply (frule(1) cte_wp_at_valid_objs_valid_cap') apply (clarsimp simp: untypedBits_defs) @@ -1120,19 +1071,17 @@ lemma cteInsert_ccorres: apply csymbr apply simp apply (rule ccorres_move_c_guard_cte) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres:ccorres_updateMDB_set_mdbPrev) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres: ccorres_updateMDB_skip) - apply (wp static_imp_wp)+ - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp)+ + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg - apply (wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg apply (clarsimp simp:cmdb_node_relation_mdbNext) - apply (wp setUntypedCapAsFull_cte_at_wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp setUntypedCapAsFull_cte_at_wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply (vcg exspec=setUntypedCapAsFull_modifies) apply wp apply vcg @@ -1303,11 +1252,9 @@ lemma cteMove_ccorres: apply (intro conjI, simp+) apply (erule (2) is_aligned_3_prev) apply (erule (2) is_aligned_3_next) - apply (clarsimp simp: dc_def split del: if_split) + apply (clarsimp split del: if_split) apply (simp add: ccap_relation_NullCap_iff) - apply (clarsimp simp add: cmdbnode_relation_def - mdb_node_to_H_def nullMDBNode_def - false_def to_bool_def) + apply (clarsimp simp: cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def) done lemma cteMove_ccorres_verbose: @@ -1449,13 +1396,11 @@ lemma cteMove_ccorres_verbose: \ \***--------------------------***\ \ \***C generalised precondition***\ \ \***--------------------------***\ - apply (unfold dc_def) apply (clarsimp simp: ccap_relation_NullCap_iff split del: if_split) \ \cmdbnode_relation nullMDBNode va\ apply (simp add: cmdbnode_relation_def) apply (simp add: mdb_node_to_H_def) apply (simp add: nullMDBNode_def) - apply (simp add: false_def to_bool_def) done (************************************************************************) @@ -1961,8 +1906,8 @@ lemma emptySlot_helper: mdbFirstBadged_CL (cteMDBNode_CL y)") prefer 2 apply (drule cteMDBNode_CL_lift [symmetric]) - subgoal by (simp add: mdb_node_lift_def mask_def 
word_bw_assocs) - subgoal by (simp add: to_bool_def mask_def) + subgoal by (simp add: mdb_node_lift_def word_bw_assocs) + subgoal by (simp add: to_bool_def) \ \\ \x\fst \\ apply clarsimp apply (rule fst_setCTE [OF ctes_of_cte_at], assumption ) @@ -1993,7 +1938,7 @@ lemma emptySlot_helper: prefer 2 apply (drule cteMDBNode_CL_lift [symmetric]) subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) - apply (simp add: to_bool_def mask_def split: if_split) + apply (simp add: to_bool_def split: if_split) \ \trivial case where mdbNext rva = 0\ apply (simp add:ccorres_cond_empty_iff) @@ -2162,7 +2107,6 @@ lemma setIRQState_ccorres: apply (simp add: empty_fail_def getInterruptState_def simpler_gets_def) apply clarsimp - apply (simp add: from_bool_def) apply (cases irqState, simp_all) apply (simp add: Kernel_C.IRQSignal_def Kernel_C.IRQInactive_def) apply (simp add: Kernel_C.IRQTimer_def Kernel_C.IRQInactive_def) @@ -2430,7 +2374,6 @@ lemma postCapDeletion_ccorres: apply (rule ccorres_symb_exec_r) apply (rule_tac xf'=irq_' in ccorres_abstract, ceqv) apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) - apply (fold dc_def) apply (frule cap_get_tag_to_H, solves \clarsimp simp: cap_get_tag_isCap_unfolded_H_cap\) apply (clarsimp simp: cap_irq_handler_cap_lift) apply (ctac(no_vcg) add: deletedIRQHandler_ccorres) @@ -2441,9 +2384,9 @@ lemma postCapDeletion_ccorres: apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_Cond_rhs) apply (wpc; clarsimp simp: isCap_simps) - apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres) apply (simp add: not_irq_or_arch_cap_case) - apply (rule ccorres_return_Skip[unfolded dc_def])+ + apply (rule ccorres_return_Skip) apply clarsimp apply (rule conjI, clarsimp simp: isCap_simps Kernel_C.maxIRQ_def) apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) @@ -2492,7 +2435,7 @@ lemma emptySlot_ccorres: \ \*** proof for the 'else' branch (return () and SKIP) ***\ prefer 2 - apply (ctac add: ccorres_return_Skip[unfolded dc_def]) + apply (ctac add: ccorres_return_Skip) \ \*** proof for the 'then' branch ***\ @@ -2537,7 +2480,7 @@ lemma emptySlot_ccorres: \ \the post_cap_deletion case\ - apply (ctac(no_vcg) add: postCapDeletion_ccorres [unfolded dc_def]) + apply (ctac(no_vcg) add: postCapDeletion_ccorres) \ \Haskell pre/post for y \ updateMDB slot (\a. 
nullMDBNode);\ apply wp @@ -2547,7 +2490,7 @@ lemma emptySlot_ccorres: \ \Haskell pre/post for y \ updateCap slot capability.NullCap;\ apply wp \ \C pre/post for y \ updateCap slot capability.NullCap;\ - apply (simp add: Collect_const_mem cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def false_def) + apply (simp add: Collect_const_mem cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def) \ \Haskell pre/post for the two nested updates\ apply wp \ \C pre/post for the two nested updates\ @@ -2609,8 +2552,8 @@ lemma capSwapForDelete_ccorres: \ \--- instruction: when (slot1 \ slot2) \ / IF Ptr slot1 = Ptr slot2 THEN \\ apply (simp add:when_def) apply (rule ccorres_if_cond_throws2 [where Q = \ and Q' = \]) - apply (case_tac "slot1=slot2", simp+) - apply (rule ccorres_return_void_C [simplified dc_def]) + apply (case_tac "slot1=slot2"; simp) + apply (rule ccorres_return_void_C) \ \***Main goal***\ \ \--- ccorres goal with 2 affectations (cap1 and cap2) on both on Haskell and C\ @@ -2619,7 +2562,7 @@ lemma capSwapForDelete_ccorres: apply (rule ccorres_pre_getCTE)+ apply (rule ccorres_move_c_guard_cte, rule ccorres_symb_exec_r)+ \ \***Main goal***\ - apply (ctac (no_vcg) add: cteSwap_ccorres [unfolded dc_def] ) + apply (ctac (no_vcg) add: cteSwap_ccorres) \ \C Hoare triple for \cap2 :== \\ apply vcg \ \C existential Hoare triple for \cap2 :== \\ @@ -2741,8 +2684,8 @@ lemma Arch_sameRegionAs_spec: (* FIXME: add 1 indent, 1 extra VCPU goal appeared *) \ \capa is ASIDPoolCap\ - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs) \ \capb is also ASIDPoolCap\ apply (frule cap_get_tag_isCap_unfolded_H_cap(13)[where cap'=cap_a]) apply (frule cap_get_tag_isCap_unfolded_H_cap(13)[where cap'=cap_b]) @@ -2764,8 +2707,8 @@ lemma Arch_sameRegionAs_spec: done \ \capa is ASIDControlCap\ - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs) \ \capb is PageCap\ subgoal for \ vmpage_size option apply (case_tac "vmpage_size=ARMSmallPage") @@ -2780,8 +2723,8 @@ lemma Arch_sameRegionAs_spec: apply (cases "vmpage_size=ARMSmallPage") \ \capa is a small frame\ apply (frule cap_get_tag_isCap_unfolded_H_cap(16)[where cap' = cap_a], assumption) - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs from_bool_def) \ \capb is PageCap\ subgoal for \ vmpage_sizea optiona @@ -2856,8 +2799,8 @@ lemma Arch_sameRegionAs_spec: apply (simp add: cap_frame_cap_lift) apply (simp add: c_valid_cap_def cl_valid_cap_def) - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs from_bool_def) \ \capb is PageCap\ subgoal for \ vmpage_sizea optiona @@ -2934,8 +2877,8 @@ lemma Arch_sameRegionAs_spec: done \ \capa is PageTableCap\ - apply (cases capb; simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs) \ \capb is PageCap\ subgoal for \ vmpage_size option apply 
(cases "vmpage_size=ARMSmallPage") @@ -2955,8 +2898,8 @@ lemma Arch_sameRegionAs_spec: capPTBasePtr_CL (cap_page_table_cap_lift cap_b)"; simp) \ \capa is PageDirectoryCap\ - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs) \ \capb is PageCap\ subgoal for \ vmpage_size option apply (cases "vmpage_size=ARMSmallPage") @@ -2976,8 +2919,8 @@ lemma Arch_sameRegionAs_spec: capPDBasePtr_CL (cap_page_directory_cap_lift cap_b)"; simp) \ \capa is VCPUCap\ - apply (cases capb; simp add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def) + apply (cases capb; + simp add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs) \ \capb is PageCap\ subgoal for \ vmpage_size option apply (cases "vmpage_size=ARMSmallPage") @@ -3343,8 +3286,7 @@ lemma cap_get_capIsPhysical_spec: cap_lift_asid_control_cap word_sle_def cap_lift_irq_control_cap cap_lift_null_cap mask_def objBits_simps cap_lift_domain_cap - ptr_add_assertion_positive from_bool_def - true_def false_def + ptr_add_assertion_positive dest!: sym [where t = "cap_get_tag cap" for cap] split: vmpage_size.splits)+ (* XXX: slow. there should be a rule for this *) @@ -3452,22 +3394,23 @@ lemma sameRegionAs_spec: apply (simp add: sameRegionAs_def isArchCap_tag_def2) apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps) \ \capa is a ThreadCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + apply (case_tac capb, + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(1)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(1)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_thread_cap_lift) apply (simp add: cap_to_H_def) + apply (clarsimp simp: from_bool_0 split: if_split) apply (clarsimp simp: case_bool_If ctcb_ptr_to_tcb_ptr_def if_distrib cong: if_cong) apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is a NullCap\ - apply (simp add: cap_tag_defs from_bool_def false_def) + apply (simp add: cap_tag_defs) \ \capa is an NotificationCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + apply (case_tac capb, + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(3)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(3)) apply (simp add: ccap_relation_def map_option_case) @@ -3477,15 +3420,15 @@ lemma sameRegionAs_spec: apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is an IRQHandlerCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + apply (case_tac capb, + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(5)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(5)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_irq_handler_cap_lift) apply (simp add: cap_to_H_def) apply (clarsimp simp: up_ucast_inj_eq 
c_valid_cap_def - cl_valid_cap_def mask_twice + cl_valid_cap_def mask_twice from_bool_0 split: if_split bool.split | intro impI conjI | simp )+ @@ -3495,34 +3438,34 @@ lemma sameRegionAs_spec: apply (clarsimp simp: isArchCap_tag_def2) \ \capa is an EndpointCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(4)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(4)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_endpoint_cap_lift) apply (simp add: cap_to_H_def) - apply (clarsimp split: if_split) + apply (clarsimp simp: from_bool_0 split: if_split) apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is a DomainCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 split: if_split) \ \capa is a Zombie\ - apply (simp add: cap_tag_defs from_bool_def false_def) + apply (simp add: cap_tag_defs) \ \capa is an Arch object cap\ apply (frule_tac cap'=cap_a in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) apply (rule conjI, clarsimp, rule impI)+ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] \ \capb is an Arch object cap\ apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) \ \capa is a ReplyCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(8)) @@ -3530,7 +3473,7 @@ lemma sameRegionAs_spec: apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_reply_cap_lift) apply (simp add: cap_to_H_def ctcb_ptr_to_tcb_ptr_def) - apply (clarsimp split: if_split) + apply (clarsimp simp: from_bool_0 split: if_split) \ \capa is an UntypedCap\ apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(9)) apply (intro conjI) @@ -3538,8 +3481,7 @@ lemma sameRegionAs_spec: apply (rule impI, drule(1) cap_get_tag_to_H)+ apply (clarsimp simp: capAligned_def word_bits_conv objBits_simps' get_capZombieBits_CL_def - Let_def word_less_nat_alt - less_mask_eq true_def + Let_def word_less_nat_alt less_mask_eq split: if_split_asm) apply (subgoal_tac "capBlockSize_CL (cap_untyped_cap_lift cap_a) \ 0x1F") apply (simp add: word_le_make_less) @@ -3560,10 +3502,9 @@ lemma sameRegionAs_spec: cap_untyped_cap_lift cap_to_H_def field_simps valid_cap'_def)+)[4] apply (rule impI, simp add: from_bool_0 ccap_relation_get_capIsPhysical[symmetric]) - apply (simp add: from_bool_def false_def) \ \capa is a CNodeCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: 
isArchCap_tag_def2) apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(10)) @@ -3571,10 +3512,9 @@ lemma sameRegionAs_spec: apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_cnode_cap_lift) apply (simp add: cap_to_H_def) - apply (clarsimp split: if_split bool.split) + apply (clarsimp simp: from_bool_0 split: if_split bool.split) \ \capa is an IRQControlCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def)[1] + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 split: if_split) done @@ -3624,23 +3564,21 @@ lemma Arch_sameObjectAs_spec: simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs)[1] apply (rename_tac vmpage_sizea optiona) apply (case_tac "vmpage_sizea = ARMSmallPage", - simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs - false_def from_bool_def)[1] + simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(16), simp) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(16), simp) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_small_frame_cap_lift) - apply (clarsimp simp: cap_to_H_def capAligned_def to_bool_def from_bool_def + apply (clarsimp simp: cap_to_H_def capAligned_def to_bool_def split: if_split bool.split dest!: is_aligned_no_overflow) apply (case_tac "vmpage_sizea = ARMSmallPage", - simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs - false_def from_bool_def)[1] + simp_all add: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(17), simp) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(17), simp) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_frame_cap_lift) - apply (clarsimp simp: cap_to_H_def capAligned_def from_bool_def + apply (clarsimp simp: cap_to_H_def capAligned_def c_valid_cap_def cl_valid_cap_def Kernel_C.ARMSmallPage_def split: if_split bool.split vmpage_size.split_asm @@ -3659,8 +3597,7 @@ lemma sameObjectAs_spec: apply vcg apply (clarsimp simp: sameObjectAs_def isArchCap_tag_def2) apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs - from_bool_def false_def) + isCap_simps cap_tag_defs) apply fastforce+ \ \capa is an arch cap\ apply (frule cap_get_tag_isArchCap_unfolded_H_cap) @@ -3741,7 +3678,7 @@ lemma isMDBParentOf_spec: apply (simp add: ccte_relation_def map_option_case) apply (simp add: cte_lift_def) apply (clarsimp simp: cte_to_H_def mdb_node_to_H_def split: option.split_asm) - apply (clarsimp simp: Let_def false_def from_bool_def to_bool_def + apply (clarsimp simp: Let_def to_bool_def split: if_split bool.splits) apply ((clarsimp simp: typ_heap_simps dest!: lift_t_g)+)[3] apply (rule_tac x="cteCap ctea" in exI, rule conjI) @@ -3760,11 +3697,11 @@ lemma isMDBParentOf_spec: apply (rule impI, rule conjI) \ \sameRegionAs = 0\ apply (rule impI) - apply (clarsimp simp: from_bool_def false_def + apply (clarsimp simp: from_bool_def split: if_split bool.splits) \ \sameRegionAs \ 0\ - apply (clarsimp simp: from_bool_def false_def) + apply (clarsimp simp: from_bool_def) apply (case_tac "RetypeDecls_H.sameRegionAs (cap_to_H x2b) (cap_to_H x2c)") prefer 2 apply clarsimp apply (clarsimp cong:bool.case_cong if_cong 
simp: typ_heap_simps) @@ -3774,8 +3711,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (frule cap_get_tag_EndpointCap) apply simp - apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def true_def) \ \badge of A is not 0 now\ - + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ apply (subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_endpoint_cap") \ \needed also after\ prefer 2 @@ -3790,8 +3726,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (frule cap_get_tag_NotificationCap) apply simp - apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def true_def) \ \badge of A is not 0 now\ - + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ apply (subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_notification_cap") \ \needed also after\ prefer 2 @@ -3807,11 +3742,9 @@ lemma isMDBParentOf_spec: apply clarsimp apply (simp add: to_bool_def) apply (subgoal_tac "(\ (isEndpointCap (cap_to_H x2b))) \ ( \ (isNotificationCap (cap_to_H x2b)))") - apply (clarsimp simp: true_def) - apply (rule conjI) - apply (clarsimp simp: cap_get_tag_isCap [symmetric])+ -done - + apply clarsimp + apply (clarsimp simp: cap_get_tag_isCap[symmetric]) + done lemma updateCapData_spec: "\cap. \ \ \ ccap_relation cap \cap \ preserve = to_bool (\preserve) \ newData = \newData\ @@ -3825,7 +3758,7 @@ lemma updateCapData_spec: apply (simp add: updateCapData_def) apply (case_tac cap, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps from_bool_def isArchCap_tag_def2 cap_tag_defs Let_def) + isCap_simps isArchCap_tag_def2 cap_tag_defs Let_def) \ \NotificationCap\ apply clarsimp apply (frule cap_get_tag_isCap_unfolded_H_cap(3)) @@ -3954,7 +3887,6 @@ lemma ensureNoChildren_ccorres: apply (rule conjI) \ \isMDBParentOf is not zero\ apply clarsimp - apply (simp add: from_bool_def) apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] apply (simp add: bind_def) @@ -3965,7 +3897,6 @@ lemma ensureNoChildren_ccorres: apply (simp add: syscall_error_to_H_cases(9)) \ \isMDBParentOf is zero\ apply clarsimp - apply (simp add: from_bool_def) apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] apply (simp add: bind_def) apply (simp add: split_paired_Bex) @@ -4077,9 +4008,8 @@ lemma Arch_deriveCap_ccorres: apply (rule context_conjI) apply (simp add: cap_get_tag_isCap_ArchObject) apply (clarsimp simp: returnOk_def return_def isCap_simps) - subgoal by (simp add: ccap_relation_def cap_lift_def Let_def - cap_tag_defs cap_to_H_def to_bool_def - cap_small_frame_cap_lift_def asidInvalid_def) + subgoal by (simp add: ccap_relation_def cap_lift_def Let_def cap_tag_defs cap_to_H_def + cap_small_frame_cap_lift_def asidInvalid_def) apply (clarsimp simp: ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) @@ -4087,8 +4017,7 @@ lemma Arch_deriveCap_ccorres: apply (rule context_conjI) apply (simp add: cap_get_tag_isCap_ArchObject) apply (clarsimp simp: returnOk_def return_def isCap_simps) - subgoal by (simp add: ccap_relation_def cap_lift_def Let_def - cap_tag_defs cap_to_H_def to_bool_def + subgoal by (simp add: ccap_relation_def cap_lift_def Let_def cap_tag_defs cap_to_H_def cap_frame_cap_lift_def asidInvalid_def c_valid_cap_def cl_valid_cap_def) apply (simp add: cap_get_tag_isCap_ArchObject ccorres_cond_iffs) @@ -4112,7 +4041,7 @@ lemma deriveCap_ccorres': apply csymbr apply (fold 
case_bool_If) apply wpc - apply (clarsimp simp: cap_get_tag_isCap isCap_simps from_bool_def) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) apply csymbr apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws [where P=\ and P' = UNIV]) @@ -4121,7 +4050,7 @@ lemma deriveCap_ccorres': apply vcg apply clarsimp apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -4129,7 +4058,7 @@ lemma deriveCap_ccorres': apply (clarsimp simp: returnOk_def return_def ccap_relation_NullCap_iff) apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_rhs_assoc)+ @@ -4154,7 +4083,7 @@ lemma deriveCap_ccorres': errstate_def) apply wp apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -4168,7 +4097,7 @@ lemma deriveCap_ccorres': apply vcg apply (clarsimp simp: cap_get_tag_isCap liftME_def Let_def isArchCap_T_isArchObjectCap - ccorres_cond_univ_iff from_bool_def) + ccorres_cond_univ_iff) apply (rule ccorres_add_returnOk) apply (rule ccorres_split_nothrow_call_novcgE [where xf'=ret__struct_deriveCap_ret_C_']) @@ -4186,7 +4115,7 @@ lemma deriveCap_ccorres': apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def throwError_def) apply wp - apply (simp add: cap_get_tag_isCap isArchCap_T_isArchObjectCap from_bool_def) + apply (simp add: cap_get_tag_isCap isArchCap_T_isArchObjectCap) apply csymbr apply (simp add: cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -4197,7 +4126,6 @@ lemma deriveCap_ccorres': cap_get_tag_isArchCap_unfolded_H_cap) done - lemma deriveCap_ccorres: "ccorres (syscall_error_rel \ ccap_relation) deriveCap_xf (invs') (UNIV \ {s. ccap_relation cap (cap_' s)} \ {s. 
slot_' s = Ptr slot}) [] diff --git a/proof/crefine/ARM_HYP/CSpace_RAB_C.thy b/proof/crefine/ARM_HYP/CSpace_RAB_C.thy index d2e9b60714..8aed289d52 100644 --- a/proof/crefine/ARM_HYP/CSpace_RAB_C.thy +++ b/proof/crefine/ARM_HYP/CSpace_RAB_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -53,7 +54,7 @@ lemma ccorres_remove_bind_returnOk_noguard: apply clarsimp apply (drule not_snd_bindE_I1) apply (erule (4) ccorresE[OF ac]) - apply (clarsimp simp add: bindE_def returnOk_def NonDetMonad.lift_def bind_def return_def + apply (clarsimp simp add: bindE_def returnOk_def Nondet_Monad.lift_def bind_def return_def split_def) apply (rule bexI [rotated], assumption) apply (simp add: throwError_def return_def unif_rrel_def @@ -138,7 +139,8 @@ lemma ccorres_locateSlotCap_push: apply (rule monadic_rewrite_bindE[OF monadic_rewrite_refl]) apply (rule monadic_rewrite_transverse) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_stateAssert) + apply (rule monadic_rewrite_stateAssert[where f="return", simplified]) + apply (rule monadic_rewrite_refl) apply simp apply (rule monadic_rewrite_refl) apply assumption @@ -204,10 +206,8 @@ next apply (simp add: cap_get_tag_isCap split del: if_split) apply (thin_tac "ret__unsigned = X" for X) apply (rule ccorres_split_throws [where P = "?P"]) - apply (rule_tac G' = "\w_rightsMask. ({s. nodeCap_' s = nodeCap} - \ {s. unat (n_bits_' s) = guard'})" - in ccorres_abstract [where xf' = w_rightsMask_']) - apply (rule ceqv_refl) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap} \ {s. unat (n_bits_' s) = guard'}" + in ccorres_inst) apply (rule_tac r' = "?rvr" in ccorres_rel_imp [where xf' = rab_xf]) defer @@ -219,7 +219,7 @@ next apply (vcg strip_guards=true) \ \takes a while\ apply clarsimp apply simp - apply (clarsimp simp: cap_get_tag_isCap to_bool_def) + apply (clarsimp simp: cap_get_tag_isCap) \ \Main thm\ proof (induct cap' cptr' guard' rule: resolveAddressBits.induct [case_names ind]) case (ind cap cptr guard) @@ -559,8 +559,8 @@ lemma rightsFromWord_spec: \seL4_CapRights_lift \ret__struct_seL4_CapRights_C = cap_rights_from_word_canon \<^bsup>s\<^esup>w \" apply vcg apply (simp add: seL4_CapRights_lift_def nth_shiftr mask_shift_simps nth_shiftr - cap_rights_from_word_canon_def from_bool_def word_and_1 eval_nat_numeral - word_sless_def word_sle_def) + cap_rights_from_word_canon_def word_and_1 eval_nat_numeral + word_sless_def word_sle_def) done @@ -575,12 +575,6 @@ lemma cap_rights_to_H_from_word_canon [simp]: apply (simp add: cap_rights_to_H_def) done -(* MOVE *) -lemma to_bool_false [simp]: - "to_bool false = False" - unfolding to_bool_def false_def - by simp - lemma tcb_ptr_to_ctcb_ptr_mask [simp]: assumes tcbat: "tcb_at' thread s" diff --git a/proof/crefine/ARM_HYP/Cache.thy b/proof/crefine/ARM_HYP/Cache.thy deleted file mode 100644 index 0a50ec6813..0000000000 --- a/proof/crefine/ARM_HYP/Cache.thy +++ /dev/null @@ -1,37 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory Cache (* FIXME: broken *) -imports Main -begin - -text \Enable the proof cache, both skipping from it - and recording to it.\ -ML \DupSkip.record_proofs := true\ -ML \proofs := 1\ - -ML \DupSkip.skip_dup_proofs := true\ - -text \If executed in reverse order, save the cache\ -ML \val cache_thy_save_cache = ref false;\ -ML \ -if (! 
cache_thy_save_cache) -then File.open_output (XML_Syntax.output_forest - (XML_Syntax.xml_forest_of_cache (! DupSkip.the_cache))) - (Path.basic "proof_cache.xml") -else ()\ -ML \cache_thy_save_cache := true\ -ML \cache_thy_save_cache := false\ - -text \Load the proof cache - - can take up to a minute\ - -ML \ -DupSkip.the_cache := XML_Syntax.cache_of_xml_forest ( - File.open_input (XML_Syntax.input_forest) - (Path.basic "proof_cache.xml"))\ - -end diff --git a/proof/crefine/ARM_HYP/Ctac_lemmas_C.thy b/proof/crefine/ARM_HYP/Ctac_lemmas_C.thy index dcf5c72655..e099909f4e 100644 --- a/proof/crefine/ARM_HYP/Ctac_lemmas_C.thy +++ b/proof/crefine/ARM_HYP/Ctac_lemmas_C.thy @@ -23,7 +23,7 @@ lemma c_guard_abs_cte: apply (simp add: typ_heap_simps') done -lemmas ccorres_move_c_guard_cte [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] +lemmas ccorres_move_c_guard_cte [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] lemma c_guard_abs_tcb: fixes p :: "tcb_C ptr" @@ -33,7 +33,7 @@ lemma c_guard_abs_tcb: apply simp done -lemmas ccorres_move_c_guard_tcb [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] +lemmas ccorres_move_c_guard_tcb [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] lemma cte_array_relation_array_assertion: "gsCNodes s p = Some n \ cte_array_relation s cstate @@ -96,7 +96,7 @@ lemma array_assertion_abs_tcb_ctes_add': lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] @@ -119,7 +119,7 @@ lemma c_guard_abs_tcb_ctes': done lemmas c_guard_abs_tcb_ctes = c_guard_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_c_guard_tcb_ctes [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] +lemmas ccorres_move_c_guard_tcb_ctes [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] lemma c_guard_abs_pte: "\s s'. (s, s') \ rf_sr \ pte_at' (ptr_val p) s \ True diff --git a/proof/crefine/ARM_HYP/Delete_C.thy b/proof/crefine/ARM_HYP/Delete_C.thy index 1358d0fa10..103a1820ae 100644 --- a/proof/crefine/ARM_HYP/Delete_C.thy +++ b/proof/crefine/ARM_HYP/Delete_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -141,7 +142,7 @@ lemma capRemovable_spec: supply if_cong[cong] apply vcg apply (clarsimp simp: cap_get_tag_isCap(1-8)[THEN trans[OF eq_commute]]) - apply (simp add: capRemovable_def from_bool_def[where b=True] true_def) + apply (simp add: capRemovable_def) apply (clarsimp simp: ccap_zombie_radix_less4) apply (subst eq_commute, subst from_bool_eq_if) apply (rule exI, rule conjI, assumption) @@ -222,7 +223,7 @@ lemma cteDelete_ccorres1: apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg apply wp - apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R) + apply (rule_tac Q'="\rv. invs'" in hoare_strengthen_postE_R) apply (wp cutMon_validE_drop finaliseSlot_invs) apply fastforce apply (auto simp: cintr_def) @@ -299,7 +300,7 @@ lemma cteDelete_invs'': "\invs' and sch_act_simple and (\s. ex \ ex_cte_cap_to' ptr s)\ cteDelete ptr ex \\rv. 
invs'\" apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (unfold validE_R_def) apply (rule use_spec) apply (rule spec_valid_conj_liftE1) @@ -628,14 +629,14 @@ lemma reduceZombie_ccorres1: apply (clarsimp simp: throwError_def return_def cintr_def) apply vcg apply (wp cutMon_validE_drop) - apply (rule_tac Q'="\rv. invs' and cte_at' slot and valid_cap' cap" in hoare_post_imp_R) + apply (rule_tac Q'="\rv. invs' and cte_at' slot and valid_cap' cap" in hoare_strengthen_postE_R) apply (wp cteDelete_invs'') apply (clarsimp simp: cte_wp_at_ctes_of) apply (fastforce dest: ctes_of_valid') apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (simp add: guard_is_UNIV_def Collect_const_mem) - apply (clarsimp simp: from_bool_def false_def isCap_simps size_of_def cte_level_bits_def) + apply (clarsimp simp: isCap_simps size_of_def cte_level_bits_def) apply (simp only: word_bits_def unat_of_nat unat_arith_simps, simp) apply (simp add: guard_is_UNIV_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) @@ -717,8 +718,7 @@ lemma finaliseSlot_ccorres: apply (rule ccorres_drop_cutMon) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - from_bool_def true_def ccap_relation_NullCap_iff) + apply (clarsimp simp: returnOk_def return_def ccap_relation_NullCap_iff) apply (simp add: Collect_True liftE_bindE split_def ccorres_cond_iffs cutMon_walk_bind del: Collect_const cong: call_ignore_cong) @@ -757,8 +757,7 @@ lemma finaliseSlot_ccorres: | _ \ True" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - from_bool_def true_def) + apply (clarsimp simp: returnOk_def return_def) apply (clarsimp simp: cleanup_info_wf'_def arch_cleanup_info_wf'_def split: if_split capability.splits) apply vcg @@ -795,11 +794,11 @@ lemma finaliseSlot_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def) apply (drule use_valid [OF _ finaliseCap_cases, OF _ TrueI]) - apply (simp add: from_bool_def false_def irq_opt_relation_def true_def + apply (simp add: irq_opt_relation_def split: if_split_asm) apply vcg apply wp - apply (simp add: guard_is_UNIV_def true_def) + apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) apply (simp only: liftE_bindE cutMon_walk_bind Let_def @@ -824,7 +823,6 @@ lemma finaliseSlot_ccorres: in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) - apply (simp add: from_bool_def false_def) apply fastforce apply ceqv apply (simp only: from_bool_0 simp_thms Collect_False @@ -847,7 +845,7 @@ lemma finaliseSlot_ccorres: ccorres_seq_skip) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) apply (rule hyps[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def], - unfolded split_def, unfolded K_def], + unfolded split_def], (simp add: in_monad)+) apply (simp add: from_bool_0) apply simp @@ -869,7 +867,7 @@ lemma finaliseSlot_ccorres: apply (simp add: guard_is_UNIV_def) apply (simp add: conj_comms) apply (wp make_zombie_invs' updateCap_cte_wp_at_cases - updateCap_cap_to' hoare_vcg_disj_lift static_imp_wp)+ + updateCap_cap_to' hoare_vcg_disj_lift hoare_weak_lift_imp)+ apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) @@ -894,11 +892,11 @@ 
lemma finaliseSlot_ccorres: simp: isCap_simps final_matters'_def o_def) apply clarsimp apply (frule valid_globals_cte_wpD'[rotated], clarsimp) - apply (clarsimp simp: cte_wp_at_ctes_of false_def from_bool_def) + apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (frule valid_global_refsD_with_objSize, clarsimp) apply (auto simp: typ_heap_simps dest!: ccte_relation_ccap_relation)[1] - apply (wp isFinalCapability_inv static_imp_wp | wp (once) isFinal[where x=slot'])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | wp (once) isFinal[where x=slot'])+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -993,26 +991,23 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_drop_cutMon_bindE) apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg) add: cteDelete_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE) apply (rule ccorres_drop_cutMon_bindE) apply (ctac(no_vcg) add: preemptionPoint_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) - apply (rule hyps[unfolded K_def], - (fastforce simp: in_monad)+)[1] + apply (rule hyps; fastforce simp: in_monad) apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp preemptionPoint_invR) apply simp apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp cteDelete_invs' cteDelete_sch_act_simple) apply (rule ccorres_cond_false) @@ -1020,9 +1015,8 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def) - apply (simp add: guard_is_UNIV_def from_bool_def true_def cintr_def - Collect_const_mem exception_defs) - apply (simp add: guard_is_UNIV_def from_bool_def true_def) + apply (simp add: guard_is_UNIV_def cintr_def Collect_const_mem exception_defs) + apply (simp add: guard_is_UNIV_def) apply (rule getCTE_wp) apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def) apply (drule invs_mdb') diff --git a/proof/crefine/ARM_HYP/Detype_C.thy b/proof/crefine/ARM_HYP/Detype_C.thy index 8e21ca644d..a2634411f7 100644 --- a/proof/crefine/ARM_HYP/Detype_C.thy +++ b/proof/crefine/ARM_HYP/Detype_C.thy @@ -123,16 +123,6 @@ lemma h_t_valid_typ_region_bytes: by (simp add: valid_footprint_typ_region_bytes[OF neq_byte] size_of_def) -lemma proj_d_lift_state_hrs_htd_update [simp]: - "proj_d (lift_state (hrs_htd_update f hp)) = f (hrs_htd hp)" - by (cases hp) (simp add: hrs_htd_update_def proj_d_lift_state hrs_htd_def) - -lemma proj_d_lift_state_hrs_htd [simp]: - "proj_d (lift_state hp), g \\<^sub>t x = hrs_htd hp, g \\<^sub>t x" - apply (cases hp) - apply (simp add: proj_d_lift_state hrs_htd_def) - done - lemma heap_list_s_heap_list': fixes p :: "'a :: c_type ptr" shows "hrs_htd hp,\ \\<^sub>t p \ @@ -1477,14 +1467,6 @@ lemma map_comp_restrict_map: "(f \\<^sub>m (restrict_map m S)) = (restrict_map (f \\<^sub>m m) S)" by (rule ext, simp add: restrict_map_def map_comp_def) -lemma 
size_td_uinfo_array_tag_n_m[simp]: - "size_td (uinfo_array_tag_n_m (ta :: ('a :: c_type) itself) n m) - = size_of (TYPE('a)) * n" - apply (induct n) - apply (simp add: uinfo_array_tag_n_m_def) - apply (simp add: uinfo_array_tag_n_m_def size_of_def) - done - lemma modify_machinestate_assert_cnodes_swap: "do x \ modify (ksMachineState_update f); y \ stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) S) []; g od @@ -1553,13 +1535,13 @@ lemma deleteObjects_ccorres': doMachineOp_modify modify_modify o_def ksPSpace_ksMSu_comm bind_assoc modify_machinestate_assert_cnodes_swap modify_modify_bind) - apply (rule ccorres_stateAssert_fwd) + apply (rule ccorres_stateAssert_fwd)+ apply (rule ccorres_stateAssert_after) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: in_monad) apply (rule bexI [rotated]) - apply (rule iffD2 [OF in_monad(20)]) + apply (rule iffD2 [OF in_monad(21)]) apply (rule conjI [OF refl refl]) apply (clarsimp simp: simpler_modify_def) proof - @@ -1693,35 +1675,11 @@ proof - done moreover - from invs have "valid_queues s" .. - hence "\p. \t \ set (ksReadyQueues s p). tcb_at' t s \ ko_wp_at' live' t s" - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec, drule spec) - apply clarsimp - apply (drule (1) bspec) - apply (rule conjI) - apply (erule obj_at'_weakenE) - apply simp - apply (simp add: obj_at'_real_def) - apply (erule ko_wp_at'_weakenE) - apply (clarsimp simp: live'_def projectKOs inQ_def) - done - hence tat: "\p. \t \ set (ksReadyQueues s p). tcb_at' t s" - and tlive: "\p. \t \ set (ksReadyQueues s p). ko_wp_at' live' t s" - by auto from sr have - "cready_queues_relation (clift ?th_s) - (ksReadyQueues_' (globals s')) (ksReadyQueues s)" + "cready_queues_relation (ksReadyQueues s) (ksReadyQueues_' (globals s'))" unfolding cready_queues_relation_def rf_sr_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp) - apply fastforce - apply ((subst lift_t_typ_region_bytes, rule cm_disj_tcb, assumption+, - simp_all add: objBits_simps archObjSize_def pageBits_def projectKOs)[1])+ - \ \waiting ...\ - apply (simp add: tcb_queue_relation_live_restrict - [OF D.valid_untyped tat tlive rl]) done moreover diff --git a/proof/crefine/ARM_HYP/Fastpath_C.thy b/proof/crefine/ARM_HYP/Fastpath_C.thy index 8f25ce055c..1100808af2 100644 --- a/proof/crefine/ARM_HYP/Fastpath_C.thy +++ b/proof/crefine/ARM_HYP/Fastpath_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -38,11 +39,10 @@ lemma getEndpoint_obj_at': lemmas setEndpoint_obj_at_tcb' = setEndpoint_obj_at'_tcb lemma tcbSchedEnqueue_tcbContext[wp]: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - tcbSchedEnqueue t' - \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule tcbSchedEnqueue_obj_at_unchangedT[OF all_tcbI]) - apply simp + "tcbSchedEnqueue t' \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_when) + apply (wp threadSet_obj_at' hoare_drop_imps threadGet_wp + | simp split: if_split)+ done lemma setCTE_tcbContext: @@ -53,26 +53,22 @@ lemma setCTE_tcbContext: apply (rule setObject_cte_obj_at_tcb', simp_all) done -lemma seThreadState_tcbContext: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setThreadState a b - \\_. obj_at' (\tcb. 
P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule setThreadState_obj_at_unchanged) - apply (clarsimp simp: atcbContext_def)+ - done +lemma setThreadState_tcbContext: + "setThreadState a b \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def + tcbQueuePrepend_def rescheduleRequired_def + by (wp threadSet_obj_at' hoare_drop_imps threadGet_wp | wpc + | simp split: if_split)+ lemma setBoundNotification_tcbContext: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setBoundNotification a b - \\_. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule setBoundNotification_obj_at_unchanged) - apply (clarsimp simp: atcbContext_def)+ - done + "setBoundNotification a b \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setBoundNotification_def + by wpsimp declare comp_apply [simp del] crunch tcbContext[wp]: deleteCallerCap "obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t" (wp: setEndpoint_obj_at_tcb' setBoundNotification_tcbContext - setNotification_tcb crunch_wps seThreadState_tcbContext + setNotification_tcb crunch_wps setThreadState_tcbContext simp: crunch_simps unless_def) declare comp_apply [simp] @@ -625,14 +621,15 @@ lemmas stored_hw_asid_get_ccorres_split lemma dmo_clearExMonitor_setCurThread_swap: "(do _ \ doMachineOp ARM_HYP.clearExMonitor; - setCurThread thread - od) + setCurThread thread + od) = (do _ \ setCurThread thread; - doMachineOp ARM_HYP.clearExMonitor od)" - apply (simp add: setCurThread_def doMachineOp_def split_def) - apply (rule oblivious_modify_swap[symmetric]) - apply (intro oblivious_bind, - simp_all add: select_f_oblivious) + doMachineOp ARM_HYP.clearExMonitor + od)" + apply (clarsimp simp: ARM_HYP.clearExMonitor_def) + apply (simp add: doMachineOp_modify) + apply (rule oblivious_modify_swap) + apply (fastforce intro: oblivious_bind simp: setCurThread_def idleThreadNotQueued_def) done lemma pd_at_asid_inj': @@ -652,18 +649,12 @@ lemma armv_contextSwitch_HWASID_fp_rewrite: checkPDAt_def checkPDUniqueToASID_def checkPDASIDMapMembership_def stateAssert_def2[folded assert_def]) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_gets_l) - apply (rule monadic_rewrite_symb_exec_l) - apply (wpsimp)+ - apply (simp add: empty_fail_findPDForASID empty_fail_catch) - apply (rule monadic_rewrite_assert monadic_rewrite_gets_l)+ - apply (rule_tac P="asidMap asid \ None \ fst (the (asidMap asid)) = the (pde_stored_asid v)" - in monadic_rewrite_gen_asm) - apply (simp only: case_option_If2 simp_thms if_True if_False - split_def, simp) - apply (rule monadic_rewrite_refl) - apply (wp findPDForASID_pd_at_wp | simp only: const_def)+ + apply (wp_pre, repeat 9 monadic_rewrite_symb_exec_l) (* until hwasid <- *) + apply (rule_tac P="asidMap asid \ None \ fst (the (asidMap asid)) = the (pde_stored_asid v)" + in monadic_rewrite_gen_asm) + apply (simp add: case_option_If2 split_def) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: findPDForASID_pd_at_wp simp: empty_fail_catch)+ apply (clarsimp simp: pd_has_hwasid_def cte_level_bits_def field_simps cte_wp_at_ctes_of word_0_sle_from_less @@ -774,17 +765,19 @@ lemma switchToThread_fp_ccorres: ceqv, rename_tac "hw_asid_ret") apply (ctac(no_vcg) add: armv_contextSwitch_HWASID_ccorres) apply (simp add: storeWordUser_def bind_assoc case_option_If2 split_def del: Collect_const) - apply (simp only: dmo_clearExMonitor_setCurThread_swap dc_def[symmetric]) + apply (simp only: dmo_clearExMonitor_setCurThread_swap) apply 
(rule ccorres_split_nothrow_novcg_dc) + apply (clarsimp simp: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp del: rf_sr_upd_safe) - apply (clarsimp simp: setCurThread_def simpler_modify_def rf_sr_def cstate_relation_def + apply (clarsimp simp: simpler_modify_def rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply (ctac add: clearExMonitor_fp_ccorres) apply wp apply (simp add: guard_is_UNIV_def) - apply (wpsimp wp: dmo_contextSwitch_HWASID_atcbVCPUPtr_cases_helper hoare_vcg_all_lift + apply (wpsimp wp: hoare_drop_imps dmo_contextSwitch_HWASID_atcbVCPUPtr_cases_helper hoare_vcg_all_lift hoare_vcg_imp_lift) apply (rule conseqPre, vcg, simp, rule subset_refl) apply (rule conseqPre, vcg, clarsimp) @@ -846,7 +839,7 @@ lemma thread_state_ptr_set_tsType_np_spec: apply (clarsimp simp: typ_heap_simps') apply (rule exI, rule conjI[OF _ conjI [OF _ refl]]) apply (simp_all add: thread_state_lift_def) - apply (auto simp: "StrictC'_thread_state_defs" mask_def) + apply (auto simp: ThreadState_defs mask_def) done lemma thread_state_ptr_mset_blockingObject_tsType_spec: @@ -1034,10 +1027,7 @@ lemma ccorres_call_hSkip: apply - apply (rule ccorres_call_hSkip') apply (erule ccorres_guard_imp) - apply simp - apply clarsimp - apply (simp_all add: ggl xfdc_def) - apply (clarsimp simp: igl) + apply (clarsimp simp: ggl igl xfdc_def)+ done lemma bind_case_sum_rethrow: @@ -1165,7 +1155,7 @@ lemma isValidVTableRoot_fp_spec: {t. ret__unsigned_long_' t = from_bool (isValidVTableRoot_C (pd_cap_' s))}" apply vcg apply (clarsimp simp: word_sle_def word_sless_def isValidVTableRoot_fp_lemma) - apply (simp add: from_bool_def split: if_split) + apply (simp split: if_split) done lemma isRecvEP_endpoint_case: @@ -1253,8 +1243,8 @@ lemma fastpath_dequeue_ccorres: apply (rule conjI) apply (clarsimp simp: cpspace_relation_def update_ep_map_tos update_tcb_map_tos typ_heap_simps') - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_queue_ptrs_def + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) apply (rule conjI) apply (rule cpspace_relation_ep_update_ep, assumption+) @@ -1270,8 +1260,6 @@ lemma fastpath_dequeue_ccorres: apply (simp add: carch_state_relation_def typ_heap_simps' cmachine_state_relation_def h_t_valid_clift_Some_iff update_ep_map_tos) - apply (erule cready_queues_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) done lemma st_tcb_at_not_in_ep_queue: @@ -1409,8 +1397,8 @@ lemma fastpath_enqueue_ccorres: apply (rule conjI) apply (clarsimp simp: cpspace_relation_def update_ep_map_tos typ_heap_simps') - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_queue_ptrs_def + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) apply (rule conjI) apply (rule_tac S="tcb_ptr_to_ctcb_ptr ` set (ksCurThread \ # list)" @@ -1449,8 +1437,6 @@ lemma fastpath_enqueue_ccorres: auto dest!: map_to_ko_atI)[1] apply (simp add: carch_state_relation_def typ_heap_simps' update_ep_map_tos cmachine_state_relation_def h_t_valid_clift_Some_iff) - apply (erule cready_queues_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) apply (clarsimp simp: typ_heap_simps' EPState_Recv_def 
mask_def is_aligned_weaken[OF is_aligned_tcb_ptr_to_ctcb_ptr]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) @@ -1458,8 +1444,8 @@ lemma fastpath_enqueue_ccorres: apply (rule conjI) apply (clarsimp simp: cpspace_relation_def update_ep_map_tos typ_heap_simps' ct_in_state'_def) - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_queue_ptrs_def + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) + apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) apply (rule conjI) apply (rule_tac S="{tcb_ptr_to_ctcb_ptr (ksCurThread \)}" @@ -1479,8 +1465,6 @@ lemma fastpath_enqueue_ccorres: assumption+, auto dest!: map_to_ko_atI)[1] apply (simp add: carch_state_relation_def typ_heap_simps' update_ep_map_tos cmachine_state_relation_def h_t_valid_clift_Some_iff) - apply (erule cready_queues_relation_null_queue_ptrs) - apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split) done lemma setCTE_rf_sr: @@ -1575,8 +1559,8 @@ lemma cap_reply_cap_ptr_new_np_updateCap_ccorres: limited_and_simps cap_reply_cap_def limited_and_simps1[OF lshift_limited_and, OF limited_and_from_bool] shiftr_over_or_dist word_bw_assocs mask_def shiftl_shiftr3 word_size) - apply (cases m ; clarsimp) - apply (cases canGrant ; clarsimp) + apply (cases m ; clarsimp simp: true_def) + apply (cases canGrant ; clarsimp simp: true_def false_def) done lemma fastpath_copy_mrs_ccorres: @@ -1627,7 +1611,7 @@ lemma ctes_of_Some_cte_wp_at: by (clarsimp simp: cte_wp_at_ctes_of) lemma user_getreg_wp: - "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (atcbContextGet o tcbArch) tcb r = rv) t s \ Q rv s)\ + "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb r = rv) t s \ Q rv s)\ asUser t (getRegister r) \Q\" apply (rule_tac Q="\rv s. \rv'. rv' = rv \ Q rv' s" in hoare_post_imp) apply simp @@ -1751,8 +1735,8 @@ lemma fastpath_call_ccorres: notes hoare_TrueI[simp] if_cong[cong] option.case_cong[cong] shows "ccorres dc xfdc (\s. invs' s \ ct_in_state' ((=) Running) s - \ obj_at' (\tcb. (atcbContextGet o tcbArch) tcb ARM_HYP_H.capRegister = cptr - \ (atcbContextGet o tcbArch) tcb ARM_HYP_H.msgInfoRegister = msginfo) + \ obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb ARM_HYP_H.capRegister = cptr + \ (user_regs o atcbContextGet o tcbArch) tcb ARM_HYP_H.msgInfoRegister = msginfo) (ksCurThread s) s) (UNIV \ {s. cptr_' s = cptr} \ {s. 
msgInfo_' s = msginfo}) [] (fastpaths SysCall) (Call fastpath_call_'proc)" @@ -1826,7 +1810,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_alternative2) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -1860,9 +1843,10 @@ proof - apply (simp add: from_bool_0 if_1_0_0 cong: if_cong) apply (rule ccorres_cond_true_seq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) - apply (rule slowpath_ccorres, simp+) + apply (erule disjE; simp; rule slowpath_ccorres) + apply simp + apply simp apply (vcg exspec=slowpath_noreturn_spec) apply (rule ccorres_rhs_assoc)+ apply csymbr+ @@ -1875,7 +1859,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1912,7 +1895,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1935,7 +1917,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1993,29 +1974,25 @@ proof - apply (simp add: ctcb_relation_unat_tcbPriority_C word_less_nat_alt linorder_not_le) apply ceqv - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) - apply (simp add: if_1_0_0 ccap_relation_ep_helpers from_bool_0 word_le_not_less - del: Collect_const cong: call_ignore_cong) + apply (simp add: from_bool_eq_if from_bool_eq_if' from_bool_0 ccorres_IF_True del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp add: bindE_assoc del: Collect_const) apply (rule ccorres_Guard_Seq) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres) - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) + apply (simp add: from_bool_eq_if from_bool_eq_if' from_bool_0 ccorres_IF_True del: Collect_const) apply (clarsimp simp: to_bool_def) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0) + apply (clarsimp simp: from_bool_eq_if' word_le_not_less from_bool_0) apply (clarsimp simp: return_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) apply (rule_tac P=\ and P'="{s. 
ret__int_' s = 0}" in ccorres_from_vcg) apply (clarsimp simp: isHighestPrio_def' simpler_gets_def) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: from_bool_0) apply clarsimp apply vcg apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) @@ -2029,7 +2006,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (simp add: bindE_assoc from_bool_0 catch_throwError del: Collect_const) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2048,7 +2024,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2063,7 +2038,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2092,7 +2066,6 @@ proof - apply (rule ccorres_seq_cond_raise[THEN iffD2]) apply (rule_tac R=\ in ccorres_cond2', blast) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2124,9 +2097,6 @@ proof - apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) apply (simp add: ctcb_relation_def cthread_state_relation_def) apply simp - apply (rule conjI, erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split add: typ_heap_simps') - apply (rule ext, simp split: if_split add: typ_heap_simps') apply (simp add: carch_state_relation_def cmachine_state_relation_def typ_heap_simps' map_comp_update projectKO_opt_tcb cvariable_relation_upd_const ko_at_projectKO_opt) @@ -2148,7 +2118,7 @@ proof - ccorres_move_array_assertion_tcb_ctes ccorres_move_c_guard_tcb_ctes)+ apply csymbr - apply (simp add: cteInsert_def bind_assoc dc_def[symmetric] + apply (simp add: cteInsert_def bind_assoc del: Collect_const cong: call_ignore_cong) apply (rule ccorres_pre_getCTE2, rename_tac curThreadReplyCTE) apply (simp only: getThreadState_def) @@ -2251,9 +2221,6 @@ proof - apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) apply (simp add: ctcb_relation_def cthread_state_relation_def) apply simp - apply (rule conjI, erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (simp add: carch_state_relation_def cmachine_state_relation_def typ_heap_simps' map_comp_update projectKO_opt_tcb cvariable_relation_upd_const ko_at_projectKO_opt) @@ -2271,7 +2238,6 @@ proof - apply csymbr apply csymbr apply (rule ccorres_call_hSkip) - apply (fold dc_def)[1] apply (rule fastpath_restore_ccorres) apply simp apply simp @@ -2298,7 +2264,7 @@ proof - apply (wp updateMDB_weak_cte_wp_at) apply simp apply (vcg exspec=mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_modifies) - apply (simp add: o_def) + apply simp apply (wp | simp | wp (once) updateMDB_weak_cte_wp_at | wp (once) updateMDB_cte_wp_at_other)+ @@ -2375,7 +2341,7 @@ proof - apply (vcg exspec=endpoint_ptr_get_epQueue_head_modifies exspec=endpoint_ptr_get_state_modifies) apply (simp add: if_1_0_0 getSlotCap_def) - apply (rule valid_isRight_theRight_split) + apply (rule 
valid_isLeft_theRight_split) apply simp apply (wp getCTE_wp') apply (rule validE_R_abstract_rv) @@ -2444,7 +2410,7 @@ proof - apply (rule conjI) (* isReceive on queued tcb state *) apply (fastforce simp: st_tcb_at_tcbs_of isBlockedOnReceive_def isReceive_def) apply clarsimp - apply (rule conjI, fastforce dest!: invs_queues simp: valid_queues_def) + apply (rule conjI, fastforce dest!: simp: valid_queues_def) apply (frule invs_mdb', clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (case_tac xb, clarsimp, drule(1) nullcapsD') apply (clarsimp simp: pde_stored_asid_def to_bool_def @@ -2549,7 +2515,7 @@ lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add[where tcb="\s. Ptr (tcb' s)" for tcb', simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)[where tcb="\s. Ptr (tcb' s)" for tcb', simplified]] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] @@ -2577,8 +2543,8 @@ lemma fastpath_reply_recv_ccorres: notes hoare_TrueI[simp] shows "ccorres dc xfdc (\s. invs' s \ ct_in_state' ((=) Running) s - \ obj_at' (\tcb. (atcbContextGet o tcbArch) tcb capRegister = cptr - \ (atcbContextGet o tcbArch) tcb msgInfoRegister = msginfo) + \ obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb capRegister = cptr + \ (user_regs o atcbContextGet o tcbArch) tcb msgInfoRegister = msginfo) (ksCurThread s) s) (UNIV \ {s. cptr_' s = cptr} \ {s. msgInfo_' s = msginfo}) [] (fastpaths SysReplyRecv) (Call fastpath_reply_recv_'proc)" @@ -2654,7 +2620,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_alternative2) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -2687,9 +2652,8 @@ lemma fastpath_reply_recv_ccorres: apply (simp add: if_1_0_0 cong: if_cong) apply (rule ccorres_cond_true_seq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) - apply (rule slowpath_ccorres) + apply (erule disjE; simp; rule slowpath_ccorres) apply simp apply simp apply (vcg exspec=slowpath_noreturn_spec) @@ -2704,7 +2668,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -2729,7 +2692,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_split_throws) apply simp - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2759,7 +2721,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp del: Collect_const not_None_eq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2775,7 +2736,7 @@ lemma fastpath_reply_recv_ccorres: and val="tcb_ptr_to_ctcb_ptr curThread" in ccorres_abstract_known) apply (rule Seq_weak_ceqv, rule Basic_ceqv) - apply (rule rewrite_xfI, clarsimp simp only: o_def) + apply (rule rewrite_xfI) apply (rule refl) apply csymbr apply (rule ccorres_move_c_guard_cte) @@ -2793,7 +2754,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp cong: conj_cong) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] 
apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2813,7 +2773,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp del: Collect_const not_None_eq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2835,7 +2794,6 @@ lemma fastpath_reply_recv_ccorres: apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2867,7 +2825,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_cond2'[where R=\], blast) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2882,7 +2839,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2913,12 +2869,10 @@ lemma fastpath_reply_recv_ccorres: apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def - to_bool_def del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc2) @@ -2931,7 +2885,7 @@ lemma fastpath_reply_recv_ccorres: apply (clarsimp simp: rf_sr_ksCurThread typ_heap_simps' h_t_valid_clift_Some_iff) apply (clarsimp simp: capAligned_def isCap_simps objBits_simps - "StrictC'_thread_state_defs" mask_def) + ThreadState_defs mask_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps' objBits_defs) apply (rule conjI) @@ -2942,20 +2896,16 @@ lemma fastpath_reply_recv_ccorres: apply (simp add: cep_relations_drop_fun_upd) apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) apply (simp add: ctcb_relation_def cthread_state_relation_def - "StrictC'_thread_state_defs" from_bool_0 - to_bool_def if_1_0_0) + ThreadState_defs) apply (clarsimp simp: ccap_relation_ep_helpers) apply simp - apply (rule conjI, erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (simp add: carch_state_relation_def cmachine_state_relation_def typ_heap_simps' map_comp_update projectKO_opt_tcb cvariable_relation_upd_const ko_at_projectKO_opt) apply ceqv apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) - apply (rule fastpath_enqueue_ccorres[unfolded o_def,simplified]) + apply (rule fastpath_enqueue_ccorres[simplified]) apply simp apply ceqv apply (simp add: liftM_def del: Collect_const cong: call_ignore_cong) @@ -3025,9 +2975,6 @@ lemma fastpath_reply_recv_ccorres: apply (erule cmap_relation_updI, erule ko_at_projectKO_opt) apply (simp add: ctcb_relation_def cthread_state_relation_def) apply simp - apply (rule conjI, erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (simp add: carch_state_relation_def cmachine_state_relation_def typ_heap_simps' map_comp_update projectKO_opt_tcb cvariable_relation_upd_const 
ko_at_projectKO_opt) @@ -3044,7 +2991,6 @@ lemma fastpath_reply_recv_ccorres: apply csymbr apply csymbr apply (rule ccorres_call_hSkip) - apply (fold dc_def)[1] apply (rule fastpath_restore_ccorres) apply simp apply simp @@ -3069,7 +3015,7 @@ lemma fastpath_reply_recv_ccorres: apply (wp setCTE_cte_wp_at_other) apply (simp del: Collect_const) apply vcg - apply (simp add: o_def) + apply simp apply (wp | simp | wp (once) updateMDB_weak_cte_wp_at | wp (once) updateMDB_cte_wp_at_other)+ @@ -3131,7 +3077,7 @@ lemma fastpath_reply_recv_ccorres: apply (simp del: Collect_const) apply vcg apply (simp add: if_1_0_0 getSlotCap_def) - apply (rule valid_isRight_theRight_split) + apply (rule valid_isRight_theRight_split) apply (wp getCTE_wp') apply (rule validE_R_abstract_rv) apply wp @@ -3149,8 +3095,6 @@ lemma fastpath_reply_recv_ccorres: apply (clarsimp simp: ct_in_state'_def obj_at_tcbs_of word_sle_def) apply (clarsimp simp add: invs_ksCurDomain_maxDomain') apply (rule conjI, fastforce) - apply (frule invs_queues) - apply (simp add: valid_queues_def) apply (frule tcbs_of_aligned') apply (simp add:invs_pspace_aligned') apply (frule tcbs_of_cte_wp_at_caller) @@ -3180,6 +3124,11 @@ lemma fastpath_reply_recv_ccorres: invs_valid_pde_mappings' obj_at_tcbs_of dest!: isValidVTableRootD) apply (frule invs_mdb') + apply (frule invs_valid_objs') + apply (frule invs_valid_bitmaps) + apply (frule valid_bitmaps_bitmapQ_no_L1_orphans) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') apply (clarsimp simp: cte_wp_at_ctes_of tcbSlots cte_level_bits_def makeObject_cte isValidVTableRoot_def @@ -3187,10 +3136,10 @@ lemma fastpath_reply_recv_ccorres: pde_stored_asid_def to_bool_def valid_mdb'_def valid_tcb_state'_def word_le_nat_alt[symmetric] length_msgRegisters) - apply (frule ko_at_valid_ep', fastforce) apply (rule conjI) - subgoal (* dest thread domain \ maxDomain *) - by (drule (1) tcbs_of_valid_tcb'[OF invs_valid_objs'], solves \clarsimp simp: valid_tcb'_def\) + apply (fastforce dest: tcbs_of_valid_tcb' simp: valid_tcb'_def opt_map_def + split: option.splits) + apply (frule ko_at_valid_ep', fastforce) apply clarsimp apply (safe del: notI disjE)[1] apply (simp add: isSendEP_def valid_ep'_def tcb_at_invs' diff --git a/proof/crefine/ARM_HYP/Fastpath_Equiv.thy b/proof/crefine/ARM_HYP/Fastpath_Equiv.thy index 3401a60246..7b01b2b656 100644 --- a/proof/crefine/ARM_HYP/Fastpath_Equiv.thy +++ b/proof/crefine/ARM_HYP/Fastpath_Equiv.thy @@ -1,6 +1,6 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems - * Copyright 2020, Proofcraft Pty Ltd * * SPDX-License-Identifier: GPL-2.0-only *) @@ -31,44 +31,38 @@ lemma getEndpoint_obj_at': lemmas setEndpoint_obj_at_tcb' = setEndpoint_obj_at'_tcb -lemma tcbSchedEnqueue_tcbContext[wp]: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - tcbSchedEnqueue t' - \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule tcbSchedEnqueue_obj_at_unchangedT[OF all_tcbI]) - apply simp - done +crunches tcbSchedEnqueue + for tcbContext[wp]: "obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t" + (simp: tcbQueuePrepend_def) lemma setCTE_tcbContext: "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setCTE slot cte - \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + setCTE slot cte + \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" apply (simp add: setCTE_def) apply (rule setObject_cte_obj_at_tcb', simp_all) done context begin interpretation Arch . 
(*FIXME: arch_split*) -lemma seThreadState_tcbContext: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setThreadState a b - \\_. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule setThreadState_obj_at_unchanged) - apply (clarsimp simp: atcbContext_def)+ +lemma setThreadState_tcbContext: + "setThreadState st tptr \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setThreadState_def rescheduleRequired_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps) + apply (fastforce simp: obj_at'_def objBits_simps projectKOs atcbContext_def ps_clear_upd) done lemma setBoundNotification_tcbContext: - "\obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - setBoundNotification a b - \\_. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" - apply (rule setBoundNotification_obj_at_unchanged) - apply (clarsimp simp: atcbContext_def)+ + "setBoundNotification ntfnPtr tptr \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + unfolding setBoundNotification_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps) + apply (fastforce simp: obj_at'_def objBits_simps projectKOs) done declare comp_apply [simp del] crunch tcbContext[wp]: deleteCallerCap "obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t" (wp: setEndpoint_obj_at_tcb' setBoundNotification_tcbContext - setNotification_tcb crunch_wps seThreadState_tcbContext + setNotification_tcb crunch_wps setThreadState_tcbContext simp: crunch_simps unless_def) declare comp_apply [simp] @@ -121,24 +115,21 @@ lemmas valid_cnode_cap_cte_at'' declare of_int_sint_scast[simp] -lemma isCNodeCap_capUntypedPtr_capCNodePtr: - "isCNodeCap c \ capUntypedPtr c = capCNodePtr c" - by (clarsimp simp: isCap_simps) - lemma of_bl_from_bool: "of_bl [x] = from_bool x" by (cases x, simp_all add: from_bool_def) lemma dmo_clearExMonitor_setCurThread_swap: "(do _ \ doMachineOp ARM_HYP.clearExMonitor; - setCurThread thread - od) + setCurThread thread + od) = (do _ \ setCurThread thread; - doMachineOp ARM_HYP.clearExMonitor od)" - apply (simp add: setCurThread_def doMachineOp_def split_def) - apply (rule oblivious_modify_swap[symmetric]) - apply (intro oblivious_bind, - simp_all add: select_f_oblivious) + doMachineOp ARM_HYP.clearExMonitor + od)" + apply (clarsimp simp: ARM_HYP.clearExMonitor_def) + apply (simp add: doMachineOp_modify) + apply (rule oblivious_modify_swap) + apply (fastforce intro: oblivious_bind simp: setCurThread_def idleThreadNotQueued_def) done lemma pd_at_asid_inj': @@ -253,7 +244,7 @@ lemma ctes_of_Some_cte_wp_at: by (clarsimp simp: cte_wp_at_ctes_of) lemma user_getreg_wp: - "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (atcbContextGet o tcbArch) tcb r = rv) t s \ Q rv s)\ + "\\s. tcb_at' t s \ (\rv. obj_at' (\tcb. (user_regs o atcbContextGet o tcbArch) tcb r = rv) t s \ Q rv s)\ asUser t (getRegister r) \Q\" apply (rule_tac Q="\rv s. \rv'. rv' = rv \ Q rv' s" in hoare_post_imp) apply simp @@ -313,8 +304,6 @@ lemma threadSet_tcbState_valid_objs: apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def) done -lemmas monadic_rewrite_symb_exec_l' = monadic_rewrite_symb_exec_l'_preserve_names - lemma possibleSwitchTo_rewrite: "monadic_rewrite True True (\s. obj_at' (\tcb. 
tcbPriority tcb = destPrio \ tcbDomain tcb = destDom) t s @@ -325,21 +314,12 @@ lemma possibleSwitchTo_rewrite: (possibleSwitchTo t) (setSchedulerAction (SwitchToThread t))" supply if_split[split del] apply (simp add: possibleSwitchTo_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_l'[OF threadGet_inv empty_fail_threadGet, - where P'=\], simp) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="targetDom = curDom" in monadic_rewrite_gen_asm) - apply simp - apply (rule_tac P="action = ResumeCurrentThread" in monadic_rewrite_gen_asm) - apply simp - apply (rule monadic_rewrite_refl) - apply (wp threadGet_wp cd_wp |simp add: bitmap_fun_defs)+ + (* under current preconditions both branch conditions are false *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: threadGet_wp cd_wp\) + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: threadGet_wp cd_wp\) + (* discard unused getters before setSchedulerAction *) apply (simp add: getCurThread_def curDomain_def gets_bind_ign getSchedulerAction_def) - apply (rule monadic_rewrite_refl) - apply clarsimp + apply (monadic_rewrite_symb_exec_l_drop, rule monadic_rewrite_refl) apply (auto simp: obj_at'_def) done @@ -358,7 +338,7 @@ lemma lookupBitmapPriority_lift: unfolding lookupBitmapPriority_def apply (rule hoare_pre) apply (wps prqL1 prqL2) - apply wpsimp+ + apply wpsimp+ done (* slow path additionally requires current thread not idle *) @@ -408,6 +388,15 @@ lemma fastpathBestSwitchCandidate_ksSchedulerAction_simp[simp]: unfolding fastpathBestSwitchCandidate_def lookupBitmapPriority_def by simp +lemma sched_act_SwitchToThread_rewrite: + "\ sa = SwitchToThread t \ monadic_rewrite F E Q (m_sw t) f \ + \ monadic_rewrite F E ((\_. sa = SwitchToThread t) and Q) + (case_scheduler_action m_res m_ch (\t. m_sw t) sa) f" + apply (cases sa; simp add: monadic_rewrite_impossible) + apply (rename_tac t') + apply (case_tac "t' = t"; simp add: monadic_rewrite_impossible) + done + lemma schedule_rewrite_ct_not_runnable': "monadic_rewrite True True (\s. 
ksSchedulerAction s = SwitchToThread t \ ct_in_state' (Not \ runnable') s @@ -417,51 +406,36 @@ lemma schedule_rewrite_ct_not_runnable': (do setSchedulerAction ResumeCurrentThread; switchToThread t od)" supply subst_all [simp del] apply (simp add: schedule_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="action = SwitchToThread t" in monadic_rewrite_gen_asm, simp) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ wasRunnable \ action = SwitchToThread t" - in monadic_rewrite_gen_asm,simp) - apply (rule monadic_rewrite_bind_tail, rename_tac idleThread) - apply (rule monadic_rewrite_bind_tail, rename_tac targetPrio) - apply (rule monadic_rewrite_bind_tail, rename_tac curPrio) - apply (rule monadic_rewrite_bind_tail, rename_tac fastfail) - apply (rule monadic_rewrite_bind_tail, rename_tac curDom) - apply (rule monadic_rewrite_bind_tail, rename_tac highest) - apply (rule_tac P="\ (fastfail \ \ highest)" in monadic_rewrite_gen_asm, simp only:) - apply simp - apply (rule monadic_rewrite_refl) - apply (wpsimp wp: hoare_vcg_imp_lift) - apply (simp add: isHighestPrio_def') - apply wp+ - apply (wp hoare_vcg_disj_lift) - apply (wp scheduleSwitchThreadFastfail_False_wp) - apply (wp hoare_vcg_disj_lift threadGet_wp'' | simp add: comp_def)+ - (* remove no-ops, somewhat by magic *) - apply (rule monadic_rewrite_symb_exec_l'_TT, solves wp, - wpsimp wp: empty_fail_isRunnable simp: isHighestPrio_def')+ - apply (simp add: setSchedulerAction_def) - apply (subst oblivious_modify_swap[symmetric], rule oblivious_switchToThread_schact) - apply (rule monadic_rewrite_refl) - apply wp+ - apply (clarsimp simp: ct_in_state'_def) - apply (strengthen not_pred_tcb_at'_strengthen, simp) - supply word_neq_0_conv[simp del] + (* switching to t *) + apply (monadic_rewrite_l sched_act_SwitchToThread_rewrite[where t=t]) + (* not wasRunnable, skip enqueue *) + apply (simp add: when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + (* fastpath: \ (fastfail \ \ highest) *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + (* fastpath: no scheduleChooseNewThread *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + (* remove no-ops *) + apply (repeat 10 monadic_rewrite_symb_exec_l) (* until switchToThread *) + apply (simp add: setSchedulerAction_def) + apply (subst oblivious_modify_swap[symmetric], + rule oblivious_switchToThread_schact) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: empty_fail_isRunnable simp: isHighestPrio_def')+ + apply (clarsimp simp: ct_in_state'_def not_pred_tcb_at'_strengthen + fastpathBestSwitchCandidate_def) apply normalise_obj_at' - apply (simp add: fastpathBestSwitchCandidate_def) - apply (erule_tac x="tcbPriority ko" in allE) - apply (erule impE, normalise_obj_at'+) done -crunch tcb2[wp]: "Arch.switchToThread" "tcb_at' t" - (ignore: ARM_HYP.clearExMonitor) - lemma resolveAddressBits_points_somewhere: "\\s. \slot. Q slot s\ resolveAddressBits cp cptr bits \Q\,-" - apply (rule_tac Q'="\rv s. \rv. Q rv s" in hoare_post_imp_R) + apply (rule_tac Q'="\rv s. \rv. 
Q rv s" in hoare_strengthen_postE_R) apply wp apply clarsimp done @@ -482,18 +456,12 @@ lemmas cteInsert_obj_at'_not_queued = cteInsert_obj_at'_queued[of "\a. lemma monadic_rewrite_threadGet: "monadic_rewrite E F (obj_at' (\tcb. f tcb = v) t) (threadGet f t) (return v)" - unfolding getThreadState_def - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans[rotated]) - apply (rule monadic_rewrite_gets_known) - apply (unfold threadGet_def liftM_def fun_app_def) - apply (rule monadic_rewrite_symb_exec_l' | wp | rule empty_fail_getObject getObject_inv)+ - apply (clarsimp; rule no_fail_getObject_tcb) - apply (simp only: exec_gets) - apply (rule_tac P = "(\s. (f x)=v) and tcb_at' t" in monadic_rewrite_refl3) - apply (simp add:) - apply (wp OMG_getObject_tcb | wpc)+ - apply (auto intro: obj_tcb_at') + unfolding getThreadState_def threadGet_def + apply (simp add: liftM_def) + apply monadic_rewrite_symb_exec_l + apply (rule_tac P="\_. f x = v" in monadic_rewrite_pre_imp_eq) + apply blast + apply (wpsimp wp: OMG_getObject_tcb simp: obj_tcb_at')+ done lemma monadic_rewrite_getThreadState: @@ -515,9 +483,6 @@ crunches cteInsert, asUser (wp: setCTE_obj_at'_queued crunch_wps threadSet_obj_at'_really_strongest) end -crunch ksReadyQueues_inv[wp]: cteInsert "\s. P (ksReadyQueues s)" - (wp: hoare_drop_imps) - crunches cteInsert, threadSet, asUser, emptySlot for ksReadyQueuesL1Bitmap_inv[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" and ksReadyQueuesL2Bitmap_inv[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" @@ -535,16 +500,51 @@ lemma setThreadState_runnable_bitmap_inv: \ \s. Q (ksReadyQueuesL2Bitmap s) \ setThreadState ts t \\rv s. Q (ksReadyQueuesL2Bitmap s) \" by (simp_all add: setThreadState_runnable_simp, wp+) +(* FIXME move *) +crunches curDomain + for (no_fail) no_fail[intro!, wp, simp] + +lemma setThreadState_tcbDomain_tcbPriority_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbDomain tcb) (tcbPriority tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps projectKOs) + done + +lemma setThreadState_tcbQueued_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbQueued tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps projectKOs) + done + +lemma setThreadState_tcbFault_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbFault tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps projectKOs) + done + +lemma setThreadState_tcbArch_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbArch tcb)) t'\" + unfolding setThreadState_def rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp hoare_drop_imps threadGet_wp simp: setQueue_def bitmap_fun_defs) + apply (fastforce simp: obj_at'_def st_tcb_at'_def objBits_simps projectKOs) + done + lemma fastpath_callKernel_SysCall_corres: "monadic_rewrite True False (invs' and ct_in_state' ((=) Running) and (\s. ksSchedulerAction s = ResumeCurrentThread) - and (\s. 
ksDomainTime s \ 0)) + and (\s. ksDomainTime s \ 0) and ready_qs_runnable) (callKernel (SyscallEvent SysCall)) (fastpaths SysCall)" supply if_cong[cong] option.case_cong[cong] if_split[split del] - apply (rule monadic_rewrite_introduce_alternative) - apply (simp add: callKernel_def) - apply (rule monadic_rewrite_imp) + supply empty_fail_getMRs[wp] (* FIXME *) + supply empty_fail_getEndpoint[wp] (* FIXME *) + apply (rule monadic_rewrite_introduce_alternative[OF callKernel_def[simplified atomize_eq]]) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_bind_alternative_l, wpsimp) + apply (rule monadic_rewrite_stateAssert) apply (simp add: handleEvent_def handleCall_def handleInvocation_def liftE_bindE_handle bind_assoc getMessageInfo_def) @@ -553,171 +553,158 @@ lemma fastpath_callKernel_SysCall_corres: getMessageInfo_def alternative_bind fastpaths_def cong: if_cong) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) apply (rename_tac msgInfo) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_r - [OF threadGet_inv no_fail_threadGet]) - apply (rename_tac thread msgInfo ptr tcbFault) - apply (rule monadic_rewrite_alternative_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rename_tac tcbFault) + apply (rule monadic_rewrite_alternative_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: split_def Syscall_H.syscall_def liftE_bindE_handle bind_assoc capFaultOnFailure_def) apply (simp only: bindE_bind_linearise[where f="rethrowFailure fn f'" for fn f'] bind_case_sum_rethrow) - apply (simp add: lookupCapAndSlot_def lookupSlotForThread_def + apply (simp add: lookupCapAndSlot_def lookupSlotForThread_def bindE_assoc liftE_bind_return_bindE_returnOk split_def getThreadCSpaceRoot_def locateSlot_conv returnOk_liftE[symmetric] const_def getSlotCap_def) apply (simp only: liftE_bindE_assoc) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_rdonly_bind_l) + apply (rule monadic_rewrite_bind_alternative_l) apply (wp | simp)+ apply (rule_tac fn="case_sum Inl (Inr \ fst)" in monadic_rewrite_split_fn) apply (simp add: liftME_liftM[symmetric] liftME_def bindE_assoc) apply (rule monadic_rewrite_refl) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: isRight_right_map isRight_case_sum) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_rdonly_bind_l[OF lookupIPC_inv]) - apply (rule monadic_rewrite_symb_exec_l[OF lookupIPC_inv empty_fail_lookupIPCBuffer]) + apply (rule monadic_rewrite_bind_alternative_l[OF lookupIPC_inv]) + apply monadic_rewrite_symb_exec_l apply (simp add: lookupExtraCaps_null returnOk_bind liftE_bindE_handle bind_assoc liftE_bindE_assoc decodeInvocation_def Let_def from_bool_0 performInvocation_def liftE_handle liftE_bind) - 
apply (rule monadic_rewrite_symb_exec_r [OF getEndpoint_inv no_fail_getEndpoint]) + apply monadic_rewrite_symb_exec_r apply (rename_tac "send_ep") - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: getThreadVSpaceRoot_def locateSlot_conv) - apply (rule monadic_rewrite_symb_exec_r [OF getCTE_inv no_fail_getCTE]) + apply monadic_rewrite_symb_exec_r apply (rename_tac "pdCapCTE") - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r[OF curDomain_inv], - simp only: curDomain_def, rule non_fail_gets) - apply (rename_tac "curDom") - apply (rule monadic_rewrite_symb_exec_r [OF threadGet_inv no_fail_threadGet])+ - apply (rename_tac curPrio destPrio) + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r apply (simp add: isHighestPrio_def') - apply (rule monadic_rewrite_symb_exec_r [OF gets_inv non_fail_gets]) - apply (rename_tac highest) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r [OF gets_inv non_fail_gets]) - apply (rename_tac asidMap) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - - apply (rule monadic_rewrite_symb_exec_r[OF threadGet_inv no_fail_threadGet]) - apply (rename_tac "destDom") - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (rule monadic_rewrite_trans, rule monadic_rewrite_pick_alternative_1) - apply (rule monadic_rewrite_symb_exec_l[OF get_mrs_inv' empty_fail_getMRs]) + apply monadic_rewrite_symb_exec_l (* now committed to fastpath *) apply (rule monadic_rewrite_trans) - apply (rule_tac F=True and E=True in monadic_rewrite_weaken) + apply (rule_tac F=True and E=True in monadic_rewrite_weaken_flags) apply simp apply (rule monadic_rewrite_bind_tail) - apply (rule_tac x=thread in monadic_rewrite_symb_exec, - (wp empty_fail_getCurThread)+) - apply (simp add: sendIPC_def bind_assoc) - apply (rule_tac x=send_ep in monadic_rewrite_symb_exec, - (wp empty_fail_getEndpoint getEndpoint_obj_at')+) - apply (rule_tac P="epQueue send_ep \ []" in monadic_rewrite_gen_asm) - apply (simp add: isRecvEP_endpoint_case list_case_helper bind_assoc) - apply (rule monadic_rewrite_bind_tail) - apply (elim conjE) - apply (rule monadic_rewrite_bind_tail, rename_tac dest_st) - apply (rule_tac P="\gr. 
dest_st = BlockedOnReceive (capEPPtr (fst (theRight rv))) gr" - in monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_symb_exec2, (wp | simp)+) - apply (rule monadic_rewrite_bind) - apply clarsimp - apply (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite) - apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l_known thread) + apply (simp add: sendIPC_def bind_assoc) + apply (monadic_rewrite_symb_exec_l_known send_ep) + apply (rule_tac P="epQueue send_ep \ []" in monadic_rewrite_gen_asm) + apply (simp add: isRecvEP_endpoint_case list_case_helper bind_assoc) + apply (rule monadic_rewrite_bind_tail) + apply (elim conjE) + apply (rule monadic_rewrite_bind_tail, rename_tac dest_st) + apply (rule_tac P="\gr. dest_st = BlockedOnReceive (capEPPtr (fst (theRight rv))) gr" + in monadic_rewrite_gen_asm) + apply monadic_rewrite_symb_exec_l_drop apply (rule monadic_rewrite_bind) - apply (rule_tac destPrio=destPrio - and curDom=curDom and destDom=destDom and thread=thread - in possibleSwitchTo_rewrite) - apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_trans) - apply (rule setupCallerCap_rewrite) - apply (rule monadic_rewrite_bind_head) - apply (rule setThreadState_rewrite_simple, simp) - apply (rule monadic_rewrite_trans) - apply (rule_tac x=BlockedOnReply in monadic_rewrite_symb_exec, - (wp empty_fail_getThreadState)+) - apply simp - apply (rule monadic_rewrite_refl) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_head) - apply (rule_tac t="hd (epQueue send_ep)" - in schedule_rewrite_ct_not_runnable') - apply (simp add: bind_assoc) - apply (rule monadic_rewrite_bind_tail) + apply clarsimp + apply (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_bind) + apply (rule_tac destPrio=destPrio + and curDom=curDom and destDom=destDom and thread=thread + in possibleSwitchTo_rewrite) apply (rule monadic_rewrite_bind) - apply (rule switchToThread_rewrite) - apply (rule monadic_rewrite_bind) - apply (rule activateThread_simple_rewrite) - apply (rule monadic_rewrite_refl) - apply wp - apply (wp setCurThread_ct_in_state) - apply (simp only: st_tcb_at'_def[symmetric]) - apply (wp, clarsimp simp: cur_tcb'_def ct_in_state'_def) - apply (simp add: getThreadCallerSlot_def getThreadReplySlot_def - locateSlot_conv ct_in_state'_def cur_tcb'_def) - - apply ((wp assert_inv threadSet_pred_tcb_at_state - cteInsert_obj_at'_not_queued - | wps)+)[1] - - apply (wp fastpathBestSwitchCandidate_lift[where f="cteInsert c w w'" for c w w']) + apply (rule monadic_rewrite_trans) + apply (rule setupCallerCap_rewrite) + apply (rule monadic_rewrite_bind_head) + apply (rule setThreadState_rewrite_simple, simp) + apply (rule monadic_rewrite_trans) + apply (monadic_rewrite_symb_exec_l_known BlockedOnReply) + apply simp + apply (rule monadic_rewrite_refl) + apply wpsimp + apply (rule monadic_rewrite_trans) + apply (rule monadic_rewrite_bind_head) + apply (rule_tac t="hd (epQueue send_ep)" + in schedule_rewrite_ct_not_runnable') + apply (simp add: bind_assoc) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_bind) + apply (rule switchToThread_rewrite) + apply (rule monadic_rewrite_bind) + apply (rule activateThread_simple_rewrite) + apply (rule monadic_rewrite_refl) + apply wp + apply (wp setCurThread_ct_in_state) + apply (simp only: st_tcb_at'_def[symmetric]) + apply (wp, clarsimp simp: cur_tcb'_def ct_in_state'_def) + apply (simp add: getThreadCallerSlot_def 
getThreadReplySlot_def + locateSlot_conv ct_in_state'_def cur_tcb'_def) + + apply ((wp assert_inv threadSet_pred_tcb_at_state + cteInsert_obj_at'_not_queued + | wps)+)[1] + + apply (wp fastpathBestSwitchCandidate_lift[where f="cteInsert c w w'" for c w w']) + apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] + apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] - apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] - apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1] - apply (wp fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t]) - apply simp - apply ((wp assert_inv threadSet_pred_tcb_at_state - cteInsert_obj_at'_not_queued - | wps)+)[1] - apply (simp add: setSchedulerAction_def) - apply wp[1] - apply (simp cong: if_cong HOL.conj_cong add: if_bool_simps) - apply (simp_all only:)[5] - apply ((wp setThreadState_oa_queued[of _ "\a _ _. \ a"] - setThreadState_obj_at_unchanged - asUser_obj_at_unchanged mapM_x_wp' - sts_st_tcb_at'_cases - setThreadState_no_sch_change - setEndpoint_obj_at_tcb' - fastpathBestSwitchCandidate_lift[where f="setThreadState f t" for f t] - setThreadState_oa_queued - fastpathBestSwitchCandidate_lift[where f="asUser t f" for f t] - fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] - lookupBitmapPriority_lift - setThreadState_runnable_bitmap_inv - | simp add: setMessageInfo_def - | wp (once) hoare_vcg_disj_lift)+) - + apply (wpsimp wp: fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t]) + apply ((wp assert_inv threadSet_pred_tcb_at_state + cteInsert_obj_at'_not_queued + | wps)+)[1] + apply (simp add: setSchedulerAction_def) + apply wp[1] + apply (simp cong: if_cong HOL.conj_cong add: if_bool_simps) + apply (simp_all only:)[5] + apply ((wp asUser_obj_at_unchanged mapM_x_wp' + sts_st_tcb_at'_cases + setThreadState_no_sch_change + setEndpoint_obj_at_tcb' + fastpathBestSwitchCandidate_lift[where f="setThreadState f t" for f t] + fastpathBestSwitchCandidate_lift[where f="asUser t f" for f t] + fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] + lookupBitmapPriority_lift + setThreadState_runnable_bitmap_inv + getEndpoint_obj_at' + | simp add: setMessageInfo_def obj_at'_conj + | wp (once) hoare_vcg_disj_lift)+) apply (simp add: setThreadState_runnable_simp getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv bind_assoc) @@ -730,23 +717,24 @@ lemma fastpath_callKernel_SysCall_corres: apply (rename_tac destState) apply (simp add: ARM_HYP_H.switchToThread_def getTCB_threadGet bind_assoc) - (* retrieving state or thread registers is not thread_action_isolatable, - translate into return with suitable precondition *) + (* retrieving state or thread registers is not thread_action_isolatable, + translate into return with suitable precondition *) apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) apply (rule_tac v=destState in monadic_rewrite_getThreadState | rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ + apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ apply (rule_tac v=destState in monadic_rewrite_getThreadState | rule monadic_rewrite_bind 
monadic_rewrite_refl)+ - apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ + apply (wp mapM_x_wp' getObject_inv | wpc | simp | wp (once) hoare_drop_imps)+ apply (rule_tac P="inj (case_bool thread (hd (epQueue send_ep)))" in monadic_rewrite_gen_asm) apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_weaken[where F=False and E=True], simp) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) apply (rule isolate_thread_actions_rewrite_bind fastpath_isolate_rewrites fastpath_isolatables - bool.simps setRegister_simple + bool.simps setRegister_simple_modify_registers + zipWithM_setRegister_simple_modify_registers threadGet_vcpu_isolatable[THEN thread_actions_isolatableD, simplified o_def] threadGet_vcpu_isolatable[simplified o_def] vcpuSwitch_isolatable[THEN thread_actions_isolatableD] vcpuSwitch_isolatable @@ -754,13 +742,12 @@ lemma fastpath_callKernel_SysCall_corres: doMachineOp_isolatable[THEN thread_actions_isolatableD] doMachineOp_isolatable kernelExitAssertions_isolatable[THEN thread_actions_isolatableD] kernelExitAssertions_isolatable - zipWithM_setRegister_simple thread_actions_isolatable_bind | assumption | wp assert_inv)+ apply (rule_tac P="\s. ksSchedulerAction s = ResumeCurrentThread \ tcb_at' thread s" - and F=True and E=False in monadic_rewrite_weaken) + and F=True and E=False in monadic_rewrite_weaken_flags) apply simp apply (rule monadic_rewrite_isolate_final) apply (simp add: isRight_case_sum cong: list.case_cong) @@ -827,8 +814,6 @@ lemma fastpath_callKernel_SysCall_corres: prefer 2 apply normalise_obj_at' apply clarsimp - apply (frule_tac t="blockedThread" in valid_queues_not_runnable_not_queued, assumption) - subgoal by (fastforce simp: st_tcb_at'_def elim: obj_at'_weakenE) apply (subgoal_tac "fastpathBestSwitchCandidate blockedThread s") prefer 2 apply (rule_tac ttcb=tcbb and ctcb=tcb in fastpathBestSwitchCandidateI) @@ -837,6 +822,9 @@ lemma fastpath_callKernel_SysCall_corres: apply (clarsimp simp: st_tcb_at'_def obj_at'_def objBits_simps projectKOs valid_mdb'_def valid_mdb_ctes_def inj_case_bool split: bool.split)+ + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x=blockedThread in spec) + apply (clarsimp simp: obj_at'_def projectKOs st_tcb_at'_def objBits_simps) done lemma capability_case_Null_ReplyCap: @@ -872,11 +860,9 @@ lemma doReplyTransfer_simple: od)" apply (simp add: doReplyTransfer_def liftM_def nullPointer_def getSlotCap_def) apply (rule monadic_rewrite_bind_tail)+ - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_threadGet)+) - apply (rule_tac P="rv = None" in monadic_rewrite_gen_asm, simp) + apply (monadic_rewrite_symb_exec_l_known None, simp) apply (rule monadic_rewrite_refl) - apply (wp threadGet_const gts_wp' getCTE_wp')+ - apply (simp add: o_def) + apply (wpsimp wp: threadGet_const gts_wp' getCTE_wp' simp: o_def)+ done lemma receiveIPC_simple_rewrite: @@ -888,44 +874,39 @@ lemma receiveIPC_simple_rewrite: setThreadState (BlockedOnReceive (capEPPtr ep_cap) (capEPCanGrant ep_cap)) thread; setEndpoint (capEPPtr ep_cap) (RecvEP (case ep of RecvEP q \ (q @ [thread]) | _ \ [thread])) od)" + supply empty_fail_getEndpoint[wp] apply (rule monadic_rewrite_gen_asm) apply (simp add: receiveIPC_def) - apply (rule monadic_rewrite_imp) - apply (rule_tac rv=ep in monadic_rewrite_symb_exec_l_known, - (wp empty_fail_getEndpoint)+) - apply (rule monadic_rewrite_symb_exec_l, (wp | simp add: getBoundNotification_def)+) - apply (rule 
monadic_rewrite_symb_exec_l) - apply (rule hoare_pre, wpc, wp+, simp) - apply (simp split: option.split) - apply (rule monadic_rewrite_trans, rule monadic_rewrite_if_known[where X=False], simp) - apply (rule monadic_rewrite_refl3[where P=\]) - apply (cases ep, simp_all add: isSendEP_def)[1] - apply (wp getNotification_wp gbn_wp' getEndpoint_wp | wpc)+ + apply (monadic_rewrite_symb_exec_l_known ep) + apply monadic_rewrite_symb_exec_l+ + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + apply (rule monadic_rewrite_is_refl) + apply (cases ep; simp add: isSendEP_def) + apply (wpsimp wp: getNotification_wp gbn_wp' getEndpoint_wp + simp: getBoundNotification_def)+ apply (clarsimp simp: obj_at'_def projectKOs pred_tcb_at'_def) done lemma empty_fail_isFinalCapability: "empty_fail (isFinalCapability cte)" - by (simp add: isFinalCapability_def Let_def split: if_split) + by (simp add: isFinalCapability_def Let_def empty_fail_cond split: if_split) lemma cteDeleteOne_replycap_rewrite: "monadic_rewrite True False (cte_wp_at' (\cte. isReplyCap (cteCap cte)) slot) (cteDeleteOne slot) (emptySlot slot NullCap)" + supply isFinalCapability_inv[wp] empty_fail_isFinalCapability[wp] (* FIXME *) apply (simp add: cteDeleteOne_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) - apply (rule_tac P="cteCap rv \ NullCap \ isReplyCap (cteCap rv) - \ \ isEndpointCap (cteCap rv) - \ \ isNotificationCap (cteCap rv)" - in monadic_rewrite_gen_asm) - apply (simp add: finaliseCapTrue_standin_def - capRemovable_def) - apply (rule monadic_rewrite_symb_exec_l, - (wp isFinalCapability_inv empty_fail_isFinalCapability)+) - apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp')+ + apply (rule monadic_rewrite_symb_exec_l) + apply (rule_tac P="cteCap cte \ NullCap \ isReplyCap (cteCap cte) + \ \ isEndpointCap (cteCap cte) + \ \ isNotificationCap (cteCap cte)" + in monadic_rewrite_gen_asm) + apply (simp add: finaliseCapTrue_standin_def capRemovable_def) + apply monadic_rewrite_symb_exec_l + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp')+ apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) done @@ -934,14 +915,10 @@ lemma cteDeleteOne_nullcap_rewrite: (cte_wp_at' (\cte. 
cteCap cte = NullCap) slot) (cteDeleteOne slot) (return ())" - apply (simp add: cteDeleteOne_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) - apply (rule_tac P="cteCap rv = NullCap" in monadic_rewrite_gen_asm) - apply simp - apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp') - apply (clarsimp simp: cte_wp_at_ctes_of) + apply (simp add: cteDeleteOne_def unless_def when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: getCTE_wp'\) + apply (monadic_rewrite_symb_exec_l, rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ done lemma deleteCallerCap_nullcap_rewrite: @@ -951,12 +928,9 @@ lemma deleteCallerCap_nullcap_rewrite: (return ())" apply (simp add: deleteCallerCap_def getThreadCallerSlot_def locateSlot_conv getSlotCap_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) - apply (rule monadic_rewrite_assert) - apply (rule cteDeleteOne_nullcap_rewrite) - apply (wp getCTE_wp) - apply (clarsimp simp: cte_wp_at_ctes_of) + apply (monadic_rewrite_l cteDeleteOne_nullcap_rewrite \wpsimp wp: getCTE_wp\) + apply (monadic_rewrite_symb_exec_l+, rule monadic_rewrite_refl) + apply (wpsimp simp: cte_wp_at_ctes_of)+ done lemma emptySlot_cnode_caps: @@ -967,7 +941,7 @@ lemma emptySlot_cnode_caps: o_assoc[symmetric] cteCaps_of_def[symmetric]) apply (wp emptySlot_cteCaps_of) apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of - elim!: rsubst[where P=P] intro!: ext + elim!: rsubst[where P=P] del: ext intro!: ext split: if_split) done @@ -997,36 +971,26 @@ lemma setCTE_obj_at_ntfn[wp]: crunch obj_at_ep[wp]: emptySlot "obj_at' (P :: endpoint \ bool) p" -crunch nosch[wp]: emptySlot "\s. P (ksSchedulerAction s)" - crunches emptySlot, asUser for gsCNodes[wp]: "\s. P (gsCNodes s)" (wp: crunch_wps) -crunch cte_wp_at'[wp]: possibleSwitchTo "cte_wp_at' P p" - (wp: hoare_drop_imps) - crunch tcbContext[wp]: possibleSwitchTo "obj_at' (\tcb. P ( (atcbContextGet o tcbArch) tcb)) t" (wp: crunch_wps simp_del: comp_apply) crunch only_cnode_caps[wp]: doFaultTransfer "\s. 
P (only_cnode_caps (ctes_of s))" (wp: crunch_wps simp: crunch_simps) -lemma tcbSchedDequeue_rewrite_not_queued: "monadic_rewrite True False (tcb_at' t and obj_at' (Not \ tcbQueued) t) (tcbSchedDequeue t) (return ())" +(* FIXME: monadic_rewrite_l does not work with stateAssert here *) +lemma tcbSchedDequeue_rewrite_not_queued: + "monadic_rewrite True False (tcb_at' t and obj_at' (Not \ tcbQueued) t) + (tcbSchedDequeue t) (return ())" apply (simp add: tcbSchedDequeue_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ queued" in monadic_rewrite_gen_asm) - apply (simp add: when_def) + apply wp_pre + apply monadic_rewrite_symb_exec_l + apply (monadic_rewrite_symb_exec_l_known False, simp) apply (rule monadic_rewrite_refl) - apply (wp threadGet_const) - - apply (rule monadic_rewrite_symb_exec_l) - apply wp+ - apply (rule monadic_rewrite_refl) - apply (wp) - apply (clarsimp simp: o_def obj_at'_def) + apply (wpsimp wp: threadGet_const)+ done lemma schedule_known_rewrite: @@ -1045,60 +1009,31 @@ lemma schedule_known_rewrite: supply subst_all[simp del] if_split[split del] apply (simp add: schedule_def) apply (simp only: Thread_H.switchToThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="action = SwitchToThread t" in monadic_rewrite_gen_asm, simp) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ wasRunnable \ action = SwitchToThread t" in monadic_rewrite_gen_asm,simp) - apply (rule monadic_rewrite_bind_tail, rename_tac idleThread) - apply (rule monadic_rewrite_bind_tail, rename_tac targetPrio) - apply (rule monadic_rewrite_bind_tail, rename_tac curPrio) - apply (rule monadic_rewrite_bind_tail, rename_tac fastfail) - apply (rule monadic_rewrite_bind_tail, rename_tac curDom) - apply (rule monadic_rewrite_bind_tail, rename_tac highest) - apply (rule_tac P="\ (fastfail \ \ highest)" in monadic_rewrite_gen_asm, simp only:) - apply simp - apply (simp add: bind_assoc) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_trans) - apply (rule tcbSchedDequeue_rewrite_not_queued) - apply (rule monadic_rewrite_refl) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_refl) - apply (wpsimp wp: Arch_switchToThread_obj_at_pre)+ - apply (wp hoare_vcg_imp_lift)+ - apply (simp add: isHighestPrio_def') - apply wp+ - apply (wp hoare_vcg_disj_lift) - apply (wp scheduleSwitchThreadFastfail_False_wp) - apply wp+ - apply (wp hoare_vcg_disj_lift threadGet_wp'') - apply (wp hoare_vcg_disj_lift threadGet_wp'') - apply clarsimp - apply wp - apply (simp add: comp_def) - apply wp - apply wp - apply wp - (* remove no-ops, somewhat by magic *) - apply (rule monadic_rewrite_symb_exec_l'_TT, solves wp, - wpsimp wp: empty_fail_isRunnable simp: isHighestPrio_def')+ - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_l) - apply simp+ - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp+ - apply (clarsimp simp: ct_in_state'_def) - apply (rule conjI) - apply (rule not_pred_tcb_at'_strengthen, assumption) - apply normalise_obj_at' - apply (simp add: fastpathBestSwitchCandidate_def) + (* switching to t *) + apply (monadic_rewrite_l sched_act_SwitchToThread_rewrite[where t=t]) + (* not wasRunnable, skip enqueue *) + apply (simp 
add: when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + (* fastpath: \ (fastfail \ \ highest) *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + (* fastpath: no scheduleChooseNewThread *) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp simp: isHighestPrio_def' + wp: hoare_vcg_imp_lift hoare_vcg_disj_lift threadGet_wp'' + scheduleSwitchThreadFastfail_False_wp\) + apply (simp add: bind_assoc) + apply (monadic_rewrite_l tcbSchedDequeue_rewrite_not_queued + \wpsimp wp: Arch_switchToThread_obj_at_pre\) + (* remove no-ops *) + apply simp + apply (repeat 13 \rule monadic_rewrite_symb_exec_l\) (* until switchToThread *) + apply (rule monadic_rewrite_refl) + apply (wpsimp simp: isHighestPrio_def')+ + apply (clarsimp simp: ct_in_state'_def not_pred_tcb_at'_strengthen + fastpathBestSwitchCandidate_def) apply normalise_obj_at' done @@ -1123,7 +1058,6 @@ lemma emptySlot_cte_wp_at_cteCap: lemma setEndpoint_getCTE_pivot[unfolded K_bind_def]: "do setEndpoint p val; v <- getCTE slot; f v od = do v <- getCTE slot; setEndpoint p val; f v od" - supply word_neq_0_conv[simp del] apply (simp add: getCTE_assert_opt setEndpoint_def setObject_modify_assert fun_eq_iff bind_assoc) @@ -1138,7 +1072,7 @@ lemma setEndpoint_setCTE_pivot[unfolded K_bind_def]: supply if_split[split del] apply (rule monadic_rewrite_to_eq) apply simp - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans, rule_tac f="ep_at' p" in monadic_rewrite_add_gets) apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets, @@ -1163,11 +1097,12 @@ lemma setEndpoint_setCTE_pivot[unfolded K_bind_def]: | simp)+ apply (rule_tac P="\s. epat = ep_at' p s \ cteat = real_cte_at' slot s \ tcbat = (tcb_at' (slot && ~~ mask 9) and (%y. 
slot && mask 9 : dom tcb_cte_cases)) s" - in monadic_rewrite_refl3) + in monadic_rewrite_pre_imp_eq) apply (simp add: setEndpoint_def setObject_modify_assert bind_assoc exec_gets assert_def exec_modify split: if_split) apply (auto split: if_split simp: obj_at'_def projectKOs objBits_defs + del: ext intro!: arg_cong[where f=f] ext kernel_state.fold_congs)[1] apply wp+ apply (simp add: objBits_defs) @@ -1232,17 +1167,12 @@ lemma emptySlot_setEndpoint_pivot[unfolded K_bind_def]: lemma set_getCTE[unfolded K_bind_def]: "do setCTE p cte; v <- getCTE p; f v od = do setCTE p cte; f cte od" - apply simp + apply (simp add: getCTE_assert_opt bind_assoc) apply (rule monadic_rewrite_to_eq) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_bind_tail) - apply (simp add: getCTE_assert_opt bind_assoc) - apply (rule monadic_rewrite_trans, - rule_tac rv="Some cte" in monadic_rewrite_gets_known) - apply (simp add: assert_opt_def) - apply (rule monadic_rewrite_refl) - apply wp - apply simp + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l) + apply (monadic_rewrite_symb_exec_l_known cte, rule monadic_rewrite_refl) + apply (wpsimp simp: assert_opt_def wp: gets_wp)+ done lemma set_setCTE[unfolded K_bind_def]: @@ -1250,7 +1180,7 @@ lemma set_setCTE[unfolded K_bind_def]: supply if_split[split del] apply simp apply (rule monadic_rewrite_to_eq) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans, rule_tac f="real_cte_at' p" in monadic_rewrite_add_gets) apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets, @@ -1276,9 +1206,10 @@ lemma set_setCTE[unfolded K_bind_def]: (\ getF setF. tcb_cte_cases (p && mask 9) = Some (getF, setF) \ (\ f g tcb. setF f (setF g tcb) = setF (f o g) tcb)))" in monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_refl2) + apply (rule monadic_rewrite_is_refl[OF ext]) apply (simp add: exec_modify split: if_split) apply (auto simp: simpler_modify_def projectKO_opt_tcb objBits_defs + del: ext intro!: kernel_state.fold_congs ext split: if_split)[1] apply wp+ @@ -1289,7 +1220,7 @@ lemma set_setCTE[unfolded K_bind_def]: lemma setCTE_updateCapMDB: "p \ 0 \ setCTE p cte = do updateCap p (cteCap cte); updateMDB p (const (cteMDBNode cte)) od" - supply if_split[split del] word_neq_0_conv[simp del] + supply if_split[split del] apply (simp add: updateCap_def updateMDB_def bind_assoc set_getCTE cte_overwrite set_setCTE) apply (simp add: getCTE_assert_opt setCTE_assert_modify bind_assoc) @@ -1308,13 +1239,9 @@ lemma clearUntypedFreeIndex_simple_rewrite: apply (simp add: clearUntypedFreeIndex_def getSlotCap_def) apply (rule monadic_rewrite_name_pre) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (rule monadic_rewrite_imp) - apply (rule_tac rv=cte in monadic_rewrite_symb_exec_l_known, wp+) - apply (simp split: capability.split, - strengthen monadic_rewrite_refl, simp) - apply clarsimp - apply (wp getCTE_wp') - apply (clarsimp simp: cte_wp_at_ctes_of) + apply (monadic_rewrite_symb_exec_l_known cte) + apply (simp split: capability.split, strengthen monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ done lemma emptySlot_replymaster_rewrite[OF refl]: @@ -1333,57 +1260,48 @@ lemma emptySlot_replymaster_rewrite[OF refl]: o mdbRevocable_update (K True)); setCTE slot makeObject od)" - supply if_split[split del] word_neq_0_conv[simp del] + supply if_split[split del] apply (rule monadic_rewrite_gen_asm)+ - apply (rule monadic_rewrite_imp) + apply (rule 
monadic_rewrite_guard_imp) apply (rule_tac P="slot \ 0" in monadic_rewrite_gen_asm) apply (clarsimp simp: emptySlot_def setCTE_updateCapMDB) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_head) - apply (rule clearUntypedFreeIndex_simple_rewrite) - apply simp - apply (rule_tac rv=cte in monadic_rewrite_symb_exec_l_known, (wp empty_fail_getCTE)+) + apply (monadic_rewrite_l clearUntypedFreeIndex_simple_rewrite, simp) + apply (monadic_rewrite_symb_exec_l_known cte) apply (simp add: updateMDB_def Let_def bind_assoc makeObject_cte case_Null_If) apply (rule monadic_rewrite_bind_tail) apply (rule monadic_rewrite_bind) apply (rule_tac P="mdbFirstBadged (cteMDBNode ctea) \ mdbRevocable (cteMDBNode ctea)" in monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_refl2) + apply (rule monadic_rewrite_is_refl) apply (case_tac ctea, rename_tac mdbnode, case_tac mdbnode) apply simp apply (simp add: Retype_H.postCapDeletion_def) apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp')+ + apply (solves wp | wp getCTE_wp')+ apply (clarsimp simp: cte_wp_at_ctes_of reply_masters_rvk_fb_def) apply (fastforce simp: isCap_simps) done lemma all_prio_not_inQ_not_tcbQueued: "\ obj_at' (\a. (\d p. \ inQ d p a)) t s \ \ obj_at' (\a. \ tcbQueued a) t s" apply (clarsimp simp: obj_at'_def inQ_def) -done + done crunches setThreadState, emptySlot, asUser for ntfn_obj_at[wp]: "obj_at' (P::(Structures_H.notification \ bool)) ntfnptr" (wp: obj_at_setObject2 crunch_wps simp: crunch_simps updateObject_default_def in_monad) -lemma st_tcb_at_is_Reply_imp_not_tcbQueued: "\s t.\ invs' s; st_tcb_at' isReply t s\ \ obj_at' (\a. \ tcbQueued a) t s" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def st_tcb_at'_def valid_queues_no_bitmap_def) - apply (rule all_prio_not_inQ_not_tcbQueued) - apply (clarsimp simp: obj_at'_def) - apply (erule_tac x="d" in allE) - apply (erule_tac x="p" in allE) - apply (erule conjE) - apply (erule_tac x="t" in ballE) - apply (clarsimp simp: obj_at'_def runnable'_def isReply_def) - apply (case_tac "tcbState obj") - apply ((clarsimp simp: inQ_def)+)[8] - apply (clarsimp simp: valid_queues'_def obj_at'_def) -done +lemma st_tcb_at_is_Reply_imp_not_tcbQueued: + "\s t. \ ready_qs_runnable s; st_tcb_at' isReply t s\ \ obj_at' (\tcb. \ tcbQueued tcb) t s" + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x=t in spec) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def isReply_def) + apply (case_tac "tcbState obj"; clarsimp) + done lemma valid_objs_ntfn_at_tcbBoundNotification: "ko_at' tcb t s \ valid_objs' s \ tcbBoundNotification tcb \ None - \ ntfn_at' (the (tcbBoundNotification tcb)) s" + \ ntfn_at' (the (tcbBoundNotification tcb)) s" apply (drule(1) ko_at_valid_objs', simp add: projectKOs) apply (simp add: valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def) apply clarsimp @@ -1407,7 +1325,7 @@ lemma resolveAddressBitsFn_eq_name_slot: \ valid_objs' s \ cnode_caps_gsCNodes' s) (resolveAddressBits cap capptr bits) (gets (resolveAddressBitsFn cap capptr bits o only_cnode_caps o ctes_of))" - apply (rule monadic_rewrite_imp, rule resolveAddressBitsFn_eq) + apply (rule monadic_rewrite_guard_imp, rule resolveAddressBitsFn_eq) apply auto done @@ -1434,7 +1352,7 @@ lemma tcbSchedEnqueue_tcbIPCBuffer: "\obj_at' (\tcb. P (tcbIPCBuffer tcb)) t\ tcbSchedEnqueue t' \\_. obj_at' (\tcb. 
P (tcbIPCBuffer tcb)) t\" - apply (simp add: tcbSchedEnqueue_def unless_when) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_when) apply (wp threadSet_obj_at' hoare_drop_imps threadGet_wp |simp split: if_split)+ done @@ -1456,18 +1374,33 @@ end crunch obj_at'_tcbIPCBuffer[wp]: emptySlot "obj_at' (\tcb. P (tcbIPCBuffer tcb)) t" (wp: crunch_wps) +(* FIXME move *) +crunches getBoundNotification + for (no_fail) no_fail[intro!, wp, simp] + +lemma threadSet_tcb_at'[wp]: + "threadSet f t' \\s. P (tcb_at' addr s)\" + apply (wpsimp wp: threadSet_wp) + apply (erule rsubst[where P=P]) + by (clarsimp simp: obj_at'_def projectKOs ps_clear_upd objBits_simps) + +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for tcb''[wp]: "\s. P (tcb_at' addr s)" + (wp: crunch_wps) + lemma fastpath_callKernel_SysReplyRecv_corres: "monadic_rewrite True False (invs' and ct_in_state' ((=) Running) and (\s. ksSchedulerAction s = ResumeCurrentThread) - and cnode_caps_gsCNodes') + and cnode_caps_gsCNodes' and ready_qs_runnable) (callKernel (SyscallEvent SysReplyRecv)) (fastpaths SysReplyRecv)" - including no_pre + including classic_wp_pre supply if_cong[cong] option.case_cong[cong] - supply word_neq_0_conv[simp del] supply if_split[split del] - apply (rule monadic_rewrite_introduce_alternative) - apply ( simp add: callKernel_def) - apply (rule monadic_rewrite_imp) + supply user_getreg_inv[wp] (* FIXME *) + apply (rule monadic_rewrite_introduce_alternative[OF callKernel_def[simplified atomize_eq]]) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_bind_alternative_l, wpsimp) + apply (rule monadic_rewrite_stateAssert) apply (simp add: handleEvent_def handleReply_def handleRecv_def liftE_bindE_handle liftE_handle bind_assoc getMessageInfo_def liftE_bind) @@ -1478,17 +1411,16 @@ lemma fastpath_callKernel_SysReplyRecv_corres: locateSlot_conv capability_case_Null_ReplyCap getThreadCSpaceRoot_def cong: if_cong) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac thread msgInfo) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac cptr) - apply (rule monadic_rewrite_symb_exec_r[OF threadGet_inv no_fail_threadGet]) + apply monadic_rewrite_symb_exec_r + apply (rename_tac msgInfo) + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r apply (rename_tac tcbFault) - apply (rule monadic_rewrite_alternative_rhs[rotated]) + apply (rule monadic_rewrite_alternative_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: lookupCap_def liftME_def lookupCapAndSlot_def lookupSlotForThread_def bindE_assoc @@ -1497,18 +1429,15 @@ lemma fastpath_callKernel_SysReplyRecv_corres: capFaultOnFailure_def rethrowFailure_injection injection_handler_catch bind_bindE_assoc getThreadCallerSlot_def bind_assoc - getSlotCap_def - case_bool_If o_def + getSlotCap_def case_bool_If isRight_def[where x="Inr v" for v] isRight_def[where x="Inl v" for v] cong: if_cong) - apply (rule monadic_rewrite_symb_exec_r, wp+) + apply monadic_rewrite_symb_exec_r apply (rename_tac "cTableCTE") - apply (rule monadic_rewrite_transverse, - rule monadic_rewrite_bind_head, - rule resolveAddressBitsFn_eq) - apply (rule monadic_rewrite_symb_exec_r, (wp | simp)+) + 
monadic_rewrite_l resolveAddressBitsFn_eq wpsimp, rule monadic_rewrite_refl) + apply monadic_rewrite_symb_exec_r apply (rename_tac "rab_ret") apply (rule_tac P="isRight rab_ret" in monadic_rewrite_cases[rotated]) @@ -1517,61 +1446,53 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply clarsimp apply (simp add: isRight_case_sum liftE_bind isRight_def[where x="Inr v" for v]) - apply (rule monadic_rewrite_symb_exec_r, wp+) + apply monadic_rewrite_symb_exec_r apply (rename_tac ep_cap) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r[OF _ _ _ active_ntfn_check_wp, unfolded bind_assoc fun_app_def]) - apply (rule hoare_pre, (wp | wpc | simp)+)[1] - apply (unfold getBoundNotification_def)[1] - apply (wp threadGet_wp) + apply (monadic_rewrite_symb_exec + \rule monadic_rewrite_symb_exec_r_nE[OF _ _ _ active_ntfn_check_wp, unfolded bind_assoc fun_app_def]\ + \wpsimp simp: getBoundNotification_def wp: threadGet_wp\) apply (rename_tac ep) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r, wp+) + apply monadic_rewrite_symb_exec_r apply (rename_tac ep) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_rdonly_bind_l, wp) + apply (rule monadic_rewrite_bind_alternative_l, wp) apply (rule monadic_rewrite_bind_tail) apply (rename_tac replyCTE) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: bind_assoc) - apply (rule monadic_rewrite_rdonly_bind_l, wp assert_inv) + apply (rule monadic_rewrite_bind_alternative_l, wp assert_inv) apply (rule monadic_rewrite_assert) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac callerFault) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (simp add: getThreadVSpaceRoot_def locateSlot_conv) - apply (rule monadic_rewrite_symb_exec_r, wp+) + apply monadic_rewrite_symb_exec_r apply (rename_tac vTableCTE) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r[OF curDomain_inv], - simp only: curDomain_def, rule non_fail_gets) - apply (rename_tac "curDom") - apply (rule monadic_rewrite_symb_exec_r - [OF threadGet_inv no_fail_threadGet]) - apply (rename_tac callerPrio) + apply monadic_rewrite_symb_exec_r + apply monadic_rewrite_symb_exec_r apply (simp add: isHighestPrio_def') - apply (rule monadic_rewrite_symb_exec_r [OF gets_inv non_fail_gets]) - apply (rename_tac highest) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac asidMap) - apply (rule monadic_rewrite_if_rhs[rotated]) + apply monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) - apply (rule monadic_rewrite_symb_exec_r[OF threadGet_inv no_fail_threadGet]) - apply (rename_tac "callerDom") - apply (rule monadic_rewrite_if_rhs[rotated]) + apply 
monadic_rewrite_symb_exec_r + apply (rule monadic_rewrite_if_r[rotated]) apply (rule monadic_rewrite_alternative_l) apply (rule monadic_rewrite_trans, rule monadic_rewrite_pick_alternative_1) + (* now committed to fastpath *) apply (rule_tac P="\v. obj_at' (%tcb. tcbIPCBuffer tcb = v) (capTCBPtr (cteCap replyCTE))" in monadic_rewrite_exists_v) apply (rename_tac ipcBuffer) @@ -1579,13 +1500,13 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (simp add: ARM_HYP_H.switchToThread_def bind_assoc) apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp mapM_x_wp' getObject_inv | wpc | simp add: - | wp (once) hoare_drop_imps )+ + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp mapM_x_wp' getObject_inv | wpc | simp add: + | wp (once) hoare_drop_imps )+ - apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp - | wp (once) hoare_drop_imps )+ + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp + | wp (once) hoare_drop_imps )+ apply (rule monadic_rewrite_trans) apply (rule monadic_rewrite_trans) @@ -1593,15 +1514,14 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (rule monadic_rewrite_trans) apply (rule doReplyTransfer_simple) apply simp - apply (((rule monadic_rewrite_weaken2, + apply (((rule monadic_rewrite_weaken_flags', (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite | rule_tac destPrio=callerPrio and curDom=curDom and destDom=callerDom and thread=thread in possibleSwitchTo_rewrite)) | rule cteDeleteOne_replycap_rewrite | rule monadic_rewrite_bind monadic_rewrite_refl - | wp assert_inv mapM_x_wp' - setThreadState_obj_at_unchanged + | wp assert_inv mapM_x_wp' sts_valid_objs' asUser_obj_at_unchanged hoare_strengthen_post[OF _ obj_at_conj'[simplified atomize_conjL], rotated] lookupBitmapPriority_lift @@ -1611,13 +1531,11 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (simp add: setMessageInfo_def) apply (rule monadic_rewrite_bind_tail) apply (rename_tac unblocked) - apply (rule_tac rv=thread in monadic_rewrite_symb_exec_l_known, - (wp empty_fail_getCurThread)+) - apply (rule_tac rv=cptr in monadic_rewrite_symb_exec_l_known, - (wp empty_fail_asUser empty_fail_getRegister)+) + apply (monadic_rewrite_symb_exec_l_known thread) + apply (monadic_rewrite_symb_exec_l_known cptr) apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_catch[OF _ monadic_rewrite_refl True_E_E]) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) + apply (rule monadic_rewrite_catch[OF _ monadic_rewrite_refl wp_post_tautE_E]) + apply monadic_rewrite_symb_exec_l apply (rename_tac cTableCTE2, rule_tac P="cteCap cTableCTE2 = cteCap cTableCTE" in monadic_rewrite_gen_asm) @@ -1629,10 +1547,10 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply wp apply (rule monadic_rewrite_trans) apply (rule_tac rv=rab_ret - in monadic_rewrite_gets_known[where m="NonDetMonad.lift f" + in monadic_rewrite_gets_known[where m="Nondet_Monad.lift f" for f, folded bindE_def]) - apply (simp add: NonDetMonad.lift_def isRight_case_sum) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) + apply (simp add: Nondet_Monad.lift_def isRight_case_sum) + apply monadic_rewrite_symb_exec_l apply (rename_tac ep_cap2) apply (rule_tac P="cteCap ep_cap2 = cteCap ep_cap" in monadic_rewrite_gen_asm) apply (simp add: 
cap_case_EndpointCap_NotificationCap) @@ -1644,7 +1562,7 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (wp, simp) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_weaken[where E=True and F=True], simp) + apply (rule monadic_rewrite_weaken_flags[where E=True and F=True], simp) apply (rule setThreadState_rewrite_simple) apply clarsimp apply (wp getCTE_known_cap)+ @@ -1652,7 +1570,7 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (rule_tac t="capTCBPtr (cteCap replyCTE)" and t'=thread in schedule_known_rewrite) - apply (rule monadic_rewrite_weaken[where E=True and F=True], simp) + apply (rule monadic_rewrite_weaken_flags[where E=True and F=True], simp) apply (rule monadic_rewrite_bind) apply (rule activateThread_simple_rewrite) apply (rule monadic_rewrite_refl) @@ -1662,88 +1580,76 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply ((wp setCurThread_ct_in_state[folded st_tcb_at'_def] Arch_switchToThread_pred_tcb')+)[2] apply (simp add: catch_liftE) - apply (wp setEndpoint_obj_at_tcb' threadSet_pred_tcb_at_state[unfolded if_bool_eq_conj]) - - apply (wp setEndpoint_obj_at_tcb' - threadSet_pred_tcb_at_state[unfolded if_bool_eq_conj] - fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] - fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t] - | simp - | rule hoare_lift_Pf2[where f=ksCurThread, OF _ setEndpoint_ct'] - hoare_lift_Pf2[where f=ksCurThread, OF _ threadSet_ct])+ - - apply (simp cong: rev_conj_cong) - apply (strengthen imp_consequent[where Q="tcb_at' t s" for t s]) - apply (unfold setSchedulerAction_def)[3] - apply ((wp setThreadState_oa_queued user_getreg_rv setThreadState_no_sch_change - setThreadState_obj_at_unchanged - sts_st_tcb_at'_cases sts_bound_tcb_at' - emptySlot_obj_at'_not_queued - emptySlot_cte_wp_at_cteCap - emptySlot_cnode_caps - user_getreg_inv asUser_typ_ats - asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' - static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift - hoare_vcg_ex_lift - | simp del: comp_apply - | clarsimp simp: obj_at'_weakenE[OF _ TrueI])+) - - apply (rule hoare_lift_Pf2[where f=ksCurThread, OF _ setThreadState_ct']) - apply (wp setThreadState_oa_queued - fastpathBestSwitchCandidate_lift[where f="setThreadState f t" for f t]) - apply (simp add: setThreadState_runnable_simp) - apply (wp threadSet_tcbState_st_tcb_at') - apply (clarsimp simp del: comp_apply) - apply (wp emptySlot_obj_at_ep)+ - - apply ((wp setThreadState_oa_queued user_getreg_rv - setThreadState_no_sch_change - setThreadState_obj_at_unchanged + apply ((wpsimp wp: user_getreg_rv setEndpoint_obj_at_tcb' + threadSet_pred_tcb_at_state[unfolded if_bool_eq_conj] + fastpathBestSwitchCandidate_lift[where f="setEndpoint a b" for a b] + fastpathBestSwitchCandidate_lift[where f="threadSet f t" for f t] + | wps)+)[3] + apply (simp cong: rev_conj_cong) + apply (wpsimp wp: setThreadState_tcbContext[simplified comp_apply] + user_getreg_rv + setThreadState_no_sch_change sts_valid_objs' + sts_st_tcb_at'_cases sts_bound_tcb_at' + fastpathBestSwitchCandidate_lift[where f="setThreadState s t" for s t] + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift + hoare_vcg_ex_lift + | wps)+ + apply (strengthen imp_consequent[where Q="tcb_at' t s" for t s]) + apply ((wp user_getreg_rv setThreadState_no_sch_change sts_st_tcb_at'_cases sts_bound_tcb_at' - emptySlot_obj_at'_not_queued + emptySlot_obj_at'_not_queued emptySlot_obj_at_ep + 
emptySlot_tcbContext[simplified comp_apply] emptySlot_cte_wp_at_cteCap emptySlot_cnode_caps user_getreg_inv asUser_typ_ats asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' - static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift hoare_vcg_ex_lift + fastpathBestSwitchCandidate_lift[where f="emptySlot a b" for a b] | simp del: comp_apply | clarsimp simp: obj_at'_weakenE[OF _ TrueI] + | wps)+) + + apply (wpsimp wp: fastpathBestSwitchCandidate_lift[where f="asUser a b" for a b])+ + apply (clarsimp cong: conj_cong) + apply ((wp user_getreg_inv asUser_typ_ats + asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift + hoare_vcg_ex_lift + | clarsimp simp: obj_at'_weakenE[OF _ TrueI] | solves \ - rule hoare_lift_Pf2[where f=ksCurThread, OF _ emptySlot_ct] - hoare_lift_Pf2[where f=ksCurThread, OF _ asUser_ct], - wp fastpathBestSwitchCandidate_lift[where f="emptySlot a b" for a b] - fastpathBestSwitchCandidate_lift[where f="asUser a b" for a b] - user_getreg_inv asUser_typ_ats\)+) + wp fastpathBestSwitchCandidate_lift[where f="asUser a b" for a b] + \)+) apply (clarsimp | wp getCTE_wp' gts_imp')+ apply (simp add: ARM_HYP_H.switchToThread_def getTCB_threadGet bind_assoc) apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp mapM_x_wp' handleFault_obj_at'_tcbIPCBuffer getObject_inv | wpc | simp - | wp (once) hoare_drop_imps )+ - apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ - apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp - | wp (once) hoare_drop_imps )+ + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp mapM_x_wp' handleFault_obj_at'_tcbIPCBuffer getObject_inv | wpc | simp + | wp (once) hoare_drop_imps )+ + apply (rule monadic_rewrite_bind monadic_rewrite_refl)+ + apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv mapM_x_wp' getObject_inv | wpc | simp + | wp (once) hoare_drop_imps )+ apply (simp add: bind_assoc catch_liftE receiveIPC_def Let_def liftM_def setThreadState_runnable_simp) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getThreadState)+) + apply monadic_rewrite_symb_exec_l apply (rule monadic_rewrite_assert) apply (rule_tac P="inj (case_bool thread (capTCBPtr (cteCap replyCTE)))" in monadic_rewrite_gen_asm) apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_weaken[where F=False and E=True], simp) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) apply (rule isolate_thread_actions_rewrite_bind fastpath_isolate_rewrites fastpath_isolatables - bool.simps setRegister_simple - zipWithM_setRegister_simple + bool.simps setRegister_simple_modify_registers + zipWithM_setRegister_simple_modify_registers thread_actions_isolatable_bind thread_actions_isolatableD[OF setCTE_isolatable] setCTE_isolatable @@ -1763,21 +1669,19 @@ lemma fastpath_callKernel_SysReplyRecv_corres: (thread + 2 ^ cte_level_bits * tcbCallerSlot) and (\s. \x. 
tcb_at' (case_bool thread (capTCBPtr (cteCap replyCTE)) x) s) and valid_mdb')" - and F=True and E=False in monadic_rewrite_weaken) + and F=True and E=False in monadic_rewrite_weaken_flags) apply (rule monadic_rewrite_isolate_final2) apply simp - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) + apply monadic_rewrite_symb_exec_l apply (rename_tac callerCTE) apply (rule monadic_rewrite_assert) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+) + apply monadic_rewrite_symb_exec_l apply (rule monadic_rewrite_assert) apply (simp add: emptySlot_setEndpoint_pivot) apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_refl2) + apply (rule monadic_rewrite_is_refl) apply (clarsimp simp: isSendEP_def split: Structures_H.endpoint.split) - apply (rule_tac Q="\rv. (\_. rv = callerCTE) and Q'" for Q' - in monadic_rewrite_symb_exec_r, wp+) - apply (rule monadic_rewrite_gen_asm, simp) + apply (monadic_rewrite_symb_exec_r_known callerCTE) apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_head, rule_tac cte=callerCTE in emptySlot_replymaster_rewrite) apply (simp add: bind_assoc o_def) @@ -1869,7 +1773,9 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (clarsimp simp: obj_at_tcbs_of tcbSlots cte_level_bits_def) apply (frule(1) st_tcb_at_is_Reply_imp_not_tcbQueued) - apply (auto simp: obj_at_tcbs_of tcbSlots + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (auto simp: obj_at_tcbs_of tcbSlots projectKOs cte_level_bits_def) done diff --git a/proof/crefine/ARM_HYP/Finalise_C.thy b/proof/crefine/ARM_HYP/Finalise_C.thy index cf8bcac7b4..bc1edef7da 100644 --- a/proof/crefine/ARM_HYP/Finalise_C.thy +++ b/proof/crefine/ARM_HYP/Finalise_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -16,6 +17,108 @@ declare if_split [split del] definition "option_map2 f m = option_map f \ m" +definition ksReadyQueues_head_end_2 :: "(domain \ priority \ ready_queue) \ bool" where + "ksReadyQueues_head_end_2 qs \ + \d p. 
tcbQueueHead (qs (d, p)) \ None \ tcbQueueEnd (qs (d, p)) \ None" + +abbreviation "ksReadyQueues_head_end s \ ksReadyQueues_head_end_2 (ksReadyQueues s)" + +lemmas ksReadyQueues_head_end_def = ksReadyQueues_head_end_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end: + "ksReadyQueues_asrt s \ ksReadyQueues_head_end s" + by (fastforce dest: tcbQueueHead_iff_tcbQueueEnd + simp: ready_queue_relation_def ksReadyQueues_asrt_def ksReadyQueues_head_end_def) + +lemma tcbSchedEnqueue_ksReadyQueues_head_end[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: tcbQueueEmpty_def obj_at'_def ksReadyQueues_head_end_def split: if_splits) + done + +lemma ksReadyQueues_head_end_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end (s\ksSchedulerAction := ChooseNewThread\) = ksReadyQueues_head_end s" + by (simp add: ksReadyQueues_head_end_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + +lemma setThreadState_ksReadyQueues_head_end[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end\" + unfolding setThreadState_def + by (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + +definition ksReadyQueues_head_end_tcb_at'_2 :: + "(domain \ priority \ ready_queue) \ (obj_ref \ tcb) \ bool" where + "ksReadyQueues_head_end_tcb_at'_2 qs tcbs \ + \d p. (\head. tcbQueueHead (qs (d, p)) = Some head \ tcbs head \ None) + \ (\end. tcbQueueEnd (qs (d, p)) = Some end \ tcbs end \ None)" + +abbreviation "ksReadyQueues_head_end_tcb_at' s \ + ksReadyQueues_head_end_tcb_at'_2 (ksReadyQueues s) (tcbs_of' s)" + +lemmas ksReadyQueues_head_end_tcb_at'_def = ksReadyQueues_head_end_tcb_at'_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at': + "\ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ ksReadyQueues_head_end_tcb_at' s" + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def + ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI allI) + apply (case_tac "ts = []", clarsimp) + apply (fastforce dest!: heap_path_head hd_in_set + simp: opt_pred_def tcbQueueEmpty_def split: option.splits) + apply (fastforce simp: queue_end_valid_def opt_pred_def tcbQueueEmpty_def + split: option.splits) + done + +lemma tcbSchedEnqueue_ksReadyQueues_head_end_tcb_at'[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma ksReadyQueues_head_end_tcb_at'_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end_tcb_at' (s\ksSchedulerAction := ChooseNewThread\) + = ksReadyQueues_head_end_tcb_at' s" + by (simp add: ksReadyQueues_head_end_tcb_at'_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + +lemma setThreadState_ksReadyQueues_head_end_tcb_at'[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding setThreadState_def + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma head_end_ksReadyQueues_': + "\ (s, s') \ rf_sr; ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; 
+ pspace_aligned' s; pspace_distinct' s; + d \ maxDomain; p \ maxPriority \ + \ head_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL + \ end_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL" + apply (frule (2) rf_sr_ctcb_queue_relation[where d=d and p=p]) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: option.splits) + apply (rename_tac "end" head end_tcb head_tcb) + apply (prop_tac "tcb_at' head s \ tcb_at' end s") + apply (fastforce intro!: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (fastforce dest: tcb_at_not_NULL) + done + lemma tcbSchedEnqueue_cslift_spec: "\s. \\\<^bsub>/UNIV\<^esub> \s. \d v. option_map2 tcbPriority_C (cslift s) \tcb = Some v \ unat v \ numPriorities @@ -27,7 +130,9 @@ lemma tcbSchedEnqueue_cslift_spec: \ None \ option_map2 tcbDomain_C (cslift s) (head_C (index \ksReadyQueues (unat (d*0x100 + v)))) - \ None)\ + \ None) + \ (head_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL + \ end_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL)\ Call tcbSchedEnqueue_'proc {s'. option_map2 tcbEPNext_C (cslift s') = option_map2 tcbEPNext_C (cslift s) \ option_map2 tcbEPPrev_C (cslift s') = option_map2 tcbEPPrev_C (cslift s) @@ -44,8 +149,8 @@ lemma tcbSchedEnqueue_cslift_spec: apply (rule conjI) apply (clarsimp simp: typ_heap_simps cong: if_cong) apply (simp split: if_split) - apply (clarsimp simp: typ_heap_simps if_Some_helper cong: if_cong) - by (simp split: if_split) + by (auto simp: typ_heap_simps' if_Some_helper numPriorities_def + cong: if_cong split: if_splits) lemma setThreadState_cslift_spec: "\s. \\\<^bsub>/UNIV\<^esub> \s. s \\<^sub>c \tptr \ (\x. ksSchedulerAction_' (globals s) = tcb_Ptr x @@ -140,8 +245,9 @@ lemma ctcb_relation_tcbPriority_maxPriority_numPriorities: done lemma tcbSchedEnqueue_cslift_precond_discharge: - "\ (s, s') \ rf_sr; obj_at' (P :: tcb \ bool) x s; - valid_queues s; valid_objs' s \ \ + "\ (s, s') \ rf_sr; obj_at' (P :: tcb \ bool) x s; valid_objs' s ; + ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; + pspace_aligned' s; pspace_distinct' s\ \ (\d v. option_map2 tcbPriority_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some v \ unat v < numPriorities \ option_map2 tcbDomain_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some d @@ -152,31 +258,49 @@ lemma tcbSchedEnqueue_cslift_precond_discharge: \ None \ option_map2 tcbDomain_C (cslift s') (head_C (index (ksReadyQueues_' (globals s')) (unat (d*0x100 + v)))) - \ None))" + \ None) + \ (head_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL + \ end_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL))" apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps' option_map2_def) + apply (rename_tac tcb tcb') apply (frule_tac t=x in valid_objs'_maxPriority, fastforce simp: obj_at'_def) apply (frule_tac t=x in valid_objs'_maxDomain, fastforce simp: obj_at'_def) apply (drule_tac P="\tcb. tcbPriority tcb \ maxPriority" in obj_at_ko_at2', simp) apply (drule_tac P="\tcb. 
tcbDomain tcb \ maxDomain" in obj_at_ko_at2', simp) apply (simp add: ctcb_relation_tcbDomain_maxDomain_numDomains ctcb_relation_tcbPriority_maxPriority_numPriorities) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_ctcb_queue_relation) apply (simp add: maxDom_to_H maxPrio_to_H)+ + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in head_end_ksReadyQueues_', fastforce+) apply (simp add: cready_queues_index_to_C_def2 numPriorities_def le_maxDomain_eq_less_numDomains) apply (clarsimp simp: ctcb_relation_def) - apply (frule arg_cong[where f=unat], subst(asm) unat_ucast_8_32) - apply (frule tcb_queue'_head_end_NULL) - apply (erule conjunct1[OF valid_queues_valid_q]) - apply (frule(1) tcb_queue_relation_qhead_valid') - apply (simp add: valid_queues_valid_q) - apply (clarsimp simp: h_t_valid_clift_Some_iff) + apply (frule arg_cong[where f=unat], subst(asm) unat_ucast_up_simp, simp) + apply (frule (3) head_end_ksReadyQueues_', fastforce+) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (prop_tac "\ tcbQueueEmpty ((ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)))") + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (clarsimp simp: tcbQueueEmpty_def) + apply (rename_tac head "end" head_tcb end_tcb) + apply (prop_tac "tcb_at' head s") + apply (fastforce intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (frule_tac thread=head in obj_at_cslift_tcb) + apply fastforce + apply (clarsimp dest: obj_at_cslift_tcb simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) done lemma cancel_all_ccorres_helper: "ccorres dc xfdc - (\s. valid_objs' s \ valid_queues s + (\s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s \ (\t\set ts. tcb_at' t s \ t \ 0) \ sch_act_wf (ksSchedulerAction s) s) {s'. \p. ep_queue_relation (cslift s') ts @@ -199,8 +323,7 @@ proof (induct ts) apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (rule ccorres_tmp_lift2[where G'=UNIV and G''="\x. 
UNIV", simplified]) apply ceqv - apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def - dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip) apply simp done @@ -209,7 +332,7 @@ next show ?case apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (simp del: Collect_const - add: dc_def[symmetric] mapM_x_Cons) + add: mapM_x_Cons) apply (rule ccorres_guard_imp2) apply (rule_tac xf'=thread_' in ccorres_abstract) apply ceqv @@ -232,17 +355,15 @@ next apply (erule cmap_relationE1 [OF cmap_relation_tcb]) apply (erule ko_at_projectKO_opt) apply (fastforce intro: typ_heap_simps) - apply (wp sts_running_valid_queues | simp)+ + apply (wp sts_valid_objs' | simp)+ apply (rule ceqv_refl) apply (rule "Cons.hyps") apply (wp sts_valid_objs' sts_sch_act sch_act_wf_lift hoare_vcg_const_Ball_lift - sts_running_valid_queues sts_st_tcb' setThreadState_oa_queued | simp)+ + sts_st_tcb' | simp)+ apply (vcg exspec=setThreadState_cslift_spec exspec=tcbSchedEnqueue_cslift_spec) - apply (clarsimp simp: tcb_at_not_NULL - Collect_const_mem valid_tcb_state'_def - ThreadState_Restart_def mask_def - valid_objs'_maxDomain valid_objs'_maxPriority) + apply (clarsimp simp: tcb_at_not_NULL Collect_const_mem valid_tcb_state'_def + ThreadState_defs mask_def valid_objs'_maxDomain valid_objs'_maxPriority) apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (rule conjI) @@ -252,16 +373,13 @@ next st_tcb_at'_def split: scheduler_action.split_asm) apply (rename_tac word) - apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge) - apply simp - apply clarsimp - apply clarsimp - apply clarsimp + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; clarsimp?) + apply simp apply clarsimp apply (rule conjI) apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge) apply clarsimp - apply clarsimp + apply clarsimp+ apply (subst ep_queue_relation_shift, fastforce) apply (drule_tac x="tcb_ptr_to_ctcb_ptr thread" in fun_cong)+ @@ -270,17 +388,23 @@ next done qed +crunches setEndpoint, setNotification + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + and ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + (simp: updateObject_default_def) + lemma cancelAllIPC_ccorres: "ccorres dc xfdc - (invs') (UNIV \ {s. epptr_' s = Ptr epptr}) [] + invs' (UNIV \ {s. 
epptr_' s = Ptr epptr}) [] (cancelAllIPC epptr) (Call cancelAllIPC_'proc)" apply (cinit lift: epptr_') + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l [OF _ getEndpoint_inv _ empty_fail_getEndpoint]) apply (rule_tac xf'=ret__unsigned_' - and val="case rv of IdleEP \ scast EPState_Idle + and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv | SendEP _ \ scast EPState_Send" - and R="ko_at' rv epptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ep]) @@ -289,8 +413,8 @@ lemma cancelAllIPC_ccorres: apply (simp add: cendpoint_relation_def Let_def split: endpoint.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv epptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ep epptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc apply (rename_tac list) apply (simp add: endpoint_state_defs @@ -323,29 +447,26 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear - cancelAllIPC_mapM_x_valid_queues | simp)+ apply (rule mapM_x_wp', wp)+ apply (wp sts_st_tcb') apply (clarsimp split: if_split) - apply (rule mapM_x_wp', wp)+ + apply (rule mapM_x_wp', wp sts_valid_objs')+ apply (clarsimp simp: valid_tcb_state'_def) apply (simp add: guard_is_UNIV_def) apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: endpoint_state_defs - Collect_False Collect_True - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: endpoint_state_defs Collect_False Collect_True ccorres_cond_iffs del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -373,48 +494,48 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) - apply (wp cancelAllIPC_mapM_x_valid_queues) - apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ apply (simp add: guard_is_UNIV_def) apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (clarsimp simp: valid_ep'_def invs_valid_objs' invs_queues) + apply (clarsimp simp: valid_ep'_def invs_valid_objs') apply (rule cmap_relationE1[OF cmap_relation_ep], assumption) apply (erule ko_at_projectKO_opt) apply (frule obj_at_valid_objs', clarsimp+) - apply (clarsimp simp: projectKOs valid_obj'_def valid_ep'_def) - subgoal by (auto simp: typ_heap_simps cendpoint_relation_def - Let_def tcb_queue_relation'_def - invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority - intro!: obj_at_conj') + apply (clarsimp 
simp: valid_obj'_def valid_ep'_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + subgoal + by (auto simp: typ_heap_simps cendpoint_relation_def + Let_def tcb_queue_relation'_def projectKOs + invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority + intro!: obj_at_conj') apply (clarsimp simp: guard_is_UNIV_def) apply (wp getEndpoint_wp) apply clarsimp done -lemma empty_fail_getNotification: - "empty_fail (getNotification ep)" - unfolding getNotification_def - by (auto intro: empty_fail_getObject) - lemma cancelAllSignals_ccorres: "ccorres dc xfdc - (invs') (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr}) [] + invs' (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr}) [] (cancelAllSignals ntfnptr) (Call cancelAllSignals_'proc)" apply (cinit lift: ntfnPtr_') + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule_tac xf'=ret__unsigned_' - and val="case ntfnObj rv of IdleNtfn \ scast NtfnState_Idle + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle | ActiveNtfn _ \ scast NtfnState_Active | WaitingNtfn _ \ scast NtfnState_Waiting" - and R="ko_at' rv ntfnptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ntfn]) @@ -423,18 +544,15 @@ lemma cancelAllSignals_ccorres: apply (simp add: cnotification_relation_def Let_def split: ntfn.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv ntfnptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ntfn ntfnptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric] Collect_True + apply (simp add: notification_state_defs ccorres_cond_iffs Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -442,8 +560,8 @@ lemma cancelAllSignals_ccorres: apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) - apply (rule_tac P="ko_at' rv ntfnptr and invs'" - in ccorres_from_vcg[where P'=UNIV]) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp apply (rule_tac x=ntfnptr in cmap_relationE1 [OF cmap_relation_ntfn], assumption) @@ -460,13 +578,12 @@ lemma cancelAllSignals_ccorres: subgoal by (simp add: cnotification_relation_def notification_state_defs Let_def) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) - apply (wp cancelAllIPC_mapM_x_valid_queues) - apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' 
sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ apply (simp add: guard_is_UNIV_def) apply (wp set_ntfn_valid_objs' hoare_vcg_const_Ball_lift @@ -476,11 +593,16 @@ lemma cancelAllSignals_ccorres: apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption) apply (erule ko_at_projectKO_opt) apply (frule obj_at_valid_objs', clarsimp+) - apply (clarsimp simp add: valid_obj'_def valid_ntfn'_def projectKOs) - subgoal by (auto simp: typ_heap_simps cnotification_relation_def - Let_def tcb_queue_relation'_def - invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority - intro!: obj_at_conj') + apply (clarsimp simp add: valid_obj'_def valid_ntfn'_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + subgoal + by (auto simp: typ_heap_simps cnotification_relation_def + Let_def tcb_queue_relation'_def projectKOs + invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority + intro!: obj_at_conj') apply (clarsimp simp: guard_is_UNIV_def) apply (wp getNotification_wp) apply clarsimp @@ -561,16 +683,16 @@ lemma tcb_queue_relation2_cong: context kernel_m begin -lemma setThreadState_ccorres_valid_queues'_simple: - "ccorres dc xfdc (\s. tcb_at' thread s \ valid_queues' s \ \ runnable' st \ sch_act_simple s) +lemma setThreadState_ccorres_simple: + "ccorres dc xfdc (\s. tcb_at' thread s \ \ runnable' st \ sch_act_simple s) ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres_valid_queues'_simple) - apply (wp threadSet_valid_queues'_and_not_runnable') - apply (clarsimp simp: weak_sch_act_wf_def valid_queues'_def) + apply (wp threadSet_tcbState_st_tcb_at') + apply (fastforce simp: weak_sch_act_wf_def) done lemma updateRestartPC_ccorres: @@ -586,9 +708,7 @@ lemma updateRestartPC_ccorres: done crunches updateRestartPC - for valid_queues'[wp]: valid_queues' - and sch_act_simple[wp]: sch_act_simple - and valid_queues[wp]: Invariants_H.valid_queues + for sch_act_simple[wp]: sch_act_simple and valid_objs'[wp]: valid_objs' and tcb_at'[wp]: "tcb_at' p" @@ -628,25 +748,16 @@ lemma suspend_ccorres: apply clarsimp apply (rule iffI) apply simp - apply (erule thread_state_to_tsType.elims; simp add: StrictC'_thread_state_defs) + apply (erule thread_state_to_tsType.elims; simp add: ThreadState_defs) apply (ctac (no_vcg) add: updateRestartPC_ccorres) apply (rule ccorres_return_Skip) apply ceqv - apply (ctac(no_vcg) add: setThreadState_ccorres_valid_queues'_simple) - apply (ctac add: tcbSchedDequeue_ccorres') - apply (rule_tac Q="\_. - (\s. \t' d p. (t' \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d - \ tcbPriority tcb = p) t' s \ - (t' \ thread \ st_tcb_at' runnable' t' s)) \ - distinct (ksReadyQueues s (d, p))) and valid_queues' and valid_objs' and tcb_at' thread" - in hoare_post_imp) + apply (ctac(no_vcg) add: setThreadState_ccorres_simple) + apply (ctac add: tcbSchedDequeue_ccorres) + apply (rule_tac Q="\_. 
valid_objs' and tcb_at' thread and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) apply clarsimp - apply (drule_tac x="t" in spec) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp elim!: obj_at'_weakenE simp: inQ_def) - apply (wp sts_valid_queues_partial)[1] + apply (wp sts_valid_objs')[1] apply clarsimp apply (wpsimp simp: valid_tcb_state'_def) apply clarsimp @@ -655,16 +766,14 @@ lemma suspend_ccorres: apply clarsimp apply (rule conseqPre, vcg) apply (rule subset_refl) - apply (rule hoare_strengthen_post) + apply (rule hoare_strengthen_post) apply (rule hoare_vcg_conj_lift) apply (rule hoare_vcg_conj_lift) apply (rule cancelIPC_sch_act_simple) apply (rule cancelIPC_tcb_at'[where t=thread]) apply (rule delete_one_conc_fr.cancelIPC_invs) - apply (fastforce simp: invs_valid_queues' invs_queues invs_valid_objs' - valid_tcb_state'_def) - apply clarsimp - apply (auto simp: "StrictC'_thread_state_defs") + apply (fastforce simp: invs_valid_objs' valid_tcb_state'_def) + apply (auto simp: ThreadState_defs) done lemma cap_to_H_NTFNCap_tag: @@ -687,8 +796,8 @@ lemma doUnbindNotification_ccorres: (Call doUnbindNotification_'proc)" apply (cinit' lift: ntfnPtr_' tcbptr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr" and P'=UNIV - in ccorres_split_nothrow_novcg) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: option_to_ptr_def option_to_0_def) @@ -707,7 +816,7 @@ lemma doUnbindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv", ((simp add: option_to_ctcb_ptr_def)+)[4]) + apply (case_tac "ntfnObj ntfn", ((simp add: option_to_ctcb_ptr_def)+)[4]) subgoal by (simp add: carch_state_relation_def typ_heap_simps') subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: h_t_valid_clift_Some_iff) @@ -718,7 +827,7 @@ lemma doUnbindNotification_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -768,7 +877,7 @@ lemma doUnbindNotification_ccorres': apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -803,9 +912,9 @@ lemma unbindNotification_ccorres: apply simp apply wpc apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (rule ccorres_cond_true) - apply (ctac (no_vcg) add: doUnbindNotification_ccorres[unfolded dc_def, simplified]) + apply (ctac (no_vcg) add: doUnbindNotification_ccorres[simplified]) apply (wp gbn_wp') apply vcg apply (clarsimp simp: option_to_ptr_def option_to_0_def pred_tcb_at'_def @@ -822,13 +931,13 @@ lemma unbindMaybeNotification_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule ccorres_rhs_assoc2) - apply (rule_tac P="ntfnBoundTCB rv \ None \ - 
option_to_ctcb_ptr (ntfnBoundTCB rv) \ NULL" - in ccorres_gen_asm) + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) apply (rule_tac xf'=boundTCB_' - and val="option_to_ctcb_ptr (ntfnBoundTCB rv)" - and R="ko_at' rv ntfnptr and valid_bound_tcb' (ntfnBoundTCB rv)" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and val="option_to_ctcb_ptr (ntfnBoundTCB ntfn)" + and R="ko_at' ntfn ntfnptr and valid_bound_tcb' (ntfnBoundTCB ntfn)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1[OF cmap_relation_ntfn]) @@ -868,7 +977,7 @@ lemma finaliseCap_True_cases_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap Collect_False del: Collect_const) apply (fold case_bool_If) - apply (simp add: false_def) + apply simp apply csymbr apply wpc apply (simp add: cap_get_tag_isCap ccorres_cond_univ_iff Let_def) @@ -1025,7 +1134,6 @@ lemma invalidateASIDEntry_ccorres: apply (rule order_le_less_trans, rule word_and_le1) apply (simp add: mask_def) apply (rule ccorres_return_Skip) - apply (fold dc_def) apply (ctac add: invalidateASID_ccorres) apply wp apply (simp add: guard_is_UNIV_def) @@ -1058,8 +1166,7 @@ lemma deleteASIDPool_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: asid_base_' pool_' simp: whileAnno_def) apply (rule ccorres_assert) - apply (clarsimp simp: liftM_def dc_def[symmetric] fun_upd_def[symmetric] - when_def + apply (clarsimp simp: liftM_def fun_upd_def[symmetric] when_def simp del: Collect_const) apply (rule ccorres_Guard)+ apply (rule ccorres_pre_gets_armKSASIDTable_ksArchState) @@ -1206,14 +1313,12 @@ lemma deleteASID_ccorres: apply ceqv apply csymbr apply wpc - apply (simp add: ccorres_cond_iffs dc_def[symmetric] - Collect_False + apply (simp add: ccorres_cond_iffs Collect_False del: Collect_const cong: call_ignore_cong) apply (rule ccorres_cond_false) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] when_def - Collect_True liftM_def + apply (simp add: when_def Collect_True liftM_def cong: conj_cong call_ignore_cong del: Collect_const) apply (rule ccorres_pre_getObject_asidpool) @@ -1236,8 +1341,7 @@ lemma deleteASID_ccorres: apply (simp add: asid_low_bits_def Kernel_C.asidLowBits_def mask_def word_and_le1) apply (drule sym, simp) - apply (simp add: option_to_ptr_def option_to_0_def - from_bool_def inv_ASIDPool + apply (simp add: option_to_ptr_def option_to_0_def inv_ASIDPool split: option.split if_split bool.split) apply ceqv apply (rule ccorres_cond2[where R=\]) @@ -1310,7 +1414,7 @@ lemma deleteASID_ccorres: lemma setObject_ccorres_lemma: fixes val :: "'a :: pspace_storable" shows - "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := ksPSpace s (ptr \ injectKO val) \, s') \ rf_sr},{}; + "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val) \, s') \ rf_sr},{}; \s s' val (val' :: 'a). \ ko_at' val' ptr s; (s, s') \ rf_sr \ \ s' \ Q s; \val :: 'a. updateObject val = updateObject_default val; @@ -1333,7 +1437,7 @@ lemma setObject_ccorres_lemma: apply (subgoal_tac "fst (setObject ptr val \) = {}") apply simp apply (erule notE, erule_tac s=\ in empty_failD[rotated]) - apply (simp add: setObject_def split_def) + apply (simp add: setObject_def split_def empty_fail_cond) apply (rule ccontr) apply (clarsimp elim!: nonemptyE) apply (frule use_valid [OF _ obj_at_setObject3[where P=\]], simp_all)[1] @@ -1419,8 +1523,8 @@ lemma pageTableMapped_pd: \\rv s. 
case rv of Some x \ page_directory_at' x s | _ \ True\" apply (simp add: pageTableMapped_def) apply (rule hoare_pre) - apply (wp getPDE_wp hoare_vcg_all_lift_R | wpc)+ - apply (rule hoare_post_imp_R, rule findPDForASID_page_directory_at'_simple) + apply (wp getPDE_wp hoare_vcg_all_liftE_R | wpc)+ + apply (rule hoare_strengthen_postE_R, rule findPDForASID_page_directory_at'_simple) apply (clarsimp split: if_split) apply simp done @@ -1434,7 +1538,7 @@ lemma unmapPageTable_ccorres: apply (ctac(no_vcg) add: pageTableMapped_ccorres) apply wpc apply (simp add: option_to_ptr_def option_to_0_def ccorres_cond_iffs) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (simp add: option_to_ptr_def option_to_0_def ccorres_cond_iffs) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -1444,7 +1548,6 @@ lemma unmapPageTable_ccorres: apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def pde_lift_pde_invalid) - apply (fold dc_def) apply csymbr apply (ctac add: cleanByVA_PoU_ccorres) apply (ctac(no_vcg) add:flushTable_ccorres) @@ -1481,12 +1584,6 @@ lemma no_0_pd_at'[elim!]: apply (drule spec[where x=0], clarsimp) done -lemma ccte_relation_ccap_relation: - "ccte_relation cte cte' \ ccap_relation (cteCap cte) (cte_C.cap_C cte')" - by (clarsimp simp: ccte_relation_def ccap_relation_def - cte_to_H_def map_option_Some_eq2 - c_valid_cte_def) - lemma isFinalCapability_ccorres: "ccorres ((=) \ from_bool) ret__unsigned_long_' (cte_wp_at' ((=) cte) slot and invs') @@ -1507,7 +1604,7 @@ lemma isFinalCapability_ccorres: apply (simp add: mdbPrev_to_H[symmetric]) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (simp add: return_def from_bool_def false_def) + apply (simp add: return_def) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_l[OF _ getCTE_inv getCTE_wp empty_fail_getCTE]) apply (rule_tac P="cte_wp_at' ((=) cte) slot @@ -1546,10 +1643,9 @@ lemma isFinalCapability_ccorres: apply (rule cmap_relationE1 [OF cmap_relation_cte], assumption+, simp?, simp add: typ_heap_simps)+ apply (drule ccte_relation_ccap_relation)+ - apply (auto simp: false_def true_def from_bool_def split: bool.splits)[1] + apply (auto simp: from_bool_def split: bool.splits)[1] apply (wp getCTE_wp') - apply (clarsimp simp add: guard_is_UNIV_def Collect_const_mem false_def - from_bool_0 true_def from_bool_def) + apply (clarsimp simp add: guard_is_UNIV_def Collect_const_mem) apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -1584,7 +1680,7 @@ lemma cteDeleteOne_ccorres: erule_tac t="ret__unsigned = scast cap_null_cap" and s="cteCap cte = NullCap" in ssubst) - apply (clarsimp simp only: when_def unless_def dc_def[symmetric]) + apply (clarsimp simp only: when_def unless_def) apply (rule ccorres_cond2[where R=\]) apply (clarsimp simp: Collect_const_mem) apply (rule ccorres_rhs_assoc)+ @@ -1595,25 +1691,23 @@ lemma cteDeleteOne_ccorres: apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot]) apply (rule_tac A="invs' and cte_wp_at' ((=) cte) slot" in ccorres_guard_imp2[where A'=UNIV]) - apply (simp add: split_def dc_def[symmetric] - del: Collect_const) + apply (simp add: split_def del: Collect_const) apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg) add: finaliseCap_True_standin_ccorres) apply (rule ccorres_assert) - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (ctac add: emptySlot_ccorres) apply (simp add: pred_conj_def 
finaliseCapTrue_standin_simple_def) apply (strengthen invs_mdb_strengthen' invs_urz) apply (wp typ_at_lifts isFinalCapability_inv | strengthen invs_valid_objs')+ - apply (clarsimp simp: from_bool_def true_def irq_opt_relation_def - invs_pspace_aligned' cte_wp_at_ctes_of) + apply (clarsimp simp: irq_opt_relation_def invs_pspace_aligned' cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (clarsimp simp: typ_heap_simps ccte_relation_ccap_relation ccap_relation_NullCap_iff) apply (wp isFinalCapability_inv) apply simp - apply (simp del: Collect_const add: false_def) + apply (simp del: Collect_const) apply (rule ccorres_return_Skip) apply (clarsimp simp: Collect_const_mem cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) @@ -1638,7 +1732,7 @@ lemma deletingIRQHandler_ccorres: (UNIV \ {s. irq_opt_relation (Some irq) (irq_' s)}) [] (deletingIRQHandler irq) (Call deletingIRQHandler_'proc)" apply (cinit lift: irq_' cong: call_ignore_cong) - apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def dc_def[symmetric] + apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def cong: call_ignore_cong ) apply (rule_tac r'="\rv rv'. rv' = Ptr rv" and xf'="slot_'" in ccorres_split_nothrow) @@ -1694,8 +1788,6 @@ lemma irq_opt_relation_Some_ucast: apply (simp only: unat_arith_simps) by (clarsimp simp: word_le_nat_alt Kernel_C.maxIRQ_def) -lemmas upcast_ucast_id = More_Word.ucast_up_inj - lemma irq_opt_relation_Some_ucast': "\ x && mask 10 = x; ucast x \ (ucast Kernel_C.maxIRQ :: 10 word) \ x \ (ucast Kernel_C.maxIRQ :: machine_word) \ \ irq_opt_relation (Some (ucast x)) (ucast x)" @@ -1732,7 +1824,7 @@ lemma option_to_ctcb_ptr_not_0: done lemma update_tcb_map_to_tcb: - "map_to_tcbs (ksPSpace s(p \ KOTCB tcb)) + "map_to_tcbs ((ksPSpace s)(p \ KOTCB tcb)) = (map_to_tcbs (ksPSpace s))(p \ tcb)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) @@ -1753,26 +1845,9 @@ lemma ep_queue_relation_shift2: apply (clarsimp split: option.split_asm) done -lemma sched_queue_relation_shift: - "(option_map2 tcbSchedNext_C (f (cslift s)) - = option_map2 tcbSchedNext_C (cslift s) - \ option_map2 tcbSchedPrev_C (f (cslift s)) - = option_map2 tcbSchedPrev_C (cslift s)) - \ sched_queue_relation (f (cslift s)) ts qPrev qHead - = sched_queue_relation (cslift s) ts qPrev qHead" - apply clarsimp - apply (induct ts arbitrary: qPrev qHead) - apply simp - apply simp - apply (simp add: option_map2_def fun_eq_iff - map_option_case) - apply (drule_tac x=qHead in spec)+ - apply (clarsimp split: option.split_asm) - done - lemma cendpoint_relation_udpate_arch: "\ cslift x p = Some tcb ; cendpoint_relation (cslift x) v v' \ - \ cendpoint_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cendpoint_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def split: endpoint.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1783,7 +1858,7 @@ lemma cendpoint_relation_udpate_arch: lemma cnotification_relation_udpate_arch: "\ cslift x p = Some tcb ; cnotification_relation (cslift x) v v' \ - \ cnotification_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cnotification_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cnotification_relation_def Let_def tcb_queue_relation'_def split: notification.splits ntfn.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1816,18 +1891,9 @@ lemma 
archThreadSet_tcbVCPU_Basic_ccorres: apply clarsimp apply (rule cmap_relation_rel_upd[OF _ cendpoint_relation_udpate_arch], simp+) apply (rule cmap_relation_rel_upd[OF _ cnotification_relation_udpate_arch], simp+) - apply (clarsimp simp add: cready_queues_relation_def Let_def tcb_queue_relation'_def) - apply (subst sched_queue_relation_shift; simp add: fun_eq_iff) - apply (safe ; case_tac "xa = tcb_ptr_to_ctcb_ptr tptr" ; clarsimp simp: option_map2_def map_option_case) - apply (clarsimp simp: cvariable_relation_upd_const) + apply (clarsimp simp: cvariable_relation_upd_const) done -(* MOVE *) -lemma update_vcpu_map_to_vcpu: - "map_to_vcpus (ksPSpace s(p \ KOArch (KOVCPU vcpu))) - = (map_to_vcpus (ksPSpace s))(p \ vcpu)" - by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) - lemma setObject_vcpuTCB_updated_Basic_ccorres: "ccorres dc xfdc (ko_at' (vcpuTCBPtr_update t vcpu) vcpuptr) UNIV hs (setObject vcpuptr (vcpuTCBPtr_update (\_. tptr) vcpu)) @@ -1896,11 +1962,11 @@ lemma armHSCurVCPU_update_active_false_ccorres: apply (clarsimp simp: modifyArchState_def) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: bind_def simpler_gets_def from_bool_def simpler_modify_def) + apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (clarsimp simp: cmachine_state_relation_def from_bool_def) + apply (clarsimp simp: cmachine_state_relation_def) apply (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def) - apply (case_tac "armHSCurVCPU (ksArchState \)"; clarsimp simp: from_bool_def false_def) + apply (case_tac "armHSCurVCPU (ksArchState \)"; clarsimp) done lemma armHSCurVCPU_update_curv_Null_ccorres: @@ -1913,15 +1979,14 @@ lemma armHSCurVCPU_update_curv_Null_ccorres: apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) apply (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def - cmachine_state_relation_def from_bool_def true_def false_def - split: bool.split option.splits) + cmachine_state_relation_def + split: bool.split option.splits) done lemma vcpuInvalidateActive_ccorres: "ccorres dc xfdc invs' UNIV hs vcpuInvalidateActive (Call vcpu_invalidate_active_'proc)" - supply dc_simp[simp del] apply cinit apply (rule ccorres_pre_getCurVCPU) apply (subst modify_armHSCurVCPU_when_split) @@ -1934,8 +1999,8 @@ lemma vcpuInvalidateActive_ccorres: apply clarsimp apply (frule rf_sr_ksArchState_armHSCurVCPU) apply (case_tac "\ t. (armHSCurVCPU \ ksArchState) s = Some t \ snd t") - apply (clarsimp simp: cur_vcpu_relation_def true_def) - apply (clarsimp simp: cur_vcpu_relation_def true_def) + apply (clarsimp simp: cur_vcpu_relation_def) + apply (clarsimp simp: cur_vcpu_relation_def) apply (case_tac "(armHSCurVCPU \ ksArchState) s"; clarsimp) apply (rule_tac a=" _ >>= (\_. when (hsCurVCPU \ None \ snd (the hsCurVCPU)) (modifyArchState(armHSCurVCPU_update @@ -1979,7 +2044,7 @@ lemma sanitiseSetRegister_ccorres: UNIV hs (asUser tptr (setRegister reg (local.sanitiseRegister False reg val))) - (\unsigned_long_eret_2 :== CALL sanitiseRegister(reg',val',0);; + (\unsigned_long_eret_2 :== CALL sanitiseRegister(reg',val',scast false);; CALL setRegister(tcb_ptr_to_ctcb_ptr tptr,reg',\unsigned_long_eret_2))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) @@ -1995,7 +2060,6 @@ lemma dissociateVCPUTCB_ccorres: (UNIV \ {s. 
tcb_' s = tcb_ptr_to_ctcb_ptr tptr } \ {s. vcpu_' s = vcpu_Ptr vcpuptr }) hs (dissociateVCPUTCB vcpuptr tptr) (Call dissociateVCPUTCB_'proc)" - supply dc_simp[simp del] apply (cinit lift: tcb_' vcpu_') apply (rule ccorres_pre_archThreadGet, rename_tac tcbVCPU) apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) @@ -2033,7 +2097,7 @@ lemma dissociateVCPUTCB_ccorres: apply (case_tac "tcbVCPU = Some vcpuptr \ vcpuTCBPtr vcpu \ Some tptr") apply simp apply (rule ccorres_fail') - apply (simp add: false_def) + apply simp apply ccorres_rewrite apply (rule ccorres_pre_getCurVCPU) apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc]) @@ -2102,7 +2166,6 @@ lemma associateVCPUTCB_ccorres: (UNIV \ {s. tcb_' s = tcb_ptr_to_ctcb_ptr tptr } \ {s. vcpu_' s = vcpu_Ptr vcpuptr }) hs (associateVCPUTCB vcpuptr tptr) (Call associateVCPUTCB_'proc)" - supply dc_simp[simp del] apply (cinit lift: tcb_' vcpu_') apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_pre_archThreadGet, rename_tac tcbVCPU) @@ -2199,7 +2262,6 @@ lemma vcpuFinalise_ccorres: "ccorres dc xfdc (invs' and vcpu_at' vcpuptr) ({s. vcpu_' s = Ptr vcpuptr}) [] (vcpuFinalise vcpuptr) (Call vcpu_finalise_'proc)" - supply dc_simp[simp del] apply (cinit lift: vcpu_') apply (rule ccorres_move_c_guard_vcpu) apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) @@ -2252,7 +2314,7 @@ method return_NullCap_pair_ccorres = (rule allI, rule conseqPre, vcg), (clarsimp simp: return_def ccap_relation_NullCap_iff)\ lemma Arch_finaliseCap_ccorres: - notes dc_simp[simp del] Collect_const[simp del] + notes Collect_const[simp del] shows "ccorres (\rv rv'. ccap_relation (fst rv) (remainder_C rv') \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) @@ -2405,7 +2467,7 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (frule cap_lift_page_directory_cap) apply (clarsimp simp: ccap_relation_def cap_to_H_def capAligned_def - to_bool_def cap_page_directory_cap_lift_def + cap_page_directory_cap_lift_def asid_bits_def split: if_split_asm) apply simp @@ -2448,9 +2510,8 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (frule cap_lift_page_table_cap) apply (clarsimp simp: ccap_relation_def cap_to_H_def capAligned_def - to_bool_def cap_page_table_cap_lift_def - asid_bits_def - split: if_split_asm) + cap_page_table_cap_lift_def asid_bits_def + split: if_split_asm) apply simp apply return_NullCap_pair_ccorres apply (clarsimp simp: isCap_simps) @@ -2624,7 +2685,7 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (frule cap_lift_page_directory_cap) apply (clarsimp simp: ccap_relation_def cap_to_H_def capAligned_def - to_bool_def cap_page_directory_cap_lift_def + cap_page_directory_cap_lift_def asid_bits_def split: if_split_asm) apply (frule cap_get_tag_isCap_unfolded_H_cap) @@ -2648,7 +2709,6 @@ lemma prepareThreadDelete_ccorres: (invs' and tcb_at' thread) (UNIV \ {s. 
thread_' s = tcb_ptr_to_ctcb_ptr thread}) hs (prepareThreadDelete thread) (Call Arch_prepareThreadDelete_'proc)" - supply dc_simp[simp del] apply (cinit lift: thread_', rename_tac cthread) apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_pre_archThreadGet, rename_tac vcpuopt) @@ -2719,7 +2779,7 @@ lemma finaliseCap_ccorres: del: Collect_const) apply (rule ccorres_if_lhs) apply (simp, rule ccorres_fail) - apply (simp add: from_bool_0 Collect_True Collect_False false_def + apply (simp add: from_bool_0 Collect_True Collect_False del: Collect_const) apply csymbr apply (simp add: cap_get_tag_isCap Collect_False Collect_True @@ -2804,7 +2864,7 @@ lemma finaliseCap_ccorres: apply (simp add: isArchCap_T_isArchObjectCap[symmetric] del: Collect_const) apply (rule ccorres_if_lhs) - apply (simp add: Collect_False Collect_True Let_def true_def + apply (simp add: Collect_False Collect_True Let_def del: Collect_const) apply (rule_tac P="(capIRQ cap) \ ARM_HYP.maxIRQ" in ccorres_gen_asm) apply (rule ccorres_rhs_assoc)+ @@ -2824,18 +2884,18 @@ lemma finaliseCap_ccorres: apply (rule ccorres_fail) apply (rule ccorres_add_return, rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ceqv_refl) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -2844,8 +2904,7 @@ lemma finaliseCap_ccorres: irq_opt_relation_def) apply wp apply (simp add: guard_is_UNIV_def) - apply (clarsimp simp: cap_get_tag_isCap word_sle_def Collect_const_mem - false_def from_bool_def) + apply (clarsimp simp: cap_get_tag_isCap word_sle_def Collect_const_mem) apply (intro impI conjI) apply (clarsimp split: bool.splits) apply (clarsimp split: bool.splits) @@ -2862,7 +2921,7 @@ lemma finaliseCap_ccorres: split: option.splits cap_CL.splits if_splits) apply clarsimp apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) - apply (clarsimp simp: isCap_simps from_bool_def false_def) + apply (clarsimp simp: isCap_simps) apply (clarsimp simp: tcb_cnode_index_defs ptr_add_assertion_def) apply clarsimp apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) @@ -2899,7 +2958,6 @@ lemma Arch_checkIRQ_ccorres: length_ineq_not_Nil hd_conv_nth cast_simps del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: throwError_bind) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg apply (rule conseqPre, vcg) diff --git a/proof/crefine/ARM_HYP/Interrupt_C.thy b/proof/crefine/ARM_HYP/Interrupt_C.thy index f65eb90e14..1befc2efcc 100644 --- a/proof/crefine/ARM_HYP/Interrupt_C.thy +++ b/proof/crefine/ARM_HYP/Interrupt_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -16,7 +17,7 @@ lemma invokeIRQHandler_AckIRQ_ccorres: (InterruptDecls_H.invokeIRQHandler 
(AckIRQ irq)) (Call invokeIRQHandler_AckIRQ_'proc)" apply (cinit lift: irq_' simp: Interrupt_H.invokeIRQHandler_def invokeIRQHandler_def) apply (ctac add: maskInterrupt_ccorres) - apply (simp add: from_bool_def false_def) + apply simp done lemma getIRQSlot_ccorres: @@ -74,12 +75,12 @@ proof - apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="-1"]) apply (rule ccorres_call) - apply (rule cteInsert_ccorres[simplified dc_def]) + apply (rule cteInsert_ccorres) apply simp apply simp apply simp apply (simp add: pred_conj_def) - apply (strengthen ntfn_badge_derived_enough_strg[unfolded o_def] + apply (strengthen ntfn_badge_derived_enough_strg invs_mdb_strengthen' valid_objs_invs'_strg) apply (wp cteDeleteOne_other_cap[unfolded o_def])[1] apply vcg @@ -111,7 +112,7 @@ lemma invokeIRQHandler_ClearIRQHandler_ccorres: apply simp apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) apply (rule ccorres_symb_exec_r) - apply (ctac add: cteDeleteOne_ccorres[where w="-1",simplified dc_def]) + apply (ctac add: cteDeleteOne_ccorres[where w="-1"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) @@ -228,7 +229,7 @@ lemma decodeIRQHandlerInvocation_ccorres: apply (clarsimp simp: Collect_const_mem neq_Nil_conv dest!: interpret_excaps_eq) apply (simp add: rf_sr_ksCurThread if_1_0_0 mask_def[where n=4] - "StrictC'_thread_state_defs" cap_get_tag_isCap excaps_map_def + ThreadState_defs cap_get_tag_isCap excaps_map_def word_sless_def word_sle_def) apply (simp add: invocationCatch_def throwError_bind interpret_excaps_test_null Collect_True @@ -256,24 +257,23 @@ lemma decodeIRQHandlerInvocation_ccorres: apply (simp add: syscall_error_to_H_cases) apply simp apply (clarsimp simp: Collect_const_mem tcb_at_invs') - apply (clarsimp simp: invs_queues invs_valid_objs' + apply (clarsimp simp: invs_valid_objs' ct_in_state'_def ccap_rights_relation_def - mask_def[where n=4] - "StrictC'_thread_state_defs") + mask_def[where n=4] ThreadState_defs) apply (subst pred_tcb'_weakenE, assumption, fastforce)+ apply (clarsimp simp: rf_sr_ksCurThread word_sle_def word_sless_def sysargs_rel_n_def word_less_nat_alt) apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth - slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def + slotcap_in_mem_def valid_tcb_state'_def dest!: interpret_excaps_eq split: bool.splits) apply (intro conjI impI allI) apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth - slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def + slotcap_in_mem_def valid_tcb_state'_def dest!: interpret_excaps_eq split: bool.splits)+ - apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[4] + apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[6] apply (drule ctes_of_valid') apply fastforce apply (clarsimp simp add:valid_cap_simps' ARM_HYP.maxIRQ_def) @@ -558,8 +558,7 @@ lemma Arch_decodeIRQControlInvocation_ccorres: apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: interpret_excaps_test_null excaps_map_def - Collect_const_mem word_sless_def word_sle_def - ThreadState_Restart_def unat_of_nat mask_def) + Collect_const_mem word_sless_def word_sle_def unat_of_nat mask_def) apply (rule conjI) apply (simp add: Kernel_C.maxIRQ_def word_le_nat_alt 
ucast_nat_def unat_ucast) apply (cut_tac unat_lt2p[where x="args ! 3"]) @@ -575,14 +574,14 @@ lemma Arch_decodeIRQControlInvocation_ccorres: apply (clarsimp simp: neq_Nil_conv numeral_eqs[symmetric] word_sle_def word_sless_def) apply (drule interpret_excaps_eq[rule_format, where n=0], simp) - apply (clarsimp simp: mask_def[where n=4] "StrictC'_thread_state_defs" + apply (clarsimp simp: mask_def[where n=4] ThreadState_defs rf_sr_ksCurThread ccap_rights_relation_def rightsFromWord_wordFromRights) apply (simp cong: conj_cong) apply (clarsimp simp: Kernel_C.maxIRQ_def word_le_nat_alt ucast_nat_def ucast_ucast_mask mask_eq_ucast_eq unat_ucast_mask less_mask_eq[unfolded word_less_nat_alt]) - apply (cases "args ! Suc 0 = 0"; clarsimp simp: true_def false_def) + apply (cases "args ! Suc 0 = 0"; clarsimp) done lemma decodeIRQControlInvocation_ccorres: @@ -732,7 +731,7 @@ lemma decodeIRQControlInvocation_ccorres: apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: interpret_excaps_test_null excaps_map_def Collect_const_mem word_sless_def word_sle_def - ThreadState_Restart_def unat_of_nat mask_def) + unat_of_nat mask_def) apply (rule conjI) apply (simp add: Kernel_C.maxIRQ_def word_le_nat_alt ucast_nat_def unat_ucast) @@ -749,7 +748,7 @@ lemma decodeIRQControlInvocation_ccorres: apply (clarsimp simp: neq_Nil_conv numeral_eqs[symmetric] word_sle_def word_sless_def) apply (drule interpret_excaps_eq[rule_format, where n=0], simp) - apply (clarsimp simp: mask_def[where n=4] "StrictC'_thread_state_defs" + apply (clarsimp simp: mask_def[where n=4] ThreadState_defs rf_sr_ksCurThread ccap_rights_relation_def rightsFromWord_wordFromRights) apply (simp cong: conj_cong) diff --git a/proof/crefine/ARM_HYP/Invoke_C.thy b/proof/crefine/ARM_HYP/Invoke_C.thy index 415585664a..04fcfb1b39 100644 --- a/proof/crefine/ARM_HYP/Invoke_C.thy +++ b/proof/crefine/ARM_HYP/Invoke_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -63,11 +64,11 @@ lemma setDomain_ccorres: apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_return_Skip) - apply (simp add: when_def to_bool_def) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (simp add: when_def) + apply (rule_tac R="\s. curThread = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply simp apply (wp hoare_drop_imps weak_sch_act_wf_lift_linear) @@ -75,15 +76,17 @@ lemma setDomain_ccorres: apply simp apply wp apply (rule_tac Q="\_. all_invs_but_sch_extra and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s)" in hoare_strengthen_post) + and (\s. curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp threadSet_all_invs_but_sch_extra) - apply (clarsimp simp:valid_pspace_valid_objs' st_tcb_at_def[symmetric] - sch_act_simple_def st_tcb_at'_def o_def weak_sch_act_wf_def split:if_splits) + apply (fastforce simp: valid_pspace_valid_objs' st_tcb_at_def[symmetric] + sch_act_simple_def st_tcb_at'_def weak_sch_act_wf_def + split: if_splits) apply (simp add: guard_is_UNIV_def) - apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s \ (\p. t \ set (ksReadyQueues s p)))" in hoare_strengthen_post) + apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple and (\s. 
curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_not_queued - tcbSchedDequeue_not_in_queue hoare_vcg_imp_lift hoare_vcg_all_lift) + hoare_vcg_imp_lift hoare_vcg_all_lift) apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def) apply (fastforce simp: valid_tcb'_def tcb_cte_cases_def invs'_def valid_state'_def valid_pspace'_def) @@ -191,10 +194,10 @@ lemma decodeDomainInvocation_ccorres: apply clarsimp apply (vcg exspec=getSyscallArg_modifies) - apply (clarsimp simp: valid_tcb_state'_def invs_valid_queues' invs_valid_objs' - invs_queues invs_sch_act_wf' ct_in_state'_def pred_tcb_at' + apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' + invs_sch_act_wf' ct_in_state'_def pred_tcb_at' rf_sr_ksCurThread word_sle_def word_sless_def sysargs_rel_to_n - mask_eq_iff_w2p mask_eq_iff_w2p word_size "StrictC'_thread_state_defs") + mask_eq_iff_w2p mask_eq_iff_w2p word_size ThreadState_defs) apply (rule conjI) apply (clarsimp simp: linorder_not_le isCap_simps) apply (rule conjI, clarsimp simp: unat32_eq_of_nat) @@ -202,7 +205,7 @@ lemma decodeDomainInvocation_ccorres: apply (drule_tac x="extraCaps ! 0" and P="\v. valid_cap' (fst v) s" in bspec) apply (clarsimp simp: nth_mem interpret_excaps_test_null excaps_map_def) apply (clarsimp simp: valid_cap_simps' pred_tcb'_weakenE active_runnable') - apply (rule conjI) + apply (intro conjI; fastforce?) apply (fastforce simp: tcb_st_refs_of'_def elim:pred_tcb'_weakenE) apply (simp add: word_le_nat_alt unat_ucast unat_numDomains_to_H le_maxDomain_eq_less_numDomains) apply (clarsimp simp: ccap_relation_def cap_to_H_simps cap_thread_cap_lift) @@ -227,7 +230,7 @@ lemma invokeCNodeDelete_ccorres: apply (rule ccorres_trim_returnE, simp, simp) apply (rule ccorres_callE) apply (rule cteDelete_ccorres[simplified]) - apply (simp add: from_bool_def true_def)+ + apply simp+ done @@ -247,7 +250,7 @@ lemma invokeCNodeRevoke_ccorres: apply (rule ccorres_trim_returnE, simp, simp) apply (rule ccorres_callE) apply (rule cteRevoke_ccorres[simplified]) - apply (simp add: from_bool_def true_def)+ + apply simp+ done @@ -387,7 +390,7 @@ lemma invokeCNodeRotate_ccorres: apply clarsimp apply (simp add: return_def) apply wp - apply (simp add: guard_is_UNIV_def dc_def xfdc_def) + apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp) apply (clarsimp simp:cte_wp_at_ctes_of) @@ -555,12 +558,10 @@ lemma hasCancelSendRights_spec: apply clarsimp apply (drule sym, drule (1) cap_get_tag_to_H) apply (clarsimp simp: hasCancelSendRights_def to_bool_def - true_def false_def split: if_split bool.splits) apply (rule impI) apply (case_tac cap, - auto simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs - from_bool_def false_def true_def hasCancelSendRights_def + auto simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs hasCancelSendRights_def dest: cap_get_tag_isArchCap_unfolded_H_cap split: capability.splits bool.splits)[1] done @@ -638,8 +639,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const cong: call_ignore_cong) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc | csymbr)+ - apply (simp add: invocationCatch_use_injection_handler[symmetric, unfolded o_def] - dc_def[symmetric] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) apply (simp add:if_P del: Collect_const) @@ -721,8 +721,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: 
Collect_const[symmetric] del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] - if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: list_case_helper injection_handler_returnOk @@ -749,13 +748,12 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError whenE_def - dc_def[symmetric]) + apply (simp add: injection_handler_throwError whenE_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk ccorres_invocationCatch_Inr performInvocation_def - bindE_assoc false_def) + bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeInsert_ccorres) @@ -768,16 +766,16 @@ lemma decodeCNodeInvocation_ccorres: apply (vcg exspec=setThreadState_modifies) apply simp apply (wp injection_wp_E[OF refl]) - apply (rule hoare_post_imp_R) - apply (rule_tac Q'="\rv. valid_pspace' and valid_queues + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' and valid_cap' rv and valid_objs' and tcb_at' thread and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_vcg_R_conj) apply (rule deriveCap_Null_helper[OF deriveCap_derived]) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) - apply (simp add: is_derived'_def badge_derived'_def - valid_tcb_state'_def) + apply (fastforce simp: is_derived'_def badge_derived'_def + valid_tcb_state'_def) apply (simp add: Collect_const_mem all_ex_eq_helper) apply (vcg exspec=deriveCap_modifies) apply wp @@ -826,12 +824,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: whenE_def injection_handler_returnOk - invocationCatch_def injection_handler_throwError - dc_def[symmetric]) + invocationCatch_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk - ccorres_invocationCatch_Inr false_def + ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) @@ -845,15 +842,15 @@ lemma decodeCNodeInvocation_ccorres: apply (vcg exspec=setThreadState_modifies) apply (simp add: conj_comms valid_tcb_state'_def) apply (wp injection_wp_E[OF refl]) - apply (rule hoare_post_imp_R) - apply (rule_tac Q'="\rv. valid_pspace' and valid_queues + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' and valid_cap' rv and valid_objs' and tcb_at' thread and (\s. 
sch_act_wf (ksSchedulerAction s) s)" in hoare_vcg_R_conj) apply (rule deriveCap_Null_helper [OF deriveCap_derived]) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) - apply (simp add: is_derived'_def badge_derived'_def) + apply (fastforce simp: is_derived'_def badge_derived'_def) apply (simp add: Collect_const_mem all_ex_eq_helper) apply (vcg exspec=deriveCap_modifies) apply (simp add: Collect_const_mem) @@ -887,7 +884,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply csymbr apply csymbr - apply (simp add: cap_get_tag_NullCap true_def) + apply (simp add: cap_get_tag_NullCap) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) @@ -906,7 +903,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: flip: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: if_not_P del: Collect_const) @@ -925,15 +922,14 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric] numeral_eqs) + apply (simp add: whenE_def injection_handler_throwError numeral_eqs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk ccorres_invocationCatch_Inr numeral_eqs performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) - apply (simp add: true_def ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) apply (rule ccorres_alternative2) apply (rule ccorres_return_CE, simp+)[1] @@ -961,14 +957,16 @@ lemma decodeCNodeInvocation_ccorres: apply (rule validE_R_validE) apply (rule_tac Q'="\a b. cte_wp_at' (\x. 
True) a b \ invs' b \ tcb_at' thread b \ sch_act_wf (ksSchedulerAction b) b \ valid_tcb_state' Restart b - \ Q2 b" for Q2 in hoare_post_imp_R) - prefer 2 - apply (clarsimp simp:cte_wp_at_ctes_of) - apply (drule ctes_of_valid') - apply (erule invs_valid_objs') - apply (clarsimp simp:valid_updateCapDataI invs_queues invs_valid_objs' invs_valid_pspace') - apply (assumption) - apply (wp hoare_vcg_all_lift_R injection_wp_E[OF refl] + \ Q2 b" for Q2 in hoare_strengthen_postE_R) + prefer 2 + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule ctes_of_valid') + apply (erule invs_valid_objs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (clarsimp simp:valid_updateCapDataI invs_valid_objs' invs_valid_pspace') + apply assumption + apply (wp hoare_vcg_all_liftE_R injection_wp_E[OF refl] lsfco_cte_at' hoare_vcg_const_imp_lift_R )+ apply (simp add: Collect_const_mem word_sle_def word_sless_def @@ -1025,13 +1023,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_returnOk bindE_assoc - injection_bindE[OF refl refl] split_def - dc_def[symmetric]) + injection_bindE[OF refl refl] split_def) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc)+ apply (ctac add: ccorres_injection_handler_csum1 [OF ensureEmptySlot_ccorres]) - apply (simp add: ccorres_invocationCatch_Inr performInvocation_def - dc_def[symmetric] bindE_assoc) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (ctac(no_vcg) add: invokeCNodeSaveCaller_ccorres) apply (rule ccorres_alternative2) @@ -1040,7 +1036,7 @@ lemma decodeCNodeInvocation_ccorres: apply (wp sts_valid_pspace_hangers)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_split_throws) apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg @@ -1070,8 +1066,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: unlessE_def whenE_def injection_handler_throwError - dc_def[symmetric] from_bool_0) + apply (simp add: unlessE_def whenE_def injection_handler_throwError from_bool_0) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: unlessE_def whenE_def injection_handler_returnOk @@ -1115,12 +1110,10 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: throwError_def return_def exception_defs syscall_error_rel_def syscall_error_to_H_cases) apply clarsimp - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const) apply csymbr apply (simp add: interpret_excaps_test_null excaps_map_def - if_1_0_0 dc_def[symmetric] del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: throwError_bind invocationCatch_def) @@ -1180,8 +1173,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const) apply csymbr apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1243,8 +1235,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr 
apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1252,8 +1243,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1267,7 +1257,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply wp apply (vcg exspec=invokeCNodeRotate_modifies) - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) apply (simp add: Collect_const_mem) @@ -1306,7 +1296,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule_tac Q'="\rvb. invs' and cte_at' rv and cte_at' rva and tcb_at' thread" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (clarsimp simp: cte_wp_at_ctes_of weak_derived_updateCapData capBadge_updateCapData_True) @@ -1331,16 +1321,16 @@ lemma decodeCNodeInvocation_ccorres: apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp @@ -1355,7 +1345,7 @@ lemma decodeCNodeInvocation_ccorres: apply vcg apply simp apply (wp injection_wp_E[OF refl] hoare_vcg_const_imp_lift_R - hoare_vcg_all_lift_R lsfco_cte_at' static_imp_wp + hoare_vcg_all_liftE_R lsfco_cte_at' hoare_weak_lift_imp | simp add: hasCancelSendRights_not_Null ctes_of_valid_strengthen cong: conj_cong | wp (once) hoare_drop_imps)+ @@ -1370,7 +1360,7 @@ lemma decodeCNodeInvocation_ccorres: apply simp apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' invs_valid_pspace' - ct_in_state'_def pred_tcb_at' invs_queues + ct_in_state'_def pred_tcb_at' cur_tcb'_def word_sle_def word_sless_def unat_lt2p[where 'a=32, folded word_bits_def]) apply (rule conjI) @@ -1384,7 +1374,7 @@ lemma decodeCNodeInvocation_ccorres: apply (frule interpret_excaps_eq) apply (clarsimp simp: excaps_map_def mask_def[where n=4] ccap_rights_relation_def rightsFromWord_wordFromRights - "StrictC'_thread_state_defs" map_comp_Some_iff + ThreadState_defs map_comp_Some_iff rf_sr_ksCurThread hd_conv_nth hd_drop_conv_nth) apply ((rule conjI | clarsimp simp: rightsFromWord_wordFromRights @@ -1393,8 +1383,7 @@ lemma decodeCNodeInvocation_ccorres: map_option_Some_eq2 neq_Nil_conv ccap_relation_def numeral_eqs hasCancelSendRights_not_Null ccap_relation_NullCap_iff[symmetric] - if_1_0_0 interpret_excaps_test_null - mdbRevocable_CL_cte_to_H false_def true_def + interpret_excaps_test_null mdbRevocable_CL_cte_to_H | clarsimp simp: typ_heap_simps' | frule length_ineq_not_Nil)+) 
done @@ -1403,9 +1392,6 @@ end context begin interpretation Arch . (*FIXME: arch_split*) -crunch valid_queues[wp]: insertNewCap "valid_queues" - (wp: crunch_wps) - lemmas setCTE_def3 = setCTE_def2[THEN eq_reflection] lemma setCTE_sch_act_wf[wp]: @@ -1475,7 +1461,7 @@ lemma seL4_MessageInfo_lift_def2: lemma globals_update_id: "globals_update (t_hrs_'_update (hrs_htd_update id)) x = x" - by (simp add:id_def hrs_htd_update_def) + by (simp add: hrs_htd_update_def) lemma getObjectSize_spec: "\s. \\\s. \t \ of_nat (length (enum::object_type list) - 1)\ Call getObjectSize_'proc @@ -1532,7 +1518,7 @@ shows "\ctes_of (s::kernel_state) (ptr_val p) = Some cte; is_aligned ptr bits; bits < word_bits; {ptr..ptr + 2 ^ bits - 1} \ {ptr_val p..ptr_val p + mask cteSizeBits} = {}; ((clift hp) :: (cte_C ptr \ cte_C)) p = Some to\ \ (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: (cte_C ptr \ cte_C)) p = Some to" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def restrict_map_def split:if_splits) + apply (clarsimp simp:lift_t_def lift_typ_heap_def restrict_map_def split:if_splits) apply (intro conjI impI) apply (case_tac hp) apply (clarsimp simp:typ_clear_region_def hrs_htd_update_def) @@ -1671,15 +1657,6 @@ lemma pspace_no_overlap_underlying_zero_update: apply blast done -lemma addrFromPPtr_mask: - "n \ 28 - \ addrFromPPtr ptr && mask n = ptr && mask n" - apply (simp add: addrFromPPtr_def pptrBaseOffset_def pptrBase_def - ARM_HYP.physBase_def) - apply word_bitwise - apply simp - done - lemma clearMemory_untyped_ccorres: "ccorres dc xfdc ((\s. invs' s \ (\cap. cte_wp_at' (\cte. cteCap cte = cap) ut_slot s @@ -1726,7 +1703,7 @@ lemma clearMemory_untyped_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply csymbr - apply (ctac add: cleanCacheRange_RAM_ccorres[unfolded dc_def]) + apply (ctac add: cleanCacheRange_RAM_ccorres) apply wp apply (simp add: guard_is_UNIV_def unat_of_nat word_bits_def capAligned_def word_of_nat_less) @@ -1931,8 +1908,7 @@ lemma resetUntypedCap_ccorres: apply (rule ccorres_Guard_Seq[where S=UNIV])? apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow) - apply (rule_tac idx="capFreeIndex (cteCap cte)" - in deleteObjects_ccorres[where p=slot, unfolded o_def]) + apply (rule_tac idx="capFreeIndex (cteCap cte)" in deleteObjects_ccorres[where p=slot]) apply ceqv apply clarsimp apply (simp only: ccorres_seq_cond_raise) @@ -2428,7 +2404,7 @@ lemma invokeUntyped_Retype_ccorres: (Call invokeUntyped_Retype_'proc)" apply (cinit lift: retypeBase_' srcSlot_' reset_' newType_' userSize_' deviceMemory_' destCNode_' destOffset_' destLength_' - simp: when_def) + simp: when_def archOverlap_def) apply (rule ccorres_move_c_guard_cte) apply csymbr apply (rule ccorres_abstract_cleanup) @@ -2497,7 +2473,7 @@ lemma invokeUntyped_Retype_ccorres: apply (clarsimp simp: misc unat_of_nat_eq[OF range_cover.weak, OF cover]) apply (vcg exspec=cap_untyped_cap_ptr_set_capFreeIndex_modifies) apply simp - apply (rule validE_validE_R, rule hoare_post_impErr, + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule hoare_vcg_conj_liftE1[rotated, where Q="\_ s. 
case gsCNodes s cnodeptr of None \ False | Some n \ length destSlots + unat start \ 2 ^ n"], @@ -2684,7 +2660,7 @@ lemma mapME_ensureEmptySlot': apply (erule meta_allE) apply wp apply (fold validE_R_def) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply clarsimp done @@ -2693,7 +2669,7 @@ lemma mapME_ensureEmptySlot: mapME (\x. injection_handler Inl (ensureEmptySlot (f x))) [S .e. (E::word32)] \\rva s. \slot. S \ slot \ slot \ E \ (\cte. cteCap cte = capability.NullCap \ ctes_of s (f slot) = Some cte)\, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule mapME_ensureEmptySlot') apply clarsimp done @@ -2863,7 +2839,6 @@ lemma Arch_isFrameType_spec: apply (auto simp: object_type_from_H_def ) done - lemma decodeUntypedInvocation_ccorres_helper: notes TripleSuc[simp] untypedBits_defs[simp] notes valid_untyped_inv_wcap'.simps[simp del] tl_drop_1[simp] @@ -3042,8 +3017,8 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (ctac add: ccorres_injection_handler_csum1 [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def]) apply (simp add: injection_liftE[OF refl]) - apply (simp add: liftE_liftM o_def split_def withoutFailure_def - hd_drop_conv_nth2 numeral_eqs[symmetric]) + apply (simp add: liftE_liftM split_def hd_drop_conv_nth2 + cong: ccorres_all_cong) apply (rule ccorres_nohs) apply (rule ccorres_getSlotCap_cte_at) apply (rule ccorres_move_c_guard_cte) @@ -3155,8 +3130,8 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (simp add: ccorres_cond_iffs returnOk_def) apply (rule ccorres_return_Skip') apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ - apply (simp add: ccorres_cond_iffs inl_rrel_inl_rrel) - apply (rule ccorres_return_C_errorE_inl_rrel, simp+)[1] + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_C_errorE_inl_rrel; simp) apply wp apply (simp add: all_ex_eq_helper) apply (vcg exspec=ensureEmptySlot_modifies) @@ -3253,8 +3228,7 @@ lemma decodeUntypedInvocation_ccorres_helper: performInvocation_def liftE_bindE bind_assoc) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_trim_returnE, (simp (no_asm))+) - apply (simp (no_asm) add: o_def dc_def[symmetric] bindE_assoc - id_def[symmetric] bind_bindE_assoc) + apply (simp (no_asm) add: bindE_assoc bind_bindE_assoc) apply (rule ccorres_seq_skip'[THEN iffD1]) apply (ctac(no_vcg) add: invokeUntyped_Retype_ccorres[where start = "args!4"]) apply (rule ccorres_alternative2) @@ -3302,7 +3276,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule conseqPre,vcg,clarsimp) apply vcg apply (rule ccorres_guard_imp[where Q =\ and Q' = UNIV,rotated], assumption+) - apply (simp add: o_def) + apply simp apply simp apply (rule checkFreeIndex_wp) apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat cap_get_tag_isCap @@ -3320,8 +3294,7 @@ lemma decodeUntypedInvocation_ccorres_helper: unat_of_nat_APIType_capBits word_size length_ineq_not_Nil not_less word_le_nat_alt isCap_simps valid_cap_simps') apply (strengthen word_of_nat_less) - apply (clarsimp simp: StrictC'_thread_state_defs mask_def true_def false_def - from_bool_0 ccap_relation_isDeviceCap2 + apply (clarsimp simp: ThreadState_defs mask_def ccap_relation_isDeviceCap2 split: if_split) apply (intro conjI impI; clarsimp simp: not_less shiftr_eq_0 unat_of_nat_APIType_capBits @@ -3333,10 +3306,9 @@ lemma decodeUntypedInvocation_ccorres_helper: and ex_cte_cap_to' (capCNodePtr rv) and (\s. case gsCNodes s (capCNodePtr rv) of None \ False | Some n \ args ! 4 + args ! 
5 - 1 < 2 ^ n) - and sch_act_simple and ct_active'" in hoare_post_imp_R) + and sch_act_simple and ct_active'" in hoare_strengthen_postE_R) prefer 2 - apply (clarsimp simp: invs_valid_objs' invs_mdb' - invs_queues ct_in_state'_def pred_tcb_at') + apply (clarsimp simp: invs_valid_objs' invs_mdb' ct_in_state'_def pred_tcb_at') apply (subgoal_tac "ksCurThread s \ ksIdleThread sa") prefer 2 apply clarsimp @@ -3367,7 +3339,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule validE_R_validE) apply (wp injection_wp_E[OF refl]) apply clarsimp - apply (simp add: ccHoarePost_def xfdc_def) + apply (simp add: ccHoarePost_def) apply (simp only: whileAnno_def[where I=UNIV and V=UNIV, symmetric]) apply (rule_tac V=UNIV in HoarePartial.reannotateWhileNoGuard) apply (vcg exspec=ensureEmptySlot_modifies) @@ -3379,7 +3351,7 @@ lemma decodeUntypedInvocation_ccorres_helper: \ invs' s \ ksCurThread s = thread \ valid_cap' r s \ (\rf\cte_refs' r (irq_node' s). ex_cte_cap_to' rf s) - \ sch_act_simple s \ ct_active' s" in hoare_post_imp_R) + \ sch_act_simple s \ ct_active' s" in hoare_strengthen_postE_R) apply clarsimp apply (wp injection_wp_E[OF refl] getSlotCap_cap_to' getSlotCap_capAligned @@ -3423,8 +3395,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (clarsimp simp: hd_drop_conv_nth2 hd_conv_nth neq_Nil_lengthI ct_in_state'_def pred_tcb_at' rf_sr_ksCurThread mask_eq_iff_w2p - "StrictC'_thread_state_defs" numeral_eqs[symmetric] - cap_get_tag_isCap cte_wp_at_ctes_of + numeral_eqs[symmetric] cap_get_tag_isCap cte_wp_at_ctes_of unat_eq_0 ccHoarePost_def) apply (rule conjI) apply (clarsimp simp: linorder_not_less isCap_simps) @@ -3495,18 +3466,16 @@ shows apply (rule ccorres_guard_imp2) apply (rule monadic_rewrite_ccorres_assemble) apply (rule_tac isBlocking=isBlocking and isCall=isCall and buffer=buffer - in decodeUntypedInvocation_ccorres_helper[unfolded K_def]) + in decodeUntypedInvocation_ccorres_helper) apply assumption - apply (rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_bindE[OF monadic_rewrite_refl]) - apply (simp add: liftE_bindE stateAssert_def2 bind_assoc) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P=x in monadic_rewrite_gen_asm) - apply simp + apply (simp add: liftE_bindE stateAssert_def2 bind_assoc) + apply (monadic_rewrite_r monadic_rewrite_if_r_True) + apply (monadic_rewrite_r_method monadic_rewrite_symb_exec_r_drop wpsimp) apply (rule monadic_rewrite_refl) - apply (wp | simp)+ - apply (simp add: gets_bind_ign) + apply wpsimp + apply (rule monadic_rewrite_refl) apply (rule monadic_rewrite_refl) apply (clarsimp simp: ex_cte_cap_wp_to'_def excaps_in_mem_def) apply (drule(1) bspec)+ diff --git a/proof/crefine/ARM_HYP/IpcCancel_C.thy b/proof/crefine/ARM_HYP/IpcCancel_C.thy index 5623d522e7..78d4b3fe8b 100644 --- a/proof/crefine/ARM_HYP/IpcCancel_C.thy +++ b/proof/crefine/ARM_HYP/IpcCancel_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -14,12 +15,12 @@ begin declare ctcb_size_bits_ge_4[simp] lemma cready_queues_index_to_C_in_range': - assumes prems: "qdom \ ucast maxDom" "prio \ ucast maxPrio" + assumes prems: "qdom \ maxDomain" "prio \ maxPriority" shows "cready_queues_index_to_C qdom prio < num_tcb_queues" proof - have P: "unat prio < numPriorities" using prems - by (simp add: numPriorities_def seL4_MaxPrio_def Suc_le_lessD unat_le_helper) + by (simp add: 
numPriorities_def Suc_le_lessD unat_le_helper maxDomain_def maxPriority_def) have Q: "unat qdom < numDomains" using prems by (simp add: maxDom_to_H le_maxDomain_eq_less_numDomains word_le_nat_alt) @@ -29,60 +30,22 @@ proof - qed lemmas cready_queues_index_to_C_in_range = - cready_queues_index_to_C_in_range'[simplified num_tcb_queues_def] + cready_queues_index_to_C_in_range'[simplified num_tcb_queues_val] lemma cready_queues_index_to_C_inj: "\ cready_queues_index_to_C qdom prio = cready_queues_index_to_C qdom' prio'; - prio \ ucast maxPrio; prio' \ ucast maxPrio \ \ prio = prio' \ qdom = qdom'" + prio \ maxPriority; prio' \ maxPriority \ \ prio = prio' \ qdom = qdom'" apply (rule context_conjI) - apply (auto simp: cready_queues_index_to_C_def numPriorities_def + apply (auto simp: cready_queues_index_to_C_def numPriorities_def maxPriority_def seL4_MaxPrio_def word_le_nat_alt dest: arg_cong[where f="\x. x mod 256"]) done lemma cready_queues_index_to_C_distinct: - "\ qdom = qdom' \ prio \ prio'; prio \ ucast maxPrio; prio' \ ucast maxPrio \ + "\ qdom = qdom' \ prio \ prio'; prio \ maxPriority; prio' \ maxPriority \ \ cready_queues_index_to_C qdom prio \ cready_queues_index_to_C qdom' prio'" apply (auto simp: cready_queues_index_to_C_inj) done -lemma cstate_relation_ksReadyQueues_update: - "\ cstate_relation hs cs; arr = ksReadyQueues_' cs; - sched_queue_relation' (clift (t_hrs_' cs)) v (head_C v') (end_C v'); - qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ cstate_relation (ksReadyQueues_update (\qs. qs ((qdom, prio) := v)) hs) - (ksReadyQueues_'_update (\_. Arrays.update arr - (cready_queues_index_to_C qdom prio) v') cs)" - apply (clarsimp simp: cstate_relation_def Let_def - cmachine_state_relation_def - carch_state_relation_def carch_globals_def - cready_queues_relation_def seL4_MinPrio_def minDom_def) - apply (frule cready_queues_index_to_C_in_range, assumption) - apply clarsimp - apply (frule_tac qdom=qdoma and prio=prioa in cready_queues_index_to_C_in_range, assumption) - apply (frule cready_queues_index_to_C_distinct, assumption+) - apply clarsimp - done - -lemma cmap_relation_drop_fun_upd: - "\ cm x = Some v; \v''. rel v'' v = rel v'' v' \ - \ cmap_relation am (cm (x \ v')) f rel - = cmap_relation am cm f rel" - apply (simp add: cmap_relation_def) - apply (rule conj_cong[OF refl]) - apply (rule ball_cong[OF refl]) - apply (auto split: if_split) - done - -lemma valid_queuesD': - "\ obj_at' (inQ d p) t s; valid_queues' s \ - \ t \ set (ksReadyQueues s (d, p))" - by (simp add: valid_queues'_def) - -lemma invs_valid_queues'[elim!]: - "invs' s \ valid_queues' s" - by (simp add: invs'_def valid_state'_def) - - lemma ntfn_ptr_get_queue_spec: "\s. \ \ {\. 
s = \ \ \ \\<^sub>c \<^bsup>\\<^esup>ntfnPtr} \ret__struct_tcb_queue_C :== PROC ntfn_ptr_get_queue(\ntfnPtr) \head_C \ret__struct_tcb_queue_C = Ptr (ntfnQueue_head_CL (notification_lift (the (cslift s \<^bsup>s\<^esup>ntfnPtr)))) \ @@ -228,22 +191,19 @@ lemma cancelSignal_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) apply simp - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def) - apply (simp add: carch_state_relation_def carch_globals_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def) + apply (simp add: carch_state_relation_def carch_globals_def) apply (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) @@ -266,30 +226,27 @@ lemma cancelSignal_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue) - apply fastforce - apply assumption+ - apply simp - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def isWaitingNtfn_def - split: ntfn.splits split del: if_split) - apply (erule iffD1 [OF tcb_queue_relation'_cong [OF refl _ _ refl], rotated -1]) - apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff) - apply (simp add: tcb_queue_relation'_next_mask) - apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff) - apply (simp add: tcb_queue_relation'_prev_mask) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue) + apply fastforce + apply assumption+ apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def isWaitingNtfn_def + split: ntfn.splits split del: if_split) + apply (erule iffD1 [OF tcb_queue_relation'_cong [OF refl _ _ refl], rotated -1]) + apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff) + apply (simp add: tcb_queue_relation'_next_mask) + apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff) + apply (simp add: 
tcb_queue_relation'_prev_mask) + apply simp apply (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) @@ -424,9 +381,9 @@ lemma isStopped_ccorres [corres]: apply vcg apply clarsimp apply clarsimp - apply (clarsimp simp: to_bool_def true_def false_def typ_heap_simps - ctcb_relation_thread_state_to_tsType split: thread_state.splits) - apply (simp add: "StrictC'_thread_state_defs")+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ done lemma isRunnable_ccorres [corres]: @@ -452,71 +409,9 @@ lemma isRunnable_ccorres [corres]: apply (vcg) apply (clarsimp) apply (clarsimp) - apply (clarsimp simp: to_bool_def true_def false_def typ_heap_simps - ctcb_relation_thread_state_to_tsType split: thread_state.splits) - apply (simp add: "StrictC'_thread_state_defs")+ -done - - - -lemma tcb_queue_relation_update_head: - fixes getNext_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" and - getPrev_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" - assumes qr: "tcb_queue_relation getNext getPrev mp queue NULL qhead" - and qh': "qhead' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qhead' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qhead' \ NULL" - and fgN: "fg_cons getNext (getNext_update \ (\x _. x))" - and fgP: "fg_cons getPrev (getPrev_update \ (\x _. x))" - and npu: "\f t. getNext (getPrev_update f t) = getNext t" - and pnu: "\f t. getPrev (getNext_update f t) = getPrev t" - shows "tcb_queue_relation getNext getPrev - (upd_unless_null qhead (getPrev_update (\_. qhead') (the (mp qhead))) - (mp(qhead' := Some (getPrev_update (\_. NULL) (getNext_update (\_. qhead) tcb))))) - (ctcb_ptr_to_tcb_ptr qhead' # queue) NULL qhead'" - using qr qh' cs_tcb valid_ep qhN - apply (subgoal_tac "qhead \ qhead'") - apply (clarsimp simp: pnu upd_unless_null_def fg_consD1 [OF fgN] fg_consD1 [OF fgP] pnu npu) - apply (cases queue) - apply simp - apply (frule (2) tcb_queue_relation_next_not_NULL) - apply simp - apply (clarsimp simp: fg_consD1 [OF fgN] fg_consD1 [OF fgP] pnu npu) - apply (subst tcb_queue_relation_cong [OF refl refl refl, where mp' = mp]) - apply (clarsimp simp: inj_eq) - apply (intro impI conjI) - apply (frule_tac x = x in imageI [where f = tcb_ptr_to_ctcb_ptr]) - apply simp - apply simp - apply simp - apply clarsimp - apply (cases queue) - apply simp - apply simp - done - -lemma tcbSchedEnqueue_update: - assumes sr: "sched_queue_relation' mp queue qhead qend" - and qh': "qhead' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qhead' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qhead' \ NULL" - shows - "sched_queue_relation' - (upd_unless_null qhead (tcbSchedPrev_C_update (\_. 
qhead') (the (mp qhead))) - (mp(qhead' \ tcb\tcbSchedNext_C := qhead, tcbSchedPrev_C := NULL\))) - (ctcb_ptr_to_tcb_ptr qhead' # queue) qhead' (if qend = NULL then qhead' else qend)" - using sr qh' cs_tcb valid_ep qhN - apply - - apply (erule tcb_queue_relationE') - apply (rule tcb_queue_relationI') - apply (erule (5) tcb_queue_relation_update_head - [where getNext_update = tcbSchedNext_C_update and getPrev_update = tcbSchedPrev_C_update], simp_all)[1] - apply simp - apply (intro impI) - apply (erule (1) tcb_queue_relation_not_NULL') - apply simp + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ done lemma tcb_ptr_to_ctcb_ptr_imageD: @@ -531,94 +426,8 @@ lemma ctcb_ptr_to_tcb_ptr_imageI: apply simp done -lemma tcb_queue'_head_end_NULL: - assumes qr: "tcb_queue_relation' getNext getPrev mp queue qhead qend" - and tat: "\t\set queue. tcb_at' t s" - shows "(qend = NULL) = (qhead = NULL)" - using qr tat - apply - - apply (erule tcb_queue_relationE') - apply (simp add: tcb_queue_head_empty_iff) - apply (rule impI) - apply (rule tcb_at_not_NULL) - apply (erule bspec) - apply simp - done - -lemma tcb_queue_relation_qhead_mem: - "\ tcb_queue_relation getNext getPrev mp queue NULL qhead; - (\tcb\set queue. tcb_at' tcb t) \ - \ qhead \ NULL \ ctcb_ptr_to_tcb_ptr qhead \ set queue" - by (clarsimp simp: tcb_queue_head_empty_iff tcb_queue_relation_head_hd) - -lemma tcb_queue_relation_qhead_valid: - "\ tcb_queue_relation getNext getPrev (cslift s') queue NULL qhead; - (s, s') \ rf_sr; (\tcb\set queue. tcb_at' tcb s) \ - \ qhead \ NULL \ s' \\<^sub>c qhead" - apply (frule (1) tcb_queue_relation_qhead_mem) - apply clarsimp - apply(drule (3) tcb_queue_memberD) - apply (simp add: h_t_valid_clift_Some_iff) - done - -lemmas tcb_queue_relation_qhead_mem' = tcb_queue_relation_qhead_mem [OF tcb_queue_relation'_queue_rel] -lemmas tcb_queue_relation_qhead_valid' = tcb_queue_relation_qhead_valid [OF tcb_queue_relation'_queue_rel] - - -lemma valid_queues_valid_q: - "valid_queues s \ (\tcb\set (ksReadyQueues s (qdom, prio)). tcb_at' tcb s) \ distinct (ksReadyQueues s (qdom, prio))" - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec [where x = qdom]) - apply (drule spec [where x = prio]) - apply clarsimp - apply (drule (1) bspec, erule obj_at'_weakenE) - apply simp - done - -lemma invs_valid_q: - "invs' s \ (\tcb\set (ksReadyQueues s (qdom, prio)). 
tcb_at' tcb s) \ distinct (ksReadyQueues s (qdom, prio))" - apply (rule valid_queues_valid_q) - apply (clarsimp simp: invs'_def valid_state'_def) - done - -lemma tcbQueued_not_in_queues: - assumes vq: "valid_queues s" - and objat: "obj_at' (Not \ tcbQueued) thread s" - shows "thread \ set (ksReadyQueues s (d, p))" - using vq objat - apply - - apply clarsimp - apply (drule (1) valid_queues_obj_at'D) - apply (erule obj_atE')+ - apply (clarsimp simp: inQ_def) - done - declare unat_ucast_8_32[simp] -lemma rf_sr_sched_queue_relation: - "\ (s, s') \ rf_sr; d \ ucast maxDom; p \ ucast maxPrio \ - \ sched_queue_relation' (cslift s') (ksReadyQueues s (d, p)) - (head_C (index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p))) - (end_C (index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p)))" - unfolding rf_sr_def cstate_relation_def cready_queues_relation_def - apply (clarsimp simp: Let_def seL4_MinPrio_def minDom_def) - done - -lemma ready_queue_not_in: - assumes vq: "valid_queues s" - and inq: "t \ set (ksReadyQueues s (d, p))" - and neq: "d \ d' \ p \ p'" - shows "t \ set (ksReadyQueues s (d', p'))" -proof - assume "t \ set (ksReadyQueues s (d', p'))" - hence "obj_at' (inQ d' p') t s" using vq by (rule valid_queues_obj_at'D) - moreover have "obj_at' (inQ d p) t s" using inq vq by (rule valid_queues_obj_at'D) - ultimately show False using neq - by (clarsimp elim!: obj_atE' simp: inQ_def) -qed - lemma ctcb_relation_unat_prio_eq: "ctcb_relation tcb tcb' \ unat (tcbPriority tcb) = unat (tcbPriority_C tcb')" apply (clarsimp simp: ctcb_relation_def) @@ -652,146 +461,6 @@ lemma threadSet_queued_ccorres [corres]: apply (clarsimp simp: typ_heap_simps) done -lemma ccorres_pre_getQueue: - assumes cc: "\queue. ccorres r xf (P queue) (P' queue) hs (f queue) c" - shows "ccorres r xf (\s. P (ksReadyQueues s (d, p)) s \ d \ maxDomain \ p \ maxPriority) - {s'. \queue. (let cqueue = index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p) in - sched_queue_relation' (cslift s') queue (head_C cqueue) (end_C cqueue)) \ s' \ P' queue} - hs (getQueue d p >>= (\queue. f queue)) c" - apply (rule ccorres_guard_imp2) - apply (rule ccorres_symb_exec_l2) - defer - defer - apply (rule gq_sp) - defer - apply (rule ccorres_guard_imp) - apply (rule cc) - apply clarsimp - apply assumption - apply assumption - apply (clarsimp simp: getQueue_def gets_exs_valid) - apply clarsimp - apply (drule spec, erule mp) - apply (simp add: Let_def) - apply (erule rf_sr_sched_queue_relation) - apply (simp add: maxDom_to_H maxPrio_to_H)+ - done - -lemma state_relation_queue_update_helper': - "\ (s, s') \ rf_sr; - (\d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p))); - globals t = ksReadyQueues_'_update - (\_. Arrays.update (ksReadyQueues_' (globals s')) prio' q') - (t_hrs_'_update f (globals s')); - sched_queue_relation' (cslift t) q (head_C q') (end_C q'); - cslift t |` ( - tcb_ptr_to_ctcb_ptr ` S ) - = cslift s' |` ( - tcb_ptr_to_ctcb_ptr ` S ); - option_map tcb_null_sched_ptrs \ cslift t - = option_map tcb_null_sched_ptrs \ cslift s'; - cslift_all_but_tcb_C t s'; - zero_ranges_are_zero (gsUntypedZeroRanges s) (f (t_hrs_' (globals s'))) - = zero_ranges_are_zero (gsUntypedZeroRanges s) (t_hrs_' (globals s')); - hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s')); - prio' = cready_queues_index_to_C qdom prio; - \x \ S. obj_at' (inQ qdom prio) x s - \ (obj_at' (\tcb. tcbPriority tcb = prio) x s - \ obj_at' (\tcb. 
tcbDomain tcb = qdom) x s) - \ (tcb_at' x s \ (\d' p'. (d' \ qdom \ p' \ prio) - \ x \ set (ksReadyQueues s (d', p')))); - S \ {}; qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ (s \ksReadyQueues := (ksReadyQueues s)((qdom, prio) := q)\, t) \ rf_sr" - apply (subst(asm) disj_imp_rhs) - apply (subst obj_at'_and[symmetric]) - apply (rule disjI1, erule obj_at'_weakenE, simp add: inQ_def) - apply (subst(asm) disj_imp_rhs) - apply (subst(asm) obj_at'_and[symmetric]) - apply (rule conjI, erule obj_at'_weakenE, simp) - apply (rule allI, rule allI) - apply (drule_tac x=d' in spec) - apply (drule_tac x=p' in spec) - apply clarsimp - apply (drule(1) bspec) - apply (clarsimp simp: inQ_def obj_at'_def) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (intro conjI) - \ \cpspace_relation\ - apply (erule nonemptyE, drule(1) bspec) - apply (clarsimp simp: cpspace_relation_def) - apply (drule obj_at_ko_at', clarsimp) - apply (rule cmap_relationE1, assumption, - erule ko_at_projectKO_opt) - apply (frule null_sched_queue) - apply (frule null_sched_epD) - apply (intro conjI) - \ \tcb relation\ - apply (drule ctcb_relation_null_queue_ptrs, - simp_all)[1] - \ \endpoint relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (erule cendpoint_relation_upd_tcb_no_queues, simp+) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (erule cnotification_relation_upd_tcb_no_queues, simp+) - \ \ready queues\ - apply (simp add: cready_queues_relation_def Let_def cready_queues_index_to_C_in_range - seL4_MinPrio_def minDom_def) - apply clarsimp - apply (frule cready_queues_index_to_C_distinct, assumption+) - apply (clarsimp simp: cready_queues_index_to_C_in_range all_conj_distrib) - apply (rule iffD1 [OF tcb_queue_relation'_cong[OF refl], rotated -1], - drule spec, drule spec, erule mp, simp+) - apply clarsimp - apply (drule_tac x="tcb_ptr_to_ctcb_ptr x" in fun_cong)+ - apply (clarsimp simp: restrict_map_def - split: if_split_asm) - apply (simp_all add: carch_state_relation_def cmachine_state_relation_def - h_t_valid_clift_Some_iff) - done - -lemma state_relation_queue_update_helper: - "\ (s, s') \ rf_sr; valid_queues s; - globals t = ksReadyQueues_'_update - (\_. Arrays.update (ksReadyQueues_' (globals s')) prio' q') - (t_hrs_'_update f (globals s')); - sched_queue_relation' (cslift t) q (head_C q') (end_C q'); - cslift t |` ( - tcb_ptr_to_ctcb_ptr ` S ) - = cslift s' |` ( - tcb_ptr_to_ctcb_ptr ` S ); - option_map tcb_null_sched_ptrs \ cslift t - = option_map tcb_null_sched_ptrs \ cslift s'; - cslift_all_but_tcb_C t s'; - zero_ranges_are_zero (gsUntypedZeroRanges s) (f (t_hrs_' (globals s'))) - = zero_ranges_are_zero (gsUntypedZeroRanges s) (t_hrs_' (globals s')); - hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s')); - prio' = cready_queues_index_to_C qdom prio; - \x \ S. obj_at' (inQ qdom prio) x s - \ (obj_at' (\tcb. tcbPriority tcb = prio) x s - \ obj_at' (\tcb. tcbDomain tcb = qdom) x s) - \ (tcb_at' x s \ (\d' p'. (d' \ qdom \ p' \ prio) - \ x \ set (ksReadyQueues s (d', p')))); - S \ {}; qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ (s \ksReadyQueues := (ksReadyQueues s)((qdom, prio) := q)\, t) \ rf_sr" - apply (subgoal_tac "\d p. (\t\set (ksReadyQueues s (d, p)). 
obj_at' (inQ d p) t s) - \ distinct(ksReadyQueues s (d, p))") - apply (erule(5) state_relation_queue_update_helper', simp_all) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE, clarsimp) - done - -(* FIXME: move *) -lemma from_bool_vals [simp]: - "from_bool True = scast true" - "from_bool False = scast false" - "scast true \ scast false" - by (auto simp add: from_bool_def true_def false_def) - (* FIXME: move *) lemma cmap_relation_no_upd: "\ cmap_relation a c f rel; a p = Some ko; rel ko v; inj f \ \ cmap_relation a (c(f p \ v)) f rel" @@ -833,8 +502,8 @@ lemma cready_queues_index_to_C_def2: lemma ready_queues_index_spec: "\s. \ \ {s'. s' = s \ (Kernel_Config.numDomains \ 1 \ dom_' s' = 0)} Call ready_queues_index_'proc - \\ret__unsigned_long = (dom_' s) * 0x100 + (prio_' s)\" - by vcg (simp add: numDomains_sge_1_simp) + \\ret__unsigned_long = (dom_' s) * word_of_nat numPriorities + (prio_' s)\" + by vcg (simp add: numDomains_sge_1_simp numPriorities_def) lemma prio_to_l1index_spec: "\s. \ \ {s} Call prio_to_l1index_'proc @@ -970,15 +639,6 @@ lemma cmachine_state_relation_enqueue_simp: unfolding cmachine_state_relation_def by clarsimp -lemma tcb_queue_relation'_empty_ksReadyQueues: - "\ sched_queue_relation' (cslift x) (q s) NULL NULL ; \t\ set (q s). tcb_at' t s \ \ q s = []" - apply (clarsimp simp add: tcb_queue_relation'_def) - apply (subst (asm) eq_commute) - apply (cases "q s" rule: rev_cases, simp) - apply (clarsimp simp: tcb_at_not_NULL) - done - - lemma invert_prioToL1Index_c_simp: "p \ maxPriority \ @@ -992,13 +652,247 @@ lemma c_invert_assist: "7 - (ucast (p :: priority) >> 5 :: machine_word) < 8" using prio_ucast_shiftr_wordRadix_helper'[simplified wordRadix_def] by - (rule word_less_imp_diff_less, simp_all) +lemma addToBitmap_ccorres: + "ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast prio\) hs + (addToBitmap tdom prio) (Call addToBitmap_'proc)" + supply prio_and_dom_limit_helpers[simp] invert_prioToL1Index_c_simp[simp] + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (frule maxDomain_le_unat_ucast_explicit) + apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (intro conjI impI allI) + apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (rule conjI) + apply (clarsimp intro!: cbitmap_L1_relation_bit_set) + apply (fastforce dest!: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) + done + +lemma rf_sr_tcb_update_twice: + "h_t_valid (hrs_htd (hrs2 (globals s') (t_hrs_' (gs2 (globals s'))))) c_guard + (ptr (t_hrs_' (gs2 (globals s'))) (globals s')) + \ ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs :: tcb_C ptr) (v ths gs)) + (hrs_mem_update (heap_update (ptr ths gs) (v' ths gs)) (hrs2 gs ths))) (gs2 gs)) s') \ rf_sr) + = ((s, globals_update (\gs. t_hrs_'_update (\ths. 
+ hrs_mem_update (heap_update (ptr ths gs) (v ths gs)) (hrs2 gs ths)) (gs2 gs)) s') \ rf_sr)" + by (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def typ_heap_simps' + carch_state_relation_def cmachine_state_relation_def + packed_heap_update_collapse_hrs) + +lemmas rf_sr_tcb_update_no_queue_gen2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue_gen, simplified] + +lemma tcb_queue_prepend_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueuePrepend queue tcbPtr) (Call tcb_queue_prepend_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. queue_' s' = cqueue" + in ccorres_cond_both') + apply fastforce + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. 
(s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueHead queue)) s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply clarsimp + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma tcb_queue_append_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s) + \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueAppend queue tcbPtr) (Call tcb_queue_append_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. queue_' s' = cqueue" + in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. 
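(* tcb_queue_prepend_ccorres above and tcb_queue_append_ccorres here verify the two ways of
   inserting a TCB into a ready queue. The queue value itself carries only the head and end
   pointers (ctcb_queue_relation); the links live in each TCB's scheduler prev/next fields,
   which is why the non-empty case needs a pair of threadSet updates, one on the inserted TCB
   and one on the old head (respectively the old end). An illustrative sketch of the prepend
   case only, with hypothetical field names rather than the kernel source:

     if (queue.head == NULL) {
         queue.end = tcb;            /* empty queue: tcb becomes both head and end */
     } else {
         tcb->next = queue.head;     /* link in front of the old head */
         queue.head->prev = tcb;
     }
     queue.head = tcb;
*)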
(s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueEnd queue)) s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma getQueue_ccorres: + "ccorres ctcb_queue_relation queue_' + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx___unsigned_long = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (getQueue tdom prio) (\queue :== \ksReadyQueues.[unat \idx___unsigned_long])" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: getQueue_def gets_def get_def bind_def return_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + done + +lemma setQueue_ccorres: + "ctcb_queue_relation queue cqueue \ + ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx___unsigned_long = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (setQueue tdom prio queue) + (Basic (\s. globals_update + (ksReadyQueues_'_update + (\_. Arrays.update (ksReadyQueues_' (globals s)) (unat (idx___unsigned_long_' s)) cqueue)) s))" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: setQueue_def get_def modify_def put_def bind_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + apply (frule cready_queues_index_to_C_distinct, assumption+) + apply (frule_tac qdom=d and prio=p in cready_queues_index_to_C_in_range) + apply fastforce + apply clarsimp + done + +crunch (empty_fail) empty_fail[wp]: isRunnable + lemma tcbSchedEnqueue_ccorres: "ccorres dc xfdc - (valid_queues and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - hs - (tcbSchedEnqueue t) - (Call tcbSchedEnqueue_'proc)" + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedEnqueue t) (Call tcbSchedEnqueue_'proc)" proof - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] note invert_prioToL1Index_c_simp[simp] @@ -1008,35 +902,13 @@ proof - note word_less_1[simp del] show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. 
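(* getQueue_ccorres and setQueue_ccorres above relate the abstract ksReadyQueues function to
   the flat C array of queues: the entry for domain d and priority p sits at index
   d * numPriorities + p (cready_queues_index_to_C). This is also why ready_queues_index_spec
   earlier in the file now states the result as dom * word_of_nat numPriorities + prio rather
   than the literal 0x100. For instance, with numPriorities = 256, domain 1 and priority 5
   give index 1 * 256 + 5 = 261. *)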
rv = to_bool rv'" - and xf'="ret__unsigned_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def unless_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="prio_'" in ccorres_split_nothrow) + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac runnable) + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_'" + in ccorres_split_nothrow) apply (rule threadGet_vcg_corres) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -1044,238 +916,246 @@ proof - apply (drule spec, drule(1) mp, clarsimp) apply (clarsimp simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="\s. valid_queues s \ (\p. t \ set (ksReadyQueues s p)) - \ (\tcb. 
ko_at' tcb t s \ tcbDomain tcb =rva - \ tcbPriority tcb = rvb \ valid_tcb' tcb s)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs null_def) - apply (clarsimp simp: queue_in_range valid_tcb'_def) - apply (rule conjI; clarsimp simp: queue_in_range) - (* queue is empty, set t to be new queue *) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (subgoal_tac - "head_C (ksReadyQueues_' - (globals x).[cready_queues_index_to_C (tcbDomain tcb) (tcbPriority tcb)]) = NULL") - prefer 2 - apply (frule_tac s=\ in tcb_queue'_head_end_NULL; simp add: valid_queues_valid_q) - apply (subgoal_tac - "end_C (ksReadyQueues_' - (globals x).[cready_queues_index_to_C (tcbDomain tcb) (tcbPriority tcb)]) = NULL") - prefer 2 - apply (frule_tac s=\ in tcb_queue'_head_end_NULL[symmetric]; simp add: valid_queues_valid_q) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (frule maxDomain_le_unat_ucast_explicit) - apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (simp add: t_hrs_ksReadyQueues_upd_absorb) - apply (rule conjI) - apply (clarsimp simp: l2BitmapSize_def' wordRadix_def c_invert_assist) - - apply (subst rf_sr_drop_bitmaps_enqueue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_set) - apply (fastforce intro: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) - - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (drule_tac qhead'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedEnqueue_update, - simp_all add: valid_queues_valid_q)[1] - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - - apply (erule(1) state_relation_queue_update_helper[where S="{t}"], - (simp | rule globals.equality)+, - simp_all add: cready_queues_index_to_C_def2 numPriorities_def - t_hrs_ksReadyQueues_upd_absorb upd_unless_null_def - typ_heap_simps)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def typ_heap_simps c_guard_clift - elim: obj_at'_weaken)+ - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply clarsimp - apply (rule conjI; clarsimp simp: queue_in_range) - (* invalid, disagreement between C and Haskell on emptiness of queue *) - apply (drule (1) obj_at_cslift_tcb) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def) - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply clarsimp - apply (drule tcb_queue_relation'_empty_ksReadyQueues; simp add: valid_queues_valid_q) - (* queue was not empty, add t to queue and leave bitmaps alone *) - apply (drule (1) obj_at_cslift_tcb) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (clarsimp simp: 
cready_queues_index_to_C_def numPriorities_def) - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply clarsimp - apply (frule_tac t=\ in tcb_queue_relation_qhead_mem') - apply (simp add: valid_queues_valid_q) - apply (frule(1) tcb_queue_relation_qhead_valid') - apply (simp add: valid_queues_valid_q) - apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff numPriorities_def - cready_queues_index_to_C_def2) - apply (drule_tac qhead'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedEnqueue_update, - simp_all add: valid_queues_valid_q)[1] - apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (frule(2) obj_at_cslift_tcb[OF valid_queues_obj_at'D]) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="{t, v}" for v in state_relation_queue_update_helper, - (simp | rule globals.equality)+, - simp_all add: typ_heap_simps if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 upd_unless_null_def - cong: if_cong split del: if_split - del: fun_upd_restrict_conv)[1] - apply simp - apply (rule conjI) + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) apply clarsimp - apply (drule_tac s="tcb_ptr_to_ctcb_ptr t" in sym, simp) - apply (clarsimp simp add: fun_upd_twist) - prefer 4 - apply (simp add: obj_at'_weakenE[OF _ TrueI]) - apply (rule disjI1, erule valid_queues_obj_at'D) - apply simp+ - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (simp add: typ_heap_simps c_guard_clift) - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) - apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - apply (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def inQ_def - dest!: valid_queues_obj_at'D) - done + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. 
queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_prepend_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2) + done qed -lemmas tcbSchedDequeue_update - = tcbDequeue_update[where tn=tcbSchedNext_C and tn_update=tcbSchedNext_C_update - and tp=tcbSchedPrev_C and tp_update=tcbSchedPrev_C_update, - simplified] - -lemma tcb_queue_relation_prev_next: - "\ tcb_queue_relation tn tp mp queue qprev qhead; - tcbp \ set queue; distinct (ctcb_ptr_to_tcb_ptr qprev # queue); - \t \ set queue. tcb_at' t s; qprev \ tcb_Ptr 0 \ mp qprev \ None; - mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb \ - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tn tcb) \ None \ tn tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tp tcb \ tcb_Ptr 0 \ (tp tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ tp tcb = qprev) - \ mp (tp tcb) \ None \ tp tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tp tcb)" - apply (induct queue arbitrary: qprev qhead) - apply simp - apply simp - apply (erule disjE) - apply clarsimp - apply (case_tac "queue") - apply clarsimp - apply clarsimp - apply (rule conjI) - apply clarsimp - apply clarsimp - apply (drule_tac f=ctcb_ptr_to_tcb_ptr in arg_cong[where y="tp tcb"], simp) - apply clarsimp - apply fastforce - done - -lemma tcb_queue_relation_prev_next': - "\ tcb_queue_relation' tn tp mp queue qhead qend; tcbp \ set queue; distinct queue; - \t \ set queue. 
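(* The reworked tcbSchedEnqueue_ccorres above follows the C control flow directly: test the
   tcbQueued flag and, if the thread is not yet queued, read its domain and priority, fetch
   the corresponding queue, set the bitmap bit when that queue is empty, prepend the thread
   with tcb_queue_prepend, write the queue back, and mark the thread as queued. Note the
   weaker precondition compared with the old proof: valid_queues is gone, and queue
   well-formedness is instead obtained from the ksReadyQueues_asrt state assertions handled by
   ccorres_stateAssert at the start. tcbSchedAppend_ccorres below is the same argument with
   tcb_queue_append and the end of the queue in place of the head. *)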
tcb_at' t s; mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb \ - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tn tcb) \ None \ tn tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tp tcb \ tcb_Ptr 0 \ tp tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tp tcb) \ None \ tp tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tp tcb)" - apply (clarsimp simp: tcb_queue_relation'_def split: if_split_asm) - apply (drule(1) tcb_queue_relation_prev_next, simp_all) - apply (fastforce dest: tcb_at_not_NULL) - apply clarsimp - done +lemma tcbSchedAppend_ccorres: + "ccorres dc xfdc + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedAppend t) (Call tcbSchedAppend_'proc)" +proof - + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] -(* L1 bitmap only updated if L2 entry bits end up all zero *) -lemma rf_sr_drop_bitmaps_dequeue_helper_L2: - "\ (\,\') \ rf_sr ; - cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ -((\\ksReadyQueues := ksqupd, - ksReadyQueuesL2Bitmap := ksqL2upd\, - \'\idx___unsigned_long_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueues_' := ksqupd'\\) - \ rf_sr) - = -((\\ksReadyQueues := ksqupd\, - \'\idx___unsigned_long_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueues_' := ksqupd'\\) \ rf_sr) -" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + note word_less_1[simp del] -lemma rf_sr_drop_bitmaps_dequeue_helper: - "\ (\,\') \ rf_sr ; - cbitmap_L1_relation ksqL1upd' ksqL1upd ; cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ -((\\ksReadyQueues := ksqupd, - ksReadyQueuesL2Bitmap := ksqL2upd, - ksReadyQueuesL1Bitmap := ksqL1upd\, - \'\idx___unsigned_long_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueuesL1Bitmap_' := ksqL1upd', - ksReadyQueues_' := ksqupd'\\) - \ rf_sr) - = -((\\ksReadyQueues := ksqupd\, - \'\idx___unsigned_long_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueues_' := ksqupd'\\) \ rf_sr) -" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) + show ?thesis + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac "runnable") + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. 
rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply (fastforce dest!: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_append_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply clarsimp + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2 tcbQueueEmpty_def) + done +qed (* FIXME 
same proofs as bit_set, maybe can generalise? *) lemma cbitmap_L1_relation_bit_clear: @@ -1292,27 +1172,6 @@ lemma cbitmap_L1_relation_bit_clear: invertL1Index_def l2BitmapSize_def' le_maxDomain_eq_less_numDomains word_le_nat_alt num_domains_index_updates) -lemma cready_queues_relation_empty_queue_helper: - "\ tcbDomain ko \ maxDomain ; tcbPriority ko \ maxPriority ; - cready_queues_relation (cslift \') (ksReadyQueues_' (globals \')) (ksReadyQueues \)\ - \ - cready_queues_relation (cslift \') - (Arrays.update (ksReadyQueues_' (globals \')) (unat (tcbDomain ko) * 256 + unat (tcbPriority ko)) - (tcb_queue_C.end_C_update (\_. NULL) - (head_C_update (\_. NULL) - (ksReadyQueues_' (globals \').[unat (tcbDomain ko) * 256 + unat (tcbPriority ko)])))) - ((ksReadyQueues \)((tcbDomain ko, tcbPriority ko) := []))" - unfolding cready_queues_relation_def Let_def - using maxPrio_to_H[simp] maxDom_to_H[simp] - apply clarsimp - apply (frule (1) cready_queues_index_to_C_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (fold cready_queues_index_to_C_def[simplified numPriorities_def]) - apply (case_tac "qdom = tcbDomain ko", - simp_all add: prio_and_dom_limit_helpers seL4_MinPrio_def - minDom_def) - apply (fastforce simp: cready_queues_index_to_C_in_range simp: cready_queues_index_to_C_distinct)+ - done - lemma cbitmap_L2_relationD: "\ cbitmap_L2_relation cbitmap2 abitmap2 ; d \ maxDomain ; i < l2BitmapSize \ \ cbitmap2.[unat d].[i] = abitmap2 (d, i)" @@ -1342,64 +1201,301 @@ lemma cbitmap_L2_relation_bit_clear: apply (case_tac "da = d" ; clarsimp simp: num_domains_index_updates) done -lemma tcbSchedDequeue_ccorres': +lemma removeFromBitmap_ccorres: "ccorres dc xfdc - ((\s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p))) - and valid_queues' and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedDequeue t) - (Call tcbSchedDequeue_'proc)" + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast prio\) hs + (removeFromBitmap tdom prio) (Call removeFromBitmap_'proc)" proof - - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the shape of the proof compared to when numDomains > 1 *) include no_less_1_simps - have ksQ_tcb_at': "\s ko d p. - \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p)) \ - \t\set (ksReadyQueues s (d, p)). tcb_at' t s" - by (fastforce dest: spec elim: obj_at'_weakenE) - - have invert_l1_index_limit: "\p. invertL1Index (prioToL1Index p) < 8" + have invert_l1_index_limit: "\p. invertL1Index (prioToL1Index p) < l2BitmapSize" unfolding invertL1Index_def l2BitmapSize_def' prioToL1Index_def by simp show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" - and xf'="ret__unsigned_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def - del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. 
rv' = ucast rv" - and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) + supply if_split[split del] + (* pull out static assms *) + apply simp + apply (rule ccorres_grab_asm[where P=\, simplified]) + apply (cinit lift: dom_' prio_') + apply clarsimp + apply csymbr + apply csymbr + (* we can clear up all C guards now *) + apply (clarsimp simp: maxDomain_le_unat_ucast_explicit word_and_less') + apply (simp add: invert_prioToL1Index_c_simp word_less_nat_alt) + apply (simp add: invert_l1_index_limit[simplified l2BitmapSize_def']) + apply ccorres_rewrite + (* handle L2 update *) + apply (rule_tac ccorres_split_nothrow_novcg_dc) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L2_relation) + apply (erule cbitmap_L2_relation_update) + apply (erule (1) cbitmap_L2_relation_bit_clear) + (* the check on the C side is identical to checking the L2 entry, rewrite the condition *) + apply (simp add: getReadyQueuesL2Bitmap_def) + apply (rule ccorres_symb_exec_l3, rename_tac l2) + apply (rule_tac C'="{s. l2 = 0}" + and Q="\s. l2 = ksReadyQueuesL2Bitmap s (tdom, invertL1Index (prioToL1Index prio))" + in ccorres_rewrite_cond_sr[where Q'=UNIV]) + apply clarsimp + apply (frule rf_sr_cbitmap_L2_relation) + apply (clarsimp simp: cbitmap_L2_relationD invert_l1_index_limit split: if_split) + (* unset L1 bit when L2 entry is empty *) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="prio_'" in ccorres_split_nothrow) + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L1_relation) + apply (erule cbitmap_L1_relation_update) + apply (erule (1) cbitmap_L1_relation_bit_clear) + apply wpsimp+ + apply (fastforce simp: guard_is_UNIV_def) + apply clarsimp + done +qed + +lemma ctcb_ptr_to_tcb_ptr_option_to_ctcb_ptr[simp]: + "ctcb_ptr_to_tcb_ptr (option_to_ctcb_ptr (Some ptr)) = ptr" + by (clarsimp simp: option_to_ctcb_ptr_def) + +lemma tcb_queue_remove_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. 
tcb_at' tcbPtr s \ valid_objs' s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueRemove queue tcbPtr) (Call tcb_queue_remove_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit' lift: tcb_') + apply (rename_tac tcb') + apply (simp only: tcbQueueRemove_def) + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule ccorres_pre_getObject_tcb, rename_tac tcb) + apply (rule ccorres_symb_exec_l, rename_tac beforePtrOpt) + apply (rule ccorres_symb_exec_l, rename_tac afterPtrOpt) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="before___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr beforePtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedPrev tcb = beforePtrOpt)" + and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="after___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr afterPtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedNext tcb = afterPtrOpt)" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond[where R="?abs"]) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) + apply clarsimp + apply (rule ccorres_assert2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. 
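(* The tcbQueueRemove refinement proof here mirrors the four cases of unlinking from a
   doubly-linked queue, selected by whether the removed TCB is the head and/or the end of the
   queue: it is the only element (the queue becomes empty), it is the head (its successor
   becomes the new head), it is the end (its predecessor becomes the new end), or it sits in
   the interior (predecessor and successor are linked to each other). In the non-trivial
   cases the removed TCB's own links are reset as well, and each abstract threadSet step is
   matched against the corresponding C structure update via threadSet_ccorres_lemma3. *)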
(s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) + apply clarsimp + apply (rule ccorres_assert2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply (rule ccorres_assert2)+ + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. 
(s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (fastforce intro: ccorres_return_C') + apply (wpsimp | vcg)+ + apply (clarsimp split: if_splits) + apply normalise_obj_at' + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + by (intro conjI impI; + clarsimp simp: ctcb_queue_relation_def typ_heap_simps option_to_ctcb_ptr_def + valid_tcb'_def valid_bound_tcb'_def) + +lemma tcbQueueRemove_tcb_at'_head: + "\\s. valid_objs' s \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)\ + tcbQueueRemove queue t + \\rv s. \ tcbQueueEmpty rv \ tcb_at' (the (tcbQueueHead rv)) s\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp haskell_assert_wp hoare_vcg_imp_lift') + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def valid_bound_tcb'_def tcbQueueEmpty_def obj_at'_def) + done + +lemma tcbSchedDequeue_ccorres: + "ccorres dc xfdc + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedDequeue t) (Call tcbSchedDequeue_'proc)" +proof - + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] + + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + note word_less_1[simp del] + + show ?thesis + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. 
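(* tcbSchedDequeue_ccorres here is the counterpart of tcbSchedEnqueue_ccorres: if the
   tcbQueued flag is set, the proof follows the C code as it reads the thread's domain and
   priority, removes the thread from the corresponding queue with tcb_queue_remove, writes the
   shortened queue back, clears the tcbQueued flag, and clears the bitmap bit when the queue
   has become empty. The helper tcbQueueRemove_tcb_at'_head above supplies the tcb_at' fact
   about the new head that the final emptiness test on the C side requires. *)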
rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) apply (rule threadGet_vcg_corres) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -1407,309 +1503,80 @@ proof - apply (drule spec, drule(1) mp, clarsimp) apply (clarsimp simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="(\s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct(ksReadyQueues s (d, p))) - and valid_queues' and obj_at' (inQ rva rvb) t - and (\s. rva \ maxDomain \ rvb \ maxPriority)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs when_def - null_def) - - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (frule(1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (frule_tac s=\ in tcb_queue_relation_prev_next'; (fastforce simp: ksQ_tcb_at')?) - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (intro conjI ; - clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift)+ - apply (drule(2) filter_empty_unfiltered_contr, simp)+ - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - apply (subst rf_sr_drop_bitmaps_dequeue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_clear) - apply (simp add: invert_prioToL1Index_c_simp) - apply (frule rf_sr_cbitmap_L2_relation) - apply (clarsimp simp: cbitmap_L2_relation_def - word_size prioToL1Index_def wordRadix_def mask_def - word_le_nat_alt - numPriorities_def wordBits_def l2BitmapSize_def' - invertL1Index_def numDomains_less_numeric_explicit) - apply (case_tac "d = tcbDomain ko" - ; fastforce simp: le_maxDomain_eq_less_numDomains - numDomains_less_numeric_explicit) - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - - apply (frule_tac s=\ in tcb_queue_relation_prev_next', assumption) - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by ((fastforce simp: ksQ_tcb_at')+) - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - (* trivial case, setting queue to empty *) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - cmachine_state_relation_def) - apply (erule (2) cready_queues_relation_empty_queue_helper) - (* impossible case, C L2 update disagrees with Haskell update *) - 
apply (simp add: invert_prioToL1Index_c_simp) - apply (subst (asm) num_domains_index_updates) - subgoal by (simp add: le_maxDomain_eq_less_numDomains word_le_nat_alt) - apply (subst (asm) Arrays.index_update) - apply (simp add: invert_l1_index_limit) - - apply (frule rf_sr_cbitmap_L2_relation) - apply (drule_tac i="invertL1Index (prioToL1Index (tcbPriority ko))" - in cbitmap_L2_relationD, assumption) - apply (fastforce simp: l2BitmapSize_def' invert_l1_index_limit) - apply (fastforce simp: prioToL1Index_def invertL1Index_def mask_def wordRadix_def) - (* impossible case *) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (drule(2) filter_empty_unfiltered_contr, fastforce) - - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply fold_subgoals[2] - apply (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (frule_tac s=\ in tcb_queue_relation_prev_next', assumption) - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI, clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (rule conjI; clarsimp) - apply (simp add: typ_heap_simps) - apply (clarsimp simp: h_t_valid_c_guard [OF h_t_valid_field, OF h_t_valid_clift] - h_t_valid_field[OF h_t_valid_clift] h_t_valid_clift) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 typ_heap_simps - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[3] - subgoal premises prems using prems by (fastforce simp: tcb_null_sched_ptrs_def typ_heap_simps c_guard_clift)+ - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split, - simp_all add: typ_heap_simps')[1] - subgoal by (fastforce simp: tcb_null_sched_ptrs_def) - subgoal by fastforce - + apply (rule_tac r'="\rv rv'. 
rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rule_tac r'=ctcb_queue_relation and xf'=new_queue_' in ccorres_split_nothrow) + apply (ctac add: tcb_queue_remove_ccorres) + apply ceqv + apply (rename_tac queue' newqueue) + apply (rule ccorres_Guard_Seq) + apply (ctac add: setQueue_ccorres) + apply (rule ccorres_split_nothrow_novcg_dc) + apply ctac + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue')" + and R="\s. \ tcbQueueEmpty queue' \ tcb_at' (the (tcbQueueHead queue')) s" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def split: option.splits) + apply ceqv + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: removeFromBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply vcg + apply (wpsimp wp: hoare_vcg_imp_lift') + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: hoare_vcg_imp_lift') + apply vcg + apply ((wpsimp wp: tcbQueueRemove_tcb_at'_head | wp (once) hoare_drop_imps)+)[1] + apply (vcg exspec=tcb_queue_remove_modifies) + apply wpsimp + apply vcg + apply vcg + apply (rule conseqPre, vcg) apply clarsimp - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* invalid, missing bitmap updates on haskell side *) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems - by (fastforce dest!: tcb_queue_relation'_empty_ksReadyQueues - elim: obj_at'_weaken)+ - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[4] - subgoal premises prems using prems - by (fastforce simp: typ_heap_simps c_guard_clift tcb_null_sched_ptrs_def)+ - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (frule_tac s=\ in tcb_queue_relation_prev_next') - apply fastforce - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply 
(clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (clarsimp simp: typ_heap_simps) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (drule(2) filter_empty_unfiltered_contr[simplified filter_noteq_op], simp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* impossible case, C L2 update disagrees with Haskell update *) - apply (subst (asm) num_domains_index_updates) - apply (simp add: le_maxDomain_eq_less_numDomains word_le_nat_alt) - apply (subst (asm) Arrays.index_update) - subgoal using invert_l1_index_limit - by (fastforce simp add: invert_prioToL1Index_c_simp intro: nat_Suc_less_le_imp) - apply (frule rf_sr_cbitmap_L2_relation) - apply (simp add: invert_prioToL1Index_c_simp) - apply (drule_tac i="invertL1Index (prioToL1Index (tcbPriority ko))" - in cbitmap_L2_relationD, assumption) - subgoal by (simp add: invert_l1_index_limit l2BitmapSize_def') - apply (fastforce simp: prioToL1Index_def invertL1Index_def mask_def wordRadix_def) - - apply (simp add: invert_prioToL1Index_c_simp) - apply (subst rf_sr_drop_bitmaps_dequeue_helper_L2, assumption) - subgoal by (fastforce dest: rf_sr_cbitmap_L2_relation elim!: cbitmap_L2_relation_bit_clear) - - (* trivial case, setting queue to empty *) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - cmachine_state_relation_def) - apply (erule (2) cready_queues_relation_empty_queue_helper) - - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (simp add: invert_prioToL1Index_c_simp) - apply (frule_tac s=\ in tcb_queue_relation_prev_next') - apply (fastforce simp add: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI, clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (clarsimp simp: typ_heap_simps) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (clarsimp simp: typ_heap_simps) - apply (fastforce simp: typ_heap_simps c_guard_clift) - apply (fastforce simp: typ_heap_simps) - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H 
maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[4] - subgoal premises prems using prems - by (fastforce simp: typ_heap_simps c_guard_clift tcb_null_sched_ptrs_def)+ - apply (clarsimp) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* invalid, missing bitmap updates on haskell side *) - apply (drule tcb_queue_relation'_empty_ksReadyQueues) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce elim: obj_at'_weaken)+ - (* invalid, missing bitmap updates on haskell side *) - apply (drule tcb_queue_relation'_empty_ksReadyQueues) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce elim: obj_at'_weaken)+ - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 typ_heap_simps - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[3] - subgoal premises prems using prems - by (fastforce simp: typ_heap_simps c_guard_clift tcb_null_sched_ptrs_def)+ - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - by (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def valid_tcb'_def inQ_def) + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) obj_at_cslift_tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + by (fastforce simp: word_less_nat_alt + cready_queues_index_to_C_def2 ctcb_relation_def + typ_heap_simps le_maxDomain_eq_less_numDomains(2) unat_trans_ucast_helper) qed -lemma tcbSchedDequeue_ccorres: - "ccorres dc xfdc - (valid_queues and valid_queues' and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedDequeue t) - (Call tcbSchedDequeue_'proc)" - apply (rule ccorres_guard_imp [OF tcbSchedDequeue_ccorres']) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp)+ - done - lemma tcb_queue_relation_append: "\ tcb_queue_relation tn tp mp queue qprev qhead; queue \ []; qend' \ tcb_ptr_to_ctcb_ptr ` set queue; mp qend' = Some tcb; @@ -1728,216 +1595,6 @@ lemma tcb_queue_relation_append: apply clarsimp done -lemma tcbSchedAppend_update: - assumes sr: 
"sched_queue_relation' mp queue qhead qend" - and qh': "qend' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qend' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qend' \ NULL" - shows - "sched_queue_relation' - (upd_unless_null qend (tcbSchedNext_C_update (\_. qend') (the (mp qend))) - (mp(qend' \ tcb\tcbSchedNext_C := NULL, tcbSchedPrev_C := qend\))) - (queue @ [ctcb_ptr_to_tcb_ptr qend']) (if queue = [] then qend' else qhead) qend'" - using sr qh' valid_ep cs_tcb qhN - apply - - apply (rule rev_cases[where xs=queue]) - apply (simp add: tcb_queue_relation'_def upd_unless_null_def) - apply (clarsimp simp: tcb_queue_relation'_def upd_unless_null_def tcb_at_not_NULL) - apply (drule_tac qend'=qend' and tn_update=tcbSchedNext_C_update - and tp_update=tcbSchedPrev_C_update and qend="tcb_ptr_to_ctcb_ptr y" - in tcb_queue_relation_append, simp_all) - apply (fastforce simp add: tcb_at_not_NULL) - apply (simp add: fun_upd_twist) - done - -lemma tcb_queue_relation_qend_mems: - "\ tcb_queue_relation' getNext getPrev mp queue qhead qend; - \x \ set queue. tcb_at' x s \ - \ (qend = NULL \ queue = []) - \ (qend \ NULL \ ctcb_ptr_to_tcb_ptr qend \ set queue)" - apply (clarsimp simp: tcb_queue_relation'_def) - apply (drule bspec, erule last_in_set) - apply (simp add: tcb_at_not_NULL) - done - -lemma tcbSchedAppend_ccorres: - "ccorres dc xfdc - (valid_queues and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedAppend t) - (Call tcbSchedAppend_'proc)" -proof - - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] - - (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the - shape of the proof compared to when numDomains > 1 *) - include no_less_1_simps - - show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" - and xf'="ret__unsigned_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def unless_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="prio_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="\s. valid_queues s \ (\p. t \ set (ksReadyQueues s p)) - \ (\tcb. 
ko_at' tcb t s \ tcbDomain tcb =rva - \ tcbPriority tcb = rvb \ valid_tcb' tcb s)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs null_def) - apply (clarsimp simp: queue_in_range valid_tcb'_def) - apply (rule conjI; clarsimp simp: queue_in_range) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (simp add: invert_prioToL1Index_c_simp) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (simp add: t_hrs_ksReadyQueues_upd_absorb) - apply (subst rf_sr_drop_bitmaps_enqueue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_set) - subgoal by (fastforce intro: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) - apply (erule(1) state_relation_queue_update_helper[where S="{t}"], - (simp | rule globals.equality)+, - simp_all add: cready_queues_index_to_C_def2 numPriorities_def - t_hrs_ksReadyQueues_upd_absorb upd_unless_null_def - typ_heap_simps)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def elim: obj_at'_weaken) - apply (fastforce simp: typ_heap_simps c_guard_clift) - apply (fastforce simp: tcb_null_sched_ptrs_def elim: obj_at'_weaken) - apply (clarsimp simp: upd_unless_null_def cready_queues_index_to_C_def numPriorities_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp simp: queue_in_range) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, - simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] - apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (clarsimp simp: upd_unless_null_def cready_queues_index_to_C_def numPriorities_def) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, - simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] 
- apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: cready_queues_index_to_C_def2 numPriorities_def) - apply (frule(2) obj_at_cslift_tcb[OF valid_queues_obj_at'D]) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="{t, v}" for v in state_relation_queue_update_helper, - (simp | rule globals.equality)+, - simp_all add: typ_heap_simps if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 upd_unless_null_def - cong: if_cong split del: if_split - del: fun_upd_restrict_conv)[1] - apply simp - apply (rule conjI) - apply clarsimp - apply (drule_tac s="tcb_ptr_to_ctcb_ptr t" in sym, simp) - apply (clarsimp simp add: fun_upd_twist) - prefer 4 - apply (simp add: obj_at'_weakenE[OF _ TrueI]) - apply (rule disjI1, erule valid_queues_obj_at'D) - subgoal by simp - subgoal by simp - subgoal by (fastforce simp: tcb_null_sched_ptrs_def) - subgoal by (fastforce simp: typ_heap_simps c_guard_clift) - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) - apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - by (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def inQ_def - dest!: valid_queues_obj_at'D) -qed - -lemma true_eq_from_bool [simp]: - "(scast true = from_bool P) = P" - by (simp add: true_def from_bool_def split: bool.splits) - lemma isStopped_spec: "\s. \ \ ({s} \ {s. cslift s (thread_' s) \ None}) Call isStopped_'proc {s'. ret__unsigned_long_' s' = from_bool (tsType_CL (thread_state_lift (tcbState_C (the (cslift s (thread_' s))))) \ @@ -1983,8 +1640,11 @@ lemma tcb_at_1: done lemma rescheduleRequired_ccorres: - "ccorres dc xfdc (valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs') - UNIV [] rescheduleRequired (Call rescheduleRequired_'proc)" + "ccorres dc xfdc + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' + and pspace_aligned' and pspace_distinct') + UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" apply cinit apply (rule ccorres_symb_exec_l) apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) @@ -2094,43 +1754,18 @@ lemma cguard_UNIV: by fastforce lemma lookupBitmapPriority_le_maxPriority: - "\ ksReadyQueuesL1Bitmap s d \ 0 ; valid_queues s \ + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + \d p. 
d > maxDomain \ p > maxPriority \ tcbQueueEmpty (ksReadyQueues s (d, p)); + valid_bitmaps s \ \ lookupBitmapPriority d s \ maxPriority" - unfolding valid_queues_def valid_queues_no_bitmap_def - by (fastforce dest!: bitmapQ_from_bitmap_lookup bitmapQ_ksReadyQueuesI intro: ccontr) - -lemma rf_sr_ksReadyQueuesL1Bitmap_not_zero: - "\ (\, s') \ rf_sr ; d \ maxDomain ; ksReadyQueuesL1Bitmap_' (globals s').[unat d] \ 0 \ - \ ksReadyQueuesL1Bitmap \ d \ 0" - apply (drule rf_sr_cbitmap_L1_relation) - apply (simp add: cbitmap_L1_relation_def) - done + apply (clarsimp simp: valid_bitmaps_def) + by (fastforce dest!: bitmapQ_from_bitmap_lookup bitmapQ_ksReadyQueuesI intro: ccontr) lemma ksReadyQueuesL1Bitmap_word_log2_max: - "\valid_queues s; ksReadyQueuesL1Bitmap s d \ 0\ - \ word_log2 (ksReadyQueuesL1Bitmap s d) < l2BitmapSize" - unfolding valid_queues_def - by (fastforce dest: word_log2_nth_same bitmapQ_no_L1_orphansD) - - -lemma rf_sr_ksReadyQueuesL2Bitmap_simp: - "\ (\, s') \ rf_sr ; d \ maxDomain ; valid_queues \ ; ksReadyQueuesL1Bitmap \ d \ 0 \ - \ ksReadyQueuesL2Bitmap_' (globals s').[unat d].[word_log2 (ksReadyQueuesL1Bitmap \ d)] = - ksReadyQueuesL2Bitmap \ (d, word_log2 (ksReadyQueuesL1Bitmap \ d))" - apply (frule rf_sr_cbitmap_L2_relation) - apply (frule (1) ksReadyQueuesL1Bitmap_word_log2_max) - apply (drule (3) cbitmap_L2_relationD) - done - -lemma ksReadyQueuesL2Bitmap_nonzeroI: - "\ d \ maxDomain ; valid_queues s ; ksReadyQueuesL1Bitmap s d \ 0 \ - \ ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d))) \ 0" - unfolding valid_queues_def - apply clarsimp - apply (frule bitmapQ_no_L1_orphansD) - apply (erule word_log2_nth_same) - apply clarsimp - done + "\valid_bitmaps s; ksReadyQueuesL1Bitmap s d \ 0\ + \ word_log2 (ksReadyQueuesL1Bitmap s d) < l2BitmapSize" + unfolding valid_bitmaps_def + by (fastforce dest: word_log2_nth_same bitmapQ_no_L1_orphansD) lemma clzl_spec: "\s. \ \ {\. s = \ \ x___unsigned_long_' s \ 0} Call clzl_'proc @@ -2319,11 +1954,6 @@ lemma getCurDomain_maxDom_ccorres_dom_': rf_sr_ksCurDomain) done -lemma rf_sr_cscheduler_action_relation: - "(s, s') \ rf_sr - \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" - by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - lemma threadGet_get_obj_at'_has_domain: "\ tcb_at' t \ threadGet tcbDomain t \\rv. obj_at' (\tcb. rv = tcbDomain tcb) t\" by (wp threadGet_obj_at') (simp add: obj_at'_def) @@ -2331,16 +1961,15 @@ lemma threadGet_get_obj_at'_has_domain: lemma possibleSwitchTo_ccorres: shows "ccorres dc xfdc - (valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t and (\s. ksCurDomain s \ maxDomain) - and valid_objs') + and valid_objs' and pspace_aligned' and pspace_distinct') ({s. target_' s = tcb_ptr_to_ctcb_ptr t} \ UNIV) [] (possibleSwitchTo t ) (Call possibleSwitchTo_'proc)" supply if_split [split del] if_cong[cong] supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] if_1_0_0[simp] @@ -2365,7 +1994,7 @@ lemma possibleSwitchTo_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule_tac R="\s. 
sact = ksSchedulerAction s \ weak_sch_act_wf (ksSchedulerAction s) s" in ccorres_cond) - apply (fastforce dest!: rf_sr_cscheduler_action_relation pred_tcb_at' tcb_at_not_NULL + apply (fastforce dest!: rf_sr_sched_action_relation pred_tcb_at' tcb_at_not_NULL simp: cscheduler_action_relation_def weak_sch_act_wf_def split: scheduler_action.splits) apply (ctac add: rescheduleRequired_ccorres) @@ -2382,8 +2011,8 @@ lemma possibleSwitchTo_ccorres: lemma scheduleTCB_ccorres': "ccorres dc xfdc - (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_queues - and valid_objs') + (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] (do (runnable, curThread, action) \ do @@ -2396,6 +2025,7 @@ lemma scheduleTCB_ccorres': rescheduleRequired od) (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] apply (cinit' lift: tptr_' simp del: word_neq_0_conv) apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) @@ -2425,31 +2055,34 @@ lemma scheduleTCB_ccorres': apply (clarsimp simp: typ_heap_simps) apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def tcb_at_not_NULL split: scheduler_action.split_asm) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def) apply wp+ - apply (simp add: isRunnable_def isStopped_def) - apply wp + apply (simp add: isRunnable_def isStopped_def) apply (simp add: guard_is_UNIV_def) apply clarsimp apply (clarsimp simp: st_tcb_at'_def obj_at'_def weak_sch_act_wf_def) done lemma scheduleTCB_ccorres_valid_queues'_pre: - "ccorresG rf_sr \ dc xfdc (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs') - (UNIV \ \\tptr = tcb_ptr_to_ctcb_ptr thread\) [] - (do (runnable, curThread, action) \ do - runnable \ isRunnable thread; - curThread \ getCurThread; - action \ getSchedulerAction; - return (runnable, curThread, action) od; - when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired - od) - (Call scheduleTCB_'proc)" + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread + and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] apply (cinit' lift: tptr_' simp del: word_neq_0_conv) apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) @@ -2478,7 +2111,7 @@ lemma scheduleTCB_ccorres_valid_queues'_pre: apply (drule (1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def weak_sch_act_wf_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] apply (fold_subgoals (prefix))[6] subgoal premises prems using prems by (clarsimp simp: rf_sr_def cstate_relation_def Let_def @@ -2489,17 +2122,17 @@ lemma scheduleTCB_ccorres_valid_queues'_pre: split: scheduler_action.split_asm) apply wp+ apply (simp add: isRunnable_def isStopped_def) - apply wp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: st_tcb_at'_def obj_at'_def) done - lemmas scheduleTCB_ccorres_valid_queues' = scheduleTCB_ccorres_valid_queues'_pre[unfolded bind_assoc return_bind split_conv] lemma rescheduleRequired_ccorres_valid_queues'_simple: - "ccorresG rf_sr \ dc xfdc (valid_queues' and sch_act_simple) UNIV [] rescheduleRequired (Call rescheduleRequired_'proc)" + "ccorresG rf_sr \ dc xfdc + sch_act_simple UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" apply cinit apply (rule ccorres_symb_exec_l) apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) @@ -2532,16 +2165,18 @@ lemma rescheduleRequired_ccorres_valid_queues'_simple: split: scheduler_action.split_asm) lemma scheduleTCB_ccorres_valid_queues'_pre_simple: - "ccorresG rf_sr \ dc xfdc (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and sch_act_simple) - (UNIV \ \\tptr = tcb_ptr_to_ctcb_ptr thread\) [] - (do (runnable, curThread, action) \ do - runnable \ isRunnable thread; - curThread \ getCurThread; - action \ getSchedulerAction; - return (runnable, curThread, action) od; - when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired - od) - (Call scheduleTCB_'proc)" + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and sch_act_simple) + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] apply (cinit' lift: tptr_' simp del: word_neq_0_conv) apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) @@ -2571,7 +2206,7 @@ lemma scheduleTCB_ccorres_valid_queues'_pre_simple: apply (clarsimp simp: typ_heap_simps) apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState 
ko", simp_all add: ThreadState_defs)[1] apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def tcb_at_not_NULL @@ -2579,11 +2214,10 @@ lemma scheduleTCB_ccorres_valid_queues'_pre_simple: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def) apply wp+ - apply (simp add: isRunnable_def isStopped_def) - apply wp + apply (simp add: isRunnable_def isStopped_def) apply (simp add: guard_is_UNIV_def) apply clarsimp - apply (clarsimp simp: st_tcb_at'_def obj_at'_def valid_queues'_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) done lemmas scheduleTCB_ccorres_valid_queues'_simple @@ -2603,48 +2237,35 @@ lemma threadSet_weak_sch_act_wf_runnable': apply (clarsimp) done -lemma threadSet_valid_queues_and_runnable': "\\s. valid_queues s \ (\p. thread \ set (ksReadyQueues s p) \ runnable' st)\ - threadSet (tcbState_update (\_. st)) thread - \\rv s. valid_queues s\" - apply (wp threadSet_valid_queues) - apply (clarsimp simp: inQ_def) -done - lemma setThreadState_ccorres[corres]: "ccorres dc xfdc - (\s. tcb_at' thread s \ valid_queues s \ valid_objs' s \ valid_tcb_state' st s \ - (ksSchedulerAction s = SwitchToThread thread \ runnable' st) \ - (\p. thread \ set (ksReadyQueues s p) \ runnable' st) \ - sch_act_wf (ksSchedulerAction s) s) - ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} + (\s. tcb_at' thread s \ valid_objs' s \ valid_tcb_state' st s + \ (ksSchedulerAction s = SwitchToThread thread \ runnable' st) + \ sch_act_wf (ksSchedulerAction s) s \ pspace_aligned' s \ pspace_distinct' s) + ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) hs - (setThreadState st thread) (Call setThreadState_'proc)" + (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres) - apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues_and_runnable' - threadSet_valid_objs') - by (clarsimp simp: weak_sch_act_wf_def valid_queues_def valid_tcb'_tcbState_update) - -lemma threadSet_valid_queues'_and_not_runnable': "\tcb_at' thread and valid_queues' and (\s. (\ runnable' st))\ - threadSet (tcbState_update (\_. st)) thread - \\rv. tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' \" - - apply (wp threadSet_valid_queues' threadSet_tcbState_st_tcb_at') - apply (clarsimp simp: pred_neg_def valid_queues'_def inQ_def)+ -done + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs') + apply (clarsimp simp: weak_sch_act_wf_def valid_tcb'_tcbState_update) + done lemma setThreadState_ccorres_valid_queues': - "ccorres dc xfdc - (\s. tcb_at' thread s \ valid_queues' s \ \ runnable' st \ weak_sch_act_wf (ksSchedulerAction s) s \ Invariants_H.valid_queues s \ (\p. thread \ set (ksReadyQueues s p)) \ sch_act_not thread s \ valid_objs' s \ valid_tcb_state' st s) + "ccorres dc xfdc + (\s. tcb_at' thread s \ \ runnable' st \ weak_sch_act_wf (ksSchedulerAction s) s + \ sch_act_not thread s \ valid_objs' s \ valid_tcb_state' st s + \ pspace_aligned' s \ pspace_distinct' s) ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} - \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] - (setThreadState st thread) (Call setThreadState_'proc)" + \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres_valid_queues') - apply (wp threadSet_valid_queues'_and_not_runnable' threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues_and_runnable' threadSet_valid_objs') - by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def) + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs' + threadSet_tcbState_st_tcb_at') + by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) lemma simp_list_case_return: "(case x of [] \ return e | y # ys \ return f) = return (if x = [] then e else f)" @@ -2665,24 +2286,23 @@ lemma cancelSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (ctac (no_vcg) add: cancelSignal_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') - apply ((wp setNotification_ksQ hoare_vcg_all_lift set_ntfn_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+)[1] - apply (simp add: "StrictC'_thread_state_defs") + apply ((wp hoare_vcg_all_lift set_ntfn_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+)[1] + apply (simp add: ThreadState_defs) apply (rule conjI, clarsimp, rule conjI, clarsimp) apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) - subgoal by ((auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def - isTS_defs cte_wp_at_ctes_of "StrictC'_thread_state_defs" + subgoal + by ((auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def + isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ntfn'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] | - clarsimp simp: eq_commute)+) + | clarsimp simp: eq_commute)+) apply (clarsimp) apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) apply (frule (2) ntfn_blocked_in_queueD) by (auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def - isTS_defs cte_wp_at_ctes_of "StrictC'_thread_state_defs" valid_ntfn'_def + isTS_defs cte_wp_at_ctes_of valid_ntfn'_def cthread_state_relation_def sch_act_wf_weak isWaitingNtfn_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] - split: ntfn.splits option.splits - | clarsimp simp: eq_commute + split: ntfn.splits option.splits + | clarsimp simp: eq_commute | drule_tac x=thread in bspec)+ lemma cmap_relation_ep: @@ -2692,7 +2312,7 @@ lemma cmap_relation_ep: by (simp add: Let_def) (* FIXME: MOVE *) -lemma ccorres_pre_getEndpoint [corres_pre]: +lemma ccorres_pre_getEndpoint [ccorres_pre]: assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" shows "ccorres r xf (ep_at' p and (\s. \ep. ko_at' ep p s \ P ep s)) @@ -2833,8 +2453,8 @@ lemma cpspace_relation_ep_update_an_ep: and pal: "pspace_aligned' s" "pspace_distinct' s" and others: "\epptr' ep'. 
\ ko_at' ep' epptr' s; epptr' \ epptr; ep' \ IdleEP \ \ set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using cp koat pal rel unfolding cmap_relation_def apply - apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs) @@ -2856,8 +2476,8 @@ lemma cpspace_relation_ep_update_ep: and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using invs apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) apply clarsimp+ @@ -2869,15 +2489,15 @@ lemma cpspace_relation_ep_update_ep': fixes ep :: "endpoint" and ep' :: "endpoint" and epptr :: "word32" and s :: "kernel_state" defines "qs \ if (isSendEP ep' \ isRecvEP ep') then set (epQueue ep') else {}" - defines "s' \ s\ksPSpace := ksPSpace s(epptr \ KOEndpoint ep')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(epptr \ KOEndpoint ep')\" assumes koat: "ko_at' ep epptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and srs: "sym_refs (state_refs_of' s')" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" proof - from koat have koat': "ko_at' ep' epptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -2951,7 +2571,7 @@ lemma cancelIPC_ccorres_helper: apply (rule allI) apply (rule conseqPre) apply vcg - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ep_blocked_in_queueD) apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) apply (elim conjE) @@ -2969,7 +2589,7 @@ lemma cancelIPC_ccorres_helper: apply assumption+ apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) - apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split simp del: comp_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) apply (frule null_ep_queue [simplified comp_def] null_ep_queue) apply (intro impI conjI allI) \ \empty case\ @@ -2985,23 +2605,20 @@ lemma cancelIPC_ccorres_helper: cpspace_relation_def update_ep_map_tos typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - subgoal by (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - subgoal by (simp add: cendpoint_relation_def Let_def EPState_Idle_def) - subgoal by simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply 
simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - subgoal by simp - apply (erule (1) map_to_ko_atI') - apply (simp add: heap_to_user_data_def Let_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + subgoal by (simp add: cendpoint_relation_def Let_def EPState_Idle_def) + subgoal by simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + subgoal by simp + apply (erule (1) map_to_ko_atI') + apply (simp add: heap_to_user_data_def Let_def) subgoal by (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') subgoal by (simp add: cmachine_state_relation_def) @@ -3022,38 +2639,36 @@ lemma cancelIPC_ccorres_helper: cpspace_relation_def update_ep_map_tos typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - subgoal by (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def split: endpoint.splits split del: if_split) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def + split: endpoint.splits split del: if_split) \ \recv case\ - apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff - tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask cong: tcb_queue_relation'_cong) - subgoal by (intro impI conjI; simp) - \ \send case\ - apply (clarsimp simp add: Ptr_ptr_val h_t_valid_clift_Some_iff - tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask cong: tcb_queue_relation'_cong) + apply (clarsimp simp: Ptr_ptr_val h_t_valid_clift_Some_iff + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask + cong: tcb_queue_relation'_cong) + subgoal by (intro impI conjI; simp) + \ \send case\ + apply (clarsimp simp: Ptr_ptr_val h_t_valid_clift_Some_iff + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask cong: tcb_queue_relation'_cong) subgoal by (intro impI conjI; simp) subgoal by simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) - subgoal by (simp add: carch_state_relation_def carch_globals_def - typ_heap_simps') + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + subgoal by (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: h_t_valid_clift_Some_iff) subgoal by (simp add: objBits_simps') subgoal by (simp add: objBits_simps) apply assumption - done + done declare empty_fail_get[iff] @@ -3109,8 +2724,7 
@@ lemma cancelIPC_ccorres1: apply (rule_tac P="rv' = thread_state_to_tsType rv" in ccorres_gen_asm2) apply wpc \ \BlockedOnReceive\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs cong: call_ignore_cong) - apply (fold dc_def) + apply (simp add: word_sle_def ccorres_cond_iffs cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -3126,7 +2740,7 @@ lemma cancelIPC_ccorres1: apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+ - apply (simp add: "StrictC'_thread_state_defs") + apply (simp add: ThreadState_defs) apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -3136,10 +2750,9 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \BlockedOnReply case\ - apply (simp add: "StrictC'_thread_state_defs" ccorres_cond_iffs + apply (simp add: ThreadState_defs ccorres_cond_iffs Collect_False Collect_True word_sle_def cong: call_ignore_cong del: Collect_const) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -3179,14 +2792,12 @@ lemma cancelIPC_ccorres1: apply (rule ccorres_Cond_rhs) apply (simp add: nullPointer_def when_def) apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_stateAssert]) - apply (simp only: dc_def[symmetric]) apply (rule ccorres_symb_exec_r) apply (ctac add: cteDeleteOne_ccorres[where w1="scast cap_reply_cap"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) apply (wp | simp)+ - apply (simp add: when_def nullPointer_def dc_def[symmetric]) apply (rule ccorres_return_Skip) apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs) @@ -3199,7 +2810,8 @@ lemma cancelIPC_ccorres1: apply (clarsimp simp add: guard_is_UNIV_def tcbReplySlot_def Kernel_C.tcbReply_def tcbCNodeEntries_def) \ \BlockedOnNotification\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong) apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg)) apply clarsimp @@ -3208,10 +2820,12 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Running, Inactive, and Idle\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip)+ \ \BlockedOnSend\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def ccorres_cond_iffs + cong: call_ignore_cong) \ \clag\ apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -3227,7 +2841,7 @@ lemma cancelIPC_ccorres1: apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split del:if_split)+ - apply (simp add: "StrictC'_thread_state_defs") + apply (simp add: ThreadState_defs) apply clarsimp apply (rule conseqPre, vcg, rule subset_refl) apply (rule conseqPre, vcg) @@ -3237,7 +2851,8 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Restart\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" 
ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip) \ \Post wp proofs\ apply vcg @@ -3260,37 +2875,35 @@ lemma cancelIPC_ccorres1: subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (frule (2) ep_blocked_in_queueD_recv) apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of isRecvEP_def cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits endpoint.splits) + split: thread_state.splits endpoint.splits) apply (rule conjI) apply (clarsimp simp: inQ_def) - apply (rule conjI) - apply clarsimp apply clarsimp apply (rule conjI) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (rule conjI) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (frule (2) ep_blocked_in_queueD_send) apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of isSendEP_def cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits endpoint.splits)[1] + split: thread_state.splits endpoint.splits)[1] apply (auto simp: isTS_defs cthread_state_relation_def typ_heap_simps weak_sch_act_wf_def) apply (case_tac ts, auto simp: isTS_defs cthread_state_relation_def typ_heap_simps) diff --git a/proof/crefine/ARM_HYP/Ipc_C.thy b/proof/crefine/ARM_HYP/Ipc_C.thy index c46285aebd..f728396e11 100644 --- a/proof/crefine/ARM_HYP/Ipc_C.thy +++ b/proof/crefine/ARM_HYP/Ipc_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -26,10 +27,6 @@ lemma replyFromKernel_success_empty: unfolding replyFromKernel_def replyFromKernel_success_empty_def by (simp add: setMRs_Nil submonad_asUser.fn_stateAssert) -crunch valid_queues[wp]: handleFaultReply valid_queues - -crunch valid_queues'[wp]: handleFaultReply valid_queues' - crunch sch_act_wf: handleFaultReply "\s. 
sch_act_wf (ksSchedulerAction s) s" crunch valid_ipc_buffer_ptr' [wp]: copyMRs "valid_ipc_buffer_ptr' p" @@ -465,6 +462,7 @@ lemma handleArchFaultReply': msg \ getMRs s sb tag; handleArchFaultReply f r (msgLabel tag) msg od) x' = handleArchFaultReply' f s r tag x'" + supply empty_fail_cond[simp] apply (unfold handleArchFaultReply'_def getMRs_def msgMaxLength_def bit_def msgLengthBits_def msgRegisters_unfold fromIntegral_simp1 fromIntegral_simp2 @@ -581,13 +579,10 @@ lemma getSanitiseRegisterInfo_moreMapM_comm: apply (auto split: option.splits) done - lemma monadic_rewrite_threadGet_return: "monadic_rewrite True False (tcb_at' r) (return x) (do t \ threadGet f r; return x od)" - apply (rule monadic_rewrite_symb_exec_r') - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) done context begin interpretation Arch . @@ -602,18 +597,14 @@ end lemma monadic_rewrite_getSanitiseRegisterInfo_return: "monadic_rewrite True False (tcb_at' r) (return x) (do t \ getSanitiseRegisterInfo r; return x od)" - apply (rule monadic_rewrite_symb_exec_r') - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) done lemma monadic_rewrite_getSanitiseRegisterInfo_drop: "monadic_rewrite True False (tcb_at' r) (d) (do t \ getSanitiseRegisterInfo r; d od)" - apply (rule monadic_rewrite_symb_exec_r') - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) done context kernel_m begin interpretation Arch . @@ -634,7 +625,8 @@ lemma handleFaultReply': msg \ getMRs s sb tag; handleFaultReply f r (msgLabel tag) msg od) (handleFaultReply' f s r)" - supply if_cong[cong] + supply if_cong[cong] empty_fail_cond[simp] + supply empty_fail_asUser[wp] empty_fail_getRegister[wp] apply (unfold handleFaultReply'_def getMRs_def msgMaxLength_def bit_def msgLengthBits_def msgRegisters_unfold fromIntegral_simp1 fromIntegral_simp2 @@ -649,41 +641,41 @@ lemma handleFaultReply': zip_Cons ARM_HYP_H.exceptionMessage_def ARM_HYP.exceptionMessage_def mapM_x_Cons mapM_x_Nil) - apply (rule monadic_rewrite_symb_exec_l, wp+) - apply (rule_tac P="tcb_at' s and tcb_at' r" in monadic_rewrite_inst) - apply (case_tac rv; (case_tac "msgLength tag < scast n_msgRegisters", - (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm - asUser_getRegister_getSanitiseRegisterInfo_comm - asUser_getRegister_discarded asUser_mapMloadWordUser_threadGet_comm - asUser_comm[OF neq] asUser_getRegister_threadGet_comm - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - | wp lookupIPCBuffer_inv )+)+)) - apply wp + apply (rule monadic_rewrite_symb_exec_l) + apply (rule_tac P="tcb_at' s and tcb_at' r" in monadic_rewrite_inst) + apply (case_tac sb; (case_tac "msgLength tag < scast n_msgRegisters", + (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + 
asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm + asUser_getRegister_getSanitiseRegisterInfo_comm + asUser_getRegister_discarded asUser_mapMloadWordUser_threadGet_comm + asUser_comm[OF neq] asUser_getRegister_threadGet_comm + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp)+)+)) + apply wp+ (* capFault *) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_asUser empty_fail_getRegister)+)+ - apply(case_tac rv) - apply (clarsimp - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - | wp mapM_x_mapM_valid[OF mapM_x_wp'[OF loadWordUser_inv]] - empty_fail_loadWordUser)+ + apply (repeat 5 \rule monadic_rewrite_symb_exec_l\) (* until case sb *) + apply (case_tac sb) + apply (clarsimp + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp mapM_x_mapM_valid[OF mapM_x_wp'[OF loadWordUser_inv]] + empty_fail_loadWordUser)+ (* UnknownSyscallException *) apply (simp add: zip_append2 mapM_x_append asUser_bind_distrib split_def bind_assoc) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_do_flip) apply (rule monadic_rewrite_bind_tail) apply (rule_tac P="inj (case_bool s r)" in monadic_rewrite_gen_asm) apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_weaken[where F=False and E=True], simp) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) apply (rule isolate_thread_actions_rewrite_bind bool.simps setRegister_simple zipWithM_setRegister_simple @@ -703,89 +695,89 @@ lemma handleFaultReply': upto_enum_word mapM_x_Cons mapM_x_Nil) apply (simp add: getSanitiseRegisterInfo_moreMapM_comm asUser_getRegister_getSanitiseRegisterInfo_comm getSanitiseRegisterInfo_lookupIPCBuffer_comm) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind_tail [where Q="\_. tcb_at' r"]) - apply (case_tac sb) + apply (rule monadic_rewrite_bind_tail [where Q="\_. 
tcb_at' r"]) + apply (case_tac sb) + apply (case_tac "msgLength tag < scast n_msgRegisters") + apply (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + | wp)+)+ apply (case_tac "msgLength tag < scast n_msgRegisters") apply (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - asUser_getRegister_discarded - asUser_comm[OF neq] take_zip - word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - | wp)+)+ - apply (case_tac "msgLength tag < scast n_msgRegisters") - apply (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - zipWithM_x_Nil - asUser_getRegister_discarded - asUser_comm[OF neq] take_zip - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_threadGet_return - monadic_rewrite_getSanitiseRegisterInfo_return - | wp mapM_wp')+)+ - apply (simp add: n_msgRegisters_def word_le_nat_alt n_syscallMessage_def - linorder_not_less syscallMessage_unfold) - apply (clarsimp | frule neq0_conv[THEN iffD2, THEN not0_implies_Suc, - OF order_less_le_trans, rotated])+ - apply (subgoal_tac "\n :: word32. n \ scast n_syscallMessage \ [n .e. msgMaxLength] - = [n .e. scast n_syscallMessage] - @ [scast n_syscallMessage + 1 .e. 
msgMaxLength]") - apply (simp only: upto_enum_word[where y="scast n_syscallMessage :: word32"] - upto_enum_word[where y="scast n_syscallMessage + 1 :: word32"]) - apply (clarsimp simp: bind_assoc asUser_bind_distrib asUser_getRegister_threadGet_comm - mapM_x_Cons mapM_x_Nil threadGet_discarded - asUser_comm [OF neq] asUser_getRegister_discarded - submonad_asUser.fn_stateAssert take_zip - bind_subst_lift [OF submonad_asUser.stateAssert_fn] - word_less_nat_alt ARM_HYP_H.sanitiseRegister_def - split_def n_msgRegisters_def msgMaxLength_def - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - word_size msgLengthBits_def n_syscallMessage_def Let_def - cong: if_weak_cong register.case_cong) - - - apply (rule monadic_rewrite_bind_tail)+ - apply (subst (2) upto_enum_word) - apply (case_tac "ma < unat n_syscallMessage - 4") - - apply (erule disjE[OF nat_less_cases'], - ( clarsimp simp: n_syscallMessage_def bind_assoc asUser_bind_distrib - mapM_x_Cons mapM_x_Nil zipWithM_x_mapM_x mapM_Cons - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - asUser_loadWordUser_comm loadWordUser_discarded asUser_return - zip_take_triv2 msgMaxLength_def - no_fail_stateAssert - cong: if_weak_cong - | simp - | rule monadic_rewrite_bind_tail - monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - monadic_rewrite_threadGet_return - monadic_rewrite_getSanitiseRegisterInfo_return - monadic_rewrite_getSanitiseRegisterInfo_drop - | wp asUser_typ_ats empty_fail_loadWordUser)+)+ - apply (clarsimp simp: upto_enum_word word_le_nat_alt simp del: upt.simps cong: if_weak_cong) - apply (cut_tac i="unat n" and j="Suc (unat (scast n_syscallMessage :: word32))" - and k="Suc msgMaxLength" in upt_add_eq_append') - apply (simp add: n_syscallMessage_def) - apply (simp add: n_syscallMessage_def msgMaxLength_unfold) - apply (simp add: n_syscallMessage_def msgMaxLength_def - msgLengthBits_def shiftL_nat - del: upt.simps upt_rec_numeral) - apply (simp add: upto_enum_word cong: if_weak_cong) - apply wp+ + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + zipWithM_x_Nil + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_threadGet_return + monadic_rewrite_getSanitiseRegisterInfo_return + | wp mapM_wp')+)+ + apply (simp add: n_msgRegisters_def word_le_nat_alt n_syscallMessage_def + linorder_not_less syscallMessage_unfold) + apply (clarsimp | frule neq0_conv[THEN iffD2, THEN not0_implies_Suc, + OF order_less_le_trans, rotated])+ + apply (subgoal_tac "\n :: word32. n \ scast n_syscallMessage \ [n .e. msgMaxLength] + = [n .e. scast n_syscallMessage] + @ [scast n_syscallMessage + 1 .e. 
msgMaxLength]") + apply (simp only: upto_enum_word[where y="scast n_syscallMessage :: word32"] + upto_enum_word[where y="scast n_syscallMessage + 1 :: word32"]) + apply (clarsimp simp: bind_assoc asUser_bind_distrib asUser_getRegister_threadGet_comm + mapM_x_Cons mapM_x_Nil threadGet_discarded + asUser_comm [OF neq] asUser_getRegister_discarded + submonad_asUser.fn_stateAssert take_zip + bind_subst_lift [OF submonad_asUser.stateAssert_fn] + word_less_nat_alt ARM_HYP_H.sanitiseRegister_def + split_def n_msgRegisters_def msgMaxLength_def + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_size msgLengthBits_def n_syscallMessage_def Let_def + cong: if_weak_cong register.case_cong) + + + apply (rule monadic_rewrite_bind_tail)+ + apply (subst (2) upto_enum_word) + apply (case_tac "ma < unat n_syscallMessage - 4") + + apply (erule disjE[OF nat_less_cases'], + ( clarsimp simp: n_syscallMessage_def bind_assoc asUser_bind_distrib + mapM_x_Cons mapM_x_Nil zipWithM_x_mapM_x mapM_Cons + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_loadWordUser_comm loadWordUser_discarded asUser_return + zip_take_triv2 msgMaxLength_def + no_fail_stateAssert + cong: if_weak_cong + | simp + | rule monadic_rewrite_bind_tail + monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + monadic_rewrite_threadGet_return + monadic_rewrite_getSanitiseRegisterInfo_return + monadic_rewrite_getSanitiseRegisterInfo_drop + | wp asUser_typ_ats empty_fail_loadWordUser)+)+ + apply (clarsimp simp: upto_enum_word word_le_nat_alt simp del: upt.simps cong: if_weak_cong) + apply (cut_tac i="unat n" and j="Suc (unat (scast n_syscallMessage :: word32))" + and k="Suc msgMaxLength" in upt_add_eq_append') + apply (simp add: n_syscallMessage_def) + apply (simp add: n_syscallMessage_def msgMaxLength_unfold) + apply (simp add: n_syscallMessage_def msgMaxLength_def + msgLengthBits_def shiftL_nat + del: upt.simps upt_rec_numeral) + apply (simp add: upto_enum_word cong: if_weak_cong) + apply wp+ (* ArchFault *) apply (simp add: neq inj_case_bool split: bool.split) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_is_refl) apply (rule ext) apply (unfold handleArchFaultReply'[symmetric] getMRs_def msgMaxLength_def @@ -803,7 +795,7 @@ begin (* FIXME: move *) lemma ccorres_merge_return: - "ccorres (\a c. r (f a) c) xf P P' hs H C \ + "ccorres (r \ f) xf P P' hs H C \ ccorres r xf P P' hs (do x \ H; return (f x) od) C" by (rule ccorres_return_into_rel) @@ -1272,7 +1264,7 @@ lemma setMRs_syscall_error_ccorres: | wp hoare_case_option_wp | (simp del: Collect_const, vcg exspec=setMR_modifies) )+ - apply (simp add: msgMaxLength_unfold true_def false_def) + apply (simp add: msgMaxLength_unfold) apply (clarsimp split:if_split_asm simp:syscall_error_to_H_def map_option_Some_eq2) apply (simp add: msgFromLookupFailure_def split: lookup_failure.split @@ -1426,10 +1418,6 @@ shows apply (auto split: if_split) done -declare zipWith_Nil2[simp] - -declare zipWithM_x_Nil2[simp] - lemma getRestartPC_ccorres [corres]: "ccorres (=) ret__unsigned_long_' \ (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\) hs @@ -1442,8 +1430,7 @@ lemma getRestartPC_ccorres [corres]: done lemma asUser_tcbFault_obj_at: - "\obj_at' (\tcb. P (tcbFault tcb)) t\ asUser t' m - \\rv. obj_at' (\tcb. P (tcbFault tcb)) t\" + "asUser t' m \obj_at' (\tcb. 
P (tcbFault tcb)) t\" supply if_cong[cong] apply (simp add: asUser_def split_def) apply (wp threadGet_wp) @@ -1538,7 +1525,7 @@ lemma exceptionMessage_length_aux : lemma copyMRsFault_ccorres_exception: "ccorres dc xfdc (valid_pspace' - and obj_at' (\tcb. map (atcbContext (tcbArch tcb)) ARM_HYP_H.exceptionMessage = msg) sender + and obj_at' (\tcb. map (user_regs (atcbContext (tcbArch tcb))) ARM_HYP_H.exceptionMessage = msg) sender and K (length msg = 3) and K (recvBuffer \ Some 0) and K (sender \ receiver)) @@ -1560,7 +1547,7 @@ lemma copyMRsFault_ccorres_exception: for as bs, simplified] bind_assoc) apply (rule ccorres_rhs_assoc2, rule ccorres_split_nothrow_novcg) - apply (rule_tac F="K $ obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_HYP_H.exceptionMessage = msg) sender" + apply (rule_tac F="K $ obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) ARM_HYP_H.exceptionMessage = msg) sender" in ccorres_mapM_x_while) apply (clarsimp simp: n_msgRegisters_def) apply (rule ccorres_guard_imp2) @@ -1606,7 +1593,7 @@ lemma mapM_cong: "\ \x. elem x xs \ f x = g x \< lemma copyMRsFault_ccorres_syscall: "ccorres dc xfdc (valid_pspace' - and obj_at' (\tcb. map (atcbContext (tcbArch tcb)) ARM_HYP_H.syscallMessage = msg) sender + and obj_at' (\tcb. map (user_regs (atcbContext (tcbArch tcb))) ARM_HYP_H.syscallMessage = msg) sender and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \) and K (length msg = 12) and K (recvBuffer \ Some 0) @@ -1645,7 +1632,7 @@ proof - and ys="drop (unat n_msgRegisters) (zip as bs)" for as bs, simplified] bind_assoc) apply (rule ccorres_rhs_assoc2, rule ccorres_split_nothrow_novcg) - apply (rule_tac F="K $ obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_HYP_H.syscallMessage = msg) sender" + apply (rule_tac F="K $ obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) ARM_HYP_H.syscallMessage = msg) sender" in ccorres_mapM_x_while) apply (clarsimp simp: n_msgRegisters_def) apply (rule ccorres_guard_imp2) @@ -1673,53 +1660,54 @@ proof - apply ceqv apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) - apply (rule ccorres_rel_imp[where r = "\rv rv'. True", simplified]) - apply (rule_tac F="\_. obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_HYP_H.syscallMessage = msg) - sender and valid_pspace' - and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" - in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) - apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters - option_to_0_def liftM_def[symmetric] - split: option.split_asm) - apply (rule ccorres_guard_imp2) - apply (rule_tac t=sender and r="ARM_HYP_H.syscallMessage ! (n + unat n_msgRegisters)" - in ccorres_add_getRegister) - apply (ctac(no_vcg)) - apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" - in ccorres_cross_over_guard) - apply (rule ccorres_move_array_assertion_ipc_buffer - | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ - apply (simp add: storeWordUser_def) - apply (rule ccorres_pre_stateAssert) - apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) - apply (simp add: pred_conj_def) - apply (wp user_getreg_rv) - apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def - syscallMessage_ccorres msgRegisters_ccorres - unat_add_lem[THEN iffD1] unat_of_nat32 - word_bits_def word_size_def) - apply (simp only:field_simps imp_ex imp_conjL) - apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def - pointerInUserData_h_t_valid - atcbContextGet_def - projectKOs objBits_simps word_less_nat_alt - unat_add_lem[THEN iffD1] unat_of_nat) - apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def - MessageID_Syscall_def - msg_align_bits valid_ipc_buffer_ptr'_def) - apply (erule aligned_add_aligned) - apply (rule aligned_add_aligned[where n=2]) - apply (simp add: is_aligned_def) - apply (rule is_aligned_mult_triv2 [where n=2, simplified]) - apply (simp add: wb_gt_2)+ - apply (simp add: n_msgRegisters_def) - apply (vcg exspec=getRegister_modifies) - apply simp - apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) - apply (rule hoare_pre) - apply (wp hoare_case_option_wp | wpc)+ - apply clarsimp - apply (simp add: n_msgRegisters_def word_bits_def) + apply (rule ccorres_rel_imp) + apply (rule_tac F="\_. obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) ARM_HYP_H.syscallMessage = msg) + sender and valid_pspace' + and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" + in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) + apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters + option_to_0_def liftM_def[symmetric] + split: option.split_asm) + apply (rule ccorres_guard_imp2) + apply (rule_tac t=sender and r="ARM_HYP_H.syscallMessage ! (n + unat n_msgRegisters)" + in ccorres_add_getRegister) + apply (ctac(no_vcg)) + apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" + in ccorres_cross_over_guard) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ + apply (simp add: storeWordUser_def) + apply (rule ccorres_pre_stateAssert) + apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) + apply (simp add: pred_conj_def) + apply (wp user_getreg_rv) + apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def + syscallMessage_ccorres msgRegisters_ccorres + unat_add_lem[THEN iffD1] unat_of_nat32 + word_bits_def word_size_def) + apply (simp only:field_simps imp_ex imp_conjL) + apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def + pointerInUserData_h_t_valid + atcbContextGet_def + projectKOs objBits_simps word_less_nat_alt + unat_add_lem[THEN iffD1] unat_of_nat) + apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def + MessageID_Syscall_def + msg_align_bits valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned) + apply (rule aligned_add_aligned[where n=2]) + apply (simp add: is_aligned_def) + apply (rule is_aligned_mult_triv2 [where n=2, simplified]) + apply (simp add: wb_gt_2)+ + apply (simp add: n_msgRegisters_def) + apply (vcg exspec=getRegister_modifies) + apply simp + apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) + apply (rule hoare_pre) + apply (wp hoare_case_option_wp | wpc)+ + apply clarsimp + apply (simp add: n_msgRegisters_def word_bits_def) + apply simp apply (simp add: n_msgRegisters_def) apply (frule (1) option_to_0_imp) apply (subst drop_zip) @@ -1727,7 +1715,7 @@ proof - apply (clarsimp simp: n_msgRegisters_def numeral_eqs mapM_cong[OF msg_aux, simplified numeral_eqs]) apply (subst mapM_x_return_gen[where w2="()"]) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp) apply (rule hoare_impI) apply (rule mapM_x_wp_inv) @@ -1817,7 +1805,7 @@ proof - apply (simp add: zip_upt_Cons guard_is_UNIVI seL4_VMFault_FSR_def split: list.split_asm) apply (simp split: list.split) apply (wp setMR_tcbFault_obj_at asUser_inv[OF getRestartPC_inv] - hoare_case_option_wp static_imp_wp + hoare_case_option_wp hoare_weak_lift_imp | simp add: option_to_ptr_def guard_is_UNIVI seL4_VMFault_PrefetchFault_def seL4_VMFault_Addr_def @@ -2022,6 +2010,7 @@ proof - let ?obj_at_ft = "obj_at' (\tcb. tcbFault tcb = Some ft) sender" note symb_exec_r_fault = ccorres_symb_exec_r_known_rv_UNIV [where xf'=ret__unsigned_' and R="?obj_at_ft" and R'=UNIV] + note empty_fail_cond[simp] show ?thesis apply (unfold K_def) apply (intro ccorres_gen_asm) @@ -2289,7 +2278,7 @@ lemma doFaultTransfer_ccorres [corres]: apply ceqv apply csymbr apply (ctac (no_vcg, c_lines 2) add: setMessageInfo_ccorres) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply wp apply (simp add: badgeRegister_def ARM_HYP.badgeRegister_def "StrictC'_register_defs") @@ -2327,7 +2316,7 @@ lemma unifyFailure_ccorres: assumes corr_ac: "ccorres (f \ r) xf P P' hs a c" shows "ccorres ((\_. 
dc) \ r) xf P P' hs (unifyFailure a) c" using corr_ac - apply (simp add: unifyFailure_def rethrowFailure_def const_def o_def + apply (simp add: unifyFailure_def rethrowFailure_def const_def handleE'_def throwError_def) apply (clarsimp simp: ccorres_underlying_def bind_def split_def return_def split: xstate.splits sum.splits) @@ -2799,7 +2788,7 @@ lemma transferCapsLoop_ccorres: \ \\destSlot = (if slots = [] then NULL else cte_Ptr (hd slots)) \ length slots \ 1 \ slots \ [0]\)" defines "is_the_ep \ \cap. isEndpointCap cap \ ep \ None \ capEPPtr cap = the ep" - defines "stable \ \scap excap. excap \ scap \ excap = maskedAsFull scap scap" + defines "stable_masked \ \scap excap. excap \ scap \ excap = maskedAsFull scap scap" defines "relative_at \ \scap slot s. cte_wp_at' (\cte. badge_derived' scap (cteCap cte) \ capASID scap = capASID (cteCap cte) \ @@ -2814,7 +2803,7 @@ lemma transferCapsLoop_ccorres: (\s. (\x \ set caps. s \' fst x \ cte_wp_at' (\cte. slots \ [] \ is_the_ep (cteCap cte) \ (fst x) = (cteCap cte)) (snd x) s - \ cte_wp_at' (\cte. fst x \ NullCap \ stable (fst x) (cteCap cte)) (snd x) s)) and + \ cte_wp_at' (\cte. fst x \ NullCap \ stable_masked (fst x) (cteCap cte)) (snd x) s)) and (\s. \ sl \ (set slots). cte_wp_at' (isNullCap o cteCap) sl s) and (\_. n + length caps \ 3 \ distinct slots )) (precond n mi slots) @@ -2879,22 +2868,22 @@ next by (simp add:relative_at_def) have stableD: - "\scap excap. stable scap excap + "\scap excap. stable_masked scap excap \ (badge_derived' scap excap \ capASID scap = capASID excap \ cap_asid_base' scap = cap_asid_base' excap \ cap_vptr' scap = cap_vptr' excap)" - apply (clarsimp simp:stable_def) + apply (clarsimp simp:stable_masked_def) apply (case_tac "excap = scap",simp+) apply (simp add:maskedAsFull_misc) done have stable_eq: - "\scap excap. \stable scap excap; isEndpointCap excap\ \ scap = excap" - by (simp add:isCap_simps stable_def maskedAsFull_def split:if_splits) + "\scap excap. \stable_masked scap excap; isEndpointCap excap\ \ scap = excap" + by (simp add:isCap_simps stable_masked_def maskedAsFull_def split:if_splits) have is_the_ep_stable: - "\a b. \a \ NullCap \ stable a b; \ is_the_ep b \ \ \ is_the_ep a" - apply (clarsimp simp:stable_def maskedAsFull_def is_the_ep_def isCap_simps split:if_splits) + "\a b. \a \ NullCap \ stable_masked a b; \ is_the_ep b \ \ \ is_the_ep a" + apply (clarsimp simp:stable_masked_def maskedAsFull_def is_the_ep_def isCap_simps split:if_splits) apply auto done @@ -3047,8 +3036,8 @@ next \ (\x\set slots. cte_wp_at' (isNullCap \ cteCap) x s) \ (\x\set xs'. s \' fst x \ cte_wp_at' (\c. is_the_ep (cteCap c) \ fst x = cteCap c) (snd x) s - \ cte_wp_at' (\c. fst x \ NullCap \ stable (fst x) (cteCap c)) (snd x) s)" - in hoare_post_imp_R) + \ cte_wp_at' (\c. 
fst x \ NullCap \ stable_masked (fst x) (cteCap c)) (snd x) s)" + in hoare_strengthen_postE_R) prefer 2 apply (clarsimp simp:cte_wp_at_ctes_of valid_pspace_mdb' valid_pspace'_splits valid_pspace_valid_objs' is_derived_capMasterCap image_def) @@ -3058,10 +3047,10 @@ next apply (rule conjI) apply (drule(1) bspec)+ apply (rule conjI | clarsimp)+ - apply (clarsimp simp:is_the_ep_def isCap_simps stable_def) + apply (clarsimp simp:is_the_ep_def isCap_simps stable_masked_def) apply (drule(1) bspec)+ apply (rule conjI | clarsimp)+ - apply (clarsimp simp:is_the_ep_def stable_def split:if_splits)+ + apply (clarsimp simp:is_the_ep_def stable_masked_def split:if_splits)+ apply (case_tac "a = cteCap cteb",clarsimp) apply (simp add:maskedAsFull_def split:if_splits) apply (simp add:maskedAsFull_again) @@ -3094,9 +3083,8 @@ next word_sle_def t2n_mask_eq_if) apply (rule conjI) apply (clarsimp simp: ccap_rights_relation_def cap_rights_to_H_def - false_def true_def to_bool_def allRights_def - excaps_map_def split_def - dest!: drop_n_foo interpret_excaps_eq) + allRights_def excaps_map_def split_def + dest!: drop_n_foo interpret_excaps_eq) apply (clarsimp simp:from_bool_def split:bool.splits) apply (case_tac "isEndpointCap (fst x)") apply (clarsimp simp: cap_get_tag_EndpointCap ep_cap_not_null cap_get_tag_isCap[symmetric]) @@ -3136,7 +3124,7 @@ next apply (rule conseqPre, vcg) apply (clarsimp split del: if_split) apply (clarsimp split del: if_split - simp add: Collect_const[symmetric] precond_def true_def false_def + simp add: Collect_const[symmetric] precond_def simp del: Collect_const) apply (rule HoarePartial.Seq[rotated] HoarePartial.Cond[OF order_refl] HoarePartial.Basic[OF order_refl] HoarePartial.Skip[OF order_refl] @@ -3163,14 +3151,14 @@ next apply (subgoal_tac "fst x = cteCap cte",simp) apply clarsimp apply (elim disjE) - apply (clarsimp simp:ep_cap_not_null stable_def) + apply (clarsimp simp:ep_cap_not_null stable_masked_def) apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) apply (clarsimp simp:valid_cap_simps' isCap_simps) apply (subgoal_tac "slots \ []") apply simp apply clarsimp apply (elim disjE) - apply (clarsimp simp:ep_cap_not_null stable_def) + apply (clarsimp simp:ep_cap_not_null stable_masked_def) apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) apply (clarsimp dest!:ccap_relation_lift simp:cap_get_tag_isCap is_the_ep_def) apply (clarsimp simp:valid_cap_simps' isCap_simps) @@ -3365,10 +3353,11 @@ lemma ccorres_sequenceE_while': Basic (\s. i_'_update (\_. i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -3397,6 +3386,7 @@ proof - let ?EXCNONE = "{s. ret__unsigned_long_' s = scast EXCEPTION_NONE}" let ?interpret = "\v n. 
take n (array_to_list (excaprefs_C v))" note if_split[split del] + note empty_fail_cond[simp] show ?thesis apply (rule ccorres_gen_asm)+ apply (cinit(no_subst_asm) lift: thread_' bufferPtr_' info_' simp: whileAnno_def) @@ -3421,9 +3411,10 @@ proof - apply (rule ccorres_symb_exec_r) apply csymbr apply (rename_tac "lngth") - apply (simp add: mi_from_H_def mapME_def del: Collect_const cong: bind_apply_cong) + apply (unfold mapME_def)[1] + apply (simp add: mi_from_H_def del: Collect_const) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="length rv = unat word2" in ccorres_gen_asm) + apply (rule_tac P="length xs = unat word2" in ccorres_gen_asm) apply csymbr apply (rule ccorres_rhs_assoc2) apply (rule ccorres_add_returnOk2, @@ -3433,7 +3424,7 @@ proof - and Q="UNIV" and F="\n s. valid_pspace' s \ tcb_at' thread s \ (case buffer of Some x \ valid_ipc_buffer_ptr' x | _ \ \) s \ - (\m < length rv. user_word_at (rv ! m) + (\m < length xs. user_word_at (xs ! m) (x2 + (of_nat m + (msgMaxLength + 2)) * 4) s)" in ccorres_sequenceE_while') apply (simp add: split_def) @@ -3443,7 +3434,7 @@ proof - apply (rule_tac xf'=cptr_' in ccorres_abstract, ceqv) apply (ctac add: capFaultOnFailure_ccorres [OF lookupSlotForThread_ccorres']) - apply (rule_tac P="is_aligned rva 4" in ccorres_gen_asm) + apply (rule_tac P="is_aligned rv 4" in ccorres_gen_asm) apply (simp add: ccorres_cond_iffs liftE_bindE) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_getSlotCap]) apply (rule_tac P'="UNIV \ {s. excaps_map ys @@ -3464,7 +3455,7 @@ proof - apply (clarsimp simp: ccorres_cond_iffs) apply (rule_tac P= \ and P'="{x. errstate x= lu_ret___struct_lookupSlot_raw_ret_C \ - rv' = (rv ! length ys)}" + rv' = (xs ! length ys)}" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def) @@ -3472,9 +3463,9 @@ proof - apply (clarsimp simp: cfault_rel2_def) apply (clarsimp simp: cfault_rel_def) apply (simp add: seL4_Fault_CapFault_lift) - apply (clarsimp simp: is_cap_fault_def to_bool_def false_def) + apply (clarsimp simp: is_cap_fault_def) apply wp - apply (rule hoare_post_imp_R, rule lsft_real_cte) + apply (rule hoare_strengthen_postE_R, rule lsft_real_cte) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps') apply (vcg exspec=lookupSlot_modifies) apply vcg @@ -3505,8 +3496,7 @@ proof - apply ceqv apply (simp del: Collect_const) apply (rule_tac P'="{s. snd rv'=?curr s}" - and P="\s. length rva = length rv - \ (\x \ set rva. snd x \ 0)" + and P="\s. length rv = length xs \ (\x \ set rv. snd x \ 0)" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def @@ -3528,7 +3518,7 @@ proof - liftE_bindE[symmetric]) apply (wp mapME_length mapME_set | simp)+ apply (rule_tac Q'="\rv. no_0_obj' and real_cte_at' rv" - in hoare_post_imp_R, wp lsft_real_cte) + in hoare_strengthen_postE_R, wp lsft_real_cte) apply (clarsimp simp: cte_wp_at_ctes_of) apply (wpsimp)+ apply (clarsimp simp: guard_is_UNIV_def @@ -3601,7 +3591,7 @@ proof - apply (cinit lift: sender_' receiver_' sendBuffer_' receiveBuffer_' canGrant_' badge_' endpoint_' cong: call_ignore_cong) - apply (clarsimp cong: call_ignore_cong simp del: dc_simp) + apply (clarsimp cong: call_ignore_cong) apply (ctac(c_lines 2, no_vcg) add: getMessageInfo_ccorres') apply (rule_tac xf'="\s. current_extra_caps_' (globals s)" and r'="\c c'. 
interpret_excaps c' = excaps_map c" @@ -3646,7 +3636,7 @@ proof - apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: seL4_MessageInfo_lift_def message_info_to_H_def mask_def msgLengthBits_def word_bw_assocs) - apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] static_imp_wp + apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] hoare_weak_lift_imp | simp)+ apply (simp add: Collect_const_mem) apply (auto simp: excaps_in_mem_def valid_ipc_buffer_ptr'_def @@ -3659,7 +3649,7 @@ qed lemma lookupIPCBuffer_not_Some_0: "\\\ lookupIPCBuffer r t \\rv. K (rv \ Some 0)\" apply (simp add: lookupIPCBuffer_def ARM_HYP_H.lookupIPCBuffer_def) - apply (wp hoare_post_taut haskell_assert_wp + apply (wp hoare_TrueI haskell_assert_wp | simp add: Let_def getThreadBufferSlot_def locateSlotTCB_def | intro conjI impI | wpc)+ done @@ -3710,7 +3700,6 @@ lemma replyFromKernel_error_ccorres [corres]: apply ((rule ccorres_Guard_Seq)+)? apply csymbr apply (rule ccorres_abstract_cleanup) - apply (fold dc_def)[1] apply (rule setMessageInfo_ccorres) apply wp apply (simp add: Collect_const_mem) @@ -3728,7 +3717,7 @@ lemma replyFromKernel_error_ccorres [corres]: message_info_to_H_def valid_pspace_valid_objs') apply (clarsimp simp: msgLengthBits_def msgFromSyscallError_def syscall_error_to_H_def syscall_error_type_defs - mask_def true_def option_to_ptr_def + mask_def option_to_ptr_def split: if_split_asm) done @@ -3779,14 +3768,12 @@ lemma doIPCTransfer_ccorres [corres]: apply simp_all[3] apply ceqv apply csymbr - apply (fold dc_def)[1] apply ctac apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero) - apply (fold dc_def)[1] apply ctac - apply (clarsimp simp: guard_is_UNIV_def false_def option_to_ptr_def split: option.splits) + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def split: option.splits) apply (rule_tac Q="\rv. valid_pspace' and cur_tcb' and tcb_at' sender and tcb_at' receiver and K (rv \ Some 0) and (case_option \ valid_ipc_buffer_ptr' rv) @@ -3795,7 +3782,7 @@ lemma doIPCTransfer_ccorres [corres]: apply (auto simp: valid_ipc_buffer_ptr'_def option_to_0_def split: option.splits)[1] apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) - apply (auto simp: to_bool_def true_def) + apply auto done lemma fault_case_absorb_bind: @@ -3817,7 +3804,7 @@ lemma Arch_getSanitiseRegisterInfo_ccorres: apply (cinit' lift: thread_' simp: getSanitiseRegisterInfo_def2) apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_pre_archThreadGet) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. 
v \ Some 0" in ccorres_cross_over_guard) apply (rule ccorres_return_C, simp+) apply (clarsimp simp: typ_heap_simps ctcb_relation_def carch_tcb_relation_def) apply (rule conjI) @@ -3826,7 +3813,7 @@ lemma Arch_getSanitiseRegisterInfo_ccorres: apply (clarsimp simp: valid_tcb'_def valid_arch_tcb'_def) apply (clarsimp simp: typ_heap_simps) apply (case_tac "atcbVCPUPtr (tcbArch tcb) \ None") - apply (clarsimp simp:if_1_0_0 true_def false_def split: if_splits)+ + apply (clarsimp split: if_splits)+ done @@ -3858,7 +3845,7 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) apply (rule ccorres_rhs_assoc2) apply (simp add: MessageID_Exception_def) apply ccorres_rewrite - apply (subst bind_return_unit) + apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_zipWithM_x_while) apply clarsimp @@ -3870,7 +3857,7 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) apply (vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (auto simp: from_bool_def sanitiseRegister_def)[1] + apply (auto simp: sanitiseRegister_def)[1] apply wp apply clarsimp apply vcg @@ -3911,7 +3898,7 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) n_msgRegisters_def of_nat_less_iff) apply ccorres_rewrite - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (wp mapM_wp') apply clarsimp+ apply (clarsimp simp: guard_is_UNIV_def message_info_to_H_def @@ -4066,7 +4053,6 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) apply (subst aligned_add_aligned, assumption) apply (rule is_aligned_mult_triv2[where n=2, simplified]) apply (simp add: msg_align_bits) - apply (simp add: of_nat_unat[simplified comp_def]) apply (simp only: n_msgRegisters_def) apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def word_unat.Rep_inverse[of "scast _ :: 'a word"] @@ -4083,7 +4069,6 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) msg_align_bits sanitiseRegister_def simp del: upt_rec_numeral cong: if_cong register.case_cong, simp_all add: word_less_nat_alt unat_add_lem[THEN iffD1] unat_of_nat)[1] - apply (rule_tac x=rv in exI, auto)[1] apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def msgRegisters_ccorres syscallMessage_ccorres @@ -4105,8 +4090,8 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) apply simp apply (subst option.split[symmetric,where P=id, simplified]) apply (rule valid_drop_case) - apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified K_def] - lookupIPCBuffer_not_Some_0[simplified K_def]) + apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified] + lookupIPCBuffer_not_Some_0[simplified]) apply (simp add: length_syscallMessage length_msgRegisters n_syscallMessage_def @@ -4118,7 +4103,7 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) apply (rule ccorres_guard_imp) apply (rule ccorres_symb_exec_l) apply (case_tac rva ; clarsimp) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (rule ccorres_return_Skip)+ apply (wp mapM_x_wp_inv user_getreg_inv' | clarsimp simp: zipWithM_x_mapM_x split: prod.split)+ apply (cases "4 < len") @@ -4161,7 +4146,7 @@ lemma handleArchFaultReply_corres: apply simp+ apply (rule ccorres_symb_exec_l) apply (ctac add: ccorres_return_C) - apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp simp: to_bool_def true_def)+ + apply (wpsimp wp: mapM_wp' empty_fail_loadWordUser)+ (* VCPUFault *) apply (rule ccorres_symb_exec_l) apply (rule ccorres_stateAssert) 
@@ -4171,7 +4156,7 @@ lemma handleArchFaultReply_corres: apply simp+ apply (rule ccorres_symb_exec_l) apply (ctac add: ccorres_return_C) - apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp simp: to_bool_def true_def)+ + apply (wpsimp wp: mapM_wp' empty_fail_loadWordUser)+ (* VPPIEvent *) apply (rule ccorres_symb_exec_l) apply (rule ccorres_stateAssert) @@ -4181,7 +4166,7 @@ lemma handleArchFaultReply_corres: apply simp+ apply (rule ccorres_symb_exec_l) apply (ctac add: ccorres_return_C) - apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp simp: to_bool_def true_def)+ + apply (wpsimp wp: mapM_wp' empty_fail_loadWordUser)+ (* VGICMaintenance *) apply (rule ccorres_symb_exec_l) apply (rule ccorres_stateAssert) @@ -4191,7 +4176,7 @@ lemma handleArchFaultReply_corres: apply simp+ apply (rule ccorres_symb_exec_l) apply (ctac add: ccorres_return_C) - apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp simp: to_bool_def true_def)+ + apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp)+ done (* MOVE *) @@ -4239,7 +4224,7 @@ lemma handleFaultReply_ccorres [corres]: apply (unfold K_def, rule ccorres_gen_asm) apply (rule monadic_rewrite_ccorres_assemble_nodrop[OF _ handleFaultReply',rotated], simp) apply (cinit lift: sender_' receiver_' simp: whileAnno_def) - apply (clarsimp simp del: dc_simp) + apply clarsimp apply (ctac(c_lines 2) add: getMessageInfo_ccorres') apply (rename_tac tag tag') apply csymbr @@ -4285,7 +4270,7 @@ lemma handleFaultReply_ccorres [corres]: split del: if_split) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) - apply (fold bind_assoc id_def) + apply (fold bind_assoc) apply (ctac add: copyMRsFaultReply_ccorres_syscall[simplified bind_assoc[symmetric]]) apply (ctac add: ccorres_return_C) apply wp @@ -4327,9 +4312,9 @@ lemma handleFaultReply_ccorres [corres]: apply clarsimp apply vcg_step apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def - message_info_to_H_def to_bool_def scast_def + message_info_to_H_def scast_def length_exceptionMessage length_syscallMessage - min_def word_less_nat_alt true_def + min_def word_less_nat_alt guard_is_UNIV_def seL4_Faults seL4_Arch_Faults split: if_split) apply (simp add: length_exceptionMessage length_syscallMessage) @@ -4337,10 +4322,8 @@ lemma handleFaultReply_ccorres [corres]: apply clarsimp apply (vcg exspec=getRegister_modifies) apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def - message_info_to_H_def to_bool_def - length_exceptionMessage length_syscallMessage - min_def word_less_nat_alt true_def - obj_at'_def + message_info_to_H_def length_exceptionMessage length_syscallMessage + min_def word_less_nat_alt obj_at'_def split: if_split) apply (fastforce simp: seL4_Faults seL4_Arch_Faults) done @@ -4382,7 +4365,7 @@ lemma cteDeleteOne_tcbFault: apply (wp emptySlot_tcbFault cancelAllIPC_tcbFault getCTE_wp' cancelAllSignals_tcbFault unbindNotification_tcbFault isFinalCapability_inv unbindMaybeNotification_tcbFault - static_imp_wp + hoare_weak_lift_imp | wpc | simp add: Let_def)+ apply (clarsimp split: if_split) done @@ -4407,7 +4390,7 @@ lemma transferCaps_local_slots: transferCaps tag caps ep receiver receiveBuffer \\tag'. cte_wp_at' (\cte. 
P (cteCap cte)) slot\" apply (simp add: transferCaps_def pred_conj_def) - apply (rule hoare_seq_ext[rotated]) + apply (rule bind_wp_fwd) apply (rule hoare_vcg_conj_lift) apply (rule get_rs_real_cte_at') apply (rule get_recv_slot_inv') @@ -4473,10 +4456,6 @@ lemma doReplyTransfer_ccorres [corres]: \ \\grant = from_bool grant\) hs (doReplyTransfer sender receiver slot grant) (Call doReplyTransfer_'proc)" -proof - - have invs_valid_queues_strg: "\s. invs' s \ valid_queues s" - by clarsimp - show ?thesis apply (cinit lift: sender_' receiver_' slot_' grant_') apply (rule getThreadState_ccorres_foo) apply (rule ccorres_assert2) @@ -4502,14 +4481,13 @@ proof - apply csymbr apply wpc apply (clarsimp simp: ccorres_cond_iffs) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg)) apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) - apply (wpsimp wp: sts_running_valid_queues setThreadState_st_tcb)+ + apply (wpsimp wp: sts_valid_objs' setThreadState_st_tcb)+ apply (wp cteDeleteOne_sch_act_wf) apply vcg apply (rule conseqPre, vcg) @@ -4518,15 +4496,13 @@ proof - apply wp apply (simp add: cap_get_tag_isCap) apply (strengthen invs_weak_sch_act_wf_strg - cte_wp_at_imp_consequent'[where P="\ct. Ex (ccap_relation (cteCap ct))" for ct] - invs_valid_queues_strg) + cte_wp_at_imp_consequent'[where P="\ct. Ex (ccap_relation (cteCap ct))" for ct]) apply (simp add: cap_reply_cap_def) apply (wp doIPCTransfer_reply_or_replyslot) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero split del: if_split) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def)[1] apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (rule_tac A'=UNIV in stronger_ccorres_guard_imp) @@ -4555,22 +4531,20 @@ proof - apply (ctac (no_vcg)) apply (simp only: K_bind_def) apply (ctac add: possibleSwitchTo_ccorres) - apply (wp sts_running_valid_queues setThreadState_st_tcb | simp)+ - apply (fold dc_def)[1] - apply (ctac add: setThreadState_ccorres_valid_queues'_simple) + apply (wp sts_valid_objs' setThreadState_st_tcb | simp)+ + apply (ctac add: setThreadState_ccorres_simple) apply wp - apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' static_imp_wp + apply ((wp threadSet_sch_act hoare_weak_lift_imp threadSet_valid_objs' threadSet_weak_sch_act_wf | simp add: valid_tcb_state'_def)+)[1] - apply (clarsimp simp: guard_is_UNIV_def ThreadState_Restart_def - ThreadState_Inactive_def mask_def to_bool_def - option_to_ctcb_ptr_def) + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def option_to_ctcb_ptr_def) - apply (rule_tac Q="\rv. valid_queues and tcb_at' receiver and valid_queues' and + apply (rule_tac Q="\rv. tcb_at' receiver and valid_objs' and sch_act_simple and (\s. ksCurDomain s \ maxDomain) and - (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) + (\s. 
sch_act_wf (ksSchedulerAction s) s) and + pspace_aligned' and pspace_distinct'" in hoare_post_imp) apply (clarsimp simp: inQ_def weak_sch_act_wf_def) - apply (wp threadSet_valid_queues threadSet_sch_act handleFaultReply_sch_act_wf) + apply (wp threadSet_sch_act handleFaultReply_sch_act_wf) apply (clarsimp simp: guard_is_UNIV_def) apply assumption apply clarsimp @@ -4579,15 +4553,14 @@ proof - apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) apply (clarsimp simp: ctcb_relation_def typ_heap_simps) apply wp - apply (strengthen vp_invs_strg' invs_valid_queues') + apply (strengthen vp_invs_strg') apply (wp cteDeleteOne_tcbFault cteDeleteOne_sch_act_wf) apply vcg apply (rule conseqPre, vcg) apply (simp(no_asm_use) add: gs_set_assn_Delete_cstate_relation[unfolded o_def] subset_iff rf_sr_def) - apply (clarsimp simp: guard_is_UNIV_def to_bool_def true_def - option_to_ptr_def option_to_0_def false_def - ThreadState_Running_def mask_def + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def + ThreadState_defs mask_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs option_to_ctcb_ptr_def split: option.splits) @@ -4596,7 +4569,6 @@ proof - cap_get_tag_isCap) apply fastforce done -qed lemma ccorres_getCTE_cte_at: "ccorresG rf_sr \ r xf P P' hs (getCTE p >>= f) c @@ -4616,7 +4588,7 @@ lemma ccorres_getCTE_cte_at: done lemma setupCallerCap_ccorres [corres]: - "ccorres dc xfdc (valid_queues and valid_pspace' and (\s. \d p. sender \ set (ksReadyQueues s (d, p))) + "ccorres dc xfdc (valid_pspace' and (\s. sch_act_wf (ksSchedulerAction s) s) and sch_act_not sender and tcb_at' sender and tcb_at' receiver and tcb_at' sender and tcb_at' receiver) @@ -4629,8 +4601,7 @@ lemma setupCallerCap_ccorres [corres]: apply (frule_tac p=sender in is_aligned_tcb_ptr_to_ctcb_ptr) apply (cinit lift: sender_' receiver_' canGrant_') apply (clarsimp simp: word_sle_def - tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] - , fold dc_def)[1] + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]]) apply ccorres_remove_UNIV_guard apply (ctac(no_vcg)) apply (rule ccorres_move_array_assertion_tcb_ctes) @@ -4651,14 +4622,14 @@ lemma setupCallerCap_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg)) apply (rule ccorres_assert) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply csymbr apply (ctac add: cteInsert_ccorres) apply simp apply (wp getSlotCap_cte_wp_at) apply (clarsimp simp: ccap_relation_def cap_lift_reply_cap cap_to_H_simps cap_reply_cap_lift_def - false_def tcbSlots Kernel_C.tcbCaller_def + tcbSlots Kernel_C.tcbCaller_def size_of_def cte_level_bits_def ctcb_size_bits_def) apply (wp getCTE_wp') apply (simp add: tcbSlots Kernel_C.tcbCaller_def @@ -4677,11 +4648,11 @@ lemma setupCallerCap_ccorres [corres]: apply (simp add: locateSlot_conv) apply wp apply (clarsimp simp: ccap_rights_relation_def allRights_def - mask_def true_def cap_rights_to_H_def tcbCallerSlot_def + mask_def cap_rights_to_H_def tcbCallerSlot_def Kernel_C.tcbCaller_def) apply simp apply wp - apply (clarsimp simp: Kernel_C.ThreadState_BlockedOnReply_def mask_def + apply (clarsimp simp: ThreadState_defs mask_def valid_pspace'_def tcbReplySlot_def valid_tcb_state'_def Collect_const_mem tcb_cnode_index_defs) @@ -4705,7 +4676,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp 
split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -4726,7 +4697,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -4747,23 +4718,20 @@ lemma sendIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def - tcb_queue_relation'_def) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -4787,30 +4755,27 @@ lemma sendIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - isRecvEP_def isSendEP_def - tcb_queue_relation'_def valid_ep'_def - split: endpoint.splits list.splits - split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (clarsimp dest!: is_aligned_tcb_ptr_to_ctcb_ptr - split del: if_split) - apply (clarsimp split: if_split) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_def isSendEP_def + tcb_queue_relation'_def valid_ep'_def + split: endpoint.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (clarsimp dest!: is_aligned_tcb_ptr_to_ctcb_ptr + split del: if_split) + apply (clarsimp split: if_split) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply 
(erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -4833,10 +4798,9 @@ lemma rf_sr_tcb_update_twice: cmachine_state_relation_def) lemma sendIPC_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and valid_objs' and + "ccorres dc xfdc (tcb_at' thread and valid_objs' and pspace_aligned' and pspace_distinct' and sch_act_not thread and ep_at' epptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + (\s. sch_act_wf (ksSchedulerAction s) s) and K (bos = ThreadState_BlockedOnSend \ epptr' = epptr \ badge' = badge \ cg = from_bool canGrant \ cgr = from_bool canGrantReply @@ -4888,13 +4852,11 @@ lemma sendIPC_block_ccorres_helper: (simp add: typ_heap_simps')+)[1] apply (simp add: tcb_cte_cases_def) apply (simp add: ctcb_relation_def cthread_state_relation_def - ThreadState_BlockedOnSend_def mask_def - from_bool_def to_bool_def) - apply (clarsimp split: bool.split) + ThreadState_defs mask_def) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs') apply (clarsimp simp: guard_is_UNIV_def) apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def @@ -4998,6 +4960,19 @@ lemma tcb_queue_relation_qend_valid': apply (simp add: h_t_valid_clift_Some_iff) done +lemma tcb_queue'_head_end_NULL: + assumes qr: "tcb_queue_relation' getNext getPrev mp queue qhead qend" + and tat: "\t\set queue. tcb_at' t s" + shows "(qend = NULL) = (qhead = NULL)" + using qr tat + apply - + apply (erule tcb_queue_relationE') + apply (simp add: tcb_queue_head_empty_iff split: if_splits) + apply (rule tcb_at_not_NULL) + apply (erule bspec) + apply simp + done + lemma tcbEPAppend_spec: "\s queue. \ \ \s. \t. (t, s) \ rf_sr \ (\tcb\set queue. 
tcb_at' tcb t) \ distinct queue @@ -5080,7 +5055,7 @@ lemma sendIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -5096,12 +5071,12 @@ lemma sendIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (SendEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5118,29 +5093,26 @@ lemma sendIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=2] EPState_Send_def) - apply (clarsimp simp: tcb_queue_relation'_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=2] EPState_Send_def) + apply (clarsimp simp: tcb_queue_relation'_def) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (simp only:projectKOs injectKO_ep objBits_simps) - apply clarsimp - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (simp only:projectKOs injectKO_ep objBits_simps) + apply clarsimp + apply (clarsimp simp: obj_at'_def projectKOs) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5157,31 +5129,28 @@ lemma sendIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=2] EPState_Send_def - split: if_split) - apply (fastforce simp: tcb_queue_relation'_def - valid_ep'_def - dest: tcb_queue_relation_next_not_NULL) + \ 
\tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=2] EPState_Send_def + split: if_split) + apply (fastforce simp: tcb_queue_relation'_def + valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -5201,8 +5170,7 @@ lemma ctcb_relation_blockingIPCCanGrantD: lemma sendIPC_ccorres [corres]: "ccorres dc xfdc (invs' and st_tcb_at' simple' thread - and sch_act_not thread and ep_at' epptr and - (\s. \d p. thread \ set (ksReadyQueues s (d, p)))) + and sch_act_not thread and ep_at' epptr) (UNIV \ \\blocking = from_bool blocking\ \ \\do_call = from_bool do_call\ \ \\badge = badge\ @@ -5233,8 +5201,7 @@ lemma sendIPC_ccorres [corres]: apply ceqv apply (rule_tac A="invs' and st_tcb_at' simple' thread and sch_act_not thread and ko_at' ep epptr - and ep_at' epptr - and (\s. \d p. 
thread \ set (ksReadyQueues s (d, p)))" + and ep_at' epptr" in ccorres_guard_imp2 [where A'=UNIV]) apply wpc \ \RecvEP case\ @@ -5274,28 +5241,23 @@ lemma sendIPC_ccorres [corres]: apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) apply (clarsimp split del: if_split) apply (wpc ; ccorres_rewrite) - apply (clarsimp simp: from_bool_def disj_imp[symmetric] split del: if_split) + apply (clarsimp simp: disj_imp[symmetric] split del: if_split) apply (wpc ; clarsimp) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setupCallerCap_ccorres) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres) - apply (fold dc_def)[1] apply (rule ccorres_return_Skip) apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift possibleSwitchTo_sch_act_not - possibleSwitchTo_sch_act_not sts_st_tcb' - possibleSwitchTo_ksQ' sts_valid_queues sts_ksQ' + possibleSwitchTo_sch_act_not sts_st_tcb' sts_valid_objs' simp: valid_tcb_state'_def)+ apply vcg - apply (wpsimp wp: doIPCTransfer_sch_act setEndpoint_ksQ hoare_vcg_all_lift - set_ep_valid_objs' setEndpoint_valid_mdb' + apply (wpsimp wp: doIPCTransfer_sch_act hoare_vcg_all_lift + set_ep_valid_objs' setEndpoint_valid_mdb' | wp (once) hoare_drop_imp | strengthen sch_act_wf_weak)+ - apply (fastforce simp: guard_is_UNIV_def ThreadState_Inactive_def Collect_const_mem - ThreadState_Running_def mask_def from_bool_def - option_to_ptr_def option_to_0_def + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs Collect_const_mem mask_def + option_to_ptr_def option_to_0_def split: bool.split_asm) \ \IdleEP case\ @@ -5367,7 +5329,7 @@ lemma sendIPC_ccorres [corres]: st_tcb_at'_def valid_tcb_state'_def ko_wp_at'_def isBlockedOnSend_def projectKO_opt_tcb split: if_split_asm if_split) - apply (rule conjI, simp, rule impI, clarsimp simp: valid_pspace_valid_objs') + apply (rule conjI, simp, rule impI, clarsimp simp: valid_pspace'_def) apply (erule delta_sym_refs) apply (clarsimp split: if_split_asm dest!: symreftype_inverse')+ @@ -5410,10 +5372,9 @@ lemma ctcb_relation_blockingIPCCanGrantReplyD: done lemma receiveIPC_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and valid_objs' and + "ccorres dc xfdc (tcb_at' thread and valid_objs' and pspace_aligned' and pspace_distinct' and sch_act_not thread and ep_at' epptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and K (epptr = epptr && ~~ mask 4) and K (isEndpointCap cap \ ccap_relation cap cap')) UNIV hs @@ -5447,11 +5408,11 @@ lemma receiveIPC_block_ccorres_helper: apply (erule(1) rf_sr_tcb_update_no_queue_gen, (simp add: typ_heap_simps)+) apply (simp add: tcb_cte_cases_def) apply (simp add: ctcb_relation_def cthread_state_relation_def ccap_relation_ep_helpers - ThreadState_BlockedOnReceive_def mask_def cap_get_tag_isCap) + ThreadState_defs mask_def cap_get_tag_isCap) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_valid_queues hoare_vcg_all_lift threadSet_valid_objs' + apply (wp hoare_vcg_all_lift threadSet_valid_objs' threadSet_weak_sch_act_wf_runnable') apply (clarsimp simp: guard_is_UNIV_def) apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def @@ -5478,7 +5439,7 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -5494,12 +5455,12 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (RecvEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5516,31 +5477,28 @@ lemma receiveIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=2] EPState_Recv_def - split: if_split) - apply (fastforce simp: tcb_queue_relation'_def - valid_ep'_def - dest: tcb_queue_relation_next_not_NULL) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=2] EPState_Recv_def + split: if_split) + apply (fastforce simp: tcb_queue_relation'_def + valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, 
assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -5557,28 +5515,25 @@ lemma receiveIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=2] EPState_Recv_def) - apply (clarsimp simp: tcb_queue_relation'_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=2] EPState_Recv_def) + apply (clarsimp simp: tcb_queue_relation'_def) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5604,7 +5559,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -5625,7 +5580,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5646,23 +5601,20 @@ lemma receiveIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def - tcb_queue_relation'_def) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], 
assumption+) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) apply simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5686,30 +5638,27 @@ lemma receiveIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - isRecvEP_def isSendEP_def - tcb_queue_relation'_def valid_ep'_def - split: endpoint.splits list.splits - split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (clarsimp dest!: is_aligned_tcb_ptr_to_ctcb_ptr - split del: if_split) - apply (clarsimp split: if_split) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_def isSendEP_def + tcb_queue_relation'_def valid_ep'_def + split: endpoint.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (clarsimp dest!: is_aligned_tcb_ptr_to_ctcb_ptr + split del: if_split) + apply (clarsimp split: if_split) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5773,7 +5722,7 @@ lemma completeSignal_ccorres: apply (erule(1) cmap_relation_ko_atE[OF cmap_relation_ntfn]) apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps) apply ceqv - apply (fold dc_def, ctac(no_vcg)) + apply (ctac(no_vcg)) apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp) @@ -5826,7 +5775,6 @@ lemma receiveIPC_ccorres [corres]: notes option.case_cong_weak [cong] shows "ccorres dc xfdc (invs' and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. 
thread \ set (ksReadyQueues s (d, p))) and valid_cap' cap and K (isEndpointCap cap)) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ \ \ccap_relation cap \cap\ @@ -5887,7 +5835,7 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule ccorres_cond[where R=\]) apply (simp add: Collect_const_mem) - apply (ctac add: completeSignal_ccorres[unfolded dc_def]) + apply (ctac add: completeSignal_ccorres) apply (rule_tac xf'=ret__unsigned_' and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv @@ -5902,7 +5850,6 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule_tac A="invs' and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. thread \ set (ksReadyQueues s (d, p))) and ko_at' ep (capEPPtr cap)" in ccorres_guard_imp2 [where A'=UNIV]) apply wpc @@ -5917,20 +5864,18 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp apply (rename_tac list NOo) - apply (rule_tac ep="RecvEP list" - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep="RecvEP list" in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (rename_tac list) apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \IdleEP case\ apply (rule ccorres_cond_true) apply csymbr @@ -5942,18 +5887,16 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp - apply (rule_tac ep=IdleEP - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep=IdleEP in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \SendEP case\ apply (thin_tac "isBlockinga = from_bool P" for P) apply (rule ccorres_cond_false) @@ -6031,12 +5974,10 @@ lemma receiveIPC_ccorres [corres]: split: Structures_H.thread_state.splits) apply ceqv - apply (fold dc_def) - supply dc_simp[simp del] apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) apply wpc (* blocking ipc call *) - apply (clarsimp simp: from_bool_def split del: if_split simp del: Collect_const) + apply (clarsimp split del: if_split simp del: Collect_const) apply ccorres_rewrite apply (wpc ; clarsimp ; ccorres_rewrite) apply csymbr @@ -6048,29 +5989,26 @@ lemma receiveIPC_ccorres [corres]: apply ccorres_rewrite apply ctac apply (ctac add: possibleSwitchTo_ccorres) - apply (wpsimp wp: sts_st_tcb' sts_valid_queues) + apply (wpsimp wp: sts_st_tcb' sts_valid_objs') apply (vcg exspec=setThreadState_modifies) - apply (fastforce simp: guard_is_UNIV_def ThreadState_Inactive_def - mask_def ThreadState_Running_def cap_get_tag_isCap - ccap_relation_ep_helpers) + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs mask_def + 
cap_get_tag_isCap ccap_relation_ep_helpers) apply (clarsimp simp: valid_tcb_state'_def) - apply (rule_tac Q="\_. valid_pspace' and valid_queues + apply (rule_tac Q="\_. valid_pspace' and st_tcb_at' ((=) sendState) sender and tcb_at' thread and (\s. sch_act_wf (ksSchedulerAction s) s) - and (\s. (\a b. sender \ set (ksReadyQueues s (a, b)))) and sch_act_not sender and K (thread \ sender) and (\s. ksCurDomain s \ maxDomain)" in hoare_post_imp) - apply (clarsimp simp: valid_pspace_valid_objs' pred_tcb_at'_def sch_act_wf_weak - obj_at'_def) + apply (fastforce simp: valid_pspace_valid_objs' pred_tcb_at'_def sch_act_wf_weak + obj_at'_def) apply (wpsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def conj_ac)+ - apply (rule_tac Q="\rv. valid_queues and valid_pspace' - and cur_tcb' and tcb_at' sender and tcb_at' thread - and sch_act_not sender and K (thread \ sender) - and ep_at' (capEPPtr cap) - and (\s. ksCurDomain s \ maxDomain) - and (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. sender \ set (ksReadyQueues s (d, p))))" - in hoare_post_imp) + apply (rule_tac Q="\rv. valid_pspace' + and cur_tcb' and tcb_at' sender and tcb_at' thread + and sch_act_not sender and K (thread \ sender) + and ep_at' (capEPPtr cap) + and (\s. ksCurDomain s \ maxDomain) + and (\s. sch_act_wf (ksSchedulerAction s) s)" + in hoare_post_imp) subgoal by (auto, auto simp: st_tcb_at'_def obj_at'_def) apply (wp hoare_vcg_all_lift set_ep_valid_objs') apply (clarsimp simp: guard_is_UNIV_def) @@ -6088,7 +6026,6 @@ lemma receiveIPC_ccorres [corres]: projectKOs invs'_def valid_state'_def st_tcb_at'_def valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' isBlockedOnReceive_def projectKO_opt_tcb - from_bool_def to_bool_def elim!: delta_sym_refs split: if_split_asm bool.splits) (*very long*) apply (frule(1) sym_refs_obj_atD' [OF _ invs_sym']) @@ -6101,24 +6038,21 @@ lemma receiveIPC_ccorres [corres]: projectKOs invs'_def valid_state'_def st_tcb_at'_def valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' isBlockedOnReceive_def projectKO_opt_tcb - from_bool_def to_bool_def - elim: delta_sym_refs + elim!: delta_sym_refs split: if_split_asm bool.splits) (*very long *) apply (clarsimp simp: obj_at'_def state_refs_of'_def projectKOs) apply (frule(1) sym_refs_ko_atD' [OF _ invs_sym']) - apply (frule invs_queues) apply clarsimp apply (rename_tac list x xa) apply (rule_tac P="x\set list" in case_split) apply (clarsimp simp:st_tcb_at_refs_of_rev') apply (erule_tac x=x and P="\x. 
st_tcb_at' P x s" for P in ballE) - apply (drule_tac t=x in valid_queues_not_runnable'_not_ksQ) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (subgoal_tac "sch_act_not x s") prefer 2 apply (frule invs_sch_act_wf') apply (clarsimp simp:sch_act_wf_def) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs isBlockedOnSend_def split: list.split | rule conjI)+ @@ -6146,11 +6080,10 @@ lemma sendSignal_dequeue_ccorres_helper: IF head_C \ntfn_queue = Ptr 0 THEN CALL notification_ptr_set_state(Ptr ntfn,scast NtfnState_Idle) FI)" - apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ntfn_blocked_in_queueD) apply (frule (1) ko_at_valid_ntfn' [OF _ invs_valid_objs']) apply (elim conjE) @@ -6170,7 +6103,7 @@ lemma sendSignal_dequeue_ccorres_helper: apply (drule ntfn_to_ep_queue, (simp add: isWaitingNtfn_def)+) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -6192,23 +6125,20 @@ lemma sendSignal_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp+ - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def - tcb_queue_relation'_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def + tcb_queue_relation'_def) + apply simp apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -6234,30 +6164,27 @@ lemma sendSignal_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp+ - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (clarsimp simp: cnotification_relation_def Let_def - isWaitingNtfn_def - tcb_queue_relation'_def valid_ntfn'_def - split: Structures_H.notification.splits 
list.splits - split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (clarsimp dest!: is_aligned_tcb_ptr_to_ctcb_ptr - split del: if_split) - apply (clarsimp split: if_split) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (clarsimp simp: cnotification_relation_def Let_def + isWaitingNtfn_def + tcb_queue_relation'_def valid_ntfn'_def + split: Structures_H.notification.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (clarsimp dest!: is_aligned_tcb_ptr_to_ctcb_ptr + split del: if_split) + apply (clarsimp split: if_split) + apply simp apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -6347,7 +6274,7 @@ lemma sendSignal_ccorres [corres]: apply wpc apply (simp add: option_to_ctcb_ptr_def split del: if_split) apply (rule ccorres_cond_false) - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (rule ccorres_cond_true) apply (rule getThreadState_ccorres_foo) apply (rule ccorres_Guard_Seq) @@ -6362,22 +6289,21 @@ lemma sendSignal_ccorres [corres]: apply (ctac(no_vcg) add: cancelIPC_ccorres1[OF cteDeleteOne_ccorres]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: setRegister_ccorres) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) - apply (wp sts_running_valid_queues sts_st_tcb_at'_cases + apply (ctac add: possibleSwitchTo_ccorres) + apply (wp sts_valid_objs' sts_st_tcb_at'_cases | simp add: option_to_ctcb_ptr_def split del: if_split)+ apply (rule_tac Q="\_. 
tcb_at' (the (ntfnBoundTCB ntfn)) and invs'" in hoare_post_imp) apply auto[1] apply wp apply simp - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (clarsimp simp: guard_is_UNIV_def option_to_ctcb_ptr_def ARM_HYP_H.badgeRegister_def Kernel_C.badgeRegister_def ARM_HYP.badgeRegister_def Kernel_C.R0_def - "StrictC'_thread_state_defs"less_mask_eq - Collect_const_mem) + ThreadState_defs less_mask_eq Collect_const_mem) apply (case_tac ts, simp_all add: receiveBlocked_def typ_heap_simps - cthread_state_relation_def "StrictC'_thread_state_defs")[1] + cthread_state_relation_def ThreadState_defs)[1] \ \ActiveNtfn case\ apply (rename_tac old_badge) apply (rule ccorres_cond_false) @@ -6426,16 +6352,14 @@ lemma sendSignal_ccorres [corres]: apply ceqv apply (simp only: K_bind_def) apply (ctac (no_vcg)) - apply (simp, fold dc_def) + apply simp apply (ctac (no_vcg)) apply (ctac add: possibleSwitchTo_ccorres) apply (simp) - apply (wp weak_sch_act_wf_lift_linear - setThreadState_oa_queued - sts_valid_queues tcb_in_cur_domain'_lift)[1] - apply (wp sts_valid_queues sts_runnable) + apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift)[1] + apply (wp sts_valid_objs' sts_runnable) apply (wp setThreadState_st_tcb set_ntfn_valid_objs' | clarsimp)+ - apply (clarsimp simp: guard_is_UNIV_def ThreadState_Running_def mask_def + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def badgeRegister_def Kernel_C.badgeRegister_def ARM_HYP.badgeRegister_def Kernel_C.R0_def) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Idle_def @@ -6458,10 +6382,9 @@ lemma sendSignal_ccorres [corres]: done lemma receiveSignal_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and sch_act_not thread and - valid_objs' and ntfn_at' ntfnptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + "ccorres dc xfdc (tcb_at' thread and sch_act_not thread and + valid_objs' and ntfn_at' ntfnptr and pspace_aligned' and pspace_distinct' and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and K (ntfnptr = ntfnptr && ~~ mask 4)) UNIV hs (setThreadState (Structures_H.thread_state.BlockedOnNotification @@ -6490,12 +6413,11 @@ lemma receiveSignal_block_ccorres_helper: (simp add: typ_heap_simps')+) apply (simp add: tcb_cte_cases_def) apply (simp add: ctcb_relation_def cthread_state_relation_def - ThreadState_BlockedOnNotification_def mask_def - from_bool_def to_bool_def) + ThreadState_defs mask_def) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_valid_queues hoare_vcg_all_lift threadSet_valid_objs' + apply (wp hoare_vcg_all_lift threadSet_valid_objs' threadSet_weak_sch_act_wf_runnable') apply (clarsimp simp: guard_is_UNIV_def) apply (auto simp: weak_sch_act_wf_def valid_tcb'_def tcb_cte_cases_def @@ -6506,16 +6428,17 @@ lemma cpspace_relation_ntfn_update_ntfn': fixes ntfn :: "Structures_H.notification" and ntfn' :: "Structures_H.notification" and ntfnptr :: "word32" and s :: "kernel_state" defines "qs \ if isWaitingNtfn (ntfnObj ntfn') then set (ntfnQueue (ntfnObj ntfn')) else {}" - defines "s' \ s\ksPSpace := ksPSpace s(ntfnptr \ KONotification ntfn')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(ntfnptr \ KONotification ntfn')\" assumes koat: "ko_at' ntfn ntfnptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift t) Ptr (cnotification_relation (cslift t))" and srs: "sym_refs (state_refs_of' s')" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr - (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) + Ptr + (cnotification_relation (cslift t'))" proof - from koat have koat': "ko_at' ntfn' ntfnptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -6578,7 +6501,7 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ntfn) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -6594,12 +6517,12 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (simp add: cnotification_relation_def Let_def) apply (case_tac "ntfnObj ntfn", simp_all add: init_def valid_ntfn'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def ntfnBound_state_refs_equivalence obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)) ntfnptr (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -6616,31 +6539,28 @@ lemma receiveSignal_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule 
ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) apply simp - apply (rule cendpoint_relation_ntfn_queue, assumption+) - apply (simp add: isWaitingNtfn_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) - apply (case_tac "ntfn", simp_all)[1] - apply (clarsimp simp: cnotification_relation_def Let_def - mask_def [where n=2] NtfnState_Waiting_def) - subgoal by (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask_weaken - valid_ntfn'_def - dest: tcb_queue_relation_next_not_NULL) - apply (simp add: isWaitingNtfn_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=2] NtfnState_Waiting_def) + subgoal by (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask_weaken + valid_ntfn'_def + dest: tcb_queue_relation_next_not_NULL) + apply (simp add: isWaitingNtfn_def) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -6657,30 +6577,27 @@ lemma receiveSignal_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) apply simp - apply (rule cendpoint_relation_ntfn_queue, assumption+) - apply (simp add: isWaitingNtfn_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) - apply (case_tac "ntfn", simp_all)[1] - apply (clarsimp simp: cnotification_relation_def Let_def - mask_def [where n=2] NtfnState_Waiting_def - split: if_split) - apply (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask_weaken) - apply (simp add: isWaitingNtfn_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: 
objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=2] NtfnState_Waiting_def + split: if_split) + apply (fastforce simp: tcb_queue_relation'_def is_aligned_neg_mask_weaken) + apply (simp add: isWaitingNtfn_def) apply (simp add: carch_state_relation_def typ_heap_simps') apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -6692,7 +6609,6 @@ lemma receiveSignal_enqueue_ccorres_helper: lemma receiveSignal_ccorres [corres]: "ccorres dc xfdc (invs' and valid_cap' cap and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. thread \ set (ksReadyQueues s (d, p))) and K (isNotificationCap cap)) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ \ \ccap_relation cap \cap\ @@ -6736,11 +6652,10 @@ lemma receiveSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp) apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6751,7 +6666,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \ActiveNtfn case\ apply (rename_tac badge) apply (rule ccorres_cond_false) @@ -6807,8 +6722,7 @@ lemma receiveSignal_ccorres [corres]: apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule_tac ntfn="ntfn" - in receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule_tac ntfn="ntfn" in receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. 
projectKO_opt x = Some ntfn @@ -6820,7 +6734,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Active_def NtfnState_Waiting_def NtfnState_Idle_def) apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/ARM_HYP/IsolatedThreadAction.thy b/proof/crefine/ARM_HYP/IsolatedThreadAction.thy index ace0d99e06..939a16a2e9 100644 --- a/proof/crefine/ARM_HYP/IsolatedThreadAction.thy +++ b/proof/crefine/ARM_HYP/IsolatedThreadAction.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -8,37 +9,25 @@ theory IsolatedThreadAction imports ArchMove_C begin -datatype tcb_state_regs = TCBStateRegs "thread_state" "MachineTypes.register \ machine_word" - -definition - "tsrContext tsr \ case tsr of TCBStateRegs ts regs \ regs" - -definition - "tsrState tsr \ case tsr of TCBStateRegs ts regs \ ts" - -lemma accessors_TCBStateRegs[simp]: - "TCBStateRegs (tsrState v) (tsrContext v) = v" - by (cases v, simp add: tsrState_def tsrContext_def) - -lemma tsrContext_simp[simp]: - "tsrContext (TCBStateRegs st con) = con" - by (simp add: tsrContext_def) +context begin interpretation Arch . (*FIXME: arch_split*) -lemma tsrState_simp[simp]: - "tsrState (TCBStateRegs st con) = st" - by (simp add: tsrState_def) +datatype tcb_state_regs = + TCBStateRegs (tsrState : thread_state) (tsrContext : "MachineTypes.register \ machine_word") definition get_tcb_state_regs :: "kernel_object option \ tcb_state_regs" where "get_tcb_state_regs oko \ case oko of - Some (KOTCB tcb) \ TCBStateRegs (tcbState tcb) ((atcbContextGet o tcbArch) tcb)" + Some (KOTCB tcb) \ TCBStateRegs (tcbState tcb) ((user_regs o atcbContextGet o tcbArch) tcb)" definition put_tcb_state_regs_tcb :: "tcb_state_regs \ tcb \ tcb" where "put_tcb_state_regs_tcb tsr tcb \ case tsr of - TCBStateRegs st regs \ tcb \ tcbState := st, tcbArch := atcbContextSet regs (tcbArch tcb) \" + TCBStateRegs st regs \ + tcb \ tcbState := st, + tcbArch := atcbContextSet (UserContext regs) + (tcbArch tcb) \" definition put_tcb_state_regs :: "tcb_state_regs \ kernel_object option \ kernel_object option" @@ -117,8 +106,6 @@ lemmas setEndpoint_obj_at_tcb' = setEndpoint_obj_at'_tcb lemmas setNotification_tcb = set_ntfn_tcb_obj_at' -context begin interpretation Arch . 
(*FIXME: arch_split*) - lemma setObject_modify: fixes v :: "'a :: pspace_storable" shows "\ obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; @@ -150,8 +137,6 @@ lemma getObject_return: apply (simp add: magnitudeCheck_assert in_monad) done -end - lemmas getObject_return_tcb = getObject_return[OF meta_eq_to_obj_eq, OF loadObject_tcb, unfolded objBits_simps', simplified] @@ -170,13 +155,13 @@ lemma partial_overwrite_fun_upd: lemma get_tcb_state_regs_ko_at': "ko_at' ko p s \ get_tcb_state_regs (ksPSpace s p) - = TCBStateRegs (tcbState ko) ((atcbContextGet o tcbArch) ko)" + = TCBStateRegs (tcbState ko) ((user_regs o atcbContextGet o tcbArch) ko)" by (clarsimp simp: obj_at'_def projectKOs get_tcb_state_regs_def) lemma put_tcb_state_regs_ko_at': "ko_at' ko p s \ put_tcb_state_regs tsr (ksPSpace s p) = Some (KOTCB (ko \ tcbState := tsrState tsr - , tcbArch := atcbContextSet (tsrContext tsr) (tcbArch ko)\))" + , tcbArch := atcbContextSet (UserContext (tsrContext tsr)) (tcbArch ko)\))" by (clarsimp simp: obj_at'_def projectKOs put_tcb_state_regs_def put_tcb_state_regs_tcb_def split: tcb_state_regs.split) @@ -207,7 +192,7 @@ lemma ksPSpace_update_partial_id: done lemma isolate_thread_actions_asUser: - "\ idx t' = t; inj idx; f = (\s. ({(v, g s)}, False)) \ \ + "\ idx t' = t; inj idx; f = (\s. ({(v, modify_registers g s)}, False)) \ \ monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) (asUser t f) (isolate_thread_actions idx (return v) @@ -228,16 +213,16 @@ lemma isolate_thread_actions_asUser: apply (clarsimp simp: partial_overwrite_get_tcb_state_regs put_tcb_state_regs_ko_at') apply (case_tac ko, simp) + apply (rename_tac uc) + apply (case_tac uc, simp add: modify_registers_def atcbContextGet_def atcbContextSet_def) done -context begin interpretation Arch . (*FIXME: arch_split*) - lemma getRegister_simple: - "getRegister r = (\con. ({(con r, con)}, False))" + "getRegister r = (\con. ({(user_regs con r, con)}, False))" by (simp add: getRegister_def simpler_gets_def) lemma mapM_getRegister_simple: - "mapM getRegister rs = (\con. ({(map con rs, con)}, False))" + "mapM getRegister rs = (\con. ({(map (user_regs con) rs, con)}, False))" apply (induct rs) apply (simp add: mapM_Nil return_def) apply (simp add: mapM_Cons getRegister_def simpler_gets_def @@ -245,12 +230,13 @@ lemma mapM_getRegister_simple: done lemma setRegister_simple: - "setRegister r v = (\con. ({((), con (r := v))}, False))" + "setRegister r v = (\con. ({((), UserContext ((user_regs con)(r := v)))}, False))" by (simp add: setRegister_def simpler_modify_def) lemma zipWithM_setRegister_simple: "zipWithM_x setRegister rs vs - = (\con. ({((), foldl (\con (r, v). con (r := v)) con (zip rs vs))}, False))" + = (\con. ({((), + UserContext (foldl (\regs (r, v). ((regs)(r := v))) (user_regs con) (zip rs vs)))}, False))" apply (simp add: zipWithM_x_mapM_x) apply (induct ("zip rs vs")) apply (simp add: mapM_x_Nil return_def) @@ -258,6 +244,18 @@ lemma zipWithM_setRegister_simple: simpler_modify_def fun_upd_def[symmetric]) done +(* this variant used in fastpath rewrite proof *) +lemma setRegister_simple_modify_registers: + "setRegister r v = (\con. ({((), modify_registers (\f. f(r := v)) con)}, False))" + by (simp add: modify_registers_def setRegister_simple) + +(* this variant used in fastpath rewrite proof *) +lemma zipWithM_setRegister_simple_modify_registers: + "zipWithM_x setRegister rs vs + = (\con. ({((), modify_registers (\regs. foldl (\f (r,v). 
f(r := v)) regs (zip rs vs)) con)}, + False))" + by (simp add: modify_registers_def zipWithM_setRegister_simple) + lemma dom_partial_overwrite: "\x. tcb_at' (idx x) s \ dom (partial_overwrite idx tsrs (ksPSpace s)) = dom (ksPSpace s)" @@ -428,7 +426,7 @@ lemma modify_isolatable: liftM_def bind_assoc) apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def) - apply (simp add: simpler_modify_def o_def) + apply (simp add: simpler_modify_def) apply (subst swap) apply (simp add: obj_at_partial_overwrite_If) apply (simp add: ksPSpace_update_partial_id o_def) @@ -485,15 +483,15 @@ lemma thread_actions_isolatable_bind: \t. \tcb_at' t\ f \\rv. tcb_at' t\ \ \ thread_actions_isolatable idx (f >>= g)" apply (clarsimp simp: thread_actions_isolatable_def) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) - apply (erule monadic_rewrite_bind2, assumption) + apply (erule monadic_rewrite_bind_l, assumption) apply (rule hoare_vcg_all_lift, assumption) apply (subst isolate_thread_actions_wrap_bind, simp) apply simp apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) apply (rule monadic_rewrite_transverse) - apply (erule monadic_rewrite_bind2, assumption) + apply (erule monadic_rewrite_bind_l, assumption) apply (rule hoare_vcg_all_lift, assumption) apply (simp add: bind_assoc id_def) apply (rule monadic_rewrite_refl) @@ -558,7 +556,7 @@ lemma select_f_isolatable: apply (clarsimp simp: thread_actions_isolatable_def isolate_thread_actions_def split_def select_f_selects liftM_def bind_assoc) - apply (rule monadic_rewrite_imp, rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_guard_imp, rule monadic_rewrite_transverse) apply (rule monadic_rewrite_drop_modify monadic_rewrite_bind_tail)+ apply wp+ apply (simp add: gets_bind_ign getSchedulerAction_def) @@ -662,7 +660,7 @@ lemma setVCPU_isolatable: apply (subst setObject_assert_modify; simp add: projectKOs objBits_simps archObjSize_def vcpuBits_def vcpu_bits_def pageBits_def)+ apply (clarsimp simp: select_f_asserts assert_def obj_at_partial_overwrite_id2 split: if_splits) - apply (clarsimp simp: select_f_def simpler_modify_def bind_def o_def) + apply (clarsimp simp: select_f_def simpler_modify_def bind_def) apply (case_tac s) apply simp apply (rule ext) @@ -932,12 +930,10 @@ lemma transferCaps_simple_rewrite: (transferCaps mi caps ep r rBuf) (return (mi \ msgExtraCaps := 0, msgCapsUnwrapped := 0 \))" including no_pre + supply empty_fail_getReceiveSlots[wp] (* FIXME *) apply (rule monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (simp add: transferCaps_simple, rule monadic_rewrite_refl) - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getReceiveSlots)+) - apply (rule monadic_rewrite_refl) + apply (simp add: transferCaps_simple) + apply (monadic_rewrite_symb_exec_l_drop, rule monadic_rewrite_refl) apply simp done @@ -951,7 +947,8 @@ lemma lookupExtraCaps_simple_rewrite: lemma lookupIPC_inv: "\P\ lookupIPCBuffer f t \\rv. P\" by wp -lemmas empty_fail_user_getreg = empty_fail_asUser[OF empty_fail_getRegister] +(* FIXME move *) +lemmas empty_fail_user_getreg[intro!, wp, simp] = empty_fail_asUser[OF empty_fail_getRegister] lemma copyMRs_simple: "msglen \ of_nat (length msgRegisters) \ @@ -974,7 +971,7 @@ lemma doIPCTransfer_simple_rewrite: \ msgLength (messageInfoFromWord msgInfo) \ of_nat (length msgRegisters)) and obj_at' (\tcb. 
tcbFault tcb = None - \ (atcbContextGet o tcbArch) tcb msgInfoRegister = msgInfo) sender) + \ (user_regs o atcbContextGet o tcbArch) tcb msgInfoRegister = msgInfo) sender) (doIPCTransfer sender ep badge grant rcvr) (do rv \ mapM_x (\r. do v \ asUser sender (getRegister r); asUser rcvr (setRegister r v) @@ -988,25 +985,23 @@ lemma doIPCTransfer_simple_rewrite: apply (simp add: doIPCTransfer_def bind_assoc doNormalTransfer_def getMessageInfo_def cong: option.case_cong) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail)+ - apply (rule_tac P="fault = None" in monadic_rewrite_gen_asm, simp) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l_known None, simp) apply (rule monadic_rewrite_bind_tail) - apply (rule_tac x=msgInfo in monadic_rewrite_symb_exec, - (wp empty_fail_user_getreg user_getreg_rv)+) - apply (simp add: lookupExtraCaps_simple_rewrite returnOk_catch_bind) - apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_from_simple, rule copyMRs_simple) - apply (rule monadic_rewrite_bind_head) - apply (rule transferCaps_simple_rewrite) - apply (wp threadGet_const)+ + apply (monadic_rewrite_symb_exec_l_known msgInfo) + apply (simp add: lookupExtraCaps_simple_rewrite returnOk_catch_bind) + apply (rule monadic_rewrite_bind) + apply (rule monadic_rewrite_from_simple, rule copyMRs_simple) + apply (rule monadic_rewrite_bind_head) + apply (rule transferCaps_simple_rewrite) + apply (wp threadGet_const user_getreg_rv asUser_inv)+ apply (simp add: bind_assoc) - apply (rule monadic_rewrite_symb_exec2[OF lookupIPC_inv empty_fail_lookupIPCBuffer] - monadic_rewrite_symb_exec2[OF threadGet_inv empty_fail_threadGet] - monadic_rewrite_symb_exec2[OF user_getreg_inv' empty_fail_user_getreg] - monadic_rewrite_bind_head monadic_rewrite_bind_tail - | wp)+ + apply (rule monadic_rewrite_symb_exec_l_drop[OF _ lookupIPC_inv empty_fail_lookupIPCBuffer] + monadic_rewrite_symb_exec_l_drop[OF _ threadGet_inv empty_fail_threadGet] + monadic_rewrite_symb_exec_l_drop[OF _ user_getreg_inv' empty_fail_user_getreg] + monadic_rewrite_bind_head monadic_rewrite_bind_tail)+ apply (case_tac "messageInfoFromWord msgInfo") apply simp apply (rule monadic_rewrite_refl) @@ -1018,7 +1013,7 @@ lemma doIPCTransfer_simple_rewrite: lemma monadic_rewrite_setSchedulerAction_noop: "monadic_rewrite F E (\s. ksSchedulerAction s = act) (setSchedulerAction act) (return ())" unfolding setSchedulerAction_def - apply (rule monadic_rewrite_imp, rule monadic_rewrite_modify_noop) + apply (rule monadic_rewrite_guard_imp, rule monadic_rewrite_modify_noop) apply simp done @@ -1032,9 +1027,10 @@ lemma rescheduleRequired_simple_rewrite: apply auto done -lemma empty_fail_isRunnable: +(* FIXME move *) +lemma empty_fail_isRunnable[intro!, wp, simp]: "empty_fail (isRunnable t)" - by (simp add: isRunnable_def isStopped_def) + by (simp add: isRunnable_def isStopped_def empty_fail_cond) lemma setupCallerCap_rewrite: "monadic_rewrite True True (\s. 
reply_masters_rvk_fb (ctes_of s)) @@ -1053,23 +1049,19 @@ lemma setupCallerCap_rewrite: apply (simp add: setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv getSlotCap_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_bind_tail)+ - apply (rule monadic_rewrite_assert)+ - apply (rule_tac P="mdbFirstBadged (cteMDBNode masterCTE) - \ mdbRevocable (cteMDBNode masterCTE)" - in monadic_rewrite_gen_asm) - apply simp - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec2, (wp | simp)+)+ - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getCTE)+)+ - apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp' | simp add: cte_wp_at_ctes_of)+ - apply (clarsimp simp: reply_masters_rvk_fb_def) - apply fastforce + apply (rule monadic_rewrite_bind_tail)+ + apply (rule monadic_rewrite_assert)+ + apply (rule_tac P="mdbFirstBadged (cteMDBNode masterCTE) + \ mdbRevocable (cteMDBNode masterCTE)" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_trans) + apply monadic_rewrite_symb_exec_l + apply monadic_rewrite_symb_exec_l_drop + apply (rule monadic_rewrite_refl) + apply wpsimp+ + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ + apply (fastforce simp: reply_masters_rvk_fb_def) done lemma oblivious_getObject_ksPSpace_default: @@ -1174,45 +1166,31 @@ lemma oblivious_switchToThread_schact: threadSet_def tcbSchedEnqueue_def unless_when asUser_def getQueue_def setQueue_def storeWordUser_def setRegister_def pointerInUserData_def isRunnable_def isStopped_def - getThreadState_def tcbSchedDequeue_def bitmap_fun_defs) + getThreadState_def tcbSchedDequeue_def tcbQueueRemove_def bitmap_fun_defs + ksReadyQueues_asrt_def) apply (safe intro!: oblivious_bind - | simp_all add: oblivious_setVMRoot_schact oblivious_vcpuSwitch_schact)+ + | simp_all add: ready_qs_runnable_def idleThreadNotQueued_def oblivious_setVMRoot_schact + oblivious_vcpuSwitch_schact)+ done -lemma empty_fail_getCurThread[iff]: +(* FIXME move *) +lemma empty_fail_getCurThread[intro!, wp, simp]: "empty_fail getCurThread" by (simp add: getCurThread_def) + lemma activateThread_simple_rewrite: "monadic_rewrite True True (ct_in_state' ((=) Running)) (activateThread) (return ())" apply (simp add: activateThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_tail)+ - apply (rule_tac P="state = Running" in monadic_rewrite_gen_asm) - apply simp + apply wp_pre + apply (monadic_rewrite_symb_exec_l) + apply (monadic_rewrite_symb_exec_l_known Running, simp) apply (rule monadic_rewrite_refl) - apply wp - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getThreadState)+) - apply (rule monadic_rewrite_refl) - apply wp - apply (rule monadic_rewrite_symb_exec2, - simp_all add: getCurThread_def) - apply (rule monadic_rewrite_refl) + apply wpsimp+ apply (clarsimp simp: ct_in_state'_def elim!: pred_tcb'_weakenE) done end -lemma setCTE_obj_at_prio[wp]: - "\obj_at' (\tcb. P (tcbPriority tcb)) t\ setCTE p v \\rv. obj_at' (\tcb. P (tcbPriority tcb)) t\" - unfolding setCTE_def - by (rule setObject_cte_obj_at_tcb', simp+) - -crunch obj_at_prio[wp]: cteInsert "obj_at' (\tcb. P (tcbPriority tcb)) t" - (wp: crunch_wps) - -crunch ctes_of[wp]: asUser "\s. P (ctes_of s)" - (wp: crunch_wps) - lemma tcbSchedEnqueue_tcbPriority[wp]: "\obj_at' (\tcb. 
P (tcbPriority tcb)) t\ tcbSchedEnqueue t' @@ -1222,17 +1200,16 @@ lemma tcbSchedEnqueue_tcbPriority[wp]: done crunch obj_at_prio[wp]: cteDeleteOne "obj_at' (\tcb. P (tcbPriority tcb)) t" - (wp: crunch_wps setEndpoint_obj_at_tcb' - setThreadState_obj_at_unchanged setNotification_tcb setBoundNotification_obj_at_unchanged - simp: crunch_simps unless_def) + (wp: crunch_wps setEndpoint_obj_at'_tcb setNotification_tcb + simp: crunch_simps unless_def setBoundNotification_def) lemma setThreadState_no_sch_change: "\\s. P (ksSchedulerAction s) \ (runnable' st \ t \ ksCurThread s)\ setThreadState st t \\rv s. P (ksSchedulerAction s)\" - (is "NonDetMonad.valid ?P ?f ?Q") + (is "Nondet_VCG.valid ?P ?f ?Q") apply (simp add: setThreadState_def setSchedulerAction_def) - apply (wp hoare_pre_cont[where a=rescheduleRequired]) + apply (wp hoare_pre_cont[where f=rescheduleRequired]) apply (rule_tac Q="\_. ?P and st_tcb_at' ((=) st) t" in hoare_post_imp) apply (clarsimp split: if_split) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs) @@ -1336,14 +1313,11 @@ lemma setCTE_assert_modify: apply (subst updateObject_cte_tcb) apply (fastforce simp add: subtract_mask) apply (simp add: assert_opt_def alignCheck_assert bind_assoc - magnitudeCheck_assert - is_aligned_neg_mask2 objBits_def) + magnitudeCheck_assert objBits_def) apply (rule ps_clear_lookupAround2, assumption+) apply (rule word_and_le2) apply (simp add: objBits_simps mask_def field_simps) apply (simp add: simpler_modify_def cong: option.case_cong if_cong) - apply (rule kernel_state.fold_congs[OF refl refl]) - apply (clarsimp simp: projectKO_opt_tcb cong: if_cong) apply (clarsimp simp: lookupAround2_char1 word_and_le2) apply (rule ccontr, clarsimp) apply (erule(2) ps_clearD) @@ -1375,13 +1349,17 @@ lemma partial_overwrite_fun_upd2: else y)" by (simp add: fun_eq_iff partial_overwrite_def split: if_split) +lemma atcbContextSetSetGet_eq[simp]: + "atcbContextSet (UserContext (user_regs (atcbContextGet t))) t = t" + by (cases t, simp add: atcbContextSet_def atcbContextGet_def) + lemma setCTE_isolatable: "thread_actions_isolatable idx (setCTE p v)" supply if_split[split del] apply (simp add: setCTE_assert_modify) apply (clarsimp simp: thread_actions_isolatable_def monadic_rewrite_def fun_eq_iff - liftM_def exec_gets + liftM_def isolate_thread_actions_def bind_assoc exec_gets getSchedulerAction_def bind_select_f_bind[symmetric] @@ -1409,8 +1387,7 @@ lemma setCTE_isolatable: apply (erule notE[rotated], erule (3) tcb_ctes_clear[rotated]) apply (simp add: select_f_returns select_f_asserts split: if_split) apply (intro conjI impI) - apply (clarsimp simp: simpler_modify_def fun_eq_iff - partial_overwrite_fun_upd2 o_def + apply (clarsimp simp: simpler_modify_def fun_eq_iff partial_overwrite_fun_upd2 intro!: kernel_state.fold_congs[OF refl refl]) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) apply (erule notE[rotated], rule tcb_ctes_clear[rotated 2], assumption+) @@ -1483,37 +1460,24 @@ lemma thread_actions_isolatableD: lemma tcbSchedDequeue_rewrite: "monadic_rewrite True True (obj_at' (Not \ tcbQueued) t) (tcbSchedDequeue t) (return ())" apply (simp add: tcbSchedDequeue_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ queued" in monadic_rewrite_gen_asm) - apply (simp add: when_def) + apply wp_pre + apply monadic_rewrite_symb_exec_l + apply (monadic_rewrite_symb_exec_l_known False, simp) apply (rule monadic_rewrite_refl) - apply (wp threadGet_const) - 
apply (rule monadic_rewrite_symb_exec2) - apply wp+ - apply (rule monadic_rewrite_refl) - apply (clarsimp) + apply (wpsimp wp: threadGet_const)+ done +(* FIXME: improve automation here *) lemma switchToThread_rewrite: "monadic_rewrite True True (ct_in_state' (Not \ runnable') and cur_tcb' and obj_at' (Not \ tcbQueued) t) (switchToThread t) (do Arch.switchToThread t; setCurThread t od)" apply (simp add: switchToThread_def Thread_H.switchToThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind) - apply (rule tcbSchedDequeue_rewrite) - apply (rule monadic_rewrite_refl) - apply (wp Arch_switchToThread_obj_at_pre)+ - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec) - apply (wp+, simp) - apply (rule monadic_rewrite_refl) - apply (wp) + apply (monadic_rewrite_l tcbSchedDequeue_rewrite, simp) + (* strip LHS of getters and asserts until LHS and RHS are the same *) + apply (repeat_unless \rule monadic_rewrite_refl\ monadic_rewrite_symb_exec_l) + apply wpsimp+ apply (clarsimp simp: comp_def) done @@ -1555,9 +1519,33 @@ lemma switchToThread_isolatable: split: tcb_state_regs.split)+ done +lemma tcbQueued_put_tcb_state_regs_tcb: + "tcbQueued (put_tcb_state_regs_tcb tsr tcb) = tcbQueued tcb" + apply (clarsimp simp: put_tcb_state_regs_tcb_def) + by (cases tsr; clarsimp) + +lemma idleThreadNotQueued_isolatable: + "thread_actions_isolatable idx (stateAssert idleThreadNotQueued [])" + apply (simp add: stateAssert_def2 stateAssert_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + gets_isolatable + thread_actions_isolatable_if + thread_actions_isolatable_returns + thread_actions_isolatable_fail) + unfolding idleThreadNotQueued_def + apply (clarsimp simp: obj_at_partial_overwrite_If) + apply (clarsimp simp: obj_at'_def tcbQueued_put_tcb_state_regs_tcb) + apply wpsimp+ + done + lemma setCurThread_isolatable: "thread_actions_isolatable idx (setCurThread t)" - by (simp add: setCurThread_def modify_isolatable) + unfolding setCurThread_def + apply (rule thread_actions_isolatable_bind) + apply (rule idleThreadNotQueued_isolatable) + apply (fastforce intro: modify_isolatable) + apply wpsimp + done lemma isolate_thread_actions_tcbs_at: assumes f: "\x. \tcb_at' (idx x)\ f \\rv. tcb_at' (idx x)\" shows @@ -1580,7 +1568,7 @@ lemma isolate_thread_actions_rewrite_bind: \ monadic_rewrite False True (\s. \x. 
tcb_at' (idx x) s) (f >>= g) (isolate_thread_actions idx (f' >>= g') (g'' o f'') (g''' o f'''))" - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) apply (rule monadic_rewrite_bind, assumption+) apply (wp isolate_thread_actions_tcbs_at) @@ -1588,7 +1576,7 @@ lemma isolate_thread_actions_rewrite_bind: apply (subst isolate_thread_actions_wrap_bind, assumption) apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) apply (rule monadic_rewrite_transverse) - apply (rule monadic_rewrite_bind2) + apply (rule monadic_rewrite_bind_l) apply (erule(1) thread_actions_isolatableD) apply (rule thread_actions_isolatableD, assumption+) apply (rule hoare_vcg_all_lift, assumption) @@ -1676,6 +1664,7 @@ lemma copy_register_isolate: apply (case_tac obj, case_tac obja) apply (simp add: projectKO_opt_tcb put_tcb_state_regs_def put_tcb_state_regs_tcb_def get_tcb_state_regs_def + atcbContextGet_def cong: if_cong) apply (auto simp: fun_eq_iff split: if_split) done @@ -1693,7 +1682,7 @@ lemma monadic_rewrite_isolate_final2: (isolate_thread_actions idx f f' f'') (isolate_thread_actions idx g g' g'')" apply (simp add: isolate_thread_actions_def split_def) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_bind_tail)+ apply (rule_tac P="\ s'. Q s" in monadic_rewrite_bind) apply (insert mr)[1] @@ -1701,14 +1690,14 @@ lemma monadic_rewrite_isolate_final2: apply auto[1] apply (rule_tac P="P and (\s. tcbs = get_tcb_state_regs o ksPSpace s o idx \ sa = ksSchedulerAction s)" - in monadic_rewrite_refl3) + in monadic_rewrite_pre_imp_eq) apply (clarsimp simp: exec_modify eqs return_def) apply wp+ apply (clarsimp simp: o_def eqs) done lemmas monadic_rewrite_isolate_final - = monadic_rewrite_isolate_final2[where R=\, OF monadic_rewrite_refl2, simplified] + = monadic_rewrite_isolate_final2[where R=\, OF monadic_rewrite_is_refl, simplified] lemma copy_registers_isolate_general: "\ inj idx; idx x = t; idx y = t' \ \ @@ -1728,7 +1717,7 @@ lemma copy_registers_isolate_general: select_f_returns o_def ksPSpace_update_partial_id) apply (simp add: return_def simpler_modify_def) apply (simp add: mapM_x_Cons) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) apply (rule isolate_thread_actions_rewrite_bind, assumption) apply (rule copy_register_isolate, assumption+) @@ -1794,7 +1783,8 @@ lemmas fastpath_isolatables thread_actions_isolatable_returns lemmas fastpath_isolate_rewrites - = isolate_thread_actions_threadSet_tcbState isolate_thread_actions_asUser + = isolate_thread_actions_threadSet_tcbState + isolate_thread_actions_asUser copy_registers_isolate setSchedulerAction_isolate fastpath_isolatables[THEN thread_actions_isolatableD] @@ -1821,27 +1811,17 @@ lemma setThreadState_rewrite_simple: (setThreadState st t) (threadSet (tcbState_update (\_. 
st)) t)" supply if_split[split del] - apply (simp add: setThreadState_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail)+ - apply (simp add: when_def) - apply (rule monadic_rewrite_gen_asm) - apply (subst if_not_P) - apply assumption - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_symb_exec2, - (wp empty_fail_isRunnable - | (simp only: getCurThread_def getSchedulerAction_def - , rule empty_fail_gets))+)+ - apply (rule monadic_rewrite_refl) - apply (simp add: conj_comms, wp hoare_vcg_imp_lift threadSet_tcbState_st_tcb_at') - apply (clarsimp simp: obj_at'_def sch_act_simple_def st_tcb_at'_def) + apply (simp add: setThreadState_def when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at'\) + (* take the threadSet, drop everything until return () *) + apply (rule monadic_rewrite_trans[OF monadic_rewrite_bind_tail]) + apply (rule monadic_rewrite_symb_exec_l_drop)+ + apply (rule monadic_rewrite_refl) + apply (wpsimp simp: getCurThread_def + wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at')+ apply (rule monadic_rewrite_refl) - apply clarsimp + apply (clarsimp simp: obj_at'_def sch_act_simple_def st_tcb_at'_def) done end diff --git a/proof/crefine/ARM_HYP/Machine_C.thy b/proof/crefine/ARM_HYP/Machine_C.thy index 66e8fa7c47..3fe2164561 100644 --- a/proof/crefine/ARM_HYP/Machine_C.thy +++ b/proof/crefine/ARM_HYP/Machine_C.thy @@ -560,13 +560,13 @@ lemma cleanCacheRange_PoC_ccorres: apply (cinit' lift: start_' end_' pstart_') apply (clarsimp simp: cleanCacheRange_PoC_def word_sle_def whileAnno_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_cleanByVA) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: cleanByVA_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) @@ -588,7 +588,7 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: apply (rule ccorres_basic_srnoop) apply (simp add: cleanInvalidateCacheRange_RAM_def doMachineOp_bind empty_fail_dsb empty_fail_cleanInvalidateL2Range - empty_fail_cleanInvalByVA) + empty_fail_cleanInvalByVA empty_fail_cond) apply (ctac (no_vcg) add: cleanCacheRange_PoC_ccorres) apply (ctac (no_vcg) add: dsb_ccorres) apply (ctac (no_vcg) add: plat_cleanInvalidateL2Range_ccorres) @@ -606,8 +606,8 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanInvalByVA_modifies) apply (rule ceqv_refl) - apply (ctac (no_vcg) add: dsb_ccorres[simplified dc_def]) - apply (wp | clarsimp simp: guard_is_UNIVI o_def)+ + apply (ctac (no_vcg) add: dsb_ccorres) + apply (wp | clarsimp simp: guard_is_UNIVI)+ apply (frule(1) ghost_assertion_size_logic) apply (clarsimp simp: o_def) done @@ -620,7 +620,8 @@ lemma cleanCacheRange_RAM_ccorres: (doMachineOp (cleanCacheRange_RAM w1 w2 w3)) (Call cleanCacheRange_RAM_'proc)" apply (cinit' lift: start_' end_' pstart_') - apply (simp add: cleanCacheRange_RAM_def doMachineOp_bind empty_fail_dsb empty_fail_cleanL2Range) + apply (simp add: cleanCacheRange_RAM_def 
doMachineOp_bind empty_fail_dsb empty_fail_cleanL2Range + empty_fail_cond) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop2, simp) apply (ctac (no_vcg) add: cleanCacheRange_PoC_ccorres) @@ -629,7 +630,7 @@ lemma cleanCacheRange_RAM_ccorres: in ccorres_cross_over_guard) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop2, simp) - apply (ctac (no_vcg) add: cleanL2Range_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: cleanL2Range_ccorres) apply wp+ apply clarsimp apply (auto dest: ghost_assertion_size_logic simp: o_def) @@ -649,13 +650,13 @@ lemma cleanCacheRange_PoU_ccorres: apply (rule ccorres_basic_srnoop2, simp) apply (simp add: cleanCacheRange_PoU_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_cleanByVA_PoU) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) @@ -680,21 +681,21 @@ lemma invalidateCacheRange_RAM_ccorres: apply (cinit' lift: start_' end_' pstart_') apply (clarsimp simp: word_sle_def whileAnno_def split del: if_split) apply (simp add: invalidateCacheRange_RAM_def doMachineOp_bind when_def - if_split_empty_fail empty_fail_invalidateL2Range empty_fail_invalidateByVA - empty_fail_dsb dmo_if - split del: if_split) + empty_fail_invalidateL2Range empty_fail_invalidateByVA + empty_fail_dsb dmo_if empty_fail_cond + split del: if_split) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) apply (clarsimp simp: lineStart_def cacheLineBits_def) apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply ceqv apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) apply (clarsimp simp: lineStart_def cacheLineBits_def) apply csymbr apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply ceqv apply (rule_tac P="\s. 
unat (w2 - w1) \ gsMaxObjectSize s" in ccorres_cross_over_guard) @@ -717,7 +718,7 @@ lemma invalidateCacheRange_RAM_ccorres: apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=invalidateByVA_modifies) apply ceqv - apply (ctac add: dsb_ccorres[unfolded dc_def]) + apply (ctac add: dsb_ccorres) apply wp apply (simp add: guard_is_UNIV_def) apply wp @@ -757,13 +758,13 @@ lemma branchFlushRange_ccorres: apply (clarsimp simp: word_sle_def whileAnno_def) apply (simp add: branchFlushRange_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_branchFlush) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: branchFlush_ccorres[unfolded dc_def]) + apply (ctac add: branchFlush_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) @@ -776,7 +777,7 @@ lemma cleanCaches_PoU_ccorres: (doMachineOp cleanCaches_PoU) (Call cleanCaches_PoU_'proc)" apply cinit' - apply (simp add: cleanCaches_PoU_def doMachineOp_bind + apply (simp add: cleanCaches_PoU_def doMachineOp_bind empty_fail_cond empty_fail_dsb empty_fail_clean_D_PoU empty_fail_invalidate_I_PoU) apply (ctac (no_vcg) add: dsb_ccorres) apply (ctac (no_vcg) add: clean_D_PoU_ccorres) @@ -793,7 +794,7 @@ lemma setCurrentPD_ccorres: (Call setCurrentPD_'proc)" apply cinit' apply (clarsimp simp: setCurrentPD_def doMachineOp_bind empty_fail_dsb empty_fail_isb - setCurrentPDPL2_empty_fail + setCurrentPDPL2_empty_fail empty_fail_cond intro!: ccorres_cond_empty) apply (ctac (no_vcg) add: setCurrentPDPL2_ccorres) apply wpsimp diff --git a/proof/crefine/ARM_HYP/PSpace_C.thy b/proof/crefine/ARM_HYP/PSpace_C.thy index 50d4aed6cd..97b7236e95 100644 --- a/proof/crefine/ARM_HYP/PSpace_C.thy +++ b/proof/crefine/ARM_HYP/PSpace_C.thy @@ -47,7 +47,7 @@ lemma setObject_ccorres_helper: fixes ko :: "'a :: pspace_storable" assumes valid: "\\ (ko' :: 'a). \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ko_at' ko' p \} - c {s. (\\ksPSpace := ksPSpace \ (p \ injectKO ko)\, s) \ rf_sr}" + c {s. (\\ksPSpace := (ksPSpace \)(p \ injectKO ko)\, s) \ rf_sr}" shows "\ \ko :: 'a. updateObject ko = updateObject_default ko; \ko :: 'a. (1 :: word32) < 2 ^ objBits ko \ \ ccorres dc xfdc P P' hs (setObject p ko) c" diff --git a/proof/crefine/ARM_HYP/Recycle_C.thy b/proof/crefine/ARM_HYP/Recycle_C.thy index 4bd1359986..14ea1378ab 100644 --- a/proof/crefine/ARM_HYP/Recycle_C.thy +++ b/proof/crefine/ARM_HYP/Recycle_C.thy @@ -366,7 +366,7 @@ lemma clearMemory_PageCap_ccorres: subgoal by (simp add: pageBits_def ko_at_projectKO_opt[OF user_data_at_ko]) subgoal by simp apply csymbr - apply (ctac add: cleanCacheRange_RAM_ccorres[unfolded dc_def]) + apply (ctac add: cleanCacheRange_RAM_ccorres) apply wp apply (simp add: guard_is_UNIV_def unat_of_nat word_bits_def capAligned_def word_of_nat_less) @@ -456,7 +456,7 @@ lemma mapM_x_store_memset_ccorres_assist: "\ko :: 'a. (1 :: word32) < 2 ^ objBits ko" assumes restr: "set slots \ S" assumes worker: "\ptr s s' (ko :: 'a). 
\ (s, s') \ rf_sr; ko_at' ko ptr s; ptr \ S \ - \ (s \ ksPSpace := ksPSpace s (ptr \ injectKO val)\, + \ (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val)\, globals_update (t_hrs_'_update (hrs_mem_update (heap_update_list ptr (replicateHider (2 ^ objBits val) (ucast c))))) s') \ rf_sr" @@ -530,8 +530,8 @@ lemma invalidateTLBByASID_ccorres: apply (simp add: case_option_If2 del: Collect_const) apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) apply (clarsimp simp: pde_stored_asid_def to_bool_def split: if_split) - apply (rule ccorres_return_void_C[unfolded dc_def]) - apply (simp add: dc_def[symmetric]) + apply (rule ccorres_return_void_C) + apply simp apply csymbr apply (ctac add: invalidateTranslationASID_ccorres) apply vcg @@ -812,8 +812,8 @@ lemma cpspace_relation_ep_update_ep2: (cslift t) ep_Ptr (cendpoint_relation (cslift t)); cendpoint_relation (cslift t') ep' endpoint; (cslift t' :: tcb_C ptr \ tcb_C) = cslift t \ - \ cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(ep_Ptr epptr \ endpoint)) + \ cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(ep_Ptr epptr \ endpoint)) ep_Ptr (cendpoint_relation (cslift t'))" apply (rule cmap_relationE1, assumption, erule ko_at_projectKO_opt) apply (rule_tac P="\a. cmap_relation a b c d" for b c d in rsubst, @@ -867,7 +867,7 @@ lemma ctcb_relation_blocking_ipc_badge: apply (simp add: isBlockedOnSend_def split: Structures_H.thread_state.split_asm) apply (clarsimp simp: cthread_state_relation_def) apply (clarsimp simp add: ctcb_relation_def cthread_state_relation_def) - apply (cases "tcbState tcb", simp_all add: "StrictC'_thread_state_defs") + apply (cases "tcbState tcb", simp_all add: ThreadState_defs) done lemma cendpoint_relation_q_cong: @@ -889,16 +889,6 @@ lemma cnotification_relation_q_cong: apply (auto intro: iffD1[OF tcb_queue_relation'_cong[OF refl refl refl]]) done -lemma tcbSchedEnqueue_ep_at: - "\obj_at' (P :: endpoint \ bool) ep\ - tcbSchedEnqueue t - \\rv. obj_at' P ep\" - including no_pre - apply (simp add: tcbSchedEnqueue_def unless_def null_def) - apply (wp threadGet_wp, clarsimp, wp+) - apply (clarsimp split: if_split, wp) - done - lemma ccorres_duplicate_guard: "ccorres r xf (P and P) Q hs f f' \ ccorres r xf P Q hs f f'" by (erule ccorres_guard_imp, auto) @@ -918,12 +908,13 @@ lemma cancelBadgedSends_ccorres: (UNIV \ {s. epptr_' s = Ptr ptr} \ {s. 
badge_' s = bdg}) [] (cancelBadgedSends ptr bdg) (Call cancelBadgedSends_'proc)" apply (cinit lift: epptr_' badge_' simp: whileAnno_def) - apply (simp add: list_case_return2 + apply (rule ccorres_stateAssert) + apply (simp add: list_case_return cong: list.case_cong Structures_H.endpoint.case_cong call_ignore_cong del: Collect_const) - apply (rule ccorres_pre_getEndpoint) - apply (rule_tac R="ko_at' rv ptr" and xf'="ret__unsigned_'" - and val="case rv of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle + apply (rule ccorres_pre_getEndpoint, rename_tac ep) + apply (rule_tac R="ko_at' ep ptr" and xf'="ret__unsigned_'" + and val="case ep of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle | SendEP q \ scast EPState_Send" in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg @@ -933,22 +924,22 @@ lemma cancelBadgedSends_ccorres: split: Structures_H.endpoint.split_asm) apply ceqv apply wpc - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) apply (simp add: Collect_True Collect_False endpoint_state_defs - ccorres_cond_iffs dc_def[symmetric] + ccorres_cond_iffs del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (drule_tac s = rv in sym, simp only:) - apply (rule_tac P="ko_at' rv ptr and invs'" in ccorres_cross_over_guard) + apply (drule_tac s = ep in sym, simp only:) + apply (rule_tac P="ko_at' ep ptr and invs'" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl]) - apply (rule_tac P="ko_at' rv ptr" + apply (rule_tac P="ko_at' ep ptr" in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -973,8 +964,9 @@ lemma cancelBadgedSends_ccorres: st_tcb_at' (\st. isBlockedOnSend st \ blockingObject st = ptr) x s) \ distinct (xs @ list) \ ko_at' IdleEP ptr s \ (\p. \x \ set (xs @ list). \rf. (x, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) - \ valid_queues s \ pspace_aligned' s \ pspace_distinct' s - \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s" + \ pspace_aligned' s \ pspace_distinct' s + \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" and P'="\xs. {s. ep_queue_relation' (cslift s) (xs @ list) (head_C (queue_' s)) (end_C (queue_' s))} \ {s. 
thread_' s = (case list of [] \ tcb_Ptr 0 @@ -1023,7 +1015,7 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: tcb_queue_relation'_def EPState_Send_def mask_def) subgoal by (auto split: if_split) subgoal by simp - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule hoare_pre, wp weak_sch_act_wf_lift_linear set_ep_valid_objs') apply (clarsimp simp: weak_sch_act_wf_def sch_act_wf_def) apply (fastforce simp: valid_ep'_def pred_tcb_at' split: list.splits) @@ -1033,7 +1025,7 @@ lemma cancelBadgedSends_ccorres: apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) apply (rule ccorres_init_tmp_lift2, ceqv) apply (rule ccorres_guard_imp2) - apply (simp add: bind_assoc dc_def[symmetric] + apply (simp add: bind_assoc del: Collect_const) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -1058,9 +1050,9 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: rf_sr_def) apply simp apply ceqv - apply (rule_tac P="ret__unsigned=blockingIPCBadge rva" in ccorres_gen_asm2) + apply (rule_tac P="ret__unsigned=blockingIPCBadge rv" in ccorres_gen_asm2) apply (rule ccorres_if_bind, rule ccorres_if_lhs) - apply (simp add: bind_assoc dc_def[symmetric]) + apply (simp add: bind_assoc) apply (rule ccorres_rhs_assoc)+ apply (ctac add: setThreadState_ccorres) apply (ctac add: tcbSchedEnqueue_ccorres) @@ -1070,8 +1062,9 @@ lemma cancelBadgedSends_ccorres: apply (rule_tac rrel=dc and xf=xfdc and P="\s. (\t \ set (x @ a # lista). tcb_at' t s) \ (\p. \t \ set (x @ a # lista). \rf. (t, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) - \ valid_queues s \ distinct (x @ a # lista) - \ pspace_aligned' s \ pspace_distinct' s" + \ distinct (x @ a # lista) + \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" and P'="{s. 
ep_queue_relation' (cslift s) (x @ a # lista) (head_C (queue_' s)) (end_C (queue_' s))}" in ccorres_from_vcg) @@ -1087,8 +1080,7 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: return_def rf_sr_def cstate_relation_def Let_def) apply (rule conjI) apply (clarsimp simp: cpspace_relation_def) - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule null_ep_queue) + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) subgoal by (simp add: o_def) apply (rule conjI) apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) @@ -1111,9 +1103,6 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: image_iff) apply (drule_tac x=p in spec) subgoal by fastforce - apply (rule conjI) - apply (erule cready_queues_relation_not_queue_ptrs, - auto dest: null_ep_schedD[unfolded o_def] simp: o_def)[1] apply (simp add: carch_state_relation_def cmachine_state_relation_def h_t_valid_clift_Some_iff) @@ -1124,16 +1113,15 @@ lemma cancelBadgedSends_ccorres: apply wp apply simp apply vcg - apply (wp hoare_vcg_const_Ball_lift tcbSchedEnqueue_ep_at - sch_act_wf_lift) + apply (wp hoare_vcg_const_Ball_lift sch_act_wf_lift) apply simp apply (vcg exspec=tcbSchedEnqueue_cslift_spec) apply (wp hoare_vcg_const_Ball_lift sts_st_tcb_at'_cases - sts_sch_act sts_valid_queues setThreadState_oa_queued) + sts_sch_act sts_valid_objs') apply (vcg exspec=setThreadState_cslift_spec) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_symb_exec_r2) - apply (drule_tac x="x @ [a]" in spec, simp add: dc_def[symmetric]) + apply (drule_tac x="x @ [a]" in spec, simp) apply vcg apply (vcg spec=modifies) apply (thin_tac "\x. P x" for P) @@ -1146,21 +1134,18 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: typ_heap_simps st_tcb_at'_def) apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: ctcb_relation_blocking_ipc_badge) - apply (rule conjI, simp add: "StrictC'_thread_state_defs" mask_def) + apply (rule conjI, simp add: ThreadState_defs mask_def) apply (rule conjI) apply clarsimp apply (frule rf_sr_cscheduler_relation) apply (clarsimp simp: cscheduler_action_relation_def st_tcb_at'_def split: scheduler_action.split_asm) apply (rename_tac word) - apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge) - apply simp - subgoal by clarsimp - subgoal by clarsimp + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; simp?) subgoal by clarsimp apply clarsimp apply (rule conjI) - apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge) + apply (frule tcbSchedEnqueue_cslift_precond_discharge; simp?) 
subgoal by clarsimp apply clarsimp apply (rule context_conjI) @@ -1200,9 +1185,19 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp split: if_split) apply (drule sym_refsD, clarsimp) apply (drule(1) bspec)+ - by (auto simp: obj_at'_def projectKOs state_refs_of'_def pred_tcb_at'_def tcb_bound_refs'_def - dest!: symreftype_inverse') - + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply (fastforce simp: obj_at'_def projectKOs state_refs_of'_def pred_tcb_at'_def + tcb_bound_refs'_def + dest!: symreftype_inverse') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply fastforce + done lemma tcb_ptr_to_ctcb_ptr_force_fold: "x + 2 ^ ctcb_size_bits = ptr_val (tcb_ptr_to_ctcb_ptr x)" diff --git a/proof/crefine/ARM_HYP/Refine_C.thy b/proof/crefine/ARM_HYP/Refine_C.thy index a0177eb39f..64ce39b288 100644 --- a/proof/crefine/ARM_HYP/Refine_C.thy +++ b/proof/crefine/ARM_HYP/Refine_C.thy @@ -44,6 +44,7 @@ proof - show ?thesis apply (cinit') apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (ctac (no_vcg) add: getActiveIRQ_ccorres) apply (rule ccorres_Guard_Seq)? @@ -59,7 +60,7 @@ proof - apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply simp apply vcg apply vcg @@ -73,14 +74,13 @@ proof - apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply simp apply (rule_tac Q="\rv s. invs' s \ (\x. rv = Some x \ x \ ARM_HYP.maxIRQ) \ rv \ Some 0x3FF \ - sch_act_not (ksCurThread s) s \ - (\p. 
ksCurThread s \ set (ksReadyQueues s p))" in hoare_post_imp) + sch_act_not (ksCurThread s) s" + in hoare_post_imp) apply (clarsimp simp: Kernel_C.maxIRQ_def ARM_HYP.maxIRQ_def) apply (wp getActiveIRQ_le_maxIRQ getActiveIRQ_neq_Some0xFF | simp)+ - apply (clarsimp simp: ct_not_ksQ) apply (clarsimp simp: invs'_def valid_state'_def) done qed @@ -93,6 +93,7 @@ lemma handleUnknownSyscall_ccorres: (callKernel (UnknownSyscall n)) (Call handleUnknownSyscall_'proc)" apply (cinit' lift: w_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_symb_exec_r) apply (rule ccorres_pre_getCurThread) @@ -105,14 +106,12 @@ lemma handleUnknownSyscall_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (clarsimp, vcg) apply (clarsimp, rule conseqPre, vcg, clarsimp) apply clarsimp apply (intro impI conjI allI) - apply fastforce - apply (clarsimp simp: ct_not_ksQ) - apply (clarsimp simp add: sch_act_simple_def split: scheduler_action.split) + apply fastforce apply (rule active_ex_cap') apply (erule active_from_running') apply (erule invs_iflive') @@ -132,8 +131,10 @@ lemma handleVMFaultEvent_ccorres: (callKernel (VMFaultEvent vmfault_type)) (Call handleVMFaultEvent_'proc)" apply (cinit' lift:vm_faultType_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_pre_getCurThread) + apply (rename_tac thread) apply (simp add: catch_def) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) @@ -163,13 +164,13 @@ lemma handleVMFaultEvent_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ - apply (case_tac x, clarsimp, wp) + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply (case_tac rv, clarsimp, wp) apply (clarsimp, wp, simp) apply wp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: simple_sane_strg[unfolded sch_act_sane_not]) - by (auto simp: ct_in_state'_def cfault_rel_def is_cap_fault_def ct_not_ksQ + by (auto simp: ct_in_state'_def cfault_rel_def is_cap_fault_def elim: pred_tcb'_weakenE st_tcb_ex_cap'' dest: st_tcb_at_idle_thread' rf_sr_ksCurThread) @@ -182,6 +183,7 @@ lemma handleUserLevelFault_ccorres: (callKernel (UserLevelFault word1 word2)) (Call handleUserLevelFault_'proc)" apply (cinit' lift:w_a_' w_b_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_symb_exec_r) apply (rule ccorres_pre_getCurThread) @@ -194,16 +196,14 @@ lemma handleUserLevelFault_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (clarsimp, vcg) apply (clarsimp, rule conseqPre, vcg, clarsimp) apply clarsimp apply (intro impI conjI allI) - apply (simp add: ct_in_state'_def) - apply (erule pred_tcb'_weakenE) - apply simp - apply (clarsimp simp: ct_not_ksQ) - apply (clarsimp simp add: sch_act_simple_def split: 
scheduler_action.split) + apply (simp add: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply simp apply (rule active_ex_cap') apply (erule active_from_running') apply (erule invs_iflive') @@ -248,6 +248,7 @@ lemma handleSyscall_ccorres: (callKernel (SyscallEvent sysc)) (Call handleSyscall_'proc)" apply (cinit' lift: syscall_') apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) apply (simp add: handleE_def handleE'_def) apply (rule ccorres_split_nothrow_novcg) apply wpc @@ -382,11 +383,10 @@ lemma handleSyscall_ccorres: apply wp[1] apply clarsimp apply wp - apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s \ - (\p. ksCurThread s \ set (ksReadyQueues s p))" + apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s" in hoare_post_imp) apply (simp add: ct_in_state'_def) - apply (wp handleReply_sane handleReply_ct_not_ksQ) + apply (wp handleReply_sane) \ \SysYield\ apply (clarsimp simp: syscall_from_H_def syscall_defs) apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ @@ -412,14 +412,14 @@ lemma handleSyscall_ccorres: apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) - apply (wp schedule_invs' schedule_sch_act_wf | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + apply (wp schedule_invs' schedule_sch_act_wf + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (wpsimp wp: hoare_vcg_if_lift3) apply (strengthen non_kernel_IRQs_strg[where Q=True, simplified]) apply (wpsimp wp: hoare_drop_imps) apply (simp | wpc | wp hoare_drop_imp handleReply_sane handleReply_nonz_cap_to_ct schedule_invs' - handleReply_ct_not_ksQ[simplified] | strengthen ct_active_not_idle'_strengthen invs_valid_objs_strengthen)+ apply (rule_tac Q="\rv. 
invs' and ct_active'" in hoare_post_imp, simp) apply (wp hy_invs') @@ -437,7 +437,7 @@ lemma handleSyscall_ccorres: apply (frule active_ex_cap') apply (clarsimp simp: invs'_def valid_state'_def) apply (clarsimp simp: simple_sane_strg ct_in_state'_def st_tcb_at'_def obj_at'_def - isReply_def ct_not_ksQ) + isReply_def) apply (rule conjI, fastforce) prefer 2 apply (cut_tac 'b=32 and x=a and n=10 and 'a=10 in ucast_leq_mask) @@ -472,7 +472,7 @@ lemma ccorres_corres_u_xf: apply (drule (1) bspec) apply (clarsimp simp: exec_C_def no_fail_def) apply (drule_tac x = a in spec) - apply (clarsimp simp:gets_def NonDetMonad.bind_def get_def return_def) + apply (clarsimp simp:gets_def Nondet_Monad.bind_def get_def return_def) apply (rule conjI) apply clarsimp apply (erule_tac x=0 in allE) @@ -503,7 +503,7 @@ lemma no_fail_callKernel: apply (rule corres_nofail) apply (rule corres_guard_imp) apply (rule kernel_corres) - apply force + apply (force simp: schact_is_rct_def) apply (simp add: sch_act_simple_def) apply metis done @@ -515,6 +515,7 @@ lemma handleVCPUFault_ccorres: (callKernel (HypervisorEvent (ARMVCPUFault hsr))) (Call handleVCPUFault_'proc)" apply (cinit' lift: hsr___unsigned_long_') apply (simp add: callKernel_def handleEvent_def handleHypervisorFault_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_def bind_assoc) apply (rule ccorres_pre_getCurThread, rename_tac curThread) (* armv_handleVCPUFault returns false on this platform, doing nothing else *) @@ -525,17 +526,18 @@ lemma handleVCPUFault_ccorres: apply (ctac (no_vcg) add: schedule_ccorres) apply (rule ccorres_stateAssert_after) apply (rule ccorres_guard_imp) - apply (ctac (no_vcg) add: activateThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: activateThread_ccorres) apply (clarsimp, assumption) apply assumption - apply (wp schedule_sch_act_wf schedule_invs'|strengthen invs_queues invs_valid_objs')+ + apply (wp schedule_sch_act_wf schedule_invs' + | strengthen invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct')+ apply vcg apply (clarsimp, rule conseqPre, vcg) apply clarsimp apply vcg apply (clarsimp, rule conseqPre, vcg) apply clarsimp - apply (clarsimp simp: ct_not_ksQ ct_running_imp_simple') + apply (clarsimp simp: ct_running_imp_simple' fastpathKernelAssertions_def) apply (rule conjI, rule active_ex_cap', erule active_from_running', fastforce) apply (clarsimp simp: cfault_rel_def seL4_Fault_VCPUFault_lift is_cap_fault_def) done @@ -600,8 +602,8 @@ lemma ccorres_add_gets: lemma ccorres_get_registers: "\ \cptr msgInfo. ccorres dc xfdc ((\s. P s \ Q s \ - obj_at' (\tcb. (atcbContextGet o tcbArch) tcb ARM_HYP_H.capRegister = cptr - \ (atcbContextGet o tcbArch) tcb ARM_HYP_H.msgInfoRegister = msgInfo) + obj_at' (\tcb. 
(user_regs o atcbContextGet o tcbArch) tcb ARM_HYP_H.capRegister = cptr + \ (user_regs o atcbContextGet o tcbArch) tcb ARM_HYP_H.msgInfoRegister = msgInfo) (ksCurThread s) s) and R) (UNIV \ \\cptr = cptr\ \ \\msgInfo = msgInfo\) [] m c \ \ @@ -614,15 +616,15 @@ lemma ccorres_get_registers: apply (rule ccorres_assume_pre) apply (clarsimp simp: ct_in_state'_def st_tcb_at'_def) apply (drule obj_at_ko_at', clarsimp) - apply (erule_tac x="(atcbContextGet o tcbArch) ko ARM_HYP_H.capRegister" in meta_allE) - apply (erule_tac x="(atcbContextGet o tcbArch) ko ARM_HYP_H.msgInfoRegister" in meta_allE) + apply (erule_tac x="(user_regs o atcbContextGet o tcbArch) ko ARM_HYP_H.capRegister" in meta_allE) + apply (erule_tac x="(user_regs o atcbContextGet o tcbArch) ko ARM_HYP_H.msgInfoRegister" in meta_allE) apply (erule ccorres_guard_imp2) apply (clarsimp simp: rf_sr_ksCurThread) apply (drule(1) obj_at_cslift_tcb, clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: ctcb_relation_def ccontext_relation_def ARM_HYP_H.msgInfoRegister_def ARM_HYP_H.capRegister_def ARM_HYP.msgInfoRegister_def ARM_HYP.capRegister_def - carch_tcb_relation_def + carch_tcb_relation_def cregs_relation_def "StrictC'_register_defs") done @@ -642,9 +644,9 @@ lemma callKernel_withFastpath_corres_C: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_r)+ apply (rule ccorres_Cond_rhs) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_call_ccorres_callKernel]) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_reply_recv_ccorres_callKernel]) apply vcg apply (rule conseqPre, vcg, clarsimp) @@ -655,6 +657,7 @@ lemma callKernel_withFastpath_corres_C: apply (clarsimp simp: typ_heap_simps' ct_in_state'_def "StrictC'_register_defs" word_sle_def word_sless_def st_tcb_at'_opeq_simp) + apply (frule ready_qs_runnable_cross, (fastforce simp: valid_sched_def)+) apply (rule conjI, fastforce simp: st_tcb_at'_def) apply (auto elim!: pred_tcb'_weakenE cnode_caps_gsCNodes_from_sr[rotated]) done @@ -671,20 +674,23 @@ lemma threadSet_all_invs_triv': apply (rule hoare_pre) apply (rule wp_from_corres_unit) apply (rule threadset_corresT [where f="tcb_arch_update (arch_tcb_context_set f)"]) - apply (simp add: tcb_relation_def arch_tcb_context_set_def - atcbContextSet_def arch_tcb_relation_def) - apply (simp add: tcb_cap_cases_def) - apply (simp add: tcb_cte_cases_def) + apply (simp add: tcb_relation_def arch_tcb_context_set_def + atcbContextSet_def arch_tcb_relation_def) + apply (simp add: tcb_cap_cases_def) + apply (simp add: tcb_cte_cases_def) + apply fastforce + apply fastforce + apply fastforce apply (simp add: exst_same_def) apply (wp thread_set_invs_trivial thread_set_ct_running thread_set_not_state_valid_sched - threadSet_invs_trivial threadSet_ct_running' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp thread_set_ct_in_state | simp add: tcb_cap_cases_def | rule threadSet_ct_in_state' | wp (once) hoare_vcg_disj_lift)+ apply clarsimp apply (rule exI, rule conjI, assumption) - apply (clarsimp simp: invs_def invs'_def cur_tcb_def cur_tcb'_def) + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def invs'_def cur_tcb_def) apply (simp add: state_relation_def) done @@ -729,9 +735,9 @@ lemma entry_corres_C: apply (simp add: ccontext_rel_to_C) apply simp apply (rule corres_split) - apply (rule corres_cases[where R=fp], simp_all add: dc_def[symmetric])[1] - apply (rule 
callKernel_withFastpath_corres_C, simp) - apply (rule callKernel_corres_C[unfolded dc_def], simp) + apply (rule corres_cases[where R=fp]; simp) + apply (rule callKernel_withFastpath_corres_C) + apply (rule callKernel_corres_C) apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. t' = tcb_ptr_to_ctcb_ptr t"]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) apply (rule getContext_corres, simp) @@ -764,15 +770,7 @@ lemma ct_running'_C: apply (frule (1) map_to_ko_atI') apply (erule obj_at'_weakenE) apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: - ThreadState_Running_def - ThreadState_BlockedOnReceive_def - ThreadState_BlockedOnSend_def - ThreadState_BlockedOnReply_def - ThreadState_BlockedOnNotification_def - ThreadState_Inactive_def - ThreadState_IdleThreadState_def - ThreadState_Restart_def) + apply (case_tac "tcbState ko"; simp add: ThreadState_defs) done lemma full_invs_both: @@ -836,7 +834,7 @@ lemma user_memory_update_corres_C: prefer 2 apply (clarsimp simp add: doMachineOp_def user_memory_update_def simpler_modify_def simpler_gets_def select_f_def - NonDetMonad.bind_def return_def) + Nondet_Monad.bind_def return_def) apply (thin_tac P for P)+ apply (case_tac a, clarsimp) apply (case_tac ksMachineState, clarsimp) @@ -863,7 +861,7 @@ lemma device_update_corres_C: apply (clarsimp simp add: setDeviceState_C_def simpler_modify_def) apply (rule ballI) apply (clarsimp simp: simpler_modify_def setDeviceState_C_def) - apply (clarsimp simp: doMachineOp_def device_memory_update_def NonDetMonad.bind_def in_monad + apply (clarsimp simp: doMachineOp_def device_memory_update_def Nondet_Monad.bind_def in_monad gets_def get_def return_def simpler_modify_def select_f_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) @@ -904,17 +902,22 @@ lemma dmo_domain_user_mem'[wp]: done lemma do_user_op_corres_C: - "corres_underlying rf_sr False False (=) (invs' and ex_abs einvs) \ - (doUserOp f tc) (doUserOp_C f tc)" + "corres_underlying rf_sr False False (=) + (invs' and ksReadyQueues_asrt and ex_abs einvs) \ + (doUserOp f tc) (doUserOp_C f tc)" apply (simp only: doUserOp_C_def doUserOp_def split_def) apply (rule corres_guard_imp) apply (rule_tac P=\ and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: simpler_gets_def getCurThread_def corres_underlying_def rf_sr_def cstate_relation_def Let_def) - apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) apply (clarsimp simp: cstate_to_A_def absKState_def rf_sr_def cstate_to_H_correct ptable_lift_def) - apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) apply (clarsimp simp: cstate_to_A_def absKState_def rf_sr_def cstate_to_H_correct ptable_rights_def) apply (rule_tac P=pspace_distinct' and P'=\ and r'="(=)" @@ -931,7 +934,7 @@ lemma do_user_op_corres_C: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) apply (drule(1) device_mem_C_relation[symmetric]) - apply (simp add: comp_def) + apply simp apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: cstate_relation_def rf_sr_def Let_def cmachine_state_relation_def) @@ -951,7 +954,7 @@ lemma do_user_op_corres_C: apply (rule corres_split[OF 
user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C, where R="\\" and R'="\\"]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (intro conjI allI ballI impI) apply ((clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def)+)[5] apply (clarsimp simp: ex_abs_def restrict_map_def @@ -1011,6 +1014,9 @@ lemma refinement2_both: apply (subst cstate_to_H_correct) apply (fastforce simp: full_invs'_def invs'_def) apply (clarsimp simp: rf_sr_def) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) apply (simp add:absKState_def observable_memory_def absExst_def) apply (rule MachineTypes.machine_state.equality,simp_all)[1] apply (rule ext) @@ -1037,13 +1043,35 @@ lemma refinement2_both: apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) - apply (fastforce simp: full_invs'_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce apply (erule_tac P="a \ b \ c \ (\x. e x)" for a b c d e in disjE) apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) - apply (fastforce simp: full_invs'_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce apply (clarsimp simp: check_active_irq_C_def check_active_irq_H_def) apply (rule rev_mp, rule check_active_irq_corres_C) @@ -1131,7 +1159,7 @@ lemma kernel_all_subset_kernel: check_active_irq_H_def checkActiveIRQ_def) apply clarsimp apply (erule in_monad_imp_rewriteE[where F=True]) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_bind_tail)+ apply (rule monadic_rewrite_bind_head[where P=\]) apply (simp add: callKernel_C_def callKernel_withFastpath_C_def diff --git a/proof/crefine/ARM_HYP/Retype_C.thy b/proof/crefine/ARM_HYP/Retype_C.thy index 3a5d32dcc1..63f6013146 100644 --- a/proof/crefine/ARM_HYP/Retype_C.thy +++ b/proof/crefine/ARM_HYP/Retype_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -607,46 +608,6 @@ lemma field_of_t_refl: apply (simp add: unat_eq_0) done -lemma typ_slice_list_array: - "x < size_td td * n - \ typ_slice_list (map (\i. DTPair td (nm i)) [0.. 
k < n - \ gd (p +\<^sub>p int k) - \ h_t_valid htd gd (p +\<^sub>p int k)" - apply (clarsimp simp: h_t_array_valid_def h_t_valid_def valid_footprint_def - size_of_def[symmetric, where t="TYPE('a)"]) - apply (drule_tac x="k * size_of TYPE('a) + y" in spec) - apply (drule mp) - apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) - apply (simp add: mult.commute) - apply (clarsimp simp: ptr_add_def add.assoc) - apply (erule map_le_trans[rotated]) - apply (clarsimp simp: uinfo_array_tag_n_m_def) - apply (subst typ_slice_list_array) - apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) - apply (simp add: mult.commute size_of_def) - apply (simp add: size_of_def list_map_mono) - done - lemma h_t_valid_ptr_retyps_gen: assumes sz: "nptrs * size_of TYPE('a :: mem_type) < addr_card" and gd: "gd p'" @@ -778,11 +739,6 @@ lemma ptr_add_orth: apply (simp add: addr_card_wb [symmetric]) done -lemma dom_lift_t_heap_update: - "dom (lift_t g (hrs_mem_update v hp)) = dom (lift_t g hp)" - by (clarsimp simp add: lift_t_def lift_typ_heap_if s_valid_def hrs_htd_def hrs_mem_update_def split_def dom_def - intro!: Collect_cong split: if_split) - lemma h_t_valid_ptr_retyps_gen_same: assumes guard: "\n' < nptrs. gd (CTypesDefs.ptr_add (Ptr p :: 'a ptr) (of_nat n'))" assumes cleared: "region_is_bytes' p (nptrs * size_of TYPE('a :: mem_type)) htd" @@ -1114,7 +1070,7 @@ lemma ptr_add_to_new_cap_addrs: shows "(CTypesDefs.ptr_add (Ptr ptr :: 'a :: mem_type ptr) \ of_nat) ` {k. k < n} = Ptr ` set (new_cap_addrs n ptr ko)" unfolding new_cap_addrs_def - apply (simp add: comp_def image_image shiftl_t2n size_of_m field_simps) + apply (simp add: image_image shiftl_t2n size_of_m field_simps) apply (clarsimp simp: atLeastLessThan_def lessThan_def) done @@ -1147,29 +1103,6 @@ lemma update_ti_t_word64_0s: "word_rcat [0, 0, 0, 0, 0, 0, 0, (0 :: 8 word)] = (0 :: 64 word)" by (simp_all add: typ_info_word word_rcat_def bin_rcat_def) -lemma is_aligned_ptr_aligned: - fixes p :: "'a :: c_type ptr" - assumes al: "is_aligned (ptr_val p) n" - and alignof: "align_of TYPE('a) = 2 ^ n" - shows "ptr_aligned p" - using al unfolding is_aligned_def ptr_aligned_def - by (simp add: alignof) - -lemma is_aligned_c_guard: - "is_aligned (ptr_val p) n - \ ptr_val p \ 0 - \ align_of TYPE('a) = 2 ^ m - \ size_of TYPE('a) \ 2 ^ n - \ m \ n - \ c_guard (p :: ('a :: c_type) ptr)" - apply (clarsimp simp: c_guard_def c_null_guard_def) - apply (rule conjI) - apply (rule is_aligned_ptr_aligned, erule(1) is_aligned_weaken, simp) - apply (erule is_aligned_get_word_bits, simp_all) - apply (rule intvl_nowrap[where x=0, simplified], simp) - apply (erule is_aligned_no_wrap_le, simp+) - done - lemma retype_guard_helper: assumes cover: "range_cover p sz (objBitsKO ko) n" and ptr0: "p \ 0" @@ -2399,6 +2332,9 @@ definition | ARM_HYP_H.PageDirectoryObject \ scast seL4_ARM_PageDirectoryObject | ARM_HYP_H.VCPUObject \ scast seL4_ARM_VCPUObject" +(* always unfold StrictC'_mode_object_defs together with api_object_defs *) +lemmas api_object_defs = api_object_defs StrictC'_mode_object_defs + lemmas nAPIObjects_def = seL4_NonArchObjectTypeCount_def lemma nAPIOBjects_object_type_from_H: @@ -2486,21 +2422,6 @@ next done qed -(* FIXME: move *) -lemma ccorres_to_vcg_nf: - "\ccorres rrel xf P P' [] a c; no_fail Q a; \s. P s \ Q s\ - \ \\ {s. P \ \ s \ P' \ (\, s) \ rf_sr} c - {s. \(rv, \')\fst (a \). 
(\', s) \ rf_sr \ rrel rv (xf s)}" - apply (rule HoarePartial.conseq_exploit_pre) - apply clarsimp - apply (rule conseqPre) - apply (drule ccorres_to_vcg') - prefer 2 - apply simp - apply (simp add: no_fail_def) - apply clarsimp - done - lemma mdb_node_get_mdbNext_heap_ccorres: "ccorres (=) ret__unsigned_' \ UNIV hs (liftM (mdbNext \ cteMDBNode) (getCTE parent)) @@ -2532,8 +2453,8 @@ lemma getCTE_pre_cte_at: lemmas ccorres_getCTE_cte_at = ccorres_guard_from_wp [OF getCTE_pre_cte_at empty_fail_getCTE] ccorres_guard_from_wp_bind [OF getCTE_pre_cte_at empty_fail_getCTE] -lemmas ccorres_guard_from_wp_liftM = ccorres_guard_from_wp [OF liftM_pre iffD2 [OF empty_fail_liftM]] -lemmas ccorres_guard_from_wp_bind_liftM = ccorres_guard_from_wp_bind [OF liftM_pre iffD2 [OF empty_fail_liftM]] +lemmas ccorres_guard_from_wp_liftM = ccorres_guard_from_wp [OF liftM_pre empty_fail_liftM] +lemmas ccorres_guard_from_wp_bind_liftM = ccorres_guard_from_wp_bind [OF liftM_pre empty_fail_liftM] lemmas ccorres_liftM_getCTE_cte_at = ccorres_guard_from_wp_liftM [OF getCTE_pre_cte_at empty_fail_getCTE] ccorres_guard_from_wp_bind_liftM [OF getCTE_pre_cte_at empty_fail_getCTE] @@ -2565,9 +2486,9 @@ lemma insertNewCap_ccorres_helper: apply (rule conjI) apply (erule (2) cmap_relation_updI) apply (simp add: ccap_relation_def ccte_relation_def cte_lift_def) - subgoal by (simp add: cte_to_H_def map_option_Some_eq2 mdb_node_to_H_def to_bool_mask_to_bool_bf is_aligned_neg_mask - c_valid_cte_def true_def - split: option.splits) + subgoal by (simp add: cte_to_H_def map_option_Some_eq2 mdb_node_to_H_def to_bool_mask_to_bool_bf + is_aligned_neg_mask c_valid_cte_def + split: option.splits) subgoal by simp apply (erule_tac t = s' in ssubst) apply (simp cong: lifth_update) @@ -2876,19 +2797,9 @@ lemma createNewCaps_untyped_if_helper: (\ gbits \ sz) = (s' \ \of_nat sz < (of_nat gbits :: word32)\)" by (clarsimp simp: not_le unat_of_nat32 word_less_nat_alt lt_word_bits_lt_pow) -lemma true_mask1 [simp]: - "true && mask (Suc 0) = true" - unfolding true_def - by (simp add: bang_eq cong: conj_cong) - (* Levity: added (20090419 09:44:40) *) declare shiftl_mask_is_0 [simp] -lemma to_bool_simps [simp]: - "to_bool true" "\ to_bool false" - unfolding true_def false_def to_bool_def - by simp_all - lemma heap_list_update': "\ n = length v; length v \ 2 ^ word_bits \ \ heap_list (heap_update_list p v h) n p = v" by (simp add: heap_list_update addr_card_wb) @@ -3050,14 +2961,9 @@ lemma update_ti_t_array_rep_word0: done lemma newContext_def2: - "newContext \ (\x. if x = register.CPSR then 0x150 else 0)" -proof - - have "newContext = (\x. if x = register.CPSR then 0x150 else 0)" - apply (simp add: newContext_def initContext_def) - apply (auto intro: ext) - done - thus "newContext \ (\x. if x = register.CPSR then 0x150 else 0)" by simp -qed + "newContext \ UserContext (\x. 
if x = register.CPSR then 0x150 else 0)" + by (rule newContext_def[simplified initContext_def, simplified, + simplified fun_upd_def]) lemma tcb_queue_update_other: "\ ctcb_ptr_to_tcb_ptr p \ set tcbs \ \ @@ -3143,7 +3049,6 @@ lemma cnc_tcb_helper: and al: "is_aligned (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb)" and ptr0: "ctcb_ptr_to_tcb_ptr p \ 0" and ptrlb: "2^ctcb_size_bits \ ptr_val p" - and vq: "valid_queues \" and pal: "pspace_aligned' (\\ksPSpace := ks\)" and pno: "pspace_no_overlap' (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb) (\\ksPSpace := ks\)" and pds: "pspace_distinct' (\\ksPSpace := ks\)" @@ -3509,20 +3414,21 @@ proof - supply unsigned_numeral[simp del] apply (simp add: fbtcb minBound_word) apply (intro conjI) - apply (simp add: cthread_state_relation_def thread_state_lift_def - eval_nat_numeral ThreadState_Inactive_def) - apply (clarsimp simp: ccontext_relation_def newContext_def2 carch_tcb_relation_def - newArchTCB_def) + apply (simp add: cthread_state_relation_def thread_state_lift_def + eval_nat_numeral ThreadState_defs) + apply (clarsimp simp: ccontext_relation_def newContext_def2 carch_tcb_relation_def + newArchTCB_def cregs_relation_def) apply (case_tac r, simp_all add: "StrictC'_register_defs" eval_nat_numeral atcbContext_def newArchTCB_def newContext_def initContext_def)[1] \ \takes ages\ - apply (simp add: thread_state_lift_def eval_nat_numeral atcbContextGet_def)+ + apply (simp add: thread_state_lift_def eval_nat_numeral atcbContextGet_def)+ apply (simp add: Kernel_Config.timeSlice_def) apply (simp add: cfault_rel_def seL4_Fault_lift_def seL4_Fault_get_tag_def Let_def - lookup_fault_lift_def lookup_fault_get_tag_def lookup_fault_invalid_root_def - eval_nat_numeral seL4_Fault_NullFault_def option_to_ptr_def option_to_0_def - split: if_split)+ + lookup_fault_lift_def lookup_fault_get_tag_def lookup_fault_invalid_root_def + eval_nat_numeral seL4_Fault_NullFault_def option_to_ptr_def option_to_0_def + option_to_ctcb_ptr_def + split: if_split)+ done have pks: "ks (ctcb_ptr_to_tcb_ptr p) = None" @@ -3573,15 +3479,6 @@ proof - apply (fastforce simp: dom_def) done - hence kstcb: "\qdom prio. ctcb_ptr_to_tcb_ptr p \ set (ksReadyQueues \ (qdom, prio))" using vq - apply (clarsimp simp add: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x = qdom in spec) - apply (drule_tac x = prio in spec) - apply clarsimp - apply (drule (1) bspec) - apply (simp add: obj_at'_def) - done - have ball_subsetE: "\P S R. \ \x \ S. P x; R \ S \ \ \x \ R. 
P x" by blast @@ -3695,7 +3592,7 @@ proof - apply (simp add: cl_cte [simplified] cl_tcb [simplified] cl_rest [simplified] tag_disj_via_td_name) apply (clarsimp simp add: cready_queues_relation_def Let_def htd_safe[simplified] kernel_data_refs_domain_eq_rotate) - apply (simp add: kstcb tcb_queue_update_other' hrs_htd_update + apply (simp add: tcb_queue_update_other' hrs_htd_update ptr_retyp_to_array[simplified] irq[simplified]) done qed @@ -4523,12 +4420,10 @@ lemma ccorres_placeNewObject_endpoint: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ep [where ptr=regionBase and n="1" and sz="objBitsKO (KOEndpoint makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -4646,7 +4541,7 @@ declare replicate_numeral [simp del] lemma ccorres_placeNewObject_tcb: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase tcbBlockSizeBits - and valid_queues and (\s. sym_refs (state_refs_of' s)) + and (\s. sym_refs (state_refs_of' s)) and (\s. 2 ^ tcbBlockSizeBits \ gsMaxObjectSize s) and ret_zero regionBase (2 ^ tcbBlockSizeBits) and K (regionBase \ 0 \ range_cover regionBase tcbBlockSizeBits tcbBlockSizeBits 1 @@ -4983,7 +4878,7 @@ qed lemma placeNewObject_user_data: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase (pageBits+us) - and valid_queues and valid_machine_state' + and valid_machine_state' and ret_zero regionBase (2 ^ (pageBits+us)) and (\s. sym_refs (state_refs_of' s)) and (\s. 2^(pageBits + us) \ gsMaxObjectSize s) @@ -5124,7 +5019,7 @@ lemma placeNewObject_user_data_device: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and ret_zero regionBase (2 ^ (pageBits + us)) - and pspace_no_overlap' regionBase (pageBits+us) and valid_queues + and pspace_no_overlap' regionBase (pageBits+us) and (\s. sym_refs (state_refs_of' s)) and (\s. 
2^(pageBits + us) \ gsMaxObjectSize s) and K (regionBase \ 0 \ range_cover regionBase (pageBits + us) (pageBits+us) (Suc 0) @@ -5260,13 +5155,9 @@ lemma monadic_rewrite_placeNewObject_vcpu_decompose: (do placeNewObject v vcpupre 0; setObject v vcpu od)" - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans[rotated]) - apply (rule monadic_rewrite_bind_tail) - apply clarsimp - apply (rule monadic_rewrite_modify_setObject_vcpu) - apply (rule hoare_post_imp[OF _ placeNewObject_creates_object_vcpu]) - apply (fastforce simp: ko_at_vcpu_at'D) + apply clarsimp + apply (monadic_rewrite_r monadic_rewrite_modify_setObject_vcpu + \wpsimp wp: placeNewObject_object_at_vcpu\) apply (clarsimp simp: placeNewObject_def placeNewObject'_def bind_assoc split_def) apply (clarsimp simp: objBits_simps' archObjSize_def) apply (rule monadic_rewrite_bind_tail)+ @@ -5286,17 +5177,10 @@ lemma monadic_rewrite_setObject_vcpu_twice: setObject v vcpu od)" supply fun_upd_apply[simp del] - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans[rotated]) - apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_modify_setObject_vcpu) - apply (rule monadic_rewrite_trans[rotated]) - apply (rule monadic_rewrite_bind_tail) - apply clarsimp - apply (rule monadic_rewrite_modify_setObject_vcpu) - apply wp - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_setObject_vcpu_modify) + apply simp + apply (monadic_rewrite_r monadic_rewrite_modify_setObject_vcpu) + apply (monadic_rewrite_r monadic_rewrite_modify_setObject_vcpu) + apply (monadic_rewrite_l monadic_rewrite_setObject_vcpu_modify) apply (rule monadic_rewrite_is_refl) apply (rule ext) apply (clarsimp simp: exec_modify) @@ -5398,50 +5282,34 @@ lemma monadic_rewrite_setObject_vcpu_as_init: od) " supply fun_upd_apply[simp del] - apply (simp add: K_def) + apply simp apply (rule monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_imp) + apply monadic_rewrite_pre apply (simp add: vcpuWriteReg_def vgicUpdate_def bind_assoc) - apply (rule monadic_rewrite_trans[rotated]) apply (clarsimp simp: vcpuUpdate_def bind_assoc) (* explicitly state the vcpu we are setting for each setObject *) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac vcpu) - apply (rule_tac P="vcpu = vcpu0" in monadic_rewrite_gen_asm, simp) + apply (rule monadic_rewrite_trans[rotated]) + apply (monadic_rewrite_symb_exec_r_known vcpu0) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac vcpu') - apply (rule_tac P="vcpu' = vcpu1" in monadic_rewrite_gen_asm, simp) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac vcpu'') - apply (rule_tac P="vcpu'' = vcpu2" in monadic_rewrite_gen_asm, simp) + apply (monadic_rewrite_symb_exec_r_known vcpu1) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec_r, wp+) - apply (rename_tac vcpu''') - apply (rule_tac P="vcpu''' = vcpu3" in monadic_rewrite_gen_asm, simp) - apply (rule monadic_rewrite_refl) - apply (wpsimp wp: getObject_vcpu_prop simp: vcpu1_def vcpu2_def vcpu3_def vcpu0_def)+ - apply (wp setObject_sets_object_vcpu) - apply (wpsimp wp: getObject_vcpu_prop)+ - apply (wpsimp wp: getObject_vcpu_prop simp: vcpu1_def vcpu2_def vcpu0_def)+ - apply (wp setObject_sets_object_vcpu) - apply (wpsimp wp: getObject_vcpu_prop)+ - apply (wpsimp wp: getObject_vcpu_prop simp: vcpu1_def vcpu2_def vcpu0_def)+ - apply (wp setObject_sets_object_vcpu) - apply (wpsimp wp: 
getObject_vcpu_prop)+ + apply (monadic_rewrite_symb_exec_r_known vcpu2) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_r_known vcpu3) + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getObject_vcpu_prop simp: vcpu1_def vcpu2_def vcpu3_def vcpu0_def)+ + apply (wp setObject_sets_object_vcpu) + apply (wpsimp wp: getObject_vcpu_prop)+ + apply (wpsimp wp: getObject_vcpu_prop simp: vcpu1_def vcpu2_def vcpu0_def)+ + apply (wp setObject_sets_object_vcpu) + apply (wpsimp wp: getObject_vcpu_prop)+ + apply (wpsimp wp: getObject_vcpu_prop simp: vcpu1_def vcpu2_def vcpu0_def)+ + apply (wp setObject_sets_object_vcpu) + apply (wpsimp wp: getObject_vcpu_prop simp: vcpu1_def vcpu2_def vcpu0_def)+ (* now we have four setObjects in a row, fold them up using setObject-combining *) - apply (rule monadic_rewrite_trans[rotated]) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_setObject_vcpu_twice[simplified]) - apply wp+ - apply (rule monadic_rewrite_trans[rotated]) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_setObject_vcpu_twice[simplified]) - apply wp+ - apply (rule monadic_rewrite_trans[rotated]) - apply (rule monadic_rewrite_setObject_vcpu_twice[simplified]) + apply (monadic_rewrite_r_method \rule monadic_rewrite_setObject_vcpu_twice[simplified]\ wpsimp) + apply (monadic_rewrite_r_method \rule monadic_rewrite_setObject_vcpu_twice[simplified]\ wpsimp) + apply (monadic_rewrite_r_method \rule monadic_rewrite_setObject_vcpu_twice[simplified]\ wpsimp) apply (rule monadic_rewrite_is_refl) apply (fastforce simp: vcpu3_def vcpu2_def vcpu1_def vcpu0_def makeVCPUObject_def) apply (fastforce simp: vcpu0_def ko_at_vcpu_at'D) @@ -5461,14 +5329,14 @@ lemma ptr_retyp_fromzeroVCPU: assumes cor: "caps_overlap_reserved' {p ..+ 2 ^ vcpu_bits} \" assumes ptr0: "p \ 0" assumes kdr: "{p ..+ 2 ^ vcpu_bits} \ kernel_data_refs = {}" - assumes subr: "{p ..+ 456} \ {p ..+ 2 ^ vcpu_bits}" + assumes subr: "{p ..+ 464} \ {p ..+ 2 ^ vcpu_bits}" (is "{_ ..+ ?vcpusz} \ _") assumes act_bytes: "region_actually_is_bytes p (2 ^ vcpu_bits) \'" assumes rep0: "heap_list (hrs_mem (t_hrs_' (globals \'))) (2 ^ vcpu_bits) p = replicate (2 ^ vcpu_bits) 0" assumes "\ snd (placeNewObject p vcpu0 0 \)" assumes cover: "range_cover p vcpu_bits vcpu_bits 1" assumes al: "is_aligned p vcpu_bits" assumes sr: "(\, \') \ rf_sr" - shows "(\\ksPSpace := ksPSpace \(p \ ko_vcpu)\, + shows "(\\ksPSpace := (ksPSpace \)(p \ ko_vcpu)\, globals_update (t_hrs_'_update (hrs_htd_update (ptr_retyp (vcpu_Ptr p)))) \') \ rf_sr" (is "(\\ksPSpace := ?ks\, globals_update ?gs' \') \ rf_sr") @@ -5478,7 +5346,8 @@ proof - let ?htdret = "(hrs_htd_update (ptr_retyp (vcpu_Ptr p)) (t_hrs_' (globals \')))" let ?zeros = "from_bytes (replicate (size_of TYPE(vcpu_C)) 0) :: vcpu_C" - have "size_of TYPE(vcpu_C) = 456" (is "_ = ?vcpusz") + (* sanity check for the value of ?vcpusz *) + have "size_of TYPE(vcpu_C) = ?vcpusz" by simp have ptr_al: @@ -5539,8 +5408,8 @@ proof - have map_vcpus: "cmap_relation (map_to_vcpus (ksPSpace \)) (cslift \') vcpu_Ptr cvcpu_relation - \ cmap_relation (map_to_vcpus (ksPSpace \)(p \ vcpu0)) - (cslift \'(vcpu_Ptr p \ ?zeros)) vcpu_Ptr cvcpu_relation" + \ cmap_relation ((map_to_vcpus (ksPSpace \))(p \ vcpu0)) + ((cslift \')(vcpu_Ptr p \ ?zeros)) vcpu_Ptr cvcpu_relation" apply (erule cmap_vcpus) apply (simp add: vcpu0_def from_bytes_def) apply (simp add: typ_info_simps vcpu_C_tag_def) @@ -5566,7 +5435,7 @@ proof - apply (subst 
index_fold_update; clarsimp) (* vppi array initialisation *) apply clarsimp - apply (case_tac vppi; clarsimp simp: from_bool_def) + apply (case_tac vppi; clarsimp) (* only one vppievent_irq constructor, safe to unfold *) apply (clarsimp simp: fromEnum_def enum_vppievent_irq) done @@ -5643,13 +5512,13 @@ proof - by (simp add: objBitsKO_def archObjSize_def vcpu_bits_def' vcpuBits_def') have rl_vcpu: - "(projectKO_opt \\<^sub>m (ksPSpace \(p \ KOArch (KOVCPU vcpu0))) :: word32 \ vcpu option) + "(projectKO_opt \\<^sub>m ((ksPSpace \)(p \ KOArch (KOVCPU vcpu0))) :: word32 \ vcpu option) = (projectKO_opt \\<^sub>m ksPSpace \)(p \ vcpu0)" by (rule ext) (clarsimp simp: projectKOs map_comp_def vcpu0_def split: if_split) have ctes: - "map_to_ctes (ksPSpace \(p \ KOArch (KOVCPU vcpu0))) = ctes_of \" + "map_to_ctes ((ksPSpace \)(p \ KOArch (KOVCPU vcpu0))) = ctes_of \" using pal pdst al pno apply (clarsimp simp: fun_upd_def) apply (frule (2) pspace_no_overlap_base') @@ -5692,7 +5561,7 @@ proof - apply (clarsimp simp: ko_vcpu_def vcpu0_def) apply (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def cmachine_state_relation_def Let_def h_t_valid_clift_Some_iff) - apply (subgoal_tac "region_is_bytes p 456 \'") + apply (subgoal_tac "region_is_bytes p ?vcpusz \'") prefer 2 apply (fastforce simp: region_actually_is_bytes[OF act_bytes] region_is_bytes_subset[OF _ subr]) @@ -5722,7 +5591,7 @@ lemma placeNewObject_vcpu_fromzero_ccorres: apply (rule ccorres_from_vcg_nofail, clarsimp) apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_htd_safe) - apply (subgoal_tac "{regionBase..+456} \ {regionBase..+2^vcpu_bits}") + apply (subgoal_tac "{regionBase..+464} \ {regionBase..+2^vcpu_bits}") prefer 2 apply clarsimp apply (drule intvlD, clarsimp) @@ -5766,7 +5635,6 @@ proof - done show ?thesis - supply dc_simp[simp del] apply (cinit' lift: vcpu_' simp: makeObject_vcpu) apply clarsimp apply (rule monadic_rewrite_ccorres_assemble[OF _ monadic_rewrite_setObject_vcpu_as_init]) @@ -5793,7 +5661,6 @@ lemma placeNewObject_vcpu_ccorres: hs (placeNewObject regionBase (makeObject :: vcpu) 0) (global_htd_update (\_. 
(ptr_retyp (vcpu_Ptr regionBase)));; CALL vcpu_init(vcpu_Ptr regionBase))" - supply dc_simp[simp del] apply (rule ccorres_guard_imp) apply (rule monadic_rewrite_ccorres_assemble[OF _ monadic_rewrite_placeNewObject_vcpu_decompose[where vcpupre=fromzeroVCPU]]) @@ -5830,48 +5697,47 @@ proof - apply (frule range_cover.aligned) apply (cut_tac t) apply (case_tac newType, - simp_all add: toAPIType_def - bind_assoc - ARMLargePageBits_def) + simp_all add: toAPIType_def bind_assoc ARMLargePageBits_def) apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') apply (simp add: object_type_from_H_def Kernel_C_defs) apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff - ARMLargePageBits_def ARMSmallPageBits_def - ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def - sle_positive APIType_capBits_def shiftL_nat objBits_simps - ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def - fold_eq_0_to_bool) + ARMLargePageBits_def ARMSmallPageBits_def + ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def + sle_positive APIType_capBits_def shiftL_nat objBits_simps + ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def + fold_eq_0_to_bool) apply (ccorres_remove_UNIV_guard) apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps archObjSize_def - ARM_HYP_H.createObject_def pageBits_def - cond_second_eq_seq_ccorres modify_gsUserPages_update - intro!: ccorres_rhs_assoc) + ARM_HYP_H.createObject_def pageBits_def + cond_second_eq_seq_ccorres modify_gsUserPages_update + intro!: ccorres_rhs_assoc) apply ((rule ccorres_return_C | simp | wp | vcg - | (rule match_ccorres, ctac add: - placeNewDataObject_ccorres[where us=0 and newType=newType, simplified] - gsUserPages_update_ccorres[folded modify_gsUserPages_update]) - | (rule match_ccorres, csymbr))+)[1] + | (rule match_ccorres, ctac add: + placeNewDataObject_ccorres[where us=0 and newType=newType, simplified] + gsUserPages_update_ccorres[folded modify_gsUserPages_update]) + | (rule match_ccorres, csymbr))+)[1] apply (intro conjI) apply (clarsimp simp: createObject_hs_preconds_def APIType_capBits_def pageBits_def) apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def - framesize_to_H_def cap_to_H_simps cap_small_frame_cap_lift - vmrights_to_H_def vm_rights_defs is_aligned_neg_mask_eq, simp add: mask_def) + framesize_to_H_def cap_to_H_simps cap_small_frame_cap_lift + vmrights_to_H_def vm_rights_defs is_aligned_neg_mask_eq, + simp add: mask_def) \ \Page objects: could possibly fix the duplication here\ apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') apply (simp add: object_type_from_H_def Kernel_C_defs) apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff - ARMLargePageBits_def ARMSmallPageBits_def - ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def - sle_positive APIType_capBits_def shiftL_nat objBits_simps - ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def - fold_eq_0_to_bool) + ARMLargePageBits_def ARMSmallPageBits_def + ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def + sle_positive APIType_capBits_def shiftL_nat objBits_simps + ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def + fold_eq_0_to_bool) apply (ccorres_remove_UNIV_guard) apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps archObjSize_def - ARM_HYP_H.createObject_def pageBits_def - cond_second_eq_seq_ccorres modify_gsUserPages_update - intro!: ccorres_rhs_assoc) + ARM_HYP_H.createObject_def pageBits_def + cond_second_eq_seq_ccorres 
modify_gsUserPages_update + intro!: ccorres_rhs_assoc) apply ((rule ccorres_return_C | simp | wp | vcg | (rule match_ccorres, ctac add: placeNewDataObject_ccorres[where us=4 and newType=newType, simplified] @@ -5881,24 +5747,24 @@ proof - apply (clarsimp simp: createObject_hs_preconds_def APIType_capBits_def pageBits_def) apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def - framesize_to_H_def cap_to_H_simps cap_frame_cap_lift - vmrights_to_H_def mask_def vm_rights_defs vm_page_size_defs - cl_valid_cap_def c_valid_cap_def - is_aligned_neg_mask_eq_concrete[THEN sym]) + framesize_to_H_def cap_to_H_simps cap_frame_cap_lift + vmrights_to_H_def mask_def vm_rights_defs vm_page_size_defs + cl_valid_cap_def c_valid_cap_def + is_aligned_neg_mask_eq_concrete[THEN sym]) apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') apply (simp add: object_type_from_H_def Kernel_C_defs) apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff - ARMLargePageBits_def ARMSmallPageBits_def - ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def - sle_positive APIType_capBits_def shiftL_nat objBits_simps - ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def - fold_eq_0_to_bool) + ARMLargePageBits_def ARMSmallPageBits_def + ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def + sle_positive APIType_capBits_def shiftL_nat objBits_simps + ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def + fold_eq_0_to_bool) apply (ccorres_remove_UNIV_guard) apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps archObjSize_def - ARM_HYP_H.createObject_def pageBits_def - cond_second_eq_seq_ccorres modify_gsUserPages_update - intro!: ccorres_rhs_assoc) + ARM_HYP_H.createObject_def pageBits_def + cond_second_eq_seq_ccorres modify_gsUserPages_update + intro!: ccorres_rhs_assoc) apply ((rule ccorres_return_C | simp | wp | vcg | (rule match_ccorres, ctac add: placeNewDataObject_ccorres[where us=9 and newType=newType, simplified] @@ -5908,19 +5774,19 @@ proof - apply (clarsimp simp: createObject_hs_preconds_def APIType_capBits_def pageBits_def) apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def - framesize_to_H_def cap_to_H_simps cap_frame_cap_lift - vmrights_to_H_def mask_def vm_rights_defs vm_page_size_defs - cl_valid_cap_def c_valid_cap_def - is_aligned_neg_mask_eq_concrete[THEN sym]) + framesize_to_H_def cap_to_H_simps cap_frame_cap_lift + vmrights_to_H_def mask_def vm_rights_defs vm_page_size_defs + cl_valid_cap_def c_valid_cap_def + is_aligned_neg_mask_eq_concrete[THEN sym]) apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') apply (simp add: object_type_from_H_def Kernel_C_defs) apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff - ARMLargePageBits_def ARMSmallPageBits_def - ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def - sle_positive APIType_capBits_def shiftL_nat objBits_simps - ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def - fold_eq_0_to_bool) + ARMLargePageBits_def ARMSmallPageBits_def + ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def + sle_positive APIType_capBits_def shiftL_nat objBits_simps + ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def + fold_eq_0_to_bool) apply (ccorres_remove_UNIV_guard) apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps archObjSize_def ARM_HYP_H.createObject_def pageBits_def @@ -5935,23 +5801,23 @@ proof - apply (clarsimp simp: createObject_hs_preconds_def APIType_capBits_def pageBits_def) apply 
(clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def - framesize_to_H_def cap_to_H_simps cap_frame_cap_lift - vmrights_to_H_def mask_def vm_rights_defs vm_page_size_defs - cl_valid_cap_def c_valid_cap_def - is_aligned_neg_mask_eq_concrete[THEN sym]) + framesize_to_H_def cap_to_H_simps cap_frame_cap_lift + vmrights_to_H_def mask_def vm_rights_defs vm_page_size_defs + cl_valid_cap_def c_valid_cap_def + is_aligned_neg_mask_eq_concrete[THEN sym]) \ \PageTableObject\ apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') apply (simp add: object_type_from_H_def Kernel_C_defs) apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff - ARMLargePageBits_def ARMSmallPageBits_def - ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def - sle_positive APIType_capBits_def shiftL_nat objBits_simps - ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def) + ARMLargePageBits_def ARMSmallPageBits_def + ARMSectionBits_def ARMSuperSectionBits_def asidInvalid_def + sle_positive APIType_capBits_def shiftL_nat objBits_simps + ptBits_def archObjSize_def pageBits_def word_sle_def word_sless_def) apply (ccorres_remove_UNIV_guard) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps archObjSize_def - ARM_HYP_H.createObject_def pageBits_def pt_bits_def) + ARM_HYP_H.createObject_def pageBits_def pt_bits_def) apply (ctac pre only: add: placeNewObject_pte[simplified]) apply csymbr apply (rule ccorres_return_C) @@ -5963,15 +5829,15 @@ proof - apply clarify apply (intro conjI) apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' invs_valid_global' - APIType_capBits_def invs_queues invs_valid_objs' + APIType_capBits_def invs_valid_objs' invs_urz) apply clarsimp apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def - framesize_to_H_def cap_to_H_simps cap_page_table_cap_lift - is_aligned_neg_mask_eq vmrights_to_H_def - Kernel_C.VMReadWrite_def Kernel_C.VMNoAccess_def - Kernel_C.VMKernelOnly_def Kernel_C.VMReadOnly_def) - apply (clarsimp simp: to_bool_def false_def isFrameType_def) + framesize_to_H_def cap_to_H_simps cap_page_table_cap_lift + is_aligned_neg_mask_eq vmrights_to_H_def + Kernel_C.VMReadWrite_def Kernel_C.VMNoAccess_def + Kernel_C.VMKernelOnly_def Kernel_C.VMReadOnly_def) + apply (clarsimp simp: isFrameType_def) apply (rule sym) apply (simp add: is_aligned_neg_mask_eq'[symmetric] is_aligned_weaken) @@ -5979,13 +5845,13 @@ proof - apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') apply (simp add: object_type_from_H_def Kernel_C_defs) apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff - asidInvalid_def sle_positive APIType_capBits_def shiftL_nat - objBits_simps archObjSize_def - ptBits_def pageBits_def pdBits_def word_sle_def word_sless_def) + asidInvalid_def sle_positive APIType_capBits_def shiftL_nat + objBits_simps archObjSize_def + ptBits_def pageBits_def pdBits_def word_sle_def word_sless_def) apply (ccorres_remove_UNIV_guard) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps archObjSize_def - ARM_HYP_H.createObject_def pageBits_def pdBits_def pd_bits_def) + ARM_HYP_H.createObject_def pageBits_def pdBits_def pd_bits_def) apply (ctac pre only: add: placeNewObject_pde[simplified]) apply (ctac add: copyGlobalMappings_ccorres) apply csymbr @@ -5996,14 +5862,14 @@ proof - apply simp apply simp apply wp - apply (clarsimp simp: false_def) + apply clarsimp apply vcg apply wp apply (clarsimp simp: pageBits_def ccap_relation_def 
APIType_capBits_def - framesize_to_H_def cap_to_H_simps cap_page_directory_cap_lift - is_aligned_neg_mask_eq vmrights_to_H_def - Kernel_C.VMReadWrite_def Kernel_C.VMNoAccess_def - Kernel_C.VMKernelOnly_def Kernel_C.VMReadOnly_def) + framesize_to_H_def cap_to_H_simps cap_page_directory_cap_lift + is_aligned_neg_mask_eq vmrights_to_H_def + Kernel_C.VMReadWrite_def Kernel_C.VMNoAccess_def + Kernel_C.VMKernelOnly_def Kernel_C.VMReadOnly_def) apply (vcg exspec=copyGlobalMappings_modifies) apply (clarsimp simp:placeNewObject_def2) apply (wp createObjects'_pde_mappings' createObjects'_page_directory_at_global[where sz=pdBits] @@ -6011,24 +5877,24 @@ proof - apply clarsimp apply vcg apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' - archObjSize_def invs_valid_global' makeObject_pde pdBits_def - pageBits_def range_cover.aligned projectKOs APIType_capBits_def - object_type_from_H_def objBits_simps - invs_valid_objs' isFrameType_def) + archObjSize_def invs_valid_global' makeObject_pde pdBits_def + pageBits_def range_cover.aligned projectKOs APIType_capBits_def + object_type_from_H_def objBits_simps + invs_valid_objs' isFrameType_def) apply (frule invs_arch_state') apply (frule range_cover.aligned) apply (frule is_aligned_addrFromPPtr_n, simp) apply (intro conjI, simp_all add: table_bits_defs)[1] apply fastforce apply ((clarsimp simp: is_aligned_no_overflow'[where n=14, simplified] - field_simps is_aligned_mask[symmetric] mask_AND_less_0)+)[3] + field_simps is_aligned_mask[symmetric] mask_AND_less_0)+)[3] \ \VCPU\ apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') apply (simp add: object_type_from_H_def Kernel_C_defs) apply ccorres_rewrite apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff - asidInvalid_def sle_positive APIType_capBits_def shiftL_nat - objBits_simps archObjSize_def word_sle_def word_sless_def) + asidInvalid_def sle_positive APIType_capBits_def shiftL_nat + objBits_simps archObjSize_def word_sle_def word_sless_def) apply (clarsimp simp: hrs_htd_update ptBits_def objBits_simps archObjSize_def ARM_HYP_H.createObject_def pageBits_def pdBits_def) apply (rule ccorres_rhs_assoc)+ @@ -6039,8 +5905,8 @@ proof - apply wp apply (vcg exspec=vcpu_init_modifies) apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' - invs_valid_global' range_cover.aligned APIType_capBits_def - invs_valid_objs' isFrameType_def invs_urz) + invs_valid_global' range_cover.aligned APIType_capBits_def + invs_valid_objs' isFrameType_def invs_urz) apply (frule range_cover.aligned) apply (clarsimp simp: ccap_relation_def cap_vcpu_cap_lift cap_to_H_def) apply (rule sym, rule is_aligned_neg_mask_eq) @@ -6069,7 +5935,7 @@ lemma gsCNodes_update_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -6119,11 +5985,6 @@ lemma threadSet_domain_ccorres [corres]: apply (simp add: map_to_ctes_upd_tcb_no_ctes map_to_tcbs_upd tcb_cte_cases_def) apply (simp add: cep_relations_drop_fun_upd cvariable_relation_upd_const ko_at_projectKO_opt) - apply (rule conjI) - defer - apply (erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) subgoal by (simp add: ctcb_relation_def) @@ -6156,11 
+6017,11 @@ proof - apply (rule ccorres_cond_seq) (* Architecture specific objects. *) apply (rule_tac - Q="createObject_hs_preconds regionBase newType userSize isdev" and - S="createObject_c_preconds1 regionBase newType userSize isdev" and - R="createObject_hs_preconds regionBase newType userSize isdev" and - T="createObject_c_preconds1 regionBase newType userSize isdev" - in ccorres_Cond_rhs) + Q="createObject_hs_preconds regionBase newType userSize isdev" and + S="createObject_c_preconds1 regionBase newType userSize isdev" and + R="createObject_hs_preconds regionBase newType userSize isdev" and + T="createObject_c_preconds1 regionBase newType userSize isdev" + in ccorres_Cond_rhs) apply (subgoal_tac "toAPIType newType = None") apply clarsimp apply (rule ccorres_rhs_assoc)+ @@ -6177,10 +6038,9 @@ proof - region_actually_is_bytes region_actually_is_bytes_def) apply (clarsimp simp: object_type_from_H_def - ARM_HYP_H.toAPIType_def Kernel_C_defs toAPIType_def - nAPIObjects_def word_sle_def createObject_c_preconds_def - word_le_nat_alt split: - apiobject_type.splits object_type.splits) + ARM_HYP_H.toAPIType_def Kernel_C_defs toAPIType_def + nAPIObjects_def word_sle_def createObject_c_preconds_def word_le_nat_alt + split: apiobject_type.splits object_type.splits) apply (subgoal_tac "\apiType. newType = APIObjectType apiType") apply clarsimp apply (rule ccorres_guard_imp) @@ -6188,24 +6048,24 @@ proof - (* Untyped *) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def - word_sle_def intro!: Corres_UL_C.ccorres_cond_empty - Corres_UL_C.ccorres_cond_univ ccorres_rhs_assoc) + toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def word_sle_def + intro!: Corres_UL_C.ccorres_cond_empty + Corres_UL_C.ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.Untyped) - (unat (userSizea :: word32)) isdev" and - A'=UNIV in - ccorres_guard_imp) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.Untyped) + (unat (userSizea :: word32)) isdev" and + A'=UNIV in + ccorres_guard_imp) apply (rule ccorres_symb_exec_r) apply (rule ccorres_return_C, simp, simp, simp) apply vcg apply (rule conseqPre, vcg, clarsimp) apply simp apply (clarsimp simp: ccap_relation_def cap_to_H_def ARM_HYP_H.getObjectSize_def - apiGetObjectSize_def cap_untyped_cap_lift to_bool_eq_0 true_def - aligned_add_aligned - split: option.splits) + apiGetObjectSize_def cap_untyped_cap_lift to_bool_eq_0 + aligned_add_aligned + split: option.splits) apply (subst is_aligned_neg_mask_eq [OF is_aligned_weaken]) apply (erule range_cover.aligned) apply (clarsimp simp:APIType_capBits_def untypedBits_defs) @@ -6215,15 +6075,15 @@ proof - (* TCB *) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def - word_sle_def intro!: Corres_UL_C.ccorres_cond_empty - Corres_UL_C.ccorres_cond_univ ccorres_rhs_assoc) + toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def word_sle_def + intro!: Corres_UL_C.ccorres_cond_empty + Corres_UL_C.ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" and + 
A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" in + ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) apply (ccorres_remove_UNIV_guard) apply (simp add: hrs_htd_update) @@ -6240,7 +6100,7 @@ proof - apply (simp add: obj_at'_real_def) apply (wp placeNewObject_ko_wp_at') apply vcg - apply (clarsimp simp: dc_def) + apply clarsimp apply vcg apply (clarsimp simp: CPSR_def) apply (rule conseqPre, vcg, clarsimp) @@ -6248,7 +6108,6 @@ proof - createObject_c_preconds_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (simp add: getObjectSize_def objBits_simps word_bits_conv ARM_HYP_H.getObjectSize_def apiGetObjectSize_def @@ -6257,11 +6116,10 @@ proof - region_actually_is_bytes_def APIType_capBits_def) apply (frule(1) ghost_assertion_size_logic_no_unat) apply (clarsimp simp: ccap_relation_def cap_to_H_def - getObjectSize_def ARM_HYP_H.getObjectSize_def - apiGetObjectSize_def Collect_const_mem - cap_thread_cap_lift to_bool_def true_def - aligned_add_aligned - split: option.splits) + getObjectSize_def ARM_HYP_H.getObjectSize_def + apiGetObjectSize_def Collect_const_mem + cap_thread_cap_lift aligned_add_aligned + split: option.splits) apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs tcb_ptr_to_ctcb_ptr_def invs_valid_objs' invs_urz isFrameType_def) @@ -6276,17 +6134,16 @@ proof - (* Endpoint *) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def - word_sle_def intro!: ccorres_cond_empty ccorres_cond_univ - ccorres_rhs_assoc) + toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def word_sle_def + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.EndpointObject) - (unat (userSizea :: word32)) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.EndpointObject) - (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.EndpointObject) + (unat (userSizea :: word32)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.EndpointObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) apply (ccorres_remove_UNIV_guard) apply (simp add: hrs_htd_update) apply (ctac (no_vcg) pre only: add: ccorres_placeNewObject_endpoint) @@ -6296,38 +6153,37 @@ proof - apply (rule conseqPre, vcg, clarsimp) apply wp apply (clarsimp simp: ccap_relation_def cap_to_H_def - getObjectSize_def ARM_HYP_H.getObjectSize_def - objBits_simps apiGetObjectSize_def epSizeBits_def - Collect_const_mem cap_endpoint_cap_lift - to_bool_def true_def - split: option.splits dest!: range_cover.aligned) + getObjectSize_def ARM_HYP_H.getObjectSize_def + objBits_simps apiGetObjectSize_def epSizeBits_def + Collect_const_mem cap_endpoint_cap_lift + split: option.splits + dest!: range_cover.aligned) apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (auto simp: getObjectSize_def objBits_simps - ARM_HYP_H.getObjectSize_def apiGetObjectSize_def - epSizeBits_def word_bits_conv - elim!: is_aligned_no_wrap' intro!: range_cover_simpleI)[1] + ARM_HYP_H.getObjectSize_def apiGetObjectSize_def + epSizeBits_def word_bits_conv + elim!: is_aligned_no_wrap' + intro!: 
range_cover_simpleI)[1] (* Notification *) apply (clarsimp simp: createObject_c_preconds_def) apply (clarsimp simp: getObjectSize_def objBits_simps - ARM_HYP_H.getObjectSize_def apiGetObjectSize_def - epSizeBits_def word_bits_conv word_sle_def word_sless_def) + ARM_HYP_H.getObjectSize_def apiGetObjectSize_def + epSizeBits_def word_bits_conv word_sle_def word_sless_def) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def - word_sle_def intro!: ccorres_cond_empty ccorres_cond_univ - ccorres_rhs_assoc) + toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def word_sle_def + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.NotificationObject) - (unat (userSizea :: word32)) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.NotificationObject) - (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.NotificationObject) + (unat (userSizea :: word32)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.NotificationObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) apply (ccorres_remove_UNIV_guard) apply (simp add: hrs_htd_update) apply (ctac (no_vcg) pre only: add: ccorres_placeNewObject_notification) @@ -6337,38 +6193,39 @@ proof - apply (rule conseqPre, vcg, clarsimp) apply wp apply (clarsimp simp: ccap_relation_def cap_to_H_def - getObjectSize_def ARM_HYP_H.getObjectSize_def - apiGetObjectSize_def ntfnSizeBits_def objBits_simps - Collect_const_mem cap_notification_cap_lift to_bool_def true_def - dest!: range_cover.aligned split: option.splits) + getObjectSize_def ARM_HYP_H.getObjectSize_def + apiGetObjectSize_def ntfnSizeBits_def objBits_simps + Collect_const_mem cap_notification_cap_lift + dest!: range_cover.aligned + split: option.splits) apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (auto simp: getObjectSize_def objBits_simps - ARM_HYP_H.getObjectSize_def apiGetObjectSize_def - ntfnSizeBits_def word_bits_conv - elim!: is_aligned_no_wrap' intro!: range_cover_simpleI)[1] + ARM_HYP_H.getObjectSize_def apiGetObjectSize_def + ntfnSizeBits_def word_bits_conv + elim!: is_aligned_no_wrap' + intro!: range_cover_simpleI)[1] (* CapTable *) apply (clarsimp simp: createObject_c_preconds_def) apply (clarsimp simp: getObjectSize_def objBits_simps - ARM_HYP_H.getObjectSize_def apiGetObjectSize_def - ntfnSizeBits_def word_bits_conv) + ARM_HYP_H.getObjectSize_def apiGetObjectSize_def + ntfnSizeBits_def word_bits_conv) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def - word_sle_def word_sless_def zero_le_sint_32 - intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc - ccorres_move_c_guards ccorres_Guard_Seq) + toAPIType_def ARM_HYP_H.toAPIType_def nAPIObjects_def + word_sle_def word_sless_def zero_le_sint_32 + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc + ccorres_move_c_guards ccorres_Guard_Seq) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.CapTableObject) - (unat (userSizea :: word32)) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.CapTableObject) - (unat userSizea) isdev" in - 
ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.CapTableObject) + (unat (userSizea :: word32)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.CapTableObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) apply (simp add:field_simps hrs_htd_update) apply (ccorres_remove_UNIV_guard) apply (ctac pre only: add: ccorres_placeNewObject_captable) @@ -6386,22 +6243,19 @@ proof - apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (frule(1) ghost_assertion_size_logic_no_unat) - apply (clarsimp simp: getObjectSize_def objBits_simps - ARM_HYP_H.getObjectSize_def apiGetObjectSize_def - cteSizeBits_def word_bits_conv add.commute createObject_c_preconds_def - region_actually_is_bytes_def - invs_valid_objs' invs_urz - elim!: is_aligned_no_wrap' - dest: word_of_nat_le intro!: range_coverI)[1] + apply (clarsimp simp: objBits_simps ARM_HYP_H.getObjectSize_def apiGetObjectSize_def + cteSizeBits_def word_bits_conv add.commute createObject_c_preconds_def + region_actually_is_bytes_def invs_valid_objs' invs_urz + elim!: is_aligned_no_wrap' + dest: word_of_nat_le) apply (clarsimp simp: createObject_hs_preconds_def hrs_htd_update isFrameType_def) apply (frule range_cover.strong_times_32[folded addr_card_wb], simp+) apply (subst h_t_array_valid_retyp, simp+) apply (simp add: power_add cte_C_size objBits_defs) apply (frule range_cover.aligned) - apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_cnode_cap_lift to_bool_def true_def + apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_cnode_cap_lift getObjectSize_def apiGetObjectSize_def objBits_simps' field_simps is_aligned_power2 addr_card_wb is_aligned_weaken[where y=word_size_bits] is_aligned_neg_mask @@ -6431,7 +6285,7 @@ lemma ccorres_guard_impR: lemma typ_clear_region_dom: "dom (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: 'b :: mem_type typ_heap) \ dom ((clift hp) :: 'b :: mem_type typ_heap)" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def) + apply (clarsimp simp:lift_t_def lift_typ_heap_def comp_def) apply (clarsimp simp:lift_state_def) apply (case_tac hp) apply (clarsimp simp:) @@ -7420,7 +7274,8 @@ lemma createObject_caps_overlap_reserved_ret': apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_caps_overlap_reserved_ret'[where sz = "APIType_capBits ty us"]]) apply assumption - apply (case_tac r,simp) + apply (rename_tac rv s) + apply (case_tac rv,simp) apply clarsimp apply (erule caps_overlap_reserved'_subseteq) apply (rule untypedRange_in_capRange) @@ -7493,7 +7348,8 @@ lemma createObject_IRQHandler: apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_IRQHandler[where irq = x and P = "\_ _. 
False"]]) apply assumption - apply (case_tac r,clarsimp+) + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) apply (clarsimp simp:word_bits_conv) done @@ -7510,7 +7366,8 @@ lemma createObject_capClass[wp]: apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_range_helper]) apply assumption - apply (case_tac r,clarsimp+) + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) apply (clarsimp simp:word_bits_conv ) apply (rule range_cover_full) apply (simp add:word_bits_conv)+ @@ -8246,7 +8103,7 @@ shows "ccorres dc xfdc apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2, simp) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_add_return) - apply (simp only: dc_def[symmetric] hrs_htd_update) + apply (simp only: hrs_htd_update) apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? apply (rule ccorres_split_nothrow, rule_tac S="{ptr .. ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1}" @@ -8413,9 +8270,9 @@ shows "ccorres dc xfdc including no_pre apply (wp insertNewCap_invs' insertNewCap_valid_pspace' insertNewCap_caps_overlap_reserved' insertNewCap_pspace_no_overlap' insertNewCap_caps_no_overlap'' insertNewCap_descendants_range_in' - insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at static_imp_wp) + insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at hoare_weak_lift_imp) apply (wp insertNewCap_cte_wp_at_other) - apply (wp hoare_vcg_all_lift static_imp_wp insertNewCap_cte_at) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp insertNewCap_cte_at) apply (clarsimp simp:conj_comms | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_distinct')+ @@ -8449,7 +8306,7 @@ shows "ccorres dc xfdc hoare_vcg_prop createObject_gsCNodes_p createObject_cnodes_have_size) apply (rule hoare_vcg_conj_lift[OF createObject_capRange_helper]) apply (wp createObject_cte_wp_at' createObject_ex_cte_cap_wp_to - createObject_no_inter[where sz = sz] hoare_vcg_all_lift static_imp_wp)+ + createObject_no_inter[where sz = sz] hoare_vcg_all_lift hoare_weak_lift_imp)+ apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace' field_simps range_cover.sz conj_comms range_cover.aligned range_cover_sz' is_aligned_shiftl_self aligned_add_aligned[OF range_cover.aligned]) @@ -8596,9 +8453,9 @@ shows "ccorres dc xfdc apply (frule(1) range_cover_gsMaxObjectSize, fastforce, assumption) apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) ghost_assertion_size_logic)+ - apply (simp add: o_def) - apply (case_tac newType,simp_all add:object_type_from_H_def Kernel_C_defs - nAPIObjects_def APIType_capBits_def o_def split:apiobject_type.splits)[1] + apply (case_tac newType, + simp_all add: object_type_from_H_def Kernel_C_defs nAPIObjects_def APIType_capBits_def o_def + split: apiobject_type.splits)[1] subgoal by (simp add:unat_eq_def word_unat.Rep_inverse' word_less_nat_alt) subgoal by (clarsimp simp:objBits_simps',unat_arith) apply (fold_subgoals (prefix))[3] diff --git a/proof/crefine/ARM_HYP/SR_lemmas_C.thy b/proof/crefine/ARM_HYP/SR_lemmas_C.thy index c8a780b0b3..1fd9818801 100644 --- a/proof/crefine/ARM_HYP/SR_lemmas_C.thy +++ b/proof/crefine/ARM_HYP/SR_lemmas_C.thy @@ -310,11 +310,15 @@ lemma cmdbnode_relation_mdb_node_to_H [simp]: unfolding cmdbnode_relation_def mdb_node_to_H_def mdb_node_lift_def cte_lift_def by (fastforce split: option.splits) -definition - tcb_no_ctes_proj :: "tcb \ Structures_H.thread_state \ word32 \ word32 \ arch_tcb \ bool \ word8 \ word8 \ word8 \ nat \ fault option \ word32 option" +definition 
tcb_no_ctes_proj :: + "tcb \ Structures_H.thread_state \ machine_word \ machine_word \ arch_tcb \ bool \ word8 + \ word8 \ word8 \ nat \ fault option \ machine_word option + \ machine_word option \ machine_word option" where - "tcb_no_ctes_proj t \ (tcbState t, tcbFaultHandler t, tcbIPCBuffer t, tcbArch t, tcbQueued t, - tcbMCP t, tcbPriority t, tcbDomain t, tcbTimeSlice t, tcbFault t, tcbBoundNotification t)" + "tcb_no_ctes_proj t \ + (tcbState t, tcbFaultHandler t, tcbIPCBuffer t, tcbArch t, tcbQueued t, + tcbMCP t, tcbPriority t, tcbDomain t, tcbTimeSlice t, tcbFault t, tcbBoundNotification t, + tcbSchedNext t, tcbSchedPrev t)" lemma tcb_cte_cases_proj_eq [simp]: "tcb_cte_cases p = Some (getF, setF) \ @@ -324,21 +328,21 @@ lemma tcb_cte_cases_proj_eq [simp]: lemma map_to_ctes_upd_cte': "\ ksPSpace s p = Some (KOCTE cte'); is_aligned p cte_level_bits; ps_clear p cte_level_bits s \ - \ map_to_ctes (ksPSpace s(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" + \ map_to_ctes ((ksPSpace s)(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" apply (erule (1) map_to_ctes_upd_cte) apply (simp add: field_simps ps_clear_def3 cte_level_bits_def mask_def) done lemma map_to_ctes_upd_tcb': - "[| ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; - ps_clear p tcbBlockSizeBits s |] -==> map_to_ctes (ksPSpace s(p |-> KOTCB tcb)) = - (%x. if EX getF setF. + "\ ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; + ps_clear p tcbBlockSizeBits s \ + \ map_to_ctes ((ksPSpace s)(p \ KOTCB tcb)) = + (\x. if EX getF setF. tcb_cte_cases (x - p) = Some (getF, setF) & - getF tcb ~= getF tcb' - then case tcb_cte_cases (x - p) of - Some (getF, setF) => Some (getF tcb) - else ctes_of s x)" + getF tcb \ getF tcb' + then case tcb_cte_cases (x - p) of + Some (getF, setF) \ Some (getF tcb) + else ctes_of s x)" apply (erule (1) map_to_ctes_upd_tcb) apply (simp add: field_simps ps_clear_def3 mask_def objBits_defs) done @@ -459,7 +463,7 @@ lemma fst_setCTE: assumes ct: "cte_at' dest s" and rl: "\s'. 
\ ((), s') \ fst (setCTE dest cte s); (s' = s \ ksPSpace := ksPSpace s' \); - (ctes_of s' = ctes_of s(dest \ cte)); + (ctes_of s' = (ctes_of s)(dest \ cte)); (map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s')); (map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s')); (map_to_pdes (ksPSpace s) = map_to_pdes (ksPSpace s')); @@ -486,7 +490,7 @@ proof - by clarsimp note thms = this - have ceq: "ctes_of s' = ctes_of s(dest \ cte)" + have ceq: "ctes_of s' = (ctes_of s)(dest \ cte)" by (rule use_valid [OF thms(1) setCTE_ctes_of_wp]) simp show ?thesis @@ -676,7 +680,6 @@ proof (rule cor_map_relI [OF map_option_eq_dom_eq]) hence "tcb_no_ctes_proj tcb = tcb_no_ctes_proj tcb'" using om apply - - apply (simp add: o_def) apply (drule fun_cong [where x = x]) apply simp done @@ -1489,9 +1492,9 @@ lemma cmap_relation_cong: apply (erule imageI) done -lemma ctcb_relation_null_queue_ptrs: +lemma ctcb_relation_null_ep_ptrs: assumes rel: "cmap_relation mp mp' tcb_ptr_to_ctcb_ptr ctcb_relation" - and same: "map_option tcb_null_queue_ptrs \ mp'' = map_option tcb_null_queue_ptrs \ mp'" + and same: "map_option tcb_null_ep_ptrs \ mp'' = map_option tcb_null_ep_ptrs \ mp'" shows "cmap_relation mp mp'' tcb_ptr_to_ctcb_ptr ctcb_relation" using rel apply (rule iffD1 [OF cmap_relation_cong, OF _ map_option_eq_dom_eq, rotated -1]) @@ -1499,7 +1502,7 @@ lemma ctcb_relation_null_queue_ptrs: apply (rule same [symmetric]) apply (drule compD [OF same]) apply (case_tac b, case_tac b') - apply (simp add: ctcb_relation_def tcb_null_queue_ptrs_def) + apply (simp add: ctcb_relation_def tcb_null_ep_ptrs_def) done (* Levity: added (20090419 09:44:27) *) @@ -1512,7 +1515,7 @@ lemma ntfnQueue_tail_mask_4 [simp]: lemma map_to_ctes_upd_tcb_no_ctes: "\ko_at' tcb thread s ; \x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x \ - \ map_to_ctes (ksPSpace s(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" apply (erule obj_atE') apply (simp add: projectKOs objBits_simps) apply (subst map_to_ctes_upd_tcb') @@ -1526,15 +1529,15 @@ lemma map_to_ctes_upd_tcb_no_ctes: lemma update_ntfn_map_tos: fixes P :: "Structures_H.notification \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KONotification ko)) = map_to_eps (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ KONotification ko)) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KONotification ko)) = map_to_eps (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KONotification ko)) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1542,15 +1545,15 @@ lemma update_ntfn_map_tos: lemma update_ep_map_tos: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ KOEndpoint ko)) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ 
KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1558,14 +1561,14 @@ lemma update_ep_map_tos: lemma update_tcb_map_tos: fixes P :: "tcb \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" - and "map_to_ntfns (ksPSpace s(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ KOTCB ko)) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" + and "map_to_ntfns ((ksPSpace s)(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KOTCB ko)) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1573,15 +1576,15 @@ lemma update_tcb_map_tos: lemma update_asidpool_map_tos: fixes P :: "asidpool \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = 
map_to_eps (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI @@ -1590,27 +1593,27 @@ lemma update_asidpool_map_tos: arch_kernel_object.split_asm) lemma update_asidpool_map_to_asidpools: - "map_to_asidpools (ksPSpace s(p \ KOArch (KOASIDPool ap))) + "map_to_asidpools ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = (map_to_asidpools (ksPSpace s))(p \ ap)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_to_ptes: - "map_to_ptes (ksPSpace s(p \ KOArch (KOPTE pte))) + "map_to_ptes ((ksPSpace s)(p \ KOArch (KOPTE pte))) = (map_to_ptes (ksPSpace s))(p \ pte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_tos: fixes P :: "pte \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1618,22 +1621,22 @@ lemma update_pte_map_tos: auto simp: projectKO_opts_defs) lemma update_pde_map_to_pdes: - "map_to_pdes (ksPSpace s(p \ KOArch (KOPDE pde))) + "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPDE pde))) = (map_to_pdes (ksPSpace s))(p \ pde)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pde_map_tos: fixes P :: "pde \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" - and "map_to_ptes 
(ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1643,15 +1646,15 @@ lemma update_pde_map_tos: lemma update_vcpu_map_tos: fixes P :: "vcpu \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_ptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_pdes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_ptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_pdes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1736,7 +1739,6 @@ where | "thread_state_to_tsType (Structures_H.BlockedOnSend oref badge cg cgr isc) = scast 
ThreadState_BlockedOnSend" | "thread_state_to_tsType (Structures_H.BlockedOnNotification oref) = scast ThreadState_BlockedOnNotification" - lemma ctcb_relation_thread_state_to_tsType: "ctcb_relation tcb ctcb \ tsType_CL (thread_state_lift (tcbState_C ctcb)) = thread_state_to_tsType (tcbState tcb)" unfolding ctcb_relation_def cthread_state_relation_def @@ -2014,9 +2016,9 @@ lemma memory_cross_over: apply (cut_tac p=ptr in unat_mask_2_less_4) apply (subgoal_tac "(ptr && ~~ mask 2) + (ptr && mask 2) = ptr") apply (subgoal_tac "!n x. n < 4 \ (unat (x::word32) = n) = (x = of_nat n)") - apply (auto simp add: eval_nat_numeral unat_eq_0 add.commute take_bit_Suc - elim!: less_SucE)[1] - apply (clarsimp simp add: unat32_eq_of_nat word_bits_def) + apply (clarsimp simp: eval_nat_numeral) + apply (fastforce simp: add.commute elim!: less_SucE) + apply (clarsimp simp: unat32_eq_of_nat word_bits_def) apply (simp add: add.commute word_plus_and_or_coroll2) done @@ -2135,7 +2137,7 @@ lemma gs_set_assn_Delete_cstate_relation: lemma update_typ_at: assumes at: "obj_at' P p s" and tp: "\obj. P obj \ koTypeOf (injectKOS obj) = koTypeOf ko" - shows "typ_at' T p' (s \ksPSpace := ksPSpace s(p \ ko)\) = typ_at' T p' s" + shows "typ_at' T p' (s \ksPSpace := (ksPSpace s)(p \ ko)\) = typ_at' T p' s" using at by (auto elim!: obj_atE' simp: typ_at'_def ko_wp_at'_def dest!: tp[rule_format] @@ -2325,6 +2327,7 @@ lemmas seL4_VCPUReg_defs = seL4_VCPUReg_R10fiq_def seL4_VCPUReg_R11fiq_def seL4_VCPUReg_R12fiq_def + seL4_VCPUReg_VMPIDR_def seL4_VCPUReg_SPSRsvc_def seL4_VCPUReg_SPSRabt_def seL4_VCPUReg_SPSRund_def @@ -2403,10 +2406,18 @@ lemma rf_sr_armKSGICVCPUNumListRegs: by (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def Let_def) lemma update_vcpu_map_to_vcpu: - "map_to_vcpus (ksPSpace s(p \ KOArch (KOVCPU vcpu))) + "map_to_vcpus ((ksPSpace s)(p \ KOArch (KOVCPU vcpu))) = (map_to_vcpus (ksPSpace s))(p \ vcpu)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) +lemma rf_sr_ctcb_queue_relation: + "\ (s, s') \ rf_sr; d \ maxDomain; p \ maxPriority \ + \ ctcb_queue_relation (ksReadyQueues s (d, p)) + (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p))" + unfolding rf_sr_def cstate_relation_def cready_queues_relation_def + apply (clarsimp simp: Let_def seL4_MinPrio_def minDom_def maxDom_to_H maxPrio_to_H) + done + lemma rf_sr_sched_action_relation: "(s, s') \ rf_sr \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" @@ -2438,5 +2449,18 @@ lemma unat_scast_numDomains: "unat (SCAST(32 signed \ machine_word_len) Kernel_C.numDomains) = unat Kernel_C.numDomains" by (simp add: scast_eq sint_numDomains_to_H unat_numDomains_to_H numDomains_machine_word_safe) +(* link up Kernel_Config loaded from the seL4 build system with physBase in C code *) +lemma physBase_spec: + "\s. \\ {s} Call physBase_'proc {t. ret__unsigned_long_' t = Kernel_Config.physBase }" + apply (rule allI, rule conseqPre, vcg) + apply (simp add: Kernel_Config.physBase_def) + done + +lemma rf_sr_obj_update_helper: + "(s, s'\ globals := globals s' \ t_hrs_' := t_hrs_' (globals (undefined + \ globals := (undefined \ t_hrs_' := f (globals s') (t_hrs_' (globals s')) \)\))\\) \ rf_sr + \ (s, globals_update (\v. 
t_hrs_'_update (f v) v) s') \ rf_sr" + by (simp cong: StateSpace.state.fold_congs globals.fold_congs) + end end diff --git a/proof/crefine/ARM_HYP/Schedule_C.thy b/proof/crefine/ARM_HYP/Schedule_C.thy index c00d98a232..4c0ca2714c 100644 --- a/proof/crefine/ARM_HYP/Schedule_C.thy +++ b/proof/crefine/ARM_HYP/Schedule_C.thy @@ -1,29 +1,16 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only *) theory Schedule_C -imports Tcb_C +imports Tcb_C Detype_C begin instance tcb :: no_vcpu by intro_classes auto -context begin interpretation Arch . (*FIXME: arch_split*) - -(* FIXME: Move to Refine *) -crunches Arch.switchToThread - for valid_queues'[wp]: valid_queues' - (ignore: clearExMonitor wp: crunch_wps) -crunches switchToIdleThread - for ksCurDomain[wp]: "\s. P (ksCurDomain s)" -crunches switchToIdleThread, switchToThread - for valid_pspace'[wp]: valid_pspace' - (simp: whenE_def) - -end - (*FIXME: arch_split: move up?*) context Arch begin context begin global_naming global @@ -50,16 +37,17 @@ lemma switchToIdleThread_ccorres: "ccorres dc xfdc invs_no_cicd' UNIV hs switchToIdleThread (Call switchToIdleThread_'proc)" apply (cinit) + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l) apply (ctac (no_vcg) add: Arch_switchToIdleThread_ccorres) apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule_tac P="\s. thread = ksIdleThread s" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: simpler_modify_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) - apply (wpsimp simp: ARM_HYP_H.switchToIdleThread_def - wp: vcpuSwitch_it')+ + apply (wpsimp simp: ARM_HYP_H.switchToIdleThread_def wp: hoare_drop_imps)+ done lemma Arch_switchToThread_ccorres: @@ -78,7 +66,6 @@ lemma Arch_switchToThread_ccorres: apply (ctac (no_vcg) add: setVMRoot_ccorres) apply (simp (no_asm) del: Collect_const) apply (rule_tac A'=UNIV in ccorres_guard_imp2) - apply (fold dc_def)[1] apply (ctac add: clearExMonitor_ccorres) apply wpsimp+ apply (vcg exspec=vcpu_switch_modifies) @@ -94,6 +81,13 @@ lemma Arch_switchToThread_ccorres: apply (clarsimp simp: typ_heap_simps ctcb_relation_def carch_tcb_relation_def) done +lemma invs_no_cicd'_pspace_aligned': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_aligned' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + +lemma invs_no_cicd'_pspace_distinct': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_distinct' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) (* FIXME: move *) lemma switchToThread_ccorres: @@ -103,47 +97,34 @@ lemma switchToThread_ccorres: hs (switchToThread t) (Call switchToThread_'proc)" - apply (cinit lift: thread_') + apply (clarsimp simp: switchToThread_def) + apply (rule ccorres_symb_exec_l'[OF _ _ isRunnable_sp]; (solves wpsimp)?) + apply (rule ccorres_symb_exec_l'[OF _ _ assert_sp]; (solves wpsimp)?) 
+ apply (rule ccorres_stateAssert_fwd)+ + apply (cinit' lift: thread_') apply (ctac (no_vcg) add: Arch_switchToThread_ccorres) apply (ctac (no_vcg) add: tcbSchedDequeue_ccorres) + apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: setCurThread_def simpler_modify_def) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def) - apply wp+ - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def) - done - -lemma get_tsType_ccorres2: - "ccorres (\r r'. r' = thread_state_to_tsType r) ret__unsigned_' (tcb_at' thread) - (UNIV \ {s. f s = tcb_ptr_to_ctcb_ptr thread} \ - {s. cslift s (Ptr &(f s\[''tcbState_C''])) = Some (thread_state_' s)}) [] - (getThreadState thread) (Call thread_state_get_tsType_'proc)" - unfolding getThreadState_def - apply (rule ccorres_from_spec_modifies [where P=\, simplified]) - apply (rule thread_state_get_tsType_spec) - apply (rule thread_state_get_tsType_modifies) - apply simp - apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: typ_heap_simps) - apply (rule bexI [rotated, OF threadGet_eq], assumption) - apply simp - apply (drule ctcb_relation_thread_state_to_tsType) - apply simp + apply (clarsimp simp: setCurThread_def simpler_modify_def rf_sr_def cstate_relation_def + Let_def carch_state_relation_def cmachine_state_relation_def) + apply (wpsimp wp: Arch_switchToThread_invs_no_cicd' hoare_drop_imps + | strengthen invs_no_cicd'_pspace_aligned' invs_no_cicd'_pspace_distinct')+ done lemma activateThread_ccorres: "ccorres dc xfdc (ct_in_state' activatable' and (\s. sch_act_wf (ksSchedulerAction s) s) - and valid_queues and valid_objs') + and valid_objs' and pspace_aligned' and pspace_distinct') UNIV [] activateThread (Call activateThread_'proc)" apply (cinit) apply (rule ccorres_pre_getCurThread) - apply (ctac add: get_tsType_ccorres2 [where f="\s. ksCurThread_' (globals s)"]) + apply (ctac add: get_tsType_ccorres [where f="\s. ksCurThread_' (globals s)"]) apply (rule_tac P="activatable' rv" in ccorres_gen_asm) apply (wpc) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) @@ -153,7 +134,7 @@ lemma activateThread_ccorres: apply (rule ccorres_cond_true) apply (rule ccorres_return_Skip) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) - apply (simp add: "StrictC'_thread_state_defs" del: Collect_const) + apply (simp add: ThreadState_defs del: Collect_const) apply (rule ccorres_cond_false) apply (rule ccorres_cond_false) apply (rule ccorres_cond_true) @@ -161,7 +142,7 @@ lemma activateThread_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: activateIdleThread_def return_def) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) - apply (simp add: "StrictC'_thread_state_defs" del: Collect_const) + apply (simp add: ThreadState_defs del: Collect_const) apply (rule ccorres_cond_false) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -184,7 +165,7 @@ lemma activateThread_ccorres: apply (subgoal_tac "ksCurThread_' (globals s') = tcb_ptr_to_ctcb_ptr (ksCurThread s)") prefer 2 apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (clarsimp simp: typ_heap_simps ThreadState_Running_def mask_def) + apply (clarsimp simp: typ_heap_simps ThreadState_defs mask_def) done lemma ceqv_Guard_UNIV_Skip: @@ -216,23 +197,47 @@ lemmas ccorres_remove_tail_Guard_Skip = ccorres_abstract[where xf'="\_. 
()", OF ceqv_remove_tail_Guard_Skip] lemma switchToThread_ccorres': - "ccorres (\_ _. True) xfdc + "ccorres dc xfdc (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) hs (switchToThread t) (Call switchToThread_'proc)" apply (rule ccorres_guard_imp2) - apply (ctac (no_vcg) add: switchToThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: switchToThread_ccorres) apply auto done +lemma ccorres_pre_getQueue: + assumes cc: "\queue. ccorres r xf (P queue) (P' queue) hs (f queue) c" + shows "ccorres r xf (\s. P (ksReadyQueues s (d, p)) s \ d \ maxDomain \ p \ maxPriority) + {s'. \queue. (let cqueue = index (ksReadyQueues_' (globals s')) + (cready_queues_index_to_C d p) in + ctcb_queue_relation queue cqueue) \ s' \ P' queue} + hs (getQueue d p >>= (\queue. f queue)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l2) + defer + defer + apply (rule gq_sp) + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply assumption + apply (clarsimp simp: getQueue_def gets_exs_valid) + apply clarsimp + apply (drule spec, erule mp) + apply (erule rf_sr_ctcb_queue_relation) + apply (simp add: maxDom_to_H maxPrio_to_H)+ + done + lemma chooseThread_ccorres: "ccorres dc xfdc all_invs_but_ct_idle_or_in_cur_domain' UNIV [] chooseThread (Call chooseThread_'proc)" proof - note prio_and_dom_limit_helpers [simp] - note ksReadyQueuesL2Bitmap_nonzeroI [simp] note Collect_const_mem [simp] (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the shape of the proof compared to when numDomains > 1 *) @@ -242,9 +247,22 @@ proof - "\s. invs_no_cicd' s \ ksCurDomain s \ maxDomain" by (simp add: invs_no_cicd'_def) + have invs_no_cicd'_valid_bitmaps: + "\s. invs_no_cicd' s \ valid_bitmaps s" + by (simp add: invs_no_cicd'_def) + + have invs_no_cicd'_pspace_aligned': + "\s. invs_no_cicd' s \ pspace_aligned' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + + have invs_no_cicd'_pspace_distinct': + "\s. invs_no_cicd' s \ pspace_distinct' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + show ?thesis supply if_split[split del] apply (cinit) + apply (rule ccorres_stateAssert)+ apply (simp add: numDomains_sge_1_simp) apply (rule_tac xf'=dom_' and r'="\rv rv'. 
rv' = ucast rv" in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) @@ -277,7 +295,7 @@ proof - apply (rule_tac P="curdom \ maxDomain" in ccorres_cross_over_guard_no_st) apply (rule_tac P="prio \ maxPriority" in ccorres_cross_over_guard_no_st) apply (rule ccorres_pre_getQueue) - apply (rule_tac P="queue \ []" in ccorres_cross_over_guard_no_st) + apply (rule_tac P="\ tcbQueueEmpty queue" in ccorres_cross_over_guard_no_st) apply (rule ccorres_symb_exec_l) apply (rule ccorres_assert) apply (rule ccorres_symb_exec_r) @@ -292,39 +310,40 @@ proof - apply (rule conseqPre, vcg) apply (rule Collect_mono) apply clarsimp - apply (strengthen queue_in_range) apply assumption apply clarsimp apply (rule conseqPre, vcg) apply clarsimp apply (wp isRunnable_wp)+ - apply (simp add: isRunnable_def) - apply wp apply (clarsimp simp: Let_def guard_is_UNIV_def) - apply (drule invs_no_cicd'_queues) - apply (case_tac queue, simp) - apply (clarsimp simp: tcb_queue_relation'_def cready_queues_index_to_C_def numPriorities_def) - apply (clarsimp simp add: maxDom_to_H maxPrio_to_H - queue_in_range[where qdom=0, simplified, simplified maxPrio_to_H]) - apply (clarsimp simp: le_maxDomain_eq_less_numDomains unat_trans_ucast_helper ) + apply (rule conjI) + apply (clarsimp simp: le_maxDomain_eq_less_numDomains unat_trans_ucast_helper) + apply (intro conjI impI) + apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def ctcb_queue_relation_def + tcbQueueEmpty_def option_to_ctcb_ptr_def) + apply (frule_tac qdom=curdom and prio=rv in cready_queues_index_to_C_in_range') + apply fastforce + apply (clarsimp simp: num_tcb_queues_val word_less_nat_alt cready_queues_index_to_C_def2) apply wpsimp apply (clarsimp simp: guard_is_UNIV_def le_maxDomain_eq_less_numDomains word_less_nat_alt numDomains_less_numeric_explicit) - apply (frule invs_no_cicd'_queues) + apply clarsimp apply (frule invs_no_cicd'_max_CurDomain) - apply (frule invs_no_cicd'_queues) - apply (clarsimp simp: valid_queues_def lookupBitmapPriority_le_maxPriority) + apply (frule invs_no_cicd'_pspace_aligned') + apply (frule invs_no_cicd'_pspace_distinct') + apply (frule invs_no_cicd'_valid_bitmaps) + apply (frule valid_bitmaps_bitmapQ_no_L1_orphans) + apply (frule valid_bitmaps_valid_bitmapQ) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def cong: conj_cong) apply (intro conjI impI) - apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (fastforce dest: lookupBitmapPriority_obj_at' - simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) - apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) - apply (clarsimp simp: not_less le_maxDomain_eq_less_numDomains) - apply (prop_tac "ksCurDomain s = 0") - using unsigned_eq_0_iff apply force - apply (cut_tac s=s in lookupBitmapPriority_obj_at'; simp?) 
- apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (fastforce intro: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) + apply (fastforce dest: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) done qed @@ -405,7 +424,6 @@ lemma isHighestPrio_ccorres: (isHighestPrio d p) (Call isHighestPrio_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -439,14 +457,13 @@ lemma isHighestPrio_ccorres: apply (rule ccorres_return_C, simp, simp, simp) apply (rule wp_post_taut) apply (vcg exspec=getHighestPrio_modifies)+ - apply (clarsimp simp: word_le_nat_alt true_def to_bool_def maxDomain_le_unat_ucast_explicit + apply (clarsimp simp: word_le_nat_alt maxDomain_le_unat_ucast_explicit split: if_splits) done lemma schedule_ccorres: "ccorres dc xfdc invs' UNIV [] schedule (Call schedule_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -460,7 +477,7 @@ lemma schedule_ccorres: apply (rule ccorres_cond_false_seq) apply simp apply (rule_tac P=\ and P'="{s. ksSchedulerAction_' (globals s) = NULL }" in ccorres_from_vcg) - apply (clarsimp simp: dc_def return_def split: prod.splits) + apply (clarsimp simp: return_def split: prod.splits) apply (rule conseqPre, vcg, clarsimp) (* toplevel case: action is choose new thread *) apply (rule ccorres_cond_true_seq) @@ -477,7 +494,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_true_seq) (* isolate haskell part before setting thread action *) apply (simp add: scheduleChooseNewThread_def) @@ -505,7 +522,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_false_seq) apply (rule_tac xf'=was_runnable_' in ccorres_abstract, ceqv) @@ -525,7 +542,7 @@ lemma schedule_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'=fastfail_' in ccorres_split_nothrow) - apply (clarsimp simp: scheduleSwitchThreadFastfail_def dc_simp) + apply (clarsimp simp: scheduleSwitchThreadFastfail_def) apply (rule ccorres_cond_seq2[THEN iffD1]) apply (rule_tac xf'=ret__int_' and val="from_bool (curThread = it)" and R="\s. it = ksIdleThread s \ curThread = ksCurThread s" and R'=UNIV @@ -536,17 +553,17 @@ lemma schedule_ccorres: apply (rule ccorres_cond2'[where R=\], fastforce) apply clarsimp apply (rule ccorres_return[where R'=UNIV], clarsimp, vcg) - apply (rule_tac P="\s. obj_at' (\tcb. 
tcbPriority tcb = curPrio) curThread s - \ curThread = ksCurThread s - \ obj_at' (\tcb. tcbPriority tcb = targetPrio) candidate s" - and P'=UNIV in ccorres_from_vcg) - apply clarsimp - apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def cur_tcb'_def rf_sr_ksCurThread) - apply (drule (1) obj_at_cslift_tcb)+ - apply (clarsimp simp: typ_heap_simps ctcb_relation_def to_bool_def split: if_split) - apply unat_arith - apply (wpsimp wp: threadGet_obj_at2) + apply (rule_tac P="\s. obj_at' (\tcb. tcbPriority tcb = curPrio) curThread s + \ curThread = ksCurThread s + \ obj_at' (\tcb. tcbPriority tcb = targetPrio) candidate s" + and P'=UNIV in ccorres_from_vcg) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: return_def cur_tcb'_def rf_sr_ksCurThread) + apply (drule (1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) + apply unat_arith + apply clarsimp apply vcg apply ceqv (* fastfail calculation complete *) @@ -562,18 +579,17 @@ lemma schedule_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres, clarsimp) - apply (clarsimp simp: to_bool_def) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_return) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: to_bool_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) apply (fastforce simp: isHighestPrio_def' gets_def return_def get_def - NonDetMonad.bind_def + Nondet_Monad.bind_def split: prod.split) apply ceqv apply (clarsimp simp: to_bool_def) @@ -607,10 +623,10 @@ lemma schedule_ccorres: in ccorres_symb_exec_r_known_rv) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: false_def cur_tcb'_def rf_sr_ksCurThread) + apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread) apply (drule (1) obj_at_cslift_tcb)+ - apply (clarsimp simp: typ_heap_simps ctcb_relation_def to_bool_def split: if_split) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) apply (solves \unat_arith, rule iffI; simp\) apply ceqv apply clarsimp @@ -651,13 +667,13 @@ lemma schedule_ccorres: apply (wp (once) hoare_drop_imps) apply wp apply (strengthen strenghten_False_imp[where P="a = ResumeCurrentThread" for a]) - apply (clarsimp simp: conj_ac invs_queues invs_valid_objs' cong: conj_cong) + apply (clarsimp simp: conj_ac invs_valid_objs' cong: conj_cong) apply wp apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) - apply (clarsimp simp: to_bool_def true_def) + apply clarsimp apply (strengthen ko_at'_obj_at'_field) - apply (clarsimp cong: imp_cong simp: ko_at'_obj_at'_field to_bool_def true_def) + apply (clarsimp cong: imp_cong simp: ko_at'_obj_at'_field) apply wp apply clarsimp (* when runnable tcbSchedEnqueue curThread *) @@ -666,21 +682,21 @@ lemma schedule_ccorres: apply (clarsimp simp: invs'_bitmapQ_no_L1_orphans invs_ksCurDomain_maxDomain') apply (fastforce dest: invs_sch_act_wf') - apply (wp | clarsimp simp: dc_def)+ + apply wpsimp+ apply (vcg exspec=tcbSchedEnqueue_modifies) apply wp - apply (clarsimp simp: to_bool_def false_def) apply vcg - apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs' - dc_def)+ + apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_valid_objs') apply (frule invs_sch_act_wf') apply (frule 
tcb_at_invs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') apply (rule conjI) apply (clarsimp dest!: rf_sr_cscheduler_relation simp: cscheduler_action_relation_def) apply (rule conjI; clarsimp) apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: cscheduler_action_relation_def typ_heap_simps max_word_not_0 + apply (clarsimp simp: cscheduler_action_relation_def typ_heap_simps split: scheduler_action.splits) apply (frule (1) obj_at_cslift_tcb) apply (clarsimp dest!: rf_sr_cscheduler_relation invs_sch_act_wf' @@ -691,7 +707,7 @@ lemma schedule_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -724,11 +740,7 @@ lemma threadSet_timeSlice_ccorres [corres]: map_to_tcbs_upd) apply (simp add: cep_relations_drop_fun_upd cvariable_relation_upd_const ko_at_projectKO_opt) - apply (rule conjI) defer - apply (erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) apply (simp add: ctcb_relation_def) @@ -741,10 +753,10 @@ lemma timerTick_ccorres: supply subst_all [simp del] apply (cinit) apply (rule ccorres_pre_getCurThread) - apply (ctac add: get_tsType_ccorres2 [where f="\s. ksCurThread_' (globals s)"]) + apply (ctac add: get_tsType_ccorres [where f="\s. ksCurThread_' (globals s)"]) apply (rule ccorres_split_nothrow_novcg) apply wpc - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ (* thread_state.Running *) apply simp apply (rule ccorres_cond_true) @@ -766,17 +778,17 @@ lemma timerTick_ccorres: apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) apply (clarsimp simp: cur_tcb'_def) apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) - apply (ctac add: threadSet_timeSlice_ccorres[unfolded dc_def]) + apply (ctac add: threadSet_timeSlice_ccorres) apply (rule ccorres_rhs_assoc)+ apply (ctac) apply simp apply (ctac (no_vcg) add: tcbSchedAppend_ccorres) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (wp weak_sch_act_wf_lift_linear threadSet_valid_queues + apply (ctac add: rescheduleRequired_ccorres) + apply (wp weak_sch_act_wf_lift_linear threadSet_pred_tcb_at_state tcbSchedAppend_valid_objs' threadSet_valid_objs' threadSet_tcbDomain_triv | clarsimp simp: st_tcb_at'_def o_def split: if_splits)+ apply (vcg exspec=tcbSchedDequeue_modifies) - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ apply ceqv apply (clarsimp simp: decDomainTime_def numDomains_sge_1_simp) apply (rule ccorres_when[where R=\]) @@ -788,7 +800,6 @@ lemma timerTick_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply ceqv - apply (fold dc_def) apply (rule ccorres_pre_getDomainTime) apply (rename_tac rva rv'a rvb) apply (rule_tac P'="{s. 
ksDomainTime_' (globals s) = rvb}" in ccorres_inst, simp) @@ -796,13 +807,13 @@ lemma timerTick_ccorres: apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_true) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply clarsimp apply assumption apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply clarsimp apply wp apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/ARM_HYP/StateRelation_C.thy b/proof/crefine/ARM_HYP/StateRelation_C.thy index 085931cff3..cdddcb39a9 100644 --- a/proof/crefine/ARM_HYP/StateRelation_C.thy +++ b/proof/crefine/ARM_HYP/StateRelation_C.thy @@ -16,8 +16,7 @@ definition definition "array_relation r n a c \ \i \ n. r (a i) (index c (unat i))" -(* used for bound ntfn/tcb *) -definition +definition option_to_ctcb_ptr :: "machine_word option \ tcb_C ptr" where "option_to_ctcb_ptr x \ case x of None \ NULL | Some t \ tcb_ptr_to_ctcb_ptr t" @@ -251,9 +250,14 @@ fun | "register_from_H ARM_HYP.FaultIP = scast Kernel_C.FaultIP" definition - ccontext_relation :: "(MachineTypes.register \ word32) \ user_context_C \ bool" + cregs_relation :: "(MachineTypes.register \ machine_word) \ machine_word[registers_count] \ bool" +where + "cregs_relation Hregs Cregs \ \r. Hregs r = Cregs.[unat (register_from_H r)]" + +definition + ccontext_relation :: "user_context \ user_context_C \ bool" where - "ccontext_relation regs uc \ \r. regs r = index (registers_C uc) (unat (register_from_H r))" + "ccontext_relation uc_H uc_C \ cregs_relation (user_regs uc_H) (registers_C uc_C)" primrec cthread_state_relation_lifted :: "Structures_H.thread_state \ @@ -371,7 +375,9 @@ where \ tcbTimeSlice atcb = unat (tcbTimeSlice_C ctcb) \ cfault_rel (tcbFault atcb) (seL4_Fault_lift (tcbFault_C ctcb)) (lookup_fault_lift (tcbLookupFailure_C ctcb)) - \ option_to_ptr (tcbBoundNotification atcb) = tcbBoundNotification_C ctcb" + \ option_to_ptr (tcbBoundNotification atcb) = tcbBoundNotification_C ctcb + \ option_to_ctcb_ptr (tcbSchedPrev atcb) = tcbSchedPrev_C ctcb + \ option_to_ctcb_ptr (tcbSchedNext atcb) = tcbSchedNext_C ctcb" abbreviation "ep_queue_relation' \ tcb_queue_relation' tcbEPNext_C tcbEPPrev_C" @@ -600,17 +606,17 @@ definition where "cready_queues_index_to_C qdom prio \ (unat qdom) * numPriorities + (unat prio)" -definition cready_queues_relation :: - "tcb_C typ_heap \ (tcb_queue_C[num_tcb_queues]) \ (domain \ priority \ ready_queue) \ bool" -where - "cready_queues_relation h_tcb queues aqueues \ - \qdom prio. ((qdom \ ucast minDom \ qdom \ ucast maxDom \ - prio \ ucast minPrio \ prio \ ucast maxPrio) \ - (let cqueue = index queues (cready_queues_index_to_C qdom prio) in - sched_queue_relation' h_tcb (aqueues (qdom, prio)) (head_C cqueue) (end_C cqueue))) - \ (\ (qdom \ ucast minDom \ qdom \ ucast maxDom \ - prio \ ucast minPrio \ prio \ ucast maxPrio) \ aqueues (qdom, prio) = [])" +definition ctcb_queue_relation :: "tcb_queue \ tcb_queue_C \ bool" where + "ctcb_queue_relation aqueue cqueue \ + head_C cqueue = option_to_ctcb_ptr (tcbQueueHead aqueue) + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd aqueue)" +definition cready_queues_relation :: + "(domain \ priority \ ready_queue) \ (tcb_queue_C[num_tcb_queues]) \ bool" + where + "cready_queues_relation aqueues cqueues \ + \d p. 
d \ maxDomain \ p \ maxPriority + \ ctcb_queue_relation (aqueues (d, p)) (index cqueues (cready_queues_index_to_C d p))" abbreviation "cte_array_relation astate cstate @@ -748,9 +754,7 @@ where "cstate_relation astate cstate \ let cheap = t_hrs_' cstate in cpspace_relation (ksPSpace astate) (underlying_memory (ksMachineState astate)) cheap \ - cready_queues_relation (clift cheap) - (ksReadyQueues_' cstate) - (ksReadyQueues astate) \ + cready_queues_relation (ksReadyQueues astate) (ksReadyQueues_' cstate) \ zero_ranges_are_zero (gsUntypedZeroRanges astate) cheap \ cbitmap_L1_relation (ksReadyQueuesL1Bitmap_' cstate) (ksReadyQueuesL1Bitmap astate) \ cbitmap_L2_relation (ksReadyQueuesL2Bitmap_' cstate) (ksReadyQueuesL2Bitmap astate) \ diff --git a/proof/crefine/ARM_HYP/SyscallArgs_C.thy b/proof/crefine/ARM_HYP/SyscallArgs_C.thy index b4795ea32c..828eebe501 100644 --- a/proof/crefine/ARM_HYP/SyscallArgs_C.thy +++ b/proof/crefine/ARM_HYP/SyscallArgs_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -46,10 +47,8 @@ lemma replyOnRestart_invs'[wp]: "\invs'\ replyOnRestart thread reply isCall \\rv. invs'\" including no_pre apply (simp add: replyOnRestart_def) - apply (wp setThreadState_nonqueued_state_update rfk_invs' static_imp_wp) - apply (rule hoare_vcg_all_lift) - apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_vcg_all_lift rfk_ksQ) - apply (rule hoare_strengthen_post, rule gts_sp') + apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_weak_lift_imp) + apply (rule hoare_strengthen_post, rule gts_sp') apply (clarsimp simp: pred_tcb_at') apply (auto elim!: pred_tcb'_weakenE st_tcb_ex_cap'' dest: st_tcb_at_idle_thread') @@ -288,7 +287,7 @@ lemma ccorres_invocationCatch_Inr: if reply = [] then liftE (replyOnRestart thread [] isCall) \ returnOk () else liftE (replyOnRestart thread reply isCall) odE od) c" - apply (simp add: invocationCatch_def liftE_bindE o_xo_injector) + apply (simp add: invocationCatch_def liftE_bindE o_xo_injector cong: ccorres_all_cong) apply (subst ccorres_liftM_simp[symmetric]) apply (simp add: liftM_def bind_assoc bindE_def) apply (rule_tac f="\f. ccorres rvr xs P P' hs f c" for rvr xs in arg_cong) @@ -414,11 +413,13 @@ lemma is_syscall_error_codes: by ((rule iffD2[OF is_syscall_error_code_def], intro allI, rule conseqPre, vcg, safe, (simp_all add: o_def)?)+) -lemma syscall_error_throwError_ccorres_direct: +lemma syscall_error_throwError_ccorres_direct_gen: "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; \err' ft'. syscall_error_to_H (f err') ft' = Some err \ \ - ccorres (intr_and_se_rel \ dc) (liftxf errstate id v' ret__unsigned_long_') + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') \ (UNIV) (SKIP # hs) (throwError (Inl err)) code" apply (rule ccorres_from_vcg_throws) @@ -428,28 +429,35 @@ lemma syscall_error_throwError_ccorres_direct: apply (simp add: syscall_error_rel_def exception_defs) done -lemma syscall_error_throwError_ccorres_succs: +lemma syscall_error_throwError_ccorres_succs_gen: "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; \err' ft'. 
syscall_error_to_H (f err') ft' = Some err \ \ - ccorres (intr_and_se_rel \ dc) (liftxf errstate id v' ret__unsigned_long_') + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') \ (UNIV) (SKIP # hs) (throwError (Inl err)) (code ;; remainder)" apply (rule ccorres_guard_imp2, rule ccorres_split_throws) - apply (erule syscall_error_throwError_ccorres_direct) - apply simp + apply (erule syscall_error_throwError_ccorres_direct_gen; assumption) apply (rule HoarePartialProps.augment_Faults) apply (erule iffD1[OF is_syscall_error_code_def, THEN spec]) apply simp+ done -lemmas syscall_error_throwError_ccorres_n = - is_syscall_error_codes[THEN syscall_error_throwError_ccorres_direct, +lemmas syscall_error_throwError_ccorres_n_gen = + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_direct_gen, simplified o_apply] - is_syscall_error_codes[THEN syscall_error_throwError_ccorres_succs, + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_succs_gen, simplified o_apply] +lemmas syscall_error_throwError_ccorres_n = + syscall_error_throwError_ccorres_n_gen[where arrel="intr_and_se_rel \ dc", simplified] + +lemmas syscall_error_throwError_ccorres_n_inl_rrel = + syscall_error_throwError_ccorres_n_gen[where arrel="inl_rrel (intr_and_se_rel \ dc)", simplified] + definition idButNot :: "'a \ 'a" where "idButNot x = x" @@ -646,15 +654,15 @@ lemma asUser_const_rv: lemma getMRs_tcbContext: "\\s. n < unat n_msgRegisters \ n < unat (msgLength info) \ thread = ksCurThread s \ cur_tcb' s\ getMRs thread buffer info - \\rv s. obj_at' (\tcb. atcbContextGet (tcbArch tcb) (ARM_HYP_H.msgRegisters ! n) = rv ! n) (ksCurThread s) s\" + \\rv s. obj_at' (\tcb. user_regs (atcbContextGet (tcbArch tcb)) (ARM_HYP_H.msgRegisters ! n) = rv ! 
n) (ksCurThread s) s\" apply (rule hoare_assume_pre) apply (elim conjE) apply (thin_tac "thread = t" for t) apply (clarsimp simp add: getMRs_def) apply (wp|wpc)+ - apply (rule_tac P="n < length x" in hoare_gen_asm) + apply (rule_tac P="n < length rv" in hoare_gen_asm) apply (clarsimp simp: nth_append) - apply (wp mapM_wp' static_imp_wp)+ + apply (wp mapM_wp' hoare_weak_lift_imp)+ apply simp apply (rule asUser_cur_obj_at') apply (simp add: getRegister_def msgRegisters_unfold) @@ -783,11 +791,13 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_move_c_guard_tcb_ctes)+ apply (ctac (no_vcg)) + apply (rename_tac bufferCap bufferCap') apply csymbr - apply (rule_tac b="isArchObjectCap rva \ isPageCap (capCap rva)" in ccorres_case_bools') + apply (rule_tac b="isArchObjectCap bufferCap \ isPageCap (capCap bufferCap)" + in ccorres_case_bools') apply simp apply (rule ccorres_symb_exec_r) - apply (rule_tac b="capVPSize (capCap rva) \ ARMSmallPage" in ccorres_case_bools') + apply (rule_tac b="capVPSize (capCap bufferCap) \ ARMSmallPage" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -795,7 +805,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap bufferCap" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -821,7 +831,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (frule capFVMRights_range) apply (simp add: cap_frame_cap_lift generic_frame_cap_get_capFVMRights_CL_def) - apply (clarsimp simp: cap_to_H_def vmrights_to_H_def to_bool_def + apply (clarsimp simp: cap_to_H_def vmrights_to_H_def word_le_make_less Kernel_C.VMNoAccess_def Kernel_C.VMReadWrite_def Kernel_C.VMReadOnly_def Kernel_C.VMKernelOnly_def @@ -849,7 +859,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap bufferCap" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -876,7 +886,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (frule capFVMRights_range) apply (simp add: cap_frame_cap_lift generic_frame_cap_get_capFVMRights_CL_def) - apply (clarsimp simp: cap_to_H_def vmrights_to_H_def to_bool_def + apply (clarsimp simp: cap_to_H_def vmrights_to_H_def word_le_make_less Kernel_C.VMNoAccess_def Kernel_C.VMReadWrite_def Kernel_C.VMReadOnly_def Kernel_C.VMKernelOnly_def @@ -1082,7 +1092,7 @@ lemma getMRs_rel: getMRs thread buffer mi \\args. getMRs_rel args buffer\" apply (simp add: getMRs_rel_def) apply (rule hoare_pre) - apply (rule_tac x=mi in hoare_vcg_exI) + apply (rule_tac x=mi in hoare_exI) apply wp apply (rule_tac Q="\rv s. thread = ksCurThread s \ fst (getMRs thread buffer mi s) = {(rv,s)}" in hoare_strengthen_post) apply (wp det_result det_wp_getMRs) @@ -1210,7 +1220,10 @@ lemma getSyscallArg_ccorres_foo: apply (simp add: word_less_nat_alt split: if_split) apply (rule ccorres_add_return2) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="\s. n < unat (scast n_msgRegisters :: word32) \ obj_at' (\tcb. atcbContextGet (tcbArch tcb) (ARM_HYP_H.msgRegisters!n) = x!n) (ksCurThread s) s" + apply (rule_tac P="\s. 
n < unat (scast n_msgRegisters :: word32) + \ obj_at' (\tcb. user_regs (atcbContextGet (tcbArch tcb)) + (ARM_HYP_H.msgRegisters!n) = x!n) + (ksCurThread s) s" and P' = UNIV in ccorres_from_vcg_split_throws) apply vcg @@ -1221,14 +1234,14 @@ lemma getSyscallArg_ccorres_foo: apply (clarsimp simp: typ_heap_simps' msgRegisters_scast) apply (clarsimp simp: ctcb_relation_def ccontext_relation_def msgRegisters_ccorres atcbContextGet_def - carch_tcb_relation_def) + carch_tcb_relation_def cregs_relation_def) apply (subst (asm) msgRegisters_ccorres) apply (clarsimp simp: n_msgRegisters_def) apply (simp add: n_msgRegisters_def word_less_nat_alt) apply (simp add: index_msgRegisters_less unat_less_helper) apply wp[1] apply (wp getMRs_tcbContext) - apply simp + apply fastforce apply (rule ccorres_seq_skip [THEN iffD2]) apply (rule ccorres_add_return2) apply (rule ccorres_symb_exec_l) @@ -1252,7 +1265,7 @@ lemma getSyscallArg_ccorres_foo: in hoare_pre(1)) apply (wp getMRs_user_word) apply (clarsimp simp: msgMaxLength_def unat_less_helper) - apply simp + apply fastforce apply (clarsimp simp: sysargs_rel_def sysargs_rel_n_def) apply (rule conjI, clarsimp simp: unat_of_nat32 word_bits_def) apply (drule equalityD2) diff --git a/proof/crefine/ARM_HYP/Syscall_C.thy b/proof/crefine/ARM_HYP/Syscall_C.thy index 2aa8858115..07ccf20843 100644 --- a/proof/crefine/ARM_HYP/Syscall_C.thy +++ b/proof/crefine/ARM_HYP/Syscall_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -19,9 +20,6 @@ end context kernel_m begin -(* FIXME: should do this from the beginning *) -declare true_def [simp] false_def [simp] - lemma ccorres_If_False: "ccorres_underlying sr Gamm r xf arrel axf R R' hs b c \ ccorres_underlying sr Gamm r xf arrel axf @@ -51,8 +49,7 @@ lemma cap_cases_one_on_true_sum: lemma performInvocation_Endpoint_ccorres: "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and st_tcb_at' simple' thread and ep_at' epptr - and sch_act_sane and (\s. thread = ksCurThread s - \ (\p. ksCurThread s \ set (ksReadyQueues s p)))) + and sch_act_sane and (\s. thread = ksCurThread s)) (UNIV \ {s. block_' s = from_bool blocking} \ {s. call_' s = from_bool do_call} \ {s. badge_' s = badge} @@ -125,7 +122,6 @@ lemma decodeInvocation_ccorres: and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) and (\s. \v \ set extraCaps. s \' fst v \ cte_at' (snd v) s) and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). ex_nonz_cap_to' y s) - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) and sysargs_rel args buffer) (UNIV \ {s. call_' s = from_bool isCall} \ {s. 
block_' s = from_bool isBlocking} @@ -202,7 +198,7 @@ lemma decodeInvocation_ccorres: apply simp apply (rule hoare_use_eq[where f=ksCurThread]) apply (wp sts_invs_minor' sts_st_tcb_at'_cases - setThreadState_ct' hoare_vcg_all_lift sts_ksQ')+ + setThreadState_ct' hoare_vcg_all_lift)+ apply simp apply (vcg exspec=setThreadState_modifies) apply vcg @@ -272,22 +268,22 @@ lemma decodeInvocation_ccorres: apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, rule decodeTCBInvocation_ccorres) apply assumption apply (simp+)[3] apply (rule ccorres_Cond_rhs) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeDomainInvocation_ccorres[unfolded o_def], + erule decodeDomainInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeCNodeInvocation_ccorres[unfolded o_def], + erule decodeCNodeInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply simp @@ -319,7 +315,7 @@ lemma decodeInvocation_ccorres: apply fastforce apply (simp add: cap_lift_capEPBadge_mask_eq) apply (clarsimp simp: rf_sr_ksCurThread Collect_const_mem - cap_get_tag_isCap "StrictC'_thread_state_defs") + cap_get_tag_isCap ThreadState_defs) apply (frule word_unat.Rep_inverse') apply (simp add: cap_get_tag_isCap[symmetric] cap_get_tag_ReplyCap) apply (rule conjI) @@ -506,7 +502,7 @@ lemma handleInvocation_def2: lemma thread_state_to_tsType_eq_Restart: "(thread_state_to_tsType ts = scast ThreadState_Restart) = (ts = Restart)" - by (cases ts, simp_all add: "StrictC'_thread_state_defs") + by (cases ts, simp_all add: ThreadState_defs) lemma wordFromMessageInfo_spec: "\s. \\ {s} Call wordFromMessageInfo_'proc @@ -524,7 +520,7 @@ lemma wordFromMessageInfo_spec: lemma handleDoubleFault_ccorres: "ccorres dc xfdc (invs' and tcb_at' tptr and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and - sch_act_not tptr and (\s. \p. tptr \ set (ksReadyQueues s p))) + sch_act_not tptr) (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) [] (handleDoubleFault tptr ex1 ex2) (Call handleDoubleFault_'proc)" @@ -538,7 +534,7 @@ lemma handleDoubleFault_ccorres: apply (simp add: getRestartPC_def) apply wp apply clarsimp - apply (simp add: ThreadState_Inactive_def) + apply (simp add: ThreadState_defs) apply (fastforce simp: valid_tcb_state'_def) done @@ -602,8 +598,7 @@ lemma hrs_mem_update_use_hrs_mem: lemma sendFaultIPC_ccorres: "ccorres (cfault_rel2 \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and st_tcb_at' simple' tptr and sch_act_not tptr and - (\s. \p. tptr \ set (ksReadyQueues s p))) + (invs' and st_tcb_at' simple' tptr and sch_act_not tptr) (UNIV \ {s. (cfault_rel (Some fault) (seL4_Fault_lift(current_fault_' (globals s))) (lookup_fault_lift(current_lookup_fault_' (globals s))))} \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) @@ -681,15 +676,15 @@ lemma sendFaultIPC_ccorres: apply (ctac (no_vcg) add: sendIPC_ccorres) apply (ctac (no_vcg) add: ccorres_return_CE [unfolded returnOk_def comp_def]) apply wp - apply (wp threadSet_pred_tcb_no_state threadSet_invs_trivial threadSet_typ_at_lifts - | simp)+ + apply (wpsimp wp: threadSet_invs_trivial) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_typ_at_lifts) apply (clarsimp simp: guard_is_UNIV_def) apply (subgoal_tac "capEPBadge epcap && mask 28 = capEPBadge epcap") apply (clarsimp simp: cap_get_tag_isCap isEndpointCap_def isCap_simps ccap_relation_ep_helpers) apply (frule cap_get_tag_isCap(4)[symmetric]) - apply (clarsimp simp: cap_get_tag_EndpointCap to_bool_def) + apply (clarsimp simp: cap_get_tag_EndpointCap) apply (drule cap_get_tag_isCap(4) [symmetric]) apply (clarsimp simp: isCap_simps cap_endpoint_cap_lift cap_lift_capEPBadge_mask_eq) apply (clarsimp simp: case_bool_If) @@ -717,10 +712,9 @@ lemma sendFaultIPC_ccorres: apply vcg apply (clarsimp simp: inQ_def) apply (rule_tac Q="\a b. invs' b \ st_tcb_at' simple' tptr b - \ sch_act_not tptr b \ valid_cap' a b - \ (\p. tptr \ set (ksReadyQueues b p))" + \ sch_act_not tptr b \ valid_cap' a b" and E="\ _. \" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (wp) apply (clarsimp simp: isCap_simps) apply (clarsimp simp: valid_cap'_def pred_tcb_at') @@ -739,8 +733,7 @@ lemma sendFaultIPC_ccorres: done lemma handleFault_ccorres: - "ccorres dc xfdc (invs' and st_tcb_at' simple' t and - sch_act_not t and (\s. \p. t \ set (ksReadyQueues s p))) + "ccorres dc xfdc (invs' and st_tcb_at' simple' t and sch_act_not t) (UNIV \ {s. (cfault_rel (Some flt) (seL4_Fault_lift(current_fault_' (globals s))) (lookup_fault_lift(current_lookup_fault_' (globals s))) )} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t}) @@ -757,12 +750,12 @@ lemma handleFault_ccorres: apply (rule ccorres_return_Skip') apply clarsimp apply (rule ccorres_cond_univ) - apply (ctac (no_vcg) add: handleDoubleFault_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: handleDoubleFault_ccorres) apply (simp add: sendFaultIPC_def) apply wp - apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] apply clarsimp - apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] apply (wp) apply (simp add: guard_is_UNIV_def) apply (simp add: guard_is_UNIV_def) @@ -827,8 +820,7 @@ lemma getMessageInfo_msgLength': lemma handleInvocation_ccorres: "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and - ct_active' and sch_act_simple and - (\s. \x. ksCurThread s \ set (ksReadyQueues s x))) + ct_active' and sch_act_simple) (UNIV \ {s. isCall_' s = from_bool isCall} \ {s. isBlocking_' s = from_bool isBlocking}) [] (handleInvocation isCall isBlocking) (Call handleInvocation_'proc)" @@ -953,17 +945,16 @@ lemma handleInvocation_ccorres: apply (simp add: invocationCatch_def o_def) apply (rule_tac Q="\rv'. invs' and tcb_at' rv" and E="\ft. 
invs' and tcb_at' rv" - in hoare_post_impErr) - apply (wp hoare_split_bind_case_sumE - alternative_wp hoare_drop_imps + in hoare_strengthen_postE) + apply (wp hoare_split_bind_case_sumE hoare_drop_imps setThreadState_nonqueued_state_update ct_in_state'_set setThreadState_st_tcb - hoare_vcg_all_lift sts_ksQ' + hoare_vcg_all_lift | wpc | wps)+ apply auto[1] apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) - apply (simp add: "StrictC'_thread_state_defs" mask_def) + apply (simp add: ThreadState_defs mask_def) apply (simp add: typ_heap_simps) apply (case_tac ts, simp_all add: cthread_state_relation_def)[1] apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) @@ -1120,7 +1111,7 @@ lemma handleReply_ccorres: apply (rule ccorres_cond_true) apply simp apply (rule ccorres_return_void_catchbrk) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply (vcg exspec=doReplyTransfer_modifies) apply (rule ccorres_fail)+ apply (wpc, simp_all) @@ -1138,7 +1129,6 @@ lemma handleReply_ccorres: apply (csymbr, csymbr, csymbr) apply simp apply (rule ccorres_assert2) - apply (fold dc_def) apply (rule ccorres_add_return2) apply (ctac (no_vcg)) apply (rule ccorres_return_void_catchbrk) @@ -1221,9 +1211,6 @@ lemma ccorres_trim_redundant_throw_break: lemma invs_valid_objs_strengthen: "invs' s \ valid_objs' s" by fastforce -lemma ct_not_ksQ_strengthen: - "thread = ksCurThread s \ ksCurThread s \ set (ksReadyQueues s p) \ thread \ set (ksReadyQueues s p)" by fastforce - lemma option_to_ctcb_ptr_valid_ntfn: "valid_ntfn' ntfn s ==> (option_to_ctcb_ptr (ntfnBoundTCB ntfn) = NULL) = (ntfnBoundTCB ntfn = None)" apply (cases "ntfnBoundTCB ntfn", simp_all add: option_to_ctcb_ptr_def) @@ -1257,8 +1244,7 @@ lemma handleRecv_ccorres: notes rf_sr_upd_safe[simp del] if_cong[cong] shows "ccorres dc xfdc - (\s. invs' s \ st_tcb_at' simple' (ksCurThread s) s - \ sch_act_sane s \ (\p. ksCurThread s \ set (ksReadyQueues s p))) + (\s. invs' s \ st_tcb_at' simple' (ksCurThread s) s \ sch_act_sane s) {s. isBlocking_' s = from_bool isBlocking} [] (handleRecv isBlocking) @@ -1300,7 +1286,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1318,10 +1304,10 @@ lemma handleRecv_ccorres: apply (simp add: liftE_bind) apply (ctac) - apply (rule_tac P="\s. ksCurThread s = rv" in ccorres_cross_over_guard) - apply (ctac add: receiveIPC_ccorres[unfolded dc_def]) + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: receiveIPC_ccorres) - apply (wp deleteCallerCap_ksQ_ct' hoare_vcg_all_lift) + apply (wp hoare_vcg_all_lift) apply (rule conseqPost[where Q'=UNIV and A'="{}"], vcg exspec=deleteCallerCap_modifies) apply (clarsimp dest!: rf_sr_ksCurThread) apply simp @@ -1367,7 +1353,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. 
UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1384,7 +1370,7 @@ lemma handleRecv_ccorres: apply (clarsimp simp: rf_sr_upd_safe) apply (simp add: liftE_bind) - apply (ctac add: receiveSignal_ccorres[unfolded dc_def]) + apply (ctac add: receiveSignal_ccorres) apply clarsimp apply (vcg exspec=handleFault_modifies) apply (rule ccorres_cond_true_seq) @@ -1397,7 +1383,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) apply (rule ccorres_add_return2) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_break_return[where P=\ and P'=UNIV]) apply simp+ apply wp @@ -1418,7 +1404,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_symb_exec_r) apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_upd_safe) @@ -1431,9 +1417,9 @@ lemma handleRecv_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=handleFault_modifies) @@ -1444,13 +1430,11 @@ lemma handleRecv_ccorres: apply clarsimp apply (rename_tac thread epCPtr) apply (rule_tac Q'="(\rv s. invs' s \ st_tcb_at' simple' thread s - \ sch_act_sane s \ (\p. thread \ set (ksReadyQueues s p)) \ thread = ksCurThread s - \ valid_cap' rv s)" in hoare_post_imp_R[rotated]) - apply (clarsimp simp: sch_act_sane_def) - apply (auto dest!: obj_at_valid_objs'[OF _ invs_valid_objs'] - simp: projectKOs valid_obj'_def, - auto simp: pred_tcb_at'_def obj_at'_def objBits_simps projectKOs ct_in_state'_def)[1] - apply wp + \ sch_act_sane s \ thread = ksCurThread s + \ valid_cap' rv s)" in hoare_strengthen_postE_R[rotated]) + apply (intro conjI impI allI; clarsimp simp: sch_act_sane_def) + apply (fastforce dest: obj_at_valid_objs'[OF _ invs_valid_objs'] ko_at_valid_ntfn') + apply wp apply clarsimp apply (vcg exspec=isStopped_modifies exspec=lookupCap_modifies) @@ -1467,8 +1451,8 @@ lemma handleRecv_ccorres: apply (frule tcb_aligned'[OF tcb_at_invs']) apply clarsimp apply (intro conjI impI allI) - apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift - lookup_fault_missing_capability_lift is_cap_fault_def)+ + apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift + lookup_fault_missing_capability_lift is_cap_fault_def)+ apply (clarsimp simp: cap_get_tag_NotificationCap) apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption, erule ko_at_projectKO_opt) apply (clarsimp simp: cnotification_relation_def Let_def) @@ -1499,7 +1483,7 @@ lemma handleYield_ccorres: apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear tcbSchedAppend_valid_objs') apply (vcg exspec= tcbSchedAppend_modifies) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues) + apply (wp weak_sch_act_wf_lift_linear) apply (vcg exspec= tcbSchedDequeue_modifies) apply (clarsimp simp: tcb_at_invs' invs_valid_objs' valid_objs'_maxPriority valid_objs'_maxDomain) @@ -1691,7 +1675,7 @@ lemma 
virq_virq_active_set_virqEOIIRQEN_spec': \ \ret__struct_virq_C = virq_C (ARRAY _. virqSetEOIIRQEN (virq_to_H \<^bsup>s\<^esup>virq) \<^bsup>s\<^esup>v32) \" apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) apply vcg - apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def o_def) + apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def) apply (case_tac virq) apply clarsimp apply (rule array_ext) @@ -1704,7 +1688,7 @@ lemma virq_virq_invalid_set_virqEOIIRQEN_spec': \ \ret__struct_virq_C = virq_C (ARRAY _. virqSetEOIIRQEN (virq_to_H \<^bsup>s\<^esup>virq) \<^bsup>s\<^esup>v32) \" apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) apply vcg - apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def o_def) + apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def) apply (case_tac virq) apply clarsimp apply (rule array_ext) @@ -1717,7 +1701,7 @@ lemma virq_virq_pending_set_virqEOIIRQEN_spec': \ \ret__struct_virq_C = virq_C (ARRAY _. virqSetEOIIRQEN (virq_to_H \<^bsup>s\<^esup>virq) \<^bsup>s\<^esup>v32) \" apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) apply vcg - apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def o_def) + apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def) apply (case_tac virq) apply clarsimp apply (rule array_ext) @@ -1796,16 +1780,15 @@ definition where "eisr_calc eisr0 eisr1 \ if eisr0 \ 0 then word_ctz eisr0 else word_ctz eisr1 + 32" -lemma ccorres_vgicMaintenance: - notes dc_simp[simp del] Collect_const[simp del] +lemma ccorres_vgicMaintenance: + notes Collect_const[simp del] notes scast_specific_plus32[simp] scast_specific_plus32_signed[simp] notes virq_virq_active_set_virqEOIIRQEN_spec = virq_virq_active_set_virqEOIIRQEN_spec' notes virq_virq_invalid_set_virqEOIIRQEN_spec = virq_virq_invalid_set_virqEOIIRQEN_spec' notes virq_virq_pending_set_virqEOIIRQEN_spec = virq_virq_pending_set_virqEOIIRQEN_spec' shows "ccorres dc xfdc - (\s. invs' s \ sch_act_not (ksCurThread s) s - \ (\p. ksCurThread s \ set (ksReadyQueues s p))) + (\s. invs' s \ sch_act_not (ksCurThread s) s) UNIV hs vgicMaintenance (Call VGICMaintenance_'proc)" (is "ccorres _ _ ?PRE _ _ _ _") @@ -2069,7 +2052,7 @@ proof - apply wpsimp apply wpsimp apply wpsimp - apply (clarsimp simp: cur_vcpu_relation_def dc_def eisr_calc_def split: option.splits) + apply (clarsimp simp: cur_vcpu_relation_def eisr_calc_def split: option.splits) done qed @@ -2134,13 +2117,12 @@ lemma vcpuUpdate_vppi_masked_ccorres_armHSCurVCPU: apply (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def split: option.splits) done -lemma ccorres_VPPIEvent: - notes dc_simp[simp del] Collect_const[simp del] +lemma ccorres_VPPIEvent: + notes Collect_const[simp del] notes scast_specific_plus32[simp] scast_specific_plus32_signed[simp] shows "ccorres dc xfdc (\s. invs' s \ sch_act_not (ksCurThread s) s - \ (\p. ksCurThread s \ set (ksReadyQueues s p)) \ irqVPPIEventIndex irq \ None) \\irq = ucast irq\ hs (vppiEvent irq) (Call VPPIEvent_'proc)" @@ -2222,11 +2204,9 @@ qed lemma ccorres_handleReservedIRQ: "ccorres dc xfdc - (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s \ - (\p. ksCurThread s \ set (ksReadyQueues s p)))) + (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) (UNIV \ {s. 
irq_' s = ucast irq}) hs (handleReservedIRQ irq) (Call handleReservedIRQ_'proc)" - supply dc_simp[simp del] supply Collect_const[simp del] apply (cinit lift: irq_') apply (clarsimp simp: ucast_up_ucast is_up) @@ -2262,10 +2242,8 @@ lemma ccorres_handleReservedIRQ: lemma handleInterrupt_ccorres: "ccorres dc xfdc - (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s \ - (\p. ksCurThread s \ set (ksReadyQueues s p)))) - (UNIV \ \\irq = ucast irq\) - hs + (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) + \\irq = ucast irq\ hs (handleInterrupt irq) (Call handleInterrupt_'proc)" apply (cinit lift: irq_' cong: call_ignore_cong) @@ -2277,11 +2255,11 @@ lemma handleInterrupt_ccorres: apply (subst doMachineOp_bind) apply (rule maskInterrupt_empty_fail) apply (rule ackInterrupt_empty_fail) - apply (ctac add: maskInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: maskInterrupt_ccorres) apply (subst bind_return_unit[where f="doMachineOp (ackInterrupt irq)"]) - apply (ctac add: ackInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=ackInterrupt_modifies) @@ -2300,7 +2278,7 @@ lemma handleInterrupt_ccorres: apply (rule getIRQSlot_ccorres3) apply (rule ccorres_getSlotCap_cte_at) apply (rule_tac P="cte_at' rv" in ccorres_cross_over_guard) - supply ccorres_move_array_assertion_tcb_ctes [corres_pre del] + supply ccorres_move_array_assertion_tcb_ctes [ccorres_pre del] apply ctac apply csymbr apply csymbr @@ -2319,7 +2297,7 @@ lemma handleInterrupt_ccorres: apply (ctac (no_vcg) add: sendSignal_ccorres) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp+ apply (simp del: Collect_const) apply (rule ccorres_cond_true_seq) @@ -2328,7 +2306,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp apply (rule_tac P=\ and P'="{s. 
ret__int_' s = 0 \ cap_get_tag cap \ scast cap_notification_cap}" in ccorres_inst) apply (clarsimp simp: isCap_simps simp del: Collect_const) @@ -2340,7 +2318,7 @@ lemma handleInterrupt_ccorres: rule ccorres_cond_false_seq, simp, rule ccorres_cond_false_seq, simp, ctac (no_vcg) add: maskInterrupt_ccorres, - ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def], + ctac (no_vcg) add: ackInterrupt_ccorres, wp, simp)+) apply (wp getSlotCap_wp) apply simp @@ -2349,7 +2327,6 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_move_const_guards)+ apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac (no_vcg) add: timerTick_ccorres) apply (ctac (no_vcg) add: resetTimer_ccorres) @@ -2361,7 +2338,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) apply (ctac add: ccorres_handleReservedIRQ) - apply (ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: ackInterrupt_ccorres) apply wp apply (vcg exspec=handleReservedIRQ_modifies) apply (simp add: sint_ucast_eq_uint is_down uint_up_ucast is_up) diff --git a/proof/crefine/ARM_HYP/TcbAcc_C.thy b/proof/crefine/ARM_HYP/TcbAcc_C.thy index ac48ff46a7..9f988bbdba 100644 --- a/proof/crefine/ARM_HYP/TcbAcc_C.thy +++ b/proof/crefine/ARM_HYP/TcbAcc_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -89,22 +90,22 @@ lemma archThreadGet_eq: apply simp done -lemma get_tsType_ccorres [corres]: +lemma get_tsType_ccorres[corres]: "ccorres (\r r'. r' = thread_state_to_tsType r) ret__unsigned_' (tcb_at' thread) - (UNIV \ {s. thread_state_ptr_' s = Ptr &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C''])}) [] - (getThreadState thread) (Call thread_state_ptr_get_tsType_'proc)" + ({s. f s = tcb_ptr_to_ctcb_ptr thread} \ + {s. 
cslift s (Ptr &(f s\[''tcbState_C''])) = Some (thread_state_' s)}) [] + (getThreadState thread) (Call thread_state_get_tsType_'proc)" unfolding getThreadState_def - apply (rule ccorres_from_spec_modifies) - apply (rule thread_state_ptr_get_tsType_spec) - apply (rule thread_state_ptr_get_tsType_modifies) - apply simp - apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: typ_heap_simps) + apply (rule ccorres_from_spec_modifies [where P=\, simplified]) + apply (rule thread_state_get_tsType_spec) + apply (rule thread_state_get_tsType_modifies) + apply simp apply (frule (1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (rule bexI [rotated, OF threadGet_eq], assumption) apply simp - apply (erule ctcb_relation_thread_state_to_tsType) + apply (drule ctcb_relation_thread_state_to_tsType) + apply simp done lemma threadGet_obj_at2: @@ -143,13 +144,14 @@ lemma getRegister_ccorres [corres]: apply (drule (1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps register_from_H_less register_from_H_sless) apply (clarsimp simp: getRegister_def typ_heap_simps) - apply (rule_tac x = "((atcbContextGet o tcbArch) ko reg, \)" in bexI [rotated]) + apply (rule_tac x = "((user_regs o atcbContextGet o tcbArch) ko reg, \)" in bexI[rotated]) apply (simp add: in_monad' asUser_def select_f_def split_def) apply (subst arg_cong2 [where f = "(\)"]) defer apply (rule refl) apply (erule threadSet_eq) - apply (clarsimp simp: ctcb_relation_def ccontext_relation_def carch_tcb_relation_def) + apply (clarsimp simp: ctcb_relation_def ccontext_relation_def cregs_relation_def + carch_tcb_relation_def) apply (wp threadGet_obj_at2)+ apply simp apply simp @@ -177,7 +179,7 @@ lemma threadSet_corres_lemma: assumes spec: "\s. \\ \s. P s\ Call f {t. Q s t}" and mod: "modifies_heap_spec f" and rl: "\\ x t ko. \(\, x) \ rf_sr; Q x t; x \ P'; ko_at' ko thread \\ - \ (\\ksPSpace := ksPSpace \(thread \ KOTCB (g ko))\, + \ (\\ksPSpace := (ksPSpace \)(thread \ KOTCB (g ko))\, t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" and g: "\s x. \tcb_at' thread s; x \ P'; (s, x) \ rf_sr\ \ P x" shows "ccorres dc xfdc (tcb_at' thread) P' [] (threadSet g thread) (Call f)" @@ -206,7 +208,7 @@ lemma threadSet_corres_lemma: lemma threadSet_ccorres_lemma4: - "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := ksPSpace s(thread \ injectKOS (F tcb))\, s') \ rf_sr}; + "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := (ksPSpace s)(thread \ injectKOS (F tcb))\, s') \ rf_sr}; \s s' tcb tcb'. 
\ (s, s') \ rf_sr; P tcb; ko_at' tcb thread s; cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some tcb'; ctcb_relation tcb tcb'; P' s ; s' \ R\ \ s' \ Q s tcb \ @@ -347,10 +349,10 @@ lemma armHSCurVCPU_update_active_ccorres: apply (clarsimp simp: modifyArchState_def) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: bind_def simpler_gets_def from_bool_def simpler_modify_def) + apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) by (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def - cmachine_state_relation_def from_bool_def + cmachine_state_relation_def split: bool.split) lemma armHSCurVCPU_update_curv_ccorres: @@ -363,7 +365,7 @@ lemma armHSCurVCPU_update_curv_ccorres: apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) by (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def - cmachine_state_relation_def from_bool_def true_def false_def + cmachine_state_relation_def split: bool.split) lemma armHSCurVCPU_update_ccorres: @@ -377,7 +379,7 @@ lemma armHSCurVCPU_update_ccorres: apply (clarsimp simp: bind_def simpler_gets_def simpler_modify_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) by (clarsimp simp: carch_state_relation_def carch_globals_def cur_vcpu_relation_def - cmachine_state_relation_def from_bool_def true_def false_def + cmachine_state_relation_def split: bool.split) lemmas armHSCurVCPU_update_active_ccorres2 = armHSCurVCPU_update_ccorres[where curv="Some (v, b)" for v b] diff --git a/proof/crefine/ARM_HYP/TcbQueue_C.thy b/proof/crefine/ARM_HYP/TcbQueue_C.thy index 3fb6ca79a9..c83f759d51 100644 --- a/proof/crefine/ARM_HYP/TcbQueue_C.thy +++ b/proof/crefine/ARM_HYP/TcbQueue_C.thy @@ -891,49 +891,6 @@ lemma tcb_queue_relation'_prev_mask: shows "ptr_val (getPrev tcb) && ~~ mask bits = ptr_val (getPrev tcb)" by (rule tcb_queue_relation_prev_mask [OF tcb_queue_relation'_queue_rel], fact+) - -lemma cready_queues_relation_null_queue_ptrs: - assumes rel: "cready_queues_relation mp cq aq" - and same: "option_map tcb_null_ep_ptrs \ mp' = option_map tcb_null_ep_ptrs \ mp" - shows "cready_queues_relation mp' cq aq" - using rel - apply (clarsimp simp: cready_queues_relation_def Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp, (erule conjI)+, assumption) - apply (clarsimp simp: tcb_queue_relation'_def) - apply (erule iffD2 [OF tcb_queue_relation_only_next_prev, rotated -1]) - apply (rule ext) - apply (case_tac "mp' x") - apply (frule compD [OF same]) - apply simp - apply (frule compD [OF same]) - apply (clarsimp simp: tcb_null_ep_ptrs_def) - apply (case_tac z, case_tac a) - apply simp - \ \clag\ - apply (rule ext) - apply (case_tac "mp' x") - apply (frule compD [OF same]) - apply simp - apply (frule compD [OF same]) - apply (clarsimp simp: tcb_null_ep_ptrs_def) - apply (case_tac z, case_tac a) - apply simp - done - -lemma cready_queues_relation_not_queue_ptrs: - assumes rel: "cready_queues_relation mp cq aq" - and same: "option_map tcbSchedNext_C \ mp' = option_map tcbSchedNext_C \ mp" - "option_map tcbSchedPrev_C \ mp' = option_map tcbSchedPrev_C \ mp" - shows "cready_queues_relation mp' cq aq" - using rel - apply (clarsimp simp: cready_queues_relation_def tcb_queue_relation'_def Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp, (erule conjI)+, assumption) - apply clarsimp - apply (erule iffD2 
[OF tcb_queue_relation_only_next_prev, rotated -1]) - apply (rule same) - apply (rule same) - done - lemma ntfn_ep_disjoint: assumes srs: "sym_refs (state_refs_of' s)" and epat: "ko_at' ep epptr s" @@ -1017,8 +974,8 @@ lemma cpspace_relation_ntfn_update_ntfn: and cp: "cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals t))" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" using koat invs cp rel apply - apply (subst map_comp_update) @@ -1101,12 +1058,10 @@ lemma rf_sr_tcb_update_no_queue: (tcb_ptr_to_ctcb_ptr thread) ctcb) (t_hrs_' (globals s')); tcbEPNext_C ctcb = tcbEPNext_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); tcbEPPrev_C ctcb = tcbEPPrev_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); - tcbSchedNext_C ctcb = tcbSchedNext_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); - tcbSchedPrev_C ctcb = tcbSchedPrev_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes heap_to_user_data_def) @@ -1116,31 +1071,22 @@ lemma rf_sr_tcb_update_no_queue: apply (clarsimp simp: map_comp_update projectKO_opt_tcb cvariable_relation_upd_const typ_heap_simps') apply (intro conjI) - subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_upd_tcb_no_queues, assumption+) - subgoal by (clarsimp intro!: ext) - subgoal by (clarsimp intro!: ext) + subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_upd_tcb_no_queues, assumption+) + apply (rule cendpoint_relation_upd_tcb_no_queues, assumption+) subgoal by (clarsimp intro!: ext) subgoal by (clarsimp intro!: ext) - apply (erule cready_queues_relation_not_queue_ptrs) + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_upd_tcb_no_queues, assumption+) subgoal by (clarsimp intro!: ext) subgoal by (clarsimp intro!: ext) subgoal by (simp add: carch_state_relation_def typ_heap_simps') by (simp add: cmachine_state_relation_def) -lemma rf_sr_tcb_update_no_queue_helper: - "(s, s'\ globals := globals s' \ t_hrs_' := t_hrs_' (globals (undefined - \ globals := (undefined \ t_hrs_' := f (globals s') (t_hrs_' (globals s')) \)\))\\) \ rf_sr - \ (s, globals_update (\v. 
t_hrs_'_update (f v) v) s') \ rf_sr" - by (simp cong: StateSpace.state.fold_congs globals.fold_congs) - -lemmas rf_sr_tcb_update_no_queue2 - = rf_sr_tcb_update_no_queue_helper [OF rf_sr_tcb_update_no_queue, simplified] +lemmas rf_sr_tcb_update_no_queue2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue, simplified] lemma tcb_queue_relation_not_in_q: "ctcb_ptr_to_tcb_ptr x \ set xs \ @@ -1155,7 +1101,7 @@ lemma rf_sr_tcb_update_not_in_queue: \ live' (KOTCB tcb); invs' s; (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes @@ -1170,37 +1116,27 @@ lemma rf_sr_tcb_update_not_in_queue: apply clarsimp apply (auto simp: obj_at'_def ko_wp_at'_def)[1] apply (intro conjI) - subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply clarsimp - apply (subgoal_tac "thread \ (fst ` ep_q_refs_of' a)") - apply (clarsimp simp: cendpoint_relation_def Let_def split: Structures_H.endpoint.split) - subgoal by (intro conjI impI allI, simp_all add: image_def tcb_queue_relation_not_in_q)[1] - apply (drule(1) map_to_ko_atI') - apply (drule sym_refs_ko_atD', clarsimp+) - subgoal by blast + subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply clarsimp - apply (subgoal_tac "thread \ (fst ` ntfn_q_refs_of' (ntfnObj a))") - apply (clarsimp simp: cnotification_relation_def Let_def - split: ntfn.splits) - subgoal by (simp add: image_def tcb_queue_relation_not_in_q)[1] + apply (subgoal_tac "thread \ (fst ` ep_q_refs_of' a)") + apply (clarsimp simp: cendpoint_relation_def Let_def split: Structures_H.endpoint.split) + subgoal by (intro conjI impI allI, simp_all add: image_def tcb_queue_relation_not_in_q)[1] apply (drule(1) map_to_ko_atI') apply (drule sym_refs_ko_atD', clarsimp+) subgoal by blast - apply (simp add: cready_queues_relation_def, erule allEI) - apply (clarsimp simp: Let_def) - apply (subst tcb_queue_relation_not_in_q) - apply clarsimp - apply (drule valid_queues_obj_at'D, clarsimp) - apply (clarsimp simp: obj_at'_def projectKOs inQ_def) - subgoal by simp + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply clarsimp + apply (subgoal_tac "thread \ (fst ` ntfn_q_refs_of' (ntfnObj a))") + apply (clarsimp simp: cnotification_relation_def Let_def + split: ntfn.splits) + subgoal by (simp add: image_def tcb_queue_relation_not_in_q)[1] + apply (drule(1) map_to_ko_atI') + apply (drule sym_refs_ko_atD', clarsimp+) + subgoal by blast subgoal by (simp add: carch_state_relation_def carch_globals_def typ_heap_simps') by (simp add: cmachine_state_relation_def) -lemmas rf_sr_tcb_update_not_in_queue2 - = rf_sr_tcb_update_no_queue_helper [OF rf_sr_tcb_update_not_in_queue, simplified] - end end diff --git a/proof/crefine/ARM_HYP/Tcb_C.thy b/proof/crefine/ARM_HYP/Tcb_C.thy index af1553c0ae..6d2632f1a2 100644 --- a/proof/crefine/ARM_HYP/Tcb_C.thy +++ b/proof/crefine/ARM_HYP/Tcb_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: 
GPL-2.0-only @@ -58,8 +59,6 @@ lemma doMachineOp_sched: done context begin interpretation Arch . (*FIXME: arch_split*) -crunch queues[wp]: setupReplyMaster "valid_queues" - (simp: crunch_simps wp: crunch_wps) crunch curThread [wp]: restart "\s. P (ksCurThread s)" (wp: crunch_wps simp: crunch_simps) @@ -96,8 +95,8 @@ lemma getMRs_rel_sched: lemma getObject_state: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbState_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -155,8 +154,8 @@ lemma getObject_state: lemma threadGet_state: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) t' s); ko_at' ko t s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_state [where st=st]) apply (rule exI) @@ -166,8 +165,8 @@ lemma threadGet_state: lemma asUser_state: "\(x,s) \ fst (asUser t' f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ \ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (asUser t' f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (asUser t' f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -264,8 +263,8 @@ lemma asUser_state: lemma doMachineOp_state: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -298,7 +297,7 @@ lemma getMRs_rel_state: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s \ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -390,9 +389,10 @@ lemma hrs_mem_update_cong: lemma setPriority_ccorres: "ccorres dc xfdc - (\s. tcb_at' t s \ Invariants_H.valid_queues s \ ksCurDomain s \ maxDomain \ - valid_queues' s \ valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ (priority \ maxPriority)) - (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. prio_' s = ucast priority}) + (\s. 
tcb_at' t s \ ksCurDomain s \ maxDomain \ + valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ (priority \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s) + ({s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. prio_' s = ucast priority}) [] (setPriority t priority) (Call setPriority_'proc)" apply (cinit lift: tptr_' prio_') apply (ctac(no_vcg) add: tcbSchedDequeue_ccorres) @@ -411,11 +411,11 @@ lemma setPriority_ccorres: apply (rule ccorres_pre_getCurThread) apply (rule_tac R = "\s. rv = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) apply (rule ccorres_return_Skip') apply (wp isRunnable_wp) - apply (wpsimp wp: hoare_drop_imps threadSet_valid_queues threadSet_valid_objs' + apply (wpsimp wp: hoare_drop_imps threadSet_valid_objs' weak_sch_act_wf_lift_linear threadSet_pred_tcb_at_state threadSet_tcbDomain_triv simp: st_tcb_at'_def o_def split: if_splits) @@ -424,19 +424,14 @@ lemma setPriority_ccorres: where Q="\rv s. obj_at' (\_. True) t s \ priority \ maxPriority \ - Invariants_H.valid_queues s \ ksCurDomain s \ maxDomain \ valid_objs' s \ - valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s \ - (\d p. \ t \ set (ksReadyQueues s (d, p)))"]) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues tcbSchedDequeue_nonq) + pspace_aligned' s \ pspace_distinct' s"]) + apply (wp weak_sch_act_wf_lift_linear valid_tcb'_def) apply (clarsimp simp: valid_tcb'_tcbPriority_update) apply clarsimp - apply (frule (1) valid_objs'_maxDomain[where t=t]) - apply (frule (1) valid_objs'_maxPriority[where t=t]) - apply simp -done + done lemma setMCPriority_ccorres: "ccorres dc xfdc @@ -491,8 +486,8 @@ lemma checkCapAt_ccorres: apply assumption apply (simp only: when_def if_to_top_of_bind) apply (rule ccorres_if_lhs) - apply (simp add: from_bool_def true_def) - apply (simp add: from_bool_def false_def) + apply simp + apply simp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -517,7 +512,7 @@ lemma cteInsert_cap_to'2: apply (simp add: cteInsert_def ex_nonz_cap_to'_def setUntypedCapAsFull_def) apply (rule hoare_vcg_ex_lift) apply (wp updateMDB_weak_cte_wp_at - updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp) + updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of) apply auto done @@ -611,7 +606,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply csymbr apply (simp add: liftE_bindE[symmetric] bindE_assoc getThreadBufferSlot_def - locateSlot_conv o_def + locateSlot_conv del: Collect_const) apply (simp add: liftE_bindE del: Collect_const) apply (ctac(no_vcg) add: cteDelete_ccorres) @@ -637,13 +632,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (simp add: assertDerived_def bind_assoc del: Collect_const) apply (rule ccorres_symb_exec_l) @@ -657,7 +652,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem 
rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -666,36 +661,36 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wp (once)) apply (clarsimp simp: guard_is_UNIV_def) - apply (wpsimp wp: when_def static_imp_wp) + apply (wpsimp wp: when_def hoare_weak_lift_imp) apply (strengthen sch_act_wf_weak, wp) apply clarsimp apply wp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (rule hoare_strengthen_post [ where Q= "\rv s. - Invariants_H.valid_queues s \ valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ ((\a b. priority = Some (a, b)) \ tcb_at' target s \ ksCurDomain s \ maxDomain \ - valid_queues' s \ fst (the priority) \ maxPriority)"]) + fst (the priority) \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s"]) apply (strengthen sch_act_wf_weak) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (clarsimp split: if_splits) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) apply (rule ccorres_split_nothrow_novcg_dc) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule_tac C'="{s. target = curThread}" and Q="\s. ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (trace_schematic_insts \rule ccorres_cond2[where R=\], simp add: Collect_const_mem\) @@ -705,25 +700,24 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply (simp add: when_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbBuffer_def size_of_def cte_level_bits_def tcbIPCBufferSlot_def) apply csymbr - apply (simp add: Collect_False false_def - del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false_seq, simp) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule_tac C'="{s. target = curThread}" and Q="\s. 
ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply(rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -732,9 +726,9 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+) apply wp apply (clarsimp simp: guard_is_UNIV_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) - apply (simp add: guard_is_UNIV_def false_def Collect_const_mem) + apply (simp add: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_cond_false_seq, simp) @@ -742,14 +736,14 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp split: option.split_asm) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule_tac C'="{s. target = curThread}" and Q="\s. ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -759,17 +753,17 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply wpsimp - apply (wp static_imp_wp, strengthen sch_act_wf_weak, wp ) + apply (wp hoare_weak_lift_imp, strengthen sch_act_wf_weak, wp ) apply wp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (simp cong: conj_cong) apply (rule hoare_strengthen_post[ - where Q="\a b. (Invariants_H.valid_queues b \ - valid_objs' b \ + where Q="\a b. (valid_objs' b \ sch_act_wf (ksSchedulerAction b) b \ + pspace_aligned' b \ pspace_distinct' b \ ((\a b. 
priority = Some (a, b)) \ tcb_at' target b \ - ksCurDomain b \ maxDomain \ valid_queues' b \ + ksCurDomain b \ maxDomain \ fst (the priority) \ maxPriority)) \ ((case snd (the buf) of None \ 0 @@ -792,15 +786,15 @@ lemma invokeTCB_ThreadControl_ccorres: prefer 2 apply fastforce apply (strengthen cte_is_derived_capMasterCap_strg - invs_queues invs_weak_sch_act_wf invs_sch_act_wf' + invs_weak_sch_act_wf invs_sch_act_wf' invs_valid_objs' invs_mdb' invs_pspace_aligned', simp add: o_def) apply (rule_tac P="is_aligned (fst (the buf)) msg_align_bits" in hoare_gen_asm) - apply (wp threadSet_ipcbuffer_trivial static_imp_wp + apply (wp threadSet_ipcbuffer_trivial hoare_weak_lift_imp | simp - | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf invs_queues - invs_valid_queues' | wp hoare_drop_imps)+ + | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf + | wp hoare_drop_imps)+ (* \ P *) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem option_to_0_def @@ -810,7 +804,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg apply (simp add: conj_comms cong: conj_cong) - apply (strengthen invs_ksCurDomain_maxDomain') + apply (strengthen invs_ksCurDomain_maxDomain' invs_pspace_distinct') apply (wp hoare_vcg_const_imp_lift_R cteDelete_invs') apply simp apply (rule ccorres_split_nothrow_novcg_dc) @@ -823,12 +817,11 @@ lemma invokeTCB_ThreadControl_ccorres: apply (clarsimp simp: inQ_def Collect_const_mem cintr_def exception_defs tcb_cnode_index_defs) apply (simp add: tcbBuffer_def tcbIPCBufferSlot_def word_sle_def - cte_level_bits_def from_bool_def true_def size_of_def case_option_If2 ) + cte_level_bits_def size_of_def case_option_If2 ) apply (rule conjI) apply (clarsimp simp: case_option_If2 if_n_0_0 objBits_simps' valid_cap'_def capAligned_def word_bits_conv obj_at'_def projectKOs) - apply (clarsimp simp: invs_valid_objs' invs_valid_queues' - Invariants_H.invs_queues invs_ksCurDomain_maxDomain') + apply (fastforce simp: invs_valid_objs' invs_ksCurDomain_maxDomain') apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -839,11 +832,10 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -856,33 +848,27 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - del: Collect_const) + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: 
Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (fastforce simp: guard_is_UNIV_def Kernel_C.tcbVTable_def tcbVTableSlot_def cte_level_bits_def size_of_def) apply csymbr - apply (simp add: false_def Collect_False - del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (clarsimp simp: guard_is_UNIV_def false_def - ccap_relation_def cap_thread_cap_lift - cap_to_H_def) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) apply vcg @@ -891,7 +877,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes cteDelete_sch_act_simple | strengthen invs_valid_objs')+ - apply (rule hoare_post_imp_R[where Q' = "\r. invs'"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r. invs'"]) apply (wp cteDelete_invs') apply (clarsimp simp:cte_wp_at_ctes_of) apply simp @@ -905,12 +891,11 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -926,34 +911,28 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - del: Collect_const) + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem Kernel_C.tcbCTable_def tcbCTableSlot_def cte_level_bits_def size_of_def option_to_0_def) apply csymbr - apply (simp add: false_def Collect_False - del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (clarsimp simp: guard_is_UNIV_def false_def - ccap_relation_def cap_thread_cap_lift - cap_to_H_def) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) apply vcg @@ -961,20 +940,20 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: cte_is_derived_capMasterCap_strg o_def) apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes cteDelete_sch_act_simple | 
strengthen invs_valid_objs')+ - apply (rule hoare_post_imp_R[where Q' = "\r. invs'"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r. invs'"]) apply (wp cteDelete_invs') apply (clarsimp simp:cte_wp_at_ctes_of) apply simp apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def tcbCTableSlot_def Kernel_C.tcbCTable_def cte_level_bits_def size_of_def word_sle_def option_to_0_def - true_def from_bool_def cintr_def Collect_const_mem) + cintr_def Collect_const_mem) apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: inQ_def) apply (subst is_aligned_neg_mask_eq) @@ -1001,7 +980,7 @@ lemma setupReplyMaster_ccorres: apply (cinit lift: thread_') apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply ctac - apply (simp del: Collect_const add: dc_def[symmetric]) + apply (simp del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) apply (rule_tac F="\rv'. (rv' = scast cap_null_cap) = (cteCap oldCTE = NullCap)" @@ -1042,7 +1021,7 @@ lemma setupReplyMaster_ccorres: apply (subst is_aligned_neg_mask_weaken) apply (erule is_aligned_tcb_ptr_to_ctcb_ptr) apply (simp add: ctcb_size_bits_def) - apply (simp add: true_def mask_def to_bool_def) + apply simp apply simp apply (simp add: cmachine_state_relation_def typ_heap_simps' @@ -1072,7 +1051,7 @@ lemma restart_ccorres: apply (ctac(no_vcg) add: tcbSchedEnqueue_ccorres) apply (ctac add: possibleSwitchTo_ccorres) apply (wp weak_sch_act_wf_lift)[1] - apply (wp sts_valid_queues setThreadState_st_tcb)[1] + apply (wp sts_valid_objs' setThreadState_st_tcb)[1] apply (simp add: valid_tcb_state'_def) apply wp apply (wp (once) sch_act_wf_lift, (wp tcb_in_cur_domain'_lift)+) @@ -1084,7 +1063,7 @@ lemma restart_ccorres: apply fastforce apply (rule ccorres_return_Skip) apply (wp hoare_drop_imps) - apply (auto simp: Collect_const_mem mask_def "StrictC'_thread_state_defs") + apply (auto simp: Collect_const_mem mask_def ThreadState_defs) done lemma setNextPC_ccorres: @@ -1214,10 +1193,10 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (simp add: word_bits_def frame_gp_registers_convs n_gpRegisters_def) apply simp apply (rule ccorres_pre_getCurThread) + apply (rename_tac thread) apply (ctac add: postModifyRegisters_ccorres[simplified]) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rvd = ksCurThread s" - in ccorres_when) + apply (rule_tac R="\s. 
thread = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp apply (ctac (no_vcg) add: rescheduleRequired_ccorres) @@ -1247,9 +1226,8 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (fastforce simp: sch_act_wf_weak) apply (wpsimp wp: hoare_drop_imp)+ apply (clarsimp simp add: guard_is_UNIV_def) - apply (clarsimp simp: to_bool_def invs_weak_sch_act_wf invs_valid_objs' + apply (clarsimp simp: invs_weak_sch_act_wf invs_valid_objs' split: if_split - cong: if_cong | rule conjI)+ apply (clarsimp dest!: global'_no_ex_cap simp: invs'_def valid_state'_def | rule conjI)+ done @@ -1285,8 +1263,8 @@ lemma invokeTCB_WriteRegisters_ccorres_helper: lemma doMachineOp_context: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -1295,8 +1273,8 @@ lemma doMachineOp_context: lemma getObject_context: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbContext_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -1355,8 +1333,8 @@ lemma getObject_context: lemma threadGet_context: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) s); ko_at' ko t s; t \ ksCurThread s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_context [where st=st]) apply (rule exI) @@ -1368,8 +1346,8 @@ done lemma asUser_context: "\(x,s) \ fst (asUser (ksCurThread s) f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ ; t \ ksCurThread s\ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (asUser (ksCurThread s) f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (asUser (ksCurThread s) f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. 
atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -1440,7 +1418,7 @@ lemma getMRs_rel_context: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s ; t \ ksCurThread s\ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -1517,15 +1495,15 @@ lemma threadSet_same: by (wpsimp wp: setObject_tcb_strongest getObject_tcb_wp) fastforce lemma asUser_setRegister_ko_at': - "\obj_at' (\tcb'. tcb = tcbArch_update (\_. atcbContextSet ((atcbContextGet (tcbArch tcb'))(r := v)) (tcbArch tcb')) tcb') dst\ + "\obj_at' (\tcb'. tcb = tcbArch_update (\_. atcbContextSet (modify_registers (\regs. regs(r := v)) (atcbContextGet (tcbArch tcb'))) (tcbArch tcb')) tcb') dst\ asUser dst (setRegister r v) \\rv. ko_at' (tcb::tcb) dst\" unfolding asUser_def apply (wpsimp wp: threadSet_same threadGet_wp) - apply (clarsimp simp: setRegister_def simpler_modify_def obj_at'_def) + apply (clarsimp simp: setRegister_def simpler_modify_def obj_at'_def modify_registers_def) done lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' dst and ex_nonz_cap_to' dst and sch_act_simple @@ -1540,6 +1518,7 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: \ {s. buffer_' s = option_to_ptr buffer}) [] (invokeTCB (WriteRegisters dst resume values arch)) (Call invokeTCB_WriteRegisters_'proc)" + supply empty_fail_cond[simp] apply (rule ccorres_gen_asm) apply (erule conjE) apply (cinit lift: n_' dest_' resumeTarget_' buffer_' @@ -1631,15 +1610,14 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_when[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) - apply (rule_tac xf'="\_. 0" in ccorres_call) - apply (rule restart_ccorres) + apply (rule_tac xf'=Corres_C.xfdc in ccorres_call) + apply (rule restart_ccorres) + apply simp apply simp - apply (simp add: xfdc_def) apply simp apply (rule ceqv_refl) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rv = ksCurThread s" - in ccorres_when) + apply (rule_tac R="\s. 
self = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp apply (ctac (no_vcg) add: rescheduleRequired_ccorres) @@ -1683,7 +1661,7 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (clarsimp simp: frame_gp_registers_convs word_less_nat_alt sysargs_rel_def n_frameRegisters_def n_msgRegisters_def split: if_split_asm) - apply (simp add: invs_weak_sch_act_wf invs_valid_objs' invs_queues) + apply (simp add: invs_weak_sch_act_wf invs_valid_objs') apply (fastforce dest!: global'_no_ex_cap simp: invs'_def valid_state'_def) done @@ -1697,7 +1675,7 @@ lemma invokeTCB_Suspend_ccorres: apply (ctac(no_vcg) add: suspend_ccorres[OF cteDeleteOne_ccorres]) apply (rule ccorres_return_CE, simp+)[1] apply wp - apply (clarsimp simp: from_bool_def true_def) + apply clarsimp apply (auto simp: invs'_def valid_state'_def global'_no_ex_cap) done @@ -1711,7 +1689,7 @@ lemma invokeTCB_Resume_ccorres: apply (ctac(no_vcg) add: restart_ccorres) apply (rule ccorres_return_CE, simp+)[1] apply wp - apply (clarsimp simp: from_bool_def true_def) + apply clarsimp done lemma Arch_decodeTransfer_spec: @@ -1788,6 +1766,7 @@ shows (doE reply \ invokeTCB (ReadRegisters target susp n archCp); liftE (replyOnRestart thread reply isCall) odE) (Call invokeTCB_ReadRegisters_'proc)" + supply empty_fail_cond[simp] apply (rule ccorres_gen_asm) using [[goals_limit=1]] apply (cinit' lift: tcb_src_' suspendSource_' n_' call_' simp: invokeTCB_def liftE_bindE bind_assoc) @@ -1813,10 +1792,11 @@ shows apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_getThreadState]) apply (rule ccorres_if_lhs[OF _ ccorres_False[where P'=UNIV]]) apply (rule ccorres_if_lhs) - apply (simp add: Collect_True true_def whileAnno_def del: Collect_const) + apply (simp add: Collect_True whileAnno_def del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupIPCBuffer_ccorres) + apply (rename_tac state destIPCBuffer ipcBuffer) apply (ctac add: setRegister_ccorres) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc2) @@ -1824,7 +1804,7 @@ shows = min (unat n) (unat n_frameRegisters + unat n_gpRegisters)" in ccorres_gen_asm) apply (rule ccorres_split_nothrow_novcg) - apply (rule_tac F="\m s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) (genericTake n + apply (rule_tac F="\m s. obj_at' (\tcb. map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (ARM_HYP_H.frameRegisters @ ARM_HYP_H.gpRegisters)) = reply) target s" in ccorres_mapM_x_while) @@ -1877,23 +1857,23 @@ shows apply (rule bind_apply_cong[OF _ refl]) apply (rule_tac n1="min (unat n_frameRegisters - unat n_msgRegisters) (unat n)" in fun_cong [OF mapM_x_split_append]) - apply (rule_tac P="rva \ Some 0" in ccorres_gen_asm) - apply (subgoal_tac "(ipcBuffer = NULL) = (rva = None)") + apply (rule_tac P="destIPCBuffer \ Some 0" in ccorres_gen_asm) + apply (subgoal_tac "(ipcBuffer = NULL) = (destIPCBuffer = None)") prefer 2 apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.split_asm) apply (simp add: bind_assoc del: Collect_const) apply (rule_tac xf'=i_' and r'="\_ rv. unat rv = min (unat n_frameRegisters) (min (unat n) - (case rva of None \ unat n_msgRegisters + (case destIPCBuffer of None \ unat n_msgRegisters | _ \ unat n_frameRegisters))" in ccorres_split_nothrow_novcg) apply (rule ccorres_Cond_rhs) apply (rule ccorres_rel_imp, - rule_tac F="\m s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) (genericTake n + rule_tac F="\m s. obj_at' (\tcb. 
map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (ARM_HYP_H.frameRegisters @ ARM_HYP_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="unat n_msgRegisters" in ccorres_mapM_x_while') @@ -2002,11 +1982,10 @@ shows apply (rename_tac i_c, rule_tac P="i_c = 0" in ccorres_gen_asm2) apply (simp add: drop_zip del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp del: Collect_const) - apply (rule_tac F="\m s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) (genericTake n + apply (rule_tac F="\m s. obj_at' (\tcb. map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (ARM_HYP_H.frameRegisters @ ARM_HYP_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s \ valid_pspace' s" + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="0" in ccorres_mapM_x_while') apply (clarsimp simp: less_diff_conv drop_zip) apply (rule ccorres_guard_imp2) @@ -2077,11 +2056,11 @@ shows apply (simp add: min_less_iff_disj less_imp_diff_less) apply (simp add: drop_zip n_gpRegisters_def) apply (elim disjE impCE) - apply (clarsimp simp: mapM_x_Nil) + apply (clarsimp simp: mapM_x_Nil cong: ccorres_all_cong) apply (rule ccorres_return_Skip') - apply (simp add: linorder_not_less word_le_nat_alt - drop_zip mapM_x_Nil n_frameRegisters_def - min.absorb1 n_msgRegisters_def) + apply (simp add: linorder_not_less word_le_nat_alt drop_zip + mapM_x_Nil n_frameRegisters_def n_msgRegisters_def + cong: ccorres_all_cong) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') apply simp apply ceqv @@ -2093,7 +2072,7 @@ shows apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp | simp add: valid_tcb_state'_def)+ - apply (clarsimp simp: ThreadState_Running_def mask_def) + apply (clarsimp simp: ThreadState_defs mask_def) apply (rule mapM_x_wp') apply (rule hoare_pre) apply (wp sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift) @@ -2113,15 +2092,15 @@ shows apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size word_less_nat_alt split: if_split_asm dest!: word_unat.Rep_inverse') - apply simp - apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift static_imp_wp + apply (simp add: pred_conj_def) + apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift hoare_weak_lift_imp tcb_in_cur_domain'_lift) apply (simp add: n_frameRegisters_def n_msgRegisters_def guard_is_UNIV_def) apply simp apply (rule mapM_x_wp') apply (rule hoare_pre) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem @@ -2130,7 +2109,7 @@ shows msgMaxLength_def msgLengthBits_def word_less_nat_alt unat_of_nat) apply (wp (once) hoare_drop_imps) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply (vcg exspec=setRegister_modifies) apply simp @@ -2141,7 +2120,7 @@ shows ARM_HYP.badgeRegister_def "StrictC'_register_defs") apply (vcg exspec=lookupIPCBuffer_modifies) - apply (simp add: false_def) + apply simp apply (ctac(no_vcg) add: setThreadState_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) @@ -2150,18 +2129,17 @@ shows apply (simp cong: rev_conj_cong) apply wp apply (wp asUser_inv mapM_wp' getRegister_inv - 
asUser_get_registers[simplified] static_imp_wp)+ + asUser_get_registers[simplified] hoare_weak_lift_imp)+ apply (rule hoare_strengthen_post, rule asUser_get_registers) apply (clarsimp simp: obj_at'_def genericTake_def frame_gp_registers_convs) apply arith - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply (simp add: performTransfer_def) apply wp - apply (simp add: Collect_const_mem "StrictC'_thread_state_defs" - mask_def) + apply (simp add: Collect_const_mem ThreadState_defs mask_def) apply vcg apply (rule_tac Q="\rv. invs' and st_tcb_at' ((=) Restart) thread and tcb_at' target" in hoare_post_imp) @@ -2171,7 +2149,7 @@ shows apply (vcg exspec=suspend_modifies) apply vcg apply (rule conseqPre, vcg, clarsimp) - apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def true_def + apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def split: if_split) done @@ -2236,7 +2214,8 @@ lemma decodeReadRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2247,13 +2226,13 @@ lemma decodeReadRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2280,7 +2259,7 @@ lemma decodeReadRegisters_ccorres: apply wp apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread - "StrictC'_thread_state_defs" word_sless_def word_sle_def + ThreadState_defs word_sless_def word_sle_def mask_eq_iff_w2p word_size isCap_simps ReadRegistersFlags_defs tcb_at_invs' cap_get_tag_isCap capTCBPtr_eq) @@ -2293,7 +2272,7 @@ lemma decodeReadRegisters_ccorres: valid_tcb_state'_def elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] - apply (clarsimp simp: from_bool_def word_and_1 split: if_split) + apply (clarsimp simp: word_and_1 split: if_split) done lemma decodeWriteRegisters_ccorres: @@ -2347,7 +2326,8 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. 
capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2358,13 +2338,13 @@ lemma decodeWriteRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2372,7 +2352,7 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: performInvocation_def) apply (ctac(no_vcg) add: invokeTCB_WriteRegisters_ccorres [where args=args and someNum="unat (args ! 1)"]) - apply (simp add: dc_def[symmetric] o_def) + apply simp apply (rule ccorres_alternative2, rule ccorres_return_CE, simp+) apply (rule ccorres_return_C_errorE, simp+)[1] apply wp[1] @@ -2387,13 +2367,13 @@ lemma decodeWriteRegisters_ccorres: apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: Collect_const_mem ct_in_state'_def pred_tcb_at') apply (simp add: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) - apply (clarsimp simp: valid_cap'_def "StrictC'_thread_state_defs" + apply (clarsimp simp: valid_cap'_def ThreadState_defs mask_eq_iff_w2p word_size rf_sr_ksCurThread WriteRegisters_resume_def word_sle_def word_sless_def numeral_eqs simp del: unsigned_numeral) apply (frule arg_cong[where f="\x. unat (of_nat x :: word32)"], - simp(no_asm_use) only: word_unat.Rep_inverse o_def, + simp(no_asm_use) only: word_unat.Rep_inverse, simp) apply (rule conjI) apply clarsimp @@ -2406,8 +2386,7 @@ lemma decodeWriteRegisters_ccorres: apply (rule disjCI2) apply (clarsimp simp: genericTake_def linorder_not_less) apply (subst hd_conv_nth, clarsimp simp: unat_eq_0) - apply (clarsimp simp: from_bool_def word_and_1 - split: if_split) + apply (clarsimp simp: word_and_1 split: if_split) done lemma excaps_map_Nil: "(excaps_map caps = []) = (caps = [])" @@ -2476,7 +2455,7 @@ lemma decodeCopyRegisters_ccorres: apply (simp add: case_bool_If if_to_top_of_bindE if_to_top_of_bind del: Collect_const cong: if_cong) - apply (simp add: to_bool_def returnOk_bind Collect_True + apply (simp add: returnOk_bind Collect_True ccorres_invocationCatch_Inr performInvocation_def del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2526,7 +2505,7 @@ lemma decodeCopyRegisters_ccorres: elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1] apply (clarsimp simp: word_sle_def CopyRegistersFlags_defs word_sless_def - "StrictC'_thread_state_defs" rf_sr_ksCurThread + ThreadState_defs rf_sr_ksCurThread split: if_split) apply (drule interpret_excaps_eq) apply (clarsimp simp: mask_def excaps_map_def split_def ccap_rights_relation_def @@ -2674,7 +2653,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (simp add: case_Null_If del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_if_lhs) @@ -2695,7 +2674,7 @@ lemma slotCapLongRunningDelete_ccorres: apply vcg apply (simp del: 
Collect_const) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of return_def) @@ -2703,7 +2682,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap from_bool_0 dest!: ccte_relation_ccap_relation) - apply (simp add: from_bool_def false_def true_def + apply (simp add: from_bool_def split: bool.split) apply (auto simp add: longRunningDelete_def isCap_simps split: capability.split)[1] @@ -2711,13 +2690,12 @@ lemma slotCapLongRunningDelete_ccorres: apply (wp hoare_drop_imps isFinalCapability_inv) apply (clarsimp simp: Collect_const_mem guard_is_UNIV_def) apply (rename_tac rv') - apply (case_tac rv'; clarsimp simp: false_def true_def) + apply (case_tac rv'; clarsimp simp: false_def) apply vcg apply (rule conseqPre, vcg, clarsimp) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) - apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap - from_bool_def false_def map_comp_Some_iff + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap map_comp_Some_iff dest!: ccte_relation_ccap_relation) done @@ -2732,7 +2710,7 @@ lemma isValidVTableRoot_spec: {s'. ret__unsigned_long_' s' = from_bool (isValidVTableRoot_C (cap_' s))}" apply vcg apply (clarsimp simp: isValidVTableRoot_C_def if_1_0_0 from_bool_0) - apply (simp add: from_bool_def to_bool_def false_def split: if_split) + apply (simp add: to_bool_def split: if_split) done lemma isValidVTableRoot_conv: @@ -2746,9 +2724,8 @@ lemma isValidVTableRoot_conv: apply (case_tac "cap_get_tag cap' = scast cap_page_directory_cap") apply (clarsimp split: arch_capability.split simp: isCap_simps) apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 - cap_page_directory_cap_lift cap_to_H_def - from_bool_def) - apply (clarsimp simp: to_bool_def split: if_split) + cap_page_directory_cap_lift cap_to_H_def) + apply (clarsimp split: if_split) apply (clarsimp simp: cap_get_tag_isCap cap_get_tag_isCap_ArchObject) apply (simp split: arch_capability.split_asm add: isCap_simps) apply (case_tac "cap_get_tag cap' = scast cap_page_directory_cap") @@ -3060,7 +3037,7 @@ lemma decodeTCBConfigure_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (subgoal_tac "extraCaps \ []") - apply (clarsimp simp: returnOk_def return_def hd_conv_nth false_def) + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) apply fastforce apply clarsimp apply ceqv @@ -3087,7 +3064,7 @@ lemma decodeTCBConfigure_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def - hd_drop_conv_nth2 false_def) + hd_drop_conv_nth2) apply fastforce apply ceqv apply (ctac add: ccorres_injection_handler_csum1 @@ -3173,7 +3150,7 @@ lemma decodeTCBConfigure_ccorres: ptr_val_tcb_ptr_mask2[unfolded mask_def objBits_defs, simplified] tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - StrictC'_thread_state_defs mask_eq_iff_w2p word_size + ThreadState_defs mask_eq_iff_w2p word_size from_bool_all_helper all_ex_eq_helper ucast_ucast_mask objBits_defs) apply (subgoal_tac "args \ [] \ extraCaps \ []") @@ -3209,7 +3186,8 @@ lemma decodeTCBConfigure_ccorres: apply (rule conjI, fastforce) apply (drule interpret_excaps_eq) apply (clarsimp simp: cte_wp_at_ctes_of valid_tcb_state'_def numeral_eqs le_ucast_ucast_le - 
tcb_at_invs' invs_valid_objs' invs_queues invs_sch_act_wf' + tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + invs_pspace_aligned' invs_pspace_distinct' ct_in_state'_def pred_tcb_at'_def obj_at'_def tcb_st_refs_of'_def) apply (erule disjE; simp add: objBits_defs mask_def) apply (clarsimp simp: idButNot_def interpret_excaps_test_null @@ -3222,7 +3200,7 @@ lemma decodeTCBConfigure_ccorres: capTCBPtr_eq tcb_ptr_to_ctcb_ptr_mask tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - StrictC'_thread_state_defs mask_eq_iff_w2p word_size + ThreadState_defs mask_eq_iff_w2p word_size from_bool_all_helper) apply (frule(1) tcb_at_h_t_valid [OF tcb_at_invs']) apply (clarsimp simp: typ_heap_simps numeral_eqs isCap_simps valid_cap'_def capAligned_def @@ -3256,7 +3234,6 @@ lemma decodeSetMCPriority_ccorres: >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetMCPriority_'proc)" supply Collect_const[simp del] - supply dc_simp[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetMCPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3324,8 +3301,7 @@ lemma decodeSetMCPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3361,7 +3337,7 @@ lemma decodeSetMCPriority_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) @@ -3390,7 +3366,7 @@ lemma decodeSetPriority_ccorres: (decodeSetPriority args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetPriority_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3458,8 +3434,7 @@ lemma decodeSetPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3495,7 +3470,7 @@ lemma decodeSetPriority_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def 
capAligned_def interpret_excaps_eq excaps_map_def) @@ -3530,7 +3505,7 @@ lemma decodeSetSchedParams_ccorres: (decodeSetSchedParams args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetSchedParams_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetSchedParams_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3538,9 +3513,9 @@ lemma decodeSetSchedParams_ccorres: val="from_bool (length args < 2 \ length extraCaps = 0)" in ccorres_symb_exec_r_known_rv) apply vcg - apply (auto simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 - split: bool.splits)[1] - apply (unat_arith+)[2] + apply (force simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 + unat_arith_simps + split: bool.splits if_splits) apply ceqv apply clarsimp apply (wpc, @@ -3597,8 +3572,7 @@ lemma decodeSetSchedParams_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3647,16 +3621,15 @@ lemma decodeSetSchedParams_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) apply (intro conjI impI allI) - apply (clarsimp simp: unat_eq_0 le_max_word_ucast_id - thread_control_update_mcp_def thread_control_update_priority_def - cap_get_tag_isCap_unfolded_H_cap isCap_simps - interpret_excaps_eq excaps_map_def)+ - done + by (clarsimp simp: unat_eq_0 le_max_word_ucast_id + thread_control_update_mcp_def thread_control_update_priority_def + cap_get_tag_isCap_unfolded_H_cap isCap_simps + interpret_excaps_eq excaps_map_def)+ lemma decodeSetIPCBuffer_ccorres: "interpret_excaps extraCaps' = excaps_map extraCaps \ @@ -3794,11 +3767,10 @@ lemma decodeSetIPCBuffer_ccorres: valid_mdb_ctes_def no_0_def excaps_map_def elim: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread' dest!: interpret_excaps_eq)[1] - apply (clarsimp simp: option_to_0_def rf_sr_ksCurThread word_sless_def - word_sle_def ThreadState_Restart_def mask_def) + apply (clarsimp simp: option_to_0_def rf_sr_ksCurThread word_sless_def word_sle_def mask_def) apply (rule conjI[rotated], clarsimp+) apply (drule interpret_excaps_eq[rule_format, where n=0], simp add: excaps_map_Nil) - apply (simp add: mask_def "StrictC'_thread_state_defs" excaps_map_def) + apply (simp add: mask_def ThreadState_defs excaps_map_def) apply (clarsimp simp: ccap_rights_relation_def rightsFromWord_wordFromRights cap_get_tag_isCap) apply (frule cap_get_tag_to_H, subst cap_get_tag_isCap, assumption, assumption) @@ -3822,7 +3794,7 @@ lemma bindNotification_ccorres: (Call bindNotification_'proc)" apply (cinit lift: tcb_' ntfnPtr_' simp: bindNotification_def) apply (rule ccorres_symb_exec_l [OF _ 
get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr and tcb_at' tcb" and P'=UNIV + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr and tcb_at' tcb" and P'=UNIV in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) @@ -3842,7 +3814,7 @@ lemma bindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv") + apply (case_tac "ntfnObj ntfn") apply (auto simp: option_to_ctcb_ptr_def obj_at'_def objBits_simps projectKOs bindNTFN_alignment_junk)[4] apply (simp add: carch_state_relation_def typ_heap_simps') @@ -3854,7 +3826,7 @@ lemma bindNotification_ccorres: apply ceqv apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) - apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3[unfolded dc_def]) + apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule (1) rf_sr_tcb_update_no_queue2, @@ -3920,7 +3892,7 @@ lemma decodeUnbindNotification_ccorres: apply (rule ccorres_Guard_Seq) apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getBoundNotification) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. ntfn \ Some 0" in ccorres_cross_over_guard) apply (simp add: bindE_bind_linearise) apply wpc apply (simp add: bindE_bind_linearise[symmetric] @@ -3953,10 +3925,10 @@ lemma decodeUnbindNotification_ccorres: apply (clarsimp simp: isCap_simps) apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (auto simp: ctcb_relation_def typ_heap_simps cap_get_tag_ThreadCap ct_in_state'_def - option_to_ptr_def option_to_0_def ThreadState_Restart_def - mask_def rf_sr_ksCurThread valid_tcb_state'_def - elim!: pred_tcb'_weakenE - dest!: valid_objs_boundNTFN_NULL) + option_to_ptr_def option_to_0_def ThreadState_defs + mask_def rf_sr_ksCurThread valid_tcb_state'_def + elim!: pred_tcb'_weakenE + dest!: valid_objs_boundNTFN_NULL) done lemma nTFN_case_If_ptr: @@ -4030,7 +4002,7 @@ lemma decodeBindNotification_ccorres: apply csymbr apply (clarsimp simp add: if_to_top_of_bind to_bool_eq_0[symmetric] simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (clarsimp simp: to_bool_def throwError_bind invocationCatch_def) + apply (clarsimp simp: throwError_bind invocationCatch_def) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg apply (rule conseqPre, vcg) @@ -4053,7 +4025,7 @@ lemma decodeBindNotification_ccorres: apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def valid_ntfn'_def) apply (case_tac "ntfnObj ntfn", simp_all add: isWaitingNtfn_def option_to_ctcb_ptr_def - false_def true_def split: option.split_asm if_split, + split: option.split_asm if_split, auto simp: neq_Nil_conv tcb_queue_relation'_def tcb_at_not_NULL[symmetric] tcb_at_not_NULL)[1] apply ceqv @@ -4117,8 +4089,8 @@ lemma decodeBindNotification_ccorres: apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def syscall_error_to_H_cases exception_defs) - apply (clarsimp simp add: guard_is_UNIV_def isWaitingNtfn_def from_bool_0 - ThreadState_Restart_def mask_def true_def + apply (clarsimp simp add: guard_is_UNIV_def isWaitingNtfn_def + ThreadState_defs mask_def rf_sr_ksCurThread capTCBPtr_eq) apply (simp add: hd_conv_nth bindE_bind_linearise nTFN_case_If_ptr throwError_bind 
invocationCatch_def) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) @@ -4304,7 +4276,7 @@ lemma decodeSetSpace_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (subgoal_tac "extraCaps \ []") - apply (clarsimp simp: returnOk_def return_def hd_conv_nth false_def) + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) apply fastforce apply clarsimp apply ceqv @@ -4316,7 +4288,7 @@ lemma decodeSetSpace_ccorres: apply (simp add: Collect_False del: Collect_const) apply csymbr apply csymbr - apply (simp add: cnode_cap_case_if cap_get_tag_isCap dc_def[symmetric] + apply (simp add: cnode_cap_case_if cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_throwError @@ -4331,8 +4303,7 @@ lemma decodeSetSpace_ccorres: apply (rule_tac P'="{s. vRootCap = vRootCap_' s}" in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - hd_drop_conv_nth2 false_def) + apply (clarsimp simp: returnOk_def return_def hd_drop_conv_nth2) apply fastforce apply ceqv apply (ctac add: ccorres_injection_handler_csum1 @@ -4442,18 +4413,15 @@ lemma decodeSetSpace_ccorres: rightsFromWord_wordFromRights capTCBPtr_eq tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - "StrictC'_thread_state_defs" mask_eq_iff_w2p word_size) + ThreadState_defs mask_eq_iff_w2p word_size) apply (simp add: word_sle_def cap_get_tag_isCap) apply (subgoal_tac "args \ []") - apply (clarsimp simp: hd_conv_nth) - apply (drule sym, simp, simp add: true_def from_bool_0) - apply (clarsimp simp: objBits_defs) - apply fastforce + apply (fastforce simp: hd_conv_nth objBits_defs) apply clarsimp done lemma invokeTCB_SetTLSBase_ccorres: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. 
rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs') @@ -4464,7 +4432,7 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (cinit lift: thread_' tls_base_') apply (simp add: liftE_def bind_assoc del: Collect_const) - apply (ctac add: setRegister_ccorres[simplified dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_pre_getCurThread) apply (rename_tac cur_thr) apply (rule ccorres_split_nothrow_novcg_dc) @@ -4476,9 +4444,9 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wpsimp wp: hoare_drop_imp simp: guard_is_UNIV_def)+ apply vcg - apply (clarsimp simp: tlsBaseRegister_def ARM_HYP.tlsBaseRegister_def - invs_weak_sch_act_wf invs_queues TLS_BASE_def TPIDRURW_def - split: if_split) + apply (fastforce simp: tlsBaseRegister_def ARM_HYP.tlsBaseRegister_def + invs_weak_sch_act_wf TLS_BASE_def TPIDRURW_def + split: if_split) done lemma decodeSetTLSBase_ccorres: @@ -4526,7 +4494,7 @@ lemma decodeSetTLSBase_ccorres: apply (clarsimp simp: ct_in_state'_def sysargs_rel_n_def n_msgRegisters_def) apply (auto simp: valid_tcb_state'_def elim!: pred_tcb'_weakenE)[1] - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (auto simp: unat_eq_0 le_max_word_ucast_id)+ @@ -4678,8 +4646,7 @@ lemma decodeTCBInvocation_ccorres: dest!: st_tcb_at_idle_thread')[1] apply (simp split: sum.split add: cintr_def intr_and_se_rel_def exception_defs syscall_error_rel_def) - apply (simp add: "StrictC'_thread_state_defs" mask_eq_iff_w2p word_size - cap_get_tag_isCap) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size) apply (simp add: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply clarsimp done diff --git a/proof/crefine/ARM_HYP/VSpace_C.thy b/proof/crefine/ARM_HYP/VSpace_C.thy index 6b47927fac..8b47da5e6e 100644 --- a/proof/crefine/ARM_HYP/VSpace_C.thy +++ b/proof/crefine/ARM_HYP/VSpace_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -95,7 +96,7 @@ proof - apply simp apply simp apply simp - apply (simp split: if_split add: to_bool_def) + apply (simp split: if_split) apply (clarsimp simp: mask_def unlessE_def throwError_def split: if_split) apply (rule ccorres_guard_imp) apply (rule ccorres_return_C) @@ -103,7 +104,7 @@ proof - apply simp apply simp apply simp - apply (simp split: if_split add: to_bool_def) + apply (simp split: if_split) apply (clarsimp split: if_split) apply (simp add: word_less_nat_alt) apply (rule order_le_less_trans, rule pageBitsForSize_le) @@ -261,7 +262,7 @@ lemma loadHWASID_ccorres: apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_gets]) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_findPDForASIDAssert]) apply (rename_tac pd) - apply (rule_tac P="\s. pd_at_asid' pd asid s \ rv = armKSASIDMap (ksArchState s) + apply (rule_tac P="\s. 
pd_at_asid' pd asid s \ asidMap = armKSASIDMap (ksArchState s) \ pd \ ran (option_map snd o armKSASIDMap (ksArchState s) |` (- {asid})) \ option_map snd (armKSASIDMap (ksArchState s) asid) \ {None, Some pd} @@ -375,7 +376,7 @@ lemma storeHWASID_ccorres: apply (subst asid_map_pd_to_hwasids_update, assumption) subgoal by clarsimp apply (rule ext, simp add: pd_pointer_to_asid_slot_def map_comp_def split: if_split) - apply (clarsimp simp: pde_stored_asid_def true_def mask_def[where n="Suc 0"]) + apply (clarsimp simp: pde_stored_asid_def) apply (subst less_mask_eq) apply (rule order_less_le_trans, rule ucast_less) subgoal by simp @@ -467,7 +468,7 @@ lemma invalidateASID_ccorres: apply (subst asid_map_pd_to_hwasids_clear, assumption) subgoal by clarsimp apply (rule ext, simp add: pd_pointer_to_asid_slot_def map_comp_def split: if_split) - subgoal by (clarsimp simp: pde_stored_asid_def false_def mask_def[where n="Suc 0"]) + subgoal by (clarsimp simp: pde_stored_asid_def) apply wp[1] apply (wp findPDForASIDAssert_pd_at_wp2) apply (clarsimp simp: asidLowBits_handy_convs word_sle_def word_sless_def @@ -510,7 +511,7 @@ lemma handleVMFault_ccorres: apply (rule conseqPre) apply vcg apply (clarsimp simp: errstate_def EXCEPTION_FAULT_def EXCEPTION_NONE_def) - apply (wpsimp simp: seL4_Fault_VMFault_lift false_def mask_def)+ + apply (wpsimp simp: seL4_Fault_VMFault_lift mask_def)+ apply (simp add: vm_fault_type_from_H_def Kernel_C.ARMDataAbort_def Kernel_C.ARMPrefetchAbort_def) apply (simp add: ccorres_cond_univ_iff ccorres_cond_empty_iff) apply (rule ccorres_rhs_assoc)+ @@ -527,7 +528,7 @@ lemma handleVMFault_ccorres: apply (rule conseqPre) apply vcg apply (clarsimp simp: errstate_def EXCEPTION_FAULT_def EXCEPTION_NONE_def) - apply (wpsimp simp: seL4_Fault_VMFault_lift true_def mask_def)+ + apply (wpsimp simp: seL4_Fault_VMFault_lift mask_def)+ done lemma unat_asidLowBits [simp]: @@ -781,7 +782,7 @@ lemma ptrFromPAddr_spec: Call ptrFromPAddr_'proc \\ret__ptr_to_void = Ptr (ptrFromPAddr (paddr_' s))\" apply vcg - apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def physBase_def) + apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def) done lemma addrFromPPtr_spec: @@ -789,7 +790,7 @@ lemma addrFromPPtr_spec: Call addrFromPPtr_'proc \\ret__unsigned_long = addrFromPPtr (ptr_val (pptr_' s))\" apply vcg - apply (simp add: addrFromPPtr_def pptrBaseOffset_def pptrBase_def physBase_def) + apply (simp add: addrFromPPtr_def pptrBaseOffset_def pptrBase_def) done lemma addrFromKPPtr_spec: @@ -798,7 +799,7 @@ lemma addrFromKPPtr_spec: \\ret__unsigned_long = addrFromKPPtr (ptr_val (pptr_' s))\" apply vcg apply (simp add: addrFromKPPtr_def kernelELFBaseOffset_def kernelELFPAddrBase_def - kernelELFBase_def physBase_def pptrBase_def mask_def) + kernelELFBase_def pptrBase_def mask_def) done abbreviation @@ -826,7 +827,7 @@ lemma lookupPTSlot_ccorres: apply csymbr apply csymbr apply (rule ccorres_abstract_cleanup) - apply (rule_tac P="(ret__unsigned = scast pde_pde_coarse) = (isPageTablePDE rv)" + apply (rule_tac P="(ret__unsigned = scast pde_pde_coarse) = (isPageTablePDE pde)" in ccorres_gen_asm2) apply (rule ccorres_cond2'[where R=\]) apply (clarsimp simp: Collect_const_mem) @@ -841,9 +842,10 @@ lemma lookupPTSlot_ccorres: apply (simp add: checkPTAt_def bind_liftE_distrib liftE_bindE returnOk_liftE[symmetric]) apply (rule ccorres_stateAssert) - apply (rule_tac P="page_table_at' (ptrFromPAddr (pdeTable rv)) - and ko_at' rv (lookup_pd_slot pd vptr) - and K (isPageTablePDE rv)" and P'=UNIV in 
ccorres_from_vcg_throws) + apply (rule_tac P="page_table_at' (ptrFromPAddr (pdeTable pde)) + and ko_at' pde (lookup_pd_slot pd vptr) and K (isPageTablePDE pde)" + and P'=UNIV + in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def Collect_const_mem lookup_pd_slot_def word_sle_def) @@ -987,7 +989,7 @@ lemma findPDForASID_ccorres: apply (rule_tac P=\ and P' =UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def bind_def NonDetMonad.lift_def) + apply (clarsimp simp: throwError_def return_def bindE_def bind_def Nondet_Monad.lift_def) apply (clarsimp simp: EXCEPTION_NONE_def EXCEPTION_LOOKUP_FAULT_def) apply (simp add: lookup_fault_lift_invalid_root) @@ -1094,7 +1096,7 @@ lemma flushSpace_ccorres: apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws2) apply (clarsimp simp: Collect_const_mem pde_stored_asid_def) apply (simp add: if_split_eq1 to_bool_def) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply csymbr apply (clarsimp simp: pde_stored_asid_def) apply (case_tac "to_bool (stored_asid_valid_CL (pde_pde_invalid_lift stored_hw_asid___struct_pde_C))") @@ -1106,7 +1108,7 @@ lemma flushSpace_ccorres: apply clarsimp apply clarsimp apply (rule ccorres_call, - rule invalidateTranslationASID_ccorres [simplified dc_def xfdc_def], + rule invalidateTranslationASID_ccorres, simp+)[1] apply vcg apply wp+ @@ -1247,15 +1249,15 @@ lemma findFreeHWASID_ccorres: apply (rule_tac xf=hw_asid_offset_' and i=0 and xf_update=hw_asid_offset_'_update and r'=dc and xf'=xfdc and Q=UNIV - and F="\n s. rv = armKSHWASIDTable (ksArchState s) - \ nextASID = armKSNextASID (ksArchState s) - \ valid_arch_state' s" + and F="\n s. hwASIDTable = armKSHWASIDTable (ksArchState s) + \ nextASID = armKSNextASID (ksArchState s) + \ valid_arch_state' s" in ccorres_sequenceE_while_gen') apply (rule ccorres_from_vcg_might_throw) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: rf_sr_armKSNextASID) apply (subst down_cast_same [symmetric], - simp add: is_down_def target_size_def source_size_def word_size)+ + simp add: is_down_def target_size_def source_size_def word_size)+ apply (simp add: ucast_ucast_mask ucast_ucast_add ucast_and_mask ucast_of_nat_small asidInvalid_def @@ -1293,7 +1295,7 @@ lemma findFreeHWASID_ccorres: apply ceqv apply (rule ccorres_assert) apply (rule_tac A="\s. 
nextASID = armKSNextASID (ksArchState s) - \ rv = armKSHWASIDTable (ksArchState s) + \ hwASIDTable = armKSHWASIDTable (ksArchState s) \ valid_arch_state' s \ valid_pde_mappings' s" in ccorres_guard_imp2[where A'=UNIV]) apply (simp add: split_def) @@ -1404,7 +1406,6 @@ lemma armv_contextSwitch_ccorres: apply (cinit lift: cap_pd_' asid_') apply simp apply (ctac(no_vcg) add: getHWASID_ccorres) - apply (fold dc_def) apply (ctac (no_vcg)add: armv_contextSwitch_HWASID_ccorres) apply wp apply clarsimp @@ -1650,7 +1651,7 @@ lemma vcpu_write_reg_ccorres: \ \ \value = v \) hs (vcpuWriteReg vcpuptr reg v) (Call vcpu_write_reg_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit lift: vcpu_' reg_' value_') apply (rule ccorres_assert) apply clarsimp @@ -1665,12 +1666,13 @@ lemma vcpu_write_reg_ccorres: lemma vcpu_save_reg_ccorres: "ccorres dc xfdc (vcpu_at' vcpuptr) (UNIV \ \unat \reg = fromEnum r\ \ \ \vcpu = vcpu_Ptr vcpuptr \) hs (vcpuSaveReg vcpuptr r) (Call vcpu_save_reg_'proc)" - supply dc_simp[simp del] Collect_const[simp del] + supply Collect_const[simp del] apply (cinit lift: reg_' vcpu_') apply (rule ccorres_assert2) apply (rule ccorres_cond_false_seq, simp) apply (ctac add: vcpu_hw_read_reg_ccorres) - apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu, simp del: fun_upd_apply)+ + apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu)+ + apply (simp del: fun_upd_apply) apply (ctac add: vcpuUpdate_vcpuRegs_ccorres) apply wpsimp apply (vcg exspec=vcpu_hw_read_reg_modifies) @@ -1682,11 +1684,11 @@ lemma vcpu_restore_reg_ccorres: "ccorres dc xfdc (vcpu_at' vcpuptr) (UNIV \ \unat \reg = fromEnum r\ \ \ \vcpu = vcpu_Ptr vcpuptr \) hs (vcpuRestoreReg vcpuptr r) (Call vcpu_restore_reg_'proc)" - supply dc_simp[simp del] Collect_const[simp del] + supply Collect_const[simp del] apply (cinit lift: reg_' vcpu_') apply (rule ccorres_assert2) apply (rule ccorres_cond_false_seq, simp) - apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu, simp)+ + apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu)+ apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) apply (ctac add: vcpu_hw_write_reg_ccorres) apply (frule maxBound_is_bound') @@ -1732,7 +1734,6 @@ lemma vcpu_restore_reg_range_ccorres: apply (rule ccorres_grab_asm) apply (cinit lift: start_' end_' vcpu_' simp: whileAnno_def) apply csymbr - apply (clarsimp, fold dc_def) apply (rule ccorres_dc_from_rrel) (* supplying these as dest/intro/simp to proof tactics has no desired effect *) using maxBound_is_bound[of start, simplified fromEnum_maxBound_vcpureg_def] @@ -1769,7 +1770,6 @@ lemma vcpu_save_reg_range_ccorres: apply (rule ccorres_grab_asm) apply (cinit lift: start_' end_' vcpu_' simp: whileAnno_def) apply csymbr - apply (clarsimp, fold dc_def) apply (rule ccorres_dc_from_rrel) (* supplying these as dest/intro/simp to proof tactics has no desired effect *) using maxBound_is_bound[of start, simplified fromEnum_maxBound_vcpureg_def] @@ -1862,8 +1862,7 @@ lemma isIRQActive_ccorres: Let_def cinterrupt_relation_def) apply (drule spec, drule(1) mp) apply (case_tac "intStateIRQTable (ksInterruptState \) irq") - apply (simp add: from_bool_def irq_state_defs Kernel_C.maxIRQ_def - word_le_nat_alt)+ + apply (simp add: irq_state_defs Kernel_C.maxIRQ_def word_le_nat_alt)+ done lemma restore_virt_timer_ccorres: @@ -1912,7 +1911,6 @@ lemma restore_virt_timer_ccorres: apply (rule ccorres_call) apply (rule_tac P="obj_at' (\vcpu'. 
vcpuVPPIMasked vcpu' vppievent_irq.VPPIEventIRQ_VTimer = vcpuVPPIMasked vcpu vppievent_irq.VPPIEventIRQ_VTimer) vcpuptr" in ccorres_cross_over_guard) - apply (fold dc_def) apply (rule maskInterrupt_ccorres, simp) apply simp apply simp @@ -1994,7 +1992,6 @@ lemma save_virt_timer_ccorres: apply (ctac (no_vcg) add: vcpu_write_reg_ccorres) apply (ctac (no_vcg) add: read_cntpct_ccorres) apply clarsimp - apply (fold dc_def) apply (rule vcpuUpdate_vTimer_pcount_ccorres) apply wpsimp+ apply (simp add: vcpureg_eq_use_types[where reg=VCPURegCNTV_CVALhigh, simplified, symmetric] @@ -2022,7 +2019,7 @@ lemma vcpu_disable_ccorres: and (case v of None \ \ | Some new \ vcpu_at' new)) (UNIV \ {s. vcpu_' s = option_to_ptr v}) hs (vcpuDisable v) (Call vcpu_disable_'proc)" - supply if_cong[cong] option.case_cong[cong] + supply if_cong[cong] option.case_cong[cong] empty_fail_cond[simp] apply (cinit lift: vcpu_') apply (ctac (no_vcg) add: dsb_ccorres) apply (rule ccorres_split_nothrow_novcg) @@ -2049,17 +2046,16 @@ lemma vcpu_disable_ccorres: apply (ctac (no_vcg) add: isb_ccorres) apply (ctac (no_vcg) add: setSCTLR_ccorres) apply (ctac (no_vcg) add: setHCR_ccorres) - apply (ctac (no_vcg) add: isb_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: isb_ccorres) apply (wpc; ccorres_rewrite) - apply (rule ccorres_return_Skip[simplified dc_def]) - apply (fold dc_def) + apply (rule ccorres_return_Skip) apply (rename_tac vcpu_ptr) apply (rule_tac P="the v \ 0" in ccorres_gen_asm) apply ccorres_rewrite apply (ctac (no_vcg) add: save_virt_timer_ccorres) apply (ctac (no_vcg) add: maskInterrupt_ccorres) apply (wpsimp wp: hoare_vcg_all_lift)+ - apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem from_bool_def true_def hcrNative_def + apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem hcrNative_def irqVTimerEvent_def IRQ_def) apply (rule refl (* stray ?sctlr *)) apply (wpsimp wp: hoare_vcg_all_lift)+ @@ -2072,6 +2068,7 @@ lemma vcpu_enable_ccorres: and valid_arch_state' and vcpu_at' v) (UNIV \ {s. vcpu_' s = vcpu_Ptr v}) hs (vcpuEnable v) (Call vcpu_enable_'proc)" + supply empty_fail_cond[simp] apply (cinit lift: vcpu_') apply (ctac (no_vcg) add: vcpu_restore_reg_ccorres)+ apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) @@ -2079,9 +2076,9 @@ lemma vcpu_enable_ccorres: apply (ctac (no_vcg) add: setHCR_ccorres) apply (ctac (no_vcg) add: isb_ccorres) apply (rule_tac P="ko_at' vcpu v" in ccorres_cross_over_guard) - apply (ctac pre: ccorres_move_c_guard_vcpu add: set_gic_vcpu_ctrl_hcr_ccorres[unfolded dc_def]) + apply (ctac pre: ccorres_move_c_guard_vcpu add: set_gic_vcpu_ctrl_hcr_ccorres) apply wpsimp+ - apply (fold dc_def, ctac (no_vcg) add: restore_virt_timer_ccorres) + apply (ctac (no_vcg) add: restore_virt_timer_ccorres) apply simp apply wpsimp apply (vcg exspec=set_gic_vcpu_ctrl_hcr_modifies) @@ -2125,13 +2122,14 @@ lemma ccorres_abstract_known: done lemma vcpu_restore_ccorres: - notes upt_Suc[simp del] dc_simp[simp del] Collect_const[simp del] + notes upt_Suc[simp del] Collect_const[simp del] shows "ccorres dc xfdc (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' and valid_arch_state' and vcpu_at' vcpuPtr) (UNIV \ {s. 
vcpu_' s = vcpu_Ptr vcpuPtr}) hs (vcpuRestore vcpuPtr) (Call vcpu_restore_'proc)" + supply empty_fail_cond[simp] apply (cinit lift: vcpu_' simp: whileAnno_def) apply (simp add: doMachineOp_bind uncurry_def split_def doMachineOp_mapM_x)+ apply (clarsimp simp: bind_assoc) @@ -2162,7 +2160,7 @@ lemma vcpu_restore_ccorres: apply (rule_tac P="n \ 63" in ccorres_gen_asm) apply (rule ccorres_move_c_guard_vcpu) apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_lr_ccorres) - apply (clarsimp simp: virq_to_H_def ko_at_vcpu_at'D dc_def upt_Suc) + apply (clarsimp simp: virq_to_H_def ko_at_vcpu_at'D upt_Suc) apply (rule conjI[rotated]) subgoal (* FIXME extract into separate lemma *) by (fastforce simp: word_less_nat_alt unat_of_nat_eq elim: order_less_le_trans) @@ -2180,7 +2178,7 @@ lemma vcpu_restore_ccorres: apply wpsimp apply (vcg exspec=vcpu_restore_reg_range_modifies) apply (wpsimp wp: crunch_wps) - apply (wpsimp simp: guard_is_UNIV_def dc_def upt_Suc ko_at_vcpu_at'D wp: mapM_x_wp_inv + apply (wpsimp simp: guard_is_UNIV_def upt_Suc ko_at_vcpu_at'D wp: mapM_x_wp_inv | rule UNIV_I | wp hoare_vcg_imp_lift hoare_vcg_all_lift hoare_vcg_disj_lift)+ apply (fastforce simp: fromEnum_def enum_vcpureg seL4_VCPUReg_SPSRfiq_def) @@ -2257,7 +2255,7 @@ lemma vgicUpdateLR_ccorres: done lemma vcpu_save_ccorres: - notes dc_simp[simp del] Collect_const[simp del] + notes Collect_const[simp del] shows "ccorres dc xfdc (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' and valid_arch_state' @@ -2294,7 +2292,7 @@ lemma vcpu_save_ccorres: apply (rule ccorres_move_c_guard_vcpu) apply clarsimp apply (ctac (no_vcg) add: vgicUpdate_APR_ccorres) - apply (ctac (no_vcg) add: ccorres_gets_armKSGICVCPUNumListRegs[simplified comp_def]) + apply (ctac (no_vcg) add: ccorres_gets_armKSGICVCPUNumListRegs) apply (rename_tac lr_num lr_num') apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) @@ -2323,7 +2321,7 @@ lemma vcpu_save_ccorres: apply ceqv apply (ctac (no_vcg) add: armv_vcpu_save_ccorres) apply (wpsimp simp: guard_is_UNIV_def wp: mapM_x_wp_inv)+ - apply (simp add: invs_no_cicd'_def valid_arch_state'_def max_armKSGICVCPUNumListRegs_def dc_def) + apply (simp add: invs_no_cicd'_def valid_arch_state'_def max_armKSGICVCPUNumListRegs_def) done lemma vcpu_switch_ccorres_None: @@ -2340,7 +2338,7 @@ lemma vcpu_switch_ccorres_None: apply wpc (* v = None & CurVCPU = None *) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) (* v = None & CurVCPU \ None *) apply ccorres_rewrite apply wpc @@ -2350,15 +2348,15 @@ lemma vcpu_switch_ccorres_None: apply (rule_tac R="\s. 
armHSCurVCPU (ksArchState s) = Some (ccurv, cactive)" in ccorres_cond) apply (clarsimp simp: cur_vcpu_relation_def dest!: rf_sr_ksArchState_armHSCurVCPU) apply (ctac add: vcpu_disable_ccorres) - apply (rule_tac v=x2 in armHSCurVCPU_update_active_ccorres[simplified dc_def]) - apply (simp add: from_bool_def false_def) + apply (rule_tac v=x2 in armHSCurVCPU_update_active_ccorres) + apply simp apply simp apply wp apply clarsimp apply assumption apply clarsimp apply (vcg exspec=vcpu_disable_modifies) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp, rule conjI) apply (fastforce dest: invs_cicd_arch_state' simp: valid_arch_state'_def vcpu_at_is_vcpu' ko_wp_at'_def split: option.splits) by (auto dest!: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def)+ @@ -2382,7 +2380,7 @@ lemma vcpu_switch_ccorres_Some: apply (rule ccorres_cond_false_seq) apply ccorres_rewrite apply (ctac add: vcpu_restore_ccorres) - apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres[unfolded dc_def]) + apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres) apply wp apply clarsimp apply (vcg exspec=vcpu_restore_modifies) @@ -2391,7 +2389,7 @@ lemma vcpu_switch_ccorres_Some: apply (rename_tac ccurv cactive) apply (rule_tac R="\s. (armHSCurVCPU \ ksArchState) s = Some (ccurv, cactive)" in ccorres_cond) apply (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU - simp: Collect_const_mem cur_vcpu_relation_def from_bool_def true_def + simp: Collect_const_mem cur_vcpu_relation_def split: option.splits) (* new \ CurVCPU or equivalently v \ ccurv *) apply (rule ccorres_cond_true) @@ -2399,7 +2397,7 @@ lemma vcpu_switch_ccorres_Some: apply (rule ccorres_cond_true_seq) apply (ctac add: vcpu_save_ccorres) apply (ctac add: vcpu_restore_ccorres) - apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres[unfolded dc_def]) + apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres) apply wp apply clarsimp apply (vcg exspec=vcpu_restore_modifies) @@ -2417,20 +2415,20 @@ lemma vcpu_switch_ccorres_Some: apply (rule ccorres_rhs_assoc) apply (ctac (no_vcg) add: isb_ccorres) apply (ctac (no_vcg) add: vcpu_enable_ccorres) - apply (rule_tac v="(v, cactive)" in armHSCurVCPU_update_active_ccorres[simplified dc_def]) - apply (simp add: from_bool_def true_def) + apply (rule_tac v="(v, cactive)" in armHSCurVCPU_update_active_ccorres) + apply simp apply simp apply wp apply (wpsimp wp: hoare_vcg_conj_lift vcpuSave_invs_no_cicd' vcpuSave_typ_at') (* ccactive =true *) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) (* last goal *) apply simp apply (rule conjI - | clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU - simp: Collect_const_mem cur_vcpu_relation_def from_bool_def true_def - | fastforce dest: invs_cicd_arch_state' split: option.splits - simp: valid_arch_state'_def vcpu_at_is_vcpu' ko_wp_at'_def Collect_const_mem)+ + | clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU + simp: Collect_const_mem cur_vcpu_relation_def + | fastforce dest: invs_cicd_arch_state' split: option.splits + simp: valid_arch_state'_def vcpu_at_is_vcpu' ko_wp_at'_def Collect_const_mem)+ done lemma vcpu_switch_ccorres: @@ -2464,11 +2462,11 @@ lemma setVMRoot_ccorres: apply (simp add: cap_case_isPageDirectoryCap cong: if_cong) apply (rule ccorres_cond_true_seq) apply (rule ccorres_rhs_assoc) - apply (simp add: throwError_def catch_def dc_def[symmetric]) + apply (simp add: throwError_def catch_def) apply (rule ccorres_rhs_assoc)+ apply 
(rule ccorres_h_t_valid_armUSGlobalPD) apply csymbr - apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) @@ -2488,11 +2486,11 @@ lemma setVMRoot_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armUSGlobalPD) apply csymbr - apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (simp add: cap_case_isPageDirectoryCap) @@ -2516,31 +2514,31 @@ lemma setVMRoot_ccorres: apply (simp add: whenE_def throwError_def checkPDNotInASIDMap_def checkPDASIDMapMembership_def) apply (rule ccorres_stateAssert) - apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armUSGlobalPD) apply csymbr apply (rule ccorres_add_return2) apply (ctac(no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (simp add: whenE_def returnOk_def) - apply (ctac (no_vcg) add: armv_contextSwitch_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: armv_contextSwitch_ccorres) apply (rename_tac tcb) apply simp apply clarsimp apply (simp add: checkPDNotInASIDMap_def checkPDASIDMapMembership_def) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc)+ - apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState) apply (rule ccorres_h_t_valid_armUSGlobalPD) apply csymbr apply (rule ccorres_add_return2) apply (ctac(no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply simp @@ -2559,16 +2557,13 @@ lemma setVMRoot_ccorres: apply (clarsimp simp: obj_at'_def typ_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: size_of_def cte_level_bits_def tcbVTableSlot_def tcb_cnode_index_defs - ccap_rights_relation_def cap_rights_to_H_def - to_bool_def true_def allRights_def - mask_def[where n="Suc 0"] + ccap_rights_relation_def cap_rights_to_H_def allRights_def cte_at_tcb_at_16' addrFromPPtr_def) apply (clarsimp simp: cap_get_tag_isCap_ArchObject2 dest!: isCapDs) apply (clarsimp simp: cap_get_tag_isCap_ArchObject[symmetric] cap_lift_page_directory_cap cap_to_H_def cap_page_directory_cap_lift_def - to_bool_def elim!: ccap_relationE split: if_split_asm) done @@ -2589,9 +2584,9 @@ lemma setVMRootForFlush_ccorres: del: Collect_const) apply (rule ccorres_if_lhs) apply (rule_tac P="(capPDIsMapped_CL (cap_page_directory_cap_lift threadRoot) = 0) - = (capPDMappedASID (capCap rva) = None) + = (capPDMappedASID (capCap rv) = None) \ capPDBasePtr_CL (cap_page_directory_cap_lift threadRoot) - = capPDBasePtr (capCap rva)" in ccorres_gen_asm2) + = capPDBasePtr (capCap rv)" in ccorres_gen_asm2) apply (rule ccorres_rhs_assoc | csymbr | simp add: Collect_True del: Collect_const)+ apply (rule ccorres_split_throws) apply (rule ccorres_return_C, simp+) @@ -2603,7 +2598,7 @@ lemma 
setVMRootForFlush_ccorres: apply (ctac (no_vcg)add: armv_contextSwitch_ccorres) apply (ctac add: ccorres_return_C) apply wp - apply (simp add: true_def from_bool_def) + apply simp apply vcg apply (rule conseqPre, vcg) apply (simp add: Collect_const_mem) @@ -2613,7 +2608,7 @@ lemma setVMRootForFlush_ccorres: apply vcg apply (clarsimp simp: Collect_const_mem word_sle_def ccap_rights_relation_def cap_rights_to_H_def - mask_def[where n="Suc 0"] true_def to_bool_def + mask_def[where n="Suc 0"] allRights_def size_of_def cte_level_bits_def tcbVTableSlot_def Kernel_C.tcbVTable_def invs'_invs_no_cicd) apply (clarsimp simp: rf_sr_ksCurThread ptr_add_assertion_positive) @@ -2622,8 +2617,7 @@ lemma setVMRootForFlush_ccorres: apply (clarsimp simp: rf_sr_ksCurThread ptr_val_tcb_ptr_mask' [OF tcb_at_invs']) apply (frule cte_at_tcb_at_16'[OF tcb_at_invs'], clarsimp simp: cte_wp_at_ctes_of) apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+) - apply (clarsimp simp: false_def true_def from_bool_def - typ_heap_simps') + apply (clarsimp simp: typ_heap_simps') apply (case_tac "isArchObjectCap rv \ isPageDirectoryCap (capCap rv)") apply (clarsimp simp: isCap_simps(2) cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_page_directory_cap_lift cap_to_H_def @@ -2675,15 +2669,6 @@ definition | ARM_HYP_H.flush_type.CleanInvalidate \ (label = Kernel_C.ARMPageCleanInvalidate_Data \ label = Kernel_C.ARMPDCleanInvalidate_Data) | ARM_HYP_H.flush_type.Unify \ (label = Kernel_C.ARMPageUnify_Instruction \ label = Kernel_C.ARMPDUnify_Instruction)" -lemma ccorres_seq_IF_False: - "ccorres_underlying sr \ r xf arrel axf G G' hs a (IF False THEN x ELSE y FI ;; c) = ccorres_underlying sr \ r xf arrel axf G G' hs a (y ;; c)" - by simp - -lemma ptrFromPAddr_mask6_simp[simp]: - "ptrFromPAddr ps && mask 6 = ps && mask 6" - unfolding ptrFromPAddr_def pptrBaseOffset_def pptrBase_def ARM_HYP.physBase_def - by (subst add.commute, subst mask_add_aligned ; simp add: is_aligned_def) - lemma doFlush_ccorres: "ccorres dc xfdc (\s. 
vs \ ve \ ps \ ps + (ve - vs) \ vs && mask 6 = ps && mask 6 \ \ahyp version translates ps into kernel virtual before flushing\ @@ -2708,7 +2693,7 @@ lemma doFlush_ccorres: apply (rule_tac xf'=invLabel___int_' in ccorres_abstract, ceqv, rename_tac invlabel) apply (rule_tac P="flushtype_relation t invlabel" in ccorres_gen_asm2) apply (simp only: dmo_flushtype_case Let_def) - apply (wpc ; simp add: dc_def[symmetric]) + apply (wpc ; simp) apply (rule ccorres_cond_true) apply (ctac (no_vcg) add: cleanCacheRange_RAM_ccorres) apply (rule ccorres_cond_false) @@ -2724,9 +2709,8 @@ lemma doFlush_ccorres: apply (rule ccorres_cond_true) apply (simp add: empty_fail_cleanCacheRange_PoU empty_fail_dsb empty_fail_invalidateCacheRange_I empty_fail_branchFlushRange empty_fail_isb - doMachineOp_bind) + doMachineOp_bind empty_fail_cond) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def) apply (ctac (no_vcg) add: cleanCacheRange_PoU_ccorres) apply (ctac (no_vcg) add: dsb_ccorres) apply (ctac (no_vcg) add: invalidateCacheRange_I_ccorres) @@ -2739,6 +2723,7 @@ lemma doFlush_ccorres: Kernel_C.ARMPageInvalidate_Data_def Kernel_C.ARMPDInvalidate_Data_def Kernel_C.ARMPageCleanInvalidate_Data_def Kernel_C.ARMPDCleanInvalidate_Data_def Kernel_C.ARMPageUnify_Instruction_def Kernel_C.ARMPDUnify_Instruction_def + ptrFromPAddr_mask_6 dest: ghost_assertion_size_logic[rotated] split: ARM_HYP_H.flush_type.splits) done @@ -2772,7 +2757,7 @@ lemma performPageFlush_ccorres: apply (ctac (no_vcg) add: setVMRootForFlush_ccorres) apply (ctac (no_vcg) add: doFlush_ccorres) apply (rule ccorres_cond2[where R=\]) - apply (simp add: from_bool_def split: if_split bool.splits) + apply (simp split: if_split bool.splits) apply (rule ccorres_pre_getCurThread) apply (ctac add: setVMRoot_ccorres) apply (rule ccorres_return_Skip) @@ -2783,7 +2768,7 @@ lemma performPageFlush_ccorres: apply (rule ccorres_return_Skip) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply wpsimp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: order_less_imp_le) @@ -2812,12 +2797,12 @@ lemma setRegister_ccorres: (asUser thread (setRegister reg val)) (Call setRegister_'proc)" apply (cinit' lift: thread_' reg_' w_') - apply (simp add: asUser_def dc_def[symmetric] split_def split del: if_split) + apply (simp add: asUser_def split_def) apply (rule ccorres_pre_threadGet) apply (rule ccorres_Guard) apply (simp add: setRegister_def simpler_modify_def exec_select_f_singleton) - apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = rv" - in threadSet_ccorres_lemma2 [unfolded dc_def]) + apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = uc" + in threadSet_ccorres_lemma2) apply vcg apply (clarsimp simp: setRegister_def HaskellLib_H.runState_def simpler_modify_def typ_heap_simps) @@ -2831,7 +2816,7 @@ lemma setRegister_ccorres: apply (rule ball_tcb_cte_casesI, simp+) apply (clarsimp simp: ctcb_relation_def ccontext_relation_def atcbContextSet_def atcbContextGet_def - carch_tcb_relation_def + carch_tcb_relation_def cregs_relation_def split: if_split) apply (clarsimp simp: Collect_const_mem register_from_H_sless register_from_H_less) @@ -2848,8 +2833,6 @@ lemma msgRegisters_ccorres: (* usually when we call setMR directly, we mean to only set a registers, which will fit in actual registers *) lemma setMR_as_setRegister_ccorres: - notes dc_simp[simp del] - shows "ccorres (\rv rv'. 
rv' = of_nat offset + 1) ret__unsigned_' (tcb_at' thread and K (TCB_H.msgRegisters ! offset = reg \ offset < length msgRegisters)) (UNIV \ \\reg = val\ @@ -2866,8 +2849,8 @@ lemma setMR_as_setRegister_ccorres: apply (ctac add: setRegister_ccorres) apply (rule ccorres_from_vcg_throws[where P'=UNIV and P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setRegister_modifies) apply (clarsimp simp: n_msgRegisters_def length_of_msgRegisters not_le conj_commute) apply (subst msgRegisters_ccorres[symmetric]) @@ -2949,7 +2932,7 @@ lemma performPageDirectoryInvocationFlush_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\]) - apply (simp add: from_bool_def split: if_split bool.splits) + apply (simp split: if_split bool.splits) apply (rule ccorres_pre_getCurThread) apply (ctac add: setVMRoot_ccorres) apply (rule ccorres_return_Skip) @@ -3010,7 +2993,6 @@ lemma flushPage_ccorres: apply (rule ccorres_cond2[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) apply (rule ccorres_pre_getCurThread) - apply (fold dc_def) apply (ctac add: setVMRoot_ccorres) apply (rule ccorres_return_Skip) apply (wp | simp add: cur_tcb'_def[symmetric])+ @@ -3024,8 +3006,8 @@ lemma flushPage_ccorres: apply (wp hoare_drop_imps setVMRootForFlush_invs') apply (clarsimp simp: Collect_const_mem word_sle_def) apply (rule conjI, clarsimp+) - apply (clarsimp simp: pde_stored_asid_def to_bool_def cong: conj_cong - ucast_ucast_mask) + apply (clarsimp simp: pde_stored_asid_def to_bool_def ucast_ucast_mask + cong: conj_cong) apply (drule is_aligned_neg_mask_eq) apply (simp add: pde_pde_invalid_lift_def pde_lift_def mask_def[where n=8] word_bw_assocs mask_def[where n=pageBits]) @@ -3273,8 +3255,7 @@ lemma unmapPage_ccorres: (unmapPage sz asid vptr pptr) (Call unmapPage_'proc)" apply (rule ccorres_gen_asm) apply (cinit lift: page_size_' asid_' vptr_' pptr_') - apply (simp add: ignoreFailure_liftM ptr_add_assertion_positive - Collect_True + apply (simp add: ignoreFailure_liftM ptr_add_assertion_positive Collect_True del: Collect_const) apply ccorres_remove_UNIV_guard apply csymbr @@ -3286,12 +3267,12 @@ lemma unmapPage_ccorres: apply (rule ccorres_splitE_novcg[where r'=dc and xf'=xfdc]) \ \ARMSmallPage\ apply (rule ccorres_Cond_rhs) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric]) + apply (simp add: gen_framesize_to_H_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac pt_slot pt_slot') - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) @@ -3306,7 +3287,7 @@ lemma unmapPage_ccorres: split: if_split_asm pte.split_asm) apply (rule ceqv_refl) apply (simp add: unfold_checkMapping_return liftE_liftM - Collect_const[symmetric] dc_def[symmetric] + Collect_const[symmetric] del: Collect_const) apply (rule ccorres_handlers_weaken2) apply csymbr @@ -3314,8 +3295,7 @@ lemma unmapPage_ccorres: apply (rule storePTE_Basic_ccorres) apply (simp add: cpte_relation_def Let_def) apply csymbr - apply simp - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply wp apply (simp add: guard_is_UNIV_def) apply wp @@ -3329,18 +3309,17 @@ lemma unmapPage_ccorres: apply (vcg 
exspec=lookupPTSlot_modifies) \ \ARMLargePage\ apply (rule ccorres_Cond_rhs) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric]) + apply (simp add: gen_framesize_to_H_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac ptSlot lookupPTSlot_ret) - apply (simp add: Collect_False dc_def[symmetric] del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, - rule checkMappingPPtr_pte_ccorres) + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pte_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') subgoal by (simp add: cpte_relation_def Let_def pte_lift_def @@ -3348,7 +3327,7 @@ lemma unmapPage_ccorres: pte_pte_small_lift_def split: if_split_asm pte.split_asm) apply (rule ceqv_refl) - apply (simp add: liftE_liftM dc_def[symmetric] + apply (simp add: liftE_liftM mapM_discarded whileAnno_def ARMLargePageBits_def ARMSmallPageBits_def Collect_False unfold_checkMapping_return word_sle_def del: Collect_const) @@ -3382,7 +3361,7 @@ lemma unmapPage_ccorres: apply csymbr apply (rule ccorres_move_c_guard_pte ccorres_move_array_assertion_pte_16)+ apply (rule ccorres_add_return2, - ctac(no_vcg) add: cleanCacheRange_PoU_ccorres[unfolded dc_def]) + ctac(no_vcg) add: cleanCacheRange_PoU_ccorres) apply (rule ccorres_move_array_assertion_pte_16, rule ccorres_return_Skip') apply wp apply (rule_tac P="is_aligned ptSlot 7" in hoare_gen_asm) @@ -3425,15 +3404,14 @@ lemma unmapPage_ccorres: apply (rule ccorres_Cond_rhs) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] - liftE_liftM + apply (simp add: gen_framesize_to_H_def liftE_liftM del: Collect_const) apply (simp split: if_split, rule conjI[rotated], rule impI, rule ccorres_empty, rule impI) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pde_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') @@ -3441,16 +3419,16 @@ lemma unmapPage_ccorres: Let_def pde_tag_defs isSectionPDE_def split: pde.split_asm if_split_asm) apply (rule ceqv_refl) - apply (simp add: unfold_checkMapping_return Collect_False dc_def[symmetric] - del: Collect_const) - apply (rule ccorres_handlers_weaken2, simp) + apply (simp add: unfold_checkMapping_return Collect_False + del: Collect_const) + apply (rule ccorres_handlers_weaken2) apply csymbr apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def pde_lift_pde_invalid) apply csymbr - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp @@ -3465,8 +3443,7 @@ lemma unmapPage_ccorres: apply (case_tac "pd = pde_Ptr (lookup_pd_slot pdPtr vptr)") prefer 2 apply (simp, rule ccorres_empty) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] - liftE_liftM mapM_discarded whileAnno_def + apply (simp add: gen_framesize_to_H_def liftE_liftM mapM_discarded whileAnno_def del: Collect_const) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule 
ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, @@ -3516,7 +3493,7 @@ lemma unmapPage_ccorres: apply csymbr apply (rule ccorres_move_c_guard_pde ccorres_move_array_assertion_pde_16)+ apply (rule ccorres_add_return2) - apply (ctac(no_vcg) add: cleanCacheRange_PoU_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: cleanCacheRange_PoU_ccorres) apply (rule ccorres_move_array_assertion_pde_16, rule ccorres_return_Skip') apply wp apply (rule_tac P="is_aligned pdPtr pdBits" in hoare_gen_asm) @@ -3554,14 +3531,14 @@ lemma unmapPage_ccorres: apply (rule ccorres_empty[where P=\]) apply ceqv apply (simp add: liftE_liftM) - apply (ctac add: flushPage_ccorres[unfolded dc_def]) + apply (ctac add: flushPage_ccorres) apply ((wp lookupPTSlot_inv mapM_storePTE_invs[unfolded swp_def] mapM_storePDE_invs[unfolded swp_def] | wpc | simp)+)[1] apply (simp add: guard_is_UNIV_def) apply (simp add: throwError_def) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply (simp add: lookup_pd_slot_def Let_def table_bits_defs) apply (wp hoare_vcg_const_imp_lift_R findPDForASID_valid_offset'[simplified table_bits_defs] @@ -4067,13 +4044,13 @@ lemma performASIDPoolInvocation_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule_tac ccorres_split_nothrow [where r'=dc and xf'=xfdc]) apply (simp add: updateCap_def) - apply (rule_tac A="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPDCap rv \ asid \ mask asid_bits)" + apply (rule_tac A="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPDCap oldcap \ asid \ mask asid_bits)" and A'=UNIV in ccorres_guard_imp2) apply (rule ccorres_pre_getCTE) - apply (rule_tac P="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPDCap rv \ asid \ mask asid_bits) - and cte_wp_at' ((=) rva) ctSlot" + apply (rule_tac P="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPDCap oldcap \ asid \ mask asid_bits) + and cte_wp_at' ((=) rv) ctSlot" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of) @@ -4106,7 +4083,7 @@ lemma performASIDPoolInvocation_ccorres: apply (simp add: cte_to_H_def c_valid_cte_def) apply (simp add: cap_page_directory_cap_lift) apply (simp (no_asm) add: cap_to_H_def) - apply (simp add: to_bool_def asid_bits_def le_mask_imp_and_mask word_bits_def) + apply (simp add: asid_bits_def le_mask_imp_and_mask word_bits_def) apply (erule (1) cap_lift_PDCap_Base) apply simp apply (erule_tac t = s' in ssubst) @@ -4141,7 +4118,7 @@ lemma performASIDPoolInvocation_ccorres: apply (wp getASID_wp) apply simp apply wp - apply (simp add: o_def inv_def) + apply (simp add: inv_def) apply (wp getASID_wp) apply simp apply (rule empty_fail_getObject) @@ -4202,21 +4179,20 @@ lemma flushTable_ccorres: apply (rule_tac R=\ in ccorres_cond2) apply (clarsimp simp: from_bool_0 Collect_const_mem) apply (rule ccorres_pre_getCurThread) - apply (ctac (no_vcg) add: setVMRoot_ccorres [unfolded dc_def]) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (wp static_imp_wp) + apply (ctac (no_vcg) add: setVMRoot_ccorres) + apply (rule ccorres_return_Skip) + apply (wp hoare_weak_lift_imp) apply clarsimp apply (rule_tac Q="\_ s. 
invs' s \ cur_tcb' s" in hoare_post_imp) apply (simp add: invs'_invs_no_cicd cur_tcb'_def) apply (wp mapM_x_wp_inv getPTE_wp | wpc)+ - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply wp apply clarsimp apply (strengthen invs_valid_pde_mappings') apply (wp setVMRootForFlush_invs' hoare_drop_imps) apply (clarsimp simp:Collect_const_mem) - apply (simp add: pde_pde_invalid_lift_def - pde_lift_def pde_stored_asid_def to_bool_def) + apply (simp add: pde_pde_invalid_lift_def pde_lift_def pde_stored_asid_def to_bool_def) done lemma performPageTableInvocationMap_ccorres: diff --git a/proof/crefine/ARM_HYP/Wellformed_C.thy b/proof/crefine/ARM_HYP/Wellformed_C.thy index d34de5fd58..54e922c584 100644 --- a/proof/crefine/ARM_HYP/Wellformed_C.thy +++ b/proof/crefine/ARM_HYP/Wellformed_C.thy @@ -42,8 +42,13 @@ abbreviation pt_Ptr :: "32 word \ (pte_C[512]) ptr" where "pt_Ptr == Ptr" abbreviation pd_Ptr :: "32 word \ (pde_C[2048]) ptr" where "pd_Ptr == Ptr" + +declare seL4_VCPUReg_Num_def[code] +value_type num_vcpu_regs = "unat seL4_VCPUReg_Num" + abbreviation - regs_C_Ptr :: "addr \ (machine_word_len word[42]) ptr" where "regs_C_Ptr \ Ptr" + regs_C_Ptr :: "addr \ (machine_word[num_vcpu_regs]) ptr" where"regs_C_Ptr \ Ptr" + abbreviation vgic_lr_C_Ptr :: "addr \ (virq_C[64]) ptr" where "vgic_lr_C_Ptr \ Ptr" abbreviation @@ -54,6 +59,9 @@ abbreviation abbreviation word_Ptr :: "addr \ machine_word ptr" where "word_Ptr \ Ptr" +type_synonym registers_count = 20 +type_synonym registers_array = "machine_word[registers_count]" + lemma halt_spec: "Gamma \ {} Call halt_'proc {}" apply (rule hoare_complete) @@ -165,10 +173,6 @@ where abbreviation "ep_queue_relation \ tcb_queue_relation tcbEPNext_C tcbEPPrev_C" -abbreviation - "sched_queue_relation \ tcb_queue_relation tcbSchedNext_C tcbSchedPrev_C" - - definition wordSizeCase :: "'a \ 'a \ 'a" where "wordSizeCase a b \ (if bitSize (undefined::word32) = 32 @@ -271,63 +275,6 @@ definition | Some cap \ Some \ cap_CL = cap, cteMDBNode_CL = mdb_node_lift (cteMDBNode_C c) \" -lemma to_bool_false [simp]: "\ to_bool false" - by (simp add: to_bool_def false_def) - -(* this is slightly weird, but the bitfield generator - masks everything with the expected bit length. - So we do that here too. 
*) -definition - to_bool_bf :: "'a::len word \ bool" where - "to_bool_bf w \ (w && mask 1) = 1" - -lemma to_bool_bf_mask1 [simp]: - "to_bool_bf (mask (Suc 0))" - by (simp add: mask_def to_bool_bf_def) - -lemma to_bool_bf_0 [simp]: "\to_bool_bf 0" - by (simp add: to_bool_bf_def) - -lemma to_bool_bf_1 [simp]: "to_bool_bf 1" - by (simp add: to_bool_bf_def mask_def) - -lemma to_bool_bf_false [simp]: - "\to_bool_bf false" - by (simp add: false_def) - -lemma to_bool_bf_true [simp]: - "to_bool_bf true" - by (simp add: true_def) - -lemma to_bool_to_bool_bf: - "w = false \ w = true \ to_bool_bf w = to_bool w" - by (auto simp: false_def true_def to_bool_def to_bool_bf_def mask_def) - -lemma to_bool_bf_mask_1 [simp]: - "to_bool_bf (w && mask (Suc 0)) = to_bool_bf w" - by (simp add: to_bool_bf_def) - -lemma to_bool_bf_and [simp]: - "to_bool_bf (a && b) = (to_bool_bf a \ to_bool_bf (b::word32))" - apply (clarsimp simp: to_bool_bf_def) - apply (rule iffI) - apply (subst (asm) bang_eq) - apply (simp add: word_size) - apply (rule conjI) - apply (rule word_eqI) - apply (auto simp add: word_size)[1] - apply (rule word_eqI) - apply (auto simp add: word_size)[1] - apply clarsimp - apply (rule word_eqI) - apply (subst (asm) bang_eq)+ - apply (auto simp add: word_size)[1] - done - -lemma to_bool_bf_to_bool_mask: - "w && mask (Suc 0) = w \ to_bool_bf w = to_bool (w::word32)" - by (metis One_nat_def mask_eq1_nochoice fold_eq_0_to_bool mask_1 to_bool_bf_0 to_bool_bf_def) - definition mdb_node_to_H :: "mdb_node_CL \ mdbnode" where @@ -513,31 +460,31 @@ lemma maxDom_sgt_0_maxDomain: lemma num_domains_calculation: "num_domains = numDomains" - unfolding num_domains_def by eval + unfolding num_domains_val by eval private lemma num_domains_card_explicit: "num_domains = CARD(num_domains)" - by (simp add: num_domains_def) + by (simp add: num_domains_val) lemmas num_domains_index_updates = - index_update[where 'b=num_domains, folded num_domains_card_explicit num_domains_def, + index_update[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, simplified num_domains_calculation] - index_update2[where 'b=num_domains, folded num_domains_card_explicit num_domains_def, + index_update2[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, simplified num_domains_calculation] (* C ArrayGuards will throw these at us and there is no way to avoid a proof of being less than a specific number expressed as a word, so we must introduce these. However, being explicit means lack of discipline can lead to a violation. 
*) -lemma numDomains_less_numeric_explicit[simplified num_domains_def One_nat_def]: +lemma numDomains_less_numeric_explicit[simplified num_domains_val One_nat_def]: "x < Kernel_Config.numDomains \ x < num_domains" by (simp add: num_domains_calculation) -lemma numDomains_less_unat_ucast_explicit[simplified num_domains_def]: +lemma numDomains_less_unat_ucast_explicit[simplified num_domains_val]: "unat x < Kernel_Config.numDomains \ (ucast (x::domain) :: machine_word) < of_nat num_domains" apply (rule word_less_nat_alt[THEN iffD2]) apply transfer apply simp - apply (drule numDomains_less_numeric_explicit, simp add: num_domains_def) + apply (drule numDomains_less_numeric_explicit, simp add: num_domains_val) done lemmas maxDomain_le_unat_ucast_explicit = @@ -562,7 +509,7 @@ value_type num_tcb_queues = "numDomains * numPriorities" lemma num_tcb_queues_calculation: "num_tcb_queues = numDomains * numPriorities" - unfolding num_tcb_queues_def by eval + unfolding num_tcb_queues_val by eval (* Input abbreviations for API object types *) diff --git a/proof/crefine/Move_C.thy b/proof/crefine/Move_C.thy index a0b3c0f452..e7198d4ea2 100644 --- a/proof/crefine/Move_C.thy +++ b/proof/crefine/Move_C.thy @@ -143,180 +143,6 @@ lemma cteSizeBits_le_cte_level_bits[simp]: "cteSizeBits \ cte_level_bits" by (simp add: cte_level_bits_def cteSizeBits_def) -lemma msb_le_mono: - fixes v w :: "'a::len word" - shows "v \ w \ msb v \ msb w" - by (simp add: msb_big) - -lemma neg_msb_le_mono: - fixes v w :: "'a::len word" - shows "v \ w \ \ msb w \ \ msb v" - by (simp add: msb_big) - -lemmas msb_less_mono = msb_le_mono[OF less_imp_le] -lemmas neg_msb_less_mono = neg_msb_le_mono[OF less_imp_le] - -lemma word_sless_iff_less: - "\ \ msb v; \ msb w \ \ v v < w" - by (simp add: word_sless_alt sint_eq_uint word_less_alt) - -lemmas word_sless_imp_less = word_sless_iff_less[THEN iffD1, rotated 2] -lemmas word_less_imp_sless = word_sless_iff_less[THEN iffD2, rotated 2] - -lemma word_sle_iff_le: - "\ \ msb v; \ msb w \ \ v <=s w \ v \ w" - by (simp add: word_sle_def sint_eq_uint word_le_def) - -lemmas word_sle_imp_le = word_sle_iff_le[THEN iffD1, rotated 2] -lemmas word_le_imp_sle = word_sle_iff_le[THEN iffD2, rotated 2] - -lemma to_bool_if: - "(if w \ 0 then 1 else 0) = (if to_bool w then 1 else 0)" - by (auto simp: to_bool_def) - -(* FIXME: move to Word_Lib *) -lemma word_upcast_shiftr: - assumes "LENGTH('a::len) \ LENGTH('b::len)" - shows "UCAST('a \ 'b) (w >> n) = UCAST('a \ 'b) w >> n" - apply (intro word_eqI impI iffI; clarsimp simp: word_size nth_shiftr nth_ucast) - apply (drule test_bit_size) - using assms by (simp add: word_size) - -lemma word_upcast_neg_msb: - "LENGTH('a::len) < LENGTH('b::len) \ \ msb (UCAST('a \ 'b) w)" - unfolding ucast_def msb_word_of_int - by clarsimp (metis Suc_pred bit_imp_le_length lens_gt_0(2) not_less_eq) - -(* FIXME: move to Word_Lib *) -lemma word_upcast_0_sle: - "LENGTH('a::len) < LENGTH('b::len) \ 0 <=s UCAST('a \ 'b) w" - by (simp add: word_sle_iff_le[OF word_msb_0 word_upcast_neg_msb]) - -(* FIXME: move to Word_Lib *) -lemma scast_ucast_up_eq_ucast: - assumes "LENGTH('a::len) < LENGTH('b::len)" - shows "SCAST('b \ 'c) (UCAST('a \ 'b) w) = UCAST('a \ 'c::len) w" - using assms - apply (subst scast_eq_ucast; simp) - apply (simp only: ucast_def msb_word_of_int) - apply (metis bin_nth_uint_imp decr_length_less_iff numeral_nat(7) verit_comp_simplify1(3)) - by (metis less_or_eq_imp_le ucast_nat_def unat_ucast_up_simp) - -lemma not_max_word_iff_less: - "w \ max_word \ w < max_word" - by (simp add: 
order_less_le) - -lemma ucast_increment: - assumes "w \ max_word" - shows "UCAST('a::len \ 'b::len) w + 1 = UCAST('a \ 'b) (w + 1)" - apply (cases "LENGTH('b) \ LENGTH('a)") - apply (simp add: ucast_down_add is_down) - apply (subgoal_tac "uint w + 1 < 2 ^ LENGTH('a)") - apply (subgoal_tac "uint w + 1 < 2 ^ LENGTH('b)") - apply (subst word_uint_eq_iff) - apply (simp add: uint_arith_simps uint_up_ucast is_up) - apply (erule less_trans, rule power_strict_increasing, simp, simp) - apply (subst less_diff_eq[symmetric]) - using assms - apply (simp add: not_max_word_iff_less word_less_alt) - apply (erule less_le_trans) - apply simp - done - -lemma max_word_gt_0: - "0 < max_word" - by (simp add: le_neq_trans[OF max_word_max]) - -lemma and_not_max_word: - "m \ max_word \ w && m \ max_word" - by (simp add: not_max_word_iff_less word_and_less') - -lemma mask_not_max_word: - "m < LENGTH('a::len) \ mask m \ (max_word :: 'a word)" - by (simp add: mask_eq_exp_minus_1) - -lemmas and_mask_not_max_word = - and_not_max_word[OF mask_not_max_word] - -lemma shiftr_not_max_word: - "0 < n \ w >> n \ max_word" - by (metis and_mask_eq_iff_shiftr_0 and_mask_not_max_word diff_less len_gt_0 shiftr_le_0 word_shiftr_lt) - -lemma word_sandwich1: - fixes a b c :: "'a::len word" - assumes "a < b" - assumes "b <= c" - shows "0 < b - a \ b - a <= c" - using assms diff_add_cancel order_less_irrefl add_0 word_le_imp_diff_le - word_le_less_eq word_neq_0_conv - by metis - -lemma word_sandwich2: - fixes a b :: "'a::len word" - assumes "0 < a" - assumes "a <= b" - shows "b - a < b" - using assms less_le_trans word_diff_less - by blast - -lemma unat_and_mask_less_2p: - fixes w :: "'a::len word" - shows "m < LENGTH('a) \ unat (w && mask m) < 2 ^ m" - by (simp add: unat_less_helper and_mask_less') - -lemma unat_shiftr_less_2p: - fixes w :: "'a::len word" - shows "n + m = LENGTH('a) \ unat (w >> n) < 2 ^ m" - by (cases "n = 0"; simp add: unat_less_helper shiftr_less_t2n3) - -lemma nat_div_less_mono: - fixes m n :: nat - shows "m div d < n div d \ m < n" - by (meson div_le_mono not_less) - -lemma word_shiftr_less_mono: - fixes w :: "'a::len word" - shows "w >> n < v >> n \ w < v" - by (auto simp: word_less_nat_alt shiftr_div_2n' elim: nat_div_less_mono) - -lemma word_shiftr_less_mask: - fixes w :: "'a::len word" - shows "(w >> n < v >> n) \ (w && ~~mask n < v && ~~mask n)" - by (metis (mono_tags) le_shiftr mask_shift shiftr_eq_neg_mask_eq word_le_less_eq word_le_not_less) - -lemma word_shiftr_le_mask: - fixes w :: "'a::len word" - shows "(w >> n \ v >> n) \ (w && ~~mask n \ v && ~~mask n)" - by (metis (mono_tags) le_shiftr mask_shift shiftr_eq_neg_mask_eq word_le_less_eq word_le_not_less) - -lemma word_shiftr_eq_mask: - fixes w :: "'a::len word" - shows "(w >> n = v >> n) \ (w && ~~mask n = v && ~~mask n)" - by (metis (mono_tags) mask_shift shiftr_eq_neg_mask_eq) - -lemmas word_shiftr_cmp_mask = - word_shiftr_less_mask word_shiftr_le_mask word_shiftr_eq_mask - -lemma if_if_if_same_output: - "(if c1 then if c2 then t else f else if c3 then t else f) = (if c1 \ c2 \ \c1 \ c3 then t else f)" - by (simp split: if_splits) - -lemma word_le_split_mask: - "(w \ v) \ (w >> n < v >> n \ w >> n = v >> n \ w && mask n \ v && mask n)" - apply (simp add: word_shiftr_eq_mask word_shiftr_less_mask) - apply (rule subst[where P="\c. c \ d = e" for d e, OF AND_NOT_mask_plus_AND_mask_eq[where n=n]]) - apply (rule subst[where P="\c. 
d \ c = e" for d e, OF AND_NOT_mask_plus_AND_mask_eq[where n=n]]) - apply (rule iffI) - apply safe - apply (fold_subgoals (prefix))[2] - apply (subst atomize_conj) - apply (rule context_conjI) - apply (metis AND_NOT_mask_plus_AND_mask_eq neg_mask_mono_le word_le_less_eq) - apply (metis add.commute word_and_le1 word_bw_comms(1) word_plus_and_or_coroll2 word_plus_mcs_4) - apply (metis Groups.add_ac(2) neg_mask_mono_le word_le_less_eq word_not_le word_plus_and_or_coroll2) - apply (metis add.commute word_and_le1 word_bw_comms(1) word_plus_and_or_coroll2 word_plus_mcs_3) - done - lemma unat_ucast_prio_mask_simp[simp]: "unat (ucast (p::priority) && mask m :: machine_word) = unat (p && mask m)" by (simp add: ucast_and_mask) @@ -325,19 +151,11 @@ lemma unat_ucast_prio_shiftr_simp[simp]: "unat (ucast (p::priority) >> n :: machine_word) = unat (p >> n)" by simp -lemma from_bool_to_bool_and_1 [simp]: - assumes r_size: "1 < size r" - shows "from_bool (to_bool (r && 1)) = r && 1" -proof - - from r_size have "r && 1 < 2" - by (simp add: and_mask_less_size [where n=1, unfolded mask_def, simplified]) - thus ?thesis - by (fastforce simp add: from_bool_def to_bool_def dest: word_less_cases) -qed - lemma wb_gt_2: "2 < word_bits" by (simp add: word_bits_conv) +declare from_bool_to_bool_and_1[simp] + (* NOTE: unused. *) lemma inj_on_option_map: "inj_on (map_option f o m) (dom m) \ inj_on m (dom m)" @@ -468,8 +286,8 @@ lemma word_minus_1_shiftr: apply (clarsimp simp: and_mask_dvd low_bits_zero) apply (subst mod_pos_pos_trivial) apply (simp add: word_le_def) - apply (metis mult_zero_left neq_zero div_positive_int linorder_not_le uint_2p_alt word_div_lt_eq_0 - word_less_def zless2p) + apply (metis (mono_tags) More_Word.word_div_mult assms(2) div_of_0_id p2_gt_0 uint_2p_alt uint_div + unsigned_eq_0_iff word_less_div word_less_iff_unsigned) apply (metis shiftr_div_2n uint_1 uint_sub_lt2p) apply fastforce done @@ -568,7 +386,7 @@ lemma map_to_ko_at_updI': \ (projectKO_opt \\<^sub>m (ksPSpace s)) x = Some y; valid_pspace' s; ko_at' y' x' s; objBitsKO (injectKO y') = objBitsKO y''; x \ x' \ \ - ko_at' y x (s\ksPSpace := ksPSpace s(x' \ y'')\)" + ko_at' y x (s\ksPSpace := (ksPSpace s)(x' \ y'')\)" by (fastforce simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd dest: map_to_ko_atI) @@ -625,9 +443,7 @@ lemma tcbFault_submonad_args: lemma threadGet_stateAssert_gets: "threadGet ext t = do stateAssert (tcb_at' t) []; gets (thread_fetch ext t) od" apply (rule is_stateAssert_gets [OF _ _ empty_fail_threadGet no_fail_threadGet]) - apply (clarsimp intro!: obj_at_ko_at'[where P="\tcb :: tcb. True", simplified] - | wp threadGet_wp)+ - apply (clarsimp simp: obj_at'_def thread_fetch_def projectKOs) + apply (wp threadGet_wp | clarsimp simp: obj_at'_def thread_fetch_def projectKOs)+ done lemma threadGet_tcbFault_submonad_fn: @@ -653,7 +469,7 @@ lemma asUser_obj_at_notQ: asUser t (setRegister r v) \\rv. obj_at' (Not \ tcbQueued) t\" apply (simp add: asUser_def) - apply (rule hoare_seq_ext)+ + apply (rule bind_wp)+ apply (simp add: split_def) apply (rule threadSet_obj_at'_really_strongest) apply (wp threadGet_wp |rule gets_inv|wpc|clarsimp)+ @@ -670,6 +486,7 @@ lemma empty_fail_asUser[iff]: lemma asUser_mapM_x: "(\x. empty_fail (f x)) \ asUser t (mapM_x f xs) = do stateAssert (tcb_at' t) []; mapM_x (\x. 
asUser t (f x)) xs od" + supply empty_fail_cond[simp] apply (simp add: mapM_x_mapM asUser_bind_distrib) apply (subst submonad_mapM [OF submonad_asUser submonad_asUser]) apply simp @@ -678,7 +495,7 @@ lemma asUser_mapM_x: apply (rule bind_apply_cong [OF refl])+ apply (clarsimp simp: in_monad dest!: fst_stateAssertD) apply (drule use_valid, rule mapM_wp', rule asUser_typ_ats, assumption) - apply (simp add: stateAssert_def get_def NonDetMonad.bind_def) + apply (simp add: stateAssert_def get_def Nondet_Monad.bind_def) done lemma asUser_threadGet_tcbFault_comm: @@ -785,7 +602,7 @@ lemma empty_fail_rethrowFailure: lemma empty_fail_resolveAddressBits: "empty_fail (resolveAddressBits cap cptr bits)" proof - - note empty_fail_assertE[iff] + note empty_fail_cond[simp] show ?thesis apply (rule empty_fail_use_cutMon) apply (induct rule: resolveAddressBits.induct) @@ -793,8 +610,7 @@ proof - apply (unfold Let_def cnode_cap_case_if fun_app_def K_bind_def haskell_assertE_def split_def) apply (intro empty_fail_cutMon_intros) - apply (clarsimp simp: empty_fail_drop_cutMon empty_fail_whenEs - locateSlot_conv returnOk_liftE[symmetric] + apply (clarsimp simp: empty_fail_drop_cutMon locateSlot_conv returnOk_liftE[symmetric] isCap_simps)+ done qed @@ -828,8 +644,9 @@ lemma getMessageInfo_le3: apply wp apply (rule_tac Q="\_. \" in hoare_strengthen_post) apply wp + apply (rename_tac rv s) apply (simp add: messageInfoFromWord_def Let_def msgExtraCapBits_def) - apply (cut_tac y="r >> Types_H.msgLengthBits" in word_and_le1 [where a=3]) + apply (cut_tac y="rv >> Types_H.msgLengthBits" in word_and_le1 [where a=3]) apply (simp add: word_le_nat_alt) done @@ -872,7 +689,7 @@ lemma cteDeleteOne_sch_act_wf: apply (simp add: finaliseCapTrue_standin_def Let_def) apply (rule hoare_pre) apply (wp isFinalCapability_inv cancelAllSignals_sch_act_wf - cancelAllIPC_sch_act_wf getCTE_wp' static_imp_wp + cancelAllIPC_sch_act_wf getCTE_wp' hoare_weak_lift_imp | wpc | simp add: Let_def split: if_split)+ done @@ -902,7 +719,7 @@ lemma setNotification_tcb: lemma state_refs_of'_upd: "\ valid_pspace' s; ko_wp_at' (\ko. objBitsKO ko = objBitsKO ko') ptr s \ \ - state_refs_of' (s\ksPSpace := ksPSpace s(ptr \ ko')\) = + state_refs_of' (s\ksPSpace := (ksPSpace s)(ptr \ ko')\) = (state_refs_of' s)(ptr := refs_of' ko')" apply (rule ext) apply (clarsimp simp: ps_clear_upd valid_pspace'_def pspace_aligned'_def @@ -914,14 +731,6 @@ lemma ex_st_tcb_at'_simp[simp]: "(\ts. st_tcb_at' ((=) ts) dest s) = tcb_at' dest s" by (auto simp add: pred_tcb_at'_def obj_at'_def) -lemma threadGet_wp: - "\\s. \tcb. ko_at' tcb thread s \ P (f tcb) s\ threadGet f thread \P\" - apply (rule hoare_post_imp [OF _ tg_sp']) - apply clarsimp - apply (frule obj_at_ko_at') - apply (clarsimp elim: obj_atE') - done - lemma threadGet_wp'': "\\s. \v. obj_at' (\tcb. f tcb = v) thread s \ P v s\ threadGet f thread \P\" apply (rule hoare_pre) @@ -987,7 +796,7 @@ lemma empty_fail_getIdleThread [simp,intro!]: lemma setTCB_cur: "\cur_tcb'\ setObject t (v::tcb) \\_. 
cur_tcb'\" - including no_pre + including classic_wp_pre apply (wp cur_tcb_lift) apply (simp add: setObject_def split_def updateObject_default_def) apply wp @@ -998,8 +807,7 @@ lemma empty_fail_slotCapLongRunningDelete: "empty_fail (slotCapLongRunningDelete slot)" by (auto simp: slotCapLongRunningDelete_def Let_def case_Null_If isFinalCapability_def - split: if_split - intro!: empty_fail_bind) + split: if_split) lemmas mapM_x_append = mapM_x_append2 @@ -1020,9 +828,6 @@ lemma getSlotCap_wp': apply (clarsimp simp: cte_wp_at_ctes_of) done -lemma fromIntegral_simp_nat[simp]: "(fromIntegral :: nat \ nat) = id" - by (simp add: fromIntegral_def fromInteger_nat toInteger_nat) - lemma invs_cicd_valid_objs' [elim!]: "all_invs_but_ct_idle_or_in_cur_domain' s \ valid_objs' s" by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) @@ -1032,10 +837,6 @@ lemma st_tcb_at'_opeq_simp: = st_tcb_at' (\st. st = Structures_H.thread_state.Running) (ksCurThread s) s" by (fastforce simp add: st_tcb_at'_def obj_at'_def) -lemma invs_queues_imp: - "invs' s \ valid_queues s" - by clarsimp - lemma invs'_pspace_domain_valid: "invs' s \ pspace_domain_valid s" by (simp add: invs'_def valid_state'_def) @@ -1045,45 +846,6 @@ lemma and_eq_0_is_nth: shows "y = 1 << n \ ((x && y) = 0) = (\ (x !! n))" by (metis (poly_guards_query) and_eq_0_is_nth) -lemma tcbSchedEnqueue_obj_at_unchangedT: - assumes y: "\f. \tcb. P (tcbQueued_update f tcb) = P tcb" - shows "\obj_at' P t\ tcbSchedEnqueue t' \\rv. obj_at' P t\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp | simp add: y)+ - done - -lemma rescheduleRequired_obj_at_unchangedT: - assumes y: "\f. \tcb. P (tcbQueued_update f tcb) = P tcb" - shows "\obj_at' P t\ rescheduleRequired \\rv. obj_at' P t\" - apply (simp add: rescheduleRequired_def) - apply (wp tcbSchedEnqueue_obj_at_unchangedT[OF y] | wpc)+ - apply simp - done - -lemma setThreadState_obj_at_unchangedT: - assumes x: "\f. \tcb. P (tcbState_update f tcb) = P tcb" - assumes y: "\f. \tcb. P (tcbQueued_update f tcb) = P tcb" - shows "\obj_at' P t\ setThreadState t' ts \\rv. obj_at' P t\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_obj_at_unchangedT[OF y], simp) - apply (wp threadSet_obj_at'_strongish) - apply (clarsimp simp: obj_at'_def projectKOs x cong: if_cong) - done - -lemma setBoundNotification_obj_at_unchangedT: - assumes x: "\f. \tcb. P (tcbBoundNotification_update f tcb) = P tcb" - shows "\obj_at' P t\ setBoundNotification t' ts \\rv. obj_at' P t\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_obj_at'_strongish) - apply (clarsimp simp: obj_at'_def projectKOs x cong: if_cong) - done - -lemmas setThreadState_obj_at_unchanged - = setThreadState_obj_at_unchangedT[OF all_tcbI all_tcbI] - -lemmas setBoundNotification_obj_at_unchanged - = setBoundNotification_obj_at_unchangedT[OF all_tcbI] - lemma magnitudeCheck_assert2: "\ is_aligned x n; (1 :: machine_word) < 2 ^ n; ksPSpace s x = Some v \ \ magnitudeCheck x (snd (lookupAround2 x (ksPSpace (s :: kernel_state)))) n @@ -1207,7 +969,7 @@ lemma ctes_of_valid_strengthen: lemma finaliseCap_Reply: "\Q (NullCap,NullCap) and K (isReplyCap cap)\ finaliseCapTrue_standin cap is_final \Q\" - apply (rule NonDetMonadVCG.hoare_gen_asm) + apply (rule Nondet_VCG.hoare_gen_asm) apply (wpsimp simp: finaliseCapTrue_standin_def isCap_simps) done @@ -1302,13 +1064,6 @@ lemma ksPSpace_update_eq_ExD: \ \ps. s = t \ ksPSpace := ps \" by (erule exI) -lemma tcbSchedEnqueue_queued_queues_inv: - "\\s. 
obj_at' tcbQueued t s \ P (ksReadyQueues s) \ tcbSchedEnqueue t \\_ s. P (ksReadyQueues s)\" - unfolding tcbSchedEnqueue_def unless_def - apply (wpsimp simp: if_apply_def2 wp: threadGet_wp) - apply normalise_obj_at' - done - (* FIXME BV: generalise *) lemma word_clz_1[simp]: "word_clz (1::32 word) = 31" @@ -1466,9 +1221,21 @@ lemma asUser_obj_at': lemma update_ep_map_to_ctes: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + shows "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm) +(* FIXME AARCH64 move *) +lemma multiple_add_less_nat: + "\ a < (c :: nat); x dvd a; x dvd c; b < x \ + \ a + b < c" + apply (subgoal_tac "b < c - a") + apply simp + apply (erule order_less_le_trans) + apply (rule dvd_imp_le) + apply simp + apply simp + done + end diff --git a/proof/crefine/README.md b/proof/crefine/README.md index e31bf5990e..e9d2fc0d32 100644 --- a/proof/crefine/README.md +++ b/proof/crefine/README.md @@ -30,13 +30,9 @@ The approach used for the proof is described in the TPHOLS '09 Building -------- -To build from the `l4v/proof` directory, run: +To build for the ARM architecture from the `l4v/` directory, run: - make CRefine - -If you wish to build for a specific architecture other than the default, set -your `L4V_ARCH` environment variable accordingly, as documented for the [C code -translation](../../spec/cspec/README.md). + L4V_ARCH=ARM ./run_tests CRefine Important Theories ------------------ diff --git a/proof/crefine/RISCV64/ADT_C.thy b/proof/crefine/RISCV64/ADT_C.thy index cc33bc2da2..e4961e7853 100644 --- a/proof/crefine/RISCV64/ADT_C.thy +++ b/proof/crefine/RISCV64/ADT_C.thy @@ -77,8 +77,8 @@ lemma Basic_sem_eq: lemma setTCBContext_C_corres: "\ ccontext_relation tc tc'; t' = tcb_ptr_to_ctcb_ptr t \ \ - corres_underlying rf_sr nf nf' dc (pspace_domain_valid and tcb_at' t) \ - (threadSet (\tcb. tcb \ tcbArch := atcbContextSet tc (tcbArch tcb)\) t) (setTCBContext_C tc' t')" + corres_underlying rf_sr nf nf' dc (pspace_domain_valid and tcb_at' t) \ + (threadSet (\tcb. 
tcb \ tcbArch := atcbContextSet tc (tcbArch tcb)\) t) (setTCBContext_C tc' t')" apply (simp add: setTCBContext_C_def exec_C_def Basic_sem_eq corres_underlying_def) apply clarsimp apply (simp add: threadSet_def bind_assoc split_def exec_gets) @@ -86,7 +86,7 @@ lemma setTCBContext_C_corres: apply clarsimp apply (frule getObject_eq [rotated -1], simp) apply (simp add: objBits_simps') - apply (simp add: NonDetMonad.bind_def split_def) + apply (simp add: Nondet_Monad.bind_def split_def) apply (rule bexI) prefer 2 apply assumption @@ -107,8 +107,6 @@ lemma setTCBContext_C_corres: apply (simp add: map_to_ctes_upd_tcb_no_ctes map_to_tcbs_upd tcb_cte_cases_def cvariable_relation_upd_const ko_at_projectKO_opt cteSizeBits_def) apply (simp add: cep_relations_drop_fun_upd) - apply (apply_conjunct \match conclusion in \cready_queues_relation _ _ _\ \ - \erule cready_queues_relation_not_queue_ptrs; rule ext; simp split: if_split\\) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) apply (simp add: ctcb_relation_def carch_tcb_relation_def) @@ -591,25 +589,51 @@ lemma tcb_queue_rel'_unique: apply (erule(2) tcb_queue_rel_unique) done -definition - cready_queues_to_H - :: "(tcb_C ptr \ tcb_C) \ (tcb_queue_C[num_tcb_queues]) \ word8 \ word8 \ machine_word list" + +definition tcb_queue_C_to_tcb_queue :: "tcb_queue_C \ tcb_queue" where + "tcb_queue_C_to_tcb_queue q \ + TcbQueue (if head_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (head_C q))) + (if end_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (end_C q)))" + +definition cready_queues_to_H :: + "tcb_queue_C[num_tcb_queues] \ (domain \ priority \ ready_queue)" where - "cready_queues_to_H h_tcb cs \ \(qdom, prio). if ucast minDom \ qdom \ qdom \ ucast maxDom - \ ucast seL4_MinPrio \ prio \ prio \ ucast seL4_MaxPrio - then THE aq. let cqueue = index cs (cready_queues_index_to_C qdom prio) - in sched_queue_relation' h_tcb aq (head_C cqueue) (StateRelation_C.end_C cqueue) - else []" + "cready_queues_to_H cs \ + \(qdom, prio). 
+ if qdom \ maxDomain \ prio \ maxPriority + then let cqueue = index cs (cready_queues_index_to_C qdom prio) + in tcb_queue_C_to_tcb_queue cqueue + else TcbQueue None None" lemma cready_queues_to_H_correct: - "cready_queues_relation (clift s) cs as \ - cready_queues_to_H (clift s) cs = as" - apply (clarsimp simp: cready_queues_to_H_def cready_queues_relation_def - fun_eq_iff) - apply (rule the_equality) - apply simp - apply (clarsimp simp: Let_def) - apply (rule_tac hp="clift s" in tcb_queue_rel'_unique, simp_all add: lift_t_NULL) + "\cready_queues_relation (ksReadyQueues s) (ksReadyQueues_' ch); + no_0_obj' s; ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ cready_queues_to_H (ksReadyQueues_' ch) = ksReadyQueues s" + apply (clarsimp simp: cready_queues_to_H_def cready_queues_relation_def Let_def) + apply (clarsimp simp: fun_eq_iff) + apply (rename_tac d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (rule conjI) + apply (clarsimp simp: tcb_queue_C_to_tcb_queue_def ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (case_tac "tcbQueueHead (ksReadyQueues s (d, p)) = None") + apply (clarsimp simp: tcb_queue.expand) + apply clarsimp + apply (rename_tac queue_head queue_end) + apply (prop_tac "tcb_at' queue_head s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (prop_tac "tcb_at' queue_end s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (drule kernel.tcb_at_not_NULL)+ + apply (fastforce simp: tcb_queue.expand kernel.ctcb_ptr_to_ctcb_ptr) + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits; + metis tcb_queue.exhaust_sel word_not_le) done (* showing that cpspace_relation is actually unique >>>*) @@ -750,19 +774,18 @@ lemma cthread_state_rel_imp_eq: "cthread_state_relation x z \ cthread_state_relation y z \ x=y" apply (simp add: cthread_state_relation_def split_def) apply (cases x) - apply (cases y, simp_all add: ThreadState_BlockedOnReceive_def - ThreadState_BlockedOnReply_def ThreadState_BlockedOnNotification_def - ThreadState_Running_def ThreadState_Inactive_def - ThreadState_IdleThreadState_def ThreadState_BlockedOnSend_def - ThreadState_Restart_def)+ + apply (cases y, simp_all add: ThreadState_defs)+ done -lemma ksPSpace_valid_objs_tcbBoundNotification_nonzero: - "\s. 
ksPSpace s = ah \ no_0_obj' s \ valid_objs' s - \ map_to_tcbs ah p = Some tcb \ tcbBoundNotification tcb \ Some 0" +lemma map_to_tcbs_Some_refs_nonzero: + "\map_to_tcbs (ksPSpace s) p = Some tcb; no_0_obj' s; valid_objs' s\ + \ tcbBoundNotification tcb \ Some 0 + \ tcbSchedPrev tcb \ Some 0 + \ tcbSchedNext tcb \ Some 0" + supply word_neq_0_conv[simp del] apply (clarsimp simp: map_comp_def split: option.splits) - apply (erule(1) valid_objsE') - apply (clarsimp simp: projectKOs valid_obj'_def valid_tcb'_def) + apply (erule (1) valid_objsE') + apply (fastforce simp: projectKOs valid_obj'_def valid_tcb'_def) done lemma atcbContextGet_inj[simp]: @@ -773,34 +796,75 @@ lemma ccontext_relation_imp_eq2: "\ccontext_relation (atcbContextGet t) x; ccontext_relation (atcbContextGet t') x\ \ t = t'" by (auto dest: ccontext_relation_imp_eq) +lemma tcb_ptr_to_ctcb_ptr_inj: + "tcb_ptr_to_ctcb_ptr x = tcb_ptr_to_ctcb_ptr y \ x = y" + by (auto simp: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) + +lemma + assumes "pspace_aligned' as" "pspace_distinct' as" "valid_tcb' atcb as" + shows tcb_at'_tcbBoundNotification: + "bound (tcbBoundNotification atcb) \ ntfn_at' (the (tcbBoundNotification atcb)) as" + and tcb_at'_tcbSchedPrev: + "tcbSchedPrev atcb \ None \ tcb_at' (the (tcbSchedPrev atcb)) as" + and tcb_at'_tcbSchedNext: + "tcbSchedNext atcb \ None \ tcb_at' (the (tcbSchedNext atcb)) as" + using assms + by (clarsimp simp: valid_tcb'_def obj_at'_def)+ + lemma cpspace_tcb_relation_unique: - assumes tcbs: "cpspace_tcb_relation ah ch" "cpspace_tcb_relation ah' ch" - and vs: "\s. ksPSpace s = ah \ no_0_obj' s \ valid_objs' s" - and vs': "\s. ksPSpace s = ah' \ no_0_obj' s \ valid_objs' s" - assumes ctes: " \tcb tcb'. (\p. map_to_tcbs ah p = Some tcb \ - map_to_tcbs ah' p = Some tcb') \ - (\x\ran tcb_cte_cases. fst x tcb' = fst x tcb)" - shows "map_to_tcbs ah' = map_to_tcbs ah" + assumes tcbs: "cpspace_tcb_relation (ksPSpace as) ch" "cpspace_tcb_relation (ksPSpace as') ch" + assumes vs: "no_0_obj' as" "valid_objs' as" + assumes vs': "no_0_obj' as'" "valid_objs' as'" + assumes ad: "pspace_aligned' as" "pspace_distinct' as" + assumes ad': "pspace_aligned' as'" "pspace_distinct' as'" + assumes ctes: "\tcb tcb'. (\p. map_to_tcbs (ksPSpace as) p = Some tcb \ + map_to_tcbs (ksPSpace as') p = Some tcb') \ + (\x\ran tcb_cte_cases. 
fst x tcb' = fst x tcb)" + shows "map_to_tcbs (ksPSpace as') = map_to_tcbs (ksPSpace as)" using tcbs(2) tcbs(1) apply (clarsimp simp add: cmap_relation_def) apply (drule inj_image_inv[OF inj_tcb_ptr_to_ctcb_ptr])+ apply (simp add: tcb_ptr_to_ctcb_ptr_def[abs_def] ctcb_offset_def) apply (rule ext) - apply (case_tac "x:dom (map_to_tcbs ah)") + apply (case_tac "x \ dom (map_to_tcbs (ksPSpace as))") apply (drule bspec, assumption)+ apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) apply clarsimp apply (rename_tac p x y) apply (cut_tac ctes) apply (drule_tac x=x in spec, drule_tac x=y in spec, erule impE, fastforce) - apply (frule ksPSpace_valid_objs_tcbBoundNotification_nonzero[OF vs]) - apply (frule ksPSpace_valid_objs_tcbBoundNotification_nonzero[OF vs']) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs]) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs']) + apply (rename_tac atcb atcb') + apply (prop_tac "valid_tcb' atcb as") + apply (fastforce intro: vs ad map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (prop_tac "valid_tcb' atcb' as'") + apply (fastforce intro: vs' ad' map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (frule tcb_at'_tcbSchedPrev[OF ad]) + apply (frule tcb_at'_tcbSchedPrev[OF ad']) + apply (frule tcb_at'_tcbSchedNext[OF ad]) + apply (frule tcb_at'_tcbSchedNext[OF ad']) apply (thin_tac "map_to_tcbs x y = Some z" for x y z)+ - apply (case_tac x, case_tac y, case_tac "the (clift ch (tcb_Ptr (p+0x200)))") + apply (case_tac "the (clift ch (tcb_Ptr (p + 2 ^ ctcb_size_bits)))") apply (clarsimp simp: ctcb_relation_def ran_tcb_cte_cases) - apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.splits) - apply (auto simp: cfault_rel_imp_eq cthread_state_rel_imp_eq carch_tcb_relation_def - ccontext_relation_imp_eq2 up_ucast_inj_eq ctcb_size_bits_def) + apply (clarsimp simp: option_to_ctcb_ptr_def option_to_ptr_def option_to_0_def) + apply (rule tcb.expand) + apply clarsimp + apply (intro conjI) + apply (simp add: cthread_state_rel_imp_eq) + apply (simp add: cfault_rel_imp_eq) + apply (case_tac "tcbBoundNotification atcb'", case_tac "tcbBoundNotification atcb"; clarsimp) + apply (clarsimp split: option.splits) + apply (case_tac "tcbSchedPrev atcb'"; case_tac "tcbSchedPrev atcb"; clarsimp) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (case_tac "tcbSchedNext atcb'"; case_tac "tcbSchedNext atcb"; clarsimp) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (force simp: carch_tcb_relation_def ccontext_relation_imp_eq2) + apply auto done lemma tcb_queue_rel_clift_unique: @@ -831,10 +895,6 @@ lemma ksPSpace_valid_pspace_ntfnBoundTCB_nonzero: apply (clarsimp simp: projectKOs valid_obj'_def valid_ntfn'_def) done -lemma tcb_ptr_to_ctcb_ptr_inj: - "tcb_ptr_to_ctcb_ptr x = tcb_ptr_to_ctcb_ptr y \ x = y" - by (auto simp: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) - lemma cpspace_ntfn_relation_unique: assumes ntfns: "cpspace_ntfn_relation ah ch" "cpspace_ntfn_relation ah' ch" and vs: "\s. 
ksPSpace s = ah \ valid_pspace' s" @@ -1082,8 +1142,8 @@ proof - apply (drule (1) cpspace_pte_relation_unique) apply (drule (1) cpspace_asidpool_relation_unique) apply (drule (1) cpspace_tcb_relation_unique) - apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs') - apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs') + apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs')+ + apply (fastforce intro: aligned distinct aligned' distinct')+ apply (intro allI impI,elim exE conjE) apply (rule_tac p=p in map_to_ctes_tcb_ctes, assumption) apply (frule (1) map_to_ko_atI[OF _ aligned distinct]) @@ -1136,7 +1196,7 @@ lemma ksPSpace_eq_imp_valid_tcb'_eq: by (auto simp: ksPSpace_eq_imp_obj_at'_eq[OF ksPSpace] ksPSpace_eq_imp_valid_cap'_eq[OF ksPSpace] ksPSpace_eq_imp_typ_at'_eq[OF ksPSpace] - valid_tcb'_def valid_tcb_state'_def valid_bound_ntfn'_def + valid_tcb'_def valid_tcb_state'_def valid_bound_ntfn'_def valid_bound_tcb'_def split: thread_state.splits option.splits) lemma ksPSpace_eq_imp_valid_objs'_eq: @@ -1291,7 +1351,7 @@ where ksDomSchedule = cDomSchedule_to_H kernel_all_global_addresses.ksDomSchedule, ksCurDomain = ucast (ksCurDomain_' s), ksDomainTime = ksDomainTime_' s, - ksReadyQueues = cready_queues_to_H (clift (t_hrs_' s)) (ksReadyQueues_' s), + ksReadyQueues = cready_queues_to_H (ksReadyQueues_' s), ksReadyQueuesL1Bitmap = cbitmap_L1_to_H (ksReadyQueuesL1Bitmap_' s), ksReadyQueuesL2Bitmap = cbitmap_L2_to_H (ksReadyQueuesL2Bitmap_' s), ksCurThread = ctcb_ptr_to_tcb_ptr (ksCurThread_' s), @@ -1313,16 +1373,16 @@ lemma trivial_eq_conj: "B = C \ (A \ B) = (A \ C)" lemma cstate_to_H_correct: assumes valid: "valid_state' as" assumes cstate_rel: "cstate_relation as cs" + assumes rdyqs: "ksReadyQueues_asrt as" shows "cstate_to_H cs = as \ksMachineState:= observable_memory (ksMachineState as) (user_mem' as)\" apply (subgoal_tac "cstate_to_machine_H cs = observable_memory (ksMachineState as) (user_mem' as)") apply (rule kernel_state.equality, simp_all add: cstate_to_H_def) - apply (rule cstate_to_pspace_H_correct) + apply (rule cstate_to_pspace_H_correct) using valid apply (simp add: valid_state'_def) using cstate_rel valid apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def - observable_memory_def valid_state'_def - valid_pspace'_def) + observable_memory_def valid_state'_def valid_pspace'_def) using cstate_rel apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def prod_eq_iff) using cstate_rel @@ -1330,10 +1390,10 @@ lemma cstate_to_H_correct: using valid cstate_rel apply (rule mk_gsUntypedZeroRanges_correct) subgoal - using cstate_rel - by (fastforce simp: cstate_relation_def cpspace_relation_def - Let_def ghost_size_rel_def unat_eq_0 - split: if_split) + using cstate_rel + by (fastforce simp: cstate_relation_def cpspace_relation_def + Let_def ghost_size_rel_def unat_eq_0 + split: if_split) using valid cstate_rel apply (rule cDomScheduleIdx_to_H_correct) using cstate_rel @@ -1347,8 +1407,13 @@ lemma cstate_to_H_correct: using cstate_rel apply (clarsimp simp: cstate_relation_def Let_def) apply (rule cready_queues_to_H_correct) - using cstate_rel - apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel rdyqs + apply (fastforce intro!: cready_queues_to_H_correct + simp: cstate_relation_def Let_def) + using valid apply (fastforce simp: valid_state'_def) + using rdyqs apply fastforce + using valid apply (fastforce simp: valid_state'_def) + using valid apply (fastforce simp: valid_state'_def) using 
cstate_rel apply (clarsimp simp: cstate_relation_def Let_def) using cstate_rel diff --git a/proof/crefine/RISCV64/ArchMove_C.thy b/proof/crefine/RISCV64/ArchMove_C.thy index f400e9ea3f..ddbf18b54d 100644 --- a/proof/crefine/RISCV64/ArchMove_C.thy +++ b/proof/crefine/RISCV64/ArchMove_C.thy @@ -93,18 +93,13 @@ lemma atg_sp': (* FIXME: MOVE to EmptyFail *) lemma empty_fail_archThreadGet [intro!, wp, simp]: "empty_fail (archThreadGet f p)" - by (simp add: archThreadGet_def getObject_def split_def) + by (fastforce simp: archThreadGet_def getObject_def split_def) (* FIXME: move to ainvs? *) lemma sign_extend_canonical_address: "(x = sign_extend 38 x) = canonical_address x" by (fastforce simp: sign_extended_iff_sign_extend canonical_address_sign_extended canonical_bit_def) -lemma ptr_range_mask_range: - "{ptr..ptr + 2 ^ bits - 1} = mask_range ptr bits" - unfolding mask_def - by simp - lemma valid_untyped': notes usableUntypedRange.simps[simp del] assumes pspace_distinct': "pspace_distinct' s" and @@ -302,14 +297,8 @@ lemma obj_at_kernel_mappings': \ p \ kernel_mappings" by (clarsimp simp: pspace_in_kernel_mappings'_def obj_at'_def dom_def) -crunches Arch.switchToThread - for valid_queues'[wp]: valid_queues' - (simp: crunch_simps wp: hoare_drop_imps) crunches switchToIdleThread for ksCurDomain[wp]: "\s. P (ksCurDomain s)" -crunches switchToIdleThread, switchToThread - for valid_pspace'[wp]: valid_pspace' - (simp: whenE_def crunch_simps wp: hoare_drop_imps) lemma getMessageInfo_less_4: "\\\ getMessageInfo t \\rv s. msgExtraCaps rv < 4\" @@ -348,8 +337,7 @@ lemma asid_shiftr_low_bits_less[simplified]: lemma getActiveIRQ_neq_Some0x3FF': "\\\ getActiveIRQ in_kernel \\rv s. rv \ Some 0x3FF\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma getActiveIRQ_neq_Some0x3FF: @@ -377,7 +365,7 @@ lemma length_msgRegisters[simplified size_msgRegisters_def]: lemma empty_fail_loadWordUser[intro!, simp]: "empty_fail (loadWordUser x)" - by (simp add: loadWordUser_def ef_loadWord ef_dmo') + by (fastforce simp: loadWordUser_def ef_loadWord ef_dmo') lemma empty_fail_getMRs[iff]: "empty_fail (getMRs t buf mi)" @@ -387,26 +375,14 @@ lemma empty_fail_getReceiveSlots: "empty_fail (getReceiveSlots r rbuf)" proof - note - empty_fail_assertE[iff] - empty_fail_resolveAddressBits[iff] + empty_fail_resolveAddressBits[wp] + empty_fail_rethrowFailure[wp] + empty_fail_rethrowFailure[wp] show ?thesis - apply (clarsimp simp: getReceiveSlots_def loadCapTransfer_def split_def - split: option.split) - apply (rule empty_fail_bind) - apply (simp add: capTransferFromWords_def) - apply (simp add: emptyOnFailure_def unifyFailure_def) - apply (intro empty_fail_catch empty_fail_bindE empty_fail_rethrowFailure, - simp_all add: empty_fail_whenEs) - apply (simp_all add: lookupCap_def split_def lookupCapAndSlot_def - lookupSlotForThread_def liftME_def - getThreadCSpaceRoot_def locateSlot_conv bindE_assoc - lookupSlotForCNodeOp_def lookupErrorOnFailure_def - cong: if_cong) - apply (intro empty_fail_bindE, - simp_all add: getSlotCap_def) - apply (intro empty_fail_If empty_fail_bindE empty_fail_rethrowFailure impI, - simp_all add: empty_fail_whenEs rangeCheck_def) - done + unfolding getReceiveSlots_def loadCapTransfer_def lookupCap_def lookupCapAndSlot_def + by (wpsimp simp: emptyOnFailure_def unifyFailure_def lookupSlotForThread_def + capTransferFromWords_def getThreadCSpaceRoot_def locateSlot_conv bindE_assoc + lookupSlotForCNodeOp_def lookupErrorOnFailure_def rangeCheck_def) qed lemma 
user_getreg_rv: diff --git a/proof/crefine/RISCV64/Arch_C.thy b/proof/crefine/RISCV64/Arch_C.thy index 75b93a30c0..3e35362d4d 100644 --- a/proof/crefine/RISCV64/Arch_C.thy +++ b/proof/crefine/RISCV64/Arch_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * @@ -72,12 +73,12 @@ using [[goals_limit=20]] apply (ctac add: unmapPageTable_ccorres) apply (simp add: storePTE_def' swp_def) apply clarsimp - apply(simp only: dc_def[symmetric] bit_simps_corres[symmetric]) + apply(simp only: bit_simps_corres[symmetric]) apply (ctac add: clearMemory_setObject_PTE_ccorres[simplified objBits_InvalidPTE_pte_bits]) apply wp apply (simp del: Collect_const) apply (vcg exspec=unmapPageTable_modifies) - apply (simp add: to_bool_def) + apply simp apply (rule ccorres_return_Skip') apply (simp add: cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_lift_page_table_cap cap_to_H_def @@ -445,7 +446,7 @@ shows apply (rule ccorres_rhs_assoc2) apply (rule ccorres_abstract_cleanup) apply (rule ccorres_symb_exec_l) - apply (rule_tac P = "rva = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) + apply (rule_tac P = "rv = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) apply (simp add: hrs_htd_update del:fun_upd_apply) apply (rule ccorres_split_nothrow) @@ -668,7 +669,7 @@ lemma liftME_option_catch_bind: apply (rule ext) apply (clarsimp simp: return_def) apply (case_tac "m s", clarsimp) - apply (auto simp: split_def throwError_def return_def NonDetMonad.lift_def + apply (auto simp: split_def throwError_def return_def Nondet_Monad.lift_def split: prod.splits sum.splits) done @@ -962,7 +963,7 @@ lemma decodeRISCVPageTableInvocation_ccorres: apply (solves \clarsimp simp: asidInvalid_def isCap_simps ccap_relation_PageTableCap_IsMapped\) apply (simp add: throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply csymbr apply csymbr @@ -975,7 +976,7 @@ lemma decodeRISCVPageTableInvocation_ccorres: apply (fold not_None_def) (* avoid expanding capPTMappedAddress *) apply clarsimp apply (simp add: throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: lookupError_injection invocationCatch_use_injection_handler injection_bindE[OF refl refl] injection_handler_If bindE_assoc @@ -987,7 +988,7 @@ lemma decodeRISCVPageTableInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves\clarsimp simp: asidInvalid_def isCap_simps ccap_relation_PageTableCap_BasePtr\) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: bindE_assoc) apply (ctac pre: ccorres_liftE_Seq add: lookupPTSlot_ccorres) @@ -1010,16 +1011,14 @@ lemma decodeRISCVPageTableInvocation_ccorres: apply (erule cmap_relationE1[OF rf_sr_cpte_relation], erule ko_at_projectKO_opt) apply (clarsimp simp: typ_heap_simps from_bool_eq_if) apply (simp flip: word_unat.Rep_inject) - apply (auto simp: cpte_relation_def Let_def - pte_lift_def - from_bool_def case_bool_If + apply (auto simp: cpte_relation_def Let_def 
pte_lift_def case_bool_If split: pte.split_asm if_splits)[1] apply ceqv apply clarsimp apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves clarsimp) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* checks are done, move on to doing the mapping *) apply (clarsimp simp: injection_handler_returnOk) @@ -1060,10 +1059,10 @@ lemma decodeRISCVPageTableInvocation_ccorres: apply (rule conseqPre, vcg) apply clarsimp apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply clarsimp apply (wp injection_wp[OF refl] findVSpaceForASID_inv hoare_drop_imps) apply clarsimp @@ -1105,11 +1104,9 @@ lemma decodeRISCVPageTableInvocation_ccorres: subgoal for _ v1 (* RISCVPageTableUnmap: C preconditions *) apply (drule_tac t="cteCap _" in sym) - apply (clarsimp simp: rf_sr_ksCurThread "StrictC'_thread_state_defs" - mask_eq_iff_w2p word_size - ct_in_state'_def st_tcb_at'_def - word_sle_def word_sless_def - typ_heap_simps' bit_simps) + apply (clarsimp simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size + ct_in_state'_def st_tcb_at'_def word_sle_def word_sless_def + typ_heap_simps' bit_simps) apply (frule cap_get_tag_isCap_unfolded_H_cap, simp) apply clarsimp apply (case_tac v1; clarsimp) @@ -1121,7 +1118,7 @@ lemma decodeRISCVPageTableInvocation_ccorres: (* RISCVPageTableMap: C preconditions *) apply (prop_tac "SCAST(32 signed \ 64) ThreadState_Restart && mask 4 = SCAST(32 signed \ 64) ThreadState_Restart") - apply (solves \clarsimp simp: ThreadState_Restart_def mask_def\) + apply (solves \clarsimp simp: ThreadState_defs mask_def\) apply (clarsimp cong: imp_cong conj_cong) apply (clarsimp simp: neq_Nil_conv[where xs=extraCaps] excaps_in_mem_def slotcap_in_mem_def @@ -1180,8 +1177,7 @@ lemma checkVPAlignment_spec: apply (clarsimp simp: mask_eq_iff_w2p word_size) apply (rule conjI) apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.split) - apply (simp add: from_bool_def vmsz_aligned_def is_aligned_mask - mask_def split: if_split) + apply (simp add: vmsz_aligned_def is_aligned_mask mask_def split: if_split) done definition @@ -1323,7 +1319,7 @@ lemma performPageInvocationMapPTE_ccorres: done lemma performPageGetAddress_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart)) @@ -1349,8 +1345,8 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) apply (rule_tac P="\s. 
ksCurThread s = thread" in ccorres_cross_over_guard) @@ -1372,8 +1368,8 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp apply (vcg exspec=setRegister_modifies) @@ -1385,34 +1381,25 @@ lemma performPageGetAddress_ccorres: Kernel_C.msgInfoRegister_def Kernel_C.a1_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply clarsimp apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply vcg apply clarsimp apply (rule conseqPre, vcg) apply clarsimp - apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' rf_sr_ksCurThread msgRegisters_unfold seL4_MessageInfo_lift_def message_info_to_H_def mask_def) apply (cases isCall) apply (auto simp: RISCV64.badgeRegister_def RISCV64_H.badgeRegister_def Kernel_C.badgeRegister_def - Kernel_C.a0_def fromPAddr_def ThreadState_Running_def + Kernel_C.a0_def fromPAddr_def ThreadState_defs pred_tcb_at'_def obj_at'_def ct_in_state'_def) done -lemma vaddr_segment_nonsense3_folded: - "is_aligned (p :: machine_word) pageBits \ - (p + ((vaddr >> pageBits) && mask (pt_bits - word_size_bits) << word_size_bits) && ~~ mask pt_bits) = p" - apply (rule is_aligned_add_helper[THEN conjunct2]) - apply (simp add: bit_simps mask_def)+ - apply (rule shiftl_less_t2n[where m=12 and n=3, simplified, OF and_mask_less'[where n=9, unfolded mask_def, simplified]]) - apply simp+ - done - lemma vmsz_aligned_addrFromPPtr': "vmsz_aligned (addrFromPPtr p) sz = vmsz_aligned p sz" @@ -1455,18 +1442,6 @@ lemma slotcap_in_mem_valid: apply (erule(1) ctes_of_valid') done -lemma unat_less_iff64: - "\unat (a::machine_word) = b;c < 2^word_bits\ - \ (a < of_nat c) = (b < c)" - apply (rule iffI) - apply (drule unat_less_helper) - apply simp - apply (simp add:unat64_eq_of_nat) - apply (rule of_nat_mono_maybe) - apply (simp add:word_bits_def) - apply simp - done - lemma injection_handler_if_returnOk: "injection_handler Inl (if a then b else returnOk c) = (if a then (injection_handler Inl b) else returnOk c)" @@ -1479,11 +1454,6 @@ lemma injection_handler_if_returnOk: lemma pbfs_less: "pageBitsForSize sz < 31" by (case_tac sz,simp_all add: bit_simps) -definition - to_option :: "('a \ bool) \ 'a \ 'a option" -where - "to_option f x \ if f x then Some x else None" - lemma cte_wp_at_eq_gsMaxObjectSize: "cte_wp_at' ((=) cap o cteCap) slot s \ valid_global_refs' s @@ -1777,7 +1747,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (solves \clarsimp simp: asidInvalid_def isCap_simps ccap_relation_PageTableCap_IsMapped\) apply (simp add: throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply csymbr apply csymbr @@ -1794,7 +1764,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves\clarsimp simp: asidInvalid_def isCap_simps 
ccap_relation_PageTableCap_BasePtr\) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: bindE_assoc) (* check vaddr is valid *) @@ -1806,7 +1776,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves \clarsimp simp: pptrUserTop_def' p_assoc_help\) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* check vaddr alignment *) apply (clarsimp simp: checkVPAlignment_def unlessE_def injection_handler_If @@ -1817,7 +1787,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws2[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves \clarsimp simp: vmsz_aligned_def from_bool_0 is_aligned_mask\) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* lookup pt slot *) @@ -1829,7 +1799,6 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rename_tac ptSlot ptSlot_ret) apply wpfix apply (rule_tac P="unat (ptBitsLeft_C ptSlot_ret) < 64" in ccorres_gen_asm) - apply (fold dc_def id_def) apply (rule ccorres_if_lhs[rotated]) (* throwing a lookup fault, branch condition on C side is true *) apply (prop_tac "ptBitsLeft_C ptSlot_ret @@ -1842,10 +1811,10 @@ lemma decodeRISCVFrameInvocation_ccorres: lookup_fault_missing_capability_new_'proc *) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def NonDetMonad.lift_def + apply (clarsimp simp: throwError_def return_def bindE_def Nondet_Monad.lift_def exception_defs lookup_fault_lift_invalid_root) apply (clarsimp simp: syscall_error_rel_def exception_defs syscall_error_to_H_def - syscall_error_type_defs false_def) + syscall_error_type_defs) apply (simp add: lookup_fault_missing_capability_lift) apply (subst word_le_mask_eq) apply (simp add: mask_def word_le_nat_alt) @@ -1882,9 +1851,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply clarsimp apply (erule cmap_relationE1[OF rf_sr_cpte_relation], erule ko_at_projectKO_opt) apply (clarsimp simp: typ_heap_simps from_bool_eq_if from_bool_0) - apply (fastforce simp: cpte_relation_def Let_def - pte_lift_def - from_bool_def case_bool_If + apply (fastforce simp: cpte_relation_def Let_def pte_lift_def case_bool_If split: pte.split_asm if_splits) apply ceqv apply clarsimp @@ -1892,7 +1859,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws2[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves clarsimp) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* checks handled, perform frame map *) @@ -1959,7 +1926,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (clarsimp simp: isCap_simps not_None_def ccap_relation_FrameCap_MappedAddress ccap_relation_PageTableCap_MappedASID ccap_relation_FrameCap_MappedASID) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* ensure mapped address of frame matches *) apply csymbr @@ -1968,7 +1935,7 @@ lemma decodeRISCVFrameInvocation_ccorres: 
apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves clarsimp) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* ensure lookupPTSlot returned a slot with a PTE *) @@ -1991,7 +1958,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws2[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves clarsimp) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* checks handled, perform frame remap *) @@ -2061,10 +2028,10 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule conseqPre, vcg) apply clarsimp apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply clarsimp apply (wp injection_wp[OF refl] findVSpaceForASID_inv hoare_drop_imps) apply clarsimp @@ -2104,7 +2071,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (prop_tac "SCAST(32 signed \ 64) ThreadState_Restart && mask 4 = SCAST(32 signed \ 64) ThreadState_Restart") - apply (solves \clarsimp simp: ThreadState_Restart_def mask_def\) + apply (solves \clarsimp simp: ThreadState_defs mask_def\) apply (rule conjI) (* RISCVPageMap, Haskell side *) @@ -2154,7 +2121,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (clarsimp simp: not_le rf_sr_ksCurThread isCap_simps) apply (prop_tac "SCAST(32 signed \ 64) ThreadState_Restart && mask 4 = SCAST(32 signed \ 64) ThreadState_Restart") - apply (solves \clarsimp simp: ThreadState_Restart_def mask_def\) + apply (solves \clarsimp\) apply (rule conjI, solves \simp add: word_less_nat_alt\) (* size args < 3 *) (* get a hold of our valid caps and resolve the C heap *) @@ -2183,7 +2150,6 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (prop_tac "(addrFromPPtr p >> 12) AND mask 44 = (addrFromPPtr p >> 12)") subgoal apply (frule cte_wp_at'_frame_at', fastforce) - apply (clarsimp simp: comp_def) apply (prop_tac "canonical_address p") apply (erule canonical_address_frame_at', fastforce) apply (prop_tac "p \ kernel_mappings") @@ -2231,7 +2197,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (match conclusion in \cpte_relation _ _\ \ \solves \simp (no_asm) add: cpte_relation_def, clarsimp simp: Let_def makeUserPTE_def attribsFromWord_def - pageBits_def + pageBits_def word_and_1 split: pte.splits if_splits\\ | match conclusion in \ccap_relation _ _\ \ \solves \simp (no_asm) add: ccap_relation_def, @@ -2442,14 +2408,13 @@ lemma decodeRISCVMMUInvocation_ccorres: apply (cut_tac P="\y. 
y < i_' x + 1 = rhs y" for rhs in allI, rule less_x_plus_1) apply (fastforce simp: asid_high_bits_def) - apply (clarsimp simp: rf_sr_riscvKSASIDTable from_bool_def + apply (clarsimp simp: rf_sr_riscvKSASIDTable asid_high_bits_word_bits option_to_ptr_def option_to_0_def order_less_imp_le linorder_not_less order_antisym[OF inc_le]) - apply (clarsimp simp: true_def false_def - split: option.split if_split) + apply (clarsimp split: option.split if_split) apply (auto simp: asid_high_bits_def word_le_nat_alt word_less_nat_alt unat_add_lem[THEN iffD1] Kernel_C_defs)[1] @@ -2468,8 +2433,7 @@ lemma decodeRISCVMMUInvocation_ccorres: apply (clarsimp simp: asidHighBits_handy_convs word_sle_def word_sless_def from_bool_0 rf_sr_riscvKSASIDTable[where n=0, simplified]) - apply (simp add: asid_high_bits_def option_to_ptr_def option_to_0_def - from_bool_def Kernel_C_defs + apply (simp add: asid_high_bits_def option_to_ptr_def option_to_0_def Kernel_C_defs split: option.split if_split) apply fastforce apply ceqv @@ -2549,7 +2513,7 @@ lemma decodeRISCVMMUInvocation_ccorres: apply (rule_tac Q'=UNIV and A'="{}" in conseqPost) apply (vcg exspec=ensureEmptySlot_modifies) apply (frule length_ineq_not_Nil) - apply (clarsimp simp: null_def ThreadState_Restart_def mask_def hd_conv_nth + apply (clarsimp simp: null_def ThreadState_defs mask_def hd_conv_nth isCap_simps rf_sr_ksCurThread cap_get_tag_UntypedCap word_le_make_less asid_high_bits_def split: list.split) @@ -2645,11 +2609,10 @@ lemma decodeRISCVMMUInvocation_ccorres: apply (clarsimp simp: excaps_in_mem_def) apply (frule (1) slotcap_in_mem_PageTable) apply (clarsimp simp: typ_heap_simps' from_bool_0 split: if_split) - apply (case_tac a; clarsimp simp: isCap_simps cap_get_tag_isCap_unfolded_H_cap - cap_tag_defs true_def) + apply (case_tac a; clarsimp simp: isCap_simps cap_get_tag_isCap_unfolded_H_cap cap_tag_defs) apply (intro conjI impI ; solves \clarsimp simp: isCap_simps asidInvalid_def cap_lift_page_table_cap cap_to_H_simps - true_def c_valid_cap_def cl_valid_cap_def + c_valid_cap_def cl_valid_cap_def ccap_relation_PageTableCap_IsMapped\) apply ceqv apply (rule ccorres_Cond_rhs_Seq) @@ -2694,8 +2657,7 @@ lemma decodeRISCVMMUInvocation_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def - syscall_error_rel_def exception_defs - syscall_error_to_H_cases false_def) + syscall_error_rel_def exception_defs syscall_error_to_H_cases) apply (simp add: lookup_fault_lift_invalid_root) apply csymbr apply (simp add: liftME_def bindE_assoc if_to_top_of_bind) @@ -2745,9 +2707,7 @@ lemma decodeRISCVMMUInvocation_ccorres: = capASIDBase cp") apply (subgoal_tac "\x. 
(x < (i_' xb + 1)) = (x < i_' xb \ x = i_' xb)") - apply (clarsimp simp: inc_le from_bool_def typ_heap_simps - asid_low_bits_def not_less field_simps - false_def + apply (clarsimp simp: inc_le typ_heap_simps asid_low_bits_def not_less field_simps split: if_split bool.splits) apply unat_arith apply (rule iffI) @@ -2797,11 +2757,10 @@ lemma decodeRISCVMMUInvocation_ccorres: word_sless_def word_sle_def) apply (erule cmap_relationE1[OF rf_sr_cpspace_asidpool_relation], erule ko_at_projectKO_opt) - apply (clarsimp simp: typ_heap_simps from_bool_def split: if_split) + apply (clarsimp simp: typ_heap_simps split: if_split) apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (clarsimp simp: cap_lift_asid_pool_cap cap_to_H_def - cap_asid_pool_cap_lift_def false_def - ucast_minus ucast_nat_def + cap_asid_pool_cap_lift_def ucast_minus ucast_nat_def elim!: ccap_relationE) apply ceqv apply (simp add: if_to_top_of_bind) @@ -2856,7 +2815,7 @@ lemma decodeRISCVMMUInvocation_ccorres: (* Can't reach *) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (cases cp; simp add: isCap_simps) - apply (clarsimp simp: o_def) + apply clarsimp apply (rule conjI) (* PTCap *) apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule_tac t="cteCap cte" in sym) @@ -2874,13 +2833,14 @@ lemma decodeRISCVMMUInvocation_ccorres: apply (rule conjI; clarsimp) apply (frule invs_arch_state') apply (rule conjI, clarsimp simp: valid_arch_state'_def valid_asid_table'_def) - apply (clarsimp simp: neq_Nil_conv excaps_map_def valid_tcb_state'_def invs_queues - invs_sch_act_wf' + apply (clarsimp simp: neq_Nil_conv excaps_map_def valid_tcb_state'_def invs_sch_act_wf' unat_lt2p[where 'a=machine_word_len, folded word_bits_def]) apply (frule interpret_excaps_eq[rule_format, where n=1], simp) apply (rule conjI; clarsimp)+ apply (rule conjI, erule ctes_of_valid', clarsimp) apply (intro conjI) + apply fastforce + apply fastforce apply fastforce apply (fastforce elim!: pred_tcb'_weakenE) apply (clarsimp simp: st_tcb_at'_def obj_at'_def) @@ -2897,9 +2857,11 @@ lemma decodeRISCVMMUInvocation_ccorres: apply (clarsimp simp: le_mask_asid_bits_helper) apply (simp add: is_aligned_shiftl_self) (* RISCVASIDPoolAssign *) - apply (clarsimp simp: isCap_simps valid_tcb_state'_def invs_queues invs_sch_act_wf') + apply (clarsimp simp: isCap_simps valid_tcb_state'_def invs_sch_act_wf') apply (frule invs_arch_state', clarsimp) apply (intro conjI) + apply fastforce + apply fastforce apply fastforce apply (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) apply (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) @@ -2918,24 +2880,21 @@ lemma decodeRISCVMMUInvocation_ccorres: apply clarsimp apply (clarsimp simp: cte_wp_at_ctes_of asidHighBits_handy_convs word_sle_def word_sless_def asidLowBits_handy_convs - rf_sr_ksCurThread "StrictC'_thread_state_defs" - mask_def[where n=4] + rf_sr_ksCurThread ThreadState_defs mask_def[where n=4] cong: if_cong) - apply (clarsimp simp: to_bool_def ccap_relation_isDeviceCap2 objBits_simps - pageBits_def from_bool_def case_bool_If) + apply (clarsimp simp: ccap_relation_isDeviceCap2 objBits_simps pageBits_def case_bool_If) apply (rule conjI; clarsimp) apply (clarsimp simp: neq_Nil_conv excaps_in_mem_def excaps_map_def) apply (frule interpret_excaps_eq[rule_format, where n=0], simp) apply (frule interpret_excaps_eq[rule_format, where n=1], simp) apply (clarsimp simp: mask_def[where n=4] slotcap_in_mem_def ccap_rights_relation_def rightsFromWord_wordFromRights) - apply (clarsimp simp: asid_high_bits_word_bits 
Kernel_C.asidHighBits_def true_def split: list.split_asm) + apply (clarsimp simp: asid_high_bits_word_bits Kernel_C.asidHighBits_def split: list.split_asm) apply (clarsimp simp: cap_untyped_cap_lift_def cap_lift_untyped_cap cap_to_H_def[split_simps cap_CL.split] hd_conv_nth length_ineq_not_Nil Kernel_C_defs elim!: ccap_relationE) - apply (clarsimp simp: to_bool_def unat_eq_of_nat - objBits_simps pageBits_def from_bool_def case_bool_If + apply (clarsimp simp: to_bool_def unat_eq_of_nat objBits_simps pageBits_def case_bool_If split: if_splits) apply (clarsimp simp: asid_low_bits_word_bits isCap_simps neq_Nil_conv excaps_map_def excaps_in_mem_def @@ -2946,23 +2905,21 @@ lemma decodeRISCVMMUInvocation_ccorres: apply (frule interpret_excaps_eq[rule_format, where n=0], simp) apply (rule conjI) apply (clarsimp simp: cap_lift_asid_pool_cap cap_lift_page_table_cap - cap_to_H_def to_bool_def valid_cap'_def + cap_to_H_def valid_cap'_def cap_page_table_cap_lift_def inv_ASIDPool - cap_asid_pool_cap_lift_def mask_def true_def + cap_asid_pool_cap_lift_def mask_def asid_shiftr_low_bits_less[unfolded mask_def asid_bits_def] word_and_le1 elim!: ccap_relationE split: if_split_asm asidpool.splits) apply (clarsimp split: list.split) apply (clarsimp simp: casid_pool_relation_def) apply (case_tac asidpool', simp) apply (clarsimp simp: cap_lift_asid_pool_cap cap_lift_page_table_cap - cap_to_H_def to_bool_def - cap_page_table_cap_lift_def + cap_to_H_def cap_page_table_cap_lift_def elim!: ccap_relationE split: if_split_asm) apply (erule rf_sr_cte_at_validD[rotated]) apply (fastforce simp: slotcap_in_mem_def2) done - lemma setMessageInfo_ksCurThread_ccorres: "ccorres dc xfdc (tcb_at' thread and (\s. ksCurThread s = thread)) (UNIV \ \mi = message_info_to_H mi'\) hs diff --git a/proof/crefine/RISCV64/CLevityCatch.thy b/proof/crefine/RISCV64/CLevityCatch.thy index 1de20de454..d51e5224f8 100644 --- a/proof/crefine/RISCV64/CLevityCatch.thy +++ b/proof/crefine/RISCV64/CLevityCatch.thy @@ -8,8 +8,9 @@ theory CLevityCatch imports "CBaseRefine.Include_C" ArchMove_C - "CLib.LemmaBucket_C" + "CParser.LemmaBucket_C" "Lib.LemmaBucket" + Boolean_C begin context begin interpretation Arch . 
(*FIXME: arch_split*) @@ -65,12 +66,12 @@ lemma empty_fail_getExtraCPtrs [intro!, simp]: "empty_fail (getExtraCPtrs sendBuffer info)" apply (simp add: getExtraCPtrs_def) apply (cases info, simp) - apply (cases sendBuffer, simp_all) + apply (cases sendBuffer; fastforce) done lemma empty_fail_loadCapTransfer [intro!, simp]: "empty_fail (loadCapTransfer a)" - by (simp add: loadCapTransfer_def capTransferFromWords_def) + by (fastforce simp: loadCapTransfer_def capTransferFromWords_def) lemma empty_fail_emptyOnFailure [intro!, simp]: "empty_fail m \ empty_fail (emptyOnFailure m)" @@ -90,7 +91,7 @@ lemma asUser_get_registers: apply (simp add: mapM_empty asUser_return) apply wp apply simp - apply (simp add: mapM_Cons asUser_bind_distrib asUser_return) + apply (simp add: mapM_Cons asUser_bind_distrib asUser_return empty_fail_cond) apply wp apply simp apply (rule hoare_strengthen_post) diff --git a/proof/crefine/RISCV64/CSpaceAcc_C.thy b/proof/crefine/RISCV64/CSpaceAcc_C.thy index 1bf605bbda..d177cad8d1 100644 --- a/proof/crefine/RISCV64/CSpaceAcc_C.thy +++ b/proof/crefine/RISCV64/CSpaceAcc_C.thy @@ -270,7 +270,7 @@ lemma array_assertion_abs_cnode_ctes: apply (metis array_assertion_shrink_right) done -lemmas ccorres_move_array_assertion_cnode_ctes [corres_pre] +lemmas ccorres_move_array_assertion_cnode_ctes [ccorres_pre] = ccorres_move_Guard_Seq [OF array_assertion_abs_cnode_ctes] ccorres_move_Guard [OF array_assertion_abs_cnode_ctes] diff --git a/proof/crefine/RISCV64/CSpace_All.thy b/proof/crefine/RISCV64/CSpace_All.thy index 603b77cb17..5054835fd9 100644 --- a/proof/crefine/RISCV64/CSpace_All.thy +++ b/proof/crefine/RISCV64/CSpace_All.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -24,9 +25,9 @@ abbreviation (* FIXME: move *) lemma ccorres_return_into_rel: - "ccorres (\rv rv'. r (f rv) rv') xf G G' hs a c + "ccorres (r \ f) xf G G' hs a c \ ccorres r xf G G' hs (a >>= (\rv. 
return (f rv))) c" - by (simp add: liftM_def[symmetric] o_def) + by (simp add: liftM_def[symmetric]) lemma lookupCap_ccorres': "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf @@ -253,8 +254,7 @@ lemma lookupSlotForCNodeOp_ccorres': apply vcg \ \last subgoal\ - apply (clarsimp simp: if_1_0_0 to_bool_def true_def word_size - fromIntegral_def integral_inv) + apply (clarsimp simp: word_size fromIntegral_def integral_inv) apply (case_tac "cap_get_tag root = scast cap_cnode_cap") prefer 2 apply clarsimp apply (clarsimp simp: unat_of_nat64 word_sle_def) @@ -290,7 +290,7 @@ lemma lookupSourceSlot_ccorres': apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres') - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done lemma lookupSourceSlot_ccorres: @@ -320,7 +320,7 @@ lemma lookupTargetSlot_ccorres': apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres') - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done lemma lookupTargetSlot_ccorres: @@ -350,7 +350,7 @@ lemma lookupPivotSlot_ccorres: apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres) - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done end diff --git a/proof/crefine/RISCV64/CSpace_C.thy b/proof/crefine/RISCV64/CSpace_C.thy index 6fc0a23ab2..aca185a59c 100644 --- a/proof/crefine/RISCV64/CSpace_C.thy +++ b/proof/crefine/RISCV64/CSpace_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * @@ -111,10 +112,6 @@ lemma Arch_maskCapRights_ccorres [corres]: apply (cases arch_cap) by (fastforce simp add: cap_get_tag_isCap isCap_simps simp del: not_ex simp_thms(44))+ -lemma to_bool_mask_to_bool_bf: - "to_bool (x && 1) = to_bool_bf (x::machine_word)" - by (simp add: to_bool_bf_def to_bool_def) - lemma to_bool_cap_rights_bf: "to_bool (capAllowRead_CL (seL4_CapRights_lift R)) = to_bool_bf (capAllowRead_CL (seL4_CapRights_lift R))" @@ -175,7 +172,7 @@ lemma maskCapRights_ccorres [corres]: apply csymbr apply (simp add: maskCapRights_cap_cases cap_get_tag_isCap del: Collect_const) apply wpc - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -185,7 +182,7 @@ lemma maskCapRights_ccorres [corres]: apply vcg apply clarsimp apply (simp add: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -194,7 +191,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -220,7 +217,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_ntfn_cap_bf to_bool_mask_to_bool_bf to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -228,7 +225,7 @@ lemma maskCapRights_ccorres [corres]: 
apply (rule conseqPre) apply vcg apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -256,7 +253,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_ep_cap_bf to_bool_mask_to_bool_bf to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -265,7 +262,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -273,7 +270,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply (subst bind_return [symmetric]) apply (rule ccorres_split_throws) apply ctac @@ -286,7 +283,7 @@ lemma maskCapRights_ccorres [corres]: apply wp apply vcg apply vcg - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply ccorres_rewrite @@ -306,7 +303,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_reply_cap_bf to_bool_mask_to_bool_bf[simplified] to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -315,7 +312,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -325,7 +322,7 @@ lemma maskCapRights_ccorres [corres]: apply vcg apply clarsimp apply (simp add: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -482,7 +479,7 @@ lemma Arch_isCapRevocable_spec: {t. \c c'. 
ccap_relation c (derivedCap_' s) \ ccap_relation c' (srcCap_' s) \ ret__unsigned_long_' t = from_bool (Arch.isCapRevocable c c')}" apply vcg - by (auto simp: false_def from_bool_def RISCV64_H.isCapRevocable_def + by (auto simp: RISCV64_H.isCapRevocable_def cap_get_tag_isCap_unfolded_H_cap cap_tag_defs isCap_simps cap_get_tag_isCap[unfolded, simplified] split: capability.splits arch_capability.splits bool.splits) @@ -492,7 +489,7 @@ lemmas isCapRevocable_simps[simp] = Retype_H.isCapRevocable_def[split_simps capa context begin (* revokable_ccorres *) private method revokable'_hammer = solves \( - simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def, + simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs, rule ccorres_guard_imp, rule ccorres_return_C; clarsimp)\ @@ -521,7 +518,7 @@ lemma revokable_ccorres: \ \Uninteresting caps\ apply revokable'_hammer+ \ \NotificationCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (frule_tac cap'1=srcCap in cap_get_tag_NotificationCap[THEN iffD1]) @@ -530,12 +527,12 @@ lemma revokable_ccorres: apply (clarsimp simp: cap_get_tag_isCap isCap_simps) apply (fastforce simp: cap_get_tag_isCap isCap_simps) \ \IRQHandlerCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (fastforce simp: cap_get_tag_isCap isCap_simps) \ \EndpointCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (frule_tac cap'1=srcCap in cap_get_tag_EndpointCap[THEN iffD1]) @@ -749,7 +746,7 @@ lemma update_freeIndex': show ?thesis apply (cinit lift: cap_ptr_' v64_') apply (rule ccorres_pre_getCTE) - apply (rule_tac P="\s. ctes_of s srcSlot = Some rv \ (\i. cteCap rv = UntypedCap d p sz i)" + apply (rule_tac P="\s. ctes_of s srcSlot = Some cte \ (\i. 
cteCap cte = UntypedCap d p sz i)" in ccorres_from_vcg[where P' = UNIV]) apply (rule allI) apply (rule conseqPre) @@ -871,7 +868,7 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (rule ccorres_Guard) apply (rule ccorres_call) - apply (rule update_freeIndex [unfolded dc_def]) + apply (rule update_freeIndex) apply simp apply simp apply simp @@ -897,14 +894,14 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply csymbr apply (clarsimp simp: cap_get_tag_to_H cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_Skip) apply clarsimp apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap) apply (frule(1) cte_wp_at_valid_objs_valid_cap') apply (clarsimp simp: untypedBits_defs) @@ -1020,19 +1017,17 @@ lemma cteInsert_ccorres: apply csymbr apply simp apply (rule ccorres_move_c_guard_cte) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres:ccorres_updateMDB_set_mdbPrev) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres: ccorres_updateMDB_skip) - apply (wp static_imp_wp)+ - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp)+ + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg - apply (wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg apply (clarsimp simp:cmdb_node_relation_mdbNext) - apply (wp setUntypedCapAsFull_cte_at_wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp setUntypedCapAsFull_cte_at_wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply (vcg exspec=setUntypedCapAsFull_modifies) apply wp apply vcg @@ -1169,7 +1164,7 @@ lemma cteMove_ccorres: apply (clarsimp simp: cte_wp_at_ctes_of cteSizeBits_eq ctes_of_canonical ctes_of_aligned_bits) apply assumption apply (clarsimp simp: ccap_relation_NullCap_iff cmdbnode_relation_def - mdb_node_to_H_def nullMDBNode_def false_def) + mdb_node_to_H_def nullMDBNode_def) done (************************************************************************) @@ -1495,8 +1490,8 @@ lemma emptySlot_helper: mdbFirstBadged_CL (cteMDBNode_CL y)") prefer 2 apply (drule cteMDBNode_CL_lift [symmetric]) - subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) - subgoal by (simp add: to_bool_def mask_def) + subgoal by (simp add: mdb_node_lift_def word_bw_assocs) + subgoal by (simp add: to_bool_def) \ \\ \x\fst \\ apply clarsimp apply (rule fst_setCTE [OF ctes_of_cte_at], assumption ) @@ -1526,7 +1521,7 @@ lemma emptySlot_helper: prefer 2 apply (drule cteMDBNode_CL_lift [symmetric]) subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) - apply (simp add: to_bool_def mask_def split: if_split) + apply (simp add: to_bool_def split: if_split) \ \trivial case where mdbNext rva = 0\ apply (simp add:ccorres_cond_empty_iff) @@ -1677,7 +1672,6 @@ lemma setIRQState_ccorres: apply 
wp apply (simp add: empty_fail_def getInterruptState_def simpler_gets_def) apply clarsimp - apply (simp add: from_bool_def) apply (cases irqState, simp_all) apply (simp add: Kernel_C.IRQSignal_def Kernel_C.IRQInactive_def) apply (simp add: Kernel_C.IRQTimer_def Kernel_C.IRQInactive_def) @@ -2215,7 +2209,6 @@ lemma postCapDeletion_ccorres: apply (rule ccorres_symb_exec_r) apply (rule_tac xf'=irq_' in ccorres_abstract, ceqv) apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) - apply (fold dc_def) apply (frule cap_get_tag_to_H, solves \clarsimp simp: cap_get_tag_isCap_unfolded_H_cap\) apply (clarsimp simp: cap_irq_handler_cap_lift) apply (ctac(no_vcg) add: deletedIRQHandler_ccorres) @@ -2226,9 +2219,9 @@ lemma postCapDeletion_ccorres: apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_Cond_rhs) apply (wpc; clarsimp simp: isCap_simps) - apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres) apply (simp add: not_irq_or_arch_cap_case) - apply (rule ccorres_return_Skip[unfolded dc_def])+ + apply (rule ccorres_return_Skip) apply clarsimp apply (rule conjI, clarsimp simp: isCap_simps Kernel_C.maxIRQ_def) apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) @@ -2277,7 +2270,7 @@ lemma emptySlot_ccorres: \ \*** proof for the 'else' branch (return () and SKIP) ***\ prefer 2 - apply (ctac add: ccorres_return_Skip[unfolded dc_def]) + apply (ctac add: ccorres_return_Skip) \ \*** proof for the 'then' branch ***\ @@ -2322,7 +2315,7 @@ lemma emptySlot_ccorres: \ \the post_cap_deletion case\ - apply (ctac(no_vcg) add: postCapDeletion_ccorres [unfolded dc_def]) + apply (ctac(no_vcg) add: postCapDeletion_ccorres) \ \Haskell pre/post for y \ updateMDB slot (\a. nullMDBNode);\ apply wp @@ -2332,7 +2325,7 @@ lemma emptySlot_ccorres: \ \Haskell pre/post for y \ updateCap slot capability.NullCap;\ apply wp \ \C pre/post for y \ updateCap slot capability.NullCap;\ - apply (simp add: Collect_const_mem cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def false_def) + apply (simp add: Collect_const_mem cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def) \ \Haskell pre/post for the two nested updates\ apply wp \ \C pre/post for the two nested updates\ @@ -2395,8 +2388,8 @@ lemma capSwapForDelete_ccorres: \ \--- instruction: when (slot1 \ slot2) \ / IF Ptr slot1 = Ptr slot2 THEN \\ apply (simp add:when_def) apply (rule ccorres_if_cond_throws2 [where Q = \ and Q' = \]) - apply (case_tac "slot1=slot2", simp+) - apply (rule ccorres_return_void_C [simplified dc_def]) + apply (case_tac "slot1=slot2"; simp) + apply (rule ccorres_return_void_C) \ \***Main goal***\ \ \--- ccorres goal with 2 affectations (cap1 and cap2) on both on Haskell and C\ @@ -2405,7 +2398,7 @@ lemma capSwapForDelete_ccorres: apply (rule ccorres_pre_getCTE)+ apply (rule ccorres_move_c_guard_cte, rule ccorres_symb_exec_r)+ \ \***Main goal***\ - apply (ctac (no_vcg) add: cteSwap_ccorres [unfolded dc_def] ) + apply (ctac (no_vcg) add: cteSwap_ccorres) \ \C Hoare triple for \cap2 :== \\ apply vcg \ \C existential Hoare triple for \cap2 :== \\ @@ -2487,7 +2480,7 @@ lemma Arch_sameRegionAs_spec: apply (cases capa; cases capb; frule (1) cap_get_tag[where cap'=cap_a]; (frule cap_lifts[where c=cap_a, THEN iffD1])?; frule (1) cap_get_tag[where cap'=cap_b]; (frule cap_lifts[where c=cap_b, THEN iffD1])?; - simp add: cap_tag_defs isCap_simps from_bool_def true_def false_def if_0_1_eq; + simp add: cap_tag_defs isCap_simps from_bool_def if_0_1_eq; clarsimp simp: 
ccap_relation_def cap_to_H_def c_valid_cap_def cl_valid_cap_def Let_def) by (clarsimp simp: cap_frame_cap_lift_def'[simplified cap_tag_defs] framesize_to_H_def pageBitsForSize_def field_simps @@ -2717,11 +2710,10 @@ lemma cap_get_capIsPhysical_spec: cap_lift_asid_control_cap word_sle_def cap_lift_irq_control_cap cap_lift_null_cap mask_def objBits_simps cap_lift_domain_cap - ptr_add_assertion_positive from_bool_def - true_def false_def cap_get_tag_scast + ptr_add_assertion_positive cap_get_tag_scast dest!: sym [where t = "ucast (cap_get_tag cap)" for cap] split: vmpage_size.splits)+ - by (fastforce dest!: cap_lift_Some_CapD split: option.split cap_CL.split) + by (fastforce dest!: cap_lift_Some_CapD split: option.splits cap_CL.splits) lemma ccap_relation_get_capPtr_not_physical: "\ ccap_relation hcap ccap; capClass hcap \ PhysicalClass \ \ @@ -2800,39 +2792,40 @@ lemma sameRegionAs_spec: apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps) \ \capa is a ThreadCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(1)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(1)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_thread_cap_lift) apply (simp add: cap_to_H_def) + apply (clarsimp simp: from_bool_0 split: if_split) apply (clarsimp simp: case_bool_If ctcb_ptr_to_tcb_ptr_def if_distrib cong: if_cong) apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is a NullCap\ - apply (simp add: cap_tag_defs from_bool_def false_def) + apply (simp add: cap_tag_defs) \ \capa is an NotificationCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(3)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(3)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_notification_cap_lift) apply (simp add: cap_to_H_def) - apply (clarsimp split: if_split) + apply (clarsimp simp: from_bool_0 split: if_split) apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is an IRQHandlerCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(5)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(5)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_irq_handler_cap_lift) apply (simp add: cap_to_H_def) apply (clarsimp simp: up_ucast_inj_eq c_valid_cap_def ucast_eq_mask - cl_valid_cap_def mask_twice + cl_valid_cap_def mask_twice from_bool_0 split: if_split bool.split | intro impI conjI | simp) @@ -2840,34 +2833,34 @@ lemma sameRegionAs_spec: apply (clarsimp simp: isArchCap_tag_def2) \ \capa is an EndpointCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(4)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(4)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: 
cap_endpoint_cap_lift) apply (simp add: cap_to_H_def) - apply (clarsimp split: if_split) + apply (clarsimp simp: from_bool_0 split: if_split) apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is a DomainCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 split: if_split) \ \capa is a Zombie\ - apply (simp add: cap_tag_defs from_bool_def false_def) + apply (simp add: cap_tag_defs) \ \capa is an Arch object cap\ apply (frule_tac cap'=cap_a in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) apply (rule conjI, clarsimp, rule impI)+ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] \ \capb is an Arch object cap\ apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) \ \capa is a ReplyCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(8)) @@ -2875,7 +2868,7 @@ lemma sameRegionAs_spec: apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_reply_cap_lift) apply (simp add: cap_to_H_def ctcb_ptr_to_tcb_ptr_def) - apply (clarsimp split: if_split) + apply (clarsimp simp: from_bool_0 split: if_split) \ \capa is an UntypedCap\ apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(9)) apply (intro conjI) @@ -2883,8 +2876,7 @@ lemma sameRegionAs_spec: apply (rule impI, drule(1) cap_get_tag_to_H)+ apply (clarsimp simp: capAligned_def word_bits_conv objBits_simps' get_capZombieBits_CL_def - Let_def word_less_nat_alt - less_mask_eq true_def + Let_def word_less_nat_alt less_mask_eq split: if_split_asm) apply (subgoal_tac "capBlockSize_CL (cap_untyped_cap_lift cap_a) \ 0x3F") apply (simp add: word_le_make_less) @@ -2905,10 +2897,9 @@ lemma sameRegionAs_spec: cap_untyped_cap_lift cap_to_H_def field_simps valid_cap'_def)+)[4] apply (rule impI, simp add: from_bool_0 ccap_relation_get_capIsPhysical[symmetric]) - apply (simp add: from_bool_def false_def) \ \capa is a CNodeCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(10)) @@ -2916,10 +2907,9 @@ lemma sameRegionAs_spec: apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_cnode_cap_lift) apply (simp add: cap_to_H_def) - apply (clarsimp split: if_split bool.split) + apply (clarsimp simp: from_bool_0 split: if_split bool.split) \ \capa is an IRQControlCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def)[1] + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply 
(frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 split: if_split) done @@ -2954,7 +2944,7 @@ lemma ccap_relation_FrameCap_IsDevice: apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_lift_def cap_lift_defs cap_tag_defs Let_def) apply (thin_tac _)+ - by (clarsimp simp: to_bool_def mask_def word_and_1 split: if_splits) + by (clarsimp simp: word_and_1 split: if_splits) lemma ccap_relation_FrameCap_Size: "ccap_relation (ArchObjectCap (FrameCap p r s d m)) ccap @@ -3013,16 +3003,16 @@ lemma Arch_sameObjectAs_spec: apply (cases capa) apply (all \frule (1) cap_get_tag[where cap'=cap_a]\) apply (all \(frule cap_lifts[where c=cap_a, THEN iffD1])?\) - apply (all \clarsimp simp: cap_tag_defs isCap_simps from_bool_def true_def false_def if_0_1_eq - split: if_splits\) + apply (all \clarsimp simp: cap_tag_defs isCap_simps + split: if_splits\) apply (all \fastforce?\) (* frames remain. *) apply (all \cases capb\) apply (all \frule (1) cap_get_tag[where cap'=cap_b]\) apply (all \(frule cap_lifts[where c=cap_b, THEN iffD1])?\) - apply (all \clarsimp simp: cap_tag_defs isCap_simps from_bool_def true_def false_def if_0_1_eq - ccap_relation_FrameCap_fields framesize_from_H_eq capAligned_def - split: if_splits\) + apply (all \clarsimp simp: cap_tag_defs isCap_simps ccap_relation_FrameCap_fields + framesize_from_H_eq capAligned_def + split: if_splits\) by (all \(fastforce simp: RISCV64_H.sameRegionAs_def isCap_simps is_aligned_no_overflow_mask)?\) done qed @@ -3036,8 +3026,7 @@ lemma sameObjectAs_spec: apply vcg apply (clarsimp simp: sameObjectAs_def isArchCap_tag_def2) apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs - from_bool_def false_def) + isCap_simps cap_tag_defs) apply fastforce+ \ \capa is an arch cap\ apply (frule cap_get_tag_isArchCap_unfolded_H_cap) @@ -3118,7 +3107,7 @@ lemma isMDBParentOf_spec: apply (simp add: ccte_relation_def map_option_case) apply (simp add: cte_lift_def) apply (clarsimp simp: cte_to_H_def mdb_node_to_H_def split: option.split_asm) - apply (clarsimp simp: Let_def false_def from_bool_def to_bool_def + apply (clarsimp simp: Let_def to_bool_def split: if_split bool.splits) apply ((clarsimp simp: typ_heap_simps dest!: lift_t_g)+)[3] apply (rule_tac x="cteCap ctea" in exI, rule conjI) @@ -3135,11 +3124,11 @@ lemma isMDBParentOf_spec: apply (rule conjI) \ \sameRegionAs = 0\ apply (rule impI) - apply (clarsimp simp: from_bool_def false_def + apply (clarsimp simp: from_bool_def split: if_split bool.splits) \ \sameRegionAs \ 0\ - apply (clarsimp simp: from_bool_def false_def) + apply (clarsimp simp: from_bool_def) apply (clarsimp cong:bool.case_cong if_cong simp: typ_heap_simps) apply (rule conjI) @@ -3147,8 +3136,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (frule cap_get_tag_EndpointCap) apply simp - apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def true_def) \ \badge of A is not 0 now\ - + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ apply (subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_endpoint_cap") \ \needed also after\ prefer 2 @@ -3163,8 +3151,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (frule cap_get_tag_NotificationCap) apply simp - apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def true_def) \ \badge of A is not 0 now\ - + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ apply 
(subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_notification_cap") \ \needed also after\ prefer 2 @@ -3180,7 +3167,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (simp add: to_bool_def) apply (subgoal_tac "(\ (isEndpointCap (cap_to_H x2b))) \ ( \ (isNotificationCap (cap_to_H x2b)))") - apply (clarsimp simp: true_def) + apply clarsimp apply (clarsimp simp: cap_get_tag_isCap [symmetric]) done @@ -3196,7 +3183,7 @@ lemma updateCapData_spec: apply (simp add: updateCapData_def) apply (case_tac cap, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps from_bool_def isArchCap_tag_def2 cap_tag_defs Let_def) + isCap_simps isArchCap_tag_def2 cap_tag_defs Let_def) \ \NotificationCap\ apply clarsimp apply (frule cap_get_tag_isCap_unfolded_H_cap(3)) @@ -3322,7 +3309,6 @@ lemma ensureNoChildren_ccorres: apply (rule conjI) \ \isMDBParentOf is not zero\ apply clarsimp - apply (simp add: from_bool_def) apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] apply (simp add: bind_def) @@ -3333,7 +3319,6 @@ lemma ensureNoChildren_ccorres: apply (simp add: syscall_error_to_H_cases(9)) \ \isMDBParentOf is zero\ apply clarsimp - apply (simp add: from_bool_def) apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] apply (simp add: bind_def) apply (simp add: split_paired_Bex) @@ -3417,7 +3402,7 @@ lemma deriveCap_ccorres': apply csymbr apply (fold case_bool_If) apply wpc - apply (clarsimp simp: cap_get_tag_isCap isCap_simps from_bool_def) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) apply csymbr apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws [where P=\ and P' = UNIV]) @@ -3426,7 +3411,7 @@ lemma deriveCap_ccorres': apply vcg apply clarsimp apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -3434,7 +3419,7 @@ lemma deriveCap_ccorres': apply (clarsimp simp: returnOk_def return_def ccap_relation_NullCap_iff) apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_rhs_assoc)+ @@ -3457,7 +3442,7 @@ lemma deriveCap_ccorres': errstate_def) apply wp apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -3471,7 +3456,7 @@ lemma deriveCap_ccorres': apply vcg apply (clarsimp simp: cap_get_tag_isCap liftME_def Let_def isArchCap_T_isArchObjectCap - ccorres_cond_univ_iff from_bool_def) + ccorres_cond_univ_iff) apply (rule ccorres_add_returnOk) apply (rule ccorres_split_nothrow_call_novcgE [where xf'=ret__struct_deriveCap_ret_C_']) @@ -3489,7 +3474,7 @@ lemma deriveCap_ccorres': apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def throwError_def) apply wp - apply (simp add: cap_get_tag_isCap isArchCap_T_isArchObjectCap from_bool_def) + apply (simp add: cap_get_tag_isCap isArchCap_T_isArchObjectCap) apply csymbr apply (simp add: cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -3500,7 +3485,6 @@ lemma deriveCap_ccorres': cap_get_tag_isArchCap_unfolded_H_cap) done - lemma deriveCap_ccorres: "ccorres 
(syscall_error_rel \ ccap_relation) deriveCap_xf (invs') (UNIV \ {s. ccap_relation cap (cap_' s)} \ {s. slot_' s = Ptr slot}) [] diff --git a/proof/crefine/RISCV64/CSpace_RAB_C.thy b/proof/crefine/RISCV64/CSpace_RAB_C.thy index b31bf1d95b..7da90c1b6b 100644 --- a/proof/crefine/RISCV64/CSpace_RAB_C.thy +++ b/proof/crefine/RISCV64/CSpace_RAB_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -53,7 +54,7 @@ lemma ccorres_remove_bind_returnOk_noguard: apply clarsimp apply (drule not_snd_bindE_I1) apply (erule (4) ccorresE[OF ac]) - apply (clarsimp simp add: bindE_def returnOk_def NonDetMonad.lift_def bind_def return_def + apply (clarsimp simp add: bindE_def returnOk_def Nondet_Monad.lift_def bind_def return_def split_def) apply (rule bexI [rotated], assumption) apply (simp add: throwError_def return_def unif_rrel_def @@ -140,7 +141,8 @@ lemma ccorres_locateSlotCap_push: apply (rule monadic_rewrite_bindE[OF monadic_rewrite_refl]) apply (rule monadic_rewrite_transverse) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_stateAssert) + apply (rule monadic_rewrite_stateAssert[where f="return", simplified]) + apply (rule monadic_rewrite_refl) apply simp apply (rule monadic_rewrite_refl) apply assumption @@ -207,10 +209,8 @@ next apply (simp add: cap_get_tag_isCap split del: if_split) apply (thin_tac "ret__unsigned_longlong = X" for X) apply (rule ccorres_split_throws [where P = "?P"]) - apply (rule_tac G' = "\w_rightsMask. ({s. nodeCap_' s = nodeCap} - \ {s. unat (n_bits_' s) = guard'})" - in ccorres_abstract [where xf' = w_rightsMask_']) - apply (rule ceqv_refl) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap} \ {s. unat (n_bits_' s) = guard'}" + in ccorres_inst) apply (rule_tac r' = "?rvr" in ccorres_rel_imp [where xf' = rab_xf]) defer @@ -222,7 +222,7 @@ next apply (vcg strip_guards=true) \ \takes a while\ apply clarsimp apply simp - apply (clarsimp simp: cap_get_tag_isCap to_bool_def) + apply (clarsimp simp: cap_get_tag_isCap) \ \Main thm\ proof (induct cap' cptr' guard' rule: resolveAddressBits.induct [case_names ind]) case (ind cap cptr guard) @@ -563,8 +563,8 @@ lemma rightsFromWord_spec: \seL4_CapRights_lift \ret__struct_seL4_CapRights_C = cap_rights_from_word_canon \<^bsup>s\<^esup>w \" apply vcg apply (simp add: seL4_CapRights_lift_def nth_shiftr mask_shift_simps nth_shiftr - cap_rights_from_word_canon_def from_bool_def word_and_1 eval_nat_numeral - word_sless_def word_sle_def) + cap_rights_from_word_canon_def word_and_1 eval_nat_numeral + word_sless_def word_sle_def) done @@ -579,12 +579,6 @@ lemma cap_rights_to_H_from_word_canon [simp]: apply (simp add: cap_rights_to_H_def) done -(* MOVE *) -lemma to_bool_false [simp]: - "to_bool false = False" - unfolding to_bool_def false_def - by simp - lemma tcb_ptr_to_ctcb_ptr_mask [simp]: assumes tcbat: "tcb_at' thread s" shows "ptr_val (tcb_ptr_to_ctcb_ptr thread) && ~~ mask tcbBlockSizeBits = thread" diff --git a/proof/crefine/RISCV64/Ctac_lemmas_C.thy b/proof/crefine/RISCV64/Ctac_lemmas_C.thy index f17584a573..f7875d0c76 100644 --- a/proof/crefine/RISCV64/Ctac_lemmas_C.thy +++ b/proof/crefine/RISCV64/Ctac_lemmas_C.thy @@ -23,7 +23,7 @@ lemma c_guard_abs_cte: apply (simp add: typ_heap_simps') done -lemmas ccorres_move_c_guard_cte [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] +lemmas ccorres_move_c_guard_cte [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] lemma c_guard_abs_tcb: fixes p :: "tcb_C ptr" @@ 
-33,7 +33,7 @@ lemma c_guard_abs_tcb: apply simp done -lemmas ccorres_move_c_guard_tcb [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] +lemmas ccorres_move_c_guard_tcb [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] lemma cte_array_relation_array_assertion: "gsCNodes s p = Some n \ cte_array_relation s cstate @@ -96,7 +96,7 @@ lemma array_assertion_abs_tcb_ctes_add': lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] @@ -119,7 +119,7 @@ lemma c_guard_abs_tcb_ctes': done lemmas c_guard_abs_tcb_ctes = c_guard_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_c_guard_tcb_ctes [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] +lemmas ccorres_move_c_guard_tcb_ctes [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] lemma c_guard_abs_pte: "\s s'. (s, s') \ rf_sr \ pte_at' (ptr_val p) s \ True diff --git a/proof/crefine/RISCV64/Delete_C.thy b/proof/crefine/RISCV64/Delete_C.thy index 60e6406f17..876fb99dd4 100644 --- a/proof/crefine/RISCV64/Delete_C.thy +++ b/proof/crefine/RISCV64/Delete_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -147,7 +148,7 @@ lemma capRemovable_spec: supply if_cong[cong] apply vcg apply (clarsimp simp: cap_get_tag_isCap(1-8)[THEN trans[OF eq_commute]]) - apply (simp add: capRemovable_def from_bool_def[where b=True] true_def) + apply (simp add: capRemovable_def) apply (clarsimp simp: ccap_zombie_radix_less4) apply (subst eq_commute, subst from_bool_eq_if) apply (rule exI, rule conjI, assumption) @@ -228,7 +229,7 @@ lemma cteDelete_ccorres1: apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg apply wp - apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R) + apply (rule_tac Q'="\rv. invs'" in hoare_strengthen_postE_R) apply (wp cutMon_validE_drop finaliseSlot_invs) apply fastforce apply (auto simp: cintr_def) @@ -305,7 +306,7 @@ lemma cteDelete_invs'': "\invs' and sch_act_simple and (\s. ex \ ex_cte_cap_to' ptr s)\ cteDelete ptr ex \\rv. invs'\" apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (unfold validE_R_def) apply (rule use_spec) apply (rule spec_valid_conj_liftE1) @@ -641,14 +642,14 @@ lemma reduceZombie_ccorres1: apply (clarsimp simp: throwError_def return_def cintr_def) apply vcg apply (wp cutMon_validE_drop) - apply (rule_tac Q'="\rv. invs' and cte_at' slot and valid_cap' cap" in hoare_post_imp_R) + apply (rule_tac Q'="\rv. 
invs' and cte_at' slot and valid_cap' cap" in hoare_strengthen_postE_R) apply (wp cteDelete_invs'') apply (clarsimp simp: cte_wp_at_ctes_of) apply (fastforce dest: ctes_of_valid') apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (simp add: guard_is_UNIV_def Collect_const_mem) - apply (clarsimp simp: from_bool_def false_def isCap_simps size_of_def cte_level_bits_def) + apply (clarsimp simp: isCap_simps size_of_def cte_level_bits_def) apply (simp only: word_bits_def unat_of_nat unat_arith_simps, simp) apply (simp add: guard_is_UNIV_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) @@ -730,8 +731,7 @@ lemma finaliseSlot_ccorres: apply (rule ccorres_drop_cutMon) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - from_bool_def true_def ccap_relation_NullCap_iff) + apply (clarsimp simp: returnOk_def return_def ccap_relation_NullCap_iff) apply (simp add: Collect_True liftE_bindE split_def ccorres_cond_iffs cutMon_walk_bind del: Collect_const cong: call_ignore_cong) @@ -768,8 +768,7 @@ lemma finaliseSlot_ccorres: apply (rule_tac P="\s. cleanup_info_wf' (snd rvb)" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - from_bool_def true_def) + apply (clarsimp simp: returnOk_def return_def) apply (clarsimp simp: cleanup_info_wf'_def arch_cleanup_info_wf'_def split: if_split capability.splits) apply vcg @@ -806,11 +805,11 @@ lemma finaliseSlot_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def) apply (drule use_valid [OF _ finaliseCap_cases, OF _ TrueI]) - apply (simp add: from_bool_def false_def irq_opt_relation_def true_def + apply (simp add: irq_opt_relation_def split: if_split_asm) apply vcg apply wp - apply (simp add: guard_is_UNIV_def true_def) + apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) apply (simp only: liftE_bindE cutMon_walk_bind Let_def @@ -835,7 +834,6 @@ lemma finaliseSlot_ccorres: in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) - apply (simp add: from_bool_def false_def) apply fastforce apply ceqv apply (simp only: from_bool_0 simp_thms Collect_False @@ -858,7 +856,7 @@ lemma finaliseSlot_ccorres: ccorres_seq_skip) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) apply (rule hyps[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def], - unfolded split_def, unfolded K_def], + unfolded split_def], (simp add: in_monad)+) apply (simp add: from_bool_0) apply simp @@ -880,7 +878,7 @@ lemma finaliseSlot_ccorres: apply (simp add: guard_is_UNIV_def) apply (simp add: conj_comms) apply (wp make_zombie_invs' updateCap_cte_wp_at_cases - updateCap_cap_to' hoare_vcg_disj_lift static_imp_wp)+ + updateCap_cap_to' hoare_vcg_disj_lift hoare_weak_lift_imp)+ apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) @@ -904,11 +902,11 @@ lemma finaliseSlot_ccorres: simp: isCap_simps final_matters'_def o_def) apply clarsimp apply (frule valid_globals_cte_wpD'[rotated], clarsimp) - apply (clarsimp simp: cte_wp_at_ctes_of false_def from_bool_def) + apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (frule valid_global_refsD_with_objSize, clarsimp) apply (auto simp: typ_heap_simps dest!: ccte_relation_ccap_relation)[1] - apply (wp isFinalCapability_inv static_imp_wp | wp (once) 
isFinal[where x=slot'])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | wp (once) isFinal[where x=slot'])+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -1003,26 +1001,23 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_drop_cutMon_bindE) apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg) add: cteDelete_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE) apply (rule ccorres_drop_cutMon_bindE) apply (ctac(no_vcg) add: preemptionPoint_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) - apply (rule hyps[unfolded K_def], - (fastforce simp: in_monad)+)[1] + apply (rule hyps; fastforce simp: in_monad) apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp preemptionPoint_invR) apply simp apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp cteDelete_invs' cteDelete_sch_act_simple) apply (rule ccorres_cond_false) @@ -1030,9 +1025,8 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def) - apply (simp add: guard_is_UNIV_def from_bool_def true_def cintr_def - Collect_const_mem exception_defs) - apply (simp add: guard_is_UNIV_def from_bool_def true_def) + apply (simp add: guard_is_UNIV_def cintr_def Collect_const_mem exception_defs) + apply (simp add: guard_is_UNIV_def) apply (rule getCTE_wp) apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def) apply (drule invs_mdb') diff --git a/proof/crefine/RISCV64/Detype_C.thy b/proof/crefine/RISCV64/Detype_C.thy index c07de07ab7..7e2c7a19ee 100644 --- a/proof/crefine/RISCV64/Detype_C.thy +++ b/proof/crefine/RISCV64/Detype_C.thy @@ -122,16 +122,6 @@ lemma h_t_valid_typ_region_bytes: by (simp add: valid_footprint_typ_region_bytes[OF neq_byte] size_of_def) -lemma proj_d_lift_state_hrs_htd_update [simp]: - "proj_d (lift_state (hrs_htd_update f hp)) = f (hrs_htd hp)" - by (cases hp) (simp add: hrs_htd_update_def proj_d_lift_state hrs_htd_def) - -lemma proj_d_lift_state_hrs_htd [simp]: - "proj_d (lift_state hp), g \\<^sub>t x = hrs_htd hp, g \\<^sub>t x" - apply (cases hp) - apply (simp add: proj_d_lift_state hrs_htd_def) - done - lemma heap_list_s_heap_list': fixes p :: "'a :: c_type ptr" shows "hrs_htd hp,\ \\<^sub>t p \ @@ -1486,14 +1476,6 @@ lemma map_comp_restrict_map: "(f \\<^sub>m (restrict_map m S)) = (restrict_map (f \\<^sub>m m) S)" by (rule ext, simp add: restrict_map_def map_comp_def) -lemma size_td_uinfo_array_tag_n_m[simp]: - "size_td (uinfo_array_tag_n_m (ta :: ('a :: c_type) itself) n m) - = size_of (TYPE('a)) * n" - apply (induct n) - apply (simp add: uinfo_array_tag_n_m_def) - apply (simp add: uinfo_array_tag_n_m_def size_of_def) - done - lemma modify_machinestate_assert_cnodes_swap: "do x \ modify (ksMachineState_update f); y \ stateAssert (\s. 
\ cNodePartialOverlap (gsCNodes s) S) []; g od @@ -1562,13 +1544,13 @@ lemma deleteObjects_ccorres': doMachineOp_modify modify_modify o_def ksPSpace_ksMSu_comm bind_assoc modify_machinestate_assert_cnodes_swap modify_modify_bind) - apply (rule ccorres_stateAssert_fwd) + apply (rule ccorres_stateAssert_fwd)+ apply (rule ccorres_stateAssert_after) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: in_monad) apply (rule bexI [rotated]) - apply (rule iffD2 [OF in_monad(20)]) + apply (rule iffD2 [OF in_monad(21)]) apply (rule conjI [OF refl refl]) apply (clarsimp simp: simpler_modify_def) proof - @@ -1698,35 +1680,11 @@ proof - done moreover - from invs have "valid_queues s" .. - hence "\p. \t \ set (ksReadyQueues s p). tcb_at' t s \ ko_wp_at' live' t s" - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec, drule spec) - apply clarsimp - apply (drule (1) bspec) - apply (rule conjI) - apply (erule obj_at'_weakenE) - apply simp - apply (simp add: obj_at'_real_def) - apply (erule ko_wp_at'_weakenE) - apply (clarsimp simp: inQ_def) - done - hence tat: "\p. \t \ set (ksReadyQueues s p). tcb_at' t s" - and tlive: "\p. \t \ set (ksReadyQueues s p). ko_wp_at' live' t s" - by auto from sr have - "cready_queues_relation (clift ?th_s) - (ksReadyQueues_' (globals s')) (ksReadyQueues s)" + "cready_queues_relation (ksReadyQueues s) (ksReadyQueues_' (globals s'))" unfolding cready_queues_relation_def rf_sr_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp) - apply fastforce - apply ((subst lift_t_typ_region_bytes, rule cm_disj_tcb, assumption+, - simp_all add: objBits_simps pageBits_def)[1])+ - \ \waiting ...\ - apply (simp add: tcb_queue_relation_live_restrict - [OF D.valid_untyped tat tlive rl]) done moreover diff --git a/proof/crefine/RISCV64/Finalise_C.thy b/proof/crefine/RISCV64/Finalise_C.thy index 8e5e172513..cd38ceecaa 100644 --- a/proof/crefine/RISCV64/Finalise_C.thy +++ b/proof/crefine/RISCV64/Finalise_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * @@ -34,6 +35,108 @@ declare if_split [split del] definition "option_map2 f m = option_map f \ m" +definition ksReadyQueues_head_end_2 :: "(domain \ priority \ ready_queue) \ bool" where + "ksReadyQueues_head_end_2 qs \ + \d p. 
tcbQueueHead (qs (d, p)) \ None \ tcbQueueEnd (qs (d, p)) \ None" + +abbreviation "ksReadyQueues_head_end s \ ksReadyQueues_head_end_2 (ksReadyQueues s)" + +lemmas ksReadyQueues_head_end_def = ksReadyQueues_head_end_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end: + "ksReadyQueues_asrt s \ ksReadyQueues_head_end s" + by (fastforce dest: tcbQueueHead_iff_tcbQueueEnd + simp: ready_queue_relation_def ksReadyQueues_asrt_def ksReadyQueues_head_end_def) + +lemma tcbSchedEnqueue_ksReadyQueues_head_end[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: tcbQueueEmpty_def obj_at'_def ksReadyQueues_head_end_def split: if_splits) + done + +lemma ksReadyQueues_head_end_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end (s\ksSchedulerAction := ChooseNewThread\) = ksReadyQueues_head_end s" + by (simp add: ksReadyQueues_head_end_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + +lemma setThreadState_ksReadyQueues_head_end[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end\" + unfolding setThreadState_def + by (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + +definition ksReadyQueues_head_end_tcb_at'_2 :: + "(domain \ priority \ ready_queue) \ (obj_ref \ tcb) \ bool" where + "ksReadyQueues_head_end_tcb_at'_2 qs tcbs \ + \d p. (\head. tcbQueueHead (qs (d, p)) = Some head \ tcbs head \ None) + \ (\end. tcbQueueEnd (qs (d, p)) = Some end \ tcbs end \ None)" + +abbreviation "ksReadyQueues_head_end_tcb_at' s \ + ksReadyQueues_head_end_tcb_at'_2 (ksReadyQueues s) (tcbs_of' s)" + +lemmas ksReadyQueues_head_end_tcb_at'_def = ksReadyQueues_head_end_tcb_at'_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at': + "\ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ ksReadyQueues_head_end_tcb_at' s" + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def + ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI allI) + apply (case_tac "ts = []", clarsimp) + apply (fastforce dest!: heap_path_head hd_in_set + simp: opt_pred_def tcbQueueEmpty_def split: option.splits) + apply (fastforce simp: queue_end_valid_def opt_pred_def tcbQueueEmpty_def + split: option.splits) + done + +lemma tcbSchedEnqueue_ksReadyQueues_head_end_tcb_at'[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma ksReadyQueues_head_end_tcb_at'_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end_tcb_at' (s\ksSchedulerAction := ChooseNewThread\) + = ksReadyQueues_head_end_tcb_at' s" + by (simp add: ksReadyQueues_head_end_tcb_at'_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + +lemma setThreadState_ksReadyQueues_head_end_tcb_at'[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding setThreadState_def + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma head_end_ksReadyQueues_': + "\ (s, s') \ rf_sr; ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; 
+ pspace_aligned' s; pspace_distinct' s; + d \ maxDomain; p \ maxPriority \ + \ head_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL + \ end_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL" + apply (frule (2) rf_sr_ctcb_queue_relation[where d=d and p=p]) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: option.splits) + apply (rename_tac "end" head end_tcb head_tcb) + apply (prop_tac "tcb_at' head s \ tcb_at' end s") + apply (fastforce intro!: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (fastforce dest: tcb_at_not_NULL) + done + lemma tcbSchedEnqueue_cslift_spec: "\s. \\\<^bsub>/UNIV\<^esub> \s. \d v. option_map2 tcbPriority_C (cslift s) \tcb = Some v \ unat v \ numPriorities @@ -45,7 +148,9 @@ lemma tcbSchedEnqueue_cslift_spec: \ None \ option_map2 tcbDomain_C (cslift s) (head_C (index \ksReadyQueues (unat (d*0x100 + v)))) - \ None)\ + \ None) + \ (head_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL + \ end_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL)\ Call tcbSchedEnqueue_'proc {s'. option_map2 tcbEPNext_C (cslift s') = option_map2 tcbEPNext_C (cslift s) \ option_map2 tcbEPPrev_C (cslift s') = option_map2 tcbEPPrev_C (cslift s) @@ -62,8 +167,8 @@ lemma tcbSchedEnqueue_cslift_spec: apply (rule conjI) apply (clarsimp simp: typ_heap_simps cong: if_cong) apply (simp split: if_split) - apply (clarsimp simp: typ_heap_simps if_Some_helper cong: if_cong) - by (simp split: if_split) + by (auto simp: typ_heap_simps' if_Some_helper numPriorities_def + cong: if_cong split: if_splits) lemma setThreadState_cslift_spec: "\s. \\\<^bsub>/UNIV\<^esub> \s. s \\<^sub>c \tptr \ (\x. ksSchedulerAction_' (globals s) = tcb_Ptr x @@ -159,8 +264,9 @@ lemma ctcb_relation_tcbPriority_maxPriority_numPriorities: done lemma tcbSchedEnqueue_cslift_precond_discharge: - "\ (s, s') \ rf_sr; obj_at' (P :: tcb \ bool) x s; - valid_queues s; valid_objs' s \ \ + "\ (s, s') \ rf_sr; obj_at' (P :: tcb \ bool) x s; valid_objs' s ; + ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; + pspace_aligned' s; pspace_distinct' s\ \ (\d v. option_map2 tcbPriority_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some v \ unat v < numPriorities \ option_map2 tcbDomain_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some d @@ -171,31 +277,49 @@ lemma tcbSchedEnqueue_cslift_precond_discharge: \ None \ option_map2 tcbDomain_C (cslift s') (head_C (index (ksReadyQueues_' (globals s')) (unat (d*0x100 + v)))) - \ None))" + \ None) + \ (head_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL + \ end_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL))" apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps' option_map2_def) + apply (rename_tac tcb tcb') apply (frule_tac t=x in valid_objs'_maxPriority, fastforce simp: obj_at'_def) apply (frule_tac t=x in valid_objs'_maxDomain, fastforce simp: obj_at'_def) apply (drule_tac P="\tcb. tcbPriority tcb \ maxPriority" in obj_at_ko_at2', simp) apply (drule_tac P="\tcb. 
tcbDomain tcb \ maxDomain" in obj_at_ko_at2', simp) apply (simp add: ctcb_relation_tcbDomain_maxDomain_numDomains ctcb_relation_tcbPriority_maxPriority_numPriorities) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_ctcb_queue_relation) apply (simp add: maxDom_to_H maxPrio_to_H)+ + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in head_end_ksReadyQueues_', fastforce+) apply (simp add: cready_queues_index_to_C_def2 numPriorities_def le_maxDomain_eq_less_numDomains) apply (clarsimp simp: ctcb_relation_def) apply (frule arg_cong[where f=unat], subst(asm) unat_ucast_up_simp, simp) - apply (frule tcb_queue'_head_end_NULL) - apply (erule conjunct1[OF valid_queues_valid_q]) - apply (frule(1) tcb_queue_relation_qhead_valid') - apply (simp add: valid_queues_valid_q) - apply (clarsimp simp: h_t_valid_clift_Some_iff) + apply (frule (3) head_end_ksReadyQueues_', fastforce+) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (prop_tac "\ tcbQueueEmpty ((ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)))") + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (clarsimp simp: tcbQueueEmpty_def) + apply (rename_tac head "end" head_tcb end_tcb) + apply (prop_tac "tcb_at' head s") + apply (fastforce intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (frule_tac thread=head in obj_at_cslift_tcb) + apply fastforce + apply (clarsimp dest: obj_at_cslift_tcb simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) done lemma cancel_all_ccorres_helper: "ccorres dc xfdc - (\s. valid_objs' s \ valid_queues s + (\s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s \ (\t\set ts. tcb_at' t s \ t \ 0) \ sch_act_wf (ksSchedulerAction s) s) {s'. \p. ep_queue_relation (cslift s') ts @@ -218,8 +342,7 @@ proof (induct ts) apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (rule ccorres_tmp_lift2[where G'=UNIV and G''="\x. 
UNIV", simplified]) apply ceqv - apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def - dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip) apply simp done @@ -228,7 +351,7 @@ next show ?case apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (simp del: Collect_const - add: dc_def[symmetric] mapM_x_Cons) + add: mapM_x_Cons) apply (rule ccorres_guard_imp2) apply (rule_tac xf'=thread_' in ccorres_abstract) apply ceqv @@ -251,17 +374,15 @@ next apply (erule cmap_relationE1 [OF cmap_relation_tcb]) apply (erule ko_at_projectKO_opt) apply (fastforce intro: typ_heap_simps) - apply (wp sts_running_valid_queues | simp)+ + apply (wp sts_valid_objs' | simp)+ apply (rule ceqv_refl) apply (rule "Cons.hyps") apply (wp sts_valid_objs' sts_sch_act sch_act_wf_lift hoare_vcg_const_Ball_lift - sts_running_valid_queues sts_st_tcb' setThreadState_oa_queued | simp)+ + sts_st_tcb' | simp)+ apply (vcg exspec=setThreadState_cslift_spec exspec=tcbSchedEnqueue_cslift_spec) - apply (clarsimp simp: tcb_at_not_NULL - Collect_const_mem valid_tcb_state'_def - ThreadState_Restart_def mask_def - valid_objs'_maxDomain valid_objs'_maxPriority) + apply (clarsimp simp: tcb_at_not_NULL Collect_const_mem valid_tcb_state'_def + ThreadState_defs mask_def valid_objs'_maxDomain valid_objs'_maxPriority) apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (rule conjI) @@ -271,16 +392,13 @@ next st_tcb_at'_def split: scheduler_action.split_asm) apply (rename_tac word) - apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge) - apply simp - apply clarsimp - apply clarsimp - apply clarsimp + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; clarsimp?) + apply simp apply clarsimp apply (rule conjI) apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge) apply clarsimp - apply clarsimp + apply clarsimp+ apply (subst ep_queue_relation_shift, fastforce) apply (drule_tac x="tcb_ptr_to_ctcb_ptr thread" in fun_cong)+ @@ -289,17 +407,23 @@ next done qed +crunches setEndpoint, setNotification + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + and ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + (simp: updateObject_default_def) + lemma cancelAllIPC_ccorres: "ccorres dc xfdc - (invs') (UNIV \ {s. epptr_' s = Ptr epptr}) [] + invs' (UNIV \ {s. 
epptr_' s = Ptr epptr}) [] (cancelAllIPC epptr) (Call cancelAllIPC_'proc)" apply (cinit lift: epptr_') + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l [OF _ getEndpoint_inv _ empty_fail_getEndpoint]) apply (rule_tac xf'=ret__unsigned_longlong_' - and val="case rv of IdleEP \ scast EPState_Idle + and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv | SendEP _ \ scast EPState_Send" - and R="ko_at' rv epptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ep]) @@ -308,8 +432,8 @@ lemma cancelAllIPC_ccorres: apply (simp add: cendpoint_relation_def Let_def split: endpoint.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv epptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ep epptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc apply (rename_tac list) apply (simp add: endpoint_state_defs @@ -343,29 +467,26 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear - cancelAllIPC_mapM_x_valid_queues | simp)+ apply (rule mapM_x_wp', wp)+ apply (wp sts_st_tcb') apply (clarsimp split: if_split) - apply (rule mapM_x_wp', wp)+ + apply (rule mapM_x_wp', wp sts_valid_objs')+ apply (clarsimp simp: valid_tcb_state'_def) apply (simp add: guard_is_UNIV_def) apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: endpoint_state_defs - Collect_False Collect_True - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: endpoint_state_defs Collect_False Collect_True ccorres_cond_iffs del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -394,23 +515,26 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) - apply (wp cancelAllIPC_mapM_x_valid_queues) - apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ apply (simp add: guard_is_UNIV_def) apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (clarsimp simp: valid_ep'_def invs_valid_objs' invs_queues) + apply (clarsimp simp: valid_ep'_def invs_valid_objs') apply (rule cmap_relationE1[OF cmap_relation_ep], assumption) apply (erule ko_at_projectKO_opt) apply (frule obj_at_valid_objs', clarsimp+) apply (clarsimp simp: valid_obj'_def valid_ep'_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') 
subgoal by (auto simp: typ_heap_simps cendpoint_relation_def Let_def tcb_queue_relation'_def invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority @@ -420,22 +544,18 @@ lemma cancelAllIPC_ccorres: apply clarsimp done -lemma empty_fail_getNotification: - "empty_fail (getNotification ep)" - unfolding getNotification_def - by (auto intro: empty_fail_getObject) - lemma cancelAllSignals_ccorres: "ccorres dc xfdc - (invs') (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr}) [] + invs' (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr}) [] (cancelAllSignals ntfnptr) (Call cancelAllSignals_'proc)" apply (cinit lift: ntfnPtr_') + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule_tac xf'=ret__unsigned_longlong_' - and val="case ntfnObj rv of IdleNtfn \ scast NtfnState_Idle + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle | ActiveNtfn _ \ scast NtfnState_Active | WaitingNtfn _ \ scast NtfnState_Waiting" - and R="ko_at' rv ntfnptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ntfn]) @@ -444,18 +564,15 @@ lemma cancelAllSignals_ccorres: apply (simp add: cnotification_relation_def Let_def split: ntfn.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv ntfnptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ntfn ntfnptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric] Collect_True + apply (simp add: notification_state_defs ccorres_cond_iffs Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -463,8 +580,8 @@ lemma cancelAllSignals_ccorres: apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) - apply (rule_tac P="ko_at' rv ntfnptr and invs'" - in ccorres_from_vcg[where P'=UNIV]) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp apply (rule_tac x=ntfnptr in cmap_relationE1 [OF cmap_relation_ntfn], assumption) @@ -482,13 +599,12 @@ lemma cancelAllSignals_ccorres: subgoal by (simp add: cnotification_relation_def notification_state_defs Let_def) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) - apply (wp cancelAllIPC_mapM_x_valid_queues) - apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ apply (simp add: guard_is_UNIV_def) apply (wp set_ntfn_valid_objs' hoare_vcg_const_Ball_lift @@ -499,6 +615,10 @@ lemma cancelAllSignals_ccorres: apply (erule ko_at_projectKO_opt) apply (frule obj_at_valid_objs', clarsimp+) 
apply (clarsimp simp add: valid_obj'_def valid_ntfn'_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') subgoal by (auto simp: typ_heap_simps cnotification_relation_def Let_def tcb_queue_relation'_def invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority @@ -583,16 +703,16 @@ lemma tcb_queue_relation2_cong: context kernel_m begin -lemma setThreadState_ccorres_valid_queues'_simple: - "ccorres dc xfdc (\s. tcb_at' thread s \ valid_queues' s \ \ runnable' st \ sch_act_simple s) +lemma setThreadState_ccorres_simple: + "ccorres dc xfdc (\s. tcb_at' thread s \ \ runnable' st \ sch_act_simple s) ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres_valid_queues'_simple) - apply (wp threadSet_valid_queues'_and_not_runnable') - apply (clarsimp simp: weak_sch_act_wf_def valid_queues'_def) + apply (wp threadSet_tcbState_st_tcb_at') + apply (fastforce simp: weak_sch_act_wf_def) done lemma updateRestartPC_ccorres: @@ -608,9 +728,7 @@ lemma updateRestartPC_ccorres: done crunches updateRestartPC - for valid_queues'[wp]: valid_queues' - and sch_act_simple[wp]: sch_act_simple - and valid_queues[wp]: Invariants_H.valid_queues + for sch_act_simple[wp]: sch_act_simple and valid_objs'[wp]: valid_objs' and tcb_at'[wp]: "tcb_at' p" @@ -650,25 +768,16 @@ lemma suspend_ccorres: apply clarsimp apply (rule iffI) apply simp - apply (erule thread_state_to_tsType.elims; simp add: StrictC'_thread_state_defs) + apply (erule thread_state_to_tsType.elims; simp add: ThreadState_defs) apply (ctac (no_vcg) add: updateRestartPC_ccorres) apply (rule ccorres_return_Skip) apply ceqv - apply (ctac(no_vcg) add: setThreadState_ccorres_valid_queues'_simple) - apply (ctac add: tcbSchedDequeue_ccorres') - apply (rule_tac Q="\_. - (\s. \t' d p. (t' \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d - \ tcbPriority tcb = p) t' s \ - (t' \ thread \ st_tcb_at' runnable' t' s)) \ - distinct (ksReadyQueues s (d, p))) and valid_queues' and valid_objs' and tcb_at' thread" - in hoare_post_imp) + apply (ctac(no_vcg) add: setThreadState_ccorres_simple) + apply (ctac add: tcbSchedDequeue_ccorres) + apply (rule_tac Q="\_. 
valid_objs' and tcb_at' thread and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) apply clarsimp - apply (drule_tac x="t" in spec) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp elim!: obj_at'_weakenE simp: inQ_def) - apply (wp sts_valid_queues_partial)[1] + apply (wp sts_valid_objs')[1] apply clarsimp apply (wpsimp simp: valid_tcb_state'_def) apply clarsimp @@ -683,9 +792,8 @@ lemma suspend_ccorres: apply (rule cancelIPC_sch_act_simple) apply (rule cancelIPC_tcb_at'[where t=thread]) apply (rule delete_one_conc_fr.cancelIPC_invs) - apply (fastforce simp: invs_valid_queues' invs_queues invs_valid_objs' - valid_tcb_state'_def) - apply (auto simp: "StrictC'_thread_state_defs") + apply (fastforce simp: invs_valid_objs' valid_tcb_state'_def) + apply (auto simp: ThreadState_defs) done lemma cap_to_H_NTFNCap_tag: @@ -708,8 +816,8 @@ lemma doUnbindNotification_ccorres: (Call doUnbindNotification_'proc)" apply (cinit' lift: ntfnPtr_' tcbptr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr" and P'=UNIV - in ccorres_split_nothrow_novcg) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: option_to_ptr_def option_to_0_def) @@ -728,7 +836,7 @@ lemma doUnbindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv", ((simp add: option_to_ctcb_ptr_def)+)[4]) + apply (case_tac "ntfnObj ntfn", ((simp add: option_to_ctcb_ptr_def)+)[4]) subgoal by (simp add: carch_state_relation_def) subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: h_t_valid_clift_Some_iff) @@ -739,7 +847,7 @@ lemma doUnbindNotification_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -789,7 +897,7 @@ lemma doUnbindNotification_ccorres': apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -823,9 +931,9 @@ lemma unbindNotification_ccorres: apply simp apply wpc apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (rule ccorres_cond_true) - apply (ctac (no_vcg) add: doUnbindNotification_ccorres[unfolded dc_def, simplified]) + apply (ctac (no_vcg) add: doUnbindNotification_ccorres[simplified]) apply (wp gbn_wp') apply vcg apply (clarsimp simp: option_to_ptr_def option_to_0_def pred_tcb_at'_def @@ -842,13 +950,13 @@ lemma unbindMaybeNotification_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule ccorres_rhs_assoc2) - apply (rule_tac P="ntfnBoundTCB rv \ None \ - option_to_ctcb_ptr (ntfnBoundTCB rv) \ NULL" - in ccorres_gen_asm) + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) apply (rule_tac xf'=boundTCB_' - and val="option_to_ctcb_ptr 
(ntfnBoundTCB rv)" - and R="ko_at' rv ntfnptr and valid_bound_tcb' (ntfnBoundTCB rv)" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and val="option_to_ctcb_ptr (ntfnBoundTCB ntfn)" + and R="ko_at' ntfn ntfnptr and valid_bound_tcb' (ntfnBoundTCB ntfn)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1[OF cmap_relation_ntfn]) @@ -869,8 +977,8 @@ lemma unbindMaybeNotification_ccorres: apply (clarsimp ) apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) by (auto simp: valid_ntfn'_def valid_bound_tcb'_def obj_at'_def - objBitsKO_def is_aligned_def option_to_ctcb_ptr_def tcb_at_not_NULL - split: ntfn.splits) + objBitsKO_def is_aligned_def option_to_ctcb_ptr_def tcb_at_not_NULL + split: ntfn.splits) (* TODO: move *) definition @@ -898,7 +1006,7 @@ lemma finaliseCap_True_cases_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap Collect_False del: Collect_const) apply (fold case_bool_If) - apply (simp add: false_def) + apply simp apply csymbr apply wpc apply (simp add: cap_get_tag_isCap ccorres_cond_univ_iff Let_def) @@ -1031,7 +1139,7 @@ lemma deleteASIDPool_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: asid_base_' pool_' simp: whileAnno_def) apply (rule ccorres_assert) - apply (clarsimp simp: liftM_def dc_def[symmetric] when_def) + apply (clarsimp simp: liftM_def when_def) apply (rule ccorres_Guard)+ apply (rule ccorres_pre_gets_riscvKSASIDTable_ksArchState) apply (rule_tac R="\s. rv = riscvKSASIDTable (ksArchState s)" in ccorres_cond2) @@ -1088,7 +1196,7 @@ lemma deleteASID_ccorres: apply (rule ccorres_from_vcg[where P="\" and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (simp add: return_def) - apply (clarsimp simp: dc_def[symmetric] when_def liftM_def + apply (clarsimp simp: when_def liftM_def cong: conj_cong call_ignore_cong) apply (rename_tac asidTable ap) apply csymbr @@ -1108,7 +1216,7 @@ lemma deleteASID_ccorres: simp flip: mask_2pm1 split: asidpool.split_asm asid_pool_C.split_asm) apply (drule_tac x="asid && mask asid_low_bits" in spec) - apply (clarsimp simp: word_and_le1 from_bool_def case_bool_If inv_ASIDPool) + apply (clarsimp simp: word_and_le1 case_bool_If inv_ASIDPool) apply (fastforce simp: option_to_ptr_def option_to_0_def split: if_splits option.splits) apply ceqv apply (rule ccorres_cond2[where R=\]) @@ -1162,7 +1270,7 @@ lemma deleteASID_ccorres: lemma setObject_ccorres_lemma: fixes val :: "'a :: pspace_storable" shows - "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := ksPSpace s (ptr \ injectKO val) \, s') \ rf_sr},{}; + "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val) \, s') \ rf_sr},{}; \s s' val'::'a. \ ko_at' val' ptr s; (s, s') \ rf_sr \ \ s' \ Q s; \val :: 'a. 
updateObject val = updateObject_default val; @@ -1185,7 +1293,7 @@ lemma setObject_ccorres_lemma: apply (subgoal_tac "fst (setObject ptr val \) = {}") apply simp apply (erule notE, erule_tac s=\ in empty_failD[rotated]) - apply (simp add: setObject_def split_def) + apply (simp add: setObject_def split_def empty_fail_cond) apply (rule ccontr) apply (clarsimp elim!: nonemptyE) apply (frule use_valid [OF _ obj_at_setObject3[where P=\]], simp_all)[1] @@ -1358,7 +1466,6 @@ next apply (rule ccorres_checkPTAt) apply (rule ccorres_symb_exec_r2) apply (rule ccorres_symb_exec_r2) - apply (fold dc_def)[1] apply (rule Suc.hyps[unfolded whileAnno_def]) using level apply simp apply vcg @@ -1420,7 +1527,7 @@ lemma unmapPageTable_ccorres: apply simp apply wp apply (simp add: guard_is_UNIV_def) - apply (simp add: guard_is_UNIV_def) + apply wpsimp apply (simp add: guard_is_UNIV_def) apply vcg apply (vcg spec=modifies) @@ -1447,12 +1554,6 @@ lemma no_0_page_table_at'[elim!]: apply (drule spec[where x=0], clarsimp simp: bit_simps) done -lemma ccte_relation_ccap_relation: - "ccte_relation cte cte' \ ccap_relation (cteCap cte) (cte_C.cap_C cte')" - by (clarsimp simp: ccte_relation_def ccap_relation_def - cte_to_H_def map_option_Some_eq2 - c_valid_cte_def) - lemma isFinalCapability_ccorres: "ccorres ((=) \ from_bool) ret__unsigned_long_' (cte_wp_at' ((=) cte) slot and invs') @@ -1473,7 +1574,7 @@ lemma isFinalCapability_ccorres: apply (simp add: mdbPrev_to_H[symmetric]) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (simp add: return_def from_bool_def false_def) + apply (simp add: return_def) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_l[OF _ getCTE_inv getCTE_wp empty_fail_getCTE]) apply (rule_tac P="cte_wp_at' ((=) cte) slot @@ -1512,10 +1613,9 @@ lemma isFinalCapability_ccorres: apply (rule cmap_relationE1 [OF cmap_relation_cte], assumption+, simp?, simp add: typ_heap_simps)+ apply (drule ccte_relation_ccap_relation)+ - apply (auto simp: false_def true_def from_bool_def split: bool.splits)[1] + apply (auto simp: from_bool_def split: bool.splits)[1] apply (wp getCTE_wp') - apply (clarsimp simp add: guard_is_UNIV_def Collect_const_mem false_def - from_bool_0 true_def from_bool_def) + apply (clarsimp simp add: guard_is_UNIV_def Collect_const_mem) apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -1550,7 +1650,7 @@ lemma cteDeleteOne_ccorres: erule_tac t="ret__unsigned_longlong = scast cap_null_cap" and s="cteCap cte = NullCap" in ssubst) - apply (clarsimp simp only: when_def unless_def dc_def[symmetric]) + apply (clarsimp simp only: when_def unless_def) apply (rule ccorres_cond2[where R=\]) apply (clarsimp simp: Collect_const_mem) apply (rule ccorres_rhs_assoc)+ @@ -1561,25 +1661,24 @@ lemma cteDeleteOne_ccorres: apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot]) apply (rule_tac A="invs' and cte_wp_at' ((=) cte) slot" in ccorres_guard_imp2[where A'=UNIV]) - apply (simp add: split_def dc_def[symmetric] + apply (simp add: split_def del: Collect_const) apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg) add: finaliseCap_True_standin_ccorres) apply (rule ccorres_assert) - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (ctac add: emptySlot_ccorres) apply (simp add: pred_conj_def finaliseCapTrue_standin_simple_def) apply (strengthen invs_mdb_strengthen' invs_urz) apply (wp typ_at_lifts isFinalCapability_inv | strengthen invs_valid_objs')+ - apply (clarsimp simp: from_bool_def true_def 
irq_opt_relation_def - invs_pspace_aligned' cte_wp_at_ctes_of) + apply (clarsimp simp: irq_opt_relation_def invs_pspace_aligned' cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (clarsimp simp: typ_heap_simps ccte_relation_ccap_relation ccap_relation_NullCap_iff) apply (wp isFinalCapability_inv) apply simp - apply (simp del: Collect_const add: false_def) + apply (simp del: Collect_const) apply (rule ccorres_return_Skip) apply (clarsimp simp: Collect_const_mem cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) @@ -1603,7 +1702,7 @@ lemma deletingIRQHandler_ccorres: ({s. irq_opt_relation (Some irq) (irq_' s)}) [] (deletingIRQHandler irq) (Call deletingIRQHandler_'proc)" apply (cinit lift: irq_' cong: call_ignore_cong) - apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def dc_def[symmetric] + apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def cong: call_ignore_cong ) apply (rule_tac r'="\rv rv'. rv' = Ptr rv" and xf'="slot_'" in ccorres_split_nothrow) @@ -1652,8 +1751,6 @@ lemma Zombie_new_spec: apply (simp add: word_add_less_mono1[where k=1 and j="0x3F", simplified]) done -lemmas upcast_ucast_id = More_Word.ucast_up_inj - lemma irq_opt_relation_Some_ucast: "\ x && mask 6 = x; ucast x \ irqInvalid; ucast x \ (scast Kernel_C.maxIRQ :: 6 word) \ x \ (scast Kernel_C.maxIRQ :: machine_word) \ @@ -1686,7 +1783,7 @@ lemma option_to_ctcb_ptr_not_0: done lemma update_tcb_map_to_tcb: - "map_to_tcbs (ksPSpace s(p \ KOTCB tcb)) = (map_to_tcbs (ksPSpace s))(p \ tcb)" + "map_to_tcbs ((ksPSpace s)(p \ KOTCB tcb)) = (map_to_tcbs (ksPSpace s))(p \ tcb)" by (rule ext, clarsimp simp: map_comp_def split: if_split) lemma ep_queue_relation_shift2: @@ -1700,23 +1797,9 @@ lemma ep_queue_relation_shift2: apply (clarsimp split: option.split_asm) done -lemma sched_queue_relation_shift: - "(option_map2 tcbSchedNext_C (f (cslift s)) - = option_map2 tcbSchedNext_C (cslift s) - \ option_map2 tcbSchedPrev_C (f (cslift s)) - = option_map2 tcbSchedPrev_C (cslift s)) - \ sched_queue_relation (f (cslift s)) ts qPrev qHead - = sched_queue_relation (cslift s) ts qPrev qHead" - apply (induct ts arbitrary: qPrev qHead; clarsimp) - apply (simp add: option_map2_def fun_eq_iff - map_option_case) - apply (drule_tac x=qHead in spec)+ - apply (clarsimp split: option.split_asm) - done - lemma cendpoint_relation_udpate_arch: "\ cslift x p = Some tcb ; cendpoint_relation (cslift x) v v' \ - \ cendpoint_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cendpoint_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def split: endpoint.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1727,29 +1810,13 @@ lemma cendpoint_relation_udpate_arch: lemma cnotification_relation_udpate_arch: "\ cslift x p = Some tcb ; cnotification_relation (cslift x) v v' \ - \ cnotification_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cnotification_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cnotification_relation_def Let_def tcb_queue_relation'_def split: notification.splits ntfn.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) apply (safe ; case_tac "xa = p" ; clarsimp simp: option_map2_def map_option_case) done -lemma sanitiseSetRegister_ccorres: - "\ val = val'; reg' = register_from_H reg\ \ - ccorres dc xfdc (tcb_at' tptr) - UNIV - hs - (asUser tptr (setRegister reg (local.sanitiseRegister False reg val))) - 
(\unsigned_long_eret_2 :== CALL sanitiseRegister(reg',val',0);; - CALL setRegister(tcb_ptr_to_ctcb_ptr tptr,reg',\unsigned_long_eret_2))" - apply (rule ccorres_guard_imp2) - apply (rule ccorres_symb_exec_r) - apply (ctac add: setRegister_ccorres) - apply (vcg) - apply (rule conseqPre, vcg) - apply (fastforce simp: sanitiseRegister_def split: register.splits) - by (auto simp: sanitiseRegister_def from_bool_def simp del: Collect_const split: register.splits bool.splits) - lemma case_option_both[simp]: "(case f of None \ P | _ \ P) = P" by (auto split: option.splits) @@ -1776,7 +1843,7 @@ lemma ccap_relation_capFMappedASID_CL_0: done lemma Arch_finaliseCap_ccorres: - notes dc_simp[simp del] Collect_const[simp del] if_weak_cong[cong] + notes Collect_const[simp del] if_weak_cong[cong] shows "ccorres (\rv rv'. ccap_relation (fst rv) (remainder_C rv') \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) @@ -1935,7 +2002,7 @@ lemma Arch_finaliseCap_ccorres: apply (rule ccorres_return_C; simp) apply (prop_tac "ret__unsigned_longlong \ 0") apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 dest!: cap_to_H_PTCap) - apply (simp add: cap_page_table_cap_lift_def to_bool_def split: if_split_asm) + apply (simp add: cap_page_table_cap_lift_def split: if_split_asm) apply ccorres_rewrite apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -1989,7 +2056,6 @@ lemma prepareThreadDelete_ccorres: (invs' and tcb_at' thread) (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}) hs (prepareThreadDelete thread) (Call Arch_prepareThreadDelete_'proc)" - supply dc_simp[simp del] apply (cinit lift: thread_', rename_tac cthread) apply (rule ccorres_return_Skip) apply fastforce @@ -2039,7 +2105,7 @@ lemma finaliseCap_ccorres: del: Collect_const) apply (rule ccorres_if_lhs) apply (simp, rule ccorres_fail) - apply (simp add: from_bool_0 Collect_True Collect_False false_def + apply (simp add: from_bool_0 Collect_True Collect_False del: Collect_const) apply csymbr apply (simp add: cap_get_tag_isCap Collect_False Collect_True @@ -2124,7 +2190,7 @@ lemma finaliseCap_ccorres: apply (simp add: isArchCap_T_isArchObjectCap[symmetric] del: Collect_const) apply (rule ccorres_if_lhs) - apply (simp add: Collect_False Collect_True Let_def true_def + apply (simp add: Collect_False Collect_True Let_def del: Collect_const) apply (rule_tac P="(capIRQ cap) \ RISCV64.maxIRQ" in ccorres_gen_asm) apply (rule ccorres_rhs_assoc)+ @@ -2144,18 +2210,18 @@ lemma finaliseCap_ccorres: apply (rule ccorres_fail) apply (rule ccorres_add_return, rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ceqv_refl) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -2164,8 +2230,7 @@ lemma finaliseCap_ccorres: irq_opt_relation_def) apply wp apply (simp add: guard_is_UNIV_def) - apply (clarsimp simp: cap_get_tag_isCap word_sle_def 
Collect_const_mem - false_def from_bool_def) + apply (clarsimp simp: cap_get_tag_isCap word_sle_def Collect_const_mem) apply (intro impI conjI) apply (clarsimp split: bool.splits) apply (clarsimp split: bool.splits) @@ -2182,7 +2247,7 @@ lemma finaliseCap_ccorres: split: option.splits cap_CL.splits if_splits) apply clarsimp apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) - apply (clarsimp simp: isCap_simps from_bool_def false_def) + apply (clarsimp simp: isCap_simps) apply (clarsimp simp: tcb_cnode_index_defs ptr_add_assertion_def) apply clarsimp apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) diff --git a/proof/crefine/RISCV64/Interrupt_C.thy b/proof/crefine/RISCV64/Interrupt_C.thy index f783a2dab1..706f2d40ec 100644 --- a/proof/crefine/RISCV64/Interrupt_C.thy +++ b/proof/crefine/RISCV64/Interrupt_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -16,7 +17,7 @@ lemma invokeIRQHandler_AckIRQ_ccorres: (InterruptDecls_H.invokeIRQHandler (AckIRQ irq)) (Call invokeIRQHandler_AckIRQ_'proc)" apply (cinit lift: irq_' simp: Interrupt_H.invokeIRQHandler_def invokeIRQHandler_def) apply (ctac add: plic_complete_claim_ccorres) - apply (simp add: from_bool_def false_def) + apply simp done lemma getIRQSlot_ccorres: @@ -74,7 +75,7 @@ proof - apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="-1"]) apply (rule ccorres_call) - apply (rule cteInsert_ccorres[simplified dc_def]) + apply (rule cteInsert_ccorres) apply simp apply simp apply simp @@ -111,7 +112,7 @@ lemma invokeIRQHandler_ClearIRQHandler_ccorres: apply (simp add: ucast_up_ucast is_up) apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) apply (rule ccorres_symb_exec_r) - apply (ctac add: cteDeleteOne_ccorres[where w="-1",simplified dc_def]) + apply (ctac add: cteDeleteOne_ccorres[where w="-1"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) @@ -229,7 +230,7 @@ lemma decodeIRQHandlerInvocation_ccorres: apply (clarsimp simp: Collect_const_mem neq_Nil_conv dest!: interpret_excaps_eq) apply (simp add: rf_sr_ksCurThread mask_def[where n=4] - "StrictC'_thread_state_defs" cap_get_tag_isCap excaps_map_def + ThreadState_defs cap_get_tag_isCap excaps_map_def word_sless_def word_sle_def) apply (simp add: invocationCatch_def throwError_bind interpret_excaps_test_null Collect_True @@ -256,24 +257,23 @@ lemma decodeIRQHandlerInvocation_ccorres: apply (simp add: syscall_error_to_H_cases) apply simp apply (clarsimp simp: Collect_const_mem tcb_at_invs') - apply (clarsimp simp: invs_queues invs_valid_objs' + apply (clarsimp simp: invs_valid_objs' ct_in_state'_def ccap_rights_relation_def - mask_def[where n=4] - "StrictC'_thread_state_defs") + mask_def[where n=4] ThreadState_defs) apply (subst pred_tcb'_weakenE, assumption, fastforce)+ apply (clarsimp simp: rf_sr_ksCurThread word_sle_def word_sless_def sysargs_rel_n_def word_less_nat_alt) apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth - slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def + slotcap_in_mem_def valid_tcb_state'_def dest!: interpret_excaps_eq split: bool.splits) apply (intro conjI impI allI) apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def excaps_map_def excaps_in_mem_def word_less_nat_alt 
hd_conv_nth - slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def + slotcap_in_mem_def valid_tcb_state'_def dest!: interpret_excaps_eq split: bool.splits)+ - apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[4] + apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[6] apply (drule ctes_of_valid') apply fastforce apply (clarsimp simp add:valid_cap_simps' RISCV64.maxIRQ_def) @@ -344,7 +344,7 @@ lemma invokeIRQControl_ccorres: (performIRQControl (Invocations_H.irqcontrol_invocation.IssueIRQHandler irq slot parent)) (Call invokeIRQControl_'proc)" by (clarsimp simp: performIRQControl_def liftE_def bind_assoc - intro!: invokeIRQControl_expanded_ccorres[simplified liftE_def K_def, simplified]) + intro!: invokeIRQControl_expanded_ccorres[simplified liftE_def, simplified]) lemma isIRQActive_ccorres: "ccorres (\rv rv'. rv' = from_bool rv) ret__unsigned_long_' @@ -363,8 +363,7 @@ lemma isIRQActive_ccorres: Let_def cinterrupt_relation_def) apply (drule spec, drule(1) mp) apply (case_tac "intStateIRQTable (ksInterruptState \) irq") - apply (simp add: from_bool_def irq_state_defs Kernel_C.maxIRQ_def - word_le_nat_alt)+ + apply (simp add: irq_state_defs Kernel_C.maxIRQ_def word_le_nat_alt)+ done lemma Platform_maxIRQ: @@ -607,7 +606,7 @@ lemma Arch_decodeIRQControlInvocation_ccorres: apply (simp add: and_mask_eq_iff_le_mask) apply (simp add: mask_def word_le_nat_alt) apply (clarsimp simp: numeral_2_eq_2 numeral_3_eq_3 exception_defs - ThreadState_Restart_def false_def mask_def from_bool_def) + ThreadState_defs mask_def) apply (rule conseqPre, vcg) apply (fastforce simp: exception_defs split: if_split) apply (rule subset_refl) @@ -625,12 +624,11 @@ lemma Arch_decodeIRQControlInvocation_ccorres: apply (vcg exspec=getSyscallArg_modifies) apply ccorres_rewrite apply (auto split: invocation_label.split arch_invocation_label.split - intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def dc_def id_def] + intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def] simp: throwError_def invocationCatch_def syscall_error_to_H_cases invocation_eq_use_types)[1] apply clarsimp apply (clarsimp simp: interpret_excaps_test_null excaps_map_def Collect_const_mem word_sless_def word_sle_def - ThreadState_Restart_def unat_of_nat mask_def sysargs_rel_to_n cong: if_cong) apply (rule conjI) @@ -774,7 +772,7 @@ lemma decodeIRQControlInvocation_ccorres: apply (rule sym) apply (simp add: and_mask_eq_iff_le_mask) apply (simp add: mask_def word_le_nat_alt) - apply (clarsimp simp: numeral_2_eq_2 exception_defs ThreadState_Restart_def false_def mask_def) + apply (clarsimp simp: numeral_2_eq_2 exception_defs ThreadState_defs mask_def) apply (rule conseqPre, vcg) apply (fastforce simp: exception_defs) apply (rule subset_refl) @@ -800,7 +798,6 @@ lemma decodeIRQControlInvocation_ccorres: apply clarsimp apply (clarsimp simp: interpret_excaps_test_null excaps_map_def Collect_const_mem word_sless_def word_sle_def - ThreadState_Restart_def unat_of_nat mask_def sysargs_rel_to_n cong: if_cong) apply (rule conjI) diff --git a/proof/crefine/RISCV64/Invoke_C.thy b/proof/crefine/RISCV64/Invoke_C.thy index 5555e2533b..f30bb4b43a 100644 --- a/proof/crefine/RISCV64/Invoke_C.thy +++ b/proof/crefine/RISCV64/Invoke_C.thy @@ -1,6 +1,7 @@ (* - * Copyright 2014, General Dynamics C4 Systems + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only *) @@ -64,11 +65,11 @@ lemma 
setDomain_ccorres: apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_return_Skip) - apply (simp add: when_def to_bool_def) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (simp add: when_def) + apply (rule_tac R="\s. curThread = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply simp apply (wp hoare_drop_imps weak_sch_act_wf_lift_linear) @@ -76,15 +77,17 @@ lemma setDomain_ccorres: apply simp apply wp apply (rule_tac Q="\_. all_invs_but_sch_extra and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s)" in hoare_strengthen_post) + and (\s. curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp threadSet_all_invs_but_sch_extra) - apply (clarsimp simp:valid_pspace_valid_objs' st_tcb_at_def[symmetric] - sch_act_simple_def st_tcb_at'_def o_def weak_sch_act_wf_def split:if_splits) + apply (fastforce simp: valid_pspace_valid_objs' st_tcb_at_def[symmetric] + sch_act_simple_def st_tcb_at'_def weak_sch_act_wf_def + split: if_splits) apply (simp add: guard_is_UNIV_def) - apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s \ (\p. t \ set (ksReadyQueues s p)))" in hoare_strengthen_post) + apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple and (\s. curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_not_queued - tcbSchedDequeue_not_in_queue hoare_vcg_imp_lift hoare_vcg_all_lift) + hoare_vcg_imp_lift hoare_vcg_all_lift) apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def) apply (fastforce simp: valid_tcb'_def tcb_cte_cases_def invs'_def valid_state'_def valid_pspace'_def) @@ -192,10 +195,10 @@ lemma decodeDomainInvocation_ccorres: apply clarsimp apply (vcg exspec=getSyscallArg_modifies) - apply (clarsimp simp: valid_tcb_state'_def invs_valid_queues' invs_valid_objs' - invs_queues invs_sch_act_wf' ct_in_state'_def pred_tcb_at' + apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' + invs_sch_act_wf' ct_in_state'_def pred_tcb_at' rf_sr_ksCurThread word_sle_def word_sless_def sysargs_rel_to_n - mask_eq_iff_w2p mask_eq_iff_w2p word_size "StrictC'_thread_state_defs") + mask_eq_iff_w2p mask_eq_iff_w2p word_size ThreadState_defs) apply (rule conjI) apply (clarsimp simp: linorder_not_le isCap_simps) apply (rule conjI, clarsimp simp: unat64_eq_of_nat) @@ -203,7 +206,7 @@ lemma decodeDomainInvocation_ccorres: apply (drule_tac x="extraCaps ! 0" and P="\v. valid_cap' (fst v) s" in bspec) apply (clarsimp simp: nth_mem interpret_excaps_test_null excaps_map_def) apply (clarsimp simp: valid_cap_simps' pred_tcb'_weakenE active_runnable') - apply (rule conjI) + apply (intro conjI; fastforce?) 
apply (fastforce simp: tcb_st_refs_of'_def elim:pred_tcb'_weakenE) apply (simp add: word_le_nat_alt unat_ucast unat_numDomains_to_H le_maxDomain_eq_less_numDomains) apply (clarsimp simp: ccap_relation_def cap_to_H_simps cap_thread_cap_lift) @@ -228,7 +231,7 @@ lemma invokeCNodeDelete_ccorres: apply (rule ccorres_trim_returnE, simp, simp) apply (rule ccorres_callE) apply (rule cteDelete_ccorres[simplified]) - apply (simp add: from_bool_def true_def)+ + apply simp+ done @@ -248,7 +251,7 @@ lemma invokeCNodeRevoke_ccorres: apply (rule ccorres_trim_returnE, simp, simp) apply (rule ccorres_callE) apply (rule cteRevoke_ccorres[simplified]) - apply (simp add: from_bool_def true_def)+ + apply simp+ done @@ -381,7 +384,7 @@ lemma invokeCNodeRotate_ccorres: apply clarsimp apply (simp add: return_def) apply wp - apply (simp add: guard_is_UNIV_def dc_def xfdc_def) + apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp) apply (clarsimp simp:cte_wp_at_ctes_of) @@ -546,12 +549,10 @@ lemma hasCancelSendRights_spec: apply clarsimp apply (drule sym, drule (1) cap_get_tag_to_H) apply (clarsimp simp: hasCancelSendRights_def to_bool_def - true_def false_def split: if_split bool.splits) apply (rule impI) apply (case_tac cap, - auto simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs - from_bool_def false_def true_def hasCancelSendRights_def + auto simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs hasCancelSendRights_def dest: cap_get_tag_isArchCap_unfolded_H_cap split: capability.splits bool.splits)[1] done @@ -629,9 +630,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const cong: call_ignore_cong) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc | csymbr)+ - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] - if_1_0_0 dc_def[symmetric] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) apply (simp add:if_P del: Collect_const) @@ -714,8 +713,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: Collect_const[symmetric] del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] - if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: list_case_helper injection_handler_returnOk @@ -742,13 +740,12 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError whenE_def - dc_def[symmetric]) + apply (simp add: injection_handler_throwError whenE_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk ccorres_invocationCatch_Inr performInvocation_def - bindE_assoc false_def) + bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeInsert_ccorres) @@ -761,16 +758,16 @@ lemma decodeCNodeInvocation_ccorres: apply (vcg exspec=setThreadState_modifies) apply simp apply (wp injection_wp_E[OF refl]) - apply (rule hoare_post_imp_R) - apply (rule_tac Q'="\rv. valid_pspace' and valid_queues + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' and valid_cap' rv and valid_objs' and tcb_at' thread and (\s. 
sch_act_wf (ksSchedulerAction s) s)" in hoare_vcg_R_conj) apply (rule deriveCap_Null_helper[OF deriveCap_derived]) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) - apply (simp add: is_derived'_def badge_derived'_def - valid_tcb_state'_def) + apply (fastforce simp: is_derived'_def badge_derived'_def + valid_tcb_state'_def) apply (simp add: Collect_const_mem all_ex_eq_helper) apply (vcg exspec=deriveCap_modifies) apply wp @@ -819,12 +816,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: whenE_def injection_handler_returnOk - invocationCatch_def injection_handler_throwError - dc_def[symmetric]) + invocationCatch_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk - ccorres_invocationCatch_Inr false_def + ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) @@ -838,15 +834,15 @@ lemma decodeCNodeInvocation_ccorres: apply (vcg exspec=setThreadState_modifies) apply (simp add: conj_comms valid_tcb_state'_def) apply (wp injection_wp_E[OF refl]) - apply (rule hoare_post_imp_R) - apply (rule_tac Q'="\rv. valid_pspace' and valid_queues + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' and valid_cap' rv and valid_objs' and tcb_at' thread and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_vcg_R_conj) apply (rule deriveCap_Null_helper [OF deriveCap_derived]) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) - apply (simp add: is_derived'_def badge_derived'_def) + apply (fastforce simp: is_derived'_def badge_derived'_def) apply (simp add: Collect_const_mem all_ex_eq_helper) apply (vcg exspec=deriveCap_modifies) apply (simp add: Collect_const_mem) @@ -880,7 +876,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply csymbr apply csymbr - apply (simp add: cap_get_tag_NullCap true_def) + apply (simp add: cap_get_tag_NullCap) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) @@ -899,7 +895,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: flip: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: if_not_P del: Collect_const) @@ -918,15 +914,14 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric] numeral_eqs) + apply (simp add: whenE_def injection_handler_throwError numeral_eqs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk ccorres_invocationCatch_Inr numeral_eqs performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) - apply (simp add: true_def ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) apply (rule ccorres_alternative2) apply (rule ccorres_return_CE, simp+)[1] @@ -954,14 +949,16 @@ lemma decodeCNodeInvocation_ccorres: apply (rule validE_R_validE) apply (rule_tac Q'="\a b. 
cte_wp_at' (\x. True) a b \ invs' b \ tcb_at' thread b \ sch_act_wf (ksSchedulerAction b) b \ valid_tcb_state' Restart b - \ Q2 b" for Q2 in hoare_post_imp_R) - prefer 2 - apply (clarsimp simp:cte_wp_at_ctes_of) - apply (drule ctes_of_valid') - apply (erule invs_valid_objs') - apply (clarsimp simp:valid_updateCapDataI invs_queues invs_valid_objs' invs_valid_pspace') - apply (assumption) - apply (wp hoare_vcg_all_lift_R injection_wp_E[OF refl] + \ Q2 b" for Q2 in hoare_strengthen_postE_R) + prefer 2 + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule ctes_of_valid') + apply (erule invs_valid_objs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (clarsimp simp:valid_updateCapDataI invs_valid_objs' invs_valid_pspace') + apply assumption + apply (wp hoare_vcg_all_liftE_R injection_wp_E[OF refl] lsfco_cte_at' hoare_vcg_const_imp_lift_R )+ apply (simp add: Collect_const_mem word_sle_def word_sless_def @@ -1018,13 +1015,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_returnOk bindE_assoc - injection_bindE[OF refl refl] split_def - dc_def[symmetric]) + injection_bindE[OF refl refl] split_def) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc)+ apply (ctac add: ccorres_injection_handler_csum1 [OF ensureEmptySlot_ccorres]) - apply (simp add: ccorres_invocationCatch_Inr performInvocation_def - dc_def[symmetric] bindE_assoc) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (ctac(no_vcg) add: invokeCNodeSaveCaller_ccorres) apply (rule ccorres_alternative2) @@ -1033,7 +1028,7 @@ lemma decodeCNodeInvocation_ccorres: apply (wp sts_valid_pspace_hangers)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_split_throws) apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg @@ -1063,8 +1058,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: unlessE_def whenE_def injection_handler_throwError - dc_def[symmetric] from_bool_0) + apply (simp add: unlessE_def whenE_def injection_handler_throwError from_bool_0) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: unlessE_def whenE_def injection_handler_returnOk @@ -1108,12 +1102,10 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: throwError_def return_def exception_defs syscall_error_rel_def syscall_error_to_H_cases) apply clarsimp - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const) apply csymbr apply (simp add: interpret_excaps_test_null excaps_map_def - if_1_0_0 dc_def[symmetric] del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: throwError_bind invocationCatch_def) @@ -1173,8 +1165,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const) apply csymbr apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1236,8 +1227,7 @@ lemma decodeCNodeInvocation_ccorres: 
apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1245,8 +1235,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1260,7 +1249,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply wp apply (vcg exspec=invokeCNodeRotate_modifies) - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) apply (simp add: Collect_const_mem) @@ -1299,7 +1288,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule_tac Q'="\rvb. invs' and cte_at' rv and cte_at' rva and tcb_at' thread" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (clarsimp simp: cte_wp_at_ctes_of weak_derived_updateCapData capBadge_updateCapData_True) @@ -1324,16 +1313,16 @@ lemma decodeCNodeInvocation_ccorres: apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp @@ -1348,7 +1337,7 @@ lemma decodeCNodeInvocation_ccorres: apply vcg apply simp apply (wp injection_wp_E[OF refl] hoare_vcg_const_imp_lift_R - hoare_vcg_all_lift_R lsfco_cte_at' static_imp_wp + hoare_vcg_all_liftE_R lsfco_cte_at' hoare_weak_lift_imp | simp add: hasCancelSendRights_not_Null ctes_of_valid_strengthen cong: conj_cong | wp (once) hoare_drop_imps)+ @@ -1363,7 +1352,7 @@ lemma decodeCNodeInvocation_ccorres: apply simp apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' invs_valid_pspace' - ct_in_state'_def pred_tcb_at' invs_queues + ct_in_state'_def pred_tcb_at' cur_tcb'_def word_sle_def word_sless_def unat_lt2p[where 'a=machine_word_len, folded word_bits_def]) apply (rule conjI) @@ -1377,7 +1366,7 @@ lemma decodeCNodeInvocation_ccorres: apply (frule interpret_excaps_eq) apply (clarsimp simp: excaps_map_def mask_def[where n=4] ccap_rights_relation_def rightsFromWord_wordFromRights - "StrictC'_thread_state_defs" map_comp_Some_iff + ThreadState_defs map_comp_Some_iff rf_sr_ksCurThread hd_conv_nth hd_drop_conv_nth) apply ((rule conjI | clarsimp simp: rightsFromWord_wordFromRights @@ -1386,8 +1375,7 @@ lemma decodeCNodeInvocation_ccorres: map_option_Some_eq2 neq_Nil_conv ccap_relation_def numeral_eqs hasCancelSendRights_not_Null ccap_relation_NullCap_iff[symmetric] - if_1_0_0 interpret_excaps_test_null - mdbRevocable_CL_cte_to_H false_def true_def + interpret_excaps_test_null mdbRevocable_CL_cte_to_H | clarsimp simp: typ_heap_simps' | 
frule length_ineq_not_Nil)+) done @@ -1396,9 +1384,6 @@ end context begin interpretation Arch . (*FIXME: arch_split*) -crunch valid_queues[wp]: insertNewCap "valid_queues" - (wp: crunch_wps) - lemmas setCTE_def3 = setCTE_def2[THEN eq_reflection] lemma setCTE_sch_act_wf[wp]: @@ -1468,7 +1453,7 @@ lemma seL4_MessageInfo_lift_def2: lemma globals_update_id: "globals_update (t_hrs_'_update (hrs_htd_update id)) x = x" - by (simp add:id_def hrs_htd_update_def) + by (simp add: hrs_htd_update_def) lemma getObjectSize_spec: "\s. \\\s. \t \ of_nat (length (enum::object_type list) - 1)\ Call getObjectSize_'proc @@ -1523,7 +1508,7 @@ shows "\ctes_of (s::kernel_state) (ptr_val p) = Some cte; is_aligned ptr bits; bits < word_bits; {ptr..ptr + 2 ^ bits - 1} \ {ptr_val p..ptr_val p + mask cteSizeBits} = {}; ((clift hp) :: (cte_C ptr \ cte_C)) p = Some to\ \ (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: (cte_C ptr \ cte_C)) p = Some to" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def restrict_map_def split:if_splits) + apply (clarsimp simp:lift_t_def lift_typ_heap_def restrict_map_def split:if_splits) apply (intro conjI impI) apply (case_tac hp) apply (clarsimp simp:typ_clear_region_def hrs_htd_update_def) @@ -1831,8 +1816,7 @@ lemma resetUntypedCap_ccorres: apply (rule ccorres_Guard_Seq[where S=UNIV])? apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow) - apply (rule_tac idx="capFreeIndex (cteCap cte)" - in deleteObjects_ccorres[where p=slot, unfolded o_def]) + apply (rule_tac idx="capFreeIndex (cteCap cte)" in deleteObjects_ccorres[where p=slot]) apply ceqv apply clarsimp apply (simp only: ccorres_seq_cond_raise) @@ -2347,7 +2331,7 @@ lemma invokeUntyped_Retype_ccorres: (Call invokeUntyped_Retype_'proc)" apply (cinit lift: retypeBase_' srcSlot_' reset_' newType_' userSize_' deviceMemory_' destCNode_' destOffset_' destLength_' - simp: when_def) + simp: when_def archOverlap_def) apply (rule ccorres_move_c_guard_cte) apply csymbr apply (rule ccorres_abstract_cleanup) @@ -2422,7 +2406,7 @@ lemma invokeUntyped_Retype_ccorres: apply (clarsimp simp: misc unat_of_nat_eq[OF range_cover.weak, OF cover]) apply (vcg exspec=cap_untyped_cap_ptr_set_capFreeIndex_modifies) apply simp - apply (rule validE_validE_R, rule hoare_post_impErr, + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule hoare_vcg_conj_liftE1[rotated, where Q="\_ s. case gsCNodes s cnodeptr of None \ False | Some n \ length destSlots + unat start \ 2 ^ n"], @@ -2607,7 +2591,7 @@ lemma mapME_ensureEmptySlot': apply (erule meta_allE) apply wp apply (fold validE_R_def) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply clarsimp done @@ -2616,7 +2600,7 @@ lemma mapME_ensureEmptySlot: mapME (\x. injection_handler Inl (ensureEmptySlot (f x))) [S .e. (E::machine_word)] \\rva s. \slot. S \ slot \ slot \ E \ (\cte. 
cteCap cte = capability.NullCap \ ctes_of s (f slot) = Some cte)\, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule mapME_ensureEmptySlot') apply clarsimp done @@ -2980,8 +2964,8 @@ lemma decodeUntypedInvocation_ccorres_helper: [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def]) apply (simp add: injection_liftE[OF refl]) - apply (simp add: liftE_liftM o_def split_def withoutFailure_def - hd_drop_conv_nth2 numeral_eqs[symmetric]) + apply (simp add: liftE_liftM split_def hd_drop_conv_nth2 + cong: ccorres_all_cong) apply (rule ccorres_nohs) apply (rule ccorres_getSlotCap_cte_at) apply (rule ccorres_move_c_guard_cte) @@ -3204,8 +3188,7 @@ lemma decodeUntypedInvocation_ccorres_helper: performInvocation_def liftE_bindE bind_assoc) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_trim_returnE, (simp (no_asm))+) - apply (simp (no_asm) add: o_def dc_def[symmetric] bindE_assoc - id_def[symmetric] bind_bindE_assoc) + apply (simp (no_asm) add: bindE_assoc bind_bindE_assoc) apply (rule ccorres_seq_skip'[THEN iffD1]) apply (ctac(no_vcg) add: invokeUntyped_Retype_ccorres[where start = "args!4"]) apply (rule ccorres_alternative2) @@ -3254,7 +3237,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply vcg apply (rule ccorres_guard_imp [where Q =\ and Q' = UNIV,rotated],assumption+) - apply (simp add: o_def) + apply simp apply (simp add: liftE_validE) apply (rule checkFreeIndex_wp) apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat cap_get_tag_isCap @@ -3271,8 +3254,7 @@ lemma decodeUntypedInvocation_ccorres_helper: unat_of_nat_APIType_capBits word_size hd_conv_nth length_ineq_not_Nil not_less word_le_nat_alt isCap_simps valid_cap_simps') apply (strengthen word_of_nat_less) - apply (clarsimp simp: StrictC'_thread_state_defs mask_def true_def false_def - from_bool_0 ccap_relation_isDeviceCap2 + apply (clarsimp simp: ThreadState_defs mask_def ccap_relation_isDeviceCap2 split: if_split) apply (clarsimp simp: not_less shiftr_overflow maxUntypedSizeBits_def unat_of_nat_APIType_capBits) @@ -3286,10 +3268,10 @@ lemma decodeUntypedInvocation_ccorres_helper: and ex_cte_cap_to' (capCNodePtr rv) and (\s. case gsCNodes s (capCNodePtr rv) of None \ False | Some n \ args ! 4 + args ! 5 - 1 < 2 ^ n) - and sch_act_simple and ct_active'" in hoare_post_imp_R) + and sch_act_simple and ct_active'" in hoare_strengthen_postE_R) prefer 2 apply (clarsimp simp: invs_valid_objs' invs_mdb' - invs_queues ct_in_state'_def pred_tcb_at') + ct_in_state'_def pred_tcb_at') apply (subgoal_tac "ksCurThread s \ ksIdleThread sa") prefer 2 apply clarsimp @@ -3321,7 +3303,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule validE_R_validE) apply (wp injection_wp_E[OF refl]) apply clarsimp - apply (simp add: ccHoarePost_def xfdc_def) + apply (simp add: ccHoarePost_def) apply (simp only: whileAnno_def[where I=UNIV and V=UNIV, symmetric]) apply (rule_tac V=UNIV in HoarePartial.reannotateWhileNoGuard) @@ -3334,7 +3316,7 @@ lemma decodeUntypedInvocation_ccorres_helper: \ invs' s \ ksCurThread s = thread \ valid_cap' r s \ (\rf\cte_refs' r (irq_node' s). 
ex_cte_cap_to' rf s) - \ sch_act_simple s \ ct_active' s" in hoare_post_imp_R) + \ sch_act_simple s \ ct_active' s" in hoare_strengthen_postE_R) apply clarsimp apply (wp injection_wp_E[OF refl] getSlotCap_cap_to' getSlotCap_capAligned @@ -3378,8 +3360,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (clarsimp simp: hd_drop_conv_nth2 hd_conv_nth neq_Nil_lengthI ct_in_state'_def pred_tcb_at' rf_sr_ksCurThread mask_eq_iff_w2p - "StrictC'_thread_state_defs" numeral_eqs[symmetric] - cap_get_tag_isCap cte_wp_at_ctes_of + numeral_eqs[symmetric] cap_get_tag_isCap cte_wp_at_ctes_of unat_eq_0 ccHoarePost_def) apply (rule conjI) apply (clarsimp simp: linorder_not_less isCap_simps) @@ -3451,18 +3432,16 @@ shows apply (rule ccorres_guard_imp2) apply (rule monadic_rewrite_ccorres_assemble) apply (rule_tac isBlocking=isBlocking and isCall=isCall and buffer=buffer - in decodeUntypedInvocation_ccorres_helper[unfolded K_def]) + in decodeUntypedInvocation_ccorres_helper) apply assumption - apply (rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_bindE[OF monadic_rewrite_refl]) - apply (simp add: liftE_bindE stateAssert_def2 bind_assoc) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P=x in monadic_rewrite_gen_asm) - apply simp + apply (simp add: liftE_bindE stateAssert_def2 bind_assoc) + apply (monadic_rewrite_r monadic_rewrite_if_r_True) + apply (monadic_rewrite_r_method monadic_rewrite_symb_exec_r_drop wpsimp) apply (rule monadic_rewrite_refl) - apply (wp | simp)+ - apply (simp add: gets_bind_ign) + apply wpsimp + apply (rule monadic_rewrite_refl) apply (rule monadic_rewrite_refl) apply (clarsimp simp: ex_cte_cap_wp_to'_def excaps_in_mem_def) apply (drule(1) bspec)+ diff --git a/proof/crefine/RISCV64/IpcCancel_C.thy b/proof/crefine/RISCV64/IpcCancel_C.thy index 4e5d904bd6..b11da5071d 100644 --- a/proof/crefine/RISCV64/IpcCancel_C.thy +++ b/proof/crefine/RISCV64/IpcCancel_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * @@ -13,12 +14,12 @@ context kernel_m begin lemma cready_queues_index_to_C_in_range': - assumes prems: "qdom \ ucast maxDom" "prio \ ucast maxPrio" + assumes prems: "qdom \ maxDomain" "prio \ maxPriority" shows "cready_queues_index_to_C qdom prio < num_tcb_queues" proof - have P: "unat prio < numPriorities" using prems - by (simp add: numPriorities_def seL4_MaxPrio_def Suc_le_lessD unat_le_helper) + by (simp add: numPriorities_def Suc_le_lessD unat_le_helper maxDomain_def maxPriority_def) have Q: "unat qdom < numDomains" using prems by (simp add: maxDom_to_H le_maxDomain_eq_less_numDomains word_le_nat_alt) @@ -28,40 +29,22 @@ proof - qed lemmas cready_queues_index_to_C_in_range = - cready_queues_index_to_C_in_range'[simplified num_tcb_queues_def] + cready_queues_index_to_C_in_range'[simplified num_tcb_queues_val] lemma cready_queues_index_to_C_inj: "\ cready_queues_index_to_C qdom prio = cready_queues_index_to_C qdom' prio'; - prio \ ucast maxPrio; prio' \ ucast maxPrio \ \ prio = prio' \ qdom = qdom'" + prio \ maxPriority; prio' \ maxPriority \ \ prio = prio' \ qdom = qdom'" apply (rule context_conjI) - apply (auto simp: cready_queues_index_to_C_def numPriorities_def + apply (auto simp: cready_queues_index_to_C_def numPriorities_def maxPriority_def seL4_MaxPrio_def word_le_nat_alt dest: arg_cong[where f="\x. 
x mod 256"]) done lemma cready_queues_index_to_C_distinct: - "\ qdom = qdom' \ prio \ prio'; prio \ ucast maxPrio; prio' \ ucast maxPrio \ + "\ qdom = qdom' \ prio \ prio'; prio \ maxPriority; prio' \ maxPriority \ \ cready_queues_index_to_C qdom prio \ cready_queues_index_to_C qdom' prio'" apply (auto simp: cready_queues_index_to_C_inj) done -lemma cstate_relation_ksReadyQueues_update: - "\ cstate_relation hs cs; arr = ksReadyQueues_' cs; - sched_queue_relation' (clift (t_hrs_' cs)) v (head_C v') (end_C v'); - qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ cstate_relation (ksReadyQueues_update (\qs. qs ((qdom, prio) := v)) hs) - (ksReadyQueues_'_update (\_. Arrays.update arr - (cready_queues_index_to_C qdom prio) v') cs)" - apply (clarsimp simp: cstate_relation_def Let_def - cmachine_state_relation_def - carch_state_relation_def carch_globals_def - cready_queues_relation_def seL4_MinPrio_def minDom_def) - apply (frule cready_queues_index_to_C_in_range, assumption) - apply clarsimp - apply (frule_tac qdom=qdoma and prio=prioa in cready_queues_index_to_C_in_range, assumption) - apply (frule cready_queues_index_to_C_distinct, assumption+) - apply clarsimp - done - lemma cmap_relation_drop_fun_upd: "\ cm x = Some v; \v''. rel v'' v = rel v'' v' \ \ cmap_relation am (cm (x \ v')) f rel @@ -72,16 +55,6 @@ lemma cmap_relation_drop_fun_upd: apply (auto split: if_split) done -lemma valid_queuesD': - "\ obj_at' (inQ d p) t s; valid_queues' s \ - \ t \ set (ksReadyQueues s (d, p))" - by (simp add: valid_queues'_def) - -lemma invs_valid_queues'[elim!]: - "invs' s \ valid_queues' s" - by (simp add: invs'_def valid_state'_def) - - lemma ntfn_ptr_get_queue_spec: "\s. \ \ {\. s = \ \ \ \\<^sub>c \<^bsup>\\<^esup>ntfnPtr} \ret__struct_tcb_queue_C :== PROC ntfn_ptr_get_queue(\ntfnPtr) \head_C \ret__struct_tcb_queue_C = Ptr (ntfnQueue_head_CL (notification_lift (the (cslift s \<^bsup>s\<^esup>ntfnPtr)))) \ @@ -210,7 +183,7 @@ lemma cancelSignal_ccorres_helper: apply (drule (2) ntfn_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) - apply (frule null_ep_queue [simplified Fun.comp_def]) + apply (frule null_ep_queue [simplified comp_def]) apply (intro impI conjI allI) \ \empty case\ apply clarsimp @@ -226,22 +199,19 @@ lemma cancelSignal_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def) - apply (simp add: carch_state_relation_def carch_globals_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def) + 
apply (simp add: carch_state_relation_def carch_globals_def) apply (clarsimp simp: carch_state_relation_def carch_globals_def typ_heap_simps' packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) @@ -262,34 +232,31 @@ lemma cancelSignal_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue) - apply fastforce - apply assumption+ - apply simp - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def isWaitingNtfn_def - split: ntfn.splits split del: if_split) - apply (erule iffD1 [OF tcb_queue_relation'_cong [OF refl _ _ refl], rotated -1]) - apply (clarsimp simp add: h_t_valid_clift_Some_iff) - apply (subst tcb_queue_relation'_next_sign; assumption?) - apply fastforce - apply (simp add: notification_lift_def sign_extend_sign_extend_eq canonical_bit_def) - apply (clarsimp simp: h_t_valid_clift_Some_iff notification_lift_def sign_extend_sign_extend_eq) - apply (subst tcb_queue_relation'_prev_sign; assumption?) - apply fastforce - apply (simp add: canonical_bit_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue) + apply fastforce + apply assumption+ + apply simp + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def isWaitingNtfn_def + split: ntfn.splits split del: if_split) + apply (erule iffD1 [OF tcb_queue_relation'_cong [OF refl _ _ refl], rotated -1]) + apply (clarsimp simp add: h_t_valid_clift_Some_iff) + apply (subst tcb_queue_relation'_next_sign; assumption?) + apply fastforce + apply (simp add: notification_lift_def sign_extend_sign_extend_eq canonical_bit_def) + apply (clarsimp simp: h_t_valid_clift_Some_iff notification_lift_def sign_extend_sign_extend_eq) + apply (subst tcb_queue_relation'_prev_sign; assumption?) 
+ apply fastforce + apply (simp add: canonical_bit_def) + apply simp subgoal by (clarsimp simp: carch_state_relation_def carch_globals_def) subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: h_t_valid_clift_Some_iff) @@ -422,9 +389,9 @@ lemma isStopped_ccorres [corres]: apply vcg apply clarsimp apply clarsimp - apply (clarsimp simp: to_bool_def true_def false_def typ_heap_simps - ctcb_relation_thread_state_to_tsType split: thread_state.splits) - apply (simp add: "StrictC'_thread_state_defs")+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ done lemma isRunnable_ccorres [corres]: @@ -450,71 +417,9 @@ lemma isRunnable_ccorres [corres]: apply (vcg) apply (clarsimp) apply (clarsimp) - apply (clarsimp simp: to_bool_def true_def false_def typ_heap_simps - ctcb_relation_thread_state_to_tsType split: thread_state.splits) - apply (simp add: "StrictC'_thread_state_defs")+ -done - - - -lemma tcb_queue_relation_update_head: - fixes getNext_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" and - getPrev_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" - assumes qr: "tcb_queue_relation getNext getPrev mp queue NULL qhead" - and qh': "qhead' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qhead' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qhead' \ NULL" - and fgN: "fg_cons getNext (getNext_update \ (\x _. x))" - and fgP: "fg_cons getPrev (getPrev_update \ (\x _. x))" - and npu: "\f t. getNext (getPrev_update f t) = getNext t" - and pnu: "\f t. getPrev (getNext_update f t) = getPrev t" - shows "tcb_queue_relation getNext getPrev - (upd_unless_null qhead (getPrev_update (\_. qhead') (the (mp qhead))) - (mp(qhead' := Some (getPrev_update (\_. NULL) (getNext_update (\_. qhead) tcb))))) - (ctcb_ptr_to_tcb_ptr qhead' # queue) NULL qhead'" - using qr qh' cs_tcb valid_ep qhN - apply (subgoal_tac "qhead \ qhead'") - apply (clarsimp simp: pnu upd_unless_null_def fg_consD1 [OF fgN] fg_consD1 [OF fgP] npu) - apply (cases queue) - apply simp - apply (frule (2) tcb_queue_relation_next_not_NULL) - apply simp - apply (clarsimp simp: fg_consD1 [OF fgN] fg_consD1 [OF fgP] pnu npu) - apply (subst tcb_queue_relation_cong [OF refl refl refl, where mp' = mp]) - apply (clarsimp simp: inj_eq) - apply (intro impI conjI) - apply (frule_tac x = x in imageI [where f = tcb_ptr_to_ctcb_ptr]) - apply simp - apply simp - apply simp - apply clarsimp - apply (cases queue) - apply simp - apply simp - done - -lemma tcbSchedEnqueue_update: - assumes sr: "sched_queue_relation' mp queue qhead qend" - and qh': "qhead' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qhead' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qhead' \ NULL" - shows - "sched_queue_relation' - (upd_unless_null qhead (tcbSchedPrev_C_update (\_. 
qhead') (the (mp qhead))) - (mp(qhead' \ tcb\tcbSchedNext_C := qhead, tcbSchedPrev_C := NULL\))) - (ctcb_ptr_to_tcb_ptr qhead' # queue) qhead' (if qend = NULL then qhead' else qend)" - using sr qh' cs_tcb valid_ep qhN - apply - - apply (erule tcb_queue_relationE') - apply (rule tcb_queue_relationI') - apply (erule (5) tcb_queue_relation_update_head - [where getNext_update = tcbSchedNext_C_update and getPrev_update = tcbSchedPrev_C_update], simp_all)[1] - apply simp - apply (intro impI) - apply (erule (1) tcb_queue_relation_not_NULL') - apply simp + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ done lemma tcb_ptr_to_ctcb_ptr_imageD: @@ -529,93 +434,6 @@ lemma ctcb_ptr_to_tcb_ptr_imageI: apply simp done -lemma tcb_queue'_head_end_NULL: - assumes qr: "tcb_queue_relation' getNext getPrev mp queue qhead qend" - and tat: "\t\set queue. tcb_at' t s" - shows "(qend = NULL) = (qhead = NULL)" - using qr tat - apply - - apply (erule tcb_queue_relationE') - apply (simp add: tcb_queue_head_empty_iff) - apply (rule impI) - apply (rule tcb_at_not_NULL) - apply (erule bspec) - apply simp - done - -lemma tcb_queue_relation_qhead_mem: - "\ tcb_queue_relation getNext getPrev mp queue NULL qhead; - (\tcb\set queue. tcb_at' tcb t) \ - \ qhead \ NULL \ ctcb_ptr_to_tcb_ptr qhead \ set queue" - by (clarsimp simp: tcb_queue_head_empty_iff tcb_queue_relation_head_hd) - -lemma tcb_queue_relation_qhead_valid: - "\ tcb_queue_relation getNext getPrev (cslift s') queue NULL qhead; - (s, s') \ rf_sr; (\tcb\set queue. tcb_at' tcb s) \ - \ qhead \ NULL \ s' \\<^sub>c qhead" - apply (frule (1) tcb_queue_relation_qhead_mem) - apply clarsimp - apply(drule (3) tcb_queue_memberD) - apply (simp add: h_t_valid_clift_Some_iff) - done - -lemmas tcb_queue_relation_qhead_mem' = tcb_queue_relation_qhead_mem [OF tcb_queue_relation'_queue_rel] -lemmas tcb_queue_relation_qhead_valid' = tcb_queue_relation_qhead_valid [OF tcb_queue_relation'_queue_rel] - - -lemma valid_queues_valid_q: - "valid_queues s \ (\tcb\set (ksReadyQueues s (qdom, prio)). tcb_at' tcb s) \ distinct (ksReadyQueues s (qdom, prio))" - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec [where x = qdom]) - apply (drule spec [where x = prio]) - apply clarsimp - apply (drule (1) bspec, erule obj_at'_weakenE) - apply simp - done - -lemma invs_valid_q: - "invs' s \ (\tcb\set (ksReadyQueues s (qdom, prio)). 
tcb_at' tcb s) \ distinct (ksReadyQueues s (qdom, prio))" - apply (rule valid_queues_valid_q) - apply (clarsimp simp: invs'_def valid_state'_def) - done - -lemma tcbQueued_not_in_queues: - assumes vq: "valid_queues s" - and objat: "obj_at' (Not \ tcbQueued) thread s" - shows "thread \ set (ksReadyQueues s (d, p))" - using vq objat - apply - - apply clarsimp - apply (drule (1) valid_queues_obj_at'D) - apply (erule obj_atE')+ - apply (clarsimp simp: inQ_def) - done - - -lemma rf_sr_sched_queue_relation: - "\ (s, s') \ rf_sr; d \ ucast maxDom; p \ ucast maxPrio \ - \ sched_queue_relation' (cslift s') (ksReadyQueues s (d, p)) - (head_C (index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p))) - (end_C (index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p)))" - unfolding rf_sr_def cstate_relation_def cready_queues_relation_def - apply (clarsimp simp: Let_def seL4_MinPrio_def minDom_def) - done - -lemma ready_queue_not_in: - assumes vq: "valid_queues s" - and inq: "t \ set (ksReadyQueues s (d, p))" - and neq: "d \ d' \ p \ p'" - shows "t \ set (ksReadyQueues s (d', p'))" -proof - assume "t \ set (ksReadyQueues s (d', p'))" - hence "obj_at' (inQ d' p') t s" using vq by (rule valid_queues_obj_at'D) - moreover have "obj_at' (inQ d p) t s" using inq vq by (rule valid_queues_obj_at'D) - ultimately show False using neq - by (clarsimp elim!: obj_atE' simp: inQ_def) -qed - lemma ctcb_relation_unat_prio_eq: "ctcb_relation tcb tcb' \ unat (tcbPriority tcb) = unat (tcbPriority_C tcb')" apply (clarsimp simp: ctcb_relation_def) @@ -649,144 +467,6 @@ lemma threadSet_queued_ccorres [corres]: apply (clarsimp simp: typ_heap_simps) done -lemma ccorres_pre_getQueue: - assumes cc: "\queue. ccorres r xf (P queue) (P' queue) hs (f queue) c" - shows "ccorres r xf (\s. P (ksReadyQueues s (d, p)) s \ d \ maxDomain \ p \ maxPriority) - {s'. \queue. (let cqueue = index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p) in - sched_queue_relation' (cslift s') queue (head_C cqueue) (end_C cqueue)) \ s' \ P' queue} - hs (getQueue d p >>= (\queue. f queue)) c" - apply (rule ccorres_guard_imp2) - apply (rule ccorres_symb_exec_l2) - defer - defer - apply (rule gq_sp) - defer - apply (rule ccorres_guard_imp) - apply (rule cc) - apply clarsimp - apply assumption - apply assumption - apply (clarsimp simp: getQueue_def gets_exs_valid) - apply clarsimp - apply (drule spec, erule mp) - apply (simp add: Let_def) - apply (erule rf_sr_sched_queue_relation) - apply (simp add: maxDom_to_H maxPrio_to_H)+ - done - -lemma state_relation_queue_update_helper': - "\ (s, s') \ rf_sr; - (\d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p))); - globals t = ksReadyQueues_'_update - (\_. Arrays.update (ksReadyQueues_' (globals s')) prio' q') - (t_hrs_'_update f (globals s')); - sched_queue_relation' (cslift t) q (head_C q') (end_C q'); - cslift t |` ( - tcb_ptr_to_ctcb_ptr ` S ) - = cslift s' |` ( - tcb_ptr_to_ctcb_ptr ` S ); - option_map tcb_null_sched_ptrs \ cslift t - = option_map tcb_null_sched_ptrs \ cslift s'; - cslift_all_but_tcb_C t s'; - zero_ranges_are_zero (gsUntypedZeroRanges s) (f (t_hrs_' (globals s'))) - = zero_ranges_are_zero (gsUntypedZeroRanges s) (t_hrs_' (globals s')); - hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s')); - prio' = cready_queues_index_to_C qdom prio; - \x \ S. obj_at' (inQ qdom prio) x s - \ (obj_at' (\tcb. tcbPriority tcb = prio) x s - \ obj_at' (\tcb. tcbDomain tcb = qdom) x s) - \ (tcb_at' x s \ (\d' p'. 
(d' \ qdom \ p' \ prio) - \ x \ set (ksReadyQueues s (d', p')))); - S \ {}; qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ (s \ksReadyQueues := (ksReadyQueues s)((qdom, prio) := q)\, t) \ rf_sr" - apply (subst(asm) disj_imp_rhs) - apply (subst obj_at'_and[symmetric]) - apply (rule disjI1, erule obj_at'_weakenE, simp add: inQ_def) - apply (subst(asm) disj_imp_rhs) - apply (subst(asm) obj_at'_and[symmetric]) - apply (rule conjI, erule obj_at'_weakenE, simp) - apply (rule allI, rule allI) - apply (drule_tac x=d' in spec) - apply (drule_tac x=p' in spec) - apply clarsimp - apply (drule(1) bspec) - apply (clarsimp simp: inQ_def obj_at'_def) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (intro conjI) - \ \cpspace_relation\ - apply (erule nonemptyE, drule(1) bspec) - apply (clarsimp simp: cpspace_relation_def) - apply (drule obj_at_ko_at', clarsimp) - apply (rule cmap_relationE1, assumption, - erule ko_at_projectKO_opt) - apply (frule null_sched_queue) - apply (frule null_sched_epD) - apply (intro conjI) - \ \tcb relation\ - apply (drule ctcb_relation_null_queue_ptrs, - simp_all)[1] - \ \endpoint relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (erule cendpoint_relation_upd_tcb_no_queues, simp+) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (erule cnotification_relation_upd_tcb_no_queues, simp+) - \ \ready queues\ - apply (simp add: cready_queues_relation_def Let_def cready_queues_index_to_C_in_range - seL4_MinPrio_def minDom_def) - apply clarsimp - apply (frule cready_queues_index_to_C_distinct, assumption+) - apply (clarsimp simp: cready_queues_index_to_C_in_range all_conj_distrib) - apply (rule iffD1 [OF tcb_queue_relation'_cong[OF refl], rotated -1], - drule spec, drule spec, erule mp, simp+) - apply clarsimp - apply (drule_tac x="tcb_ptr_to_ctcb_ptr x" in fun_cong)+ - apply (clarsimp simp: restrict_map_def - split: if_split_asm) - by (auto simp: carch_state_relation_def cmachine_state_relation_def) - -lemma state_relation_queue_update_helper: - "\ (s, s') \ rf_sr; valid_queues s; - globals t = ksReadyQueues_'_update - (\_. Arrays.update (ksReadyQueues_' (globals s')) prio' q') - (t_hrs_'_update f (globals s')); - sched_queue_relation' (cslift t) q (head_C q') (end_C q'); - cslift t |` ( - tcb_ptr_to_ctcb_ptr ` S ) - = cslift s' |` ( - tcb_ptr_to_ctcb_ptr ` S ); - option_map tcb_null_sched_ptrs \ cslift t - = option_map tcb_null_sched_ptrs \ cslift s'; - cslift_all_but_tcb_C t s'; - zero_ranges_are_zero (gsUntypedZeroRanges s) (f (t_hrs_' (globals s'))) - = zero_ranges_are_zero (gsUntypedZeroRanges s) (t_hrs_' (globals s')); - hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s')); - prio' = cready_queues_index_to_C qdom prio; - \x \ S. obj_at' (inQ qdom prio) x s - \ (obj_at' (\tcb. tcbPriority tcb = prio) x s - \ obj_at' (\tcb. tcbDomain tcb = qdom) x s) - \ (tcb_at' x s \ (\d' p'. (d' \ qdom \ p' \ prio) - \ x \ set (ksReadyQueues s (d', p')))); - S \ {}; qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ (s \ksReadyQueues := (ksReadyQueues s)((qdom, prio) := q)\, t) \ rf_sr" - apply (subgoal_tac "\d p. (\t\set (ksReadyQueues s (d, p)). 
obj_at' (inQ d p) t s) - \ distinct(ksReadyQueues s (d, p))") - apply (erule(5) state_relation_queue_update_helper', simp_all) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE, clarsimp) - done - -(* FIXME: move *) -lemma from_bool_vals [simp]: - "from_bool True = scast true" - "from_bool False = scast false" - "scast true \ scast false" - by (auto simp add: from_bool_def true_def false_def) - (* FIXME: move *) lemma cmap_relation_no_upd: "\ cmap_relation a c f rel; a p = Some ko; rel ko v; inj f \ \ cmap_relation a (c(f p \ v)) f rel" @@ -831,8 +511,8 @@ lemma cready_queues_index_to_C_def2: lemma ready_queues_index_spec: "\s. \ \ {s'. s' = s \ (Kernel_Config.numDomains \ 1 \ dom_' s' = 0)} Call ready_queues_index_'proc - \\ret__unsigned_long = (dom_' s) * 0x100 + (prio_' s)\" - by vcg (simp add: numDomains_sge_1_simp) + \\ret__unsigned_long = (dom_' s) * word_of_nat numPriorities + (prio_' s)\" + by vcg (simp add: numDomains_sge_1_simp numPriorities_def) lemma prio_to_l1index_spec: "\s. \ \ {s} Call prio_to_l1index_'proc @@ -927,56 +607,6 @@ lemma cbitmap_L2_relation_bit_set: apply (case_tac "da = d" ; clarsimp simp: num_domains_index_updates) done -lemma carch_state_relation_enqueue_simp: - "carch_state_relation (ksArchState \) - (t_hrs_'_update f - (globals \' \ksReadyQueuesL1Bitmap_' := l1upd, ksReadyQueuesL2Bitmap_' := l2upd \) - \ksReadyQueues_' := rqupd \) = - carch_state_relation (ksArchState \) (t_hrs_'_update f (globals \'))" - unfolding carch_state_relation_def - by clarsimp - -lemma t_hrs_ksReadyQueues_upd_absorb: - "t_hrs_'_update f (g s) \ksReadyQueues_' := rqupd \ = - t_hrs_'_update f (g s \ksReadyQueues_' := rqupd\)" - by simp - -lemma rf_sr_drop_bitmaps_enqueue_helper: - "\ (\,\') \ rf_sr ; - cbitmap_L1_relation ksqL1upd' ksqL1upd ; cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ - ((\\ksReadyQueues := ksqupd, ksReadyQueuesL1Bitmap := ksqL1upd, ksReadyQueuesL2Bitmap := ksqL2upd\, - \'\idx_' := i', queue_' := queue_upd', - globals := t_hrs_'_update f - (globals \' - \ksReadyQueuesL1Bitmap_' := ksqL1upd', - ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueues_' := ksqupd'\)\) \ rf_sr) = - ((\\ksReadyQueues := ksqupd\, - \'\idx_' := i', queue_' := queue_upd', - globals := t_hrs_'_update f - (globals \' \ksReadyQueues_' := ksqupd'\)\) \ rf_sr)" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) - -lemma cmachine_state_relation_enqueue_simp: - "cmachine_state_relation (ksMachineState \) - (t_hrs_'_update f - (globals \' \ksReadyQueuesL1Bitmap_' := l1upd, ksReadyQueuesL2Bitmap_' := l2upd \) - \ksReadyQueues_' := rqupd \) = - cmachine_state_relation (ksMachineState \) (t_hrs_'_update f (globals \'))" - unfolding cmachine_state_relation_def - by clarsimp - -lemma tcb_queue_relation'_empty_ksReadyQueues: - "\ sched_queue_relation' (cslift x) (q s) NULL NULL ; \t\ set (q s). 
tcb_at' t s \ \ q s = []" - apply (clarsimp simp add: tcb_queue_relation'_def) - apply (subst (asm) eq_commute) - apply (cases "q s" rule: rev_cases, simp) - apply (clarsimp simp: tcb_at_not_NULL) - done - lemma invert_prioToL1Index_c_simp: "p \ maxPriority \ @@ -990,13 +620,247 @@ lemma c_invert_assist: "3 - (ucast (p :: priority) >> 6 :: machine_word) < 4" using prio_ucast_shiftr_wordRadix_helper'[simplified wordRadix_def] by - (rule word_less_imp_diff_less, simp_all) +lemma addToBitmap_ccorres: + "ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast prio\) hs + (addToBitmap tdom prio) (Call addToBitmap_'proc)" + supply prio_and_dom_limit_helpers[simp] invert_prioToL1Index_c_simp[simp] + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (frule maxDomain_le_unat_ucast_explicit) + apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (intro conjI impI allI) + apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (rule conjI) + apply (clarsimp intro!: cbitmap_L1_relation_bit_set) + apply (fastforce dest!: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) + done + +lemma rf_sr_tcb_update_twice: + "h_t_valid (hrs_htd (hrs2 (globals s') (t_hrs_' (gs2 (globals s'))))) c_guard + (ptr (t_hrs_' (gs2 (globals s'))) (globals s')) + \ ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs :: tcb_C ptr) (v ths gs)) + (hrs_mem_update (heap_update (ptr ths gs) (v' ths gs)) (hrs2 gs ths))) (gs2 gs)) s') \ rf_sr) + = ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs) (v ths gs)) (hrs2 gs ths)) (gs2 gs)) s') \ rf_sr)" + by (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def typ_heap_simps' + carch_state_relation_def cmachine_state_relation_def + packed_heap_update_collapse_hrs) + +lemmas rf_sr_tcb_update_no_queue_gen2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue_gen, simplified] + +lemma tcb_queue_prepend_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueuePrepend queue tcbPtr) (Call tcb_queue_prepend_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. 
queue_' s' = cqueue" + in ccorres_cond_both') + apply fastforce + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueHead queue)) s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply clarsimp + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma tcb_queue_append_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s) + \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueAppend queue tcbPtr) (Call tcb_queue_append_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. 
queue_' s' = cqueue" + in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueEnd queue)) s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma getQueue_ccorres: + "ccorres ctcb_queue_relation queue_' + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (getQueue tdom prio) (\queue :== \ksReadyQueues.[unat \idx])" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: getQueue_def gets_def get_def bind_def return_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + done + +lemma setQueue_ccorres: + "ctcb_queue_relation queue cqueue \ + ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (setQueue tdom prio queue) + (Basic (\s. globals_update + (ksReadyQueues_'_update + (\_. 
Arrays.update (ksReadyQueues_' (globals s)) (unat (idx_' s)) cqueue)) s))" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: setQueue_def get_def modify_def put_def bind_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + apply (frule cready_queues_index_to_C_distinct, assumption+) + apply (frule_tac qdom=d and prio=p in cready_queues_index_to_C_in_range) + apply fastforce + apply clarsimp + done + +crunch (empty_fail) empty_fail[wp]: isRunnable + lemma tcbSchedEnqueue_ccorres: "ccorres dc xfdc - (valid_queues and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - hs - (tcbSchedEnqueue t) - (Call tcbSchedEnqueue_'proc)" + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedEnqueue t) (Call tcbSchedEnqueue_'proc)" proof - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] note invert_prioToL1Index_c_simp[simp] @@ -1007,24 +871,12 @@ proof - show ?thesis apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" - in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def unless_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac runnable) + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" + in ccorres_split_nothrow) apply (rule threadGet_vcg_corres) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -1032,244 +884,246 @@ proof - apply (drule spec, drule(1) mp, clarsimp) apply (clarsimp simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="\s. valid_queues s \ (\p. t \ set (ksReadyQueues s p)) - \ (\tcb. 
ko_at' tcb t s \ tcbDomain tcb =rva - \ tcbPriority tcb = rvb \ valid_tcb' tcb s)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs null_def) - apply (clarsimp simp: queue_in_range valid_tcb'_def) - apply (rule conjI; clarsimp simp: queue_in_range) - (* queue is empty, set t to be new queue *) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (subgoal_tac - "head_C (ksReadyQueues_' (globals x) - .[cready_queues_index_to_C (tcbDomain tcb) (tcbPriority tcb)]) = NULL") - prefer 2 - apply (frule_tac s=\ in tcb_queue'_head_end_NULL; simp add: valid_queues_valid_q) - apply (subgoal_tac - "end_C (ksReadyQueues_' (globals x) - .[cready_queues_index_to_C (tcbDomain tcb) (tcbPriority tcb)]) = NULL") - prefer 2 - apply (frule_tac s=\ in tcb_queue'_head_end_NULL[symmetric]; simp add: valid_queues_valid_q) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (frule maxDomain_le_unat_ucast_explicit) - apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (simp add: t_hrs_ksReadyQueues_upd_absorb) - - apply (rule conjI) - apply (clarsimp simp: l2BitmapSize_def' wordRadix_def c_invert_assist) - apply (subst rf_sr_drop_bitmaps_enqueue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_set) - apply (fastforce intro: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) - - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (drule_tac qhead'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedEnqueue_update, - simp_all add: valid_queues_valid_q)[1] - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (erule(1) state_relation_queue_update_helper[where S="{t}"], - (simp | rule globals.equality)+, - simp_all add: cready_queues_index_to_C_def2 numPriorities_def - t_hrs_ksReadyQueues_upd_absorb upd_unless_null_def - typ_heap_simps)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def typ_heap_simps c_guard_clift - elim: obj_at'_weaken)+ - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply clarsimp - apply (rule conjI; clarsimp simp: queue_in_range) - (* invalid, disagreement between C and Haskell on emptiness of queue *) - apply (drule (1) obj_at_cslift_tcb) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def) - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply clarsimp - apply (drule tcb_queue_relation'_empty_ksReadyQueues; simp add: valid_queues_valid_q) - (* queue was not empty, add t to queue and leave bitmaps alone *) - apply (drule (1) obj_at_cslift_tcb) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (clarsimp simp: 
cready_queues_index_to_C_def numPriorities_def) - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply clarsimp - apply (frule_tac t=\ in tcb_queue_relation_qhead_mem') - apply (simp add: valid_queues_valid_q) - apply (frule(1) tcb_queue_relation_qhead_valid') - apply (simp add: valid_queues_valid_q) - apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff numPriorities_def - cready_queues_index_to_C_def2) - apply (drule_tac qhead'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedEnqueue_update, - simp_all add: valid_queues_valid_q)[1] + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) apply clarsimp - - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (frule(2) obj_at_cslift_tcb[OF valid_queues_obj_at'D]) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="{t, v}" for v in state_relation_queue_update_helper, - (simp | rule globals.equality)+, - simp_all add: typ_heap_simps if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 upd_unless_null_def - del: fun_upd_restrict_conv - cong: if_cong - split del: if_split)[1] - apply simp - apply (rule conjI) + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) apply clarsimp - apply (drule_tac s="tcb_ptr_to_ctcb_ptr t" in sym, simp) - apply (clarsimp simp add: fun_upd_twist) - prefer 3 - apply (simp add: obj_at'_weakenE[OF _ TrueI]) - apply (rule disjI1, erule (1) valid_queues_obj_at'D) - apply clarsimp - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (simp add: typ_heap_simps c_guard_clift) - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) - apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - apply (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def inQ_def - dest!: valid_queues_obj_at'D) - done + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. 
queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_prepend_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2) + done qed -lemmas tcbSchedDequeue_update - = tcbDequeue_update[where tn=tcbSchedNext_C and tn_update=tcbSchedNext_C_update - and tp'=tcbSchedPrev_C and tp_update=tcbSchedPrev_C_update, - simplified] - -lemma tcb_queue_relation_prev_next: - "\ tcb_queue_relation tn tp' mp queue qprev qhead; - tcbp \ set queue; distinct (ctcb_ptr_to_tcb_ptr qprev # queue); - \t \ set queue. 
tcb_at' t s; qprev \ tcb_Ptr 0 \ mp qprev \ None; - mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb \ - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tn tcb) \ None \ tn tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tp' tcb \ tcb_Ptr 0 \ (tp' tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ tp' tcb = qprev) - \ mp (tp' tcb) \ None \ tp' tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tp' tcb)" - apply (induct queue arbitrary: qprev qhead) - apply simp - apply simp - apply (erule disjE) - apply clarsimp - apply (case_tac "queue") - apply clarsimp - apply clarsimp - apply (rule conjI) - apply clarsimp - apply clarsimp - apply (drule_tac f=ctcb_ptr_to_tcb_ptr in arg_cong[where y="tp' tcb"], simp) - apply clarsimp - apply fastforce - done +lemma tcbSchedAppend_ccorres: + "ccorres dc xfdc + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedAppend t) (Call tcbSchedAppend_'proc)" +proof - + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] -lemma tcb_queue_relation_prev_next': - "\ tcb_queue_relation' tn tp' mp queue qhead qend; tcbp \ set queue; distinct queue; - \t \ set queue. tcb_at' t s; mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb \ - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tn tcb) \ None \ tn tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tp' tcb \ tcb_Ptr 0 \ tp' tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tp' tcb) \ None \ tp' tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tp' tcb)" - apply (clarsimp simp: tcb_queue_relation'_def split: if_split_asm) - apply (drule(1) tcb_queue_relation_prev_next, simp_all) - apply (fastforce dest: tcb_at_not_NULL) - apply clarsimp - done + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + note word_less_1[simp del] -(* L1 bitmap only updated if L2 entry bits end up all zero *) -lemma rf_sr_drop_bitmaps_dequeue_helper_L2: - "\ (\,\') \ rf_sr ; - cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ -((\\ksReadyQueues := ksqupd, - ksReadyQueuesL2Bitmap := ksqL2upd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueues_' := ksqupd'\\) - \ rf_sr) - = -((\\ksReadyQueues := ksqupd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueues_' := ksqupd'\\) \ rf_sr) -" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) - -lemma rf_sr_drop_bitmaps_dequeue_helper: - "\ (\,\') \ rf_sr ; - cbitmap_L1_relation ksqL1upd' ksqL1upd ; cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ -((\\ksReadyQueues := ksqupd, - ksReadyQueuesL2Bitmap := ksqL2upd, - ksReadyQueuesL1Bitmap := ksqL1upd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueuesL1Bitmap_' := ksqL1upd', - ksReadyQueues_' := ksqupd'\\) - \ rf_sr) - = -((\\ksReadyQueues := ksqupd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueues_' := ksqupd'\\) \ rf_sr) -" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) + show ?thesis + apply (cinit lift: tcb_') + apply (rule 
ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac "runnable") + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. 
queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply (fastforce dest!: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_append_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply clarsimp + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2 tcbQueueEmpty_def) + done +qed (* FIXME same proofs as bit_set, maybe can generalise? *) lemma cbitmap_L1_relation_bit_clear: @@ -1286,27 +1140,6 @@ lemma cbitmap_L1_relation_bit_clear: invertL1Index_def l2BitmapSize_def' le_maxDomain_eq_less_numDomains word_le_nat_alt num_domains_index_updates) -lemma cready_queues_relation_empty_queue_helper: - "\ tcbDomain ko \ maxDomain ; tcbPriority ko \ maxPriority ; - cready_queues_relation (cslift \') (ksReadyQueues_' (globals \')) (ksReadyQueues \)\ - \ - cready_queues_relation (cslift \') - (Arrays.update (ksReadyQueues_' (globals \')) (unat (tcbDomain ko) * 256 + unat (tcbPriority ko)) - (tcb_queue_C.end_C_update (\_. NULL) - (head_C_update (\_. 
NULL) - (ksReadyQueues_' (globals \').[unat (tcbDomain ko) * 256 + unat (tcbPriority ko)])))) - ((ksReadyQueues \)((tcbDomain ko, tcbPriority ko) := []))" - unfolding cready_queues_relation_def Let_def - using maxPrio_to_H[simp] maxDom_to_H[simp] - apply clarsimp - apply (frule (1) cready_queues_index_to_C_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (fold cready_queues_index_to_C_def[simplified numPriorities_def]) - apply (case_tac "qdom = tcbDomain ko", - simp_all add: prio_and_dom_limit_helpers seL4_MinPrio_def - minDom_def) - apply (fastforce simp: cready_queues_index_to_C_in_range simp: cready_queues_index_to_C_distinct)+ - done - lemma cbitmap_L2_relationD: "\ cbitmap_L2_relation cbitmap2 abitmap2 ; d \ maxDomain ; i < l2BitmapSize \ \ cbitmap2.[unat d].[i] = abitmap2 (d, i)" @@ -1336,466 +1169,301 @@ lemma cbitmap_L2_relation_bit_clear: apply (case_tac "da = d" ; clarsimp simp: num_domains_index_updates) done -lemma tcbSchedDequeue_ccorres': +lemma removeFromBitmap_ccorres: "ccorres dc xfdc - ((\s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p))) - and valid_queues' and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedDequeue t) - (Call tcbSchedDequeue_'proc)" + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast prio\) hs + (removeFromBitmap tdom prio) (Call removeFromBitmap_'proc)" proof - - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the shape of the proof compared to when numDomains > 1 *) include no_less_1_simps - have ksQ_tcb_at': "\s ko d p. - \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p)) \ - \t\set (ksReadyQueues s (d, p)). tcb_at' t s" - by (fastforce dest: spec elim: obj_at'_weakenE) - - have invert_l1_index_limit: "\p. invertL1Index (prioToL1Index p) < 4" + have invert_l1_index_limit: "\p. invertL1Index (prioToL1Index p) < l2BitmapSize" unfolding invertL1Index_def l2BitmapSize_def' prioToL1Index_def by simp show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" - in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. 
rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) + supply if_split[split del] + (* pull out static assms *) + apply simp + apply (rule ccorres_grab_asm[where P=\, simplified]) + apply (cinit lift: dom_' prio_') + apply clarsimp + apply csymbr + apply csymbr + (* we can clear up all C guards now *) + apply (clarsimp simp: maxDomain_le_unat_ucast_explicit word_and_less') + apply (simp add: invert_prioToL1Index_c_simp word_less_nat_alt) + apply (simp add: invert_l1_index_limit[simplified l2BitmapSize_def']) + apply ccorres_rewrite + (* handle L2 update *) + apply (rule_tac ccorres_split_nothrow_novcg_dc) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L2_relation) + apply (erule cbitmap_L2_relation_update) + apply (erule (1) cbitmap_L2_relation_bit_clear) + (* the check on the C side is identical to checking the L2 entry, rewrite the condition *) + apply (simp add: getReadyQueuesL2Bitmap_def) + apply (rule ccorres_symb_exec_l3, rename_tac l2) + apply (rule_tac C'="{s. l2 = 0}" + and Q="\s. l2 = ksReadyQueuesL2Bitmap s (tdom, invertL1Index (prioToL1Index prio))" + in ccorres_rewrite_cond_sr[where Q'=UNIV]) + apply clarsimp + apply (frule rf_sr_cbitmap_L2_relation) + apply (clarsimp simp: cbitmap_L2_relationD invert_l1_index_limit split: if_split) + (* unset L1 bit when L2 entry is empty *) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L1_relation) + apply (erule cbitmap_L1_relation_update) + apply (erule (1) cbitmap_L1_relation_bit_clear) + apply wpsimp+ + apply (fastforce simp: guard_is_UNIV_def) + apply clarsimp + done +qed + +lemma ctcb_ptr_to_tcb_ptr_option_to_ctcb_ptr[simp]: + "ctcb_ptr_to_tcb_ptr (option_to_ctcb_ptr (Some ptr)) = ptr" + by (clarsimp simp: option_to_ctcb_ptr_def) + +lemma tcb_queue_remove_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. 
tcb_at' tcbPtr s \ valid_objs' s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueRemove queue tcbPtr) (Call tcb_queue_remove_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit' lift: tcb_') + apply (rename_tac tcb') + apply (simp only: tcbQueueRemove_def) + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule ccorres_pre_getObject_tcb, rename_tac tcb) + apply (rule ccorres_symb_exec_l, rename_tac beforePtrOpt) + apply (rule ccorres_symb_exec_l, rename_tac afterPtrOpt) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="before___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr beforePtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedPrev tcb = beforePtrOpt)" + and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="(\s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct(ksReadyQueues s (d, p))) - and valid_queues' and obj_at' (inQ rva rvb) t - and (\s. rva \ maxDomain \ rvb \ maxPriority)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs when_def - null_def) - - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (frule(1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" in rf_sr_sched_queue_relation) - apply (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (frule_tac s=\ in tcb_queue_relation_prev_next'; (fastforce simp: ksQ_tcb_at')?) 
- apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (intro conjI; - clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift)+ - apply (drule(2) filter_empty_unfiltered_contr, simp)+ - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - apply (subst rf_sr_drop_bitmaps_dequeue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_clear) - apply (simp add: invert_prioToL1Index_c_simp) - apply (frule rf_sr_cbitmap_L2_relation) - apply (clarsimp simp: cbitmap_L2_relation_def - word_size prioToL1Index_def wordRadix_def mask_def - word_le_nat_alt - numPriorities_def wordBits_def l2BitmapSize_def' - invertL1Index_def numDomains_less_numeric_explicit) - apply (case_tac "d = tcbDomain ko" - ; fastforce simp: le_maxDomain_eq_less_numDomains - numDomains_less_numeric_explicit) - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - - apply (frule_tac s=\ in tcb_queue_relation_prev_next', assumption) - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by ((fastforce simp: ksQ_tcb_at')+) - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - (* trivial case, setting queue to empty *) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - cmachine_state_relation_def) - apply (erule (2) cready_queues_relation_empty_queue_helper) - (* impossible case, C L2 update disagrees with Haskell update *) - apply (simp add: invert_prioToL1Index_c_simp) - apply (subst (asm) num_domains_index_updates) - subgoal by (simp add: le_maxDomain_eq_less_numDomains word_le_nat_alt) - apply (subst (asm) Arrays.index_update) - apply (simp add: invert_l1_index_limit) - - apply (frule rf_sr_cbitmap_L2_relation) - apply (drule_tac i="invertL1Index (prioToL1Index (tcbPriority ko))" - in cbitmap_L2_relationD, assumption) - apply (fastforce simp: l2BitmapSize_def' invert_l1_index_limit) - apply (fastforce simp: prioToL1Index_def invertL1Index_def mask_def wordRadix_def) - (* impossible case *) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (drule(2) filter_empty_unfiltered_contr, fastforce) - - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply fold_subgoals[2] - apply (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (frule_tac s=\ in tcb_queue_relation_prev_next', assumption) - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: 
le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI, clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (rule conjI; clarsimp) - apply (simp add: typ_heap_simps) - apply (clarsimp simp: h_t_valid_c_guard [OF h_t_valid_field, OF h_t_valid_clift] - h_t_valid_field[OF h_t_valid_clift] h_t_valid_clift) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 typ_heap_simps - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - - apply (fastforce simp: tcb_null_sched_ptrs_def typ_heap_simps c_guard_clift - elim: obj_at'_weaken)+ - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split, - simp_all add: typ_heap_simps')[1] - subgoal by (fastforce simp: tcb_null_sched_ptrs_def) - subgoal by fastforce + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="after___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr afterPtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedNext tcb = afterPtrOpt)" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond[where R="?abs"]) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) apply clarsimp - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* invalid, missing bitmap updates on haskell side *) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems - by (fastforce dest!: tcb_queue_relation'_empty_ksReadyQueues - elim: obj_at'_weaken)+ - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[4] - subgoal premises prems using prems - by - (fastforce simp: typ_heap_simps c_guard_clift tcb_null_sched_ptrs_def - clift_heap_update_same[OF h_t_valid_clift])+ - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ 
- apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (frule_tac s=\ in tcb_queue_relation_prev_next') + apply (rule ccorres_assert2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) apply fastforce - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (clarsimp simp: typ_heap_simps) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (drule(2) filter_empty_unfiltered_contr[simplified filter_noteq_op], simp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* impossible case, C L2 update disagrees with Haskell update *) - apply (subst (asm) num_domains_index_updates) - apply (simp add: le_maxDomain_eq_less_numDomains word_le_nat_alt) - apply (subst (asm) Arrays.index_update) - subgoal using invert_l1_index_limit - by (fastforce simp add: invert_prioToL1Index_c_simp intro: nat_Suc_less_le_imp) - apply (frule rf_sr_cbitmap_L2_relation) - apply (simp add: invert_prioToL1Index_c_simp) - apply (drule_tac i="invertL1Index (prioToL1Index (tcbPriority ko))" - in cbitmap_L2_relationD, assumption) - subgoal by (simp add: invert_l1_index_limit l2BitmapSize_def') - apply (fastforce simp: prioToL1Index_def invertL1Index_def mask_def wordRadix_def) - - apply (simp add: invert_prioToL1Index_c_simp) - apply (subst rf_sr_drop_bitmaps_dequeue_helper_L2, assumption) - subgoal by (fastforce dest: rf_sr_cbitmap_L2_relation elim!: cbitmap_L2_relation_bit_clear) - - (* trivial case, setting queue to empty *) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - cmachine_state_relation_def) - apply (erule (2) cready_queues_relation_empty_queue_helper) - - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (simp add: invert_prioToL1Index_c_simp) - apply (frule_tac s=\ in tcb_queue_relation_prev_next') - apply (fastforce simp add: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: 
le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI, clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (clarsimp simp: typ_heap_simps) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (clarsimp simp: typ_heap_simps) - apply (fastforce simp: typ_heap_simps) - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[4] - subgoal premises prems using prems - by - (fastforce simp: typ_heap_simps c_guard_clift tcb_null_sched_ptrs_def - clift_heap_update_same[OF h_t_valid_clift])+ - apply (clarsimp) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* invalid, missing bitmap updates on haskell side *) - apply (drule tcb_queue_relation'_empty_ksReadyQueues) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce elim: obj_at'_weaken)+ - (* invalid, missing bitmap updates on haskell side *) - apply (drule tcb_queue_relation'_empty_ksReadyQueues) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce elim: obj_at'_weaken)+ - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 typ_heap_simps - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems - by (fastforce simp: typ_heap_simps c_guard_clift tcb_null_sched_ptrs_def)+ - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) - apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - by (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def valid_tcb'_def inQ_def) -qed - -lemma tcbSchedDequeue_ccorres: - "ccorres dc xfdc - (valid_queues and valid_queues' and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedDequeue t) - (Call tcbSchedDequeue_'proc)" - apply (rule ccorres_guard_imp [OF tcbSchedDequeue_ccorres']) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp)+ - done - -lemma tcb_queue_relation_append: - "\ tcb_queue_relation tn tp' mp queue qprev qhead; queue \ []; - qend' \ tcb_ptr_to_ctcb_ptr ` set queue; mp qend' = Some tcb; - queue = queue' 
@ [ctcb_ptr_to_tcb_ptr qend]; distinct queue; - \x \ set queue. tcb_ptr_to_ctcb_ptr x \ NULL; qend' \ NULL; - \v f g. tn (tn_update f v) = f (tn v) \ tp' (tp_update g v) = g (tp' v) - \ tn (tp_update f v) = tn v \ tp' (tn_update g v) = tp' v \ - \ tcb_queue_relation tn tp' - (mp (qend \ tn_update (\_. qend') (the (mp qend)), - qend' \ tn_update (\_. NULL) (tp_update (\_. qend) tcb))) - (queue @ [ctcb_ptr_to_tcb_ptr qend']) qprev qhead" - using [[hypsubst_thin = true]] - apply clarsimp - apply (induct queue' arbitrary: qprev qhead) - apply clarsimp - apply clarsimp - done - -lemma tcbSchedAppend_update: - assumes sr: "sched_queue_relation' mp queue qhead qend" - and qh': "qend' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qend' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qend' \ NULL" - shows - "sched_queue_relation' - (upd_unless_null qend (tcbSchedNext_C_update (\_. qend') (the (mp qend))) - (mp(qend' \ tcb\tcbSchedNext_C := NULL, tcbSchedPrev_C := qend\))) - (queue @ [ctcb_ptr_to_tcb_ptr qend']) (if queue = [] then qend' else qhead) qend'" - using sr qh' valid_ep cs_tcb qhN - apply - - apply (rule rev_cases[where xs=queue]) - apply (simp add: tcb_queue_relation'_def upd_unless_null_def) - apply (clarsimp simp: tcb_queue_relation'_def upd_unless_null_def tcb_at_not_NULL) - apply (drule_tac qend'=qend' and tn_update=tcbSchedNext_C_update - and tp_update=tcbSchedPrev_C_update and qend="tcb_ptr_to_ctcb_ptr y" - in tcb_queue_relation_append, simp_all) - apply (fastforce simp add: tcb_at_not_NULL) - apply (simp add: fun_upd_twist) - done - -lemma tcb_queue_relation_qend_mems: - "\ tcb_queue_relation' getNext getPrev mp queue qhead qend; - \x \ set queue. tcb_at' x s \ - \ (qend = NULL \ queue = []) - \ (qend \ NULL \ ctcb_ptr_to_tcb_ptr qend \ set queue)" - apply (clarsimp simp: tcb_queue_relation'_def) - apply (drule bspec, erule last_in_set) - apply (simp add: tcb_at_not_NULL) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) + apply clarsimp + apply (rule ccorres_assert2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. 
(s, s') \ rf_sr \ ko_at' tcb (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply (rule ccorres_assert2)+ + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. 
(s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (fastforce intro: ccorres_return_C') + apply (wpsimp | vcg)+ + apply (clarsimp split: if_splits) + apply normalise_obj_at' + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + by (intro conjI impI; + clarsimp simp: ctcb_queue_relation_def typ_heap_simps option_to_ctcb_ptr_def + valid_tcb'_def valid_bound_tcb'_def) + +lemma tcbQueueRemove_tcb_at'_head: + "\\s. valid_objs' s \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)\ + tcbQueueRemove queue t + \\rv s. \ tcbQueueEmpty rv \ tcb_at' (the (tcbQueueHead rv)) s\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp haskell_assert_wp hoare_vcg_imp_lift') + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def valid_bound_tcb'_def tcbQueueEmpty_def obj_at'_def) done -lemma tcbSchedAppend_ccorres: +lemma tcbSchedDequeue_ccorres: "ccorres dc xfdc - (valid_queues and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedAppend t) - (Call tcbSchedAppend_'proc)" + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedDequeue t) (Call tcbSchedDequeue_'proc)" proof - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the shape of the proof compared to when numDomains > 1 *) - include no_less_1_simps + note word_less_1[simp del] show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" - and xf'="ret__unsigned_longlong_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def unless_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="prio_'" in ccorres_split_nothrow) + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule_tac r'="\rv rv'. 
rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) apply (rule threadGet_vcg_corres) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -1803,130 +1471,80 @@ proof - apply (drule spec, drule(1) mp, clarsimp) apply (clarsimp simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="\s. valid_queues s \ (\p. t \ set (ksReadyQueues s p)) - \ (\tcb. ko_at' tcb t s \ tcbDomain tcb =rva - \ tcbPriority tcb = rvb \ valid_tcb' tcb s)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs null_def) - apply (clarsimp simp: queue_in_range valid_tcb'_def) - apply (rule conjI; clarsimp simp: queue_in_range) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (simp add: invert_prioToL1Index_c_simp) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (simp add: t_hrs_ksReadyQueues_upd_absorb) - apply (subst rf_sr_drop_bitmaps_enqueue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_set) - subgoal by (fastforce intro: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) - apply (erule(1) state_relation_queue_update_helper[where S="{t}"], - (simp | rule globals.equality)+, - simp_all add: cready_queues_index_to_C_def2 numPriorities_def - t_hrs_ksReadyQueues_upd_absorb upd_unless_null_def - typ_heap_simps)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def elim: obj_at'_weaken) - apply (fastforce simp: tcb_null_sched_ptrs_def elim: obj_at'_weaken) - apply (clarsimp simp: upd_unless_null_def cready_queues_index_to_C_def numPriorities_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp simp: queue_in_range) - apply 
(drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, - simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (clarsimp simp: upd_unless_null_def cready_queues_index_to_C_def numPriorities_def) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rule_tac r'=ctcb_queue_relation and xf'=new_queue_' in ccorres_split_nothrow) + apply (ctac add: tcb_queue_remove_ccorres) + apply ceqv + apply (rename_tac queue' newqueue) + apply (rule ccorres_Guard_Seq) + apply (ctac add: setQueue_ccorres) + apply (rule ccorres_split_nothrow_novcg_dc) + apply ctac + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue')" + and R="\s. \ tcbQueueEmpty queue' \ tcb_at' (the (tcbQueueHead queue')) s" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def split: option.splits) + apply ceqv + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: removeFromBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply vcg + apply (wpsimp wp: hoare_vcg_imp_lift') + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: hoare_vcg_imp_lift') + apply vcg + apply ((wpsimp wp: tcbQueueRemove_tcb_at'_head | wp (once) hoare_drop_imps)+)[1] + apply (vcg exspec=tcb_queue_remove_modifies) + apply wpsimp + apply vcg + apply vcg + apply (rule conseqPre, vcg) apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, - simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] - apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: cready_queues_index_to_C_def2 numPriorities_def) - apply (frule(2) obj_at_cslift_tcb[OF valid_queues_obj_at'D]) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="{t, v}" for v in state_relation_queue_update_helper, - (simp | rule globals.equality)+, - simp_all add: typ_heap_simps if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 upd_unless_null_def - cong: if_cong split del: if_split - del: fun_upd_restrict_conv)[1] - apply simp - apply (rule conjI) - apply clarsimp - apply (drule_tac s="tcb_ptr_to_ctcb_ptr t" in sym, simp) - apply (clarsimp simp add: 
fun_upd_twist) - prefer 3 - apply (simp add: obj_at'_weakenE[OF _ TrueI]) - apply (rule disjI1, erule valid_queues_obj_at'D) - subgoal by simp - subgoal by simp - subgoal by (fastforce simp: tcb_null_sched_ptrs_def) - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - by (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def inQ_def - dest!: valid_queues_obj_at'D) + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) obj_at_cslift_tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + by (fastforce simp: word_less_nat_alt + cready_queues_index_to_C_def2 ctcb_relation_def + typ_heap_simps le_maxDomain_eq_less_numDomains(2) unat_trans_ucast_helper) qed -lemma true_eq_from_bool [simp]: - "(scast true = from_bool P) = P" - by (simp add: true_def from_bool_def split: bool.splits) - lemma isStopped_spec: "\s. \ \ ({s} \ {s. cslift s (thread_' s) \ None}) Call isStopped_'proc {s'. ret__unsigned_long_' s' = from_bool (tsType_CL (thread_state_lift (tcbState_C (the (cslift s (thread_' s))))) \ @@ -1972,8 +1590,11 @@ lemma tcb_at_1: done lemma rescheduleRequired_ccorres: - "ccorres dc xfdc (valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs') - UNIV [] rescheduleRequired (Call rescheduleRequired_'proc)" + "ccorres dc xfdc + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' + and pspace_aligned' and pspace_distinct') + UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" apply cinit apply (rule ccorres_symb_exec_l) apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) @@ -2083,10 +1704,12 @@ lemma cguard_UNIV: by fastforce lemma lookupBitmapPriority_le_maxPriority: - "\ ksReadyQueuesL1Bitmap s d \ 0 ; valid_queues s \ + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + \d p. 
d > maxDomain \ p > maxPriority \ tcbQueueEmpty (ksReadyQueues s (d, p)); + valid_bitmaps s \ \ lookupBitmapPriority d s \ maxPriority" - unfolding valid_queues_def valid_queues_no_bitmap_def - by (fastforce dest!: bitmapQ_from_bitmap_lookup bitmapQ_ksReadyQueuesI intro: ccontr) + apply (clarsimp simp: valid_bitmaps_def) + by (fastforce dest!: bitmapQ_from_bitmap_lookup bitmapQ_ksReadyQueuesI intro: ccontr) lemma rf_sr_ksReadyQueuesL1Bitmap_not_zero: "\ (\, s') \ rf_sr ; d \ maxDomain ; ksReadyQueuesL1Bitmap_' (globals s').[unat d] \ 0 \ @@ -2096,10 +1719,10 @@ lemma rf_sr_ksReadyQueuesL1Bitmap_not_zero: done lemma ksReadyQueuesL1Bitmap_word_log2_max: - "\valid_queues s; ksReadyQueuesL1Bitmap s d \ 0\ - \ word_log2 (ksReadyQueuesL1Bitmap s d) < l2BitmapSize" - unfolding valid_queues_def - by (fastforce dest: word_log2_nth_same bitmapQ_no_L1_orphansD) + "\valid_bitmaps s; ksReadyQueuesL1Bitmap s d \ 0\ + \ word_log2 (ksReadyQueuesL1Bitmap s d) < l2BitmapSize" + unfolding valid_bitmaps_def + by (fastforce dest: word_log2_nth_same bitmapQ_no_L1_orphansD) lemma word_log2_max_word64[simp]: "word_log2 (w :: 64 word) < 64" @@ -2107,7 +1730,7 @@ lemma word_log2_max_word64[simp]: by (simp add: word_size) lemma rf_sr_ksReadyQueuesL2Bitmap_simp: - "\ (\, s') \ rf_sr ; d \ maxDomain ; valid_queues \ ; ksReadyQueuesL1Bitmap \ d \ 0 \ + "\ (\, s') \ rf_sr ; d \ maxDomain ; valid_bitmaps \ ; ksReadyQueuesL1Bitmap \ d \ 0 \ \ ksReadyQueuesL2Bitmap_' (globals s').[unat d].[word_log2 (ksReadyQueuesL1Bitmap \ d)] = ksReadyQueuesL2Bitmap \ (d, word_log2 (ksReadyQueuesL1Bitmap \ d))" apply (frule rf_sr_cbitmap_L2_relation) @@ -2116,9 +1739,9 @@ lemma rf_sr_ksReadyQueuesL2Bitmap_simp: done lemma ksReadyQueuesL2Bitmap_nonzeroI: - "\ d \ maxDomain ; valid_queues s ; ksReadyQueuesL1Bitmap s d \ 0 \ + "\ d \ maxDomain ; valid_bitmaps s ; ksReadyQueuesL1Bitmap s d \ 0 \ \ ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d))) \ 0" - unfolding valid_queues_def + unfolding valid_bitmaps_def apply clarsimp apply (frule bitmapQ_no_L1_orphansD) apply (erule word_log2_nth_same) @@ -2276,11 +1899,6 @@ lemma getCurDomain_maxDom_ccorres_dom_': rf_sr_ksCurDomain) done -lemma rf_sr_cscheduler_action_relation: - "(s, s') \ rf_sr - \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" - by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - lemma threadGet_get_obj_at'_has_domain: "\ tcb_at' t \ threadGet tcbDomain t \\rv. obj_at' (\tcb. rv = tcbDomain tcb) t\" by (wp threadGet_obj_at') (simp add: obj_at'_def) @@ -2288,16 +1906,15 @@ lemma threadGet_get_obj_at'_has_domain: lemma possibleSwitchTo_ccorres: shows "ccorres dc xfdc - (valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t and (\s. ksCurDomain s \ maxDomain) - and valid_objs') + and valid_objs' and pspace_aligned' and pspace_distinct') ({s. target_' s = tcb_ptr_to_ctcb_ptr t} \ UNIV) [] (possibleSwitchTo t ) (Call possibleSwitchTo_'proc)" supply if_split [split del] supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] @@ -2322,7 +1939,7 @@ lemma possibleSwitchTo_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule_tac R="\s. 
sact = ksSchedulerAction s \ weak_sch_act_wf (ksSchedulerAction s) s" in ccorres_cond) - apply (fastforce dest!: rf_sr_cscheduler_action_relation pred_tcb_at' tcb_at_not_NULL + apply (fastforce dest!: rf_sr_sched_action_relation pred_tcb_at' tcb_at_not_NULL simp: cscheduler_action_relation_def weak_sch_act_wf_def split: scheduler_action.splits) apply (ctac add: rescheduleRequired_ccorres) @@ -2339,8 +1956,8 @@ lemma possibleSwitchTo_ccorres: lemma scheduleTCB_ccorres': "ccorres dc xfdc - (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_queues - and valid_objs') + (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] (do (runnable, curThread, action) \ do @@ -2353,6 +1970,7 @@ lemma scheduleTCB_ccorres': rescheduleRequired od) (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] apply (cinit' lift: tptr_') apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) @@ -2382,32 +2000,35 @@ lemma scheduleTCB_ccorres': apply (clarsimp simp: typ_heap_simps) apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def tcb_at_not_NULL split: scheduler_action.split_asm) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def) apply wp+ - apply (simp add: isRunnable_def isStopped_def) - apply wp + apply (simp add: isRunnable_def isStopped_def) apply (simp add: guard_is_UNIV_def) apply clarsimp apply (clarsimp simp: st_tcb_at'_def obj_at'_def weak_sch_act_wf_def) done lemma scheduleTCB_ccorres_valid_queues'_pre: - "ccorresG rf_sr \ dc xfdc (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs') - (UNIV \ \\tptr = tcb_ptr_to_ctcb_ptr thread\) [] - (do (runnable, curThread, action) \ do - runnable \ isRunnable thread; - curThread \ getCurThread; - action \ getSchedulerAction; - return (runnable, curThread, action) od; - when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired - od) - (Call scheduleTCB_'proc)" - apply (cinit' lift: tptr_' simp del: word_neq_0_conv) + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread + and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] + apply (cinit' lift: tptr_') apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) defer @@ -2428,14 +2049,14 @@ lemma scheduleTCB_ccorres_valid_queues'_pre: \ weak_sch_act_wf (ksSchedulerAction s) s" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def if_1_0_0 split del: if_split) + apply (clarsimp simp: return_def) apply (clarsimp simp: from_bool_0 rf_sr_ksCurThread) apply (rule conjI) apply (clarsimp simp: st_tcb_at'_def) apply (drule (1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def weak_sch_act_wf_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] apply (fold_subgoals (prefix))[6] subgoal premises prems using prems by (clarsimp simp: rf_sr_def cstate_relation_def Let_def @@ -2446,17 +2067,17 @@ lemma scheduleTCB_ccorres_valid_queues'_pre: split: scheduler_action.split_asm) apply wp+ apply (simp add: isRunnable_def isStopped_def) - apply wp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: st_tcb_at'_def obj_at'_def) done - lemmas scheduleTCB_ccorres_valid_queues' = scheduleTCB_ccorres_valid_queues'_pre[unfolded bind_assoc return_bind split_conv] lemma rescheduleRequired_ccorres_valid_queues'_simple: - "ccorresG rf_sr \ dc xfdc (valid_queues' and sch_act_simple) UNIV [] rescheduleRequired (Call rescheduleRequired_'proc)" + "ccorresG rf_sr \ dc xfdc + sch_act_simple UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" apply cinit apply (rule ccorres_symb_exec_l) apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) @@ -2489,16 +2110,18 @@ lemma rescheduleRequired_ccorres_valid_queues'_simple: split: scheduler_action.split_asm) lemma scheduleTCB_ccorres_valid_queues'_pre_simple: - "ccorresG rf_sr \ dc xfdc (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and sch_act_simple) - (UNIV \ \\tptr = tcb_ptr_to_ctcb_ptr thread\) [] - (do (runnable, curThread, action) \ do - runnable \ isRunnable thread; - curThread \ getCurThread; - action \ getSchedulerAction; - return (runnable, curThread, action) od; - when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired - od) - (Call scheduleTCB_'proc)" + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and sch_act_simple) + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] apply (cinit' lift: tptr_' simp del: word_neq_0_conv) apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) @@ -2528,7 +2151,7 @@ lemma 
scheduleTCB_ccorres_valid_queues'_pre_simple: apply (clarsimp simp: typ_heap_simps) apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def tcb_at_not_NULL @@ -2536,11 +2159,10 @@ lemma scheduleTCB_ccorres_valid_queues'_pre_simple: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def) apply wp+ - apply (simp add: isRunnable_def isStopped_def) - apply wp + apply (simp add: isRunnable_def isStopped_def) apply (simp add: guard_is_UNIV_def) apply clarsimp - apply (clarsimp simp: st_tcb_at'_def obj_at'_def valid_queues'_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) done lemmas scheduleTCB_ccorres_valid_queues'_simple @@ -2560,47 +2182,34 @@ lemma threadSet_weak_sch_act_wf_runnable': apply (clarsimp) done -lemma threadSet_valid_queues_and_runnable': "\\s. valid_queues s \ (\p. thread \ set (ksReadyQueues s p) \ runnable' st)\ - threadSet (tcbState_update (\_. st)) thread - \\rv s. valid_queues s\" - apply (wp threadSet_valid_queues) - apply (clarsimp simp: inQ_def) -done - lemma setThreadState_ccorres[corres]: "ccorres dc xfdc - (\s. tcb_at' thread s \ valid_queues s \ valid_objs' s \ valid_tcb_state' st s \ - (ksSchedulerAction s = SwitchToThread thread \ runnable' st) \ - (\p. thread \ set (ksReadyQueues s p) \ runnable' st) \ - sch_act_wf (ksSchedulerAction s) s) - ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} + (\s. tcb_at' thread s \ valid_objs' s \ valid_tcb_state' st s + \ (ksSchedulerAction s = SwitchToThread thread \ runnable' st) + \ sch_act_wf (ksSchedulerAction s) s \ pspace_aligned' s \ pspace_distinct' s) + ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) hs - (setThreadState st thread) (Call setThreadState_'proc)" + (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres) - apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues_and_runnable' - threadSet_valid_objs') - by (clarsimp simp: weak_sch_act_wf_def valid_queues_def valid_tcb'_tcbState_update) - -lemma threadSet_valid_queues'_and_not_runnable': "\tcb_at' thread and valid_queues' and (\s. (\ runnable' st))\ - threadSet (tcbState_update (\_. st)) thread - \\rv. tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' \" - - apply (wp threadSet_valid_queues' threadSet_tcbState_st_tcb_at') - apply (clarsimp simp: pred_neg_def valid_queues'_def inQ_def)+ -done + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs') + apply (clarsimp simp: weak_sch_act_wf_def valid_tcb'_tcbState_update) + done lemma setThreadState_ccorres_valid_queues': - "ccorres dc xfdc - (\s. tcb_at' thread s \ valid_queues' s \ \ runnable' st \ weak_sch_act_wf (ksSchedulerAction s) s \ Invariants_H.valid_queues s \ (\p. thread \ set (ksReadyQueues s p)) \ sch_act_not thread s \ valid_objs' s \ valid_tcb_state' st s) + "ccorres dc xfdc + (\s. 
tcb_at' thread s \ \ runnable' st \ weak_sch_act_wf (ksSchedulerAction s) s + \ sch_act_not thread s \ valid_objs' s \ valid_tcb_state' st s + \ pspace_aligned' s \ pspace_distinct' s) ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} - \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] - (setThreadState st thread) (Call setThreadState_'proc)" + \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres_valid_queues') - apply (wp threadSet_valid_queues'_and_not_runnable' threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues_and_runnable' threadSet_valid_objs') + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs' + threadSet_tcbState_st_tcb_at') by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) lemma simp_list_case_return: @@ -2622,28 +2231,26 @@ lemma cancelSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (ctac (no_vcg) add: cancelSignal_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') - apply ((wp setNotification_nosch setNotification_ksQ hoare_vcg_all_lift set_ntfn_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+)[1] - apply (simp add: "StrictC'_thread_state_defs") + apply ((wp setNotification_nosch hoare_vcg_all_lift set_ntfn_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+)[1] + apply (simp add: ThreadState_defs) apply (rule conjI, clarsimp, rule conjI, clarsimp) apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) subgoal by ((auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def - isTS_defs cte_wp_at_ctes_of "StrictC'_thread_state_defs" + isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ntfn'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] | - clarsimp simp: eq_commute)+) + | clarsimp simp: eq_commute)+) apply (clarsimp) apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) apply (frule (2) ntfn_blocked_in_queueD) by (auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def - isTS_defs cte_wp_at_ctes_of "StrictC'_thread_state_defs" valid_ntfn'_def + isTS_defs cte_wp_at_ctes_of valid_ntfn'_def cthread_state_relation_def sch_act_wf_weak isWaitingNtfn_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: ntfn.splits option.splits | clarsimp simp: eq_commute | drule_tac x=thread in bspec)+ (* FIXME: MOVE *) -lemma ccorres_pre_getEndpoint [corres_pre]: +lemma ccorres_pre_getEndpoint [ccorres_pre]: assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" shows "ccorres r xf (ep_at' p and (\s. \ep. ko_at' ep p s \ P ep s)) @@ -2785,8 +2392,8 @@ lemma cpspace_relation_ep_update_an_ep: and pal: "pspace_aligned' s" "pspace_distinct' s" and others: "\epptr' ep'. 
\ ko_at' ep' epptr' s; epptr' \ epptr; ep' \ IdleEP \ \ set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using cp koat pal rel unfolding cmap_relation_def apply - apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs) @@ -2808,8 +2415,8 @@ lemma cpspace_relation_ep_update_ep: and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using invs apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) apply clarsimp+ @@ -2821,15 +2428,15 @@ lemma cpspace_relation_ep_update_ep': fixes ep :: "endpoint" and ep' :: "endpoint" and epptr :: "machine_word" and s :: "kernel_state" defines "qs \ if (isSendEP ep' \ isRecvEP ep') then set (epQueue ep') else {}" - defines "s' \ s\ksPSpace := ksPSpace s(epptr \ KOEndpoint ep')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(epptr \ KOEndpoint ep')\" assumes koat: "ko_at' ep epptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and srs: "sym_refs (state_refs_of' s')" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" proof - from koat have koat': "ko_at' ep' epptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -2908,7 +2515,7 @@ lemma cancelIPC_ccorres_helper: apply (rule allI) apply (rule conseqPre) apply vcg - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ep_blocked_in_queueD) apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) apply (elim conjE) @@ -2926,7 +2533,7 @@ lemma cancelIPC_ccorres_helper: apply assumption+ apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) - apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split simp del: comp_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) apply (frule null_ep_queue [simplified comp_def] null_ep_queue) apply (intro impI conjI allI) \ \empty case\ @@ -2942,23 +2549,20 @@ lemma cancelIPC_ccorres_helper: cpspace_relation_def update_ep_map_tos typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - subgoal by (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - subgoal by (simp add: cendpoint_relation_def Let_def EPState_Idle_def) - subgoal by simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - 
apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - subgoal by simp - apply (erule (1) map_to_ko_atI') - apply (simp add: heap_to_user_data_def Let_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + subgoal by (simp add: cendpoint_relation_def Let_def EPState_Idle_def) + subgoal by simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + subgoal by simp + apply (erule (1) map_to_ko_atI') + apply (simp add: heap_to_user_data_def Let_def) subgoal by (clarsimp simp: carch_state_relation_def carch_globals_def packed_heap_update_collapse_hrs) subgoal by (simp add: cmachine_state_relation_def) @@ -2979,27 +2583,16 @@ lemma cancelIPC_ccorres_helper: cpspace_relation_def update_ep_map_tos typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - subgoal by (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def split: endpoint.splits split del: if_split) - \ \recv case\ - apply (subgoal_tac "pspace_canonical' \") - prefer 2 - apply fastforce - apply (clarsimp - simp: h_t_valid_clift_Some_iff ctcb_offset_defs - tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask - tcb_queue_relation'_next_sign tcb_queue_relation'_prev_sign - simp flip: canonical_bit_def - cong: tcb_queue_relation'_cong) - subgoal by (intro impI conjI; simp) - \ \send case\ - apply (subgoal_tac "pspace_canonical' \") - prefer 2 - apply fastforce + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def split: endpoint.splits split del: if_split) + \ \recv case\ + apply (subgoal_tac "pspace_canonical' \") + prefer 2 + apply fastforce apply (clarsimp simp: h_t_valid_clift_Some_iff ctcb_offset_defs tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask @@ -3007,16 +2600,24 @@ lemma cancelIPC_ccorres_helper: simp flip: canonical_bit_def cong: tcb_queue_relation'_cong) subgoal by (intro impI conjI; simp) - subgoal by simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + \ \send case\ + apply (subgoal_tac "pspace_canonical' \") + prefer 2 + apply fastforce + apply (clarsimp + simp: h_t_valid_clift_Some_iff ctcb_offset_defs + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask + tcb_queue_relation'_next_sign tcb_queue_relation'_prev_sign + simp flip: canonical_bit_def + cong: tcb_queue_relation'_cong) + subgoal by (intro impI conjI; simp) + subgoal by simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], 
assumption+) + apply simp + apply (erule (1) map_to_ko_atI') subgoal by (clarsimp simp: carch_state_relation_def carch_globals_def packed_heap_update_collapse_hrs) subgoal by (simp add: cmachine_state_relation_def) @@ -3079,8 +2680,7 @@ lemma cancelIPC_ccorres1: apply (rule_tac P="rv' = thread_state_to_tsType rv" in ccorres_gen_asm2) apply wpc \ \BlockedOnReceive\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs cong: call_ignore_cong) - apply (fold dc_def) + apply (simp add: word_sle_def ccorres_cond_iffs cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -3096,7 +2696,7 @@ lemma cancelIPC_ccorres1: apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+ - apply (simp add: "StrictC'_thread_state_defs") + apply (simp add: ThreadState_defs) apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -3106,14 +2706,12 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \BlockedOnReply case\ - apply (simp add: "StrictC'_thread_state_defs" ccorres_cond_iffs + apply (simp add: ThreadState_defs ccorres_cond_iffs Collect_False Collect_True word_sle_def cong: call_ignore_cong del: Collect_const) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr - apply (unfold comp_def)[1] apply csymbr apply (rule ccorres_move_c_guard_tcb)+ apply (rule ccorres_split_nothrow_novcg) @@ -3149,14 +2747,12 @@ lemma cancelIPC_ccorres1: apply (rule ccorres_Cond_rhs) apply (simp add: nullPointer_def when_def) apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_stateAssert]) - apply (simp only: dc_def[symmetric]) apply (rule ccorres_symb_exec_r) apply (ctac add: cteDeleteOne_ccorres[where w1="scast cap_reply_cap"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) apply (wp | simp)+ - apply (simp add: when_def nullPointer_def dc_def[symmetric]) apply (rule ccorres_return_Skip) apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs) @@ -3169,7 +2765,8 @@ lemma cancelIPC_ccorres1: apply (clarsimp simp add: guard_is_UNIV_def tcbReplySlot_def Kernel_C.tcbReply_def tcbCNodeEntries_def) \ \BlockedOnNotification\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong) apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg)) apply clarsimp @@ -3178,10 +2775,12 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Running, Inactive, and Idle\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip)+ \ \BlockedOnSend\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def ccorres_cond_iffs + cong: call_ignore_cong) \ \clag\ apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -3197,7 +2796,7 @@ lemma cancelIPC_ccorres1: apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split 
del:if_split)+ - apply (simp add: "StrictC'_thread_state_defs") + apply (simp add: ThreadState_defs) apply clarsimp apply (rule conseqPre, vcg, rule subset_refl) apply (rule conseqPre, vcg) @@ -3207,7 +2806,8 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Restart\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip) \ \Post wp proofs\ apply vcg @@ -3230,37 +2830,35 @@ lemma cancelIPC_ccorres1: subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (frule (2) ep_blocked_in_queueD_recv) apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of isRecvEP_def cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits endpoint.splits) + split: thread_state.splits endpoint.splits) apply (rule conjI) apply (clarsimp simp: inQ_def) - apply (rule conjI) - apply clarsimp apply clarsimp apply (rule conjI) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (rule conjI) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (frule (2) ep_blocked_in_queueD_send) apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of isSendEP_def cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits endpoint.splits)[1] + split: thread_state.splits endpoint.splits)[1] apply (auto simp: isTS_defs cthread_state_relation_def typ_heap_simps weak_sch_act_wf_def) apply (case_tac ts, auto simp: isTS_defs cthread_state_relation_def typ_heap_simps) diff --git a/proof/crefine/RISCV64/Ipc_C.thy b/proof/crefine/RISCV64/Ipc_C.thy index 952ca781d5..1bbac3ad28 100644 --- a/proof/crefine/RISCV64/Ipc_C.thy +++ b/proof/crefine/RISCV64/Ipc_C.thy @@ -1,6 +1,7 @@ (* - * Copyright 2014, General Dynamics C4 Systems + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only *) @@ -404,6 +405,7 @@ lemma handleArchFaultReply': msg \ getMRs s sb tag; handleArchFaultReply f r (msgLabel tag) msg od) x' = handleArchFaultReply' f s r tag x'" + supply empty_fail_cond[simp] apply (unfold handleArchFaultReply'_def getMRs_def msgMaxLength_def bit_def msgLengthBits_def msgRegisters_unfold fromIntegral_simp1 fromIntegral_simp2 @@ -504,10 +506,8 @@ lemma 
getSanitiseRegisterInfo_moreMapM_comm: lemma monadic_rewrite_threadGet_return: "monadic_rewrite True False (tcb_at' r) (return x) (do t \ threadGet f r; return x od)" - apply (rule monadic_rewrite_symb_exec_r') - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) done context begin interpretation Arch . @@ -522,18 +522,14 @@ end lemma monadic_rewrite_getSanitiseRegisterInfo_return: "monadic_rewrite True False (tcb_at' r) (return x) (do t \ getSanitiseRegisterInfo r; return x od)" - apply (rule monadic_rewrite_symb_exec_r') - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) done lemma monadic_rewrite_getSanitiseRegisterInfo_drop: "monadic_rewrite True False (tcb_at' r) (d) (do t \ getSanitiseRegisterInfo r; d od)" - apply (rule monadic_rewrite_symb_exec_r') - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) done context kernel_m begin interpretation Arch . @@ -554,6 +550,7 @@ lemma handleFaultReply': msg \ getMRs s sb tag; handleFaultReply f r (msgLabel tag) msg od) (handleFaultReply' f s r)" + supply empty_fail_cond[simp] apply (unfold handleFaultReply'_def getMRs_def msgMaxLength_def bit_def msgLengthBits_def msgRegisters_unfold fromIntegral_simp1 fromIntegral_simp2 @@ -568,41 +565,41 @@ lemma handleFaultReply': zip_Cons RISCV64_H.exceptionMessage_def RISCV64.exceptionMessage_def mapM_x_Cons mapM_x_Nil) - apply (rule monadic_rewrite_symb_exec_l, wp+) - apply (rule_tac P="tcb_at' s and tcb_at' r" in monadic_rewrite_inst) - apply (case_tac rv; (case_tac "msgLength tag < scast n_msgRegisters", - (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm - asUser_getRegister_getSanitiseRegisterInfo_comm - asUser_getRegister_discarded asUser_mapMloadWordUser_threadGet_comm - asUser_comm[OF neq] asUser_getRegister_threadGet_comm - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - | wp asUser_typ_ats lookupIPCBuffer_inv )+)+)) - apply wp + apply (rule monadic_rewrite_symb_exec_l) + apply (rule_tac P="tcb_at' s and tcb_at' r" in monadic_rewrite_inst) + apply (case_tac sb; (case_tac "msgLength tag < scast n_msgRegisters", + (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm + asUser_getRegister_getSanitiseRegisterInfo_comm + asUser_getRegister_discarded asUser_mapMloadWordUser_threadGet_comm + asUser_comm[OF neq] asUser_getRegister_threadGet_comm + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp)+)+)) + apply wp+ (* capFault *) - apply (rule 
monadic_rewrite_symb_exec_l, (wp empty_fail_asUser empty_fail_getRegister)+)+ - apply(case_tac rv) - apply (clarsimp - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - | wp mapM_x_mapM_valid[OF mapM_x_wp'[OF loadWordUser_inv]] - empty_fail_loadWordUser)+ + apply (repeat 5 \rule monadic_rewrite_symb_exec_l\) (* until case sb *) + apply (case_tac sb) + apply (clarsimp + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp mapM_x_mapM_valid[OF mapM_x_wp'[OF loadWordUser_inv]] + empty_fail_loadWordUser)+ (* UnknownSyscallException *) apply (simp add: zip_append2 mapM_x_append asUser_bind_distrib split_def bind_assoc) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_do_flip) apply (rule monadic_rewrite_bind_tail) apply (rule_tac P="inj (case_bool s r)" in monadic_rewrite_gen_asm) apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse]) - apply (rule monadic_rewrite_weaken[where F=False and E=True], simp) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) apply (rule isolate_thread_actions_rewrite_bind bool.simps setRegister_simple zipWithM_setRegister_simple @@ -622,90 +619,90 @@ lemma handleFaultReply': upto_enum_word mapM_x_Cons mapM_x_Nil) apply (simp add: getSanitiseRegisterInfo_moreMapM_comm asUser_getRegister_getSanitiseRegisterInfo_comm getSanitiseRegisterInfo_lookupIPCBuffer_comm) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind_tail [where Q="\_. tcb_at' r"]) - apply (case_tac sb) + apply (rule monadic_rewrite_bind_tail [where Q="\_. tcb_at' r"]) + apply (case_tac sb) + apply (case_tac "msgLength tag < scast n_msgRegisters") + apply (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + | wp asUser_typ_ats)+)+ apply (case_tac "msgLength tag < scast n_msgRegisters") apply (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - asUser_getRegister_discarded - asUser_comm[OF neq] take_zip - word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - | wp asUser_typ_ats)+)+ - apply (case_tac "msgLength tag < scast n_msgRegisters") - apply (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - zipWithM_x_Nil - asUser_getRegister_discarded - asUser_comm[OF neq] take_zip - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_threadGet_return - monadic_rewrite_getSanitiseRegisterInfo_return - | wp asUser_typ_ats mapM_wp')+)+ - apply (simp add: n_msgRegisters_def word_le_nat_alt 
n_syscallMessage_def - linorder_not_less syscallMessage_unfold) - apply (clarsimp | frule neq0_conv[THEN iffD2, THEN not0_implies_Suc, - OF order_less_le_trans, rotated])+ - apply (subgoal_tac "\n :: machine_word. n \ scast n_syscallMessage \ [n .e. msgMaxLength] - = [n .e. scast n_syscallMessage] - @ [scast n_syscallMessage + 1 .e. msgMaxLength]") - apply (simp only: upto_enum_word[where y="scast n_syscallMessage :: machine_word"] - upto_enum_word[where y="scast n_syscallMessage + 1 :: machine_word"]) - apply (clarsimp simp: bind_assoc asUser_bind_distrib asUser_getRegister_threadGet_comm - mapM_x_Cons mapM_x_Nil threadGet_discarded - asUser_comm [OF neq] asUser_getRegister_discarded - submonad_asUser.fn_stateAssert take_zip - bind_subst_lift [OF submonad_asUser.stateAssert_fn] - word_less_nat_alt RISCV64_H.sanitiseRegister_def - split_def n_msgRegisters_def msgMaxLength_def - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - word_size msgLengthBits_def n_syscallMessage_def Let_def - split del: if_split - cong: if_weak_cong register.case_cong) - - - apply (rule monadic_rewrite_bind_tail)+ - apply (subst (2) upto_enum_word) - apply (case_tac "ma < unat n_syscallMessage - 4") - - apply (erule disjE[OF nat_less_cases'], - ( clarsimp simp: n_syscallMessage_def bind_assoc asUser_bind_distrib - mapM_x_Cons mapM_x_Nil zipWithM_x_mapM_x mapM_Cons - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - asUser_loadWordUser_comm loadWordUser_discarded asUser_return - zip_take_triv2 msgMaxLength_def - no_fail_stateAssert - cong: if_weak_cong - | simp - | rule monadic_rewrite_bind_tail - monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - monadic_rewrite_threadGet_return - monadic_rewrite_getSanitiseRegisterInfo_return - monadic_rewrite_getSanitiseRegisterInfo_drop - | wp asUser_typ_ats empty_fail_loadWordUser)+)+ - apply (clarsimp simp: upto_enum_word word_le_nat_alt simp del: upt.simps cong: if_weak_cong) - apply (cut_tac i="unat n" and j="Suc (unat (scast n_syscallMessage :: machine_word))" - and k="Suc msgMaxLength" in upt_add_eq_append') - apply (simp add: n_syscallMessage_def) - apply (simp add: n_syscallMessage_def msgMaxLength_unfold) - apply (simp add: n_syscallMessage_def msgMaxLength_def - msgLengthBits_def shiftL_nat - del: upt.simps upt_rec_numeral) - apply (simp add: upto_enum_word cong: if_weak_cong) - apply wp+ + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + zipWithM_x_Nil + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_threadGet_return + monadic_rewrite_getSanitiseRegisterInfo_return + | wp asUser_typ_ats mapM_wp')+)+ + apply (simp add: n_msgRegisters_def word_le_nat_alt n_syscallMessage_def + linorder_not_less syscallMessage_unfold) + apply (clarsimp | frule neq0_conv[THEN iffD2, THEN not0_implies_Suc, + OF order_less_le_trans, rotated])+ + apply (subgoal_tac "\n :: machine_word. n \ scast n_syscallMessage \ [n .e. msgMaxLength] + = [n .e. scast n_syscallMessage] + @ [scast n_syscallMessage + 1 .e. 
msgMaxLength]") + apply (simp only: upto_enum_word[where y="scast n_syscallMessage :: machine_word"] + upto_enum_word[where y="scast n_syscallMessage + 1 :: machine_word"]) + apply (clarsimp simp: bind_assoc asUser_bind_distrib asUser_getRegister_threadGet_comm + mapM_x_Cons mapM_x_Nil threadGet_discarded + asUser_comm [OF neq] asUser_getRegister_discarded + submonad_asUser.fn_stateAssert take_zip + bind_subst_lift [OF submonad_asUser.stateAssert_fn] + word_less_nat_alt RISCV64_H.sanitiseRegister_def + split_def n_msgRegisters_def msgMaxLength_def + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_size msgLengthBits_def n_syscallMessage_def Let_def + split del: if_split + cong: if_weak_cong register.case_cong) + + + apply (rule monadic_rewrite_bind_tail)+ + apply (subst (2) upto_enum_word) + apply (case_tac "ma < unat n_syscallMessage - 4") + + apply (erule disjE[OF nat_less_cases'], + ( clarsimp simp: n_syscallMessage_def bind_assoc asUser_bind_distrib + mapM_x_Cons mapM_x_Nil zipWithM_x_mapM_x mapM_Cons + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_loadWordUser_comm loadWordUser_discarded asUser_return + zip_take_triv2 msgMaxLength_def + no_fail_stateAssert + cong: if_weak_cong + | simp + | rule monadic_rewrite_bind_tail + monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + monadic_rewrite_threadGet_return + monadic_rewrite_getSanitiseRegisterInfo_return + monadic_rewrite_getSanitiseRegisterInfo_drop + | wp asUser_typ_ats empty_fail_loadWordUser)+)+ + apply (clarsimp simp: upto_enum_word word_le_nat_alt simp del: upt.simps cong: if_weak_cong) + apply (cut_tac i="unat n" and j="Suc (unat (scast n_syscallMessage :: machine_word))" + and k="Suc msgMaxLength" in upt_add_eq_append') + apply (simp add: n_syscallMessage_def) + apply (simp add: n_syscallMessage_def msgMaxLength_unfold) + apply (simp add: n_syscallMessage_def msgMaxLength_def + msgLengthBits_def shiftL_nat + del: upt.simps upt_rec_numeral) + apply (simp add: upto_enum_word cong: if_weak_cong) + apply wp+ (* ArchFault *) apply (simp add: neq inj_case_bool split: bool.split) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_is_refl) apply (rule ext) apply (unfold handleArchFaultReply'[symmetric] getMRs_def msgMaxLength_def @@ -723,7 +720,7 @@ begin (* FIXME: move *) lemma ccorres_merge_return: - "ccorres (\a c. r (f a) c) xf P P' hs H C \ + "ccorres (r \ f) xf P P' hs H C \ ccorres r xf P P' hs (do x \ H; return (f x) od) C" by (rule ccorres_return_into_rel) @@ -1198,7 +1195,7 @@ lemma setMRs_syscall_error_ccorres: | wp hoare_case_option_wp | (simp del: Collect_const, vcg exspec=setMR_modifies) )+ - apply (simp add: msgMaxLength_unfold if_1_0_0 true_def false_def) + apply (simp add: msgMaxLength_unfold) apply (clarsimp split:if_split_asm simp:syscall_error_to_H_def map_option_Some_eq2 ucast_and_mask ucast_nat_def) apply (simp add: msgFromLookupFailure_def split: lookup_failure.split @@ -1350,10 +1347,6 @@ shows apply (auto split: if_split) done -declare zipWith_Nil2[simp] - -declare zipWithM_x_Nil2[simp] - lemma getRestartPC_ccorres [corres]: "ccorres (=) ret__unsigned_long_' \ (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\) hs @@ -1366,18 +1359,14 @@ lemma getRestartPC_ccorres [corres]: done lemma asUser_tcbFault_obj_at: - "\obj_at' (\tcb. P (tcbFault tcb)) t\ asUser t' m - \\rv. obj_at' (\tcb. 
P (tcbFault tcb)) t\" + "asUser t' m \obj_at' (\tcb. P (tcbFault tcb)) t\" apply (simp add: asUser_def split_def) apply (wp threadGet_wp) apply (simp cong: if_cong) done lemma asUser_atcbContext_obj_at: - "t \ t' \ - \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - asUser t' m - \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + "t \ t' \ asUser t' m \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" apply (simp add: asUser_def split_def atcbContextGet_def atcbContextSet_def) apply (wp threadGet_wp) apply simp @@ -1597,7 +1586,7 @@ proof - apply ceqv apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) - apply (rule ccorres_rel_imp[where r = "\rv rv'. True", simplified]) + apply (rule ccorres_rel_imp[where r = dc, simplified]) apply (rule_tac F="\_. obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) RISCV64_H.syscallMessage = msg) sender and valid_pspace' and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" @@ -1651,7 +1640,7 @@ proof - apply (clarsimp simp: n_msgRegisters_def numeral_eqs mapM_cong[OF msg_aux, simplified numeral_eqs]) apply (subst mapM_x_return_gen[where w2="()"]) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp) apply (rule hoare_impI) apply (wp mapM_x_wp_inv setMR_atcbContext_obj_at[simplified atcbContextGet_def, simplified] @@ -1740,7 +1729,7 @@ proof - split: list.split_asm) apply (simp split: list.split) apply (wp setMR_tcbFault_obj_at asUser_inv[OF getRestartPC_inv] - hoare_case_option_wp static_imp_wp + hoare_case_option_wp hoare_weak_lift_imp | simp add: option_to_ptr_def guard_is_UNIVI seL4_VMFault_PrefetchFault_def seL4_VMFault_Addr_def @@ -1780,12 +1769,12 @@ proof - let ?obj_at_ft = "obj_at' (\tcb. tcbFault tcb = Some ft) sender" note symb_exec_r_fault = ccorres_symb_exec_r_known_rv_UNIV [where xf'=ret__unsigned_longlong_' and R="?obj_at_ft" and R'=UNIV] + note empty_fail_cond[simp] show ?thesis apply (unfold K_def) apply (intro ccorres_gen_asm) apply (cinit' lift: sender_' receiver_' receiveIPCBuffer_' simp: whileAnno_def) - apply (simp add: makeFaultMessage_def setMRs_to_setMR - del: Collect_const split del: if_split) + apply (simp add: makeFaultMessage_def setMRs_to_setMR) apply (rule_tac val="fault_to_fault_tag ft" in symb_exec_r_fault) apply (vcg, clarsimp) apply (drule(1) obj_at_cslift_tcb) @@ -1798,8 +1787,7 @@ proof - apply wpc apply (simp add: bind_assoc seL4_Fault_tag_defs ccorres_cond_iffs Collect_True Collect_False - zipWithM_mapM zip_append2 mapM_append - del: Collect_const split del: if_split) + zipWithM_mapM zip_append2 mapM_append) apply (rule ccorres_symb_exec_l) apply (rule ccorres_stateAssert) apply (rule_tac P="length msg = unat n_exceptionMessage" @@ -2041,7 +2029,7 @@ lemma doFaultTransfer_ccorres [corres]: apply ceqv apply csymbr apply (ctac (no_vcg, c_lines 2) add: setMessageInfo_ccorres) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply wp apply (simp add: badgeRegister_def RISCV64.badgeRegister_def RISCV64.capRegister_def Kernel_C.badgeRegister_def "StrictC'_register_defs") @@ -2079,7 +2067,7 @@ lemma unifyFailure_ccorres: assumes corr_ac: "ccorres (f \ r) xf P P' hs a c" shows "ccorres ((\_. 
dc) \ r) xf P P' hs (unifyFailure a) c" using corr_ac - apply (simp add: unifyFailure_def rethrowFailure_def const_def o_def + apply (simp add: unifyFailure_def rethrowFailure_def const_def handleE'_def throwError_def) apply (clarsimp simp: ccorres_underlying_def bind_def split_def return_def split: xstate.splits sum.splits) @@ -2550,7 +2538,7 @@ lemma transferCapsLoop_ccorres: \ \\destSlot = (if slots = [] then NULL else cte_Ptr (hd slots)) \ length slots \ 1 \ slots \ [0]\)" defines "is_the_ep \ \cap. isEndpointCap cap \ ep \ None \ capEPPtr cap = the ep" - defines "stable \ \scap excap. excap \ scap \ excap = maskedAsFull scap scap" + defines "stable_masked \ \scap excap. excap \ scap \ excap = maskedAsFull scap scap" defines "relative_at \ \scap slot s. cte_wp_at' (\cte. badge_derived' scap (cteCap cte) \ capASID scap = capASID (cteCap cte) \ @@ -2565,7 +2553,7 @@ lemma transferCapsLoop_ccorres: (\s. (\x \ set caps. s \' fst x \ cte_wp_at' (\cte. slots \ [] \ is_the_ep (cteCap cte) \ (fst x) = (cteCap cte)) (snd x) s - \ cte_wp_at' (\cte. fst x \ NullCap \ stable (fst x) (cteCap cte)) (snd x) s)) and + \ cte_wp_at' (\cte. fst x \ NullCap \ stable_masked (fst x) (cteCap cte)) (snd x) s)) and (\s. \ sl \ (set slots). cte_wp_at' (isNullCap o cteCap) sl s) and (\_. n + length caps \ 3 \ distinct slots )) (precond n mi slots) @@ -2631,22 +2619,22 @@ next by (simp add:relative_at_def) have stableD: - "\scap excap. stable scap excap + "\scap excap. stable_masked scap excap \ (badge_derived' scap excap \ capASID scap = capASID excap \ cap_asid_base' scap = cap_asid_base' excap \ cap_vptr' scap = cap_vptr' excap)" - apply (clarsimp simp:stable_def) + apply (clarsimp simp:stable_masked_def) apply (case_tac "excap = scap",simp+) apply (simp add:maskedAsFull_misc) done have stable_eq: - "\scap excap. \stable scap excap; isEndpointCap excap\ \ scap = excap" - by (simp add:isCap_simps stable_def maskedAsFull_def split:if_splits) + "\scap excap. \stable_masked scap excap; isEndpointCap excap\ \ scap = excap" + by (simp add:isCap_simps stable_masked_def maskedAsFull_def split:if_splits) have is_the_ep_stable: - "\a b. \a \ NullCap \ stable a b; \ is_the_ep b \ \ \ is_the_ep a" - apply (clarsimp simp:stable_def maskedAsFull_def is_the_ep_def isCap_simps split:if_splits) + "\a b. \a \ NullCap \ stable_masked a b; \ is_the_ep b \ \ \ is_the_ep a" + apply (clarsimp simp:stable_masked_def maskedAsFull_def is_the_ep_def isCap_simps split:if_splits) apply auto done @@ -2805,8 +2793,8 @@ next \ (\x\set slots. cte_wp_at' (isNullCap \ cteCap) x s) \ (\x\set xs'. s \' fst x \ cte_wp_at' (\c. is_the_ep (cteCap c) \ fst x = cteCap c) (snd x) s - \ cte_wp_at' (\c. fst x \ NullCap \ stable (fst x) (cteCap c)) (snd x) s)" - in hoare_post_imp_R) + \ cte_wp_at' (\c. 
fst x \ NullCap \ stable_masked (fst x) (cteCap c)) (snd x) s)" + in hoare_strengthen_postE_R) prefer 2 apply (clarsimp simp:cte_wp_at_ctes_of valid_pspace_mdb' valid_pspace'_splits valid_pspace_valid_objs' is_derived_capMasterCap image_def) @@ -2816,10 +2804,10 @@ next apply (rule conjI) apply (drule(1) bspec)+ apply (rule conjI | clarsimp)+ - apply (clarsimp simp:is_the_ep_def isCap_simps stable_def) + apply (clarsimp simp:is_the_ep_def isCap_simps stable_masked_def) apply (drule(1) bspec)+ apply (rule conjI | clarsimp)+ - apply (clarsimp simp:is_the_ep_def stable_def split:if_splits)+ + apply (clarsimp simp:is_the_ep_def stable_masked_def split:if_splits)+ apply (case_tac "a = cteCap cteb",clarsimp) apply (simp add:maskedAsFull_def split:if_splits) apply (simp add:maskedAsFull_again) @@ -2853,9 +2841,8 @@ next word_sle_def t2n_mask_eq_if) apply (rule conjI) apply (clarsimp simp: ccap_rights_relation_def cap_rights_to_H_def - false_def true_def to_bool_def allRights_def - excaps_map_def split_def - dest!: drop_n_foo interpret_excaps_eq) + allRights_def excaps_map_def split_def + dest!: drop_n_foo interpret_excaps_eq) apply (clarsimp simp:from_bool_def split:bool.splits) apply (case_tac "isEndpointCap (fst x)") apply (clarsimp simp: cap_get_tag_EndpointCap ep_cap_not_null cap_get_tag_isCap[symmetric]) @@ -2895,7 +2882,7 @@ next apply (rule conseqPre, vcg) apply (clarsimp split del: if_split) apply (clarsimp split del: if_split - simp add: Collect_const[symmetric] precond_def true_def false_def + simp add: Collect_const[symmetric] precond_def simp del: Collect_const) apply (rule HoarePartial.Seq[rotated] HoarePartial.Cond[OF order_refl] HoarePartial.Basic[OF order_refl] HoarePartial.Skip[OF order_refl] @@ -2922,14 +2909,14 @@ next apply (subgoal_tac "fst x = cteCap cte",simp) apply clarsimp apply (elim disjE) - apply (clarsimp simp:ep_cap_not_null stable_def) + apply (clarsimp simp:ep_cap_not_null stable_masked_def) apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) apply (clarsimp simp:valid_cap_simps' isCap_simps) apply (subgoal_tac "slots \ []") apply simp apply clarsimp apply (elim disjE) - apply (clarsimp simp:ep_cap_not_null stable_def) + apply (clarsimp simp:ep_cap_not_null stable_masked_def) apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) apply (clarsimp dest!:ccap_relation_lift simp:cap_get_tag_isCap is_the_ep_def) apply (clarsimp simp:valid_cap_simps' isCap_simps) @@ -3122,10 +3109,11 @@ lemma ccorres_sequenceE_while': Basic (\s. i_'_update (\_. i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -3153,6 +3141,7 @@ proof - let ?curr = "\s. current_extra_caps_' (globals s)" let ?EXCNONE = "{s. ret__unsigned_long_' s = scast EXCEPTION_NONE}" let ?interpret = "\v n. 
take n (array_to_list (excaprefs_C v))" + note empty_fail_cond[simp] show ?thesis apply (rule ccorres_gen_asm)+ apply (cinit(no_subst_asm) lift: thread_' bufferPtr_' info_' simp: whileAnno_def) @@ -3178,9 +3167,10 @@ proof - apply csymbr apply csymbr apply (rename_tac "lngth") - apply (simp add: mi_from_H_def mapME_def del: Collect_const cong: bind_apply_cong) + apply (unfold mapME_def)[1] + apply (simp add: mi_from_H_def del: Collect_const) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="length rv = unat word2" in ccorres_gen_asm) + apply (rule_tac P="length xs = unat word2" in ccorres_gen_asm) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_add_returnOk2, rule ccorres_splitE_novcg) @@ -3189,7 +3179,7 @@ proof - and Q="UNIV" and F="\n s. valid_pspace' s \ tcb_at' thread s \ (case buffer of Some x \ valid_ipc_buffer_ptr' x | _ \ \) s \ - (\m < length rv. user_word_at (rv ! m) + (\m < length xs. user_word_at (xs ! m) (x2 + (of_nat m + (msgMaxLength + 2)) * 8) s)" in ccorres_sequenceE_while') apply (simp add: split_def) @@ -3199,7 +3189,7 @@ proof - apply (rule_tac xf'=cptr_' in ccorres_abstract, ceqv) apply (ctac add: capFaultOnFailure_ccorres [OF lookupSlotForThread_ccorres']) - apply (rule_tac P="is_aligned rva 5" in ccorres_gen_asm) + apply (rule_tac P="is_aligned rv 5" in ccorres_gen_asm) apply (simp add: ccorres_cond_iffs liftE_bindE) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_getSlotCap]) apply (rule_tac P'="UNIV \ {s. excaps_map ys @@ -3220,7 +3210,7 @@ proof - apply (clarsimp simp: ccorres_cond_iffs) apply (rule_tac P= \ and P'="{x. errstate x= lu_ret___struct_lookupSlot_raw_ret_C \ - rv' = (rv ! length ys)}" + rv' = (xs ! length ys)}" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def) @@ -3228,9 +3218,9 @@ proof - apply (clarsimp simp: cfault_rel2_def) apply (clarsimp simp: cfault_rel_def) apply (simp add: seL4_Fault_CapFault_lift) - apply (clarsimp simp: is_cap_fault_def to_bool_def false_def) + apply (clarsimp simp: is_cap_fault_def) apply wp - apply (rule hoare_post_imp_R, rule lsft_real_cte) + apply (rule hoare_strengthen_postE_R, rule lsft_real_cte) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps') apply (vcg exspec=lookupSlot_modifies) apply vcg @@ -3261,9 +3251,8 @@ proof - apply ceqv apply (simp del: Collect_const) apply (rule_tac P'="{s. snd rv'=?curr s}" - and P="\s. length rva = length rv - \ (\x \ set rva. snd x \ 0)" - in ccorres_from_vcg_throws) + and P="\s. length rv = length xs \ (\x \ set rv. snd x \ 0)" + in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def seL4_MsgExtraCapBits_def) @@ -3284,7 +3273,7 @@ proof - liftE_bindE[symmetric]) apply (wp mapME_length mapME_set | simp)+ apply (rule_tac Q'="\rv. no_0_obj' and real_cte_at' rv" - in hoare_post_imp_R, wp lsft_real_cte) + in hoare_strengthen_postE_R, wp lsft_real_cte) apply (clarsimp simp: cte_wp_at_ctes_of) apply (wpsimp)+ apply (clarsimp simp: guard_is_UNIV_def @@ -3357,7 +3346,7 @@ proof - apply (cinit lift: sender_' receiver_' sendBuffer_' receiveBuffer_' canGrant_' badge_' endpoint_' cong: call_ignore_cong) - apply (clarsimp cong: call_ignore_cong simp del: dc_simp) + apply (clarsimp cong: call_ignore_cong) apply (ctac(c_lines 2, no_vcg) add: getMessageInfo_ccorres') apply (rule_tac xf'="\s. current_extra_caps_' (globals s)" and r'="\c c'. 
interpret_excaps c' = excaps_map c" @@ -3402,7 +3391,7 @@ proof - apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: seL4_MessageInfo_lift_def message_info_to_H_def mask_def msgLengthBits_def word_bw_assocs) - apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] static_imp_wp + apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] hoare_weak_lift_imp | simp)+ apply (auto simp: excaps_in_mem_def valid_ipc_buffer_ptr'_def option_to_0_def option_to_ptr_def @@ -3414,7 +3403,7 @@ qed lemma lookupIPCBuffer_not_Some_0: "\\\ lookupIPCBuffer r t \\rv. K (rv \ Some 0)\" apply (simp add: lookupIPCBuffer_def RISCV64_H.lookupIPCBuffer_def) - apply (wp hoare_post_taut haskell_assert_wp + apply (wp hoare_TrueI haskell_assert_wp | simp add: Let_def getThreadBufferSlot_def locateSlotTCB_def | intro conjI impI | wpc)+ done @@ -3465,7 +3454,6 @@ lemma replyFromKernel_error_ccorres [corres]: apply ((rule ccorres_Guard_Seq)+)? apply csymbr apply (rule ccorres_abstract_cleanup) - apply (fold dc_def)[1] apply (rule setMessageInfo_ccorres) apply wp apply (simp add: Collect_const_mem) @@ -3483,7 +3471,7 @@ lemma replyFromKernel_error_ccorres [corres]: message_info_to_H_def valid_pspace_valid_objs') apply (clarsimp simp: msgLengthBits_def msgFromSyscallError_def syscall_error_to_H_def syscall_error_type_defs - mask_def true_def option_to_ptr_def + mask_def option_to_ptr_def split: if_split_asm) done @@ -3534,14 +3522,12 @@ lemma doIPCTransfer_ccorres [corres]: apply simp_all[3] apply ceqv apply csymbr - apply (fold dc_def)[1] apply ctac apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero) - apply (fold dc_def)[1] apply ctac - apply (clarsimp simp: guard_is_UNIV_def false_def option_to_ptr_def split: option.splits) + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def split: option.splits) apply (rule_tac Q="\rv. 
valid_pspace' and cur_tcb' and tcb_at' sender and tcb_at' receiver and K (rv \ Some 0) and (case_option \ valid_ipc_buffer_ptr' rv) @@ -3550,7 +3536,7 @@ lemma doIPCTransfer_ccorres [corres]: apply (auto simp: valid_ipc_buffer_ptr'_def option_to_0_def split: option.splits)[1] apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) - apply (auto simp: to_bool_def true_def) + apply auto done lemma fault_case_absorb_bind: @@ -3571,7 +3557,6 @@ lemma Arch_getSanitiseRegisterInfo_ccorres: (Call Arch_getSanitiseRegisterInfo_'proc)" apply (cinit' lift: thread_' simp: getSanitiseRegisterInfo_def) apply (rule ccorres_return_C, simp+) - apply (simp add: false_def) done lemma copyMRsFaultReply_ccorres_exception: @@ -3602,7 +3587,7 @@ proof - apply (rule ccorres_rhs_assoc2) apply (simp add: MessageID_Exception_def) apply ccorres_rewrite - apply (subst bind_return_unit) + apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_zipWithM_x_while) apply clarsimp @@ -3614,7 +3599,7 @@ proof - apply (vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (auto simp: from_bool_def sanitiseRegister_def)[1] + apply (auto simp: sanitiseRegister_def)[1] apply wp apply clarsimp apply vcg @@ -3655,7 +3640,7 @@ proof - n_msgRegisters_def of_nat_less_iff) apply ccorres_rewrite - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (wp mapM_wp') apply clarsimp+ apply (clarsimp simp: guard_is_UNIV_def message_info_to_H_def @@ -3706,6 +3691,7 @@ lemma copyMRsFaultReply_ccorres_syscall: let ?obj_at_ft = "obj_at' (\tcb. tcbFault tcb = Some f) s" note symb_exec_r_fault = ccorres_symb_exec_r_known_rv_UNIV [where xf'=ret__unsigned_' and R="?obj_at_ft" and R'=UNIV] + note empty_fail_cond[simp] show ?thesis apply (unfold K_def, rule ccorres_gen_asm) using [[goals_limit=1]] apply (cinit' lift: sender_' receiver_' @@ -3810,7 +3796,6 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (subst aligned_add_aligned, assumption) apply (rule is_aligned_mult_triv2[where n=3, simplified]) apply (simp add: msg_align_bits) - apply (simp add: of_nat_unat[simplified comp_def]) apply (simp only: n_msgRegisters_def) apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def word_unat.Rep_inverse[of "scast _ :: 'a word"] @@ -3827,7 +3812,6 @@ lemma copyMRsFaultReply_ccorres_syscall: msg_align_bits sanitiseRegister_def simp del: upt_rec_numeral cong: if_cong register.case_cong, simp_all add: word_less_nat_alt unat_add_lem[THEN iffD1] unat_of_nat)[1] - apply (rule_tac x=rv in exI, auto)[1] apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def msgRegisters_ccorres syscallMessage_ccorres @@ -3849,8 +3833,8 @@ lemma copyMRsFaultReply_ccorres_syscall: apply simp apply (subst option.split[symmetric,where P=id, simplified]) apply (rule valid_drop_case) - apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified K_def] - lookupIPCBuffer_not_Some_0[simplified K_def]) + apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified] + lookupIPCBuffer_not_Some_0[simplified]) apply (simp add: length_syscallMessage length_msgRegisters n_syscallMessage_def @@ -3862,7 +3846,7 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (rule ccorres_guard_imp) apply (rule ccorres_symb_exec_l) apply (case_tac rva ; clarsimp) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (rule ccorres_return_Skip)+ apply (wp mapM_x_wp_inv user_getreg_inv' | clarsimp simp: zipWithM_x_mapM_x split: prod.split)+ apply (cases "4 < len") @@ 
-3904,7 +3888,7 @@ lemma handleArchFaultReply_corres: apply simp+ apply (rule ccorres_symb_exec_l) apply (ctac add: ccorres_return_C) - apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp simp: to_bool_def true_def)+ + apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp)+ done (* MOVE *) @@ -3952,7 +3936,7 @@ lemma handleFaultReply_ccorres [corres]: apply (unfold K_def, rule ccorres_gen_asm) apply (rule monadic_rewrite_ccorres_assemble_nodrop[OF _ handleFaultReply',rotated], simp) apply (cinit lift: sender_' receiver_' simp: whileAnno_def) - apply (clarsimp simp del: dc_simp) + apply clarsimp apply (ctac(c_lines 2) add: getMessageInfo_ccorres') apply (rename_tac tag tag') apply csymbr @@ -3998,7 +3982,7 @@ lemma handleFaultReply_ccorres [corres]: split del: if_split) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) - apply (fold bind_assoc id_def) + apply (fold bind_assoc) apply (ctac add: copyMRsFaultReply_ccorres_syscall[simplified bind_assoc[symmetric]]) apply (ctac add: ccorres_return_C) apply wp @@ -4040,9 +4024,9 @@ lemma handleFaultReply_ccorres [corres]: apply clarsimp apply vcg_step apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def - message_info_to_H_def to_bool_def scast_def + message_info_to_H_def scast_def length_exceptionMessage length_syscallMessage - min_def word_less_nat_alt true_def + min_def word_less_nat_alt guard_is_UNIV_def seL4_Faults seL4_Arch_Faults split: if_split) apply (simp add: length_exceptionMessage length_syscallMessage) @@ -4050,10 +4034,8 @@ lemma handleFaultReply_ccorres [corres]: apply clarsimp apply (vcg exspec=getRegister_modifies) apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def - message_info_to_H_def to_bool_def - length_exceptionMessage length_syscallMessage - min_def word_less_nat_alt true_def - obj_at'_def + message_info_to_H_def length_exceptionMessage length_syscallMessage + min_def word_less_nat_alt obj_at'_def split: if_split) apply (fastforce simp: seL4_Faults seL4_Arch_Faults) done @@ -4093,7 +4075,7 @@ lemma cteDeleteOne_tcbFault: apply (wp emptySlot_tcbFault cancelAllIPC_tcbFault getCTE_wp' cancelAllSignals_tcbFault unbindNotification_tcbFault isFinalCapability_inv unbindMaybeNotification_tcbFault - static_imp_wp + hoare_weak_lift_imp | wpc | simp add: Let_def)+ apply (clarsimp split: if_split) done @@ -4121,7 +4103,7 @@ lemma transferCaps_local_slots: transferCaps tag caps ep receiver receiveBuffer \\tag'. cte_wp_at' (\cte. P (cteCap cte)) slot\" apply (simp add: transferCaps_def pred_conj_def) - apply (rule hoare_seq_ext[rotated]) + apply (rule bind_wp_fwd) apply (rule hoare_vcg_conj_lift) apply (rule get_rs_real_cte_at') apply (rule get_recv_slot_inv') @@ -4187,10 +4169,6 @@ lemma doReplyTransfer_ccorres [corres]: \ \\grant = from_bool grant\) hs (doReplyTransfer sender receiver slot grant) (Call doReplyTransfer_'proc)" -proof - - have invs_valid_queues_strg: "\s. 
invs' s \ valid_queues s" - by clarsimp - show ?thesis apply (cinit lift: sender_' receiver_' slot_' grant_') apply (rule getThreadState_ccorres_foo) apply (rule ccorres_assert2) @@ -4216,14 +4194,13 @@ proof - apply csymbr apply wpc apply (clarsimp simp: ccorres_cond_iffs split del: if_split) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg)) apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) - apply (wpsimp wp: sts_running_valid_queues setThreadState_st_tcb)+ + apply (wpsimp wp: sts_valid_objs' setThreadState_st_tcb)+ apply (wp cteDeleteOne_sch_act_wf) apply vcg apply (rule conseqPre, vcg) @@ -4232,15 +4209,13 @@ proof - apply wp apply (simp add: cap_get_tag_isCap) apply (strengthen invs_weak_sch_act_wf_strg - cte_wp_at_imp_consequent'[where P="\ct. Ex (ccap_relation (cteCap ct))" for ct] - invs_valid_queues_strg) + cte_wp_at_imp_consequent'[where P="\ct. Ex (ccap_relation (cteCap ct))" for ct]) apply (simp add: cap_reply_cap_def) apply (wp doIPCTransfer_reply_or_replyslot) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero split del: if_split) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def)[1] apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (rule_tac A'=UNIV in stronger_ccorres_guard_imp) @@ -4269,22 +4244,20 @@ proof - apply (ctac (no_vcg)) apply (simp only: K_bind_def) apply (ctac add: possibleSwitchTo_ccorres) - apply (wp sts_running_valid_queues setThreadState_st_tcb | simp)+ - apply (fold dc_def)[1] - apply (ctac add: setThreadState_ccorres_valid_queues'_simple) + apply (wp sts_valid_objs' setThreadState_st_tcb | simp)+ + apply (ctac add: setThreadState_ccorres_simple) apply wp - apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' static_imp_wp + apply ((wp threadSet_sch_act hoare_weak_lift_imp threadSet_valid_objs' threadSet_weak_sch_act_wf | simp add: valid_tcb_state'_def)+)[1] - apply (clarsimp simp: guard_is_UNIV_def ThreadState_Restart_def - ThreadState_Inactive_def mask_def to_bool_def - option_to_ctcb_ptr_def) + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def option_to_ctcb_ptr_def) - apply (rule_tac Q="\rv. valid_queues and tcb_at' receiver and valid_queues' and + apply (rule_tac Q="\rv. tcb_at' receiver and valid_objs' and sch_act_simple and (\s. ksCurDomain s \ maxDomain) and - (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) + (\s. 
sch_act_wf (ksSchedulerAction s) s) and + pspace_aligned' and pspace_distinct'" in hoare_post_imp) apply (clarsimp simp: inQ_def weak_sch_act_wf_def) - apply (wp threadSet_valid_queues threadSet_sch_act handleFaultReply_sch_act_wf) + apply (wp threadSet_sch_act handleFaultReply_sch_act_wf) apply (clarsimp simp: guard_is_UNIV_def) apply assumption apply clarsimp @@ -4293,15 +4266,14 @@ proof - apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) apply (clarsimp simp: ctcb_relation_def typ_heap_simps) apply wp - apply (strengthen vp_invs_strg' invs_valid_queues') + apply (strengthen vp_invs_strg') apply (wp cteDeleteOne_tcbFault cteDeleteOne_sch_act_wf) apply vcg apply (rule conseqPre, vcg) apply (simp(no_asm_use) add: gs_set_assn_Delete_cstate_relation[unfolded o_def] subset_iff rf_sr_def) - apply (clarsimp simp: guard_is_UNIV_def to_bool_def true_def - option_to_ptr_def option_to_0_def false_def - ThreadState_Running_def mask_def + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def + ThreadState_defs mask_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs option_to_ctcb_ptr_def split: option.splits) @@ -4310,7 +4282,6 @@ proof - cap_get_tag_isCap) apply fastforce done -qed lemma ccorres_getCTE_cte_at: "ccorresG rf_sr \ r xf P P' hs (getCTE p >>= f) c @@ -4330,7 +4301,7 @@ lemma ccorres_getCTE_cte_at: done lemma setupCallerCap_ccorres [corres]: - "ccorres dc xfdc (valid_queues and valid_pspace' and (\s. \d p. sender \ set (ksReadyQueues s (d, p))) + "ccorres dc xfdc (valid_pspace' and (\s. sch_act_wf (ksSchedulerAction s) s) and sch_act_not sender and tcb_at' sender and tcb_at' receiver and tcb_at' sender and tcb_at' receiver) @@ -4343,8 +4314,7 @@ lemma setupCallerCap_ccorres [corres]: apply (frule_tac p=sender in is_aligned_tcb_ptr_to_ctcb_ptr) apply (cinit lift: sender_' receiver_' canGrant_') apply (clarsimp simp: word_sle_def - tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] - , fold dc_def)[1] + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]]) apply ccorres_remove_UNIV_guard apply (ctac(no_vcg)) apply (rule ccorres_move_array_assertion_tcb_ctes) @@ -4365,14 +4335,14 @@ lemma setupCallerCap_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg)) apply (rule ccorres_assert) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply csymbr apply (ctac add: cteInsert_ccorres) apply simp apply (wp getSlotCap_cte_wp_at) apply (clarsimp simp: ccap_relation_def cap_lift_reply_cap cap_to_H_simps cap_reply_cap_lift_def - false_def tcbSlots Kernel_C.tcbCaller_def + tcbSlots Kernel_C.tcbCaller_def size_of_def cte_level_bits_def) apply (simp add: is_aligned_neg_mask) apply (wp getCTE_wp') @@ -4392,11 +4362,11 @@ lemma setupCallerCap_ccorres [corres]: apply (simp add: locateSlot_conv) apply wp apply (clarsimp simp: ccap_rights_relation_def allRights_def - mask_def true_def cap_rights_to_H_def tcbCallerSlot_def + mask_def cap_rights_to_H_def tcbCallerSlot_def Kernel_C.tcbCaller_def) apply simp apply wp - apply (clarsimp simp: Kernel_C.ThreadState_BlockedOnReply_def mask_def + apply (clarsimp simp: ThreadState_defs mask_def valid_pspace'_def tcbReplySlot_def valid_tcb_state'_def Collect_const_mem tcb_cnode_index_defs) @@ -4420,7 +4390,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: 
comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -4441,7 +4411,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -4463,23 +4433,20 @@ lemma sendIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def - tcb_queue_relation'_def) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -4503,31 +4470,28 @@ lemma sendIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - isRecvEP_def isSendEP_def - tcb_queue_relation'_def valid_ep'_def - simp flip: canonical_bit_def - split: endpoint.splits list.splits - split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) - apply (simp add: objBits_simps') - apply (clarsimp split: if_split) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_def isSendEP_def + tcb_queue_relation'_def valid_ep'_def + simp flip: canonical_bit_def + split: endpoint.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) + apply (simp add: objBits_simps') + apply (clarsimp split: if_split) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule 
cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -4552,10 +4516,10 @@ lemma rf_sr_tcb_update_twice: packed_heap_update_collapse_hrs) lemma sendIPC_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and valid_objs' and pspace_canonical' and + "ccorres dc xfdc (tcb_at' thread and valid_objs' and pspace_canonical' and + pspace_aligned' and pspace_distinct' and sch_act_not thread and ep_at' epptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + (\s. sch_act_wf (ksSchedulerAction s) s) and K (bos = ThreadState_BlockedOnSend \ epptr' = epptr \ badge' = badge \ cg = from_bool canGrant \ cgr = from_bool canGrantReply @@ -4608,14 +4572,13 @@ lemma sendIPC_block_ccorres_helper: (simp add: typ_heap_simps')+)[1] apply (simp add: tcb_cte_cases_def cteSizeBits_def) apply (simp add: ctcb_relation_def cthread_state_relation_def - ThreadState_BlockedOnSend_def mask_def - from_bool_def to_bool_def) + ThreadState_defs mask_def) apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend split: bool.split) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs') apply (clarsimp simp: guard_is_UNIV_def) apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def @@ -4720,6 +4683,19 @@ lemma tcb_queue_relation_qend_valid': apply (simp add: h_t_valid_clift_Some_iff) done +lemma tcb_queue'_head_end_NULL: + assumes qr: "tcb_queue_relation' getNext getPrev mp queue qhead qend" + and tat: "\t\set queue. tcb_at' t s" + shows "(qend = NULL) = (qhead = NULL)" + using qr tat + apply - + apply (erule tcb_queue_relationE') + apply (simp add: tcb_queue_head_empty_iff split: if_splits) + apply (rule tcb_at_not_NULL) + apply (erule bspec) + apply simp + done + lemma tcbEPAppend_spec: "\s queue. \ \ \s. \t. (t, s) \ rf_sr \ (\tcb\set queue. 
tcb_at' tcb t) \ distinct queue @@ -4802,7 +4778,7 @@ lemma sendIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -4818,12 +4794,12 @@ lemma sendIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (SendEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -4842,33 +4818,30 @@ lemma sendIPC_enqueue_ccorres_helper: apply (elim conjE) apply (intro conjI) \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=3] EPState_Send_def) - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) - apply (rule conjI, simp add: mask_def) - subgoal - apply (clarsimp simp: valid_pspace'_def objBits_simps' simp flip: canonical_bit_def) - apply (erule (1) tcb_and_not_mask_canonical) - by (simp (no_asm) add: tcbBlockSizeBits_def) + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Send_def) + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) + apply (rule conjI, simp add: mask_def) + subgoal + apply (clarsimp simp: valid_pspace'_def objBits_simps' simp flip: canonical_bit_def) + apply (erule (1) tcb_and_not_mask_canonical) + by (simp (no_asm) add: tcbBlockSizeBits_def) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (simp only:projectKOs injectKO_ep objBits_simps) - apply clarsimp - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (simp only:projectKOs injectKO_ep objBits_simps) + apply clarsimp + apply (clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -4885,42 +4858,39 @@ lemma 
sendIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=3] EPState_Send_def - split: if_split) - subgoal - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask - valid_ep'_def - dest: tcb_queue_relation_next_not_NULL) - apply (rule conjI, clarsimp) - apply (rule conjI, fastforce simp: mask_def) - apply (clarsimp simp: valid_pspace'_def objBits_simps' simp flip: canonical_bit_def) - apply (erule (1) tcb_and_not_mask_canonical) - apply (simp (no_asm) add: tcbBlockSizeBits_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Send_def + split: if_split) + subgoal + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, clarsimp) + apply (rule conjI, fastforce simp: mask_def) apply (clarsimp simp: valid_pspace'_def objBits_simps' simp flip: canonical_bit_def) - apply (rule conjI, solves \simp (no_asm) add: mask_def\) apply (erule (1) tcb_and_not_mask_canonical) apply (simp (no_asm) add: tcbBlockSizeBits_def) - done + apply (clarsimp simp: valid_pspace'_def objBits_simps' simp flip: canonical_bit_def) + apply (rule conjI, solves \simp (no_asm) add: mask_def\) + apply (erule (1) tcb_and_not_mask_canonical) + apply (simp (no_asm) add: tcbBlockSizeBits_def) + done + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -4940,8 +4910,7 @@ lemma ctcb_relation_blockingIPCCanGrantD: lemma sendIPC_ccorres [corres]: "ccorres dc xfdc (invs' and st_tcb_at' simple' thread - and sch_act_not thread and ep_at' epptr and - (\s. \d p. thread \ set (ksReadyQueues s (d, p)))) + and sch_act_not thread and ep_at' epptr) (UNIV \ \\blocking = from_bool blocking\ \ \\do_call = from_bool do_call\ \ \\badge = badge\ @@ -4972,8 +4941,7 @@ lemma sendIPC_ccorres [corres]: apply ceqv apply (rule_tac A="invs' and st_tcb_at' simple' thread and sch_act_not thread and ko_at' ep epptr - and ep_at' epptr - and (\s. \d p. 
thread \ set (ksReadyQueues s (d, p)))" + and ep_at' epptr" in ccorres_guard_imp2 [where A'=UNIV]) apply wpc \ \RecvEP case\ @@ -5013,28 +4981,23 @@ lemma sendIPC_ccorres [corres]: apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) apply (clarsimp split del: if_split) apply (wpc ; ccorres_rewrite) - apply (clarsimp simp: from_bool_def disj_imp[symmetric] split del: if_split) + apply (clarsimp simp: disj_imp[symmetric] split del: if_split) apply (wpc ; clarsimp) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setupCallerCap_ccorres) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres) - apply (fold dc_def)[1] apply (rule ccorres_return_Skip) apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift possibleSwitchTo_sch_act_not - possibleSwitchTo_sch_act_not sts_st_tcb' - possibleSwitchTo_ksQ' sts_valid_queues sts_ksQ' + possibleSwitchTo_sch_act_not sts_st_tcb' sts_valid_objs' simp: valid_tcb_state'_def)+ apply vcg - apply (wpsimp wp: doIPCTransfer_sch_act setEndpoint_ksQ hoare_vcg_all_lift - set_ep_valid_objs' setEndpoint_valid_mdb' + apply (wpsimp wp: doIPCTransfer_sch_act hoare_vcg_all_lift + set_ep_valid_objs' setEndpoint_valid_mdb' | wp (once) hoare_drop_imp | strengthen sch_act_wf_weak)+ - apply (fastforce simp: guard_is_UNIV_def ThreadState_Inactive_def Collect_const_mem - ThreadState_Running_def mask_def from_bool_def - option_to_ptr_def option_to_0_def + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs Collect_const_mem mask_def + option_to_ptr_def option_to_0_def split: bool.split_asm) \ \IdleEP case\ @@ -5151,10 +5114,10 @@ lemma ctcb_relation_blockingIPCCanGrantReplyD: done lemma receiveIPC_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and valid_objs' and pspace_canonical' and + "ccorres dc xfdc (tcb_at' thread and valid_objs' and pspace_canonical' and + pspace_aligned' and pspace_distinct' and sch_act_not thread and ep_at' epptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and K (epptr = epptr && ~~ mask 4) and K (isEndpointCap cap \ ccap_relation cap cap')) UNIV hs @@ -5190,12 +5153,12 @@ lemma receiveIPC_block_ccorres_helper: apply (erule(1) rf_sr_tcb_update_no_queue_gen, (simp add: typ_heap_simps)+) apply (simp add: tcb_cte_cases_def cteSizeBits_def) apply (simp add: ctcb_relation_def cthread_state_relation_def ccap_relation_ep_helpers - ThreadState_BlockedOnReceive_def mask_def cap_get_tag_isCap) + ThreadState_defs mask_def cap_get_tag_isCap) apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_valid_queues hoare_vcg_all_lift threadSet_valid_objs' + apply (wp hoare_vcg_all_lift threadSet_valid_objs' threadSet_weak_sch_act_wf_runnable') apply (clarsimp simp: guard_is_UNIV_def) apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def @@ -5222,7 +5185,7 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -5238,12 +5201,12 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (RecvEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5261,42 +5224,38 @@ lemma receiveIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=3] EPState_Recv_def - split: if_split) - subgoal - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask - valid_ep'_def - dest: tcb_queue_relation_next_not_NULL) - apply (rule conjI, clarsimp) - apply (rule conjI, fastforce simp: mask_def) - apply (clarsimp simp: valid_pspace'_def objBits_simps' simp flip: canonical_bit_def) - apply (erule (1) tcb_and_not_mask_canonical) - apply (simp (no_asm) add: tcbBlockSizeBits_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Recv_def + split: if_split) + subgoal + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, clarsimp) + apply (rule conjI, fastforce simp: mask_def) apply (clarsimp simp: valid_pspace'_def objBits_simps' simp flip: canonical_bit_def) - apply (rule conjI, solves \simp (no_asm) add: mask_def\) apply (erule (1) tcb_and_not_mask_canonical) apply (simp (no_asm) add: tcbBlockSizeBits_def) - done + apply 
(clarsimp simp: valid_pspace'_def objBits_simps' simp flip: canonical_bit_def) + apply (rule conjI, solves \simp (no_asm) add: mask_def\) + apply (erule (1) tcb_and_not_mask_canonical) + apply (simp (no_asm) add: tcbBlockSizeBits_def) + done + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -5313,34 +5272,31 @@ lemma receiveIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=3] EPState_Recv_def) - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask - simp flip: canonical_bit_def) - subgoal - apply (rule conjI, solves\simp (no_asm) add: mask_def\) - apply (clarsimp simp: valid_pspace'_def) - apply (erule (1) tcb_and_not_mask_canonical, simp (no_asm) add: tcbBlockSizeBits_def) - done + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Recv_def) + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + simp flip: canonical_bit_def) + subgoal + apply (rule conjI, solves\simp (no_asm) add: mask_def\) + apply (clarsimp simp: valid_pspace'_def) + apply (erule (1) tcb_and_not_mask_canonical, simp (no_asm) add: tcbBlockSizeBits_def) + done + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply 
(clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5366,7 +5322,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -5387,7 +5343,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5409,23 +5365,20 @@ lemma receiveIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def - tcb_queue_relation'_def) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) apply simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5449,31 +5402,28 @@ lemma receiveIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - isRecvEP_def isSendEP_def - tcb_queue_relation'_def valid_ep'_def - simp flip: canonical_bit_def - split: endpoint.splits list.splits - split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) - apply (clarsimp simp: objBits_simps') - apply (clarsimp split: if_split) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_def isSendEP_def + tcb_queue_relation'_def valid_ep'_def + simp flip: canonical_bit_def + split: endpoint.splits list.splits + split del: 
if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) + apply (clarsimp simp: objBits_simps') + apply (clarsimp split: if_split) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: typ_heap_simps') @@ -5537,7 +5487,7 @@ lemma completeSignal_ccorres: apply (erule(1) cmap_relation_ko_atE[OF cmap_relation_ntfn]) apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps) apply ceqv - apply (fold dc_def, ctac(no_vcg)) + apply (ctac(no_vcg)) apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp) @@ -5590,7 +5540,6 @@ lemma receiveIPC_ccorres [corres]: notes option.case_cong_weak [cong] shows "ccorres dc xfdc (invs' and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. thread \ set (ksReadyQueues s (d, p))) and valid_cap' cap and K (isEndpointCap cap)) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ \ \ccap_relation cap \cap\ @@ -5651,7 +5600,7 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule ccorres_cond[where R=\]) apply (simp add: Collect_const_mem) - apply (ctac add: completeSignal_ccorres[unfolded dc_def]) + apply (ctac add: completeSignal_ccorres) apply (rule_tac xf'=ret__unsigned_longlong_' and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv @@ -5666,7 +5615,6 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule_tac A="invs' and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. 
thread \ set (ksReadyQueues s (d, p))) and ko_at' ep (capEPPtr cap)" in ccorres_guard_imp2 [where A'=UNIV]) apply wpc @@ -5681,20 +5629,18 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp apply (rename_tac list NOo) - apply (rule_tac ep="RecvEP list" - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep="RecvEP list" in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (rename_tac list) apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \IdleEP case\ apply (rule ccorres_cond_true) apply csymbr @@ -5706,18 +5652,16 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp - apply (rule_tac ep=IdleEP - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep=IdleEP in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \SendEP case\ apply (thin_tac "isBlockinga = from_bool P" for P) apply (rule ccorres_cond_false) @@ -5795,12 +5739,10 @@ lemma receiveIPC_ccorres [corres]: split: Structures_H.thread_state.splits) apply ceqv - apply (fold dc_def) - supply dc_simp[simp del] apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) apply wpc (* blocking ipc call *) - apply (clarsimp simp: from_bool_def split del: if_split simp del: Collect_const) + apply (clarsimp split del: if_split simp del: Collect_const) apply ccorres_rewrite apply (wpc ; clarsimp ; ccorres_rewrite) apply csymbr @@ -5812,28 +5754,25 @@ lemma receiveIPC_ccorres [corres]: apply ccorres_rewrite apply ctac apply (ctac add: possibleSwitchTo_ccorres) - apply (wpsimp wp: sts_st_tcb' sts_valid_queues) + apply (wpsimp wp: sts_st_tcb' sts_valid_objs') apply (vcg exspec=setThreadState_modifies) - apply (fastforce simp: guard_is_UNIV_def ThreadState_Inactive_def - mask_def ThreadState_Running_def cap_get_tag_isCap - ccap_relation_ep_helpers) + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs mask_def + cap_get_tag_isCap ccap_relation_ep_helpers) apply (clarsimp simp: valid_tcb_state'_def) - apply (rule_tac Q="\_. valid_pspace' and valid_queues + apply (rule_tac Q="\_. valid_pspace' and st_tcb_at' ((=) sendState) sender and tcb_at' thread and (\s. sch_act_wf (ksSchedulerAction s) s) - and (\s. (\a b. sender \ set (ksReadyQueues s (a, b)))) and sch_act_not sender and K (thread \ sender) and (\s. 
ksCurDomain s \ maxDomain)" in hoare_post_imp) - apply (clarsimp simp: valid_pspace_valid_objs' pred_tcb_at'_def sch_act_wf_weak - obj_at'_def) + apply (fastforce simp: valid_pspace_valid_objs' pred_tcb_at'_def sch_act_wf_weak + obj_at'_def) apply (wpsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def conj_ac)+ - apply (rule_tac Q="\rv. valid_queues and valid_pspace' + apply (rule_tac Q="\rv. valid_pspace' and cur_tcb' and tcb_at' sender and tcb_at' thread and sch_act_not sender and K (thread \ sender) and ep_at' (capEPPtr cap) and (\s. ksCurDomain s \ maxDomain) - and (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. sender \ set (ksReadyQueues s (d, p))))" + and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) subgoal by (auto, auto simp: st_tcb_at'_def obj_at'_def) apply (wp hoare_vcg_all_lift set_ep_valid_objs') @@ -5852,7 +5791,7 @@ lemma receiveIPC_ccorres [corres]: projectKOs invs'_def valid_state'_def st_tcb_at'_def valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' isBlockedOnReceive_def projectKO_opt_tcb - from_bool_def to_bool_def objBits_simps' + objBits_simps' elim!: delta_sym_refs split: if_split_asm bool.splits) (*very long*) apply (frule(1) sym_refs_obj_atD' [OF _ invs_sym']) @@ -5865,24 +5804,20 @@ lemma receiveIPC_ccorres [corres]: projectKOs invs'_def valid_state'_def st_tcb_at'_def valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' isBlockedOnReceive_def projectKO_opt_tcb objBits_simps' - from_bool_def to_bool_def elim: delta_sym_refs split: if_split_asm bool.splits) (*very long *) apply (clarsimp simp: obj_at'_def state_refs_of'_def projectKOs) apply (frule(1) sym_refs_ko_atD' [OF _ invs_sym']) - apply (frule invs_queues) apply clarsimp apply (rename_tac list x xa) apply (rule_tac P="x\set list" in case_split) apply (clarsimp simp:st_tcb_at_refs_of_rev') apply (erule_tac x=x and P="\x. 
st_tcb_at' P x s" for P in ballE) - apply (drule_tac t=x in valid_queues_not_runnable'_not_ksQ) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) apply (subgoal_tac "sch_act_not x s") prefer 2 apply (frule invs_sch_act_wf') apply (clarsimp simp:sch_act_wf_def) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs isBlockedOnSend_def split: list.split | rule conjI)+ @@ -5910,11 +5845,10 @@ lemma sendSignal_dequeue_ccorres_helper: IF head_C \ntfn_queue = Ptr 0 THEN CALL notification_ptr_set_state(Ptr ntfn,scast NtfnState_Idle) FI)" - apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ntfn_blocked_in_queueD) apply (frule (1) ko_at_valid_ntfn' [OF _ invs_valid_objs']) apply (elim conjE) @@ -5934,7 +5868,7 @@ lemma sendSignal_dequeue_ccorres_helper: apply (drule ntfn_to_ep_queue, (simp add: isWaitingNtfn_def)+) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5956,23 +5890,20 @@ lemma sendSignal_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp+ - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def - tcb_queue_relation'_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def + tcb_queue_relation'_def) + apply simp apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -5998,33 +5929,30 @@ lemma sendSignal_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp+ - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (clarsimp simp: cnotification_relation_def Let_def - isWaitingNtfn_def - tcb_queue_relation'_def valid_ntfn'_def - split: Structures_H.notification.splits list.splits - split del: 
if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (rule conjI) - subgoal by (erule (1) tcb_ptr_sign_extend_canonical[OF invs_pspace_canonical']) - apply (rule context_conjI) - subgoal by (erule (1) tcb_ptr_sign_extend_canonical[OF invs_pspace_canonical']) - apply clarsimp - apply (clarsimp split: if_split) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (clarsimp simp: cnotification_relation_def Let_def + isWaitingNtfn_def + tcb_queue_relation'_def valid_ntfn'_def + split: Structures_H.notification.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (rule conjI) + subgoal by (erule (1) tcb_ptr_sign_extend_canonical[OF invs_pspace_canonical']) + apply (rule context_conjI) + subgoal by (erule (1) tcb_ptr_sign_extend_canonical[OF invs_pspace_canonical']) + apply clarsimp + apply (clarsimp split: if_split) + apply simp apply (clarsimp simp: carch_state_relation_def) apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -6115,7 +6043,7 @@ lemma sendSignal_ccorres [corres]: apply wpc apply (simp add: option_to_ctcb_ptr_def split del: if_split) apply (rule ccorres_cond_false) - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (rule ccorres_cond_true) apply (rule getThreadState_ccorres_foo) apply (rule ccorres_Guard_Seq) @@ -6130,22 +6058,21 @@ lemma sendSignal_ccorres [corres]: apply (ctac(no_vcg) add: cancelIPC_ccorres1[OF cteDeleteOne_ccorres]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: setRegister_ccorres) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) - apply (wp sts_running_valid_queues sts_st_tcb_at'_cases + apply (ctac add: possibleSwitchTo_ccorres) + apply (wp sts_valid_objs' sts_st_tcb_at'_cases | simp add: option_to_ctcb_ptr_def split del: if_split)+ apply (rule_tac Q="\_. 
tcb_at' (the (ntfnBoundTCB ntfn)) and invs'" in hoare_post_imp) apply auto[1] apply wp apply simp - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (clarsimp simp: guard_is_UNIV_def option_to_ctcb_ptr_def RISCV64_H.badgeRegister_def C_register_defs RISCV64.badgeRegister_def RISCV64.capRegister_def - "StrictC'_thread_state_defs"less_mask_eq - Collect_const_mem) + ThreadState_defs less_mask_eq Collect_const_mem) apply (case_tac ts, simp_all add: receiveBlocked_def typ_heap_simps - cthread_state_relation_def "StrictC'_thread_state_defs")[1] + cthread_state_relation_def ThreadState_defs)[1] \ \ActiveNtfn case\ apply (rename_tac old_badge) apply (rule ccorres_cond_false) @@ -6194,16 +6121,14 @@ lemma sendSignal_ccorres [corres]: apply ceqv apply (simp only: K_bind_def) apply (ctac (no_vcg)) - apply (simp, fold dc_def) + apply simp apply (ctac (no_vcg)) apply (ctac add: possibleSwitchTo_ccorres) apply (simp) - apply (wp weak_sch_act_wf_lift_linear - setThreadState_oa_queued - sts_valid_queues tcb_in_cur_domain'_lift)[1] - apply (wp sts_valid_queues sts_runnable) + apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift)[1] + apply (wp sts_valid_objs' sts_runnable) apply (wp setThreadState_st_tcb set_ntfn_valid_objs' | clarsimp)+ - apply (clarsimp simp: guard_is_UNIV_def ThreadState_Running_def mask_def + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def badgeRegister_def C_register_defs RISCV64.badgeRegister_def RISCV64.capRegister_def) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Idle_def @@ -6226,10 +6151,10 @@ lemma sendSignal_ccorres [corres]: done lemma receiveSignal_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and sch_act_not thread and + "ccorres dc xfdc (tcb_at' thread and sch_act_not thread and valid_objs' and ntfn_at' ntfnptr and pspace_canonical' and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + pspace_aligned' and pspace_distinct' and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and K (ntfnptr = ntfnptr && ~~ mask 4)) UNIV hs (setThreadState (Structures_H.thread_state.BlockedOnNotification @@ -6259,14 +6184,13 @@ lemma receiveSignal_block_ccorres_helper: (simp add: typ_heap_simps')+) apply (simp add: tcb_cte_cases_def cteSizeBits_def) apply (simp add: ctcb_relation_def cthread_state_relation_def - ThreadState_BlockedOnNotification_def mask_def - from_bool_def to_bool_def + ThreadState_defs mask_def flip: canonical_bit_def) apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_valid_queues hoare_vcg_all_lift threadSet_valid_objs' + apply (wp hoare_vcg_all_lift threadSet_valid_objs' threadSet_weak_sch_act_wf_runnable') apply (clarsimp simp: guard_is_UNIV_def) apply (auto simp: weak_sch_act_wf_def valid_tcb'_def tcb_cte_cases_def @@ -6277,16 +6201,17 @@ lemma cpspace_relation_ntfn_update_ntfn': fixes ntfn :: "Structures_H.notification" and ntfn' :: "Structures_H.notification" and ntfnptr :: "machine_word" and s :: "kernel_state" defines "qs \ if isWaitingNtfn (ntfnObj ntfn') then set (ntfnQueue (ntfnObj ntfn')) else {}" - defines "s' \ s\ksPSpace := ksPSpace s(ntfnptr \ KONotification ntfn')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(ntfnptr \ KONotification ntfn')\" assumes koat: "ko_at' ntfn ntfnptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift t) Ptr (cnotification_relation (cslift t))" and srs: "sym_refs (state_refs_of' s')" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr - (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) + Ptr + (cnotification_relation (cslift t'))" proof - from koat have koat': "ko_at' ntfn' ntfnptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -6346,7 +6271,7 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ntfn) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -6362,12 +6287,12 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (simp add: cnotification_relation_def Let_def) apply (case_tac "ntfnObj ntfn", simp_all add: init_def valid_ntfn'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def ntfnBound_state_refs_equivalence obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)) ntfnptr (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -6384,37 +6309,34 @@ lemma 
receiveSignal_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) apply simp - apply (rule cendpoint_relation_ntfn_queue, assumption+) - apply (simp add: isWaitingNtfn_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) - apply (case_tac "ntfn", simp_all)[1] - apply (clarsimp simp: cnotification_relation_def Let_def - mask_def [where n=3] NtfnState_Waiting_def) - subgoal - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask valid_ntfn'_def - dest: tcb_queue_relation_next_not_NULL) - apply (rule conjI, fastforce simp: mask_def) - apply (rule context_conjI) - subgoal by (fastforce simp: valid_pspace'_def objBits_simps' - intro!: tcb_ptr_sign_extend_canonical - dest!: st_tcb_strg'[rule_format]) - by clarsimp - apply (simp add: isWaitingNtfn_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=3] NtfnState_Waiting_def) + subgoal + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask valid_ntfn'_def + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, fastforce simp: mask_def) + apply (rule context_conjI) + subgoal by (fastforce simp: valid_pspace'_def objBits_simps' + intro!: tcb_ptr_sign_extend_canonical + dest!: st_tcb_strg'[rule_format]) + by clarsimp + apply (simp add: isWaitingNtfn_def) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -6431,49 +6353,46 @@ lemma receiveSignal_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) apply simp - apply (rule cendpoint_relation_ntfn_queue, assumption+) - apply (simp add: isWaitingNtfn_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \ntfn 
relation\ - apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) - apply (case_tac "ntfn", simp_all)[1] - apply (clarsimp simp: cnotification_relation_def Let_def - mask_def [where n=3] NtfnState_Waiting_def - split: if_split) - subgoal for _ _ ko' - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask - dest: tcb_queue_relation_next_not_NULL) - apply (rule conjI, clarsimp) - apply (rule conjI, fastforce simp: mask_def) - apply (rule context_conjI) - subgoal by (fastforce intro!: tcb_ptr_sign_extend_canonical - dest!: st_tcb_strg'[rule_format]) - apply clarsimp - apply clarsimp + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=3] NtfnState_Waiting_def + split: if_split) + subgoal for _ _ ko' + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, clarsimp) apply (rule conjI, fastforce simp: mask_def) - apply (rule conjI) + apply (rule context_conjI) subgoal by (fastforce intro!: tcb_ptr_sign_extend_canonical dest!: st_tcb_strg'[rule_format]) - apply (subgoal_tac "canonical_address (ntfnQueue_head_CL (notification_lift ko'))") - apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend) - apply (clarsimp simp: notification_lift_def canonical_address_sign_extended - sign_extended_sign_extend - simp flip: canonical_bit_def) - done - apply (simp add: isWaitingNtfn_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply clarsimp + apply clarsimp + apply (rule conjI, fastforce simp: mask_def) + apply (rule conjI) + subgoal by (fastforce intro!: tcb_ptr_sign_extend_canonical + dest!: st_tcb_strg'[rule_format]) + apply (subgoal_tac "canonical_address (ntfnQueue_head_CL (notification_lift ko'))") + apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend) + apply (clarsimp simp: notification_lift_def canonical_address_sign_extended + sign_extended_sign_extend + simp flip: canonical_bit_def) + done + apply (simp add: isWaitingNtfn_def) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs) apply (simp add: cmachine_state_relation_def) apply (simp add: h_t_valid_clift_Some_iff) @@ -6485,7 +6404,6 @@ lemma receiveSignal_enqueue_ccorres_helper: lemma receiveSignal_ccorres [corres]: "ccorres dc xfdc (invs' and valid_cap' cap and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. thread \ set (ksReadyQueues s (d, p))) and K (isNotificationCap cap)) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ \ \ccap_relation cap \cap\ @@ -6529,11 +6447,10 @@ lemma receiveSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp) apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. 
projectKO_opt x = Some ntfn @@ -6544,7 +6461,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \ActiveNtfn case\ apply (rename_tac badge) apply (rule ccorres_cond_false) @@ -6600,8 +6517,7 @@ lemma receiveSignal_ccorres [corres]: apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule_tac ntfn="ntfn" - in receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule_tac ntfn="ntfn" in receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6613,7 +6529,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Active_def NtfnState_Waiting_def NtfnState_Idle_def) apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/RISCV64/IsolatedThreadAction.thy b/proof/crefine/RISCV64/IsolatedThreadAction.thy index fca6325ce3..0c5bd15952 100644 --- a/proof/crefine/RISCV64/IsolatedThreadAction.thy +++ b/proof/crefine/RISCV64/IsolatedThreadAction.thy @@ -1,6 +1,7 @@ (* - * Copyright 2014, General Dynamics C4 Systems + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only *) @@ -11,25 +12,8 @@ begin context begin interpretation Arch . -datatype tcb_state_regs = TCBStateRegs "thread_state" "MachineTypes.register \ machine_word" - -definition - "tsrContext tsr \ case tsr of TCBStateRegs ts regs \ regs" - -definition - "tsrState tsr \ case tsr of TCBStateRegs ts regs \ ts" - -lemma accessors_TCBStateRegs[simp]: - "TCBStateRegs (tsrState v) (tsrContext v) = v" - by (cases v, simp add: tsrState_def tsrContext_def) - -lemma tsrContext_simp[simp]: - "tsrContext (TCBStateRegs st con) = con" - by (simp add: tsrContext_def) - -lemma tsrState_simp[simp]: - "tsrState (TCBStateRegs st con) = st" - by (simp add: tsrState_def) +datatype tcb_state_regs = + TCBStateRegs (tsrState : thread_state) (tsrContext : "MachineTypes.register \ machine_word") definition get_tcb_state_regs :: "kernel_object option \ tcb_state_regs" @@ -84,12 +68,12 @@ lemma put_tcb_state_regs_twice[simp]: "put_tcb_state_regs tsr (put_tcb_state_regs tsr' tcb) = put_tcb_state_regs tsr tcb" apply (simp add: put_tcb_state_regs_def put_tcb_state_regs_tcb_def - atcbContextSet_def - makeObject_tcb newArchTCB_def newContext_def initContext_def + makeObject_tcb newArchTCB_def split: tcb_state_regs.split option.split Structures_H.kernel_object.split) apply (intro all_tcbI impI allI) - apply (case_tac q, simp) + using atcbContextSet_def atcbContext_set_set + apply fastforce+ done lemma partial_overwrite_twice[simp]: @@ -416,7 +400,7 @@ lemma getObject_get_assert: apply (simp add: lookupAround2_known1 assert_opt_def obj_at'_def projectKO_def2 split: option.split) - apply (clarsimp simp: fail_def fst_return conj_comms project_inject + apply (clarsimp simp: fail_set fst_return conj_comms project_inject objBits_def) apply (simp only: assert2[symmetric], rule bind_apply_cong[OF refl]) @@ -476,7 +460,7 @@ lemma modify_isolatable: liftM_def bind_assoc) 
apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def) - apply (simp add: simpler_modify_def o_def) + apply (simp add: simpler_modify_def) apply (subst swap) apply (simp add: obj_at_partial_overwrite_If) apply (simp add: ksPSpace_update_partial_id o_def) @@ -533,15 +517,15 @@ lemma thread_actions_isolatable_bind: \t. \tcb_at' t\ f \\rv. tcb_at' t\ \ \ thread_actions_isolatable idx (f >>= g)" apply (clarsimp simp: thread_actions_isolatable_def) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) - apply (erule monadic_rewrite_bind2, assumption) + apply (erule monadic_rewrite_bind_l, assumption) apply (rule hoare_vcg_all_lift, assumption) apply (subst isolate_thread_actions_wrap_bind, simp) apply simp apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) apply (rule monadic_rewrite_transverse) - apply (erule monadic_rewrite_bind2, assumption) + apply (erule monadic_rewrite_bind_l, assumption) apply (rule hoare_vcg_all_lift, assumption) apply (simp add: bind_assoc id_def) apply (rule monadic_rewrite_refl) @@ -606,7 +590,7 @@ lemma select_f_isolatable: apply (clarsimp simp: thread_actions_isolatable_def isolate_thread_actions_def split_def select_f_selects liftM_def bind_assoc) - apply (rule monadic_rewrite_imp, rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_guard_imp, rule monadic_rewrite_transverse) apply (rule monadic_rewrite_drop_modify monadic_rewrite_bind_tail)+ apply wp+ apply (simp add: gets_bind_ign getSchedulerAction_def) @@ -732,12 +716,10 @@ lemma transferCaps_simple_rewrite: (transferCaps mi caps ep r rBuf) (return (mi \ msgExtraCaps := 0, msgCapsUnwrapped := 0 \))" including no_pre + supply empty_fail_getReceiveSlots[wp] (* FIXME *) apply (rule monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (simp add: transferCaps_simple, rule monadic_rewrite_refl) - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getReceiveSlots)+) - apply (rule monadic_rewrite_refl) + apply (simp add: transferCaps_simple) + apply (monadic_rewrite_symb_exec_l_drop, rule monadic_rewrite_refl) apply simp done @@ -751,7 +733,8 @@ lemma lookupExtraCaps_simple_rewrite: lemma lookupIPC_inv: "\P\ lookupIPCBuffer f t \\rv. 
P\" by wp -lemmas empty_fail_user_getreg = empty_fail_asUser[OF empty_fail_getRegister] +(* FIXME move *) +lemmas empty_fail_user_getreg[intro!, wp, simp] = empty_fail_asUser[OF empty_fail_getRegister] lemma copyMRs_simple: "msglen \ of_nat (length msgRegisters) \ @@ -788,25 +771,23 @@ lemma doIPCTransfer_simple_rewrite: apply (simp add: doIPCTransfer_def bind_assoc doNormalTransfer_def getMessageInfo_def cong: option.case_cong) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail)+ - apply (rule_tac P="fault = None" in monadic_rewrite_gen_asm, simp) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l_known None, simp) apply (rule monadic_rewrite_bind_tail) - apply (rule_tac x=msgInfo in monadic_rewrite_symb_exec, - (wp empty_fail_user_getreg user_getreg_rv)+) - apply (simp add: lookupExtraCaps_simple_rewrite returnOk_catch_bind) - apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_from_simple, rule copyMRs_simple) - apply (rule monadic_rewrite_bind_head) - apply (rule transferCaps_simple_rewrite) - apply (wp threadGet_const)+ + apply (monadic_rewrite_symb_exec_l_known msgInfo) + apply (simp add: lookupExtraCaps_simple_rewrite returnOk_catch_bind) + apply (rule monadic_rewrite_bind) + apply (rule monadic_rewrite_from_simple, rule copyMRs_simple) + apply (rule monadic_rewrite_bind_head) + apply (rule transferCaps_simple_rewrite) + apply (wp threadGet_const user_getreg_rv asUser_inv)+ apply (simp add: bind_assoc) - apply (rule monadic_rewrite_symb_exec2[OF lookupIPC_inv empty_fail_lookupIPCBuffer] - monadic_rewrite_symb_exec2[OF threadGet_inv empty_fail_threadGet] - monadic_rewrite_symb_exec2[OF user_getreg_inv' empty_fail_user_getreg] - monadic_rewrite_bind_head monadic_rewrite_bind_tail - | wp)+ + apply (rule monadic_rewrite_symb_exec_l_drop[OF _ lookupIPC_inv empty_fail_lookupIPCBuffer] + monadic_rewrite_symb_exec_l_drop[OF _ threadGet_inv empty_fail_threadGet] + monadic_rewrite_symb_exec_l_drop[OF _ user_getreg_inv' empty_fail_user_getreg] + monadic_rewrite_bind_head monadic_rewrite_bind_tail)+ apply (case_tac "messageInfoFromWord msgInfo") apply simp apply (rule monadic_rewrite_refl) @@ -818,7 +799,7 @@ lemma doIPCTransfer_simple_rewrite: lemma monadic_rewrite_setSchedulerAction_noop: "monadic_rewrite F E (\s. ksSchedulerAction s = act) (setSchedulerAction act) (return ())" unfolding setSchedulerAction_def - apply (rule monadic_rewrite_imp, rule monadic_rewrite_modify_noop) + apply (rule monadic_rewrite_guard_imp, rule monadic_rewrite_modify_noop) apply simp done @@ -832,9 +813,10 @@ lemma rescheduleRequired_simple_rewrite: apply auto done -lemma empty_fail_isRunnable: +(* FIXME move *) +lemma empty_fail_isRunnable[intro!, wp, simp]: "empty_fail (isRunnable t)" - by (simp add: isRunnable_def isStopped_def) + by (simp add: isRunnable_def isStopped_def empty_fail_cond) lemma setupCallerCap_rewrite: "monadic_rewrite True True (\s. 
reply_masters_rvk_fb (ctes_of s)) @@ -853,23 +835,19 @@ lemma setupCallerCap_rewrite: apply (simp add: setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv getSlotCap_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_bind_tail)+ - apply (rule monadic_rewrite_assert)+ - apply (rule_tac P="mdbFirstBadged (cteMDBNode masterCTE) - \ mdbRevocable (cteMDBNode masterCTE)" - in monadic_rewrite_gen_asm) - apply simp - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec2, (wp | simp)+)+ - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getCTE)+)+ - apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp' | simp add: cte_wp_at_ctes_of)+ - apply (clarsimp simp: reply_masters_rvk_fb_def) - apply fastforce + apply (rule monadic_rewrite_bind_tail)+ + apply (rule monadic_rewrite_assert)+ + apply (rule_tac P="mdbFirstBadged (cteMDBNode masterCTE) + \ mdbRevocable (cteMDBNode masterCTE)" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_trans) + apply monadic_rewrite_symb_exec_l + apply monadic_rewrite_symb_exec_l_drop + apply (rule monadic_rewrite_refl) + apply wpsimp+ + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ + apply (fastforce simp: reply_masters_rvk_fb_def) done lemma oblivious_getObject_ksPSpace_default: @@ -936,28 +914,25 @@ lemma oblivious_switchToThread_schact: threadSet_def tcbSchedEnqueue_def unless_when asUser_def getQueue_def setQueue_def storeWordUser_def setRegister_def pointerInUserData_def isRunnable_def isStopped_def - getThreadState_def tcbSchedDequeue_def bitmap_fun_defs) + getThreadState_def tcbSchedDequeue_def tcbQueueRemove_def bitmap_fun_defs + ksReadyQueues_asrt_def) by (safe intro!: oblivious_bind - | simp_all add: oblivious_setVMRoot_schact)+ + | simp_all add: ready_qs_runnable_def idleThreadNotQueued_def + oblivious_setVMRoot_schact)+ -lemma empty_fail_getCurThread[iff]: +(* FIXME move *) +lemma empty_fail_getCurThread[intro!, wp, simp]: "empty_fail getCurThread" by (simp add: getCurThread_def) + lemma activateThread_simple_rewrite: "monadic_rewrite True True (ct_in_state' ((=) Running)) (activateThread) (return ())" apply (simp add: activateThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_tail)+ - apply (rule_tac P="state = Running" in monadic_rewrite_gen_asm) - apply simp + apply wp_pre + apply (monadic_rewrite_symb_exec_l) + apply (monadic_rewrite_symb_exec_l_known Running, simp) apply (rule monadic_rewrite_refl) - apply wp - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getThreadState)+) - apply (rule monadic_rewrite_refl) - apply wp - apply (rule monadic_rewrite_symb_exec2, - simp_all add: getCurThread_def) - apply (rule monadic_rewrite_refl) + apply wpsimp+ apply (clarsimp simp: ct_in_state'_def elim!: pred_tcb'_weakenE) done @@ -983,17 +958,15 @@ lemma tcbSchedEnqueue_tcbPriority[wp]: done crunch obj_at_prio[wp]: cteDeleteOne "obj_at' (\tcb. P (tcbPriority tcb)) t" - (wp: crunch_wps setEndpoint_obj_at'_tcb - setThreadState_obj_at_unchanged setNotification_tcb setBoundNotification_obj_at_unchanged - simp: crunch_simps unless_def) + (wp: crunch_wps setEndpoint_obj_at'_tcb setNotification_tcb simp: crunch_simps unless_def) lemma setThreadState_no_sch_change: "\\s. P (ksSchedulerAction s) \ (runnable' st \ t \ ksCurThread s)\ setThreadState st t \\rv s. 
P (ksSchedulerAction s)\" - (is "NonDetMonad.valid ?P ?f ?Q") + (is "Nondet_VCG.valid ?P ?f ?Q") apply (simp add: setThreadState_def setSchedulerAction_def) - apply (wp hoare_pre_cont[where a=rescheduleRequired]) + apply (wp hoare_pre_cont[where f=rescheduleRequired]) apply (rule_tac Q="\_. ?P and st_tcb_at' ((=) st) t" in hoare_post_imp) apply (clarsimp split: if_split) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs) @@ -1104,8 +1077,6 @@ lemma setCTE_assert_modify: apply (rule word_and_le2) apply (simp add: objBits_simps mask_def field_simps) apply (simp add: simpler_modify_def cong: option.case_cong if_cong) - apply (rule kernel_state.fold_congs[OF refl refl]) - apply (clarsimp simp: projectKO_opt_tcb cong: if_cong) apply (clarsimp simp: lookupAround2_char1 word_and_le2) apply (rule ccontr, clarsimp) apply (erule(2) ps_clearD) @@ -1122,7 +1093,7 @@ lemma setCTE_assert_modify: apply (erule disjE) apply clarsimp apply (frule(1) tcb_cte_cases_aligned_helpers) - apply (clarsimp simp: domI[where m = cte_cte_cases] field_simps) + apply (clarsimp simp: domI field_simps) apply (clarsimp simp: lookupAround2_char1 obj_at'_def projectKOs objBits_simps) apply (clarsimp simp: obj_at'_def lookupAround2_char1 @@ -1175,8 +1146,7 @@ lemma setCTE_isolatable: apply (erule notE[rotated], erule (3) tcb_ctes_clear[rotated]) apply (simp add: select_f_returns select_f_asserts split: if_split) apply (intro conjI impI) - apply (clarsimp simp: simpler_modify_def fun_eq_iff - partial_overwrite_fun_upd2 o_def + apply (clarsimp simp: simpler_modify_def fun_eq_iff partial_overwrite_fun_upd2 intro!: kernel_state.fold_congs[OF refl refl]) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) apply (erule notE[rotated], rule tcb_ctes_clear[rotated 2], assumption+) @@ -1249,37 +1219,24 @@ lemma thread_actions_isolatableD: lemma tcbSchedDequeue_rewrite: "monadic_rewrite True True (obj_at' (Not \ tcbQueued) t) (tcbSchedDequeue t) (return ())" apply (simp add: tcbSchedDequeue_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ queued" in monadic_rewrite_gen_asm) - apply (simp add: when_def) + apply wp_pre + apply monadic_rewrite_symb_exec_l + apply (monadic_rewrite_symb_exec_l_known False, simp) apply (rule monadic_rewrite_refl) - apply (wp threadGet_const) - apply (rule monadic_rewrite_symb_exec2) - apply wp+ - apply (rule monadic_rewrite_refl) - apply (clarsimp) + apply (wpsimp wp: threadGet_const)+ done +(* FIXME: improve automation here *) lemma switchToThread_rewrite: "monadic_rewrite True True (ct_in_state' (Not \ runnable') and cur_tcb' and obj_at' (Not \ tcbQueued) t) (switchToThread t) (do Arch.switchToThread t; setCurThread t od)" apply (simp add: switchToThread_def Thread_H.switchToThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind) - apply (rule tcbSchedDequeue_rewrite) - apply (rule monadic_rewrite_refl) - apply (wp Arch_switchToThread_obj_at_pre)+ - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec) - apply (wp+, simp) - apply (rule monadic_rewrite_refl) - apply (wp) + apply (monadic_rewrite_l tcbSchedDequeue_rewrite, simp) + (* strip LHS of getters and asserts until LHS and RHS are the same *) + apply (repeat_unless \rule monadic_rewrite_refl\ monadic_rewrite_symb_exec_l) + apply wpsimp+ apply (clarsimp simp: comp_def) done @@ -1317,9 +1274,33 @@ lemma 
threadGet_isolatable: thread_actions_isolatable_fail) done +lemma tcbQueued_put_tcb_state_regs_tcb: + "tcbQueued (put_tcb_state_regs_tcb tsr tcb) = tcbQueued tcb" + apply (clarsimp simp: put_tcb_state_regs_tcb_def) + by (cases tsr; clarsimp) + +lemma idleThreadNotQueued_isolatable: + "thread_actions_isolatable idx (stateAssert idleThreadNotQueued [])" + apply (simp add: stateAssert_def2 stateAssert_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + gets_isolatable + thread_actions_isolatable_if + thread_actions_isolatable_returns + thread_actions_isolatable_fail) + unfolding idleThreadNotQueued_def + apply (clarsimp simp: obj_at_partial_overwrite_If) + apply (clarsimp simp: obj_at'_def tcbQueued_put_tcb_state_regs_tcb) + apply wpsimp+ + done + lemma setCurThread_isolatable: "thread_actions_isolatable idx (setCurThread t)" - by (simp add: setCurThread_def modify_isolatable) + unfolding setCurThread_def + apply (rule thread_actions_isolatable_bind) + apply (rule idleThreadNotQueued_isolatable) + apply (fastforce intro: modify_isolatable) + apply wpsimp + done lemma isolate_thread_actions_tcbs_at: assumes f: "\x. \tcb_at' (idx x)\ f \\rv. tcb_at' (idx x)\" shows @@ -1342,7 +1323,7 @@ lemma isolate_thread_actions_rewrite_bind: \ monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) (f >>= g) (isolate_thread_actions idx (f' >>= g') (g'' o f'') (g''' o f'''))" - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) apply (rule monadic_rewrite_bind, assumption+) apply (wp isolate_thread_actions_tcbs_at) @@ -1350,7 +1331,7 @@ lemma isolate_thread_actions_rewrite_bind: apply (subst isolate_thread_actions_wrap_bind, assumption) apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) apply (rule monadic_rewrite_transverse) - apply (rule monadic_rewrite_bind2) + apply (rule monadic_rewrite_bind_l) apply (erule(1) thread_actions_isolatableD) apply (rule thread_actions_isolatableD, assumption+) apply (rule hoare_vcg_all_lift, assumption) @@ -1457,7 +1438,7 @@ lemma monadic_rewrite_isolate_final2: (isolate_thread_actions idx f f' f'') (isolate_thread_actions idx g g' g'')" apply (simp add: isolate_thread_actions_def split_def) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_bind_tail)+ apply (rule_tac P="\ s'. Q s" in monadic_rewrite_bind) apply (insert mr)[1] @@ -1465,14 +1446,14 @@ lemma monadic_rewrite_isolate_final2: apply auto[1] apply (rule_tac P="P and (\s. 
tcbs = get_tcb_state_regs o ksPSpace s o idx \ sa = ksSchedulerAction s)" - in monadic_rewrite_refl3) + in monadic_rewrite_pre_imp_eq) apply (clarsimp simp: exec_modify eqs return_def) apply wp+ apply (clarsimp simp: o_def eqs) done lemmas monadic_rewrite_isolate_final - = monadic_rewrite_isolate_final2[where R=\, OF monadic_rewrite_refl2, simplified] + = monadic_rewrite_isolate_final2[where R=\, OF monadic_rewrite_is_refl, simplified] lemma copy_registers_isolate_general: "\ inj idx; idx x = t; idx y = t' \ \ @@ -1492,7 +1473,7 @@ lemma copy_registers_isolate_general: select_f_returns o_def ksPSpace_update_partial_id) apply (simp add: return_def simpler_modify_def) apply (simp add: mapM_x_Cons) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) apply (rule isolate_thread_actions_rewrite_bind, assumption) apply (rule copy_register_isolate, assumption+) @@ -1586,27 +1567,17 @@ lemma setThreadState_rewrite_simple: (setThreadState st t) (threadSet (tcbState_update (\_. st)) t)" supply if_split[split del] - apply (simp add: setThreadState_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail)+ - apply (simp add: when_def) - apply (rule monadic_rewrite_gen_asm) - apply (subst if_not_P) - apply assumption - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_symb_exec2, - (wp empty_fail_isRunnable - | (simp only: getCurThread_def getSchedulerAction_def - , rule empty_fail_gets))+)+ - apply (rule monadic_rewrite_refl) - apply (simp add: conj_comms, wp hoare_vcg_imp_lift threadSet_tcbState_st_tcb_at') - apply (clarsimp simp: obj_at'_def sch_act_simple_def st_tcb_at'_def) + apply (simp add: setThreadState_def when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at'\) + (* take the threadSet, drop everything until return () *) + apply (rule monadic_rewrite_trans[OF monadic_rewrite_bind_tail]) + apply (rule monadic_rewrite_symb_exec_l_drop)+ + apply (rule monadic_rewrite_refl) + apply (wpsimp simp: getCurThread_def + wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at')+ apply (rule monadic_rewrite_refl) - apply clarsimp + apply (clarsimp simp: obj_at'_def sch_act_simple_def st_tcb_at'_def) done end diff --git a/proof/crefine/RISCV64/PSpace_C.thy b/proof/crefine/RISCV64/PSpace_C.thy index cbce98552a..a499d77aa6 100644 --- a/proof/crefine/RISCV64/PSpace_C.thy +++ b/proof/crefine/RISCV64/PSpace_C.thy @@ -43,7 +43,7 @@ lemma setObject_ccorres_helper: fixes ko :: "'a :: pspace_storable" assumes valid: "\\ (ko' :: 'a). \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ko_at' ko' p \} - c {s. (\\ksPSpace := ksPSpace \ (p \ injectKO ko)\, s) \ rf_sr}" + c {s. (\\ksPSpace := (ksPSpace \)(p \ injectKO ko)\, s) \ rf_sr}" shows "\ \ko :: 'a. updateObject ko = updateObject_default ko; \ko :: 'a. (1 :: machine_word) < 2 ^ objBits ko \ \ ccorres dc xfdc P P' hs (setObject p ko) c" diff --git a/proof/crefine/RISCV64/Recycle_C.thy b/proof/crefine/RISCV64/Recycle_C.thy index a7ddd5b717..dc3d306d2e 100644 --- a/proof/crefine/RISCV64/Recycle_C.thy +++ b/proof/crefine/RISCV64/Recycle_C.thy @@ -419,7 +419,7 @@ lemma mapM_x_store_memset_ccorres_assist: "\ko :: 'a. (1 :: machine_word) < 2 ^ objBits ko" assumes restr: "set slots \ S" assumes worker: "\ptr s s' (ko :: 'a). 
\ (s, s') \ rf_sr; ko_at' ko ptr s; ptr \ S \ - \ (s \ ksPSpace := ksPSpace s (ptr \ injectKO val)\, + \ (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val)\, globals_update (t_hrs_'_update (hrs_mem_update (heap_update_list ptr (replicateHider (2 ^ objBits val) (ucast c))))) s') \ rf_sr" @@ -697,8 +697,8 @@ lemma cpspace_relation_ep_update_ep2: (cslift t) ep_Ptr (cendpoint_relation (cslift t)); cendpoint_relation (cslift t') ep' endpoint; (cslift t' :: tcb_C ptr \ tcb_C) = cslift t \ - \ cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(ep_Ptr epptr \ endpoint)) + \ cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(ep_Ptr epptr \ endpoint)) ep_Ptr (cendpoint_relation (cslift t'))" apply (rule cmap_relationE1, assumption, erule ko_at_projectKO_opt) apply (rule_tac P="\a. cmap_relation a b c d" for b c d in rsubst, @@ -752,7 +752,7 @@ lemma ctcb_relation_blocking_ipc_badge: apply (simp add: isBlockedOnSend_def split: Structures_H.thread_state.split_asm) apply (clarsimp simp: cthread_state_relation_def) apply (clarsimp simp add: ctcb_relation_def cthread_state_relation_def) - apply (cases "tcbState tcb", simp_all add: "StrictC'_thread_state_defs") + apply (cases "tcbState tcb", simp_all add: ThreadState_defs) done lemma cendpoint_relation_q_cong: @@ -774,16 +774,6 @@ lemma cnotification_relation_q_cong: apply (auto intro: iffD1[OF tcb_queue_relation'_cong[OF refl refl refl]]) done -lemma tcbSchedEnqueue_ep_at: - "\obj_at' (P :: endpoint \ bool) ep\ - tcbSchedEnqueue t - \\rv. obj_at' P ep\" - including no_pre - apply (simp add: tcbSchedEnqueue_def unless_def null_def) - apply (wp threadGet_wp, clarsimp, wp+) - apply (clarsimp split: if_split, wp) - done - lemma ccorres_duplicate_guard: "ccorres r xf (P and P) Q hs f f' \ ccorres r xf P Q hs f f'" by (erule ccorres_guard_imp, auto) @@ -803,12 +793,13 @@ lemma cancelBadgedSends_ccorres: (UNIV \ {s. epptr_' s = Ptr ptr} \ {s. 
badge_' s = bdg}) [] (cancelBadgedSends ptr bdg) (Call cancelBadgedSends_'proc)" apply (cinit lift: epptr_' badge_' simp: whileAnno_def) - apply (simp add: list_case_return2 + apply (rule ccorres_stateAssert) + apply (simp add: list_case_return cong: list.case_cong Structures_H.endpoint.case_cong call_ignore_cong del: Collect_const) - apply (rule ccorres_pre_getEndpoint) - apply (rule_tac R="ko_at' rv ptr" and xf'="ret__unsigned_longlong_'" - and val="case rv of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle + apply (rule ccorres_pre_getEndpoint, rename_tac ep) + apply (rule_tac R="ko_at' ep ptr" and xf'="ret__unsigned_longlong_'" + and val="case ep of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle | SendEP q \ scast EPState_Send" in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg @@ -818,22 +809,22 @@ lemma cancelBadgedSends_ccorres: split: Structures_H.endpoint.split_asm) apply ceqv apply wpc - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) apply (simp add: Collect_True Collect_False endpoint_state_defs - ccorres_cond_iffs dc_def[symmetric] + ccorres_cond_iffs del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (drule_tac s = rv in sym, simp only:) - apply (rule_tac P="ko_at' rv ptr and invs'" in ccorres_cross_over_guard) + apply (drule_tac s = ep in sym, simp only:) + apply (rule_tac P="ko_at' ep ptr and invs'" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl]) - apply (rule_tac P="ko_at' rv ptr" + apply (rule_tac P="ko_at' ep ptr" in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -855,8 +846,9 @@ lemma cancelBadgedSends_ccorres: st_tcb_at' (\st. isBlockedOnSend st \ blockingObject st = ptr) x s) \ distinct (xs @ list) \ ko_at' IdleEP ptr s \ (\p. \x \ set (xs @ list). \rf. (x, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) - \ valid_queues s \ pspace_aligned' s \ pspace_distinct' s \ pspace_canonical' s - \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s" + \ pspace_aligned' s \ pspace_distinct' s \ pspace_canonical' s + \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" and P'="\xs. {s. ep_queue_relation' (cslift s) (xs @ list) (head_C (queue_' s)) (end_C (queue_' s))} \ {s. 
thread_' s = (case list of [] \ tcb_Ptr 0 @@ -909,7 +901,7 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: mask_def canonical_bit_def) subgoal by (auto split: if_split) subgoal by simp - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule hoare_pre, wp weak_sch_act_wf_lift_linear set_ep_valid_objs') apply (clarsimp simp: weak_sch_act_wf_def sch_act_wf_def) apply (fastforce simp: valid_ep'_def pred_tcb_at' split: list.splits) @@ -919,7 +911,7 @@ lemma cancelBadgedSends_ccorres: apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) apply (rule ccorres_init_tmp_lift2, ceqv) apply (rule ccorres_guard_imp2) - apply (simp add: bind_assoc dc_def[symmetric] + apply (simp add: bind_assoc del: Collect_const) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -944,9 +936,9 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: rf_sr_def) apply simp apply ceqv - apply (rule_tac P="ret__unsigned_longlong=blockingIPCBadge rva" in ccorres_gen_asm2) + apply (rule_tac P="ret__unsigned_longlong=blockingIPCBadge rv" in ccorres_gen_asm2) apply (rule ccorres_if_bind, rule ccorres_if_lhs) - apply (simp add: bind_assoc dc_def[symmetric]) + apply (simp add: bind_assoc) apply (rule ccorres_rhs_assoc)+ apply (ctac add: setThreadState_ccorres) apply (ctac add: tcbSchedEnqueue_ccorres) @@ -956,8 +948,9 @@ lemma cancelBadgedSends_ccorres: apply (rule_tac rrel=dc and xf=xfdc and P="\s. (\t \ set (x @ a # lista). tcb_at' t s) \ (\p. \t \ set (x @ a # lista). \rf. (t, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) - \ valid_queues s \ distinct (x @ a # lista) - \ pspace_aligned' s \ pspace_distinct' s" + \ distinct (x @ a # lista) + \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" and P'="{s. 
ep_queue_relation' (cslift s) (x @ a # lista) (head_C (queue_' s)) (end_C (queue_' s))}" in ccorres_from_vcg) @@ -973,8 +966,7 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: return_def rf_sr_def cstate_relation_def Let_def) apply (rule conjI) apply (clarsimp simp: cpspace_relation_def) - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule null_ep_queue) + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) subgoal by (simp add: o_def) apply (rule conjI) apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) @@ -997,9 +989,6 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: image_iff) apply (drule_tac x=p in spec) subgoal by fastforce - apply (rule conjI) - apply (erule cready_queues_relation_not_queue_ptrs, - auto dest: null_ep_schedD[unfolded o_def] simp: o_def)[1] apply (clarsimp simp: carch_state_relation_def cmachine_state_relation_def) apply (rule ccorres_symb_exec_r2) apply (erule spec) @@ -1008,16 +997,15 @@ lemma cancelBadgedSends_ccorres: apply wp apply simp apply vcg - apply (wp hoare_vcg_const_Ball_lift tcbSchedEnqueue_ep_at - sch_act_wf_lift) + apply (wp hoare_vcg_const_Ball_lift sch_act_wf_lift) apply simp apply (vcg exspec=tcbSchedEnqueue_cslift_spec) apply (wp hoare_vcg_const_Ball_lift sts_st_tcb_at'_cases - sts_sch_act sts_valid_queues setThreadState_oa_queued) + sts_sch_act sts_valid_objs') apply (vcg exspec=setThreadState_cslift_spec) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_symb_exec_r2) - apply (drule_tac x="x @ [a]" in spec, simp add: dc_def[symmetric]) + apply (drule_tac x="x @ [a]" in spec, simp) apply vcg apply (vcg spec=modifies) apply (thin_tac "\x. P x" for P) @@ -1030,21 +1018,18 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: typ_heap_simps st_tcb_at'_def) apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: ctcb_relation_blocking_ipc_badge) - apply (rule conjI, simp add: "StrictC'_thread_state_defs" mask_def) + apply (rule conjI, simp add: ThreadState_defs mask_def) apply (rule conjI) apply clarsimp apply (frule rf_sr_cscheduler_relation) apply (clarsimp simp: cscheduler_action_relation_def st_tcb_at'_def split: scheduler_action.split_asm) apply (rename_tac word) - apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge) - apply simp - subgoal by clarsimp - subgoal by clarsimp + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; simp?) subgoal by clarsimp apply clarsimp apply (rule conjI) - apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge) + apply (frule tcbSchedEnqueue_cslift_precond_discharge; simp?) 
subgoal by clarsimp apply clarsimp apply (rule context_conjI) @@ -1084,9 +1069,19 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp split: if_split) apply (drule sym_refsD, clarsimp) apply (drule(1) bspec)+ - by (auto simp: obj_at'_def projectKOs state_refs_of'_def pred_tcb_at'_def tcb_bound_refs'_def - dest!: symreftype_inverse') - + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply (fastforce simp: obj_at'_def projectKOs state_refs_of'_def pred_tcb_at'_def + tcb_bound_refs'_def + dest!: symreftype_inverse') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply fastforce + done lemma tcb_ptr_to_ctcb_ptr_force_fold: "x + 2 ^ ctcb_size_bits = ptr_val (tcb_ptr_to_ctcb_ptr x)" diff --git a/proof/crefine/RISCV64/Refine_C.thy b/proof/crefine/RISCV64/Refine_C.thy index 30351e517f..770d60cc1e 100644 --- a/proof/crefine/RISCV64/Refine_C.thy +++ b/proof/crefine/RISCV64/Refine_C.thy @@ -49,6 +49,7 @@ proof - show ?thesis apply (cinit') apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (ctac (no_vcg) add: getActiveIRQ_ccorres) apply (rule ccorres_Guard_Seq)? @@ -63,7 +64,7 @@ proof - apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply vcg apply vcg apply (clarsimp simp: irqInvalid_def ucast_8_32_neq Kernel_C.irqInvalid_def) @@ -76,7 +77,7 @@ proof - apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (rule_tac Q="\rv s. invs' s \ (\x. 
rv = Some x \ x \ RISCV64.maxIRQ) \ rv \ Some 0x3FF" in hoare_post_imp) apply (clarsimp simp: non_kernel_IRQs_def) apply (wp getActiveIRQ_le_maxIRQ getActiveIRQ_neq_Some0x3FF | simp)+ @@ -92,6 +93,7 @@ lemma handleUnknownSyscall_ccorres: (callKernel (UnknownSyscall n)) (Call handleUnknownSyscall_'proc)" apply (cinit' lift: w_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_symb_exec_r) apply (rule ccorres_pre_getCurThread) @@ -104,14 +106,12 @@ lemma handleUnknownSyscall_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (clarsimp, vcg) apply (clarsimp, rule conseqPre, vcg, clarsimp) apply clarsimp apply (intro impI conjI allI) - apply fastforce - apply (clarsimp simp: ct_not_ksQ) - apply (clarsimp simp add: sch_act_simple_def split: scheduler_action.split) + apply fastforce apply (rule active_ex_cap') apply (erule active_from_running') apply (erule invs_iflive') @@ -129,8 +129,10 @@ lemma handleVMFaultEvent_ccorres: (callKernel (VMFaultEvent vmfault_type)) (Call handleVMFaultEvent_'proc)" apply (cinit' lift:vm_faultType_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_pre_getCurThread) + apply (rename_tac thread) apply (simp add: catch_def) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) @@ -159,13 +161,13 @@ lemma handleVMFaultEvent_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ - apply (case_tac x, clarsimp, wp) + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply (case_tac rv, clarsimp, wp) apply (clarsimp, wp, simp) apply wp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: simple_sane_strg[unfolded sch_act_sane_not]) - apply (auto simp: ct_in_state'_def cfault_rel_def is_cap_fault_def ct_not_ksQ + apply (auto simp: ct_in_state'_def cfault_rel_def is_cap_fault_def elim: pred_tcb'_weakenE st_tcb_ex_cap'' dest: st_tcb_at_idle_thread' rf_sr_ksCurThread) done @@ -178,6 +180,7 @@ lemma handleUserLevelFault_ccorres: (callKernel (UserLevelFault word1 word2)) (Call handleUserLevelFault_'proc)" apply (cinit' lift:w_a_' w_b_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_symb_exec_r) apply (rule ccorres_pre_getCurThread) @@ -190,16 +193,14 @@ lemma handleUserLevelFault_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (clarsimp, vcg) apply (clarsimp, rule conseqPre, vcg, clarsimp) apply clarsimp apply (intro impI conjI allI) - apply (simp add: ct_in_state'_def) - apply (erule pred_tcb'_weakenE) - apply simp - apply (clarsimp simp: ct_not_ksQ) - apply (clarsimp simp add: sch_act_simple_def split: scheduler_action.split) + apply (simp add: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply simp apply (rule active_ex_cap') apply (erule 
active_from_running') apply (erule invs_iflive') @@ -231,6 +232,7 @@ lemma handleSyscall_ccorres: supply if_cong[cong] option.case_cong[cong] apply (cinit' lift: syscall_') apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) apply (simp add: handleE_def handleE'_def) apply (rule ccorres_split_nothrow_novcg) apply wpc @@ -371,11 +373,10 @@ lemma handleSyscall_ccorres: apply wp[1] apply clarsimp apply wp - apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s \ - (\p. ksCurThread s \ set (ksReadyQueues s p))" + apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s" in hoare_post_imp) apply (simp add: ct_in_state'_def) - apply (wp handleReply_sane handleReply_ct_not_ksQ) + apply (wp handleReply_sane) \ \SysYield\ apply (clarsimp simp: syscall_from_H_def syscall_defs) apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ @@ -401,11 +402,11 @@ lemma handleSyscall_ccorres: apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) - apply (wp schedule_invs' schedule_sch_act_wf | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + apply (wp schedule_invs' schedule_sch_act_wf + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (simp | wpc | wp hoare_drop_imp handleReply_sane handleReply_nonz_cap_to_ct schedule_invs' - handleReply_ct_not_ksQ[simplified] | strengthen ct_active_not_idle'_strengthen invs_valid_objs_strengthen)+ apply (rule_tac Q="\rv. invs' and ct_active'" in hoare_post_imp, simp) apply (wp hy_invs') @@ -423,7 +424,7 @@ lemma handleSyscall_ccorres: apply (frule active_ex_cap') apply (clarsimp simp: invs'_def valid_state'_def) apply (clarsimp simp: simple_sane_strg ct_in_state'_def st_tcb_at'_def obj_at'_def - isReply_def ct_not_ksQ irqInvalid_def Kernel_C.irqInvalid_def) + isReply_def irqInvalid_def Kernel_C.irqInvalid_def) apply (auto simp: syscall_from_H_def Kernel_C.SysSend_def split: option.split_asm) done @@ -456,7 +457,7 @@ lemma ccorres_corres_u_xf: apply (drule (1) bspec) apply (clarsimp simp: exec_C_def no_fail_def) apply (drule_tac x = a in spec) - apply (clarsimp simp:gets_def NonDetMonad.bind_def get_def return_def) + apply (clarsimp simp:gets_def Nondet_Monad.bind_def get_def return_def) apply (rule conjI) apply clarsimp apply (erule_tac x=0 in allE) @@ -487,7 +488,7 @@ lemma no_fail_callKernel: apply (rule corres_nofail) apply (rule corres_guard_imp) apply (rule kernel_corres) - apply (force simp: word_neq_0_conv) + apply (force simp: word_neq_0_conv schact_is_rct_def) apply (simp add: sch_act_simple_def) apply metis done @@ -500,6 +501,7 @@ lemma handleHypervisorEvent_ccorres: apply (simp add: callKernel_def handleEvent_def handleHypervisorEvent_C_def) apply (simp add: liftE_def bind_assoc) apply (rule ccorres_guard_imp) + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l) apply (cases t; simp add: handleHypervisorFault_def) apply (ctac (no_vcg) add: schedule_ccorres) @@ -509,7 +511,7 @@ lemma handleHypervisorEvent_ccorres: apply simp apply assumption apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply clarsimp+ done @@ -608,9 +610,9 @@ lemma callKernel_withFastpath_corres_C: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_r)+ apply (rule ccorres_Cond_rhs) - apply (simp add: dc_def[symmetric]) + apply simp 
apply (ctac add: ccorres_get_registers[OF fastpath_call_ccorres_callKernel]) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_reply_recv_ccorres_callKernel]) apply vcg apply (rule conseqPre, vcg, clarsimp) @@ -639,9 +641,9 @@ lemma threadSet_all_invs_triv': apply (simp add: tcb_cte_cases_def cteSizeBits_def) apply (simp add: exst_same_def) apply (wp thread_set_invs_trivial thread_set_ct_running thread_set_not_state_valid_sched - threadSet_invs_trivial threadSet_ct_running' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp thread_set_ct_in_state - | simp add: tcb_cap_cases_def tcb_arch_ref_def + | simp add: tcb_cap_cases_def tcb_arch_ref_def exst_same_def | rule threadSet_ct_in_state' | wp (once) hoare_vcg_disj_lift)+ apply clarsimp @@ -694,13 +696,13 @@ lemma entry_corres_C: apply simp apply (rule corres_split) (* FIXME: fastpath - apply (rule corres_cases[where R=fp], simp_all add: dc_def[symmetric])[1] - apply (rule callKernel_withFastpath_corres_C, simp) + apply (rule corres_cases[where R=fp]; simp) + apply (rule callKernel_withFastpath_corres_C) *) - apply (rule callKernel_corres_C[unfolded dc_def], simp) + apply (rule callKernel_corres_C) apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. t' = tcb_ptr_to_ctcb_ptr t"]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (rule getContext_corres[unfolded o_def], simp) + apply (rule getContext_corres, simp) apply (wp threadSet_all_invs_triv' callKernel_cur)+ apply (clarsimp simp: all_invs'_def invs'_def cur_tcb'_def valid_state'_def) apply simp @@ -730,15 +732,7 @@ lemma ct_running'_C: apply (frule (1) map_to_ko_atI') apply (erule obj_at'_weakenE) apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: - ThreadState_Running_def - ThreadState_BlockedOnReceive_def - ThreadState_BlockedOnSend_def - ThreadState_BlockedOnReply_def - ThreadState_BlockedOnNotification_def - ThreadState_Inactive_def - ThreadState_IdleThreadState_def - ThreadState_Restart_def) + apply (case_tac "tcbState ko"; simp add: ThreadState_defs) done lemma full_invs_both: @@ -802,7 +796,7 @@ lemma user_memory_update_corres_C: prefer 2 apply (clarsimp simp add: doMachineOp_def user_memory_update_def simpler_modify_def simpler_gets_def select_f_def - NonDetMonad.bind_def return_def) + Nondet_Monad.bind_def return_def) apply (thin_tac P for P)+ apply (case_tac a, clarsimp) apply (case_tac ksMachineState, clarsimp) @@ -829,7 +823,7 @@ lemma device_update_corres_C: apply (clarsimp simp add: setDeviceState_C_def simpler_modify_def) apply (rule ballI) apply (clarsimp simp: simpler_modify_def setDeviceState_C_def) - apply (clarsimp simp: doMachineOp_def device_memory_update_def NonDetMonad.bind_def in_monad + apply (clarsimp simp: doMachineOp_def device_memory_update_def Nondet_Monad.bind_def in_monad gets_def get_def return_def simpler_modify_def select_f_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) @@ -870,17 +864,22 @@ lemma dmo_domain_user_mem'[wp]: done lemma do_user_op_corres_C: - "corres_underlying rf_sr False False (=) (invs' and ex_abs einvs) \ - (doUserOp f tc) (doUserOp_C f tc)" + "corres_underlying rf_sr False False (=) + (invs' and ksReadyQueues_asrt and ex_abs einvs) \ + (doUserOp f tc) (doUserOp_C f tc)" apply (simp only: doUserOp_C_def doUserOp_def split_def) apply (rule corres_guard_imp) apply (rule_tac P=\ and P'=\ and 
r'="(=)" in corres_split) apply (clarsimp simp: simpler_gets_def getCurThread_def corres_underlying_def rf_sr_def cstate_relation_def Let_def) - apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) apply (clarsimp simp: cstate_to_A_def absKState_def rf_sr_def cstate_to_H_correct ptable_lift_def) - apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) apply (clarsimp simp: cstate_to_A_def absKState_def rf_sr_def cstate_to_H_correct ptable_rights_def) apply (rule_tac P=pspace_distinct' and P'=\ and r'="(=)" @@ -897,7 +896,7 @@ lemma do_user_op_corres_C: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) apply (drule(1) device_mem_C_relation[symmetric]) - apply (simp add: comp_def) + apply simp apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: cstate_relation_def rf_sr_def Let_def cmachine_state_relation_def) @@ -917,7 +916,7 @@ lemma do_user_op_corres_C: apply (rule corres_split[OF user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C, where R="\\" and R'="\\"]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (intro conjI allI ballI impI) apply ((clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def)+)[5] apply (clarsimp simp: ex_abs_def restrict_map_def @@ -978,6 +977,9 @@ lemma refinement2_both: apply (subst cstate_to_H_correct) apply (fastforce simp: full_invs'_def invs'_def) apply (clarsimp simp: rf_sr_def) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) apply (simp add:absKState_def observable_memory_def absExst_def) apply (rule MachineTypes.machine_state.equality,simp_all)[1] apply (rule ext) @@ -1004,13 +1006,35 @@ lemma refinement2_both: apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) - apply (fastforce simp: full_invs'_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce apply (erule_tac P="a \ b \ c \ (\x. 
e x)" for a b c d e in disjE) apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) - apply (fastforce simp: full_invs'_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce apply (clarsimp simp: check_active_irq_C_def check_active_irq_H_def) apply (rule rev_mp, rule check_active_irq_corres_C) @@ -1102,7 +1126,7 @@ lemma kernel_all_subset_kernel: check_active_irq_H_def checkActiveIRQ_def) apply clarsimp apply (erule in_monad_imp_rewriteE[where F=True]) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_bind_tail)+ apply (rule monadic_rewrite_bind_head[where P=\]) apply (simp add: callKernel_C_def callKernel_withFastpath_C_def diff --git a/proof/crefine/RISCV64/Retype_C.thy b/proof/crefine/RISCV64/Retype_C.thy index 3b50597128..c5001d8e53 100644 --- a/proof/crefine/RISCV64/Retype_C.thy +++ b/proof/crefine/RISCV64/Retype_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -633,46 +634,6 @@ lemma field_of_t_refl: apply (simp add: unat_eq_0) done -lemma typ_slice_list_array: - "x < size_td td * n - \ typ_slice_list (map (\i. DTPair td (nm i)) [0.. k < n - \ gd (p +\<^sub>p int k) - \ h_t_valid htd gd (p +\<^sub>p int k)" - apply (clarsimp simp: h_t_array_valid_def h_t_valid_def valid_footprint_def - size_of_def[symmetric, where t="TYPE('a)"]) - apply (drule_tac x="k * size_of TYPE('a) + y" in spec) - apply (drule mp) - apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) - apply (simp add: mult.commute) - apply (clarsimp simp: ptr_add_def add.assoc) - apply (erule map_le_trans[rotated]) - apply (clarsimp simp: uinfo_array_tag_n_m_def) - apply (subst typ_slice_list_array) - apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) - apply (simp add: mult.commute size_of_def) - apply (simp add: size_of_def list_map_mono) - done - lemma h_t_valid_ptr_retyps_gen: assumes sz: "nptrs * size_of TYPE('a :: mem_type) < addr_card" and gd: "gd p'" @@ -804,11 +765,6 @@ lemma ptr_add_orth: apply (simp add: addr_card_wb [symmetric]) done -lemma dom_lift_t_heap_update: - "dom (lift_t g (hrs_mem_update v hp)) = dom (lift_t g hp)" - by (clarsimp simp add: lift_t_def lift_typ_heap_if s_valid_def hrs_htd_def hrs_mem_update_def split_def dom_def - intro!: Collect_cong split: if_split) - lemma h_t_valid_ptr_retyps_gen_same: assumes guard: "\n' < nptrs. gd (CTypesDefs.ptr_add (Ptr p :: 'a ptr) (of_nat n'))" assumes cleared: "region_is_bytes' p (nptrs * size_of TYPE('a :: mem_type)) htd" @@ -1141,7 +1097,7 @@ lemma ptr_add_to_new_cap_addrs: shows "(CTypesDefs.ptr_add (Ptr ptr :: 'a :: mem_type ptr) \ of_nat) ` {k. 
k < n} = Ptr ` set (new_cap_addrs n ptr ko)" unfolding new_cap_addrs_def - apply (simp add: comp_def image_image shiftl_t2n size_of_m field_simps) + apply (simp add: image_image shiftl_t2n size_of_m field_simps) apply (clarsimp simp: atLeastLessThan_def lessThan_def) done @@ -1173,29 +1129,6 @@ lemma update_ti_t_machine_word_0s: "word_rcat [0, 0, 0, 0,0,0,0,(0 :: word8)] = (0 :: machine_word)" by (simp_all add: typ_info_word word_rcat_def bin_rcat_def) -lemma is_aligned_ptr_aligned: - fixes p :: "'a :: c_type ptr" - assumes al: "is_aligned (ptr_val p) n" - and alignof: "align_of TYPE('a) = 2 ^ n" - shows "ptr_aligned p" - using al unfolding is_aligned_def ptr_aligned_def - by (simp add: alignof) - -lemma is_aligned_c_guard: - "is_aligned (ptr_val p) n - \ ptr_val p \ 0 - \ align_of TYPE('a) = 2 ^ m - \ size_of TYPE('a) \ 2 ^ n - \ m \ n - \ c_guard (p :: ('a :: c_type) ptr)" - apply (clarsimp simp: c_guard_def c_null_guard_def) - apply (rule conjI) - apply (rule is_aligned_ptr_aligned, erule(1) is_aligned_weaken, simp) - apply (erule is_aligned_get_word_bits, simp_all) - apply (rule intvl_nowrap[where x=0, simplified], simp) - apply (erule is_aligned_no_wrap_le, simp+) - done - lemma retype_guard_helper: assumes cover: "range_cover p sz (objBitsKO ko) n" and ptr0: "p \ 0" @@ -2292,21 +2225,6 @@ next done qed -(* FIXME: move *) -lemma ccorres_to_vcg_nf: - "\ccorres rrel xf P P' [] a c; no_fail Q a; \s. P s \ Q s\ - \ \\ {s. P \ \ s \ P' \ (\, s) \ rf_sr} c - {s. \(rv, \')\fst (a \). (\', s) \ rf_sr \ rrel rv (xf s)}" - apply (rule HoarePartial.conseq_exploit_pre) - apply clarsimp - apply (rule conseqPre) - apply (drule ccorres_to_vcg') - prefer 2 - apply simp - apply (simp add: no_fail_def) - apply clarsimp - done - lemma mdb_node_get_mdbNext_heap_ccorres: "ccorres (=) ret__unsigned_longlong_' \ UNIV hs (liftM (mdbNext \ cteMDBNode) (getCTE parent)) @@ -2338,8 +2256,8 @@ lemma getCTE_pre_cte_at: lemmas ccorres_getCTE_cte_at = ccorres_guard_from_wp [OF getCTE_pre_cte_at empty_fail_getCTE] ccorres_guard_from_wp_bind [OF getCTE_pre_cte_at empty_fail_getCTE] -lemmas ccorres_guard_from_wp_liftM = ccorres_guard_from_wp [OF liftM_pre iffD2 [OF empty_fail_liftM]] -lemmas ccorres_guard_from_wp_bind_liftM = ccorres_guard_from_wp_bind [OF liftM_pre iffD2 [OF empty_fail_liftM]] +lemmas ccorres_guard_from_wp_liftM = ccorres_guard_from_wp [OF liftM_pre empty_fail_liftM] +lemmas ccorres_guard_from_wp_bind_liftM = ccorres_guard_from_wp_bind [OF liftM_pre empty_fail_liftM] lemmas ccorres_liftM_getCTE_cte_at = ccorres_guard_from_wp_liftM [OF getCTE_pre_cte_at empty_fail_getCTE] ccorres_guard_from_wp_bind_liftM [OF getCTE_pre_cte_at empty_fail_getCTE] @@ -2370,9 +2288,10 @@ lemma insertNewCap_ccorres_helper: apply (rule conjI) apply (erule (2) cmap_relation_updI) apply (simp add: ccap_relation_def ccte_relation_def cte_lift_def) - subgoal by (simp add: cte_to_H_def map_option_Some_eq2 mdb_node_to_H_def to_bool_mask_to_bool_bf is_aligned_neg_mask_weaken - c_valid_cte_def true_def canonical_address_sign_extended sign_extended_iff_sign_extend cteSizeBits_def - split: option.splits flip: canonical_bit_def) + subgoal by (simp add: cte_to_H_def map_option_Some_eq2 mdb_node_to_H_def to_bool_mask_to_bool_bf + is_aligned_neg_mask_weaken c_valid_cte_def canonical_address_sign_extended + sign_extended_iff_sign_extend cteSizeBits_def + split: option.splits flip: canonical_bit_def) subgoal by simp apply (erule_tac t = s' in ssubst) apply (simp cong: lifth_update) @@ -2682,16 +2601,6 @@ lemma 
createNewCaps_untyped_if_helper: (\ gbits \ sz) = (s' \ \of_nat sz < (of_nat gbits :: machine_word)\)" by (clarsimp simp: not_le unat_of_nat64 word_less_nat_alt lt_word_bits_lt_pow) -lemma true_mask1 [simp]: - "true && mask (Suc 0) = true" - unfolding true_def - by (simp add: bang_eq cong: conj_cong) - -lemma to_bool_simps [simp]: - "to_bool true" "\ to_bool false" - unfolding true_def false_def to_bool_def - by simp_all - lemma heap_list_update': "\ n = length v; length v \ 2 ^ word_bits \ \ heap_list (heap_update_list p v h) n p = v" by (simp add: heap_list_update addr_card_wb) @@ -3041,7 +2950,6 @@ lemma cnc_tcb_helper: assumes rfsr: "(\\ksPSpace := ks\, x) \ rf_sr" assumes al: "is_aligned (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb)" assumes ptr0: "ctcb_ptr_to_tcb_ptr p \ 0" - assumes vq: "valid_queues \" assumes pal: "pspace_aligned' (\\ksPSpace := ks\)" assumes pno: "pspace_no_overlap' (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb) (\\ksPSpace := ks\)" assumes pds: "pspace_distinct' (\\ksPSpace := ks\)" @@ -3167,8 +3075,7 @@ proof - apply (simp add: hrs_mem_def, subst rep0) apply (simp only: take_replicate, simp add: cte_C_size objBits_simps') apply (simp add: cte_C_size objBits_simps') - apply (simp add: fun_eq_iff o_def - split: if_split) + apply (simp add: fun_eq_iff split: if_split) apply (simp add: hrs_comm packed_heap_update_collapse typ_heap_simps) apply (subst clift_heap_update_same_td_name', simp_all, @@ -3400,20 +3307,20 @@ proof - unfolding ctcb_relation_def makeObject_tcb heap_updates_defs initContext_registers_def apply (simp add: fbtcb minBound_word) apply (intro conjI) - apply (simp add: cthread_state_relation_def thread_state_lift_def - eval_nat_numeral ThreadState_Inactive_def) - apply (clarsimp simp: ccontext_relation_def newContext_def2 carch_tcb_relation_def - newArchTCB_def cregs_relation_def atcbContextGet_def) - apply (case_tac r; simp add: C_register_defs index_foldr_update - atcbContext_def newArchTCB_def newContext_def - initContext_def) - apply (clarsimp) - apply (simp add: thread_state_lift_def index_foldr_update atcbContextGet_def) - apply (simp add: Kernel_Config.timeSlice_def) - apply (simp add: cfault_rel_def seL4_Fault_lift_def seL4_Fault_get_tag_def Let_def - lookup_fault_lift_def lookup_fault_get_tag_def lookup_fault_invalid_root_def - index_foldr_update seL4_Fault_NullFault_def option_to_ptr_def option_to_0_def - split: if_split)+ + apply (simp add: cthread_state_relation_def thread_state_lift_def + eval_nat_numeral ThreadState_defs) + apply (clarsimp simp: ccontext_relation_def newContext_def2 carch_tcb_relation_def + newArchTCB_def cregs_relation_def atcbContextGet_def) + apply (case_tac r; simp add: C_register_defs index_foldr_update + atcbContext_def newArchTCB_def newContext_def + initContext_def) + apply (simp add: thread_state_lift_def index_foldr_update atcbContextGet_def) + apply (simp add: Kernel_Config.timeSlice_def) + apply (simp add: cfault_rel_def seL4_Fault_lift_def seL4_Fault_get_tag_def Let_def + lookup_fault_lift_def lookup_fault_get_tag_def lookup_fault_invalid_root_def + index_foldr_update seL4_Fault_NullFault_def option_to_ptr_def option_to_0_def + split: if_split)+ + apply (simp add: option_to_ctcb_ptr_def) done have pks: "ks (ctcb_ptr_to_tcb_ptr p) = None" @@ -3464,15 +3371,6 @@ proof - apply (fastforce simp: dom_def) done - hence kstcb: "\qdom prio. 
ctcb_ptr_to_tcb_ptr p \ set (ksReadyQueues \ (qdom, prio))" using vq - apply (clarsimp simp add: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x = qdom in spec) - apply (drule_tac x = prio in spec) - apply clarsimp - apply (drule (1) bspec) - apply (simp add: obj_at'_def) - done - have ball_subsetE: "\P S R. \ \x \ S. P x; R \ S \ \ \x \ R. P x" by blast @@ -3596,7 +3494,7 @@ proof - apply (simp add: cl_cte [simplified] cl_tcb [simplified] cl_rest [simplified] tag_disj_via_td_name) apply (clarsimp simp: cready_queues_relation_def Let_def htd_safe[simplified] kernel_data_refs_domain_eq_rotate) - apply (simp add: heap_updates_def kstcb tcb_queue_update_other' hrs_htd_update + apply (simp add: heap_updates_def tcb_queue_update_other' hrs_htd_update ptr_retyp_to_array[simplified] irq[simplified]) done qed @@ -3844,7 +3742,7 @@ lemma mapM_x_storeWord_step: apply (subst if_not_P) apply (subst not_less) apply (erule is_aligned_no_overflow) - apply (simp add: mapM_x_map comp_def upto_enum_word del: upt.simps) + apply (simp add: mapM_x_map upto_enum_word del: upt.simps) apply (subst div_power_helper_64 [OF sz2, simplified]) apply assumption apply (simp add: word_bits_def unat_minus_one del: upt.simps) @@ -4418,12 +4316,10 @@ lemma ccorres_placeNewObject_endpoint: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ep [where ptr=regionBase and n="1" and sz="objBitsKO (KOEndpoint makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -4456,12 +4352,10 @@ lemma ccorres_placeNewObject_notification: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ntfn [where ptr=regionBase and n="1" and sz="objBitsKO (KONotification makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps' - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps' + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -4520,11 +4414,10 @@ lemma ccorres_placeNewObject_captable: apply (clarsimp simp: split_def new_cap_addrs_def) apply (cut_tac createObjects_ccorres_cte [where ptr=regionBase and n="2 ^ unat userSize" and sz="unat userSize + objBitsKO (KOCTE makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def cteSizeBits_def)+ - apply 
(clarsimp simp: split_def objBitsKO_def - Fun.comp_def rf_sr_def split_def Let_def cteSizeBits_def - new_cap_addrs_def field_simps power_add ptr_retyps_gen_def - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def cteSizeBits_def)+ + apply (clarsimp simp: split_def objBitsKO_def rf_sr_def split_def Let_def cteSizeBits_def + new_cap_addrs_def field_simps power_add ptr_retyps_gen_def + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv range_cover_def) apply (clarsimp simp: objBitsKO_def objBits_simps' range_cover.aligned) apply (clarsimp simp: no_fail_def) @@ -4581,8 +4474,8 @@ lemma Arch_initContext_spec': apply (rule allI, rule conseqPre) apply (rule hoarep.Catch[rotated], vcg) apply (rule conseqPost[where A'="{}" and Q'=Q and Q=Q for Q, simplified]) - apply ((vcg , - clarsimp simp: hrs_mem_update_compose h_val_id packed_heap_update_collapse o_def + apply ((vcg, + clarsimp simp: hrs_mem_update_compose h_val_id packed_heap_update_collapse array_updates_rev_app))+ apply (auto simp: h_val_heap_same_hrs_mem_update_typ_disj[OF h_t_valid_c_guard_field _ tag_disj_via_td_name] export_tag_adjust_ti typ_uinfo_t_def array_updates_rev @@ -4591,7 +4484,8 @@ lemma Arch_initContext_spec': lemma ccorres_placeNewObject_tcb: "ccorresG rf_sr \ dc xfdc - (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase tcbBlockSizeBits and valid_queues and (\s. sym_refs (state_refs_of' s)) + (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase tcbBlockSizeBits + and (\s. sym_refs (state_refs_of' s)) and (\s. 2 ^ tcbBlockSizeBits \ gsMaxObjectSize s) and ret_zero regionBase (2 ^ tcbBlockSizeBits) and K (regionBase \ 0 \ range_cover regionBase tcbBlockSizeBits tcbBlockSizeBits 1 @@ -4630,7 +4524,6 @@ proof - apply (rule ptr_retyp_h_t_valid) apply simp apply (rule tcb_ptr_orth_cte_ptrs') - apply (simp add: o_def) apply (intro conjI allI impI) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def kernel_data_refs_domain_eq_rotate) @@ -4687,11 +4580,11 @@ lemma placeNewObject_pte: apply (clarsimp simp: split_def new_cap_addrs_def) apply (cut_tac s=\ in createObjects_ccorres_pte [where ptr=regionBase and sz=pageBits]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ apply (clarsimp simp: split_def objBitsKO_def archObjSize_def - Fun.comp_def rf_sr_def split_def Let_def ptr_retyps_gen_def - new_cap_addrs_def field_simps power_add - cong: globals.unfold_congs) + rf_sr_def split_def Let_def ptr_retyps_gen_def + new_cap_addrs_def field_simps power_add + cong: globals.unfold_congs) apply (simp add: Int_ac bit_simps) apply (clarsimp simp: word_bits_conv range_cover_def archObjSize_def bit_simps) apply (clarsimp simp: objBitsKO_def range_cover.aligned archObjSize_def bit_simps) @@ -4863,7 +4756,7 @@ qed lemma placeNewObject_user_data: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase (pageBits+us) - and valid_queues and valid_machine_state' + and valid_machine_state' and ret_zero regionBase (2 ^ (pageBits+us)) and (\s. sym_refs (state_refs_of' s)) and (\s. 
2^(pageBits + us) \ gsMaxObjectSize s) @@ -5002,7 +4895,7 @@ lemma placeNewObject_user_data_device: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and ret_zero regionBase (2 ^ (pageBits + us)) - and pspace_no_overlap' regionBase (pageBits+us) and valid_queues + and pspace_no_overlap' regionBase (pageBits+us) and (\s. sym_refs (state_refs_of' s)) and (\s. 2^(pageBits + us) \ gsMaxObjectSize s) and K (regionBase \ 0 \ range_cover regionBase (pageBits + us) (pageBits+us) (Suc 0) @@ -5208,13 +5101,12 @@ proof - apply clarify apply (intro conjI) apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' invs_valid_global' - APIType_capBits_def invs_queues invs_valid_objs' + APIType_capBits_def invs_valid_objs' invs_urz pageBits_def) apply clarsimp apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def framesize_to_H_def cap_to_H_simps cap_page_table_cap_lift - vmrights_to_H_def) - apply (clarsimp simp: to_bool_def false_def isFrameType_def) + vmrights_to_H_def isFrameType_def) done qed @@ -5238,7 +5130,7 @@ lemma gsCNodes_update_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -5290,15 +5182,11 @@ lemma threadSet_domain_ccorres [corres]: apply (simp add: map_to_ctes_upd_tcb_no_ctes map_to_tcbs_upd tcb_cte_cases_def cteSizeBits_def) apply (simp add: cep_relations_drop_fun_upd cvariable_relation_upd_const ko_at_projectKO_opt) - apply (rule conjI) - apply (drule ko_at_projectKO_opt) - apply (erule (2) cmap_relation_upd_relI) - subgoal by (simp add: ctcb_relation_def) - apply assumption - apply simp - apply (erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) + apply (drule ko_at_projectKO_opt) + apply (erule (2) cmap_relation_upd_relI) + subgoal by (simp add: ctcb_relation_def) + apply assumption + apply simp done lemma createObject_ccorres: @@ -5337,11 +5225,11 @@ proof - apply (rule ccorres_cond_seq) (* Architecture specific objects. 
*) apply (rule_tac - Q="createObject_hs_preconds regionBase newType userSize isdev" and - S="createObject_c_preconds1 regionBase newType userSize isdev" and - R="createObject_hs_preconds regionBase newType userSize isdev" and - T="createObject_c_preconds1 regionBase newType userSize isdev" - in ccorres_Cond_rhs) + Q="createObject_hs_preconds regionBase newType userSize isdev" and + S="createObject_c_preconds1 regionBase newType userSize isdev" and + R="createObject_hs_preconds regionBase newType userSize isdev" and + T="createObject_c_preconds1 regionBase newType userSize isdev" + in ccorres_Cond_rhs) apply (subgoal_tac "toAPIType newType = None") apply clarsimp apply (rule ccorres_rhs_assoc)+ @@ -5385,10 +5273,9 @@ proof - apply (rule conseqPre, vcg, clarsimp) apply simp apply (clarsimp simp: ccap_relation_def cap_to_H_def - getObjectSize_def apiGetObjectSize_def - cap_untyped_cap_lift to_bool_eq_0 true_def - aligned_add_aligned sign_extend_canonical_address - split: option.splits) + getObjectSize_def apiGetObjectSize_def cap_untyped_cap_lift + aligned_add_aligned sign_extend_canonical_address + split: option.splits) apply (subst word_le_mask_eq, clarsimp simp: mask_def, unat_arith, auto simp: word_bits_conv untypedBits_defs)[1] @@ -5398,11 +5285,11 @@ proof - intro!: Corres_UL_C.ccorres_cond_empty Corres_UL_C.ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" in + ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) apply (ccorres_remove_UNIV_guard) apply (simp add: hrs_htd_update) @@ -5419,14 +5306,13 @@ proof - apply (simp add: obj_at'_real_def) apply (wp placeNewObject_ko_wp_at') apply (vcg exspec=Arch_initContext_modifies) - apply (clarsimp simp: dc_def) + apply clarsimp apply vcg apply (rule conseqPre, vcg, clarsimp) apply (clarsimp simp: createObject_hs_preconds_def createObject_c_preconds_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (simp add: getObjectSize_def objBits_simps word_bits_conv apiGetObjectSize_def @@ -5435,29 +5321,27 @@ proof - region_actually_is_bytes_def APIType_capBits_def) apply (frule(1) ghost_assertion_size_logic_no_unat) apply (clarsimp simp: ccap_relation_def cap_to_H_def - getObjectSize_def apiGetObjectSize_def - cap_thread_cap_lift to_bool_def true_def - aligned_add_aligned - split: option.splits) + getObjectSize_def apiGetObjectSize_def + cap_thread_cap_lift aligned_add_aligned + split: option.splits) apply (frule range_cover.aligned) apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs tcb_ptr_to_ctcb_ptr_def invs_valid_objs' invs_urz isFrameType_def - simp flip: canonical_bit_def) + simp flip: canonical_bit_def) (* Endpoint *) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def nAPIObjects_def - word_sle_def intro!: ccorres_cond_empty ccorres_cond_univ - ccorres_rhs_assoc) + toAPIType_def nAPIObjects_def word_sle_def + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType 
apiobject_type.EndpointObject) - (unat (userSizea :: machine_word)) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.EndpointObject) - (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.EndpointObject) + (unat (userSizea :: machine_word)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.EndpointObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) apply (simp add: hrs_htd_update) apply (ctac (no_vcg) pre only: add: ccorres_placeNewObject_endpoint) apply (rule ccorres_symb_exec_r) @@ -5466,35 +5350,34 @@ proof - apply (rule conseqPre, vcg, clarsimp) apply wp apply (clarsimp simp: ccap_relation_def cap_to_H_def getObjectSize_def - objBits_simps apiGetObjectSize_def epSizeBits_def - cap_endpoint_cap_lift to_bool_def true_def sign_extend_canonical_address - split: option.splits dest!: range_cover.aligned) + objBits_simps apiGetObjectSize_def epSizeBits_def + cap_endpoint_cap_lift sign_extend_canonical_address + split: option.splits + dest!: range_cover.aligned) apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (auto simp: getObjectSize_def objBits_simps apiGetObjectSize_def epSizeBits_def word_bits_conv - elim!: is_aligned_no_wrap' intro!: range_cover_simpleI)[1] + elim!: is_aligned_no_wrap' + intro!: range_cover_simpleI)[1] (* Notification *) apply (clarsimp simp: createObject_c_preconds_def) - apply (clarsimp simp: getObjectSize_def objBits_simps - apiGetObjectSize_def - epSizeBits_def word_bits_conv word_sle_def word_sless_def) + apply (clarsimp simp: getObjectSize_def objBits_simps apiGetObjectSize_def + epSizeBits_def word_bits_conv word_sle_def word_sless_def) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def nAPIObjects_def - word_sle_def intro!: ccorres_cond_empty ccorres_cond_univ - ccorres_rhs_assoc) + toAPIType_def nAPIObjects_def word_sle_def + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.NotificationObject) - (unat (userSizea :: machine_word)) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.NotificationObject) - (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.NotificationObject) + (unat (userSizea :: machine_word)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.NotificationObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) apply (simp add: hrs_htd_update) apply (ctac (no_vcg) pre only: add: ccorres_placeNewObject_notification) apply (rule ccorres_symb_exec_r) @@ -5503,14 +5386,14 @@ proof - apply (rule conseqPre, vcg, clarsimp) apply wp apply (clarsimp simp: ccap_relation_def cap_to_H_def - getObjectSize_def sign_extend_canonical_address - apiGetObjectSize_def ntfnSizeBits_def objBits_simps - cap_notification_cap_lift to_bool_def true_def - dest!: range_cover.aligned split: option.splits) + getObjectSize_def sign_extend_canonical_address + apiGetObjectSize_def ntfnSizeBits_def objBits_simps + cap_notification_cap_lift + dest!: range_cover.aligned + split: option.splits) apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule 
invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (auto simp: getObjectSize_def objBits_simps apiGetObjectSize_def @@ -5523,18 +5406,18 @@ proof - apiGetObjectSize_def ntfnSizeBits_def word_bits_conv) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def nAPIObjects_def - word_sle_def word_sless_def zero_le_sint - intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc - ccorres_move_c_guards ccorres_Guard_Seq) + toAPIType_def nAPIObjects_def + word_sle_def word_sless_def zero_le_sint + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc + ccorres_move_c_guards ccorres_Guard_Seq) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.CapTableObject) - (unat (userSizea :: machine_word)) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.CapTableObject) - (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.CapTableObject) + (unat (userSizea :: machine_word)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.CapTableObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) apply (simp add:field_simps hrs_htd_update) apply (ctac pre only: add: ccorres_placeNewObject_captable) apply (subst gsCNodes_update) @@ -5551,7 +5434,6 @@ proof - apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (frule(1) ghost_assertion_size_logic_no_unat) apply (clarsimp simp: getObjectSize_def objBits_simps @@ -5565,14 +5447,12 @@ proof - apply (frule range_cover.strong_times_64[folded addr_card_wb], simp+) apply (subst h_t_array_valid_retyp, simp+) apply (simp add: power_add cte_C_size cteSizeBits_def) - apply (clarsimp simp: ccap_relation_def cap_to_H_def - cap_cnode_cap_lift to_bool_def true_def - getObjectSize_def - apiGetObjectSize_def cteSizeBits_def - objBits_simps field_simps is_aligned_power2 - addr_card_wb is_aligned_weaken[where y=2] - is_aligned_neg_mask_weaken - split: option.splits) + apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_cnode_cap_lift + getObjectSize_def apiGetObjectSize_def cteSizeBits_def + objBits_simps field_simps is_aligned_power2 + addr_card_wb is_aligned_weaken[where y=2] + is_aligned_neg_mask_weaken + split: option.splits) apply (rule conjI) apply (frule range_cover.aligned) apply (simp add: aligned_and is_aligned_weaken sign_extend_canonical_address) @@ -5600,7 +5480,7 @@ lemma ccorres_guard_impR: lemma typ_clear_region_dom: "dom (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: 'b :: mem_type typ_heap) \ dom ((clift hp) :: 'b :: mem_type typ_heap)" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def) + apply (clarsimp simp:lift_t_def lift_typ_heap_def comp_def) apply (clarsimp simp:lift_state_def) apply (case_tac hp) apply (clarsimp simp:) @@ -6502,7 +6382,8 @@ lemma createObject_caps_overlap_reserved_ret': apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_caps_overlap_reserved_ret'[where sz = "APIType_capBits ty us"]]) apply assumption - apply (case_tac r,simp) + apply (rename_tac rv s) + apply (case_tac rv,simp) apply clarsimp apply (erule caps_overlap_reserved'_subseteq) apply (rule untypedRange_in_capRange) @@ -6575,7 +6456,8 @@ lemma createObject_IRQHandler: apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_IRQHandler[where irq = x 
and P = "\_ _. False"]]) apply assumption - apply (case_tac r,clarsimp+) + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) apply (clarsimp simp:word_bits_conv) done @@ -6592,7 +6474,8 @@ lemma createObject_capClass[wp]: apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_range_helper]) apply assumption - apply (case_tac r,clarsimp+) + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) apply (clarsimp simp:word_bits_conv ) apply (rule range_cover_full) apply (simp add:word_bits_conv)+ @@ -7353,7 +7236,7 @@ shows "ccorres dc xfdc apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2, simp) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_add_return) - apply (simp only: dc_def[symmetric] hrs_htd_update) + apply (simp only: hrs_htd_update) apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? apply (rule ccorres_split_nothrow, rule_tac S="{ptr .. ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1}" @@ -7514,9 +7397,9 @@ shows "ccorres dc xfdc including no_pre apply (wp insertNewCap_invs' insertNewCap_valid_pspace' insertNewCap_caps_overlap_reserved' insertNewCap_pspace_no_overlap' insertNewCap_caps_no_overlap'' insertNewCap_descendants_range_in' - insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at static_imp_wp) + insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at hoare_weak_lift_imp) apply (wp insertNewCap_cte_wp_at_other) - apply (wp hoare_vcg_all_lift static_imp_wp insertNewCap_cte_at) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp insertNewCap_cte_at) apply (clarsimp simp:conj_comms | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_distinct')+ @@ -7550,7 +7433,7 @@ shows "ccorres dc xfdc hoare_vcg_prop createObject_gsCNodes_p createObject_cnodes_have_size) apply (rule hoare_vcg_conj_lift[OF createObject_capRange_helper]) apply (wp createObject_cte_wp_at' createObject_ex_cte_cap_wp_to - createObject_no_inter[where sz = sz] hoare_vcg_all_lift static_imp_wp)+ + createObject_no_inter[where sz = sz] hoare_vcg_all_lift hoare_weak_lift_imp)+ apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace' field_simps range_cover.sz conj_comms range_cover.aligned range_cover_sz' is_aligned_shiftl_self aligned_add_aligned[OF range_cover.aligned]) @@ -7712,7 +7595,7 @@ shows "ccorres dc xfdc apply (simp add: o_def) apply (case_tac newType, simp_all add: object_type_from_H_def Kernel_C_defs - nAPIObjects_def APIType_capBits_def o_def split:apiobject_type.splits)[1] + nAPIObjects_def APIType_capBits_def split:apiobject_type.splits)[1] subgoal by (simp add:unat_eq_def word_unat.Rep_inverse' word_less_nat_alt) subgoal by (clarsimp simp:objBits_simps', unat_arith) apply (fold_subgoals (prefix))[3] diff --git a/proof/crefine/RISCV64/SR_lemmas_C.thy b/proof/crefine/RISCV64/SR_lemmas_C.thy index c69dca44a8..e5adf595ef 100644 --- a/proof/crefine/RISCV64/SR_lemmas_C.thy +++ b/proof/crefine/RISCV64/SR_lemmas_C.thy @@ -304,11 +304,15 @@ lemma cmdbnode_relation_mdb_node_to_H [simp]: unfolding cmdbnode_relation_def mdb_node_to_H_def mdb_node_lift_def cte_lift_def by (fastforce split: option.splits) -definition - tcb_no_ctes_proj :: "tcb \ Structures_H.thread_state \ machine_word \ machine_word \ arch_tcb \ bool \ word8 \ word8 \ word8 \ nat \ fault option \ machine_word option" +definition tcb_no_ctes_proj :: + "tcb \ Structures_H.thread_state \ machine_word \ machine_word \ arch_tcb \ bool \ word8 + \ word8 \ word8 \ nat \ fault option \ machine_word option + \ machine_word option \ machine_word 
option" where - "tcb_no_ctes_proj t \ (tcbState t, tcbFaultHandler t, tcbIPCBuffer t, tcbArch t, tcbQueued t, - tcbMCP t, tcbPriority t, tcbDomain t, tcbTimeSlice t, tcbFault t, tcbBoundNotification t)" + "tcb_no_ctes_proj t \ + (tcbState t, tcbFaultHandler t, tcbIPCBuffer t, tcbArch t, tcbQueued t, + tcbMCP t, tcbPriority t, tcbDomain t, tcbTimeSlice t, tcbFault t, tcbBoundNotification t, + tcbSchedNext t, tcbSchedPrev t)" lemma tcb_cte_cases_proj_eq [simp]: "tcb_cte_cases p = Some (getF, setF) \ @@ -319,7 +323,7 @@ lemma tcb_cte_cases_proj_eq [simp]: (* NOTE: 5 = cte_level_bits *) lemma map_to_ctes_upd_cte': "\ ksPSpace s p = Some (KOCTE cte'); is_aligned p cte_level_bits; ps_clear p cte_level_bits s \ - \ map_to_ctes (ksPSpace s(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" + \ map_to_ctes ((ksPSpace s)(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" apply (erule (1) map_to_ctes_upd_cte) apply (simp add: field_simps ps_clear_def3 cte_level_bits_def mask_def) done @@ -327,7 +331,7 @@ lemma map_to_ctes_upd_cte': lemma map_to_ctes_upd_tcb': "[| ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; ps_clear p tcbBlockSizeBits s |] -==> map_to_ctes (ksPSpace s(p |-> KOTCB tcb)) = +==> map_to_ctes ((ksPSpace s)(p |-> KOTCB tcb)) = (%x. if EX getF setF. tcb_cte_cases (x - p) = Some (getF, setF) & getF tcb ~= getF tcb' @@ -454,7 +458,7 @@ lemma fst_setCTE: assumes ct: "cte_at' dest s" and rl: "\s'. \ ((), s') \ fst (setCTE dest cte s); (s' = s \ ksPSpace := ksPSpace s' \); - (ctes_of s' = ctes_of s(dest \ cte)); + (ctes_of s' = (ctes_of s)(dest \ cte)); (map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s')); (map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s')); (map_to_ptes (ksPSpace s) = map_to_ptes (ksPSpace s')); @@ -479,7 +483,7 @@ proof - by clarsimp note thms = this - have ceq: "ctes_of s' = ctes_of s(dest \ cte)" + have ceq: "ctes_of s' = (ctes_of s)(dest \ cte)" by (rule use_valid [OF thms(1) setCTE_ctes_of_wp]) simp show ?thesis @@ -645,7 +649,6 @@ proof (rule cor_map_relI [OF map_option_eq_dom_eq]) hence "tcb_no_ctes_proj tcb = tcb_no_ctes_proj tcb'" using om apply - - apply (simp add: o_def) apply (drule fun_cong [where x = x]) apply simp done @@ -1412,9 +1415,9 @@ lemma cmap_relation_cong: apply (erule imageI) done -lemma ctcb_relation_null_queue_ptrs: +lemma ctcb_relation_null_ep_ptrs: assumes rel: "cmap_relation mp mp' tcb_ptr_to_ctcb_ptr ctcb_relation" - and same: "map_option tcb_null_queue_ptrs \ mp'' = map_option tcb_null_queue_ptrs \ mp'" + and same: "map_option tcb_null_ep_ptrs \ mp'' = map_option tcb_null_ep_ptrs \ mp'" shows "cmap_relation mp mp'' tcb_ptr_to_ctcb_ptr ctcb_relation" using rel apply (rule iffD1 [OF cmap_relation_cong, OF _ map_option_eq_dom_eq, rotated -1]) @@ -1422,12 +1425,12 @@ lemma ctcb_relation_null_queue_ptrs: apply (rule same [symmetric]) apply (drule compD [OF same]) apply (case_tac b, case_tac b') - apply (simp add: ctcb_relation_def tcb_null_queue_ptrs_def) + apply (simp add: ctcb_relation_def tcb_null_ep_ptrs_def) done lemma map_to_ctes_upd_tcb_no_ctes: "\ko_at' tcb thread s ; \x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x \ - \ map_to_ctes (ksPSpace s(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" apply (erule obj_atE') apply (simp add: projectKOs objBits_simps) apply (subst map_to_ctes_upd_tcb') @@ -1441,13 +1444,13 @@ lemma map_to_ctes_upd_tcb_no_ctes: lemma update_ntfn_map_tos: fixes P :: "Structures_H.notification \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KONotification ko)) = map_to_eps (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KONotification ko)) = map_to_eps (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1455,13 +1458,13 @@ lemma update_ntfn_map_tos: lemma update_ep_map_tos: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1469,12 +1472,12 @@ lemma update_ep_map_tos: lemma update_tcb_map_tos: fixes P :: "tcb \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KOTCB ko)) = 
map_to_eps (ksPSpace s)" - and "map_to_ntfns (ksPSpace s(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" + and "map_to_ntfns ((ksPSpace s)(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1482,13 +1485,13 @@ lemma update_tcb_map_tos: lemma update_asidpool_map_tos: fixes P :: "asidpool \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI @@ -1497,25 +1500,25 @@ lemma update_asidpool_map_tos: arch_kernel_object.split_asm) lemma update_asidpool_map_to_asidpools: - "map_to_asidpools (ksPSpace s(p \ KOArch (KOASIDPool ap))) + "map_to_asidpools ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = (map_to_asidpools (ksPSpace s))(p \ ap)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_to_ptes: - "map_to_ptes (ksPSpace s(p \ KOArch (KOPTE pte))) + "map_to_ptes ((ksPSpace s)(p \ KOArch (KOPTE pte))) = (map_to_ptes (ksPSpace s))(p \ pte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_tos: fixes P :: "pte \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = 
map_to_ctes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1601,7 +1604,6 @@ where | "thread_state_to_tsType (Structures_H.BlockedOnSend oref badge cg cgr isc) = scast ThreadState_BlockedOnSend" | "thread_state_to_tsType (Structures_H.BlockedOnNotification oref) = scast ThreadState_BlockedOnNotification" - lemma ctcb_relation_thread_state_to_tsType: "ctcb_relation tcb ctcb \ tsType_CL (thread_state_lift (tcbState_C ctcb)) = thread_state_to_tsType (tcbState tcb)" unfolding ctcb_relation_def cthread_state_relation_def @@ -1756,9 +1758,9 @@ lemma memory_cross_over: apply (cut_tac p=ptr in unat_mask_3_less_8) apply (subgoal_tac "(ptr && ~~ mask 3) + (ptr && mask 3) = ptr") apply (subgoal_tac "!n x. n < 8 \ (unat (x::machine_word) = n) = (x = of_nat n)") - apply (auto simp add: eval_nat_numeral unat_eq_0 add.commute - elim!: less_SucE)[1] - apply (clarsimp simp add: unat64_eq_of_nat word_bits_def) + apply (clarsimp simp: eval_nat_numeral) + apply (fastforce simp: add.commute elim!: less_SucE) + apply (clarsimp simp: unat64_eq_of_nat word_bits_def) apply (simp add: add.commute word_plus_and_or_coroll2) done @@ -1910,7 +1912,7 @@ lemma gs_set_assn_Delete_cstate_relation: lemma update_typ_at: assumes at: "obj_at' P p s" and tp: "\obj. 
P obj \ koTypeOf (injectKOS obj) = koTypeOf ko" - shows "typ_at' T p' (s \ksPSpace := ksPSpace s(p \ ko)\) = typ_at' T p' s" + shows "typ_at' T p' (s \ksPSpace := (ksPSpace s)(p \ ko)\) = typ_at' T p' s" using at by (auto elim!: obj_atE' simp: typ_at'_def ko_wp_at'_def dest!: tp[rule_format] @@ -2036,6 +2038,14 @@ lemma capTCBPtr_eq: apply clarsimp done +lemma rf_sr_ctcb_queue_relation: + "\ (s, s') \ rf_sr; d \ maxDomain; p \ maxPriority \ + \ ctcb_queue_relation (ksReadyQueues s (d, p)) + (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p))" + unfolding rf_sr_def cstate_relation_def cready_queues_relation_def + apply (clarsimp simp: Let_def seL4_MinPrio_def minDom_def maxDom_to_H maxPrio_to_H) + done + lemma rf_sr_sched_action_relation: "(s, s') \ rf_sr \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" @@ -2130,5 +2140,18 @@ lemma unat_scast_numDomains: "unat (SCAST(32 signed \ machine_word_len) Kernel_C.numDomains) = unat Kernel_C.numDomains" by (simp add: scast_eq sint_numDomains_to_H unat_numDomains_to_H numDomains_machine_word_safe) +(* link up Kernel_Config loaded from the seL4 build system with physBase in C code *) +lemma physBase_spec: + "\s. \\ {s} Call physBase_'proc {t. ret__unsigned_long_' t = Kernel_Config.physBase }" + apply (rule allI, rule conseqPre, vcg) + apply (simp add: Kernel_Config.physBase_def) + done + +lemma rf_sr_obj_update_helper: + "(s, s'\ globals := globals s' \ t_hrs_' := t_hrs_' (globals (undefined + \ globals := (undefined \ t_hrs_' := f (globals s') (t_hrs_' (globals s')) \)\))\\) \ rf_sr + \ (s, globals_update (\v. t_hrs_'_update (f v) v) s') \ rf_sr" + by (simp cong: StateSpace.state.fold_congs globals.fold_congs) + end end diff --git a/proof/crefine/RISCV64/Schedule_C.thy b/proof/crefine/RISCV64/Schedule_C.thy index e25d7dbce3..5d97c3c95b 100644 --- a/proof/crefine/RISCV64/Schedule_C.thy +++ b/proof/crefine/RISCV64/Schedule_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * @@ -6,7 +7,7 @@ *) theory Schedule_C -imports Tcb_C +imports Tcb_C Detype_C begin (*FIXME: arch_split: move up?*) @@ -37,15 +38,17 @@ lemma switchToIdleThread_ccorres: "ccorres dc xfdc invs_no_cicd' UNIV hs switchToIdleThread (Call switchToIdleThread_'proc)" apply (cinit) + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l) apply (ctac (no_vcg) add: Arch_switchToIdleThread_ccorres) apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule_tac P="\s. thread = ksIdleThread s" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: simpler_modify_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) - apply (wpsimp simp: RISCV64_H.switchToIdleThread_def)+ + apply (wpsimp simp: RISCV64_H.switchToIdleThread_def wp: hoare_drop_imps)+ done lemma Arch_switchToThread_ccorres: @@ -61,8 +64,25 @@ lemma Arch_switchToThread_ccorres: apply clarsimp done +lemma invs_no_cicd'_pspace_aligned': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_aligned' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) +lemma invs_no_cicd'_pspace_distinct': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_distinct' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) +lemma threadGet_exs_valid[wp]: + "tcb_at' t s \ \(=) s\ threadGet f t \\\r. 
(=) s\" + unfolding threadGet_def liftM_def + apply (wpsimp wp: exs_getObject) + apply (fastforce simp: obj_at'_def objBits_simps')+ + done + +lemma isRunnable_exs_valid[wp]: + "tcb_at' t s \ \(=) s\ isRunnable t \\\r. (=) s\" + unfolding isRunnable_def getThreadState_def + by (wpsimp wp: exs_getObject) (* FIXME: move *) lemma switchToThread_ccorres: @@ -72,47 +92,34 @@ lemma switchToThread_ccorres: hs (switchToThread t) (Call switchToThread_'proc)" - apply (cinit lift: thread_') + apply (clarsimp simp: switchToThread_def) + apply (rule ccorres_symb_exec_l'[OF _ _ isRunnable_sp]; (solves wpsimp)?) + apply (rule ccorres_symb_exec_l'[OF _ _ assert_sp]; (solves wpsimp)?) + apply (rule ccorres_stateAssert_fwd)+ + apply (cinit' lift: thread_') apply (ctac (no_vcg) add: Arch_switchToThread_ccorres) apply (ctac (no_vcg) add: tcbSchedDequeue_ccorres) + apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: setCurThread_def simpler_modify_def) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def) - apply wp+ - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def) - done - -lemma get_tsType_ccorres2: - "ccorres (\r r'. r' = thread_state_to_tsType r) ret__unsigned_longlong_' (tcb_at' thread) - (UNIV \ {s. f s = tcb_ptr_to_ctcb_ptr thread} \ - {s. cslift s (Ptr &(f s\[''tcbState_C''])) = Some (thread_state_' s)}) [] - (getThreadState thread) (Call thread_state_get_tsType_'proc)" - unfolding getThreadState_def - apply (rule ccorres_from_spec_modifies [where P=\, simplified]) - apply (rule thread_state_get_tsType_spec) - apply (rule thread_state_get_tsType_modifies) - apply simp - apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: typ_heap_simps) - apply (rule bexI [rotated, OF threadGet_eq], assumption) - apply simp - apply (drule ctcb_relation_thread_state_to_tsType) - apply simp + apply (clarsimp simp: setCurThread_def simpler_modify_def rf_sr_def cstate_relation_def + Let_def carch_state_relation_def cmachine_state_relation_def) + apply (wpsimp wp: Arch_switchToThread_invs_no_cicd' hoare_drop_imps + | strengthen invs_no_cicd'_pspace_aligned' invs_no_cicd'_pspace_distinct')+ done lemma activateThread_ccorres: "ccorres dc xfdc (ct_in_state' activatable' and (\s. sch_act_wf (ksSchedulerAction s) s) - and valid_queues and valid_objs') + and valid_objs' and pspace_aligned' and pspace_distinct') UNIV [] activateThread (Call activateThread_'proc)" apply (cinit) apply (rule ccorres_pre_getCurThread) - apply (ctac add: get_tsType_ccorres2 [where f="\s. ksCurThread_' (globals s)"]) + apply (ctac add: get_tsType_ccorres [where f="\s. 
ksCurThread_' (globals s)"]) apply (rule_tac P="activatable' rv" in ccorres_gen_asm) apply (wpc) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) @@ -122,7 +129,7 @@ lemma activateThread_ccorres: apply (rule ccorres_cond_true) apply (rule ccorres_return_Skip) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) - apply (simp add: "StrictC'_thread_state_defs" del: Collect_const) + apply (simp add: ThreadState_defs del: Collect_const) apply (rule ccorres_cond_false) apply (rule ccorres_cond_false) apply (rule ccorres_cond_true) @@ -130,7 +137,7 @@ lemma activateThread_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: activateIdleThread_def return_def) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) - apply (simp add: "StrictC'_thread_state_defs" del: Collect_const) + apply (simp add: ThreadState_defs del: Collect_const) apply (rule ccorres_cond_false) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -153,7 +160,7 @@ lemma activateThread_ccorres: apply (subgoal_tac "ksCurThread_' (globals s') = tcb_ptr_to_ctcb_ptr (ksCurThread s)") prefer 2 apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (clarsimp simp: typ_heap_simps ThreadState_Running_def mask_def) + apply (clarsimp simp: typ_heap_simps ThreadState_defs mask_def) done lemma ceqv_Guard_UNIV_Skip: @@ -185,26 +192,55 @@ lemmas ccorres_remove_tail_Guard_Skip = ccorres_abstract[where xf'="\_. ()", OF ceqv_remove_tail_Guard_Skip] lemma switchToThread_ccorres': - "ccorres (\_ _. True) xfdc + "ccorres dc xfdc (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) hs (switchToThread t) (Call switchToThread_'proc)" apply (rule ccorres_guard_imp2) - apply (ctac (no_vcg) add: switchToThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: switchToThread_ccorres) apply auto done lemmas word_log2_max_word_word_size = word_log2_max[where 'a=machine_word_len, simplified word_size, simplified] +lemma ccorres_pre_getQueue: + assumes cc: "\queue. ccorres r xf (P queue) (P' queue) hs (f queue) c" + shows "ccorres r xf (\s. P (ksReadyQueues s (d, p)) s \ d \ maxDomain \ p \ maxPriority) + {s'. \queue. (let cqueue = index (ksReadyQueues_' (globals s')) + (cready_queues_index_to_C d p) in + ctcb_queue_relation queue cqueue) \ s' \ P' queue} + hs (getQueue d p >>= (\queue. f queue)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l2) + defer + defer + apply (rule gq_sp) + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply assumption + apply (clarsimp simp: getQueue_def gets_exs_valid) + apply clarsimp + apply (drule spec, erule mp) + apply (erule rf_sr_ctcb_queue_relation) + apply (simp add: maxDom_to_H maxPrio_to_H)+ + done + lemma chooseThread_ccorres: - "ccorres dc xfdc all_invs_but_ct_idle_or_in_cur_domain' UNIV [] chooseThread (Call chooseThread_'proc)" + "ccorres dc xfdc all_invs_but_ct_idle_or_in_cur_domain' UNIV [] + chooseThread (Call chooseThread_'proc)" proof - note prio_and_dom_limit_helpers [simp] note ksReadyQueuesL2Bitmap_nonzeroI [simp] note Collect_const_mem [simp] + + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the shape of the proof compared to when numDomains > 1 *) include no_less_1_simps @@ -213,9 +249,22 @@ proof - "\s. 
invs_no_cicd' s \ ksCurDomain s \ maxDomain" by (simp add: invs_no_cicd'_def) + have invs_no_cicd'_valid_bitmaps: + "\s. invs_no_cicd' s \ valid_bitmaps s" + by (simp add: invs_no_cicd'_def) + + have invs_no_cicd'_pspace_aligned': + "\s. invs_no_cicd' s \ pspace_aligned' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + + have invs_no_cicd'_pspace_distinct': + "\s. invs_no_cicd' s \ pspace_distinct' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + show ?thesis supply if_split[split del] apply (cinit) + apply (rule ccorres_stateAssert)+ apply (simp add: numDomains_sge_1_simp) apply (rule_tac xf'=dom_' and r'="\rv rv'. rv' = ucast rv" in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) @@ -248,7 +297,7 @@ proof - apply (rule_tac P="curdom \ maxDomain" in ccorres_cross_over_guard_no_st) apply (rule_tac P="prio \ maxPriority" in ccorres_cross_over_guard_no_st) apply (rule ccorres_pre_getQueue) - apply (rule_tac P="queue \ []" in ccorres_cross_over_guard_no_st) + apply (rule_tac P="\ tcbQueueEmpty queue" in ccorres_cross_over_guard_no_st) apply (rule ccorres_symb_exec_l) apply (rule ccorres_assert) apply (rule ccorres_symb_exec_r) @@ -263,39 +312,40 @@ proof - apply (rule conseqPre, vcg) apply (rule Collect_mono) apply clarsimp - apply (strengthen queue_in_range) apply assumption apply clarsimp apply (rule conseqPre, vcg) apply clarsimp apply (wp isRunnable_wp)+ - apply (simp add: isRunnable_def) - apply wp apply (clarsimp simp: Let_def guard_is_UNIV_def) - apply (drule invs_no_cicd'_queues) - apply (case_tac queue, simp) - apply (clarsimp simp: tcb_queue_relation'_def cready_queues_index_to_C_def numPriorities_def) - apply (clarsimp simp add: maxDom_to_H maxPrio_to_H - queue_in_range[where qdom=0, simplified, simplified maxPrio_to_H]) - apply (clarsimp simp: le_maxDomain_eq_less_numDomains unat_trans_ucast_helper ) + apply (rule conjI) + apply (clarsimp simp: le_maxDomain_eq_less_numDomains unat_trans_ucast_helper) + apply (intro conjI impI) + apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def ctcb_queue_relation_def + tcbQueueEmpty_def option_to_ctcb_ptr_def) + apply (frule_tac qdom=curdom and prio=rv in cready_queues_index_to_C_in_range') + apply fastforce + apply (clarsimp simp: num_tcb_queues_val word_less_nat_alt cready_queues_index_to_C_def2) apply wpsimp apply (clarsimp simp: guard_is_UNIV_def le_maxDomain_eq_less_numDomains word_less_nat_alt numDomains_less_numeric_explicit) - apply (frule invs_no_cicd'_queues) + apply clarsimp apply (frule invs_no_cicd'_max_CurDomain) - apply (frule invs_no_cicd'_queues) - apply (clarsimp simp: valid_queues_def lookupBitmapPriority_le_maxPriority) + apply (frule invs_no_cicd'_pspace_aligned') + apply (frule invs_no_cicd'_pspace_distinct') + apply (frule invs_no_cicd'_valid_bitmaps) + apply (frule valid_bitmaps_bitmapQ_no_L1_orphans) + apply (frule valid_bitmaps_valid_bitmapQ) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def cong: conj_cong) apply (intro conjI impI) - apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (fastforce dest: lookupBitmapPriority_obj_at' - simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) - apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) - apply (clarsimp simp: not_less le_maxDomain_eq_less_numDomains) - apply (prop_tac "ksCurDomain s = 0") - using unsigned_eq_0_iff apply force - 
apply (cut_tac s=s in lookupBitmapPriority_obj_at'; simp?) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (fastforce intro: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) + apply (fastforce dest: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) done qed @@ -376,7 +426,6 @@ lemma isHighestPrio_ccorres: (isHighestPrio d p) (Call isHighestPrio_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -410,14 +459,13 @@ lemma isHighestPrio_ccorres: apply (rule ccorres_return_C, simp, simp, simp) apply (rule wp_post_taut) apply (vcg exspec=getHighestPrio_modifies)+ - apply (clarsimp simp: word_le_nat_alt true_def to_bool_def maxDomain_le_unat_ucast_explicit + apply (clarsimp simp: word_le_nat_alt maxDomain_le_unat_ucast_explicit split: if_splits) done lemma schedule_ccorres: "ccorres dc xfdc invs' UNIV [] schedule (Call schedule_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -431,7 +479,7 @@ lemma schedule_ccorres: apply (rule ccorres_cond_false_seq) apply simp apply (rule_tac P=\ and P'="{s. ksSchedulerAction_' (globals s) = NULL }" in ccorres_from_vcg) - apply (clarsimp simp: dc_def return_def split: prod.splits) + apply (clarsimp simp: return_def split: prod.splits) apply (rule conseqPre, vcg, clarsimp) (* toplevel case: action is choose new thread *) apply (rule ccorres_cond_true_seq) @@ -448,7 +496,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_true_seq) (* isolate haskell part before setting thread action *) apply (simp add: scheduleChooseNewThread_def) @@ -476,7 +524,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_false_seq) apply (rule_tac xf'=was_runnable_' in ccorres_abstract, ceqv) @@ -496,7 +544,7 @@ lemma schedule_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'=fastfail_' in ccorres_split_nothrow) - apply (clarsimp simp: scheduleSwitchThreadFastfail_def dc_simp) + apply (clarsimp simp: scheduleSwitchThreadFastfail_def) apply (rule ccorres_cond_seq2[THEN iffD1]) apply (rule_tac xf'=ret__int_' and val="from_bool (curThread = it)" and R="\s. it = ksIdleThread s \ curThread = ksCurThread s" and R'=UNIV @@ -507,17 +555,17 @@ lemma schedule_ccorres: apply (rule ccorres_cond2'[where R=\], fastforce) apply clarsimp apply (rule ccorres_return[where R'=UNIV], clarsimp, vcg) - apply (rule_tac P="\s. 
obj_at' (\tcb. tcbPriority tcb = curPrio) curThread s - \ curThread = ksCurThread s - \ obj_at' (\tcb. tcbPriority tcb = targetPrio) candidate s" - and P'=UNIV in ccorres_from_vcg) - apply clarsimp - apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def cur_tcb'_def rf_sr_ksCurThread) - apply (drule (1) obj_at_cslift_tcb)+ - apply (clarsimp simp: typ_heap_simps ctcb_relation_def to_bool_def split: if_split) - apply unat_arith - apply (wpsimp wp: threadGet_obj_at2) + apply (rule_tac P="\s. obj_at' (\tcb. tcbPriority tcb = curPrio) curThread s + \ curThread = ksCurThread s + \ obj_at' (\tcb. tcbPriority tcb = targetPrio) candidate s" + and P'=UNIV in ccorres_from_vcg) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: return_def cur_tcb'_def rf_sr_ksCurThread) + apply (drule (1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) + apply unat_arith + apply clarsimp apply vcg apply ceqv (* fastfail calculation complete *) @@ -533,18 +581,17 @@ lemma schedule_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres, clarsimp) - apply (clarsimp simp: to_bool_def) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_return) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: to_bool_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) apply (fastforce simp: isHighestPrio_def' gets_def return_def get_def - NonDetMonad.bind_def + Nondet_Monad.bind_def split: prod.split) apply ceqv apply (clarsimp simp: to_bool_def) @@ -578,10 +625,10 @@ lemma schedule_ccorres: in ccorres_symb_exec_r_known_rv) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: false_def cur_tcb'_def rf_sr_ksCurThread) + apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread) apply (drule (1) obj_at_cslift_tcb)+ - apply (clarsimp simp: typ_heap_simps ctcb_relation_def to_bool_def split: if_split) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) apply (solves \unat_arith, rule iffI; simp\) apply ceqv apply clarsimp @@ -622,13 +669,13 @@ lemma schedule_ccorres: apply (wp (once) hoare_drop_imps) apply wp apply (strengthen strenghten_False_imp[where P="a = ResumeCurrentThread" for a]) - apply (clarsimp simp: conj_ac invs_queues invs_valid_objs' cong: conj_cong) + apply (clarsimp simp: conj_ac invs_valid_objs' cong: conj_cong) apply wp apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) - apply (clarsimp simp: to_bool_def true_def) + apply clarsimp apply (strengthen ko_at'_obj_at'_field) - apply (clarsimp cong: imp_cong simp: ko_at'_obj_at'_field to_bool_def true_def) + apply (clarsimp cong: imp_cong simp: ko_at'_obj_at'_field) apply wp apply clarsimp (* when runnable tcbSchedEnqueue curThread *) @@ -637,32 +684,32 @@ lemma schedule_ccorres: apply (clarsimp simp: invs'_bitmapQ_no_L1_orphans invs_ksCurDomain_maxDomain') apply (fastforce dest: invs_sch_act_wf') - apply (wp | clarsimp simp: dc_def)+ + apply wpsimp+ apply (vcg exspec=tcbSchedEnqueue_modifies) apply wp - apply (clarsimp simp: to_bool_def false_def) apply vcg - apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs' - dc_def)+ + apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_valid_objs') apply (frule invs_sch_act_wf') apply 
(frule tcb_at_invs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') apply (rule conjI) apply (clarsimp dest!: rf_sr_cscheduler_relation simp: cscheduler_action_relation_def) apply (rule conjI; clarsimp) apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: cscheduler_action_relation_def typ_heap_simps max_word_not_0 + apply (clarsimp simp: cscheduler_action_relation_def typ_heap_simps split: scheduler_action.splits) apply (frule (1) obj_at_cslift_tcb) apply (clarsimp dest!: rf_sr_cscheduler_relation invs_sch_act_wf' simp: cscheduler_action_relation_def) - apply (intro conjI impI allI; clarsimp simp: typ_heap_simps ctcb_relation_def to_bool_def) + apply (intro conjI impI allI; clarsimp simp: typ_heap_simps ctcb_relation_def) apply (fastforce simp: tcb_at_not_NULL tcb_at_1 dest: pred_tcb_at')+ done (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -696,11 +743,7 @@ lemma threadSet_timeSlice_ccorres [corres]: apply (simp add: cep_relations_drop_fun_upd cvariable_relation_upd_const ko_at_projectKO_opt) - apply (rule conjI) defer - apply (erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) apply (simp add: ctcb_relation_def) @@ -713,10 +756,10 @@ lemma timerTick_ccorres: supply subst_all [simp del] apply (cinit) apply (rule ccorres_pre_getCurThread) - apply (ctac add: get_tsType_ccorres2 [where f="\s. ksCurThread_' (globals s)"]) + apply (ctac add: get_tsType_ccorres [where f="\s. 
ksCurThread_' (globals s)"]) apply (rule ccorres_split_nothrow_novcg) apply wpc - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ (* thread_state.Running *) apply simp apply (rule ccorres_cond_true) @@ -738,17 +781,17 @@ lemma timerTick_ccorres: apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) apply (clarsimp simp: cur_tcb'_def) apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) - apply (ctac add: threadSet_timeSlice_ccorres[unfolded dc_def]) + apply (ctac add: threadSet_timeSlice_ccorres) apply (rule ccorres_rhs_assoc)+ apply (ctac) apply simp apply (ctac (no_vcg) add: tcbSchedAppend_ccorres) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (wp weak_sch_act_wf_lift_linear threadSet_valid_queues + apply (ctac add: rescheduleRequired_ccorres) + apply (wp weak_sch_act_wf_lift_linear threadSet_pred_tcb_at_state tcbSchedAppend_valid_objs' threadSet_valid_objs' threadSet_tcbDomain_triv | clarsimp simp: st_tcb_at'_def o_def split: if_splits)+ apply (vcg exspec=tcbSchedDequeue_modifies) - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ apply ceqv apply (clarsimp simp: decDomainTime_def numDomains_sge_1_simp) apply (rule ccorres_when[where R=\]) @@ -760,7 +803,6 @@ lemma timerTick_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply ceqv - apply (fold dc_def) apply (rule ccorres_pre_getDomainTime) apply (rename_tac rva rv'a rvb) apply (rule_tac P'="{s. ksDomainTime_' (globals s) = rvb}" in ccorres_inst, simp) @@ -768,13 +810,13 @@ lemma timerTick_ccorres: apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_true) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply clarsimp apply assumption apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply clarsimp apply wp apply (clarsimp simp: guard_is_UNIV_def) @@ -796,8 +838,8 @@ lemma timerTick_ccorres: apply (rule conjI, clarsimp simp: invs'_def valid_state'_def valid_tcb'_def)+ apply (auto simp: obj_at'_def inQ_def weak_sch_act_wf_def st_tcb_at'_def valid_pspace'_def ct_idle_or_in_cur_domain'_def valid_tcb'_def valid_idle'_def projectKOs)[1] - apply (auto simp: invs'_def valid_state'_def valid_tcb'_def tcb_cte_cases_def - cteSizeBits_def)[1] + apply (auto simp: invs'_def valid_state'_def valid_tcb'_def tcb_cte_cases_def cur_tcb'_def + obj_at'_def cteSizeBits_def)[1] apply (frule invs_cur') apply (clarsimp simp: cur_tcb'_def) diff --git a/proof/crefine/RISCV64/StateRelation_C.thy b/proof/crefine/RISCV64/StateRelation_C.thy index d5dc162b40..8f1cc0da32 100644 --- a/proof/crefine/RISCV64/StateRelation_C.thy +++ b/proof/crefine/RISCV64/StateRelation_C.thy @@ -17,8 +17,7 @@ definition definition "array_relation r n a c \ \i \ n. 
r (a i) (index c (unat i))" -(* used for bound ntfn/tcb *) -definition +definition option_to_ctcb_ptr :: "machine_word option \ tcb_C ptr" where "option_to_ctcb_ptr x \ case x of None \ NULL | Some t \ tcb_ptr_to_ctcb_ptr t" @@ -362,7 +361,9 @@ where \ tcbTimeSlice atcb = unat (tcbTimeSlice_C ctcb) \ cfault_rel (tcbFault atcb) (seL4_Fault_lift (tcbFault_C ctcb)) (lookup_fault_lift (tcbLookupFailure_C ctcb)) - \ option_to_ptr (tcbBoundNotification atcb) = tcbBoundNotification_C ctcb" + \ option_to_ptr (tcbBoundNotification atcb) = tcbBoundNotification_C ctcb + \ option_to_ctcb_ptr (tcbSchedPrev atcb) = tcbSchedPrev_C ctcb + \ option_to_ctcb_ptr (tcbSchedNext atcb) = tcbSchedNext_C ctcb" abbreviation "ep_queue_relation' \ tcb_queue_relation' tcbEPNext_C tcbEPPrev_C" @@ -545,17 +546,17 @@ definition where "cready_queues_index_to_C qdom prio \ (unat qdom) * numPriorities + (unat prio)" -definition cready_queues_relation :: - "tcb_C typ_heap \ (tcb_queue_C[num_tcb_queues]) \ (domain \ priority \ ready_queue) \ bool" -where - "cready_queues_relation h_tcb queues aqueues \ - \qdom prio. ((qdom \ ucast minDom \ qdom \ ucast maxDom \ - prio \ ucast minPrio \ prio \ ucast maxPrio) \ - (let cqueue = index queues (cready_queues_index_to_C qdom prio) in - sched_queue_relation' h_tcb (aqueues (qdom, prio)) (head_C cqueue) (end_C cqueue))) - \ (\ (qdom \ ucast minDom \ qdom \ ucast maxDom \ - prio \ ucast minPrio \ prio \ ucast maxPrio) \ aqueues (qdom, prio) = [])" +definition ctcb_queue_relation :: "tcb_queue \ tcb_queue_C \ bool" where + "ctcb_queue_relation aqueue cqueue \ + head_C cqueue = option_to_ctcb_ptr (tcbQueueHead aqueue) + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd aqueue)" +definition cready_queues_relation :: + "(domain \ priority \ ready_queue) \ (tcb_queue_C[num_tcb_queues]) \ bool" + where + "cready_queues_relation aqueues cqueues \ + \d p. d \ maxDomain \ p \ maxPriority + \ ctcb_queue_relation (aqueues (d, p)) (index cqueues (cready_queues_index_to_C d p))" abbreviation "cte_array_relation astate cstate @@ -694,9 +695,7 @@ where "cstate_relation astate cstate \ let cheap = t_hrs_' cstate in cpspace_relation (ksPSpace astate) (underlying_memory (ksMachineState astate)) cheap \ - cready_queues_relation (clift cheap) - (ksReadyQueues_' cstate) - (ksReadyQueues astate) \ + cready_queues_relation (ksReadyQueues astate) (ksReadyQueues_' cstate) \ zero_ranges_are_zero (gsUntypedZeroRanges astate) cheap \ cbitmap_L1_relation (ksReadyQueuesL1Bitmap_' cstate) (ksReadyQueuesL1Bitmap astate) \ cbitmap_L2_relation (ksReadyQueuesL2Bitmap_' cstate) (ksReadyQueuesL2Bitmap astate) \ diff --git a/proof/crefine/RISCV64/SyscallArgs_C.thy b/proof/crefine/RISCV64/SyscallArgs_C.thy index 6f919d09e9..e4db69dcd8 100644 --- a/proof/crefine/RISCV64/SyscallArgs_C.thy +++ b/proof/crefine/RISCV64/SyscallArgs_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * @@ -47,10 +48,8 @@ lemma replyOnRestart_invs'[wp]: "\invs'\ replyOnRestart thread reply isCall \\rv. 
invs'\" including no_pre apply (simp add: replyOnRestart_def) - apply (wp setThreadState_nonqueued_state_update rfk_invs' static_imp_wp) - apply (rule hoare_vcg_all_lift) - apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_vcg_all_lift rfk_ksQ) - apply (rule hoare_strengthen_post, rule gts_sp') + apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_weak_lift_imp) + apply (rule hoare_strengthen_post, rule gts_sp') apply (clarsimp simp: pred_tcb_at') apply (auto elim!: pred_tcb'_weakenE st_tcb_ex_cap'' dest: st_tcb_at_idle_thread') @@ -289,7 +288,7 @@ lemma ccorres_invocationCatch_Inr: if reply = [] then liftE (replyOnRestart thread [] isCall) \ returnOk () else liftE (replyOnRestart thread reply isCall) odE od) c" - apply (simp add: invocationCatch_def liftE_bindE o_xo_injector) + apply (simp add: invocationCatch_def liftE_bindE o_xo_injector cong: ccorres_all_cong) apply (subst ccorres_liftM_simp[symmetric]) apply (simp add: liftM_def bind_assoc bindE_def) apply (rule_tac f="\f. ccorres rvr xs P P' hs f c" for rvr xs in arg_cong) @@ -415,11 +414,13 @@ lemma is_syscall_error_codes: by ((rule iffD2[OF is_syscall_error_code_def], intro allI, rule conseqPre, vcg, safe, (simp_all add: o_def)?)+) -lemma syscall_error_throwError_ccorres_direct: +lemma syscall_error_throwError_ccorres_direct_gen: "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; \err' ft'. syscall_error_to_H (f err') ft' = Some err \ \ - ccorres (intr_and_se_rel \ dc) (liftxf errstate id v' ret__unsigned_long_') + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') \ (UNIV) (SKIP # hs) (throwError (Inl err)) code" apply (rule ccorres_from_vcg_throws) @@ -429,28 +430,35 @@ lemma syscall_error_throwError_ccorres_direct: apply (simp add: syscall_error_rel_def exception_defs) done -lemma syscall_error_throwError_ccorres_succs: +lemma syscall_error_throwError_ccorres_succs_gen: "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; \err' ft'. 
syscall_error_to_H (f err') ft' = Some err \ \ - ccorres (intr_and_se_rel \ dc) (liftxf errstate id v' ret__unsigned_long_') + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') \ (UNIV) (SKIP # hs) (throwError (Inl err)) (code ;; remainder)" apply (rule ccorres_guard_imp2, rule ccorres_split_throws) - apply (erule syscall_error_throwError_ccorres_direct) - apply simp + apply (erule syscall_error_throwError_ccorres_direct_gen; assumption) apply (rule HoarePartialProps.augment_Faults) apply (erule iffD1[OF is_syscall_error_code_def, THEN spec]) apply simp+ done -lemmas syscall_error_throwError_ccorres_n = - is_syscall_error_codes[THEN syscall_error_throwError_ccorres_direct, +lemmas syscall_error_throwError_ccorres_n_gen = + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_direct_gen, simplified o_apply] - is_syscall_error_codes[THEN syscall_error_throwError_ccorres_succs, + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_succs_gen, simplified o_apply] +lemmas syscall_error_throwError_ccorres_n = + syscall_error_throwError_ccorres_n_gen[where arrel="intr_and_se_rel \ dc", simplified] + +lemmas syscall_error_throwError_ccorres_n_inl_rrel = + syscall_error_throwError_ccorres_n_gen[where arrel="inl_rrel (intr_and_se_rel \ dc)", simplified] + definition idButNot :: "'a \ 'a" where "idButNot x = x" @@ -648,9 +656,9 @@ lemma getMRs_tcbContext: apply (thin_tac "thread = t" for t) apply (clarsimp simp add: getMRs_def) apply (wp|wpc)+ - apply (rule_tac P="n < length x" in hoare_gen_asm) + apply (rule_tac P="n < length rv" in hoare_gen_asm) apply (clarsimp simp: nth_append) - apply (wp mapM_wp' static_imp_wp)+ + apply (wp mapM_wp' hoare_weak_lift_imp)+ apply simp apply (rule asUser_cur_obj_at') apply (simp add: getRegister_def msgRegisters_unfold) @@ -776,12 +784,12 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_move_array_assertion_tcb_ctes) apply (ctac (no_vcg)) apply csymbr - apply (rule_tac b="isArchObjectCap rva \ isFrameCap (capCap rva)" in ccorres_case_bools') + apply (rule_tac b="isArchObjectCap rv \ isFrameCap (capCap rv)" in ccorres_case_bools') apply simp apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap rv" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -804,8 +812,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (frule cap_get_tag_isCap_unfolded_H_cap(15),simp) apply (frule capFVMRights_range) apply (simp add: cap_frame_cap_lift) - apply (clarsimp simp: cap_to_H_def vmrights_to_H_def to_bool_def - word_le_make_less + apply (clarsimp simp: cap_to_H_def vmrights_to_H_def word_le_make_less Kernel_C.VMReadWrite_def Kernel_C.VMReadOnly_def Kernel_C.VMKernelOnly_def dest: word_less_cases) @@ -988,7 +995,7 @@ lemma getMRs_rel: getMRs thread buffer mi \\args. getMRs_rel args buffer\" apply (simp add: getMRs_rel_def) apply (rule hoare_pre) - apply (rule_tac x=mi in hoare_vcg_exI) + apply (rule_tac x=mi in hoare_exI) apply wp apply (rule_tac Q="\rv s. 
thread = ksCurThread s \ fst (getMRs thread buffer mi s) = {(rv,s)}" in hoare_strengthen_post) apply (wp det_result det_wp_getMRs) @@ -1135,7 +1142,7 @@ lemma getSyscallArg_ccorres_foo: apply (clarsimp simp: index_msgRegisters_less' ucast_up_less_bounded_iff_less_ucast_down') apply wp[1] apply (wp getMRs_tcbContext) - apply simp + apply fastforce apply (rule ccorres_seq_skip [THEN iffD2]) apply (rule ccorres_add_return2) apply (rule ccorres_symb_exec_l) @@ -1159,7 +1166,7 @@ lemma getSyscallArg_ccorres_foo: in hoare_pre(1)) apply (wp getMRs_user_word) apply (clarsimp simp: msgMaxLength_def unat_less_helper) - apply simp + apply fastforce apply (clarsimp simp: sysargs_rel_def sysargs_rel_n_def) apply (rule conjI, clarsimp simp: unat_of_nat64 word_bits_def) apply (drule equalityD2) diff --git a/proof/crefine/RISCV64/Syscall_C.thy b/proof/crefine/RISCV64/Syscall_C.thy index dc1dc4d696..684151eac4 100644 --- a/proof/crefine/RISCV64/Syscall_C.thy +++ b/proof/crefine/RISCV64/Syscall_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * @@ -20,9 +21,6 @@ end context kernel_m begin -(* FIXME: should do this from the beginning *) -declare true_def [simp] false_def [simp] - lemma ccorres_If_False: "ccorres_underlying sr Gamm r xf arrel axf R R' hs b c \ ccorres_underlying sr Gamm r xf arrel axf @@ -52,8 +50,7 @@ lemma cap_cases_one_on_true_sum: lemma performInvocation_Endpoint_ccorres: "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and st_tcb_at' simple' thread and ep_at' epptr - and sch_act_sane and (\s. thread = ksCurThread s - \ (\p. ksCurThread s \ set (ksReadyQueues s p)))) + and sch_act_sane and (\s. thread = ksCurThread s)) (UNIV \ {s. block_' s = from_bool blocking} \ {s. call_' s = from_bool do_call} \ {s. badge_' s = badge} @@ -125,7 +122,6 @@ lemma decodeInvocation_ccorres: and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) and (\s. \v \ set extraCaps. s \' fst v \ cte_at' (snd v) s) and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). ex_nonz_cap_to' y s) - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) and sysargs_rel args buffer) (UNIV \ {s. current_extra_caps_' (globals s) = extraCaps'} \ {s. 
call_' s = from_bool isCall} @@ -202,7 +198,7 @@ lemma decodeInvocation_ccorres: apply simp apply (rule hoare_use_eq[where f=ksCurThread]) apply (wp sts_invs_minor' sts_st_tcb_at'_cases - setThreadState_ct' hoare_vcg_all_lift sts_ksQ')+ + setThreadState_ct' hoare_vcg_all_lift)+ apply simp apply (vcg exspec=setThreadState_modifies) apply vcg @@ -272,22 +268,22 @@ lemma decodeInvocation_ccorres: apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, rule decodeTCBInvocation_ccorres) apply assumption apply (simp+)[3] apply (rule ccorres_Cond_rhs) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeDomainInvocation_ccorres[unfolded o_def], + erule decodeDomainInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeCNodeInvocation_ccorres[unfolded o_def], + erule decodeCNodeInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply simp @@ -319,7 +315,7 @@ lemma decodeInvocation_ccorres: apply fastforce apply (simp add: cap_lift_capEPBadge_mask_eq) apply (clarsimp simp: rf_sr_ksCurThread Collect_const_mem - cap_get_tag_isCap "StrictC'_thread_state_defs") + cap_get_tag_isCap ThreadState_defs) apply (frule word_unat.Rep_inverse') apply (simp add: cap_get_tag_isCap[symmetric] cap_get_tag_ReplyCap) apply (rule conjI) @@ -493,7 +489,7 @@ lemma handleInvocation_def2: lemma thread_state_to_tsType_eq_Restart: "(thread_state_to_tsType ts = scast ThreadState_Restart) = (ts = Restart)" - by (cases ts, simp_all add: "StrictC'_thread_state_defs") + by (cases ts, simp_all add: ThreadState_defs) lemma wordFromMessageInfo_spec: "\s. \\ {s} Call wordFromMessageInfo_'proc @@ -510,7 +506,7 @@ lemma wordFromMessageInfo_spec: lemma handleDoubleFault_ccorres: "ccorres dc xfdc (invs' and tcb_at' tptr and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and - sch_act_not tptr and (\s. \p. tptr \ set (ksReadyQueues s p))) + sch_act_not tptr) (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) [] (handleDoubleFault tptr ex1 ex2) (Call handleDoubleFault_'proc)" @@ -524,7 +520,7 @@ lemma handleDoubleFault_ccorres: apply (simp add: getRestartPC_def) apply wp apply clarsimp - apply (simp add: ThreadState_Inactive_def) + apply (simp add: ThreadState_defs) apply (fastforce simp: valid_tcb_state'_def) done @@ -575,8 +571,7 @@ lemma hrs_mem_update_use_hrs_mem: lemma sendFaultIPC_ccorres: "ccorres (cfault_rel2 \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and st_tcb_at' simple' tptr and sch_act_not tptr and - (\s. \p. tptr \ set (ksReadyQueues s p))) + (invs' and st_tcb_at' simple' tptr and sch_act_not tptr) (UNIV \ {s. (cfault_rel (Some fault) (seL4_Fault_lift(current_fault_' (globals s))) (lookup_fault_lift(current_lookup_fault_' (globals s))))} \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) @@ -654,15 +649,15 @@ lemma sendFaultIPC_ccorres: apply (ctac (no_vcg) add: sendIPC_ccorres) apply (ctac (no_vcg) add: ccorres_return_CE [unfolded returnOk_def comp_def]) apply wp - apply (wp threadSet_pred_tcb_no_state threadSet_invs_trivial threadSet_typ_at_lifts - | simp)+ + apply (wpsimp wp: threadSet_invs_trivial) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_typ_at_lifts) apply (clarsimp simp: guard_is_UNIV_def) apply (subgoal_tac "capEPBadge epcap && mask 64 = capEPBadge epcap") apply (clarsimp simp: cap_get_tag_isCap isEndpointCap_def isCap_simps ccap_relation_ep_helpers) apply (drule cap_get_tag_isCap(4)[symmetric]) - apply (clarsimp simp: cap_get_tag_EndpointCap to_bool_def) + apply (clarsimp simp: cap_get_tag_EndpointCap) apply (clarsimp simp: case_bool_If) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply clarsimp @@ -688,10 +683,9 @@ lemma sendFaultIPC_ccorres: apply vcg apply (clarsimp simp: inQ_def) apply (rule_tac Q="\a b. invs' b \ st_tcb_at' simple' tptr b - \ sch_act_not tptr b \ valid_cap' a b - \ (\p. tptr \ set (ksReadyQueues b p))" + \ sch_act_not tptr b \ valid_cap' a b" and E="\ _. \" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (wp) apply (clarsimp simp: isCap_simps) apply (clarsimp simp: valid_cap'_def pred_tcb_at')+ @@ -704,8 +698,7 @@ lemma sendFaultIPC_ccorres: done lemma handleFault_ccorres: - "ccorres dc xfdc (invs' and st_tcb_at' simple' t and - sch_act_not t and (\s. \p. t \ set (ksReadyQueues s p))) + "ccorres dc xfdc (invs' and st_tcb_at' simple' t and sch_act_not t) (UNIV \ {s. (cfault_rel (Some flt) (seL4_Fault_lift(current_fault_' (globals s))) (lookup_fault_lift(current_lookup_fault_' (globals s))) )} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t}) @@ -722,12 +715,12 @@ lemma handleFault_ccorres: apply (rule ccorres_return_Skip') apply clarsimp apply (rule ccorres_cond_univ) - apply (ctac (no_vcg) add: handleDoubleFault_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: handleDoubleFault_ccorres) apply (simp add: sendFaultIPC_def) apply wp - apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] apply clarsimp - apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] apply (wp) apply (simp add: guard_is_UNIV_def) apply (simp add: guard_is_UNIV_def) @@ -769,9 +762,7 @@ lemma getMRs_length: lemma handleInvocation_ccorres: "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and - ct_active' and sch_act_simple and - (\s. \x. ksCurThread s \ set (ksReadyQueues s x))) + (invs' and ct_active' and sch_act_simple) (UNIV \ {s. isCall_' s = from_bool isCall} \ {s. isBlocking_' s = from_bool isBlocking}) [] (handleInvocation isCall isBlocking) (Call handleInvocation_'proc)" @@ -895,17 +886,16 @@ lemma handleInvocation_ccorres: apply (simp add: invocationCatch_def o_def) apply (rule_tac Q="\rv'. invs' and tcb_at' rv" and E="\ft. 
invs' and tcb_at' rv" - in hoare_post_impErr) - apply (wp hoare_split_bind_case_sumE - alternative_wp hoare_drop_imps + in hoare_strengthen_postE) + apply (wp hoare_split_bind_case_sumE hoare_drop_imps setThreadState_nonqueued_state_update ct_in_state'_set setThreadState_st_tcb - hoare_vcg_all_lift sts_ksQ' + hoare_vcg_all_lift | wpc | wps)+ apply auto[1] apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) - apply (simp add: "StrictC'_thread_state_defs" mask_def) + apply (simp add: ThreadState_defs mask_def) apply (simp add: typ_heap_simps) apply (case_tac ts, simp_all add: cthread_state_relation_def)[1] apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) @@ -1061,7 +1051,7 @@ lemma handleReply_ccorres: apply (rule ccorres_cond_true) apply simp apply (rule ccorres_return_void_catchbrk) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply (vcg exspec=doReplyTransfer_modifies) apply (rule ccorres_fail)+ apply (wpc, simp_all) @@ -1079,7 +1069,6 @@ lemma handleReply_ccorres: apply (csymbr, csymbr, csymbr) apply simp apply (rule ccorres_assert2) - apply (fold dc_def) apply (rule ccorres_add_return2) apply (ctac (no_vcg)) apply (rule ccorres_return_void_catchbrk) @@ -1160,9 +1149,6 @@ lemma ccorres_trim_redundant_throw_break: lemma invs_valid_objs_strengthen: "invs' s \ valid_objs' s" by fastforce -lemma ct_not_ksQ_strengthen: - "thread = ksCurThread s \ ksCurThread s \ set (ksReadyQueues s p) \ thread \ set (ksReadyQueues s p)" by fastforce - lemma option_to_ctcb_ptr_valid_ntfn: "valid_ntfn' ntfn s ==> (option_to_ctcb_ptr (ntfnBoundTCB ntfn) = NULL) = (ntfnBoundTCB ntfn = None)" apply (cases "ntfnBoundTCB ntfn", simp_all add: option_to_ctcb_ptr_def) @@ -1196,8 +1182,7 @@ lemma handleRecv_ccorres: notes rf_sr_upd_safe[simp del] shows "ccorres dc xfdc - (\s. invs' s \ st_tcb_at' simple' (ksCurThread s) s - \ sch_act_sane s \ (\p. ksCurThread s \ set (ksReadyQueues s p))) + (\s. invs' s \ st_tcb_at' simple' (ksCurThread s) s \ sch_act_sane s) {s. isBlocking_' s = from_bool isBlocking} [] (handleRecv isBlocking) @@ -1240,7 +1225,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1258,10 +1243,10 @@ lemma handleRecv_ccorres: apply (simp add: liftE_bind) apply (ctac) - apply (rule_tac P="\s. ksCurThread s = rv" in ccorres_cross_over_guard) - apply (ctac add: receiveIPC_ccorres[unfolded dc_def]) + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: receiveIPC_ccorres) - apply (wp deleteCallerCap_ksQ_ct' hoare_vcg_all_lift) + apply (wp hoare_vcg_all_lift) apply (rule conseqPost[where Q'=UNIV and A'="{}"], vcg exspec=deleteCallerCap_modifies) apply (clarsimp dest!: rf_sr_ksCurThread) apply simp @@ -1307,7 +1292,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. 
UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1324,7 +1309,7 @@ lemma handleRecv_ccorres: apply (clarsimp simp: rf_sr_upd_safe) apply (simp add: liftE_bind) - apply (ctac add: receiveSignal_ccorres[unfolded dc_def]) + apply (ctac add: receiveSignal_ccorres) apply clarsimp apply (vcg exspec=handleFault_modifies) apply (rule ccorres_cond_true_seq) @@ -1337,7 +1322,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) apply (rule ccorres_add_return2) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_break_return[where P=\ and P'=UNIV]) apply simp+ apply wp @@ -1358,7 +1343,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_symb_exec_r) apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_upd_safe) @@ -1371,9 +1356,9 @@ lemma handleRecv_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=handleFault_modifies) @@ -1384,13 +1369,11 @@ lemma handleRecv_ccorres: apply clarsimp apply (rename_tac thread epCPtr) apply (rule_tac Q'="(\rv s. invs' s \ st_tcb_at' simple' thread s - \ sch_act_sane s \ (\p. thread \ set (ksReadyQueues s p)) \ thread = ksCurThread s - \ valid_cap' rv s)" in hoare_post_imp_R[rotated]) - apply (clarsimp simp: sch_act_sane_def) - apply (auto dest!: obj_at_valid_objs'[OF _ invs_valid_objs'] - simp: projectKOs valid_obj'_def, - auto simp: pred_tcb_at'_def obj_at'_def objBits_simps projectKOs ct_in_state'_def)[1] - apply wp + \ sch_act_sane s \ thread = ksCurThread s + \ valid_cap' rv s)" in hoare_strengthen_postE_R[rotated]) + apply (intro conjI impI allI; clarsimp simp: sch_act_sane_def) + apply (fastforce dest: obj_at_valid_objs'[OF _ invs_valid_objs'] ko_at_valid_ntfn') + apply wp apply clarsimp apply (vcg exspec=isStopped_modifies exspec=lookupCap_modifies) @@ -1407,8 +1390,8 @@ lemma handleRecv_ccorres: apply (frule tcb_aligned'[OF tcb_at_invs']) apply clarsimp apply (intro conjI impI allI) - apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift - lookup_fault_missing_capability_lift is_cap_fault_def)+ + apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift + lookup_fault_missing_capability_lift is_cap_fault_def)+ apply (clarsimp simp: cap_get_tag_NotificationCap) apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption, erule ko_at_projectKO_opt) apply (clarsimp simp: cnotification_relation_def Let_def) @@ -1439,7 +1422,7 @@ lemma handleYield_ccorres: apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear tcbSchedAppend_valid_objs') apply (vcg exspec= tcbSchedAppend_modifies) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues) + apply (wp weak_sch_act_wf_lift_linear) apply (vcg exspec= tcbSchedDequeue_modifies) apply (clarsimp simp: tcb_at_invs' invs_valid_objs' valid_objs'_maxPriority valid_objs'_maxDomain) @@ -1586,11 +1569,9 @@ lemma 
ccorres_return_void_C_Seq: lemma ccorres_handleReservedIRQ: "ccorres dc xfdc - (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s \ - (\p. ksCurThread s \ set (ksReadyQueues s p)))) + (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) (UNIV \ {s. irq_' s = ucast irq}) hs (handleReservedIRQ irq) (Call handleReservedIRQ_'proc)" - supply dc_simp[simp del] apply (cinit lift: irq_') apply (rule ccorres_return_Skip) apply clarsimp @@ -1598,8 +1579,7 @@ lemma ccorres_handleReservedIRQ: lemma handleInterrupt_ccorres: "ccorres dc xfdc - (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s \ - (\p. ksCurThread s \ set (ksReadyQueues s p)))) + (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) (UNIV \ \\irq = ucast irq\) hs (handleInterrupt irq) @@ -1613,11 +1593,11 @@ lemma handleInterrupt_ccorres: apply (subst doMachineOp_bind) apply (rule maskInterrupt_empty_fail) apply (rule ackInterrupt_empty_fail) - apply (ctac add: maskInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: maskInterrupt_ccorres) apply (subst bind_return_unit[where f="doMachineOp (ackInterrupt irq)"]) - apply (ctac add: ackInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=ackInterrupt_modifies) @@ -1656,7 +1636,7 @@ lemma handleInterrupt_ccorres: apply csymbr apply (ctac (no_vcg) add: sendSignal_ccorres) apply (simp add: maskIrqSignal_def) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp+ apply (simp del: Collect_const) apply (rule ccorres_cond_true_seq) @@ -1664,7 +1644,7 @@ lemma handleInterrupt_ccorres: apply csymbr+ apply (rule ccorres_cond_false_seq) apply (simp add: maskIrqSignal_def) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule_tac P=\ and P'="{s. 
ret__int_' s = 0 \ cap_get_tag cap \ scast cap_notification_cap}" in ccorres_inst) apply (clarsimp simp: isCap_simps simp del: Collect_const) apply (case_tac rva, simp_all del: Collect_const)[1] @@ -1674,7 +1654,7 @@ lemma handleInterrupt_ccorres: rule ccorres_guard_imp2, rule ccorres_cond_false_seq, simp, rule ccorres_cond_false_seq, simp, - ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def], + ctac (no_vcg) add: ackInterrupt_ccorres, clarsimp)+ apply (wpsimp wp: getSlotCap_wp simp: maskIrqSignal_def) apply simp @@ -1683,7 +1663,6 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_move_const_guards)+ apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac (no_vcg) add: timerTick_ccorres) apply (ctac (no_vcg) add: resetTimer_ccorres) @@ -1695,7 +1674,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) apply (ctac add: ccorres_handleReservedIRQ) - apply (ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: ackInterrupt_ccorres) apply wp apply (vcg exspec=handleReservedIRQ_modifies) apply (simp add: sint_ucast_eq_uint is_down uint_up_ucast is_up ) diff --git a/proof/crefine/RISCV64/TcbAcc_C.thy b/proof/crefine/RISCV64/TcbAcc_C.thy index 87af5af1f6..ee150308a3 100644 --- a/proof/crefine/RISCV64/TcbAcc_C.thy +++ b/proof/crefine/RISCV64/TcbAcc_C.thy @@ -89,22 +89,22 @@ lemma archThreadGet_eq: apply simp done -lemma get_tsType_ccorres [corres]: +lemma get_tsType_ccorres[corres]: "ccorres (\r r'. r' = thread_state_to_tsType r) ret__unsigned_longlong_' (tcb_at' thread) - (UNIV \ {s. thread_state_ptr_' s = Ptr &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C''])}) [] - (getThreadState thread) (Call thread_state_ptr_get_tsType_'proc)" + ({s. f s = tcb_ptr_to_ctcb_ptr thread} \ + {s. cslift s (Ptr &(f s\[''tcbState_C''])) = Some (thread_state_' s)}) [] + (getThreadState thread) (Call thread_state_get_tsType_'proc)" unfolding getThreadState_def - apply (rule ccorres_from_spec_modifies) - apply (rule thread_state_ptr_get_tsType_spec) - apply (rule thread_state_ptr_get_tsType_modifies) - apply simp - apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: typ_heap_simps) + apply (rule ccorres_from_spec_modifies [where P=\, simplified]) + apply (rule thread_state_get_tsType_spec) + apply (rule thread_state_get_tsType_modifies) + apply simp apply (frule (1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (rule bexI [rotated, OF threadGet_eq], assumption) apply simp - apply (erule ctcb_relation_thread_state_to_tsType) + apply (drule ctcb_relation_thread_state_to_tsType) + apply simp done lemma threadGet_obj_at2: @@ -176,7 +176,7 @@ lemma threadSet_corres_lemma: assumes spec: "\s. \\ \s. P s\ Call f {t. Q s t}" and mod: "modifies_heap_spec f" and rl: "\\ x t ko. \(\, x) \ rf_sr; Q x t; x \ P'; ko_at' ko thread \\ - \ (\\ksPSpace := ksPSpace \(thread \ KOTCB (g ko))\, + \ (\\ksPSpace := (ksPSpace \)(thread \ KOTCB (g ko))\, t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" and g: "\s x. \tcb_at' thread s; x \ P'; (s, x) \ rf_sr\ \ P x" shows "ccorres dc xfdc (tcb_at' thread) P' [] (threadSet g thread) (Call f)" @@ -205,7 +205,7 @@ lemma threadSet_corres_lemma: lemma threadSet_ccorres_lemma4: - "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := ksPSpace s(thread \ injectKOS (F tcb))\, s') \ rf_sr}; + "\ \s tcb. \ \ (Q s tcb) c {s'. 
(s \ksPSpace := (ksPSpace s)(thread \ injectKOS (F tcb))\, s') \ rf_sr}; \s s' tcb tcb'. \ (s, s') \ rf_sr; P tcb; ko_at' tcb thread s; cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some tcb'; ctcb_relation tcb tcb'; P' s ; s' \ R\ \ s' \ Q s tcb \ diff --git a/proof/crefine/RISCV64/TcbQueue_C.thy b/proof/crefine/RISCV64/TcbQueue_C.thy index 5cc7952bb1..bca4869138 100644 --- a/proof/crefine/RISCV64/TcbQueue_C.thy +++ b/proof/crefine/RISCV64/TcbQueue_C.thy @@ -967,49 +967,6 @@ lemma tcb_queue_relation'_prev_sign: \ sign_extend canonical_bit (ptr_val (getPrev tcb)) = ptr_val (getPrev tcb)" by (rule tcb_queue_relation_prev_sign [OF tcb_queue_relation'_queue_rel]) - -lemma cready_queues_relation_null_queue_ptrs: - assumes rel: "cready_queues_relation mp cq aq" - and same: "option_map tcb_null_ep_ptrs \ mp' = option_map tcb_null_ep_ptrs \ mp" - shows "cready_queues_relation mp' cq aq" - using rel - apply (clarsimp simp: cready_queues_relation_def Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp, (erule conjI)+, assumption) - apply (clarsimp simp: tcb_queue_relation'_def) - apply (erule iffD2 [OF tcb_queue_relation_only_next_prev, rotated -1]) - apply (rule ext) - apply (case_tac "mp' x") - apply (frule compD [OF same]) - apply simp - apply (frule compD [OF same]) - apply (clarsimp simp: tcb_null_ep_ptrs_def) - apply (case_tac z, case_tac a) - apply simp - \ \clag\ - apply (rule ext) - apply (case_tac "mp' x") - apply (frule compD [OF same]) - apply simp - apply (frule compD [OF same]) - apply (clarsimp simp: tcb_null_ep_ptrs_def) - apply (case_tac z, case_tac a) - apply simp - done - -lemma cready_queues_relation_not_queue_ptrs: - assumes rel: "cready_queues_relation mp cq aq" - and same: "option_map tcbSchedNext_C \ mp' = option_map tcbSchedNext_C \ mp" - "option_map tcbSchedPrev_C \ mp' = option_map tcbSchedPrev_C \ mp" - shows "cready_queues_relation mp' cq aq" - using rel - apply (clarsimp simp: cready_queues_relation_def tcb_queue_relation'_def Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp, (erule conjI)+, assumption) - apply clarsimp - apply (erule iffD2 [OF tcb_queue_relation_only_next_prev, rotated -1]) - apply (rule same) - apply (rule same) - done - lemma ntfn_ep_disjoint: assumes srs: "sym_refs (state_refs_of' s)" and epat: "ko_at' ep epptr s" @@ -1093,8 +1050,8 @@ lemma cpspace_relation_ntfn_update_ntfn: and cp: "cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals t))" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" using koat invs cp rel apply - apply (subst map_comp_update) @@ -1376,12 +1333,10 @@ lemma rf_sr_tcb_update_no_queue: (t_hrs_' (globals s')); tcbEPNext_C ctcb = tcbEPNext_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); tcbEPPrev_C ctcb = tcbEPPrev_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); - tcbSchedNext_C ctcb = tcbSchedNext_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); - tcbSchedPrev_C ctcb = tcbSchedPrev_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); (\x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes @@ -1392,31 +1347,22 @@ lemma rf_sr_tcb_update_no_queue: apply (clarsimp simp: map_comp_update projectKO_opt_tcb cvariable_relation_upd_const typ_heap_simps') apply (intro conjI) - subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_upd_tcb_no_queues, assumption+) - subgoal by fastforce - subgoal by fastforce + subgoal by (clarsimp simp: cmap_relation_def map_comp_update projectKO_opts_defs inj_eq) apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_upd_tcb_no_queues, assumption+) + apply (rule cendpoint_relation_upd_tcb_no_queues, assumption+) subgoal by fastforce subgoal by fastforce - apply (erule cready_queues_relation_not_queue_ptrs) + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_upd_tcb_no_queues, assumption+) subgoal by fastforce subgoal by fastforce subgoal by (clarsimp simp: carch_state_relation_def typ_heap_simps') by (simp add: cmachine_state_relation_def) -lemma rf_sr_tcb_update_no_queue_helper: - "(s, s'\ globals := globals s' \ t_hrs_' := t_hrs_' (globals (undefined - \ globals := (undefined \ t_hrs_' := f (globals s') (t_hrs_' (globals s')) \)\))\\) \ rf_sr - \ (s, globals_update (\v. t_hrs_'_update (f v) v) s') \ rf_sr" - by (simp cong: StateSpace.state.fold_congs globals.fold_congs) - -lemmas rf_sr_tcb_update_no_queue2 - = rf_sr_tcb_update_no_queue_helper [OF rf_sr_tcb_update_no_queue, simplified] +lemmas rf_sr_tcb_update_no_queue2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue, simplified] lemma tcb_queue_relation_not_in_q: "ctcb_ptr_to_tcb_ptr x \ set xs \ @@ -1431,7 +1377,7 @@ lemma rf_sr_tcb_update_not_in_queue: \ live' (KOTCB tcb); invs' s; (\x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes @@ -1464,18 +1410,9 @@ lemma rf_sr_tcb_update_not_in_queue: apply (drule(1) map_to_ko_atI') apply (drule sym_refs_ko_atD', clarsimp+) subgoal by blast - apply (simp add: cready_queues_relation_def, erule allEI) apply (clarsimp simp: Let_def) - apply (subst tcb_queue_relation_not_in_q) - apply clarsimp - apply (drule valid_queues_obj_at'D, clarsimp) - apply (clarsimp simp: obj_at'_def projectKOs inQ_def) - subgoal by simp apply (simp add: carch_state_relation_def) by (simp add: cmachine_state_relation_def) -lemmas rf_sr_tcb_update_not_in_queue2 - = rf_sr_tcb_update_no_queue_helper [OF rf_sr_tcb_update_not_in_queue, simplified] - end end diff --git a/proof/crefine/RISCV64/Tcb_C.thy b/proof/crefine/RISCV64/Tcb_C.thy index efd21d933a..6c92a4613c 100644 --- a/proof/crefine/RISCV64/Tcb_C.thy +++ b/proof/crefine/RISCV64/Tcb_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * @@ -59,8 +60,6 @@ lemma doMachineOp_sched: done context begin interpretation Arch . (*FIXME: arch_split*) -crunch queues[wp]: setupReplyMaster "valid_queues" - (simp: crunch_simps wp: crunch_wps) crunch curThread [wp]: restart "\s. P (ksCurThread s)" (wp: crunch_wps simp: crunch_simps) @@ -97,8 +96,8 @@ lemma getMRs_rel_sched: lemma getObject_state: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbState_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -159,8 +158,8 @@ lemma getObject_state: lemma threadGet_state: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) t' s); ko_at' ko t s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_state [where st=st]) apply (rule exI) @@ -170,8 +169,8 @@ lemma threadGet_state: lemma asUser_state: "\(x,s) \ fst (asUser t' f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ \ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (asUser t' f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (asUser t' f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. 
st) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -271,8 +270,8 @@ lemma asUser_state: lemma doMachineOp_state: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -305,7 +304,7 @@ lemma getMRs_rel_state: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s \ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -397,9 +396,10 @@ lemma hrs_mem_update_cong: lemma setPriority_ccorres: "ccorres dc xfdc - (\s. tcb_at' t s \ Invariants_H.valid_queues s \ ksCurDomain s \ maxDomain \ - valid_queues' s \ valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ (priority \ maxPriority)) - (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. prio_' s = ucast priority}) + (\s. tcb_at' t s \ ksCurDomain s \ maxDomain \ + valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ (priority \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s) + ({s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. prio_' s = ucast priority}) [] (setPriority t priority) (Call setPriority_'proc)" apply (cinit lift: tptr_' prio_') apply (ctac(no_vcg) add: tcbSchedDequeue_ccorres) @@ -418,11 +418,11 @@ lemma setPriority_ccorres: apply (rule ccorres_pre_getCurThread) apply (rule_tac R = "\s. rv = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) apply (rule ccorres_return_Skip') apply (wp isRunnable_wp) - apply (wpsimp wp: hoare_drop_imps threadSet_valid_queues threadSet_valid_objs' + apply (wpsimp wp: hoare_drop_imps threadSet_valid_objs' weak_sch_act_wf_lift_linear threadSet_pred_tcb_at_state threadSet_tcbDomain_triv simp: st_tcb_at'_def o_def split: if_splits) @@ -431,19 +431,14 @@ lemma setPriority_ccorres: where Q="\rv s. obj_at' (\_. True) t s \ priority \ maxPriority \ - Invariants_H.valid_queues s \ ksCurDomain s \ maxDomain \ valid_objs' s \ - valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s \ - (\d p. 
\ t \ set (ksReadyQueues s (d, p)))"]) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues tcbSchedDequeue_nonq) + pspace_aligned' s \ pspace_distinct' s"]) + apply (wp weak_sch_act_wf_lift_linear valid_tcb'_def) apply (clarsimp simp: valid_tcb'_tcbPriority_update) apply clarsimp - apply (frule (1) valid_objs'_maxDomain[where t=t]) - apply (frule (1) valid_objs'_maxPriority[where t=t]) - apply simp -done + done lemma setMCPriority_ccorres: "ccorres dc xfdc @@ -498,8 +493,8 @@ lemma checkCapAt_ccorres: apply assumption apply (simp only: when_def if_to_top_of_bind) apply (rule ccorres_if_lhs) - apply (simp add: from_bool_def true_def) - apply (simp add: from_bool_def false_def) + apply simp + apply simp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -524,7 +519,7 @@ lemma cteInsert_cap_to'2: apply (simp add: cteInsert_def ex_nonz_cap_to'_def setUntypedCapAsFull_def) apply (rule hoare_vcg_ex_lift) apply (wp updateMDB_weak_cte_wp_at - updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp) + updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of) apply auto done @@ -625,7 +620,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply csymbr apply (simp add: liftE_bindE[symmetric] bindE_assoc getThreadBufferSlot_def - locateSlot_conv o_def + locateSlot_conv del: Collect_const) apply (simp add: liftE_bindE del: Collect_const) apply (ctac(no_vcg) add: cteDelete_ccorres) @@ -651,13 +646,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (simp add: assertDerived_def bind_assoc del: Collect_const) apply (rule ccorres_symb_exec_l) @@ -671,7 +666,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -680,36 +675,36 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wp (once)) apply (clarsimp simp: guard_is_UNIV_def) - apply (wpsimp wp: when_def static_imp_wp) + apply (wpsimp wp: when_def hoare_weak_lift_imp) apply (strengthen sch_act_wf_weak, wp) apply clarsimp apply wp apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (rule hoare_strengthen_post[ where Q= "\rv s. - Invariants_H.valid_queues s \ valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ ((\a b. 
priority = Some (a, b)) \ tcb_at' target s \ ksCurDomain s \ maxDomain \ - valid_queues' s \ fst (the priority) \ maxPriority)"]) + fst (the priority) \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s"]) apply (strengthen sch_act_wf_weak) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (clarsimp split: if_splits) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) apply (rule ccorres_split_nothrow_novcg_dc) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule_tac C'="{s. target = curThread}" and Q="\s. ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -719,26 +714,26 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply (simp add: when_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbBuffer_def size_of_def cte_level_bits_def tcbIPCBufferSlot_def mask_def objBits_defs) apply csymbr - apply (simp add: if_1_0_0 Collect_False false_def ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) apply (rule ccorres_cond_false_seq, simp) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule_tac C'="{s. target = curThread}" and Q="\s. 
ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -747,9 +742,9 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+) apply wp apply (clarsimp simp: guard_is_UNIV_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) - apply (simp add: guard_is_UNIV_def if_1_0_0 false_def Collect_const_mem + apply (simp add: guard_is_UNIV_def Collect_const_mem flip: canonical_bit_def) apply (clarsimp simp: ccap_relation_def cap_thread_cap_lift cap_to_H_def canonical_address_bitfield_extract_tcb) (* \ P *) @@ -759,14 +754,14 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp split: option.split_asm) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule_tac C'="{s. target = curThread}" and Q="\s. ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -776,17 +771,17 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply wpsimp - apply (wp static_imp_wp, strengthen sch_act_wf_weak, wp ) + apply (wp hoare_weak_lift_imp, strengthen sch_act_wf_weak, wp ) apply wp apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (simp cong: conj_cong) apply (rule hoare_strengthen_post[ - where Q="\a b. (Invariants_H.valid_queues b \ - valid_objs' b \ + where Q="\a b. (valid_objs' b \ sch_act_wf (ksSchedulerAction b) b \ + pspace_aligned' b \ pspace_distinct' b \ ((\a b. 
priority = Some (a, b)) \ tcb_at' target b \ - ksCurDomain b \ maxDomain \ valid_queues' b \ + ksCurDomain b \ maxDomain \ fst (the priority) \ maxPriority)) \ ((case snd (the buf) of None \ 0 @@ -808,15 +803,15 @@ lemma invokeTCB_ThreadControl_ccorres: prefer 2 apply fastforce apply (strengthen cte_is_derived_capMasterCap_strg - invs_queues invs_weak_sch_act_wf invs_sch_act_wf' + invs_weak_sch_act_wf invs_sch_act_wf' invs_valid_objs' invs_mdb' invs_pspace_aligned', simp add: o_def) apply (rule_tac P="is_aligned (fst (the buf)) msg_align_bits" in hoare_gen_asm) - apply (wp threadSet_ipcbuffer_trivial static_imp_wp + apply (wp threadSet_ipcbuffer_trivial hoare_weak_lift_imp | simp - | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf invs_queues - invs_valid_queues' | wp hoare_drop_imps)+ + | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf + | wp hoare_drop_imps)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem option_to_0_def split: option.split_asm) @@ -825,7 +820,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg apply (simp add: conj_comms cong: conj_cong) - apply (strengthen invs_ksCurDomain_maxDomain') + apply (strengthen invs_ksCurDomain_maxDomain' invs_pspace_distinct') apply (wp hoare_vcg_const_imp_lift_R cteDelete_invs') apply simp apply (rule ccorres_split_nothrow_novcg_dc) @@ -838,12 +833,11 @@ lemma invokeTCB_ThreadControl_ccorres: apply (clarsimp simp: inQ_def Collect_const_mem cintr_def exception_defs tcb_cnode_index_defs) apply (simp add: tcbBuffer_def tcbIPCBufferSlot_def word_sle_def - cte_level_bits_def from_bool_def true_def size_of_def case_option_If2 ) + cte_level_bits_def size_of_def case_option_If2 ) apply (rule conjI) apply (clarsimp simp: objBits_simps' word_bits_conv case_option_If2 if_n_0_0 valid_cap'_def capAligned_def obj_at'_def projectKOs) - apply (clarsimp simp: invs_valid_objs' invs_valid_queues' - Invariants_H.invs_queues invs_ksCurDomain_maxDomain') + apply (fastforce simp: invs_valid_objs' invs_ksCurDomain_maxDomain') apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -854,11 +848,10 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -871,34 +864,31 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: 
Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbVTable_def tcbVTableSlot_def Kernel_C.tcbVTable_def cte_level_bits_def size_of_def option_to_0_def objBits_defs mask_def) apply csymbr - apply (simp add: if_1_0_0 false_def Collect_False + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (clarsimp simp: guard_is_UNIV_def false_def - ccap_relation_def cap_thread_cap_lift - cap_to_H_def if_1_0_0 Collect_const_mem + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift + cap_to_H_def Collect_const_mem canonical_address_bitfield_extract_tcb simp flip: canonical_bit_def) apply simp @@ -909,7 +899,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes cteDelete_sch_act_simple | strengthen invs_valid_objs')+ - apply (rule hoare_post_imp_R[where Q' = "\r. invs'"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r. invs'"]) apply (wp cteDelete_invs') apply (clarsimp simp:cte_wp_at_ctes_of) apply simp @@ -923,20 +913,18 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] checkCap_inv [where P="valid_cap' c" for c] checkCap_inv [where P="sch_act_simple"] | simp)+ - apply (clarsimp simp: guard_is_UNIV_def from_bool_def true_def - word_sle_def if_1_0_0 Collect_const_mem + apply (clarsimp simp: guard_is_UNIV_def word_sle_def Collect_const_mem option_to_0_def Kernel_C.tcbVTable_def tcbVTableSlot_def cte_level_bits_def size_of_def cintr_def tcb_cnode_index_defs objBits_defs mask_def) @@ -945,34 +933,31 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem - Kernel_C.tcbCTable_def tcbCTableSlot_def if_1_0_0 + Kernel_C.tcbCTable_def tcbCTableSlot_def cte_level_bits_def size_of_def option_to_0_def mask_def objBits_defs) apply csymbr - apply (simp add: false_def Collect_False + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply 
(clarsimp simp: guard_is_UNIV_def false_def - ccap_relation_def cap_thread_cap_lift - cap_to_H_def if_1_0_0 Collect_const_mem + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift + cap_to_H_def Collect_const_mem canonical_address_bitfield_extract_tcb simp flip: canonical_bit_def) apply simp @@ -982,20 +967,20 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: cte_is_derived_capMasterCap_strg o_def) apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes cteDelete_sch_act_simple | strengthen invs_valid_objs')+ - apply (rule hoare_post_imp_R[where Q' = "\r. invs'"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r. invs'"]) apply (wp cteDelete_invs') apply (clarsimp simp:cte_wp_at_ctes_of) apply simp apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def tcbCTableSlot_def Kernel_C.tcbCTable_def cte_level_bits_def size_of_def word_sle_def option_to_0_def - true_def from_bool_def cintr_def objBits_defs mask_def) + cintr_def objBits_defs mask_def) apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: inQ_def) apply (subst is_aligned_neg_mask_eq) @@ -1023,7 +1008,7 @@ lemma setupReplyMaster_ccorres: apply (cinit lift: thread_') apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply ctac - apply (simp del: Collect_const add: dc_def[symmetric]) + apply (simp del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) apply (rule_tac F="\rv'. 
(rv' = scast cap_null_cap) = (cteCap oldCTE = NullCap)" @@ -1061,7 +1046,6 @@ lemma setupReplyMaster_ccorres: apply (simp add: cte_to_H_def cap_to_H_def mdb_node_to_H_def nullMDBNode_def c_valid_cte_def) apply (simp add: cap_reply_cap_lift) - apply (simp add: true_def mask_def to_bool_def) apply simp apply (simp add: cmachine_state_relation_def packed_heap_update_collapse_hrs carch_state_relation_def carch_globals_def @@ -1090,7 +1074,7 @@ lemma restart_ccorres: apply (ctac(no_vcg) add: tcbSchedEnqueue_ccorres) apply (ctac add: possibleSwitchTo_ccorres) apply (wp weak_sch_act_wf_lift)[1] - apply (wp sts_valid_queues setThreadState_st_tcb)[1] + apply (wp sts_valid_objs' setThreadState_st_tcb)[1] apply (simp add: valid_tcb_state'_def) apply wp apply (wp (once) sch_act_wf_lift, (wp tcb_in_cur_domain'_lift)+) @@ -1102,7 +1086,7 @@ lemma restart_ccorres: apply fastforce apply (rule ccorres_return_Skip) apply (wp hoare_drop_imps) - apply (auto simp: Collect_const_mem mask_def "StrictC'_thread_state_defs") + apply (auto simp: Collect_const_mem mask_def ThreadState_defs) done lemma setNextPC_ccorres: @@ -1161,7 +1145,7 @@ lemma postModifyRegisters_ccorres: apply (simp add: if_distrib[where f="asUser t" for t] asUser_return) apply (rule ccorres_add_return2) apply (rule ccorres_stateAssert) - apply (rule ccorres_return_Skip'[unfolded dc_def]) + apply (rule ccorres_return_Skip') by simp+ lemma invokeTCB_CopyRegisters_ccorres: @@ -1237,7 +1221,7 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (rule ccorres_pre_getCurThread) apply (ctac add: postModifyRegisters_ccorres) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rvd = ksCurThread s" + apply (rule_tac R="\s. rvc = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp @@ -1267,7 +1251,7 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (fastforce simp: sch_act_wf_weak) apply (wpsimp wp: hoare_drop_imp)+ apply (clarsimp simp add: guard_is_UNIV_def) - apply (clarsimp simp: to_bool_def invs_weak_sch_act_wf invs_valid_objs' + apply (clarsimp simp: invs_weak_sch_act_wf invs_valid_objs' split: if_split cong: if_cong | rule conjI)+ apply (clarsimp dest!: global'_no_ex_cap simp: invs'_def valid_state'_def | rule conjI)+ done @@ -1303,8 +1287,8 @@ lemma invokeTCB_WriteRegisters_ccorres_helper: lemma doMachineOp_context: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -1313,8 +1297,8 @@ lemma doMachineOp_context: lemma getObject_context: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbContext_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. 
st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -1376,8 +1360,8 @@ lemma getObject_context: lemma threadGet_context: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) s); ko_at' ko t s; t \ ksCurThread s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_context [where st=st]) apply (rule exI) @@ -1389,8 +1373,8 @@ done lemma asUser_context: "\(x,s) \ fst (asUser (ksCurThread s) f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ ; t \ ksCurThread s\ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (asUser (ksCurThread s) f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (asUser (ksCurThread s) f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -1463,7 +1447,7 @@ lemma getMRs_rel_context: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s ; t \ ksCurThread s\ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -1523,7 +1507,7 @@ lemma asUser_getMRs_rel: apply (erule getMRs_rel_context, simp) apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs) apply simp -done + done lemma asUser_sysargs_rel: @@ -1548,7 +1532,7 @@ lemma asUser_setRegister_ko_at': done lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: - notes static_imp_wp [wp] word_less_1[simp del] + notes hoare_weak_lift_imp [wp] word_less_1[simp del] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' dst and ex_nonz_cap_to' dst and sch_act_simple @@ -1563,6 +1547,7 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: \ {s. buffer_' s = option_to_ptr buffer}) [] (invokeTCB (WriteRegisters dst resume values arch)) (Call invokeTCB_WriteRegisters_'proc)" + supply empty_fail_cond[simp] apply (rule ccorres_gen_asm) apply (erule conjE) apply (cinit lift: n_' dest___ptr_to_struct_tcb_C_' resumeTarget_' buffer_' @@ -1654,14 +1639,14 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_when[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) - apply (rule_tac xf'="\_. 
0" in ccorres_call) - apply (rule restart_ccorres) + apply (rule_tac xf'=Corres_C.xfdc in ccorres_call) + apply (rule restart_ccorres) + apply simp apply simp - apply (simp add: xfdc_def) apply simp apply (rule ceqv_refl) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (rule_tac R="\s. self = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp @@ -1706,7 +1691,7 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (clarsimp simp: frame_gp_registers_convs word_less_nat_alt sysargs_rel_def n_frameRegisters_def n_msgRegisters_def split: if_split_asm) - apply (simp add: invs_weak_sch_act_wf invs_valid_objs' invs_queues) + apply (simp add: invs_weak_sch_act_wf invs_valid_objs') apply (fastforce dest!: global'_no_ex_cap simp: invs'_def valid_state'_def) done @@ -1720,7 +1705,7 @@ lemma invokeTCB_Suspend_ccorres: apply (ctac(no_vcg) add: suspend_ccorres[OF cteDeleteOne_ccorres]) apply (rule ccorres_return_CE, simp+)[1] apply wp - apply (clarsimp simp: from_bool_def true_def) + apply clarsimp apply (auto simp: invs'_def valid_state'_def global'_no_ex_cap) done @@ -1734,7 +1719,7 @@ lemma invokeTCB_Resume_ccorres: apply (ctac(no_vcg) add: restart_ccorres) apply (rule ccorres_return_CE, simp+)[1] apply wp - apply (clarsimp simp: from_bool_def true_def) + apply clarsimp done lemma Arch_decodeTransfer_spec: @@ -1811,6 +1796,7 @@ shows (doE reply \ invokeTCB (ReadRegisters target susp n archCp); liftE (replyOnRestart thread reply isCall) odE) (Call invokeTCB_ReadRegisters_'proc)" + supply empty_fail_cond[simp] apply (rule ccorres_gen_asm) apply (cinit' lift: tcb_src_' suspendSource_' n_' call_' simp: invokeTCB_def liftE_bindE bind_assoc) @@ -1836,10 +1822,11 @@ shows apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_getThreadState]) apply (rule ccorres_if_lhs[OF _ ccorres_False[where P'=UNIV]]) apply (rule ccorres_if_lhs) - apply (simp add: Collect_True true_def whileAnno_def del: Collect_const) + apply (simp add: Collect_True whileAnno_def del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupIPCBuffer_ccorres) + apply (rename_tac state destIPCBuffer ipcBuffer) apply (ctac add: setRegister_ccorres) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc2) @@ -1899,15 +1886,15 @@ shows apply (rule bind_apply_cong[OF _ refl]) apply (rule_tac n1="min (unat n_frameRegisters - unat n_msgRegisters) (unat n)" in fun_cong [OF mapM_x_split_append]) - apply (rule_tac P="rva \ Some 0" in ccorres_gen_asm) - apply (subgoal_tac "(ipcBuffer = NULL) = (rva = None)") + apply (rule_tac P="destIPCBuffer \ Some 0" in ccorres_gen_asm) + apply (subgoal_tac "(ipcBuffer = NULL) = (destIPCBuffer = None)") prefer 2 apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.split_asm) apply (simp add: bind_assoc del: Collect_const) apply (rule_tac xf'=i_' and r'="\_ rv. unat rv = min (unat n_frameRegisters) (min (unat n) - (case rva of None \ unat n_msgRegisters + (case destIPCBuffer of None \ unat n_msgRegisters | _ \ unat n_frameRegisters))" in ccorres_split_nothrow_novcg) apply (rule ccorres_Cond_rhs) @@ -1915,7 +1902,7 @@ shows rule_tac F="\m s. obj_at' (\tcb. 
map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (RISCV64_H.frameRegisters @ RISCV64_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="unat n_msgRegisters" in ccorres_mapM_x_while') @@ -2024,11 +2011,10 @@ shows apply (rename_tac i_c, rule_tac P="i_c = 0" in ccorres_gen_asm2) apply (simp add: drop_zip del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp del: Collect_const) apply (rule_tac F="\m s. obj_at' (\tcb. map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (RISCV64_H.frameRegisters @ RISCV64_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s \ valid_pspace' s" + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="0" in ccorres_mapM_x_while') apply (clarsimp simp: less_diff_conv drop_zip) apply (rule ccorres_guard_imp2) @@ -2101,11 +2087,11 @@ shows apply (simp add: min_less_iff_disj less_imp_diff_less) apply (simp add: drop_zip n_gpRegisters_def) apply (elim disjE impCE) - apply (clarsimp simp: mapM_x_Nil) + apply (clarsimp simp: mapM_x_Nil cong: ccorres_all_cong) apply (rule ccorres_return_Skip') - apply (simp add: linorder_not_less word_le_nat_alt - drop_zip mapM_x_Nil n_frameRegisters_def - min.absorb1 n_msgRegisters_def) + apply (simp add: linorder_not_less word_le_nat_alt drop_zip + mapM_x_Nil n_frameRegisters_def n_msgRegisters_def + cong: ccorres_all_cong) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') apply simp apply ceqv @@ -2117,7 +2103,7 @@ shows apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp | simp add: valid_tcb_state'_def)+ - apply (clarsimp simp: ThreadState_Running_def mask_def) + apply (clarsimp simp: ThreadState_defs mask_def) apply (rule mapM_x_wp') apply (rule hoare_pre) apply (wp sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift) @@ -2137,15 +2123,15 @@ shows apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size word_less_nat_alt split: if_split_asm dest!: word_unat.Rep_inverse') - apply simp - apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift static_imp_wp + apply (simp add: pred_conj_def) + apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift hoare_weak_lift_imp tcb_in_cur_domain'_lift) apply (simp add: n_frameRegisters_def n_msgRegisters_def guard_is_UNIV_def) apply simp apply (rule mapM_x_wp') apply (rule hoare_pre) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem @@ -2154,7 +2140,7 @@ shows msgMaxLength_def msgLengthBits_def word_less_nat_alt unat_of_nat) apply (wp (once) hoare_drop_imps) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply (vcg exspec=setRegister_modifies) apply simp @@ -2165,7 +2151,7 @@ shows RISCV64.badgeRegister_def RISCV64.capRegister_def "StrictC'_register_defs") apply (vcg exspec=lookupIPCBuffer_modifies) - apply (simp add: false_def) + apply simp apply (ctac(no_vcg) add: setThreadState_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) @@ -2174,18 +2160,17 @@ shows apply (simp cong: rev_conj_cong) apply wp apply (wp asUser_inv mapM_wp' getRegister_inv - asUser_get_registers[simplified] static_imp_wp)+ + asUser_get_registers[simplified] 
hoare_weak_lift_imp)+ apply (rule hoare_strengthen_post, rule asUser_get_registers) apply (clarsimp simp: obj_at'_def genericTake_def frame_gp_registers_convs) apply arith - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply (simp add: performTransfer_def) apply wp - apply (simp add: Collect_const_mem "StrictC'_thread_state_defs" - mask_def) + apply (simp add: Collect_const_mem ThreadState_defs mask_def) apply vcg apply (rule_tac Q="\rv. invs' and st_tcb_at' ((=) Restart) thread and tcb_at' target" in hoare_post_imp) @@ -2195,7 +2180,7 @@ shows apply (vcg exspec=suspend_modifies) apply vcg apply (rule conseqPre, vcg, clarsimp) - apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def true_def dc_def + apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def split: if_split) done @@ -2260,7 +2245,8 @@ lemma decodeReadRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2271,13 +2257,13 @@ lemma decodeReadRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2304,7 +2290,7 @@ lemma decodeReadRegisters_ccorres: apply wp apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread - "StrictC'_thread_state_defs" word_sless_def word_sle_def + ThreadState_defs word_sless_def word_sle_def mask_eq_iff_w2p word_size isCap_simps ReadRegistersFlags_defs tcb_at_invs' cap_get_tag_isCap capTCBPtr_eq) @@ -2317,7 +2303,7 @@ lemma decodeReadRegisters_ccorres: valid_tcb_state'_def elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] - apply (clarsimp simp: from_bool_def word_and_1 split: if_split) + apply (clarsimp simp: word_and_1 split: if_split) done lemma decodeWriteRegisters_ccorres: @@ -2372,7 +2358,8 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. 
capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2383,13 +2370,13 @@ lemma decodeWriteRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2397,7 +2384,7 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: performInvocation_def) apply (ctac(no_vcg) add: invokeTCB_WriteRegisters_ccorres [where args=args and someNum="unat (args ! 1)"]) - apply (simp add: dc_def[symmetric] o_def) + apply simp apply (rule ccorres_alternative2, rule ccorres_return_CE, simp+) apply (rule ccorres_return_C_errorE, simp+)[1] apply wp[1] @@ -2412,12 +2399,12 @@ lemma decodeWriteRegisters_ccorres: apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: Collect_const_mem ct_in_state'_def pred_tcb_at') apply (simp add: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) - apply (clarsimp simp: valid_cap'_def "StrictC'_thread_state_defs" + apply (clarsimp simp: valid_cap'_def ThreadState_defs mask_eq_iff_w2p word_size rf_sr_ksCurThread WriteRegisters_resume_def word_sle_def word_sless_def numeral_eqs) apply (frule arg_cong[where f="\x. unat (of_nat x :: machine_word)"], - simp(no_asm_use) only: word_unat.Rep_inverse o_def, + simp(no_asm_use) only: word_unat.Rep_inverse, simp) apply (rule conjI) apply clarsimp @@ -2430,8 +2417,7 @@ lemma decodeWriteRegisters_ccorres: apply (rule disjCI2) apply (clarsimp simp: genericTake_def linorder_not_less) apply (subst hd_conv_nth, clarsimp simp: unat_eq_0) - apply (clarsimp simp: from_bool_def word_and_1 - split: if_split) + apply (clarsimp simp: word_and_1 split: if_split) done lemma excaps_map_Nil: "(excaps_map caps = []) = (caps = [])" @@ -2499,7 +2485,7 @@ lemma decodeCopyRegisters_ccorres: apply (simp add: case_bool_If if_to_top_of_bindE if_to_top_of_bind del: Collect_const cong: if_cong) - apply (simp add: to_bool_def returnOk_bind Collect_True + apply (simp add: returnOk_bind Collect_True ccorres_invocationCatch_Inr performInvocation_def del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2549,7 +2535,7 @@ lemma decodeCopyRegisters_ccorres: elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1] apply (clarsimp simp: word_sle_def CopyRegistersFlags_defs word_sless_def - "StrictC'_thread_state_defs" rf_sr_ksCurThread + ThreadState_defs rf_sr_ksCurThread split: if_split) apply (drule interpret_excaps_eq) apply (clarsimp simp: mask_def excaps_map_def split_def ccap_rights_relation_def @@ -2634,7 +2620,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (simp add: case_Null_If del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_if_lhs) @@ -2655,7 +2641,7 @@ lemma slotCapLongRunningDelete_ccorres: apply vcg apply (simp del: Collect_const) apply 
(rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of return_def) @@ -2663,7 +2649,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap from_bool_0 dest!: ccte_relation_ccap_relation) - apply (simp add: from_bool_def false_def true_def + apply (simp add: from_bool_def split: bool.split) apply (auto simp add: longRunningDelete_def isCap_simps split: capability.split)[1] @@ -2671,13 +2657,12 @@ lemma slotCapLongRunningDelete_ccorres: apply (wp hoare_drop_imps isFinalCapability_inv) apply (clarsimp simp: Collect_const_mem guard_is_UNIV_def) apply (rename_tac rv') - apply (case_tac rv'; clarsimp simp: false_def true_def) + apply (case_tac rv'; clarsimp simp: false_def) apply vcg apply (rule conseqPre, vcg, clarsimp) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) - apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap - from_bool_def false_def map_comp_Some_iff + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap map_comp_Some_iff dest!: ccte_relation_ccap_relation) done @@ -2692,7 +2677,7 @@ lemma isValidVTableRoot_spec: {s'. ret__unsigned_long_' s' = from_bool (isValidVTableRoot_C (cap_' s))}" apply vcg apply (clarsimp simp: isValidVTableRoot_C_def if_1_0_0 from_bool_0) - apply (simp add: from_bool_def to_bool_def false_def split: if_split) + apply (simp add: to_bool_def split: if_split) done lemma isValidVTableRoot_conv: @@ -2706,9 +2691,8 @@ lemma isValidVTableRoot_conv: apply (case_tac "cap_get_tag cap' = scast cap_page_table_cap") apply (clarsimp split: arch_capability.split simp: isCap_simps) apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 - cap_page_table_cap_lift cap_to_H_def - from_bool_def) - apply (clarsimp simp: to_bool_def split: if_split) + cap_page_table_cap_lift cap_to_H_def) + apply (clarsimp split: if_split) apply (clarsimp simp: cap_get_tag_isCap cap_get_tag_isCap_ArchObject) apply (simp split: arch_capability.split_asm add: isCap_simps) apply (case_tac "cap_get_tag cap' = scast cap_page_table_cap") @@ -3023,7 +3007,7 @@ lemma decodeTCBConfigure_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (subgoal_tac "extraCaps \ []") - apply (clarsimp simp: returnOk_def return_def hd_conv_nth false_def) + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) apply fastforce apply clarsimp apply ceqv @@ -3050,7 +3034,7 @@ lemma decodeTCBConfigure_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def - hd_drop_conv_nth2 false_def) + hd_drop_conv_nth2) apply fastforce apply ceqv apply (ctac add: ccorres_injection_handler_csum1 @@ -3136,7 +3120,7 @@ lemma decodeTCBConfigure_ccorres: ptr_val_tcb_ptr_mask2[unfolded mask_def objBits_defs, simplified] tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - StrictC'_thread_state_defs mask_eq_iff_w2p word_size + ThreadState_defs mask_eq_iff_w2p word_size from_bool_all_helper all_ex_eq_helper ucast_ucast_mask objBits_defs) apply (subgoal_tac "args \ [] \ extraCaps \ []") @@ -3172,7 +3156,8 @@ lemma decodeTCBConfigure_ccorres: apply (rule conjI, fastforce) apply (drule interpret_excaps_eq) apply (clarsimp simp: cte_wp_at_ctes_of valid_tcb_state'_def numeral_eqs le_ucast_ucast_le - tcb_at_invs' invs_valid_objs' 
invs_queues invs_sch_act_wf' + tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + invs_pspace_aligned' invs_pspace_distinct' ct_in_state'_def pred_tcb_at'_def obj_at'_def tcb_st_refs_of'_def) apply (erule disjE; simp add: objBits_defs mask_def) apply (clarsimp simp: idButNot_def interpret_excaps_test_null @@ -3185,7 +3170,7 @@ lemma decodeTCBConfigure_ccorres: capTCBPtr_eq tcb_ptr_to_ctcb_ptr_mask tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - StrictC'_thread_state_defs mask_eq_iff_w2p word_size + ThreadState_defs mask_eq_iff_w2p word_size from_bool_all_helper) apply (frule(1) tcb_at_h_t_valid [OF tcb_at_invs']) apply (clarsimp simp: typ_heap_simps numeral_eqs isCap_simps valid_cap'_def capAligned_def @@ -3219,7 +3204,6 @@ lemma decodeSetMCPriority_ccorres: >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetMCPriority_'proc)" supply Collect_const[simp del] - supply dc_simp[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetMCPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3287,8 +3271,7 @@ lemma decodeSetMCPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3324,7 +3307,7 @@ lemma decodeSetMCPriority_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) @@ -3353,7 +3336,7 @@ lemma decodeSetPriority_ccorres: (decodeSetPriority args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetPriority_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3421,8 +3404,7 @@ lemma decodeSetPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3458,7 +3440,7 @@ lemma decodeSetPriority_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq 
excaps_map_def) @@ -3500,7 +3482,7 @@ lemma decodeSetSchedParams_ccorres: (decodeSetSchedParams args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetSchedParams_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetSchedParams_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3508,9 +3490,9 @@ lemma decodeSetSchedParams_ccorres: val="from_bool (length args < 2 \ length extraCaps = 0)" in ccorres_symb_exec_r_known_rv) apply vcg - apply (auto simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 - split: bool.splits)[1] - apply (unat_arith+)[2] + apply (force simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 + unat_arith_simps + split: bool.splits if_splits) apply ceqv apply clarsimp (* @@ -3587,8 +3569,7 @@ lemma decodeSetSchedParams_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3637,16 +3618,15 @@ lemma decodeSetSchedParams_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) apply (intro conjI impI allI) - apply (clarsimp simp: unat_eq_0 le_max_word_ucast_id - thread_control_update_mcp_def thread_control_update_priority_def - cap_get_tag_isCap_unfolded_H_cap isCap_simps - interpret_excaps_eq excaps_map_def)+ - done + by (clarsimp simp: unat_eq_0 le_max_word_ucast_id + thread_control_update_mcp_def thread_control_update_priority_def + cap_get_tag_isCap_unfolded_H_cap isCap_simps + interpret_excaps_eq excaps_map_def)+ lemma decodeSetIPCBuffer_ccorres: "interpret_excaps extraCaps' = excaps_map extraCaps \ @@ -3784,11 +3764,10 @@ lemma decodeSetIPCBuffer_ccorres: valid_mdb_ctes_def no_0_def excaps_map_def elim: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread' dest!: interpret_excaps_eq)[1] - apply (clarsimp simp: option_to_0_def rf_sr_ksCurThread word_sless_def - word_sle_def ThreadState_Restart_def mask_def) + apply (clarsimp simp: option_to_0_def rf_sr_ksCurThread word_sless_def word_sle_def mask_def) apply (rule conjI[rotated], clarsimp+) apply (drule interpret_excaps_eq[rule_format, where n=0], simp add: excaps_map_Nil) - apply (simp add: mask_def "StrictC'_thread_state_defs" excaps_map_def) + apply (simp add: mask_def ThreadState_defs excaps_map_def) apply (clarsimp simp: ccap_rights_relation_def rightsFromWord_wordFromRights cap_get_tag_isCap) apply (frule cap_get_tag_to_H, subst cap_get_tag_isCap, assumption, assumption) @@ -3821,7 +3800,7 @@ lemma bindNotification_ccorres: (Call bindNotification_'proc)" apply (cinit lift: tcb_' ntfnPtr_' simp: bindNotification_def) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - 
apply (rule_tac P="invs' and ko_at' rv ntfnptr and tcb_at' tcb" and P'=UNIV + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr and tcb_at' tcb" and P'=UNIV in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) @@ -3841,7 +3820,7 @@ lemma bindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv") + apply (case_tac "ntfnObj ntfn") apply ((clarsimp simp: option_to_ctcb_ptr_canonical[OF invs_pspace_canonical'] simp flip: canonical_bit_def)+)[3] apply (auto simp: option_to_ctcb_ptr_def objBits_simps' @@ -3855,7 +3834,7 @@ lemma bindNotification_ccorres: apply ceqv apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) - apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3[unfolded dc_def]) + apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule (1) rf_sr_tcb_update_no_queue2, @@ -3921,7 +3900,7 @@ lemma decodeUnbindNotification_ccorres: apply (rule ccorres_Guard_Seq) apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getBoundNotification) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. ntfn \ Some 0" in ccorres_cross_over_guard) apply (simp add: bindE_bind_linearise) apply wpc apply (simp add: bindE_bind_linearise[symmetric] @@ -3954,10 +3933,10 @@ lemma decodeUnbindNotification_ccorres: apply (clarsimp simp: isCap_simps) apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (auto simp: ctcb_relation_def typ_heap_simps cap_get_tag_ThreadCap ct_in_state'_def - option_to_ptr_def option_to_0_def ThreadState_Restart_def - mask_def rf_sr_ksCurThread valid_tcb_state'_def - elim!: pred_tcb'_weakenE - dest!: valid_objs_boundNTFN_NULL) + option_to_ptr_def option_to_0_def ThreadState_defs + mask_def rf_sr_ksCurThread valid_tcb_state'_def + elim!: pred_tcb'_weakenE + dest!: valid_objs_boundNTFN_NULL) done lemma nTFN_case_If_ptr: @@ -4031,7 +4010,7 @@ lemma decodeBindNotification_ccorres: apply csymbr apply (clarsimp simp add: if_to_top_of_bind to_bool_eq_0[symmetric] simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (clarsimp simp: to_bool_def throwError_bind invocationCatch_def) + apply (clarsimp simp: throwError_bind invocationCatch_def) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg apply (rule conseqPre, vcg) @@ -4054,7 +4033,7 @@ lemma decodeBindNotification_ccorres: apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def valid_ntfn'_def) apply (case_tac "ntfnObj ntfn", simp_all add: isWaitingNtfn_def option_to_ctcb_ptr_def - false_def true_def split: option.split_asm if_split, + split: option.split_asm if_split, auto simp: neq_Nil_conv tcb_queue_relation'_def tcb_at_not_NULL[symmetric] tcb_at_not_NULL)[1] apply ceqv @@ -4118,8 +4097,8 @@ lemma decodeBindNotification_ccorres: apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def syscall_error_to_H_cases exception_defs) - apply (clarsimp simp add: guard_is_UNIV_def isWaitingNtfn_def from_bool_0 - ThreadState_Restart_def mask_def true_def + apply (clarsimp simp add: guard_is_UNIV_def isWaitingNtfn_def + ThreadState_defs mask_def rf_sr_ksCurThread capTCBPtr_eq) apply (simp add: hd_conv_nth bindE_bind_linearise nTFN_case_If_ptr throwError_bind invocationCatch_def) apply (rule 
ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) @@ -4306,7 +4285,7 @@ lemma decodeSetSpace_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (subgoal_tac "extraCaps \ []") - apply (clarsimp simp: returnOk_def return_def hd_conv_nth false_def) + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) apply fastforce apply clarsimp apply ceqv @@ -4318,7 +4297,7 @@ lemma decodeSetSpace_ccorres: apply (simp add: Collect_False del: Collect_const) apply csymbr apply csymbr - apply (simp add: cnode_cap_case_if cap_get_tag_isCap dc_def[symmetric] + apply (simp add: cnode_cap_case_if cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_throwError @@ -4333,8 +4312,7 @@ lemma decodeSetSpace_ccorres: apply (rule_tac P'="{s. vRootCap = vRootCap_' s}" in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - hd_drop_conv_nth2 false_def) + apply (clarsimp simp: returnOk_def return_def hd_drop_conv_nth2) apply fastforce apply ceqv apply (ctac add: ccorres_injection_handler_csum1 @@ -4444,18 +4422,17 @@ lemma decodeSetSpace_ccorres: rightsFromWord_wordFromRights capTCBPtr_eq tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - "StrictC'_thread_state_defs" mask_eq_iff_w2p word_size) + ThreadState_defs mask_eq_iff_w2p word_size) apply (simp add: word_sle_def cap_get_tag_isCap) apply (subgoal_tac "args \ []") apply (clarsimp simp: hd_conv_nth) - apply (drule sym, simp, simp add: true_def from_bool_0) apply (clarsimp simp: objBits_simps') apply fastforce apply clarsimp done lemma invokeTCB_SetTLSBase_ccorres: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. 
rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs') @@ -4466,7 +4443,7 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (cinit lift: thread_' tls_base_') apply (simp add: liftE_def bind_assoc del: Collect_const) - apply (ctac add: setRegister_ccorres[simplified dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_pre_getCurThread) apply (rename_tac cur_thr) apply (rule ccorres_split_nothrow_novcg_dc) @@ -4478,9 +4455,8 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wpsimp wp: hoare_drop_imp simp: guard_is_UNIV_def)+ apply vcg - apply (clarsimp simp: tlsBaseRegister_def RISCV64.tlsBaseRegister_def - invs_weak_sch_act_wf invs_queues C_register_defs - split: if_split) + apply (fastforce simp: tlsBaseRegister_def RISCV64.tlsBaseRegister_def + invs_weak_sch_act_wf C_register_defs) done lemma decodeSetTLSBase_ccorres: @@ -4528,7 +4504,7 @@ lemma decodeSetTLSBase_ccorres: apply (clarsimp simp: ct_in_state'_def sysargs_rel_n_def n_msgRegisters_def) apply (auto simp: valid_tcb_state'_def elim!: pred_tcb'_weakenE)[1] - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (auto simp: unat_eq_0 le_max_word_ucast_id)+ @@ -4680,8 +4656,7 @@ lemma decodeTCBInvocation_ccorres: dest!: st_tcb_at_idle_thread')[1] apply (simp split: sum.split add: cintr_def intr_and_se_rel_def exception_defs syscall_error_rel_def) - apply (simp add: "StrictC'_thread_state_defs" mask_eq_iff_w2p word_size - cap_get_tag_isCap) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size) apply (simp add: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply clarsimp done diff --git a/proof/crefine/RISCV64/VSpace_C.thy b/proof/crefine/RISCV64/VSpace_C.thy index 43e24d3253..88666c56e4 100644 --- a/proof/crefine/RISCV64/VSpace_C.thy +++ b/proof/crefine/RISCV64/VSpace_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * @@ -82,7 +83,7 @@ lemma checkVPAlignment_ccorres: apply simp apply simp apply simp - apply (simp split: if_split add: to_bool_def) + apply (simp split: if_split) apply (clarsimp simp: mask_def unlessE_def throwError_def split: if_split) apply (rule ccorres_guard_imp) apply (rule ccorres_return_C) @@ -90,7 +91,7 @@ lemma checkVPAlignment_ccorres: apply simp apply simp apply simp - apply (simp split: if_split add: to_bool_def) + apply (simp split: if_split) apply (clarsimp split: if_split) apply (simp add: word_less_nat_alt) apply (rule order_le_less_trans, rule pageBitsForSize_le) @@ -253,8 +254,7 @@ lemma handleVMFault_ccorres: apply (rule corres_split[OF read_stval_ccorres[ac]]) apply terminates_trivial apply (drule sym, clarsimp) - apply (wpc; simp add: vm_fault_type_from_H_def vm_fault_defs_C - true_def false_def bind_assoc) + apply (corres_cases; simp add: vm_fault_type_from_H_def vm_fault_defs_C bind_assoc) apply (rule returnVMFault_corres; clarsimp simp: exception_defs mask_twice lift_rv_def mask_def vmFaultTypeFSR_def)+ apply wpsimp+ @@ -345,7 +345,7 @@ lemma corres_symb_exec_unknown_r: assumes "\rv. 
corres_underlying sr nf nf' r P P' a (c rv)" shows "corres_underlying sr nf nf' r P P' a (unknown >>= c)" apply (simp add: unknown_def) - apply (rule corres_symb_exec_r[OF assms]; wp select_inv non_fail_select) + apply (rule corres_symb_exec_r[OF assms]; wp select_inv) done lemma isPageTablePTE_def2: @@ -383,7 +383,7 @@ lemma isPTEPageTable_spec': cpte_relation pte cpte \ \ret__unsigned_long = from_bool (isPageTablePTE pte) \" by vcg - (auto simp: from_bool_def cpte_relation_def isPageTablePTE_def2 Let_def + (auto simp: cpte_relation_def isPageTablePTE_def2 Let_def readable_from_vm_rights_def writable_from_vm_rights_def bit_simps split: bool.split if_split pte.splits vmrights.splits) @@ -406,7 +406,7 @@ lemma isPTEPageTable_corres: apply (drule rf_sr_cpte_relation) apply (drule (1) cmap_relation_ko_atD) apply (clarsimp simp: typ_heap_simps) - apply (cases pte; simp add: readable_from_vm_rights0 isPageTablePTE_def from_bool_def + apply (cases pte; simp add: readable_from_vm_rights0 isPageTablePTE_def cpte_relation_def writable_from_vm_rights_def) done @@ -719,7 +719,7 @@ lemma findVSpaceForASID_ccorres: apply clarsimp apply (rule_tac P="valid_arch_state' and _" and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def NonDetMonad.lift_def + apply (clarsimp simp: throwError_def return_def bindE_def Nondet_Monad.lift_def EXCEPTION_NONE_def EXCEPTION_LOOKUP_FAULT_def lookup_fault_lift_invalid_root asid_wf_table_guard) apply (frule rf_sr_asidTable_None[where asid=asid, THEN iffD2], @@ -868,7 +868,7 @@ lemma addrFromKPPtr_spec: \\ret__unsigned_long = addrFromKPPtr (ptr_val (pptr_' s))\" apply vcg apply (simp add: addrFromKPPtr_def kernelELFBaseOffset_def - kernelELFBase_def kernelELFPAddrBase_def) + kernelELFBase_def kernelELFPAddrBase_def mask_def pptrTop_def) done lemma isValidVTableRoot_def2: @@ -895,16 +895,15 @@ lemma setVMRoot_ccorres: apply (subst will_throw_and_catch) apply (simp split: capability.split arch_capability.split option.split) apply (fastforce simp: isCap_simps) - apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_riscvKSGlobalPT) apply csymbr apply ccorres_rewrite apply (subst bind_return_unit) apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) - apply (simp flip: dc_def) apply (rule ccorres_return_void_C) - apply (rule hoare_post_taut[where P=\]) + apply (rule wp_post_taut) apply (simp add: catch_def bindE_bind_linearise bind_assoc liftE_def) apply csymbr apply csymbr @@ -925,27 +924,27 @@ lemma setVMRoot_ccorres: in ccorres_gen_asm2) apply simp apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def throwError_def dc_def[symmetric], ccorres_rewrite) + apply (simp add: whenE_def throwError_def, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_riscvKSGlobalPT) apply csymbr - apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) - apply (rule hoare_post_taut[where P=\]) - apply (simp add: whenE_def returnOk_def flip: dc_def) + apply (rule wp_post_taut) + apply (simp add: whenE_def returnOk_def) apply (csymbr) apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) - apply (rule ccorres_cond_true_seq, simp add: dc_def[symmetric], 
ccorres_rewrite) + apply (rule ccorres_cond_true_seq, simp, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_riscvKSGlobalPT) apply csymbr - apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) - apply (rule hoare_post_taut[where P=\]) + apply (rule wp_post_taut) apply (simp, rule wp_post_tautE) apply clarsimp apply (vcg) @@ -965,15 +964,10 @@ lemma setVMRoot_ccorres: apply (clarsimp simp: isCap_simps isValidVTableRoot_def2) apply (clarsimp simp: cap_get_tag_isCap_ArchObject2) by (clarsimp simp: cap_get_tag_isCap_ArchObject[symmetric] - cap_lift_page_table_cap cap_to_H_def - cap_page_table_cap_lift_def isCap_simps - to_bool_def mask_def isZombieTCB_C_def Let_def - elim!: ccap_relationE - split: if_split_asm cap_CL.splits) - -lemma ccorres_seq_IF_False: - "ccorres_underlying sr \ r xf arrel axf G G' hs a (IF False THEN x ELSE y FI ;; c) = ccorres_underlying sr \ r xf arrel axf G G' hs a (y ;; c)" - by simp + cap_lift_page_table_cap cap_to_H_def + cap_page_table_cap_lift_def isCap_simps isZombieTCB_C_def Let_def + elim!: ccap_relationE + split: if_split_asm cap_CL.splits) (* FIXME x64: needed? *) lemma ptrFromPAddr_mask6_simp[simp]: @@ -1005,12 +999,12 @@ lemma setRegister_ccorres: (asUser thread (setRegister reg val)) (Call setRegister_'proc)" apply (cinit' lift: thread_' reg_' w_') - apply (simp add: asUser_def dc_def[symmetric] split_def split del: if_split) + apply (simp add: asUser_def split_def) apply (rule ccorres_pre_threadGet) apply (rule ccorres_Guard) apply (simp add: setRegister_def simpler_modify_def exec_select_f_singleton) - apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = rv" - in threadSet_ccorres_lemma2 [unfolded dc_def]) + apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = uc" + in threadSet_ccorres_lemma2) apply vcg apply (clarsimp simp: setRegister_def HaskellLib_H.runState_def simpler_modify_def typ_heap_simps) @@ -1041,8 +1035,6 @@ lemma msgRegisters_ccorres: (* usually when we call setMR directly, we mean to only set a registers, which will fit in actual registers *) lemma setMR_as_setRegister_ccorres: - notes dc_simp[simp del] - shows "ccorres (\rv rv'. rv' = of_nat offset + 1) ret__unsigned_' (tcb_at' thread and K (TCB_H.msgRegisters ! 
offset = reg \ offset < length msgRegisters)) (UNIV \ \\reg___unsigned_long = val\ @@ -1059,8 +1051,8 @@ lemma setMR_as_setRegister_ccorres: apply (ctac add: setRegister_ccorres) apply (rule ccorres_from_vcg_throws[where P'=UNIV and P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setRegister_modifies) apply (clarsimp simp: n_msgRegisters_def length_of_msgRegisters not_le conj_commute) apply (subst msgRegisters_ccorres[symmetric]) @@ -1246,7 +1238,6 @@ lemma unmapPage_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: page_size_' asid___unsigned_long_' vptr_' pptr___unsigned_long_') apply (simp add: ignoreFailure_liftM) - apply (fold dc_def) apply (ctac add: findVSpaceForASID_ccorres) apply (rename_tac vspace find_ret) apply (rule ccorres_liftE_Seq) @@ -1256,9 +1247,9 @@ lemma unmapPage_ccorres: apply (simp (no_asm) add: split_def del: Collect_const) apply (rule ccorres_split_unless_throwError_cond[where Q=\ and Q'=\]) apply (clarsimp simp: of_nat_pageBitsForSize split: if_split) - apply (simp add: throwError_def flip: dc_def) + apply (simp add: throwError_def) apply (rule ccorres_return_void_C) - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (subst bindE_assoc[symmetric]) @@ -1267,15 +1258,14 @@ lemma unmapPage_ccorres: apply (rule checkMappingPPtr_pte_ccorres[simplified]) apply (rule conseqPre, vcg exspec=isPTEPageTable_spec') apply (clarsimp simp: cpte_relation_def Let_def pte_lift_def isPagePTE_def - typ_heap_simps isPageTablePTE_def bit_simps from_bool_def + typ_heap_simps isPageTablePTE_def bit_simps split: if_split_asm pte.split_asm) apply (rule ceqv_refl) apply (simp add: unfold_checkMapping_return liftE_bindE - Collect_const[symmetric] dc_def[symmetric] del: Collect_const) apply csymbr apply (rule ccorres_split_nothrow_novcg) - apply (simp add: dc_def[symmetric] ptr_add_assertion_def split_def) + apply (simp add: ptr_add_assertion_def split_def) apply ccorres_rewrite apply (rule storePTE_Basic_ccorres) apply (simp add: cpte_relation_def Let_def) @@ -1294,7 +1284,7 @@ lemma unmapPage_ccorres: apply wpsimp apply (vcg exspec=lookupPTSlot_modifies) apply ccorres_rewrite - apply (simp add: throwError_def flip: dc_def) + apply (simp add: throwError_def) apply (rule ccorres_return_void_C) apply wp apply (vcg exspec=findVSpaceForASID_modifies) @@ -1374,7 +1364,7 @@ lemma performPageInvocationUnmap_ccorres: apply simp apply simp apply simp - apply (simp add: asidInvalid_def flip: dc_def) + apply (simp add: asidInvalid_def) apply (rule ccorres_return_Skip) apply ceqv apply (simp add: liftM_def) @@ -1531,7 +1521,7 @@ lemma setCTE_asidpool': "\ ko_at' (ASIDPool pool) p \ setCTE c p' \\_. 
ko_at' (ASIDPool pool) p\" apply (clarsimp simp: setCTE_def) apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad) apply (frule updateObject_type) apply (clarsimp simp: obj_at'_def) @@ -1666,7 +1656,7 @@ proof - show ?thesis apply (cinit lift: newLvl1pt_' simp: ptIndex_maxPTLevel_pptrBase ptTranslationBits_def) apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState, rename_tac globalPT) - apply (rule ccorres_rel_imp[where r=dc, OF _ dc_simp]) + apply (rule ccorres_rel_imp[where r=dc, simplified]) apply (clarsimp simp: whileAnno_def objBits_simps bit_simps RISCV64.pptrBase_def mask_def) apply (rule ccorres_h_t_valid_riscvKSGlobalPT) apply csymbr diff --git a/proof/crefine/RISCV64/Wellformed_C.thy b/proof/crefine/RISCV64/Wellformed_C.thy index 85f5ed59a1..f7924396c6 100644 --- a/proof/crefine/RISCV64/Wellformed_C.thy +++ b/proof/crefine/RISCV64/Wellformed_C.thy @@ -151,10 +151,6 @@ where abbreviation "ep_queue_relation \ tcb_queue_relation tcbEPNext_C tcbEPPrev_C" -abbreviation - "sched_queue_relation \ tcb_queue_relation tcbSchedNext_C tcbSchedPrev_C" - - definition wordSizeCase :: "'a \ 'a \ 'a" where "wordSizeCase a b \ (if bitSize (undefined::machine_word) = 32 @@ -260,63 +256,6 @@ definition | Some cap \ Some \ cap_CL = cap, cteMDBNode_CL = mdb_node_lift (cteMDBNode_C c) \" -lemma to_bool_false [simp]: "\ to_bool false" - by (simp add: to_bool_def false_def) - -(* this is slightly weird, but the bitfield generator - masks everything with the expected bit length. - So we do that here too. *) -definition - to_bool_bf :: "'a::len word \ bool" where - "to_bool_bf w \ (w && mask 1) = 1" - -lemma to_bool_bf_mask1 [simp]: - "to_bool_bf (mask (Suc 0))" - by (simp add: mask_def to_bool_bf_def) - -lemma to_bool_bf_0 [simp]: "\to_bool_bf 0" - by (simp add: to_bool_bf_def) - -lemma to_bool_bf_1 [simp]: "to_bool_bf 1" - by (simp add: to_bool_bf_def mask_def) - -lemma to_bool_bf_false [simp]: - "\to_bool_bf false" - by (simp add: false_def) - -lemma to_bool_bf_true [simp]: - "to_bool_bf true" - by (simp add: true_def) - -lemma to_bool_to_bool_bf: - "w = false \ w = true \ to_bool_bf w = to_bool w" - by (auto simp: false_def true_def to_bool_def to_bool_bf_def mask_def) - -lemma to_bool_bf_mask_1 [simp]: - "to_bool_bf (w && mask (Suc 0)) = to_bool_bf w" - by (simp add: to_bool_bf_def) - -lemma to_bool_bf_and [simp]: - "to_bool_bf (a && b) = (to_bool_bf a \ to_bool_bf (b::word64))" - apply (clarsimp simp: to_bool_bf_def) - apply (rule iffI) - apply (subst (asm) bang_eq) - apply (simp add: word_size) - apply (rule conjI) - apply (rule word_eqI) - apply (auto simp add: word_size)[1] - apply (rule word_eqI) - apply (auto simp add: word_size)[1] - apply clarsimp - apply (rule word_eqI) - apply (subst (asm) bang_eq)+ - apply (auto simp add: word_size)[1] - done - -lemma to_bool_bf_to_bool_mask: - "w && mask (Suc 0) = w \ to_bool_bf w = to_bool (w::word64)" - by (metis mask_Suc_0 bool_mask mask_1 to_bool_0 to_bool_1 to_bool_bf_def word_gt_0) - definition mdb_node_to_H :: "mdb_node_CL \ mdbnode" where @@ -486,31 +425,31 @@ lemma maxDom_sgt_0_maxDomain: lemma num_domains_calculation: "num_domains = numDomains" - unfolding num_domains_def by eval + unfolding num_domains_val by eval private lemma num_domains_card_explicit: "num_domains = CARD(num_domains)" - by (simp add: num_domains_def) + by (simp add: num_domains_val) lemmas num_domains_index_updates = - index_update[where 
'b=num_domains, folded num_domains_card_explicit num_domains_def, + index_update[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, simplified num_domains_calculation] - index_update2[where 'b=num_domains, folded num_domains_card_explicit num_domains_def, + index_update2[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, simplified num_domains_calculation] (* C ArrayGuards will throw these at us and there is no way to avoid a proof of being less than a specific number expressed as a word, so we must introduce these. However, being explicit means lack of discipline can lead to a violation. *) -lemma numDomains_less_numeric_explicit[simplified num_domains_def One_nat_def]: +lemma numDomains_less_numeric_explicit[simplified num_domains_val One_nat_def]: "x < Kernel_Config.numDomains \ x < num_domains" by (simp add: num_domains_calculation) -lemma numDomains_less_unat_ucast_explicit[simplified num_domains_def]: +lemma numDomains_less_unat_ucast_explicit[simplified num_domains_val]: "unat x < Kernel_Config.numDomains \ (ucast (x::domain) :: machine_word) < of_nat num_domains" apply (rule word_less_nat_alt[THEN iffD2]) apply transfer apply simp - apply (drule numDomains_less_numeric_explicit, simp add: num_domains_def) + apply (drule numDomains_less_numeric_explicit, simp add: num_domains_val) done lemmas maxDomain_le_unat_ucast_explicit = @@ -535,7 +474,7 @@ value_type num_tcb_queues = "numDomains * numPriorities" lemma num_tcb_queues_calculation: "num_tcb_queues = numDomains * numPriorities" - unfolding num_tcb_queues_def by eval + unfolding num_tcb_queues_val by eval (* Input abbreviations for API object types *) diff --git a/proof/crefine/X64/ADT_C.thy b/proof/crefine/X64/ADT_C.thy index a63a7a3757..82672f5287 100644 --- a/proof/crefine/X64/ADT_C.thy +++ b/proof/crefine/X64/ADT_C.thy @@ -75,8 +75,8 @@ lemma Basic_sem_eq: lemma setTCBContext_C_corres: "\ ccontext_relation tc tc'; t' = tcb_ptr_to_ctcb_ptr t \ \ - corres_underlying rf_sr nf nf' dc (pspace_domain_valid and tcb_at' t) \ - (threadSet (\tcb. tcb \ tcbArch := atcbContextSet tc (tcbArch tcb)\) t) (setTCBContext_C tc' t')" + corres_underlying rf_sr nf nf' dc (pspace_domain_valid and tcb_at' t) \ + (threadSet (\tcb. tcb \ tcbArch := atcbContextSet tc (tcbArch tcb)\) t) (setTCBContext_C tc' t')" apply (simp add: setTCBContext_C_def exec_C_def Basic_sem_eq corres_underlying_def) apply clarsimp apply (simp add: threadSet_def bind_assoc split_def exec_gets) @@ -84,7 +84,7 @@ lemma setTCBContext_C_corres: apply clarsimp apply (frule getObject_eq [rotated -1], simp) apply (simp add: objBits_simps') - apply (simp add: NonDetMonad.bind_def split_def) + apply (simp add: Nondet_Monad.bind_def split_def) apply (rule bexI) prefer 2 apply assumption @@ -107,8 +107,6 @@ lemma setTCBContext_C_corres: apply (simp add: cep_relations_drop_fun_upd) apply (apply_conjunct \match conclusion in \fpu_null_state_relation _\ \ \simp add: fpu_null_state_heap_update_span_disjoint[OF tcb_at'_non_kernel_data_ref]\\) - apply (apply_conjunct \match conclusion in \cready_queues_relation _ _ _\ \ - \erule cready_queues_relation_not_queue_ptrs; rule ext; simp split: if_split\\) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) apply (simp add: ctcb_relation_def carch_tcb_relation_def) @@ -541,6 +539,10 @@ end context state_rel begin +definition + "ioapic_nirqs_to_H cstate \ + \x. 
if x \ of_nat maxNumIOAPIC then ioapic_nirqs_' cstate.[unat x] else 0" + definition "carch_state_to_H cstate \ X64KernelState @@ -555,6 +557,7 @@ definition x64KSKernelVSpace_C (cioport_bitmap_to_H (the (clift (t_hrs_' cstate) (Ptr (symbol_table ''x86KSAllocatedIOPorts''))))) (ucast (num_ioapics_' cstate)) + (ioapic_nirqs_to_H cstate) \ \Map IRQ states to their Haskell equivalent, and out-of-bounds entries to X64IRQFree\ (case_option X64IRQFree id \ (array_map_conv @@ -584,6 +587,8 @@ lemma carch_state_to_H_correct: using valid[simplified valid_arch_state'_def] apply (fastforce simp: valid_asid_table'_def) apply (simp add: ccr3_relation_def split: cr3.splits) + apply (rule conjI) + apply (solves \clarsimp simp: global_ioport_bitmap_relation_def\) apply (rule conjI) prefer 2 apply (rule ext) @@ -591,7 +596,10 @@ lemma carch_state_to_H_correct: array_to_map_def) using valid[simplified valid_arch_state'_def valid_x64_irq_state'_def] apply (case_tac "x \ maxIRQ"; fastforce split: option.split) - apply (clarsimp simp: global_ioport_bitmap_relation_def) + apply (clarsimp simp: array_relation_def ioapic_nirqs_to_H_def) + apply (rule ext) + using valid[simplified valid_arch_state'_def valid_ioapic_def] + apply (clarsimp simp: not_le) done end @@ -619,25 +627,51 @@ lemma tcb_queue_rel'_unique: apply (erule(2) tcb_queue_rel_unique) done -definition - cready_queues_to_H - :: "(tcb_C ptr \ tcb_C) \ (tcb_queue_C[num_tcb_queues]) \ word8 \ word8 \ machine_word list" + +definition tcb_queue_C_to_tcb_queue :: "tcb_queue_C \ tcb_queue" where + "tcb_queue_C_to_tcb_queue q \ + TcbQueue (if head_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (head_C q))) + (if end_C q = NULL then None else Some (ctcb_ptr_to_tcb_ptr (end_C q)))" + +definition cready_queues_to_H :: + "tcb_queue_C[num_tcb_queues] \ (domain \ priority \ ready_queue)" where - "cready_queues_to_H h_tcb cs \ \(qdom, prio). if ucast minDom \ qdom \ qdom \ ucast maxDom - \ ucast seL4_MinPrio \ prio \ prio \ ucast seL4_MaxPrio - then THE aq. let cqueue = index cs (cready_queues_index_to_C qdom prio) - in sched_queue_relation' h_tcb aq (head_C cqueue) (StateRelation_C.end_C cqueue) - else []" + "cready_queues_to_H cs \ + \(qdom, prio). 
+ if qdom \ maxDomain \ prio \ maxPriority + then let cqueue = index cs (cready_queues_index_to_C qdom prio) + in tcb_queue_C_to_tcb_queue cqueue + else TcbQueue None None" lemma cready_queues_to_H_correct: - "cready_queues_relation (clift s) cs as \ - cready_queues_to_H (clift s) cs = as" - apply (clarsimp simp: cready_queues_to_H_def cready_queues_relation_def - fun_eq_iff) - apply (rule the_equality) - apply simp - apply (clarsimp simp: Let_def) - apply (rule_tac hp="clift s" in tcb_queue_rel'_unique, simp_all add: lift_t_NULL) + "\cready_queues_relation (ksReadyQueues s) (ksReadyQueues_' ch); + no_0_obj' s; ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ cready_queues_to_H (ksReadyQueues_' ch) = ksReadyQueues s" + apply (clarsimp simp: cready_queues_to_H_def cready_queues_relation_def Let_def) + apply (clarsimp simp: fun_eq_iff) + apply (rename_tac d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (rule conjI) + apply (clarsimp simp: tcb_queue_C_to_tcb_queue_def ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (case_tac "tcbQueueHead (ksReadyQueues s (d, p)) = None") + apply (clarsimp simp: tcb_queue.expand) + apply clarsimp + apply (rename_tac queue_head queue_end) + apply (prop_tac "tcb_at' queue_head s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (prop_tac "tcb_at' queue_end s", fastforce simp: tcbQueueEmpty_def obj_at'_def) + apply (drule kernel.tcb_at_not_NULL)+ + apply (fastforce simp: tcb_queue.expand kernel.ctcb_ptr_to_ctcb_ptr) + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits; + metis tcb_queue.exhaust_sel word_not_le) done (* showing that cpspace_relation is actually unique >>>*) @@ -783,19 +817,18 @@ lemma cthread_state_rel_imp_eq: "cthread_state_relation x z \ cthread_state_relation y z \ x=y" apply (simp add: cthread_state_relation_def split_def) apply (cases x) - apply (cases y, simp_all add: ThreadState_BlockedOnReceive_def - ThreadState_BlockedOnReply_def ThreadState_BlockedOnNotification_def - ThreadState_Running_def ThreadState_Inactive_def - ThreadState_IdleThreadState_def ThreadState_BlockedOnSend_def - ThreadState_Restart_def)+ + apply (cases y, simp_all add: ThreadState_defs)+ done -lemma ksPSpace_valid_objs_tcbBoundNotification_nonzero: - "\s. 
ksPSpace s = ah \ no_0_obj' s \ valid_objs' s - \ map_to_tcbs ah p = Some tcb \ tcbBoundNotification tcb \ Some 0" +lemma map_to_tcbs_Some_refs_nonzero: + "\map_to_tcbs (ksPSpace s) p = Some tcb; no_0_obj' s; valid_objs' s\ + \ tcbBoundNotification tcb \ Some 0 + \ tcbSchedPrev tcb \ Some 0 + \ tcbSchedNext tcb \ Some 0" + supply word_neq_0_conv[simp del] apply (clarsimp simp: map_comp_def split: option.splits) - apply (erule(1) valid_objsE') - apply (clarsimp simp: projectKOs valid_obj'_def valid_tcb'_def) + apply (erule (1) valid_objsE') + apply (fastforce simp: projectKOs valid_obj'_def valid_tcb'_def) done lemma atcbContextGet_inj[simp]: @@ -806,34 +839,75 @@ lemma ccontext_relation_imp_eq2: "\ccontext_relation (atcbContextGet t) x; ccontext_relation (atcbContextGet t') x\ \ t = t'" by (auto dest: ccontext_relation_imp_eq) +lemma tcb_ptr_to_ctcb_ptr_inj: + "tcb_ptr_to_ctcb_ptr x = tcb_ptr_to_ctcb_ptr y \ x = y" + by (auto simp: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) + +lemma + assumes "pspace_aligned' as" "pspace_distinct' as" "valid_tcb' atcb as" + shows tcb_at'_tcbBoundNotification: + "bound (tcbBoundNotification atcb) \ ntfn_at' (the (tcbBoundNotification atcb)) as" + and tcb_at'_tcbSchedPrev: + "tcbSchedPrev atcb \ None \ tcb_at' (the (tcbSchedPrev atcb)) as" + and tcb_at'_tcbSchedNext: + "tcbSchedNext atcb \ None \ tcb_at' (the (tcbSchedNext atcb)) as" + using assms + by (clarsimp simp: valid_tcb'_def obj_at'_def)+ + lemma cpspace_tcb_relation_unique: - assumes tcbs: "cpspace_tcb_relation ah ch" "cpspace_tcb_relation ah' ch" - and vs: "\s. ksPSpace s = ah \ no_0_obj' s \ valid_objs' s" - and vs': "\s. ksPSpace s = ah' \ no_0_obj' s \ valid_objs' s" - assumes ctes: " \tcb tcb'. (\p. map_to_tcbs ah p = Some tcb \ - map_to_tcbs ah' p = Some tcb') \ - (\x\ran tcb_cte_cases. fst x tcb' = fst x tcb)" - shows "map_to_tcbs ah' = map_to_tcbs ah" + assumes tcbs: "cpspace_tcb_relation (ksPSpace as) ch" "cpspace_tcb_relation (ksPSpace as') ch" + assumes vs: "no_0_obj' as" "valid_objs' as" + assumes vs': "no_0_obj' as'" "valid_objs' as'" + assumes ad: "pspace_aligned' as" "pspace_distinct' as" + assumes ad': "pspace_aligned' as'" "pspace_distinct' as'" + assumes ctes: "\tcb tcb'. (\p. map_to_tcbs (ksPSpace as) p = Some tcb \ + map_to_tcbs (ksPSpace as') p = Some tcb') \ + (\x\ran tcb_cte_cases. 
fst x tcb' = fst x tcb)" + shows "map_to_tcbs (ksPSpace as') = map_to_tcbs (ksPSpace as)" using tcbs(2) tcbs(1) apply (clarsimp simp add: cmap_relation_def) apply (drule inj_image_inv[OF inj_tcb_ptr_to_ctcb_ptr])+ apply (simp add: tcb_ptr_to_ctcb_ptr_def[abs_def] ctcb_offset_def) apply (rule ext) - apply (case_tac "x:dom (map_to_tcbs ah)") + apply (case_tac "x \ dom (map_to_tcbs (ksPSpace as))") apply (drule bspec, assumption)+ apply (simp add: dom_def Collect_eq, drule_tac x=x in spec) apply clarsimp apply (rename_tac p x y) apply (cut_tac ctes) apply (drule_tac x=x in spec, drule_tac x=y in spec, erule impE, fastforce) - apply (frule ksPSpace_valid_objs_tcbBoundNotification_nonzero[OF vs]) - apply (frule ksPSpace_valid_objs_tcbBoundNotification_nonzero[OF vs']) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs]) + apply (frule map_to_tcbs_Some_refs_nonzero[OF _ vs']) + apply (rename_tac atcb atcb') + apply (prop_tac "valid_tcb' atcb as") + apply (fastforce intro: vs ad map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (prop_tac "valid_tcb' atcb' as'") + apply (fastforce intro: vs' ad' map_to_ko_atI tcb_ko_at_valid_objs_valid_tcb') + apply (frule tcb_at'_tcbSchedPrev[OF ad]) + apply (frule tcb_at'_tcbSchedPrev[OF ad']) + apply (frule tcb_at'_tcbSchedNext[OF ad]) + apply (frule tcb_at'_tcbSchedNext[OF ad']) apply (thin_tac "map_to_tcbs x y = Some z" for x y z)+ - apply (case_tac x, case_tac y, case_tac "the (clift ch (tcb_Ptr (p+0x400)))") + apply (case_tac "the (clift ch (tcb_Ptr (p + 2 ^ ctcb_size_bits)))") apply (clarsimp simp: ctcb_relation_def ran_tcb_cte_cases) - apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.splits) - apply (auto simp: cfault_rel_imp_eq cthread_state_rel_imp_eq carch_tcb_relation_def - ccontext_relation_imp_eq2 up_ucast_inj_eq ctcb_size_bits_def) + apply (clarsimp simp: option_to_ctcb_ptr_def option_to_ptr_def option_to_0_def) + apply (rule tcb.expand) + apply clarsimp + apply (intro conjI) + apply (simp add: cthread_state_rel_imp_eq) + apply (simp add: cfault_rel_imp_eq) + apply (case_tac "tcbBoundNotification atcb'", case_tac "tcbBoundNotification atcb"; clarsimp) + apply (clarsimp split: option.splits) + apply (case_tac "tcbSchedPrev atcb'"; case_tac "tcbSchedPrev atcb"; clarsimp) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (case_tac "tcbSchedNext atcb'"; case_tac "tcbSchedNext atcb"; clarsimp) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force dest!: kernel.tcb_at_not_NULL) + apply (force simp: tcb_ptr_to_ctcb_ptr_inj) + apply (force simp: carch_tcb_relation_def ccontext_relation_imp_eq2) + apply auto done lemma tcb_queue_rel_clift_unique: @@ -864,10 +938,6 @@ lemma ksPSpace_valid_pspace_ntfnBoundTCB_nonzero: apply (clarsimp simp: projectKOs valid_obj'_def valid_ntfn'_def) done -lemma tcb_ptr_to_ctcb_ptr_inj: - "tcb_ptr_to_ctcb_ptr x = tcb_ptr_to_ctcb_ptr y \ x = y" - by (auto simp: tcb_ptr_to_ctcb_ptr_def ctcb_offset_def) - lemma cpspace_ntfn_relation_unique: assumes ntfns: "cpspace_ntfn_relation ah ch" "cpspace_ntfn_relation ah' ch" and vs: "\s. 
ksPSpace s = ah \ valid_pspace' s" @@ -1217,8 +1287,8 @@ proof - OF valid_objs'_imp_wf_asid_pool'[OF valid_objs] valid_objs'_imp_wf_asid_pool'[OF valid_objs']]) apply (drule (1) cpspace_tcb_relation_unique) - apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs') - apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs') + apply (fastforce intro: no_0_objs no_0_objs' valid_objs valid_objs')+ + apply (fastforce intro: aligned distinct aligned' distinct')+ apply (intro allI impI,elim exE conjE) apply (rule_tac p=p in map_to_ctes_tcb_ctes, assumption) apply (frule (1) map_to_ko_atI[OF _ aligned distinct]) @@ -1270,7 +1340,7 @@ lemma ksPSpace_eq_imp_valid_tcb'_eq: by (auto simp: ksPSpace_eq_imp_obj_at'_eq[OF ksPSpace] ksPSpace_eq_imp_valid_cap'_eq[OF ksPSpace] ksPSpace_eq_imp_typ_at'_eq[OF ksPSpace] - valid_tcb'_def valid_tcb_state'_def valid_bound_ntfn'_def + valid_tcb'_def valid_tcb_state'_def valid_bound_ntfn'_def valid_bound_tcb'_def split: thread_state.splits option.splits) lemma ksPSpace_eq_imp_valid_arch_obj'_eq: @@ -1433,7 +1503,7 @@ where ksDomSchedule = cDomSchedule_to_H kernel_all_global_addresses.ksDomSchedule, ksCurDomain = ucast (ksCurDomain_' s), ksDomainTime = ksDomainTime_' s, - ksReadyQueues = cready_queues_to_H (clift (t_hrs_' s)) (ksReadyQueues_' s), + ksReadyQueues = cready_queues_to_H (ksReadyQueues_' s), ksReadyQueuesL1Bitmap = cbitmap_L1_to_H (ksReadyQueuesL1Bitmap_' s), ksReadyQueuesL2Bitmap = cbitmap_L2_to_H (ksReadyQueuesL2Bitmap_' s), ksCurThread = ctcb_ptr_to_tcb_ptr (ksCurThread_' s), @@ -1455,16 +1525,16 @@ lemma trivial_eq_conj: "B = C \ (A \ B) = (A \ C)" lemma cstate_to_H_correct: assumes valid: "valid_state' as" assumes cstate_rel: "cstate_relation as cs" + assumes rdyqs: "ksReadyQueues_asrt as" shows "cstate_to_H cs = as \ksMachineState:= observable_memory (ksMachineState as) (user_mem' as)\" apply (subgoal_tac "cstate_to_machine_H cs = observable_memory (ksMachineState as) (user_mem' as)") apply (rule kernel_state.equality, simp_all add: cstate_to_H_def) - apply (rule cstate_to_pspace_H_correct) + apply (rule cstate_to_pspace_H_correct) using valid apply (simp add: valid_state'_def) using cstate_rel valid apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def - observable_memory_def valid_state'_def - valid_pspace'_def) + observable_memory_def valid_state'_def valid_pspace'_def) using cstate_rel apply (clarsimp simp: cstate_relation_def cpspace_relation_def Let_def prod_eq_iff) using cstate_rel @@ -1472,10 +1542,10 @@ lemma cstate_to_H_correct: using valid cstate_rel apply (rule mk_gsUntypedZeroRanges_correct) subgoal - using cstate_rel - by (fastforce simp: cstate_relation_def cpspace_relation_def - Let_def ghost_size_rel_def unat_eq_0 - split: if_split) + using cstate_rel + by (fastforce simp: cstate_relation_def cpspace_relation_def + Let_def ghost_size_rel_def unat_eq_0 + split: if_split) using valid cstate_rel apply (rule cDomScheduleIdx_to_H_correct) using cstate_rel @@ -1489,8 +1559,13 @@ lemma cstate_to_H_correct: using cstate_rel apply (clarsimp simp: cstate_relation_def Let_def) apply (rule cready_queues_to_H_correct) - using cstate_rel - apply (clarsimp simp: cstate_relation_def Let_def) + using cstate_rel rdyqs + apply (fastforce intro!: cready_queues_to_H_correct + simp: cstate_relation_def Let_def) + using valid apply (fastforce simp: valid_state'_def) + using rdyqs apply fastforce + using valid apply (fastforce simp: valid_state'_def) + using valid apply (fastforce simp: valid_state'_def) using 
cstate_rel apply (clarsimp simp: cstate_relation_def Let_def) using cstate_rel diff --git a/proof/crefine/X64/ArchMove_C.thy b/proof/crefine/X64/ArchMove_C.thy index d5a610095d..7186d5fb65 100644 --- a/proof/crefine/X64/ArchMove_C.thy +++ b/proof/crefine/X64/ArchMove_C.thy @@ -16,7 +16,7 @@ lemma ps_clear_is_aligned_ksPSpace_None: \ ksPSpace s (p + d) = None" apply (simp add: ps_clear_def add_diff_eq[symmetric] mask_2pm1[symmetric]) apply (drule equals0D[where a="p + d"]) - apply (simp add: dom_def word_gt_0 del: word_neq_0_conv) + apply (simp add: dom_def word_gt_0) apply (drule mp) apply (rule word_plus_mono_right) apply simp @@ -47,8 +47,6 @@ where "port_mask start end = mask (unat (end && mask wordRadix)) && ~~ mask (unat (start && mask wordRadix))" -declare word_neq_0_conv [simp del] - lemma unat_ucast_prio_L1_cmask_simp: "unat (ucast (p::priority) && 0x3F :: machine_word) = unat (p && 0x3F)" using unat_ucast_prio_mask_simp[where m=6] @@ -183,13 +181,8 @@ lemma vmsz_aligned_aligned_pageBits: lemma empty_fail_findVSpaceForASID[iff]: "empty_fail (findVSpaceForASID asid)" - apply (simp add: findVSpaceForASID_def liftME_def) - apply (intro empty_fail_bindE, simp_all split: option.split) - apply (simp add: assertE_def split: if_split) - apply (simp add: assertE_def split: if_split) - apply (simp add: empty_fail_getObject) - apply (simp add: assertE_def liftE_bindE checkPML4At_def split: if_split) - done + unfolding findVSpaceForASID_def checkPML4At_def + by (wpsimp wp: empty_fail_getObject) crunch inv'[wp]: archThreadGet P @@ -209,7 +202,7 @@ lemma atg_sp': (* FIXME: MOVE to EmptyFail *) lemma empty_fail_archThreadGet [intro!, wp, simp]: "empty_fail (archThreadGet f p)" - by (simp add: archThreadGet_def getObject_def split_def) + by (fastforce simp: archThreadGet_def getObject_def split_def) lemma more_pageBits_inner_beauty: fixes x :: "9 word" @@ -256,14 +249,8 @@ lemma sign_extend_canonical_address: "(x = sign_extend 47 x) = canonical_address x" by (fastforce simp: sign_extended_iff_sign_extend canonical_address_sign_extended) -crunches Arch.switchToThread - for valid_queues'[wp]: valid_queues' - (simp: crunch_simps) crunches switchToIdleThread for ksCurDomain[wp]: "\s. P (ksCurDomain s)" -crunches switchToIdleThread, switchToThread - for valid_pspace'[wp]: valid_pspace' - (simp: whenE_def crunch_simps) lemma setCurrentUserCR3_valid_arch_state'[wp]: "\valid_arch_state' and K (valid_cr3' c)\ setCurrentUserCR3 c \\_. valid_arch_state'\" @@ -272,7 +259,7 @@ lemma setCurrentUserCR3_valid_arch_state'[wp]: lemma setVMRoot_valid_arch_state': "\valid_arch_state'\ setVMRoot t \\_. 
valid_arch_state'\" apply (simp add: setVMRoot_def getThreadVSpaceRoot_def setCurrentUserVSpaceRoot_def) - apply (wp hoare_whenE_wp getCurrentUserCR3_wp findVSpaceForASID_vs_at_wp + apply (wp whenE_wp getCurrentUserCR3_wp findVSpaceForASID_vs_at_wp | wpcw | clarsimp simp: if_apply_def2 asid_wf_0 | strengthen valid_cr3'_makeCR3)+ @@ -354,7 +341,7 @@ lemma asUser_get_registers: apply (simp add: mapM_empty asUser_return) apply wp apply simp - apply (simp add: mapM_Cons asUser_bind_distrib asUser_return) + apply (simp add: mapM_Cons asUser_bind_distrib asUser_return empty_fail_cond) apply wp apply simp apply (rule hoare_strengthen_post) @@ -374,7 +361,7 @@ lemma asUser_get_registers: (* FIXME: move to where is_aligned_ptrFromPAddr is *) lemma is_aligned_ptrFromPAddr_pageBitsForSize: "is_aligned p (pageBitsForSize sz) \ is_aligned (ptrFromPAddr p) (pageBitsForSize sz)" - by (cases sz ; simp add: is_aligned_ptrFromPAddr_n pageBits_def bit_simps) + by (cases sz ; simp add: is_aligned_ptrFromPAddr_n bit_simps) lemma is_aligned_pageBitsForSize_minimum: "\ is_aligned p (pageBitsForSize sz) ; n \ pageBits \ \ is_aligned p n" @@ -405,10 +392,6 @@ lemma valid_eq_wf_asid_pool'[simp]: declare valid_asid_pool'.simps[simp del] (*<<<*) -(* FIXME: change the original to be predicated! *) -crunch ko_at'2[wp]: doMachineOp "\s. P (ko_at' p t s)" - (simp: crunch_simps) - (* FIXME: change the original to be predicated! *) crunch pred_tcb_at'2[wp]: doMachineOp "\s. P (pred_tcb_at' a b p s)" (simp: crunch_simps) @@ -473,7 +456,7 @@ lemma length_msgRegisters[simplified size_msgRegisters_def]: lemma empty_fail_loadWordUser[intro!, simp]: "empty_fail (loadWordUser x)" - by (simp add: loadWordUser_def ef_loadWord ef_dmo') + by (fastforce simp: loadWordUser_def ef_loadWord ef_dmo') lemma empty_fail_getMRs[iff]: "empty_fail (getMRs t buf mi)" @@ -483,26 +466,14 @@ lemma empty_fail_getReceiveSlots: "empty_fail (getReceiveSlots r rbuf)" proof - note - empty_fail_assertE[iff] - empty_fail_resolveAddressBits[iff] + empty_fail_resolveAddressBits[wp] + empty_fail_rethrowFailure[wp] + empty_fail_rethrowFailure[wp] show ?thesis - apply (clarsimp simp: getReceiveSlots_def loadCapTransfer_def split_def - split: option.split) - apply (rule empty_fail_bind) - apply (simp add: capTransferFromWords_def) - apply (simp add: emptyOnFailure_def unifyFailure_def) - apply (intro empty_fail_catch empty_fail_bindE empty_fail_rethrowFailure, - simp_all add: empty_fail_whenEs) - apply (simp_all add: lookupCap_def split_def lookupCapAndSlot_def - lookupSlotForThread_def liftME_def - getThreadCSpaceRoot_def locateSlot_conv bindE_assoc - lookupSlotForCNodeOp_def lookupErrorOnFailure_def - cong: if_cong) - apply (intro empty_fail_bindE, - simp_all add: getSlotCap_def) - apply (intro empty_fail_If empty_fail_bindE empty_fail_rethrowFailure impI, - simp_all add: empty_fail_whenEs rangeCheck_def) - done + unfolding getReceiveSlots_def loadCapTransfer_def lookupCap_def lookupCapAndSlot_def + by (wpsimp simp: emptyOnFailure_def unifyFailure_def lookupSlotForThread_def + capTransferFromWords_def getThreadCSpaceRoot_def locateSlot_conv bindE_assoc + lookupSlotForCNodeOp_def lookupErrorOnFailure_def rangeCheck_def) qed lemma user_getreg_rv: diff --git a/proof/crefine/X64/Arch_C.thy b/proof/crefine/X64/Arch_C.thy index cab2cc78a7..434d269393 100644 --- a/proof/crefine/X64/Arch_C.thy +++ b/proof/crefine/X64/Arch_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: 
GPL-2.0-only @@ -74,12 +75,12 @@ lemma performPageTableInvocationUnmap_ccorres: apply csymbr apply (simp add: storePTE_def' swp_def) apply clarsimp - apply(simp only: dc_def[symmetric] bit_simps_corres[symmetric]) + apply (simp only: bit_simps_corres[symmetric]) apply (ctac add: clearMemory_setObject_PTE_ccorres) apply wp apply (simp del: Collect_const) apply (vcg exspec=unmapPageTable_modifies) - apply (simp add: to_bool_def) + apply simp apply (rule ccorres_return_Skip') apply (simp add: cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_lift_page_table_cap cap_to_H_def @@ -216,12 +217,12 @@ lemma performPageDirectoryInvocationUnmap_ccorres: apply csymbr apply (simp add: storePDE_def' swp_def) apply clarsimp - apply(simp only: dc_def[symmetric] bit_simps_corres[symmetric]) + apply (simp only: bit_simps_corres[symmetric]) apply (ctac add: clearMemory_setObject_PDE_ccorres) apply wp apply (simp del: Collect_const) apply (vcg exspec=unmapPageDirectory_modifies) - apply (simp add: to_bool_def) + apply simp apply (rule ccorres_return_Skip') apply (simp add: cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_lift_page_directory_cap cap_to_H_def @@ -358,12 +359,12 @@ lemma performPDPTInvocationUnmap_ccorres: apply csymbr apply (simp add: storePDPTE_def' swp_def) apply clarsimp - apply(simp only: dc_def[symmetric] bit_simps_corres[symmetric]) + apply (simp only: bit_simps_corres[symmetric]) apply (ctac add: clearMemory_setObject_PDPTE_ccorres) apply wp apply (simp del: Collect_const) apply (vcg exspec=unmapPDPT_modifies) - apply (simp add: to_bool_def) + apply simp apply (rule ccorres_return_Skip') apply (simp add: cap_get_tag_isCap_ArchObject[symmetric]) apply (clarsimp simp: cap_lift_pdpt_cap cap_to_H_def @@ -747,7 +748,9 @@ shows apply (rule ccorres_rhs_assoc2) apply (rule ccorres_abstract_cleanup) apply (rule ccorres_symb_exec_l) - apply (rule_tac P = "rva = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) + apply (rename_tac pcap) + apply (rule_tac P = "pcap = (capability.UntypedCap isdev frame pageBits idx)" + in ccorres_gen_asm) apply (simp add: hrs_htd_update del:fun_upd_apply) apply (rule ccorres_split_nothrow) @@ -886,7 +889,7 @@ shows pageBits_def split: if_split) apply (clarsimp simp: X64SmallPageBits_def word_sle_def is_aligned_mask[symmetric] - ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + ghost_assertion_data_get_gs_clear_region) apply (subst ghost_assertion_size_logic_flex[unfolded o_def, rotated]) apply assumption apply (simp add: ghost_assertion_data_get_gs_clear_region[unfolded o_def]) @@ -940,8 +943,7 @@ lemma isValidNativeRoot_spec: {t. \cap. ccap_relation cap (cap_' s) \ ret__unsigned_long_' t = from_bool (isArchObjectCap cap \ isPML4Cap (capCap cap) \ capPML4MappedASID (capCap cap) \ None)}" apply (vcg, clarsimp) - apply (rule conjI, clarsimp simp: from_bool_def case_bool_If if_1_0_0 - split: if_split) + apply (rule conjI, clarsimp simp: case_bool_If split: if_split) apply (rule conjI; clarsimp simp: cap_pml4_cap_lift) apply (erule ccap_relationE, clarsimp simp: cap_to_H_def isCap_simps to_bool_def split: if_split_asm) @@ -1121,10 +1123,10 @@ lemma decodeX64PageTableInvocation_ccorres: isPML4Cap (capCap (fst (extraCaps ! 
0)))" in ccorres_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: hd_conv_nth) apply csymbr @@ -1176,7 +1178,7 @@ lemma decodeX64PageTableInvocation_ccorres: apply (clarsimp simp: typ_heap_simps from_bool_eq_if) apply (auto simp: cpde_relation_def Let_def pde_pde_pt_lift_def pde_pde_pt_lift pde_tag_defs pde_pde_large_lift_def - pde_lift_def from_bool_def case_bool_If + pde_lift_def case_bool_If split: pde.split_asm if_splits)[1] apply ceqv apply clarsimp @@ -1215,7 +1217,7 @@ lemma decodeX64PageTableInvocation_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (clarsimp simp: exception_defs) apply clarsimp @@ -1233,7 +1235,7 @@ lemma decodeX64PageTableInvocation_ccorres: apply simp apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply simp @@ -1245,8 +1247,8 @@ lemma decodeX64PageTableInvocation_ccorres: sch_act_wf (ksSchedulerAction b) b \ cte_wp_at' (\_. 
True) slot b" in hoare_strengthen_post) apply wp - apply (clarsimp simp: isCap_simps invs_valid_objs' valid_cap'_def valid_tcb_state'_def - invs_arch_state' invs_no_0_obj') + apply (fastforce simp: isCap_simps invs_valid_objs' valid_cap'_def valid_tcb_state'_def + invs_arch_state' invs_no_0_obj') apply vcg apply wp apply simp @@ -1276,7 +1278,7 @@ lemma decodeX64PageTableInvocation_ccorres: apply (auto dest: ctes_of_valid')[1] (* X64PageTableUnmap *) apply (rule conjI) - apply (fastforce simp: rf_sr_ksCurThread "StrictC'_thread_state_defs" + apply (fastforce simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size ct_in_state'_def st_tcb_at'_def word_sle_def word_sless_def @@ -1291,7 +1293,6 @@ lemma decodeX64PageTableInvocation_ccorres: apply (clarsimp simp: cap_lift_page_directory_cap hd_conv_nth cap_lift_page_table_cap bit_simps cap_page_directory_cap_lift_def - to_bool_def typ_heap_simps' shiftl_t2n[where n=3] field_simps elim!: ccap_relationE) apply (clarsimp simp: neq_Nil_conv[where xs=extraCaps] @@ -1311,7 +1312,7 @@ lemma decodeX64PageTableInvocation_ccorres: intro!: is_aligned_addrFromPPtr[simplified bit_simps, simplified] simp: vmsz_aligned_def cap_to_H_simps cap_page_table_cap_lift_def bit_simps capAligned_def) apply clarsimp - apply (rule conjI, clarsimp simp: ThreadState_Restart_def mask_def) + apply (rule conjI, clarsimp simp: ThreadState_defs mask_def) apply (rule conjI) (* ccap_relation *) apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 cap_page_table_cap_lift[THEN iffD1] @@ -1337,7 +1338,7 @@ lemma decodeX64PageTableInvocation_ccorres: (* the below proof duplicates some of the sections above *) apply (clarsimp simp: pde_tag_defs pde_get_tag_def word_and_1) apply safe - apply (clarsimp simp: ThreadState_Restart_def mask_def) + apply (clarsimp simp: ThreadState_defs mask_def) (* ccap_relation *) apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 cap_page_table_cap_lift[THEN iffD1] cap_to_H_simps asid_wf_def3[simplified asid_bits_def, simplified]) @@ -1371,8 +1372,7 @@ lemma checkVPAlignment_spec: apply (clarsimp simp: mask_eq_iff_w2p word_size) apply (rule conjI) apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.split) - apply (simp add: from_bool_def vmsz_aligned_def is_aligned_mask - mask_def split: if_split) + apply (simp add: vmsz_aligned_def is_aligned_mask mask_def split: if_split) done definition @@ -1589,15 +1589,6 @@ lemma pde_align_ptBits: apply (simp add: bit_simps) done -lemma vaddr_segment_nonsense3_folded: - "is_aligned (p :: machine_word) pageBits \ - (p + ((vaddr >> pageBits) && mask (pt_bits - word_size_bits) << word_size_bits) && ~~ mask pt_bits) = p" - apply (rule is_aligned_add_helper[THEN conjunct2]) - apply (simp add: bit_simps mask_def)+ - apply (rule shiftl_less_t2n[where m=12 and n=3, simplified, OF and_mask_less'[where n=9, unfolded mask_def, simplified]]) - apply simp+ - done - lemma storePDE_Basic_ccorres'': "ccorres dc xfdc (\_. True) @@ -1791,7 +1782,7 @@ lemma performPageInvocationMapPDPTE_ccorres: done lemma performPageGetAddress_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. 
ksCurThread s = thread) and ct_in_state' ((=) Restart)) @@ -1817,8 +1808,8 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) @@ -1840,8 +1831,8 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp apply (vcg exspec=setRegister_modifies) @@ -1853,23 +1844,23 @@ lemma performPageGetAddress_ccorres: Kernel_C.msgInfoRegister_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply clarsimp apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply vcg apply clarsimp apply (rule conseqPre, vcg) apply clarsimp - apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_queues invs_valid_objs' invs_sch_act_wf' + apply (clarsimp simp: invs_no_0_obj' tcb_at_invs' invs_valid_objs' invs_sch_act_wf' rf_sr_ksCurThread msgRegisters_unfold seL4_MessageInfo_lift_def message_info_to_H_def mask_def) apply (cases isCall) apply (auto simp: X64.badgeRegister_def X64_H.badgeRegister_def Kernel_C.badgeRegister_def X64.capRegister_def Kernel_C.RDI_def Kernel_C.RSI_def fromPAddr_def - ThreadState_Running_def pred_tcb_at'_def obj_at'_def ct_in_state'_def) + ThreadState_defs pred_tcb_at'_def obj_at'_def ct_in_state'_def) done lemma vmsz_aligned_addrFromPPtr': @@ -1913,7 +1904,7 @@ lemma shiftr_asid_low_bits_mask_eq_0: apply (rule iffI[rotated]) apply simp apply (rule asid_low_high_bits) - apply (rule upcast_ucast_id[where 'b=machine_word_len]; simp add: asid_low_bits_of_mask_eq) + apply (rule More_Word.ucast_up_inj[where 'b=machine_word_len]; simp add: asid_low_bits_of_mask_eq) apply (simp add: ucast_asid_high_bits_is_shift) apply (simp add: asid_wf_def mask_def) apply (rule asid_wf_0) @@ -1926,18 +1917,6 @@ lemma slotcap_in_mem_valid: apply (erule(1) ctes_of_valid') done -lemma unat_less_iff64: - "\unat (a::machine_word) = b;c < 2^word_bits\ - \ (a < of_nat c) = (b < c)" - apply (rule iffI) - apply (drule unat_less_helper) - apply simp - apply (simp add:unat64_eq_of_nat) - apply (rule of_nat_mono_maybe) - apply (simp add:word_bits_def) - apply simp - done - lemma injection_handler_if_returnOk: "injection_handler Inl (if a then b else returnOk c) = (if a then (injection_handler Inl b) else returnOk c)" @@ -1950,11 +1929,6 @@ lemma injection_handler_if_returnOk: lemma pbfs_less: "pageBitsForSize sz < 31" by (case_tac sz,simp_all add: bit_simps) -definition - to_option :: "('a \ bool) \ 'a \ 'a option" -where - "to_option f x \ if f x then Some x else None" - lemma cte_wp_at_eq_gsMaxObjectSize: "cte_wp_at' ((=) cap o cteCap) slot s \ valid_global_refs' s @@ -2014,14 +1988,14 @@ lemma createSafeMappingEntries_PTE_ccorres: apply (rule_tac P'="{s. 
lu_ret___struct_lookupPTSlot_ret_C = errstate s}" in ccorres_from_vcg_throws[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk syscall_error_to_H_cases - syscall_error_rel_def exception_defs false_def) + syscall_error_rel_def exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply wpsimp apply (clarsimp simp: isVMPTE_def) apply (vcg exspec=lookupPTSlot_modifies) apply clarsimp - apply (clarsimp simp: cpte_relation_def Let_def vm_attribs_relation_def from_bool_def) + apply (clarsimp simp: cpte_relation_def Let_def vm_attribs_relation_def) apply (rule addrFromPPtr_mask_middle_pml4ShiftBits[simplified, simplified bit_simps]) apply (clarsimp simp: vmsz_aligned_addrFromPPtr' vmsz_aligned_aligned_pageBits[simplified bit_simps]) apply clarsimp @@ -2091,8 +2065,8 @@ lemma createSafeMappingEntries_PDE_ccorres: apply clarsimp apply (erule cmap_relationE1[OF rf_sr_cpde_relation], erule ko_at_projectKO_opt) apply (clarsimp simp: typ_heap_simps cpde_relation_def Let_def) - apply (case_tac x; fastforce simp: if_1_0_0 pde_lifts isPageTablePDE_def false_def true_def - pde_pde_pt_lift_def) + apply (case_tac x; + fastforce simp: pde_lifts isPageTablePDE_def pde_pde_pt_lift_def) apply ceqv apply (clarsimp simp: pde_case_isPageTablePDE) apply (rule ccorres_Cond_rhs_Seq, clarsimp) @@ -2109,7 +2083,7 @@ lemma createSafeMappingEntries_PDE_ccorres: apply (rule_tac P'="{s. lu_ret___struct_lookupPDSlot_ret_C = errstate s}" in ccorres_from_vcg_throws[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk syscall_error_to_H_cases - syscall_error_rel_def exception_defs false_def) + syscall_error_rel_def exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply clarsimp @@ -2120,8 +2094,7 @@ lemma createSafeMappingEntries_PDE_ccorres: is_aligned_addrFromPPtr_pageBitsForSize[where sz=X64LargePage, simplified] vmsz_aligned_def dest!: addrFromPPtr_mask_middle_shiftBits[where sz=X64LargePage, simplified]) - apply (clarsimp simp: bit_simps cpde_relation_def Let_def true_def false_def - isPageTablePDE_def of_bool_from_bool + apply (clarsimp simp: bit_simps cpde_relation_def isPageTablePDE_def split: pde.splits) done @@ -2160,8 +2133,8 @@ lemma createSafeMappingEntries_PDPTE_ccorres: apply clarsimp apply (erule cmap_relationE1[OF rf_sr_cpdpte_relation], erule ko_at_projectKO_opt) apply (clarsimp simp: typ_heap_simps cpdpte_relation_def Let_def) - apply (case_tac x; fastforce simp: if_1_0_0 pdpte_lifts isPageDirectoryPDPTE_def false_def true_def - pdpte_pdpte_pd_lift_def) + apply (case_tac x; + fastforce simp: pdpte_lifts isPageDirectoryPDPTE_def pdpte_pdpte_pd_lift_def) apply ceqv apply (clarsimp simp: pdpte_case_isPageDirectoryPDPTE) apply (rule ccorres_Cond_rhs_Seq, clarsimp) @@ -2178,7 +2151,7 @@ lemma createSafeMappingEntries_PDPTE_ccorres: apply (rule_tac P'="{s. 
lu_ret___struct_lookupPDPTSlot_ret_C = errstate s}" in ccorres_from_vcg_throws[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk syscall_error_to_H_cases - syscall_error_rel_def exception_defs false_def) + syscall_error_rel_def exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply clarsimp @@ -2189,8 +2162,7 @@ lemma createSafeMappingEntries_PDPTE_ccorres: is_aligned_addrFromPPtr_pageBitsForSize[where sz=X64HugePage, simplified] vmsz_aligned_def dest!: addrFromPPtr_mask_middle_shiftBits[where sz=X64HugePage, simplified]) - apply (clarsimp simp: bit_simps cpdpte_relation_def Let_def true_def false_def - isPageDirectoryPDPTE_def of_bool_from_bool + apply (clarsimp simp: bit_simps cpdpte_relation_def isPageDirectoryPDPTE_def split: pdpte.splits) done @@ -2222,7 +2194,7 @@ lemma decodeX86ModeMapPage_ccorres: (Inr (invocation.InvokePage (PageMap (ArchObjectCap cap) slot x pml4))) odE) (Call decodeX86ModeMapPage_'proc)" - supply if_cong[cong] tl_drop_1[simp] Collect_const[simp del] dc_simp[simp del] + supply if_cong[cong] tl_drop_1[simp] Collect_const[simp del] apply (simp add: K_def) apply (rule ccorres_gen_asm) apply (cinit' lift: label___unsigned_long_' page_size_' vroot_' cap_' paddr_' vm_rights_' vm_attr_' @@ -2251,9 +2223,9 @@ lemma decodeX86ModeMapPage_ccorres: apply (wp injection_wp[OF refl] createMappingEntries_wf) apply (simp add: all_ex_eq_helper) apply (vcg exspec=createSafeMappingEntries_PDPTE_modifies) - by (clarsimp simp: invs_valid_objs' tcb_at_invs' vmsz_aligned_addrFromPPtr' invs_queues - valid_tcb_state'_def invs_sch_act_wf' ThreadState_Restart_def rf_sr_ksCurThread - arch_invocation_label_defs mask_def isCap_simps dc_def) + by (fastforce simp: invs_valid_objs' tcb_at_invs' vmsz_aligned_addrFromPPtr' + valid_tcb_state'_def invs_sch_act_wf' ThreadState_defs rf_sr_ksCurThread + arch_invocation_label_defs mask_def isCap_simps) lemma valid_cap'_PageCap_kernel_mappings: "\pspace_in_kernel_mappings' s; isPageCap cap; valid_cap' (ArchObjectCap cap) s\ @@ -2563,7 +2535,7 @@ lemma decodeX64FrameInvocation_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (clarsimp simp: maptype_from_H_def throwError_bind invocationCatch_def split: vmmap_type.split_asm) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_to_H_cases) (* throw on mismatched vaddr *) apply simp @@ -2575,7 +2547,6 @@ lemma decodeX64FrameInvocation_ccorres: split: vmmap_type.split_asm) apply (clarsimp simp: X86_MappingNone_def X86_MappingVSpace_def) apply ccorres_rewrite - apply (fold dc_def id_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* frame cap not mapped, check mapping *) @@ -2594,7 +2565,7 @@ lemma decodeX64FrameInvocation_ccorres: apply csymbr apply (simp add: user_vtop_def X64.pptrUserTop_def hd_conv_nth length_ineq_not_Nil) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[unfolded id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* Doesn't throw case *) apply (drule_tac s="Some y" in sym, @@ -2630,7 +2601,6 @@ lemma decodeX64FrameInvocation_ccorres: apply (simp add: word_less_nat_alt user_vtop_def X64.pptrUserTop_def hd_conv_nth length_ineq_not_Nil) apply (ccorres_rewrite) - apply (fold dc_def) apply (rule ccorres_return_Skip) apply clarsimp apply (clarsimp simp: 
asidInvalid_def) @@ -2653,10 +2623,10 @@ lemma decodeX64FrameInvocation_ccorres: apply (simp add: Collect_False if_to_top_of_bindE) apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg - apply (clarsimp simp: cap_lift_pml4_cap cap_to_H_def get_capPtr_CL_def to_bool_def + apply (clarsimp simp: cap_lift_pml4_cap cap_to_H_def get_capPtr_CL_def cap_pml4_cap_lift_def elim!: ccap_relationE split: if_split) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply csymbr apply (rule ccorres_symb_exec_r) @@ -2670,7 +2640,7 @@ lemma decodeX64FrameInvocation_ccorres: apply (clarsimp simp: framesize_from_to_H user_vtop_def X64.pptrUserTop_def) apply (simp add: injection_handler_throwError throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply csymbr apply csymbr @@ -2707,7 +2677,7 @@ lemma decodeX64FrameInvocation_ccorres: (Some (y, a)))) cap}" and A' = "{}" in conseqPost) apply (vcg exspec=createSafeMappingEntries_PTE_modifies) - apply (clarsimp simp: ThreadState_Restart_def mask_def rf_sr_ksCurThread + apply (clarsimp simp: ThreadState_defs mask_def rf_sr_ksCurThread isCap_simps cap_pml4_cap_lift get_capPtr_CL_def ccap_relation_PML4Cap_BasePtr) apply clarsimp @@ -2742,7 +2712,7 @@ lemma decodeX64FrameInvocation_ccorres: (Some (y, a)))) cap}" and A' = "{}" in conseqPost) apply (vcg exspec=createSafeMappingEntries_PDE_modifies) - apply (clarsimp simp: ThreadState_Restart_def mask_def rf_sr_ksCurThread + apply (clarsimp simp: ThreadState_defs mask_def rf_sr_ksCurThread isCap_simps cap_pml4_cap_lift get_capPtr_CL_def ccap_relation_PML4Cap_BasePtr) apply clarsimp @@ -2762,8 +2732,7 @@ lemma decodeX64FrameInvocation_ccorres: apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: fst_throwError_returnOk exception_defs - syscall_error_rel_def syscall_error_to_H_cases - false_def) + syscall_error_rel_def syscall_error_to_H_cases) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply (simp add: isCap_simps) @@ -2843,7 +2812,7 @@ lemma decodeX64FrameInvocation_ccorres: (* C side *) - apply (clarsimp simp: rf_sr_ksCurThread "StrictC'_thread_state_defs" mask_eq_iff_w2p + apply (clarsimp simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size word_less_nat_alt from_bool_0 excaps_map_def cte_wp_at_ctes_of n_msgRegisters_def) apply (frule(1) ctes_of_valid') @@ -2865,7 +2834,7 @@ lemma decodeX64FrameInvocation_ccorres: apply (rename_tac pml4_cap b ys pml4_slot) apply (frule ccap_relation_PageCap_MapType) apply (erule_tac c="ArchObjectCap (PML4Cap a b)" for a b in ccap_relationE) - apply (clarsimp simp: cap_lift_pml4_cap to_bool_def cap_pml4_cap_lift_def framesize_from_to_H + apply (clarsimp simp: cap_lift_pml4_cap cap_pml4_cap_lift_def framesize_from_to_H cap_to_H_def[split_simps cap_CL.split] valid_cap'_def user_vtop_def X64.pptrUserTop_def) apply (prop_tac "(cap_C.words_C (cte_C.cap_C pml4_slot).[0] >> 58) && 1 \ 0") @@ -3145,10 +3114,10 @@ lemma decodeX64PageDirectoryInvocation_ccorres: isPML4Cap (capCap (fst (extraCaps ! 
0)))" in ccorres_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def from_bool_0 cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def from_bool_0 cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: hd_conv_nth) apply csymbr @@ -3198,7 +3167,7 @@ lemma decodeX64PageDirectoryInvocation_ccorres: apply (clarsimp simp: typ_heap_simps from_bool_eq_if) apply (auto simp: cpdpte_relation_def Let_def pdpte_pdpte_pd_lift_def pdpte_pdpte_pd_lift pdpte_tag_defs pdpte_pdpte_1g_lift_def - pdpte_lift_def from_bool_def case_bool_If + pdpte_lift_def case_bool_If split: pdpte.split_asm if_splits)[1] apply ceqv apply clarsimp @@ -3237,7 +3206,8 @@ lemma decodeX64PageDirectoryInvocation_ccorres: apply (rule_tac P'="{s. errstate s = lookup_pd_ret}" in ccorres_from_vcg_split_throws[where P=\]) apply vcg apply (rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def syscall_error_rel_def syscall_error_to_H_cases exception_defs false_def) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (clarsimp simp: exception_defs) apply clarsimp @@ -3254,7 +3224,7 @@ lemma decodeX64PageDirectoryInvocation_ccorres: apply simp apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def - syscall_error_to_H_cases exception_defs false_def) + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (simp add: exception_defs) apply simp @@ -3266,7 +3236,8 @@ lemma decodeX64PageDirectoryInvocation_ccorres: sch_act_wf (ksSchedulerAction b) b \ cte_wp_at' (\_. 
True) slot b" in hoare_strengthen_post) apply wp - apply (clarsimp simp: isCap_simps invs_valid_objs' valid_cap'_def valid_tcb_state'_def invs_arch_state' invs_no_0_obj') + apply (fastforce simp: isCap_simps invs_valid_objs' valid_cap'_def valid_tcb_state'_def + invs_arch_state' invs_no_0_obj') apply vcg apply wp apply simp @@ -3295,7 +3266,7 @@ lemma decodeX64PageDirectoryInvocation_ccorres: slotcap_in_mem_def) apply (auto dest: ctes_of_valid')[1] apply (rule conjI) - apply (clarsimp simp: rf_sr_ksCurThread "StrictC'_thread_state_defs" + apply (clarsimp simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size ct_in_state'_def st_tcb_at'_def word_sle_def word_sless_def @@ -3309,7 +3280,6 @@ lemma decodeX64PageDirectoryInvocation_ccorres: apply (clarsimp simp: cap_lift_pdpt_cap hd_conv_nth cap_lift_page_directory_cap bit_simps cap_pdpt_cap_lift_def - to_bool_def typ_heap_simps' shiftl_t2n[where n=3] field_simps elim!: ccap_relationE) apply (clarsimp simp: neq_Nil_conv[where xs=extraCaps] @@ -3329,7 +3299,7 @@ lemma decodeX64PageDirectoryInvocation_ccorres: intro!: is_aligned_addrFromPPtr[simplified bit_simps, simplified] simp: vmsz_aligned_def cap_to_H_simps cap_page_directory_cap_lift_def bit_simps capAligned_def) apply clarsimp - apply (rule conjI, clarsimp simp: ThreadState_Restart_def mask_def) + apply (rule conjI, clarsimp simp: ThreadState_defs mask_def) (* ccap_relation *) apply (rule conjI) apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 cap_page_directory_cap_lift[THEN iffD1] @@ -3359,7 +3329,7 @@ lemma decodeX64PageDirectoryInvocation_ccorres: context_conjI creates a mess, separate lemmas would be a bit unwieldy *) apply safe - apply (clarsimp simp: ThreadState_Restart_def mask_def) + apply (clarsimp simp: ThreadState_defs mask_def) (* ccap_relation *) apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 cap_page_directory_cap_lift[THEN iffD1] cap_to_H_simps asid_wf_def3[simplified asid_bits_def, simplified]) @@ -3514,7 +3484,7 @@ lemma decodeX64PDPTInvocation_ccorres: >>= invocationCatch thread isBlocking isCall InvokeArchObject) (Call decodeX64PDPTInvocation_'proc)" (is "_ \ _ \ ccorres _ _ ?pre ?cpre _ _ _") - supply Collect_const[simp del] if_cong[cong] dc_simp[simp del] + supply Collect_const[simp del] if_cong[cong] from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] ccorres_IF_True[simp] apply (clarsimp simp only: isCap_simps) apply (cinit' lift: label___unsigned_long_' length___unsigned_long_' cte_' current_extra_caps_' cap_' buffer_' @@ -3619,15 +3589,14 @@ lemma decodeX64PDPTInvocation_ccorres: apply clarsimp apply (rule ccorres_Cond_rhs_Seq) apply ccorres_rewrite - apply clarsimp apply (rule_tac P="isArchObjectCap (fst (extraCaps ! 0)) \ isPML4Cap (capCap (fst (extraCaps ! 0)))" in ccorres_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: hd_conv_nth) apply csymbr @@ -3704,7 +3673,8 @@ lemma decodeX64PDPTInvocation_ccorres: apply (rule_tac P'="{s. 
errstate s = find_ret}" in ccorres_from_vcg_split_throws[where P=\]) apply vcg apply (rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def syscall_error_rel_def syscall_error_to_H_cases exception_defs false_def) + apply (clarsimp simp: throwError_def return_def syscall_error_rel_def + syscall_error_to_H_cases exception_defs) apply (erule lookup_failure_rel_fault_lift[rotated]) apply (fastforce simp: exception_defs) apply clarsimp @@ -3739,7 +3709,7 @@ lemma decodeX64PDPTInvocation_ccorres: elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (auto simp: neq_Nil_conv excaps_in_mem_def slotcap_in_mem_def)[1] apply (rule conjI) - apply (fastforce simp: rf_sr_ksCurThread "StrictC'_thread_state_defs" + apply (fastforce simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size ct_in_state'_def st_tcb_at'_def word_sle_def word_sless_def @@ -3756,8 +3726,7 @@ lemma decodeX64PDPTInvocation_ccorres: simp: neq_Nil_conv valid_cap_simps' isCap_simps get_capMappedASID_CL_def cap_pml4_cap_lift cap_to_H_simps split: if_split_asm) - apply (clarsimp simp: dc_simp neq_Nil_conv[where xs=extraCaps] - excaps_in_mem_def slotcap_in_mem_def + apply (clarsimp simp: neq_Nil_conv[where xs=extraCaps] excaps_in_mem_def slotcap_in_mem_def dest!: sym[where s="ArchObjectCap cp" for cp]) apply (clarsimp simp: word_less_nat_alt hd_conv_nth dest!: length_ineq_not_Nil) apply (rule conjI, fastforce simp: mask_def) @@ -3773,7 +3742,7 @@ lemma decodeX64PDPTInvocation_ccorres: apply (clarsimp simp: get_capMappedASID_CL_def) apply (subst cap_lift_PML4Cap_Base[symmetric]; (assumption | rule sym, assumption)) apply (clarsimp simp: rf_sr_ksCurThread) - apply (rule conjI, fastforce simp: ThreadState_Restart_def mask_def) + apply (rule conjI, fastforce simp: ThreadState_defs mask_def) (* ccap_relation *) apply (rule conjI) apply (erule ccap_relationE[where c="ArchObjectCap (PDPointerTableCap _ _)"]) @@ -3892,7 +3861,7 @@ lemma decodeX64MMUInvocation_ccorres: throwError_bind invocationCatch_def split: invocation_label.split arch_invocation_label.split) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) (* X64ASIDControlMakePool *) apply (clarsimp simp: decodeX64MMUInvocation_def decodeX64ASIDControlInvocation_def isCap_simps) @@ -3903,7 +3872,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (rule ccorres_cond_true_seq | simp)+ apply (simp add: throwError_bind invocationCatch_def) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply (simp add: interpret_excaps_test_null excaps_map_def) apply csymbr @@ -3912,14 +3881,14 @@ lemma decodeX64MMUInvocation_ccorres: apply (rule ccorres_cond_true_seq | simp)+ apply (simp add: throwError_bind invocationCatch_def) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply csymbr apply (simp add: interpret_excaps_test_null[OF Suc_leI]) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: length_ineq_not_Nil throwError_bind invocationCatch_def) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) 
apply (subgoal_tac "1 < length extraCaps") prefer 2 @@ -3986,14 +3955,13 @@ lemma decodeX64MMUInvocation_ccorres: apply (cut_tac P="\y. y < i_' x + 1 = rhs y" for rhs in allI, rule less_x_plus_1) apply (fastforce simp: asid_high_bits_def) - apply (clarsimp simp: rf_sr_x86KSASIDTable from_bool_def + apply (clarsimp simp: rf_sr_x86KSASIDTable asid_high_bits_word_bits option_to_ptr_def option_to_0_def order_less_imp_le linorder_not_less order_antisym[OF inc_le]) - apply (clarsimp simp: true_def false_def - split: option.split if_split) + apply (clarsimp split: option.split if_split) apply (auto simp: asid_high_bits_def word_le_nat_alt word_less_nat_alt unat_add_lem[THEN iffD1] Kernel_C_defs)[1] @@ -4012,8 +3980,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (clarsimp simp: asidHighBits_handy_convs word_sle_def word_sless_def from_bool_0 rf_sr_x86KSASIDTable[where n=0, simplified]) - apply (simp add: asid_high_bits_def option_to_ptr_def option_to_0_def - from_bool_def Kernel_C_defs + apply (simp add: asid_high_bits_def option_to_ptr_def option_to_0_def Kernel_C_defs split: option.split if_split) apply fastforce apply ceqv @@ -4026,7 +3993,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (clarsimp split: list.split) apply (fastforce dest!: filter_eq_ConsD) apply (simp add: throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply (rule ccorres_Guard_Seq)+ apply (simp add: invocationCatch_use_injection_handler @@ -4051,7 +4018,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (clarsimp simp: to_bool_if cond_throw_whenE bindE_assoc) apply (rule ccorres_split_when_throwError_cond[where Q = \ and Q' = \]) apply fastforce - apply (rule syscall_error_throwError_ccorres_n[simplified id_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_rel_def shiftL_nat syscall_error_to_H_cases) prefer 2 apply vcg @@ -4094,7 +4061,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (rule_tac Q'=UNIV and A'="{}" in conseqPost) apply (vcg exspec=ensureEmptySlot_modifies) apply (frule length_ineq_not_Nil) - apply (clarsimp simp: null_def ThreadState_Restart_def mask_def hd_conv_nth + apply (clarsimp simp: null_def ThreadState_defs mask_def hd_conv_nth isCap_simps rf_sr_ksCurThread cap_get_tag_UntypedCap word_le_make_less asid_high_bits_def split: list.split) @@ -4169,7 +4136,7 @@ lemma decodeX64MMUInvocation_ccorres: apply ccorres_rewrite apply (clarsimp simp: isCap_simps decodeX64ASIDPoolInvocation_def throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply (clarsimp simp: isCap_simps decodeX64ASIDPoolInvocation_def split: list.split) apply csymbr @@ -4190,7 +4157,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (frule (1) slotcap_in_mem_PML4) apply (clarsimp simp: typ_heap_simps' from_bool_0 split: if_split) apply (fastforce simp: isCap_simps asidInvalid_def cap_lift_pml4_cap cap_to_H_simps - get_capMappedASID_CL_def true_def c_valid_cap_def cl_valid_cap_def + get_capMappedASID_CL_def c_valid_cap_def cl_valid_cap_def elim!: ccap_relationE split: if_splits) apply ceqv apply (rule ccorres_Cond_rhs_Seq) @@ -4234,8 +4201,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) 
apply (clarsimp simp: throwError_def return_def - syscall_error_rel_def exception_defs - syscall_error_to_H_cases false_def) + syscall_error_rel_def exception_defs syscall_error_to_H_cases) apply (simp add: lookup_fault_lift_invalid_root) apply csymbr apply (simp add: liftME_def bindE_assoc if_to_top_of_bind) @@ -4285,9 +4251,7 @@ lemma decodeX64MMUInvocation_ccorres: = capASIDBase cp") apply (subgoal_tac "\x. (x < (i_' xb + 1)) = (x < i_' xb \ x = i_' xb)") - apply (clarsimp simp: inc_le from_bool_def typ_heap_simps - asid_low_bits_def not_less field_simps - false_def + apply (clarsimp simp: inc_le typ_heap_simps asid_low_bits_def not_less field_simps split: if_split bool.splits) apply unat_arith apply (rule iffI) @@ -4341,11 +4305,10 @@ lemma decodeX64MMUInvocation_ccorres: word_sless_def word_sle_def) apply (erule cmap_relationE1[OF rf_sr_cpspace_asidpool_relation], erule ko_at_projectKO_opt) - apply (clarsimp simp: typ_heap_simps from_bool_def split: if_split) + apply (clarsimp simp: typ_heap_simps split: if_split) apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (clarsimp simp: cap_lift_asid_pool_cap cap_to_H_def - cap_asid_pool_cap_lift_def false_def - ucast_minus ucast_nat_def + cap_asid_pool_cap_lift_def ucast_minus ucast_nat_def elim!: ccap_relationE) apply ceqv apply (rule ccorres_Guard_Seq)+ @@ -4398,7 +4361,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (rule_tac t=b and s="snd (extraCaps ! 0)" in subst, fastforce) apply vcg (* Mode stuff *) - apply (rule ccorres_trim_returnE; simp) + apply (rule ccorres_trim_returnE; simp?) apply (rule ccorres_call, rule decodeX64ModeMMUInvocation_ccorres; simp) @@ -4427,10 +4390,10 @@ lemma decodeX64MMUInvocation_ccorres: apply (clarsimp simp: invs_valid_objs') apply (rule conjI, fastforce) apply (clarsimp simp: ctes_of_valid' invs_valid_objs' isCap_simps) - apply (clarsimp simp: ex_cte_cap_wp_to'_def cte_wp_at_ctes_of - invs_sch_act_wf' dest!: isCapDs(1)) + apply (clarsimp simp: ex_cte_cap_wp_to'_def cte_wp_at_ctes_of invs_pspace_distinct' + invs_sch_act_wf' invs_pspace_aligned' + dest!: isCapDs(1)) apply (intro conjI) - apply (simp add: Invariants_H.invs_queues) apply (simp add: valid_tcb_state'_def) apply (fastforce elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread') apply (clarsimp simp: st_tcb_at'_def obj_at'_def) @@ -4468,11 +4431,10 @@ lemma decodeX64MMUInvocation_ccorres: elim!: pred_tcb'_weakenE)[1] apply (clarsimp simp: cte_wp_at_ctes_of asidHighBits_handy_convs word_sle_def word_sless_def asidLowBits_handy_convs - rf_sr_ksCurThread "StrictC'_thread_state_defs" - mask_def[where n=4] + rf_sr_ksCurThread ThreadState_defs mask_def[where n=4] cong: if_cong) - apply (clarsimp simp: to_bool_def ccap_relation_isDeviceCap2 objBits_simps - archObjSize_def pageBits_def from_bool_def case_bool_If) + apply (clarsimp simp: ccap_relation_isDeviceCap2 objBits_simps + archObjSize_def pageBits_def case_bool_If) apply (rule conjI) (* Is Asid Control Cap *) apply (clarsimp simp: neq_Nil_conv excaps_in_mem_def excaps_map_def) @@ -4486,7 +4448,7 @@ lemma decodeX64MMUInvocation_ccorres: hd_conv_nth length_ineq_not_Nil Kernel_C_defs elim!: ccap_relationE) apply (clarsimp simp: to_bool_def unat_eq_of_nat - objBits_simps archObjSize_def pageBits_def from_bool_def case_bool_If + objBits_simps archObjSize_def pageBits_def case_bool_If split: if_splits) apply (clarsimp simp: asid_low_bits_word_bits isCap_simps neq_Nil_conv excaps_map_def excaps_in_mem_def @@ -4497,15 +4459,14 @@ lemma decodeX64MMUInvocation_ccorres: apply (frule 
interpret_excaps_eq[rule_format, where n=0], simp) apply (rule conjI) apply (clarsimp simp: cap_lift_asid_pool_cap cap_lift_page_directory_cap - cap_to_H_def to_bool_def valid_cap'_def + cap_to_H_def valid_cap'_def cap_page_directory_cap_lift_def cap_asid_pool_cap_lift_def mask_def asid_shiftr_low_bits_less[unfolded mask_def asid_bits_def] word_and_le1 elim!: ccap_relationE split: if_split_asm) apply (clarsimp split: list.split) apply (clarsimp simp: cap_lift_asid_pool_cap cap_lift_page_directory_cap - cap_to_H_def to_bool_def - cap_page_directory_cap_lift_def + cap_to_H_def cap_page_directory_cap_lift_def elim!: ccap_relationE split: if_split_asm) apply (erule rf_sr_cte_at_validD[rotated]) apply (fastforce simp: slotcap_in_mem_def2) @@ -4585,10 +4546,10 @@ lemma setMessageInfo_ksCurThread_ccorres: done lemma invokeX86PortIn8_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (valid_objs' and valid_queues and ct_in_state' ((=) Restart) and + (valid_objs' and ct_in_state' ((=) Restart) and pspace_aligned' and pspace_distinct' and (\s. ksCurThread s = thread \ sch_act_wf (ksSchedulerAction s) s)) (UNIV \ \\invLabel = scast Kernel_C.X86IOPortIn8\ \ \\port = port\ @@ -4619,8 +4580,8 @@ lemma invokeX86PortIn8_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) @@ -4642,8 +4603,8 @@ lemma invokeX86PortIn8_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp apply (vcg exspec=setRegister_modifies) @@ -4655,10 +4616,10 @@ lemma invokeX86PortIn8_ccorres: Kernel_C.msgInfoRegister_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply clarsimp apply (vcg exspec=lookupIPCBuffer_modifies) apply (wpsimp wp: hoare_vcg_imp_lift hoare_vcg_all_lift) apply (vcg exspec=in8_modifies) @@ -4666,17 +4627,17 @@ lemma invokeX86PortIn8_ccorres: apply (rule conseqPre, vcg) apply clarsimp by (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def projectKOs - ThreadState_Running_def mask_def rf_sr_ksCurThread - X64_H.badgeRegister_def X64.badgeRegister_def "StrictC'_register_defs" - X64.capRegister_def msgRegisters_unfold message_info_to_H_def - msgRegisters_ccorres[where n=0, simplified n_msgRegisters_def, - simplified, symmetric]) + ThreadState_defs mask_def rf_sr_ksCurThread + X64_H.badgeRegister_def X64.badgeRegister_def "StrictC'_register_defs" + X64.capRegister_def msgRegisters_unfold message_info_to_H_def + msgRegisters_ccorres[where n=0, simplified n_msgRegisters_def, + simplified, symmetric]) lemma invokeX86PortIn16_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres 
((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (valid_objs' and valid_queues and ct_in_state' ((=) Restart) and + (valid_objs' and ct_in_state' ((=) Restart) and pspace_aligned' and pspace_distinct' and (\s. ksCurThread s = thread \ sch_act_wf (ksSchedulerAction s) s)) (UNIV \ \\invLabel = scast Kernel_C.X86IOPortIn16\ \ \\port = port\ @@ -4707,8 +4668,8 @@ lemma invokeX86PortIn16_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) @@ -4730,8 +4691,8 @@ lemma invokeX86PortIn16_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp apply (vcg exspec=setRegister_modifies) @@ -4743,10 +4704,10 @@ lemma invokeX86PortIn16_ccorres: Kernel_C.msgInfoRegister_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply clarsimp apply (vcg exspec=lookupIPCBuffer_modifies) apply (wpsimp wp: hoare_vcg_imp_lift hoare_vcg_all_lift) apply (vcg exspec=in16_modifies) @@ -4754,17 +4715,17 @@ lemma invokeX86PortIn16_ccorres: apply (rule conseqPre, vcg) apply clarsimp by (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def projectKOs - ThreadState_Running_def mask_def rf_sr_ksCurThread - X64_H.badgeRegister_def X64.badgeRegister_def "StrictC'_register_defs" - X64.capRegister_def msgRegisters_unfold message_info_to_H_def - msgRegisters_ccorres[where n=0, simplified n_msgRegisters_def, - simplified, symmetric]) + ThreadState_defs mask_def rf_sr_ksCurThread + X64_H.badgeRegister_def X64.badgeRegister_def "StrictC'_register_defs" + X64.capRegister_def msgRegisters_unfold message_info_to_H_def + msgRegisters_ccorres[where n=0, simplified n_msgRegisters_def, + simplified, symmetric]) lemma invokeX86PortIn32_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (valid_objs' and valid_queues and ct_in_state' ((=) Restart) and + (valid_objs' and ct_in_state' ((=) Restart) and pspace_aligned' and pspace_distinct' and (\s. ksCurThread s = thread \ sch_act_wf (ksSchedulerAction s) s)) (UNIV \ \\invLabel = scast Kernel_C.X86IOPortIn32\ \ \\port = port\ @@ -4793,8 +4754,8 @@ lemma invokeX86PortIn32_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) apply (rule_tac P="\s. 
ksCurThread s = thread" in ccorres_cross_over_guard) @@ -4816,8 +4777,8 @@ lemma invokeX86PortIn32_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp apply (vcg exspec=setRegister_modifies) @@ -4829,10 +4790,10 @@ lemma invokeX86PortIn32_ccorres: Kernel_C.msgInfoRegister_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply clarsimp apply (vcg exspec=lookupIPCBuffer_modifies) apply (wpsimp wp: hoare_vcg_imp_lift hoare_vcg_all_lift) apply (vcg exspec=in32_modifies) @@ -4840,14 +4801,14 @@ lemma invokeX86PortIn32_ccorres: apply (rule conseqPre, vcg) apply clarsimp by (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def projectKOs - ThreadState_Running_def mask_def rf_sr_ksCurThread - X64_H.badgeRegister_def X64.badgeRegister_def "StrictC'_register_defs" - X64.capRegister_def msgRegisters_unfold message_info_to_H_def - msgRegisters_ccorres[where n=0, simplified n_msgRegisters_def, - simplified, symmetric]) + ThreadState_defs mask_def rf_sr_ksCurThread + X64_H.badgeRegister_def X64.badgeRegister_def "StrictC'_register_defs" + X64.capRegister_def msgRegisters_unfold message_info_to_H_def + msgRegisters_ccorres[where n=0, simplified n_msgRegisters_def, + simplified, symmetric]) lemma invokeX86PortOut8_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') invs' @@ -4866,7 +4827,7 @@ lemma invokeX86PortOut8_ccorres: done lemma invokeX86PortOut16_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') invs' @@ -4885,7 +4846,7 @@ lemma invokeX86PortOut16_ccorres: done lemma invokeX86PortOut32_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') invs' @@ -5115,7 +5076,7 @@ lemma isIOPortRangeFree_spec: !! 
unat (port && mask wordRadix))}"]) apply (simp add: port_array_def) apply (rule conseqPre, vcg) - apply (all \clarsimp simp: hrs_simps false_def from_bool_0 wordRadix_def is_up is_down + apply (all \clarsimp simp: hrs_simps from_bool_0 wordRadix_def is_up is_down unat_ucast_upcast uint_up_ucast sint_ucast_eq_uint up_ucast_inj_eq not_max_word_simps[THEN ucast_increment] ucast_cmp_ucast ucast_cmp_ucast[where 'a=16 and y="0x40", simplified]\) @@ -5351,7 +5312,7 @@ proof - apply (rule ccorres_equals_throwError) apply (fastforce simp: whenE_def throwError_bind invocationCatch_def) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_to_H_cases) apply (clarsimp simp: ucast_drop_big_mask) apply (clarsimp simp: invocationCatch_use_injection_handler injection_bindE[OF refl refl] @@ -5363,7 +5324,7 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (clarsimp simp: from_bool_0 injection_handler_throwError) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_to_H_cases) apply (clarsimp simp: from_bool_neq_0 injection_handler_returnOk) apply (ctac add: ccorres_injection_handler_csum1 @@ -5376,8 +5337,7 @@ proof - apply ccorres_rewrite apply (rule_tac P="\s. thread = ksCurThread s" in ccorres_cross_over_guard) apply (ctac add: setThreadState_ccorres) - apply (ctac(no_vcg) add: invokeX86PortControl_ccorres - [simplified dc_def o_def id_def]) + apply (ctac(no_vcg) add: invokeX86PortControl_ccorres) apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE, simp+)[1] @@ -5406,8 +5366,8 @@ proof - and sch_act_simple and cte_wp_at' \ slot and (\s. 
thread = ksCurThread s)" in hoare_strengthen_post) apply (wpsimp wp: getSlotCap_wp) - apply (clarsimp simp: unat_less_2p_word_bits invs_queues invs_valid_objs' - valid_tcb_state'_def + apply (clarsimp simp: unat_less_2p_word_bits invs_valid_objs' + valid_tcb_state'_def invs_pspace_aligned' invs_pspace_distinct' invs_sch_act_wf' st_tcb_strg'[rule_format] st_tcb_at'_def obj_at'_def projectKOs word_le_not_less split: thread_state.splits) @@ -5430,10 +5390,10 @@ proof - apply (clarsimp simp: ct_in_state'_def) apply (rule_tac P="UNIV" in conseqPre) apply (simp add: all_ex_eq_helper, vcg exspec=getSyscallArg_modifies) - apply (clarsimp simp: interpret_excaps_eq rf_sr_ksCurThread ThreadState_Restart_def mask_def) + apply (clarsimp simp: interpret_excaps_eq rf_sr_ksCurThread ThreadState_defs mask_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) apply clarsimp - apply (rule conjI, clarsimp simp: sysargs_rel_to_n o_def dest!: unat_length_4_helper) + apply (rule conjI, clarsimp simp: sysargs_rel_to_n dest!: unat_length_4_helper) apply (clarsimp simp: o_def) done qed @@ -5513,7 +5473,7 @@ proof - apply (rule ccorres_return_CE, simp+)[1] apply (rule ccorres_return_C_errorE, simp+)[1] apply wp - apply (wpsimp wp: ct_in_state'_set sts_running_valid_queues) + apply (wpsimp wp: ct_in_state'_set sts_valid_objs') apply (simp add: Collect_const_mem intr_and_se_rel_def cintr_def exception_defs) apply (vcg exspec=setThreadState_modifies) apply clarsimp @@ -5557,7 +5517,7 @@ proof - apply (rule ccorres_return_CE, simp+)[1] apply (rule ccorres_return_C_errorE, simp+)[1] apply wp - apply (wpsimp wp: ct_in_state'_set sts_running_valid_queues) + apply (wpsimp wp: ct_in_state'_set sts_valid_objs') apply (simp add: Collect_const_mem intr_and_se_rel_def cintr_def exception_defs) apply (vcg exspec=setThreadState_modifies) apply clarsimp @@ -5600,7 +5560,7 @@ proof - apply (rule ccorres_return_CE, simp+)[1] apply (rule ccorres_return_C_errorE, simp+)[1] apply wp - apply (wpsimp wp: ct_in_state'_set sts_running_valid_queues) + apply (wpsimp wp: ct_in_state'_set sts_valid_objs') apply (simp add: Collect_const_mem intr_and_se_rel_def cintr_def exception_defs) apply (vcg exspec=setThreadState_modifies) apply clarsimp @@ -5768,11 +5728,10 @@ proof - apply (rule syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_to_H_cases) apply (clarsimp simp: arch_invocation_label_defs sysargs_rel_to_n valid_tcb_state'_def tcb_at_invs' - invs_queues invs_sch_act_wf' ct_active_st_tcb_at_minor' rf_sr_ksCurThread - ThreadState_Restart_def mask_def + invs_sch_act_wf' ct_active_st_tcb_at_minor' rf_sr_ksCurThread ucast_mask_drop[where n=16, simplified mask_def, simplified]) apply (safe, simp_all add: unat_eq_0 unat_eq_1) - apply (clarsimp dest!: unat_length_2_helper simp: ThreadState_Restart_def mask_def syscall_error_rel_def + apply (clarsimp dest!: unat_length_2_helper simp: ThreadState_defs mask_def syscall_error_rel_def | (thin_tac "P" for P)+, word_bitwise)+ done qed diff --git a/proof/crefine/X64/CLevityCatch.thy b/proof/crefine/X64/CLevityCatch.thy index 8cc7860ec9..e73e479189 100644 --- a/proof/crefine/X64/CLevityCatch.thy +++ b/proof/crefine/X64/CLevityCatch.thy @@ -8,8 +8,9 @@ theory CLevityCatch imports "CBaseRefine.Include_C" ArchMove_C - "CLib.LemmaBucket_C" + "CParser.LemmaBucket_C" "Lib.LemmaBucket" + Boolean_C begin context begin interpretation Arch . 
(*FIXME: arch_split*) @@ -55,12 +56,12 @@ lemma empty_fail_getExtraCPtrs [intro!, simp]: "empty_fail (getExtraCPtrs sendBuffer info)" apply (simp add: getExtraCPtrs_def) apply (cases info, simp) - apply (cases sendBuffer, simp_all) + apply (cases sendBuffer; fastforce) done lemma empty_fail_loadCapTransfer [intro!, simp]: "empty_fail (loadCapTransfer a)" - by (simp add: loadCapTransfer_def capTransferFromWords_def) + by (fastforce simp: loadCapTransfer_def capTransferFromWords_def) lemma empty_fail_emptyOnFailure [intro!, simp]: "empty_fail m \ empty_fail (emptyOnFailure m)" diff --git a/proof/crefine/X64/CSpaceAcc_C.thy b/proof/crefine/X64/CSpaceAcc_C.thy index 79d4e7a937..7c199a74aa 100644 --- a/proof/crefine/X64/CSpaceAcc_C.thy +++ b/proof/crefine/X64/CSpaceAcc_C.thy @@ -274,7 +274,7 @@ lemma array_assertion_abs_cnode_ctes: apply (metis array_assertion_shrink_right) done -lemmas ccorres_move_array_assertion_cnode_ctes [corres_pre] +lemmas ccorres_move_array_assertion_cnode_ctes [ccorres_pre] = ccorres_move_Guard_Seq [OF array_assertion_abs_cnode_ctes] ccorres_move_Guard [OF array_assertion_abs_cnode_ctes] diff --git a/proof/crefine/X64/CSpace_All.thy b/proof/crefine/X64/CSpace_All.thy index cad9ac6cdc..45a8087a89 100644 --- a/proof/crefine/X64/CSpace_All.thy +++ b/proof/crefine/X64/CSpace_All.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -24,9 +25,9 @@ abbreviation (* FIXME: move *) lemma ccorres_return_into_rel: - "ccorres (\rv rv'. r (f rv) rv') xf G G' hs a c + "ccorres (r \ f) xf G G' hs a c \ ccorres r xf G G' hs (a >>= (\rv. return (f rv))) c" - by (simp add: liftM_def[symmetric] o_def) + by (simp add: liftM_def[symmetric]) lemma lookupCap_ccorres': "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf @@ -253,8 +254,7 @@ lemma lookupSlotForCNodeOp_ccorres': apply vcg \ \last subgoal\ - apply (clarsimp simp: if_1_0_0 to_bool_def true_def word_size - fromIntegral_def integral_inv) + apply (clarsimp simp: word_size fromIntegral_def integral_inv) apply (case_tac "cap_get_tag root___struct_cap_C = scast cap_cnode_cap") prefer 2 apply clarsimp apply (clarsimp simp: unat_of_nat64 word_sle_def) @@ -290,7 +290,7 @@ lemma lookupSourceSlot_ccorres': apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres') - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done lemma lookupSourceSlot_ccorres: @@ -320,7 +320,7 @@ lemma lookupTargetSlot_ccorres': apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres') - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done lemma lookupTargetSlot_ccorres: @@ -350,7 +350,7 @@ lemma lookupPivotSlot_ccorres: apply simp apply simp apply (ctac add: lookupSlotForCNodeOp_ccorres) - apply (clarsimp simp: to_bool_def true_def false_def) + apply clarsimp done end diff --git a/proof/crefine/X64/CSpace_C.thy b/proof/crefine/X64/CSpace_C.thy index 9a6e0b52da..e8b39a5233 100644 --- a/proof/crefine/X64/CSpace_C.thy +++ b/proof/crefine/X64/CSpace_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -127,10 +128,6 @@ lemma Arch_maskCapRights_ccorres [corres]: apply (cases arch_cap) by (fastforce simp add: cap_get_tag_isCap isCap_simps simp del: not_ex simp_thms(44))+ -lemma to_bool_mask_to_bool_bf: - "to_bool (x && 1) = to_bool_bf (x::machine_word)" - by (simp add: to_bool_bf_def to_bool_def) - 
lemma to_bool_cap_rights_bf: "to_bool (capAllowRead_CL (seL4_CapRights_lift R)) = to_bool_bf (capAllowRead_CL (seL4_CapRights_lift R))" @@ -191,7 +188,7 @@ lemma maskCapRights_ccorres [corres]: apply csymbr apply (simp add: maskCapRights_cap_cases cap_get_tag_isCap del: Collect_const) apply wpc - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -201,7 +198,7 @@ lemma maskCapRights_ccorres [corres]: apply vcg apply clarsimp apply (simp add: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -210,7 +207,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -236,7 +233,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_ntfn_cap_bf to_bool_mask_to_bool_bf to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -244,7 +241,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -272,7 +269,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_ep_cap_bf to_bool_mask_to_bool_bf to_bool_cap_rights_bf) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -281,7 +278,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_from_vcg_throws [where P=\ and P'=UNIV]) @@ -289,7 +286,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply (subst bind_return [symmetric]) apply (rule ccorres_split_throws) apply ctac @@ -302,7 +299,7 @@ lemma maskCapRights_ccorres [corres]: apply wp apply vcg apply vcg - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply ccorres_rewrite @@ -322,7 +319,7 @@ lemma maskCapRights_ccorres [corres]: apply (simp add: ccap_rights_relation_def cap_rights_to_H_def to_bool_reply_cap_bf to_bool_mask_to_bool_bf[simplified] to_bool_cap_rights_bf) - apply (simp add: 
Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -331,7 +328,7 @@ lemma maskCapRights_ccorres [corres]: apply (rule conseqPre) apply vcg apply (clarsimp simp: return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -341,7 +338,7 @@ lemma maskCapRights_ccorres [corres]: apply vcg apply clarsimp apply (simp add: cap_get_tag_isCap isCap_simps return_def) - apply (simp add: Collect_const_mem from_bool_def) + apply (simp add: Collect_const_mem) apply csymbr apply (simp add: cap_get_tag_isCap isCap_simps del: Collect_const) apply (simp add: ccorres_cond_iffs) @@ -502,8 +499,7 @@ lemma Arch_isCapRevocable_spec: {t. \c c'. ccap_relation c (derivedCap_' s) \ ccap_relation c' (srcCap_' s) \ ret__unsigned_long_' t = from_bool (Arch.isCapRevocable c c')}" apply vcg - by (auto simp: false_def from_bool_def X64_H.isCapRevocable_def - cap_get_tag_isCap_unfolded_H_cap cap_tag_defs isCap_simps + by (auto simp: X64_H.isCapRevocable_def cap_get_tag_isCap_unfolded_H_cap cap_tag_defs isCap_simps cap_get_tag_isCap[unfolded cap_io_port_control_cap_def, simplified] split: capability.splits arch_capability.splits bool.splits) @@ -512,7 +508,7 @@ lemmas isCapRevocable_simps[simp] = Retype_H.isCapRevocable_def[split_simps capa context begin (* revokable_ccorres *) private method revokable'_hammer = solves \( - simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def, + simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs, rule ccorres_guard_imp, rule ccorres_return_C; clarsimp)\ @@ -541,7 +537,7 @@ lemma revokable_ccorres: \ \Uninteresting caps\ apply revokable'_hammer+ \ \NotificationCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (frule_tac cap'1=srcCap in cap_get_tag_NotificationCap[THEN iffD1]) @@ -550,12 +546,12 @@ lemma revokable_ccorres: apply (clarsimp simp: cap_get_tag_isCap isCap_simps) apply (fastforce simp: cap_get_tag_isCap isCap_simps) \ \IRQHandlerCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (fastforce simp: cap_get_tag_isCap isCap_simps) \ \EndpointCap\ - apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs from_bool_def true_def false_def) + apply (simp add: cap_get_tag_isCap isCap_simps ccorres_cond_iffs) apply (rule ccorres_guard_imp, (rule ccorres_rhs_assoc)+, csymbr, csymbr) apply (rule ccorres_return_C, clarsimp+) apply (frule_tac cap'1=srcCap in cap_get_tag_EndpointCap[THEN iffD1]) @@ -774,7 +770,7 @@ lemma update_freeIndex': show ?thesis apply (cinit lift: cap_ptr_' v64_') apply (rule ccorres_pre_getCTE) - apply (rule_tac P="\s. ctes_of s srcSlot = Some rv \ (\i. cteCap rv = UntypedCap d p sz i)" + apply (rule_tac P="\s. ctes_of s srcSlot = Some cte \ (\i. 
cteCap cte = UntypedCap d p sz i)" in ccorres_from_vcg[where P' = UNIV]) apply (rule allI) apply (rule conseqPre) @@ -898,7 +894,7 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (rule ccorres_Guard) apply (rule ccorres_call) - apply (rule update_freeIndex [unfolded dc_def]) + apply (rule update_freeIndex) apply simp apply simp apply simp @@ -924,14 +920,14 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply csymbr apply (clarsimp simp: cap_get_tag_to_H cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_Skip) apply clarsimp apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap) apply (frule(1) cte_wp_at_valid_objs_valid_cap') apply (clarsimp simp: untypedBits_defs) @@ -1047,19 +1043,17 @@ lemma cteInsert_ccorres: apply csymbr apply simp apply (rule ccorres_move_c_guard_cte) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres:ccorres_updateMDB_set_mdbPrev) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres: ccorres_updateMDB_skip) - apply (wp static_imp_wp)+ - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp)+ + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg - apply (wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg apply (clarsimp simp:cmdb_node_relation_mdbNext) - apply (wp setUntypedCapAsFull_cte_at_wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp setUntypedCapAsFull_cte_at_wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply (vcg exspec=setUntypedCapAsFull_modifies) apply wp apply vcg @@ -1196,7 +1190,7 @@ lemma cteMove_ccorres: apply (clarsimp simp: cte_wp_at_ctes_of cteSizeBits_eq ctes_of_canonical ctes_of_aligned_bits) apply assumption apply (clarsimp simp: ccap_relation_NullCap_iff cmdbnode_relation_def - mdb_node_to_H_def nullMDBNode_def false_def) + mdb_node_to_H_def nullMDBNode_def) done (************************************************************************) @@ -1523,8 +1517,8 @@ lemma emptySlot_helper: mdbFirstBadged_CL (cteMDBNode_CL y)") prefer 2 apply (drule cteMDBNode_CL_lift [symmetric]) - subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) - subgoal by (simp add: to_bool_def mask_def) + subgoal by (simp add: mdb_node_lift_def word_bw_assocs) + subgoal by (simp add: to_bool_def) \ \\ \x\fst \\ apply clarsimp apply (rule fst_setCTE [OF ctes_of_cte_at], assumption ) @@ -1555,7 +1549,7 @@ lemma emptySlot_helper: prefer 2 apply (drule cteMDBNode_CL_lift [symmetric]) subgoal by (simp add: mdb_node_lift_def mask_def word_bw_assocs) - apply (simp add: to_bool_def mask_def split: if_split) + apply (simp add: to_bool_def split: if_split) \ \trivial case where mdbNext rva = 0\ apply (simp add:ccorres_cond_empty_iff) @@ -1718,7 +1712,6 @@ lemma setIRQState_ccorres: apply 
wp apply (simp add: empty_fail_def getInterruptState_def simpler_gets_def) apply clarsimp - apply (simp add: from_bool_def) apply (cases irqState, simp_all) apply (simp add: Kernel_C.IRQSignal_def Kernel_C.IRQInactive_def) apply (simp add: Kernel_C.IRQTimer_def Kernel_C.IRQInactive_def) @@ -2427,9 +2420,9 @@ lemma freeIOPortRange_ccorres: apply (cinit lift: first_port_' last_port_') apply (rule ccorres_Guard) apply (ctac add: setIOPortMask_ccorres) - by (clarsimp simp: false_def rf_sr_def cstate_relation_def Let_def carch_state_relation_def - global_ioport_bitmap_relation_def typ_heap_simps - split: if_splits) + by (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def + global_ioport_bitmap_relation_def typ_heap_simps + split: if_splits) lemma Arch_postCapDeletion_ccorres: "ccorres dc xfdc @@ -2443,8 +2436,8 @@ lemma Arch_postCapDeletion_ccorres: prefer 3 (* IOPort case *) apply (rule ccorres_rhs_assoc)+ apply csymbr+ - apply (ctac add: freeIOPortRange_ccorres[simplified dc_def]) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (ctac add: freeIOPortRange_ccorres) + apply (rule ccorres_return_Skip)+ apply (clarsimp simp: arch_cleanup_info_wf'_def split: arch_capability.splits) apply (frule cap_get_tag_isCap_unfolded_H_cap) by (clarsimp simp: ccap_relation_def cap_io_port_cap_lift cap_to_H_def) @@ -2469,7 +2462,6 @@ lemma postCapDeletion_ccorres: apply (rule ccorres_symb_exec_r) apply (rule_tac xf'=irq_' in ccorres_abstract, ceqv) apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) - apply (fold dc_def) apply (frule cap_get_tag_to_H, solves \clarsimp simp: cap_get_tag_isCap_unfolded_H_cap\) apply (clarsimp simp: cap_irq_handler_cap_lift) apply (ctac(no_vcg) add: deletedIRQHandler_ccorres) @@ -2480,9 +2472,9 @@ lemma postCapDeletion_ccorres: apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_Cond_rhs) apply (wpc; clarsimp simp: isCap_simps) - apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres) apply (simp add: not_irq_or_arch_cap_case) - apply (rule ccorres_return_Skip[unfolded dc_def])+ + apply (rule ccorres_return_Skip) apply clarsimp apply (rule conjI, clarsimp simp: isCap_simps Kernel_C.maxIRQ_def) apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) @@ -2532,7 +2524,7 @@ lemma emptySlot_ccorres: \ \*** proof for the 'else' branch (return () and SKIP) ***\ prefer 2 - apply (ctac add: ccorres_return_Skip[unfolded dc_def]) + apply (ctac add: ccorres_return_Skip) \ \*** proof for the 'then' branch ***\ @@ -2577,7 +2569,7 @@ lemma emptySlot_ccorres: \ \the post_cap_deletion case\ - apply (ctac(no_vcg) add: postCapDeletion_ccorres [unfolded dc_def]) + apply (ctac(no_vcg) add: postCapDeletion_ccorres) \ \Haskell pre/post for y \ updateMDB slot (\a. 
nullMDBNode);\ apply wp @@ -2587,7 +2579,7 @@ lemma emptySlot_ccorres: \ \Haskell pre/post for y \ updateCap slot capability.NullCap;\ apply wp \ \C pre/post for y \ updateCap slot capability.NullCap;\ - apply (simp add: Collect_const_mem cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def false_def) + apply (simp add: Collect_const_mem cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def) \ \Haskell pre/post for the two nested updates\ apply wp \ \C pre/post for the two nested updates\ @@ -2650,8 +2642,8 @@ lemma capSwapForDelete_ccorres: \ \--- instruction: when (slot1 \ slot2) \ / IF Ptr slot1 = Ptr slot2 THEN \\ apply (simp add:when_def) apply (rule ccorres_if_cond_throws2 [where Q = \ and Q' = \]) - apply (case_tac "slot1=slot2", simp+) - apply (rule ccorres_return_void_C [simplified dc_def]) + apply (case_tac "slot1=slot2"; simp) + apply (rule ccorres_return_void_C) \ \***Main goal***\ \ \--- ccorres goal with 2 affectations (cap1 and cap2) on both on Haskell and C\ @@ -2660,7 +2652,7 @@ lemma capSwapForDelete_ccorres: apply (rule ccorres_pre_getCTE)+ apply (rule ccorres_move_c_guard_cte, rule ccorres_symb_exec_r)+ \ \***Main goal***\ - apply (ctac (no_vcg) add: cteSwap_ccorres [unfolded dc_def] ) + apply (ctac (no_vcg) add: cteSwap_ccorres) \ \C Hoare triple for \cap2 :== \\ apply vcg \ \C existential Hoare triple for \cap2 :== \\ @@ -2743,7 +2735,7 @@ lemma Arch_sameRegionAs_spec: apply (cases capa; cases capb; frule (1) cap_get_tag[where cap'=cap_a]; (frule cap_lifts[where c=cap_a, THEN iffD1])?; frule (1) cap_get_tag[where cap'=cap_b]; (frule cap_lifts[where c=cap_b, THEN iffD1])?) - apply (simp_all add: cap_tag_defs isCap_simps from_bool_def true_def false_def if_0_1_eq) + apply (simp_all add: cap_tag_defs isCap_simps from_bool_def if_0_1_eq) apply (all \clarsimp simp: ccap_relation_def cap_to_H_def c_valid_cap_def cl_valid_cap_def Let_def\) apply (simp add: cap_io_port_cap_lift_def'[simplified cap_tag_defs] mask_def @@ -2987,8 +2979,7 @@ lemma cap_get_capIsPhysical_spec: cap_lift_asid_control_cap word_sle_def cap_lift_irq_control_cap cap_lift_null_cap mask_def objBits_simps cap_lift_domain_cap - ptr_add_assertion_positive from_bool_def - true_def false_def cap_get_tag_scast + ptr_add_assertion_positive cap_get_tag_scast dest!: sym [where t = "ucast (cap_get_tag cap)" for cap] split: vmpage_size.splits)+ (* XXX: slow. 
there should be a rule for this *) @@ -3072,22 +3063,23 @@ lemma sameRegionAs_spec: apply (simp add: sameRegionAs_def isArchCap_tag_def2 ccap_relation_c_valid_cap) apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps) \ \capa is a ThreadCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + apply (case_tac capb, + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(1)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(1)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_thread_cap_lift) apply (simp add: cap_to_H_def) + apply (clarsimp simp: from_bool_0 split: if_split) apply (clarsimp simp: case_bool_If ctcb_ptr_to_tcb_ptr_def if_distrib cong: if_cong) apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is a NullCap\ - apply (simp add: cap_tag_defs from_bool_def false_def) + apply (simp add: cap_tag_defs) \ \capa is an NotificationCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + apply (case_tac capb, + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(3)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(3)) apply (simp add: ccap_relation_def map_option_case) @@ -3097,8 +3089,8 @@ lemma sameRegionAs_spec: apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is an IRQHandlerCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + apply (case_tac capb, + simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(5)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(5)) apply (simp add: ccap_relation_def map_option_case) @@ -3113,34 +3105,34 @@ lemma sameRegionAs_spec: apply (clarsimp simp: isArchCap_tag_def2) \ \capa is an EndpointCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(4)) apply (frule_tac cap'=cap_b in cap_get_tag_isCap_unfolded_H_cap(4)) apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_endpoint_cap_lift) apply (simp add: cap_to_H_def) - apply (clarsimp split: if_split) + apply (clarsimp simp: from_bool_0 split: if_split) apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) \ \capa is a DomainCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 split: if_split) \ \capa is a Zombie\ - apply (simp add: cap_tag_defs from_bool_def false_def) + apply (simp add: cap_tag_defs) \ \capa is an Arch object cap\ apply (frule_tac cap'=cap_a in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) apply (rule conjI, clarsimp, rule impI)+ apply 
(case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] \ \capb is an Arch object cap\ apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 cap_tag_defs linorder_not_less [THEN sym]) \ \capa is a ReplyCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(8)) @@ -3148,7 +3140,7 @@ lemma sameRegionAs_spec: apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_reply_cap_lift) apply (simp add: cap_to_H_def ctcb_ptr_to_tcb_ptr_def) - apply (clarsimp split: if_split) + apply (clarsimp simp: from_bool_0 split: if_split) \ \capa is an UntypedCap\ apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(9)) apply (intro conjI) @@ -3156,8 +3148,7 @@ lemma sameRegionAs_spec: apply (rule impI, drule(1) cap_get_tag_to_H)+ apply (clarsimp simp: capAligned_def word_bits_conv objBits_simps' get_capZombieBits_CL_def - Let_def word_less_nat_alt - less_mask_eq true_def + Let_def word_less_nat_alt less_mask_eq split: if_split_asm) apply (subgoal_tac "capBlockSize_CL (cap_untyped_cap_lift cap_a) \ 0x3F") apply (simp add: word_le_make_less) @@ -3178,10 +3169,9 @@ lemma sameRegionAs_spec: cap_untyped_cap_lift cap_to_H_def field_simps valid_cap'_def)+)[4] apply (rule impI, simp add: from_bool_0 ccap_relation_get_capIsPhysical[symmetric]) - apply (simp add: from_bool_def false_def) \ \capa is a CNodeCap\ apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def)[1] + isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (clarsimp simp: isArchCap_tag_def2) apply (frule_tac cap'=cap_a in cap_get_tag_isCap_unfolded_H_cap(10)) @@ -3189,10 +3179,9 @@ lemma sameRegionAs_spec: apply (simp add: ccap_relation_def map_option_case) apply (simp add: cap_cnode_cap_lift) apply (simp add: cap_to_H_def) - apply (clarsimp split: if_split bool.split) + apply (clarsimp simp: from_bool_0 split: if_split bool.split) \ \capa is an IRQControlCap\ - apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs from_bool_def false_def true_def)[1] + apply (case_tac capb, simp_all add: cap_get_tag_isCap_unfolded_H_cap isCap_simps cap_tag_defs)[1] apply (frule_tac cap'=cap_b in cap_get_tag_isArchCap_unfolded_H_cap) apply (fastforce simp: isArchCap_tag_def2 split: if_split) done @@ -3228,7 +3217,7 @@ lemma ccap_relation_PageCap_IsDevice: apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_lift_def cap_lift_defs cap_tag_defs Let_def) apply (thin_tac _)+ - by (clarsimp simp: to_bool_def mask_def word_and_1 split: if_splits) + by (clarsimp simp: word_and_1 split: if_splits) lemma ccap_relation_PageCap_Size: "ccap_relation (ArchObjectCap (PageCap p r t s d m)) ccap @@ -3287,14 +3276,14 @@ lemma Arch_sameObjectAs_spec: apply (cases capa) apply (all \frule (1) cap_get_tag[where cap'=cap_a]\) apply (all \(frule cap_lifts[where c=cap_a, THEN iffD1])?\) - apply (all \clarsimp simp: cap_tag_defs isCap_simps from_bool_def true_def false_def if_0_1_eq + apply (all \clarsimp simp: cap_tag_defs isCap_simps split: if_splits\) apply (all \fastforce?\) (* IO 
ports and frames remain. *) apply (all \cases capb\) apply (all \frule (1) cap_get_tag[where cap'=cap_b]\) apply (all \(frule cap_lifts[where c=cap_b, THEN iffD1])?\) - apply (all \clarsimp simp: cap_tag_defs isCap_simps from_bool_def true_def false_def if_0_1_eq + apply (all \clarsimp simp: cap_tag_defs isCap_simps ccap_relation_PageCap_fields framesize_from_H_eq capAligned_def split: if_splits\) apply (all \(fastforce simp: X64_H.sameRegionAs_def isCap_simps)?\) @@ -3311,8 +3300,7 @@ lemma sameObjectAs_spec: apply vcg apply (clarsimp simp: sameObjectAs_def isArchCap_tag_def2) apply (case_tac capa, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps cap_tag_defs - from_bool_def false_def) + isCap_simps cap_tag_defs) apply fastforce+ \ \capa is an arch cap\ apply (frule cap_get_tag_isArchCap_unfolded_H_cap) @@ -3392,7 +3380,7 @@ lemma isMDBParentOf_spec: apply (simp add: ccte_relation_def map_option_case) apply (simp add: cte_lift_def) apply (clarsimp simp: cte_to_H_def mdb_node_to_H_def split: option.split_asm) - apply (clarsimp simp: Let_def false_def from_bool_def to_bool_def + apply (clarsimp simp: Let_def to_bool_def split: if_split bool.splits) apply ((clarsimp simp: typ_heap_simps dest!: lift_t_g)+)[3] apply (rule_tac x="cteCap ctea" in exI, rule conjI) @@ -3409,11 +3397,11 @@ lemma isMDBParentOf_spec: apply (rule conjI) \ \sameRegionAs = 0\ apply (rule impI) - apply (clarsimp simp: from_bool_def false_def + apply (clarsimp simp: from_bool_def split: if_split bool.splits) \ \sameRegionAs \ 0\ - apply (clarsimp simp: from_bool_def false_def) + apply (clarsimp simp: from_bool_def) apply (clarsimp cong:bool.case_cong if_cong simp: typ_heap_simps) apply (rule conjI) @@ -3421,8 +3409,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (frule cap_get_tag_EndpointCap) apply simp - apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def true_def) \ \badge of A is not 0 now\ - + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ apply (subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_endpoint_cap") \ \needed also after\ prefer 2 @@ -3437,8 +3424,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (frule cap_get_tag_NotificationCap) apply simp - apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def true_def) \ \badge of A is not 0 now\ - + apply (clarsimp simp: to_bool_def isNotificationCap_def isEndpointCap_def) \ \badge of A is not 0 now\ apply (subgoal_tac "cap_get_tag (cte_C.cap_C cte_b) = scast cap_notification_cap") \ \needed also after\ prefer 2 @@ -3454,7 +3440,7 @@ lemma isMDBParentOf_spec: apply clarsimp apply (simp add: to_bool_def) apply (subgoal_tac "(\ (isEndpointCap (cap_to_H x2b))) \ ( \ (isNotificationCap (cap_to_H x2b)))") - apply (clarsimp simp: true_def) + apply clarsimp apply (clarsimp simp: cap_get_tag_isCap [symmetric]) done @@ -3470,7 +3456,7 @@ lemma updateCapData_spec: apply (simp add: updateCapData_def) apply (case_tac cap, simp_all add: cap_get_tag_isCap_unfolded_H_cap - isCap_simps from_bool_def isArchCap_tag_def2 cap_tag_defs Let_def) + isCap_simps isArchCap_tag_def2 cap_tag_defs Let_def) \ \NotificationCap\ apply clarsimp apply (frule cap_get_tag_isCap_unfolded_H_cap(3)) @@ -3597,7 +3583,6 @@ lemma ensureNoChildren_ccorres: apply (rule conjI) \ \isMDBParentOf is not zero\ apply clarsimp - apply (simp add: from_bool_def) apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] apply (simp add: bind_def) @@ -3608,7 +3593,6 @@ lemma 
ensureNoChildren_ccorres: apply (simp add: syscall_error_to_H_cases(9)) \ \isMDBParentOf is zero\ apply clarsimp - apply (simp add: from_bool_def) apply (case_tac "isMDBParentOf (cte_to_H y) (cte_to_H ya)", simp_all)[1] apply (simp add: bind_def) apply (simp add: split_paired_Bex) @@ -3783,7 +3767,7 @@ lemma deriveCap_ccorres': apply csymbr apply (fold case_bool_If) apply wpc - apply (clarsimp simp: cap_get_tag_isCap isCap_simps from_bool_def) + apply (clarsimp simp: cap_get_tag_isCap isCap_simps) apply csymbr apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws [where P=\ and P' = UNIV]) @@ -3792,7 +3776,7 @@ lemma deriveCap_ccorres': apply vcg apply clarsimp apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -3800,7 +3784,7 @@ lemma deriveCap_ccorres': apply (clarsimp simp: returnOk_def return_def ccap_relation_NullCap_iff) apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_rhs_assoc)+ @@ -3823,7 +3807,7 @@ lemma deriveCap_ccorres': errstate_def) apply wp apply wpc - apply (clarsimp simp: isCap_simps cap_get_tag_isCap from_bool_def) + apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply csymbr apply (clarsimp simp: isCap_simps cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -3837,7 +3821,7 @@ lemma deriveCap_ccorres': apply vcg apply (clarsimp simp: cap_get_tag_isCap liftME_def Let_def isArchCap_T_isArchObjectCap - ccorres_cond_univ_iff from_bool_def) + ccorres_cond_univ_iff) apply (rule ccorres_add_returnOk) apply (rule ccorres_split_nothrow_call_novcgE [where xf'=ret__struct_deriveCap_ret_C_']) @@ -3855,7 +3839,7 @@ lemma deriveCap_ccorres': apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def throwError_def) apply wp - apply (simp add: cap_get_tag_isCap isArchCap_T_isArchObjectCap from_bool_def) + apply (simp add: cap_get_tag_isCap isArchCap_T_isArchObjectCap) apply csymbr apply (simp add: cap_get_tag_isCap) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -3866,7 +3850,6 @@ lemma deriveCap_ccorres': cap_get_tag_isArchCap_unfolded_H_cap) done - lemma deriveCap_ccorres: "ccorres (syscall_error_rel \ ccap_relation) deriveCap_xf (invs') (UNIV \ {s. ccap_relation cap (cap_' s)} \ {s. 
slot_' s = Ptr slot}) [] diff --git a/proof/crefine/X64/CSpace_RAB_C.thy b/proof/crefine/X64/CSpace_RAB_C.thy index ce75b16849..c1cf31bfc5 100644 --- a/proof/crefine/X64/CSpace_RAB_C.thy +++ b/proof/crefine/X64/CSpace_RAB_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -53,7 +54,7 @@ lemma ccorres_remove_bind_returnOk_noguard: apply clarsimp apply (drule not_snd_bindE_I1) apply (erule (4) ccorresE[OF ac]) - apply (clarsimp simp add: bindE_def returnOk_def NonDetMonad.lift_def bind_def return_def + apply (clarsimp simp add: bindE_def returnOk_def Nondet_Monad.lift_def bind_def return_def split_def) apply (rule bexI [rotated], assumption) apply (simp add: throwError_def return_def unif_rrel_def @@ -140,7 +141,8 @@ lemma ccorres_locateSlotCap_push: apply (rule monadic_rewrite_bindE[OF monadic_rewrite_refl]) apply (rule monadic_rewrite_transverse) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_stateAssert) + apply (rule monadic_rewrite_stateAssert[where f="return", simplified]) + apply (rule monadic_rewrite_refl) apply simp apply (rule monadic_rewrite_refl) apply assumption @@ -207,10 +209,8 @@ next apply (simp add: cap_get_tag_isCap split del: if_split) apply (thin_tac "ret__unsigned_longlong = X" for X) apply (rule ccorres_split_throws [where P = "?P"]) - apply (rule_tac G' = "\w_rightsMask. ({s. nodeCap_' s = nodeCap} - \ {s. unat (n_bits_' s) = guard'})" - in ccorres_abstract [where xf' = w_rightsMask_']) - apply (rule ceqv_refl) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap} \ {s. unat (n_bits_' s) = guard'}" + in ccorres_inst) apply (rule_tac r' = "?rvr" in ccorres_rel_imp [where xf' = rab_xf]) defer @@ -222,7 +222,7 @@ next apply (vcg strip_guards=true) \ \takes a while\ apply clarsimp apply simp - apply (clarsimp simp: cap_get_tag_isCap to_bool_def) + apply (clarsimp simp: cap_get_tag_isCap) \ \Main thm\ proof (induct cap' cptr' guard' rule: resolveAddressBits.induct [case_names ind]) case (ind cap cptr guard) @@ -562,8 +562,8 @@ lemma rightsFromWord_spec: \seL4_CapRights_lift \ret__struct_seL4_CapRights_C = cap_rights_from_word_canon \<^bsup>s\<^esup>w \" apply vcg apply (simp add: seL4_CapRights_lift_def nth_shiftr mask_shift_simps nth_shiftr - cap_rights_from_word_canon_def from_bool_def word_and_1 eval_nat_numeral - word_sless_def word_sle_def) + cap_rights_from_word_canon_def word_and_1 eval_nat_numeral + word_sless_def word_sle_def) done @@ -578,12 +578,6 @@ lemma cap_rights_to_H_from_word_canon [simp]: apply (simp add: cap_rights_to_H_def) done -(* MOVE *) -lemma to_bool_false [simp]: - "to_bool false = False" - unfolding to_bool_def false_def - by simp - lemma tcb_ptr_to_ctcb_ptr_mask [simp]: assumes tcbat: "tcb_at' thread s" shows "ptr_val (tcb_ptr_to_ctcb_ptr thread) && ~~ mask tcbBlockSizeBits = thread" diff --git a/proof/crefine/X64/Ctac_lemmas_C.thy b/proof/crefine/X64/Ctac_lemmas_C.thy index 3fd06ef556..2dd3547fc5 100644 --- a/proof/crefine/X64/Ctac_lemmas_C.thy +++ b/proof/crefine/X64/Ctac_lemmas_C.thy @@ -23,7 +23,7 @@ lemma c_guard_abs_cte: apply (simp add: typ_heap_simps') done -lemmas ccorres_move_c_guard_cte [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] +lemmas ccorres_move_c_guard_cte [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] lemma c_guard_abs_tcb: fixes p :: "tcb_C ptr" @@ -33,7 +33,7 @@ lemma c_guard_abs_tcb: apply simp done -lemmas ccorres_move_c_guard_tcb [corres_pre] = ccorres_move_c_guards [OF 
c_guard_abs_tcb] +lemmas ccorres_move_c_guard_tcb [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] lemma cte_array_relation_array_assertion: "gsCNodes s p = Some n \ cte_array_relation s cstate @@ -96,7 +96,7 @@ lemma array_assertion_abs_tcb_ctes_add': lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] @@ -119,7 +119,7 @@ lemma c_guard_abs_tcb_ctes': done lemmas c_guard_abs_tcb_ctes = c_guard_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_c_guard_tcb_ctes [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] +lemmas ccorres_move_c_guard_tcb_ctes [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] lemma c_guard_abs_pte: "\s s'. (s, s') \ rf_sr \ pte_at' (ptr_val p) s \ True diff --git a/proof/crefine/X64/Delete_C.thy b/proof/crefine/X64/Delete_C.thy index 7712ebf5ea..a37d66fe95 100644 --- a/proof/crefine/X64/Delete_C.thy +++ b/proof/crefine/X64/Delete_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -147,7 +148,7 @@ lemma capRemovable_spec: supply if_cong[cong] apply vcg apply (clarsimp simp: cap_get_tag_isCap(1-8)[THEN trans[OF eq_commute]]) - apply (simp add: capRemovable_def from_bool_def[where b=True] true_def) + apply (simp add: capRemovable_def) apply (clarsimp simp: ccap_zombie_radix_less4) apply (subst eq_commute, subst from_bool_eq_if) apply (rule exI, rule conjI, assumption) @@ -228,7 +229,7 @@ lemma cteDelete_ccorres1: apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg apply wp - apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R) + apply (rule_tac Q'="\rv. invs'" in hoare_strengthen_postE_R) apply (wp cutMon_validE_drop finaliseSlot_invs) apply fastforce apply (auto simp: cintr_def) @@ -305,7 +306,7 @@ lemma cteDelete_invs'': "\invs' and sch_act_simple and (\s. ex \ ex_cte_cap_to' ptr s)\ cteDelete ptr ex \\rv. invs'\" apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (unfold validE_R_def) apply (rule use_spec) apply (rule spec_valid_conj_liftE1) @@ -641,14 +642,14 @@ lemma reduceZombie_ccorres1: apply (clarsimp simp: throwError_def return_def cintr_def) apply vcg apply (wp cutMon_validE_drop) - apply (rule_tac Q'="\rv. invs' and cte_at' slot and valid_cap' cap" in hoare_post_imp_R) + apply (rule_tac Q'="\rv. 
invs' and cte_at' slot and valid_cap' cap" in hoare_strengthen_postE_R) apply (wp cteDelete_invs'') apply (clarsimp simp: cte_wp_at_ctes_of) apply (fastforce dest: ctes_of_valid') apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (simp add: guard_is_UNIV_def Collect_const_mem) - apply (clarsimp simp: from_bool_def false_def isCap_simps size_of_def cte_level_bits_def) + apply (clarsimp simp: isCap_simps size_of_def cte_level_bits_def) apply (simp only: word_bits_def unat_of_nat unat_arith_simps, simp) apply (simp add: guard_is_UNIV_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) @@ -730,8 +731,7 @@ lemma finaliseSlot_ccorres: apply (rule ccorres_drop_cutMon) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - from_bool_def true_def ccap_relation_NullCap_iff) + apply (clarsimp simp: returnOk_def return_def ccap_relation_NullCap_iff) apply (simp add: Collect_True liftE_bindE split_def ccorres_cond_iffs cutMon_walk_bind del: Collect_const cong: call_ignore_cong) @@ -768,8 +768,7 @@ lemma finaliseSlot_ccorres: apply (rule_tac P="\s. cleanup_info_wf' (snd rvb)" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - from_bool_def true_def) + apply (clarsimp simp: returnOk_def return_def) apply (clarsimp simp: cleanup_info_wf'_def arch_cleanup_info_wf'_def split: if_split capability.splits) apply vcg @@ -806,11 +805,11 @@ lemma finaliseSlot_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def) apply (drule use_valid [OF _ finaliseCap_cases, OF _ TrueI]) - apply (simp add: from_bool_def false_def irq_opt_relation_def true_def + apply (simp add: irq_opt_relation_def split: if_split_asm) apply vcg apply wp - apply (simp add: guard_is_UNIV_def true_def) + apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) apply (simp only: liftE_bindE cutMon_walk_bind Let_def @@ -835,7 +834,6 @@ lemma finaliseSlot_ccorres: in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) - apply (simp add: from_bool_def false_def) apply fastforce apply ceqv apply (simp only: from_bool_0 simp_thms Collect_False @@ -858,7 +856,7 @@ lemma finaliseSlot_ccorres: ccorres_seq_skip) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) apply (rule hyps[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def], - unfolded split_def, unfolded K_def], + unfolded split_def], (simp add: in_monad)+) apply (simp add: from_bool_0) apply simp @@ -880,7 +878,7 @@ lemma finaliseSlot_ccorres: apply (simp add: guard_is_UNIV_def) apply (simp add: conj_comms) apply (wp make_zombie_invs' updateCap_cte_wp_at_cases - updateCap_cap_to' hoare_vcg_disj_lift static_imp_wp)+ + updateCap_cap_to' hoare_vcg_disj_lift hoare_weak_lift_imp)+ apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) @@ -910,11 +908,11 @@ lemma finaliseSlot_ccorres: simp: isCap_simps final_matters'_def o_def) apply clarsimp apply (frule valid_globals_cte_wpD'[rotated], clarsimp) - apply (clarsimp simp: cte_wp_at_ctes_of false_def from_bool_def) + apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (frule valid_global_refsD_with_objSize, clarsimp) apply (auto simp: typ_heap_simps dest!: ccte_relation_ccap_relation)[1] - apply (wp isFinalCapability_inv static_imp_wp | wp (once) 
isFinal[where x=slot'])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | wp (once) isFinal[where x=slot'])+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -1009,26 +1007,23 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_drop_cutMon_bindE) apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg) add: cteDelete_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE) apply (rule ccorres_drop_cutMon_bindE) apply (ctac(no_vcg) add: preemptionPoint_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) - apply (rule hyps[unfolded K_def], - (fastforce simp: in_monad)+)[1] + apply (rule hyps; fastforce simp: in_monad) apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp preemptionPoint_invR) apply simp apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp cteDelete_invs' cteDelete_sch_act_simple) apply (rule ccorres_cond_false) @@ -1036,9 +1031,8 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def) - apply (simp add: guard_is_UNIV_def from_bool_def true_def cintr_def - Collect_const_mem exception_defs) - apply (simp add: guard_is_UNIV_def from_bool_def true_def) + apply (simp add: guard_is_UNIV_def cintr_def Collect_const_mem exception_defs) + apply (simp add: guard_is_UNIV_def) apply (rule getCTE_wp) apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def) apply (drule invs_mdb') diff --git a/proof/crefine/X64/Detype_C.thy b/proof/crefine/X64/Detype_C.thy index 7925e7625c..eae4571c82 100644 --- a/proof/crefine/X64/Detype_C.thy +++ b/proof/crefine/X64/Detype_C.thy @@ -123,16 +123,6 @@ lemma h_t_valid_typ_region_bytes: by (simp add: valid_footprint_typ_region_bytes[OF neq_byte] size_of_def) -lemma proj_d_lift_state_hrs_htd_update [simp]: - "proj_d (lift_state (hrs_htd_update f hp)) = f (hrs_htd hp)" - by (cases hp) (simp add: hrs_htd_update_def proj_d_lift_state hrs_htd_def) - -lemma proj_d_lift_state_hrs_htd [simp]: - "proj_d (lift_state hp), g \\<^sub>t x = hrs_htd hp, g \\<^sub>t x" - apply (cases hp) - apply (simp add: proj_d_lift_state hrs_htd_def) - done - lemma heap_list_s_heap_list': fixes p :: "'a :: c_type ptr" shows "hrs_htd hp,\ \\<^sub>t p \ @@ -1484,14 +1474,6 @@ lemma map_comp_restrict_map: "(f \\<^sub>m (restrict_map m S)) = (restrict_map (f \\<^sub>m m) S)" by (rule ext, simp add: restrict_map_def map_comp_def) -lemma size_td_uinfo_array_tag_n_m[simp]: - "size_td (uinfo_array_tag_n_m (ta :: ('a :: c_type) itself) n m) - = size_of (TYPE('a)) * n" - apply (induct n) - apply (simp add: uinfo_array_tag_n_m_def) - apply (simp add: uinfo_array_tag_n_m_def size_of_def) - done - lemma modify_machinestate_assert_cnodes_swap: "do x \ modify (ksMachineState_update f); y \ stateAssert (\s. 
\ cNodePartialOverlap (gsCNodes s) S) []; g od
@@ -1560,13 +1542,13 @@ lemma deleteObjects_ccorres':
                        doMachineOp_modify modify_modify o_def ksPSpace_ksMSu_comm
                        bind_assoc modify_machinestate_assert_cnodes_swap
                        modify_modify_bind)
-    apply (rule ccorres_stateAssert_fwd)
+    apply (rule ccorres_stateAssert_fwd)+
     apply (rule ccorres_stateAssert_after)
     apply (rule ccorres_from_vcg)
     apply (rule allI, rule conseqPre, vcg)
     apply (clarsimp simp: in_monad)
     apply (rule bexI [rotated])
-     apply (rule iffD2 [OF in_monad(20)])
+     apply (rule iffD2 [OF in_monad(21)])
      apply (rule conjI [OF refl refl])
     apply (clarsimp simp: simpler_modify_def)
   proof -
@@ -1704,36 +1686,10 @@ proof -
     done
 
   moreover
-  from invs have "valid_queues s" ..
-  hence "\p. \t \ set (ksReadyQueues s p). tcb_at' t s \ ko_wp_at' live' t s"
-    apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def)
-    apply (drule spec, drule spec)
-    apply clarsimp
-    apply (drule (1) bspec)
-    apply (rule conjI)
-     apply (erule obj_at'_weakenE)
-     apply simp
-    apply (simp add: obj_at'_real_def)
-    apply (erule ko_wp_at'_weakenE)
-    apply (clarsimp simp: live'_def projectKOs inQ_def)
-    done
-  hence tat: "\p. \t \ set (ksReadyQueues s p). tcb_at' t s"
-    and tlive: "\p. \t \ set (ksReadyQueues s p). ko_wp_at' live' t s"
-    by auto
   from sr have
-    "cready_queues_relation (clift ?th_s)
-       (ksReadyQueues_' (globals s')) (ksReadyQueues s)"
-    unfolding cready_queues_relation_def rf_sr_def cstate_relation_def
-              cpspace_relation_def
-    apply (clarsimp simp: Let_def all_conj_distrib)
-    apply (drule spec, drule spec, drule mp)
-     apply fastforce
-    apply ((subst lift_t_typ_region_bytes, rule cm_disj_tcb, assumption+,
-            simp_all add: objBits_simps archObjSize_def pageBits_def projectKOs)[1])+
-    \ \waiting ...\
-    apply (simp add: tcb_queue_relation_live_restrict
-                     [OF D.valid_untyped tat tlive rl])
-    done
+    "cready_queues_relation (ksReadyQueues s) (ksReadyQueues_' (globals s'))"
+    unfolding cready_queues_relation_def rf_sr_def cstate_relation_def cpspace_relation_def
+    by (clarsimp simp: Let_def all_conj_distrib)
   moreover
 
   from cs have clift:
diff --git a/proof/crefine/X64/Finalise_C.thy b/proof/crefine/X64/Finalise_C.thy
index f858a4f40d..1cd280bf5e 100644
--- a/proof/crefine/X64/Finalise_C.thy
+++ b/proof/crefine/X64/Finalise_C.thy
@@ -1,4 +1,5 @@
 (*
+ * Copyright 2023, Proofcraft Pty Ltd
  * Copyright 2014, General Dynamics C4 Systems
  *
  * SPDX-License-Identifier: GPL-2.0-only
@@ -16,6 +17,108 @@ declare if_split [split del]
 definition "option_map2 f m = option_map f \ m"
 
+definition ksReadyQueues_head_end_2 :: "(domain \ priority \ ready_queue) \ bool" where
+  "ksReadyQueues_head_end_2 qs \
+     \d p.
tcbQueueHead (qs (d, p)) \ None \ tcbQueueEnd (qs (d, p)) \ None" + +abbreviation "ksReadyQueues_head_end s \ ksReadyQueues_head_end_2 (ksReadyQueues s)" + +lemmas ksReadyQueues_head_end_def = ksReadyQueues_head_end_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end: + "ksReadyQueues_asrt s \ ksReadyQueues_head_end s" + by (fastforce dest: tcbQueueHead_iff_tcbQueueEnd + simp: ready_queue_relation_def ksReadyQueues_asrt_def ksReadyQueues_head_end_def) + +lemma tcbSchedEnqueue_ksReadyQueues_head_end[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: tcbQueueEmpty_def obj_at'_def ksReadyQueues_head_end_def split: if_splits) + done + +lemma ksReadyQueues_head_end_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end (s\ksSchedulerAction := ChooseNewThread\) = ksReadyQueues_head_end s" + by (simp add: ksReadyQueues_head_end_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + +lemma setThreadState_ksReadyQueues_head_end[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end\" + unfolding setThreadState_def + by (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + +definition ksReadyQueues_head_end_tcb_at'_2 :: + "(domain \ priority \ ready_queue) \ (obj_ref \ tcb) \ bool" where + "ksReadyQueues_head_end_tcb_at'_2 qs tcbs \ + \d p. (\head. tcbQueueHead (qs (d, p)) = Some head \ tcbs head \ None) + \ (\end. tcbQueueEnd (qs (d, p)) = Some end \ tcbs end \ None)" + +abbreviation "ksReadyQueues_head_end_tcb_at' s \ + ksReadyQueues_head_end_tcb_at'_2 (ksReadyQueues s) (tcbs_of' s)" + +lemmas ksReadyQueues_head_end_tcb_at'_def = ksReadyQueues_head_end_tcb_at'_2_def + +lemma ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at': + "\ksReadyQueues_asrt s; pspace_aligned' s; pspace_distinct' s\ + \ ksReadyQueues_head_end_tcb_at' s" + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def + ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI allI) + apply (case_tac "ts = []", clarsimp) + apply (fastforce dest!: heap_path_head hd_in_set + simp: opt_pred_def tcbQueueEmpty_def split: option.splits) + apply (fastforce simp: queue_end_valid_def opt_pred_def tcbQueueEmpty_def + split: option.splits) + done + +lemma tcbSchedEnqueue_ksReadyQueues_head_end_tcb_at'[wp]: + "tcbSchedEnqueue tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def + apply (wpsimp wp: threadSet_wp threadGet_wp simp: bitmap_fun_defs) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma ksReadyQueues_head_end_tcb_at'_ksSchedulerAction_update[simp]: + "ksReadyQueues_head_end_tcb_at' (s\ksSchedulerAction := ChooseNewThread\) + = ksReadyQueues_head_end_tcb_at' s" + by (simp add: ksReadyQueues_head_end_tcb_at'_def) + +crunches rescheduleRequired + for ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + +lemma setThreadState_ksReadyQueues_head_end_tcb_at'[wp]: + "setThreadState ts tcbPtr \ksReadyQueues_head_end_tcb_at'\" + unfolding setThreadState_def + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: ksReadyQueues_head_end_tcb_at'_def split: if_splits) + done + +lemma head_end_ksReadyQueues_': + "\ (s, s') \ rf_sr; ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; 
+ pspace_aligned' s; pspace_distinct' s; + d \ maxDomain; p \ maxPriority \ + \ head_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL + \ end_C (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p)) = NULL" + apply (frule (2) rf_sr_ctcb_queue_relation[where d=d and p=p]) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: option.splits) + apply (rename_tac "end" head end_tcb head_tcb) + apply (prop_tac "tcb_at' head s \ tcb_at' end s") + apply (fastforce intro!: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (fastforce dest: tcb_at_not_NULL) + done + lemma tcbSchedEnqueue_cslift_spec: "\s. \\\<^bsub>/UNIV\<^esub> \s. \d v. option_map2 tcbPriority_C (cslift s) \tcb = Some v \ unat v \ numPriorities @@ -27,7 +130,9 @@ lemma tcbSchedEnqueue_cslift_spec: \ None \ option_map2 tcbDomain_C (cslift s) (head_C (index \ksReadyQueues (unat (d*0x100 + v)))) - \ None)\ + \ None) + \ (head_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL + \ end_C (index \ksReadyQueues (unat (d * 0x100 + v))) \ NULL)\ Call tcbSchedEnqueue_'proc {s'. option_map2 tcbEPNext_C (cslift s') = option_map2 tcbEPNext_C (cslift s) \ option_map2 tcbEPPrev_C (cslift s') = option_map2 tcbEPPrev_C (cslift s) @@ -44,8 +149,8 @@ lemma tcbSchedEnqueue_cslift_spec: apply (rule conjI) apply (clarsimp simp: typ_heap_simps cong: if_cong) apply (simp split: if_split) - apply (clarsimp simp: typ_heap_simps if_Some_helper cong: if_cong) - by (simp split: if_split) + by (auto simp: typ_heap_simps' if_Some_helper numPriorities_def + cong: if_cong split: if_splits) lemma setThreadState_cslift_spec: "\s. \\\<^bsub>/UNIV\<^esub> \s. s \\<^sub>c \tptr \ (\x. ksSchedulerAction_' (globals s) = tcb_Ptr x @@ -141,8 +246,9 @@ lemma ctcb_relation_tcbPriority_maxPriority_numPriorities: done lemma tcbSchedEnqueue_cslift_precond_discharge: - "\ (s, s') \ rf_sr; obj_at' (P :: tcb \ bool) x s; - valid_queues s; valid_objs' s \ \ + "\ (s, s') \ rf_sr; obj_at' (P :: tcb \ bool) x s; valid_objs' s ; + ksReadyQueues_head_end s; ksReadyQueues_head_end_tcb_at' s; + pspace_aligned' s; pspace_distinct' s\ \ (\d v. option_map2 tcbPriority_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some v \ unat v < numPriorities \ option_map2 tcbDomain_C (cslift s') (tcb_ptr_to_ctcb_ptr x) = Some d @@ -153,31 +259,49 @@ lemma tcbSchedEnqueue_cslift_precond_discharge: \ None \ option_map2 tcbDomain_C (cslift s') (head_C (index (ksReadyQueues_' (globals s')) (unat (d*0x100 + v)))) - \ None))" + \ None) + \ (head_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL + \ end_C (index (ksReadyQueues_' (globals s')) (unat (d * 0x100 + v))) \ NULL))" apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps' option_map2_def) + apply (rename_tac tcb tcb') apply (frule_tac t=x in valid_objs'_maxPriority, fastforce simp: obj_at'_def) apply (frule_tac t=x in valid_objs'_maxDomain, fastforce simp: obj_at'_def) apply (drule_tac P="\tcb. tcbPriority tcb \ maxPriority" in obj_at_ko_at2', simp) apply (drule_tac P="\tcb. 
tcbDomain tcb \ maxDomain" in obj_at_ko_at2', simp) apply (simp add: ctcb_relation_tcbDomain_maxDomain_numDomains ctcb_relation_tcbPriority_maxPriority_numPriorities) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_ctcb_queue_relation) apply (simp add: maxDom_to_H maxPrio_to_H)+ + apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in head_end_ksReadyQueues_', fastforce+) apply (simp add: cready_queues_index_to_C_def2 numPriorities_def le_maxDomain_eq_less_numDomains) apply (clarsimp simp: ctcb_relation_def) apply (frule arg_cong[where f=unat], subst(asm) unat_ucast_up_simp, simp) - apply (frule tcb_queue'_head_end_NULL) - apply (erule conjunct1[OF valid_queues_valid_q]) - apply (frule(1) tcb_queue_relation_qhead_valid') - apply (simp add: valid_queues_valid_q) - apply (clarsimp simp: h_t_valid_clift_Some_iff) + apply (frule (3) head_end_ksReadyQueues_', fastforce+) + apply (clarsimp simp: ksReadyQueues_head_end_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (prop_tac "\ tcbQueueEmpty ((ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)))") + apply (clarsimp simp: tcbQueueEmpty_def ctcb_queue_relation_def option_to_ctcb_ptr_def + split: option.splits) + apply (clarsimp simp: ksReadyQueues_head_end_tcb_at'_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (clarsimp simp: tcbQueueEmpty_def) + apply (rename_tac head "end" head_tcb end_tcb) + apply (prop_tac "tcb_at' head s") + apply (fastforce intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def split: option.splits) + apply (frule_tac thread=head in obj_at_cslift_tcb) + apply fastforce + apply (clarsimp dest: obj_at_cslift_tcb simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) done lemma cancel_all_ccorres_helper: "ccorres dc xfdc - (\s. valid_objs' s \ valid_queues s + (\s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s \ (\t\set ts. tcb_at' t s \ t \ 0) \ sch_act_wf (ksSchedulerAction s) s) {s'. \p. ep_queue_relation (cslift s') ts @@ -200,8 +324,7 @@ proof (induct ts) apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (rule ccorres_tmp_lift2[where G'=UNIV and G''="\x. 
UNIV", simplified]) apply ceqv - apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def - dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip) apply simp done @@ -210,7 +333,7 @@ next show ?case apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (simp del: Collect_const - add: dc_def[symmetric] mapM_x_Cons) + add: mapM_x_Cons) apply (rule ccorres_guard_imp2) apply (rule_tac xf'=thread_' in ccorres_abstract) apply ceqv @@ -233,17 +356,15 @@ next apply (erule cmap_relationE1 [OF cmap_relation_tcb]) apply (erule ko_at_projectKO_opt) apply (fastforce intro: typ_heap_simps) - apply (wp sts_running_valid_queues | simp)+ + apply (wp sts_valid_objs' | simp)+ apply (rule ceqv_refl) apply (rule "Cons.hyps") apply (wp sts_valid_objs' sts_sch_act sch_act_wf_lift hoare_vcg_const_Ball_lift - sts_running_valid_queues sts_st_tcb' setThreadState_oa_queued | simp)+ + sts_st_tcb' | simp)+ apply (vcg exspec=setThreadState_cslift_spec exspec=tcbSchedEnqueue_cslift_spec) - apply (clarsimp simp: tcb_at_not_NULL - Collect_const_mem valid_tcb_state'_def - ThreadState_Restart_def mask_def - valid_objs'_maxDomain valid_objs'_maxPriority) + apply (clarsimp simp: tcb_at_not_NULL Collect_const_mem valid_tcb_state'_def + ThreadState_defs mask_def valid_objs'_maxDomain valid_objs'_maxPriority) apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (rule conjI) @@ -253,16 +374,13 @@ next st_tcb_at'_def split: scheduler_action.split_asm) apply (rename_tac word) - apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge) - apply simp - apply clarsimp - apply clarsimp - apply clarsimp + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; clarsimp?) + apply simp apply clarsimp apply (rule conjI) apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge) apply clarsimp - apply clarsimp + apply clarsimp+ apply (subst ep_queue_relation_shift, fastforce) apply (drule_tac x="tcb_ptr_to_ctcb_ptr thread" in fun_cong)+ @@ -271,17 +389,23 @@ next done qed +crunches setEndpoint, setNotification + for ksReadyQueues_head_end[wp]: ksReadyQueues_head_end + and ksReadyQueues_head_end_tcb_at'[wp]: ksReadyQueues_head_end_tcb_at' + (simp: updateObject_default_def) + lemma cancelAllIPC_ccorres: "ccorres dc xfdc - (invs') (UNIV \ {s. epptr_' s = Ptr epptr}) [] + invs' (UNIV \ {s. 
epptr_' s = Ptr epptr}) [] (cancelAllIPC epptr) (Call cancelAllIPC_'proc)" apply (cinit lift: epptr_') + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l [OF _ getEndpoint_inv _ empty_fail_getEndpoint]) apply (rule_tac xf'=ret__unsigned_longlong_' - and val="case rv of IdleEP \ scast EPState_Idle + and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv | SendEP _ \ scast EPState_Send" - and R="ko_at' rv epptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ep]) @@ -290,8 +414,8 @@ lemma cancelAllIPC_ccorres: apply (simp add: cendpoint_relation_def Let_def split: endpoint.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv epptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ep epptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc apply (rename_tac list) apply (simp add: endpoint_state_defs @@ -326,29 +450,26 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear - cancelAllIPC_mapM_x_valid_queues | simp)+ apply (rule mapM_x_wp', wp)+ apply (wp sts_st_tcb') apply (clarsimp split: if_split) - apply (rule mapM_x_wp', wp)+ + apply (rule mapM_x_wp', wp sts_valid_objs')+ apply (clarsimp simp: valid_tcb_state'_def) apply (simp add: guard_is_UNIV_def) apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: endpoint_state_defs - Collect_False Collect_True - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: endpoint_state_defs Collect_False Collect_True ccorres_cond_iffs del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -378,23 +499,26 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) - apply (wp cancelAllIPC_mapM_x_valid_queues) - apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ apply (simp add: guard_is_UNIV_def) apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (clarsimp simp: valid_ep'_def invs_valid_objs' invs_queues) + apply (clarsimp simp: valid_ep'_def invs_valid_objs') apply (rule cmap_relationE1[OF cmap_relation_ep], assumption) apply (erule ko_at_projectKO_opt) apply (frule obj_at_valid_objs', clarsimp+) apply (clarsimp simp: projectKOs valid_obj'_def valid_ep'_def) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) 
ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') subgoal by (auto simp: typ_heap_simps cendpoint_relation_def Let_def tcb_queue_relation'_def invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority @@ -404,22 +528,18 @@ lemma cancelAllIPC_ccorres: apply clarsimp done -lemma empty_fail_getNotification: - "empty_fail (getNotification ep)" - unfolding getNotification_def - by (auto intro: empty_fail_getObject) - lemma cancelAllSignals_ccorres: "ccorres dc xfdc - (invs') (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr}) [] + invs' (UNIV \ {s. ntfnPtr_' s = Ptr ntfnptr}) [] (cancelAllSignals ntfnptr) (Call cancelAllSignals_'proc)" apply (cinit lift: ntfnPtr_') + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule_tac xf'=ret__unsigned_longlong_' - and val="case ntfnObj rv of IdleNtfn \ scast NtfnState_Idle + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle | ActiveNtfn _ \ scast NtfnState_Active | WaitingNtfn _ \ scast NtfnState_Waiting" - and R="ko_at' rv ntfnptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ntfn]) @@ -428,18 +548,15 @@ lemma cancelAllSignals_ccorres: apply (simp add: cnotification_relation_def Let_def split: ntfn.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv ntfnptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ksReadyQueues_asrt and ko_at' ntfn ntfnptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric] Collect_True + apply (simp add: notification_state_defs ccorres_cond_iffs Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -447,8 +564,8 @@ lemma cancelAllSignals_ccorres: apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) - apply (rule_tac P="ko_at' rv ntfnptr and invs'" - in ccorres_from_vcg[where P'=UNIV]) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp apply (rule_tac x=ntfnptr in cmap_relationE1 [OF cmap_relation_ntfn], assumption) @@ -467,13 +584,12 @@ lemma cancelAllSignals_ccorres: subgoal by (simp add: cnotification_relation_def notification_state_defs Let_def) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv apply (ctac add: rescheduleRequired_ccorres) - apply (wp cancelAllIPC_mapM_x_valid_queues) - apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear + apply (wp mapM_x_wp' weak_sch_act_wf_lift_linear sts_valid_objs' sts_st_tcb' | clarsimp simp: valid_tcb_state'_def split: if_split)+ apply (simp add: guard_is_UNIV_def) apply (wp set_ntfn_valid_objs' hoare_vcg_const_Ball_lift @@ -484,10 +600,14 @@ lemma cancelAllSignals_ccorres: apply (erule 
ko_at_projectKO_opt) apply (frule obj_at_valid_objs', clarsimp+) apply (clarsimp simp add: valid_obj'_def valid_ntfn'_def projectKOs) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') subgoal by (auto simp: typ_heap_simps cnotification_relation_def - Let_def tcb_queue_relation'_def - invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority - intro!: obj_at_conj') + Let_def tcb_queue_relation'_def + invs_valid_objs' valid_objs'_maxDomain valid_objs'_maxPriority + intro!: obj_at_conj') apply (clarsimp simp: guard_is_UNIV_def) apply (wp getNotification_wp) apply clarsimp @@ -568,16 +688,16 @@ lemma tcb_queue_relation2_cong: context kernel_m begin -lemma setThreadState_ccorres_valid_queues'_simple: - "ccorres dc xfdc (\s. tcb_at' thread s \ valid_queues' s \ \ runnable' st \ sch_act_simple s) +lemma setThreadState_ccorres_simple: + "ccorres dc xfdc (\s. tcb_at' thread s \ \ runnable' st \ sch_act_simple s) ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] (setThreadState st thread) (Call setThreadState_'proc)" apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres_valid_queues'_simple) - apply (wp threadSet_valid_queues'_and_not_runnable') - apply (clarsimp simp: weak_sch_act_wf_def valid_queues'_def) + apply (wp threadSet_tcbState_st_tcb_at') + apply (fastforce simp: weak_sch_act_wf_def) done lemma updateRestartPC_ccorres: @@ -593,9 +713,7 @@ lemma updateRestartPC_ccorres: done crunches updateRestartPC - for valid_queues'[wp]: valid_queues' - and sch_act_simple[wp]: sch_act_simple - and valid_queues[wp]: Invariants_H.valid_queues + for sch_act_simple[wp]: sch_act_simple and valid_objs'[wp]: valid_objs' and tcb_at'[wp]: "tcb_at' p" @@ -635,25 +753,16 @@ lemma suspend_ccorres: apply clarsimp apply (rule iffI) apply simp - apply (erule thread_state_to_tsType.elims; simp add: StrictC'_thread_state_defs) + apply (erule thread_state_to_tsType.elims; simp add: ThreadState_defs) apply (ctac (no_vcg) add: updateRestartPC_ccorres) apply (rule ccorres_return_Skip) apply ceqv - apply (ctac(no_vcg) add: setThreadState_ccorres_valid_queues'_simple) - apply (ctac add: tcbSchedDequeue_ccorres') - apply (rule_tac Q="\_. - (\s. \t' d p. (t' \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d - \ tcbPriority tcb = p) t' s \ - (t' \ thread \ st_tcb_at' runnable' t' s)) \ - distinct (ksReadyQueues s (d, p))) and valid_queues' and valid_objs' and tcb_at' thread" - in hoare_post_imp) + apply (ctac(no_vcg) add: setThreadState_ccorres_simple) + apply (ctac add: tcbSchedDequeue_ccorres) + apply (rule_tac Q="\_. 
valid_objs' and tcb_at' thread and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) apply clarsimp - apply (drule_tac x="t" in spec) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp elim!: obj_at'_weakenE simp: inQ_def) - apply (wp sts_valid_queues_partial)[1] + apply (wp sts_valid_objs')[1] apply clarsimp apply (wpsimp simp: valid_tcb_state'_def) apply clarsimp @@ -668,9 +777,8 @@ lemma suspend_ccorres: apply (rule cancelIPC_sch_act_simple) apply (rule cancelIPC_tcb_at'[where t=thread]) apply (rule delete_one_conc_fr.cancelIPC_invs) - apply (fastforce simp: invs_valid_queues' invs_queues invs_valid_objs' - valid_tcb_state'_def) - apply (auto simp: "StrictC'_thread_state_defs") + apply (fastforce simp: invs_valid_objs' valid_tcb_state'_def) + apply (auto simp: ThreadState_defs) done lemma cap_to_H_NTFNCap_tag: @@ -693,8 +801,8 @@ lemma doUnbindNotification_ccorres: (Call doUnbindNotification_'proc)" apply (cinit' lift: ntfnPtr_' tcbptr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr" and P'=UNIV - in ccorres_split_nothrow_novcg) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: option_to_ptr_def option_to_0_def) @@ -713,7 +821,7 @@ lemma doUnbindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv", ((simp add: option_to_ctcb_ptr_def)+)[4]) + apply (case_tac "ntfnObj ntfn", ((simp add: option_to_ctcb_ptr_def)+)[4]) subgoal by (simp add: carch_state_relation_def global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps) @@ -726,7 +834,7 @@ lemma doUnbindNotification_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -778,7 +886,7 @@ lemma doUnbindNotification_ccorres': apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -813,9 +921,9 @@ lemma unbindNotification_ccorres: apply simp apply wpc apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (rule ccorres_cond_true) - apply (ctac (no_vcg) add: doUnbindNotification_ccorres[unfolded dc_def, simplified]) + apply (ctac (no_vcg) add: doUnbindNotification_ccorres[simplified]) apply (wp gbn_wp') apply vcg apply (clarsimp simp: option_to_ptr_def option_to_0_def pred_tcb_at'_def @@ -832,13 +940,13 @@ lemma unbindMaybeNotification_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule ccorres_rhs_assoc2) - apply (rule_tac P="ntfnBoundTCB rv \ None \ - option_to_ctcb_ptr (ntfnBoundTCB rv) \ NULL" - in ccorres_gen_asm) + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) apply (rule_tac xf'=boundTCB_' - and val="option_to_ctcb_ptr 
(ntfnBoundTCB rv)" - and R="ko_at' rv ntfnptr and valid_bound_tcb' (ntfnBoundTCB rv)" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and val="option_to_ctcb_ptr (ntfnBoundTCB ntfn)" + and R="ko_at' ntfn ntfnptr and valid_bound_tcb' (ntfnBoundTCB ntfn)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1[OF cmap_relation_ntfn]) @@ -878,7 +986,7 @@ lemma finaliseCap_True_cases_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap Collect_False del: Collect_const) apply (fold case_bool_If) - apply (simp add: false_def) + apply simp apply csymbr apply wpc apply (simp add: cap_get_tag_isCap ccorres_cond_univ_iff Let_def) @@ -1028,8 +1136,7 @@ lemma deleteASIDPool_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: asid_base_' pool_' simp: whileAnno_def) apply (rule ccorres_assert) - apply (clarsimp simp: liftM_def dc_def[symmetric] fun_upd_def[symmetric] - when_def + apply (clarsimp simp: liftM_def fun_upd_def[symmetric] when_def simp del: Collect_const) apply (rule ccorres_Guard)+ apply (rule ccorres_pre_gets_x86KSASIDTable_ksArchState) @@ -1098,7 +1205,7 @@ lemma deleteASIDPool_ccorres: apply (drule_tac x="of_nat n" in spec)+ apply (simp add: asid_low_bits_def word_le_nat_alt) apply (simp add: word_unat.Abs_inverse unats_def) - apply (clarsimp simp: asid_map_relation_def false_def asid_map_tag_defs + apply (clarsimp simp: asid_map_relation_def asid_map_tag_defs asid_map_lift_def Let_def split: option.split_asm asid_map_CL.split_asm if_splits) apply (rule ccorres_rhs_assoc)+ @@ -1191,12 +1298,10 @@ lemma deleteASID_ccorres: apply (simp add: asid_high_bits_def) apply ceqv apply wpc - apply (simp add: ccorres_cond_iffs dc_def[symmetric] - Collect_False + apply (simp add: ccorres_cond_iffs Collect_False cong: call_ignore_cong) apply (rule ccorres_return_Skip) - apply (clarsimp simp: dc_def[symmetric] when_def - liftM_def + apply (clarsimp simp: when_def liftM_def cong: conj_cong call_ignore_cong) apply ccorres_rewrite apply (rule ccorres_rhs_assoc)+ @@ -1223,7 +1328,7 @@ lemma deleteASID_ccorres: apply (simp add: asid_low_bits_def Kernel_C.asidLowBits_def mask_def word_and_le1 asid_map_relation_def) apply (rule conjI, fastforce simp: asid_map_lifts from_bool_def case_bool_If inv_ASIDPool) - apply (fastforce simp: from_bool_def case_bool_If inv_ASIDPool asid_map_lift_def + apply (fastforce simp: case_bool_If inv_ASIDPool asid_map_lift_def split: option.split_asm asid_map_CL.split_asm if_split_asm) apply ceqv apply (rule ccorres_cond2[where R=\]) @@ -1296,7 +1401,7 @@ lemma deleteASID_ccorres: lemma setObject_ccorres_lemma: fixes val :: "'a :: pspace_storable" shows - "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := ksPSpace s (ptr \ injectKO val) \, s') \ rf_sr},{}; + "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val) \, s') \ rf_sr},{}; \s s' val (val' :: 'a). \ ko_at' val' ptr s; (s, s') \ rf_sr \ \ s' \ Q s; \val :: 'a. 
updateObject val = updateObject_default val; @@ -1319,7 +1424,7 @@ lemma setObject_ccorres_lemma: apply (subgoal_tac "fst (setObject ptr val \) = {}") apply simp apply (erule notE, erule_tac s=\ in empty_failD[rotated]) - apply (simp add: setObject_def split_def) + apply (simp add: setObject_def split_def empty_fail_cond) apply (rule ccontr) apply (clarsimp elim!: nonemptyE) apply (frule use_valid [OF _ obj_at_setObject3[where P=\]], simp_all)[1] @@ -1462,16 +1567,16 @@ lemma unmapPageTable_ccorres: apply (simp add: from_bool_0) apply ccorres_rewrite apply (clarsimp simp: throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply (simp add: from_bool_0) - apply (rule ccorres_liftE[simplified dc_def]) + apply (rule ccorres_liftE', simp) apply (ctac add: flushTable_ccorres) apply (csymbr, rename_tac invalidPDE) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def) apply (csymbr, rename_tac root) - apply (ctac add: invalidatePageStructureCacheASID_ccorres[simplified dc_def]) + apply (ctac add: invalidatePageStructureCacheASID_ccorres) apply wp apply (clarsimp simp add: guard_is_UNIV_def) apply wp @@ -1479,14 +1584,14 @@ lemma unmapPageTable_ccorres: apply (vcg exspec=flushTable_modifies) apply (clarsimp simp: guard_is_UNIV_def) apply (simp,ccorres_rewrite,simp add:throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply (clarsimp,wp) - apply (rule_tac Q'="\_ s. invs' s \ page_table_at' ptPtr s" in hoare_post_imp_R) + apply (rule_tac Q'="\_ s. invs' s \ page_table_at' ptPtr s" in hoare_strengthen_postE_R) apply wp apply clarsimp apply (vcg exspec=lookupPDSlot_modifies) apply (simp,ccorres_rewrite,simp add:throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply wp apply vcg apply (auto simp add: asid_wf_def mask_def) @@ -1508,12 +1613,6 @@ lemma no_0_pml4_at'[elim!]: apply (drule spec[where x=0], clarsimp simp: bit_simps) done -lemma ccte_relation_ccap_relation: - "ccte_relation cte cte' \ ccap_relation (cteCap cte) (cte_C.cap_C cte')" - by (clarsimp simp: ccte_relation_def ccap_relation_def - cte_to_H_def map_option_Some_eq2 - c_valid_cte_def) - lemma isFinalCapability_ccorres: "ccorres ((=) \ from_bool) ret__unsigned_long_' (cte_wp_at' ((=) cte) slot and invs') @@ -1534,7 +1633,7 @@ lemma isFinalCapability_ccorres: apply (simp add: mdbPrev_to_H[symmetric]) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (simp add: return_def from_bool_def false_def) + apply (simp add: return_def) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_l[OF _ getCTE_inv getCTE_wp empty_fail_getCTE]) apply (rule_tac P="cte_wp_at' ((=) cte) slot @@ -1573,10 +1672,9 @@ lemma isFinalCapability_ccorres: apply (rule cmap_relationE1 [OF cmap_relation_cte], assumption+, simp?, simp add: typ_heap_simps)+ apply (drule ccte_relation_ccap_relation)+ - apply (auto simp: false_def true_def from_bool_def split: bool.splits)[1] + apply (auto simp: from_bool_def split: bool.splits)[1] apply (wp getCTE_wp') - apply (clarsimp simp add: guard_is_UNIV_def Collect_const_mem false_def - from_bool_0 true_def from_bool_def) + apply (clarsimp simp add: guard_is_UNIV_def Collect_const_mem) apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -1611,7 +1709,7 @@ lemma cteDeleteOne_ccorres: erule_tac t="ret__unsigned_longlong = scast 
cap_null_cap" and s="cteCap cte = NullCap" in ssubst) - apply (clarsimp simp only: when_def unless_def dc_def[symmetric]) + apply (clarsimp simp only: when_def unless_def) apply (rule ccorres_cond2[where R=\]) apply (clarsimp simp: Collect_const_mem) apply (rule ccorres_rhs_assoc)+ @@ -1622,25 +1720,23 @@ lemma cteDeleteOne_ccorres: apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot]) apply (rule_tac A="invs' and cte_wp_at' ((=) cte) slot" in ccorres_guard_imp2[where A'=UNIV]) - apply (simp add: split_def dc_def[symmetric] - del: Collect_const) + apply (simp add: split_def del: Collect_const) apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg) add: finaliseCap_True_standin_ccorres) apply (rule ccorres_assert) - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (ctac add: emptySlot_ccorres) apply (simp add: pred_conj_def finaliseCapTrue_standin_simple_def) apply (strengthen invs_mdb_strengthen' invs_urz) apply (wp typ_at_lifts isFinalCapability_inv | strengthen invs_valid_objs')+ - apply (clarsimp simp: from_bool_def true_def irq_opt_relation_def - invs_pspace_aligned' cte_wp_at_ctes_of) + apply (clarsimp simp: irq_opt_relation_def invs_pspace_aligned' cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (clarsimp simp: typ_heap_simps ccte_relation_ccap_relation ccap_relation_NullCap_iff) apply (wp isFinalCapability_inv) apply simp - apply (simp del: Collect_const add: false_def) + apply (simp del: Collect_const) apply (rule ccorres_return_Skip) apply (clarsimp simp: Collect_const_mem cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) @@ -1664,7 +1760,7 @@ lemma deletingIRQHandler_ccorres: (UNIV \ {s. irq_opt_relation (Some irq) (irq_' s)}) [] (deletingIRQHandler irq) (Call deletingIRQHandler_'proc)" apply (cinit lift: irq_' cong: call_ignore_cong) - apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def dc_def[symmetric] + apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def cong: call_ignore_cong ) apply (rule_tac r'="\rv rv'. 
rv' = Ptr rv" and xf'="slot_'" in ccorres_split_nothrow) @@ -1723,8 +1819,6 @@ lemma irq_opt_relation_Some_ucast: apply (clarsimp simp: word_le_nat_alt Kernel_C.maxIRQ_def) done -lemmas upcast_ucast_id = More_Word.ucast_up_inj - lemma irq_opt_relation_Some_ucast': "\ x && mask 8 = x; ucast x \ (scast Kernel_C.maxIRQ :: 8 word) \ x \ (scast Kernel_C.maxIRQ :: machine_word) \ \ irq_opt_relation (Some (ucast x)) (ucast x)" @@ -1752,7 +1846,7 @@ lemma option_to_ctcb_ptr_not_0: done lemma update_tcb_map_to_tcb: - "map_to_tcbs (ksPSpace s(p \ KOTCB tcb)) + "map_to_tcbs ((ksPSpace s)(p \ KOTCB tcb)) = (map_to_tcbs (ksPSpace s))(p \ tcb)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) @@ -1773,26 +1867,9 @@ lemma ep_queue_relation_shift2: apply (clarsimp split: option.split_asm) done -lemma sched_queue_relation_shift: - "(option_map2 tcbSchedNext_C (f (cslift s)) - = option_map2 tcbSchedNext_C (cslift s) - \ option_map2 tcbSchedPrev_C (f (cslift s)) - = option_map2 tcbSchedPrev_C (cslift s)) - \ sched_queue_relation (f (cslift s)) ts qPrev qHead - = sched_queue_relation (cslift s) ts qPrev qHead" - apply clarsimp - apply (induct ts arbitrary: qPrev qHead) - apply simp - apply simp - apply (simp add: option_map2_def fun_eq_iff - map_option_case) - apply (drule_tac x=qHead in spec)+ - apply (clarsimp split: option.split_asm) - done - lemma cendpoint_relation_udpate_arch: "\ cslift x p = Some tcb ; cendpoint_relation (cslift x) v v' \ - \ cendpoint_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cendpoint_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def split: endpoint.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1803,29 +1880,13 @@ lemma cendpoint_relation_udpate_arch: lemma cnotification_relation_udpate_arch: "\ cslift x p = Some tcb ; cnotification_relation (cslift x) v v' \ - \ cnotification_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cnotification_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cnotification_relation_def Let_def tcb_queue_relation'_def split: notification.splits ntfn.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) apply (safe ; case_tac "xa = p" ; clarsimp simp: option_map2_def map_option_case) done -lemma sanitiseSetRegister_ccorres: - "\ val = val'; reg' = register_from_H reg\ \ - ccorres dc xfdc (tcb_at' tptr) - UNIV - hs - (asUser tptr (setRegister reg (local.sanitiseRegister False reg val))) - (\unsigned_long_eret_2 :== CALL sanitiseRegister(reg',val',0);; - CALL setRegister(tcb_ptr_to_ctcb_ptr tptr,reg',\unsigned_long_eret_2))" - apply (rule ccorres_guard_imp2) - apply (rule ccorres_symb_exec_r) - apply (ctac add: setRegister_ccorres) - apply (vcg) - apply (rule conseqPre, vcg) - apply (fastforce simp: sanitiseRegister_def split: register.splits) - by (auto simp: sanitiseRegister_def from_bool_def simp del: Collect_const split: register.splits bool.splits) - lemma case_option_both[simp]: "(case f of None \ P | _ \ P) = P" by (auto split: option.splits) @@ -1897,16 +1958,16 @@ lemma unmapPageDirectory_ccorres: apply (simp add: from_bool_0) apply ccorres_rewrite apply (clarsimp simp: throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply (simp add: from_bool_0) - apply (rule ccorres_liftE[simplified dc_def]) + apply (rule ccorres_liftE', simp) apply (ctac add: flushPD_ccorres) apply (csymbr, rename_tac 
invalidPDPTE) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDPTE_Basic_ccorres) apply (simp add: cpdpte_relation_def Let_def) apply (csymbr, rename_tac root) - apply (ctac add: invalidatePageStructureCacheASID_ccorres[simplified dc_def]) + apply (ctac add: invalidatePageStructureCacheASID_ccorres) apply wp apply (clarsimp simp add: guard_is_UNIV_def) apply wp @@ -1914,11 +1975,11 @@ lemma unmapPageDirectory_ccorres: apply (vcg exspec=flushPD_modifies) apply (clarsimp simp: guard_is_UNIV_def) apply (simp,ccorres_rewrite,simp add:throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply wpsimp apply (vcg exspec=lookupPDPTSlot_modifies) apply (simp,ccorres_rewrite,simp add:throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply wp apply vcg apply (auto simp add: asid_wf_def mask_def) @@ -1952,14 +2013,14 @@ lemma unmapPDPointerTable_ccorres: apply (erule ko_at_projectKO_opt) apply (rename_tac pml4e_C) apply (fastforce simp: typ_heap_simps cpml4e_relation_def Let_def - isPDPointerTablePML4E_def from_bool_def + isPDPointerTablePML4E_def split: bool.splits if_split pml4e.split_asm) apply ceqv apply (rule ccorres_Cond_rhs_Seq) apply ccorres_rewrite apply (clarsimp simp: from_bool_0 isPDPointerTablePML4E_def split: pml4e.splits; clarsimp simp: throwError_def; - rule ccorres_return_void_C[simplified dc_def]) + rule ccorres_return_void_C) apply (clarsimp simp: isPDPointerTablePML4E_def liftE_def bind_assoc split: pml4e.split_asm) apply (ctac add: flushPDPT_ccorres) apply csymbr @@ -1967,7 +2028,7 @@ lemma unmapPDPointerTable_ccorres: apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePML4E_Basic_ccorres') apply (fastforce simp: cpml4e_relation_def) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply wp apply (fastforce simp: guard_is_UNIV_def) apply wp @@ -1975,7 +2036,7 @@ lemma unmapPDPointerTable_ccorres: apply vcg apply ccorres_rewrite apply (clarsimp simp: throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply (wpsimp wp: hoare_drop_imps) apply (vcg exspec=findVSpaceForASID_modifies) apply (auto simp: invs_arch_state' invs_no_0_obj' asid_wf_def mask_def typ_heap_simps @@ -1986,20 +2047,18 @@ lemma ccap_relation_PDPT_IsMapped: "ccap_relation (ArchObjectCap (PDPointerTableCap x1 x2)) cap \ capPDPTIsMapped_CL (cap_pdpt_cap_lift cap) = from_bool (\ Option.is_none x2)" apply (frule cap_get_tag_isCap_unfolded_H_cap) - apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_pdpt_cap_lift_def cap_lift_def Let_def - cap_tag_defs to_bool_def true_def false_def - split: if_split) - apply word_bitwise + apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_pdpt_cap_lift_def cap_lift_def + cap_tag_defs to_bool_def + split: if_split) done lemma ccap_relation_PML4_IsMapped: "ccap_relation (ArchObjectCap (PML4Cap x1 x2)) cap \ capPML4IsMapped_CL (cap_pml4_cap_lift cap) = from_bool (\ Option.is_none x2)" apply (frule cap_get_tag_isCap_unfolded_H_cap) - apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_pml4_cap_lift_def cap_lift_def Let_def - cap_tag_defs to_bool_def true_def false_def - split: if_split) - apply word_bitwise + apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_pml4_cap_lift_def cap_lift_def + cap_tag_defs to_bool_def + split: if_split) done lemma if_case_opt_same_branches: @@ -2049,7 +2108,7 @@ lemma Mode_finaliseCap_ccorres_pdpt: R'=UNIV 
in ccorres_symb_exec_r_known_rv_UNIV) apply (vcg exspec=unmapPDPT_modifies) - apply (fastforce simp: false_def cap_get_tag_isCap_unfolded_H_cap + apply (fastforce simp: cap_get_tag_isCap_unfolded_H_cap ccap_relation_PDPT_IsMapped from_bool_eq_if') apply ceqv apply (rule ccorres_Cond_rhs_Seq) @@ -2111,7 +2170,7 @@ lemma Mode_finaliseCap_ccorres_pml4: R'=UNIV in ccorres_symb_exec_r_known_rv_UNIV) apply vcg - apply (fastforce simp: false_def cap_get_tag_isCap_unfolded_H_cap + apply (fastforce simp: cap_get_tag_isCap_unfolded_H_cap ccap_relation_PML4_IsMapped from_bool_eq_if') apply ceqv apply (rule ccorres_Cond_rhs_Seq) @@ -2164,7 +2223,7 @@ lemma Mode_finaliseCap_ccorres_page_cap: od) (Call Mode_finaliseCap_'proc)" supply Collect_const[simp del] - apply (cinit' lift: cap_' simp: false_def) + apply (cinit' lift: cap_') apply csymbr apply (clarsimp simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs) apply ccorres_rewrite @@ -2199,7 +2258,7 @@ lemma Mode_finaliseCap_ccorres_page_cap: dest!: x_less_2_0_1) lemma Arch_finaliseCap_ccorres: - notes dc_simp[simp del] Collect_const[simp del] + notes Collect_const[simp del] shows "ccorres (\rv rv'. ccap_relation (fst rv) (remainder_C rv') \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) @@ -2252,7 +2311,7 @@ lemma Arch_finaliseCap_ccorres: apply (clarsimp simp: return_def) apply wp apply fastforce - apply (fastforce simp: false_def) + apply fastforce \ \PML4Cap\ apply (rule ccorres_guard_imp) apply (rule ccorres_add_return2) @@ -2262,7 +2321,7 @@ lemma Arch_finaliseCap_ccorres: apply (clarsimp simp: return_def) apply wp apply fastforce - apply (fastforce simp: false_def) + apply fastforce \ \PageCap\ apply (clarsimp simp: isCap_simps) apply (rule ccorres_guard_imp[where A="?abstract_pre" and A'=UNIV]) @@ -2314,7 +2373,7 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (frule cap_lift_page_directory_cap) apply (clarsimp simp: ccap_relation_def cap_to_H_def capAligned_def - to_bool_def cap_page_directory_cap_lift_def + cap_page_directory_cap_lift_def asid_bits_def split: if_split_asm) apply simp @@ -2358,8 +2417,7 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (frule cap_lift_page_table_cap) apply (clarsimp simp: ccap_relation_def cap_to_H_def capAligned_def - to_bool_def cap_page_table_cap_lift_def - asid_bits_def + cap_page_table_cap_lift_def asid_bits_def split: if_split_asm) apply simp apply (rule ccorres_inst[where P=\ and P'=UNIV], return_NullCap_pair_ccorres) @@ -2426,7 +2484,7 @@ lemma fpuThreadDelete_ccorres: (invs' and tcb_at' thread) (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}) hs (fpuThreadDelete thread) (Call fpuThreadDelete_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit lift: thread_') apply clarsimp apply (ctac (no_vcg) add: nativeThreadUsingFPU_ccorres) @@ -2443,7 +2501,6 @@ lemma prepareThreadDelete_ccorres: (invs' and tcb_at' thread) (UNIV \ {s. 
thread_' s = tcb_ptr_to_ctcb_ptr thread}) hs (prepareThreadDelete thread) (Call Arch_prepareThreadDelete_'proc)" - supply dc_simp[simp del] apply (cinit lift: thread_', rename_tac cthread) apply (ctac add: fpuThreadDelete_ccorres) apply fastforce @@ -2493,7 +2550,7 @@ lemma finaliseCap_ccorres: del: Collect_const) apply (rule ccorres_if_lhs) apply (simp, rule ccorres_fail) - apply (simp add: from_bool_0 Collect_True Collect_False false_def + apply (simp add: from_bool_0 Collect_True Collect_False del: Collect_const) apply csymbr apply (simp add: cap_get_tag_isCap Collect_False Collect_True @@ -2578,7 +2635,7 @@ lemma finaliseCap_ccorres: apply (simp add: isArchCap_T_isArchObjectCap[symmetric] del: Collect_const) apply (rule ccorres_if_lhs) - apply (simp add: Collect_False Collect_True Let_def true_def + apply (simp add: Collect_False Collect_True Let_def del: Collect_const) apply (rule_tac P="(capIRQ cap) \ X64.maxIRQ" in ccorres_gen_asm) apply (rule ccorres_rhs_assoc)+ @@ -2598,18 +2655,18 @@ lemma finaliseCap_ccorres: apply (rule ccorres_fail) apply (rule ccorres_add_return, rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ceqv_refl) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -2618,8 +2675,7 @@ lemma finaliseCap_ccorres: irq_opt_relation_def) apply wp apply (simp add: guard_is_UNIV_def) - apply (clarsimp simp: cap_get_tag_isCap word_sle_def Collect_const_mem - false_def from_bool_def) + apply (clarsimp simp: cap_get_tag_isCap word_sle_def Collect_const_mem) apply (intro impI conjI) apply (clarsimp split: bool.splits) apply (clarsimp split: bool.splits) @@ -2636,7 +2692,7 @@ lemma finaliseCap_ccorres: split: option.splits cap_CL.splits if_splits) apply clarsimp apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) - apply (clarsimp simp: isCap_simps from_bool_def false_def) + apply (clarsimp simp: isCap_simps) apply (clarsimp simp: tcb_cnode_index_defs ptr_add_assertion_def) apply clarsimp apply (frule cap_get_tag_to_H, erule(1) cap_get_tag_isCap [THEN iffD2]) diff --git a/proof/crefine/X64/Interrupt_C.thy b/proof/crefine/X64/Interrupt_C.thy index 81a4d41337..cb5bced472 100644 --- a/proof/crefine/X64/Interrupt_C.thy +++ b/proof/crefine/X64/Interrupt_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -16,7 +17,7 @@ lemma invokeIRQHandler_AckIRQ_ccorres: (InterruptDecls_H.invokeIRQHandler (AckIRQ irq)) (Call invokeIRQHandler_AckIRQ_'proc)" apply (cinit lift: irq_' simp: Interrupt_H.invokeIRQHandler_def invokeIRQHandler_def) apply (ctac add: maskInterrupt_ccorres) - apply (simp add: from_bool_def false_def) + apply simp done lemma getIRQSlot_ccorres: @@ -74,7 +75,7 @@ proof - apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: 
cteDeleteOne_ccorres[where w="-1"]) apply (rule ccorres_call) - apply (rule cteInsert_ccorres[simplified dc_def]) + apply (rule cteInsert_ccorres) apply simp apply simp apply simp @@ -111,7 +112,7 @@ lemma invokeIRQHandler_ClearIRQHandler_ccorres: apply (simp add: ucast_up_ucast is_up) apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) apply (rule ccorres_symb_exec_r) - apply (ctac add: cteDeleteOne_ccorres[where w="-1",simplified dc_def]) + apply (ctac add: cteDeleteOne_ccorres[where w="-1"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) @@ -229,7 +230,7 @@ lemma decodeIRQHandlerInvocation_ccorres: apply (clarsimp simp: Collect_const_mem neq_Nil_conv dest!: interpret_excaps_eq) apply (simp add: rf_sr_ksCurThread if_1_0_0 mask_def[where n=4] - "StrictC'_thread_state_defs" cap_get_tag_isCap excaps_map_def + ThreadState_defs cap_get_tag_isCap excaps_map_def word_sless_def word_sle_def) apply (simp add: invocationCatch_def throwError_bind interpret_excaps_test_null Collect_True @@ -257,24 +258,23 @@ lemma decodeIRQHandlerInvocation_ccorres: apply (simp add: syscall_error_to_H_cases) apply simp apply (clarsimp simp: Collect_const_mem tcb_at_invs') - apply (clarsimp simp: invs_queues invs_valid_objs' + apply (clarsimp simp: invs_valid_objs' ct_in_state'_def ccap_rights_relation_def - mask_def[where n=4] - "StrictC'_thread_state_defs") + mask_def[where n=4] ThreadState_defs) apply (subst pred_tcb'_weakenE, assumption, fastforce)+ apply (clarsimp simp: rf_sr_ksCurThread word_sle_def word_sless_def sysargs_rel_n_def word_less_nat_alt) apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth - slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def + slotcap_in_mem_def valid_tcb_state'_def dest!: interpret_excaps_eq split: bool.splits) apply (intro conjI impI allI) apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth - slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def + slotcap_in_mem_def valid_tcb_state'_def dest!: interpret_excaps_eq split: bool.splits)+ - apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[4] + apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[6] apply (drule ctes_of_valid') apply fastforce apply (clarsimp simp add:valid_cap_simps' X64.maxIRQ_def) @@ -348,7 +348,7 @@ lemma invokeIRQControl_ccorres: (performIRQControl (IssueIRQHandler irq slot parent)) (Call invokeIRQControl_'proc)" by (clarsimp simp: performIRQControl_def liftE_def bind_assoc - intro!: invokeIRQControl_expanded_ccorres[simplified liftE_def K_def, simplified]) + intro!: invokeIRQControl_expanded_ccorres[simplified liftE_def, simplified]) lemma isIRQActive_ccorres: "ccorres (\rv rv'. rv' = from_bool rv) ret__unsigned_long_' @@ -367,8 +367,7 @@ lemma isIRQActive_ccorres: Let_def cinterrupt_relation_def) apply (drule spec, drule(1) mp) apply (case_tac "intStateIRQTable (ksInterruptState \) irq") - apply (simp add: from_bool_def irq_state_defs Kernel_C.maxIRQ_def - word_le_nat_alt)+ + apply (simp add: irq_state_defs Kernel_C.maxIRQ_def word_le_nat_alt)+ done lemma Platform_maxIRQ: @@ -531,32 +530,61 @@ lemma ccorres_pre_gets_x64KSNumIOAPICs_ksArchState: apply clarsimp done +lemma ccorres_pre_gets_x64KSIOAPICnIRQs_ksArchState: + assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" + shows "ccorres r xf + (\s. (\rv. 
x64KSIOAPICnIRQs (ksArchState s) = rv \ P rv s)) + {s. \rv. s \ P' rv } hs + (gets (x64KSIOAPICnIRQs \ ksArchState) >>= (\rv. f rv)) c" + apply (rule ccorres_guard_imp) + apply (rule ccorres_symb_exec_l) + defer + apply wp[1] + apply (rule gets_sp) + apply (clarsimp simp: empty_fail_def simpler_gets_def) + apply assumption + apply clarsimp + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply clarsimp + done + +lemma rf_sr_x64KSIOAPICnIRQs: + "\ (s,s') \ rf_sr; i < of_nat maxNumIOAPIC \ \ + ioapic_nirqs_' (globals s').[unat i] = x64KSIOAPICnIRQs (ksArchState s) i" + by (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def Let_def + array_relation_def) + lemma ioapic_decode_map_pin_to_vector_ccorres: "ccorres (intr_and_se_rel \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - \ - (UNIV - \ {s. ioapic___unsigned_long_' s = ioapic} - \ {s. pin___unsigned_long_' s = pin} - \ {s. level___unsigned_long_' s = level} - \ {s. polarity_' s = polarity}) + valid_ioapic + (\\ioapic___unsigned_long = ioapic\ \ + \\pin___unsigned_long = pin\ \ + \\level___unsigned_long = level\ \ + \\polarity = polarity\) hs (doE numIOAPICs <- liftE (gets (x64KSNumIOAPICs \ ksArchState)); + ioapic_nirqs <- liftE (gets (x64KSIOAPICnIRQs \ ksArchState)); whenE (numIOAPICs = 0) (throwError (Inl IllegalOperation)); whenE (uint (numIOAPICs - 1) < uint ioapic) - (throwError (Inl (RangeError 0 (numIOAPICs - 1)))); - whenE (uint (ioapicIRQLines - 1) < uint pin) - (throwError (Inl (RangeError 0 (ioapicIRQLines - 1)))); + (throwError (Inl (RangeError 0 (numIOAPICs - 1)))); + whenE (uint (ucast (ioapic_nirqs ioapic - 1) :: machine_word) < uint pin) + (throwError (Inl (RangeError 0 (ucast (ioapic_nirqs ioapic - 1))))); whenE (1 < uint level) (throwError (Inl (RangeError 0 1))); whenE (1 < uint polarity) (throwError (Inl (RangeError 0 1))) odE) - (Call ioapic_decode_map_pin_to_vector_'proc)" - supply Collect_const[simp del] + (Call ioapic_decode_map_pin_to_vector_'proc)" + supply Collect_const[simp del] word_less_1[simp del] (* for uniform array guard on ioapic_nirqs *) apply (cinit' lift: ioapic___unsigned_long_' pin___unsigned_long_' level___unsigned_long_' polarity_') apply (simp add: ioapicIRQLines_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (clarsimp simp: liftE_bindE) apply (rule ccorres_pre_gets_x64KSNumIOAPICs_ksArchState) + apply (rule ccorres_pre_gets_x64KSIOAPICnIRQs_ksArchState) apply (rule_tac Q="\s. x64KSNumIOAPICs (ksArchState s) = numIOAPICs" and Q'=\ in ccorres_split_when_throwError_cond) apply (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def Let_def) @@ -574,23 +602,55 @@ lemma ioapic_decode_map_pin_to_vector_ccorres: EXCEPTION_SYSCALL_ERROR_def EXCEPTION_NONE_def syscall_error_rel_def) apply (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def Let_def) apply (subst ucast_sub_ucast; fastforce simp: lt1_neq0) + apply (rule_tac P="numIOAPICs \ of_nat maxNumIOAPIC" in ccorres_gen_asm) + apply (clarsimp simp: not_less word_le_def[symmetric]) + apply (prop_tac "ioapic < of_nat maxNumIOAPIC", + solves \simp add: le_m1_iff_lt[THEN iffD1] word_neq_0_conv\) + apply (rule ccorres_prove_guard) + (* array guard where array dimension is maxNumIOAPIC *) + apply (solves \simp add: Kernel_Config.maxNumIOAPIC_def\) + apply ccorres_rewrite + apply (rename_tac ioapic_nirqs) + apply (rule_tac Q="\s. 
ioapic_nirqs = x64KSIOAPICnIRQs (ksArchState s) \ + 0 < x64KSIOAPICnIRQs (ksArchState s) ioapic" and + Q'=\ + in ccorres_split_when_throwError_cond) + apply (fastforce simp: word_le_def scast_ucast_up_eq_ucast uint_up_ucast is_up + rf_sr_x64KSIOAPICnIRQs + uint_minus_1_less_le_eq) + (* Need to VCG it as the range error depends on the global state *) + apply (rule_tac P="\s. ioapic_nirqs = x64KSIOAPICnIRQs (ksArchState s) \ + numIOAPICs \ of_nat maxNumIOAPIC \ + 0 < x64KSIOAPICnIRQs (ksArchState s) ioapic \ + x64KSIOAPICnIRQs (ksArchState s) ioapic \ ucast ioapicIRQLines" + and P'="UNIV" in ccorres_from_vcg_throws) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: fst_throwError_returnOk syscall_error_to_H_cases + EXCEPTION_SYSCALL_ERROR_def EXCEPTION_NONE_def syscall_error_rel_def) + apply (simp add: rf_sr_x64KSIOAPICnIRQs + scast_ucast_up_eq_ucast ioapicIRQLines_def sint_ucast_eq_uint is_down + scast_ucast_up_minus_1_ucast) + apply (rule conjI, uint_arith) + apply (rule conjI, uint_arith) + (* array guard where array dimension is maxNumIOAPIC *) + apply (solves \simp add: Kernel_Config.maxNumIOAPIC_def\) apply (rule_tac Q=\ and Q'=\ in ccorres_split_when_throwError_cond) - apply (fastforce simp: word_le_def add1_zle_eq[symmetric]) + apply clarsimp + apply (metis arith_special(21) diff_eq_diff_eq uint_1 word_less_def word_less_sub1 + word_neq_0_conv word_sub_less_iff) apply (fastforce simp: syscall_error_to_H_cases intro: syscall_error_throwError_ccorres_n) - apply (rule_tac Q=\ and Q'=\ in ccorres_split_when_throwError_cond) + apply (rule_tac Q=\ and Q'=\ + in ccorres_split_when_throwError_cond[where b="returnOk ()", simplified]) apply clarsimp apply (metis arith_special(21) diff_eq_diff_eq uint_1 word_less_def word_less_sub1 word_neq_0_conv word_sub_less_iff) apply (fastforce simp: syscall_error_to_H_cases intro: syscall_error_throwError_ccorres_n) - apply (rule_tac Q=\ and Q'=\ - in ccorres_split_when_throwError_cond[where b="returnOk ()", simplified]) - apply clarsimp - apply (metis arith_special(21) diff_eq_diff_eq uint_1 word_less_def word_less_sub1 - word_neq_0_conv word_sub_less_iff) - apply (fastforce simp: syscall_error_to_H_cases intro: syscall_error_throwError_ccorres_n) - apply (ctac add: ccorres_return_CE) - apply vcg+ - apply fastforce + apply (ctac add: ccorres_return_CE) + apply vcg+ + apply (clarsimp simp: not_less) + apply (prop_tac "ioapic < x64KSNumIOAPICs (ksArchState s)") + apply (meson word_leq_minus_one_le word_less_eq_iff_unsigned) + apply (fastforce simp: valid_ioapic_def) done (* Bundle of definitions for minIRQ, maxIRQ, minUserIRQ, etc *) @@ -719,7 +779,7 @@ from assms show ?thesis apply (rule ccorres_Cond_rhs_Seq) apply ccorres_rewrite apply (auto split: invocation_label.split arch_invocation_label.split - intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def dc_def id_def] + intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def] simp: throwError_def invocationCatch_def syscall_error_to_H_cases invocation_eq_use_types)[1] apply clarsimp apply (rule ccorres_rhs_assoc2) @@ -739,13 +799,13 @@ from assms show ?thesis apply (erule ccorres_disj_division; clarsimp split: invocation_label.split simp: invocation_eq_use_types) apply (auto split: list.split - intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def dc_def id_def] + intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def] simp: throwError_def invocationCatch_def syscall_error_to_H_cases)[2] (* Insufficient extra caps 
*) apply (erule ccorres_disj_division; clarsimp split: invocation_label.split simp: invocation_eq_use_types) apply (auto split: list.split - intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def dc_def id_def] + intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def] simp: throwError_def invocationCatch_def syscall_error_to_H_cases)[2] (* Arguments OK *) apply ccorres_rewrite @@ -772,7 +832,7 @@ from assms show ?thesis word_sless_alt is_down sint_ucast_eq_uint word_le_not_less invocationCatch_use_injection_handler injection_handler_throwError syscall_error_to_H_cases - intro: syscall_error_throwError_ccorres_n[simplified id_def dc_def]) | + intro: syscall_error_throwError_ccorres_n) | ccorres_rewrite)+)[2] apply (erule ccorres_disj_division; clarsimp simp: invocation_eq_use_types) (* X64IRQIssueIRQHandlerIOAPIC *) @@ -792,7 +852,7 @@ from assms show ?thesis apply (simp add: injection_handler_whenE injection_handler_throwError) apply (rule ccorres_split_when_throwError_cond[where Q=\ and Q'=\]) apply clarsimp - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply csymbr apply (ctac add: ccorres_injection_handler_csum1 @@ -828,8 +888,7 @@ from assms show ?thesis where g="\_. injection_handler P Q >>=E R" for P Q R]) apply (clarsimp simp: injection_handler_returnOk) apply (simp only: bindE_K_bind) - apply (ctac add: ioapic_decode_map_pin_to_vector_ccorres - [simplified o_def id_def dc_def K_def]) + apply (ctac add: ioapic_decode_map_pin_to_vector_ccorres) apply ccorres_rewrite apply (simp add: ccorres_invocationCatch_Inr performInvocation_def returnOk_bind liftE_bindE bindE_assoc @@ -865,15 +924,12 @@ from assms show ?thesis apply (rule hoare_weaken_pre[where P="?pre"]) apply (rule isIRQActive_wp) apply (clarsimp simp: sysargs_rel_to_n unat_less_2p_word_bits - invs_valid_objs' tcb_at_invs' invs_queues valid_tcb_state'_def + invs_valid_objs' tcb_at_invs' valid_tcb_state'_def invs_sch_act_wf' ct_in_state'_def cte_wp_at_weakenE' - pred_tcb'_weakenE) + pred_tcb'_weakenE invs_pspace_aligned' invs_pspace_distinct') apply (subst pred_tcb'_weakenE, assumption, fastforce)+ - apply (rule conjI) - apply (rule TrueI) - apply (rule conjI) - apply (rule impI) - apply (rule TrueI) + apply (rule conjI, fastforce) + apply clarsimp apply (rule_tac irq1="yf" in irq64_helper_two) apply (simp only: unat_def) apply (vcg exspec=isIRQActive_modifies) @@ -895,7 +951,7 @@ from assms show ?thesis apply (simp add: injection_handler_whenE injection_handler_throwError) apply (rule ccorres_split_when_throwError_cond[where Q=\ and Q'=\]) apply clarsimp - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply csymbr apply (ctac add: ccorres_injection_handler_csum1 @@ -931,7 +987,7 @@ from assms show ?thesis (* Handle the conditional checks on PCI bus/dev/func *) apply ((rule_tac Q=\ and Q'=\ in ccorres_split_when_throwError_cond, fastforce, - rule syscall_error_throwError_ccorres_n[simplified id_def dc_def], + rule syscall_error_throwError_ccorres_n, fastforce simp: syscall_error_to_H_cases)+)[3] apply ccorres_rewrite apply csymbr @@ -964,9 +1020,9 @@ from assms show ?thesis apply (rule hoare_weaken_pre[where P="?pre"]) apply wp apply (clarsimp simp: invs_valid_objs' tcb_at_invs' - invs_queues valid_tcb_state'_def + valid_tcb_state'_def 
invs_pspace_aligned' invs_sch_act_wf' ct_in_state'_def - cte_wp_at_weakenE') + cte_wp_at_weakenE' invs_pspace_distinct') apply (subst pred_tcb'_weakenE, assumption, fastforce)+ apply (intro conjI impI) apply (rule TrueI)+ @@ -997,12 +1053,12 @@ from assms show ?thesis apply clarsimp apply (fastforce simp: guard_is_UNIV_def interpret_excaps_eq excaps_map_def split: Product_Type.prod.split) - apply (auto simp: invs_queues invs_valid_objs' ct_in_state'_def irqIntOffset_def + apply (auto simp: invs_valid_objs' ct_in_state'_def irqIntOffset_def ccap_rights_relation_def mask_def[where n=4] - "StrictC'_thread_state_defs" rf_sr_ksCurThread cte_wp_at_ctes_of + ThreadState_defs rf_sr_ksCurThread cte_wp_at_ctes_of sysargs_rel_def sysargs_rel_n_def excaps_map_def excaps_in_mem_def slotcap_in_mem_def - valid_tcb_state'_def from_bool_def toBool_def word_less_nat_alt + valid_tcb_state'_def word_less_nat_alt c_irq_const_defs irq64_helper_one irq64_helper_two irq64_helper_three irq64_helper_four irq64_helper_five unat_less_2p_word_bits word_less_alt word_sless_alt is_down sint_ucast_eq_uint @@ -1151,9 +1207,8 @@ lemma decodeIRQControlInvocation_ccorres: apply wp apply (vcg exspec=Arch_decodeIRQControlInvocation_modifies) apply (simp add: syscall_error_to_H_cases) - apply (clarsimp simp: if_1_0_0 interpret_excaps_test_null excaps_map_def + apply (clarsimp simp: interpret_excaps_test_null excaps_map_def Collect_const_mem word_sless_def word_sle_def - ThreadState_Restart_def unat_of_nat mask_def cong: if_cong) apply (rule conjI) apply (cut_tac unat_lt2p[where x="args ! 2"]) @@ -1168,9 +1223,7 @@ lemma decodeIRQControlInvocation_ccorres: apply (clarsimp simp: neq_Nil_conv numeral_eqs[symmetric] word_sle_def word_sless_def) apply (drule interpret_excaps_eq[rule_format, where n=0], simp) - apply (clarsimp simp: "StrictC'_thread_state_defs" - rf_sr_ksCurThread ccap_rights_relation_def - rightsFromWord_wordFromRights) + apply (clarsimp simp: rf_sr_ksCurThread ccap_rights_relation_def rightsFromWord_wordFromRights) done end end diff --git a/proof/crefine/X64/Invoke_C.thy b/proof/crefine/X64/Invoke_C.thy index 2535448c2f..3b08dd7449 100644 --- a/proof/crefine/X64/Invoke_C.thy +++ b/proof/crefine/X64/Invoke_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -63,11 +64,11 @@ lemma setDomain_ccorres: apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_return_Skip) - apply (simp add: when_def to_bool_def) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (simp add: when_def) + apply (rule_tac R="\s. curThread = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply simp apply (wp hoare_drop_imps weak_sch_act_wf_lift_linear) @@ -75,15 +76,17 @@ lemma setDomain_ccorres: apply simp apply wp apply (rule_tac Q="\_. all_invs_but_sch_extra and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s)" in hoare_strengthen_post) + and (\s. 
curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp threadSet_all_invs_but_sch_extra) - apply (clarsimp simp:valid_pspace_valid_objs' st_tcb_at_def[symmetric] - sch_act_simple_def st_tcb_at'_def o_def weak_sch_act_wf_def split:if_splits) + apply (fastforce simp: valid_pspace_valid_objs' st_tcb_at_def[symmetric] + sch_act_simple_def st_tcb_at'_def weak_sch_act_wf_def + split: if_splits) apply (simp add: guard_is_UNIV_def) - apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s \ (\p. t \ set (ksReadyQueues s p)))" in hoare_strengthen_post) + apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple and (\s. curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_not_queued - tcbSchedDequeue_not_in_queue hoare_vcg_imp_lift hoare_vcg_all_lift) + hoare_vcg_imp_lift hoare_vcg_all_lift) apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def) apply (fastforce simp: valid_tcb'_def tcb_cte_cases_def invs'_def valid_state'_def valid_pspace'_def) @@ -191,10 +194,10 @@ lemma decodeDomainInvocation_ccorres: apply clarsimp apply (vcg exspec=getSyscallArg_modifies) - apply (clarsimp simp: valid_tcb_state'_def invs_valid_queues' invs_valid_objs' - invs_queues invs_sch_act_wf' ct_in_state'_def pred_tcb_at' + apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' + invs_sch_act_wf' ct_in_state'_def pred_tcb_at' rf_sr_ksCurThread word_sle_def word_sless_def sysargs_rel_to_n - mask_eq_iff_w2p mask_eq_iff_w2p word_size "StrictC'_thread_state_defs") + mask_eq_iff_w2p mask_eq_iff_w2p word_size ThreadState_defs) apply (rule conjI) apply (clarsimp simp: linorder_not_le isCap_simps) apply (rule conjI, clarsimp simp: unat64_eq_of_nat) @@ -202,7 +205,7 @@ lemma decodeDomainInvocation_ccorres: apply (drule_tac x="extraCaps ! 0" and P="\v. valid_cap' (fst v) s" in bspec) apply (clarsimp simp: nth_mem interpret_excaps_test_null excaps_map_def) apply (clarsimp simp: valid_cap_simps' pred_tcb'_weakenE active_runnable') - apply (rule conjI) + apply (intro conjI; fastforce?) 
apply (fastforce simp: tcb_st_refs_of'_def elim:pred_tcb'_weakenE) apply (simp add: word_le_nat_alt unat_ucast unat_numDomains_to_H le_maxDomain_eq_less_numDomains) apply (clarsimp simp: ccap_relation_def cap_to_H_simps cap_thread_cap_lift) @@ -227,7 +230,7 @@ lemma invokeCNodeDelete_ccorres: apply (rule ccorres_trim_returnE, simp, simp) apply (rule ccorres_callE) apply (rule cteDelete_ccorres[simplified]) - apply (simp add: from_bool_def true_def)+ + apply simp+ done @@ -247,7 +250,7 @@ lemma invokeCNodeRevoke_ccorres: apply (rule ccorres_trim_returnE, simp, simp) apply (rule ccorres_callE) apply (rule cteRevoke_ccorres[simplified]) - apply (simp add: from_bool_def true_def)+ + apply simp+ done @@ -380,7 +383,7 @@ lemma invokeCNodeRotate_ccorres: apply clarsimp apply (simp add: return_def) apply wp - apply (simp add: guard_is_UNIV_def dc_def xfdc_def) + apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp) apply (clarsimp simp:cte_wp_at_ctes_of) @@ -545,12 +548,10 @@ lemma hasCancelSendRights_spec: apply clarsimp apply (drule sym, drule (1) cap_get_tag_to_H) apply (clarsimp simp: hasCancelSendRights_def to_bool_def - true_def false_def split: if_split bool.splits) apply (rule impI) apply (case_tac cap, - auto simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs - from_bool_def false_def true_def hasCancelSendRights_def + auto simp: cap_get_tag_isCap_unfolded_H_cap cap_tag_defs hasCancelSendRights_def dest: cap_get_tag_isArchCap_unfolded_H_cap split: capability.splits bool.splits)[1] done @@ -627,9 +628,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const cong: call_ignore_cong) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc | csymbr)+ - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] - if_1_0_0 dc_def[symmetric] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) apply (simp add:if_P del: Collect_const) @@ -712,8 +711,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: Collect_const[symmetric] del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] - if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: list_case_helper injection_handler_returnOk @@ -740,13 +738,12 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError whenE_def - dc_def[symmetric]) + apply (simp add: injection_handler_throwError whenE_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk ccorres_invocationCatch_Inr performInvocation_def - bindE_assoc false_def) + bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeInsert_ccorres) @@ -759,16 +756,16 @@ lemma decodeCNodeInvocation_ccorres: apply (vcg exspec=setThreadState_modifies) apply simp apply (wp injection_wp_E[OF refl]) - apply (rule hoare_post_imp_R) - apply (rule_tac Q'="\rv. valid_pspace' and valid_queues + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' and valid_cap' rv and valid_objs' and tcb_at' thread and (\s. 
sch_act_wf (ksSchedulerAction s) s)" in hoare_vcg_R_conj) apply (rule deriveCap_Null_helper[OF deriveCap_derived]) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) - apply (simp add: is_derived'_def badge_derived'_def - valid_tcb_state'_def) + apply (fastforce simp: is_derived'_def badge_derived'_def + valid_tcb_state'_def) apply (simp add: Collect_const_mem all_ex_eq_helper) apply (vcg exspec=deriveCap_modifies) apply wp @@ -817,12 +814,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: whenE_def injection_handler_returnOk - invocationCatch_def injection_handler_throwError - dc_def[symmetric]) + invocationCatch_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk - ccorres_invocationCatch_Inr false_def + ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) @@ -836,15 +832,15 @@ lemma decodeCNodeInvocation_ccorres: apply (vcg exspec=setThreadState_modifies) apply (simp add: conj_comms valid_tcb_state'_def) apply (wp injection_wp_E[OF refl]) - apply (rule hoare_post_imp_R) - apply (rule_tac Q'="\rv. valid_pspace' and valid_queues + apply (rule hoare_strengthen_postE_R) + apply (rule_tac Q'="\rv. valid_pspace' and valid_cap' rv and valid_objs' and tcb_at' thread and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_vcg_R_conj) apply (rule deriveCap_Null_helper [OF deriveCap_derived]) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) - apply (simp add: is_derived'_def badge_derived'_def) + apply (fastforce simp: is_derived'_def badge_derived'_def) apply (simp add: Collect_const_mem all_ex_eq_helper) apply (vcg exspec=deriveCap_modifies) apply (simp add: Collect_const_mem) @@ -878,7 +874,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply csymbr apply csymbr - apply (simp add: cap_get_tag_NullCap true_def) + apply (simp add: cap_get_tag_NullCap) apply (ctac add: setThreadState_ccorres) apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) @@ -897,7 +893,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: flip: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: if_not_P del: Collect_const) @@ -916,15 +912,14 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric] numeral_eqs) + apply (simp add: whenE_def injection_handler_throwError numeral_eqs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk ccorres_invocationCatch_Inr numeral_eqs performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) - apply (simp add: true_def ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (ctac(no_vcg) add: invokeCNodeMove_ccorres) apply (rule ccorres_alternative2) apply (rule ccorres_return_CE, simp+)[1] @@ -952,14 +947,16 @@ lemma decodeCNodeInvocation_ccorres: apply (rule validE_R_validE) apply (rule_tac Q'="\a b. 
cte_wp_at' (\x. True) a b \ invs' b \ tcb_at' thread b \ sch_act_wf (ksSchedulerAction b) b \ valid_tcb_state' Restart b - \ Q2 b" for Q2 in hoare_post_imp_R) - prefer 2 - apply (clarsimp simp:cte_wp_at_ctes_of) - apply (drule ctes_of_valid') - apply (erule invs_valid_objs') - apply (clarsimp simp:valid_updateCapDataI invs_queues invs_valid_objs' invs_valid_pspace') - apply (assumption) - apply (wp hoare_vcg_all_lift_R injection_wp_E[OF refl] + \ Q2 b" for Q2 in hoare_strengthen_postE_R) + prefer 2 + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule ctes_of_valid') + apply (erule invs_valid_objs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (clarsimp simp:valid_updateCapDataI invs_valid_objs' invs_valid_pspace') + apply assumption + apply (wp hoare_vcg_all_liftE_R injection_wp_E[OF refl] lsfco_cte_at' hoare_vcg_const_imp_lift_R )+ apply (simp add: Collect_const_mem word_sle_def word_sless_def @@ -1016,13 +1013,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_returnOk bindE_assoc - injection_bindE[OF refl refl] split_def - dc_def[symmetric]) + injection_bindE[OF refl refl] split_def) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc)+ apply (ctac add: ccorres_injection_handler_csum1 [OF ensureEmptySlot_ccorres]) - apply (simp add: ccorres_invocationCatch_Inr performInvocation_def - dc_def[symmetric] bindE_assoc) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (ctac(no_vcg) add: invokeCNodeSaveCaller_ccorres) apply (rule ccorres_alternative2) @@ -1031,7 +1026,7 @@ lemma decodeCNodeInvocation_ccorres: apply (wp sts_valid_pspace_hangers)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_split_throws) apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg @@ -1061,8 +1056,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: unlessE_def whenE_def injection_handler_throwError - dc_def[symmetric] from_bool_0) + apply (simp add: unlessE_def whenE_def injection_handler_throwError from_bool_0) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: unlessE_def whenE_def injection_handler_returnOk @@ -1106,12 +1100,10 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: throwError_def return_def exception_defs syscall_error_rel_def syscall_error_to_H_cases) apply clarsimp - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const) apply csymbr apply (simp add: interpret_excaps_test_null excaps_map_def - if_1_0_0 dc_def[symmetric] del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: throwError_bind invocationCatch_def) @@ -1171,8 +1163,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const) apply csymbr apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1234,8 +1225,7 @@ lemma decodeCNodeInvocation_ccorres: 
apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1243,8 +1233,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1258,7 +1247,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply wp apply (vcg exspec=invokeCNodeRotate_modifies) - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) apply (simp add: Collect_const_mem) @@ -1297,7 +1286,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule_tac Q'="\rvb. invs' and cte_at' rv and cte_at' rva and tcb_at' thread" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (clarsimp simp: cte_wp_at_ctes_of weak_derived_updateCapData capBadge_updateCapData_True) @@ -1322,16 +1311,16 @@ lemma decodeCNodeInvocation_ccorres: apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp @@ -1346,7 +1335,7 @@ lemma decodeCNodeInvocation_ccorres: apply vcg apply simp apply (wp injection_wp_E[OF refl] hoare_vcg_const_imp_lift_R - hoare_vcg_all_lift_R lsfco_cte_at' static_imp_wp + hoare_vcg_all_liftE_R lsfco_cte_at' hoare_weak_lift_imp | simp add: hasCancelSendRights_not_Null ctes_of_valid_strengthen cong: conj_cong | wp (once) hoare_drop_imps)+ @@ -1361,7 +1350,7 @@ lemma decodeCNodeInvocation_ccorres: apply simp apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: valid_tcb_state'_def invs_valid_objs' invs_valid_pspace' - ct_in_state'_def pred_tcb_at' invs_queues + ct_in_state'_def pred_tcb_at' cur_tcb'_def word_sle_def word_sless_def unat_lt2p[where 'a=machine_word_len, folded word_bits_def]) apply (rule conjI) @@ -1375,7 +1364,7 @@ lemma decodeCNodeInvocation_ccorres: apply (frule interpret_excaps_eq) apply (clarsimp simp: excaps_map_def mask_def[where n=4] ccap_rights_relation_def rightsFromWord_wordFromRights - "StrictC'_thread_state_defs" map_comp_Some_iff + ThreadState_defs map_comp_Some_iff rf_sr_ksCurThread hd_conv_nth hd_drop_conv_nth) apply ((rule conjI | clarsimp simp: rightsFromWord_wordFromRights @@ -1384,8 +1373,7 @@ lemma decodeCNodeInvocation_ccorres: map_option_Some_eq2 neq_Nil_conv ccap_relation_def numeral_eqs hasCancelSendRights_not_Null ccap_relation_NullCap_iff[symmetric] - if_1_0_0 interpret_excaps_test_null - mdbRevocable_CL_cte_to_H false_def true_def + interpret_excaps_test_null mdbRevocable_CL_cte_to_H | clarsimp simp: typ_heap_simps' | 
frule length_ineq_not_Nil)+) done @@ -1394,9 +1382,6 @@ end context begin interpretation Arch . (*FIXME: arch_split*) -crunch valid_queues[wp]: insertNewCap "valid_queues" - (wp: crunch_wps) - lemmas setCTE_def3 = setCTE_def2[THEN eq_reflection] lemma setCTE_sch_act_wf[wp]: @@ -1466,7 +1451,7 @@ lemma seL4_MessageInfo_lift_def2: lemma globals_update_id: "globals_update (t_hrs_'_update (hrs_htd_update id)) x = x" - by (simp add:id_def hrs_htd_update_def) + by (simp add: hrs_htd_update_def) lemma getObjectSize_spec: "\s. \\\s. \t \ of_nat (length (enum::object_type list) - 1)\ Call getObjectSize_'proc @@ -1523,7 +1508,7 @@ shows "\ctes_of (s::kernel_state) (ptr_val p) = Some cte; is_aligned ptr bits; bits < word_bits; {ptr..ptr + 2 ^ bits - 1} \ {ptr_val p..ptr_val p + mask cteSizeBits} = {}; ((clift hp) :: (cte_C ptr \ cte_C)) p = Some to\ \ (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: (cte_C ptr \ cte_C)) p = Some to" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def restrict_map_def split:if_splits) + apply (clarsimp simp:lift_t_def lift_typ_heap_def restrict_map_def split:if_splits) apply (intro conjI impI) apply (case_tac hp) apply (clarsimp simp:typ_clear_region_def hrs_htd_update_def) @@ -1852,8 +1837,7 @@ lemma resetUntypedCap_ccorres: apply (rule ccorres_Guard_Seq[where S=UNIV])? apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow) - apply (rule_tac idx="capFreeIndex (cteCap cte)" - in deleteObjects_ccorres[where p=slot, unfolded o_def]) + apply (rule_tac idx="capFreeIndex (cteCap cte)" in deleteObjects_ccorres[where p=slot]) apply ceqv apply clarsimp apply (simp only: ccorres_seq_cond_raise) @@ -2375,7 +2359,7 @@ lemma invokeUntyped_Retype_ccorres: (Call invokeUntyped_Retype_'proc)" apply (cinit lift: retypeBase_' srcSlot_' reset_' newType_' userSize_' deviceMemory_' destCNode_' destOffset_' destLength_' - simp: when_def) + simp: when_def archOverlap_def) apply (rule ccorres_move_c_guard_cte) apply csymbr apply (rule ccorres_abstract_cleanup) @@ -2444,7 +2428,7 @@ lemma invokeUntyped_Retype_ccorres: apply (clarsimp simp: misc unat_of_nat_eq[OF range_cover.weak, OF cover]) apply (vcg exspec=cap_untyped_cap_ptr_set_capFreeIndex_modifies) apply simp - apply (rule validE_validE_R, rule hoare_post_impErr, + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule hoare_vcg_conj_liftE1[rotated, where Q="\_ s. case gsCNodes s cnodeptr of None \ False | Some n \ length destSlots + unat start \ 2 ^ n"], @@ -2633,7 +2617,7 @@ lemma mapME_ensureEmptySlot': apply (erule meta_allE) apply wp apply (fold validE_R_def) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply clarsimp done @@ -2642,7 +2626,7 @@ lemma mapME_ensureEmptySlot: mapME (\x. injection_handler Inl (ensureEmptySlot (f x))) [S .e. (E::machine_word)] \\rva s. \slot. S \ slot \ slot \ E \ (\cte. 
cteCap cte = capability.NullCap \ ctes_of s (f slot) = Some cte)\, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule mapME_ensureEmptySlot') apply clarsimp done @@ -2827,7 +2811,6 @@ lemma Arch_isFrameType_spec: apply (auto simp: object_type_from_H_def ) done - lemma decodeUntypedInvocation_ccorres_helper: notes TripleSuc[simp] notes valid_untyped_inv_wcap'.simps[simp del] tl_drop_1[simp] @@ -3007,8 +2990,8 @@ lemma decodeUntypedInvocation_ccorres_helper: [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def]) apply (simp add: injection_liftE[OF refl]) - apply (simp add: liftE_liftM o_def split_def withoutFailure_def - hd_drop_conv_nth2 numeral_eqs[symmetric]) + apply (simp add: liftE_liftM split_def hd_drop_conv_nth2 + cong: ccorres_all_cong) apply (rule ccorres_nohs) apply (rule ccorres_getSlotCap_cte_at) apply (rule ccorres_move_c_guard_cte) @@ -3231,8 +3214,7 @@ lemma decodeUntypedInvocation_ccorres_helper: performInvocation_def liftE_bindE bind_assoc) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_trim_returnE, (simp (no_asm))+) - apply (simp (no_asm) add: o_def dc_def[symmetric] bindE_assoc - id_def[symmetric] bind_bindE_assoc) + apply (simp (no_asm) add: bindE_assoc bind_bindE_assoc) apply (rule ccorres_seq_skip'[THEN iffD1]) apply (ctac(no_vcg) add: invokeUntyped_Retype_ccorres[where start = "args!4"]) apply (rule ccorres_alternative2) @@ -3281,7 +3263,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply vcg apply (rule ccorres_guard_imp [where Q =\ and Q' = UNIV,rotated],assumption+) - apply (simp add: o_def) + apply simp apply (simp add: liftE_validE) apply (rule checkFreeIndex_wp) apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat cap_get_tag_isCap @@ -3298,8 +3280,7 @@ lemma decodeUntypedInvocation_ccorres_helper: unat_of_nat_APIType_capBits word_size hd_conv_nth length_ineq_not_Nil not_less word_le_nat_alt isCap_simps valid_cap_simps') apply (strengthen word_of_nat_less) - apply (clarsimp simp: StrictC'_thread_state_defs mask_def true_def false_def - from_bool_0 ccap_relation_isDeviceCap2 + apply (clarsimp simp: ThreadState_defs mask_def ccap_relation_isDeviceCap2 split: if_split) apply (clarsimp simp: not_less shiftr_overflow maxUntypedSizeBits_def unat_of_nat_APIType_capBits) @@ -3313,10 +3294,10 @@ lemma decodeUntypedInvocation_ccorres_helper: and ex_cte_cap_to' (capCNodePtr rv) and (\s. case gsCNodes s (capCNodePtr rv) of None \ False | Some n \ args ! 4 + args ! 5 - 1 < 2 ^ n) - and sch_act_simple and ct_active'" in hoare_post_imp_R) + and sch_act_simple and ct_active'" in hoare_strengthen_postE_R) prefer 2 apply (clarsimp simp: invs_valid_objs' invs_mdb' - invs_queues ct_in_state'_def pred_tcb_at') + ct_in_state'_def pred_tcb_at') apply (subgoal_tac "ksCurThread s \ ksIdleThread sa") prefer 2 apply clarsimp @@ -3348,7 +3329,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule validE_R_validE) apply (wp injection_wp_E[OF refl]) apply clarsimp - apply (simp add: ccHoarePost_def xfdc_def) + apply (simp add: ccHoarePost_def) apply (simp only: whileAnno_def[where I=UNIV and V=UNIV, symmetric]) apply (rule_tac V=UNIV in HoarePartial.reannotateWhileNoGuard) @@ -3361,7 +3342,7 @@ lemma decodeUntypedInvocation_ccorres_helper: \ invs' s \ ksCurThread s = thread \ valid_cap' r s \ (\rf\cte_refs' r (irq_node' s). 
ex_cte_cap_to' rf s) - \ sch_act_simple s \ ct_active' s" in hoare_post_imp_R) + \ sch_act_simple s \ ct_active' s" in hoare_strengthen_postE_R) apply clarsimp apply (wp injection_wp_E[OF refl] getSlotCap_cap_to' getSlotCap_capAligned @@ -3405,8 +3386,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (clarsimp simp: hd_drop_conv_nth2 hd_conv_nth neq_Nil_lengthI ct_in_state'_def pred_tcb_at' rf_sr_ksCurThread mask_eq_iff_w2p - "StrictC'_thread_state_defs" numeral_eqs[symmetric] - cap_get_tag_isCap cte_wp_at_ctes_of + numeral_eqs[symmetric] cap_get_tag_isCap cte_wp_at_ctes_of unat_eq_0 ccHoarePost_def) apply (rule conjI) apply (clarsimp simp: linorder_not_less isCap_simps) @@ -3478,18 +3458,16 @@ shows apply (rule ccorres_guard_imp2) apply (rule monadic_rewrite_ccorres_assemble) apply (rule_tac isBlocking=isBlocking and isCall=isCall and buffer=buffer - in decodeUntypedInvocation_ccorres_helper[unfolded K_def]) + in decodeUntypedInvocation_ccorres_helper) apply assumption - apply (rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_bindE[OF monadic_rewrite_refl]) - apply (simp add: liftE_bindE stateAssert_def2 bind_assoc) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P=x in monadic_rewrite_gen_asm) - apply simp + apply (simp add: liftE_bindE stateAssert_def2 bind_assoc) + apply (monadic_rewrite_r monadic_rewrite_if_r_True) + apply (monadic_rewrite_r_method monadic_rewrite_symb_exec_r_drop wpsimp) apply (rule monadic_rewrite_refl) - apply (wp | simp)+ - apply (simp add: gets_bind_ign) + apply wpsimp + apply (rule monadic_rewrite_refl) apply (rule monadic_rewrite_refl) apply (clarsimp simp: ex_cte_cap_wp_to'_def excaps_in_mem_def) apply (drule(1) bspec)+ diff --git a/proof/crefine/X64/IpcCancel_C.thy b/proof/crefine/X64/IpcCancel_C.thy index a32787fcf8..b02cee0a07 100644 --- a/proof/crefine/X64/IpcCancel_C.thy +++ b/proof/crefine/X64/IpcCancel_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -12,12 +13,12 @@ context kernel_m begin lemma cready_queues_index_to_C_in_range': - assumes prems: "qdom \ ucast maxDom" "prio \ ucast maxPrio" + assumes prems: "qdom \ maxDomain" "prio \ maxPriority" shows "cready_queues_index_to_C qdom prio < num_tcb_queues" proof - have P: "unat prio < numPriorities" using prems - by (simp add: numPriorities_def seL4_MaxPrio_def Suc_le_lessD unat_le_helper) + by (simp add: numPriorities_def Suc_le_lessD unat_le_helper maxDomain_def maxPriority_def) have Q: "unat qdom < numDomains" using prems by (simp add: maxDom_to_H le_maxDomain_eq_less_numDomains word_le_nat_alt) @@ -27,40 +28,22 @@ proof - qed lemmas cready_queues_index_to_C_in_range = - cready_queues_index_to_C_in_range'[simplified num_tcb_queues_def] + cready_queues_index_to_C_in_range'[simplified num_tcb_queues_val] lemma cready_queues_index_to_C_inj: "\ cready_queues_index_to_C qdom prio = cready_queues_index_to_C qdom' prio'; - prio \ ucast maxPrio; prio' \ ucast maxPrio \ \ prio = prio' \ qdom = qdom'" + prio \ maxPriority; prio' \ maxPriority \ \ prio = prio' \ qdom = qdom'" apply (rule context_conjI) - apply (auto simp: cready_queues_index_to_C_def numPriorities_def + apply (auto simp: cready_queues_index_to_C_def numPriorities_def maxPriority_def seL4_MaxPrio_def word_le_nat_alt dest: arg_cong[where f="\x. 
x mod 256"]) done lemma cready_queues_index_to_C_distinct: - "\ qdom = qdom' \ prio \ prio'; prio \ ucast maxPrio; prio' \ ucast maxPrio \ + "\ qdom = qdom' \ prio \ prio'; prio \ maxPriority; prio' \ maxPriority \ \ cready_queues_index_to_C qdom prio \ cready_queues_index_to_C qdom' prio'" apply (auto simp: cready_queues_index_to_C_inj) done -lemma cstate_relation_ksReadyQueues_update: - "\ cstate_relation hs cs; arr = ksReadyQueues_' cs; - sched_queue_relation' (clift (t_hrs_' cs)) v (head_C v') (end_C v'); - qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ cstate_relation (ksReadyQueues_update (\qs. qs ((qdom, prio) := v)) hs) - (ksReadyQueues_'_update (\_. Arrays.update arr - (cready_queues_index_to_C qdom prio) v') cs)" - apply (clarsimp simp: cstate_relation_def Let_def - cmachine_state_relation_def - carch_state_relation_def carch_globals_def - cready_queues_relation_def seL4_MinPrio_def minDom_def) - apply (frule cready_queues_index_to_C_in_range, assumption) - apply clarsimp - apply (frule_tac qdom=qdoma and prio=prioa in cready_queues_index_to_C_in_range, assumption) - apply (frule cready_queues_index_to_C_distinct, assumption+) - apply clarsimp - done - lemma cmap_relation_drop_fun_upd: "\ cm x = Some v; \v''. rel v'' v = rel v'' v' \ \ cmap_relation am (cm (x \ v')) f rel @@ -71,16 +54,6 @@ lemma cmap_relation_drop_fun_upd: apply (auto split: if_split) done -lemma valid_queuesD': - "\ obj_at' (inQ d p) t s; valid_queues' s \ - \ t \ set (ksReadyQueues s (d, p))" - by (simp add: valid_queues'_def) - -lemma invs_valid_queues'[elim!]: - "invs' s \ valid_queues' s" - by (simp add: invs'_def valid_state'_def) - - lemma ntfn_ptr_get_queue_spec: "\s. \ \ {\. s = \ \ \ \\<^sub>c \<^bsup>\\<^esup>ntfnPtr} \ret__struct_tcb_queue_C :== PROC ntfn_ptr_get_queue(\ntfnPtr) \head_C \ret__struct_tcb_queue_C = Ptr (ntfnQueue_head_CL (notification_lift (the (cslift s \<^bsup>s\<^esup>ntfnPtr)))) \ @@ -219,7 +192,7 @@ lemma cancelSignal_ccorres_helper: apply (drule (2) ntfn_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) - apply (frule null_ep_queue [simplified Fun.comp_def]) + apply (frule null_ep_queue [simplified comp_def]) apply (intro impI conjI allI) \ \empty case\ apply clarsimp @@ -235,22 +208,19 @@ lemma cancelSignal_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def) - apply (simp add: carch_state_relation_def carch_globals_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def) + 
apply (simp add: carch_state_relation_def carch_globals_def) apply (clarsimp simp: carch_state_relation_def carch_globals_def typ_heap_simps' packed_heap_update_collapse_hrs fpu_null_state_heap_update_tag_disj_simps @@ -274,33 +244,30 @@ lemma cancelSignal_ccorres_helper: apply (elim conjE) apply (intro conjI) \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue) - apply fastforce - apply assumption+ - apply simp - apply (erule (1) map_to_ko_atI') + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue) + apply fastforce + apply assumption+ + apply simp + apply (erule (1) map_to_ko_atI') \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def isWaitingNtfn_def - split: ntfn.splits split del: if_split) - apply (erule iffD1 [OF tcb_queue_relation'_cong [OF refl _ _ refl], rotated -1]) - apply (clarsimp simp add: h_t_valid_clift_Some_iff) - apply (subst tcb_queue_relation'_next_sign; assumption?) - apply fastforce - apply (simp add: notification_lift_def sign_extend_sign_extend_eq) - apply (clarsimp simp: h_t_valid_clift_Some_iff notification_lift_def sign_extend_sign_extend_eq) - apply (subst tcb_queue_relation'_prev_sign; assumption?) + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def isWaitingNtfn_def + split: ntfn.splits split del: if_split) + apply (erule iffD1 [OF tcb_queue_relation'_cong [OF refl _ _ refl], rotated -1]) + apply (clarsimp simp add: h_t_valid_clift_Some_iff) + apply (subst tcb_queue_relation'_next_sign; assumption?) apply fastforce - apply simp + apply (simp add: notification_lift_def sign_extend_sign_extend_eq) + apply (clarsimp simp: h_t_valid_clift_Some_iff notification_lift_def sign_extend_sign_extend_eq) + apply (subst tcb_queue_relation'_prev_sign; assumption?) 
+ apply fastforce apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply simp subgoal by (clarsimp simp: carch_state_relation_def carch_globals_def fpu_null_state_heap_update_tag_disj_simps global_ioport_bitmap_heap_update_tag_disj_simps @@ -436,9 +403,9 @@ lemma isStopped_ccorres [corres]: apply vcg apply clarsimp apply clarsimp - apply (clarsimp simp: to_bool_def true_def false_def typ_heap_simps - ctcb_relation_thread_state_to_tsType split: thread_state.splits) - apply (simp add: "StrictC'_thread_state_defs")+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ done lemma isRunnable_ccorres [corres]: @@ -464,71 +431,9 @@ lemma isRunnable_ccorres [corres]: apply (vcg) apply (clarsimp) apply (clarsimp) - apply (clarsimp simp: to_bool_def true_def false_def typ_heap_simps - ctcb_relation_thread_state_to_tsType split: thread_state.splits) - apply (simp add: "StrictC'_thread_state_defs")+ -done - - - -lemma tcb_queue_relation_update_head: - fixes getNext_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" and - getPrev_update :: "(tcb_C ptr \ tcb_C ptr) \ tcb_C \ tcb_C" - assumes qr: "tcb_queue_relation getNext getPrev mp queue NULL qhead" - and qh': "qhead' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qhead' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qhead' \ NULL" - and fgN: "fg_cons getNext (getNext_update \ (\x _. x))" - and fgP: "fg_cons getPrev (getPrev_update \ (\x _. x))" - and npu: "\f t. getNext (getPrev_update f t) = getNext t" - and pnu: "\f t. getPrev (getNext_update f t) = getPrev t" - shows "tcb_queue_relation getNext getPrev - (upd_unless_null qhead (getPrev_update (\_. qhead') (the (mp qhead))) - (mp(qhead' := Some (getPrev_update (\_. NULL) (getNext_update (\_. qhead) tcb))))) - (ctcb_ptr_to_tcb_ptr qhead' # queue) NULL qhead'" - using qr qh' cs_tcb valid_ep qhN - apply (subgoal_tac "qhead \ qhead'") - apply (clarsimp simp: pnu upd_unless_null_def fg_consD1 [OF fgN] fg_consD1 [OF fgP] npu) - apply (cases queue) - apply simp - apply (frule (2) tcb_queue_relation_next_not_NULL) - apply simp - apply (clarsimp simp: fg_consD1 [OF fgN] fg_consD1 [OF fgP] pnu npu) - apply (subst tcb_queue_relation_cong [OF refl refl refl, where mp' = mp]) - apply (clarsimp simp: inj_eq) - apply (intro impI conjI) - apply (frule_tac x = x in imageI [where f = tcb_ptr_to_ctcb_ptr]) - apply simp - apply simp - apply simp - apply clarsimp - apply (cases queue) - apply simp - apply simp - done - -lemma tcbSchedEnqueue_update: - assumes sr: "sched_queue_relation' mp queue qhead qend" - and qh': "qhead' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qhead' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qhead' \ NULL" - shows - "sched_queue_relation' - (upd_unless_null qhead (tcbSchedPrev_C_update (\_. 
qhead') (the (mp qhead))) - (mp(qhead' \ tcb\tcbSchedNext_C := qhead, tcbSchedPrev_C := NULL\))) - (ctcb_ptr_to_tcb_ptr qhead' # queue) qhead' (if qend = NULL then qhead' else qend)" - using sr qh' cs_tcb valid_ep qhN - apply - - apply (erule tcb_queue_relationE') - apply (rule tcb_queue_relationI') - apply (erule (5) tcb_queue_relation_update_head - [where getNext_update = tcbSchedNext_C_update and getPrev_update = tcbSchedPrev_C_update], simp_all)[1] - apply simp - apply (intro impI) - apply (erule (1) tcb_queue_relation_not_NULL') - apply simp + apply (clarsimp simp: typ_heap_simps ctcb_relation_thread_state_to_tsType + split: thread_state.splits) + apply (simp add: ThreadState_defs)+ done lemma tcb_ptr_to_ctcb_ptr_imageD: @@ -543,93 +448,6 @@ lemma ctcb_ptr_to_tcb_ptr_imageI: apply simp done -lemma tcb_queue'_head_end_NULL: - assumes qr: "tcb_queue_relation' getNext getPrev mp queue qhead qend" - and tat: "\t\set queue. tcb_at' t s" - shows "(qend = NULL) = (qhead = NULL)" - using qr tat - apply - - apply (erule tcb_queue_relationE') - apply (simp add: tcb_queue_head_empty_iff) - apply (rule impI) - apply (rule tcb_at_not_NULL) - apply (erule bspec) - apply simp - done - -lemma tcb_queue_relation_qhead_mem: - "\ tcb_queue_relation getNext getPrev mp queue NULL qhead; - (\tcb\set queue. tcb_at' tcb t) \ - \ qhead \ NULL \ ctcb_ptr_to_tcb_ptr qhead \ set queue" - by (clarsimp simp: tcb_queue_head_empty_iff tcb_queue_relation_head_hd) - -lemma tcb_queue_relation_qhead_valid: - "\ tcb_queue_relation getNext getPrev (cslift s') queue NULL qhead; - (s, s') \ rf_sr; (\tcb\set queue. tcb_at' tcb s) \ - \ qhead \ NULL \ s' \\<^sub>c qhead" - apply (frule (1) tcb_queue_relation_qhead_mem) - apply clarsimp - apply(drule (3) tcb_queue_memberD) - apply (simp add: h_t_valid_clift_Some_iff) - done - -lemmas tcb_queue_relation_qhead_mem' = tcb_queue_relation_qhead_mem [OF tcb_queue_relation'_queue_rel] -lemmas tcb_queue_relation_qhead_valid' = tcb_queue_relation_qhead_valid [OF tcb_queue_relation'_queue_rel] - - -lemma valid_queues_valid_q: - "valid_queues s \ (\tcb\set (ksReadyQueues s (qdom, prio)). tcb_at' tcb s) \ distinct (ksReadyQueues s (qdom, prio))" - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec [where x = qdom]) - apply (drule spec [where x = prio]) - apply clarsimp - apply (drule (1) bspec, erule obj_at'_weakenE) - apply simp - done - -lemma invs_valid_q: - "invs' s \ (\tcb\set (ksReadyQueues s (qdom, prio)). 
tcb_at' tcb s) \ distinct (ksReadyQueues s (qdom, prio))" - apply (rule valid_queues_valid_q) - apply (clarsimp simp: invs'_def valid_state'_def) - done - -lemma tcbQueued_not_in_queues: - assumes vq: "valid_queues s" - and objat: "obj_at' (Not \ tcbQueued) thread s" - shows "thread \ set (ksReadyQueues s (d, p))" - using vq objat - apply - - apply clarsimp - apply (drule (1) valid_queues_obj_at'D) - apply (erule obj_atE')+ - apply (clarsimp simp: inQ_def) - done - - -lemma rf_sr_sched_queue_relation: - "\ (s, s') \ rf_sr; d \ ucast maxDom; p \ ucast maxPrio \ - \ sched_queue_relation' (cslift s') (ksReadyQueues s (d, p)) - (head_C (index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p))) - (end_C (index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p)))" - unfolding rf_sr_def cstate_relation_def cready_queues_relation_def - apply (clarsimp simp: Let_def seL4_MinPrio_def minDom_def) - done - -lemma ready_queue_not_in: - assumes vq: "valid_queues s" - and inq: "t \ set (ksReadyQueues s (d, p))" - and neq: "d \ d' \ p \ p'" - shows "t \ set (ksReadyQueues s (d', p'))" -proof - assume "t \ set (ksReadyQueues s (d', p'))" - hence "obj_at' (inQ d' p') t s" using vq by (rule valid_queues_obj_at'D) - moreover have "obj_at' (inQ d p) t s" using inq vq by (rule valid_queues_obj_at'D) - ultimately show False using neq - by (clarsimp elim!: obj_atE' simp: inQ_def) -qed - lemma ctcb_relation_unat_prio_eq: "ctcb_relation tcb tcb' \ unat (tcbPriority tcb) = unat (tcbPriority_C tcb')" apply (clarsimp simp: ctcb_relation_def) @@ -663,145 +481,6 @@ lemma threadSet_queued_ccorres [corres]: apply (clarsimp simp: typ_heap_simps) done -lemma ccorres_pre_getQueue: - assumes cc: "\queue. ccorres r xf (P queue) (P' queue) hs (f queue) c" - shows "ccorres r xf (\s. P (ksReadyQueues s (d, p)) s \ d \ maxDomain \ p \ maxPriority) - {s'. \queue. (let cqueue = index (ksReadyQueues_' (globals s')) - (cready_queues_index_to_C d p) in - sched_queue_relation' (cslift s') queue (head_C cqueue) (end_C cqueue)) \ s' \ P' queue} - hs (getQueue d p >>= (\queue. f queue)) c" - apply (rule ccorres_guard_imp2) - apply (rule ccorres_symb_exec_l2) - defer - defer - apply (rule gq_sp) - defer - apply (rule ccorres_guard_imp) - apply (rule cc) - apply clarsimp - apply assumption - apply assumption - apply (clarsimp simp: getQueue_def gets_exs_valid) - apply clarsimp - apply (drule spec, erule mp) - apply (simp add: Let_def) - apply (erule rf_sr_sched_queue_relation) - apply (simp add: maxDom_to_H maxPrio_to_H)+ - done - -lemma state_relation_queue_update_helper': - "\ (s, s') \ rf_sr; - (\d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p))); - globals t = ksReadyQueues_'_update - (\_. Arrays.update (ksReadyQueues_' (globals s')) prio' q') - (t_hrs_'_update f (globals s')); - sched_queue_relation' (cslift t) q (head_C q') (end_C q'); - cslift t |` ( - tcb_ptr_to_ctcb_ptr ` S ) - = cslift s' |` ( - tcb_ptr_to_ctcb_ptr ` S ); - option_map tcb_null_sched_ptrs \ cslift t - = option_map tcb_null_sched_ptrs \ cslift s'; - cslift_all_but_tcb_C t s'; - zero_ranges_are_zero (gsUntypedZeroRanges s) (f (t_hrs_' (globals s'))) - = zero_ranges_are_zero (gsUntypedZeroRanges s) (t_hrs_' (globals s')); - hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s')); - prio' = cready_queues_index_to_C qdom prio; - \x \ S. obj_at' (inQ qdom prio) x s - \ (obj_at' (\tcb. tcbPriority tcb = prio) x s - \ obj_at' (\tcb. tcbDomain tcb = qdom) x s) - \ (tcb_at' x s \ (\d' p'. 
(d' \ qdom \ p' \ prio) - \ x \ set (ksReadyQueues s (d', p')))); - S \ {}; qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ (s \ksReadyQueues := (ksReadyQueues s)((qdom, prio) := q)\, t) \ rf_sr" - apply (subst(asm) disj_imp_rhs) - apply (subst obj_at'_and[symmetric]) - apply (rule disjI1, erule obj_at'_weakenE, simp add: inQ_def) - apply (subst(asm) disj_imp_rhs) - apply (subst(asm) obj_at'_and[symmetric]) - apply (rule conjI, erule obj_at'_weakenE, simp) - apply (rule allI, rule allI) - apply (drule_tac x=d' in spec) - apply (drule_tac x=p' in spec) - apply clarsimp - apply (drule(1) bspec) - apply (clarsimp simp: inQ_def obj_at'_def) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (intro conjI) - \ \cpspace_relation\ - apply (erule nonemptyE, drule(1) bspec) - apply (clarsimp simp: cpspace_relation_def) - apply (drule obj_at_ko_at', clarsimp) - apply (rule cmap_relationE1, assumption, - erule ko_at_projectKO_opt) - apply (frule null_sched_queue) - apply (frule null_sched_epD) - apply (intro conjI) - \ \tcb relation\ - apply (drule ctcb_relation_null_queue_ptrs, - simp_all)[1] - \ \endpoint relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (erule cendpoint_relation_upd_tcb_no_queues, simp+) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (erule cnotification_relation_upd_tcb_no_queues, simp+) - \ \ready queues\ - apply (simp add: cready_queues_relation_def Let_def cready_queues_index_to_C_in_range - seL4_MinPrio_def minDom_def) - apply clarsimp - apply (frule cready_queues_index_to_C_distinct, assumption+) - apply (clarsimp simp: cready_queues_index_to_C_in_range all_conj_distrib) - apply (rule iffD1 [OF tcb_queue_relation'_cong[OF refl], rotated -1], - drule spec, drule spec, erule mp, simp+) - apply clarsimp - apply (drule_tac x="tcb_ptr_to_ctcb_ptr x" in fun_cong)+ - apply (clarsimp simp: restrict_map_def - split: if_split_asm) - by (auto simp: carch_state_relation_def cmachine_state_relation_def - elim!: fpu_null_state_typ_heap_preservation) - -lemma state_relation_queue_update_helper: - "\ (s, s') \ rf_sr; valid_queues s; - globals t = ksReadyQueues_'_update - (\_. Arrays.update (ksReadyQueues_' (globals s')) prio' q') - (t_hrs_'_update f (globals s')); - sched_queue_relation' (cslift t) q (head_C q') (end_C q'); - cslift t |` ( - tcb_ptr_to_ctcb_ptr ` S ) - = cslift s' |` ( - tcb_ptr_to_ctcb_ptr ` S ); - option_map tcb_null_sched_ptrs \ cslift t - = option_map tcb_null_sched_ptrs \ cslift s'; - cslift_all_but_tcb_C t s'; - zero_ranges_are_zero (gsUntypedZeroRanges s) (f (t_hrs_' (globals s'))) - = zero_ranges_are_zero (gsUntypedZeroRanges s) (t_hrs_' (globals s')); - hrs_htd (t_hrs_' (globals t)) = hrs_htd (t_hrs_' (globals s')); - prio' = cready_queues_index_to_C qdom prio; - \x \ S. obj_at' (inQ qdom prio) x s - \ (obj_at' (\tcb. tcbPriority tcb = prio) x s - \ obj_at' (\tcb. tcbDomain tcb = qdom) x s) - \ (tcb_at' x s \ (\d' p'. (d' \ qdom \ p' \ prio) - \ x \ set (ksReadyQueues s (d', p')))); - S \ {}; qdom \ ucast maxDom; prio \ ucast maxPrio \ - \ (s \ksReadyQueues := (ksReadyQueues s)((qdom, prio) := q)\, t) \ rf_sr" - apply (subgoal_tac "\d p. (\t\set (ksReadyQueues s (d, p)). 
obj_at' (inQ d p) t s) - \ distinct(ksReadyQueues s (d, p))") - apply (erule(5) state_relation_queue_update_helper', simp_all) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE, clarsimp) - done - -(* FIXME: move *) -lemma from_bool_vals [simp]: - "from_bool True = scast true" - "from_bool False = scast false" - "scast true \ scast false" - by (auto simp add: from_bool_def true_def false_def) - (* FIXME: move *) lemma cmap_relation_no_upd: "\ cmap_relation a c f rel; a p = Some ko; rel ko v; inj f \ \ cmap_relation a (c(f p \ v)) f rel" @@ -846,8 +525,8 @@ lemma cready_queues_index_to_C_def2: lemma ready_queues_index_spec: "\s. \ \ {s'. s' = s \ (Kernel_Config.numDomains \ 1 \ dom_' s' = 0)} Call ready_queues_index_'proc - \\ret__unsigned_long = (dom_' s) * 0x100 + (prio_' s)\" - by vcg (simp add: numDomains_sge_1_simp) + \\ret__unsigned_long = (dom_' s) * word_of_nat numPriorities + (prio_' s)\" + by vcg (simp add: numDomains_sge_1_simp numPriorities_def) lemma prio_to_l1index_spec: "\s. \ \ {s} Call prio_to_l1index_'proc @@ -942,56 +621,6 @@ lemma cbitmap_L2_relation_bit_set: apply (case_tac "da = d" ; clarsimp simp: num_domains_index_updates) done -lemma carch_state_relation_enqueue_simp: - "carch_state_relation (ksArchState \) - (t_hrs_'_update f - (globals \' \ksReadyQueuesL1Bitmap_' := l1upd, ksReadyQueuesL2Bitmap_' := l2upd \) - \ksReadyQueues_' := rqupd \) = - carch_state_relation (ksArchState \) (t_hrs_'_update f (globals \'))" - unfolding carch_state_relation_def - by clarsimp - -lemma t_hrs_ksReadyQueues_upd_absorb: - "t_hrs_'_update f (g s) \ksReadyQueues_' := rqupd \ = - t_hrs_'_update f (g s \ksReadyQueues_' := rqupd\)" - by simp - -lemma rf_sr_drop_bitmaps_enqueue_helper: - "\ (\,\') \ rf_sr ; - cbitmap_L1_relation ksqL1upd' ksqL1upd ; cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ - ((\\ksReadyQueues := ksqupd, ksReadyQueuesL1Bitmap := ksqL1upd, ksReadyQueuesL2Bitmap := ksqL2upd\, - \'\idx_' := i', queue_' := queue_upd', - globals := t_hrs_'_update f - (globals \' - \ksReadyQueuesL1Bitmap_' := ksqL1upd', - ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueues_' := ksqupd'\)\) \ rf_sr) = - ((\\ksReadyQueues := ksqupd\, - \'\idx_' := i', queue_' := queue_upd', - globals := t_hrs_'_update f - (globals \' \ksReadyQueues_' := ksqupd'\)\) \ rf_sr)" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) - -lemma cmachine_state_relation_enqueue_simp: - "cmachine_state_relation (ksMachineState \) - (t_hrs_'_update f - (globals \' \ksReadyQueuesL1Bitmap_' := l1upd, ksReadyQueuesL2Bitmap_' := l2upd \) - \ksReadyQueues_' := rqupd \) = - cmachine_state_relation (ksMachineState \) (t_hrs_'_update f (globals \'))" - unfolding cmachine_state_relation_def - by clarsimp - -lemma tcb_queue_relation'_empty_ksReadyQueues: - "\ sched_queue_relation' (cslift x) (q s) NULL NULL ; \t\ set (q s). 
tcb_at' t s \ \ q s = []" - apply (clarsimp simp add: tcb_queue_relation'_def) - apply (subst (asm) eq_commute) - apply (cases "q s" rule: rev_cases, simp) - apply (clarsimp simp: tcb_at_not_NULL) - done - lemma invert_prioToL1Index_c_simp: "p \ maxPriority \ @@ -1005,13 +634,247 @@ lemma c_invert_assist: "3 - (ucast (p :: priority) >> 6 :: machine_word) < 4" using prio_ucast_shiftr_wordRadix_helper'[simplified wordRadix_def] by - (rule word_less_imp_diff_less, simp_all) +lemma addToBitmap_ccorres: + "ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast prio\) hs + (addToBitmap tdom prio) (Call addToBitmap_'proc)" + supply prio_and_dom_limit_helpers[simp] invert_prioToL1Index_c_simp[simp] + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (frule maxDomain_le_unat_ucast_explicit) + apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (intro conjI impI allI) + apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (rule conjI) + apply (clarsimp intro!: cbitmap_L1_relation_bit_set) + apply (fastforce dest!: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) + done + +lemma rf_sr_tcb_update_twice: + "h_t_valid (hrs_htd (hrs2 (globals s') (t_hrs_' (gs2 (globals s'))))) c_guard + (ptr (t_hrs_' (gs2 (globals s'))) (globals s')) + \ ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs :: tcb_C ptr) (v ths gs)) + (hrs_mem_update (heap_update (ptr ths gs) (v' ths gs)) (hrs2 gs ths))) (gs2 gs)) s') \ rf_sr) + = ((s, globals_update (\gs. t_hrs_'_update (\ths. + hrs_mem_update (heap_update (ptr ths gs) (v ths gs)) (hrs2 gs ths)) (gs2 gs)) s') \ rf_sr)" + by (simp add: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def typ_heap_simps' + carch_state_relation_def cmachine_state_relation_def + packed_heap_update_collapse_hrs) + +lemmas rf_sr_tcb_update_no_queue_gen2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue_gen, simplified] + +lemma tcb_queue_prepend_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueuePrepend queue tcbPtr) (Call tcb_queue_prepend_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. 
queue_' s' = cqueue" + in ccorres_cond_both') + apply fastforce + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueHead queue)) s + \ head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)}" + and R="\head_C cqueue = option_to_ctcb_ptr (tcbQueueHead queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply clarsimp + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma tcb_queue_append_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. tcb_at' tcbPtr s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None) + \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s) + \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueAppend queue tcbPtr) (Call tcb_queue_append_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit lift: tcb_') + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + in ccorres_gen_asm2) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="?abs" + and R'="\\queue = cqueue\" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=ctcb_queue_relation and xf'=queue_' in ccorres_split_nothrow) + apply (rule_tac Q="?abs" + and Q'="\s'. 
queue_' s' = cqueue" + in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply clarsimp + apply (rule ccorres_return[where R=\]) + apply (rule conseqPre, vcg) + apply (fastforce simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_seq_skip'[THEN iffD1]) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def) + apply (clarsimp simp: ctcb_relation_def option_to_ctcb_ptr_def split: if_splits) + apply ceqv + apply simp + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_Guard) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr + \ ko_at' tcb (the (tcbQueueEnd queue)) s + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)}" + and R="\end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd queue)\" + in threadSet_ccorres_lemma4[where P=\ and P'=\]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce intro!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' tcb_cte_cases_def cteSizeBits_def + ctcb_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + apply fastforce + apply ceqv + apply (rule ccorres_return_Skip') + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply ceqv + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply wpsimp + apply vcg + apply (vcg exspec=tcb_queue_empty_modifies) + apply clarsimp + apply (frule (1) tcb_at_h_t_valid) + by (force dest: tcb_at_h_t_valid + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def tcbQueueEmpty_def) + +lemma getQueue_ccorres: + "ccorres ctcb_queue_relation queue_' + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (getQueue tdom prio) (\queue :== \ksReadyQueues.[unat \idx])" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: getQueue_def gets_def get_def bind_def return_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + done + +lemma setQueue_ccorres: + "ctcb_queue_relation queue cqueue \ + ccorres dc xfdc + (K (tdom \ maxDomain \ prio \ maxPriority)) + \\idx = word_of_nat (cready_queues_index_to_C tdom prio)\ hs + (setQueue tdom prio queue) + (Basic (\s. globals_update + (ksReadyQueues_'_update + (\_. 
Arrays.update (ksReadyQueues_' (globals s)) (unat (idx_' s)) cqueue)) s))" + apply (rule ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: setQueue_def get_def modify_def put_def bind_def) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + carch_state_relation_def cmachine_state_relation_def) + apply (frule (1) cready_queues_index_to_C_in_range) + apply (clarsimp simp: unat_of_nat_eq cready_queues_relation_def) + apply (frule cready_queues_index_to_C_distinct, assumption+) + apply (frule_tac qdom=d and prio=p in cready_queues_index_to_C_in_range) + apply fastforce + apply clarsimp + done + +crunch (empty_fail) empty_fail[wp]: isRunnable + lemma tcbSchedEnqueue_ccorres: "ccorres dc xfdc - (valid_queues and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - hs - (tcbSchedEnqueue t) - (Call tcbSchedEnqueue_'proc)" + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedEnqueue t) (Call tcbSchedEnqueue_'proc)" proof - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] note invert_prioToL1Index_c_simp[simp] @@ -1022,24 +885,12 @@ proof - show ?thesis apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" - in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def unless_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac runnable) + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" + in ccorres_split_nothrow) apply (rule threadGet_vcg_corres) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -1047,245 +898,244 @@ proof - apply (drule spec, drule(1) mp, clarsimp) apply (clarsimp simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="\s. valid_queues s \ (\p. t \ set (ksReadyQueues s p)) - \ (\tcb. 
ko_at' tcb t s \ tcbDomain tcb =rva - \ tcbPriority tcb = rvb \ valid_tcb' tcb s)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs null_def) - apply (clarsimp simp: queue_in_range valid_tcb'_def) - apply (rule conjI; clarsimp simp: queue_in_range) - (* queue is empty, set t to be new queue *) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (subgoal_tac - "head_C (ksReadyQueues_' (globals x) - .[cready_queues_index_to_C (tcbDomain tcb) (tcbPriority tcb)]) = NULL") - prefer 2 - apply (frule_tac s=\ in tcb_queue'_head_end_NULL; simp add: valid_queues_valid_q) - apply (subgoal_tac - "end_C (ksReadyQueues_' (globals x) - .[cready_queues_index_to_C (tcbDomain tcb) (tcbPriority tcb)]) = NULL") - prefer 2 - apply (frule_tac s=\ in tcb_queue'_head_end_NULL[symmetric]; simp add: valid_queues_valid_q) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (frule maxDomain_le_unat_ucast_explicit) - apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (simp add: t_hrs_ksReadyQueues_upd_absorb) - - apply (rule conjI) - apply (clarsimp simp: l2BitmapSize_def' wordRadix_def c_invert_assist) - apply (subst rf_sr_drop_bitmaps_enqueue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_set) - apply (fastforce intro: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) - - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (drule_tac qhead'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedEnqueue_update, - simp_all add: valid_queues_valid_q)[1] - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (erule(1) state_relation_queue_update_helper[where S="{t}"], - (simp | rule globals.equality)+, - simp_all add: cready_queues_index_to_C_def2 numPriorities_def - t_hrs_ksReadyQueues_upd_absorb upd_unless_null_def - typ_heap_simps)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def typ_heap_simps c_guard_clift - elim: obj_at'_weaken)+ - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply clarsimp - apply (rule conjI; clarsimp simp: queue_in_range) - (* invalid, disagreement between C and Haskell on emptiness of queue *) - apply (drule (1) obj_at_cslift_tcb) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def) - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply clarsimp - apply (drule tcb_queue_relation'_empty_ksReadyQueues; simp add: valid_queues_valid_q) - (* queue was not empty, add t to queue and leave bitmaps alone *) - apply (drule (1) obj_at_cslift_tcb) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (clarsimp simp: 
cready_queues_index_to_C_def numPriorities_def) - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply clarsimp - apply (frule_tac t=\ in tcb_queue_relation_qhead_mem') - apply (simp add: valid_queues_valid_q) - apply (frule(1) tcb_queue_relation_qhead_valid') - apply (simp add: valid_queues_valid_q) - apply (clarsimp simp: typ_heap_simps h_t_valid_clift_Some_iff numPriorities_def - cready_queues_index_to_C_def2) - apply (drule_tac qhead'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedEnqueue_update, - simp_all add: valid_queues_valid_q)[1] + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) apply clarsimp - - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (frule(2) obj_at_cslift_tcb[OF valid_queues_obj_at'D]) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="{t, v}" for v in state_relation_queue_update_helper, - (simp | rule globals.equality)+, - simp_all add: typ_heap_simps if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 upd_unless_null_def - del: fun_upd_restrict_conv - cong: if_cong - split del: if_split)[1] - apply simp - apply (rule conjI) + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. 
queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_prepend_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg apply clarsimp - apply (drule_tac s="tcb_ptr_to_ctcb_ptr t" in sym, simp) - apply (clarsimp simp add: fun_upd_twist) - prefer 4 - apply (simp add: obj_at'_weakenE[OF _ TrueI]) - apply (rule disjI1, erule (1) valid_queues_obj_at'D) - apply clarsimp - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (simp add: fpu_state_preservation[OF _ h_t_valid_clift] typ_heap_simps') - apply (simp add: typ_heap_simps c_guard_clift) - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) - apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - apply (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def inQ_def - dest!: valid_queues_obj_at'D) - done + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2) + done qed -lemmas tcbSchedDequeue_update - = tcbDequeue_update[where tn=tcbSchedNext_C and tn_update=tcbSchedNext_C_update - and tp=tcbSchedPrev_C and tp_update=tcbSchedPrev_C_update, - simplified] - -lemma tcb_queue_relation_prev_next: - "\ tcb_queue_relation tn tp mp queue qprev qhead; - tcbp \ set queue; distinct (ctcb_ptr_to_tcb_ptr qprev # queue); - \t \ set queue. 
tcb_at' t s; qprev \ tcb_Ptr 0 \ mp qprev \ None; - mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb \ - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tn tcb) \ None \ tn tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tp tcb \ tcb_Ptr 0 \ (tp tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ tp tcb = qprev) - \ mp (tp tcb) \ None \ tp tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tp tcb)" - apply (induct queue arbitrary: qprev qhead) - apply simp - apply simp - apply (erule disjE) - apply clarsimp - apply (case_tac "queue") - apply clarsimp - apply clarsimp - apply (rule conjI) - apply clarsimp - apply clarsimp - apply (drule_tac f=ctcb_ptr_to_tcb_ptr in arg_cong[where y="tp tcb"], simp) - apply clarsimp - apply fastforce - done - -lemma tcb_queue_relation_prev_next': - "\ tcb_queue_relation' tn tp mp queue qhead qend; tcbp \ set queue; distinct queue; - \t \ set queue. tcb_at' t s; mp (tcb_ptr_to_ctcb_ptr tcbp) = Some tcb \ - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tn tcb) \ None \ tn tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tp tcb \ tcb_Ptr 0 \ tp tcb \ tcb_ptr_to_ctcb_ptr ` set queue - \ mp (tp tcb) \ None \ tp tcb \ tcb_ptr_to_ctcb_ptr tcbp) - \ (tn tcb \ tcb_Ptr 0 \ tn tcb \ tp tcb)" - apply (clarsimp simp: tcb_queue_relation'_def split: if_split_asm) - apply (drule(1) tcb_queue_relation_prev_next, simp_all) - apply (fastforce dest: tcb_at_not_NULL) - apply clarsimp - done - -(* L1 bitmap only updated if L2 entry bits end up all zero *) -lemma rf_sr_drop_bitmaps_dequeue_helper_L2: - "\ (\,\') \ rf_sr ; - cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ -((\\ksReadyQueues := ksqupd, - ksReadyQueuesL2Bitmap := ksqL2upd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueues_' := ksqupd'\\) - \ rf_sr) - = -((\\ksReadyQueues := ksqupd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueues_' := ksqupd'\\) \ rf_sr) -" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) - -lemma rf_sr_drop_bitmaps_dequeue_helper: - "\ (\,\') \ rf_sr ; - cbitmap_L1_relation ksqL1upd' ksqL1upd ; cbitmap_L2_relation ksqL2upd' ksqL2upd \ - \ -((\\ksReadyQueues := ksqupd, - ksReadyQueuesL2Bitmap := ksqL2upd, - ksReadyQueuesL1Bitmap := ksqL1upd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueuesL2Bitmap_' := ksqL2upd', - ksReadyQueuesL1Bitmap_' := ksqL1upd', - ksReadyQueues_' := ksqupd'\\) - \ rf_sr) - = -((\\ksReadyQueues := ksqupd\, - \'\idx_' := i', - queue_' := queue_upd', - globals := globals \' - \ksReadyQueues_' := ksqupd'\\) \ rf_sr) -" - unfolding rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def - by (clarsimp simp: rf_sr_cbitmap_L1_relation rf_sr_cbitmap_L2_relation) +lemma tcbSchedAppend_ccorres: + "ccorres dc xfdc + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedAppend t) (Call tcbSchedAppend_'proc)" +proof - + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] + (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the + shape of the proof compared to when numDomains > 1 *) + note word_less_1[simp del] + show ?thesis + apply (cinit lift: tcb_') + apply (rule 
ccorres_stateAssert)+ + apply (rule ccorres_symb_exec_l) + apply (rule ccorres_assert) + apply (thin_tac "runnable") + apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def unless_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_rhs_assoc2)+ + apply (simp only: bind_assoc[symmetric]) + apply (rule ccorres_split_nothrow_novcg_dc) + prefer 2 + apply (rule ccorres_move_c_guard_tcb) + apply (simp only: dc_def[symmetric]) + apply ctac + apply (rule ccorres_rhs_assoc)+ + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rename_tac queue cqueue) + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue)" + and R="\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)" + and R'="{s'. 
queue_' s' = cqueue}" + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_cond[where R=\]) + apply (fastforce dest!: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (ctac add: addToBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply ceqv + apply (ctac add: tcb_queue_append_ccorres) + apply (rule ccorres_Guard) + apply (rule setQueue_ccorres) + apply fastforce + apply wpsimp + apply (vcg exspec=tcb_queue_prepend_modifies) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + apply (vcg exspec=addToBitmap_modifies) + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) + apply clarsimp + apply vcg + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply wpsimp + apply (wpsimp wp: isRunnable_wp) + apply wpsimp + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (frule (1) obj_at_cslift_tcb) + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (frule (3) obj_at'_tcbQueueEnd_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (rule conjI) + apply (clarsimp simp: maxDomain_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (clarsimp simp: word_less_nat_alt cready_queues_index_to_C_def2 tcbQueueEmpty_def) + done +qed (* FIXME same proofs as bit_set, maybe can generalise? *) lemma cbitmap_L1_relation_bit_clear: @@ -1302,27 +1152,6 @@ lemma cbitmap_L1_relation_bit_clear: invertL1Index_def l2BitmapSize_def' le_maxDomain_eq_less_numDomains word_le_nat_alt num_domains_index_updates) -lemma cready_queues_relation_empty_queue_helper: - "\ tcbDomain ko \ maxDomain ; tcbPriority ko \ maxPriority ; - cready_queues_relation (cslift \') (ksReadyQueues_' (globals \')) (ksReadyQueues \)\ - \ - cready_queues_relation (cslift \') - (Arrays.update (ksReadyQueues_' (globals \')) (unat (tcbDomain ko) * 256 + unat (tcbPriority ko)) - (tcb_queue_C.end_C_update (\_. NULL) - (head_C_update (\_. 
NULL) - (ksReadyQueues_' (globals \').[unat (tcbDomain ko) * 256 + unat (tcbPriority ko)])))) - ((ksReadyQueues \)((tcbDomain ko, tcbPriority ko) := []))" - unfolding cready_queues_relation_def Let_def - using maxPrio_to_H[simp] maxDom_to_H[simp] - apply clarsimp - apply (frule (1) cready_queues_index_to_C_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (fold cready_queues_index_to_C_def[simplified numPriorities_def]) - apply (case_tac "qdom = tcbDomain ko", - simp_all add: prio_and_dom_limit_helpers seL4_MinPrio_def - minDom_def) - apply (fastforce simp: cready_queues_index_to_C_in_range simp: cready_queues_index_to_C_distinct)+ - done - lemma cbitmap_L2_relationD: "\ cbitmap_L2_relation cbitmap2 abitmap2 ; d \ maxDomain ; i < l2BitmapSize \ \ cbitmap2.[unat d].[i] = abitmap2 (d, i)" @@ -1352,15 +1181,10 @@ lemma cbitmap_L2_relation_bit_clear: apply (case_tac "da = d" ; clarsimp simp: num_domains_index_updates) done -lemma tcbSchedDequeue_ccorres': +lemma removeFromBitmap_ccorres: "ccorres dc xfdc - ((\s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p))) - and valid_queues' and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedDequeue t) - (Call tcbSchedDequeue_'proc)" + (K (tdom \ maxDomain \ prio \ maxPriority)) (\\dom = ucast tdom\ \ \\prio = ucast prio\) hs + (removeFromBitmap tdom prio) (Call removeFromBitmap_'proc)" proof - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] @@ -1369,452 +1193,290 @@ proof - shape of the proof compared to when numDomains > 1 *) include no_less_1_simps - have ksQ_tcb_at': "\s ko d p. - \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct (ksReadyQueues s (d, p)) \ - \t\set (ksReadyQueues s (d, p)). tcb_at' t s" - by (fastforce dest: spec elim: obj_at'_weakenE) - - have invert_l1_index_limit: "\p. invertL1Index (prioToL1Index p) < 4" + have invert_l1_index_limit: "\p. invertL1Index (prioToL1Index p) < l2BitmapSize" unfolding invertL1Index_def l2BitmapSize_def' prioToL1Index_def by simp show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" - in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. 
rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) + supply if_split[split del] + (* pull out static assms *) + apply simp + apply (rule ccorres_grab_asm[where P=\, simplified]) + apply (cinit lift: dom_' prio_') + apply clarsimp + apply csymbr + apply csymbr + (* we can clear up all C guards now *) + apply (clarsimp simp: maxDomain_le_unat_ucast_explicit word_and_less') + apply (simp add: invert_prioToL1Index_c_simp word_less_nat_alt) + apply (simp add: invert_l1_index_limit[simplified l2BitmapSize_def']) + apply ccorres_rewrite + (* handle L2 update *) + apply (rule_tac ccorres_split_nothrow_novcg_dc) + apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) + apply (rule allI, rule conseqPre, vcg) + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L2_relation) + apply (erule cbitmap_L2_relation_update) + apply (erule (1) cbitmap_L2_relation_bit_clear) + (* the check on the C side is identical to checking the L2 entry, rewrite the condition *) + apply (simp add: getReadyQueuesL2Bitmap_def) + apply (rule ccorres_symb_exec_l3, rename_tac l2) + apply (rule_tac C'="{s. l2 = 0}" + and Q="\s. l2 = ksReadyQueuesL2Bitmap s (tdom, invertL1Index (prioToL1Index prio))" + in ccorres_rewrite_cond_sr[where Q'=UNIV]) + apply clarsimp + apply (frule rf_sr_cbitmap_L2_relation) + apply (clarsimp simp: cbitmap_L2_relationD invert_l1_index_limit split: if_split) + (* unset L1 bit when L2 entry is empty *) + apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply (clarsimp simp: simpler_gets_def get_def modify_def + put_def bind_def return_def bitmap_fun_defs) + apply (frule rf_sr_cbitmap_L1_relation) + apply (erule cbitmap_L1_relation_update) + apply (erule (1) cbitmap_L1_relation_bit_clear) + apply wpsimp+ + apply (fastforce simp: guard_is_UNIV_def) + apply clarsimp + done +qed + +lemma ctcb_ptr_to_tcb_ptr_option_to_ctcb_ptr[simp]: + "ctcb_ptr_to_tcb_ptr (option_to_ctcb_ptr (Some ptr)) = ptr" + by (clarsimp simp: option_to_ctcb_ptr_def) + +lemma tcb_queue_remove_ccorres: + "ccorres ctcb_queue_relation ret__struct_tcb_queue_C_' + (\s. 
tcb_at' tcbPtr s \ valid_objs' s + \ (tcbQueueHead queue \ None \ tcbQueueEnd queue \ None)) + (\ctcb_queue_relation queue \queue\ \ \\tcb = tcb_ptr_to_ctcb_ptr tcbPtr\) hs + (tcbQueueRemove queue tcbPtr) (Call tcb_queue_remove_'proc)" + (is "ccorres _ _ ?abs _ _ _ _") + supply if_split[split del] + apply (cinit' lift: tcb_') + apply (rename_tac tcb') + apply (simp only: tcbQueueRemove_def) + \ \cinit is not able to lift queue_' because queue_' is later modified in the C program\ + apply (rule_tac xf'=queue_' in ccorres_abstract, ceqv, rename_tac cqueue) + apply (rule_tac P="ctcb_queue_relation queue cqueue" in ccorres_gen_asm2) + apply (rule ccorres_pre_getObject_tcb, rename_tac tcb) + apply (rule ccorres_symb_exec_l, rename_tac beforePtrOpt) + apply (rule ccorres_symb_exec_l, rename_tac afterPtrOpt) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="before___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr beforePtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedPrev tcb = beforePtrOpt)" + and R'=UNIV + in ccorres_symb_exec_r_known_rv) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="(\s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) - \ distinct(ksReadyQueues s (d, p))) - and valid_queues' and obj_at' (inQ rva rvb) t - and (\s. rva \ maxDomain \ rvb \ maxPriority)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs when_def - null_def) - - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (frule(1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" in rf_sr_sched_queue_relation) - apply (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (frule_tac s=\ in tcb_queue_relation_prev_next'; (fastforce simp: ksQ_tcb_at')?) 
- apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (intro conjI; - clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift)+ - apply (drule(2) filter_empty_unfiltered_contr, simp)+ - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - apply (subst rf_sr_drop_bitmaps_dequeue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_clear) - apply (simp add: invert_prioToL1Index_c_simp) - apply (frule rf_sr_cbitmap_L2_relation) - apply (clarsimp simp: cbitmap_L2_relation_def - word_size prioToL1Index_def wordRadix_def mask_def - word_le_nat_alt - numPriorities_def wordBits_def l2BitmapSize_def' - invertL1Index_def numDomains_less_numeric_explicit) - apply (case_tac "d = tcbDomain ko" - ; fastforce simp: le_maxDomain_eq_less_numDomains - numDomains_less_numeric_explicit) - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - - apply (frule_tac s=\ in tcb_queue_relation_prev_next', assumption) - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by ((fastforce simp: ksQ_tcb_at')+) - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - (* trivial case, setting queue to empty *) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - cmachine_state_relation_def) - apply (erule (2) cready_queues_relation_empty_queue_helper) - (* impossible case, C L2 update disagrees with Haskell update *) - apply (simp add: invert_prioToL1Index_c_simp) - apply (subst (asm) num_domains_index_updates) - subgoal by (simp add: le_maxDomain_eq_less_numDomains word_le_nat_alt) - apply (subst (asm) Arrays.index_update) - apply (simp add: invert_l1_index_limit) - - apply (frule rf_sr_cbitmap_L2_relation) - apply (drule_tac i="invertL1Index (prioToL1Index (tcbPriority ko))" - in cbitmap_L2_relationD, assumption) - apply (fastforce simp: l2BitmapSize_def' invert_l1_index_limit) - apply (fastforce simp: prioToL1Index_def invertL1Index_def mask_def wordRadix_def) - (* impossible case *) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (drule(2) filter_empty_unfiltered_contr, fastforce) - - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply fold_subgoals[2] - apply (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (frule_tac s=\ in tcb_queue_relation_prev_next', assumption) - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: 
le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI, clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (rule conjI; clarsimp) - apply (simp add: typ_heap_simps) - apply (clarsimp simp: h_t_valid_c_guard [OF h_t_valid_field, OF h_t_valid_clift] - h_t_valid_field[OF h_t_valid_clift] h_t_valid_clift) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 typ_heap_simps - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - - apply (fastforce simp: tcb_null_sched_ptrs_def typ_heap_simps c_guard_clift - elim: obj_at'_weaken)+ - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split, - simp_all add: typ_heap_simps')[1] - subgoal by (fastforce simp: tcb_null_sched_ptrs_def) - subgoal by (simp add: fpu_state_preservation[OF _ h_t_valid_clift] typ_heap_simps') - subgoal by fastforce + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac xf'="after___ptr_to_struct_tcb_C_'" + and val="option_to_ctcb_ptr afterPtrOpt" + and R="ko_at' tcb tcbPtr and K (tcbSchedNext tcb = afterPtrOpt)" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: obj_at_cslift_tcb simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_cond_seq) + apply (rule ccorres_cond[where R="?abs"]) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply (fastforce intro: ccorres_return_C') + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) apply clarsimp - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* invalid, missing bitmap updates on haskell side *) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems - by (fastforce dest!: tcb_queue_relation'_empty_ksReadyQueues - elim: obj_at'_weaken)+ - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[4] - subgoal premises prems using prems - by - (fastforce simp: typ_heap_simps c_guard_clift tcb_null_sched_ptrs_def - clift_heap_update_same[OF h_t_valid_clift] - fpu_state_preservation[OF _ h_t_valid_clift])+ - apply (rule conjI; clarsimp simp: queue_in_range[simplified maxDom_to_H maxPrio_to_H]) - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in 
rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (frule_tac s=\ in tcb_queue_relation_prev_next') + apply (rule ccorres_assert2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) apply fastforce - prefer 3 - apply fastforce - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (clarsimp simp: typ_heap_simps) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (drule(2) filter_empty_unfiltered_contr[simplified filter_noteq_op], simp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* impossible case, C L2 update disagrees with Haskell update *) - apply (subst (asm) num_domains_index_updates) - apply (simp add: le_maxDomain_eq_less_numDomains word_le_nat_alt) - apply (subst (asm) Arrays.index_update) - subgoal using invert_l1_index_limit - by (fastforce simp add: invert_prioToL1Index_c_simp intro: nat_Suc_less_le_imp) - apply (frule rf_sr_cbitmap_L2_relation) - apply (simp add: invert_prioToL1Index_c_simp) - apply (drule_tac i="invertL1Index (prioToL1Index (tcbPriority ko))" - in cbitmap_L2_relationD, assumption) - subgoal by (simp add: invert_l1_index_limit l2BitmapSize_def') - apply (fastforce simp: prioToL1Index_def invertL1Index_def mask_def wordRadix_def) - - apply (simp add: invert_prioToL1Index_c_simp) - apply (subst rf_sr_drop_bitmaps_dequeue_helper_L2, assumption) - subgoal by (fastforce dest: rf_sr_cbitmap_L2_relation elim!: cbitmap_L2_relation_bit_clear) - - (* trivial case, setting queue to empty *) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def - cmachine_state_relation_def) - apply (erule (2) cready_queues_relation_empty_queue_helper) - - apply (frule (1) valid_queuesD') - apply (drule (1) obj_at_cslift_tcb, clarsimp simp: inQ_def) - apply (frule_tac d="tcbDomain ko" and p="tcbPriority ko" - in rf_sr_sched_queue_relation) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce simp: maxDom_to_H maxPrio_to_H)+ - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (simp add: invert_prioToL1Index_c_simp) - apply (frule_tac s=\ in tcb_queue_relation_prev_next') - apply (fastforce simp add: ksQ_tcb_at')+ - apply (drule_tac s=\ in tcbSchedDequeue_update, assumption, - simp_all add: 
remove1_filter ksQ_tcb_at')[1] - apply (clarsimp simp: filter_noteq_op upd_unless_null_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI, clarsimp) - apply (clarsimp simp: h_val_field_clift' - h_t_valid_clift[THEN h_t_valid_field] h_t_valid_clift) - apply (clarsimp simp: typ_heap_simps) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (clarsimp simp: typ_heap_simps) - apply (fastforce simp: typ_heap_simps) - apply (fastforce simp: tcb_null_sched_ptrs_def) - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[4] - subgoal premises prems using prems - by - (fastforce simp: typ_heap_simps c_guard_clift tcb_null_sched_ptrs_def - clift_heap_update_same[OF h_t_valid_clift] - fpu_state_preservation[OF _ h_t_valid_clift])+ - apply (clarsimp) - apply (rule conjI; clarsimp simp: typ_heap_simps) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (rule conjI; clarsimp) - (* invalid, missing bitmap updates on haskell side *) - apply (drule tcb_queue_relation'_empty_ksReadyQueues) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce elim: obj_at'_weaken)+ - (* invalid, missing bitmap updates on haskell side *) - apply (drule tcb_queue_relation'_empty_ksReadyQueues) - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems by (fastforce elim: obj_at'_weaken)+ - apply (erule_tac S="set (ksReadyQueues \ (tcbDomain ko, tcbPriority ko))" - in state_relation_queue_update_helper', - (simp | rule globals.equality)+, - simp_all add: clift_field_update if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 typ_heap_simps - maxDom_to_H maxPrio_to_H - cong: if_cong split del: if_split)[1] - apply (fold_subgoals (prefix))[2] - subgoal premises prems using prems - by (fastforce simp: typ_heap_simps c_guard_clift tcb_null_sched_ptrs_def)+ - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) - apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - by (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def valid_tcb'_def inQ_def) -qed - -lemma tcbSchedDequeue_ccorres: - "ccorres dc xfdc - (valid_queues and valid_queues' and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedDequeue t) - (Call tcbSchedDequeue_'proc)" - apply (rule ccorres_guard_imp [OF tcbSchedDequeue_ccorres']) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp)+ - 
done - -lemma tcb_queue_relation_append: - "\ tcb_queue_relation tn tp mp queue qprev qhead; queue \ []; - qend' \ tcb_ptr_to_ctcb_ptr ` set queue; mp qend' = Some tcb; - queue = queue' @ [ctcb_ptr_to_tcb_ptr qend]; distinct queue; - \x \ set queue. tcb_ptr_to_ctcb_ptr x \ NULL; qend' \ NULL; - \v f g. tn (tn_update f v) = f (tn v) \ tp (tp_update g v) = g (tp v) - \ tn (tp_update f v) = tn v \ tp (tn_update g v) = tp v \ - \ tcb_queue_relation tn tp - (mp (qend \ tn_update (\_. qend') (the (mp qend)), - qend' \ tn_update (\_. NULL) (tp_update (\_. qend) tcb))) - (queue @ [ctcb_ptr_to_tcb_ptr qend']) qprev qhead" - using [[hypsubst_thin = true]] - apply clarsimp - apply (induct queue' arbitrary: qprev qhead) - apply clarsimp - apply clarsimp - done - -lemma tcbSchedAppend_update: - assumes sr: "sched_queue_relation' mp queue qhead qend" - and qh': "qend' \ tcb_ptr_to_ctcb_ptr ` set queue" - and cs_tcb: "mp qend' = Some tcb" - and valid_ep: "\t\set queue. tcb_at' t s" "distinct queue" - and qhN: "qend' \ NULL" - shows - "sched_queue_relation' - (upd_unless_null qend (tcbSchedNext_C_update (\_. qend') (the (mp qend))) - (mp(qend' \ tcb\tcbSchedNext_C := NULL, tcbSchedPrev_C := qend\))) - (queue @ [ctcb_ptr_to_tcb_ptr qend']) (if queue = [] then qend' else qhead) qend'" - using sr qh' valid_ep cs_tcb qhN - apply - - apply (rule rev_cases[where xs=queue]) - apply (simp add: tcb_queue_relation'_def upd_unless_null_def) - apply (clarsimp simp: tcb_queue_relation'_def upd_unless_null_def tcb_at_not_NULL) - apply (drule_tac qend'=qend' and tn_update=tcbSchedNext_C_update - and tp_update=tcbSchedPrev_C_update and qend="tcb_ptr_to_ctcb_ptr y" - in tcb_queue_relation_append, simp_all) - apply (fastforce simp add: tcb_at_not_NULL) - apply (simp add: fun_upd_twist) - done - -lemma tcb_queue_relation_qend_mems: - "\ tcb_queue_relation' getNext getPrev mp queue qhead qend; - \x \ set queue. tcb_at' x s \ - \ (qend = NULL \ queue = []) - \ (qend \ NULL \ ctcb_ptr_to_tcb_ptr qend \ set queue)" - apply (clarsimp simp: tcb_queue_relation'_def) - apply (drule bspec, erule last_in_set) - apply (simp add: tcb_at_not_NULL) + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply (rule ccorres_cond_seq) + apply (rule_tac Q="?abs" and Q'=\ in ccorres_cond_both') + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def split: if_splits) + apply clarsimp + apply (rule ccorres_assert2) + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb. {s'. 
(s, s') \ rf_sr \ ko_at' tcb (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (fastforce intro: ccorres_return_C') + apply vcg + apply (rule conseqPre, vcg) + apply clarsimp + apply wpsimp + apply vcg + apply wpsimp + apply vcg + apply clarsimp + apply (rule ccorres_assert2)+ + apply (rule ccorres_rhs_assoc)+ + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the beforePtrOpt) s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb)+ + apply (rule_tac P=\ and P'="tcb_at' tcbPtr" + and Q="\s tcb'. {s'. (s, s') \ rf_sr \ ko_at' tcb' (the afterPtrOpt) s}" + in threadSet_ccorres_lemma3) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. (s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply clarsimp + apply ceqv + apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) + apply (rule ccorres_move_c_guard_tcb) + apply (rule_tac Q="\s tcb. {s'. 
(s, s') \ rf_sr \ ko_at' tcb tcbPtr s}" + in threadSet_ccorres_lemma3[where P=\ and P'=\, simplified]) + apply (rule conseqPre, vcg) + apply clarsimp + apply (frule (1) obj_at_cslift_tcb[where thread=tcbPtr]) + apply (fastforce elim!: rf_sr_tcb_update_no_queue_gen2 + simp: typ_heap_simps' ctcb_relation_def option_to_ctcb_ptr_def + tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply ceqv + apply (fastforce intro: ccorres_return_C') + apply (wpsimp | vcg)+ + apply (clarsimp split: if_splits) + apply normalise_obj_at' + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + by (intro conjI impI; + clarsimp simp: ctcb_queue_relation_def typ_heap_simps option_to_ctcb_ptr_def + valid_tcb'_def valid_bound_tcb'_def) + +lemma tcbQueueRemove_tcb_at'_head: + "\\s. valid_objs' s \ (\head. tcbQueueHead queue = Some head \ tcb_at' head s)\ + tcbQueueRemove queue t + \\rv s. \ tcbQueueEmpty rv \ tcb_at' (the (tcbQueueHead rv)) s\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp haskell_assert_wp hoare_vcg_imp_lift') + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def valid_bound_tcb'_def tcbQueueEmpty_def obj_at'_def) done -lemma tcbSchedAppend_ccorres: +lemma tcbSchedDequeue_ccorres: "ccorres dc xfdc - (valid_queues and tcb_at' t and valid_objs') - (UNIV \ \\tcb = tcb_ptr_to_ctcb_ptr t\) - [] - (tcbSchedAppend t) - (Call tcbSchedAppend_'proc)" + (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct') + \\tcb = tcb_ptr_to_ctcb_ptr t\ hs + (tcbSchedDequeue t) (Call tcbSchedDequeue_'proc)" proof - note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the shape of the proof compared to when numDomains > 1 *) include no_less_1_simps show ?thesis - apply (cinit lift: tcb_') - apply (rule_tac r'="\rv rv'. rv = to_bool rv'" - and xf'="ret__unsigned_longlong_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (simp add: when_def unless_def del: Collect_const split del: if_split) - apply (rule ccorres_cond[where R=\]) - apply (simp add: to_bool_def) - apply (rule ccorres_rhs_assoc)+ - apply csymbr - apply csymbr - apply csymbr - apply csymbr - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="dom_'" in ccorres_split_nothrow) - apply (rule threadGet_vcg_corres) - apply (rule allI, rule conseqPre, vcg) - apply clarsimp - apply (drule obj_at_ko_at', clarsimp) - apply (drule spec, drule(1) mp, clarsimp) - apply (clarsimp simp: typ_heap_simps ctcb_relation_def) - apply ceqv - apply (rule_tac r'="\rv rv'. rv' = ucast rv" - and xf'="prio_'" in ccorres_split_nothrow) + apply (cinit lift: tcb_') + apply (rule ccorres_stateAssert)+ + apply (rule_tac r'="\rv rv'. 
rv = to_bool rv'" and xf'="ret__unsigned_longlong_'" + in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (simp add: when_def del: Collect_const split del: if_split) + apply (rule ccorres_cond[where R=\]) + apply (simp add: to_bool_def) + apply (rule ccorres_rhs_assoc)+ + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply csymbr + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="dom_'" in ccorres_split_nothrow) apply (rule threadGet_vcg_corres) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -1822,130 +1484,80 @@ proof - apply (drule spec, drule(1) mp, clarsimp) apply (clarsimp simp: typ_heap_simps ctcb_relation_def) apply ceqv - apply (rule ccorres_rhs_assoc2)+ - apply (simp only: bind_assoc[symmetric]) - apply (rule ccorres_split_nothrow_novcg_dc) - prefer 2 - apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) - apply ctac - prefer 2 - apply (wp, clarsimp, wp+) - apply (rule_tac P="\s. valid_queues s \ (\p. t \ set (ksReadyQueues s p)) - \ (\tcb. ko_at' tcb t s \ tcbDomain tcb =rva - \ tcbPriority tcb = rvb \ valid_tcb' tcb s)" - and P'=UNIV in ccorres_from_vcg) - apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: getQueue_def gets_def get_def setQueue_def modify_def - put_def bind_def return_def bitmap_fun_defs null_def) - apply (clarsimp simp: queue_in_range valid_tcb'_def) - apply (rule conjI; clarsimp simp: queue_in_range) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (simp add: invert_prioToL1Index_c_simp) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp) - apply (rule conjI) - apply (fastforce simp: c_invert_assist l2BitmapSize_def' wordRadix_def) - apply (simp add: t_hrs_ksReadyQueues_upd_absorb) - apply (subst rf_sr_drop_bitmaps_enqueue_helper, assumption) - apply (fastforce intro: cbitmap_L1_relation_bit_set) - subgoal by (fastforce intro: cbitmap_L2_relation_bit_set simp: wordRadix_def mask_def) - apply (erule(1) state_relation_queue_update_helper[where S="{t}"], - (simp | rule globals.equality)+, - simp_all add: cready_queues_index_to_C_def2 numPriorities_def - t_hrs_ksReadyQueues_upd_absorb upd_unless_null_def - typ_heap_simps)[1] - apply (fastforce simp: tcb_null_sched_ptrs_def elim: obj_at'_weaken) - apply (fastforce simp: tcb_null_sched_ptrs_def elim: obj_at'_weaken) - apply (clarsimp simp: upd_unless_null_def cready_queues_index_to_C_def numPriorities_def) - apply (rule conjI, solves \clarsimp simp: le_maxDomain_eq_less_numDomains - unat_trans_ucast_helper\) - apply (clarsimp simp: maxDomain_le_unat_ucast_explicit) - apply (rule conjI; clarsimp simp: queue_in_range) - apply 
(drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp - apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, - simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] + apply (rule_tac r'="\rv rv'. rv' = ucast rv" and xf'="prio_'" in ccorres_split_nothrow) + apply (rule threadGet_vcg_corres) + apply (rule allI, rule conseqPre, vcg) apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (clarsimp simp: upd_unless_null_def cready_queues_index_to_C_def numPriorities_def) - apply (drule (1) obj_at_cslift_tcb) - apply clarsimp - apply (frule_tac d="tcbDomain tcb" and p="tcbPriority tcb" - in rf_sr_sched_queue_relation) - apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (drule spec, drule(1) mp, clarsimp) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def) + apply ceqv + apply (rule ccorres_symb_exec_r) + apply (rule ccorres_Guard_Seq) + apply (simp add: bind_assoc) + apply (ctac add: getQueue_ccorres) + apply (rule_tac r'=ctcb_queue_relation and xf'=new_queue_' in ccorres_split_nothrow) + apply (ctac add: tcb_queue_remove_ccorres) + apply ceqv + apply (rename_tac queue' newqueue) + apply (rule ccorres_Guard_Seq) + apply (ctac add: setQueue_ccorres) + apply (rule ccorres_split_nothrow_novcg_dc) + apply ctac + apply (rule_tac xf'=ret__unsigned_long_' + and val="from_bool (tcbQueueEmpty queue')" + and R="\s. \ tcbQueueEmpty queue' \ tcb_at' (the (tcbQueueHead queue')) s" + in ccorres_symb_exec_r_known_rv[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (fastforce dest: tcb_at_not_NULL + simp: ctcb_queue_relation_def option_to_ctcb_ptr_def + tcbQueueEmpty_def split: option.splits) + apply ceqv + apply (rule ccorres_cond[where R=\]) + apply fastforce + apply (ctac add: removeFromBitmap_ccorres) + apply (rule ccorres_return_Skip) + apply vcg + apply (wpsimp wp: hoare_vcg_imp_lift') + apply (clarsimp simp: guard_is_UNIV_def) + apply (wpsimp wp: hoare_vcg_imp_lift') + apply vcg + apply ((wpsimp wp: tcbQueueRemove_tcb_at'_head | wp (once) hoare_drop_imps)+)[1] + apply (vcg exspec=tcb_queue_remove_modifies) + apply wpsimp + apply vcg + apply vcg + apply (rule conseqPre, vcg) apply clarsimp - apply (frule_tac s=\ in tcb_queue'_head_end_NULL) - apply (simp add: valid_queues_valid_q) - apply (frule_tac s=\ in tcb_queue_relation_qend_mems, - simp add: valid_queues_valid_q) - apply (drule_tac qend'="tcb_ptr_to_ctcb_ptr t" and s=\ in tcbSchedAppend_update, - simp_all add: valid_queues_valid_q)[1] - apply clarsimp - apply (rule tcb_at_not_NULL, erule obj_at'_weakenE, simp) - apply (clarsimp simp: cready_queues_index_to_C_def2 numPriorities_def) - apply (frule(2) obj_at_cslift_tcb[OF valid_queues_obj_at'D]) - apply (clarsimp simp: h_val_field_clift' h_t_valid_clift) - apply (erule_tac S="{t, v}" for v in state_relation_queue_update_helper, - (simp | rule globals.equality)+, - simp_all add: typ_heap_simps if_Some_helper numPriorities_def - cready_queues_index_to_C_def2 upd_unless_null_def - cong: if_cong split del: if_split - del: fun_upd_restrict_conv)[1] - apply simp - apply (rule conjI) - apply clarsimp - apply (drule_tac s="tcb_ptr_to_ctcb_ptr t" in sym, simp) - apply (clarsimp simp add: 
fun_upd_twist) - prefer 3 - apply (simp add: obj_at'_weakenE[OF _ TrueI]) - apply (rule disjI1, erule valid_queues_obj_at'D) - subgoal by simp - subgoal by simp - subgoal by (fastforce simp: tcb_null_sched_ptrs_def) - apply (simp add: guard_is_UNIV_def) - apply simp - apply (wp threadGet_wp) + apply (wpsimp wp: threadGet_wp) + apply vcg + apply clarsimp + apply (wpsimp wp: threadGet_wp) apply vcg - apply simp - apply (wp threadGet_wp) - apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply simp - apply (wp threadGet_wp) - apply vcg - by (fastforce simp: valid_objs'_def obj_at'_def ran_def projectKOs typ_at'_def - valid_obj'_def inQ_def - dest!: valid_queues_obj_at'D) + apply (rule ccorres_return_Skip) + apply (wpsimp wp: threadGet_wp) + apply (vcg expsec=thread_state_get_tcbQueued_modifies) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule (1) obj_at_cslift_tcb) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def) + apply (cut_tac qdom="tcbDomain tcb" and prio="tcbPriority tcb" + in cready_queues_index_to_C_in_range) + apply fastforce + apply fastforce + apply (rule conjI) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (force dest!: tcbQueueHead_iff_tcbQueueEnd simp: tcbQueueEmpty_def obj_at'_def) + by (fastforce simp: word_less_nat_alt + cready_queues_index_to_C_def2 ctcb_relation_def + typ_heap_simps le_maxDomain_eq_less_numDomains(2) unat_trans_ucast_helper) qed -lemma true_eq_from_bool [simp]: - "(scast true = from_bool P) = P" - by (simp add: true_def from_bool_def split: bool.splits) - lemma isStopped_spec: "\s. \ \ ({s} \ {s. cslift s (thread_' s) \ None}) Call isStopped_'proc {s'. ret__unsigned_long_' s' = from_bool (tsType_CL (thread_state_lift (tcbState_C (the (cslift s (thread_' s))))) \ @@ -1991,8 +1603,11 @@ lemma tcb_at_1: done lemma rescheduleRequired_ccorres: - "ccorres dc xfdc (valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs') - UNIV [] rescheduleRequired (Call rescheduleRequired_'proc)" + "ccorres dc xfdc + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' + and pspace_aligned' and pspace_distinct') + UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" apply cinit apply (rule ccorres_symb_exec_l) apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) @@ -2102,10 +1717,12 @@ lemma cguard_UNIV: by fastforce lemma lookupBitmapPriority_le_maxPriority: - "\ ksReadyQueuesL1Bitmap s d \ 0 ; valid_queues s \ + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + \d p. 
d > maxDomain \ p > maxPriority \ tcbQueueEmpty (ksReadyQueues s (d, p)); + valid_bitmaps s \ \ lookupBitmapPriority d s \ maxPriority" - unfolding valid_queues_def valid_queues_no_bitmap_def - by (fastforce dest!: bitmapQ_from_bitmap_lookup bitmapQ_ksReadyQueuesI intro: ccontr) + apply (clarsimp simp: valid_bitmaps_def) + by (fastforce dest!: bitmapQ_from_bitmap_lookup bitmapQ_ksReadyQueuesI intro: ccontr) lemma rf_sr_ksReadyQueuesL1Bitmap_not_zero: "\ (\, s') \ rf_sr ; d \ maxDomain ; ksReadyQueuesL1Bitmap_' (globals s').[unat d] \ 0 \ @@ -2115,10 +1732,10 @@ lemma rf_sr_ksReadyQueuesL1Bitmap_not_zero: done lemma ksReadyQueuesL1Bitmap_word_log2_max: - "\valid_queues s; ksReadyQueuesL1Bitmap s d \ 0\ - \ word_log2 (ksReadyQueuesL1Bitmap s d) < l2BitmapSize" - unfolding valid_queues_def - by (fastforce dest: word_log2_nth_same bitmapQ_no_L1_orphansD) + "\valid_bitmaps s; ksReadyQueuesL1Bitmap s d \ 0\ + \ word_log2 (ksReadyQueuesL1Bitmap s d) < l2BitmapSize" + unfolding valid_bitmaps_def + by (fastforce dest: word_log2_nth_same bitmapQ_no_L1_orphansD) lemma word_log2_max_word64[simp]: "word_log2 (w :: 64 word) < 64" @@ -2126,7 +1743,7 @@ lemma word_log2_max_word64[simp]: by (simp add: word_size) lemma rf_sr_ksReadyQueuesL2Bitmap_simp: - "\ (\, s') \ rf_sr ; d \ maxDomain ; valid_queues \ ; ksReadyQueuesL1Bitmap \ d \ 0 \ + "\ (\, s') \ rf_sr ; d \ maxDomain ; valid_bitmaps \ ; ksReadyQueuesL1Bitmap \ d \ 0 \ \ ksReadyQueuesL2Bitmap_' (globals s').[unat d].[word_log2 (ksReadyQueuesL1Bitmap \ d)] = ksReadyQueuesL2Bitmap \ (d, word_log2 (ksReadyQueuesL1Bitmap \ d))" apply (frule rf_sr_cbitmap_L2_relation) @@ -2135,9 +1752,9 @@ lemma rf_sr_ksReadyQueuesL2Bitmap_simp: done lemma ksReadyQueuesL2Bitmap_nonzeroI: - "\ d \ maxDomain ; valid_queues s ; ksReadyQueuesL1Bitmap s d \ 0 \ + "\ d \ maxDomain ; valid_bitmaps s ; ksReadyQueuesL1Bitmap s d \ 0 \ \ ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d))) \ 0" - unfolding valid_queues_def + unfolding valid_bitmaps_def apply clarsimp apply (frule bitmapQ_no_L1_orphansD) apply (erule word_log2_nth_same) @@ -2330,11 +1947,6 @@ lemma getCurDomain_maxDom_ccorres_dom_': rf_sr_ksCurDomain) done -lemma rf_sr_cscheduler_action_relation: - "(s, s') \ rf_sr - \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" - by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - lemma threadGet_get_obj_at'_has_domain: "\ tcb_at' t \ threadGet tcbDomain t \\rv. obj_at' (\tcb. rv = tcbDomain tcb) t\" by (wp threadGet_obj_at') (simp add: obj_at'_def) @@ -2342,16 +1954,15 @@ lemma threadGet_get_obj_at'_has_domain: lemma possibleSwitchTo_ccorres: shows "ccorres dc xfdc - (valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t and (\s. ksCurDomain s \ maxDomain) - and valid_objs') + and valid_objs' and pspace_aligned' and pspace_distinct') ({s. target_' s = tcb_ptr_to_ctcb_ptr t} \ UNIV) [] (possibleSwitchTo t ) (Call possibleSwitchTo_'proc)" supply if_split [split del] supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] @@ -2376,7 +1987,7 @@ lemma possibleSwitchTo_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule_tac R="\s. 
sact = ksSchedulerAction s \ weak_sch_act_wf (ksSchedulerAction s) s" in ccorres_cond) - apply (fastforce dest!: rf_sr_cscheduler_action_relation pred_tcb_at' tcb_at_not_NULL + apply (fastforce dest!: rf_sr_sched_action_relation pred_tcb_at' tcb_at_not_NULL simp: cscheduler_action_relation_def weak_sch_act_wf_def split: scheduler_action.splits) apply (ctac add: rescheduleRequired_ccorres) @@ -2393,8 +2004,8 @@ lemma possibleSwitchTo_ccorres: lemma scheduleTCB_ccorres': "ccorres dc xfdc - (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_queues - and valid_objs') + (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] (do (runnable, curThread, action) \ do @@ -2407,7 +2018,8 @@ lemma scheduleTCB_ccorres': rescheduleRequired od) (Call scheduleTCB_'proc)" - apply (cinit' lift: tptr_' simp del: word_neq_0_conv) + supply empty_fail_cond[simp] + apply (cinit' lift: tptr_') apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) defer @@ -2428,7 +2040,7 @@ lemma scheduleTCB_ccorres': \ (\t. ksSchedulerAction s = SwitchToThread t \ tcb_at' t s)" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def if_1_0_0 split del: if_split) + apply (clarsimp simp: return_def) apply (clarsimp simp: from_bool_0 rf_sr_ksCurThread) apply (rule conjI) apply (clarsimp simp: st_tcb_at'_def) @@ -2436,32 +2048,35 @@ lemma scheduleTCB_ccorres': apply (clarsimp simp: typ_heap_simps) apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def tcb_at_not_NULL split: scheduler_action.split_asm) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def) apply wp+ - apply (simp add: isRunnable_def isStopped_def) - apply wp + apply (simp add: isRunnable_def isStopped_def) apply (simp add: guard_is_UNIV_def) apply clarsimp apply (clarsimp simp: st_tcb_at'_def obj_at'_def weak_sch_act_wf_def) done lemma scheduleTCB_ccorres_valid_queues'_pre: - "ccorresG rf_sr \ dc xfdc (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and valid_queues and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs') - (UNIV \ \\tptr = tcb_ptr_to_ctcb_ptr thread\) [] - (do (runnable, curThread, action) \ do - runnable \ isRunnable thread; - curThread \ getCurThread; - action \ getSchedulerAction; - return (runnable, curThread, action) od; - when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired - od) - (Call scheduleTCB_'proc)" - apply (cinit' lift: tptr_' simp del: word_neq_0_conv) + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread + and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and pspace_aligned' and pspace_distinct') + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] + apply (cinit' lift: tptr_') apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) defer @@ -2489,7 +2104,7 @@ lemma scheduleTCB_ccorres_valid_queues'_pre: apply (drule (1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def weak_sch_act_wf_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: ThreadState_defs)[1] apply (fold_subgoals (prefix))[6] subgoal premises prems using prems by (clarsimp simp: rf_sr_def cstate_relation_def Let_def @@ -2500,17 +2115,17 @@ lemma scheduleTCB_ccorres_valid_queues'_pre: split: scheduler_action.split_asm) apply wp+ apply (simp add: isRunnable_def isStopped_def) - apply wp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: st_tcb_at'_def obj_at'_def) done - lemmas scheduleTCB_ccorres_valid_queues' = scheduleTCB_ccorres_valid_queues'_pre[unfolded bind_assoc return_bind split_conv] lemma rescheduleRequired_ccorres_valid_queues'_simple: - "ccorresG rf_sr \ dc xfdc (valid_queues' and sch_act_simple) UNIV [] rescheduleRequired (Call rescheduleRequired_'proc)" + "ccorresG rf_sr \ dc xfdc + sch_act_simple UNIV [] + rescheduleRequired (Call rescheduleRequired_'proc)" apply cinit apply (rule ccorres_symb_exec_l) apply (rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) @@ -2543,16 +2158,18 @@ lemma rescheduleRequired_ccorres_valid_queues'_simple: split: scheduler_action.split_asm) lemma scheduleTCB_ccorres_valid_queues'_pre_simple: - "ccorresG rf_sr \ dc xfdc (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and sch_act_simple) - (UNIV \ \\tptr = tcb_ptr_to_ctcb_ptr thread\) [] - (do (runnable, curThread, action) \ do - runnable \ isRunnable thread; - curThread \ getCurThread; - action \ getSchedulerAction; - return (runnable, curThread, action) od; - when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired - od) - (Call scheduleTCB_'proc)" + "ccorresG rf_sr \ dc xfdc + (tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' and sch_act_simple) + \\tptr = tcb_ptr_to_ctcb_ptr thread\ [] + (do (runnable, curThread, action) \ do runnable \ isRunnable thread; + curThread \ getCurThread; + action \ getSchedulerAction; + return (runnable, curThread, action) + od; + when (\ runnable \ curThread = thread \ action = ResumeCurrentThread) rescheduleRequired + od) + (Call scheduleTCB_'proc)" + supply empty_fail_cond[simp] apply (cinit' lift: tptr_' simp del: word_neq_0_conv) apply (rule ccorres_rhs_assoc2)+ apply (rule_tac xf'="ret__int_'" in ccorres_split_nothrow_novcg) @@ -2582,7 +2199,7 @@ lemma scheduleTCB_ccorres_valid_queues'_pre_simple: apply (clarsimp simp: typ_heap_simps) apply (subgoal_tac "ksSchedulerAction \ = ResumeCurrentThread") apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: "StrictC'_thread_state_defs")[1] + apply (case_tac "tcbState ko", simp_all add: 
ThreadState_defs)[1] apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def tcb_at_not_NULL @@ -2590,11 +2207,10 @@ lemma scheduleTCB_ccorres_valid_queues'_pre_simple: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cscheduler_action_relation_def) apply wp+ - apply (simp add: isRunnable_def isStopped_def) - apply wp + apply (simp add: isRunnable_def isStopped_def) apply (simp add: guard_is_UNIV_def) apply clarsimp - apply (clarsimp simp: st_tcb_at'_def obj_at'_def valid_queues'_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) done lemmas scheduleTCB_ccorres_valid_queues'_simple @@ -2614,48 +2230,35 @@ lemma threadSet_weak_sch_act_wf_runnable': apply (clarsimp) done -lemma threadSet_valid_queues_and_runnable': "\\s. valid_queues s \ (\p. thread \ set (ksReadyQueues s p) \ runnable' st)\ - threadSet (tcbState_update (\_. st)) thread - \\rv s. valid_queues s\" - apply (wp threadSet_valid_queues) - apply (clarsimp simp: inQ_def) -done - lemma setThreadState_ccorres[corres]: "ccorres dc xfdc - (\s. tcb_at' thread s \ valid_queues s \ valid_objs' s \ valid_tcb_state' st s \ - (ksSchedulerAction s = SwitchToThread thread \ runnable' st) \ - (\p. thread \ set (ksReadyQueues s p) \ runnable' st) \ - sch_act_wf (ksSchedulerAction s) s) - ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} - \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) hs + (\s. tcb_at' thread s \ valid_objs' s \ valid_tcb_state' st s \ + pspace_aligned' s \ pspace_distinct' s \ + (ksSchedulerAction s = SwitchToThread thread \ runnable' st) \ + sch_act_wf (ksSchedulerAction s) s) + ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} + \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) hs (setThreadState st thread) (Call setThreadState_'proc)" - apply (cinit lift: tptr_' cong add: call_ignore_cong) + apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres) - apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues_and_runnable' - threadSet_valid_objs') + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs') by (clarsimp simp: weak_sch_act_wf_def valid_queues_def valid_tcb'_tcbState_update) -lemma threadSet_valid_queues'_and_not_runnable': "\tcb_at' thread and valid_queues' and (\s. (\ runnable' st))\ - threadSet (tcbState_update (\_. st)) thread - \\rv. tcb_at' thread and st_tcb_at' (not runnable') thread and valid_queues' \" - - apply (wp threadSet_valid_queues' threadSet_tcbState_st_tcb_at') - apply (clarsimp simp: pred_neg_def valid_queues'_def inQ_def)+ -done - lemma setThreadState_ccorres_valid_queues': - "ccorres dc xfdc - (\s. tcb_at' thread s \ valid_queues' s \ \ runnable' st \ weak_sch_act_wf (ksSchedulerAction s) s \ Invariants_H.valid_queues s \ (\p. thread \ set (ksReadyQueues s p)) \ sch_act_not thread s \ valid_objs' s \ valid_tcb_state' st s) + "ccorres dc xfdc + (\s. tcb_at' thread s \ \ runnable' st \ weak_sch_act_wf (ksSchedulerAction s) s + \ sch_act_not thread s \ valid_objs' s \ valid_tcb_state' st s + \ pspace_aligned' s \ pspace_distinct' s) ({s'. (\cl fl. cthread_state_relation_lifted st (cl\tsType_CL := ts_' s' && mask 4\, fl))} - \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] - (setThreadState st thread) (Call setThreadState_'proc)" - apply (cinit lift: tptr_' cong add: call_ignore_cong) + \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr thread}) [] + (setThreadState st thread) (Call setThreadState_'proc)" + apply (cinit lift: tptr_' cong add: call_ignore_cong) apply (ctac (no_vcg) add: threadSet_tcbState_simple_corres) apply (ctac add: scheduleTCB_ccorres_valid_queues') - apply (wp threadSet_valid_queues'_and_not_runnable' threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues_and_runnable' threadSet_valid_objs') - by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def) + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs' + threadSet_tcbState_st_tcb_at') + by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) lemma simp_list_case_return: "(case x of [] \ return e | y # ys \ return f) = return (if x = [] then e else f)" @@ -2676,28 +2279,26 @@ lemma cancelSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (ctac (no_vcg) add: cancelSignal_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') - apply ((wp setNotification_nosch setNotification_ksQ hoare_vcg_all_lift set_ntfn_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+)[1] - apply (simp add: "StrictC'_thread_state_defs") + apply ((wp setNotification_nosch hoare_vcg_all_lift set_ntfn_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+)[1] + apply (simp add: ThreadState_defs) apply (rule conjI, clarsimp, rule conjI, clarsimp) apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) subgoal by ((auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def - isTS_defs cte_wp_at_ctes_of "StrictC'_thread_state_defs" + isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ntfn'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] | - clarsimp simp: eq_commute)+) + | clarsimp simp: eq_commute)+) apply (clarsimp) apply (frule (1) ko_at_valid_ntfn'[OF _ invs_valid_objs']) apply (frule (2) ntfn_blocked_in_queueD) by (auto simp: obj_at'_def projectKOs st_tcb_at'_def invs'_def valid_state'_def - isTS_defs cte_wp_at_ctes_of "StrictC'_thread_state_defs" valid_ntfn'_def + isTS_defs cte_wp_at_ctes_of valid_ntfn'_def cthread_state_relation_def sch_act_wf_weak isWaitingNtfn_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: ntfn.splits option.splits | clarsimp simp: eq_commute | drule_tac x=thread in bspec)+ (* FIXME: MOVE *) -lemma ccorres_pre_getEndpoint [corres_pre]: +lemma ccorres_pre_getEndpoint [ccorres_pre]: assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" shows "ccorres r xf (ep_at' p and (\s. \ep. ko_at' ep p s \ P ep s)) @@ -2839,8 +2440,8 @@ lemma cpspace_relation_ep_update_an_ep: and pal: "pspace_aligned' s" "pspace_distinct' s" and others: "\epptr' ep'. 
\ ko_at' ep' epptr' s; epptr' \ epptr; ep' \ IdleEP \ \ set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using cp koat pal rel unfolding cmap_relation_def apply - apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs) @@ -2862,8 +2463,8 @@ lemma cpspace_relation_ep_update_ep: and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using invs apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) apply clarsimp+ @@ -2875,15 +2476,15 @@ lemma cpspace_relation_ep_update_ep': fixes ep :: "endpoint" and ep' :: "endpoint" and epptr :: "machine_word" and s :: "kernel_state" defines "qs \ if (isSendEP ep' \ isRecvEP ep') then set (epQueue ep') else {}" - defines "s' \ s\ksPSpace := ksPSpace s(epptr \ KOEndpoint ep')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(epptr \ KOEndpoint ep')\" assumes koat: "ko_at' ep epptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and srs: "sym_refs (state_refs_of' s')" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" proof - from koat have koat': "ko_at' ep' epptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -2962,7 +2563,7 @@ lemma cancelIPC_ccorres_helper: apply (rule allI) apply (rule conseqPre) apply vcg - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ep_blocked_in_queueD) apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) apply (elim conjE) @@ -2980,7 +2581,7 @@ lemma cancelIPC_ccorres_helper: apply assumption+ apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) - apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split simp del: comp_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) apply (frule null_ep_queue [simplified comp_def] null_ep_queue) apply (intro impI conjI allI) \ \empty case\ @@ -2996,28 +2597,25 @@ lemma cancelIPC_ccorres_helper: cpspace_relation_def update_ep_map_tos typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - subgoal by (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - subgoal by (simp add: cendpoint_relation_def Let_def EPState_Idle_def) - subgoal by simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - 
apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - subgoal by simp - apply (erule (1) map_to_ko_atI') - apply (simp add: heap_to_user_data_def Let_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + subgoal by (simp add: cendpoint_relation_def Let_def EPState_Idle_def) + subgoal by simp + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + subgoal by simp + apply (erule (1) map_to_ko_atI') + apply (simp add: heap_to_user_data_def Let_def) subgoal by (clarsimp simp: carch_state_relation_def carch_globals_def fpu_null_state_heap_update_tag_disj_simps global_ioport_bitmap_heap_update_tag_disj_simps packed_heap_update_collapse_hrs - elim!: fpu_null_state_typ_heap_preservation) + elim!: fpu_null_state_typ_heap_preservation) subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: h_t_valid_clift_Some_iff) subgoal by (simp add: objBits_simps') @@ -3036,53 +2634,56 @@ lemma cancelIPC_ccorres_helper: cpspace_relation_def update_ep_map_tos typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - subgoal by (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def split: endpoint.splits split del: if_split) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + subgoal by (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def isSendEP_def isRecvEP_def + split: endpoint.splits split del: if_split) \ \recv case\ - apply (subgoal_tac "pspace_canonical' \") - prefer 2 - apply fastforce - apply (clarsimp simp add: h_t_valid_clift_Some_iff ctcb_offset_defs - tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask - tcb_queue_relation'_next_sign tcb_queue_relation'_prev_sign - cong: tcb_queue_relation'_cong) - subgoal by (intro impI conjI; simp) - \ \send case\ - apply (subgoal_tac "pspace_canonical' \") - prefer 2 - apply fastforce - apply (clarsimp simp add: h_t_valid_clift_Some_iff ctcb_offset_defs - tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask - tcb_queue_relation'_next_sign tcb_queue_relation'_prev_sign - cong: tcb_queue_relation'_cong) + apply (subgoal_tac "pspace_canonical' \") + prefer 2 + apply fastforce + apply (clarsimp simp: h_t_valid_clift_Some_iff ctcb_offset_defs + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask + tcb_queue_relation'_next_sign tcb_queue_relation'_prev_sign + cong: tcb_queue_relation'_cong) subgoal by (intro impI conjI; simp) - subgoal by simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + \ \send case\ + apply (subgoal_tac "pspace_canonical' \") + prefer 2 + apply fastforce + apply (clarsimp simp: h_t_valid_clift_Some_iff 
ctcb_offset_defs + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask + tcb_queue_relation'_next_sign tcb_queue_relation'_prev_sign + cong: tcb_queue_relation'_cong) + subgoal by (intro impI conjI; simp) + \ \send case\ + apply (subgoal_tac "pspace_canonical' \") + prefer 2 + apply fastforce + apply (clarsimp simp: h_t_valid_clift_Some_iff ctcb_offset_defs + tcb_queue_relation'_next_mask tcb_queue_relation'_prev_mask + tcb_queue_relation'_next_sign tcb_queue_relation'_prev_sign + cong: tcb_queue_relation'_cong) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') subgoal by (clarsimp simp: carch_state_relation_def carch_globals_def fpu_null_state_heap_update_tag_disj_simps global_ioport_bitmap_heap_update_tag_disj_simps packed_heap_update_collapse_hrs - elim!: fpu_null_state_typ_heap_preservation) + elim!: fpu_null_state_typ_heap_preservation) subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: h_t_valid_clift_Some_iff) subgoal by (simp add: objBits_simps') subgoal by (simp add: objBits_simps) by assumption -declare empty_fail_get[iff] - lemma getThreadState_ccorres_foo: "(\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c) \ ccorres r xf (\s. \ts. st_tcb_at' ((=) ts) t s \ P ts s) @@ -3135,8 +2736,7 @@ lemma cancelIPC_ccorres1: apply (rule_tac P="rv' = thread_state_to_tsType rv" in ccorres_gen_asm2) apply wpc \ \BlockedOnReceive\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs cong: call_ignore_cong) - apply (fold dc_def) + apply (simp add: word_sle_def ccorres_cond_iffs cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -3152,7 +2752,7 @@ lemma cancelIPC_ccorres1: apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split del: if_split)+ - apply (simp add: "StrictC'_thread_state_defs") + apply (simp add: ThreadState_defs) apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -3162,10 +2762,9 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \BlockedOnReply case\ - apply (simp add: "StrictC'_thread_state_defs" ccorres_cond_iffs + apply (simp add: ThreadState_defs ccorres_cond_iffs Collect_False Collect_True word_sle_def cong: call_ignore_cong del: Collect_const) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -3205,14 +2804,12 @@ lemma cancelIPC_ccorres1: apply (rule ccorres_Cond_rhs) apply (simp add: nullPointer_def when_def) apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_stateAssert]) - apply (simp only: dc_def[symmetric]) apply (rule ccorres_symb_exec_r) apply (ctac add: cteDeleteOne_ccorres[where w1="scast cap_reply_cap"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) apply (wp | simp)+ - apply (simp add: when_def nullPointer_def dc_def[symmetric]) apply (rule ccorres_return_Skip) apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs) @@ -3225,7 +2822,8 @@ lemma cancelIPC_ccorres1: apply (clarsimp simp add: guard_is_UNIV_def tcbReplySlot_def Kernel_C.tcbReply_def tcbCNodeEntries_def) \ \BlockedOnNotification\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] 
cong: call_ignore_cong) + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong) apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg)) apply clarsimp @@ -3234,10 +2832,12 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Running, Inactive, and Idle\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip)+ \ \BlockedOnSend\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def ccorres_cond_iffs + cong: call_ignore_cong) \ \clag\ apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -3253,7 +2853,7 @@ lemma cancelIPC_ccorres1: apply (ctac (no_vcg) add: cancelIPC_ccorres_helper) apply (ctac add: setThreadState_ccorres_valid_queues') apply (wp hoare_vcg_all_lift set_ep_valid_objs' | simp add: valid_tcb_state'_def split del:if_split)+ - apply (simp add: "StrictC'_thread_state_defs") + apply (simp add: ThreadState_defs) apply clarsimp apply (rule conseqPre, vcg, rule subset_refl) apply (rule conseqPre, vcg) @@ -3263,7 +2863,8 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Restart\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def ThreadState_defs ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip) \ \Post wp proofs\ apply vcg @@ -3286,37 +2887,35 @@ lemma cancelIPC_ccorres1: subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (frule (2) ep_blocked_in_queueD_recv) apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of isRecvEP_def cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits endpoint.splits) + split: thread_state.splits endpoint.splits) apply (rule conjI) apply (clarsimp simp: inQ_def) - apply (rule conjI) - apply clarsimp apply clarsimp apply (rule conjI) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (rule conjI) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: thread_state.splits) + split: thread_state.splits) apply clarsimp apply (frule (2) ep_blocked_in_queueD_send) apply (frule (1) ko_at_valid_ep'[OF _ invs_valid_objs']) subgoal by (auto simp: obj_at'_def projectKOs pred_tcb_at'_def invs'_def valid_state'_def isTS_defs cte_wp_at_ctes_of isSendEP_def cthread_state_relation_def sch_act_wf_weak valid_ep'_def - dest!: valid_queues_not_runnable'_not_ksQ[where t=thread] split: 
thread_state.splits endpoint.splits)[1] + split: thread_state.splits endpoint.splits)[1] apply (auto simp: isTS_defs cthread_state_relation_def typ_heap_simps weak_sch_act_wf_def) apply (case_tac ts, auto simp: isTS_defs cthread_state_relation_def typ_heap_simps) diff --git a/proof/crefine/X64/Ipc_C.thy b/proof/crefine/X64/Ipc_C.thy index 29766b18d2..95297c56f9 100644 --- a/proof/crefine/X64/Ipc_C.thy +++ b/proof/crefine/X64/Ipc_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -26,10 +27,6 @@ lemma replyFromKernel_success_empty: unfolding replyFromKernel_def replyFromKernel_success_empty_def by (simp add: setMRs_Nil submonad_asUser.fn_stateAssert) -crunch valid_queues[wp]: handleFaultReply valid_queues - -crunch valid_queues'[wp]: handleFaultReply valid_queues' - crunch sch_act_wf: handleFaultReply "\s. sch_act_wf (ksSchedulerAction s) s" crunch valid_ipc_buffer_ptr' [wp]: copyMRs "valid_ipc_buffer_ptr' p" @@ -407,6 +404,7 @@ lemma handleArchFaultReply': msg \ getMRs s sb tag; handleArchFaultReply f r (msgLabel tag) msg od) x' = handleArchFaultReply' f s r tag x'" + supply empty_fail_cond[simp] apply (unfold handleArchFaultReply'_def getMRs_def msgMaxLength_def bit_def msgLengthBits_def msgRegisters_unfold fromIntegral_simp1 fromIntegral_simp2 @@ -505,13 +503,10 @@ lemma getSanitiseRegisterInfo_moreMapM_comm: apply (auto split: option.splits) done - lemma monadic_rewrite_threadGet_return: "monadic_rewrite True False (tcb_at' r) (return x) (do t \ threadGet f r; return x od)" - apply (rule monadic_rewrite_symb_exec_r') - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) done context begin interpretation Arch . @@ -526,18 +521,14 @@ end lemma monadic_rewrite_getSanitiseRegisterInfo_return: "monadic_rewrite True False (tcb_at' r) (return x) (do t \ getSanitiseRegisterInfo r; return x od)" - apply (rule monadic_rewrite_symb_exec_r') - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) done lemma monadic_rewrite_getSanitiseRegisterInfo_drop: "monadic_rewrite True False (tcb_at' r) (d) (do t \ getSanitiseRegisterInfo r; d od)" - apply (rule monadic_rewrite_symb_exec_r') - apply wp+ - apply (rule monadic_rewrite_refl) - apply wp + apply (wp_pre, monadic_rewrite_symb_exec_r_drop) + apply (auto intro: monadic_rewrite_refl) done context kernel_m begin interpretation Arch . 
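(* The three monadic_rewrite lemmas above share one pattern: the right-hand program performs a
   read whose result is never used (threadGet f r, or getSanitiseRegisterInfo r), so after
   symbolically executing and dropping that read with monadic_rewrite_symb_exec_r_drop the two
   sides coincide and monadic_rewrite_refl closes the goal; the tcb_at' r guard presumably
   discharges the side conditions of executing that read on the right-hand side. *)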
@@ -558,7 +549,8 @@ lemma handleFaultReply': msg \ getMRs s sb tag; handleFaultReply f r (msgLabel tag) msg od) (handleFaultReply' f s r)" - supply if_cong[cong] + supply if_cong[cong] empty_fail_cond[simp] + supply empty_fail_asUser[wp] empty_fail_getRegister[wp] apply (unfold handleFaultReply'_def getMRs_def msgMaxLength_def bit_def msgLengthBits_def msgRegisters_unfold fromIntegral_simp1 fromIntegral_simp2 @@ -573,41 +565,41 @@ lemma handleFaultReply': zip_Cons X64_H.exceptionMessage_def X64.exceptionMessage_def mapM_x_Cons mapM_x_Nil) - apply (rule monadic_rewrite_symb_exec_l, wp+) - apply (rule_tac P="tcb_at' s and tcb_at' r" in monadic_rewrite_inst) - apply (case_tac rv; (case_tac "msgLength tag < scast n_msgRegisters", - (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm - asUser_getRegister_getSanitiseRegisterInfo_comm - asUser_getRegister_discarded asUser_mapMloadWordUser_threadGet_comm - asUser_comm[OF neq] asUser_getRegister_threadGet_comm - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - | wp asUser_typ_ats lookupIPCBuffer_inv )+)+)) - apply wp + apply (rule monadic_rewrite_symb_exec_l) + apply (rule_tac P="tcb_at' s and tcb_at' r" in monadic_rewrite_inst) + apply (case_tac sb; (case_tac "msgLength tag < scast n_msgRegisters", + (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + asUser_mapMloadWordUser_getSanitiseRegisterInfo_comm + asUser_getRegister_getSanitiseRegisterInfo_comm + asUser_getRegister_discarded asUser_mapMloadWordUser_threadGet_comm + asUser_comm[OF neq] asUser_getRegister_threadGet_comm + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp)+)+)) + apply wp+ (* capFault *) - apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_asUser empty_fail_getRegister)+)+ - apply(case_tac rv) - apply (clarsimp - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - | wp mapM_x_mapM_valid[OF mapM_x_wp'[OF loadWordUser_inv]] - empty_fail_loadWordUser)+ + apply (repeat 5 \rule monadic_rewrite_symb_exec_l\) (* until case sb *) + apply (case_tac sb) + apply (clarsimp + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + | wp mapM_x_mapM_valid[OF mapM_x_wp'[OF loadWordUser_inv]] + empty_fail_loadWordUser)+ (* UnknownSyscallException *) apply (simp add: zip_append2 mapM_x_append asUser_bind_distrib split_def bind_assoc) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_do_flip) apply (rule monadic_rewrite_bind_tail) apply (rule_tac P="inj (case_bool s r)" in monadic_rewrite_gen_asm) apply (rule monadic_rewrite_trans[OF _ 
monadic_rewrite_transverse]) - apply (rule monadic_rewrite_weaken[where F=False and E=True], simp) + apply (rule monadic_rewrite_weaken_flags[where F=False and E=True], simp) apply (rule isolate_thread_actions_rewrite_bind bool.simps setRegister_simple zipWithM_setRegister_simple @@ -627,90 +619,90 @@ lemma handleFaultReply': upto_enum_word mapM_x_Cons mapM_x_Nil) apply (simp add: getSanitiseRegisterInfo_moreMapM_comm asUser_getRegister_getSanitiseRegisterInfo_comm getSanitiseRegisterInfo_lookupIPCBuffer_comm) apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind_tail [where Q="\_. tcb_at' r"]) - apply (case_tac sb) + apply (rule monadic_rewrite_bind_tail [where Q="\_. tcb_at' r"]) + apply (case_tac sb) + apply (case_tac "msgLength tag < scast n_msgRegisters") + apply (erule disjE[OF word_less_cases], + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + | wp asUser_typ_ats)+)+ apply (case_tac "msgLength tag < scast n_msgRegisters") apply (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - asUser_getRegister_discarded - asUser_comm[OF neq] take_zip - word_le_nat_alt[of 4, simplified linorder_not_less[symmetric, of 4]] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - | wp asUser_typ_ats)+)+ - apply (case_tac "msgLength tag < scast n_msgRegisters") - apply (erule disjE[OF word_less_cases], - ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib - mapM_x_Cons mapM_x_Nil bind_assoc - zipWithM_x_Nil - asUser_getRegister_discarded - asUser_comm[OF neq] take_zip - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - asUser_return submonad_asUser.fn_stateAssert - | rule monadic_rewrite_bind_tail monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_threadGet_return - monadic_rewrite_getSanitiseRegisterInfo_return - | wp asUser_typ_ats mapM_wp')+)+ - apply (simp add: n_msgRegisters_def word_le_nat_alt n_syscallMessage_def - linorder_not_less syscallMessage_unfold) - apply (clarsimp | frule neq0_conv[THEN iffD2, THEN not0_implies_Suc, - OF order_less_le_trans, rotated])+ - apply (subgoal_tac "\n :: machine_word. n \ scast n_syscallMessage \ [n .e. msgMaxLength] - = [n .e. scast n_syscallMessage] - @ [scast n_syscallMessage + 1 .e. 
msgMaxLength]") - apply (simp only: upto_enum_word[where y="scast n_syscallMessage :: machine_word"] - upto_enum_word[where y="scast n_syscallMessage + 1 :: machine_word"]) - apply (clarsimp simp: bind_assoc asUser_bind_distrib asUser_getRegister_threadGet_comm - mapM_x_Cons mapM_x_Nil threadGet_discarded - asUser_comm [OF neq] asUser_getRegister_discarded - submonad_asUser.fn_stateAssert take_zip - bind_subst_lift [OF submonad_asUser.stateAssert_fn] - word_less_nat_alt X64_H.sanitiseRegister_def - split_def n_msgRegisters_def msgMaxLength_def - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - word_size msgLengthBits_def n_syscallMessage_def Let_def - split del: if_split - cong: if_weak_cong register.case_cong) - - - apply (rule monadic_rewrite_bind_tail)+ - apply (subst (2) upto_enum_word) - apply (case_tac "ma < unat n_syscallMessage - 4") - - apply (erule disjE[OF nat_less_cases'], - ( clarsimp simp: n_syscallMessage_def bind_assoc asUser_bind_distrib - mapM_x_Cons mapM_x_Nil zipWithM_x_mapM_x mapM_Cons - bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] - asUser_loadWordUser_comm loadWordUser_discarded asUser_return - zip_take_triv2 msgMaxLength_def - no_fail_stateAssert - cong: if_weak_cong - | simp - | rule monadic_rewrite_bind_tail - monadic_rewrite_refl - monadic_rewrite_symb_exec_l[OF stateAssert_inv] - monadic_rewrite_symb_exec_l[OF mapM_x_mapM_valid[OF mapM_x_wp']] - monadic_rewrite_threadGet_return - monadic_rewrite_getSanitiseRegisterInfo_return - monadic_rewrite_getSanitiseRegisterInfo_drop - | wp asUser_typ_ats empty_fail_loadWordUser)+)+ - apply (clarsimp simp: upto_enum_word word_le_nat_alt simp del: upt.simps cong: if_weak_cong) - apply (cut_tac i="unat n" and j="Suc (unat (scast n_syscallMessage :: machine_word))" - and k="Suc msgMaxLength" in upt_add_eq_append') - apply (simp add: n_syscallMessage_def) - apply (simp add: n_syscallMessage_def msgMaxLength_unfold) - apply (simp add: n_syscallMessage_def msgMaxLength_def - msgLengthBits_def shiftL_nat - del: upt.simps upt_rec_numeral) - apply (simp add: upto_enum_word cong: if_weak_cong) - apply wp+ + ( clarsimp simp: n_msgRegisters_def asUser_bind_distrib + mapM_x_Cons mapM_x_Nil bind_assoc + zipWithM_x_Nil + asUser_getRegister_discarded + asUser_comm[OF neq] take_zip + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_return submonad_asUser.fn_stateAssert + | rule monadic_rewrite_bind_tail monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_threadGet_return + monadic_rewrite_getSanitiseRegisterInfo_return + | wp asUser_typ_ats mapM_wp')+)+ + apply (simp add: n_msgRegisters_def word_le_nat_alt n_syscallMessage_def + linorder_not_less syscallMessage_unfold) + apply (clarsimp | frule neq0_conv[THEN iffD2, THEN not0_implies_Suc, + OF order_less_le_trans, rotated])+ + apply (subgoal_tac "\n :: machine_word. n \ scast n_syscallMessage \ [n .e. msgMaxLength] + = [n .e. scast n_syscallMessage] + @ [scast n_syscallMessage + 1 .e. 
msgMaxLength]") + apply (simp only: upto_enum_word[where y="scast n_syscallMessage :: machine_word"] + upto_enum_word[where y="scast n_syscallMessage + 1 :: machine_word"]) + apply (clarsimp simp: bind_assoc asUser_bind_distrib asUser_getRegister_threadGet_comm + mapM_x_Cons mapM_x_Nil threadGet_discarded + asUser_comm [OF neq] asUser_getRegister_discarded + submonad_asUser.fn_stateAssert take_zip + bind_subst_lift [OF submonad_asUser.stateAssert_fn] + word_less_nat_alt X64_H.sanitiseRegister_def + split_def n_msgRegisters_def msgMaxLength_def + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + word_size msgLengthBits_def n_syscallMessage_def Let_def + split del: if_split + cong: if_weak_cong register.case_cong) + + + apply (rule monadic_rewrite_bind_tail)+ + apply (subst (2) upto_enum_word) + apply (case_tac "ma < unat n_syscallMessage - 4") + + apply (erule disjE[OF nat_less_cases'], + ( clarsimp simp: n_syscallMessage_def bind_assoc asUser_bind_distrib + mapM_x_Cons mapM_x_Nil zipWithM_x_mapM_x mapM_Cons + bind_comm_mapM_comm [OF asUser_loadWordUser_comm, symmetric] + asUser_loadWordUser_comm loadWordUser_discarded asUser_return + zip_take_triv2 msgMaxLength_def + no_fail_stateAssert + cong: if_weak_cong + | simp + | rule monadic_rewrite_bind_tail + monadic_rewrite_refl + monadic_rewrite_symb_exec_l[OF _ stateAssert_inv] + monadic_rewrite_symb_exec_l[OF _ mapM_x_mapM_valid[OF mapM_x_wp']] + monadic_rewrite_threadGet_return + monadic_rewrite_getSanitiseRegisterInfo_return + monadic_rewrite_getSanitiseRegisterInfo_drop + | wp asUser_typ_ats empty_fail_loadWordUser)+)+ + apply (clarsimp simp: upto_enum_word word_le_nat_alt simp del: upt.simps cong: if_weak_cong) + apply (cut_tac i="unat n" and j="Suc (unat (scast n_syscallMessage :: machine_word))" + and k="Suc msgMaxLength" in upt_add_eq_append') + apply (simp add: n_syscallMessage_def) + apply (simp add: n_syscallMessage_def msgMaxLength_unfold) + apply (simp add: n_syscallMessage_def msgMaxLength_def + msgLengthBits_def shiftL_nat + del: upt.simps upt_rec_numeral) + apply (simp add: upto_enum_word cong: if_weak_cong) + apply wp+ (* ArchFault *) apply (simp add: neq inj_case_bool split: bool.split) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_is_refl) apply (rule ext) apply (unfold handleArchFaultReply'[symmetric] getMRs_def msgMaxLength_def @@ -728,7 +720,7 @@ begin (* FIXME: move *) lemma ccorres_merge_return: - "ccorres (\a c. r (f a) c) xf P P' hs H C \ + "ccorres (r \ f) xf P P' hs H C \ ccorres r xf P P' hs (do x \ H; return (f x) od) C" by (rule ccorres_return_into_rel) @@ -1204,7 +1196,7 @@ lemma setMRs_syscall_error_ccorres: | wp hoare_case_option_wp | (simp del: Collect_const, vcg exspec=setMR_modifies) )+ - apply (simp add: msgMaxLength_unfold if_1_0_0 true_def false_def) + apply (simp add: msgMaxLength_unfold) apply (clarsimp split:if_split_asm simp:syscall_error_to_H_def map_option_Some_eq2 ucast_and_mask ucast_nat_def) apply (simp add: msgFromLookupFailure_def split: lookup_failure.split @@ -1358,10 +1350,6 @@ shows apply (auto split: if_split) done -declare zipWith_Nil2[simp] - -declare zipWithM_x_Nil2[simp] - lemma getRestartPC_ccorres [corres]: "ccorres (=) ret__unsigned_long_' \ (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\) hs @@ -1374,18 +1362,14 @@ lemma getRestartPC_ccorres [corres]: done lemma asUser_tcbFault_obj_at: - "\obj_at' (\tcb. P (tcbFault tcb)) t\ asUser t' m - \\rv. obj_at' (\tcb. P (tcbFault tcb)) t\" + "asUser t' m \obj_at' (\tcb. 
P (tcbFault tcb)) t\" apply (simp add: asUser_def split_def) apply (wp threadGet_wp) apply (simp cong: if_cong) done lemma asUser_atcbContext_obj_at: - "t \ t' \ - \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\ - asUser t' m - \\rv. obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" + "t \ t' \ asUser t' m \obj_at' (\tcb. P ((atcbContextGet o tcbArch) tcb)) t\" apply (simp add: asUser_def split_def atcbContextGet_def atcbContextSet_def) apply (wp threadGet_wp) apply simp @@ -1605,53 +1589,54 @@ proof - apply ceqv apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) - apply (rule ccorres_rel_imp[where r = "\rv rv'. True", simplified]) - apply (rule_tac F="\_. obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) X64_H.syscallMessage = msg) - sender and valid_pspace' - and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" - in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) - apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters - option_to_0_def liftM_def[symmetric] - split: option.split_asm) - apply (rule ccorres_guard_imp2) - apply (rule_tac t=sender and r="X64_H.syscallMessage ! (n + unat n_msgRegisters)" - in ccorres_add_getRegister) - apply (ctac(no_vcg)) - apply (rule_tac P="\s. rv = msg ! (n + unat n_msgRegisters)" - in ccorres_cross_over_guard) - apply (rule ccorres_move_array_assertion_ipc_buffer - | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ - apply (simp add: storeWordUser_def) - apply (rule ccorres_pre_stateAssert) - apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) - apply (simp add: pred_conj_def) - apply (wp user_getreg_rv) - apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def - syscallMessage_ccorres msgRegisters_ccorres - unat_add_lem[THEN iffD1] unat_of_nat64 - word_bits_def word_size_def) - apply (simp only:field_simps imp_ex imp_conjL) - apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def - pointerInUserData_h_t_valid - atcbContextGet_def - projectKOs objBits_simps word_less_nat_alt - unat_add_lem[THEN iffD1] unat_of_nat) - apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def - MessageID_Syscall_def - msg_align_bits valid_ipc_buffer_ptr'_def) - apply (erule aligned_add_aligned) - apply (rule aligned_add_aligned[where n=3]) - apply (simp add: is_aligned_def) - apply (rule is_aligned_mult_triv2 [where n=3, simplified]) - apply (simp add: wb_gt_2)+ - apply (simp add: n_msgRegisters_def) - apply (vcg exspec=getRegister_modifies) - apply simp - apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) - apply (rule hoare_pre) - apply (wp hoare_case_option_wp | wpc)+ - apply clarsimp - apply (simp add: n_msgRegisters_def word_bits_def) + apply (rule ccorres_rel_imp) + apply (rule_tac F="\_. obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) X64_H.syscallMessage = msg) + sender and valid_pspace' + and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" + in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) + apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters + option_to_0_def liftM_def[symmetric] + split: option.split_asm) + apply (rule ccorres_guard_imp2) + apply (rule_tac t=sender and r="X64_H.syscallMessage ! (n + unat n_msgRegisters)" + in ccorres_add_getRegister) + apply (ctac(no_vcg)) + apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" + in ccorres_cross_over_guard) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ + apply (simp add: storeWordUser_def) + apply (rule ccorres_pre_stateAssert) + apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) + apply (simp add: pred_conj_def) + apply (wp user_getreg_rv) + apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def + syscallMessage_ccorres msgRegisters_ccorres + unat_add_lem[THEN iffD1] unat_of_nat64 + word_bits_def word_size_def) + apply (simp only:field_simps imp_ex imp_conjL) + apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def + pointerInUserData_h_t_valid + atcbContextGet_def + projectKOs objBits_simps word_less_nat_alt + unat_add_lem[THEN iffD1] unat_of_nat) + apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def + MessageID_Syscall_def + msg_align_bits valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned) + apply (rule aligned_add_aligned[where n=3]) + apply (simp add: is_aligned_def) + apply (rule is_aligned_mult_triv2 [where n=3, simplified]) + apply (simp add: wb_gt_2)+ + apply (simp add: n_msgRegisters_def) + apply (vcg exspec=getRegister_modifies) + apply simp + apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) + apply (rule hoare_pre) + apply (wp hoare_case_option_wp | wpc)+ + apply clarsimp + apply (simp add: n_msgRegisters_def word_bits_def) + apply simp apply (simp add: n_msgRegisters_def) apply (frule (1) option_to_0_imp) apply (subst drop_zip) @@ -1659,7 +1644,7 @@ proof - apply (clarsimp simp: n_msgRegisters_def numeral_eqs mapM_cong[OF msg_aux, simplified numeral_eqs]) apply (subst mapM_x_return_gen[where w2="()"]) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp) apply (rule hoare_impI) apply (wp mapM_x_wp_inv setMR_atcbContext_obj_at[simplified atcbContextGet_def, simplified] @@ -1749,7 +1734,7 @@ proof - split: list.split_asm) apply (simp split: list.split) apply (wp setMR_tcbFault_obj_at asUser_inv[OF getRestartPC_inv] - hoare_case_option_wp static_imp_wp + hoare_case_option_wp hoare_weak_lift_imp | simp add: option_to_ptr_def guard_is_UNIVI seL4_VMFault_PrefetchFault_def seL4_VMFault_Addr_def @@ -1789,6 +1774,7 @@ proof - let ?obj_at_ft = "obj_at' (\tcb. tcbFault tcb = Some ft) sender" note symb_exec_r_fault = ccorres_symb_exec_r_known_rv_UNIV [where xf'=ret__unsigned_longlong_' and R="?obj_at_ft" and R'=UNIV] + note empty_fail_cond[simp] show ?thesis apply (unfold K_def) apply (intro ccorres_gen_asm) @@ -2050,7 +2036,7 @@ lemma doFaultTransfer_ccorres [corres]: apply ceqv apply csymbr apply (ctac (no_vcg, c_lines 2) add: setMessageInfo_ccorres) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply wp apply (simp add: badgeRegister_def X64.badgeRegister_def X64.capRegister_def Kernel_C.badgeRegister_def "StrictC'_register_defs") @@ -2088,7 +2074,7 @@ lemma unifyFailure_ccorres: assumes corr_ac: "ccorres (f \ r) xf P P' hs a c" shows "ccorres ((\_. 
dc) \ r) xf P P' hs (unifyFailure a) c" using corr_ac - apply (simp add: unifyFailure_def rethrowFailure_def const_def o_def + apply (simp add: unifyFailure_def rethrowFailure_def const_def handleE'_def throwError_def) apply (clarsimp simp: ccorres_underlying_def bind_def split_def return_def split: xstate.splits sum.splits) @@ -2559,7 +2545,7 @@ lemma transferCapsLoop_ccorres: \ \\destSlot = (if slots = [] then NULL else cte_Ptr (hd slots)) \ length slots \ 1 \ slots \ [0]\)" defines "is_the_ep \ \cap. isEndpointCap cap \ ep \ None \ capEPPtr cap = the ep" - defines "stable \ \scap excap. excap \ scap \ excap = maskedAsFull scap scap" + defines "stable_masked \ \scap excap. excap \ scap \ excap = maskedAsFull scap scap" defines "relative_at \ \scap slot s. cte_wp_at' (\cte. badge_derived' scap (cteCap cte) \ capASID scap = capASID (cteCap cte) \ @@ -2574,7 +2560,7 @@ lemma transferCapsLoop_ccorres: (\s. (\x \ set caps. s \' fst x \ cte_wp_at' (\cte. slots \ [] \ is_the_ep (cteCap cte) \ (fst x) = (cteCap cte)) (snd x) s - \ cte_wp_at' (\cte. fst x \ NullCap \ stable (fst x) (cteCap cte)) (snd x) s)) and + \ cte_wp_at' (\cte. fst x \ NullCap \ stable_masked (fst x) (cteCap cte)) (snd x) s)) and (\s. \ sl \ (set slots). cte_wp_at' (isNullCap o cteCap) sl s) and (\_. n + length caps \ 3 \ distinct slots )) (precond n mi slots) @@ -2640,22 +2626,22 @@ next by (simp add:relative_at_def) have stableD: - "\scap excap. stable scap excap + "\scap excap. stable_masked scap excap \ (badge_derived' scap excap \ capASID scap = capASID excap \ cap_asid_base' scap = cap_asid_base' excap \ cap_vptr' scap = cap_vptr' excap)" - apply (clarsimp simp:stable_def) + apply (clarsimp simp:stable_masked_def) apply (case_tac "excap = scap",simp+) apply (simp add:maskedAsFull_misc) done have stable_eq: - "\scap excap. \stable scap excap; isEndpointCap excap\ \ scap = excap" - by (simp add:isCap_simps stable_def maskedAsFull_def split:if_splits) + "\scap excap. \stable_masked scap excap; isEndpointCap excap\ \ scap = excap" + by (simp add:isCap_simps stable_masked_def maskedAsFull_def split:if_splits) have is_the_ep_stable: - "\a b. \a \ NullCap \ stable a b; \ is_the_ep b \ \ \ is_the_ep a" - apply (clarsimp simp:stable_def maskedAsFull_def is_the_ep_def isCap_simps split:if_splits) + "\a b. \a \ NullCap \ stable_masked a b; \ is_the_ep b \ \ \ is_the_ep a" + apply (clarsimp simp:stable_masked_def maskedAsFull_def is_the_ep_def isCap_simps split:if_splits) apply auto done @@ -2814,8 +2800,8 @@ next \ (\x\set slots. cte_wp_at' (isNullCap \ cteCap) x s) \ (\x\set xs'. s \' fst x \ cte_wp_at' (\c. is_the_ep (cteCap c) \ fst x = cteCap c) (snd x) s - \ cte_wp_at' (\c. fst x \ NullCap \ stable (fst x) (cteCap c)) (snd x) s)" - in hoare_post_imp_R) + \ cte_wp_at' (\c. 
fst x \ NullCap \ stable_masked (fst x) (cteCap c)) (snd x) s)" + in hoare_strengthen_postE_R) prefer 2 apply (clarsimp simp:cte_wp_at_ctes_of valid_pspace_mdb' valid_pspace'_splits valid_pspace_valid_objs' is_derived_capMasterCap image_def) @@ -2825,10 +2811,10 @@ next apply (rule conjI) apply (drule(1) bspec)+ apply (rule conjI | clarsimp)+ - apply (clarsimp simp:is_the_ep_def isCap_simps stable_def) + apply (clarsimp simp:is_the_ep_def isCap_simps stable_masked_def) apply (drule(1) bspec)+ apply (rule conjI | clarsimp)+ - apply (clarsimp simp:is_the_ep_def stable_def split:if_splits)+ + apply (clarsimp simp:is_the_ep_def stable_masked_def split:if_splits)+ apply (case_tac "a = cteCap cteb",clarsimp) apply (simp add:maskedAsFull_def split:if_splits) apply (simp add:maskedAsFull_again) @@ -2862,9 +2848,8 @@ next word_sle_def t2n_mask_eq_if) apply (rule conjI) apply (clarsimp simp: ccap_rights_relation_def cap_rights_to_H_def - false_def true_def to_bool_def allRights_def - excaps_map_def split_def - dest!: drop_n_foo interpret_excaps_eq) + allRights_def excaps_map_def split_def + dest!: drop_n_foo interpret_excaps_eq) apply (clarsimp simp:from_bool_def split:bool.splits) apply (case_tac "isEndpointCap (fst x)") apply (clarsimp simp: cap_get_tag_EndpointCap ep_cap_not_null cap_get_tag_isCap[symmetric]) @@ -2904,7 +2889,7 @@ next apply (rule conseqPre, vcg) apply (clarsimp split del: if_split) apply (clarsimp split del: if_split - simp add: Collect_const[symmetric] precond_def true_def false_def + simp add: Collect_const[symmetric] precond_def simp del: Collect_const) apply (rule HoarePartial.Seq[rotated] HoarePartial.Cond[OF order_refl] HoarePartial.Basic[OF order_refl] HoarePartial.Skip[OF order_refl] @@ -2931,14 +2916,14 @@ next apply (subgoal_tac "fst x = cteCap cte",simp) apply clarsimp apply (elim disjE) - apply (clarsimp simp:ep_cap_not_null stable_def) + apply (clarsimp simp:ep_cap_not_null stable_masked_def) apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) apply (clarsimp simp:valid_cap_simps' isCap_simps) apply (subgoal_tac "slots \ []") apply simp apply clarsimp apply (elim disjE) - apply (clarsimp simp:ep_cap_not_null stable_def) + apply (clarsimp simp:ep_cap_not_null stable_masked_def) apply (clarsimp dest!:ccap_relation_lift stable_eq simp: cap_get_tag_isCap) apply (clarsimp dest!:ccap_relation_lift simp:cap_get_tag_isCap is_the_ep_def) apply (clarsimp simp:valid_cap_simps' isCap_simps) @@ -3131,10 +3116,11 @@ lemma ccorres_sequenceE_while': Basic (\s. i_'_update (\_. i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -3162,6 +3148,7 @@ proof - let ?curr = "\s. current_extra_caps_' (globals s)" let ?EXCNONE = "{s. ret__unsigned_long_' s = scast EXCEPTION_NONE}" let ?interpret = "\v n. 
take n (array_to_list (excaprefs_C v))" + note empty_fail_cond[simp] show ?thesis apply (rule ccorres_gen_asm)+ apply (cinit(no_subst_asm) lift: thread_' bufferPtr_' info_' simp: whileAnno_def) @@ -3187,9 +3174,10 @@ proof - apply csymbr apply csymbr apply (rename_tac "lngth") - apply (simp add: mi_from_H_def mapME_def del: Collect_const cong: bind_apply_cong) + apply (unfold mapME_def)[1] + apply (simp add: mi_from_H_def del: Collect_const) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="length rv = unat word2" in ccorres_gen_asm) + apply (rule_tac P="length xs = unat word2" in ccorres_gen_asm) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_add_returnOk2, rule ccorres_splitE_novcg) @@ -3198,7 +3186,7 @@ proof - and Q="UNIV" and F="\n s. valid_pspace' s \ tcb_at' thread s \ (case buffer of Some x \ valid_ipc_buffer_ptr' x | _ \ \) s \ - (\m < length rv. user_word_at (rv ! m) + (\m < length xs. user_word_at (xs ! m) (x2 + (of_nat m + (msgMaxLength + 2)) * 8) s)" in ccorres_sequenceE_while') apply (simp add: split_def) @@ -3208,7 +3196,7 @@ proof - apply (rule_tac xf'=cptr_' in ccorres_abstract, ceqv) apply (ctac add: capFaultOnFailure_ccorres [OF lookupSlotForThread_ccorres']) - apply (rule_tac P="is_aligned rva 5" in ccorres_gen_asm) + apply (rule_tac P="is_aligned rv 5" in ccorres_gen_asm) apply (simp add: ccorres_cond_iffs liftE_bindE) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_getSlotCap]) apply (rule_tac P'="UNIV \ {s. excaps_map ys @@ -3229,7 +3217,7 @@ proof - apply (clarsimp simp: ccorres_cond_iffs) apply (rule_tac P= \ and P'="{x. errstate x= lu_ret___struct_lookupSlot_raw_ret_C \ - rv' = (rv ! length ys)}" + rv' = (xs ! length ys)}" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def) @@ -3237,9 +3225,9 @@ proof - apply (clarsimp simp: cfault_rel2_def) apply (clarsimp simp: cfault_rel_def) apply (simp add: seL4_Fault_CapFault_lift) - apply (clarsimp simp: is_cap_fault_def to_bool_def false_def) + apply (clarsimp simp: is_cap_fault_def) apply wp - apply (rule hoare_post_imp_R, rule lsft_real_cte) + apply (rule hoare_strengthen_postE_R, rule lsft_real_cte) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps') apply (vcg exspec=lookupSlot_modifies) apply vcg @@ -3270,9 +3258,8 @@ proof - apply ceqv apply (simp del: Collect_const) apply (rule_tac P'="{s. snd rv'=?curr s}" - and P="\s. length rva = length rv - \ (\x \ set rva. snd x \ 0)" - in ccorres_from_vcg_throws) + and P="\s. length rv = length xs \ (\x \ set rv. snd x \ 0)" + in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def seL4_MsgExtraCapBits_def) @@ -3293,7 +3280,7 @@ proof - liftE_bindE[symmetric]) apply (wp mapME_length mapME_set | simp)+ apply (rule_tac Q'="\rv. no_0_obj' and real_cte_at' rv" - in hoare_post_imp_R, wp lsft_real_cte) + in hoare_strengthen_postE_R, wp lsft_real_cte) apply (clarsimp simp: cte_wp_at_ctes_of) apply (wpsimp)+ apply (clarsimp simp: guard_is_UNIV_def @@ -3366,7 +3353,7 @@ proof - apply (cinit lift: sender_' receiver_' sendBuffer_' receiveBuffer_' canGrant_' badge_' endpoint_' cong: call_ignore_cong) - apply (clarsimp cong: call_ignore_cong simp del: dc_simp) + apply (clarsimp cong: call_ignore_cong) apply (ctac(c_lines 2, no_vcg) add: getMessageInfo_ccorres') apply (rule_tac xf'="\s. current_extra_caps_' (globals s)" and r'="\c c'. 
interpret_excaps c' = excaps_map c" @@ -3412,7 +3399,7 @@ proof - apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: seL4_MessageInfo_lift_def message_info_to_H_def mask_def msgLengthBits_def word_bw_assocs) - apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] static_imp_wp + apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] hoare_weak_lift_imp | simp)+ apply (auto simp: excaps_in_mem_def valid_ipc_buffer_ptr'_def option_to_0_def option_to_ptr_def @@ -3424,7 +3411,7 @@ qed lemma lookupIPCBuffer_not_Some_0: "\\\ lookupIPCBuffer r t \\rv. K (rv \ Some 0)\" apply (simp add: lookupIPCBuffer_def X64_H.lookupIPCBuffer_def) - apply (wp hoare_post_taut haskell_assert_wp + apply (wp hoare_TrueI haskell_assert_wp | simp add: Let_def getThreadBufferSlot_def locateSlotTCB_def | intro conjI impI | wpc)+ done @@ -3475,7 +3462,6 @@ lemma replyFromKernel_error_ccorres [corres]: apply ((rule ccorres_Guard_Seq)+)? apply csymbr apply (rule ccorres_abstract_cleanup) - apply (fold dc_def)[1] apply (rule setMessageInfo_ccorres) apply wp apply (simp add: Collect_const_mem) @@ -3493,7 +3479,7 @@ lemma replyFromKernel_error_ccorres [corres]: message_info_to_H_def valid_pspace_valid_objs') apply (clarsimp simp: msgLengthBits_def msgFromSyscallError_def syscall_error_to_H_def syscall_error_type_defs - mask_def true_def option_to_ptr_def + mask_def option_to_ptr_def split: if_split_asm) done @@ -3544,14 +3530,12 @@ lemma doIPCTransfer_ccorres [corres]: apply simp_all[3] apply ceqv apply csymbr - apply (fold dc_def)[1] apply ctac apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero) - apply (fold dc_def)[1] apply ctac - apply (clarsimp simp: guard_is_UNIV_def false_def option_to_ptr_def split: option.splits) + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def split: option.splits) apply (rule_tac Q="\rv. 
valid_pspace' and cur_tcb' and tcb_at' sender and tcb_at' receiver and K (rv \ Some 0) and (case_option \ valid_ipc_buffer_ptr' rv) @@ -3560,7 +3544,7 @@ lemma doIPCTransfer_ccorres [corres]: apply (auto simp: valid_ipc_buffer_ptr'_def option_to_0_def split: option.splits)[1] apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) - apply (auto simp: to_bool_def true_def) + apply auto done lemma fault_case_absorb_bind: @@ -3581,7 +3565,6 @@ lemma Arch_getSanitiseRegisterInfo_ccorres: (Call Arch_getSanitiseRegisterInfo_'proc)" apply (cinit' lift: thread_' simp: getSanitiseRegisterInfo_def) apply (rule ccorres_return_C, simp+) - apply (simp add: false_def) done lemma copyMRsFaultReply_ccorres_exception: @@ -3612,7 +3595,7 @@ proof - apply (rule ccorres_rhs_assoc2) apply (simp add: MessageID_Exception_def) apply ccorres_rewrite - apply (subst bind_return_unit) + apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_zipWithM_x_while) apply clarsimp @@ -3624,7 +3607,7 @@ proof - apply (vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (auto simp: from_bool_def sanitiseRegister_def)[1] + apply (auto simp: sanitiseRegister_def)[1] apply wp apply clarsimp apply vcg @@ -3665,7 +3648,7 @@ proof - n_msgRegisters_def of_nat_less_iff) apply ccorres_rewrite - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (wp mapM_wp') apply clarsimp+ apply (clarsimp simp: guard_is_UNIV_def message_info_to_H_def @@ -3716,6 +3699,7 @@ lemma copyMRsFaultReply_ccorres_syscall: let ?obj_at_ft = "obj_at' (\tcb. tcbFault tcb = Some f) s" note symb_exec_r_fault = ccorres_symb_exec_r_known_rv_UNIV [where xf'=ret__unsigned_' and R="?obj_at_ft" and R'=UNIV] + note empty_fail_cond[simp] show ?thesis apply (unfold K_def, rule ccorres_gen_asm) using [[goals_limit=1]] apply (cinit' lift: sender_' receiver_' @@ -3820,7 +3804,6 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (subst aligned_add_aligned, assumption) apply (rule is_aligned_mult_triv2[where n=3, simplified]) apply (simp add: msg_align_bits) - apply (simp add: of_nat_unat[simplified comp_def]) apply (simp only: n_msgRegisters_def) apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def word_unat.Rep_inverse[of "scast _ :: 'a word"] @@ -3837,7 +3820,6 @@ lemma copyMRsFaultReply_ccorres_syscall: msg_align_bits sanitiseRegister_def simp del: upt_rec_numeral cong: if_cong register.case_cong, simp_all add: word_less_nat_alt unat_add_lem[THEN iffD1] unat_of_nat)[1] - apply (rule_tac x=rv in exI, auto)[1] apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def msgRegisters_ccorres syscallMessage_ccorres @@ -3859,8 +3841,8 @@ lemma copyMRsFaultReply_ccorres_syscall: apply simp apply (subst option.split[symmetric,where P=id, simplified]) apply (rule valid_drop_case) - apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified K_def] - lookupIPCBuffer_not_Some_0[simplified K_def]) + apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified] + lookupIPCBuffer_not_Some_0[simplified]) apply (simp add: length_syscallMessage length_msgRegisters n_syscallMessage_def @@ -3872,7 +3854,7 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (rule ccorres_guard_imp) apply (rule ccorres_symb_exec_l) apply (case_tac rva ; clarsimp) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (rule ccorres_return_Skip)+ apply (wp mapM_x_wp_inv user_getreg_inv' | clarsimp simp: zipWithM_x_mapM_x split: prod.split)+ apply (cases "4 < len") @@ 
-3914,7 +3896,7 @@ lemma handleArchFaultReply_corres: apply simp+ apply (rule ccorres_symb_exec_l) apply (ctac add: ccorres_return_C) - apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp simp: to_bool_def true_def)+ + apply (wp mapM_wp' empty_fail_loadWordUser | clarsimp)+ done (* MOVE *) @@ -3962,7 +3944,7 @@ lemma handleFaultReply_ccorres [corres]: apply (unfold K_def, rule ccorres_gen_asm) apply (rule monadic_rewrite_ccorres_assemble_nodrop[OF _ handleFaultReply',rotated], simp) apply (cinit lift: sender_' receiver_' simp: whileAnno_def) - apply (clarsimp simp del: dc_simp) + apply clarsimp apply (ctac(c_lines 2) add: getMessageInfo_ccorres') apply (rename_tac tag tag') apply csymbr @@ -4008,7 +3990,7 @@ lemma handleFaultReply_ccorres [corres]: split del: if_split) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) - apply (fold bind_assoc id_def) + apply (fold bind_assoc) apply (ctac add: copyMRsFaultReply_ccorres_syscall[simplified bind_assoc[symmetric]]) apply (ctac add: ccorres_return_C) apply wp @@ -4050,9 +4032,9 @@ lemma handleFaultReply_ccorres [corres]: apply clarsimp apply vcg_step apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def - message_info_to_H_def to_bool_def scast_def + message_info_to_H_def scast_def length_exceptionMessage length_syscallMessage - min_def word_less_nat_alt true_def + min_def word_less_nat_alt guard_is_UNIV_def seL4_Faults seL4_Arch_Faults split: if_split) apply (simp add: length_exceptionMessage length_syscallMessage) @@ -4060,10 +4042,8 @@ lemma handleFaultReply_ccorres [corres]: apply clarsimp apply (vcg exspec=getRegister_modifies) apply (clarsimp simp: n_exceptionMessage_def n_syscallMessage_def - message_info_to_H_def to_bool_def - length_exceptionMessage length_syscallMessage - min_def word_less_nat_alt true_def - obj_at'_def + message_info_to_H_def length_exceptionMessage length_syscallMessage + min_def word_less_nat_alt obj_at'_def split: if_split) apply (fastforce simp: seL4_Faults seL4_Arch_Faults) done @@ -4103,7 +4083,7 @@ lemma cteDeleteOne_tcbFault: apply (wp emptySlot_tcbFault cancelAllIPC_tcbFault getCTE_wp' cancelAllSignals_tcbFault unbindNotification_tcbFault isFinalCapability_inv unbindMaybeNotification_tcbFault - static_imp_wp + hoare_weak_lift_imp | wpc | simp add: Let_def)+ apply (clarsimp split: if_split) done @@ -4131,7 +4111,7 @@ lemma transferCaps_local_slots: transferCaps tag caps ep receiver receiveBuffer \\tag'. cte_wp_at' (\cte. P (cteCap cte)) slot\" apply (simp add: transferCaps_def pred_conj_def) - apply (rule hoare_seq_ext[rotated]) + apply (rule bind_wp_fwd) apply (rule hoare_vcg_conj_lift) apply (rule get_rs_real_cte_at') apply (rule get_recv_slot_inv') @@ -4197,10 +4177,6 @@ lemma doReplyTransfer_ccorres [corres]: \ \\grant = from_bool grant\) hs (doReplyTransfer sender receiver slot grant) (Call doReplyTransfer_'proc)" -proof - - have invs_valid_queues_strg: "\s. 
invs' s \ valid_queues s" - by clarsimp - show ?thesis apply (cinit lift: sender_' receiver_' slot_' grant_') apply (rule getThreadState_ccorres_foo) apply (rule ccorres_assert2) @@ -4226,14 +4202,13 @@ proof - apply csymbr apply wpc apply (clarsimp simp: ccorres_cond_iffs split del: if_split) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg)) apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) - apply (wpsimp wp: sts_running_valid_queues setThreadState_st_tcb)+ + apply (wpsimp wp: sts_valid_objs' setThreadState_st_tcb)+ apply (wp cteDeleteOne_sch_act_wf) apply vcg apply (rule conseqPre, vcg) @@ -4242,15 +4217,13 @@ proof - apply wp apply (simp add: cap_get_tag_isCap) apply (strengthen invs_weak_sch_act_wf_strg - cte_wp_at_imp_consequent'[where P="\ct. Ex (ccap_relation (cteCap ct))" for ct] - invs_valid_queues_strg) + cte_wp_at_imp_consequent'[where P="\ct. Ex (ccap_relation (cteCap ct))" for ct]) apply (simp add: cap_reply_cap_def) apply (wp doIPCTransfer_reply_or_replyslot) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero split del: if_split) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def)[1] apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (rule_tac A'=UNIV in stronger_ccorres_guard_imp) @@ -4279,22 +4252,20 @@ proof - apply (ctac (no_vcg)) apply (simp only: K_bind_def) apply (ctac add: possibleSwitchTo_ccorres) - apply (wp sts_running_valid_queues setThreadState_st_tcb | simp)+ - apply (fold dc_def)[1] - apply (ctac add: setThreadState_ccorres_valid_queues'_simple) + apply (wp sts_valid_objs' setThreadState_st_tcb | simp)+ + apply (ctac add: setThreadState_ccorres_simple) apply wp - apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' static_imp_wp + apply ((wp threadSet_sch_act hoare_weak_lift_imp threadSet_valid_objs' threadSet_weak_sch_act_wf | simp add: valid_tcb_state'_def)+)[1] - apply (clarsimp simp: guard_is_UNIV_def ThreadState_Restart_def - ThreadState_Inactive_def mask_def to_bool_def - option_to_ctcb_ptr_def) + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def option_to_ctcb_ptr_def) - apply (rule_tac Q="\rv. valid_queues and tcb_at' receiver and valid_queues' and + apply (rule_tac Q="\rv. tcb_at' receiver and valid_objs' and sch_act_simple and (\s. ksCurDomain s \ maxDomain) and - (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) + (\s. 
sch_act_wf (ksSchedulerAction s) s) and + pspace_aligned' and pspace_distinct'" in hoare_post_imp) apply (clarsimp simp: inQ_def weak_sch_act_wf_def) - apply (wp threadSet_valid_queues threadSet_sch_act handleFaultReply_sch_act_wf) + apply (wp threadSet_sch_act handleFaultReply_sch_act_wf) apply (clarsimp simp: guard_is_UNIV_def) apply assumption apply clarsimp @@ -4303,15 +4274,14 @@ proof - apply (erule(1) cmap_relation_ko_atE [OF cmap_relation_tcb]) apply (clarsimp simp: ctcb_relation_def typ_heap_simps) apply wp - apply (strengthen vp_invs_strg' invs_valid_queues') + apply (strengthen vp_invs_strg') apply (wp cteDeleteOne_tcbFault cteDeleteOne_sch_act_wf) apply vcg apply (rule conseqPre, vcg) apply (simp(no_asm_use) add: gs_set_assn_Delete_cstate_relation[unfolded o_def] subset_iff rf_sr_def) - apply (clarsimp simp: guard_is_UNIV_def to_bool_def true_def - option_to_ptr_def option_to_0_def false_def - ThreadState_Running_def mask_def + apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def + ThreadState_defs mask_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs option_to_ctcb_ptr_def split: option.splits) @@ -4320,7 +4290,6 @@ proof - cap_get_tag_isCap) apply fastforce done -qed lemma ccorres_getCTE_cte_at: "ccorresG rf_sr \ r xf P P' hs (getCTE p >>= f) c @@ -4340,7 +4309,7 @@ lemma ccorres_getCTE_cte_at: done lemma setupCallerCap_ccorres [corres]: - "ccorres dc xfdc (valid_queues and valid_pspace' and (\s. \d p. sender \ set (ksReadyQueues s (d, p))) + "ccorres dc xfdc (valid_pspace' and (\s. sch_act_wf (ksSchedulerAction s) s) and sch_act_not sender and tcb_at' sender and tcb_at' receiver and tcb_at' sender and tcb_at' receiver) @@ -4353,8 +4322,7 @@ lemma setupCallerCap_ccorres [corres]: apply (frule_tac p=sender in is_aligned_tcb_ptr_to_ctcb_ptr) apply (cinit lift: sender_' receiver_' canGrant_') apply (clarsimp simp: word_sle_def - tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] - , fold dc_def)[1] + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]]) apply ccorres_remove_UNIV_guard apply (ctac(no_vcg)) apply (rule ccorres_move_array_assertion_tcb_ctes) @@ -4375,14 +4343,14 @@ lemma setupCallerCap_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg)) apply (rule ccorres_assert) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply csymbr apply (ctac add: cteInsert_ccorres) apply simp apply (wp getSlotCap_cte_wp_at) apply (clarsimp simp: ccap_relation_def cap_lift_reply_cap cap_to_H_simps cap_reply_cap_lift_def - false_def tcbSlots Kernel_C.tcbCaller_def + tcbSlots Kernel_C.tcbCaller_def size_of_def cte_level_bits_def) apply (simp add: is_aligned_neg_mask) apply (wp getCTE_wp') @@ -4402,11 +4370,11 @@ lemma setupCallerCap_ccorres [corres]: apply (simp add: locateSlot_conv) apply wp apply (clarsimp simp: ccap_rights_relation_def allRights_def - mask_def true_def cap_rights_to_H_def tcbCallerSlot_def + mask_def cap_rights_to_H_def tcbCallerSlot_def Kernel_C.tcbCaller_def) apply simp apply wp - apply (clarsimp simp: Kernel_C.ThreadState_BlockedOnReply_def mask_def + apply (clarsimp simp: ThreadState_defs mask_def valid_pspace'_def tcbReplySlot_def valid_tcb_state'_def Collect_const_mem tcb_cnode_index_defs) @@ -4430,7 +4398,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: 
comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -4451,7 +4419,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -4473,23 +4441,20 @@ lemma sendIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def - tcb_queue_relation'_def) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs fpu_null_state_heap_update_tag_disj_simps global_ioport_bitmap_heap_update_tag_disj_simps @@ -4516,30 +4481,27 @@ lemma sendIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - isRecvEP_def isSendEP_def - tcb_queue_relation'_def valid_ep'_def - split: endpoint.splits list.splits - split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) - apply (simp add: objBits_simps') - apply (clarsimp split: if_split) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_def isSendEP_def + tcb_queue_relation'_def valid_ep'_def + split: endpoint.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) + apply (simp add: objBits_simps') + apply (clarsimp split: if_split) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply 
(clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs fpu_null_state_heap_update_tag_disj_simps global_ioport_bitmap_heap_update_tag_disj_simps @@ -4552,25 +4514,11 @@ lemma sendIPC_dequeue_ccorres_helper: apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def) done -(* FIXME RAF: this is the old formulation, the above one does not work as expected *) -lemma rf_sr_tcb_update_twice: - "h_t_valid (hrs_htd (hrs2 (globals s') (t_hrs_' (gs2 (globals s'))))) c_guard - (ptr (t_hrs_' (gs2 (globals s'))) (globals s')) - \ ((s, globals_update (\gs. t_hrs_'_update (\ths. - hrs_mem_update (heap_update (ptr ths gs :: tcb_C ptr) (v ths gs)) - (hrs_mem_update (heap_update (ptr ths gs) (v' ths gs)) (hrs2 gs ths))) (gs2 gs)) s') \ rf_sr) - = ((s, globals_update (\gs. t_hrs_'_update (\ths. - hrs_mem_update (heap_update (ptr ths gs) (v ths gs)) (hrs2 gs ths)) (gs2 gs)) s') \ rf_sr)" - by (simp add: rf_sr_def cstate_relation_def Let_def - cpspace_relation_def typ_heap_simps' - carch_state_relation_def cmachine_state_relation_def - packed_heap_update_collapse_hrs) - lemma sendIPC_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and valid_objs' and pspace_canonical' and + "ccorres dc xfdc (tcb_at' thread and valid_objs' and pspace_canonical' and + pspace_aligned' and pspace_distinct' and sch_act_not thread and ep_at' epptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + (\s. sch_act_wf (ksSchedulerAction s) s) and K (bos = ThreadState_BlockedOnSend \ epptr' = epptr \ badge' = badge \ cg = from_bool canGrant \ cgr = from_bool canGrantReply @@ -4623,14 +4571,13 @@ lemma sendIPC_block_ccorres_helper: (simp add: typ_heap_simps')+)[1] apply (simp add: tcb_cte_cases_def) apply (simp add: ctcb_relation_def cthread_state_relation_def - ThreadState_BlockedOnSend_def mask_def - from_bool_def to_bool_def) + ThreadState_defs mask_def) apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend split: bool.split) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_queues + apply (wp threadSet_weak_sch_act_wf_runnable' threadSet_valid_objs') apply (clarsimp simp: guard_is_UNIV_def) apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def @@ -4735,6 +4682,19 @@ lemma tcb_queue_relation_qend_valid': apply (simp add: h_t_valid_clift_Some_iff) done +lemma tcb_queue'_head_end_NULL: + assumes qr: "tcb_queue_relation' getNext getPrev mp queue qhead qend" + and tat: "\t\set queue. tcb_at' t s" + shows "(qend = NULL) = (qhead = NULL)" + using qr tat + apply - + apply (erule tcb_queue_relationE') + apply (simp add: tcb_queue_head_empty_iff split: if_splits) + apply (rule tcb_at_not_NULL) + apply (erule bspec) + apply simp + done + lemma tcbEPAppend_spec: "\s queue. \ \ \s. \t. (t, s) \ rf_sr \ (\tcb\set queue. 
tcb_at' tcb t) \ distinct queue @@ -4821,7 +4781,7 @@ lemma sendIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -4837,12 +4797,12 @@ lemma sendIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (SendEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -4860,34 +4820,31 @@ lemma sendIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=3] EPState_Send_def) - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) - apply (rule conjI, simp add: mask_def) - subgoal - by (fastforce simp: valid_pspace'_def objBits_simps' - intro!: tcb_and_not_mask_canonical - dest!: st_tcb_strg'[rule_format]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Send_def) + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) + apply (rule conjI, simp add: mask_def) + subgoal + by (fastforce simp: valid_pspace'_def objBits_simps' + intro!: tcb_and_not_mask_canonical + dest!: st_tcb_strg'[rule_format]) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (simp only:projectKOs injectKO_ep objBits_simps) - apply clarsimp - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (simp only:projectKOs injectKO_ep objBits_simps) + apply clarsimp + apply (clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps @@ -4907,41 +4864,38 @@ lemma sendIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim 
conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=3] EPState_Send_def - split: if_split) - subgoal - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask - valid_ep'_def - dest: tcb_queue_relation_next_not_NULL) - apply (rule conjI, clarsimp) - apply (rule conjI, fastforce simp: mask_def) - apply (fastforce simp: valid_pspace'_def objBits_simps' - intro!: tcb_and_not_mask_canonical - dest!: st_tcb_strg'[rule_format]) - apply (clarsimp, rule conjI, fastforce simp: mask_def) - by (fastforce simp: valid_pspace'_def objBits_simps' - intro!: tcb_and_not_mask_canonical - dest!: st_tcb_strg'[rule_format]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Send_def + split: if_split) + subgoal + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, clarsimp) + apply (rule conjI, fastforce simp: mask_def) + apply (fastforce simp: valid_pspace'_def objBits_simps' + intro!: tcb_and_not_mask_canonical + dest!: st_tcb_strg'[rule_format]) + apply (clarsimp, rule conjI, fastforce simp: mask_def) + by (fastforce simp: valid_pspace'_def objBits_simps' + intro!: tcb_and_not_mask_canonical + dest!: st_tcb_strg'[rule_format]) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps @@ -4964,8 +4918,7 @@ lemma ctcb_relation_blockingIPCCanGrantD: lemma sendIPC_ccorres [corres]: "ccorres dc xfdc (invs' and st_tcb_at' simple' thread - and sch_act_not thread and ep_at' epptr and - (\s. \d p. thread \ set (ksReadyQueues s (d, p)))) + and sch_act_not thread and ep_at' epptr) (UNIV \ \\blocking = from_bool blocking\ \ \\do_call = from_bool do_call\ \ \\badge = badge\ @@ -4996,8 +4949,7 @@ lemma sendIPC_ccorres [corres]: apply ceqv apply (rule_tac A="invs' and st_tcb_at' simple' thread and sch_act_not thread and ko_at' ep epptr - and ep_at' epptr - and (\s. \d p. 
thread \ set (ksReadyQueues s (d, p)))" + and ep_at' epptr" in ccorres_guard_imp2 [where A'=UNIV]) apply wpc \ \RecvEP case\ @@ -5037,28 +4989,23 @@ lemma sendIPC_ccorres [corres]: apply (ctac(no_vcg) add: possibleSwitchTo_ccorres) apply (clarsimp split del: if_split) apply (wpc ; ccorres_rewrite) - apply (clarsimp simp: from_bool_def disj_imp[symmetric] split del: if_split) + apply (clarsimp simp: disj_imp[symmetric] split del: if_split) apply (wpc ; clarsimp) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setupCallerCap_ccorres) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres) - apply (fold dc_def)[1] apply (rule ccorres_return_Skip) apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift possibleSwitchTo_sch_act_not - possibleSwitchTo_sch_act_not sts_st_tcb' - possibleSwitchTo_ksQ' sts_valid_queues sts_ksQ' + possibleSwitchTo_sch_act_not sts_st_tcb' sts_valid_objs' simp: valid_tcb_state'_def)+ apply vcg - apply (wpsimp wp: doIPCTransfer_sch_act setEndpoint_ksQ hoare_vcg_all_lift - set_ep_valid_objs' setEndpoint_valid_mdb' + apply (wpsimp wp: doIPCTransfer_sch_act hoare_vcg_all_lift + set_ep_valid_objs' setEndpoint_valid_mdb' | wp (once) hoare_drop_imp | strengthen sch_act_wf_weak)+ - apply (fastforce simp: guard_is_UNIV_def ThreadState_Inactive_def Collect_const_mem - ThreadState_Running_def mask_def from_bool_def - option_to_ptr_def option_to_0_def + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs Collect_const_mem mask_def + option_to_ptr_def option_to_0_def split: bool.split_asm) \ \IdleEP case\ @@ -5175,10 +5122,10 @@ lemma ctcb_relation_blockingIPCCanGrantReplyD: done lemma receiveIPC_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and valid_objs' and pspace_canonical' and + "ccorres dc xfdc (tcb_at' thread and valid_objs' and pspace_canonical' and + pspace_aligned' and pspace_distinct' and sch_act_not thread and ep_at' epptr and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and K (epptr = epptr && ~~ mask 4) and K (isEndpointCap cap \ ccap_relation cap cap')) UNIV hs @@ -5213,12 +5160,12 @@ lemma receiveIPC_block_ccorres_helper: apply (erule(1) rf_sr_tcb_update_no_queue_gen, (simp add: typ_heap_simps)+) apply (simp add: tcb_cte_cases_def) apply (simp add: ctcb_relation_def cthread_state_relation_def ccap_relation_ep_helpers - ThreadState_BlockedOnReceive_def mask_def cap_get_tag_isCap) + ThreadState_defs mask_def cap_get_tag_isCap) apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_valid_queues hoare_vcg_all_lift threadSet_valid_objs' + apply (wp hoare_vcg_all_lift threadSet_valid_objs' threadSet_weak_sch_act_wf_runnable') apply (clarsimp simp: guard_is_UNIV_def) apply (clarsimp simp: sch_act_wf_weak valid_tcb'_def valid_tcb_state'_def @@ -5245,7 +5192,7 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -5261,12 +5208,12 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (RecvEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5284,41 +5231,38 @@ lemma receiveIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=3] EPState_Recv_def - split: if_split) - subgoal - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask - valid_ep'_def - dest: tcb_queue_relation_next_not_NULL) - apply (rule conjI, clarsimp) - apply (rule conjI, fastforce simp: mask_def) - apply (fastforce simp: valid_pspace'_def objBits_simps' - intro!: tcb_and_not_mask_canonical - dest!: st_tcb_strg'[rule_format]) - apply (clarsimp, rule conjI, fastforce simp: mask_def) - by (fastforce simp: valid_pspace'_def objBits_simps' - intro!: tcb_and_not_mask_canonical - dest!: st_tcb_strg'[rule_format]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Recv_def + split: if_split) + subgoal + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + valid_ep'_def + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, clarsimp) + apply (rule conjI, fastforce simp: mask_def) + apply (fastforce simp: valid_pspace'_def objBits_simps' + intro!: tcb_and_not_mask_canonical + dest!: st_tcb_strg'[rule_format]) + 
apply (clarsimp, rule conjI, fastforce simp: mask_def) + by (fastforce simp: valid_pspace'_def objBits_simps' + intro!: tcb_and_not_mask_canonical + dest!: st_tcb_strg'[rule_format]) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps @@ -5338,33 +5282,30 @@ lemma receiveIPC_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep', assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - mask_def [where n=3] EPState_Recv_def) - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) - subgoal - apply (rule conjI, fastforce simp: mask_def) - by (fastforce simp: valid_pspace'_def objBits_simps' - intro!: tcb_and_not_mask_canonical - dest!: st_tcb_strg'[rule_format]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep', assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + mask_def [where n=3] EPState_Recv_def) + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask) + subgoal + apply (rule conjI, fastforce simp: mask_def) + by (fastforce simp: valid_pspace'_def objBits_simps' + intro!: tcb_and_not_mask_canonical + dest!: st_tcb_strg'[rule_format]) + apply (simp add: isSendEP_def isRecvEP_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue, assumption+) apply (simp add: isSendEP_def isRecvEP_def) - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - apply (rule cnotification_relation_ep_queue, assumption+) - apply (simp add: isSendEP_def isRecvEP_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs 
global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps @@ -5393,7 +5334,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -5414,7 +5355,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5436,24 +5377,21 @@ lemma receiveIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def - tcb_queue_relation'_def) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (simp add: cendpoint_relation_def Let_def EPState_Idle_def + tcb_queue_relation'_def) apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) - apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') + apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps elim!: fpu_null_state_typ_heap_preservation) @@ -5479,30 +5417,27 @@ lemma receiveIPC_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (rule cpspace_relation_ep_update_ep, assumption+) - apply (clarsimp simp: cendpoint_relation_def Let_def - isRecvEP_def isSendEP_def - tcb_queue_relation'_def valid_ep'_def - split: endpoint.splits list.splits - split del: if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) - apply (clarsimp simp: objBits_simps') - apply (clarsimp split: if_split) - apply simp - \ \ntfn relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (rule cpspace_relation_ep_update_ep, assumption+) + apply (clarsimp simp: cendpoint_relation_def Let_def + isRecvEP_def isSendEP_def + tcb_queue_relation'_def valid_ep'_def + split: endpoint.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' 
(if x22 = [] then x21 else last x22) \") + apply (erule (1) tcb_and_not_mask_canonical[OF invs_pspace_canonical']) + apply (clarsimp simp: objBits_simps') + apply (clarsimp split: if_split) apply simp - apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) - apply simp - apply (erule (1) map_to_ko_atI') - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + \ \ntfn relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+) + apply simp + apply (erule (1) map_to_ko_atI') apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps @@ -5569,7 +5504,7 @@ lemma completeSignal_ccorres: apply (erule(1) cmap_relation_ko_atE[OF cmap_relation_ntfn]) apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps) apply ceqv - apply (fold dc_def, ctac(no_vcg)) + apply (ctac(no_vcg)) apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp) @@ -5624,7 +5559,6 @@ lemma receiveIPC_ccorres [corres]: notes option.case_cong_weak [cong] shows "ccorres dc xfdc (invs' and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. thread \ set (ksReadyQueues s (d, p))) and valid_cap' cap and K (isEndpointCap cap)) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ \ \ccap_relation cap \cap\ @@ -5685,7 +5619,7 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule ccorres_cond[where R=\]) apply (simp add: Collect_const_mem) - apply (ctac add: completeSignal_ccorres[unfolded dc_def]) + apply (ctac add: completeSignal_ccorres) apply (rule_tac xf'=ret__unsigned_longlong_' and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv @@ -5700,7 +5634,6 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule_tac A="invs' and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. 
thread \ set (ksReadyQueues s (d, p))) and ko_at' ep (capEPPtr cap)" in ccorres_guard_imp2 [where A'=UNIV]) apply wpc @@ -5715,20 +5648,18 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp apply (rename_tac list NOo) - apply (rule_tac ep="RecvEP list" - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep="RecvEP list" in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (rename_tac list) apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \IdleEP case\ apply (rule ccorres_cond_true) apply csymbr @@ -5740,18 +5671,16 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp - apply (rule_tac ep=IdleEP - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep=IdleEP in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \SendEP case\ apply (thin_tac "isBlockinga = from_bool P" for P) apply (rule ccorres_cond_false) @@ -5829,12 +5758,10 @@ lemma receiveIPC_ccorres [corres]: split: Structures_H.thread_state.splits) apply ceqv - apply (fold dc_def) - supply dc_simp[simp del] apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) apply wpc (* blocking ipc call *) - apply (clarsimp simp: from_bool_def split del: if_split simp del: Collect_const) + apply (clarsimp split del: if_split simp del: Collect_const) apply ccorres_rewrite apply (wpc ; clarsimp ; ccorres_rewrite) apply csymbr @@ -5846,28 +5773,25 @@ lemma receiveIPC_ccorres [corres]: apply ccorres_rewrite apply ctac apply (ctac add: possibleSwitchTo_ccorres) - apply (wpsimp wp: sts_st_tcb' sts_valid_queues) + apply (wpsimp wp: sts_st_tcb' sts_valid_objs') apply (vcg exspec=setThreadState_modifies) - apply (fastforce simp: guard_is_UNIV_def ThreadState_Inactive_def - mask_def ThreadState_Running_def cap_get_tag_isCap - ccap_relation_ep_helpers) + apply (fastforce simp: guard_is_UNIV_def ThreadState_defs mask_def + cap_get_tag_isCap ccap_relation_ep_helpers) apply (clarsimp simp: valid_tcb_state'_def) - apply (rule_tac Q="\_. valid_pspace' and valid_queues + apply (rule_tac Q="\_. valid_pspace' and st_tcb_at' ((=) sendState) sender and tcb_at' thread and (\s. sch_act_wf (ksSchedulerAction s) s) - and (\s. (\a b. sender \ set (ksReadyQueues s (a, b)))) and sch_act_not sender and K (thread \ sender) and (\s. 
ksCurDomain s \ maxDomain)" in hoare_post_imp) - apply (clarsimp simp: valid_pspace_valid_objs' pred_tcb_at'_def sch_act_wf_weak - obj_at'_def) + apply (fastforce simp: valid_pspace_valid_objs' pred_tcb_at'_def sch_act_wf_weak + obj_at'_def) apply (wpsimp simp: guard_is_UNIV_def option_to_ptr_def option_to_0_def conj_ac)+ - apply (rule_tac Q="\rv. valid_queues and valid_pspace' + apply (rule_tac Q="\rv. valid_pspace' and cur_tcb' and tcb_at' sender and tcb_at' thread and sch_act_not sender and K (thread \ sender) and ep_at' (capEPPtr cap) and (\s. ksCurDomain s \ maxDomain) - and (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. sender \ set (ksReadyQueues s (d, p))))" + and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) subgoal by (auto, auto simp: st_tcb_at'_def obj_at'_def) apply (wp hoare_vcg_all_lift set_ep_valid_objs') @@ -5886,7 +5810,7 @@ lemma receiveIPC_ccorres [corres]: projectKOs invs'_def valid_state'_def st_tcb_at'_def valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' isBlockedOnReceive_def projectKO_opt_tcb - from_bool_def to_bool_def objBits_simps' + objBits_simps' elim!: delta_sym_refs split: if_split_asm bool.splits) (*very long*) apply (frule(1) sym_refs_obj_atD' [OF _ invs_sym']) @@ -5899,24 +5823,20 @@ lemma receiveIPC_ccorres [corres]: projectKOs invs'_def valid_state'_def st_tcb_at'_def valid_tcb_state'_def ko_wp_at'_def invs_valid_objs' isBlockedOnReceive_def projectKO_opt_tcb objBits_simps' - from_bool_def to_bool_def elim: delta_sym_refs split: if_split_asm bool.splits) (*very long *) apply (clarsimp simp: obj_at'_def state_refs_of'_def projectKOs) apply (frule(1) sym_refs_ko_atD' [OF _ invs_sym']) - apply (frule invs_queues) apply clarsimp apply (rename_tac list x xa) apply (rule_tac P="x\set list" in case_split) apply (clarsimp simp:st_tcb_at_refs_of_rev') apply (erule_tac x=x and P="\x. 
st_tcb_at' P x s" for P in ballE) - apply (drule_tac t=x in valid_queues_not_runnable'_not_ksQ) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) apply (subgoal_tac "sch_act_not x s") prefer 2 apply (frule invs_sch_act_wf') apply (clarsimp simp:sch_act_wf_def) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs isBlockedOnSend_def split: list.split | rule conjI)+ @@ -5944,11 +5864,10 @@ lemma sendSignal_dequeue_ccorres_helper: IF head_C \ntfn_queue = Ptr 0 THEN CALL notification_ptr_set_state(Ptr ntfn,scast NtfnState_Idle) FI)" - apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ntfn_blocked_in_queueD) apply (frule (1) ko_at_valid_ntfn' [OF _ invs_valid_objs']) apply (elim conjE) @@ -5968,7 +5887,7 @@ lemma sendSignal_dequeue_ccorres_helper: apply (drule ntfn_to_ep_queue, (simp add: isWaitingNtfn_def)+) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5990,23 +5909,20 @@ lemma sendSignal_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp+ - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def - tcb_queue_relation'_def) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (simp add: cnotification_relation_def Let_def NtfnState_Idle_def + tcb_queue_relation'_def) + apply simp apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps @@ -6035,33 +5951,30 @@ lemma sendSignal_dequeue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) - apply simp - apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) - apply simp+ - apply (erule (1) map_to_ko_atI') - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) - apply (clarsimp simp: cnotification_relation_def Let_def - isWaitingNtfn_def - tcb_queue_relation'_def valid_ntfn'_def - split: Structures_H.notification.splits list.splits - split del: 
if_split) - apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") - apply (rule conjI) - subgoal by (erule (1) tcb_ptr_sign_extend_canonical[OF invs_pspace_canonical']) - apply (rule context_conjI) - subgoal by (erule (1) tcb_ptr_sign_extend_canonical[OF invs_pspace_canonical']) - apply clarsimp - apply (clarsimp split: if_split) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) apply simp - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply (rule cendpoint_relation_ntfn_queue [OF invs_sym'], assumption+) + apply simp+ + apply (erule (1) map_to_ko_atI') + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) + apply (clarsimp simp: cnotification_relation_def Let_def + isWaitingNtfn_def + tcb_queue_relation'_def valid_ntfn'_def + split: Structures_H.notification.splits list.splits + split del: if_split) + apply (subgoal_tac "tcb_at' (if x22 = [] then x21 else last x22) \") + apply (rule conjI) + subgoal by (erule (1) tcb_ptr_sign_extend_canonical[OF invs_pspace_canonical']) + apply (rule context_conjI) + subgoal by (erule (1) tcb_ptr_sign_extend_canonical[OF invs_pspace_canonical']) + apply clarsimp + apply (clarsimp split: if_split) + apply simp apply (clarsimp simp: carch_state_relation_def global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps @@ -6156,7 +6069,7 @@ lemma sendSignal_ccorres [corres]: apply wpc apply (simp add: option_to_ctcb_ptr_def split del: if_split) apply (rule ccorres_cond_false) - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (rule ccorres_cond_true) apply (rule getThreadState_ccorres_foo) apply (rule ccorres_Guard_Seq) @@ -6171,23 +6084,21 @@ lemma sendSignal_ccorres [corres]: apply (ctac(no_vcg) add: cancelIPC_ccorres1[OF cteDeleteOne_ccorres]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: setRegister_ccorres) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) - apply (wp sts_running_valid_queues sts_st_tcb_at'_cases + apply (ctac add: possibleSwitchTo_ccorres) + apply (wp sts_valid_objs' sts_st_tcb_at'_cases | simp add: option_to_ctcb_ptr_def split del: if_split)+ apply (rule_tac Q="\_. 
tcb_at' (the (ntfnBoundTCB ntfn)) and invs'" in hoare_post_imp) apply auto[1] apply wp apply simp - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (clarsimp simp: guard_is_UNIV_def option_to_ctcb_ptr_def X64_H.badgeRegister_def Kernel_C.badgeRegister_def X64.badgeRegister_def X64.capRegister_def - Kernel_C.RDI_def - "StrictC'_thread_state_defs"less_mask_eq - Collect_const_mem) + Kernel_C.RDI_def ThreadState_defs less_mask_eq Collect_const_mem) apply (case_tac ts, simp_all add: receiveBlocked_def typ_heap_simps - cthread_state_relation_def "StrictC'_thread_state_defs")[1] + cthread_state_relation_def ThreadState_defs)[1] \ \ActiveNtfn case\ apply (rename_tac old_badge) apply (rule ccorres_cond_false) @@ -6238,16 +6149,14 @@ lemma sendSignal_ccorres [corres]: apply ceqv apply (simp only: K_bind_def) apply (ctac (no_vcg)) - apply (simp, fold dc_def) + apply simp apply (ctac (no_vcg)) apply (ctac add: possibleSwitchTo_ccorres) apply (simp) - apply (wp weak_sch_act_wf_lift_linear - setThreadState_oa_queued - sts_valid_queues tcb_in_cur_domain'_lift)[1] - apply (wp sts_valid_queues sts_runnable) + apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift)[1] + apply (wp sts_valid_objs' sts_runnable) apply (wp setThreadState_st_tcb set_ntfn_valid_objs' | clarsimp)+ - apply (clarsimp simp: guard_is_UNIV_def ThreadState_Running_def mask_def + apply (clarsimp simp: guard_is_UNIV_def ThreadState_defs mask_def badgeRegister_def Kernel_C.badgeRegister_def X64.badgeRegister_def X64.capRegister_def Kernel_C.RDI_def) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Idle_def @@ -6270,10 +6179,10 @@ lemma sendSignal_ccorres [corres]: done lemma receiveSignal_block_ccorres_helper: - "ccorres dc xfdc (tcb_at' thread and valid_queues and sch_act_not thread and + "ccorres dc xfdc (tcb_at' thread and sch_act_not thread and valid_objs' and ntfn_at' ntfnptr and pspace_canonical' and - (\s. sch_act_wf (ksSchedulerAction s) s \ - (\d p. thread \ set (ksReadyQueues s (d, p)))) and + pspace_aligned' and pspace_distinct' and + (\s. 
sch_act_wf (ksSchedulerAction s) s) and K (ntfnptr = ntfnptr && ~~ mask 4)) UNIV hs (setThreadState (Structures_H.thread_state.BlockedOnNotification @@ -6303,13 +6212,12 @@ lemma receiveSignal_block_ccorres_helper: (simp add: typ_heap_simps')+) apply (simp add: tcb_cte_cases_def) apply (simp add: ctcb_relation_def cthread_state_relation_def - ThreadState_BlockedOnNotification_def mask_def - from_bool_def to_bool_def) + ThreadState_defs mask_def) apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend) apply ceqv apply clarsimp apply ctac - apply (wp threadSet_valid_queues hoare_vcg_all_lift threadSet_valid_objs' + apply (wp hoare_vcg_all_lift threadSet_valid_objs' threadSet_weak_sch_act_wf_runnable') apply (clarsimp simp: guard_is_UNIV_def) apply (auto simp: weak_sch_act_wf_def valid_tcb'_def tcb_cte_cases_def @@ -6320,16 +6228,17 @@ lemma cpspace_relation_ntfn_update_ntfn': fixes ntfn :: "Structures_H.notification" and ntfn' :: "Structures_H.notification" and ntfnptr :: "machine_word" and s :: "kernel_state" defines "qs \ if isWaitingNtfn (ntfnObj ntfn') then set (ntfnQueue (ntfnObj ntfn')) else {}" - defines "s' \ s\ksPSpace := ksPSpace s(ntfnptr \ KONotification ntfn')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(ntfnptr \ KONotification ntfn')\" assumes koat: "ko_at' ntfn ntfnptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift t) Ptr (cnotification_relation (cslift t))" and srs: "sym_refs (state_refs_of' s')" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr - (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) + Ptr + (cnotification_relation (cslift t'))" proof - from koat have koat': "ko_at' ntfn' ntfnptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -6389,7 +6298,7 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ntfn) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -6405,12 +6314,12 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (simp add: cnotification_relation_def Let_def) apply (case_tac "ntfnObj ntfn", simp_all add: init_def valid_ntfn'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def ntfnBound_state_refs_equivalence obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)) ntfnptr (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -6427,37 +6336,34 @@ lemma receiveSignal_enqueue_ccorres_helper: typ_heap_simps') 
apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) apply simp - apply (rule cendpoint_relation_ntfn_queue, assumption+) - apply (simp add: isWaitingNtfn_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) - apply (case_tac "ntfn", simp_all)[1] - apply (clarsimp simp: cnotification_relation_def Let_def - mask_def [where n=3] NtfnState_Waiting_def) - subgoal - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask valid_ntfn'_def - dest: tcb_queue_relation_next_not_NULL) - apply (rule conjI, fastforce simp: mask_def) - apply (rule context_conjI) - subgoal by (fastforce simp: valid_pspace'_def objBits_simps' - intro!: tcb_ptr_sign_extend_canonical - dest!: st_tcb_strg'[rule_format]) - by clarsimp - apply (simp add: isWaitingNtfn_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - subgoal by (clarsimp simp: comp_def) + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=3] NtfnState_Waiting_def) + subgoal + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask valid_ntfn'_def + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, fastforce simp: mask_def) + apply (rule context_conjI) + subgoal by (fastforce simp: valid_pspace'_def objBits_simps' + intro!: tcb_ptr_sign_extend_canonical + dest!: st_tcb_strg'[rule_format]) + by clarsimp + apply (simp add: isWaitingNtfn_def) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps @@ -6477,48 +6383,45 @@ lemma receiveSignal_enqueue_ccorres_helper: typ_heap_simps') apply (elim conjE) apply (intro conjI) - \ \tcb relation\ - apply (erule ctcb_relation_null_queue_ptrs) - apply (clarsimp simp: comp_def) - \ \ep relation\ - apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + \ \tcb relation\ + apply (erule ctcb_relation_null_ep_ptrs) + apply (clarsimp simp: comp_def) + \ \ep relation\ + apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) + apply simp + apply (rule cendpoint_relation_ntfn_queue, assumption+) + apply (simp add: isWaitingNtfn_def) apply simp - apply (rule cendpoint_relation_ntfn_queue, assumption+) - apply (simp add: isWaitingNtfn_def) - apply simp - apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) - apply (erule(2) map_to_ko_at_updI') - apply (clarsimp simp: objBitsKO_def) - apply (clarsimp simp: obj_at'_def projectKOs) - \ \ntfn relation\ - apply (rule cpspace_relation_ntfn_update_ntfn', 
assumption+) - apply (case_tac "ntfn", simp_all)[1] - apply (clarsimp simp: cnotification_relation_def Let_def - mask_def [where n=3] NtfnState_Waiting_def - split: if_split) - subgoal for _ _ ko' - apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask - dest: tcb_queue_relation_next_not_NULL) - apply (rule conjI, clarsimp) - apply (rule conjI, fastforce simp: mask_def) - apply (rule context_conjI) - subgoal by (fastforce intro!: tcb_ptr_sign_extend_canonical - dest!: st_tcb_strg'[rule_format]) - apply clarsimp - apply clarsimp + apply (frule_tac x=p in map_to_ko_atI, clarsimp, clarsimp) + apply (erule(2) map_to_ko_at_updI') + apply (clarsimp simp: objBitsKO_def) + apply (clarsimp simp: obj_at'_def projectKOs) + \ \ntfn relation\ + apply (rule cpspace_relation_ntfn_update_ntfn', assumption+) + apply (case_tac "ntfn", simp_all)[1] + apply (clarsimp simp: cnotification_relation_def Let_def + mask_def [where n=3] NtfnState_Waiting_def + split: if_split) + subgoal for _ _ ko' + apply (clarsimp simp: tcb_queue_relation'_def is_aligned_neg_mask + dest: tcb_queue_relation_next_not_NULL) + apply (rule conjI, clarsimp) apply (rule conjI, fastforce simp: mask_def) - apply (rule conjI) + apply (rule context_conjI) subgoal by (fastforce intro!: tcb_ptr_sign_extend_canonical dest!: st_tcb_strg'[rule_format]) - apply (subgoal_tac "canonical_address (ntfnQueue_head_CL (notification_lift ko'))") - apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend) - apply (clarsimp simp: notification_lift_def canonical_address_sign_extended - sign_extended_sign_extend) - done - apply (simp add: isWaitingNtfn_def) - \ \queue relation\ - apply (rule cready_queues_relation_null_queue_ptrs, assumption+) - apply (clarsimp simp: comp_def) + apply clarsimp + apply clarsimp + apply (rule conjI, fastforce simp: mask_def) + apply (rule conjI) + subgoal by (fastforce intro!: tcb_ptr_sign_extend_canonical + dest!: st_tcb_strg'[rule_format]) + apply (subgoal_tac "canonical_address (ntfnQueue_head_CL (notification_lift ko'))") + apply (clarsimp simp: canonical_address_sign_extended sign_extended_iff_sign_extend) + apply (clarsimp simp: notification_lift_def canonical_address_sign_extended + sign_extended_sign_extend) + done + apply (simp add: isWaitingNtfn_def) apply (clarsimp simp: carch_state_relation_def packed_heap_update_collapse_hrs global_ioport_bitmap_heap_update_tag_disj_simps fpu_null_state_heap_update_tag_disj_simps @@ -6533,7 +6436,6 @@ lemma receiveSignal_enqueue_ccorres_helper: lemma receiveSignal_ccorres [corres]: "ccorres dc xfdc (invs' and valid_cap' cap and st_tcb_at' simple' thread and sch_act_not thread - and (\s. \d p. thread \ set (ksReadyQueues s (d, p))) and K (isNotificationCap cap)) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr thread\ \ \ccap_relation cap \cap\ @@ -6577,11 +6479,10 @@ lemma receiveSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp) apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. 
projectKO_opt x = Some ntfn @@ -6592,7 +6493,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \ActiveNtfn case\ apply (rename_tac badge) apply (rule ccorres_cond_false) @@ -6649,8 +6550,7 @@ lemma receiveSignal_ccorres [corres]: apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule_tac ntfn="ntfn" - in receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule_tac ntfn="ntfn" in receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6662,7 +6562,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Active_def NtfnState_Waiting_def NtfnState_Idle_def) apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/X64/IsolatedThreadAction.thy b/proof/crefine/X64/IsolatedThreadAction.thy index da78a7872e..f11e35e610 100644 --- a/proof/crefine/X64/IsolatedThreadAction.thy +++ b/proof/crefine/X64/IsolatedThreadAction.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -10,25 +11,8 @@ begin context begin interpretation Arch . -datatype tcb_state_regs = TCBStateRegs "thread_state" "MachineTypes.register \ machine_word" - -definition - "tsrContext tsr \ case tsr of TCBStateRegs ts regs \ regs" - -definition - "tsrState tsr \ case tsr of TCBStateRegs ts regs \ ts" - -lemma accessors_TCBStateRegs[simp]: - "TCBStateRegs (tsrState v) (tsrContext v) = v" - by (cases v, simp add: tsrState_def tsrContext_def) - -lemma tsrContext_simp[simp]: - "tsrContext (TCBStateRegs st con) = con" - by (simp add: tsrContext_def) - -lemma tsrState_simp[simp]: - "tsrState (TCBStateRegs st con) = st" - by (simp add: tsrState_def) +datatype tcb_state_regs = + TCBStateRegs (tsrState : thread_state) (tsrContext : "MachineTypes.register \ machine_word") definition get_tcb_state_regs :: "kernel_object option \ tcb_state_regs" @@ -83,12 +67,12 @@ lemma put_tcb_state_regs_twice[simp]: "put_tcb_state_regs tsr (put_tcb_state_regs tsr' tcb) = put_tcb_state_regs tsr tcb" apply (simp add: put_tcb_state_regs_def put_tcb_state_regs_tcb_def - atcbContextSet_def - makeObject_tcb newArchTCB_def newContext_def initContext_def + makeObject_tcb newArchTCB_def atcbContextSet_def split: tcb_state_regs.split option.split Structures_H.kernel_object.split) apply (intro all_tcbI impI allI) - apply (case_tac q, simp) + using atcbContextSet_def atcbContext_set_set + apply fastforce+ done lemma partial_overwrite_twice[simp]: @@ -395,7 +379,7 @@ lemma obj_at_partial_overwrite_id2: lemma objBits_2n: "(1 :: machine_word) < 2 ^ objBits obj" - by (simp add: objBits_def objBitsKO_def archObjSize_def pageBits_def objBits_simps' + by (simp add: archObjSize_def pageBits_def objBits_simps' split: kernel_object.split arch_kernel_object.split) lemma getObject_get_assert: @@ -417,7 +401,7 @@ lemma getObject_get_assert: apply (simp add: lookupAround2_known1 assert_opt_def obj_at'_def projectKO_def2 split: option.split) - apply (clarsimp simp: fail_def fst_return 
conj_comms project_inject + apply (clarsimp simp: fail_set fst_return conj_comms project_inject objBits_def) apply (simp only: assert2[symmetric], rule bind_apply_cong[OF refl]) @@ -477,7 +461,7 @@ lemma modify_isolatable: liftM_def bind_assoc) apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def) - apply (simp add: simpler_modify_def o_def) + apply (simp add: simpler_modify_def) apply (subst swap) apply (simp add: obj_at_partial_overwrite_If) apply (simp add: ksPSpace_update_partial_id o_def) @@ -534,15 +518,15 @@ lemma thread_actions_isolatable_bind: \t. \tcb_at' t\ f \\rv. tcb_at' t\ \ \ thread_actions_isolatable idx (f >>= g)" apply (clarsimp simp: thread_actions_isolatable_def) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) - apply (erule monadic_rewrite_bind2, assumption) + apply (erule monadic_rewrite_bind_l, assumption) apply (rule hoare_vcg_all_lift, assumption) apply (subst isolate_thread_actions_wrap_bind, simp) apply simp apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) apply (rule monadic_rewrite_transverse) - apply (erule monadic_rewrite_bind2, assumption) + apply (erule monadic_rewrite_bind_l, assumption) apply (rule hoare_vcg_all_lift, assumption) apply (simp add: bind_assoc id_def) apply (rule monadic_rewrite_refl) @@ -607,7 +591,7 @@ lemma select_f_isolatable: apply (clarsimp simp: thread_actions_isolatable_def isolate_thread_actions_def split_def select_f_selects liftM_def bind_assoc) - apply (rule monadic_rewrite_imp, rule monadic_rewrite_transverse) + apply (rule monadic_rewrite_guard_imp, rule monadic_rewrite_transverse) apply (rule monadic_rewrite_drop_modify monadic_rewrite_bind_tail)+ apply wp+ apply (simp add: gets_bind_ign getSchedulerAction_def) @@ -739,12 +723,10 @@ lemma transferCaps_simple_rewrite: (transferCaps mi caps ep r rBuf) (return (mi \ msgExtraCaps := 0, msgCapsUnwrapped := 0 \))" including no_pre + supply empty_fail_getReceiveSlots[wp] (* FIXME *) apply (rule monadic_rewrite_gen_asm) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (simp add: transferCaps_simple, rule monadic_rewrite_refl) - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getReceiveSlots)+) - apply (rule monadic_rewrite_refl) + apply (simp add: transferCaps_simple) + apply (monadic_rewrite_symb_exec_l_drop, rule monadic_rewrite_refl) apply simp done @@ -758,7 +740,8 @@ lemma lookupExtraCaps_simple_rewrite: lemma lookupIPC_inv: "\P\ lookupIPCBuffer f t \\rv. 
P\" by wp -lemmas empty_fail_user_getreg = empty_fail_asUser[OF empty_fail_getRegister] +(* FIXME move *) +lemmas empty_fail_user_getreg[intro!, wp, simp] = empty_fail_asUser[OF empty_fail_getRegister] lemma copyMRs_simple: "msglen \ of_nat (length msgRegisters) \ @@ -795,25 +778,23 @@ lemma doIPCTransfer_simple_rewrite: apply (simp add: doIPCTransfer_def bind_assoc doNormalTransfer_def getMessageInfo_def cong: option.case_cong) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail)+ - apply (rule_tac P="fault = None" in monadic_rewrite_gen_asm, simp) + apply (rule monadic_rewrite_bind_tail) + apply (monadic_rewrite_symb_exec_l_known None, simp) apply (rule monadic_rewrite_bind_tail) - apply (rule_tac x=msgInfo in monadic_rewrite_symb_exec, - (wp empty_fail_user_getreg user_getreg_rv)+) - apply (simp add: lookupExtraCaps_simple_rewrite returnOk_catch_bind) - apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_from_simple, rule copyMRs_simple) - apply (rule monadic_rewrite_bind_head) - apply (rule transferCaps_simple_rewrite) - apply (wp threadGet_const)+ + apply (monadic_rewrite_symb_exec_l_known msgInfo) + apply (simp add: lookupExtraCaps_simple_rewrite returnOk_catch_bind) + apply (rule monadic_rewrite_bind) + apply (rule monadic_rewrite_from_simple, rule copyMRs_simple) + apply (rule monadic_rewrite_bind_head) + apply (rule transferCaps_simple_rewrite) + apply (wp threadGet_const user_getreg_rv asUser_inv)+ apply (simp add: bind_assoc) - apply (rule monadic_rewrite_symb_exec2[OF lookupIPC_inv empty_fail_lookupIPCBuffer] - monadic_rewrite_symb_exec2[OF threadGet_inv empty_fail_threadGet] - monadic_rewrite_symb_exec2[OF user_getreg_inv' empty_fail_user_getreg] - monadic_rewrite_bind_head monadic_rewrite_bind_tail - | wp)+ + apply (rule monadic_rewrite_symb_exec_l_drop[OF _ lookupIPC_inv empty_fail_lookupIPCBuffer] + monadic_rewrite_symb_exec_l_drop[OF _ threadGet_inv empty_fail_threadGet] + monadic_rewrite_symb_exec_l_drop[OF _ user_getreg_inv' empty_fail_user_getreg] + monadic_rewrite_bind_head monadic_rewrite_bind_tail)+ apply (case_tac "messageInfoFromWord msgInfo") apply simp apply (rule monadic_rewrite_refl) @@ -825,7 +806,7 @@ lemma doIPCTransfer_simple_rewrite: lemma monadic_rewrite_setSchedulerAction_noop: "monadic_rewrite F E (\s. ksSchedulerAction s = act) (setSchedulerAction act) (return ())" unfolding setSchedulerAction_def - apply (rule monadic_rewrite_imp, rule monadic_rewrite_modify_noop) + apply (rule monadic_rewrite_guard_imp, rule monadic_rewrite_modify_noop) apply simp done @@ -839,9 +820,10 @@ lemma rescheduleRequired_simple_rewrite: apply auto done -lemma empty_fail_isRunnable: +(* FIXME move *) +lemma empty_fail_isRunnable[intro!, wp, simp]: "empty_fail (isRunnable t)" - by (simp add: isRunnable_def isStopped_def) + by (simp add: isRunnable_def isStopped_def empty_fail_cond) lemma setupCallerCap_rewrite: "monadic_rewrite True True (\s. 
reply_masters_rvk_fb (ctes_of s)) @@ -860,23 +842,19 @@ lemma setupCallerCap_rewrite: apply (simp add: setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv getSlotCap_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_bind_tail)+ - apply (rule monadic_rewrite_assert)+ - apply (rule_tac P="mdbFirstBadged (cteMDBNode masterCTE) - \ mdbRevocable (cteMDBNode masterCTE)" - in monadic_rewrite_gen_asm) - apply simp - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec2, (wp | simp)+)+ - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getCTE)+)+ - apply (rule monadic_rewrite_refl) - apply (wp getCTE_wp' | simp add: cte_wp_at_ctes_of)+ - apply (clarsimp simp: reply_masters_rvk_fb_def) - apply fastforce + apply (rule monadic_rewrite_bind_tail)+ + apply (rule monadic_rewrite_assert)+ + apply (rule_tac P="mdbFirstBadged (cteMDBNode masterCTE) + \ mdbRevocable (cteMDBNode masterCTE)" + in monadic_rewrite_gen_asm) + apply (rule monadic_rewrite_trans) + apply monadic_rewrite_symb_exec_l + apply monadic_rewrite_symb_exec_l_drop + apply (rule monadic_rewrite_refl) + apply wpsimp+ + apply (rule monadic_rewrite_refl) + apply (wpsimp wp: getCTE_wp' simp: cte_wp_at_ctes_of)+ + apply (fastforce simp: reply_masters_rvk_fb_def) done lemma oblivious_getObject_ksPSpace_default: @@ -945,64 +923,41 @@ lemma oblivious_switchToThread_schact: threadSet_def tcbSchedEnqueue_def unless_when asUser_def getQueue_def setQueue_def storeWordUser_def setRegister_def pointerInUserData_def isRunnable_def isStopped_def - getThreadState_def tcbSchedDequeue_def bitmap_fun_defs) + getThreadState_def tcbSchedDequeue_def tcbQueueRemove_def bitmap_fun_defs + ksReadyQueues_asrt_def) by (safe intro!: oblivious_bind - | simp_all add: oblivious_setVMRoot_schact)+ + | simp_all add: ready_qs_runnable_def idleThreadNotQueued_def + oblivious_setVMRoot_schact)+ -lemma empty_fail_getCurThread[iff]: +(* FIXME move *) +lemma empty_fail_getCurThread[intro!, wp, simp]: "empty_fail getCurThread" by (simp add: getCurThread_def) + lemma activateThread_simple_rewrite: "monadic_rewrite True True (ct_in_state' ((=) Running)) (activateThread) (return ())" apply (simp add: activateThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_tail)+ - apply (rule_tac P="state = Running" in monadic_rewrite_gen_asm) - apply simp + apply wp_pre + apply (monadic_rewrite_symb_exec_l) + apply (monadic_rewrite_symb_exec_l_known Running, simp) apply (rule monadic_rewrite_refl) - apply wp - apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_getThreadState)+) - apply (rule monadic_rewrite_refl) - apply wp - apply (rule monadic_rewrite_symb_exec2, - simp_all add: getCurThread_def) - apply (rule monadic_rewrite_refl) + apply wpsimp+ apply (clarsimp simp: ct_in_state'_def elim!: pred_tcb'_weakenE) done end -lemma setCTE_obj_at_prio[wp]: - "\obj_at' (\tcb. P (tcbPriority tcb)) t\ setCTE p v \\rv. obj_at' (\tcb. P (tcbPriority tcb)) t\" - unfolding setCTE_def - by (rule setObject_cte_obj_at_tcb', simp+) - -crunch obj_at_prio[wp]: cteInsert "obj_at' (\tcb. P (tcbPriority tcb)) t" - (wp: crunch_wps) - -crunch ctes_of[wp]: asUser "\s. P (ctes_of s)" - (wp: crunch_wps) - -lemma tcbSchedEnqueue_tcbPriority[wp]: - "\obj_at' (\tcb. P (tcbPriority tcb)) t\ - tcbSchedEnqueue t' - \\rv. obj_at' (\tcb. 
P (tcbPriority tcb)) t\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp | simp cong: if_cong)+ - done - -crunch obj_at_prio[wp]: cteDeleteOne "obj_at' (\tcb. P (tcbPriority tcb)) t" - (wp: crunch_wps setEndpoint_obj_at_tcb' - setThreadState_obj_at_unchanged setNotification_tcb setBoundNotification_obj_at_unchanged - simp: crunch_simps unless_def) +crunches setBoundNotification, cteDeleteOne + for obj_at_prio[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t" + (wp: crunch_wps simp: crunch_simps) lemma setThreadState_no_sch_change: "\\s. P (ksSchedulerAction s) \ (runnable' st \ t \ ksCurThread s)\ setThreadState st t \\rv s. P (ksSchedulerAction s)\" - (is "NonDetMonad.valid ?P ?f ?Q") + (is "Nondet_VCG.valid ?P ?f ?Q") apply (simp add: setThreadState_def setSchedulerAction_def) - apply (wp hoare_pre_cont[where a=rescheduleRequired]) + apply (wp hoare_pre_cont[where f=rescheduleRequired]) apply (rule_tac Q="\_. ?P and st_tcb_at' ((=) st) t" in hoare_post_imp) apply (clarsimp split: if_split) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs) @@ -1113,8 +1068,6 @@ lemma setCTE_assert_modify: apply (rule word_and_le2) apply (simp add: objBits_simps mask_def field_simps) apply (simp add: simpler_modify_def cong: option.case_cong if_cong) - apply (rule kernel_state.fold_congs[OF refl refl]) - apply (clarsimp simp: projectKO_opt_tcb cong: if_cong) apply (clarsimp simp: lookupAround2_char1 word_and_le2) apply (rule ccontr, clarsimp) apply (erule(2) ps_clearD) @@ -1131,7 +1084,7 @@ lemma setCTE_assert_modify: apply (erule disjE) apply clarsimp apply (frule(1) tcb_cte_cases_aligned_helpers) - apply (clarsimp simp: domI[where m = cte_cte_cases] field_simps) + apply (clarsimp simp: domI field_simps) apply (clarsimp simp: lookupAround2_char1 obj_at'_def projectKOs objBits_simps) apply (clarsimp simp: obj_at'_def lookupAround2_char1 @@ -1186,8 +1139,7 @@ lemma setCTE_isolatable: apply (erule notE[rotated], erule (3) tcb_ctes_clear[rotated]) apply (simp add: select_f_returns select_f_asserts split: if_split) apply (intro conjI impI) - apply (clarsimp simp: simpler_modify_def fun_eq_iff - partial_overwrite_fun_upd2 o_def + apply (clarsimp simp: simpler_modify_def fun_eq_iff partial_overwrite_fun_upd2 intro!: kernel_state.fold_congs[OF refl refl]) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) apply (erule notE[rotated], rule tcb_ctes_clear[rotated 2], assumption+) @@ -1260,37 +1212,24 @@ lemma thread_actions_isolatableD: lemma tcbSchedDequeue_rewrite: "monadic_rewrite True True (obj_at' (Not \ tcbQueued) t) (tcbSchedDequeue t) (return ())" apply (simp add: tcbSchedDequeue_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule_tac P="\ queued" in monadic_rewrite_gen_asm) - apply (simp add: when_def) + apply wp_pre + apply monadic_rewrite_symb_exec_l + apply (monadic_rewrite_symb_exec_l_known False, simp) apply (rule monadic_rewrite_refl) - apply (wp threadGet_const) - apply (rule monadic_rewrite_symb_exec2) - apply wp+ - apply (rule monadic_rewrite_refl) - apply (clarsimp) + apply (wpsimp wp: threadGet_const)+ done +(* FIXME: improve automation here *) lemma switchToThread_rewrite: "monadic_rewrite True True (ct_in_state' (Not \ runnable') and cur_tcb' and obj_at' (Not \ tcbQueued) t) (switchToThread t) (do Arch.switchToThread t; setCurThread t od)" apply (simp add: switchToThread_def Thread_H.switchToThread_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply 
(rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bind) - apply (rule tcbSchedDequeue_rewrite) - apply (rule monadic_rewrite_refl) - apply (wp Arch_switchToThread_obj_at_pre)+ - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_symb_exec) - apply (wp+, simp) - apply (rule monadic_rewrite_refl) - apply (wp) + apply (monadic_rewrite_l tcbSchedDequeue_rewrite, simp) + (* strip LHS of getters and asserts until LHS and RHS are the same *) + apply (repeat_unless \rule monadic_rewrite_refl\ monadic_rewrite_symb_exec_l) + apply wpsimp+ apply (clarsimp simp: comp_def) done @@ -1328,9 +1267,33 @@ lemma threadGet_isolatable: thread_actions_isolatable_fail) done +lemma tcbQueued_put_tcb_state_regs_tcb: + "tcbQueued (put_tcb_state_regs_tcb tsr tcb) = tcbQueued tcb" + apply (clarsimp simp: put_tcb_state_regs_tcb_def) + by (cases tsr; clarsimp) + +lemma idleThreadNotQueued_isolatable: + "thread_actions_isolatable idx (stateAssert idleThreadNotQueued [])" + apply (simp add: stateAssert_def2 stateAssert_def) + apply (intro thread_actions_isolatable_bind[OF _ _ hoare_pre(1)] + gets_isolatable + thread_actions_isolatable_if + thread_actions_isolatable_returns + thread_actions_isolatable_fail) + unfolding idleThreadNotQueued_def + apply (clarsimp simp: obj_at_partial_overwrite_If) + apply (clarsimp simp: obj_at'_def tcbQueued_put_tcb_state_regs_tcb) + apply wpsimp+ + done + lemma setCurThread_isolatable: "thread_actions_isolatable idx (setCurThread t)" - by (simp add: setCurThread_def modify_isolatable) + unfolding setCurThread_def + apply (rule thread_actions_isolatable_bind) + apply (rule idleThreadNotQueued_isolatable) + apply (fastforce intro: modify_isolatable) + apply wpsimp + done lemma isolate_thread_actions_tcbs_at: assumes f: "\x. \tcb_at' (idx x)\ f \\rv. tcb_at' (idx x)\" shows @@ -1353,7 +1316,7 @@ lemma isolate_thread_actions_rewrite_bind: \ monadic_rewrite False True (\s. \x. tcb_at' (idx x) s) (f >>= g) (isolate_thread_actions idx (f' >>= g') (g'' o f'') (g''' o f'''))" - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) apply (rule monadic_rewrite_bind, assumption+) apply (wp isolate_thread_actions_tcbs_at) @@ -1361,7 +1324,7 @@ lemma isolate_thread_actions_rewrite_bind: apply (subst isolate_thread_actions_wrap_bind, assumption) apply (rule monadic_rewrite_in_isolate_thread_actions, assumption) apply (rule monadic_rewrite_transverse) - apply (rule monadic_rewrite_bind2) + apply (rule monadic_rewrite_bind_l) apply (erule(1) thread_actions_isolatableD) apply (rule thread_actions_isolatableD, assumption+) apply (rule hoare_vcg_all_lift, assumption) @@ -1468,7 +1431,7 @@ lemma monadic_rewrite_isolate_final2: (isolate_thread_actions idx f f' f'') (isolate_thread_actions idx g g' g'')" apply (simp add: isolate_thread_actions_def split_def) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_bind_tail)+ apply (rule_tac P="\ s'. Q s" in monadic_rewrite_bind) apply (insert mr)[1] @@ -1476,14 +1439,14 @@ lemma monadic_rewrite_isolate_final2: apply auto[1] apply (rule_tac P="P and (\s. 
tcbs = get_tcb_state_regs o ksPSpace s o idx \ sa = ksSchedulerAction s)" - in monadic_rewrite_refl3) + in monadic_rewrite_pre_imp_eq) apply (clarsimp simp: exec_modify eqs return_def) apply wp+ apply (clarsimp simp: o_def eqs) done lemmas monadic_rewrite_isolate_final - = monadic_rewrite_isolate_final2[where R=\, OF monadic_rewrite_refl2, simplified] + = monadic_rewrite_isolate_final2[where R=\, OF monadic_rewrite_is_refl, simplified] lemma copy_registers_isolate_general: "\ inj idx; idx x = t; idx y = t' \ \ @@ -1503,7 +1466,7 @@ lemma copy_registers_isolate_general: select_f_returns o_def ksPSpace_update_partial_id) apply (simp add: return_def simpler_modify_def) apply (simp add: mapM_x_Cons) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_trans) apply (rule isolate_thread_actions_rewrite_bind, assumption) apply (rule copy_register_isolate, assumption+) @@ -1597,27 +1560,17 @@ lemma setThreadState_rewrite_simple: (setThreadState st t) (threadSet (tcbState_update (\_. st)) t)" supply if_split[split del] - apply (simp add: setThreadState_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail)+ - apply (simp add: when_def) - apply (rule monadic_rewrite_gen_asm) - apply (subst if_not_P) - apply assumption - apply (rule monadic_rewrite_refl) - apply wp+ - apply (rule monadic_rewrite_symb_exec2, - (wp empty_fail_isRunnable - | (simp only: getCurThread_def getSchedulerAction_def - , rule empty_fail_gets))+)+ - apply (rule monadic_rewrite_refl) - apply (simp add: conj_comms, wp hoare_vcg_imp_lift threadSet_tcbState_st_tcb_at') - apply (clarsimp simp: obj_at'_def sch_act_simple_def st_tcb_at'_def) + apply (simp add: setThreadState_def when_def) + apply (monadic_rewrite_l monadic_rewrite_if_l_False + \wpsimp wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at'\) + (* take the threadSet, drop everything until return () *) + apply (rule monadic_rewrite_trans[OF monadic_rewrite_bind_tail]) + apply (rule monadic_rewrite_symb_exec_l_drop)+ + apply (rule monadic_rewrite_refl) + apply (wpsimp simp: getCurThread_def + wp: hoare_vcg_disj_lift hoare_vcg_imp_lift' threadSet_tcbState_st_tcb_at')+ apply (rule monadic_rewrite_refl) - apply clarsimp + apply (clarsimp simp: obj_at'_def sch_act_simple_def st_tcb_at'_def) done end diff --git a/proof/crefine/X64/PSpace_C.thy b/proof/crefine/X64/PSpace_C.thy index 14a69364ef..1c5b5ee0ba 100644 --- a/proof/crefine/X64/PSpace_C.thy +++ b/proof/crefine/X64/PSpace_C.thy @@ -47,7 +47,7 @@ lemma setObject_ccorres_helper: fixes ko :: "'a :: pspace_storable" assumes valid: "\\ (ko' :: 'a). \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ko_at' ko' p \} - c {s. (\\ksPSpace := ksPSpace \ (p \ injectKO ko)\, s) \ rf_sr}" + c {s. (\\ksPSpace := (ksPSpace \)(p \ injectKO ko)\, s) \ rf_sr}" shows "\ \ko :: 'a. updateObject ko = updateObject_default ko; \ko :: 'a. (1 :: machine_word) < 2 ^ objBits ko \ \ ccorres dc xfdc P P' hs (setObject p ko) c" diff --git a/proof/crefine/X64/Recycle_C.thy b/proof/crefine/X64/Recycle_C.thy index 28c7b902fd..4363ee8e8d 100644 --- a/proof/crefine/X64/Recycle_C.thy +++ b/proof/crefine/X64/Recycle_C.thy @@ -454,7 +454,7 @@ lemma mapM_x_store_memset_ccorres_assist: "\ko :: 'a. (1 :: machine_word) < 2 ^ objBits ko" assumes restr: "set slots \ S" assumes worker: "\ptr s s' (ko :: 'a). 
\ (s, s') \ rf_sr; ko_at' ko ptr s; ptr \ S \ - \ (s \ ksPSpace := ksPSpace s (ptr \ injectKO val)\, + \ (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val)\, globals_update (t_hrs_'_update (hrs_mem_update (heap_update_list ptr (replicateHider (2 ^ objBits val) (ucast c))))) s') \ rf_sr" @@ -793,8 +793,8 @@ lemma cpspace_relation_ep_update_ep2: (cslift t) ep_Ptr (cendpoint_relation (cslift t)); cendpoint_relation (cslift t') ep' endpoint; (cslift t' :: tcb_C ptr \ tcb_C) = cslift t \ - \ cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(ep_Ptr epptr \ endpoint)) + \ cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(ep_Ptr epptr \ endpoint)) ep_Ptr (cendpoint_relation (cslift t'))" apply (rule cmap_relationE1, assumption, erule ko_at_projectKO_opt) apply (rule_tac P="\a. cmap_relation a b c d" for b c d in rsubst, @@ -848,7 +848,7 @@ lemma ctcb_relation_blocking_ipc_badge: apply (simp add: isBlockedOnSend_def split: Structures_H.thread_state.split_asm) apply (clarsimp simp: cthread_state_relation_def) apply (clarsimp simp add: ctcb_relation_def cthread_state_relation_def) - apply (cases "tcbState tcb", simp_all add: "StrictC'_thread_state_defs") + apply (cases "tcbState tcb", simp_all add: ThreadState_defs) done lemma cendpoint_relation_q_cong: @@ -870,16 +870,6 @@ lemma cnotification_relation_q_cong: apply (auto intro: iffD1[OF tcb_queue_relation'_cong[OF refl refl refl]]) done -lemma tcbSchedEnqueue_ep_at: - "\obj_at' (P :: endpoint \ bool) ep\ - tcbSchedEnqueue t - \\rv. obj_at' P ep\" - including no_pre - apply (simp add: tcbSchedEnqueue_def unless_def null_def) - apply (wp threadGet_wp, clarsimp, wp+) - apply (clarsimp split: if_split, wp) - done - lemma ccorres_duplicate_guard: "ccorres r xf (P and P) Q hs f f' \ ccorres r xf P Q hs f f'" by (erule ccorres_guard_imp, auto) @@ -899,12 +889,13 @@ lemma cancelBadgedSends_ccorres: (UNIV \ {s. epptr_' s = Ptr ptr} \ {s. 
badge_' s = bdg}) [] (cancelBadgedSends ptr bdg) (Call cancelBadgedSends_'proc)" apply (cinit lift: epptr_' badge_' simp: whileAnno_def) - apply (simp add: list_case_return2 + apply (rule ccorres_stateAssert) + apply (simp add: list_case_return cong: list.case_cong Structures_H.endpoint.case_cong call_ignore_cong del: Collect_const) - apply (rule ccorres_pre_getEndpoint) - apply (rule_tac R="ko_at' rv ptr" and xf'="ret__unsigned_longlong_'" - and val="case rv of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle + apply (rule ccorres_pre_getEndpoint, rename_tac ep) + apply (rule_tac R="ko_at' ep ptr" and xf'="ret__unsigned_longlong_'" + and val="case ep of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle | SendEP q \ scast EPState_Send" in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg @@ -914,22 +905,22 @@ lemma cancelBadgedSends_ccorres: split: Structures_H.endpoint.split_asm) apply ceqv apply wpc - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) apply (simp add: Collect_True Collect_False endpoint_state_defs - ccorres_cond_iffs dc_def[symmetric] + ccorres_cond_iffs del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (drule_tac s = rv in sym, simp only:) - apply (rule_tac P="ko_at' rv ptr and invs'" in ccorres_cross_over_guard) + apply (drule_tac s = ep in sym, simp only:) + apply (rule_tac P="ko_at' ep ptr and invs'" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl]) - apply (rule_tac P="ko_at' rv ptr" + apply (rule_tac P="ko_at' ep ptr" in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -952,8 +943,9 @@ lemma cancelBadgedSends_ccorres: st_tcb_at' (\st. isBlockedOnSend st \ blockingObject st = ptr) x s) \ distinct (xs @ list) \ ko_at' IdleEP ptr s \ (\p. \x \ set (xs @ list). \rf. (x, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) - \ valid_queues s \ pspace_aligned' s \ pspace_distinct' s \ pspace_canonical' s - \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s" + \ pspace_aligned' s \ pspace_distinct' s \ pspace_canonical' s + \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" and P'="\xs. {s. ep_queue_relation' (cslift s) (xs @ list) (head_C (queue_' s)) (end_C (queue_' s))} \ {s. 
thread_' s = (case list of [] \ tcb_Ptr 0 @@ -1008,7 +1000,7 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: mask_def) subgoal by (auto split: if_split) subgoal by simp - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule hoare_pre, wp weak_sch_act_wf_lift_linear set_ep_valid_objs') apply (clarsimp simp: weak_sch_act_wf_def sch_act_wf_def) apply (fastforce simp: valid_ep'_def pred_tcb_at' split: list.splits) @@ -1018,7 +1010,7 @@ lemma cancelBadgedSends_ccorres: apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) apply (rule ccorres_init_tmp_lift2, ceqv) apply (rule ccorres_guard_imp2) - apply (simp add: bind_assoc dc_def[symmetric] + apply (simp add: bind_assoc del: Collect_const) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -1043,9 +1035,9 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: rf_sr_def) apply simp apply ceqv - apply (rule_tac P="ret__unsigned_longlong=blockingIPCBadge rva" in ccorres_gen_asm2) + apply (rule_tac P="ret__unsigned_longlong=blockingIPCBadge rv" in ccorres_gen_asm2) apply (rule ccorres_if_bind, rule ccorres_if_lhs) - apply (simp add: bind_assoc dc_def[symmetric]) + apply (simp add: bind_assoc) apply (rule ccorres_rhs_assoc)+ apply (ctac add: setThreadState_ccorres) apply (ctac add: tcbSchedEnqueue_ccorres) @@ -1055,8 +1047,9 @@ lemma cancelBadgedSends_ccorres: apply (rule_tac rrel=dc and xf=xfdc and P="\s. (\t \ set (x @ a # lista). tcb_at' t s) \ (\p. \t \ set (x @ a # lista). \rf. (t, rf) \ {r \ state_refs_of' s p. snd r \ NTFNBound}) - \ valid_queues s \ distinct (x @ a # lista) - \ pspace_aligned' s \ pspace_distinct' s" + \ distinct (x @ a # lista) + \ pspace_aligned' s \ pspace_distinct' s + \ ksReadyQueues_head_end s \ ksReadyQueues_head_end_tcb_at' s" and P'="{s. 
ep_queue_relation' (cslift s) (x @ a # lista) (head_C (queue_' s)) (end_C (queue_' s))}" in ccorres_from_vcg) @@ -1072,8 +1065,7 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: return_def rf_sr_def cstate_relation_def Let_def) apply (rule conjI) apply (clarsimp simp: cpspace_relation_def) - apply (rule conjI, erule ctcb_relation_null_queue_ptrs) - apply (rule null_ep_queue) + apply (rule conjI, erule ctcb_relation_null_ep_ptrs) subgoal by (simp add: o_def) apply (rule conjI) apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1]) @@ -1096,9 +1088,6 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: image_iff) apply (drule_tac x=p in spec) subgoal by fastforce - apply (rule conjI) - apply (erule cready_queues_relation_not_queue_ptrs, - auto dest: null_ep_schedD[unfolded o_def] simp: o_def)[1] apply (clarsimp simp: carch_state_relation_def cmachine_state_relation_def elim!: fpu_null_state_typ_heap_preservation) apply (rule ccorres_symb_exec_r2) @@ -1108,16 +1097,15 @@ lemma cancelBadgedSends_ccorres: apply wp apply simp apply vcg - apply (wp hoare_vcg_const_Ball_lift tcbSchedEnqueue_ep_at - sch_act_wf_lift) + apply (wp hoare_vcg_const_Ball_lift sch_act_wf_lift) apply simp apply (vcg exspec=tcbSchedEnqueue_cslift_spec) apply (wp hoare_vcg_const_Ball_lift sts_st_tcb_at'_cases - sts_sch_act sts_valid_queues setThreadState_oa_queued) + sts_sch_act sts_valid_objs') apply (vcg exspec=setThreadState_cslift_spec) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_symb_exec_r2) - apply (drule_tac x="x @ [a]" in spec, simp add: dc_def[symmetric]) + apply (drule_tac x="x @ [a]" in spec, simp) apply vcg apply (vcg spec=modifies) apply (thin_tac "\x. P x" for P) @@ -1130,21 +1118,18 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp simp: typ_heap_simps st_tcb_at'_def) apply (drule(1) obj_at_cslift_tcb) apply (clarsimp simp: ctcb_relation_blocking_ipc_badge) - apply (rule conjI, simp add: "StrictC'_thread_state_defs" mask_def) + apply (rule conjI, simp add: ThreadState_defs mask_def) apply (rule conjI) apply clarsimp apply (frule rf_sr_cscheduler_relation) apply (clarsimp simp: cscheduler_action_relation_def st_tcb_at'_def split: scheduler_action.split_asm) apply (rename_tac word) - apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge) - apply simp - subgoal by clarsimp - subgoal by clarsimp + apply (frule_tac x=word in tcbSchedEnqueue_cslift_precond_discharge; simp?) subgoal by clarsimp apply clarsimp apply (rule conjI) - apply (frule(3) tcbSchedEnqueue_cslift_precond_discharge) + apply (frule tcbSchedEnqueue_cslift_precond_discharge; simp?) 
subgoal by clarsimp apply clarsimp apply (rule context_conjI) @@ -1184,9 +1169,19 @@ lemma cancelBadgedSends_ccorres: apply (clarsimp split: if_split) apply (drule sym_refsD, clarsimp) apply (drule(1) bspec)+ - by (auto simp: obj_at'_def projectKOs state_refs_of'_def pred_tcb_at'_def tcb_bound_refs'_def - dest!: symreftype_inverse') - + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply (fastforce simp: obj_at'_def projectKOs state_refs_of'_def pred_tcb_at'_def + tcb_bound_refs'_def + dest!: symreftype_inverse') + apply (frule ksReadyQueues_asrt_ksReadyQueues_head_end) + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') + apply (frule (2) ksReadyQueues_asrt_ksReadyQueues_head_end_tcb_at') + apply fastforce + done lemma tcb_ptr_to_ctcb_ptr_force_fold: "x + 2 ^ ctcb_size_bits = ptr_val (tcb_ptr_to_ctcb_ptr x)" diff --git a/proof/crefine/X64/Refine_C.thy b/proof/crefine/X64/Refine_C.thy index 0160076ea4..c9fe110199 100644 --- a/proof/crefine/X64/Refine_C.thy +++ b/proof/crefine/X64/Refine_C.thy @@ -49,6 +49,7 @@ proof - show ?thesis apply (cinit') apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (ctac (no_vcg) add: getActiveIRQ_ccorres) apply (rule ccorres_Guard_Seq)? @@ -63,7 +64,7 @@ proof - apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply vcg apply vcg apply (clarsimp simp: irqInvalid_def ucast_8_32_neq) @@ -76,7 +77,7 @@ proof - apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (rule_tac Q="\rv s. invs' s \ (\x. 
rv = Some x \ x \ X64.maxIRQ) \ rv \ Some 0x3FF" in hoare_post_imp) apply (clarsimp simp: non_kernel_IRQs_def) apply (wp getActiveIRQ_le_maxIRQ getActiveIRQ_neq_Some0xFF | simp)+ @@ -92,6 +93,7 @@ lemma handleUnknownSyscall_ccorres: (callKernel (UnknownSyscall n)) (Call handleUnknownSyscall_'proc)" apply (cinit' lift: w_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_symb_exec_r) apply (rule ccorres_pre_getCurThread) @@ -104,14 +106,12 @@ lemma handleUnknownSyscall_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (clarsimp, vcg) apply (clarsimp, rule conseqPre, vcg, clarsimp) apply clarsimp apply (intro impI conjI allI) - apply fastforce - apply (clarsimp simp: ct_not_ksQ) - apply (clarsimp simp add: sch_act_simple_def split: scheduler_action.split) + apply fastforce apply (rule active_ex_cap') apply (erule active_from_running') apply (erule invs_iflive') @@ -129,8 +129,10 @@ lemma handleVMFaultEvent_ccorres: (callKernel (VMFaultEvent vmfault_type)) (Call handleVMFaultEvent_'proc)" apply (cinit' lift:vm_faultType_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_pre_getCurThread) + apply (rename_tac thread) apply (simp add: catch_def) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) @@ -159,13 +161,13 @@ lemma handleVMFaultEvent_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ - apply (case_tac x, clarsimp, wp) + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ + apply (case_tac rv, clarsimp, wp) apply (clarsimp, wp, simp) apply wp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: simple_sane_strg[unfolded sch_act_sane_not]) - by (auto simp: ct_in_state'_def cfault_rel_def is_cap_fault_def ct_not_ksQ + by (auto simp: ct_in_state'_def cfault_rel_def is_cap_fault_def elim: pred_tcb'_weakenE st_tcb_ex_cap'' dest: st_tcb_at_idle_thread' rf_sr_ksCurThread) @@ -178,6 +180,7 @@ lemma handleUserLevelFault_ccorres: (callKernel (UserLevelFault word1 word2)) (Call handleUserLevelFault_'proc)" apply (cinit' lift:w_a_' w_b_') apply (simp add: callKernel_def handleEvent_def) + apply (rule ccorres_stateAssert) apply (simp add: liftE_bind bind_assoc) apply (rule ccorres_symb_exec_r) apply (rule ccorres_pre_getCurThread) @@ -190,16 +193,14 @@ lemma handleUserLevelFault_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (clarsimp, vcg) apply (clarsimp, rule conseqPre, vcg, clarsimp) apply clarsimp apply (intro impI conjI allI) - apply (simp add: ct_in_state'_def) - apply (erule pred_tcb'_weakenE) - apply simp - apply (clarsimp simp: ct_not_ksQ) - apply (clarsimp simp add: sch_act_simple_def split: scheduler_action.split) + apply (simp add: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply simp apply (rule active_ex_cap') apply (erule 
active_from_running') apply (erule invs_iflive') @@ -230,6 +231,7 @@ lemma handleSyscall_ccorres: (callKernel (SyscallEvent sysc)) (Call handleSyscall_'proc)" apply (cinit' lift: syscall_') apply (simp add: callKernel_def handleEvent_def minus_one_norm) + apply (rule ccorres_stateAssert) apply (simp add: handleE_def handleE'_def) apply (rule ccorres_split_nothrow_novcg) apply wpc @@ -372,11 +374,10 @@ lemma handleSyscall_ccorres: apply wp[1] apply clarsimp apply wp - apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s \ - (\p. ksCurThread s \ set (ksReadyQueues s p))" + apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s" in hoare_post_imp) apply (simp add: ct_in_state'_def) - apply (wp handleReply_sane handleReply_ct_not_ksQ) + apply (wp handleReply_sane) \ \SysYield\ apply (clarsimp simp: syscall_from_H_def syscall_defs) apply (rule ccorres_cond_empty |rule ccorres_cond_univ)+ @@ -402,11 +403,11 @@ lemma handleSyscall_ccorres: apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) - apply (wp schedule_invs' schedule_sch_act_wf | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + apply (wp schedule_invs' schedule_sch_act_wf + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply (simp | wpc | wp hoare_drop_imp handleReply_sane handleReply_nonz_cap_to_ct schedule_invs' - handleReply_ct_not_ksQ[simplified] | strengthen ct_active_not_idle'_strengthen invs_valid_objs_strengthen)+ apply (rule_tac Q="\rv. invs' and ct_active'" in hoare_post_imp, simp) apply (wp hy_invs') @@ -424,7 +425,7 @@ lemma handleSyscall_ccorres: apply (frule active_ex_cap') apply (clarsimp simp: invs'_def valid_state'_def) apply (clarsimp simp: simple_sane_strg ct_in_state'_def st_tcb_at'_def obj_at'_def - isReply_def ct_not_ksQ) + isReply_def) apply (rule conjI, fastforce) apply (auto simp: syscall_from_H_def Kernel_C.SysSend_def split: option.split_asm) @@ -458,7 +459,7 @@ lemma ccorres_corres_u_xf: apply (drule (1) bspec) apply (clarsimp simp: exec_C_def no_fail_def) apply (drule_tac x = a in spec) - apply (clarsimp simp:gets_def NonDetMonad.bind_def get_def return_def) + apply (clarsimp simp:gets_def Nondet_Monad.bind_def get_def return_def) apply (rule conjI) apply clarsimp apply (erule_tac x=0 in allE) @@ -489,7 +490,7 @@ lemma no_fail_callKernel: apply (rule corres_nofail) apply (rule corres_guard_imp) apply (rule kernel_corres) - apply (force simp: word_neq_0_conv) + apply (force simp: word_neq_0_conv schact_is_rct_def) apply (simp add: sch_act_simple_def) apply metis done @@ -502,6 +503,7 @@ lemma handleHypervisorEvent_ccorres: apply (simp add: callKernel_def handleEvent_def handleHypervisorEvent_C_def) apply (simp add: liftE_def bind_assoc) apply (rule ccorres_guard_imp) + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l) apply (cases t; simp add: handleHypervisorFault_def) apply (ctac (no_vcg) add: schedule_ccorres) @@ -511,7 +513,7 @@ lemma handleHypervisorEvent_ccorres: apply simp apply assumption apply (wp schedule_sch_act_wf schedule_invs' - | strengthen invs_queues_imp invs_valid_objs_strengthen)+ + | strengthen invs_valid_objs_strengthen invs_pspace_aligned' invs_pspace_distinct')+ apply clarsimp+ done @@ -611,9 +613,9 @@ lemma callKernel_withFastpath_corres_C: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_r)+ apply (rule ccorres_Cond_rhs) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: 
ccorres_get_registers[OF fastpath_call_ccorres_callKernel]) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_reply_recv_ccorres_callKernel]) apply vcg apply (rule conseqPre, vcg, clarsimp) @@ -642,14 +644,14 @@ lemma threadSet_all_invs_triv': apply (simp add: tcb_cte_cases_def) apply (simp add: exst_same_def) apply (wp thread_set_invs_trivial thread_set_ct_running thread_set_not_state_valid_sched - threadSet_invs_trivial threadSet_ct_running' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp thread_set_ct_in_state - | simp add: tcb_cap_cases_def tcb_arch_ref_def + | simp add: tcb_cap_cases_def tcb_arch_ref_def exst_same_def | rule threadSet_ct_in_state' | wp (once) hoare_vcg_disj_lift)+ apply clarsimp apply (rule exI, rule conjI, assumption) - apply (clarsimp simp: invs_def invs'_def cur_tcb_def cur_tcb'_def) + apply (clarsimp simp: invs_def invs'_def cur_tcb_def cur_tcb'_def invs_psp_aligned invs_distinct) apply (simp add: state_relation_def) done @@ -694,13 +696,13 @@ lemma entry_corres_C: apply simp apply (rule corres_split) (* FIXME: fastpath - apply (rule corres_cases[where R=fp], simp_all add: dc_def[symmetric])[1] - apply (rule callKernel_withFastpath_corres_C, simp) + apply (rule corres_cases[where R=fp]; simp) + apply (rule callKernel_withFastpath_corres_C) *) - apply (rule callKernel_corres_C[unfolded dc_def], simp) + apply (rule callKernel_corres_C) apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. t' = tcb_ptr_to_ctcb_ptr t"]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (rule getContext_corres[unfolded o_def], simp) + apply (rule getContext_corres, simp) apply (wp threadSet_all_invs_triv' callKernel_cur)+ apply (clarsimp simp: all_invs'_def invs'_def cur_tcb'_def valid_state'_def) apply simp @@ -730,15 +732,7 @@ lemma ct_running'_C: apply (frule (1) map_to_ko_atI') apply (erule obj_at'_weakenE) apply (clarsimp simp: ctcb_relation_def cthread_state_relation_def) - apply (case_tac "tcbState ko", simp_all add: - ThreadState_Running_def - ThreadState_BlockedOnReceive_def - ThreadState_BlockedOnSend_def - ThreadState_BlockedOnReply_def - ThreadState_BlockedOnNotification_def - ThreadState_Inactive_def - ThreadState_IdleThreadState_def - ThreadState_Restart_def) + apply (case_tac "tcbState ko"; simp add: ThreadState_defs) done lemma full_invs_both: @@ -802,7 +796,7 @@ lemma user_memory_update_corres_C: prefer 2 apply (clarsimp simp add: doMachineOp_def user_memory_update_def simpler_modify_def simpler_gets_def select_f_def - NonDetMonad.bind_def return_def) + Nondet_Monad.bind_def return_def) apply (thin_tac P for P)+ apply (case_tac a, clarsimp) apply (case_tac ksMachineState, clarsimp) @@ -829,7 +823,7 @@ lemma device_update_corres_C: apply (clarsimp simp add: setDeviceState_C_def simpler_modify_def) apply (rule ballI) apply (clarsimp simp: simpler_modify_def setDeviceState_C_def) - apply (clarsimp simp: doMachineOp_def device_memory_update_def NonDetMonad.bind_def in_monad + apply (clarsimp simp: doMachineOp_def device_memory_update_def Nondet_Monad.bind_def in_monad gets_def get_def return_def simpler_modify_def select_f_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) @@ -870,17 +864,22 @@ lemma dmo_domain_user_mem'[wp]: done lemma do_user_op_corres_C: - "corres_underlying rf_sr False False (=) (invs' and ex_abs einvs) \ - (doUserOp f tc) (doUserOp_C f tc)" + "corres_underlying rf_sr False 
False (=) + (invs' and ksReadyQueues_asrt and ex_abs einvs) \ + (doUserOp f tc) (doUserOp_C f tc)" apply (simp only: doUserOp_C_def doUserOp_def split_def) apply (rule corres_guard_imp) apply (rule_tac P=\ and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: simpler_gets_def getCurThread_def corres_underlying_def rf_sr_def cstate_relation_def Let_def) - apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) apply (clarsimp simp: cstate_to_A_def absKState_def rf_sr_def cstate_to_H_correct ptable_lift_def) - apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) + apply (rule_tac P="valid_state' and ksReadyQueues_asrt" + and P'=\ and r'="(=)" + in corres_split) apply (clarsimp simp: cstate_to_A_def absKState_def rf_sr_def cstate_to_H_correct ptable_rights_def) apply (rule_tac P=pspace_distinct' and P'=\ and r'="(=)" @@ -897,7 +896,7 @@ lemma do_user_op_corres_C: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) apply (drule(1) device_mem_C_relation[symmetric]) - apply (simp add: comp_def) + apply simp apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: cstate_relation_def rf_sr_def Let_def cmachine_state_relation_def) @@ -917,7 +916,7 @@ lemma do_user_op_corres_C: apply (rule corres_split[OF user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C, where R="\\" and R'="\\"]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (intro conjI allI ballI impI) apply ((clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def)+)[5] apply (clarsimp simp: ex_abs_def restrict_map_def @@ -978,6 +977,9 @@ lemma refinement2_both: apply (subst cstate_to_H_correct) apply (fastforce simp: full_invs'_def invs'_def) apply (clarsimp simp: rf_sr_def) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) apply (simp add:absKState_def observable_memory_def absExst_def) apply (rule MachineTypes.machine_state.equality,simp_all)[1] apply (rule ext) @@ -1004,13 +1006,35 @@ lemma refinement2_both: apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) - apply (fastforce simp: full_invs'_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce apply (erule_tac P="a \ b \ c \ (\x. 
e x)" for a b c d e in disjE) apply (clarsimp simp add: do_user_op_C_def do_user_op_H_def monad_to_transition_def) apply (rule rev_mp, rule_tac f="uop" and tc=af in do_user_op_corres_C) apply (clarsimp simp: corres_underlying_def invs_def ex_abs_def) - apply (fastforce simp: full_invs'_def ex_abs_def) + apply (drule bspec) + apply fastforce + apply clarsimp + apply (elim impE) + apply (clarsimp simp: full_invs'_def ex_abs_def) + apply (intro conjI) + apply (rule ksReadyQueues_asrt_cross) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: lift_state_relation_def full_invs_def) + apply (frule state_relation_ready_queues_relation) + apply (fastforce simp: ready_queues_relation_def Let_def tcbQueueEmpty_def) + apply fastforce apply (clarsimp simp: check_active_irq_C_def check_active_irq_H_def) apply (rule rev_mp, rule check_active_irq_corres_C) @@ -1102,7 +1126,7 @@ lemma kernel_all_subset_kernel: check_active_irq_H_def checkActiveIRQ_def) apply clarsimp apply (erule in_monad_imp_rewriteE[where F=True]) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule monadic_rewrite_bind_tail)+ apply (rule monadic_rewrite_bind_head[where P=\]) apply (simp add: callKernel_C_def callKernel_withFastpath_C_def diff --git a/proof/crefine/X64/Retype_C.thy b/proof/crefine/X64/Retype_C.thy index 6360c8a645..f51d8c09b2 100644 --- a/proof/crefine/X64/Retype_C.thy +++ b/proof/crefine/X64/Retype_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -633,46 +634,6 @@ lemma field_of_t_refl: apply (simp add: unat_eq_0) done -lemma typ_slice_list_array: - "x < size_td td * n - \ typ_slice_list (map (\i. DTPair td (nm i)) [0.. k < n - \ gd (p +\<^sub>p int k) - \ h_t_valid htd gd (p +\<^sub>p int k)" - apply (clarsimp simp: h_t_array_valid_def h_t_valid_def valid_footprint_def - size_of_def[symmetric, where t="TYPE('a)"]) - apply (drule_tac x="k * size_of TYPE('a) + y" in spec) - apply (drule mp) - apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) - apply (simp add: mult.commute) - apply (clarsimp simp: ptr_add_def add.assoc) - apply (erule map_le_trans[rotated]) - apply (clarsimp simp: uinfo_array_tag_n_m_def) - apply (subst typ_slice_list_array) - apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) - apply (simp add: mult.commute size_of_def) - apply (simp add: size_of_def list_map_mono) - done - lemma h_t_valid_ptr_retyps_gen: assumes sz: "nptrs * size_of TYPE('a :: mem_type) < addr_card" and gd: "gd p'" @@ -804,11 +765,6 @@ lemma ptr_add_orth: apply (simp add: addr_card_wb [symmetric]) done -lemma dom_lift_t_heap_update: - "dom (lift_t g (hrs_mem_update v hp)) = dom (lift_t g hp)" - by (clarsimp simp add: lift_t_def lift_typ_heap_if s_valid_def hrs_htd_def hrs_mem_update_def split_def dom_def - intro!: Collect_cong split: if_split) - lemma h_t_valid_ptr_retyps_gen_same: assumes guard: "\n' < nptrs. gd (CTypesDefs.ptr_add (Ptr p :: 'a ptr) (of_nat n'))" assumes cleared: "region_is_bytes' p (nptrs * size_of TYPE('a :: mem_type)) htd" @@ -1141,7 +1097,7 @@ lemma ptr_add_to_new_cap_addrs: shows "(CTypesDefs.ptr_add (Ptr ptr :: 'a :: mem_type ptr) \ of_nat) ` {k. 
k < n} = Ptr ` set (new_cap_addrs n ptr ko)" unfolding new_cap_addrs_def - apply (simp add: comp_def image_image shiftl_t2n size_of_m field_simps) + apply (simp add: image_image shiftl_t2n size_of_m field_simps) apply (clarsimp simp: atLeastLessThan_def lessThan_def) done @@ -1173,29 +1129,6 @@ lemma update_ti_t_machine_word_0s: "word_rcat [0, 0, 0, 0,0,0,0,(0 :: word8)] = (0 :: machine_word)" by (simp_all add: typ_info_word word_rcat_def bin_rcat_def) -lemma is_aligned_ptr_aligned: - fixes p :: "'a :: c_type ptr" - assumes al: "is_aligned (ptr_val p) n" - and alignof: "align_of TYPE('a) = 2 ^ n" - shows "ptr_aligned p" - using al unfolding is_aligned_def ptr_aligned_def - by (simp add: alignof) - -lemma is_aligned_c_guard: - "is_aligned (ptr_val p) n - \ ptr_val p \ 0 - \ align_of TYPE('a) = 2 ^ m - \ size_of TYPE('a) \ 2 ^ n - \ m \ n - \ c_guard (p :: ('a :: c_type) ptr)" - apply (clarsimp simp: c_guard_def c_null_guard_def) - apply (rule conjI) - apply (rule is_aligned_ptr_aligned, erule(1) is_aligned_weaken, simp) - apply (erule is_aligned_get_word_bits, simp_all) - apply (rule intvl_nowrap[where x=0, simplified], simp) - apply (erule is_aligned_no_wrap_le, simp+) - done - lemma retype_guard_helper: assumes cover: "range_cover p sz (objBitsKO ko) n" and ptr0: "p \ 0" @@ -2838,21 +2771,6 @@ next done qed -(* FIXME: move *) -lemma ccorres_to_vcg_nf: - "\ccorres rrel xf P P' [] a c; no_fail Q a; \s. P s \ Q s\ - \ \\ {s. P \ \ s \ P' \ (\, s) \ rf_sr} c - {s. \(rv, \')\fst (a \). (\', s) \ rf_sr \ rrel rv (xf s)}" - apply (rule HoarePartial.conseq_exploit_pre) - apply clarsimp - apply (rule conseqPre) - apply (drule ccorres_to_vcg') - prefer 2 - apply simp - apply (simp add: no_fail_def) - apply clarsimp - done - lemma mdb_node_get_mdbNext_heap_ccorres: "ccorres (=) ret__unsigned_longlong_' \ UNIV hs (liftM (mdbNext \ cteMDBNode) (getCTE parent)) @@ -2884,8 +2802,8 @@ lemma getCTE_pre_cte_at: lemmas ccorres_getCTE_cte_at = ccorres_guard_from_wp [OF getCTE_pre_cte_at empty_fail_getCTE] ccorres_guard_from_wp_bind [OF getCTE_pre_cte_at empty_fail_getCTE] -lemmas ccorres_guard_from_wp_liftM = ccorres_guard_from_wp [OF liftM_pre iffD2 [OF empty_fail_liftM]] -lemmas ccorres_guard_from_wp_bind_liftM = ccorres_guard_from_wp_bind [OF liftM_pre iffD2 [OF empty_fail_liftM]] +lemmas ccorres_guard_from_wp_liftM = ccorres_guard_from_wp [OF liftM_pre empty_fail_liftM] +lemmas ccorres_guard_from_wp_bind_liftM = ccorres_guard_from_wp_bind [OF liftM_pre empty_fail_liftM] lemmas ccorres_liftM_getCTE_cte_at = ccorres_guard_from_wp_liftM [OF getCTE_pre_cte_at empty_fail_getCTE] ccorres_guard_from_wp_bind_liftM [OF getCTE_pre_cte_at empty_fail_getCTE] @@ -2916,9 +2834,10 @@ lemma insertNewCap_ccorres_helper: apply (rule conjI) apply (erule (2) cmap_relation_updI) apply (simp add: ccap_relation_def ccte_relation_def cte_lift_def) - subgoal by (simp add: cte_to_H_def map_option_Some_eq2 mdb_node_to_H_def to_bool_mask_to_bool_bf is_aligned_neg_mask_weaken - c_valid_cte_def true_def canonical_address_sign_extended sign_extended_iff_sign_extend cteSizeBits_def - split: option.splits) + subgoal by (simp add: cte_to_H_def map_option_Some_eq2 mdb_node_to_H_def to_bool_mask_to_bool_bf + is_aligned_neg_mask_weaken c_valid_cte_def canonical_address_sign_extended + sign_extended_iff_sign_extend cteSizeBits_def + split: option.splits) subgoal by simp apply (erule_tac t = s' in ssubst) apply (simp cong: lifth_update) @@ -3232,16 +3151,6 @@ lemma createNewCaps_untyped_if_helper: (\ gbits \ sz) = (s' \ \of_nat sz < (of_nat 
gbits :: machine_word)\)" by (clarsimp simp: not_le unat_of_nat64 word_less_nat_alt lt_word_bits_lt_pow) -lemma true_mask1 [simp]: - "true && mask (Suc 0) = true" - unfolding true_def - by (simp add: bang_eq cong: conj_cong) - -lemma to_bool_simps [simp]: - "to_bool true" "\ to_bool false" - unfolding true_def false_def to_bool_def - by simp_all - lemma heap_list_update': "\ n = length v; length v \ 2 ^ word_bits \ \ heap_list (heap_update_list p v h) n p = v" by (simp add: heap_list_update addr_card_wb) @@ -3624,7 +3533,6 @@ lemma cnc_tcb_helper: assumes rfsr: "(\\ksPSpace := ks\, x) \ rf_sr" assumes al: "is_aligned (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb)" assumes ptr0: "ctcb_ptr_to_tcb_ptr p \ 0" - assumes vq: "valid_queues \" assumes pal: "pspace_aligned' (\\ksPSpace := ks\)" assumes pno: "pspace_no_overlap' (ctcb_ptr_to_tcb_ptr p) (objBitsKO kotcb) (\\ksPSpace := ks\)" assumes pds: "pspace_distinct' (\\ksPSpace := ks\)" @@ -3752,8 +3660,7 @@ proof - apply (simp add: hrs_mem_def, subst rep0) apply (simp only: take_replicate, simp add: cte_C_size objBits_simps') apply (simp add: cte_C_size objBits_simps') - apply (simp add: fun_eq_iff o_def - split: if_split) + apply (simp add: fun_eq_iff split: if_split) apply (simp add: hrs_comm packed_heap_update_collapse typ_heap_simps) apply (subst clift_heap_update_same_td_name', simp_all, @@ -3995,20 +3902,20 @@ proof - unfolding ctcb_relation_def makeObject_tcb heap_updates_defs initContext_registers_def apply (simp add: fbtcb minBound_word) apply (intro conjI) - apply (simp add: cthread_state_relation_def thread_state_lift_def - eval_nat_numeral ThreadState_Inactive_def) - apply (clarsimp simp: ccontext_relation_def newContext_def2 carch_tcb_relation_def - newArchTCB_def cregs_relation_def atcbContextGet_def fpu_relation_def) - apply (case_tac r; simp add: C_register_defs index_foldr_update - atcbContext_def newArchTCB_def newContext_def - initContext_def selCS3_eq selDS3_eq) - apply (clarsimp simp: fpu_relation_def) - apply (simp add: thread_state_lift_def index_foldr_update atcbContextGet_def) - apply (simp add: Kernel_Config.timeSlice_def) - apply (simp add: cfault_rel_def seL4_Fault_lift_def seL4_Fault_get_tag_def Let_def - lookup_fault_lift_def lookup_fault_get_tag_def lookup_fault_invalid_root_def - index_foldr_update seL4_Fault_NullFault_def option_to_ptr_def option_to_0_def - split: if_split)+ + apply (simp add: cthread_state_relation_def thread_state_lift_def + eval_nat_numeral ThreadState_defs) + apply (clarsimp simp: ccontext_relation_def newContext_def2 carch_tcb_relation_def + newArchTCB_def cregs_relation_def atcbContextGet_def fpu_relation_def) + apply (case_tac r; simp add: C_register_defs index_foldr_update + atcbContext_def newArchTCB_def newContext_def + initContext_def) + apply (simp add: thread_state_lift_def index_foldr_update atcbContextGet_def) + apply (simp add: Kernel_Config.timeSlice_def) + apply (simp add: cfault_rel_def seL4_Fault_lift_def seL4_Fault_get_tag_def Let_def + lookup_fault_lift_def lookup_fault_get_tag_def lookup_fault_invalid_root_def + index_foldr_update seL4_Fault_NullFault_def option_to_ptr_def option_to_0_def + split: if_split)+ + apply (simp add: option_to_ctcb_ptr_def) done have pks: "ks (ctcb_ptr_to_tcb_ptr p) = None" @@ -4059,15 +3966,6 @@ proof - apply (fastforce simp: dom_def) done - hence kstcb: "\qdom prio. 
ctcb_ptr_to_tcb_ptr p \ set (ksReadyQueues \ (qdom, prio))" using vq - apply (clarsimp simp add: valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x = qdom in spec) - apply (drule_tac x = prio in spec) - apply clarsimp - apply (drule (1) bspec) - apply (simp add: obj_at'_def) - done - have ball_subsetE: "\P S R. \ \x \ S. P x; R \ S \ \ \x \ R. P x" by blast @@ -4191,7 +4089,7 @@ proof - apply (simp add: cl_cte [simplified] cl_tcb [simplified] cl_rest [simplified] tag_disj_via_td_name) apply (clarsimp simp: cready_queues_relation_def Let_def htd_safe[simplified] kernel_data_refs_domain_eq_rotate) - apply (simp add: heap_updates_def kstcb tcb_queue_update_other' hrs_htd_update + apply (simp add: heap_updates_def tcb_queue_update_other' hrs_htd_update ptr_retyp_to_array[simplified] irq[simplified]) apply (match premises in H: \fpu_null_state_relation _\ \ \match premises in _[thin]: _ (multi) \ \insert H\\) @@ -4446,7 +4344,7 @@ lemma mapM_x_storeWord_step: apply (subst if_not_P) apply (subst not_less) apply (erule is_aligned_no_overflow) - apply (simp add: mapM_x_map comp_def upto_enum_word del: upt.simps) + apply (simp add: mapM_x_map upto_enum_word del: upt.simps) apply (subst div_power_helper_64 [OF sz2, simplified]) apply assumption apply (simp add: word_bits_def unat_minus_one del: upt.simps) @@ -4876,7 +4774,7 @@ lemma copyGlobalMappings_ccorres: apply (cinit lift: new_vspace_' simp:) apply csymbr apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState, rename_tac skimPM) - apply (rule ccorres_rel_imp[where r=dc, OF _ dc_simp]) + apply (rule ccorres_rel_imp[where r=dc, simplified]) apply (clarsimp simp: whileAnno_def objBits_simps archObjSize_def getPML4Index_def bit_simps X64.pptrBase_def mask_def) apply csymbr @@ -5069,12 +4967,10 @@ lemma ccorres_placeNewObject_endpoint: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ep [where ptr=regionBase and n="1" and sz="objBitsKO (KOEndpoint makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -5107,12 +5003,10 @@ lemma ccorres_placeNewObject_notification: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ntfn [where ptr=regionBase and n="1" and sz="objBitsKO (KONotification makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps' - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps' + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp 
simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -5172,11 +5066,10 @@ lemma ccorres_placeNewObject_captable: apply (clarsimp simp: split_def new_cap_addrs_def) apply (cut_tac createObjects_ccorres_cte [where ptr=regionBase and n="2 ^ unat userSize" and sz="unat userSize + objBitsKO (KOCTE makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def cteSizeBits_def)+ - apply (clarsimp simp: split_def objBitsKO_def - Fun.comp_def rf_sr_def split_def Let_def cteSizeBits_def - new_cap_addrs_def field_simps power_add ptr_retyps_gen_def - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def cteSizeBits_def)+ + apply (clarsimp simp: split_def objBitsKO_def rf_sr_def split_def Let_def cteSizeBits_def + new_cap_addrs_def field_simps power_add ptr_retyps_gen_def + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv range_cover_def) apply (clarsimp simp: objBitsKO_def objBits_simps' range_cover.aligned) apply (clarsimp simp: no_fail_def) @@ -5285,7 +5178,8 @@ lemma rf_sr_fpu_null_relation: lemma ccorres_placeNewObject_tcb: "ccorresG rf_sr \ dc xfdc - (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase tcbBlockSizeBits and valid_queues and (\s. sym_refs (state_refs_of' s)) + (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase tcbBlockSizeBits + and (\s. sym_refs (state_refs_of' s)) and (\s. 2 ^ tcbBlockSizeBits \ gsMaxObjectSize s) and ret_zero regionBase (2 ^ tcbBlockSizeBits) and K (regionBase \ 0 \ range_cover regionBase tcbBlockSizeBits tcbBlockSizeBits 1 @@ -5384,11 +5278,11 @@ lemma placeNewObject_pte: apply (clarsimp simp: split_def new_cap_addrs_def) apply (cut_tac s=\ in createObjects_ccorres_pte [where ptr=regionBase and sz=pageBits]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ apply (clarsimp simp: split_def objBitsKO_def archObjSize_def - Fun.comp_def rf_sr_def split_def Let_def ptr_retyps_gen_def - new_cap_addrs_def field_simps power_add - cong: globals.unfold_congs) + rf_sr_def split_def Let_def ptr_retyps_gen_def + new_cap_addrs_def field_simps power_add + cong: globals.unfold_congs) apply (simp add: Int_ac bit_simps) apply (clarsimp simp: word_bits_conv range_cover_def archObjSize_def bit_simps) apply (clarsimp simp: objBitsKO_def range_cover.aligned archObjSize_def bit_simps) @@ -5694,7 +5588,7 @@ qed lemma placeNewObject_user_data: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and pspace_no_overlap' regionBase (pageBits+us) - and valid_queues and valid_machine_state' + and valid_machine_state' and ret_zero regionBase (2 ^ (pageBits+us)) and (\s. sym_refs (state_refs_of' s)) and (\s. 2^(pageBits + us) \ gsMaxObjectSize s) @@ -5833,7 +5727,7 @@ lemma placeNewObject_user_data_device: "ccorresG rf_sr \ dc xfdc (pspace_aligned' and pspace_distinct' and ret_zero regionBase (2 ^ (pageBits + us)) - and pspace_no_overlap' regionBase (pageBits+us) and valid_queues + and pspace_no_overlap' regionBase (pageBits+us) and (\s. sym_refs (state_refs_of' s)) and (\s. 
2^(pageBits + us) \ gsMaxObjectSize s) and K (regionBase \ 0 \ range_cover regionBase (pageBits + us) (pageBits+us) (Suc 0) @@ -6062,13 +5956,13 @@ proof - apply clarify apply (intro conjI) apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' invs_valid_global' - APIType_capBits_def invs_queues invs_valid_objs' + APIType_capBits_def invs_valid_objs' invs_urz pageBits_def) apply clarsimp apply (clarsimp simp: pageBits_def ccap_relation_def APIType_capBits_def framesize_to_H_def cap_to_H_simps cap_page_table_cap_lift vmrights_to_H_def) - apply (clarsimp simp: to_bool_def false_def isFrameType_def) + apply (clarsimp simp: isFrameType_def) \ \PageDirectoryObject\ apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') @@ -6091,13 +5985,13 @@ proof - apply clarify apply (intro conjI) apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' invs_valid_global' - APIType_capBits_def invs_queues invs_valid_objs' + APIType_capBits_def invs_valid_objs' invs_urz bit_simps) apply clarsimp apply (clarsimp simp: ccap_relation_def APIType_capBits_def framesize_to_H_def cap_to_H_simps cap_page_directory_cap_lift vmrights_to_H_def bit_simps) - apply (clarsimp simp: to_bool_def false_def isFrameType_def) + apply (clarsimp simp: isFrameType_def) \ \PDPointerTableObject\ apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') @@ -6120,12 +6014,12 @@ proof - apply clarify apply (intro conjI) apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' invs_valid_global' - APIType_capBits_def invs_queues invs_valid_objs' invs_urz bit_simps) + APIType_capBits_def invs_valid_objs' invs_urz bit_simps) apply clarsimp apply (clarsimp simp: ccap_relation_def APIType_capBits_def framesize_to_H_def cap_to_H_simps cap_pdpt_cap_lift vmrights_to_H_def bit_simps) - apply (clarsimp simp: to_bool_def false_def isFrameType_def) + apply (clarsimp simp: isFrameType_def) \ \PML4Object\ apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') @@ -6151,13 +6045,12 @@ proof - apply clarify apply (intro conjI) apply (clarsimp simp: invs_pspace_aligned' invs_pspace_distinct' invs_valid_global' - APIType_capBits_def invs_queues invs_valid_objs' invs_urz bit_simps) + APIType_capBits_def invs_valid_objs' invs_urz bit_simps) apply clarsimp apply (clarsimp simp: ccap_relation_def APIType_capBits_def - framesize_to_H_def cap_to_H_simps cap_pml4_cap_lift - vmrights_to_H_def bit_simps) - apply (clarsimp simp: to_bool_def false_def isFrameType_def - c_valid_cap_def cl_valid_cap_def asidInvalid_def) + framesize_to_H_def cap_to_H_simps cap_pml4_cap_lift + vmrights_to_H_def bit_simps) + apply (clarsimp simp: isFrameType_def c_valid_cap_def cl_valid_cap_def asidInvalid_def) done qed @@ -6204,7 +6097,7 @@ lemma gsCNodes_update_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -6257,15 +6150,11 @@ lemma threadSet_domain_ccorres [corres]: apply (simp add: map_to_ctes_upd_tcb_no_ctes map_to_tcbs_upd tcb_cte_cases_def) apply (simp add: cep_relations_drop_fun_upd cvariable_relation_upd_const ko_at_projectKO_opt) - apply (rule conjI) - apply (drule ko_at_projectKO_opt) - apply (erule (2) cmap_relation_upd_relI) - subgoal by (simp add: ctcb_relation_def) - apply assumption - apply simp - apply (erule cready_queues_relation_not_queue_ptrs) - 
apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) + apply (drule ko_at_projectKO_opt) + apply (erule (2) cmap_relation_upd_relI) + subgoal by (simp add: ctcb_relation_def) + apply assumption + apply simp done lemma createObject_ccorres: @@ -6304,11 +6193,11 @@ proof - apply (rule ccorres_cond_seq) (* Architecture specific objects. *) apply (rule_tac - Q="createObject_hs_preconds regionBase newType userSize isdev" and - S="createObject_c_preconds1 regionBase newType userSize isdev" and - R="createObject_hs_preconds regionBase newType userSize isdev" and - T="createObject_c_preconds1 regionBase newType userSize isdev" - in ccorres_Cond_rhs) + Q="createObject_hs_preconds regionBase newType userSize isdev" and + S="createObject_c_preconds1 regionBase newType userSize isdev" and + R="createObject_hs_preconds regionBase newType userSize isdev" and + T="createObject_c_preconds1 regionBase newType userSize isdev" + in ccorres_Cond_rhs) apply (subgoal_tac "toAPIType newType = None") apply clarsimp apply (rule ccorres_rhs_assoc)+ @@ -6341,21 +6230,20 @@ proof - intro!: Corres_UL_C.ccorres_cond_empty Corres_UL_C.ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.Untyped) - (unat (userSizea :: machine_word)) isdev" and - A'=UNIV in - ccorres_guard_imp) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.Untyped) + (unat (userSizea :: machine_word)) isdev" and + A'=UNIV in + ccorres_guard_imp) apply (rule ccorres_symb_exec_r) apply (rule ccorres_return_C, simp, simp, simp) apply vcg apply (rule conseqPre, vcg, clarsimp) apply simp apply (clarsimp simp: ccap_relation_def cap_to_H_def - getObjectSize_def apiGetObjectSize_def - cap_untyped_cap_lift to_bool_eq_0 true_def - aligned_add_aligned sign_extend_canonical_address - split: option.splits) + getObjectSize_def apiGetObjectSize_def cap_untyped_cap_lift + aligned_add_aligned sign_extend_canonical_address + split: option.splits) apply (subst word_le_mask_eq, clarsimp simp: mask_def, unat_arith, auto simp: word_bits_conv untypedBits_defs)[1] @@ -6365,11 +6253,11 @@ proof - intro!: Corres_UL_C.ccorres_cond_empty Corres_UL_C.ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.TCBObject) (unat userSizea) isdev" in + ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) apply (ccorres_remove_UNIV_guard) apply (simp add: hrs_htd_update) @@ -6386,14 +6274,13 @@ proof - apply (simp add: obj_at'_real_def) apply (wp placeNewObject_ko_wp_at') apply (vcg exspec=Arch_initContext_modifies) - apply (clarsimp simp: dc_def) + apply clarsimp apply vcg apply (rule conseqPre, vcg, clarsimp) apply (clarsimp simp: createObject_hs_preconds_def createObject_c_preconds_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (simp add: getObjectSize_def objBits_simps word_bits_conv apiGetObjectSize_def @@ -6402,10 +6289,10 @@ proof - region_actually_is_bytes_def APIType_capBits_def) apply (frule(1) ghost_assertion_size_logic_no_unat) apply (clarsimp 
simp: ccap_relation_def cap_to_H_def - getObjectSize_def apiGetObjectSize_def - cap_thread_cap_lift to_bool_def true_def - aligned_add_aligned - split: option.splits) + getObjectSize_def apiGetObjectSize_def + cap_thread_cap_lift + aligned_add_aligned + split: option.splits) apply (frule range_cover.aligned) apply (clarsimp simp: ctcb_ptr_to_tcb_ptr_def ctcb_offset_defs tcb_ptr_to_ctcb_ptr_def @@ -6413,17 +6300,16 @@ proof - (* Endpoint *) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def nAPIObjects_def - word_sle_def intro!: ccorres_cond_empty ccorres_cond_univ - ccorres_rhs_assoc) + toAPIType_def nAPIObjects_def word_sle_def + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.EndpointObject) - (unat (userSizea :: machine_word)) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.EndpointObject) - (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.EndpointObject) + (unat (userSizea :: machine_word)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.EndpointObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) apply (simp add: hrs_htd_update) apply (ctac (no_vcg) pre only: add: ccorres_placeNewObject_endpoint) apply (rule ccorres_symb_exec_r) @@ -6432,35 +6318,34 @@ proof - apply (rule conseqPre, vcg, clarsimp) apply wp apply (clarsimp simp: ccap_relation_def cap_to_H_def getObjectSize_def - objBits_simps apiGetObjectSize_def epSizeBits_def - cap_endpoint_cap_lift to_bool_def true_def sign_extend_canonical_address - split: option.splits dest!: range_cover.aligned) + objBits_simps apiGetObjectSize_def epSizeBits_def + cap_endpoint_cap_lift sign_extend_canonical_address + split: option.splits + dest!: range_cover.aligned) apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (auto simp: getObjectSize_def objBits_simps apiGetObjectSize_def epSizeBits_def word_bits_conv - elim!: is_aligned_no_wrap' intro!: range_cover_simpleI)[1] + elim!: is_aligned_no_wrap' + intro!: range_cover_simpleI)[1] (* Notification *) apply (clarsimp simp: createObject_c_preconds_def) - apply (clarsimp simp: getObjectSize_def objBits_simps - apiGetObjectSize_def - epSizeBits_def word_bits_conv word_sle_def word_sless_def) + apply (clarsimp simp: getObjectSize_def objBits_simps apiGetObjectSize_def + epSizeBits_def word_bits_conv word_sle_def word_sless_def) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def nAPIObjects_def - word_sle_def intro!: ccorres_cond_empty ccorres_cond_univ - ccorres_rhs_assoc) + toAPIType_def nAPIObjects_def word_sle_def + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.NotificationObject) - (unat (userSizea :: machine_word)) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.NotificationObject) - (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.NotificationObject) + (unat (userSizea :: machine_word)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.NotificationObject) + (unat userSizea) isdev" in + 
ccorres_guard_imp2) apply (simp add: hrs_htd_update) apply (ctac (no_vcg) pre only: add: ccorres_placeNewObject_notification) apply (rule ccorres_symb_exec_r) @@ -6469,19 +6354,20 @@ proof - apply (rule conseqPre, vcg, clarsimp) apply wp apply (clarsimp simp: ccap_relation_def cap_to_H_def - getObjectSize_def sign_extend_canonical_address - apiGetObjectSize_def ntfnSizeBits_def objBits_simps - cap_notification_cap_lift to_bool_def true_def - dest!: range_cover.aligned split: option.splits) + getObjectSize_def sign_extend_canonical_address + apiGetObjectSize_def ntfnSizeBits_def objBits_simps + cap_notification_cap_lift + dest!: range_cover.aligned + split: option.splits) apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (auto simp: getObjectSize_def objBits_simps - apiGetObjectSize_def - ntfnSizeBits_def word_bits_conv - elim!: is_aligned_no_wrap' intro!: range_cover_simpleI)[1] + apiGetObjectSize_def + ntfnSizeBits_def word_bits_conv + elim!: is_aligned_no_wrap' + intro!: range_cover_simpleI)[1] (* CapTable *) apply (clarsimp simp: createObject_c_preconds_def) @@ -6489,18 +6375,18 @@ proof - apiGetObjectSize_def ntfnSizeBits_def word_bits_conv) apply (clarsimp simp: Kernel_C_defs object_type_from_H_def - toAPIType_def nAPIObjects_def - word_sle_def word_sless_def zero_le_sint - intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc - ccorres_move_c_guards ccorres_Guard_Seq) + toAPIType_def nAPIObjects_def + word_sle_def word_sless_def zero_le_sint + intro!: ccorres_cond_empty ccorres_cond_univ ccorres_rhs_assoc + ccorres_move_c_guards ccorres_Guard_Seq) apply (rule_tac - A ="createObject_hs_preconds regionBase - (APIObjectType apiobject_type.CapTableObject) - (unat (userSizea :: machine_word)) isdev" and - A'="createObject_c_preconds1 regionBase - (APIObjectType apiobject_type.CapTableObject) - (unat userSizea) isdev" in - ccorres_guard_imp2) + A ="createObject_hs_preconds regionBase + (APIObjectType apiobject_type.CapTableObject) + (unat (userSizea :: machine_word)) isdev" and + A'="createObject_c_preconds1 regionBase + (APIObjectType apiobject_type.CapTableObject) + (unat userSizea) isdev" in + ccorres_guard_imp2) apply (simp add:field_simps hrs_htd_update) apply (ctac pre only: add: ccorres_placeNewObject_captable) apply (subst gsCNodes_update) @@ -6517,28 +6403,26 @@ proof - apply (clarsimp simp: createObject_hs_preconds_def isFrameType_def) apply (frule invs_pspace_aligned') apply (frule invs_pspace_distinct') - apply (frule invs_queues) apply (frule invs_sym') apply (frule(1) ghost_assertion_size_logic_no_unat) apply (clarsimp simp: getObjectSize_def objBits_simps - apiGetObjectSize_def - cteSizeBits_def word_bits_conv add.commute createObject_c_preconds_def - region_actually_is_bytes_def - invs_valid_objs' invs_urz - elim!: is_aligned_no_wrap' - dest: word_of_nat_le intro!: range_coverI)[1] + apiGetObjectSize_def + cteSizeBits_def word_bits_conv add.commute createObject_c_preconds_def + region_actually_is_bytes_def + invs_valid_objs' invs_urz + elim!: is_aligned_no_wrap' + dest: word_of_nat_le + intro!: range_coverI) apply (clarsimp simp: createObject_hs_preconds_def hrs_htd_update isFrameType_def) apply (frule range_cover.strong_times_64[folded addr_card_wb], simp+) apply (subst h_t_array_valid_retyp, simp+) apply (simp add: power_add cte_C_size cteSizeBits_def) - apply (clarsimp simp: ccap_relation_def cap_to_H_def - 
cap_cnode_cap_lift to_bool_def true_def - getObjectSize_def - apiGetObjectSize_def cteSizeBits_def - objBits_simps field_simps is_aligned_power2 - addr_card_wb is_aligned_weaken[where y=2] - is_aligned_neg_mask_weaken - split: option.splits) + apply (clarsimp simp: ccap_relation_def cap_to_H_def cap_cnode_cap_lift + getObjectSize_def apiGetObjectSize_def cteSizeBits_def + objBits_simps field_simps is_aligned_power2 + addr_card_wb is_aligned_weaken[where y=2] + is_aligned_neg_mask_weaken + split: option.splits) apply (rule conjI) apply (frule range_cover.aligned) apply (simp add: aligned_and is_aligned_weaken sign_extend_canonical_address) @@ -6566,7 +6450,7 @@ lemma ccorres_guard_impR: lemma typ_clear_region_dom: "dom (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: 'b :: mem_type typ_heap) \ dom ((clift hp) :: 'b :: mem_type typ_heap)" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def) + apply (clarsimp simp:lift_t_def lift_typ_heap_def comp_def) apply (clarsimp simp:lift_state_def) apply (case_tac hp) apply (clarsimp simp:) @@ -7384,14 +7268,6 @@ lemma insertNewCap_sch_act_simple[wp]: "\sch_act_simple\insertNewCap a b c\\_. sch_act_simple\" by (simp add:sch_act_simple_def,wp) -lemma insertNewCap_ct_active'[wp]: - "\ct_active'\insertNewCap a b c\\_. ct_active'\" - apply (simp add:ct_in_state'_def) - apply (rule hoare_pre) - apply wps - apply (wp insertNewCap_ksCurThread | simp)+ - done - lemma updateMDB_ctes_of_cap: "\\s. (\x\ran(ctes_of s). P (cteCap x)) \ no_0 (ctes_of s)\ updateMDB srcSlot t @@ -7616,7 +7492,8 @@ lemma createObject_caps_overlap_reserved_ret': apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_caps_overlap_reserved_ret'[where sz = "APIType_capBits ty us"]]) apply assumption - apply (case_tac r,simp) + apply (rename_tac rv s) + apply (case_tac rv,simp) apply clarsimp apply (erule caps_overlap_reserved'_subseteq) apply (rule untypedRange_in_capRange) @@ -7689,7 +7566,8 @@ lemma createObject_IRQHandler: apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_IRQHandler[where irq = x and P = "\_ _. False"]]) apply assumption - apply (case_tac r,clarsimp+) + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) apply (clarsimp simp:word_bits_conv) done @@ -7706,7 +7584,8 @@ lemma createObject_capClass[wp]: apply clarsimp apply (rule hoare_strengthen_post[OF createNewCaps_range_helper]) apply assumption - apply (case_tac r,clarsimp+) + apply (rename_tac rv s) + apply (case_tac rv; clarsimp) apply (clarsimp simp:word_bits_conv ) apply (rule range_cover_full) apply (simp add:word_bits_conv)+ @@ -8487,7 +8366,7 @@ shows "ccorres dc xfdc apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2, simp) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_add_return) - apply (simp only: dc_def[symmetric] hrs_htd_update) + apply (simp only: hrs_htd_update) apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? apply (rule ccorres_split_nothrow, rule_tac S="{ptr .. 
ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1}" @@ -8648,9 +8527,9 @@ shows "ccorres dc xfdc including no_pre apply (wp insertNewCap_invs' insertNewCap_valid_pspace' insertNewCap_caps_overlap_reserved' insertNewCap_pspace_no_overlap' insertNewCap_caps_no_overlap'' insertNewCap_descendants_range_in' - insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at static_imp_wp) + insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at hoare_weak_lift_imp) apply (wp insertNewCap_cte_wp_at_other) - apply (wp hoare_vcg_all_lift static_imp_wp insertNewCap_cte_at) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp insertNewCap_cte_at) apply (clarsimp simp:conj_comms | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_distinct')+ @@ -8684,7 +8563,7 @@ shows "ccorres dc xfdc hoare_vcg_prop createObject_gsCNodes_p createObject_cnodes_have_size) apply (rule hoare_vcg_conj_lift[OF createObject_capRange_helper]) apply (wp createObject_cte_wp_at' createObject_ex_cte_cap_wp_to - createObject_no_inter[where sz = sz] hoare_vcg_all_lift static_imp_wp)+ + createObject_no_inter[where sz = sz] hoare_vcg_all_lift hoare_weak_lift_imp)+ apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace' field_simps range_cover.sz conj_comms range_cover.aligned range_cover_sz' is_aligned_shiftl_self aligned_add_aligned[OF range_cover.aligned]) @@ -8846,7 +8725,7 @@ shows "ccorres dc xfdc apply (simp add: o_def) apply (case_tac newType, simp_all add: object_type_from_H_def Kernel_C_defs - nAPIObjects_def APIType_capBits_def o_def split:apiobject_type.splits)[1] + nAPIObjects_def APIType_capBits_def split:apiobject_type.splits)[1] subgoal by (simp add:unat_eq_def word_unat.Rep_inverse' word_less_nat_alt) subgoal by (clarsimp simp:objBits_simps', unat_arith) apply (fold_subgoals (prefix))[3] diff --git a/proof/crefine/X64/SR_lemmas_C.thy b/proof/crefine/X64/SR_lemmas_C.thy index 5b83c100d2..8b00610a92 100644 --- a/proof/crefine/X64/SR_lemmas_C.thy +++ b/proof/crefine/X64/SR_lemmas_C.thy @@ -292,11 +292,15 @@ lemma cmdbnode_relation_mdb_node_to_H [simp]: unfolding cmdbnode_relation_def mdb_node_to_H_def mdb_node_lift_def cte_lift_def by (fastforce split: option.splits) -definition - tcb_no_ctes_proj :: "tcb \ Structures_H.thread_state \ machine_word \ machine_word \ arch_tcb \ bool \ word8 \ word8 \ word8 \ nat \ fault option \ machine_word option" +definition tcb_no_ctes_proj :: + "tcb \ Structures_H.thread_state \ machine_word \ machine_word \ arch_tcb \ bool \ word8 + \ word8 \ word8 \ nat \ fault option \ machine_word option + \ machine_word option \ machine_word option" where - "tcb_no_ctes_proj t \ (tcbState t, tcbFaultHandler t, tcbIPCBuffer t, tcbArch t, tcbQueued t, - tcbMCP t, tcbPriority t, tcbDomain t, tcbTimeSlice t, tcbFault t, tcbBoundNotification t)" + "tcb_no_ctes_proj t \ + (tcbState t, tcbFaultHandler t, tcbIPCBuffer t, tcbArch t, tcbQueued t, + tcbMCP t, tcbPriority t, tcbDomain t, tcbTimeSlice t, tcbFault t, tcbBoundNotification t, + tcbSchedNext t, tcbSchedPrev t)" lemma tcb_cte_cases_proj_eq [simp]: "tcb_cte_cases p = Some (getF, setF) \ @@ -307,7 +311,7 @@ lemma tcb_cte_cases_proj_eq [simp]: (* NOTE: 5 = cte_level_bits *) lemma map_to_ctes_upd_cte': "\ ksPSpace s p = Some (KOCTE cte'); is_aligned p cte_level_bits; ps_clear p cte_level_bits s \ - \ map_to_ctes (ksPSpace s(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" + \ map_to_ctes ((ksPSpace s)(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" apply 
(erule (1) map_to_ctes_upd_cte) apply (simp add: field_simps ps_clear_def3 cte_level_bits_def mask_def) done @@ -315,7 +319,7 @@ lemma map_to_ctes_upd_cte': lemma map_to_ctes_upd_tcb': "[| ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; ps_clear p tcbBlockSizeBits s |] -==> map_to_ctes (ksPSpace s(p |-> KOTCB tcb)) = +==> map_to_ctes ((ksPSpace s)(p |-> KOTCB tcb)) = (%x. if EX getF setF. tcb_cte_cases (x - p) = Some (getF, setF) & getF tcb ~= getF tcb' @@ -442,7 +446,7 @@ lemma fst_setCTE: assumes ct: "cte_at' dest s" and rl: "\s'. \ ((), s') \ fst (setCTE dest cte s); (s' = s \ ksPSpace := ksPSpace s' \); - (ctes_of s' = ctes_of s(dest \ cte)); + (ctes_of s' = (ctes_of s)(dest \ cte)); (map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s')); (map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s')); (map_to_pml4es (ksPSpace s) = map_to_pml4es (ksPSpace s')); @@ -470,7 +474,7 @@ proof - by clarsimp note thms = this - have ceq: "ctes_of s' = ctes_of s(dest \ cte)" + have ceq: "ctes_of s' = (ctes_of s)(dest \ cte)" by (rule use_valid [OF thms(1) setCTE_ctes_of_wp]) simp show ?thesis @@ -672,7 +676,6 @@ proof (rule cor_map_relI [OF map_option_eq_dom_eq]) hence "tcb_no_ctes_proj tcb = tcb_no_ctes_proj tcb'" using om apply - - apply (simp add: o_def) apply (drule fun_cong [where x = x]) apply simp done @@ -1062,7 +1065,6 @@ lemma cstate_relation_only_t_hrs: ksCurThread_' s = ksCurThread_' t; ksIdleThread_' s = ksIdleThread_' t; ksWorkUnitsCompleted_' s = ksWorkUnitsCompleted_' t; - intStateIRQNode_' s = intStateIRQNode_' t; intStateIRQTable_' s = intStateIRQTable_' t; x86KSASIDTable_' s = x86KSASIDTable_' t; x64KSCurrentUserCR3_' s = x64KSCurrentUserCR3_' t; @@ -1072,6 +1074,7 @@ lemma cstate_relation_only_t_hrs: ksCurDomain_' s = ksCurDomain_' t; ksDomainTime_' s = ksDomainTime_' t; num_ioapics_' s = num_ioapics_' t; + ioapic_nirqs_' s = ioapic_nirqs_' t; x86KSIRQState_' s = x86KSIRQState_' t \ \ cstate_relation a s = cstate_relation a t" @@ -1088,7 +1091,6 @@ lemma rf_sr_upd: "(ksCurThread_' (globals x)) = (ksCurThread_' (globals y))" "(ksIdleThread_' (globals x)) = (ksIdleThread_' (globals y))" "(ksWorkUnitsCompleted_' (globals x)) = (ksWorkUnitsCompleted_' (globals y))" - "intStateIRQNode_'(globals x) = intStateIRQNode_' (globals y)" "intStateIRQTable_'(globals x) = intStateIRQTable_' (globals y)" "x86KSASIDTable_' (globals x) = x86KSASIDTable_' (globals y)" "x64KSCurrentUserCR3_' (globals x) = x64KSCurrentUserCR3_' (globals y)" @@ -1098,6 +1100,7 @@ lemma rf_sr_upd: "ksCurDomain_' (globals x) = ksCurDomain_' (globals y)" "ksDomainTime_' (globals x) = ksDomainTime_' (globals y)" "num_ioapics_' (globals x) = num_ioapics_' (globals y)" + "ioapic_nirqs_' (globals x) = ioapic_nirqs_' (globals y)" "x86KSIRQState_' (globals x) = x86KSIRQState_' (globals y)" shows "((a, x) \ rf_sr) = ((a, y) \ rf_sr)" unfolding rf_sr_def using assms @@ -1111,7 +1114,6 @@ lemma rf_sr_upd_safe[simp]: and sa: "(ksSchedulerAction_' (globals (g y))) = (ksSchedulerAction_' (globals y))" and ct: "(ksCurThread_' (globals (g y))) = (ksCurThread_' (globals y))" and it: "(ksIdleThread_' (globals (g y))) = (ksIdleThread_' (globals y))" - and isn: "intStateIRQNode_'(globals (g y)) = intStateIRQNode_' (globals y)" and ist: "intStateIRQTable_'(globals (g y)) = intStateIRQTable_' (globals y)" and dsi: "ksDomScheduleIdx_' (globals (g y)) = ksDomScheduleIdx_' (globals y)" and cdom: "ksCurDomain_' (globals (g y)) = ksCurDomain_' (globals y)" @@ -1121,11 +1123,12 @@ lemma rf_sr_upd_safe[simp]: "x64KSCurrentUserCR3_' 
(globals (g y)) = x64KSCurrentUserCR3_' (globals y)" "phantom_machine_state_' (globals (g y)) = phantom_machine_state_' (globals y)" "num_ioapics_' (globals (g y)) = num_ioapics_' (globals y)" + "ioapic_nirqs_' (globals (g y)) = ioapic_nirqs_' (globals y)" "x86KSIRQState_' (globals (g y)) = x86KSIRQState_' (globals y)" and gs: "ghost'state_' (globals (g y)) = ghost'state_' (globals y)" and wu: "(ksWorkUnitsCompleted_' (globals (g y))) = (ksWorkUnitsCompleted_' (globals y))" shows "((a, (g y)) \ rf_sr) = ((a, y) \ rf_sr)" - using rl rq rqL1 rqL2 sa ct it isn ist arch wu gs dsi cdom dt by - (rule rf_sr_upd) + using assms by - (rule rf_sr_upd) (* More of a well-formed lemma, but \ *) lemma valid_mdb_cslift_next: @@ -1469,9 +1472,9 @@ lemma cmap_relation_cong: apply (erule imageI) done -lemma ctcb_relation_null_queue_ptrs: +lemma ctcb_relation_null_ep_ptrs: assumes rel: "cmap_relation mp mp' tcb_ptr_to_ctcb_ptr ctcb_relation" - and same: "map_option tcb_null_queue_ptrs \ mp'' = map_option tcb_null_queue_ptrs \ mp'" + and same: "map_option tcb_null_ep_ptrs \ mp'' = map_option tcb_null_ep_ptrs \ mp'" shows "cmap_relation mp mp'' tcb_ptr_to_ctcb_ptr ctcb_relation" using rel apply (rule iffD1 [OF cmap_relation_cong, OF _ map_option_eq_dom_eq, rotated -1]) @@ -1479,7 +1482,7 @@ lemma ctcb_relation_null_queue_ptrs: apply (rule same [symmetric]) apply (drule compD [OF same]) apply (case_tac b, case_tac b') - apply (simp add: ctcb_relation_def tcb_null_queue_ptrs_def) + apply (simp add: ctcb_relation_def tcb_null_ep_ptrs_def) done (* FIXME x64: do we still need these? @@ -1494,7 +1497,7 @@ lemma ntfnQueue_tail_mask_4 [simp]: lemma map_to_ctes_upd_tcb_no_ctes: "\ko_at' tcb thread s ; \x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x \ - \ map_to_ctes (ksPSpace s(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" apply (erule obj_atE') apply (simp add: projectKOs objBits_simps) apply (subst map_to_ctes_upd_tcb') @@ -1508,16 +1511,16 @@ lemma map_to_ctes_upd_tcb_no_ctes: lemma update_ntfn_map_tos: fixes P :: "Structures_H.notification \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KONotification ko)) = map_to_eps (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KONotification ko)) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KONotification ko)) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KONotification ko)) = map_to_eps (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KONotification ko)) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KONotification ko)) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KONotification ko)) 
= map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1525,16 +1528,16 @@ lemma update_ntfn_map_tos: lemma update_ep_map_tos: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOEndpoint ko)) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1542,15 +1545,15 @@ lemma update_ep_map_tos: lemma update_tcb_map_tos: fixes P :: "tcb \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" - and "map_to_ntfns (ksPSpace s(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOTCB ko)) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOTCB ko)) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" + and "map_to_ntfns ((ksPSpace s)(p \ KOTCB ko)) = 
map_to_ntfns (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOTCB ko)) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1558,16 +1561,16 @@ lemma update_tcb_map_tos: lemma update_asidpool_map_tos: fixes P :: "asidpool \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOArch (KOASIDPool ko))) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOArch (KOASIDPool ko))) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOArch (KOASIDPool ko))) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOASIDPool ko))) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI @@ -1576,28 +1579,28 @@ lemma update_asidpool_map_tos: arch_kernel_object.split_asm) lemma update_asidpool_map_to_asidpools: - "map_to_asidpools (ksPSpace s(p \ KOArch (KOASIDPool ap))) + "map_to_asidpools ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = (map_to_asidpools (ksPSpace s))(p \ ap)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_to_ptes: - "map_to_ptes (ksPSpace s(p \ KOArch (KOPTE pte))) + "map_to_ptes ((ksPSpace s)(p \ KOArch (KOPTE pte))) = (map_to_ptes (ksPSpace s))(p \ pte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_tos: fixes P :: "pte \ bool" assumes at: "obj_at' P p s" - shows 
"map_to_ntfns (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOArch (KOPTE ko))) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOArch (KOPTE ko))) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOArch (KOPTE ko))) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOPTE ko))) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1605,23 +1608,23 @@ lemma update_pte_map_tos: auto simp: projectKO_opts_defs) lemma update_pde_map_to_pdes: - "map_to_pdes (ksPSpace s(p \ KOArch (KOPDE pde))) + "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPDE pde))) = (map_to_pdes (ksPSpace s))(p \ pde)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pde_map_tos: fixes P :: "pde \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOArch (KOPDE ko))) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOArch (KOPDE ko))) = map_to_pdptes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ 
KOArch (KOPDE ko))) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOPDE ko))) = map_to_pdptes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1629,23 +1632,23 @@ lemma update_pde_map_tos: auto simp: projectKO_opts_defs) lemma update_pdpte_map_to_pdptes: - "map_to_pdptes (ksPSpace s(p \ KOArch (KOPDPTE pdpte))) + "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOPDPTE pdpte))) = (map_to_pdptes (ksPSpace s))(p \ pdpte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pdpte_map_tos: fixes P :: "pdpte \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOArch (KOPDPTE ko))) = map_to_pml4es (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOPDPTE ko))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOArch (KOPDPTE ko))) = map_to_pml4es (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPDPTE ko))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1653,23 +1656,23 @@ lemma update_pdpte_map_tos: auto simp: projectKO_opts_defs) lemma update_pml4e_map_to_pml4es: - "map_to_pml4es (ksPSpace s(p \ KOArch (KOPML4E pml4e))) + "map_to_pml4es ((ksPSpace s)(p \ KOArch (KOPML4E pml4e))) = (map_to_pml4es (ksPSpace s))(p \ pml4e)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pml4e_map_tos: fixes P :: 
"pml4e \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_ctes (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOArch (KOPML4E ko))) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOPML4E ko))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_ctes (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOPML4E ko))) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPML4E ko))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1755,7 +1758,6 @@ where | "thread_state_to_tsType (Structures_H.BlockedOnSend oref badge cg cgr isc) = scast ThreadState_BlockedOnSend" | "thread_state_to_tsType (Structures_H.BlockedOnNotification oref) = scast ThreadState_BlockedOnNotification" - lemma ctcb_relation_thread_state_to_tsType: "ctcb_relation tcb ctcb \ tsType_CL (thread_state_lift (tcbState_C ctcb)) = thread_state_to_tsType (tcbState tcb)" unfolding ctcb_relation_def cthread_state_relation_def @@ -1974,9 +1976,9 @@ lemma memory_cross_over: apply (cut_tac p=ptr in unat_mask_3_less_8) apply (subgoal_tac "(ptr && ~~ mask 3) + (ptr && mask 3) = ptr") apply (subgoal_tac "!n x. n < 8 \ (unat (x::machine_word) = n) = (x = of_nat n)") - apply (auto simp add: eval_nat_numeral unat_eq_0 add.commute - elim!: less_SucE)[1] - apply (clarsimp simp add: unat64_eq_of_nat word_bits_def) + apply (clarsimp simp: eval_nat_numeral) + apply (fastforce simp: add.commute elim!: less_SucE) + apply (clarsimp simp: unat64_eq_of_nat word_bits_def) apply (simp add: add.commute word_plus_and_or_coroll2) done @@ -2168,7 +2170,7 @@ lemma gs_set_assn_Delete_cstate_relation: lemma update_typ_at: assumes at: "obj_at' P p s" and tp: "\obj. 
P obj \ koTypeOf (injectKOS obj) = koTypeOf ko" - shows "typ_at' T p' (s \ksPSpace := ksPSpace s(p \ ko)\) = typ_at' T p' s" + shows "typ_at' T p' (s \ksPSpace := (ksPSpace s)(p \ ko)\) = typ_at' T p' s" using at by (auto elim!: obj_atE' simp: typ_at'_def ko_wp_at'_def dest!: tp[rule_format] @@ -2365,6 +2367,14 @@ lemma capTCBPtr_eq: apply clarsimp done +lemma rf_sr_ctcb_queue_relation: + "\ (s, s') \ rf_sr; d \ maxDomain; p \ maxPriority \ + \ ctcb_queue_relation (ksReadyQueues s (d, p)) + (index (ksReadyQueues_' (globals s')) (cready_queues_index_to_C d p))" + unfolding rf_sr_def cstate_relation_def cready_queues_relation_def + apply (clarsimp simp: Let_def seL4_MinPrio_def minDom_def maxDom_to_H maxPrio_to_H) + done + lemma rf_sr_sched_action_relation: "(s, s') \ rf_sr \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" @@ -2482,6 +2492,12 @@ lemma fpu_null_state_heap_update_tag_disj': by (clarsimp simp: fpu_null_state_relation_def hrs_mem_update_def hrs_htd_def split: prod.splits) +lemma rf_sr_obj_update_helper: + "(s, s'\ globals := globals s' \ t_hrs_' := t_hrs_' (globals (undefined + \ globals := (undefined \ t_hrs_' := f (globals s') (t_hrs_' (globals s')) \)\))\\) \ rf_sr + \ (s, globals_update (\v. t_hrs_'_update (f v) v) s') \ rf_sr" + by (simp cong: StateSpace.state.fold_congs globals.fold_congs) + lemmas h_t_valid_nested_fields = h_t_valid_field[OF h_t_valid_field[OF h_t_valid_field]] h_t_valid_field[OF h_t_valid_field] diff --git a/proof/crefine/X64/Schedule_C.thy b/proof/crefine/X64/Schedule_C.thy index 06dbb13037..e0b618e692 100644 --- a/proof/crefine/X64/Schedule_C.thy +++ b/proof/crefine/X64/Schedule_C.thy @@ -1,11 +1,12 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only *) theory Schedule_C -imports Tcb_C +imports Tcb_C Detype_C begin (*FIXME: arch_split: move up?*) @@ -32,19 +33,41 @@ lemma Arch_switchToIdleThread_ccorres: apply (clarsimp simp: invs_no_cicd'_def valid_pspace'_def valid_idle'_tcb_at'_ksIdleThread) done +lemma invs_no_cicd'_pspace_aligned': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_aligned' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + +lemma invs_no_cicd'_pspace_distinct': + "all_invs_but_ct_idle_or_in_cur_domain' s \ pspace_distinct' s" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + +lemma threadGet_exs_valid[wp]: + "tcb_at' t s \ \(=) s\ threadGet f t \\\r. (=) s\" + unfolding threadGet_def liftM_def + apply (wpsimp wp: exs_getObject) + apply (fastforce simp: obj_at'_def objBits_simps')+ + done + +lemma isRunnable_exs_valid[wp]: + "tcb_at' t s \ \(=) s\ isRunnable t \\\r. (=) s\" + unfolding isRunnable_def getThreadState_def + by (wpsimp wp: exs_getObject) + lemma switchToIdleThread_ccorres: "ccorres dc xfdc invs_no_cicd' UNIV hs switchToIdleThread (Call switchToIdleThread_'proc)" apply (cinit) + apply (rule ccorres_stateAssert) apply (rule ccorres_symb_exec_l) apply (ctac (no_vcg) add: Arch_switchToIdleThread_ccorres) apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule_tac P="\s. 
thread = ksIdleThread s" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: simpler_modify_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) - apply (wpsimp simp: X64_H.switchToIdleThread_def)+ + apply (wpsimp simp: X64_H.switchToIdleThread_def wp: hoare_drop_imps)+ done lemma Arch_switchToThread_ccorres: @@ -68,47 +91,34 @@ lemma switchToThread_ccorres: hs (switchToThread t) (Call switchToThread_'proc)" - apply (cinit lift: thread_') + apply (clarsimp simp: switchToThread_def) + apply (rule ccorres_symb_exec_l'[OF _ _ isRunnable_sp]; (solves wpsimp)?) + apply (rule ccorres_symb_exec_l'[OF _ _ assert_sp]; (solves wpsimp)?) + apply (rule ccorres_stateAssert_fwd)+ + apply (cinit' lift: thread_') apply (ctac (no_vcg) add: Arch_switchToThread_ccorres) apply (ctac (no_vcg) add: tcbSchedDequeue_ccorres) + apply (simp add: setCurThread_def) + apply (rule ccorres_stateAssert) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: setCurThread_def simpler_modify_def) - apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def - carch_state_relation_def cmachine_state_relation_def) - apply wp+ - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def) - done - -lemma get_tsType_ccorres2: - "ccorres (\r r'. r' = thread_state_to_tsType r) ret__unsigned_longlong_' (tcb_at' thread) - (UNIV \ {s. f s = tcb_ptr_to_ctcb_ptr thread} \ - {s. cslift s (Ptr &(f s\[''tcbState_C''])) = Some (thread_state_' s)}) [] - (getThreadState thread) (Call thread_state_get_tsType_'proc)" - unfolding getThreadState_def - apply (rule ccorres_from_spec_modifies [where P=\, simplified]) - apply (rule thread_state_get_tsType_spec) - apply (rule thread_state_get_tsType_modifies) - apply simp - apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: typ_heap_simps) - apply (rule bexI [rotated, OF threadGet_eq], assumption) - apply simp - apply (drule ctcb_relation_thread_state_to_tsType) - apply simp + apply (clarsimp simp: setCurThread_def simpler_modify_def rf_sr_def cstate_relation_def + Let_def carch_state_relation_def cmachine_state_relation_def) + apply (wpsimp wp: Arch_switchToThread_invs_no_cicd' hoare_drop_imps + | strengthen invs_no_cicd'_pspace_aligned' invs_no_cicd'_pspace_distinct')+ done lemma activateThread_ccorres: "ccorres dc xfdc (ct_in_state' activatable' and (\s. sch_act_wf (ksSchedulerAction s) s) - and valid_queues and valid_objs') + and valid_objs' and pspace_aligned' and pspace_distinct') UNIV [] activateThread (Call activateThread_'proc)" apply (cinit) apply (rule ccorres_pre_getCurThread) - apply (ctac add: get_tsType_ccorres2 [where f="\s. ksCurThread_' (globals s)"]) + apply (ctac add: get_tsType_ccorres [where f="\s. 
ksCurThread_' (globals s)"]) apply (rule_tac P="activatable' rv" in ccorres_gen_asm) apply (wpc) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) @@ -118,7 +128,7 @@ lemma activateThread_ccorres: apply (rule ccorres_cond_true) apply (rule ccorres_return_Skip) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) - apply (simp add: "StrictC'_thread_state_defs" del: Collect_const) + apply (simp add: ThreadState_defs del: Collect_const) apply (rule ccorres_cond_false) apply (rule ccorres_cond_false) apply (rule ccorres_cond_true) @@ -126,7 +136,7 @@ lemma activateThread_ccorres: apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: activateIdleThread_def return_def) apply (rule_tac P=\ and P'=UNIV in ccorres_inst, simp) - apply (simp add: "StrictC'_thread_state_defs" del: Collect_const) + apply (simp add: ThreadState_defs del: Collect_const) apply (rule ccorres_cond_false) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -149,7 +159,7 @@ lemma activateThread_ccorres: apply (subgoal_tac "ksCurThread_' (globals s') = tcb_ptr_to_ctcb_ptr (ksCurThread s)") prefer 2 apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (clarsimp simp: typ_heap_simps ThreadState_Running_def mask_def) + apply (clarsimp simp: typ_heap_simps ThreadState_defs mask_def) done lemma ceqv_Guard_UNIV_Skip: @@ -181,26 +191,55 @@ lemmas ccorres_remove_tail_Guard_Skip = ccorres_abstract[where xf'="\_. ()", OF ceqv_remove_tail_Guard_Skip] lemma switchToThread_ccorres': - "ccorres (\_ _. True) xfdc + "ccorres dc xfdc (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) hs (switchToThread t) (Call switchToThread_'proc)" apply (rule ccorres_guard_imp2) - apply (ctac (no_vcg) add: switchToThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: switchToThread_ccorres) apply auto done lemmas word_log2_max_word_word_size = word_log2_max[where 'a=machine_word_len, simplified word_size, simplified] +lemma ccorres_pre_getQueue: + assumes cc: "\queue. ccorres r xf (P queue) (P' queue) hs (f queue) c" + shows "ccorres r xf (\s. P (ksReadyQueues s (d, p)) s \ d \ maxDomain \ p \ maxPriority) + {s'. \queue. (let cqueue = index (ksReadyQueues_' (globals s')) + (cready_queues_index_to_C d p) in + ctcb_queue_relation queue cqueue) \ s' \ P' queue} + hs (getQueue d p >>= (\queue. f queue)) c" + apply (rule ccorres_guard_imp2) + apply (rule ccorres_symb_exec_l2) + defer + defer + apply (rule gq_sp) + defer + apply (rule ccorres_guard_imp) + apply (rule cc) + apply clarsimp + apply assumption + apply assumption + apply (clarsimp simp: getQueue_def gets_exs_valid) + apply clarsimp + apply (drule spec, erule mp) + apply (erule rf_sr_ctcb_queue_relation) + apply (simp add: maxDom_to_H maxPrio_to_H)+ + done + lemma chooseThread_ccorres: - "ccorres dc xfdc all_invs_but_ct_idle_or_in_cur_domain' UNIV [] chooseThread (Call chooseThread_'proc)" + "ccorres dc xfdc all_invs_but_ct_idle_or_in_cur_domain' UNIV [] + chooseThread (Call chooseThread_'proc)" proof - note prio_and_dom_limit_helpers [simp] note ksReadyQueuesL2Bitmap_nonzeroI [simp] note Collect_const_mem [simp] + + note prio_and_dom_limit_helpers[simp] word_sle_def[simp] maxDom_to_H[simp] maxPrio_to_H[simp] + note invert_prioToL1Index_c_simp[simp] (* when numDomains = 1, array bounds checks would become _ = 0 rather than _ < 1, changing the shape of the proof compared to when numDomains > 1 *) include no_less_1_simps @@ -209,9 +248,22 @@ proof - "\s. 
invs_no_cicd' s \ ksCurDomain s \ maxDomain" by (simp add: invs_no_cicd'_def) + have invs_no_cicd'_valid_bitmaps: + "\s. invs_no_cicd' s \ valid_bitmaps s" + by (simp add: invs_no_cicd'_def) + + have invs_no_cicd'_pspace_aligned': + "\s. invs_no_cicd' s \ pspace_aligned' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + + have invs_no_cicd'_pspace_distinct': + "\s. invs_no_cicd' s \ pspace_distinct' s" + by (simp add: invs_no_cicd'_def valid_pspace'_def) + show ?thesis supply if_split[split del] apply (cinit) + apply (rule ccorres_stateAssert)+ apply (simp add: numDomains_sge_1_simp) apply (rule_tac xf'=dom_' and r'="\rv rv'. rv' = ucast rv" in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) @@ -244,7 +296,7 @@ proof - apply (rule_tac P="curdom \ maxDomain" in ccorres_cross_over_guard_no_st) apply (rule_tac P="prio \ maxPriority" in ccorres_cross_over_guard_no_st) apply (rule ccorres_pre_getQueue) - apply (rule_tac P="queue \ []" in ccorres_cross_over_guard_no_st) + apply (rule_tac P="\ tcbQueueEmpty queue" in ccorres_cross_over_guard_no_st) apply (rule ccorres_symb_exec_l) apply (rule ccorres_assert) apply (rule ccorres_symb_exec_r) @@ -259,39 +311,40 @@ proof - apply (rule conseqPre, vcg) apply (rule Collect_mono) apply clarsimp - apply (strengthen queue_in_range) apply assumption apply clarsimp apply (rule conseqPre, vcg) apply clarsimp apply (wp isRunnable_wp)+ - apply (simp add: isRunnable_def) - apply wp apply (clarsimp simp: Let_def guard_is_UNIV_def) - apply (drule invs_no_cicd'_queues) - apply (case_tac queue, simp) - apply (clarsimp simp: tcb_queue_relation'_def cready_queues_index_to_C_def numPriorities_def) - apply (clarsimp simp add: maxDom_to_H maxPrio_to_H - queue_in_range[where qdom=0, simplified, simplified maxPrio_to_H]) - apply (clarsimp simp: le_maxDomain_eq_less_numDomains unat_trans_ucast_helper ) + apply (rule conjI) + apply (clarsimp simp: le_maxDomain_eq_less_numDomains unat_trans_ucast_helper) + apply (intro conjI impI) + apply (clarsimp simp: cready_queues_index_to_C_def numPriorities_def ctcb_queue_relation_def + tcbQueueEmpty_def option_to_ctcb_ptr_def) + apply (frule_tac qdom=curdom and prio=rv in cready_queues_index_to_C_in_range') + apply fastforce + apply (clarsimp simp: num_tcb_queues_val word_less_nat_alt cready_queues_index_to_C_def2) apply wpsimp apply (clarsimp simp: guard_is_UNIV_def le_maxDomain_eq_less_numDomains word_less_nat_alt numDomains_less_numeric_explicit) - apply (frule invs_no_cicd'_queues) + apply clarsimp apply (frule invs_no_cicd'_max_CurDomain) - apply (frule invs_no_cicd'_queues) - apply (clarsimp simp: valid_queues_def lookupBitmapPriority_le_maxPriority) + apply (frule invs_no_cicd'_pspace_aligned') + apply (frule invs_no_cicd'_pspace_distinct') + apply (frule invs_no_cicd'_valid_bitmaps) + apply (frule valid_bitmaps_bitmapQ_no_L1_orphans) + apply (frule valid_bitmaps_valid_bitmapQ) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def cong: conj_cong) apply (intro conjI impI) - apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (fastforce dest: lookupBitmapPriority_obj_at' - simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) - apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) - apply (clarsimp simp: not_less le_maxDomain_eq_less_numDomains) - apply (prop_tac "ksCurDomain s = 0") - using unsigned_eq_0_iff apply force - 
apply (cut_tac s=s in lookupBitmapPriority_obj_at'; simp?) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (fastforce intro: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) + apply (fastforce dest: lookupBitmapPriority_le_maxPriority) + apply (fastforce dest!: bitmapQ_from_bitmap_lookup valid_bitmapQ_bitmapQ_simp) + apply (fastforce dest!: lookupBitmapPriority_obj_at' + simp: ready_queue_relation_def ksReadyQueues_asrt_def st_tcb_at'_def obj_at'_def) done qed @@ -372,7 +425,6 @@ lemma isHighestPrio_ccorres: (isHighestPrio d p) (Call isHighestPrio_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -406,14 +458,13 @@ lemma isHighestPrio_ccorres: apply (rule ccorres_return_C, simp, simp, simp) apply (rule wp_post_taut) apply (vcg exspec=getHighestPrio_modifies)+ - apply (clarsimp simp: word_le_nat_alt true_def to_bool_def maxDomain_le_unat_ucast_explicit + apply (clarsimp simp: word_le_nat_alt maxDomain_le_unat_ucast_explicit split: if_splits) done lemma schedule_ccorres: "ccorres dc xfdc invs' UNIV [] schedule (Call schedule_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -427,7 +478,7 @@ lemma schedule_ccorres: apply (rule ccorres_cond_false_seq) apply simp apply (rule_tac P=\ and P'="{s. ksSchedulerAction_' (globals s) = NULL }" in ccorres_from_vcg) - apply (clarsimp simp: dc_def return_def split: prod.splits) + apply (clarsimp simp: return_def split: prod.splits) apply (rule conseqPre, vcg, clarsimp) (* toplevel case: action is choose new thread *) apply (rule ccorres_cond_true_seq) @@ -444,7 +495,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_true_seq) (* isolate haskell part before setting thread action *) apply (simp add: scheduleChooseNewThread_def) @@ -472,7 +523,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_false_seq) apply (rule_tac xf'=was_runnable_' in ccorres_abstract, ceqv) @@ -492,7 +543,7 @@ lemma schedule_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'=fastfail_' in ccorres_split_nothrow) - apply (clarsimp simp: scheduleSwitchThreadFastfail_def dc_simp) + apply (clarsimp simp: scheduleSwitchThreadFastfail_def) apply (rule ccorres_cond_seq2[THEN iffD1]) apply (rule_tac xf'=ret__int_' and val="from_bool (curThread = it)" and R="\s. it = ksIdleThread s \ curThread = ksCurThread s" and R'=UNIV @@ -503,17 +554,17 @@ lemma schedule_ccorres: apply (rule ccorres_cond2'[where R=\], fastforce) apply clarsimp apply (rule ccorres_return[where R'=UNIV], clarsimp, vcg) - apply (rule_tac P="\s. 
obj_at' (\tcb. tcbPriority tcb = curPrio) curThread s - \ curThread = ksCurThread s - \ obj_at' (\tcb. tcbPriority tcb = targetPrio) candidate s" - and P'=UNIV in ccorres_from_vcg) - apply clarsimp - apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def cur_tcb'_def rf_sr_ksCurThread) - apply (drule (1) obj_at_cslift_tcb)+ - apply (clarsimp simp: typ_heap_simps ctcb_relation_def to_bool_def split: if_split) - apply unat_arith - apply (wpsimp wp: threadGet_obj_at2) + apply (rule_tac P="\s. obj_at' (\tcb. tcbPriority tcb = curPrio) curThread s + \ curThread = ksCurThread s + \ obj_at' (\tcb. tcbPriority tcb = targetPrio) candidate s" + and P'=UNIV in ccorres_from_vcg) + apply clarsimp + apply (rule conseqPre, vcg) + apply (clarsimp simp: return_def cur_tcb'_def rf_sr_ksCurThread) + apply (drule (1) obj_at_cslift_tcb)+ + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) + apply unat_arith + apply clarsimp apply vcg apply ceqv (* fastfail calculation complete *) @@ -529,18 +580,17 @@ lemma schedule_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres, clarsimp) - apply (clarsimp simp: to_bool_def) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_return) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: to_bool_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) apply (fastforce simp: isHighestPrio_def' gets_def return_def get_def - NonDetMonad.bind_def + Nondet_Monad.bind_def split: prod.split) apply ceqv apply (clarsimp simp: to_bool_def) @@ -574,10 +624,10 @@ lemma schedule_ccorres: in ccorres_symb_exec_r_known_rv) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: false_def cur_tcb'_def rf_sr_ksCurThread) + apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread) apply (drule (1) obj_at_cslift_tcb)+ - apply (clarsimp simp: typ_heap_simps ctcb_relation_def to_bool_def split: if_split) + apply (clarsimp simp: typ_heap_simps ctcb_relation_def split: if_split) apply (solves \unat_arith, rule iffI; simp\) apply ceqv apply clarsimp @@ -618,13 +668,13 @@ lemma schedule_ccorres: apply (wp (once) hoare_drop_imps) apply wp apply (strengthen strenghten_False_imp[where P="a = ResumeCurrentThread" for a]) - apply (clarsimp simp: conj_ac invs_queues invs_valid_objs' cong: conj_cong) + apply (clarsimp simp: conj_ac invs_valid_objs' cong: conj_cong) apply wp apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) apply (clarsimp, vcg exspec=tcbSchedEnqueue_modifies) - apply (clarsimp simp: to_bool_def true_def) + apply clarsimp apply (strengthen ko_at'_obj_at'_field) - apply (clarsimp cong: imp_cong simp: ko_at'_obj_at'_field to_bool_def true_def) + apply (clarsimp cong: imp_cong simp: ko_at'_obj_at'_field) apply wp apply clarsimp (* when runnable tcbSchedEnqueue curThread *) @@ -633,21 +683,21 @@ lemma schedule_ccorres: apply (clarsimp simp: invs'_bitmapQ_no_L1_orphans invs_ksCurDomain_maxDomain') apply (fastforce dest: invs_sch_act_wf') - apply (wp | clarsimp simp: dc_def)+ + apply wpsimp+ apply (vcg exspec=tcbSchedEnqueue_modifies) apply wp - apply (clarsimp simp: to_bool_def false_def) apply vcg - apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs' - dc_def)+ + apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_valid_objs') apply (frule invs_sch_act_wf') apply 
(frule tcb_at_invs') + apply (frule invs_pspace_aligned') + apply (frule invs_pspace_distinct') apply (rule conjI) apply (clarsimp dest!: rf_sr_cscheduler_relation simp: cscheduler_action_relation_def) apply (rule conjI; clarsimp) apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: cscheduler_action_relation_def typ_heap_simps max_word_not_0 + apply (clarsimp simp: cscheduler_action_relation_def typ_heap_simps split: scheduler_action.splits) apply (frule (1) obj_at_cslift_tcb) apply (clarsimp dest!: rf_sr_cscheduler_relation invs_sch_act_wf' @@ -658,7 +708,7 @@ lemma schedule_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -692,11 +742,7 @@ lemma threadSet_timeSlice_ccorres [corres]: map_to_tcbs_upd) apply (simp add: cep_relations_drop_fun_upd cvariable_relation_upd_const ko_at_projectKO_opt) - apply (rule conjI) defer - apply (erule cready_queues_relation_not_queue_ptrs) - apply (rule ext, simp split: if_split) - apply (rule ext, simp split: if_split) apply (drule ko_at_projectKO_opt) apply (erule (2) cmap_relation_upd_relI) apply (simp add: ctcb_relation_def) @@ -709,10 +755,10 @@ lemma timerTick_ccorres: supply subst_all [simp del] apply (cinit) apply (rule ccorres_pre_getCurThread) - apply (ctac add: get_tsType_ccorres2 [where f="\s. ksCurThread_' (globals s)"]) + apply (ctac add: get_tsType_ccorres [where f="\s. ksCurThread_' (globals s)"]) apply (rule ccorres_split_nothrow_novcg) apply wpc - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ (* thread_state.Running *) apply simp apply (rule ccorres_cond_true) @@ -734,17 +780,17 @@ lemma timerTick_ccorres: apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) apply (clarsimp simp: cur_tcb'_def) apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) - apply (ctac add: threadSet_timeSlice_ccorres[unfolded dc_def]) + apply (ctac add: threadSet_timeSlice_ccorres) apply (rule ccorres_rhs_assoc)+ apply (ctac) apply simp apply (ctac (no_vcg) add: tcbSchedAppend_ccorres) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (wp weak_sch_act_wf_lift_linear threadSet_valid_queues + apply (ctac add: rescheduleRequired_ccorres) + apply (wp weak_sch_act_wf_lift_linear threadSet_pred_tcb_at_state tcbSchedAppend_valid_objs' threadSet_valid_objs' threadSet_tcbDomain_triv | clarsimp simp: st_tcb_at'_def o_def split: if_splits)+ apply (vcg exspec=tcbSchedDequeue_modifies) - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: ThreadState_defs, rule ccorres_cond_false, rule ccorres_return_Skip)+ apply ceqv apply (clarsimp simp: decDomainTime_def numDomains_sge_1_simp) apply (rule ccorres_when[where R=\]) @@ -756,7 +802,6 @@ lemma timerTick_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply ceqv - apply (fold dc_def) apply (rule ccorres_pre_getDomainTime) apply (rename_tac rva rv'a rvb) apply (rule_tac P'="{s. 
ksDomainTime_' (globals s) = rvb}" in ccorres_inst, simp) @@ -764,13 +809,13 @@ lemma timerTick_ccorres: apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_true) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply clarsimp apply assumption apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply clarsimp apply wp apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/X64/StateRelation_C.thy b/proof/crefine/X64/StateRelation_C.thy index 7f1b6b9949..0f17b0fbc4 100644 --- a/proof/crefine/X64/StateRelation_C.thy +++ b/proof/crefine/X64/StateRelation_C.thy @@ -16,8 +16,7 @@ definition definition "array_relation r n a c \ \i \ n. r (a i) (index c (unat i))" -(* used for bound ntfn/tcb *) -definition +definition option_to_ctcb_ptr :: "machine_word option \ tcb_C ptr" where "option_to_ctcb_ptr x \ case x of None \ NULL | Some t \ tcb_ptr_to_ctcb_ptr t" @@ -192,6 +191,7 @@ where global_ioport_bitmap_relation (clift (t_hrs_' cstate)) (x64KSAllocatedIOPorts astate) \ fpu_null_state_relation (t_hrs_' cstate) \ x64KSNumIOAPICs astate = UCAST (32 \ 64) (num_ioapics_' cstate) \ + array_relation (=) (of_nat Kernel_Config.maxNumIOAPIC) (x64KSIOAPICnIRQs astate) (ioapic_nirqs_' cstate) \ array_relation x64_irq_state_relation maxIRQ (x64KSIRQState astate) (x86KSIRQState_' cstate) \ carch_globals astate" @@ -439,7 +439,9 @@ where \ tcbTimeSlice atcb = unat (tcbTimeSlice_C ctcb) \ cfault_rel (tcbFault atcb) (seL4_Fault_lift (tcbFault_C ctcb)) (lookup_fault_lift (tcbLookupFailure_C ctcb)) - \ option_to_ptr (tcbBoundNotification atcb) = tcbBoundNotification_C ctcb" + \ option_to_ptr (tcbBoundNotification atcb) = tcbBoundNotification_C ctcb + \ option_to_ctcb_ptr (tcbSchedPrev atcb) = tcbSchedPrev_C ctcb + \ option_to_ctcb_ptr (tcbSchedNext atcb) = tcbSchedNext_C ctcb" abbreviation "ep_queue_relation' \ tcb_queue_relation' tcbEPNext_C tcbEPPrev_C" @@ -776,17 +778,17 @@ definition where "cready_queues_index_to_C qdom prio \ (unat qdom) * numPriorities + (unat prio)" -definition cready_queues_relation :: - "tcb_C typ_heap \ (tcb_queue_C[num_tcb_queues]) \ (domain \ priority \ ready_queue) \ bool" -where - "cready_queues_relation h_tcb queues aqueues \ - \qdom prio. ((qdom \ ucast minDom \ qdom \ ucast maxDom \ - prio \ ucast minPrio \ prio \ ucast maxPrio) \ - (let cqueue = index queues (cready_queues_index_to_C qdom prio) in - sched_queue_relation' h_tcb (aqueues (qdom, prio)) (head_C cqueue) (end_C cqueue))) - \ (\ (qdom \ ucast minDom \ qdom \ ucast maxDom \ - prio \ ucast minPrio \ prio \ ucast maxPrio) \ aqueues (qdom, prio) = [])" +definition ctcb_queue_relation :: "tcb_queue \ tcb_queue_C \ bool" where + "ctcb_queue_relation aqueue cqueue \ + head_C cqueue = option_to_ctcb_ptr (tcbQueueHead aqueue) + \ end_C cqueue = option_to_ctcb_ptr (tcbQueueEnd aqueue)" +definition cready_queues_relation :: + "(domain \ priority \ ready_queue) \ (tcb_queue_C[num_tcb_queues]) \ bool" + where + "cready_queues_relation aqueues cqueues \ + \d p. 
d \ maxDomain \ p \ maxPriority + \ ctcb_queue_relation (aqueues (d, p)) (index cqueues (cready_queues_index_to_C d p))" abbreviation "cte_array_relation astate cstate @@ -926,9 +928,7 @@ where "cstate_relation astate cstate \ let cheap = t_hrs_' cstate in cpspace_relation (ksPSpace astate) (underlying_memory (ksMachineState astate)) cheap \ - cready_queues_relation (clift cheap) - (ksReadyQueues_' cstate) - (ksReadyQueues astate) \ + cready_queues_relation (ksReadyQueues astate) (ksReadyQueues_' cstate) \ zero_ranges_are_zero (gsUntypedZeroRanges astate) cheap \ cbitmap_L1_relation (ksReadyQueuesL1Bitmap_' cstate) (ksReadyQueuesL1Bitmap astate) \ cbitmap_L2_relation (ksReadyQueuesL2Bitmap_' cstate) (ksReadyQueuesL2Bitmap astate) \ diff --git a/proof/crefine/X64/SyscallArgs_C.thy b/proof/crefine/X64/SyscallArgs_C.thy index b2323eeb5e..4cf05834c6 100644 --- a/proof/crefine/X64/SyscallArgs_C.thy +++ b/proof/crefine/X64/SyscallArgs_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -46,10 +47,8 @@ lemma replyOnRestart_invs'[wp]: "\invs'\ replyOnRestart thread reply isCall \\rv. invs'\" including no_pre apply (simp add: replyOnRestart_def) - apply (wp setThreadState_nonqueued_state_update rfk_invs' static_imp_wp) - apply (rule hoare_vcg_all_lift) - apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_vcg_all_lift rfk_ksQ) - apply (rule hoare_strengthen_post, rule gts_sp') + apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_weak_lift_imp) + apply (rule hoare_strengthen_post, rule gts_sp') apply (clarsimp simp: pred_tcb_at') apply (auto elim!: pred_tcb'_weakenE st_tcb_ex_cap'' dest: st_tcb_at_idle_thread') @@ -288,7 +287,7 @@ lemma ccorres_invocationCatch_Inr: if reply = [] then liftE (replyOnRestart thread [] isCall) \ returnOk () else liftE (replyOnRestart thread reply isCall) odE od) c" - apply (simp add: invocationCatch_def liftE_bindE o_xo_injector) + apply (simp add: invocationCatch_def liftE_bindE o_xo_injector cong: ccorres_all_cong) apply (subst ccorres_liftM_simp[symmetric]) apply (simp add: liftM_def bind_assoc bindE_def) apply (rule_tac f="\f. ccorres rvr xs P P' hs f c" for rvr xs in arg_cong) @@ -414,11 +413,13 @@ lemma is_syscall_error_codes: by ((rule iffD2[OF is_syscall_error_code_def], intro allI, rule conseqPre, vcg, safe, (simp_all add: o_def)?)+) -lemma syscall_error_throwError_ccorres_direct: +lemma syscall_error_throwError_ccorres_direct_gen: "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; \err' ft'. syscall_error_to_H (f err') ft' = Some err \ \ - ccorres (intr_and_se_rel \ dc) (liftxf errstate id v' ret__unsigned_long_') + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') \ (UNIV) (SKIP # hs) (throwError (Inl err)) code" apply (rule ccorres_from_vcg_throws) @@ -428,28 +429,35 @@ lemma syscall_error_throwError_ccorres_direct: apply (simp add: syscall_error_rel_def exception_defs) done -lemma syscall_error_throwError_ccorres_succs: +lemma syscall_error_throwError_ccorres_succs_gen: "\ is_syscall_error_code f code; + \x y g. arrel (Inl x) y = (intr_and_se_rel \ g) (Inl x) y; \err' ft'. 
syscall_error_to_H (f err') ft' = Some err \ \ - ccorres (intr_and_se_rel \ dc) (liftxf errstate id v' ret__unsigned_long_') + ccorres_underlying rf_sr \ rrel xf + arrel (liftxf errstate id v' ret__unsigned_long_') \ (UNIV) (SKIP # hs) (throwError (Inl err)) (code ;; remainder)" apply (rule ccorres_guard_imp2, rule ccorres_split_throws) - apply (erule syscall_error_throwError_ccorres_direct) - apply simp + apply (erule syscall_error_throwError_ccorres_direct_gen; assumption) apply (rule HoarePartialProps.augment_Faults) apply (erule iffD1[OF is_syscall_error_code_def, THEN spec]) apply simp+ done -lemmas syscall_error_throwError_ccorres_n = - is_syscall_error_codes[THEN syscall_error_throwError_ccorres_direct, +lemmas syscall_error_throwError_ccorres_n_gen = + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_direct_gen, simplified o_apply] - is_syscall_error_codes[THEN syscall_error_throwError_ccorres_succs, + is_syscall_error_codes[THEN syscall_error_throwError_ccorres_succs_gen, simplified o_apply] +lemmas syscall_error_throwError_ccorres_n = + syscall_error_throwError_ccorres_n_gen[where arrel="intr_and_se_rel \ dc", simplified] + +lemmas syscall_error_throwError_ccorres_n_inl_rrel = + syscall_error_throwError_ccorres_n_gen[where arrel="inl_rrel (intr_and_se_rel \ dc)", simplified] + definition idButNot :: "'a \ 'a" where "idButNot x = x" @@ -654,9 +662,9 @@ lemma getMRs_tcbContext: apply (thin_tac "thread = t" for t) apply (clarsimp simp add: getMRs_def) apply (wp|wpc)+ - apply (rule_tac P="n < length x" in hoare_gen_asm) + apply (rule_tac P="n < length rv" in hoare_gen_asm) apply (clarsimp simp: nth_append) - apply (wp mapM_wp' static_imp_wp)+ + apply (wp mapM_wp' hoare_weak_lift_imp)+ apply simp apply (rule asUser_cur_obj_at') apply (simp add: getRegister_def msgRegisters_unfold) @@ -782,12 +790,12 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_move_array_assertion_tcb_ctes) apply (ctac (no_vcg)) apply csymbr - apply (rule_tac b="isArchObjectCap rva \ isPageCap (capCap rva)" in ccorres_case_bools') + apply (rule_tac b="isArchObjectCap rv \ isPageCap (capCap rv)" in ccorres_case_bools') apply simp apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap rv" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -810,8 +818,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (frule cap_get_tag_isCap_unfolded_H_cap(18),simp) apply (frule capFVMRights_range) apply (simp add: cap_frame_cap_lift) - apply (clarsimp simp: cap_to_H_def vmrights_to_H_def to_bool_def - word_le_make_less + apply (clarsimp simp: cap_to_H_def vmrights_to_H_def word_le_make_less Kernel_C.VMReadWrite_def Kernel_C.VMReadOnly_def Kernel_C.VMKernelOnly_def dest: word_less_cases) @@ -994,7 +1001,7 @@ lemma getMRs_rel: getMRs thread buffer mi \\args. getMRs_rel args buffer\" apply (simp add: getMRs_rel_def) apply (rule hoare_pre) - apply (rule_tac x=mi in hoare_vcg_exI) + apply (rule_tac x=mi in hoare_exI) apply wp apply (rule_tac Q="\rv s. 
thread = ksCurThread s \ fst (getMRs thread buffer mi s) = {(rv,s)}" in hoare_strengthen_post) apply (wp det_result det_wp_getMRs) @@ -1141,7 +1148,7 @@ lemma getSyscallArg_ccorres_foo: apply (simp add: index_msgRegisters_less unat_less_helper) apply wp[1] apply (wp getMRs_tcbContext) - apply simp + apply fastforce apply (rule ccorres_seq_skip [THEN iffD2]) apply (rule ccorres_add_return2) apply (rule ccorres_symb_exec_l) @@ -1165,7 +1172,7 @@ lemma getSyscallArg_ccorres_foo: in hoare_pre(1)) apply (wp getMRs_user_word) apply (clarsimp simp: msgMaxLength_def unat_less_helper) - apply simp + apply fastforce apply (clarsimp simp: sysargs_rel_def sysargs_rel_n_def) apply (rule conjI, clarsimp simp: unat_of_nat64 word_bits_def) apply (drule equalityD2) diff --git a/proof/crefine/X64/Syscall_C.thy b/proof/crefine/X64/Syscall_C.thy index 99cdb2475f..cf60ace7f6 100644 --- a/proof/crefine/X64/Syscall_C.thy +++ b/proof/crefine/X64/Syscall_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -19,9 +20,6 @@ end context kernel_m begin -(* FIXME: should do this from the beginning *) -declare true_def [simp] false_def [simp] - lemma ccorres_If_False: "ccorres_underlying sr Gamm r xf arrel axf R R' hs b c \ ccorres_underlying sr Gamm r xf arrel axf @@ -51,8 +49,7 @@ lemma cap_cases_one_on_true_sum: lemma performInvocation_Endpoint_ccorres: "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and st_tcb_at' simple' thread and ep_at' epptr - and sch_act_sane and (\s. thread = ksCurThread s - \ (\p. ksCurThread s \ set (ksReadyQueues s p)))) + and sch_act_sane and (\s. thread = ksCurThread s)) (UNIV \ {s. block_' s = from_bool blocking} \ {s. call_' s = from_bool do_call} \ {s. badge_' s = badge} @@ -124,7 +121,6 @@ lemma decodeInvocation_ccorres: and (\s. \v \ set extraCaps. ex_cte_cap_wp_to' isCNodeCap (snd v) s) and (\s. \v \ set extraCaps. s \' fst v \ cte_at' (snd v) s) and (\s. \v \ set extraCaps. \y \ zobj_refs' (fst v). ex_nonz_cap_to' y s) - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) and sysargs_rel args buffer) (UNIV \ {s. call_' s = from_bool isCall} \ {s. 
block_' s = from_bool isBlocking} @@ -200,7 +196,7 @@ lemma decodeInvocation_ccorres: apply simp apply (rule hoare_use_eq[where f=ksCurThread]) apply (wp sts_invs_minor' sts_st_tcb_at'_cases - setThreadState_ct' hoare_vcg_all_lift sts_ksQ')+ + setThreadState_ct' hoare_vcg_all_lift)+ apply simp apply (vcg exspec=setThreadState_modifies) apply vcg @@ -270,22 +266,22 @@ lemma decodeInvocation_ccorres: apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, rule decodeTCBInvocation_ccorres) apply assumption apply (simp+)[3] apply (rule ccorres_Cond_rhs) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeDomainInvocation_ccorres[unfolded o_def], + erule decodeDomainInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeCNodeInvocation_ccorres[unfolded o_def], + erule decodeCNodeInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply simp @@ -317,7 +313,7 @@ lemma decodeInvocation_ccorres: apply fastforce apply (simp add: cap_lift_capEPBadge_mask_eq) apply (clarsimp simp: rf_sr_ksCurThread Collect_const_mem - cap_get_tag_isCap "StrictC'_thread_state_defs") + cap_get_tag_isCap ThreadState_defs) apply (frule word_unat.Rep_inverse') apply (simp add: cap_get_tag_isCap[symmetric] cap_get_tag_ReplyCap) apply (rule conjI) @@ -491,7 +487,7 @@ lemma handleInvocation_def2: lemma thread_state_to_tsType_eq_Restart: "(thread_state_to_tsType ts = scast ThreadState_Restart) = (ts = Restart)" - by (cases ts, simp_all add: "StrictC'_thread_state_defs") + by (cases ts, simp_all add: ThreadState_defs) lemma wordFromMessageInfo_spec: "\s. \\ {s} Call wordFromMessageInfo_'proc @@ -508,7 +504,7 @@ lemma wordFromMessageInfo_spec: lemma handleDoubleFault_ccorres: "ccorres dc xfdc (invs' and tcb_at' tptr and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and - sch_act_not tptr and (\s. \p. tptr \ set (ksReadyQueues s p))) + sch_act_not tptr) (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) [] (handleDoubleFault tptr ex1 ex2) (Call handleDoubleFault_'proc)" @@ -522,7 +518,7 @@ lemma handleDoubleFault_ccorres: apply (simp add: getRestartPC_def) apply wp apply clarsimp - apply (simp add: ThreadState_Inactive_def) + apply (simp add: ThreadState_defs) apply (fastforce simp: valid_tcb_state'_def) done @@ -573,8 +569,7 @@ lemma hrs_mem_update_use_hrs_mem: lemma sendFaultIPC_ccorres: "ccorres (cfault_rel2 \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and st_tcb_at' simple' tptr and sch_act_not tptr and - (\s. \p. tptr \ set (ksReadyQueues s p))) + (invs' and st_tcb_at' simple' tptr and sch_act_not tptr) (UNIV \ {s. (cfault_rel (Some fault) (seL4_Fault_lift(current_fault_' (globals s))) (lookup_fault_lift(current_lookup_fault_' (globals s))))} \ {s. 
tptr_' s = tcb_ptr_to_ctcb_ptr tptr}) @@ -651,15 +646,15 @@ lemma sendFaultIPC_ccorres: apply (ctac (no_vcg) add: sendIPC_ccorres) apply (ctac (no_vcg) add: ccorres_return_CE [unfolded returnOk_def comp_def]) apply wp - apply (wp threadSet_pred_tcb_no_state threadSet_invs_trivial threadSet_typ_at_lifts - | simp)+ + apply (wpsimp wp: threadSet_invs_trivial) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_typ_at_lifts) apply (clarsimp simp: guard_is_UNIV_def) apply (subgoal_tac "capEPBadge epcap && mask 64 = capEPBadge epcap") apply (clarsimp simp: cap_get_tag_isCap isEndpointCap_def isCap_simps ccap_relation_ep_helpers) apply (drule cap_get_tag_isCap(4)[symmetric]) - apply (clarsimp simp: cap_get_tag_EndpointCap to_bool_def) + apply (clarsimp simp: cap_get_tag_EndpointCap) apply (clarsimp simp: case_bool_If) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply clarsimp @@ -685,10 +680,9 @@ lemma sendFaultIPC_ccorres: apply vcg apply (clarsimp simp: inQ_def) apply (rule_tac Q="\a b. invs' b \ st_tcb_at' simple' tptr b - \ sch_act_not tptr b \ valid_cap' a b - \ (\p. tptr \ set (ksReadyQueues b p))" + \ sch_act_not tptr b \ valid_cap' a b" and E="\ _. \" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (wp) apply (clarsimp simp: isCap_simps) apply (clarsimp simp: valid_cap'_def pred_tcb_at')+ @@ -701,8 +695,7 @@ lemma sendFaultIPC_ccorres: done lemma handleFault_ccorres: - "ccorres dc xfdc (invs' and st_tcb_at' simple' t and - sch_act_not t and (\s. \p. t \ set (ksReadyQueues s p))) + "ccorres dc xfdc (invs' and st_tcb_at' simple' t and sch_act_not t) (UNIV \ {s. (cfault_rel (Some flt) (seL4_Fault_lift(current_fault_' (globals s))) (lookup_fault_lift(current_lookup_fault_' (globals s))) )} \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t}) @@ -719,12 +712,12 @@ lemma handleFault_ccorres: apply (rule ccorres_return_Skip') apply clarsimp apply (rule ccorres_cond_univ) - apply (ctac (no_vcg) add: handleDoubleFault_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: handleDoubleFault_ccorres) apply (simp add: sendFaultIPC_def) apply wp - apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] apply clarsimp - apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] + apply ((wp hoare_vcg_all_liftE_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] apply (wp) apply (simp add: guard_is_UNIV_def) apply (simp add: guard_is_UNIV_def) @@ -766,9 +759,7 @@ lemma getMRs_length: lemma handleInvocation_ccorres: "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and - ct_active' and sch_act_simple and - (\s. \x. ksCurThread s \ set (ksReadyQueues s x))) + (invs' and ct_active' and sch_act_simple) (UNIV \ {s. isCall_' s = from_bool isCall} \ {s. isBlocking_' s = from_bool isBlocking}) [] (handleInvocation isCall isBlocking) (Call handleInvocation_'proc)" @@ -892,17 +883,16 @@ lemma handleInvocation_ccorres: apply (simp add: invocationCatch_def o_def) apply (rule_tac Q="\rv'. invs' and tcb_at' rv" and E="\ft. 
invs' and tcb_at' rv" - in hoare_post_impErr) - apply (wp hoare_split_bind_case_sumE - alternative_wp hoare_drop_imps + in hoare_strengthen_postE) + apply (wp hoare_split_bind_case_sumE hoare_drop_imps setThreadState_nonqueued_state_update ct_in_state'_set setThreadState_st_tcb - hoare_vcg_all_lift sts_ksQ' + hoare_vcg_all_lift | wpc | wps)+ apply auto[1] apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) - apply (simp add: "StrictC'_thread_state_defs" mask_def) + apply (simp add: ThreadState_defs mask_def) apply (simp add: typ_heap_simps) apply (case_tac ts, simp_all add: cthread_state_relation_def)[1] apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) @@ -1059,7 +1049,7 @@ lemma handleReply_ccorres: apply (rule ccorres_cond_true) apply simp apply (rule ccorres_return_void_catchbrk) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply (vcg exspec=doReplyTransfer_modifies) apply (rule ccorres_fail)+ apply (wpc, simp_all) @@ -1077,7 +1067,6 @@ lemma handleReply_ccorres: apply (csymbr, csymbr, csymbr) apply simp apply (rule ccorres_assert2) - apply (fold dc_def) apply (rule ccorres_add_return2) apply (ctac (no_vcg)) apply (rule ccorres_return_void_catchbrk) @@ -1158,9 +1147,6 @@ lemma ccorres_trim_redundant_throw_break: lemma invs_valid_objs_strengthen: "invs' s \ valid_objs' s" by fastforce -lemma ct_not_ksQ_strengthen: - "thread = ksCurThread s \ ksCurThread s \ set (ksReadyQueues s p) \ thread \ set (ksReadyQueues s p)" by fastforce - lemma option_to_ctcb_ptr_valid_ntfn: "valid_ntfn' ntfn s ==> (option_to_ctcb_ptr (ntfnBoundTCB ntfn) = NULL) = (ntfnBoundTCB ntfn = None)" apply (cases "ntfnBoundTCB ntfn", simp_all add: option_to_ctcb_ptr_def) @@ -1194,8 +1180,7 @@ lemma handleRecv_ccorres: notes rf_sr_upd_safe[simp del] shows "ccorres dc xfdc - (\s. invs' s \ st_tcb_at' simple' (ksCurThread s) s - \ sch_act_sane s \ (\p. ksCurThread s \ set (ksReadyQueues s p))) + (\s. invs' s \ st_tcb_at' simple' (ksCurThread s) s \ sch_act_sane s) {s. isBlocking_' s = from_bool isBlocking} [] (handleRecv isBlocking) @@ -1238,7 +1223,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1256,10 +1241,10 @@ lemma handleRecv_ccorres: apply (simp add: liftE_bind) apply (ctac) - apply (rule_tac P="\s. ksCurThread s = rv" in ccorres_cross_over_guard) - apply (ctac add: receiveIPC_ccorres[unfolded dc_def]) + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: receiveIPC_ccorres) - apply (wp deleteCallerCap_ksQ_ct' hoare_vcg_all_lift) + apply (wp hoare_vcg_all_lift) apply (rule conseqPost[where Q'=UNIV and A'="{}"], vcg exspec=deleteCallerCap_modifies) apply (clarsimp dest!: rf_sr_ksCurThread) apply simp @@ -1305,7 +1290,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. 
UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1322,7 +1307,7 @@ lemma handleRecv_ccorres: apply (clarsimp simp: rf_sr_upd_safe) apply (simp add: liftE_bind) - apply (ctac add: receiveSignal_ccorres[unfolded dc_def]) + apply (ctac add: receiveSignal_ccorres) apply clarsimp apply (vcg exspec=handleFault_modifies) apply (rule ccorres_cond_true_seq) @@ -1335,7 +1320,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) apply (rule ccorres_add_return2) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_break_return[where P=\ and P'=UNIV]) apply simp+ apply wp @@ -1356,7 +1341,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_symb_exec_r) apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_upd_safe) @@ -1369,9 +1354,9 @@ lemma handleRecv_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=handleFault_modifies) @@ -1382,13 +1367,11 @@ lemma handleRecv_ccorres: apply clarsimp apply (rename_tac thread epCPtr) apply (rule_tac Q'="(\rv s. invs' s \ st_tcb_at' simple' thread s - \ sch_act_sane s \ (\p. thread \ set (ksReadyQueues s p)) \ thread = ksCurThread s - \ valid_cap' rv s)" in hoare_post_imp_R[rotated]) - apply (clarsimp simp: sch_act_sane_def) - apply (auto dest!: obj_at_valid_objs'[OF _ invs_valid_objs'] - simp: projectKOs valid_obj'_def, - auto simp: pred_tcb_at'_def obj_at'_def objBits_simps projectKOs ct_in_state'_def)[1] - apply wp + \ sch_act_sane s \ thread = ksCurThread s + \ valid_cap' rv s)" in hoare_strengthen_postE_R[rotated]) + apply (intro conjI impI allI; clarsimp simp: sch_act_sane_def) + apply (fastforce dest: obj_at_valid_objs'[OF _ invs_valid_objs'] ko_at_valid_ntfn') + apply wp apply clarsimp apply (vcg exspec=isStopped_modifies exspec=lookupCap_modifies) @@ -1405,8 +1388,8 @@ lemma handleRecv_ccorres: apply (frule tcb_aligned'[OF tcb_at_invs']) apply clarsimp apply (intro conjI impI allI) - apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift - lookup_fault_missing_capability_lift is_cap_fault_def)+ + apply (clarsimp simp: cfault_rel_def seL4_Fault_CapFault_lift + lookup_fault_missing_capability_lift is_cap_fault_def)+ apply (clarsimp simp: cap_get_tag_NotificationCap) apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption, erule ko_at_projectKO_opt) apply (clarsimp simp: cnotification_relation_def Let_def) @@ -1437,7 +1420,7 @@ lemma handleYield_ccorres: apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear tcbSchedAppend_valid_objs') apply (vcg exspec= tcbSchedAppend_modifies) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues) + apply (wp weak_sch_act_wf_lift_linear) apply (vcg exspec= tcbSchedDequeue_modifies) apply (clarsimp simp: tcb_at_invs' invs_valid_objs' valid_objs'_maxPriority valid_objs'_maxDomain) @@ -1594,11 +1577,9 @@ lemma 
ccorres_return_void_C_Seq: lemma ccorres_handleReservedIRQ: "ccorres dc xfdc - (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s \ - (\p. ksCurThread s \ set (ksReadyQueues s p)))) + (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) (UNIV \ {s. irq_' s = ucast irq}) hs (handleReservedIRQ irq) (Call handleReservedIRQ_'proc)" - supply dc_simp[simp del] apply (cinit lift: irq_') apply (rule ccorres_return_Skip) apply clarsimp @@ -1606,8 +1587,7 @@ lemma ccorres_handleReservedIRQ: lemma handleInterrupt_ccorres: "ccorres dc xfdc - (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s \ - (\p. ksCurThread s \ set (ksReadyQueues s p)))) + (invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) (UNIV \ \\irq = ucast irq\) hs (handleInterrupt irq) @@ -1621,11 +1601,11 @@ lemma handleInterrupt_ccorres: apply (subst doMachineOp_bind) apply (rule maskInterrupt_empty_fail) apply (rule ackInterrupt_empty_fail) - apply (ctac add: maskInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: maskInterrupt_ccorres) apply (subst bind_return_unit[where f="doMachineOp (ackInterrupt irq)"]) - apply (ctac add: ackInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=ackInterrupt_modifies) @@ -1665,7 +1645,7 @@ lemma handleInterrupt_ccorres: apply (ctac (no_vcg) add: sendSignal_ccorres) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp+ apply (simp del: Collect_const) apply (rule ccorres_cond_true_seq) @@ -1674,7 +1654,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp apply (rule_tac P=\ and P'="{s. 
ret__int_' s = 0 \ cap_get_tag cap \ scast cap_notification_cap}" in ccorres_inst) apply (clarsimp simp: isCap_simps simp del: Collect_const) @@ -1686,7 +1666,7 @@ lemma handleInterrupt_ccorres: rule ccorres_cond_false_seq, simp, rule ccorres_cond_false_seq, simp, ctac (no_vcg) add: maskInterrupt_ccorres, - ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def], + ctac (no_vcg) add: ackInterrupt_ccorres, wp, simp)+) apply (wp getSlotCap_wp) apply simp @@ -1695,7 +1675,6 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_move_const_guards)+ apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac (no_vcg) add: timerTick_ccorres) apply (ctac (no_vcg) add: resetTimer_ccorres) @@ -1707,7 +1686,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) apply (ctac add: ccorres_handleReservedIRQ) - apply (ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: ackInterrupt_ccorres) apply wp apply (vcg exspec=handleReservedIRQ_modifies) apply (simp add: sint_ucast_eq_uint is_down uint_up_ucast is_up ) diff --git a/proof/crefine/X64/TcbAcc_C.thy b/proof/crefine/X64/TcbAcc_C.thy index 8dd61964ef..7888ad6521 100644 --- a/proof/crefine/X64/TcbAcc_C.thy +++ b/proof/crefine/X64/TcbAcc_C.thy @@ -89,22 +89,22 @@ lemma archThreadGet_eq: apply simp done -lemma get_tsType_ccorres [corres]: +lemma get_tsType_ccorres[corres]: "ccorres (\r r'. r' = thread_state_to_tsType r) ret__unsigned_longlong_' (tcb_at' thread) - (UNIV \ {s. thread_state_ptr_' s = Ptr &(tcb_ptr_to_ctcb_ptr thread\[''tcbState_C''])}) [] - (getThreadState thread) (Call thread_state_ptr_get_tsType_'proc)" + ({s. f s = tcb_ptr_to_ctcb_ptr thread} \ + {s. cslift s (Ptr &(f s\[''tcbState_C''])) = Some (thread_state_' s)}) [] + (getThreadState thread) (Call thread_state_get_tsType_'proc)" unfolding getThreadState_def - apply (rule ccorres_from_spec_modifies) - apply (rule thread_state_ptr_get_tsType_spec) - apply (rule thread_state_ptr_get_tsType_modifies) - apply simp - apply (frule (1) obj_at_cslift_tcb) - apply (clarsimp simp: typ_heap_simps) + apply (rule ccorres_from_spec_modifies [where P=\, simplified]) + apply (rule thread_state_get_tsType_spec) + apply (rule thread_state_get_tsType_modifies) + apply simp apply (frule (1) obj_at_cslift_tcb) apply (clarsimp simp: typ_heap_simps) apply (rule bexI [rotated, OF threadGet_eq], assumption) apply simp - apply (erule ctcb_relation_thread_state_to_tsType) + apply (drule ctcb_relation_thread_state_to_tsType) + apply simp done lemma threadGet_obj_at2: @@ -177,7 +177,7 @@ lemma threadSet_corres_lemma: assumes spec: "\s. \\ \s. P s\ Call f {t. Q s t}" and mod: "modifies_heap_spec f" and rl: "\\ x t ko. \(\, x) \ rf_sr; Q x t; x \ P'; ko_at' ko thread \\ - \ (\\ksPSpace := ksPSpace \(thread \ KOTCB (g ko))\, + \ (\\ksPSpace := (ksPSpace \)(thread \ KOTCB (g ko))\, t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" and g: "\s x. \tcb_at' thread s; x \ P'; (s, x) \ rf_sr\ \ P x" shows "ccorres dc xfdc (tcb_at' thread) P' [] (threadSet g thread) (Call f)" @@ -206,7 +206,7 @@ lemma threadSet_corres_lemma: lemma threadSet_ccorres_lemma4: - "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := ksPSpace s(thread \ injectKOS (F tcb))\, s') \ rf_sr}; + "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := (ksPSpace s)(thread \ injectKOS (F tcb))\, s') \ rf_sr}; \s s' tcb tcb'. 
\ (s, s') \ rf_sr; P tcb; ko_at' tcb thread s; cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some tcb'; ctcb_relation tcb tcb'; P' s ; s' \ R\ \ s' \ Q s tcb \ diff --git a/proof/crefine/X64/TcbQueue_C.thy b/proof/crefine/X64/TcbQueue_C.thy index d81d6b6baa..7a79843bdf 100644 --- a/proof/crefine/X64/TcbQueue_C.thy +++ b/proof/crefine/X64/TcbQueue_C.thy @@ -964,49 +964,6 @@ lemma tcb_queue_relation'_prev_sign: \ sign_extend 47 (ptr_val (getPrev tcb)) = ptr_val (getPrev tcb)" by (rule tcb_queue_relation_prev_sign [OF tcb_queue_relation'_queue_rel]) - -lemma cready_queues_relation_null_queue_ptrs: - assumes rel: "cready_queues_relation mp cq aq" - and same: "option_map tcb_null_ep_ptrs \ mp' = option_map tcb_null_ep_ptrs \ mp" - shows "cready_queues_relation mp' cq aq" - using rel - apply (clarsimp simp: cready_queues_relation_def Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp, (erule conjI)+, assumption) - apply (clarsimp simp: tcb_queue_relation'_def) - apply (erule iffD2 [OF tcb_queue_relation_only_next_prev, rotated -1]) - apply (rule ext) - apply (case_tac "mp' x") - apply (frule compD [OF same]) - apply simp - apply (frule compD [OF same]) - apply (clarsimp simp: tcb_null_ep_ptrs_def) - apply (case_tac z, case_tac a) - apply simp - \ \clag\ - apply (rule ext) - apply (case_tac "mp' x") - apply (frule compD [OF same]) - apply simp - apply (frule compD [OF same]) - apply (clarsimp simp: tcb_null_ep_ptrs_def) - apply (case_tac z, case_tac a) - apply simp - done - -lemma cready_queues_relation_not_queue_ptrs: - assumes rel: "cready_queues_relation mp cq aq" - and same: "option_map tcbSchedNext_C \ mp' = option_map tcbSchedNext_C \ mp" - "option_map tcbSchedPrev_C \ mp' = option_map tcbSchedPrev_C \ mp" - shows "cready_queues_relation mp' cq aq" - using rel - apply (clarsimp simp: cready_queues_relation_def tcb_queue_relation'_def Let_def all_conj_distrib) - apply (drule spec, drule spec, drule mp, (erule conjI)+, assumption) - apply clarsimp - apply (erule iffD2 [OF tcb_queue_relation_only_next_prev, rotated -1]) - apply (rule same) - apply (rule same) - done - lemma ntfn_ep_disjoint: assumes srs: "sym_refs (state_refs_of' s)" and epat: "ko_at' ep epptr s" @@ -1090,8 +1047,8 @@ lemma cpspace_relation_ntfn_update_ntfn: and cp: "cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals t))" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" using koat invs cp rel apply - apply (subst map_comp_update) @@ -1383,7 +1340,7 @@ lemma user_fpu_state_C_in_tcb_C_offset: "(typ_uinfo_t TYPE(user_fpu_state_C), n) \ td_set (typ_uinfo_t TYPE(tcb_C)) 0 \ n = 0" \ \Examine the fields of tcb_C.\ apply (simp add: typ_uinfo_t_def tcb_C_typ_info_unfold td_set_export_uinfo_eq td_set_adjust_ti_eq - image_comp image_Un apfst_comp o_def[where f=export_uinfo] + image_comp image_Un apfst_comp del: export_uinfo_typdesc_simp) apply (elim disjE) apply (all \drule td_set_image_field_lookup[rotated]; clarsimp\) @@ -1455,14 +1412,12 @@ lemma rf_sr_tcb_update_no_queue: (t_hrs_' (globals s')); tcbEPNext_C ctcb = tcbEPNext_C (the (cslift s' (tcb_ptr_to_ctcb_ptr 
thread))); tcbEPPrev_C ctcb = tcbEPPrev_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); - tcbSchedNext_C ctcb = tcbSchedNext_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); - tcbSchedPrev_C ctcb = tcbSchedPrev_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))); fpuState_C (tcbContext_C (tcbArch_C ctcb)) = fpuState_C (tcbContext_C (tcbArch_C (the (cslift s' (tcb_ptr_to_ctcb_ptr thread))))); (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes @@ -1484,20 +1439,11 @@ lemma rf_sr_tcb_update_no_queue: apply (rule cnotification_relation_upd_tcb_no_queues, assumption+) subgoal by (clarsimp intro!: ext) subgoal by (clarsimp intro!: ext) - apply (erule cready_queues_relation_not_queue_ptrs) - subgoal by (clarsimp intro!: ext) - subgoal by (clarsimp intro!: ext) subgoal by (clarsimp simp: carch_state_relation_def fpu_null_state_preservation typ_heap_simps') by (simp add: cmachine_state_relation_def) -lemma rf_sr_tcb_update_no_queue_helper: - "(s, s'\ globals := globals s' \ t_hrs_' := t_hrs_' (globals (undefined - \ globals := (undefined \ t_hrs_' := f (globals s') (t_hrs_' (globals s')) \)\))\\) \ rf_sr - \ (s, globals_update (\v. t_hrs_'_update (f v) v) s') \ rf_sr" - by (simp cong: StateSpace.state.fold_congs globals.fold_congs) - -lemmas rf_sr_tcb_update_no_queue2 - = rf_sr_tcb_update_no_queue_helper [OF rf_sr_tcb_update_no_queue, simplified] +lemmas rf_sr_tcb_update_no_queue2 = + rf_sr_obj_update_helper[OF rf_sr_tcb_update_no_queue, simplified] lemma tcb_queue_relation_not_in_q: "ctcb_ptr_to_tcb_ptr x \ set xs \ @@ -1512,7 +1458,7 @@ lemma rf_sr_tcb_update_not_in_queue: \ live' (KOTCB tcb); invs' s; (\x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes @@ -1545,20 +1491,11 @@ lemma rf_sr_tcb_update_not_in_queue: apply (drule(1) map_to_ko_atI') apply (drule sym_refs_ko_atD', clarsimp+) subgoal by blast - apply (simp add: cready_queues_relation_def, erule allEI) apply (clarsimp simp: Let_def) - apply (subst tcb_queue_relation_not_in_q) - apply clarsimp - apply (drule valid_queues_obj_at'D, clarsimp) - apply (clarsimp simp: obj_at'_def projectKOs inQ_def) - subgoal by simp apply (simp add: carch_state_relation_def) subgoal by (clarsimp simp: fpu_null_state_heap_update_span_disjoint[OF tcb_at'_non_kernel_data_ref'] global_ioport_bitmap_heap_update_tag_disj_simps obj_at'_def projectKOs) by (simp add: cmachine_state_relation_def) -lemmas rf_sr_tcb_update_not_in_queue2 - = rf_sr_tcb_update_no_queue_helper [OF rf_sr_tcb_update_not_in_queue, simplified] - end end diff --git a/proof/crefine/X64/Tcb_C.thy b/proof/crefine/X64/Tcb_C.thy index 417f4c3f96..e08967be07 100644 --- a/proof/crefine/X64/Tcb_C.thy +++ b/proof/crefine/X64/Tcb_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -58,8 +59,6 @@ lemma doMachineOp_sched: done context begin interpretation Arch . (*FIXME: arch_split*) -crunch queues[wp]: setupReplyMaster "valid_queues" - (simp: crunch_simps wp: crunch_wps) crunch curThread [wp]: restart "\s. P (ksCurThread s)" (wp: crunch_wps simp: crunch_simps) @@ -96,8 +95,8 @@ lemma getMRs_rel_sched: lemma getObject_state: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbState_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -155,8 +154,8 @@ lemma getObject_state: lemma threadGet_state: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) t' s); ko_at' ko t s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_state [where st=st]) apply (rule exI) @@ -166,8 +165,8 @@ lemma threadGet_state: lemma asUser_state: "\(x,s) \ fst (asUser t' f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ \ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (asUser t' f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (asUser t' f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. 
st) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -264,8 +263,8 @@ lemma asUser_state: lemma doMachineOp_state: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -298,7 +297,7 @@ lemma getMRs_rel_state: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s \ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -390,9 +389,10 @@ lemma hrs_mem_update_cong: lemma setPriority_ccorres: "ccorres dc xfdc - (\s. tcb_at' t s \ Invariants_H.valid_queues s \ ksCurDomain s \ maxDomain \ - valid_queues' s \ valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ (priority \ maxPriority)) - (UNIV \ {s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. prio_' s = ucast priority}) + (\s. tcb_at' t s \ ksCurDomain s \ maxDomain \ + valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ (priority \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s) + ({s. tptr_' s = tcb_ptr_to_ctcb_ptr t} \ {s. prio_' s = ucast priority}) [] (setPriority t priority) (Call setPriority_'proc)" apply (cinit lift: tptr_' prio_') apply (ctac(no_vcg) add: tcbSchedDequeue_ccorres) @@ -411,11 +411,11 @@ lemma setPriority_ccorres: apply (rule ccorres_pre_getCurThread) apply (rule_tac R = "\s. rv = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) apply (rule ccorres_return_Skip') apply (wp isRunnable_wp) - apply (wpsimp wp: hoare_drop_imps threadSet_valid_queues threadSet_valid_objs' + apply (wpsimp wp: hoare_drop_imps threadSet_valid_objs' weak_sch_act_wf_lift_linear threadSet_pred_tcb_at_state threadSet_tcbDomain_triv simp: st_tcb_at'_def o_def split: if_splits) @@ -424,19 +424,14 @@ lemma setPriority_ccorres: where Q="\rv s. obj_at' (\_. True) t s \ priority \ maxPriority \ - Invariants_H.valid_queues s \ ksCurDomain s \ maxDomain \ valid_objs' s \ - valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s \ - (\d p. 
\ t \ set (ksReadyQueues s (d, p)))"]) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues tcbSchedDequeue_nonq) + pspace_aligned' s \ pspace_distinct' s"]) + apply (wp weak_sch_act_wf_lift_linear valid_tcb'_def) apply (clarsimp simp: valid_tcb'_tcbPriority_update) apply clarsimp - apply (frule (1) valid_objs'_maxDomain[where t=t]) - apply (frule (1) valid_objs'_maxPriority[where t=t]) - apply simp -done + done lemma setMCPriority_ccorres: "ccorres dc xfdc @@ -491,8 +486,8 @@ lemma checkCapAt_ccorres: apply assumption apply (simp only: when_def if_to_top_of_bind) apply (rule ccorres_if_lhs) - apply (simp add: from_bool_def true_def) - apply (simp add: from_bool_def false_def) + apply simp + apply simp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -517,7 +512,7 @@ lemma cteInsert_cap_to'2: apply (simp add: cteInsert_def ex_nonz_cap_to'_def setUntypedCapAsFull_def) apply (rule hoare_vcg_ex_lift) apply (wp updateMDB_weak_cte_wp_at - updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp) + updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of) apply auto done @@ -618,7 +613,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply csymbr apply (simp add: liftE_bindE[symmetric] bindE_assoc getThreadBufferSlot_def - locateSlot_conv o_def + locateSlot_conv del: Collect_const) apply (simp add: liftE_bindE del: Collect_const) apply (ctac(no_vcg) add: cteDelete_ccorres) @@ -644,13 +639,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres) apply ceqv apply csymbr - apply (simp add: true_def Collect_True + apply (simp add: Collect_True del: Collect_const) apply (simp add: assertDerived_def bind_assoc del: Collect_const) apply (rule ccorres_symb_exec_l) @@ -664,7 +659,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -673,36 +668,36 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wp (once)) apply (clarsimp simp: guard_is_UNIV_def) - apply (wpsimp wp: when_def static_imp_wp) + apply (wpsimp wp: when_def hoare_weak_lift_imp) apply (strengthen sch_act_wf_weak, wp) apply clarsimp apply wp apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (rule hoare_strengthen_post[ where Q= "\rv s. - Invariants_H.valid_queues s \ valid_objs' s \ weak_sch_act_wf (ksSchedulerAction s) s \ ((\a b. 
priority = Some (a, b)) \ tcb_at' target s \ ksCurDomain s \ maxDomain \ - valid_queues' s \ fst (the priority) \ maxPriority)"]) + fst (the priority) \ maxPriority) \ + pspace_aligned' s \ pspace_distinct' s"]) apply (strengthen sch_act_wf_weak) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (clarsimp split: if_splits) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) apply (rule ccorres_split_nothrow_novcg_dc) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule_tac C'="{s. target = curThread}" and Q="\s. ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -712,26 +707,26 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply (simp add: when_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbBuffer_def size_of_def cte_level_bits_def tcbIPCBufferSlot_def mask_def objBits_defs) apply csymbr - apply (simp add: if_1_0_0 Collect_False false_def ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) apply (rule ccorres_cond_false_seq, simp) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule_tac C'="{s. target = curThread}" and Q="\s. 
ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -740,9 +735,9 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+) apply wp apply (clarsimp simp: guard_is_UNIV_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) - apply (simp add: guard_is_UNIV_def if_1_0_0 false_def Collect_const_mem) + apply (simp add: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: ccap_relation_def cap_thread_cap_lift cap_to_H_def canonical_address_bitfield_extract_tcb) (* \ P *) apply simp @@ -751,14 +746,14 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp split: option.split_asm) apply (rule ccorres_pre_getCurThread) apply (rename_tac curThread) - apply (simp add: when_def to_bool_def) + apply (simp add: when_def) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule_tac C'="{s. target = curThread}" and Q="\s. ksCurThread s = curThread" and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -768,17 +763,17 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply wpsimp - apply (wp static_imp_wp, strengthen sch_act_wf_weak, wp ) + apply (wp hoare_weak_lift_imp, strengthen sch_act_wf_weak, wp ) apply wp apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (simp cong: conj_cong) apply (rule hoare_strengthen_post[ - where Q="\a b. (Invariants_H.valid_queues b \ - valid_objs' b \ + where Q="\a b. (valid_objs' b \ sch_act_wf (ksSchedulerAction b) b \ + pspace_aligned' b \ pspace_distinct' b \ ((\a b. 
priority = Some (a, b)) \ tcb_at' target b \ - ksCurDomain b \ maxDomain \ valid_queues' b \ + ksCurDomain b \ maxDomain \ fst (the priority) \ maxPriority)) \ ((case snd (the buf) of None \ 0 @@ -800,15 +795,15 @@ lemma invokeTCB_ThreadControl_ccorres: prefer 2 apply fastforce apply (strengthen cte_is_derived_capMasterCap_strg - invs_queues invs_weak_sch_act_wf invs_sch_act_wf' + invs_weak_sch_act_wf invs_sch_act_wf' invs_valid_objs' invs_mdb' invs_pspace_aligned', simp add: o_def) apply (rule_tac P="is_aligned (fst (the buf)) msg_align_bits" in hoare_gen_asm) - apply (wp threadSet_ipcbuffer_trivial static_imp_wp + apply (wp threadSet_ipcbuffer_trivial hoare_weak_lift_imp | simp - | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf invs_queues - invs_valid_queues' | wp hoare_drop_imps)+ + | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf + | wp hoare_drop_imps)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem option_to_0_def split: option.split_asm) @@ -817,7 +812,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg apply (simp add: conj_comms cong: conj_cong) - apply (strengthen invs_ksCurDomain_maxDomain') + apply (strengthen invs_ksCurDomain_maxDomain' invs_pspace_distinct') apply (wp hoare_vcg_const_imp_lift_R cteDelete_invs') apply simp apply (rule ccorres_split_nothrow_novcg_dc) @@ -830,12 +825,11 @@ lemma invokeTCB_ThreadControl_ccorres: apply (clarsimp simp: inQ_def Collect_const_mem cintr_def exception_defs tcb_cnode_index_defs) apply (simp add: tcbBuffer_def tcbIPCBufferSlot_def word_sle_def - cte_level_bits_def from_bool_def true_def size_of_def case_option_If2 ) + cte_level_bits_def size_of_def case_option_If2 ) apply (rule conjI) apply (clarsimp simp: objBits_simps' word_bits_conv case_option_If2 if_n_0_0 valid_cap'_def capAligned_def obj_at'_def projectKOs) - apply (clarsimp simp: invs_valid_objs' invs_valid_queues' - Invariants_H.invs_queues invs_ksCurDomain_maxDomain') + apply (fastforce simp: invs_valid_objs' invs_ksCurDomain_maxDomain') apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -846,11 +840,10 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -863,34 +856,29 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - del: Collect_const) + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False 
ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbVTable_def tcbVTableSlot_def Kernel_C.tcbVTable_def cte_level_bits_def size_of_def option_to_0_def objBits_defs mask_def) apply csymbr - apply (simp add: if_1_0_0 false_def Collect_False - del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (clarsimp simp: guard_is_UNIV_def false_def - ccap_relation_def cap_thread_cap_lift - cap_to_H_def if_1_0_0 Collect_const_mem canonical_address_bitfield_extract_tcb) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift + cap_to_H_def Collect_const_mem canonical_address_bitfield_extract_tcb) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) apply vcg @@ -899,7 +887,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes cteDelete_sch_act_simple | strengthen invs_valid_objs')+ - apply (rule hoare_post_imp_R[where Q' = "\r. invs'"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r. invs'"]) apply (wp cteDelete_invs') apply (clarsimp simp:cte_wp_at_ctes_of) apply simp @@ -913,20 +901,18 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] checkCap_inv [where P="valid_cap' c" for c] checkCap_inv [where P="sch_act_simple"] | simp)+ - apply (clarsimp simp: guard_is_UNIV_def from_bool_def true_def - word_sle_def if_1_0_0 Collect_const_mem + apply (clarsimp simp: guard_is_UNIV_def word_sle_def Collect_const_mem option_to_0_def Kernel_C.tcbVTable_def tcbVTableSlot_def cte_level_bits_def size_of_def cintr_def tcb_cnode_index_defs objBits_defs mask_def) @@ -935,34 +921,29 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - del: Collect_const) + apply (simp add: Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: true_def Collect_True - assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr - apply (simp add: false_def Collect_False ccorres_cond_iffs + apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem Kernel_C.tcbCTable_def tcbCTableSlot_def if_1_0_0 cte_level_bits_def size_of_def option_to_0_def mask_def objBits_defs) apply csymbr - apply (simp add: false_def Collect_False - del: Collect_const) + apply 
(simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (clarsimp simp: guard_is_UNIV_def false_def - ccap_relation_def cap_thread_cap_lift - cap_to_H_def if_1_0_0 Collect_const_mem canonical_address_bitfield_extract_tcb) + apply (rule ccorres_return_Skip) + apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift + cap_to_H_def Collect_const_mem canonical_address_bitfield_extract_tcb) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) apply vcg @@ -970,20 +951,20 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: cte_is_derived_capMasterCap_strg o_def) apply (wp cteDelete_invs' hoare_case_option_wp cteDelete_deletes cteDelete_sch_act_simple | strengthen invs_valid_objs')+ - apply (rule hoare_post_imp_R[where Q' = "\r. invs'"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r. invs'"]) apply (wp cteDelete_invs') apply (clarsimp simp:cte_wp_at_ctes_of) apply simp apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def tcbCTableSlot_def Kernel_C.tcbCTable_def cte_level_bits_def size_of_def word_sle_def option_to_0_def - true_def from_bool_def cintr_def objBits_defs mask_def) + cintr_def objBits_defs mask_def) apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: inQ_def) apply (subst is_aligned_neg_mask_eq) @@ -1010,7 +991,7 @@ lemma setupReplyMaster_ccorres: apply (cinit lift: thread_') apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply ctac - apply (simp del: Collect_const add: dc_def[symmetric]) + apply (simp del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) apply (rule_tac F="\rv'. (rv' = scast cap_null_cap) = (cteCap oldCTE = NullCap)" @@ -1048,7 +1029,6 @@ lemma setupReplyMaster_ccorres: apply (simp add: cte_to_H_def cap_to_H_def mdb_node_to_H_def nullMDBNode_def c_valid_cte_def) apply (simp add: cap_reply_cap_lift) - apply (simp add: true_def mask_def to_bool_def) apply simp apply (simp add: cmachine_state_relation_def packed_heap_update_collapse_hrs carch_state_relation_def carch_globals_def @@ -1079,7 +1059,7 @@ lemma restart_ccorres: apply (ctac(no_vcg) add: tcbSchedEnqueue_ccorres) apply (ctac add: possibleSwitchTo_ccorres) apply (wp weak_sch_act_wf_lift)[1] - apply (wp sts_valid_queues setThreadState_st_tcb)[1] + apply (wp sts_valid_objs' setThreadState_st_tcb)[1] apply (simp add: valid_tcb_state'_def) apply wp apply (wp (once) sch_act_wf_lift, (wp tcb_in_cur_domain'_lift)+) @@ -1091,7 +1071,7 @@ lemma restart_ccorres: apply fastforce apply (rule ccorres_return_Skip) apply (wp hoare_drop_imps) - apply (auto simp: Collect_const_mem mask_def "StrictC'_thread_state_defs") + apply (auto simp: Collect_const_mem mask_def ThreadState_defs) done lemma setNextPC_ccorres: @@ -1157,10 +1137,10 @@ lemma postModifyRegisters_ccorres: apply (simp add: if_distrib[where f="asUser t" for t] asUser_return) apply (rule_tac R="\s. 
ct = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_add_return2) apply (rule ccorres_stateAssert) - apply (rule ccorres_return_Skip'[unfolded dc_def]) + apply (rule ccorres_return_Skip') by simp+ lemma invokeTCB_CopyRegisters_ccorres: @@ -1236,7 +1216,7 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (rule ccorres_pre_getCurThread) apply (ctac add: postModifyRegisters_ccorres) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rvd = ksCurThread s" + apply (rule_tac R="\s. rvc = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp @@ -1266,7 +1246,7 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (fastforce simp: sch_act_wf_weak) apply (wpsimp wp: hoare_drop_imp)+ apply (clarsimp simp add: guard_is_UNIV_def) - apply (clarsimp simp: to_bool_def invs_weak_sch_act_wf invs_valid_objs' + apply (clarsimp simp: invs_weak_sch_act_wf invs_valid_objs' split: if_split cong: if_cong | rule conjI)+ apply (clarsimp dest!: global'_no_ex_cap simp: invs'_def valid_state'_def | rule conjI)+ done @@ -1302,8 +1282,8 @@ lemma invokeTCB_WriteRegisters_ccorres_helper: lemma doMachineOp_context: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -1312,8 +1292,8 @@ lemma doMachineOp_context: lemma getObject_context: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbContext_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -1372,8 +1352,8 @@ lemma getObject_context: lemma threadGet_context: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) s); ko_at' ko t s; t \ ksCurThread s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_context [where st=st]) apply (rule exI) @@ -1385,8 +1365,8 @@ done lemma asUser_context: "\(x,s) \ fst (asUser (ksCurThread s) f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ ; t \ ksCurThread s\ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (asUser (ksCurThread s) f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. 
atcbContextSet st (tcbArch ko)) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (asUser (ksCurThread s) f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -1457,7 +1437,7 @@ lemma getMRs_rel_context: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s ; t \ ksCurThread s\ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -1517,7 +1497,7 @@ lemma asUser_getMRs_rel: apply (erule getMRs_rel_context, simp) apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs) apply simp -done + done lemma asUser_sysargs_rel: @@ -1542,7 +1522,7 @@ lemma asUser_setRegister_ko_at': done lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: - notes static_imp_wp [wp] word_less_1[simp del] + notes hoare_weak_lift_imp [wp] word_less_1[simp del] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' dst and ex_nonz_cap_to' dst and sch_act_simple @@ -1557,6 +1537,7 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: \ {s. buffer_' s = option_to_ptr buffer}) [] (invokeTCB (WriteRegisters dst resume values arch)) (Call invokeTCB_WriteRegisters_'proc)" + supply empty_fail_cond[simp] apply (rule ccorres_gen_asm) apply (erule conjE) apply (cinit lift: n_' dest___ptr_to_struct_tcb_C_' resumeTarget_' buffer_' @@ -1648,14 +1629,14 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_when[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) - apply (rule_tac xf'="\_. 0" in ccorres_call) - apply (rule restart_ccorres) + apply (rule_tac xf'=Corres_C.xfdc in ccorres_call) + apply (rule restart_ccorres) + apply simp apply simp - apply (simp add: xfdc_def) apply simp apply (rule ceqv_refl) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (rule_tac R="\s. 
self = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp @@ -1700,7 +1681,7 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (clarsimp simp: frame_gp_registers_convs word_less_nat_alt sysargs_rel_def n_frameRegisters_def n_msgRegisters_def split: if_split_asm) - apply (simp add: invs_weak_sch_act_wf invs_valid_objs' invs_queues) + apply (simp add: invs_weak_sch_act_wf invs_valid_objs') apply (fastforce dest!: global'_no_ex_cap simp: invs'_def valid_state'_def) done @@ -1714,7 +1695,7 @@ lemma invokeTCB_Suspend_ccorres: apply (ctac(no_vcg) add: suspend_ccorres[OF cteDeleteOne_ccorres]) apply (rule ccorres_return_CE, simp+)[1] apply wp - apply (clarsimp simp: from_bool_def true_def) + apply clarsimp apply (auto simp: invs'_def valid_state'_def global'_no_ex_cap) done @@ -1728,7 +1709,7 @@ lemma invokeTCB_Resume_ccorres: apply (ctac(no_vcg) add: restart_ccorres) apply (rule ccorres_return_CE, simp+)[1] apply wp - apply (clarsimp simp: from_bool_def true_def) + apply clarsimp done lemma Arch_decodeTransfer_spec: @@ -1805,6 +1786,7 @@ shows (doE reply \ invokeTCB (ReadRegisters target susp n archCp); liftE (replyOnRestart thread reply isCall) odE) (Call invokeTCB_ReadRegisters_'proc)" + supply empty_fail_cond[simp] apply (rule ccorres_gen_asm) apply (cinit' lift: tcb_src_' suspendSource_' n_' call_' simp: invokeTCB_def liftE_bindE bind_assoc) @@ -1830,10 +1812,11 @@ shows apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_getThreadState]) apply (rule ccorres_if_lhs[OF _ ccorres_False[where P'=UNIV]]) apply (rule ccorres_if_lhs) - apply (simp add: Collect_True true_def whileAnno_def del: Collect_const) + apply (simp add: Collect_True whileAnno_def del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupIPCBuffer_ccorres) + apply (rename_tac state destIPCBuffer ipcBuffer) apply (ctac add: setRegister_ccorres) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc2) @@ -1893,15 +1876,15 @@ shows apply (rule bind_apply_cong[OF _ refl]) apply (rule_tac n1="min (unat n_frameRegisters - unat n_msgRegisters) (unat n)" in fun_cong [OF mapM_x_split_append]) - apply (rule_tac P="rva \ Some 0" in ccorres_gen_asm) - apply (subgoal_tac "(ipcBuffer = NULL) = (rva = None)") + apply (rule_tac P="destIPCBuffer \ Some 0" in ccorres_gen_asm) + apply (subgoal_tac "(ipcBuffer = NULL) = (destIPCBuffer = None)") prefer 2 apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.split_asm) apply (simp add: bind_assoc del: Collect_const) apply (rule_tac xf'=i_' and r'="\_ rv. unat rv = min (unat n_frameRegisters) (min (unat n) - (case rva of None \ unat n_msgRegisters + (case destIPCBuffer of None \ unat n_msgRegisters | _ \ unat n_frameRegisters))" in ccorres_split_nothrow_novcg) apply (rule ccorres_Cond_rhs) @@ -1909,7 +1892,7 @@ shows rule_tac F="\m s. obj_at' (\tcb. map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (X64_H.frameRegisters @ X64_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="unat n_msgRegisters" in ccorres_mapM_x_while') @@ -2018,11 +2001,10 @@ shows apply (rename_tac i_c, rule_tac P="i_c = 0" in ccorres_gen_asm2) apply (simp add: drop_zip del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp del: Collect_const) apply (rule_tac F="\m s. obj_at' (\tcb. 
map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (X64_H.frameRegisters @ X64_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s \ valid_pspace' s" + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="0" in ccorres_mapM_x_while') apply (clarsimp simp: less_diff_conv drop_zip) apply (rule ccorres_guard_imp2) @@ -2095,11 +2077,11 @@ shows apply (simp add: min_less_iff_disj less_imp_diff_less) apply (simp add: drop_zip n_gpRegisters_def) apply (elim disjE impCE) - apply (clarsimp simp: mapM_x_Nil) + apply (clarsimp simp: mapM_x_Nil cong: ccorres_all_cong) apply (rule ccorres_return_Skip') - apply (simp add: linorder_not_less word_le_nat_alt - drop_zip mapM_x_Nil n_frameRegisters_def - min.absorb1 n_msgRegisters_def) + apply (simp add: linorder_not_less word_le_nat_alt drop_zip + mapM_x_Nil n_frameRegisters_def n_msgRegisters_def + cong: ccorres_all_cong) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') apply simp apply ceqv @@ -2111,7 +2093,7 @@ shows apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) apply (wp | simp add: valid_tcb_state'_def)+ - apply (clarsimp simp: ThreadState_Running_def mask_def) + apply (clarsimp simp: ThreadState_defs mask_def) apply (rule mapM_x_wp') apply (rule hoare_pre) apply (wp sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift) @@ -2131,15 +2113,15 @@ shows apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size word_less_nat_alt split: if_split_asm dest!: word_unat.Rep_inverse') - apply simp - apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift static_imp_wp + apply (simp add: pred_conj_def) + apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift hoare_weak_lift_imp tcb_in_cur_domain'_lift) apply (simp add: n_frameRegisters_def n_msgRegisters_def guard_is_UNIV_def) apply simp apply (rule mapM_x_wp') apply (rule hoare_pre) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem @@ -2148,7 +2130,7 @@ shows msgMaxLength_def msgLengthBits_def word_less_nat_alt unat_of_nat) apply (wp (once) hoare_drop_imps) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply (vcg exspec=setRegister_modifies) apply simp @@ -2159,7 +2141,7 @@ shows X64.badgeRegister_def X64.capRegister_def "StrictC'_register_defs") apply (vcg exspec=lookupIPCBuffer_modifies) - apply (simp add: false_def) + apply simp apply (ctac(no_vcg) add: setThreadState_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) @@ -2168,18 +2150,17 @@ shows apply (simp cong: rev_conj_cong) apply wp apply (wp asUser_inv mapM_wp' getRegister_inv - asUser_get_registers[simplified] static_imp_wp)+ + asUser_get_registers[simplified] hoare_weak_lift_imp)+ apply (rule hoare_strengthen_post, rule asUser_get_registers) apply (clarsimp simp: obj_at'_def genericTake_def frame_gp_registers_convs) apply arith - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply (simp add: performTransfer_def) apply wp - apply (simp add: Collect_const_mem "StrictC'_thread_state_defs" - mask_def) + apply (simp add: Collect_const_mem ThreadState_defs mask_def) apply vcg apply (rule_tac Q="\rv. 
invs' and st_tcb_at' ((=) Restart) thread and tcb_at' target" in hoare_post_imp) @@ -2189,7 +2170,7 @@ shows apply (vcg exspec=suspend_modifies) apply vcg apply (rule conseqPre, vcg, clarsimp) - apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def true_def dc_def + apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def split: if_split) done @@ -2254,7 +2235,8 @@ lemma decodeReadRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2265,13 +2247,13 @@ lemma decodeReadRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2298,7 +2280,7 @@ lemma decodeReadRegisters_ccorres: apply wp apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread - "StrictC'_thread_state_defs" word_sless_def word_sle_def + ThreadState_defs word_sless_def word_sle_def mask_eq_iff_w2p word_size isCap_simps ReadRegistersFlags_defs tcb_at_invs' cap_get_tag_isCap capTCBPtr_eq) @@ -2311,7 +2293,7 @@ lemma decodeReadRegisters_ccorres: valid_tcb_state'_def elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] - apply (clarsimp simp: from_bool_def word_and_1 split: if_split) + apply (clarsimp simp: word_and_1 split: if_split) done lemma decodeWriteRegisters_ccorres: @@ -2366,7 +2348,8 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. 
capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2377,13 +2360,13 @@ lemma decodeWriteRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2391,7 +2374,7 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: performInvocation_def) apply (ctac(no_vcg) add: invokeTCB_WriteRegisters_ccorres [where args=args and someNum="unat (args ! 1)"]) - apply (simp add: dc_def[symmetric] o_def) + apply simp apply (rule ccorres_alternative2, rule ccorres_return_CE, simp+) apply (rule ccorres_return_C_errorE, simp+)[1] apply wp[1] @@ -2406,12 +2389,12 @@ lemma decodeWriteRegisters_ccorres: apply (vcg exspec=getSyscallArg_modifies) apply (clarsimp simp: Collect_const_mem ct_in_state'_def pred_tcb_at') apply (simp add: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) - apply (clarsimp simp: valid_cap'_def "StrictC'_thread_state_defs" + apply (clarsimp simp: valid_cap'_def ThreadState_defs mask_eq_iff_w2p word_size rf_sr_ksCurThread WriteRegisters_resume_def word_sle_def word_sless_def numeral_eqs) apply (frule arg_cong[where f="\x. unat (of_nat x :: machine_word)"], - simp(no_asm_use) only: word_unat.Rep_inverse o_def, + simp(no_asm_use) only: word_unat.Rep_inverse, simp) apply (rule conjI) apply clarsimp @@ -2424,8 +2407,7 @@ lemma decodeWriteRegisters_ccorres: apply (rule disjCI2) apply (clarsimp simp: genericTake_def linorder_not_less) apply (subst hd_conv_nth, clarsimp simp: unat_eq_0) - apply (clarsimp simp: from_bool_def word_and_1 - split: if_split) + apply (clarsimp simp: word_and_1 split: if_split) done lemma excaps_map_Nil: "(excaps_map caps = []) = (caps = [])" @@ -2493,7 +2475,7 @@ lemma decodeCopyRegisters_ccorres: apply (simp add: case_bool_If if_to_top_of_bindE if_to_top_of_bind del: Collect_const cong: if_cong) - apply (simp add: to_bool_def returnOk_bind Collect_True + apply (simp add: returnOk_bind Collect_True ccorres_invocationCatch_Inr performInvocation_def del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2543,7 +2525,7 @@ lemma decodeCopyRegisters_ccorres: elim!: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1] apply (clarsimp simp: word_sle_def CopyRegistersFlags_defs word_sless_def - "StrictC'_thread_state_defs" rf_sr_ksCurThread + ThreadState_defs rf_sr_ksCurThread split: if_split) apply (drule interpret_excaps_eq) apply (clarsimp simp: mask_def excaps_map_def split_def ccap_rights_relation_def @@ -2627,7 +2609,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (simp add: case_Null_If del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_if_lhs) @@ -2648,7 +2630,7 @@ lemma slotCapLongRunningDelete_ccorres: apply vcg apply (simp del: Collect_const) apply 
(rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of return_def) @@ -2656,7 +2638,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap from_bool_0 dest!: ccte_relation_ccap_relation) - apply (simp add: from_bool_def false_def true_def + apply (simp add: from_bool_def split: bool.split) apply (auto simp add: longRunningDelete_def isCap_simps split: capability.split)[1] @@ -2664,13 +2646,12 @@ lemma slotCapLongRunningDelete_ccorres: apply (wp hoare_drop_imps isFinalCapability_inv) apply (clarsimp simp: Collect_const_mem guard_is_UNIV_def) apply (rename_tac rv') - apply (case_tac rv'; clarsimp simp: false_def true_def) + apply (case_tac rv'; clarsimp simp: false_def) apply vcg apply (rule conseqPre, vcg, clarsimp) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) - apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap - from_bool_def false_def map_comp_Some_iff + apply (clarsimp simp: typ_heap_simps cap_get_tag_isCap map_comp_Some_iff dest!: ccte_relation_ccap_relation) done @@ -2685,7 +2666,7 @@ lemma isValidVTableRoot_spec: {s'. ret__unsigned_long_' s' = from_bool (isValidVTableRoot_C (cap_' s))}" apply vcg apply (clarsimp simp: isValidVTableRoot_C_def if_1_0_0 from_bool_0) - apply (simp add: from_bool_def to_bool_def false_def split: if_split) + apply (simp add: to_bool_def split: if_split) done lemma isValidVTableRoot_conv: @@ -2699,9 +2680,8 @@ lemma isValidVTableRoot_conv: apply (case_tac "cap_get_tag cap' = scast cap_pml4_cap") apply (clarsimp split: arch_capability.split simp: isCap_simps) apply (clarsimp simp: ccap_relation_def map_option_Some_eq2 - cap_pml4_cap_lift cap_to_H_def - from_bool_def) - apply (clarsimp simp: to_bool_def split: if_split) + cap_pml4_cap_lift cap_to_H_def) + apply (clarsimp split: if_split) apply (clarsimp simp: cap_get_tag_isCap cap_get_tag_isCap_ArchObject) apply (simp split: arch_capability.split_asm add: isCap_simps) apply (case_tac "cap_get_tag cap' = scast cap_pml4_cap") @@ -3016,7 +2996,7 @@ lemma decodeTCBConfigure_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (subgoal_tac "extraCaps \ []") - apply (clarsimp simp: returnOk_def return_def hd_conv_nth false_def) + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) apply fastforce apply clarsimp apply ceqv @@ -3043,7 +3023,7 @@ lemma decodeTCBConfigure_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def - hd_drop_conv_nth2 false_def) + hd_drop_conv_nth2) apply fastforce apply ceqv apply (ctac add: ccorres_injection_handler_csum1 @@ -3129,7 +3109,7 @@ lemma decodeTCBConfigure_ccorres: ptr_val_tcb_ptr_mask2[unfolded mask_def objBits_defs, simplified] tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - StrictC'_thread_state_defs mask_eq_iff_w2p word_size + ThreadState_defs mask_eq_iff_w2p word_size from_bool_all_helper all_ex_eq_helper ucast_ucast_mask objBits_defs) apply (subgoal_tac "args \ [] \ extraCaps \ []") @@ -3165,7 +3145,8 @@ lemma decodeTCBConfigure_ccorres: apply (rule conjI, fastforce) apply (drule interpret_excaps_eq) apply (clarsimp simp: cte_wp_at_ctes_of valid_tcb_state'_def numeral_eqs le_ucast_ucast_le - tcb_at_invs' invs_valid_objs' invs_queues invs_sch_act_wf' + 
tcb_at_invs' invs_valid_objs' invs_sch_act_wf' + invs_pspace_aligned' invs_pspace_distinct' ct_in_state'_def pred_tcb_at'_def obj_at'_def tcb_st_refs_of'_def) apply (erule disjE; simp add: objBits_defs mask_def) apply (clarsimp simp: idButNot_def interpret_excaps_test_null @@ -3178,7 +3159,7 @@ lemma decodeTCBConfigure_ccorres: capTCBPtr_eq tcb_ptr_to_ctcb_ptr_mask tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - StrictC'_thread_state_defs mask_eq_iff_w2p word_size + ThreadState_defs mask_eq_iff_w2p word_size from_bool_all_helper) apply (frule(1) tcb_at_h_t_valid [OF tcb_at_invs']) apply (clarsimp simp: typ_heap_simps numeral_eqs isCap_simps valid_cap'_def capAligned_def @@ -3212,7 +3193,6 @@ lemma decodeSetMCPriority_ccorres: >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetMCPriority_'proc)" supply Collect_const[simp del] - supply dc_simp[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetMCPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3280,8 +3260,7 @@ lemma decodeSetMCPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3317,7 +3296,7 @@ lemma decodeSetMCPriority_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) @@ -3346,7 +3325,7 @@ lemma decodeSetPriority_ccorres: (decodeSetPriority args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetPriority_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3414,8 +3393,7 @@ lemma decodeSetPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3451,7 +3429,7 @@ lemma decodeSetPriority_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) @@ -3493,7 
+3471,7 @@ lemma decodeSetSchedParams_ccorres: (decodeSetSchedParams args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetSchedParams_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetSchedParams_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3501,9 +3479,9 @@ lemma decodeSetSchedParams_ccorres: val="from_bool (length args < 2 \ length extraCaps = 0)" in ccorres_symb_exec_r_known_rv) apply vcg - apply (auto simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 - split: bool.splits)[1] - apply (unat_arith+)[2] + apply (force simp: interpret_excaps_test_null excaps_map_def from_bool_def unat_eq_0 + unat_arith_simps + split: bool.splits if_splits) apply ceqv apply clarsimp (* @@ -3580,8 +3558,7 @@ lemma decodeSetSchedParams_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3630,16 +3607,15 @@ lemma decodeSetSchedParams_ccorres: elim!: obj_at'_weakenE pred_tcb'_weakenE dest!: st_tcb_at_idle_thread')[1] apply (clarsimp simp: interpret_excaps_eq excaps_map_def) - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size option_to_0_def) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size option_to_0_def) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (clarsimp simp: valid_cap'_def capAligned_def interpret_excaps_eq excaps_map_def) apply (intro conjI impI allI) - apply (clarsimp simp: unat_eq_0 le_max_word_ucast_id - thread_control_update_mcp_def thread_control_update_priority_def - cap_get_tag_isCap_unfolded_H_cap isCap_simps - interpret_excaps_eq excaps_map_def)+ - done + by (clarsimp simp: unat_eq_0 le_max_word_ucast_id + thread_control_update_mcp_def thread_control_update_priority_def + cap_get_tag_isCap_unfolded_H_cap isCap_simps + interpret_excaps_eq excaps_map_def)+ lemma decodeSetIPCBuffer_ccorres: "interpret_excaps extraCaps' = excaps_map extraCaps \ @@ -3777,11 +3753,10 @@ lemma decodeSetIPCBuffer_ccorres: valid_mdb_ctes_def no_0_def excaps_map_def elim: pred_tcb'_weakenE dest!: st_tcb_at_idle_thread' dest!: interpret_excaps_eq)[1] - apply (clarsimp simp: option_to_0_def rf_sr_ksCurThread word_sless_def - word_sle_def ThreadState_Restart_def mask_def) + apply (clarsimp simp: option_to_0_def rf_sr_ksCurThread word_sless_def word_sle_def mask_def) apply (rule conjI[rotated], clarsimp+) apply (drule interpret_excaps_eq[rule_format, where n=0], simp add: excaps_map_Nil) - apply (simp add: mask_def "StrictC'_thread_state_defs" excaps_map_def) + apply (simp add: mask_def ThreadState_defs excaps_map_def) apply (clarsimp simp: ccap_rights_relation_def rightsFromWord_wordFromRights cap_get_tag_isCap) apply (frule cap_get_tag_to_H, subst cap_get_tag_isCap, assumption, assumption) @@ -3814,7 +3789,7 @@ lemma bindNotification_ccorres: (Call bindNotification_'proc)" apply (cinit lift: tcb_' ntfnPtr_' simp: bindNotification_def) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and 
ko_at' rv ntfnptr and tcb_at' tcb" and P'=UNIV + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr and tcb_at' tcb" and P'=UNIV in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) @@ -3834,7 +3809,7 @@ lemma bindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv") + apply (case_tac "ntfnObj ntfn") apply ((clarsimp simp: option_to_ctcb_ptr_canonical[OF invs_pspace_canonical'])+)[3] apply (auto simp: option_to_ctcb_ptr_def objBits_simps' bindNTFN_alignment_junk)[1] @@ -3848,7 +3823,7 @@ lemma bindNotification_ccorres: apply ceqv apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) - apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3[unfolded dc_def]) + apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule (1) rf_sr_tcb_update_no_queue2, @@ -3914,7 +3889,7 @@ lemma decodeUnbindNotification_ccorres: apply (rule ccorres_Guard_Seq) apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getBoundNotification) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. ntfn \ Some 0" in ccorres_cross_over_guard) apply (simp add: bindE_bind_linearise) apply wpc apply (simp add: bindE_bind_linearise[symmetric] @@ -3947,10 +3922,10 @@ lemma decodeUnbindNotification_ccorres: apply (clarsimp simp: isCap_simps) apply (frule cap_get_tag_isCap_unfolded_H_cap) apply (auto simp: ctcb_relation_def typ_heap_simps cap_get_tag_ThreadCap ct_in_state'_def - option_to_ptr_def option_to_0_def ThreadState_Restart_def - mask_def rf_sr_ksCurThread valid_tcb_state'_def - elim!: pred_tcb'_weakenE - dest!: valid_objs_boundNTFN_NULL) + option_to_ptr_def option_to_0_def ThreadState_defs + mask_def rf_sr_ksCurThread valid_tcb_state'_def + elim!: pred_tcb'_weakenE + dest!: valid_objs_boundNTFN_NULL) done lemma nTFN_case_If_ptr: @@ -4024,7 +3999,7 @@ lemma decodeBindNotification_ccorres: apply csymbr apply (clarsimp simp add: if_to_top_of_bind to_bool_eq_0[symmetric] simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (clarsimp simp: to_bool_def throwError_bind invocationCatch_def) + apply (clarsimp simp: throwError_bind invocationCatch_def) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg apply (rule conseqPre, vcg) @@ -4047,7 +4022,7 @@ lemma decodeBindNotification_ccorres: apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def valid_ntfn'_def) apply (case_tac "ntfnObj ntfn", simp_all add: isWaitingNtfn_def option_to_ctcb_ptr_def - false_def true_def split: option.split_asm if_split, + split: option.split_asm if_split, auto simp: neq_Nil_conv tcb_queue_relation'_def tcb_at_not_NULL[symmetric] tcb_at_not_NULL)[1] apply ceqv @@ -4111,8 +4086,8 @@ lemma decodeBindNotification_ccorres: apply (rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def syscall_error_rel_def syscall_error_to_H_cases exception_defs) - apply (clarsimp simp add: guard_is_UNIV_def isWaitingNtfn_def from_bool_0 - ThreadState_Restart_def mask_def true_def + apply (clarsimp simp add: guard_is_UNIV_def isWaitingNtfn_def + ThreadState_defs mask_def rf_sr_ksCurThread capTCBPtr_eq) apply (simp add: hd_conv_nth bindE_bind_linearise nTFN_case_If_ptr throwError_bind invocationCatch_def) apply (rule ccorres_from_vcg_split_throws[where P=\ and 
P'=UNIV]) @@ -4299,7 +4274,7 @@ lemma decodeSetSpace_ccorres: in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) apply (subgoal_tac "extraCaps \ []") - apply (clarsimp simp: returnOk_def return_def hd_conv_nth false_def) + apply (clarsimp simp: returnOk_def return_def hd_conv_nth) apply fastforce apply clarsimp apply ceqv @@ -4311,7 +4286,7 @@ lemma decodeSetSpace_ccorres: apply (simp add: Collect_False del: Collect_const) apply csymbr apply csymbr - apply (simp add: cnode_cap_case_if cap_get_tag_isCap dc_def[symmetric] + apply (simp add: cnode_cap_case_if cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_throwError @@ -4326,8 +4301,7 @@ lemma decodeSetSpace_ccorres: apply (rule_tac P'="{s. vRootCap = vRootCap_' s}" in ccorres_from_vcg[where P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: returnOk_def return_def - hd_drop_conv_nth2 false_def) + apply (clarsimp simp: returnOk_def return_def hd_drop_conv_nth2) apply fastforce apply ceqv apply (ctac add: ccorres_injection_handler_csum1 @@ -4437,18 +4411,17 @@ lemma decodeSetSpace_ccorres: rightsFromWord_wordFromRights capTCBPtr_eq tcb_cnode_index_defs size_of_def option_to_0_def rf_sr_ksCurThread - "StrictC'_thread_state_defs" mask_eq_iff_w2p word_size) + ThreadState_defs mask_eq_iff_w2p word_size) apply (simp add: word_sle_def cap_get_tag_isCap) apply (subgoal_tac "args \ []") apply (clarsimp simp: hd_conv_nth) - apply (drule sym, simp, simp add: true_def from_bool_0) apply (clarsimp simp: objBits_simps') apply fastforce apply clarsimp done lemma invokeTCB_SetTLSBase_ccorres: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs') @@ -4459,7 +4432,7 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (cinit lift: thread_' tls_base_') apply (simp add: liftE_def bind_assoc del: Collect_const) - apply (ctac add: setRegister_ccorres[simplified dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_pre_getCurThread) apply (rename_tac cur_thr) apply (rule ccorres_split_nothrow_novcg_dc) @@ -4471,9 +4444,9 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wpsimp wp: hoare_drop_imp simp: guard_is_UNIV_def)+ apply vcg - apply (clarsimp simp: tlsBaseRegister_def X64.tlsBaseRegister_def - invs_weak_sch_act_wf invs_queues TLS_BASE_def FS_BASE_def - split: if_split) + apply (fastforce simp: tlsBaseRegister_def X64.tlsBaseRegister_def + invs_weak_sch_act_wf TLS_BASE_def FS_BASE_def + split: if_split) done lemma decodeSetTLSBase_ccorres: @@ -4521,7 +4494,7 @@ lemma decodeSetTLSBase_ccorres: apply (clarsimp simp: ct_in_state'_def sysargs_rel_n_def n_msgRegisters_def) apply (auto simp: valid_tcb_state'_def elim!: pred_tcb'_weakenE)[1] - apply (simp add: StrictC'_thread_state_defs mask_eq_iff_w2p word_size) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size) apply (frule rf_sr_ksCurThread) apply (simp only: cap_get_tag_isCap[symmetric], drule(1) cap_get_tag_to_H) apply (auto simp: unat_eq_0 le_max_word_ucast_id)+ @@ -4673,8 +4646,7 @@ lemma decodeTCBInvocation_ccorres: dest!: st_tcb_at_idle_thread')[1] apply (simp split: sum.split add: cintr_def intr_and_se_rel_def exception_defs syscall_error_rel_def) - apply (simp add: "StrictC'_thread_state_defs" mask_eq_iff_w2p word_size - cap_get_tag_isCap) + apply (simp add: ThreadState_defs mask_eq_iff_w2p word_size) apply (simp add: cap_get_tag_isCap[symmetric], 
drule(1) cap_get_tag_to_H) apply clarsimp done diff --git a/proof/crefine/X64/VSpace_C.thy b/proof/crefine/X64/VSpace_C.thy index 7e0963eca3..7a7a6ee44d 100644 --- a/proof/crefine/X64/VSpace_C.thy +++ b/proof/crefine/X64/VSpace_C.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -81,7 +82,7 @@ lemma checkVPAlignment_ccorres: apply simp apply simp apply simp - apply (simp split: if_split add: to_bool_def) + apply (simp split: if_split) apply (clarsimp simp: mask_def unlessE_def throwError_def split: if_split) apply (rule ccorres_guard_imp) apply (rule ccorres_return_C) @@ -89,7 +90,7 @@ lemma checkVPAlignment_ccorres: apply simp apply simp apply simp - apply (simp split: if_split add: to_bool_def) + apply (simp split: if_split) apply (clarsimp split: if_split) apply (simp add: word_less_nat_alt) apply (rule order_le_less_trans, rule pageBitsForSize_le) @@ -287,8 +288,8 @@ lemma handleVMFault_ccorres: apply simp apply terminates_trivial apply (drule sym, clarsimp) - apply (wpc; simp add: vm_fault_type_from_H_def X86InstructionFault_def X86DataFault_def - true_def false_def bind_assoc) + apply (corres_cases; simp add: vm_fault_type_from_H_def X86InstructionFault_def X86DataFault_def + bind_assoc) apply (rule returnVMFault_corres; clarsimp simp: exception_defs mask_twice lift_rv_def)+ apply wpsimp+ @@ -447,7 +448,7 @@ lemma corres_symb_exec_unknown_r: assumes "\rv. corres_underlying sr nf nf' r P P' a (c rv)" shows "corres_underlying sr nf nf' r P P' a (unknown >>= c)" apply (simp add: unknown_def) - apply (rule corres_symb_exec_r[OF assms]; wp select_inv non_fail_select) + apply (rule corres_symb_exec_r[OF assms]; wp select_inv) done lemma lookupPML4Slot'_spec: @@ -609,7 +610,7 @@ lemma lookupPDPTSlot_ccorres: apply (rule corres_symb_exec_lookupPML4Slot'; rename_tac pml4e_ptr) apply (rule corres_symb_exec_unknown_r; rename_tac undefined) apply (rule corres_symb_exec_pml4e_ptr_get_present'; rename_tac present) - apply wpc + apply corres_cases apply (rule_tac F="present = 0" in corres_gen_asm2) apply (simp add: bind_assoc) apply (rule corres_symb_exec_lookup_fault_missing_capability_new'; rename_tac lookup_fault) @@ -653,7 +654,7 @@ lemma lookupPDPTSlot_ccorres': apply csymbr apply csymbr apply (rule ccorres_abstract_cleanup) - apply (rule_tac P="(ret__unsigned_longlong = 0) = (rv = X64_H.InvalidPML4E)" + apply (rule_tac P="(ret__unsigned_longlong = 0) = (pml4e = X64_H.InvalidPML4E)" in ccorres_gen_asm2) apply (wpc; ccorres_rewrite) apply (rule_tac P=\ and P' =UNIV in ccorres_from_vcg_throws) @@ -665,9 +666,9 @@ lemma lookupPDPTSlot_ccorres': apply (thin_tac "_ = PDPointerTablePML4E _ _ _ _ _ _") apply (simp add: bind_liftE_distrib liftE_bindE returnOk_liftE[symmetric]) apply (rule ccorres_stateAssert) - apply (rule_tac P="pd_pointer_table_at' (ptrFromPAddr (pml4eTable rv)) - and ko_at' rv (lookup_pml4_slot pm vptr) - and K (isPDPointerTablePML4E rv)" + apply (rule_tac P="pd_pointer_table_at' (ptrFromPAddr (pml4eTable pml4e)) + and ko_at' pml4e (lookup_pml4_slot pm vptr) + and K (isPDPointerTablePML4E pml4e)" and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) @@ -891,7 +892,7 @@ lemma findVSpaceForASID_ccorres: apply clarsimp apply (rule_tac P="valid_arch_state' and _" and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def NonDetMonad.lift_def + apply (clarsimp simp: throwError_def 
return_def bindE_def Nondet_Monad.lift_def EXCEPTION_NONE_def EXCEPTION_LOOKUP_FAULT_def lookup_fault_lift_invalid_root) apply (frule rf_sr_asidTable_None[where asid=asid, THEN iffD2], simp add: asid_wf_def3, assumption, assumption) @@ -1189,33 +1190,33 @@ lemma setVMRoot_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap_ArchObject2) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: throwError_def catch_def dc_def[symmetric]) + apply (simp add: throwError_def catch_def) apply (rule ccorres_cond_true_seq, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_x64KSSKIMPML4) apply csymbr - apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentUserVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) - apply (rule hoare_post_taut[where P=\]) + apply (rule wp_post_taut) apply (rule ccorres_rhs_assoc) apply (csymbr, rename_tac is_mapped) apply csymbr apply (rule_tac P="to_bool (capPML4IsMapped_CL (cap_pml4_cap_lift vRootCap')) = (capPML4MappedASID (capCap vRootCap) \ None)" in ccorres_gen_asm2) - apply (clarsimp simp: to_bool_def dc_def[symmetric]) + apply (clarsimp simp: to_bool_def) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: throwError_def catch_def dc_def[symmetric], ccorres_rewrite) + apply (simp add: throwError_def catch_def, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_x64KSSKIMPML4) apply csymbr - apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentUserVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) - apply (rule hoare_post_taut[where P=\]) + apply (rule wp_post_taut) apply (simp add: catch_def bindE_bind_linearise bind_assoc liftE_def) apply (csymbr, rename_tac pml4_ptr, csymbr) apply (csymbr, rename_tac asid', csymbr) @@ -1231,37 +1232,37 @@ lemma setVMRoot_ccorres: in ccorres_gen_asm2) apply simp apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def throwError_def dc_def[symmetric], ccorres_rewrite) + apply (simp add: whenE_def throwError_def, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_x64KSSKIMPML4) apply csymbr - apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentUserVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) - apply (rule hoare_post_taut[where P=\]) + apply (rule wp_post_taut) apply (simp add: whenE_def returnOk_def) apply (csymbr, rename_tac base_addr) apply (rule ccorres_symb_exec_r) apply (ctac add: getCurrentUserCR3_ccorres, rename_tac currentCR3 currentCR3') - apply (rule ccorres_if_bind, rule ccorres_if_lhs; simp add: dc_def[symmetric]) + apply (rule ccorres_if_bind, rule ccorres_if_lhs; simp) apply (rule ccorres_cond_true) apply (ctac add: setCurrentUserCR3_ccorres) apply (rule ccorres_cond_false) apply (rule ccorres_return_Skip) - apply (simp, rule hoare_post_taut[where P=\]) + apply (simp, rule wp_post_taut) apply vcg apply vcg apply (rule conseqPre, vcg, clarsimp) - apply (rule ccorres_cond_true_seq, simp add: dc_def[symmetric], ccorres_rewrite) + apply (rule ccorres_cond_true_seq, simp, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_x64KSSKIMPML4) apply csymbr - 
apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentUserVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) - apply (rule hoare_post_taut[where P=\]) + apply (rule wp_post_taut) apply (simp add: asid_wf_0, rule wp_post_tautE) apply (vcg exspec=findVSpaceForASID_modifies) apply (wpsimp wp: getSlotCap_wp) @@ -1276,13 +1277,12 @@ lemma setVMRoot_ccorres: apply (frule cte_wp_at_valid_objs_valid_cap'; clarsimp simp: invs_cicd_valid_objs') apply (clarsimp simp: isCap_simps valid_cap'_def mask_def asid_wf_def) apply (clarsimp simp: tcb_cnode_index_defs cte_level_bits_def tcbVTableSlot_def - cte_at_tcb_at_32' to_bool_def) + cte_at_tcb_at_32') apply (clarsimp simp: cap_get_tag_isCap_ArchObject2 dest!: isCapDs) apply (clarsimp simp: cap_get_tag_isCap_ArchObject[symmetric] cap_lift_pml4_cap cap_to_H_def - cap_pml4_cap_lift_def - to_bool_def mask_def + cap_pml4_cap_lift_def mask_def ccr3_relation_defs Let_def cr3_lift_def word_bw_assocs elim!: ccap_relationE @@ -1296,10 +1296,6 @@ lemma setVMRoot_ccorres: apply (match premises in H: \cr3_C.words_C _.[0] && _ = 0\ \ \insert H; word_bitwise\) done -lemma ccorres_seq_IF_False: - "ccorres_underlying sr \ r xf arrel axf G G' hs a (IF False THEN x ELSE y FI ;; c) = ccorres_underlying sr \ r xf arrel axf G G' hs a (y ;; c)" - by simp - (* FIXME x64: needed? *) lemma ptrFromPAddr_mask6_simp[simp]: "ptrFromPAddr ps && mask 6 = ps && mask 6" @@ -1329,12 +1325,12 @@ lemma setRegister_ccorres: (asUser thread (setRegister reg val)) (Call setRegister_'proc)" apply (cinit' lift: thread_' reg_' w_') - apply (simp add: asUser_def dc_def[symmetric] split_def split del: if_split) + apply (simp add: asUser_def split_def) apply (rule ccorres_pre_threadGet) apply (rule ccorres_Guard) apply (simp add: setRegister_def simpler_modify_def exec_select_f_singleton) - apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = rv" - in threadSet_ccorres_lemma2 [unfolded dc_def]) + apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = uc" + in threadSet_ccorres_lemma2) apply vcg apply (clarsimp simp: setRegister_def HaskellLib_H.runState_def simpler_modify_def typ_heap_simps) @@ -1365,8 +1361,6 @@ lemma msgRegisters_ccorres: (* usually when we call setMR directly, we mean to only set a registers, which will fit in actual registers *) lemma setMR_as_setRegister_ccorres: - notes dc_simp[simp del] - shows "ccorres (\rv rv'. rv' = of_nat offset + 1) ret__unsigned_' (tcb_at' thread and K (TCB_H.msgRegisters ! 
offset = reg \ offset < length msgRegisters)) (UNIV \ \\reg___unsigned_long = val\ @@ -1383,8 +1377,8 @@ lemma setMR_as_setRegister_ccorres: apply (ctac add: setRegister_ccorres) apply (rule ccorres_from_vcg_throws[where P'=UNIV and P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) - apply (rule hoare_post_taut[of \]) + apply (clarsimp simp: return_def) + apply (rule hoare_TrueI[of \]) apply (vcg exspec=setRegister_modifies) apply (clarsimp simp: n_msgRegisters_def length_of_msgRegisters not_le conj_commute) apply (subst msgRegisters_ccorres[symmetric]) @@ -1696,14 +1690,15 @@ lemma modeUnmapPage_ccorres: rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (simp only: bindE_assoc[symmetric]) apply (rule ccorres_splitE_novcg) - apply (clarsimp simp: inl_rrel_def) - apply (rule checkMappingPPtr_pdpte_ccorres[simplified inl_rrel_def]) - apply (rule conseqPre, vcg) - apply (clarsimp simp: typ_heap_simps') - apply (intro conjI impI) - apply (auto simp: pdpte_pdpte_1g_lift_def pdpte_lift_def cpdpte_relation_def - isHugePagePDPTE_def pdpteFrame_def - split: if_split_asm pdpte.split_asm pdpte.split)[5] + apply (rule ccorres_rel_imp2) + apply (rule checkMappingPPtr_pdpte_ccorres) + apply (rule conseqPre, vcg) + apply (clarsimp simp: typ_heap_simps') + apply (auto simp: pdpte_pdpte_1g_lift_def pdpte_lift_def cpdpte_relation_def + isHugePagePDPTE_def pdpteFrame_def + split: if_split_asm pdpte.split_asm pdpte.split)[1] + apply fastforce + apply (fastforce simp: inl_rrel_def split: sum.splits) apply ceqv apply csymbr apply (rule ccorres_add_returnOk) @@ -1750,11 +1745,11 @@ lemma unmapPage_ccorres: apply (rule ccorres_splitE_novcg[where r'=dc and xf'=xfdc]) \ \X64SmallPage\ apply (rule ccorres_Cond_rhs) - apply (simp add: framesize_to_H_def dc_def[symmetric]) + apply (simp add: framesize_to_H_def) apply (rule ccorres_rhs_assoc)+ apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac pt_slot pt_slot') - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (simp only: bindE_assoc[symmetric]) @@ -1768,11 +1763,9 @@ lemma unmapPage_ccorres: split: if_split_asm pte.split_asm) apply (rule ceqv_refl) apply (simp add: unfold_checkMapping_return liftE_liftM - Collect_const[symmetric] dc_def[symmetric] del: Collect_const) apply (rule ccorres_handlers_weaken2) apply csymbr - apply (simp add: dc_def[symmetric]) apply (rule storePTE_Basic_ccorres) apply (simp add: cpte_relation_def Let_def) apply wp @@ -1786,12 +1779,11 @@ lemma unmapPage_ccorres: apply (vcg exspec=lookupPTSlot_modifies) \ \X64LargePage\ apply (rule ccorres_Cond_rhs) - apply (simp add: framesize_to_H_def dc_def[symmetric] - del: Collect_const) + apply (simp add: framesize_to_H_def del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (ctac add: lookupPDSlot_ccorres) apply (rename_tac pd_slot pd_slot') - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) @@ -1806,11 +1798,9 @@ lemma unmapPage_ccorres: split: if_split_asm pde.split_asm) apply (rule ceqv_refl) apply (simp add: unfold_checkMapping_return liftE_liftM - Collect_const[symmetric] dc_def[symmetric] del: Collect_const) apply (rule ccorres_handlers_weaken2) apply csymbr - apply (simp add: dc_def[symmetric]) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def) apply wp @@ -1823,12 +1813,12 @@ lemma unmapPage_ccorres: 
apply simp apply (vcg exspec=lookupPDSlot_modifies) \ \X64HugePage\ - apply (simp add: framesize_to_H_def dc_def[symmetric]) + apply (simp add: framesize_to_H_def) apply (rule ccorres_add_return2) apply (ctac add: modeUnmapPage_ccorres) apply (rule ccorres_from_vcg_might_throw[where P="\" and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: true_def false_def return_def inl_rrel_def split: sum.split_asm) + apply (clarsimp simp: return_def inl_rrel_def split: sum.split_asm) apply wp apply (vcg exspec=modeUnmapPage_modifies) apply ceqv @@ -1838,13 +1828,13 @@ lemma unmapPage_ccorres: apply clarsimp apply ccorres_rewrite apply (clarsimp simp: liftE_liftM) - apply (ctac add: invalidateTranslationSingleASID_ccorres[simplified dc_def]) - apply clarsimp + apply (ctac add: invalidateTranslationSingleASID_ccorres) + apply wpsimp apply clarsimp apply (clarsimp simp: guard_is_UNIV_def conj_comms tcb_cnode_index_defs) apply (simp add: throwError_def) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wpsimp apply (simp add: Collect_const_mem) @@ -2064,7 +2054,7 @@ lemma performPageInvocationUnmap_ccorres: apply (rule ccorres_rhs_assoc) apply (drule_tac s=cap in sym, simp) (* schematic ugliness *) apply ccorres_rewrite - apply (ctac add: performPageInvocationUnmap_ccorres'[simplified K_def, simplified]) + apply (ctac add: performPageInvocationUnmap_ccorres') apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) @@ -2231,7 +2221,7 @@ lemma setCTE_asidpool': "\ ko_at' (ASIDPool pool) p \ setCTE c p' \\_. ko_at' (ASIDPool pool) p\" apply (clarsimp simp: setCTE_def) apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad) apply (frule updateObject_type) apply (clarsimp simp: obj_at'_def projectKOs) @@ -2343,13 +2333,13 @@ lemma performASIDPoolInvocation_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule_tac ccorres_split_nothrow [where r'=dc and xf'=xfdc]) apply (simp add: updateCap_def) - apply (rule_tac A="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPML4Cap' rv \ asid \ mask asid_bits \ asid \ ucast asidInvalid)" + apply (rule_tac A="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPML4Cap' oldcap \ asid \ mask asid_bits \ asid \ ucast asidInvalid)" and A'=UNIV in ccorres_guard_imp2) apply (rule ccorres_pre_getCTE) - apply (rule_tac P="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPML4Cap' rv \ asid \ mask asid_bits \ asid \ ucast asidInvalid) - and cte_wp_at' ((=) rva) ctSlot" + apply (rule_tac P="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPML4Cap' oldcap \ asid \ mask asid_bits \ asid \ ucast asidInvalid) + and cte_wp_at' ((=) rv) ctSlot" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of) @@ -2382,7 +2372,7 @@ lemma performASIDPoolInvocation_ccorres: apply (simp add: cte_to_H_def c_valid_cte_def) apply (simp add: cap_pml4_cap_lift) apply (simp (no_asm) add: cap_to_H_def) - apply (simp add: to_bool_def asid_bits_def le_mask_imp_and_mask word_bits_def) + apply (simp add: asid_bits_def le_mask_imp_and_mask word_bits_def) apply (clarsimp simp: c_valid_cap_def cl_valid_cap_def) apply (erule (1) cap_lift_PML4Cap_Base) apply simp @@ -2420,7 +2410,7 @@ lemma performASIDPoolInvocation_ccorres: apply (rule conseqPre, vcg) 
apply clarsimp apply (wpsimp wp: liftM_wp) - apply (wpsimp wp: getASID_wp simp: o_def inv_def) + apply (wpsimp wp: getASID_wp simp: inv_def) apply (clarsimp simp: empty_fail_getObject) apply (wpsimp wp: udpateCap_asidpool' hoare_vcg_all_lift hoare_vcg_imp_lift') apply vcg diff --git a/proof/crefine/X64/Wellformed_C.thy b/proof/crefine/X64/Wellformed_C.thy index 334c53f332..aad234acd3 100644 --- a/proof/crefine/X64/Wellformed_C.thy +++ b/proof/crefine/X64/Wellformed_C.thy @@ -165,10 +165,6 @@ where abbreviation "ep_queue_relation \ tcb_queue_relation tcbEPNext_C tcbEPPrev_C" -abbreviation - "sched_queue_relation \ tcb_queue_relation tcbSchedNext_C tcbSchedPrev_C" - - definition wordSizeCase :: "'a \ 'a \ 'a" where "wordSizeCase a b \ (if bitSize (undefined::machine_word) = 32 @@ -284,63 +280,6 @@ definition | Some cap \ Some \ cap_CL = cap, cteMDBNode_CL = mdb_node_lift (cteMDBNode_C c) \" -lemma to_bool_false [simp]: "\ to_bool false" - by (simp add: to_bool_def false_def) - -(* this is slightly weird, but the bitfield generator - masks everything with the expected bit length. - So we do that here too. *) -definition - to_bool_bf :: "'a::len word \ bool" where - "to_bool_bf w \ (w && mask 1) = 1" - -lemma to_bool_bf_mask1 [simp]: - "to_bool_bf (mask (Suc 0))" - by (simp add: mask_def to_bool_bf_def) - -lemma to_bool_bf_0 [simp]: "\to_bool_bf 0" - by (simp add: to_bool_bf_def) - -lemma to_bool_bf_1 [simp]: "to_bool_bf 1" - by (simp add: to_bool_bf_def mask_def) - -lemma to_bool_bf_false [simp]: - "\to_bool_bf false" - by (simp add: false_def) - -lemma to_bool_bf_true [simp]: - "to_bool_bf true" - by (simp add: true_def) - -lemma to_bool_to_bool_bf: - "w = false \ w = true \ to_bool_bf w = to_bool w" - by (auto simp: false_def true_def to_bool_def to_bool_bf_def mask_def) - -lemma to_bool_bf_mask_1 [simp]: - "to_bool_bf (w && mask (Suc 0)) = to_bool_bf w" - by (simp add: to_bool_bf_def) - -lemma to_bool_bf_and [simp]: - "to_bool_bf (a && b) = (to_bool_bf a \ to_bool_bf (b::word64))" - apply (clarsimp simp: to_bool_bf_def) - apply (rule iffI) - apply (subst (asm) bang_eq) - apply (simp add: word_size) - apply (rule conjI) - apply (rule word_eqI) - apply (auto simp add: word_size)[1] - apply (rule word_eqI) - apply (auto simp add: word_size)[1] - apply clarsimp - apply (rule word_eqI) - apply (subst (asm) bang_eq)+ - apply (auto simp add: word_size)[1] - done - -lemma to_bool_bf_to_bool_mask: - "w && mask (Suc 0) = w \ to_bool_bf w = to_bool (w::word64)" - by (metis mask_Suc_0 bool_mask mask_1 to_bool_0 to_bool_1 to_bool_bf_def word_gt_0) - definition mdb_node_to_H :: "mdb_node_CL \ mdbnode" where @@ -529,31 +468,31 @@ lemma maxDom_sgt_0_maxDomain: lemma num_domains_calculation: "num_domains = numDomains" - unfolding num_domains_def by eval + unfolding num_domains_val by eval private lemma num_domains_card_explicit: "num_domains = CARD(num_domains)" - by (simp add: num_domains_def) + by (simp add: num_domains_val) lemmas num_domains_index_updates = - index_update[where 'b=num_domains, folded num_domains_card_explicit num_domains_def, + index_update[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, simplified num_domains_calculation] - index_update2[where 'b=num_domains, folded num_domains_card_explicit num_domains_def, + index_update2[where 'b=num_domains, folded num_domains_card_explicit num_domains_val, simplified num_domains_calculation] (* C ArrayGuards will throw these at us and there is no way to avoid a proof of being less than a specific number expressed as a 
word, so we must introduce these. However, being explicit means lack of discipline can lead to a violation. *) -lemma numDomains_less_numeric_explicit[simplified num_domains_def One_nat_def]: +lemma numDomains_less_numeric_explicit[simplified num_domains_val One_nat_def]: "x < Kernel_Config.numDomains \ x < num_domains" by (simp add: num_domains_calculation) -lemma numDomains_less_unat_ucast_explicit[simplified num_domains_def]: +lemma numDomains_less_unat_ucast_explicit[simplified num_domains_val]: "unat x < Kernel_Config.numDomains \ (ucast (x::domain) :: machine_word) < of_nat num_domains" apply (rule word_less_nat_alt[THEN iffD2]) apply transfer apply simp - apply (drule numDomains_less_numeric_explicit, simp add: num_domains_def) + apply (drule numDomains_less_numeric_explicit, simp add: num_domains_val) done lemmas maxDomain_le_unat_ucast_explicit = @@ -578,7 +517,7 @@ value_type num_tcb_queues = "numDomains * numPriorities" lemma num_tcb_queues_calculation: "num_tcb_queues = numDomains * numPriorities" - unfolding num_tcb_queues_def by eval + unfolding num_tcb_queues_val by eval (* Input abbreviations for API object types *) diff --git a/proof/crefine/autocorres-test/AutoCorresTest.thy b/proof/crefine/autocorres-test/AutoCorresTest.thy index 8b557a84a1..01ee243362 100644 --- a/proof/crefine/autocorres-test/AutoCorresTest.thy +++ b/proof/crefine/autocorres-test/AutoCorresTest.thy @@ -77,7 +77,7 @@ lemma reorder_gets: (do g; x \ gets f; h x od)" - by (fastforce simp: bind_def' NonDetMonad.valid_def gets_def get_def return_def) + by (fastforce simp: bind_def' Nondet_VCG.valid_def gets_def get_def return_def) thm (* no arguments, no precondition, dc return *) @@ -100,7 +100,7 @@ lemma (* handleYield_ccorres: *) (* Show that current thread is unmodified. * FIXME: proper way to do this? *) apply (subst reorder_gets[symmetric, unfolded K_bind_def]) - using tcbSchedDequeue'_modifies apply (fastforce simp: NonDetMonad.valid_def) + using tcbSchedDequeue'_modifies apply (fastforce simp: Nondet_VCG.valid_def) apply (subst double_gets_drop_regets) apply (rule corres_pre_getCurThread_wrapper) apply (rule corres_split[OF tcbSchedDequeue_ccorres[ac]]) @@ -146,7 +146,7 @@ lemma corres_noop2_no_exs: apply (clarsimp simp: corres_underlying_def) apply (rule conjI) apply (drule x, drule y) - apply (clarsimp simp: NonDetMonad.valid_def empty_fail_def Ball_def Bex_def) + apply (clarsimp simp: Nondet_VCG.valid_def empty_fail_def Ball_def Bex_def) apply fast apply (insert z) apply (clarsimp simp: no_fail_def) @@ -164,7 +164,7 @@ lemma corres_symb_exec_l_no_exs: apply (erule x) apply (rule gets_wp) apply (erule nf) - apply (rule non_fail_gets) + apply (rule no_fail_gets) apply (rule z) apply (rule y) apply (rule gets_wp) diff --git a/proof/crefine/lib/AutoCorresModifiesProofs.thy b/proof/crefine/lib/AutoCorresModifiesProofs.thy index 1ae3af10be..5ff2fabc27 100644 --- a/proof/crefine/lib/AutoCorresModifiesProofs.thy +++ b/proof/crefine/lib/AutoCorresModifiesProofs.thy @@ -32,7 +32,7 @@ text \ (via L1_call_simpl), so the limitations of ac_corres do not apply. \ lemma autocorres_modifies_transfer: - notes select_wp[wp] hoare_seq_ext[wp] + notes bind_wp[wp] fixes \ globals f' f_'proc modifies_eqn P xf assumes f'_def: "f' \ AC_call_L1 P globals xf (L1_call_simpl check_termination \ f_'proc)" assumes f_modifies: "\\. \\\<^bsub>/UNIV\<^esub> {\} Call f_'proc {t. 
modifies_eqn (globals t) (globals \)}" @@ -413,7 +413,7 @@ fun modifies_call_tac (callee_modifies: incr_net) ctxt n = DETERM ( (* VCG for trivial state invariants, such as globals modifies specs. * Takes vcg rules from "valid_inv". *) -val valid_invN = Context.theory_name @{theory} ^ ".valid_inv" +val valid_invN = Context.theory_name { long=true } @{theory} ^ ".valid_inv" fun modifies_vcg_tac leaf_tac ctxt n = let val vcg_rules = Named_Theorems.get ctxt valid_invN |> Tactic.build_net; fun vcg n st = Seq.make (fn () => let diff --git a/proof/crefine/lib/AutoCorres_C.thy b/proof/crefine/lib/AutoCorres_C.thy index 10187ee164..e8160cc0df 100644 --- a/proof/crefine/lib/AutoCorres_C.thy +++ b/proof/crefine/lib/AutoCorres_C.thy @@ -69,10 +69,10 @@ FIXME: Move this change into AutoCorres itself, or the underlying VCG library. lemmas [wp del] = NonDetMonadEx.validE_whenE - NonDetMonadVCG.hoare_whenE_wps + Nondet_VCG.whenE_wps lemmas hoare_whenE_wp2 [wp] = - NonDetMonadVCG.hoare_whenE_wps[simplified if_apply_def2] + Nondet_VCG.whenE_wps[simplified if_apply_def2] section \Rules for proving @{term ccorres_underlying} goals\ @@ -256,11 +256,11 @@ method ccorres_to_corres_pre_step = (rule ccorres_to_corres_pre_intros | erule ccorres_to_corres_pre_elims) method ccorres_to_corres_pre_process = ( - (elim pred_andE)?, + (elim inf1E inf2E)?, (simp only: Int_assoc)?, (ccorres_to_corres_pre_step+)?, (rule ccorres_to_corres_pre_finalise), - (intro pred_andI TrueI; clarsimp) + (intro pred_conjI TrueI; clarsimp) ) text \ @@ -296,8 +296,8 @@ lemma ccorres_to_corres_with_termination: "\s s'. \ cstate_relation s (globals s'); P s; \ snd (dspec_f s); G s' \ \ \ \ Call f_'proc \ Normal s'" shows "corres_underlying {(s, s'). cstate_relation s s'} True True R P Q dspec_f ac_f" - using ccorres ret pre unfolding ac_def ccorres_to_corres_pre_def - apply (clarsimp simp: corres_underlying_def ccorres_underlying_def rf_sr_def) + using ccorres ret pre unfolding ac_def ccorres_to_corres_pre_def rf_sr_def + apply (clarsimp simp: corres_underlying_def ccorres_underlying_def) apply (rule conjI) apply (fastforce simp: unif_rrel_def intro: EHOther dest: in_AC_call_simpl) apply (clarsimp simp: AC_call_L1_def L2_call_L1_def L1_call_simpl_def) @@ -334,8 +334,8 @@ lemma ccorres_to_corres_no_termination: assumes pre: "\s'. G s' \ ccorres_to_corres_pre Q \ Q' s'" assumes ret: "\r s'. R r (ret_xf s') \ R' r (ret_xf' s')" shows "corres_underlying {(s, s'). 
cstate_relation s s'} True True R P Q dspec_f ac_f" - using ccorres ret pre unfolding ac_def ccorres_to_corres_pre_def - apply (clarsimp simp: ac_def corres_underlying_def ccorres_underlying_def rf_sr_def) + using ccorres ret pre unfolding ac_def ccorres_to_corres_pre_def rf_sr_def + apply (clarsimp simp: ac_def corres_underlying_def ccorres_underlying_def) apply (rule conjI) apply (fastforce simp: unif_rrel_def intro: EHOther dest: in_AC_call_simpl) apply (clarsimp simp: AC_call_L1_def L2_call_L1_def L1_call_simpl_def) @@ -694,7 +694,7 @@ lemma exec_no_fault: using valid ce asms apply - apply (frule hoare_sound) - apply (clarsimp simp: NonDetMonad.bind_def cvalid_def split_def HoarePartialDef.valid_def) + apply (clarsimp simp: Nondet_Monad.bind_def cvalid_def split_def HoarePartialDef.valid_def) apply (drule spec, drule spec, drule (1) mp) apply auto done @@ -707,7 +707,7 @@ lemma exec_no_stuck: using valid ce asms apply - apply (frule hoare_sound) - apply (clarsimp simp: NonDetMonad.bind_def cvalid_def split_def HoarePartialDef.valid_def) + apply (clarsimp simp: Nondet_Monad.bind_def cvalid_def split_def HoarePartialDef.valid_def) apply (drule spec, drule spec, drule (1) mp) apply auto done @@ -805,17 +805,6 @@ section \Additional infrastructure\ context kernel begin -lemma wpc_helper_corres_final: - "corres_underlying sr nf nf' rv Q Q' f f' - \ wpc_helper (P, P') (Q, {s. Q' s}) (corres_underlying sr nf nf' rv P (\s. s \ P') f f')" - apply (clarsimp simp: wpc_helper_def) - apply (erule corres_guard_imp) - apply auto - done - -wpc_setup "\m. corres_underlying sr nf nf' rv P P' m f'" wpc_helper_corres_final -wpc_setup "\m. corres_underlying sr nf nf' rv P P' (m >>= f) f'" wpc_helper_corres_final - lemma condition_const: "condition (\_. P) L R = (if P then L else R)" by (simp add: condition_def split: if_splits) @@ -932,7 +921,7 @@ lemma terminates_spec_no_fail: using spec_result_Normal p_spec by simp have L1_call_simpl_no_fail: "no_fail (\s. P s s) (L1_call_simpl check_termination \ f_'proc)" - apply (wpsimp simp: L1_call_simpl_def wp: non_fail_select select_wp) + apply (wpsimp simp: L1_call_simpl_def) using terminates normal by auto have select_f_L1_call_simpl_no_fail: "\s. no_fail (\_. 
P s s) (select_f (L1_call_simpl check_termination \<Gamma> f_'proc s))"
@@ -945,10 +934,10 @@ lemma terminates_spec_no_fail:
     using normal by auto
   show ?thesis
     apply (clarsimp simp: ac AC_call_L1_def L2_call_L1_def)
-    apply (wpsimp wp: select_f_L1_call_simpl_no_fail non_fail_select
-                  wp_del: select_f_wp)
-    apply (rule hoare_strengthen_post[OF select_f_L1_call_simpl_rv], fastforce)
-    apply (wpsimp wp: select_wp nf_pre)+
+    apply (wpsimp wp_del: select_f_wp)
+    apply (rule hoare_strengthen_post[OF select_f_L1_call_simpl_rv], fastforce)
+    apply (wpsimp wp: select_f_L1_call_simpl_no_fail)+
+    apply (fastforce simp: nf_pre)
     done
 qed

diff --git a/proof/crefine/lib/Boolean_C.thy b/proof/crefine/lib/Boolean_C.thy
new file mode 100644
index 0000000000..4058603201
--- /dev/null
+++ b/proof/crefine/lib/Boolean_C.thy
@@ -0,0 +1,138 @@
+(*
+ * Copyright 2023, Proofcraft Pty Ltd
+ * Copyright 2014, General Dynamics C4 Systems
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *)
+
+(* Definitions and rules for C boolean constants parsed from the kernel *)
+
+theory Boolean_C
+imports
+  "CSpec.KernelInc_C"
+begin
+
+lemma true_and_1[simp]:
+  "true && 1 = true"
+  "signed true && 1 = signed true"
+  "unsigned true && 1 = unsigned true"
+  by (simp add: true_def)+
+
+lemma to_bool_true[simp]:
+  "to_bool true"
+  "to_bool (signed true)"
+  "to_bool (unsigned true)"
+  by (simp add: to_bool_def true_def)+
+
+lemma true_equals_simps[simp]:
+  "true = 1 \<longleftrightarrow> True"
+  "signed true = 1 \<longleftrightarrow> True"
+  "unsigned true = 1 \<longleftrightarrow> True"
+  "true = 0 \<longleftrightarrow> False"
+  "signed true = 0 \<longleftrightarrow> False"
+  "unsigned true = 0 \<longleftrightarrow> False"
+  by (simp add: true_def)+
+
+lemma true_eq_from_bool[simp]:
+  "(true = from_bool P) = P"
+  "(signed true = from_bool P) = P"
+  "(unsigned true = from_bool P) = P"
+  by (simp add: from_bool_def split: bool.splits)+
+
+lemma false_and_1[simp]:
+  "false && 1 = false"
+  "signed false && 1 = signed false"
+  "unsigned false && 1 = unsigned false"
+  by (simp add: false_def)+
+
+lemma to_bool_false[simp]:
+  "\<not> to_bool false"
+  "\<not> to_bool (signed false)"
+  "\<not> to_bool (unsigned false)"
+  by (simp add: to_bool_def false_def)+
+
+lemma false_equals_simps[simp]:
+  "false = 0 \<longleftrightarrow> True"
+  "signed false = 0 \<longleftrightarrow> True"
+  "unsigned false = 0 \<longleftrightarrow> True"
+  "false = 1 \<longleftrightarrow> False"
+  "signed false = 1 \<longleftrightarrow> False"
+  "unsigned false = 1 \<longleftrightarrow> False"
+  by (simp add: false_def)+
+
+lemma false_eq_from_bool[simp]:
+  "(false = from_bool P) = (\<not> P)"
+  "(signed false = from_bool P) = (\<not> P)"
+  "(unsigned false = from_bool P) = (\<not> P)"
+  by (simp add: from_bool_def split: bool.splits)+
+
+lemma from_bool_vals[simp]:
+  "from_bool True = signed true"
+  "from_bool False = signed false"
+  by (simp add: from_bool_def)+
+
+lemma true_neq_false[simp]:
+  "true \<noteq> false"
+  "signed true \<noteq> signed false"
+  "unsigned true \<noteq> unsigned false"
+  by (simp add: true_def false_def)+
+
+(* The bitfield generator masks everything with the expected bit length, so we do that here too. *)
+definition
+  to_bool_bf :: "'a::len word \<Rightarrow> bool" where
+  "to_bool_bf w \<equiv> (w && mask 1) = 1"
+
+lemma to_bool_bf_mask1[simp]:
+  "to_bool_bf (mask (Suc 0))"
+  by (simp add: mask_def to_bool_bf_def)
+
+lemma to_bool_bf_0[simp]:
+  "\<not>to_bool_bf 0"
+  by (simp add: to_bool_bf_def)
+
+lemma to_bool_bf_1[simp]:
+  "to_bool_bf 1"
+  by (simp add: to_bool_bf_def mask_def)
+
+lemma to_bool_bf_false[simp]:
+  "\<not>to_bool_bf false"
+  by (simp add: false_def)
+
+lemma to_bool_bf_true[simp]:
+  "to_bool_bf true"
+  by (simp add: true_def)
+
+lemma to_bool_to_bool_bf:
+  "w = false \<or> w = true \<Longrightarrow> to_bool_bf w = to_bool w"
+  by (auto simp: false_def true_def to_bool_def to_bool_bf_def mask_def)
+
+lemma to_bool_bf_mask_1[simp]:
+  "to_bool_bf (w && mask (Suc 0)) = to_bool_bf w"
+  by (simp add: to_bool_bf_def)
+
+lemma to_bool_bf_and[simp]:
+  "to_bool_bf (a && b) = (to_bool_bf a \<and> to_bool_bf b)"
+  apply (clarsimp simp: to_bool_bf_def)
+  apply (rule iffI)
+   apply (subst (asm) bang_eq)
+   apply (simp add: word_size)
+   apply (rule conjI)
+    apply (rule word_eqI)
+    apply (auto simp add: word_size)[1]
+   apply (rule word_eqI)
+   apply (auto simp add: word_size)[1]
+  apply clarsimp
+  apply (rule word_eqI)
+  apply (subst (asm) bang_eq)+
+  apply (auto simp add: word_size)[1]
+  done
+
+lemma to_bool_bf_to_bool_mask:
+  "w && mask (Suc 0) = w \<Longrightarrow> to_bool_bf w = to_bool (w::machine_word)"
+  by (metis mask_Suc_0 bool_mask mask_1 to_bool_0 to_bool_1 to_bool_bf_def word_gt_0)
+
+lemma to_bool_mask_to_bool_bf:
+  "to_bool (x && 1) = to_bool_bf (x::machine_word)"
+  by (simp add: to_bool_bf_def to_bool_def)
+
+end
\ No newline at end of file
diff --git a/proof/crefine/lib/CToCRefine.thy b/proof/crefine/lib/CToCRefine.thy
index 35082d7098..0820bed0c6 100644
--- a/proof/crefine/lib/CToCRefine.thy
+++ b/proof/crefine/lib/CToCRefine.thy
@@ -9,7 +9,8 @@ theory CToCRefine
 imports
   "CSpec.Substitute"
   "CLib.SimplRewrite"
-  "CLib.TypHeapLib"
+  Lib.Lib
+  "CParser.TypHeapLib"
 begin

 lemma spec_statefn_simulates_lookup_tree_Node:
@@ -27,14 +28,16 @@ ML \<open>
 fun mk_meta_eq_safe t = mk_meta_eq t handle THM _ => t;

-val unfold_bodies = Simplifier.make_simproc @{context} "unfold constants named *_body"
-  {lhss = [@{term "v"}],
-   proc= fn _ =>
-     (fn ctxt => (fn t => case head_of (Thm.term_of t) of
-       Const (s, _) => if String.isSuffix "_body" s
-         then try (Global_Theory.get_thm (Proof_Context.theory_of ctxt) #> mk_meta_eq_safe) (suffix "_def" s)
-         else NONE
-       | _ => NONE))}
+val unfold_bodies = Simplifier.make_simproc @{context}
+  {name = "unfold constants named *_body",
+   lhss = [@{term "v"}],
+   proc = fn _ =>
+     (fn ctxt => (fn t => case head_of (Thm.term_of t) of
+       Const (s, _) => if String.isSuffix "_body" s
+         then try (Global_Theory.get_thm (Proof_Context.theory_of ctxt) #> mk_meta_eq_safe) (suffix "_def" s)
+         else NONE
+       | _ => NONE)),
+   identifier = []}
 \<close>

 theorem spec_refine:
diff --git a/proof/crefine/lib/Corres_C.thy b/proof/crefine/lib/Corres_C.thy
index 49ae2eab3c..179fa1d7f6 100644
--- a/proof/crefine/lib/Corres_C.thy
+++ b/proof/crefine/lib/Corres_C.thy
@@ -10,6 +10,55 @@ imports
   SR_lemmas_C
 begin

+(* FIXME AARCH64: move up to CCorres_UL begin *)
+(* check RISCV for duplicates *)
+
+(* note: moving this lemma outside of kernel_m locale currently causes some proofs to fail *)
+lemma ccorres_cases:
+  assumes "P \<Longrightarrow> ccorres_underlying srel Ga rrel xf arrel axf G G' hs a b"
+  assumes "\<not>P \<Longrightarrow> ccorres_underlying srel Ga rrel xf arrel axf H H' hs a b"
+  shows "ccorres_underlying srel Ga rrel xf arrel axf
+           (\<lambda>s. (P \<longrightarrow> G s) \<and> (\<not>P \<longrightarrow> H s))
+           ({s. P \<longrightarrow> s \<in> G'} \<inter> {s.
\P \ s \ H'}) hs + a b" + by (cases P, auto simp: assms) + +lemma ccorres_dc_comp: + "ccorres_underlying srel G (dc \ R) xf P P' hs m c = ccorres_underlying srel G dc xf P P' hs m c" + by simp + +(* FIXME AARCH64: remove from CSpace_RAB_C; also in other arches *) +lemma ccorres_gen_asm_state: + assumes rl: "\s. P s \ ccorres_underlying srel Ga r xf arrel axf G G' hs a c" + shows "ccorres_underlying srel Ga r xf arrel axf (G and P) G' hs a c" +proof (rule ccorres_guard_imp2) + show "ccorres_underlying srel Ga r xf arrel axf (G and (\_. \s. P s)) G' hs a c" + apply (rule ccorres_gen_asm) + apply (erule exE) + apply (erule rl) + done +next + fix s s' + assume "(s, s') \ srel" and "(G and P) s" and "s' \ G'" + thus "(G and (\_. \s. P s)) s \ s' \ G'" + by fastforce +qed + +(* FIXME AARCH64: duplicates in Ipc_C and Tcb_C; also other arches *) +lemma ccorres_abstract_known: + "\ \rv' t t'. ceqv \ xf' rv' t t' g (g' rv'); + ccorres_underlying srel \ rvr xf arel axf P P' hs f (g' val) \ + \ ccorres_underlying srel \ rvr xf arel axf P (P' \ {s. xf' s = val}) hs f g" + apply (rule ccorres_guard_imp2) + apply (rule_tac xf'=xf' in ccorres_abstract) + apply assumption + apply (rule_tac P="rv' = val" in ccorres_gen_asm2) + apply simp + apply simp + done + +(* move up to CCorres_UL end *) + abbreviation "return_C \ CLanguage.creturn global_exn_var_'_update" lemmas return_C_def = creturn_def @@ -190,11 +239,22 @@ lemma ccorres_split_nothrow_novcgE: apply (clarsimp simp: guard_is_UNIV_def split: sum.split) done -(* Unit would be more appropriate, but the record package will simplify xfdc to () *) +\ \Unit would be more appropriate, but the record package will rewrite xfdc to (). + This can happen even when protected by a cong rule, as seen in the following example. +definition + "xfdc \ \(t :: cstate). ()" +lemma + "\x b. \snd x = b\ + \ ccorres_underlying rf_sr \ + (\a b. dc a b) (\a. xfdc a) dc xfdc + \ UNIV [SKIP] a c" + supply ccorres_weak_cong[cong] + apply clarify + oops\ definition "xfdc (t :: cstate) \ (0 :: nat)" -lemma xfdc_equal [simp]: +lemma xfdc_equal[simp]: "xfdc t = xfdc s" unfolding xfdc_def by simp @@ -275,10 +335,22 @@ lemma ccorres_return_C_errorE': apply simp done +lemma ccorres_return_void_C_Seq: + "ccorres_underlying sr \ r rvxf arrel xf P P' hs X return_void_C \ + ccorres_underlying sr \ r rvxf arrel xf P P' hs X (return_void_C ;; Z)" + apply (clarsimp simp: return_void_C_def) + apply (erule ccorres_semantic_equiv0[rotated]) + apply (rule semantic_equivI) + apply (clarsimp simp: exec_assoc[symmetric]) + apply (rule exec_Seq_cong, simp) + apply (rule iffI) + apply (auto elim!:exec_Normal_elim_cases intro: exec.Throw exec.Seq)[1] + apply (auto elim!:exec_Normal_elim_cases intro: exec.Throw) + done + context kernel begin - abbreviation "ccorres r xf \ ccorres_underlying rf_sr \ r xf r xf" @@ -655,7 +727,7 @@ lemma cte_C_cap_C_update: fixes val :: "cap_C" and ptr :: "cte_C ptr" assumes cl: "clift hp ptr = Some z" shows "(clift (hrs_mem_update (heap_update (Ptr &(ptr\[''cap_C''])) val) hp)) = - clift hp(ptr \ cte_C.cap_C_update (\_. val) z)" + (clift hp)(ptr \ cte_C.cap_C_update (\_. val) z)" using cl by (simp add: clift_field_update) @@ -826,7 +898,7 @@ lemma ccorres_sequence_x_while_genQ': \n. Suc n < length xs \ \F (n * j)\ xs ! n \\_. F (Suc n * j)\; i + length xs * j < 2 ^ len_of TYPE('c); \s f. xf (xf_update f s) = f (xf s) \ globals (xf_update f s) = globals s; j > 0 \ \ ccorres (\rv i'. i' = of_nat (i + length xs * of_nat j)) xf (\s. P 0 \ F 0 s) ({s. 
xf s = of_nat i} \ Q) hs - (NonDetMonad.sequence_x xs) + (Nondet_Monad.sequence_x xs) (While {s. P (xf s)} (body;; Basic (\s. xf_update (\_. xf s + of_nat j) s)))" apply (simp add: sequence_x_sequence liftM_def[symmetric] @@ -834,7 +906,7 @@ lemma ccorres_sequence_x_while_genQ': apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_while_genQ [where xf'=xfdc and r'=dc and xf_update=xf_update, simplified], - (simp add: dc_def)+) + (simp add: dc_def cong: ccorres_all_cong)+) done lemma ccorres_sequence_x_while_gen': @@ -845,7 +917,7 @@ lemma ccorres_sequence_x_while_gen': \n. Suc n < length xs \ \F (n * j)\ xs ! n \\_. F (Suc n * j)\; i + length xs * j < 2 ^ len_of TYPE('c); \s f. xf (xf_update f s) = f (xf s) \ globals (xf_update f s) = globals s; 0 < j \ \ ccorres (\rv i'. i' = of_nat (i + length xs * of_nat j)) xf (F 0) {s. xf s = of_nat i} hs - (NonDetMonad.sequence_x xs) + (Nondet_Monad.sequence_x xs) (While {s. P (xf s)} (body;; Basic (\s. xf_update (\_. xf s + of_nat j) s)))" apply (simp add: sequence_x_sequence liftM_def[symmetric] @@ -853,17 +925,13 @@ lemma ccorres_sequence_x_while_gen': apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_while_gen' [where xf'=xfdc and r'=dc and xf_update=xf_update, simplified], - (simp add: dc_def)+) + (simp add: dc_def cong: ccorres_all_cong)+) done lemma i_xf_for_sequence: "\s f. i_' (i_'_update f s) = f (i_' s) \ globals (i_'_update f s) = globals s" by simp -lemmas ccorres_sequence_x_while' - = ccorres_sequence_x_while_gen' [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_sequence_x_while_genQ: fixes xf :: "globals myvars \ ('c :: len) word" assumes one: "\n < length xs. ccorres dc xfdc (F (n * j) ) ({s. xf s = of_nat n * of_nat j} \ Q) hs (xs ! n) body" @@ -881,8 +949,9 @@ lemma ccorres_sequence_x_while_genQ: (While {s. P (xf s)} (body ;; Basic (\s. xf_update (\_. xf s + of_nat j) s))))" apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequence_x_while_genQ' [where i=0 and xf_update=xf_update and Q=Q, simplified]) - apply (simp add: assms hi[simplified])+ + apply (rule ccorres_rel_imp) + apply (rule ccorres_sequence_x_while_genQ' [where i=0 and xf_update=xf_update and Q=Q, simplified]) + apply (simp add: assms hi[simplified])+ apply (rule conseqPre, vcg) apply (clarsimp simp add: xf) apply (rule conseqPre, vcg) @@ -904,22 +973,15 @@ lemma ccorres_sequence_x_while_gen: (While {s. P (xf s)} (body ;; Basic (\s. xf_update (\_. xf s + of_nat j) s))))" apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequence_x_while_gen' [where i=0 and xf_update=xf_update, simplified]) - apply (simp add: assms hi[simplified])+ + apply (rule ccorres_rel_imp) + apply (rule ccorres_sequence_x_while_gen' [where i=0 and xf_update=xf_update, simplified]) + apply (simp add: assms hi[simplified])+ apply vcg apply (simp add: xf) apply vcg apply (simp add: xf rf_sr_def) done -lemmas ccorres_sequence_x_while - = ccorres_sequence_x_while_gen [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - -lemmas ccorres_sequence_x_whileQ - = ccorres_sequence_x_while_genQ [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_mapM_x_while_gen: fixes xf :: "globals myvars \ ('c :: len) word" assumes rl: "\n. n < length xs \ ccorres dc xfdc (F (n * j)) {s. xf s = of_nat n * of_nat j} hs (f (xs ! 
n)) body" @@ -937,13 +999,9 @@ lemma ccorres_mapM_x_while_gen: unfolding mapM_x_def apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_x_while_gen[where xf_update=xf_update]) - apply (simp add: assms hi[simplified])+ + apply (simp add: assms hi[simplified])+ done -lemmas ccorres_mapM_x_while - = ccorres_mapM_x_while_gen [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_mapM_x_while_genQ: fixes xf :: "globals myvars \ ('c :: len) word" assumes rl: "\n. n < length xs \ ccorres dc xfdc (F (n * j)) ({s. xf s = of_nat n * of_nat j} \ Q) hs (f (xs ! n)) body" @@ -963,13 +1021,9 @@ lemma ccorres_mapM_x_while_genQ: unfolding mapM_x_def apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_x_while_genQ[where xf_update=xf_update]) - apply (simp add: assms hi[simplified])+ + apply (simp add: assms hi[simplified])+ done -lemmas ccorres_mapM_x_whileQ - = ccorres_mapM_x_while_genQ [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_mapM_x_while_gen': fixes xf :: "globals myvars \ ('c :: len) word" assumes rl: "\n. n < length xs \ @@ -988,14 +1042,10 @@ lemma ccorres_mapM_x_while_gen': unfolding mapM_x_def apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_x_while_gen'[where xf_update=xf_update]) - apply (clarsimp simp only: length_map nth_map rl) - apply (simp add: assms hi[simplified])+ + apply (clarsimp simp only: length_map nth_map rl) + apply (simp add: assms hi[simplified])+ done -lemmas ccorres_mapM_x_while' - = ccorres_mapM_x_while_gen' [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_zipWithM_x_while_genQ: fixes xf :: "globals myvars \ ('c :: len) word" assumes rl: "\n. n < length xs \ n < length ys \ ccorres dc xfdc (F (n * j)) ({s. xf s = of_nat n * of_nat j} \ Q) @@ -1015,21 +1065,53 @@ lemma ccorres_zipWithM_x_while_genQ: (body ;; Basic (\s. xf_update (\_. xf s + of_nat j) s))))" unfolding zipWithM_x_def apply (rule ccorres_guard_imp) - apply (rule ccorres_rel_imp [OF ccorres_sequence_x_while_genQ[where F=F, OF _ _ _ _ _ xf j]], - simp_all add: length_zipWith) - apply (simp add: length_zipWith zipWith_nth) - apply (rule rl) - apply (rule guard) - apply (rule bodyi) - apply (simp add: zipWith_nth hi[simplified]) - apply (rule wb) + apply (rule ccorres_rel_imp [OF ccorres_sequence_x_while_genQ[where F=F, OF _ _ _ _ _ xf j]]; + simp) + apply (simp add: zipWith_nth) + apply (rule rl) + apply (rule guard) + apply (rule bodyi) + apply (simp add: zipWith_nth hi[simplified]) + apply (rule wb) + apply simp+ done +\ \Temporarily remove ccorres_weak_cong, so that the following lemmas can be constructed + with simplified return relations. 
+ We do not use ccorres_all_cong due to it causing unexpected eta-expansion.\ +context +notes ccorres_weak_cong[cong del] +begin +lemmas ccorres_sequence_x_while' + = ccorres_sequence_x_while_gen' [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_sequence_x_while + = ccorres_sequence_x_while_gen [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_sequence_x_whileQ + = ccorres_sequence_x_while_genQ [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_mapM_x_while + = ccorres_mapM_x_while_gen [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_mapM_x_whileQ + = ccorres_mapM_x_while_genQ [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_mapM_x_while' + = ccorres_mapM_x_while_gen' [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + lemmas ccorres_zipWithM_x_while_gen = ccorres_zipWithM_x_while_genQ[where Q=UNIV, simplified] lemmas ccorres_zipWithM_x_while = ccorres_zipWithM_x_while_gen[OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, where j=1, simplified] +end end @@ -1114,7 +1196,7 @@ proof - apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_splitE) - apply (simp add: inl_rrel_inl_rrel) + apply simp apply (rule_tac ys="zs" in one'') apply simp apply (rule ceqv_refl) @@ -1141,7 +1223,8 @@ proof - qed thus ?thesis by (clarsimp simp: init_xs_def dest!: spec[where x=Nil] - elim!: ccorres_rel_imp2 inl_inrE) + elim!: ccorres_rel_imp2 inl_inrE + cong: ccorres_all_cong) qed lemma ccorres_sequenceE_while_down: @@ -1223,10 +1306,11 @@ lemma ccorres_sequenceE_while: Basic (\s. i_'_update (\_. i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp diff --git a/proof/crefine/lib/Ctac.thy b/proof/crefine/lib/Ctac.thy index 5a17cedf70..e5b71f8994 100644 --- a/proof/crefine/lib/Ctac.thy +++ b/proof/crefine/lib/Ctac.thy @@ -1755,7 +1755,7 @@ next apply (simp add: simpl_sequence_Cons sequenceE_Cons) apply (rule ccorres_guard_imp2) apply (rule ccorres_splitE) - apply (simp add: inl_rrel_inl_rrel) + apply simp apply (rule Cons.prems(1)[where zs=Nil, simplified]) apply (rule ceqv_refl) apply (simp add: liftME_def[symmetric] liftME_liftM) @@ -1809,7 +1809,7 @@ lemma mapME_x_simpl_sequence_fun_related: clarsimp elim!: inl_inrE) apply (erule_tac x="length zs" in meta_allE | erule_tac x="xs ! 
length zs" in meta_allE)+ - apply (simp add: dc_def) + apply (simp add: dc_def cong: ccorres_all_cong) done lemmas mapME_x_simpl_sequence_same @@ -1818,8 +1818,8 @@ lemmas mapME_x_simpl_sequence_same lemmas call_ignore_cong = refl[of "call i f g r" for i f g r] (* These could be done with ML patterns, but this fits in better with tactics *) -lemmas match_valid = trivial[of "NonDetMonad.valid P a P'" for P a P'] -lemmas match_validE = trivial[of "NonDetMonad.validE P a P' P''" for P a P' P''] +lemmas match_valid = trivial[of "Nondet_VCG.valid P a P'" for P a P'] +lemmas match_validE = trivial[of "Nondet_VCG.validE P a P' P''" for P a P' P''] lemmas match_hoare = trivial[of "HoarePartialDef.hoarep G T F P C P' A" for G T F P C P' A] lemmas match_all_hoare = trivial[of "\x. HoarePartialDef.hoarep G T F (P x) C (P' x) (A x)" for G T F P C P' A] lemmas match_xpres = trivial[of "xpres xf v \ c" for xf v \ c] @@ -1858,11 +1858,10 @@ method_setup ctac_print_xf = \CtacImpl.corres_print_xf\ "Print out what ctac thinks is the current xf" (* Set up wpc *) -lemma - wpc_helper_ccorres_final: - "ccorres_underlying sr G rv xf arrel axf Q Q' hs f f' - \ wpc_helper (P, P') (Q, Q') - (ccorres_underlying sr G rv xf arrel axf P P' hs f f')" +lemma wpc_helper_ccorres_final: + "ccorres_underlying sr G rv xf arrel axf Q Q'' hs f f' + \ wpc_helper (P, P', P'') (Q, Q', Q'') + (ccorres_underlying sr G rv xf arrel axf P P'' hs f f')" apply (clarsimp simp: wpc_helper_def) apply (erule ccorres_guard_imp) apply auto @@ -1870,14 +1869,15 @@ lemma wpc_setup "\m. ccorres_underlying sr G rv xf arrel axf P P' hs m conc" wpc_helper_ccorres_final wpc_setup "\m. ccorres_underlying sr G rv xf arrel axf P P' hs (m >>= a) conc" wpc_helper_ccorres_final +wpc_setup "\m. ccorres_underlying sr G rv xf arrel axf P P' hs (m >>=E a) conc" wpc_helper_ccorres_final context kernel begin (* Set up ctac proof sets. These are tried in reverse order (further down is tried first) *) -declare ccorres_Guard [corres_pre] -declare ccorres_Guard_Seq [corres_pre] +declare ccorres_Guard [ccorres_pre] +declare ccorres_Guard_Seq [ccorres_pre] lemma c_guard_field_abs: fixes p :: "'a :: mem_type ptr" @@ -1913,13 +1913,6 @@ lemmas ccorres_move_c_guards = ccorres_move_Guard[OF abs_c_guard_from_abs_h_t_valid] ccorres_move_Guard -lemma h_t_array_valid_array_assertion: - "h_t_array_valid htd ptr n \ 0 < n - \ array_assertion ptr n htd" - apply (simp add: array_assertion_def) - apply (fastforce intro: exI[where x=0]) - done - lemma array_assertion_abs_to_const: "\s s'. (s, s') \ rf_sr \ P s \ P' s' \ (Suc 0 = 0 \ array_assertion (ptr s s') (n s s') (htd s s')) @@ -1956,6 +1949,18 @@ lemmas ccorres_move_const_guards = ccorres_move_const_guard ccorres_move_const_guard[unfolded Collect_const] +lemma ccorres_prove_guard_direct: + "\ G; ccorres_underlying rf_sr Gamm rrel xf arrel axf P P' hs m (c) \ \ + ccorres_underlying rf_sr Gamm rrel xf arrel axf P P' hs m (Guard F \G\ c)" + by (rule ccorres_guard_imp, erule ccorres_move_const_guard; simp) + +lemma ccorres_prove_guard_seq: + "\ G; ccorres_underlying rf_sr Gamm rrel xf arrel axf P P' hs m (c;; d) \ \ + ccorres_underlying rf_sr Gamm rrel xf arrel axf P P' hs m (Guard F \G\ c;; d)" + by (rule ccorres_guard_imp, erule ccorres_move_const_guard; simp) + +lemmas ccorres_prove_guard = ccorres_prove_guard_direct ccorres_prove_guard_seq + lemma liftM_exs_valid: "\P\ m \\\rv. 
Q (f rv)\ \ \P\ liftM f m \\Q\" unfolding liftM_def exs_valid_def @@ -2038,6 +2043,7 @@ fun tac ctxt = ORELSE (resolve_tac ctxt [@{thm xpresI}] THEN' simp_tac (ctxt |> Splitter.del_split @{thm "if_split"})) 1 )) THEN simp_tac (put_simpset HOL_basic_ss ctxt addsimps @{thms com.case}) 1 + THEN no_name_eta_tac ctxt \ end diff --git a/proof/crefine/lib/ctac-method.ML b/proof/crefine/lib/ctac-method.ML index 71651db945..f2c65eee11 100644 --- a/proof/crefine/lib/ctac-method.ML +++ b/proof/crefine/lib/ctac-method.ML @@ -123,7 +123,7 @@ fun ceqv_simpl_seq ctxt = Config.get ctxt (fst ceqv_simpl_sequence_pair) val setup = Attrib.setup @{binding "corres"} (Attrib.add_del ctac_add ctac_del) "correspondence rules" - #> Attrib.setup @{binding "corres_pre"} + #> Attrib.setup @{binding "ccorres_pre"} (Attrib.add_del ctac_pre_add ctac_pre_del) "correspondence preprocessing rules" #> Attrib.setup @{binding "corres_post"} @@ -425,8 +425,8 @@ fun ceqv_restore_args_tac ctxt = SUBGOAL (fn (t, n) => case val proc = dest_Const i |> fst |> Long_Name.base_name val pinfo = Hoare.get_data ctxt |> #proc_info val params = Symtab.lookup pinfo proc |> the |> #params - |> filter (fn (v, _) => v = HoarePackage.In) - val new_upds = map (snd #> suffix Record.updateN #> Syntax.read_term ctxt + |> filter (fn (v, _, _) => v = HoarePackage.In) + val new_upds = map (#2 #> suffix Record.updateN #> Syntax.read_term ctxt #> dest_Const #> fst) params |> filter_out (member (op =) cnames) @@ -1107,7 +1107,9 @@ fun shorten_names mp = mp -- Shorten_Names.shorten_names_preserve_new >> MethodExtras.then_all_new val corres_ctac_tactic = let - fun tac upds ctxt = Method.SIMPLE_METHOD' (corres_ctac (apply upds default_ctac_opts) ctxt); + fun tac upds ctxt + = Method.SIMPLE_METHOD' (corres_ctac (apply upds default_ctac_opts) ctxt + THEN_ALL_NEW (fn _ => no_name_eta_tac ctxt)); val option_args = Args.parens (P.list (Scan.first ctac_options)) val opt_option_args = Scan.lift (Scan.optional option_args []) @@ -1133,7 +1135,9 @@ val corres_abstract_args = corres_pre_abstract_args corres_pre_lift_tac_clift; val corres_abstract_init_args = corres_pre_abstract_args corres_pre_lift_tac_cinit; val corres_symb_rhs = let - fun tac upds ctxt = Method.SIMPLE_METHOD' (corres_symb_rhs_tac (apply upds default_csymbr_opts) ctxt); + fun tac upds ctxt + = Method.SIMPLE_METHOD' (corres_symb_rhs_tac (apply upds default_csymbr_opts) ctxt + THEN_ALL_NEW (fn _ => no_name_eta_tac ctxt)); val option_args = Args.parens (P.list (Scan.first csymbr_options)) val opt_option_args = Scan.lift (Scan.optional option_args []) @@ -1143,7 +1147,8 @@ in end; val corres_ceqv = let - fun tac upds ctxt = Method.SIMPLE_METHOD' (corres_solve_ceqv (#trace (apply upds default_ceqv_opts)) 0 ctxt); + fun tac upds ctxt + = Method.SIMPLE_METHOD' (corres_solve_ceqv (#trace (apply upds default_ceqv_opts)) 0 ctxt); val option_args = Args.parens (P.list (Scan.first ceqv_options)) val opt_option_args = Scan.lift (Scan.optional option_args []) @@ -1156,7 +1161,8 @@ end; * We should be able to get the xfs from the goal ... 
*) fun corres_boilerplate unfold_haskell_p = let fun tac (upds, xfs : string list) ctxt - = Method.SIMPLE_METHOD' (corres_boilerplate_tac (apply upds default_cinit_opts) unfold_haskell_p xfs ctxt) + = Method.SIMPLE_METHOD' (corres_boilerplate_tac (apply upds default_cinit_opts) unfold_haskell_p xfs ctxt + THEN_ALL_NEW (fn _ => no_name_eta_tac ctxt)) val var_lift_args = Args.$$$ liftoptN |-- Args.colon |-- Scan.repeat (Scan.unless (Scan.first boilerplate_modifiers) Args.name) diff --git a/proof/drefine/Arch_DR.thy b/proof/drefine/Arch_DR.thy index 3972822eb8..f334eb3721 100644 --- a/proof/drefine/Arch_DR.thy +++ b/proof/drefine/Arch_DR.thy @@ -363,7 +363,7 @@ proof - apply (clarsimp simp add: corres_alternate2 split: ARM_A.pde.split) apply (rule corres_alternate1) apply (rule corres_from_rdonly, simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def in_monad select_def, wp) apply (clarsimp simp: transform_pt_slot_ref_def all_pd_pt_slots_def opt_object_page_directory @@ -409,7 +409,7 @@ proof - apply (rename_tac word1 set word2) apply (rule corres_alternate1) apply (rule corres_from_rdonly, simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def in_monad select_def, wp) apply (clarsimp simp: pd_aligned obj_at_def lookup_pd_slot_pd a_type_simps) @@ -458,7 +458,7 @@ proof - lookup_error_injection dc_def[symmetric]) apply (rule corres_alternate1) apply (rule corres_from_rdonly, simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def in_monad select_def, wp) apply (clarsimp simp: transform_pde_def obj_at_def opt_object_page_directory @@ -477,7 +477,7 @@ proof - lookup_error_injection dc_def[symmetric]) apply (rule corres_alternate1) apply (rule corres_from_rdonly, simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def in_monad select_def, wp) apply (clarsimp simp: transform_pde_def obj_at_def opt_object_page_directory @@ -557,7 +557,6 @@ lemma select_ret_or_throw_twiceE: done crunch inv[wp]: select_ret_or_throw "P" - (wp: select_wp) lemma corres_initial_bindE_rdonly_select_ret_or_throw: assumes y: "\rv'. 
corres_underlying sr nf nf' (e \ r) P P' (select_ret_or_throw S X) (d rv')" @@ -659,7 +658,7 @@ proof (induct x) apply (rule ucast_up_inj[where 'b=32]) apply (simp add: ucast_ucast_mask is_aligned_mask asid_low_bits_def) apply simp - apply (wp select_wp | simp add:valid_cap_def split del: if_split)+ + apply (wp | simp add:valid_cap_def split del: if_split)+ done next case ASIDControlCap @@ -737,7 +736,7 @@ next apply (rule less_trans) apply simp apply simp - apply (wp lsfco_not_idle select_inv select_wp | simp)+ + apply (wp lsfco_not_idle select_inv | simp)+ apply (simp add: cte_wp_at_caps_of_state neq_Nil_conv invs_mdb_cte mdb_cte_at_rewrite) apply auto done @@ -782,8 +781,9 @@ next in corres_alternative_throw_splitE[OF _ _ returnOk_wp[where x="()"], simplified]) apply (rule corres_from_rdonly, simp_all)[1] apply (wp+ | simp)+ - apply (rule hoare_strengthen_post, rule hoare_post_taut) - apply (case_tac r, auto simp add: in_monad)[1] + apply (rule hoare_strengthen_post, rule hoare_TrueI) + apply (rename_tac rv s) + apply (case_tac rv, auto simp add: in_monad)[1] apply (simp add: corres_whenE_throwError_split_rhs corres_alternate2 check_vp_alignment_def unlessE_whenE) apply (clarsimp simp add: liftE_bindE[symmetric]) @@ -821,8 +821,9 @@ next in corres_alternative_throw_splitE[OF _ _ returnOk_wp[where x="()"], simplified]) apply (rule corres_from_rdonly, simp_all)[1] apply (wp+ | simp)+ - apply (rule hoare_strengthen_post, rule hoare_post_taut) - apply (case_tac r, auto simp add: in_monad)[1] + apply (rule hoare_strengthen_post, rule hoare_TrueI) + apply (rename_tac rv s) + apply (case_tac rv, auto simp add: in_monad)[1] apply (simp add: corres_whenE_throwError_split_rhs corres_alternate2 check_vp_alignment_def unlessE_whenE) apply (clarsimp simp add: liftE_bindE[symmetric]) @@ -934,8 +935,9 @@ next for I in corres_alternative_throw_splitE[OF _ _ returnOk_wp[where x="()"], simplified]) apply (rule corres_from_rdonly, simp_all)[1] apply (wp | simp)+ - apply (rule hoare_strengthen_post, rule hoare_post_taut) - apply (case_tac r, auto simp add: in_monad)[1] + apply (rule hoare_strengthen_post, rule hoare_TrueI) + apply (rename_tac rv s) + apply (case_tac rv, auto simp add: in_monad)[1] apply (simp add: corres_whenE_throwError_split_rhs corres_alternate2 check_vp_alignment_def unlessE_whenE) apply clarsimp @@ -945,7 +947,7 @@ next corres_alternate2) apply (rule corres_alternate1) apply (rule corres_from_rdonly,simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def, wp) apply (clarsimp simp: in_monad select_def arch_invocation_relation_def translate_arch_invocation_def transform_page_table_inv_def @@ -961,7 +963,7 @@ next apply (simp add: pd_shifting_dual ucast_nat_def shiftr_20_less triple_shift_fun le_shiftr linorder_not_le) apply (rule hoare_pre, wp, auto)[1] - apply (wp | simp)+ + apply (wp weak_if_wp | simp)+ apply (clarsimp simp: is_final_cap'_def is_final_cap_def split:list.splits) apply (simp add: liftE_bindE is_final_cap_def corres_symb_exec_in_gets @@ -1099,10 +1101,10 @@ lemma set_cap_opt_cap': "\\s. P ((\p. opt_cap p s) (slot \ cap))\ KHeap_D.set_cap slot cap \\rv s. P (\p. 
opt_cap p s)\" apply (cases slot) apply (clarsimp simp add:KHeap_D.set_cap_def split_def) - apply (rule hoare_seq_ext [OF _ dget_object_sp]) + apply (rule bind_wp [OF _ dget_object_sp]) apply (case_tac obj; simp add: KHeap_D.set_object_def has_slots_def update_slots_def object_slots_def split del: if_split cong: if_cong bind_cong; - wpsimp wp: select_wp) + wpsimp) by (auto elim!:rsubst[where P=P] simp: opt_cap_def slots_of_def object_slots_def) lemma set_cap_opt_cap: @@ -1203,7 +1205,7 @@ lemma invoke_page_table_corres: apply clarsimp apply (wp store_pte_cte_wp_at) apply fastforce - apply (wp hoare_post_taut)+ + apply wpsimp+ apply (rule_tac Q="\rv s. invs s \ valid_etcbs s \ a \ idle_thread s \ cte_wp_at \ (a,b) s \ caps_of_state s' = caps_of_state s" in hoare_strengthen_post) apply wp @@ -1585,23 +1587,20 @@ lemma valid_etcbs_clear_um_detype: by (clarsimp simp: valid_etcbs_def st_tcb_at_def is_etcb_at_def st_tcb_at_kh_def obj_at_kh_def obj_at_def detype_def detype_ext_def clear_um_def) - lemma unat_map_upd: - "unat_map (Some \ transform_asid_table_entry \ arm_asid_table - as (asid_high_bits_of base \ frame)) = - unat_map (Some \ transform_asid_table_entry \ arm_asid_table as) - (unat (asid_high_bits_of base) \ AsidPoolCap frame 0)" + "unat_map (Some \ transform_asid_table_entry \ (asid_table as)(asid_high_bits_of base \ frame)) = + (unat_map (Some \ transform_asid_table_entry \ asid_table as)) + (unat (asid_high_bits_of base) \ AsidPoolCap frame 0)" apply (rule ext) - apply (clarsimp simp:unat_map_def asid_high_bits_of_def - transform_asid_table_entry_def) + apply (clarsimp simp:unat_map_def asid_high_bits_of_def transform_asid_table_entry_def) apply (intro impI conjI) apply (subgoal_tac "x<256") - apply (clarsimp simp:unat_map_def asid_high_bits_of_def asid_low_bits_def - transform_asid_table_entry_def transform_asid_def) + apply (clarsimp simp: unat_map_def asid_high_bits_of_def asid_low_bits_def + transform_asid_table_entry_def transform_asid_def) apply (drule_tac x="of_nat x" in unat_cong) apply (subst (asm) word_unat.Abs_inverse) apply (clarsimp simp:unats_def unat_ucast)+ -done + done declare descendants_of_empty[simp] diff --git a/proof/drefine/CNode_DR.thy b/proof/drefine/CNode_DR.thy index 781aafe733..87e0fd58be 100644 --- a/proof/drefine/CNode_DR.thy +++ b/proof/drefine/CNode_DR.thy @@ -121,7 +121,7 @@ lemma dcorres_opt_parent_set_parent_helper: "dcorres dc \ P (gets (opt_parent (transform_cslot_ptr src)) >>= case_option (return ()) - (\parent. modify (\s. s\cdl_cdt := cdl_cdt s(transform_cslot_ptr child \ parent)\))) + (\parent. modify (\s. s\cdl_cdt := (cdl_cdt s)(transform_cslot_ptr child \ parent)\))) g \ dcorres dc \ (\s. cdt s child = None \ cte_at child s \ mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s) \ P s) @@ -143,7 +143,7 @@ lemma dcorres_opt_parent_set_parent_helper: lemma dcorres_set_parent_helper: "dcorres dc \ P - (modify (\s. s\cdl_cdt := cdl_cdt s(transform_cslot_ptr child \ parent)\)) + (modify (\s. s\cdl_cdt := (cdl_cdt s)(transform_cslot_ptr child \ parent)\)) g \ dcorres dc \ (\s. 
cdt s child = None \ cte_at child s \ mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s) \ P s) @@ -218,7 +218,7 @@ lemma insert_cap_sibling_corres: apply (rule_tac s=s' in transform_cdt_slot_inj_on_cte_at[where P=\]) apply (auto simp: swp_def dest: mdb_cte_atD elim!: ranE)[1] - apply ((wp set_cap_caps_of_state2 get_cap_wp static_imp_wp + apply ((wp set_cap_caps_of_state2 get_cap_wp hoare_weak_lift_imp | simp add: swp_def cte_wp_at_caps_of_state)+) apply (wp set_cap_idle | simp add:set_untyped_cap_as_full_def split del: if_split)+ @@ -231,7 +231,7 @@ lemma insert_cap_sibling_corres: cte_wp_at_caps_of_state has_parent_cte_at is_physical_def dest!:is_untyped_cap_eqD) apply fastforce - apply (wp get_cap_wp set_cap_idle static_imp_wp + apply (wp get_cap_wp set_cap_idle hoare_weak_lift_imp | simp add:set_untyped_cap_as_full_def split del: if_split)+ apply (rule_tac Q = "\r s. cdt s sibling = None @@ -244,8 +244,7 @@ lemma insert_cap_sibling_corres: cte_wp_at_caps_of_state has_parent_cte_at is_physical_def dest!:is_untyped_cap_eqD) apply fastforce - apply (wp get_cap_wp set_cap_idle | simp)+ - apply clarsimp + apply (wpsimp wp: get_cap_wp set_cap_idle)+ apply (clarsimp simp: not_idle_thread_def) apply (clarsimp simp: caps_of_state_transform_opt_cap cte_wp_at_caps_of_state transform_cap_def) @@ -303,7 +302,7 @@ lemma insert_cap_child_corres: apply (rule_tac s=s' in transform_cdt_slot_inj_on_cte_at[where P=\]) apply (auto simp: swp_def dest: mdb_cte_atD elim!: ranE)[1] - apply (wp set_cap_caps_of_state2 get_cap_wp static_imp_wp + apply (wp set_cap_caps_of_state2 get_cap_wp hoare_weak_lift_imp | simp add: swp_def cte_wp_at_caps_of_state)+ apply (wp set_cap_idle | simp add:set_untyped_cap_as_full_def split del:if_split)+ @@ -314,14 +313,14 @@ lemma insert_cap_child_corres: apply (wp set_cap_mdb_cte_at | simp add:not_idle_thread_def)+ apply (clarsimp simp:mdb_cte_at_def cte_wp_at_caps_of_state) apply fastforce - apply (wp get_cap_wp set_cap_idle static_imp_wp + apply (wp get_cap_wp set_cap_idle hoare_weak_lift_imp | simp split del:if_split add:set_untyped_cap_as_full_def)+ apply (rule_tac Q = "\r s. not_idle_thread (fst child) s \ (\cap. 
caps_of_state s src = Some cap) \ should_be_parent_of src_capa (is_original_cap s src) cap (cap_insert_dest_original cap src_capa) \ mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" in hoare_strengthen_post) - apply (wp set_cap_mdb_cte_at static_imp_wp | simp add:not_idle_thread_def)+ + apply (wp set_cap_mdb_cte_at hoare_weak_lift_imp | simp add:not_idle_thread_def)+ apply (clarsimp simp:mdb_cte_at_def cte_wp_at_caps_of_state) apply fastforce apply clarsimp @@ -700,7 +699,7 @@ lemma cap_revoke_corres_helper: show ?case supply if_cong[cong] apply (subst cap_revoke.simps) - apply (rule monadic_rewrite_corres2[where P =\,simplified]) + apply (rule monadic_rewrite_corres_l[where P =\,simplified]) apply (rule Finalise_DR.monadic_trancl_preemptible_step) apply (rule dcorres_expand_pfx) apply (clarsimp simp:liftE_bindE) @@ -719,7 +718,7 @@ lemma cap_revoke_corres_helper: apply (clarsimp simp:empty_set_eq)+ apply (clarsimp simp:returnOk_def lift_def) apply (rule corres_guard_imp) - apply (rule monadic_rewrite_corres2[where P=\ ,simplified]) + apply (rule monadic_rewrite_corres_l[where P=\ ,simplified]) apply (rule monadic_trancl_preemptible_return) apply (rule corres_trivial) apply (clarsimp simp:returnOk_def boolean_exception_def)+ @@ -734,7 +733,7 @@ lemma cap_revoke_corres_helper: apply simp+ apply (clarsimp simp: lift_def empty_set_eq)+ apply (rule corres_guard_imp) - apply (rule monadic_rewrite_corres2[where P=\ ,simplified]) + apply (rule monadic_rewrite_corres_l[where P=\ ,simplified]) apply (rule monadic_trancl_preemptible_return) apply (rule corres_trivial) apply (clarsimp simp:returnOk_def boolean_exception_def)+ @@ -756,7 +755,7 @@ lemma cap_revoke_corres_helper: apply (erule cte_wp_at_weakenE, simp) apply (simp,blast) apply simp+ - apply (wp select_wp,(clarsimp simp: select_ext_def in_monad)+) + apply (wp, (clarsimp simp: select_ext_def in_monad)+) apply (rule dcorres_expand_pfx) apply (rule_tac r'="\cap cap'. cap = transform_cap cap'" and Q ="\r. \" and Q'="\r s. cte_wp_at (\x. 
x = r) (aa,ba) s \ s = sfix" in corres_split_forwards') @@ -792,9 +791,10 @@ lemma cap_revoke_corres_helper: in corres_split_forwards') apply (rule corres_guard_imp[OF corres_trivial[OF preemption_corres]]) apply simp+ - apply (rule alternative_valid) - apply (simp add:valid_def throwError_def return_def) - apply (simp add:valid_def returnOk_def return_def) + apply wp + apply (simp add:valid_def throwError_def return_def) + apply (simp add:valid_def returnOk_def return_def) + apply fastforce apply (clarsimp simp: valid_def) apply clarsimp apply (case_tac rva) @@ -877,28 +877,28 @@ lemma corres_mapM_to_mapM_x: by (simp add: mapM_x_mapM liftM_def[symmetric]) lemma ep_waiting_set_recv_upd_kh: - "ep_at epptr s \ (ep_waiting_set_recv epptr (update_kheap (kheap s(epptr \ kernel_object.Endpoint X)) s)) + "ep_at epptr s \ (ep_waiting_set_recv epptr (update_kheap ((kheap s)(epptr \ kernel_object.Endpoint X)) s)) = (ep_waiting_set_recv epptr s)" apply (rule set_eqI) apply (clarsimp simp:ep_waiting_set_recv_def obj_at_def is_ep_def) done lemma ep_waiting_set_send_upd_kh: - "ep_at epptr s \ (ep_waiting_set_send epptr (update_kheap (kheap s(epptr \ kernel_object.Endpoint X)) s)) + "ep_at epptr s \ (ep_waiting_set_send epptr (update_kheap ((kheap s)(epptr \ kernel_object.Endpoint X)) s)) = (ep_waiting_set_send epptr s)" apply (rule set_eqI) apply (clarsimp simp:ep_waiting_set_send_def obj_at_def is_ep_def) done lemma ntfn_waiting_set_upd_kh: - "ep_at epptr s \ (ntfn_waiting_set epptr (update_kheap (kheap s(epptr \ kernel_object.Endpoint X)) s)) + "ep_at epptr s \ (ntfn_waiting_set epptr (update_kheap ((kheap s)(epptr \ kernel_object.Endpoint X)) s)) = (ntfn_waiting_set epptr s)" apply (rule set_eqI) apply (clarsimp simp:ntfn_waiting_set_def obj_at_def is_ep_def) done lemma dcorres_ep_cancel_badge_sends: - notes hoare_post_taut[wp] + notes hoare_TrueI[wp] shows "dcorres dc \ (valid_state and valid_etcbs) (CSpace_D.cancel_badged_sends epptr word2) @@ -1346,7 +1346,7 @@ next show ?case apply (clarsimp simp:mapM_Cons) apply (subst do_machine_op_bind) - apply (clarsimp simp:ef_storeWord)+ + apply (clarsimp simp:ef_storeWord empty_fail_cond)+ apply (subst corrupt_frame_duplicate[symmetric]) apply (rule corres_guard_imp) apply (rule corres_split) @@ -1569,7 +1569,7 @@ lemma copy_global_mappings_dwp: apply (rule_tac Q = "\r s. valid_idle s \ transform s = cs" in hoare_strengthen_post) apply (rule mapM_x_wp') apply wp - apply (rule_tac Q="\s. valid_idle s \ transform s = cs" in hoare_vcg_precond_imp) + apply (rule_tac Q="\s. 
valid_idle s \ transform s = cs" in hoare_weaken_pre) apply (rule dcorres_to_wp) apply (rule corres_guard_imp[OF store_pde_set_cap_corres]) apply (clarsimp simp:kernel_mapping_slots_def) @@ -1779,7 +1779,7 @@ lemma thread_set_valid_idle: apply (simp add: thread_set_def not_idle_thread_def) apply (simp add: gets_the_def valid_idle_def) apply wp - apply (rule_tac Q="not_idle_thread thread and valid_idle" in hoare_vcg_precond_imp) + apply (rule_tac Q="not_idle_thread thread and valid_idle" in hoare_weaken_pre) apply (clarsimp simp: KHeap_A.set_object_def get_object_def in_monad get_def put_def bind_def obj_at_def return_def valid_def not_idle_thread_def valid_idle_def pred_tcb_at_def) apply simp+ @@ -1946,7 +1946,7 @@ context notes if_cong[cong] begin crunch valid_etcbs[wp]: cancel_badged_sends valid_etcbs -(wp: mapM_x_wp hoare_drop_imps hoare_unless_wp ignore: filterM) +(wp: mapM_x_wp hoare_drop_imps unless_wp ignore: filterM) end lemma cap_revoke_valid_etcbs[wp]: @@ -2090,7 +2090,7 @@ lemma decode_cnode_error_corres: apply (rule corres_symb_exec_r_dcE, wp) apply (rule corres_symb_exec_r_dcE, wp) apply (rule corres_symb_exec_r_dcE) - apply (rule hoare_pre, wp hoare_whenE_wp) + apply (rule hoare_pre, wp whenE_wp) apply simp apply (rule corres_trivial) apply (simp split: gen_invocation_labels.split invocation_label.split list.split) @@ -2319,7 +2319,7 @@ lemma lsfco_not_idle: "\valid_objs and valid_cap cap and valid_idle\ CSpace_A.lookup_slot_for_cnode_op b cap idx depth \\rv. not_idle_thread (fst rv)\, -" - apply (rule_tac Q'="\rv. real_cte_at rv and valid_idle" in hoare_post_imp_R) + apply (rule_tac Q'="\rv. real_cte_at rv and valid_idle" in hoare_strengthen_postE_R) apply (rule hoare_pre, wp) apply simp apply (clarsimp simp: obj_at_def not_idle_thread_def valid_idle_def @@ -2481,7 +2481,7 @@ lemma decode_cnode_corres: apply (rule dcorres_returnOk) apply (simp add:translate_cnode_invocation_def) apply wp+ - apply (rule hoare_post_imp_R[OF validE_validE_R]) + apply (rule hoare_strengthen_postE_R[OF validE_validE_R]) apply (rule hoareE_TrueI[where P = \]) apply (wp|simp)+ apply (strengthen mask_cap_valid) @@ -2648,17 +2648,13 @@ lemma decode_cnode_corres: apply simp apply (rule dcorres_returnOk) apply (simp add:translate_cnode_invocation_def) - apply (wp get_cap_wp hoare_whenE_wp|clarsimp)+ - apply (rule hoare_post_imp_R[OF validE_validE_R]) - apply (rule hoareE_TrueI[where P = \]) - apply fastforce - apply (wp hoare_drop_imp|simp)+ + apply (wp get_cap_wp | simp)+ apply (rule_tac Q'="\r. real_cte_at src_slota and valid_objs and real_cte_at dest_slota and valid_idle and not_idle_thread (fst src_slota) and not_idle_thread (fst dest_slota) and not_idle_thread (fst r) and valid_etcbs" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp lsfco_not_idle) apply (clarsimp simp:Invariants_AI.cte_wp_valid_cap) apply (wp lsfco_not_idle)+ diff --git a/proof/drefine/Corres_D.thy b/proof/drefine/Corres_D.thy index 17afc63fad..979907b6c1 100644 --- a/proof/drefine/Corres_D.thy +++ b/proof/drefine/Corres_D.thy @@ -84,7 +84,7 @@ lemma corres_free_return: lemma corres_free_set_object: "\ \ s s'. s = transform s' \ P s \ P' s' \ - s = transform ((\s. s \kheap := kheap s (ptr \ obj)\) s')\ \ + s = transform ((\s. 
s \kheap := (kheap s)(ptr \ obj)\) s')\ \ dcorres dc P P' (return a) (set_object ptr obj )" by (clarsimp simp: corres_underlying_def put_def return_def modify_def bind_def get_def set_object_def get_object_def in_monad) @@ -244,7 +244,7 @@ lemma dcorres_gets_the: lemma wpc_helper_dcorres: "dcorres r Q Q' f f' - \ wpc_helper (P, P') (Q, {s. Q' s}) (dcorres r P (\s. s \ P') f f')" + \ wpc_helper (P, P', P'') (Q, Q', Q'') (dcorres r P P' f f')" apply (clarsimp simp: wpc_helper_def) apply (erule corres_guard_imp) apply simp @@ -273,7 +273,7 @@ lemma hoare_mapM_idempotent: "\ \ a R. \ R \ x a \< apply atomize apply (erule_tac x=a in allE) apply (erule_tac x=R in allE) - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply wp apply assumption done @@ -633,6 +633,7 @@ lemma dcorres_rhs_noop_above: "\ dcorres anyrel P P' (return ()) m; dcor lemmas dcorres_rhs_noop_below_True = dcorres_rhs_noop_below[OF _ _ hoare_TrueI hoare_TrueI] lemmas dcorres_rhs_noop_above_True = dcorres_rhs_noop_above[OF _ _ hoare_TrueI hoare_TrueI] +\ \FIXME: remove\ declare hoare_TrueI[simp] lemma dcorres_dc_rhs_noop_below_gen: diff --git a/proof/drefine/Finalise_DR.thy b/proof/drefine/Finalise_DR.thy index 86b8abd7a8..bd137e2c90 100644 --- a/proof/drefine/Finalise_DR.thy +++ b/proof/drefine/Finalise_DR.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -86,9 +87,9 @@ lemma dcorres_unmap_page_empty: apply (rule corres_symb_exec_l) apply (rule corres_guard_imp) apply (rule_tac x = "[]" in select_pick_corres) - apply (clarsimp simp:mapM_x_def sequence_x_def del:hoare_post_taut) + apply (clarsimp simp:mapM_x_def sequence_x_def del:hoare_TrueI) prefer 4 - apply (rule hoare_post_taut) + apply (rule hoare_TrueI) apply simp_all apply (simp add:exs_valid_def slots_with_def gets_def has_slots_def get_def bind_def return_def ) done @@ -209,13 +210,14 @@ lemma delete_cap_one_shrink_descendants: apply (clarsimp simp add:empty_slot_def) apply (wp dxo_wp_weak) apply simp - apply (rule_tac P="\s. valid_mdb s \ cdt s = xa \ cdt pres = xa \ slot \ CSpaceAcc_A.descendants_of p (cdt s) - \ mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" - in hoare_vcg_precond_imp) + apply (rename_tac slot_p cdt') + apply (rule_tac P="\s. valid_mdb s \ cdt s = cdt' \ cdt pres = cdt' \ slot \ CSpaceAcc_A.descendants_of p (cdt s) + \ mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" + in hoare_weaken_pre) apply (rule_tac Q ="\r s. Q r s \ (mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s))" for Q in hoare_strengthen_post) apply (rule hoare_vcg_conj_lift) apply (rule delete_cdt_slot_shrink_descendants[where y= "cdt pres" and p = p]) - apply (rule_tac Q="\s. mdb_cte_at (swp (cte_wp_at ((\)cap.NullCap)) s ) xa" in hoare_vcg_precond_imp) + apply (rule_tac Q="\s. 
mdb_cte_at (swp (cte_wp_at ((\)cap.NullCap)) s ) cdt'" in hoare_weaken_pre) apply (case_tac slot) apply (clarsimp simp:set_cdt_def get_def put_def bind_def valid_def mdb_cte_at_def) apply (assumption) @@ -244,7 +246,7 @@ lemma delete_cap_one_shrink_descendants: apply (drule descendants_not_null_cap) apply simp apply (clarsimp simp:cte_wp_at_def) -done + done lemma invs_emptyable_descendants: "\invs s;CSpaceAcc_A.descendants_of slot (cdt s) = {(a, b)}\ @@ -484,8 +486,8 @@ lemma dcorres_deleting_irq_handler: apply (rule corres_guard_imp) apply (rule corres_split[OF dcorres_get_irq_slot]) apply (simp, rule delete_cap_simple_corres,simp) - apply (rule hoare_vcg_precond_imp [where Q="invs and valid_etcbs"]) - including no_pre + apply (rule hoare_weaken_pre [where Q="invs and valid_etcbs"]) + including classic_wp_pre apply (wpsimp simp:get_irq_slot_def)+ apply (rule irq_node_image_not_idle) apply (simp add:invs_def valid_state_def)+ @@ -541,7 +543,7 @@ lemma flush_space_dwp[wp]: apply (clarsimp split:option.splits) apply (rule do_machine_op_wp) apply clarsimp - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (rule do_machine_op_wp) apply clarsimp apply wp @@ -649,7 +651,7 @@ lemma opt_object_asid_pool: lemma transform_asid_pool_contents_upd: "transform_asid_pool_contents (pool(ucast asid := pd)) = - transform_asid_pool_contents pool(snd (transform_asid asid) \ transform_asid_pool_entry pd)" + (transform_asid_pool_contents pool)(snd (transform_asid asid) \ transform_asid_pool_entry pd)" apply (clarsimp simp:transform_asid_pool_contents_def transform_asid_def) apply (rule ext) apply (case_tac x) @@ -697,7 +699,7 @@ lemma dcorres_set_vm_root: apply (wp do_machine_op_wp | clarsimp)+ apply (rule_tac Q = "\_ s. transform s = cs" in hoare_post_imp) apply simp - apply (wpsimp wp: hoare_whenE_wp do_machine_op_wp [OF allI] hoare_drop_imps find_pd_for_asid_inv + apply (wpsimp wp: whenE_wp do_machine_op_wp [OF allI] hoare_drop_imps find_pd_for_asid_inv simp: arm_context_switch_def get_hw_asid_def load_hw_asid_def if_apply_def2)+ done @@ -1147,7 +1149,7 @@ lemma dcorres_delete_cap_simple_set_pt: lemma transform_page_table_contents_upd: - "transform_page_table_contents fun(unat (y && mask pt_bits >> 2) \ transform_pte pte) = + "(transform_page_table_contents fun)(unat (y && mask pt_bits >> 2) \ transform_pte pte) = transform_page_table_contents (fun(ucast ((y::word32) && mask pt_bits >> 2) := pte))" apply (rule ext) apply (clarsimp simp: transform_page_table_contents_def unat_map_def) @@ -1166,7 +1168,7 @@ lemma transform_page_table_contents_upd: lemma transform_page_directory_contents_upd: "ucast ((ptr::word32) && mask pd_bits >> 2) \ kernel_mapping_slots - \ transform_page_directory_contents f(unat (ptr && mask pd_bits >> 2) \ transform_pde a_pde) + \ (transform_page_directory_contents f)(unat (ptr && mask pd_bits >> 2) \ transform_pde a_pde) = transform_page_directory_contents (f(ucast (ptr && mask pd_bits >> 2) := a_pde))" apply (rule ext) apply (simp (no_asm) add: transform_page_directory_contents_def unat_map_def) @@ -1401,11 +1403,11 @@ lemma remain_pt_pd_relation: apply (subgoal_tac "ptr\ y") apply (simp add: store_pte_def) apply wp - apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageTable x)) (ptr && ~~ mask pt_bits) - and pt_page_relation (y && ~~ mask pt_bits) pg_id y S" in hoare_vcg_precond_imp) + apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageTable rv)) (ptr && ~~ mask pt_bits) + and pt_page_relation (y && ~~ mask pt_bits) pg_id y S" in hoare_weaken_pre) apply 
(clarsimp simp: set_pt_def) - apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageTable x)) (ptr && ~~ mask pt_bits) - and pt_page_relation (y && ~~ mask pt_bits) pg_id y S" in hoare_vcg_precond_imp) + apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageTable rv)) (ptr && ~~ mask pt_bits) + and pt_page_relation (y && ~~ mask pt_bits) pg_id y S" in hoare_weaken_pre) apply (clarsimp simp: valid_def set_object_def get_object_def in_monad) apply (drule_tac x= y in bspec,simp) apply (clarsimp simp: pt_page_relation_def dest!: ucast_inj_mask| rule conjI)+ @@ -1425,11 +1427,11 @@ lemma remain_pd_section_relation: \\r s. pd_section_relation (y && ~~ mask pd_bits) sid y s\" apply (simp add: store_pde_def) apply wp - apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageDirectory x)) (ptr && ~~ mask pd_bits) - and pd_section_relation (y && ~~ mask pd_bits) sid y " in hoare_vcg_precond_imp) + apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageDirectory rv)) (ptr && ~~ mask pd_bits) + and pd_section_relation (y && ~~ mask pd_bits) sid y " in hoare_weaken_pre) apply (clarsimp simp: set_pd_def) - apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageDirectory x)) (ptr && ~~ mask pd_bits) - and pd_section_relation (y && ~~ mask pd_bits) sid y " in hoare_vcg_precond_imp) + apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageDirectory rv)) (ptr && ~~ mask pd_bits) + and pd_section_relation (y && ~~ mask pd_bits) sid y " in hoare_weaken_pre) apply (clarsimp simp: valid_def set_object_def get_object_def in_monad) apply (clarsimp simp: pd_section_relation_def dest!: ucast_inj_mask | rule conjI)+ apply (drule mask_compare_imply) @@ -1447,11 +1449,11 @@ lemma remain_pd_super_section_relation: \\r s. pd_super_section_relation (y && ~~ mask pd_bits) sid y s\" apply (simp add: store_pde_def) apply wp - apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageDirectory x)) (ptr && ~~ mask pd_bits) - and pd_super_section_relation (y && ~~ mask pd_bits) sid y " in hoare_vcg_precond_imp) + apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageDirectory rv)) (ptr && ~~ mask pd_bits) + and pd_super_section_relation (y && ~~ mask pd_bits) sid y " in hoare_weaken_pre) apply (clarsimp simp: set_pd_def) - apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageDirectory x)) (ptr && ~~ mask pd_bits) - and pd_super_section_relation (y && ~~ mask pd_bits) sid y " in hoare_vcg_precond_imp) + apply (rule_tac Q = "ko_at (ArchObj (arch_kernel_obj.PageDirectory rv)) (ptr && ~~ mask pd_bits) + and pd_super_section_relation (y && ~~ mask pd_bits) sid y " in hoare_weaken_pre) apply (clarsimp simp: valid_def set_object_def get_object_def in_monad) apply (clarsimp simp: pd_super_section_relation_def dest!: ucast_inj_mask | rule conjI)+ apply (drule mask_compare_imply) @@ -1984,7 +1986,7 @@ lemma check_mapping_pptr_section_relation: "\\\ check_mapping_pptr w ARMSection (Inr (lookup_pd_slot rv' b)) \\rv s. rv \ pd_section_relation (lookup_pd_slot rv' b && ~~ mask pd_bits) w (lookup_pd_slot rv' b) s\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (simp add:check_mapping_pptr_def) apply (wp get_pde_wp) apply (clarsimp simp: obj_at_def) @@ -1995,7 +1997,7 @@ done lemma check_mapping_pptr_super_section_relation: "\\\ check_mapping_pptr w ARMSuperSection (Inr (lookup_pd_slot rv' b)) \\rv s. 
rv \ pd_super_section_relation (lookup_pd_slot rv' b && ~~ mask pd_bits) w (lookup_pd_slot rv' b) s\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (simp add:check_mapping_pptr_def) apply (wp get_pde_wp) apply (clarsimp simp: obj_at_def) @@ -2007,7 +2009,7 @@ lemma lookup_pt_slot_aligned: "\invs and \\ pd and K (is_aligned pd pd_bits \ is_aligned vptr 16 \ vptr < kernel_base)\ lookup_pt_slot pd vptr \\rb s. is_aligned rb 6\, -" apply (rule hoare_gen_asmE)+ - apply (rule hoare_pre, rule hoare_post_imp_R, rule lookup_pt_slot_cap_to) + apply (rule hoare_pre, rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to) apply auto done @@ -2159,7 +2161,7 @@ lemma dcorres_page_table_mapped: apply (simp add:transform_pde_def) apply (rule dcorres_returnOk,simp) apply wp+ - apply (rule hoare_post_imp_R[OF find_pd_for_asid_aligned_pd]) + apply (rule hoare_strengthen_postE_R[OF find_pd_for_asid_aligned_pd]) apply simp apply (erule less_kernel_base_mapping_slots) apply (simp add:pd_bits_def pageBits_def) @@ -2287,7 +2289,7 @@ lemma find_pd_for_asid_kernel_mapping_help: "\pspace_aligned and valid_vspace_objs and K (v find_pd_for_asid a \\rv s. ucast (lookup_pd_slot rv v && mask pd_bits >> 2) \ kernel_mapping_slots \,-" apply (rule hoare_gen_asmE) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule find_pd_for_asid_aligned_pd_bits) apply simp apply (rule less_kernel_base_mapping_slots) @@ -2502,6 +2504,7 @@ lemma dcorres_delete_asid: apply (wp | clarsimp)+ apply simp apply (wp | clarsimp)+ + apply (rule hoare_pre, wp, clarsimp) apply (rule hoare_pre, wp) apply simp apply (wp | clarsimp)+ @@ -2779,11 +2782,9 @@ lemma monadic_trancl_f: lemma monadic_trancl_step: "monadic_rewrite False False \ (monadic_trancl f x) (do y \ f x; monadic_trancl f y od)" - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_trancl_steps) - apply (rule monadic_rewrite_bind_head) - apply (rule monadic_trancl_f) + apply (monadic_rewrite_l monadic_trancl_steps) + apply (monadic_rewrite_l monadic_trancl_f) + apply (rule monadic_rewrite_refl) apply simp done @@ -2819,30 +2820,20 @@ lemma monadic_trancl_preemptible_steps: (monadic_trancl_preemptible f x) (doE y \ monadic_trancl_preemptible f x; monadic_trancl_preemptible f y odE)" - apply (simp add: monadic_trancl_preemptible_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_trancl_steps) - apply (simp add: bindE_def) - apply (rule_tac Q="\\" in monadic_rewrite_bind_tail) - apply (case_tac x) - apply (simp add: lift_def monadic_trancl_lift_Inl) - apply (rule monadic_rewrite_refl) - apply (simp add: lift_def) - apply (rule monadic_rewrite_refl) - apply (wp | simp)+ + unfolding monadic_trancl_preemptible_def bindE_def + apply (monadic_rewrite_l monadic_trancl_steps) + apply (rule monadic_rewrite_bind_tail) + apply (case_tac y; simp add: lift_def monadic_trancl_lift_Inl) + apply (rule monadic_rewrite_refl)+ + apply wpsimp+ done lemma monadic_trancl_preemptible_f: "monadic_rewrite False False (\_. 
True) (monadic_trancl_preemptible f x) (f x)" - apply (simp add: monadic_trancl_preemptible_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_trancl_f) - apply (simp add: lift_def) - apply (rule monadic_rewrite_refl) - apply simp + unfolding monadic_trancl_preemptible_def + apply (monadic_rewrite_l monadic_trancl_f) + apply (fastforce simp: lift_def intro!: monadic_rewrite_refl)+ done lemma monadic_trancl_preemptible_step: @@ -2850,24 +2841,17 @@ lemma monadic_trancl_preemptible_step: (monadic_trancl_preemptible f x) (doE y \ f x; monadic_trancl_preemptible f y odE)" - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_trancl_preemptible_steps) - apply (rule monadic_rewrite_bindE_head) - apply (rule monadic_trancl_preemptible_f) - apply simp + apply (monadic_rewrite_l monadic_trancl_preemptible_steps) + apply (monadic_rewrite_l monadic_trancl_preemptible_f) + apply (fastforce intro!: monadic_rewrite_refl)+ done lemma monadic_trancl_preemptible_return: "monadic_rewrite False False (\_. True) (monadic_trancl_preemptible f x) (returnOk x)" - apply (simp add: monadic_trancl_preemptible_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (rule monadic_trancl_return) - apply (simp add: returnOk_def) - apply (rule monadic_rewrite_refl) - apply simp + unfolding monadic_trancl_preemptible_def + apply (monadic_rewrite_l monadic_trancl_return) + apply (fastforce simp: returnOk_def intro!: monadic_rewrite_refl)+ done lemma dcorres_get_cap_symb_exec: @@ -3211,24 +3195,18 @@ lemma finalise_slot_inner1_add_if_Null: od od)" supply if_cong[cong] - apply (simp add: finalise_slot_inner1_def) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_if_rhs) - apply (simp add: PageTableUnmap_D.is_final_cap_def) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_tail[where j="\_. 
j" for j, OF _ gets_wp])+ - apply (rename_tac remove, rule_tac P=remove in monadic_rewrite_gen_asm) - apply simp - apply (rule monadic_rewrite_refl) - apply (simp add: gets_bind_ign when_def) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_bind_head) - apply (rule monadic_rewrite_pick_alternative_1) - apply simp - apply (rule monadic_rewrite_refl) - apply (rule monadic_rewrite_refl) - apply wp + apply (rule monadic_rewrite_weaken_flags[where F=False and E=False, simplified]) + apply (simp add: finalise_slot_inner1_def when_def PageTableUnmap_D.is_final_cap_def) + apply (rule monadic_rewrite_bind_tail) + apply (rule monadic_rewrite_if_r, clarsimp) + apply (monadic_rewrite_l monadic_rewrite_pick_alternative_1) + apply (monadic_rewrite_l monadic_rewrite_if_l_False) + apply monadic_rewrite_symb_exec_l_drop + apply (monadic_rewrite_symb_exec_l_known True) + apply monadic_rewrite_symb_exec_l + apply (rule monadic_rewrite_refl) + apply wpsimp+ + apply (rule monadic_rewrite_refl) apply (clarsimp simp: CSpace_D.cap_removeable_def) done @@ -3275,12 +3253,12 @@ lemma finalise_preemption_corres: apply (rule dcorres_symb_exec_rE) apply (simp split: option.splits) apply (rule conjI, clarsimp) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_trancl_preemptible_return) apply (rule dcorres_returnOk, simp) apply clarsimp apply (rule corres_guard_imp) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_trancl_preemptible_f) apply (rule corres_alternate2[OF dcorres_throw], simp_all)[4] apply ((wp | simp)+)[1] @@ -3288,7 +3266,7 @@ lemma finalise_preemption_corres: apply ((simp add: reset_work_units_def | wp)+)[1] apply clarsimp apply (rule corres_guard_imp) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_trancl_preemptible_return) apply (rule dcorres_returnOk, simp_all)[3] apply (rule hoare_TrueI) @@ -3405,7 +3383,7 @@ proof (induct arbitrary: S rule: rec_del.induct, apply (subst rec_del_simps_ext[unfolded split_def]) apply simp apply (rule corres_guard_imp) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_trancl_preemptible_steps) apply (simp add: cutMon_walk_bindE) apply (rule corres_splitEE) @@ -3414,7 +3392,7 @@ proof (induct arbitrary: S rule: rec_del.induct, apply (simp add: liftME_def[symmetric]) apply (rule_tac R="fst rv" in corres_cases) apply (simp add: when_def) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_trancl_preemptible_f) apply (simp add: finalise_slot_inner2_def[unfolded split_def]) apply (rule corres_alternate1, rule corres_alternate2) @@ -3425,7 +3403,7 @@ proof (induct arbitrary: S rule: rec_del.induct, apply (simp add: liftM_def[symmetric] o_def dc_def[symmetric]) apply (rule empty_slot_corres) apply (simp add: when_def) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_trancl_preemptible_return) apply (rule corres_trivial, simp add: returnOk_liftE) apply wp @@ -3499,7 +3477,7 @@ next apply (rule stronger_corres_guard_imp) apply (simp add: cutMon_walk_bind) apply (rule corres_drop_cutMon_bind) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_rewrite_bindE_head) apply (rule monadic_trancl_preemptible_step) apply (simp add: finalise_slot_inner2_def @@ -3508,7 +3486,7 @@ next apply (rule corres_alternate1)+ apply (simp add: 
liftE_bindE bind_bindE_assoc bind_assoc) apply (rule select_pick_corres_asm, assumption) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_rewrite_bind_head) apply (rule finalise_slot_inner1_add_if_Null[unfolded split_def]) apply (simp add: bind_assoc if_to_top_of_bind) @@ -3521,7 +3499,7 @@ next apply simp apply (rule corres_drop_cutMon) apply (rule corres_underlying_gets_pre_lhs)+ - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_rewrite_bindE_head) apply (rule monadic_trancl_preemptible_return) apply simp @@ -3551,13 +3529,10 @@ next apply (rule corres_if_rhs_only) apply (rule_tac F=remove in corres_note_assumption, simp) apply (simp add: when_def) - apply (rule monadic_rewrite_corres2) - apply (rule monadic_rewrite_bind) - apply (rule monadic_rewrite_pick_alternative_1) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_bindE_head) - apply (rule monadic_trancl_preemptible_return) - apply wp+ + apply (rule monadic_rewrite_corres_l) + apply (monadic_rewrite_l monadic_rewrite_pick_alternative_1, simp) + apply (monadic_rewrite_l monadic_trancl_preemptible_return) + apply (rule monadic_rewrite_refl) apply simp apply (rule corres_underlying_gets_pre_lhs) apply (rule corres_drop_cutMon) @@ -3565,7 +3540,7 @@ next apply (rule corres_if_rhs_only) apply simp apply (rule corres_drop_cutMon) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_rewrite_bind) apply (rule monadic_rewrite_pick_alternative_2) apply (rule monadic_rewrite_bind_tail) @@ -3576,7 +3551,7 @@ next apply (rule corres_underlying_gets_pre_lhs) apply (rule corres_trivial, simp add: returnOk_liftE) apply (wp | simp)+ - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_rewrite_bind_head) apply (rule monadic_rewrite_pick_alternative_2) apply (simp add: cutMon_walk_bind) @@ -3599,7 +3574,7 @@ next apply (frule cte_at_replicate_zbits) apply (clarsimp simp: cte_wp_at_caps_of_state caps_of_state_transform_opt_cap) apply (clarsimp simp: transform_cslot_ptr_def) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_rewrite_bindE_head) apply (rule monadic_rewrite_trans) apply (rule monadic_trancl_preemptible_steps) @@ -3629,7 +3604,7 @@ next | simp add: not_idle_thread_def del: gets_to_return)+ apply (simp add: conj_comms) apply (wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at - hoare_vcg_const_Ball_lift set_cap_cte_cap_wp_to static_imp_wp + hoare_vcg_const_Ball_lift set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master[simplified in_monad, simplified] | simp only: not_idle_thread_def pred_conj_def simp_thms)+ apply (rule hoare_strengthen_post) @@ -3674,7 +3649,7 @@ next apply (rule corres_drop_cutMon) apply (simp add: liftME_def[symmetric] liftE_bindE[symmetric]) apply (rule stronger_corres_guard_imp) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_trancl_preemptible_f) apply (simp add: finalise_slot_inner2_def[unfolded split_def]) apply (rule corres_alternate1, rule corres_alternate1, rule corres_alternate2) @@ -3696,7 +3671,7 @@ next apply simp apply (rule stronger_corres_guard_imp) apply (simp add: cutMon_walk_bindE) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_trancl_preemptible_steps) apply (rule corres_splitEE) apply (rule 
"4.hyps"[simplified, folded dc_def]) @@ -3706,7 +3681,7 @@ next apply (simp add: liftE_bindE) apply (rule corres_symb_exec_r) apply (simp add: liftME_def[symmetric] split del: if_split) - apply (rule monadic_rewrite_corres2) + apply (rule monadic_rewrite_corres_l) apply (rule monadic_trancl_preemptible_return) apply (rule corres_if_rhs_only) apply (simp add: returnOk_liftE) diff --git a/proof/drefine/Intent_DR.thy b/proof/drefine/Intent_DR.thy index 67eeb7abbd..14696e0c51 100644 --- a/proof/drefine/Intent_DR.thy +++ b/proof/drefine/Intent_DR.thy @@ -302,16 +302,16 @@ lemma dcorres_set_object_tcb: apply (clarsimp simp: option_map_def restrict_map_def map_add_def) done -lemma set_cxt_none_det_intent_corres: +lemma set_cxt_none_det_intent_corres': "\kheap s' y = Some (TCB obj'); ekheap s' y \ None; valid_idle s';not_idle_thread y s'\ \ dcorres dc ((=) (transform s')) ((=) s') (corrupt_tcb_intent y) - (KHeap_A.set_object y (TCB (tcb_arch_update (arch_tcb_context_set cxt) obj')))" + (KHeap_A.set_object y (TCB (tcb_arch_update f obj')))" apply (clarsimp simp:bind_assoc corrupt_tcb_intent_def get_thread_def gets_def gets_the_def) apply (rule corres_guard_imp) apply (rule_tac P="(=)(transform s')" and Q="(=) s'" - and x="transform_full_intent (machine_state (update_kheap ((kheap s')(y\(TCB (tcb_arch_update (arch_tcb_context_set cxt) obj')))) s')) - y (tcb_arch_update (arch_tcb_context_set cxt) obj')" + and x="transform_full_intent (machine_state (update_kheap ((kheap s')(y\(TCB (tcb_arch_update f obj')))) s')) + y (tcb_arch_update f obj')" in select_pick_corres) apply (clarsimp simp:update_thread_def get_object_def gets_the_def gets_def bind_assoc) @@ -330,6 +330,13 @@ lemma set_cxt_none_det_intent_corres: apply clarsimp done +lemma set_cxt_none_det_intent_corres: + "\kheap s' y = Some (TCB obj'); ekheap s' y \ None; valid_idle s';not_idle_thread y s'\ + \ dcorres dc ((=) (transform s')) ((=) s') + (corrupt_tcb_intent y) + (KHeap_A.set_object y (TCB (tcb_arch_update (arch_tcb_context_set cxt) obj')))" + by (rule set_cxt_none_det_intent_corres') + lemma set_message_info_corres: "dcorres dc \ (valid_idle and not_idle_thread y and valid_etcbs) (corrupt_tcb_intent y) (set_message_info y m)" @@ -425,13 +432,16 @@ lemma set_registers_corres: done lemma set_mrs_corres_no_recv_buffer: - "dcorres dc \ (valid_idle and not_idle_thread y and valid_etcbs) (corrupt_tcb_intent y) (set_mrs y None msg)" - apply (clarsimp simp:set_mrs_def get_thread_def arch_tcb_update_aux3 arch_tcb_set_registers_def) + "dcorres dc \ (valid_idle and not_idle_thread y and valid_etcbs) + (corrupt_tcb_intent y) (set_mrs y None msg)" + unfolding set_mrs_def + apply (subst arch_tcb_update_aux3) + apply simp apply (rule dcorres_absorb_gets_the, clarsimp) apply (drule(1) valid_etcbs_get_tcb_get_etcb) apply (rule corres_dummy_return_l) apply (rule corres_split_forwards' [where Q'="%x. \" and Q="%x. \"]) - apply (rule set_cxt_none_det_intent_corres) + apply (rule set_cxt_none_det_intent_corres') apply (simp add:get_tcb_def get_etcb_def split:option.splits Structures_A.kernel_object.splits | wp)+ @@ -551,16 +561,10 @@ lemma mapM_load_word_offs_do_machine_op: "mapM (load_word_offs ptr) list = do_machine_op (mapM loadWord (map (\offs. 
ptr + of_nat (offs * word_size)) list))" apply (subst submonad_mapM[OF submonad_do_machine_op submonad_do_machine_op]) - apply (simp add: loadWord_def) + apply (simp add: loadWord_def empty_fail_cond) apply (simp add: load_word_offs_def[abs_def] mapM_map_simp o_def) done -lemma and_assoc: - "(A and B and C) = (A and (B and C))" - apply (rule ext) - apply clarsimp -done - lemma det_spec_return: "det_spec P (return x)" by (clarsimp simp:return_def det_spec_def) @@ -1044,7 +1048,7 @@ lemma get_ipc_buffer_words: "\(=) sa and ko_at (TCB obj) thread and K_bind (evalMonad (lookup_ipc_buffer in_receive thread) sa = Some (Some buf))\ mapM (load_word_offs (buf)) (ls) \\buf_mrs s. buf_mrs = get_ipc_buffer_words (machine_state sa) obj (ls)\" - apply (simp add:and_assoc get_ipc_buffer_words_def) + apply (simp add: pred_conj_aci get_ipc_buffer_words_def) apply (rule wp_spec) apply clarsimp apply (drule lookup_ipc_buffer_SomeB_evalMonad) @@ -1070,7 +1074,7 @@ done lemma get_tcb_mrs_wp: "\(=) sa and ko_at (TCB obj) thread and K_bind (evalMonad (lookup_ipc_buffer False thread) sa = Some (op_buf))\ - get_mrs thread (op_buf) (data_to_message_info (arch_tcb_context_get (tcb_arch obj) msg_info_register)) + get_mrs thread (op_buf) (data_to_message_info (arch_tcb_get_registers (tcb_arch obj) msg_info_register)) \\rv s. rv = get_tcb_mrs (machine_state sa) obj\" apply (case_tac op_buf) apply (clarsimp simp:get_mrs_def thread_get_def gets_the_def) @@ -1078,8 +1082,10 @@ lemma get_tcb_mrs_wp: apply (clarsimp simp:get_tcb_mrs_def Let_def) apply (clarsimp simp:Suc_leI[OF msg_registers_lt_msg_max_length] split del:if_split) apply (clarsimp simp:get_tcb_message_info_def get_ipc_buffer_words_empty) - apply (clarsimp dest!:get_tcb_SomeD simp:obj_at_def arch_tcb_get_registers_def) - apply (clarsimp simp:get_mrs_def thread_get_def gets_the_def arch_tcb_get_registers_def) + apply (clarsimp dest!:get_tcb_SomeD + simp:obj_at_def arch_tcb_get_registers_def arch_tcb_context_get_def) + apply (clarsimp simp:get_mrs_def thread_get_def gets_the_def arch_tcb_get_registers_def + arch_tcb_context_get_def) apply (clarsimp simp:Suc_leI[OF msg_registers_lt_msg_max_length] split del:if_split) apply (wp|wpc)+ apply (rule_tac P = "tcb = obj" in hoare_gen_asm) @@ -1089,7 +1095,7 @@ lemma get_tcb_mrs_wp: (get_ipc_buffer_words (machine_state sa) obj ([Suc (length msg_registers)..gets_the (get_tcb thread);return (arch_tcb_context_get (tcb_arch tcb) register) od)" + (do tcb\gets_the (get_tcb thread);return (arch_tcb_get_registers (tcb_arch tcb) register) od)" apply (simp add: assert_opt_def as_user_def set_object_def get_object_def gets_the_def a_type_def assert_def put_def select_f_def getRegister_def gets_def get_def return_def bind_def) apply (rule ext) apply (case_tac "get_tcb thread s") apply (clarsimp simp: fail_def return_def)+ - apply (clarsimp simp: get_tcb_def split: option.splits Structures_A.kernel_object.splits) + apply (clarsimp simp: get_tcb_def arch_tcb_get_registers_def arch_tcb_context_get_def + split: option.splits Structures_A.kernel_object.splits) done lemma select_f_evalMonad: @@ -1895,7 +1902,7 @@ lemma ex_cte_cap_wp_to_not_idle: lemma pspace_aligned_set_cxt_mrs[wp]: "\ko_at (TCB tcb) thread and pspace_aligned\ - KHeap_A.set_object thread (TCB (tcb_arch_update (arch_tcb_context_set t) tcb)) + KHeap_A.set_object thread (TCB (tcb_arch_update (tcb_context_update f) tcb)) \\rv. 
pspace_aligned\" apply (wp set_object_aligned) apply (clarsimp simp:obj_at_def) @@ -1903,7 +1910,7 @@ lemma pspace_aligned_set_cxt_mrs[wp]: lemma pspace_distinct_set_cxt_mrs[wp]: "\ko_at (TCB tcb) thread and pspace_distinct\ - KHeap_A.set_object thread (TCB (tcb_arch_update (arch_tcb_context_set t) tcb)) + KHeap_A.set_object thread (TCB (tcb_arch_update (tcb_context_update f) tcb)) \\rv. pspace_distinct\" apply (wp set_object_distinct) apply (clarsimp simp:obj_at_def) @@ -1912,7 +1919,7 @@ lemma pspace_distinct_set_cxt_mrs[wp]: lemma valid_objs_set_cxt_mrs[wp]: "\ko_at (TCB tcb) thread and valid_objs\ - KHeap_A.set_object thread (TCB (tcb_arch_update (arch_tcb_context_set t) tcb)) + KHeap_A.set_object thread (TCB (tcb_arch_update (tcb_context_update f) tcb)) \\rv. valid_objs\" apply (wp set_object_valid_objs) apply (clarsimp simp:obj_at_def) @@ -1928,14 +1935,14 @@ lemma valid_objs_set_cxt_mrs[wp]: lemma ipc_frame_set_cxt_mrs[wp]: "\ko_at (TCB tcb) thread and ipc_frame_wp_at P a\ - KHeap_A.set_object thread (TCB (tcb_arch_update (arch_tcb_context_set t) tcb)) + KHeap_A.set_object thread (TCB (tcb_arch_update (tcb_context_update f) tcb)) \\rv. ipc_frame_wp_at P a\" by (clarsimp simp: KHeap_A.set_object_def get_object_def get_def put_def bind_def valid_def return_def obj_at_def ipc_frame_wp_at_def in_monad) lemma ipc_buffer_set_cxt_mrs[wp]: "\ko_at (TCB tcb) thread and ipc_buffer_wp_at P a\ - KHeap_A.set_object thread (TCB (tcb_arch_update (arch_tcb_context_set t) tcb)) + KHeap_A.set_object thread (TCB (tcb_arch_update (tcb_context_update f) tcb)) \\rv. ipc_buffer_wp_at P a\" by (clarsimp simp: KHeap_A.set_object_def get_object_def get_def put_def bind_def valid_def return_def obj_at_def ipc_buffer_wp_at_def in_monad) @@ -1961,7 +1968,7 @@ lemma set_mrs_corres: apply (rule corres_guard_imp) apply (rule corres_split [where r'=dc]) apply (clarsimp, drule(1) valid_etcbs_get_tcb_get_etcb) - apply (rule_tac s'=s' in set_cxt_none_det_intent_corres; clarsimp) + apply (rule_tac s'=s' in set_cxt_none_det_intent_corres'; clarsimp) apply (clarsimp dest!: get_tcb_SomeD) apply (clarsimp dest!: get_etcb_SomeD) apply (rule corres_dummy_return_l) @@ -1974,13 +1981,13 @@ lemma set_mrs_corres: apply wp+ apply (wp set_object_valid_etcbs) apply (simp del:upt.simps) - apply (auto dest!:get_tcb_SomeD simp:obj_at_def ipc_frame_wp_at_def) + apply (auto dest!:get_tcb_SomeD simp:obj_at_def ipc_frame_wp_at_def arch_tcb_get_registers_def) done lemma set_registers_ipc_frame_ptr_at[wp]: "\ipc_frame_wp_at buf y\as_user thread (setRegister r rv) \%x. ipc_frame_wp_at buf y\" apply (clarsimp simp: as_user_def select_f_def - arch_tcb_update_aux3 + arch_tcb_update_aux3 arch_tcb_context_set_def setRegister_def simpler_modify_def) apply wp apply clarsimp @@ -1996,7 +2003,7 @@ lemma set_registers_ipc_buffer_ptr_at[wp]: apply (clarsimp simp: as_user_def select_f_def setRegister_def - arch_tcb_update_aux3 + arch_tcb_update_aux3 arch_tcb_context_set_def simpler_modify_def) apply wp apply clarsimp diff --git a/proof/drefine/Interrupt_DR.thy b/proof/drefine/Interrupt_DR.thy index ab351cffda..636578b4aa 100644 --- a/proof/drefine/Interrupt_DR.thy +++ b/proof/drefine/Interrupt_DR.thy @@ -551,7 +551,7 @@ lemma cap_delete_one_original: "slot \ slot' \ \\s. is_original_cap s slot\ cap_delete_one slot' \\r s. 
is_original_cap s slot\" apply (clarsimp simp:cap_delete_one_def unless_def) - apply (wp hoare_when_wp) + apply (wp when_wp) apply (clarsimp simp:empty_slot_def) apply wp apply (clarsimp simp:set_cdt_def) @@ -570,7 +570,7 @@ lemma cte_wp_at_neq_slot_cap_delete_one: \\rv. cte_wp_at P slot\" supply send_signal_interrupt_states [wp_unsafe del] validNF_prop [wp_unsafe del] apply (clarsimp simp:cap_delete_one_def unless_def) - apply (wp hoare_when_wp) + apply (wp when_wp) apply (clarsimp simp:empty_slot_def) apply (wp cte_wp_at_neq_slot_set_cap) apply clarsimp diff --git a/proof/drefine/Ipc_DR.thy b/proof/drefine/Ipc_DR.thy index df57c5f723..51d174c9e9 100644 --- a/proof/drefine/Ipc_DR.thy +++ b/proof/drefine/Ipc_DR.thy @@ -72,7 +72,7 @@ lemma handle_reply_cur_thread_idle_thread: apply ((wps|wp cap_delete_one_it)+)[1] apply (wp do_ipc_transfer_cur_thread_idle_thread dxo_wp_weak)+ apply (clarsimp simp: trans_state_def) - apply (case_tac xf) + apply (case_tac rvf) apply (simp | wp set_thread_state_cur_thread_idle_thread thread_set_cur_thread_idle_thread)+ apply ((wps | wp)+)[1] @@ -278,30 +278,11 @@ lemma cte_at_into_opt_cap: apply (clarsimp simp: caps_of_state_transform_opt_cap) done -abbreviation - "meqv \ monadic_rewrite False True" - -lemma mr_opt_cap_into_object: - assumes mr: "\obj. monadic_rewrite F E (Q obj) m m'" - shows "monadic_rewrite F E ((\s. \obj. cdl_objects s (fst p) = Some obj \ object_slots obj (snd p) \ None \ Q obj s) and (\s. opt_cap p s \ None)) m m'" - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_exists [where P = "\obj s. cdl_objects s (fst p) = Some obj \ object_slots obj (snd p) \ None", OF mr]) - apply clarsimp - apply (rule conjI) - apply simp - apply (simp add: opt_cap_def split_def KHeap_D.slots_of_def split: option.splits) - done - lemma object_slots_has_slots [simp]: "object_slots obj p = Some v \ has_slots obj" unfolding object_slots_def has_slots_def by (simp split: cdl_object.splits) -lemma meqv_sym: - "meqv P a a' \ meqv P a' a" - unfolding monadic_rewrite_def - by fastforce - lemma dcorres_when_l: assumes tc: "R \ dcorres dc \ P l r" and fc: "\ R \ dcorres dc \ Q (return ()) r" @@ -721,7 +702,7 @@ lemma tcb_sched_action_tcb_at_not_idle[wp]: lemma valid_idle_cancel_all_ipc: "\valid_idle and valid_state :: det_state \ bool\ IpcCancel_A.cancel_all_ipc word1 \\a. valid_idle\" - including no_pre + including classic_wp_pre apply (simp add:cancel_all_ipc_def) apply (wp|wpc|simp)+ apply (rename_tac queue list) @@ -732,7 +713,7 @@ lemma valid_idle_cancel_all_ipc: apply (rule hoare_conjI) apply (rule_tac P="(\s. (queue = list) \ (\a\ set list. tcb_at a s \ not_idle_thread a s)) and valid_idle and ko_at (kernel_object.Endpoint Structures_A.endpoint.IdleEP) word1" - in hoare_vcg_precond_imp) + in hoare_weaken_pre) apply (wp | clarsimp)+ apply (rule set_thread_state_ko) apply (simp add:is_tcb_def) @@ -750,7 +731,7 @@ lemma valid_idle_cancel_all_ipc: apply (rule hoare_conjI) apply (rule_tac P="(\s. (queue = list) \ (\a\ set list. tcb_at a s \ not_idle_thread a s)) and valid_idle and ko_at (kernel_object.Endpoint Structures_A.endpoint.IdleEP) word1" - in hoare_vcg_precond_imp) + in hoare_weaken_pre) apply (rule set_thread_state_ko) apply (simp add:is_tcb_def) apply (wp valid_idle_set_thread_state) @@ -776,7 +757,7 @@ lemma valid_idle_cancel_all_ipc: lemma valid_idle_cancel_all_signals: "\valid_idle and valid_state :: det_state \ bool\ IpcCancel_A.cancel_all_signals word1 \\a. 
valid_idle\" - including no_pre + including classic_wp_pre apply (simp add:cancel_all_signals_def) apply (wp|wpc|simp)+ apply (rename_tac list) @@ -787,7 +768,7 @@ lemma valid_idle_cancel_all_signals: apply (rule hoare_conjI) apply (rule_tac P="(\s. (\a\ set list. tcb_at a s \ not_idle_thread a s)) and valid_idle and ko_at (kernel_object.Notification (ntfn_set_obj ntfn Structures_A.ntfn.IdleNtfn)) word1" - in hoare_vcg_precond_imp) + in hoare_weaken_pre) apply (rule set_thread_state_ko) apply (simp add:is_tcb_def) apply (wp valid_idle_set_thread_state)+ @@ -813,7 +794,7 @@ lemma not_idle_after_reply_cancel_ipc: apply (simp add:cap_delete_one_def unless_def) apply wp+ apply (simp add:IpcCancel_A.empty_slot_def) - apply (wp set_cap_idle select_wp | simp add: if_apply_def2 imp_conjR + apply (wp set_cap_idle | simp add: if_apply_def2 imp_conjR | strengthen imp_consequent[where P="invs s" for s] imp_consequent[where P="valid_idle s" for s])+ apply (strengthen invs_valid_idle) apply (wp thread_set_invs_trivial | simp add: ran_tcb_cap_cases)+ @@ -1030,7 +1011,7 @@ lemma evalMonad_mapM: lemma evalMonad_get_extra_cptrs: "\evalMonad (lookup_ipc_buffer False thread) s = Some (Some buf);get_tcb thread s = Some tcb; - (evalMonad (Ipc_A.get_extra_cptrs (Some buf) (data_to_message_info (arch_tcb_context_get (tcb_arch tcb) msg_info_register))) s) = Some a + (evalMonad (Ipc_A.get_extra_cptrs (Some buf) (data_to_message_info (arch_tcb_get_registers (tcb_arch tcb) msg_info_register))) s) = Some a \ \ a = (map (to_bl) (cdl_intent_extras $ transform_full_intent (machine_state s) thread tcb))" including no_pre @@ -1055,7 +1036,7 @@ lemma evalMonad_get_extra_cptrs: apply (rule weak_det_spec_mapM[OF weak_det_spec_loadWord]) apply (rule empty_when_fail_mapM) apply (clarsimp simp:empty_when_fail_loadWord weak_det_spec_loadWord) - apply (clarsimp simp:get_tcb_message_info_def) + apply (clarsimp simp:get_tcb_message_info_def arch_tcb_context_get_def arch_tcb_get_registers_def) done lemma dcorres_symb_exec_r_evalMonad: @@ -1252,7 +1233,7 @@ lemma cap_insert_cte_wp_at_masked_as_full: shows "\\s. if slot = dest then P cap else cte_wp_at P slot s\ cap_insert cap src dest \\uu. 
cte_wp_at P slot\" apply (simp add:cap_insert_def set_untyped_cap_as_full_def) - apply (wp set_cap_cte_wp_at hoare_vcg_if_lift get_cap_wp static_imp_wp dxo_wp_weak + apply (wp set_cap_cte_wp_at hoare_vcg_if_lift get_cap_wp hoare_weak_lift_imp dxo_wp_weak | simp split del:if_split)+ apply (intro conjI impI allI | clarsimp simp:cte_wp_at_caps_of_state)+ @@ -1304,10 +1285,9 @@ next apply (rule dcorres_set_extra_badge,simp) apply (rule Cons.hyps, rule refl, rule refl, simp) apply wp[1] - apply (simp add: store_word_offs_def set_extra_badge_def - not_idle_thread_def ipc_frame_wp_at_def - split_def) - apply (wp evalMonad_lookup_ipc_buffer_wp) + apply (simp add: store_word_offs_def set_extra_badge_def not_idle_thread_def + ipc_frame_wp_at_def split_def) + apply (wpsimp wp: evalMonad_lookup_ipc_buffer_wp) apply (erule cte_wp_at_weakenE) apply (simp add:ipc_buffer_wp_at_def)+ apply wp @@ -1353,8 +1333,8 @@ next apply (rule cap_insert_weak_cte_wp_at_not_null) apply clarsimp+ apply (wp cap_insert_idle valid_irq_node_typ hoare_vcg_ball_lift cap_insert_cte_wp_at)+ - apply (simp add: if_apply_def2) - apply wp + apply (wpsimp simp: if_apply_def2) + apply (wpsimp simp: if_apply_def2) apply (simp add: if_apply_def2) apply (rule validE_R_validE) apply (simp add:conj_comms ball_conj_distrib split del:if_split) @@ -1362,9 +1342,10 @@ next (cte_wp_at (is_derived (cdt s) (slot_ptr, slot_idx) cap') (slot_ptr, slot_idx) s) \ pspace_aligned s \ pspace_distinct s \ valid_objs s \ valid_idle s \ valid_mdb s \ QM s cap'))" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 - apply (subgoal_tac "r\ cap.NullCap \ cte_wp_at ((\) cap.NullCap) (slot_ptr, slot_idx) s") + apply (rename_tac rv s) + apply (subgoal_tac "rv \ cap.NullCap \ cte_wp_at ((\) cap.NullCap) (slot_ptr, slot_idx) s") apply (intro impI) apply simp apply (elim conjE) @@ -1550,11 +1531,11 @@ lemma get_receive_slot_dcorres: apply (rule dcorres_returnOk) apply clarsimp+ apply (wp|clarsimp)+ - apply (rule hoare_post_imp_R[OF hoare_True_E_R]) + apply (rule hoare_strengthen_postE_R[OF hoareE_R_TrueI]) apply (intro impI, simp) apply (wp lsfco_not_idle) apply clarsimp - apply (rule hoare_post_impErr[OF hoareE_TrueI TrueI]) + apply (rule hoare_strengthen_postE[OF hoareE_TrueI TrueI]) apply simp apply (wp lsfco_not_idle | clarsimp)+ apply (rule conjI; rule TrueI) @@ -1612,7 +1593,7 @@ lemma transfer_caps_loop_None: lemma get_rs_length [wp]: "\\\ get_receive_slots rcv buffer \\slots s. length slots \ 1\" apply (cases buffer) - apply (simp del: hoare_True_E_R|wp)+ + apply (simp|wp)+ done lemma transfer_caps_dcorres: @@ -1664,10 +1645,10 @@ lemma dcorres_lookup_extra_caps: \ ((=) s) (Endpoint_D.lookup_extra_caps thread (cdl_intent_extras (transform_full_intent (machine_state s) thread t))) - (Ipc_A.lookup_extra_caps thread buffer (data_to_message_info (arch_tcb_context_get (tcb_arch t) msg_info_register)))" + (Ipc_A.lookup_extra_caps thread buffer (data_to_message_info (arch_tcb_get_registers (tcb_arch t) msg_info_register)))" apply (clarsimp simp:lookup_extra_caps_def liftE_bindE Endpoint_D.lookup_extra_caps_def) apply (rule corres_symb_exec_r) - apply (rule_tac F = "evalMonad (get_extra_cptrs buffer (data_to_message_info (arch_tcb_context_get (tcb_arch t) msg_info_register))) s = Some rv" + apply (rule_tac F = "evalMonad (get_extra_cptrs buffer (data_to_message_info (arch_tcb_get_registers (tcb_arch t) msg_info_register))) s = Some rv" in corres_gen_asm2) apply (rule corres_mapME[where S = "{(x,y). 
x = of_bl y \ length y = word_bits}"]) prefer 3 @@ -1714,13 +1695,13 @@ lemma dcorres_lookup_extra_caps: done lemma dcorres_copy_mrs': - notes hoare_post_taut[wp] if_cong[cong] + notes hoare_TrueI[wp] if_cong[cong] shows "dcorres dc \ ((\s. evalMonad (lookup_ipc_buffer in_receive recv) s = Some rv) and valid_idle and not_idle_thread thread and not_idle_thread recv and tcb_at recv and valid_objs and pspace_aligned and pspace_distinct and valid_etcbs) (corrupt_ipc_buffer recv in_receive) - (copy_mrs send rva recv rv (mi_length (data_to_message_info (arch_tcb_context_get (tcb_arch tcb) msg_info_register))))" + (copy_mrs send rva recv rv (mi_length (data_to_message_info (arch_tcb_get_registers (tcb_arch tcb) msg_info_register))))" apply (rule dcorres_expand_pfx) apply (clarsimp simp:corrupt_ipc_buffer_def) apply (case_tac rv) @@ -1877,7 +1858,7 @@ done lemma ipc_buffer_wp_at_copy_mrs[wp]: "\ipc_buffer_wp_at buf t \ - copy_mrs send rva recv rv (mi_length (data_to_message_info (arch_tcb_context_get (tcb_arch obj') msg_info_register))) + copy_mrs send rva recv rv (mi_length (data_to_message_info (arch_tcb_get_registers (tcb_arch obj') msg_info_register))) \\r. ipc_buffer_wp_at buf t\" unfolding copy_mrs_def apply (wp|wpc)+ @@ -1950,7 +1931,7 @@ lemma corres_complete_ipc_transfer: apply (wp hoare_vcg_ball_lift | clarsimp)+ apply (rule validE_validE_R) apply (rule hoare_vcg_conj_liftE1) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule validE_validE_R) apply (rule hoare_vcg_conj_liftE1[OF lookup_extra_caps_srcs]) apply (rule hoare_post_imp_dc2_actual[OF lookup_extra_caps_inv[where P=valid_objs]]) @@ -2006,13 +1987,13 @@ lemma dcorres_handle_arch_fault_reply: "dcorres dc \ (tcb_at y and valid_idle and not_idle_thread y and valid_etcbs) (corrupt_tcb_intent y) (handle_arch_fault_reply a y mi mrs)" - apply (cases a) - apply (clarsimp simp: handle_arch_fault_reply_def) - apply (rule corres_guard_imp) - apply (rule corres_corrupt_tcb_intent_return) - apply assumption - apply (erule pred_andE | rule pred_andI | assumption)+ - done + apply (cases a) + apply (clarsimp simp: handle_arch_fault_reply_def) + apply (rule corres_guard_imp) + apply (rule corres_corrupt_tcb_intent_return) + apply assumption + apply clarsimp + done lemma dcorres_handle_fault_reply: @@ -2277,7 +2258,7 @@ lemma set_endpoint_valid_irq_node[wp]: apply wp apply (simp add:set_simple_ko_def) apply (wp hoare_vcg_all_lift) - apply (rule_tac Q="\s. \irq. cap_table_at 0 (interrupt_irq_node s irq) s \ ep_at w s" in hoare_vcg_precond_imp) + apply (rule_tac Q="\s. \irq. cap_table_at 0 (interrupt_irq_node s irq) s \ ep_at w s" in hoare_weaken_pre) apply (clarsimp simp: set_object_def get_object_def in_monad get_def put_def bind_def return_def valid_def obj_at_def) apply (drule_tac x = irq in spec) @@ -2705,10 +2686,10 @@ lemma not_idle_thread_resolve_address_bits: CSpace_A.resolve_address_bits (tcb_ctable obj, blist) \\rv. not_idle_thread (fst (fst rv))\, \\_. \\" apply (rule validE_R_validE) - apply (rule_tac hoare_vcg_precond_impE_R) + apply (rule_tac hoare_weaken_preE_R) apply (rule validE_validE_R) apply (rule_tac Q="\r. valid_global_refs and valid_objs and valid_idle and valid_irq_node and ex_cte_cap_to (fst r)" - in hoare_post_impErr[where E="\x y. True"]) + in hoare_strengthen_postE[where E="\x y. 
True"]) apply (wp rab_cte_cap_to) apply clarsimp apply (drule ex_cte_cap_to_not_idle, auto simp: not_idle_thread_def)[1] @@ -2838,7 +2819,7 @@ lemma send_fault_ipc_corres: apply (rule hoare_validE_conj) prefer 2 apply wp+ - apply (rule hoare_post_imp_R, rule lookup_cap_valid) + apply (rule hoare_strengthen_postE_R, rule lookup_cap_valid) apply (clarsimp simp: valid_cap_simps) apply clarsimp+ apply (intro conjI; clarsimp) diff --git a/proof/drefine/KHeap_DR.thy b/proof/drefine/KHeap_DR.thy index 7ce4474d92..7d053d7e3a 100644 --- a/proof/drefine/KHeap_DR.thy +++ b/proof/drefine/KHeap_DR.thy @@ -82,11 +82,10 @@ termination CSpace_D.resolve_address_bits end -crunch cdl_cdt [wp]: "KHeap_D.set_cap" "\s. P (cdl_cdt s)" - (wp: crunch_wps select_wp simp: crunch_simps) - -crunch cdl_cdt [wp]: "PageTableUnmap_D.cancel_all_ipc", "PageTableUnmap_D.unbind_maybe_notification" "\s. P (cdl_cdt s)" - (wp: crunch_wps select_wp simp: crunch_simps) +crunches + "KHeap_D.set_cap", "PageTableUnmap_D.cancel_all_ipc", "PageTableUnmap_D.unbind_maybe_notification" + for cdl_cdt [wp]: "\s. P (cdl_cdt s)" + (wp: crunch_wps simp: crunch_simps) lemma descendants_cdl_cdt_lift: "(\P. \\s. P (cdl_cdt s)\ f \\_ s. P (cdl_cdt s)\) \ @@ -602,7 +601,7 @@ lemma xf_cnode_contents: lemma transform_cnode_contents_upd: "\well_formed_cnode_n sz cn; cn sl' = Some ocap'\ \ - transform_cnode_contents sz cn(nat (bl_to_bin sl') \ transform_cap cap') = + (transform_cnode_contents sz cn)(nat (bl_to_bin sl') \ transform_cap cap') = transform_cnode_contents sz (cn(sl' \ cap'))" apply (rule ext) apply clarsimp @@ -621,7 +620,7 @@ lemma transform_cnode_contents_upd: lemma caps_of_state_cnode_upd: "\ kheap s p' = Some (CNode sz cn); well_formed_cnode_n sz cn; cn sl' = Some ocap' \ \ - caps_of_state (update_kheap (kheap s(p' \ CNode sz (cn(sl' \ cap')))) s) = + caps_of_state (update_kheap ((kheap s)(p' \ CNode sz (cn(sl' \ cap')))) s) = (caps_of_state s) ((p',sl') \ cap')" apply (rule ext) apply (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases wf_cs_upd) @@ -2589,10 +2588,10 @@ lemma unbind_notification_valid_state[wp]: "\valid_state\ IpcCancel_A.unbind_notification t \\rv. valid_state\" supply if_cong[cong] apply (simp add: unbind_notification_def valid_state_def valid_pspace_def) - apply (rule hoare_seq_ext [OF _ gbn_sp]) + apply (rule bind_wp [OF _ gbn_sp]) apply (case_tac ntfnptr, clarsimp, wp, simp) apply clarsimp - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (wp valid_irq_node_typ set_simple_ko_valid_objs | clarsimp split del: if_split)+ apply (intro conjI impI; @@ -2623,7 +2622,7 @@ lemma unbind_maybe_notification_valid_state[wp]: "\valid_state\ IpcCancel_A.unbind_maybe_notification a \\rv. valid_state\" supply if_cong[cong] apply (simp add: unbind_maybe_notification_def valid_state_def valid_pspace_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (case_tac "ntfn_bound_tcb ntfn", clarsimp, wp, simp+) apply (wp valid_irq_node_typ set_simple_ko_valid_objs | clarsimp split del: if_split)+ @@ -2656,10 +2655,10 @@ lemma unbind_maybe_notification_valid_state[wp]: lemma unbind_notification_valid_idle[wp]: "\valid_idle\ IpcCancel_A.unbind_notification t \\rv. 
valid_idle\" apply (simp add: unbind_notification_def) - apply (rule hoare_seq_ext[OF _ gbn_sp]) + apply (rule bind_wp[OF _ gbn_sp]) apply (case_tac ntfnptr, clarsimp, wp, simp) apply clarsimp - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (wp | clarsimp)+ apply (auto simp: obj_at_def is_ntfn_def) done @@ -2667,7 +2666,7 @@ lemma unbind_notification_valid_idle[wp]: lemma unbind_maybe_notification_valid_idle[wp]: "\valid_idle\ IpcCancel_A.unbind_maybe_notification a \\rv. valid_idle\" apply (simp add: unbind_maybe_notification_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (case_tac "ntfn_bound_tcb ntfn", clarsimp, wp, simp) apply clarsimp apply (wp | clarsimp)+ @@ -2715,7 +2714,7 @@ lemma set_parent_corres: get_def set_cdt_def return_def bind_def) apply (simp add:transform_current_thread_def weak_valid_mdb_def) apply (rename_tac s') - apply (subgoal_tac "transform s'\cdl_cdt:=cdl_cdt(transform s') + apply (subgoal_tac "transform s'\cdl_cdt:=(cdl_cdt(transform s')) (transform_cslot_ptr slot' \ transform_cslot_ptr pslot')\ = cdl_cdt_single_update (transform s') (transform_cslot_ptr slot') (transform_cslot_ptr pslot')") apply (clarsimp simp:cdl_cdt_transform) @@ -2776,15 +2775,16 @@ lemma get_tcb_reply_cap_wp_original_cap: apply (rule hoare_post_imp [where Q="\r. cte_wp_at (\c. r \ cap.NullCap) (sid,tcb_cnode_index 2) and valid_mdb and tcb_at sid and valid_objs and cte_wp_at ((=) r) (sid,tcb_cnode_index 2)"]) + apply (rename_tac rv s) apply clarsimp - apply (subgoal_tac "is_master_reply_cap r \ obj_ref_of r = sid") + apply (subgoal_tac "is_master_reply_cap rv \ obj_ref_of rv = sid") apply clarsimp apply (frule cte_wp_tcb_cap_valid) apply simp+ apply (clarsimp simp:valid_mdb_def reply_master_revocable_def) - apply (drule_tac x = "obj_ref_of r" in spec) + apply (drule_tac x = "obj_ref_of rv" in spec) apply (drule_tac x = "tcb_cnode_index 2" in spec) - apply (drule_tac x = r in spec) + apply (drule_tac x = rv in spec) apply (drule iffD1[OF cte_wp_at_caps_of_state])+ apply clarsimp apply (frule cte_wp_tcb_cap_valid) @@ -2819,7 +2819,7 @@ done lemma transform_objects_update_kheap_simp: "\kheap s ptr = Some ko; ekheap s ptr = opt_etcb\ - \ transform_objects (update_kheap (kheap s(ptr \ obj)) s) = + \ transform_objects (update_kheap ((kheap s)(ptr \ obj)) s) = (\x. if x \ ptr then transform_objects s x else (if ptr = idle_thread s then None else Some (transform_object (machine_state s) ptr opt_etcb obj)))" @@ -3373,11 +3373,11 @@ lemma not_idle_thread_resolve_address_bits: CSpace_A.resolve_address_bits (tcb_ctable obj, blist) \\rv s. not_idle_thread (fst (fst rv)) s \ valid_etcbs s\, \\_. \\" apply (rule validE_R_validE) - apply (rule_tac hoare_vcg_precond_impE_R) + apply (rule_tac hoare_weaken_preE_R) apply (rule validE_validE_R) apply (rule_tac Q="\r. valid_etcbs and valid_global_refs and valid_objs and valid_idle and valid_irq_node and ex_cte_cap_to (fst r)" - in hoare_post_impErr[where E="\x y. True"]) + in hoare_strengthen_postE[where E="\x y. True"]) apply (wp rab_cte_cap_to) apply (auto intro: ex_cte_cap_wp_to_not_idle)[2] apply (clarsimp simp:ex_cte_cap_to_def) @@ -3472,7 +3472,7 @@ lemma dcorres_lookup_cap_and_slot: apply (rule get_cap_corres, rule refl) apply (rule dcorres_returnOk, simp) apply ((wp|simp)+) - apply (rule hoare_post_imp_R [where Q'="\rv. valid_idle and valid_etcbs and real_cte_at (fst rv)"]) + apply (rule hoare_strengthen_postE_R [where Q'="\rv. 
valid_idle and valid_etcbs and real_cte_at (fst rv)"]) apply (wp lookup_slot_real_cte_at_wp) apply (clarsimp simp: valid_idle_def not_idle_thread_def pred_tcb_at_def obj_at_def is_cap_table_def) diff --git a/proof/drefine/README.md b/proof/drefine/README.md index 3ab5945bb7..d1908b4ccc 100644 --- a/proof/drefine/README.md +++ b/proof/drefine/README.md @@ -19,9 +19,9 @@ specification][capDL]. It is described as part of an ICFEM '13 Building -------- -To build from the `l4v/` directory, run: +To build for the ARM architecture from the `l4v/` directory, run: - ./isabelle/bin/isabelle build -d . -v -b DRefine + L4V_ARCH=ARM ./run_tests DRefine Important Theories ------------------ diff --git a/proof/drefine/Schedule_DR.thy b/proof/drefine/Schedule_DR.thy index cbec79861b..3b46964809 100644 --- a/proof/drefine/Schedule_DR.thy +++ b/proof/drefine/Schedule_DR.thy @@ -81,7 +81,7 @@ lemma change_current_domain_and_switch_to_idle_thread_dcorres: Schedule_D.switch_to_thread None od) switch_to_idle_thread" - including no_pre + including classic_wp_pre apply (clarsimp simp: Schedule_D.switch_to_thread_def switch_to_idle_thread_def) apply (rule dcorres_symb_exec_r) apply (rule corres_guard_imp) @@ -139,7 +139,7 @@ lemma corrupt_intents_current_thread: by (simp add: corrupt_intents_def) crunch cdl_cur: corrupt_frame "\s. cdl_current_thread s = x" - (wp: select_wp simp: corrupt_intents_current_thread) + (simp: corrupt_intents_current_thread) (* Switching to the active thread has no effect. *) lemma switch_to_thread_idempotent_corres: @@ -593,7 +593,8 @@ lemma schedule_dcorres: * tcb context of a thread does affect the state translation to capDL *) lemma get_tcb_message_info_nextPC [simp]: - "get_tcb_message_info (tcb_arch_update (tcb_context_update (\ctx. ctx(NextIP := pc))) tcb) = + "get_tcb_message_info (tcb_arch_update (tcb_context_update + (\ctx. UserContext ((user_regs ctx)(NextIP := pc)))) tcb) = get_tcb_message_info tcb" by (simp add: get_tcb_message_info_def arch_tcb_context_get_def @@ -601,24 +602,29 @@ lemma get_tcb_message_info_nextPC [simp]: ARM.msgInfoRegister_def) lemma map_msg_registers_nextPC [simp]: - "map ((tcb_context tcb)(NextIP := pc)) msg_registers = - map (tcb_context tcb) msg_registers" + "map ((user_regs (tcb_context tcb))(NextIP := pc)) msg_registers = + map (user_regs (tcb_context tcb)) msg_registers" by (simp add: msg_registers_def ARM.msgRegisters_def upto_enum_red fromEnum_def toEnum_def enum_register) lemma get_ipc_buffer_words_nextPC [simp]: - "get_ipc_buffer_words m (tcb_arch_update (tcb_context_update (\ctx. ctx(NextIP := pc))) tcb) = + "get_ipc_buffer_words m (tcb_arch_update (tcb_context_update + (\ctx. UserContext ((user_regs ctx)(NextIP := pc)))) tcb) = get_ipc_buffer_words m tcb" by (rule ext) (simp add: get_ipc_buffer_words_def) lemma get_tcb_mrs_nextPC [simp]: - "get_tcb_mrs m (tcb_arch_update (tcb_context_update (\ctx. ctx(NextIP := pc))) tcb) = + "get_tcb_mrs m (tcb_arch_update (tcb_context_update + (\ctx. UserContext ((user_regs ctx)(NextIP := pc)))) tcb) = get_tcb_mrs m tcb" by (simp add: get_tcb_mrs_def Let_def arch_tcb_context_get_def) lemma transform_tcb_NextIP: - "transform_tcb m t (tcb_arch_update (tcb_context_update (\ctx. ctx(NextIP:= pc))) tcb) + "transform_tcb m t (tcb_arch_update (tcb_context_update + (\ctx. 
UserContext ((user_regs ctx)(NextIP := pc)))) tcb) = transform_tcb m t tcb" + apply (rule ext) + apply (simp add: transform_tcb_def transform_full_intent_def Let_def) by (auto simp add: transform_tcb_def transform_full_intent_def Let_def cap_register_def ARM.capRegister_def arch_tcb_context_get_def) diff --git a/proof/drefine/StateTranslationProofs_DR.thy b/proof/drefine/StateTranslationProofs_DR.thy index dfcd7e6a77..2a6bce8c2d 100644 --- a/proof/drefine/StateTranslationProofs_DR.thy +++ b/proof/drefine/StateTranslationProofs_DR.thy @@ -54,10 +54,12 @@ abbreviation "update_kheap kh s \ kheap_update (\_. kh) s" abbreviation -"tcb_set_mi tcb msg \ tcb \tcb_context := (tcb_context tcb)(msg_info_register := msg)\" +"tcb_set_mi tcb msg \ + tcb \tcb_context := modify_registers (\rs. rs(msg_info_register := msg)) (tcb_context tcb)\" abbreviation -"update_tcb_cxt_badge msg tcb\ tcb \tcb_context := (tcb_context tcb)(badge_register := msg)\" +"update_tcb_cxt_badge msg tcb\ + tcb \tcb_context := modify_registers (\rs. rs(badge_register := msg)) (tcb_context tcb)\" abbreviation "update_tcb_state state tcb \ tcb \tcb_state := state\" @@ -66,7 +68,7 @@ abbreviation "update_tcb_boundntfn ntfn_opt tcb \ tcb \tcb_bound_notification := ntfn_opt\" abbreviation -"dupdate_cdl_object ptr obj s \ cdl_objects_update (\_. cdl_objects s(ptr \ obj)) s" +"dupdate_cdl_object ptr obj s \ cdl_objects_update (\_. (cdl_objects s)(ptr \ obj)) s" abbreviation "dupdate_tcb_intent intent tcb\ tcb \cdl_tcb_intent := intent\" diff --git a/proof/drefine/StateTranslation_D.thy b/proof/drefine/StateTranslation_D.thy index 24e957f0ed..8fa77e7d91 100644 --- a/proof/drefine/StateTranslation_D.thy +++ b/proof/drefine/StateTranslation_D.thy @@ -53,12 +53,12 @@ where else if x = 2 then Some EndpointType else if x = 3 then Some NotificationType else if x = 4 then Some CNodeType - else if x = 5 then Some (FrameType 12) - else if x = 6 then Some (FrameType 16) - else if x = 7 then Some (FrameType 20) - else if x = 8 then Some (FrameType 24) - else if x = 9 then Some PageTableType - else if x = 10 then Some PageDirectoryType + else if x = 5 then Some PageDirectoryType + else if x = 6 then Some (FrameType 12) + else if x = 7 then Some (FrameType 16) + else if x = 8 then Some (FrameType 20) + else if x = 9 then Some (FrameType 24) + else if x = 10 then Some PageTableType else None" definition @@ -755,7 +755,8 @@ where definition get_tcb_message_info :: "tcb \ Structures_A.message_info" where - "get_tcb_message_info t \ data_to_message_info ((arch_tcb_context_get (tcb_arch t)) msg_info_register)" + "get_tcb_message_info t \ + data_to_message_info ((user_regs (arch_tcb_context_get (tcb_arch t))) msg_info_register)" definition get_tcb_mrs :: "machine_state \ tcb \ word32 list" @@ -763,7 +764,7 @@ where "get_tcb_mrs ms tcb \ let info = get_tcb_message_info tcb; - cpu_mrs = map (arch_tcb_context_get (tcb_arch tcb)) msg_registers; + cpu_mrs = map (user_regs (arch_tcb_context_get (tcb_arch tcb))) msg_registers; mem_mrs = get_ipc_buffer_words ms tcb [length msg_registers + 1 ..< Suc msg_max_length] in (take (unat (mi_length info)) (cpu_mrs @ mem_mrs))" @@ -782,7 +783,7 @@ where (invocation_type (mi_label mi)) (get_tcb_mrs ms tcb)), cdl_intent_error = guess_error (mi_label mi), - cdl_intent_cap = arch_tcb_context_get (tcb_arch tcb) cap_register, + cdl_intent_cap = user_regs (arch_tcb_context_get (tcb_arch tcb)) cap_register, cdl_intent_extras = get_ipc_buffer_words ms tcb [buffer_cptr_index ..< buffer_cptr_index + (unat (mi_extra_caps mi))], 
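(* Annotation, not part of the diff: the register-update hunks above (Schedule_DR,
   StateTranslationProofs_DR, StateTranslation_D) now go through a user-context wrapper,
   using UserContext/user_regs/modify_registers instead of updating a bare register
   function. A minimal standalone sketch of that shape, in a fresh theory importing only
   Main; every demo_* name is a hypothetical mirror, not an l4v definition: *)

datatype demo_reg = DemoR0 | DemoNextIP

datatype demo_context = DemoUserContext (demo_user_regs: "demo_reg \<Rightarrow> nat")

definition demo_modify_registers ::
  "((demo_reg \<Rightarrow> nat) \<Rightarrow> (demo_reg \<Rightarrow> nat)) \<Rightarrow> demo_context \<Rightarrow> demo_context" where
  "demo_modify_registers f ctx \<equiv> DemoUserContext (f (demo_user_regs ctx))"

(* Updating one register through the wrapper is still visible when reading it back,
   mirroring the rewritten get_tcb_message_info/get_tcb_mrs goals above. *)
lemma "demo_user_regs (demo_modify_registers (\<lambda>rs. rs(DemoNextIP := pc)) ctx) DemoNextIP = pc"
  by (simp add: demo_modify_registers_def demo_context.sel)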
cdl_intent_recv_slot = case (get_ipc_buffer_words ms tcb [offset ..< offset + 3]) of [croot, index, depth] \ Some (croot, index, unat depth) diff --git a/proof/drefine/Syscall_DR.thy b/proof/drefine/Syscall_DR.thy index 0e1a3b35c1..cbb20f70a4 100644 --- a/proof/drefine/Syscall_DR.thy +++ b/proof/drefine/Syscall_DR.thy @@ -742,7 +742,7 @@ lemma perform_invocation_corres: apply (rule corres_split[OF invoke_cnode_corres]) apply (clarsimp simp:lift_def,case_tac rv',simp add: throwError_def) apply (simp) - apply (rule hoare_triv[of \], rule hoare_post_taut)+ + apply (rule hoare_triv[of \], rule hoare_TrueI)+ apply clarsimp+ done @@ -756,7 +756,7 @@ lemma perform_invocation_corres: apply (rule_tac F = "\x. rv' = Inr x" in corres_gen_asm2) apply (rule corres_trivial) apply (clarsimp simp:lift_def returnOk_def) - apply (rule hoare_triv[of \], rule hoare_post_taut)+ + apply (rule hoare_triv[of \], rule hoare_TrueI)+ apply ((wp|simp add: liftE_def)+) (* arch *) apply (rename_tac arch_label) @@ -767,7 +767,7 @@ lemma perform_invocation_corres: apply (rule_tac F = "\x. rv' = Inr x" in corres_gen_asm2) apply (rule corres_trivial) apply (clarsimp simp:lift_def returnOk_def) - apply (rule hoare_triv[of \], rule hoare_post_taut) + apply (rule hoare_triv[of \], rule hoare_TrueI) apply ((wp|simp add: liftE_def)+) done @@ -826,7 +826,7 @@ lemma handle_fault_corres: apply (simp add:send_fault_ipc_def not_idle_thread_def Let_def) apply (wp hoare_drop_imps hoare_vcg_ball_lift | wpc)+ apply (rule_tac Q'="\r s. invs s \ valid_etcbs s \ obj\ idle_thread s \ obj = cur_thread s'" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp:invs_mdb invs_valid_idle) apply wp @@ -842,7 +842,7 @@ lemma handle_fault_corres: lemma get_tcb_mrs_wp: "\ko_at (TCB obj) thread and K_bind (evalMonad (lookup_ipc_buffer False thread) sa = Some (op_buf)) and (=) sa\ - get_mrs thread (op_buf) (data_to_message_info (arch_tcb_context_get (tcb_arch obj) msg_info_register)) + get_mrs thread (op_buf) (data_to_message_info (arch_tcb_get_registers (tcb_arch obj) msg_info_register)) \\rv s. rv = get_tcb_mrs (machine_state sa) obj\" apply (case_tac op_buf) apply (clarsimp simp:get_mrs_def thread_get_def gets_the_def arch_tcb_get_registers_def) @@ -850,12 +850,14 @@ lemma get_tcb_mrs_wp: apply (clarsimp simp:get_tcb_mrs_def Let_def) apply (clarsimp simp:Suc_leI[OF msg_registers_lt_msg_max_length] split del:if_split) apply (clarsimp simp:get_tcb_message_info_def get_ipc_buffer_words_empty) - apply (clarsimp dest!:get_tcb_SomeD simp:obj_at_def) + apply (clarsimp dest!:get_tcb_SomeD simp:obj_at_def arch_tcb_context_get_def) apply (clarsimp simp:get_mrs_def thread_get_def gets_the_def arch_tcb_get_registers_def) apply (clarsimp simp:Suc_leI[OF msg_registers_lt_msg_max_length] split del:if_split) apply (wp|wpc)+ apply (rule_tac P = "tcb = obj" in hoare_gen_asm) - apply (clarsimp simp: get_tcb_mrs_def Let_def get_tcb_message_info_def Suc_leI[OF msg_registers_lt_msg_max_length] split del:if_split) + apply (clarsimp simp: get_tcb_mrs_def Let_def get_tcb_message_info_def Suc_leI[OF msg_registers_lt_msg_max_length] + arch_tcb_context_get_def + split del:if_split) apply (rule_tac Q="\buf_mrs s. buf_mrs = (get_ipc_buffer_words (machine_state sa) obj ([Suc (length msg_registers)..P\ CSpace_A.lookup_cap_and_slot t (to_bl (arch_tcb_context_get (tcb_arch obj'a) cap_register)) \\x. P\, \\ft s. True\" + "\P\ CSpace_A.lookup_cap_and_slot t (to_bl (arch_tcb_get_registers (tcb_arch obj'a) cap_register)) \\x. P\, \\ft s. 
True\" apply (simp add:CSpace_A.lookup_cap_and_slot_def) apply (wp | clarsimp simp:liftE_bindE)+ done @@ -1039,9 +1041,9 @@ lemma decode_invocation_corres': (cdl_intent_op (transform_full_intent (machine_state s) (cur_thread s) ctcb))) rv) ((\(slot, cap, extracaps, buffer). - do args \ get_mrs (cur_thread s) buffer (data_to_message_info (arch_tcb_context_get (tcb_arch ctcb) msg_info_register)); - Decode_A.decode_invocation (mi_label (data_to_message_info (arch_tcb_context_get (tcb_arch ctcb) msg_info_register))) args - (to_bl (arch_tcb_context_get (tcb_arch ctcb) cap_register)) slot cap extracaps + do args \ get_mrs (cur_thread s) buffer (data_to_message_info (arch_tcb_get_registers (tcb_arch ctcb) msg_info_register)); + Decode_A.decode_invocation (mi_label (data_to_message_info (arch_tcb_get_registers (tcb_arch ctcb) msg_info_register))) args + (to_bl (arch_tcb_get_registers (tcb_arch ctcb) cap_register)) slot cap extracaps od) rv')" apply (rule dcorres_expand_pfx) @@ -1056,11 +1058,13 @@ lemma decode_invocation_corres': apply clarsimp apply (rule dcorres_expand_pfx) apply (rule corres_guard_imp[OF decode_invocation_corres]) - apply (clarsimp simp:transform_full_intent_def Let_def get_tcb_message_info_def)+ + apply (solves \clarsimp simp: transform_full_intent_def Let_def get_tcb_message_info_def + arch_tcb_get_registers_def arch_tcb_context_get_def\)+ apply (wp get_tcb_mrs_wp | clarsimp)+ apply (rule dcorres_expand_pfx) apply (rule dcorres_free_throw[OF decode_invocation_error_branch]) - apply (clarsimp simp:transform_full_intent_def Let_def get_tcb_message_info_def)+ + apply (clarsimp simp:transform_full_intent_def Let_def get_tcb_message_info_def + arch_tcb_get_registers_def arch_tcb_context_get_def)+ done lemma reply_from_kernel_error: @@ -1223,7 +1227,7 @@ lemma not_master_reply_cap_lcs[wp]: apply wp apply (simp add:split_def) apply wp - apply (rule_tac Q ="\cap. cte_wp_at (\x. x = cap) (fst x) and real_cte_at (fst x) + apply (rule_tac Q ="\cap. cte_wp_at (\x. x = cap) (fst rv) and real_cte_at (fst rv) and valid_reply_masters and valid_objs" in hoare_strengthen_post) apply (wp get_cap_cte_wp_at) apply clarify @@ -1238,7 +1242,7 @@ lemma not_master_reply_cap_lcs'[wp]: "\valid_reply_masters and valid_objs\ CSpace_A.lookup_cap_and_slot t ptr \\rv s. cte_wp_at (Not \ is_master_reply_cap) (snd rv) s\,-" apply (rule_tac Q' = "\rv s. \ is_master_reply_cap (fst rv) \ cte_wp_at ((=) (fst rv)) (snd rv) s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (rule hoare_pre,wp,simp) apply (clarsimp simp:cte_wp_at_def) done @@ -1290,7 +1294,8 @@ lemma handle_invocation_corres: cap = transform_cap cap' \ slot = transform_cslot_ptr slot' \ extra = transform_cap_list extra'" in corres_split_bind_case_sum) apply (rule_tac Q = "\x. \" and Q'="\x. (=) s'a" in corres_initial_splitE) - apply (clarsimp simp: transform_full_intent_def Let_def) + apply (clarsimp simp: transform_full_intent_def Let_def arch_tcb_get_registers_def + arch_tcb_context_get_def) apply (rule corres_guard_imp[OF dcorres_lookup_cap_and_slot[simplified]]) apply (clarsimp simp: word_bits_def not_idle_thread_def invs_def valid_state_def)+ apply (rule dcorres_symb_exec_r_evalMonad) @@ -1325,38 +1330,29 @@ lemma handle_invocation_corres: apply (simp add: not_idle_thread_def) apply (strengthen invs_valid_idle) apply wp+ - apply (simp add:conj_comms not_idle_thread_def split_def) + \ \The following proof is quite fragile. 
If clarsimp is used, either on its own or as part + of wpsimp, then it rewrites pairs and necessary rules no longer match.\ + apply (simp add: not_idle_thread_def split_def) apply (wp sts_Restart_invs set_thread_state_ct_active)+ - apply (simp add:conj_comms split_def msg_from_syscall_error_simp) - apply (wp | simp add:split_def)+ - apply (rule_tac Q'="\r s. s = s'a \ ex_nonz_cap_to (cur_thread s) s \ valid_invocation r s \ - invocation_duplicates_valid r s" - in hoare_post_imp_R) - apply (simp add:split_def liftE_bindE[symmetric]) - apply (wp decode_inv_wf) - apply (clarsimp simp:ct_in_state_def st_tcb_at_def obj_at_def not_idle_thread_def)+ - apply (rule wp_post_tautE) - apply clarsimp - apply wp - apply (simp add:split_def liftE_bindE[symmetric]) - apply (wp | simp add: split_def liftE_bindE[symmetric])+ + apply (simp add: split_def msg_from_syscall_error_simp) + apply (wp | simp add: split_def)+ apply (rule_tac Q="\r s. s = s'a \ evalMonad (lookup_ipc_buffer False (cur_thread s'a)) s'a = Some r \ - cte_wp_at (Not \ is_master_reply_cap) (snd x) s \ - cte_wp_at ((=) (fst x)) (snd x) s \ - real_cte_at (snd x) s \ - s \ fst x \ - ex_cte_cap_wp_to (\_. True) (snd x) s \ - (\r\zobj_refs (fst x). ex_nonz_cap_to r s) \ - (\r\cte_refs (fst x) (interrupt_irq_node s). ex_cte_cap_wp_to \ r s)" + cte_wp_at (Not \ is_master_reply_cap) (snd rv) s \ + cte_wp_at ((=) (fst rv)) (snd rv) s \ + real_cte_at (snd rv) s \ + s \ fst rv \ + ex_cte_cap_wp_to (\_. True) (snd rv) s \ + (\r\zobj_refs (fst rv). ex_nonz_cap_to r s) \ + (\r\cte_refs (fst rv) (interrupt_irq_node s). ex_cte_cap_wp_to \ r s)" in hoare_strengthen_post) apply (wp evalMonad_wp) - apply (simp add: empty_when_fail_lookup_ipc_buffer weak_det_spec_lookup_ipc_buffer)+ + apply (simp add: empty_when_fail_lookup_ipc_buffer weak_det_spec_lookup_ipc_buffer)+ apply wp apply (clarsimp simp: invs_def valid_state_def valid_pspace_def valid_idle_def st_tcb_ex_cap ct_in_state_def pred_tcb_at_def not_idle_thread_def obj_at_def dest!: get_tcb_SomeD) - apply (wp)+ + apply wp+ apply (clarsimp simp:invs_def valid_state_def not_idle_thread_def pred_tcb_at_def obj_at_def) apply simp_all done @@ -1420,6 +1416,7 @@ lemma handle_recv_corres: \ not_idle_thread (cur_thread s) s \ (st_tcb_at active (cur_thread s') s \ invs s \ valid_etcbs s) \ ko_at (TCB obj') (cur_thread s') s " and R= "\r. \" in corres_splitEE[where r'="\x y. x = transform_cap y"]) + apply (simp add: arch_tcb_get_registers_def arch_tcb_context_get_def) apply (rule lookup_cap_corres, simp) apply (simp add: word_bits_def) apply (rule dcorres_expand_pfx) @@ -1448,7 +1445,7 @@ lemma handle_recv_corres: apply clarsimp apply wp+ apply (rule hoare_vcg_conj_liftE_R) - apply (rule hoare_post_imp_R, rule lookup_cap_valid) + apply (rule hoare_strengthen_postE_R, rule lookup_cap_valid) apply (clarsimp simp: valid_cap_def) apply wp+ apply (simp add:injection_handler_def) @@ -1458,7 +1455,7 @@ lemma handle_recv_corres: apply (rule hoare_vcg_E_elim) apply (simp add: lookup_cap_def lookup_slot_for_thread_def split_def) apply wp - apply (rule hoare_post_impErr[OF resolve_address_bits_valid_fault]) + apply (rule hoare_strengthen_postE[OF resolve_address_bits_valid_fault]) apply simp apply simp apply wp @@ -1533,7 +1530,7 @@ lemma handle_vm_fault_wp: apply wp apply (clarsimp simp:do_machine_op_def getDFSR_def) apply wp - apply (case_tac x) + apply (case_tac rv) apply clarsimp apply (rule_tac P="P and (\x. 
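(* Annotation, not part of the diff: one possible reading of the fragility remark above,
   as a standalone illustration in a fresh theory importing only Main. clarsimp can
   rewrite pair-patterned terms into fst/snd form (for instance once split_def is in the
   simp set or a known pair is split), and a rule phrased against the tupled form then
   no longer matches syntactically, even though the two forms are provably equal: *)

lemma case_prod_to_fst_snd:
  "(case p of (a, b) \<Rightarrow> (a :: nat) + b) = fst p + snd p"
  by (cases p) simp

(* This only records the rewrite direction; it is not the actual goal from the proof
   above. *)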
snd (aa,ba) = machine_state x)" in hoare_post_imp) apply (assumption) @@ -1542,7 +1539,7 @@ lemma handle_vm_fault_wp: apply (clarsimp simp:gets_def alternative_def get_def bind_def select_def return_def) apply (clarsimp simp:do_machine_op_def getFAR_def) apply wp - apply (case_tac x) + apply (case_tac rv) apply clarsimp apply (rule_tac P="P and (\x. snd (aa,ba) = machine_state x)" in hoare_post_imp) apply (assumption) @@ -1553,7 +1550,7 @@ lemma handle_vm_fault_wp: apply wp apply (clarsimp simp:do_machine_op_def getIFSR_def) apply wp - apply (case_tac x) + apply (case_tac rv) apply clarsimp apply (rule_tac P="P and (\x. snd (aa,ba) = machine_state x)" in hoare_post_imp) apply (assumption) diff --git a/proof/drefine/Tcb_DR.thy b/proof/drefine/Tcb_DR.thy index cc70e833fe..19cc276775 100644 --- a/proof/drefine/Tcb_DR.thy +++ b/proof/drefine/Tcb_DR.thy @@ -147,40 +147,40 @@ lemma decode_set_space_translate_tcb_invocation: apply (rule_tac P ="croot_cap' = a \ is_cnode_cap a" in hoare_gen_asmE) apply clarsimp apply (rule validE_validE_R) - apply (wp hoare_post_impErr[OF derive_cnode_cap_as_vroot],simp) + apply (wp hoare_strengthen_postE[OF derive_cnode_cap_as_vroot],simp) apply (wp|clarsimp)+ - apply (wp hoare_post_impErr[OF derive_cnode_cap_as_croot],simp) + apply (wp hoare_strengthen_postE[OF derive_cnode_cap_as_croot],simp) apply (wp|clarsimp)+ apply (clarsimp simp:whenE_def | rule conjI | wp)+ apply (rule_tac P ="croot_cap' = update_cap_data False (x! 1) a \ is_cnode_cap croot_cap'" in hoare_gen_asmE) apply (rule validE_validE_R) apply simp - apply (rule_tac s1 = s in hoare_post_impErr[OF derive_cnode_cap_as_vroot],simp) + apply (rule_tac s1 = s in hoare_strengthen_postE[OF derive_cnode_cap_as_vroot],simp) apply (rule conjI|simp split:if_split_asm)+ apply (wp|clarsimp)+ apply (rule validE_validE_R) - apply (rule_tac s1 = s in hoare_post_impErr[OF derive_cnode_cap_as_croot]) + apply (rule_tac s1 = s in hoare_strengthen_postE[OF derive_cnode_cap_as_croot]) apply (wp|clarsimp)+ apply (clarsimp simp:whenE_def | rule conjI | wp)+ apply (rule_tac P ="croot_cap' = a \ is_cnode_cap a" in hoare_gen_asmE) apply clarsimp apply (rule validE_validE_R) - apply (rule_tac s1 = s in hoare_post_impErr[OF derive_cnode_cap_as_vroot],simp) + apply (rule_tac s1 = s in hoare_strengthen_postE[OF derive_cnode_cap_as_vroot],simp) apply (clarsimp split:if_splits simp:valid_vtable_root_update) apply (wp|clarsimp)+ - apply (wp hoare_post_impErr[OF derive_cnode_cap_as_croot],simp) + apply (wp hoare_strengthen_postE[OF derive_cnode_cap_as_croot],simp) apply (wp|clarsimp)+ apply (clarsimp simp: | rule conjI | wp)+ apply (rule_tac P ="croot_cap' = update_cap_data False (x! 
1) a \ is_cnode_cap croot_cap'" in hoare_gen_asmE) apply (rule validE_validE_R) apply simp - apply (rule_tac s1 = s in hoare_post_impErr[OF derive_cnode_cap_as_vroot],simp) + apply (rule_tac s1 = s in hoare_strengthen_postE[OF derive_cnode_cap_as_vroot],simp) apply (rule conjI|simp split:if_split_asm)+ apply (rule valid_vtable_root_update) apply clarsimp+ apply (wp|clarsimp)+ apply (rule validE_validE_R) - apply (rule hoare_post_impErr[OF derive_cnode_cap_as_croot]) + apply (rule hoare_strengthen_postE[OF derive_cnode_cap_as_croot]) apply fastforce+ apply (wp|clarsimp)+ done @@ -427,7 +427,9 @@ lemma dcorres_idempotent_as_user: done lemma transform_full_intent_kheap_update_eq: - "\ q \ u' \ \ transform_full_intent (machine_state (s\kheap := kheap s(u' \ x')\)) q = transform_full_intent (machine_state s) q" + "q \ u' \ + transform_full_intent (machine_state (s\kheap := (kheap s)(u' \ x')\)) q = + transform_full_intent (machine_state s) q" by simp (* Suspend functions correspond. *) @@ -566,7 +568,7 @@ lemma restart_corres: apply wp apply (simp add:not_idle_thread_def) apply ((wp|wps)+)[2] - apply (rule_tac Q="(=) s' and invs" in hoare_vcg_precond_imp) + apply (rule_tac Q="(=) s' and invs" in hoare_weaken_pre) apply (rule hoare_strengthen_post [where Q="\r. invs and tcb_at obj_id and not_idle_thread obj_id and valid_etcbs"]) apply (simp add:not_idle_thread_def) @@ -637,7 +639,7 @@ lemma invoke_tcb_corres_write_regs: apply simp apply wp apply (clarsimp simp: when_def) - apply (wpsimp wp: hoare_when_wp)+ + apply (wpsimp wp: when_wp)+ apply (wp wp_post_taut | simp add:invs_def valid_state_def | fastforce)+ done @@ -710,7 +712,7 @@ lemma not_idle_after_suspend [wp]: "\invs and not_idle_thread obj_id' and tcb_at obj_id'\ IpcCancel_A.suspend obj_id' \\rv. valid_idle \" apply (rule hoare_strengthen_post) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule suspend_invs) apply (simp add:not_idle_thread_def invs_def valid_state_def)+ done @@ -965,11 +967,13 @@ lemma TPIDRURW_notin_msg_registers[simp]: done lemma transform_full_intent_update_tpidrurw[simp]: - "transform_full_intent ms ref (tcb\tcb_arch := arch_tcb_context_set (s(TPIDRURW := a)) (tcb_arch tcb)\) - = transform_full_intent ms ref (tcb\tcb_arch := arch_tcb_context_set s (tcb_arch tcb)\)" - apply (clarsimp simp: transform_full_intent_def cap_register_def ARM.capRegister_def) + "transform_full_intent ms ref (tcb\tcb_arch := arch_tcb_set_registers (s(TPIDRURW := a)) (tcb_arch tcb)\) + = transform_full_intent ms ref (tcb\tcb_arch := arch_tcb_set_registers s (tcb_arch tcb)\)" + apply (clarsimp simp: transform_full_intent_def cap_register_def ARM.capRegister_def + arch_tcb_set_registers_def arch_tcb_context_get_def Let_def) by (fastforce simp: get_tcb_message_info_def msg_info_register_def ARM.msgInfoRegister_def - get_tcb_mrs_def get_ipc_buffer_words_def Suc_le_eq Let_def) + get_tcb_mrs_def get_ipc_buffer_words_def Suc_le_eq Let_def + arch_tcb_context_get_def) lemma as_user_valid_irq_node[wp]: "\valid_irq_node\ @@ -985,7 +989,10 @@ lemma set_register_TPIDRURW_tcb_abstract_inv[wp]: "\\cxt. P (transform_tcb ms ref (tcb\tcb_arch := arch_tcb_context_set cxt (tcb_arch tcb)\) etcb)\ setRegister TPIDRURW a \\_ cxt. 
P (transform_tcb ms ref (tcb\tcb_arch := arch_tcb_context_set cxt (tcb_arch tcb)\) etcb)\" - by (simp add: setRegister_def simpler_modify_def valid_def transform_tcb_def) + supply transform_full_intent_update_tpidrurw[simplified arch_tcb_set_registers_def, simp] + apply (simp add: setRegister_def simpler_modify_def valid_def arch_tcb_context_set_def + transform_tcb_def) + done lemma dcorres_tcb_update_ipc_buffer: "dcorres (dc \ dc) (\) (invs and valid_etcbs and tcb_at obj_id' and not_idle_thread obj_id' @@ -1048,7 +1055,7 @@ lemma dcorres_tcb_update_ipc_buffer: apply wpsimp+ apply (rule validE_validE_R) apply (rule_tac Q = "\r s. invs s \ valid_etcbs s \ not_idle_thread obj_id' s \ tcb_at obj_id' s" - in hoare_post_impErr[where E="\x. \"]) + in hoare_strengthen_postE[where E="\x. \"]) apply (simp add:not_idle_thread_def) apply (wp cap_delete_cte_at cap_delete_deletes) apply (clarsimp simp:invs_def valid_state_def not_idle_thread_def) @@ -1114,9 +1121,8 @@ lemma dcorres_tcb_update_ipc_buffer: apply wp apply wp apply wpsimp - apply (wp hoare_when_wp)+ - apply (rule hoare_strengthen_post[OF hoare_TrueI[where P = \]],clarsimp+) - apply (wp wp_post_taut hoare_drop_imp get_cap_weak_wp)+ + apply (wp when_wp)+ + apply (wpsimp wp: wp_post_taut hoare_drop_imp get_cap_weak_wp simp_del: hoare_TrueI)+ apply (clarsimp simp:conj_comms) apply (wp thread_set_global_refs_triv thread_set_valid_idle) apply (clarsimp simp:tcb_cap_cases_def) @@ -1128,12 +1134,12 @@ lemma dcorres_tcb_update_ipc_buffer: apply (wp thread_set_cte_at update_ipc_buffer_valid_objs thread_set_valid_cap thread_set_cte_wp_at_trivial) apply (fastforce simp:tcb_cap_cases_def) apply (simp add: transform_tcb_slot_4) - apply (rule hoare_post_impErr[OF validE_R_validE[OF hoare_True_E_R]]) + apply (rule hoare_strengthen_postE[OF validE_R_validE[OF hoareE_R_TrueI]]) apply simp+ apply (rule_tac Q = "\r s. invs s \ valid_etcbs s \ not_idle_thread (fst a') s \ tcb_at obj_id' s \ not_idle_thread obj_id' s \ not_idle_thread ab s \ cte_wp_at (\a. True) (ab,ba) s \ cte_wp_at (\c. c = cap.NullCap) (obj_id', tcb_cnode_index 4) s \ is_aligned a msg_align_bits" - in hoare_post_impErr[where E="\x. \"]) + in hoare_strengthen_postE[where E="\x. \"]) apply (simp add:not_idle_thread_def) apply (wp cap_delete_cte_at cap_delete_deletes cap_delete_valid_cap) apply (clarsimp simp:invs_valid_objs invs_mdb invs_valid_idle) @@ -1207,7 +1213,7 @@ lemma dcorres_tcb_update_vspace_root: apply (drule (3) ex_cte_cap_to_not_idle, simp add: not_idle_thread_def) apply (rule corres_trivial) apply clarsimp - apply (wp hoare_when_wp) + apply (wp when_wp) apply (rule hoare_strengthen_post[OF hoare_TrueI[where P =\ ]]) apply clarsimp apply (rule hoare_strengthen_post[OF hoare_TrueI[where P =\ ]]) @@ -1218,7 +1224,7 @@ lemma dcorres_tcb_update_vspace_root: apply (rule_tac Q = "\r s. invs s \ valid_etcbs s \ not_idle_thread ba s \ not_idle_thread (fst a') s \ cte_wp_at (\_. True) (ba, c) s \ cte_wp_at (\c. c = cap.NullCap) (obj_id', tcb_cnode_index (Suc 0)) s" - in hoare_post_impErr[where E="\x. \"]) + in hoare_strengthen_postE[where E="\x. 
\"]) apply (simp add: not_idle_thread_def) apply (wp cap_delete_cte_at cap_delete_deletes) apply (clarsimp simp: invs_def valid_state_def valid_pspace_def) @@ -1294,7 +1300,7 @@ lemma dcorres_tcb_update_cspace_root: apply (erule valid_cap_aligned) apply (rule corres_trivial) apply (clarsimp) - apply (wp hoare_when_wp) + apply (wp when_wp) apply (rule hoare_strengthen_post[OF hoare_TrueI[where P =\ ]]) apply clarsimp apply wp+ @@ -1311,7 +1317,7 @@ lemma dcorres_tcb_update_cspace_root: not_idle_thread (fst a') s \ cte_wp_at (\_. True) (ba, c) s \ cte_wp_at (\c. c = cap.NullCap) (obj_id', tcb_cnode_index 0) s \ no_cap_to_obj_dr_emp aaa s" - in hoare_post_impErr[where E = "\r. \"]) + in hoare_strengthen_postE[where E = "\r. \"]) apply (simp add:not_idle_thread_def) apply (wp cap_delete_cte_at cap_delete_deletes cap_delete_valid_cap) apply (simp add:invs_valid_objs) @@ -1370,13 +1376,11 @@ done lemma hoare_case_someE: "\P\a\\r s. Q s\,- \ \\s. case x of None \ True | Some y \ P s\ a \\rv s. case x of None \ True | Some y \ Q s\,-" - apply (case_tac x) - apply clarsimp+ -done + by (case_tac x; wpsimp) lemma case_option_wpE: "(\x. \P x\a\\r. Q x\,-) \ \case_option \ P z\a\\r. case_option \ Q z\,-" - by (clarsimp split:option.splits) + by (wpsimp split: option.splits) lemma option_update_thread_not_idle_thread[wp]: "\not_idle_thread x and not_idle_thread a\option_update_thread a b c\\r. not_idle_thread x\" @@ -1577,7 +1581,7 @@ lemma dcorres_thread_control: apply (wp cap_delete_deletes cap_delete_cte_at cap_delete_valid_cap case_option_wpE | simp add: not_idle_thread_def option.split[where P="\x. x"])+ - apply (rule_tac Q'="\_. ?P" in hoare_post_imp_R[rotated]) + apply (rule_tac Q'="\_. ?P" in hoare_strengthen_postE_R[rotated]) apply (clarsimp simp: is_valid_vtable_root_def is_cnode_or_valid_arch_def is_arch_cap_def not_idle_thread_def emptyable_def split: option.splits) diff --git a/proof/drefine/Untyped_DR.thy b/proof/drefine/Untyped_DR.thy index ba477c80bb..78bf4f3fe7 100644 --- a/proof/drefine/Untyped_DR.thy +++ b/proof/drefine/Untyped_DR.thy @@ -65,12 +65,16 @@ next apply (rule someI2_ex, fastforce+)+ done + (* FIXME: For some reason Nondet_In_Monad.in_fail doesn't fire below. This version would probably + have been better in the first place. *) + have [simp]: "\s. fst (fail s) = {}" by (simp add: fail_def) + have loadWord_const: "\a s. \x\fst (loadWord a s). 
snd x = s" apply (case_tac "is_aligned a 2") apply (simp add: loadWord_def is_aligned_mask exec_gets) apply (simp add: return_def) - apply (simp add: loadWord_def exec_gets fail_def is_aligned_mask) + apply (simp add: loadWord_def exec_gets is_aligned_mask) done have loadWord_atMostOneResult: @@ -78,7 +82,7 @@ next apply (case_tac "is_aligned a 2") apply (simp add: loadWord_def is_aligned_mask exec_gets) apply (simp add: return_def) - apply (simp add: loadWord_def exec_gets fail_def is_aligned_mask) + apply (simp add: loadWord_def exec_gets is_aligned_mask) done have mapM_loadWord_atMostOneResult[rule_format]: @@ -648,6 +652,7 @@ lemma clearMemory_unused_corres_noop: (return ()) (do_machine_op (clearMemory p (2 ^ (obj_bits_api ty us))))" (is "\ ?def; ?szv; ?in \ \ dcorres dc \ ?P ?f ?g") + supply empty_fail_cond[simp] apply (drule page_objects_default_object[where us=us and dev = dev], clarsimp) apply (rename_tac pgsz) apply (simp add: clearMemory_def do_machine_op_bind cleanCacheRange_PoC_def @@ -731,7 +736,7 @@ lemma init_arch_objects_corres_noop: done lemma monad_commute_set_cap_cdt: - "monad_commute \ (KHeap_D.set_cap ptr cap) (modify (\s. s\cdl_cdt := cdl_cdt s(ptr2 \ ptr3)\))" + "monad_commute \ (KHeap_D.set_cap ptr cap) (modify (\s. s\cdl_cdt := (cdl_cdt s)(ptr2 \ ptr3)\))" apply (clarsimp simp:monad_commute_def) apply (rule sym) apply (subst bind_assoc[symmetric]) @@ -860,7 +865,7 @@ lemma create_cap_mdb_cte_at: \ cte_wp_at ((\)cap.NullCap) parent s \ cte_at (fst tup) s\ create_cap type sz parent dev tup \\rv s. mdb_cte_at (swp (cte_wp_at ((\)cap.NullCap)) s) (cdt s)\" apply (simp add: create_cap_def split_def mdb_cte_at_def) - apply (wp hoare_vcg_all_lift set_cap_default_not_none set_cdt_cte_wp_at static_imp_wp dxo_wp_weak + apply (wp hoare_vcg_all_lift set_cap_default_not_none set_cdt_cte_wp_at hoare_weak_lift_imp dxo_wp_weak | simp | wps)+ apply (fastforce simp: cte_wp_at_caps_of_state) done @@ -1041,7 +1046,7 @@ lemma create_caps_loop_dcorres: done crunch valid_idle[wp]: init_arch_objects "valid_idle" - (wp: crunch_wps hoare_unless_wp ignore: clearMemory) + (wp: crunch_wps unless_wp ignore: clearMemory) lemma update_available_range_dcorres: "dcorres dc \ ( K(\idx. untyped_cap = transform_cap (cap.UntypedCap dev ptr sz idx) @@ -1207,15 +1212,6 @@ lemma mapME_x_upt_length_ys: by (metis mapME_x_map_simp[where f="\_. ()" and m="\_. m" for m, unfolded o_def] map_replicate_const length_upt minus_nat.diff_0) -lemma monadic_set_cap_id: - "monadic_rewrite False True - (cte_wp_at ((=) cap) p) - (set_cap cap p) (return ())" - by (clarsimp simp: monadic_rewrite_def set_cap_id return_def) - -lemmas monadic_set_cap_id2 - = monadic_rewrite_transverse[OF monadic_set_cap_id monadic_rewrite_refl] - (* FIXME: move *) lemma mapME_x_append: "mapME_x f (xs @ ys) = (doE mapME_x f xs; mapME_x f ys odE)" @@ -1564,9 +1560,9 @@ lemma invoke_untyped_corres: Q="\_. valid_etcbs and invs and valid_untyped_inv_wcap untyped_invocation (Some (cap.UntypedCap dev ptr' sz (if reset then 0 else idx))) and ct_active and (\s. reset \ pspace_no_overlap {ptr' .. 
ptr' + 2 ^ sz - 1} s)" - in hoare_post_impErr) - apply (wp hoare_whenE_wp) - apply (rule validE_validE_R, rule hoare_post_impErr, rule reset_untyped_cap_invs_etc) + in hoare_strengthen_postE) + apply (wp whenE_wp) + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule reset_untyped_cap_invs_etc) apply (clarsimp simp only: if_True simp_thms ptrs, intro conjI, assumption+) apply simp apply (clarsimp simp only: ui ptrs) @@ -1662,9 +1658,9 @@ end lemma mapME_x_inv_wp2: "(\x. \P and E\ f x \\rv. P and E\,\\rv. E\) \ \P and E\ mapME_x f xs \\rv. P\,\\rv. E\" - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule mapME_x_inv_wp[where E="\_. E"]) - apply (rule hoare_post_impErr, assumption) + apply (rule hoare_strengthen_postE, assumption) apply simp_all done @@ -1843,17 +1839,17 @@ lemma decode_untyped_corres: apply simp apply clarsimp apply (rule hoare_pre) - apply (wp hoare_drop_imp | simp)+ + apply wpsimp apply fastforce apply (clarsimp simp: conj_comms is_cnode_cap_transform_cap split del: if_split) apply (rule validE_R_validE) apply (rule_tac Q' = "\a s. invs s \ valid_etcbs s \ valid_cap a s \ cte_wp_at ((=) (cap.UntypedCap dev ptr sz idx)) slot' s \ (Structures_A.is_cnode_cap a \ not_idle_thread (obj_ref_of a) s)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (rule hoare_pre) apply (wp get_cap_wp) apply (rule_tac Q' = "\a s. invs s \ valid_etcbs s \ cte_wp_at ((=) (cap.UntypedCap dev ptr sz idx)) slot' s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp: cte_wp_at_caps_of_state) apply (frule_tac p = "(x,y)" for x y in caps_of_state_valid[rotated]) diff --git a/proof/infoflow/ADT_IF.thy b/proof/infoflow/ADT_IF.thy index fe421f508b..7df8515f10 100644 --- a/proof/infoflow/ADT_IF.thy +++ b/proof/infoflow/ADT_IF.thy @@ -787,7 +787,7 @@ lemma kernel_entry_if_invs: kernel_entry_if e tc \\_. invs\" unfolding kernel_entry_if_def - by (wpsimp wp: thread_set_invs_trivial static_imp_wp + by (wpsimp wp: thread_set_invs_trivial hoare_weak_lift_imp simp: arch_tcb_update_aux2 ran_tcb_cap_cases)+ lemma kernel_entry_if_globals_equiv: @@ -796,7 +796,7 @@ lemma kernel_entry_if_globals_equiv: kernel_entry_if e tc \\_. globals_equiv st\" apply (simp add: kernel_entry_if_def) - apply (wp static_imp_wp handle_event_globals_equiv + apply (wp hoare_weak_lift_imp handle_event_globals_equiv thread_set_invs_trivial thread_set_context_globals_equiv | simp add: ran_tcb_cap_cases arch_tcb_update_aux2)+ apply (clarsimp simp: cur_thread_idle) @@ -831,7 +831,7 @@ lemma kernel_entry_silc_inv[wp]: \\_. silc_inv aag st\" unfolding kernel_entry_if_def by (wpsimp simp: ran_tcb_cap_cases arch_tcb_update_aux2 - wp: static_imp_wp handle_event_silc_inv thread_set_silc_inv thread_set_invs_trivial + wp: hoare_weak_lift_imp handle_event_silc_inv thread_set_silc_inv thread_set_invs_trivial thread_set_not_state_valid_sched thread_set_pas_refined | wp (once) hoare_vcg_imp_lift | force)+ @@ -1016,7 +1016,7 @@ lemma kernel_entry_pas_refined[wp]: \\_. pas_refined aag\" unfolding kernel_entry_if_def by (wpsimp simp: ran_tcb_cap_cases schact_is_rct_def arch_tcb_update_aux2 - wp: static_imp_wp handle_event_pas_refined thread_set_pas_refined + wp: hoare_weak_lift_imp handle_event_pas_refined thread_set_pas_refined guarded_pas_domain_lift thread_set_invs_trivial thread_set_not_state_valid_sched | force)+ @@ -1026,7 +1026,7 @@ lemma kernel_entry_if_domain_sep_inv: \\_. 
domain_sep_inv irqs st\" unfolding kernel_entry_if_def by (wpsimp simp: ran_tcb_cap_cases arch_tcb_update_aux2 - wp: handle_event_domain_sep_inv static_imp_wp + wp: handle_event_domain_sep_inv hoare_weak_lift_imp thread_set_invs_trivial thread_set_not_state_valid_sched)+ lemma kernel_entry_if_valid_sched: @@ -1037,7 +1037,7 @@ lemma kernel_entry_if_valid_sched: by (wpsimp simp: kernel_entry_if_def ran_tcb_cap_cases arch_tcb_update_aux2 wp: handle_event_valid_sched thread_set_invs_trivial hoare_vcg_disj_lift thread_set_no_change_tcb_state ct_in_state_thread_state_lift - thread_set_not_state_valid_sched static_imp_wp)+ + thread_set_not_state_valid_sched hoare_weak_lift_imp)+ lemma kernel_entry_if_irq_masks: "\(\s. P (irq_masks_of_state s)) and domain_sep_inv False st and invs\ @@ -1665,7 +1665,7 @@ lemma schedule_if_globals_equiv_scheduler[wp]: \\_. globals_equiv_scheduler st\" apply (simp add: schedule_if_def) apply wp - apply (wp globals_equiv_scheduler_inv'[where P="invs"] activate_thread_globals_equiv) + apply (wpsimp wp: globals_equiv_scheduler_inv'[where P="invs"] activate_thread_globals_equiv) apply (simp add: invs_arch_state invs_valid_idle) apply (wp | simp)+ done @@ -2390,7 +2390,7 @@ lemma preemption_point_irq_state_inv'[wp]: lemma validE_validE_E': "\P\ f \Q\, \E\ \ \P\ f -, \E\" apply (rule validE_validE_E) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply assumption apply simp+ done @@ -2473,7 +2473,7 @@ lemma rec_del_irq_state_inv: "\irq_state_inv st and domain_sep_inv False sta and K (irq_is_recurring irq st)\ rec_del call \\_. irq_state_inv st\, \\_. irq_state_next st\" - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule use_spec) apply (rule rec_del_irq_state_inv') apply auto @@ -2505,7 +2505,7 @@ proof(induct rule: cap_revoke.induct[where ?a1.0=s]) apply (wp drop_spec_validE[OF preemption_point_irq_state_inv[simplified validE_R_def]] drop_spec_validE[OF preemption_point_irq_state_inv'[where irq=irq]] drop_spec_validE[OF valid_validE[OF preemption_point_domain_sep_inv]] - cap_delete_domain_sep_inv cap_delete_irq_state_inv select_wp + cap_delete_domain_sep_inv cap_delete_irq_state_inv drop_spec_validE[OF assertE_wp] drop_spec_validE[OF returnOk_wp] drop_spec_validE[OF liftE_wp] drop_spec_validE[OF hoare_vcg_conj_liftE1] | simp | wp (once) hoare_drop_imps)+ @@ -2602,7 +2602,7 @@ lemma invoke_untyped_irq_state_inv: \\_. irq_state_inv st\, \\_. irq_state_next st\" apply (cases ui, simp add: invoke_untyped_def mapM_x_def[symmetric]) apply (rule hoare_pre) - apply (wp mapM_x_wp' hoare_whenE_wp reset_untyped_cap_irq_state_inv[where irq=irq] + apply (wp mapM_x_wp' whenE_wp reset_untyped_cap_irq_state_inv[where irq=irq] | rule irq_state_inv_triv | simp)+ done @@ -2618,8 +2618,7 @@ lemma perform_invocation_irq_state_inv: invoke_tcb_irq_state_inv invoke_cnode_irq_state_inv[simplified validE_R_def] | clarsimp | simp add: invoke_domain_def)+\)?) 
apply wp - apply (wp irq_state_inv_triv' invoke_irq_control_irq_masks) - apply clarsimp + apply (wpsimp wp: irq_state_inv_triv' invoke_irq_control_irq_masks) apply assumption apply auto[1] apply wp @@ -2643,7 +2642,7 @@ lemma handle_invocation_irq_state_inv: split del: if_split) apply (wp syscall_valid) apply ((wp irq_state_inv_triv | wpc | simp)+)[2] - apply (wp static_imp_wp perform_invocation_irq_state_inv hoare_vcg_all_lift + apply (wp hoare_weak_lift_imp perform_invocation_irq_state_inv hoare_vcg_all_lift hoare_vcg_ex_lift decode_invocation_IRQHandlerCap | wpc | wp (once) hoare_drop_imps @@ -2731,7 +2730,7 @@ lemma kernel_entry_if_next_irq_state_of_state: \ next_irq_state_of_state b = next_irq_state_of_state i_s" apply (simp add: kernel_entry_if_def in_bind in_return | elim conjE exE)+ apply (erule use_validE_R) - apply (rule_tac Q'="\_. irq_state_inv i_s" in hoare_post_imp_R) + apply (rule_tac Q'="\_. irq_state_inv i_s" in hoare_strengthen_postE_R) apply (rule validE_validE_R') apply (rule handle_event_irq_state_inv[where sta=st and irq=irq] | simp)+ apply (clarsimp simp: irq_state_inv_def) @@ -2752,7 +2751,7 @@ lemma kernel_entry_if_next_irq_state_of_state_next: apply (simp add: kernel_entry_if_def in_bind in_return | elim conjE exE)+ apply (erule use_validE_E) apply (rule validE_validE_E) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule handle_event_irq_state_inv[where sta=st and irq=irq and st=i_s]) apply simp+ apply (simp add: irq_state_next_def) diff --git a/proof/infoflow/ARM/ArchADT_IF.thy b/proof/infoflow/ARM/ArchADT_IF.thy index 152d537534..b99baef57a 100644 --- a/proof/infoflow/ARM/ArchADT_IF.thy +++ b/proof/infoflow/ARM/ArchADT_IF.thy @@ -67,7 +67,7 @@ lemma do_user_op_if_invs[ADT_IF_assms]: do_user_op_if f tc \\_. invs and ct_running\" apply (simp add: do_user_op_if_def split_def) - apply (wp do_machine_op_ct_in_state select_wp device_update_invs | wp (once) dmo_invs | simp)+ + apply (wp do_machine_op_ct_in_state device_update_invs | wp (once) dmo_invs | simp)+ apply (clarsimp simp: user_mem_def user_memory_update_def simpler_modify_def restrict_map_def invs_def cur_tcb_def ptable_rights_s_def ptable_lift_s_def) apply (frule ptable_rights_imp_frame) @@ -77,31 +77,31 @@ lemma do_user_op_if_invs[ADT_IF_assms]: done crunch domain_sep_inv[ADT_IF_assms, wp]: do_user_op_if "domain_sep_inv irqs st" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) crunch valid_sched[ADT_IF_assms, wp]: do_user_op_if "valid_sched" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) crunch irq_masks[ADT_IF_assms, wp]: do_user_op_if "\s. P (irq_masks_of_state s)" - (ignore: user_memory_update wp: select_wp dmo_wp no_irq) + (ignore: user_memory_update wp: dmo_wp no_irq) crunch valid_list[ADT_IF_assms, wp]: do_user_op_if "valid_list" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) lemma do_user_op_if_scheduler_action[ADT_IF_assms, wp]: "do_user_op_if f tc \\s. 
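(* Annotation, not part of the diff: in the surrounding infoflow theories, select_wp and
   alternative_wp disappear from wp and crunch rule lists while the proofs are otherwise
   unchanged. The diff does not state a reason; presumably these rules are now applied
   by the wp method automatically, making the explicit mentions redundant. This is an
   inference from the pattern of the hunks, not a statement taken from the source. *)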
P (scheduler_action s)\" - by (simp add: do_user_op_if_def | wp select_wp | wpc)+ + by (simp add: do_user_op_if_def | wp | wpc)+ lemma do_user_op_silc_inv[ADT_IF_assms, wp]: "do_user_op_if f tc \silc_inv aag st\" apply (simp add: do_user_op_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma do_user_op_pas_refined[ADT_IF_assms, wp]: "do_user_op_if f tc \pas_refined aag\" apply (simp add: do_user_op_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done crunches do_user_op_if @@ -109,7 +109,7 @@ crunches do_user_op_if and cur_domain[ADT_IF_assms, wp]: "\s. P (cur_domain s)" and idle_thread[ADT_IF_assms, wp]: "\s. P (idle_thread s)" and domain_fields[ADT_IF_assms, wp]: "domain_fields P" - (wp: select_wp ignore: user_memory_update) + (ignore: user_memory_update) lemma do_use_op_guarded_pas_domain[ADT_IF_assms, wp]: "do_user_op_if f tc \guarded_pas_domain aag\" @@ -235,7 +235,7 @@ lemma do_user_op_if_idle_equiv[ADT_IF_assms, wp]: do_user_op_if uop tc \\_. idle_equiv st\" unfolding do_user_op_if_def - by (wpsimp wp: dmo_user_memory_update_idle_equiv dmo_device_memory_update_idle_equiv select_wp) + by (wpsimp wp: dmo_user_memory_update_idle_equiv dmo_device_memory_update_idle_equiv) lemma not_in_global_refs_vs_lookup: "\ (\\p) s; valid_vs_lookup s; valid_global_refs s; valid_arch_state s; valid_global_objs s \ @@ -254,7 +254,7 @@ lemma kernel_entry_if_valid_pdpt_objs[wp]: apply (simp add: kernel_entry_if_def) apply (wp | wpc | simp add: kernel_entry_if_def)+ apply (wpsimp simp: ran_tcb_cap_cases arch_tcb_update_aux2 - wp: static_imp_wp thread_set_invs_trivial)+ + wp: hoare_weak_lift_imp thread_set_invs_trivial)+ done lemma kernel_entry_if_valid_vspace_objs_if[ADT_IF_assms, wp]: @@ -273,7 +273,7 @@ lemma schedule_if_valid_pdpt_objs[ADT_IF_assms, wp]: lemma do_user_op_if_valid_pdpt_objs[ADT_IF_assms, wp]: "\valid_vspace_objs_if\ do_user_op_if a b \\rv s. valid_vspace_objs_if s\" - by (simp add: do_user_op_if_def | wp select_wp | wpc)+ + by (simp add: do_user_op_if_def | wp | wpc)+ lemma valid_vspace_objs_if_ms_update[ADT_IF_assms, simp]: "valid_vspace_objs_if (machine_state_update f s) = valid_vspace_objs_if s" @@ -282,20 +282,20 @@ lemma valid_vspace_objs_if_ms_update[ADT_IF_assms, simp]: lemma do_user_op_if_irq_state_of_state[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_state_of_state s)\" apply (rule hoare_pre) - apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp select_wp | wpc)+ + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ done lemma do_user_op_if_irq_masks_of_state[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_masks_of_state s)\" apply (rule hoare_pre) - apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp select_wp | wpc)+ + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ done lemma do_user_op_if_irq_measure_if[ADT_IF_assms]: "do_user_op_if utf uc \\s. 
P (irq_measure_if s)\" apply (rule hoare_pre) apply (simp add: do_user_op_if_def user_memory_update_def irq_measure_if_def - | wps |wp dmo_wp select_wp | wpc)+ + | wps |wp dmo_wp | wpc)+ done lemma invoke_tcb_irq_state_inv[ADT_IF_assms]: @@ -313,7 +313,7 @@ lemma invoke_tcb_irq_state_inv[ADT_IF_assms]: apply ((wp irq_state_inv_triv | simp)+)[2] (* just ThreadControl left *) apply (simp add: split_def cong: option.case_cong) - by (wp hoare_vcg_all_lift_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R + by (wp hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R checked_cap_insert_domain_sep_inv cap_delete_deletes cap_delete_irq_state_inv[where st=st and sta=sta and irq=irq] cap_delete_irq_state_next[where st=st and sta=sta and irq=irq] diff --git a/proof/infoflow/ARM/ArchArch_IF.thy b/proof/infoflow/ARM/ArchArch_IF.thy index 2faa1f5882..c90ad957b9 100644 --- a/proof/infoflow/ARM/ArchArch_IF.thy +++ b/proof/infoflow/ARM/ArchArch_IF.thy @@ -62,7 +62,7 @@ crunches set_irq_state, arch_post_cap_deletion, handle_arch_fault_reply crunch irq_state_of_state[Arch_IF_assms, wp]: arch_switch_to_idle_thread, arch_switch_to_thread "\s :: det_state. P (irq_state_of_state s)" - (wp: dmo_wp modify_wp crunch_wps hoare_whenE_wp + (wp: dmo_wp modify_wp crunch_wps whenE_wp simp: invalidateLocalTLB_ASID_def setHardwareASID_def set_current_pd_def machine_op_lift_def machine_rest_lift_def crunch_simps storeWord_def dsb_def isb_def writeTTBR0_def) @@ -76,7 +76,7 @@ crunch irq_state_of_state[wp]: arch_perform_invocation "\s. P (irq_state crunch irq_state_of_state[Arch_IF_assms, wp]: arch_finalise_cap, prepare_thread_delete "\s :: det_state. P (irq_state_of_state s)" - (wp: select_wp modify_wp crunch_wps dmo_wp + (wp: modify_wp crunch_wps dmo_wp simp: crunch_simps invalidateLocalTLB_ASID_def dsb_def cleanCaches_PoU_def invalidate_I_PoU_def clean_D_PoU_def) @@ -398,9 +398,10 @@ lemma find_pd_for_asid_assert_reads_respects: unfolding find_pd_for_asid_assert_def apply (wpsimp wp: get_pde_rev find_pd_for_asid_reads_respects hoare_vcg_all_lift) apply (rule_tac Q'="\rv s. is_subject aag (lookup_pd_slot rv 0 && ~~ mask pd_bits)" - in hoare_post_imp_R) - apply (rule find_pd_for_asid_pd_slot_authorised) - apply (subgoal_tac "lookup_pd_slot r 0 = r") + in hoare_strengthen_postE_R) + apply (rule find_pd_for_asid_pd_slot_authorised) + apply (rename_tac rv s) + apply (subgoal_tac "lookup_pd_slot rv 0 = rv") apply fastforce apply (simp add: lookup_pd_slot_def) apply fastforce @@ -410,7 +411,7 @@ lemma modify_arm_hwasid_table_reads_respects: "reads_respects aag l \ (modify (\s. s\arch_state := arch_state s\arm_hwasid_table := param\\))" apply (simp add: equiv_valid_def2) apply (rule modify_ev2) - (* FIXME: slow 5s *) + (* slow 5s *) by (auto simp: reads_equiv_def affects_equiv_def states_equiv_for_def equiv_for_def intro: equiv_asids_triv' split: if_splits) @@ -419,7 +420,7 @@ lemma modify_arm_asid_map_reads_respects: "reads_respects aag l \ (modify (\s. s\arch_state := arch_state s\arm_asid_map := param\\))" apply (simp add: equiv_valid_def2) apply (rule modify_ev2) - (* FIXME: slow 5s *) + (* slow 5s *) by (auto simp: reads_equiv_def affects_equiv_def states_equiv_for_def equiv_for_def intro: equiv_asids_triv' split: if_splits) @@ -427,7 +428,7 @@ lemma modify_arm_next_asid_reads_respects: "reads_respects aag l \ (modify (\s. 
s\arch_state := arch_state s\arm_next_asid := param\\))" apply (simp add: equiv_valid_def2) apply (rule modify_ev2) - (* FIXME: slow 5s *) + (* slow 5s *) by (auto simp: reads_equiv_def affects_equiv_def states_equiv_for_def equiv_for_def intro: equiv_asids_triv' split: if_splits) @@ -567,7 +568,7 @@ lemma set_vm_root_states_equiv_for[wp]: "set_vm_root thread \states_equiv_for P Q R S st\" unfolding set_vm_root_def catch_def fun_app_def set_current_pd_def isb_def dsb_def writeTTBR0_def by (wpsimp wp: arm_context_switch_states_equiv_for do_machine_op_mol_states_equiv_for - hoare_vcg_all_lift hoare_whenE_wp hoare_drop_imps + hoare_vcg_all_lift whenE_wp hoare_drop_imps simp: dmo_bind_valid if_apply_def2)+ lemma set_vm_root_reads_respects: @@ -624,7 +625,7 @@ lemma unmap_page_table_reads_respects: (unmap_page_table asid vaddr pt)" unfolding unmap_page_table_def fun_app_def page_table_mapped_def by (wp dmo_mol_reads_respects store_pde_reads_respects get_pde_rev - flush_table_reads_respects find_pd_for_asid_reads_respects hoare_vcg_all_lift_R + flush_table_reads_respects find_pd_for_asid_reads_respects hoare_vcg_all_liftE_R | wpc | simp add: cleanByVA_PoU_def | wp (once) hoare_drop_imps)+ lemma perform_page_table_invocation_reads_respects: @@ -764,9 +765,9 @@ lemma perform_page_invocation_reads_respects: lemma equiv_asids_arm_asid_table_update: "\ equiv_asids R s t; kheap s pool_ptr = kheap t pool_ptr \ \ equiv_asids R - (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s) + (s\arch_state := arch_state s\arm_asid_table := (asid_table s) (asid_high_bits_of asid \ pool_ptr)\\) - (t\arch_state := arch_state t\arm_asid_table := arm_asid_table (arch_state t) + (t\arch_state := arch_state t\arm_asid_table := (asid_table t) (asid_high_bits_of asid \ pool_ptr)\\)" by (clarsimp simp: equiv_asids_def equiv_asid_def asid_pool_at_kheap) @@ -1277,6 +1278,7 @@ lemma do_flush_globals_equiv: apply (cases "typ") by (wp dmo_cacheRangeOp_lift | simp add: do_flush_def cache_machine_op_defs do_flush_defs do_machine_op_bind when_def + empty_fail_cond | clarsimp | rule conjI)+ lemma perform_page_directory_invocation_globals_equiv: @@ -1350,7 +1352,7 @@ lemma unmap_page_globals_equiv: \ (\xa\set [0 , 4 .e. 0x3C]. xa + lookup_pd_slot x vptr && ~~ mask pd_bits \ arm_global_pd (arch_state sa)))" - and E="\_. globals_equiv st" in hoare_post_impErr) + and E="\_. 
globals_equiv st" in hoare_strengthen_postE) apply (wp find_pd_for_asid_not_arm_global_pd_large_page) apply simp apply simp @@ -1426,8 +1428,8 @@ lemma set_mrs_globals_equiv: apply (clarsimp) apply (insert length_msg_lt_msg_max) apply (simp) - apply (wp set_object_globals_equiv static_imp_wp) - apply (wp hoare_vcg_all_lift set_object_globals_equiv static_imp_wp)+ + apply (wp set_object_globals_equiv hoare_weak_lift_imp) + apply (wp hoare_vcg_all_lift set_object_globals_equiv hoare_weak_lift_imp)+ apply (clarsimp simp:arm_global_pd_not_tcb)+ done @@ -1442,7 +1444,7 @@ lemma perform_page_invocation_globals_equiv: apply (wp mapM_swp_store_pte_globals_equiv hoare_vcg_all_lift dmo_cacheRangeOp_lift mapM_swp_store_pde_globals_equiv mapM_x_swp_store_pte_globals_equiv mapM_x_swp_store_pde_globals_equiv set_cap_globals_equiv'' - unmap_page_globals_equiv store_pte_globals_equiv store_pde_globals_equiv static_imp_wp + unmap_page_globals_equiv store_pte_globals_equiv store_pde_globals_equiv hoare_weak_lift_imp do_flush_globals_equiv set_mrs_globals_equiv set_message_info_globals_equiv | wpc | simp add: do_machine_op_bind cleanByVA_PoU_def)+ by (auto simp: cte_wp_parent_not_global_pd authorised_for_globals_page_inv_def valid_page_inv_def @@ -1477,7 +1479,7 @@ lemma perform_asid_control_invocation_globals_equiv: max_index_upd_invs_simple set_cap_no_overlap set_cap_caps_no_overlap max_index_upd_caps_overlap_reserved region_in_kernel_window_preserved - hoare_vcg_all_lift get_cap_wp static_imp_wp + hoare_vcg_all_lift get_cap_wp hoare_weak_lift_imp set_cap_idx_up_aligned_area[where dev = False,simplified] | simp)+ (* factor out the implication -- we know what the relevant components of the diff --git a/proof/infoflow/ARM/ArchDecode_IF.thy b/proof/infoflow/ARM/ArchDecode_IF.thy index 20a5feef8d..4a257da281 100644 --- a/proof/infoflow/ARM/ArchDecode_IF.thy +++ b/proof/infoflow/ARM/ArchDecode_IF.thy @@ -188,7 +188,7 @@ lemma lookup_pt_slot_no_fail_is_subject: lemma arch_decode_invocation_reads_respects_f[Decode_IF_assms]: notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] - notes hoare_whenE_wps[wp_split del] + notes whenE_wps[wp_split del] shows "reads_respects_f aag l (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot @@ -204,7 +204,7 @@ lemma arch_decode_invocation_reads_respects_f[Decode_IF_assms]: apply (wp check_vp_wpR reads_respects_f_inv'[OF get_asid_pool_rev] reads_respects_f_inv'[OF ensure_empty_rev] reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] - reads_respects_f_inv'[OF ensure_no_children_rev] select_wp + reads_respects_f_inv'[OF ensure_no_children_rev] reads_respects_f_inv'[OF ensure_safe_mapping_reads_respects] reads_respects_f_inv'[OF resolve_vaddr_reads_respects] reads_respects_f_inv'[OF create_mapping_entries_rev] diff --git a/proof/infoflow/ARM/ArchFinalCaps.thy b/proof/infoflow/ARM/ArchFinalCaps.thy index 261ac95d0c..13a036dc8a 100644 --- a/proof/infoflow/ARM/ArchFinalCaps.thy +++ b/proof/infoflow/ARM/ArchFinalCaps.thy @@ -181,7 +181,7 @@ lemma perform_page_invocation_silc_inv: apply (wp mapM_wp[OF _ subset_refl] set_cap_silc_inv mapM_x_wp[OF _ subset_refl] perform_page_table_invocation_silc_inv_get_cap_helper'[where st=st] - hoare_vcg_all_lift hoare_vcg_if_lift static_imp_wp + hoare_vcg_all_lift hoare_vcg_if_lift hoare_weak_lift_imp | wpc | simp only: swp_def o_def fun_app_def K_def | wp (once) hoare_drop_imps)+ @@ -212,7 +212,7 @@ lemma perform_asid_control_invocation_silc_inv: apply (rule hoare_pre) apply (wp modify_wp 
cap_insert_silc_inv' retype_region_silc_inv[where sz=pageBits] set_cap_silc_inv get_cap_slots_holding_overlapping_caps[where st=st] - delete_objects_silc_inv static_imp_wp + delete_objects_silc_inv hoare_weak_lift_imp | wpc | simp )+ apply (clarsimp simp: authorised_asid_control_inv_def silc_inv_def valid_aci_def ptr_range_def page_bits_def) apply (rule conjI) @@ -275,15 +275,15 @@ lemma arch_invoke_irq_control_silc_inv[FinalCaps_assms]: done lemma invoke_tcb_silc_inv[FinalCaps_assms]: - notes static_imp_wp [wp] - static_imp_conj_wp [wp] + notes hoare_weak_lift_imp [wp] + hoare_weak_lift_imp_conj [wp] shows "\silc_inv aag st and einvs and simple_sched_action and pas_refined aag and tcb_inv_wf tinv and K (authorised_tcb_inv aag tinv)\ invoke_tcb tinv \\_. silc_inv aag st\" apply (case_tac tinv) apply ((wp restart_silc_inv hoare_vcg_if_lift suspend_silc_inv mapM_x_wp[OF _ subset_refl] - static_imp_wp + hoare_weak_lift_imp | wpc | simp split del: if_split add: authorised_tcb_inv_def check_cap_at_def | clarsimp @@ -300,7 +300,7 @@ lemma invoke_tcb_silc_inv[FinalCaps_assms]: apply (simp add: split_def cong: option.case_cong) (* slow, ~2 mins *) apply (simp only: conj_ac cong: conj_cong imp_cong | - wp checked_insert_pas_refined checked_cap_insert_silc_inv hoare_vcg_all_lift_R + wp checked_insert_pas_refined checked_cap_insert_silc_inv hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R cap_delete_silc_inv_not_transferable cap_delete_pas_refined' cap_delete_deletes diff --git a/proof/infoflow/ARM/ArchIRQMasks_IF.thy b/proof/infoflow/ARM/ArchIRQMasks_IF.thy index d63a5b7392..3d48530a78 100644 --- a/proof/infoflow/ARM/ArchIRQMasks_IF.thy +++ b/proof/infoflow/ARM/ArchIRQMasks_IF.thy @@ -24,9 +24,6 @@ lemma delete_objects_irq_masks[IRQMasks_IF_assms, wp]: apply (wp dmo_wp no_irq_mapM_x no_irq | simp add: freeMemory_def no_irq_storeWord)+ done -crunch irq_masks[wp]: cleanCacheRange_PoU "\s. P (irq_masks s)" - (ignore_del: cleanCacheRange_PoU) - crunch irq_masks[IRQMasks_IF_assms, wp]: invoke_untyped "\s. P (irq_masks_of_state s)" (ignore: delete_objects wp: crunch_wps dmo_wp wp: mapME_x_inv_wp preemption_point_inv @@ -34,7 +31,7 @@ crunch irq_masks[IRQMasks_IF_assms, wp]: invoke_untyped "\s. P (irq_mask mapM_x_def_bak unless_def) crunch irq_masks[IRQMasks_IF_assms, wp]: finalise_cap "\s. P (irq_masks_of_state s)" - (wp: select_wp crunch_wps dmo_wp no_irq + (wp: crunch_wps dmo_wp no_irq simp: crunch_simps no_irq_setHardwareASID no_irq_invalidateLocalTLB_ASID no_irq_set_current_pd no_irq_invalidateLocalTLB_VAASID no_irq_cleanByVA_PoU) @@ -83,14 +80,14 @@ lemma dmo_getActiveIRQ_return_axiom[IRQMasks_IF_assms, wp]: apply (simp add: getActiveIRQ_def) apply (rule hoare_pre, rule dmo_wp) apply (insert irq_oracle_max_irq) - apply (wp alternative_wp select_wp dmo_getActiveIRQ_irq_masks) + apply (wp dmo_getActiveIRQ_irq_masks) apply clarsimp done crunch irq_masks[IRQMasks_IF_assms, wp]: activate_thread "\s. P (irq_masks_of_state s)" crunch irq_masks[IRQMasks_IF_assms, wp]: schedule "\s. 
P (irq_masks_of_state s)" - (wp: dmo_wp alternative_wp select_wp crunch_wps simp: crunch_simps clearExMonitor_def) + (wp: dmo_wp crunch_wps simp: crunch_simps clearExMonitor_def) end @@ -132,23 +129,23 @@ lemma invoke_tcb_irq_masks[IRQMasks_IF_assms]: (* just ThreadControl left *) apply (simp add: split_def cong: option.case_cong) apply wpsimp+ - apply (rule hoare_post_impErr[OF cap_delete_irq_masks[where P=P]]) + apply (rule hoare_strengthen_postE[OF cap_delete_irq_masks[where P=P]]) apply blast apply blast - apply (wpsimp wp: hoare_vcg_all_lift_R hoare_vcg_const_imp_lift_R + apply (wpsimp wp: hoare_vcg_all_liftE_R hoare_vcg_const_imp_lift_R hoare_vcg_all_lift hoare_drop_imps checked_cap_insert_domain_sep_inv)+ apply (rule_tac Q="\ r s. domain_sep_inv False st s \ P (irq_masks_of_state s)" - and E="\_ s. P (irq_masks_of_state s)" in hoare_post_impErr) + and E="\_ s. P (irq_masks_of_state s)" in hoare_strengthen_postE) apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) apply fastforce apply blast - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift checked_cap_insert_domain_sep_inv)+ + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checked_cap_insert_domain_sep_inv)+ apply (rule_tac Q="\ r s. domain_sep_inv False st s \ P (irq_masks_of_state s)" - and E="\_ s. P (irq_masks_of_state s)" in hoare_post_impErr) + and E="\_ s. P (irq_masks_of_state s)" in hoare_strengthen_postE) apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) apply fastforce apply blast - apply (simp add: option_update_thread_def | wp static_imp_wp hoare_vcg_all_lift | wpc)+ + apply (simp add: option_update_thread_def | wp hoare_weak_lift_imp hoare_vcg_all_lift | wpc)+ by fastforce+ end diff --git a/proof/infoflow/ARM/ArchIpc_IF.thy b/proof/infoflow/ARM/ArchIpc_IF.thy index 1f1c607746..97e6ad953f 100644 --- a/proof/infoflow/ARM/ArchIpc_IF.thy +++ b/proof/infoflow/ARM/ArchIpc_IF.thy @@ -388,7 +388,7 @@ lemma set_mrs_equiv_but_for_labels[Ipc_IF_assms]: pasObjectAbs aag x \ L) | _ \ True))" in hoare_strengthen_post) apply (wp mapM_x_wp' store_word_offs_equiv_but_for_labels | simp add: split_def)+ - apply (case_tac xa, clarsimp split: if_split_asm elim!: in_set_zipE) + apply (case_tac x, clarsimp split: if_split_asm elim!: in_set_zipE) apply (clarsimp simp: for_each_byte_of_word_def) apply (erule bspec) apply (clarsimp simp: ptr_range_def) @@ -420,7 +420,7 @@ lemma set_mrs_equiv_but_for_labels[Ipc_IF_assms]: apply (simp add: word_size_def) apply (erule is_aligned_no_overflow') apply simp - apply (wp set_object_equiv_but_for_labels hoare_vcg_all_lift static_imp_wp | simp)+ + apply (wp set_object_equiv_but_for_labels hoare_vcg_all_lift hoare_weak_lift_imp | simp)+ apply (fastforce dest: get_tcb_not_asid_pool_at)+ done diff --git a/proof/infoflow/ARM/ArchNoninterference.thy b/proof/infoflow/ARM/ArchNoninterference.thy index e4dfcc2a17..b9958a3093 100644 --- a/proof/infoflow/ARM/ArchNoninterference.thy +++ b/proof/infoflow/ARM/ArchNoninterference.thy @@ -23,9 +23,10 @@ lemma do_user_op_if_integrity[Noninterference_assms]: \\_. 
integrity aag X st\" apply (simp add: do_user_op_if_def) apply (wpsimp wp: dmo_user_memory_update_respects_Write dmo_device_update_respects_Write - hoare_vcg_all_lift hoare_vcg_imp_lift) + hoare_vcg_all_lift hoare_vcg_imp_lift + wp_del: select_wp) apply (rule hoare_pre_cont) - apply (wp select_wp | wpc | clarsimp)+ + apply (wp | wpc | clarsimp)+ apply (rule conjI) apply clarsimp apply (simp add: restrict_map_def ptable_lift_s_def ptable_rights_s_def split: if_splits) @@ -53,12 +54,12 @@ lemma do_user_op_if_globals_equiv_scheduler[Noninterference_assms]: \\_. globals_equiv_scheduler st\" apply (simp add: do_user_op_if_def) apply (wpsimp wp: dmo_user_memory_update_globals_equiv_scheduler - dmo_device_memory_update_globals_equiv_scheduler select_wp)+ + dmo_device_memory_update_globals_equiv_scheduler)+ apply (auto simp: ptable_lift_s_def ptable_rights_s_def) done crunch silc_dom_equiv[Noninterference_assms, wp]: do_user_op_if "silc_dom_equiv aag st" - (ignore: do_machine_op user_memory_update wp: crunch_wps select_wp) + (ignore: do_machine_op user_memory_update wp: crunch_wps) lemma sameFor_scheduler_affects_equiv[Noninterference_assms]: "\ (s,s') \ same_for aag PSched; (s,s') \ same_for aag (Partition l); @@ -350,7 +351,7 @@ lemma getActiveIRQ_ret_no_dmo[Noninterference_assms, wp]: apply (simp add: getActiveIRQ_def) apply (rule hoare_pre) apply (insert irq_oracle_max_irq) - apply (wp alternative_wp select_wp dmo_getActiveIRQ_irq_masks) + apply (wp dmo_getActiveIRQ_irq_masks) apply clarsimp done @@ -375,7 +376,7 @@ lemma dmo_getActive_IRQ_reads_respect_scheduler[Noninterference_assms]: lemma integrity_asids_update_reference_state[Noninterference_assms]: "is_subject aag t - \ integrity_asids aag {pasSubject aag} x asid s (s\kheap := kheap s(t \ blah)\)" + \ integrity_asids aag {pasSubject aag} x asid s (s\kheap := (kheap s)(t \ blah)\)" by clarsimp lemma getActiveIRQ_no_non_kernel_IRQs[Noninterference_assms]: diff --git a/proof/infoflow/ARM/ArchPasUpdates.thy b/proof/infoflow/ARM/ArchPasUpdates.thy index 1c81b5482c..8f8aaaa622 100644 --- a/proof/infoflow/ARM/ArchPasUpdates.thy +++ b/proof/infoflow/ARM/ArchPasUpdates.thy @@ -14,7 +14,7 @@ named_theorems PasUpdates_assms crunches arch_post_cap_deletion, arch_finalise_cap, prepare_thread_delete for domain_fields[PasUpdates_assms, wp]: "domain_fields P" - ( wp: syscall_valid select_wp crunch_wps rec_del_preservation cap_revoke_preservation modify_wp + ( wp: syscall_valid crunch_wps rec_del_preservation cap_revoke_preservation modify_wp simp: crunch_simps check_cap_at_def filterM_mapM unless_def ignore: without_preemption filterM rec_del check_cap_at cap_revoke ignore_del: retype_region_ext create_cap_ext cap_insert_ext ethread_set cap_move_ext diff --git a/proof/infoflow/ARM/ArchRetype_IF.thy b/proof/infoflow/ARM/ArchRetype_IF.thy index b98cc83d8b..182613de2b 100644 --- a/proof/infoflow/ARM/ArchRetype_IF.thy +++ b/proof/infoflow/ARM/ArchRetype_IF.thy @@ -278,7 +278,7 @@ lemma copy_global_mappings_globals_equiv: "\globals_equiv s and (\s. x \ arm_global_pd (arch_state s) \ is_aligned x pd_bits)\ copy_global_mappings x \\_. globals_equiv s\" - unfolding copy_global_mappings_def including no_pre + unfolding copy_global_mappings_def including classic_wp_pre apply simp apply wp apply (rule_tac Q="\_. globals_equiv s and (\s. 
x \ arm_global_pd (arch_state s) \ @@ -471,30 +471,31 @@ lemma reset_untyped_cap_reads_respects_g: apply (frule(1) caps_of_state_valid) apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps free_index_of_def invs_valid_global_objs) - apply (simp add: aligned_add_aligned is_aligned_shiftl) - apply (clarsimp simp: Kernel_Config.resetChunkBits_def) + apply (simp add: aligned_add_aligned is_aligned_shiftl) + apply (clarsimp simp: Kernel_Config.resetChunkBits_def) apply (rule hoare_pre) apply (wp preemption_point_inv' set_untyped_cap_invs_simple set_cap_cte_wp_at set_cap_no_overlap only_timer_irq_inv_pres[where Q=\, OF _ set_cap_domain_sep_inv] + irq_state_independent_A_conjI | simp)+ apply (strengthen empty_descendants_range_in) - apply (wp only_timer_irq_inv_pres[where P=\ and Q=\] no_irq_clearMemory - | simp | wp (once) dmo_wp)+ - apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps bits_of_def) - apply (frule(1) caps_of_state_valid) - apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps free_index_of_def) - apply (wp | simp)+ - apply (wp delete_objects_reads_respects_g) - apply (simp add: if_apply_def2) - apply (strengthen invs_valid_global_objs) - apply (wp add: delete_objects_invs_ex hoare_vcg_const_imp_lift - delete_objects_pspace_no_overlap_again - delete_objects_valid_arch_state - only_timer_irq_inv_pres[where P=\ and Q=\] - del: Untyped_AI.delete_objects_pspace_no_overlap - | simp)+ - apply (rule get_cap_reads_respects_g) - apply (wp get_cap_wp) + apply (wp only_timer_irq_inv_pres[where P=\ and Q=\] no_irq_clearMemory + | simp | wp (once) dmo_wp)+ + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps bits_of_def) + apply (frule(1) caps_of_state_valid) + apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps free_index_of_def) + apply (wp | simp)+ + apply (wp delete_objects_reads_respects_g) + apply (simp add: if_apply_def2) + apply (strengthen invs_valid_global_objs) + apply (wp add: delete_objects_invs_ex hoare_vcg_const_imp_lift + delete_objects_pspace_no_overlap_again + delete_objects_valid_arch_state + only_timer_irq_inv_pres[where P=\ and Q=\] + del: Untyped_AI.delete_objects_pspace_no_overlap + | simp)+ + apply (rule get_cap_reads_respects_g) + apply (wp get_cap_wp) apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps bits_of_def) apply (frule(1) caps_of_state_valid) apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps @@ -583,9 +584,9 @@ lemma invoke_untyped_reads_respects_g_wcap[Retype_IF_assms]: and Q="\_. invs and valid_untyped_inv_wcap ui (Some (UntypedCap dev ptr sz (If reset 0 idx))) and ct_active and (\s. reset \ pspace_no_overlap {ptr .. 
ptr + 2 ^ sz - 1} s)" - in hoare_post_impErr) - apply (rule hoare_pre, wp hoare_whenE_wp) - apply (rule validE_validE_R, rule hoare_post_impErr, rule reset_untyped_cap_invs_etc) + in hoare_strengthen_postE) + apply (rule hoare_pre, wp whenE_wp) + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule reset_untyped_cap_invs_etc) apply (clarsimp simp only: if_True simp_thms, intro conjI, assumption+) apply simp apply assumption diff --git a/proof/infoflow/ARM/ArchScheduler_IF.thy b/proof/infoflow/ARM/ArchScheduler_IF.thy index f2406c7032..20465707aa 100644 --- a/proof/infoflow/ARM/ArchScheduler_IF.thy +++ b/proof/infoflow/ARM/ArchScheduler_IF.thy @@ -131,12 +131,12 @@ lemma thread_set_context_globals_equiv[Scheduler_IF_assms]: lemma arch_scheduler_affects_equiv_update[Scheduler_IF_assms]: "arch_scheduler_affects_equiv st s - \ arch_scheduler_affects_equiv st (s\kheap := kheap s(x \ TCB y')\)" + \ arch_scheduler_affects_equiv st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: arch_scheduler_affects_equiv_def) lemma equiv_asid_equiv_update[Scheduler_IF_assms]: "\ get_tcb x s = Some y; equiv_asid asid st s \ - \ equiv_asid asid st (s\kheap := kheap s(x \ TCB y')\)" + \ equiv_asid asid st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: equiv_asid_def obj_at_def get_tcb_def) end @@ -186,7 +186,7 @@ lemma globals_equiv_scheduler_inv'[Scheduler_IF_assms]: lemma clearExMonitor_globals_equiv_scheduler[wp]: "do_machine_op clearExMonitor \globals_equiv_scheduler sta\" - unfolding clearExMonitor_def including no_pre + unfolding clearExMonitor_def including classic_wp_pre apply (wp dmo_no_mem_globals_equiv_scheduler) apply simp apply (simp add: simpler_modify_def valid_def) @@ -434,7 +434,7 @@ lemma thread_set_scheduler_affects_equiv[Scheduler_IF_assms, wp]: split: option.splits kernel_object.splits) apply (subst arch_tcb_update_aux) apply simp - apply (subgoal_tac "s = (s\kheap := kheap s(idle_thread s \ TCB y)\)", simp) + apply (subgoal_tac "s = (s\kheap := (kheap s)(idle_thread s \ TCB y)\)", simp) apply (rule state.equality) apply (rule ext) apply simp+ diff --git a/proof/infoflow/ARM/ArchSyscall_IF.thy b/proof/infoflow/ARM/ArchSyscall_IF.thy index df9021d36e..5db6f7d824 100644 --- a/proof/infoflow/ARM/ArchSyscall_IF.thy +++ b/proof/infoflow/ARM/ArchSyscall_IF.thy @@ -43,7 +43,7 @@ lemma sts_authorised_for_globals_inv[Syscall_IF_assms]: apply wpsimp+ apply (rename_tac page_invocation) apply (case_tac page_invocation) - apply (simp | wp hoare_ex_wp)+ + apply (simp | wp hoare_vcg_ex_lift)+ done lemma dmo_maskInterrupt_globals_equiv[Syscall_IF_assms, wp]: @@ -120,11 +120,10 @@ lemma decode_arch_invocation_authorised_for_globals[Syscall_IF_assms]: apply (simp add: split_def Let_def cong: cap.case_cong arch_cap.case_cong if_cong option.case_cong split del: if_split) - apply (wp select_wp select_ext_weak_wp whenE_throwError_wp check_vp_wpR unlessE_wp get_pde_wp + apply (wp select_ext_weak_wp whenE_throwError_wp check_vp_wpR unlessE_wp get_pde_wp get_master_pde_wp find_pd_for_asid_authority3 create_mapping_entries_parent_for_refs | wpc - | simp add: authorised_for_globals_page_inv_def - del: hoare_True_E_R)+ + | simp add: authorised_for_globals_page_inv_def)+ apply (simp cong: if_cong) apply (wp hoare_vcg_if_lift2) apply (rule hoare_conjI) diff --git a/proof/infoflow/ARM/ArchTcb_IF.thy b/proof/infoflow/ARM/ArchTcb_IF.thy index 2d068493a0..692d9cf25d 100644 --- a/proof/infoflow/ARM/ArchTcb_IF.thy +++ b/proof/infoflow/ARM/ArchTcb_IF.thy @@ -95,19 +95,19 @@ lemma 
invoke_tcb_thread_preservation[Tcb_IF_assms]: supply set_priority_extended.dxo_eq[simp del] reschedule_required_ext_extended.dxo_eq[simp del] apply (simp add: split_def cong: option.case_cong) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule_tac P="case ep of Some v \ length v = word_bits | _ \ True" in hoare_gen_asm) apply wp - apply ((simp add: conj_comms(1, 2) del: hoare_True_E_R - | rule wp_split_const_if wp_split_const_if_R hoare_vcg_all_lift_R + apply ((simp add: conj_comms(1, 2) + | rule wp_split_const_if wp_split_const_if_R hoare_vcg_all_liftE_R hoare_vcg_E_elim hoare_vcg_const_imp_lift_R hoare_vcg_R_conj | (wp check_cap_inv2[where Q="\_. pas_refined aag"] check_cap_inv2[where Q="\_ s. t \ idle_thread s"] out_invs_trivial case_option_wpE cap_delete_deletes cap_delete_valid_cap cap_insert_valid_cap out_cte_at cap_insert_cte_at cap_delete_cte_at out_valid_cap out_tcb_valid - hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_invs_trivial[OF ball_tcb_cap_casesI] hoare_vcg_all_lift thread_set_valid_cap out_emptyable @@ -121,9 +121,8 @@ lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid check_cap_inv2[where Q="\_. P"] cap_delete_P cap_insert_P thread_set_P thread_set_P' set_mcpriority_P set_mcpriority_idle_thread - dxo_wp_weak static_imp_wp) + dxo_wp_weak hoare_weak_lift_imp) | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def - del: hoare_True_E_R | wpc | strengthen use_no_cap_to_obj_asid_strg tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] @@ -144,7 +143,7 @@ lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: lemma tc_reads_respects_f[Tcb_IF_assms]: assumes domains_distinct[wp]: "pas_domains_distinct aag" and tc[simp]: "ti = ThreadControl x41 x42 x43 x44 x45 x46 x47 x48" - notes validE_valid[wp del] static_imp_wp [wp] + notes validE_valid[wp del] hoare_weak_lift_imp [wp] shows "reads_respects_f aag l (silc_inv aag st and only_timer_irq_inv irq st' and einvs and simple_sched_action @@ -170,7 +169,7 @@ lemma tc_reads_respects_f[Tcb_IF_assms]: check_cap_inv[OF check_cap_inv[OF cap_insert_ct]] get_thread_state_rev[THEN reads_respects_f[where aag=aag and st=st and Q=\]] - hoare_vcg_all_lift_R hoare_vcg_all_lift + hoare_vcg_all_liftE_R hoare_vcg_all_lift cap_delete_reads_respects[where st=st] checked_insert_pas_refined thread_set_pas_refined reads_respects_f[OF checked_insert_reads_respects, where st=st] @@ -202,7 +201,7 @@ lemma tc_reads_respects_f[Tcb_IF_assms]: check_cap_inv[OF check_cap_inv[OF cap_insert_cur_domain]] check_cap_inv[OF check_cap_inv[OF cap_insert_ct]] get_thread_state_rev[THEN reads_respects_f[where st=st and Q=\]] - hoare_vcg_all_lift_R hoare_vcg_all_lift + hoare_vcg_all_liftE_R hoare_vcg_all_lift cap_delete_reads_respects[where st=st] checked_insert_pas_refined thread_set_pas_refined reads_respects_f[OF checked_insert_reads_respects] checked_cap_insert_silc_inv[where st=st] @@ -221,7 +220,7 @@ lemma tc_reads_respects_f[Tcb_IF_assms]: invs_psp_aligned invs_vspace_objs invs_arch_state | wp (once) hoare_drop_imp)+ apply (simp add: option_update_thread_def tcb_cap_cases_def - | wp static_imp_wp static_imp_conj_wp thread_set_pas_refined + | wp hoare_weak_lift_imp hoare_weak_lift_imp_conj thread_set_pas_refined reads_respects_f[OF thread_set_reads_respects, where st=st and Q="\"] | wpc)+ apply (wp hoare_vcg_all_lift 
thread_set_tcb_fault_handler_update_invs diff --git a/proof/infoflow/ARM/ArchUserOp_IF.thy b/proof/infoflow/ARM/ArchUserOp_IF.thy index 02bf34edc7..394b5ac167 100644 --- a/proof/infoflow/ARM/ArchUserOp_IF.thy +++ b/proof/infoflow/ARM/ArchUserOp_IF.thy @@ -982,7 +982,7 @@ lemma do_user_op_reads_respects_g: apply (rule spec_equiv_valid_guard_imp) apply (wpsimp wp: dmo_user_memory_update_reads_respects_g dmo_device_state_update_reads_respects_g dmo_setExMonitor_reads_respects_g dmo_device_state_update_reads_respects_g - select_ev select_wp dmo_getExMonitor_reads_respects_g dmo_wp) + select_ev dmo_getExMonitor_reads_respects_g dmo_wp) apply clarsimp apply (rule conjI) apply clarsimp diff --git a/proof/infoflow/ARM/Example_Valid_State.thy b/proof/infoflow/ARM/Example_Valid_State.thy index c309028529..6badce88aa 100644 --- a/proof/infoflow/ARM/Example_Valid_State.thy +++ b/proof/infoflow/ARM/Example_Valid_State.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -205,7 +206,8 @@ definition "High_pt_ptr = kernel_base + 0xC00" (* init_globals_frame \ {kernel_base + 0x5000,... kernel_base + 0x5FFF} *) -definition "shared_page_ptr = kernel_base + 0x6000" +definition "shared_page_ptr_virt = kernel_base + 0x6000" +definition "shared_page_ptr_phys = addrFromPPtr shared_page_ptr_virt" definition "Low_pd_ptr = kernel_base + 0x20000" definition "High_pd_ptr = kernel_base + 0x24000" @@ -233,14 +235,14 @@ lemmas s0_ptr_defs = Low_pd_ptr_def High_pd_ptr_def Low_pt_ptr_def High_pt_ptr_def Low_tcb_ptr_def High_tcb_ptr_def idle_tcb_ptr_def timer_irq_def Low_prio_def High_prio_def Low_time_slice_def Low_domain_def High_domain_def init_irq_node_ptr_def init_globals_frame_def init_global_pd_def - kernel_base_def shared_page_ptr_def + kernel_base_def shared_page_ptr_virt_def (* Distinctness proof of kernel pointers. *) distinct ptrs_distinct [simp]: Low_tcb_ptr High_tcb_ptr idle_tcb_ptr Low_pt_ptr High_pt_ptr - shared_page_ptr ntfn_ptr + shared_page_ptr_virt ntfn_ptr Low_pd_ptr High_pd_ptr Low_cnode_ptr High_cnode_ptr Silc_cnode_ptr irq_cnode_ptr init_globals_frame init_global_pd @@ -507,7 +509,7 @@ definition Low_pt' :: "word8 \ pte " where "Low_pt' \ (\_. InvalidPTE) - (0 := SmallPagePTE shared_page_ptr {} vm_read_write)" + (0 := SmallPagePTE shared_page_ptr_phys {} vm_read_write)" definition Low_pt :: kernel_object @@ -542,7 +544,7 @@ definition where "High_pt' \ (\_. InvalidPTE) - (0 := SmallPagePTE shared_page_ptr {} vm_read_only)" + (0 := SmallPagePTE shared_page_ptr_phys {} vm_read_only)" definition @@ -872,7 +874,7 @@ definition Sys1AgentMap :: "(auth_graph_label subject_label) agent_map" where "Sys1AgentMap \ - (\p. if ptrFromPAddr shared_page_ptr \ p \ p < ptrFromPAddr shared_page_ptr + 0x1000 + (\p. if p \ ptr_range shared_page_ptr_virt pageBits then partition_label Low else partition_label IRQ0) \ \set the range of the shared_page to Low, default everything else to IRQ0\ (Low_cnode_ptr := partition_label Low, @@ -901,12 +903,11 @@ lemma Sys1AgentMap_simps: "Sys1AgentMap Low_tcb_ptr = partition_label Low" "Sys1AgentMap High_tcb_ptr = partition_label High" "Sys1AgentMap idle_tcb_ptr = partition_label Low" - "\p. \ptrFromPAddr shared_page_ptr \ p; p < ptrFromPAddr shared_page_ptr + 0x1000\ + "\p. 
p \ ptr_range shared_page_ptr_virt pageBits \ Sys1AgentMap p = partition_label Low" unfolding Sys1AgentMap_def apply simp_all - by (auto simp: ptrFromPAddr_def pptrBaseOffset_def - pptrBase_def physBase_def s0_ptr_defs) + by (auto simp: s0_ptr_defs ptr_range_def pageBits_def) definition Sys1ASIDMap :: "(auth_graph_label subject_label) agent_asid_map" @@ -998,8 +999,7 @@ lemma thread_bounds_of_state_s0: lemma Sys1_wellformed': "policy_wellformed (pasPolicy Sys1PAS) False irqs x" - apply (clarsimp simp: Sys1PAS_def Sys1AgentMap_simps policy_wellformed_def - Sys1AuthGraph_def) + apply (clarsimp simp: Sys1PAS_def policy_wellformed_def Sys1AuthGraph_def) done corollary Sys1_wellformed: @@ -1009,8 +1009,7 @@ corollary Sys1_wellformed: lemma Sys1_pas_wellformed: "pas_wellformed Sys1PAS" - apply (clarsimp simp: Sys1PAS_def Sys1AgentMap_simps policy_wellformed_def - Sys1AuthGraph_def) + apply (clarsimp simp: Sys1PAS_def policy_wellformed_def Sys1AuthGraph_def) done lemma domains_of_state_s0[simp]: @@ -1029,12 +1028,10 @@ lemma Sys1_pas_refined: apply (clarsimp simp: pas_refined_def) apply (intro conjI) apply (simp add: Sys1_pas_wellformed) - apply (clarsimp simp: irq_map_wellformed_aux_def s0_internal_def Sys1AgentMap_simps Sys1PAS_def) + apply (clarsimp simp: irq_map_wellformed_aux_def s0_internal_def Sys1PAS_def) apply (clarsimp simp: Sys1AgentMap_def) - apply (clarsimp simp: ptrFromPAddr_def s0_ptr_defs cte_level_bits_def - pptrBaseOffset_def pptrBase_def physBase_def) - apply (drule le_less_trans[OF irq_node_offs_min[simplified s0_ptr_defs cte_level_bits_def, simplified]]) - apply simp + apply (clarsimp simp: s0_ptr_defs ptr_range_def pageBits_def cte_level_bits_def) + apply word_bitwise apply (clarsimp simp: tcb_domain_map_wellformed_aux_def Sys1PAS_def Sys1AgentMap_def default_domain_def minBound_word @@ -1065,18 +1062,10 @@ lemma Sys1_pas_refined: dest!: graph_ofD split: if_splits) apply (rule Sys1AgentMap_simps(13)) - apply simp - apply (drule_tac x=ac in plus_one_helper2) - apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def physBase_def - shared_page_ptr_def kernel_base_def) - apply (simp add: add.commute) + apply (simp add: ptr_range_def pageBits_def shared_page_ptr_phys_def) apply (erule notE) apply (rule Sys1AgentMap_simps(13)[symmetric]) - apply simp - apply (drule_tac x=ac in plus_one_helper2) - apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def physBase_def - s0_ptr_defs) - apply (simp add: add.commute) + apply (simp add: ptr_range_def pageBits_def shared_page_ptr_phys_def) apply (rule subsetI, clarsimp) apply (erule state_asids_to_policy_aux.cases) @@ -1261,7 +1250,7 @@ lemma valid_obj_s0[simp]: apply (simp add: well_formed_cnode_n_def) apply (fastforce simp: Low_pd'_def High_pd'_def Low_pt'_def High_pt'_def Low_pt_ptr_def High_pt_ptr_def - shared_page_ptr_def + shared_page_ptr_phys_def shared_page_ptr_virt_def valid_vm_rights_def vm_kernel_only_def kernel_base_def pageBits_def pt_bits_def vmsz_aligned_def is_aligned_def[THEN iffD2] @@ -1611,11 +1600,8 @@ lemma valid_arch_objs_s0[simp]: "valid_vspace_objs s0_internal" apply (clarsimp simp: valid_vspace_objs_def obj_at_def s0_internal_def) apply (drule kh0_SomeD) - apply (erule disjE | clarsimp simp: pageBits_def addrFromPPtr_def - pptrBaseOffset_def pptrBase_def physBase_def is_aligned_def - obj_at_def kh0_def kh0_obj_def kernel_mapping_slots_def - High_pt'_def Low_pt'_def High_pd'_def Low_pd'_def ptrFromPAddr_def - | erule vs_lookupE, force simp: vs_lookup_def arch_state0_def vs_asid_refs_def)+ + apply 
(erule disjE | clarsimp simp: addrFromPPtr_def + | erule vs_lookupE, force simp: arch_state0_def vs_asid_refs_def)+ done @@ -1638,11 +1624,10 @@ lemma valid_arch_caps_s0[simp]: lemma valid_global_objs_s0[simp]: "valid_global_objs s0_internal" apply (clarsimp simp: valid_global_objs_def s0_internal_def arch_state0_def) - by (force simp: valid_vso_at_def obj_at_def kh0_def kh0_obj_def s0_ptr_defs - addrFromPPtr_def pptrBaseOffset_def pptrBase_def - physBase_def is_aligned_def pageBits_def - kernel_mapping_slots_def empty_table_def pde_ref_def - valid_pde_mappings_def)+ + apply (force simp: valid_vso_at_def obj_at_def kh0_def kh0_obj_def + is_aligned_addrFromPPtr kernel_base_aligned_pageBits + kernel_mapping_slots_def empty_table_def pde_ref_def valid_pde_mappings_def) + done lemma valid_kernel_mappings_s0[simp]: "valid_kernel_mappings s0_internal" @@ -1659,9 +1644,8 @@ lemma equal_kernel_mappings_s0[simp]: "equal_kernel_mappings s0_internal" apply (clarsimp simp: equal_kernel_mappings_def obj_at_def s0_internal_def) apply (drule kh0_SomeD)+ - by (erule disjE - | force simp: kh0_obj_def High_pd'_def Low_pd'_def s0_ptr_defs kernel_mapping_slots_def - addrFromPPtr_def pptrBaseOffset_def pptrBase_def physBase_def)+ + apply (force simp: kh0_obj_def High_pd'_def Low_pd'_def s0_ptr_defs kernel_mapping_slots_def) + done lemma valid_asid_map_s0[simp]: "valid_asid_map s0_internal" diff --git a/proof/infoflow/Arch_IF.thy b/proof/infoflow/Arch_IF.thy index 521014babc..20b15e6bf0 100644 --- a/proof/infoflow/Arch_IF.thy +++ b/proof/infoflow/Arch_IF.thy @@ -433,11 +433,11 @@ crunch irq_state_of_state[wp]: handle_recv, handle_reply "\s. P (irq_sta crunch irq_state_of_state[wp]: invoke_irq_handler "\s. P (irq_state_of_state s)" crunch irq_state_of_state[wp]: schedule "\s. P (irq_state_of_state s)" - (wp: dmo_wp modify_wp crunch_wps hoare_whenE_wp + (wp: dmo_wp modify_wp crunch_wps whenE_wp simp: machine_op_lift_def machine_rest_lift_def crunch_simps) crunch irq_state_of_state[wp]: finalise_cap "\s. P (irq_state_of_state s)" - (wp: select_wp modify_wp crunch_wps dmo_wp simp: crunch_simps) + (wp: modify_wp crunch_wps dmo_wp simp: crunch_simps) crunch irq_state_of_state[wp]: send_signal, restart "\s. P (irq_state_of_state s)" diff --git a/proof/infoflow/CNode_IF.thy b/proof/infoflow/CNode_IF.thy index 505c314cf6..606e60c135 100644 --- a/proof/infoflow/CNode_IF.thy +++ b/proof/infoflow/CNode_IF.thy @@ -500,7 +500,7 @@ lemma gets_irq_masks_equiv_valid: by (fastforce simp: equiv_valid_def2 equiv_valid_2_def in_monad) lemma irq_state_increment_reads_respects_memory: - "equiv_valid_inv (equiv_machine_state P And equiv_irq_state) + "equiv_valid_inv (equiv_machine_state P and equiv_irq_state) (equiv_for (\x. aag_can_affect_label aag l \ pasObjectAbs aag x \ subjectReads (pasPolicy aag) l) underlying_memory) @@ -511,7 +511,7 @@ lemma irq_state_increment_reads_respects_memory: done lemma irq_state_increment_reads_respects_device: - "equiv_valid_inv (equiv_machine_state P And equiv_irq_state) + "equiv_valid_inv (equiv_machine_state P and equiv_irq_state) (equiv_for (\x. 
aag_can_affect_label aag l \ pasObjectAbs aag x \ subjectReads (pasPolicy aag) l) device_state) @@ -576,7 +576,7 @@ lemma preemption_point_def2: odE" apply (rule ext) apply (simp add: preemption_point_def OR_choiceE_def wrap_ext_bool_det_ext_ext_def - ef_mk_ef work_units_limit_reached_def select_f_def) + ef_mk_ef work_units_limit_reached_def select_f_def empty_fail_cond) apply (clarsimp simp: work_units_limit_reached_def gets_def liftE_def select_f_def get_def lift_def return_def bind_def bindE_def split_def image_def split: option.splits sum.splits) diff --git a/proof/infoflow/Decode_IF.thy b/proof/infoflow/Decode_IF.thy index 40a3779db8..0e70187967 100644 --- a/proof/infoflow/Decode_IF.thy +++ b/proof/infoflow/Decode_IF.thy @@ -62,9 +62,9 @@ lemma decode_cnode_invocation_rev: apply (simp add: unlessE_whenE) apply wp apply (wp if_apply_ev derive_cap_rev whenE_inv hoare_vcg_imp_lift_R - lookup_slot_for_cnode_op_rev hoare_vcg_all_lift_R + lookup_slot_for_cnode_op_rev hoare_vcg_all_liftE_R lookup_slot_for_cnode_op_authorised ensure_empty_rev get_cap_rev - | simp add: split_def unlessE_whenE split del: if_split del: hoare_True_E_R + | simp add: split_def unlessE_whenE split del: if_split | wpc | wp (once) hoare_drop_imps, wp (once) lookup_slot_for_cnode_op_authorised | strengthen aag_can_read_self)+ @@ -110,8 +110,8 @@ lemma slot_cap_long_running_delete_reads_respects_f: apply (fastforce simp: long_running_delete_def is_final_cap_def gets_bind_ign intro: return_ev)+ apply (wp is_final_cap_reads_respects[where st=st])[1] apply (fastforce simp: long_running_delete_def is_final_cap_def gets_bind_ign intro: return_ev)+ - apply (wp reads_respects_f[OF get_cap_rev, where Q="\" and st=st], blast) - apply (wp get_cap_wp | simp)+ + apply (wpsimp wp: reads_respects_f[OF get_cap_rev, where Q="\" and st=st]) + apply (wp get_cap_wp) apply (fastforce intro!: cte_wp_valid_cap aag_has_auth_to_obj_refs_of_owned_cap simp: is_zombie_def dest: silc_inv_not_subject) done @@ -227,14 +227,14 @@ lemma decode_tcb_invocation_reads_respects_f: apply (simp add: unlessE_def[symmetric] unlessE_whenE split del: if_split cong: gen_invocation_labels.case_cong) apply (rule equiv_valid_guard_imp) - apply (wp (once) requiv_cur_thread_eq range_check_ev respects_f[OF derive_cap_rev] - derive_cap_inv slot_cap_long_running_delete_reads_respects_f[where st=st] - respects_f[OF check_valid_ipc_buffer_rev] check_valid_ipc_buffer_inv - respects_f[OF decode_set_priority_rev] respects_f[OF decode_set_mcpriority_rev] - respects_f[OF decode_set_sched_params_rev] - respects_f[OF get_simple_ko_reads_respects] - respects_f[OF get_bound_notification_reads_respects'] - | wp (once) whenE_throwError_wp + apply (wp requiv_cur_thread_eq range_check_ev respects_f[OF derive_cap_rev] + derive_cap_inv slot_cap_long_running_delete_reads_respects_f[where st=st] + respects_f[OF check_valid_ipc_buffer_rev] check_valid_ipc_buffer_inv + respects_f[OF decode_set_priority_rev] respects_f[OF decode_set_mcpriority_rev] + respects_f[OF decode_set_sched_params_rev] + respects_f[OF get_simple_ko_reads_respects] + respects_f[OF get_bound_notification_reads_respects'] + whenE_throwError_wp | wp (once) hoare_drop_imps | wpc | simp add: if_apply_def2 split del: if_split add: o_def split_def)+ diff --git a/proof/infoflow/FinalCaps.thy b/proof/infoflow/FinalCaps.thy index 5acdb5aa8e..9140977cc3 100644 --- a/proof/infoflow/FinalCaps.thy +++ b/proof/infoflow/FinalCaps.thy @@ -647,8 +647,7 @@ lemma set_cap_slots_holding_overlapping_caps_helper: obj_refs cap = {} \ cap_irqs 
cap \ {}; ko_at (TCB tcb) (fst slot) s; tcb_cap_cases (snd slot) = Some (getF, setF, blah) \ \ x \ slots_holding_overlapping_caps cap - (s\kheap := kheap s(fst slot \ - TCB (setF (\ x. capa) tcb))\)" + (s\kheap := (kheap s)(fst slot \ TCB (setF (\x. capa) tcb))\)" apply (clarsimp simp: slots_holding_overlapping_caps_def) apply (rule_tac x=cap' in exI) apply (clarsimp simp: get_cap_cte_wp_at') @@ -741,7 +740,7 @@ lemma set_cap_silc_inv: apply (rule equiv_forI) apply (erule use_valid) unfolding set_cap_def - apply (wp set_object_wp get_object_wp static_imp_wp | simp add: split_def | wpc)+ + apply (wp set_object_wp get_object_wp hoare_weak_lift_imp | simp add: split_def | wpc)+ apply clarsimp apply (rule conjI) apply fastforce @@ -919,7 +918,7 @@ lemma cap_swap_silc_inv: apply (rule hoare_gen_asm) unfolding cap_swap_def apply (rule hoare_pre) - apply (wp set_cap_silc_inv hoare_vcg_ex_lift static_imp_wp + apply (wp set_cap_silc_inv hoare_vcg_ex_lift hoare_weak_lift_imp set_cap_slots_holding_overlapping_caps_other[where aag=aag] set_cdt_silc_inv | simp split del: if_split)+ apply (rule conjI) @@ -955,7 +954,7 @@ lemma cap_move_silc_inv: apply (rule hoare_pre) apply (wp set_cap_silc_inv hoare_vcg_ex_lift set_cap_slots_holding_overlapping_caps_other[where aag=aag] - set_cdt_silc_inv static_imp_wp + set_cdt_silc_inv hoare_weak_lift_imp | simp)+ apply (rule conjI) apply (fastforce simp: cap_points_to_label_def) @@ -985,7 +984,7 @@ lemma cap_insert_silc_inv: \\_. silc_inv aag st\" unfolding cap_insert_def (* The order here matters. The first two need to be first. *) - apply (wp assert_wp static_imp_conj_wp set_cap_silc_inv hoare_vcg_ex_lift + apply (wp assert_wp hoare_weak_lift_imp_conj set_cap_silc_inv hoare_vcg_ex_lift set_untyped_cap_as_full_slots_holding_overlapping_caps_other[where aag=aag] get_cap_wp update_cdt_silc_inv | simp | wp (once) hoare_drop_imps)+ apply clarsimp @@ -1192,14 +1191,14 @@ lemma cap_delete_one_silc_inv: cap_delete_one slot \\_. silc_inv aag st\" unfolding cap_delete_one_def - by (wpsimp wp: hoare_unless_wp empty_slot_silc_inv get_cap_wp) + by (wpsimp wp: unless_wp empty_slot_silc_inv get_cap_wp) lemma cap_delete_one_silc_inv_subject: "\silc_inv aag st and K (is_subject aag (fst slot))\ cap_delete_one slot \\_. silc_inv aag st\" unfolding cap_delete_one_def - apply (wpsimp wp: hoare_unless_wp empty_slot_silc_inv get_cap_wp) + apply (wpsimp wp: unless_wp empty_slot_silc_inv get_cap_wp) unfolding silc_inv_def by simp @@ -1208,9 +1207,9 @@ lemma reply_cancel_ipc_silc_inv: reply_cancel_ipc t \\_. silc_inv aag st\" unfolding reply_cancel_ipc_def - apply (wp cap_delete_one_silc_inv select_wp hoare_vcg_if_lift | simp)+ + apply (wp cap_delete_one_silc_inv hoare_vcg_if_lift | simp)+ apply wps - apply (wp static_imp_wp hoare_vcg_all_lift hoare_vcg_ball_lift) + apply (wp hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_ball_lift) apply clarsimp apply (rename_tac b a) apply (frule(1) descendants_of_owned_or_transferable, force, force, elim disjE) @@ -1244,7 +1243,7 @@ lemma cancel_ipc_indirect_silc_inv: cancel_ipc t \\_. 
silc_inv aag st\" unfolding cancel_ipc_def - apply (rule hoare_seq_ext[OF _ gts_sp]) + apply (rule bind_wp[OF _ gts_sp]) apply (rule hoare_name_pre_state) apply (clarsimp simp: st_tcb_def2 receive_blocked_def) apply (simp add: blocked_cancel_ipc_def split: thread_state.splits) @@ -1295,7 +1294,7 @@ lemma update_restart_pc_silc_inv[wp]: lemma validE_validE_R': "\P\ f \Q\, \R\ \ \P\ f \Q\, -" apply (rule validE_validE_R) - apply (erule hoare_post_impErr) + apply (erule hoare_strengthen_postE) by auto lemma finalise_cap_ret_subset_cap_irqs: @@ -1442,7 +1441,7 @@ lemma finalise_cap_ret_is_subject: finalise_cap cap is_final \\rv _ :: det_state. case (fst rv) of Zombie ptr bits n \ is_subject aag (obj_ref_of (fst rv)) | _ \ True\" - including no_pre + including classic_wp_pre apply (case_tac cap, simp_all add: is_zombie_def) apply (wp | simp add: comp_def | rule impI | rule conjI)+ apply (fastforce simp: valid_def dest: arch_finalise_cap_ret) @@ -1569,7 +1568,7 @@ lemma rec_del_silc_inv': valid_validE_R[OF rec_del_respects(2)[simplified]] "2.hyps" drop_spec_validE[OF liftE_wp] set_cap_silc_inv set_cap_pas_refined replace_cap_invs final_cap_same_objrefs set_cap_cte_cap_wp_to - set_cap_cte_wp_at static_imp_wp hoare_vcg_ball_lift + set_cap_cte_wp_at hoare_weak_lift_imp hoare_vcg_ball_lift | simp add: finalise_cap_not_reply_master_unlifted split del: if_split)+ (* where the action is *) apply (simp cong: conj_cong add: conj_comms) @@ -1608,7 +1607,7 @@ lemma rec_del_silc_inv': finalise_cap_invs[where slot=slot] finalise_cap_replaceable[where sl=slot] finalise_cap_makes_halted[where slot=slot] - finalise_cap_auth' static_imp_wp) + finalise_cap_auth' hoare_weak_lift_imp) apply (wp drop_spec_validE[OF liftE_wp] get_cap_auth_wp[where aag=aag] | simp add: is_final_cap_def)+ @@ -1703,7 +1702,7 @@ lemma thread_set_tcb_registers_caps_merge_default_tcb_silc_inv[wp]: by (rule thread_set_silc_inv; simp add: tcb_cap_cases_def tcb_registers_caps_merge_def) crunch silc_inv[wp]: cancel_badged_sends "silc_inv aag st" - ( wp: crunch_wps hoare_unless_wp simp: crunch_simps ignore: filterM set_object thread_set + ( wp: crunch_wps unless_wp simp: crunch_simps ignore: filterM set_object thread_set simp: filterM_mapM) @@ -1719,11 +1718,11 @@ lemma rec_del_silc_inv_CTEDelete_transferable': apply (wp rec_del_silc_inv_not_transferable) apply simp apply (subst rec_del.simps[abs_def]) - apply (wp add: hoare_K_bind without_preemption_wp empty_slot_silc_inv static_imp_wp wp_transferable + apply (wp add: hoare_K_bind without_preemption_wp empty_slot_silc_inv hoare_weak_lift_imp wp_transferable rec_del_Finalise_transferable del: wp_not_transferable | wpc)+ - apply (rule hoare_post_impErr,rule rec_del_Finalise_transferable) + apply (rule hoare_strengthen_postE,rule rec_del_Finalise_transferable) apply force apply force apply (clarsimp) @@ -1773,7 +1772,7 @@ lemma cap_revoke_silc_inv': apply (rule spec_valid_conj_liftE1, (wp | simp)+) apply (rule drop_spec_validE[OF valid_validE[OF cap_delete_silc_inv]]) apply (wp drop_spec_validE[OF assertE_wp] drop_spec_validE[OF without_preemption_wp] - get_cap_wp select_wp drop_spec_validE[OF returnOk_wp])+ + get_cap_wp drop_spec_validE[OF returnOk_wp])+ apply clarsimp apply (clarsimp cong: conj_cong simp: conj_comms) apply (rule conjI) @@ -2161,7 +2160,7 @@ lemma cap_insert_silc_inv': apply (wp set_cap_silc_inv hoare_vcg_ex_lift set_untyped_cap_as_full_slots_holding_overlapping_caps_other[where aag=aag] get_cap_wp update_cdt_silc_inv set_cap_caps_of_state2 - 
set_untyped_cap_as_full_cdt_is_original_cap static_imp_wp + set_untyped_cap_as_full_cdt_is_original_cap hoare_weak_lift_imp | simp split del: if_split)+ apply (intro allI impI conjI) apply clarsimp @@ -2266,7 +2265,7 @@ lemma cap_delete_one_cte_wp_at_other: cap_delete_one irq_slot \\rv s. cte_wp_at P slot s\" unfolding cap_delete_one_def - apply (wp hoare_unless_wp empty_slot_cte_wp_elsewhere get_cap_wp | simp)+ + apply (wp unless_wp empty_slot_cte_wp_elsewhere get_cap_wp | simp)+ done @@ -2284,7 +2283,7 @@ lemma cap_insert_silc_inv''': apply (wp set_cap_silc_inv hoare_vcg_ex_lift set_untyped_cap_as_full_slots_holding_overlapping_caps_other[where aag=aag] get_cap_wp update_cdt_silc_inv set_cap_caps_of_state2 - set_untyped_cap_as_full_cdt_is_original_cap static_imp_wp + set_untyped_cap_as_full_cdt_is_original_cap hoare_weak_lift_imp | simp split del: if_split)+ apply (intro impI conjI allI) apply clarsimp @@ -2321,7 +2320,7 @@ lemma invoke_irq_handler_silc_inv: apply (rule hoare_gen_asm) apply (case_tac hi) apply (wp cap_insert_silc_inv'' cap_delete_one_silc_inv_subject cap_delete_one_cte_wp_at_other - static_imp_wp hoare_vcg_ex_lift + hoare_weak_lift_imp hoare_vcg_ex_lift slots_holding_overlapping_caps_from_silc_inv[where aag=aag and st=st] | simp add: authorised_irq_hdl_inv_def get_irq_slot_def conj_comms)+ apply (clarsimp simp: pas_refined_def irq_map_wellformed_aux_def) @@ -2401,7 +2400,7 @@ lemma transfer_caps_silc_inv: apply (simp add: transfer_caps_def) apply (wpc | wp)+ apply (rule_tac P = "\x \ set dest_slots. is_subject aag (fst x)" in hoare_gen_asm) - apply (wp transfer_caps_loop_pres_dest cap_insert_silc_inv) + apply (wpsimp wp: transfer_caps_loop_pres_dest cap_insert_silc_inv) apply (fastforce simp: silc_inv_def) apply (wp get_receive_slots_authorised hoare_vcg_all_lift hoare_vcg_imp_lift | simp)+ apply (fastforce elim: cte_wp_at_weakenE) @@ -2489,7 +2488,7 @@ lemma send_ipc_silc_inv: send_ipc block call badge can_grant can_grant_reply thread epptr \\_. silc_inv aag st\" unfolding send_ipc_def - apply (wp setup_caller_cap_silc_inv static_imp_wp do_ipc_transfer_silc_inv gts_wp + apply (wp setup_caller_cap_silc_inv hoare_weak_lift_imp do_ipc_transfer_silc_inv gts_wp | wpc | simp add:st_tcb_at_tcb_states_of_state_eq | rule conjI impI @@ -2544,7 +2543,7 @@ lemma receive_ipc_base_silc_inv: \\_. 
silc_inv aag st\" apply (clarsimp simp: thread_get_def get_thread_state_def cong: endpoint.case_cong) apply (rule hoare_pre) - apply (wp setup_caller_cap_silc_inv static_imp_wp do_ipc_transfer_silc_inv + apply (wp setup_caller_cap_silc_inv hoare_weak_lift_imp do_ipc_transfer_silc_inv | wpc | simp split del: if_split)+ apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift set_simple_ko_get_tcb | wpc | simp split del: if_split)+ @@ -2574,13 +2573,13 @@ lemma receive_ipc_silc_inv: apply (rule hoare_gen_asm) apply (simp del: AllowSend_def split: cap.splits) apply clarsimp - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) - apply (rule hoare_seq_ext[OF _ gbn_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ gbn_sp]) apply (case_tac ntfnptr, simp_all) (* old receive case, not bound *) apply (rule hoare_pre, wp receive_ipc_base_silc_inv, clarsimp simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (case_tac "isActive ntfn", simp_all) (* new ntfn-binding case *) apply (rule hoare_pre, wp, clarsimp) @@ -2606,7 +2605,7 @@ lemma send_fault_ipc_silc_inv: \ invs s \ valid_fault fault \ is_subject aag (fst (fst rv))" - in hoare_post_imp_R[rotated]) + in hoare_strengthen_postE_R[rotated]) apply (force dest!: cap_auth_caps_of_state simp: invs_valid_objs invs_sym_refs cte_wp_at_caps_of_state aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def) @@ -2632,7 +2631,7 @@ lemma setup_reply_master_silc_inv: unfolding setup_reply_master_def apply (wp set_cap_silc_inv hoare_vcg_ex_lift slots_holding_overlapping_caps_from_silc_inv[where aag=aag and st=st and P="\"] - get_cap_wp static_imp_wp + get_cap_wp hoare_weak_lift_imp | simp)+ apply (clarsimp simp: cap_points_to_label_def silc_inv_def) done @@ -2773,12 +2772,12 @@ lemma handle_invocation_silc_inv: apply (rule_tac E="\ft. silc_inv aag st and pas_refined aag and is_subject aag \ cur_thread and invs and (\_. valid_fault ft \ is_subject aag thread)" - and R="Q" and Q=Q for Q in hoare_post_impErr) + and R="Q" and Q=Q for Q in hoare_strengthen_postE) apply (wp lookup_extra_caps_authorised lookup_extra_caps_auth | simp)+ apply (rule_tac E="\ft. silc_inv aag st and pas_refined aag and is_subject aag \ cur_thread and invs and (\_. valid_fault (CapFault x False ft) \ is_subject aag thread)" - and R="Q" and Q=Q for Q in hoare_post_impErr) + and R="Q" and Q=Q for Q in hoare_strengthen_postE) apply (wp lookup_cap_and_slot_authorised lookup_cap_and_slot_cur_auth | simp)+ apply (auto intro: st_tcb_ex_cap simp: ct_in_state_def runnable_eq_active) done @@ -2820,7 +2819,7 @@ lemma handle_recv_silc_inv: in hoare_strengthen_post, wp, clarsimp simp: invs_valid_objs invs_sym_refs)+ apply (rule_tac Q'="\r s. 
silc_inv aag st s \ invs s \ pas_refined aag s \ is_subject aag thread \ tcb_at thread s \ cur_thread s = thread" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply ((clarsimp simp add: invs_valid_objs invs_sym_refs | intro impI allI conjI @@ -2856,7 +2855,7 @@ lemma handle_event_silc_inv: crunch silc_inv[wp]: activate_thread "silc_inv aag st" crunch silc_inv[wp]: schedule "silc_inv aag st" - ( wp: alternative_wp OR_choice_weak_wp select_wp crunch_wps + ( wp: OR_choice_weak_wp crunch_wps ignore: set_scheduler_action simp: crunch_simps) diff --git a/proof/infoflow/Finalise_IF.thy b/proof/infoflow/Finalise_IF.thy index 4ebce39689..990edbc6d3 100644 --- a/proof/infoflow/Finalise_IF.thy +++ b/proof/infoflow/Finalise_IF.thy @@ -570,10 +570,10 @@ lemma tcb_sched_action_reads_respects: apply (force intro: domtcbs simp: get_etcb_def) apply (simp add: equiv_valid_def2 ethread_get_def) apply (rule equiv_valid_rv_bind) - apply (wp equiv_valid_rv_trivial', simp) + apply (wpsimp wp: equiv_valid_rv_trivial') apply (rule equiv_valid_2_bind) prefer 2 - apply (wp equiv_valid_rv_trivial, simp) + apply (wpsimp wp: equiv_valid_rv_trivial) apply (rule equiv_valid_2_bind) apply (rule_tac P=\ and P'=\ and L="{pasObjectAbs aag t}" and L'="{pasObjectAbs aag t}" in ev2_invisible[OF domains_distinct]) @@ -583,7 +583,7 @@ lemma tcb_sched_action_reads_respects: apply (simp | wp)+ apply (clarsimp simp: equiv_valid_2_def gets_apply_def get_def bind_def return_def labels_are_invisible_def) - apply wp+ + apply wpsimp+ apply (force intro: domtcbs simp: get_etcb_def pas_refined_def tcb_domain_map_wellformed_aux_def) done @@ -601,7 +601,7 @@ lemma possible_switch_to_reads_respects: (possible_switch_to tptr)" apply (simp add: possible_switch_to_def ethread_get_def) apply (case_tac "aag_can_read aag tptr \ aag_can_affect aag l tptr") - apply (wp static_imp_wp tcb_sched_action_reads_respects | wpc | simp)+ + apply (wp hoare_weak_lift_imp tcb_sched_action_reads_respects | wpc | simp)+ apply (clarsimp simp: get_etcb_def) apply ((intro conjI impI allI | elim aag_can_read_self reads_equivE affects_equivE equiv_forE conjE disjE @@ -994,7 +994,7 @@ lemma reply_cancel_ipc_reads_respects_f: unfolding reply_cancel_ipc_def apply (rule gen_asm_ev) apply (wp cap_delete_one_reads_respects_f_transferable[where st=st] - select_singleton_ev select_inv select_wp assert_wp + select_singleton_ev select_inv assert_wp reads_respects_f[OF get_cap_rev, where st=st] reads_respects_f[OF thread_set_reads_respects, where st=st] reads_respects_f[OF gets_descendants_of_revrv[folded equiv_valid_def2]] @@ -1186,7 +1186,7 @@ proof (induct s rule: rec_del.induct, simp_all only: rec_del_fails drop_spec_ev[ apply (wp drop_spec_ev[OF returnOk_ev_pre] drop_spec_ev[OF liftE_ev] hoareE_TrueI reads_respects_f[OF empty_slot_reads_respects, where st=st] empty_slot_silc_inv) apply (rule "1.hyps") - apply (rule_tac Q'="\r s. silc_inv aag st s \ is_subject aag (fst slot)" in hoare_post_imp_R) + apply (rule_tac Q'="\r s. silc_inv aag st s \ is_subject aag (fst slot)" in hoare_strengthen_postE_R) apply (wp validE_validE_R'[OF rec_del_silc_inv_not_transferable] | fastforce simp: silc_inv_def)+ done next @@ -1202,13 +1202,13 @@ next apply (rule_tac Q'="\rv s. emptyable (slot_rdcall (ReduceZombieCall (fst rvb) slot exposed)) s \ (\ exposed \ ex_cte_cap_wp_to (\cp. 
cap_irqs cp = {}) slot s) \ - is_subject aag (fst slot)" in hoare_post_imp_R) + is_subject aag (fst slot)" in hoare_strengthen_postE_R) apply (wp rec_del_emptyable reduce_zombie_cap_to) apply simp apply (wp drop_spec_ev[OF liftE_ev] set_cap_reads_respects_f[where st=st] set_cap_silc_inv[where st=st] | simp)+ apply (wp replace_cap_invs set_cap_cte_wp_at set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to hoare_vcg_const_Ball_lift static_imp_wp + set_cap_cte_cap_wp_to hoare_vcg_const_Ball_lift hoare_weak_lift_imp drop_spec_ev[OF liftE_ev] finalise_cap_reads_respects set_cap_silc_inv set_cap_only_timer_irq_inv set_cap_pas_refined_not_transferable | simp add: cte_wp_at_eq_simp @@ -1311,7 +1311,7 @@ next | simp add: in_monad)+ apply (rule_tac Q'="\ _. silc_inv aag st and K (pasObjectAbs aag (fst slot) \ SilcLabel \ is_subject aag (fst slot))" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply (clarsimp) apply (rule conjI, assumption) @@ -1363,7 +1363,7 @@ lemma rec_del_Finalise_transferableE_R: \\_. P\, -" apply (rule hoare_pre) apply (simp add: validE_R_def) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule rec_del_Finalise_transferable) by force+ @@ -1384,7 +1384,7 @@ lemma rec_del_CTEDeleteCall_reads_respects_f: apply fastforce apply (subst rec_del.simps[abs_def]) apply (wp when_ev reads_respects_f[OF empty_slot_reads_respects] empty_slot_silc_inv - rec_del_Finalise_transferable_read_respects_f hoare_vcg_all_lift_R hoare_drop_impE_R + rec_del_Finalise_transferable_read_respects_f hoare_vcg_all_liftE_R hoare_drop_impE_R rec_del_Finalise_transferableE_R | wpc | simp)+ apply clarsimp diff --git a/proof/infoflow/IRQMasks_IF.thy b/proof/infoflow/IRQMasks_IF.thy index ad42ea82a4..f03e566665 100644 --- a/proof/infoflow/IRQMasks_IF.thy +++ b/proof/infoflow/IRQMasks_IF.thy @@ -169,7 +169,7 @@ end crunch irq_masks[wp]: cancel_ipc "\s. P (irq_masks_of_state s)" - (wp: select_wp crunch_wps simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch irq_masks[wp]: restart, set_mcpriority "\s. P (irq_masks_of_state s)" @@ -189,7 +189,7 @@ lemma preemption_point_irq_masks[wp]: by (wp preemption_point_inv, simp+) crunch irq_masks[wp]: cancel_badged_sends "\s. P (irq_masks_of_state s)" - (wp: crunch_wps dmo_wp no_irq hoare_unless_wp + (wp: crunch_wps dmo_wp no_irq unless_wp simp: filterM_mapM crunch_simps no_irq_clearMemory ignore: filterM) @@ -212,7 +212,7 @@ proof (induct rule: cap_revoke.induct[where ?a1.0=s]) drop_spec_validE[OF valid_validE[OF preemption_point_domain_sep_inv]] cap_delete_domain_sep_inv cap_delete_irq_masks drop_spec_validE[OF assertE_wp] drop_spec_validE[OF returnOk_wp] - drop_spec_validE[OF liftE_wp] select_wp + drop_spec_validE[OF liftE_wp] drop_spec_validE[OF hoare_vcg_conj_liftE1] | simp | wp (once) hoare_drop_imps)+ apply fastforce @@ -261,7 +261,7 @@ lemma decode_invocation_IRQHandlerCap: apply (simp add: decode_invocation_def split del: if_split) apply (rule hoare_pre) apply (wp | wpc | simp add: o_def)+ - apply (rule hoare_post_imp_R[where Q'="\\"]) + apply (rule hoare_strengthen_postE_R[where Q'="\\"]) apply wp apply (clarsimp simp: uncurry_def) apply (wp | wpc | simp add: decode_irq_handler_invocation_def o_def split del: if_split)+ @@ -306,7 +306,7 @@ lemma handle_invocation_irq_masks: \\rv s. 
P (irq_masks_of_state s)\" apply (simp add: handle_invocation_def ts_Restart_case_helper split_def liftE_liftM_liftME liftME_def bindE_assoc) - apply (wp static_imp_wp syscall_valid perform_invocation_irq_masks[where st=st] + apply (wp hoare_weak_lift_imp syscall_valid perform_invocation_irq_masks[where st=st] hoare_vcg_all_lift hoare_vcg_ex_lift decode_invocation_IRQHandlerCap | simp add: invs_valid_objs)+ done @@ -345,7 +345,7 @@ lemma call_kernel_irq_masks: (\x. rv = Some x \ x \ maxIRQ)" in hoare_strengthen_post) apply (wp | simp)+ apply (rule_tac Q="\x s. P (irq_masks_of_state s) \ domain_sep_inv False st s" - and F="E" for E in hoare_post_impErr) + and F="E" for E in hoare_strengthen_postE) apply (rule valid_validE) apply (wp handle_event_irq_masks[where st=st] valid_validE[OF handle_event_domain_sep_inv] | simp)+ diff --git a/proof/infoflow/InfoFlow_IF.thy b/proof/infoflow/InfoFlow_IF.thy index 90a98428b8..2de302236b 100644 --- a/proof/infoflow/InfoFlow_IF.thy +++ b/proof/infoflow/InfoFlow_IF.thy @@ -677,7 +677,7 @@ lemma requiv_wuc_eq[intro]: by (simp add: reads_equiv_def2) lemma update_object_noop: - "kheap s ptr = Some obj \ s\kheap := kheap s(ptr \ obj)\ = s" + "kheap s ptr = Some obj \ s\kheap := (kheap s)(ptr \ obj)\ = s" by (clarsimp simp: map_upd_triv) lemma set_object_rev: @@ -764,14 +764,14 @@ lemma gets_kheap_revrv: lemma gets_machine_state_revrv: "reads_equiv_valid_rv_inv (affects_equiv aag l) aag - (equiv_machine_state (aag_can_read aag or aag_can_affect aag l) And equiv_irq_state) + (equiv_machine_state (aag_can_read aag or aag_can_affect aag l) and equiv_irq_state) \ (gets machine_state)" by (fastforce simp: equiv_valid_2_def gets_def get_def return_def bind_def elim: reads_equivE affects_equivE equiv_forE intro: equiv_forI) lemma gets_machine_state_revrv': - "reads_equiv_valid_rv_inv A aag (equiv_machine_state (aag_can_read aag) And equiv_irq_state) + "reads_equiv_valid_rv_inv A aag (equiv_machine_state (aag_can_read aag) and equiv_irq_state) \ (gets machine_state)" by (fastforce simp: equiv_valid_2_def gets_def get_def return_def bind_def elim: reads_equivE affects_equivE equiv_forE @@ -892,7 +892,7 @@ context InfoFlow_IF_1 begin lemma do_machine_op_spec_reads_respects': assumes equiv_dmo: - "equiv_valid_inv (equiv_machine_state (aag_can_read aag) And equiv_irq_state) + "equiv_valid_inv (equiv_machine_state (aag_can_read aag) and equiv_irq_state) (equiv_machine_state (aag_can_affect aag l)) Q f" assumes guard: "\s. P s \ Q (machine_state s)" @@ -948,7 +948,7 @@ lemma do_machine_op_rev: unfolding do_machine_op_def equiv_valid_def2 apply (rule_tac W="\ rv rv'. equiv_machine_state (aag_can_read aag) rv rv' \ equiv_irq_state rv rv'" and Q="\ rv s. rv = machine_state s " in equiv_valid_rv_bind) - apply (blast intro: equiv_valid_rv_guard_imp[OF gets_machine_state_revrv'[simplified bipred_conj_def]]) + apply (blast intro: equiv_valid_rv_guard_imp[OF gets_machine_state_revrv'[simplified pred_conj_def]]) apply (rule_tac R'="\ (r, ms') (r', ms''). r = r' \ equiv_machine_state (aag_can_read aag) ms' ms''" and Q="\ (r,ms') s. ms' = rv \ rv = machine_state s " and Q'="\ (r',ms'') s. 
ms'' = rv' \ rv' = machine_state s" diff --git a/proof/infoflow/Interrupt_IF.thy b/proof/infoflow/Interrupt_IF.thy index be97fd3eca..862c1b8d81 100644 --- a/proof/infoflow/Interrupt_IF.thy +++ b/proof/infoflow/Interrupt_IF.thy @@ -44,7 +44,7 @@ lemma invoke_irq_handler_reads_respects_f: cap_delete_one_reads_respects_f[where st=st] reads_respects_f[OF get_irq_slot_reads_respects, where Q="\"] cap_insert_silc_inv'' cap_delete_one_silc_inv_subject - cap_delete_one_cte_wp_at_other static_imp_wp + cap_delete_one_cte_wp_at_other hoare_weak_lift_imp hoare_vcg_ex_lift slots_holding_overlapping_caps_from_silc_inv[where aag=aag and st=st] | simp | simp add: get_irq_slot_def)+ apply (clarsimp simp: pas_refined_def irq_map_wellformed_aux_def) diff --git a/proof/infoflow/Ipc_IF.thy b/proof/infoflow/Ipc_IF.thy index 35bd9bc802..b7a5718a20 100644 --- a/proof/infoflow/Ipc_IF.thy +++ b/proof/infoflow/Ipc_IF.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -217,7 +218,7 @@ lemma update_waiting_ntfn_equiv_but_for_labels: update_waiting_ntfn nptr list boundtcb badge \\_. equiv_but_for_labels aag L st\" unfolding update_waiting_ntfn_def - apply (wp static_imp_wp as_user_equiv_but_for_labels set_thread_state_runnable_equiv_but_for_labels + apply (wp hoare_weak_lift_imp as_user_equiv_but_for_labels set_thread_state_runnable_equiv_but_for_labels set_thread_state_pas_refined set_notification_equiv_but_for_labels set_simple_ko_pred_tcb_at set_simple_ko_pas_refined hoare_vcg_disj_lift possible_switch_to_equiv_but_for_labels @@ -322,46 +323,23 @@ lemma no_fail_gts: lemma sts_noop: "monadic_rewrite True True (tcb_at tcb and (\s. tcb \ cur_thread s)) (set_thread_state_ext tcb) (return ())" - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_add_get) - apply (rule monadic_rewrite_bind_tail) - apply (clarsimp simp: set_thread_state_ext_def) - apply (rule_tac x="tcb_state (the (get_tcb tcb x))" in monadic_rewrite_symb_exec) - apply (wp gts_wp | simp)+ - apply (rule monadic_rewrite_symb_exec) - apply wp+ - apply (rule monadic_rewrite_symb_exec) - apply wp+ - apply (simp only: when_def) - apply (rule monadic_rewrite_trans) - apply (rule monadic_rewrite_if) - apply (rule monadic_rewrite_impossible[where g="return ()"]) - apply (rule monadic_rewrite_refl) - apply simp - apply (rule monadic_rewrite_refl) - apply wp + unfolding set_thread_state_ext_def when_def + apply (monadic_rewrite_l monadic_rewrite_if_l_False \wpsimp wp: gts_wp\) + apply (monadic_rewrite_symb_exec_l_drop)+ + apply (rule monadic_rewrite_refl) by (auto simp: pred_tcb_at_def obj_at_def is_tcb_def get_tcb_def) lemma sts_to_modify': "monadic_rewrite True True (tcb_at tcb and (\s :: det_state. tcb \ cur_thread s)) (set_thread_state tcb st) - (modify (\s. s\kheap := kheap s(tcb \ TCB (the (get_tcb tcb s)\tcb_state := st\))\))" + (modify (\s. 
s\kheap := (kheap s)(tcb \ TCB (the (get_tcb tcb s)\tcb_state := st\))\))" apply (clarsimp simp: set_thread_state_def set_object_def) - apply (rule monadic_rewrite_add_get) - apply (rule monadic_rewrite_bind_tail) - apply (rule monadic_rewrite_imp) - apply (rule monadic_rewrite_trans) - apply (simp only: bind_assoc[symmetric]) - apply (rule monadic_rewrite_bind_tail) - apply (rule sts_noop) - apply (wpsimp wp: get_object_wp, simp) - apply (rule_tac x="the (get_tcb tcb x)" in monadic_rewrite_symb_exec, (wp | simp)+) - apply (rule_tac x="x" in monadic_rewrite_symb_exec, (wp | simp)+) - apply (wpsimp wp: get_object_wp simp: a_type_def)+ - apply (rule_tac P="(=) x" in monadic_rewrite_refl3) - apply (clarsimp simp add: put_def modify_def get_def bind_def) - apply assumption - apply wp + apply (monadic_rewrite_l sts_noop \wpsimp wp: get_object_wp\) + apply (simp add: bind_assoc) + apply monadic_rewrite_symb_exec_l+ + apply (rule_tac P="\s'. s' = s \ tcba = the (get_tcb tcb s)" in monadic_rewrite_pre_imp_eq) + apply (clarsimp simp: put_def modify_def get_def bind_def) + apply (wpsimp wp: get_object_wp)+ by (clarsimp simp: get_tcb_def tcb_at_def) lemma sts_no_fail: @@ -417,7 +395,7 @@ lemma cancel_ipc_to_blocked_nosts: apply (rule hoare_modifyE_var[where P="tcb_at tcb and (\s. tcb \ cur_thread s)"]) apply (clarsimp simp: tcb_at_def get_tcb_def) apply (simp add: modify_modify) - apply (rule monadic_rewrite_refl2) + apply (rule monadic_rewrite_is_refl) apply (fastforce simp add: simpler_modify_def o_def get_tcb_def) apply (wp gts_wp)+ apply (simp add: set_thread_state_def bind_assoc gets_the_def) @@ -623,6 +601,9 @@ lemma send_signal_reads_respects: set_thread_state_runnable_equiv_but_for_labels get_simple_ko_wp gts_wp update_waiting_ntfn_equiv_but_for_labels blocked_cancel_ipc_nosts_equiv_but_for_labels + \ \FIXME: The following line is working around the fact that wp (once) doesn't invoke + wp_pre. If that is changed then it could be removed.\ + | wp_pre0 | wpc | wps)+ apply (elim conjE) @@ -917,7 +898,7 @@ lemma transfer_caps_loop_reads_respects': apply (rule_tac Q'="\capd s. 
(capd \ NullCap \ cte_wp_at (is_derived (cdt s) (obj,ind) capd) (obj, ind) s) \ (capd \ NullCap \ QM s capd)" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply (clarsimp simp: cte_wp_at_caps_of_state split del: if_split) apply (strengthen is_derived_is_transferable[mk_strg I' O], assumption, solves\simp\) @@ -983,7 +964,7 @@ lemma lookup_slot_for_cnode_op_rev: apply (wp resolve_address_bits_rev lookup_error_on_failure_rev whenE_throwError_wp | wpc - | rule hoare_post_imp_R[OF hoare_True_E_R[where P="\"]] + | rule hoare_strengthen_postE_R[OF wp_post_tautE_R] | simp add: split_def split del: if_split)+ done @@ -1158,7 +1139,7 @@ lemma transfer_caps_reads_respects: (transfer_caps mi caps endpoint receiver receive_buffer)" unfolding transfer_caps_def fun_app_def by (wp transfer_caps_loop_reads_respects get_receive_slots_rev - get_receive_slots_authorised hoare_vcg_all_lift static_imp_wp + get_receive_slots_authorised hoare_vcg_all_lift hoare_weak_lift_imp | wpc | simp add: ball_conj_distrib)+ lemma aag_has_auth_to_read_mrs: @@ -1382,7 +1363,7 @@ lemma receive_ipc_base_reads_respects: as_user_set_register_reads_respects' | simp | intro allI impI | rule pre_ev, wpc)+)[2] apply (intro allI impI) - apply (wp static_imp_wp set_simple_ko_reads_respects set_thread_state_reads_respects + apply (wp hoare_weak_lift_imp set_simple_ko_reads_respects set_thread_state_reads_respects setup_caller_cap_reads_respects do_ipc_transfer_reads_respects possible_switch_to_reads_respects gets_cur_thread_ev set_thread_state_pas_refined set_simple_ko_reads_respects hoare_vcg_all_lift @@ -1420,7 +1401,7 @@ lemma receive_ipc_reads_respects: apply (rename_tac epptr badge rights) apply (wp receive_ipc_base_reads_respects complete_signal_reads_respects - static_imp_wp set_simple_ko_reads_respects set_thread_state_reads_respects + hoare_weak_lift_imp set_simple_ko_reads_respects set_thread_state_reads_respects setup_caller_cap_reads_respects complete_signal_reads_respects thread_get_reads_respects get_thread_state_reads_respects @@ -1530,7 +1511,7 @@ lemma send_fault_ipc_reads_respects: hoare_vcg_conj_lift hoare_vcg_ex_lift hoare_vcg_all_lift thread_set_pas_refined cap_fault_on_failure_rev lookup_slot_for_thread_rev - lookup_slot_for_thread_authorised hoare_vcg_all_lift_R + lookup_slot_for_thread_authorised hoare_vcg_all_liftE_R thread_get_reads_respects get_cap_auth_wp[where aag=aag] get_cap_rev thread_set_tcb_fault_set_invs | wpc @@ -1543,7 +1524,7 @@ lemma send_fault_ipc_reads_respects: \ pas_cur_domain aag s \ valid_fault fault \ is_subject aag (fst (fst rv))" - in hoare_post_imp_R[rotated]) + in hoare_strengthen_postE_R[rotated]) apply (fastforce simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def) apply (wp get_cap_auth_wp[where aag=aag] lookup_slot_for_thread_authorised thread_get_reads_respects @@ -1719,7 +1700,7 @@ next apply (wp cap_insert_globals_equiv'') apply (rule_tac Q="\_. globals_equiv st and valid_arch_state and valid_global_objs" and E="\_. globals_equiv st and valid_arch_state and valid_global_objs" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (simp add: whenE_def, rule conjI) apply (rule impI, wp)+ apply (simp)+ @@ -1739,7 +1720,7 @@ lemma copy_mrs_globals_equiv: "\globals_equiv s and valid_arch_state and (\s. receiver \ idle_thread s)\ copy_mrs sender sbuf receiver rbuf n \\_. globals_equiv s\" - unfolding copy_mrs_def including no_pre + unfolding copy_mrs_def including classic_wp_pre apply (wp | wpc)+ apply (rule_tac Q="\_. 
globals_equiv s" in hoare_strengthen_post) apply (wp mapM_wp' | wpc)+ @@ -1919,7 +1900,7 @@ lemma cancel_ipc_blocked_globals_equiv: cancel_ipc a \\_. globals_equiv st\" unfolding cancel_ipc_def - apply (rule hoare_seq_ext[OF _ gts_sp]) + apply (rule bind_wp[OF _ gts_sp]) apply (rule hoare_pre) apply (wpc; (simp,rule blocked_cancel_ipc_globals_equiv)?) apply (rule hoare_pre_cont)+ @@ -1982,7 +1963,7 @@ lemma send_fault_ipc_valid_global_objs: apply (wp) apply (simp add: Let_def) apply (wp send_ipc_valid_global_objs | wpc)+ - apply (rule_tac Q'="\_. valid_global_objs" in hoare_post_imp_R) + apply (rule_tac Q'="\_. valid_global_objs" in hoare_strengthen_postE_R) apply (wp | simp)+ done @@ -2004,7 +1985,7 @@ lemma send_fault_ipc_globals_equiv: apply (rule_tac Q'="\_. globals_equiv st and valid_objs and valid_arch_state and valid_global_refs and pspace_distinct and pspace_aligned and valid_global_objs and K (valid_fault fault) and valid_idle and - (\s. sym_refs (state_refs_of s))" in hoare_post_imp_R) + (\s. sym_refs (state_refs_of s))" in hoare_strengthen_postE_R) apply (wp | simp)+ apply (clarsimp) apply (rule valid_tcb_fault_update) @@ -2024,7 +2005,7 @@ lemma handle_fault_globals_equiv: unfolding handle_fault_def apply (wp handle_double_fault_globals_equiv) apply (rule_tac Q="\_. globals_equiv st and valid_arch_state" and - E="\_. globals_equiv st and valid_arch_state" in hoare_post_impErr) + E="\_. globals_equiv st and valid_arch_state" in hoare_strengthen_postE) apply (wp send_fault_ipc_globals_equiv | simp)+ done diff --git a/proof/infoflow/Noninterference.thy b/proof/infoflow/Noninterference.thy index 709ec11847..ea1df9ae52 100644 --- a/proof/infoflow/Noninterference.thy +++ b/proof/infoflow/Noninterference.thy @@ -325,7 +325,7 @@ lemma prop_of_two_valid: by (rule hoare_pre, wps f g, wp, simp) lemma thread_set_tcb_context_update_wp: - "\\s. P (s\kheap := kheap s(t \ TCB (tcb_arch_update f (the (get_tcb t s))))\)\ + "\\s. P (s\kheap := (kheap s)(t \ TCB (tcb_arch_update f (the (get_tcb t s))))\)\ thread_set (tcb_arch_update f) t \\_. P\" apply (simp add: thread_set_def) @@ -381,8 +381,7 @@ lemma kernel_entry_if_globals_equiv_scheduler: and (\s. ct_idle s \ tc = idle_context s)\ kernel_entry_if e tc \\_. globals_equiv_scheduler st\" - apply (wp globals_equiv_scheduler_inv' kernel_entry_if_globals_equiv) - apply (clarsimp) + apply (wpsimp wp: globals_equiv_scheduler_inv' kernel_entry_if_globals_equiv) apply assumption apply clarsimp done @@ -460,7 +459,7 @@ lemma schedule_cur_domain: schedule \\_ s. P (cur_domain s)\" (is "\?PRE\ _ \_\") - supply hoare_pre_cont[where a=next_domain, wp add] + supply hoare_pre_cont[where f=next_domain, wp add] ethread_get_wp[wp del] if_split[split del] if_cong[cong] apply (simp add: schedule_def schedule_choose_new_thread_def | wp | wpc)+ apply (rule_tac Q="\_. ?PRE" in hoare_strengthen_post) @@ -477,7 +476,7 @@ lemma schedule_domain_fields: schedule \\_. domain_fields P\" (is "\?PRE\ _ \_\") - supply hoare_pre_cont[where a=next_domain, wp add] + supply hoare_pre_cont[where f=next_domain, wp add] ethread_get_wp[wp del] if_split[split del] if_cong[cong] apply (simp add: schedule_def schedule_choose_new_thread_def | wp | wpc)+ apply (rule_tac Q="\_. 
?PRE" in hoare_strengthen_post) @@ -631,7 +630,7 @@ locale Noninterference_1 = "reads_respects_g aag l \ (do_machine_op (storeWord ptr w))" and integrity_asids_update_reference_state: "is_subject aag t - \ integrity_asids aag {pasSubject aag} x asid s (s\kheap := kheap s(t \ blah)\)" + \ integrity_asids aag {pasSubject aag} x asid s (s\kheap := (kheap s)(t \ blah)\)" and partitionIntegrity_subjectAffects_aobj: "\ partitionIntegrity aag s s'; kheap s x = Some (ArchObj ao); kheap s x \ kheap s' x; silc_inv aag st s; pas_refined aag s; pas_wellformed_noninterference aag \ @@ -685,7 +684,7 @@ locale Noninterference_1 = begin lemma integrity_update_reference_state: - "\ is_subject aag t; integrity aag X st s; st = st'\kheap := kheap st'(t \ blah)\ \ + "\ is_subject aag t; integrity aag X st s; st = st'\kheap := (kheap st')(t \ blah)\ \ \ integrity (aag :: 'a subject_label PAS) X st' s" apply (erule integrity_trans[rotated]) apply (clarsimp simp: integrity_def opt_map_def integrity_asids_update_reference_state) @@ -2090,10 +2089,10 @@ lemma tcb_sched_action_reads_respects_g': apply (force intro: domtcbs simp: get_etcb_def) apply (simp add: equiv_valid_def2 ethread_get_def) apply (rule equiv_valid_rv_bind) - apply (wp equiv_valid_rv_trivial', simp) + apply (wpsimp wp: equiv_valid_rv_trivial') apply (rule equiv_valid_2_bind) prefer 2 - apply (wp equiv_valid_rv_trivial, simp) + apply (wpsimp wp: equiv_valid_rv_trivial) apply (rule equiv_valid_2_bind) apply (rule_tac P="\" and P'="\" and L="{pasObjectAbs aag thread}" and L'="{pasObjectAbs aag thread}" in ev2_invisible') @@ -2103,8 +2102,7 @@ lemma tcb_sched_action_reads_respects_g': apply (rule doesnt_touch_globalsI | simp | wp)+ apply (clarsimp simp: equiv_valid_2_def gets_apply_def get_def bind_def return_def labels_are_invisible_def) - apply wp+ - apply clarsimp + apply wpsimp+ apply (clarsimp simp: pas_refined_def tcb_domain_map_wellformed_aux_def) apply (erule_tac x="(thread, tcb_domain y)" in ballE) apply force @@ -2272,7 +2270,7 @@ lemma schedule_choose_new_thread_reads_respects_g: apply (subst gets_app_rewrite[where y=domain_time and f="\x. 
x = 0"])+ apply (wp gets_domain_time_zero_ev set_scheduler_action_reads_respects_g choose_thread_reads_respects_g ev_pre_cont[where f=next_domain] - hoare_pre_cont[where a=next_domain] when_ev) + hoare_pre_cont[where f=next_domain] when_ev) apply (clarsimp simp: valid_sched_def word_neq_0_conv) done @@ -2938,7 +2936,7 @@ lemma thread_get_tcb_context_reads_respects_g: and Q="\\" in equiv_valid_rv_bind) apply (rule thread_get_tcb_context_reads_respects_g_helper) apply (rule return_ev2, simp) - apply (rule hoare_post_taut) + apply (rule hoare_TrueI) done (* this is a little more complicated because the context isn't diff --git a/proof/infoflow/Noninterference_Base_Alternatives.thy b/proof/infoflow/Noninterference_Base_Alternatives.thy index 0a58dd6716..6619c4008f 100644 --- a/proof/infoflow/Noninterference_Base_Alternatives.thy +++ b/proof/infoflow/Noninterference_Base_Alternatives.thy @@ -5,7 +5,9 @@ *) theory Noninterference_Base_Alternatives -imports Noninterference_Base "Lib.Eisbach_Methods" + imports + Noninterference_Base + Eisbach_Tools.Eisbach_Methods begin text \ diff --git a/proof/infoflow/PasUpdates.thy b/proof/infoflow/PasUpdates.thy index 14b5ae3469..1d6ceed8db 100644 --- a/proof/infoflow/PasUpdates.thy +++ b/proof/infoflow/PasUpdates.thy @@ -45,7 +45,7 @@ crunch domain_fields[wp]: cap_swap_ext, set_thread_state_ext, tcb_sched_action, reschedule_required, cap_swap_for_delete, finalise_cap, cap_move, cap_swap, cap_delete, cancel_badged_sends, cap_insert "domain_fields P" - ( wp: syscall_valid select_wp crunch_wps rec_del_preservation cap_revoke_preservation modify_wp + ( wp: syscall_valid crunch_wps rec_del_preservation cap_revoke_preservation modify_wp simp: crunch_simps check_cap_at_def filterM_mapM unless_def ignore: without_preemption filterM rec_del check_cap_at cap_revoke ignore_del: retype_region_ext create_cap_ext cap_insert_ext ethread_set cap_move_ext diff --git a/proof/infoflow/README.md b/proof/infoflow/README.md index 8ac0d9b158..636f15d4be 100644 --- a/proof/infoflow/README.md +++ b/proof/infoflow/README.md @@ -24,9 +24,9 @@ the [C Refinement Proof](../crefine/). Building -------- -To build from the `l4v/` directory, run: +To build for the ARM architecture from the `l4v/` directory, run: - ./isabelle/bin/isabelle build -d . -v -b InfoFlow + L4V_ARCH=ARM ./run_tests InfoFlow Important Theories ------------------ @@ -42,4 +42,3 @@ specification is [`InfoFlow`](InfoFlow.thy). Confidentiality is a relational property and the theory [`EquivValid`](../../lib/EquivValid.thy) defines these generically for the nondeterministic state monad of the abstract specification. - diff --git a/proof/infoflow/RISCV64/ArchADT_IF.thy b/proof/infoflow/RISCV64/ArchADT_IF.thy index a70fe81d27..a0909702ea 100644 --- a/proof/infoflow/RISCV64/ArchADT_IF.thy +++ b/proof/infoflow/RISCV64/ArchADT_IF.thy @@ -25,7 +25,7 @@ lemma do_user_op_if_invs[ADT_IF_assms]: do_user_op_if f tc \\_. 
invs and ct_running\" apply (simp add: do_user_op_if_def split_def) - apply (wp do_machine_op_ct_in_state select_wp device_update_invs | wp (once) dmo_invs | simp)+ + apply (wp do_machine_op_ct_in_state device_update_invs | wp (once) dmo_invs | simp)+ apply (clarsimp simp: user_mem_def user_memory_update_def simpler_modify_def restrict_map_def invs_def cur_tcb_def ptable_rights_s_def ptable_lift_s_def) apply (frule ptable_rights_imp_frame) @@ -35,31 +35,31 @@ lemma do_user_op_if_invs[ADT_IF_assms]: done crunch domain_sep_inv[ADT_IF_assms, wp]: do_user_op_if "domain_sep_inv irqs st" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) crunch valid_sched[ADT_IF_assms, wp]: do_user_op_if "valid_sched" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) crunch irq_masks[ADT_IF_assms, wp]: do_user_op_if "\s. P (irq_masks_of_state s)" - (ignore: user_memory_update wp: select_wp dmo_wp no_irq) + (ignore: user_memory_update wp: dmo_wp no_irq) crunch valid_list[ADT_IF_assms, wp]: do_user_op_if "valid_list" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) lemma do_user_op_if_scheduler_action[ADT_IF_assms, wp]: "do_user_op_if f tc \\s. P (scheduler_action s)\" - by (simp add: do_user_op_if_def | wp select_wp | wpc)+ + by (simp add: do_user_op_if_def | wp | wpc)+ lemma do_user_op_silc_inv[ADT_IF_assms, wp]: "do_user_op_if f tc \silc_inv aag st\" apply (simp add: do_user_op_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma do_user_op_pas_refined[ADT_IF_assms, wp]: "do_user_op_if f tc \pas_refined aag\" apply (simp add: do_user_op_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done crunches do_user_op_if @@ -67,7 +67,7 @@ crunches do_user_op_if and cur_domain[ADT_IF_assms, wp]: "\s. P (cur_domain s)" and idle_thread[ADT_IF_assms, wp]: "\s. P (idle_thread s)" and domain_fields[ADT_IF_assms, wp]: "domain_fields P" - (wp: select_wp ignore: user_memory_update) + (ignore: user_memory_update) lemma do_use_op_guarded_pas_domain[ADT_IF_assms, wp]: "do_user_op_if f tc \guarded_pas_domain aag\" @@ -187,7 +187,7 @@ lemma do_user_op_if_idle_equiv[ADT_IF_assms, wp]: do_user_op_if uop tc \\_. idle_equiv st\" unfolding do_user_op_if_def - by (wpsimp wp: dmo_user_memory_update_idle_equiv dmo_device_memory_update_idle_equiv select_wp) + by (wpsimp wp: dmo_user_memory_update_idle_equiv dmo_device_memory_update_idle_equiv) lemma kernel_entry_if_valid_vspace_objs_if[ADT_IF_assms, wp]: "\valid_vspace_objs_if and invs and (\s. e \ Interrupt \ ct_active s)\ @@ -214,20 +214,20 @@ lemma valid_vspace_objs_if_ms_update[ADT_IF_assms, simp]: lemma do_user_op_if_irq_state_of_state[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_state_of_state s)\" apply (rule hoare_pre) - apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp select_wp | wpc)+ + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ done lemma do_user_op_if_irq_masks_of_state[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_masks_of_state s)\" apply (rule hoare_pre) - apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp select_wp | wpc)+ + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ done lemma do_user_op_if_irq_measure_if[ADT_IF_assms]: "do_user_op_if utf uc \\s. 
P (irq_measure_if s)\" apply (rule hoare_pre) apply (simp add: do_user_op_if_def user_memory_update_def irq_measure_if_def - | wps |wp dmo_wp select_wp | wpc)+ + | wps |wp dmo_wp | wpc)+ done lemma invoke_tcb_irq_state_inv[ADT_IF_assms]: @@ -244,7 +244,7 @@ lemma invoke_tcb_irq_state_inv[ADT_IF_assms]: defer apply ((wp irq_state_inv_triv | simp)+)[2] apply (simp add: split_def cong: option.case_cong) - by (wp hoare_vcg_all_lift_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R + by (wp hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R checked_cap_insert_domain_sep_inv cap_delete_deletes cap_delete_irq_state_inv[where st=st and sta=sta and irq=irq] cap_delete_irq_state_next[where st=st and sta=sta and irq=irq] diff --git a/proof/infoflow/RISCV64/ArchArch_IF.thy b/proof/infoflow/RISCV64/ArchArch_IF.thy index 56faabe673..e1e856a3ea 100644 --- a/proof/infoflow/RISCV64/ArchArch_IF.thy +++ b/proof/infoflow/RISCV64/ArchArch_IF.thy @@ -61,7 +61,7 @@ crunches set_irq_state, arch_post_cap_deletion, handle_arch_fault_reply crunch irq_state_of_state[Arch_IF_assms, wp]: arch_switch_to_idle_thread, arch_switch_to_thread "\s :: det_state. P (irq_state_of_state s)" - (wp: dmo_wp modify_wp crunch_wps hoare_whenE_wp + (wp: dmo_wp modify_wp crunch_wps whenE_wp simp: machine_op_lift_def setVSpaceRoot_def machine_rest_lift_def crunch_simps storeWord_def) @@ -74,7 +74,7 @@ crunch irq_state_of_state[wp]: arch_perform_invocation "\s. P (irq_state crunch irq_state_of_state[Arch_IF_assms, wp]: arch_finalise_cap, prepare_thread_delete "\s :: det_state. P (irq_state_of_state s)" - (wp: select_wp modify_wp crunch_wps dmo_wp + (wp: modify_wp crunch_wps dmo_wp simp: crunch_simps hwASIDFlush_def) lemma equiv_asid_machine_state_update[Arch_IF_assms, simp]: @@ -243,7 +243,7 @@ lemma set_vm_root_states_equiv_for[wp]: "set_vm_root thread \states_equiv_for P Q R S st\" unfolding set_vm_root_def catch_def fun_app_def by (wpsimp wp: do_machine_op_mol_states_equiv_for - hoare_vcg_all_lift hoare_whenE_wp hoare_drop_imps + hoare_vcg_all_lift whenE_wp hoare_drop_imps simp: setVSpaceRoot_def dmo_bind_valid if_apply_def2)+ lemma find_vspace_for_asid_reads_respects: @@ -328,7 +328,7 @@ lemma unmap_page_table_reads_respects: apply (rule equiv_valid_guard_imp) apply (wp dmo_mol_reads_respects store_pte_reads_respects get_pte_rev pt_lookup_from_level_reads_respects pt_lookup_from_level_is_subject - find_vspace_for_asid_wp find_vspace_for_asid_reads_respects hoare_vcg_all_lift_R + find_vspace_for_asid_wp find_vspace_for_asid_reads_respects hoare_vcg_all_liftE_R | wpc | simp add: sfence_def | wp (once) hoare_drop_imps)+ apply clarsimp apply (frule vspace_for_asid_is_subject) @@ -400,10 +400,10 @@ lemma perform_page_invocation_reads_respects: lemma equiv_asids_riscv_asid_table_update: "\ equiv_asids R s t; kheap s pool_ptr = kheap t pool_ptr \ \ equiv_asids R - (s\arch_state := arch_state s\riscv_asid_table := riscv_asid_table (arch_state s) - (asid_high_bits_of asid \ pool_ptr)\\) - (t\arch_state := arch_state t\riscv_asid_table := riscv_asid_table (arch_state t) - (asid_high_bits_of asid \ pool_ptr)\\)" + (s\arch_state := arch_state s\riscv_asid_table := (asid_table s) + (asid_high_bits_of asid \ pool_ptr)\\) + (t\arch_state := arch_state t\riscv_asid_table := (asid_table t) + (asid_high_bits_of asid \ pool_ptr)\\)" by (clarsimp simp: equiv_asids_def equiv_asid_def asid_pool_at_kheap opt_map_def) lemma riscv_asid_table_update_reads_respects: @@ -464,7 +464,7 @@ lemma copy_global_mappings_valid_arch_state: and (\s. 
x \ global_refs s \ is_aligned x pt_bits)\ copy_global_mappings x \\_. valid_arch_state\" - unfolding copy_global_mappings_def including no_pre + unfolding copy_global_mappings_def including classic_wp_pre apply simp apply wp apply (rule_tac Q="\_. valid_arch_state and valid_global_vspace_mappings and pspace_aligned @@ -943,8 +943,8 @@ lemma set_mrs_globals_equiv: apply (clarsimp) apply (insert length_msg_lt_msg_max) apply (simp) - apply (wp set_object_globals_equiv static_imp_wp) - apply (wp hoare_vcg_all_lift set_object_globals_equiv static_imp_wp)+ + apply (wp set_object_globals_equiv hoare_weak_lift_imp) + apply (wp hoare_vcg_all_lift set_object_globals_equiv hoare_weak_lift_imp)+ apply (fastforce simp: valid_arch_state_def obj_at_def get_tcb_def dest: valid_global_arch_objs_pt_at) done @@ -981,7 +981,7 @@ lemma perform_pg_inv_unmap_globals_equiv: apply (rule hoare_weaken_pre) apply (wp mapM_swp_store_pte_globals_equiv hoare_vcg_all_lift mapM_x_swp_store_pte_globals_equiv set_cap_globals_equiv'' unmap_page_globals_equiv store_pte_globals_equiv - store_pte_globals_equiv static_imp_wp set_message_info_globals_equiv + store_pte_globals_equiv hoare_weak_lift_imp set_message_info_globals_equiv unmap_page_valid_arch_state perform_pg_inv_get_addr_globals_equiv | wpc | simp add: do_machine_op_bind sfence_def)+ apply (clarsimp simp: acap_map_data_def) @@ -998,7 +998,7 @@ lemma perform_pg_inv_map_globals_equiv: unfolding perform_pg_inv_map_def by (wp mapM_swp_store_pte_globals_equiv hoare_vcg_all_lift mapM_x_swp_store_pte_globals_equiv set_cap_globals_equiv'' unmap_page_globals_equiv store_pte_globals_equiv - store_pte_globals_equiv static_imp_wp set_message_info_globals_equiv + store_pte_globals_equiv hoare_weak_lift_imp set_message_info_globals_equiv unmap_page_valid_arch_state perform_pg_inv_get_addr_globals_equiv | wpc | simp add: do_machine_op_bind sfence_def | fastforce)+ @@ -1049,7 +1049,7 @@ lemma perform_asid_control_invocation_globals_equiv: max_index_upd_invs_simple set_cap_no_overlap set_cap_caps_no_overlap max_index_upd_caps_overlap_reserved region_in_kernel_window_preserved - hoare_vcg_all_lift get_cap_wp static_imp_wp + hoare_vcg_all_lift get_cap_wp hoare_weak_lift_imp set_cap_idx_up_aligned_area[where dev = False,simplified] | simp)+ (* factor out the implication -- we know what the relevant components of the diff --git a/proof/infoflow/RISCV64/ArchDecode_IF.thy b/proof/infoflow/RISCV64/ArchDecode_IF.thy index 2038b51e5a..5be04e10b1 100644 --- a/proof/infoflow/RISCV64/ArchDecode_IF.thy +++ b/proof/infoflow/RISCV64/ArchDecode_IF.thy @@ -97,7 +97,7 @@ lemma pas_cap_cur_auth_ASIDControlCap: lemma decode_asid_pool_invocation_reads_respects_f: notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] - notes hoare_whenE_wps[wp_split del] + notes whenE_wps[wp_split del] shows "reads_respects_f aag l (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot @@ -128,7 +128,7 @@ lemma decode_asid_pool_invocation_reads_respects_f: lemma decode_asid_control_invocation_reads_respects_f: notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] - notes hoare_whenE_wps[wp_split del] + notes whenE_wps[wp_split del] shows "reads_respects_f aag l (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot @@ -144,7 +144,7 @@ lemma decode_asid_control_invocation_reads_respects_f: apply (wp check_vp_wpR reads_respects_f_inv'[OF get_asid_pool_rev] reads_respects_f_inv'[OF ensure_empty_rev] reads_respects_f_inv'[OF 
lookup_slot_for_cnode_op_rev] - reads_respects_f_inv'[OF ensure_no_children_rev] select_wp + reads_respects_f_inv'[OF ensure_no_children_rev] reads_respects_f_inv'[OF lookup_error_on_failure_rev] gets_apply_ev is_final_cap_reads_respects @@ -174,7 +174,7 @@ lemma decode_asid_control_invocation_reads_respects_f: lemma decode_frame_invocation_reads_respects_f: notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] - notes hoare_whenE_wps[wp_split del] + notes whenE_wps[wp_split del] shows "reads_respects_f aag l (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot @@ -193,7 +193,7 @@ lemma decode_frame_invocation_reads_respects_f: reads_respects_f_inv'[OF ensure_empty_rev] reads_respects_f_inv'[OF get_pte_rev] reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] - reads_respects_f_inv'[OF ensure_no_children_rev] select_wp + reads_respects_f_inv'[OF ensure_no_children_rev] reads_respects_f_inv'[OF lookup_error_on_failure_rev] find_vspace_for_asid_reads_respects is_final_cap_reads_respects @@ -236,7 +236,7 @@ lemma decode_frame_invocation_reads_respects_f: lemma decode_page_table_invocation_reads_respects_f: notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] - notes hoare_whenE_wps[wp_split del] + notes whenE_wps[wp_split del] shows "reads_respects_f aag l (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot @@ -254,7 +254,7 @@ lemma decode_page_table_invocation_reads_respects_f: reads_respects_f_inv'[OF ensure_empty_rev] reads_respects_f_inv'[OF get_pte_rev] reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] - reads_respects_f_inv'[OF ensure_no_children_rev] select_wp + reads_respects_f_inv'[OF ensure_no_children_rev] reads_respects_f_inv'[OF lookup_error_on_failure_rev] find_vspace_for_asid_reads_respects is_final_cap_reads_respects @@ -303,7 +303,7 @@ lemma decode_page_table_invocation_reads_respects_f: lemma arch_decode_invocation_reads_respects_f[Decode_IF_assms]: notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] - notes hoare_whenE_wps[wp_split del] + notes whenE_wps[wp_split del] shows "reads_respects_f aag l (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot diff --git a/proof/infoflow/RISCV64/ArchFinalCaps.thy b/proof/infoflow/RISCV64/ArchFinalCaps.thy index 4dd9854b4e..a87de97586 100644 --- a/proof/infoflow/RISCV64/ArchFinalCaps.thy +++ b/proof/infoflow/RISCV64/ArchFinalCaps.thy @@ -160,7 +160,7 @@ lemma perform_page_invocation_silc_inv: apply (wp mapM_wp[OF _ subset_refl] set_cap_silc_inv mapM_x_wp[OF _ subset_refl] perform_page_table_invocation_silc_inv_get_cap_helper'[where st=st] - hoare_vcg_all_lift hoare_vcg_if_lift static_imp_wp + hoare_vcg_all_lift hoare_vcg_if_lift hoare_weak_lift_imp | wpc | simp only: swp_def o_def fun_app_def K_def | wp (once) hoare_drop_imps)+ @@ -186,7 +186,7 @@ lemma perform_asid_control_invocation_silc_inv: apply (rule hoare_pre) apply (wp modify_wp cap_insert_silc_inv' retype_region_silc_inv[where sz=pageBits] set_cap_silc_inv get_cap_slots_holding_overlapping_caps[where st=st] - delete_objects_silc_inv static_imp_wp + delete_objects_silc_inv hoare_weak_lift_imp | wpc | simp )+ apply (clarsimp simp: authorised_asid_control_inv_def silc_inv_def valid_aci_def ptr_range_def page_bits_def) apply (rule conjI) @@ -250,15 +250,15 @@ lemma arch_invoke_irq_control_silc_inv[FinalCaps_assms]: done lemma invoke_tcb_silc_inv[FinalCaps_assms]: - notes static_imp_wp [wp] - static_imp_conj_wp [wp] + 
notes hoare_weak_lift_imp [wp] + hoare_weak_lift_imp_conj [wp] shows "\silc_inv aag st and einvs and simple_sched_action and pas_refined aag and tcb_inv_wf tinv and K (authorised_tcb_inv aag tinv)\ invoke_tcb tinv \\_. silc_inv aag st\" apply (case_tac tinv) apply ((wp restart_silc_inv hoare_vcg_if_lift suspend_silc_inv mapM_x_wp[OF _ subset_refl] - static_imp_wp + hoare_weak_lift_imp | wpc | simp split del: if_split add: authorised_tcb_inv_def check_cap_at_def | clarsimp @@ -277,7 +277,7 @@ lemma invoke_tcb_silc_inv[FinalCaps_assms]: apply (strengthen use_no_cap_to_obj_asid_strg | clarsimp | simp only: conj_ac cong: conj_cong imp_cong - | wp checked_insert_pas_refined checked_cap_insert_silc_inv hoare_vcg_all_lift_R + | wp checked_insert_pas_refined checked_cap_insert_silc_inv hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R cap_delete_silc_inv_not_transferable cap_delete_pas_refined' cap_delete_deletes diff --git a/proof/infoflow/RISCV64/ArchIRQMasks_IF.thy b/proof/infoflow/RISCV64/ArchIRQMasks_IF.thy index f192b8de6c..1eefa73f01 100644 --- a/proof/infoflow/RISCV64/ArchIRQMasks_IF.thy +++ b/proof/infoflow/RISCV64/ArchIRQMasks_IF.thy @@ -30,7 +30,7 @@ crunch irq_masks[IRQMasks_IF_assms, wp]: invoke_untyped "\s. P (irq_mask simp: crunch_simps no_irq_clearMemory mapM_x_def_bak unless_def) crunch irq_masks[IRQMasks_IF_assms, wp]: finalise_cap "\s. P (irq_masks_of_state s)" - ( wp: select_wp crunch_wps dmo_wp no_irq + ( wp: crunch_wps dmo_wp no_irq simp: crunch_simps no_irq_setVSpaceRoot no_irq_hwASIDFlush) crunch irq_masks[IRQMasks_IF_assms, wp]: send_signal "\s. P (irq_masks_of_state s)" @@ -77,14 +77,14 @@ lemma dmo_getActiveIRQ_return_axiom[IRQMasks_IF_assms, wp]: apply (simp add: getActiveIRQ_def) apply (rule hoare_pre, rule dmo_wp) apply (insert irq_oracle_max_irq) - apply (wp alternative_wp select_wp dmo_getActiveIRQ_irq_masks) + apply (wp dmo_getActiveIRQ_irq_masks) apply clarsimp done crunch irq_masks[IRQMasks_IF_assms, wp]: activate_thread "\s. P (irq_masks_of_state s)" crunch irq_masks[IRQMasks_IF_assms, wp]: schedule "\s. P (irq_masks_of_state s)" - (wp: dmo_wp alternative_wp select_wp crunch_wps simp: crunch_simps) + (wp: dmo_wp crunch_wps simp: crunch_simps) end @@ -125,23 +125,23 @@ lemma invoke_tcb_irq_masks[IRQMasks_IF_assms]: (* just ThreadControl left *) apply (simp add: split_def cong: option.case_cong) apply wpsimp+ - apply (rule hoare_post_impErr[OF cap_delete_irq_masks[where P=P]]) + apply (rule hoare_strengthen_postE[OF cap_delete_irq_masks[where P=P]]) apply blast apply blast - apply (wpsimp wp: hoare_vcg_all_lift_R hoare_vcg_const_imp_lift_R + apply (wpsimp wp: hoare_vcg_all_liftE_R hoare_vcg_const_imp_lift_R hoare_vcg_all_lift hoare_drop_imps checked_cap_insert_domain_sep_inv)+ apply (rule_tac Q="\ r s. domain_sep_inv False st s \ P (irq_masks_of_state s)" - and E="\_ s. P (irq_masks_of_state s)" in hoare_post_impErr) + and E="\_ s. P (irq_masks_of_state s)" in hoare_strengthen_postE) apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) apply fastforce apply blast - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift checked_cap_insert_domain_sep_inv)+ + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checked_cap_insert_domain_sep_inv)+ apply (rule_tac Q="\ r s. domain_sep_inv False st s \ P (irq_masks_of_state s)" - and E="\_ s. P (irq_masks_of_state s)" in hoare_post_impErr) + and E="\_ s. 
P (irq_masks_of_state s)" in hoare_strengthen_postE) apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) apply fastforce apply blast - apply (simp add: option_update_thread_def | wp static_imp_wp hoare_vcg_all_lift | wpc)+ + apply (simp add: option_update_thread_def | wp hoare_weak_lift_imp hoare_vcg_all_lift | wpc)+ by fastforce+ lemma init_arch_objects_irq_masks: diff --git a/proof/infoflow/RISCV64/ArchIpc_IF.thy b/proof/infoflow/RISCV64/ArchIpc_IF.thy index 7898354887..020e234073 100644 --- a/proof/infoflow/RISCV64/ArchIpc_IF.thy +++ b/proof/infoflow/RISCV64/ArchIpc_IF.thy @@ -387,7 +387,7 @@ lemma set_mrs_equiv_but_for_labels[Ipc_IF_assms]: pasObjectAbs aag x \ L) | _ \ True))" in hoare_strengthen_post) apply (wp mapM_x_wp' store_word_offs_equiv_but_for_labels | simp add: split_def)+ - apply (case_tac xa, clarsimp split: if_split_asm elim!: in_set_zipE) + apply (case_tac x, clarsimp split: if_split_asm elim!: in_set_zipE) apply (clarsimp simp: for_each_byte_of_word_def) apply (erule bspec) apply (clarsimp simp: ptr_range_def) @@ -419,7 +419,7 @@ lemma set_mrs_equiv_but_for_labels[Ipc_IF_assms]: apply (simp add: word_size_def) apply (erule is_aligned_no_overflow') apply simp - apply (wp set_object_equiv_but_for_labels hoare_vcg_all_lift static_imp_wp | simp)+ + apply (wp set_object_equiv_but_for_labels hoare_vcg_all_lift hoare_weak_lift_imp | simp)+ apply (fastforce dest: get_tcb_not_asid_pool_at)+ done diff --git a/proof/infoflow/RISCV64/ArchNoninterference.thy b/proof/infoflow/RISCV64/ArchNoninterference.thy index dcf4c00cfc..6ebdc9d167 100644 --- a/proof/infoflow/RISCV64/ArchNoninterference.thy +++ b/proof/infoflow/RISCV64/ArchNoninterference.thy @@ -19,9 +19,10 @@ lemma do_user_op_if_integrity[Noninterference_assms]: \\_. integrity aag X st\" apply (simp add: do_user_op_if_def) apply (wpsimp wp: dmo_user_memory_update_respects_Write dmo_device_update_respects_Write - hoare_vcg_all_lift hoare_vcg_imp_lift) + hoare_vcg_all_lift hoare_vcg_imp_lift + wp_del: select_wp) apply (rule hoare_pre_cont) - apply (wp select_wp | wpc | clarsimp)+ + apply (wp | wpc | clarsimp)+ apply (rule conjI) apply clarsimp apply (simp add: restrict_map_def ptable_lift_s_def ptable_rights_s_def split: if_splits) @@ -39,12 +40,12 @@ lemma do_user_op_if_globals_equiv_scheduler[Noninterference_assms]: \\_. 
globals_equiv_scheduler st\" apply (simp add: do_user_op_if_def) apply (wpsimp wp: dmo_user_memory_update_globals_equiv_scheduler - dmo_device_memory_update_globals_equiv_scheduler select_wp)+ + dmo_device_memory_update_globals_equiv_scheduler)+ apply (auto simp: ptable_lift_s_def ptable_rights_s_def) done crunch silc_dom_equiv[Noninterference_assms, wp]: do_user_op_if "silc_dom_equiv aag st" - (ignore: do_machine_op user_memory_update wp: crunch_wps select_wp) + (ignore: do_machine_op user_memory_update wp: crunch_wps) lemma sameFor_scheduler_affects_equiv[Noninterference_assms]: "\ (s,s') \ same_for aag PSched; (s,s') \ same_for aag (Partition l); @@ -93,7 +94,7 @@ lemma arch_globals_equiv_strengthener_thread_independent[Noninterference_assms]: lemma integrity_asids_update_reference_state[Noninterference_assms]: "is_subject aag t - \ integrity_asids aag {pasSubject aag} x a s (s\kheap := kheap s(t \ blah)\)" + \ integrity_asids aag {pasSubject aag} x a s (s\kheap := (kheap s)(t \ blah)\)" by (clarsimp simp: opt_map_def) lemma inte_obj_arch: @@ -352,7 +353,7 @@ lemma getActiveIRQ_ret_no_dmo[Noninterference_assms, wp]: apply (simp add: getActiveIRQ_def) apply (rule hoare_pre) apply (insert irq_oracle_max_irq) - apply (wp alternative_wp select_wp dmo_getActiveIRQ_irq_masks) + apply (wp dmo_getActiveIRQ_irq_masks) apply clarsimp done diff --git a/proof/infoflow/RISCV64/ArchPasUpdates.thy b/proof/infoflow/RISCV64/ArchPasUpdates.thy index a7392bebfd..f785571dc1 100644 --- a/proof/infoflow/RISCV64/ArchPasUpdates.thy +++ b/proof/infoflow/RISCV64/ArchPasUpdates.thy @@ -14,7 +14,7 @@ named_theorems PasUpdates_assms crunches arch_post_cap_deletion, arch_finalise_cap, prepare_thread_delete for domain_fields[PasUpdates_assms, wp]: "domain_fields P" - ( wp: syscall_valid select_wp crunch_wps rec_del_preservation cap_revoke_preservation modify_wp + ( wp: syscall_valid crunch_wps rec_del_preservation cap_revoke_preservation modify_wp simp: crunch_simps check_cap_at_def filterM_mapM unless_def ignore: without_preemption filterM rec_del check_cap_at cap_revoke ignore_del: retype_region_ext create_cap_ext cap_insert_ext ethread_set cap_move_ext diff --git a/proof/infoflow/RISCV64/ArchRetype_IF.thy b/proof/infoflow/RISCV64/ArchRetype_IF.thy index 98d74b5619..1a1781c1fd 100644 --- a/proof/infoflow/RISCV64/ArchRetype_IF.thy +++ b/proof/infoflow/RISCV64/ArchRetype_IF.thy @@ -232,7 +232,7 @@ lemma copy_global_mappings_globals_equiv: "\globals_equiv s and (\s. x \ riscv_global_pt (arch_state s) \ is_aligned x pt_bits)\ copy_global_mappings x \\_. globals_equiv s\" - unfolding copy_global_mappings_def including no_pre + unfolding copy_global_mappings_def including classic_wp_pre apply simp apply wp apply (rule_tac Q="\_. globals_equiv s and (\s. 
x \ riscv_global_pt (arch_state s) \ @@ -413,30 +413,31 @@ lemma reset_untyped_cap_reads_respects_g: apply (frule(1) caps_of_state_valid) apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps free_index_of_def invs_valid_global_objs) - apply (simp add: aligned_add_aligned is_aligned_shiftl) - apply (clarsimp simp: Kernel_Config.resetChunkBits_def) + apply (simp add: aligned_add_aligned is_aligned_shiftl) + apply (clarsimp simp: Kernel_Config.resetChunkBits_def) apply (rule hoare_pre) apply (wp preemption_point_inv' set_untyped_cap_invs_simple set_cap_cte_wp_at set_cap_no_overlap only_timer_irq_inv_pres[where Q=\, OF _ set_cap_domain_sep_inv] + irq_state_independent_A_conjI | simp)+ apply (strengthen empty_descendants_range_in) - apply (wp only_timer_irq_inv_pres[where P=\ and Q=\] no_irq_clearMemory - | simp | wp (once) dmo_wp)+ - apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps bits_of_def) - apply (frule(1) caps_of_state_valid) - apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps free_index_of_def) - apply (wp | simp)+ - apply (wp delete_objects_reads_respects_g) - apply (simp add: if_apply_def2) - apply (strengthen invs_valid_global_objs) - apply (wp add: delete_objects_invs_ex hoare_vcg_const_imp_lift - delete_objects_pspace_no_overlap_again - only_timer_irq_inv_pres[where P=\ and Q=\] - delete_objects_valid_arch_state - del: Untyped_AI.delete_objects_pspace_no_overlap - | simp)+ - apply (rule get_cap_reads_respects_g) - apply (wp get_cap_wp) + apply (wp only_timer_irq_inv_pres[where P=\ and Q=\] no_irq_clearMemory + | simp | wp (once) dmo_wp)+ + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps bits_of_def) + apply (frule(1) caps_of_state_valid) + apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps free_index_of_def) + apply (wp | simp)+ + apply (wp delete_objects_reads_respects_g) + apply (simp add: if_apply_def2) + apply (strengthen invs_valid_global_objs) + apply (wp add: delete_objects_invs_ex hoare_vcg_const_imp_lift + delete_objects_pspace_no_overlap_again + only_timer_irq_inv_pres[where P=\ and Q=\] + delete_objects_valid_arch_state + del: Untyped_AI.delete_objects_pspace_no_overlap + | simp)+ + apply (rule get_cap_reads_respects_g) + apply (wp get_cap_wp) apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps bits_of_def) apply (frule(1) caps_of_state_valid) apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps @@ -530,9 +531,9 @@ lemma invoke_untyped_reads_respects_g_wcap[Retype_IF_assms]: and Q="\_. invs and valid_untyped_inv_wcap ui (Some (UntypedCap dev ptr sz (If reset 0 idx))) and ct_active and (\s. reset \ pspace_no_overlap {ptr .. 
ptr + 2 ^ sz - 1} s)" - in hoare_post_impErr) - apply (rule hoare_pre, wp hoare_whenE_wp) - apply (rule validE_validE_R, rule hoare_post_impErr, rule reset_untyped_cap_invs_etc) + in hoare_strengthen_postE) + apply (rule hoare_pre, wp whenE_wp) + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule reset_untyped_cap_invs_etc) apply (clarsimp simp only: if_True simp_thms, intro conjI, assumption+) apply simp apply assumption diff --git a/proof/infoflow/RISCV64/ArchScheduler_IF.thy b/proof/infoflow/RISCV64/ArchScheduler_IF.thy index f39e0482a4..8f19866dec 100644 --- a/proof/infoflow/RISCV64/ArchScheduler_IF.thy +++ b/proof/infoflow/RISCV64/ArchScheduler_IF.thy @@ -130,12 +130,12 @@ lemma thread_set_context_globals_equiv[Scheduler_IF_assms]: lemma arch_scheduler_affects_equiv_update[Scheduler_IF_assms]: "arch_scheduler_affects_equiv st s - \ arch_scheduler_affects_equiv st (s\kheap := kheap s(x \ TCB y')\)" + \ arch_scheduler_affects_equiv st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: arch_scheduler_affects_equiv_def) lemma equiv_asid_equiv_update[Scheduler_IF_assms]: "\ get_tcb x s = Some y; equiv_asid asid st s \ - \ equiv_asid asid st (s\kheap := kheap s(x \ TCB y')\)" + \ equiv_asid asid st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: equiv_asid_def obj_at_def get_tcb_def) end @@ -184,8 +184,7 @@ lemma arch_switch_to_thread_globals_equiv_scheduler[Scheduler_IF_assms]: arch_switch_to_thread thread \\_. globals_equiv_scheduler sta\" unfolding arch_switch_to_thread_def storeWord_def - by (wpsimp wp: dmo_wp modify_wp thread_get_wp' - | wp (once) globals_equiv_scheduler_inv'[where P="\"])+ + by (wpsimp wp: dmo_wp modify_wp thread_get_wp' globals_equiv_scheduler_inv'[where P="\"]) crunches arch_activate_idle_thread for silc_dom_equiv[Scheduler_IF_assms, wp]: "silc_dom_equiv aag st" @@ -363,7 +362,7 @@ lemma thread_set_scheduler_affects_equiv[Scheduler_IF_assms, wp]: split: option.splits kernel_object.splits) apply (subst arch_tcb_update_aux) apply simp - apply (subgoal_tac "s = (s\kheap := kheap s(idle_thread s \ TCB y)\)", simp) + apply (subgoal_tac "s = (s\kheap := (kheap s)(idle_thread s \ TCB y)\)", simp) apply (rule state.equality) apply (rule ext) apply simp+ diff --git a/proof/infoflow/RISCV64/ArchSyscall_IF.thy b/proof/infoflow/RISCV64/ArchSyscall_IF.thy index c49fa7564c..0c098e2729 100644 --- a/proof/infoflow/RISCV64/ArchSyscall_IF.thy +++ b/proof/infoflow/RISCV64/ArchSyscall_IF.thy @@ -43,7 +43,7 @@ lemma sts_authorised_for_globals_inv[Syscall_IF_assms]: apply wpsimp+ apply (rename_tac page_invocation) apply (case_tac page_invocation) - apply (simp | wp hoare_ex_wp)+ + apply (simp | wp hoare_vcg_ex_lift)+ done lemma dmo_maskInterrupt_globals_equiv[Syscall_IF_assms, wp]: diff --git a/proof/infoflow/RISCV64/ArchTcb_IF.thy b/proof/infoflow/RISCV64/ArchTcb_IF.thy index 1e17e1d882..62e84f2347 100644 --- a/proof/infoflow/RISCV64/ArchTcb_IF.thy +++ b/proof/infoflow/RISCV64/ArchTcb_IF.thy @@ -94,22 +94,22 @@ lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: supply set_priority_extended.dxo_eq[simp del] reschedule_required_ext_extended.dxo_eq[simp del] apply (simp add: split_def cong: option.case_cong) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule_tac P="case ep of Some v \ length v = word_bits | _ \ True" in hoare_gen_asm) apply wp apply ((strengthen use_no_cap_to_obj_asid_strg tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] tcb_cap_always_valid_strg[where p="tcb_cnode_index (Suc 0)"] - | simp add: conj_comms(1, 2) 
del: hoare_True_E_R - | rule wp_split_const_if wp_split_const_if_R hoare_vcg_all_lift_R + | simp add: conj_comms(1, 2) + | rule wp_split_const_if wp_split_const_if_R hoare_vcg_all_liftE_R hoare_vcg_E_elim hoare_vcg_const_imp_lift_R hoare_vcg_R_conj | (wp check_cap_inv2[where Q="\_. pas_refined aag"] check_cap_inv2[where Q="\_ s. t \ idle_thread s"] out_invs_trivial case_option_wpE cap_delete_deletes cap_delete_valid_cap cap_insert_valid_cap out_cte_at cap_insert_cte_at cap_delete_cte_at out_valid_cap out_tcb_valid - hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_invs_trivial[OF ball_tcb_cap_casesI] hoare_vcg_all_lift thread_set_valid_cap out_emptyable @@ -123,9 +123,8 @@ lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid check_cap_inv2[where Q="\_. P"] cap_delete_P cap_insert_P thread_set_P thread_set_P' set_mcpriority_P set_mcpriority_idle_thread - dxo_wp_weak static_imp_wp) + dxo_wp_weak hoare_weak_lift_imp) | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def option_update_thread_def - del: hoare_True_E_R | wpc)+) (*slow*) apply (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] is_cap_simps is_valid_vtable_root_def @@ -140,7 +139,7 @@ lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: lemma tc_reads_respects_f[Tcb_IF_assms]: assumes domains_distinct[wp]: "pas_domains_distinct aag" and tc[simp]: "ti = ThreadControl x41 x42 x43 x44 x45 x46 x47 x48" - notes validE_valid[wp del] static_imp_wp [wp] + notes validE_valid[wp del] hoare_weak_lift_imp [wp] shows "reads_respects_f aag l (silc_inv aag st and only_timer_irq_inv irq st' and einvs and simple_sched_action @@ -166,7 +165,7 @@ lemma tc_reads_respects_f[Tcb_IF_assms]: check_cap_inv[OF check_cap_inv[OF cap_insert_ct]] get_thread_state_rev[THEN reads_respects_f[where aag=aag and st=st and Q=\]] - hoare_vcg_all_lift_R hoare_vcg_all_lift + hoare_vcg_all_liftE_R hoare_vcg_all_lift cap_delete_reads_respects[where st=st] checked_insert_pas_refined thread_set_pas_refined reads_respects_f[OF checked_insert_reads_respects, where st=st] @@ -198,7 +197,7 @@ lemma tc_reads_respects_f[Tcb_IF_assms]: check_cap_inv[OF check_cap_inv[OF cap_insert_cur_domain]] check_cap_inv[OF check_cap_inv[OF cap_insert_ct]] get_thread_state_rev[THEN reads_respects_f[where st=st and Q=\]] - hoare_vcg_all_lift_R hoare_vcg_all_lift + hoare_vcg_all_liftE_R hoare_vcg_all_lift cap_delete_reads_respects[where st=st] checked_insert_pas_refined thread_set_pas_refined reads_respects_f[OF checked_insert_reads_respects] checked_cap_insert_silc_inv[where st=st] @@ -217,7 +216,7 @@ lemma tc_reads_respects_f[Tcb_IF_assms]: invs_psp_aligned invs_vspace_objs invs_arch_state | wp (once) hoare_drop_imp)+ apply (simp add: option_update_thread_def tcb_cap_cases_def - | wp static_imp_wp static_imp_conj_wp thread_set_pas_refined + | wp hoare_weak_lift_imp hoare_weak_lift_imp_conj thread_set_pas_refined reads_respects_f[OF thread_set_reads_respects, where st=st and Q="\"] | wpc)+ apply (wp hoare_vcg_all_lift thread_set_tcb_fault_handler_update_invs diff --git a/proof/infoflow/RISCV64/ArchUserOp_IF.thy b/proof/infoflow/RISCV64/ArchUserOp_IF.thy index 33680e99cb..7e0168d38f 100644 --- a/proof/infoflow/RISCV64/ArchUserOp_IF.thy +++ b/proof/infoflow/RISCV64/ArchUserOp_IF.thy @@ -820,7 +820,7 @@ lemma do_user_op_reads_respects_g: apply (clarsimp simp: globals_equiv_def reads_equiv_g_def) 
apply (rule spec_equiv_valid_guard_imp) apply (wpsimp wp: dmo_user_memory_update_reads_respects_g dmo_device_state_update_reads_respects_g - dmo_device_state_update_reads_respects_g select_ev select_wp dmo_wp) + dmo_device_state_update_reads_respects_g select_ev dmo_wp) apply clarsimp apply (rule conjI) apply clarsimp diff --git a/proof/infoflow/RISCV64/Example_Valid_State.thy b/proof/infoflow/RISCV64/Example_Valid_State.thy index 9095cc2ace..cc5a6f658e 100644 --- a/proof/infoflow/RISCV64/Example_Valid_State.thy +++ b/proof/infoflow/RISCV64/Example_Valid_State.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -8,6 +9,7 @@ theory Example_Valid_State imports "ArchNoninterference" "Lib.Distinct_Cmd" + "AInvs.KernelInit_AI" begin section \Example\ @@ -218,7 +220,8 @@ definition "High_cnode_ptr = pptr_base + 0x18000" definition "Silc_cnode_ptr = pptr_base + 0x20000" definition "irq_cnode_ptr = pptr_base + 0x28000" -definition "shared_page_ptr = pptr_base + 0x200000" +definition "shared_page_ptr_virt = pptr_base + 0x200000" +definition "shared_page_ptr_phys = addrFromPPtr shared_page_ptr_virt" definition "timer_irq \ 10" (* not sure exactly how this fits in *) @@ -236,15 +239,15 @@ lemmas s0_ptr_defs = ntfn_ptr_def irq_cnode_ptr_def Low_pd_ptr_def High_pd_ptr_def Low_pt_ptr_def High_pt_ptr_def Low_tcb_ptr_def High_tcb_ptr_def idle_tcb_ptr_def timer_irq_def Low_prio_def High_prio_def Low_time_slice_def Low_domain_def High_domain_def init_irq_node_ptr_def riscv_global_pt_ptr_def - pptr_base_def pptrBase_def canonical_bit_def shared_page_ptr_def + pptr_base_def pptrBase_def canonical_bit_def shared_page_ptr_virt_def (* Distinctness proof of kernel pointers. 
*) distinct ptrs_distinct[simp]: Low_tcb_ptr High_tcb_ptr idle_tcb_ptr ntfn_ptr - Low_pt_ptr High_pt_ptr shared_page_ptr Low_pd_ptr High_pd_ptr + Low_pt_ptr High_pt_ptr shared_page_ptr_virt Low_pd_ptr High_pd_ptr Low_cnode_ptr High_cnode_ptr Low_pool_ptr High_pool_ptr - Silc_cnode_ptr irq_cnode_ptr riscv_global_pt_ptr shared_page_ptr + Silc_cnode_ptr irq_cnode_ptr riscv_global_pt_ptr by (auto simp: s0_ptr_defs) @@ -384,7 +387,7 @@ definition Low_caps :: cnode_contents where (the_nat_to_bl_10 4) \ ArchObjectCap (ASIDPoolCap Low_pool_ptr Low_asid), (the_nat_to_bl_10 5) - \ ArchObjectCap (FrameCap shared_page_ptr vm_read_write RISCVLargePage False (Some (Low_asid,0))), + \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_write RISCVLargePage False (Some (Low_asid,0))), (the_nat_to_bl_10 6) \ ArchObjectCap (PageTableCap Low_pt_ptr (Some (Low_asid,0))), (the_nat_to_bl_10 318) @@ -413,7 +416,7 @@ lemma Low_caps_ran: ArchObjectCap (PageTableCap Low_pd_ptr (Some (Low_asid,0))), ArchObjectCap (PageTableCap Low_pt_ptr (Some (Low_asid,0))), ArchObjectCap (ASIDPoolCap Low_pool_ptr Low_asid), - ArchObjectCap (FrameCap shared_page_ptr vm_read_write RISCVLargePage False (Some (Low_asid,0))), + ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_write RISCVLargePage False (Some (Low_asid,0))), NotificationCap ntfn_ptr 0 {AllowSend}, NullCap}" apply (rule equalityI) @@ -438,7 +441,7 @@ definition High_caps :: cnode_contents where (the_nat_to_bl_10 4) \ ArchObjectCap (ASIDPoolCap High_pool_ptr High_asid), (the_nat_to_bl_10 5) - \ ArchObjectCap (FrameCap shared_page_ptr vm_read_only RISCVLargePage False (Some (High_asid,0))), + \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (High_asid,0))), (the_nat_to_bl_10 6) \ ArchObjectCap (PageTableCap High_pt_ptr (Some (High_asid,0))), (the_nat_to_bl_10 318) @@ -454,7 +457,7 @@ lemma High_caps_ran: ArchObjectCap (PageTableCap High_pd_ptr (Some (High_asid,0))), ArchObjectCap (PageTableCap High_pt_ptr (Some (High_asid,0))), ArchObjectCap (ASIDPoolCap High_pool_ptr High_asid), - ArchObjectCap (FrameCap shared_page_ptr vm_read_only RISCVLargePage False (Some (High_asid,0))), + ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (High_asid,0))), NotificationCap ntfn_ptr 0 {AllowRecv}, NullCap}" apply (rule equalityI) @@ -473,7 +476,7 @@ definition Silc_caps :: cnode_contents where ((the_nat_to_bl_10 2) \ CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2), (the_nat_to_bl_10 5) - \ ArchObjectCap (FrameCap shared_page_ptr vm_read_only RISCVLargePage False (Some (Silc_asid,0))), + \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (Silc_asid,0))), (the_nat_to_bl_10 318) \ NotificationCap ntfn_ptr 0 {AllowSend} )" @@ -483,7 +486,7 @@ definition Silc_cnode :: kernel_object where lemma Silc_caps_ran: "ran Silc_caps = {CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2), - ArchObjectCap (FrameCap shared_page_ptr vm_read_only RISCVLargePage False (Some (Silc_asid,0))), + ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (Silc_asid,0))), NotificationCap ntfn_ptr 0 {AllowSend}, NullCap}" apply (rule equalityI) @@ -514,7 +517,7 @@ abbreviation ppn_from_addr :: "paddr \ pte_ppn" where abbreviation Low_pt' :: pt where "Low_pt' \ (\_. 
InvalidPTE) - (0 := PagePTE (ppn_from_addr (addrFromPPtr shared_page_ptr)) {} vm_read_write)" + (0 := PagePTE (ppn_from_addr shared_page_ptr_phys) {} vm_read_write)" definition Low_pt :: kernel_object where "Low_pt \ ArchObj (PageTable Low_pt')" @@ -533,7 +536,7 @@ text \High's VSpace (PageDirectory)\ abbreviation High_pt' :: pt where "High_pt' \ (\_. InvalidPTE) - (0 := PagePTE (ppn_from_addr (addrFromPPtr shared_page_ptr)) {} vm_read_only)" + (0 := PagePTE (ppn_from_addr shared_page_ptr_phys) {} vm_read_only)" definition High_pt :: kernel_object where "High_pt \ ArchObj (PageTable High_pt')" @@ -643,7 +646,7 @@ definition kh0 :: kheap where Low_tcb_ptr \ Low_tcb, High_tcb_ptr \ High_tcb, idle_tcb_ptr \ idle_tcb, - shared_page_ptr \ shared_page, + shared_page_ptr_virt \ shared_page, riscv_global_pt_ptr \ init_global_pt)" lemma irq_node_offs_min: @@ -725,7 +728,7 @@ lemma irq_node_offs_range_distinct[simp]: "High_tcb_ptr \ irq_node_offs_range" "idle_tcb_ptr \ irq_node_offs_range" "riscv_global_pt_ptr \ irq_node_offs_range" - "shared_page_ptr \ irq_node_offs_range" + "shared_page_ptr_virt \ irq_node_offs_range" by(simp add:irq_node_offs_range_def s0_ptr_defs)+ lemma irq_node_offs_distinct[simp]: @@ -744,11 +747,11 @@ lemma irq_node_offs_distinct[simp]: "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ High_tcb_ptr" "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ idle_tcb_ptr" "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ riscv_global_pt_ptr" - "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ shared_page_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ shared_page_ptr_virt" by (simp add:not_inD[symmetric, OF _ irq_node_offs_in_range])+ lemma kh0_dom: - "dom kh0 = {shared_page_ptr, riscv_global_pt_ptr, idle_tcb_ptr, High_tcb_ptr, Low_tcb_ptr, + "dom kh0 = {shared_page_ptr_virt, riscv_global_pt_ptr, idle_tcb_ptr, High_tcb_ptr, Low_tcb_ptr, High_pt_ptr, Low_pt_ptr, High_pd_ptr, Low_pd_ptr, irq_cnode_ptr, ntfn_ptr, Silc_cnode_ptr, High_pool_ptr, Low_pool_ptr, High_cnode_ptr, Low_cnode_ptr} \ irq_node_offs_range" @@ -764,7 +767,7 @@ lemmas kh0_SomeD' = set_mp[OF equalityD1[OF kh0_dom[simplified dom_def]], OF Col lemma kh0_SomeD: "kh0 x = Some y \ - x = shared_page_ptr \ y = shared_page \ + x = shared_page_ptr_virt \ y = shared_page \ x = riscv_global_pt_ptr \ y = init_global_pt \ x = idle_tcb_ptr \ y = idle_tcb \ x = High_tcb_ptr \ y = High_tcb \ @@ -839,7 +842,7 @@ definition s0_internal :: "det_ext state" where lemma kh_s0_def: "(kheap s0_internal x = Some y) = ( - x = shared_page_ptr \ y = shared_page \ + x = shared_page_ptr_virt \ y = shared_page \ x = riscv_global_pt_ptr \ y = init_global_pt \ x = idle_tcb_ptr \ y = idle_tcb \ x = High_tcb_ptr \ y = High_tcb \ @@ -866,7 +869,7 @@ subsubsection \Defining the policy graph\ definition Sys1AgentMap :: "(auth_graph_label subject_label) agent_map" where "Sys1AgentMap \ \ \set the range of the shared_page to Low, default everything else to IRQ0\ - (\p. if shared_page_ptr \ p \ p < shared_page_ptr + 0x200000 + (\p. if p \ ptr_range shared_page_ptr_virt (pageBitsForSize RISCVLargePage) then partition_label Low else partition_label IRQ0) (Low_cnode_ptr := partition_label Low, @@ -899,11 +902,11 @@ lemma Sys1AgentMap_simps: "Sys1AgentMap Low_tcb_ptr = partition_label Low" "Sys1AgentMap High_tcb_ptr = partition_label High" "Sys1AgentMap idle_tcb_ptr = partition_label Low" - "\p. \ shared_page_ptr \ p; p < shared_page_ptr + 0x200000 \ + "\p. 
p \ ptr_range shared_page_ptr_virt (pageBitsForSize RISCVLargePage) \ Sys1AgentMap p = partition_label Low" unfolding Sys1AgentMap_def apply simp_all - by (auto simp: ptrFromPAddr_def pptrBaseOffset_def paddrBase_def s0_ptr_defs) + by (auto simp: s0_ptr_defs ptr_range_def) definition Sys1ASIDMap :: "(auth_graph_label subject_label) agent_asid_map" where "Sys1ASIDMap \ @@ -950,17 +953,17 @@ lemma s0_caps_of_state : ((Low_cnode_ptr,(the_nat_to_bl_10 3)), ArchObjectCap (PageTableCap Low_pd_ptr (Some (Low_asid,0)))), ((Low_cnode_ptr,(the_nat_to_bl_10 6)), ArchObjectCap (PageTableCap Low_pt_ptr (Some (Low_asid,0)))), ((Low_cnode_ptr,(the_nat_to_bl_10 4)), ArchObjectCap (ASIDPoolCap Low_pool_ptr Low_asid)), - ((Low_cnode_ptr,(the_nat_to_bl_10 5)), ArchObjectCap (FrameCap shared_page_ptr vm_read_write RISCVLargePage False (Some (Low_asid, 0)))), + ((Low_cnode_ptr,(the_nat_to_bl_10 5)), ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_write RISCVLargePage False (Some (Low_asid, 0)))), ((Low_cnode_ptr,(the_nat_to_bl_10 318)), NotificationCap ntfn_ptr 0 {AllowSend}), ((High_cnode_ptr,(the_nat_to_bl_10 1)), ThreadCap High_tcb_ptr), ((High_cnode_ptr,(the_nat_to_bl_10 2)), CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2)), ((High_cnode_ptr,(the_nat_to_bl_10 3)), ArchObjectCap (PageTableCap High_pd_ptr (Some (High_asid,0)))), ((High_cnode_ptr,(the_nat_to_bl_10 6)), ArchObjectCap (PageTableCap High_pt_ptr (Some (High_asid,0)))), ((High_cnode_ptr,(the_nat_to_bl_10 4)), ArchObjectCap (ASIDPoolCap High_pool_ptr High_asid)), - ((High_cnode_ptr,(the_nat_to_bl_10 5)), ArchObjectCap (FrameCap shared_page_ptr vm_read_only RISCVLargePage False (Some (High_asid, 0)))), + ((High_cnode_ptr,(the_nat_to_bl_10 5)), ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (High_asid, 0)))), ((High_cnode_ptr,(the_nat_to_bl_10 318)), NotificationCap ntfn_ptr 0 {AllowRecv}) , ((Silc_cnode_ptr,(the_nat_to_bl_10 2)), CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2)), - ((Silc_cnode_ptr,(the_nat_to_bl_10 5)), ArchObjectCap (FrameCap shared_page_ptr vm_read_only RISCVLargePage False (Some (Silc_asid, 0)))), + ((Silc_cnode_ptr,(the_nat_to_bl_10 5)), ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (Silc_asid, 0)))), ((Silc_cnode_ptr,(the_nat_to_bl_10 318)), NotificationCap ntfn_ptr 0 {AllowSend}), ((Low_tcb_ptr,(tcb_cnode_index 0)), CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2)), ((Low_tcb_ptr,(tcb_cnode_index 1)), ArchObjectCap (PageTableCap Low_pd_ptr (Some (Low_asid,0)))), @@ -1068,7 +1071,7 @@ lemma High_pd_is_aligned[simp]: by (clarsimp simp: s0_ptr_defs pt_bits_def table_size_def ptTranslationBits_def pte_bits_def word_size_bits_def is_aligned_def) lemma shared_page_ptr_is_aligned[simp]: - "is_aligned shared_page_ptr pt_bits" + "is_aligned shared_page_ptr_virt pt_bits" by (clarsimp simp: s0_ptr_defs pt_bits_def table_size_def ptTranslationBits_def pte_bits_def word_size_bits_def is_aligned_def) lemma vs_lookup_s0_SomeD: @@ -1099,13 +1102,18 @@ lemma vs_lookup_s0_SomeD: pool_for_asid_s0 asid_pools_of_s0 vspace_for_pool_def split: if_splits)+ +lemma pt_bits_left_max_minus_1_pageBitsForSize: + "pt_bits_left (max_pt_level - 1) = pageBitsForSize RISCVLargePage" + apply (clarsimp simp: pt_bits_left_def max_pt_level_def2) + done + lemma Sys1_pas_refined: "pas_refined Sys1PAS s0_internal" apply (clarsimp simp: pas_refined_def) apply (intro conjI) apply (simp add: Sys1_pas_wellformed) apply (clarsimp simp: irq_map_wellformed_aux_def s0_internal_def Sys1AgentMap_def 
Sys1PAS_def) - apply (clarsimp simp: s0_ptr_defs ptrFromPAddr_def pptrBaseOffset_def paddrBase_def) + apply (clarsimp simp: s0_ptr_defs ptr_range_def) apply word_bitwise apply (clarsimp simp: tcb_domain_map_wellformed_aux_def minBound_word High_domain_def Low_domain_def Sys1PAS_def Sys1AgentMap_def default_domain_def) @@ -1116,8 +1124,6 @@ lemma Sys1_pas_refined: apply (elim disjE; clarsimp simp: Sys1AgentMap_simps cap_auth_conferred_def ptr_range_def arch_cap_auth_conferred_def vspace_cap_rights_to_auth_def vm_read_write_def vm_read_only_def cap_rights_to_auth_def) - apply ((fastforce dest: Sys1AgentMap_simps(15) elim: le_less_trans - simp: pt_bits_left_def bit_simps max_pt_level_def2 s0_ptr_defs)+)[3] apply (drule s0_caps_of_state, clarsimp) apply (elim disjE, simp_all)[1] apply (clarsimp simp: state_refs_of_def thread_st_auth_def tcb_states_of_state_s0 @@ -1128,16 +1134,11 @@ lemma Sys1_pas_refined: apply (clarsimp simp: state_vrefs_def) apply (drule vs_lookup_s0_SomeD) apply (elim disjE; clarsimp) - apply ((clarsimp simp: aobjs_of_Some s0_internal_def kh0_obj_def opt_map_def - vs_refs_aux_def Sys1AgentMap_def Sys1AuthGraph_def - graph_of_def pte_ref2_def ptrFromPAddr_addr_from_ppn' - dest!: kh0_SomeD split: option.splits if_splits)+)[4] - apply ((clarsimp simp: aobjs_of_Some s0_internal_def kh0_obj_def opt_map_def vs_refs_aux_def - vm_read_only_def vspace_cap_rights_to_auth_def ptr_range_def pte_ref2_def - Sys1AuthGraph_def Sys1AgentMap_simps graph_of_def ptrFromPAddr_addr_from_ppn' - dest!: kh0_SomeD split: option.splits if_splits, - fastforce dest: Sys1AgentMap_simps(15) elim: le_less_trans - simp: pt_bits_left_def bit_simps max_pt_level_def2 s0_ptr_defs)+)[2] + apply ((clarsimp simp: s0_internal_def kh0_obj_def opt_map_def vs_refs_aux_def + vm_read_only_def vspace_cap_rights_to_auth_def pte_ref2_def + Sys1AuthGraph_def Sys1AgentMap_simps graph_of_def ptrFromPAddr_addr_from_ppn' + shared_page_ptr_phys_def pt_bits_left_max_minus_1_pageBitsForSize + dest!: kh0_SomeD split: option.splits if_splits)+)[6] apply (rule subsetI, clarsimp) apply (erule state_asids_to_policy_aux.cases) apply (drule s0_caps_of_state, clarsimp) @@ -1180,8 +1181,8 @@ lemma Sys1_pas_wellformed_noninterference: done lemma Sys1AgentMap_shared_page_ptr: - "Sys1AgentMap shared_page_ptr = partition_label Low" - by (clarsimp simp: Sys1AgentMap_def s0_ptr_defs) + "Sys1AgentMap shared_page_ptr_virt = partition_label Low" + by (clarsimp simp: Sys1AgentMap_def s0_ptr_defs ptr_range_def bit_simps) lemma silc_inv_s0: "silc_inv Sys1PAS s0_internal s0_internal" @@ -1276,9 +1277,9 @@ lemma valid_caps_s0[simp]: "s0_internal \ ArchObjectCap (PageTableCap High_pd_ptr (Some (High_asid,0)))" "s0_internal \ ArchObjectCap (PageTableCap Low_pt_ptr (Some (Low_asid,0)))" "s0_internal \ ArchObjectCap (PageTableCap High_pt_ptr (Some (High_asid,0)))" - "s0_internal \ ArchObjectCap (FrameCap shared_page_ptr vm_read_write RISCVLargePage False (Some (Low_asid,0)))" - "s0_internal \ ArchObjectCap (FrameCap shared_page_ptr vm_read_only RISCVLargePage False (Some (High_asid,0)))" - "s0_internal \ ArchObjectCap (FrameCap shared_page_ptr vm_read_only RISCVLargePage False (Some (Silc_asid,0)))" + "s0_internal \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_write RISCVLargePage False (Some (Low_asid,0)))" + "s0_internal \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (High_asid,0)))" + "s0_internal \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (Silc_asid,0)))" 
"s0_internal \ NotificationCap ntfn_ptr 0 {AllowWrite}" "s0_internal \ NotificationCap ntfn_ptr 0 {AllowRead}" "s0_internal \ ReplyCap Low_tcb_ptr True {AllowGrant,AllowWrite}" @@ -1305,7 +1306,7 @@ lemma valid_obj_s0[simp]: "valid_obj High_tcb_ptr High_tcb s0_internal" "valid_obj idle_tcb_ptr idle_tcb s0_internal" "valid_obj riscv_global_pt_ptr init_global_pt s0_internal" - "valid_obj shared_page_ptr shared_page s0_internal" + "valid_obj shared_page_ptr_virt shared_page s0_internal" apply (simp_all add: valid_obj_def kh0_obj_def) apply (simp add: valid_cs_def Low_caps_ran High_caps_ran Silc_caps_ran valid_cs_size_def word_bits_def cte_level_bits_def)+ @@ -1469,10 +1470,6 @@ lemma valid_reply_masters_s0[simp]: apply (force dest: s0_caps_of_state simp: cte_wp_at_caps_of_state is_master_reply_cap_to_def) done -lemma riscv_global_pt_is_aligned: - "is_aligned riscv_global_pt_ptr pt_bits" - by (clarsimp simp: is_aligned_def bit_simps s0_ptr_defs) - lemma valid_global_refs_s0[simp]: "valid_global_refs s0_internal" apply (clarsimp simp: valid_global_refs_def valid_refs_def cte_wp_at_caps_of_state) @@ -1487,38 +1484,13 @@ lemma valid_arch_state_s0[simp]: "valid_arch_state s0_internal" apply (clarsimp simp: valid_arch_state_def s0_internal_def arch_state0_def) apply (intro conjI) - apply (auto simp: valid_asid_table_def kh0_def kh0_obj_def opt_map_def split: option.splits)[1] - apply (clarsimp simp: valid_uses_def) - apply (intro conjI; clarsimp) - apply (fastforce simp: init_vspace_uses_def canonical_user_canonical - dest: dual_order.strict_trans1[OF _ pptr_base_kernel_elf_base] - above_pptr_base_canonical less_imp_le) - apply (fastforce dest: canonical_user_below_pptr_base - simp: init_vspace_uses_def split: if_splits) - apply (fastforce elim: dual_order.strict_trans[rotated] split: if_splits - simp: init_vspace_uses_def pptr_base_def pptrBase_def - canonical_bit_def kernel_elf_base_def kernelELFBase_def) - apply (fastforce dest: dual_order.strict_trans1[where a=kernel_elf_base, rotated] - dual_order.strict_trans1[OF _ pptr_base_kernel_elf_base] - canonical_user_below_pptr_base - simp: init_vspace_uses_def pptr_base_def pptrBase_def - canonical_bit_def kernel_elf_base_def kernelELFBase_def - split: if_splits) - apply (fastforce dest: dual_order.strict_trans2 canonical_user_below_pptr_base - simp: init_vspace_uses_def pptr_base_def pptrBase_def canonical_bit_def - kdev_base_def kdevBase_def kernel_elf_base_def kernelELFBase_def - split: if_splits) - apply (fastforce dest: dual_order.strict_trans1[where a=kernel_elf_base, rotated] - canonical_user_below_pptr_base - simp: user_window_def user_region_def init_vspace_uses_def pptr_base_def - pptrBase_def canonical_bit_def kernel_elf_base_def kernelELFBase_def - split: if_splits)+ + apply (auto simp: valid_asid_table_def kh0_def kh0_obj_def opt_map_def split: option.splits)[1] apply (fastforce simp: valid_global_arch_objs_def obj_at_def kh0_def a_type_def init_global_pt_def max_pt_level_not_asid_pool_level[symmetric]) apply (clarsimp simp: valid_global_tables_def pt_walk.simps obind_def) apply (fastforce dest: pt_walk_max_level - simp: obind_def opt_map_def riscv_global_pt_is_aligned asid_pool_level_eq - geq_max_pt_level pte_of_def kh0_def kh0_obj_def pte_rights_of_def + simp: obind_def opt_map_def asid_pool_level_eq geq_max_pt_level pte_of_def kh0_def + kh0_obj_def pte_rights_of_def split: if_splits) done @@ -1566,7 +1538,8 @@ lemma valid_arch_objs_s0[simp]: apply (clarsimp simp: valid_vspace_objs_def obj_at_def) apply (drule vs_lookup_s0_SomeD) apply 
(auto simp: aobjs_of_Some kh_s0_def kh0_obj_def data_at_def obj_at_def - ptrFromPAddr_addr_from_ppn' vmpage_size_of_level_def max_pt_level_def2) + ptrFromPAddr_addr_from_ppn' vmpage_size_of_level_def max_pt_level_def2 + shared_page_ptr_phys_def) done lemma valid_vs_lookup_s0_internal: @@ -1619,7 +1592,7 @@ lemma valid_vs_lookup_s0_internal: apply (clarsimp simp: vref_for_level_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) apply (word_bitwise, fastforce) apply (clarsimp simp: kh0_obj_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) - apply (rule FalseE, word_bitwise, fastforce) + apply (rule FalseE, word_bitwise, fastforce simp: elf_index_value) \ \Low asid\ apply (rule conjI, clarsimp simp: Low_asid_def asid_low_bits_def) apply (rule_tac x=Low_cnode_ptr in exI) @@ -1634,7 +1607,7 @@ lemma valid_vs_lookup_s0_internal: apply (clarsimp simp: vref_for_level_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) apply (word_bitwise, fastforce) apply (clarsimp simp: kh0_obj_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) - apply (rule FalseE, word_bitwise, fastforce) + apply (rule FalseE, word_bitwise, fastforce simp: elf_index_value) \ \bot level < max pt level\ apply (clarsimp simp: pool_for_asid_s0 vspace_for_pool_def asid_pools_of_s0 dest!: asid_high_low split: if_splits) @@ -1648,7 +1621,8 @@ lemma valid_vs_lookup_s0_internal: Some (max_pt_level - 1, High_pt_ptr)") apply (clarsimp simp: pt_walk.simps) apply (clarsimp simp: ptes_of_def pts_of_s0 in_omonad split: if_splits) - apply (clarsimp simp: ptes_of_def pts_of_s0 ptrFromPAddr_addr_from_ppn' split: if_splits) + apply (clarsimp simp: ptes_of_def pts_of_s0 shared_page_ptr_phys_def ptrFromPAddr_addr_from_ppn' + split: if_splits) apply (rule_tac x=High_cnode_ptr in exI) apply (rule_tac x="the_nat_to_bl_10 5" in exI) apply (rule exI, intro conjI) @@ -1668,7 +1642,8 @@ lemma valid_vs_lookup_s0_internal: Some (max_pt_level - 1, Low_pt_ptr)") apply (clarsimp simp: pt_walk.simps) apply (clarsimp simp: ptes_of_def pts_of_s0 in_omonad split: if_splits) - apply (clarsimp simp: ptes_of_def pts_of_s0 ptrFromPAddr_addr_from_ppn' split: if_splits) + apply (clarsimp simp: ptes_of_def pts_of_s0 shared_page_ptr_phys_def ptrFromPAddr_addr_from_ppn' + split: if_splits) apply (rule_tac x=Low_cnode_ptr in exI) apply (rule_tac x="the_nat_to_bl_10 5" in exI) apply (rule exI, intro conjI) @@ -1680,10 +1655,10 @@ lemma valid_vs_lookup_s0_internal: \ \No lookups to other ptes\ apply (clarsimp simp: in_omonad ptes_of_def pts_of_s0 split: if_splits) apply (clarsimp simp: kh0_obj_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) - apply (rule FalseE, word_bitwise, fastforce) + apply (rule FalseE, word_bitwise, fastforce simp: elf_index_value) apply (clarsimp simp: in_omonad ptes_of_def pts_of_s0 split: if_splits) apply (clarsimp simp: kh0_obj_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) - apply (rule FalseE, word_bitwise, fastforce) + apply (rule FalseE, word_bitwise, fastforce simp: elf_index_value) done lemma valid_arch_caps_s0[simp]: @@ -1752,7 +1727,7 @@ lemma equal_kernel_mappings_s0[simp]: dest!: kh0_SomeD split: if_splits option.splits) apply (clarsimp simp: pts_of_s0) apply (clarsimp simp: s0_internal_def riscv_global_pt_def arch_state0_def kh0_obj_def - kernel_mapping_slots_def s0_ptr_defs misc)+ + kernel_mapping_slots_def s0_ptr_defs misc elf_index_value)+ done lemma valid_asid_map_s0[simp]: @@ -1760,7 +1735,7 @@ lemma valid_asid_map_s0[simp]: by (clarsimp simp: 
valid_asid_map_def s0_internal_def arch_state0_def) lemma valid_global_pd_mappings_s0_helper: - "\ pptr_base \ vref; vref < pptr_base + 0x40000000 \ + "\ pptr_base \ vref; vref < pptr_base + (1 << kernel_window_bits) \ \ \a b. pt_lookup_target 0 riscv_global_pt_ptr vref (ptes_of s0_internal) = Some (a, b) \ is_aligned b (pt_bits_left a) \ addrFromPPtr b + (vref && mask (pt_bits_left a)) = addrFromPPtr vref" @@ -1771,32 +1746,40 @@ lemma valid_global_pd_mappings_s0_helper: Some (max_pt_level, pt_slot_offset max_pt_level riscv_global_pt_ptr vref)") apply (clarsimp simp: pt_lookup_slot_from_level_def pt_walk.simps) apply (fastforce simp: ptes_of_def in_omonad s0_internal_def kh0_def init_global_pt_def - global_pte_def riscv_global_pt_is_aligned is_aligned_pt_slot_offset_pte) + global_pte_def is_aligned_pt_slot_offset_pte) apply (clarsimp simp: pt_lookup_slot_from_level_def pt_walk.simps) apply (rule conjI; clarsimp dest!: pt_walk_max_level simp: max_pt_level_def2 split: if_splits) apply (rule conjI; clarsimp) - apply (clarsimp simp: ptes_of_def pts_of_s0 riscv_global_pt_is_aligned global_pte_def + apply (clarsimp simp: ptes_of_def pts_of_s0 global_pte_def kernel_window_bits_def table_index_offset_pt_bits_left is_aligned_pt_slot_offset_pte split: if_splits) apply (clarsimp simp: misc s0_ptr_defs) apply (word_bitwise, fastforce) apply (clarsimp simp: misc s0_ptr_defs kernel_mapping_slots_def) apply (word_bitwise, fastforce) - apply (clarsimp simp: ptes_of_def pts_of_s0 riscv_global_pt_is_aligned - is_aligned_pt_slot_offset_pte global_pte_def + apply (clarsimp simp: ptes_of_def pts_of_s0 is_aligned_pt_slot_offset_pte global_pte_def split: if_splits) apply (clarsimp simp: addr_from_ppn_def ptrFromPAddr_def addrFromPPtr_def bit_simps mask_def s0_ptr_defs pt_bits_left_def max_pt_level_def2 - pptrBaseOffset_def paddrBase_def is_aligned_def) + pptrBaseOffset_def paddrBase_def is_aligned_def kernel_window_bits_def) apply (word_bitwise, fastforce) apply (clarsimp simp: addr_from_ppn_def ptrFromPAddr_def addrFromPPtr_def bit_simps is_aligned_def s0_ptr_defs pt_bits_left_def max_pt_level_def2 kernel_mapping_slots_def - mask_def pt_slot_offset_def pt_index_def pptrBaseOffset_def paddrBase_def) + mask_def pt_slot_offset_def pt_index_def pptrBaseOffset_def paddrBase_def + toplevel_bits_value elf_index_value kernel_window_bits_def) apply (word_bitwise, fastforce) done +lemma ptes_of_elf_window: + "\kernel_elf_base \ vref; vref < kernel_elf_base + 2 ^ pageBits\ + \ ptes_of s0_internal (pt_slot_offset max_pt_level riscv_global_pt_ptr vref) + = Some (global_pte elf_index)" + unfolding ptes_of_def pts_of_s0 + apply (clarsimp simp: obind_def elf_window_4k is_aligned_pt_slot_offset_pte) + done + lemma valid_global_pd_mappings_s0_helper': - "\ kernel_elf_base \ vref; vref < kernel_elf_base + 0x100000 \ + "\ kernel_elf_base \ vref; vref < kernel_elf_base + (1 << pageBits) \ \ \a b. 
pt_lookup_target 0 riscv_global_pt_ptr vref (ptes_of s0_internal) = Some (a, b) \ is_aligned b (pt_bits_left a) \ addrFromPPtr b + (vref && mask (pt_bits_left a)) = addrFromKPPtr vref" @@ -1807,36 +1790,21 @@ lemma valid_global_pd_mappings_s0_helper': Some (max_pt_level, pt_slot_offset max_pt_level riscv_global_pt_ptr vref)") apply (clarsimp simp: pt_lookup_slot_from_level_def pt_walk.simps) apply (fastforce simp: ptes_of_def in_omonad s0_internal_def kh0_def init_global_pt_def - global_pte_def riscv_global_pt_is_aligned is_aligned_pt_slot_offset_pte) - apply (clarsimp simp: pt_lookup_slot_from_level_def pt_walk.simps) - apply (rule conjI; clarsimp dest!: pt_walk_max_level simp: max_pt_level_def2 split: if_splits) + global_pte_def is_aligned_pt_slot_offset_pte) apply (rule conjI; clarsimp) - apply (clarsimp simp: ptes_of_def pts_of_s0 riscv_global_pt_is_aligned global_pte_def - table_index_offset_pt_bits_left is_aligned_pt_slot_offset_pte - split: if_splits) - apply (clarsimp simp: misc kernel_elf_base_def kernelELFBase_def) - apply (word_bitwise, fastforce) - apply (clarsimp simp: misc s0_ptr_defs kernel_mapping_slots_def - kernel_elf_base_def kernelELFBase_def) - apply (word_bitwise, fastforce) - apply (clarsimp simp: ptes_of_def pts_of_s0 riscv_global_pt_is_aligned - is_aligned_pt_slot_offset_pte global_pte_def - split: if_splits) - apply (clarsimp simp: addr_from_ppn_def bit_simps s0_ptr_defs pt_bits_left_def mask_def - pt_slot_offset_def pt_index_def kernel_elf_base_def kernelELFBase_def) - apply (word_bitwise, fastforce) - apply (clarsimp simp: addr_from_ppn_def ptrFromPAddr_def addrFromKPPtr_def bit_simps - is_aligned_def s0_ptr_defs pt_bits_left_def mask_def pptrBaseOffset_def - paddrBase_def kernel_elf_base_def kernelELFBase_def - kernelELFBaseOffset_def kernelELFPAddrBase_def) - apply (word_bitwise, fastforce) + apply (rule conjI; clarsimp) + apply (clarsimp simp: pt_lookup_slot_from_level_def pt_walk.simps) + apply (rule conjI; clarsimp) + apply (clarsimp simp: ptes_of_elf_window global_pte_def split: if_splits) + apply (clarsimp simp: ptes_of_elf_window global_pte_def elf_index_value) + apply (clarsimp simp: is_aligned_ptrFromPAddr_kernelELFPAddrBase kernelELFPAddrBase_addrFromKPPtr) done lemma valid_global_pd_mappings_s0[simp]: "valid_global_vspace_mappings s0_internal" unfolding valid_global_vspace_mappings_def Let_def apply (intro conjI) - apply (simp add: s0_internal_def arch_state0_def riscv_global_pt_def riscv_global_pt_is_aligned) + apply (simp add: s0_internal_def arch_state0_def riscv_global_pt_def) apply (fastforce simp: s0_internal_def arch_state0_def in_omonad kernel_window_def init_vspace_uses_def translate_address_def riscv_global_pt_def dest!: valid_global_pd_mappings_s0_helper split: if_splits) @@ -1849,10 +1817,10 @@ lemma pspace_in_kernel_window_s0[simp]: "pspace_in_kernel_window s0_internal" apply (clarsimp simp: pspace_in_kernel_window_def kernel_window_def init_vspace_uses_def s0_internal_def arch_state0_def) - apply (subgoal_tac "x \ {pptr_base.. {pptr_base.. {pptr_base.. {pptr_base.. arch_scheduler_affects_equiv st (s\kheap := kheap s(x \ TCB y')\)" + \ arch_scheduler_affects_equiv st (s\kheap := (kheap s)(x \ TCB y')\)" and arch_scheduler_affects_equiv_sa_update[simp]: "\f. arch_scheduler_affects_equiv (scheduler_action_update f s) s' = arch_scheduler_affects_equiv s s'" @@ -106,7 +106,7 @@ locale Scheduler_IF_1 = "\P. arch_switch_to_idle_thread \\s. 
P (work_units_completed s)\" and equiv_asid_equiv_update: "\ get_tcb x s = Some y; equiv_asid asid st s \ - \ equiv_asid asid st (s\kheap := kheap s(x \ TCB y')\)" + \ equiv_asid asid st (s\kheap := (kheap s)(x \ TCB y')\)" and equiv_asid_cur_thread_update[simp]: "\f. equiv_asid asid (cur_thread_update f s) s' = equiv_asid asid s s'" "\f. equiv_asid asid s (cur_thread_update f s') = equiv_asid asid s s'" @@ -605,7 +605,7 @@ proof - apply (simp add: scheduler_affects_equiv_def[abs_def]) apply (rule hoare_pre) apply (wps c) - apply (wp static_imp_wp a silc_dom_equiv_states_equiv_lift d e s w i x hoare_vcg_imp_lift) + apply (wp hoare_weak_lift_imp a silc_dom_equiv_states_equiv_lift d e s w i x hoare_vcg_imp_lift) apply fastforce done qed @@ -671,7 +671,7 @@ proof - apply (simp add: asahi_scheduler_affects_equiv_def[abs_def]) apply (rule hoare_pre) apply (wps c) - apply (wp static_imp_wp a silc_dom_equiv_states_equiv_lift d w) + apply (wp hoare_weak_lift_imp a silc_dom_equiv_states_equiv_lift d w) apply clarsimp done qed @@ -731,7 +731,7 @@ proof - apply (simp add: asahi_ex_scheduler_affects_equiv_def[abs_def]) apply (rule hoare_pre) apply (wps c) - apply (wp static_imp_wp a silc_dom_equiv_states_equiv_lift d w x hoare_vcg_imp_lift') + apply (wp hoare_weak_lift_imp a silc_dom_equiv_states_equiv_lift d w x hoare_vcg_imp_lift') apply clarsimp done qed @@ -1431,7 +1431,7 @@ lemma when_next_domain_domain_fields: "\\s. \ B \ domain_fields Q s\ when B next_domain \\_. domain_fields Q\" - by (wpsimp | rule hoare_pre_cont[where a=next_domain])+ + by (wpsimp | rule hoare_pre_cont[where f=next_domain])+ lemma cur_thread_cur_domain: "\ st_tcb_at ((=) st) (cur_thread s) s; \ idle st; invs s; guarded_pas_domain aag s \ @@ -1439,10 +1439,6 @@ lemma cur_thread_cur_domain: by (clarsimp simp: pred_tcb_at_def invs_def valid_idle_def valid_state_def obj_at_def guarded_pas_domain_def) -lemma valid_sched_valid_queues[intro]: - "valid_sched s \ valid_queues s" - by (simp add: valid_sched_def) - lemma ethread_get_wp2: "\\s. \etcb. etcb_at ((=) etcb) t s \ Q (f etcb) s\ ethread_get f t @@ -1488,7 +1484,7 @@ lemma schedule_no_domain_switch: apply (wpsimp wp: hoare_drop_imps simp: if_apply_def2 | simp add: schedule_choose_new_thread_def | wpc - | rule hoare_pre_cont[where a=next_domain] )+ + | rule hoare_pre_cont[where f=next_domain] )+ done lemma schedule_no_domain_fields: @@ -1500,7 +1496,7 @@ lemma schedule_no_domain_fields: apply (wpsimp wp: hoare_drop_imps simp: if_apply_def2 | simp add: schedule_choose_new_thread_def | wpc - | rule hoare_pre_cont[where a=next_domain] )+ + | rule hoare_pre_cont[where f=next_domain] )+ done lemma set_scheduler_action_unobservable: @@ -1687,7 +1683,7 @@ lemma schedule_choose_new_thread_schedule_affects_no_switch: \\_. 
scheduler_affects_equiv aag l st\" unfolding schedule_choose_new_thread_def by (wpsimp wp: set_scheduler_action_unobservable choose_thread_unobservable - hoare_pre_cont[where a=next_domain]) + hoare_pre_cont[where f=next_domain]) lemma reads_respects_scheduler_invisible_no_domain_switch: assumes domains_distinct[wp]: "pas_domains_distinct aag" @@ -1713,7 +1709,7 @@ lemma reads_respects_scheduler_invisible_no_domain_switch: hoare_vcg_all_lift hoare_vcg_disj_lift | wpc | simp - | rule hoare_pre_cont[where a=next_domain] + | rule hoare_pre_cont[where f=next_domain] | wp (once) hoare_drop_imp[where f="set_scheduler_action choose_new_thread"])+ (* stop on fastfail calculation *) apply (clarsimp simp: conj_ac cong: imp_cong conj_cong) @@ -2221,7 +2217,7 @@ context Scheduler_IF_1 begin lemma scheduler_affects_equiv_update: "\ get_tcb x s = Some y; pasObjectAbs aag x \ reads_scheduler aag l; scheduler_affects_equiv aag l st s \ - \ scheduler_affects_equiv aag l st (s\kheap := kheap s(x \ TCB y')\)" + \ scheduler_affects_equiv aag l st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: scheduler_affects_equiv_def equiv_for_def equiv_asids_def states_equiv_for_def scheduler_globals_frame_equiv_def arch_scheduler_affects_equiv_update equiv_asid_equiv_update) @@ -2397,7 +2393,9 @@ lemma context_update_cur_thread_snippit_cur_domain: (\s. reads_scheduler_cur_domain aag l s \ invs s \ silc_inv aag st s \ (ct_idle s \ uc = idle_context s) \ guarded_pas_domain aag s) (gets cur_thread >>= thread_set (tcb_arch_update (arch_tcb_context_set uc)))" - apply wp + \ \FIXME: maybe should make an equiv_valid_pre?\ + apply (rule equiv_valid_guard_imp) + apply wp apply (clarsimp simp: cur_thread_idle silc_inv_not_cur_thread del: notI) done diff --git a/proof/infoflow/Syscall_IF.thy b/proof/infoflow/Syscall_IF.thy index 1de1f707c5..aba1e52dc2 100644 --- a/proof/infoflow/Syscall_IF.thy +++ b/proof/infoflow/Syscall_IF.thy @@ -174,7 +174,7 @@ proof (induct rule: cap_revoke.induct[where ?a1.0=s]) cap_delete_pas_refined cap_delete_silc_inv[where st=st] cap_delete_only_timer_irq_inv[where st=st' and irq=irq] drop_spec_ev[OF assertE_ev] drop_spec_ev[OF liftE_ev] - get_cap_wp select_wp select_ev drop_spec_ev2_inv[OF liftE_ev2] + get_cap_wp select_ev drop_spec_ev2_inv[OF liftE_ev2] reads_respects_f[OF get_cap_rev, where st=st and aag=aag] | simp (no_asm) add: returnOk_def | rule next_revoke_eq' | (simp add: pred_conj_def, erule conjE, assumption) @@ -187,8 +187,8 @@ proof (induct rule: cap_revoke.induct[where ?a1.0=s]) apply (frule aag_can_read_self) apply (simp add: equiv_for_def split del: if_split)+ apply (wp drop_spec_ev2_inv[OF liftE_ev2] gets_evrv | simp)+ - apply (wp drop_spec_ev2_inv[OF liftE_ev2] gets_evrv - reads_respects_f[OF get_cap_rev, where st=st and Q=\,simplified equiv_valid_def2]) + apply (wpsimp wp: drop_spec_ev2_inv[OF liftE_ev2] gets_evrv + reads_respects_f[OF get_cap_rev, where st=st and Q=\,simplified equiv_valid_def2]) apply clarsimp+ apply (frule all_children_subjectReads[simplified comp_def]) apply clarsimp @@ -330,7 +330,7 @@ proof (cases oper) apply (rule equiv_valid_guard_imp) apply (wpc | simp | wp reads_respects_f_g'[OF invoke_untyped_reads_respects_g] invoke_untyped_silc_inv)+ - by (simp add: authorised_invocation_def) + by (fastforce simp: authorised_invocation_def) next case InvokeEndpoint then show ?thesis @@ -383,7 +383,7 @@ next apply (rule equiv_valid_guard_imp) apply (wpc | simp | wp reads_respects_f_g'[OF invoke_irq_control_reads_respects_g] invoke_irq_control_silc_inv)+ - by (simp 
add: invs_def valid_state_def authorised_invocation_def) + by (fastforce simp: invs_def valid_state_def authorised_invocation_def) next case InvokeIRQHandler then show ?thesis @@ -446,14 +446,11 @@ lemma syscall_requiv_f_g: apply assumption+ apply (rule hoare_strengthen_post) apply assumption - apply (case_tac r) - apply simp - apply simp + apply (simp split: sum.splits) apply (rule hoare_strengthen_post, rule hoare_pre) apply assumption apply simp - apply (case_tac r) - apply simp+ + apply (simp split: sum.splits) done (*FIXME: Move to base*) @@ -500,9 +497,7 @@ lemma decode_invocation_authorised_extra: decode_invocation info_label args ptr slot cap excaps \\rv s. authorised_invocation_extra aag rv\,-" unfolding decode_invocation_def authorised_invocation_extra_def - apply (rule hoare_pre) - apply (wp decode_tcb_invocation_authorised_extra | wpc | simp add: split_def o_def uncurry_def)+ - apply auto + apply (wpsimp wp: decode_tcb_invocation_authorised_extra simp: split_def o_def)+ done lemma sts_schact_is_rct_runnable: @@ -586,7 +581,7 @@ lemma handle_invocation_reads_respects_g: | rule hoare_drop_imps)+ apply (rule_tac Q'="\r s. silc_inv aag st s \ invs s \ is_subject aag rv \ is_subject aag (cur_thread s) \ rv \ idle_thread s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp pinv_invs perform_invocation_silc_inv) apply (simp add: invs_def valid_state_def valid_pspace_def) apply (wpsimp wp: reads_respects_f_g' @@ -652,9 +647,9 @@ lemma lookup_cap_cap_fault: "\invs\ lookup_cap c b -, \\f s. valid_fault (CapFault x y f)\" apply (simp add: lookup_cap_def) apply wp - apply (case_tac xa) + apply (case_tac rv) apply (simp add: validE_E_def) - apply (wp) + apply wp apply (fold validE_E_def) apply (wp lookup_slot_for_thread_cap_fault) apply assumption @@ -701,18 +696,17 @@ lemma handle_recv_reads_respects_f: apply (rule_tac Q'="\r s. silc_inv aag st s \ einvs s \ pas_refined aag s \ tcb_at rv s \ pas_cur_domain aag s \ cte_wp_at \ (fst r) s \ is_subject aag rv \ is_subject aag (cur_thread s) \ is_subject aag (fst (fst r))" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply ((wp lookup_slot_for_thread_authorised lookup_slot_cte_at_wp | simp)+)[1] apply (clarsimp simp: silc_inv_not_subject[symmetric] invs_mdb invs_valid_objs) apply (auto intro: caps_of_state_valid reads_ep simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def)[1] apply (wp reads_respects_f[OF handle_fault_reads_respects,where st=st]) apply (wpsimp wp: get_simple_ko_wp get_cap_wp)+ - apply (rule VSpaceEntries_AI.hoare_vcg_all_liftE) apply (rule_tac Q="\r s. silc_inv aag st s \ einvs s \ pas_refined aag s \ tcb_at rv s \ pas_cur_domain aag s \ is_subject aag rv \ is_subject aag (cur_thread s) \ is_subject aag (fst (fst r))" - and E=E and F=E for E in hoare_post_impErr) + and E=E and F=E for E in hoare_strengthen_postE) apply (wp lookup_slot_for_thread_authorised lookup_slot_for_thread_cap_fault) apply ((fastforce simp add:valid_fault_def)+)[3] apply (wp reads_respects_f[OF as_user_reads_respects,where st=st and Q=\]) @@ -729,7 +723,7 @@ lemma handle_recv_globals_equiv: | wpc | simp add: Let_def)+ apply (rule_tac Q="\r s. invs s \ globals_equiv st s" and E = "\r s. 
valid_fault (CapFault (of_bl ep_cptr) True r)" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (rule hoare_vcg_E_elim) apply (wp lookup_cap_cap_fault receive_ipc_globals_equiv receive_signal_globals_equiv delete_caller_cap_invs @@ -742,7 +736,7 @@ lemma handle_recv_globals_equiv: clarsimp simp: invs_valid_objs invs_valid_global_objs invs_arch_state invs_distinct)+ apply (rule_tac Q'="\r s. invs s \ globals_equiv st s \ thread \ idle_thread s \ tcb_at thread s \ cur_thread s = thread" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp as_user_globals_equiv | simp add: invs_imps valid_fault_def)+ apply (wp delete_caller_cap_invs delete_caller_cap_globals_equiv | simp add: invs_imps invs_valid_idle ct_active_not_idle)+ @@ -911,7 +905,7 @@ lemma handle_event_reads_respects_f_g: apply (rule_tac E="\r s. invs s \ is_subject aag rv \ is_subject aag (cur_thread s) \ valid_fault r \ pas_refined aag s \ pas_cur_domain aag s \ silc_inv aag st s \ rv \ idle_thread s" - and Q="\\" in hoare_post_impErr) + and Q="\\" in hoare_strengthen_postE) apply (rule hoare_vcg_E_conj) apply (wp hv_invs handle_vm_fault_silc_inv)+ apply (simp add: invs_imps invs_mdb invs_valid_idle)+ @@ -989,7 +983,7 @@ lemma handle_invocation_globals_equiv: | simp split del: if_split | wp (once) hoare_drop_imps)+ apply (rule_tac Q="\r. invs and globals_equiv st and (\s. thread \ idle_thread s)" - and E="\_. globals_equiv st" in hoare_post_impErr) + and E="\_. globals_equiv st" in hoare_strengthen_postE) apply (wp pinv_invs perform_invocation_globals_equiv requiv_get_tcb_eq' set_thread_state_globals_equiv sts_authorised_for_globals_inv diff --git a/proof/infoflow/Tcb_IF.thy b/proof/infoflow/Tcb_IF.thy index ca68721039..7ea3fbef57 100644 --- a/proof/infoflow/Tcb_IF.thy +++ b/proof/infoflow/Tcb_IF.thy @@ -90,7 +90,7 @@ next apply (simp add: conj_comms) apply (wp set_cap_P set_cap_Q replace_cap_invs final_cap_same_objrefs set_cap_cte_cap_wp_to - set_cap_cte_wp_at hoare_vcg_const_Ball_lift static_imp_wp + set_cap_cte_wp_at hoare_vcg_const_Ball_lift hoare_weak_lift_imp | rule finalise_cap_not_reply_master | simp add: in_monad)+ apply (rule hoare_strengthen_post) @@ -140,8 +140,7 @@ next apply (rule split_spec_bindE) apply (rule split_spec_bindE[rotated]) apply (rule "4.hyps", assumption+) - apply (wp set_cap_P set_cap_Q get_cap_wp | simp) - apply simp + apply (wpsimp wp: set_cap_P set_cap_Q get_cap_wp) apply simp apply wp apply (clarsimp simp add: zombie_is_cap_toE) @@ -203,11 +202,10 @@ lemma rec_del_globals_equiv: "\\s. invs s \ globals_equiv st s \ emptyable (slot_rdcall call) s \ valid_rec_del_call call s\ rec_del call \\_. globals_equiv st\" - apply (wp finalise_cap_globals_equiv - rec_del_preservation2[where Q="valid_arch_state" - and R="\cap s. invs s \ valid_cap cap s - \ (\p. cap = ThreadCap p \ p \ idle_thread s)"]) - apply simp + apply (wpsimp wp: finalise_cap_globals_equiv + rec_del_preservation2[where Q="valid_arch_state" + and R="\cap s. invs s \ valid_cap cap s + \ (\p. cap = ThreadCap p \ p \ idle_thread s)"]) apply (wp set_cap_globals_equiv'') apply simp apply (wp empty_slot_globals_equiv)+ @@ -308,10 +306,9 @@ lemma invoke_tcb_globals_equiv: apply (rule_tac Q="\_. valid_arch_state and globals_equiv st and (\s. word1 \ idle_thread s) and (\s. 
word2 \ idle_thread s)" in hoare_strengthen_post) - apply ((wp mapM_x_wp' as_user_globals_equiv invoke_tcb_NotificationControl_globals_equiv - | simp - | intro conjI impI - | clarsimp simp: no_cap_to_idle_thread)+)[6] + apply (wpsimp wp: mapM_x_wp' as_user_globals_equiv invoke_tcb_NotificationControl_globals_equiv + weak_if_wp')+ + apply (intro conjI impI; clarsimp simp: no_cap_to_idle_thread)+ apply (simp del: invoke_tcb.simps tcb_inv_wf.simps) apply (wp invoke_tcb_thread_preservation cap_delete_globals_equiv cap_insert_globals_equiv'' thread_set_globals_equiv set_mcpriority_globals_equiv @@ -486,7 +483,7 @@ context Tcb_IF_2 begin lemma invoke_tcb_reads_respects_f: assumes domains_distinct[wp]: "pas_domains_distinct aag" - notes validE_valid[wp del] static_imp_wp [wp] + notes validE_valid[wp del] hoare_weak_lift_imp [wp] shows "reads_respects_f aag l (silc_inv aag st and only_timer_irq_inv irq st' and einvs @@ -494,14 +491,15 @@ lemma invoke_tcb_reads_respects_f: and tcb_inv_wf ti and is_subject aag \ cur_thread and K (authorised_tcb_inv aag ti \ authorised_tcb_inv_extra aag ti)) (invoke_tcb ti)" - including no_pre + including classic_wp_pre apply (case_tac ti) \ \WriteRegisters\ apply (strengthen invs_mdb | wpsimp wp: when_ev restart_reads_respects_f reschedule_required_reads_respects_f as_user_reads_respects_f restart_silc_inv restart_pas_refined hoare_vcg_if_lift)+ apply (rule hoare_strengthen_post[where Q="\_ s. \rv. R rv s" and R=R for R, rotated]) - apply (erule_tac x=r in allE, assumption) + apply (rename_tac rv s) + apply (erule_tac x=rv in allE, assumption) apply wpsimp+ apply (solves \auto intro!: det_zipWithM simp: det_setRegister det_getRestartPC det_setNextPC diff --git a/proof/infoflow/refine/ADT_IF_Refine.thy b/proof/infoflow/refine/ADT_IF_Refine.thy index 5750ae76e7..e6ac3cdc5c 100644 --- a/proof/infoflow/refine/ADT_IF_Refine.thy +++ b/proof/infoflow/refine/ADT_IF_Refine.thy @@ -282,8 +282,7 @@ locale ADT_IF_Refine_1 = "\K (uop_sane uop)\ doUserOp_if uop tc \\r s. (fst r) \ Some Interrupt\" and handleEvent_corres_arch_extras: "corres (dc \ dc) - (einvs and (\s. event \ Interrupt \ ct_running s) - and (\s. scheduler_action s = resume_cur_thread)) + (einvs and (\s. event \ Interrupt \ ct_running s) and schact_is_rct) (invs' and (\s. event \ Interrupt \ ct_running' s) and (\s. ksSchedulerAction s = ResumeCurrentThread) and arch_extras) @@ -293,7 +292,7 @@ begin lemma kernel_entry_if_corres: "corres (prod_lift (dc \ dc)) (einvs and (\s. event \ Interrupt \ ct_running s) - and (\s. scheduler_action s = resume_cur_thread) + and schact_is_rct and (\s. 0 < domain_time s) and valid_domain_list) (invs' and (\s. 
event \ Interrupt \ ct_running' s) and arch_extras @@ -305,9 +304,12 @@ lemma kernel_entry_if_corres: apply (rule corres_split) apply simp apply (rule threadset_corresT) - apply (erule arch_tcb_context_set_tcb_relation) - apply (clarsimp simp: tcb_cap_cases_def) - apply (rule allI[OF ball_tcb_cte_casesI]; clarsimp) + apply (erule arch_tcb_context_set_tcb_relation) + apply (clarsimp simp: tcb_cap_cases_def) + apply (rule allI[OF ball_tcb_cte_casesI]; clarsimp) + apply fastforce + apply fastforce + apply fastforce apply (simp add: exst_same_def) apply (rule corres_split[OF handleEvent_corres_arch_extras]) apply (rule corres_stateAssert_assume_stronger[where Q=\ and @@ -320,7 +322,7 @@ lemma kernel_entry_if_corres: apply (wp hoare_TrueI threadSet_invs_trivial thread_set_invs_trivial thread_set_ct_running threadSet_ct_running' thread_set_not_state_valid_sched hoare_vcg_const_imp_lift handle_event_domain_time_inv handle_interrupt_valid_domain_time - | simp add: tcb_cap_cases_def | wpc | wp (once) hoare_drop_imps)+ + | simp add: tcb_cap_cases_def schact_is_rct_def | wpc | wp (once) hoare_drop_imps)+ apply (fastforce simp: invs_def cur_tcb_def) apply force done @@ -340,7 +342,7 @@ lemma kernelEntry_ex_abs[wp]: apply (rule_tac x=sa in exI) apply (clarsimp simp: domain_time_rel_eq domain_list_rel_eq) by (fastforce simp: ct_running_related ct_idle_related schedaction_related - active_from_running' active_from_running) + active_from_running' active_from_running schact_is_rct_def) lemma doUserOp_if_ct_in_state[wp]: "doUserOp_if f tc \ct_in_state' st\" @@ -507,7 +509,7 @@ lemma scheduler'_if_ex_abs[wp]: apply wp apply (clarsimp simp: ex_abs_def) apply (rule exI, rule conjI, assumption) - apply (frule state_relation_schact) + apply (frule state_relation_sched_act_relation) apply (auto simp: domain_list_rel_eq domain_time_rel_eq) done @@ -813,7 +815,6 @@ lemma abstract_invs: crunches checkActiveIRQ_if for ksDomainTime_inv[wp]: "\s. P (ksDomainTime s)" and ksDomSchedule_inv[wp]: "\s. P (ksDomSchedule s)" - (wp: select_wp) lemma kernelEntry_if_valid_domain_time: "e \ Interrupt \ \\\ kernelEntry_if e tc \\_ s. 
0 < ksDomainTime s \ valid_domain_list' s\" @@ -1163,7 +1164,7 @@ lemma st_tcb_at_coerce_haskell: apply (drule_tac x=t in bspec) apply fastforce apply clarsimp - apply (simp add: other_obj_relation_def) + apply (simp add: tcb_relation_cut_def) apply clarsimp apply (clarsimp simp: obj_at'_def projectKO_eq projectKO_tcb split: kernel_object.splits) apply (rule_tac x="tcb_state tcb" in exI) @@ -1273,7 +1274,7 @@ lemma haskell_to_abs: apply (rule corres_guard_imp) apply (rule kernel_entry_if_corres) apply clarsimp - apply ((clarsimp simp: full_invs_if_def full_invs_if'_def)+)[2] + apply ((clarsimp simp: full_invs_if_def full_invs_if'_def schact_is_rct_def)+)[2] apply (fastforce simp: prod_lift_def) apply (rule kernelEntry_if_empty_fail) apply (simp add: kernel_handle_preemption_if_def handlePreemption_H_if_def) diff --git a/proof/infoflow/refine/ADT_IF_Refine_C.thy b/proof/infoflow/refine/ADT_IF_Refine_C.thy index ec9dff489c..0825f25e61 100644 --- a/proof/infoflow/refine/ADT_IF_Refine_C.thy +++ b/proof/infoflow/refine/ADT_IF_Refine_C.thy @@ -150,7 +150,8 @@ lemma cur_thread_of_absKState[simp]: by (clarsimp simp: cstate_relation_def Let_def absKState_def cstate_to_H_def) lemma absKState_crelation: - "\ cstate_relation s (globals s'); invs' s \ \ cstate_to_A s' = absKState s" + "\ cstate_relation s (globals s'); invs' s; ksReadyQueues_asrt s\ + \ cstate_to_A s' = absKState s" apply (clarsimp simp add: cstate_to_H_correct invs'_def cstate_to_A_def) apply (clarsimp simp: absKState_def absExst_def observable_memory_def) apply (case_tac s) @@ -193,23 +194,20 @@ lemma handleInterrupt_no_fail: lemma handleEvent_Interrupt_no_fail: "no_fail (invs' and ex_abs einvs) (handleEvent Interrupt)" apply (simp add: handleEvent_def) - apply (rule no_fail_pre) apply wp - apply (rule handleInterrupt_no_fail) - apply (simp add: crunch_simps) - apply (rule_tac Q="\r s. ex_abs (einvs) s \ invs' s \ - (\irq. r = Some irq - \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive)" - in hoare_strengthen_post) - apply (rule hoare_vcg_conj_lift) - apply (rule corres_ex_abs_lift) - apply (rule dmo_getActiveIRQ_corres) - apply wp - apply simp - apply wp - apply simp - apply (rule doMachineOp_getActiveIRQ_IRQ_active) - apply clarsimp + apply (rule handleInterrupt_no_fail) + apply (simp add: crunch_simps) + apply (rule_tac Q="\r s. ex_abs (einvs) s \ invs' s \ + (\irq. r = Some irq + \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive)" + in hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule corres_ex_abs_lift) + apply (rule dmo_getActiveIRQ_corres) + apply wpsimp + apply (wpsimp wp: doMachineOp_getActiveIRQ_IRQ_active) + apply clarsimp + apply wpsimp apply (clarsimp simp: invs'_def valid_state'_def) done @@ -227,8 +225,7 @@ locale ADT_IF_Refine_1 = kernel_m + (handleEvent Interrupt) (handleInterruptEntry_C_body_if)" and handleInvocation_ccorres': "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and arch_extras and ct_active' and sch_act_simple - and (\s. \x. ksCurThread s \ set (ksReadyQueues s x))) + (invs' and arch_extras and ct_active' and sch_act_simple) (UNIV \ {s. isCall_' s = from_bool isCall} \ {s. isBlocking_' s = from_bool isBlocking}) [] (handleInvocation isCall isBlocking) (Call handleInvocation_'proc)" and check_active_irq_corres_C: @@ -272,11 +269,10 @@ lemma handleEvent_ccorres: apply wp[1] apply clarsimp apply wp - apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s \ - (\p. 
ksCurThread s \ set (ksReadyQueues s p))" + apply (rule_tac Q="\rv s. ct_in_state' simple' s \ sch_act_sane s" in hoare_post_imp) apply (simp add: ct_in_state'_def) - apply (wp handleReply_sane handleReply_ct_not_ksQ) + apply (wp handleReply_sane) \ \SysSend\ apply (simp add: handleSend_def) apply (ctac (no_vcg) add: handleInvocation_ccorres) @@ -359,7 +355,7 @@ lemma handleEvent_ccorres: apply (clarsimp simp: return_def) apply wp apply (simp add: guard_is_UNIV_def) - apply (auto simp: ct_in_state'_def ct_not_ksQ isReply_def is_cap_fault_def + apply (auto simp: ct_in_state'_def isReply_def is_cap_fault_def cfault_rel_def seL4_Fault_UnknownSyscall_lift seL4_Fault_UserException_lift elim: pred_tcb'_weakenE st_tcb_ex_cap'' dest: st_tcb_at_idle_thread' rf_sr_ksCurThread) @@ -387,7 +383,7 @@ lemma kernelEntry_corres_C: apply (erule no_fail_pre) apply (clarsimp simp: all_invs'_def) apply (rule exI, rule conjI, assumption) - apply clarsimp + apply (clarsimp simp: schact_is_rct_def) apply (simp only: bind_assoc) apply (simp add: getCurThread_def) apply (rule corres_guard_imp) @@ -416,9 +412,9 @@ lemma kernelEntry_corres_C: apply (rule threadSet_all_invs_triv'[where e=e]) apply (clarsimp simp: all_invs'_def) apply (rule exI, (rule conjI, assumption)+) - subgoal by force + subgoal by (force simp: schact_is_rct_def) apply simp - apply (rule hoare_post_taut[where P=\]) + apply (rule wp_post_taut) apply wp+ apply (clarsimp simp: all_invs'_def invs'_def cur_tcb'_def valid_state'_def) apply fastforce @@ -448,7 +444,7 @@ lemma handle_preemption_corres_C: apply clarsimp apply simp apply simp - apply (rule hoare_post_taut[where P=\])+ + apply (rule wp_post_taut)+ apply (fastforce simp: ex_abs_def schedaction_related)+ done @@ -513,7 +509,7 @@ lemma schedule_if_corres_C: apply simp apply simp apply simp - apply (rule hoare_post_taut[where P=\])+ + apply (rule wp_post_taut)+ apply (rule_tac Q="\r. 
ct_in_state' activatable' and invs' and ex_abs (invs and ct_in_state activatable)" in hoare_strengthen_post) apply (wp schedule_invs' corres_ex_abs_lift) @@ -522,7 +518,7 @@ lemma schedule_if_corres_C: apply (clarsimp simp: ex_abs_def invs'_def valid_state'_def valid_pspace'_def) apply fastforce apply simp - apply (rule hoare_post_taut[where P=\]) + apply (rule wp_post_taut) apply (auto simp: ex_abs_def) done @@ -616,7 +612,6 @@ definition ADT_C_if where (kernel_call_C_if fp) handle_preemption_C_if schedule_C_if kernel_exit_C_if)\" - lemma c_to_haskell: "uop_nonempty uop \ global_automata_refine checkActiveIRQ_H_if (doUserOp_H_if uop) kernelCall_H_if @@ -629,12 +624,16 @@ lemma c_to_haskell: apply (unfold_locales) apply (simp add: ADT_C_if_def) apply (simp_all add: preserves_trivial preserves'_trivial) + apply (clarsimp simp: full_invs_if'_def ex_abs_def) + apply (frule ksReadyQueues_asrt_cross[OF state_relation_ready_queues_relation]) apply (clarsimp simp: lift_snd_rel_def ADT_C_if_def ADT_H_if_def absKState_crelation rf_sr_def full_invs_if'_def) apply (clarsimp simp: rf_sr_def full_invs_if'_def ex_abs_def) apply (simp add: ADT_H_if_def ADT_C_if_def lift_fst_rel_def lift_snd_rel_def) - apply safe - apply (clarsimp simp: absKState_crelation rf_sr_def full_invs_if'_def) + apply (clarsimp simp: full_invs_if'_def) + apply (frule ex_abs_ksReadyQueues_asrt) + apply (clarsimp simp: absKState_crelation rf_sr_def) + apply (frule invs_valid_stateI') apply (rule_tac x="((a,bb),ba)" in bexI) apply simp apply simp @@ -645,7 +644,8 @@ lemma c_to_haskell: kernelCall_H_if_def kernel_call_C_if_def handlePreemption_H_if_def handle_preemption_C_if_def schedule'_H_if_def schedule_C_if_def - kernelExit_H_if_def kernel_exit_C_if_def) + kernelExit_H_if_def kernel_exit_C_if_def invs'_def) + apply (clarsimp split: sys_mode.splits) apply (rule step_corres_lifts,rule corres_guard_imp[OF check_active_irq_corres_C]; fastforce simp: full_invs_if'_def) apply (rule step_corres_lifts,rule corres_guard_imp[OF check_active_irq_corres_C]; fastforce simp: full_invs_if'_def) apply (rule step_corres_lifts,rule corres_guard_imp[OF do_user_op_if_C_corres]; auto simp: full_invs_if'_def ex_abs_def) diff --git a/proof/infoflow/refine/ARM/ArchADT_IF_Refine.thy b/proof/infoflow/refine/ARM/ArchADT_IF_Refine.thy index d1783c7168..92def05969 100644 --- a/proof/infoflow/refine/ARM/ArchADT_IF_Refine.thy +++ b/proof/infoflow/refine/ARM/ArchADT_IF_Refine.thy @@ -24,7 +24,7 @@ lemma kernelEntry_invs'[ADT_IF_Refine_assms, wp]: kernelEntry_if e tc \\_. invs'\" apply (simp add: kernelEntry_if_def) - apply (wp threadSet_invs_trivial threadSet_ct_running' static_imp_wp + apply (wp threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp | wp (once) hoare_drop_imps | clarsimp)+ done @@ -36,7 +36,7 @@ lemma kernelEntry_arch_extras[ADT_IF_Refine_assms, wp]: kernelEntry_if e tc \\_. 
arch_extras\" apply (simp add: kernelEntry_if_def) - apply (wp handleEvent_valid_duplicates' threadSet_invs_trivial threadSet_ct_running' static_imp_wp + apply (wp handleEvent_valid_duplicates' threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp | wp (once) hoare_drop_imps | clarsimp)+ done @@ -198,7 +198,7 @@ lemma do_user_op_if_corres[ADT_IF_Refine_assms]: apply (rule corres_split[OF corres_machine_op,where r'="(=)"]) apply (rule corres_underlying_trivial) apply (clarsimp simp: user_memory_update_def) - apply (rule non_fail_modify) + apply (rule no_fail_modify) apply (rule corres_split[OF corres_machine_op,where r'="(=)"]) apply (rule corres_underlying_trivial, wp) apply (rule corres_split[OF corres_machine_op, where r'="(=)"]) @@ -246,7 +246,7 @@ lemma doUserOp_if_invs'[ADT_IF_Refine_assms, wp]: apply (wp device_update_invs' dmo_setExMonitor_wp' dmo_invs' | simp)+ apply (clarsimp simp add: no_irq_modify user_memory_update_def) apply wpsimp - apply (wp select_wp)+ + apply wp+ apply (clarsimp simp: user_memory_update_def simpler_modify_def restrict_map_def split: option.splits) @@ -257,25 +257,25 @@ lemma doUserOp_if_invs'[ADT_IF_Refine_assms, wp]: lemma doUserOp_valid_duplicates[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \arch_extras\" apply (simp add: doUserOp_if_def split_def) - apply (wp dmo_setExMonitor_wp' dmo_invs' select_wp | simp)+ + apply (wp dmo_setExMonitor_wp' dmo_invs' | simp)+ done lemma doUserOp_if_schedact[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \\s. P (ksSchedulerAction s)\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma doUserOp_if_st_tcb_at[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \st_tcb_at' st t\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma doUserOp_if_cur_thread[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \\s. P (ksCurThread s)\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma do_user_op_if_corres'[ADT_IF_Refine_assms]: @@ -334,7 +334,7 @@ lemma do_user_op_if_corres'[ADT_IF_Refine_assms]: apply (rule corres_split[OF corres_machine_op', where r'="(=)"]) apply (rule corres_underlying_trivial, simp) apply (rule corres_return_same_trivial) - by (wp hoare_TrueI[where P = \] | simp)+ + by wpsimp+ lemma dmo_getActiveIRQ_corres[ADT_IF_Refine_assms]: "corres (=) \ \ (do_machine_op (getActiveIRQ in_kernel)) (doMachineOp (getActiveIRQ in_kernel'))" @@ -401,7 +401,6 @@ lemma handle_preemption_if_corres[ADT_IF_Refine_assms]: crunches doUserOp_if for ksDomainTime_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomainTime s)" and ksDomSchedule_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomSchedule s)" - (wp: select_wp) crunches checkActiveIRQ_if for arch_extras[ADT_IF_Refine_assms, wp]: arch_extras @@ -422,14 +421,13 @@ lemma doUserOp_if_no_interrupt[ADT_IF_Refine_assms]: doUserOp_if uop tc \\r s. (fst r) \ Some Interrupt\" apply (simp add: doUserOp_if_def del: split_paired_All) - apply (wp select_wp | wpc)+ + apply (wp | wpc)+ apply (clarsimp simp: uop_sane_def simp del: split_paired_All) done lemma handleEvent_corres_arch_extras[ADT_IF_Refine_assms]: "corres (dc \ dc) - (einvs and (\s. event \ Interrupt \ ct_running s) - and (\s. scheduler_action s = resume_cur_thread)) + (einvs and (\s. event \ Interrupt \ ct_running s) and schact_is_rct) (invs' and (\s. event \ Interrupt \ ct_running' s) and (\s. 
ksSchedulerAction s = ResumeCurrentThread) and arch_extras) diff --git a/proof/infoflow/refine/ARM/ArchADT_IF_Refine_C.thy b/proof/infoflow/refine/ARM/ArchADT_IF_Refine_C.thy index bd82a221c7..24dae6043c 100644 --- a/proof/infoflow/refine/ARM/ArchADT_IF_Refine_C.thy +++ b/proof/infoflow/refine/ARM/ArchADT_IF_Refine_C.thy @@ -49,9 +49,7 @@ qed lemma handleInvocation_ccorres'[ADT_IF_Refine_assms]: "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and arch_extras and - ct_active' and sch_act_simple and - (\s. \x. ksCurThread s \ set (ksReadyQueues s x))) + (invs' and arch_extras and ct_active' and sch_act_simple) (UNIV \ {s. isCall_' s = from_bool isCall} \ {s. isBlocking_' s = from_bool isBlocking}) [] (handleInvocation isCall isBlocking) (Call handleInvocation_'proc)" @@ -118,7 +116,7 @@ lemma corres_dmo_getExMonitor_C: apply (rule_tac r'="\(r, ms) (r', ms'). r = r' \ ms = rv \ ms' = rv'" in corres_split) apply (rule corres_trivial, rule corres_select_f') - apply (clarsimp simp: getExMonitor_def machine_rest_lift_def NonDetMonad.bind_def gets_def + apply (clarsimp simp: getExMonitor_def machine_rest_lift_def Nondet_Monad.bind_def gets_def get_def return_def modify_def put_def select_f_def) apply (clarsimp simp: getExMonitor_no_fail[simplified no_fail_def]) apply (clarsimp simp: split_def) @@ -132,7 +130,7 @@ lemma corres_dmo_getExMonitor_C: cmachine_state_relation_def Let_def) apply (rule corres_trivial, clarsimp) apply (wp hoare_TrueI)+ - apply (rule TrueI conjI | clarsimp simp: getExMonitor_def machine_rest_lift_def NonDetMonad.bind_def + apply (rule TrueI conjI | clarsimp simp: getExMonitor_def machine_rest_lift_def Nondet_Monad.bind_def gets_def get_def return_def modify_def put_def select_f_def)+ done @@ -150,7 +148,7 @@ lemma corres_dmo_setExMonitor_C: ms' = rv'\exclusive_state := es\" in corres_split) apply (rule corres_trivial, rule corres_select_f') - apply (clarsimp simp: setExMonitor_def machine_rest_lift_def NonDetMonad.bind_def gets_def + apply (clarsimp simp: setExMonitor_def machine_rest_lift_def Nondet_Monad.bind_def gets_def get_def return_def modify_def put_def select_f_def) apply (clarsimp simp: setExMonitor_no_fail[simplified no_fail_def]) apply (simp add: split_def) @@ -162,7 +160,7 @@ lemma corres_dmo_setExMonitor_C: apply (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def cmachine_state_relation_def Let_def) apply (wp hoare_TrueI)+ - apply (rule TrueI conjI | clarsimp simp: setExMonitor_def machine_rest_lift_def NonDetMonad.bind_def + apply (rule TrueI conjI | clarsimp simp: setExMonitor_def machine_rest_lift_def Nondet_Monad.bind_def gets_def get_def return_def modify_def put_def select_f_def)+ done @@ -193,20 +191,24 @@ lemma do_user_op_if_C_corres[ADT_IF_Refine_assms]: apply (rule corres_gen_asm) apply (simp add: doUserOp_if_def doUserOp_C_if_def uop_nonempty_def del: split_paired_All) apply (rule corres_gets_same) - apply (clarsimp simp: absKState_crelation ptable_rights_s'_def ptable_rights_s''_def - rf_sr_def cstate_relation_def Let_def cstate_to_H_correct) + apply (fastforce dest: ex_abs_ksReadyQueues_asrt + simp: absKState_crelation ptable_rights_s'_def ptable_rights_s''_def + rf_sr_def cstate_relation_def Let_def cstate_to_H_correct) apply simp apply (rule corres_gets_same) - apply (clarsimp simp: ptable_xn_s'_def ptable_xn_s''_def ptable_attrs_s_def - absKState_crelation ptable_attrs_s'_def ptable_attrs_s''_def rf_sr_def) + apply (fastforce dest: ex_abs_ksReadyQueues_asrt + simp: ptable_xn_s'_def 
ptable_xn_s''_def ptable_attrs_s_def + absKState_crelation ptable_attrs_s'_def ptable_attrs_s''_def rf_sr_def) apply simp apply (rule corres_gets_same) + apply clarsimp + apply (frule ex_abs_ksReadyQueues_asrt) apply (clarsimp simp: absKState_crelation curthread_relation ptable_lift_s'_def ptable_lift_s''_def ptable_lift_s_def rf_sr_def) apply simp apply (simp add: getCurThread_def) apply (rule corres_gets_same) - apply (simp add: absKState_crelation rf_sr_def) + apply (fastforce dest: ex_abs_ksReadyQueues_asrt simp: absKState_crelation rf_sr_def) apply simp apply (rule corres_gets_same) apply (rule fun_cong[where x=ptrFromPAddr]) @@ -247,7 +249,7 @@ lemma do_user_op_if_C_corres[ADT_IF_Refine_assms]: apply (rule corres_split[OF device_update_corres_C]) apply (rule corres_split[OF corres_dmo_setExMonitor_C, where R="\\" and R'="\\"]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (clarsimp simp: ex_abs_def restrict_map_def invs_pspace_aligned' invs_pspace_distinct' ptable_lift_s'_def ptable_rights_s'_def split: if_splits) diff --git a/proof/infoflow/refine/ARM/Example_Valid_StateH.thy b/proof/infoflow/refine/ARM/Example_Valid_StateH.thy index d6061062dc..9b93e4fe59 100644 --- a/proof/infoflow/refine/ARM/Example_Valid_StateH.thy +++ b/proof/infoflow/refine/ARM/Example_Valid_StateH.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -126,7 +127,7 @@ definition Low_pt'H :: "word8 \ ARM_H.pte " where "Low_pt'H \ (\_. ARM_H.InvalidPTE) - (0 := ARM_H.SmallPagePTE shared_page_ptr (PageCacheable \ {}) (Global \ {}) (XNever \ {}) (vmrights_map vm_read_write))" + (0 := ARM_H.SmallPagePTE shared_page_ptr_phys (PageCacheable \ {}) (Global \ {}) (XNever \ {}) (vmrights_map vm_read_write))" definition Low_ptH :: "word32 \ word32 \ Structures_H.kernel_object option" @@ -173,7 +174,7 @@ definition where "High_pt'H \ (\_. 
ARM_H.InvalidPTE) - (0 := ARM_H.SmallPagePTE shared_page_ptr (PageCacheable \ {}) (Global \ {}) (XNever \ {}) + (0 := ARM_H.SmallPagePTE shared_page_ptr_phys (PageCacheable \ {}) (Global \ {}) (XNever \ {}) (vmrights_map vm_read_only))" @@ -230,6 +231,8 @@ where \ \tcbFaultHandler =\ 0 \ \tcbIPCBuffer =\ 0 \ \tcbBoundNotification =\ None + \ \tcbSchedPrev =\ None + \ \tcbSchedNext =\ None \ \tcbContext =\ (ArchThread undefined)" @@ -254,6 +257,8 @@ where \ \tcbFaultHandler =\ 0 \ \tcbIPCBuffer =\ 0 \ \tcbBoundNotification =\ None + \ \tcbSchedPrev =\ None + \ \tcbSchedNext =\ None \ \tcbContext =\ (ArchThread undefined)" @@ -278,6 +283,8 @@ where \ \tcbFaultHandler =\ 0 \ \tcbIPCBuffer =\ 0 \ \tcbBoundNotification =\ None + \ \tcbSchedPrev =\ None + \ \tcbSchedNext =\ None \ \tcbContext =\ (ArchThread empty_context)" definition @@ -1079,7 +1086,7 @@ where ksDomSchedule = [(0 ,10), (1, 10)], ksCurDomain = 0, ksDomainTime = 5, - ksReadyQueues = const [], + ksReadyQueues = const (TcbQueue None None), ksReadyQueuesL1Bitmap = const 0, ksReadyQueuesL2Bitmap = const 0, ksCurThread = Low_tcb_ptr, @@ -2186,28 +2193,24 @@ lemma s0H_valid_objs': valid_cte'_def split: if_split_asm) apply (clarsimp simp: valid_obj'_def global_pdH'_def valid_mapping'_def s0_ptr_defs - is_aligned_def ARM.addrFromPPtr_def ARM.ptrFromPAddr_def - pptrBaseOffset_def ARM.pptrBase_def ARM.physBase_def - pptrBase_def physBase_def split: if_split_asm) - apply (clarsimp simp: valid_obj'_def High_pdH_def High_pd'H_def valid_pde'_def pteBits_def - valid_mapping'_def s0_ptr_defs is_aligned_def ARM.addrFromPPtr_def - ARM.pptrBase_def ARM.physBase_def ARM.ptrFromPAddr_def ptBits_def - pageBits_def pptrBaseOffset_def pptrBase_def physBase_def + apply (rule is_aligned_addrFromPPtr_n; clarsimp simp: is_aligned_def) + apply (clarsimp simp: valid_obj'_def High_pdH_def High_pd'H_def valid_mapping'_def s0_ptr_defs + ptBits_def pteBits_def split: if_split_asm) - apply (clarsimp simp: valid_obj'_def Low_pdH_def Low_pd'H_def valid_pde'_def valid_mapping'_def - s0_ptr_defs is_aligned_def ARM.addrFromPPtr_def pteBits_def - ARM.ptrFromPAddr_def ARM.physBase_def ptBits_def pageBits_def - pptrBaseOffset_def pptrBase_def physBase_def + apply (intro conjI impI; rule is_aligned_addrFromPPtr_n; clarsimp simp: is_aligned_def) + apply (clarsimp simp: valid_obj'_def Low_pdH_def Low_pd'H_def valid_mapping'_def s0_ptr_defs + ptBits_def pteBits_def split: if_split_asm) + apply (intro conjI impI; rule is_aligned_addrFromPPtr_n; clarsimp simp: is_aligned_def) apply (clarsimp simp: valid_obj'_def High_ptH_def High_pt'H_def valid_mapping'_def s0_ptr_defs - is_aligned_def ARM.addrFromPPtr_def ARM.ptrFromPAddr_def ARM.pptrBase_def - ARM.physBase_def pptrBaseOffset_def pptrBase_def physBase_def + ptBits_def pteBits_def shared_page_ptr_phys_def split: if_split_asm) + apply (rule is_aligned_addrFromPPtr_n; clarsimp simp: is_aligned_def) apply (clarsimp simp: valid_obj'_def Low_ptH_def Low_pt'H_def valid_mapping'_def s0_ptr_defs - is_aligned_def ARM.addrFromPPtr_def ARM.physBase_def ARM.ptrFromPAddr_def - pptrBaseOffset_def pptrBase_def physBase_def + ptBits_def pteBits_def shared_page_ptr_phys_def split: if_split_asm) + apply (rule is_aligned_addrFromPPtr_n; clarsimp simp: is_aligned_def) done lemmas the_nat_to_bl_simps = @@ -2808,8 +2811,6 @@ lemma s0H_invs: apply (clarsimp simp: sch_act_wf_def s0H_internal_def ct_in_state'_def st_tcb_at'_def obj_at'_def projectKO_eq project_inject objBitsKO_def s0_ptrs_aligned Low_tcbH_def) apply (rule pspace_distinctD''[OF _ 
s0H_pspace_distinct', simplified s0H_internal_def]) apply (simp add: objBitsKO_def) - apply (rule conjI) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs s0H_internal_def) apply (rule conjI) apply (clarsimp simp: sym_refs_def state_refs_of'_def refs_of'_def split: option.splits) apply (frule kh0H_SomeD) @@ -2970,9 +2971,16 @@ lemma s0H_invs: apply (rule conjI) apply (clarsimp simp: irqs_masked'_def s0H_internal_def maxIRQ_def timer_irq_def) apply (rule conjI) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKO_eq project_inject s0H_internal_def inQ_def) - apply (frule kh0H_dom_tcb) - apply (elim disjE, (clarsimp simp: kh0H_obj_def)+)[1] + apply (clarsimp simp: sym_heap_def opt_map_def projectKOs split: option.splits) + using kh0H_dom_tcb + apply (fastforce simp: kh0H_obj_def) + apply (rule conjI) + apply (clarsimp simp: valid_sched_pointers_def opt_map_def projectKOs split: option.splits) + using kh0H_dom_tcb + apply (fastforce simp: kh0H_obj_def) + apply (rule conjI) + apply (clarsimp simp: valid_bitmaps_def valid_bitmapQ_def bitmapQ_def s0H_internal_def + tcbQueueEmpty_def bitmapQ_no_L1_orphans_def bitmapQ_no_L2_orphans_def) apply (rule conjI) apply (clarsimp simp: ct_not_inQ_def obj_at'_def projectKO_eq project_inject s0H_internal_def objBitsKO_def s0_ptrs_aligned Low_tcbH_def) apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct', simplified s0H_internal_def]) @@ -3200,7 +3208,7 @@ lemma s0_pspace_rel: apply (clarsimp simp: kh0H_obj_def split del: if_split) apply (cut_tac x=y in pd_offs_in_range(3)) apply (clarsimp simp: pd_offs_range_def pde_relation_def pde_relation_aligned_def) - apply (clarsimp simp: kh0H_all_obj_def kh0_obj_def other_obj_relation_def + apply (clarsimp simp: kh0H_all_obj_def kh0_obj_def tcb_relation_cut_def tcb_relation_def arch_tcb_relation_def fault_rel_optionation_def word_bits_def the_nat_to_bl_simps)+ apply (clarsimp simp: kh0H_obj_def High_pt_def High_pt'H_def High_pt'_def split del: if_split) @@ -3254,7 +3262,14 @@ lemma s0_srel: apply (clarsimp simp: s0_internal_def s0H_internal_def exst0_def kh0H_def option_update_range_def split: if_split_asm option.splits) apply (clarsimp simp: s0_internal_def s0H_internal_def exst0_def etcb_relation_def idle_tcbH_def High_tcbH_def High_etcb_def Low_tcbH_def Low_etcb_def default_etcb_def split: if_split_asm) apply (simp add: s0_internal_def exst0_def s0H_internal_def sched_act_relation_def) - apply (simp add: s0_internal_def exst0_def s0H_internal_def ready_queues_relation_def) + apply (clarsimp simp: s0_internal_def exst0_def s0H_internal_def + ready_queues_relation_def ready_queue_relation_def + list_queue_relation_def queue_end_valid_def + prev_queue_head_def inQ_def tcbQueueEmpty_def + projectKOs opt_map_def opt_pred_def + split: option.splits) + using kh0H_dom_tcb + apply (fastforce simp: kh0H_obj_def) apply (clarsimp simp: s0_internal_def exst0_def s0H_internal_def ghost_relation_def) apply (rule conjI) apply clarsimp diff --git a/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine.thy b/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine.thy index 36b3e9505f..4c96dfc7d4 100644 --- a/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine.thy +++ b/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine.thy @@ -24,7 +24,7 @@ lemma kernelEntry_invs'[ADT_IF_Refine_assms, wp]: kernelEntry_if e tc \\_. 
invs'\" apply (simp add: kernelEntry_if_def) - apply (wp threadSet_invs_trivial threadSet_ct_running' static_imp_wp + apply (wp threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp | wp (once) hoare_drop_imps | clarsimp)+ done @@ -36,7 +36,7 @@ lemma kernelEntry_arch_extras[ADT_IF_Refine_assms, wp]: kernelEntry_if e tc \\_. arch_extras\" apply (simp add: kernelEntry_if_def) - apply (wp threadSet_invs_trivial threadSet_ct_running' static_imp_wp + apply (wp threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp | wp (once) hoare_drop_imps | clarsimp)+ done @@ -170,7 +170,7 @@ lemma do_user_op_if_corres[ADT_IF_Refine_assms]: apply (rule corres_split[OF corres_machine_op,where r'="(=)"]) apply (rule corres_underlying_trivial) apply (clarsimp simp: user_memory_update_def) - apply (rule non_fail_modify) + apply (rule no_fail_modify) apply (rule corres_split[OF corres_machine_op,where r'="(=)"]) apply (rule corres_underlying_trivial) apply wp @@ -186,7 +186,7 @@ lemma doUserOp_if_invs'[ADT_IF_Refine_assms, wp]: apply (wp device_update_invs' dmo_invs' | simp)+ apply (clarsimp simp add: no_irq_modify user_memory_update_def) apply wpsimp - apply (wp select_wp)+ + apply wp+ apply (clarsimp simp: user_memory_update_def simpler_modify_def restrict_map_def split: option.splits) @@ -197,25 +197,25 @@ lemma doUserOp_if_invs'[ADT_IF_Refine_assms, wp]: lemma doUserOp_valid_duplicates[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \arch_extras\" apply (simp add: doUserOp_if_def split_def) - apply (wp dmo_invs' select_wp | simp)+ + apply (wp dmo_invs' | simp)+ done lemma doUserOp_if_schedact[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \\s. P (ksSchedulerAction s)\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma doUserOp_if_st_tcb_at[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \st_tcb_at' st t\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma doUserOp_if_cur_thread[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \\s. P (ksCurThread s)\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma do_user_op_if_corres'[ADT_IF_Refine_assms]: @@ -278,7 +278,7 @@ lemma getActiveIRQ_nf: "no_fail (\_. True) (getActiveIRQ in_kernel)" apply (simp add: getActiveIRQ_def) apply (rule no_fail_pre) - apply (rule non_fail_gets non_fail_modify + apply (rule no_fail_gets no_fail_modify no_fail_return | rule no_fail_bind | simp | intro impI conjI)+ apply (wp del: no_irq | simp)+ @@ -348,7 +348,6 @@ lemma handle_preemption_if_corres[ADT_IF_Refine_assms]: crunches doUserOp_if for ksDomainTime_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomainTime s)" and ksDomSchedule_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomSchedule s)" - (wp: select_wp) crunches checkActiveIRQ_if for arch_extras[ADT_IF_Refine_assms, wp]: arch_extras @@ -369,14 +368,13 @@ lemma doUserOp_if_no_interrupt[ADT_IF_Refine_assms]: doUserOp_if uop tc \\r s. (fst r) \ Some Interrupt\" apply (simp add: doUserOp_if_def del: split_paired_All) - apply (wp select_wp | wpc)+ + apply (wp | wpc)+ apply (clarsimp simp: uop_sane_def simp del: split_paired_All) done lemma handleEvent_corres_arch_extras[ADT_IF_Refine_assms]: "corres (dc \ dc) - (einvs and (\s. event \ Interrupt \ ct_running s) - and (\s. scheduler_action s = resume_cur_thread)) + (einvs and (\s. event \ Interrupt \ ct_running s) and schact_is_rct) (invs' and (\s. event \ Interrupt \ ct_running' s) and (\s. 
ksSchedulerAction s = ResumeCurrentThread) and arch_extras) diff --git a/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine_C.thy b/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine_C.thy index cd5a151eb1..62a5a61064 100644 --- a/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine_C.thy +++ b/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine_C.thy @@ -47,13 +47,11 @@ qed lemma handleInvocation_ccorres'[ADT_IF_Refine_assms]: "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') - (invs' and arch_extras and - ct_active' and sch_act_simple and - (\s. \x. ksCurThread s \ set (ksReadyQueues s x))) + (invs' and arch_extras and ct_active' and sch_act_simple) (UNIV \ {s. isCall_' s = from_bool isCall} \ {s. isBlocking_' s = from_bool isBlocking}) [] (handleInvocation isCall isBlocking) (Call handleInvocation_'proc)" - apply (simp only: arch_extras_def pred_and_true) + apply (simp only: arch_extras_def pred_top_right_neutral) apply (rule handleInvocation_ccorres) done @@ -119,20 +117,24 @@ lemma do_user_op_if_C_corres[ADT_IF_Refine_assms]: apply (rule corres_gen_asm) apply (simp add: doUserOp_if_def doUserOp_C_if_def uop_nonempty_def del: split_paired_All) apply (rule corres_gets_same) - apply (clarsimp simp: absKState_crelation ptable_rights_s'_def ptable_rights_s''_def - rf_sr_def cstate_relation_def Let_def cstate_to_H_correct) + apply (fastforce dest: ex_abs_ksReadyQueues_asrt + simp: absKState_crelation ptable_rights_s'_def ptable_rights_s''_def + rf_sr_def cstate_relation_def Let_def cstate_to_H_correct) apply simp apply (rule corres_gets_same) - apply (clarsimp simp: ptable_xn_s'_def ptable_xn_s''_def ptable_attrs_s_def - absKState_crelation ptable_attrs_s'_def ptable_attrs_s''_def rf_sr_def) + apply (fastforce dest: ex_abs_ksReadyQueues_asrt + simp: ptable_xn_s'_def ptable_xn_s''_def ptable_attrs_s_def + absKState_crelation ptable_attrs_s'_def ptable_attrs_s''_def rf_sr_def) apply simp apply (rule corres_gets_same) + apply clarsimp + apply (frule ex_abs_ksReadyQueues_asrt) apply (clarsimp simp: absKState_crelation curthread_relation ptable_lift_s'_def ptable_lift_s''_def ptable_lift_s_def rf_sr_def) apply simp apply (simp add: getCurThread_def) apply (rule corres_gets_same) - apply (simp add: absKState_crelation rf_sr_def) + apply (fastforce dest: ex_abs_ksReadyQueues_asrt simp: absKState_crelation rf_sr_def) apply simp apply (rule corres_gets_same) apply (rule fun_cong[where x=ptrFromPAddr]) @@ -169,7 +171,7 @@ lemma do_user_op_if_C_corres[ADT_IF_Refine_assms]: apply (rule corres_underlying_split4) apply (rule corres_split[OF user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (clarsimp simp: ex_abs_def restrict_map_def invs_pspace_aligned' invs_pspace_distinct' ptable_lift_s'_def ptable_rights_s'_def split: if_splits) diff --git a/proof/infoflow/refine/RISCV64/Example_Valid_StateH.thy b/proof/infoflow/refine/RISCV64/Example_Valid_StateH.thy index 1a2d0af5e8..7e47d663af 100644 --- a/proof/infoflow/refine/RISCV64/Example_Valid_StateH.thy +++ b/proof/infoflow/refine/RISCV64/Example_Valid_StateH.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -40,7 +41,7 @@ definition Low_capsH :: "cnode_index \ (capability \ mdbnode) (the_nat_to_bl_10 4) \ (ArchObjectCap (ASIDPoolCap Low_pool_ptr (ucast Low_asid)), Null_mdb), (the_nat_to_bl_10 5) - \ (ArchObjectCap (FrameCap shared_page_ptr VMReadWrite 
RISCVLargePage + \ (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadWrite RISCVLargePage False (Some (ucast Low_asid, 0))), MDB 0 (Silc_cnode_ptr + 0xA0) False False), (the_nat_to_bl_10 6) @@ -73,7 +74,7 @@ definition High_capsH :: "cnode_index \ (capability \ mdbnode (the_nat_to_bl_10 4) \ (ArchObjectCap (ASIDPoolCap High_pool_ptr (ucast High_asid)), Null_mdb), (the_nat_to_bl_10 5) - \ (ArchObjectCap (FrameCap shared_page_ptr VMReadOnly RISCVLargePage + \ (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage False (Some (ucast High_asid, 0))), MDB (Silc_cnode_ptr + 0xA0) 0 False False), (the_nat_to_bl_10 6) @@ -100,7 +101,7 @@ definition Silc_capsH :: "cnode_index \ (capability \ mdbnode ((the_nat_to_bl_10 2) \ (CNodeCap Silc_cnode_ptr 10 2 10, Null_mdb), (the_nat_to_bl_10 5) - \ (ArchObjectCap (FrameCap shared_page_ptr VMReadOnly RISCVLargePage + \ (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage False (Some (ucast Silc_asid, 0))), MDB (Low_cnode_ptr + 0xA0) (High_cnode_ptr + 0xA0) False False), (the_nat_to_bl_10 318) @@ -128,10 +129,10 @@ text \Global page table\ definition global_pteH' :: "pt_index \ pte" where "global_pteH' idx \ if idx = 0x100 - then PagePTE ((ucast (idx && mask (ptTranslationBits - 1)) << ptTranslationBits * 2)) + then PagePTE ((ucast (idx && mask (ptTranslationBits - 1)) << ptTranslationBits * size max_pt_level)) False False False VMKernelOnly - else if idx = 0x1FE - then PagePTE (2 << ptTranslationBits * 2) False False False VMKernelOnly + else if idx = elf_index + then PagePTE (ucast ((kernelELFPAddrBase && ~~mask toplevel_bits) >> pageBits)) False False False VMKernelOnly else InvalidPTE" definition global_pteH where @@ -149,7 +150,7 @@ text \Low's page tables\ definition Low_pt'H :: "pt_index \ pte" where "Low_pt'H \ (\_. InvalidPTE) - (0 := PagePTE (addrFromPPtr shared_page_ptr >> pt_bits) False False False VMReadWrite)" + (0 := PagePTE (shared_page_ptr_phys >> pt_bits) False False False VMReadWrite)" definition Low_ptH :: "obj_ref \ obj_ref \ kernel_object option" where "Low_ptH \ @@ -174,7 +175,7 @@ text \High's page tables\ definition High_pt'H :: "pt_index \ pte" where "High_pt'H \ (\_. 
InvalidPTE) - (0 := PagePTE (addrFromPPtr shared_page_ptr >> pt_bits) False False False VMReadOnly)" + (0 := PagePTE (shared_page_ptr_phys >> pt_bits) False False False VMReadOnly)" definition High_ptH :: "obj_ref \ obj_ref \ kernel_object option" where "High_ptH \ @@ -215,6 +216,8 @@ definition Low_tcbH :: tcb where \ \tcbFaultHandler =\ 0 \ \tcbIPCBuffer =\ 0 \ \tcbBoundNotification =\ None + \ \tcbSchedPrev =\ None + \ \tcbSchedNext =\ None \ \tcbContext =\ (ArchThread undefined)" @@ -239,6 +242,8 @@ definition High_tcbH :: tcb where \ \tcbFaultHandler =\ 0 \ \tcbIPCBuffer =\ 0 \ \tcbBoundNotification =\ None + \ \tcbSchedPrev =\ None + \ \tcbSchedNext =\ None \ \tcbContext =\ (ArchThread undefined)" @@ -261,6 +266,8 @@ definition idle_tcbH :: tcb where \ \tcbFaultHandler =\ 0 \ \tcbIPCBuffer =\ 0 \ \tcbBoundNotification =\ None + \ \tcbSchedPrev =\ None + \ \tcbSchedNext =\ None \ \tcbContext =\ (ArchThread empty_context)" @@ -315,7 +322,7 @@ definition kh0H :: "(obj_ref \ kernel_object)" where option_update_range [Low_tcb_ptr \ KOTCB Low_tcbH] \ option_update_range [High_tcb_ptr \ KOTCB High_tcbH] \ option_update_range [idle_tcb_ptr \ KOTCB idle_tcbH] \ - option_update_range (shared_pageH shared_page_ptr) \ + option_update_range (shared_pageH shared_page_ptr_virt) \ option_update_range (global_ptH riscv_global_pt_ptr) ) Map.empty" @@ -333,7 +340,7 @@ lemma s0_ptrs_aligned: "is_aligned Low_tcb_ptr 10" "is_aligned idle_tcb_ptr 10" "is_aligned ntfn_ptr 5" - "is_aligned shared_page_ptr 21" + "is_aligned shared_page_ptr_virt 21" "is_aligned irq_cnode_ptr 10" "is_aligned Low_pool_ptr 12" "is_aligned High_pool_ptr 12" @@ -349,7 +356,7 @@ lemma page_offs_min': done lemma page_offs_min: - "shared_page_ptr \ shared_page_ptr + (ucast (x:: pt_index) << 12)" + "shared_page_ptr_virt \ shared_page_ptr_virt + (ucast (x:: pt_index) << 12)" by (simp_all add: page_offs_min' s0_ptrs_aligned) lemma page_offs_max': @@ -368,7 +375,7 @@ lemma page_offs_max': done lemma page_offs_max: - "shared_page_ptr + (ucast (x :: pt_index) << 12) \ shared_page_ptr + 0x1FFFFF" + "shared_page_ptr_virt + (ucast (x :: pt_index) << 12) \ shared_page_ptr_virt + 0x1FFFFF" by (simp_all add: page_offs_max' s0_ptrs_aligned) definition page_offs_range where @@ -384,7 +391,7 @@ lemma page_offs_in_range': done lemma page_offs_in_range: - "shared_page_ptr + (ucast (x :: pt_index) << 12) \ page_offs_range shared_page_ptr" + "shared_page_ptr_virt + (ucast (x :: pt_index) << 12) \ page_offs_range shared_page_ptr_virt" by (simp_all add: page_offs_in_range' s0_ptrs_aligned) lemma page_offs_range_correct': @@ -425,7 +432,8 @@ lemma page_offs_range_correct': done lemma page_offs_range_correct: - "x \ page_offs_range shared_page_ptr \ \y. x = shared_page_ptr + (ucast (y :: pt_index) << 12)" + "x \ page_offs_range shared_page_ptr_virt + \ \y. 
x = shared_page_ptr_virt + (ucast (y :: pt_index) << 12)" by (simp_all add: page_offs_range_correct' s0_ptrs_aligned) @@ -799,7 +807,7 @@ lemma not_in_range_None: "x \ pt_offs_range riscv_global_pt_ptr \ global_ptH riscv_global_pt_ptr x = None" "x \ pt_offs_range Low_pt_ptr \ Low_ptH Low_pt_ptr x = None" "x \ pt_offs_range High_pt_ptr \ High_ptH High_pt_ptr x = None" - "x \ page_offs_range shared_page_ptr \ shared_pageH shared_page_ptr x = None" + "x \ page_offs_range shared_page_ptr_virt \ shared_pageH shared_page_ptr_virt x = None" by (auto simp: page_offs_range_def cnode_offs_range_def pt_offs_range_def s0_ptr_defs kh0H_obj_def) lemma kh0H_dom_distinct: @@ -877,13 +885,13 @@ lemma kh0H_dom_distinct: "Low_pool_ptr \ tcb_offs_range idle_tcb_ptr" "irq_cnode_ptr \ tcb_offs_range idle_tcb_ptr" "ntfn_ptr \ tcb_offs_range idle_tcb_ptr" - "idle_tcb_ptr \ page_offs_range shared_page_ptr" - "High_tcb_ptr \ page_offs_range shared_page_ptr" - "Low_tcb_ptr \ page_offs_range shared_page_ptr" - "High_pool_ptr \ page_offs_range shared_page_ptr" - "Low_pool_ptr \ page_offs_range shared_page_ptr" - "irq_cnode_ptr \ page_offs_range shared_page_ptr" - "ntfn_ptr \ page_offs_range shared_page_ptr" + "idle_tcb_ptr \ page_offs_range shared_page_ptr_virt" + "High_tcb_ptr \ page_offs_range shared_page_ptr_virt" + "Low_tcb_ptr \ page_offs_range shared_page_ptr_virt" + "High_pool_ptr \ page_offs_range shared_page_ptr_virt" + "Low_pool_ptr \ page_offs_range shared_page_ptr_virt" + "irq_cnode_ptr \ page_offs_range shared_page_ptr_virt" + "ntfn_ptr \ page_offs_range shared_page_ptr_virt" by (auto simp: tcb_offs_range_def pt_offs_range_def page_offs_range_def cnode_offs_range_def kh0H_obj_def s0_ptr_defs) @@ -899,7 +907,7 @@ lemma kh0H_dom_sets_distinct: "irq_node_offs_range \ tcb_offs_range High_tcb_ptr = {}" "irq_node_offs_range \ tcb_offs_range Low_tcb_ptr = {}" "irq_node_offs_range \ tcb_offs_range idle_tcb_ptr = {}" - "irq_node_offs_range \ page_offs_range shared_page_ptr = {}" + "irq_node_offs_range \ page_offs_range shared_page_ptr_virt = {}" "cnode_offs_range Silc_cnode_ptr \ cnode_offs_range High_cnode_ptr = {}" "cnode_offs_range Silc_cnode_ptr \ cnode_offs_range Low_cnode_ptr = {}" "cnode_offs_range Silc_cnode_ptr \ pt_offs_range riscv_global_pt_ptr = {}" @@ -910,7 +918,7 @@ lemma kh0H_dom_sets_distinct: "cnode_offs_range Silc_cnode_ptr \ tcb_offs_range High_tcb_ptr = {}" "cnode_offs_range Silc_cnode_ptr \ tcb_offs_range Low_tcb_ptr = {}" "cnode_offs_range Silc_cnode_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "cnode_offs_range Silc_cnode_ptr \ page_offs_range shared_page_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ page_offs_range shared_page_ptr_virt = {}" "cnode_offs_range High_cnode_ptr \ cnode_offs_range Low_cnode_ptr = {}" "cnode_offs_range High_cnode_ptr \ pt_offs_range riscv_global_pt_ptr = {}" "cnode_offs_range High_cnode_ptr \ pt_offs_range High_pd_ptr = {}" @@ -920,7 +928,7 @@ lemma kh0H_dom_sets_distinct: "cnode_offs_range High_cnode_ptr \ tcb_offs_range High_tcb_ptr = {}" "cnode_offs_range High_cnode_ptr \ tcb_offs_range Low_tcb_ptr = {}" "cnode_offs_range High_cnode_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "cnode_offs_range High_cnode_ptr \ page_offs_range shared_page_ptr = {}" + "cnode_offs_range High_cnode_ptr \ page_offs_range shared_page_ptr_virt = {}" "cnode_offs_range Low_cnode_ptr \ pt_offs_range riscv_global_pt_ptr = {}" "cnode_offs_range Low_cnode_ptr \ pt_offs_range High_pd_ptr = {}" "cnode_offs_range Low_cnode_ptr \ pt_offs_range Low_pd_ptr = {}" @@ -929,7 +937,7 @@ lemma 
kh0H_dom_sets_distinct: "cnode_offs_range Low_cnode_ptr \ tcb_offs_range High_tcb_ptr = {}" "cnode_offs_range Low_cnode_ptr \ tcb_offs_range Low_tcb_ptr = {}" "cnode_offs_range Low_cnode_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "cnode_offs_range Low_cnode_ptr \ page_offs_range shared_page_ptr = {}" + "cnode_offs_range Low_cnode_ptr \ page_offs_range shared_page_ptr_virt = {}" "pt_offs_range riscv_global_pt_ptr \ pt_offs_range High_pd_ptr = {}" "pt_offs_range riscv_global_pt_ptr \ pt_offs_range Low_pd_ptr = {}" "pt_offs_range riscv_global_pt_ptr \ pt_offs_range High_pt_ptr = {}" @@ -937,35 +945,35 @@ lemma kh0H_dom_sets_distinct: "pt_offs_range riscv_global_pt_ptr \ tcb_offs_range High_tcb_ptr = {}" "pt_offs_range riscv_global_pt_ptr \ tcb_offs_range Low_tcb_ptr = {}" "pt_offs_range riscv_global_pt_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "pt_offs_range riscv_global_pt_ptr \ page_offs_range shared_page_ptr = {}" + "pt_offs_range riscv_global_pt_ptr \ page_offs_range shared_page_ptr_virt = {}" "pt_offs_range High_pd_ptr \ pt_offs_range Low_pd_ptr = {}" "pt_offs_range High_pd_ptr \ pt_offs_range High_pt_ptr = {}" "pt_offs_range High_pd_ptr \ pt_offs_range Low_pt_ptr = {}" "pt_offs_range High_pd_ptr \ tcb_offs_range High_tcb_ptr = {}" "pt_offs_range High_pd_ptr \ tcb_offs_range Low_tcb_ptr = {}" "pt_offs_range High_pd_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "pt_offs_range High_pd_ptr \ page_offs_range shared_page_ptr = {}" + "pt_offs_range High_pd_ptr \ page_offs_range shared_page_ptr_virt = {}" "pt_offs_range Low_pd_ptr \ pt_offs_range High_pt_ptr = {}" "pt_offs_range Low_pd_ptr \ pt_offs_range Low_pt_ptr = {}" "pt_offs_range Low_pd_ptr \ tcb_offs_range High_tcb_ptr = {}" "pt_offs_range Low_pd_ptr \ tcb_offs_range Low_tcb_ptr = {}" "pt_offs_range Low_pd_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "pt_offs_range Low_pd_ptr \ page_offs_range shared_page_ptr = {}" + "pt_offs_range Low_pd_ptr \ page_offs_range shared_page_ptr_virt = {}" "pt_offs_range High_pt_ptr \ pt_offs_range Low_pt_ptr = {}" "pt_offs_range High_pt_ptr \ tcb_offs_range High_tcb_ptr = {}" "pt_offs_range High_pt_ptr \ tcb_offs_range Low_tcb_ptr = {}" "pt_offs_range High_pt_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "pt_offs_range High_pt_ptr \ page_offs_range shared_page_ptr = {}" + "pt_offs_range High_pt_ptr \ page_offs_range shared_page_ptr_virt = {}" "pt_offs_range Low_pt_ptr \ tcb_offs_range High_tcb_ptr = {}" "pt_offs_range Low_pt_ptr \ tcb_offs_range Low_tcb_ptr = {}" "pt_offs_range Low_pt_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "pt_offs_range Low_pt_ptr \ page_offs_range shared_page_ptr = {}" + "pt_offs_range Low_pt_ptr \ page_offs_range shared_page_ptr_virt = {}" "tcb_offs_range High_tcb_ptr \ tcb_offs_range Low_tcb_ptr = {}" "tcb_offs_range High_tcb_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "tcb_offs_range High_tcb_ptr \ page_offs_range shared_page_ptr = {}" + "tcb_offs_range High_tcb_ptr \ page_offs_range shared_page_ptr_virt = {}" "tcb_offs_range Low_tcb_ptr \ tcb_offs_range idle_tcb_ptr = {}" - "tcb_offs_range Low_tcb_ptr \ page_offs_range shared_page_ptr = {}" - "page_offs_range shared_page_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "tcb_offs_range Low_tcb_ptr \ page_offs_range shared_page_ptr_virt = {}" + "page_offs_range shared_page_ptr_virt \ tcb_offs_range idle_tcb_ptr = {}" by (rule disjointI, clarsimp simp: tcb_offs_range_def pt_offs_range_def page_offs_range_def irq_node_offs_range_def cnode_offs_range_def s0_ptr_defs , drule (1) order_trans le_less_trans, fastforce)+ @@ -1036,13 +1044,13 @@ lemma 
kh0H_dom_distinct': "riscv_global_pt_ptr + (ucast y << 3) \ Low_pool_ptr" "riscv_global_pt_ptr + (ucast y << 3) \ irq_cnode_ptr" "riscv_global_pt_ptr + (ucast y << 3) \ ntfn_ptr" - "shared_page_ptr + (ucast y << 12) \ idle_tcb_ptr" - "shared_page_ptr + (ucast y << 12) \ High_tcb_ptr" - "shared_page_ptr + (ucast y << 12) \ Low_tcb_ptr" - "shared_page_ptr + (ucast y << 12) \ High_pool_ptr" - "shared_page_ptr + (ucast y << 12) \ Low_pool_ptr" - "shared_page_ptr + (ucast y << 12) \ irq_cnode_ptr" - "shared_page_ptr + (ucast y << 12) \ ntfn_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ idle_tcb_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ High_tcb_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ Low_tcb_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ High_pool_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ Low_pool_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ irq_cnode_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ ntfn_ptr" apply (drule offs_in_range, fastforce simp: kh0H_dom_distinct)+ apply (cut_tac x=y in offs_in_range(1), fastforce simp: kh0H_dom_distinct)+ apply (cut_tac x=y in offs_in_range(2), fastforce simp: kh0H_dom_distinct)+ @@ -1057,7 +1065,7 @@ lemma not_disjointI: by fastforce lemma shared_pageH_KOUserData[simp]: - "shared_pageH shared_page_ptr (shared_page_ptr + (UCAST(9 \ 64) y << 12)) = Some KOUserData" + "shared_pageH shared_page_ptr_virt (shared_page_ptr_virt + (UCAST(9 \ 64) y << 12)) = Some KOUserData" apply (clarsimp simp: shared_pageH_def page_offs_min page_offs_max add.commute) apply (cut_tac shared_page_ptr_is_aligned) apply (clarsimp simp: is_aligned_mask mask_def s0_ptr_defs bit_simps) @@ -1083,7 +1091,7 @@ lemma kh0H_simps[simp]: "kh0H (Low_pt_ptr + (ucast y << 3)) = Low_ptH Low_pt_ptr (Low_pt_ptr + (ucast y << 3))" "kh0H (High_pt_ptr + (ucast y << 3)) = High_ptH High_pt_ptr (High_pt_ptr + (ucast y << 3))" "kh0H (riscv_global_pt_ptr + (ucast y << 3)) = global_ptH riscv_global_pt_ptr (riscv_global_pt_ptr + (ucast y << 3))" - "kh0H (shared_page_ptr + (ucast y << 12)) = Some KOUserData" + "kh0H (shared_page_ptr_virt + (ucast y << 12)) = Some KOUserData" supply option.case_cong[cong] apply (fastforce simp: kh0H_def option_update_range_def) by ((clarsimp simp: kh0H_def kh0H_dom_distinct kh0H_dom_distinct' @@ -1100,7 +1108,7 @@ lemma kh0H_dom: "dom kh0H = {idle_tcb_ptr, High_tcb_ptr, Low_tcb_ptr, High_pool_ptr, Low_pool_ptr, irq_cnode_ptr, ntfn_ptr} \ irq_node_offs_range \ - page_offs_range shared_page_ptr \ + page_offs_range shared_page_ptr_virt \ cnode_offs_range Silc_cnode_ptr \ cnode_offs_range High_cnode_ptr \ cnode_offs_range Low_cnode_ptr \ @@ -1144,7 +1152,7 @@ lemma kh0H_SomeD: x \ cnode_offs_range High_cnode_ptr \ High_cte High_cnode_ptr x \ None \ y = the (High_cte High_cnode_ptr x) \ x \ cnode_offs_range Silc_cnode_ptr \ Silc_cte Silc_cnode_ptr x \ None \ y = the (Silc_cte Silc_cnode_ptr x) \ x = irq_cnode_ptr \ y = KOCTE irq_cte \ - x \ page_offs_range shared_page_ptr \ y = KOUserData" + x \ page_offs_range shared_page_ptr_virt \ y = KOUserData" apply (frule kh0H_SomeD') apply (elim disjE) by ((clarsimp | drule offs_range_correct)+) @@ -1160,7 +1168,7 @@ definition arch_state0H :: Arch.kernel_state where definition s0H_internal :: "kernel_state" where "s0H_internal \ \ ksPSpace = kh0H, - gsUserPages = [shared_page_ptr \ RISCVLargePage], + gsUserPages = [shared_page_ptr_virt \ RISCVLargePage], gsCNodes = (\x. if \irq :: irq. 
init_irq_node_ptr + (ucast irq << 5) = x then Some 0 else None) (Low_cnode_ptr \ 10, @@ -1173,7 +1181,7 @@ definition s0H_internal :: "kernel_state" where ksDomSchedule = [(0, 10), (1, 10)], ksCurDomain = 0, ksDomainTime = 5, - ksReadyQueues = const [], + ksReadyQueues = const (TcbQueue None None), ksReadyQueuesL1Bitmap = const 0, ksReadyQueuesL2Bitmap = const 0, ksCurThread = Low_tcb_ptr, @@ -2342,9 +2350,9 @@ lemma valid_caps_s0H[simp]: "valid_cap' (CNodeCap Low_cnode_ptr 10 2 10) s0H_internal" "valid_cap' (CNodeCap High_cnode_ptr 10 2 10) s0H_internal" "valid_cap' (CNodeCap Silc_cnode_ptr 10 2 10) s0H_internal" - "valid_cap' (ArchObjectCap (FrameCap shared_page_ptr VMReadWrite RISCVLargePage False (Some (ucast Low_asid, 0)))) s0H_internal" - "valid_cap' (ArchObjectCap (FrameCap shared_page_ptr VMReadOnly RISCVLargePage False (Some (ucast High_asid, 0)))) s0H_internal" - "valid_cap' (ArchObjectCap (FrameCap shared_page_ptr VMReadOnly RISCVLargePage False (Some (ucast Silc_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadWrite RISCVLargePage False (Some (ucast Low_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage False (Some (ucast High_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage False (Some (ucast Silc_asid, 0)))) s0H_internal" "valid_cap' (ArchObjectCap (PageTableCap Low_pt_ptr (Some (ucast Low_asid, 0)))) s0H_internal" "valid_cap' (ArchObjectCap (PageTableCap High_pt_ptr (Some (ucast High_asid, 0)))) s0H_internal" "valid_cap' (ArchObjectCap (PageTableCap Low_pd_ptr (Some (ucast Low_asid, 0)))) s0H_internal" @@ -2644,7 +2652,7 @@ lemma map_to_ctes_kh0H_simps'[simp]: "map_to_ctes kh0H (Low_cnode_ptr + 0x60) = Some (CTE (ArchObjectCap (PageTableCap Low_pd_ptr (Some (ucast Low_asid, 0)))) (MDB 0 (Low_tcb_ptr + 0x20) False False))" "map_to_ctes kh0H (Low_cnode_ptr + 0x80) = Some (CTE (ArchObjectCap (ASIDPoolCap Low_pool_ptr (ucast Low_asid))) Null_mdb)" - "map_to_ctes kh0H (Low_cnode_ptr + 0xA0) = Some (CTE (ArchObjectCap (FrameCap shared_page_ptr VMReadWrite RISCVLargePage + "map_to_ctes kh0H (Low_cnode_ptr + 0xA0) = Some (CTE (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadWrite RISCVLargePage False (Some (ucast Low_asid, 0)))) (MDB 0 (Silc_cnode_ptr + 0xA0) False False))" "map_to_ctes kh0H (Low_cnode_ptr + 0xC0) = Some (CTE (ArchObjectCap (PageTableCap Low_pt_ptr (Some (ucast Low_asid, 0)))) Null_mdb)" @@ -2655,14 +2663,14 @@ lemma map_to_ctes_kh0H_simps'[simp]: "map_to_ctes kh0H (High_cnode_ptr + 0x60) = Some (CTE (ArchObjectCap (PageTableCap High_pd_ptr (Some (ucast High_asid, 0)))) (MDB 0 (High_tcb_ptr + 0x20) False False))" "map_to_ctes kh0H (High_cnode_ptr + 0x80) = Some (CTE (ArchObjectCap (ASIDPoolCap High_pool_ptr (ucast High_asid))) Null_mdb)" - "map_to_ctes kh0H (High_cnode_ptr + 0xA0) = Some (CTE (ArchObjectCap (FrameCap shared_page_ptr VMReadOnly RISCVLargePage + "map_to_ctes kh0H (High_cnode_ptr + 0xA0) = Some (CTE (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage False (Some (ucast High_asid, 0)))) (MDB (Silc_cnode_ptr + 0xA0) 0 False False))" "map_to_ctes kh0H (High_cnode_ptr + 0xC0) = Some (CTE (ArchObjectCap (PageTableCap High_pt_ptr (Some (ucast High_asid, 0)))) Null_mdb)" "map_to_ctes kh0H (High_cnode_ptr + 0x27C0) = Some (CTE (NotificationCap ntfn_ptr 0 False True) (MDB 0 (Silc_cnode_ptr + 0x27C0) False False))" "map_to_ctes kh0H (Silc_cnode_ptr + 0x40) = Some (CTE 
(CNodeCap Silc_cnode_ptr 10 2 10) Null_mdb)" - "map_to_ctes kh0H (Silc_cnode_ptr + 0xA0) = Some (CTE (ArchObjectCap (FrameCap shared_page_ptr VMReadOnly RISCVLargePage + "map_to_ctes kh0H (Silc_cnode_ptr + 0xA0) = Some (CTE (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage False (Some (ucast Silc_asid, 0)))) (MDB (Low_cnode_ptr + 0xA0) (High_cnode_ptr + 0xA0) False False))" "map_to_ctes kh0H (Silc_cnode_ptr + 0x27C0) = Some (CTE (NotificationCap ntfn_ptr 0 True False) @@ -3224,7 +3232,7 @@ lemma valid_arch_state_s0H: apply (intro conjI) apply (clarsimp simp: valid_asid_table'_def s0H_internal_def arch_state0H_def asid_bits_defs asid_high_bits_of_def Low_asid_def High_asid_def mask_def s0_ptr_defs) - apply (clarsimp simp: valid_global_pts'_def riscv_global_pt_is_aligned[simplified bit_simps] + apply (clarsimp simp: valid_global_pts'_def is_aligned_riscv_global_pt_ptr[simplified bit_simps] arch_state0H_def page_table_at'_def typ_at'_def ko_wp_at'_def bit_simps global_ptH_def pt_offs_min) apply (subst objBitsKO_def) @@ -3251,8 +3259,6 @@ lemma s0H_invs: s0H_internal_def s0_ptrs_aligned objBitsKO_def Low_tcbH_def) apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct', simplified s0H_internal_def]) apply (simp add: objBitsKO_def) - apply (rule conjI) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs s0H_internal_def) apply (rule conjI) apply (clarsimp simp: sym_refs_def state_refs_of'_def refs_of'_def split: option.splits) apply (frule kh0H_SomeD) @@ -3419,9 +3425,16 @@ lemma s0H_invs: apply (rule conjI) apply (clarsimp simp: irqs_masked'_def s0H_internal_def maxIRQ_def timer_irq_def irqInvalid_def) apply (rule conjI) - apply (clarsimp simp: valid_queues'_def obj_at'_def s0H_internal_def inQ_def) - apply (frule kh0H_dom_tcb) - apply (elim disjE, (clarsimp simp: kh0H_obj_def)+)[1] + apply (clarsimp simp: sym_heap_def opt_map_def projectKOs split: option.splits) + using kh0H_dom_tcb + apply (fastforce simp: kh0H_obj_def) + apply (rule conjI) + apply (clarsimp simp: valid_sched_pointers_def opt_map_def projectKOs split: option.splits) + using kh0H_dom_tcb + apply (fastforce simp: kh0H_obj_def) + apply (rule conjI) + apply (clarsimp simp: valid_bitmaps_def valid_bitmapQ_def bitmapQ_def s0H_internal_def + tcbQueueEmpty_def bitmapQ_no_L1_orphans_def bitmapQ_no_L2_orphans_def) apply (rule conjI) apply (clarsimp simp: ct_not_inQ_def obj_at'_def objBitsKO_def s0H_internal_def s0_ptrs_aligned Low_tcbH_def) @@ -3446,7 +3459,7 @@ lemma kh0_pspace_dom: "pspace_dom kh0 = {idle_tcb_ptr, High_tcb_ptr, Low_tcb_ptr, High_pool_ptr, Low_pool_ptr, irq_cnode_ptr, ntfn_ptr} \ irq_node_offs_range \ - page_offs_range shared_page_ptr \ + page_offs_range shared_page_ptr_virt \ cnode_offs_range Silc_cnode_ptr \ cnode_offs_range High_cnode_ptr \ cnode_offs_range Low_cnode_ptr \ @@ -3492,7 +3505,7 @@ lemma kh0_pspace_dom: apply (force simp: kh0_def kh0_obj_def image_def cte_map_def') apply (rule conjI) apply clarsimp - apply (rule_tac x=shared_page_ptr in exI) + apply (rule_tac x=shared_page_ptr_virt in exI) apply (drule offs_range_correct) apply (clarsimp simp: kh0_def kh0_obj_def image_def s0_ptr_defs cte_map_def' dom_caps bit_simps) apply (rule_tac x="UCAST (9 \ 64) y" in exI) @@ -3594,16 +3607,17 @@ lemma s0_pspace_rel: apply (clarsimp simp: kh0_obj_def bit_simps dest!: less_0x200_exists_ucast) defer apply ((clarsimp simp: kh0_obj_def kh0H_obj_def bit_simps word_bits_def - other_obj_relation_def fault_rel_optionation_def + fault_rel_optionation_def 
tcb_relation_cut_def tcb_relation_def arch_tcb_relation_def the_nat_to_bl_simps split del: if_split)+)[3] prefer 13 apply ((clarsimp simp: kh0_obj_def kh0H_all_obj_def bit_simps add.commute pt_offs_max pt_offs_min pte_relation_def split del: if_split, - clarsimp simp: s0_ptr_defs addrFromPPtr_def pptrBaseOffset_def paddrBase_def + clarsimp simp: s0_ptr_defs shared_page_ptr_phys_def addrFromPPtr_def pptrBaseOffset_def paddrBase_def vmrights_map_def vm_read_only_def vm_read_write_def - kh0_obj_def kh0H_all_obj_def bit_simps mask_def)+)[5] + kh0_obj_def kh0H_all_obj_def elf_index_value, + (clarsimp simp: bit_simps mask_def)?)+)[5] apply (clarsimp simp: kh0_obj_def kh0H_obj_def well_formed_cnode_n_def cte_relation_def cte_map_def bit_simps) apply (clarsimp simp: kh0H_obj_def bit_simps ntfn_def other_obj_relation_def ntfn_relation_def) @@ -3654,7 +3668,14 @@ lemma s0_srel: High_etcb_def Low_etcb_def default_etcb_def split: if_split_asm) apply (simp add: s0_internal_def exst0_def s0H_internal_def sched_act_relation_def) - apply (simp add: s0_internal_def exst0_def s0H_internal_def ready_queues_relation_def) + apply (clarsimp simp: s0_internal_def exst0_def s0H_internal_def + ready_queues_relation_def ready_queue_relation_def + list_queue_relation_def queue_end_valid_def + prev_queue_head_def inQ_def tcbQueueEmpty_def + projectKOs opt_map_def opt_pred_def + split: option.splits) + using kh0H_dom_tcb + apply (fastforce simp: kh0H_obj_def) apply (clarsimp simp: s0_internal_def exst0_def s0H_internal_def ghost_relation_def) apply (rule conjI) apply (fastforce simp: kh0_def kh0_obj_def dest: kh0_SomeD) diff --git a/proof/invariant-abstract/AARCH64/ArchADT_AI.thy b/proof/invariant-abstract/AARCH64/ArchADT_AI.thy index 6d2064914b..c0376b854c 100644 --- a/proof/invariant-abstract/AARCH64/ArchADT_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchADT_AI.thy @@ -102,7 +102,7 @@ definition (machine_word \ nat \ vm_attributes \ vm_rights)" where "get_page_info aobjs pt_ref vptr \ (do { - oassert (canonical_address vptr); + oassert (vptr \ user_region); (level, slot) \ pt_lookup_slot pt_ref vptr; pte \ oapply2 (level_type level) slot; K $ pte_info level pte @@ -123,8 +123,8 @@ definition ptable_rights :: "obj_ref \ 'z::state_ext state \ canonical_address vptr" +lemma ptable_lift_Some_user_regionD: + "ptable_lift t s vptr = Some p \ vptr \ user_region" by (clarsimp simp: ptable_lift_def get_page_info_def split: if_splits option.splits) diff --git a/proof/invariant-abstract/AARCH64/ArchAInvsPre.thy b/proof/invariant-abstract/AARCH64/ArchAInvsPre.thy index 2da00ad972..5f77c0be35 100644 --- a/proof/invariant-abstract/AARCH64/ArchAInvsPre.thy +++ b/proof/invariant-abstract/AARCH64/ArchAInvsPre.thy @@ -19,13 +19,12 @@ lemma ucast_ucast_mask_low: "(ucast (x && mask asid_low_bits) :: asid_low_index) by (rule ucast_mask_drop, simp add: asid_low_bits_def) lemma ptes_of_idx: - "\ ptes_of s (pt_type pt) (pt_slot_offset level pt_ptr p) = Some pte; - pts_of s pt_ptr = Some pt; pspace_aligned s \ \ + "\ ptes_of s (level_type level) (pt_slot_offset level pt_ptr p) = Some pte; + pts_of s pt_ptr = Some pt; pt_type pt = level_type level; pspace_aligned s \ \ \idx. 
pt_apply pt idx = pte" apply (drule_tac pt_ptr=pt_ptr in pspace_aligned_pts_ofD, simp) - sorry (* FIXME AARCH64 apply (fastforce simp: level_pte_of_def in_omonad) - done *) + done lemma pte_info_not_InvalidPTE: "pte_info level pte = Some (b, a, attr, r) \ pte \ InvalidPTE" @@ -43,51 +42,31 @@ lemma data_at_aligned: "\ data_at sz p s; pspace_aligned s\ \ is_aligned p (pageBitsForSize sz)" unfolding data_at_def by (auto simp: obj_at_def dest: pspace_alignedD) -lemma is_aligned_ptrFromPAddr_n_eq: (* FIXME AARCH64: might need a different precondition *) - "sz \ canonical_bit \ is_aligned (ptrFromPAddr x) sz = is_aligned x sz" - apply (rule iffI) - apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def paddrBase_def canonical_bit_def) - apply (drule is_aligned_addD2) - apply (erule is_aligned_weaken[rotated]) - apply (simp add: is_aligned_def) - sorry (* FIXME AARCH64 - apply assumption - apply (erule (1) is_aligned_ptrFromPAddr_n) - done *) - lemma some_get_page_info_umapsD: - "\get_page_info (aobjs_of s) pt_ref p = Some (b, a, attr, r); - \\ (max_pt_level, pt_ref) s; p \ user_region; valid_vspace_objs s; pspace_aligned s; - canonical_address p; + "\get_page_info (aobjs_of s) pt_ref vptr = Some (b, a, attr, r); + \\ (max_pt_level, pt_ref) s; vptr \ user_region; valid_vspace_objs s; pspace_aligned s; valid_asid_table s; valid_objs s\ \ \sz. pageBitsForSize sz = a \ is_aligned b a \ data_at sz (ptrFromPAddr b) s" apply (clarsimp simp: get_page_info_def vs_lookup_table_def) - sorry (* FIXME AARCH64 apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def) apply (frule pt_walk_max_level) apply (drule pt_walk_level) apply (rename_tac pte asid pool_ptr vref level pt_ptr') - apply (subgoal_tac "vs_lookup_table level asid p s = Some (level, pt_ptr')") - prefer 2 + apply (prop_tac "vs_lookup_table level asid vptr s = Some (level, pt_ptr')") apply (clarsimp simp: vs_lookup_table_def in_omonad) - apply (drule (2) valid_vspace_objs_strongD; assumption?) - apply (clarsimp simp: pte_info_def split: pte.splits) - apply (rename_tac ppn pt) - apply (frule pspace_aligned_pts_ofD, fastforce) - apply (drule_tac x="table_index (pt_slot_offset level pt_ptr' p)" in bspec) - apply (clarsimp simp: table_index_offset_pt_bits_left simp: kernel_mappings_slots_eq) - apply (erule (1) no_user_region_kernel_mappings) - apply (clarsimp simp: pte_of_def) - apply (subgoal_tac "valid_pte level (PagePTE ppn attr r) s") - prefer 2 - apply simp + apply (drule (2) valid_vspace_objs_strongD; assumption?) 
+ apply (clarsimp simp: pte_info_def split: pte.splits) + apply (rename_tac pt ppn) + apply (frule pspace_aligned_pts_ofD, fastforce) + apply (drule_tac x="PagePTE b ppn attr r" in bspec) + apply (erule (1) pt_slot_offset_pt_range, simp) apply (subst (asm) valid_pte.simps) apply clarsimp - apply (rule_tac x="vmpage_size_of_level level" in exI) + apply (rule_tac x="vmsize_of_level level" in exI) apply (clarsimp simp: obj_at_def) apply (drule (1) data_at_aligned) - apply (simp add: pt_bits_left_le_canonical is_aligned_ptrFromPAddr_n_eq) - done *) + apply (simp add: is_aligned_ptrFromPAddr_n_eq) + done lemma user_mem_dom_cong: "kheap s = kheap s' \ dom (user_mem s) = dom (user_mem s')" @@ -111,31 +90,38 @@ lemma get_vspace_of_thread_asid_or_global_pt: by (auto simp: get_vspace_of_thread_def split: option.split kernel_object.split cap.split arch_cap.split pt_type.splits) +lemma get_page_info_gpd_kmaps: + "\valid_global_objs s; valid_arch_state s; pspace_aligned s; + get_page_info (aobjs_of s) (global_pt s) vptr = Some (b, a, attr, r)\ + \ False" + apply (clarsimp simp: get_page_info_def in_omonad pt_lookup_slot_def pt_lookup_slot_from_level_def) + apply (clarsimp simp: valid_arch_state_def valid_global_tables_def) + apply (subst (asm) pt_walk.simps) + apply (drule (1) pspace_aligned_pts_ofD) + apply (clarsimp simp: ptes_of_def in_omonad pte_info_def + table_base_pt_slot_offset[where level=max_pt_level, simplified]) + done + lemma ptable_rights_imp_frame[AInvsPre_asms]: assumes "valid_state s" - shows "\ ptable_rights t s x \ {}; ptable_lift t s x = Some (addrFromPPtr y) \ \ - in_user_frame y s \ in_device_frame y s" - apply (rule ccontr, frule ptable_lift_Some_canonical_addressD) + shows "\ ptable_rights t s vptr \ {}; ptable_lift t s vptr = Some (addrFromPPtr p) \ \ + in_user_frame p s \ in_device_frame p s" + apply (rule ccontr, frule ptable_lift_Some_user_regionD) using assms get_vspace_of_thread_asid_or_global_pt[of s t] apply (clarsimp simp: ptable_lift_def ptable_rights_def in_user_frame_def in_device_frame_def split: option.splits) - sorry (* FIXME AARCH64 - apply (case_tac "x \ kernel_mappings") - apply (frule (2) some_get_page_info_kmapsD; - fastforce simp: valid_state_def valid_arch_state_def valid_pspace_def) apply (frule some_get_page_info_umapsD) apply (rule get_vspace_of_thread_reachable) apply clarsimp - apply (frule get_page_info_gpd_kmaps[rotated 2]) + apply (frule get_page_info_gpd_kmaps[rotated 3]) apply (simp_all add: valid_state_def valid_pspace_def valid_arch_state_def) - apply (clarsimp simp: data_at_def canonical_not_kernel_is_user) apply clarsimp apply (drule_tac x=sz in spec)+ apply (rename_tac p_addr attr rghts sz) - apply (frule is_aligned_add_helper[OF _ and_mask_less', THEN conjunct2, of _ _ x]) + apply (frule is_aligned_add_helper[OF _ and_mask_less', THEN conjunct2, of _ _ vptr]) apply (simp only: pbfs_less_wb'[simplified word_bits_def]) apply (clarsimp simp: data_at_def ptrFromPAddr_def addrFromPPtr_def field_simps) - apply (subgoal_tac "p_addr + (pptrBaseOffset + (x && mask (pageBitsForSize sz))) + apply (subgoal_tac "p_addr + (pptrBaseOffset + (vptr && mask (pageBitsForSize sz))) && ~~ mask (pageBitsForSize sz) = p_addr + pptrBaseOffset") apply simp apply (subst add.assoc[symmetric]) @@ -147,15 +133,15 @@ lemma ptable_rights_imp_frame[AInvsPre_asms]: apply (rule and_mask_less') apply (case_tac sz; simp add: bit_simps) apply simp - done *) + done end interpretation AInvsPre?: AInvsPre - proof goal_cases +proof goal_cases interpret Arch . 
case 1 show ?case by (intro_locales; (unfold_locales; fact AInvsPre_asms)?) - qed +qed requalify_facts AARCH64.user_mem_dom_cong diff --git a/proof/invariant-abstract/AARCH64/ArchAcc_AI.thy b/proof/invariant-abstract/AARCH64/ArchAcc_AI.thy index b6469e693c..d87ce20c4c 100644 --- a/proof/invariant-abstract/AARCH64/ArchAcc_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchAcc_AI.thy @@ -42,6 +42,10 @@ lemma invs_valid_asid_table [elim!]: "invs s \ valid_asid_table s" by (simp add: invs_def valid_state_def valid_arch_state_def) +lemma vspace_objs_of_arch_valid_obj: + "\ vspace_objs_of s p = Some ao; valid_objs s \ \ arch_valid_obj ao s" + by (fastforce simp: valid_obj_arch_valid_obj in_omonad vspace_obj_of_Some) + lemmas pt_upd_simps[simp] = pt_upd_def[split_simps pt.split] lemma pt_range_upd: @@ -61,20 +65,14 @@ lemma vs_lookup_table_target: apply (simp add: vs_lookup_target_def vs_lookup_slot_def vs_lookup_table_def obind_assoc) apply (subgoal_tac "level \ asid_pool_level"; clarsimp) apply (cases "level = max_pt_level", clarsimp simp: max_pt_level_plus_one in_omonad) - apply (subgoal_tac "level + 1 \ asid_pool_level") - prefer 2 + apply (prop_tac "level + 1 \ asid_pool_level") apply (metis max_pt_level_plus_one add.right_cancel) - apply (clarsimp simp: obind_assoc simp del: asid_pool_level_neq) - (* FIXME AARCH64: having plus_one_eq_asid_pool in [simp] destroys automation in the next line. - Instead of removing it, find additional simp rule to put into the set? Conversion between - asid_pool_level and max_pt_level should be automatic. *) - apply (subst (asm) pt_walk_split_Some[where level'="level + 1"]; simp add: less_imp_le) + apply (clarsimp simp: obind_assoc asid_pool_level_eq) + apply (subst (asm) pt_walk_split_Some[where level'="level + 1"], simp add: less_imp_le, simp) apply (subst (asm) (2) pt_walk.simps) - apply (subgoal_tac "level + 1 \ asid_pool_level") - prefer 2 - apply (metis max_pt_level_plus_one add.right_cancel) - apply (clarsimp simp: in_omonad simp del: asid_pool_level_neq cong: conj_cong) + apply (clarsimp simp: in_omonad cong: conj_cong) apply (rule_tac x="level + 1" in exI) + apply simp apply (subst pt_walk_vref_for_level; simp add: less_imp_le) apply (clarsimp simp: is_PageTablePTE_def pptr_from_pte_def split: if_split_asm) done @@ -202,17 +200,6 @@ lemma unique_vs_lookup_table: apply (drule table_cap_ref_vs_cap_ref; simp) done -(* FIXME AARCH64: move *) -lemma level_type_less_max_pt_level: - "level < max_pt_level \ level_type level = NormalPT_T" - by (clarsimp simp: level_type_def) - -(* FIXME AARCH64: move *) -(* Depending on config, ptTranslationBits NormalPT_T might be equal to ptTranslationBits VSRootPT_T *) -lemma ptTranslationBits_NormalPT_T_leq: - "ptTranslationBits NormalPT_T \ ptTranslationBits VSRootPT_T" - by (simp add: bit_simps) - lemma vref_for_level_pt_index_idem: assumes "level' \ max_pt_level" and "level'' \ level'" and "level < max_pt_level" shows "vref_for_level @@ -274,9 +261,9 @@ lemma pt_walk_loop_last_level_ptpte_helper_induct: ptes = Some (level' - 1, pptr_from_pte pte)") apply (drule meta_spec, drule meta_spec, drule meta_spec, drule (1) meta_mp, drule meta_mp) - apply (simp add: bit1.minus_one_leq_less) (* FIXME AARCH64: bit1 *) + apply (simp add: vm_level.minus_one_leq_less) apply (drule meta_mp) - apply (simp add: bit1.minus_one_leq_less bit1.neq_0_conv pt_walk_max_level) (* FIXME AARCH64: bit1 *) + apply (simp add: vm_level.minus_one_leq_less vm_level.neq_0_conv pt_walk_max_level) apply (clarsimp simp: pptr_from_pte_aligned_pt_bits) apply 
(subst pt_walk.simps) apply (clarsimp simp: in_omonad) @@ -293,7 +280,7 @@ lemma pt_walk_loop_last_level_ptpte_helper_induct: apply (drule_tac level'="level'+1" in vref_for_level_eq_mono) apply (fastforce intro: vref_for_level_pt_index_idem) apply (fastforce intro: vref_for_level_pt_index_idem) - apply (erule bit1.plus_one_leq) + apply (erule vm_level.plus_one_leq) apply simp apply (rule conjI, blast) apply (drule_tac level'="level'+1" in vref_for_level_eq_mono @@ -302,7 +289,7 @@ lemma pt_walk_loop_last_level_ptpte_helper_induct: apply (rule_tac pt_walk_split_Some[where level'="level" and level="level - 1" for level, THEN iffD2]) apply (fastforce dest!: vm_level_not_less_zero intro: less_imp_le) - apply (meson bit1.leq_minus1_less bit1.not_less_zero_bit0 le_less less_linear less_trans) + apply (meson vm_level.leq_minus1_less vm_level.not_less_zero_bit0 le_less less_linear less_trans) apply (subgoal_tac "pt_walk (level - 1) level' (pptr_from_pte pte) (vref_for_level vref (level' + 1) || (pt_index level vref << pt_bits_left level')) @@ -310,7 +297,7 @@ lemma pt_walk_loop_last_level_ptpte_helper_induct: prefer 2 apply (rule pt_walk_vref_for_level_eq) apply (subst vref_for_level_pt_index_idem, simp+) - apply (meson bit1.leq_minus1_less bit1.not_less_zero_bit0 le_less less_linear less_trans) + apply (meson vm_level.leq_minus1_less vm_level.not_less_zero_bit0 le_less less_linear less_trans) apply clarsimp apply (subst pt_walk.simps) apply clarsimp @@ -427,17 +414,6 @@ lemma pt_walk_same_for_different_levels: apply clarsimp done -(* FIXME AARCH64: move *) -lemma is_aligned_pt_bits_pte_bits: - "is_aligned p (pt_bits pt_t) \ is_aligned p pte_bits" - by (simp add: bit_simps is_aligned_weaken split: if_splits) - -(* FIXME AARCH64: move *) -lemma pts_of_ptes_of: - "\ pts_of s p = Some pt; is_aligned p (pt_bits (pt_type pt)) \ \ - \pte. ptes_of s (pt_type pt) p = Some pte" - by (clarsimp simp: ptes_of_Some is_aligned_pt_bits_pte_bits) - lemma vs_lookup_table_same_for_different_levels: "\ vs_lookup_table level asid vref s = Some (level, p); vs_lookup_table level' asid vref' s = Some (level', p); @@ -672,19 +648,19 @@ lemma set_pt_pred_tcb_at[wp]: unfolding set_pt_def set_object_def by (wpsimp wp: get_object_wp simp: pred_tcb_at_def obj_at_def) -lemma set_asid_pool_pred_tcb_at[wp]: - "set_asid_pool ptr val \pred_tcb_at proj P t\" +lemma set_asid_pool_pred_tcb_atP[wp]: + "set_asid_pool ptr val \\s. P (pred_tcb_at proj Q t s)\" unfolding set_asid_pool_def set_object_def by (wpsimp wp: get_object_wp simp: pred_tcb_at_def obj_at_def) -lemma mask_pt_bits_inner_beauty: (* FIXME AARCH64: rename *) +lemma table_base_index_eq: "is_aligned p pte_bits \ table_base pt_t p + (table_index pt_t p << pte_bits) = p" apply (subst word_plus_and_or_coroll, word_eqI_solve) apply word_eqI apply (subgoal_tac "p !! n \ \ pte_bits > n"; fastforce) done -lemma more_pt_inner_beauty: (* FIXME AARCH64: rename *) +lemma more_pt_inner_beauty: (* FIXME AARCH64: rename during Refine *) "\ x \ table_index pt_t p; x \ mask (ptTranslationBits pt_t); table_base pt_t p + (x << pte_bits) = p \ \ False" by (metis table_index_plus is_aligned_neg_mask2) @@ -822,6 +798,10 @@ lemma set_asid_pool_valid_asid_table[wp]: using set_asid_pool_asid_pools_of[wp del] by (wp_pre, wps, wp, clarsimp) +lemma set_asid_pool_valid_global_tables[wp]: + "set_asid_pool p ap \valid_global_tables\" + by (wp_pre, wps, wp, clarsimp) + lemma set_asid_pool_None_valid_arch: "\\s. 
valid_arch_state s \ asid_pools_of s p = Some ap \ @@ -837,10 +817,6 @@ lemma set_asid_pool_valid_objs [wp]: unfolding set_asid_pool_def by (wpsimp wp: set_object_valid_objs simp: valid_obj_def) -lemma invs_valid_global_arch_objs: - "invs s \ valid_global_arch_objs s" - by (clarsimp simp: invs_def valid_state_def valid_arch_state_def) - lemma is_aligned_pt: "\ pt_at pt_t pt s; pspace_aligned s \ \ is_aligned pt (pt_bits pt_t)" apply (clarsimp simp: obj_at_def) @@ -1087,11 +1063,10 @@ lemma set_pt_valid_global: \\_ s. valid_global_refs s\" by (wp valid_global_refs_cte_lift) -(* FIXME AARCH64: use vcpus_of instead *) -lemma set_pt_no_vcpu[wp]: - "\obj_at (is_vcpu and P) p'\ set_pt p pt \\_. obj_at (is_vcpu and P) p'\" +lemma set_pt_vcpus_of[wp]: + "set_pt p pt \\s. P (vcpus_of s)\" unfolding set_pt_def - by (wpsimp wp: set_object_wp_strong simp: obj_at_def is_vcpu_def a_type_def) + by (wp set_object_wp) (auto simp: opt_map_def obj_at_def elim!: rsubst[where P=P]) lemma set_pt_cur: "\\s. cur_tcb s\ @@ -1139,11 +1114,6 @@ lemma set_pt_table_caps[wp]: apply (clarsimp simp: opt_map_def fun_upd_apply split: option.splits) done -(* FIXME AARCH64: move *) -lemma pt_upd_empty_InvalidPTE[simp]: - "pt_upd (empty_pt pt_t) idx InvalidPTE = empty_pt pt_t" - by (auto simp: pt_upd_def empty_pt_def split: pt.splits) - lemma store_pte_valid_table_caps: "\ valid_table_caps and (\s. valid_caps (caps_of_state s) s) and (\s. (\slot asidopt. caps_of_state s slot = Some (ArchObjectCap (PageTableCap (table_base pt_t p) pt_t asidopt)) @@ -1169,7 +1139,7 @@ lemma set_object_caps_of_state: done lemma set_pt_aobjs_of: - "\\s. aobjs_of s p \ None \ P (aobjs_of s(p \ PageTable pt)) \ set_pt p pt \\_ s. P (aobjs_of s)\" + "\\s. aobjs_of s p \ None \ P ((aobjs_of s)(p \ PageTable pt)) \ set_pt p pt \\_ s. P (aobjs_of s)\" unfolding set_pt_def supply fun_upd_apply[simp del] by (wpsimp wp: set_object_wp) @@ -1206,13 +1176,9 @@ lemma set_pt_global_objs [wp]: crunch v_ker_map[wp]: set_pt "valid_kernel_mappings" (ignore: set_object wp: set_object_v_ker_map crunch_wps) - -lemma set_pt_asid_map [wp]: - "\valid_asid_map\ set_pt p pt \\_. valid_asid_map\" - apply (simp add: valid_asid_map_def vspace_at_asid_def) - apply (rule hoare_lift_Pf2 [where f="arch_state"]) - apply wp+ - done +lemma set_pt_asid_map[wp]: + "set_pt p pt \valid_asid_map\" + by (wp valid_asid_map_lift_strong) crunches store_pte for pred_tcb[wp]: "\s. 
Q (pred_tcb_at proj P t s)" @@ -1256,7 +1222,7 @@ lemma pt_walk_eqI: apply (subst pt_walk.simps) apply (prop_tac "level' < top_level") apply (fastforce dest!: pt_walk_max_level simp: le_less_trans) - apply (fastforce simp: level_pte_of_def in_omonad) + apply (fastforce simp: level_pte_of_def in_omonad if_option_eq) done lemma valid_vspace_obj_valid_pte_upd: @@ -1301,7 +1267,7 @@ lemma pt_walk_upd_idem: \ pt_walk top_level level' pt_ptr vptr (ptes_of s) = Some (level', pt_ptr') \ pt_ptr' \ obj_ref; is_aligned pt_ptr (pt_bits top_level); top_level \ max_pt_level \ - \ pt_walk top_level level pt_ptr vptr (ptes_of (s\kheap := kheap s(obj_ref \ ko)\)) + \ pt_walk top_level level pt_ptr vptr (ptes_of (s\kheap := (kheap s)(obj_ref \ ko)\)) = pt_walk top_level level pt_ptr vptr (ptes_of s)" by (rule pt_walk_eqI; simp split del: if_split) (clarsimp simp: opt_map_def split: option.splits) @@ -1327,7 +1293,7 @@ lemma pt_walk_pt_None_updD: lemma ptes_of_pt_None_updD: "\ level_pte_of pt_t p' ((pts_of s)(p := None)) = Some pte \ \ ptes_of s pt_t p' = Some pte" - by (clarsimp simp: opt_map_def level_pte_of_def in_omonad split: option.splits if_splits) + by (clarsimp simp: opt_map_def level_pte_of_def in_omonad if_option split: if_splits) lemma vs_lookup_table_eqI: fixes s :: "'z::state_ext state" @@ -1368,7 +1334,7 @@ lemma vs_lookup_table_upd_idem: \ vs_lookup_table level' asid vref s = Some (level', p') \ p' \ obj_ref; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (rule vs_lookup_table_eqI; simp split del: if_split) (clarsimp simp: opt_map_def split: option.splits) @@ -1377,7 +1343,7 @@ lemma vs_lookup_table_Some_upd_idem: "\ vs_lookup_table level asid vref s = Some (level, obj_ref); vref \ user_region; pspace_aligned s; pspace_distinct s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (subst vs_lookup_table_upd_idem; simp?) 
(fastforce dest: no_loop_vs_lookup_table) @@ -1386,7 +1352,7 @@ lemma ex_vs_lookup_upd_idem: "\ \\ (level, p) s; pspace_aligned s; pspace_distinct s; valid_vspace_objs s; valid_asid_table s; unique_table_refs s; valid_vs_lookup s; valid_caps (caps_of_state s) s \ - \ \\ (level, p) (s\kheap := kheap s(p \ ko)\) = \\ (level, p) s" + \ \\ (level, p) (s\kheap := (kheap s)(p \ ko)\) = \\ (level, p) s" apply (rule iffI; clarsimp) apply (rule_tac x=asid in exI) apply (rule_tac x=vref in exI) @@ -1464,7 +1430,7 @@ lemma pt_lookup_target_pt_upd_eq: by (rule pt_lookup_target_pt_eqI; clarsimp) lemma kheap_pt_upd_simp[simp]: - "(kheap s(p \ ArchObj (PageTable pt)) |> aobj_of |> pt_of) + "((kheap s)(p \ ArchObj (PageTable pt)) |> aobj_of |> pt_of) = (kheap s |> aobj_of |> pt_of)(p \ pt)" unfolding aobj_of_def opt_map_def by (auto split: kernel_object.split) @@ -1486,24 +1452,6 @@ lemma kernel_regionsI: unfolding kernel_regions_def by auto -(* FIXME AARCH64: probably remove -lemma user_region_canonical_pptr_base: - "\ p \ user_region; canonical_address p \ \ pptr_base \ p" - using canonical_below_pptr_base_canonical_user word_le_not_less - by (auto simp add: user_region_def not_le) *) - -(* FIXME AARCH64: probably remove -lemma kernel_regions_pptr_base: - "\ p \ kernel_regions s; valid_uses s \ \ pptr_base \ p" - apply (rule user_region_canonical_pptr_base) - apply (simp add: valid_uses_def window_defs) - apply (erule_tac x=p in allE) - apply auto[1] - apply (simp add: valid_uses_def window_defs) - apply (erule_tac x=p in allE) - apply auto[1] - done *) - lemma set_pt_valid_global_vspace_mappings[wp]: "\\\ set_pt p pt \\_. valid_global_vspace_mappings\" unfolding valid_global_vspace_mappings_def by wp @@ -1562,7 +1510,7 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1570,7 +1518,7 @@ lemma in_user_frame_same_type_upd: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1595,7 +1543,7 @@ lemma load_word_offs_in_user_frame[wp]: lemma valid_machine_state_heap_updI: "\ valid_machine_state s; typ_at type p s; a_type obj = type \ - \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" by (fastforce simp: valid_machine_state_def intro: in_user_frame_same_type_upd elim: valid_machine_stateE) @@ -1716,7 +1664,7 @@ lemma set_asid_pool_valid_global [wp]: lemma vs_lookup_table_unreachable_upd_idem: "\ \level. vs_lookup_table level asid vref s \ Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" apply (subst vs_lookup_table_upd_idem; fastforce) done @@ -1724,14 +1672,14 @@ lemma vs_lookup_table_unreachable_upd_idem: lemma vs_lookup_table_unreachable_upd_idem': "\ \(\level. 
\\ (level, obj_ref) s); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (rule vs_lookup_table_unreachable_upd_idem; fastforce) lemma vs_lookup_target_unreachable_upd_idem: "\ \level. vs_lookup_table level asid vref s \ Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_target level asid vref s" supply fun_upd_apply[simp del] apply (clarsimp simp: vs_lookup_target_def vs_lookup_slot_def obind_assoc) @@ -1766,12 +1714,12 @@ lemma vs_lookup_target_unreachable_upd_idem: lemma vs_lookup_target_unreachable_upd_idem': "\ \(\level. \\ (level, obj_ref) s); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_target level asid vref s" by (rule vs_lookup_target_unreachable_upd_idem; fastforce) lemma vs_lookup_table_fun_upd_deep_idem: - "\ vs_lookup_table level asid vref (s\kheap := kheap s(p \ ko)\) = Some (level, p'); + "\ vs_lookup_table level asid vref (s\kheap := (kheap s)(p \ ko)\) = Some (level, p'); vs_lookup_table level' asid vref s = Some (level', p); level' \ level; vref \ user_region; valid_vspace_objs s; valid_asid_table s; pspace_aligned s; pspace_distinct s \ @@ -1860,8 +1808,8 @@ lemma vs_lookup_target_pt_levelI: lemma vs_lookup_target_asid_pool_level_upd_helper: "\ graph_of ap \ graph_of ap'; kheap s p = Some (ArchObj (ASIDPool ap')); vref \ user_region; - vspace_for_pool pool_ptr asid (asid_pools_of s(p \ ap)) = Some pt_ptr; - pool_for_asid asid (s\kheap := kheap s(p \ ArchObj (ASIDPool ap))\) = Some pool_ptr\ + vspace_for_pool pool_ptr asid ((asid_pools_of s)(p \ ap)) = Some pt_ptr; + pool_for_asid asid (s\kheap := (kheap s)(p \ ArchObj (ASIDPool ap))\) = Some pool_ptr\ \ vs_lookup_target asid_pool_level asid vref s = Some (asid_pool_level, pt_ptr)" apply (clarsimp simp: pool_for_asid_vs_lookup vspace_for_pool_def entry_for_pool_def in_omonad) apply (clarsimp split: if_splits) @@ -1872,7 +1820,7 @@ lemma vs_lookup_target_asid_pool_level_upd_helper: done lemma vs_lookup_target_None_upd_helper: - "\ vs_lookup_table level asid vref (s\kheap := kheap s(p \ ArchObj (ASIDPool ap))\) = + "\ vs_lookup_table level asid vref (s\kheap := (kheap s)(p \ ArchObj (ASIDPool ap))\) = Some (level, table_ptr); ((\pa. level_pte_of (level_type level) pa ((pts_of s)(p := None))) |> pte_ref) (pt_slot_offset level table_ptr vref) @@ -1964,7 +1912,7 @@ lemma set_asid_pool_equal_mappings[wp]: lemma translate_address_asid_pool_upd: "pts_of s p = None \ translate_address pt_ptr vref - (\pt_t pa. level_pte_of pt_t pa (kheap s(p \ ArchObj (ASIDPool ap)) |> aobj_of |> pt_of)) + (\pt_t pa. level_pte_of pt_t pa ((kheap s)(p \ ArchObj (ASIDPool ap)) |> aobj_of |> pt_of)) = translate_address pt_ptr vref (ptes_of s)" by simp @@ -2032,6 +1980,16 @@ lemma set_asid_pool_valid_asid_pool_caps[wp]: unfolding valid_asid_pool_caps_def by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') +lemma set_asid_pool_None_valid_asid_map[wp]: + "\ valid_asid_map and (\s. 
asid_pools_of s p = Some ap) \ + set_asid_pool p (ap (asid_low := None)) + \\_. valid_asid_map\" + unfolding valid_asid_map_def entry_for_asid_def + apply (clarsimp simp: obind_None_eq pool_for_asid_def) + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift get_object_wp) + apply (fastforce simp: entry_for_pool_def obind_None_eq in_omonad split: if_split_asm) + done + lemma set_asid_pool_invs_unmap: "\invs and (\s. asid_pools_of s p = Some ap) and @@ -2040,7 +1998,7 @@ lemma set_asid_pool_invs_unmap: set_asid_pool p (ap (asid_low := None)) \\_. invs\" apply (simp add: invs_def valid_state_def valid_pspace_def - valid_arch_caps_def valid_asid_map_def) + valid_arch_caps_def) apply (wp valid_irq_node_typ set_asid_pool_typ_at set_asid_pool_vspace_objs_unmap valid_irq_handlers_lift @@ -2220,7 +2178,7 @@ lemma store_pte_valid_asid_pool_caps[wp]: lemma ptes_of_pts_of_pt_type: "\ ptes_of s pt_t p = Some pte'; pts_of s (table_base pt_t p) = Some pt \ \ pt_type pt = pt_t" - by (simp add: level_pte_of_def in_omonad) + by (simp add: level_pte_of_def in_omonad if_option) lemma store_pte_PagePTE_valid_vspace_objs: "\ valid_vspace_objs and pspace_aligned and pspace_distinct and valid_asid_table @@ -2275,39 +2233,9 @@ lemma isPageTablePTE_apply_upd_InvalidPTED: lemma pt_index_table_index_slot_offset_eq: "\ pt_index level vref = table_index level p; is_aligned p pte_bits \ \ pt_slot_offset level (table_base level p) vref = p" for level :: vm_level - using mask_pt_bits_inner_beauty pt_slot_offset_def + using table_base_index_eq pt_slot_offset_def by force -(* FIXME AARCH64: move *) -lemma mask_shiftr_mask_eq: - "m \ m' + n \ (w && mask m >> n) && mask m' = w && mask m >> n" for w :: "'a::len word" - by word_eqI_solve - -(* FIXME AARCH64: move *) -lemma pt_index_mask_eq: - "pt_index level vref && mask (ptTranslationBits level) = pt_index level vref" - by (simp add: pt_index_def bit_simps) - -(* FIXME AARCH64: move *) -lemma table_index_mask_eq: - "table_index pt_t p && mask (ptTranslationBits pt_t) = table_index pt_t p" - by (auto simp add: pt_bits_def bit_simps mask_shiftr_mask_eq) - -(* FIXME AARCH64: move *) -lemma pt_apply_upd_eq: - "pt_type pt = level_type level \ - pt_apply (pt_upd pt (table_index (level_type level) p) pte) (pt_index level vref) = - (if table_index (level_type level) p = pt_index level vref - then pte - else pt_apply pt (pt_index level vref))" - unfolding pt_apply_def pt_upd_def - using pt_index_mask_eq[of max_pt_level] pt_index_mask_eq[where level=level and vref=vref] - using table_index_mask_eq[where pt_t=NormalPT_T] table_index_mask_eq[where pt_t=VSRootPT_T] - apply (cases pt; clarsimp simp: ucast_eq_mask vs_index_ptTranslationBits pt_index_ptTranslationBits) - apply (prop_tac "level_type level = NormalPT_T", simp add: level_type_def) - apply (simp del: level_type_eq add: ptTranslationBits_def) - done - (* If you start with a lookup from asid down to level, and you split off a walk at level', then an update at level' does not affect the extended pt_walk from level'-1 down to level. *) (* FIXME: we should do the same on RISCV64 *) @@ -2325,7 +2253,7 @@ lemma pt_walk_below_pt_upd_idem: pt_walk (level' - 1) level (pptr_from_pte (pt_apply (pt_upd pt (table_index (level_type level') p) pte) (pt_index level' vref))) vref - (\pt_t pa. level_pte_of pt_t pa (pts_of s(table_base (level_type level') p \ + (\pt_t pa. 
level_pte_of pt_t pa ((pts_of s)(table_base (level_type level') p \ pt_upd pt (table_index (level_type level') p) pte))) = pt_walk (level' - 1) level (pptr_from_pte (pt_apply (pt_upd pt (table_index (level_type level') p) pte) @@ -2334,7 +2262,7 @@ lemma pt_walk_below_pt_upd_idem: apply (rename_tac level'') apply (prop_tac "level'' < level'") apply (drule pt_walk_max_level) - apply (simp add: vm_level_leq_minus1_less) + apply (simp add: vm_level.leq_minus1_less) apply (prop_tac "pt_walk level' level'' (table_base level' p) vref (ptes_of s) = Some (level'', table_base level' p)") apply (subst pt_walk.simps) @@ -2473,7 +2401,7 @@ lemma store_pte_InvalidPTE_valid_vs_lookup: apply (subst (asm) (2) level_pte_of_def) apply (clarsimp simp: in_omonad) apply (rename_tac pt') - apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply if_option) apply (case_tac "table_index level' (pt_slot_offset level' (table_base level' p) vref) = table_index level' p"; clarsimp) (* staying on old path; we can't hit table_base p again *) @@ -2607,7 +2535,7 @@ lemma store_pte_non_InvalidPTE_valid_vs_lookup: apply (subst (asm) (2) level_pte_of_def) apply (clarsimp simp: in_omonad) apply (rename_tac pt') - apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply if_option) apply (case_tac "table_index level' (pt_slot_offset level' (table_base level' p) vref) = table_index level' p"; clarsimp) (* we could not have arrived at our new empty table through a non-empty table and from @@ -2640,11 +2568,6 @@ lemma store_pte_non_InvalidPTE_valid_vs_lookup: apply (drule valid_vs_lookupD; assumption?; clarsimp) done -(* FIXME AARCH64: move *) -lemma vspace_objs_of_ako_at_Some: - "(vspace_objs_of s p = Some (PageTable pt)) = ako_at (PageTable pt) p s" - by (simp add: obj_at_def in_opt_map_eq vspace_obj_of_Some) - (* NOTE: should be able to derive the (pte_ref pte) \ table_base p) from the (pte_ref pte) being unreachable anywhere in the original state (this should come from having an unmapped cap to it) *) @@ -2717,7 +2640,7 @@ lemma store_pte_PageTablePTE_valid_vspace_objs: apply (erule disjE; clarsimp) apply (clarsimp simp: fun_upd_apply) apply (subst (asm) level_pte_of_def) - apply (clarsimp simp: in_omonad) + apply (clarsimp simp: in_omonad if_option) apply (rename_tac pt') apply (clarsimp simp: fun_upd_apply) apply (case_tac "table_index level' (pt_slot_offset level' (table_base level' p) vref) = @@ -2808,6 +2731,13 @@ lemma store_pte_valid_arch_caps: unfolding valid_arch_caps_def by (wpsimp wp: store_pte_valid_vs_lookup store_pte_valid_table_caps) +lemma store_pte_valid_global_tables[wp]: + "\ \s. table_base pt_t p \ global_refs s \ valid_global_tables s \ + store_pte pt_t p pte + \ \_. valid_global_tables \" + unfolding store_pte_def valid_global_tables_2_def + by (wpsimp wp: set_pt_pts_of simp: global_refs_def | wps)+ + lemma store_pte_invs: "\ invs and (\s. table_base pt_t p \ global_refs s) @@ -2892,7 +2822,7 @@ crunches do_machine_op and pspace_in_kernel_window[wp]: pspace_in_kernel_window and cap_refs_in_kernel_window[wp]: cap_refs_in_kernel_window and vspace_at_asid[wp]: "\s. P (vspace_at_asid a pt s)" - and valid_vs_lookup[wp]: "valid_vs_lookup" + and valid_vs_lookup[wp]: "\s. 
P (valid_vs_lookup s)" and valid_obj[wp]: "valid_obj t obj" (simp: valid_kernel_mappings_def wp: valid_obj_typ) @@ -2907,6 +2837,10 @@ lemma dmo_invs_lift: pspace_respects_device_region_dmo cap_refs_respects_device_region_dmo | wps dmo_inv_prop_lift[where g=irq_masks, OF irq])+ +lemma dmo_machine_op_lift_invs[wp]: + "do_machine_op (machine_op_lift f) \invs\" + by (wp dmo_invs_lift) + lemma as_user_inv: assumes x: "\P. \P\ f \\x. P\" shows "\P\ as_user t f \\x. P\" diff --git a/proof/invariant-abstract/AARCH64/ArchArch_AI.thy b/proof/invariant-abstract/AARCH64/ArchArch_AI.thy index 97bc5ff0f6..b49938cd6e 100644 --- a/proof/invariant-abstract/AARCH64/ArchArch_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchArch_AI.thy @@ -52,22 +52,21 @@ lemma range_cover_full: by (clarsimp simp:range_cover_def unat_eq_0 le_mask_iff[symmetric] word_and_le1 word_bits_def) -definition - valid_arch_inv :: "arch_invocation \ 'z::state_ext state \ bool" -where +definition valid_arch_inv :: "arch_invocation \ 'z::state_ext state \ bool" where "valid_arch_inv ai \ case ai of InvokePageTable pti \ valid_pti pti | InvokePage pgi \ valid_page_inv pgi | InvokeASIDControl aci \ valid_aci aci | InvokeASIDPool api \ valid_apinv api - | InvokeVCPU vi \ valid_vcpu_invocation vi" + | InvokeVCPU vi \ valid_vcpu_invocation vi + | InvokeVSpace vi \ \" lemma check_vp_wpR [wp]: "\\s. vmsz_aligned w sz \ P () s\ check_vp_alignment sz w \P\, -" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (simp add: vmsz_aligned_def) done @@ -75,60 +74,14 @@ lemma check_vp_wpR [wp]: lemma check_vp_inv: "\P\ check_vp_alignment sz w \\_. P\" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply simp done - lemma p2_low_bits_max: "(2 ^ asid_low_bits - 1) = (max_word :: asid_low_index)" by (simp add: asid_low_bits_def) -lemma dom_ucast_eq: - "is_aligned y asid_low_bits \ - (- dom (\a::asid_low_index. p (ucast a :: machine_word)) \ {x. ucast x + (y::AARCH64_A.asid) \ 0} = {}) = - (- dom p \ {x. 
x \ 2 ^ asid_low_bits - 1 \ x + ucast y \ 0} = {})" - apply safe - apply clarsimp - apply (rule ccontr) - apply (erule_tac x="ucast x" in in_emptyE) - apply (clarsimp simp: p2_low_bits_max) - apply (rule conjI) - apply (clarsimp simp: ucast_ucast_mask) - apply (subst (asm) less_mask_eq) - apply (rule word_less_sub_le [THEN iffD1]) - apply (simp add: word_bits_def) - apply (simp add: asid_low_bits_def) - apply simp - apply (clarsimp simp: mask_2pm1[symmetric] ucast_ucast_mask2 is_down is_aligned_mask) - apply (frule and_mask_eq_iff_le_mask[THEN iffD2]) - apply (simp add: asid_low_bits_def) - apply (erule notE) - apply (subst word_plus_and_or_coroll) - apply word_eqI_solve - apply (subst (asm) word_plus_and_or_coroll; word_bitwise, clarsimp simp: word_size) - apply (clarsimp simp: p2_low_bits_max) - apply (rule ccontr) - apply simp - apply (erule_tac x="ucast x" in in_emptyE) - apply clarsimp - apply (rule conjI, blast) - apply (rule conjI) - apply (rule word_less_sub_1) - apply (rule order_less_le_trans) - apply (rule ucast_less, simp) - apply (simp add: asid_low_bits_def) - apply clarsimp - apply (erule notE) - apply (simp add: is_aligned_mask asid_low_bits_def) - apply (subst word_plus_and_or_coroll) - apply word_eqI_solve - apply (subst (asm) word_plus_and_or_coroll) - apply (word_bitwise, clarsimp simp: word_size) - apply (word_bitwise) - done - - lemma asid_high_bits_max_word: "(2 ^ asid_high_bits - 1) = (max_word :: asid_high_index)" by (simp add: asid_high_bits_def) @@ -221,7 +174,8 @@ proof - qed crunch typ_at [wp]: - perform_page_table_invocation, perform_page_invocation, perform_asid_pool_invocation + perform_page_table_invocation, perform_page_invocation, perform_asid_pool_invocation, + perform_vspace_invocation "\s. P (typ_at T p s)" (wp: crunch_wps simp: crunch_simps ignore: store_pte) @@ -240,6 +194,9 @@ lemmas perform_asid_pool_invocation_typ_ats [wp] = lemmas perform_vcpu_invocation_typ_ats [wp] = abs_typ_at_lifts [OF perform_vcpu_invocation_typ_at] +lemmas perform_vspace_invocation_typ_ats [wp] = + abs_typ_at_lifts [OF perform_vspace_invocation_typ_at] + lemma perform_asid_control_invocation_tcb_at: "\invs and valid_aci aci and st_tcb_at active p and K (\w a b c. aci = asid_control_invocation.MakePool w a b c \ w \ p)\ @@ -267,10 +224,9 @@ lemma perform_asid_control_invocation_tcb_at: apply fastforce apply (clarsimp simp: zobj_refs_to_obj_refs) apply (erule(1) in_empty_interE) - sorry (* FIXME AARCH64 - apply (clarsimp simp:page_bits_def) + apply (clarsimp simp: pageBits_def) apply simp - done *) + done lemma ucast_asid_high_btis_of_le [simp]: @@ -290,7 +246,6 @@ lemma invoke_arch_tcb: \\rv. tcb_at tptr\" apply (simp add: arch_perform_invocation_def) apply (cases ai; simp; (wp; clarsimp simp add: st_tcb_at_tcb_at)?) 
- sorry (* FIXME AARCH64 apply (wp perform_asid_control_invocation_tcb_at) apply (clarsimp simp add: valid_arch_inv_def) apply (clarsimp simp: valid_aci_def) @@ -311,7 +266,7 @@ lemma invoke_arch_tcb: apply (simp add: pageBits_def field_simps del: atLeastAtMost_iff) apply (metis (no_types) orthD1 x_power_minus_1) apply simp - done *) + done end @@ -437,14 +392,15 @@ lemma valid_arch_caps: lemma valid_asid_map': "valid_asid_map s \ valid_asid_map s'" - by (clarsimp simp: valid_asid_map_def) + by (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq pool_for_asid_def s'_def + entry_for_pool_def ko) lemma vspace_for_asid[simp]: "vspace_for_asid asid s' = vspace_for_asid asid s" using ko empty - sorry (* FIXME AARCH64 by (clarsimp simp: vspace_for_asid_def obind_def pool_for_asid_def s'_def vspace_for_pool_def - split: option.splits) *) + entry_for_asid_def entry_for_pool_def + split: option.splits) lemma global_pt[simp]: "global_pt s' = global_pt s" @@ -452,22 +408,29 @@ lemma global_pt[simp]: lemma equal_kernel_mappings: "equal_kernel_mappings s' = equal_kernel_mappings s" - sorry (* FIXME AARCH64 - by (simp add: equal_kernel_mappings_def has_kernel_mappings_def) *) + by (simp add: equal_kernel_mappings_def) end context Arch begin global_naming AARCH64 +lemma vmid_for_asid_empty_update: + "\ asid_table s asid_high = None; asid_pools_of s ap = Some Map.empty \ \ + vmid_for_asid_2 asid ((asid_table s)(asid_high \ ap)) (asid_pools_of s) = vmid_for_asid s asid" + by (clarsimp simp: vmid_for_asid_2_def obind_def entry_for_pool_def opt_map_def + split: option.splits) + lemma valid_arch_state_strg: - "valid_arch_state s \ ap \ ran (asid_table s) \ asid_pool_at ap s \ - valid_arch_state (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + "valid_arch_state s \ ap \ ran (asid_table s) \ asid_table s asid = None \ + asid_pools_of s ap = Some Map.empty \ + valid_arch_state (asid_table_update asid ap s)" apply (clarsimp simp: valid_arch_state_def) - apply (clarsimp simp: valid_asid_table_def ran_def) - sorry (* FIXME AARCH64 + apply (clarsimp simp: valid_asid_table_def ran_def valid_global_arch_objs_def) + apply (prop_tac "vmid_inv (asid_table_update asid ap s)") + apply (fastforce simp: vmid_inv_def vmid_for_asid_empty_update ran_def) apply (fastforce intro!: inj_on_fun_updI simp: asid_pools_at_eq) - done *) + done lemma valid_vs_lookup_at_upd_strg: @@ -488,7 +451,7 @@ lemma valid_asid_pool_caps_upd_strg: (\ptr cap. caps_of_state s ptr = Some cap \ obj_refs cap = {ap} \ vs_cap_ref cap = Some (ucast asid << asid_low_bits, 0)) \ - valid_asid_pool_caps_2 (caps_of_state s) (asid_table s(asid \ ap))" + valid_asid_pool_caps_2 (caps_of_state s) ((asid_table s)(asid \ ap))" apply clarsimp apply (prop_tac "asid_update ap asid s", (unfold_locales; assumption)) apply (fastforce dest: asid_update.valid_asid_pool_caps') @@ -581,11 +544,10 @@ lemma cap_insert_simple_arch_caps_ap: and K (cap = ArchObjectCap (ASIDPoolCap ap asid) \ is_aligned asid asid_low_bits) \ cap_insert cap src dest \\rv s. 
valid_arch_caps (s\arch_state := arch_state s - \arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := (asid_table s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg valid_asid_pool_caps_upd_strg) - sorry (* FIXME AARCH64 apply (wp get_cap_wp set_cap_valid_vs_lookup set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_all_lift | simp split del: if_split)+ @@ -595,7 +557,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -610,14 +572,15 @@ lemma cap_insert_simple_arch_caps_ap: apply (simp add: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state) apply (simp add: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state) apply (erule (3) unique_table_refsD) - done *) + done lemma valid_asid_map_asid_upd_strg: "valid_asid_map s \ asid_pools_of s ap = Some Map.empty \ asid_table s asid = None \ valid_asid_map (asid_table_update asid ap s)" - by (simp add: valid_asid_map_def) + by (simp add: valid_asid_map_def entry_for_asid_def obind_None_eq entry_for_pool_def + pool_for_asid_def) lemma valid_vspace_objs_asid_upd_strg: "valid_vspace_objs s \ @@ -641,10 +604,7 @@ lemma equal_kernel_mappings_asid_upd_strg: asid_pools_of s ap = Some Map.empty \ asid_table s asid = None \ equal_kernel_mappings (asid_table_update asid ap s)" - apply clarsimp - apply (prop_tac "asid_update ap asid s", (unfold_locales; assumption)) - apply (simp add: asid_update.equal_kernel_mappings) - done + by (simp add: equal_kernel_mappings_def) lemma safe_parent_cap_is_device: "safe_parent_for m p cap pcap \ cap_is_device cap = cap_is_device pcap" @@ -673,10 +633,8 @@ lemma cap_insert_ap_invs: (\s. ap \ ran (arm_asid_table (arch_state s)) \ asid_table s (asid_high_bits_of asid) = None)\ cap_insert cap src dest - \\rv s. invs (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s(asid_high_bits_of asid \ ap)\\)\" + \\rv s. invs (asid_table_update (asid_high_bits_of asid) ap s)\" apply (simp add: invs_def valid_state_def valid_pspace_def) - sorry (* FIXME AARCH64 apply (strengthen valid_arch_state_strg valid_vspace_objs_asid_upd_strg equal_kernel_mappings_asid_upd_strg valid_asid_map_asid_upd_strg valid_global_objs_asid_upd_strg) @@ -699,7 +657,7 @@ lemma cap_insert_ap_invs: apply (auto simp: obj_at_def is_tcb_def is_cap_table_def valid_cap_def [where c="cap.Zombie a b x" for a b x] dest: obj_ref_is_tcb obj_ref_is_cap_table split: option.splits) - done *) + done lemma max_index_upd_no_cap_to: "\\s. no_cap_to_obj_with_diff_ref cap {slot} s \ @@ -719,63 +677,60 @@ lemma perform_asid_control_invocation_pred_tcb_at: \ ct_active s \ invs s \ valid_aci aci s\ perform_asid_control_invocation aci \\_. 
pred_tcb_at proj Q t\" - supply - is_aligned_neg_mask_eq[simp del] - is_aligned_neg_mask_weaken[simp del] + supply is_aligned_neg_mask_eq[simp del] is_aligned_neg_mask_weaken[simp del] apply (clarsimp simp: perform_asid_control_invocation_def split: asid_control_invocation.splits) - apply (rename_tac word1 a b aa ba word2) + apply (rename_tac frame slot_p slot_idx parent_p parent_idx base) apply (rule hoare_name_pre_state) - apply (subgoal_tac "is_aligned word1 page_bits") + apply (subgoal_tac "is_aligned frame pageBits") prefer 2 apply (clarsimp simp: valid_aci_def cte_wp_at_caps_of_state) apply (drule(1) caps_of_state_valid[rotated])+ - sorry (* FIXME AARCH64 - apply (simp add:valid_cap_simps cap_aligned_def page_bits_def) + apply (simp add:valid_cap_simps cap_aligned_def pageBits_def) apply (subst delete_objects_rewrite) - apply (simp add:page_bits_def word_bits_def pageBits_def word_size_bits_def)+ - apply (simp add:is_aligned_neg_mask_eq) - apply (wp hoare_vcg_const_imp_lift retype_region_st_tcb_at[where sz=page_bits] set_cap_no_overlap|simp)+ + apply (simp add: word_bits_def pageBits_def word_size_bits_def)+ + apply (simp add: is_aligned_neg_mask_eq) + apply (wp hoare_vcg_const_imp_lift retype_region_st_tcb_at[where sz=pageBits] set_cap_no_overlap|simp)+ apply (strengthen invs_valid_objs invs_psp_aligned) - apply (clarsimp simp:conj_comms) + apply (clarsimp simp: conj_comms) apply (wp max_index_upd_invs_simple get_cap_wp)+ apply (clarsimp simp: valid_aci_def) apply (frule intvl_range_conv) - apply (simp add:word_bits_def page_bits_def pageBits_def) - apply (clarsimp simp:detype_clear_um_independent page_bits_def is_aligned_neg_mask_eq) + apply (simp add: word_bits_def pageBits_def) + apply (clarsimp simp: detype_clear_um_independent is_aligned_neg_mask_eq) apply (rule conjI) - apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (clarsimp simp: cte_wp_at_caps_of_state) apply (simp only: field_simps) apply (rule pspace_no_overlap_detype') apply (rule caps_of_state_valid_cap) - apply (simp add:page_bits_def)+ - apply (simp add:invs_valid_objs invs_psp_aligned)+ + apply (simp add: pageBits_def)+ + apply (simp add: invs_valid_objs invs_psp_aligned)+ apply (rule conjI) apply (frule st_tcb_ex_cap) apply clarsimp apply (clarsimp split: Structures_A.thread_state.splits) apply (clarsimp simp: ex_nonz_cap_to_def) apply (frule invs_untyped_children) - apply (clarsimp simp:cte_wp_at_caps_of_state) - apply (erule_tac ptr="(aa,ba)" in untyped_children_in_mdbE[where P="\c. t \ zobj_refs c" for t]) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule_tac ptr="(parent_p, parent_idx)" + in untyped_children_in_mdbE[where P="\c. 
t \ zobj_refs c" for t]) apply (simp add: cte_wp_at_caps_of_state)+ apply fastforce apply (clarsimp simp: zobj_refs_to_obj_refs) - apply (fastforce simp:page_bits_def) + subgoal by (fastforce simp: pageBits_def) apply simp - apply (clarsimp simp:obj_bits_api_def arch_kobj_size_def cte_wp_at_caps_of_state - default_arch_object_def empty_descendants_range_in) - apply (frule_tac cap = "(cap.UntypedCap False word1 pageBits idx)" - in detype_invariants[rotated 3],clarsimp+) - apply (simp add:cte_wp_at_caps_of_state - empty_descendants_range_in descendants_range_def2)+ + apply (clarsimp simp: obj_bits_api_def arch_kobj_size_def cte_wp_at_caps_of_state + default_arch_object_def empty_descendants_range_in) + apply (frule_tac cap = "UntypedCap False frame pageBits idx" + in detype_invariants[rotated 3],clarsimp+) + apply (simp add: cte_wp_at_caps_of_state empty_descendants_range_in descendants_range_def2)+ apply (thin_tac "x = Some cap.NullCap" for x)+ apply (drule(1) caps_of_state_valid_cap[OF _ invs_valid_objs]) apply (intro conjI) - apply (clarsimp simp:valid_cap_def cap_aligned_def range_cover_full - invs_psp_aligned invs_valid_objs page_bits_def) + apply (clarsimp simp: valid_cap_def cap_aligned_def range_cover_full invs_psp_aligned + invs_valid_objs pageBits_def) apply (erule pspace_no_overlap_detype) - apply (auto simp:page_bits_def detype_clear_um_independent) - done *) + apply (auto simp: pageBits_def detype_clear_um_independent) + done lemma perform_asid_control_invocation_st_tcb_at: "\st_tcb_at (P and (Not \ inactive) and (Not \ idle)) t @@ -799,9 +754,7 @@ lemma set_cap_idx_up_aligned_area: cap_aligned_def) done -primrec(nonexhaustive) - get_untyped_cap_idx :: "cap \ nat" -where +primrec (nonexhaustive) get_untyped_cap_idx :: "cap \ nat" where "get_untyped_cap_idx (UntypedCap dev ref sz idx) = idx" lemma aci_invs': @@ -827,15 +780,8 @@ proof - ko_at (ArchObj (ASIDPool Map.empty)) ap and (\s. ap \ ran (asid_table s) \ asid_table s (asid_high_bits_of asid) = None))\ cap_insert cap src dest - \\rv s. - invs - (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s - (asid_high_bits_of asid \ ap)\\) \ - Q - (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s - (asid_high_bits_of asid \ ap)\\)\" + \\rv s. 
invs (asid_table_update (asid_high_bits_of asid) ap s) \ + Q (asid_table_update (asid_high_bits_of asid) ap s)\" apply (wp cap_insert_ap_invs) apply simp apply (rule hoare_pre) @@ -843,13 +789,13 @@ proof - apply (auto simp: cte_wp_at_caps_of_state) done show ?thesis + supply fun_upd_apply[simp del] apply (clarsimp simp: perform_asid_control_invocation_def valid_aci_def split: asid_control_invocation.splits) apply (rename_tac word1 a b aa ba word2) apply (rule hoare_pre) apply (wp hoare_vcg_const_imp_lift) apply (wp cap_insert_invsQ hoare_vcg_ex_lift | simp)+ - sorry (* FIXME AARCH64 apply (simp add: valid_cap_def | strengthen real_cte_tcb_valid safe_parent_strg invs_vobjs_strgs @@ -875,11 +821,11 @@ proof - max_index_upd_caps_overlap_reserved max_index_upd_invs_simple set_cap_cte_cap_wp_to set_cap_cte_wp_at max_index_upd_no_cap_to | simp split del: if_split | wp (once) hoare_vcg_ex_lift)+ - apply (rule_tac P = "is_aligned word1 page_bits" in hoare_gen_asm) + apply (rule_tac P = "is_aligned word1 pageBits" in hoare_gen_asm) apply (subst delete_objects_rewrite) - apply (simp add:page_bits_def pageBits_def word_size_bits_def) - apply (simp add:page_bits_def pageBits_def word_bits_def) - apply (simp add: page_bits_def) + apply (simp add: pageBits_def word_size_bits_def) + apply (simp add: pageBits_def word_bits_def) + apply (simp add: pageBits_def) apply wp apply (clarsimp simp: cte_wp_at_caps_of_state if_option_Some split del: if_split) @@ -898,7 +844,7 @@ proof - apply (clarsimp simp: detype_clear_um_independent obj_bits_api_def arch_kobj_size_def default_arch_object_def conj_comms) apply (rule conjI) - apply (clarsimp simp:valid_cap_simps cap_aligned_def page_bits_def not_le) + apply (clarsimp simp:valid_cap_simps cap_aligned_def pageBits_def not_le) apply (simp add:empty_descendants_range_in) apply (frule valid_cap_aligned) apply (clarsimp simp: cap_aligned_def) @@ -907,8 +853,8 @@ proof - simp add: empty_descendants_range_in) apply (frule pspace_no_overlap_detype, clarify+) apply (frule intvl_range_conv[where bits = pageBits]) - apply (simp add:pageBits_def word_bits_def) - apply (clarsimp simp: page_bits_def) + apply (simp add: pageBits_def word_bits_def) + apply (clarsimp) apply (frule(1) ex_cte_cap_protects) apply (simp add:empty_descendants_range_in) apply fastforce @@ -919,32 +865,33 @@ proof - simp add: free_index_of_def valid_cap_simps valid_untyped_def empty_descendants_range_in range_cover_full clear_um_def max_free_index_def; clarsimp simp:valid_untyped_def valid_cap_simps) - apply (clarsimp simp: cte_wp_at_caps_of_state) - apply (erule(1) cap_to_protected) - apply (simp add:empty_descendants_range_in descendants_range_def2)+ - apply (drule invs_arch_state)+ - apply (clarsimp simp: valid_arch_state_def valid_asid_table_def) - apply (drule (1) subsetD)+ - apply (clarsimp simp: in_opt_map_eq) - apply (erule notE, erule is_aligned_no_overflow) - apply (clarsimp simp: no_cap_to_obj_with_diff_ref_def) - apply (thin_tac "cte_wp_at ((=) cap.NullCap) p s" for p s) - apply (subst(asm) eq_commute, - erule(1) untyped_children_in_mdbE[where cap="cap.UntypedCap dev p bits idx" for dev p bits idx, - simplified, rotated]) - apply (simp add: is_aligned_no_overflow) - apply simp - apply clarsimp - done *) - + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule(1) cap_to_protected) + apply (simp add:empty_descendants_range_in descendants_range_def2)+ + apply (drule invs_arch_state)+ + apply (clarsimp simp: valid_arch_state_def valid_asid_table_def) + apply (drule (1) subsetD)+ + apply 
(clarsimp simp: in_opt_map_eq) + apply (erule notE, erule is_aligned_no_overflow) + apply (clarsimp simp: no_cap_to_obj_with_diff_ref_def) + apply (thin_tac "cte_wp_at ((=) cap.NullCap) p s" for p s) + apply (subst(asm) eq_commute, + erule(1) untyped_children_in_mdbE[where cap="cap.UntypedCap dev p bits idx" for dev p bits idx, + simplified, rotated]) + apply (simp add: is_aligned_no_overflow) + apply simp + apply clarsimp + apply (clarsimp simp: fun_upd_apply) + done qed lemmas aci_invs[wp] = - aci_invs'[where Q=\,simplified hoare_post_taut, OF refl refl refl TrueI TrueI TrueI,simplified] + aci_invs'[where Q=\,simplified hoare_TrueI, OF refl refl refl TrueI TrueI TrueI,simplified] lemma obj_at_upd2: - "obj_at P t' (s\kheap := kheap s(t \ v, x \ v')\) = (if t' = x then P v' else obj_at P t' (s\kheap := kheap s(t \ v)\))" + "obj_at P t' (s\kheap := (kheap s)(t \ v, x \ v')\) = + (if t' = x then P v' else obj_at P t' (s\kheap := (kheap s)(t \ v)\))" by (simp add: obj_at_update obj_at_def) lemma vcpu_invalidate_active_hyp_refs_empty[wp]: @@ -957,21 +904,24 @@ lemma as_user_hyp_refs_empty[wp]: apply (wpsimp wp: set_object_wp) by (clarsimp simp: get_tcb_Some_ko_at obj_at_def arch_tcb_context_set_def) +lemma vcpu_tcb_refs_None[simp]: + "vcpu_tcb_refs None = {}" + by (simp add: vcpu_tcb_refs_def) + lemma dissociate_vcpu_tcb_obj_at_hyp_refs[wp]: "\\s. p \ {t, vr} \ obj_at (\ko. hyp_refs_of ko = {}) p s \ - dissociate_vcpu_tcb t vr + dissociate_vcpu_tcb t vr \\rv s. obj_at (\ko. hyp_refs_of ko = {}) p s\" unfolding dissociate_vcpu_tcb_def apply (cases "p \ {t, vr}"; clarsimp) apply (wp arch_thread_set_wp set_vcpu_wp) apply (clarsimp simp: obj_at_upd2 obj_at_update) - sorry (* FIXME AARCH64 VCPU apply (wp hoare_drop_imp get_vcpu_wp)+ apply (clarsimp simp: obj_at_upd2 obj_at_update) apply (erule disjE; - (wp arch_thread_set_wp set_vcpu_wp + (wp arch_thread_set_wp set_vcpu_wp hoare_drop_imp | clarsimp simp: obj_at_upd2 obj_at_update)+) - done *) + done lemma associate_vcpu_tcb_sym_refs_hyp[wp]: "\\s. sym_refs (state_hyp_refs_of s)\ associate_vcpu_tcb vr t \\rv s. sym_refs (state_hyp_refs_of s)\" @@ -992,8 +942,7 @@ lemma associate_vcpu_tcb_sym_refs_hyp[wp]: apply fastforce apply (rule hoare_pre) apply (wp | wpc | clarsimp)+ - apply (simp add: obj_at_def) - sorry (* FIXME AARCH64 VCPU + apply (simp add: obj_at_def vcpu_tcb_refs_def) apply (wp get_vcpu_ko | wpc | clarsimp)+ apply (rule_tac Q="\rv s. (\t'. obj_at (\tcb. tcb = TCB t' \ rv = tcb_vcpu (tcb_arch t')) t s) \ sym_refs (state_hyp_refs_of s)" @@ -1001,7 +950,7 @@ lemma associate_vcpu_tcb_sym_refs_hyp[wp]: apply (clarsimp simp: obj_at_def) apply (wp arch_thread_get_tcb) apply simp - done *) + done lemma arch_thread_set_inv_neq: "\obj_at P p and K (t \ p)\ arch_thread_set f t \\rv. 
obj_at P p\" @@ -1016,7 +965,7 @@ lemma ex_nonz_cap_to_vcpu_udpate[simp]: by (simp add: ex_nonz_cap_to_def) lemma caps_of_state_VCPU_update: - "vcpu_at a s \ caps_of_state (s\kheap := kheap s(a \ ArchObj (VCPU b))\) = caps_of_state s" + "vcpu_at a s \ caps_of_state (s\kheap := (kheap s)(a \ ArchObj (VCPU b))\) = caps_of_state s" by (rule ext) (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases obj_at_def) lemma set_vcpu_ex_nonz_cap_to[wp]: @@ -1026,7 +975,7 @@ lemma set_vcpu_ex_nonz_cap_to[wp]: done lemma caps_of_state_tcb_arch_update: - "ko_at (TCB y) t' s \ caps_of_state (s\kheap := kheap s(t' \ TCB (y\tcb_arch := f (tcb_arch y)\))\) = caps_of_state s" + "ko_at (TCB y) t' s \ caps_of_state (s\kheap := (kheap s)(t' \ TCB (y\tcb_arch := f (tcb_arch y)\))\) = caps_of_state s" by (rule ext) (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases obj_at_def tcb_cap_cases_def) lemma arch_thread_set_ex_nonz_cap_to[wp]: @@ -1051,14 +1000,17 @@ lemma associate_vcpu_tcb_if_live_then_nonz_cap[wp]: by (wpsimp wp: arch_thread_set_inv_neq hoare_disjI1 get_vcpu_wp hoare_vcg_all_lift hoare_drop_imps) lemma set_vcpu_valid_arch_Some[wp]: - "\valid_arch_state\ set_vcpu vcpu (v\vcpu_tcb := Some tcb\) \\_. valid_arch_state\" + "set_vcpu vcpu (v\vcpu_tcb := Some tcb\) \valid_arch_state\" apply (wp set_vcpu_wp) - apply (clarsimp simp: valid_arch_state_def) + apply (clarsimp simp: valid_arch_state_def asid_pools_of_vcpu_None_upd_idem + pts_of_vcpu_None_upd_idem) + apply (rule conjI) + apply (clarsimp simp: vmid_inv_def asid_pools_of_vcpu_None_upd_idem) apply (rule conjI) - sorry (* FIXME AARCH64 VCPU - apply (fastforce simp: valid_asid_table_def obj_at_def) - apply (clarsimp simp: obj_at_def is_vcpu_def hyp_live_def arch_live_def split: option.splits) - done *) + apply (clarsimp simp: cur_vcpu_2_def obj_at_def is_vcpu_def hyp_live_def arch_live_def in_opt_pred + split: option.splits) + apply (clarsimp simp: valid_global_arch_objs_def obj_at_def) + done lemma valid_global_objs_vcpu_update_str: "valid_global_objs s \ valid_global_objs (s\arch_state := arm_current_vcpu_update f (arch_state s)\)" @@ -1068,13 +1020,6 @@ lemma valid_global_vspace_mappings_vcpu_update_str: "valid_global_vspace_mappings s \ valid_global_vspace_mappings (s\arch_state := arm_current_vcpu_update f (arch_state s)\)" by (simp add: valid_global_vspace_mappings_def) -lemma arm_current_vcpu_update_valid_global_vspace_mappings[simp]: - "valid_global_vspace_mappings (s\arch_state := arm_current_vcpu_update f (arch_state s)\) - = valid_global_vspace_mappings s" - by (clarsimp simp: valid_global_vspace_mappings_def global_refs_def - split: kernel_object.splits option.splits) - -(* FIXME AARCH64 hyp machine ops crunches associate_vcpu_tcb for pspace_aligned[wp]: pspace_aligned and pspace_distinct[wp]: pspace_distinct @@ -1098,7 +1043,8 @@ crunches associate_vcpu_tcb and valid_asid_map[wp]: valid_asid_map and valid_global_vspace_mappings[wp]: valid_global_vspace_mappings and pspace_in_kernel_window[wp]: pspace_in_kernel_window - (wp: crunch_wps dmo_valid_irq_states device_region_dmos simp: crunch_simps) *) + (wp: crunch_wps dmo_valid_irq_states device_region_dmos + simp: crunch_simps valid_kernel_mappings_def) crunches vcpu_switch for valid_idle[wp]: valid_idle @@ -1121,10 +1067,9 @@ crunches associate_vcpu_tcb for valid_global_refs[wp]: valid_global_refs (wp: crunch_wps) -(* FIXME AARCH64 hyp machine ops -crunches vcpu_restore_reg +crunches vcpu_restore_reg, vcpu_write_reg for valid_irq_states[wp]: valid_irq_states - (wp: crunch_wps 
dmo_valid_irq_states simp: writeVCPUHardwareReg_def) *) + (wp: crunch_wps dmo_valid_irq_states simp: writeVCPUHardwareReg_def) lemma is_irq_active_sp: "\P\ get_irq_state irq \\rv s. P s \ (rv = interrupt_states s irq)\" @@ -1133,33 +1078,30 @@ lemma is_irq_active_sp: lemma restore_virt_timer_valid_irq_states[wp]: "restore_virt_timer vcpu_ptr \valid_irq_states\" apply (clarsimp simp: restore_virt_timer_def is_irq_active_def liftM_def) - sorry (* FIXME AARCH64 VCPU - apply (repeat_unless \rule hoare_seq_ext[OF _ is_irq_active_sp]\ - \rule hoare_seq_ext_skip, + apply (repeat_unless \rule bind_wp[OF _ is_irq_active_sp]\ + \rule bind_wp_fwd_skip, wpsimp wp: dmo_valid_irq_states - simp: isb_def setHCR_def set_cntv_cval_64_def read_cntpct_def - set_cntv_off_64_def\) - apply (wpsimp simp: do_machine_op_def is_irq_active_def get_irq_state_def) + simp: isb_def setHCR_def read_cntpct_def\) + apply (wpsimp simp: do_machine_op_def is_irq_active_def get_irq_state_def + wp_del: dmo_valid_irq_states) apply (clarsimp simp: valid_irq_states_def valid_irq_masks_def maskInterrupt_def in_monad) - done *) + done -(* FIXME AARCH64 irq_masks are down due to no_irq naming issue due to using crunch crunches vcpu_switch for valid_irq_states[wp]: valid_irq_states (wp: crunch_wps dmo_valid_irq_states set_gic_vcpu_ctrl_hcr_irq_masks set_gic_vcpu_ctrl_vmcr_irq_masks set_gic_vcpu_ctrl_lr_irq_masks set_gic_vcpu_ctrl_apr_irq_masks simp: get_gic_vcpu_ctrl_lr_def get_gic_vcpu_ctrl_apr_def get_gic_vcpu_ctrl_vmcr_def - get_gic_vcpu_ctrl_hcr_def maskInterrupt_def isb_def setHCR_def) *) + get_gic_vcpu_ctrl_hcr_def maskInterrupt_def isb_def setHCR_def) lemma associate_vcpu_tcb_valid_irq_states[wp]: "associate_vcpu_tcb vcpu tcb \valid_irq_states\" apply (clarsimp simp: associate_vcpu_tcb_def) - sorry (* FIXME AARCH64 VCPU by (wp hoare_vcg_all_lift | wp (once) hoare_drop_imps | wpc - | simp add: dissociate_vcpu_tcb_def vcpu_invalidate_active_def)+ *) + | simp add: dissociate_vcpu_tcb_def vcpu_invalidate_active_def)+ crunches associate_vcpu_tcb for cap_refs_respects_device_region[wp]: cap_refs_respects_device_region @@ -1180,33 +1122,36 @@ lemma set_vcpu_tcb_Some_hyp_live[wp]: apply (clarsimp simp: obj_at_def hyp_live_def arch_live_def) done +lemma arch_thread_set_vcpus_of[wp]: + "arch_thread_set f p \\s. P (vcpus_of s)\" + apply (wp arch_thread_set_wp) + apply (clarsimp simp: get_tcb_Some_ko_at) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: obj_at_def opt_map_def) + done + lemma associate_vcpu_tcb_valid_arch_state[wp]: "associate_vcpu_tcb vcpu tcb \valid_arch_state\" + supply fun_upd_apply[simp del] apply (clarsimp simp: associate_vcpu_tcb_def) apply (wpsimp wp: vcpu_switch_valid_arch) - done (* FIXME AARCH64 VCPU this seems too easy, but remove if nothing changes - apply (rule_tac Q="\_. valid_arch_state and obj_at hyp_live vcpu" in hoare_post_imp) + apply (rule_tac Q="\_ s. 
valid_arch_state s \ vcpu_hyp_live_of s vcpu" in hoare_post_imp) apply fastforce - apply wpsimp - apply (wpsimp wp: arch_thread_set.valid_arch_state) - apply (wpsimp wp: arch_thread_set_wp)+ - done *) + apply wpsimp+ + done -(* FIXME AARCH64 VCPU machine ops crunches restore_virt_timer, vcpu_restore_reg_range, vcpu_save_reg_range, vgic_update_lr for valid_machine_state[wp]: valid_machine_state - (wp: crunch_wps ignore: do_machine_op) *) + (wp: crunch_wps ignore: do_machine_op) lemma vcpu_enable_valid_machine_state[wp]: "vcpu_enable vcpu \valid_machine_state\" - apply (simp add: vcpu_enable_def) - sorry (* FIXME AARCH64 VCPU - by (wpsimp | subst do_machine_op_bind | simp add: isb_def)+ *) + unfolding vcpu_enable_def + by wpsimp -(* FIXME AARCH64 VCPU crunches vcpu_restore, vcpu_save for valid_machine_state[wp]: valid_machine_state - (wp: mapM_wp_inv simp: do_machine_op_bind dom_mapM ignore: do_machine_op) *) + (wp: mapM_wp_inv simp: do_machine_op_bind dom_mapM ignore: do_machine_op) crunches associate_vcpu_tcb for valid_machine_state[wp]: valid_machine_state @@ -1216,19 +1161,17 @@ lemma associate_vcpu_tcb_valid_objs[wp]: "\valid_objs and vcpu_at vcpu\ associate_vcpu_tcb vcpu tcb \\_. valid_objs\" - sorry (* FIXME AARCH64 VCPU broken crunches? by (wp arch_thread_get_wp | wp (once) hoare_drop_imps | wpc | clarsimp simp: associate_vcpu_tcb_def valid_obj_def[abs_def] valid_vcpu_def - | simp add: obj_at_def)+ *) + | simp add: obj_at_def)+ lemma associate_vcpu_tcb_invs[wp]: "\invs and ex_nonz_cap_to vcpu and ex_nonz_cap_to tcb and vcpu_at vcpu and (\s. tcb \ idle_thread s)\ associate_vcpu_tcb vcpu tcb \\_. invs\" - sorry (* FIXME AARCH64 VCPU - by (wpsimp simp: invs_def valid_state_def valid_pspace_def) *) + by (wpsimp simp: invs_def valid_state_def valid_pspace_def) lemma set_vcpu_regs_update[wp]: "\invs and valid_obj p (ArchObj (VCPU vcpu)) and @@ -1263,14 +1206,18 @@ lemma perform_vcpu_invs[wp]: invoke_vcpu_write_register_def) done +lemma perform_vspace_invocation_invs[wp]: + "perform_vspace_invocation vi \invs\" + unfolding perform_vspace_invocation_def + by wpsimp + lemma invoke_arch_invs[wp]: "\invs and ct_active and valid_arch_inv ai\ arch_perform_invocation ai \\rv. invs\" apply (cases ai, simp_all add: valid_arch_inv_def arch_perform_invocation_def) apply (wp perform_vcpu_invs|simp)+ - sorry (* FIXME AARCH64 - done *) + done lemma sts_aobjs_of[wp]: "set_thread_state t st \\s. P (aobjs_of s)\" @@ -1286,15 +1233,14 @@ crunches set_thread_state lemma sts_vspace_for_asid[wp]: "set_thread_state t st \\s. P (vspace_for_asid asid s)\" - apply (simp add: vspace_for_asid_def obind_def split: option.splits) - sorry (* FIXME AARCH64 + apply (simp add: vspace_for_asid_def entry_for_asid_def obind_def split: option.splits) apply (rule conjI; wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift) - done *) + apply fastforce + done lemma sts_vspace_at_asid[wp]: "set_thread_state t st \vspace_at_asid asid pd\" - sorry (* FIXME AARCH64 - unfolding vspace_at_asid_def by wpsimp *) + unfolding vspace_at_asid_def by wpsimp lemma sts_valid_slots_inv[wp]: "set_thread_state t st \valid_slots m\" @@ -1313,8 +1259,7 @@ lemma sts_valid_page_inv[wp]: unfolding valid_page_inv_def apply (cases page_invocation) apply (wpsimp wp: sts_typ_ats hoare_vcg_ex_lift hoare_vcg_disj_lift | wps)+ - sorry (* FIXME AARCH64 something wrong, getting undefined in pre/post; valid_page_inv is missing PageFlush - done *) + done crunch global_refs_inv[wp]: set_thread_state "\s. 
P (global_refs s)" @@ -1334,32 +1279,22 @@ lemma sts_valid_vcpu_invocation_inv: lemma sts_valid_arch_inv: "\valid_arch_inv ai\ set_thread_state t st \\rv. valid_arch_inv ai\" - apply (cases ai, simp_all add: valid_arch_inv_def) - apply (rename_tac page_table_invocation) - apply (case_tac page_table_invocation, simp_all add: valid_pti_def)[1] - sorry (* FIXME AARCH64 VCPU: valid_arch_inv is missing cases (VCPU and InvokeVSpace) - apply ((wp valid_pde_lift set_thread_state_valid_cap - hoare_vcg_all_lift hoare_vcg_const_imp_lift - hoare_vcg_ex_lift set_thread_state_ko - sts_typ_ats set_thread_state_cte_wp_at - | clarsimp simp: is_tcb_def)+)[4] + apply (cases ai, simp_all add: valid_arch_inv_def; wp?) apply (rename_tac asid_control_invocation) apply (case_tac asid_control_invocation) apply (clarsimp simp: valid_aci_def cte_wp_at_caps_of_state) - apply (rule hoare_pre, wp hoare_vcg_ex_lift cap_table_at_typ_at) - apply clarsimp + apply (wp hoare_vcg_ex_lift cap_table_at_typ_at) apply (clarsimp simp: valid_apinv_def split: asid_pool_invocation.splits) - apply (rule hoare_pre) - apply (wp hoare_vcg_ex_lift set_thread_state_ko) + apply (wp hoare_vcg_ex_lift set_thread_state_ko) apply (clarsimp simp: is_tcb_def, wp sts_valid_vcpu_invocation_inv) - done *) + done crunch_ignore (add: select_ext find_vspace_for_asid) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp hoare_vcg_all_lift - hoare_vcg_all_lift_R hoare_drop_imps simp: crunch_simps) + (wp: crunch_wps select_ext_weak_wp hoare_vcg_all_lift + hoare_vcg_all_liftE_R hoare_drop_imps simp: crunch_simps) declare lookup_slot_for_cnode_op_cap_to [wp] @@ -1387,27 +1322,11 @@ declare mask_shift [simp] declare word_less_sub_le [simp del] -(* FIXME AARCH64 -lemma ptrFromPAddr_addr_from_ppn: - "is_aligned pt_ptr table_size \ - ptrFromPAddr (addr_from_ppn (ucast (addrFromPPtr pt_ptr >> pageBits))) = pt_ptr" - apply (simp add: addr_from_ppn_def ucast_ucast_mask bit_simps) - apply (frule is_aligned_addrFromPPtr[simplified bit_simps]) - apply (simp add: aligned_shiftr_mask_shiftl mask_len_id[where 'a=machine_word_len, simplified]) - done - -lemma ptrFromPAddr_addr_from_ppn': - "is_aligned pt_ptr pt_bits \ - ptrFromPAddr (addr_from_ppn (ucast (addrFromPPtr pt_ptr >> pt_bits))) = pt_ptr" - using ptrFromPAddr_addr_from_ppn by (simp add: bit_simps) -*) - lemma is_aligned_pageBitsForSize_table_size: "is_aligned p (pageBitsForSize vmpage_size) \ is_aligned p (table_size NormalPT_T)" - sorry (* FIXME AARCH64 ^ no obvious mapping from pt_type to vmpage_size (only true for NormalPT_T) apply (erule is_aligned_weaken) apply (simp add: pbfs_atleast_pageBits[unfolded bit_simps] bit_simps) - done *) + done lemma vmsz_aligned_vref_for_level: "\ vmsz_aligned vref sz; pt_bits_left level = pageBitsForSize sz \ \ @@ -1428,6 +1347,16 @@ lemma vs_lookup_slot_pte_at: apply (rule is_aligned_add; simp add: is_aligned_shift) done +(* used in Refine *) +lemma pt_lookup_slot_pte_at: + "\ vspace_for_asid asid s = Some pt; pt_lookup_slot pt vref (ptes_of s) = Some (level, slot); + vref \ user_region; invs s\ + \ pte_at (level_type level) slot s" + apply (drule (1) pt_lookup_slot_vs_lookup_slotI) + apply clarsimp + apply (erule (3) vs_lookup_slot_pte_at) + done + lemma vmpage_size_of_level_pt_bits_left: "\ pt_bits_left level = pageBitsForSize vmpage_size; level \ max_pt_level \ \ vmsize_of_level level = vmpage_size" @@ -1438,87 +1367,130 @@ lemma is_PagePTE_make_user[simp]: "is_PagePTE (make_user_pte p attr R sz) \ make_user_pte p attr R sz = 
InvalidPTE" by (auto simp: is_PagePTE_def make_user_pte_def) +lemma check_vspace_root_wp[wp]: + "\ \s. \pt asid vref. cap = ArchObjectCap (PageTableCap pt VSRootPT_T (Some (asid, vref))) \ + Q (pt, asid) s \ + check_vspace_root cap n + \Q\, -" + unfolding check_vspace_root_def + by wpsimp + +lemma pt_asid_pool_no_overlap: + "\ kheap s (table_base (level_type level) pte_ptr) = Some (ArchObj (PageTable pt)); + kheap s (table_base (level_type asid_pool_level) pte_ptr) = Some (ArchObj (ASIDPool pool)); + pspace_distinct s; pt_type pt = level_type level \ + \ False" + apply (simp add: level_type_def split: if_split_asm) + apply (cases "table_base VSRootPT_T pte_ptr = table_base NormalPT_T pte_ptr", simp) + apply (drule (3) pspace_distinctD) + apply (clarsimp simp: is_aligned_no_overflow_mask) + apply (metis and_neg_mask_plus_mask_mono pt_bits_NormalPT_T pt_bits_def word_and_le) + done + +lemma pageBitsForSize_max_page_level: + "pt_bits_left level = pageBitsForSize vmpage_size \ level \ max_page_level" + using vm_level.size_inj[where x=level and y=max_page_level, unfolded level_defs, simplified] + by (simp add: pageBitsForSize_def pt_bits_left_def ptTranslationBits_def level_defs + split: vmpage_size.splits if_split_asm) + +lemma pageBitsForSize_level_0_eq: + "pt_bits_left level = pageBitsForSize vmpage_size \ (vmpage_size = ARMSmallPage) = (level = 0)" + using vm_level.size_inj[where x=level and y=max_page_level, unfolded level_defs, simplified] + by (simp add: pageBitsForSize_def pt_bits_left_def ptTranslationBits_def + split: vmpage_size.splits if_split_asm) + +(* FIXME AARCH64: replace user_vtop_canonical_user *) +lemma user_vtop_leq_canonical_user: + "vref \ user_vtop \ vref \ canonical_user" + using user_vtop_leq_canonical_user by simp + lemma decode_fr_inv_map_wf[wp]: - "arch_cap = FrameCap p rights vmpage_size dev option \ - \invs and valid_cap (ArchObjectCap arch_cap) and + assumes "arch_cap = FrameCap p rights vmpage_size dev option" + shows + "\invs and valid_cap (ArchObjectCap arch_cap) and cte_wp_at ((=) (ArchObjectCap arch_cap)) slot and (\s. \x \ set excaps. 
cte_wp_at ((=) (fst x)) (snd x) s)\ decode_fr_inv_map label args slot arch_cap excaps \valid_arch_inv\,-" - unfolding decode_fr_inv_map_def Let_def - apply (wpsimp wp: check_vp_wpR split_del: if_split) - apply (clarsimp simp: valid_arch_inv_def valid_page_inv_def neq_Nil_conv) - sorry (* FIXME AARCH64 - apply (rename_tac s pt_ptr asid vref ab ba ys pt_slot level) - apply (prop_tac "args!0 \ user_region") - apply (clarsimp simp: user_region_def not_le) - apply (rule user_vtop_canonical_user) - apply (erule aligned_add_mask_lessD) - apply (simp add: vmsz_aligned_def) - apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def is_cap_simps cap_master_cap_simps) - apply (thin_tac "Ball S P" for S P) - apply (frule (1) pt_lookup_slot_vs_lookup_slotI, clarsimp) - apply (clarsimp simp: valid_arch_cap_def valid_cap_def cap_aligned_def wellformed_mapdata_def) - apply (frule is_aligned_pageBitsForSize_table_size) - apply (frule (3) vs_lookup_slot_table_base) - apply (clarsimp simp: same_ref_def make_user_pte_def ptrFromPAddr_addr_from_ppn) - (* FIXME RISCV: remove duplication due to PagePTE/InvalidPTE cases: *) - apply (rule conjI; clarsimp) - apply (rule strengthen_imp_same_first_conj[OF conjI]) - apply (rule_tac x=level in exI) - apply (rule_tac x="args!0" in exI) - apply (fastforce simp: vmsz_aligned_vref_for_level) - apply (rule strengthen_imp_same_first_conj[OF conjI]) - apply (clarsimp simp: valid_slots_def make_user_pte_def wellformed_pte_def - ptrFromPAddr_addr_from_ppn) - apply (rename_tac level' asid' vref') +proof - + have pull_out: "\ P \ Q \ P' \ T \ \ (P \ Q \ T) \ (P' \ T)" for P Q P' T by blast + from assms show ?thesis + unfolding decode_fr_inv_map_def Let_def + apply (cases arch_cap; simp split del: if_split) + apply (wpsimp wp: check_vp_wpR split_del: if_split) + apply (clarsimp simp: if_distribR if_bool_simps disj_imp cong: if_cong split del: if_split) + apply (rule pull_out) + apply clarsimp + apply (prop_tac "\ user_vtop < args ! 
0 + mask (pageBitsForSize vmpage_size) \ args!0 \ user_region") + apply (clarsimp simp: user_region_def not_le) + apply (rule user_vtop_leq_canonical_user) + apply (simp add: vmsz_aligned_def not_less) + apply (drule is_aligned_no_overflow_mask) + apply simp + apply (rename_tac pte_ptr level) + apply (clarsimp simp: valid_arch_inv_def valid_page_inv_def neq_Nil_conv) + apply (rename_tac cptr cidx excaps') + apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def is_cap_simps cap_master_cap_simps) + apply (thin_tac "Ball S P" for S P) + apply (frule (1) pt_lookup_slot_vs_lookup_slotI, clarsimp) + apply (clarsimp simp: valid_arch_cap_def valid_cap_def cap_aligned_def wellformed_mapdata_def) + apply (frule is_aligned_pageBitsForSize_table_size) + apply (prop_tac "args!0 \ user_region") + apply (fastforce simp: wellformed_mapdata_def) apply (frule (3) vs_lookup_slot_table_base) - apply (prop_tac "level' \ max_pt_level") - apply (drule_tac level=level in valid_vspace_objs_strongD[rotated]; clarsimp) - apply (rule ccontr, clarsimp simp: not_le) - apply (drule vs_lookup_asid_pool; clarsimp) - apply (clarsimp simp: in_omonad) - apply (drule (1) vs_lookup_table_unique_level; clarsimp) - apply (simp add: vs_lookup_slot_pte_at data_at_def vmpage_size_of_level_pt_bits_left - split: if_split_asm) - apply (rule strengthen_imp_same_first_conj[OF conjI]) - apply (clarsimp simp: wellformed_mapdata_def vspace_for_asid_def) - apply (clarsimp simp: parent_for_refs_def) - apply (frule (3) vs_lookup_slot_table_base) - apply (frule (2) valid_vspace_objs_strongD[rotated]; clarsimp) - apply (drule (1) vs_lookup_table_target) - apply (drule valid_vs_lookupD; clarsimp simp: vmsz_aligned_vref_for_level) - apply (subgoal_tac "is_pt_cap cap") - apply (force simp: is_cap_simps) - apply (fastforce dest: cap_to_pt_is_pt_cap_and_type intro: valid_objs_caps) - apply (rule strengthen_imp_same_first_conj[OF conjI]) - apply (rule_tac x=level in exI) - apply (rule_tac x="args!0" in exI) - apply (fastforce simp: vmsz_aligned_vref_for_level) - apply (rule strengthen_imp_same_first_conj[OF conjI]) - apply (clarsimp simp: valid_slots_def make_user_pte_def wellformed_pte_def - ptrFromPAddr_addr_from_ppn) - apply (rename_tac level' asid' vref') - apply (frule (3) vs_lookup_slot_table_base) - apply (prop_tac "level' \ max_pt_level") - apply (drule_tac level=level in valid_vspace_objs_strongD[rotated]; clarsimp) - apply (rule ccontr, clarsimp simp: not_le) - apply (drule vs_lookup_asid_pool; clarsimp) - apply (clarsimp simp: in_omonad) - apply (drule (1) vs_lookup_table_unique_level; clarsimp) - apply (simp add: vs_lookup_slot_pte_at data_at_def vmpage_size_of_level_pt_bits_left - split: if_split_asm) - apply (rule strengthen_imp_same_first_conj[OF conjI]) - apply (clarsimp simp: wellformed_mapdata_def vspace_for_asid_def) - apply (clarsimp simp: parent_for_refs_def) - apply (frule (3) vs_lookup_slot_table_base) - apply (frule (2) valid_vspace_objs_strongD[rotated]; clarsimp) - apply (drule (1) vs_lookup_table_target) - apply (drule valid_vs_lookupD; clarsimp simp: vmsz_aligned_vref_for_level) - apply (subgoal_tac "is_pt_cap cap") - apply (force simp: is_cap_simps) - apply (fastforce dest: cap_to_pt_is_pt_cap_and_type intro: valid_objs_caps) - done *) + apply (clarsimp simp: same_ref_def make_user_pte_def) + apply (prop_tac "\vref. args ! 
0 = vref_for_level vref level \ + vs_lookup_slot level asid vref s = Some (level, pte_ptr) \ + vref \ user_region") + apply (rule_tac x="args!0" in exI) + apply (fastforce simp: vmsz_aligned_vref_for_level) + apply clarsimp + apply (rule conjI, fastforce) + apply (clarsimp simp: valid_slots_def make_user_pte_def wellformed_pte_def) + apply (rule conjI, clarsimp) + apply (rename_tac level' asid' vref') + apply (prop_tac "level' \ max_pt_level") + apply (drule_tac level=level in valid_vspace_objs_strongD[rotated]; clarsimp) + apply (rule ccontr, clarsimp simp: not_le) + apply (drule vs_lookup_asid_pool; clarsimp) + apply (clarsimp simp: in_omonad) + apply (drule (1) pt_asid_pool_no_overlap, fastforce; assumption) + apply (prop_tac "\pt. pts_of s (table_base (level_type level) pte_ptr) = Some pt \ + pt_type pt = level_type level") + apply (drule valid_vspace_objs_strongD[rotated]; clarsimp) + apply (prop_tac "\pt. pts_of s (table_base (level_type level') pte_ptr) = Some pt \ + pt_type pt = level_type level'") + apply (drule_tac level=level' in valid_vspace_objs_strongD[rotated]; clarsimp) + apply clarsimp + apply (drule (3) pts_of_level_type_unique, fastforce) + apply (drule (1) vs_lookup_table_unique_level; clarsimp) + apply (simp add: vs_lookup_slot_pte_at data_at_def vmpage_size_of_level_pt_bits_left + pageBitsForSize_max_page_level pageBitsForSize_level_0_eq + split: if_split_asm) + apply (frule vs_lookup_table_asid_not_0; clarsimp simp: word_neq_0_conv) + apply (clarsimp simp: parent_for_refs_def) + apply (frule (2) valid_vspace_objs_strongD[rotated]; clarsimp) + apply (drule (1) vs_lookup_table_target) + apply (drule valid_vs_lookupD; clarsimp simp: vmsz_aligned_vref_for_level) + apply (rule conjI) + apply (subgoal_tac "is_pt_cap cap \ cap_pt_type cap = level_type level") + apply (force simp: is_cap_simps) + apply (fastforce dest: cap_to_pt_is_pt_cap_and_type intro: valid_objs_caps) + apply (clarsimp simp: valid_mapping_insert_def vs_lookup_slot_def) + apply (thin_tac "if dev then _ else _") + apply (fastforce dest!: vs_lookup_table_is_aligned + simp: pt_slot_offset_offset[where level=max_pt_level, simplified] + user_region_invalid_mapping_slots + split: if_split_asm) + done +qed + +lemma decode_fr_inv_flush_wf[wp]: + "\\\ + decode_fr_inv_flush label args slot (FrameCap word rights vmpage_size dev option) excaps + \valid_arch_inv\, -" + unfolding decode_fr_inv_flush_def valid_arch_inv_def Let_def + by (wpsimp simp: valid_page_inv_def) lemma decode_frame_invocation_wf[wp]: "arch_cap = FrameCap word rights vmpage_size dev option \ @@ -1528,11 +1500,10 @@ lemma decode_frame_invocation_wf[wp]: decode_frame_invocation label args slot arch_cap excaps \valid_arch_inv\,-" unfolding decode_frame_invocation_def - sorry (* FIXME AARCH64 by (wpsimp simp: valid_arch_inv_def valid_page_inv_def cte_wp_at_caps_of_state is_cap_simps valid_arch_cap_def valid_cap_def valid_unmap_def wellformed_mapdata_def vmsz_aligned_def - split: option.split) *) + split: option.split) lemma neg_mask_user_region: "p \ user_region \ p && ~~mask n \ user_region" @@ -1542,37 +1513,48 @@ lemma neg_mask_user_region: apply simp done +lemma ptrFromPAddr_addr_from_ppn: + "\ is_aligned pt_ptr pageBits; pptr_base \ pt_ptr; pt_ptr < pptrTop \ \ + ptrFromPAddr (paddr_from_ppn (ppn_from_pptr pt_ptr)) = pt_ptr" + apply (simp add: paddr_from_ppn_def ppn_from_pptr_def ucast_ucast_ppn ppn_len_def') + apply (frule is_aligned_addrFromPPtr) + apply (simp add: nat_minus_add_max aligned_shiftr_mask_shiftl addrFromPPtr_mask_ipa + 
mask_len_id[where 'a=machine_word_len, simplified]) + done + lemma decode_pt_inv_map_wf[wp]: - "arch_cap = PageTableCap pt_ptr pt_t pt_map_data \ + "arch_cap = PageTableCap pt_ptr NormalPT_T pt_map_data \ \invs and valid_cap (ArchObjectCap arch_cap) and cte_wp_at ((=) (ArchObjectCap arch_cap)) slot and (\s. \x \ set excaps. cte_wp_at ((=) (fst x)) (snd x) s)\ decode_pt_inv_map label args slot arch_cap excaps \valid_arch_inv\,-" unfolding decode_pt_inv_map_def Let_def + apply (simp split del: if_split) apply wpsimp - sorry (* FIXME AARCH64 apply (clarsimp simp: valid_arch_inv_def valid_pti_def pte_at_eq invalid_pte_at_def wellformed_pte_def valid_cap_def cte_wp_at_caps_of_state) apply (rename_tac p level) apply (prop_tac "args!0 \ user_region") - apply (simp add: wellformed_mapdata_def user_region_def user_vtop_canonical_user) + apply (simp add: wellformed_mapdata_def user_region_def user_vtop_def pptrUserTop_def + canonical_user_def ipa_size_def) apply (rule conjI, clarsimp simp: valid_arch_cap_def wellformed_mapdata_def vspace_for_asid_def - neg_mask_user_region) + entry_for_asid_def neg_mask_user_region) apply (rule conjI, clarsimp simp: is_arch_update_def is_cap_simps cap_master_cap_simps) - apply (simp add: ptrFromPAddr_addr_from_ppn cap_aligned_def) + apply (frule cap_refs_in_kernel_windowD, fastforce) + apply (clarsimp simp: cap_range_def) + apply (drule valid_uses_kernel_window[rotated], fastforce) + apply (clarsimp simp: cap_aligned_def pptr_from_pte_def) + apply (simp add: table_size_def bit_simps ptrFromPAddr_addr_from_ppn) apply (drule (1) pt_lookup_slot_vs_lookup_slotI) - apply (rule_tac x=level in exI, simp add: vm_level_not_less_zero) - apply (clarsimp simp: obj_at_def) - apply (rule conjI, clarsimp) - apply (drule valid_table_caps_pdD, clarsimp) - apply (clarsimp simp: in_omonad) apply (rule_tac x="args!0" in exI) apply (simp add: vref_for_level_def) - done *) + apply (drule valid_table_caps_pdD, clarsimp) + apply (clarsimp simp: vm_level_not_less_zero obj_at_def in_omonad) + done lemma decode_page_table_invocation_wf[wp]: - "arch_cap = PageTableCap pt_ptr pt_t pt_map_data \ + "arch_cap = PageTableCap pt_ptr NormalPT_T pt_map_data \ \invs and valid_cap (ArchObjectCap arch_cap) and cte_wp_at ((=) (ArchObjectCap arch_cap)) slot and real_cte_at slot and (\s. \x \ set excaps. cte_wp_at ((=) (fst x)) (snd x) s)\ @@ -1581,9 +1563,8 @@ lemma decode_page_table_invocation_wf[wp]: unfolding decode_page_table_invocation_def is_final_cap_def apply (wpsimp simp: valid_arch_inv_def valid_pti_def valid_arch_cap_def valid_cap_def cte_wp_at_caps_of_state is_cap_simps) - sorry (* FIXME AARCH64 - apply (rule conjI; clarsimp) - done *) + apply (fastforce dest: vspace_for_asid_valid_pt simp: in_omonad obj_at_def) + done lemma cte_wp_at_eq_simp: "cte_wp_at ((=) cap) = cte_wp_at (\c. c = cap)" @@ -1629,11 +1610,11 @@ lemma decode_asid_control_invocation_wf[wp]: and (\s. descendants_of (snd (excaps!0)) (cdt s) = {}) and cte_wp_at (\c. \idx. c = UntypedCap False frame pageBits idx) (snd (excaps!0)) and (\s. 
arm_asid_table (arch_state s) free = None)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: lookup_target_slot_def) apply wp apply (clarsimp simp: cte_wp_at_def) - apply (wpsimp wp: ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp)+ + apply (wpsimp wp: ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp)+ apply (rule conjI, fastforce) apply (cases excaps, simp) apply (case_tac list, simp) @@ -1684,6 +1665,13 @@ lemma arch_decode_vcpu_invocation_wf[wp]: apply (clarsimp simp: valid_arch_inv_def valid_cap_def valid_vcpu_invocation_def) done +lemma arch_decode_vspace_invocation_wf[wp]: + "\invs\ + decode_vspace_invocation label args slot (PageTableCap pt VSRootPT_T m) excaps + \valid_arch_inv\, -" + unfolding decode_vspace_invocation_def decode_vs_inv_flush_def + by (wpsimp simp: Let_def valid_arch_inv_def) + lemma arch_decode_inv_wf[wp]: "\invs and valid_cap (ArchObjectCap arch_cap) and cte_wp_at ((=) (ArchObjectCap arch_cap)) slot and real_cte_at slot and @@ -1692,14 +1680,7 @@ lemma arch_decode_inv_wf[wp]: arch_decode_invocation label args x_slot slot arch_cap excaps \valid_arch_inv\,-" unfolding arch_decode_invocation_def - apply wpsimp - subgoal sorry (* VSRootPT *) - subgoal sorry (* NormalPT *) - apply wpsimp+ - apply fastforce - done (* FIXME AARCH64 PTs - (note: ARM_HYP has this as one big proof, this factored-out style is nicer) - unfolding arch_decode_invocation_def by wpsimp fastforce *) + by (wpsimp | fastforce)+ declare word_less_sub_le [simp] @@ -1725,7 +1706,7 @@ lemma perform_vcpu_invocation_pred_tcb_at[wp_unsafe]: done crunch pred_tcb_at [wp]: - perform_page_table_invocation, perform_page_invocation, perform_asid_pool_invocation + perform_page_table_invocation, perform_page_invocation, perform_asid_pool_invocation, perform_vspace_invocation "pred_tcb_at proj P t" (wp: crunch_wps simp: crunch_simps) @@ -1734,24 +1715,10 @@ lemma arch_pinv_st_tcb_at: st_tcb_at (P and (Not \ inactive) and (Not \ idle)) t\ arch_perform_invocation ai \\rv. 
st_tcb_at P t\" - apply (cases ai; simp add: arch_perform_invocation_def valid_arch_inv_def) - sorry (* FIXME AARCH64 enter MATCH (probably a free var somewhere) - apply (wp perform_asid_control_invocation_st_tcb_at; fastforce elim!: pred_tcb_weakenE)+ - done - NOTE: ARM_HYP proof was: - apply (cases ai, simp_all add: arch_perform_invocation_def valid_arch_inv_def) - apply (wp perform_page_table_invocation_pred_tcb_at, - fastforce elim!: pred_tcb_weakenE) - apply (wp perform_page_directory_invocation_pred_tcb_at, fastforce elim: pred_tcb_weakenE) - apply (wp perform_page_invocation_pred_tcb_at, fastforce elim!: pred_tcb_weakenE) - apply (wp perform_asid_control_invocation_st_tcb_at, - fastforce elim!: pred_tcb_weakenE) - apply (wp perform_asid_pool_invocation_pred_tcb_at, - fastforce elim!: pred_tcb_weakenE) - apply (wp perform_vcpu_invocation_pred_tcb_at, - fastforce elim!: pred_tcb_weakenE) - done - *) + unfolding arch_perform_invocation_def valid_arch_inv_def + by (cases ai; + wpsimp wp: perform_vcpu_invocation_pred_tcb_at perform_asid_control_invocation_st_tcb_at; + fastforce elim!: pred_tcb_weakenE) end diff --git a/proof/invariant-abstract/AARCH64/ArchBCorres2_AI.thy b/proof/invariant-abstract/AARCH64/ArchBCorres2_AI.thy index f740b99624..01ce32467d 100644 --- a/proof/invariant-abstract/AARCH64/ArchBCorres2_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchBCorres2_AI.thy @@ -141,8 +141,7 @@ lemma vppi_event_bcorres[wp]: lemma handle_reserved_irq_bcorres[wp]: "bcorres (handle_reserved_irq a) (handle_reserved_irq a)" unfolding handle_reserved_irq_def by wpsimp -lemma handle_hypervisor_fault_bcorres[wp]: "bcorres (handle_hypervisor_fault a b) (handle_hypervisor_fault a b)" - by (cases b; wpsimp) +crunch (bcorres)bcorres[wp]: handle_hypervisor_fault truncate_state lemma handle_event_bcorres[wp]: "bcorres (handle_event e) (handle_event e)" apply (cases e) diff --git a/proof/invariant-abstract/AARCH64/ArchBCorres_AI.thy b/proof/invariant-abstract/AARCH64/ArchBCorres_AI.thy index c26b03ceb9..16f272cdb3 100644 --- a/proof/invariant-abstract/AARCH64/ArchBCorres_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchBCorres_AI.thy @@ -8,28 +8,11 @@ theory ArchBCorres_AI imports BCorres_AI + ArchBitSetup_AI begin context Arch begin global_naming AARCH64 -(* FIXME AARCH64: move/generalise; port back to RISC-V before we seed AInvs from there. - Ideally we want to pick bit0/bit1 automatically based on the value of asid_pool_level. 
*) -lemmas vm_level_minus_induct = bit1.minus_induct -lemmas vm_level_of_nat_cases = bit1.of_nat_cases -lemmas vm_level_not_less_zero_bit0 = bit1.not_less_zero_bit0 -lemmas vm_level_leq_minus1_less = bit1.leq_minus1_less -lemmas vm_level_no_overflow_eq_max_bound = bit1.no_overflow_eq_max_bound -lemmas vm_level_size_inj = bit1.size_inj -lemmas vm_level_plus_one_leq = bit1.plus_one_leq -lemmas vm_level_pred = bit1.pred -lemmas vm_level_zero_least = bit1.zero_least -lemmas vm_level_minus1_leq = bit1.minus1_leq -lemmas vm_level_size_plus = bit1.size_plus -lemmas vm_level_size_less = bit1.size_less -lemmas vm_level_from_top_induct = bit1.from_top_induct -lemmas vm_level_size_less_eq = bit1.size_less_eq - - lemma entry_for_asid_truncate[simp]: "entry_for_asid asid (truncate_state s) = entry_for_asid asid s" by (simp add: entry_for_asid_def pool_for_asid_def obind_def @@ -53,7 +36,7 @@ lemma vs_lookup_slot_truncate[simp]: lemma pt_lookup_from_level_bcorres[wp]: "bcorres (pt_lookup_from_level l r b c) (pt_lookup_from_level l r b c)" - by (induct l arbitrary: r b c rule: vm_level_minus_induct; wpsimp simp: pt_lookup_from_level_simps) + by (induct l arbitrary: r b c rule: vm_level.minus_induct; wpsimp simp: pt_lookup_from_level_simps) crunch (bcorres) bcorres[wp]: arch_finalise_cap truncate_state crunch (bcorres) bcorres[wp]: prepare_thread_delete truncate_state diff --git a/proof/invariant-abstract/AARCH64/ArchBitSetup_AI.thy b/proof/invariant-abstract/AARCH64/ArchBitSetup_AI.thy new file mode 100644 index 0000000000..cb90255900 --- /dev/null +++ b/proof/invariant-abstract/AARCH64/ArchBitSetup_AI.thy @@ -0,0 +1,66 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchBitSetup_AI +imports + Include_AI +begin + +(* This context block finds all lemmas with names *.bit1.* or *.bit0.* depending on the size + of vm_level, and locally re-declares them under the prefix vm_level.* so that their name + is available generically without reference to whether vm_level is even or odd *) +context begin global_naming vm_level + +local_setup \ + let + (* From find_theorems.ML *) + fun all_facts_of lthy = + let + val ctxt = Local_Theory.target_of lthy; + val thy = Proof_Context.theory_of ctxt; + val transfer = Global_Theory.transfer_theories thy; + val global_facts = Global_Theory.facts_of thy; + in + (Facts.dest_all (Context.Proof ctxt) false [] global_facts) + |> maps Facts.selections + |> map (apsnd transfer) + end; + + (* We will not be referencing lemma collections, only single names *) + fun is_single (Facts.Named (_, NONE)) = true + | is_single _ = false + + fun name_of (Facts.Named ((name, _), _)) = name + | name_of _ = raise ERROR "name_of: no name" + + (* Add a single theorem with name to the local theory *) + fun add_thm (name, thm) lthy = + Local_Theory.notes [((Binding.name name, []), [([thm], [])])] lthy |> #2 + + (* Add a list of theorems with names to the local theory *) + fun add_thms lthy xs = fold add_thm xs lthy + + (* The top-level constructor name of the vm_level numeral type tells us whether to match + on bit0 or bit1 *) + val bit_name = dest_Type @{typ AARCH64_A.vm_level} |> fst |> Long_Name.base_name + + (* Check whether an exploded long name does have a bit0/bit1 name *) + fun matches_bit_name names = member (op =) names bit_name + + fun redeclare_short_names lthy = + all_facts_of lthy + |> filter (matches_bit_name o Long_Name.explode o Facts.ref_name o fst) + |> filter (is_single o fst) + |> map (fn (thm_ref, thm) => (name_of thm_ref 
|> Long_Name.base_name, thm)) + |> add_thms lthy + in + redeclare_short_names + end +\ + +end + +end \ No newline at end of file diff --git a/proof/invariant-abstract/AARCH64/ArchBits_AI.thy b/proof/invariant-abstract/AARCH64/ArchBits_AI.thy index 86f9d6b194..f5afcbc3fb 100644 --- a/proof/invariant-abstract/AARCH64/ArchBits_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchBits_AI.thy @@ -50,6 +50,18 @@ lemma invs_valid_vs_lookup[elim!]: "invs s \ valid_vs_lookup s " by (clarsimp simp: invs_def valid_state_def valid_arch_caps_def) +lemma invs_vmid_inv[elim!]: + "invs s \ vmid_inv s" + by (auto simp: invs_def valid_state_def valid_arch_state_def) + +lemma invs_valid_vmid_table[elim!]: + "invs s \ valid_vmid_table s" + by (auto simp: invs_def valid_state_def valid_arch_state_def) + +lemma invs_valid_global_arch_objs[elim!]: + "invs s \ valid_global_arch_objs s" + by (clarsimp simp: invs_def valid_state_def valid_arch_state_def) + lemma pbfs_atleast_pageBits: "pageBits \ pageBitsForSize sz" by (cases sz) (auto simp: pageBits_def) diff --git a/proof/invariant-abstract/AARCH64/ArchCNodeInv_AI.thy b/proof/invariant-abstract/AARCH64/ArchCNodeInv_AI.thy index 3de03c0eec..7d36ce96ff 100644 --- a/proof/invariant-abstract/AARCH64/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchCNodeInv_AI.thy @@ -389,10 +389,10 @@ lemma swap_of_caps_valid_arch_caps [CNodeInv_AI_assms]: apply (rule conjI; clarsimp) apply (intro allI impI) apply (elim exE conjE) - sorry (* FIXME AARCH64 - subgoal for _ _ _ _ p + subgoal for \ p by (case_tac "p=b"; case_tac "p=a"; - fastforce simp add: valid_table_caps_def empty_table_caps_of) + clarsimp simp add: valid_table_caps_def empty_table_caps_of; + fastforce) apply (simp add: unique_table_caps_def split del: if_split) apply (erule allfEI[where f="id (a := b, b := a)"]) apply (erule allfEI[where f="id (a := b, b := a)"]) @@ -402,7 +402,7 @@ lemma swap_of_caps_valid_arch_caps [CNodeInv_AI_assms]: apply (erule allfEI[where f="id (a := b, b := a)"]) apply (clarsimp split del: if_split split: if_split_asm dest!:vs_cap_ref_to_table_cap_ref dest!: weak_derived_table_cap_ref) - done *) + done lemma cap_swap_asid_map[wp, CNodeInv_AI_assms]: @@ -410,10 +410,8 @@ lemma cap_swap_asid_map[wp, CNodeInv_AI_assms]: cte_wp_at (weak_derived c) a and cte_wp_at (weak_derived c') b\ cap_swap c a c' b \\rv. valid_asid_map\" - apply (simp add: cap_swap_def set_cdt_def valid_asid_map_def vspace_at_asid_def) - apply (rule hoare_pre) - apply (wp set_cap.vs_lookup|simp - |rule hoare_lift_Pf [where f=arch_state])+ + apply (simp add: cap_swap_def set_cdt_def vspace_at_asid_def) + apply (wp set_cap.vs_lookup|simp|rule hoare_lift_Pf [where f=arch_state])+ done @@ -489,34 +487,26 @@ lemma finalise_cap_makes_halted_proof[CNodeInv_AI_assms]: split: option.split | intro impI conjI | rule hoare_drop_imp)+ - sorry (* FIXME AARCH64 apply (drule_tac final_zombie_not_live; (assumption | simp add: invs_iflive)?) 
apply (clarsimp simp: pred_tcb_at_def is_tcb obj_at_def live_def, elim disjE) apply (clarsimp; case_tac "tcb_state tcb"; simp)+ - apply (rename_tac arch_cap) - apply (case_tac arch_cap, simp_all add: arch_finalise_cap_def) - apply (wp - | clarsimp simp: valid_cap_def obj_at_def is_tcb_def is_cap_table_def - split: option.splits bool.split - | intro impI conjI)+ - done *) + apply (wpsimp simp: arch_finalise_cap_def valid_cap_def obj_at_def is_tcb_def is_cap_table_def) + done lemmas finalise_cap_makes_halted = finalise_cap_makes_halted_proof -(* FIXME AARCH64 VCPU/FPU crunch emptyable[wp,CNodeInv_AI_assms]: finalise_cap "\s. emptyable sl s" (simp: crunch_simps rule: emptyable_lift - wp: crunch_wps suspend_emptyable unbind_notification_invs unbind_maybe_notification_invs) *) - + wp: crunch_wps suspend_emptyable unbind_notification_invs unbind_maybe_notification_invs + arch_finalise_cap_pred_tcb_at) lemma finalise_cap_not_reply_master_unlifted [CNodeInv_AI_assms]: "(rv, s') \ fst (finalise_cap cap sl s) \ \ is_master_reply_cap (fst rv)" - sorry (* FIXME AARCH64 by (case_tac cap, auto simp: is_cap_simps in_monad liftM_def arch_finalise_cap_def - split: if_split_asm arch_cap.split_asm bool.split_asm option.split_asm) *) - + split: if_split_asm arch_cap.split_asm bool.split_asm option.split_asm + pt_type.splits) lemma nat_to_cref_0_replicate [CNodeInv_AI_assms]: "\n. n < word_bits \ nat_to_cref n 0 = replicate n False" @@ -531,17 +521,16 @@ lemma prepare_thread_delete_thread_cap [CNodeInv_AI_assms]: "\\s. caps_of_state s x = Some (cap.ThreadCap p)\ prepare_thread_delete t \\rv s. caps_of_state s x = Some (cap.ThreadCap p)\" - sorry (* FIXME AARCH64 VCPU - by (wpsimp simp: prepare_thread_delete_def) *) + by (wpsimp simp: prepare_thread_delete_def) end global_interpretation CNodeInv_AI?: CNodeInv_AI - proof goal_cases +proof goal_cases interpret Arch . - case 1 show ?case sorry (* FIXME AARCH64 by (unfold_locales; (fact CNodeInv_AI_assms)?) *) - qed + case 1 show ?case by (unfold_locales; (fact CNodeInv_AI_assms)?) +qed termination rec_del by (rule rec_del_termination) @@ -550,9 +539,8 @@ termination rec_del by (rule rec_del_termination) context Arch begin global_naming AARCH64 lemma post_cap_delete_pre_is_final_cap': - "\s. 
- \valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + "\valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -629,7 +617,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) @@ -798,7 +786,7 @@ qed lemmas rec_del_invs'[CNodeInv_AI_assms] = rec_del_invs'' [where Q=\, - simplified hoare_post_taut pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] + simplified hoare_TrueI pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] end @@ -813,15 +801,8 @@ global_interpretation CNodeInv_AI_2?: CNodeInv_AI_2 context Arch begin global_naming AARCH64 lemma finalise_cap_rvk_prog [CNodeInv_AI_assms]: - "\\s. revoke_progress_ord m (\x. map_option cap_to_rpo (caps_of_state s x))\ - finalise_cap a b - \\_ s. revoke_progress_ord m (\x. map_option cap_to_rpo (caps_of_state s x))\" - apply (case_tac a,simp_all add:liftM_def) - apply (wp suspend_rvk_prog deleting_irq_handler_rvk_prog - | clarsimp simp:is_final_cap_def comp_def)+ - sorry (* FIXME AARCH64 probably VCPU - done *) - + "finalise_cap cap f \\s. revoke_progress_ord m (\x. map_option cap_to_rpo (caps_of_state s x))\" + by (cases cap; wpsimp wp: suspend_rvk_prog deleting_irq_handler_rvk_prog) lemma rec_del_rvk_prog [CNodeInv_AI_assms]: "st \ \\s. revoke_progress_ord m (option_map cap_to_rpo \ caps_of_state s) @@ -1008,10 +989,10 @@ end global_interpretation CNodeInv_AI_5?: CNodeInv_AI_5 - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact CNodeInv_AI_assms)?) - qed +qed end diff --git a/proof/invariant-abstract/AARCH64/ArchCSpaceInvPre_AI.thy b/proof/invariant-abstract/AARCH64/ArchCSpaceInvPre_AI.thy index 92a9390407..3e3e165547 100644 --- a/proof/invariant-abstract/AARCH64/ArchCSpaceInvPre_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchCSpaceInvPre_AI.thy @@ -180,7 +180,7 @@ lemma set_cap_valid_table_caps: apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_convert_imp[OF set_cap_caps_of_state] hoare_use_eq[OF set_cap_arch set_cap_obj_at_impossible]) - apply (fastforce simp: cap_asid_def the_arch_cap_def split: if_split_asm) (* FIXME AARCH64: the_arch_cap has duplicate definition *) + apply (fastforce simp: cap_asid_def split: if_split_asm) done lemma cap_asid_vs_cap_ref_None: diff --git a/proof/invariant-abstract/AARCH64/ArchCSpacePre_AI.thy b/proof/invariant-abstract/AARCH64/ArchCSpacePre_AI.thy index 18ef23e004..38a0a88e20 100644 --- a/proof/invariant-abstract/AARCH64/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchCSpacePre_AI.thy @@ -151,17 +151,10 @@ lemma arch_derived_is_device: split: if_split_asm cap.splits arch_cap.splits)+ done -(* FIXME AARCH64: use vcpus_of instead *) -lemma set_cap_no_vcpu[wp]: - "\obj_at (is_vcpu and P) p\ set_cap cap cref \\_. 
obj_at (is_vcpu and P) p\" - unfolding set_cap_def2 split_def - by (wpsimp wp: set_object_wp get_object_wp get_cap_wp) - (auto simp: obj_at_def is_vcpu_def) - lemma valid_arch_mdb_simple: "\ valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ - valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) (caps_of_state s(dest \ cap))" + valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) ((caps_of_state s)(dest \ cap))" by (auto simp: valid_arch_mdb_def is_cap_revocable_def arch_is_cap_revocable_def is_simple_cap_def safe_parent_for_def is_cap_simps) @@ -186,34 +179,34 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by (clarsimp simp: valid_arch_mdb_def) lemma valid_arch_mdb_free_index_update': "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_weak_derived_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_tcb_cnode_update: "valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb ((is_original_cap s) ((t, tcb_cnode_index 2) := True)) - (caps_of_state s((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" + ((caps_of_state s)((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" by (clarsimp simp: valid_arch_mdb_def) lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -246,10 +239,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. 
valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by (clarsimp simp: valid_arch_mdb_def)+ end diff --git a/proof/invariant-abstract/AARCH64/ArchCSpace_AI.thy b/proof/invariant-abstract/AARCH64/ArchCSpace_AI.thy index 8e74745c27..99901c650d 100644 --- a/proof/invariant-abstract/AARCH64/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchCSpace_AI.thy @@ -175,7 +175,7 @@ lemma is_derived_is_cap: lemma vs_lookup_pages_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ vs_lookup_pages (s\kheap := kheap s(p \ ko')\) = vs_lookup_pages s" + \ vs_lookup_pages (s\kheap := (kheap s)(p \ ko')\) = vs_lookup_pages s" unfolding vs_lookup_target_def vs_lookup_slot_def apply (frule aobjs_of_non_aobj_upd[where ko'=ko'], simp+) apply (rule ext)+ @@ -190,7 +190,7 @@ lemma vs_lookup_pages_non_aobj_upd: lemma vs_lookup_target_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(p \ ko')\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(p \ ko')\) = vs_lookup_target level asid vref s" by (drule vs_lookup_pages_non_aobj_upd[where ko'=ko'], auto dest: fun_cong) diff --git a/proof/invariant-abstract/AARCH64/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/AARCH64/ArchDetSchedAux_AI.thy index 3360a1fe21..d500ad97dc 100644 --- a/proof/invariant-abstract/AARCH64/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchDetSchedAux_AI.thy @@ -15,106 +15,88 @@ named_theorems DetSchedAux_AI_assms crunches init_arch_objects for exst[wp]: "\s. P (exst s)" - and ct[wp]: "\s. P (cur_thread s)" and valid_etcbs[wp, DetSchedAux_AI_assms]: valid_etcbs - (wp: crunch_wps hoare_unless_wp valid_etcbs_lift) + and valid_queues[wp]: valid_queues + and valid_sched_action[wp]: valid_sched_action + and valid_sched[wp]: valid_sched -crunch ct[wp, DetSchedAux_AI_assms]: invoke_untyped "\s. P (cur_thread s)" - (wp: crunch_wps dxo_wp_weak preemption_point_inv mapME_x_inv_wp - simp: crunch_simps do_machine_op_def detype_def mapM_x_defsym unless_def - ignore: freeMemory retype_region_ext) -crunch ready_queues[wp, DetSchedAux_AI_assms]: invoke_untyped "\s. P (ready_queues s)" - (wp: crunch_wps mapME_x_inv_wp preemption_point_inv' - simp: detype_def detype_ext_def crunch_simps - wrap_ext_det_ext_ext_def mapM_x_defsym - ignore: freeMemory) -crunch scheduler_action[wp, DetSchedAux_AI_assms]: invoke_untyped "\s. P (scheduler_action s)" - (wp: crunch_wps mapME_x_inv_wp preemption_point_inv' - simp: detype_def detype_ext_def crunch_simps - wrap_ext_det_ext_ext_def mapM_x_defsym - ignore: freeMemory) -crunch cur_domain[wp, DetSchedAux_AI_assms]: invoke_untyped "\s. P (cur_domain s)" +(* already proved earlier *) +declare invoke_untyped_cur_thread[DetSchedAux_AI_assms] + +crunches invoke_untyped + for ready_queues[wp, DetSchedAux_AI_assms]: "\s. P (ready_queues s)" + and scheduler_action[wp, DetSchedAux_AI_assms]: "\s. P (scheduler_action s)" + and cur_domain[wp, DetSchedAux_AI_assms]: "\s. P (cur_domain s)" (wp: crunch_wps mapME_x_inv_wp preemption_point_inv' - simp: detype_def detype_ext_def crunch_simps - wrap_ext_det_ext_ext_def mapM_x_defsym - ignore: freeMemory) -crunch idle_thread[wp, DetSchedAux_AI_assms]: invoke_untyped "\s. 
P (idle_thread s)" + simp: detype_def detype_ext_def crunch_simps wrap_ext_det_ext_ext_def mapM_x_defsym) + +crunches invoke_untyped + for idle_thread[wp, DetSchedAux_AI_assms]: "\s. P (idle_thread s)" (wp: crunch_wps mapME_x_inv_wp preemption_point_inv dxo_wp_weak - simp: detype_def detype_ext_def crunch_simps - wrap_ext_det_ext_ext_def mapM_x_defsym - ignore: freeMemory retype_region_ext) + simp: detype_def detype_ext_def crunch_simps wrap_ext_det_ext_ext_def mapM_x_defsym + ignore: retype_region_ext) lemma tcb_sched_action_valid_idle_etcb: - "\valid_idle_etcb\ - tcb_sched_action foo thread - \\_. valid_idle_etcb\" - apply (rule valid_idle_etcb_lift) - apply (simp add: tcb_sched_action_def set_tcb_queue_def) - apply (wp | simp)+ - done + "tcb_sched_action foo thread \valid_idle_etcb\" + by (rule valid_idle_etcb_lift) + (wpsimp simp: tcb_sched_action_def set_tcb_queue_def) -crunch ekheap[wp]: do_machine_op "\s. P (ekheap s)" +crunches do_machine_op + for ekheap[wp]: "\s. P (ekheap s)" lemma delete_objects_etcb_at[wp, DetSchedAux_AI_assms]: - "\\s::det_ext state. etcb_at P t s\ delete_objects a b \\r s. etcb_at P t s\" - apply (simp add: delete_objects_def) - apply (simp add: detype_def detype_ext_def wrap_ext_det_ext_ext_def etcb_at_def|wp)+ - done - -crunch etcb_at[wp]: reset_untyped_cap "etcb_at P t" - (wp: preemption_point_inv' mapME_x_inv_wp crunch_wps - simp: unless_def) + "delete_objects a b \etcb_at P t\" + unfolding delete_objects_def detype_def detype_ext_def + by (wpsimp simp: wrap_ext_det_ext_ext_def etcb_at_def) -crunch valid_etcbs[wp]: reset_untyped_cap "valid_etcbs" +crunches reset_untyped_cap + for etcb_at[wp]: "etcb_at P t" + and valid_etcbs[wp]: "valid_etcbs" (wp: preemption_point_inv' mapME_x_inv_wp crunch_wps simp: unless_def) lemma invoke_untyped_etcb_at [DetSchedAux_AI_assms]: - "\(\s :: det_ext state. etcb_at P t s) and valid_etcbs\ invoke_untyped ui \\r s. st_tcb_at (Not o inactive) t s \ etcb_at P t s\" + "\etcb_at P t and valid_etcbs\ + invoke_untyped ui + \\r s. st_tcb_at (Not o inactive) t s \ etcb_at P t s\" apply (cases ui) - apply (simp add: mapM_x_def[symmetric] invoke_untyped_def whenE_def - split del: if_split) - apply (wp retype_region_etcb_at mapM_x_wp' - create_cap_no_pred_tcb_at typ_at_pred_tcb_at_lift - hoare_convert_imp[OF create_cap_no_pred_tcb_at] - hoare_convert_imp[OF _ init_arch_objects_exst] - | simp - | (wp (once) hoare_drop_impE_E))+ + apply (simp add: mapM_x_def[symmetric] invoke_untyped_def) + apply (wpsimp wp: retype_region_etcb_at mapM_x_wp' + create_cap_no_pred_tcb_at typ_at_pred_tcb_at_lift + hoare_convert_imp[OF create_cap_no_pred_tcb_at] + hoare_convert_imp[OF _ init_arch_objects_exst] + hoare_drop_impE_E) done -crunch valid_blocked[wp, DetSchedAux_AI_assms]: init_arch_objects valid_blocked +crunches init_arch_objects + for valid_blocked[wp, DetSchedAux_AI_assms]: valid_blocked (wp: valid_blocked_lift set_cap_typ_at) -lemma perform_asid_control_etcb_at:"\(\s. etcb_at P t s) and valid_etcbs\ - perform_asid_control_invocation aci - \\r s. st_tcb_at (Not \ inactive) t s \ etcb_at P t s\" +lemma perform_asid_control_etcb_at: + "\etcb_at P t and valid_etcbs\ + perform_asid_control_invocation aci + \\r s. 
st_tcb_at (Not \ inactive) t s \ etcb_at P t s\" apply (simp add: perform_asid_control_invocation_def) - apply (rule hoare_pre) - apply (wp | wpc | simp)+ + apply wpsimp apply (wp hoare_imp_lift_something typ_at_pred_tcb_at_lift)[1] apply (rule hoare_drop_imps) apply (wp retype_region_etcb_at)+ apply simp done -crunch ct[wp]: perform_asid_control_invocation "\s. P (cur_thread s)" - -crunch idle_thread[wp]: perform_asid_control_invocation "\s. P (idle_thread s)" - -crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: static_imp_wp) - -crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: static_imp_wp) - -crunch schedact[wp]: perform_asid_control_invocation "\s :: det_ext state. P (scheduler_action s)" (wp: crunch_wps simp: detype_def detype_ext_def wrap_ext_det_ext_ext_def cap_insert_ext_def ignore: freeMemory) - -crunch rqueues[wp]: perform_asid_control_invocation "\s :: det_ext state. P (ready_queues s)" (wp: crunch_wps simp: detype_def detype_ext_def wrap_ext_det_ext_ext_def cap_insert_ext_def ignore: freeMemory) - -crunch cur_domain[wp]: perform_asid_control_invocation "\s :: det_ext state. P (cur_domain s)" (wp: crunch_wps simp: detype_def detype_ext_def wrap_ext_det_ext_ext_def cap_insert_ext_def ignore: freeMemory) +crunches perform_asid_control_invocation + for idle_thread[wp]: "\s. P (idle_thread s)" + and valid_etcbs[wp]: valid_etcbs + and valid_blocked[wp]: valid_blocked + and schedact[wp]: "\s. P (scheduler_action s)" + and ready_queues[wp]: "\s. P (ready_queues s)" + and cur_domain[wp]: "\s. P (cur_domain s)" + (wp: hoare_weak_lift_imp) lemma perform_asid_control_invocation_valid_sched: "\ct_active and invs and valid_aci aci and valid_sched and valid_idle\ - perform_asid_control_invocation aci + perform_asid_control_invocation aci \\_. valid_sched\" apply (rule hoare_pre) apply (rule_tac I="invs and ct_active and valid_aci aci" in valid_sched_tcb_state_preservation) @@ -130,31 +112,21 @@ lemma perform_asid_control_invocation_valid_sched: apply simp done -crunch valid_queues[wp]: init_arch_objects valid_queues (wp: valid_queues_lift) - -crunch valid_sched_action[wp]: init_arch_objects valid_sched_action (wp: valid_sched_action_lift) - -crunch valid_sched[wp]: init_arch_objects valid_sched (wp: valid_sched_lift) - -(* FIXME AARCH64 issue with crunch ordering, ArchVCPU_AI gets there first rather than some crunch - that does this declaration *) -declare invoke_untyped_cur_thread[DetSchedAux_AI_assms] - end lemmas tcb_sched_action_valid_idle_etcb = AARCH64.tcb_sched_action_valid_idle_etcb global_interpretation DetSchedAux_AI_det_ext?: DetSchedAux_AI_det_ext - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact DetSchedAux_AI_assms)?) - qed +qed global_interpretation DetSchedAux_AI?: DetSchedAux_AI - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact DetSchedAux_AI_assms)?) - qed +qed end diff --git a/proof/invariant-abstract/AARCH64/ArchDetSchedDomainTime_AI.thy b/proof/invariant-abstract/AARCH64/ArchDetSchedDomainTime_AI.thy index 766b0d54f6..cdb3a31885 100644 --- a/proof/invariant-abstract/AARCH64/ArchDetSchedDomainTime_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchDetSchedDomainTime_AI.thy @@ -24,22 +24,6 @@ crunches crunch domain_list_inv [wp, DetSchedDomainTime_AI_assms]: arch_finalise_cap "\s. 
P (domain_list s)" (wp: hoare_drop_imps mapM_wp subset_refl simp: crunch_simps) -lemma handle_hypervisor_fault_domain_time_inv[wp, DetSchedDomainTime_AI_assms]: - "handle_hypervisor_fault t f \\s. P (domain_time s)\" - sorry (* FIXME AARCH64 some crunches are going wrong *) -lemma handle_hypervisor_fault_domain_list_inv[wp, DetSchedDomainTime_AI_assms]: - "handle_hypervisor_fault t f \\s. P (domain_list s)\" - sorry (* FIXME AARCH64 some crunches are going wrong *) -lemma vppi_event_domain_time_inv[wp]: - "vppi_event irq \\s. P (domain_time s)\" - sorry (* FIXME AARCH64 some crunches are going wrong *) -lemma vppi_event_domain_list_inv[wp]: - "vppi_event irq \\s. P (domain_list s)\" - sorry (* FIXME AARCH64 some crunches are going wrong *) -lemma vgic_maintenance_domain_time_inv[wp]: - "vgic_maintenance \\s. P (domain_time s)\" - sorry (* FIXME AARCH64 some crunches are going wrong *) - crunch domain_list_inv [wp, DetSchedDomainTime_AI_assms]: arch_activate_idle_thread, arch_switch_to_thread, arch_switch_to_idle_thread, handle_arch_fault_reply, @@ -48,7 +32,7 @@ crunch domain_list_inv [wp, DetSchedDomainTime_AI_assms]: arch_post_modify_registers, arch_post_cap_deletion, handle_vm_fault, arch_invoke_irq_handler "\s. P (domain_list s)" - (simp: crunch_simps) + (simp: crunch_simps isFpuEnable_def wp: mapM_wp' transfer_caps_loop_pres) crunch domain_time_inv [wp, DetSchedDomainTime_AI_assms]: arch_finalise_cap "\s. P (domain_time s)" (wp: hoare_drop_imps mapM_wp subset_refl simp: crunch_simps) @@ -59,9 +43,9 @@ crunch domain_time_inv [wp, DetSchedDomainTime_AI_assms]: arch_invoke_irq_control, arch_get_sanitise_register_info, prepare_thread_delete, handle_hypervisor_fault, handle_vm_fault, arch_post_modify_registers, arch_post_cap_deletion, make_arch_fault_msg, - arch_invoke_irq_handler + arch_invoke_irq_handler, handle_reserved_irq, arch_mask_irq_signal "\s. P (domain_time s)" - (simp: crunch_simps ignore: handle_hypervisor_fault) + (simp: crunch_simps wp: transfer_caps_loop_pres crunch_wps) crunches do_machine_op for exst[wp]: "\s. P (exst s)" @@ -71,10 +55,10 @@ declare init_arch_objects_exst[DetSchedDomainTime_AI_assms] end global_interpretation DetSchedDomainTime_AI?: DetSchedDomainTime_AI - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact DetSchedDomainTime_AI_assms)?) - qed +qed context Arch begin global_naming AARCH64 @@ -102,13 +86,15 @@ lemma vppi_event_valid_domain_time: apply clarsimp done +lemma irq_vppi_event_index_irqVGICMaintenance[simp]: + "irq_vppi_event_index irqVGICMaintenance = None" + by (simp add: irq_vppi_event_index_def irqVGICMaintenance_def irqVTimerEvent_def) + lemma handle_reserved_irq_valid_domain_time: "\\s :: det_ext state. 0 < domain_time s\ handle_reserved_irq i \\y s. domain_time s = 0 \ scheduler_action s = choose_new_thread\" - unfolding handle_reserved_irq_def when_def - supply if_split[split del] - by (wpsimp wp: vppi_event_valid_domain_time vgic_maintenance_valid_domain_time - hoare_drop_imp[where R="\_ _. irq_vppi_event_index _ = _"]) + unfolding handle_reserved_irq_def + by (wpsimp wp: vppi_event_valid_domain_time vgic_maintenance_valid_domain_time) lemma timer_tick_valid_domain_time: "\ \s :: det_ext state. 0 < domain_time s \ @@ -145,22 +131,21 @@ lemma handle_interrupt_valid_domain_time [DetSchedDomainTime_AI_assms]: apply (rule hoare_post_imp[where Q="\_. ?dtnot0" and a="vppi_event i" for i], fastforce) apply wpsimp+ apply (rule hoare_post_imp[where Q="\_. 
?dtnot0" and a="vgic_maintenance"], fastforce) - apply wpsimp + apply wpsimp+ apply (rule hoare_post_imp[where Q="\_. ?dtnot0" and a="get_irq_state i" for i], fastforce) apply wpsimp+ done crunches handle_reserved_irq, arch_mask_irq_signal - for domain_time_inv [wp, DetSchedDomainTime_AI_assms]: "\s. P (domain_time s)" - and domain_list_inv [wp, DetSchedDomainTime_AI_assms]: "\s. P (domain_list s)" + for domain_list_inv [wp, DetSchedDomainTime_AI_assms]: "\s. P (domain_list s)" (wp: crunch_wps mapM_wp subset_refl simp: crunch_simps) end global_interpretation DetSchedDomainTime_AI_2?: DetSchedDomainTime_AI_2 - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact DetSchedDomainTime_AI_assms)?) - qed +qed end diff --git a/proof/invariant-abstract/AARCH64/ArchDetSchedSchedule_AI.thy b/proof/invariant-abstract/AARCH64/ArchDetSchedSchedule_AI.thy index 0d080e74c8..fa7b48ae4a 100644 --- a/proof/invariant-abstract/AARCH64/ArchDetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchDetSchedSchedule_AI.thy @@ -19,21 +19,16 @@ crunch prepare_thread_delete_idle_thread[wp, DetSchedSchedule_AI_assms]: crunch exst[wp]: set_vcpu "\s. P (exst s)" (wp: crunch_wps) -crunch exst[wp]: vcpu_disable,vcpu_restore,vcpu_save "\s. P (exst s)" +crunches vcpu_disable, vcpu_restore, vcpu_save, vcpu_switch + for exst[wp]: "\s. P (exst s)" (wp: crunch_wps) -(* FIXME AARCH64: check if crunchable, also below *) -lemma vcpu_switch_exst[wp]: - "\\s. P (exst s)\ vcpu_switch param_a \\_ s. P (exst s)\" - unfolding vcpu_switch_def by (rule hoare_pre) wpsimp+ - lemma set_vcpu_etcbs [wp]: "\valid_etcbs\ set_vcpu a b \\_. valid_etcbs\" by (rule valid_etcbs_lift; wp) -(* FIXME AARCH64: param_a, also further below *) lemma vcpu_switch_valid_etcbs[wp]: - "\valid_etcbs\ vcpu_switch param_a \\_. valid_etcbs\" + "vcpu_switch v \valid_etcbs\" by (rule valid_etcbs_lift; wp) lemma pred_tcb_atP[wp]: @@ -67,28 +62,62 @@ lemma vcpu_switch_weak_valid_sched_action[wp]: "\weak_valid_sched_action\ vcpu_switch v \\_. weak_valid_sched_action\" by (rule weak_valid_sched_action_lift; wp) -(* FIXME AARCH64 VCPU -is this one and other pred_tcb_atP used? crunch pred_tcb_atP[wp]: set_vm_root "\s. P (pred_tcb_at proj Q t s)" (wp: crunch_wps simp: crunch_simps) -crunch valid_etcbs [wp, DetSchedSchedule_AI_assms]: - arch_switch_to_idle_thread, arch_switch_to_thread, arch_get_sanitise_register_info, arch_post_modify_registers valid_etcbs +lemma set_asid_pool_valid_etcbs[wp]: + "set_asid_pool ptr pool \valid_etcbs\" + unfolding set_asid_pool_def + by (wpsimp wp: hoare_drop_imps valid_etcbs_lift) + +lemma set_asid_pool_valid_queues[wp]: + "set_asid_pool ptr pool \valid_queues\" + unfolding set_asid_pool_def + by (wpsimp wp: valid_queues_lift) + +crunches set_asid_pool, set_vm_root, arch_thread_set + for scheduler_action[wp]: "\s. P (scheduler_action s)" + and ekheap[wp]: "\s. P (ekheap s)" + and cur_domain[wp]: "\s. P (cur_domain s)" + and ready_queues[wp]: "\s. P (ready_queues s)" + +lemma set_asid_pool_weak_valid_sched_action[wp]: + "set_asid_pool ptr pool \weak_valid_sched_action\" + by (rule weak_valid_sched_action_lift; wp) + +lemma set_asid_pool_valid_sched_action'[wp]: + "set_asid_pool ptr pool + \\s. 
valid_sched_action_2 (scheduler_action s) (ekheap s) (kheap s) thread (cur_domain s)\" + unfolding valid_sched_action_def + by (wpsimp simp: is_activatable_def switch_in_cur_domain_def in_cur_domain_def + wp: hoare_vcg_imp_lift' hoare_vcg_all_lift | wps)+ + +lemma set_vcpu_valid_sched_action'[wp]: + "set_vcpu ptr vcpu + \\s. valid_sched_action_2 (scheduler_action s) (ekheap s) (kheap s) thread (cur_domain s)\" + unfolding valid_sched_action_def + by (wpsimp simp: is_activatable_def switch_in_cur_domain_def in_cur_domain_def + wp: hoare_vcg_imp_lift' hoare_vcg_all_lift | wps)+ + +crunches + arch_switch_to_idle_thread, arch_switch_to_thread, arch_get_sanitise_register_info, + arch_post_modify_registers + for valid_etcbs [wp, DetSchedSchedule_AI_assms]: valid_etcbs (simp: crunch_simps) -crunch valid_queues [wp, DetSchedSchedule_AI_assms]: - switch_to_idle_thread, switch_to_thread, set_vm_root, arch_get_sanitise_register_info, arch_post_modify_registers valid_queues +crunches + switch_to_idle_thread, switch_to_thread, set_vm_root, arch_get_sanitise_register_info, arch_post_modify_registers + for valid_queues [wp, DetSchedSchedule_AI_assms]: valid_queues (simp: crunch_simps ignore: set_tcb_queue tcb_sched_action) -crunch weak_valid_sched_action [wp, DetSchedSchedule_AI_assms]: - switch_to_idle_thread, switch_to_thread, set_vm_root, arch_get_sanitise_register_info, arch_post_modify_registers "weak_valid_sched_action" +crunches + switch_to_idle_thread, switch_to_thread, set_vm_root, arch_get_sanitise_register_info, arch_post_modify_registers + for weak_valid_sched_action [wp, DetSchedSchedule_AI_assms]: weak_valid_sched_action (simp: crunch_simps) -*) -crunch ct_not_in_q[wp]: set_vm_root "ct_not_in_q" - (wp: crunch_wps simp: crunch_simps) - -crunch ct_not_in_q'[wp]: set_vm_root "\s. ct_not_in_q_2 (ready_queues s) (scheduler_action s) t" +crunches set_vm_root + for ct_not_in_q[wp]: "ct_not_in_q" + and ct_not_in_q'[wp]: "\s. ct_not_in_q_2 (ready_queues s) (scheduler_action s) t" (wp: crunch_wps simp: crunch_simps) lemma vcpu_switch_valid_sched_action[wp]: @@ -98,32 +127,26 @@ lemma vcpu_switch_valid_sched_action[wp]: lemma switch_to_idle_thread_ct_not_in_q [wp, DetSchedSchedule_AI_assms]: "\valid_queues and valid_idle\ switch_to_idle_thread \\_. ct_not_in_q\" - apply (simp add: switch_to_idle_thread_def) - apply wp - apply (simp add: arch_switch_to_idle_thread_def) - apply wp+ - sorry (* FIXME AARCH64 VCPU + unfolding switch_to_idle_thread_def arch_switch_to_idle_thread_def + apply wpsimp apply (fastforce simp: valid_queues_def ct_not_in_q_def not_queued_def valid_idle_def pred_tcb_at_def obj_at_def) - done *) + done -(* FIXME AARCH64 VCPU -crunch valid_sched_action'[wp]: set_vm_root "\s. valid_sched_action_2 (scheduler_action s) - (ekheap s) (kheap s) thread (cur_domain s)" - (wp: crunch_wps simp: crunch_simps) *) +crunches set_vm_root, vcpu_switch + for valid_sched_action'[wp]: "\s. valid_sched_action_2 (scheduler_action s) (ekheap s) (kheap s) + thread (cur_domain s)" + (wp: crunch_wps simp: crunch_simps) lemma switch_to_idle_thread_valid_sched_action [wp, DetSchedSchedule_AI_assms]: "\valid_sched_action and valid_idle\ - switch_to_idle_thread + switch_to_idle_thread \\_. 
valid_sched_action\" - apply (simp add: switch_to_idle_thread_def) - apply wp - apply (simp add: arch_switch_to_idle_thread_def do_machine_op_def split_def) - apply wp+ + unfolding switch_to_idle_thread_def arch_switch_to_idle_thread_def + apply wpsimp apply (clarsimp simp: valid_sched_action_def valid_idle_def is_activatable_def pred_tcb_at_def obj_at_def) - sorry (* FIXME AARCH64 global vspace - done *) + done crunch ct_in_cur_domain'[wp]: set_vm_root "\s. ct_in_cur_domain_2 t (idle_thread s) (scheduler_action s) (cur_domain s) (ekheap s)" @@ -131,11 +154,8 @@ crunch ct_in_cur_domain'[wp]: set_vm_root "\s. ct_in_cur_domain_2 t (idl lemma switch_to_idle_thread_ct_in_cur_domain [wp, DetSchedSchedule_AI_assms]: "\\\ switch_to_idle_thread \\_. ct_in_cur_domain\" - sorry (* FIXME AARCH64 VCPU - by (simp add: switch_to_idle_thread_def arch_switch_to_idle_thread_def do_machine_op_def - split_def - | wp - | simp add: ct_in_cur_domain_def)+ *) + unfolding switch_to_idle_thread_def arch_switch_to_idle_thread_def + by (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_disj_lift | simp add: ct_in_cur_domain_def)+ crunch ct_not_in_q [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread, arch_get_sanitise_register_info, arch_post_modify_registers ct_not_in_q (simp: crunch_simps wp: crunch_wps) @@ -145,55 +165,68 @@ lemma do_machine_op_activatable[wp]: unfolding do_machine_op_def by wpsimp lemma set_vcpu_is_activatable[wp]: - "\is_activatable t\ set_vcpu ptr vcpu \\_. is_activatable t\" + "set_vcpu ptr vcpu \is_activatable t\" unfolding is_activatable_def set_vcpu_def set_object_def apply (wp get_object_wp) apply (clarsimp simp: st_tcb_at_kh_def obj_at_kh_def obj_at_def) done -(* FIXME AARCH64 VCPU +lemma set_asid_pool_is_activatable[wp]: + "set_asid_pool ptr pool \is_activatable t\" + unfolding is_activatable_def set_asid_pool_def + apply (wpsimp wp: set_object_wp) + apply (clarsimp simp: st_tcb_at_kh_def obj_at_kh_def obj_at_def in_omonad st_tcb_at_def) + done + crunches vcpu_disable, vcpu_restore, vcpu_save, vcpu_switch, set_vm_root for is_activatable[wp]: "is_activatable t" and valid_sched[wp, DetSchedSchedule_AI_assms]: valid_sched - (wp: crunch_wps valid_sched_lift simp: crunch_simps) + (wp: crunch_wps valid_sched_lift simp: crunch_simps ignore: set_asid_pool) crunch is_activatable [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread, arch_get_sanitise_register_info, arch_post_modify_registers "is_activatable t" (simp: crunch_simps) -crunch valid_sched_action [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread, arch_get_sanitise_register_info, arch_post_modify_registers valid_sched_action - (simp: crunch_simps) +crunches arch_switch_to_thread, arch_get_sanitise_register_info, arch_post_modify_registers + for valid_sched_action [wp, DetSchedSchedule_AI_assms]: valid_sched_action + (simp: crunch_simps ignore: set_asid_pool + wp: valid_sched_action_lift[where f="set_asid_pool ptr pool" for ptr pool]) crunch valid_sched [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread, arch_get_sanitise_register_info, arch_post_modify_registers valid_sched (simp: crunch_simps) -*) + crunch exst[wp]: set_vm_root "\s. P (exst s)" - (wp: crunch_wps hoare_whenE_wp simp: crunch_simps) + (wp: crunch_wps whenE_wp simp: crunch_simps) -(* FIXME AARCH64 crunch ct_in_cur_domain_2 [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread "\s. 
ct_in_cur_domain_2 thread (idle_thread s) (scheduler_action s) (cur_domain s) (ekheap s)" - (simp: crunch_simps wp: assert_inv) + (simp: crunch_simps wp: assert_inv crunch_wps) + +crunches set_asid_pool + for valid_blocked[wp]: valid_blocked + (wp: valid_blocked_lift) crunch valid_blocked[wp]: set_vm_root valid_blocked (simp: crunch_simps) +lemma set_asid_pool_ct_in_q[wp]: + "set_asid_pool ptr pool \ct_in_q\" + unfolding ct_in_q_def + by (wpsimp wp: hoare_vcg_imp_lift' | wps)+ + crunch ct_in_q[wp]: set_vm_root ct_in_q (simp: crunch_simps) crunch etcb_at [wp, DetSchedSchedule_AI_assms]: switch_to_thread "etcb_at P t" -*) crunch valid_idle [wp, DetSchedSchedule_AI_assms]: arch_switch_to_idle_thread "valid_idle" (wp: crunch_wps simp: crunch_simps) -(* FIXME AARCH64 VCPU crunch etcb_at [wp, DetSchedSchedule_AI_assms]: arch_switch_to_idle_thread "etcb_at P t" crunch scheduler_action [wp, DetSchedSchedule_AI_assms]: arch_switch_to_idle_thread, next_domain "\s. P (scheduler_action s)" (simp: Let_def) -*) lemma vcpu_switch_ct_in_q[wp]: "\ct_in_q\ vcpu_switch vcpu \\_. ct_in_q\" @@ -203,17 +236,13 @@ lemma vcpu_switch_ct_in_q[wp]: apply wp done -(* FIXME AARCH64 VCPU -crunches do_machine_op, set_vm_root, vcpu_switch +crunches vcpu_switch for valid_blocked[wp]: valid_blocked - and ct_in_q[wp]: ct_in_q - (wp: valid_blocked_lift simp: crunch_simps) *) + (wp: valid_blocked_lift simp: crunch_simps) lemma set_vm_root_valid_blocked_ct_in_q [wp]: "\valid_blocked and ct_in_q\ set_vm_root p \\_. valid_blocked and ct_in_q\" - sorry (* FIXME AARCH64 vm root - by (wp | wpc | auto)+ *) - + by wpsimp lemma as_user_ct_in_q[wp]: "as_user tp S \ct_in_q\" @@ -223,8 +252,7 @@ lemma as_user_ct_in_q[wp]: lemma arch_switch_to_thread_valid_blocked [wp, DetSchedSchedule_AI_assms]: "\valid_blocked and ct_in_q\ arch_switch_to_thread thread \\_. valid_blocked and ct_in_q\" - sorry (* FIXME AARCH64 - by (wpsimp simp: arch_switch_to_thread_def) *) + by (wpsimp simp: arch_switch_to_thread_def) lemma switch_to_idle_thread_ct_not_queued [wp, DetSchedSchedule_AI_assms]: "\valid_queues and valid_etcbs and valid_idle\ @@ -232,37 +260,43 @@ lemma switch_to_idle_thread_ct_not_queued [wp, DetSchedSchedule_AI_assms]: \\rv s. not_queued (cur_thread s) s\" apply (simp add: switch_to_idle_thread_def arch_switch_to_idle_thread_def tcb_sched_action_def | wp)+ - sorry (* FIXME AARCH64 VCPU apply (fastforce simp: valid_sched_2_def valid_queues_2_def valid_idle_def pred_tcb_at_def obj_at_def not_queued_def) - done *) + done + +lemma set_asid_pool_valid_blocked_2[wp]: + "set_asid_pool ptr pool + \\s. valid_blocked_2 (ready_queues s) (kheap s) (scheduler_action s) thread\" + unfolding valid_blocked_2_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') -(* FIXME AARCH64 -crunch valid_blocked_2[wp]: set_vm_root "\s. +lemma set_vcpu_valid_blocked_2[wp]: + "set_vcpu ptr vcpu + \\s. valid_blocked_2 (ready_queues s) (kheap s) (scheduler_action s) thread\" + unfolding valid_blocked_2_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') + +crunch valid_blocked_2[wp]: set_vm_root, vcpu_switch "\s. valid_blocked_2 (ready_queues s) (kheap s) (scheduler_action s) thread" - (wp: crunch_wps simp: crunch_simps) *) + (wp: crunch_wps simp: crunch_simps) lemma switch_to_idle_thread_valid_blocked [wp, DetSchedSchedule_AI_assms]: "\valid_blocked and ct_in_q\ switch_to_idle_thread \\rv. 
valid_blocked\" apply (simp add: switch_to_idle_thread_def arch_switch_to_idle_thread_def do_machine_op_def | wp | wpc)+ - sorry (* FIXME AARCH64 global vspace apply clarsimp apply (drule(1) ct_in_q_valid_blocked_ct_upd) apply simp - done *) + done -(* FIXME AARCH64 VCPU crunch exst [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread "\s. P (exst s :: det_ext)" crunch cur_thread[wp]: arch_switch_to_idle_thread "\s. P (cur_thread s)" -*) lemma astit_st_tcb_at[wp]: "\st_tcb_at P t\ arch_switch_to_idle_thread \\rv. st_tcb_at P t\" apply (simp add: arch_switch_to_idle_thread_def) - sorry (* FIXME AARCH64 VCPU - by (wpsimp) *) + by (wpsimp) lemma stit_activatable' [DetSchedSchedule_AI_assms]: "\valid_idle\ switch_to_idle_thread \\rv . ct_in_state activatable\" @@ -273,17 +307,12 @@ lemma stit_activatable' [DetSchedSchedule_AI_assms]: lemma switch_to_idle_thread_cur_thread_idle_thread [wp, DetSchedSchedule_AI_assms]: "\\\ switch_to_idle_thread \\_ s. cur_thread s = idle_thread s\" - sorry (* FIXME AARCH64 VCPU - by (wp | simp add:switch_to_idle_thread_def arch_switch_to_idle_thread_def)+ *) + by (wp | simp add:switch_to_idle_thread_def arch_switch_to_idle_thread_def)+ lemma set_pt_valid_etcbs[wp]: "\valid_etcbs\ set_pt ptr pt \\rv. valid_etcbs\" by (wp hoare_drop_imps valid_etcbs_lift | simp add: set_pt_def)+ -lemma set_asid_pool_valid_etcbs[wp]: - "\valid_etcbs\ set_asid_pool ptr pool \\rv. valid_etcbs\" - by (wp hoare_drop_imps valid_etcbs_lift | simp add: set_asid_pool_def)+ - lemma set_pt_valid_sched[wp]: "\valid_sched\ set_pt ptr pt \\rv. valid_sched\" by (wp hoare_drop_imps valid_sched_lift | simp add: set_pt_def)+ @@ -298,25 +327,31 @@ lemma set_asid_pool_valid_sched[wp]: crunch ct_not_in_q [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete ct_not_in_q - (wp: crunch_wps hoare_drop_imps hoare_unless_wp select_inv mapM_wp + (wp: crunch_wps hoare_drop_imps unless_wp select_inv mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: tcb_sched_action) -(* FIXME AARCH64 VCPU +lemma arch_thread_set_valid_sched[wp]: + "arch_thread_set f p \valid_sched\" + by (wpsimp wp: valid_sched_lift arch_thread_set_pred_tcb_at) + +lemma arch_thread_set_valid_etcbs[wp]: + "arch_thread_set f p \valid_etcbs\" + by (wp valid_etcbs_lift) + crunch valid_etcbs [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete valid_etcbs - (wp: hoare_drop_imps hoare_unless_wp select_inv mapM_x_wp mapM_wp subset_refl - if_fun_split simp: crunch_simps ignore: set_object thread_set) *) + (wp: hoare_drop_imps unless_wp select_inv mapM_x_wp mapM_wp subset_refl + if_fun_split simp: crunch_simps ignore: set_object thread_set) crunch simple_sched_action [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete simple_sched_action - (wp: hoare_drop_imps mapM_x_wp mapM_wp select_wp subset_refl + (wp: hoare_drop_imps mapM_x_wp mapM_wp subset_refl simp: unless_def if_fun_split) -(* FIXME AARCH64 VCPU crunch valid_sched [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete, arch_invoke_irq_handler, arch_mask_irq_signal "valid_sched" - (ignore: set_object wp: crunch_wps subset_refl simp: if_fun_split) *) + (ignore: set_object wp: crunch_wps subset_refl simp: if_fun_split) lemma activate_thread_valid_sched [DetSchedSchedule_AI_assms]: "\valid_sched\ activate_thread \\_. 
valid_sched\" @@ -325,33 +360,29 @@ lemma activate_thread_valid_sched [DetSchedSchedule_AI_assms]: apply (force elim: st_tcb_weakenE) done -(* FIXME AARCH64 unmap_page crunch valid_sched[wp]: perform_page_invocation, perform_page_table_invocation, perform_asid_pool_invocation valid_sched - (wp: mapM_x_wp' mapM_wp' crunch_wps) *) + (wp: mapM_x_wp' mapM_wp' crunch_wps simp: crunch_simps) -(* FIXME AARCH64 VCPU crunches - vcpu_read_reg,vcpu_write_reg,read_vcpu_register,write_vcpu_register,set_message_info,as_user + vcpu_read_reg,vcpu_write_reg,read_vcpu_register,write_vcpu_register,set_message_info,as_user, + perform_vspace_invocation for cur_thread[wp]: "\s. P (cur_thread s)" and valid_sched[wp]: valid_sched and ct_in_state[wp]: "ct_in_state st" - (simp: crunch_simps wp: ct_in_state_thread_state_lift ignore: get_object) *) + (simp: crunch_simps wp: ct_in_state_thread_state_lift ignore: get_object) lemma invoke_vcpu_read_register_valid_sched[wp]: "\valid_sched and ct_active\ invoke_vcpu_read_register v reg \\_. valid_sched\" - sorry (* FIXME AARCH64: missing crunch on read_vcpu_register - unfolding invoke_vcpu_read_register_def by (wpsimp wp: set_thread_state_Running_valid_sched) *) + unfolding invoke_vcpu_read_register_def by (wpsimp wp: set_thread_state_Running_valid_sched) lemma invoke_vcpu_write_register_valid_sched[wp]: "\valid_sched and ct_active\ invoke_vcpu_write_register v reg val \\_. valid_sched\" - sorry (* FIXME AARCH64 VCPU - unfolding invoke_vcpu_write_register_def by (wpsimp wp: set_thread_state_Restart_valid_sched) *) + unfolding invoke_vcpu_write_register_def by (wpsimp wp: set_thread_state_Restart_valid_sched) -(* FIXME AARCH64 VCPU crunch valid_sched[wp]: perform_vcpu_invocation valid_sched - (wp: crunch_wps simp: crunch_simps ignore: set_thread_state) *) + (wp: crunch_wps simp: crunch_simps ignore: set_thread_state) lemma arch_perform_invocation_valid_sched [wp, DetSchedSchedule_AI_assms]: "\invs and valid_sched and ct_active and valid_arch_inv a\ @@ -360,8 +391,7 @@ lemma arch_perform_invocation_valid_sched [wp, DetSchedSchedule_AI_assms]: apply (cases a, simp_all add: arch_perform_invocation_def) apply (wp perform_asid_control_invocation_valid_sched | wpc | simp add: valid_arch_inv_def invs_valid_idle)+ - sorry (* FIXME AARCH64 perform_vspace_invocation - done *) + done crunch valid_sched [wp, DetSchedSchedule_AI_assms]: handle_arch_fault_reply, handle_vm_fault valid_sched @@ -377,20 +407,19 @@ crunch sched_act_not [wp, DetSchedSchedule_AI_assms]: lemma hvmf_st_tcb_at [wp, DetSchedSchedule_AI_assms]: "\st_tcb_at P t' \ handle_vm_fault t w \\rv. st_tcb_at P t' \" - unfolding handle_vm_fault_def sorry (* FIXME AARCH64 by (cases w; wpsimp) *) + unfolding handle_vm_fault_def by (cases w; wpsimp) lemma handle_vm_fault_st_tcb_cur_thread [wp, DetSchedSchedule_AI_assms]: "\ \s. st_tcb_at P (cur_thread s) s \ handle_vm_fault t f \\_ s. 
st_tcb_at P (cur_thread s) s \" unfolding handle_vm_fault_def apply (fold ct_in_state_def) apply (rule ct_in_state_thread_state_lift; cases f; wpsimp) - sorry (* FIXME AARCH64 *) + done crunch valid_sched [wp, DetSchedSchedule_AI_assms]: arch_invoke_irq_control "valid_sched" -(* FIXME AARCH64 VCPU crunch valid_list [wp, DetSchedSchedule_AI_assms]: - arch_activate_idle_thread, arch_switch_to_thread, arch_switch_to_idle_thread "valid_list" *) + arch_activate_idle_thread, arch_switch_to_thread, arch_switch_to_idle_thread "valid_list" crunch cur_tcb [wp, DetSchedSchedule_AI_assms]: handle_arch_fault_reply, handle_vm_fault, arch_get_sanitise_register_info, arch_post_modify_registers @@ -403,8 +432,7 @@ crunch scheduler_action [wp, DetSchedSchedule_AI_assms]: arch_get_sanitise_regis lemma make_arch_fault_msg_inv: "make_arch_fault_msg f t \P\" - sorry (* FIXME AARCH64 VCPU - by (cases f) wpsimp *) + by (cases f; wpsimp) declare make_arch_fault_msg_inv[DetSchedSchedule_AI_assms] @@ -434,10 +462,10 @@ crunch arch_finalise_cap[wp, DetSchedSchedule_AI_assms]: end global_interpretation DetSchedSchedule_AI?: DetSchedSchedule_AI - proof goal_cases +proof goal_cases interpret Arch . - case 1 show ?case sorry (*FIXME AARCH64 by (unfold_locales; (fact DetSchedSchedule_AI_assms)?) *) - qed + case 1 show ?case by (unfold_locales; (fact DetSchedSchedule_AI_assms)?) +qed context Arch begin global_naming AARCH64 @@ -462,12 +490,11 @@ lemma vgic_maintenance_irq_valid_sched[wp]: hoare_drop_imp[where f="return $ m" for m] | wps | strengthen not_pred_tcb_at_strengthen)+ - sorry (* FIXME AARCH64 vgic_update_lr apply (frule tcb_at_invs) apply (clarsimp simp: runnable_eq halted_eq not_pred_tcb) apply (fastforce intro!: st_tcb_ex_cap[where P=active] simp: st_tcb_at_def ct_in_state_def obj_at_def) - done *) + done lemma vppi_event_irq_valid_sched[wp]: "\valid_sched and invs and scheduler_act_sane and ct_not_queued\ @@ -483,32 +510,34 @@ lemma vppi_event_irq_valid_sched[wp]: cong: vcpu.fold_congs | wps | strengthen not_pred_tcb_at_strengthen)+ - sorry (* FIXME AARCH64 vcpu_update apply (frule tcb_at_invs) apply (clarsimp simp: runnable_eq halted_eq not_pred_tcb) apply (fastforce intro!: st_tcb_ex_cap[where P=active] simp: not_pred_tcb st_tcb_at_def obj_at_def ct_in_state_def) - done *) + done lemma handle_hyp_fault_valid_sched[wp]: "\valid_sched and invs and st_tcb_at active t and not_queued t and scheduler_act_not t and (ct_active or ct_idle)\ handle_hypervisor_fault t fault \\_. valid_sched\" - by (cases fault; wpsimp wp: handle_fault_valid_sched simp: valid_fault_def) + supply if_split[split del] + by (cases fault; wpsimp wp: handle_fault_valid_sched simp: valid_fault_def isFpuEnable_def) lemma handle_reserved_irq_valid_sched: - "\valid_sched and invs and (\s. irq \ non_kernel_IRQs \ scheduler_act_sane s \ ct_not_queued s)\ - handle_reserved_irq irq \\rv. valid_sched\" - unfolding handle_reserved_irq_def irq_vppi_event_index_def when_def - sorry (* FIXME AARCH64 double-when causing problems - unfolding handle_reserved_irq_def by (wpsimp simp: non_kernel_IRQs_def) *) + "\valid_sched and invs and (\s. irq \ non_kernel_IRQs \ scheduler_act_sane s \ ct_not_queued s)\ + handle_reserved_irq irq + \\rv. valid_sched\" + unfolding handle_reserved_irq_def + apply (wpsimp simp: non_kernel_IRQs_def) + apply (simp add: irq_vppi_event_index_def) + done end global_interpretation DetSchedSchedule_AI_handle_hypervisor_fault?: DetSchedSchedule_AI_handle_hypervisor_fault - proof goal_cases +proof goal_cases interpret Arch . 
case 1 show ?case by (unfold_locales; (fact handle_hyp_fault_valid_sched handle_reserved_irq_valid_sched)?) - qed +qed end diff --git a/proof/invariant-abstract/AARCH64/ArchDeterministic_AI.thy b/proof/invariant-abstract/AARCH64/ArchDeterministic_AI.thy index 1bdab0cb8f..4339807139 100644 --- a/proof/invariant-abstract/AARCH64/ArchDeterministic_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchDeterministic_AI.thy @@ -19,13 +19,10 @@ crunch valid_list[wp, Deterministic_AI_assms]: vcpu_save, vcpu_enable, vcpu_disable, vcpu_restore, arch_get_sanitise_register_info, arch_post_modify_registers valid_list (wp: crunch_wps simp: unless_def crunch_simps) -(* FIXME AARCH64: crunchable? If not, rename param_a *) lemma vcpu_switch_valid_list[wp, Deterministic_AI_assms]: - "\valid_list\ vcpu_switch param_a \\_. valid_list\" - apply (simp add: vcpu_switch_def) - apply (rule hoare_pre) - apply(wpsimp)+ - done + "vcpu_switch v \valid_list\" + unfolding vcpu_switch_def + by wpsimp crunch valid_list[wp, Deterministic_AI_assms]: cap_swap_for_delete,set_cap,finalise_cap,arch_get_sanitise_register_info, @@ -36,35 +33,35 @@ declare get_cap_inv[Deterministic_AI_assms] end global_interpretation Deterministic_AI_1?: Deterministic_AI_1 - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact Deterministic_AI_assms)?) - qed +qed context Arch begin global_naming AARCH64 crunch valid_list[wp,Deterministic_AI_assms]: arch_invoke_irq_handler valid_list crunch valid_list[wp]: invoke_untyped valid_list - (wp: crunch_wps preemption_point_inv' hoare_unless_wp mapME_x_wp' + (wp: crunch_wps preemption_point_inv' unless_wp mapME_x_wp' simp: mapM_x_def_bak crunch_simps) -crunch valid_list[wp]: invoke_irq_control valid_list +crunches invoke_irq_control, perform_flush + for valid_list[wp]: valid_list lemma perform_page_table_invocation_valid_list[wp]: - "\valid_list\ perform_page_table_invocation a \\_.valid_list\" + "perform_page_table_invocation a \valid_list\" unfolding perform_page_table_invocation_def by (wpsimp wp: mapM_x_wp' simp: perform_pt_inv_map_def perform_pt_inv_unmap_def) lemma perform_page_invocation_valid_list[wp]: - "\valid_list\ perform_page_invocation a \\_.valid_list\" + "perform_page_invocation a \valid_list\" apply (simp add: perform_page_invocation_def) apply (cases a, simp_all add: perform_pg_inv_map_def perform_pg_inv_unmap_def perform_pg_inv_get_addr_def split_def) apply (wp mapM_x_wp' mapM_wp' crunch_wps | intro impI conjI allI | wpc | simp add: set_message_info_def set_mrs_def split: cap.splits arch_cap.splits)+ - sorry (* FIXME AARCH64 perform_flush - done *) + done crunch valid_list[wp]: perform_invocation valid_list (wp: crunch_wps simp: crunch_simps ignore: without_preemption as_user) @@ -95,13 +92,14 @@ lemma handle_interrupt_valid_list[wp, Deterministic_AI_assms]: crunch valid_list[wp, Deterministic_AI_assms]: handle_send,handle_reply valid_list crunch valid_list[wp, Deterministic_AI_assms]: handle_hypervisor_fault valid_list + (simp: isFpuEnable_def) end global_interpretation Deterministic_AI_2?: Deterministic_AI_2 - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact Deterministic_AI_assms)?) 
- qed +qed end diff --git a/proof/invariant-abstract/AARCH64/ArchDetype_AI.thy b/proof/invariant-abstract/AARCH64/ArchDetype_AI.thy index 1895b78a9c..899b51d9d1 100644 --- a/proof/invariant-abstract/AARCH64/ArchDetype_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchDetype_AI.thy @@ -83,7 +83,7 @@ next qed lemma empty_fail_freeMemory [Detype_AI_asms]: "empty_fail (freeMemory ptr bits)" - by (simp add: freeMemory_def mapM_x_mapM) + by (fastforce simp: freeMemory_def mapM_x_mapM) lemma region_in_kernel_window_detype[simp]: @@ -141,8 +141,9 @@ lemma hyp_refs_of: "\obj p. \ ko_at obj p s \ \p ao. \ko_at (ArchObj ao) p s; arch_valid_obj ao s\ \ arch_valid_obj ao (detype (untyped_range cap) s)" - sorry (* FIXME AARCH64 VCPU - by simp *) + by (auto dest!: hyp_refs_of + simp: arch_valid_obj_def valid_vcpu_def + split: arch_kernel_obj.splits option.splits) lemma sym_hyp_refs_detype[detype_invs_proofs]: "sym_refs (state_hyp_refs_of (detype (untyped_range cap) s))" @@ -181,8 +182,15 @@ lemma tcb_arch_detype[detype_invs_proofs]: "\ko_at (TCB t) p s; valid_arch_tcb (tcb_arch t) s\ \ valid_arch_tcb (tcb_arch t) (detype (untyped_range cap) s)" apply (clarsimp simp: valid_arch_tcb_def) - sorry (* FIXME AARCH64 VCPU - done *) + apply (drule hyp_sym_refs_ko_atD, rule hyp_refsym) + apply clarsimp + apply rotate_tac (* do not pick typ_at *) + apply (drule live_okE) + apply (clarsimp simp: live_def hyp_live_def arch_live_def obj_at_def hyp_refs_of_def + refs_of_ao_def vcpu_tcb_refs_def + split: kernel_object.splits arch_kernel_obj.splits option.splits) + apply clarsimp + done declare arch_state_det[simp] @@ -196,8 +204,7 @@ lemma pts_of_detype[simp]: lemma ptes_of_detype_Some[simp]: "(ptes_of (detype S s) pt_t p = Some pte) = (table_base pt_t p \ S \ ptes_of s pt_t p = Some pte)" - sorry (* FIXME AARCH64 - by (simp add: in_omonad ptes_of_def detype_def) *) + by (simp add: in_omonad ptes_of_def detype_def) lemma asid_pools_of_detype: "asid_pools_of (detype S s) = (\p. if p\S then None else asid_pools_of s p)" @@ -218,16 +225,14 @@ lemma pool_for_asid_detype_Some[simp]: lemma vspace_for_pool_detype_Some[simp]: "(vspace_for_pool ap asid (\p. if p \ S then None else pools p) = Some p) = (ap \ S \ vspace_for_pool ap asid pools = Some p)" - sorry (* FIXME AARCH64 - by (simp add: vspace_for_pool_def obind_def split: option.splits) *) + by (simp add: vspace_for_pool_def entry_for_pool_def obind_def split: option.splits) lemma vspace_for_asid_detype_Some[simp]: "(vspace_for_asid asid (detype S s) = Some p) = ((\ap. 
pool_for_asid asid s = Some ap \ ap \ S) \ vspace_for_asid asid s = Some p)" - apply (simp add: vspace_for_asid_def obind_def asid_pools_of_detype split: option.splits) - apply (auto simp: pool_for_asid_def) - sorry (* FIXME AARCH64 - done *) + by (simp add: vspace_for_asid_def obind_def asid_pools_of_detype entry_for_asid_def + entry_for_pool_def pool_for_asid_def + split: option.splits) lemma pt_walk_detype: "pt_walk level bot_level pt_ptr vref (ptes_of (detype S s)) = Some (bot_level, p) \ @@ -239,9 +244,8 @@ lemma pt_walk_detype: apply (clarsimp simp: in_omonad split: if_split_asm) apply (erule disjE; clarsimp) apply (drule meta_spec, drule (1) meta_mp) - sorry (* FIXME AARCH64 apply fastforce - done *) + done lemma vs_lookup_table: "vs_lookup_table level asid vref (detype S s) = Some (level, p) \ @@ -270,29 +274,54 @@ lemma vs_lookup_target_preserved: apply (fastforce intro: no_obj_refs) done +lemma asid_table_Some_not_untyped_range: + "asid_table s high_bits = Some table \ table \ untyped_range cap" + using invs_valid_asid_pool_caps[OF invs] + by (auto simp add: valid_asid_pool_caps_def dest: no_obj_refs) + lemma valid_asid_table: "valid_asid_table (detype (untyped_range cap) s)" using valid_arch_state apply (clarsimp simp: valid_asid_table_def valid_arch_state_def) apply (drule (1) subsetD) - apply (clarsimp simp: ran_def) - apply (subgoal_tac "valid_asid_pool_caps s") - prefer 2 - using invs - apply (clarsimp simp: invs_def valid_state_def valid_arch_caps_def) - apply (simp add: valid_asid_pool_caps_def) - apply (erule allE, erule allE, erule (1) impE) - apply clarsimp - apply (drule no_obj_refs; simp) + apply (clarsimp simp: ran_def asid_table_Some_not_untyped_range) done +lemma entry_for_pool_detype_Some[simp]: + "(entry_for_pool pool_ptr asid (\p. if p \ S then None else pools p) = Some p) = + (pool_ptr \ S \ entry_for_pool pool_ptr asid pools = Some p)" + by (clarsimp simp: entry_for_pool_def in_omonad) + +lemma vmid_for_asid_2_detype_Some[simp]: + "(vmid_for_asid_2 asid table (\p. if p \ S then None else pools p) = Some p) = + ((\pool_ptr. 
table (asid_high_bits_of asid) = Some pool_ptr \ pool_ptr \ S) + \ vmid_for_asid_2 asid table pools = Some p)" + by (fastforce simp: vmid_for_asid_def in_omonad) + lemma vmid_inv_detype: "vmid_inv (detype (untyped_range cap) s)" - sorry (* FIXME AARCH64 *) + apply (prop_tac "vmid_inv s") + using valid_arch_state + apply (simp add: valid_arch_state_def) + apply (fastforce simp: vmid_inv_def is_inv_def asid_pools_of_detype vmid_for_asid_def + in_omonad asid_table_Some_not_untyped_range) + done + +lemma vcpus_of_detype[simp]: + "(vcpus_of (detype S s) p = Some vcpu) = (p \ S \ vcpus_of s p = Some vcpu)" + by (simp add: in_omonad detype_def) + +lemma vcpu_tcbs_of_detype[simp]: + "(vcpu_tcbs_of (detype S s) p = Some aobj) = (p \ S \ vcpu_tcbs_of s p = Some aobj)" + by (simp add: in_omonad detype_def) lemma cur_vcpu_detype: "cur_vcpu (detype (untyped_range cap) s)" - sorry (* FIXME AARCH64 VCPU *) + using valid_arch_state + apply (clarsimp simp: valid_arch_state_def cur_vcpu_def split: option.splits) + apply (frule obj_at_vcpu_hyp_live_of_s[THEN iffD2]) + apply (clarsimp elim!: live_okE simp: hyp_live_strg in_opt_pred split: option.splits) + done lemma valid_global_arch_objs: "valid_global_arch_objs (detype (untyped_range cap) s)" @@ -300,10 +329,18 @@ lemma valid_global_arch_objs: by (fastforce dest!: valid_global_refsD[OF globals cap] simp: cap_range_def valid_global_arch_objs_def valid_arch_state_def) +lemma valid_global_tables: + "valid_global_tables (detype (untyped_range cap) s)" + using valid_arch_state + apply (clarsimp simp: valid_global_tables_2_def valid_arch_state_def) + using untyped_range_in_cap_range[of cap] valid_global_refsD[OF globals cap] + apply (blast intro: global_pt_in_global_refs[of s]) + done + lemma valid_arch_state_detype[detype_invs_proofs]: "valid_arch_state (detype (untyped_range cap) s)" using valid_vs_lookup valid_arch_state ut_mdb valid_global_refsD [OF globals cap] cap - cur_vcpu_detype vmid_inv_detype valid_global_arch_objs + cur_vcpu_detype vmid_inv_detype valid_global_arch_objs valid_global_tables unfolding valid_arch_state_def pred_conj_def by (simp only: valid_asid_table) simp @@ -323,8 +360,7 @@ proof (rule ccontr) using invs by (auto simp: invs_def valid_state_def valid_arch_state_def) ultimately have "\pt. 
pts_of s p = Some pt \ valid_vspace_obj level (PageTable pt) s" - sorry (* FIXME AARCH64 - by (rule valid_vspace_objs_strongD) *) + by (blast dest!: valid_vspace_objs_strongD) with ap show False by (clarsimp simp: in_omonad) qed @@ -353,32 +389,37 @@ lemma data_at_detype[simp]: lemma valid_vspace_obj: "\ valid_vspace_obj level ao s; vspace_objs_of s p = Some ao; \\(level,p) s \ \ - valid_vspace_obj level ao (detype (untyped_range cap) s)" + valid_vspace_obj level ao (detype (untyped_range cap) s)" using invs apply (cases ao; clarsimp split del: if_split) + apply (rename_tac pool entry asid vref) apply (frule (1) vs_lookup_asid_pool_level, simp add: in_omonad vspace_obj_of_Some) - apply simp - sorry (* FIXME AARCH64 - apply (drule vs_lookup_table_ap_step, simp add: in_omonad, assumption) + apply clarsimp + apply (drule_tac vs_lookup_table_ap_step[OF _ _ ranD]) + apply (erule vspace_objs_of_Some_projections) + apply assumption apply clarsimp apply (erule (2) vs_lookup_target_preserved) - apply (rename_tac pt idx asid vref) - apply (case_tac "pt idx"; simp) - apply (frule_tac idx=idx in vs_lookup_table_pt_step; simp add: in_omonad) - apply (frule pspace_alignedD, fastforce) - apply (simp add: bit_simps) - apply (erule (1) vs_lookup_pt_level, simp add: in_omonad) - apply simp - apply fastforce - apply (fastforce elim: vs_lookup_target_preserved) - apply (frule_tac idx=idx in vs_lookup_table_pt_step; simp add: in_omonad) - apply (frule pspace_alignedD, fastforce) - apply (simp add: bit_simps) + apply (rename_tac pt pte asid vref) + apply (frule vspace_objs_of_arch_valid_obj, fastforce) + apply clarsimp + apply (case_tac pte; simp) + (* PagePTE *) + apply (frule_tac vs_lookup_table_pt_step; simp add: in_omonad) + apply (clarsimp simp: vspace_obj_of_def split: if_split_asm) + apply (drule pspace_alignedD; fastforce simp: pt_bits_def) apply (erule (1) vs_lookup_pt_level, simp add: in_omonad) apply simp - apply fastforce - apply (fastforce elim: vs_lookup_target_preserved) - done *) + apply (fastforce elim: vs_lookup_target_preserved) + (* PageTablePTE *) + apply clarsimp + apply (frule_tac vs_lookup_table_pt_step; simp add: in_omonad) + apply (clarsimp simp: vspace_obj_of_def split: if_split_asm) + apply (drule pspace_alignedD; fastforce simp: pt_bits_def) + apply (erule (1) vs_lookup_pt_level, simp add: in_omonad) + apply simp + apply (fastforce simp: pptr_from_pte_def elim: vs_lookup_target_preserved) + done lemma valid_vspace_obj_detype[detype_invs_proofs]: "valid_vspace_objs (detype (untyped_range cap) s)" proof - @@ -455,8 +496,15 @@ proof - thus ?thesis by (simp add: valid_kernel_mappings_def detype_def ball_ran_eq) qed -lemma valid_asid_map_detype[detype_invs_proofs]: "valid_asid_map (detype (untyped_range cap) s)" - by (simp add: valid_asid_map_def) +lemma valid_asid_map_detype[detype_invs_proofs]: + "valid_asid_map (detype (untyped_range cap) s)" +proof - + have "valid_asid_map s" + using invs by (simp add: invs_def valid_state_def) + thus ?thesis + by (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq pool_for_asid_def + entry_for_pool_def) +qed lemma equal_kernel_mappings_detype[detype_invs_proofs]: "equal_kernel_mappings (detype (untyped_range cap) s)" diff --git a/proof/invariant-abstract/AARCH64/ArchEmptyFail_AI.thy b/proof/invariant-abstract/AARCH64/ArchEmptyFail_AI.thy index cb56719586..a4ff0dfb13 100644 --- a/proof/invariant-abstract/AARCH64/ArchEmptyFail_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchEmptyFail_AI.thy @@ -34,7 +34,7 @@ context Arch begin 
global_naming AARCH64 crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: handle_fault (simp: kernel_object.splits option.splits arch_cap.splits cap.splits endpoint.splits bool.splits list.splits thread_state.splits split_def catch_def sum.splits - Let_def wp: zipWithM_x_empty_fail) + Let_def) crunch (empty_fail) empty_fail[wp]: decode_tcb_configure, decode_bind_notification, decode_unbind_notification, @@ -50,38 +50,36 @@ lemma decode_tcb_invocation_empty_fail[wp]: by (simp add: decode_tcb_invocation_def split: gen_invocation_labels.splits invocation_label.splits | wp | intro conjI impI)+ -crunch (empty_fail) empty_fail[wp]: find_vspace_for_asid, check_vp_alignment (* FIXME AARCH64 , check_slot *) +crunch (empty_fail) empty_fail[wp]: find_vspace_for_asid, check_vp_alignment, check_vspace_root lemma arch_decode_ARMASIDControlMakePool_empty_fail: "invocation_type label = ArchInvocationLabel ARMASIDControlMakePool \ empty_fail (arch_decode_invocation label b c d e f)" apply (simp add: arch_decode_invocation_def Let_def) apply (wpsimp simp: arch_decode_invocation_def decode_asid_pool_invocation_def) - apply (simp add: decode_asid_control_invocation_def) - apply (intro impI conjI allI) - apply (simp add: split_def) - apply wp - apply simp - apply (subst bindE_assoc[symmetric]) - apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def - bind_def return_def returnOk_def lift_def liftE_def fail_def - gets_def get_def assert_def select_def - split: if_split_asm) + apply (simp add: decode_asid_control_invocation_def) + apply (intro impI conjI allI) + apply (simp add: split_def) + apply (wp (once), simp) + apply (subst bindE_assoc[symmetric]) + apply (rule empty_fail_bindE) + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def + bind_def return_def returnOk_def lift_def liftE_def fail_def + gets_def get_def assert_def select_def + split: if_split_asm) apply wpsimp - apply (wpsimp simp: decode_frame_invocation_def) - subgoal sorry (* missing wp rule *) - subgoal sorry (* PTs *) - apply wpsimp - done (* FIXME AARCH64 VCPU and PageFlush invocations - apply (wpsimp simp: decode_page_table_invocation_def) - done *) + apply (wpsimp simp: decode_frame_invocation_def decode_fr_inv_flush_def Let_def) + apply (wpsimp simp: decode_vspace_invocation_def decode_vs_inv_flush_def + decode_page_table_invocation_def Let_def) + apply (wpsimp simp: decode_vcpu_invocation_def) + done lemma arch_decode_ARMASIDPoolAssign_empty_fail: "invocation_type label = ArchInvocationLabel ARMASIDPoolAssign \ empty_fail (arch_decode_invocation label b c d e f)" unfolding arch_decode_invocation_def decode_page_table_invocation_def decode_frame_invocation_def - decode_asid_control_invocation_def + decode_asid_control_invocation_def decode_fr_inv_flush_def Let_def + decode_vspace_invocation_def decode_vs_inv_flush_def apply (wpsimp; wpsimp?) 
apply (simp add: decode_asid_pool_invocation_def) apply (intro impI allI conjI) @@ -94,12 +92,11 @@ lemma arch_decode_ARMASIDPoolAssign_empty_fail: apply (rule empty_fail_bindE, wpsimp) apply (subst bindE_assoc[symmetric]) apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_def bindE_def - bind_def return_def returnOk_def lift_def liftE_def select_ext_def - gets_def get_def assert_def fail_def) - sorry (* FIXME AARCH64 + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_def bindE_def + bind_def return_def returnOk_def lift_def liftE_def select_ext_def + gets_def get_def assert_def fail_def) apply wpsimp - done *) + done lemma arch_decode_invocation_empty_fail[wp]: "empty_fail (arch_decode_invocation label b c d e f)" @@ -111,19 +108,20 @@ lemma arch_decode_invocation_empty_fail[wp]: apply (find_goal \succeeds \erule arch_decode_ARMASIDPoolAssign_empty_fail\\) apply ((simp add: arch_decode_ARMASIDControlMakePool_empty_fail arch_decode_ARMASIDPoolAssign_empty_fail)+)[2] - sorry (* FIXME AARCH64 frame caps - by (all \(wpsimp simp: arch_decode_invocation_def decode_asid_pool_invocation_def - decode_asid_control_invocation_def decode_frame_invocation_def - decode_page_table_invocation_def decode_pt_inv_map_def - decode_fr_inv_map_def Let_def)\) (* 15s *) *) + apply (all \(wpsimp simp: arch_decode_invocation_def decode_asid_pool_invocation_def + decode_asid_control_invocation_def decode_frame_invocation_def + decode_page_table_invocation_def decode_pt_inv_map_def + decode_fr_inv_map_def decode_fr_inv_flush_def + decode_vspace_invocation_def decode_vs_inv_flush_def Let_def)\) (* 15s *) + done end global_interpretation EmptyFail_AI_derive_cap?: EmptyFail_AI_derive_cap - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact EmptyFail_AI_assms)?) - qed +qed context Arch begin global_naming AARCH64 @@ -138,28 +136,20 @@ lemma empty_fail_pt_lookup_from_level[wp]: crunch (empty_fail) empty_fail[wp]: vcpu_update, vcpu_save_reg_range, vgic_update_lr, save_virt_timer (ignore: set_object get_object) -lemma vcpu_save_empty_fail[wp,EmptyFail_AI_assms]: "empty_fail (vcpu_save a)" - apply (simp add: vcpu_save_def) - sorry (* FIXME AARCH64 missing empty_fail_dsb - apply (wpsimp wp: empty_fail_dsb empty_fail_isb simp: vgic_update_def) - done *) - crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: maskInterrupt, empty_slot, - finalise_cap, preemption_point, + finalise_cap, preemption_point, vcpu_save, cap_swap_for_delete, decode_invocation (simp: Let_def catch_def split_def OR_choiceE_def mk_ef_def option.splits endpoint.splits notification.splits thread_state.splits sum.splits cap.splits arch_cap.splits kernel_object.splits vmpage_size.splits pte.splits bool.splits list.splits) -crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: setRegister, setNextPC - end global_interpretation EmptyFail_AI_rec_del?: EmptyFail_AI_rec_del - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact EmptyFail_AI_assms)?) - qed +qed context Arch begin global_naming AARCH64 crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: @@ -167,22 +157,22 @@ crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: end global_interpretation EmptyFail_AI_schedule_unit?: EmptyFail_AI_schedule_unit - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact EmptyFail_AI_assms)?) 
- qed +qed global_interpretation EmptyFail_AI_schedule_det?: EmptyFail_AI_schedule_det - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact EmptyFail_AI_assms)?) - qed +qed global_interpretation EmptyFail_AI_schedule?: EmptyFail_AI_schedule - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact EmptyFail_AI_assms)?) - qed +qed context Arch begin global_naming AARCH64 @@ -209,21 +199,21 @@ crunches possible_switch_to, handle_event, activate_thread end global_interpretation EmptyFail_AI_call_kernel_unit?: EmptyFail_AI_call_kernel_unit - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact EmptyFail_AI_assms)?) - qed +qed global_interpretation EmptyFail_AI_call_kernel_det?: EmptyFail_AI_call_kernel_det - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact EmptyFail_AI_assms)?) - qed +qed global_interpretation EmptyFail_AI_call_kernel?: EmptyFail_AI_call_kernel - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact EmptyFail_AI_assms)?) - qed +qed end diff --git a/proof/invariant-abstract/AARCH64/ArchFinalise_AI.thy b/proof/invariant-abstract/AARCH64/ArchFinalise_AI.thy index 732b07ed18..5bd5f26a34 100644 --- a/proof/invariant-abstract/AARCH64/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchFinalise_AI.thy @@ -13,13 +13,6 @@ context Arch begin named_theorems Finalise_AI_asms -(* FIXME AARCH64 VCPU/FPU? -crunch caps_of_state[wp]: prepare_thread_delete "\s. P (caps_of_state s)" - (wp: crunch_wps) - -declare prepare_thread_delete_caps_of_state [Finalise_AI_asms] -*) - global_naming AARCH64 lemma valid_global_refs_asid_table_udapte [iff]: @@ -57,7 +50,7 @@ lemma invs_arm_asid_table_unmap: "invs s \ is_aligned base asid_low_bits \ (\asid_low. vmid_for_asid s (asid_of (asid_high_bits_of base) asid_low) = None) - \ tab = arm_asid_table (arch_state s) + \ tab = asid_table s \ invs (s\arch_state := arch_state s\arm_asid_table := tab(asid_high_bits_of base := None)\\)" apply (clarsimp simp: invs_def valid_state_def valid_arch_caps_def) apply (strengthen valid_asid_map_unmap valid_vspace_objs_unmap_strg @@ -67,50 +60,103 @@ lemma invs_arm_asid_table_unmap: valid_asid_pool_caps_def equal_kernel_mappings_asid_table_unmap) done +lemma asid_low_bits_of_add: + "\ is_aligned base asid_low_bits; offset \ mask asid_low_bits \ \ + asid_low_bits_of (base + offset) = ucast offset" + unfolding asid_low_bits_of_def + by (metis and_mask_eq_iff_le_mask asid_bits_of_defs(2) asid_high_bits_shl asid_low_bits_of_mask_eq + constructed_asid_low_bits_of word_and_or_mask_aligned) + +lemma invalidate_asid_entry_vmid_for_asid: + "\\s. asid' \ asid \ vmid_for_asid s asid' = None\ + invalidate_asid_entry asid + \\_ s. vmid_for_asid s asid' = None\" + unfolding invalidate_asid_entry_def + by (wpsimp wp: hoare_vcg_const_imp_lift) + +lemma invalidate_asid_entry_vmid_for_asid_low: + "\\s. asid_low_bits_of asid \ asid_low \ + vmid_for_asid s (asid_of (asid_high_bits_of asid) asid_low) = None\ + invalidate_asid_entry asid + \\_ s. vmid_for_asid s (asid_of (asid_high_bits_of asid) asid_low) = None\" + by (wpsimp wp: invalidate_asid_entry_vmid_for_asid) + +lemma invalidate_asid_entry_vmid_for_asid_add: + "\\s. 
is_aligned base asid_low_bits \ offset \ mask asid_low_bits \ offset' \ mask asid_low_bits \ + (offset \ offset' \ + vmid_for_asid s (asid_of (asid_high_bits_of base) (ucast offset')) = None) \ + invalidate_asid_entry (base + offset) + \\_ s. vmid_for_asid s (asid_of (asid_high_bits_of base) (ucast offset')) = None\" + apply (rule hoare_assume_pre) + apply (rule hoare_chain, rule invalidate_asid_entry_vmid_for_asid_low[where asid_low="ucast offset'"]) + apply (clarsimp simp: asid_low_bits_of_add asid_high_bits_of_add mask_def) + apply (clarsimp simp: asid_high_bits_of_add mask_def) + done + +crunches invalidate_tlb_by_asid + for vmid_for_asid[wp]: "\s. P (vmid_for_asid s)" + and asid_pools_of[wp]: "\s. P (asid_pools_of s)" + and pool_for_asid[wp]: "\s. P (pool_for_asid asid s)" + +lemma invalidate_asid_entry_asid_pools_of: + "\\s. asid_table s (asid_high_bits_of asid) = Some pptr \ + (\ap entry. asid_pools_of s pptr = Some ap \ + ap (asid_low_bits_of asid) = Some entry \ + P (Some (ap(asid_low_bits_of asid \ ASIDPoolVSpace None (ap_vspace entry)))))\ + invalidate_asid_entry asid + \\rv s. P (asid_pools_of s pptr)\" + unfolding invalidate_asid_entry_def invalidate_asid_def invalidate_vmid_entry_def + by (wpsimp simp: pool_for_asid_def) + lemma delete_asid_pool_invs[wp]: "delete_asid_pool base pptr \invs\" unfolding delete_asid_pool_def supply fun_upd_apply[simp del] apply wpsimp - apply (strengthen invs_arm_asid_table_unmap) - apply (simp add: asid_low_bits_of_def asid_low_bits_def ucast_zero_is_aligned) - sorry (* FIXME AARCH64 - done *) + apply (strengthen invs_arm_asid_table_unmap) + apply (rename_tac table pool) + apply (rule_tac Q="\_ s. (invs s \ is_aligned base asid_low_bits \ table = asid_table s \ + (\ap. asid_pools_of s pptr = Some ap \ + (\asid_low. ap asid_low \ None \ pool asid_low \ None))) \ + (\x \ set [0 .e. mask asid_low_bits]. + vmid_for_asid s (asid_of (asid_high_bits_of base) (ucast x)) = None)" + in hoare_strengthen_post) + apply (rule mapM_set_inv) + apply (wpsimp wp: invalidate_asid_entry_vmid_for_asid) + apply (wp invalidate_asid_entry_asid_pools_of) + apply (wp invalidate_tlb_by_asid_invs hoare_vcg_all_lift) + apply (clarsimp simp: vmid_for_asid_def asid_low_bits_of_add fun_upd_apply + asid_high_bits_of_add mask_def) + apply (wpsimp wp: invalidate_asid_entry_vmid_for_asid_add hoare_vcg_const_imp_lift) + apply (fastforce simp: vmid_for_asid_def entry_for_pool_def obind_def opt_map_def + split: option.splits) + apply (wpsimp wp: invalidate_asid_entry_vmid_for_asid_add invalidate_asid_entry_asid_pools_of) + apply (clarsimp simp: vmid_for_asid_def entry_for_pool_def obind_def opt_map_def + split: option.splits) + apply (metis asid_low_bits_of_and_mask asid_low_bits_of_def asid_low_bits_of_mask_eq + asid_pool_entry.exhaust asid_pool_entry.sel(1) word_and_le1 word_ao_absorbs(8)) + apply wp+ + apply (clarsimp simp: asid_low_bits_of_def ucast_zero_is_aligned asid_low_bits_def) + done + +lemma get_vm_id_pool_for_asid[wp]: + "get_vmid asid' \\s. P (pool_for_asid asid s)\" + by (wp pool_for_asid_lift) -lemma do_machine_op_pool_for_asid[wp]: - "do_machine_op f \\s. P (pool_for_asid asid s)\" - by (wpsimp simp: pool_for_asid_def) - -lemma do_machine_op_vspace_for_asid[wp]: - "do_machine_op f \\s. P (vspace_for_asid asid s)\" - by (wpsimp simp: vspace_for_asid_def obind_def - wp: conjI hoare_vcg_all_lift hoare_vcg_imp_lift' - split: option.splits) - -lemma set_vm_root_pool_for_asid[wp]: - "set_vm_root pt \\s. 
P (pool_for_asid asid s)\" - sorry (* FIXME AARCH64 - by (wpsimp simp: set_vm_root_def wp: get_cap_wp) *) - -lemma set_vm_root_vspace_for_asid[wp]: - "set_vm_root pt \ \s. P (vspace_for_asid asid s) \" - sorry (* FIXME AARCH64 - by (wpsimp simp: set_vm_root_def wp: get_cap_wp) *) - -lemma clearExMonitor_invs[wp]: - "\invs\ do_machine_op (hwASIDFlush a) \\_. invs\" - sorry (* FIXME AARCH64 - by (wpsimp wp: dmo_invs - simp: hwASIDFlush_def machine_op_lift_def - machine_rest_lift_def in_monad select_f_def) *) +crunches set_vm_root + for pool_for_asid[wp]: "\s. P (pool_for_asid asid s)" + and vspace_for_asid[wp]: "\s. P (vspace_for_asid asid s)" + (simp: crunch_simps) lemma delete_asid_invs[wp]: - "\ invs and valid_asid_table and pspace_aligned \delete_asid asid pd \\_. invs\" + "\ invs and valid_asid_table and pspace_aligned \ delete_asid asid pd \\_. invs\" apply (simp add: delete_asid_def cong: option.case_cong) - apply (wpsimp wp: set_asid_pool_invs_unmap) - sorry (* FIXME AARCH64 + apply (wpsimp wp: set_asid_pool_invs_unmap invalidate_asid_entry_asid_pools_of hoare_vcg_ex_lift + invalidate_asid_entry_vmid_for_asid invalidate_tlb_by_asid_invs + hoare_vcg_imp_lift' + simp: pool_for_asid_def) apply blast - done *) + done lemma delete_asid_pool_unmapped[wp]: "\\s. True \ @@ -124,10 +170,14 @@ lemma set_asid_pool_unmap: set_asid_pool poolptr (pool(asid_low_bits_of asid := None)) \\rv s. vspace_for_asid asid s = None \" unfolding set_asid_pool_def - apply (wpsimp wp: set_object_wp) - sorry (* FIXME AARCH64 - by (simp add: pool_for_asid_def vspace_for_asid_def vspace_for_pool_def obind_def in_omonad - split: option.splits) *) + apply (wp set_object_wp) + by (simp add: pool_for_asid_def entry_for_asid_def entry_for_pool_def vspace_for_asid_def + vspace_for_pool_def obind_def in_omonad + split: option.splits) + +crunches invalidate_asid_entry + for pool_for_asid[wp]: "\s. P (pool_for_asid asid s)" + (simp: pool_for_asid_def) lemma delete_asid_unmapped: "\\s. vspace_for_asid asid s = Some pt\ @@ -135,21 +185,18 @@ lemma delete_asid_unmapped: \\_ s. vspace_for_asid asid s = None\" unfolding delete_asid_def apply (simp cong: option.case_cong) - sorry (* FIXME AARCH64 - apply (wpsimp wp: set_asid_pool_unmap) + apply (wpsimp wp: set_asid_pool_unmap | wp (once) hoare_drop_imps)+ apply (clarsimp simp: vspace_for_asid_def pool_for_asid_def vspace_for_pool_def - obind_def in_omonad obj_at_def + obind_def in_omonad entry_for_asid_def entry_for_pool_def split: option.splits) - done *) + by (meson asid_pool_entry.exhaust_sel) lemma set_pt_tcb_at: "\\s. P (ko_at (TCB tcb) t s)\ set_pt a b \\_ s. P (ko_at (TCB tcb) t s)\" by (wpsimp simp: set_pt_def obj_at_def wp: set_object_wp) lemma set_vcpu_tcb_at_arch: (* generalise? this holds except when the ko is a vcpu *) - "\\s. P (ko_at (TCB tcb) t s)\ - set_vcpu p v - \\_ s. P (ko_at (TCB tcb) t s)\" + "set_vcpu p v \\s. P (ko_at (TCB tcb) t s)\" by (wp set_vcpu_nonvcpu_at; auto) crunch tcb_at_arch: vcpu_switch "\s. P (ko_at (TCB tcb) t s)" @@ -162,9 +209,7 @@ crunch tcb_at_arch: unmap_page "\s. P (ko_at (TCB tcb) t s)" lemmas unmap_page_tcb_at = unmap_page_tcb_at_arch lemma unmap_page_tcb_cap_valid: - "\\s. tcb_cap_valid cap r s\ - unmap_page sz asid vaddr pptr - \\rv s. tcb_cap_valid cap r s\" + "unmap_page sz asid vaddr pptr \\s. tcb_cap_valid cap r s\" apply (rule tcb_cap_valid_typ_st) apply wp apply (simp add: pred_tcb_at_def2) @@ -313,15 +358,19 @@ lemma arch_thread_get_final_cap[wp]: apply auto done +crunches prepare_thread_delete + for caps_of_state[wp]: "\s. 
P (caps_of_state s)" + (wp: crunch_wps ignore: do_machine_op) + +declare prepare_thread_delete_caps_of_state [Finalise_AI_asms] + lemma dissociate_vcpu_tcb_final_cap[wp]: "\is_final_cap' cap\ dissociate_vcpu_tcb v t \\rv. is_final_cap' cap\" - sorry (* FIXME AARCH64 FPU/VCPU - by (wpsimp simp: is_final_cap'_def2 cte_wp_at_caps_of_state) *) + by (wpsimp simp: is_final_cap'_def2 cte_wp_at_caps_of_state) lemma prepare_thread_delete_final[wp]: "\is_final_cap' cap\ prepare_thread_delete t \ \rv. is_final_cap' cap\" - sorry (* FIXME AARCH64 FPU/VCPU - unfolding prepare_thread_delete_def by wp *) + unfolding prepare_thread_delete_def fpu_thread_delete_def by wpsimp lemma length_and_unat_of_bl_length: "(length xs = x \ unat (of_bl xs :: 'a::len word) < 2 ^ x) = (length xs = x)" @@ -380,8 +429,7 @@ lemma vcpu_set_tcb_at[wp]: "\\s. tcb_at p s\ set_vcpu t crunch tcb_at[wp]: dissociate_vcpu_tcb "\s. tcb_at p s" (wp: crunch_wps) -(* FIXME AARCH64 FPU/VCPU -crunch tcb_at[wp]: prepare_thread_delete "\s. tcb_at p s" *) +crunch tcb_at[wp]: prepare_thread_delete "\s. tcb_at p s" lemma (* finalise_cap_new_valid_cap *)[wp,Finalise_AI_asms]: "\valid_cap cap\ finalise_cap cap x \\rv. valid_cap (fst rv)\" @@ -392,11 +440,10 @@ lemma (* finalise_cap_new_valid_cap *)[wp,Finalise_AI_asms]: split del: if_split | clarsimp | rule conjI)+ (* ArchObjectCap *) - sorry (* FIXME AARCH64 FPU/VCPU apply (wpsimp wp: o_def valid_cap_def cap_aligned_def split_del: if_split | clarsimp simp: arch_finalise_cap_def)+ - done *) + done crunch inv[wp]: arch_thread_get "P" @@ -422,7 +469,7 @@ lemma arch_thread_set_cur_tcb[wp]: "\cur_tcb\ arch_thread_set p lemma cte_wp_at_update_some_tcb: "\kheap s v = Some (TCB tcb) ; tcb_cnode_map tcb = tcb_cnode_map (f tcb)\ - \ cte_wp_at P p (s\kheap := kheap s (v \ TCB (f tcb))\) = cte_wp_at P p s" + \ cte_wp_at P p (s\kheap := (kheap s)(v \ TCB (f tcb))\) = cte_wp_at P p s" apply (clarsimp simp: cte_wp_at_cases2 dest!: get_tcb_SomeD) done @@ -617,7 +664,7 @@ lemma arch_thread_set_valid_objs_vcpu_Some[wp]: lemma sym_refs_update_some_tcb: "\kheap s v = Some (TCB tcb) ; refs_of (TCB tcb) = refs_of (TCB (f tcb))\ - \ sym_refs (state_refs_of (s\kheap := kheap s (v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" + \ sym_refs (state_refs_of (s\kheap := (kheap s)(v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" apply (rule_tac f=sym_refs in arg_cong) apply (rule all_ext) apply (clarsimp simp: sym_refs_def state_refs_of_def) @@ -647,16 +694,8 @@ lemma arch_thread_get_tcb: lemma get_vcpu_ko: "\Q\ get_vcpu p \\rv s. ko_at (ArchObj (VCPU rv)) p s \ Q s\" unfolding get_vcpu_def - apply wpsimp - sorry (* FIXME AARCH64 - apply (rule hoare_allI) - apply (subst eq_commute) - apply (subst (2) eq_commute) - apply clarsimp - apply (rule hoare_drop_imp)+ - apply (subst conj_commute) - apply (wp get_object_sp[simplified pred_conj_def], simp) - done *) + by wpsimp + (simp add: obj_at_def in_omonad) lemma vcpu_invalidate_tcbs_inv[wp]: "\obj_at (\tcb. \t'. 
tcb = TCB t' \ P t') t\ @@ -666,7 +705,7 @@ lemma vcpu_invalidate_tcbs_inv[wp]: lemma sym_refs_vcpu_None: assumes sym_refs: "sym_refs (state_hyp_refs_of s)" assumes tcb: "ko_at (TCB tcb) t s" "tcb_vcpu (tcb_arch tcb) = Some vr" - shows "sym_refs (state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_arch := tcb_vcpu_update Map.empty (tcb_arch tcb)\), + shows "sym_refs (state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_arch := tcb_vcpu_update Map.empty (tcb_arch tcb)\), vr \ ArchObj (VCPU (vcpu_tcb_update Map.empty v)))\))" (is "sym_refs (state_hyp_refs_of ?s')") proof - @@ -678,10 +717,9 @@ proof - have "(t,HypTCBRef) \ state_hyp_refs_of s vr" using sym_refsD [of vr _ _ t, OF _ sym_refs] by auto hence vr: "state_hyp_refs_of s vr = {(t,HypTCBRef)}" - sorry (* FIXME AARCH64 hyp by (auto simp: state_hyp_refs_of_def hyp_refs_of_def tcb_vcpu_refs_def vcpu_tcb_refs_def - refs_of_def refs_of_a_def - split: option.splits kernel_object.splits arch_kernel_obj.splits) *) + refs_of_def refs_of_ao_def + split: option.splits kernel_object.splits arch_kernel_obj.splits) moreover from sym_refs vr have "\x r rt. \ (r, rt) \ state_hyp_refs_of s x; x \ t \ \ r \ vr" @@ -695,13 +733,13 @@ proof - using sym_refs unfolding sym_refs_def by (clarsimp simp: split_def) moreover have "state_hyp_refs_of ?s' = (state_hyp_refs_of s) (vr := {}, t := {})" - unfolding state_hyp_refs_of_def sorry (* FIXME AARCH64 hyp by (rule ext) simp *) + unfolding state_hyp_refs_of_def by (rule ext) (simp add: vcpu_tcb_refs_def) ultimately show ?thesis by simp qed lemma arch_thread_set_wp: - "\\s. get_tcb p s \ None \ Q (s\kheap := kheap s(p \ TCB (the (get_tcb p s)\tcb_arch := f (tcb_arch (the (get_tcb p s)))\))\) \ + "\\s. get_tcb p s \ None \ Q (s\kheap := (kheap s)(p \ TCB (the (get_tcb p s)\tcb_arch := f (tcb_arch (the (get_tcb p s)))\))\) \ arch_thread_set f p \\_. Q\" apply (simp add: arch_thread_set_def) @@ -740,9 +778,8 @@ lemma dissociate_vcpu_tcb_sym_refs_hyp[wp]: apply (clarsimp simp: get_tcb_Some_ko_at obj_at_def sym_refs_vcpu_None split: if_splits) done -(* FIXME AARCH64 VCPU crunch valid_objs[wp]: dissociate_vcpu_tcb "valid_objs" - (wp: crunch_wps simp: crunch_simps valid_obj_def valid_vcpu_def ignore: arch_thread_set) *) + (wp: crunch_wps simp: crunch_simps valid_obj_def valid_vcpu_def ignore: arch_thread_set) lemma set_vcpu_unlive_hyp[wp]: "\\s. vr \ t \ obj_at (Not \ hyp_live) t s\ @@ -761,9 +798,8 @@ lemma arch_thread_set_unlive_hyp[wp]: lemma as_user_unlive_hyp[wp]: "\obj_at (Not \ hyp_live) vr\ as_user t f \\_. obj_at (Not \ hyp_live) vr\" unfolding as_user_def - apply (wpsimp wp: set_object_wp) - sorry (* FIXME AARCH64 VCPU - by (clarsimp simp: obj_at_def hyp_live_def arch_tcb_context_set_def) *) + by (wpsimp wp: set_object_wp) + (clarsimp simp: obj_at_def hyp_live_def get_tcb_Some_ko_at arch_tcb_context_set_def) lemma dissociate_vcpu_tcb_unlive_hyp_vr[wp]: "\\\ dissociate_vcpu_tcb vr t \ \_. obj_at (Not \ hyp_live) vr\" @@ -825,10 +861,9 @@ lemma set_vcpu_if_live_then_nonz_cap_same_refs: apply (wpsimp wp: set_object_iflive[THEN hoare_set_object_weaken_pre] simp: a_type_def live_def hyp_live_def arch_live_def) apply (rule if_live_then_nonz_capD; simp) - sorry (* FIXME AARCH64 VCPU apply (clarsimp simp: live_def hyp_live_def arch_live_def, clarsimp simp: vcpu_tcb_refs_def split: option.splits) - done *) + done lemma vgic_update_if_live_then_nonz_cap[wp]: "\if_live_then_nonz_cap\ vgic_update vcpuptr f \\_. 
if_live_then_nonz_cap\" @@ -889,13 +924,10 @@ crunches dissociate_vcpu_tcb for cap_refs_respects_device_region[wp]: "cap_refs_respects_device_region" (wp: crunch_wps cap_refs_respects_device_region_dmo simp: crunch_simps read_cntpct_def maskInterrupt_def - (* FIXME AARCH64 these should not be needed *) - enableFpuEL01_def check_export_arch_timer_def isb_def dsb_def ignore: do_machine_op) -(* FIXME AARCH64 VCPU crunch pspace_respects_device_region[wp]: dissociate_vcpu_tcb "pspace_respects_device_region" - (wp: crunch_wps) *) + (wp: crunch_wps) crunch cap_refs_in_kernel_window[wp]: dissociate_vcpu_tcb "cap_refs_in_kernel_window" (wp: crunch_wps simp: crunch_simps) @@ -940,41 +972,38 @@ lemma dmo_maskInterrupt_True_valid_irq_states[wp]: apply (wpsimp simp: valid_irq_masks_def)+ done -(* FIXME AARCH64 crunches vcpu_save_reg, vgic_update, vcpu_disable for valid_irq_states[wp]: valid_irq_states and in_user_frame[wp]: "in_user_frame p" (wp: dmo_maskInterrupt_True_valid_irq_states dmo_valid_irq_states simp: isb_def setHCR_def setSCTLR_def set_gic_vcpu_ctrl_hcr_def getSCTLR_def get_gic_vcpu_ctrl_hcr_def dsb_def readVCPUHardwareReg_def writeVCPUHardwareReg_def - read_cntpct_def maskInterrupt_def check_export_arch_timer_def) *) + read_cntpct_def maskInterrupt_def check_export_arch_timer_def) lemma dmo_writeVCPUHardwareReg_valid_machine_state[wp]: "do_machine_op (writeVCPUHardwareReg r v) \valid_machine_state\" unfolding valid_machine_state_def by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift dmo_machine_state_lift) -(* FIXME AARCH64 crunches vgic_update, vcpu_update, vcpu_write_reg, vcpu_save_reg, save_virt_timer for in_user_frame[wp]: "in_user_frame p" and valid_machine_state[wp]: valid_machine_state and underlying_memory[wp]: "\s. P (underlying_memory (machine_state s))" (simp: readVCPUHardwareReg_def read_cntpct_def - wp: writeVCPUHardwareReg_underlying_memory_pred dmo_machine_state_lift - ignore: do_machine_op) *) + wp: writeVCPUHardwareReg_underlying_memory_inv dmo_machine_state_lift + ignore: do_machine_op) lemma vcpu_disable_valid_machine_state[wp]: "\valid_machine_state\ vcpu_disable vcpu_opt \\_. valid_machine_state\" unfolding vcpu_disable_def valid_machine_state_def - sorry (* FIXME AARCH64 broken crunches by (wpsimp wp: dmo_machine_state_lift hoare_vcg_all_lift hoare_vcg_disj_lift simp: isb_def setHCR_def setSCTLR_def set_gic_vcpu_ctrl_hcr_def getSCTLR_def - get_gic_vcpu_ctrl_hcr_def dsb_def writeVCPUHardwareReg_def maskInterrupt_def) *) + get_gic_vcpu_ctrl_hcr_def dsb_def writeVCPUHardwareReg_def maskInterrupt_def) lemma valid_arch_state_vcpu_update_str: "valid_arch_state s \ valid_arch_state (s\arch_state := arm_current_vcpu_update Map.empty (arch_state s)\)" - sorry (* FIXME AARCH64 VCPU - by (simp add: valid_arch_state_def) *) + unfolding valid_arch_state_def + by (clarsimp simp: cur_vcpu_def valid_global_arch_objs_def) lemma valid_global_refs_vcpu_update_str: "valid_global_refs s \ valid_global_refs (s\arch_state := arm_current_vcpu_update f (arch_state s)\)" @@ -983,20 +1012,18 @@ lemma valid_global_refs_vcpu_update_str: lemma set_vcpu_None_valid_arch[wp]: "\valid_arch_state and (\s. \a. arm_current_vcpu (arch_state s) \ Some (vr, a))\ set_vcpu vr (vcpu_tcb_update Map.empty v) \\_. 
valid_arch_state\" - apply (wp set_vcpu_wp) - apply (clarsimp simp: valid_arch_state_def) - sorry (* FIXME AARCH64 VCPU - apply (rule conjI) - apply (fastforce simp: valid_asid_table_def obj_at_def) - apply (clarsimp simp: obj_at_def split: option.splits) - done *) + supply fun_upd_apply[simp del] + apply (wpsimp wp: set_vcpu_wp) + apply (clarsimp simp: valid_arch_state_def valid_global_arch_objs_def pts_of_vcpu_None_upd_idem + asid_pools_of_vcpu_None_upd_idem vmid_inv_def pt_at_eq_set_vcpu) + apply (clarsimp simp add: cur_vcpu_def fun_upd_apply in_opt_pred split: option.splits) + done lemma dissociate_vcpu_valid_arch[wp]: "\valid_arch_state\ dissociate_vcpu_tcb vr t \\_. valid_arch_state\" unfolding dissociate_vcpu_tcb_def vcpu_invalidate_active_def arch_get_sanitise_register_info_def - sorry (* FIXME AARCH64 VCPU by (wpsimp wp: get_vcpu_wp arch_thread_get_wp - | strengthen valid_arch_state_vcpu_update_str | wp (once) hoare_drop_imps)+ *) + | strengthen valid_arch_state_vcpu_update_str | wp (once) hoare_drop_imps)+ lemma as_user_valid_irq_states[wp]: "\valid_irq_states\ as_user t f \\rv. valid_irq_states\" @@ -1015,13 +1042,12 @@ lemma dissociate_vcpu_tcb_invs[wp]: "\invs\ dissociate_vcpu_tcb apply (simp add: invs_def valid_state_def valid_pspace_def) apply (simp add: pred_conj_def) apply (rule hoare_vcg_conj_lift[rotated])+ - sorry (* FIXME AARCH64 VCPU apply (wpsimp wp: weak_if_wp get_vcpu_wp arch_thread_get_wp as_user_only_idle arch_thread_set_valid_idle | simp add: dissociate_vcpu_tcb_def vcpu_invalidate_active_def arch_get_sanitise_register_info_def | strengthen valid_arch_state_vcpu_update_str valid_global_refs_vcpu_update_str | simp add: vcpu_disable_def valid_global_vspace_mappings_def valid_global_objs_def | wp (once) hoare_drop_imps)+ - done *) + done crunch invs[wp]: vcpu_finalise invs (ignore: dissociate_vcpu_tcb) @@ -1035,8 +1061,7 @@ lemma arch_finalise_cap_invs' [wp,Finalise_AI_asms]: apply (wp unmap_page_invs | wpc)+ apply (clarsimp simp: valid_cap_def cap_aligned_def) apply (auto simp: mask_def vmsz_aligned_def wellformed_mapdata_def) - sorry (* FIXME AARCH64 FPU/VCPU - done *) + done lemma arch_thread_set_unlive_other: "\\s. vr \ t \ obj_at (Not \ live) vr s\ arch_thread_set (tcb_vcpu_update Map.empty) t \\_. obj_at (Not \ live) vr\" @@ -1084,80 +1109,81 @@ lemma arch_finalise_cap_vcpu: arch_finalise_cap cap x \\rv s. replaceable s sl (fst rv) (cap.ArchObjectCap cap)\" apply (simp add: arch_finalise_cap_def) - apply (rule hoare_pre) - sorry (* FIXME AARCH64 - apply (wp wps | simp add: simps reachable_pg_cap_def| wpc | strengthen strg)+ - done *) + apply (wpsimp wp: wps simp: simps reachable_frame_cap_def | strengthen strg)+ + done lemma obj_at_not_live_valid_arch_cap_strg [Finalise_AI_asms]: "(s \ ArchObjectCap cap \ aobj_ref cap = Some r \ \ typ_at (AArch AVCPU) r s) \ obj_at (\ko. \ live ko) r s" - sorry (* FIXME AARCH64 FPU/VCPU - by (clarsimp simp: live_def valid_cap_def obj_at_def a_type_arch_live valid_cap_simps - hyp_live_def arch_live_def - split: arch_cap.split_asm if_splits) *) + by (clarsimp simp: live_def valid_cap_def valid_arch_cap_ref_def obj_at_def a_type_arch_live + valid_cap_simps hyp_live_def arch_live_def + split: arch_cap.split_asm if_splits) lemma obj_at_not_live_valid_arch_cap_strg' [Finalise_AI_asms]: "(s \ ArchObjectCap cap \ aobj_ref cap = Some r \ cap \ VCPUCap r) \ obj_at (\ko. 
\ live ko) r s" - sorry (* FIXME AARCH64 FPU/VCPU - by (clarsimp simp: live_def valid_cap_def obj_at_def + by (clarsimp simp: live_def valid_cap_def valid_arch_cap_ref_def obj_at_def hyp_live_def arch_live_def - split: arch_cap.split_asm if_splits) *) - -lemma arch_finalise_cap_replaceable1: - notes strg = tcb_cap_valid_imp_NullCap - obj_at_not_live_valid_arch_cap_strg[where cap=cap] - notes simps = replaceable_def and_not_not_or_imp - (* FIXME AARCH64 vs_lookup_pages_eq_at[THEN fun_cong, symmetric] *) - (* FIXME AARCH64 vs_lookup_pages_eq_ap[THEN fun_cong, symmetric] *) - is_cap_simps vs_cap_ref_def - no_cap_to_obj_with_diff_ref_Null o_def - notes wps = hoare_drop_imp[where R="%_. is_final_cap' cap" for cap] - (* FIXME AARCH64 unmap_page_table_unmapped3 *) valid_cap_typ - assumes X: "\r. cap \ VCPUCap r" - shows - "\\s. s \ cap.ArchObjectCap cap \ - x = is_final_cap' (cap.ArchObjectCap cap) s \ - pspace_aligned s \ valid_vspace_objs s \ valid_objs s \ - valid_asid_table s\ - arch_finalise_cap cap x - \\rv s. replaceable s sl (fst rv) (cap.ArchObjectCap cap)\" - sorry (* FIXME AARCH64 - apply (simp add: arch_finalise_cap_def) - apply (rule hoare_pre) - apply (simp add: simps split: option.splits vmpage_size.splits) - apply (wp wps - | strengthen strg - | simp add: simps reachable_pg_cap_def live_def - | wpc)+ - (* unmap_page case is a bit unpleasant *) - apply (strengthen cases_conj_strg[where P="\ is_final_cap' cap s" for cap s, simplified]) - apply (rule hoare_post_imp, clarsimp split: vmpage_size.split, assumption) - apply (simp add: vspace_bits_defs) - apply (wp hoare_vcg_disj_lift hoare_vcg_all_lift hoare_vcg_const_imp_lift - unmap_page_tcb_cap_valid unmap_page_page_unmapped - unmap_page_section_unmapped)[1] - apply (wp wps - | strengthen strg imp_and_strg tcb_cap_valid_imp_NullCap - | simp add: simps is_master_reply_cap_def reachable_pg_cap_def - | wpc)+ - apply (intro conjI; clarsimp split: cap.splits arch_cap.splits vmpage_size.splits) - by (auto simp: valid_cap_def obj_at_def simps is_master_reply_cap_def - a_type_def data_at_def vspace_bits_defs X - elim!: tcb_cap_valid_imp_NullCap[rule_format, rotated] - split: cap.splits arch_cap.splits vmpage_size.splits) *) + split: arch_cap.split_asm if_splits) crunches set_vm_root for ptes_of[wp]: "\s. P (ptes_of s)" - (* FIXME AARCH64 and asid_pools_of[wp]: "\s. P (asid_pools_of s)" *) and asid_table[wp]: "\s. P (asid_table s)" (simp: crunch_simps) +lemma vs_lookup_table_lift_strong: + assumes "\P. f \\s. P (ptes_of s)\" + assumes "\P ap_ptr. f \\s. P (vspace_for_pool ap_ptr asid (asid_pools_of s))\" + assumes "\P. f \\s. P (asid_table s)\" + shows "f \\s. P (vs_lookup_table level asid vref s)\" + apply (simp add: vs_lookup_table_def obind_def split: option.splits) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_imp_lift' pool_for_asid_lift assms + simp: not_le) + done + +lemma vs_lookup_slot_lift_strong: + assumes "\P. f \\s. P (ptes_of s)\" + assumes "\P ap_ptr. f \\s. P (vspace_for_pool ap_ptr asid (asid_pools_of s))\" + assumes "\P. f \\s. P (asid_table s)\" + shows "f \\s. P (vs_lookup_slot level asid vref s)\" + apply (simp add: vs_lookup_slot_def obind_def split: option.splits) + apply (wpsimp wp: assms hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_imp_lift' pool_for_asid_lift + vs_lookup_table_lift_strong + simp: not_le) + done + +lemma vs_lookup_target_lift_strong: + assumes "\P. f \\s. P (ptes_of s)\" + assumes "\P ap_ptr. f \\s. P (vspace_for_pool ap_ptr asid (asid_pools_of s))\" + assumes "\P. f \\s. 
P (asid_table s)\" + shows "f \\s. P (vs_lookup_target level asid vref s)\" + apply (simp add: vs_lookup_target_def obind_def split: option.splits) + apply (wpsimp wp: assms hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_imp_lift' pool_for_asid_lift + vs_lookup_slot_lift_strong + simp: not_le) + done + +lemma update_asid_pool_entry_vspace_for_pool: + "\\s. (\entry. f entry \ None \ ap_vspace (the (f entry)) = ap_vspace entry) \ + P (vspace_for_pool ap_ptr asid (asid_pools_of s))\ + update_asid_pool_entry f asid' + \\_ s. P (vspace_for_pool ap_ptr asid (asid_pools_of s)) \" + unfolding update_asid_pool_entry_def + apply (wpsimp simp_del: fun_upd_apply) + apply (erule rsubst[where P=P]) + apply (simp add: vspace_for_pool_def entry_for_pool_def obind_def split: option.splits) + by (metis if_option_None_eq(2) option.sel) + +crunches get_vmid, set_vm_root + for vspace_for_pool[wp]: "\s. P (vspace_for_pool ap_ptr asid (asid_pools_of s))" + (simp: crunch_simps + wp: update_asid_pool_entry_vspace_for_pool + wp_del: update_asid_pool_entry_asid_pools + ignore: update_asid_pool_entry) + lemma set_vm_root_vs_lookup_target[wp]: "set_vm_root tcb \\s. P (vs_lookup_target level asid vref s)\" - sorry (* FIXME AARCH64 - by (wpsimp wp: vs_lookup_target_lift) *) + by (wp vs_lookup_target_lift_strong) lemma vs_lookup_target_no_asid_pool: "\asid_pool_at ptr s; valid_vspace_objs s; valid_asid_table s; pspace_aligned s; @@ -1168,21 +1194,28 @@ lemma vs_lookup_target_no_asid_pool: apply (frule (1) pool_for_asid_validD, clarsimp) apply (subst (asm) pool_for_asid_vs_lookup[symmetric, where vref=0 and level=asid_pool_level, simplified]) apply (drule (1) valid_vspace_objsD; simp add: in_omonad) - sorry (* FIXME AARCH64 - apply (fastforce simp: vspace_for_pool_def in_omonad obj_at_def ran_def) + apply (fastforce simp: vspace_for_pool_def in_omonad obj_at_def ran_def entry_for_pool_def) apply (rename_tac pt_ptr) apply (clarsimp simp: vs_lookup_slot_def obj_at_def split: if_split_asm) apply (clarsimp simp: in_omonad) apply (frule (1) vs_lookup_table_is_aligned; clarsimp?) apply (clarsimp simp: ptes_of_def) + apply (rename_tac pt) apply (drule (1) valid_vspace_objsD; simp add: in_omonad) - apply (simp add: is_aligned_mask) - apply (drule_tac x=0 in bspec) - apply (clarsimp simp: kernel_mapping_slots_def pptr_base_def pptrBase_def pt_bits_left_def - bit_simps level_defs canonical_bit_def) + apply (simp add: is_aligned_mask pt_range_def) + apply (erule_tac x=0 in allE) apply (clarsimp simp: pte_ref_def data_at_def obj_at_def split: pte.splits) - done *) + apply (simp add: pptr_from_pte_def) + done +lemma vs_lookup_target_clear_asid_strg: + "table = asid_table s \ + vs_lookup_target level asid 0 + (s\arch_state := (arch_state s) \arm_asid_table := + table (asid_high_bits_of asid := None)\\) + = None" + by (clarsimp simp: vs_lookup_target_def vs_lookup_slot_def vs_lookup_table_def pool_for_asid_def + obind_def) lemma delete_asid_pool_not_target[wp]: "\asid_pool_at ptr and valid_vspace_objs and valid_asid_table and pspace_aligned\ @@ -1190,13 +1223,11 @@ lemma delete_asid_pool_not_target[wp]: \\rv s. vs_lookup_target level asid 0 s \ Some (level, ptr)\" unfolding delete_asid_pool_def supply fun_upd_apply[simp del] - apply wpsimp - sorry (* FIXME AARCH64 - apply (rule conjI; clarsimp) - apply (frule vs_lookup_target_no_asid_pool[of _ _ level asid]; assumption?) 
- apply (erule vs_lookup_target_clear_asid_table) + apply (wpsimp) + apply (strengthen vs_lookup_target_clear_asid_strg[THEN None_Some_strg]) + apply (wpsimp wp: mapM_wp' get_asid_pool_wp)+ apply (erule (4) vs_lookup_target_no_asid_pool) - done *) + done lemma delete_asid_pool_not_reachable[wp]: "\asid_pool_at ptr and valid_vspace_objs and valid_asid_table and pspace_aligned\ @@ -1209,22 +1240,20 @@ lemmas reachable_frame_cap_simps = lemma unmap_page_table_pool_for_asid[wp]: "unmap_page_table asid vref pt \\s. P (pool_for_asid asid s)\" - sorry (* FIXME AARCH64 - unfolding unmap_page_table_def by (wpsimp simp: pool_for_asid_def) *) + unfolding unmap_page_table_def by (wpsimp simp: pool_for_asid_def) lemma unmap_page_table_unreachable: - "\ pt_at pt_t pt and valid_asid_table and valid_vspace_objs and pspace_aligned + "\ normal_pt_at pt + and valid_asid_table and valid_vspace_objs and pspace_aligned and pspace_distinct and unique_table_refs and valid_vs_lookup and (\s. valid_caps (caps_of_state s) s) - and K (0 < asid \ vref \ user_region) - and (\s. vspace_for_asid asid s \ Some pt) \ + and K (0 < asid \ vref \ user_region) \ unmap_page_table asid vref pt \\_ s. \ reachable_target (asid, vref) pt s\" unfolding reachable_target_def apply (wpsimp wp: hoare_vcg_all_lift unmap_page_table_not_target) - sorry (* FIXME AARCH64 apply (drule (1) pool_for_asid_validD) apply (clarsimp simp: obj_at_def in_omonad) - done *) + done lemma unmap_page_unreachable: "\ data_at pgsz pptr and valid_asid_table and valid_vspace_objs @@ -1245,27 +1274,59 @@ lemma set_asid_pool_pool_for_asid[wp]: lemma delete_asid_pool_for_asid[wp]: "delete_asid asid pt \\s. P (pool_for_asid asid' s)\" - sorry (* FIXME AARCH64 - unfolding delete_asid_def by wpsimp *) + unfolding delete_asid_def by (wpsimp wp: hoare_drop_imps) -lemma delete_asid_no_vs_lookup_target: - "\\s. vspace_for_asid asid s = Some pt\ +lemma delete_asid_no_vs_lookup_target_vspace: + "\\s. vspace_for_asid asid s = Some pt \ delete_asid asid pt \\rv s. vs_lookup_target level asid vref s \ Some (level, pt)\" apply (rule hoare_assume_pre) apply (prop_tac "0 < asid") - apply (clarsimp simp: vspace_for_asid_def) - sorry (* FIXME AARCH64 + apply (clarsimp simp: vspace_for_asid_def entry_for_asid_def) apply (rule hoare_strengthen_post, rule delete_asid_unmapped) - apply (clarsimp simp: vs_lookup_target_def split: if_split_asm) - apply (clarsimp simp: vs_lookup_slot_def vs_lookup_table_def split: if_split_asm) - apply (clarsimp simp: vspace_for_asid_def obind_def) - apply (clarsimp simp: vs_lookup_slot_def vs_lookup_table_def split: if_split_asm) - apply (clarsimp simp: vspace_for_asid_def obind_def) - done *) + apply (clarsimp simp: vs_lookup_target_def vs_lookup_slot_def vs_lookup_table_def + vspace_for_asid_def vspace_for_pool_def entry_for_asid_def obind_None_eq + split: if_split_asm) + done + +lemma delete_asid_no_vs_lookup_target_no_vspace: + "\\s. vspace_for_asid asid s \ Some pt \ 0 < asid \ vref \ user_region \ vspace_pt_at pt s \ + valid_vspace_objs s \ valid_asid_table s \ pspace_aligned s \ + delete_asid asid pt + \\rv s. vs_lookup_target level asid vref s \ Some (level, pt)\" + unfolding delete_asid_def + (* We know we are in the case where delete_asid does not do anything *) + apply (wpsimp wp: when_wp[where Q="\_. 
False", simplified]) + apply (rule conjI, fastforce simp: vs_lookup_target_def vs_lookup_slot_def vs_lookup_table_def) + (* pool_for_asid asid s \ None *) + apply clarsimp + apply (rename_tac ap pool) + apply (rule conjI; clarsimp) + apply (clarsimp simp: vspace_for_asid_def entry_for_asid_def entry_for_pool_def obind_def + split: option.splits if_split_asm) + apply (clarsimp simp: vs_lookup_target_def vs_lookup_slot_pool_for_asid split: if_split_asm) + (* asid_pool_level *) + apply (fastforce simp: vspace_for_asid_def entry_for_asid_def vspace_for_pool_def obind_def + split: option.splits) + apply (drule (5) valid_vspace_objs_strong_slotD) + apply (clarsimp simp: in_omonad) + apply (rename_tac pte) + apply (case_tac pte; clarsimp simp: obj_at_def data_at_def) + apply (simp add: pptr_from_pte_def) + done + +lemma delete_asid_no_vs_lookup_target: + "\\s. 0 < asid \ vref \ user_region \ vspace_pt_at pt s \ valid_vspace_objs s \ + valid_asid_table s \ pspace_aligned s \ + delete_asid asid pt + \\rv s. vs_lookup_target level asid vref s \ Some (level, pt)\" + by (rule hoare_pre_cases[where P="\_.True", simplified, + OF delete_asid_no_vs_lookup_target_vspace + delete_asid_no_vs_lookup_target_no_vspace]) lemma delete_asid_unreachable: - "\\s. vspace_for_asid asid s = Some pt \ pt_at VSRootPT_T pt s \ valid_asid_table s \ + "\\s. 0 < asid \ vref \ user_region \ vspace_pt_at pt s \ valid_vspace_objs s \ + valid_asid_table s \ pspace_aligned s \ delete_asid asid pt \\_ s. \ reachable_target (asid, vref) pt s\" unfolding reachable_target_def @@ -1281,83 +1342,37 @@ lemma arch_finalise_cap_replaceable: is_cap_simps vs_cap_ref_def no_cap_to_obj_with_diff_ref_Null o_def reachable_frame_cap_simps - (* FIXME AARCH64 notes wps = hoare_drop_imp[where R="%_. is_final_cap' cap" for cap] valid_cap_typ unmap_page_unreachable unmap_page_table_unreachable - delete_asid_unreachable *) + delete_asid_unreachable vcpu_finalise_unlive[simplified o_def] shows "\\s. s \ ArchObjectCap cap \ x = is_final_cap' (ArchObjectCap cap) s \ - pspace_aligned s \ valid_vspace_objs s \ valid_objs s \ valid_asid_table s \ - valid_arch_caps s\ + pspace_aligned s \ pspace_distinct s \ + valid_vspace_objs s \ valid_objs s \ valid_asid_table s \ valid_arch_caps s\ arch_finalise_cap cap x \\rv s. 
replaceable s sl (fst rv) (ArchObjectCap cap)\" - sorry (* FIXME AARCH64 apply (simp add: arch_finalise_cap_def valid_arch_caps_def) apply (wpsimp simp: simps valid_objs_caps wp: wps | strengthen strg)+ apply (rule conjI, clarsimp) - apply (clarsimp simp: valid_cap_def) + apply (in_case "ASIDPoolCap ?p ?asid") + apply (clarsimp simp: valid_cap_def obj_at_def) + apply (rule conjI, clarsimp) + apply (in_case "FrameCap ?p ?R ?sz ?dev ?m") + apply (fastforce simp: valid_cap_def wellformed_mapdata_def data_at_def obj_at_def + split: if_split_asm) + apply clarsimp + apply (in_case "PageTableCap ?p ?T ?m") apply (rule conjI; clarsimp) - apply (rule conjI; clarsimp simp: valid_cap_def wellformed_mapdata_def data_at_def split: if_split_asm) + apply (in_case "PageTableCap ?p VSRootPT_T ?m") + apply (rule conjI; clarsimp simp: valid_cap_def wellformed_mapdata_def data_at_def obj_at_def + split: if_split_asm) + apply (in_case "PageTableCap ?p NormalPT_T ?m") apply (rule conjI; clarsimp) - apply (clarsimp simp: valid_cap_def wellformed_mapdata_def cap_aligned_def) - done *) - -(* FIXME AARCH64 this is the ARM_HYP formulation of arch_finalise_cap_replaceable -lemma arch_finalise_cap_replaceable1: - notes strg = tcb_cap_valid_imp_NullCap - obj_at_not_live_valid_arch_cap_strg[where cap=cap] - notes simps = replaceable_def and_not_not_or_imp - vs_lookup_pages_eq_at[THEN fun_cong, symmetric] - vs_lookup_pages_eq_ap[THEN fun_cong, symmetric] - is_cap_simps vs_cap_ref_def - no_cap_to_obj_with_diff_ref_Null o_def - notes wps = hoare_drop_imp[where R="%_. is_final_cap' cap" for cap] - unmap_page_table_unmapped3 valid_cap_typ - assumes X: "\r. cap \ VCPUCap r" - shows - "\\s. s \ cap.ArchObjectCap cap \ - x = is_final_cap' (cap.ArchObjectCap cap) s \ - pspace_aligned s \ valid_vspace_objs s \ valid_objs s \ - valid_asid_table (arm_asid_table (arch_state s)) s\ - arch_finalise_cap cap x - \\rv s. replaceable s sl (fst rv) (cap.ArchObjectCap cap)\" - apply (simp add: arch_finalise_cap_def) - apply (rule hoare_pre) - apply (simp add: simps split: option.splits vmpage_size.splits) - apply (wp wps - | strengthen strg - | simp add: simps reachable_pg_cap_def live_def - | wpc)+ - (* unmap_page case is a bit unpleasant *) - apply (strengthen cases_conj_strg[where P="\ is_final_cap' cap s" for cap s, simplified]) - apply (rule hoare_post_imp, clarsimp split: vmpage_size.split, assumption) - apply (simp add: vspace_bits_defs) - apply (wp hoare_vcg_disj_lift hoare_vcg_all_lift hoare_vcg_const_imp_lift - unmap_page_tcb_cap_valid unmap_page_page_unmapped - unmap_page_section_unmapped)[1] - apply (wp wps - | strengthen strg imp_and_strg tcb_cap_valid_imp_NullCap - | simp add: simps is_master_reply_cap_def reachable_pg_cap_def - | wpc)+ - apply (intro conjI; clarsimp split: cap.splits arch_cap.splits vmpage_size.splits) - by (auto simp: valid_cap_def obj_at_def simps is_master_reply_cap_def - a_type_def data_at_def vspace_bits_defs X - elim!: tcb_cap_valid_imp_NullCap[rule_format, rotated] - split: cap.splits arch_cap.splits vmpage_size.splits) - -lemma arch_finalise_cap_replaceable: - shows - "\\s. s \ cap.ArchObjectCap cap \ - x = is_final_cap' (cap.ArchObjectCap cap) s \ - pspace_aligned s \ valid_vspace_objs s \ valid_objs s \ - valid_asid_table (arm_asid_table (arch_state s)) s\ - arch_finalise_cap cap x - \\rv s. 
replaceable s sl (fst rv) (cap.ArchObjectCap cap)\" - by (cases cap; simp add: arch_finalise_cap_vcpu arch_finalise_cap_replaceable1) -*) - + apply (clarsimp simp: valid_cap_def obj_at_def) + apply (clarsimp simp: valid_cap_def wellformed_mapdata_def cap_aligned_def obj_at_def) + done global_naming Arch lemma (* deleting_irq_handler_slot_not_irq_node *)[Finalise_AI_asms]: @@ -1417,43 +1432,42 @@ lemma dissociate_vcpu_tcb_no_cap_to_obj_ref[wp]: "\no_cap_to_obj_with_diff_ref cap S\ dissociate_vcpu_tcb v t \\rv. no_cap_to_obj_with_diff_ref cap S\" - sorry (* FIXME AARCH64 VCPU - by (wpsimp simp: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state) *) + by (wpsimp simp: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state) lemma prepare_thread_delete_no_cap_to_obj_ref[wp]: "\no_cap_to_obj_with_diff_ref cap S\ prepare_thread_delete t \\rv. no_cap_to_obj_with_diff_ref cap S\" - sorry (* FIXME AARCH64 VCPU/FPU - unfolding prepare_thread_delete_def by wpsimp *) + unfolding prepare_thread_delete_def + by (wpsimp simp: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state) lemma prepare_thread_delete_unlive_hyp: "\obj_at \ ptr\ prepare_thread_delete ptr \\rv. obj_at (Not \ hyp_live) ptr\" - apply (simp add: prepare_thread_delete_def) - apply (wpsimp wp: hoare_vcg_imp_lift arch_thread_get_wp) + apply (simp add: prepare_thread_delete_def fpu_thread_delete_def) + apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_all_lift arch_thread_get_wp) apply (clarsimp simp: obj_at_def is_tcb_def hyp_live_def) - sorry (* FIXME AARCH64 VCPU - done *) + done lemma prepare_thread_delete_unlive0: "\obj_at (Not \ live0) ptr\ prepare_thread_delete ptr \\rv. obj_at (Not \ live0) ptr\" - sorry (* FIXME AARCH64 VCPU/FPU - by (simp add: prepare_thread_delete_def) *) + apply (simp add: prepare_thread_delete_def set_thread_state_def set_object_def fpu_thread_delete_def) + apply (wpsimp wp: dissociate_vcpu_tcb_unlive0 simp: obj_at_exst_update comp_def) + done lemma prepare_thread_delete_unlive[wp]: "\obj_at (Not \ live0) ptr\ prepare_thread_delete ptr \\rv. obj_at (Not \ live) ptr\" apply (rule_tac Q="\rv. obj_at (Not \ live0) ptr and obj_at (Not \ hyp_live) ptr" in hoare_strengthen_post) - sorry (* FIXME AARCH64 VCPU/FPU apply (wpsimp wp: hoare_vcg_conj_lift prepare_thread_delete_unlive_hyp prepare_thread_delete_unlive0) apply (clarsimp simp: obj_at_def) apply (clarsimp simp: obj_at_def, case_tac ko, simp_all add: is_tcb_def live_def) - done *) + done lemma finalise_cap_replaceable [Finalise_AI_asms]: "\\s. s \ cap \ x = is_final_cap' cap s \ valid_mdb s \ cte_wp_at ((=) cap) sl s \ valid_objs s \ sym_refs (state_refs_of s) \ (cap_irqs cap \ {} \ if_unsafe_then_cap s \ valid_global_refs s) \ (is_arch_cap cap \ pspace_aligned s \ + pspace_distinct s \ valid_vspace_objs s \ valid_arch_state s \ valid_arch_caps s)\ @@ -1532,8 +1546,8 @@ end interpretation Finalise_AI_1?: Finalise_AI_1 proof goal_cases interpret Arch . - case 1 show ?case sorry (* FIXME AARCH64 - by (intro_locales; (unfold_locales; fact Finalise_AI_asms)?) *) + case 1 show ?case + by (intro_locales; (unfold_locales; fact Finalise_AI_asms)?) qed context Arch begin global_naming AARCH64 @@ -1584,7 +1598,7 @@ crunches (wp: crunch_wps subset_refl) crunch irq_node[Finalise_AI_asms,wp]: prepare_thread_delete "\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch irq_node[wp]: arch_finalise_cap "\s. 
P (interrupt_irq_node s)" (simp: crunch_simps wp: crunch_wps) @@ -1597,10 +1611,6 @@ crunch pred_tcb_at[wp]: crunch pred_tcb_at[wp_unsafe]: arch_finalise_cap "pred_tcb_at proj P t" (simp: crunch_simps wp: crunch_wps) -(* FIXME AARCH64 -crunch empty[wp]: find_free_hw_asid, store_hw_asid, load_hw_asid, set_vm_root_for_flush, page_table_mapped, invalidate_tlb_by_asid - "\s. P (obj_at (empty_table {}) word s)" *) - lemma set_vcpu_empty[wp]: "\\s. P (obj_at (empty_table {}) word s)\ set_vcpu p v \\_ s. P (obj_at (empty_table {}) word s)\" apply (rule set_vcpu.vsobj_at) @@ -1669,31 +1679,25 @@ lemma is_arch_update_reset_page: apply (simp add: is_arch_update_def is_arch_cap_def cap_master_cap_def) done -crunch caps_of_state [wp]: vcpu_finalise "\s. P (caps_of_state s)" - (wp: crunch_wps) - -crunch caps_of_state [wp]: arch_finalise_cap "\s. P (caps_of_state s)" - (wp: crunch_wps simp: crunch_simps) - -lemma set_vm_root_empty[wp]: - "\\s. P (obj_at (empty_table S) p s)\ set_vm_root v \\_ s. P (obj_at (empty_table S) p s) \" - apply (simp add: set_vm_root_def) - sorry (* FIXME AARCH64 - apply wpsimp+ - apply (clarsimp simp: if_apply_def2) - apply (wpsimp+ | rule hoare_conjI[rotated] hoare_drop_imp hoare_allI)+ - done *) +crunches vcpu_finalise, arch_finalise_cap + for caps_of_state [wp]: "\s. P (caps_of_state s)" + (wp: crunch_wps simp: crunch_simps) lemma set_asid_pool_empty[wp]: - "\obj_at (empty_table S) word\ set_asid_pool x2 pool' \\xb. obj_at (empty_table S) word\" - by (wpsimp wp: set_object_wp simp: set_asid_pool_def obj_at_def in_omonad empty_table_def) + "set_asid_pool p ap \\s. P (obj_at (empty_table S) p' s)\" + unfolding set_asid_pool_def + apply (wpsimp wp: set_object_wp) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: obj_at_def in_omonad empty_table_def) + done -lemma delete_asid_empty_table_pt[wp]: - "delete_asid a word \\s. obj_at (empty_table S) word s\" - apply (simp add: delete_asid_def) - apply wpsimp - sorry (* FIXME AARCH64 - done *) +crunches set_global_user_vspace, arm_context_switch + for empty[wp]: "\s. P (obj_at (empty_table S) p s)" + +lemma set_vm_root_empty[wp]: + "set_vm_root v \\s. P (obj_at (empty_table S) p s) \" + unfolding set_vm_root_def + by (wpsimp wp: get_cap_wp) lemma ucast_less_shiftl_helper3: "\ len_of TYPE('b) + 3 < len_of TYPE('a); 2 ^ (len_of TYPE('b) + 3) \ n\ @@ -1739,7 +1743,7 @@ lemma replaceable_or_arch_update_pg: global_naming Arch crunch invs[wp]: prepare_thread_delete invs - (ignore: set_object) + (ignore: set_object do_machine_op wp: dmo_invs_lift) lemma (* finalise_cap_invs *)[Finalise_AI_asms]: shows "\invs and cte_wp_at ((=) cap) slot\ finalise_cap cap x \\rv. invs\" @@ -1748,7 +1752,6 @@ lemma (* finalise_cap_invs *)[Finalise_AI_asms]: unbind_maybe_notification_invs | simp add: o_def split del: if_split cong: if_cong | wpc )+ - sorry (* FIXME AARCH64 FPU/VCPU apply clarsimp (* thread *) apply (frule cte_wp_at_valid_objs_valid_cap, clarsimp) apply (clarsimp simp: valid_cap_def) @@ -1757,14 +1760,11 @@ lemma (* finalise_cap_invs *)[Finalise_AI_asms]: apply (simp add: cap_range_def) apply (wp deleting_irq_handler_invs | simp | intro conjI impI)+ apply (auto dest: cte_wp_at_valid_objs_valid_cap) - done *) + done lemma (* finalise_cap_irq_node *)[Finalise_AI_asms]: "\\s. P (interrupt_irq_node s)\ finalise_cap a b \\_ s. 
P (interrupt_irq_node s)\" - apply (case_tac a,simp_all) - apply (wp | clarsimp)+ - sorry (* FIXME AARCH64 FPU/VCPU - done *) + by (case_tac a, wpsimp+) lemmas (*arch_finalise_cte_irq_node *) [wp,Finalise_AI_asms] = hoare_use_eq_irq_node [OF arch_finalise_cap_irq_node arch_finalise_cap_cte_wp_at] @@ -1830,7 +1830,7 @@ lemma (* replace_cap_invs_arch_update *)[Finalise_AI_asms]: lemma dmo_pred_tcb_at[wp]: "do_machine_op mop \\s. P (pred_tcb_at f Q t s)\" apply (simp add: do_machine_op_def split_def) - apply (wp select_wp) + apply wp apply (clarsimp simp: pred_tcb_at_def obj_at_def) done @@ -1873,8 +1873,8 @@ interpretation Finalise_AI_3?: Finalise_AI_3 where replaceable_or_arch_update = replaceable_or_arch_update proof goal_cases interpret Arch . - case 1 show ?case sorry (* FIXME AARCH64 - by (intro_locales; (unfold_locales; fact Finalise_AI_asms)?) *) + case 1 show ?case + by (intro_locales; (unfold_locales; fact Finalise_AI_asms)?) qed context Arch begin global_naming AARCH64 @@ -1908,7 +1908,7 @@ lemma set_asid_pool_obj_at_ptr: locale_abbrev "asid_table_update asid ap s \ - s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\" + s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\" lemma valid_table_caps_table [simp]: "valid_table_caps (s\arch_state := arch_state s\arm_asid_table := table'\\) = valid_table_caps s" @@ -1928,8 +1928,7 @@ lemmas delete_asid_typ_ats[wp] = abs_typ_at_lifts [OF delete_asid_typ_at] lemma arch_finalise_cap_valid_cap[wp]: "arch_finalise_cap cap b \valid_cap c\" unfolding arch_finalise_cap_def - sorry (* FIXME AARCH64 VCPU - by (wpsimp split: arch_cap.split option.split bool.split) *) + by (wpsimp split: arch_cap.split option.split bool.split) global_naming Arch diff --git a/proof/invariant-abstract/AARCH64/ArchInterrupt_AI.thy b/proof/invariant-abstract/AARCH64/ArchInterrupt_AI.thy index 383e43b90b..9c28a6395a 100644 --- a/proof/invariant-abstract/AARCH64/ArchInterrupt_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchInterrupt_AI.thy @@ -19,7 +19,7 @@ primrec arch_irq_control_inv_valid_real :: cte_wp_at ((=) cap.IRQControlCap) src_slot and ex_cte_cap_wp_to is_cnode_cap dest_slot and real_cte_at dest_slot and - K (irq \ maxIRQ))" (*FIXME AARCH64 check this is gone: \ irq \ irqInvalid))" *) + K (irq \ maxIRQ))" defs arch_irq_control_inv_valid_def: "arch_irq_control_inv_valid \ arch_irq_control_inv_valid_real" @@ -49,12 +49,7 @@ lemma decode_irq_control_valid [Interrupt_AI_asms]: apply (clarsimp simp: linorder_not_less word_le_nat_alt unat_ucast maxIRQ_def) apply (cases caps; clarsimp simp: cte_wp_at_eq_simp) apply (intro conjI impI; clarsimp) - done (* FIXME AARCH64 this felt too easy, old proof went: - apply (drule ucast_ucast_mask_eq) - apply (subst and_mask_eq_iff_le_mask) - apply (simp add: mask_def word_le_nat_alt) - apply fast - done *) + done lemma get_irq_slot_different_ARCH[Interrupt_AI_asms]: "\\s. valid_global_refs s \ ex_cte_cap_wp_to is_cnode_cap ptr s\ @@ -104,12 +99,11 @@ lemma no_cap_to_obj_with_diff_IRQHandler_ARCH[Interrupt_AI_asms]: lemma (* set_irq_state_valid_cap *)[Interrupt_AI_asms]: "\valid_cap cap\ set_irq_state IRQSignal irq \\rv. 
valid_cap cap\" apply (clarsimp simp: set_irq_state_def) - sorry (* FIXME AARCH64 missing crunch apply (wp do_machine_op_valid_cap) apply (auto simp: valid_cap_def valid_untyped_def split: cap.splits option.splits arch_cap.splits split del: if_split) - done *) + done crunch valid_global_refs[Interrupt_AI_asms]: set_irq_state "valid_global_refs" @@ -143,7 +137,7 @@ lemma invoke_irq_handler_invs'[Interrupt_AI_asms]: done show ?thesis apply (cases i, simp_all) - apply (wp dmo_plic_complete_claim) + apply (wp dmo_plic_complete_claim maskInterrupt_invs) apply simp+ apply (rename_tac irq cap prod) apply (rule hoare_pre) diff --git a/proof/invariant-abstract/AARCH64/ArchInvariants_AI.thy b/proof/invariant-abstract/AARCH64/ArchInvariants_AI.thy index 415c9da7fe..e42dd1b32e 100644 --- a/proof/invariant-abstract/AARCH64/ArchInvariants_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchInvariants_AI.thy @@ -6,13 +6,9 @@ *) theory ArchInvariants_AI -imports InvariantsPre_AI "Lib.Apply_Trace_Cmd" +imports InvariantsPre_AI "Eisbach_Tools.Apply_Trace_Cmd" begin -(* setup *) - -declare opt_mapE[rule del] - context Arch begin global_naming AARCH64 (* compatibility with other architectures, input only *) @@ -591,12 +587,15 @@ definition pspace_in_kernel_window :: "'z::state_ext state \ bool" w definition vspace_at_asid :: "asid \ obj_ref \ 'z::state_ext state \ bool" where "vspace_at_asid asid pt \ \s. vspace_for_asid asid s = Some pt" +definition kernel_window_range :: "obj_ref set" where + "kernel_window_range \ {pptr_base ..< pptrTop}" + definition valid_uses_2 :: "arm_vspace_region_uses \ bool" where "valid_uses_2 uses \ \p. (\canonical_address p \ uses p = ArmVSpaceInvalidRegion) - \ (p \ {pptr_base ..< pptrTop} + \ (p \ kernel_window_range \ uses p \ {ArmVSpaceKernelWindow, ArmVSpaceInvalidRegion}) - \ (uses p = ArmVSpaceKernelWindow \ p \ {pptr_base ..< pptrTop}) + \ (uses p = ArmVSpaceKernelWindow \ p \ kernel_window_range) \ \The kernel device window doesn't occupy the entire region above kdev_base\ \ (kdev_base \ p \ uses p \ {ArmVSpaceDeviceWindow, ArmVSpaceInvalidRegion}) \ \No user window in hyp kernel address space\ @@ -625,16 +624,40 @@ lemmas vmid_for_asid_def = vmid_for_asid_2_def abbreviation (input) asid_map :: "'z::state_ext state \ asid \ vmid" where "asid_map \ vmid_for_asid" +locale_abbrev + "vmid_table s \ arm_vmid_table (arch_state s)" + (* vmIDs stored in ASID pools form the inverse of the vmid_table *) definition vmid_inv :: "'z::state_ext state \ bool" where - "vmid_inv s \ is_inv (arm_vmid_table (arch_state s)) (vmid_for_asid s)" + "vmid_inv s \ is_inv (vmid_table s) (vmid_for_asid s)" + +(* The vmID table never stores ASID 0 *) +definition valid_vmid_table_2 :: "(vmid \ asid) \ bool" where + "valid_vmid_table_2 table \ \vmid. table vmid \ Some 0" + +locale_abbrev valid_vmid_table :: "'z::state_ext state \ bool" where + "valid_vmid_table s \ valid_vmid_table_2 (vmid_table s)" + +lemmas valid_vmid_table_def = valid_vmid_table_2_def definition valid_global_arch_objs where "valid_global_arch_objs \ \s. vspace_pt_at (global_pt s) s" +(* global_pt is the empty default user-level vspace, corresponding to armKSGlobalUserVSpace in C. + On HYP platforms, the kernel has its own separate page table. + We need to know that the user-level table is empty so we can derive that user-level lookups + fail when there is no other vspace set. *) +definition valid_global_tables_2 :: "(obj_ref \ pt option) \ obj_ref \ bool" where + "valid_global_tables_2 \ \pts global. 
pts global = Some (empty_pt VSRootPT_T)" + +locale_abbrev valid_global_tables :: "'z::state_ext state \ bool" where + "valid_global_tables \ \s. valid_global_tables_2 (pts_of s) (global_pt s)" + +lemmas valid_global_tables_def = valid_global_tables_2_def + definition valid_arch_state :: "'z::state_ext state \ bool" where - "valid_arch_state \ valid_asid_table and valid_uses and vmid_inv and cur_vcpu and - valid_global_arch_objs" + "valid_arch_state \ valid_asid_table and valid_uses and vmid_inv and valid_vmid_table and + cur_vcpu and valid_global_arch_objs and valid_global_tables" (* ---------------------------------------------------------------------------------------------- *) @@ -684,16 +707,16 @@ definition hyp_refs_of :: "kernel_object \ (obj_ref \ reftype | Notification ntfn \ {} | ArchObj ao \ refs_of_ao ao" -lemmas hyp_refs_of_simps[simp] = hyp_refs_of_def[split_simps arch_kernel_obj.split] +lemmas hyp_refs_of_simps[simp] = hyp_refs_of_def[split_simps kernel_object.split] definition state_hyp_refs_of :: "'z::state_ext state \ obj_ref \ (obj_ref \ reftype) set" where "state_hyp_refs_of \ \s p. case_option {} (hyp_refs_of) (kheap s p)" -(* covered by ASIDPool case of valid_vspace_obj, inv_vmid, and definition of - vspace_for_asid (asid 0 never mapped) *) +(* Mostly covered by ASIDPool case of valid_vspace_obj and vmid_inv, but we still need to make sure + that ASID 0 is never mapped. *) definition valid_asid_map :: "'z::state_ext state \ bool" where - "valid_asid_map \ \" + "valid_asid_map \ \s. entry_for_asid 0 s = None" definition valid_global_objs :: "'z::state_ext state \ bool" where "valid_global_objs \ \" @@ -763,7 +786,7 @@ lemmas simple_bit_simps = lemmas table_bits_simps = pt_bits_def[simplified] pte_bits_def[unfolded word_size_bits_def] vs_index_bits_def -named_theorems bit_simps (* FIXME AARCH64: shadows Word_Lib bit_simps *) +named_theorems bit_simps lemmas [bit_simps] = table_bits_simps simple_bit_simps ipa_size_def valid_vs_slot_bits_def @@ -783,6 +806,14 @@ lemma vcpuBits_bounded[simp,intro!]: "vcpuBits < word_bits" including machine_bit_simps by (simp add: word_bits_def) +lemma ptTranslationBits_le_machine_word[simplified, simp]: + "ptTranslationBits pt_t < LENGTH(machine_word_len)" + by (simp add: bit_simps) + +lemma pte_bits_leq_table_size[simp]: + "pte_bits \ table_size pt_t" + by (simp add: table_size_def) + (* with asid_pool_level normalised to -1, max_pt_level otherwise becomes -2 *) lemma max_pt_level_def2: "max_pt_level = (if config_ARM_PA_SIZE_BITS_40 then 2 else 3)" by (simp add: max_pt_level_def asid_pool_level_def Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) @@ -796,12 +827,12 @@ lemma max_pt_level_gt0[simp]: lemma max_pt_level_enum: "level \ max_pt_level \ if config_ARM_PA_SIZE_BITS_40 then level \ {0,1,2} else level \ {0,1,2,3}" unfolding level_defs Kernel_Config.config_ARM_PA_SIZE_BITS_40_def - by (cases level rule: vm_level_of_nat_cases) (case_tac m; simp; rename_tac m)+ + by (cases level rule: vm_level.of_nat_cases) (case_tac m; simp; rename_tac m)+ lemma max_page_level_enum: "level \ max_page_level \ level \ {0,1,2}" unfolding level_defs - by (cases level rule: vm_level_of_nat_cases) (case_tac m; simp; rename_tac m)+ + by (cases level rule: vm_level.of_nat_cases) (case_tac m; simp; rename_tac m)+ lemma asid_pool_level_size: "size asid_pool_level = (if config_ARM_PA_SIZE_BITS_40 then 3 else 4)" @@ -819,7 +850,7 @@ lemma asid_pool_level_not_0[simp]: lemma vm_level_not_less_zero: fixes level :: vm_level shows "level \ 0 \ level > 0" - using 
vm_level_not_less_zero_bit0 neqE by blast + using vm_level.not_less_zero_bit0 neqE by blast lemma asid_pool_level_neq[simp]: "(x \ asid_pool_level) = (x \ max_pt_level)" @@ -828,7 +859,7 @@ proof hence "x < asid_pool_level" unfolding asid_pool_level_def by simp thus "x \ max_pt_level" - by (simp add: max_pt_level_def vm_level_leq_minus1_less) + by (simp add: max_pt_level_def vm_level.leq_minus1_less) next note maxBound_minus_one_bit[simp del] assume "x \ max_pt_level" @@ -872,7 +903,9 @@ lemma max_pt_level_less_conv[iff]: lemma max_pt_level_not_asid_pool_level[simp]: "max_pt_level \ asid_pool_level" + "asid_pool_level \ max_pt_level" by (simp add: asid_pool_level_def) + (simp add: level_defs) lemma asid_pool_level_minus: "asid_pool_level = -1" @@ -884,7 +917,7 @@ lemma max_pt_level_plus_one: lemma max_pt_level_less_Suc[iff]: "(level < level + 1) = (level \ max_pt_level)" - apply (simp add: vm_level_no_overflow_eq_max_bound max_pt_level_def flip: asid_pool_level_minus) + apply (simp add: vm_level.no_overflow_eq_max_bound max_pt_level_def flip: asid_pool_level_minus) by (metis asid_pool_level_max asid_pool_level_neq max_pt_level_def antisym_conv2) lemma size_level1[simp]: @@ -892,13 +925,15 @@ lemma size_level1[simp]: proof assume "size level = Suc 0" hence "size level = size (1::vm_level)" by simp - thus "level = 1" by (subst (asm) vm_level_size_inj) + thus "level = 1" by (subst (asm) vm_level.size_inj) qed auto lemma minus_one_max_pt_level[simp]: "(level - 1 = max_pt_level) = (level = asid_pool_level)" by (simp add: max_pt_level_def) +lemmas max_pt_level_eq_minus_one = minus_one_max_pt_level[THEN iffD1] + lemma plus_one_eq_asid_pool: "(level + 1 = asid_pool_level) = (level = max_pt_level)" by (metis add_right_imp_eq max_pt_level_plus_one) @@ -909,7 +944,7 @@ lemma max_inc_pt_level[simp]: lemma vm_level_le_plus_1_mono: "\level' \ level; level \ max_pt_level \ \ level' + 1 \ level + 1" - by (simp add: vm_level_plus_one_leq le_less_trans) + by (simp add: vm_level.plus_one_leq le_less_trans) lemma vm_level_less_plus_1_mono: "\ level' < level; level \ max_pt_level \ \ level' + 1 < level + 1" @@ -917,7 +952,7 @@ lemma vm_level_less_plus_1_mono: lemma level_minus_one_max_pt_level[iff]: "(level - 1 \ max_pt_level) = (0 < level)" - by (metis max_pt_level_less_Suc vm_level_not_less_zero_bit0 vm_level_pred diff_add_cancel + by (metis max_pt_level_less_Suc vm_level.not_less_zero_bit0 vm_level.pred diff_add_cancel not_less_iff_gr_or_eq) (* Sometimes you need type nat directly in the goal, not vm_level *) @@ -1293,6 +1328,14 @@ lemma level_type_eq[simp]: "(level_type level = VSRootPT_T) = (level = max_pt_level)" by (simp add: level_type_def)+ +lemma level_type_less_max_pt_level: + "level < max_pt_level \ level_type level = NormalPT_T" + by (clarsimp simp: level_type_def) + +lemma ptTranslationBits_NormalPT_T_leq: + "ptTranslationBits NormalPT_T \ ptTranslationBits VSRootPT_T" + by (simp add: bit_simps) + lemma valid_vspace_obj_default'[simp]: "\ ao_type = VSpaceObj \ level = max_pt_level; ao_type = PageTableObj \ level \ max_pt_level \ \ @@ -1334,7 +1377,6 @@ lemma hyp_sym_refs_obj_atD: "\ obj_at P p s; sym_refs (state_hyp_refs_of s) \ \ \ko. P ko \ state_hyp_refs_of s p = hyp_refs_of ko \ (\(x, tp)\hyp_refs_of ko. obj_at (\ko. 
(p, symreftype tp) \ hyp_refs_of ko) x s)" - supply hyp_refs_of_simps[simp del] apply (drule obj_at_state_hyp_refs_ofD) apply (erule exEI, clarsimp) apply (drule sym, simp) @@ -1475,9 +1517,25 @@ lemma canonical_user_ge0[intro!,simp]: "0 < canonical_user" by (simp add: canonical_user_def mask_def ipa_size_def) -lemma pptr_base_kernel_elf_base: - "pptr_base < kernel_elf_base" - by (simp add: pptr_base_def pptrBase_def kernel_elf_base_def kernelELFBase_def) +lemma pptrTop_le_ipa_size: + "pptrTop \ mask ipa_size" + by (simp add: bit_simps pptrTop_def mask_def) + +lemma below_pptrTop_ipa_size: + "p < pptrTop \ p \ mask ipa_size" + using pptrTop_le_ipa_size + by simp + +lemma addrFromPPtr_mask_ipa: + "\ pptr_base \ pt_ptr; pt_ptr < pptrTop \ + \ addrFromPPtr pt_ptr && mask ipa_size = addrFromPPtr pt_ptr" + using pptrTop_le_ipa_size + by (simp add: and_mask_eq_iff_le_mask addrFromPPtr_def pptr_base_def pptrBaseOffset_def + paddrBase_def word_le_imp_diff_le) + +lemma pageBits_less_ipa_size[simp]: + "pageBits < ipa_size" + by (simp add: bit_simps) lemmas window_defs = kernel_window_def not_kernel_window_def kernel_regions_def @@ -1486,7 +1544,24 @@ lemmas window_defs = lemma valid_uses_kernel_window: "\ valid_uses s; p \ kernel_window s \ \ p \ {pptr_base ..< pptrTop} \ canonical_address p" unfolding valid_uses_def window_defs - by (erule_tac x=p in allE) auto + by (erule_tac x=p in allE) (auto simp: kernel_window_range_def) + +lemma kernel_window_bounded: + "\ p \ kernel_window s; valid_uses s \ \ p \ kernel_window_range" + by (fastforce dest: valid_uses_kernel_window simp: kernel_window_range_def) + +lemma pspace_in_kw_bounded: + "\ kheap s p = Some ko; pspace_in_kernel_window s; valid_uses s; pspace_aligned s \ \ + p \ kernel_window_range" + unfolding pspace_aligned_def + apply (drule bspec, fastforce) + apply (simp add: pspace_in_kernel_window_def) + apply (erule allE, erule allE, erule (1) impE) + apply (prop_tac "p \ kernel_window s") + apply (erule set_mp) + apply (clarsimp simp: is_aligned_no_overflow) + apply (fastforce dest: kernel_window_bounded) + done lemma pt_walk_max_level: "pt_walk top_level bot_level pt_ptr vptr ptes = Some (level, p) @@ -1498,8 +1573,8 @@ lemma pt_walk_max_level: apply (clarsimp simp: in_omonad split: if_split_asm) apply (erule disjE; clarsimp) apply (drule meta_spec, drule (1) meta_mp) - apply (drule vm_level_zero_least) - using vm_level_pred less_trans not_less by blast + apply (drule vm_level.zero_least) + using vm_level.pred less_trans not_less by blast lemma pt_walk_min_level: "pt_walk top_level bot_level pt_ptr vptr ptes = Some (level, p) @@ -1511,7 +1586,7 @@ lemma pt_walk_min_level: apply (clarsimp simp: in_omonad split: if_split_asm) apply (erule disjE; clarsimp) apply (drule meta_spec, drule (1) meta_mp) - apply (auto simp: min_def split: if_split_asm dest: vm_level_minus1_leq) + apply (auto simp: min_def split: if_split_asm dest: vm_level.minus1_leq) done lemma pt_walk_top: @@ -1634,9 +1709,9 @@ lemma pt_walk_vref_for_level_eq: apply (simp add: pt_walk.simps) apply (subst pt_walk.simps) apply (subst (2) pt_walk.simps) - apply (simp add: Let_def vm_level_leq_minus1_less) + apply (simp add: Let_def vm_level.leq_minus1_less) apply (drule_tac level'=top_level in vref_for_level_eq_mono) - apply (simp add: vm_level_plus_one_leq) + apply (simp add: vm_level.plus_one_leq) apply (drule_tac pt=pt in vref_for_level_pt_slot_offset) apply (clarsimp simp: obind_def split: option.splits) done @@ -1651,7 +1726,7 @@ lemma pt_walk_vref_for_level1: "\ level \ bot_level; 
bot_level \ top_level; top_level \ max_pt_level \ \ pt_walk top_level bot_level pt (vref_for_level vref (level+1)) = pt_walk top_level bot_level pt vref" - by (meson max_pt_level_less_Suc vm_level_plus_one_leq leD leI order.trans vref_for_level_idem + by (meson max_pt_level_less_Suc vm_level.plus_one_leq leD leI order.trans vref_for_level_idem pt_walk_vref_for_level_eq) lemma vs_lookup_vref_for_level1: @@ -1684,9 +1759,6 @@ lemma vspace_for_asid_SomeI: by (clarsimp simp: entry_for_asid_def pool_for_asid_def entry_for_pool_def vspace_for_pool_def vspace_for_asid_def obind_def) -(* FIXME AARCH64: move up *) -lemmas ptes_of_def = level_pte_of_def - lemma ptes_of_pts_of: "ptes_of s pt_t pte_ptr = Some pte \ \pt. pts_of s (pte_ptr && ~~mask (pt_bits pt_t)) = Some pt \ pt_t = pt_type pt" @@ -1766,6 +1838,19 @@ lemma constructed_asid_high_bits_of[simp]: apply (fastforce intro: shiftr_le_0 unat_less_power order_less_le_trans ucast_less) done +lemma asid_high_low_inj: + "\ asid_low_bits_of asid' = asid_low_bits_of asid; + asid_high_bits_of asid' = asid_high_bits_of asid \ + \ asid' = asid" + unfolding asid_low_bits_of_def asid_high_bits_of_def + by (drule word_unat_eq_iff[THEN iffD1])+ + (clarsimp elim!: word_mask_shift_eqI + simp: unat_ucast_eq_unat_and_mask asid_low_bits_def shiftr_mask_eq' word_size) + +lemma asid_of_high_low_eq[simp, intro!]: + "asid_of (asid_high_bits_of asid) (asid_low_bits_of asid) = asid" + by (rule asid_high_low_inj; simp) + lemma pt_walk_level: "pt_walk top_level bot_level pt vref ptes = Some (level, p) \ pt_walk top_level level pt vref ptes = Some (level, p)" @@ -1776,7 +1861,7 @@ lemma pt_walk_level: apply (clarsimp simp: in_omonad split: if_split_asm) apply (erule disjE; clarsimp) apply (drule meta_spec, drule (1) meta_mp) - by (fastforce simp: vm_level_leq_minus1_less dest: pt_walk_max_level) + by (fastforce simp: vm_level.leq_minus1_less dest: pt_walk_max_level) lemma vs_lookup_level: "vs_lookup_table bot_level asid vref s = Some (level, p) \ @@ -1849,6 +1934,16 @@ lemma pte_at_eq: "pte_at pt_t p s = (ptes_of s pt_t p \ None)" by (auto simp: obj_at_def pte_at_def in_omonad) +lemma vspace_objs_of_Some_projections[simp]: + "vspace_objs_of s p = Some (ASIDPool pool) \ asid_pools_of s p = Some pool" + "vspace_objs_of s p = Some (PageTable pt) \ pts_of s p = Some pt" + (* no projections for data pages *) + by (auto simp: in_omonad vspace_obj_of_def split: if_splits) + +lemma vspace_objs_of_ako_at_Some: + "(vspace_objs_of s p = Some (PageTable pt)) = ako_at (PageTable pt) p s" + by (simp add: obj_at_def in_opt_map_eq vspace_obj_of_Some) + lemma valid_vspace_objsI [intro?]: "(\p ao asid vref level. 
\ vs_lookup_table level asid (vref_for_level vref (level+1)) s = Some (level, p); @@ -1898,7 +1993,7 @@ lemma pt_slot_offset_vref: apply (prop_tac "size level \ size max_pt_level", simp) apply (simp add: size_max_pt_level split: if_split_asm) apply (erule_tac x="(9 + (9 * size level + n))" in allE, - (erule impE; clarsimp), simp flip: vm_level_size_less)+ + (erule impE; clarsimp), simp flip: vm_level.size_less)+ done lemma pt_slot_offset_vref_for_level_eq: @@ -1944,6 +2039,41 @@ lemma aligned_vref_for_level_eq: "is_aligned vref (pt_bits_left level) = (vref_for_level vref level = vref)" unfolding vref_for_level_def using is_aligned_neg_mask_eq' by blast +lemma is_aligned_pt_bits_pte_bits: + "is_aligned p (pt_bits pt_t) \ is_aligned p pte_bits" + by (simp add: bit_simps is_aligned_weaken split: if_splits) + +lemma pts_of_ptes_of: + "\ pts_of s p = Some pt; is_aligned p (pt_bits (pt_type pt)) \ \ + \pte. ptes_of s (pt_type pt) p = Some pte" + by (clarsimp simp: ptes_of_Some is_aligned_pt_bits_pte_bits) + +lemma pt_index_mask_eq: + "pt_index level vref && mask (ptTranslationBits level) = pt_index level vref" + by (simp add: pt_index_def bit_simps) + +lemma table_index_mask_eq: + "table_index pt_t p && mask (ptTranslationBits pt_t) = table_index pt_t p" + by (auto simp add: pt_bits_def bit_simps mask_shiftr_mask_eq) + +lemma pt_apply_upd_eq: + "pt_type pt = level_type level \ + pt_apply (pt_upd pt (table_index (level_type level) p) pte) (pt_index level vref) = + (if table_index (level_type level) p = pt_index level vref + then pte + else pt_apply pt (pt_index level vref))" + unfolding pt_apply_def pt_upd_def + using pt_index_mask_eq[of max_pt_level] pt_index_mask_eq[where level=level and vref=vref] + using table_index_mask_eq[where pt_t=NormalPT_T] table_index_mask_eq[where pt_t=VSRootPT_T] + apply (cases pt; clarsimp simp: ucast_eq_mask vs_index_ptTranslationBits pt_index_ptTranslationBits) + apply (prop_tac "level_type level = NormalPT_T", simp add: level_type_def) + apply (simp del: level_type_eq add: ptTranslationBits_def) + done + +lemma pt_upd_empty_InvalidPTE[simp]: + "pt_upd (empty_pt pt_t) idx InvalidPTE = empty_pt pt_t" + by (auto simp: pt_upd_def empty_pt_def split: pt.splits) + lemma is_aligned_table_base_pte_bits[simp]: "is_aligned (table_base vs p) pte_bits" unfolding pte_bits_def @@ -2064,7 +2194,7 @@ lemma pt_walk_split: apply (fastforce simp: obind_assoc intro: opt_bind_cong) apply (subgoal_tac "level' < top_level -1") apply (fastforce simp: obind_assoc intro: opt_bind_cong) - apply (meson vm_level_minus1_leq not_le less_le) + apply (meson vm_level.minus1_leq not_le less_le) done lemma pt_walk_split_short: @@ -2084,7 +2214,7 @@ lemma pt_walk_split_short: apply (subgoal_tac "level' < top_level -1") apply (clarsimp simp: obind_assoc) apply (fastforce simp: obind_def pt_walk.simps intro!: opt_bind_cong) - apply (meson vm_level_minus1_leq not_le less_le) + apply (meson vm_level.minus1_leq not_le less_le) done lemma pt_walk_split_Some: @@ -2141,7 +2271,7 @@ lemma vs_lookup_table_split_last_Some: apply (clarsimp simp: vs_lookup_table_def in_omonad asid_pool_level_eq vm_level_less_max_pt_level) apply (subst (asm) pt_walk_split_Some[where level'="level+1"]) - apply (clarsimp simp add: less_imp_le vm_level_plus_one_leq)+ + apply (clarsimp simp add: less_imp_le vm_level.plus_one_leq)+ apply (subst (asm) (2) pt_walk.simps) apply (clarsimp simp: in_omonad split: if_splits) done @@ -2176,6 +2306,15 @@ lemma valid_vspace_objsD: valid_vspace_obj level ao s" by (simp add: 
valid_vspace_objs_def) +lemma vspace_for_asid_not_normal_pt: + "\vspace_for_asid asid s = Some pt; normal_pt_at pt s; valid_vspace_objs s\ \ False" + apply (drule vspace_for_asid_vs_lookup) + apply (clarsimp simp: pt_at_eq) + apply (drule (1) valid_vspace_objsD, simp) + apply (fastforce simp: in_omonad) + apply clarsimp + done + (* A static bound on the size of pt_bits. For PA_40 configurations this is 40 (size of the PA/IPA address space). For PA_44 configurations, this is 48, because the page tables can theoretically translate 48 bits, even though the PA/IPA space is only 44 bits wide *) @@ -2201,6 +2340,14 @@ lemma pt_bits_left_le_max_pt_level: apply (simp add: level_defs) done +lemma user_vtop_leq_canonical_user: + "user_vtop \ canonical_user" + by (simp add: user_vtop_def pptrUserTop_def canonical_user_def mask_def ipa_size_def) + +lemma user_vtop_canonical_user: + "vref < user_vtop \ vref \ canonical_user" + using user_vtop_leq_canonical_user by simp + lemma vref_for_level_asid_pool: "vref \ canonical_user \ vref_for_level vref asid_pool_level = 0" apply (clarsimp simp: vref_for_level_def pt_bits_left_def asid_pool_level_size bit_simps max_pt_level_def2 @@ -2223,39 +2370,87 @@ lemma aligned_vref_for_level[simp]: lemmas pt_walk_0[simp] = pt_walk.simps[where level=0, simplified] -(* FIXME AARCH64: the 39 is from the construction of pptrBaseOffset, not sure if we can de-magic that number *) +(* The current definition of pptrBaseOffset comes out to 2^39. The number below is the maximum + alignment this offset allows. We need this to be greater or equal to pt_bits_left max_pt_level *) +definition pptrBaseOffset_alignment :: nat where + "pptrBaseOffset_alignment \ 39" + +(* sanity check for size *) +lemma pptrBaseOffset_alignment_pt_bits_left[simp, intro!]: + "pt_bits_left max_pt_level \ pptrBaseOffset_alignment" + by (simp add: bit_simps pt_bits_left_def pptrBaseOffset_alignment_def size_max_pt_level) + +(* sanity check for alignment *) +lemma pptrBaseOffset_aligned[simp, intro!]: + "is_aligned pptrBaseOffset pptrBaseOffset_alignment" + by (simp add: pptrBaseOffset_alignment_def pptrBaseOffset_def pptrBase_def paddrBase_def + is_aligned_def) + lemma is_aligned_addrFromPPtr_n: - "\ is_aligned p n; n \ 39 \ \ is_aligned (addrFromPPtr p) n" + "\ is_aligned p n; n \ pptrBaseOffset_alignment \ \ is_aligned (addrFromPPtr p) n" apply (simp add: addrFromPPtr_def) apply (erule aligned_sub_aligned) - apply (simp add: pptrBaseOffset_def pptrBase_def paddrBase_def) - apply (erule is_aligned_weaken[rotated]) - apply (simp add: is_aligned_def) - apply simp + apply (erule is_aligned_weaken[OF pptrBaseOffset_aligned]) + apply (simp add: pptrBaseOffset_alignment_def) done lemma is_aligned_addrFromPPtr[intro!]: "is_aligned p pageBits \ is_aligned (addrFromPPtr p) pageBits" - by (simp add: is_aligned_addrFromPPtr_n pageBits_def) + by (simp add: is_aligned_addrFromPPtr_n pageBits_def pptrBaseOffset_alignment_def) + +lemma pptrTop_ucast_ppn: + "\ p < pptrTop; is_aligned p pageBits \ \ + ucast (ucast (p >> pageBits)::ppn) = p >> pageBits" + apply (drule below_pptrTop_ipa_size) + apply word_eqI + using ppn_len_def'[unfolded ppn_len_val] + by (fastforce dest: bit_imp_le_length) + +lemma kernel_window_range_addrFromPPtr: + "p \ kernel_window_range \ addrFromPPtr p < pptrTop" + apply (simp add: kernel_window_range_def addrFromPPtr_def pptrBaseOffset_def + paddrBase_def pptr_base_def) + apply unat_arith + done + +lemma kernel_window_addrFromPPtr: + "\ p \ kernel_window_range; is_aligned p pageBits \ \ + ucast 
(ucast (addrFromPPtr p >> pageBits)::ppn) = addrFromPPtr p >> pageBits" + apply (rule pptrTop_ucast_ppn) + apply (erule kernel_window_range_addrFromPPtr) + apply (erule is_aligned_addrFromPPtr) + done -(* FIXME AARCH64: the 39 is from the construction of pptrBaseOffset, via is_aligned_addrFromPPtr_n *) lemma is_aligned_ptrFromPAddr_n: - "\is_aligned x sz; sz \ 39\ + "\is_aligned x sz; sz \ pptrBaseOffset_alignment\ \ is_aligned (ptrFromPAddr x) sz" - apply (simp add: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def paddrBase_def) + unfolding ptrFromPAddr_def apply (erule aligned_add_aligned) - apply (erule is_aligned_weaken[rotated]) - apply (simp add: is_aligned_def) + apply (erule is_aligned_weaken[OF pptrBaseOffset_aligned]) apply (rule order.refl) done +lemma is_aligned_pptrBaseOffset_pt_bits_left: + "level \ max_pt_level \ is_aligned pptrBaseOffset (pt_bits_left level)" + by (blast intro: order_trans pt_bits_left_mono is_aligned_weaken) + +lemma is_aligned_ptrFromPAddr_n_eq: + "level \ max_pt_level \ + is_aligned (ptrFromPAddr x) (pt_bits_left level) = is_aligned x (pt_bits_left level)" + apply (rule iffI) + apply (simp add: ptrFromPAddr_def) + apply (erule is_aligned_addD2) + apply (erule is_aligned_pptrBaseOffset_pt_bits_left) + apply (blast intro: order_trans pt_bits_left_mono is_aligned_ptrFromPAddr_n) + done + lemma is_aligned_ptrFromPAddr: "is_aligned p pageBits \ is_aligned (ptrFromPAddr p) pageBits" - by (simp add: is_aligned_ptrFromPAddr_n pageBits_def) + by (simp add: is_aligned_ptrFromPAddr_n pageBits_def pptrBaseOffset_alignment_def) lemma is_aligned_ptrFromPAddr_pt_bits[intro!]: "is_aligned p (pt_bits pt_t) \ is_aligned (ptrFromPAddr p) (pt_bits pt_t)" - by (simp add: is_aligned_ptrFromPAddr_n bit_simps) + by (simp add: is_aligned_ptrFromPAddr_n bit_simps pptrBaseOffset_alignment_def) lemma pspace_aligned_pts_ofD: "\ pspace_aligned s; pts_of s pt_ptr = Some pt \ \ is_aligned pt_ptr (pt_bits (pt_type pt))" @@ -2273,10 +2468,18 @@ lemma pt_slot_offset_pt_range: for level::vm_level by (clarsimp simp: ptes_of_Some) +lemma ucast_ucast_ppn: + "ucast (ucast ptr::ppn) = ptr && mask ppn_len" for ptr::obj_ref + by (simp add: ucast_ucast_mask ppn_len_val) + lemma pte_base_addr_PageTablePTE[simp]: "pte_base_addr (PageTablePTE ppn) = paddr_from_ppn ppn" by (simp add: pte_base_addr_def) +lemma pptr_from_pte_PagePTE[simp]: + "pptr_from_pte (PagePTE p is_small attr rights) = ptrFromPAddr p" + by (simp add: pptr_from_pte_def pte_base_addr_def) + lemma valid_vspace_objs_strongD: "\ valid_vspace_objs s; vs_lookup_table bot_level asid vref s = Some (level, pt_ptr); @@ -2287,7 +2490,7 @@ lemma valid_vspace_objs_strongD: pt_type pt = level_type level" supply valid_vspace_obj.simps[simp del] apply (drule vs_lookup_level) - apply (induct level arbitrary: pt_ptr rule: vm_level_from_top_induct[where y="max_pt_level"]) + apply (induct level arbitrary: pt_ptr rule: vm_level.from_top_induct[where y="max_pt_level"]) apply simp apply (drule (3) vs_lookup_max_pt_valid, simp) apply (rename_tac level pt_ptr) @@ -2435,9 +2638,13 @@ lemma vmid_inv_ap_lift: definition is_vcpu :: "kernel_object \ bool" where "is_vcpu \ \ko. \vcpu. ko = ArchObj (VCPU vcpu)" +lemma obj_at_vcpu_hyp_live_of_s: + "obj_at (is_vcpu and hyp_live) p s = vcpu_hyp_live_of s p" + by (auto simp: obj_at_def in_omonad is_vcpu_def hyp_live_def arch_live_def) + lemma obj_at_vcpu_hyp_live_of: "obj_at (is_vcpu and hyp_live) p = (\s. 
vcpu_hyp_live_of s p)" - by (rule ext) (auto simp: obj_at_def in_omonad is_vcpu_def hyp_live_def arch_live_def) + using obj_at_vcpu_hyp_live_of_s by blast lemma cur_vcpu_typ_lift: assumes vcpus: "\P. f \\s. P (vcpu_tcbs_of s)\" @@ -2457,7 +2664,7 @@ lemma vspace_for_asid_lift: apply (simp add: obind_def pool_for_asid_def o_def split del: if_split) apply (rule hoare_lift_Pf[where f=asid_table]) apply (rule hoare_lift_Pf[where f=asid_pools_of]) - apply (wpsimp wp: assms entry_for_asid_lift split: option.splits)+ + apply (wpsimp wp: assms entry_for_asid_lift split: option.splits split_del: if_split)+ done lemma valid_global_arch_objs_lift: @@ -2521,7 +2728,7 @@ lemma pool_for_asid_and_mask[simp]: lemma vs_lookup_table_ap_step: "\ vs_lookup_table asid_pool_level asid vref s = Some (asid_pool_level, p); - asid_pools_of s p = Some ap; ap ap_idx = Some entry \ \ + asid_pools_of s p = Some ap; \ap_idx. ap ap_idx = Some entry \ \ \asid'. vs_lookup_target asid_pool_level asid' vref s = Some (asid_pool_level, ap_vspace entry)" apply (clarsimp simp: vs_lookup_target_def vs_lookup_slot_def in_omonad ran_def) apply (rule_tac x="asid && ~~mask asid_low_bits || ucast ap_idx" in exI) @@ -2596,7 +2803,7 @@ lemma vref_for_level_idx_canonical_user: apply (frule bit_imp_possible_bit) apply simp apply (drule xt1(11), simp) - apply (subst (asm) vm_level_size_less[symmetric]) + apply (subst (asm) vm_level.size_less[symmetric]) apply (simp add: size_max_pt_level) apply (cases "level = max_pt_level") apply (clarsimp simp: bit_simps pt_bits_left_def size_max_pt_level asid_pool_level_eq word_size) @@ -2608,7 +2815,7 @@ lemma vref_for_level_idx_canonical_user: apply (simp add: bit_simps pt_bits_left_def size_max_pt_level asid_pool_level_eq word_size) apply (frule bit_imp_possible_bit) apply (drule xt1(11), simp) - apply (subst (asm) vm_level_size_less[symmetric]) + apply (subst (asm) vm_level.size_less[symmetric]) apply (simp add: size_max_pt_level) done @@ -2689,7 +2896,7 @@ lemma vs_lookup_table_eq_lift: lemma aobjs_of_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ kheap s(p \ ko') |> aobj_of = aobjs_of s" + \ (kheap s)(p \ ko') |> aobj_of = aobjs_of s" by (rule ext) (auto simp: opt_map_def is_ArchObj_def aobj_of_def split: kernel_object.splits if_split_asm) @@ -2715,7 +2922,7 @@ lemma pt_bits_left_inj[simp]: apply (intro iffI; clarsimp?) apply (clarsimp simp: pt_bits_left_def bit_simps split: if_splits) - by (metis vm_level_size_less_eq diff_is_0_eq' mult_zero_right right_diff_distrib' sum_imp_diff + by (metis vm_level.size_less_eq diff_is_0_eq' mult_zero_right right_diff_distrib' sum_imp_diff zero_neq_numeral)+ lemma pt_walk_stopped: @@ -2772,6 +2979,12 @@ lemma hyp_refs_of_rev: vcpu_tcb_refs_def refs_of_ao_def split: kernel_object.splits arch_kernel_obj.splits option.split) +lemma valid_asid_map_lift_strong: + assumes "\P. f \\s. P (asid_table s)\" + assumes "\P. f \\s. 
P (asid_pools_of s)\" + shows "f \valid_asid_map\" + by (wpsimp simp: valid_asid_map_def wp: entry_for_asid_lift assms) + end locale Arch_asid_table_update_eq = Arch + @@ -2913,6 +3126,14 @@ lemma valid_arch_caps_update [iff]: end +context Arch_arch_update_eq begin + +lemma valid_vmid_table_update[iff]: + "valid_vmid_table (f s) = valid_vmid_table s" + by (simp add: arch) + +end + context Arch_p_arch_update_eq begin sublocale Arch_p_asid_table_update_eq diff --git a/proof/invariant-abstract/AARCH64/ArchIpc_AI.thy b/proof/invariant-abstract/AARCH64/ArchIpc_AI.thy index 8b2b381cee..1896b217a4 100644 --- a/proof/invariant-abstract/AARCH64/ArchIpc_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchIpc_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -307,7 +308,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -434,21 +435,21 @@ crunch obj_at[wp, Ipc_AI_assms]: make_arch_fault_msg "\s. P (obj_at P' lemma dmo_addressTranslateS1_valid_machine_state[wp]: "do_machine_op (addressTranslateS1 addr) \ valid_machine_state \" - sorry (* FIXME AARCH64 *) + by (wpsimp wp: dmo_valid_machine_state) crunch vms[wp, Ipc_AI_assms]: make_arch_fault_msg valid_machine_state (wp: as_user_inv getRestartPC_inv mapM_wp' simp: getRegister_def ignore: do_machine_op) lemma dmo_addressTranslateS1_valid_irq_states[wp]: "do_machine_op (addressTranslateS1 addr) \ valid_irq_states \" - sorry (* FIXME AARCH64 *) + by (wpsimp wp: dmo_valid_irq_states) crunch valid_irq_states[wp, Ipc_AI_assms]: make_arch_fault_msg "valid_irq_states" (wp: as_user_inv getRestartPC_inv mapM_wp' simp: getRegister_def ignore: do_machine_op) lemma dmo_addressTranslateS1_cap_refs_respects_device_region[wp]: "do_machine_op (addressTranslateS1 addr) \ cap_refs_respects_device_region \" - sorry (* FIXME AARCH64 *) + by (wpsimp wp: cap_refs_respects_device_region_dmo) crunch cap_refs_respects_device_region[wp, Ipc_AI_assms]: make_arch_fault_msg "cap_refs_respects_device_region" (wp: as_user_inv getRestartPC_inv mapM_wp' simp: getRegister_def ignore: do_machine_op) @@ -467,7 +468,7 @@ named_theorems Ipc_AI_cont_assms lemma dmo_addressTranslateS1_pspace_respects_device_region[wp]: "do_machine_op (addressTranslateS1 addr) \ pspace_respects_device_region \" - sorry (* FIXME AARCH64 *) + by (wpsimp wp: pspace_respects_device_region_dmo) crunch pspace_respects_device_region[wp]: make_fault_msg "pspace_respects_device_region" (wp: as_user_inv getRestartPC_inv mapM_wp' simp: getRegister_def ignore: do_machine_op) @@ -482,7 +483,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (wpsimp simp: do_ipc_transfer_def do_normal_transfer_def transfer_caps_def bind_assoc wp: hoare_vcg_all_lift hoare_drop_imps)+ apply (simp only: ball_conj_distrib[where P="\x. 
real_cte_at x s" for s]) - apply (wpsimp wp: get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wpsimp wp: get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift simp: obj_at_def is_tcb_def)+ apply (simp split: kernel_object.split_asm) @@ -510,14 +511,14 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" by (auto simp: valid_arch_mdb_def) end interpretation Ipc_AI_cont?: Ipc_AI_cont - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales;(fact Ipc_AI_cont_assms)?) - qed +qed end diff --git a/proof/invariant-abstract/AARCH64/ArchKHeap_AI.thy b/proof/invariant-abstract/AARCH64/ArchKHeap_AI.thy index 01413c352c..dab8bd5aef 100644 --- a/proof/invariant-abstract/AARCH64/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchKHeap_AI.thy @@ -9,10 +9,6 @@ theory ArchKHeap_AI imports KHeapPre_AI begin -(* FIXME AARCH64: if_option is missing all cases for None/Some = if .. *) - -declare if_option_Some_eq[simp] - context Arch begin global_naming AARCH64 definition non_vspace_obj :: "kernel_object \ bool" where @@ -413,7 +409,7 @@ lemma valid_vspace_objs_lift_weak: by (intro valid_vspace_objs_lift vspace_obj_pred_vspace_objs assms) lemma set_pt_pts_of: - "\\s. pts_of s p \ None \ P (pts_of s (p \ pt)) \ set_pt p pt \\_ s. P (pts_of s)\" + "\\s. pts_of s p \ None \ P ((pts_of s)(p \ pt)) \ set_pt p pt \\_ s. P (pts_of s)\" unfolding set_pt_def by (wpsimp wp: set_object_wp) (auto elim!: rsubst[where P=P] simp: opt_map_def split: option.splits) @@ -462,7 +458,7 @@ lemma pt_apply_pt_upd_neq: lemma ptes_of_pts_of_upd: "\ is_aligned p pte_bits; pts_of s (table_base pt_t p) = Some pt; pt_t = pt_type pt \ \ (\pt_t' p'. level_pte_of pt_t' p' - (pts_of s (table_base pt_t p \ pt_upd pt (table_index pt_t p) pte))) = + ((pts_of s)(table_base pt_t p \ pt_upd pt (table_index pt_t p) pte))) = ptes_of s (pt_t, p \ pte)" apply (rule ext)+ apply (clarsimp simp: fun_upd2_def) @@ -483,7 +479,7 @@ lemma store_pte_ptes_of_full: done lemma store_pte_ptes_of: - "\\s. ptes_of s pt_t p \ None \ P (ptes_of s pt_t (p \ pte)) \ + "\\s. ptes_of s pt_t p \ None \ P ((ptes_of s pt_t)(p \ pte)) \ store_pte pt_t p pte \\_ s. P (ptes_of s pt_t)\" by (wpsimp wp: store_pte_ptes_of_full simp: fun_upd2_def simp_del: fun_upd_apply) @@ -492,15 +488,11 @@ definition level_of_slot :: "asid \ vspace_ref \ obj_ref "level_of_slot asid vref p s \ GREATEST level. 
vs_lookup_slot level asid vref s = Some (level, p)" -(* FIXME AARCH64: move *) -lemmas vm_level_GreatestI = bit1.GreatestI -lemmas vm_level_Greatest_le = bit1.Greatest_le - lemma level_of_slotI: "\ vs_lookup_slot level' asid vref s = Some (level', p); level < level'\ \ vs_lookup_slot (level_of_slot asid vref p s) asid vref s = Some (level_of_slot asid vref p s, p) \ level < level_of_slot asid vref p s" - by (auto simp: level_of_slot_def dest: vm_level_GreatestI vm_level_Greatest_le) + by (auto simp: level_of_slot_def dest: vm_level.GreatestI vm_level.Greatest_le) lemma pool_for_asid_no_pt: "\ pool_for_asid asid s = Some p; pts_of s p = Some pte; valid_asid_table s; pspace_aligned s \ @@ -516,7 +508,7 @@ lemma is_aligned_table_base[intro!, simp]: lemma ptes_of_other_typ_at: "\ ptes_of s pt_t p = Some pte; typ_at T p s; T \ AArch (APageTable pt_t); pspace_aligned s; pspace_distinct s \ \ False" - apply (clarsimp simp: obj_at_def in_omonad level_pte_of_def) + apply (clarsimp simp: obj_at_def in_omonad level_pte_of_def if_option_eq) apply (rename_tac ko pt) apply (case_tac "table_base (pt_type pt) p = p", simp) apply (frule (1) pspace_alignedD) @@ -565,6 +557,14 @@ lemma pts_of_type_unique: simp: in_omonad is_aligned_no_overflow_mask and_neg_mask_plus_mask_mono pt_bits_def word_and_le) +lemma pts_of_level_type_unique: + "\ pts_of s (table_base (level_type level) pte_ptr) = Some pt; + pts_of s (table_base (level_type level') pte_ptr) = Some pt'; + pt_type pt = level_type level; pt_type pt' = level_type level'; + pspace_distinct s \ + \ level_type level' = level_type level" + by (metis pts_of_type_unique) + (* If we look up a slot for some level, and we know there is a pte for type pt_t at that slot, then it must agree with the level type of the lookup. *) lemma vs_lookup_slot_level_type: @@ -574,9 +574,6 @@ lemma vs_lookup_slot_level_type: \ level_type level = pt_t" by (fastforce simp: ptes_of_Some intro!: pts_of_type_unique dest: valid_vspace_objs_strong_slotD) -(* FIXME AARCH64: move *) -lemmas vm_level_from_top_full_induct = bit1.from_top_full_induct - (* Removing a page table pte entry at p will cause lookups to stop at higher levels than requested. If performing a shallower lookup than the one requested results in p, then any deeper lookup in the updated state will return a higher level result along the original path. *) @@ -590,7 +587,7 @@ lemma vs_lookup_non_PageTablePTE: (if \level'. vs_lookup_slot level' asid vref s = Some (level', p) \ level < level' then vs_lookup_table (level_of_slot asid vref p s) asid vref s else vs_lookup_table level asid vref s)" - apply (induct level rule: vm_level_from_top_full_induct[where y=max_pt_level]) + apply (induct level rule: vm_level.from_top_full_induct[where y=max_pt_level]) apply (clarsimp simp: geq_max_pt_level) apply (erule disjE; clarsimp) apply (rule conjI; clarsimp) @@ -616,10 +613,10 @@ lemma vs_lookup_non_PageTablePTE: subgoal for x old_pte apply (subst vs_lookup_split[where level'="x+1"]) apply (simp add: less_imp_le) - apply (simp add: vm_level_plus_one_leq) + apply (simp add: vm_level.plus_one_leq) apply (subst (2) vs_lookup_split[where level'="x+1"]) apply (simp add: less_imp_le) - apply (simp add: vm_level_plus_one_leq) + apply (simp add: vm_level.plus_one_leq) apply (erule_tac x="x+1" in allE) apply (simp add: less_imp_le) apply (simp split: if_split_asm) @@ -673,7 +670,7 @@ lemma store_pte_non_PageTablePTE_vs_lookup: lemma store_pte_not_ao: "\\s. \pt. 
aobjs_of s (table_base pt_t p) = Some (PageTable pt) \ - P (aobjs_of s (table_base pt_t p \ PageTable (pt_upd pt (table_index pt_t p) pte)))\ + P ((aobjs_of s)(table_base pt_t p \ PageTable (pt_upd pt (table_index pt_t p) pte)))\ store_pte pt_t p pte \\_ s. P (aobjs_of s)\" unfolding store_pte_def set_pt_def @@ -757,6 +754,9 @@ private lemma pred_vspace_objs_of_lift: "f \ \s. P (vspace_objs_ private lemma pred_pts_of_lift: "f \ \s. P (pts_of s) \" by (intro vspace_objs_of_pts_lift pred_vspace_objs_of_lift) +private lemma pred_asid_pools_of_lift: "f \ \s. P (asid_pools_of s) \" + by (intro vspace_objs_of_aps_lift pred_vspace_objs_of_lift) + lemma valid_global_vspace_mappings_lift: "f \valid_global_vspace_mappings\" unfolding valid_global_vspace_mappings_def @@ -773,8 +773,8 @@ lemma valid_global_objs_lift_weak: unfolding valid_global_objs_def by wp lemma valid_asid_map_lift: - "\valid_asid_map\ f \\rv. valid_asid_map\" - by (wpsimp simp: valid_asid_map_def) + "f \valid_asid_map\" + by (wp valid_asid_map_lift_strong arch pred_asid_pools_of_lift) lemma valid_kernel_mappings_lift: "\valid_kernel_mappings\ f \\rv. valid_kernel_mappings\" @@ -956,20 +956,20 @@ crunch device_state_inv: storeWord "\ms. P (device_state ms)" (* some hyp_ref invariants *) lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_ntfn_update: "\s ep val. typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -977,7 +977,7 @@ lemma state_hyp_refs_of_tcb_bound_ntfn_update: lemma state_hyp_refs_of_tcb_state_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -1001,12 +1001,12 @@ lemma default_tcb_not_live[simp]: "\ live (TCB default_tcb)" lemma valid_vcpu_same_type: "\ valid_vcpu v s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_vcpu v (s\kheap := kheap s(p \ k)\)" + \ valid_vcpu v (s\kheap := (kheap s)(p \ k)\)" by (cases v; case_tac vcpu_tcb; clarsimp simp: valid_vcpu_def typ_at_same_type) lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) @@ -1027,14 +1027,14 @@ lemma valid_arch_mdb_lift: (* interface lemma *) lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ 
arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ k)\)" apply (cases ao; simp) apply (fastforce simp: valid_vcpu_def obj_at_def split: option.splits) done lemma valid_vspace_obj_same_type: "\valid_vspace_obj l ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj l ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj l ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) @@ -1042,5 +1042,9 @@ lemma invs_valid_uses[elim!]: "invs s \ valid_uses s" by (simp add: invs_def valid_state_def valid_arch_state_def) +crunches set_object + for vmid_table[wp]: "\s. P (vmid_table s)" + (simp: get_object_def) + end end diff --git a/proof/invariant-abstract/AARCH64/ArchKernelInit_AI.thy b/proof/invariant-abstract/AARCH64/ArchKernelInit_AI.thy index 30adef55f7..5b0a5e6c44 100644 --- a/proof/invariant-abstract/AARCH64/ArchKernelInit_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchKernelInit_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -19,7 +20,7 @@ text \ lemmas ptr_defs = idle_thread_ptr_def init_irq_node_ptr_def arm_global_pt_ptr_def lemmas state_defs = init_A_st_def init_kheap_def init_arch_state_def - init_vspace_uses_def ptr_defs + init_vspace_uses_def ptr_defs global_pt_obj_def lemma is_tcb_TCB[simp]: "is_tcb (TCB t)" by (simp add: is_tcb_def) @@ -58,53 +59,35 @@ lemma pptr_base_num: "pptr_base = 0x8000000000" by (simp add: pptr_base_def pptrBase_def canonical_bit_def) -(* IRQ nodes occupy 11 bits of address space in this RISCV example state: - 6 for irq number, 5 for cte_level_bits. *) +definition irq_node_bits :: nat where + "irq_node_bits = cte_level_bits + LENGTH(irq_len)" + +lemmas irq_node_bits_num = irq_node_bits_def[unfolded cte_level_bits_def, simplified] + +(* Some other architectures need to prove more here, but if the init_irq_node is the last object + in the init state, we only need info about init_irq_node_ptr, and not about + init_irq_node_ptr + mask irq_node bits *) lemma init_irq_ptrs_ineqs: "init_irq_node_ptr + (ucast (irq :: irq) << cte_level_bits) \ init_irq_node_ptr" - "init_irq_node_ptr + (ucast (irq :: irq) << cte_level_bits) + mask cte_level_bits - \ init_irq_node_ptr + mask 11" - "init_irq_node_ptr + (ucast (irq :: irq) << cte_level_bits) - \ init_irq_node_ptr + mask 11" -sorry (* FIXME AARCH64 proof - - have P: "ucast irq < (2 ^ (11 - cte_level_bits) :: machine_word)" + have P: "ucast irq < (2 ^ (irq_node_bits - cte_level_bits) :: machine_word)" apply (rule order_le_less_trans[OF - ucast_le_ucast[where 'a=6 and 'b=64, simplified, THEN iffD2, OF word_n1_ge]]) - apply (simp add: cte_level_bits_def minus_one_norm) + ucast_le_ucast[where 'a=irq_len and 'b=machine_word_len, simplified, THEN iffD2, OF word_n1_ge]]) + apply (simp add: cte_level_bits_def minus_one_norm irq_node_bits_def) done show "init_irq_node_ptr + (ucast (irq :: irq) << cte_level_bits) \ init_irq_node_ptr" - apply (rule is_aligned_no_wrap'[where sz=11]) - apply (simp add: is_aligned_def init_irq_node_ptr_def pptr_base_num) + apply (rule is_aligned_no_wrap'[where sz=irq_node_bits]) + apply (simp add: is_aligned_def init_irq_node_ptr_def pptr_base_num irq_node_bits_num) apply (rule shiftl_less_t2n[OF P]) - apply simp - done - show Q: "init_irq_node_ptr + (ucast (irq :: irq) << cte_level_bits) + mask cte_level_bits - \ init_irq_node_ptr + mask 11" - apply 
(simp only: add_diff_eq[symmetric] add.assoc) - apply (rule word_add_le_mono2) - apply (simp only: trans [OF shiftl_t2n mult.commute] mask_def mult_1) - apply (rule nasty_split_lt[OF P]) - apply (auto simp: cte_level_bits_def init_irq_node_ptr_def mask_def pptr_base_num) + apply (simp add: irq_node_bits_num) done - show "init_irq_node_ptr + (ucast (irq :: irq) << cte_level_bits) - \ init_irq_node_ptr + mask 11" - apply (simp only: add_diff_eq[symmetric] mask_def mult_1 shiftl_t2n mult.commute) - apply (rule word_add_le_mono2) - apply (rule word_le_minus_one_leq) - apply (rule shiftl_less_t2n[OF P, simplified shiftl_t2n mult.commute]) - apply simp - apply (simp add: cte_level_bits_def init_irq_node_ptr_def pptr_base_num) - done -qed *) +qed lemmas init_irq_ptrs_less_ineqs = init_irq_ptrs_ineqs(1)[THEN order_less_le_trans[rotated]] - init_irq_ptrs_ineqs(2-3)[THEN order_le_less_trans] lemmas init_irq_ptrs_all_ineqs[unfolded init_irq_node_ptr_def cte_level_bits_def] = init_irq_ptrs_ineqs(1)[THEN order_trans[rotated]] - init_irq_ptrs_ineqs(2-3)[THEN order_trans] init_irq_ptrs_less_ineqs init_irq_ptrs_less_ineqs[THEN less_imp_neq] init_irq_ptrs_less_ineqs[THEN less_imp_neq, THEN not_sym] @@ -122,11 +105,15 @@ lemma pspace_aligned_init_A: simp_all add: is_aligned_def word_bits_def)[1] done -lemma pspace_distinct_init_A: "pspace_distinct init_A_st" +lemma pspace_distinct_init_A: + notes ineqs = pptr_base_num init_irq_ptrs_all_ineqs[simplified pptr_base_num mask_def, simplified] + shows "pspace_distinct init_A_st" unfolding pspace_distinct_def - apply (clarsimp simp: state_defs bit_simps empty_cnode_bits kernel_elf_base_def - cte_level_bits_def linorder_not_le cong: if_cong) - apply (safe; simp add: pptr_base_num init_irq_ptrs_all_ineqs[simplified pptr_base_num mask_def, simplified]) + apply (clarsimp simp: state_defs empty_cnode_bits cte_level_bits_def linorder_not_le + split del: if_split cong: if_cong) + apply (clarsimp simp: ineqs split: if_split_asm) + apply (simp add: bit_simps ineqs) + apply (simp add: bit_simps ineqs) apply (cut_tac x="init_irq_node_ptr + (ucast irq << cte_level_bits)" and y="init_irq_node_ptr + (ucast irqa << cte_level_bits)" and sz=cte_level_bits in aligned_neq_into_no_overlap; @@ -179,8 +166,7 @@ lemma pool_for_asid_init_A_st[simp]: lemma vspace_for_asid_init_A_st[simp]: "vspace_for_asid asid init_A_st = None" - sorry (* FIXME AARCH64 - by (simp add: vspace_for_asid_def obind_def) *) + by (simp add: vspace_for_asid_def entry_for_asid_def obind_def) lemma global_pt_init_A_st[simp]: "global_pt init_A_st = arm_global_pt_ptr" @@ -194,41 +180,16 @@ lemma ptes_of_init_A_st_global: "ptes_of init_A_st = (\pt_t p. 
if pt_t = VSRootPT_T \ table_base VSRootPT_T p = arm_global_pt_ptr \ is_aligned p pte_bits then Some InvalidPTE else None)" - sorry (* FIXME AARCH64: might need adjustment - by (auto simp add: state_defs level_pte_of_def obind_def opt_map_def split: option.splits) *) + by (rule ext)+ (auto simp: state_defs level_pte_of_def obind_def opt_map_def split: option.splits) lemma pt_walk_init_A_st[simp]: "pt_walk max_pt_level level arm_global_pt_ptr vref (ptes_of init_A_st) = Some (max_pt_level, arm_global_pt_ptr)" apply (subst pt_walk.simps) apply (simp add: in_omonad ptes_of_init_A_st_global - is_aligned_pt_slot_offset_pte global_pte_def) - sorry (* FIXME AARCH64 *) - -(* FIXME AARCH64: statement -- this depends on PA_40 config; potentially not needed -lemma table_index_arm_global_pt_ptr: - "table_index VSRootPT_T (pt_slot_offset max_pt_level arm_global_pt_ptr vref) = - (vref >> (ptTranslationBits NormalPT_T) * 2 + pageBits) && mask (ptTranslationBits NormalPT_T)" - apply (simp add: pt_slot_offset_def pt_index_def pt_bits_left_def level_defs - arm_global_pt_ptr_def pptr_base_def pptrBase_def canonical_bit_def) - apply (subst word_plus_and_or_coroll) - apply word_bitwise - apply simp - apply word_bitwise - apply (clarsimp simp: word_size) - done *) - -lemma kernel_window_1G: - "\ pptr_base \ vref; vref < pptr_base + (1 << 30) \ \ - table_index VSRootPT_T (pt_slot_offset max_pt_level arm_global_pt_ptr vref) = 0x100" - sorry (* FIXME AARCH64 -- this depends on PA_40 config; potentially not needed - apply (simp add: table_index_arm_global_pt_ptr) - apply (simp add: bit_simps pptr_base_def pptrBase_def neg_mask_le_high_bits word_size flip: NOT_mask) - apply (subst (asm) mask_def) - apply (simp add: canonical_bit_def) - apply word_bitwise - apply (clarsimp simp: word_size) - done *) + table_base_pt_slot_offset[where level=max_pt_level, simplified] + is_aligned_pt_slot_offset_pte[where pt_t=VSRootPT_T]) + done lemma kernel_window_init_st: "kernel_window init_A_st = { pptr_base ..< pptr_base + (1 << 30) }" @@ -240,43 +201,32 @@ lemma valid_global_vspace_mappings_init_A_st[simp]: by simp lemma valid_uses_init_A_st[simp]: "valid_uses_2 init_vspace_uses" -sorry (* FIXME AARCH64 proof - - note canonical_bit_def[simp] have [simp]: "pptr_base < pptr_base + 0x40000000" by (simp add: pptr_base_def pptrBase_def) - have [simp]: "p \ canonical_user \ \ pptr_base \ p" for p - by (rule notI, drule (1) order_trans) - (simp add: canonical_user_def mask_def pptr_base_def pptrBase_def) - have [simp]: "p \ canonical_user \ \ kernel_elf_base \ p" for p - by (rule notI, drule (1) order_trans) - (simp add: canonical_user_def mask_def kernel_elf_base_def kernelELFBase_def) - have [simp]: "p \ canonical_user \ \ kdev_base \ p" for p - by (rule notI, drule (1) order_trans) - (simp add: canonical_user_def mask_def kdev_base_def kdevBase_def) - have [simp]: "kernel_elf_base \ p \ \ p < pptr_base + 0x40000000" for p - by (rule notI, drule (1) order_le_less_trans) - (simp add: kernel_elf_base_def kernelELFBase_def pptr_base_def pptrBase_def) - have [simp]: "kdev_base \ p \ \ p < kernel_elf_base + 0x100000" for p - by (rule notI, drule (1) order_le_less_trans) - (simp add: kernel_elf_base_def kernelELFBase_def kdev_base_def kdevBase_def) - have "pptr_base + 0x40000000 < kernel_elf_base + 0x100000" - by (simp add: kernel_elf_base_def kernelELFBase_def pptr_base_def pptrBase_def) - thus ?thesis - using canonical_user_pptr_base pptr_base_kernel_elf_base + have "\p. 
p < pptr_base + 0x40000000 \ canonical_address p" + by (simp add: canonical_address_range canonical_bit_def mask_def pptr_base_def pptrBase_def + word_le_nat_alt word_less_nat_alt) + moreover + have "pptr_base + 0x40000000 < pptrTop" + by (simp add: pptrTop_def pptr_base_def pptrBase_def) + moreover + have "pptr_base + 0x40000000 < kdev_base" + by (simp add: kdev_base_def kdevBase_def pptr_base_def pptrBase_def) + ultimately + show ?thesis unfolding valid_uses_2_def init_vspace_uses_def window_defs - by (auto simp: canonical_user_canonical above_pptr_base_canonical) -qed *) + by (auto simp: kernel_window_range_def) +qed lemma valid_global_arch_objs_init_A_st[simp]: "valid_global_arch_objs init_A_st" - sorry (* FIXME AARCH64 - by (simp add: valid_global_arch_objs_def state_defs level_defs obj_at_def) *) + by (simp add: valid_global_arch_objs_def state_defs level_defs obj_at_def) lemma vspace_for_pool_init_A_st[simp]: "vspace_for_pool ap asid (asid_pools_of init_A_st) = None" - sorry (* FIXME AARCH64 - by (clarsimp simp: vspace_for_pool_def obind_def in_opt_map_eq state_defs split: option.splits) *) + by (clarsimp simp: vspace_for_pool_def obind_def in_opt_map_eq state_defs entry_for_pool_def + split: option.splits) lemma user_region_vs_lookup_target_init_A_st[simp]: "vref \ user_region \ vs_lookup_target bot_level asid vref init_A_st = None" @@ -298,14 +248,6 @@ lemma idle_thread_in_kernel_window_init_arch_state[simp]: apply (rule conjI; unat_arith) done -lemma irq_node_pptr_base_kernel_elf_base: - "\x \ pptr_base + (m + (mask cte_level_bits + 0x3000)); m \ mask (size irq) << cte_level_bits \ - \ \ kernel_elf_base \ x" for irq::irq - apply (simp add: word_size cte_level_bits_def mask_def pptr_base_def pptrBase_def - kernel_elf_base_def kernelELFBase_def canonical_bit_def not_le) - apply unat_arith - done - lemma irq_node_in_kernel_window_init_arch_state': "\ init_irq_node_ptr + m \ x; x \ init_irq_node_ptr + m + mask cte_level_bits; m \ mask (size (irq::irq)) << cte_level_bits\ @@ -315,20 +257,17 @@ lemma irq_node_in_kernel_window_init_arch_state': apply (clarsimp simp: state_defs) apply (rule ccontr, simp add:not_le) apply (drule(1) le_less_trans) - sorry (* FIXME AARCH64 + (* We pick 30 for alignment of pptr_base, because pptr_base is set to 2^40-2^30 *) apply (cut_tac is_aligned_no_wrap'[where ptr=pptr_base - and off="0x3000 + m" - and sz=canonical_bit, simplified]) + and off="0xc000 + m" + and sz=30, simplified]) apply (simp add: add_ac) - apply (auto simp: pptr_base_kernel_elf_base irq_node_pptr_base_kernel_elf_base)[1] apply (simp add: pptr_base_num canonical_bit_def is_aligned_def) apply (simp add: pptr_base_num cte_level_bits_def canonical_bit_def mask_def word_size) apply unat_arith - apply (simp add: kernel_elf_base_def kernelELFBase_def cte_level_bits_def canonical_bit_def - mask_def init_irq_node_ptr_def pptr_base_num word_size) + apply (simp add: cte_level_bits_def mask_def init_irq_node_ptr_def pptr_base_num word_size) apply unat_arith - apply clarsimp - done *) + done lemma irq_node_in_kernel_window_init_arch_state[simp]: "\ init_irq_node_ptr + (ucast (irq::irq) << cte_level_bits) \ x; @@ -341,6 +280,32 @@ lemma irq_node_in_kernel_window_init_arch_state[simp]: apply word_bitwise done +lemma tcb_vcpu_init_arch_tcb_None[simp]: + "tcb_vcpu init_arch_tcb = None" + by (simp add: init_arch_tcb_def) + +lemma pspace_in_kernel_window_init_A_st: + "pspace_in_kernel_window init_A_st" + apply (clarsimp simp: pspace_in_kernel_window_def init_A_st_def init_kheap_def) + apply (safe; 
clarsimp) + apply (clarsimp simp: ptr_defs pptr_base_num) + apply (clarsimp simp: ptr_defs pptr_base_num kernel_window_def init_arch_state_def + init_vspace_uses_def) + apply unat_arith + apply (clarsimp simp: global_pt_obj_def bit_simps ptr_defs pptr_base_num kernel_window_def + init_arch_state_def init_vspace_uses_def + split: if_split_asm) + apply unat_arith + apply unat_arith + apply (clarsimp simp: ptr_defs pptr_base_num kernel_window_def init_arch_state_def init_vspace_uses_def) + apply (clarsimp simp: ptr_defs pptr_base_num kernel_window_def init_arch_state_def init_vspace_uses_def) + apply unat_arith + apply (clarsimp simp: global_pt_obj_def bit_simps ptr_defs pptr_base_num kernel_window_def + init_arch_state_def init_vspace_uses_def + split: if_split_asm) + apply unat_arith + apply unat_arith + done lemma invs_A: "invs init_A_st" (is "invs ?st") @@ -354,24 +319,19 @@ lemma invs_A: apply (simp add: valid_state_def) apply (rule conjI) apply (simp add: valid_pspace_def) - sorry (* FIXME AARCH64 rest of proof won't make sense until example state is tweaked from RISCV64 - version apply (rule conjI) - apply (clarsimp simp: valid_objs_def state_defs wellformed_pte_def global_pte_def + apply (clarsimp simp: valid_objs_def state_defs wellformed_pte_def valid_pt_range_def valid_obj_def valid_vm_rights_def vm_kernel_only_def dom_if_Some cte_level_bits_def) apply (clarsimp simp: valid_tcb_def tcb_cap_cases_def is_master_reply_cap_def valid_cap_def obj_at_def valid_tcb_state_def valid_arch_tcb_def cap_aligned_def word_bits_def valid_ipc_buffer_cap_simps)+ apply (clarsimp simp: valid_cs_def word_bits_def cte_level_bits_def - init_irq_ptrs_all_ineqs valid_tcb_def + valid_tcb_def split: if_split_asm) - apply auto[1] apply (simp add: pspace_aligned_init_A pspace_distinct_init_A) - apply (rule conjI) - apply (clarsimp simp: if_live_then_nonz_cap_def obj_at_def state_defs live_def hyp_live_def) - apply (rule conjI) - apply (clarsimp simp: zombies_final_def cte_wp_at_cases state_defs + apply (clarsimp simp: if_live_then_nonz_cap_def obj_at_def state_defs live_def hyp_live_def arch_live_def) + apply (clarsimp simp: zombies_final_def cte_wp_at_cases state_defs ex_nonz_cap_to_def tcb_cap_cases_def is_zombie_def) apply (clarsimp simp: sym_refs_def state_refs_of_def state_defs state_hyp_refs_of_def) apply (rule conjI) @@ -387,7 +347,7 @@ lemma invs_A: apply (simp add: valid_ioc_def init_A_st_def init_ioc_def cte_wp_at_cases2) apply (intro allI impI, elim exE conjE) apply (case_tac obj, simp_all add: cap_of_def) - apply (clarsimp simp: init_kheap_def init_global_pt_def split: if_split_asm) + apply (clarsimp simp: init_kheap_def split: if_split_asm) apply (rule conjI) apply (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def state_defs valid_arch_idle_def) apply (rule conjI, clarsimp simp: only_idle_def pred_tcb_at_def obj_at_def state_defs) @@ -406,7 +366,9 @@ lemma invs_A: apply (clarsimp simp: valid_arch_state_def) apply (rule conjI) apply (clarsimp simp: valid_asid_table_def state_defs) - apply (simp add: valid_arch_state_def state_defs obj_at_def a_type_def) + apply (simp add: valid_arch_state_def state_defs obj_at_def a_type_def cur_vcpu_2_def + vmid_inv_def is_inv_def vmid_for_asid_2_def obind_def + valid_global_tables_2_def empty_pt_def valid_vmid_table_def) apply (rule conjI) apply (clarsimp simp: valid_irq_node_def obj_at_def state_defs is_cap_table_def wf_empty_bits @@ -428,14 +390,15 @@ lemma invs_A: apply (clarsimp simp: valid_arch_caps_def valid_asid_pool_caps_def unique_table_caps_def 
caps_of_state_init_A_st_Null valid_table_caps_def unique_table_refs_def) apply (clarsimp simp: state_defs) - apply (clarsimp simp: valid_global_objs_def valid_kernel_mappings_def valid_asid_map_def) + apply (clarsimp simp: valid_global_objs_def valid_kernel_mappings_def) apply (rule conjI) apply (clarsimp simp: equal_kernel_mappings_def) apply (rule conjI) - apply (clarsimp simp: pspace_in_kernel_window_def init_A_st_def init_kheap_def) - apply (simp add: cap_refs_in_kernel_window_def caps_of_state_init_A_st_Null - valid_refs_def[unfolded cte_wp_at_caps_of_state]) - done *) + apply (clarsimp simp: valid_asid_map_def entry_for_asid_def init_A_st_def init_arch_state_def + obind_def pool_for_asid_def) + apply (simp add: pspace_in_kernel_window_init_A_st cap_refs_in_kernel_window_def + caps_of_state_init_A_st_Null valid_refs_def[unfolded cte_wp_at_caps_of_state]) + done end diff --git a/proof/invariant-abstract/AARCH64/ArchLevityCatch_AI.thy b/proof/invariant-abstract/AARCH64/ArchLevityCatch_AI.thy index a5186b4a56..925b19f736 100644 --- a/proof/invariant-abstract/AARCH64/ArchLevityCatch_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchLevityCatch_AI.thy @@ -30,7 +30,7 @@ lemma ptrFormPAddr_addFromPPtr[simp]: lemma asid_high_bits_of_add_ucast: "is_aligned w asid_low_bits \ - asid_high_bits_of (ucast (x::9 word) + w) = asid_high_bits_of w" + asid_high_bits_of (ucast (x::asid_low_index) + w) = asid_high_bits_of w" apply (rule word_eqI) apply (simp add: word_size asid_high_bits_of_def nth_ucast nth_shiftr is_aligned_nth) apply (subst word_plus_and_or_coroll) diff --git a/proof/invariant-abstract/AARCH64/ArchRetype_AI.thy b/proof/invariant-abstract/AARCH64/ArchRetype_AI.thy index 51f7731e3d..12791169bb 100644 --- a/proof/invariant-abstract/AARCH64/ArchRetype_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchRetype_AI.thy @@ -32,14 +32,14 @@ lemma slot_bits_def2 [Retype_AI_assms]: "slot_bits = cte_level_bits" definition "no_gs_types \ UNIV - {CapTableObject, - ArchObject SmallPageObj, ArchObject LargePageObj, ArchObject HugePageObj}" + ArchObject SmallPageObj, ArchObject LargePageObj, ArchObject HugePageObj, + ArchObject PageTableObj, ArchObject VSpaceObj}" lemma no_gs_types_simps [simp, Retype_AI_assms]: "Untyped \ no_gs_types" "TCBObject \ no_gs_types" "EndpointObject \ no_gs_types" "NotificationObject \ no_gs_types" - "ArchObject PageTableObj \ no_gs_types" "ArchObject ASIDPoolObj \ no_gs_types" by (simp_all add: no_gs_types_def) @@ -108,9 +108,8 @@ lemma dmo_eq_kernel_restricted [wp, Retype_AI_assms]: "\\s. equal_kernel_mappings (kheap_update (f (kheap s)) s)\ do_machine_op m \\rv s. 
equal_kernel_mappings (kheap_update (f (kheap s)) s)\" - sorry (* FIXME AARCH64 - unfolding do_machine_op_def equal_kernel_mappings_def has_kernel_mappings_def - by (wpsimp simp: in_omonad vspace_for_asid_def pool_for_asid_def) *) + unfolding do_machine_op_def equal_kernel_mappings_def + by (wpsimp simp: in_omonad vspace_for_asid_def pool_for_asid_def) definition "post_retype_invs_check tp \ False" @@ -310,6 +309,14 @@ lemma asid_pools: by (clarsimp simp: in_opt_map_eq s'_def ps_def) (erule pspace_no_overlapC [OF orth _ _ cover vp]) +lemma asid_pools_of': + "asid_pools_of s' p = Some ap \ + asid_pools_of s p = Some ap \ ap = Map.empty \ p \ set (retype_addrs ptr ty n us)" + apply (clarsimp simp: in_opt_map_eq s'_def ps_def split: if_split_asm) + apply (auto simp: default_object_def default_arch_object_def empty_pt_def tyunt + split: apiobject_type.splits aobject_type.splits) + done + lemma pts_of: "pts_of s p = Some pt \ pts_of s' p = Some pt" by (clarsimp simp: in_opt_map_eq s'_def ps_def) @@ -317,12 +324,11 @@ lemma pts_of: lemma pts_of': "pts_of s' p = Some pt \ - pts_of s p = Some pt \ pt = (empty_pt (pt_type pt_t)) \ p \ set (retype_addrs ptr ty n us)" - sorry (* FIXME AARCH64 + pts_of s p = Some pt \ pt = (empty_pt (pt_type pt)) \ p \ set (retype_addrs ptr ty n us)" apply (clarsimp simp: in_opt_map_eq s'_def ps_def split: if_split_asm) - apply (simp add: default_object_def default_arch_object_def tyunt + apply (auto simp: default_object_def default_arch_object_def empty_pt_def tyunt split: apiobject_type.splits aobject_type.splits) - done *) + done lemma valid_asid_table: "valid_asid_table s \ valid_asid_table s'" @@ -334,19 +340,17 @@ lemma valid_global_arch_objs: lemma ptes_of: "ptes_of s pt_t p = Some pte \ ptes_of s' pt_t p = Some pte" - sorry (* FIXME AARCH64 - by (simp add: pte_of_def obind_def pts_of split: option.splits) *) + by (auto simp: level_pte_of_def obind_def pts_of if_option split: option.splits) lemma default_empty: "default_object ty dev us = ArchObj (PageTable pt) \ pt = (empty_pt (pt_type pt))" - sorry (* FIXME AARCH64 ^exists pt_t? 
- by (simp add: default_object_def default_arch_object_def tyunt - split: apiobject_type.splits aobject_type.splits) *) + by (auto simp: default_object_def default_arch_object_def empty_pt_def tyunt + split: apiobject_type.splits aobject_type.splits) lemma ptes_of': "ptes_of s' pt_t p = Some pte \ ptes_of s pt_t p = Some pte \ pte = InvalidPTE" - sorry (* FIXME AARCH64 - by (fastforce simp: ptes_of_def in_omonad s'_def ps_def split: if_splits dest: default_empty) *) + by (cases pt_t; + fastforce simp: level_pte_of_def in_omonad s'_def ps_def split: if_splits dest: default_empty) lemma pt_walk: "pt_walk top_level bot_level pt vref (ptes_of s) = Some (level, p) \ @@ -397,44 +401,66 @@ lemma global_no_retype: lemma global_pts_no_retype: "valid_global_refs s \ global_pt s \ set (retype_addrs ptr ty n us)" - sorry (* FIXME AARCH64 - by (drule riscv_global_pts_global_ref, erule global_no_retype) *) + by (erule global_no_retype[rotated], simp add: valid_global_refs_def) lemma vcpu_hyp_live_of': "vcpu_hyp_live_of s' = vcpu_hyp_live_of s" - sorry (* FIXME AARCH64 VCPU *) + apply (clarsimp simp: in_opt_map_eq s'_def ps_def split: if_split_asm) + apply (clarsimp del: ext intro!: ext split: option.splits) + apply (rule iffI; clarsimp simp: in_omonad split: if_splits) + apply (fastforce simp: default_object_def default_arch_object_def default_vcpu_def tyunt + split: apiobject_type.splits aobject_type.splits) + apply (erule (1) pspace_no_overlapC[OF orth _ _ cover vp]) + done + +lemma dom_vmid_for_asid[simplified]: + "dom (vmid_for_asid s') = dom (vmid_for_asid s)" + apply (clarsimp simp: dom_def s'_def ps_def obj_at_def vmid_for_asid_def in_omonad + split: if_split_asm) + apply (rule set_eqI) + apply (rule iffI; clarsimp simp: entry_for_pool_def in_omonad split: if_split_asm) + apply (fastforce simp: default_object_def default_arch_object_def default_vcpu_def tyunt + split: apiobject_type.splits aobject_type.splits) + apply (fastforce elim: pspace_no_overlapC[OF orth _ _ cover vp]) + done lemma vmid_inv': "vmid_inv s \ vmid_inv s'" - sorry (* FIXME AARCH64 *) + by (clarsimp simp: vmid_inv_def is_inv_def dom_vmid_for_asid) + (fastforce simp: s'_def ps_def vmid_for_asid_def entry_for_pool_def in_omonad + elim!: pspace_no_overlapC[OF orth _ _ cover vp]) + +lemma valid_global_tables': + "valid_global_tables s \ valid_global_tables s'" + unfolding valid_global_tables_2_def + by (simp add: pts_of) lemma valid_arch_state: "valid_arch_state s \ valid_arch_state s'" apply (simp add: valid_arch_state_def valid_asid_table vcpu_hyp_live_of' vmid_inv' + valid_global_arch_objs valid_global_tables' del: arch_state) apply simp - sorry (* FIXME AARCH64 *) + done lemma vspace_for_pool1: "(vspace_for_pool asid p (asid_pools_of s) = Some pt) \ vspace_for_pool asid p (asid_pools_of s') = Some pt" - sorry (* FIXME AARCH64 - by (simp add: vspace_for_pool_def asid_pools obind_def split: option.splits) *) + by (simp add: vspace_for_pool_def entry_for_pool_def asid_pools obind_def split: option.splits) lemma vspace_for_pool2: "vspace_for_pool asid p (asid_pools_of s') = Some pt \ vspace_for_pool asid p (asid_pools_of s) = Some pt" - apply (clarsimp simp: vspace_for_pool_def in_omonad s'_def ps_def split: if_split_asm) - sorry (* FIXME AARCH64 + apply (clarsimp simp: vspace_for_pool_def entry_for_pool_def in_omonad s'_def ps_def + split: if_split_asm) apply (clarsimp simp: default_object_def default_arch_object_def tyunt split: apiobject_type.splits aobject_type.splits) - done *) + done lemma vspace_for_pool[simp]: 
"(vspace_for_pool asid p (asid_pools_of s') = Some pt) = (vspace_for_pool asid p (asid_pools_of s) = Some pt)" - sorry (* FIXME AARCH64 - by (rule iffI, erule vspace_for_pool2, erule vspace_for_pool1) *) + by (rule iffI, erule vspace_for_pool2, erule vspace_for_pool1) lemma vs_lookup_table': "(vs_lookup_table level asid vref s' = Some (level, p)) = @@ -458,10 +484,8 @@ lemma wellformed_default_obj[Retype_AI_assms]: "\ ptr' \ set (retype_addrs ptr ty n us); kheap s ptr' = Some (ArchObj ao); arch_valid_obj ao s\ \ arch_valid_obj ao s'" - sorry (* FIXME AARCH64 VCPU: valid_vcpu s' - apply (cases ao; clarsimp elim!: obj_at_pres - split: arch_kernel_obj.splits option.splits)+ - done *) + by (cases ao; clarsimp elim!: obj_at_pres simp: valid_vcpu_def + split: arch_kernel_obj.splits option.splits)+ end @@ -471,16 +495,13 @@ context retype_region_proofs_arch begin lemma hyp_refs_eq: "state_hyp_refs_of s' = state_hyp_refs_of s" unfolding s'_def ps_def - sorry (* FIXME AARCH64 VCPU apply (rule ext) apply (clarsimp simp: state_hyp_refs_of_def orthr split: option.splits) apply (cases ty; simp add: tyunt default_object_def default_tcb_def hyp_refs_of_def tcb_hyp_refs_def default_arch_tcb_def) - apply (rename_tac ao) - apply (clarsimp simp: refs_of_a_def ARM_HYP.vcpu_tcb_refs_def default_arch_object_def - ARM_A.default_vcpu_def + apply (clarsimp simp: refs_of_ao_def vcpu_tcb_refs_def default_arch_object_def default_vcpu_def split: aobject_type.splits) - done *) + done lemma obj_at_valid_pte: "\valid_pte level pte s; \P p. obj_at P p s \ obj_at P p s'\ @@ -507,35 +528,42 @@ lemma valid_vspace_obj_pres: lemma valid_vspace_objs': assumes va: "valid_vspace_objs s" + assumes asid: "valid_asid_table s" + assumes pspace: "pspace_aligned s" shows "valid_vspace_objs s'" proof fix level p ao asid vref assume p: "vs_lookup_table level asid (vref_for_level vref (level + 1)) s' = Some (level, p)" assume vref: "vref \ user_region" - assume "vspace_objs_of s' p = Some ao" - hence "vspace_objs_of s p = Some ao \ ArchObj ao = default_object ty dev us" - by (simp add: ps_def obj_at_def s'_def in_opt_map_eq vspace_obj_of_Some split: if_split_asm) - moreover - { assume "ArchObj ao = default_object ty dev us" with tyunt - have "valid_vspace_obj level ao s'" - apply - - apply (rule valid_vspace_obj_default; assumption?) 
- sorry (* FIXME AARCH64: this argument is no longer valid; we have to show that p cannot - be on a lookup path in retype *) + assume vsp: "vspace_objs_of s' p = Some ao" + + { assume "level = asid_pool_level" + hence "valid_vspace_obj level ao s'" using va p vref vsp asid + apply (simp add: vs_lookup_table') + apply (frule (1) vs_lookup_asid_pool) + apply (rule valid_vspace_obj_pres) + apply (auto dest!: valid_vspace_objsD + simp: s'_def ps_def opt_map_def vref_for_level_user_region + split: option.splits if_split_asm + elim!: pspace_no_overlapC[OF orth _ _ cover vp]) + done } - moreover - { assume "vspace_objs_of s p = Some ao" - with va p vref - have "valid_vspace_obj level ao s" - by (auto simp: vs_lookup_table' vref_for_level_user_region elim: valid_vspace_objsD) - hence "valid_vspace_obj level ao s'" - by (rule valid_vspace_obj_pres) + moreover { + assume "level \ max_pt_level" + hence "valid_vspace_obj level ao s'" using va p vref vsp pspace asid + apply (simp add: vs_lookup_table') + apply (drule (1) valid_vspace_objs_strongD; simp add: vref_for_level_user_region) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (subst (asm) s'_def, subst (asm) ps_def) + apply (clarsimp simp: split: if_split_asm) + apply (erule (1) pspace_no_overlapC[OF orth _ _ cover vp]) + apply (fastforce intro: obj_at_valid_pte[OF _ obj_at_pres]) + done } - ultimately - show "valid_vspace_obj level ao s'" by blast + ultimately show "valid_vspace_obj level ao s'" + by fastforce qed - sublocale retype_region_proofs_gen?: retype_region_proofs_gen by (unfold_locales, auto simp: hyp_refs_eq[simplified s'_def ps_def] @@ -696,12 +724,19 @@ lemma valid_kernel_mappings: lemma valid_asid_map: "valid_asid_map s \ valid_asid_map s'" - by (clarsimp simp: valid_asid_map_def) + apply (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq pool_for_asid_def + entry_for_pool_def) + apply (fastforce dest!: asid_pools_of') + done lemma vspace_for_asid: "vspace_for_asid asid s' = Some pt \ vspace_for_asid asid s = Some pt" - sorry (* FIXME AARCH64 - by (clarsimp simp: vspace_for_asid_def in_omonad pool_for_asid_def) *) + apply (clarsimp simp: s'_def ps_def vspace_for_asid_def entry_for_asid_def pool_for_asid_def + in_omonad entry_for_pool_def + split: if_split_asm) + apply (fastforce simp: default_object_def default_arch_object_def tyunt + split: apiobject_type.splits aobject_type.splits) + done lemma equal_kernel_mappings: "equal_kernel_mappings s'" @@ -803,7 +838,8 @@ lemma post_retype_invs: valid_global_refs valid_arch_state valid_irq_node_def obj_at_pres valid_arch_caps valid_global_objs_def - valid_vspace_objs' valid_irq_handlers + valid_vspace_objs'[OF _ valid_arch_state_asid_table valid_pspace_aligned2] + valid_irq_handlers valid_mdb_rep2 mdb_and_revokable valid_pspace cur_tcb only_idle valid_kernel_mappings valid_asid_map @@ -821,7 +857,8 @@ sublocale retype_region_proofs_invs?: retype_region_proofs_invs where region_in_kernel_window = region_in_kernel_window and post_retype_invs_check = post_retype_invs_check and post_retype_invs = post_retype_invs - using post_retype_invs valid_cap valid_global_refs valid_arch_state valid_vspace_objs' + using post_retype_invs valid_cap valid_global_refs valid_arch_state + valid_vspace_objs'[OF _ invs_valid_asid_table invs_psp_aligned] by unfold_locales (auto simp: s'_def ps_def) (* local_setup \note_new_facts pre_ctxt_1\ *) diff --git a/proof/invariant-abstract/AARCH64/ArchSchedule_AI.thy b/proof/invariant-abstract/AARCH64/ArchSchedule_AI.thy index 
8d7a062c35..90514e9676 100644 --- a/proof/invariant-abstract/AARCH64/ArchSchedule_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchSchedule_AI.thy @@ -31,33 +31,23 @@ lemma dmo_mapM_storeWord_0_invs[wp,Schedule_AI_asms]: global_naming Arch lemma arch_stt_invs [wp,Schedule_AI_asms]: - "\invs\ arch_switch_to_thread t' \\_. invs\" + "arch_switch_to_thread t' \invs\" apply (wpsimp simp: arch_switch_to_thread_def) by (rule sym_refs_VCPU_hyp_live; fastforce) lemma arch_stt_tcb [wp,Schedule_AI_asms]: - "\tcb_at t'\ arch_switch_to_thread t' \\_. tcb_at t'\" - apply (simp add: arch_switch_to_thread_def) - apply (wp) - sorry (* FIXME AARCH64 VCPU - done *) + "arch_switch_to_thread t' \tcb_at t'\" + by (wpsimp simp: arch_switch_to_thread_def wp: tcb_at_typ_at) lemma arch_stt_runnable[Schedule_AI_asms]: - "\st_tcb_at runnable t\ arch_switch_to_thread t \\r . st_tcb_at runnable t\" - apply (simp add: arch_switch_to_thread_def) - apply wp - sorry (* FIXME AARCH64 VCPU - done *) + "arch_switch_to_thread t \st_tcb_at runnable t\" + by (wpsimp simp: arch_switch_to_thread_def) lemma idle_strg: "thread = idle_thread s \ invs s \ invs (s\cur_thread := thread\)" by (clarsimp simp: invs_def valid_state_def valid_idle_def cur_tcb_def pred_tcb_at_def valid_machine_state_def obj_at_def is_tcb_def) -lemma set_vcpu_ct[wp]: - "\\s. P (cur_thread s)\ set_vcpu v v' \\_ s. P (cur_thread s)\" - by (wpsimp simp: set_vcpu_def wp: get_object_wp) - crunches vcpu_update, vgic_update, vgic_update_lr, vcpu_restore_reg_range, vcpu_save_reg_range, vcpu_enable, vcpu_disable, vcpu_save, vcpu_restore, vcpu_switch, vcpu_save @@ -66,16 +56,12 @@ crunches (wp: mapM_x_wp mapM_wp subset_refl) lemma arch_stit_invs[wp, Schedule_AI_asms]: - "\invs\ arch_switch_to_idle_thread \\r. invs\" - sorry (* FIXME AARCH64 VSpace & VCPU - by (wpsimp simp: arch_switch_to_idle_thread_def) *) + "arch_switch_to_idle_thread \invs\" + by (wpsimp simp: arch_switch_to_idle_thread_def) lemma arch_stit_tcb_at[wp]: - "\tcb_at t\ arch_switch_to_idle_thread \\r. tcb_at t\" - apply (simp add: arch_switch_to_idle_thread_def ) - apply (wp tcb_at_typ_at) - sorry (* FIXME AARCH64 VSpace & VCPU - done *) + "arch_switch_to_idle_thread \tcb_at t\" + by (wpsimp simp: arch_switch_to_idle_thread_def wp: tcb_at_typ_at) crunches set_vm_root for ct[wp]: "\s. P (cur_thread s)" @@ -83,30 +69,27 @@ crunches set_vm_root (simp: crunch_simps wp: hoare_drop_imps) lemma arch_stit_activatable[wp, Schedule_AI_asms]: - "\ct_in_state activatable\ arch_switch_to_idle_thread \\rv . ct_in_state activatable\" + "arch_switch_to_idle_thread \ct_in_state activatable\" apply (clarsimp simp: arch_switch_to_idle_thread_def) apply (wpsimp simp: ct_in_state_def wp: ct_in_state_thread_state_lift) - sorry (* FIXME AARCH64 VCPU - done *) + done lemma stit_invs [wp,Schedule_AI_asms]: - "\invs\ switch_to_idle_thread \\rv. invs\" + "switch_to_idle_thread \invs\" apply (simp add: switch_to_idle_thread_def arch_switch_to_idle_thread_def) apply (wpsimp|strengthen idle_strg)+ - sorry (* FIXME AARCH64 VSpace & VCPU - done *) + done lemma stit_activatable[Schedule_AI_asms]: - "\invs\ switch_to_idle_thread \\rv . ct_in_state activatable\" + "\invs\ switch_to_idle_thread \\_. 
ct_in_state activatable\" apply (simp add: switch_to_idle_thread_def arch_switch_to_idle_thread_def) - apply (wp | simp add: ct_in_state_def)+ - sorry (* FIXME AARCH64 VCPU + apply (wpsimp simp: ct_in_state_def) apply (clarsimp simp: invs_def valid_state_def cur_tcb_def valid_idle_def elim!: pred_tcb_weaken_strongerE) - done *) + done lemma stt_invs [wp,Schedule_AI_asms]: - "\invs\ switch_to_thread t' \\_. invs\" + "switch_to_thread t' \invs\" apply (simp add: switch_to_thread_def) apply wp apply (simp add: trans_state_update[symmetric] del: trans_state_update) @@ -122,17 +105,17 @@ lemma stt_invs [wp,Schedule_AI_asms]: end interpretation Schedule_AI_U?: Schedule_AI_U - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case - by (intro_locales; (unfold_locales; fact Schedule_AI_asms)?) - qed + by (intro_locales; (unfold_locales; fact Schedule_AI_asms)?) +qed interpretation Schedule_AI?: Schedule_AI - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case - by (intro_locales; (unfold_locales; fact Schedule_AI_asms)?) - qed + by (intro_locales; (unfold_locales; fact Schedule_AI_asms)?) +qed end diff --git a/proof/invariant-abstract/AARCH64/ArchSyscall_AI.thy b/proof/invariant-abstract/AARCH64/ArchSyscall_AI.thy index 13c46fec69..c70f75306c 100644 --- a/proof/invariant-abstract/AARCH64/ArchSyscall_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchSyscall_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -49,37 +50,24 @@ lemma eq_no_cap_to_obj_with_diff_ref [Syscall_AI_assms]: table_cap_ref_mask_cap Ball_def) done -crunches getDFSR, getFAR,getIFSR +crunches getESR, getFAR for inv[wp]: "P" -lemma do_machine_op_getDFSR_inv[wp]: - "do_machine_op getDFSR \P\" +lemma do_machine_op_getESR_inv[wp]: + "do_machine_op getESR \P\" by (rule dmo_inv) wp lemma do_machine_op_getFAR_inv[wp]: "do_machine_op getFAR \P\" by (rule dmo_inv) wp -lemma do_machine_op_getIFSR_inv[wp]: - "do_machine_op getIFSR \P\" - by (rule dmo_inv) wp - lemma hv_invs[wp, Syscall_AI_assms]: "\invs\ handle_vm_fault t' flt \\r. invs\" - unfolding handle_vm_fault_def by (cases flt; wpsimp) - -lemma hv_inv_ex [Syscall_AI_assms]: - "\P\ handle_vm_fault t vp \\_ _. True\, \\_. P\" - unfolding handle_vm_fault_def - sorry (* FIXME AARCH64 addressTranslateS1 - by (cases vp; wpsimp wp: dmo_inv getRestartPC_inv det_getRestartPC as_user_inv) *) + unfolding handle_vm_fault_def by (cases flt; wpsimp wp: dmo_invs_lift) lemma handle_vm_fault_valid_fault[wp, Syscall_AI_assms]: "\\\ handle_vm_fault thread ft -,\\rv s. valid_fault rv\" unfolding handle_vm_fault_def - apply (cases ft, simp_all) - apply (wp | simp add: valid_fault_def)+ - done - + by (cases ft; wpsimp simp: valid_fault_def) lemma hvmf_active [Syscall_AI_assms]: "\st_tcb_at active t\ handle_vm_fault t w \\rv. st_tcb_at active t\" @@ -92,18 +80,18 @@ lemma hvmf_ex_cap[wp, Syscall_AI_assms]: declare arch_get_sanitise_register_info_ex_nonz_cap_to[Syscall_AI_assms] lemma hh_invs[wp, Syscall_AI_assms]: - "\invs and ct_active and st_tcb_at active thread and ex_nonz_cap_to_thread\ - handle_hypervisor_fault thread fault + "\invs and ct_active and st_tcb_at active thread and ex_nonz_cap_to thread\ + handle_hypervisor_fault thread fault \\rv. 
invs\" - sorry (* FIXME AARCH64 ARMVCPUFault - by (cases fault) wpsimp *) + supply if_split[split del] + by (cases fault) (wpsimp simp: valid_fault_def isFpuEnable_def wp: dmo_invs_lift hoare_drop_imps) end global_interpretation Syscall_AI?: Syscall_AI - proof goal_cases +proof goal_cases interpret Arch . case 1 show ?case by (unfold_locales; (fact Syscall_AI_assms)?) - qed +qed end diff --git a/proof/invariant-abstract/AARCH64/ArchTcb_AI.thy b/proof/invariant-abstract/AARCH64/ArchTcb_AI.thy index f13e4ae451..056b1eab53 100644 --- a/proof/invariant-abstract/AARCH64/ArchTcb_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchTcb_AI.thy @@ -21,23 +21,19 @@ lemma activate_idle_invs[Tcb_AI_asms]: by (simp add: arch_activate_idle_thread_def) -lemma empty_fail_getRegister [intro!, simp, Tcb_AI_asms]: - "empty_fail (getRegister r)" - by (simp add: getRegister_def) - +declare getRegister_empty_fail [Tcb_AI_asms] lemma same_object_also_valid: (* arch specific *) "\ same_object_as cap cap'; s \ cap'; wellformed_cap cap; cap_asid cap = None \ (\asid. cap_asid cap = Some asid \ 0 < asid \ asid \ 2^asid_bits - 1); cap_vptr cap = None; cap_asid_base cap = None \ \ s \ cap" - sorry (* FIXME AARCH64 apply (cases cap, (clarsimp simp: same_object_as_def is_cap_simps cap_asid_def wellformed_cap_def wellformed_acap_def valid_cap_def bits_of_def cap_aligned_def split: cap.split_asm arch_cap.split_asm option.splits)+) - done *) + done lemma same_object_obj_refs[Tcb_AI_asms]: "\ same_object_as cap cap' \ @@ -140,11 +136,10 @@ lemma finalise_cap_not_cte_wp_at[Tcb_AI_asms]: | rule impI | rule hoare_drop_imps)+ apply (clarsimp simp: ball_ran_eq x) - sorry (* FIXME AARCH64 VCPU apply (wp delete_one_caps_of_state | rule impI | simp add: deleting_irq_handler_def get_irq_slot_def x ball_ran_eq)+ - done *) + done lemma table_cap_ref_max_free_index_upd[simp,Tcb_AI_asms]: @@ -156,10 +151,10 @@ end global_interpretation Tcb_AI_1?: Tcb_AI_1 where state_ext_t = state_ext_t and is_cnode_or_valid_arch = is_cnode_or_valid_arch - proof goal_cases - interpret Arch . - case 1 show ?case by (unfold_locales; (fact Tcb_AI_asms)?) - qed +proof goal_cases + interpret Arch . + case 1 show ?case by (unfold_locales; (fact Tcb_AI_asms)?) +qed context Arch begin global_naming AARCH64 @@ -184,7 +179,6 @@ lemma cap_delete_no_cap_to_obj_asid[wp, Tcb_AI_asms]: apply (simp add: cap_delete_def no_cap_to_obj_with_diff_ref_ran_caps_form) apply wp - apply simp apply (rule use_spec) apply (rule rec_del_all_caps_in_range) apply (simp | rule obj_ref_none_no_asid)+ @@ -238,19 +232,19 @@ lemma tc_invs[Tcb_AI_asms]: \\rv. 
invs\" apply (rule hoare_gen_asm)+ apply (simp add: split_def set_mcpriority_def cong: option.case_cong) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply wp - apply ((simp only: simp_thms - | (simp add: conj_comms del: hoare_True_E_R, + apply ((simp only: simp_thms cong: conj_cong + | (simp add: conj_comms, strengthen imp_consequent[where Q="x = None" for x], simp cong: conj_cong) | rule wp_split_const_if wp_split_const_if_R - hoare_vcg_all_lift_R + hoare_vcg_all_liftE_R hoare_vcg_E_elim hoare_vcg_const_imp_lift_R hoare_vcg_R_conj | (wp out_invs_trivial case_option_wpE cap_delete_deletes cap_delete_valid_cap cap_insert_valid_cap out_cte_at cap_insert_cte_at cap_delete_cte_at out_valid_cap - hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_invs_trivial[OF ball_tcb_cap_casesI] hoare_vcg_all_lift thread_set_valid_cap out_emptyable @@ -264,24 +258,20 @@ lemma tc_invs[Tcb_AI_asms]: checked_insert_no_cap_to out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid - static_imp_wp static_imp_conj_wp)[1] + hoare_weak_lift_imp hoare_weak_lift_imp_conj)[1] | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def - del: hoare_True_E_R | wpc | strengthen use_no_cap_to_obj_asid_strg use_no_cap_to_obj_asid_strg[simplified conj_comms] tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] tcb_cap_always_valid_strg[where p="tcb_cnode_index (Suc 0)"])+) - apply (intro conjI impI; clarsimp?; - (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] - is_cap_simps is_valid_vtable_root_def - is_cnode_or_valid_arch_def tcb_cap_valid_def - invs_valid_objs cap_asid_def vs_cap_ref_def - case_bool_If valid_ipc_buffer_cap_def option_case_eq_None - | split cap.splits arch_cap.splits if_splits)+) - sorry (* FIXME AARCH64 - apply (simp split: option.splits) - done *) + by (intro conjI impI; clarsimp?; + (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] + is_cap_simps is_valid_vtable_root_def + is_cnode_or_valid_arch_def tcb_cap_valid_def + invs_valid_objs cap_asid_def vs_cap_ref_def + case_bool_If valid_ipc_buffer_cap_def option_case_eq_None + | split cap.splits arch_cap.splits if_splits pt_type.splits option.splits)+) lemma check_valid_ipc_buffer_inv: (* arch_specific *) "\P\ check_valid_ipc_buffer vptr cap \\rv. P\" @@ -371,9 +361,8 @@ lemma update_cap_valid[Tcb_AI_asms]: done -(* FIXME AARCH64 VCPU crunch pred_tcb_at: switch_to_thread "pred_tcb_at proj P t" - (wp: crunch_wps simp: crunch_simps) *) + (wp: crunch_wps simp: crunch_simps) crunch typ_at[wp]: invoke_tcb "\s. P (typ_at T p s)" (ignore: check_cap_at setNextPC zipWithM @@ -389,9 +378,9 @@ end global_interpretation Tcb_AI?: Tcb_AI where is_cnode_or_valid_arch = AARCH64.is_cnode_or_valid_arch - proof goal_cases - interpret Arch . - case 1 show ?case by (unfold_locales; (fact Tcb_AI_asms)?) - qed +proof goal_cases + interpret Arch . + case 1 show ?case by (unfold_locales; (fact Tcb_AI_asms)?) 
+qed end diff --git a/proof/invariant-abstract/AARCH64/ArchUntyped_AI.thy b/proof/invariant-abstract/AARCH64/ArchUntyped_AI.thy index 1fbb0df03d..69869507e8 100644 --- a/proof/invariant-abstract/AARCH64/ArchUntyped_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchUntyped_AI.thy @@ -299,13 +299,12 @@ lemma create_cap_valid_arch_caps[wp, Untyped_AI_assms]: apply (rule conjI) apply (auto simp: is_cap_simps valid_cap_def second_level_tables_def obj_at_def nonempty_table_def a_type_simps in_omonad)[1] - sorry (* FIXME AARCH64 apply (clarsimp simp del: imp_disjL) apply (case_tac "\x. x \ obj_refs cap") apply (clarsimp dest!: obj_ref_elemD) apply fastforce apply (auto simp: is_cap_simps)[1] - done *) + done lemma create_cap_cap_refs_in_kernel_window[wp, Untyped_AI_assms]: @@ -340,9 +339,8 @@ lemma nonempty_default[simp, Untyped_AI_assms]: "tp \ Untyped \ \ nonempty_table S (default_object tp dev us)" apply (case_tac tp, simp_all add: default_object_def nonempty_table_def a_type_def) apply (rename_tac aobject_type) - apply (case_tac aobject_type; simp add: default_arch_object_def) - sorry (* FIXME AARCH64 - done *) + apply (case_tac aobject_type; simp add: default_arch_object_def empty_pt_def) + done crunch cte_wp_at_iin[wp]: init_arch_objects "\s. P (cte_wp_at (P' (interrupt_irq_node s)) p s)" @@ -358,8 +356,7 @@ lemma obj_is_device_vui_eq[Untyped_AI_assms]: apply (intro impI conjI allI, simp_all add: is_frame_type_def default_object_def) apply (simp add: default_arch_object_def split: aobject_type.split) apply (auto simp: arch_is_frame_type_def) - sorry (* FIXME AARCH64 - done *) + done end diff --git a/proof/invariant-abstract/AARCH64/ArchVCPU_AI.thy b/proof/invariant-abstract/AARCH64/ArchVCPU_AI.thy index b7dc70c657..f82074db89 100644 --- a/proof/invariant-abstract/AARCH64/ArchVCPU_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchVCPU_AI.thy @@ -9,9 +9,9 @@ theory ArchVCPU_AI imports AInvs begin -context Arch begin global_naming ARM_HYP (*FIXME: arch_split*) +context Arch begin global_naming AARCH64 (*FIXME: arch_split*) -(* FIXME AARCH64: move to ArchInvariants_AI and use there *) +(* This is similar to cur_vcpu_2, but not close enough to reuse. *) definition active_cur_vcpu_of :: "'z state \ obj_ref option" where "active_cur_vcpu_of s \ case arm_current_vcpu (arch_state s) of Some (vr, True) \ Some vr | _ \ None" @@ -76,7 +76,7 @@ crunches do_machine_op (wp: valid_cur_vcpu_lift_cur_thread_update valid_cur_vcpu_lift crunch_wps) lemma valid_cur_vcpu_vcpu_update[simp]: - "vcpu_at v s \ valid_cur_vcpu (s\kheap := kheap s(v \ ArchObj (VCPU vcpu))\) = valid_cur_vcpu s" + "vcpu_at v s \ valid_cur_vcpu (s\kheap := (kheap s)(v \ ArchObj (VCPU vcpu))\) = valid_cur_vcpu s" by (clarsimp simp: valid_cur_vcpu_def active_cur_vcpu_of_def pred_tcb_at_def obj_at_def) crunches vcpu_save_reg, vcpu_write_reg, save_virt_timer, vgic_update, vcpu_disable @@ -91,16 +91,21 @@ lemma set_vcpu_arch_tcb_at_cur_thread[wp]: crunches vcpu_disable, vcpu_restore, vcpu_save, set_vm_root for arch_tcb_at_cur_thread[wp]: "\s. arch_tcb_at P (cur_thread s) s" - (wp: crunch_wps ignore: set_object) (* FIXME AARCH64: set_object shouldn't be here *) + (wp: crunch_wps ignore: set_object) -lemma invalidate_asid_active_cur_vcpu_of[wp]: - "\\s. P (active_cur_vcpu_of s)\ invalidate_asid param_a \\_ s. 
P (active_cur_vcpu_of s)\" - sorry (* FIXME AARCH64: crunch was able to deal with this on ARM_HYP with old defs *) - -crunches vcpu_update, do_machine_op, invalidate_asid +crunches vcpu_update, do_machine_op, invalidate_asid, invalidate_asid, invalidate_vmid_entry for active_cur_vcpu_of[wp]: "\s. P (active_cur_vcpu_of s)" (simp: active_cur_vcpu_of_def) +lemma active_cur_vcpu_of_arch_upd_eq: + "arm_current_vcpu s' = arm_current_vcpu (arch_state s) \ + active_cur_vcpu_of (s\arch_state := s'\) = active_cur_vcpu_of s" + unfolding active_cur_vcpu_of_def by simp + +crunches get_vmid, set_vm_root + for active_cur_vcpu_of[wp]: "\s. P (active_cur_vcpu_of s)" + (simp: active_cur_vcpu_of_arch_upd_eq) + lemma vcpu_save_reg_active_cur_vcpu_of[wp]: "vcpu_save_reg vr reg \\s. P (active_cur_vcpu_of s)\" by (wpsimp simp: vcpu_save_reg_def) @@ -134,27 +139,6 @@ lemma valid_cur_vcpu_arm_vmid_table_upd[simp]: "valid_cur_vcpu (s\arch_state := arch_state s \arm_vmid_table := x \\) = valid_cur_vcpu s" by (clarsimp simp: valid_cur_vcpu_def) -lemma get_vmid_active_cur_vcpu_of[wp]: - "get_vmid asid \\s. P (active_cur_vcpu_of s)\" - unfolding get_vmid_def - sorry (* FIXME AARCH64 crunches/vmid? *) - -lemma store_vmid_active_cur_vcpu_of[wp]: - "store_vmid asid p \\s. P (active_cur_vcpu_of s)\" - unfolding store_vmid_def - sorry (* FIXME AARCH64 crunches/vmid? *) - -lemma arm_context_switch_active_cur_vcpu_of[wp]: - "arm_context_switch pd asid \\s. P (active_cur_vcpu_of s)\" - unfolding arm_context_switch_def get_vmid_def - apply (wpsimp wp: load_vmid_wp) - sorry (* FIXME AARCH64 crunches/vmid? *) - -lemma set_vm_root_active_cur_vcpu_of[wp]: - "set_vm_root tcb \\s. P (active_cur_vcpu_of s)\" - sorry (* FIXME AARCH64 missing crunches - by (wpsimp simp: set_vm_root_def wp: get_cap_wp) *) - crunches set_vm_root for valid_cur_vcpu_cur_thread_update[wp]: "\s. valid_cur_vcpu (s\cur_thread := t\)" (wp: valid_cur_vcpu_lift_cur_thread_update) @@ -177,17 +161,15 @@ lemma arch_switch_to_idle_thread_valid_cur_vcpu_cur_thread_update[wp]: "\\s. valid_cur_vcpu s \ valid_idle s \ t = idle_thread s\ arch_switch_to_idle_thread \\_ s. valid_cur_vcpu (s\cur_thread := t\)\" - unfolding arch_switch_to_idle_thread_def + unfolding arch_switch_to_idle_thread_def set_global_user_vspace_def apply wpsimp - sorry (* FIXME AARCH64 - by (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def valid_arch_idle_def) *) + by (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def valid_arch_idle_def) lemma switch_to_idle_thread_valid_cur_vcpu[wp]: "\valid_cur_vcpu and valid_idle\ switch_to_idle_thread \\_. valid_cur_vcpu\" - sorry (* FIXME AARCH64 arch_switch_to_idle_thread - by (wpsimp simp: switch_to_idle_thread_def) *) + by (wpsimp simp: switch_to_idle_thread_def) lemma tcb_vcpu_update_empty_valid_cur_vcpu[wp]: "\\s. if t = cur_thread s @@ -213,7 +195,7 @@ lemma vcpu_invalid_active_arm_current_vcpu_None[wp]: lemma dissociate_vcpu_tcb_valid_cur_vcpu[wp]: "\\s. valid_cur_vcpu s \ sym_refs (state_hyp_refs_of s)\ - dissociate_vcpu_tcb vr t + dissociate_vcpu_tcb vcpu_ptr t \\_. valid_cur_vcpu\" unfolding dissociate_vcpu_tcb_def apply (wpsimp wp: hoare_vcg_imp_lift' arch_thread_get_wp get_vcpu_wp) @@ -223,16 +205,15 @@ lemma dissociate_vcpu_tcb_valid_cur_vcpu[wp]: lemma associate_vcpu_tcb_valid_cur_vcpu: "\\s. valid_cur_vcpu s \ sym_refs (state_hyp_refs_of s)\ - associate_vcpu_tcb vr t + associate_vcpu_tcb vcpu_ptr t \\_. 
valid_cur_vcpu\" unfolding associate_vcpu_tcb_def - sorry (* FIXME AARCH64 something going on with _2 rephrase of valid_cur_vcpu apply (wpsimp wp: hoare_vcg_imp_lift') apply (wpsimp wp: arch_thread_set_wp) apply (wpsimp wp: arch_thread_set_wp) apply (rule_tac Q="\_ s. valid_cur_vcpu s \ sym_refs (state_hyp_refs_of s)" in hoare_post_imp) apply (clarsimp simp: pred_tcb_at_def obj_at_def valid_cur_vcpu_def active_cur_vcpu_of_def) - by (wpsimp wp: get_vcpu_wp hoare_drop_imps)+ *) + by (wpsimp wp: get_vcpu_wp hoare_drop_imps)+ lemma set_thread_state_arch_tcb_at[wp]: "set_thread_state ts ref \arch_tcb_at P t\" @@ -271,7 +252,7 @@ lemma schedule_valid_cur_vcpu[wp]: (schedule :: (unit, unit) s_monad) \\_. valid_cur_vcpu\" unfolding schedule_def allActiveTCBs_def - by (wpsimp wp: alternative_wp select_wp) + by wpsimp crunches cancel_all_ipc, blocked_cancel_ipc, unbind_maybe_notification, cancel_all_signals, bind_notification, fast_finalise, deleted_irq_handler, post_cap_deletion, cap_delete_one, @@ -281,7 +262,7 @@ crunches cancel_all_ipc, blocked_cancel_ipc, unbind_maybe_notification, cancel_a restart, reschedule_required, possible_switch_to, thread_set_priority, reply_from_kernel for arch_state[wp]: "\s. P (arch_state s)" and cur_thread[wp]: "\s. P (cur_thread s)" - (wp: mapM_x_wp_inv thread_set.arch_state select_wp crunch_wps + (wp: mapM_x_wp_inv thread_set.arch_state crunch_wps simp: crunch_simps possible_switch_to_def reschedule_required_def) lemma do_unbind_notification_arch_tcb_at[wp]: @@ -313,7 +294,7 @@ crunches blocked_cancel_ipc, cap_delete_one, cancel_signal lemma reply_cancel_ipc_arch_tcb_at[wp]: "reply_cancel_ipc ntfnptr \arch_tcb_at P t\" unfolding reply_cancel_ipc_def thread_set_def - apply (wpsimp wp: set_object_wp select_wp) + apply (wpsimp wp: set_object_wp) by (clarsimp simp: pred_tcb_at_def obj_at_def get_tcb_def) crunches cancel_ipc, send_ipc, receive_ipc @@ -323,7 +304,7 @@ crunches cancel_ipc, send_ipc, receive_ipc lemma send_fault_ipc_arch_tcb_at[wp]: "send_fault_ipc tptr fault \arch_tcb_at P t\" unfolding send_fault_ipc_def thread_set_def Let_def - by (wpsimp wp: set_object_wp hoare_drop_imps hoare_vcg_all_lift_R + by (wpsimp wp: set_object_wp hoare_drop_imps hoare_vcg_all_liftE_R simp: pred_tcb_at_def obj_at_def get_tcb_def) crunches handle_fault, handle_interrupt, handle_vm_fault, handle_hypervisor_fault, send_signal @@ -354,7 +335,7 @@ crunches send_ipc, send_fault_ipc, receive_ipc, handle_fault, handle_interrupt, crunches init_arch_objects, reset_untyped_cap for arch_state[wp]: "\s. P (arch_state s)" - (wp: crunch_wps preemption_point_inv hoare_unless_wp mapME_x_wp' + (wp: crunch_wps preemption_point_inv unless_wp mapME_x_wp' simp: crunch_simps) crunches invoke_untyped @@ -392,15 +373,15 @@ crunches cap_insert, cap_move crunches suspend, unbind_notification, cap_swap_for_delete for state_hyp_refs_of[wp]: "\s. P (state_hyp_refs_of s)" - (wp: crunch_wps thread_set_hyp_refs_trivial select_wp simp: crunch_simps) + (wp: crunch_wps thread_set_hyp_refs_trivial simp: crunch_simps) lemma prepare_thread_delete_valid_cur_vcpu[wp]: "\\s. valid_cur_vcpu s \ sym_refs (state_hyp_refs_of s)\ - prepare_thread_delete p + prepare_thread_delete t \\_. 
valid_cur_vcpu\" - unfolding prepare_thread_delete_def arch_thread_get_def - sorry (* FIXME AARCH64 FPU - by (wpsimp wp: dissociate_vcpu_tcb_valid_cur_vcpu) *) + unfolding prepare_thread_delete_def fpu_thread_delete_def + by (wpsimp wp: dissociate_vcpu_tcb_valid_cur_vcpu arch_thread_get_wp + hoare_drop_imps hoare_vcg_all_lift) crunches delete_asid_pool for active_cur_vcpu_of[wp]: "\s. P (active_cur_vcpu_of s)" @@ -412,35 +393,35 @@ crunches store_pte, set_asid_pool and cur_thread[wp]: "\s. P (cur_thread s)" (wp: crunch_wps simp: crunch_simps active_cur_vcpu_of_def) -(* FIXME AARCH64 PTs crunches unmap_page, unmap_page_table, delete_asid for active_cur_vcpu_of[wp]: "\s. P (active_cur_vcpu_of s)" and cur_thread[wp]: "\s. P (cur_thread s)" - (wp: crunch_wps valid_cur_vcpu_lift) *) + (wp: crunch_wps valid_cur_vcpu_lift simp: crunch_simps) -(* FIXME AARCH64 invalidate_tlb_by_asid_va crunches delete_asid_pool, unmap_page, unmap_page_table, delete_asid for valid_cur_vcpu[wp]: valid_cur_vcpu - (wp: valid_cur_vcpu_lift) *) + (wp: valid_cur_vcpu_lift) -(* FIXME AARCH64 crunches vcpu_finalise, arch_finalise_cap, finalise_cap for valid_cur_vcpu[wp]: valid_cur_vcpu - (simp: crunch_simps) *) + (simp: crunch_simps) -crunches prepare_thread_delete, deleting_irq_handler, delete_asid_pool +crunches prepare_thread_delete for sym_refs_state_hyp_refs_of[wp]: "\s. sym_refs (state_hyp_refs_of s)" (wp: crunch_wps simp: crunch_simps) -lemmas store_pte_state_hyp_refs_of[wp] = store_pte_state_hyp_refs_of +crunches invalidate_tlb_by_asid_va, delete_asid_pool, deleting_irq_handler + for state_hyp_refs_of[wp]: "\s. P (state_hyp_refs_of s)" + (wp: crunch_wps simp: crunch_simps) lemma unmap_page_state_hyp_refs_of[wp]: "unmap_page pgsz asid vptr pptr \\s. P (state_hyp_refs_of s)\" - unfolding unmap_page_def sorry (* FIXME AARCH64 lookup_pt_slot_def find_pd_for_asid_def - by (wpsimp wp: hoare_drop_imps mapM_wp_inv get_pde_wp store_pde_state_hyp_refs_of) *) + unfolding unmap_page_def + by (wpsimp wp: hoare_drop_imps mapM_wp_inv get_pte_wp store_pte_state_hyp_refs_of) crunches delete_asid, vcpu_finalise, unmap_page_table, finalise_cap for state_hyp_refs_of[wp]: "\s. sym_refs (state_hyp_refs_of s)" + (wp: crunch_wps) lemma preemption_point_state_hyp_refs_of[wp]: "preemption_point \\s. P (state_hyp_refs_of s)\" @@ -468,8 +449,7 @@ lemma rec_del_valid_cur_vcpu[wp]: \\_. valid_cur_vcpu\" (is "\?pre\ _ \_\") apply (rule_tac Q="\_. ?pre" in hoare_post_imp, fastforce) - sorry (* FIXME AARCH64 - by (rule rec_del_preservation; wpsimp) *) + by (rule rec_del_preservation; wpsimp) crunches cap_delete for valid_cur_vcpu[wp]: valid_cur_vcpu @@ -595,6 +575,11 @@ crunches perform_asid_control_invocation and active_cur_vcpu_of[wp]: "\s. P (active_cur_vcpu_of s)" (simp: active_cur_vcpu_of_def) +crunches perform_vspace_invocation + for cur_thread[wp]: "\s. P (cur_thread s )" + and active_cur_vcpu_of[wp]: "\s. 
P (active_cur_vcpu_of s)" + and valid_cur_vcpu[wp]: valid_cur_vcpu + lemma perform_asid_control_invocation_valid_cur_vcpu: "\valid_cur_vcpu and invs and valid_aci iv and ct_active\ perform_asid_control_invocation iv @@ -622,9 +607,8 @@ lemma perform_invocation_valid_cur_vcpu[wp]: unfolding arch_perform_invocation_def apply (wpsimp wp: perform_vcpu_invocation_valid_cur_vcpu perform_asid_control_invocation_valid_cur_vcpu) - sorry (* FIXME AARCH64 not all invocations go through, likely missing crunches apply (fastforce simp: valid_arch_inv_def) - done *) + done crunches reply_from_kernel, receive_signal for valid_cur_vcpu[wp]: valid_cur_vcpu diff --git a/proof/invariant-abstract/AARCH64/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/AARCH64/ArchVSpaceEntries_AI.thy index 1ad702c73d..862bfb4c09 100644 --- a/proof/invariant-abstract/AARCH64/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchVSpaceEntries_AI.thy @@ -31,7 +31,7 @@ lemmas obj_valid_vspace_simps[simp] [split_simps Structures_A.kernel_object.split arch_kernel_obj.split] -abbreviation +locale_abbrev valid_vspace_objs' :: "'z state \ bool" where "valid_vspace_objs' s \ \x \ ran (kheap s). obj_valid_vspace x" @@ -77,37 +77,29 @@ lemma valid_vspace_objs'_vsD: lemma store_pte_valid_vspace_objs'[wp]: "store_pte pt_t p pte \valid_vspace_objs'\" apply (simp add: store_pte_def set_pt_def, wp get_object_wp) - apply (clarsimp simp: obj_at_def) - sorry (* FIXME AARCH64 - apply (rule valid_entries_overwrite_0) - apply (fastforce simp:ran_def) - apply (drule bspec) - apply fastforce + apply (clarsimp simp: obj_at_def pt_upd_def split: pt.splits) + apply (rule conjI; clarsimp; rename_tac pt) + apply (rule valid_entries_overwrite_0, fastforce simp:ran_def) + apply (case_tac "pt pa"; case_tac pte; simp) + apply (rule valid_entries_overwrite_0, fastforce simp:ran_def) apply (case_tac "pt pa"; case_tac pte; simp) - done *) + done + +crunches invalidate_tlb_by_asid_va, invalidate_tlb_by_asid, unmap_page_table + for vspace_objs'[wp]: valid_vspace_objs' lemma unmap_page_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ unmap_page sz asid vptr pptr \\rv. valid_vspace_objs'\" apply (simp add: unmap_page_def mapM_discarded cong: vmpage_size.case_cong) - sorry (* FIXME AARCH64 - apply (wpsimp wp: store_pte_valid_vspace_objs') - done *) - -lemma unmap_page_table_valid_vspace_objs'[wp]: - "\valid_vspace_objs'\ unmap_page_table asid vptr pt \\rv. valid_vspace_objs'\" - apply (simp add: unmap_page_table_def) - sorry (* FIXME AARCH64 - apply (wp get_object_wp store_pte_valid_vspace_objs' | wpc)+ - apply (simp add: obj_at_def) - done *) + apply wpsimp + done crunch valid_vspace_objs'[wp]: set_simple_ko "valid_vspace_objs'" (wp: crunch_wps) -(* FIXME AARCH64 crunch valid_vspace_objs'[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_vspace_objs'" - (wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object) *) + (wp: crunch_wps preemption_point_inv simp: crunch_simps unless_def ignore:set_object) lemma preemption_point_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ preemption_point \\rv. 
valid_vspace_objs'\" @@ -120,10 +112,8 @@ lemmas cap_revoke_preservation_valid_vspace_objs = cap_revoke_preservation[OF _, lemmas rec_del_preservation_valid_vspace_objs = rec_del_preservation[OF _ _ _ _, where P=valid_vspace_objs', simplified] -(* FIXME AARCH64 crunch valid_vspace_objs'[wp]: cap_delete, cap_revoke "valid_vspace_objs'" (rule: cap_revoke_preservation_valid_vspace_objs) -*) crunch valid_vspace_objs'[wp]: cancel_badged_sends "valid_vspace_objs'" (simp: crunch_simps filterM_mapM wp: crunch_wps ignore: filterM) @@ -132,16 +122,12 @@ crunch valid_vspace_objs'[wp]: cap_move, cap_insert "valid_vspace_objs'" lemma invoke_cnode_valid_vspace_objs'[wp]: "\valid_vspace_objs' and invs and valid_cnode_inv i\ invoke_cnode i \\rv. valid_vspace_objs'\" - apply (simp add: invoke_cnode_def) - apply (rule hoare_pre) - apply (wp get_cap_wp | wpc | simp split del: if_split)+ - sorry (* FIXME AARCH64 - done *) + unfolding invoke_cnode_def + by (wpsimp wp: get_cap_wp split_del: if_split) -(* FIXME AARCH64 crunch valid_vspace_objs'[wp]: invoke_tcb "valid_vspace_objs'" (wp: check_cap_inv crunch_wps simp: crunch_simps - ignore: check_cap_at) *) + ignore: check_cap_at) lemma invoke_domain_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ invoke_domain t d \\rv. valid_vspace_objs'\" @@ -150,11 +136,10 @@ lemma invoke_domain_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: set_extra_badge, transfer_caps_loop "valid_vspace_objs'" (rule: transfer_caps_loop_pres) -(* FIXME AARCH64 crunch valid_vspace_objs'[wp]: send_ipc, send_signal, do_reply_transfer, invoke_irq_control, invoke_irq_handler "valid_vspace_objs'" (wp: crunch_wps simp: crunch_simps - ignore: clearMemory const_on_failure set_object) *) + ignore: clearMemory const_on_failure set_object) lemma valid_vspace_objs'_trans_state[simp]: "valid_vspace_objs' (trans_state f s) = valid_vspace_objs' s" apply (simp add: obj_valid_vspace_def) @@ -168,8 +153,7 @@ lemma retype_region_valid_vspace_objs'[wp]: elim!: ranE split: if_split_asm simp del:fun_upd_apply) apply (simp add: default_object_def default_arch_object_def split: kernel_object.splits apiobject_type.split aobject_type.split)+ - sorry (* FIXME AARCH64 - done *) + done lemma detype_valid_vspace[elim!]: "valid_vspace_objs' s \ valid_vspace_objs' (detype S s)" @@ -224,17 +208,12 @@ lemma perform_asid_pool_invocation_valid_vspace_objs'[wp]: \ \_. 
valid_vspace_objs' \" apply (simp add: perform_asid_pool_invocation_def) apply (wpsimp wp: get_cap_wp) - sorry (* FIXME AARCH64 - apply (simp add: cte_wp_at_caps_of_state) - apply (drule (1) valid_capsD) - apply (clarsimp simp: is_ArchObjectCap_def is_PageTableCap_def valid_cap_def) - apply (erule (1) is_aligned_pt) - done *) + done crunch valid_vspace_objs'[wp]: perform_asid_pool_invocation, perform_asid_control_invocation "valid_vspace_objs'" (ignore: delete_objects set_object - wp: static_imp_wp select_wp crunch_wps + wp: hoare_weak_lift_imp crunch_wps simp: crunch_simps unless_def) lemma perform_page_valid_vspace_objs'[wp]: @@ -246,16 +225,17 @@ lemma perform_page_valid_vspace_objs'[wp]: split: sum.split arch_cap.split option.split, safe intro!: hoare_gen_asm hoare_gen_asm[unfolded K_def], simp_all add: mapM_x_Nil mapM_x_Cons mapM_x_map) - sorry (* FIXME AARCH64 apply (wp store_pte_valid_vspace_objs' hoare_vcg_imp_lift[OF set_cap_arch_obj_neg] - hoare_vcg_all_lift + hoare_vcg_all_lift hoare_vcg_const_imp_lift hoare_vcg_if_lift | clarsimp simp: cte_wp_at_weakenE[OF _ TrueI] obj_at_def swp_def valid_page_inv_def valid_slots_def perform_pg_inv_map_def perform_pg_inv_unmap_def - perform_pg_inv_get_addr_def + perform_pg_inv_get_addr_def perform_flush_def split: pte.splits + split del: if_split + | rule conjI | wpc | wp (once) hoare_drop_imps)+ - done *) + done lemma perform_page_table_valid_vspace_objs'[wp]: "\valid_vspace_objs' and valid_pti pinv\ @@ -265,28 +245,22 @@ lemma perform_page_table_valid_vspace_objs'[wp]: cong: page_table_invocation.case_cong option.case_cong cap.case_cong arch_cap.case_cong) apply (rule hoare_pre) - sorry (* FIXME AARCH64 apply (wp hoare_vcg_ex_lift store_pte_valid_vspace_objs' set_cap_arch_obj hoare_vcg_all_lift mapM_x_wp' | wpc | simp add: swp_def | strengthen all_imp_ko_at_from_ex_strg | wp (once) hoare_drop_imps)+ - done *) + done lemma perform_invocation_valid_vspace_objs'[wp]: "\invs and ct_active and valid_invocation i and valid_vspace_objs'\ perform_invocation blocking call i \\rv. valid_vspace_objs'\" - apply (cases i, simp_all) - apply (wp send_signal_interrupt_states | simp)+ - sorry (* FIXME AARCH64 - apply (clarsimp simp:) - apply (wp | wpc | simp)+ - apply (simp add: arch_perform_invocation_def) - apply (wp | wpc | simp)+ + apply (cases i; wpsimp) + apply (wpsimp simp: arch_perform_invocation_def perform_vspace_invocation_def perform_flush_def) apply (auto simp: valid_arch_inv_def intro: valid_objs_caps) - done *) + done crunch valid_vspace_objs'[wp]: handle_fault, reply_from_kernel "valid_vspace_objs'" (simp: crunch_simps wp: crunch_wps) @@ -299,29 +273,25 @@ lemma handle_invocation_valid_vspace_objs'[wp]: | simp add: split_def | wpc | wp (once) hoare_drop_imps)+ apply (auto simp: ct_in_state_def elim: st_tcb_ex_cap) - sorry (* FIXME AARCH64 - done *) + done -(* FIXME AARCH64 crunch valid_vspace_objs'[wp]: activate_thread,switch_to_thread, handle_hypervisor_fault, switch_to_idle_thread, handle_call, handle_recv, handle_reply, handle_send, handle_yield, handle_interrupt "valid_vspace_objs'" - (simp: crunch_simps wp: crunch_wps alternative_valid select_wp OR_choice_weak_wp select_ext_weak_wp + (simp: crunch_simps wp: crunch_wps OR_choice_weak_wp select_ext_weak_wp ignore: without_preemption getActiveIRQ resetTimer ackInterrupt - OR_choice set_scheduler_action) *) + OR_choice set_scheduler_action) lemma handle_event_valid_vspace_objs'[wp]: "\valid_vspace_objs' and invs and ct_active\ handle_event e \\rv. 
valid_vspace_objs'\" - sorry (* FIXME AARCH64 - by (case_tac e; simp) (wpsimp simp: Let_def handle_vm_fault_def | wp (once) hoare_drop_imps)+ *) + by (case_tac e; simp) (wpsimp simp: Let_def handle_vm_fault_def | wp (once) hoare_drop_imps)+ lemma schedule_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ schedule :: (unit,unit) s_monad \\_. valid_vspace_objs'\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp alternative_wp select_wp) + apply wp apply simp - sorry (* FIXME AARCH64 - done *) + done lemma call_kernel_valid_vspace_objs'[wp]: "\invs and (\s. e \ Interrupt \ ct_running s) and valid_vspace_objs'\ @@ -333,8 +303,7 @@ lemma call_kernel_valid_vspace_objs'[wp]: | rule conjI | clarsimp simp: ct_in_state_def | erule pred_tcb_weakenE | wp (once) hoare_drop_imps)+ - sorry (* FIXME AARCH64 missing crunches - done *) + done end diff --git a/proof/invariant-abstract/AARCH64/ArchVSpace_AI.thy b/proof/invariant-abstract/AARCH64/ArchVSpace_AI.thy index 5a8274989d..d63d99b405 100644 --- a/proof/invariant-abstract/AARCH64/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchVSpace_AI.thy @@ -9,13 +9,18 @@ AARCH64-specific VSpace invariants *) -(* FIXME AARCH64 hyp: many VCPU/hyp-related lemmas from ARM_HYP pulled in because they exist in - ArchVSpace_AI there. However, it isn't clear where they should ultimately live. They might be - in the wrong order in this file (e.g. proved after they are needed). *) theory ArchVSpace_AI imports VSpacePre_AI begin +context Arch_p_asid_table_update_eq begin (* FIXME AARCh64: move to ArchInvariants_AI *) + +lemma valid_asid_map_upd[simp]: + "valid_asid_map (f s) = valid_asid_map s" + by (simp add: valid_asid_map_def) + +end + context Arch begin global_naming AARCH64 sublocale @@ -49,13 +54,12 @@ sublocale vcpu_save: non_vspace_non_cap_op "vcpu_save vcpu'" apply unfold_locales unfolding vcpu_disable_def vcpu_enable_def vcpu_restore_def vcpu_save_def - apply (wpsimp wp: set_vcpu.vsobj_at get_vcpu.vsobj_at mapM_wp mapM_x_wp - simp: vcpu_update_def vgic_update_def vcpu_save_reg_def vcpu_restore_reg_def - vcpu_restore_reg_range_def vcpu_save_reg_range_def vgic_update_lr_def - save_virt_timer_def vcpu_write_reg_def restore_virt_timer_def - vcpu_read_reg_def is_irq_active_def get_irq_state_def + by (wpsimp wp: set_vcpu.vsobj_at get_vcpu.vsobj_at mapM_wp mapM_x_wp + simp: vcpu_update_def vgic_update_def vcpu_save_reg_def vcpu_restore_reg_def + vcpu_restore_reg_range_def vcpu_save_reg_range_def vgic_update_lr_def + save_virt_timer_def vcpu_write_reg_def restore_virt_timer_def + vcpu_read_reg_def is_irq_active_def get_irq_state_def | assumption)+ - done crunches vcpu_read_reg, vcpu_write_reg, vcpu_disable, vcpu_save, vcpu_enable, vcpu_restore, @@ -146,29 +150,6 @@ lemma vs_lookup_target_clear_asid_table: apply blast done -(* FIXME AARCH64: move to Word_Lib *) -lemma word_mask_shift_eqI: - "\ x && mask n = y && mask n; x >> n = y >> n \ \ x = y" - apply (subst mask_or_not_mask[of x n, symmetric]) - apply (subst mask_or_not_mask[of y n, symmetric]) - apply (rule arg_cong2[where f="(OR)"]; blast intro: shiftr_eq_neg_mask_eq) - done - -(* FIXME AARCH64: move *) -lemma asid_high_low_inj: - "\ asid_low_bits_of asid' = asid_low_bits_of asid; - asid_high_bits_of asid' = asid_high_bits_of asid \ - \ asid' = asid" - unfolding asid_low_bits_of_def asid_high_bits_of_def - by (drule word_unat_eq_iff[THEN iffD1])+ - (clarsimp elim!: word_mask_shift_eqI - simp: unat_ucast_eq_unat_and_mask asid_low_bits_def shiftr_mask_eq' word_size) - -(* FIXME AARCH64: move *) -lemma 
asid_of_high_low_eq[simp, intro!]: - "asid_of (asid_high_bits_of asid) (asid_low_bits_of asid) = asid" - by (rule asid_high_low_inj; simp) - lemma vmid_for_asid_unmap_pool: "\asid_low. vmid_for_asid_2 (asid_of asid_high asid_low) table pools = None \ vmid_for_asid_2 asid (table(asid_high := None)) pools = vmid_for_asid_2 asid table pools" @@ -209,7 +190,7 @@ lemma asid_high_bits_shl: lemma valid_asid_map_unmap: "valid_asid_map s \ is_aligned base asid_low_bits \ valid_asid_map(s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid_high_bits_of base := None)\\)" - by (clarsimp simp: valid_asid_map_def) + by (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq pool_for_asid_def) lemma asid_low_bits_word_bits: "asid_low_bits < word_bits" @@ -268,20 +249,15 @@ crunches vgic_update_lr, vcpu_write_reg, vcpu_save_reg, vcpu_disable, vcpu_resto for valid_objs[wp]: valid_objs (ignore: vcpu_update simp: vcpu_update_def valid_vcpu_def wp: crunch_wps) -(* FIXME AARCH64: set up [simp] centrally properly for a_type *) -lemma a_type_VCPU [simp]: - "a_type (ArchObj (VCPU v)) = AArch AVCPU" - by (simp add: a_type_def) - lemma set_vcpu_wp: - "\\s. vcpu_at p s \ Q (s\kheap := kheap s(p \ (ArchObj (VCPU vcpu))) \) \ set_vcpu p vcpu \\_. Q\" + "\\s. vcpu_at p s \ Q (s\kheap := (kheap s)(p \ (ArchObj (VCPU vcpu))) \) \ set_vcpu p vcpu \\_. Q\" unfolding set_vcpu_def apply (wp set_object_wp_strong) apply (clarsimp simp: obj_at_def split: kernel_object.splits arch_kernel_obj.splits) done lemma set_vcpu_vcpus_of[wp]: - "\\s. vcpus_of s p \ None \ P (vcpus_of s (p \ vcpu)) \ set_vcpu p vcpu \\_ s. P (vcpus_of s)\" + "\\s. vcpus_of s p \ None \ P ((vcpus_of s)(p \ vcpu)) \ set_vcpu p vcpu \\_ s. P (vcpus_of s)\" by (wp set_vcpu_wp) (clarsimp simp: in_omonad obj_at_def) lemma get_vcpu_wp: @@ -300,13 +276,17 @@ lemma hyp_live_vcpu_tcb: "hyp_live (ArchObj (VCPU vcpu)) = (vcpu_tcb vcpu \ None)" by (clarsimp simp: hyp_live_def arch_live_def) +lemma pts_of_vcpu_None_upd_idem: + "vcpu_at p s \ (pts_of s)(p := None) = pts_of s" + by (clarsimp simp: opt_map_def obj_at_def) + lemma set_vcpu_valid_arch_state_hyp_live: "\valid_arch_state and K (hyp_live (ArchObj (VCPU vcpu)))\ set_vcpu t vcpu \\_. valid_arch_state\" apply (wpsimp wp: set_vcpu_wp simp: valid_arch_state_def) apply (clarsimp simp: asid_pools_of_vcpu_None_upd_idem vmid_inv_def) apply (rule conjI) - apply (clarsimp simp: cur_vcpu_2_def hyp_live_vcpu_tcb split: option.splits) - apply (clarsimp simp: valid_global_arch_objs_def obj_at_def) + apply (clarsimp simp: cur_vcpu_2_def hyp_live_vcpu_tcb in_opt_pred split: option.splits) + apply (clarsimp simp: valid_global_arch_objs_def obj_at_def pts_of_vcpu_None_upd_idem) done lemma set_vcpu_obj_at: @@ -353,9 +333,6 @@ lemma vcpu_update_vtimer_hyp_live[wp]: "vcpu_update vcpu_ptr (vcpu_vtimer_update f) \ obj_at hyp_live p \" by (wpsimp wp: vcpu_update_obj_at simp: obj_at_def in_omonad) -crunches do_machine_op (* FIXME AARCH64: move to KHeap crunches *) - for kheap[wp]: "\s. P (kheap s)" - crunches vcpu_save_reg, vcpu_write_reg for vcpu_hyp_live[wp]: "\s. 
P (vcpu_hyp_live_of s)" (simp_del: fun_upd_apply simp: opt_map_upd_triv) @@ -476,7 +453,8 @@ definition case_option True (valid_unmap sz) m \ cte_wp_at ((=) (ArchObjectCap acap)) cslot s \ valid_arch_cap acap s - | PageGetAddr ptr \ \" + | PageGetAddr ptr \ \ + | PageFlush _ _ _ _ _ _ \ \" definition "valid_pti pti \ case pti of @@ -499,7 +477,7 @@ definition and real_cte_at cslot and valid_arch_cap acap and is_final_cap' (ArchObjectCap acap) - and K (is_PageTableCap acap) + and K (is_PageTableCap acap \ acap_pt_type acap = NormalPT_T) and (\s. \asid vref. vs_cap_ref_arch acap = Some (asid, vref) \ vspace_for_asid asid s \ aobj_ref acap)" @@ -573,7 +551,6 @@ crunches get_vmid, invalidate_asid_entry, invalidate_tlb_by_asid, invalidate_tlb and cur[wp]: cur_tcb and valid_objs[wp]: valid_objs -(* FIXME AARCH64: typ_at_lifts should include arch things *) lemmas find_free_vmid_typ_ats[wp] = abs_typ_at_lifts [OF find_free_vmid_typ_at] lemmas invalidate_asid_typ_ats[wp] = abs_typ_at_lifts [OF invalidate_asid_typ_at] lemmas update_asid_pool_entry_typ_ats[wp] = abs_typ_at_lifts [OF update_asid_pool_entry_typ_at] @@ -703,8 +680,8 @@ lemma vmid_for_asid_upd_eq: \ (\asid'. vmid_for_asid_2 asid' (asid_table s) - (asid_pools_of s(pool_ptr \ ap(asid_low_bits_of asid \ - ASIDPoolVSpace vmid vsp)))) + ((asid_pools_of s)(pool_ptr \ ap(asid_low_bits_of asid \ + ASIDPoolVSpace vmid vsp)))) = (vmid_for_asid s) (asid := vmid)" apply (rule ext) apply (clarsimp simp: vmid_for_asid_2_def entry_for_pool_def pool_for_asid_def obind_def @@ -732,14 +709,23 @@ lemma find_free_vmid_vmid_inv[wp]: dest: inj_on_domD) done +lemma invalidate_vmid_entry_valid_vmid_table[wp]: + "invalidate_vmid_entry vmid \valid_vmid_table\" + unfolding invalidate_vmid_entry_def + by (wpsimp simp: valid_vmid_table_def) + +crunches find_free_vmid + for valid_global_tables[wp]: "valid_global_tables" + and valid_vmid_table[wp]: valid_vmid_table + lemma find_free_vmid_valid_arch [wp]: "find_free_vmid \valid_arch_state\" unfolding valid_arch_state_def by wpsimp lemma entry_for_asid_Some_vmidD: - "entry_for_asid asid s = Some entry \ ap_vmid entry = vmid_for_asid s asid \ 0 < asid" + "entry_for_asid asid s = Some entry \ ap_vmid entry = vmid_for_asid s asid" unfolding entry_for_asid_def vmid_for_asid_def entry_for_pool_def pool_for_asid_def - by (auto simp: obind_def opt_map_def split: option.splits) + by (auto simp: obind_def opt_map_def if_option split: option.splits) lemma load_vmid_wp[wp]: "\\s. P (asid_map s asid) s\ load_vmid asid \P\" @@ -766,11 +752,6 @@ lemma valid_machine_state_arm_next_vmid_upd[simp]: unfolding valid_machine_state_def by simp -(* FIXME AARCH64: no need to prove valid_machine_state explicitly any more, but still need to look for these ops: *) -(* lemma dmo_valid_machine_state[wp]: *) - (* "do_machine_op (set_cntv_cval_64 w) \valid_machine_state\" *) (* FIXME AARCH64: find correct op *) - (* "do_machine_op (set_cntv_off_64 w') \valid_machine_state\" *) (* FIXME AARCH64: find correct op *) - lemma vs_lookup_target_vspace_eq: "\ pts_of s' = pts_of s; \pool_ptr. 
vspace_for_pool pool_ptr asid (asid_pools_of s') = @@ -859,6 +840,26 @@ lemma update_asid_pool_entry_asid_pools[wp]: supply fun_upd_apply[simp del] by wpsimp +lemma valid_vmid_table_None_upd: + "valid_vmid_table_2 table \ valid_vmid_table_2 (table(vmid := None))" + by (simp add: valid_vmid_table_2_def) + +lemma valid_vmid_table_Some_upd: + "\ valid_vmid_table_2 table; asid \ 0 \ \ valid_vmid_table_2 (table (vmid \ asid))" + by (simp add: valid_vmid_table_2_def) + +crunches update_asid_pool_entry, set_asid_pool + for pool_for_asid[wp]: "\s. P (pool_for_asid as s)" + (simp: pool_for_asid_def) + +lemma update_asid_pool_entry_valid_asid_map[wp]: + "update_asid_pool_entry f asid \valid_asid_map\" + unfolding valid_asid_map_def entry_for_asid_def + apply (clarsimp simp: obind_None_eq) + apply (wpsimp wp: hoare_vcg_disj_lift hoare_vcg_ex_lift) + apply (clarsimp simp: pool_for_asid_def entry_for_pool_def obind_None_eq split: if_split_asm) + done + lemma invalidate_asid_entry_invs[wp]: "invalidate_asid_entry asid \invs\" unfolding invalidate_asid_entry_def invalidate_asid_def invalidate_vmid_entry_def invs_def @@ -866,40 +867,44 @@ lemma invalidate_asid_entry_invs[wp]: supply fun_upd_apply[simp del] apply (wpsimp wp: load_vmid_wp valid_irq_handlers_lift valid_irq_node_typ valid_irq_states_triv valid_arch_caps_lift pspace_in_kernel_window_atyp_lift_strong - simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_asid_map_def + simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_global_vspace_mappings_def | wps)+ apply (clarsimp simp: valid_irq_node_def valid_global_refs_def global_refs_def valid_arch_state_def valid_global_objs_def valid_global_arch_objs_def valid_machine_state_def - valid_vspace_objs_def vmid_for_asid_upd_eq comp_upd_simp is_inv_None_upd) + valid_vspace_objs_def vmid_for_asid_upd_eq comp_upd_simp is_inv_None_upd + valid_vmid_table_None_upd) done +crunches find_free_vmid, store_vmid + for valid_asid_map[wp]: valid_asid_map + lemma find_free_vmid_invs[wp]: "find_free_vmid \invs\" unfolding invs_def valid_state_def valid_pspace_def by (wpsimp wp: load_vmid_wp valid_irq_handlers_lift valid_irq_node_typ valid_arch_caps_lift pspace_in_kernel_window_atyp_lift_strong - simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_asid_map_def + simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_global_vspace_mappings_def) lemma store_hw_asid_valid_arch[wp]: - "\valid_arch_state and (\s. asid_map s asid = None \ arm_vmid_table (arch_state s) vmid = None)\ + "\valid_arch_state and (\s. asid_map s asid = None \ arm_vmid_table (arch_state s) vmid = None \ asid \ 0)\ store_vmid asid vmid \\_. valid_arch_state\" unfolding store_vmid_def valid_arch_state_def vmid_inv_def supply fun_upd_apply[simp del] apply (wpsimp simp: valid_global_arch_objs_upd_eq_lift | wps)+ - apply (fastforce simp: vmid_for_asid_upd_eq elim: is_inv_Some_upd) + apply (fastforce simp: vmid_for_asid_upd_eq elim: is_inv_Some_upd intro: valid_vmid_table_Some_upd) done lemma store_vmid_invs[wp]: - "\invs and (\s. asid_map s asid = None \ arm_vmid_table (arch_state s) vmid = None)\ + "\invs and (\s. asid_map s asid = None \ arm_vmid_table (arch_state s) vmid = None \ asid \ 0)\ store_vmid asid vmid \\_. 
invs\" unfolding invs_def valid_state_def valid_pspace_def by (wpsimp wp: valid_irq_node_typ valid_irq_handlers_lift valid_arch_caps_lift pspace_in_kernel_window_atyp_lift_strong - simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_asid_map_def + simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_global_vspace_mappings_def) lemma invalidate_vmid_entry_None[wp]: @@ -918,11 +923,14 @@ lemma invalidate_vmid_entry_vmid_for_asid_None[wp]: by wpsimp lemma invalidate_asid_vmid_for_asid_None[wp]: - "invalidate_asid asid' \\s. vmid_for_asid s asid = None\" + "\\s. asid' \ asid \ vmid_for_asid s asid = None \ + invalidate_asid asid' + \\_ s. vmid_for_asid s asid = None\" unfolding invalidate_asid_def update_asid_pool_entry_def supply fun_upd_apply[simp del] apply (wpsimp|wps)+ - apply (auto simp: vmid_for_asid_def entry_for_pool_def fun_upd_apply obind_def in_opt_map_None_eq + apply (auto simp: pool_for_asid_def vmid_for_asid_def entry_for_pool_def fun_upd_apply obind_def + in_opt_map_None_eq split: option.split) done @@ -932,12 +940,12 @@ lemma find_free_vmid_None_asid_map[wp]: by wpsimp lemma get_hw_asid_valid_arch[wp]: - "get_vmid asid \valid_arch_state\" + "\valid_arch_state and K (asid \ 0)\ get_vmid asid \\_. valid_arch_state\" unfolding get_vmid_def by wpsimp lemma get_hw_asid_invs[wp]: - "get_vmid asid \invs\" + "\invs and K (asid \ 0)\ get_vmid asid \\_. invs\" unfolding get_vmid_def by (wpsimp wp: store_vmid_invs load_vmid_wp simp: opt_map_def) @@ -954,7 +962,7 @@ crunches invalidate_tlb_by_asid, invalidate_tlb_by_asid_va (ignore: do_machine_op) lemma arm_context_switch_invs [wp]: - "arm_context_switch pt asid \invs\" + "\invs and K (asid \ 0)\ arm_context_switch pt asid \\_. invs\" unfolding arm_context_switch_def by wpsimp crunches set_vm_root @@ -966,6 +974,10 @@ lemma set_global_user_vspace_invs[wp]: unfolding set_global_user_vspace_def by wpsimp +lemma vspace_for_asid_0_None[simp]: + "vspace_for_asid 0 s = None" + by (simp add: vspace_for_asid_def entry_for_asid_def) + lemma set_vm_root_invs[wp]: "set_vm_root t \invs\" unfolding set_vm_root_def @@ -1319,12 +1331,12 @@ next apply simp apply (erule mp) apply (subst pt_walk.simps) - apply (simp add: in_omonad vm_level_leq_minus1_less) + apply (simp add: in_omonad vm_level.leq_minus1_less) apply (subst (asm) (3) pt_walk.simps) apply (case_tac "level = top_level - 1"; clarsimp) apply (subgoal_tac "level < top_level - 1", fastforce) - apply (frule vm_level_zero_least) - apply (subst (asm) vm_level_leq_minus1_less[symmetric], assumption) + apply (frule vm_level.zero_least) + apply (subst (asm) vm_level.leq_minus1_less[symmetric], assumption) apply simp done qed @@ -1437,15 +1449,6 @@ lemma pte_ref_Some_cases: "(pte_ref pte = Some ref) = ((is_PageTablePTE pte \ is_PagePTE pte) \ ref = pptr_from_pte pte)" by (cases pte) (auto simp: pptr_from_pte_def) -(* FIXME AARCH64: move to ArchInv; later clean up all of these Kernel_Config unfoldings *) -lemma max_pt_level_eq_minus_one: - "level - 1 = max_pt_level \ level = asid_pool_level" - unfolding level_defs by (auto simp: Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) - -lemma pptr_from_pte_PagePTE[simp]: (* FIXME AARCH64: move up *) - "pptr_from_pte (PagePTE p is_small attr rights) = ptrFromPAddr p" - by (simp add: pptr_from_pte_def pte_base_addr_def) - lemma store_pte_invalid_vs_lookup_target_unmap: "\\s. 
vs_lookup_slot level' asid vref s = Some (level', slot) \ pte_refs_of level' slot s = Some p \ @@ -1475,7 +1478,7 @@ lemma store_pte_invalid_vs_lookup_target_unmap: (* PageTablePTE: level' would have to be asid_pool_level, contradiction *) apply (drule (1) vs_lookup_table_step; simp?) apply (rule ccontr) - apply (clarsimp simp flip: bit1.neq_0_conv simp: is_PageTablePTE_def) + apply (clarsimp simp flip: vm_level.neq_0_conv simp: is_PageTablePTE_def) apply (fastforce simp: pte_ref_Some_cases) apply (drule (1) no_loop_vs_lookup_table; simp?) (* PagePTE *) @@ -1561,15 +1564,25 @@ lemma pt_lookup_from_level_wrp: crunches invalidate_tlb_by_asid for vs_lookup_target[wp]: "\s. P (vs_lookup_target level asid vref s)" +lemma normal_pt_not_vspace_for_asid: + "\ normal_pt_at pt s; pspace_aligned s; valid_asid_table s; valid_vspace_objs s \ + \ vspace_for_asid asid s \ Some pt" + apply clarsimp + apply (drule vspace_for_asid_vs_lookup) + apply (drule vs_lookup_table_pt_at; simp) + apply (clarsimp simp: obj_at_def) + done + lemma unmap_page_table_not_target: - "\\s. (\pt_t. pt_at pt_t pt s) \ pspace_aligned s \ pspace_distinct s \ + "\\s. normal_pt_at pt s \ pspace_aligned s \ pspace_distinct s \ valid_asid_table s \ valid_vspace_objs s \ - 0 < asid \ vref \ user_region \ vspace_for_asid asid s \ Some pt \ + 0 < asid \ vref \ user_region \ asid' = asid \ pt' = pt \ vref' = vref \ unmap_page_table asid vref pt \\_ s. vs_lookup_target level asid' vref' s \ Some (level, pt')\" unfolding unmap_page_table_def apply (wpsimp wp: store_pte_invalid_vs_lookup_target_unmap pt_lookup_from_level_wrp) + apply (frule normal_pt_not_vspace_for_asid[where asid=asid]; assumption?) apply (rule conjI; clarsimp) apply (clarsimp simp: vs_lookup_target_def vs_lookup_slot_def vs_lookup_table_def split: if_split_asm; @@ -1592,7 +1605,7 @@ lemma unmap_page_table_not_target: apply (clarsimp simp: data_at_def obj_at_def) apply (clarsimp simp: vs_lookup_slot_def split: if_split_asm) apply (drule (4) vs_lookup_table_step, simp) - apply (prop_tac "level - 1 < max_pt_level", erule (1) bit1.minus_one_leq_less) (* FIXME AARCH64: bit1 *) + apply (prop_tac "level - 1 < max_pt_level", erule (1) vm_level.minus_one_leq_less) apply fastforce done @@ -1729,7 +1742,7 @@ lemma perform_pt_inv_unmap_invs[wp]: hoare_vcg_all_lift hoare_vcg_imp_lift' mapM_x_swp_store_pte_invs_unmap mapM_x_store_pte_unreachable hoare_vcg_ball_lift unmap_page_table_not_target real_cte_at_typ_valid - simp: cte_wp_at_caps_of_state) + simp: cte_wp_at_caps_of_state cleanCacheRange_PoU_def) apply (clarsimp simp: valid_pti_def cte_wp_at_caps_of_state) apply (clarsimp simp: is_arch_update_def is_cap_simps is_PageTableCap_def update_map_data_def valid_cap_def valid_arch_cap_def cap_aligned_def) @@ -2075,7 +2088,7 @@ lemma perform_pg_inv_map_invs[wp]: unfolding perform_pg_inv_map_def supply if_split[split del] apply (wpsimp wp: store_pte_invs arch_update_cap_invs_map hoare_vcg_all_lift hoare_vcg_imp_lift' - invalidate_tlb_by_asid_va_invs + invalidate_tlb_by_asid_va_invs dmo_invs_lift | strengthen if_pair_imp_strengthen)+ apply (clarsimp simp: valid_page_inv_def cte_wp_at_caps_of_state is_arch_update_def is_cap_simps cap_master_cap_simps parent_for_refs_def valid_slots_def same_ref_def) @@ -2134,7 +2147,7 @@ end locale asid_pool_map = Arch + fixes s ap pool asid ptp pt and s' :: "'a::state_ext state" - defines "s' \ s\kheap := kheap s(ap \ ArchObj (ASIDPool (pool(asid_low_bits_of asid \ ptp))))\" + defines "s' \ s\kheap := (kheap s)(ap \ ArchObj (ASIDPool (pool(asid_low_bits_of 
asid \ ptp))))\" assumes ap: "asid_pools_of s ap = Some pool" assumes new: "pool (asid_low_bits_of asid) = None" assumes pt: "pts_of s (ap_vspace ptp) = Some pt" @@ -2192,7 +2205,6 @@ lemma vs_lookup_table: apply (rule conjI; clarsimp) using lookup apply (clarsimp simp: vs_lookup_table_def vspace_for_pool_def in_omonad pool_for_asid_def) - apply (rule conjI, clarsimp) apply (subst pt_walk.simps) using pt aligned apply (clarsimp simp: obind_def ptes_of_def empty_for_user) @@ -2374,7 +2386,7 @@ lemma vmid_for_asid_map_None: "\ asid_pools_of s ap = Some pool; pool_for_asid asid s = Some ap; pool (asid_low_bits_of asid) = None; ap_vmid ape = None \ \ (\asid'. vmid_for_asid_2 asid' (asid_table s) - (asid_pools_of s(ap \ pool(asid_low_bits_of asid \ ape)))) = + ((asid_pools_of s)(ap \ pool(asid_low_bits_of asid \ ape)))) = vmid_for_asid s" unfolding vmid_for_asid_def apply (rule ext) @@ -2405,6 +2417,20 @@ lemma set_asid_pool_valid_arch_state: unfolding valid_arch_state_def by (wpsimp wp: set_asid_pool_vmid_inv|wps)+ +lemma set_asid_pool_invs_valid_asid_map[wp]: + "\valid_asid_map and valid_asid_table and + (\s. asid_pools_of s ap = Some pool \ pool_for_asid asid s = Some ap \ asid \ 0)\ + set_asid_pool ap (pool(asid_low_bits_of asid \ ape)) + \\_. valid_asid_map\" + unfolding valid_asid_map_def entry_for_asid_def + apply (clarsimp simp: obind_None_eq) + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift) + apply (fastforce simp: asid_high_low_inj pool_for_asid_def valid_asid_table_def entry_for_pool_def + obind_None_eq + dest: inj_on_domD + split: if_split_asm) + done + lemma set_asid_pool_invs_map: "\invs and (\s. asid_pools_of s ap = Some pool \ pool_for_asid asid s = Some ap \ @@ -2414,10 +2440,11 @@ lemma set_asid_pool_invs_map: and K (pool (asid_low_bits_of asid) = None \ 0 < asid \ ap_vmid ape = None)\ set_asid_pool ap (pool(asid_low_bits_of asid \ ape)) \\rv. invs\" - apply (simp add: invs_def valid_state_def valid_pspace_def valid_asid_map_def) + apply (simp add: invs_def valid_state_def valid_pspace_def) apply (wpsimp wp: valid_irq_node_typ set_asid_pool_typ_at set_asid_pool_arch_objs_map valid_irq_handlers_lift set_asid_pool_valid_arch_caps_map set_asid_pool_valid_arch_state) + apply (clarsimp simp: valid_arch_state_def) done lemma ako_asid_pools_of: @@ -2551,9 +2578,7 @@ lemma valid_vspace_obj_default: by (cases ty; simp add: default_object_def assms) -(* FIXME AARCH64 another block of VCPU/hyp-related lemmas from ARM_HYP that could potentially go - somewhere else but we won't know until they're proved - SOME OF THESE WILL BE NEEDED FOR PROOFS ABOVE, it's quite tangled *) +(* VCPU lemmas *) crunches vcpu_switch for vs_lookup_table[wp]: "\s. P (vs_lookup_table level asid vref s)" @@ -2563,36 +2588,16 @@ crunches vcpu_switch and equal_mappings[wp]: equal_kernel_mappings and caps_of_state[wp]: "\s. 
P (caps_of_state s)" -(* FIXME AARCH64 VCPU: double-check if vcpu_switch can live in non_vspace_non_cap_op locale *) - +(* vcpu_switch can unfortunately not live in the non_vspace_non_cap_op locale, because it does not + preserve arch_state *) lemmas vcpu_switch_vs_lookup_pages[wp] = vs_lookup_pages_target_lift[OF vcpu_switch_vs_lookup_target] -crunches vcpu_update,vgic_update,vgic_update_lr,vcpu_disable,vcpu_restore,vcpu_save_reg_range, - vcpu_save, vcpu_switch +crunches vcpu_update, vgic_update, vgic_update_lr, vcpu_disable, vcpu_restore, vcpu_save_reg_range, + vcpu_save, vcpu_switch for distinct[wp]: pspace_distinct (wp: mapM_x_wp mapM_wp subset_refl) - (* lemmas for vcpu_switch invs *) - -(* FIXME AARCH64: move to Machine_AI? *) -(* FIXME AARCH64: naming issue due to using crunch, all these are now blah_no_irq -lemmas isb_irq_masks = no_irq[OF no_irq_isb] -lemmas dsb_irq_masks = no_irq[OF no_irq_dsb] -lemmas setHCR_irq_masks = no_irq[OF no_irq_setHCR] -lemmas setSCTLR_irq_masks = no_irq[OF no_irq_setSCTLR] -lemmas getSCTLR_irq_masks = no_irq[OF no_irq_getSCTLR] -lemmas get_gic_vcpu_ctrl_vmcr_irq_masks = no_irq[OF no_irq_get_gic_vcpu_ctrl_vmcr] -lemmas set_gic_vcpu_ctrl_vmcr_irq_masks = no_irq[OF no_irq_set_gic_vcpu_ctrl_vmcr] -lemmas get_gic_vcpu_ctrl_apr_irq_masks = no_irq[OF no_irq_get_gic_vcpu_ctrl_apr] -lemmas set_gic_vcpu_ctrl_apr_irq_masks = no_irq[OF no_irq_set_gic_vcpu_ctrl_apr] -lemmas get_gic_vcpu_ctrl_lr_irq_masks = no_irq[OF no_irq_get_gic_vcpu_ctrl_lr] -lemmas set_gic_vcpu_ctrl_lr_irq_masks = no_irq[OF no_irq_set_gic_vcpu_ctrl_lr] -lemmas get_gic_vcpu_ctrl_hcr_irq_masks = no_irq[OF no_irq_get_gic_vcpu_ctrl_hcr] -lemmas set_gic_vcpu_ctrl_hcr_irq_masks = no_irq[OF no_irq_set_gic_vcpu_ctrl_hcr] -*) -(* end of move to Machine_AI *) - lemma dmo_isb_invs[wp]: "do_machine_op isb \invs\" and dmo_dsb_invs[wp]: "do_machine_op dsb \invs\" and dmo_setHCR_invs[wp]: "do_machine_op (setHCR w) \invs\" @@ -2760,12 +2765,12 @@ lemma set_vcpu_sym_refs[wp]: apply (clarsimp simp: obj_at_def) done -lemma state_hyp_refs_of_simp_neq: "\ a \ p \ \ state_hyp_refs_of (s\kheap := kheap s(p \ v) \) a = state_hyp_refs_of s a " +lemma state_hyp_refs_of_simp_neq: "\ a \ p \ \ state_hyp_refs_of (s\kheap := (kheap s)(p \ v) \) a = state_hyp_refs_of s a " by (simp add: state_hyp_refs_of_def) lemma state_hyp_refs_of_simp_eq: "obj_at (\ko'. hyp_refs_of ko' = hyp_refs_of v) p s - \ state_hyp_refs_of (s\kheap := kheap s(p \ v) \) p = state_hyp_refs_of s p" + \ state_hyp_refs_of (s\kheap := (kheap s)(p \ v) \) p = state_hyp_refs_of s p" by (clarsimp simp: state_hyp_refs_of_def obj_at_def) lemma set_object_vcpu_sym_refs_hyp: @@ -2804,11 +2809,11 @@ lemma set_vcpu_valid_pspace: done lemma vmid_inv_set_vcpu: - "vcpu_at p s \ vmid_inv (s\kheap := kheap s(p \ ArchObj (VCPU v))\) = vmid_inv s" + "vcpu_at p s \ vmid_inv (s\kheap := (kheap s)(p \ ArchObj (VCPU v))\) = vmid_inv s" by (simp add: vmid_inv_def asid_pools_of_vcpu_None_upd_idem) lemma pt_at_eq_set_vcpu: - "vcpu_at p s \ pt_at pt_t p' (s\kheap := kheap s(p \ ArchObj (VCPU v))\) = pt_at pt_t p' s" + "vcpu_at p s \ pt_at pt_t p' (s\kheap := (kheap s)(p \ ArchObj (VCPU v))\) = pt_at pt_t p' s" by (auto simp add: obj_at_def) lemma set_vcpu_valid_arch_eq_hyp: @@ -2817,10 +2822,10 @@ lemma set_vcpu_valid_arch_eq_hyp: \\_. 
valid_arch_state\" unfolding valid_arch_state_def apply (wp set_vcpu_wp) - apply (clarsimp simp: vmid_inv_set_vcpu asid_pools_of_vcpu_None_upd_idem + apply (clarsimp simp: vmid_inv_set_vcpu asid_pools_of_vcpu_None_upd_idem pts_of_vcpu_None_upd_idem valid_global_arch_objs_def pt_at_eq_set_vcpu) apply (clarsimp simp: cur_vcpu_def split: option.splits) - by (auto simp: obj_at_def vcpu_tcb_refs_def opt_map_def split: option.splits) + by (auto simp: obj_at_def vcpu_tcb_refs_def opt_map_def in_opt_pred split: option.splits) lemma set_vcpu_invs_eq_hyp: "\obj_at (\ko'. hyp_refs_of ko' = hyp_refs_of (ArchObj (VCPU v))) p @@ -2875,16 +2880,6 @@ lemmas vcpu_update_invs[wp] = vcpu_update_trivial_invs[where upd="\f vcpu. vcpu\vcpu_vgic := f (vcpu_vgic vcpu)\" , folded vgic_update_def, simplified] -(* FIXME AARCH64: move to Machine_AI *) -lemma dmo_gets_inv[wp]: - "\P\ do_machine_op (gets f) \\rv. P\" - unfolding do_machine_op_def by (wpsimp simp: simpler_gets_def) - -(* FIXME AARCH64: move to ArchAcc after crunches *) -lemma dmo_machine_op_lift_invs[wp]: - "do_machine_op (machine_op_lift f) \invs\" - by (wp dmo_invs_lift) - crunches vcpu_restore_reg_range, vcpu_save_reg_range, vgic_update_lr, vcpu_read_reg for invs[wp]: invs (wp: mapM_x_wp) @@ -2915,7 +2910,7 @@ lemma restore_virt_timer_invs[wp]: "\\ s. invs s\ restore_virt_timer vcpu_ptr \\_ . invs\" unfolding restore_virt_timer_def read_cntpct_def is_irq_active_def get_irq_state_def - by (wpsimp wp: set_vcpu_invs_eq_hyp get_vcpu_wp hoare_vcg_all_lift hoare_vcg_imp_lift' + by (wpsimp wp: set_vcpu_invs_eq_hyp get_vcpu_wp hoare_vcg_all_lift hoare_drop_imp maskInterrupt_invs) lemma vcpu_enable_invs[wp]: @@ -2925,7 +2920,7 @@ lemma vcpu_enable_invs[wp]: lemma vcpu_restore_invs[wp]: "vcpu_restore v \invs\" - apply (simp add: vcpu_restore_def do_machine_op_bind dom_mapM) + apply (simp add: vcpu_restore_def do_machine_op_bind dom_mapM empty_fail_cond) apply (wpsimp wp: mapM_wp_inv) done @@ -2966,6 +2961,14 @@ lemma valid_machine_state_arch_state_update [simp]: "valid_machine_state (arch_state_update f s) = valid_machine_state s" by (simp add: valid_machine_state_def) +lemma arm_asid_table_current_vcpu_update[simp]: + "arm_asid_table ((arm_current_vcpu_update v) (arch_state s)) = arm_asid_table (arch_state s)" + by clarsimp + +lemma vmid_inv_current_vcpu_update[simp]: + "vmid_inv (s\arch_state := arm_current_vcpu_update Map.empty (arch_state s)\) = vmid_inv s" + by (clarsimp simp: vmid_inv_def) + lemma valid_irq_node_arch_state_update [simp]: "valid_irq_node (arch_state_update f s) = valid_irq_node s" by (simp add: valid_irq_node_def) @@ -3027,7 +3030,7 @@ crunches save_virt_timer, vcpu_disable, vcpu_invalidate_active, vcpu_restore, vc lemma obj_at_hyp_live_vcpu_regs: "vcpus_of s vcpu_ptr = Some v \ - obj_at hyp_live p (s\kheap := kheap s(vcpu_ptr \ ArchObj (VCPU (v\vcpu_regs := x\)))\) = + obj_at hyp_live p (s\kheap := (kheap s)(vcpu_ptr \ ArchObj (VCPU (v\vcpu_regs := x\)))\) = obj_at hyp_live p s" by (clarsimp simp: in_omonad obj_at_def) diff --git a/proof/invariant-abstract/AARCH64/Machine_AI.thy b/proof/invariant-abstract/AARCH64/Machine_AI.thy index 0b37527398..37116dd77f 100644 --- a/proof/invariant-abstract/AARCH64/Machine_AI.thy +++ b/proof/invariant-abstract/AARCH64/Machine_AI.thy @@ -19,7 +19,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. 
P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. no_irq m" wpc_helper_no_irq @@ -58,7 +58,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -87,9 +87,9 @@ lemma det_setNextPC: "det (setNextPC p)" text \Failure on empty result\ -crunches loadWord, storeWord, machine_op_lift, clearMemory +crunches loadWord, storeWord, machine_op_lift for (empty_fail) empty_fail[intro!, wp, simp] - (ignore: NonDetMonad.bind mapM_x simp: machine_op_lift_def) + (ignore: Nondet_Monad.bind mapM_x simp: machine_op_lift_def empty_fail_cond) lemmas ef_machine_op_lift = machine_op_lift_empty_fail \ \required for generic interface\ @@ -100,7 +100,7 @@ definition "irq_state_independent P \ \f s. P s \ lemma getActiveIRQ_inv[wp]: "\irq_state_independent P\ \ getActiveIRQ in_kernel \P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -122,18 +122,6 @@ lemma no_fail_machine_op_lift [simp]: "no_fail \ (machine_op_lift f)" by (simp add: machine_op_lift_def) -lemma no_fail_clearMemory[simp, wp]: - "no_fail (\_. is_aligned p 3) (clearMemory p b)" - apply (simp add: clearMemory_def mapM_x_mapM) - apply (rule no_fail_pre) - apply (wp no_fail_mapM' no_fail_storeWord ) - apply (clarsimp simp: upto_enum_step_def) - apply (erule aligned_add_aligned) - apply (simp add: word_size_def) - apply (rule is_aligned_mult_triv2 [where n = 3, simplified]) - apply simp - done - lemma no_fail_freeMemory[simp, wp]: "no_fail (\_. is_aligned p 3) (freeMemory p b)" apply (simp add: freeMemory_def mapM_x_mapM) @@ -150,7 +138,7 @@ lemma no_fail_getActiveIRQ[wp]: "no_fail \ (getActiveIRQ in_kernel)" apply (simp add: getActiveIRQ_def) apply (rule no_fail_pre) - apply (wp non_fail_select) + apply wp apply simp done @@ -235,9 +223,6 @@ lemma no_irq_getActiveIRQ: "no_irq (getActiveIRQ in_kernel)" lemma no_irq_storeWord: "no_irq (storeWord w p)" by (wpsimp simp: storeWord_def wp: no_irq_modify) -lemma no_irq_clearMemory: "no_irq (clearMemory a b)" - by (wpsimp simp: clearMemory_def no_irq_mapM_x no_irq_storeWord) - crunches ackInterrupt for (no_irq) no_irq[intro!, wp, simp] @@ -291,7 +276,7 @@ crunch_ignore (valid, empty_fail, no_fail) grep -oE "(\w+_impl)|(get\w+)" MachineOps.thy|sort|uniq|sed "s/_impl//;s/$/,/;s/^/ /" with the following manual interventions: - remove false positives: get_def, gets_def, getFPUState, getRegister, getRestartPC - - add readVCPUHardwareReg (which uses non-standard "Val" instead of "_val" (FIXME AARCH64)) + - add read_cntpct - remove final comma - getActiveIRQ does not preserve no_irq *) crunches @@ -306,7 +291,6 @@ crunches dsb, enableFpuEL01, fpuThreadDeleteOp, - getDFSR, getESR, getFAR, get_gic_vcpu_ctrl_apr, @@ -319,7 +303,6 @@ crunches get_gic_vcpu_ctrl_vmcr, get_gic_vcpu_ctrl_vtr, getHSR, - getIFSR, getMemoryRegions, gets, getSCTLR, @@ -345,7 +328,6 @@ crunches switchFpuOwner, readVCPUHardwareReg, writeVCPUHardwareReg, - (* FIXME AARCH64: machine ops missed by the grep above: *) read_cntpct for (no_fail) no_fail[intro!, wp, simp] and (empty_fail) empty_fail[intro!, wp, simp] @@ -353,7 +335,7 @@ crunches and device_state_inv[wp]: "\ms. P (device_state ms)" and irq_masks[wp]: "\s. 
P (irq_masks s)" and underlying_memory_inv[wp]: "\s. P (underlying_memory s)" - (wp: no_irq_bind ignore: empty_fail NonDetMonad.bind) + (wp: no_irq_bind ignore: empty_fail Nondet_Monad.bind) crunches getFPUState, getRegister, getRestartPC, setNextPC, ackInterrupt, maskInterrupt for (no_fail) no_fail[intro!, wp, simp] @@ -386,6 +368,30 @@ lemma dmo_valid_irq_states[wp]: unfolding valid_irq_states_def do_machine_op_def by (wpsimp, erule use_valid; assumption) +text \Ops that require machine-ops rules derived above\ + +\ \These can't be placed into the sections above, as they require the derivation of the machine op + properties, and those in turn rely on items in specific sections above. There are unlikely to be + many definitions like this in MachineOps.thy\ + +crunches clearMemory + for (empty_fail) empty_fail[intro!, wp, simp] + +lemma no_fail_clearMemory[unfolded word_size_bits_def, simp, wp]: + "no_fail (\_. is_aligned p word_size_bits) (clearMemory p b)" + apply (simp add: clearMemory_def word_size_bits_def mapM_x_mapM) + apply (rule no_fail_pre) + apply (wp no_fail_mapM' no_fail_storeWord ) + apply (clarsimp simp: upto_enum_step_def) + apply (erule aligned_add_aligned) + apply (simp add: word_size_def) + apply (rule is_aligned_mult_triv2 [where n = 3, simplified]) + apply simp + done + +lemma no_irq_clearMemory: "no_irq (clearMemory a b)" + by (wpsimp simp: clearMemory_def no_irq_mapM_x no_irq_storeWord) + text \Misc WP rules\ lemma getActiveIRQ_le_maxIRQ': @@ -393,7 +399,7 @@ lemma getActiveIRQ_le_maxIRQ': getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wpsimp wp: alternative_wp select_wp) + apply wpsimp apply (rule ccontr) apply (simp add: linorder_not_le) done @@ -411,6 +417,10 @@ lemma dmo_getActiveIRQ_non_kernel[wp]: apply clarsimp done +lemma dmo_gets_inv[wp]: + "do_machine_op (gets f) \P\" + unfolding do_machine_op_def by (wpsimp simp: simpler_gets_def) + end context begin interpretation Arch . diff --git a/proof/invariant-abstract/AInvs.thy b/proof/invariant-abstract/AInvs.thy index cf0cf83db3..b3475cbebd 100644 --- a/proof/invariant-abstract/AInvs.thy +++ b/proof/invariant-abstract/AInvs.thy @@ -14,7 +14,7 @@ begin lemma st_tcb_at_nostate_upd: "\ get_tcb t s = Some y; tcb_state y = tcb_state y' \ \ - st_tcb_at P t' (s \kheap := kheap s(t \ TCB y')\) = st_tcb_at P t' s" + st_tcb_at P t' (s \kheap := (kheap s)(t \ TCB y')\) = st_tcb_at P t' s" by (clarsimp simp add: pred_tcb_at_def obj_at_def dest!: get_tcb_SomeD) lemma pred_tcb_at_upd_apply: @@ -44,8 +44,8 @@ lemma kernel_entry_invs: (kernel_entry e us) :: (user_context,unit) s_monad \\rv. invs and (\s. ct_running s \ ct_idle s)\" apply (simp add: kernel_entry_def) - apply (wp akernel_invs thread_set_invs_trivial thread_set_ct_in_state select_wp - do_machine_op_ct_in_state static_imp_wp hoare_vcg_disj_lift + apply (wp akernel_invs thread_set_invs_trivial thread_set_ct_in_state + do_machine_op_ct_in_state hoare_weak_lift_imp hoare_vcg_disj_lift | clarsimp simp add: tcb_cap_cases_def)+ done @@ -89,7 +89,7 @@ lemma do_user_op_invs: \\_. 
invs and ct_running\" apply (simp add: do_user_op_def split_def) apply (wp device_update_invs) - apply (wp do_machine_op_ct_in_state select_wp dmo_invs | simp add:dom_restrict_plus_eq)+ + apply (wp do_machine_op_ct_in_state dmo_invs | simp add:dom_restrict_plus_eq)+ apply (clarsimp simp: user_memory_update_def simpler_modify_def restrict_map_def invs_def cur_tcb_def split: option.splits if_split_asm) diff --git a/proof/invariant-abstract/ARM/ArchAInvsPre.thy b/proof/invariant-abstract/ARM/ArchAInvsPre.thy index 8459c049bc..97956e008c 100644 --- a/proof/invariant-abstract/ARM/ArchAInvsPre.thy +++ b/proof/invariant-abstract/ARM/ArchAInvsPre.thy @@ -98,15 +98,6 @@ lemma get_pd_of_thread_reachable: split: Structures_A.kernel_object.splits if_split_asm option.splits cap.splits arch_cap.splits) -lemma is_aligned_ptrFromPAddrD: -"\is_aligned (ptrFromPAddr b) a; a \ 24\ \ is_aligned b a" - apply (clarsimp simp: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def physBase_def) - apply (erule is_aligned_addD2) - apply (rule is_aligned_weaken[where x = 24]) - apply (simp add: is_aligned_def) - apply simp - done - lemma obj_bits_data_at: "data_at sz (ptrFromPAddr b) s \ obj_bits (the (kheap s (ptrFromPAddr b))) = pageBitsForSize sz" @@ -185,10 +176,6 @@ lemma device_frame_in_device_region: \ device_state (machine_state s) p \ None" by (auto simp add: pspace_respects_device_region_def dom_def device_mem_def) -lemma is_aligned_pptrBaseOffset: -"is_aligned pptrBaseOffset (pageBitsForSize sz)" - by (case_tac sz, simp_all add: pptrBaseOffset_def - pptrBase_def physBase_def is_aligned_def)[1] global_naming Arch named_theorems AInvsPre_asms diff --git a/proof/invariant-abstract/ARM/ArchAcc_AI.thy b/proof/invariant-abstract/ARM/ArchAcc_AI.thy index a6f80af246..97468e916f 100644 --- a/proof/invariant-abstract/ARM/ArchAcc_AI.thy +++ b/proof/invariant-abstract/ARM/ArchAcc_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -344,13 +345,13 @@ lemma pde_at_aligned_vptr: split: kernel_object.split_asm arch_kernel_obj.split_asm if_split_asm cong: kernel_object.case_cong) - apply (prove "is_aligned x 2") + apply (prop_tac "is_aligned x 2") subgoal apply (clarsimp simp: upto_enum_step_def word_shift_by_2) by (rule is_aligned_shiftl_self) apply (simp add: aligned_add_aligned word_bits_conv is_aligned_shiftl_self)+ - apply (prove "pd = (x + (pd + (vptr >> 20 << 2)) && ~~ mask pd_bits)") + apply (prop_tac "pd = (x + (pd + (vptr >> 20 << 2)) && ~~ mask pd_bits)") subgoal supply bit_simps[simp del] apply (subst mask_lower_twice[symmetric, where n=6]) @@ -547,7 +548,7 @@ lemma lookup_pt_slot_ptes_aligned_valid: apply (frule (2) valid_vspace_objsD) apply (clarsimp simp: ) subgoal for s _ _ x - apply (prove "page_table_at (ptrFromPAddr x) s") + apply (prop_tac "page_table_at (ptrFromPAddr x) s") subgoal apply (bspec "(ucast (pd + (vptr >> 20 << 2) && mask pd_bits >> 2))";clarsimp) apply (frule kernel_mapping_slots_empty_pdeI) @@ -667,7 +668,7 @@ lemma create_mapping_entries_valid [wp]: apply (clarsimp simp add: valid_mapping_entries_def) apply wp apply (simp add: lookup_pd_slot_def Let_def) - apply (prove "is_aligned pd 14") + apply (prop_tac "is_aligned pd 14") apply (clarsimp simp: obj_at_def add.commute invs_def valid_state_def valid_pspace_def pspace_aligned_def) apply (drule bspec, blast) apply (clarsimp simp: a_type_def split: kernel_object.splits arch_kernel_obj.splits if_split_asm) @@ -1170,7 +1171,7 @@ lemma 
valid_objs_caps: lemma simpler_set_pt_def: "set_pt p pt = (\s. if \pt. kheap s p = Some (ArchObj (PageTable pt)) then - ({((), s\kheap := kheap s(p \ ArchObj (PageTable pt))\)}, False) + ({((), s\kheap := (kheap s)(p \ ArchObj (PageTable pt))\)}, False) else ({}, True))" apply (rule ext) apply (clarsimp simp: set_pt_def set_object_def get_object_def assert_def @@ -1187,7 +1188,7 @@ lemma simpler_set_pt_def: lemma valid_set_ptI: "(!!s opt. \P s; kheap s p = Some (ArchObj (PageTable opt))\ - \ Q () (s\kheap := kheap s(p \ ArchObj (PageTable pt))\)) + \ Q () (s\kheap := (kheap s)(p \ ArchObj (PageTable pt))\)) \ \P\ set_pt p pt \Q\" by (rule validI) (clarsimp simp: simpler_set_pt_def split: if_split_asm) @@ -1244,14 +1245,14 @@ lemma set_pt_valid_vspace_objs[wp]: apply (clarsimp simp: valid_vspace_objs_def) subgoal for s opt pa rs ao apply (spec pa) - apply (prove "(\\ pa) s") + apply (prop_tac "(\\ pa) s") apply (rule exI[where x=rs]) apply (erule vs_lookupE) apply clarsimp apply (erule vs_lookupI) apply (erule rtrancl.induct, simp) subgoal for \ b c - apply (prove "(b \1 c) s") + apply (prop_tac "(b \1 c) s") apply (thin_tac "_ : rtrancl _")+ apply (clarsimp simp add: vs_lookup1_def obj_at_def vs_refs_def split: if_split_asm) @@ -1475,7 +1476,7 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1483,7 +1484,7 @@ lemma in_user_frame_same_type_upd: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1521,7 +1522,7 @@ lemma valid_machine_state_heap_updI: assumes vm : "valid_machine_state s" assumes tyat : "typ_at type p s" shows - " a_type obj = type \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + " a_type obj = type \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: valid_machine_state_def) subgoal for p apply (rule valid_machine_stateE[OF vm,where p = p]) @@ -1860,7 +1861,7 @@ lemma valid_pde_typ_at: lemma valid_vspace_obj_same_type: "\valid_vspace_obj ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) @@ -2096,8 +2097,8 @@ lemma lookup_pt_slot_looks_up [wp]: apply (clarsimp simp: vs_lookup1_def lookup_pd_slot_def Let_def pd_shifting pd_shifting_dual) apply (rule exI, rule conjI, assumption) subgoal for s _ x - apply (prove "ptrFromPAddr x + ((vptr >> 12) && 0xFF << 2) && ~~ mask pt_bits = ptrFromPAddr x") - apply (prove "is_aligned (ptrFromPAddr x) 10") + apply (prop_tac "ptrFromPAddr x + ((vptr >> 12) && 0xFF << 2) && ~~ mask pt_bits = ptrFromPAddr x") + apply (prop_tac "is_aligned (ptrFromPAddr x) 10") apply (drule (2) valid_vspace_objsD) apply clarsimp apply (erule_tac x="ucast (vptr >> 20 << 2 >> 2)" in ballE) @@ -2144,7 +2145,7 @@ lemma lookup_pt_slot_reachable [wp]: apply (simp add: pred_conj_def ex_simps [symmetric] del: ex_simps) apply (rule hoare_vcg_ex_lift_R1) apply (rule hoare_pre) - 
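(* In the ARM ArchAcc_AI hunks above, apply (prove "…") becomes apply (prop_tac "…")
   one-for-one: both introduce the quoted proposition as an extra fact to establish
   before continuing, prop_tac being the library-supported spelling.  The surrounding
   (kheap s)(p ↦ …) changes in simpler_set_pt_def and friends are the same
   function-update syntax adjustment noted earlier. *)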
apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookup_pt_slot_looks_up) prefer 2 apply clarsimp @@ -2327,6 +2328,10 @@ lemma shiftr_20_less: by (simp add: word_less_nat_alt word_le_nat_alt shiftr_20_unat_ucast)+ +lemma kernel_base_aligned_pageBits: + "is_aligned kernel_base pageBits" + by (simp add: is_aligned_def kernel_base_def pageBits_def) + lemma kernel_base_ge_observation: "(kernel_base \ x) = (x && ~~ mask 29 = kernel_base)" apply (subst mask_in_range) @@ -2334,7 +2339,6 @@ lemma kernel_base_ge_observation: apply (simp add: kernel_base_def) done - lemma kernel_base_less_observation: "(x < kernel_base) = (x && ~~ mask 29 \ kernel_base)" apply (simp add: linorder_not_le[symmetric] kernel_base_ge_observation) @@ -2543,39 +2547,6 @@ lemma create_mapping_entries_valid_slots [wp]: apply (fastforce intro!: aligned_add_aligned is_aligned_shiftl_self) done -lemma is_aligned_addrFromPPtr_n: - "\ is_aligned p n; n \ 28 \ \ is_aligned (Platform.ARM.addrFromPPtr p) n" - apply (simp add: Platform.ARM.addrFromPPtr_def) - apply (erule aligned_sub_aligned, simp_all) - apply (simp add: pptrBaseOffset_def physBase_def - pptrBase_def pageBits_def) - apply (erule is_aligned_weaken[rotated]) - apply (simp add: is_aligned_def) - done - -lemma is_aligned_addrFromPPtr: - "is_aligned p pageBits \ is_aligned (Platform.ARM.addrFromPPtr p) pageBits" - by (simp add: is_aligned_addrFromPPtr_n pageBits_def) - -lemma is_aligned_ptrFromPAddr_n: - "\is_aligned x sz; sz\ 28\ - \ is_aligned (ptrFromPAddr x) sz" - apply (simp add:ptrFromPAddr_def pptrBaseOffset_def - pptrBase_def physBase_def) - apply (erule aligned_add_aligned) - apply (erule is_aligned_weaken[rotated]) - apply (simp add:is_aligned_def) - apply (simp add:word_bits_def) - done - -lemma is_aligned_ptrFromPAddr: - "is_aligned p pageBits \ is_aligned (ptrFromPAddr p) pageBits" - by (simp add: is_aligned_ptrFromPAddr_n pageBits_def) - -lemma pbfs_le_28[simp]: - "pageBitsForSize sz \ 28" - by (cases sz; simp) - lemma store_pde_lookup_pd: "\\\ pd and page_directory_at pd and valid_vspace_objs and (\s. valid_asid_table (arm_asid_table (arch_state s)) s)\ @@ -3177,7 +3148,7 @@ lemma cap_refs_respects_device_region_dmo: lemma machine_op_lift_device_state[wp]: "\\ms. P (device_state ms)\ machine_op_lift f \\_ ms. P (device_state ms)\" - by (clarsimp simp: machine_op_lift_def NonDetMonad.valid_def bind_def + by (clarsimp simp: machine_op_lift_def Nondet_VCG.valid_def bind_def machine_rest_lift_def gets_def simpler_modify_def get_def return_def select_def ignore_failure_def select_f_def split: if_splits) diff --git a/proof/invariant-abstract/ARM/ArchArch_AI.thy b/proof/invariant-abstract/ARM/ArchArch_AI.thy index 53e41c8ed8..dca50fecc4 100644 --- a/proof/invariant-abstract/ARM/ArchArch_AI.thy +++ b/proof/invariant-abstract/ARM/ArchArch_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -18,7 +19,7 @@ definition cte_wp_at (\cap. \idx. 
cap = cap.UntypedCap False frame pageBits idx ) parent s \ descendants_of parent (cdt s) = {} \ is_aligned base asid_low_bits \ base \ 2^asid_bits - 1 \ - arm_asid_table (arch_state s) (asid_high_bits_of base) = None" + asid_table s (asid_high_bits_of base) = None" lemma safe_parent_strg: @@ -63,7 +64,7 @@ lemma check_vp_wpR [wp]: check_vp_alignment sz w \P\, -" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (simp add: vmsz_aligned_def) done @@ -71,7 +72,7 @@ lemma check_vp_wpR [wp]: lemma check_vp_inv: "\P\ check_vp_alignment sz w \\_. P\" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply simp done @@ -269,8 +270,8 @@ end locale asid_update = Arch + fixes ap asid s s' assumes ko: "ko_at (ArchObj (ASIDPool Map.empty)) ap s" - assumes empty: "arm_asid_table (arch_state s) asid = None" - defines "s' \ s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\" + assumes empty: "asid_table s asid = None" + defines "s' \ s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\" begin lemma vs_lookup1' [simp]: @@ -285,7 +286,7 @@ lemma vs_lookup_pages1' [simp]: lemma vs_asid_refs' [simp]: "vs_asid_refs (arm_asid_table (arch_state s')) = - vs_asid_refs (arm_asid_table (arch_state s)) \ {([VSRef (ucast asid) None], ap)}" + vs_asid_refs (asid_table s) \ {([VSRef (ucast asid) None], ap)}" apply (simp add: s'_def) apply (rule set_eqI) apply (rule iffI) @@ -400,8 +401,8 @@ end context Arch begin global_naming ARM lemma valid_arch_state_strg: - "valid_arch_state s \ ap \ ran (arm_asid_table (arch_state s)) \ asid_pool_at ap s \ - valid_arch_state (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + "valid_arch_state s \ ap \ ran (asid_table s) \ asid_pool_at ap s \ + valid_arch_state (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" apply (clarsimp simp: valid_arch_state_def) apply (clarsimp simp: valid_asid_table_def ran_def) apply (fastforce intro!: inj_on_fun_updI) @@ -411,11 +412,11 @@ lemma valid_arch_state_strg: lemma valid_vs_lookup_at_upd_strg: "valid_vs_lookup s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ - arm_asid_table (arch_state s) asid = None \ + asid_table s asid = None \ (\ptr cap. 
caps_of_state s ptr = Some cap \ ap \ obj_refs cap \ vs_cap_ref cap = Some [VSRef (ucast asid) None]) \ - valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -488,7 +489,7 @@ lemma valid_table_caps_asid_upd [iff]: lemma vs_asid_ref_upd: "([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') - (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\) + (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid_high_bits_of asid \ ap)\\) = (if asid_high_bits_of asid' = asid_high_bits_of asid then ap' = ap else ([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') s)" @@ -497,7 +498,7 @@ lemma vs_asid_ref_upd: lemma vs_asid_ref_eq: "([VSRef (ucast asid) None] \ ap) s - = (arm_asid_table (arch_state s) asid = Some ap)" + = (asid_table s asid = Some ap)" by (fastforce elim: vs_lookup_atE intro: vs_lookup_atI) @@ -509,12 +510,12 @@ lemma set_cap_reachable_pg_cap: lemma cap_insert_simple_arch_caps_ap: "\valid_arch_caps and (\s. cte_wp_at (safe_parent_for (cdt s) src cap) src s) and no_cap_to_obj_with_diff_ref cap {dest} - and (\s. arm_asid_table (arch_state s) (asid_high_bits_of asid) = None) + and (\s. asid_table s (asid_high_bits_of asid) = None) and ko_at (ArchObj (ASIDPool Map.empty)) ap and K (cap = ArchObjectCap (ASIDPoolCap ap asid)) \ cap_insert cap src dest \\rv s. valid_arch_caps (s\arch_state := arch_state s - \arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := (asid_table s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg) @@ -526,7 +527,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -546,8 +547,8 @@ lemma cap_insert_simple_arch_caps_ap: lemma valid_asid_map_asid_upd_strg: "valid_asid_map s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ - arm_asid_table (arch_state s) asid = None \ - valid_asid_map (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + asid_table s asid = None \ + valid_asid_map (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -559,8 +560,8 @@ lemma valid_asid_map_asid_upd_strg: lemma valid_vspace_objs_asid_upd_strg: "valid_vspace_objs s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ - arm_asid_table (arch_state s) asid = None \ - valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + asid_table s asid = None \ + valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -572,8 +573,8 @@ lemma valid_vspace_objs_asid_upd_strg: lemma valid_global_objs_asid_upd_strg: "valid_global_objs s \ ko_at (ArchObj (arch_kernel_obj.ASIDPool Map.empty)) ap s \ - arm_asid_table 
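(* In ArchArch_AI, arm_asid_table (arch_state s) is being shortened to asid_table s in
   statements and definitions; the two spellings coexist (the conclusion of
   cap_insert_ap_invs still uses the composed form), so asid_table is presumably the
   usual abbreviation for that arch_state field.  Under that assumption the following
   sketch (illustrative name) is immediate: *)
lemma asid_table_sketch:
  "asid_table s = arm_asid_table (arch_state s)"
  by simp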
(arch_state s) asid = None \ - valid_global_objs (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + asid_table s asid = None \ + valid_global_objs (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" by clarsimp @@ -590,11 +591,11 @@ lemma cap_insert_ap_invs: K (cap = cap.ArchObjectCap (arch_cap.ASIDPoolCap ap asid)) and (\s. \irq \ cap_irqs cap. irq_issued irq s) and ko_at (ArchObj (arch_kernel_obj.ASIDPool Map.empty)) ap and - (\s. ap \ ran (arm_asid_table (arch_state s)) \ - arm_asid_table (arch_state s) (asid_high_bits_of asid) = None)\ + (\s. ap \ ran (asid_table s) \ + asid_table s (asid_high_bits_of asid) = None)\ cap_insert cap src dest \\rv s. invs (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := ((arm_asid_table \ arch_state) s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def) apply (strengthen valid_arch_state_strg valid_asid_map_asid_upd_strg valid_vspace_objs_asid_upd_strg ) @@ -741,17 +742,17 @@ proof - K (cap = ArchObjectCap (ASIDPoolCap ap asid)) and (\s. \irq\cap_irqs cap. irq_issued irq s) and ko_at (ArchObj (ASIDPool Map.empty)) ap and - (\s. ap \ ran (arm_asid_table (arch_state s)) \ - arm_asid_table (arch_state s) (asid_high_bits_of asid) = None))\ + (\s. ap \ ran (asid_table s) \ + asid_table s (asid_high_bits_of asid) = None))\ cap_insert cap src dest \\rv s. invs (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s + \arm_asid_table := ((arm_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\) \ Q (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s + \arm_asid_table := ((arm_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\)\" apply (wp cap_insert_ap_invs) apply simp @@ -861,7 +862,7 @@ proof - qed -lemmas aci_invs[wp] = aci_invs'[where Q=\,simplified hoare_post_taut, OF refl refl refl TrueI TrueI TrueI,simplified] +lemmas aci_invs[wp] = aci_invs'[where Q=\,simplified hoare_TrueI, OF refl refl refl TrueI TrueI TrueI,simplified] lemma invoke_arch_invs[wp]: "\invs and ct_active and valid_arch_inv ai\ @@ -945,7 +946,7 @@ lemma create_mapping_entries_inv [wp]: crunch_ignore (add: select_ext) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps) + (wp: crunch_wps select_ext_weak_wp simp: crunch_simps) lemma create_mappings_empty [wp]: @@ -1000,13 +1001,13 @@ lemma create_mapping_entries_parent_for_refs: superSectionPDE_offsets_def) apply (rule hoare_pre) apply wp - apply (rule hoare_post_imp_R, rule lookup_pt_slot_cap_to) + apply (rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to) apply (elim exEI) apply (clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def) apply simp apply (rule hoare_pre) apply wp - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookup_pt_slot_cap_to_multiple1) apply (elim conjE exEI cte_wp_at_weakenE) apply (clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def @@ -1048,7 +1049,7 @@ lemma find_pd_for_asid_shifting_voodoo: "\pspace_aligned and valid_vspace_objs\ find_pd_for_asid asid \\rv s. 
v >> 20 = rv + (v >> 20 << 2) && mask pd_bits >> 2\,-" - apply (rule hoare_post_imp_R, + apply (rule hoare_strengthen_postE_R, rule find_pd_for_asid_aligned_pd) apply (subst pd_shifting_dual, simp) apply (rule word_eqI) @@ -1067,7 +1068,7 @@ lemma find_pd_for_asid_ref_offset_voodoo: \\rv. (ref \ (rv + (v >> 20 << 2) && ~~ mask pd_bits))\,-" apply (rule hoare_gen_asmE) apply (rule_tac Q'="\rv s. is_aligned rv 14 \ (ref \ rv) s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: ucast_ucast_mask mask_asid_low_bits_ucast_ucast) apply (fold asid_low_bits_def) @@ -1232,7 +1233,7 @@ lemma cte_wp_at_page_cap_weaken: lemma find_pd_for_asid_lookup_pd_wp: "\ \s. valid_vspace_objs s \ (\pd. vspace_at_asid asid pd s \ page_directory_at pd s \ (\\ pd) s \ Q pd s) \ find_pd_for_asid asid \ Q \, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_vcg_conj_lift_R[OF find_pd_for_asid_page_directory]) apply (rule hoare_vcg_conj_lift_R[OF find_pd_for_asid_lookup, simplified]) apply (rule hoare_vcg_conj_lift_R[OF find_pd_for_asid_pd_at_asid, simplified]) @@ -1255,7 +1256,6 @@ lemma aligned_sum_less_kernel_base: apply (case_tac sz,simp_all add:kernel_base_def is_aligned_def)+ done - lemma arch_decode_inv_wf[wp]: "\invs and valid_cap (cap.ArchObjectCap arch_cap) and cte_wp_at ((=) (cap.ArchObjectCap arch_cap)) slot and @@ -1266,7 +1266,7 @@ lemma arch_decode_inv_wf[wp]: apply (rename_tac word1 word2) apply (simp add: arch_decode_invocation_def Let_def split_def cong: if_cong split del: if_split) apply (rule hoare_pre) - apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_wp select_ext_weak_wp| + apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_ext_weak_wp| wpc| simp add: valid_arch_inv_def valid_apinv_def)+)[1] apply (simp add: valid_arch_inv_def valid_apinv_def) @@ -1318,7 +1318,7 @@ lemma arch_decode_inv_wf[wp]: (\s. descendants_of (snd (excaps!0)) (cdt s) = {}) and cte_wp_at (\c. \idx. c = (cap.UntypedCap False frame pageBits idx)) (snd (excaps!0)) and (\s. 
arm_asid_table (arch_state s) free = None)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: lookup_target_slot_def) apply wp apply (clarsimp simp: cte_wp_at_def) @@ -1328,7 +1328,7 @@ lemma arch_decode_inv_wf[wp]: apply (simp add: asid_bits_def asid_low_bits_def) apply (simp add: asid_bits_def) apply (simp split del: if_split) - apply (wp ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp | wpc | simp)+ + apply (wp ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp | wpc | simp)+ apply clarsimp apply (rule conjI, fastforce) apply (cases excaps, simp) @@ -1372,7 +1372,7 @@ lemma arch_decode_inv_wf[wp]: linorder_not_le aligned_sum_less_kernel_base elim: is_aligned_weaken split: vmpage_size.split split: if_splits - intro!: is_aligned_addrFromPPtr is_aligned_addrFromPPtr_n + intro!: is_aligned_addrFromPPtr_n pbfs_atleast_pageBits)[2] apply (cases "invocation_type label = ArchInvocationLabel ARMPageUnmap") apply simp @@ -1386,7 +1386,7 @@ lemma arch_decode_inv_wf[wp]: apply (cases "isPageFlushLabel (invocation_type label)") apply (rule hoare_pre) apply simp - apply (wp whenE_throwError_wp static_imp_wp hoare_drop_imps) + apply (wp whenE_throwError_wp hoare_weak_lift_imp hoare_drop_imps) apply (simp add: valid_arch_inv_def valid_page_inv_def) apply (wp find_pd_for_asid_pd_at_asid | wpc)+ apply (clarsimp simp: valid_cap_def mask_def) @@ -1402,7 +1402,7 @@ lemma arch_decode_inv_wf[wp]: cong: if_cong) apply (rename_tac word option) apply (rule hoare_pre) - apply ((wp whenE_throwError_wp check_vp_wpR get_master_pde_wp hoare_vcg_all_lift_R + apply ((wp whenE_throwError_wp check_vp_wpR get_master_pde_wp hoare_vcg_all_liftE_R | wpc | simp add: valid_arch_inv_def valid_pti_def unlessE_whenE vs_cap_ref_def split: if_splits | rule_tac x="fst p" in hoare_imp_eq_substR @@ -1410,14 +1410,14 @@ lemma arch_decode_inv_wf[wp]: apply (rule_tac Q'="\a b. ko_at (ArchObj (PageDirectory pd)) (a + (args ! 0 >> 20 << 2) && ~~ mask pd_bits) b \ pd (ucast (a + (args ! 0 >> 20 << 2) && mask pd_bits >> 2)) = - InvalidPDE \ L word option p pd a b" for L in hoare_post_imp_R[rotated]) + InvalidPDE \ L word option p pd a b" for L in hoare_strengthen_postE_R[rotated]) apply (intro impI) apply (erule impE) apply clarsimp apply (erule impE) apply (clarsimp split: pde.splits) apply assumption - apply ((wp whenE_throwError_wp hoare_vcg_all_lift_R + apply ((wp whenE_throwError_wp hoare_vcg_all_liftE_R find_pd_for_asid_lookup_slot [unfolded lookup_pd_slot_def Let_def] find_pd_for_asid_ref_offset_voodoo find_pd_for_asid_shifting_voodoo find_pd_for_asid_inv @@ -1468,11 +1468,11 @@ lemma arch_decode_inv_wf[wp]: apply (cases "isPDFlushLabel (invocation_type label)") apply simp apply (rule hoare_pre) - apply (wpsimp wp: whenE_throwError_wp static_imp_wp hoare_drop_imp get_master_pte_wp + apply (wpsimp wp: whenE_throwError_wp hoare_weak_lift_imp hoare_drop_imp get_master_pte_wp get_master_pde_wp whenE_throwError_wp simp: resolve_vaddr_def valid_arch_inv_def valid_pdi_def Let_def) apply (rule_tac Q'="\pd' s. 
vspace_at_asid x2 pd' s \ x2 \ mask asid_bits \ x2 \ 0" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wpsimp+ apply (wpsimp wp: throwError_validE_R simp: valid_cap_def mask_def)+ done diff --git a/proof/invariant-abstract/ARM/ArchCNodeInv_AI.thy b/proof/invariant-abstract/ARM/ArchCNodeInv_AI.thy index f384a3e349..2879ff79f3 100644 --- a/proof/invariant-abstract/ARM/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/ARM/ArchCNodeInv_AI.thy @@ -540,7 +540,7 @@ context Arch begin global_naming ARM lemma post_cap_delete_pre_is_final_cap': "\rv s'' rva s''a s. \valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def arch_cap_cleanup_opt_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -617,7 +617,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) @@ -785,7 +785,7 @@ qed lemmas rec_del_invs'[CNodeInv_AI_assms] = rec_del_invs'' [where Q=\, - simplified hoare_post_taut pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] + simplified hoare_TrueI pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] end diff --git a/proof/invariant-abstract/ARM/ArchCSpacePre_AI.thy b/proof/invariant-abstract/ARM/ArchCSpacePre_AI.thy index 651cda9600..60f531706b 100644 --- a/proof/invariant-abstract/ARM/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/ARM/ArchCSpacePre_AI.thy @@ -165,7 +165,7 @@ lemma valid_arch_mdb_simple: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by auto lemma valid_arch_mdb_free_index_update: @@ -189,34 +189,34 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by auto lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by auto lemma valid_arch_mdb_free_index_update': "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by auto lemma valid_arch_mdb_weak_derived_update: "\s capa. 
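(* Most of the remaining ARM hunks are mechanical renames of monad and Hoare-logic
   lemmas rather than proof changes; the substitutions visible in this patch are:
     static_imp_wp        → hoare_weak_lift_imp
     hoare_post_imp_R     → hoare_strengthen_postE_R
     hoare_vcg_all_lift_R → hoare_vcg_all_liftE_R
     hoare_whenE_wp       → whenE_wp
     hoare_unless_wp      → unless_wp
     hoare_post_taut      → hoare_TrueI
     hoare_seq_ext        → bind_wp
     NonDetMonad.*        → Nondet_Monad.*  (and NonDetMonad.valid_def → Nondet_VCG.valid_def)
   select_wp and alternative_wp are simply dropped from wp lists, apparently because
   the corresponding rules are now part of the default wp set. *)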
\valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by auto lemma valid_arch_mdb_tcb_cnode_update: "valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb ((is_original_cap s) ((t, tcb_cnode_index 2) := True)) - (caps_of_state s((t, tcb_cnode_index 2) \ ReplyCap t True r))" + ((caps_of_state s)((t, tcb_cnode_index 2) \ ReplyCap t True r))" by auto lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -249,10 +249,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by auto end diff --git a/proof/invariant-abstract/ARM/ArchCSpace_AI.thy b/proof/invariant-abstract/ARM/ArchCSpace_AI.thy index a9d8aabf81..57dbba4c49 100644 --- a/proof/invariant-abstract/ARM/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/ARM/ArchCSpace_AI.thy @@ -184,20 +184,20 @@ lemma is_derived_is_cap: (* FIXME: move to CSpace_I near lemma vs_lookup1_tcb_update *) lemma vs_lookup_pages1_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages1 s" + vs_lookup_pages1 (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) (* FIXME: move to CSpace_I near lemma vs_lookup_tcb_update *) lemma vs_lookup_pages_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages s" by (clarsimp simp add: vs_lookup_pages_def vs_lookup_pages1_tcb_update) (* FIXME: move to CSpace_I near lemma vs_lookup1_cnode_update *) lemma vs_lookup_pages1_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ CNode m cs')\) = + vs_lookup_pages1 (s\kheap := (kheap s)(p \ CNode m cs')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) @@ -205,7 +205,7 @@ lemma vs_lookup_pages1_cnode_update: (* FIXME: move to CSpace_I near lemma vs_lookup_cnode_update *) lemma vs_lookup_pages_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages (s\kheap := kheap s(p \ CNode n cs')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ CNode n cs')\) = vs_lookup_pages s" by (clarsimp simp: vs_lookup_pages_def dest!: vs_lookup_pages1_cnode_update[where m=n and cs'=cs']) diff --git a/proof/invariant-abstract/ARM/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/ARM/ArchDetSchedAux_AI.thy index 12c801782a..3b16580690 100644 --- a/proof/invariant-abstract/ARM/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/ARM/ArchDetSchedAux_AI.thy @@ -16,7 +16,7 @@ crunches init_arch_objects for exst[wp]: "\s. P (exst s)" and ct[wp]: "\s. 
P (cur_thread s)" and valid_etcbs[wp, DetSchedAux_AI_assms]: valid_etcbs - (wp: crunch_wps hoare_unless_wp valid_etcbs_lift) + (wp: crunch_wps unless_wp valid_etcbs_lift) crunch ct[wp, DetSchedAux_AI_assms]: invoke_untyped "\s. P (cur_thread s)" (wp: crunch_wps dxo_wp_weak preemption_point_inv mapME_x_inv_wp @@ -100,9 +100,9 @@ crunch ct[wp]: perform_asid_control_invocation "\s. P (cur_thread s)" crunch idle_thread[wp]: perform_asid_control_invocation "\s. P (idle_thread s)" -crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: static_imp_wp) +crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: hoare_weak_lift_imp) -crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: static_imp_wp) +crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: hoare_weak_lift_imp) crunch schedact[wp]: perform_asid_control_invocation "\s :: det_ext state. P (scheduler_action s)" (wp: crunch_wps simp: detype_def detype_ext_def wrap_ext_det_ext_ext_def cap_insert_ext_def ignore: freeMemory) diff --git a/proof/invariant-abstract/ARM/ArchDetSchedSchedule_AI.thy b/proof/invariant-abstract/ARM/ArchDetSchedSchedule_AI.thy index ad668f15d8..9e510ec34d 100644 --- a/proof/invariant-abstract/ARM/ArchDetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/ARM/ArchDetSchedSchedule_AI.thy @@ -85,7 +85,7 @@ crunch valid_sched [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread, arch_ (simp: crunch_simps ignore: clearExMonitor) crunch exst[wp]: set_vm_root "\s. P (exst s)" - (wp: crunch_wps hoare_whenE_wp simp: crunch_simps) + (wp: crunch_wps whenE_wp simp: crunch_simps) crunch ct_in_cur_domain_2 [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread "\s. ct_in_cur_domain_2 thread (idle_thread s) (scheduler_action s) (cur_domain s) (ekheap s)" @@ -116,7 +116,7 @@ lemma set_vm_root_valid_blocked_ct_in_q [wp]: lemma arch_switch_to_thread_valid_blocked [wp, DetSchedSchedule_AI_assms]: "\valid_blocked and ct_in_q\ arch_switch_to_thread thread \\_. 
valid_blocked and ct_in_q\" apply (simp add: arch_switch_to_thread_def) - apply (rule hoare_seq_ext)+ + apply (rule bind_wp)+ apply (rule do_machine_op_valid_blocked) apply wp done @@ -194,17 +194,17 @@ lemma set_asid_pool_valid_sched[wp]: crunch ct_not_in_q [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete ct_not_in_q - (wp: crunch_wps hoare_drop_imps hoare_unless_wp select_inv mapM_wp + (wp: crunch_wps hoare_drop_imps unless_wp select_inv mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: tcb_sched_action) crunch valid_etcbs [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete valid_etcbs - (wp: hoare_drop_imps hoare_unless_wp select_inv mapM_x_wp mapM_wp subset_refl + (wp: hoare_drop_imps unless_wp select_inv mapM_x_wp mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: set_object thread_set) crunch simple_sched_action [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete simple_sched_action - (wp: hoare_drop_imps mapM_x_wp mapM_wp select_wp subset_refl + (wp: hoare_drop_imps mapM_x_wp mapM_wp subset_refl simp: unless_def if_fun_split) crunches arch_finalise_cap, prepare_thread_delete, arch_invoke_irq_handler diff --git a/proof/invariant-abstract/ARM/ArchDeterministic_AI.thy b/proof/invariant-abstract/ARM/ArchDeterministic_AI.thy index 0676254896..c89f63aad4 100644 --- a/proof/invariant-abstract/ARM/ArchDeterministic_AI.thy +++ b/proof/invariant-abstract/ARM/ArchDeterministic_AI.thy @@ -34,7 +34,7 @@ crunch valid_list[wp]: invalidate_tlb_by_asid valid_list (wp: crunch_wps preemption_point_inv' simp: crunch_simps filterM_mapM) crunch valid_list[wp]: invoke_untyped valid_list - (wp: crunch_wps preemption_point_inv' hoare_unless_wp mapME_x_wp' + (wp: crunch_wps preemption_point_inv' unless_wp mapME_x_wp' simp: mapM_x_def_bak crunch_simps) crunch valid_list[wp]: invoke_irq_control valid_list diff --git a/proof/invariant-abstract/ARM/ArchDetype_AI.thy b/proof/invariant-abstract/ARM/ArchDetype_AI.thy index 58062ba37e..7224372b96 100644 --- a/proof/invariant-abstract/ARM/ArchDetype_AI.thy +++ b/proof/invariant-abstract/ARM/ArchDetype_AI.thy @@ -89,7 +89,7 @@ next qed lemma empty_fail_freeMemory [Detype_AI_asms]: "empty_fail (freeMemory ptr bits)" - by (simp add: freeMemory_def mapM_x_mapM ef_storeWord) + by (fastforce simp: freeMemory_def mapM_x_mapM ef_storeWord) lemma region_in_kernel_window_detype[simp]: diff --git a/proof/invariant-abstract/ARM/ArchEmptyFail_AI.thy b/proof/invariant-abstract/ARM/ArchEmptyFail_AI.thy index c00e5b0480..f330926110 100644 --- a/proof/invariant-abstract/ARM/ArchEmptyFail_AI.thy +++ b/proof/invariant-abstract/ARM/ArchEmptyFail_AI.thy @@ -37,7 +37,7 @@ context Arch begin global_naming ARM crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: handle_fault (simp: kernel_object.splits option.splits arch_cap.splits cap.splits endpoint.splits bool.splits list.splits thread_state.splits split_def catch_def sum.splits - Let_def wp: zipWithM_x_empty_fail) + Let_def) crunch (empty_fail) empty_fail[wp]: decode_tcb_configure, decode_bind_notification, decode_unbind_notification, @@ -61,11 +61,13 @@ lemma arch_decode_ARMASIDControlMakePool_empty_fail: apply (simp add: isPageFlushLabel_def isPDFlushLabel_def split: arch_cap.splits)+ apply (rule impI) apply (simp add: split_def) - apply wp - apply simp + apply (wp (once), simp) apply (subst bindE_assoc[symmetric]) apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def 
bind_def return_def returnOk_def lift_def liftE_def fail_def gets_def get_def assert_def select_def split: if_split_asm) + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def + bind_def return_def returnOk_def lift_def liftE_def fail_def gets_def + get_def assert_def select_def + split: if_split_asm) by (simp add: Let_def split: cap.splits arch_cap.splits option.splits bool.splits | wp | intro conjI impI allI)+ lemma arch_decode_ARMASIDPoolAssign_empty_fail: @@ -83,9 +85,9 @@ lemma arch_decode_ARMASIDPoolAssign_empty_fail: apply ((simp | wp)+)[1] apply (subst bindE_assoc[symmetric]) apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_def bindE_def - bind_def return_def returnOk_def lift_def liftE_def select_ext_def - gets_def get_def assert_def fail_def) + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_def bindE_def + bind_def return_def returnOk_def lift_def liftE_def select_ext_def + gets_def get_def assert_def fail_def) apply wp done diff --git a/proof/invariant-abstract/ARM/ArchFinalise_AI.thy b/proof/invariant-abstract/ARM/ArchFinalise_AI.thy index 86b085a63c..97391a8e6a 100644 --- a/proof/invariant-abstract/ARM/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/ARM/ArchFinalise_AI.thy @@ -639,7 +639,7 @@ interpretation Finalise_AI_2?: Finalise_AI_2 context Arch begin global_naming ARM crunch irq_node[wp]: arch_finalise_cap "\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch irq_node[wp,Finalise_AI_asms]: prepare_thread_delete "\s. P (interrupt_irq_node s)" @@ -694,7 +694,7 @@ lemma flush_table_empty: flush_table ac aa b word \\rv s. obj_at (empty_table (set (arm_global_pts (arch_state s)))) word s\" apply (clarsimp simp: flush_table_def set_vm_root_def) - apply (wp do_machine_op_obj_at arm_context_switch_empty hoare_whenE_wp + apply (wp do_machine_op_obj_at arm_context_switch_empty whenE_wp | wpc | simp | wps)+ @@ -712,7 +712,7 @@ lemma flush_table_empty: s)) s" and Q'="\_ s. obj_at (empty_table (set (arm_global_pts (arch_state s)))) word s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply simp apply (wp find_pd_for_asid_inv mapM_wp | simp @@ -867,10 +867,10 @@ crunch caps_of_state [wp]: arch_finalise_cap "\s. P (caps_of_state s)" (wp: crunch_wps simp: crunch_simps) crunch obj_at[wp]: set_vm_root, invalidate_tlb_by_asid "\s. P' (obj_at P p s)" - (wp: hoare_whenE_wp simp: crunch_simps) + (wp: whenE_wp simp: crunch_simps) crunch arm_global_pts[wp]: set_vm_root, invalidate_asid_entry "\s. P' (arm_global_pts (arch_state s))" - (wp: hoare_whenE_wp simp: crunch_simps) + (wp: whenE_wp simp: crunch_simps) lemma delete_asid_empty_table_pd: "\\s. 
page_directory_at word s @@ -1137,7 +1137,7 @@ lemma arch_finalise_case_no_lookup: | simp add: vs_cap_ref_simps vs_lookup_pages_eq_at[THEN fun_cong, symmetric] vs_lookup_pages_eq_ap[THEN fun_cong, symmetric])+ - apply (wp hoare_vcg_all_lift unmap_page_unmapped static_imp_wp) + apply (wp hoare_vcg_all_lift unmap_page_unmapped hoare_weak_lift_imp) apply (wpc|wp unmap_page_table_unmapped3 delete_asid_unmapped |simp add:vs_cap_ref_def vs_lookup_pages_eq_at[THEN fun_cong,symmetric] @@ -1366,7 +1366,7 @@ lemma set_asid_pool_obj_at_ptr: lemma valid_arch_state_table_strg: "valid_arch_state s \ asid_pool_at p s \ Some p \ arm_asid_table (arch_state s) ` (dom (arm_asid_table (arch_state s)) - {x}) \ - valid_arch_state (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\)" + valid_arch_state (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\)" apply (clarsimp simp: valid_arch_state_def valid_asid_table_def ran_def) apply (rule conjI, fastforce) apply (erule inj_on_fun_upd_strongerI) @@ -1399,8 +1399,8 @@ lemma vs_lookup1_arch [simp]: lemma vs_lookup_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (erule vs_lookupE) apply clarsimp @@ -1432,8 +1432,8 @@ lemma vs_lookup_empty_table: lemma vs_lookup_pages_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (subst (asm) vs_lookup_pages_def) apply (clarsimp simp: Image_def) @@ -1468,7 +1468,7 @@ lemma set_asid_pool_empty_table_objs: set_asid_pool p Map.empty \\rv s. valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of word2 \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of word2 \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vspace_objs_def @@ -1493,7 +1493,7 @@ lemma set_asid_pool_empty_table_lookup: set_asid_pool p Map.empty \\rv s. valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vs_lookup_def @@ -1515,7 +1515,7 @@ lemma set_asid_pool_empty_valid_asid_map: \ (\p'. \ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\rv s. valid_asid_map (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: valid_asid_map_def vspace_at_asid_def @@ -1547,7 +1547,7 @@ lemma set_asid_pool_invs_table: \ (\p'. \ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\x s. 
invs (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def valid_arch_caps_def) apply (rule hoare_pre) apply (wp valid_irq_node_typ set_asid_pool_typ_at @@ -1602,7 +1602,7 @@ lemma page_table_mapped_wp_weak: apply (simp add: page_table_mapped_def) apply (rule hoare_pre) apply (wp get_pde_wp | wpc)+ - apply (rule_tac Q'="\_. ?P" in hoare_post_imp_R) + apply (rule_tac Q'="\_. ?P" in hoare_strengthen_postE_R) apply wp apply clarsimp apply simp diff --git a/proof/invariant-abstract/ARM/ArchInvariants_AI.thy b/proof/invariant-abstract/ARM/ArchInvariants_AI.thy index 220a57c201..d3efceb2c4 100644 --- a/proof/invariant-abstract/ARM/ArchInvariants_AI.thy +++ b/proof/invariant-abstract/ARM/ArchInvariants_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -47,7 +48,7 @@ lemma iarch_tcb_context_set[simp]: lemma iarch_tcb_set_registers[simp]: "arch_tcb_to_iarch_tcb (arch_tcb_set_registers regs arch_tcb) = arch_tcb_to_iarch_tcb arch_tcb" - by (simp add: arch_tcb_set_registers_def) + by (simp add: arch_tcb_to_iarch_tcb_def) (* These simplifications allows us to keep many arch-specific proofs unchanged. *) @@ -2492,6 +2493,56 @@ lemma vas_valid_asid_table: "valid_arch_state s \ valid_asid_table (asid_table s) s" by (simp add: valid_arch_state_def) +(* sanity check Arch_Kernel_Config_Lemmas version and shadow original name *) +lemma physBase_aligned[simplified pageBitsForSize_simps]: + "is_aligned physBase (pageBitsForSize ARMSuperSection)" + by simp (rule physBase_aligned) + +lemma pptrBase_aligned[simplified pageBitsForSize_simps]: + "is_aligned pptrBase (pageBitsForSize ARMSuperSection)" + by (simp add: is_aligned_def pptrBase_def) + +lemma pptrBaseOffset_aligned[simplified pageBitsForSize_simps]: + "is_aligned pptrBaseOffset (pageBitsForSize ARMSuperSection)" + by (auto simp: pptrBaseOffset_def physBase_aligned pptrBase_aligned + elim: is_aligned_weaken intro: aligned_sub_aligned) + +lemma pageBitsForSize_limit[simplified pageBitsForSize_simps, simp]: + "pageBitsForSize sz \ pageBitsForSize ARMSuperSection" + by (cases sz; simp) + +lemma is_aligned_pptrBaseOffset[simplified pageBitsForSize_simps]: + "is_aligned pptrBaseOffset (pageBitsForSize sz)" + by (cases sz; clarsimp intro!: is_aligned_weaken[OF pptrBaseOffset_aligned]) + +lemma is_aligned_addrFromPPtr_n[simplified pageBitsForSize_simps]: + "\ is_aligned p n; n \ (pageBitsForSize ARMSuperSection) \ + \ is_aligned (Platform.ARM.addrFromPPtr p) n" + by (auto simp: addrFromPPtr_def + elim!: aligned_sub_aligned + intro: is_aligned_weaken pptrBaseOffset_aligned) + +lemma is_aligned_addrFromPPtr: + "is_aligned p pageBits \ is_aligned (Platform.ARM.addrFromPPtr p) pageBits" + by (simp add: is_aligned_addrFromPPtr_n pageBits_def) + +lemma is_aligned_ptrFromPAddr_n[simplified pageBitsForSize_simps]: + "\ is_aligned p n; n \ (pageBitsForSize ARMSuperSection)\ + \ is_aligned (ptrFromPAddr p) n" + by (auto simp: ptrFromPAddr_def + elim!: aligned_add_aligned + intro: is_aligned_weaken pptrBaseOffset_aligned) + +lemma is_aligned_ptrFromPAddr: + "is_aligned p pageBits \ is_aligned (ptrFromPAddr p) pageBits" + by (simp add: is_aligned_ptrFromPAddr_n pageBits_def) + +lemma is_aligned_ptrFromPAddrD[simplified pageBitsForSize_simps]: + "\ is_aligned (ptrFromPAddr b) a; a \ 
(pageBitsForSize ARMSuperSection)\ + \ is_aligned b a" + by (simp add: ptrFromPAddr_def) + (erule is_aligned_addD2, erule is_aligned_weaken[OF pptrBaseOffset_aligned]) + end declare ARM.arch_tcb_context_absorbs[simp] diff --git a/proof/invariant-abstract/ARM/ArchIpc_AI.thy b/proof/invariant-abstract/ARM/ArchIpc_AI.thy index 6b5b69b525..dc384972b7 100644 --- a/proof/invariant-abstract/ARM/ArchIpc_AI.thy +++ b/proof/invariant-abstract/ARM/ArchIpc_AI.thy @@ -319,7 +319,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -441,7 +441,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (wpsimp simp: do_ipc_transfer_def do_normal_transfer_def transfer_caps_def bind_assoc wp: hoare_vcg_all_lift hoare_drop_imps)+ apply (subst ball_conj_distrib) - apply (wpsimp wp: get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wpsimp wp: get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift simp: obj_at_def is_tcb_def)+ apply (simp split: kernel_object.split_asm) @@ -467,7 +467,7 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" by auto end diff --git a/proof/invariant-abstract/ARM/ArchKHeap_AI.thy b/proof/invariant-abstract/ARM/ArchKHeap_AI.thy index 840c68590d..6680999a50 100644 --- a/proof/invariant-abstract/ARM/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/ARM/ArchKHeap_AI.thy @@ -456,7 +456,7 @@ lemma valid_global_objs_lift': apply (rule hoare_pre) apply (rule hoare_use_eq [where f="\s. arm_global_pts (arch_state s)", OF pts]) apply (rule hoare_use_eq [where f="\s. arm_global_pd (arch_state s)", OF pd]) - apply (wp obj ko emp hoare_vcg_const_Ball_lift hoare_ex_wp) + apply (wp obj ko emp hoare_vcg_const_Ball_lift hoare_vcg_ex_lift) apply (clarsimp simp: second_level_tables_def) done @@ -800,20 +800,20 @@ crunch device_state_inv: storeWord "\ms. P (device_state ms)" (* some hyp_ref invariants *) lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def ARM.hyp_refs_of_def) done lemma state_hyp_refs_of_ntfn_update: "\s ep val. 
typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def ARM.hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def split: option.splits) @@ -821,7 +821,7 @@ lemma state_hyp_refs_of_tcb_bound_ntfn_update: lemma state_hyp_refs_of_tcb_state_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def split: option.splits) @@ -829,7 +829,7 @@ lemma state_hyp_refs_of_tcb_state_update: lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ k)\)" by (induction ao rule: arch_kernel_obj.induct; clarsimp simp: typ_at_same_type) @@ -843,7 +843,7 @@ lemma default_tcb_not_live: "\ live (TCB default_tcb)" lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) lemma valid_ioports_lift: diff --git a/proof/invariant-abstract/ARM/ArchKernelInit_AI.thy b/proof/invariant-abstract/ARM/ArchKernelInit_AI.thy index 01d4bf02cf..1bff83ea01 100644 --- a/proof/invariant-abstract/ARM/ArchKernelInit_AI.thy +++ b/proof/invariant-abstract/ARM/ArchKernelInit_AI.thy @@ -358,9 +358,8 @@ lemma invs_A: apply (clarsimp simp: valid_global_objs_def state_defs) apply (clarsimp simp: valid_ao_at_def obj_at_def empty_table_def pde_ref_def valid_pde_mappings_def valid_vso_at_def) - apply (simp add: kernel_base_def kernel_mapping_slots_def - Platform.ARM.addrFromPPtr_def pptrBaseOffset_def - pptrBase_def physBase_def pageBits_def is_aligned_def) + apply (simp add: kernel_mapping_slots_def kernel_base_def) + apply (rule is_aligned_addrFromPPtr_n; simp add: pageBits_def is_aligned_def) apply (rule conjI) apply (simp add: valid_kernel_mappings_def state_defs valid_kernel_mappings_if_pd_def pde_ref_def diff --git a/proof/invariant-abstract/ARM/ArchRetype_AI.thy b/proof/invariant-abstract/ARM/ArchRetype_AI.thy index 53df899228..f157006b1d 100644 --- a/proof/invariant-abstract/ARM/ArchRetype_AI.thy +++ b/proof/invariant-abstract/ARM/ArchRetype_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -57,17 +58,17 @@ declare store_pde_state_hyp_refs_of [wp] (* These also prove facts about copy_global_mappings *) crunch pspace_aligned[wp]: init_arch_objects "pspace_aligned" - (ignore: clearMemory wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory wp: crunch_wps unless_wp) crunch pspace_distinct[wp]: init_arch_objects "pspace_distinct" - (ignore: clearMemory wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory wp: 
crunch_wps unless_wp) crunch mdb_inv[wp]: init_arch_objects "\s. P (cdt s)" - (ignore: clearMemory wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory wp: crunch_wps unless_wp) crunch valid_mdb[wp]: init_arch_objects "valid_mdb" - (ignore: clearMemory wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory wp: crunch_wps unless_wp) crunch cte_wp_at[wp]: init_arch_objects "\s. P (cte_wp_at P' p s)" - (ignore: clearMemory wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory wp: crunch_wps unless_wp) crunch typ_at[wp]: init_arch_objects "\s. P (typ_at T p s)" - (ignore: clearMemory wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory wp: crunch_wps unless_wp) lemma mdb_cte_at_store_pde[wp]: "\\s. mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)\ @@ -126,14 +127,14 @@ lemma get_pde_wellformed[wp]: done crunch valid_objs[wp]: init_arch_objects "valid_objs" - (ignore: clearMemory wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory wp: crunch_wps unless_wp) lemma set_pd_arch_state[wp]: "\valid_arch_state\ set_pd ptr val \\rv. valid_arch_state\" by (rule set_pd_valid_arch) crunch valid_arch_state[wp]: init_arch_objects "valid_arch_state" - (ignore: clearMemory set_object wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory set_object wp: crunch_wps unless_wp) lemmas init_arch_objects_valid_cap[wp] = valid_cap_typ [OF init_arch_objects_typ_at] @@ -341,7 +342,7 @@ lemma mapM_x_store_pde_eq_kernel_mappings_restr: \ ko_at (ArchObj (PageDirectory pdv')) pd' s \ pdv (ucast x) = pdv' (ucast x)))\" apply (induct xs rule: rev_induct, simp_all add: mapM_x_Nil mapM_x_append mapM_x_singleton) - apply (erule hoare_seq_ext[rotated]) + apply (erule bind_wp_fwd) apply (simp add: store_pde_def set_pd_def set_object_def cong: bind_cong) apply (wp get_object_wp get_pde_wp) apply (clarsimp simp: obj_at_def split del: if_split) @@ -377,7 +378,7 @@ lemma copy_global_equal_kernel_mappings_restricted: copy_global_mappings pd \\rv s. equal_kernel_mappings (s \ kheap := restrict_map (kheap s) (- S) \)\" apply (simp add: copy_global_mappings_def) - apply (rule hoare_seq_ext [OF _ gets_sp]) + apply (rule bind_wp [OF _ gets_sp]) apply (rule hoare_chain) apply (rule hoare_vcg_conj_lift) apply (rule_tac P="global_pd \ (insert pd S)" in hoare_vcg_prop) @@ -447,9 +448,9 @@ lemma copy_global_invs_mappings_restricted: apply (simp add: valid_pspace_def pred_conj_def) apply (rule hoare_conjI, wp copy_global_equal_kernel_mappings_restricted) apply (clarsimp simp: global_refs_def) - apply (rule valid_prove_more, rule hoare_vcg_conj_lift, rule hoare_TrueI) + apply (rule hoare_post_add, rule hoare_vcg_conj_lift, rule hoare_TrueI) apply (simp add: copy_global_mappings_def valid_pspace_def) - apply (rule hoare_seq_ext [OF _ gets_sp]) + apply (rule bind_wp [OF _ gets_sp]) apply (rule hoare_strengthen_post) apply (rule mapM_x_wp[where S="{x. 
kernel_base >> 20 \ x \ x < 2 ^ (pd_bits - 2)}"]) @@ -508,7 +509,7 @@ lemma mapM_copy_global_invs_mappings_restricted: apply (fold all_invs_but_equal_kernel_mappings_restricted_eq) apply (induct pds, simp_all only: mapM_x_Nil mapM_x_Cons K_bind_def) apply wpsimp - apply (rule hoare_seq_ext, assumption, thin_tac "P" for P) + apply (rule bind_wp, assumption, thin_tac "P" for P) apply (wpsimp wp: copy_global_invs_mappings_restricted) done diff --git a/proof/invariant-abstract/ARM/ArchTcb_AI.thy b/proof/invariant-abstract/ARM/ArchTcb_AI.thy index 77e2c0c6f3..dd00e5cba4 100644 --- a/proof/invariant-abstract/ARM/ArchTcb_AI.thy +++ b/proof/invariant-abstract/ARM/ArchTcb_AI.thy @@ -187,7 +187,6 @@ lemma cap_delete_no_cap_to_obj_asid[wp, Tcb_AI_asms]: apply (simp add: cap_delete_def no_cap_to_obj_with_diff_ref_ran_caps_form) apply wp - apply simp apply (rule use_spec) apply (rule rec_del_all_caps_in_range) apply (simp add: table_cap_ref_def[simplified, split_simps cap.split] @@ -236,18 +235,19 @@ lemma tc_invs[Tcb_AI_asms]: \\rv. invs\" apply (rule hoare_gen_asm)+ apply (simp add: split_def set_mcpriority_def cong: option.case_cong) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply wp - (* takes long: *) - apply ((simp only: simp_thms + (* takes long: (30 sec) *) + apply ((simp only: simp_thms cong: conj_cong + | (strengthen invs_strengthen)+ | rule wp_split_const_if wp_split_const_if_R - hoare_vcg_all_lift_R + hoare_vcg_all_liftE_R hoare_vcg_E_elim hoare_vcg_const_imp_lift_R hoare_vcg_R_conj | (wp out_invs_trivial case_option_wpE cap_delete_deletes cap_delete_valid_cap cap_insert_valid_cap out_cte_at cap_insert_cte_at cap_delete_cte_at out_valid_cap - hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_invs_trivial[OF ball_tcb_cap_casesI] hoare_vcg_all_lift thread_set_valid_cap out_emptyable @@ -261,10 +261,9 @@ lemma tc_invs[Tcb_AI_asms]: checked_insert_no_cap_to out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid - static_imp_wp static_imp_conj_wp)[1] + hoare_weak_lift_imp hoare_weak_lift_imp_conj)[1] | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def - del: hoare_True_E_R | wpc | strengthen use_no_cap_to_obj_asid_strg tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] diff --git a/proof/invariant-abstract/ARM/ArchUntyped_AI.thy b/proof/invariant-abstract/ARM/ArchUntyped_AI.thy index 0e5b8a32ea..9f7072f4f2 100644 --- a/proof/invariant-abstract/ARM/ArchUntyped_AI.thy +++ b/proof/invariant-abstract/ARM/ArchUntyped_AI.thy @@ -421,7 +421,7 @@ proof - Some (ArchObj (PageDirectory pd))" let ?ko' = "ArchObj (PageDirectory (pd(ucast (pde_ptr && mask pd_bits >> 2) := pde)))" - let ?s' = "s\kheap := kheap s(pde_ptr && ~~ mask pd_bits \ ?ko')\" + let ?s' = "s\kheap := (kheap s)(pde_ptr && ~~ mask pd_bits \ ?ko')\" have typ_at: "\T p. typ_at T p s \ typ_at T p ?s'" using pd by (clarsimp simp: obj_at_def a_type_def) @@ -475,7 +475,7 @@ lemma copy_global_mappings_nonempty_table: (set (second_level_tables (arch_state s)))) r s) \ valid_global_objs s \ valid_arch_state s \ pspace_aligned s\" apply (simp add: copy_global_mappings_def) - apply (rule hoare_seq_ext [OF _ gets_sp]) + apply (rule bind_wp [OF _ gets_sp]) apply (rule hoare_strengthen_post) apply (rule mapM_x_wp[where S="{x. 
kernel_base >> 20 \ x \ x < 2 ^ (pd_bits - 2)}"]) @@ -531,7 +531,7 @@ lemma init_arch_objects_nonempty_table[Untyped_AI_assms, wp]: apply (rule hoare_gen_asm) apply (simp add: init_arch_objects_def split del: if_split) apply (rule hoare_pre) - apply (wp hoare_unless_wp | wpc | simp add: reserve_region_def second_level_tables_def)+ + apply (wp unless_wp | wpc | simp add: reserve_region_def second_level_tables_def)+ apply (clarsimp simp: obj_bits_api_def default_arch_object_def pd_bits_def pageBits_def) done @@ -562,7 +562,7 @@ lemma set_pd_cte_wp_at_iin[wp]: crunch cte_wp_at_iin[wp]: init_arch_objects "\s. P (cte_wp_at (P' (interrupt_irq_node s)) p s)" - (ignore: clearMemory wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory wp: crunch_wps unless_wp) lemmas init_arch_objects_ex_cte_cap_wp_to = init_arch_objects_excap diff --git a/proof/invariant-abstract/ARM/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/ARM/ArchVSpaceEntries_AI.thy index 03a62391a0..4495f22940 100644 --- a/proof/invariant-abstract/ARM/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/ARM/ArchVSpaceEntries_AI.thy @@ -138,7 +138,7 @@ lemma mapM_x_store_pte_updates: apply wp apply (clarsimp simp: obj_at_def fun_upd_idem) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: store_pte_def set_pt_def set_object_def) apply (wp get_pt_wp get_object_wp) @@ -231,7 +231,7 @@ lemma mapM_x_store_pde_updates: apply wp apply (clarsimp simp: obj_at_def fun_upd_idem) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: store_pde_def set_pd_def set_object_def) apply (wp get_pd_wp get_object_wp) @@ -389,7 +389,7 @@ lemma unmap_page_table_valid_pdpt_objs[wp]: apply (simp add: page_table_mapped_def) apply (wp get_pde_wp | wpc)+ apply simp - apply (rule hoare_post_impErr, rule valid_validE, + apply (rule hoare_strengthen_postE, rule valid_validE, rule find_pd_for_asid_inv, simp_all) done @@ -402,7 +402,7 @@ lemma set_simple_ko_valid_pdpt_objs[wp]: split: kernel_object.splits) crunch valid_pdpt_objs[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_pdpt_objs" - (wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object) + (wp: crunch_wps preemption_point_inv simp: crunch_simps unless_def ignore:set_object) lemma preemption_point_valid_pdpt_objs[wp]: "\valid_pdpt_objs\ preemption_point \\rv. valid_pdpt_objs\" @@ -455,11 +455,10 @@ lemma mapM_x_copy_pde_updates: done lemma copy_global_mappings_valid_pdpt_objs[wp]: - notes hoare_pre [wp_pre del] - shows "\valid_pdpt_objs and valid_arch_state and pspace_aligned and K (is_aligned p pd_bits)\ copy_global_mappings p \\rv. valid_pdpt_objs\" + including classic_wp_pre apply (rule hoare_gen_asm) apply (simp add: copy_global_mappings_def) apply wp @@ -699,7 +698,7 @@ lemma invoke_untyped_valid_pdpt[wp]: crunch valid_pdpt_objs[wp]: perform_asid_pool_invocation, perform_asid_control_invocation "valid_pdpt_objs" - (ignore: delete_objects wp: delete_objects_valid_pdpt static_imp_wp) + (ignore: delete_objects wp: delete_objects_valid_pdpt hoare_weak_lift_imp) abbreviation (input) "safe_pt_range \ \slots s. obj_at (\ko. \pt. ko = ArchObj (PageTable pt) @@ -1074,8 +1073,6 @@ lemma perform_invocation_valid_pdpt[wp]: \\rv. 
valid_pdpt_objs\" apply (cases i, simp_all) apply (wp send_signal_interrupt_states | simp)+ - apply (clarsimp simp: invocation_duplicates_valid_def) - apply (wp | wpc | simp)+ apply (simp add: arch_perform_invocation_def) apply (rule hoare_pre) apply (wp | wpc | simp)+ @@ -1263,7 +1260,7 @@ lemma ensure_safe_mapping_ensures[wp]: apply (rule_tac Q' = "\r s. \x \ set slots. obj_at (\ko. \pt. ko = ArchObj (PageTable pt) \ pt (ucast (x && mask pt_bits >> 2)) = pte.InvalidPTE) - (hd (slot # slots) && ~~ mask pt_bits) s" in hoare_post_imp_R) + (hd (slot # slots) && ~~ mask pt_bits) s" in hoare_strengthen_postE_R) apply (wp mapME_x_accumulate_checks[where Q = "\s. valid_pdpt_objs s"] ) apply (wp get_master_pte_wp| wpc | simp)+ apply clarsimp @@ -1320,7 +1317,7 @@ lemma ensure_safe_mapping_ensures[wp]: apply (rule_tac Q' = "\r s. \x \ set x22. obj_at (\ko. \pd. ko = ArchObj (PageDirectory pd) \ pd (ucast (x && mask pd_bits >> 2)) = InvalidPDE) - (x21 && ~~ mask pd_bits) s" in hoare_post_imp_R) + (x21 && ~~ mask pd_bits) s" in hoare_strengthen_postE_R) apply (wp mapME_x_accumulate_checks[where Q = "\s. valid_pdpt_objs s"] ) apply (wp get_master_pde_wp| wpc | simp)+ apply clarsimp @@ -1457,7 +1454,7 @@ proof - \ \PageMap\ apply (rename_tac dev pg_ptr rights sz pg_map) apply (wpsimp simp: Let_def invocation_duplicates_valid_def page_inv_duplicates_valid_def - wp: ensure_safe_mapping_ensures[THEN hoare_post_imp_R] + wp: ensure_safe_mapping_ensures[THEN hoare_strengthen_postE_R] check_vp_wpR hoare_vcg_if_lift_ER find_pd_for_asid_lookup_pd_wp) apply (fastforce simp: invs_psp_aligned page_directory_at_aligned_pd_bits word_not_le sz valid_cap_def valid_arch_cap_def lookup_pd_slot_eq @@ -1515,15 +1512,14 @@ lemma handle_invocation_valid_pdpt[wp]: crunch valid_pdpt[wp]: handle_event, activate_thread,switch_to_thread, switch_to_idle_thread "valid_pdpt_objs" - (simp: crunch_simps wp: crunch_wps alternative_valid select_wp OR_choice_weak_wp select_ext_weak_wp + (simp: crunch_simps wp: crunch_wps OR_choice_weak_wp select_ext_weak_wp ignore: without_preemption getActiveIRQ resetTimer ackInterrupt getFAR getDFSR getIFSR OR_choice set_scheduler_action clearExMonitor) lemma schedule_valid_pdpt[wp]: "\valid_pdpt_objs\ schedule :: (unit,unit) s_monad \\_. valid_pdpt_objs\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma call_kernel_valid_pdpt[wp]: diff --git a/proof/invariant-abstract/ARM/ArchVSpace_AI.thy b/proof/invariant-abstract/ARM/ArchVSpace_AI.thy index 66c5b835be..a2d3b612be 100644 --- a/proof/invariant-abstract/ARM/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/ARM/ArchVSpace_AI.thy @@ -1030,7 +1030,7 @@ lemma find_pd_for_asid_lookup_ref: lemma find_pd_for_asid_lookup[wp]: "\\\ find_pd_for_asid asid \\pd. \\ pd\,-" - apply (rule hoare_post_imp_R, rule find_pd_for_asid_lookup_ref) + apply (rule hoare_strengthen_postE_R, rule find_pd_for_asid_lookup_ref) apply auto done @@ -1045,7 +1045,7 @@ proof - \\pd. pspace_aligned and page_directory_at pd\, -" by (rule hoare_pre) (wp, simp) show ?thesis - apply (rule hoare_post_imp_R, rule x) + apply (rule hoare_strengthen_postE_R, rule x) apply clarsimp apply (erule page_directory_pde_atI) prefer 2 @@ -1637,8 +1637,8 @@ lemma svr_invs [wp]: "\invs\ set_vm_root t' \\_. invs\" apply (simp add: set_vm_root_def) apply (rule hoare_pre) - apply (wp hoare_whenE_wp find_pd_for_asid_inv hoare_vcg_all_lift | wpc | simp add: split_def)+ - apply (rule_tac Q'="\_ s. 
invs s \ x2 \ mask asid_bits" in hoare_post_imp_R) + apply (wp whenE_wp find_pd_for_asid_inv hoare_vcg_all_lift | wpc | simp add: split_def)+ + apply (rule_tac Q'="\_ s. invs s \ x2 \ mask asid_bits" in hoare_strengthen_postE_R) prefer 2 apply simp apply (rule valid_validE_R) @@ -1677,7 +1677,7 @@ end locale vs_lookup_map_some_pdes = Arch + fixes pd pdp s s' S T pd' - defines "s' \ s\kheap := kheap s(pdp \ ArchObj (PageDirectory pd'))\" + defines "s' \ s\kheap := (kheap s)(pdp \ ArchObj (PageDirectory pd'))\" assumes refs: "vs_refs (ArchObj (PageDirectory pd')) = (vs_refs (ArchObj (PageDirectory pd)) - T) \ S" assumes old: "kheap s pdp = Some (ArchObj (PageDirectory pd))" @@ -1790,7 +1790,7 @@ lemma set_pd_vspace_objs_map: lemma simpler_set_pd_def: "set_pd p pd = (\s. if \pd. kheap s p = Some (ArchObj (PageDirectory pd)) - then ({((), s\kheap := kheap s(p \ ArchObj (PageDirectory pd))\)}, + then ({((), s\kheap := (kheap s)(p \ ArchObj (PageDirectory pd))\)}, False) else ({}, True))" apply (rule ext) @@ -1847,7 +1847,7 @@ lemma set_pd_valid_vs_lookup_map: apply (drule vs_lookup_pages_apI) apply (simp split: if_split_asm) apply (simp+)[2] - apply (frule_tac s="s\kheap := kheap s(p \ ArchObj (PageDirectory pd))\" + apply (frule_tac s="s\kheap := (kheap s)(p \ ArchObj (PageDirectory pd))\" in vs_lookup_pages_pdI[rotated -1]) apply (simp del: fun_upd_apply)+ apply (frule vs_lookup_pages_apI) @@ -2760,8 +2760,8 @@ lemma simpler_store_pde_def: "store_pde p pde s = (case kheap s (p && ~~ mask pd_bits) of Some (ArchObj (PageDirectory pd)) => - ({((), s\kheap := (kheap s((p && ~~ mask pd_bits) \ - (ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 2) := pde))))))\)}, False) + ({((), s\kheap := (kheap s)(p && ~~ mask pd_bits \ + ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 2) := pde))))\)}, False) | _ => ({}, True))" by (auto simp: store_pde_def simpler_set_pd_def get_object_def simpler_gets_def assert_def return_def fail_def set_object_def get_def put_def bind_def get_pd_def @@ -2770,7 +2770,7 @@ lemma simpler_store_pde_def: lemma pde_update_valid_vspace_objs: "[|valid_vspace_objs s; valid_pde pde s; pde_ref pde = None; kheap s (p && ~~ mask pd_bits) = Some (ArchObj (PageDirectory pd))|] ==> valid_vspace_objs - (s\kheap := kheap s(p && ~~ mask pd_bits \ ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 2) := pde))))\)" + (s\kheap := (kheap s)(p && ~~ mask pd_bits \ ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 2) := pde))))\)" apply (cut_tac pde=pde and p=p in store_pde_vspace_objs_unmap) apply (clarsimp simp: valid_def) apply (erule allE[where x=s]) @@ -3097,7 +3097,7 @@ lemma mapM_x_swp_store_empty_table': apply (induct slots, simp_all add: mapM_x_Nil mapM_x_Cons) apply wp apply (clarsimp simp: obj_at_def empty_table_def fun_eq_iff) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "\P\ f \Q\" for P f Q) apply (simp add: store_pte_def set_pt_def set_object_def) apply (wp get_object_wp) @@ -3613,7 +3613,7 @@ lemma find_pd_for_asid_lookup_slot [wp]: "\pspace_aligned and valid_vspace_objs\ find_pd_for_asid asid \\rv. \\ (lookup_pd_slot rv vptr && ~~ mask pd_bits)\, -" apply (rule hoare_pre) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_vcg_R_conj) apply (rule find_pd_for_asid_lookup) apply (rule find_pd_for_asid_aligned_pd) @@ -3626,7 +3626,7 @@ lemma find_pd_for_asid_lookup_slot_large_page [wp]: find_pd_for_asid asid \\rv. 
\\ (x + lookup_pd_slot rv vptr && ~~ mask pd_bits)\, -" apply (rule hoare_pre) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_vcg_R_conj) apply (rule hoare_vcg_R_conj) apply (rule find_pd_for_asid_inv [where P="K (x \ set [0, 4 .e. 0x3C] \ is_aligned vptr 24)", THEN valid_validE_R]) @@ -3640,7 +3640,7 @@ lemma find_pd_for_asid_pde_at_add [wp]: "\K (x \ set [0,4 .e. 0x3C] \ is_aligned vptr 24) and pspace_aligned and valid_vspace_objs\ find_pd_for_asid asid \\rv. pde_at (x + lookup_pd_slot rv vptr)\, -" apply (rule hoare_pre) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_vcg_R_conj) apply (rule find_pd_for_asid_inv [where P= "K (x \ set [0, 4 .e. 0x3C] \ is_aligned vptr 24) and pspace_aligned", THEN valid_validE_R]) @@ -3717,7 +3717,7 @@ lemma lookup_pt_slot_cap_to1[wp]: "\invs and \\pd and K (is_aligned pd pd_bits) and K (vptr < kernel_base)\ lookup_pt_slot pd vptr \\rv s. \a b cap. caps_of_state s (a, b) = Some cap \ is_pt_cap cap \ rv && ~~ mask pt_bits \ obj_refs cap\,-" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookup_pt_slot_cap_to) apply auto done @@ -3731,7 +3731,7 @@ lemma lookup_pt_slot_cap_to_multiple1: (\a b. cte_wp_at (\c. is_pt_cap c \ cap_asid c \ None \ (\x. x && ~~ mask pt_bits) ` set [rv , rv + 4 .e. rv + 0x3C] \ obj_refs c) (a, b) s)\, -" apply (rule hoare_gen_asmE) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookup_pt_slot_cap_to) apply (rule conjI, clarsimp) apply (elim exEI) @@ -3759,7 +3759,7 @@ lemma lookup_pt_slot_cap_to_multiple[wp]: and K (is_aligned vptr 16)\ lookup_pt_slot pd vptr \\rv s. \a b. cte_wp_at (\c. (\x. x && ~~ mask pt_bits) ` (\x. x + rv) ` set [0 , 4 .e. 0x3C] \ obj_refs c) (a, b) s\, -" - apply (rule hoare_post_imp_R, rule lookup_pt_slot_cap_to_multiple1) + apply (rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to_multiple1) apply (elim conjE exEI cte_wp_at_weakenE) apply (simp add: subset_eq p_0x3C_shift) done @@ -3794,7 +3794,7 @@ lemma find_pd_for_asid_cap_to: lemma find_pd_for_asid_cap_to1[wp]: "\invs\ find_pd_for_asid asid \\rv s. \a b cap. caps_of_state s (a, b) = Some cap \ lookup_pd_slot rv vptr && ~~ mask pd_bits \ obj_refs cap\, -" - apply (rule hoare_post_imp_R, rule find_pd_for_asid_cap_to) + apply (rule hoare_strengthen_postE_R, rule find_pd_for_asid_cap_to) apply (clarsimp simp: lookup_pd_slot_pd) apply auto done @@ -3804,7 +3804,7 @@ lemma find_pd_for_asid_cap_to2[wp]: \\rv s. \a b. cte_wp_at (\cp. lookup_pd_slot rv vptr && ~~ mask pd_bits \ obj_refs cp \ is_pd_cap cp) (a, b) s\, -" - apply (rule hoare_post_imp_R, rule find_pd_for_asid_cap_to) + apply (rule hoare_strengthen_postE_R, rule find_pd_for_asid_cap_to) apply (clarsimp simp: lookup_pd_slot_pd cte_wp_at_caps_of_state) apply auto done @@ -3812,7 +3812,7 @@ lemma find_pd_for_asid_cap_to2[wp]: lemma find_pd_for_asid_cap_to_multiple[wp]: "\invs and K (is_aligned vptr 24)\ find_pd_for_asid asid \\rv s. \x xa. cte_wp_at (\a. (\x. x && ~~ mask pd_bits) ` (\x. x + lookup_pd_slot rv vptr) ` set [0 , 4 .e. 0x3C] \ obj_refs a) (x, xa) s\, -" - apply (rule hoare_gen_asmE, rule hoare_post_imp_R, rule find_pd_for_asid_cap_to) + apply (rule hoare_gen_asmE, rule hoare_strengthen_postE_R, rule find_pd_for_asid_cap_to) apply (elim exEI, clarsimp simp: cte_wp_at_caps_of_state) apply (simp add: lookup_pd_slot_add_eq) done @@ -3823,7 +3823,7 @@ lemma find_pd_for_asid_cap_to_multiple2[wp]: \\rv s. \x\set [0 , 4 .e. 0x3C]. 
\a b. cte_wp_at (\cp. x + lookup_pd_slot rv vptr && ~~ mask pd_bits \ obj_refs cp \ is_pd_cap cp) (a, b) s\, -" - apply (rule hoare_gen_asmE, rule hoare_post_imp_R, + apply (rule hoare_gen_asmE, rule hoare_strengthen_postE_R, rule find_pd_for_asid_cap_to) apply (intro ballI, elim exEI, clarsimp simp: cte_wp_at_caps_of_state) @@ -3850,7 +3850,7 @@ lemma lookup_pt_slot_cap_to2: lookup_pt_slot pd vptr \\rv s. \oref cref cap. caps_of_state s (oref, cref) = Some cap \ rv && ~~ mask pt_bits \ obj_refs cap \ is_pt_cap cap\, -" - apply (rule hoare_post_imp_R, rule lookup_pt_slot_cap_to) + apply (rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to) apply fastforce done @@ -3860,7 +3860,7 @@ lemma lookup_pt_slot_cap_to_multiple2: \\rv s. \oref cref. cte_wp_at (\c. (\x. x && ~~ mask pt_bits) ` (\x. x + rv) ` set [0 , 4 .e. 0x3C] \ obj_refs c \ is_pt_cap c) (oref, cref) s\, -" - apply (rule hoare_post_imp_R, rule lookup_pt_slot_cap_to_multiple1) + apply (rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to_multiple1) apply (clarsimp simp: upto_enum_step_def image_image field_simps linorder_not_le[symmetric] split: if_split_asm) @@ -4547,8 +4547,7 @@ end locale asid_pool_map = Arch + fixes s ap pool asid pdp pd s' defines "(s' :: ('a::state_ext) state) \ - s\kheap := kheap s(ap \ ArchObj (ASIDPool - (pool(asid \ pdp))))\" + s\kheap := (kheap s)(ap \ ArchObj (ASIDPool (pool(asid \ pdp))))\" assumes ap: "kheap s ap = Some (ArchObj (ASIDPool pool))" assumes new: "pool asid = None" assumes pd: "kheap s pdp = Some (ArchObj (PageDirectory pd))" diff --git a/proof/invariant-abstract/ARM/Machine_AI.thy b/proof/invariant-abstract/ARM/Machine_AI.thy index 83a992ab13..c850e25768 100644 --- a/proof/invariant-abstract/ARM/Machine_AI.thy +++ b/proof/invariant-abstract/ARM/Machine_AI.thy @@ -17,7 +17,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. 
no_irq m" wpc_helper_no_irq @@ -56,7 +56,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -83,13 +83,13 @@ lemma det_getRestartPC: "det getRestartPC" lemma det_setNextPC: "det (setNextPC p)" by (simp add: setNextPC_def det_setRegister) - +(* FIXME empty_fail: make all empty_fail [intro!, wp], and non-conditional ones [simp] *) lemma ef_loadWord: "empty_fail (loadWord x)" - by (simp add: loadWord_def) + by (fastforce simp: loadWord_def) lemma ef_storeWord: "empty_fail (storeWord x y)" - by (simp add: storeWord_def) + by (fastforce simp: storeWord_def) lemma no_fail_getRestartPC: "no_fail \ getRestartPC" @@ -271,8 +271,7 @@ lemma no_fail_invalidateCacheRange_I[simp, wp]: lemma no_fail_invalidateCacheRange_RAM[simp, wp]: "no_fail \ (invalidateCacheRange_RAM s e p)" apply (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def) - apply (rule no_fail_pre, wp no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb, simp) - apply (auto intro: hoare_post_taut) + apply (wpsimp wp: no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb) done lemma no_fail_branchFlushRange[simp, wp]: @@ -318,7 +317,7 @@ lemma no_fail_getActiveIRQ[wp]: "no_fail \ (getActiveIRQ in_kernel)" apply (simp add: getActiveIRQ_def) apply (rule no_fail_pre) - apply (wp non_fail_select) + apply wp apply simp done @@ -327,7 +326,7 @@ definition "irq_state_independent P \ \f s. P s \ lemma getActiveIRQ_inv [wp]: "\irq_state_independent P\ \ \P\ getActiveIRQ in_kernel \\rv. P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -506,7 +505,7 @@ lemma no_irq_seq [wp]: "\ no_irq f; \x. no_irq (g x) \ \ no_irq (f >>= g)" apply (subst no_irq_def) apply clarsimp - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (wp|simp)+ done @@ -613,7 +612,7 @@ lemma no_irq_clearMemory: "no_irq (clearMemory a b)" lemma getActiveIRQ_le_maxIRQ': "\\s. \irq > maxIRQ. irq_masks s irq\ getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply clarsimp apply (rule ccontr) apply (simp add: linorder_not_le) @@ -623,14 +622,14 @@ lemma getActiveIRQ_le_maxIRQ': lemma getActiveIRQ_neq_Some0xFF': "\\\ getActiveIRQ in_kernel \\rv s. rv \ Some 0x3FF\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply simp done lemma getActiveIRQ_neq_non_kernel: "\\\ getActiveIRQ True \\rv s. 
rv \<notin> Some ` non_kernel_IRQs \<rbrace>"
  apply (simp add: getActiveIRQ_def)
-  apply (wp alternative_wp select_wp)
+  apply wp
  apply auto
  done

@@ -731,11 +730,12 @@ lemma empty_fail_cleanCacheRange_PoC[simp, intro!]:

lemma empty_fail_cleanInvalidateCacheRange_RAM[simp, intro!]:
  "empty_fail (cleanInvalidateCacheRange_RAM s e p)"
-  by (simp add: cleanInvalidateCacheRange_RAM_def empty_fail_dsb empty_fail_cleanInvalidateL2Range empty_fail_cleanInvalByVA)
+  by (fastforce simp: cleanInvalidateCacheRange_RAM_def empty_fail_dsb
+                      empty_fail_cleanInvalidateL2Range empty_fail_cleanInvalByVA)

lemma empty_fail_cleanCacheRange_RAM[simp, intro!]:
  "empty_fail (cleanCacheRange_RAM s e p)"
-  by (simp add: cleanCacheRange_RAM_def empty_fail_dsb empty_fail_cleanL2Range)
+  by (fastforce simp: cleanCacheRange_RAM_def empty_fail_dsb empty_fail_cleanL2Range)

lemma empty_fail_invalidateCacheRange_I[simp, intro!]:
  "empty_fail (invalidateCacheRange_I s e p)"
@@ -743,8 +743,8 @@ lemma empty_fail_invalidateCacheRange_I[simp, intro!]:

lemma empty_fail_invalidateCacheRange_RAM[simp, intro!]:
  "empty_fail (invalidateCacheRange_RAM s e p)"
-  by (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def
-                empty_fail_invalidateL2Range empty_fail_invalidateByVA empty_fail_dsb)
+  by (fastforce simp: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def
+                      empty_fail_invalidateL2Range empty_fail_invalidateByVA empty_fail_dsb)

lemma empty_fail_branchFlushRange[simp, intro!]:
  "empty_fail (branchFlushRange s e p)"
@@ -752,16 +752,16 @@ lemma empty_fail_cleanCaches_PoU[simp, intro!]:
  "empty_fail cleanCaches_PoU"
-  by (simp add: cleanCaches_PoU_def empty_fail_dsb empty_fail_clean_D_PoU empty_fail_invalidate_I_PoU)
+  by (fastforce simp: cleanCaches_PoU_def empty_fail_dsb empty_fail_clean_D_PoU empty_fail_invalidate_I_PoU)

lemma empty_fail_cleanInvalidateL1Caches[simp, intro!]:
  "empty_fail cleanInvalidateL1Caches"
-  by (simp add: cleanInvalidateL1Caches_def empty_fail_dsb empty_fail_cleanInvalidate_D_PoC
-                empty_fail_invalidate_I_PoU)
+  by (fastforce simp: cleanInvalidateL1Caches_def empty_fail_dsb empty_fail_cleanInvalidate_D_PoC
+                      empty_fail_invalidate_I_PoU)

lemma empty_fail_clearMemory [simp, intro!]:
  "\<And>a b.
empty_fail (clearMemory a b)" - by (simp add: clearMemory_def mapM_x_mapM ef_storeWord) + by (fastforce simp: clearMemory_def mapM_x_mapM ef_storeWord) end diff --git a/proof/invariant-abstract/ARM_HYP/ArchAInvsPre.thy b/proof/invariant-abstract/ARM_HYP/ArchAInvsPre.thy index 7c986bbed8..102cd05218 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchAInvsPre.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchAInvsPre.thy @@ -19,15 +19,6 @@ lemma get_pd_of_thread_reachable: split: Structures_A.kernel_object.splits if_split_asm option.splits cap.splits arch_cap.splits) -lemma is_aligned_ptrFromPAddrD: -"\is_aligned (ptrFromPAddr b) a; a \ 25\ \ is_aligned b a" - apply (clarsimp simp: ptrFromPAddr_def pptrBaseOffset_def pptrBase_def physBase_def) - apply (erule is_aligned_addD2) - apply (rule is_aligned_weaken[where x = 25]) - apply (simp add: is_aligned_def) - apply simp - done - lemma obj_bits_data_at: "data_at sz (ptrFromPAddr b) s \ obj_bits (the (kheap s (ptrFromPAddr b))) = pageBitsForSize sz" @@ -105,10 +96,6 @@ lemma device_frame_in_device_region: \ device_state (machine_state s) p \ None" by (auto simp add: pspace_respects_device_region_def dom_def device_mem_def) -lemma is_aligned_pptrBaseOffset: -"is_aligned pptrBaseOffset (pageBitsForSize sz)" - by (case_tac sz, simp_all add: pptrBaseOffset_def - pptrBase_def physBase_def is_aligned_def)[1] global_naming Arch named_theorems AInvsPre_asms diff --git a/proof/invariant-abstract/ARM_HYP/ArchAcc_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchAcc_AI.thy index e7b944c711..46e77e3e86 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchAcc_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchAcc_AI.thy @@ -376,13 +376,13 @@ lemma pde_at_aligned_vptr: (* ARMHYP *) (* 0x3C \ 0x78?, 24 \> pageBits + pt_bits - pte_bits << pde_bits)) && ~~ mask pd_bits)") + apply (prop_tac "pd = (x + (pd + (vptr >> pageBits + pt_bits - pte_bits << pde_bits)) && ~~ mask pd_bits)") subgoal apply (subst mask_lower_twice[symmetric, where n=7]) apply (simp add: pd_bits_def pageBits_def) @@ -561,7 +561,7 @@ lemma lookup_pt_slot_ptes_aligned_valid: (* ARMHYP *) apply (frule (2) valid_vspace_objsD) apply clarsimp subgoal for s _ _ x - apply (prove "page_table_at (ptrFromPAddr x) s") + apply (prop_tac "page_table_at (ptrFromPAddr x) s") subgoal by (spec "(ucast (pd + (vptr >> (pageBits + pt_bits - pte_bits) << pde_bits) && mask pd_bits >> pde_bits))";clarsimp) apply (rule conjI) @@ -702,7 +702,7 @@ lemma create_mapping_entries_valid [wp]: apply (erule (1) page_directory_pde_at_lookupI) apply (wpsimp simp: valid_mapping_entries_def superSectionPDE_offsets_def vspace_bits_defs lookup_pd_slot_def) - apply (prove "is_aligned pd 14") + apply (prop_tac "is_aligned pd 14") apply (clarsimp simp: obj_at_def add.commute invs_def valid_state_def valid_pspace_def pspace_aligned_def) apply (drule bspec, blast) apply (clarsimp simp: a_type_def vspace_bits_defs split: kernel_object.splits arch_kernel_obj.splits if_split_asm) @@ -1259,7 +1259,7 @@ lemma valid_objs_caps: lemma simpler_set_pt_def: "set_pt p pt = (\s. if \pt. kheap s p = Some (ArchObj (PageTable pt)) then - ({((), s\kheap := kheap s(p \ ArchObj (PageTable pt))\)}, False) + ({((), s\kheap := (kheap s)(p \ ArchObj (PageTable pt))\)}, False) else ({}, True))" apply (rule ext) apply (clarsimp simp: set_pt_def set_object_def get_object_def assert_def @@ -1275,7 +1275,7 @@ lemma simpler_set_pt_def: lemma valid_set_ptI: "(!!s opt. 
\P s; kheap s p = Some (ArchObj (PageTable opt))\ - \ Q () (s\kheap := kheap s(p \ ArchObj (PageTable pt))\)) + \ Q () (s\kheap := (kheap s)(p \ ArchObj (PageTable pt))\)) \ \P\ set_pt p pt \Q\" by (rule validI) (clarsimp simp: simpler_set_pt_def split: if_split_asm) @@ -1340,14 +1340,14 @@ lemma set_pt_valid_vspace_objs[wp]: apply (clarsimp simp: valid_vspace_objs_def) subgoal for s opt pa rs ao apply (spec pa) - apply (prove "(\\ pa) s") + apply (prop_tac "(\\ pa) s") apply (rule exI[where x=rs]) apply (erule vs_lookupE) apply clarsimp apply (erule vs_lookupI) apply (erule rtrancl.induct, simp) subgoal for \ b c - apply (prove "(b \1 c) s") + apply (prop_tac "(b \1 c) s") apply (thin_tac "_ : rtrancl _")+ apply (clarsimp simp add: vs_lookup1_def obj_at_def vs_refs_def split: if_split_asm) @@ -1582,7 +1582,7 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1590,7 +1590,7 @@ lemma in_user_frame_same_type_upd: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1628,7 +1628,7 @@ lemma valid_machine_state_heap_updI: assumes vm : "valid_machine_state s" assumes tyat : "typ_at type p s" shows - " a_type obj = type \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + " a_type obj = type \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: valid_machine_state_def) subgoal for p apply (rule valid_machine_stateE[OF vm,where p = p]) @@ -1933,7 +1933,7 @@ lemma set_asid_pool_vspace_objs_unmap': lemma valid_vspace_obj_same_type: "\valid_vspace_obj ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) @@ -2212,8 +2212,8 @@ lemma lookup_pt_slot_looks_up [wp]: (* ARMHYP *) apply (clarsimp simp: vs_lookup1_def lookup_pd_slot_def Let_def pd_shifting pd_shifting_dual) apply (rule exI, rule conjI, assumption) subgoal for s _ x - apply (prove "ptrFromPAddr x + ((vptr >> 12) && 0x1FF << 3) && ~~ mask pt_bits = ptrFromPAddr x") - apply (prove "is_aligned (ptrFromPAddr x) 12") + apply (prop_tac "ptrFromPAddr x + ((vptr >> 12) && 0x1FF << 3) && ~~ mask pt_bits = ptrFromPAddr x") + apply (prop_tac "is_aligned (ptrFromPAddr x) 12") apply (drule (2) valid_vspace_objsD) apply clarsimp apply (erule_tac x="ucast (vptr >> pageBits + pt_bits - pte_bits << pde_bits >> pde_bits)" in allE) @@ -2248,7 +2248,7 @@ lemma lookup_pt_slot_reachable [wp]: (* ARMHYP *) apply (simp add: pred_conj_def ex_simps [symmetric] del: ex_simps) apply (rule hoare_vcg_ex_lift_R1) apply (rule hoare_pre) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookup_pt_slot_looks_up) prefer 2 apply clarsimp @@ -2548,39 +2548,6 @@ lemma create_mapping_entries_valid_slots [wp]: (* ARMHYP *) apply (fastforce intro!: aligned_add_aligned is_aligned_shiftl_self) done -lemma is_aligned_addrFromPPtr_n: - "\ is_aligned p n; n \ 28 \ \ 
is_aligned (Platform.ARM_HYP.addrFromPPtr p) n" - apply (simp add: Platform.ARM_HYP.addrFromPPtr_def) - apply (erule aligned_sub_aligned, simp_all) - apply (simp add: pptrBaseOffset_def physBase_def - pptrBase_def pageBits_def) - apply (erule is_aligned_weaken[rotated]) - apply (simp add: is_aligned_def) - done - -lemma is_aligned_addrFromPPtr: - "is_aligned p pageBits \ is_aligned (Platform.ARM_HYP.addrFromPPtr p) pageBits" - by (simp add: is_aligned_addrFromPPtr_n pageBits_def) - -lemma is_aligned_ptrFromPAddr_n: - "\is_aligned x sz; sz\ 28\ - \ is_aligned (ptrFromPAddr x) sz" - apply (simp add:ptrFromPAddr_def pptrBaseOffset_def - pptrBase_def physBase_def) - apply (erule aligned_add_aligned) - apply (erule is_aligned_weaken[rotated]) - apply (simp add:is_aligned_def) - apply (simp add:word_bits_def) - done - -lemma is_aligned_ptrFromPAddr: - "is_aligned p pageBits \ is_aligned (ptrFromPAddr p) pageBits" - by (simp add: is_aligned_ptrFromPAddr_n pageBits_def) - -lemma pbfs_le_28[simp]: - "pageBitsForSize sz \ 28" - by (cases sz; simp) - lemma store_pde_lookup_pd: (* ARMHYP *) "\\\ pd and page_directory_at pd and valid_vspace_objs and (\s. valid_asid_table (arm_asid_table (arch_state s)) s)\ @@ -3167,7 +3134,7 @@ lemma cap_refs_respects_device_region_dmo: lemma machine_op_lift_device_state[wp]: "\\ms. P (device_state ms)\ machine_op_lift f \\_ ms. P (device_state ms)\" - by (clarsimp simp: machine_op_lift_def NonDetMonad.valid_def bind_def + by (clarsimp simp: machine_op_lift_def Nondet_VCG.valid_def bind_def machine_rest_lift_def gets_def simpler_modify_def get_def return_def select_def ignore_failure_def select_f_def split: if_splits) diff --git a/proof/invariant-abstract/ARM_HYP/ArchArch_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchArch_AI.thy index 77e05d1db8..6c893e7618 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchArch_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchArch_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -79,7 +80,7 @@ lemma check_vp_wpR [wp]: check_vp_alignment sz w \P\, -" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (simp add: vmsz_aligned_def) done @@ -87,7 +88,7 @@ lemma check_vp_wpR [wp]: lemma check_vp_inv: "\P\ check_vp_alignment sz w \\_. 
P\" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply simp done @@ -294,7 +295,7 @@ locale asid_update = Arch + fixes ap asid s s' assumes ko: "ko_at (ArchObj (ASIDPool Map.empty)) ap s" assumes empty: "arm_asid_table (arch_state s) asid = None" - defines "s' \ s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\" + defines "s' \ s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\" context asid_update begin @@ -418,7 +419,7 @@ context Arch begin global_naming ARM_HYP lemma valid_arch_state_strg: "valid_arch_state s \ ap \ ran (arm_asid_table (arch_state s)) \ asid_pool_at ap s \ - valid_arch_state (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_arch_state (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\)" apply (clarsimp simp: valid_arch_state_def split: option.split) apply (clarsimp simp: valid_asid_table_def ran_def) apply (fastforce intro!: inj_on_fun_updI) @@ -432,7 +433,7 @@ lemma valid_vs_lookup_at_upd_strg: (\ptr cap. caps_of_state s ptr = Some cap \ ap \ obj_refs cap \ vs_cap_ref cap = Some [VSRef (ucast asid) None]) \ - valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -505,7 +506,7 @@ lemma valid_table_caps_asid_upd [iff]: lemma vs_asid_ref_upd: "([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') - (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\) + (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid_high_bits_of asid \ ap)\\) = (if asid_high_bits_of asid' = asid_high_bits_of asid then ap' = ap else ([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') s)" @@ -531,7 +532,7 @@ lemma cap_insert_simple_arch_caps_ap: and K (cap = ArchObjectCap (ASIDPoolCap ap asid)) \ cap_insert cap src dest \\rv s. 
valid_arch_caps (s\arch_state := arch_state s - \arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := (arm_asid_table (arch_state s))(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg) @@ -543,7 +544,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp set_cap_empty_tables[simplified second_level_tables_def, simplified])+ + get_cap_wp hoare_weak_lift_imp set_cap_empty_tables[simplified second_level_tables_def, simplified])+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -564,7 +565,7 @@ lemma valid_asid_map_asid_upd_strg: "valid_asid_map s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ arm_asid_table (arch_state s) asid = None \ - valid_asid_map (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_asid_map (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -577,7 +578,7 @@ lemma valid_vspace_objs_asid_upd_strg: "valid_vspace_objs s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ arm_asid_table (arch_state s) asid = None \ - valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -603,7 +604,7 @@ lemma cap_insert_ap_invs: arm_asid_table (arch_state s) (asid_high_bits_of asid) = None)\ cap_insert cap src dest \\rv s. invs (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := ((arm_asid_table \ arch_state) s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def) apply (strengthen valid_arch_state_strg valid_vspace_objs_asid_upd_strg @@ -754,11 +755,11 @@ proof - \\rv s. 
invs (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s + \arm_asid_table := ((arm_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\) \ Q (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s + \arm_asid_table := ((arm_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\)\" apply (wp cap_insert_ap_invs) apply simp @@ -868,10 +869,11 @@ proof - qed -lemmas aci_invs[wp] = aci_invs'[where Q=\,simplified hoare_post_taut, OF refl refl refl TrueI TrueI TrueI,simplified] +lemmas aci_invs[wp] = aci_invs'[where Q=\,simplified hoare_TrueI, OF refl refl refl TrueI TrueI TrueI,simplified] lemma obj_at_upd2: - "obj_at P t' (s\kheap := kheap s(t \ v, x \ v')\) = (if t' = x then P v' else obj_at P t' (s\kheap := kheap s(t \ v)\))" + "obj_at P t' (s\kheap := (kheap s)(t \ v, x \ v')\) = + (if t' = x then P v' else obj_at P t' (s\kheap := (kheap s)(t \ v)\))" by (simp add: obj_at_update obj_at_def) lemma vcpu_invalidate_active_hyp_refs_empty[wp]: @@ -941,7 +943,7 @@ lemma ex_nonz_cap_to_vcpu_udpate[simp]: by (simp add: ex_nonz_cap_to_def) lemma caps_of_state_VCPU_update: - "vcpu_at a s \ caps_of_state (s\kheap := kheap s(a \ ArchObj (VCPU b))\) = caps_of_state s" + "vcpu_at a s \ caps_of_state (s\kheap := (kheap s)(a \ ArchObj (VCPU b))\) = caps_of_state s" by (rule ext) (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases obj_at_def) lemma set_vcpu_ex_nonz_cap_to[wp]: @@ -951,7 +953,7 @@ lemma set_vcpu_ex_nonz_cap_to[wp]: done lemma caps_of_state_tcb_arch_update: - "ko_at (TCB y) t' s \ caps_of_state (s\kheap := kheap s(t' \ TCB (y\tcb_arch := f (tcb_arch y)\))\) = caps_of_state s" + "ko_at (TCB y) t' s \ caps_of_state (s\kheap := (kheap s)(t' \ TCB (y\tcb_arch := f (tcb_arch y)\))\) = caps_of_state s" by (rule ext) (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases obj_at_def tcb_cap_cases_def) lemma arch_thread_set_ex_nonz_cap_to[wp]: @@ -1055,8 +1057,8 @@ lemma is_irq_active_sp: lemma restore_virt_timer_valid_irq_states[wp]: "restore_virt_timer vcpu_ptr \valid_irq_states\" apply (clarsimp simp: restore_virt_timer_def is_irq_active_def liftM_def) - apply (repeat_unless \rule hoare_seq_ext[OF _ is_irq_active_sp]\ - \rule hoare_seq_ext_skip, + apply (repeat_unless \rule bind_wp[OF _ is_irq_active_sp]\ + \rule bind_wp_fwd_skip, wpsimp wp: dmo_valid_irq_states simp: isb_def setHCR_def set_cntv_cval_64_def read_cntpct_def set_cntv_off_64_def\) @@ -1144,7 +1146,7 @@ lemma vcpu_enable_valid_machine_state[wp]: crunches vcpu_restore, vcpu_save for valid_machine_state[wp]: valid_machine_state - (wp: mapM_wp_inv simp: do_machine_op_bind dom_mapM ignore: do_machine_op) + (wp: mapM_wp_inv simp: do_machine_op_bind dom_mapM empty_fail_cond ignore: do_machine_op) crunches associate_vcpu_tcb for valid_machine_state[wp]: valid_machine_state @@ -1280,7 +1282,7 @@ crunch inv[wp]: ensure_safe_mapping, create_mapping_entries "P" crunch_ignore (add: select_ext) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps) + (wp: crunch_wps select_ext_weak_wp simp: crunch_simps) lemma create_mappings_empty [wp]: @@ -1339,13 +1341,13 @@ lemma create_mapping_entries_parent_for_refs: pte_bits_def pde_bits_def) apply (rule hoare_pre) apply wp - apply (rule hoare_post_imp_R, rule lookup_pt_slot_cap_to) + apply (rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to) apply (elim exEI) apply (clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def) apply simp apply (rule hoare_pre) apply wp - apply 
(rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookup_pt_slot_cap_to_multiple1) apply (elim conjE exEI cte_wp_at_weakenE) apply (clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def @@ -1387,7 +1389,7 @@ lemma find_pd_for_asid_shifting_voodoo: "\pspace_aligned and valid_vspace_objs\ find_pd_for_asid asid \\rv s. v >> 21 = rv + (v >> 21 << 3) && mask pd_bits >> 3\,-" - apply (rule hoare_post_imp_R, + apply (rule hoare_strengthen_postE_R, rule find_pd_for_asid_aligned_pd, simp add: vspace_bits_defs) apply (subst pd_shifting_dual[simplified vspace_bits_defs, simplified], simp) apply (rule word_eqI) @@ -1406,7 +1408,7 @@ lemma find_pd_for_asid_ref_offset_voodoo: \\rv. (ref \ (rv + (v >> 21 << 3) && ~~ mask pd_bits))\,-" apply (rule hoare_gen_asmE) apply (rule_tac Q'="\rv s. is_aligned rv 14 \ (ref \ rv) s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: ucast_ucast_mask mask_asid_low_bits_ucast_ucast) apply (fold asid_low_bits_def) @@ -1553,7 +1555,7 @@ lemma create_mapping_entries_same_refs_ex: lemma find_pd_for_asid_lookup_pd_wp: "\ \s. valid_vspace_objs s \ (\pd. vspace_at_asid asid pd s \ page_directory_at pd s \ (\\ pd) s \ Q pd s) \ find_pd_for_asid asid \ Q \, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_vcg_conj_lift_R[OF find_pd_for_asid_page_directory]) apply (rule hoare_vcg_conj_lift_R[OF find_pd_for_asid_lookup, simplified]) apply (rule hoare_vcg_conj_lift_R[OF find_pd_for_asid_pd_at_asid, simplified]) @@ -1580,7 +1582,7 @@ lemma find_pd_for_asid_pde_unfolded[wp]: "\valid_vspace_objs and pspace_aligned\ find_pd_for_asid asid \\pd. pde_at (pd + (vptr >> 21 << 3))\, -" - apply (rule hoare_post_imp_R, rule find_pd_for_asid_pde) + apply (rule hoare_strengthen_postE_R, rule find_pd_for_asid_pde) apply (simp add: pageBits_def pt_bits_def pde_bits_def) done @@ -1595,7 +1597,7 @@ lemma arch_decode_inv_wf[wp]: apply (rename_tac word1 word2) apply (simp add: arch_decode_invocation_def Let_def decode_mmu_invocation_def split_def cong: if_cong) apply (rule hoare_pre) - apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_wp select_ext_weak_wp| + apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_ext_weak_wp| wpc| simp add: valid_arch_inv_def valid_apinv_def)+)[1] apply (simp add: if_apply_def2 valid_apinv_def) @@ -1646,7 +1648,7 @@ lemma arch_decode_inv_wf[wp]: (\s. descendants_of (snd (excaps!0)) (cdt s) = {}) and cte_wp_at (\c. \idx. c = (cap.UntypedCap False frame pageBits idx)) (snd (excaps!0)) and (\s. 
arm_asid_table (arch_state s) free = None)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: lookup_target_slot_def) apply wp apply (clarsimp simp: cte_wp_at_def) @@ -1656,7 +1658,7 @@ lemma arch_decode_inv_wf[wp]: apply (simp add: asid_bits_def asid_low_bits_def) apply (simp add: asid_bits_def) apply simp - apply (wp ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp|wpc | simp)+ + apply (wp ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp|wpc | simp)+ apply clarsimp apply (rule conjI, fastforce) apply (cases excaps, simp) @@ -1711,7 +1713,7 @@ lemma arch_decode_inv_wf[wp]: apply (cases "isPageFlushLabel (invocation_type label)") apply simp apply (rule hoare_pre) - apply (wp whenE_throwError_wp static_imp_wp hoare_drop_imps) + apply (wp whenE_throwError_wp hoare_weak_lift_imp hoare_drop_imps) apply (simp add: valid_arch_inv_def valid_page_inv_def) apply (wp find_pd_for_asid_pd_at_asid | wpc)+ apply (clarsimp simp: valid_cap_def mask_def) @@ -1727,7 +1729,7 @@ lemma arch_decode_inv_wf[wp]: cong: if_cong) apply (rename_tac word option) apply (rule hoare_pre) - apply ((wp whenE_throwError_wp check_vp_wpR get_master_pde_wp hoare_vcg_all_lift_R + apply ((wp whenE_throwError_wp check_vp_wpR get_master_pde_wp hoare_vcg_all_liftE_R | wpc | simp add: valid_arch_inv_def valid_pti_def unlessE_whenE vs_cap_ref_def split: if_split @@ -1736,14 +1738,14 @@ lemma arch_decode_inv_wf[wp]: apply (rule_tac Q'="\a b. ko_at (ArchObj (PageDirectory pd)) (a + (args ! 0 >> 21 << 3) && ~~ mask pd_bits) b \ pd (ucast (a + (args ! 0 >> 21 << 3) && mask pd_bits >> 3)) = - InvalidPDE \ L word option p pd a b" for L in hoare_post_imp_R[rotated]) + InvalidPDE \ L word option p pd a b" for L in hoare_strengthen_postE_R[rotated]) apply (intro impI) apply (erule impE) apply (clarsimp simp: pageBits_def pde_bits_def pt_bits_def) apply (erule impE) apply (clarsimp simp: pageBits_def pde_bits_def pt_bits_def split:pde.splits) apply assumption - apply ((wp whenE_throwError_wp hoare_vcg_all_lift_R + apply ((wp whenE_throwError_wp hoare_vcg_all_liftE_R find_pd_for_asid_lookup_slot [unfolded lookup_pd_slot_def Let_def] find_pd_for_asid_ref_offset_voodoo find_pd_for_asid_shifting_voodoo find_pd_for_asid_inv @@ -1794,11 +1796,11 @@ lemma arch_decode_inv_wf[wp]: apply (cases "isPDFlushLabel (invocation_type label)") apply simp apply (rule hoare_pre) - apply (wp whenE_throwError_wp static_imp_wp hoare_drop_imp | wpc | simp)+ + apply (wp whenE_throwError_wp hoare_weak_lift_imp hoare_drop_imp | wpc | simp)+ apply (simp add: resolve_vaddr_def) apply (wp get_master_pte_wp get_master_pde_wp whenE_throwError_wp | wpc | simp)+ apply (clarsimp simp: valid_arch_inv_def valid_pdi_def)+ - apply (rule_tac Q'="\pd' s. vspace_at_asid x2 pd' s \ x2 \ mask asid_bits \ x2 \ 0" in hoare_post_imp_R) + apply (rule_tac Q'="\pd' s. vspace_at_asid x2 pd' s \ x2 \ mask asid_bits \ x2 \ 0" in hoare_strengthen_postE_R) apply wp apply clarsimp apply (wp | wpc)+ diff --git a/proof/invariant-abstract/ARM_HYP/ArchCNodeInv_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchCNodeInv_AI.thy index ffe0163e0b..8a95e71fe3 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchCNodeInv_AI.thy @@ -556,7 +556,7 @@ context Arch begin global_naming ARM_HYP lemma post_cap_delete_pre_is_final_cap': "\rv s'' rva s''a s. 
\valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def arch_cap_cleanup_opt_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -633,7 +633,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) @@ -801,7 +801,7 @@ qed lemmas rec_del_invs'[CNodeInv_AI_assms] = rec_del_invs'' [where Q=\, - simplified hoare_post_taut pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] + simplified hoare_TrueI pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] end diff --git a/proof/invariant-abstract/ARM_HYP/ArchCSpacePre_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchCSpacePre_AI.thy index d7eff35935..117d79537e 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchCSpacePre_AI.thy @@ -171,7 +171,7 @@ lemma valid_arch_mdb_simple: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by auto lemma valid_arch_mdb_free_index_update: @@ -195,34 +195,34 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by auto lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by auto lemma valid_arch_mdb_free_index_update': "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by auto lemma valid_arch_mdb_weak_derived_update: "\s capa. 
\valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by auto lemma valid_arch_mdb_tcb_cnode_update: "valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb ((is_original_cap s) ((t, tcb_cnode_index 2) := True)) - (caps_of_state s((t, tcb_cnode_index 2) \ ReplyCap t True r))" + ((caps_of_state s)((t, tcb_cnode_index 2) \ ReplyCap t True r))" by auto lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -255,10 +255,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by auto diff --git a/proof/invariant-abstract/ARM_HYP/ArchCSpace_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchCSpace_AI.thy index 211db78a86..b0d3ab04b2 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchCSpace_AI.thy @@ -183,20 +183,20 @@ lemma is_derived_is_cap: (* FIXME: move to CSpace_I near lemma vs_lookup1_tcb_update *) lemma vs_lookup_pages1_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages1 s" + vs_lookup_pages1 (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) (* FIXME: move to CSpace_I near lemma vs_lookup_tcb_update *) lemma vs_lookup_pages_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages s" by (clarsimp simp add: vs_lookup_pages_def vs_lookup_pages1_tcb_update) (* FIXME: move to CSpace_I near lemma vs_lookup1_cnode_update *) lemma vs_lookup_pages1_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ CNode m cs')\) = + vs_lookup_pages1 (s\kheap := (kheap s)(p \ CNode m cs')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) @@ -204,7 +204,7 @@ lemma vs_lookup_pages1_cnode_update: (* FIXME: move to CSpace_I near lemma vs_lookup_cnode_update *) lemma vs_lookup_pages_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages (s\kheap := kheap s(p \ CNode n cs')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ CNode n cs')\) = vs_lookup_pages s" by (clarsimp simp: vs_lookup_pages_def dest!: vs_lookup_pages1_cnode_update[where m=n and cs'=cs']) diff --git a/proof/invariant-abstract/ARM_HYP/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchDetSchedAux_AI.thy index 19db55399a..35d493914d 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchDetSchedAux_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -16,7 +17,7 @@ crunches init_arch_objects for 
exst[wp]: "\s. P (exst s)" and ct[wp]: "\s. P (cur_thread s)" and valid_etcbs[wp, DetSchedAux_AI_assms]: valid_etcbs - (wp: crunch_wps hoare_unless_wp) + (wp: crunch_wps unless_wp) crunch ct[wp, DetSchedAux_AI_assms]: invoke_untyped "\s. P (cur_thread s)" (wp: crunch_wps dxo_wp_weak preemption_point_inv mapME_x_inv_wp @@ -101,9 +102,9 @@ crunch ct[wp]: perform_asid_control_invocation "\s. P (cur_thread s)" crunch idle_thread[wp]: perform_asid_control_invocation "\s. P (idle_thread s)" -crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: static_imp_wp) +crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: hoare_weak_lift_imp) -crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: static_imp_wp) +crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: hoare_weak_lift_imp) crunch schedact[wp]: perform_asid_control_invocation "\s :: det_ext state. P (scheduler_action s)" (wp: crunch_wps simp: detype_def detype_ext_def wrap_ext_det_ext_ext_def cap_insert_ext_def ignore: freeMemory) diff --git a/proof/invariant-abstract/ARM_HYP/ArchDetSchedSchedule_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchDetSchedSchedule_AI.thy index 72134094e0..c81bdd0718 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchDetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchDetSchedSchedule_AI.thy @@ -193,7 +193,7 @@ lemma set_vm_root_valid_blocked_ct_in_q[wp]: lemma arch_switch_to_thread_valid_blocked[wp, DetSchedSchedule_AI_assms]: "\valid_blocked and ct_in_q\ arch_switch_to_thread thread \\_. valid_blocked and ct_in_q\" apply (simp add: arch_switch_to_thread_def) - apply (rule hoare_seq_ext)+ + apply (rule bind_wp)+ apply (rule do_machine_op_valid_blocked) apply wpsimp+ done @@ -289,17 +289,17 @@ lemma arch_thread_set_valid_sched[wp]: crunch ct_not_in_q [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete ct_not_in_q - (wp: crunch_wps hoare_drop_imps hoare_unless_wp select_inv mapM_wp + (wp: crunch_wps hoare_drop_imps unless_wp select_inv mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: tcb_sched_action) crunch valid_etcbs [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete valid_etcbs - (wp: hoare_drop_imps hoare_unless_wp select_inv mapM_x_wp mapM_wp subset_refl + (wp: hoare_drop_imps unless_wp select_inv mapM_x_wp mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: set_object thread_set) crunch simple_sched_action [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete simple_sched_action - (wp: hoare_drop_imps mapM_x_wp mapM_wp select_wp subset_refl + (wp: hoare_drop_imps mapM_x_wp mapM_wp subset_refl simp: unless_def if_fun_split) crunches arch_finalise_cap, prepare_thread_delete, arch_invoke_irq_handler diff --git a/proof/invariant-abstract/ARM_HYP/ArchDeterministic_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchDeterministic_AI.thy index 203715a020..bae57561bb 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchDeterministic_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchDeterministic_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -46,7 +47,7 @@ crunch valid_list[wp]: invalidate_tlb_by_asid valid_list ignore: without_preemption filterM ) crunch valid_list[wp]: invoke_untyped valid_list - (wp: crunch_wps preemption_point_inv' hoare_unless_wp mapME_x_wp' + (wp: crunch_wps preemption_point_inv' unless_wp mapME_x_wp' simp: 
mapM_x_def_bak crunch_simps) crunch valid_list[wp]: invoke_irq_control valid_list diff --git a/proof/invariant-abstract/ARM_HYP/ArchDetype_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchDetype_AI.thy index d3230cce06..e1bd9515bf 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchDetype_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchDetype_AI.thy @@ -93,7 +93,7 @@ next qed lemma empty_fail_freeMemory [Detype_AI_asms]: "empty_fail (freeMemory ptr bits)" - by (simp add: freeMemory_def mapM_x_mapM ef_storeWord) + by (fastforce simp: freeMemory_def mapM_x_mapM ef_storeWord) lemma region_in_kernel_window_detype[simp]: diff --git a/proof/invariant-abstract/ARM_HYP/ArchEmptyFail_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchEmptyFail_AI.thy index cb3b1c8715..512664ff72 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchEmptyFail_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchEmptyFail_AI.thy @@ -37,7 +37,8 @@ context Arch begin global_naming ARM_HYP crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: handle_fault (simp: kernel_object.splits option.splits arch_cap.splits cap.splits endpoint.splits bool.splits list.splits thread_state.splits split_def catch_def sum.splits - Let_def wp: zipWithM_x_empty_fail empty_fail_addressTranslateS1) + Let_def + wp: empty_fail_addressTranslateS1) crunch (empty_fail) empty_fail[wp]: decode_tcb_configure, decode_bind_notification, decode_unbind_notification, @@ -65,11 +66,13 @@ lemma arch_decode_ARMASIDControlMakePool_empty_fail: prefer 2 apply (simp add: isPageFlushLabel_def isPDFlushLabel_def split: arch_cap.splits)+ apply (simp add: split_def) - apply wp - apply simp + apply (wp (once), simp) apply (subst bindE_assoc[symmetric]) apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def bind_def return_def returnOk_def lift_def liftE_def fail_def gets_def get_def assert_def select_def split: if_split_asm) + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def bind_def + return_def returnOk_def lift_def liftE_def fail_def gets_def get_def + assert_def select_def + split: if_split_asm) apply (simp add: Let_def split: cap.splits arch_cap.splits option.splits bool.splits | wp | intro conjI impI allI)+ done (* needs tidying up *) @@ -89,9 +92,9 @@ lemma arch_decode_ARMASIDPoolAssign_empty_fail: apply ((simp | wp)+)[1] apply (subst bindE_assoc[symmetric]) apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_def bindE_def - bind_def return_def returnOk_def lift_def liftE_def select_ext_def - gets_def get_def assert_def fail_def) + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_def bindE_def + bind_def return_def returnOk_def lift_def liftE_def select_ext_def + gets_def get_def assert_def fail_def) apply wp+ done diff --git a/proof/invariant-abstract/ARM_HYP/ArchFinalise_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchFinalise_AI.thy index 46e90e275c..c59dd06e05 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchFinalise_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -462,7 +463,7 @@ lemma arch_thread_set_cur_tcb[wp]: "\cur_tcb\ arch_thread_set p lemma cte_wp_at_update_some_tcb: "\kheap s v = Some (TCB tcb) ; tcb_cnode_map tcb = tcb_cnode_map (f tcb)\ - \ cte_wp_at P p (s\kheap := kheap s (v \ TCB (f tcb))\) = cte_wp_at P p 
s" + \ cte_wp_at P p (s\kheap := (kheap s)(v \ TCB (f tcb))\) = cte_wp_at P p s" apply (clarsimp simp: cte_wp_at_cases2 dest!: get_tcb_SomeD) done @@ -657,7 +658,7 @@ lemma arch_thread_set_valid_objs_vcpu_Some[wp]: lemma sym_refs_update_some_tcb: "\kheap s v = Some (TCB tcb) ; refs_of (TCB tcb) = refs_of (TCB (f tcb))\ - \ sym_refs (state_refs_of (s\kheap := kheap s (v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" + \ sym_refs (state_refs_of (s\kheap := (kheap s)(v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" apply (rule_tac f=sym_refs in arg_cong) apply (rule all_ext) apply (clarsimp simp: sym_refs_def state_refs_of_def) @@ -705,7 +706,7 @@ lemma vcpu_invalidate_tcbs_inv[wp]: lemma sym_refs_vcpu_None: assumes sym_refs: "sym_refs (state_hyp_refs_of s)" assumes tcb: "ko_at (TCB tcb) t s" "tcb_vcpu (tcb_arch tcb) = Some vr" - shows "sym_refs (state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_arch := tcb_vcpu_update Map.empty (tcb_arch tcb)\), + shows "sym_refs (state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_arch := tcb_vcpu_update Map.empty (tcb_arch tcb)\), vr \ ArchObj (VCPU (vcpu_tcb_update Map.empty v)))\))" (is "sym_refs (state_hyp_refs_of ?s')") proof - @@ -1397,7 +1398,7 @@ crunches (wp: crunch_wps subset_refl) crunch irq_node[Finalise_AI_asms,wp]: prepare_thread_delete "\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch irq_node[wp]: arch_finalise_cap "\s. P (interrupt_irq_node s)" (simp: crunch_simps wp: crunch_wps) @@ -1470,7 +1471,7 @@ lemma flush_table_empty: flush_table ac aa b word \\rv s. obj_at (empty_table {}) word s\" apply (clarsimp simp: flush_table_def set_vm_root_def) - apply (wp do_machine_op_obj_at arm_context_switch_P_obj_at hoare_whenE_wp hoare_drop_imp + apply (wp do_machine_op_obj_at arm_context_switch_P_obj_at whenE_wp hoare_drop_imp | wpc | simp | wps)+ @@ -1488,7 +1489,7 @@ lemma flush_table_empty: s)) s" and Q'="\_ s. obj_at (empty_table {}) word s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply simp apply (wp find_pd_for_asid_inv mapM_wp | simp @@ -1653,7 +1654,7 @@ lemma set_vm_root_empty[wp]: done crunch obj_at[wp]: invalidate_tlb_by_asid "\s. P' (obj_at P p s)" - (wp: hoare_whenE_wp simp: crunch_simps) + (wp: whenE_wp simp: crunch_simps) lemma set_asid_pool_empty[wp]: "\obj_at (empty_table {}) word\ set_asid_pool x2 pool' \\xb. 
obj_at (empty_table {}) word\" @@ -1878,7 +1879,7 @@ lemma arch_finalise_case_no_lookup: | simp add: vs_cap_ref_simps vs_lookup_pages_eq_at[THEN fun_cong, symmetric] vs_lookup_pages_eq_ap[THEN fun_cong, symmetric])+ - apply (wp hoare_vcg_all_lift unmap_page_unmapped static_imp_wp) + apply (wp hoare_vcg_all_lift unmap_page_unmapped hoare_weak_lift_imp) apply (wpc|wp unmap_page_table_unmapped3 delete_asid_unmapped |simp add:vs_cap_ref_def vs_lookup_pages_eq_at[THEN fun_cong,symmetric] @@ -2108,7 +2109,7 @@ lemma set_asid_pool_obj_at_ptr: lemma valid_arch_state_table_strg: "valid_arch_state s \ asid_pool_at p s \ Some p \ arm_asid_table (arch_state s) ` (dom (arm_asid_table (arch_state s)) - {x}) \ - valid_arch_state (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\)" + valid_arch_state (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\)" apply (clarsimp simp: valid_arch_state_def valid_asid_table_def ran_def split: option.split) apply (rule conjI; clarsimp) apply (rule conjI, fastforce) @@ -2141,8 +2142,8 @@ lemma vs_lookup1_arch [simp]: lemma vs_lookup_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (erule vs_lookupE) apply clarsimp @@ -2174,8 +2175,8 @@ lemma vs_lookup_empty_table: lemma vs_lookup_pages_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (subst (asm) vs_lookup_pages_def) apply (clarsimp simp: Image_def) @@ -2210,7 +2211,7 @@ lemma set_asid_pool_empty_table_objs: set_asid_pool p Map.empty \\rv s. valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of word2 \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of word2 \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vspace_objs_def @@ -2235,7 +2236,7 @@ lemma set_asid_pool_empty_table_lookup: set_asid_pool p Map.empty \\rv s. valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vs_lookup_def @@ -2257,7 +2258,7 @@ lemma set_asid_pool_empty_valid_asid_map: \ (\p'. \ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\rv s. valid_asid_map (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: valid_asid_map_def vspace_at_asid_def @@ -2289,7 +2290,7 @@ lemma set_asid_pool_invs_table: \ (\p'. 
\ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\x s. invs (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def valid_arch_caps_def) apply (rule hoare_pre) apply (wp valid_irq_node_typ set_asid_pool_typ_at @@ -2345,7 +2346,7 @@ lemma page_table_mapped_wp_weak: apply (simp add: page_table_mapped_def) apply (rule hoare_pre) apply (wp get_pde_wp | wpc)+ - apply (rule_tac Q'="\_. ?P" in hoare_post_imp_R) + apply (rule_tac Q'="\_. ?P" in hoare_strengthen_postE_R) apply wp apply clarsimp apply simp diff --git a/proof/invariant-abstract/ARM_HYP/ArchInvariants_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchInvariants_AI.thy index 5e067fcf25..c8b7c5586a 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchInvariants_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchInvariants_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -47,7 +48,7 @@ lemma iarch_tcb_context_set[simp]: lemma iarch_tcb_set_registers[simp]: "arch_tcb_to_iarch_tcb (arch_tcb_set_registers regs arch_tcb) = arch_tcb_to_iarch_tcb arch_tcb" - by (simp add: arch_tcb_set_registers_def) + by (simp add: arch_tcb_to_iarch_tcb_def arch_tcb_set_registers_def) lemmas vspace_bits_defs = pd_bits_def pde_bits_def pt_bits_def pte_bits_def pageBits_def @@ -2575,6 +2576,74 @@ qed lemmas pte_ref_pages_simps[simp] = pte_ref_pages_def[split_simps pte.split] lemmas pde_ref_pages_simps[simp] = pde_ref_pages_def[split_simps pde.split] +(* sanity check Arch_Kernel_Config_Lemmas version and shadow original name *) +lemma physBase_aligned[simplified pageBitsForSize_simps]: + "is_aligned physBase (pageBitsForSize ARMSuperSection)" + by simp (rule physBase_aligned) + +lemma pptrBase_aligned[simplified pageBitsForSize_simps]: + "is_aligned pptrBase (pageBitsForSize ARMSuperSection)" + by (simp add: is_aligned_def pptrBase_def) + +lemma pptrBaseOffset_aligned[simplified pageBitsForSize_simps]: + "is_aligned pptrBaseOffset (pageBitsForSize ARMSuperSection)" + by (auto simp: pptrBaseOffset_def physBase_aligned pptrBase_aligned + elim: is_aligned_weaken intro: aligned_sub_aligned) + +lemma pageBitsForSize_limit[simplified pageBitsForSize_simps, simp]: + "pageBitsForSize sz \ pageBitsForSize ARMSuperSection" + by (cases sz; simp) + +lemma is_aligned_pptrBaseOffset[simplified pageBitsForSize_simps]: + "is_aligned pptrBaseOffset (pageBitsForSize sz)" + by (cases sz; clarsimp intro!: is_aligned_weaken[OF pptrBaseOffset_aligned]) + +lemma is_aligned_addrFromPPtr_n[simplified pageBitsForSize_simps]: + "\ is_aligned p n; n \ (pageBitsForSize ARMSuperSection) \ + \ is_aligned (Platform.ARM_HYP.addrFromPPtr p) n" + by (auto simp: addrFromPPtr_def + elim!: aligned_sub_aligned + intro: is_aligned_weaken pptrBaseOffset_aligned) + +lemma is_aligned_addrFromPPtr: + "is_aligned p pageBits \ is_aligned (Platform.ARM_HYP.addrFromPPtr p) pageBits" + by (simp add: is_aligned_addrFromPPtr_n pageBits_def) + +lemma is_aligned_ptrFromPAddr_n[simplified pageBitsForSize_simps]: + "\ is_aligned p n; n \ (pageBitsForSize ARMSuperSection)\ + \ is_aligned (ptrFromPAddr p) n" + by (auto simp: ptrFromPAddr_def + elim!: aligned_add_aligned + intro: is_aligned_weaken pptrBaseOffset_aligned) + +lemma is_aligned_ptrFromPAddr: + "is_aligned p pageBits \ is_aligned (ptrFromPAddr 
p) pageBits" + by (simp add: is_aligned_ptrFromPAddr_n pageBits_def) + +lemma is_aligned_ptrFromPAddrD[simplified pageBitsForSize_simps]: + "\ is_aligned (ptrFromPAddr b) a; a \ (pageBitsForSize ARMSuperSection)\ + \ is_aligned b a" + by (simp add: ptrFromPAddr_def) + (erule is_aligned_addD2, erule is_aligned_weaken[OF pptrBaseOffset_aligned]) + +lemma addrFromPPtr_mask[simplified ARM_HYP.pageBitsForSize_simps]: + "n \ pageBitsForSize ARMSuperSection + \ addrFromPPtr ptr && mask n = ptr && mask n" + apply (simp add: addrFromPPtr_def) + apply (prop_tac "pptrBaseOffset AND mask n = 0") + apply (rule mask_zero[OF is_aligned_weaken[OF pptrBaseOffset_aligned]], simp) + apply (simp flip: mask_eqs(8)) + done + +lemma ptrFromPAddr_mask[simplified ARM_HYP.pageBitsForSize_simps]: + "n \ pageBitsForSize ARMSuperSection + \ ptrFromPAddr ptr && mask n = ptr && mask n" + apply (simp add: ptrFromPAddr_def) + apply (prop_tac "pptrBaseOffset AND mask n = 0") + apply (rule mask_zero[OF is_aligned_weaken[OF pptrBaseOffset_aligned]], simp) + apply (simp flip: mask_eqs(7)) + done + end declare ARM_HYP.arch_tcb_context_absorbs[simp] diff --git a/proof/invariant-abstract/ARM_HYP/ArchIpc_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchIpc_AI.thy index da5902ddaf..c3206fe5a9 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchIpc_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchIpc_AI.thy @@ -321,7 +321,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -495,7 +495,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (wpsimp simp: do_ipc_transfer_def do_normal_transfer_def transfer_caps_def bind_assoc wp: hoare_vcg_all_lift hoare_drop_imps)+ apply (simp only: ball_conj_distrib[where P="\x. real_cte_at x s" for s]) - apply (wpsimp wp: get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wpsimp wp: get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift simp: obj_at_def is_tcb_def)+ apply (simp split: kernel_object.split_asm) @@ -521,7 +521,7 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" by auto end diff --git a/proof/invariant-abstract/ARM_HYP/ArchKHeap_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchKHeap_AI.thy index 587289b949..f320a301cc 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchKHeap_AI.thy @@ -674,20 +674,20 @@ crunch device_state_inv: storeWord "\ms. P (device_state ms)" (* some hyp_ref invariants *) lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def ARM_HYP.hyp_refs_of_def) done lemma state_hyp_refs_of_ntfn_update: "\s ep val. 
typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def ARM_HYP.hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def split: option.splits) @@ -695,7 +695,7 @@ lemma state_hyp_refs_of_tcb_bound_ntfn_update: lemma state_hyp_refs_of_tcb_state_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def split: option.splits) @@ -712,19 +712,19 @@ lemma valid_vcpu_lift: lemma valid_vcpu_update: "\s ep val. typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def ARM_HYP.hyp_refs_of_def) done lemma valid_vcpu_same_type: "\ valid_vcpu v s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_vcpu v (s\kheap := kheap s(p \ k)\)" + \ valid_vcpu v (s\kheap := (kheap s)(p \ k)\)" by (cases v; case_tac vcpu_tcb; clarsimp simp: valid_vcpu_def typ_at_same_type) lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ k)\)" by (induction ao rule: arch_kernel_obj.induct; clarsimp simp: typ_at_same_type valid_vcpu_same_type) @@ -738,7 +738,7 @@ lemma default_tcb_not_live: "\ live (TCB default_tcb)" lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) lemma valid_ioports_lift: diff --git a/proof/invariant-abstract/ARM_HYP/ArchRetype_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchRetype_AI.thy index 97a20d3a30..545d7f66b3 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchRetype_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchRetype_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -269,7 +270,7 @@ lemma mapM_x_store_pde_eq_kernel_mappings_restr: \ ko_at (ArchObj (PageDirectory pdv')) pd' s \ pdv (ucast x) = pdv' (ucast x)))\" apply (induct xs rule: rev_induct, simp_all add: mapM_x_Nil mapM_x_append mapM_x_singleton) - apply (erule hoare_seq_ext[rotated]) + apply (erule bind_wp_fwd) apply (simp add: store_pde_def set_pd_def set_object_def cong: bind_cong) apply (wp get_object_wp get_pde_wp) apply (clarsimp simp: obj_at_def split del: if_split) @@ -343,7 +344,7 @@ lemma mapM_copy_global_invs_mappings_restricted: apply (fold all_invs_but_equal_kernel_mappings_restricted_eq) apply (induct pds, simp_all only: mapM_x_Nil mapM_x_Cons K_bind_def) 
apply (wp, simp) - apply (rule hoare_seq_ext, assumption, thin_tac "P" for P) + apply (rule bind_wp, assumption, thin_tac "P" for P) apply (rule hoare_conjI) apply (rule hoare_pre, rule copy_global_invs_mappings_restricted) apply clarsimp diff --git a/proof/invariant-abstract/ARM_HYP/ArchSchedule_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchSchedule_AI.thy index 89cdc626a1..d8cc9e5115 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchSchedule_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchSchedule_AI.thy @@ -27,22 +27,6 @@ lemma dmo_mapM_storeWord_0_invs[wp,Schedule_AI_asms]: apply wp apply (clarsimp simp: word_bits_conv) done -(* -global_naming ARM_HYP (*FIXME: arch_split*) -lemma set_vm_root_kheap_arch_state[wp]: - "\\s. P (kheap s) (arm_globals_frame (arch_state s))\ set_vm_root a - \\_ s. P (kheap s) (arm_globals_frame (arch_state s))\" (is "valid ?P _ _") - apply (simp add: set_vm_root_def arm_context_switch_def) - apply (wp | wpcw | simp add: arm_context_switch_def get_hw_asid_def - store_hw_asid_def find_pd_for_asid_assert_def find_free_hw_asid_def - invalidate_hw_asid_entry_def invalidate_asid_def load_hw_asid_def)+ - apply (simp add: whenE_def, intro conjI impI) - apply (wp, simp add: returnOk_def validE_E_def validE_def)+ - apply (simp add: whenE_def, intro conjI[rotated] impI) - apply (wp | simp add: returnOk_def validE_E_def validE_def)+ - apply (wp | simp add: throwError_def validE_R_def validE_def)+ -done -*) crunch device_state_inv[wp]: clearExMonitor "\ms. P (device_state ms)" diff --git a/proof/invariant-abstract/ARM_HYP/ArchTcb_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchTcb_AI.thy index 1a3f9dcd46..7f73b667f3 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchTcb_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchTcb_AI.thy @@ -189,7 +189,6 @@ lemma cap_delete_no_cap_to_obj_asid[wp, Tcb_AI_asms]: apply (simp add: cap_delete_def no_cap_to_obj_with_diff_ref_ran_caps_form) apply wp - apply simp apply (rule use_spec) apply (rule rec_del_all_caps_in_range) apply (rule mp[OF _ obj_ref_none_no_asid(1)[of cap]], simp) @@ -238,18 +237,19 @@ lemma tc_invs[Tcb_AI_asms]: \\rv. 
invs\" apply (rule hoare_gen_asm)+ apply (simp add: split_def set_mcpriority_def cong: option.case_cong) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply wp - (* takes long: *) - apply ((simp only: simp_thms + (* takes long: (30 sec) *) + apply ((simp only: simp_thms cong: conj_cong + | (strengthen invs_strengthen)+ | rule wp_split_const_if wp_split_const_if_R - hoare_vcg_all_lift_R + hoare_vcg_all_liftE_R hoare_vcg_E_elim hoare_vcg_const_imp_lift_R hoare_vcg_R_conj | (wp out_invs_trivial case_option_wpE cap_delete_deletes cap_delete_valid_cap cap_insert_valid_cap out_cte_at cap_insert_cte_at cap_delete_cte_at out_valid_cap - hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_invs_trivial[OF ball_tcb_cap_casesI] hoare_vcg_all_lift thread_set_valid_cap out_emptyable @@ -263,10 +263,9 @@ lemma tc_invs[Tcb_AI_asms]: checked_insert_no_cap_to out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid - static_imp_wp static_imp_conj_wp)[1] + hoare_weak_lift_imp hoare_weak_lift_imp_conj)[1] | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def - del: hoare_True_E_R | wpc | strengthen use_no_cap_to_obj_asid_strg tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] diff --git a/proof/invariant-abstract/ARM_HYP/ArchUntyped_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchUntyped_AI.thy index 2cf17b6acf..03a8673772 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchUntyped_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchUntyped_AI.thy @@ -413,7 +413,7 @@ lemma init_arch_objects_nonempty_table[Untyped_AI_assms, wp]: apply (rule hoare_gen_asm) apply (simp add: init_arch_objects_def split del: if_split) apply (rule hoare_pre) - apply (wp hoare_unless_wp | wpc | simp add: reserve_region_def)+ + apply (wp unless_wp | wpc | simp add: reserve_region_def)+ apply (clarsimp simp: obj_bits_api_def default_arch_object_def pd_bits_def pageBits_def) done diff --git a/proof/invariant-abstract/ARM_HYP/ArchVCPU_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchVCPU_AI.thy index 2a5210fe58..e0a70bc413 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchVCPU_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchVCPU_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2022, UNSW (ABN 57 195 873 197) * * SPDX-License-Identifier: GPL-2.0-only @@ -74,7 +75,7 @@ crunches do_machine_op (wp: valid_cur_vcpu_lift_cur_thread_update valid_cur_vcpu_lift crunch_wps) lemma valid_cur_vcpu_vcpu_update[simp]: - "vcpu_at v s \ valid_cur_vcpu (s\kheap := kheap s(v \ ArchObj (VCPU vcpu))\) = valid_cur_vcpu s" + "vcpu_at v s \ valid_cur_vcpu (s\kheap := (kheap s)(v \ ArchObj (VCPU vcpu))\) = valid_cur_vcpu s" by (clarsimp simp: valid_cur_vcpu_def active_cur_vcpu_of_def pred_tcb_at_def obj_at_def) crunches vcpu_save_reg, vcpu_write_reg, save_virt_timer, vgic_update, vcpu_disable @@ -254,7 +255,7 @@ lemma schedule_valid_cur_vcpu[wp]: (schedule :: (unit, unit) s_monad) \\_. 
valid_cur_vcpu\" unfolding schedule_def allActiveTCBs_def - by (wpsimp wp: alternative_wp select_wp) + by wpsimp crunches cancel_all_ipc, blocked_cancel_ipc, unbind_maybe_notification, cancel_all_signals, bind_notification, fast_finalise, deleted_irq_handler, post_cap_deletion, cap_delete_one, @@ -264,7 +265,7 @@ crunches cancel_all_ipc, blocked_cancel_ipc, unbind_maybe_notification, cancel_a restart, reschedule_required, possible_switch_to, thread_set_priority, reply_from_kernel for arch_state[wp]: "\s. P (arch_state s)" and cur_thread[wp]: "\s. P (cur_thread s)" - (wp: mapM_x_wp_inv thread_set.arch_state select_wp crunch_wps + (wp: mapM_x_wp_inv thread_set.arch_state crunch_wps simp: crunch_simps possible_switch_to_def reschedule_required_def) lemma do_unbind_notification_arch_tcb_at[wp]: @@ -296,7 +297,7 @@ crunches blocked_cancel_ipc, cap_delete_one, cancel_signal lemma reply_cancel_ipc_arch_tcb_at[wp]: "reply_cancel_ipc ntfnptr \arch_tcb_at P t\" unfolding reply_cancel_ipc_def thread_set_def - apply (wpsimp wp: set_object_wp select_wp) + apply (wpsimp wp: set_object_wp) by (clarsimp simp: pred_tcb_at_def obj_at_def get_tcb_def) crunches cancel_ipc, send_ipc, receive_ipc @@ -306,7 +307,7 @@ crunches cancel_ipc, send_ipc, receive_ipc lemma send_fault_ipc_arch_tcb_at[wp]: "send_fault_ipc tptr fault \arch_tcb_at P t\" unfolding send_fault_ipc_def thread_set_def Let_def - by (wpsimp wp: set_object_wp hoare_drop_imps hoare_vcg_all_lift_R + by (wpsimp wp: set_object_wp hoare_drop_imps hoare_vcg_all_liftE_R simp: pred_tcb_at_def obj_at_def get_tcb_def) crunches handle_fault, handle_interrupt, handle_vm_fault, handle_hypervisor_fault, send_signal @@ -337,7 +338,7 @@ crunches send_ipc, send_fault_ipc, receive_ipc, handle_fault, handle_interrupt, crunches init_arch_objects, reset_untyped_cap for arch_state[wp]: "\s. P (arch_state s)" - (wp: crunch_wps preemption_point_inv hoare_unless_wp mapME_x_wp' + (wp: crunch_wps preemption_point_inv unless_wp mapME_x_wp' simp: crunch_simps) crunches invoke_untyped @@ -375,7 +376,7 @@ crunches cap_insert, cap_move crunches suspend, unbind_notification, cap_swap_for_delete for state_hyp_refs_of[wp]: "\s. P (state_hyp_refs_of s)" - (wp: crunch_wps thread_set_hyp_refs_trivial select_wp simp: crunch_simps) + (wp: crunch_wps thread_set_hyp_refs_trivial simp: crunch_simps) lemma prepare_thread_delete_valid_cur_vcpu[wp]: "\\s. 
valid_cur_vcpu s \ sym_refs (state_hyp_refs_of s)\ diff --git a/proof/invariant-abstract/ARM_HYP/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchVSpaceEntries_AI.thy index 11c93b065f..9108a1c2fa 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchVSpaceEntries_AI.thy @@ -147,7 +147,7 @@ lemma mapM_x_store_pte_updates: apply wp apply (clarsimp simp: obj_at_def fun_upd_idem) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: store_pte_def set_pt_def set_object_def) apply (wp get_pt_wp get_object_wp) @@ -240,7 +240,7 @@ lemma mapM_x_store_pde_updates: apply wp apply (clarsimp simp: obj_at_def fun_upd_idem) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: store_pde_def set_pd_def set_object_def) apply (wp get_pd_wp get_object_wp) @@ -393,7 +393,7 @@ lemma unmap_page_table_valid_pdpt_objs[wp]: apply (simp add: page_table_mapped_def) apply (wp get_pde_wp | wpc)+ apply simp - apply (rule hoare_post_impErr, rule valid_validE, + apply (rule hoare_strengthen_postE, rule valid_validE, rule find_pd_for_asid_inv, simp_all) done @@ -410,7 +410,7 @@ lemma set_simple_ko_valid_pdpt_objs[wp]: done crunch valid_pdpt_objs[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_pdpt_objs" - (wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object) + (wp: crunch_wps preemption_point_inv simp: crunch_simps unless_def ignore:set_object) lemma preemption_point_valid_pdpt_objs[wp]: "\valid_pdpt_objs\ preemption_point \\rv. valid_pdpt_objs\" @@ -439,7 +439,7 @@ lemma mapM_x_copy_pde_updates: ucast (f x && mask pd_bits >> 3)) ` set xs then pd y else pd' y)))) \))\ mapM_x (\x. get_pde (p + f x) >>= store_pde (p' + f x)) xs \\_. Q\" - including no_pre + including classic_wp_pre apply (induct xs) apply (simp add: mapM_x_Nil) apply wp @@ -631,7 +631,7 @@ lemma invoke_untyped_valid_pdpt[wp]: crunch valid_pdpt_objs[wp]: perform_asid_pool_invocation, perform_asid_control_invocation "valid_pdpt_objs" - (ignore: delete_objects wp: delete_objects_valid_pdpt static_imp_wp) + (ignore: delete_objects wp: delete_objects_valid_pdpt hoare_weak_lift_imp) abbreviation (input) "safe_pt_range \ \slots s. obj_at (\ko. \pt. ko = ArchObj (PageTable pt) @@ -1023,7 +1023,7 @@ lemma perform_page_directory_valid_pdpt[wp]: done crunch valid_pdpt_objs[wp]: perform_vcpu_invocation "valid_pdpt_objs" - (ignore: delete_objects wp: delete_objects_valid_pdpt static_imp_wp) + (ignore: delete_objects wp: delete_objects_valid_pdpt hoare_weak_lift_imp) lemma perform_invocation_valid_pdpt[wp]: @@ -1033,8 +1033,6 @@ lemma perform_invocation_valid_pdpt[wp]: \\rv. valid_pdpt_objs\" apply (cases i, simp_all) apply (wp send_signal_interrupt_states | simp)+ - apply (clarsimp simp: invocation_duplicates_valid_def) - apply (wp | wpc | simp)+ apply (simp add: arch_perform_invocation_def) apply (rule hoare_pre) apply (wp | wpc | simp)+ @@ -1222,7 +1220,7 @@ lemma ensure_safe_mapping_ensures[wp]: apply (rule_tac Q' = "\r s. \x \ set slots. obj_at (\ko. \pt. ko = ArchObj (PageTable pt) \ pt (ucast (x && mask pt_bits >> 3)) = pte.InvalidPTE) - (hd (slot # slots) && ~~ mask pt_bits) s" in hoare_post_imp_R) + (hd (slot # slots) && ~~ mask pt_bits) s" in hoare_strengthen_postE_R) apply (wp mapME_x_accumulate_checks[where Q = "\s. 
valid_pdpt_objs s"] ) apply (wp get_master_pte_wp| wpc | simp)+ apply clarsimp @@ -1283,7 +1281,7 @@ lemma ensure_safe_mapping_ensures[wp]: apply (rule_tac Q' = "\r s. \x \ set x22. obj_at (\ko. \pd. ko = ArchObj (PageDirectory pd) \ pd (ucast (x && mask pd_bits >> 3)) = InvalidPDE) - (x21 && ~~ mask pd_bits) s" in hoare_post_imp_R) + (x21 && ~~ mask pd_bits) s" in hoare_strengthen_postE_R) apply (wp mapME_x_accumulate_checks[where Q = "\s. valid_pdpt_objs s"] ) apply (wp get_master_pde_wp| wpc | simp)+ apply clarsimp @@ -1422,7 +1420,7 @@ lemma decode_mmu_invocation_valid_pdpt[wp]: \ \PageMap\ apply (rename_tac dev pg_ptr rights sz pg_map) apply (wpsimp simp: Let_def invocation_duplicates_valid_def page_inv_duplicates_valid_def - wp: ensure_safe_mapping_ensures[THEN hoare_post_imp_R] + wp: ensure_safe_mapping_ensures[THEN hoare_strengthen_postE_R] check_vp_wpR hoare_vcg_if_lift_ER find_pd_for_asid_lookup_pd_wp) apply (fastforce simp: invs_psp_aligned page_directory_at_aligned_pd_bits word_not_le sz valid_cap_def valid_arch_cap_def lookup_pd_slot_eq @@ -1513,15 +1511,14 @@ lemma handle_invocation_valid_pdpt[wp]: crunch valid_pdpt[wp]: handle_event, activate_thread,switch_to_thread, switch_to_idle_thread "valid_pdpt_objs" - (simp: crunch_simps wp: crunch_wps alternative_valid select_wp OR_choice_weak_wp select_ext_weak_wp + (simp: crunch_simps wp: crunch_wps OR_choice_weak_wp select_ext_weak_wp ignore: without_preemption getActiveIRQ resetTimer ackInterrupt getFAR getDFSR getIFSR OR_choice set_scheduler_action clearExMonitor) lemma schedule_valid_pdpt[wp]: "\valid_pdpt_objs\ schedule :: (unit,unit) s_monad \\_. valid_pdpt_objs\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma call_kernel_valid_pdpt[wp]: diff --git a/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy index 64c30d1f69..ed409c0815 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -1132,7 +1133,7 @@ lemma find_pd_for_asid_lookup_ref: lemma find_pd_for_asid_lookup[wp]: "\\\ find_pd_for_asid asid \\pd. \\ pd\,-" - apply (rule hoare_post_imp_R, rule find_pd_for_asid_lookup_ref) + apply (rule hoare_strengthen_postE_R, rule find_pd_for_asid_lookup_ref) apply auto done @@ -1146,7 +1147,7 @@ proof - \\pd. pspace_aligned and page_directory_at pd\, -" by (rule hoare_pre) (wp, simp) show ?thesis - apply (rule hoare_post_imp_R, rule x) + apply (rule hoare_strengthen_postE_R, rule x) apply clarsimp apply (erule page_directory_pde_atI) prefer 2 @@ -1217,7 +1218,7 @@ lemma arch_thread_set_caps_of_state [wp]: by (wpsimp wp: thread_set_caps_of_state_trivial2 simp: arch_thread_set_is_thread_set) lemma arch_thread_set_wp: - "\\s. get_tcb p s \ None \ Q (s\kheap := kheap s(p \ TCB (the (get_tcb p s)\tcb_arch := f (tcb_arch (the (get_tcb p s)))\))\) \ + "\\s. get_tcb p s \ None \ Q (s\kheap := (kheap s)(p \ TCB (the (get_tcb p s)\tcb_arch := f (tcb_arch (the (get_tcb p s)))\))\) \ arch_thread_set f p \\_. Q\" apply (simp add: arch_thread_set_def) @@ -1230,7 +1231,7 @@ lemma a_type_VCPU [simp]: by (simp add: a_type_def) lemma set_vcpu_wp: - "\\s. vcpu_at p s \ Q (s\kheap := kheap s(p \ (ArchObj (VCPU vcpu))) \) \ set_vcpu p vcpu \\_. Q\" + "\\s. 
vcpu_at p s \ Q (s\kheap := (kheap s)(p \ (ArchObj (VCPU vcpu))) \) \ set_vcpu p vcpu \\_. Q\" unfolding set_vcpu_def apply (wp set_object_wp_strong) apply (clarsimp simp: obj_at_def split: kernel_object.splits arch_kernel_obj.splits) @@ -1376,7 +1377,7 @@ lemma set_vm_root_valid_arch[wp]: "\valid_arch_state and sym_refs o state_hyp_refs_of\ set_vm_root pd \\_. valid_arch_state\" unfolding set_vm_root_def apply (wpsimp wp: gets_the_get_tcb_wp get_hw_asid_valid_arch - hoare_vcg_imp_lift hoare_vcg_all_lift hoare_whenE_wp + hoare_vcg_imp_lift hoare_vcg_all_lift whenE_wp hoare_drop_imps get_cap_wp simp: if_apply_def2) done @@ -2336,7 +2337,7 @@ lemma set_vcpu_if_live_then_nonz_cap_Some[wp]: (* FIXME: kind of ugly but hey! it works!! *) -lemma state_refs_of_simp: "\ a \ p \ \ state_refs_of (s\kheap := kheap s(p \ v) \) a = state_refs_of s a " +lemma state_refs_of_simp: "\ a \ p \ \ state_refs_of (s\kheap := (kheap s)(p \ v) \) a = state_refs_of s a " by (simp add: state_refs_of_def) lemma state_refs_of_vcpu_simp: "typ_at (AArch AVCPU) p s \ state_refs_of s p = {}" @@ -2362,12 +2363,12 @@ lemma set_vcpu_sym_refs[wp]: apply (clarsimp simp: obj_at_def) done -lemma state_hyp_refs_of_simp_neq: "\ a \ p \ \ state_hyp_refs_of (s\kheap := kheap s(p \ v) \) a = state_hyp_refs_of s a " +lemma state_hyp_refs_of_simp_neq: "\ a \ p \ \ state_hyp_refs_of (s\kheap := (kheap s)(p \ v) \) a = state_hyp_refs_of s a " by (simp add: state_hyp_refs_of_def) lemma state_hyp_refs_of_simp_eq: "obj_at (\ko'. hyp_refs_of ko' = hyp_refs_of v) p s - \ state_hyp_refs_of (s\kheap := kheap s(p \ v) \) p = state_hyp_refs_of s p" + \ state_hyp_refs_of (s\kheap := (kheap s)(p \ v) \) p = state_hyp_refs_of s p" by (clarsimp simp: state_hyp_refs_of_def obj_at_def) lemma set_object_vcpu_sym_refs_hyp: @@ -2537,7 +2538,7 @@ lemma vcpu_enable_invs[wp]: lemma vcpu_restore_invs[wp]: "\\s. invs s\ vcpu_restore v \\_. invs\" - apply (simp add: vcpu_restore_def do_machine_op_bind dom_mapM) + apply (simp add: vcpu_restore_def do_machine_op_bind dom_mapM empty_fail_cond) apply (wpsimp wp: mapM_wp_inv) done @@ -2570,7 +2571,7 @@ lemma vcpu_save_invs[wp]: lemma vcpu_disable_invs[wp]: "\\ s. invs s\ vcpu_disable v \\_ s . invs s\" apply (simp add: vcpu_disable_def) - apply (wpsimp simp: do_machine_op_bind empty_fail_isb + apply (wpsimp simp: do_machine_op_bind empty_fail_isb empty_fail_cond wp: set_vcpu_invs_eq_hyp get_vcpu_wp maskInterrupt_invs | wp hoare_vcg_all_lift hoare_vcg_imp_lift')+ done @@ -2689,7 +2690,7 @@ lemma dmo_setIRQTrigger_invs[wp]: "\invs\ do_machine_op (setIRQT lemma svr_invs [wp]: "\invs\ set_vm_root t' \\_. invs\" unfolding set_vm_root_def - apply (wpsimp wp: gets_the_get_tcb_wp hoare_vcg_all_lift hoare_vcg_imp_lift hoare_whenE_wp + apply (wpsimp wp: gets_the_get_tcb_wp hoare_vcg_all_lift hoare_vcg_imp_lift whenE_wp hoare_vcg_disj_lift hoare_drop_imps get_cap_wp simp: if_apply_def2) apply (thin_tac "cte_wp_at ((=) x) t s" for t) @@ -2728,7 +2729,7 @@ end locale vs_lookup_map_some_pdes = Arch + fixes pd pdp s s' S T pd' - defines "s' \ s\kheap := kheap s(pdp \ ArchObj (PageDirectory pd'))\" + defines "s' \ s\kheap := (kheap s)(pdp \ ArchObj (PageDirectory pd'))\" assumes refs: "vs_refs (ArchObj (PageDirectory pd')) = (vs_refs (ArchObj (PageDirectory pd)) - T) \ S" assumes old: "kheap s pdp = Some (ArchObj (PageDirectory pd))" @@ -2842,7 +2843,7 @@ lemma set_pd_vspace_objs_map: (* ARMHYP *) lemma simpler_set_pd_def: "set_pd p pd = (\s. if \pd. 
kheap s p = Some (ArchObj (PageDirectory pd)) - then ({((), s\kheap := kheap s(p \ ArchObj (PageDirectory pd))\)}, + then ({((), s\kheap := (kheap s)(p \ ArchObj (PageDirectory pd))\)}, False) else ({}, True))" apply (rule ext) @@ -2898,7 +2899,7 @@ lemma set_pd_valid_vs_lookup_map: (* ARMHYP *) apply (drule vs_lookup_pages_apI) apply (simp split: if_split_asm) apply (simp+)[2] - apply (frule_tac s="s\kheap := kheap s(p \ ArchObj (PageDirectory pd))\" + apply (frule_tac s="s\kheap := (kheap s)(p \ ArchObj (PageDirectory pd))\" in vs_lookup_pages_pdI[rotated -1]) apply (simp del: fun_upd_apply)+ apply (frule vs_lookup_pages_apI) @@ -3779,8 +3780,8 @@ lemma simpler_store_pde_def: "store_pde p pde s = (case kheap s (p && ~~ mask pd_bits) of Some (ArchObj (PageDirectory pd)) => - ({((), s\kheap := (kheap s((p && ~~ mask pd_bits) \ - (ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 3) := pde))))))\)}, False) + ({((), s\kheap := (kheap s)(p && ~~ mask pd_bits \ + (ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 3) := pde)))))\)}, False) | _ => ({}, True))" by (auto simp: store_pde_def simpler_set_pd_def get_object_def simpler_gets_def assert_def return_def fail_def set_object_def get_def put_def bind_def get_pd_def vspace_bits_defs @@ -3790,7 +3791,7 @@ lemma pde_update_valid_vspace_objs: "[|valid_vspace_objs s; valid_pde pde s; pde_ref pde = None; kheap s (p && ~~ mask pd_bits) = Some (ArchObj (PageDirectory pd))|] ==> valid_vspace_objs - (s\kheap := kheap s(p && ~~ mask pd_bits \ ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 3) := pde))))\)" + (s\kheap := (kheap s)(p && ~~ mask pd_bits \ ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 3) := pde))))\)" apply (cut_tac pde=pde and p=p in store_pde_arch_objs_unmap) apply (clarsimp simp: valid_def) apply (erule allE[where x=s]) @@ -4129,7 +4130,7 @@ lemma mapM_x_swp_store_empty_table': apply (induct slots, simp_all add: mapM_x_Nil mapM_x_Cons) apply wp apply (clarsimp simp: obj_at_def empty_table_def fun_eq_iff) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "\P\ f \Q\" for P f Q) apply (simp add: store_pte_def set_pt_def set_object_def) apply (wp get_object_wp) @@ -4684,7 +4685,7 @@ lemma find_pd_for_asid_lookup_slot [wp]: "\pspace_aligned and valid_vspace_objs\ find_pd_for_asid asid \\rv. \\ (lookup_pd_slot rv vptr && ~~ mask pd_bits)\, -" apply (rule hoare_pre) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_vcg_R_conj) apply (rule find_pd_for_asid_lookup) apply (rule find_pd_for_asid_aligned_pd) @@ -4697,7 +4698,7 @@ lemma find_pd_for_asid_lookup_slot_large_page [wp]: find_pd_for_asid asid \\rv. \\ (x + lookup_pd_slot rv vptr && ~~ mask pd_bits)\, -" apply (rule hoare_pre) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_vcg_R_conj) apply (rule hoare_vcg_R_conj) apply (rule find_pd_for_asid_inv [where P="K (x \ set [0, 8 .e. 0x78] \ is_aligned vptr 25)", THEN valid_validE_R]) @@ -4711,7 +4712,7 @@ lemma find_pd_for_asid_pde_at_add [wp]: "\K (x \ set [0,8 .e. 0x78] \ is_aligned vptr 25) and pspace_aligned and valid_vspace_objs\ find_pd_for_asid asid \\rv. pde_at (x + lookup_pd_slot rv vptr)\, -" apply (rule hoare_pre) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_vcg_R_conj) apply (rule find_pd_for_asid_inv [where P= "K (x \ set [0, 8 .e. 
0x78] \ is_aligned vptr 25) and pspace_aligned", THEN valid_validE_R]) @@ -4775,7 +4776,7 @@ lemma lookup_pt_slot_cap_to1[wp]: "\invs and \\pd and K (is_aligned pd pd_bits) and K (vptr < kernel_base)\ lookup_pt_slot pd vptr \\rv s. \a b cap. caps_of_state s (a, b) = Some cap \ is_pt_cap cap \ rv && ~~ mask pt_bits \ obj_refs cap\,-" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookup_pt_slot_cap_to) apply auto done @@ -4789,7 +4790,7 @@ lemma lookup_pt_slot_cap_to_multiple1: (\a b. cte_wp_at (\c. is_pt_cap c \ cap_asid c \ None \ (\x. x && ~~ mask pt_bits) ` set [rv , rv + 8 .e. rv + 0x78] \ obj_refs c) (a, b) s)\, -" apply (rule hoare_gen_asmE) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookup_pt_slot_cap_to) apply (rule conjI, clarsimp) apply (elim exEI) @@ -4817,7 +4818,7 @@ lemma lookup_pt_slot_cap_to_multiple[wp]: and K (is_aligned vptr 16)\ lookup_pt_slot pd vptr \\rv s. \a b. cte_wp_at (\c. (\x. x && ~~ mask pt_bits) ` (\x. x + rv) ` set [0 , 8 .e. 0x78] \ obj_refs c) (a, b) s\, -" - apply (rule hoare_post_imp_R, rule lookup_pt_slot_cap_to_multiple1) + apply (rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to_multiple1) apply (elim conjE exEI cte_wp_at_weakenE) apply (simp add: subset_eq p_0x3C_shift add.commute) done @@ -4852,7 +4853,7 @@ lemma find_pd_for_asid_cap_to: lemma find_pd_for_asid_cap_to1[wp]: "\invs\ find_pd_for_asid asid \\rv s. \a b cap. caps_of_state s (a, b) = Some cap \ lookup_pd_slot rv vptr && ~~ mask pd_bits \ obj_refs cap\, -" - apply (rule hoare_post_imp_R, rule find_pd_for_asid_cap_to) + apply (rule hoare_strengthen_postE_R, rule find_pd_for_asid_cap_to) apply (clarsimp simp: lookup_pd_slot_pd) apply auto done @@ -4862,7 +4863,7 @@ lemma find_pd_for_asid_cap_to2[wp]: \\rv s. \a b. cte_wp_at (\cp. lookup_pd_slot rv vptr && ~~ mask pd_bits \ obj_refs cp \ is_pd_cap cp) (a, b) s\, -" - apply (rule hoare_post_imp_R, rule find_pd_for_asid_cap_to) + apply (rule hoare_strengthen_postE_R, rule find_pd_for_asid_cap_to) apply (clarsimp simp: lookup_pd_slot_pd cte_wp_at_caps_of_state) apply auto done @@ -4870,7 +4871,7 @@ lemma find_pd_for_asid_cap_to2[wp]: lemma find_pd_for_asid_cap_to_multiple[wp]: "\invs and K (is_aligned vptr 25)\ find_pd_for_asid asid \\rv s. \x xa. cte_wp_at (\a. (\x. x && ~~ mask pd_bits) ` (\x. x + lookup_pd_slot rv vptr) ` set [0 , 8 .e. 0x78] \ obj_refs a) (x, xa) s\, -" - apply (rule hoare_gen_asmE, rule hoare_post_imp_R, rule find_pd_for_asid_cap_to) + apply (rule hoare_gen_asmE, rule hoare_strengthen_postE_R, rule find_pd_for_asid_cap_to) apply (elim exEI, clarsimp simp: cte_wp_at_caps_of_state) apply (simp add: lookup_pd_slot_add_eq) done @@ -4881,7 +4882,7 @@ lemma find_pd_for_asid_cap_to_multiple2[wp]: \\rv s. \x\set [0 , 8 .e. 0x78]. \a b. cte_wp_at (\cp. x + lookup_pd_slot rv vptr && ~~ mask pd_bits \ obj_refs cp \ is_pd_cap cp) (a, b) s\, -" - apply (rule hoare_gen_asmE, rule hoare_post_imp_R, + apply (rule hoare_gen_asmE, rule hoare_strengthen_postE_R, rule find_pd_for_asid_cap_to) apply (intro ballI, elim exEI, clarsimp simp: cte_wp_at_caps_of_state) @@ -4898,7 +4899,7 @@ lemma lookup_pt_slot_cap_to2: lookup_pt_slot pd vptr \\rv s. \oref cref cap. 
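(* Reviewer note (sketch, not authoritative): the hunks in this file repeatedly rename
   hoare_post_imp_R to hoare_strengthen_postE_R, and elsewhere hoare_post_impErr to
   hoare_strengthen_postE. These appear to be pure renames of the post-condition
   strengthening rules for the exception monad; the validE_R variant has roughly the
   following shape (written with explicit ASCII symbol tokens, variable names
   illustrative):

     lemma hoare_strengthen_postE_R:
       "\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>, -; \<And>r s. Q' r s \<Longrightarrow> Q r s \<rbrakk>
        \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, -"

   so the call sites above and below change only in the rule name, not in the proof
   obligations. *)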
caps_of_state s (oref, cref) = Some cap \ rv && ~~ mask pt_bits \ obj_refs cap \ is_pt_cap cap\, -" - apply (rule hoare_post_imp_R, rule lookup_pt_slot_cap_to) + apply (rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to) apply fastforce done @@ -4908,7 +4909,7 @@ lemma lookup_pt_slot_cap_to_multiple2: \\rv s. \oref cref. cte_wp_at (\c. (\x. x && ~~ mask pt_bits) ` (\x. x + rv) ` set [0 , 8 .e. 0x78] \ obj_refs c \ is_pt_cap c) (oref, cref) s\, -" - apply (rule hoare_post_imp_R, rule lookup_pt_slot_cap_to_multiple1) + apply (rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to_multiple1) apply (clarsimp simp: upto_enum_step_def image_image field_simps linorder_not_le[symmetric] split: if_split_asm) @@ -5602,8 +5603,7 @@ end locale asid_pool_map = Arch + fixes s ap pool asid pdp pd s' defines "(s' :: ('a::state_ext) state) \ - s\kheap := kheap s(ap \ ArchObj (ASIDPool - (pool(asid \ pdp))))\" + s\kheap := (kheap s)(ap \ ArchObj (ASIDPool (pool(asid \ pdp))))\" assumes ap: "kheap s ap = Some (ArchObj (ASIDPool pool))" assumes new: "pool asid = None" assumes pd: "kheap s pdp = Some (ArchObj (PageDirectory pd))" diff --git a/proof/invariant-abstract/ARM_HYP/Machine_AI.thy b/proof/invariant-abstract/ARM_HYP/Machine_AI.thy index e18726abd4..cc3d20b5f1 100644 --- a/proof/invariant-abstract/ARM_HYP/Machine_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/Machine_AI.thy @@ -17,7 +17,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. no_irq m" wpc_helper_no_irq @@ -56,7 +56,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -83,13 +83,13 @@ lemma det_getRestartPC: "det getRestartPC" lemma det_setNextPC: "det (setNextPC p)" by (simp add: setNextPC_def det_setRegister) - +(* FIXME empty_fail: make all empty_fail [intro!, wp], and non-conditional ones [simp] *) lemma ef_loadWord: "empty_fail (loadWord x)" - by (simp add: loadWord_def) + by (fastforce simp: loadWord_def) lemma ef_storeWord: "empty_fail (storeWord x y)" - by (simp add: storeWord_def) + by (fastforce simp: storeWord_def) lemma no_fail_getRestartPC: "no_fail \ getRestartPC" @@ -280,8 +280,7 @@ lemma no_fail_invalidateCacheRange_I[simp, wp]: lemma no_fail_invalidateCacheRange_RAM[simp, wp]: "no_fail \ (invalidateCacheRange_RAM s e p)" apply (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def) - apply (rule no_fail_pre, wp no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb, simp) - apply (auto intro: hoare_post_taut) + apply (wpsimp wp: no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb) done lemma no_fail_branchFlushRange[simp, wp]: @@ -327,7 +326,7 @@ lemma no_fail_getActiveIRQ[wp]: "no_fail \ (getActiveIRQ in_kernel)" apply (simp add: getActiveIRQ_def) apply (rule no_fail_pre) - apply (wp non_fail_select) + apply wp apply simp done @@ -336,7 +335,7 @@ definition "irq_state_independent P \ \f s. P s \ lemma getActiveIRQ_inv [wp]: "\irq_state_independent P\ \ \P\ getActiveIRQ in_kernel \\rv. P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -565,7 +564,7 @@ lemma no_irq_seq [wp]: "\ no_irq f; \x. 
no_irq (g x) \ \ no_irq (f >>= g)" apply (subst no_irq_def) apply clarsimp - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (wp|simp)+ done @@ -670,7 +669,7 @@ lemma no_irq_clearMemory: "no_irq (clearMemory a b)" lemma getActiveIRQ_le_maxIRQ': "\\s. \irq > maxIRQ. irq_masks s irq\ getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply clarsimp apply (rule ccontr) apply (simp add: linorder_not_le) @@ -680,14 +679,13 @@ lemma getActiveIRQ_le_maxIRQ': lemma getActiveIRQ_neq_Some0xFF': "\\\ getActiveIRQ in_kernel \\rv s. rv \ Some 0x3FF\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma getActiveIRQ_neq_non_kernel: "\\\ getActiveIRQ True \\rv s. rv \ Some ` non_kernel_IRQs \" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply auto done @@ -772,7 +770,7 @@ lemma empty_fail_setHCR[simp, intro!]: lemma empty_fail_addressTranslateS1[simp, intro!]: "empty_fail (addressTranslateS1 w)" - by (simp add: addressTranslateS1_def) + by (fastforce simp: addressTranslateS1_def) lemma empty_fail_writeContextIDAndPD[simp, intro!]: "empty_fail (writeContextIDAndPD asid w)" @@ -804,7 +802,7 @@ lemma empty_fail_set_gic_vcpu_ctrl_apr[simp, intro!]: lemma empty_fail_get_gic_vcpu_ctrl_lr[simp, intro!]: "empty_fail (get_gic_vcpu_ctrl_lr n)" - by (simp add: get_gic_vcpu_ctrl_lr_def) + by (fastforce simp: get_gic_vcpu_ctrl_lr_def) lemma empty_fail_set_gic_vcpu_ctrl_lr[simp, intro!]: "empty_fail (set_gic_vcpu_ctrl_lr n w)" @@ -846,11 +844,12 @@ lemma empty_fail_cleanCacheRange_PoC[simp, intro!]: lemma empty_fail_cleanInvalidateCacheRange_RAM[simp, intro!]: "empty_fail (cleanInvalidateCacheRange_RAM s e p)" - by (simp add: cleanInvalidateCacheRange_RAM_def empty_fail_dsb empty_fail_cleanInvalidateL2Range empty_fail_cleanInvalByVA) + by (fastforce simp: cleanInvalidateCacheRange_RAM_def empty_fail_dsb + empty_fail_cleanInvalidateL2Range empty_fail_cleanInvalByVA) lemma empty_fail_cleanCacheRange_RAM[simp, intro!]: "empty_fail (cleanCacheRange_RAM s e p)" - by (simp add: cleanCacheRange_RAM_def empty_fail_dsb empty_fail_cleanL2Range) + by (fastforce simp: cleanCacheRange_RAM_def empty_fail_dsb empty_fail_cleanL2Range) lemma empty_fail_invalidateCacheRange_I[simp, intro!]: "empty_fail (invalidateCacheRange_I s e p)" @@ -858,8 +857,8 @@ lemma empty_fail_invalidateCacheRange_I[simp, intro!]: lemma empty_fail_invalidateCacheRange_RAM[simp, intro!]: "empty_fail (invalidateCacheRange_RAM s e p)" - by (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def - empty_fail_invalidateL2Range empty_fail_invalidateByVA empty_fail_dsb) + by (fastforce simp: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def + empty_fail_invalidateL2Range empty_fail_invalidateByVA empty_fail_dsb) lemma empty_fail_branchFlushRange[simp, intro!]: "empty_fail (branchFlushRange s e p)" @@ -867,16 +866,16 @@ lemma empty_fail_branchFlushRange[simp, intro!]: lemma empty_fail_cleanCaches_PoU[simp, intro!]: "empty_fail cleanCaches_PoU" - by (simp add: cleanCaches_PoU_def empty_fail_dsb empty_fail_clean_D_PoU empty_fail_invalidate_I_PoU) + by (fastforce simp: cleanCaches_PoU_def empty_fail_dsb empty_fail_clean_D_PoU empty_fail_invalidate_I_PoU) lemma empty_fail_cleanInvalidateL1Caches[simp, intro!]: "empty_fail cleanInvalidateL1Caches" - by (simp add: cleanInvalidateL1Caches_def empty_fail_dsb empty_fail_cleanInvalidate_D_PoC - 
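(* Reviewer note (assumption, not stated in the patch): the empty_fail lemmas above
   switch from "by (simp add: ...)" to "by (fastforce simp: ...)", and the FIXME added
   near ef_loadWord records the intended convention: every empty_fail lemma should be
   [intro!, wp], with the unconditional ones also [simp]. A hypothetical lemma written
   to that convention, reusing loadWord_def from the hunk above, would look like:

     lemma empty_fail_loadWord[intro!, wp, simp]:
       "empty_fail (loadWord p)"
       by (fastforce simp: loadWord_def)

   Our reading is that fastforce is needed because empty_fail of compound monadic terms
   is now discharged via conditional intro rules rather than plain rewriting; the patch
   itself does not say so. *)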
empty_fail_invalidate_I_PoU) + by (fastforce simp: cleanInvalidateL1Caches_def empty_fail_dsb empty_fail_cleanInvalidate_D_PoC + empty_fail_invalidate_I_PoU) lemma empty_fail_clearMemory [simp, intro!]: "\a b. empty_fail (clearMemory a b)" - by (simp add: clearMemory_def mapM_x_mapM ef_storeWord) + by (fastforce simp: clearMemory_def mapM_x_mapM ef_storeWord) end diff --git a/proof/invariant-abstract/BCorres_AI.thy b/proof/invariant-abstract/BCorres_AI.thy index 0d03f52be8..ecb56c56ea 100644 --- a/proof/invariant-abstract/BCorres_AI.thy +++ b/proof/invariant-abstract/BCorres_AI.thy @@ -50,7 +50,7 @@ lemma OR_choiceE_bcorres[wp]: done crunch_ignore (bcorres) - (add: NonDetMonad.bind gets modify get put do_extended_op empty_slot_ext mapM_x "when" + (add: Nondet_Monad.bind gets modify get put do_extended_op empty_slot_ext mapM_x "when" select unless mapM catch bindE liftE whenE alternative cap_swap_ext cap_insert_ext cap_move_ext liftM create_cap_ext possible_switch_to reschedule_required set_priority diff --git a/proof/invariant-abstract/Bits_AI.thy b/proof/invariant-abstract/Bits_AI.thy index 06da5d09f6..fc5c25b318 100644 --- a/proof/invariant-abstract/Bits_AI.thy +++ b/proof/invariant-abstract/Bits_AI.thy @@ -14,7 +14,7 @@ lemmas crunch_simps = split_def whenE_def unlessE_def Let_def if_fun_split assertE_def zipWithM_mapM zipWithM_x_mapM lemma in_set_object: - "(rv, s') \ fst (set_object ptr obj s) \ s' = s \ kheap := kheap s (ptr \ obj) \" + "(rv, s') \ fst (set_object ptr obj s) \ s' = s \ kheap := (kheap s) (ptr \ obj) \" by (clarsimp simp: set_object_def get_object_def in_monad) lemma cap_fault_injection: diff --git a/proof/invariant-abstract/CNodeInv_AI.thy b/proof/invariant-abstract/CNodeInv_AI.thy index 9ea1ca7325..d13b0e9bc6 100644 --- a/proof/invariant-abstract/CNodeInv_AI.thy +++ b/proof/invariant-abstract/CNodeInv_AI.thy @@ -401,7 +401,7 @@ lemma cap_derive_not_null_helper2: derive_cap slot cap \\rv s. rv \ cap.NullCap \ Q rv s\, -" apply (drule cap_derive_not_null_helper) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply simp done @@ -441,13 +441,15 @@ lemma decode_cnode_inv_wf[wp]: derive_cap_zobjrefs derive_cap_objrefs_iszombie | wp (once) hoare_drop_imps)+ )[1] apply (wp whenE_throwError_wp | wpcw)+ + apply (rename_tac dest_slot y src_slot) apply simp - apply (rule_tac Q="\src_cap. valid_cap src_cap and ex_cte_cap_wp_to is_cnode_cap x + apply (rule_tac Q="\src_cap. valid_cap src_cap and ex_cte_cap_wp_to is_cnode_cap dest_slot and zombies_final and valid_objs - and real_cte_at src_slot and real_cte_at x + and real_cte_at src_slot and real_cte_at dest_slot and cte_wp_at (\c. c = src_cap) src_slot - and cte_wp_at ((=) cap.NullCap) x" + and cte_wp_at ((=) cap.NullCap) dest_slot" in hoare_post_imp) + apply (rename_tac src_cap s) apply (clarsimp simp: cte_wp_at_caps_of_state all_rights_def) apply (simp add: cap_master_update_cap_data weak_derived_update_cap_data cap_asid_update_cap_data @@ -455,7 +457,7 @@ lemma decode_cnode_inv_wf[wp]: apply (strengthen cap_badge_update_cap_data) apply simp apply (frule (1) caps_of_state_valid_cap) - apply (case_tac "is_zombie r") + apply (case_tac "is_zombie src_cap") apply (clarsimp simp add: valid_cap_def2 update_cap_data_def is_cap_simps split: if_split_asm) @@ -491,8 +493,8 @@ lemma decode_cnode_inv_wf[wp]: apply (simp add: decode_cnode_invocation_def unlessE_def whenE_def split del: if_split) - apply (wp get_cap_wp hoare_vcg_all_lift_R | simp add: )+ - apply (rule_tac Q'="\rv. invs and cte_wp_at (\_. 
True) rv" in hoare_post_imp_R) + apply (wp get_cap_wp hoare_vcg_all_liftE_R | simp add: )+ + apply (rule_tac Q'="\rv. invs and cte_wp_at (\_. True) rv" in hoare_strengthen_postE_R) apply (wp lsfco_cte_at) apply (clarsimp simp: cte_wp_valid_cap invs_valid_objs has_cancel_send_rights_ep_cap)+ \ \Rotate\ @@ -500,11 +502,12 @@ lemma decode_cnode_inv_wf[wp]: whenE_def unlessE_def) apply (rule hoare_pre) apply (wp get_cap_wp ensure_empty_stronger | simp)+ - apply (rule_tac Q'="\rv s. real_cte_at rv s \ real_cte_at x s + apply (rename_tac dest_slot src_slot) + apply (rule_tac Q'="\rv s. real_cte_at rv s \ real_cte_at dest_slot s \ real_cte_at src_slot s \ ex_cte_cap_wp_to is_cnode_cap rv s - \ ex_cte_cap_wp_to is_cnode_cap x s - \ invs s" in hoare_post_imp_R) + \ ex_cte_cap_wp_to is_cnode_cap dest_slot s + \ invs s" in hoare_strengthen_postE_R) apply wp+ apply (clarsimp simp: cte_wp_at_caps_of_state dest!: real_cte_at_cte del: impI) @@ -690,7 +693,7 @@ lemma cap_swap_not_recursive: \\rv s. card (not_recursive_cspaces s) < n\" apply (cases "p1 = p2", simp_all) apply (simp add: cap_swap_def set_cdt_def when_def) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (wp | simp)+ apply (rule not_recursive_set_cap_doesn't_grow) apply (wp not_recursive_set_cap_shrinks set_cap_cte_wp_at' get_cap_wp hoare_vcg_disj_lift) @@ -896,12 +899,12 @@ context CNodeInv_AI begin lemma preemption_point_not_recursive_cspaces[wp]: "preemption_point \\s. P (not_recursive_cspaces s)\" unfolding preemption_point_def - by (wpsimp wp: OR_choiceE_weak_wp alternative_valid hoare_drop_imp) + by (wpsimp wp: OR_choiceE_weak_wp hoare_drop_imp) lemma preemption_point_caps_of_state[wp]: "preemption_point \\s. P (caps_of_state s)\" unfolding preemption_point_def - by (wpsimp wp: OR_choiceE_weak_wp alternative_valid hoare_drop_imp) + by (wpsimp wp: OR_choiceE_weak_wp hoare_drop_imp) lemma rec_del_termination: "All (rec_del_dom :: rec_del_call \ 'state_ext state \ bool)" @@ -1088,7 +1091,7 @@ end lemma dom_valid_cap[wp]: "\valid_cap c\ do_machine_op f \\_. valid_cap c\" apply (simp add: do_machine_op_def split_def) - apply (wp select_wp) + apply wp apply simp done @@ -1096,7 +1099,7 @@ lemma dom_valid_cap[wp]: lemma dom_cte_at: "\cte_at c\ do_machine_op f \\_. cte_at c\" apply (simp add: do_machine_op_def split_def) - apply (wp select_wp) + apply wp apply (simp add: cte_at_cases) done @@ -1855,7 +1858,7 @@ lemma cap_swap_valid_arch_caps[wp]: apply (simp add: cap_swap_def) apply (rule hoare_pre) apply (subst bind_assoc[symmetric], - rule hoare_seq_ext [rotated], + rule bind_wp_fwd, rule swap_of_caps_valid_arch_caps) apply (wp | simp split del: if_split)+ done @@ -2050,21 +2053,19 @@ lemma rec_del_delete_cases: done done - lemma cap_delete_deletes: notes hoare_pre [wp_pre del] shows - "\p. + "\p. \\ :: 'state_ext state \ bool\ cap_delete p \\rv. cte_wp_at (\c. 
c = cap.NullCap) p\,-" subgoal for p - unfolding cap_delete_def - using rec_del_delete_cases[where sl=p and ex=True] - apply (simp add: validE_R_def) - apply wp - apply simp - done + unfolding cap_delete_def + using rec_del_delete_cases[where sl=p and ex=True] + apply (simp add: validE_R_def) + apply wp + done done end @@ -2370,10 +2371,10 @@ lemma empty_slot_emptyable[wp]: crunch emptyable[wp]: blocked_cancel_ipc "emptyable sl" - (ignore: set_thread_state wp: emptyable_lift sts_st_tcb_at_cases static_imp_wp) + (ignore: set_thread_state wp: emptyable_lift sts_st_tcb_at_cases hoare_weak_lift_imp) crunch emptyable[wp]: cancel_signal "emptyable sl" - (ignore: set_thread_state wp: emptyable_lift sts_st_tcb_at_cases static_imp_wp) + (ignore: set_thread_state wp: emptyable_lift sts_st_tcb_at_cases hoare_weak_lift_imp) lemma cap_delete_one_emptyable[wp]: @@ -2393,7 +2394,7 @@ declare thread_set_Pmdb [wp] lemma reply_cancel_ipc_emptyable[wp]: "\invs and emptyable sl and valid_mdb\ reply_cancel_ipc ptr \\_. emptyable sl\" apply (simp add: reply_cancel_ipc_def) - apply (wp select_wp select_inv hoare_drop_imps | simp add: Ball_def)+ + apply (wp select_inv hoare_drop_imps | simp add: Ball_def)+ apply (wp hoare_vcg_all_lift hoare_convert_imp thread_set_Pmdb thread_set_invs_trivial thread_set_emptyable thread_set_cte_at | simp add: tcb_cap_cases_def descendants_of_cte_at)+ @@ -2509,7 +2510,7 @@ lemma rec_del_invs: rec_del args \\rv. invs :: 'state_ext state \ bool\" apply (rule validE_valid) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule hoare_pre) apply (rule use_spec) apply (rule rec_del_invs') @@ -2711,16 +2712,16 @@ lemmas empty_slot_rvk_prog' = empty_slot_rvk_prog[unfolded o_def] crunch rvk_prog: cancel_ipc "\s. revoke_progress_ord m (\x. option_map cap_to_rpo (caps_of_state s x))" (simp: crunch_simps o_def unless_def is_final_cap_def tcb_cap_cases_def - wp: hoare_drop_imps empty_slot_rvk_prog' select_wp + wp: hoare_drop_imps empty_slot_rvk_prog' thread_set_caps_of_state_trivial) crunch rvk_prog: suspend "\s. revoke_progress_ord m (\x. option_map cap_to_rpo (caps_of_state s x))" (simp: crunch_simps o_def unless_def is_final_cap_def - wp: crunch_wps empty_slot_rvk_prog' select_wp) + wp: crunch_wps empty_slot_rvk_prog') crunch rvk_prog: deleting_irq_handler "\s. revoke_progress_ord m (\x. option_map cap_to_rpo (caps_of_state s x))" (simp: crunch_simps o_def unless_def is_final_cap_def - wp: crunch_wps empty_slot_rvk_prog' select_wp) + wp: crunch_wps empty_slot_rvk_prog') locale CNodeInv_AI_3 = CNodeInv_AI_2 state_ext_t for state_ext_t :: "'state_ext::state_ext itself" + @@ -2829,7 +2830,7 @@ lemma rec_del_emptyable: rec_del args \\rv. emptyable (slot_rdcall args) :: 'state_ext state \ bool\, -" apply (rule validE_validE_R) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule hoare_pre) apply (rule use_spec) apply (rule rec_del_invs') @@ -2847,7 +2848,7 @@ lemma reduce_zombie_cap_to: rec_del (ReduceZombieCall cap slot exp) \\rv (s::'state_ext state). \ exp \ ex_cte_cap_wp_to (\cp. 
cap_irqs cp = {}) slot s\, -" apply (rule validE_validE_R) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule hoare_pre) apply (rule use_spec) apply (rule rec_del_invs') @@ -2997,7 +2998,7 @@ proof (induct rule: cap_revoke_induct) show ?case apply (subst cap_revoke_simps) apply (wp "1.hyps") - apply (wp x p hoare_drop_imps select_wp)+ + apply (wp x p hoare_drop_imps)+ apply simp_all done qed @@ -3021,7 +3022,7 @@ proof (induct rule: cap_revoke_induct) show ?case apply (subst cap_revoke_simps) apply (wp "1.hyps") - apply (wp x p hoare_drop_imps select_wp)+ + apply (wp x p hoare_drop_imps)+ apply (simp_all add: y) done qed @@ -3366,7 +3367,7 @@ lemma cap_move_valid_arch_caps[wp]: apply (simp add: cap_move_def) apply (rule hoare_pre) apply (subst bind_assoc[symmetric], - rule hoare_seq_ext [rotated], + rule bind_wp_fwd, rule swap_of_caps_valid_arch_caps) apply (wp | simp)+ apply (clarsimp elim!: cte_wp_at_weakenE) diff --git a/proof/invariant-abstract/CSpaceInvPre_AI.thy b/proof/invariant-abstract/CSpaceInvPre_AI.thy index b870061679..3af1af7515 100644 --- a/proof/invariant-abstract/CSpaceInvPre_AI.thy +++ b/proof/invariant-abstract/CSpaceInvPre_AI.thy @@ -24,7 +24,7 @@ lemma set_cap_caps_of_state[wp]: "\\s. P ((caps_of_state s) (ptr \ cap))\ set_cap cap ptr \\rv s. P (caps_of_state s)\" apply (cases ptr) apply (clarsimp simp add: set_cap_def split_def) - apply (rule hoare_seq_ext [OF _ get_object_sp]) + apply (rule bind_wp [OF _ get_object_sp]) apply (case_tac obj; simp_all split del: if_split cong: if_cong bind_cong) apply (wpsimp wp: set_object_wp) apply (fastforce elim!: rsubst[where P=P] diff --git a/proof/invariant-abstract/CSpaceInv_AI.thy b/proof/invariant-abstract/CSpaceInv_AI.thy index b15a6e4758..6d0e8b5a37 100644 --- a/proof/invariant-abstract/CSpaceInv_AI.thy +++ b/proof/invariant-abstract/CSpaceInv_AI.thy @@ -162,12 +162,12 @@ crunch inv [wp]: lookup_cap P lemma cte_at_tcb_update: - "tcb_at t s \ cte_at slot (s\kheap := kheap s(t \ TCB tcb)\) = cte_at slot s" + "tcb_at t s \ cte_at slot (s\kheap := (kheap s)(t \ TCB tcb)\) = cte_at slot s" by (clarsimp simp add: cte_at_cases obj_at_def is_tcb) lemma valid_cap_tcb_update [simp]: - "tcb_at t s \ (s\kheap := kheap s(t \ TCB tcb)\) \ cap = s \ cap" + "tcb_at t s \ (s\kheap := (kheap s)(t \ TCB tcb)\) \ cap = s \ cap" apply (clarsimp simp: is_tcb elim!: obj_atE) apply (subgoal_tac "a_type (TCB tcba) = a_type (TCB tcb)") apply (rule iffI) @@ -181,7 +181,7 @@ lemma valid_cap_tcb_update [simp]: lemma obj_at_tcb_update: "\ tcb_at t s; \x y. 
P (TCB x) = P (TCB y)\ \ - obj_at P t' (s\kheap := kheap s(t \ TCB tcb)\) = obj_at P t' s" + obj_at P t' (s\kheap := (kheap s)(t \ TCB tcb)\) = obj_at P t' s" apply (simp add: obj_at_def is_tcb_def) apply clarsimp apply (case_tac ko) @@ -191,7 +191,7 @@ lemma obj_at_tcb_update: lemma valid_thread_state_tcb_update: "\ tcb_at t s \ \ - valid_tcb_state ts (s\kheap := kheap s(t \ TCB tcb)\) = valid_tcb_state ts s" + valid_tcb_state ts (s\kheap := (kheap s)(t \ TCB tcb)\) = valid_tcb_state ts s" apply (unfold valid_tcb_state_def) apply (case_tac ts) apply (simp_all add: obj_at_tcb_update is_ep_def is_tcb_def is_ntfn_def) @@ -200,7 +200,7 @@ lemma valid_thread_state_tcb_update: lemma valid_objs_tcb_update: "\tcb_at t s; valid_tcb t tcb s; valid_objs s \ - \ valid_objs (s\kheap := kheap s(t \ TCB tcb)\)" + \ valid_objs (s\kheap := (kheap s)(t \ TCB tcb)\)" apply (clarsimp simp: valid_objs_def dom_def elim!: obj_atE) apply (intro conjI impI) @@ -217,7 +217,7 @@ lemma valid_objs_tcb_update: lemma obj_at_update: - "obj_at P t' (s \kheap := kheap s (t \ v)\) = + "obj_at P t' (s \kheap := (kheap s)(t \ v)\) = (if t = t' then P v else obj_at P t' s)" by (simp add: obj_at_def) @@ -225,7 +225,7 @@ lemma obj_at_update: lemma iflive_tcb_update: "\ if_live_then_nonz_cap s; live (TCB tcb) \ ex_nonz_cap_to t s; obj_at (same_caps (TCB tcb)) t s \ - \ if_live_then_nonz_cap (s\kheap := kheap s(t \ TCB tcb)\)" + \ if_live_then_nonz_cap (s\kheap := (kheap s)(t \ TCB tcb)\)" unfolding fun_upd_def apply (simp add: if_live_then_nonz_cap_def, erule allEI) apply safe @@ -236,7 +236,7 @@ lemma iflive_tcb_update: lemma ifunsafe_tcb_update: "\ if_unsafe_then_cap s; obj_at (same_caps (TCB tcb)) t s \ - \ if_unsafe_then_cap (s\kheap := kheap s(t \ TCB tcb)\)" + \ if_unsafe_then_cap (s\kheap := (kheap s)(t \ TCB tcb)\)" apply (simp add: if_unsafe_then_cap_def, elim allEI) apply (clarsimp dest!: caps_of_state_cteD simp: cte_wp_at_after_update fun_upd_def) @@ -247,7 +247,7 @@ lemma ifunsafe_tcb_update: lemma zombies_tcb_update: "\ zombies_final s; obj_at (same_caps (TCB tcb)) t s \ - \ zombies_final (s\kheap := kheap s(t \ TCB tcb)\)" + \ zombies_final (s\kheap := (kheap s)(t \ TCB tcb)\)" apply (simp add: zombies_final_def is_final_cap'_def2, elim allEI) apply (clarsimp simp: cte_wp_at_after_update fun_upd_def) done @@ -259,14 +259,14 @@ lemma valid_idle_tcb_update: tcb_state t = tcb_state t'; tcb_bound_notification t = tcb_bound_notification t'; tcb_iarch t = tcb_iarch t'; valid_tcb p t' s \ - \ valid_idle (s\kheap := kheap s(p \ TCB t')\)" + \ valid_idle (s\kheap := (kheap s)(p \ TCB t')\)" by (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def) lemma valid_reply_caps_tcb_update: "\valid_reply_caps s; ko_at (TCB t) p s; tcb_state t = tcb_state t'; same_caps (TCB t) (TCB t') \ - \ valid_reply_caps (s\kheap := kheap s(p \ TCB t')\)" + \ valid_reply_caps (s\kheap := (kheap s)(p \ TCB t')\)" apply (frule_tac P'="same_caps (TCB t')" in obj_at_weakenE, simp) apply (fastforce simp: valid_reply_caps_def has_reply_cap_def pred_tcb_at_def obj_at_def fun_upd_def @@ -277,13 +277,13 @@ lemma valid_reply_caps_tcb_update: lemma valid_reply_masters_tcb_update: "\valid_reply_masters s; ko_at (TCB t) p s; tcb_state t = tcb_state t'; same_caps (TCB t) (TCB t') \ - \ valid_reply_masters (s\kheap := kheap s(p \ TCB t')\)" + \ valid_reply_masters (s\kheap := (kheap s)(p \ TCB t')\)" by (clarsimp simp: valid_reply_masters_def fun_upd_def is_tcb cte_wp_at_after_update obj_at_def) lemma tcb_state_same_cte_wp_at: "\ ko_at (TCB t) p s; \(getF, 
v) \ ran tcb_cap_cases. getF t = getF t' \ - \ \P p'. cte_wp_at P p' (s\kheap := kheap s(p \ TCB t')\) + \ \P p'. cte_wp_at P p' (s\kheap := (kheap s)(p \ TCB t')\) = cte_wp_at P p' s" apply (clarsimp simp add: cte_wp_at_cases obj_at_def) apply (case_tac "tcb_cap_cases b") @@ -553,7 +553,7 @@ lemma set_cap_valid_objs: and tcb_cap_valid x p\ set_cap x p \\_. valid_objs\" apply (simp add: set_cap_def split_def) - apply (rule hoare_seq_ext [OF _ get_object_sp]) + apply (rule bind_wp [OF _ get_object_sp]) apply (case_tac obj, simp_all split del: if_split) apply clarsimp apply (wp set_object_valid_objs) @@ -742,7 +742,7 @@ lemma set_cap_pspace: assumes x: "\s f'. f (kheap_update f' s) = f s" shows "\\s. P (f s)\ set_cap p cap \\rv s. P (f s)\" apply (simp add: set_cap_def split_def) - apply (rule hoare_seq_ext [OF _ get_object_sp]) + apply (rule bind_wp [OF _ get_object_sp]) apply (case_tac obj, simp_all split del: if_split cong: if_cong) apply (wpsimp wp: set_object_wp simp: x)+ done @@ -958,7 +958,7 @@ lemma set_cap_zombies: lemma set_cap_obj_at_other: "\\s. P (obj_at P' p s) \ p \ fst p'\ set_cap cap p' \\rv s. P (obj_at P' p s)\" apply (simp add: set_cap_def split_def) - apply (rule hoare_seq_ext [OF _ get_object_inv]) + apply (rule bind_wp [OF _ get_object_inv]) apply (case_tac obj, simp_all split del: if_split) apply (wpsimp wp: set_object_wp simp: obj_at_def)+ done @@ -1468,7 +1468,7 @@ lemma thread_set_mdb: done lemma set_cap_caps_of_state2: - "\\s. P (caps_of_state s (p \ cap)) (cdt s) (is_original_cap s)\ + "\\s. P ((caps_of_state s)(p \ cap)) (cdt s) (is_original_cap s)\ set_cap cap p \\rv s. P (caps_of_state s) (cdt s) (is_original_cap s)\" apply (rule_tac Q="\rv s. \m mr. P (caps_of_state s) m mr @@ -2069,7 +2069,7 @@ lemma cap_insert_obj_at_other: lemma only_idle_tcb_update: "\only_idle s; ko_at (TCB t) p s; tcb_state t = tcb_state t' \ \idle (tcb_state t') \ - \ only_idle (s\kheap := kheap s(p \ TCB t')\)" + \ only_idle (s\kheap := (kheap s)(p \ TCB t')\)" by (clarsimp simp: only_idle_def pred_tcb_at_def obj_at_def) lemma as_user_only_idle : diff --git a/proof/invariant-abstract/CSpace_AI.thy b/proof/invariant-abstract/CSpace_AI.thy index 904ca7a763..f3223533ee 100644 --- a/proof/invariant-abstract/CSpace_AI.thy +++ b/proof/invariant-abstract/CSpace_AI.thy @@ -115,7 +115,7 @@ lemma preemption_point_inv: shows "\irq_state_independent_A P; \f s. P (trans_state f s) = P s\ \ \P\ preemption_point \\_. P\" apply (intro impI conjI | simp add: preemption_point_def o_def - | wp hoare_post_imp[OF _ getActiveIRQ_wp] OR_choiceE_weak_wp alternative_wp[where P=P] + | wp hoare_post_imp[OF _ getActiveIRQ_wp] OR_choiceE_weak_wp | wpc)+ done @@ -172,7 +172,7 @@ proof (induct args arbitrary: s rule: resolve_address_bits'.induct) s \ \P'\ resolve_address_bits' z args \Q\,\\\\" unfolding spec_validE_def apply (fold validE_R_def) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply simp done show ?case @@ -237,7 +237,7 @@ lemma resolve_address_bits_cte_at: "\ valid_objs and valid_cap (fst args) \ resolve_address_bits args \\rv. 
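(* Reviewer note: a large share of the hunks in this theory and in the VSpace theories
   only add parentheses around the function being updated, for example

     s\<lparr>kheap := kheap s(t \<mapsto> TCB tcb)\<rparr>
   becomes
     s\<lparr>kheap := (kheap s)(t \<mapsto> TCB tcb)\<rparr>

   We read this as adapting to the stricter function-update syntax of a newer Isabelle
   release, where f(x := y) requires a compound term such as "kheap s" to be
   parenthesised explicitly; the terms themselves are unchanged. *)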
cte_at (fst rv)\, -" - apply (rule hoare_post_imp_R, rule resolve_address_bits_real_cte_at) + apply (rule hoare_strengthen_postE_R, rule resolve_address_bits_real_cte_at) apply (erule real_cte_at_cte) done @@ -566,9 +566,9 @@ lemma no_True_set_nth: done lemma set_cap_caps_of_state_monad: - "(v, s') \ fst (set_cap cap p s) \ caps_of_state s' = (caps_of_state s (p \ cap))" + "(v, s') \ fst (set_cap cap p s) \ caps_of_state s' = (caps_of_state s)(p \ cap)" apply (drule use_valid) - apply (rule set_cap_caps_of_state [where P="(=) (caps_of_state s (p\cap))"]) + apply (rule set_cap_caps_of_state [where P="(=) ((caps_of_state s)(p\cap))"]) apply (rule refl) apply simp done @@ -1949,15 +1949,15 @@ lemma set_free_index_valid_mdb: proof(intro conjI impI) fix s bits f r dev assume mdb:"untyped_mdb (cdt s) (caps_of_state s)" - assume cstate:"caps_of_state s cref = Some (cap.UntypedCap dev r bits f)" (is "?m cref = Some ?srccap") - show "untyped_mdb (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + assume cstate:"caps_of_state s cref = Some (UntypedCap dev r bits f)" (is "?m cref = Some ?srccap") + show "untyped_mdb (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply (rule untyped_mdb_update_free_index [where capa = ?srccap and m = "caps_of_state s" and src = cref, unfolded free_index_update_def,simplified,THEN iffD2]) apply (simp add:cstate mdb)+ done assume arch_mdb:"valid_arch_mdb (is_original_cap s) (caps_of_state s)" - show "valid_arch_mdb (is_original_cap s) (caps_of_state s(cref \ UntypedCap dev r bits idx))" + show "valid_arch_mdb (is_original_cap s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply (rule valid_arch_mdb_updates(1)[where capa = ?srccap and m="caps_of_state s" and src=cref, unfolded free_index_update_def, simplified, THEN iffD2]) @@ -1987,7 +1987,7 @@ lemma set_free_index_valid_mdb: done note blah[simp del] = untyped_range.simps usable_untyped_range.simps - show "untyped_inc (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + show "untyped_inc (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using inc cstate apply (unfold untyped_inc_def) apply (intro allI impI) @@ -2023,11 +2023,11 @@ lemma set_free_index_valid_mdb: apply clarsimp+ done assume "ut_revocable (is_original_cap s) (caps_of_state s)" - thus "ut_revocable (is_original_cap s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "ut_revocable (is_original_cap s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using cstate by (fastforce simp:ut_revocable_def) assume "reply_caps_mdb (cdt s) (caps_of_state s)" - thus "reply_caps_mdb (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "reply_caps_mdb (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using cstate apply (simp add:reply_caps_mdb_def del:split_paired_All split_paired_Ex) apply (intro allI impI conjI) @@ -2039,7 +2039,7 @@ lemma set_free_index_valid_mdb: apply fastforce done assume "reply_masters_mdb (cdt s) (caps_of_state s)" - thus "reply_masters_mdb (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "reply_masters_mdb (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply (simp add:reply_masters_mdb_def del:split_paired_All split_paired_Ex) apply (intro allI impI ballI) apply (erule exE) @@ -2051,7 +2051,7 @@ lemma set_free_index_valid_mdb: assume mdb:"mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" and desc_inc:"descendants_inc (cdt s) (caps_of_state s)" and cte:"caps_of_state s cref = Some 
(cap.UntypedCap dev r bits f)" - show "descendants_inc (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + show "descendants_inc (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using mdb cte apply (clarsimp simp:swp_def cte_wp_at_caps_of_state) apply (erule descendants_inc_minor[OF desc_inc]) @@ -2147,10 +2147,10 @@ lemma cap_insert_mdb [wp]: apply (rule conjI) apply (simp add: no_mloop_def mdb_insert_abs.parency) apply (intro allI impI conjI) - apply (rule_tac m1 = "caps_of_state s(dest\ cap)" + apply (rule_tac m1 = "(caps_of_state s)(dest\ cap)" and src1 = src in iffD2[OF untyped_mdb_update_free_index,rotated,rotated]) apply (simp add:fun_upd_twist)+ - apply (drule_tac cs' = "caps_of_state s(src \ max_free_index_update capa)" in descendants_inc_minor) + apply (drule_tac cs' = "(caps_of_state s)(src \ max_free_index_update capa)" in descendants_inc_minor) apply (clarsimp simp:cte_wp_at_caps_of_state swp_def) apply clarsimp apply (subst upd_commute) @@ -2175,7 +2175,7 @@ lemma cap_insert_mdb [wp]: apply (clarsimp simp:is_cap_simps free_index_update_def)+ apply (clarsimp simp: reply_master_revocable_def is_derived_def is_master_reply_cap_def is_cap_revocable_def) apply clarsimp - apply (rule_tac m1 = "caps_of_state s(dest\ cap)" + apply (rule_tac m1 = "(caps_of_state s)(dest\ cap)" and src1 = src in reply_mdb_update_free_index[THEN iffD2]) apply ((simp add:fun_upd_twist)+)[3] apply (clarsimp simp:is_cap_simps is_cap_revocable_def) @@ -2200,11 +2200,11 @@ lemma cap_insert_mdb [wp]: apply (erule (1) valid_arch_mdb_updates) apply (clarsimp) apply (intro impI conjI allI) - apply (rule_tac m1 = "caps_of_state s(dest\ cap)" + apply (rule_tac m1 = "(caps_of_state s)(dest\ cap)" and src1 = src in iffD2[OF untyped_mdb_update_free_index,rotated,rotated]) apply (frule mdb_insert_abs_sib.untyped_mdb_sib) apply (simp add:fun_upd_twist)+ - apply (drule_tac cs' = "caps_of_state s(src \ max_free_index_update capa)" in descendants_inc_minor) + apply (drule_tac cs' = "(caps_of_state s)(src \ max_free_index_update capa)" in descendants_inc_minor) apply (clarsimp simp:cte_wp_at_caps_of_state swp_def) apply clarsimp apply (subst upd_commute) @@ -2215,7 +2215,7 @@ lemma cap_insert_mdb [wp]: apply (simp add: no_mloop_def) apply (simp add: mdb_insert_abs_sib.parent_n_eq) apply (simp add: mdb_insert_abs.dest_no_parent_trancl) - apply (rule_tac m = "caps_of_state s(dest\ cap)" and src = src in untyped_inc_update_free_index) + apply (rule_tac m = "(caps_of_state s)(dest\ cap)" and src = src in untyped_inc_update_free_index) apply (simp add:fun_upd_twist)+ apply (frule(3) mdb_insert_abs_sib.untyped_inc) apply (frule_tac p = src in caps_of_state_valid,assumption) @@ -2228,7 +2228,7 @@ lemma cap_insert_mdb [wp]: apply (intro impI conjI) apply (clarsimp simp:is_cap_simps free_index_update_def)+ apply (clarsimp simp: reply_master_revocable_def is_derived_def is_master_reply_cap_def is_cap_revocable_def) - apply (rule_tac m1 = "caps_of_state s(dest\ cap)" + apply (rule_tac m1 = "(caps_of_state s)(dest\ cap)" and src1 = src in iffD2[OF reply_mdb_update_free_index,rotated,rotated]) apply (frule mdb_insert_abs_sib.reply_mdb_sib,simp+) apply (clarsimp simp:ut_revocable_def,case_tac src,clarsimp,simp) @@ -3514,7 +3514,7 @@ lemma set_untyped_cap_as_full_has_reply_cap: set_untyped_cap_as_full src_cap cap src \\rv s. 
(has_reply_cap t s)\" apply (clarsimp simp:has_reply_cap_def is_reply_cap_to_def) - apply (wp hoare_ex_wp) + apply (wp hoare_vcg_ex_lift) apply (wp set_untyped_cap_as_full_cte_wp_at) apply (clarsimp simp:cte_wp_at_caps_of_state) apply (rule_tac x = a in exI) @@ -4225,7 +4225,7 @@ lemma set_cap_ups_of_heap[wp]: "\\s. P (ups_of_heap (kheap s))\ set_cap cap sl \\_ s. P (ups_of_heap (kheap s))\" apply (simp add: set_cap_def split_def set_object_def) - apply (rule hoare_seq_ext [OF _ get_object_sp]) + apply (rule bind_wp [OF _ get_object_sp]) apply (case_tac obj) by (auto simp: valid_def in_monad obj_at_def get_object_def) @@ -4251,7 +4251,7 @@ lemma set_cap_cns_of_heap[wp]: "\\s. P (cns_of_heap (kheap s))\ set_cap cap sl \\_ s. P (cns_of_heap (kheap s))\" apply (simp add: set_cap_def split_def set_object_def) - apply (rule hoare_seq_ext [OF _ get_object_sp]) + apply (rule bind_wp [OF _ get_object_sp]) apply (case_tac obj) apply (auto simp: valid_def in_monad obj_at_def get_object_def) done diff --git a/proof/invariant-abstract/DetSchedAux_AI.thy b/proof/invariant-abstract/DetSchedAux_AI.thy index 10ab5dba4c..4d45f67751 100644 --- a/proof/invariant-abstract/DetSchedAux_AI.thy +++ b/proof/invariant-abstract/DetSchedAux_AI.thy @@ -313,15 +313,12 @@ lemma invoke_untyped_valid_sched: "\invs and valid_untyped_inv ui and ct_active and valid_sched and valid_idle \ invoke_untyped ui \ \_ . valid_sched \" - including no_pre apply (rule hoare_pre) apply (rule_tac I="invs and valid_untyped_inv ui and ct_active" - in valid_sched_tcb_state_preservation) - apply (wp invoke_untyped_st_tcb_at) - apply simp - apply (wp invoke_untyped_etcb_at)+ - apply (rule hoare_post_impErr, rule hoare_pre, rule invoke_untyp_invs, - simp_all add: invs_valid_idle)[1] + in valid_sched_tcb_state_preservation) + apply (wpsimp wp: invoke_untyped_st_tcb_at invoke_untyped_etcb_at)+ + apply (rule hoare_strengthen_postE, rule invoke_untyp_invs; simp add: invs_valid_idle) + apply simp apply (rule_tac f="\s. P (scheduler_action s)" in hoare_lift_Pf) apply (rule_tac f="\s. x (ready_queues s)" in hoare_lift_Pf) apply wp+ diff --git a/proof/invariant-abstract/DetSchedDomainTime_AI.thy b/proof/invariant-abstract/DetSchedDomainTime_AI.thy index f4e2359308..53d5c92cee 100644 --- a/proof/invariant-abstract/DetSchedDomainTime_AI.thy +++ b/proof/invariant-abstract/DetSchedDomainTime_AI.thy @@ -128,7 +128,7 @@ crunch domain_list_inv[wp]: "\s. P (domain_list s)" crunch domain_list_inv[wp]: finalise_cap "\s. P (domain_list s)" - (wp: crunch_wps hoare_unless_wp select_inv simp: crunch_simps) + (wp: crunch_wps unless_wp select_inv simp: crunch_simps) lemma rec_del_domain_list[wp]: "\\s. P (domain_list s)\ rec_del call \\rv s. P (domain_list s)\" @@ -172,7 +172,7 @@ crunch domain_list_inv[wp]: preemption_point "\s. P (domain_list s)" (wp: OR_choiceE_weak_wp ignore_del: preemption_point) crunch domain_list_inv[wp]: reset_untyped_cap "\s. P (domain_list s)" - (wp: crunch_wps hoare_unless_wp mapME_x_inv_wp select_inv + (wp: crunch_wps unless_wp mapME_x_inv_wp select_inv simp: crunch_simps) context DetSchedDomainTime_AI begin @@ -247,7 +247,6 @@ end section \Preservation of domain time remaining\ crunch domain_time_inv[wp]: do_user_op "(\s. P (domain_time s))" - (wp: select_wp) context DetSchedDomainTime_AI begin @@ -262,14 +261,14 @@ crunch domain_time_inv[wp]: choose_thread "\s. P (domain_time s)" crunch domain_time_inv[wp]: send_signal "\s. 
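(* Reviewer note (assumption, not stated in the patch): several wp and crunch
   invocations drop select_wp and alternative_wp, e.g. in the Machine_AI hunks

     apply (wp alternative_wp select_wp)
   becomes
     apply wp

   which suggests those rules are now declared [wp] by default, so naming them
   explicitly is redundant. The nearby hoare_unless_wp / hoare_when_wp / static_imp_wp
   occurrences are straight renames to unless_wp / when_wp / hoare_weak_lift_imp, with
   identical proof obligations as far as we can tell. *)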
P (domain_time s)" - (wp: hoare_drop_imps mapM_x_wp_inv select_wp simp: crunch_simps unless_def) + (wp: hoare_drop_imps mapM_x_wp_inv simp: crunch_simps unless_def) crunch domain_time_inv[wp]: cap_swap_for_delete, empty_slot, get_object, get_cap, tcb_sched_action "\s. P (domain_time s)" crunch domain_time_inv[wp]: finalise_cap "\s. P (domain_time s)" - (wp: crunch_wps hoare_drop_imps hoare_unless_wp select_inv mapM_wp + (wp: crunch_wps hoare_drop_imps unless_wp select_inv mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: tcb_sched_action) lemma rec_del_domain_time[wp]: @@ -314,7 +313,7 @@ crunch domain_time_inv[wp]: preemption_point "\s. P (domain_time s)" (wp: OR_choiceE_weak_wp ignore_del: preemption_point) crunch domain_time_inv[wp]: reset_untyped_cap "\s. P (domain_time s)" - (wp: crunch_wps hoare_unless_wp mapME_x_inv_wp select_inv + (wp: crunch_wps unless_wp mapME_x_inv_wp select_inv simp: crunch_simps) context DetSchedDomainTime_AI begin @@ -447,7 +446,7 @@ lemma call_kernel_domain_time_inv_det_ext: apply (rule_tac Q="\_ s. 0 < domain_time s \ valid_domain_list s" in hoare_post_imp) apply fastforce apply (wp handle_event_domain_time_inv)+ - apply (rule_tac Q'="\_ s. 0 < domain_time s" in hoare_post_imp_R) + apply (rule_tac Q'="\_ s. 0 < domain_time s" in hoare_strengthen_postE_R) apply (wp handle_event_domain_time_inv) apply fastforce+ done diff --git a/proof/invariant-abstract/DetSchedInvs_AI.thy b/proof/invariant-abstract/DetSchedInvs_AI.thy index e80655d124..5e4395749a 100644 --- a/proof/invariant-abstract/DetSchedInvs_AI.thy +++ b/proof/invariant-abstract/DetSchedInvs_AI.thy @@ -129,6 +129,11 @@ abbreviation valid_blocked_except :: "obj_ref \ det_ext state \ etcb_at' (\t. tcb_domain t = cdom) thread ekh" @@ -281,6 +286,10 @@ lemma valid_queues_lift: apply (wp hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift a) done +lemma valid_sched_valid_queues[elim!]: + "valid_sched s \ valid_queues s" + by (clarsimp simp: valid_sched_def) + lemma typ_at_st_tcb_at_lift: assumes typ_lift: "\P T p. \\s. P (typ_at T p s)\ f \\r s. P (typ_at T p s)\" assumes st_lift: "\P. \st_tcb_at P t\ f \\_. st_tcb_at P t\" @@ -317,7 +326,7 @@ lemma valid_blocked_lift: apply (rule hoare_pre) apply (wps c e d) apply (simp add: valid_blocked_def) - apply (wp hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift static_imp_wp a) + apply (wp hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift hoare_weak_lift_imp a) apply (rule hoare_convert_imp) apply (rule typ_at_st_tcb_at_lift) apply (wp a t)+ @@ -356,7 +365,7 @@ lemma weak_valid_sched_action_lift: shows "\weak_valid_sched_action\ f \\rv. weak_valid_sched_action\" apply (rule hoare_lift_Pf[where f="\s. scheduler_action s", OF _ c]) apply (simp add: weak_valid_sched_action_def) - apply (wp hoare_vcg_all_lift static_imp_wp a) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp a) done lemma switch_in_cur_domain_lift: @@ -367,7 +376,7 @@ lemma switch_in_cur_domain_lift: apply (rule hoare_lift_Pf[where f="\s. scheduler_action s", OF _ b]) apply (rule hoare_lift_Pf[where f="\s. cur_domain s", OF _ c]) apply (simp add: switch_in_cur_domain_def in_cur_domain_def) - apply (wp hoare_vcg_all_lift static_imp_wp a c) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp a c) done lemma valid_sched_action_lift: @@ -382,7 +391,7 @@ lemma valid_sched_action_lift: apply (rule hoare_vcg_conj_lift) apply (rule hoare_lift_Pf[where f="\s. 
scheduler_action s", OF _ c]) apply (simp add: is_activatable_def) - apply (wp weak_valid_sched_action_lift switch_in_cur_domain_lift static_imp_wp a b c d e)+ + apply (wp weak_valid_sched_action_lift switch_in_cur_domain_lift hoare_weak_lift_imp a b c d e)+ done lemma valid_sched_lift: @@ -401,6 +410,10 @@ lemma valid_sched_lift: valid_sched_action_lift valid_blocked_lift a b c d e f g h i hoare_vcg_conj_lift) done +lemma valid_sched_valid_etcbs[elim!]: + "valid_sched s \ valid_etcbs s" + by (clarsimp simp: valid_sched_def) + lemma valid_etcbs_tcb_etcb: "\ valid_etcbs s; kheap s ptr = Some (TCB tcb) \ \ \etcb. ekheap s ptr = Some etcb" by (force simp: valid_etcbs_def is_etcb_at_def st_tcb_at_def obj_at_def) diff --git a/proof/invariant-abstract/DetSchedSchedule_AI.thy b/proof/invariant-abstract/DetSchedSchedule_AI.thy index 076c14dee9..c3571acfc0 100644 --- a/proof/invariant-abstract/DetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/DetSchedSchedule_AI.thy @@ -573,6 +573,16 @@ lemma set_thread_state_cur_ct_in_cur_domain[wp]: wp set_scheduler_action_wp gts_wp)+ done +lemma set_thread_state_schact_is_rct: + "\schact_is_rct and (\s. ref = cur_thread s \ runnable ts )\ + set_thread_state ref ts + \\_. schact_is_rct\" + unfolding set_thread_state_def set_thread_state_ext_extended.dxo_eq + apply (clarsimp simp: set_thread_state_ext_def) + apply (wpsimp wp: set_object_wp gts_wp simp: set_scheduler_action_def) + apply (clarsimp simp: schact_is_rct_def st_tcb_at_def obj_at_def) + done + lemma set_bound_notification_cur_ct_in_cur_domain[wp]: "\ct_in_cur_domain\ set_bound_notification ref ts \\_. ct_in_cur_domain\" @@ -907,23 +917,19 @@ lemma as_user_valid_sched[wp]: st_tcb_def2 valid_blocked_def) done -lemma switch_to_thread_ct_not_queued[wp]: - "\valid_queues\ switch_to_thread t \\rv s. not_queued (cur_thread s) s\" - apply (simp add: switch_to_thread_def) - including no_pre - apply wp - prefer 4 - apply (rule get_wp) - prefer 3 - apply (rule assert_inv) - prefer 2 - apply (rule arch_switch_to_thread_valid_queues') - apply (simp add: tcb_sched_action_def - tcb_sched_dequeue_def | wp)+ +lemma tcb_sched_action_dequeue_not_queued[wp]: + "\valid_queues\ tcb_sched_action tcb_sched_dequeue t \\_. not_queued t\" + unfolding tcb_sched_action_def tcb_sched_dequeue_def + apply wpsimp apply (clarsimp simp add: valid_queues_def etcb_at_def not_queued_def split: option.splits) done +lemma switch_to_thread_ct_not_queued[wp]: + "\valid_queues\ switch_to_thread t \\rv s. not_queued (cur_thread s) s\" + unfolding switch_to_thread_def + by wpsimp + end lemma ct_not_in_q_def2: @@ -1350,7 +1356,7 @@ lemma append_thread_queued: (* having is_highest_prio match gets_wp makes it very hard to stop and drop imps etc. *) definition - "wrap_is_highest_prio cur_dom target_prio \ NonDetMonad.gets (is_highest_prio cur_dom target_prio)" + "wrap_is_highest_prio cur_dom target_prio \ Nondet_Monad.gets (is_highest_prio cur_dom target_prio)" lemma schedule_choose_new_thread_valid_sched: "\ valid_idle and valid_etcbs and valid_idle_etcb and valid_queues and valid_blocked @@ -1359,7 +1365,7 @@ lemma schedule_choose_new_thread_valid_sched: schedule_choose_new_thread \\_. 
valid_sched \" unfolding schedule_choose_new_thread_def - apply (wpsimp wp_del: hoare_when_wp + apply (wpsimp wp_del: when_wp wp: set_scheduler_action_rct_valid_sched choose_thread_ct_not_queued choose_thread_ct_activatable choose_thread_cur_dom_or_idle hoare_vcg_disj_lift)+ @@ -1380,7 +1386,7 @@ lemma schedule_valid_sched: tcb_sched_enqueue_cur_ct_in_q) (* switch_thread candidate *) apply (rename_tac candidate) - apply (wp del: hoare_when_wp + apply (wp del: when_wp add: set_scheduler_action_rct_valid_sched schedule_choose_new_thread_valid_sched) apply (rule hoare_vcg_conj_lift) apply (rule_tac t=candidate in set_scheduler_action_cnt_valid_blocked') @@ -1416,7 +1422,7 @@ crunches update_restart_pc crunch ct_not_in_q[wp]: finalise_cap ct_not_in_q - (wp: crunch_wps hoare_drop_imps hoare_unless_wp select_inv mapM_wp + (wp: crunch_wps hoare_drop_imps unless_wp select_inv mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: tcb_sched_action) end @@ -1503,10 +1509,10 @@ lemma thread_set_not_state_valid_sched: lemma unbind_notification_valid_sched[wp]: "\valid_sched\ unbind_notification ntfnptr \\rv. valid_sched\" apply (simp add: unbind_notification_def) - apply (rule hoare_seq_ext[OF _ gbn_sp]) + apply (rule bind_wp[OF _ gbn_sp]) apply (case_tac ntfnptra, simp, wp, simp) apply (clarsimp) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (wp set_bound_notification_valid_sched, clarsimp) done @@ -1516,7 +1522,7 @@ crunches update_restart_pc context DetSchedSchedule_AI begin crunch valid_etcbs[wp]: finalise_cap valid_etcbs - (wp: hoare_drop_imps hoare_unless_wp select_inv mapM_x_wp mapM_wp subset_refl + (wp: hoare_drop_imps unless_wp select_inv mapM_x_wp mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: set_object) crunch valid_sched[wp]: cap_swap_for_delete, empty_slot, cap_delete_one valid_sched @@ -1525,7 +1531,7 @@ crunch valid_sched[wp]: cap_swap_for_delete, empty_slot, cap_delete_one valid_sc lemma reply_cancel_ipc_valid_sched[wp]: "\valid_sched\ reply_cancel_ipc tptr \\rv. valid_sched\" apply (simp add: reply_cancel_ipc_def) - apply (wp select_wp hoare_drop_imps thread_set_not_state_valid_sched | simp)+ + apply (wp hoare_drop_imps thread_set_not_state_valid_sched | simp)+ done end @@ -1659,18 +1665,18 @@ crunches update_restart_pc (simp: crunch_simps ignore: set_object) crunch simple_sched_action[wp]: finalise_cap simple_sched_action - (wp: hoare_drop_imps mapM_x_wp mapM_wp select_wp subset_refl + (wp: hoare_drop_imps mapM_x_wp mapM_wp subset_refl simp: unless_def if_fun_split) lemma suspend_valid_sched[wp]: - notes seq_ext_inv = seq_ext[where A=I and B="\_. I" for I] + notes bind_wp_fwd_inv = bind_wp_fwd[where P=I and Q'="\_. I" for I] shows "\valid_sched and simple_sched_action\ suspend t \\rv. 
valid_sched\" apply (simp add: suspend_def) - apply (rule seq_ext_inv) + apply (rule bind_wp_fwd_inv) apply wpsimp - apply (rule seq_ext_inv) + apply (rule bind_wp_fwd_inv) apply wp - apply (rule seq_ext_inv) + apply (rule bind_wp_fwd_inv) apply wpsimp apply (wp tcb_sched_action_dequeue_strong_valid_sched | simp)+ @@ -1998,7 +2004,7 @@ crunch not_cur_thread[wp]: empty_slot "not_cur_thread thread" (wp: crunch_wps) crunch not_cur_thread[wp]: setup_reply_master, cancel_ipc "not_cur_thread thread" - (wp: hoare_drop_imps select_wp mapM_x_wp simp: unless_def if_fun_split) + (wp: hoare_drop_imps mapM_x_wp simp: unless_def if_fun_split) crunch etcb_at[wp]: setup_reply_master "etcb_at P t" @@ -2131,7 +2137,7 @@ lemma valid_blocked_except_lift: apply (rule hoare_pre) apply (wps c e d) apply (simp add: valid_blocked_except_def) - apply (wp static_imp_wp hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift a) + apply (wp hoare_weak_lift_imp hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift a) apply (rule hoare_convert_imp) apply (rule typ_at_st_tcb_at_lift) apply (wp a t)+ @@ -2170,13 +2176,13 @@ lemmas set_thread_state_active_valid_sched_except_blocked = lemma set_thread_state_runnable_valid_blocked: "\valid_blocked and st_tcb_at runnable ref and (\s. runnable ts)\ set_thread_state ref ts \\_. valid_blocked\" apply (simp add: set_thread_state_def) - apply (rule hoare_seq_ext[OF _ gets_the_get_tcb_sp]) - apply (rule_tac B="\rv. valid_blocked and st_tcb_at runnable ref" in hoare_seq_ext[rotated]) + apply (rule bind_wp[OF _ gets_the_get_tcb_sp]) + apply (rule_tac Q'="\rv. valid_blocked and st_tcb_at runnable ref" in bind_wp_fwd) apply (wp set_object_wp) apply (clarsimp simp: valid_blocked_def not_queued_def runnable_eq_active pred_tcb_at_def st_tcb_at_kh_def obj_at_kh_def obj_at_def) apply (simp add: set_thread_state_ext_def) - apply (rule hoare_seq_ext[OF _ gts_sp]) + apply (rule bind_wp[OF _ gts_sp]) apply (rule_tac S="runnable ts" in hoare_gen_asm_spec) apply (clarsimp simp: pred_tcb_at_def obj_at_def) apply clarsimp @@ -2206,6 +2212,13 @@ end crunch valid_sched[wp]: dec_domain_time valid_sched +lemma thread_set_time_slice_valid_queues[wp]: + "ethread_set (tcb_time_slice_update f) tptr \valid_queues\" + apply (unfold thread_set_time_slice_def ethread_set_def set_eobject_def) + apply wpsimp + apply (fastforce simp: get_etcb_def valid_queues_def is_etcb_at'_def etcb_at'_def) + done + lemma timer_tick_valid_sched[wp]: "\valid_sched\ timer_tick \\rv. valid_sched\" apply (simp add: timer_tick_def crunch_simps thread_set_time_slice_def @@ -2681,7 +2694,7 @@ lemma send_fault_ipc_valid_sched[wp]: \\_. valid_sched\" apply (simp add: send_fault_ipc_def Let_def) apply (wp send_ipc_valid_sched thread_set_not_state_valid_sched thread_set_no_change_tcb_state - hoare_gen_asm'[OF thread_set_tcb_fault_set_invs] hoare_drop_imps hoare_vcg_all_lift_R + hoare_gen_asm'[OF thread_set_tcb_fault_set_invs] hoare_drop_imps hoare_vcg_all_liftE_R ct_in_state_thread_state_lift thread_set_no_change_tcb_state hoare_vcg_disj_lift | wpc | simp | wps)+ @@ -2718,25 +2731,26 @@ lemma handle_double_fault_valid_sched: \\rv. 
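(* Reviewer note (sketch, not authoritative): the remaining hunks rename hoare_seq_ext
   to bind_wp, and its forward form seq_ext to bind_wp_fwd (as in suspend_valid_sched
   above). We take these to be pure renames of the sequencing rule for the
   nondeterministic state monad, whose shape is roughly

     lemma bind_wp:
       "\<lbrakk> \<And>x. \<lbrace>B x\<rbrace> g x \<lbrace>C\<rbrace>; \<lbrace>A\<rbrace> f \<lbrace>B\<rbrace> \<rbrakk>
        \<Longrightarrow> \<lbrace>A\<rbrace> f >>= g \<lbrace>C\<rbrace>"

   (variable names illustrative). The NonDetMonad \<rightarrow> Nondet_Monad theory
   renames earlier in the patch look like part of the same library reorganisation. *)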
valid_sched\" apply (simp add: valid_sched_def) including no_pre - apply (wp handle_double_fault_valid_queues handle_double_fault_valid_sched_action - set_thread_state_not_runnable_valid_blocked - | rule hoare_conjI | simp add: handle_double_fault_def | fastforce simp: simple_sched_action_def)+ + apply (wpsimp wp: handle_double_fault_valid_queues handle_double_fault_valid_sched_action + set_thread_state_not_runnable_valid_blocked + comb: hoare_weaken_pre + | rule hoare_conjI | simp add: handle_double_fault_def | fastforce simp: simple_sched_action_def)+ done lemma send_fault_ipc_error_sched_act_not[wp]: "\scheduler_act_not t\ send_fault_ipc tptr fault -, \\rv. scheduler_act_not t\" by (simp add: send_fault_ipc_def Let_def | - (wp hoare_drop_imps hoare_vcg_all_lift_R)+ | wpc)+ + (wp hoare_drop_imps hoare_vcg_all_liftE_R)+ | wpc)+ lemma send_fault_ipc_error_cur_thread[wp]: "\\s. P (cur_thread s)\ send_fault_ipc tptr fault -, \\rv s. P (cur_thread s)\" by (simp add: send_fault_ipc_def Let_def | - (wp hoare_drop_imps hoare_vcg_all_lift_R)+ | wpc)+ + (wp hoare_drop_imps hoare_vcg_all_liftE_R)+ | wpc)+ lemma send_fault_ipc_error_not_queued[wp]: "\not_queued t\ send_fault_ipc tptr fault -, \\rv. not_queued t\" by (simp add: send_fault_ipc_def Let_def | - (wp hoare_drop_imps hoare_vcg_all_lift_R)+ | wpc)+ + (wp hoare_drop_imps hoare_vcg_all_liftE_R)+ | wpc)+ context DetSchedSchedule_AI begin lemma handle_fault_valid_sched: @@ -2802,47 +2816,48 @@ lemma receive_ipc_valid_sched: \\rv. valid_sched\" supply option.case_cong_weak[cong] apply (simp add: receive_ipc_def) - including no_pre apply (wp | wpc | simp)+ - apply (wp set_thread_state_sched_act_not_valid_sched | wpc)+ - apply ((wp set_thread_state_sched_act_not_valid_sched - setup_caller_cap_sched_act_not_valid_sched - | simp add: do_nbrecv_failed_transfer_def)+)[2] - apply ((wp possible_switch_to_valid_sched_except sts_st_tcb_at' hoare_drop_imps - set_thread_state_runnable_valid_queues - set_thread_state_runnable_valid_sched_action - set_thread_state_valid_blocked_except | simp | wpc)+)[3] - apply (rule_tac Q="\_. valid_sched and scheduler_act_not (sender) and not_queued (sender) and not_cur_thread (sender) and (\s. sender \ idle_thread s)" in hoare_strengthen_post) - apply wp - apply (simp add: valid_sched_def) - apply ((wp | wpc)+)[1] - apply (simp | wp gts_wp hoare_vcg_all_lift)+ - apply (wp hoare_vcg_imp_lift) - apply ((simp add: set_simple_ko_def set_object_def | - wp hoare_drop_imps | wpc)+)[1] - apply (wp hoare_vcg_imp_lift get_object_wp - set_thread_state_sched_act_not_valid_sched gbn_wp - | simp add: get_simple_ko_def do_nbrecv_failed_transfer_def a_type_def - split: kernel_object.splits - | wpc - | wp (once) hoare_vcg_all_lift hoare_vcg_ex_lift)+ + apply (wp set_thread_state_sched_act_not_valid_sched | wpc)+ + apply ((wp set_thread_state_sched_act_not_valid_sched + setup_caller_cap_sched_act_not_valid_sched + | simp add: do_nbrecv_failed_transfer_def)+)[2] + apply ((wp possible_switch_to_valid_sched_except sts_st_tcb_at' hoare_drop_imps + set_thread_state_runnable_valid_queues + set_thread_state_runnable_valid_sched_action + set_thread_state_valid_blocked_except | simp | wpc)+)[3] + apply (rule_tac Q="\_. valid_sched and scheduler_act_not (sender) and not_queued (sender) + and not_cur_thread (sender) and (\s. 
sender \ idle_thread s)" + in hoare_strengthen_post) + apply wp + apply (simp add: valid_sched_def) + apply ((wp | wpc)+)[1] + apply (simp | wp gts_wp hoare_vcg_all_lift)+ + apply (wp hoare_vcg_imp_lift) + apply ((simp add: set_simple_ko_def set_object_def + | wp hoare_drop_imps | wpc)+)[1] + apply (wp hoare_vcg_imp_lift get_object_wp + set_thread_state_sched_act_not_valid_sched gbn_wp + | simp add: get_simple_ko_def do_nbrecv_failed_transfer_def a_type_def + split: kernel_object.splits + | wpc + | wp (once) hoare_vcg_all_lift hoare_vcg_ex_lift)+ apply (subst st_tcb_at_kh_simp[symmetric])+ apply (clarsimp simp: st_tcb_at_kh_if_split default_notification_def default_ntfn_def isActive_def) - apply (rename_tac xh xi xj) + apply (rename_tac ref b R ntfn xh xi xj) apply (drule_tac t="hd xh" and P'="\ts. \ active ts" in st_tcb_weakenE) apply clarsimp apply (simp only: st_tcb_at_not) apply (subgoal_tac "hd xh \ idle_thread s") apply (fastforce simp: valid_sched_def valid_sched_action_def weak_valid_sched_action_def valid_queues_def st_tcb_at_not ct_in_state_def not_cur_thread_def runnable_eq_active not_queued_def scheduler_act_not_def split: scheduler_action.splits) -(* clag from send_signal_valid_sched *) + (* clag from send_signal_valid_sched *) apply clarsimp apply (frule invs_valid_idle) - apply (drule_tac ptr=xc in idle_not_queued) + apply (drule_tac ptr=ref in idle_not_queued) apply (clarsimp simp: invs_sym_refs) apply (simp add: state_refs_of_def obj_at_def) apply (frule invs_valid_objs) apply (simp add: valid_objs_def obj_at_def) - apply (drule_tac x = xc in bspec) + apply (drule_tac x = ref in bspec) apply (simp add: dom_def) apply (clarsimp simp: valid_obj_def valid_ntfn_def) apply (drule hd_in_set) @@ -2961,7 +2976,7 @@ lemma unbind_maybe_notification_sym_refs[wp]: unbind_maybe_notification a \\rv s. sym_refs (state_refs_of s)\" apply (simp add: unbind_maybe_notification_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp | wpc | clarsimp)+ apply (rule conjI) @@ -3081,7 +3096,7 @@ lemma handle_recv_valid_sched: apply (wpsimp simp: lookup_cap_def lookup_slot_for_thread_def) apply (wp resolve_address_bits_valid_fault2)+ apply (simp add: valid_fault_def) - apply (wp hoare_drop_imps hoare_vcg_all_lift_R) + apply (wp hoare_drop_imps hoare_vcg_all_liftE_R) apply (wpsimp wp: delete_caller_cap_not_queued | strengthen invs_valid_tcb_ctable_strengthen)+ apply (auto simp: ct_in_state_def tcb_at_invs objs_valid_tcb_ctable invs_valid_objs) done @@ -3192,7 +3207,7 @@ lemma invoke_domain_valid_sched[wp]: ethread_set_valid_blocked ethread_set_ssa_valid_sched_action ethread_set_not_cur_ct_in_cur_domain ethread_set_not_idle_valid_sched ethread_set_not_idle_valid_idle_etcb) - apply (wp static_imp_wp static_imp_conj_wp tcb_dequeue_not_queued tcb_sched_action_dequeue_valid_blocked_except) + apply (wp hoare_weak_lift_imp hoare_weak_lift_imp_conj tcb_dequeue_not_queued tcb_sched_action_dequeue_valid_blocked_except) apply simp apply (wp hoare_vcg_disj_lift) apply (rule_tac Q="\_. valid_sched and not_queued t and valid_idle and (\s. t \ idle_thread s)" in hoare_strengthen_post) @@ -3251,13 +3266,13 @@ lemma handle_invocation_valid_sched: apply (wp set_thread_state_runnable_valid_sched)[1] apply wp+ apply (wp gts_wp hoare_vcg_all_lift) - apply (rule_tac Q="\_. valid_sched" and E="\_. valid_sched" in hoare_post_impErr) + apply (rule_tac Q="\_. valid_sched" and E="\_. 
valid_sched" in hoare_strengthen_postE) apply wp apply ((clarsimp simp: st_tcb_at_def obj_at_def)+)[2] apply (wp ct_in_state_set set_thread_state_runnable_valid_sched | simp add: split_def if_apply_def2 split del: if_split)+ apply (simp add: validE_E_def) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule lookup_cap_and_slot_valid_fault) apply (wp | simp)+ apply (auto simp: ct_in_state_def valid_sched_def ct_not_in_q_def valid_queues_def not_queued_def runnable_eq_active elim: st_tcb_ex_cap) @@ -3350,8 +3365,8 @@ lemma do_reply_transfer_add_assert: apply (wp a) apply simp apply (simp add: do_reply_transfer_def) - apply (rule hoare_seq_ext) - apply (rule hoare_seq_ext) + apply (rule bind_wp) + apply (rule bind_wp) prefer 2 apply (rule assert_false) apply simp @@ -3461,8 +3476,8 @@ crunch valid_list[wp]: schedule_choose_new_thread valid_list lemma schedule_valid_list[wp]: "\valid_list\ Schedule_A.schedule \\_. valid_list\" apply (simp add: Schedule_A.schedule_def) - apply (wp add: tcb_sched_action_valid_list alternative_wp select_wp gts_wp hoare_drop_imps - del: ethread_get_wp + apply (wp add: tcb_sched_action_valid_list gts_wp hoare_drop_imps + del: ethread_get_wp | wpc | simp)+ done @@ -3484,7 +3499,7 @@ lemma call_kernel_valid_sched: apply (wpsimp wp: getActiveIRQ_neq_non_kernel) apply auto[1] apply (rule_tac Q="\rv. valid_sched and invs" and - E="\rv. valid_sched and invs" in hoare_post_impErr) + E="\rv. valid_sched and invs" in hoare_strengthen_postE) apply (rule valid_validE) apply (wp handle_event_valid_sched) apply (force intro: active_from_running)+ diff --git a/proof/invariant-abstract/Deterministic_AI.thy b/proof/invariant-abstract/Deterministic_AI.thy index f127c88ad2..4eaad8a36b 100644 --- a/proof/invariant-abstract/Deterministic_AI.thy +++ b/proof/invariant-abstract/Deterministic_AI.thy @@ -41,10 +41,8 @@ declare dxo_wp_weak[wp del] (*Some nasty hackery to get around lack of polymorphic type class operations*) -lemma and_assoc: "(A and (B and C)) = (A and B and C)" - apply (rule ext) - apply simp -done +lemma and_assoc: "(A and (B and C)) = (A and B and C)" (* FIXME: eliminate *) + by (simp add: pred_conj_aci) lemma no_children_empty_desc: "(\c. m c \ Some slot) = (descendants_of slot m = {})" @@ -1476,7 +1474,7 @@ end crunch exst[wp]: set_cap "(\s. P (exst s))" (wp: crunch_wps simp: crunch_simps) lemma set_cap_caps_of_state3: - "\\s. P (caps_of_state s (p \ cap)) (cdt s) (exst s) (is_original_cap s)\ + "\\s. P ((caps_of_state s) (p \ cap)) (cdt s) (exst s) (is_original_cap s)\ set_cap cap p \\rv s. P (caps_of_state s) (cdt s) (exst s) (is_original_cap s)\" apply (rule_tac Q="\rv s. \m mr t. P (caps_of_state s) m t mr @@ -3124,7 +3122,7 @@ lemma empty_slot_valid_list[wp]: apply (simp add: empty_slot_def) apply (simp add: set_cdt_def update_cdt_list_def set_cdt_list_def empty_slot_ext_def bind_assoc cong: if_cong) - apply (wp get_cap_wp static_imp_wp | wpc | wp (once) hoare_vcg_all_lift)+ + apply (wp get_cap_wp hoare_weak_lift_imp | wpc | wp (once) hoare_vcg_all_lift)+ apply (clarsimp simp del: fun_upd_apply) apply (frule mdb_empty_abs_simple.intro) apply(case_tac "cdt s sl") @@ -3841,7 +3839,7 @@ crunch valid_list[wp]: thread_set valid_list lemma reply_cancel_ipc_valid_list[wp]: "\valid_list\ reply_cancel_ipc a \\_. 
valid_list\" unfolding reply_cancel_ipc_def - by (wp select_wp hoare_drop_imps thread_set_mdb | simp)+ + by (wp hoare_drop_imps thread_set_mdb | simp)+ crunch all_but_exst[wp]: update_work_units "all_but_exst P" @@ -3856,7 +3854,7 @@ global_interpretation reset_work_units_ext_extended: is_extended "reset_work_uni lemma preemption_point_inv': "\irq_state_independent_A P; \f s. P (work_units_completed_update f s) = P s\ \ \P\ preemption_point \\_. P\" apply (intro impI conjI | simp add: preemption_point_def o_def - | wp hoare_post_imp[OF _ getActiveIRQ_wp] OR_choiceE_weak_wp alternative_wp[where P=P] + | wp hoare_post_imp[OF _ getActiveIRQ_wp] OR_choiceE_weak_wp | wpc | simp add: update_work_units_def reset_work_units_def)+ done diff --git a/proof/invariant-abstract/Detype_AI.thy b/proof/invariant-abstract/Detype_AI.thy index c8149126ee..0e61737e84 100644 --- a/proof/invariant-abstract/Detype_AI.thy +++ b/proof/invariant-abstract/Detype_AI.thy @@ -1065,10 +1065,10 @@ lemma corres_submonad2: OF _ _ gets_sp gets_sp]) apply clarsimp apply (rule corres_underlying_split [where r'="\(x, x') (y, y'). rvr x y \ (x', y') \ ssr", - OF _ _ hoare_post_taut hoare_post_taut]) + OF _ _ hoare_TrueI hoare_TrueI]) defer apply clarsimp - apply (rule corres_underlying_split [where r'=dc, OF _ _ hoare_post_taut hoare_post_taut]) + apply (rule corres_underlying_split [where r'=dc, OF _ _ hoare_TrueI hoare_TrueI]) apply (simp add: corres_modify') apply clarsimp apply (simp add: corres_underlying_def select_f_def) @@ -1093,10 +1093,10 @@ lemma corres_submonad3: OF _ _ gets_sp gets_sp]) apply clarsimp apply (rule corres_underlying_split [where r'="\(x, x') (y, y'). rvr x y \ (x', y') \ ssr", - OF _ _ hoare_post_taut hoare_post_taut]) + OF _ _ hoare_TrueI hoare_TrueI]) defer apply clarsimp - apply (rule corres_underlying_split [where r'=dc, OF _ _ hoare_post_taut hoare_post_taut]) + apply (rule corres_underlying_split [where r'=dc, OF _ _ hoare_TrueI hoare_TrueI]) apply (simp add: corres_modify') apply clarsimp apply (simp add: corres_underlying_def select_f_def) diff --git a/proof/invariant-abstract/EmptyFail_AI.thy b/proof/invariant-abstract/EmptyFail_AI.thy index 526697bd94..1aa82991c9 100644 --- a/proof/invariant-abstract/EmptyFail_AI.thy +++ b/proof/invariant-abstract/EmptyFail_AI.thy @@ -13,61 +13,7 @@ requalify_facts ef_machine_op_lift end -lemmas [wp] = empty_fail_bind empty_fail_bindE empty_fail_get empty_fail_modify - empty_fail_whenEs empty_fail_when empty_fail_gets empty_fail_assertE - empty_fail_error_bits empty_fail_mapM_x empty_fail_mapM empty_fail_sequence_x - ef_ignore_failure ef_machine_op_lift -lemmas empty_fail_error_bits[simp] - -lemma sequence_empty_fail[wp]: - "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequence ms)" - apply (induct ms) - apply (simp add: sequence_def | wp)+ - done - -lemma sequenceE_empty_fail[wp]: - "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequenceE ms)" - apply (induct ms) - apply (simp add: sequenceE_def | wp)+ - done - -lemma sequenceE_x_empty_fail[wp]: - "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequenceE_x ms)" - apply (induct ms) - apply (simp add: sequenceE_x_def | wp)+ - done - -lemma mapME_empty_fail[wp]: - "(\x. empty_fail (m x)) \ empty_fail (mapME m xs)" - by (clarsimp simp: mapME_def image_def | wp)+ - -lemma mapME_x_empty_fail[wp]: - "(\x. empty_fail (f x)) \ empty_fail (mapME_x f xs)" - by (clarsimp simp: mapME_x_def | wp)+ - -lemma filterM_empty_fail[wp]: - "(\m. 
m \ set ms \ empty_fail (P m)) \ empty_fail (filterM P ms)" - apply (induct ms) - apply (simp | wp)+ - done - -lemma zipWithM_x_empty_fail[wp]: - "(\x y. empty_fail (f x y)) \ empty_fail (zipWithM_x f xs ys)" - by (clarsimp simp: zipWithM_x_def zipWith_def | wp)+ - -lemma zipWithM_empty_fail[wp]: - "(\x y. empty_fail (f x y)) \ empty_fail (zipWithM f xs ys)" - by (clarsimp simp: zipWithM_def zipWith_def | wp)+ - -lemma handle'_empty_fail[wp]: - "\empty_fail f; \e. empty_fail (handler e)\ \ empty_fail (f handler)" - apply (simp add: handleE'_def | wp)+ - apply (case_tac x, simp_all) - done - -lemma handle_empty_fail[wp]: - "\empty_fail f; \e. empty_fail (handler e)\ \ empty_fail (f handler)" - by (simp add: handleE_def | wp)+ +lemmas [wp] = ef_ignore_failure ef_machine_op_lift lemma lookup_error_on_failure_empty_fail[wp]: "empty_fail f \ empty_fail (lookup_error_on_failure a f)" @@ -81,31 +27,10 @@ lemma unify_failure_empty_fail[wp]: "empty_fail f \ empty_fail (unify_failure f)" by (simp add: unify_failure_def | wp)+ -lemma if_split_empty_fail[wp]: - "\P \ empty_fail f; \ P \ empty_fail g\ \ empty_fail (if P then f else g)" - by simp - lemma const_on_failure_empty_fail[wp]: "empty_fail f \ empty_fail (const_on_failure a f)" by (simp add: const_on_failure_def catch_def split: sum.splits | wp)+ -lemma liftME_empty_fail[simp]: - "empty_fail (liftME f m) = empty_fail m" - apply (simp add: liftME_def) - apply (rule iffI) - apply (simp add: bindE_def) - apply (drule empty_fail_bindD1) - apply (simp | wp)+ - done - -lemma select_empty_fail[wp]: - "S \ {} \ empty_fail (select S)" - by (simp add: empty_fail_def select_def) - -lemma select_f_empty_fail[wp]: - "(fst S = {} \ snd S) \ empty_fail (select_f S)" - by (simp add: select_f_def empty_fail_def) - lemma select_ext_empty_fail: "S \ {} \ empty_fail (select_ext a S)" by (simp add: select_ext_def | wp)+ @@ -120,7 +45,7 @@ lemma do_machine_op_empty_fail[wp]: "empty_fail f \ empty_fail (do_machine_op f)" apply (simp add: do_machine_op_def | wp)+ apply (simp add: empty_fail_def) - apply (simp add: split_def) + apply (simp add: split_def empty_fail_cond) done lemma throw_on_false_empty_fail[wp]: @@ -131,13 +56,9 @@ lemma without_preemption_empty_fail[wp]: "empty_fail f \ empty_fail (without_preemption f)" by simp -lemma put_empty_fail[wp]: - "empty_fail (put f)" - by (simp add: put_def empty_fail_def) - crunch_ignore (empty_fail) - (add: NonDetMonad.bind bindE lift liftE liftM "when" whenE unless unlessE return fail - assert_opt mapM mapM_x sequence_x catch handleE do_extended_op + (add: Nondet_Monad.bind bindE lift liftE liftM "when" whenE unless unlessE return fail + assert_opt mapM mapM_x sequence_x catch handleE do_extended_op returnOk throwError cap_insert_ext empty_slot_ext create_cap_ext cap_swap_ext cap_move_ext reschedule_required possible_switch_to set_thread_state_ext OR_choice OR_choiceE timer_tick getRegister lookup_error_on_failure @@ -146,7 +67,7 @@ crunch_ignore (empty_fail) decode_tcb_invocation without_preemption as_user syscall cap_fault_on_failure check_cap_at zipWithM filterM) -crunch (empty_fail) empty_fail[wp]: set_object, gets_the, get_cap +crunch (empty_fail) empty_fail[wp]: set_object, get_cap (simp: split_def kernel_object.splits) lemma check_cap_at_empty_fail[wp]: @@ -269,7 +190,8 @@ context EmptyFail_AI_derive_cap begin lemma decode_cnode_invocation_empty_fail[wp]: "\a b c d. 
empty_fail (decode_cnode_invocation a b c d :: (cnode_invocation, 'state_ext) se_monad)" - by (simp add: decode_cnode_invocation_def split: invocation_label.splits list.splits | wp | wpc | intro impI conjI allI)+ + unfolding decode_cnode_invocation_def + by (wp | wpc | intro impI conjI allI)+ end @@ -313,7 +235,7 @@ context EmptyFail_AI_rec_del begin lemma rec_del_spec_empty_fail: fixes call and s :: "'state_ext state" shows "spec_empty_fail (rec_del call) s" -proof (induct rule: rec_del.induct, simp_all only: drop_spec_empty_fail[OF empty_fail] rec_del_fails) +proof (induct rule: rec_del.induct, simp_all only: drop_spec_empty_fail[OF empty_fail_fail] rec_del_fails) case (1 slot exposed s) show ?case apply (subst rec_del.simps) diff --git a/proof/invariant-abstract/Finalise_AI.thy b/proof/invariant-abstract/Finalise_AI.thy index bef35e5619..717e09a5eb 100644 --- a/proof/invariant-abstract/Finalise_AI.thy +++ b/proof/invariant-abstract/Finalise_AI.thy @@ -465,7 +465,7 @@ lemma cap_delete_one_caps_of_state: \\rv s. P (caps_of_state s)\" apply (simp add: cap_delete_one_def unless_def is_final_cap_def) - apply (rule hoare_seq_ext [OF _ get_cap_sp]) + apply (rule bind_wp [OF _ get_cap_sp]) apply (case_tac "can_fast_finalise cap") apply (wp empty_slot_caps_of_state get_cap_wp) apply (clarsimp simp: cte_wp_at_caps_of_state @@ -487,7 +487,7 @@ lemma cancel_ipc_caps_of_state: \\rv s. P (caps_of_state s)\" apply (simp add: cancel_ipc_def reply_cancel_ipc_def cong: Structures_A.thread_state.case_cong) - apply (wpsimp wp: cap_delete_one_caps_of_state select_wp) + apply (wpsimp wp: cap_delete_one_caps_of_state) apply (rule_tac Q="\_ s. (\p. cte_wp_at can_fast_finalise p s \ P ((caps_of_state s) (p \ cap.NullCap))) \ P (caps_of_state s)" @@ -626,7 +626,7 @@ lemma tcb_st_refs_no_TCBBound: lemma (in Finalise_AI_1) unbind_maybe_notification_invs: "\invs\ unbind_maybe_notification ntfnptr \\rv. invs\" apply (simp add: unbind_maybe_notification_def invs_def valid_state_def valid_pspace_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wpsimp wp: valid_irq_node_typ set_simple_ko_valid_objs valid_ioports_lift) apply simp @@ -731,7 +731,7 @@ lemma unbind_notification_not_bound: \\_. obj_at (\ko. \ntfn. ko = Notification ntfn \ ntfn_bound_tcb ntfn = None) ntfnptr\" apply (simp add: unbind_notification_def) apply (rule hoare_pre) - apply (rule hoare_seq_ext[OF _ gbn_wp[where P="\ptr _. ptr = (Some ntfnptr)"]]) + apply (rule bind_wp[OF _ gbn_wp[where P="\ptr _. ptr = (Some ntfnptr)"]]) apply (rule hoare_gen_asm[where P'=\, simplified]) apply (wp sbn_obj_at_impossible simple_obj_set_prop_at | wpc | simp)+ apply (clarsimp simp: obj_at_def) @@ -871,7 +871,7 @@ lemma unbind_maybe_notification_emptyable[wp]: lemma cancel_all_signals_emptyable[wp]: "\invs and emptyable sl\ cancel_all_signals ptr \\_. emptyable sl\" unfolding cancel_all_signals_def unbind_maybe_notification_def - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp cancel_all_emptyable_helper hoare_vcg_const_Ball_lift @@ -883,7 +883,7 @@ lemma cancel_all_signals_emptyable[wp]: lemma cancel_all_ipc_emptyable[wp]: "\invs and emptyable sl\ cancel_all_ipc ptr \\_. 
emptyable sl\" apply (simp add: cancel_all_ipc_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (case_tac ep, simp_all) apply (wp, simp) apply (wp cancel_all_emptyable_helper hoare_vcg_const_Ball_lift @@ -935,7 +935,7 @@ lemma cap_delete_one_deletes_reply: split: if_split_asm elim!: allEI) apply (rule hoare_vcg_all_lift) apply simp - apply (wp static_imp_wp empty_slot_deletes empty_slot_caps_of_state get_cap_wp)+ + apply (wp hoare_weak_lift_imp empty_slot_deletes empty_slot_caps_of_state get_cap_wp)+ apply (fastforce simp: cte_wp_at_caps_of_state valid_reply_caps_def is_cap_simps unique_reply_caps_def is_reply_cap_to_def simp del: split_paired_All) @@ -946,7 +946,7 @@ lemma cap_delete_one_reply_st_tcb_at: cap_delete_one slot \\rv. pred_tcb_at proj P t\" apply (simp add: cap_delete_one_def unless_def is_final_cap_def) - apply (rule hoare_seq_ext [OF _ get_cap_sp]) + apply (rule bind_wp [OF _ get_cap_sp]) apply (rule hoare_assume_pre) apply (clarsimp simp: cte_wp_at_caps_of_state when_def is_reply_cap_to_def) apply wpsimp @@ -1019,10 +1019,10 @@ locale Finalise_AI_3 = Finalise_AI_2 a b crunches suspend, unbind_maybe_notification, unbind_notification for irq_node[wp]: "\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch irq_node[wp]: deleting_irq_handler "\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) lemmas cancel_all_ipc_cte_irq_node[wp] = hoare_use_eq_irq_node [OF cancel_all_ipc_irq_node cancel_all_ipc_cte_wp_at] diff --git a/proof/invariant-abstract/Include_AI.thy b/proof/invariant-abstract/Include_AI.thy index 15b0164b2a..55f8b63534 100644 --- a/proof/invariant-abstract/Include_AI.thy +++ b/proof/invariant-abstract/Include_AI.thy @@ -6,14 +6,17 @@ theory Include_AI imports + Lib.Lib ArchCrunchSetup_AI - "Lib.Eisbach_WP" - "ASpec.Syscall_A" - "Lib.LemmaBucket" - "Lib.ListLibLemmas" - "Lib.LemmaBucket" - "Lib.SplitRule" - "Rights_AI" + Monads.Eisbach_WP + Monads.Nondet_Strengthen_Setup + ASpec.Syscall_A + Lib.LemmaBucket + Lib.ListLibLemmas + Lib.LemmaBucket + Lib.SplitRule + Rights_AI + Lib.MonadicRewrite begin no_notation bind_drop (infixl ">>" 60) @@ -22,7 +25,7 @@ unbundle l4v_word_context (* Clagged from Bits_R *) -crunch_ignore (add: NonDetMonad.bind return "when" get gets fail assert put modify +crunch_ignore (add: Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE liftE whenE unlessE throw_opt assertE liftM liftME sequence_x zipWithM_x mapM_x sequence mapM sequenceE_x sequenceE mapME mapME_x diff --git a/proof/invariant-abstract/Interrupt_AI.thy b/proof/invariant-abstract/Interrupt_AI.thy index bf17146963..ea6993430c 100644 --- a/proof/invariant-abstract/Interrupt_AI.thy +++ b/proof/invariant-abstract/Interrupt_AI.thy @@ -236,7 +236,7 @@ lemma IRQHandler_valid: lemmas (in Interrupt_AI) invoke_irq_handler_invs[wp] = invoke_irq_handler_invs'[where ex_inv=\ - , simplified hoare_post_taut + , simplified hoare_TrueI , OF TrueI TrueI TrueI , simplified ] @@ -256,7 +256,7 @@ lemma cancel_ipc_noreply_interrupt_states: lemma send_signal_interrupt_states[wp_unsafe]: "\\s. P (interrupt_states s) \ valid_objs s\ send_signal a b \\_ s. 
P (interrupt_states s)\" apply (simp add: send_signal_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp cancel_ipc_noreply_interrupt_states gts_wp hoare_vcg_all_lift thread_get_wp | wpc | simp)+ apply (clarsimp) diff --git a/proof/invariant-abstract/Invariants_AI.thy b/proof/invariant-abstract/Invariants_AI.thy index d88fe6c45f..09fbb5a5d3 100644 --- a/proof/invariant-abstract/Invariants_AI.thy +++ b/proof/invariant-abstract/Invariants_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -1278,6 +1279,10 @@ lemma valid_objsE [elim]: "\ valid_objs s; kheap s x = Some obj; valid_obj x obj s \ R \ \ R" unfolding valid_objs_def by (auto simp: dom_def) +lemma valid_obj_arch_valid_obj: + "valid_obj p (ArchObj ao) s = arch_valid_obj ao s" + by (simp add: valid_obj_def) + lemma obj_at_ko_at: "obj_at P p s \ \ko. ko_at ko p s \ P ko" @@ -2299,7 +2304,7 @@ lemma valid_tcb_state_typ: assumes P: "\T p. \typ_at T p\ f \\rv. typ_at T p\" shows "\\s. valid_tcb_state st s\ f \\rv s. valid_tcb_state st s\" by (case_tac st, - simp_all add: valid_tcb_state_def hoare_post_taut + simp_all add: valid_tcb_state_def hoare_TrueI ep_at_typ P tcb_at_typ ntfn_at_typ) lemma ntfn_at_typ_at: @@ -2327,7 +2332,7 @@ lemma valid_ep_typ: assumes P: "\p. \typ_at ATCB p\ f \\rv. typ_at ATCB p\" shows "\\s. valid_ep ep s\ f \\rv s. valid_ep ep s\" apply (case_tac ep, - simp_all add: valid_ep_def hoare_post_taut tcb_at_typ) + simp_all add: valid_ep_def hoare_TrueI tcb_at_typ) apply (rule hoare_vcg_conj_lift [OF hoare_vcg_prop]) apply (rule hoare_vcg_conj_lift [OF _ hoare_vcg_prop]) apply (rule hoare_vcg_const_Ball_lift [OF P]) @@ -2340,14 +2345,14 @@ lemma valid_ntfn_typ: assumes P: "\p. \typ_at ATCB p\ f \\rv. typ_at ATCB p\" shows "\\s. valid_ntfn ntfn s\ f \\rv s. valid_ntfn ntfn s\" apply (case_tac "ntfn_obj ntfn", - simp_all add: valid_ntfn_def valid_bound_tcb_def hoare_post_taut tcb_at_typ) + simp_all add: valid_ntfn_def valid_bound_tcb_def hoare_TrueI tcb_at_typ) defer 2 - apply ((case_tac "ntfn_bound_tcb ntfn", simp_all add: hoare_post_taut tcb_at_typ P)+)[2] + apply ((case_tac "ntfn_bound_tcb ntfn", simp_all add: hoare_TrueI tcb_at_typ P)+)[2] apply (rule hoare_vcg_conj_lift [OF hoare_vcg_prop])+ apply (rule hoare_vcg_conj_lift) apply (rule hoare_vcg_const_Ball_lift [OF P]) apply (rule hoare_vcg_conj_lift [OF hoare_vcg_prop]) - apply (case_tac "ntfn_bound_tcb ntfn", simp_all add: hoare_post_taut tcb_at_typ P) + apply (case_tac "ntfn_bound_tcb ntfn", simp_all add: hoare_TrueI tcb_at_typ P) apply (rule hoare_vcg_conj_lift [OF hoare_vcg_prop], simp add: P) done @@ -3087,8 +3092,7 @@ lemma real_cte_at_typ_valid: lemma dmo_aligned[wp]: "do_machine_op f \pspace_aligned\" apply (simp add: do_machine_op_def split_def) - apply (wp select_wp) - apply (clarsimp simp: pspace_aligned_def) + apply wpsimp done lemma cte_wp_at_eqD2: @@ -3459,6 +3463,10 @@ lemma valid_mask_vm_rights[simp]: "mask_vm_rights V R \ valid_vm_rights" by (simp add: mask_vm_rights_def) +lemma invs_pspace_in_kernel_window[elim!]: + "invs s \ pspace_in_kernel_window s" + by (simp add: invs_def valid_state_def) + lemmas invs_implies = invs_equal_kernel_mappings invs_arch_state @@ -3484,5 +3492,16 @@ lemmas invs_implies = invs_hyp_sym_refs invs_sym_refs tcb_at_invs + invs_pspace_in_kernel_window + +(* Pull invs out of a complex goal and prove it only once. 
Use as (strengthen invs_strengthen)+, + best in combination with simp and potentially conj_cong. *) +lemma invs_strengthen: + "invs s \ P s \ invs s" + "invs s \ (P s \ Q s) \ P s \ invs s \ Q s" + "invs s \ (P s \ Q s) \ P s \ Q s \ invs s" + "invs s \ (P s \ Q s) \ P s \ (invs and Q) s" + "invs s \ (P s \ Q s) \ P s \ (Q and invs) s" + by auto end diff --git a/proof/invariant-abstract/IpcCancel_AI.thy b/proof/invariant-abstract/IpcCancel_AI.thy index 9261e1ab21..cbfb48803d 100644 --- a/proof/invariant-abstract/IpcCancel_AI.thy +++ b/proof/invariant-abstract/IpcCancel_AI.thy @@ -52,7 +52,7 @@ lemma cancel_all_ipc_valid_objs: "\valid_objs and (\s. sym_refs (state_refs_of s))\ cancel_all_ipc ptr \\_. valid_objs\" apply (simp add: cancel_all_ipc_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (case_tac ep, simp_all add: get_ep_queue_def) apply (wp, simp) apply (wp cancel_all_helper hoare_vcg_const_Ball_lift @@ -86,7 +86,7 @@ lemma cancel_all_signals_valid_objs: "\valid_objs and (\s. sym_refs (state_refs_of s))\ cancel_all_signals ptr \\rv. valid_objs\" apply (simp add: cancel_all_signals_def unbind_maybe_notification_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp unbind_notification_valid_objs | wpc | simp_all add:unbind_maybe_notification_def)+ apply (wp cancel_all_helper hoare_vcg_const_Ball_lift @@ -154,7 +154,7 @@ lemma blocked_ipc_st_tcb_at_general: blocked_cancel_ipc st t \\rv. st_tcb_at P t'\" apply (simp add: blocked_cancel_ipc_def) - apply (wp sts_st_tcb_at_cases static_imp_wp, simp+) + apply (wp sts_st_tcb_at_cases hoare_weak_lift_imp, simp+) done @@ -163,7 +163,7 @@ lemma cancel_signal_st_tcb_at_general: cancel_signal t ntfn \\rv. st_tcb_at P t'\" apply (simp add: cancel_signal_def) - apply (wp sts_st_tcb_at_cases ntfn_cases_weak_wp static_imp_wp) + apply (wp sts_st_tcb_at_cases ntfn_cases_weak_wp hoare_weak_lift_imp) apply simp done @@ -196,13 +196,13 @@ lemma update_restart_pc_has_reply_cap[wp]: done crunch st_tcb_at_simple[wp]: reply_cancel_ipc "st_tcb_at simple t" - (wp: crunch_wps select_wp sts_st_tcb_at_cases thread_set_no_change_tcb_state + (wp: crunch_wps sts_st_tcb_at_cases thread_set_no_change_tcb_state simp: crunch_simps unless_def) lemma cancel_ipc_simple [wp]: "\\\ cancel_ipc t \\rv. st_tcb_at simple t\" apply (simp add: cancel_ipc_def) - apply (rule hoare_seq_ext [OF _ gts_sp]) + apply (rule bind_wp [OF _ gts_sp]) apply (case_tac state, simp_all) apply (wp hoare_strengthen_post [OF blocked_cancel_ipc_simple] hoare_strengthen_post [OF cancel_signal_simple] @@ -230,7 +230,7 @@ context IpcCancel_AI begin crunch typ_at[wp]: cancel_ipc, reply_cancel_ipc, unbind_maybe_notification "\(s :: 'a state). P (typ_at T p s)" - (wp: crunch_wps hoare_vcg_if_splitE select_wp + (wp: crunch_wps hoare_vcg_if_splitE simp: crunch_simps unless_def) lemma cancel_ipc_tcb [wp]: @@ -349,9 +349,9 @@ lemma refs_in_ntfn_bound_refs: lemma blocked_cancel_ipc_invs: "\invs and st_tcb_at ((=) st) t\ blocked_cancel_ipc st t \\rv. 
invs\" apply (simp add: blocked_cancel_ipc_def) - apply (rule hoare_seq_ext [OF _ gbi_ep_sp]) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) - apply (rule hoare_seq_ext [OF _ get_epq_sp]) + apply (rule bind_wp [OF _ gbi_ep_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_epq_sp]) apply (simp add: invs_def valid_state_def valid_pspace_def) apply (rule hoare_pre, wp valid_irq_node_typ sts_only_idle) apply (simp add: valid_tcb_state_def) @@ -385,7 +385,7 @@ lemma cancel_signal_invs: \\rv. invs\" apply (simp add: cancel_signal_def invs_def valid_state_def valid_pspace_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (case_tac "ntfn_obj ntfna", simp_all)[1] apply (rule hoare_pre) apply (wp set_simple_ko_valid_objs valid_irq_node_typ sts_only_idle valid_ioports_lift @@ -437,7 +437,7 @@ lemma reply_cancel_ipc_invs: (cap_delete_one p :: (unit,'z::state_ext) s_monad) \\rv. invs\" shows "\invs\ (reply_cancel_ipc t :: (unit,'z::state_ext) s_monad) \\rv. invs\" apply (simp add: reply_cancel_ipc_def) - apply (wp delete select_wp) + apply (wp delete) apply (rule_tac Q="\rv. invs" in hoare_post_imp) apply (fastforce simp: emptyable_def dest: reply_slot_not_descendant) apply (wp thread_set_invs_trivial) @@ -448,7 +448,7 @@ lemma reply_cancel_ipc_invs: lemma (in delete_one_abs) cancel_ipc_invs[wp]: "\invs\ (cancel_ipc t :: (unit,'a) s_monad) \\rv. invs\" apply (simp add: cancel_ipc_def) - apply (rule hoare_seq_ext [OF _ gts_sp]) + apply (rule bind_wp [OF _ gts_sp]) apply (case_tac state, simp_all) apply (auto intro!: hoare_weaken_pre [OF return_wp] hoare_weaken_pre [OF blocked_cancel_ipc_invs] @@ -499,7 +499,7 @@ lemma blocked_cancel_ipc_valid_objs[wp]: lemma cancel_signal_valid_objs[wp]: "\valid_objs\ cancel_signal t ntfnptr \\_. valid_objs\" apply (simp add: cancel_signal_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp set_simple_ko_valid_objs | simp only: valid_inactive @@ -527,15 +527,15 @@ lemma no_refs_simple_strg: crunch it[wp]: cancel_all_ipc "\s. P (idle_thread s)" - (wp: crunch_wps select_wp simp: unless_def crunch_simps) + (wp: crunch_wps simp: unless_def crunch_simps) crunch it[wp]: cancel_all_signals, fast_finalise, unbind_notification "\s. P (idle_thread s)" - (wp: crunch_wps select_wp simp: unless_def crunch_simps) + (wp: crunch_wps simp: unless_def crunch_simps) context IpcCancel_AI begin crunch it[wp]: reply_cancel_ipc "\(s::'a state). P (idle_thread s)" - (wp: crunch_wps select_wp simp: unless_def crunch_simps) + (wp: crunch_wps simp: unless_def crunch_simps) crunch it[wp]: cancel_ipc "\(s :: 'a state). P (idle_thread s)" @@ -567,15 +567,15 @@ lemma (in delete_one_abs) reply_cancel_ipc_no_reply_cap[wp]: shows "\invs and tcb_at t\ (reply_cancel_ipc t :: (unit,'a) s_monad) \\rv s. \ has_reply_cap t s\" apply (simp add: reply_cancel_ipc_def) apply wp - apply (rule_tac Q="\rvp s. cte_wp_at (\c. c = cap.NullCap) x s \ - (\sl R. sl \ x \ + apply (rule_tac Q="\rvp s. cte_wp_at (\c. c = cap.NullCap) rv s \ + (\sl R. 
sl \ rv \ caps_of_state s sl \ Some (cap.ReplyCap t False R))" in hoare_strengthen_post) apply (wp hoare_vcg_conj_lift hoare_vcg_all_lift delete_one_deletes delete_one_caps_of_state) apply (clarsimp simp: has_reply_cap_def cte_wp_at_caps_of_state is_reply_cap_to_def) apply (case_tac "(aa, ba) = (a, b)",simp_all)[1] - apply (wp hoare_vcg_all_lift select_wp | simp del: split_paired_All)+ + apply (wp hoare_vcg_all_lift | simp del: split_paired_All)+ apply (rule_tac Q="\_ s. invs s \ tcb_at t s" in hoare_post_imp) apply (erule conjE) apply (frule(1) reply_cap_descends_from_master) @@ -641,7 +641,7 @@ lemma (in delete_one_pre) reply_cancel_ipc_cte_wp_at_preserved: "(\cap. P cap \ \ can_fast_finalise cap) \ \cte_wp_at P p\ (reply_cancel_ipc t :: (unit,'a) s_monad) \\rv. cte_wp_at P p\" unfolding reply_cancel_ipc_def - apply (wpsimp wp: select_wp delete_one_cte_wp_at_preserved) + apply (wpsimp wp: delete_one_cte_wp_at_preserved) apply (rule_tac Q="\_. cte_wp_at P p" in hoare_post_imp, clarsimp) apply (wpsimp wp: thread_set_cte_wp_at_trivial simp: ran_tcb_cap_cases) apply assumption @@ -735,7 +735,7 @@ lemma reply_cancel_ipc_bound_tcb_at[wp]: reply_cancel_ipc p \\_. bound_tcb_at P t\" unfolding reply_cancel_ipc_def - apply (wpsimp wp: cap_delete_one_bound_tcb_at select_inv select_wp) + apply (wpsimp wp: cap_delete_one_bound_tcb_at select_inv) apply (rule_tac Q="\_. bound_tcb_at P t and valid_mdb and valid_objs and tcb_at p" in hoare_strengthen_post) apply (wpsimp wp: thread_set_no_change_tcb_pred thread_set_mdb) apply (fastforce simp:tcb_cap_cases_def) @@ -867,7 +867,7 @@ lemma cancel_all_ipc_invs_helper: do_extended_op reschedule_required od \\rv. invs\" apply (subst bind_assoc[symmetric]) - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply wp apply simp apply (rule hoare_pre) @@ -912,7 +912,7 @@ lemma cancel_all_ipc_invs_helper: lemma cancel_all_ipc_invs: "\invs\ cancel_all_ipc epptr \\rv. invs\" apply (simp add: cancel_all_ipc_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (case_tac ep, simp_all add: get_ep_queue_def) apply (wp, fastforce) apply (rule hoare_pre, rule cancel_all_ipc_invs_helper[where k=EPSend]) @@ -968,10 +968,10 @@ lemma bound_tcb_bound_notification_at: lemma unbind_notification_invs: shows "\invs\ unbind_notification t \\rv. invs\" apply (simp add: unbind_notification_def invs_def valid_state_def valid_pspace_def) - apply (rule hoare_seq_ext [OF _ gbn_sp]) + apply (rule bind_wp [OF _ gbn_sp]) apply (case_tac ntfnptr, clarsimp, wp, simp) apply clarsimp - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (wp valid_irq_node_typ set_simple_ko_valid_objs valid_ioports_lift | clarsimp split del: if_split)+ apply (intro conjI impI; @@ -1025,7 +1025,7 @@ lemma tcb_state_refs_no_tcb: lemma cancel_all_signals_invs: "\invs\ cancel_all_signals ntfnptr \\rv. invs\" apply (simp add: cancel_all_signals_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp cancel_all_invs_helper set_simple_ko_valid_objs valid_irq_node_typ hoare_vcg_const_Ball_lift valid_ioports_lift @@ -1079,7 +1079,7 @@ lemma cancel_all_unlive_helper: lemma cancel_all_ipc_unlive[wp]: "\\\ cancel_all_ipc ptr \\ rv. 
obj_at (Not \ live) ptr\" apply (simp add: cancel_all_ipc_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (case_tac ep, simp_all add: set_simple_ko_def get_ep_queue_def) apply wp apply (clarsimp simp: live_def elim!: obj_at_weakenE) @@ -1096,7 +1096,7 @@ lemma cancel_all_signals_unlive[wp]: cancel_all_signals ntfnptr \\ rv. obj_at (Not \ live) ntfnptr\" apply (simp add: cancel_all_signals_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp | wpc @@ -1146,8 +1146,8 @@ lemma cancel_badged_sends_filterM_helper': apply (clarsimp simp: st_tcb_at_refs_of_rev pred_tcb_at_def is_tcb elim!: obj_at_weakenE) apply (clarsimp simp: filterM_append bind_assoc simp del: set_append distinct_append) - apply (drule spec, erule hoare_seq_ext[rotated]) - apply (rule hoare_seq_ext [OF _ gts_sp]) + apply (drule spec, erule bind_wp_fwd) + apply (rule bind_wp [OF _ gts_sp]) apply (rule hoare_pre, wpsimp wp: valid_irq_node_typ sts_only_idle hoare_vcg_const_Ball_lift) apply (clarsimp simp: valid_tcb_state_def) @@ -1185,7 +1185,7 @@ lemma cancel_badged_sends_invs_helper: lemma cancel_badged_sends_invs[wp]: "\invs\ cancel_badged_sends epptr badge \\rv. invs\" apply (simp add: cancel_badged_sends_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (case_tac ep; simp) apply wpsimp apply (simp add: invs_def valid_state_def valid_pspace_def) diff --git a/proof/invariant-abstract/Ipc_AI.thy b/proof/invariant-abstract/Ipc_AI.thy index f5299fac13..2b5796fa5b 100644 --- a/proof/invariant-abstract/Ipc_AI.thy +++ b/proof/invariant-abstract/Ipc_AI.thy @@ -7,7 +7,7 @@ theory Ipc_AI imports ArchFinalise_AI - "Lib.WPBang" + "Monads.WPBang" begin context begin interpretation Arch . @@ -51,7 +51,7 @@ lemma lsfco_cte_at: "\valid_objs and valid_cap cn\ lookup_slot_for_cnode_op f cn idx depth \\rv. cte_at rv\,-" - by (rule hoare_post_imp_R, rule lookup_cnode_slot_real_cte, simp add: real_cte_at_cte) + by (rule hoare_strengthen_postE_R, rule lookup_cnode_slot_real_cte, simp add: real_cte_at_cte) declare do_machine_op_tcb[wp] @@ -113,7 +113,7 @@ lemma cap_derive_not_null_helper: \\rv s. rv \ cap.NullCap \ Q rv s\,-" apply (case_tac cap, simp_all add: is_zombie_def, - safe elim!: hoare_post_imp_R) + safe elim!: hoare_strengthen_postE_R) apply (wp | simp add: derive_cap_def is_zombie_def)+ done @@ -395,7 +395,7 @@ lemma lsfco_cte_wp_at_univ: lookup_slot_for_cnode_op f croot idx depth \\rv. cte_wp_at (P rv) rv\, -" apply (rule hoare_gen_asmE) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lsfco_cte_at) apply (clarsimp simp: cte_wp_at_def) done @@ -517,7 +517,7 @@ lemma cap_insert_weak_cte_wp_at2: cap_insert cap src dest \\uu. 
cte_wp_at P p\" unfolding cap_insert_def - by (wp set_cap_cte_wp_at get_cap_wp static_imp_wp + by (wp set_cap_cte_wp_at get_cap_wp hoare_weak_lift_imp | simp add: cap_insert_def | unfold set_untyped_cap_as_full_def | auto simp: cte_wp_at_def dest!:imp)+ @@ -570,7 +570,7 @@ lemma cap_insert_assume_null: apply (rule hoare_name_pre_state) apply (erule impCE) apply (simp add: cap_insert_def) - apply (rule hoare_seq_ext[OF _ get_cap_sp])+ + apply (rule bind_wp[OF _ get_cap_sp])+ apply (clarsimp simp: valid_def cte_wp_at_caps_of_state in_monad split del: if_split) apply (erule hoare_pre(1)) @@ -603,16 +603,16 @@ lemma transfer_caps_loop_presM: apply (clarsimp simp add: Let_def split_def whenE_def cong: if_cong list.case_cong split del: if_split) apply (rule hoare_pre) - apply (wp eb hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift static_imp_wp + apply (wp eb hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift hoare_weak_lift_imp | assumption | simp split del: if_split)+ apply (rule cap_insert_assume_null) - apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at static_imp_wp)+ + apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at hoare_weak_lift_imp)+ apply (rule hoare_vcg_conj_liftE_R) apply (rule derive_cap_is_derived_foo) apply (rule_tac Q' ="\cap' s. (vo \ cap'\ cap.NullCap \ cte_wp_at (is_derived (cdt s) (aa, b) cap') (aa, b) s) \ (cap'\ cap.NullCap \ QM s cap')" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption @@ -1439,9 +1439,7 @@ lemmas get_tcb_ko_atI = get_tcb_ko_at [THEN iffD1] crunch "distinct" [wp]: set_mrs pspace_distinct - (wp: select_wp hoare_vcg_split_case_option mapM_wp - hoare_drop_imps refl - simp: zipWithM_x_mapM) + (wp: mapM_wp simp: zipWithM_x_mapM) crunch "distinct" [wp]: copy_mrs pspace_distinct @@ -1832,7 +1830,7 @@ lemma set_mrs_valid_ioc[wp]: apply (simp add: set_mrs_def) apply (wp | wpc)+ apply (simp only: zipWithM_x_mapM_x split_def) - apply (wp mapM_x_wp' set_object_valid_ioc_caps static_imp_wp + apply (wp mapM_x_wp' set_object_valid_ioc_caps hoare_weak_lift_imp | simp)+ apply (clarsimp simp: obj_at_def get_tcb_def valid_ioc_def split: option.splits Structures_A.kernel_object.splits) @@ -2030,7 +2028,7 @@ lemma update_waiting_invs: update_waiting_ntfn ntfnptr q bound_tcb bdg \\rv. invs\" apply (simp add: update_waiting_ntfn_def) - apply (rule hoare_seq_ext[OF _ assert_sp]) + apply (rule bind_wp[OF _ assert_sp]) apply (rule hoare_pre) apply (wp |simp)+ apply (simp add: invs_def valid_state_def valid_pspace_def) @@ -2120,7 +2118,7 @@ lemma cancel_ipc_cte_wp_at_not_reply_state: \\r. cte_wp_at P p\" apply (simp add: cancel_ipc_def) apply (rule hoare_pre) - apply (wp hoare_pre_cont[where a="reply_cancel_ipc t"] gts_wp | wpc)+ + apply (wp hoare_pre_cont[where f="reply_cancel_ipc t"] gts_wp | wpc)+ apply (clarsimp simp: st_tcb_at_def obj_at_def) done @@ -2128,13 +2126,13 @@ lemma cancel_ipc_cte_wp_at_not_reply_state: lemma sai_invs[wp]: "\invs and ex_nonz_cap_to ntfn\ send_signal ntfn bdg \\rv. 
invs\" apply (simp add: send_signal_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (case_tac "ntfn_obj ntfna", simp_all) apply (case_tac "ntfn_bound_tcb ntfna", simp_all) apply (wp set_ntfn_minor_invs) apply (clarsimp simp: obj_at_def is_ntfn invs_def valid_pspace_def valid_state_def valid_obj_def valid_ntfn_def) - apply (rule hoare_seq_ext [OF _ gts_sp]) + apply (rule bind_wp [OF _ gts_sp]) apply (rule hoare_pre) apply (rule hoare_vcg_if_split) apply (wp sts_invs_minor | clarsimp split: thread_state.splits)+ @@ -2272,7 +2270,7 @@ lemma pred_tcb_clear: lemma pred_tcb_upd_apply: - "pred_tcb_at proj P t (s\kheap := kheap s(r \ TCB v)\) = + "pred_tcb_at proj P t (s\kheap := (kheap s)(r \ TCB v)\) = (if t = r then P (proj (tcb_to_itcb v)) else pred_tcb_at proj P t s)" by (simp add: pred_tcb_at_def obj_at_def) @@ -2622,14 +2620,14 @@ lemma complete_signal_invs: complete_signal ntfnptr tcb \\_. invs\" apply (simp add: complete_signal_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (rule hoare_pre) apply (wp set_ntfn_minor_invs | wpc | simp)+ apply (rule_tac Q="\_ s. (state_refs_of s ntfnptr = ntfn_bound_refs (ntfn_bound_tcb ntfn)) \ (\T. typ_at T ntfnptr s) \ valid_ntfn (ntfn_set_obj ntfn IdleNtfn) s \ ((\y. ntfn_bound_tcb ntfn = Some y) \ ex_nonz_cap_to ntfnptr s)" in hoare_strengthen_post) - apply (wp hoare_vcg_all_lift static_imp_wp hoare_vcg_ex_lift | wpc + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp hoare_vcg_ex_lift | wpc | simp add: live_def valid_ntfn_def valid_bound_tcb_def split: option.splits)+ apply ((clarsimp simp: obj_at_def state_refs_of_def)+)[2] apply (rule_tac obj_at_valid_objsE[OF _ invs_valid_objs]; clarsimp) @@ -2660,13 +2658,13 @@ lemma ri_invs': apply (simp add: receive_ipc_def split_def) apply (cases cap, simp_all) apply (rename_tac ep badge rights) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) - apply (rule hoare_seq_ext[OF _ gbn_sp]) - apply (rule hoare_seq_ext) + apply (rule bind_wp[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ gbn_sp]) + apply (rule bind_wp) (* set up precondition for old proof *) - apply (rule_tac R="ko_at (Endpoint x) ep and ?pre" in hoare_vcg_if_split) + apply (rule_tac R="ko_at (Endpoint rv) ep and ?pre" in hoare_vcg_if_split) apply (wp complete_signal_invs) - apply (case_tac x) + apply (case_tac rv) apply (wp | rule hoare_pre, wpc | simp)+ apply (simp add: invs_def valid_state_def valid_pspace_def) apply (rule hoare_pre, wp valid_irq_node_typ valid_ioports_lift) @@ -2764,7 +2762,7 @@ lemma ri_invs': done lemmas ri_invs[wp] - = ri_invs'[where Q=\,simplified hoare_post_taut, OF TrueI TrueI TrueI,simplified] + = ri_invs'[where Q=\,simplified hoare_TrueI, OF TrueI TrueI TrueI,simplified] end @@ -2820,7 +2818,7 @@ lemma valid_bound_tcb_typ_at: "(\p. \\s. typ_at ATCB p s\ f \\_ s. typ_at ATCB p s\) \ \\s. valid_bound_tcb tcb s\ f \\_ s. 
valid_bound_tcb tcb s\" apply (clarsimp simp: valid_bound_tcb_def split: option.splits) - apply (wpsimp wp: hoare_vcg_all_lift tcb_at_typ_at static_imp_wp) + apply (wpsimp wp: hoare_vcg_all_lift tcb_at_typ_at hoare_weak_lift_imp) done crunch bound_tcb[wp]: set_thread_state, set_message_info, set_mrs, as_user "valid_bound_tcb t" @@ -2845,8 +2843,8 @@ lemma rai_invs': apply (simp add: receive_signal_def) apply (cases cap, simp_all) apply (rename_tac ntfn badge rights) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) - apply (case_tac "ntfn_obj x") + apply (rule bind_wp [OF _ get_simple_ko_sp]) + apply (case_tac "ntfn_obj rv") apply (simp add: invs_def valid_state_def valid_pspace_def) apply (rule hoare_pre) apply (wp set_simple_ko_valid_objs valid_irq_node_typ sts_only_idle valid_ioports_lift @@ -2887,7 +2885,7 @@ lemma rai_invs': apply (rule conjI, clarsimp simp: st_tcb_at_reply_cap_valid) apply (rule context_conjI, fastforce simp: pred_tcb_at_def obj_at_def tcb_bound_refs_def2 state_refs_of_def) - apply (subgoal_tac "ntfn_bound_tcb x = None") + apply (subgoal_tac "ntfn_bound_tcb rv = None") apply (rule conjI, clarsimp split: option.splits) apply (rule conjI, erule delta_sym_refs) apply (fastforce simp: pred_tcb_at_def2 obj_at_def symreftype_inverse' @@ -2904,7 +2902,7 @@ lemma rai_invs': apply (rule hoare_pre) apply (wp set_simple_ko_valid_objs hoare_vcg_const_Ball_lift valid_ioports_lift as_user_no_del_ntfn[simplified ntfn_at_def2, simplified] - valid_irq_node_typ ball_tcb_cap_casesI static_imp_wp + valid_irq_node_typ ball_tcb_cap_casesI hoare_weak_lift_imp valid_bound_tcb_typ_at[rule_format] | simp add: valid_ntfn_def)+ apply clarsimp @@ -2921,7 +2919,7 @@ lemma rai_invs': dest: valid_reply_capsD) done -lemmas rai_invs[wp] = rai_invs'[where Q=\,simplified hoare_post_taut, OF TrueI TrueI TrueI,simplified] +lemmas rai_invs[wp] = rai_invs'[where Q=\,simplified hoare_TrueI, OF TrueI TrueI TrueI,simplified] end @@ -2957,7 +2955,7 @@ crunch cap_to[wp]: receive_signal "ex_nonz_cap_to p" (wp: crunch_wps) crunch mdb[wp]: set_message_info valid_mdb - (wp: select_wp crunch_wps mapM_wp') + (wp: crunch_wps mapM_wp') lemma ep_queue_cap_to: "\ ko_at (Endpoint ep) p s; invs s; @@ -2989,7 +2987,7 @@ lemma si_invs': send_ipc bl call badge cg cgr t epptr \\r (s::'state_ext state). invs s \ Q s\" apply (simp add: send_ipc_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (case_tac ep, simp_all) (* ep=IdleEP, bl *) apply (cases bl, simp_all)[1] @@ -3054,7 +3052,7 @@ lemma si_invs': | clarsimp simp:is_cap_simps | wpc | strengthen reply_cap_doesnt_exist_strg disjI2_strg[where Q="cte_wp_at (\cp. 
is_master_reply_cap cp \ R cp) p s"] - | (wp hoare_vcg_conj_lift static_imp_wp | wp dxo_wp_weak | simp)+ + | (wp hoare_vcg_conj_lift hoare_weak_lift_imp | wp dxo_wp_weak | simp)+ | wp valid_ioports_lift)+ apply (clarsimp simp: ep_redux_simps conj_ac cong: list.case_cong if_cong) apply (frule(1) sym_refs_ko_atD) @@ -3111,11 +3109,11 @@ lemma hf_invs': apply (wpsimp wp: thread_set_invs_trivial thread_set_no_change_tcb_state ex_nonz_cap_to_pres thread_set_cte_wp_at_trivial - hoare_vcg_all_lift_R + hoare_vcg_all_liftE_R | clarsimp simp: tcb_cap_cases_def | erule disjE)+ apply (wpe lookup_cap_ex_cap) - apply (wpsimp wp: hoare_vcg_all_lift_R + apply (wpsimp wp: hoare_vcg_all_liftE_R | strengthen reply_cap_doesnt_exist_strg | wp (once) hoare_drop_imps)+ apply (simp add: conj_comms) @@ -3125,7 +3123,7 @@ lemma hf_invs': split: Structures_A.thread_state.splits) done -lemmas hf_invs[wp] = hf_invs'[where Q=\,simplified hoare_post_taut, OF TrueI TrueI TrueI TrueI TrueI,simplified] +lemmas hf_invs[wp] = hf_invs'[where Q=\,simplified hoare_TrueI, OF TrueI TrueI TrueI TrueI TrueI,simplified] end @@ -3189,7 +3187,7 @@ lemma si_blk_makes_simple: send_ipc True call bdg x gr t' ep \\rv. st_tcb_at simple t\" apply (simp add: send_ipc_def) - apply (rule hoare_seq_ext [OF _ get_simple_ko_inv]) + apply (rule bind_wp [OF _ get_simple_ko_inv]) apply (case_tac epa, simp_all) apply (wp sts_st_tcb_at_cases) apply clarsimp @@ -3198,8 +3196,8 @@ lemma si_blk_makes_simple: apply (rule hoare_gen_asm[simplified]) apply (rename_tac list) apply (case_tac list, simp_all split del:if_split) - apply (rule hoare_seq_ext [OF _ set_simple_ko_pred_tcb_at]) - apply (rule hoare_seq_ext [OF _ gts_sp]) + apply (rule bind_wp [OF _ set_simple_ko_pred_tcb_at]) + apply (rule bind_wp [OF _ gts_sp]) apply (case_tac recv_state, simp_all split del: if_split) apply (wp sts_st_tcb_at_cases setup_caller_cap_makes_simple hoare_drop_imps @@ -3251,20 +3249,20 @@ lemma ri_makes_simple: apply (rule hoare_gen_asm) apply (simp add: receive_ipc_def split_def) apply (case_tac cap, simp_all) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) - apply (rule hoare_seq_ext [OF _ gbn_sp]) - apply (rule hoare_seq_ext) - apply (rename_tac ep I DO x CARE NOT) - apply (rule_tac R="ko_at (Endpoint x) ep and ?pre" in hoare_vcg_if_split) + apply (rule bind_wp [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ gbn_sp]) + apply (rule bind_wp) + apply (rename_tac ep I DO rv CARE NOT) + apply (rule_tac R="ko_at (Endpoint rv) ep and ?pre" in hoare_vcg_if_split) apply (wp complete_signal_invs) - apply (case_tac x, simp_all) + apply (case_tac rv, simp_all) apply (rule hoare_pre, wpc) apply (wp sts_st_tcb_at_cases, simp) apply (simp add: do_nbrecv_failed_transfer_def, wp) apply clarsimp - apply (rule hoare_seq_ext [OF _ assert_sp]) - apply (rule hoare_seq_ext [where B="\s. st_tcb_at simple t'"]) - apply (rule hoare_seq_ext [OF _ gts_sp]) + apply (rule bind_wp [OF _ assert_sp]) + apply (rule bind_wp [where Q'="\s. st_tcb_at simple t'"]) + apply (rule bind_wp [OF _ gts_sp]) apply (rule hoare_pre) apply (wp setup_caller_cap_makes_simple sts_st_tcb_at_cases hoare_vcg_all_lift hoare_vcg_const_imp_lift diff --git a/proof/invariant-abstract/KHeapPre_AI.thy b/proof/invariant-abstract/KHeapPre_AI.thy index 14f9b8589f..9fca503d7c 100644 --- a/proof/invariant-abstract/KHeapPre_AI.thy +++ b/proof/invariant-abstract/KHeapPre_AI.thy @@ -136,7 +136,7 @@ lemma get_tcb_at: "tcb_at t s \ (\tcb. 
get_tcb t s = Som lemma typ_at_same_type: assumes "typ_at T p s" "a_type k = a_type ko" "kheap s p' = Some ko" - shows "typ_at T p (s\kheap := kheap s(p' \ k)\)" + shows "typ_at T p (s\kheap := (kheap s)(p' \ k)\)" using assms by (clarsimp simp: obj_at_def) @@ -148,12 +148,12 @@ lemma hoare_to_pure_kheap_upd: assumes typ_eq: "a_type k = a_type ko" assumes valid: "P (s :: ('z :: state_ext) state)" assumes at: "ko_at ko p s" - shows "P (s\kheap := kheap s(p \ k)\)" + shows "P (s\kheap := (kheap s)(p \ k)\)" apply (rule use_valid[where f=" do s' <- get; assert (s' = s); - (modify (\s. s\kheap := kheap s(p \ k)\)); + (modify (\s. s\kheap := (kheap s)(p \ k)\)); return undefined od", OF _ hoare valid]) apply (fastforce simp add: simpler_modify_def get_def bind_def @@ -165,7 +165,7 @@ lemma hoare_to_pure_kheap_upd: by (auto simp add: obj_at_def a_type_def split: kernel_object.splits if_splits) lemma set_object_wp: - "\\s. Q (s\ kheap := kheap s (p \ v)\) \ set_object p v \\_. Q\" + "\\s. Q (s\ kheap := (kheap s) (p \ v)\) \ set_object p v \\_. Q\" apply (simp add: set_object_def get_object_def) apply wp apply blast @@ -200,8 +200,8 @@ lemma hoare_set_object_weaken_pre: [OF assms, where N="\s. \ko. ko_at ko p s \ a_type ko \ a_type v"]) apply fastforce apply (simp add: set_object_def) - apply (rule hoare_seq_ext[OF _ get_object_sp]) - apply (rule hoare_seq_ext[OF _ assert_sp]) + apply (rule bind_wp[OF _ get_object_sp]) + apply (rule bind_wp[OF _ assert_sp]) apply (fastforce intro: hoare_weaken_pre[OF hoare_pre_cont]) done diff --git a/proof/invariant-abstract/KHeap_AI.thy b/proof/invariant-abstract/KHeap_AI.thy index 9d2e946d22..713fd085c2 100644 --- a/proof/invariant-abstract/KHeap_AI.thy +++ b/proof/invariant-abstract/KHeap_AI.thy @@ -103,7 +103,7 @@ lemma pspace_aligned_obj_update: assumes obj: "obj_at P t s" assumes pa: "pspace_aligned s" assumes R: "\k. 
P k \ a_type k = a_type k'" - shows "pspace_aligned (s\kheap := kheap s(t \ k')\)" + shows "pspace_aligned (s\kheap := (kheap s)(t \ k')\)" using pa obj apply (simp add: pspace_aligned_def cong: conj_cong) apply (clarsimp simp: obj_at_def obj_bits_T dest!: R) @@ -113,7 +113,7 @@ lemma pspace_aligned_obj_update: lemma cte_at_same_type: "\cte_at t s; a_type k = a_type ko; kheap s p = Some ko\ - \ cte_at t (s\kheap := kheap s(p \ k)\)" + \ cte_at t (s\kheap := (kheap s)(p \ k)\)" apply (clarsimp simp: cte_at_cases del: disjCI) apply (elim exE disjE) apply (clarsimp simp: a_type_def well_formed_cnode_n_def length_set_helper @@ -125,13 +125,13 @@ lemma cte_at_same_type: lemma untyped_same_type: "\valid_untyped (cap.UntypedCap dev r n f) s; a_type k = a_type ko; kheap s p = Some ko\ - \ valid_untyped (cap.UntypedCap dev r n f) (s\kheap := kheap s(p \ k)\)" + \ valid_untyped (cap.UntypedCap dev r n f) (s\kheap := (kheap s)(p \ k)\)" unfolding valid_untyped_def by (clarsimp simp: obj_range_def obj_bits_T) lemma valid_cap_same_type: "\ s \ cap; a_type k = a_type ko; kheap s p = Some ko \ - \ s\kheap := kheap s(p \ k)\ \ cap" + \ s\kheap := (kheap s)(p \ k)\ \ cap" apply (simp add: valid_cap_def split: cap.split) apply (auto elim!: typ_at_same_type untyped_same_type simp: ntfn_at_typ ep_at_typ tcb_at_typ cap_table_at_typ @@ -141,7 +141,7 @@ lemma valid_cap_same_type: lemma valid_obj_same_type: "\ valid_obj p' obj s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_obj p' obj (s\kheap := kheap s(p \ k)\)" + \ valid_obj p' obj (s\kheap := (kheap s)(p \ k)\)" apply (cases obj; simp) apply (clarsimp simp add: valid_obj_def valid_cs_def) apply (drule (1) bspec) @@ -348,7 +348,7 @@ lemma get_simple_ko_valid_obj[wp]: get_simple_ko f ep \ \r. valid_obj ep (f r) \" apply (simp add: get_simple_ko_def) - apply (rule hoare_seq_ext) + apply (rule bind_wp) prefer 2 apply (rule hoare_pre_imp [OF _ get_object_valid]) apply (simp add: invs_def valid_state_def valid_pspace_def) @@ -363,7 +363,7 @@ lemma get_simple_ko_valid_simple_obj[wp]: get_simple_ko f ep \ \r. 
valid_simple_obj (f r) \" apply (simp add: get_simple_ko_def) - apply (rule hoare_seq_ext) + apply (rule bind_wp) prefer 2 apply (rule hoare_pre_imp [OF _ get_object_valid]) apply (simp add: invs_def valid_state_def valid_pspace_def) @@ -460,7 +460,7 @@ lemma set_ntfn_refs_of[wp]: lemma pspace_distinct_same_type: "\ kheap s t = Some ko; a_type ko = a_type ko'; pspace_distinct s\ - \ pspace_distinct (s\kheap := kheap s(t \ ko')\)" + \ pspace_distinct (s\kheap := (kheap s)(t \ ko')\)" apply (clarsimp simp add: pspace_distinct_def obj_bits_T) apply fastforce done @@ -573,7 +573,7 @@ lemma cte_wp_at_after_update: lemma cte_wp_at_after_update': "\ obj_at (same_caps val) p' s \ - \ cte_wp_at P p (s\kheap := kheap s(p' \ val)\) + \ cte_wp_at P p (s\kheap := (kheap s)(p' \ val)\) = cte_wp_at P p s" by (fastforce simp: obj_at_def cte_wp_at_cases split: if_split_asm dest: bspec [OF _ ranI]) @@ -584,7 +584,7 @@ lemma ex_cap_to_after_update: lemma ex_cap_to_after_update': "\ ex_nonz_cap_to p s; obj_at (same_caps val) p' s \ - \ ex_nonz_cap_to p (s\kheap := kheap s(p' \ val)\)" + \ ex_nonz_cap_to p (s\kheap := (kheap s)(p' \ val)\)" by (clarsimp simp: ex_nonz_cap_to_def cte_wp_at_after_update') lemma ex_cte_cap_to_after_update: @@ -772,7 +772,7 @@ lemma as_user_bind[wp]: apply clarsimp apply (rename_tac value_g s tcb fail_g value_f fail_f) apply (rule_tac x="value_f" in exI) - apply (rule_tac x="s\kheap := kheap s(t \ TCB (tcb\tcb_arch := arch_tcb_context_set fail_f (tcb_arch tcb)\))\" in exI) + apply (rule_tac x="s\kheap := (kheap s)(t \ TCB (tcb\tcb_arch := arch_tcb_context_set fail_f (tcb_arch tcb)\))\" in exI) apply fastforce apply clarsimp apply (rename_tac value_g ta s tcb value_f fail_g ko) @@ -997,6 +997,7 @@ crunches do_machine_op and valid_global_refs[wp]: valid_global_refs and valid_irq_node[wp]: valid_irq_node and irq_states[wp]: "\s. P (interrupt_states s)" + and kheap[wp]: "\s. 
P (kheap s)" (simp: cur_tcb_def zombies_final_pspaceI state_refs_of_pspaceI ex_nonz_cap_to_def ct_in_state_def wp: crunch_wps valid_arch_state_lift vs_lookup_vspace_obj_at_lift) diff --git a/proof/invariant-abstract/KernelInitSepProofs_AI.thy b/proof/invariant-abstract/KernelInitSepProofs_AI.thy index f8a5070aab..b170c11553 100644 --- a/proof/invariant-abstract/KernelInitSepProofs_AI.thy +++ b/proof/invariant-abstract/KernelInitSepProofs_AI.thy @@ -109,7 +109,7 @@ lemma tcb_set_cap_local_via_explosion: unfolding sep_map_ko_def apply (rule hoare_gen_asmE'[simplified K_def pred_conj_def]) apply (clarsimp simp: a_base_type_cmp_of_def) - apply (rule_tac E=E in hoare_post_impErr) + apply (rule_tac E=E in hoare_strengthen_postE) apply (rule hoare_pre) apply (rule ki_set_cap_frame) apply (clarsimp simp only: sep_conj_exists) @@ -138,7 +138,7 @@ lemma cnode_set_cap_local_via_explosion: unfolding sep_map_ko_def apply (rule hoare_gen_asmE'[simplified K_def pred_conj_def]) \ \concludes zero-sized cnode case\ - apply (rule_tac E=E in hoare_post_impErr) + apply (rule_tac E=E in hoare_strengthen_postE) apply (rule hoare_pre) apply (rule ki_set_cap_frame) apply (clarsimp simp only: sep_conj_exists) diff --git a/proof/invariant-abstract/LevityCatch_AI.thy b/proof/invariant-abstract/LevityCatch_AI.thy index b07f68a686..10b229f813 100644 --- a/proof/invariant-abstract/LevityCatch_AI.thy +++ b/proof/invariant-abstract/LevityCatch_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -9,6 +10,9 @@ imports ArchLevityCatch_AI begin +(* FIXME: eliminate mapM_UNIV_wp, use mapM_wp' directly *) +lemmas mapM_UNIV_wp = mapM_wp' + context begin interpretation Arch . requalify_consts @@ -19,34 +23,20 @@ requalify_facts end -(*FIXME: Move or remove *) - -method spec for x :: "_ :: type" = (erule allE[of _ x]) -method bspec for x :: "_ :: type" = (erule ballE[of _ _ x]) -method prove for x :: "prop" = (rule revcut_rl[of "PROP x"]) - lemmas aobj_ref_arch_cap_simps[simp] = aobj_ref_arch_cap -lemma detype_arch_state : +lemma detype_arch_state: "arch_state (detype S s) = arch_state s" by (simp add: detype_def) lemma obj_ref_elemD: "r \ obj_refs cap \ obj_refs cap = {r}" - by (cases cap, simp_all) - -lemma const_on_failure_wp : - "\P\ m \Q\, \\rv. Q n\ \ \P\ const_on_failure n m \Q\" - apply (simp add: const_on_failure_def) - apply wp - done + by (cases cap; simp) lemma get_cap_id: "(v, s') \ fst (get_cap p s) \ (s' = s)" - by (clarsimp simp: get_cap_def get_object_def in_monad - split_def - split: Structures_A.kernel_object.splits) - + by (clarsimp simp: get_cap_def get_object_def in_monad split_def + split: kernel_object.splits) lemmas cap_irq_opt_simps[simp] = cap_irq_opt_def [split_simps cap.split sum.split] @@ -54,77 +44,35 @@ lemmas cap_irq_opt_simps[simp] = lemmas cap_irqs_simps[simp] = cap_irqs_def [unfolded cap_irq_opt_def, split_simps cap.split sum.split, simplified option.simps] - -lemma all_eq_trans: "\ \x. P x = Q x; \x. Q x = R x \ \ \x. P x = R x" - by simp - - declare liftE_wp[wp] declare case_sum_True[simp] declare select_singleton[simp] -crunch_ignore (add: cap_swap_ext - cap_move_ext cap_insert_ext empty_slot_ext create_cap_ext - do_extended_op) - -lemma select_ext_weak_wp[wp]: "\\s. \x\S. Q x s\ select_ext a S \Q\" - apply (simp add: select_ext_def) - apply (wp select_wp) - apply simp - done - -lemma select_ext_wp[wp]:"\\s. 
a s \ S \ Q (a s) s\ select_ext a S \Q\" - apply (simp add: select_ext_def unwrap_ext_det_ext_ext_def) - apply (wp select_wp) - apply (simp add: unwrap_ext_det_ext_ext_def select_switch_det_ext_ext_def) - done - -(* FIXME: move *) -lemmas mapM_UNIV_wp = mapM_wp[where S="UNIV", simplified] - -lemmas word_simps = - word_size word_ops_nth_size nth_ucast nth_shiftr nth_shiftl - -lemma mask_split_aligned: - assumes len: "m \ a + len_of TYPE('a)" - assumes align: "is_aligned p a" - shows "(p && ~~ mask m) + (ucast ((ucast (p && mask m >> a))::'a::len word) << a) = p" - apply (insert align[simplified is_aligned_nth]) - apply (subst word_plus_and_or_coroll; rule word_eqI; clarsimp simp: word_simps) - apply (rule iffI) - apply (erule disjE; clarsimp) - apply (case_tac "n < m"; case_tac "n < a") - using len by auto - -lemma mask_split_aligned_neg: - fixes x :: "'a::len word" - fixes p :: "'b::len word" - assumes len: "a + len_of TYPE('a) \ len_of TYPE('b)" - "m = a + len_of TYPE('a)" - assumes x: "x \ ucast (p && mask m >> a)" - shows "(p && ~~ mask m) + (ucast x << a) = p \ False" - apply (subst (asm) word_plus_and_or_coroll) - apply (clarsimp simp: word_simps bang_eq) - apply (metis bit_imp_le_length diff_add_inverse le_add1 len(2) less_diff_iff) - apply (insert x) - apply (erule notE) - apply word_eqI - subgoal for n - using len - apply (clarsimp) - apply (spec "n + a") - by (clarsimp simp: add.commute) - done - -lemma mask_alignment_ugliness: - "\ x \ x + z && ~~ mask m; - is_aligned (x + z && ~~ mask m) m; - is_aligned x m; - \n \ m. \z !! n\ - \ False" - apply (erule notE) - apply (subst word_plus_and_or_coroll; word_eqI) - apply (meson linorder_not_le) - by (auto simp: le_def) +crunch_ignore (add: do_extended_op) + +lemma None_Some_strg: + "x = None \ x \ Some y" + by simp + +(* Weakest precondition lemmas that need ASpec concepts: *) + +lemma const_on_failure_wp: + "\P\ m \Q\, \\rv. Q n\ \ \P\ const_on_failure n m \Q\" + by (wpsimp simp: const_on_failure_def) + +(* Weaker wp rule for arguments "a" which do not take type det_ext state. + The stronger rule below will take precendence, because it is declared [wp] + later than this one. This rule here will fire when the stronger one does not + apply because of a looser type than det_ext state. The looser type tends to + happen in goals that are stated by crunch. *) +lemma select_ext_weak_wp[wp]: + "\\s. \x\S. Q x s\ select_ext a S \Q\" + by (wpsimp simp: select_ext_def) + +(* The "real" wp rule for select_ext, requires det_ext state: *) +lemma select_ext_wp[wp]: + "\\s. a s \ S \ Q (a s) s\ select_ext a S \Q\" + unfolding select_ext_def unwrap_ext_det_ext_ext_def + by (wpsimp simp: select_switch_det_ext_ext_def) end diff --git a/proof/invariant-abstract/README.md b/proof/invariant-abstract/README.md index 4515a4d504..ac02cb7f4d 100644 --- a/proof/invariant-abstract/README.md +++ b/proof/invariant-abstract/README.md @@ -9,7 +9,7 @@ Abstract Spec Invariant Proof This proof defines and proves the global invariants of seL4's [abstract specification](../../spec/abstract/). The invariants are -phrased and proved using a [monadic Hoare logic](../../lib/Monad_WP/NonDetMonad.thy) +phrased and proved using a [monadic Hoare logic](../../lib/Monads/nondet/Nondet_Monad.thy) described in a TPHOLS '08 [paper][1]. [1]: https://trustworthy.systems/publications/nictaabstracts/Cock_KS_08.abstract "Secure Microkernels, State Monads and Scalable Refinement" @@ -17,9 +17,9 @@ described in a TPHOLS '08 [paper][1]. 
Building -------- -To build from the `l4v/` directory, run: +To build for the ARM architecture from the `l4v/` directory, run: - ./isabelle/bin/isabelle build -d . -v -b AInvs + L4V_ARCH=ARM ./run_tests AInvs Important Theories ------------------ diff --git a/proof/invariant-abstract/RISCV64/ArchAcc_AI.thy b/proof/invariant-abstract/RISCV64/ArchAcc_AI.thy index d92794c62c..207f86410d 100644 --- a/proof/invariant-abstract/RISCV64/ArchAcc_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchAcc_AI.thy @@ -1018,7 +1018,7 @@ lemma set_object_caps_of_state: done lemma set_pt_aobjs_of: - "\\s. aobjs_of s p \ None \ P (aobjs_of s(p \ PageTable pt)) \ set_pt p pt \\_ s. P (aobjs_of s)\" + "\\s. aobjs_of s p \ None \ P ((aobjs_of s)(p \ PageTable pt)) \ set_pt p pt \\_ s. P (aobjs_of s)\" unfolding set_pt_def supply fun_upd_apply[simp del] by (wpsimp wp: set_object_wp) @@ -1142,7 +1142,7 @@ lemma pt_walk_upd_idem: \ pt_walk top_level level' pt_ptr vptr (ptes_of s) = Some (level', pt_ptr') \ pt_ptr' \ obj_ref; is_aligned pt_ptr pt_bits \ - \ pt_walk top_level level pt_ptr vptr (ptes_of (s\kheap := kheap s(obj_ref \ ko)\)) + \ pt_walk top_level level pt_ptr vptr (ptes_of (s\kheap := (kheap s)(obj_ref \ ko)\)) = pt_walk top_level level pt_ptr vptr (ptes_of s)" by (rule pt_walk_eqI; simp split del: if_split) (clarsimp simp: opt_map_def split: option.splits) @@ -1208,7 +1208,7 @@ lemma vs_lookup_table_upd_idem: \ vs_lookup_table level' asid vref s = Some (level', p') \ p' \ obj_ref; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (rule vs_lookup_table_eqI; simp split del: if_split) (clarsimp simp: opt_map_def split: option.splits) @@ -1217,7 +1217,7 @@ lemma vs_lookup_table_Some_upd_idem: "\ vs_lookup_table level asid vref s = Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s; unique_table_refs s; valid_vs_lookup s; valid_caps (caps_of_state s) s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (subst vs_lookup_table_upd_idem; simp?) 
(fastforce dest: no_loop_vs_lookup_table) @@ -1226,7 +1226,7 @@ lemma ex_vs_lookup_upd_idem: "\ \\ (level, p) s; pspace_aligned s; valid_vspace_objs s; valid_asid_table s; unique_table_refs s; valid_vs_lookup s; valid_caps (caps_of_state s) s \ - \ \\ (level, p) (s\kheap := kheap s(p \ ko)\) = \\ (level, p) s" + \ \\ (level, p) (s\kheap := (kheap s)(p \ ko)\) = \\ (level, p) s" apply (rule iffI; clarsimp) apply (rule_tac x=asid in exI) apply (rule_tac x=vref in exI) @@ -1303,7 +1303,7 @@ lemma pt_lookup_target_pt_upd_eq: by (rule pt_lookup_target_pt_eqI; clarsimp) lemma kheap_pt_upd_simp[simp]: - "(kheap s(p \ ArchObj (PageTable pt)) |> aobj_of |> pt_of) + "((kheap s)(p \ ArchObj (PageTable pt)) |> aobj_of |> pt_of) = (kheap s |> aobj_of |> pt_of)(p \ pt)" unfolding aobj_of_def opt_map_def by (auto split: kernel_object.split) @@ -1463,7 +1463,7 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1471,7 +1471,7 @@ lemma in_user_frame_same_type_upd: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1509,7 +1509,7 @@ lemma valid_machine_state_heap_updI: assumes vm : "valid_machine_state s" assumes tyat : "typ_at type p s" shows - " a_type obj = type \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + " a_type obj = type \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: valid_machine_state_def) subgoal for p apply (rule valid_machine_stateE[OF vm,where p = p]) @@ -1668,7 +1668,7 @@ crunch interrupt_states[wp]: set_asid_pool "\s. P (interrupt_states s)" lemma vs_lookup_table_unreachable_upd_idem: "\ \level. vs_lookup_table level asid vref s \ Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" apply (subst vs_lookup_table_upd_idem; fastforce) done @@ -1676,14 +1676,14 @@ lemma vs_lookup_table_unreachable_upd_idem: lemma vs_lookup_table_unreachable_upd_idem': "\ \(\level. \\ (level, obj_ref) s); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (rule vs_lookup_table_unreachable_upd_idem; fastforce) lemma vs_lookup_target_unreachable_upd_idem: "\ \level. 
vs_lookup_table level asid vref s \ Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_target level asid vref s" supply fun_upd_apply[simp del] apply (clarsimp simp: vs_lookup_target_def vs_lookup_slot_def obind_assoc) @@ -1718,12 +1718,12 @@ lemma vs_lookup_target_unreachable_upd_idem: lemma vs_lookup_target_unreachable_upd_idem': "\ \(\level. \\ (level, obj_ref) s); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_target level asid vref s" by (rule vs_lookup_target_unreachable_upd_idem; fastforce) lemma vs_lookup_table_fun_upd_deep_idem: - "\ vs_lookup_table level asid vref (s\kheap := kheap s(p \ ko)\) = Some (level, p'); + "\ vs_lookup_table level asid vref (s\kheap := (kheap s)(p \ ko)\) = Some (level, p'); vs_lookup_table level' asid vref s = Some (level', p); level' \ level; vref \ user_region; unique_table_refs s; valid_vs_lookup s; valid_vspace_objs s; valid_asid_table s; pspace_aligned s; valid_caps (caps_of_state s) s \ @@ -1816,8 +1816,8 @@ lemma vs_lookup_target_pt_levelI: lemma vs_lookup_target_asid_pool_level_upd_helper: "\ graph_of ap \ graph_of ap'; kheap s p = Some (ArchObj (ASIDPool ap')); vref \ user_region; - vspace_for_pool pool_ptr asid (asid_pools_of s(p \ ap)) = Some pt_ptr; - pool_for_asid asid (s\kheap := kheap s(p \ ArchObj (ASIDPool ap))\) = Some pool_ptr\ + vspace_for_pool pool_ptr asid ((asid_pools_of s)(p \ ap)) = Some pt_ptr; + pool_for_asid asid (s\kheap := (kheap s)(p \ ArchObj (ASIDPool ap))\) = Some pool_ptr\ \ vs_lookup_target asid_pool_level asid vref s = Some (asid_pool_level, pt_ptr)" apply (clarsimp simp: pool_for_asid_vs_lookup vspace_for_pool_def in_omonad) apply (clarsimp split: if_splits) @@ -1828,7 +1828,7 @@ lemma vs_lookup_target_asid_pool_level_upd_helper: done lemma vs_lookup_target_None_upd_helper: - "\ vs_lookup_table level asid vref (s\kheap := kheap s(p \ ArchObj (ASIDPool ap))\) = + "\ vs_lookup_table level asid vref (s\kheap := (kheap s)(p \ ArchObj (ASIDPool ap))\) = Some (level, table_ptr); ((\pa. pte_of pa ((pts_of s)(p := None))) |> pte_ref) (pt_slot_offset level table_ptr vref) = Some target; @@ -1943,7 +1943,7 @@ lemma set_asid_pool_equal_mappings[wp]: lemma translate_address_asid_pool_upd: "pts_of s p = None \ translate_address pt_ptr vref - (\pa. pte_of pa (kheap s(p \ ArchObj (ASIDPool ap)) |> aobj_of |> pt_of)) + (\pa. pte_of pa ((kheap s)(p \ ArchObj (ASIDPool ap)) |> aobj_of |> pt_of)) = translate_address pt_ptr vref (ptes_of s)" by simp @@ -2879,7 +2879,7 @@ lemma cap_refs_respects_device_region_dmo: lemma machine_op_lift_device_state[wp]: "machine_op_lift f \\ms. 
P (device_state ms)\" - by (clarsimp simp: machine_op_lift_def NonDetMonad.valid_def bind_def + by (clarsimp simp: machine_op_lift_def Nondet_VCG.valid_def bind_def machine_rest_lift_def gets_def simpler_modify_def get_def return_def select_def ignore_failure_def select_f_def split: if_splits) diff --git a/proof/invariant-abstract/RISCV64/ArchArch_AI.thy b/proof/invariant-abstract/RISCV64/ArchArch_AI.thy index 654db06750..4acd7b7828 100644 --- a/proof/invariant-abstract/RISCV64/ArchArch_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchArch_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -55,7 +56,7 @@ lemma check_vp_wpR [wp]: check_vp_alignment sz w \P\, -" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (simp add: vmsz_aligned_def) done @@ -63,7 +64,7 @@ lemma check_vp_wpR [wp]: lemma check_vp_inv: "\P\ check_vp_alignment sz w \\_. P\" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply simp done @@ -441,7 +442,7 @@ context Arch begin global_naming RISCV64 lemma valid_arch_state_strg: "valid_arch_state s \ ap \ ran (asid_table s) \ asid_pool_at ap s \ - valid_arch_state (s\arch_state := arch_state s\riscv_asid_table := riscv_asid_table (arch_state s)(asid \ ap)\\)" + valid_arch_state (s\arch_state := arch_state s\riscv_asid_table := (asid_table s)(asid \ ap)\\)" apply (clarsimp simp: valid_arch_state_def) apply (clarsimp simp: valid_asid_table_def ran_def) apply (fastforce intro!: inj_on_fun_updI simp: asid_pools_at_eq) @@ -466,7 +467,7 @@ lemma valid_asid_pool_caps_upd_strg: (\ptr cap. caps_of_state s ptr = Some cap \ obj_refs cap = {ap} \ vs_cap_ref cap = Some (ucast asid << asid_low_bits, 0)) \ - valid_asid_pool_caps_2 (caps_of_state s) (asid_table s(asid \ ap))" + valid_asid_pool_caps_2 (caps_of_state s) ((asid_table s)(asid \ ap))" apply clarsimp apply (prop_tac "asid_update ap asid s", (unfold_locales; assumption)) apply (fastforce dest: asid_update.valid_asid_pool_caps') @@ -559,7 +560,7 @@ lemma cap_insert_simple_arch_caps_ap: and K (cap = ArchObjectCap (ASIDPoolCap ap asid) \ is_aligned asid asid_low_bits) \ cap_insert cap src dest \\rv s. valid_arch_caps (s\arch_state := arch_state s - \riscv_asid_table := riscv_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \riscv_asid_table := (asid_table s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg valid_asid_pool_caps_upd_strg) @@ -572,7 +573,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -651,7 +652,7 @@ lemma cap_insert_ap_invs: asid_table s (asid_high_bits_of asid) = None)\ cap_insert cap src dest \\rv s. 
invs (s\arch_state := arch_state s - \riscv_asid_table := (riscv_asid_table \ arch_state) s(asid_high_bits_of asid \ ap)\\)\" + \riscv_asid_table := ((riscv_asid_table \ arch_state) s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def) apply (strengthen valid_arch_state_strg valid_vspace_objs_asid_upd_strg equal_kernel_mappings_asid_upd_strg valid_asid_map_asid_upd_strg @@ -805,11 +806,11 @@ proof - \\rv s. invs (s\arch_state := arch_state s - \riscv_asid_table := (riscv_asid_table \ arch_state) s + \riscv_asid_table := ((riscv_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\) \ Q (s\arch_state := arch_state s - \riscv_asid_table := (riscv_asid_table \ arch_state) s + \riscv_asid_table := ((riscv_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\)\" apply (wp cap_insert_ap_invs) apply simp @@ -915,7 +916,7 @@ qed lemmas aci_invs[wp] = - aci_invs'[where Q=\,simplified hoare_post_taut, OF refl refl refl TrueI TrueI TrueI,simplified] + aci_invs'[where Q=\,simplified hoare_TrueI, OF refl refl refl TrueI TrueI TrueI,simplified] lemma invoke_arch_invs[wp]: "\invs and ct_active and valid_arch_inv ai\ @@ -997,7 +998,7 @@ crunch_ignore (add: select_ext find_vspace_for_asid) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps) + (wp: crunch_wps select_ext_weak_wp simp: crunch_simps) declare lookup_slot_for_cnode_op_cap_to [wp] @@ -1266,11 +1267,11 @@ lemma decode_asid_control_invocation_wf[wp]: and (\s. descendants_of (snd (excaps!0)) (cdt s) = {}) and cte_wp_at (\c. \idx. c = UntypedCap False frame pageBits idx) (snd (excaps!0)) and (\s. riscv_asid_table (arch_state s) free = None)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: lookup_target_slot_def) apply wp apply (clarsimp simp: cte_wp_at_def) - apply (wpsimp wp: ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp)+ + apply (wpsimp wp: ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp)+ apply (rule conjI, fastforce) apply (cases excaps, simp) apply (case_tac list, simp) diff --git a/proof/invariant-abstract/RISCV64/ArchCNodeInv_AI.thy b/proof/invariant-abstract/RISCV64/ArchCNodeInv_AI.thy index ef1acc9af6..6dfd377e26 100644 --- a/proof/invariant-abstract/RISCV64/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchCNodeInv_AI.thy @@ -545,7 +545,7 @@ context Arch begin global_naming RISCV64 lemma post_cap_delete_pre_is_final_cap': "\s. 
\valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -622,7 +622,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) @@ -791,7 +791,7 @@ qed lemmas rec_del_invs'[CNodeInv_AI_assms] = rec_del_invs'' [where Q=\, - simplified hoare_post_taut pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] + simplified hoare_TrueI pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] end diff --git a/proof/invariant-abstract/RISCV64/ArchCSpacePre_AI.thy b/proof/invariant-abstract/RISCV64/ArchCSpacePre_AI.thy index 168c983d2e..63fdd76207 100644 --- a/proof/invariant-abstract/RISCV64/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchCSpacePre_AI.thy @@ -152,7 +152,7 @@ lemma arch_derived_is_device: lemma valid_arch_mdb_simple: "\ valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ - valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) (caps_of_state s(dest \ cap))" + valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) ((caps_of_state s)(dest \ cap))" by (auto simp: valid_arch_mdb_def is_cap_revocable_def arch_is_cap_revocable_def is_simple_cap_def safe_parent_for_def is_cap_simps) @@ -177,34 +177,34 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by (clarsimp simp: valid_arch_mdb_def) lemma valid_arch_mdb_free_index_update': "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_weak_derived_update: "\s capa. 
\valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_tcb_cnode_update: "valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb ((is_original_cap s) ((t, tcb_cnode_index 2) := True)) - (caps_of_state s((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" + ((caps_of_state s)((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" by (clarsimp simp: valid_arch_mdb_def) lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -237,10 +237,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by (clarsimp simp: valid_arch_mdb_def)+ end diff --git a/proof/invariant-abstract/RISCV64/ArchCSpace_AI.thy b/proof/invariant-abstract/RISCV64/ArchCSpace_AI.thy index c64dcdad81..5465278de5 100644 --- a/proof/invariant-abstract/RISCV64/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchCSpace_AI.thy @@ -175,7 +175,7 @@ lemma is_derived_is_cap: lemma vs_lookup_pages_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ vs_lookup_pages (s\kheap := kheap s(p \ ko')\) = vs_lookup_pages s" + \ vs_lookup_pages (s\kheap := (kheap s)(p \ ko')\) = vs_lookup_pages s" unfolding vs_lookup_target_def vs_lookup_slot_def apply (frule aobjs_of_non_aobj_upd[where ko'=ko'], simp+) apply (rule ext)+ @@ -190,7 +190,7 @@ lemma vs_lookup_pages_non_aobj_upd: lemma vs_lookup_target_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(p \ ko')\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(p \ ko')\) = vs_lookup_target level asid vref s" by (drule vs_lookup_pages_non_aobj_upd[where ko'=ko'], auto dest: fun_cong) diff --git a/proof/invariant-abstract/RISCV64/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/RISCV64/ArchDetSchedAux_AI.thy index 7061f5d034..20b9604f77 100644 --- a/proof/invariant-abstract/RISCV64/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchDetSchedAux_AI.thy @@ -16,7 +16,7 @@ crunches init_arch_objects for exst[wp]: "\s. P (exst s)" and ct[wp]: "\s. P (cur_thread s)" and valid_etcbs[wp, DetSchedAux_AI_assms]: valid_etcbs - (wp: crunch_wps hoare_unless_wp valid_etcbs_lift) + (wp: crunch_wps unless_wp valid_etcbs_lift) crunch ct[wp, DetSchedAux_AI_assms]: invoke_untyped "\s. P (cur_thread s)" (wp: crunch_wps dxo_wp_weak preemption_point_inv mapME_x_inv_wp @@ -101,9 +101,9 @@ crunch ct[wp]: perform_asid_control_invocation "\s. P (cur_thread s)" crunch idle_thread[wp]: perform_asid_control_invocation "\s. 
P (idle_thread s)" -crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: static_imp_wp) +crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: hoare_weak_lift_imp) -crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: static_imp_wp) +crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: hoare_weak_lift_imp) crunch schedact[wp]: perform_asid_control_invocation "\s :: det_ext state. P (scheduler_action s)" (wp: crunch_wps simp: detype_def detype_ext_def wrap_ext_det_ext_ext_def cap_insert_ext_def ignore: freeMemory) diff --git a/proof/invariant-abstract/RISCV64/ArchDetSchedSchedule_AI.thy b/proof/invariant-abstract/RISCV64/ArchDetSchedSchedule_AI.thy index fc881ee93d..6e43b06c0e 100644 --- a/proof/invariant-abstract/RISCV64/ArchDetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchDetSchedSchedule_AI.thy @@ -83,7 +83,7 @@ crunch valid_sched [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread, arch_ (simp: crunch_simps) crunch exst[wp]: set_vm_root "\s. P (exst s)" - (wp: crunch_wps hoare_whenE_wp simp: crunch_simps) + (wp: crunch_wps whenE_wp simp: crunch_simps) crunch ct_in_cur_domain_2 [wp, DetSchedSchedule_AI_assms]: arch_switch_to_thread "\s. ct_in_cur_domain_2 thread (idle_thread s) (scheduler_action s) (cur_domain s) (ekheap s)" @@ -183,17 +183,17 @@ lemma set_asid_pool_valid_sched[wp]: crunch ct_not_in_q [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete ct_not_in_q - (wp: crunch_wps hoare_drop_imps hoare_unless_wp select_inv mapM_wp + (wp: crunch_wps hoare_drop_imps unless_wp select_inv mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: tcb_sched_action) crunch valid_etcbs [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete valid_etcbs - (wp: hoare_drop_imps hoare_unless_wp select_inv mapM_x_wp mapM_wp subset_refl + (wp: hoare_drop_imps unless_wp select_inv mapM_x_wp mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: set_object thread_set) crunch simple_sched_action [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete simple_sched_action - (wp: hoare_drop_imps mapM_x_wp mapM_wp select_wp subset_refl + (wp: hoare_drop_imps mapM_x_wp mapM_wp subset_refl simp: unless_def if_fun_split) crunch valid_sched [wp, DetSchedSchedule_AI_assms]: diff --git a/proof/invariant-abstract/RISCV64/ArchDeterministic_AI.thy b/proof/invariant-abstract/RISCV64/ArchDeterministic_AI.thy index 88fbd0fca0..ddf9198f3b 100644 --- a/proof/invariant-abstract/RISCV64/ArchDeterministic_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchDeterministic_AI.thy @@ -31,7 +31,7 @@ context Arch begin global_naming RISCV64 crunch valid_list[wp,Deterministic_AI_assms]: arch_invoke_irq_handler valid_list crunch valid_list[wp]: invoke_untyped valid_list - (wp: crunch_wps preemption_point_inv' hoare_unless_wp mapME_x_wp' + (wp: crunch_wps preemption_point_inv' unless_wp mapME_x_wp' simp: mapM_x_def_bak crunch_simps) crunch valid_list[wp]: invoke_irq_control valid_list diff --git a/proof/invariant-abstract/RISCV64/ArchDetype_AI.thy b/proof/invariant-abstract/RISCV64/ArchDetype_AI.thy index 5bb6fd74bd..ff5c24d729 100644 --- a/proof/invariant-abstract/RISCV64/ArchDetype_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchDetype_AI.thy @@ -82,7 +82,7 @@ next qed lemma empty_fail_freeMemory [Detype_AI_asms]: "empty_fail (freeMemory ptr bits)" - by (simp add: freeMemory_def mapM_x_mapM ef_storeWord) + by (fastforce simp: freeMemory_def mapM_x_mapM 
ef_storeWord) lemma region_in_kernel_window_detype[simp]: diff --git a/proof/invariant-abstract/RISCV64/ArchEmptyFail_AI.thy b/proof/invariant-abstract/RISCV64/ArchEmptyFail_AI.thy index d07be67964..69d218d8de 100644 --- a/proof/invariant-abstract/RISCV64/ArchEmptyFail_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchEmptyFail_AI.thy @@ -32,7 +32,7 @@ context Arch begin global_naming RISCV64 crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: handle_fault (simp: kernel_object.splits option.splits arch_cap.splits cap.splits endpoint.splits bool.splits list.splits thread_state.splits split_def catch_def sum.splits - Let_def wp: zipWithM_x_empty_fail) + Let_def) crunch (empty_fail) empty_fail[wp]: decode_tcb_configure, decode_bind_notification, decode_unbind_notification, @@ -54,14 +54,13 @@ lemma arch_decode_RISCVASIDControlMakePool_empty_fail: apply (simp add: decode_asid_control_invocation_def) apply (intro impI conjI allI) apply (simp add: split_def) - apply wp - apply simp + apply (wp (once), simp) apply (subst bindE_assoc[symmetric]) apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def - bind_def return_def returnOk_def lift_def liftE_def fail_def - gets_def get_def assert_def select_def - split: if_split_asm) + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def + bind_def return_def returnOk_def lift_def liftE_def fail_def + gets_def get_def assert_def select_def + split: if_split_asm) apply wpsimp apply (wpsimp simp: decode_frame_invocation_def) apply (wpsimp simp: decode_page_table_invocation_def) @@ -84,9 +83,9 @@ lemma arch_decode_RISCVASIDPoolAssign_empty_fail: apply (rule empty_fail_bindE, wpsimp) apply (subst bindE_assoc[symmetric]) apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_def bindE_def - bind_def return_def returnOk_def lift_def liftE_def select_ext_def - gets_def get_def assert_def fail_def) + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_def bindE_def + bind_def return_def returnOk_def lift_def liftE_def select_ext_def + gets_def get_def assert_def fail_def) apply wpsimp done diff --git a/proof/invariant-abstract/RISCV64/ArchFinalise_AI.thy b/proof/invariant-abstract/RISCV64/ArchFinalise_AI.thy index 277775a110..b441746b16 100644 --- a/proof/invariant-abstract/RISCV64/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchFinalise_AI.thy @@ -390,7 +390,7 @@ lemma arch_thread_set_cur_tcb[wp]: "\cur_tcb\ arch_thread_set p lemma cte_wp_at_update_some_tcb: "\kheap s v = Some (TCB tcb) ; tcb_cnode_map tcb = tcb_cnode_map (f tcb)\ - \ cte_wp_at P p (s\kheap := kheap s (v \ TCB (f tcb))\) = cte_wp_at P p s" + \ cte_wp_at P p (s\kheap := (kheap s)(v \ TCB (f tcb))\) = cte_wp_at P p s" apply (clarsimp simp: cte_wp_at_cases2 dest!: get_tcb_SomeD) done @@ -551,7 +551,7 @@ lemma arch_thread_set_valid_objs_context[wp]: lemma sym_refs_update_some_tcb: "\kheap s v = Some (TCB tcb) ; refs_of (TCB tcb) = refs_of (TCB (f tcb))\ - \ sym_refs (state_refs_of (s\kheap := kheap s (v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" + \ sym_refs (state_refs_of (s\kheap := (kheap s)(v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" apply (rule_tac f=sym_refs in arg_cong) apply (rule all_ext) apply (clarsimp simp: sym_refs_def state_refs_of_def) @@ -707,7 +707,7 @@ lemmas reachable_frame_cap_simps = reachable_frame_cap_def[unfolded is_frame_cap_def arch_cap_fun_lift_def, split_simps cap.split] 
lemma vs_lookup_slot_non_PageTablePTE: - "\ ptes_of s p \ None; ptes_of s' = ptes_of s(p \ pte); \ is_PageTablePTE pte; + "\ ptes_of s p \ None; ptes_of s' = (ptes_of s)(p \ pte); \ is_PageTablePTE pte; asid_pools_of s' = asid_pools_of s; asid_table s' = asid_table s; valid_asid_table s; pspace_aligned s\ \ vs_lookup_slot level asid vref s' = @@ -1090,16 +1090,6 @@ lemma set_vm_root_empty[wp]: apply (wpsimp wp: get_cap_wp) done -lemma set_asid_pool_empty[wp]: - "\obj_at (empty_table S) word\ set_asid_pool x2 pool' \\xb. obj_at (empty_table S) word\" - by (wpsimp wp: set_object_wp simp: set_asid_pool_def obj_at_def empty_table_def) - -lemma delete_asid_empty_table_pt[wp]: - "delete_asid a word \\s. obj_at (empty_table S) word s\" - apply (simp add: delete_asid_def) - apply wpsimp - done - lemma ucast_less_shiftl_helper3: "\ len_of TYPE('b) + 3 < len_of TYPE('a); 2 ^ (len_of TYPE('b) + 3) \ n\ \ (ucast (x :: 'b::len word) << 3) < (n :: 'a::len word)" @@ -1237,7 +1227,7 @@ lemma (* replace_cap_invs_arch_update *)[Finalise_AI_asms]: lemma dmo_pred_tcb_at[wp]: "do_machine_op mop \\s. P (pred_tcb_at f Q t s)\" apply (simp add: do_machine_op_def split_def) - apply (wp select_wp) + apply wp apply (clarsimp simp: pred_tcb_at_def obj_at_def) done @@ -1314,7 +1304,7 @@ lemma set_asid_pool_obj_at_ptr: locale_abbrev "asid_table_update asid ap s \ - s\arch_state := arch_state s\riscv_asid_table := riscv_asid_table (arch_state s)(asid \ ap)\\" + s\arch_state := arch_state s\riscv_asid_table := (asid_table s)(asid \ ap)\\" lemma valid_table_caps_table [simp]: "valid_table_caps (s\arch_state := arch_state s\riscv_asid_table := table'\\) = valid_table_caps s" diff --git a/proof/invariant-abstract/RISCV64/ArchInvariants_AI.thy b/proof/invariant-abstract/RISCV64/ArchInvariants_AI.thy index dc56187409..44ef380453 100644 --- a/proof/invariant-abstract/RISCV64/ArchInvariants_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchInvariants_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -8,10 +9,6 @@ theory ArchInvariants_AI imports InvariantsPre_AI begin -(* setup *) - -declare opt_mapE[rule del] - \ \---------------------------------------------------------------------------\ section "RISCV64-specific invariant definitions" @@ -1284,9 +1281,55 @@ lemma canonical_user_pptr_base: "canonical_user < pptr_base" by (simp add: canonical_user_def pptr_base_def pptrBase_def canonical_bit_def mask_def) +(* Shadow lemmas from Arch_Kernel_Config_Lemmas and fold definitions to avoid magic numbers. 
+ Written out to make sure the folding has succeeded: *) + +lemma is_page_aligned_physBase: + "is_aligned physBase pageBits" + by (rule is_page_aligned_physBase[folded pageBits_def]) + +lemma kernel_window_sufficient: + "pptrBase + (1 << kernel_window_bits) \ kernelELFBase" + by (rule kernel_window_sufficient[folded kernel_window_bits_def]) + +lemma kernel_elf_window_at_least_page: + "kernelELFBase + 2 ^ pageBits \ kdevBase" + by (rule kernel_elf_window_at_least_page[folded pageBits_def]) + +lemma kernelELFBase_no_overflow: + "kernelELFBase < kernelELFBase + 2 ^ pageBits" + by (rule kernelELFBase_no_overflow[folded pageBits_def]) + +(* end shadowing of Arch_Kernel_Config_Lemmas *) + +lemma is_page_aligned_kernelELFPAddrBase: + "is_aligned kernelELFPAddrBase pageBits" + unfolding kernelELFPAddrBase_def + by (fastforce intro!: is_aligned_add is_page_aligned_physBase + simp: kernelELFPAddrBase_def pageBits_def is_aligned_def) + lemma pptr_base_kernel_elf_base: "pptr_base < kernel_elf_base" - by (simp add: pptr_base_def pptrBase_def canonical_bit_def kernel_elf_base_def kernelELFBase_def) + by (simp add: pptr_base_def kernel_elf_base_def pptrBase_kernelELFBase) + +lemma pptr_base_kdev_base: + "pptr_base < kdev_base" + by (simp add: pptr_base_def pptrBase_def kdev_base_def kdevBase_def canonical_bit_def) + +lemma is_page_aligned_pptrTop: + "is_aligned pptrTop pageBits" + by (simp add: pptrTop_def pageBits_def is_aligned_def) + +lemma is_page_aligned_kernel_elf_base: + "is_aligned kernel_elf_base pageBits" + unfolding kernel_elf_base_def kernelELFBase_def + by (simp add: is_aligned_add is_page_aligned_pptrTop is_aligned_andI1 + is_page_aligned_kernelELFPAddrBase) + +lemma canonical_user_kernel_elf_base: + "canonical_user < kernel_elf_base" + using canonical_user_pptr_base pptr_base_kernel_elf_base + by simp lemma above_pptr_base_canonical: "pptr_base \ p \ canonical_address p" @@ -2564,7 +2607,7 @@ lemma vs_lookup_table_eq_lift: lemma aobjs_of_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ kheap s(p \ ko') |> aobj_of = aobjs_of s" + \ (kheap s)(p \ ko') |> aobj_of = aobjs_of s" by (rule ext) (auto simp: opt_map_def is_ArchObj_def aobj_of_def split: kernel_object.splits if_split_asm) diff --git a/proof/invariant-abstract/RISCV64/ArchIpc_AI.thy b/proof/invariant-abstract/RISCV64/ArchIpc_AI.thy index 0ddc6a1845..c416daba7d 100644 --- a/proof/invariant-abstract/RISCV64/ArchIpc_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchIpc_AI.thy @@ -307,7 +307,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -464,7 +464,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (wpsimp simp: do_ipc_transfer_def do_normal_transfer_def transfer_caps_def bind_assoc wp: hoare_vcg_all_lift hoare_drop_imps)+ apply (simp only: ball_conj_distrib[where P="\x. 
real_cte_at x s" for s]) - apply (wpsimp wp: get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wpsimp wp: get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift simp: obj_at_def is_tcb_def)+ apply (simp split: kernel_object.split_asm) @@ -492,7 +492,7 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" by (auto simp: valid_arch_mdb_def) end diff --git a/proof/invariant-abstract/RISCV64/ArchKHeap_AI.thy b/proof/invariant-abstract/RISCV64/ArchKHeap_AI.thy index 2a0ddb8847..5484dab712 100644 --- a/proof/invariant-abstract/RISCV64/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchKHeap_AI.thy @@ -288,7 +288,7 @@ lemma translate_address_lift_weak: done lemma set_pt_pts_of: - "\\s. pts_of s p \ None \ P (pts_of s (p \ pt)) \ set_pt p pt \\_ s. P (pts_of s)\" + "\\s. pts_of s p \ None \ P ((pts_of s)(p \ pt)) \ set_pt p pt \\_ s. P (pts_of s)\" unfolding set_pt_def by (wpsimp wp: set_object_wp) (auto elim!: rsubst[where P=P] simp: opt_map_def split: option.splits) @@ -310,7 +310,7 @@ lemma pte_ptr_eq: by (fastforce simp: not_le bit_simps) lemma store_pte_ptes_of: - "\\s. ptes_of s p \ None \ P (ptes_of s (p \ pte)) \ store_pte p pte \\_ s. P (ptes_of s)\" + "\\s. ptes_of s p \ None \ P ((ptes_of s)(p \ pte)) \ store_pte p pte \\_ s. P (ptes_of s)\" unfolding store_pte_def pte_of_def apply (wpsimp wp: set_pt_pts_of simp: in_omonad) by (auto simp: obind_def opt_map_def split: option.splits dest!: pte_ptr_eq elim!: rsubst[where P=P]) @@ -371,7 +371,7 @@ lemma vs_lookup_slot_no_asid: If performing a shallower lookup than the one requested results in p, then any deeper lookup in the updated state will return a higher level result along the original path. *) lemma vs_lookup_non_PageTablePTE: - "\ ptes_of s p \ None; ptes_of s' = ptes_of s (p \ pte); + "\ ptes_of s p \ None; ptes_of s' = (ptes_of s)(p \ pte); \ is_PageTablePTE pte; asid_pools_of s' = asid_pools_of s; asid_table s' = asid_table s; @@ -414,7 +414,7 @@ lemma vs_lookup_non_PageTablePTE: apply (subst pt_walk.simps) apply (subst (2) pt_walk.simps) apply (simp add: less_imp_le cong: if_cong) - apply (subgoal_tac "(ptes_of s(p \ pte)) (pt_slot_offset (x + 1) b vref) + apply (subgoal_tac "((ptes_of s)(p \ pte)) (pt_slot_offset (x + 1) b vref) = ptes_of s (pt_slot_offset (x + 1) b vref)") apply (simp add: obind_def split: option.splits) apply clarsimp @@ -453,7 +453,7 @@ lemma store_pte_non_PageTablePTE_vs_lookup: lemma store_pte_not_ao[wp]: "\\s. \pt. aobjs_of s (p && ~~mask pt_bits) = Some (PageTable pt) \ - P (aobjs_of s (p && ~~mask pt_bits \ + P ((aobjs_of s)(p && ~~mask pt_bits \ PageTable (pt (ucast (p && mask pt_bits >> pte_bits) := pte))))\ store_pte p pte \\_ s. P (aobjs_of s)\" @@ -725,20 +725,20 @@ crunch device_state_inv: storeWord "\ms. P (device_state ms)" (* some hyp_ref invariants *) lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_ntfn_update: "\s ep val. 
typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -746,7 +746,7 @@ lemma state_hyp_refs_of_tcb_bound_ntfn_update: lemma state_hyp_refs_of_tcb_state_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -761,7 +761,7 @@ lemma default_tcb_not_live[simp]: "\ live (TCB default_tcb)" lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) @@ -782,12 +782,12 @@ lemma valid_arch_mdb_lift: (* interface lemma *) lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ k)\)" by simp lemma valid_vspace_obj_same_type: "\valid_vspace_obj l ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj l ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj l ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) diff --git a/proof/invariant-abstract/RISCV64/ArchKernelInit_AI.thy b/proof/invariant-abstract/RISCV64/ArchKernelInit_AI.thy index 440f9a49ce..04300d75ff 100644 --- a/proof/invariant-abstract/RISCV64/ArchKernelInit_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchKernelInit_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -208,11 +209,33 @@ lemma pt_walk_init_A_st[simp]: is_aligned_pt_slot_offset_pte global_pte_def) done +(* Since the bottom 30 bits don't matter for 1GB pages, kernelELFPAddrBase and physBase don't have + any influence on the index in the page table, i.e. the virtual address kernel_elf_base is always + within the same 1GB page. *) +lemma elf_index_value: + "elf_index = 0x1FE" +proof - + have mask_0: "\n m x. 
n < m \ - (1 << m) && x && mask n = 0" + by (metis and.commute and_zero_eq word_mask_and_neg_shift_eq_0 word_bw_assocs(1)) + have "pt_index max_pt_level kernel_elf_base = 0x1FE" + unfolding pt_index_def pt_bits_left_def level_defs bit_simps kernel_elf_base_def + kernelELFBase_def pptrTop_def + apply (subst word_plus_and_or_coroll) + apply (subst mask_0; simp) + apply (simp add: word_and_mask_shift_eq_0 shiftr_over_or_dist) + apply (simp add: mask_def) + done + then show ?thesis + unfolding elf_index_def + by (simp add: bit_simps ucast_ucast_mask) +qed + lemma table_index_riscv_global_pt_ptr: "table_index (pt_slot_offset max_pt_level riscv_global_pt_ptr vref) = - ucast ((vref >> ptTranslationBits * 2 + pageBits) && mask ptTranslationBits)" + ucast ((vref >> toplevel_bits) && mask ptTranslationBits)" apply (simp add: pt_slot_offset_def pt_index_def pt_bits_left_def bit_simps level_defs - riscv_global_pt_ptr_def pptr_base_def pptrBase_def canonical_bit_def) + riscv_global_pt_ptr_def pptr_base_def pptrBase_def canonical_bit_def + toplevel_bits_def) apply (subst word_plus_and_or_coroll) apply word_bitwise apply simp @@ -220,10 +243,14 @@ lemma table_index_riscv_global_pt_ptr: apply (clarsimp simp: word_size) done -lemma kernel_window_1G: - "\ pptr_base \ vref; vref < pptr_base + (1 << 30) \ \ +schematic_goal toplevel_bits_value: + "toplevel_bits = ?v" + by (simp add: toplevel_bits_def level_defs bit_simps pt_bits_left_def) + +lemma kernel_window_bits_table_index: + "\ pptr_base \ vref; vref < pptr_base + (1 << kernel_window_bits) \ \ table_index (pt_slot_offset max_pt_level riscv_global_pt_ptr vref) = 0x100" - apply (simp add: table_index_riscv_global_pt_ptr) + apply (simp add: table_index_riscv_global_pt_ptr toplevel_bits_value kernel_window_bits_def) apply (simp add: bit_simps pptr_base_def pptrBase_def neg_mask_le_high_bits word_size flip: NOT_mask) apply (subst (asm) mask_def) apply (simp add: canonical_bit_def) @@ -237,78 +264,143 @@ lemma kernel_mapping_slots_0x100[simp]: pptrBase_def canonical_bit_def) lemma translate_address_kernel_window: - "\ pptr_base \ vref; vref < pptr_base + (1 << 30) \\ + "\ pptr_base \ vref; vref < pptr_base + (1 << kernel_window_bits) \\ translate_address riscv_global_pt_ptr vref (ptes_of init_A_st) = Some (addrFromPPtr vref)" apply (clarsimp simp: translate_address_def in_omonad pt_lookup_target_def pt_lookup_slot_from_level_def) apply (simp add: ptes_of_init_A_st_global[THEN fun_cong] init_global_pt_def global_pte_def pte_ref_def) - apply (simp add: kernel_window_1G is_aligned_pt_slot_offset_pte) + apply (simp add: kernel_window_bits_table_index is_aligned_pt_slot_offset_pte) apply (simp add: bit_simps addr_from_ppn_def shiftl_shiftl) apply (simp add: ptrFromPAddr_def addrFromPPtr_def) apply (simp add: pptrBaseOffset_def paddrBase_def) - apply (simp add: pt_bits_left_def bit_simps level_defs) + apply (simp add: pt_bits_left_def bit_simps level_defs elf_index_value toplevel_bits_def) apply (rule conjI) apply (rule is_aligned_add) apply (simp add: mask_def) apply (simp add: pptrBase_def canonical_bit_def is_aligned_def) - apply (simp add: pptr_base_def) + apply (simp add: pptr_base_def kernel_window_bits_def) apply (simp add: pptrBase_def neg_mask_le_high_bits flip: NOT_mask) apply (subst word_plus_and_or_coroll; simp add: canonical_bit_def word_size mask_def) apply word_bitwise apply clarsimp done -lemma elf_window_1M: - "\ kernel_elf_base \ vref; vref < kernel_elf_base + (1 << 20) \ \ - table_index (pt_slot_offset max_pt_level riscv_global_pt_ptr vref) = 
0x1FE" - apply (simp add: table_index_riscv_global_pt_ptr) - apply (simp add: bit_simps kernel_elf_base_def kernelELFBase_def) +lemma kernelELF_plus_page: + "((kernelELFPAddrBase && mask toplevel_bits) + 2^pageBits) \ 2^toplevel_bits" + by (fastforce intro: aligned_mask_plus_bounded is_page_aligned_kernelELFPAddrBase + simp: toplevel_bits_value bit_simps) + +lemma elf_window_4k: + "\ kernel_elf_base \ vref; vref < kernel_elf_base + (1 << pageBits) \ \ + table_index (pt_slot_offset max_pt_level riscv_global_pt_ptr vref) = elf_index" + using is_page_aligned_kernelELFPAddrBase + apply (simp add: table_index_riscv_global_pt_ptr elf_index_value toplevel_bits_value + kernel_elf_base_def kernelELFBase_def) + apply (simp add: bit_simps add_ac) + apply (drule order_less_le_trans) + apply (rule word_plus_mono_right) + apply (rule kernelELF_plus_page[unfolded bit_simps toplevel_bits_value, simplified]) + apply (simp add: pptrTop_def) + apply (simp add: bit_simps pptrTop_def mask_def is_aligned_nth) apply word_bitwise - apply (clarsimp simp: word_size) + apply clarsimp done -lemma kernel_mapping_slots_0x1FE[simp]: - "0x1FE \ kernel_mapping_slots" +lemma leq_elf_index: + "0x100 \ elf_index" + by (simp add: elf_index_value) + +lemma kernel_mapping_slots_elf_index[simp]: + "elf_index \ kernel_mapping_slots" by (simp add: kernel_mapping_slots_def pptr_base_def bit_simps pt_bits_left_def level_defs - pptrBase_def canonical_bit_def) + pptrBase_def canonical_bit_def leq_elf_index) + +lemma rewrite_eq_minus_to_plus_eq: + "(x = a - b) = (b + x = a)" for x :: "'a::ring" + by auto + +lemma merge_kernelELFPAddrBase_masks: + "((kernelELFPAddrBase && ~~ mask 30) + + (pptrTop + (kernelELFPAddrBase && mask 30) - kernelELFPAddrBase)) + = pptrTop" + by (simp add: mask_out_sub_mask) + +lemma is_aligned_ptrFromPAddr_kernelELFPAddrBase: + "is_aligned + (ptrFromPAddr + (addr_from_ppn (ucast (kernelELFPAddrBase && ~~ mask toplevel_bits >> pageBits)))) + (pt_bits_left max_pt_level)" + apply (simp add: addr_from_ppn_def ptrFromPAddr_def addrFromPPtr_def elf_index_value) + apply (simp add: pt_bits_left_def bit_simps level_defs ucast_ucast_mask toplevel_bits_value) + apply (simp add: pptrBase_def pptrBaseOffset_def paddrBase_def canonical_bit_def) + apply (rule is_aligned_add) + apply (metis and_not_mask is_aligned_andI1 is_aligned_shiftl_self shiftr_and_eq_shiftl) + apply (simp add: is_aligned_def) + done + +lemma kernelELFPAddrBase_addrFromKPPtr: + "\ kernel_elf_base \ vref; vref < kernel_elf_base + (1 << pageBits) \ + \ addr_from_ppn (ucast (kernelELFPAddrBase && ~~ mask toplevel_bits >> pageBits)) + + (vref && mask (pt_bits_left max_pt_level)) + = addrFromKPPtr vref" + apply (simp add: addr_from_ppn_def ptrFromPAddr_def addrFromPPtr_def elf_index_value) + apply (simp add: pt_bits_left_def bit_simps level_defs ucast_ucast_mask toplevel_bits_value) + apply (simp add: addrFromKPPtr_def mask_shiftl_decompose flip: word_and_mask_shiftl) + apply (simp add: ac_simps neg_mask_combine) + apply (simp add: kernel_elf_base_def kernelELFBaseOffset_def) + apply (simp only: kernelELFBase_def) + apply (subst rewrite_eq_minus_to_plus_eq) + apply (simp add: add_ac merge_kernelELFPAddrBase_masks) + apply (subst word_plus_and_or_coroll) + apply (simp add: pptrTop_def mask_def) + apply word_bitwise + apply (subst (asm) word_plus_and_or_coroll) + apply (simp add: pptrTop_def mask_def) + apply word_bitwise + apply (drule order_less_le_trans) + apply (rule word_plus_mono_right) + apply (rule kernelELF_plus_page[unfolded bit_simps 
toplevel_bits_value, simplified]) + apply (simp add: pptrTop_def) + apply (simp add: pptrTop_def mask_def) + apply word_bitwise + apply clarsimp + done lemma translate_address_kernel_elf_window: - "\ kernel_elf_base \ vref; vref < kernel_elf_base + (1 << 20) \ \ + "\ kernel_elf_base \ vref; vref < kernel_elf_base + (1 << pageBits) \ \ translate_address riscv_global_pt_ptr vref (ptes_of init_A_st) = Some (addrFromKPPtr vref)" apply (clarsimp simp: translate_address_def in_omonad pt_lookup_target_def pt_lookup_slot_from_level_def) apply (simp add: ptes_of_init_A_st_global[THEN fun_cong] init_global_pt_def global_pte_def pte_ref_def) - apply (simp add: elf_window_1M is_aligned_pt_slot_offset_pte) - apply (simp add: bit_simps addr_from_ppn_def shiftl_shiftl) - apply (simp add: ptrFromPAddr_def addrFromPPtr_def) - apply (simp add: addrFromKPPtr_def kernelELFBaseOffset_def kernelELFPAddrBase_def kernelELFBase_def) - apply (simp add: pt_bits_left_def bit_simps level_defs) - apply (rule conjI) - apply (simp add: pptrBase_def pptrBaseOffset_def paddrBase_def canonical_bit_def is_aligned_def) - apply (simp add: kernel_elf_base_def kernelELFBase_def) - apply (subst word_plus_and_or_coroll) - apply (simp add: canonical_bit_def word_size mask_def) - apply word_bitwise - apply (simp add: canonical_bit_def word_size mask_def) - apply word_bitwise - apply clarsimp + apply (simp add: elf_window_4k is_aligned_pt_slot_offset_pte elf_index_value) + apply (clarsimp simp: is_aligned_ptrFromPAddr_kernelELFPAddrBase kernelELFPAddrBase_addrFromKPPtr) done lemma kernel_window_init_st: - "kernel_window init_A_st = { pptr_base ..< pptr_base + (1 << 30) }" - by (auto simp: state_defs kernel_window_def) + "kernel_window init_A_st = { pptr_base ..< pptr_base + (1 << kernel_window_bits) }" + by (auto simp: state_defs kernel_window_def toplevel_bits_def bit_simps) + +lemma abs_kernel_window_sufficient: + "pptr_base + (1 << kernel_window_bits) \ kernel_elf_base" + unfolding pptr_base_def kernel_elf_base_def + using kernel_window_sufficient + by simp + +lemma abs_kernel_elf_window_at_least_page: + "kernel_elf_base + 2 ^ pageBits \ kdev_base" + unfolding kernel_elf_base_def kdev_base_def + using kernel_elf_window_at_least_page + by simp + +lemma kernel_elf_base_no_overflow: + "kernel_elf_base < kernel_elf_base + 2 ^ pageBits" + unfolding kernel_elf_base_def + by (rule kernelELFBase_no_overflow) lemma kernel_elf_window_init_st: - "kernel_elf_window init_A_st = { kernel_elf_base ..< kernel_elf_base + (1 << 20) }" - apply (clarsimp simp: state_defs kernel_elf_window_def kernel_elf_base_def kernelELFBase_def - pptr_base_def pptrBase_def canonical_bit_def) - apply (rule set_eqI, clarsimp) - apply (rule iffI) - apply auto[1] - apply clarsimp - apply word_bitwise - apply clarsimp - done + "kernel_elf_window init_A_st = { kernel_elf_base ..< kernel_elf_base + (1 << pageBits) }" + using abs_kernel_window_sufficient + by (force simp: state_defs kernel_elf_window_def) lemma valid_global_vspace_mappings_init_A_st[simp]: "valid_global_vspace_mappings init_A_st" @@ -319,25 +411,22 @@ lemma valid_global_vspace_mappings_init_A_st[simp]: lemma valid_uses_init_A_st[simp]: "valid_uses_2 init_vspace_uses" proof - note canonical_bit_def[simp] - have [simp]: "pptr_base < pptr_base + 0x40000000" - by (simp add: pptr_base_def pptrBase_def) + have [simplified, simp]: "pptr_base < pptr_base + (1 << kernel_window_bits)" + by (simp add: pptr_base_def pptrBase_def kernel_window_bits_def) have [simp]: "p \ canonical_user \ \ pptr_base \ p" for p by (rule 
notI, drule (1) order_trans) (simp add: canonical_user_def mask_def pptr_base_def pptrBase_def) have [simp]: "p \ canonical_user \ \ kernel_elf_base \ p" for p - by (rule notI, drule (1) order_trans) - (simp add: canonical_user_def mask_def kernel_elf_base_def kernelELFBase_def) + using canonical_user_kernel_elf_base by simp have [simp]: "p \ canonical_user \ \ kdev_base \ p" for p by (rule notI, drule (1) order_trans) (simp add: canonical_user_def mask_def kdev_base_def kdevBase_def) - have [simp]: "kernel_elf_base \ p \ \ p < pptr_base + 0x40000000" for p - by (rule notI, drule (1) order_le_less_trans) - (simp add: kernel_elf_base_def kernelELFBase_def pptr_base_def pptrBase_def) - have [simp]: "kdev_base \ p \ \ p < kernel_elf_base + 0x100000" for p - by (rule notI, drule (1) order_le_less_trans) - (simp add: kernel_elf_base_def kernelELFBase_def kdev_base_def kdevBase_def) - have "pptr_base + 0x40000000 < kernel_elf_base + 0x100000" - by (simp add: kernel_elf_base_def kernelELFBase_def pptr_base_def pptrBase_def) + have [simplified, simp]: "kernel_elf_base \ p \ \ p < pptr_base + (1 << kernel_window_bits)" for p + using abs_kernel_window_sufficient by simp + have [simp]: "kdev_base \ p \ \ p < kernel_elf_base + 2 ^ pageBits" for p + using abs_kernel_elf_window_at_least_page by simp + have "pptr_base + (1 << kernel_window_bits) < kernel_elf_base + 2 ^ pageBits" + using abs_kernel_window_sufficient kernel_elf_base_no_overflow by simp thus ?thesis using canonical_user_pptr_base pptr_base_kernel_elf_base unfolding valid_uses_2_def init_vspace_uses_def window_defs @@ -374,24 +463,42 @@ lemma valid_vspace_objs_init_A_st[simp]: lemma global_pt_kernel_window_init_arch_state[simp]: "obj_addrs init_global_pt riscv_global_pt_ptr \ kernel_window_2 (riscv_kernel_vspace init_arch_state)" - apply (clarsimp simp: state_defs pptr_base_num bit_simps kernel_window_def kernel_elf_base_def) + apply (clarsimp simp: state_defs pptr_base_num bit_simps kernel_window_def kernel_elf_base_def + kernel_window_bits_def) apply (rule conjI; unat_arith) done lemma idle_thread_in_kernel_window_init_arch_state[simp]: "{idle_thread_ptr..0x3FF + idle_thread_ptr} \ kernel_window_2 (riscv_kernel_vspace init_arch_state)" - apply (clarsimp simp: state_defs pptr_base_num bit_simps kernel_window_def kernel_elf_base_def) + apply (clarsimp simp: state_defs pptr_base_num bit_simps kernel_window_def kernel_elf_base_def + kernel_window_bits_def) apply (rule conjI; unat_arith) done +lemma pptr_base_kernel_window_no_overflow: + "pptr_base \ pptr_base + (1 << kernel_window_bits)" + by (simp add: pptr_base_def pptrBase_def canonical_bit_def kernel_window_bits_def) + lemma irq_node_pptr_base_kernel_elf_base: - "\x \ pptr_base + (m + (mask cte_level_bits + 0x3000)); m \ mask (size irq) << cte_level_bits \ - \ \ kernel_elf_base \ x" for irq::irq - apply (simp add: word_size cte_level_bits_def mask_def pptr_base_def pptrBase_def - kernel_elf_base_def kernelELFBase_def canonical_bit_def not_le) - apply unat_arith - done + fixes irq::irq + assumes "x \ pptr_base + (m + (mask cte_level_bits + 0x3000))" + assumes "m \ mask (size irq) << cte_level_bits" + shows "\ kernel_elf_base \ x" +proof - + have less: + "\m x. 
\ m \ x; x < 1 << kernel_window_bits \ \ pptr_base + m < kernel_elf_base" + using order_le_less_trans word_plus_strict_mono_right pptr_base_kernel_elf_base + pptr_base_kernel_window_no_overflow abs_kernel_window_sufficient + by fastforce + from assms show ?thesis + apply (simp add: not_le) + apply (erule order_le_less_trans) + apply (rule less[where x="mask cte_level_bits + 0x3000 + mask (size irq) << cte_level_bits"]; + simp add: word_size cte_level_bits_def mask_def kernel_window_bits_def) + apply unat_arith + done +qed lemma irq_node_in_kernel_window_init_arch_state': "\ init_irq_node_ptr + m \ x; x \ init_irq_node_ptr + m + mask cte_level_bits; @@ -410,8 +517,10 @@ lemma irq_node_in_kernel_window_init_arch_state': apply (simp add: pptr_base_num canonical_bit_def is_aligned_def) apply (simp add: pptr_base_num cte_level_bits_def canonical_bit_def mask_def word_size) apply unat_arith - apply (simp add: kernel_elf_base_def kernelELFBase_def cte_level_bits_def canonical_bit_def - mask_def init_irq_node_ptr_def pptr_base_num word_size) + apply clarsimp + apply (thin_tac "kernel_elf_base \ x \ P" for x P) + apply (simp add: cte_level_bits_def canonical_bit_def mask_def init_irq_node_ptr_def + pptr_base_num word_size kernel_window_bits_def) apply unat_arith apply clarsimp done diff --git a/proof/invariant-abstract/RISCV64/ArchRetype_AI.thy b/proof/invariant-abstract/RISCV64/ArchRetype_AI.thy index 2af66423f1..33cf58e6b6 100644 --- a/proof/invariant-abstract/RISCV64/ArchRetype_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchRetype_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only diff --git a/proof/invariant-abstract/RISCV64/ArchTcb_AI.thy b/proof/invariant-abstract/RISCV64/ArchTcb_AI.thy index f2f28a1783..61a471e06c 100644 --- a/proof/invariant-abstract/RISCV64/ArchTcb_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchTcb_AI.thy @@ -181,7 +181,6 @@ lemma cap_delete_no_cap_to_obj_asid[wp, Tcb_AI_asms]: apply (simp add: cap_delete_def no_cap_to_obj_with_diff_ref_ran_caps_form) apply wp - apply simp apply (rule use_spec) apply (rule rec_del_all_caps_in_range) apply (simp | rule obj_ref_none_no_asid)+ @@ -235,19 +234,19 @@ lemma tc_invs[Tcb_AI_asms]: \\rv. 
invs\" apply (rule hoare_gen_asm)+ apply (simp add: split_def set_mcpriority_def cong: option.case_cong) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply wp - apply ((simp only: simp_thms - | (simp add: conj_comms del: hoare_True_E_R, + apply ((simp only: simp_thms cong: conj_cong + | (simp add: conj_comms, strengthen imp_consequent[where Q="x = None" for x], simp cong: conj_cong) | rule wp_split_const_if wp_split_const_if_R - hoare_vcg_all_lift_R + hoare_vcg_all_liftE_R hoare_vcg_E_elim hoare_vcg_const_imp_lift_R hoare_vcg_R_conj | (wp out_invs_trivial case_option_wpE cap_delete_deletes cap_delete_valid_cap cap_insert_valid_cap out_cte_at cap_insert_cte_at cap_delete_cte_at out_valid_cap - hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_invs_trivial[OF ball_tcb_cap_casesI] hoare_vcg_all_lift thread_set_valid_cap out_emptyable @@ -261,10 +260,9 @@ lemma tc_invs[Tcb_AI_asms]: checked_insert_no_cap_to out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid - static_imp_wp static_imp_conj_wp)[1] + hoare_weak_lift_imp hoare_weak_lift_imp_conj)[1] | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def - del: hoare_True_E_R | wpc | strengthen use_no_cap_to_obj_asid_strg use_no_cap_to_obj_asid_strg[simplified conj_comms] tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] diff --git a/proof/invariant-abstract/RISCV64/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/RISCV64/ArchVSpaceEntries_AI.thy index 3abb8fa675..0c224695ba 100644 --- a/proof/invariant-abstract/RISCV64/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchVSpaceEntries_AI.thy @@ -56,7 +56,7 @@ lemma mapM_x_store_pte_updates: apply wp apply (clarsimp simp: obj_at_def fun_upd_idem) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: store_pte_def set_pt_def set_object_def word_size_bits_def) apply (wp get_pt_wp get_object_wp) @@ -106,7 +106,7 @@ crunch valid_vspace_objs'[wp]: set_simple_ko "valid_vspace_objs'" (wp: crunch_wps) crunch valid_vspace_objs'[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_vspace_objs'" - (wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object) + (wp: crunch_wps preemption_point_inv simp: crunch_simps unless_def ignore:set_object) lemma preemption_point_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ preemption_point \\rv. valid_vspace_objs'\" @@ -233,7 +233,7 @@ lemma perform_asid_pool_invocation_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: perform_asid_pool_invocation, perform_asid_control_invocation "valid_vspace_objs'" (ignore: delete_objects set_object - wp: static_imp_wp select_wp crunch_wps + wp: hoare_weak_lift_imp crunch_wps simp: crunch_simps unless_def) lemma pte_range_interD: @@ -287,8 +287,6 @@ lemma perform_invocation_valid_vspace_objs'[wp]: \\rv. 
valid_vspace_objs'\" apply (cases i, simp_all) apply (wp send_signal_interrupt_states | simp)+ - apply (clarsimp simp:) - apply (wp | wpc | simp)+ apply (simp add: arch_perform_invocation_def) apply (wp | wpc | simp)+ apply (auto simp: valid_arch_inv_def intro: valid_objs_caps) @@ -311,7 +309,7 @@ lemma handle_invocation_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: activate_thread,switch_to_thread, handle_hypervisor_fault, switch_to_idle_thread, handle_call, handle_recv, handle_reply, handle_send, handle_yield, handle_interrupt "valid_vspace_objs'" - (simp: crunch_simps wp: crunch_wps alternative_valid select_wp OR_choice_weak_wp select_ext_weak_wp + (simp: crunch_simps wp: crunch_wps OR_choice_weak_wp select_ext_weak_wp ignore: without_preemption getActiveIRQ resetTimer ackInterrupt OR_choice set_scheduler_action) @@ -322,8 +320,7 @@ lemma handle_event_valid_vspace_objs'[wp]: lemma schedule_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ schedule :: (unit,unit) s_monad \\_. valid_vspace_objs'\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma call_kernel_valid_vspace_objs'[wp]: diff --git a/proof/invariant-abstract/RISCV64/ArchVSpace_AI.thy b/proof/invariant-abstract/RISCV64/ArchVSpace_AI.thy index 51085c42f8..1d9310f722 100644 --- a/proof/invariant-abstract/RISCV64/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchVSpace_AI.thy @@ -1446,7 +1446,7 @@ end locale asid_pool_map = Arch + fixes s ap pool asid ptp pt and s' :: "'a::state_ext state" - defines "s' \ s\kheap := kheap s(ap \ ArchObj (ASIDPool (pool(asid_low_bits_of asid \ ptp))))\" + defines "s' \ s\kheap := (kheap s)(ap \ ArchObj (ASIDPool (pool(asid_low_bits_of asid \ ptp))))\" assumes ap: "asid_pools_of s ap = Some pool" assumes new: "pool (asid_low_bits_of asid) = None" assumes pt: "pts_of s ptp = Some pt" diff --git a/proof/invariant-abstract/RISCV64/Machine_AI.thy b/proof/invariant-abstract/RISCV64/Machine_AI.thy index c812e7ce42..893fb3eab5 100644 --- a/proof/invariant-abstract/RISCV64/Machine_AI.thy +++ b/proof/invariant-abstract/RISCV64/Machine_AI.thy @@ -17,7 +17,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. no_irq m" wpc_helper_no_irq @@ -56,7 +56,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -83,13 +83,13 @@ lemma det_getRestartPC: "det getRestartPC" lemma det_setNextPC: "det (setNextPC p)" by (simp add: setNextPC_def det_setRegister) - +(* FIXME empty_fail: make all empty_fail [intro!, wp], and non-conditional ones [simp] *) lemma ef_loadWord: "empty_fail (loadWord x)" - by (simp add: loadWord_def) + by (fastforce simp: loadWord_def) lemma ef_storeWord: "empty_fail (storeWord x y)" - by (simp add: storeWord_def) + by (fastforce simp: storeWord_def) lemma no_fail_getRestartPC: "no_fail \ getRestartPC" @@ -175,7 +175,7 @@ lemma no_fail_getActiveIRQ[wp]: "no_fail \ (getActiveIRQ in_kernel)" apply (simp add: getActiveIRQ_def) apply (rule no_fail_pre) - apply (wp non_fail_select) + apply wp apply simp done @@ -184,7 +184,7 @@ definition "irq_state_independent P \ \f s. 
P s \ lemma getActiveIRQ_inv [wp]: "\irq_state_independent P\ \ \P\ getActiveIRQ in_kernel \\rv. P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -284,7 +284,7 @@ lemma no_irq_seq [wp]: "\ no_irq f; \x. no_irq (g x) \ \ no_irq (f >>= g)" apply (subst no_irq_def) apply clarsimp - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (wp|simp)+ done @@ -329,7 +329,7 @@ lemma getActiveIRQ_le_maxIRQ': getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply clarsimp apply (rule ccontr) apply (simp add: linorder_not_le) @@ -338,7 +338,7 @@ lemma getActiveIRQ_le_maxIRQ': lemma getActiveIRQ_neq_non_kernel: "\\\ getActiveIRQ True \\rv s. rv \ Some ` non_kernel_IRQs \" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply auto done @@ -356,7 +356,7 @@ lemma empty_fail_initL2Cache: "empty_fail initL2Cache" lemma empty_fail_clearMemory [simp, intro!]: "\a b. empty_fail (clearMemory a b)" - by (simp add: clearMemory_def mapM_x_mapM ef_storeWord) + by (fastforce simp: clearMemory_def mapM_x_mapM ef_storeWord) lemma no_irq_setVSpaceRoot: "no_irq (setVSpaceRoot r a)" diff --git a/proof/invariant-abstract/Retype_AI.thy b/proof/invariant-abstract/Retype_AI.thy index acab1eac33..0aa287b830 100644 --- a/proof/invariant-abstract/Retype_AI.thy +++ b/proof/invariant-abstract/Retype_AI.thy @@ -979,7 +979,7 @@ lemma non_disjoing_subset: "\A \ B; A \ C \ {}\< lemma pspace_no_overlap_same_type: "\pspace_no_overlap S s; ko_at k p s; a_type ko = a_type k\ - \ pspace_no_overlap S (kheap_update (\_. (kheap s(p \ ko))) s)" + \ pspace_no_overlap S (kheap_update (\_. (kheap s)(p \ ko)) s)" unfolding pspace_no_overlap_def by (clarsimp simp: obj_at_def obj_bits_T) @@ -1904,7 +1904,7 @@ locale retype_region_proofs_invs fixes region_in_kernel_window :: "machine_word set \ 'state_ext state \ bool" assumes valid_global_refs: "valid_global_refs s \ valid_global_refs s'" assumes valid_arch_state: "valid_arch_state s \ valid_arch_state s'" - assumes valid_vspace_objs': "valid_vspace_objs s \ valid_vspace_objs s'" + assumes valid_vspace_objs': "\ invs s; valid_vspace_objs s \ \ valid_vspace_objs s'" assumes valid_cap: "(s::'state_ext state) \ cap \ untyped_range cap \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} = {} @@ -2156,13 +2156,6 @@ lemma swp_clearMemoryVM [simp]: by (rule ext,simp) -(* FIXME: move *) -lemma bind_assoc_reverse: - "(do x \ A; _ \ B x; C x od) = - (do x \ do x \ A; _ \ B x; return x od; C x od)" -by (simp only: bind_assoc return_bind) - - (* FIXME: move *) lemmas do_machine_op_bind = submonad_bind [OF submonad_do_machine_op submonad_do_machine_op diff --git a/proof/invariant-abstract/Schedule_AI.thy b/proof/invariant-abstract/Schedule_AI.thy index 059dd5be19..26d95425fc 100644 --- a/proof/invariant-abstract/Schedule_AI.thy +++ b/proof/invariant-abstract/Schedule_AI.thy @@ -177,8 +177,8 @@ locale Schedule_AI_U = Schedule_AI "TYPE(unit)" lemma (in Schedule_AI_U) schedule_invs[wp]: "\invs\ (Schedule_A.schedule :: (unit,unit) s_monad) \\rv. 
invs\" apply (simp add: Schedule_A.schedule_def allActiveTCBs_def) - apply (wp OR_choice_weak_wp alternative_wp dmo_invs thread_get_inv - do_machine_op_tcb select_ext_weak_wp select_wp when_def + apply (wp OR_choice_weak_wp dmo_invs thread_get_inv + do_machine_op_tcb select_ext_weak_wp when_def | clarsimp simp: getActiveTCB_def get_tcb_def)+ done @@ -202,9 +202,8 @@ lemma (in Schedule_AI_U) schedule_ct_activateable[wp]: done show ?thesis apply (simp add: Schedule_A.schedule_def allActiveTCBs_def) - apply (wp alternative_wp - select_ext_weak_wp select_wp stt_activatable stit_activatable - | simp add: P Q)+ + apply (wp select_ext_weak_wp stt_activatable stit_activatable + | simp add: P Q)+ apply (clarsimp simp: getActiveTCB_def ct_in_state_def) apply (rule conjI) apply clarsimp diff --git a/proof/invariant-abstract/SubMonad_AI.thy b/proof/invariant-abstract/SubMonad_AI.thy index 13e23d3ad8..99b09dcd5b 100644 --- a/proof/invariant-abstract/SubMonad_AI.thy +++ b/proof/invariant-abstract/SubMonad_AI.thy @@ -42,7 +42,7 @@ lemma assert_get_thread_do_machine_op_comm: apply (rule submonad_comm2 [OF _ _ submonad_do_machine_op]) apply (rule submonad_args_pspace) apply (rule assert_get_tcb_pspace) - apply simp+ + apply (simp add: empty_fail_cond)+ done end diff --git a/proof/invariant-abstract/Syscall_AI.thy b/proof/invariant-abstract/Syscall_AI.thy index 9e9f1fceef..b1d5d43bab 100644 --- a/proof/invariant-abstract/Syscall_AI.thy +++ b/proof/invariant-abstract/Syscall_AI.thy @@ -296,7 +296,7 @@ lemma thread_set_cap_to: "(\tcb. \(getF, v)\ran tcb_cap_cases. getF (f tcb) = getF tcb) \ \ex_nonz_cap_to p\ thread_set f tptr \\_. ex_nonz_cap_to p\" apply (clarsimp simp add: ex_nonz_cap_to_def) - apply (wpsimp wp: hoare_ex_wp thread_set_cte_wp_at_trivial + apply (wpsimp wp: hoare_vcg_ex_lift thread_set_cte_wp_at_trivial | fast)+ done @@ -323,7 +323,7 @@ lemma (in Systemcall_AI_Pre) handle_fault_reply_cte_wp_at: done have NC: "\p' s tcb P nc. get_tcb p' s = Some tcb - \ cte_wp_at P p (s\kheap := kheap s(p' \ TCB (tcb\tcb_arch := arch_tcb_context_set nc (tcb_arch tcb)\))\) + \ cte_wp_at P p (s\kheap := (kheap s)(p' \ TCB (tcb\tcb_arch := arch_tcb_context_set nc (tcb_arch tcb)\))\) = cte_wp_at P p s" apply (drule_tac nc=nc in SC) apply (drule_tac P=P and p=p in cte_wp_at_after_update) @@ -724,7 +724,7 @@ lemma lookup_extras_real_ctes[wp]: lemma lookup_extras_ctes[wp]: "\valid_objs\ lookup_extra_caps t xs info \\rv s. \x \ set rv. cte_at (snd x) s\,-" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookup_extras_real_ctes) apply (simp add: real_cte_at_cte) done @@ -898,7 +898,7 @@ lemma lookup_extra_caps_eq [wp]: by (wpsimp wp: mapME_set simp: lookup_extra_caps_def) -(*FIXME: move to NonDetMonadVCG.valid_validE_R *) +(*FIXME: move to Nondet_VCG.valid_validE_R *) lemma valid_validE_R_gen: "\\rv s. Q' (Inr rv) s \ Q rv s; \P\ f \Q'\\ \ \P\ f \Q\, -" by (fastforce simp: validE_R_def validE_def valid_def split_def) @@ -938,11 +938,8 @@ lemma lcs_ex_cap_to2[wp]: apply (wp lsft_ex_cte_cap_to | simp)+ done -lemma hoare_vcg_const_imp_lift_E[wp]: - "\P\ f -, \Q\ \ \\s. F \ P s\ f -, \\rv s. 
F \ Q rv s\" - apply (cases F) apply auto - apply wp - done +(* FIXME AARCH64: this should really not be wp *) +declare hoare_vcg_const_imp_lift_E[wp] context Syscall_AI begin @@ -989,7 +986,7 @@ lemma hinv_invs': done lemmas hinv_invs[wp] = hinv_invs' - [where Q=\,simplified hoare_post_taut, OF TrueI TrueI TrueI TrueI,simplified] + [where Q=\,simplified hoare_TrueI, OF TrueI TrueI TrueI TrueI,simplified] (* FIXME: move *) lemma hinv_tcb[wp]: @@ -1106,7 +1103,7 @@ lemma hw_invs[wp]: "\invs and ct_active\ handle_recv is_blocking apply (simp add: split_def) apply (wp resolve_address_bits_valid_fault2)+ apply (simp add: valid_fault_def) - apply ((wp hoare_vcg_all_lift_R lookup_cap_ex_cap + apply ((wp hoare_vcg_all_liftE_R lookup_cap_ex_cap | simp add: obj_at_def | simp add: conj_disj_distribL ball_conj_distrib | wp (once) hoare_drop_imps)+) @@ -1149,8 +1146,6 @@ lemma hy_inv: "(\s f. P (trans_state f s) = P s) \ \ st_tcb_at simple (cur_thread s) s" by (fastforce simp: ct_in_state_def elim!: pred_tcb_weakenE) @@ -1168,8 +1163,8 @@ lemma tcb_caller_cap: lemma (in Syscall_AI) hr_invs[wp]: "\invs :: 'state_ext state \ _\ handle_reply \\rv. invs\" apply (simp add: handle_reply_def) - apply (rule hoare_seq_ext [OF _ gets_sp]) - apply (rule hoare_seq_ext [OF _ get_cap_sp]) + apply (rule bind_wp [OF _ gets_sp]) + apply (rule bind_wp [OF _ get_cap_sp]) apply (rule hoare_pre) apply (wp | wpc)+ apply (clarsimp simp: cte_wp_at_eq_simp) @@ -1194,7 +1189,7 @@ lemma simple_if_Restart_Inactive: by simp crunch (in Syscall_AI) st_tcb_at_simple[wp]: handle_reply "st_tcb_at simple t' :: 'state_ext state \ _" - (wp: hoare_post_taut crunch_wps sts_st_tcb_at_cases + (wp: hoare_TrueI crunch_wps sts_st_tcb_at_cases thread_set_no_change_tcb_state ignore: set_thread_state simp: simple_if_Restart_Inactive) @@ -1225,7 +1220,7 @@ lemma do_reply_transfer_nonz_cap: do_reply_transfer sender receiver slot grant \\rv. ex_nonz_cap_to p\" apply (simp add: do_reply_transfer_def) - apply (rule hoare_seq_ext [OF _ gts_sp]) + apply (rule bind_wp [OF _ gts_sp]) apply (rule hoare_pre) apply (wp cap_delete_one_cte_wp_at_preserved hoare_vcg_ex_lift | simp split del: if_split | wpc | strengthen ex_nonz_cap_to_tcb_strg)+ @@ -1289,7 +1284,7 @@ lemma hc_invs[wp]: lemma hr_ct_active[wp]: "\invs and ct_active\ handle_reply \\rv. 
ct_active :: 'state_ext state \ _\" apply (simp add: handle_reply_def) - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (rule_tac t=thread in ct_in_state_decomp) apply ((wp hoare_drop_imps hoare_vcg_all_lift | wpc | simp)+)[1] apply (wp hoare_vcg_all_lift get_cap_wp do_reply_transfer_st_tcb_at_active @@ -1437,7 +1432,7 @@ lemma send_fault_ipc_st_tcb_at_runnable: apply (clarsimp simp: Let_def) apply wpc apply (wp send_ipc_st_tcb_at_runnable thread_set_no_change_tcb_state thread_set_refs_trivial - hoare_vcg_all_lift_R thread_get_wp + hoare_vcg_all_liftE_R thread_get_wp | clarsimp | wp (once) hoare_drop_imps)+ apply (clarsimp simp: pred_tcb_at_def obj_at_def is_tcb) @@ -1468,7 +1463,7 @@ lemma handle_recv_st_tcb_at: apply (rule hoare_pre) apply (wp handle_fault_st_tcb_at_runnable receive_ipc_st_tcb_at_runnable delete_caller_cap_sym_refs rai_pred_tcb_neq - get_simple_ko_wp hoare_drop_imps hoare_vcg_all_lift_R) + get_simple_ko_wp hoare_drop_imps hoare_vcg_all_liftE_R) apply clarsimp apply wp+ apply fastforce diff --git a/proof/invariant-abstract/TcbAcc_AI.thy b/proof/invariant-abstract/TcbAcc_AI.thy index 5bf5055026..b9fa16033a 100644 --- a/proof/invariant-abstract/TcbAcc_AI.thy +++ b/proof/invariant-abstract/TcbAcc_AI.thy @@ -72,7 +72,7 @@ lemma (in TcbAcc_AI_arch_tcb_context_set_eq) thread_get_as_user: apply (clarsimp simp: gets_the_member set_object_def get_object_def in_monad bind_assoc gets_def put_def bind_def get_def return_def select_f_def dest!: get_tcb_SomeD) - apply (subgoal_tac "kheap s(t \ TCB v) = kheap s", simp) + apply (subgoal_tac "(kheap s)(t \ TCB v) = kheap s", simp) apply fastforce done @@ -957,7 +957,7 @@ lemma ct_in_state_decomp: shows "\\s. Pre s \ t = (cur_thread s)\ f \\rv. ct_in_state Prop\" apply (rule hoare_post_imp [where Q="\rv s. t = cur_thread s \ st_tcb_at Prop t s"]) apply (clarsimp simp add: ct_in_state_def) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (wp x y) apply simp done diff --git a/proof/invariant-abstract/Tcb_AI.thy b/proof/invariant-abstract/Tcb_AI.thy index d40c12b1a7..27df6fc27f 100644 --- a/proof/invariant-abstract/Tcb_AI.thy +++ b/proof/invariant-abstract/Tcb_AI.thy @@ -70,8 +70,8 @@ lemma set_thread_state_ct_st: lemma (in Tcb_AI_1) activate_invs: "\(invs::'state_ext::state_ext state \ bool)\ activate_thread \\rv s. invs s \ (ct_running s \ ct_idle s)\" apply (unfold activate_thread_def) - apply (rule hoare_seq_ext [OF _ gets_sp]) - apply (rule hoare_seq_ext [OF _ gts_sp]) + apply (rule bind_wp [OF _ gets_sp]) + apply (rule bind_wp [OF _ gts_sp]) apply (case_tac state, simp_all) apply wp apply (clarsimp elim!: pred_tcb_weakenE @@ -129,7 +129,7 @@ lemma setup_reply_master_nonz_cap[wp]: lemma restart_invs[wp]: "\invs and tcb_at t and ex_nonz_cap_to t\ restart t \\rv. invs\" apply (simp add: restart_def) - apply (rule hoare_seq_ext [OF _ gts_sp]) + apply (rule bind_wp [OF _ gts_sp]) apply (wp sts_invs_minor cancel_ipc_ex_nonz_cap_to_tcb hoare_vcg_disj_lift cancel_ipc_simple2 | simp add: if_apply_def2 @@ -175,7 +175,7 @@ lemma (in Tcb_AI_1) copyreg_invs: invoke_tcb (tcb_invocation.CopyRegisters dest src susp resume frames ints arch) \\rv. 
invs\" apply (wpsimp simp: if_apply_def2 - wp: mapM_x_wp' suspend_nonz_cap_to_tcb static_imp_wp) + wp: mapM_x_wp' suspend_nonz_cap_to_tcb hoare_weak_lift_imp) apply (clarsimp simp: invs_def valid_state_def valid_pspace_def suspend_def dest!: idle_no_ex_cap) done @@ -721,12 +721,12 @@ where | "tcb_inv_wf (tcb_invocation.ThreadControl t sl fe mcp pr croot vroot buf) = (tcb_at t and case_option \ (valid_cap \ fst) croot and K (case_option True (is_cnode_cap \ fst) croot) - and case_option \ ((cte_at And ex_cte_cap_to) \ snd) croot + and case_option \ ((cte_at and ex_cte_cap_to) \ snd) croot and case_option \ (no_cap_to_obj_dr_emp \ fst) croot and K (case_option True (is_valid_vtable_root \ fst) vroot) and case_option \ (valid_cap \ fst) vroot and case_option \ (no_cap_to_obj_dr_emp \ fst) vroot - and case_option \ ((cte_at And ex_cte_cap_to) \ snd) vroot + and case_option \ ((cte_at and ex_cte_cap_to) \ snd) vroot and (case_option \ (case_option \ (valid_cap o fst) o snd) buf) and (case_option \ (case_option \ (no_cap_to_obj_dr_emp o fst) o snd) buf) @@ -735,7 +735,7 @@ where ((swp valid_ipc_buffer_cap (fst v) and is_arch_cap and is_cnode_or_valid_arch) o fst) (snd v)) buf) - and (case_option \ (case_option \ ((cte_at And ex_cte_cap_to) o snd) o snd) buf) + and (case_option \ (case_option \ ((cte_at and ex_cte_cap_to) o snd) o snd) buf) and (\s. {croot, vroot, option_map undefined buf} \ {None} \ cte_at sl s \ ex_cte_cap_to sl s) and K (case_option True (\bl. length bl = word_bits) fe) @@ -778,10 +778,10 @@ lemma set_set_simple_ko_has_reply[wp]: lemma unbind_notification_has_reply[wp]: "\\s. P (has_reply_cap t s)\ unbind_notification t' \\rv s. P (has_reply_cap t s)\" apply (simp add: unbind_notification_def has_reply_cap_def cte_wp_at_caps_of_state) - apply (rule hoare_seq_ext[OF _ gbn_sp]) + apply (rule bind_wp[OF _ gbn_sp]) apply (case_tac ntfnptr, simp, wp, simp) apply (clarsimp) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (wp, clarsimp) done @@ -797,7 +797,7 @@ lemma bind_notification_invs: bind_notification tcbptr ntfnptr \\_. 
invs\" apply (simp add: bind_notification_def invs_def valid_state_def valid_pspace_def) - apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) + apply (rule bind_wp[OF _ get_simple_ko_sp]) apply (wp valid_irq_node_typ set_simple_ko_valid_objs simple_obj_set_prop_at valid_ioports_lift | clarsimp simp:idle_no_ex_cap split del: if_split)+ apply (intro conjI; @@ -920,9 +920,6 @@ lemma (in Tcb_AI) decode_set_tls_base_wf: apply wpsimp done -declare alternativeE_wp[wp] -declare alternativeE_R_wp[wp] - (*FIXME Move up*) lemma OR_choice_E_weak_wp: "\P\ f \ g \Q\,- \ \P\ OR_choice b f g \Q\,-" apply (simp add: validE_R_def validE_def OR_choice_weak_wp) @@ -992,29 +989,8 @@ lemma (in Tcb_AI) decode_set_sched_params_wf[wp]: decode_set_sched_params args (ThreadCap t) slot excs \tcb_inv_wf\, -" by (wpsimp simp: decode_set_sched_params_def wp: check_prio_wp_weak whenE_throwError_wp) -definition - is_thread_control :: "tcb_invocation \ bool" -where - "is_thread_control tinv \ case tinv of tcb_invocation.ThreadControl a b c d e f g h \ True | _ \ False" - - -primrec - thread_control_target :: "tcb_invocation \ machine_word" -where - "thread_control_target (tcb_invocation.ThreadControl a b c d e f g h) = a" - -lemma is_thread_control_true[simp]: - "is_thread_control (tcb_invocation.ThreadControl a b c d e f g h)" - by (simp add: is_thread_control_def) - -lemma is_thread_control_def2: - "is_thread_control tinv = - (\target slot faultep prio mcp croot vroot buffer. - tinv = tcb_invocation.ThreadControl target slot faultep prio mcp croot vroot buffer)" - by (cases tinv, simp_all add: is_thread_control_def) - lemma decode_set_priority_is_tc[wp]: - "\\\ decode_set_priority args cap slot excs \\rv s. is_thread_control rv\,-" + "\\\ decode_set_priority args cap slot excs \\rv s. is_ThreadControl rv\,-" by (wpsimp simp: decode_set_priority_def) lemma decode_set_priority_inv[wp]: @@ -1056,7 +1032,7 @@ lemma (in Tcb_AI) decode_set_ipc_wf[wp]: done lemma decode_set_ipc_is_tc[wp]: - "\\\ decode_set_ipc_buffer args cap slot excaps \\rv s. is_thread_control rv\,-" + "\\\ decode_set_ipc_buffer args cap slot excaps \\rv s. is_ThreadControl rv\,-" apply (rule hoare_pre) apply (simp add: decode_set_ipc_buffer_def split_def split del: if_split @@ -1102,8 +1078,6 @@ lemma (in Tcb_AI) decode_set_space_wf[wp]: del: length_greater_0_conv) done - - lemma decode_set_space_inv[wp]: "\P\ decode_set_space args cap slot extras \\rv. P\" apply (simp add: decode_set_space_def whenE_def unlessE_def @@ -1112,32 +1086,29 @@ lemma decode_set_space_inv[wp]: apply (wp hoare_drop_imps | simp)+ done - lemma decode_set_space_is_tc[wp]: - "\\\ decode_set_space args cap slot extras \\rv s. is_thread_control rv\,-" + "\\\ decode_set_space args cap slot extras \\rv s. is_ThreadControl rv\,-" apply (rule hoare_pre) apply (simp add: decode_set_space_def whenE_def unlessE_def split del: if_split) - apply (wp | simp only: is_thread_control_true)+ + apply (wp | simp only: tcb_invocation.disc)+ done lemma decode_set_mcpriority_is_tc[wp]: - "\\\ decode_set_mcpriority args cap slot excs \\rv s. is_thread_control rv\,-" + "\\\ decode_set_mcpriority args cap slot excs \\rv s. is_ThreadControl rv\,-" by (wpsimp simp: decode_set_mcpriority_def) lemma decode_set_space_target[wp]: - "\\s. P (obj_ref_of cap)\ decode_set_space args cap slot extras \\rv s. P (thread_control_target rv)\,-" - apply (rule hoare_pre) - apply (simp add: decode_set_space_def whenE_def unlessE_def - split del: if_split) - apply (wp | simp only: thread_control_target.simps)+ - done + "\\s. 
P (obj_ref_of cap)\ + decode_set_space args cap slot extras + \\rv s. P (tc_target rv)\,-" + unfolding decode_set_space_def + by (wpsimp split_del: if_split) -(* FIXME: move *) +(* FIXME: move to lib and rename*) lemma boring_simp[simp]: "(if x then True else False) = x" by simp - lemma (in Tcb_AI) decode_tcb_conf_wf[wp]: "\(invs::('state_ext::state_ext) state\bool) and tcb_at t and cte_at slot and ex_cte_cap_to slot @@ -1151,14 +1122,14 @@ lemma (in Tcb_AI) decode_tcb_conf_wf[wp]: apply (clarsimp simp add: decode_tcb_configure_def Let_def) apply (rule hoare_pre) apply wp - apply (rule_tac Q'="\set_space s. tcb_inv_wf set_space s \ tcb_inv_wf set_params s - \ is_thread_control set_space \ is_thread_control set_params - \ thread_control_target set_space = t - \ cte_at slot s \ ex_cte_cap_to slot s" - in hoare_post_imp_R) - apply wp - apply (clarsimp simp: is_thread_control_def2 cong: option.case_cong) - apply (wp | simp add: whenE_def split del: if_split)+ + apply (rule_tac Q'="\set_space s. tcb_inv_wf set_space s \ tcb_inv_wf set_params s + \ is_ThreadControl set_space \ is_ThreadControl set_params + \ tc_target set_space = t + \ cte_at slot s \ ex_cte_cap_to slot s" + in hoare_strengthen_postE_R) + apply wp + apply (clarsimp simp: is_ThreadControl_def cong: option.case_cong) + apply (wp | simp add: whenE_def split del: if_split)+ apply (clarsimp simp: linorder_not_less val_le_length_Cons del: ballI) done @@ -1254,7 +1225,7 @@ lemma decode_tcb_inv_wf: \tcb_inv_wf\,-" apply (simp add: decode_tcb_invocation_def Let_def cong: if_cong split del: if_split) - apply (rule hoare_vcg_precond_impE_R) + apply (rule hoare_weaken_preE_R) apply wpc apply (wp decode_tcb_conf_wf decode_readreg_wf decode_writereg_wf decode_copyreg_wf @@ -1340,10 +1311,10 @@ lemma unbind_notification_sym_refs[wp]: unbind_notification a \\rv s. sym_refs (state_refs_of s)\" apply (simp add: unbind_notification_def) - apply (rule hoare_seq_ext [OF _ gbn_sp]) + apply (rule bind_wp [OF _ gbn_sp]) apply (case_tac ntfnptr, simp_all) apply (wp, simp) - apply (rule hoare_seq_ext [OF _ get_simple_ko_sp]) + apply (rule bind_wp [OF _ get_simple_ko_sp]) apply (wp | wpc | simp)+ apply (rule conjI) apply (fastforce simp: obj_at_def pred_tcb_at_def) diff --git a/proof/invariant-abstract/Untyped_AI.thy b/proof/invariant-abstract/Untyped_AI.thy index 6b542fbb03..fe00f63d8a 100644 --- a/proof/invariant-abstract/Untyped_AI.thy +++ b/proof/invariant-abstract/Untyped_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -35,7 +36,7 @@ primrec \ 'z::state_ext state \ bool" where "valid_untyped_inv_wcap (Retype slot reset ptr_base ptr ty us slots dev) - = (\co s. \sz idx. (cte_wp_at (\c. c = (cap.UntypedCap dev ptr_base sz idx) + = (\co s. \sz idx. (cte_wp_at (\c. c = (UntypedCap dev ptr_base sz idx) \ (co = None \ co = Some c)) slot s \ range_cover ptr sz (obj_bits_api ty us) (length slots) \ (idx \ unat (ptr - ptr_base) \ (reset \ ptr = ptr_base)) @@ -168,7 +169,7 @@ lemma compute_free_index_wp: lemma dui_inv[wp]: - "\P\ decode_untyped_invocation label args slot (cap.UntypedCap dev w n idx) cs \\rv. P\" + "\P\ decode_untyped_invocation label args slot (UntypedCap dev w n idx) cs \\rv. 
P\" apply (simp add: decode_untyped_invocation_def whenE_def split_def data_to_obj_type_def unlessE_def split del: if_split cong: if_cong) @@ -193,7 +194,7 @@ lemma map_ensure_empty: apply wp apply (simp add: mapME_x_def sequenceE_x_def) apply (unfold validE_R_def) - apply (rule seqE[rotated]) + apply (rule bindE_wp) apply (rule hoare_vcg_conj_liftE1) apply (fold sequenceE_x_def mapME_x_def)[1] apply (rule map_ensure_empty_cte_wp_at) @@ -257,7 +258,7 @@ lemma dui_sp_helper: apply (intro impI conjI) apply wpsimp apply (wp get_cap_wp) - apply (rule hoare_post_imp_R [where Q'="\rv. valid_objs and P"] + apply (rule hoare_strengthen_postE_R [where Q'="\rv. valid_objs and P"] ; wpsimp simp: cte_wp_at_caps_of_state) apply simp done @@ -267,11 +268,11 @@ locale Untyped_AI_arch = assumes data_to_obj_type_sp: "\P x. \P\ data_to_obj_type x \\ts (s::'state_ext state). ts \ ArchObject ASIDPoolObj \ P s\, -" assumes dui_inv_wf[wp]: - "\w sz idx slot cs label args dev.\invs and cte_wp_at ((=) (cap.UntypedCap dev w sz idx)) slot + "\w sz idx slot cs label args dev.\invs and cte_wp_at ((=) (UntypedCap dev w sz idx)) slot and (\(s::'state_ext state). \cap \ set cs. is_cnode_cap cap \ (\r\cte_refs cap (interrupt_irq_node s). ex_cte_cap_wp_to is_cnode_cap r s)) and (\s. \x \ set cs. s \ x)\ - decode_untyped_invocation label args slot (cap.UntypedCap dev w sz idx) cs + decode_untyped_invocation label args slot (UntypedCap dev w sz idx) cs \valid_untyped_inv\,-" assumes retype_ret_valid_caps_captable: "\ptr sz dev us n s.\pspace_no_overlap_range_cover ptr sz (s::'state_ext state) \ 0 < us \ range_cover ptr sz (obj_bits_api CapTableObject us) n \ ptr \ 0 @@ -535,7 +536,7 @@ end lemma cte_wp_at_range_cover: "\bits < word_bits; rv\ 2^ sz; invs s; - cte_wp_at ((=) (cap.UntypedCap dev w sz idx)) p s; + cte_wp_at ((=) (UntypedCap dev w sz idx)) p s; 0 < n; n \ unat ((2::machine_word) ^ sz - of_nat rv >> bits)\ \ range_cover (alignUp (w + of_nat rv) bits) sz bits n" apply (clarsimp simp: cte_wp_at_caps_of_state) @@ -566,7 +567,7 @@ lemma diff_neg_mask[simp]: lemma cte_wp_at_caps_descendants_range_inI: - "\ invs s;cte_wp_at (\c. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; + "\ invs s;cte_wp_at (\c. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; idx \ unat (ptr && mask sz);sz < word_bits \ \ descendants_range_in {ptr .. (ptr && ~~mask sz) + 2^sz - 1} cref s" apply (frule invs_mdb) apply (frule(1) le_mask_le_2p) @@ -672,7 +673,7 @@ lemma alignUp_eq: lemma map_ensure_empty_wp: "\ \s. (\x\set xs. 
cte_wp_at ((=) NullCap) x s) \ P () s \ mapME_x ensure_empty xs \P\, -" - by (rule hoare_post_imp_R, rule map_ensure_empty, simp) + by (rule hoare_strengthen_postE_R, rule map_ensure_empty, simp) lemma cases_imp_eq: "((P \ Q \ R) \ (\ P \ Q \ S)) = (Q \ (P \ R) \ (\ P \ S))" @@ -716,15 +717,15 @@ lemma of_nat_shiftR: lemma valid_untypedD: - "\ s \ cap.UntypedCap dev ptr bits idx; kheap s p = Some ko; pspace_aligned s\ \ - obj_range p ko \ cap_range (cap.UntypedCap dev ptr bits idx) \ {} \ - obj_range p ko \ cap_range (cap.UntypedCap dev ptr bits idx) - \ obj_range p ko \ usable_untyped_range (cap.UntypedCap dev ptr bits idx) = {}" + "\ s \ UntypedCap dev ptr bits idx; kheap s p = Some ko; pspace_aligned s\ \ + obj_range p ko \ cap_range (UntypedCap dev ptr bits idx) \ {} \ + obj_range p ko \ cap_range (UntypedCap dev ptr bits idx) + \ obj_range p ko \ usable_untyped_range (UntypedCap dev ptr bits idx) = {}" by (clarsimp simp: valid_untyped_def valid_cap_def cap_range_def obj_range_def) (meson order_trans) lemma pspace_no_overlap_detype': - "\ s \ cap.UntypedCap dev ptr bits idx; pspace_aligned s; valid_objs s \ + "\ s \ UntypedCap dev ptr bits idx; pspace_aligned s; valid_objs s \ \ pspace_no_overlap {ptr .. ptr + 2 ^ bits - 1} (detype {ptr .. ptr + 2 ^ bits - 1} s)" apply (clarsimp simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff Int_atLeastAtMost atLeastatMost_empty_iff @@ -739,7 +740,7 @@ lemma pspace_no_overlap_detype': done lemma pspace_no_overlap_detype: - "\ s \ cap.UntypedCap dev ptr bits idx; pspace_aligned s; valid_objs s \ + "\ s \ UntypedCap dev ptr bits idx; pspace_aligned s; valid_objs s \ \ pspace_no_overlap_range_cover ptr bits (detype {ptr .. ptr + 2 ^ bits - 1} s)" apply (drule(2) pspace_no_overlap_detype'[rotated]) apply (drule valid_cap_aligned) @@ -1393,8 +1394,8 @@ lemma set_zip_helper: lemma ex_cte_cap_protects: - "\ ex_cte_cap_wp_to P p s; cte_wp_at ((=) (cap.UntypedCap dev ptr bits idx)) p' s; - descendants_range_in S p' s; untyped_children_in_mdb s; S\ untyped_range (cap.UntypedCap dev ptr bits idx); + "\ ex_cte_cap_wp_to P p s; cte_wp_at ((=) (UntypedCap dev ptr bits idx)) p' s; + descendants_range_in S p' s; untyped_children_in_mdb s; S\ untyped_range (UntypedCap dev ptr bits idx); valid_global_refs s \ \ fst p \ S" apply (drule ex_cte_cap_to_obj_ref_disj, erule disjE) @@ -1581,7 +1582,7 @@ crunch mdb[wp]: do_machine_op "\s. P (cdt s)" lemmas dmo_valid_cap[wp] = valid_cap_typ [OF do_machine_op_obj_at] lemma delete_objects_pspace_no_overlap[wp]: - "\\s. (\dev idx. s \ (cap.UntypedCap dev ptr bits idx)) + "\\s. (\dev idx. s \ (UntypedCap dev ptr bits idx)) \ pspace_aligned s \ valid_objs s \ (S = {ptr .. ptr + 2 ^ bits - 1})\ delete_objects ptr bits \\_. pspace_no_overlap S\" @@ -1666,7 +1667,7 @@ lemma caps_overlap_reserved_def2: lemma set_cap_valid_mdb_simple: "\\s. valid_objs s \ valid_mdb s \ descendants_range_in {ptr .. ptr+2^sz - 1} cref s \ cte_wp_at (\c. is_untyped_cap c \ cap_bits c = sz \ obj_ref_of c = ptr \ cap_is_device c = dev) cref s\ - set_cap (cap.UntypedCap dev ptr sz idx) cref + set_cap (UntypedCap dev ptr sz idx) cref \\rv s'. 
valid_mdb s'\" apply (simp add: valid_mdb_def) apply (rule hoare_pre) @@ -1681,8 +1682,8 @@ lemma set_cap_valid_mdb_simple: fix s f r bits dev assume obj:"valid_objs s" assume mdb:"untyped_mdb (cdt s) (caps_of_state s)" - assume cstate:"caps_of_state s cref = Some (cap.UntypedCap dev r bits f)" (is "?m cref = Some ?srccap") - show "untyped_mdb (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + assume cstate:"caps_of_state s cref = Some (UntypedCap dev r bits f)" (is "?m cref = Some ?srccap") + show "untyped_mdb (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply (rule untyped_mdb_update_free_index [where capa = ?srccap and m = "caps_of_state s" and src = cref, unfolded free_index_update_def,simplified,THEN iffD2]) @@ -1690,12 +1691,12 @@ lemma set_cap_valid_mdb_simple: done assume inc: "untyped_inc (cdt s) (caps_of_state s)" assume drange: "descendants_range_in {r..r + 2 ^ bits - 1} cref s" - have untyped_range_simp: "untyped_range (cap.UntypedCap dev r bits f) = untyped_range (cap.UntypedCap dev r bits idx)" + have untyped_range_simp: "untyped_range (UntypedCap dev r bits f) = untyped_range (UntypedCap dev r bits idx)" by simp note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex - show "untyped_inc (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + show "untyped_inc (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using inc cstate drange apply (unfold untyped_inc_def) apply (intro allI impI) @@ -1762,15 +1763,15 @@ lemma set_cap_valid_mdb_simple: apply simp+ done assume "ut_revocable (is_original_cap s) (caps_of_state s)" - thus "ut_revocable (is_original_cap s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "ut_revocable (is_original_cap s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using cstate by (fastforce simp: ut_revocable_def) assume "valid_arch_mdb (is_original_cap s) (caps_of_state s)" - thus "valid_arch_mdb (is_original_cap s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "valid_arch_mdb (is_original_cap s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using cstate by (fastforce elim!: valid_arch_mdb_untypeds) assume "reply_caps_mdb (cdt s) (caps_of_state s)" - thus "reply_caps_mdb (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "reply_caps_mdb (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using cstate apply (simp add: reply_caps_mdb_def del: split_paired_All) apply (intro allI impI conjI) @@ -1781,7 +1782,7 @@ lemma set_cap_valid_mdb_simple: apply clarsimp done assume "reply_masters_mdb (cdt s) (caps_of_state s)" - thus "reply_masters_mdb (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "reply_masters_mdb (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply (simp add: reply_masters_mdb_def del: split_paired_All) apply (intro allI impI ballI) apply (erule exE) @@ -1793,8 +1794,8 @@ lemma set_cap_valid_mdb_simple: assume misc: "mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" "descendants_inc (cdt s) (caps_of_state s)" - "caps_of_state s cref = Some (cap.UntypedCap dev r bits f)" - thus "descendants_inc (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + "caps_of_state s cref = Some (UntypedCap dev r bits f)" + thus "descendants_inc (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply - apply (erule 
descendants_inc_minor) apply (clarsimp simp: swp_def cte_wp_at_caps_of_state) @@ -1809,7 +1810,7 @@ lemma set_free_index_valid_pspace_simple: \ descendants_range_in {ptr .. ptr+2^sz - 1} cref s \ cte_wp_at (\c. is_untyped_cap c \ cap_bits c = sz \ obj_ref_of c = ptr) cref s \ idx \ 2^ sz\ - set_cap (cap.UntypedCap dev ptr sz idx) cref + set_cap (UntypedCap dev ptr sz idx) cref \\rv s'. valid_pspace s'\" apply (clarsimp simp: valid_pspace_def) apply (wp set_cap_valid_objs update_cap_iflive set_cap_zombies') @@ -1843,9 +1844,9 @@ lemma set_untyped_cap_refs_respects_device_simple: lemma set_untyped_cap_caps_overlap_reserved: "\\s. invs s \ S \ {ptr..ptr + 2 ^ sz - 1} \ - usable_untyped_range (cap.UntypedCap dev ptr sz idx') \ S = {} \ - descendants_range_in S cref s \ cte_wp_at ((=) (cap.UntypedCap dev ptr sz idx)) cref s\ - set_cap (cap.UntypedCap dev ptr sz idx') cref + usable_untyped_range (UntypedCap dev ptr sz idx') \ S = {} \ + descendants_range_in S cref s \ cte_wp_at ((=) (UntypedCap dev ptr sz idx)) cref s\ + set_cap (UntypedCap dev ptr sz idx') cref \\rv s. caps_overlap_reserved S s\" apply (unfold caps_overlap_reserved_def) apply wp @@ -1981,7 +1982,7 @@ lemma descendants_range_in_subseteq: lemma cte_wp_at_pspace_no_overlapI: "\invs s; - cte_wp_at (\c. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; + cte_wp_at (\c. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; idx \ unat (ptr && mask sz); sz < word_bits\ \ pspace_no_overlap_range_cover ptr sz s" apply (clarsimp simp: cte_wp_at_caps_of_state) @@ -2015,7 +2016,7 @@ lemma cte_wp_at_pspace_no_overlapI: lemma descendants_range_caps_no_overlapI: - "\invs s; cte_wp_at ((=) (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) cref s; + "\invs s; cte_wp_at ((=) (UntypedCap dev (ptr && ~~ mask sz) sz idx)) cref s; descendants_range_in {ptr .. (ptr && ~~ mask sz) +2^sz - 1} cref s\ \ caps_no_overlap ptr sz s" apply (frule invs_mdb) apply (clarsimp simp: valid_mdb_def cte_wp_at_caps_of_state) @@ -2054,7 +2055,7 @@ lemma shiftr_then_mask_commute: lemma cte_wp_at_caps_no_overlapI: - "\ invs s;cte_wp_at (\c. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; + "\ invs s;cte_wp_at (\c. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; idx \ unat (ptr && mask sz);sz < word_bits \ \ caps_no_overlap ptr sz s" apply (frule invs_mdb) apply (frule(1) le_mask_le_2p) @@ -2176,7 +2177,7 @@ lemma subset_stuff[simp]: done lemma cte_wp_at: - "cte_wp_at ((=) (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) cref s" + "cte_wp_at ((=) (UntypedCap dev (ptr && ~~ mask sz) sz idx)) cref s" using vui by (clarsimp simp: cte_wp_at_caps_of_state) @@ -2209,7 +2210,7 @@ proof - by (rule descendants_range_in_subseteq[OF _ subset_stuff]) qed -lemma vc[simp] : "s \cap.UntypedCap dev (ptr && ~~ mask sz) sz idx" +lemma vc[simp] : "s \UntypedCap dev (ptr && ~~ mask sz) sz idx" using misc cte_wp_at apply (clarsimp simp: cte_wp_at_caps_of_state) apply (erule caps_of_state_valid) @@ -2290,7 +2291,7 @@ lemma slots_invD: "\x. 
x \ set slots \ done lemma usable_range_disjoint: - "usable_untyped_range (cap.UntypedCap dev (ptr && ~~ mask sz) sz + "usable_untyped_range (UntypedCap dev (ptr && ~~ mask sz) sz (unat ((ptr && mask sz) + of_nat (length slots) * 2 ^ obj_bits_api tp us))) \ {ptr..ptr + of_nat (length slots) * 2 ^ obj_bits_api tp us - 1} = {}" proof - @@ -2312,7 +2313,7 @@ lemma usable_range_disjoint: qed lemma detype_locale:"ptr && ~~ mask sz = ptr - \ detype_locale (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s" + \ detype_locale (UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s" using cte_wp_at descendants_range misc by (simp add:detype_locale_def descendants_range_def2 blah invs_untyped_children) @@ -2391,9 +2392,9 @@ crunch tcb[wp]: create_cap "tcb_at t" lemma valid_untyped_cap_inc: - "\s \ cap.UntypedCap dev (ptr&&~~ mask sz) sz idx; + "\s \ UntypedCap dev (ptr&&~~ mask sz) sz idx; idx \ unat (ptr && mask sz); range_cover ptr sz sb n\ - \ s \ cap.UntypedCap dev (ptr && ~~ mask sz) sz + \ s \ UntypedCap dev (ptr && ~~ mask sz) sz (unat ((ptr && mask sz) + of_nat n * 2 ^ sb))" apply (clarsimp simp: valid_cap_def cap_aligned_def valid_untyped_def simp del: usable_untyped_range.simps) apply (intro conjI allI impI) @@ -2416,8 +2417,8 @@ lemma valid_untyped_cap_inc: (* FIXME: move maybe *) lemma tcb_cap_valid_untyped_cong: - "tcb_cap_valid (cap.UntypedCap dev1 a1 b1 c) = - tcb_cap_valid (cap.UntypedCap dev2 a2 b2 c2)" + "tcb_cap_valid (UntypedCap dev1 a1 b1 c) = + tcb_cap_valid (UntypedCap dev2 a2 b2 c2)" apply (rule ext)+ apply (clarsimp simp:tcb_cap_valid_def valid_ipc_buffer_cap_def split:option.splits) apply (simp add: tcb_cap_cases_def @@ -2426,7 +2427,7 @@ lemma tcb_cap_valid_untyped_cong: done lemma tcb_cap_valid_untyped_to_thread: - "tcb_cap_valid (cap.UntypedCap dev a1 b1 c) = + "tcb_cap_valid (UntypedCap dev a1 b1 c) = tcb_cap_valid (cap.ThreadCap 0)" apply (rule ext)+ apply (clarsimp simp:tcb_cap_valid_def valid_ipc_buffer_cap_def split:option.splits) @@ -2456,9 +2457,9 @@ lemma ex_nonz_cap_to_overlap: lemma detype_valid_untyped: - "\invs s; detype S s \ cap.UntypedCap dev ptr sz idx1; + "\invs s; detype S s \ UntypedCap dev ptr sz idx1; {ptr .. ptr + 2 ^ sz - 1} \ S; idx2 \ 2 ^ sz\ - \ detype S s \ cap.UntypedCap dev ptr sz idx2" + \ detype S s \ UntypedCap dev ptr sz idx2" apply (clarsimp simp: detype_def valid_cap_def valid_untyped_def cap_aligned_def) apply (drule_tac x = p in spec) apply clarsimp @@ -2671,7 +2672,7 @@ lemmas unat_of_nat_word_bits = unat_of_nat_eq[where 'a = machine_word_len, unfolded word_bits_len_of, simplified] lemma caps_of_state_pspace_no_overlapD: - "\ caps_of_state s cref = Some (cap.UntypedCap dev ptr sz idx); invs s; + "\ caps_of_state s cref = Some (UntypedCap dev ptr sz idx); invs s; idx < 2 ^ sz \ \ pspace_no_overlap_range_cover (ptr + of_nat idx) sz s" apply (frule(1) caps_of_state_valid) @@ -2692,7 +2693,7 @@ lemma set_untyped_cap_invs_simple: \ pspace_no_overlap_range_cover ptr sz s \ invs s \ cte_wp_at (\c. is_untyped_cap c \ cap_bits c = sz \ cap_is_device c = dev\ obj_ref_of c = ptr) cref s \ idx \ 2^ sz\ - set_cap (cap.UntypedCap dev ptr sz idx) cref + set_cap (UntypedCap dev ptr sz idx) cref \\rv s. invs s\" apply (rule hoare_name_pre_state) apply (clarsimp simp:cte_wp_at_caps_of_state invs_def valid_state_def) @@ -2733,7 +2734,7 @@ lemma reset_untyped_cap_invs_etc: (is "\invs and valid_untyped_inv_wcap ?ui (Some ?cap) and ct_active and _\ ?f \\_. invs and ?vu2 and ct_active and ?psp\, \\_. 
invs\") apply (simp add: reset_untyped_cap_def) - apply (rule hoare_vcg_seqE[rotated]) + apply (rule bindE_wp_fwd) apply ((wp (once) get_cap_sp)+)[1] apply (rule hoare_name_pre_stateE) apply (clarsimp simp: cte_wp_at_caps_of_state bits_of_def split del: if_split) @@ -2748,8 +2749,8 @@ lemma reset_untyped_cap_invs_etc: apply (frule(1) caps_of_state_pspace_no_overlapD, simp+) apply (simp add: word_bw_assocs field_simps) apply (clarsimp simp: free_index_of_def split del: if_split) - apply (rule_tac B="\_. invs and valid_untyped_inv_wcap ?ui (Some ?cap) - and ct_active and ?psp" in hoare_vcg_seqE[rotated]) + apply (rule_tac Q'="\_. invs and valid_untyped_inv_wcap ?ui (Some ?cap) + and ct_active and ?psp" in bindE_wp_fwd) apply clarsimp apply (rule hoare_pre) apply (wp hoare_vcg_ex_lift hoare_vcg_const_Ball_lift @@ -2789,7 +2790,7 @@ lemma reset_untyped_cap_invs_etc: apply (simp add: valid_cap_def) apply simp apply (clarsimp simp: bits_of_def free_index_of_def) - apply (rule hoare_pre, rule hoare_post_impErr, + apply (rule hoare_pre, rule hoare_strengthen_postE, rule_tac P="\i. invs and ?psp and ct_active and valid_untyped_inv_wcap ?ui (Some (UntypedCap dev ptr sz (if i = 0 then idx else (bd - i) * 2 ^ resetChunkBits)))" and E="\_. invs" @@ -3123,11 +3124,6 @@ lemma create_cap_ex_cap_to[wp]: apply (clarsimp elim!: cte_wp_at_weakenE) done -(* FIXME: move *) -lemma hoare_vcg_split_lift[wp]: - "\P\ f x y \Q\ \ \P\ case (x, y) of (a, b) \ f a b \Q\" - by simp - lemma create_cap_no_cap[wp]: "\\s. (\p'. \ cte_wp_at P p' s) \ \ P (default_cap tp oref sz dev)\ create_cap tp sz p dev (cref, oref) @@ -3278,7 +3274,7 @@ lemma (in Untyped_AI_nonempty_table) create_caps_invs: apply (simp add: mapM_x_def sequence_x_def) apply wpsimp apply (clarsimp simp add: mapM_x_def sequence_x_def) - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply assumption apply (thin_tac "valid a b c" for a b c) apply (rule hoare_pre) @@ -3368,8 +3364,8 @@ lemma retype_region_refs_distinct[wp]: lemma unsafe_protected: - "\ cte_wp_at P p s; cte_wp_at ((=) (cap.UntypedCap dev ptr bits idx)) p' s; - descendants_range_in S p' s; invs s; S \ untyped_range (cap.UntypedCap dev ptr bits idx); + "\ cte_wp_at P p s; cte_wp_at ((=) (UntypedCap dev ptr bits idx)) p' s; + descendants_range_in S p' s; invs s; S \ untyped_range (UntypedCap dev ptr bits idx); \cap. P cap \ cap \ cap.NullCap \ \ fst p \ S" apply (rule ex_cte_cap_protects) @@ -3381,8 +3377,8 @@ lemma unsafe_protected: done lemma cap_to_protected: - "\ ex_cte_cap_wp_to P p s; cte_wp_at ((=) (cap.UntypedCap dev ptr bits idx)) p' s; - descendants_range (cap.UntypedCap dev ptr bits idx) p' s; invs s \ + "\ ex_cte_cap_wp_to P p s; cte_wp_at ((=) (UntypedCap dev ptr bits idx)) p' s; + descendants_range (UntypedCap dev ptr bits idx) p' s; invs s \ \ ex_cte_cap_wp_to P p (detype {ptr .. ptr + 2 ^ bits - 1} s)" apply (clarsimp simp: ex_cte_cap_wp_to_def, simp add: detype_def descendants_range_def2) apply (intro exI conjI, assumption) @@ -3607,14 +3603,14 @@ lemma invoke_untyp_invs': assumes init_arch_Q: "\tp slot reset sz slots ptr n us refs dev. ui = Invocations_A.Retype slot reset (ptr && ~~ mask sz) ptr tp us slots dev \ \Q and post_retype_invs tp refs - and cte_wp_at (\c. \idx. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) slot + and cte_wp_at (\c. \idx. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) slot and K (refs = retype_addrs ptr tp n us \ range_cover ptr sz (obj_bits_api tp us) n)\ init_arch_objects tp ptr n us refs \\_. 
Q\" assumes retype_region_Q: "\ptr us tp slot reset sz slots dev. ui = Invocations_A.Retype slot reset (ptr && ~~ mask sz) ptr tp us slots dev \ \\s. invs s \ Q s - \ cte_wp_at (\c. \idx. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) slot s + \ cte_wp_at (\c. \idx. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) slot s \ pspace_no_overlap {ptr..(ptr && ~~ mask sz) + (2 ^ sz - 1)} s \ range_cover ptr sz (obj_bits_api tp us) (length slots) \ (tp = CapTableObject \ 0 < us) @@ -3627,7 +3623,7 @@ lemma invoke_untyp_invs': \ (case ui of Invocations_A.Retype slot reset ptr' ptr tp us slots dev' \ cref = slot \ dev' = dev) \ idx \ 2^ sz\ - set_cap (cap.UntypedCap dev ptr sz idx) cref + set_cap (UntypedCap dev ptr sz idx) cref \\rv. Q\" assumes reset_Q: "\Q'\ reset_untyped_cap (case ui of Retype src_slot _ _ _ _ _ _ _ \ src_slot) \\_. Q\" shows @@ -3671,7 +3667,7 @@ lemma invoke_untyp_invs': note neg_mask_add_mask = word_plus_and_or_coroll2[symmetric,where w = "mask sz" and t = ptr,symmetric] note set_cap_free_index_invs_spec = set_free_index_invs[where - cap = "cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx)", + cap = "UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx)", unfolded free_index_update_def free_index_of_def,simplified] have slot_not_in: "(cref, oref) \ set slots" @@ -3690,14 +3686,14 @@ lemma invoke_untyp_invs': show "\(=) s\ invoke_untyped ?ui \\rv s. invs s \ Q s\, \\_ s. invs s \ Q s\" using cover apply (simp add:mapM_x_def[symmetric] invoke_untyped_def) - apply (rule_tac B="\_ s. invs s \ Q s \ ct_active s + apply (rule_tac Q'="\_ s. invs s \ Q s \ ct_active s \ valid_untyped_inv_wcap ?ui (Some (UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s \ (reset \ pspace_no_overlap {ptr && ~~ mask sz..(ptr && ~~ mask sz) + 2 ^ sz - 1} s) - " in hoare_vcg_seqE[rotated]) + " in bindE_wp_fwd) apply (simp only: whenE_def) apply (rule hoare_pre, wp) - apply (rule hoare_post_impErr, rule combine_validE, + apply (rule hoare_strengthen_postE, rule combine_validE, rule reset_untyped_cap_invs_etc, rule valid_validE, rule reset_Q') apply (clarsimp simp only: pred_conj_def if_True, blast) apply (wp | simp)+ @@ -3836,7 +3832,7 @@ qed lemmas invoke_untyp_invs[wp] = invoke_untyp_invs'[where Q=\ and Q'=\, simplified, - simplified hoare_post_taut, simplified] + simplified hoare_TrueI, simplified] lemmas invoke_untyped_Q = invoke_untyp_invs'[THEN validE_valid, THEN hoare_conjD2[unfolded pred_conj_def]] @@ -3910,7 +3906,7 @@ lemma update_untyped_cap_valid_objs: lemma valid_untyped_pspace_no_overlap: "pspace_no_overlap {ptr .. ptr + 2 ^ sz - 1} s - \ valid_untyped (cap.UntypedCap dev ptr sz idx) s" + \ valid_untyped (UntypedCap dev ptr sz idx) s" apply (clarsimp simp: valid_untyped_def split del: if_split) apply (drule(1) pspace_no_overlap_obj_range) apply simp diff --git a/proof/invariant-abstract/VSpaceEntries_AI.thy b/proof/invariant-abstract/VSpaceEntries_AI.thy index 7f29675409..cd04d30324 100644 --- a/proof/invariant-abstract/VSpaceEntries_AI.thy +++ b/proof/invariant-abstract/VSpaceEntries_AI.thy @@ -148,7 +148,6 @@ lemma mapME_x_wp: shows "set xs \ S \ \P\ mapME_x f xs \\rv. P\, \E\" apply (subst mapME_x_mapME) apply wp - apply simp apply (rule mapME_wp) apply (rule x) apply assumption+ @@ -156,27 +155,7 @@ lemma mapME_x_wp: lemmas mapME_x_wp' = mapME_x_wp [OF _ subset_refl] -lemma hoare_vcg_all_liftE: - "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. 
Q x rv s\,\E\" - by (fastforce simp: validE_def valid_def split: sum.splits) - -lemma hoare_vcg_const_Ball_liftE: - "\ \x. x \ S \ \P x\ f \Q x\,\E\; \\s. True\ f \\r s. True\, \E\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\,\E\" - by (fastforce simp: validE_def valid_def split: sum.splits) - -lemma hoare_post_conjE: - "\ \ P \ a \ Q \,\E\; \ P \ a \ R \,\E\ \ \ \ P \ a \ Q And R \,\E\" - by (clarsimp simp: validE_def valid_def split_def bipred_conj_def - split: sum.splits) - -lemma hoare_vcg_conj_liftE: - assumes x: "\P\ f \Q\,\E\" - assumes y: "\P'\ f \Q'\,\E\" - shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,\E\" - apply (subst bipred_conj_def[symmetric], rule hoare_post_conjE) - apply (rule hoare_vcg_precond_impE [OF x], simp) - apply (rule hoare_vcg_precond_impE [OF y], simp) - done +lemmas hoare_post_conjE = hoare_validE_pred_conj (* FIXME: eliminate *) lemma mapME_x_accumulate_checks: assumes P: "\x. x \ set xs \ \Q\ f x \\rv. P x\, \E\" @@ -194,11 +173,11 @@ lemma mapME_x_accumulate_checks: show ?case apply (simp add: mapME_x_Cons) apply wp - apply (rule hoare_vcg_conj_liftE) + apply (rule hoare_vcg_conj_liftE_weaker) apply (wp mapME_x_wp' P P' hoare_vcg_const_Ball_liftE | simp add:Q - | rule hoare_post_impErr[OF P])+ + | rule hoare_strengthen_postE[OF P])+ using Cons.prems apply fastforce apply (wp Cons.hyps) @@ -208,7 +187,7 @@ lemma mapME_x_accumulate_checks: using Cons.prems apply fastforce apply (rule hoare_pre) - apply (rule hoare_vcg_conj_liftE) + apply (rule hoare_vcg_conj_liftE_weaker) apply (wp Cons.prems| simp)+ done qed diff --git a/proof/invariant-abstract/X64/ArchAcc_AI.thy b/proof/invariant-abstract/X64/ArchAcc_AI.thy index bc74bb83cd..2e3d15e0e7 100644 --- a/proof/invariant-abstract/X64/ArchAcc_AI.thy +++ b/proof/invariant-abstract/X64/ArchAcc_AI.thy @@ -247,7 +247,7 @@ lemma lookup_pd_slot_inv: "\P\ lookup_pd_slot pd vptr \\_. P\" apply (simp add: lookup_pd_slot_def) apply (rule hoare_pre) - apply (wp get_pdpte_wp lookup_pdpt_slot_inv hoare_vcg_all_lift_R hoare_drop_imps| wpc)+ + apply (wp get_pdpte_wp lookup_pdpt_slot_inv hoare_vcg_all_liftE_R hoare_drop_imps| wpc)+ apply clarsimp done @@ -255,7 +255,7 @@ lemma lookup_pt_slot_inv: "\P\ lookup_pt_slot pd vptr \\_. 
P\" apply (simp add: lookup_pt_slot_def) apply (rule hoare_pre) - apply (wp get_pde_wp lookup_pd_slot_inv hoare_vcg_all_lift_R hoare_drop_imps| wpc)+ + apply (wp get_pde_wp lookup_pd_slot_inv hoare_vcg_all_liftE_R hoare_drop_imps| wpc)+ apply clarsimp done @@ -1210,17 +1210,16 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) done lemma valid_machine_state_heap_updI: -assumes vm : "valid_machine_state s" -assumes tyat : "typ_at type p s" -shows - " a_type obj = type \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + assumes vm : "valid_machine_state s" + assumes tyat : "typ_at type p s" + shows "a_type obj = type \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: valid_machine_state_def) subgoal for p apply (rule valid_machine_stateE[OF vm,where p = p]) @@ -1355,7 +1354,7 @@ lemma vs_ref_lvl_obj_same_type: lemma valid_vspace_obj_kheap_upd: "\typ_at (a_type (ArchObj obj)) ptr s; valid_vspace_obj ao s\ - \ valid_vspace_obj ao (s\kheap := kheap s(ptr \ ArchObj obj)\)" + \ valid_vspace_obj ao (s\kheap := (kheap s)(ptr \ ArchObj obj)\)" apply (cases ao, simp_all) apply (fastforce simp: a_type_simps obj_at_def valid_pte_def)+ apply (clarsimp) @@ -1421,7 +1420,7 @@ lemma set_object_valid_vspace_objs[wp]: apply simp apply simp apply (rule vs_lookup1_wellformed.wellformed_lookup_axioms - [where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s,simplified]) + [where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s,simplified]) apply (clarsimp simp: obj_at_def cong:vs_ref_lvl_obj_same_type) apply clarsimp apply (rule valid_vspace_obj_kheap_upd) @@ -1486,7 +1485,7 @@ lemma set_object_valid_vs_lookup[wp]: apply simp apply simp apply (rule vs_lookup_pages1_wellformed.wellformed_lookup_axioms - [where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s, simplified]) + [where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s, simplified]) apply (clarsimp simp: obj_at_def cong:vs_ref_lvl_obj_same_type) apply (clarsimp simp: fun_upd_def) apply (subst caps_of_state_after_update) @@ -1591,7 +1590,7 @@ lemma valid_global_refsD: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1642,7 +1641,7 @@ lemma vs_lookup_pages_pt_eq: lemma valid_vspace_obj_same_type: "\valid_vspace_obj ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) @@ -2800,7 +2799,7 @@ lemma cap_refs_respects_device_region_dmo: lemma machine_op_lift_device_state[wp]: "\\ms. P (device_state ms)\ machine_op_lift f \\_ ms. 
P (device_state ms)\" - by (clarsimp simp: machine_op_lift_def NonDetMonad.valid_def bind_def + by (clarsimp simp: machine_op_lift_def Nondet_VCG.valid_def bind_def machine_rest_lift_def gets_def simpler_modify_def get_def return_def select_def ignore_failure_def select_f_def split: if_splits) diff --git a/proof/invariant-abstract/X64/ArchArch_AI.thy b/proof/invariant-abstract/X64/ArchArch_AI.thy index e59aa81e47..3a8cbc85fa 100644 --- a/proof/invariant-abstract/X64/ArchArch_AI.thy +++ b/proof/invariant-abstract/X64/ArchArch_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -69,7 +70,7 @@ lemma check_vp_wpR [wp]: check_vp_alignment sz w \P\, -" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (simp add: vmsz_aligned_def) done @@ -77,7 +78,7 @@ lemma check_vp_wpR [wp]: lemma check_vp_inv: "\P\ check_vp_alignment sz w \\_. P\" apply (simp add: check_vp_alignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply simp done @@ -285,7 +286,7 @@ locale asid_update = Arch + fixes ap asid s s' assumes ko: "ko_at (ArchObj (ASIDPool Map.empty)) ap s" assumes empty: "x64_asid_table (arch_state s) asid = None" - defines "s' \ s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\" + defines "s' \ s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\" context asid_update begin @@ -401,7 +402,7 @@ context Arch begin global_naming X64 lemma valid_arch_state_strg: "valid_arch_state s \ ap \ ran (x64_asid_table (arch_state s)) \ asid_pool_at ap s \ - valid_arch_state (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_arch_state (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" apply (clarsimp simp: valid_arch_state_def) apply (clarsimp simp: valid_asid_table_def ran_def) apply (fastforce intro!: inj_on_fun_updI) @@ -415,7 +416,7 @@ lemma valid_vs_lookup_at_upd_strg: (\ptr cap. caps_of_state s ptr = Some cap \ ap \ obj_refs cap \ vs_cap_ref cap = Some [VSRef (ucast asid) None]) \ - valid_vs_lookup (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_vs_lookup (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -488,7 +489,7 @@ lemma valid_table_caps_asid_upd [iff]: lemma vs_asid_ref_upd: "([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') - (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\) + (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid_high_bits_of asid \ ap)\\) = (if asid_high_bits_of asid' = asid_high_bits_of asid then ap' = ap else ([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') s)" @@ -513,7 +514,7 @@ lemma cap_insert_simple_arch_caps_ap: and K (cap = ArchObjectCap (ASIDPoolCap ap asid)) \ cap_insert cap src dest \\rv s. 
valid_arch_caps (s\arch_state := arch_state s - \x64_asid_table := x64_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \x64_asid_table := (x64_asid_table (arch_state s))(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg) @@ -525,7 +526,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -546,7 +547,7 @@ lemma valid_asid_map_asid_upd_strg: "valid_asid_map s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ x64_asid_table (arch_state s) asid = None \ - valid_asid_map (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_asid_map (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -559,7 +560,7 @@ lemma valid_vspace_objs_asid_upd_strg: "valid_vspace_objs s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ x64_asid_table (arch_state s) asid = None \ - valid_vspace_objs (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_vspace_objs (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -572,7 +573,7 @@ lemma valid_global_objs_asid_upd_strg: "valid_global_objs s \ ko_at (ArchObj (arch_kernel_obj.ASIDPool Map.empty)) ap s \ x64_asid_table (arch_state s) asid = None \ - valid_global_objs (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_global_objs (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" by clarsimp lemma safe_parent_cap_is_device: @@ -603,7 +604,7 @@ lemma cap_insert_ap_invs: x64_asid_table (arch_state s) (asid_high_bits_of asid) = None)\ cap_insert cap src dest \\rv s. invs (s\arch_state := arch_state s - \x64_asid_table := (x64_asid_table \ arch_state) s(asid_high_bits_of asid \ ap)\\)\" + \x64_asid_table := ((x64_asid_table \ arch_state) s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def) apply (strengthen valid_arch_state_strg valid_vspace_objs_asid_upd_strg valid_asid_map_asid_upd_strg ) @@ -757,11 +758,11 @@ proof - \\rv s. invs (s\arch_state := arch_state s - \x64_asid_table := (x64_asid_table \ arch_state) s + \x64_asid_table := ((x64_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\) \ Q (s\arch_state := arch_state s - \x64_asid_table := (x64_asid_table \ arch_state) s + \x64_asid_table := ((x64_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\)\" apply (wp cap_insert_ap_invs) apply simp @@ -872,7 +873,7 @@ proof - qed -lemmas aci_invs[wp] = aci_invs'[where Q=\,simplified hoare_post_taut, OF refl refl refl TrueI TrueI TrueI,simplified] +lemmas aci_invs[wp] = aci_invs'[where Q=\,simplified hoare_TrueI, OF refl refl refl TrueI TrueI TrueI,simplified] lemma set_ioport_mask_tcb_cap_valid[wp]: "\tcb_cap_valid a b\ set_ioport_mask f l bl \\rv. 
tcb_cap_valid a b\" @@ -1010,7 +1011,7 @@ lemma create_mapping_entries_inv [wp]: crunch_ignore (add: select_ext) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps) + (wp: crunch_wps select_ext_weak_wp simp: crunch_simps) lemma create_mappings_empty [wp]: @@ -1127,7 +1128,7 @@ lemma create_mapping_entries_parent_for_refs: apply (rule hoare_gen_asmE) apply (cases pgsz; simp add: vmsz_aligned_def) by (wp, - rule hoare_post_imp_R, + rule hoare_strengthen_postE_R, rule lookup_pt_slot_cap_to lookup_pd_slot_cap_to lookup_pdpt_slot_cap_to, elim exEI, clarsimp simp: cte_wp_at_caps_of_state parent_for_refs_def, @@ -1141,7 +1142,7 @@ lemma find_vspace_for_asid_ref_offset_voodoo: \\rv. (ref \ (rv + (get_pml4_index v << word_size_bits) && ~~ mask pml4_bits))\,-" apply (rule hoare_gen_asmE) apply (rule_tac Q'="\rv s. is_aligned rv pml4_bits \ (ref \ rv) s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply simp apply (rule hoare_pre, wp find_vspace_for_asid_lookup_ref) apply simp @@ -1264,7 +1265,7 @@ lemma find_vspace_for_asid_lookup_vspace_wp: "\ \s. valid_vspace_objs s \ (\pm. vspace_at_asid asid pm s \ page_map_l4_at pm s \ (\\ pm) s \ Q pm s) \ find_vspace_for_asid asid \ Q \, -" (is "\ \s. ?v s \ (\pm. ?vpm pm s \ Q pm s) \ ?f \ Q \, -") - apply (rule_tac Q'="\rv s. ?vpm rv s \ (\pm. ?vpm pm s \ Q pm s)" in hoare_post_imp_R) + apply (rule_tac Q'="\rv s. ?vpm rv s \ (\pm. ?vpm pm s \ Q pm s)" in hoare_strengthen_postE_R) apply wpsimp apply (simp | fast)+ done @@ -1575,7 +1576,7 @@ lemma arch_decode_inv_wf[wp]: apply (simp add: arch_decode_invocation_def Let_def split_def cong: if_cong split del: if_split) apply (rule hoare_pre) - apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_wp select_ext_weak_wp + apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_ext_weak_wp | wpc | simp add: valid_arch_inv_def valid_apinv_def)+)[1] apply (simp add: valid_arch_inv_def valid_apinv_def) apply (intro allI impI ballI) @@ -1615,11 +1616,11 @@ lemma arch_decode_inv_wf[wp]: and (\s. descendants_of (snd (excaps!0)) (cdt s) = {}) and cte_wp_at (\c. \idx. c = UntypedCap False frame pageBits idx) (snd (excaps!0)) and (\s. x64_asid_table (arch_state s) free = None)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: lookup_target_slot_def) apply wp apply (clarsimp simp: cte_wp_at_def asid_wf_high) - apply (wp ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp | wpc | simp)+ + apply (wp ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp | wpc | simp)+ apply clarsimp apply (rule conjI, fastforce) apply (cases excaps, simp) diff --git a/proof/invariant-abstract/X64/ArchCNodeInv_AI.thy b/proof/invariant-abstract/X64/ArchCNodeInv_AI.thy index 8b9ae68c45..35e7e4b9ce 100644 --- a/proof/invariant-abstract/X64/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/X64/ArchCNodeInv_AI.thy @@ -556,7 +556,7 @@ context Arch begin global_naming X64 lemma post_cap_delete_pre_is_final_cap': "\rv s'' rva s''a s. 
\valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -646,7 +646,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) @@ -815,7 +815,7 @@ qed lemmas rec_del_invs'[CNodeInv_AI_assms] = rec_del_invs'' [where Q=\, - simplified hoare_post_taut pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] + simplified hoare_TrueI pred_conj_def simp_thms, OF TrueI TrueI TrueI TrueI, simplified] end diff --git a/proof/invariant-abstract/X64/ArchCSpacePre_AI.thy b/proof/invariant-abstract/X64/ArchCSpacePre_AI.thy index bee60b2a66..2353cdb6d8 100644 --- a/proof/invariant-abstract/X64/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/X64/ArchCSpacePre_AI.thy @@ -127,7 +127,7 @@ lemma masked_as_full_test_function_stuff[simp]: lemma same_aobject_as_commute: "same_aobject_as x y \ same_aobject_as y x" - by (cases x; cases y; clarsimp simp: same_aobject_as_def) + by (cases x; cases y; clarsimp) lemmas wellformed_cap_simps = wellformed_cap_def [simplified wellformed_acap_def, split_simps cap.split arch_cap.split] @@ -175,7 +175,7 @@ lemma valid_arch_mdb_simple: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by (auto simp: valid_arch_mdb_def ioport_revocable_def is_cap_revocable_def arch_is_cap_revocable_def is_simple_cap_def safe_parent_for_def is_cap_simps) @@ -217,14 +217,14 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by (auto simp: valid_arch_mdb_def ioport_revocable_def is_cap_simps) lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" apply (clarsimp simp: valid_arch_mdb_def ioport_revocable_def is_cap_simps is_cap_revocable_def arch_is_cap_revocable_def) by (clarsimp simp: is_derived_def is_cap_simps is_derived_arch_def split: if_split_asm) @@ -233,7 +233,7 @@ lemma valid_arch_mdb_free_index_update': "\s capa. 
\valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by (auto simp: valid_arch_mdb_def ioport_revocable_def is_cap_simps is_cap_revocable_def arch_is_cap_revocable_def free_index_update_def split: cap.splits) @@ -247,7 +247,7 @@ lemma valid_arch_mdb_weak_derived_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by (auto simp: valid_arch_mdb_def ioport_revocable_def split: if_split_asm simp del: split_paired_All) @@ -255,7 +255,7 @@ lemma valid_arch_mdb_weak_derived_update: lemma valid_arch_mdb_tcb_cnode_update: "valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb ((is_original_cap s) ((t, tcb_cnode_index 2) := True)) - (caps_of_state s((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" + ((caps_of_state s)((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" by (clarsimp simp: valid_arch_mdb_def ioport_revocable_def) lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -295,10 +295,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. 
valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by (clarsimp simp: valid_arch_mdb_def ioport_revocable_def)+ lemma same_object_as_ioports: diff --git a/proof/invariant-abstract/X64/ArchCSpace_AI.thy b/proof/invariant-abstract/X64/ArchCSpace_AI.thy index bbfa3ce641..a42d297b63 100644 --- a/proof/invariant-abstract/X64/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/X64/ArchCSpace_AI.thy @@ -186,20 +186,20 @@ lemma is_derived_is_cap: (* FIXME: move to CSpace_I near lemma vs_lookup1_tcb_update *) lemma vs_lookup_pages1_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages1 s" + vs_lookup_pages1 (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) (* FIXME: move to CSpace_I near lemma vs_lookup_tcb_update *) lemma vs_lookup_pages_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages s" by (clarsimp simp add: vs_lookup_pages_def vs_lookup_pages1_tcb_update) (* FIXME: move to CSpace_I near lemma vs_lookup1_cnode_update *) lemma vs_lookup_pages1_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ CNode m cs')\) = + vs_lookup_pages1 (s\kheap := (kheap s)(p \ CNode m cs')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) @@ -207,7 +207,7 @@ lemma vs_lookup_pages1_cnode_update: (* FIXME: move to CSpace_I near lemma vs_lookup_cnode_update *) lemma vs_lookup_pages_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages (s\kheap := kheap s(p \ CNode n cs')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ CNode n cs')\) = vs_lookup_pages s" by (clarsimp simp: vs_lookup_pages_def dest!: vs_lookup_pages1_cnode_update[where m=n and cs'=cs']) diff --git a/proof/invariant-abstract/X64/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/X64/ArchDetSchedAux_AI.thy index d6b9504fac..760b927c57 100644 --- a/proof/invariant-abstract/X64/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/X64/ArchDetSchedAux_AI.thy @@ -12,8 +12,8 @@ context Arch begin global_naming X64 named_theorems DetSchedAux_AI_assms -crunch exst[wp]: set_object, init_arch_objects "\s. P (exst s)" (wp: crunch_wps hoare_unless_wp) -crunch ct[wp]: init_arch_objects "\s. P (cur_thread s)" (wp: crunch_wps hoare_unless_wp) +crunch exst[wp]: set_object, init_arch_objects "\s. P (exst s)" (wp: crunch_wps unless_wp) +crunch ct[wp]: init_arch_objects "\s. P (cur_thread s)" (wp: crunch_wps unless_wp) crunch valid_etcbs[wp, DetSchedAux_AI_assms]: init_arch_objects valid_etcbs (wp: valid_etcbs_lift) crunch ct[wp, DetSchedAux_AI_assms]: invoke_untyped "\s. P (cur_thread s)" @@ -99,9 +99,9 @@ crunch ct[wp]: perform_asid_control_invocation "\s. P (cur_thread s)" crunch idle_thread[wp]: perform_asid_control_invocation "\s. 
P (idle_thread s)" -crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: static_imp_wp) +crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: hoare_weak_lift_imp) -crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: static_imp_wp) +crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: hoare_weak_lift_imp) crunch schedact[wp]: perform_asid_control_invocation "\s :: det_ext state. P (scheduler_action s)" (wp: crunch_wps simp: detype_def detype_ext_def wrap_ext_det_ext_ext_def cap_insert_ext_def ignore: freeMemory) diff --git a/proof/invariant-abstract/X64/ArchDetSchedSchedule_AI.thy b/proof/invariant-abstract/X64/ArchDetSchedSchedule_AI.thy index 41f4d78eac..579035e2a1 100644 --- a/proof/invariant-abstract/X64/ArchDetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/X64/ArchDetSchedSchedule_AI.thy @@ -87,7 +87,7 @@ crunch valid_sched [wp, DetSchedSchedule_AI_assms]: (simp: crunch_simps ignore: ) crunch exst[wp]: set_vm_root "\s. P (exst s)" - (wp: crunch_wps hoare_whenE_wp simp: crunch_simps) + (wp: crunch_wps whenE_wp simp: crunch_simps) crunch ct_in_cur_domain_2[wp]: set_vm_root "\s. ct_in_cur_domain_2 thread (idle_thread s) (scheduler_action s) (cur_domain s) (ekheap s)" @@ -215,7 +215,7 @@ lemma set_asid_pool_valid_sched[wp]: by (wp hoare_drop_imps valid_sched_lift | simp add: set_asid_pool_def)+ crunch ct_not_in_q[wp]: set_object ct_not_in_q - (wp: crunch_wps hoare_drop_imps hoare_unless_wp select_inv mapM_wp + (wp: crunch_wps hoare_drop_imps unless_wp select_inv mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: tcb_sched_action) lemma flush_table_ct_not_in_q[wp]: "\ct_not_in_q\ flush_table a b c d \\rv. ct_not_in_q\" @@ -223,7 +223,7 @@ lemma flush_table_ct_not_in_q[wp]: "\ct_not_in_q\ flush_table a crunch ct_not_in_q [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete ct_not_in_q - (wp: crunch_wps hoare_drop_imps hoare_unless_wp select_inv mapM_wp + (wp: crunch_wps hoare_drop_imps unless_wp select_inv mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: tcb_sched_action) lemma flush_table_valid_etcbs[wp]: "\valid_etcbs\ flush_table a b c d \\rv. valid_etcbs\" @@ -235,7 +235,7 @@ lemma set_object_valid_etcbs[wp]: crunch valid_etcbs [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete valid_etcbs - (wp: hoare_drop_imps hoare_unless_wp select_inv mapM_x_wp mapM_wp subset_refl + (wp: hoare_drop_imps unless_wp select_inv mapM_x_wp mapM_wp subset_refl if_fun_split simp: crunch_simps ignore: set_object set_object thread_set) lemma flush_table_simple_sched_action[wp]: "\simple_sched_action\ flush_table a b c d \\rv. simple_sched_action\" @@ -243,7 +243,7 @@ lemma flush_table_simple_sched_action[wp]: "\simple_sched_action\valid_sched\ flush_table a b c d \\rv. 
valid_sched\" diff --git a/proof/invariant-abstract/X64/ArchDeterministic_AI.thy b/proof/invariant-abstract/X64/ArchDeterministic_AI.thy index 9289d23fee..7d83866c3c 100644 --- a/proof/invariant-abstract/X64/ArchDeterministic_AI.thy +++ b/proof/invariant-abstract/X64/ArchDeterministic_AI.thy @@ -36,7 +36,7 @@ global_interpretation Deterministic_AI_1?: Deterministic_AI_1 context Arch begin global_naming X64 crunch valid_list[wp]: invoke_untyped valid_list - (wp: crunch_wps preemption_point_inv' hoare_unless_wp mapME_x_wp' + (wp: crunch_wps preemption_point_inv' unless_wp mapME_x_wp' simp: mapM_x_def_bak crunch_simps) crunch valid_list[wp]: invoke_irq_control, store_pde, store_pte, store_pdpte, store_pml4e, diff --git a/proof/invariant-abstract/X64/ArchDetype_AI.thy b/proof/invariant-abstract/X64/ArchDetype_AI.thy index 9eaf867feb..f1b28872b2 100644 --- a/proof/invariant-abstract/X64/ArchDetype_AI.thy +++ b/proof/invariant-abstract/X64/ArchDetype_AI.thy @@ -82,7 +82,7 @@ next qed lemma empty_fail_freeMemory [Detype_AI_asms]: "empty_fail (freeMemory ptr bits)" - by (simp add: freeMemory_def mapM_x_mapM ef_storeWord) + by (fastforce simp: freeMemory_def mapM_x_mapM ef_storeWord) lemma region_in_kernel_window_detype[simp]: diff --git a/proof/invariant-abstract/X64/ArchEmptyFail_AI.thy b/proof/invariant-abstract/X64/ArchEmptyFail_AI.thy index c806a6fa4a..dab6b318d6 100644 --- a/proof/invariant-abstract/X64/ArchEmptyFail_AI.thy +++ b/proof/invariant-abstract/X64/ArchEmptyFail_AI.thy @@ -37,7 +37,7 @@ context Arch begin global_naming X64 crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: handle_fault (simp: kernel_object.splits option.splits arch_cap.splits cap.splits endpoint.splits bool.splits list.splits thread_state.splits split_def catch_def sum.splits - Let_def wp: zipWithM_x_empty_fail) + Let_def) lemma port_out_empty_fail[simp, intro!]: assumes ef: "\a. 
empty_fail (oper a)" @@ -80,12 +80,11 @@ lemma arch_decode_X64ASIDControlMakePool_empty_fail: apply (simp split: arch_cap.splits) apply (intro conjI impI) apply (simp add: split_def) - apply wp - apply simp + apply (wp (once), simp) apply (subst bindE_assoc[symmetric]) apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def bind_def return_def - returnOk_def lift_def liftE_def fail_def gets_def get_def assert_def select_def split: if_split_asm) + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_ext_def bindE_def bind_def return_def + returnOk_def lift_def liftE_def fail_def gets_def get_def assert_def select_def split: if_split_asm) apply (simp add: Let_def split: cap.splits arch_cap.splits option.splits bool.splits | wp | intro conjI impI allI)+ by (clarsimp simp add: decode_page_invocation_def decode_page_table_invocation_def decode_page_directory_invocation_def decode_pdpt_invocation_def @@ -110,9 +109,9 @@ lemma arch_decode_X64ASIDPoolAssign_empty_fail: apply ((simp | wp)+)[1] apply (subst bindE_assoc[symmetric]) apply (rule empty_fail_bindE) - subgoal by (fastforce simp: empty_fail_def whenE_def throwError_def select_def bindE_def - bind_def return_def returnOk_def lift_def liftE_def select_ext_def - gets_def get_def assert_def fail_def) + subgoal by (force simp: empty_fail_def whenE_def throwError_def select_def bindE_def + bind_def return_def returnOk_def lift_def liftE_def select_ext_def + gets_def get_def assert_def fail_def) apply (clarsimp simp: decode_page_invocation_def decode_page_table_invocation_def decode_page_directory_invocation_def decode_pdpt_invocation_def | wp | intro conjI)+ done @@ -142,9 +141,7 @@ context Arch begin global_naming X64 lemma flush_table_empty_fail[simp, wp]: "empty_fail (flush_table a b c d)" unfolding flush_table_def - apply simp - apply (wp | wpc | simp)+ - done + by wpsimp crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: maskInterrupt, empty_slot, finalise_cap, preemption_point, @@ -152,7 +149,7 @@ crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: maskInterrupt, empty_slo (simp: Let_def catch_def split_def OR_choiceE_def mk_ef_def option.splits endpoint.splits notification.splits thread_state.splits sum.splits cap.splits arch_cap.splits kernel_object.splits vmpage_size.splits pde.splits bool.splits list.splits - forM_x_def empty_fail_mapM_x set_object_def + set_object_def ignore: nativeThreadUsingFPU_impl switchFpuOwner_impl) crunch (empty_fail) empty_fail[wp, EmptyFail_AI_assms]: setRegister, setNextPC diff --git a/proof/invariant-abstract/X64/ArchFinalise_AI.thy b/proof/invariant-abstract/X64/ArchFinalise_AI.thy index 4231614a3b..9a1b9f1cb5 100644 --- a/proof/invariant-abstract/X64/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/X64/ArchFinalise_AI.thy @@ -522,7 +522,7 @@ lemma suspend_unlive': crunch obj_at[wp]: fpu_thread_delete "\s. P' (obj_at P p s)" - (wp: hoare_whenE_wp simp: crunch_simps) + (wp: whenE_wp simp: crunch_simps) lemma (* fpu_thread_delete_no_cap_to_obj_ref *)[wp,Finalise_AI_asms]: "\no_cap_to_obj_with_diff_ref cap S\ @@ -666,7 +666,7 @@ lemma flush_table_pred_tcb_at: "\\s. pred_tcb_at proj P t s\s. 
P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch pred_tcb_at[wp]: arch_finalise_cap "pred_tcb_at proj P t" (simp: crunch_simps set_arch_obj_simps wp: crunch_wps set_aobject_pred_tcb_at @@ -739,7 +739,7 @@ lemma flush_table_empty: flush_table ac aa word b \\rv s. obj_at (empty_table (set (x64_global_pdpts (arch_state s)))) word s\" apply (clarsimp simp: flush_table_def set_vm_root_def) - apply (wp do_machine_op_obj_at hoare_whenE_wp mapM_x_wp' + apply (wp do_machine_op_obj_at whenE_wp mapM_x_wp' | wpc | simp | wps @@ -1004,11 +1004,11 @@ crunch caps_of_state [wp]: arch_finalise_cap "\s. P (caps_of_state s)" crunch obj_at[wp]: invalidate_page_structure_cache_asid, hw_asid_invalidate "\s. P' (obj_at P p s)" - (wp: hoare_whenE_wp simp: crunch_simps) + (wp: whenE_wp simp: crunch_simps) crunch x64_global_pdpts[wp]: invalidate_page_structure_cache_asid, hw_asid_invalidate "\s. P' (x64_global_pdpts (arch_state s))" - (wp: hoare_whenE_wp simp: crunch_simps) + (wp: whenE_wp simp: crunch_simps) lemma delete_asid_empty_table_pml4: "\\s. page_map_l4_at word s @@ -1379,7 +1379,7 @@ lemma set_asid_pool_obj_at_ptr: lemma valid_arch_state_table_strg: "valid_arch_state s \ asid_pool_at p s \ Some p \ x64_asid_table (arch_state s) ` (dom (x64_asid_table (arch_state s)) - {x}) \ - valid_arch_state (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(x \ p)\\)" + valid_arch_state (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(x \ p)\\)" apply (clarsimp simp: valid_arch_state_def valid_asid_table_def ran_def) apply (rule conjI, fastforce) apply (erule inj_on_fun_upd_strongerI) @@ -1412,8 +1412,8 @@ lemma vs_lookup1_arch [simp]: lemma vs_lookup_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (erule vs_lookupE) apply clarsimp @@ -1445,8 +1445,8 @@ lemma vs_lookup_empty_table: lemma vs_lookup_pages_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (subst (asm) vs_lookup_pages_def) apply (clarsimp simp: Image_def) @@ -1481,7 +1481,7 @@ lemma set_asid_pool_empty_table_objs: set_asid_pool p Map.empty \\rv s. valid_vspace_objs (s\arch_state := arch_state s\x64_asid_table := - x64_asid_table (arch_state s)(asid_high_bits_of word2 \ p)\\)\" + (x64_asid_table (arch_state s))(asid_high_bits_of word2 \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vspace_objs_def @@ -1506,7 +1506,7 @@ lemma set_asid_pool_empty_table_lookup: set_asid_pool p Map.empty \\rv s. 
valid_vs_lookup (s\arch_state := arch_state s\x64_asid_table := - x64_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (x64_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vs_lookup_def @@ -1525,7 +1525,7 @@ lemma set_asid_pool_empty_table_lookup: lemma valid_ioports_asid_table_upd[iff]: "valid_ioports (s\arch_state := arch_state s - \x64_asid_table := x64_asid_table (arch_state s) + \x64_asid_table := (x64_asid_table (arch_state s)) (asid_high_bits_of base \ p)\\) = valid_ioports s" by (clarsimp simp: valid_ioports_def all_ioports_issued_def issued_ioports_def) @@ -1536,7 +1536,7 @@ lemma set_asid_pool_invs_table: \ (\p'. \ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\x s. invs (s\arch_state := arch_state s\x64_asid_table := - x64_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (x64_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def valid_arch_caps_def valid_asid_map_def) apply (wp valid_irq_node_typ set_asid_pool_typ_at set_asid_pool_empty_table_objs valid_ioports_lift diff --git a/proof/invariant-abstract/X64/ArchInterrupt_AI.thy b/proof/invariant-abstract/X64/ArchInterrupt_AI.thy index ac133c90da..5be572964a 100644 --- a/proof/invariant-abstract/X64/ArchInterrupt_AI.thy +++ b/proof/invariant-abstract/X64/ArchInterrupt_AI.thy @@ -20,8 +20,7 @@ where real_cte_at dest_slot and (\s. ioapic < x64_num_ioapics (arch_state s)) and K (minUserIRQ \ irq \ irq \ maxUserIRQ \ - pin < ioapicIRQLines \ level < 2 \ - polarity < 2))" + level < 2 \ polarity < 2))" | "arch_irq_control_inv_valid_real (IssueIRQHandlerMSI irq dest_slot src_slot bus dev func handle) = (cte_wp_at ((=) NullCap) dest_slot and cte_wp_at ((=) IRQControlCap) src_slot and diff --git a/proof/invariant-abstract/X64/ArchInvariants_AI.thy b/proof/invariant-abstract/X64/ArchInvariants_AI.thy index 6ae5c60955..884b521be6 100644 --- a/proof/invariant-abstract/X64/ArchInvariants_AI.thy +++ b/proof/invariant-abstract/X64/ArchInvariants_AI.thy @@ -5,7 +5,7 @@ *) theory ArchInvariants_AI -imports InvariantsPre_AI "Lib.Apply_Trace_Cmd" +imports InvariantsPre_AI "Eisbach_Tools.Apply_Trace_Cmd" begin section "Move this up" diff --git a/proof/invariant-abstract/X64/ArchIpc_AI.thy b/proof/invariant-abstract/X64/ArchIpc_AI.thy index ea5bd40c0d..cbbebda211 100644 --- a/proof/invariant-abstract/X64/ArchIpc_AI.thy +++ b/proof/invariant-abstract/X64/ArchIpc_AI.thy @@ -319,7 +319,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -435,7 +435,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (rule hoare_drop_imps) apply wp apply (subst ball_conj_distrib) - apply (wp get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wp get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift | simp)+ apply (rule hoare_strengthen_post[where Q = "\r s. cap_refs_respects_device_region s \ valid_objs s \ valid_mdb s \ obj_at (\ko. \tcb. 
ko = TCB tcb) t s"]) @@ -465,7 +465,7 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" apply (clarsimp simp: valid_arch_mdb_def ioport_revocable_def simp del: split_paired_All) apply (intro conjI impI allI) apply (simp del: split_paired_All) diff --git a/proof/invariant-abstract/X64/ArchKHeap_AI.thy b/proof/invariant-abstract/X64/ArchKHeap_AI.thy index 3791d752da..7c80e816ed 100644 --- a/proof/invariant-abstract/X64/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/X64/ArchKHeap_AI.thy @@ -398,7 +398,7 @@ lemma valid_global_objs_lift': apply (rule hoare_use_eq [where f="\s. x64_global_pds (arch_state s)", OF pds]) apply (rule hoare_use_eq [where f="\s. x64_global_pdpts (arch_state s)", OF pdpts]) apply (rule hoare_use_eq [where f="\s. x64_global_pml4 (arch_state s)", OF pml4]) - apply (wp obj ko emp hoare_vcg_const_Ball_lift hoare_ex_wp) + apply (wp obj ko emp hoare_vcg_const_Ball_lift hoare_vcg_ex_lift) apply (clarsimp simp: second_level_tables_def) done @@ -746,11 +746,11 @@ lemma store_pde_pred_tcb_at: "\pred_tcb_at proj P t\ store_pde ptr val \\rv. pred_tcb_at proj P t\" apply (simp add: store_pde_def set_pd_def set_object_def get_pd_def bind_assoc) - apply (rule hoare_seq_ext [OF _ get_object_sp]) - apply (case_tac x, simp_all) + apply (rule bind_wp [OF _ get_object_sp]) + apply (case_tac rv, simp_all) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all) - apply (rule hoare_seq_ext [OF _ get_object_sp]) + apply (rule bind_wp [OF _ get_object_sp]) apply wp apply (clarsimp simp: pred_tcb_at_def obj_at_def) done @@ -835,20 +835,20 @@ crunch device_state_inv: storeWord "\ms. P (device_state ms)" (* some hyp_ref invariants *) lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_ntfn_update: "\s ep val. 
typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -856,7 +856,7 @@ lemma state_hyp_refs_of_tcb_bound_ntfn_update: lemma state_hyp_refs_of_tcb_state_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -864,7 +864,7 @@ lemma state_hyp_refs_of_tcb_state_update: lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ k)\)" by (induction ao rule: arch_kernel_obj.induct; clarsimp simp: typ_at_same_type) @@ -878,7 +878,7 @@ lemma default_tcb_not_live: "\ live (TCB default_tcb)" lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) lemma valid_ioports_lift: diff --git a/proof/invariant-abstract/X64/ArchRetype_AI.thy b/proof/invariant-abstract/X64/ArchRetype_AI.thy index b41ea34ed0..00c1fac7ac 100644 --- a/proof/invariant-abstract/X64/ArchRetype_AI.thy +++ b/proof/invariant-abstract/X64/ArchRetype_AI.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -53,7 +54,7 @@ lemma retype_region_ret_folded [Retype_AI_assms]: apply (simp add:retype_addrs_def) done -lemmas [wp] = hoare_unless_wp +lemmas [wp] = unless_wp (* These also prove facts about copy_global_mappings *) crunch pspace_aligned[wp]: init_arch_objects "pspace_aligned" @@ -217,7 +218,7 @@ lemma mapM_x_store_pml4e_eq_kernel_mappings_restr: \ ko_at (ArchObj (PageMapL4 pmv')) pm' s \ pmv (ucast x) = pmv' (ucast x)))\" apply (induct xs rule: rev_induct, simp_all add: mapM_x_Nil mapM_x_append mapM_x_singleton) - apply (erule hoare_seq_ext[rotated]) + apply (erule bind_wp_fwd) apply (simp add: store_pml4e_def set_object_def set_arch_obj_simps cong: bind_cong) apply (wp get_object_wp get_pml4e_wp) apply (clarsimp simp: obj_at_def split del: if_split) @@ -271,7 +272,7 @@ lemma copy_global_equal_kernel_mappings_restricted: copy_global_mappings pm \\rv s. 
equal_kernel_mappings (s \ kheap := restrict_map (kheap s) (- S) \)\" apply (simp add: copy_global_mappings_def) - apply (rule hoare_seq_ext [OF _ gets_sp]) + apply (rule bind_wp [OF _ gets_sp]) apply (rule hoare_chain) apply (rule hoare_vcg_conj_lift) apply (rule_tac P="global_pm \ (insert pm S)" in hoare_vcg_prop) @@ -388,9 +389,9 @@ lemma copy_global_invs_mappings_restricted: apply (simp add: valid_pspace_def pred_conj_def) apply (rule hoare_conjI, wp copy_global_equal_kernel_mappings_restricted) apply (clarsimp simp: global_refs_def) - apply (rule valid_prove_more, rule hoare_vcg_conj_lift, rule hoare_TrueI) + apply (rule hoare_post_add, rule hoare_vcg_conj_lift, rule hoare_TrueI) apply (simp add: copy_global_mappings_def valid_pspace_def) - apply (rule hoare_seq_ext [OF _ gets_sp]) + apply (rule bind_wp [OF _ gets_sp]) apply (rule hoare_strengthen_post) apply (rule mapM_x_wp[where S="{x. get_pml4_index pptr_base \ x \ x < 2 ^ (pml4_bits - word_size_bits)}"]) @@ -459,7 +460,7 @@ lemma mapM_copy_global_invs_mappings_restricted: apply (fold all_invs_but_equal_kernel_mappings_restricted_eq) apply (induct pms, simp_all only: mapM_x_Nil mapM_x_Cons K_bind_def) apply (wp, simp) - apply (rule hoare_seq_ext, assumption, thin_tac "P" for P) + apply (rule bind_wp, assumption, thin_tac "P" for P) apply (rule hoare_conjI) apply (rule hoare_pre, rule copy_global_invs_mappings_restricted) apply clarsimp diff --git a/proof/invariant-abstract/X64/ArchTcb_AI.thy b/proof/invariant-abstract/X64/ArchTcb_AI.thy index 089ef5585f..e4da5e45f4 100644 --- a/proof/invariant-abstract/X64/ArchTcb_AI.thy +++ b/proof/invariant-abstract/X64/ArchTcb_AI.thy @@ -198,7 +198,6 @@ lemma cap_delete_no_cap_to_obj_asid[wp, Tcb_AI_asms]: apply (simp add: cap_delete_def no_cap_to_obj_with_diff_ref_ran_caps_form) apply wp - apply simp apply (rule use_spec) apply (rule rec_del_all_caps_in_range) apply (simp add: table_cap_ref_def[simplified, split_simps cap.split] @@ -231,19 +230,20 @@ lemma tc_invs[Tcb_AI_asms]: \\rv. 
invs\" apply (rule hoare_gen_asm)+ apply (simp add: split_def set_mcpriority_def cong: option.case_cong) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply wp - apply ((simp only: simp_thms - | (simp add: conj_comms del: hoare_True_E_R, + apply ((simp only: simp_thms cong: conj_cong + | (strengthen invs_strengthen)+ + | (simp add: conj_comms, strengthen imp_consequent[where Q="x = None" for x], simp cong: conj_cong) | rule wp_split_const_if wp_split_const_if_R - hoare_vcg_all_lift_R + hoare_vcg_all_liftE_R hoare_vcg_E_elim hoare_vcg_const_imp_lift_R hoare_vcg_R_conj | (wp out_invs_trivial case_option_wpE cap_delete_deletes cap_delete_valid_cap cap_insert_valid_cap out_cte_at cap_insert_cte_at cap_delete_cte_at out_valid_cap - hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R + hoare_vcg_const_imp_lift_R hoare_vcg_all_liftE_R thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_invs_trivial[OF ball_tcb_cap_casesI] hoare_vcg_all_lift thread_set_valid_cap out_emptyable @@ -257,10 +257,9 @@ lemma tc_invs[Tcb_AI_asms]: checked_insert_no_cap_to out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid - static_imp_wp static_imp_conj_wp)[1] + hoare_weak_lift_imp hoare_weak_lift_imp_conj)[1] | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def - del: hoare_True_E_R | wpc | strengthen use_no_cap_to_obj_asid_strg tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] diff --git a/proof/invariant-abstract/X64/ArchUntyped_AI.thy b/proof/invariant-abstract/X64/ArchUntyped_AI.thy index 8bc57d2909..6096407d12 100644 --- a/proof/invariant-abstract/X64/ArchUntyped_AI.thy +++ b/proof/invariant-abstract/X64/ArchUntyped_AI.thy @@ -389,10 +389,10 @@ lemma create_cap_ioports[wp, Untyped_AI_assms]: (* FIXME: move *) lemma simpler_store_pml4e_def: "store_pml4e p pde s = - (case kheap s (p && ~~ mask pml4_bits) of + (case (kheap s)(p && ~~ mask pml4_bits) of Some (ArchObj (PageMapL4 pml4)) => - ({((), s\kheap := (kheap s((p && ~~ mask pml4_bits) \ - (ArchObj (PageMapL4 (pml4(ucast (p && mask pml4_bits >> word_size_bits) := pde))))))\)}, False) + ({((), s\kheap := (kheap s)(p && ~~ mask pml4_bits \ + (ArchObj (PageMapL4 (pml4(ucast (p && mask pml4_bits >> word_size_bits) := pde)))))\)}, False) | _ => ({}, True))" apply (auto simp: store_pml4e_def set_object_def get_object_def simpler_gets_def assert_def a_type_simps return_def fail_def set_object_def get_def put_def bind_def get_pml4_def aa_type_simps @@ -476,7 +476,7 @@ lemma copy_global_mappings_nonempty_table: (set (second_level_tables (arch_state s)))) r s) \ valid_global_objs s \ valid_arch_state s \ pspace_aligned s\" apply (simp add: copy_global_mappings_def) - apply (rule hoare_seq_ext [OF _ gets_sp]) + apply (rule bind_wp [OF _ gets_sp]) apply (rule hoare_strengthen_post) apply (rule mapM_x_wp[where S="{x. get_pml4_index pptr_base \ x \ x < 2 ^ (pml4_bits - word_size_bits)}"]) @@ -538,7 +538,7 @@ lemma init_arch_objects_nonempty_table[Untyped_AI_assms, wp]: apply (rule hoare_gen_asm) apply (simp add: init_arch_objects_def split del: if_split) apply (rule hoare_pre) - apply (wp hoare_unless_wp | wpc | simp add: reserve_region_def second_level_tables_def)+ + apply (wp unless_wp | wpc | simp add: reserve_region_def second_level_tables_def)+ apply (clarsimp simp: obj_bits_api_def default_arch_object_def pml4_bits_def pageBits_def) done @@ -568,7 +568,7 @@ lemma set_pml4e_cte_wp_at_iin[wp]: crunch cte_wp_at_iin[wp]: init_arch_objects "\s. 
P (cte_wp_at (P' (interrupt_irq_node s)) p s)" - (ignore: clearMemory store_pml4e wp: crunch_wps hoare_unless_wp) + (ignore: clearMemory store_pml4e wp: crunch_wps unless_wp) lemmas init_arch_objects_ex_cte_cap_wp_to = init_arch_objects_excap diff --git a/proof/invariant-abstract/X64/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/X64/ArchVSpaceEntries_AI.thy index ffc6bdb715..ac5f5d0ddf 100644 --- a/proof/invariant-abstract/X64/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/X64/ArchVSpaceEntries_AI.thy @@ -141,7 +141,7 @@ lemma mapM_x_store_pte_updates: apply wp apply (clarsimp simp: obj_at_def fun_upd_idem) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: store_pte_def set_pt_def set_object_def word_size_bits_def) apply (wp get_pt_wp get_object_wp) @@ -205,7 +205,7 @@ lemma mapM_x_store_pde_updates: apply wp apply (clarsimp simp: obj_at_def fun_upd_idem) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: store_pde_def set_pd_def set_object_def word_size_bits_def) apply (wp get_pd_wp get_object_wp) @@ -318,7 +318,7 @@ crunch valid_vspace_objs'[wp]: set_simple_ko "valid_vspace_objs'" (wp: crunch_wps) crunch valid_vspace_objs'[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_vspace_objs'" - (wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object) + (wp: crunch_wps preemption_point_inv simp: crunch_simps unless_def ignore:set_object) lemma preemption_point_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ preemption_point \\rv. valid_vspace_objs'\" @@ -556,7 +556,7 @@ lemma invoke_untyped_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: perform_asid_pool_invocation, perform_asid_control_invocation "valid_vspace_objs'" (ignore: delete_objects set_object - wp: static_imp_wp select_wp crunch_wps + wp: hoare_weak_lift_imp crunch_wps simp: crunch_simps unless_def) lemma pte_range_interD: @@ -681,8 +681,6 @@ lemma perform_invocation_valid_vspace_objs'[wp]: \\rv. valid_vspace_objs'\" apply (cases i, simp_all) apply (wp send_signal_interrupt_states | simp)+ - apply (clarsimp simp:) - apply (wp | wpc | simp)+ apply (simp add: arch_perform_invocation_def) apply (wp | wpc | simp)+ apply (auto simp: valid_arch_inv_def ) @@ -705,7 +703,7 @@ lemma handle_invocation_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: activate_thread,switch_to_thread, handle_hypervisor_fault, switch_to_idle_thread, handle_call, handle_recv, handle_reply, handle_send, handle_yield, handle_interrupt "valid_vspace_objs'" - (simp: crunch_simps wp: crunch_wps alternative_valid select_wp OR_choice_weak_wp select_ext_weak_wp + (simp: crunch_simps wp: crunch_wps OR_choice_weak_wp select_ext_weak_wp ignore: without_preemption getActiveIRQ resetTimer ackInterrupt getFaultAddress OR_choice set_scheduler_action) @@ -716,8 +714,7 @@ lemma handle_event_valid_vspace_objs'[wp]: lemma schedule_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ schedule :: (unit,unit) s_monad \\_. 
valid_vspace_objs'\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma call_kernel_valid_vspace_objs'[wp]: diff --git a/proof/invariant-abstract/X64/ArchVSpace_AI.thy b/proof/invariant-abstract/X64/ArchVSpace_AI.thy index 2b84ed8f94..7bbcbdeac1 100644 --- a/proof/invariant-abstract/X64/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/X64/ArchVSpace_AI.thy @@ -432,7 +432,7 @@ lemma find_vspace_for_asid_lookup_ref: lemma find_vspace_for_asid_lookup[wp]: "\\\ find_vspace_for_asid asid \\pd. \\ pd\,-" - apply (rule hoare_post_imp_R, rule find_vspace_for_asid_lookup_ref) + apply (rule hoare_strengthen_postE_R, rule find_vspace_for_asid_lookup_ref) apply auto done @@ -447,7 +447,7 @@ proof - \\pd. pspace_aligned and page_map_l4_at pd\, -" by wpsimp show ?thesis - apply (rule hoare_post_imp_R, rule x) + apply (rule hoare_strengthen_postE_R, rule x) apply clarsimp apply (erule page_map_l4_pml4e_atI) prefer 2 @@ -886,7 +886,7 @@ lemma asid_wf_0: lemma svr_invs [wp]: "\invs\ set_vm_root t' \\_. invs\" apply (simp add: set_vm_root_def) - apply (wp hoare_whenE_wp + apply (wp whenE_wp | wpc | simp add: split_def if_apply_def2 cong: conj_cong if_cong | strengthen valid_cr3_make_cr3)+ @@ -1627,7 +1627,7 @@ lemma update_aobj_not_reachable: apply (rule_tac x = "(aa, baa)" in bexI[rotated]) apply assumption apply (simp add: fun_upd_def[symmetric]) - apply (rule_tac s4 = s in vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := kheap s(p \ ArchObj aobj)\" for s + apply (rule_tac s4 = s in vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := (kheap s)(p \ ArchObj aobj)\" for s ,simplified]) apply (clarsimp simp: lookup_refs_def vs_lookup_pages1_on_heap_obj_def vs_refs_pages_def image_def obj_at_def graph_of_def pde_ref_pages_def Image_def split: if_split_asm pde.split_asm) @@ -2098,11 +2098,11 @@ lemma unmap_page_vs_lookup_pages_pre: note ref_simps[simp] = vs_cap_ref_simps vs_ref_pages_simps note ucast_simps[simp] = up_ucast_inj_eq ucast_up_ucast mask_asid_low_bits_ucast_ucast ucast_ucast_id get_index_neq note [wp_comb del] = hoare_vcg_conj_lift - note [wp_comb] = hoare_post_comb_imp_conj hoare_vcg_precond_imp hoare_vcg_conj_lift - hoare_vcg_precond_impE[OF valid_validE] - hoare_vcg_precond_impE_R[OF valid_validE_R] - hoare_vcg_precond_impE - hoare_vcg_precond_impE_R + note [wp_comb] = hoare_post_comb_imp_conj hoare_weaken_pre hoare_vcg_conj_lift + hoare_weaken_preE[OF valid_validE] + hoare_weaken_preE_R[OF valid_validE_R] + hoare_weaken_preE + hoare_weaken_preE_R show ?thesis apply (clarsimp simp: unmap_page_def vs_cap_ref_simps) @@ -2516,7 +2516,7 @@ lemma mapM_x_swp_store_empty_pt': apply (induct slots, simp_all add: mapM_x_Nil mapM_x_Cons) apply wp apply (clarsimp simp: obj_at_def empty_table_def fun_eq_iff) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "\P\ f \Q\" for P f Q) apply (simp add: store_pte_def set_object_def set_arch_obj_simps) apply (wp get_object_wp | simp) @@ -2535,7 +2535,7 @@ lemma mapM_x_swp_store_empty_pd': apply (induct slots, simp_all add: mapM_x_Nil mapM_x_Cons) apply wp apply (clarsimp simp: obj_at_def empty_table_def fun_eq_iff) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "\P\ f \Q\" for P f Q) apply (simp add: store_pde_def set_object_def set_arch_obj_simps) apply (wp get_object_wp | simp) @@ -2569,7 +2569,7 @@ lemma mapM_x_swp_store_empty_pdpt': apply (induct slots, simp_all add: mapM_x_Nil 
mapM_x_Cons) apply wp apply (clarsimp simp: obj_at_def empty_table_def fun_eq_iff) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "\P\ f \Q\" for P f Q) apply (simp add: store_pdpte_def set_object_def set_arch_obj_simps) apply (wp get_object_wp | simp) @@ -2838,7 +2838,7 @@ lemma lookup_pages_shrink_store_pdpte: apply (simp add: vs_lookup_pages_def) apply (drule_tac s1 = s in lookup_bound_estimate[OF vs_lookup_pages1_is_wellformed_lookup, rotated -1]) apply (simp add: fun_upd_def[symmetric]) - apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s ptr obj + apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s ptr obj ,simplified]) apply (clarsimp simp: lookup_refs_def vs_lookup_pages1_on_heap_obj_def vs_refs_pages_def image_def obj_at_def graph_of_def pdpte_ref_pages_def split: if_split_asm pde.split_asm) @@ -2852,7 +2852,7 @@ lemma lookup_pages_shrink_store_pde: apply (simp add: vs_lookup_pages_def) apply (drule_tac s1 = s in lookup_bound_estimate[OF vs_lookup_pages1_is_wellformed_lookup, rotated -1]) apply (simp add: fun_upd_def[symmetric]) - apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s ptr obj + apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s ptr obj ,simplified]) apply (clarsimp simp: lookup_refs_def vs_lookup_pages1_on_heap_obj_def vs_refs_pages_def image_def obj_at_def graph_of_def pde_ref_pages_def split: if_split_asm pde.split_asm) @@ -2866,7 +2866,7 @@ lemma lookup_pages_shrink_store_pte: apply (simp add: vs_lookup_pages_def) apply (drule_tac s1 = s in lookup_bound_estimate[OF vs_lookup_pages1_is_wellformed_lookup, rotated -1]) apply (simp add: fun_upd_def[symmetric]) - apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s ptr obj + apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s ptr obj ,simplified]) apply (clarsimp simp: lookup_refs_def vs_lookup_pages1_on_heap_obj_def vs_refs_pages_def image_def obj_at_def graph_of_def pde_ref_pages_def split: if_split_asm pde.split_asm) @@ -3309,7 +3309,7 @@ lemma unmap_page_invs[wp]: apply (wpc | wp | strengthen imp_consequent)+ apply ((wp store_pde_invs store_pte_invs unlessE_wp do_machine_op_global_refs_inv get_pde_wp hoare_vcg_all_lift find_vspace_for_asid_lots get_pte_wp store_pdpte_invs get_pdpte_wp - hoare_vcg_all_lift_R + hoare_vcg_all_liftE_R | wpc | simp add: flush_all_def pdpte_ref_pages_def if_apply_def2 | strengthen not_in_global_refs_vs_lookup not_in_global_refs_vs_lookup invs_valid_vs_lookup diff --git a/proof/invariant-abstract/X64/Machine_AI.thy b/proof/invariant-abstract/X64/Machine_AI.thy index a6ea07bbfb..38f01f809e 100644 --- a/proof/invariant-abstract/X64/Machine_AI.thy +++ b/proof/invariant-abstract/X64/Machine_AI.thy @@ -17,7 +17,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. 
no_irq m" wpc_helper_no_irq @@ -56,7 +56,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -83,13 +83,13 @@ lemma det_getRestartPC: "det getRestartPC" lemma det_setNextPC: "det (setNextPC p)" by (simp add: setNextPC_def det_setRegister) - +(* FIXME empty_fail: make all empty_fail [intro!, wp], and non-conditional ones [simp] *) lemma ef_loadWord: "empty_fail (loadWord x)" - by (simp add: loadWord_def) + by (fastforce simp: loadWord_def) lemma ef_storeWord: "empty_fail (storeWord x y)" - by (simp add: storeWord_def) + by (fastforce simp: storeWord_def) lemma no_fail_getRestartPC: "no_fail \ getRestartPC" @@ -175,7 +175,7 @@ lemma no_fail_getActiveIRQ[wp]: "no_fail \ (getActiveIRQ in_kernel)" apply (simp add: getActiveIRQ_def) apply (rule no_fail_pre) - apply (wp non_fail_select) + apply wp apply simp done @@ -184,7 +184,7 @@ definition "irq_state_independent P \ \f s. P s \ lemma getActiveIRQ_inv [wp]: "\irq_state_independent P\ \ \P\ getActiveIRQ in_kernel \\rv. P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -287,7 +287,7 @@ lemma no_irq_seq [wp]: "\ no_irq f; \x. no_irq (g x) \ \ no_irq (f >>= g)" apply (subst no_irq_def) apply clarsimp - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (wp|simp)+ done @@ -369,7 +369,7 @@ lemma getActiveIRQ_le_maxIRQ': getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply clarsimp apply (rule ccontr) apply (simp add: linorder_not_le) @@ -379,14 +379,13 @@ lemma getActiveIRQ_le_maxIRQ': lemma getActiveIRQ_neq_Some0xFF': "\\\ getActiveIRQ in_kernel \\rv s. rv \ Some 0x3FF\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma getActiveIRQ_neq_non_kernel: "\\\ getActiveIRQ True \\rv s. rv \ Some ` non_kernel_IRQs \" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply auto done @@ -407,16 +406,14 @@ lemma empty_fail_initL2Cache: "empty_fail initL2Cache" lemma empty_fail_clearMemory [simp, intro!]: "\a b. 
empty_fail (clearMemory a b)" - by (simp add: clearMemory_def mapM_x_mapM ef_storeWord) + by (fastforce simp: clearMemory_def mapM_x_mapM ef_storeWord) lemma getFaultAddress_ef[simp,wp]: "empty_fail getFaultAddress" by (simp add: getFaultAddress_def) -(* FIXME x64: move *) lemma ioapicMapPinToVector_ef[simp,wp]: "empty_fail (ioapicMapPinToVector a b c d e)" by (simp add: ioapicMapPinToVector_def) -(* FIXME x64: move *) lemma invalidateTLBEntry_ef[simp,wp]: "empty_fail (invalidateTLBEntry b)" by (simp add: invalidateTLBEntry_def) @@ -426,39 +423,30 @@ lemma invalidateASID_ef[simp,wp]: "empty_fail (invalidateASID a b)" lemma invalidateTranslationSingleASID_ef[simp,wp]: "empty_fail (invalidateTranslationSingleASID a b)" by (simp add: invalidateTranslationSingleASID_def) -(* FIXME x64: move *) lemma hwASIDInvalidate_ef[simp,wp]: "empty_fail (hwASIDInvalidate b a)" by (simp add: hwASIDInvalidate_def) -(* FIXME x64: move *) lemma updateIRQState_ef[simp,wp]: "empty_fail (updateIRQState b c)" - by (simp add: updateIRQState_def) + by (fastforce simp: updateIRQState_def) -(* FIXME x64: move *) lemma writeCR3_ef[simp,wp]: "empty_fail (writeCR3 a b)" by (simp add: writeCR3_def) -(* FIXME x64: move *) lemma in8_ef[simp,wp]: "empty_fail (in8 port)" - by (simp add: in8_def) + by (fastforce simp: in8_def) -(* FIXME x64: move *) lemma in16_ef[simp,wp]: "empty_fail (in16 port)" - by (simp add: in16_def) + by (fastforce simp: in16_def) -(* FIXME x64: move *) lemma in32_ef[simp,wp]: "empty_fail (in32 port)" - by (simp add: in32_def) + by (fastforce simp: in32_def) -(* FIXME x64: move *) lemma out8_ef[simp,wp]: "empty_fail (out8 port dat)" by (simp add: out8_def) -(* FIXME x64: move *) lemma out16_ef[simp,wp]: "empty_fail (out16 port dat)" by (simp add: out16_def) -(* FIXME x64: move *) lemma out32_ef[simp,wp]: "empty_fail (out32 port dat)" by (simp add: out32_def) diff --git a/proof/refine/AARCH64/ADT_H.thy b/proof/refine/AARCH64/ADT_H.thy new file mode 100644 index 0000000000..5f6824b914 --- /dev/null +++ b/proof/refine/AARCH64/ADT_H.thy @@ -0,0 +1,1757 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +chapter \Abstract datatype for the executable specification\ + +theory ADT_H + imports Syscall_R +begin + +text \ + The general refinement calculus (see theory Simulation) requires + the definition of a so-called ``abstract datatype'' for each refinement layer. + This theory defines this datatype for the executable specification. + It is based on the abstract specification because we chose + to base the refinement's observable state on the abstract state. +\ + +consts + initEntry :: machine_word + initFrames :: "machine_word list" + initOffset :: machine_word + initKernelFrames :: "machine_word list" + initBootFrames :: "machine_word list" + initDataStart :: machine_word + +context begin interpretation Arch . (*FIXME: arch_split*) + +text \ + The construction of the abstract data type + for the executable specification largely follows + the one for the abstract specification. +\ +definition Init_H :: "kernel_state global_state set" where + "Init_H \ + ({empty_context} \ snd ` + fst (initKernel (VPtr initEntry) (PPtr initOffset) (map PPtr initFrames) + (map PPtr initKernelFrames) initBootFrames + (newKernelState initDataStart))) \ + {UserMode} \ {None}" + +definition + "user_mem' s \ \p. 
+ if pointerInUserData p s then Some (underlying_memory (ksMachineState s) p) else None" + +definition + "device_mem' s \ \p. + if pointerInDeviceData p s then Some p else None" + +definition vm_rights_of :: "vmrights \ rights set" where + "vm_rights_of x \ case x of VMKernelOnly \ vm_kernel_only + | VMReadOnly \ vm_read_only + | VMReadWrite \ vm_read_write" + +lemma vm_rights_of_vmrights_map_id[simp]: + "rs \ valid_vm_rights \ vm_rights_of (vmrights_map rs) = rs" + by (auto simp: vm_rights_of_def vmrights_map_def valid_vm_rights_def + vm_read_write_def vm_read_only_def vm_kernel_only_def) + +(* We expect 'a to be one of {pt_index, vs_index} *) +definition absPageTable0 :: + "(obj_ref \ Structures_H.kernel_object) \ obj_ref \ 'a::len word \ AARCH64_A.pte" where + "absPageTable0 h a \ \offs. + case h (a + (ucast offs << pte_bits)) of + Some (KOArch (KOPTE (InvalidPTE))) \ Some AARCH64_A.InvalidPTE + | Some (KOArch (KOPTE (PagePTE p small global execNever dev rights))) \ + Some (AARCH64_A.PagePTE p small + {x. global \ x=Global \ \execNever \ x = Execute \ + dev \ x = Device} + (vm_rights_of rights)) + | Some (KOArch (KOPTE (PageTablePTE p))) \ + if p \ mask ppn_len + then Some (AARCH64_A.PageTablePTE (ucast p)) + else None + | _ \ None" + +definition absPageTable :: + "(obj_ref \ Structures_H.kernel_object) \ pt_type option \ obj_ref \ pt" where + "absPageTable h pt_t a \ + case pt_t of + Some NormalPT_T \ + if is_aligned a (pt_bits NormalPT_T) \ (\off::pt_index. absPageTable0 h a off \ None) + then Some (NormalPT (\off. the (absPageTable0 h a off))) + else None + | Some VSRootPT_T \ + if is_aligned a (pt_bits VSRootPT_T) \ (\off::vs_index. absPageTable0 h a off \ None) + then Some (VSRootPT (\off. the (absPageTable0 h a off))) + else None + | None \ None" + +definition absVGIC :: "gicvcpuinterface \ gic_vcpu_interface" where + "absVGIC v \ case v of + VGICInterface hcr vmcr apr lr \ gic_vcpu_interface.make hcr vmcr apr lr" + +lemma absVGIC_eq[simp]: + "absVGIC (vgic_map vgic) = vgic" + by (simp add: vgic_map_def absVGIC_def gic_vcpu_interface.make_def) + +(* Can't pull the whole heap off at once, start with arch specific stuff.*) +definition absHeapArch :: + "(machine_word \ kernel_object) \ (machine_word \ pt_type) \ + machine_word \ arch_kernel_object \ arch_kernel_obj" where + "absHeapArch h pt_types a \ \ako. + case ako of + KOASIDPool (AARCH64_H.ASIDPool ap) \ + Some (AARCH64_A.ASIDPool (\w. map_option abs_asid_entry (ap (ucast w)))) + | KOPTE _ \ + map_option PageTable (absPageTable h (pt_types a) a) + | KOVCPU (VCPUObj tcb vgic regs vppimask vtimer) \ + Some (VCPU \ vcpu_tcb = tcb, + vcpu_vgic = absVGIC vgic, + vcpu_regs = regs, + vcpu_vppi_masked = vppimask, + vcpu_vtimer = vtimer \)" + +definition + "EndpointMap ep \ case ep of + Structures_H.IdleEP \ Structures_A.IdleEP + | Structures_H.SendEP q \ Structures_A.SendEP q + | Structures_H.RecvEP q \ Structures_A.RecvEP q" + +definition + "AEndpointMap ntfn \ + \ ntfn_obj = case ntfnObj ntfn of + Structures_H.IdleNtfn \ Structures_A.IdleNtfn + | Structures_H.WaitingNtfn q \ Structures_A.WaitingNtfn q + | Structures_H.ActiveNtfn b \ Structures_A.ActiveNtfn b + , ntfn_bound_tcb = ntfnBoundTCB ntfn \" + +definition mdata_map' :: + "(asid \ vspace_ref) option \ (Machine_A.AARCH64_A.asid \ vspace_ref) option" where + "mdata_map' = map_option (\(asid, ref). 
(ucast asid, ref))" + +lemma mdata_map'_inv[simp]: + "mdata_map' (mdata_map m) = m" + by (cases m; simp add: mdata_map_def mdata_map'_def split_def ucast_down_ucast_id is_down) + +fun CapabilityMap :: "capability \ cap" where + "CapabilityMap capability.NullCap = cap.NullCap" +| "CapabilityMap (capability.UntypedCap d ref n idx) = cap.UntypedCap d ref n idx" +| "CapabilityMap (capability.EndpointCap ref b sr rr gr grr) = + cap.EndpointCap ref b {x. sr \ x = AllowSend \ rr \ x = AllowRecv \ + gr \ x = AllowGrant \ grr \ x = AllowGrantReply}" +| "CapabilityMap (capability.NotificationCap ref b sr rr) = + cap.NotificationCap ref b {x. sr \ x = AllowSend \ rr \ x = AllowRecv}" +| "CapabilityMap (capability.CNodeCap ref n L l) = + cap.CNodeCap ref n (bin_to_bl l (uint L))" +| "CapabilityMap (capability.ThreadCap ref) = cap.ThreadCap ref" +| "CapabilityMap capability.DomainCap = cap.DomainCap" +| "CapabilityMap (capability.ReplyCap ref master gr) = + cap.ReplyCap ref master {x. gr \ x = AllowGrant \ x = AllowWrite}" +| "CapabilityMap capability.IRQControlCap = cap.IRQControlCap" +| "CapabilityMap (capability.IRQHandlerCap irq) = cap.IRQHandlerCap irq" +| "CapabilityMap (capability.Zombie p b n) = + cap.Zombie p (case b of ZombieTCB \ None | ZombieCNode n \ Some n) n" +| "CapabilityMap (capability.ArchObjectCap (arch_capability.ASIDPoolCap x y)) = + cap.ArchObjectCap (arch_cap.ASIDPoolCap x (ucast y))" +| "CapabilityMap (capability.ArchObjectCap (arch_capability.ASIDControlCap)) = + cap.ArchObjectCap (arch_cap.ASIDControlCap)" +| "CapabilityMap (capability.ArchObjectCap + (arch_capability.FrameCap word rghts sz d data)) = + cap.ArchObjectCap (arch_cap.FrameCap word (vm_rights_of rghts) sz d (mdata_map' data))" +| "CapabilityMap (capability.ArchObjectCap + (arch_capability.PageTableCap word pt_t data)) = + cap.ArchObjectCap (arch_cap.PageTableCap word pt_t (mdata_map' data))" +| "CapabilityMap (capability.ArchObjectCap + (arch_capability.VCPUCap v)) = + cap.ArchObjectCap (arch_cap.VCPUCap v)" + +(* FIXME: wellformed_cap_simps has lots of duplicates. 
*) +lemma cap_relation_imp_CapabilityMap: + "\wellformed_cap c; cap_relation c c'\ \ CapabilityMap c' = c" + apply (case_tac c; simp add: wellformed_cap_simps) + apply (rule set_eqI, clarsimp) + apply (case_tac "x", simp_all) + apply (rule set_eqI, clarsimp) + apply (case_tac "x", simp_all add: word_bits_def) + apply clarsimp + apply (simp add: set_eq_iff, rule allI) + apply (case_tac x; clarsimp) + apply (simp add: uint_of_bl_is_bl_to_bin bl_bin_bl[simplified]) + apply (simp add: zbits_map_def split: option.splits) + apply (rename_tac arch_cap) + apply clarsimp + apply (case_tac arch_cap, simp_all add: wellformed_cap_simps) + apply (simp add: ucast_down_ucast_id is_down) + done + +primrec ThStateMap :: "Structures_H.thread_state \ Structures_A.thread_state" where + "ThStateMap Structures_H.thread_state.Running = + Structures_A.thread_state.Running" +| "ThStateMap Structures_H.thread_state.Restart = + Structures_A.thread_state.Restart" +| "ThStateMap Structures_H.thread_state.Inactive = + Structures_A.thread_state.Inactive" +| "ThStateMap Structures_H.thread_state.IdleThreadState = + Structures_A.thread_state.IdleThreadState" +| "ThStateMap Structures_H.thread_state.BlockedOnReply = + Structures_A.thread_state.BlockedOnReply" +| "ThStateMap (Structures_H.thread_state.BlockedOnReceive oref grant) = + Structures_A.thread_state.BlockedOnReceive oref \ receiver_can_grant = grant \" +| "ThStateMap (Structures_H.thread_state.BlockedOnSend oref badge grant grant_reply call) = + Structures_A.thread_state.BlockedOnSend oref + \ sender_badge = badge, + sender_can_grant = grant, + sender_can_grant_reply = grant_reply, + sender_is_call = call \" +| "ThStateMap (Structures_H.thread_state.BlockedOnNotification oref) = + Structures_A.thread_state.BlockedOnNotification oref" + +lemma thread_state_relation_imp_ThStateMap: + "thread_state_relation ts ts' \ ThStateMap ts' = ts" + by (cases ts) simp_all + +definition + "LookupFailureMap \ \lf. case lf of + Fault_H.lookup_failure.InvalidRoot \ + ExceptionTypes_A.lookup_failure.InvalidRoot + | Fault_H.lookup_failure.MissingCapability n \ + ExceptionTypes_A.lookup_failure.MissingCapability n + | Fault_H.lookup_failure.DepthMismatch n m \ + ExceptionTypes_A.lookup_failure.DepthMismatch n m + | Fault_H.lookup_failure.GuardMismatch n g l \ + ExceptionTypes_A.lookup_failure.GuardMismatch n (bin_to_bl l (uint g))" + +lemma LookupFailureMap_lookup_failure_map: + "(\n g. 
lf = ExceptionTypes_A.GuardMismatch n g \ length g \ word_bits) + \ LookupFailureMap (lookup_failure_map lf) = lf" + by (clarsimp simp add: LookupFailureMap_def lookup_failure_map_def + uint_of_bl_is_bl_to_bin word_bits_def + simp del: bin_to_bl_def + split: ExceptionTypes_A.lookup_failure.splits) + +primrec ArchFaultMap :: "Fault_H.arch_fault \ ExceptionTypes_A.arch_fault" where + "ArchFaultMap (AARCH64_H.VMFault p m) = AARCH64_A.VMFault p m" +| "ArchFaultMap (AARCH64_H.VCPUFault w) = AARCH64_A.VCPUFault w" +| "ArchFaultMap (AARCH64_H.VGICMaintenance m) = AARCH64_A.VGICMaintenance m" +| "ArchFaultMap (AARCH64_H.VPPIEvent irq) = AARCH64_A.VPPIEvent irq" + +primrec FaultMap :: "Fault_H.fault \ ExceptionTypes_A.fault" where + "FaultMap (Fault_H.fault.CapFault ref b failure) = + ExceptionTypes_A.fault.CapFault ref b (LookupFailureMap failure)" +| "FaultMap (Fault_H.fault.ArchFault fault) = + ExceptionTypes_A.fault.ArchFault (ArchFaultMap fault)" +| "FaultMap (Fault_H.fault.UnknownSyscallException n) = + ExceptionTypes_A.fault.UnknownSyscallException n" +| "FaultMap (Fault_H.fault.UserException x y) = + ExceptionTypes_A.fault.UserException x y" + +lemma ArchFaultMap_arch_fault_map: "ArchFaultMap (arch_fault_map f) = f" + by (cases f; simp add: ArchFaultMap_def arch_fault_map_def) + +lemma FaultMap_fault_map[simp]: + "valid_fault ft \ FaultMap (fault_map ft) = ft" + apply (case_tac ft, simp_all) + apply (simp add: valid_fault_def LookupFailureMap_lookup_failure_map) + apply (rule ArchFaultMap_arch_fault_map) + done + +definition + "ArchTcbMap atcb \ + \ tcb_context = atcbContext atcb, tcb_vcpu = atcbVCPUPtr atcb \" + +lemma arch_tcb_relation_imp_ArchTcnMap: + "\ arch_tcb_relation atcb atcb'\ \ ArchTcbMap atcb' = atcb" + by (clarsimp simp: arch_tcb_relation_def ArchTcbMap_def) + +definition + "TcbMap tcb \ + \tcb_ctable = CapabilityMap (cteCap (tcbCTable tcb)), + tcb_vtable = CapabilityMap (cteCap (tcbVTable tcb)), + tcb_reply = CapabilityMap (cteCap (tcbReply tcb)), + tcb_caller = CapabilityMap (cteCap (tcbCaller tcb)), + tcb_ipcframe = CapabilityMap (cteCap (tcbIPCBufferFrame tcb)), + tcb_state = ThStateMap (tcbState tcb), + tcb_fault_handler = to_bl (tcbFaultHandler tcb), + tcb_ipc_buffer = tcbIPCBuffer tcb, + tcb_fault = map_option FaultMap (tcbFault tcb), + tcb_bound_notification = tcbBoundNotification tcb, + tcb_mcpriority = tcbMCP tcb, + tcb_arch = ArchTcbMap (tcbArch tcb)\" + +definition + "absCNode sz h a \ CNode sz (\bl. + if length bl = sz + then Some (CapabilityMap (case (h (a + of_bl bl * 2^cteSizeBits)) of + Some (KOCTE cte) \ cteCap cte)) + else None)" + +definition absHeap :: + "(machine_word \ vmpage_size) \ (machine_word \ nat) \ (machine_word \ pt_type) \ + (machine_word \ Structures_H.kernel_object) \ Structures_A.kheap" where + "absHeap ups cns pt_types h \ \x. + case h x of + Some (KOEndpoint ep) \ Some (Endpoint (EndpointMap ep)) + | Some (KONotification ntfn) \ Some (Notification (AEndpointMap ntfn)) + | Some KOKernelData \ undefined \ \forbidden by pspace_relation\ + | Some KOUserData \ map_option (ArchObj \ DataPage False) (ups x) + | Some KOUserDataDevice \ map_option (ArchObj \ DataPage True) (ups x) + | Some (KOTCB tcb) \ Some (TCB (TcbMap tcb)) + | Some (KOCTE cte) \ map_option (\sz. 
absCNode sz h x) (cns x) + | Some (KOArch ako) \ map_option ArchObj (absHeapArch h pt_types x ako) + | None \ None" + +lemma unaligned_page_offsets_helper: + "\is_aligned y (pageBitsForSize vmpage_size); n\0; + n < 2 ^ (pageBitsForSize vmpage_size - pageBits)\ + \ \ is_aligned (y + n * 2 ^ pageBits :: machine_word) (pageBitsForSize vmpage_size)" + apply (simp (no_asm_simp) add: is_aligned_mask) + apply (simp add: mask_add_aligned) + apply (cut_tac mask_eq_iff_w2p [of "pageBitsForSize vmpage_size" "n * 2 ^ pageBits"]) + prefer 2 + apply (case_tac vmpage_size, simp_all add: word_size bit_simps) + apply (cut_tac word_power_nonzero_64[of n pageBits]; + simp add: word_bits_conv pageBits_def) + prefer 2 + apply (case_tac vmpage_size, simp_all add: bit_simps word_size) + apply (frule less_trans[of n _ "0x10000000000000"], simp+)+ + apply clarsimp + apply (case_tac vmpage_size, simp_all add: bit_simps) + apply (frule_tac i=n and k="0x1000" in word_mult_less_mono1, simp+)+ + done + +lemma pspace_aligned_distinct_None: + (* NOTE: life would be easier if pspace_aligned and pspace_distinct were defined on PSpace instead of the whole kernel state. *) + assumes pspace_aligned: "\x\dom ha. is_aligned (x :: machine_word) (obj_bits (the (ha x)))" + assumes pspace_distinct: + "\x y ko ko'. + ha x = Some ko \ ha y = Some ko' \ x \ y \ + {x..x + (2 ^ obj_bits ko - 1)} \ {y..y + (2 ^ obj_bits ko' - 1)} = {}" + shows "\ha x = Some ko; y \ {0<..<2^(obj_bits ko)}\ \ ha (x+y) = None" + using pspace_aligned[simplified dom_def, simplified] + apply (erule_tac x=x in allE) + apply (rule ccontr) + apply clarsimp + apply (rename_tac ko') + using pspace_distinct pspace_aligned[simplified dom_def, simplified] + apply (erule_tac x=x in allE) + apply (erule_tac x="x+y" in allE)+ + apply (clarsimp simp add: word_gt_0) + apply (clarsimp simp add: ucast_of_nat_small is_aligned_mask mask_2pm1[symmetric]) + apply (frule (1) is_aligned_AND_less_0) + apply (clarsimp simp add: word_plus_and_or_coroll le_word_or2) + apply (simp add: or.assoc le_word_or2) + apply (simp add: word_plus_and_or_coroll[symmetric]) + apply (subgoal_tac "x + y \ x + mask (obj_bits ko)", simp) + apply (rule word_add_le_mono2) + apply (simp add: mask_def plus_one_helper) + apply (thin_tac "~ P" for P)+ + apply (thin_tac "(x::'a::len word) < y" for x y)+ + apply (thin_tac "x = Some y" for x y)+ + apply (thin_tac "x && mask (obj_bits ko') = 0" for x) + apply (thin_tac "x && y = 0") + apply (clarsimp simp add: dvd_def word_bits_len_of word_bits_conv + and_mask_dvd_nat[symmetric]) + apply (cut_tac x=x in unat_lt2p) + apply (cut_tac x="mask (obj_bits ko)::machine_word" in unat_lt2p) + apply (simp add: mult.commute + add.commute[of "unat (mask (obj_bits ko))"]) + apply (case_tac "k=0", simp+) + apply (subgoal_tac "obj_bits ko\64") + prefer 2 + apply (rule ccontr) + apply (simp add: not_le) + apply (frule_tac a="2::nat" and n=64 in power_strict_increasing, simp+) + apply (case_tac "k=1", simp) + apply (cut_tac m=k and n="2 ^ obj_bits ko" in n_less_n_mult_m, + (simp(no_asm_simp))+) + apply (simp only: mult.commute) + apply (thin_tac "x = y" for x y)+ + apply (clarsimp simp add: le_less) + apply (erule disjE) + prefer 2 + apply (simp add: mask_def) + apply (subgoal_tac "obj_bits ko <= (63::nat)", simp_all) + apply (simp add: mask_def unat_minus_one word_bits_conv) + apply (cut_tac w=k and c="2 ^ obj_bits ko" and b="2^(64-obj_bits ko)" + in less_le_mult_nat) + apply (simp_all add: power_add[symmetric]) + apply (rule ccontr) + apply (simp add: not_less) + apply (simp 
add: le_less[of "2 ^ (64 - obj_bits ko)"]) + apply (erule disjE) + prefer 2 + apply (clarsimp simp add: power_add[symmetric]) + apply clarsimp + apply (drule mult_less_mono1[of "2 ^ (64 - obj_bits ko)" _ "2 ^ obj_bits ko"]) + apply (simp add: power_add[symmetric])+ + done + +lemma pspace_aligned_distinct_None': + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + shows "\kheap s x = Some ko; y \ {0<..<2^(obj_bits ko)}\ \ kheap s (x+y) = None" + apply (rule pspace_aligned_distinct_None) + apply (rule pspace_aligned[simplified pspace_aligned_def]) + apply (rule pspace_distinct[simplified pspace_distinct_def]) + apply assumption+ + done + +lemma n_less_2p_pageBitsForSize: + "n < 2 ^ (pageBitsForSize sz - pageBits) \ n * 2 ^ pageBits < 2 ^ pageBitsForSize sz" + for n::machine_word + apply (subst mult_ac) + apply (subst shiftl_t2n[symmetric]) + apply (erule shiftl_less_t2n) + using pbfs_less_wb' by (simp add: word_bits_def) + +lemma pte_offset_in_datapage: + "\ n < 2 ^ (pageBitsForSize sz - pageBits); n \ 0 \ \ + (n << pageBits) - (ucast off << pte_bits) < 2 ^ pageBitsForSize sz" + for n::machine_word and off::pt_index + apply (frule n_less_2p_pageBitsForSize) + apply (simp only: bit_simps) + apply (subst shiftl_t2n) + apply (rule order_le_less_trans[rotated], assumption) + apply (rule word_le_imp_diff_le) + prefer 2 + apply (simp add: mult_ac) + apply (subst shiftl_t2n[symmetric]) + apply (subst (asm) mult_ac) + apply (subst (asm) shiftl_t2n[symmetric])+ + apply (rule order_trans[where y="mask pageBits"]) + apply (simp add: le_mask_shiftl_le_mask[where n=9] ucast_leq_mask pageBits_def) + apply word_bitwise + apply (clarsimp simp: nth_w2p pageBits_def rev_bl_order_simps) + apply (cases sz; simp add: pageBits_def ptTranslationBits_def) + done + +lemma absHeap_correct: + fixes s' :: kernel_state + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + assumes valid_objs: "valid_objs s" + assumes pspace_relation: "pspace_relation (kheap s) (ksPSpace s')" + assumes ghost_relation: "ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') + (gsPTTypes (ksArchState s'))" + shows "absHeap (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s')) (ksPSpace s') = kheap s" +proof - + from ghost_relation + have gsUserPages: + "\a sz. (\dev. kheap s a = Some (ArchObj (DataPage dev sz))) \ + gsUserPages s' a = Some sz" + and gsCNodes: + "\a n. (\cs. kheap s a = Some (CNode n cs) \ well_formed_cnode_n n cs) \ + gsCNodes s' a = Some n" + and gsPTs: + "\a pt_t. (\pt. 
kheap s a = Some (ArchObj (PageTable pt)) \ pt_t = pt_type pt) \ + gsPTTypes (ksArchState s') a = Some pt_t" + by (fastforce simp add: ghost_relation_def)+ + + show "?thesis" + supply image_cong_simp [cong del] + apply (rule ext) + apply (simp add: absHeap_def split: option.splits) + apply (rule conjI) + using pspace_relation + apply (clarsimp simp: pspace_relation_def pspace_dom_def UNION_eq dom_def Collect_eq) + apply (erule_tac x=x in allE) + apply clarsimp + apply (case_tac "kheap s x", simp) + apply (erule_tac x=x in allE, clarsimp) + apply (erule_tac x=x in allE, simp add: Ball_def) + apply (erule_tac x=x in allE, clarsimp) + apply (rename_tac a) + apply (case_tac a; simp add: other_obj_relation_def + split: if_split_asm Structures_H.kernel_object.splits) + apply (rename_tac sz cs) + apply (clarsimp simp: image_def cte_map_def well_formed_cnode_n_def Collect_eq dom_def) + apply (erule_tac x="replicate sz False" in allE)+ + apply simp + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: image_def) + apply (erule allE, drule_tac x=0 in bspec, simp, fastforce) + apply (erule_tac x=0 in allE, simp add: not_less) + apply (rename_tac vmpage_size) + apply (case_tac vmpage_size; simp add: bit_simps) + + apply (clarsimp split: kernel_object.splits) + apply (intro conjI impI allI) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply clarsimp + apply (case_tac ko; simp add: tcb_relation_cut_def other_obj_relation_def) + apply (clarsimp simp: cte_relation_def split: if_split_asm) + apply (clarsimp simp: ep_relation_def EndpointMap_def + split: Structures_A.endpoint.splits) + apply (clarsimp simp: EndpointMap_def split: Structures_A.endpoint.splits) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm)+ + + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko; simp add: tcb_relation_cut_def other_obj_relation_def) + apply (clarsimp simp: cte_relation_def split: if_split_asm) + apply (clarsimp simp: ntfn_relation_def AEndpointMap_def + split: Structures_A.ntfn.splits) + apply (clarsimp simp: AEndpointMap_def split: Structures_A.ntfn.splits) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm)+ + + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko; simp add: tcb_relation_cut_def other_obj_relation_def) + apply (clarsimp simp: cte_relation_def split: if_split_asm) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm)+ + + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (rename_tac vmpage_size) + apply (cut_tac a=y and sz=vmpage_size in gsUserPages, clarsimp split: if_split_asm) + apply (case_tac "n=0", simp) + apply (case_tac "kheap s (y + n * 2 ^ pageBits)") + apply (rule ccontr) + apply (clarsimp simp: shiftl_t2n mult_ac dest!: gsUserPages[symmetric, THEN iffD1] ) + using pspace_aligned + 
apply (simp add: pspace_aligned_def dom_def) + apply (erule_tac x=y in allE) + apply (case_tac "n=0",(simp split: if_split_asm)+) + apply (frule (2) unaligned_page_offsets_helper) + apply (frule_tac y="n*2^pageBits" in pspace_aligned_distinct_None' + [OF pspace_aligned pspace_distinct]) + apply simp + apply (rule conjI, clarsimp simp add: word_gt_0) + apply (erule n_less_2p_pageBitsForSize) + apply (clarsimp simp: shiftl_t2n mult_ac) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (rename_tac vmpage_size) + apply (cut_tac a=y and sz=vmpage_size in gsUserPages, clarsimp split: if_split_asm) + apply (case_tac "n=0", simp) + apply (case_tac "kheap s (y + n * 2 ^ pageBits)") + apply (rule ccontr) + apply (clarsimp simp: shiftl_t2n mult_ac dest!: gsUserPages[symmetric, THEN iffD1]) + using pspace_aligned + apply (simp add: pspace_aligned_def dom_def) + apply (erule_tac x=y in allE) + apply (case_tac "n=0",simp+) + apply (frule (2) unaligned_page_offsets_helper) + apply (frule_tac y="n*2^pageBits" in pspace_aligned_distinct_None' + [OF pspace_aligned pspace_distinct]) + apply simp + apply (rule conjI, clarsimp simp add: word_gt_0) + apply (erule n_less_2p_pageBitsForSize) + apply (clarsimp simp: shiftl_t2n mult_ac) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + prefer 2 + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp add: TcbMap_def tcb_relation_def valid_obj_def) + apply (rename_tac tcb y tcb') + apply (case_tac tcb) + apply (case_tac tcb') + apply (simp add: thread_state_relation_imp_ThStateMap) + apply (subgoal_tac "map_option FaultMap (tcbFault tcb) = tcb_fault") + prefer 2 + apply (simp add: fault_rel_optionation_def) + using valid_objs[simplified valid_objs_def dom_def fun_app_def, simplified] + apply (erule_tac x=y in allE) + apply (clarsimp simp: valid_obj_def valid_tcb_def + split: option.splits) + using valid_objs[simplified valid_objs_def Ball_def dom_def fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: cap_relation_imp_CapabilityMap valid_obj_def + valid_tcb_def ran_tcb_cap_cases valid_cap_def2 + arch_tcb_relation_imp_ArchTcnMap) + apply (simp add: absCNode_def cte_map_def) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def + split: if_split_asm) + prefer 2 + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm) + apply (simp add: cte_map_def) + apply (clarsimp simp add: cte_relation_def) + apply (cut_tac a=y and n=sz in gsCNodes, clarsimp) + using pspace_aligned[simplified pspace_aligned_def] + apply (drule_tac x=y in bspec, clarsimp) + apply clarsimp + apply (case_tac "(of_bl ya::machine_word) << cte_level_bits = 0", simp) + apply (rule ext) + apply simp + apply (rule conjI) + prefer 2 + using 
valid_objs[simplified valid_objs_def Ball_def dom_def + fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: valid_obj_def valid_cs_def valid_cs_size_def + well_formed_cnode_n_def dom_def Collect_eq) + apply (frule_tac x=ya in spec, simp) + apply (erule_tac x=bl in allE) + apply clarsimp+ + apply (frule pspace_relation_absD[OF _ pspace_relation]) + apply (simp add: cte_map_def) + apply (drule_tac x="y + of_bl bl * 2^cte_level_bits" in spec) + apply (clarsimp simp: shiftl_t2n mult_ac) + apply (erule_tac x="cte_relation bl" in allE) + apply (erule impE) + apply (fastforce simp add: well_formed_cnode_n_def) + apply clarsimp + apply (clarsimp simp add: cte_relation_def) + apply (rule cap_relation_imp_CapabilityMap) + using valid_objs[simplified valid_objs_def Ball_def dom_def + fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp: valid_obj_def valid_cs_def valid_cap_def2 ran_def) + apply (fastforce simp: cte_level_bits_def objBits_defs)+ + apply (subgoal_tac "kheap s (y + of_bl ya * 2^cte_level_bits) = None") + prefer 2 + using valid_objs[simplified valid_objs_def Ball_def dom_def fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: valid_obj_def valid_cs_def valid_cs_size_def) + apply (rule pspace_aligned_distinct_None'[OF + pspace_aligned pspace_distinct], assumption) + apply (clarsimp simp: word_neq_0_conv power_add cte_index_repair) + apply (simp add: well_formed_cnode_n_def dom_def Collect_eq shiftl_t2n mult_ac) + apply (erule_tac x=ya in allE)+ + apply (rule word_mult_less_mono1) + apply (subgoal_tac "sz = length ya") + apply simp + apply (rule of_bl_length, (simp add: word_bits_def)+)[1] + apply fastforce + apply (simp add: cte_level_bits_def) + apply (simp add: word_bits_conv cte_level_bits_def) + apply (drule_tac a="2::nat" in power_strict_increasing, simp+) + apply (simp add: shiftl_t2n mult_ac) + apply (rule ccontr, clarsimp) + apply (cut_tac a="y + of_bl ya * 2^cte_level_bits" and n=yc in gsCNodes) + apply clarsimp + + (* mapping architecture-specific objects *) + apply clarsimp + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (rename_tac arch_kernel_object y ko P arch_kernel_obj) + apply (case_tac arch_kernel_object, simp_all add: absHeapArch_def + split: asidpool.splits) + + apply (in_case "KOASIDPool ?pool") + apply clarsimp + apply (case_tac arch_kernel_obj) + apply (simp add: other_obj_relation_def asid_pool_relation_def + inv_def o_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm)+ + apply (simp add: other_obj_relation_def) + + apply (in_case "KOPTE ?pte") + apply (case_tac arch_kernel_obj; + simp add: other_obj_relation_def asid_pool_relation_def inv_def o_def) + apply clarsimp + apply (rename_tac p pte pt idx) + apply (frule pspace_alignedD, rule pspace_aligned) + apply (clarsimp simp add: pte_relation_def) + apply (prop_tac "pt_at (pt_type pt) p s", simp add: obj_at_def) + apply (drule page_table_at_cross[OF _ pspace_aligned pspace_distinct pspace_relation]) + apply (clarsimp simp: page_table_at'_def typ_at'_def ko_wp_at'_def) + apply (cut_tac a=p and pt_t="pt_type pt" in gsPTs, clarsimp) + apply (case_tac "pt_type pt"; clarsimp) + apply (in_case "VSRootPT_T") + apply (clarsimp simp: absPageTable_def split del: if_split split: option.splits) + apply (rule conjI, clarsimp) + apply (rule sym) + apply (rule 
pspace_aligned_distinct_None'[OF pspace_aligned pspace_distinct], assumption) + apply simp + apply (case_tac "idx << pte_bits = 0", simp) + apply (clarsimp simp: word_neq_0_conv) + apply (rule shiftl_less_t2n) + apply (simp add: table_size_def le_mask_iff_lt_2n[THEN iffD1]) + apply (simp add: table_size_bounded[unfolded word_bits_def, simplified]) + apply (clarsimp split del: if_split) + apply (prop_tac "idx << pte_bits = 0") + apply (rename_tac pt_t') + apply (cut_tac a="p + (idx << pte_bits)" and pt_t=pt_t' in gsPTs) + apply clarsimp + apply (rule ccontr) + apply (drule_tac y="idx << pte_bits" in pspace_aligned_distinct_None' + [OF pspace_aligned pspace_distinct]) + apply (clarsimp simp: word_neq_0_conv table_size_def) + apply (rule shiftl_less_t2n, simp) + apply (erule order_le_less_trans) + apply (simp add: mask_def bit_simps) + apply (simp add: bit_simps) + apply simp + apply (thin_tac "pte_relation' pte pte'" for pte pte') + apply (clarsimp simp: pt_bits_def) + apply (case_tac pt; clarsimp) + apply (rename_tac vs) + apply (clarsimp simp: absPageTable0_def) + apply (rule conjI, clarsimp) + apply (rule ext, rename_tac offs) + apply (erule_tac x="ucast offs" in allE, erule impE, rule ucast_leq_mask) + apply (simp add: bit_simps) + apply (clarsimp dest!: koTypeOf_pte simp: objBits_simps) + apply (erule_tac x="ucast offs" in allE) + apply clarsimp + apply (rename_tac pte y) + apply (frule pspace_relation_absD, rule pspace_relation) + apply clarsimp + apply (drule_tac x="ucast offs" in bspec) + apply clarsimp + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: pte_relation_def ucast_ucast_mask ge_mask_eq vs_index_bits_def) + apply (erule pspace_valid_objsE, rule valid_objs) + apply (clarsimp simp: valid_obj_def) + apply (erule_tac x=offs in allE) + apply (clarsimp simp: wellformed_pte_def) + apply (case_tac "vs offs"; clarsimp split: if_split_asm) + apply (rule set_eqI, simp) + apply (rename_tac x, case_tac x; simp) + apply (simp add: ucast_ucast_mask ge_mask_eq) + apply clarsimp + apply (erule_tac x="ucast off" in allE) + apply (erule impE) + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp dest!: koTypeOf_pte simp: objBits_simps) + apply (frule pspace_relation_absD, rule pspace_relation) + apply clarsimp + apply (drule_tac x="ucast off" in bspec) + apply clarsimp + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: pte_relation_def ucast_ucast_mask ge_mask_eq vs_index_bits_def) + apply (case_tac "vs off"; simp add: ucast_leq_mask ppn_len_val) + + (* NormalPT_T is an exact duplicate of the VSRootPT_T case, but I don't see any good way + to factor out the commonality *) + apply (in_case "NormalPT_T") + apply (clarsimp simp: absPageTable_def split del: if_split split: option.splits) + apply (rule conjI, clarsimp) + apply (rule sym) + apply (rule pspace_aligned_distinct_None'[OF pspace_aligned pspace_distinct], assumption) + apply simp + apply (case_tac "idx << pte_bits = 0", simp) + apply (clarsimp simp: word_neq_0_conv) + apply (rule shiftl_less_t2n) + apply (simp add: table_size_def le_mask_iff_lt_2n[THEN iffD1]) + apply (simp add: table_size_bounded[unfolded word_bits_def, simplified]) + apply (clarsimp split del: if_split) + apply (prop_tac "idx << pte_bits = 0") + apply (rename_tac pt_t') + apply (cut_tac a="p + (idx << pte_bits)" and pt_t=pt_t' in gsPTs) + apply clarsimp + apply (rule ccontr) + apply (drule_tac y="idx << pte_bits" in pspace_aligned_distinct_None' + [OF 
pspace_aligned pspace_distinct]) + apply (clarsimp simp: word_neq_0_conv table_size_def) + apply (rule shiftl_less_t2n, simp) + apply (erule order_le_less_trans) + apply (simp add: mask_def bit_simps) + apply (simp add: bit_simps) + apply simp + apply (thin_tac "pte_relation' pte pte'" for pte pte') + apply (clarsimp simp: pt_bits_def) + apply (case_tac pt; clarsimp) + apply (rename_tac vs) + apply (clarsimp simp: absPageTable0_def) + apply (rule conjI, clarsimp) + apply (rule ext, rename_tac offs) + apply (erule_tac x="ucast offs" in allE, erule impE, rule ucast_leq_mask) + apply (simp add: bit_simps) + apply (clarsimp dest!: koTypeOf_pte simp: objBits_simps) + apply (erule_tac x="ucast offs" in allE) + apply clarsimp + apply (rename_tac pte y) + apply (frule pspace_relation_absD, rule pspace_relation) + apply clarsimp + apply (drule_tac x="ucast offs" in bspec) + apply clarsimp + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: pte_relation_def ucast_ucast_mask ge_mask_eq vs_index_bits_def) + apply (erule pspace_valid_objsE, rule valid_objs) + apply (clarsimp simp: valid_obj_def) + apply (erule_tac x=offs in allE) + apply (clarsimp simp: wellformed_pte_def) + apply (case_tac "vs offs"; clarsimp split: if_split_asm) + apply (rule set_eqI, simp) + apply (rename_tac x, case_tac x; simp) + apply (simp add: ucast_ucast_mask ge_mask_eq) + apply clarsimp + apply (erule_tac x="ucast off" in allE) + apply (erule impE) + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp dest!: koTypeOf_pte simp: objBits_simps) + apply (frule pspace_relation_absD, rule pspace_relation) + apply clarsimp + apply (drule_tac x="ucast off" in bspec) + apply clarsimp + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: pte_relation_def ucast_ucast_mask ge_mask_eq vs_index_bits_def) + apply (case_tac "vs off"; simp add: ucast_leq_mask ppn_len_val) + + apply (in_case "DataPage ?p ?sz") + apply (clarsimp split: if_splits) + + apply (in_case "KOVCPU ?vcpu") + apply clarsimp + apply (rename_tac arch_kernel_obj vcpu) + apply (case_tac arch_kernel_obj; + clarsimp simp: other_obj_relation_def pte_relation_def split: if_splits) + apply (rename_tac vcpu') + apply (case_tac vcpu') + apply (clarsimp simp: vcpu_relation_def split: vcpu.splits) + done +qed + +definition + "EtcbMap tcb \ + \tcb_priority = tcbPriority tcb, + time_slice = tcbTimeSlice tcb, + tcb_domain = tcbDomain tcb\" + +definition absEkheap :: + "(machine_word \ Structures_H.kernel_object) \ obj_ref \ etcb option" where + "absEkheap h \ \x. + case h x of + Some (KOTCB tcb) \ Some (EtcbMap tcb) + | _ \ None" + +lemma absEkheap_correct: + assumes pspace_relation: "pspace_relation (kheap s) (ksPSpace s')" + assumes ekheap_relation: "ekheap_relation (ekheap s) (ksPSpace s')" + assumes vetcbs: "valid_etcbs s" + shows "absEkheap (ksPSpace s') = ekheap s" + apply (rule ext) + apply (clarsimp simp: absEkheap_def split: option.splits Structures_H.kernel_object.splits) + apply (subgoal_tac "\x. (\tcb. kheap s x = Some (TCB tcb)) = + (\tcb'. 
ksPSpace s' x = Some (KOTCB tcb'))") + using vetcbs ekheap_relation + apply (clarsimp simp: valid_etcbs_def is_etcb_at_def dom_def ekheap_relation_def st_tcb_at_def obj_at_def) + apply (erule_tac x=x in allE)+ + apply (rule conjI, force) + apply clarsimp + apply (rule conjI, clarsimp simp: EtcbMap_def etcb_relation_def)+ + apply clarsimp + using pspace_relation + apply (clarsimp simp add: pspace_relation_def pspace_dom_def UNION_eq + dom_def Collect_eq) + apply (rule iffI) + apply (erule_tac x=x in allE)+ + apply (case_tac "ksPSpace s' x", clarsimp) + apply (erule_tac x=x in allE, clarsimp) + apply clarsimp + apply (case_tac a, simp_all add: tcb_relation_cut_def other_obj_relation_def) + apply (insert pspace_relation) + apply (clarsimp simp: obj_at'_def) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE) + apply (clarsimp simp: other_obj_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + AARCH64_A.arch_kernel_obj.split_asm)+ + done + +text \The following function can be used to reverse cte_map.\ +definition + "cteMap cns \ \p. + let P = (\(a,bl). cte_map (a,bl) = p \ cns a = Some (length bl)) + in if \x. P x + then (SOME x. P x) + else (p && ~~ mask tcbBlockSizeBits, bin_to_bl 3 (uint (p >> cte_level_bits)))" + +lemma tcb_cap_cases_length: + "tcb_cap_cases b = Some x \ length b = 3" + by (simp add: tcb_cap_cases_def tcb_cnode_index_def split: if_split_asm) + +lemma TCB_implies_KOTCB: + "\pspace_relation (kheap s) (ksPSpace s'); kheap s a = Some (TCB tcb)\ + \ \tcb'. ksPSpace s' a = Some (KOTCB tcb') \ tcb_relation tcb tcb'" + apply (clarsimp simp add: pspace_relation_def pspace_dom_def + dom_def UNION_eq Collect_eq) + apply (erule_tac x=a in allE)+ + apply (clarsimp simp add: tcb_relation_cut_def + split: Structures_H.kernel_object.splits) + apply (drule iffD1) + apply (fastforce simp add: dom_def image_def) + apply clarsimp + done + +lemma cte_at_CNodeI: + "\kheap s a = Some (CNode (length b) cs); well_formed_cnode_n (length b) cs\ + \ cte_at (a,b) s" + apply (subgoal_tac "\y. cs b = Some y") + apply clarsimp + apply (rule_tac cte=y in cte_wp_at_cteI[of s _ "length b" cs]; simp) + apply (simp add: well_formed_cnode_n_def dom_def Collect_eq) + done + +lemma cteMap_correct: + assumes rel: "(s,s') \ state_relation" + assumes valid_objs: "valid_objs s" + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + assumes pspace_aligned': "pspace_aligned' s'" + assumes pspace_distinct': "pspace_distinct' s'" + shows "p \ dom (caps_of_state s) \ cteMap (gsCNodes s') (cte_map p) = p" +proof - + from rel have gsCNodes: + "\a n. (\cs. 
kheap s a = Some (CNode n cs) \ well_formed_cnode_n n cs) \ + gsCNodes s' a = Some n" + by (simp add: state_relation_def ghost_relation_def) + show ?thesis + apply (simp add: dom_def cteMap_def split: if_split_asm) + apply (clarsimp simp: caps_of_state_cte_wp_at split: if_split_asm) + apply (drule cte_wp_cte_at) + apply (intro conjI impI) + apply (rule some_equality) + apply (clarsimp simp add: split_def) + apply (frule gsCNodes[rule_format,THEN iffD2]) + apply clarsimp + apply (frule (1) cte_at_CNodeI) + apply (frule (2) cte_map_inj_eq[OF _ _ _ valid_objs pspace_aligned pspace_distinct]) + apply clarsimp + apply (clarsimp simp add: split_def) + apply (frule gsCNodes[rule_format,THEN iffD2]) + apply clarsimp + apply (frule (1) cte_at_CNodeI) + apply (frule (2) cte_map_inj_eq[OF _ _ _ valid_objs pspace_aligned pspace_distinct]) + apply clarsimp + apply (case_tac p) + apply (clarsimp simp add: cte_wp_at_cases) + apply (erule disjE) + apply clarsimp + apply (drule_tac x=a in spec, drule_tac x=b in spec, simp) + apply (cut_tac a=a and n=sz in gsCNodes[rule_format]) + apply clarsimp + apply (simp add: well_formed_cnode_n_def dom_def Collect_eq) + apply (erule_tac x=b in allE) + apply simp + apply (thin_tac "ALL x. P x" for P) + apply clarsimp + apply (frule TCB_implies_KOTCB[OF state_relation_pspace_relation[OF rel]]) + apply clarsimp + using pspace_aligned'[simplified pspace_aligned'_def] + apply (drule_tac x=a in bspec, simp add: dom_def) + apply (simp add: objBitsKO_def cte_map_def) + apply (rule conjI[rotated]) + apply (drule tcb_cap_cases_length) + apply (frule_tac b=b and c=cte_level_bits in bin_to_bl_of_bl_eq) + apply (fastforce simp: cte_level_bits_def objBits_defs shiftl_t2n mult_ac)+ + apply (case_tac "b = [False, False, False]") + apply simp + apply (frule_tac b=b and c=cte_level_bits in bin_to_bl_of_bl_eq) + apply (fastforce simp: tcb_cap_cases_length cte_level_bits_def objBits_defs)+ + apply (subgoal_tac "ksPSpace s' (cte_map (a, b)) = None") + prefer 2 + apply (rule ccontr) + apply clarsimp + using pspace_distinct'[simplified pspace_distinct'_def] + apply (drule_tac x=a in bspec, simp add: dom_def) + apply (simp add: ps_clear_def dom_def mask_2pm1[symmetric] x_power_minus_1) + apply (simp add: objBitsKO_def) + apply (drule_tac a="cte_map (a, b)" in equals0D) + apply (clarsimp simp add: cte_map_def) + apply (drule tcb_cap_cases_length) + apply (erule impE) + apply (rule word_plus_mono_right) + apply (cut_tac 'a=machine_word_len and xs=b in of_bl_length, fastforce simp: word_bits_conv) + apply (drule_tac k="2^cte_level_bits" in word_mult_less_mono1) + apply (fastforce simp: cte_level_bits_def objBits_defs)+ + apply (simp add: mask_def) + apply (rule ccontr) + apply (simp add: not_le shiftl_t2n mult_ac) + apply (drule (1) less_trans, fastforce simp: cte_level_bits_def objBits_defs) + apply (drule is_aligned_no_overflow'[simplified mask_2pm1[symmetric]]) + apply (simp add: word_bits_conv) + apply simp + apply (erule impE) + apply (drule is_aligned_no_overflow'[simplified mask_2pm1[symmetric]]) + apply (cut_tac 'a=machine_word_len and xs=b in of_bl_length, simp add: word_bits_conv) + apply (drule_tac k="2^cte_level_bits" in word_mult_less_mono1) + apply (fastforce simp: cte_level_bits_def objBits_defs)+ + apply (erule word_random) + apply (rule order.strict_implies_order) + apply (simp add: shiftl_t2n mult_ac) + apply (erule less_trans) + apply (fastforce simp: cte_level_bits_def objBits_defs mask_def) + apply (simp add: mult.commute[of _ "2^cte_level_bits"] + shiftl_t2n[of _ 
cte_level_bits, simplified, symmetric]) + apply word_bitwise + apply simp + apply (case_tac b, simp) + apply (rename_tac b, case_tac b, simp) + apply (rename_tac b, case_tac b, simp) + apply (clarsimp simp add: test_bit_of_bl eval_nat_numeral cte_level_bits_def) + apply (simp add: cte_map_def shiftl_t2n mult_ac split: option.splits) + apply (drule tcb_cap_cases_length) + apply (rule of_bl_mult_and_not_mask_eq[where m=cte_level_bits, simplified]) + apply (fastforce simp: cte_level_bits_def objBits_defs)+ + done +qed + +definition (* NOTE: cnp maps addresses to CNode, offset pairs *) + "absIsOriginalCap cnp h \ \(oref,cref). + cnp (cte_map (oref, cref)) = (oref, cref) \ + cte_map (oref,cref) : dom (map_to_ctes h) \ + (\cte. map_to_ctes h (cte_map (oref,cref)) = Some cte \ + (cteCap cte \ capability.NullCap) \ mdbRevocable (cteMDBNode cte))" + +lemma absIsOriginalCap_correct: + assumes valid_ioc: "valid_ioc s" + assumes valid_objs: "valid_objs s" + assumes rel: "(s,s') \ state_relation" + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + assumes pspace_aligned': "pspace_aligned' s'" + assumes pspace_distinct': "pspace_distinct' s'" + shows "absIsOriginalCap (cteMap (gsCNodes s')) (ksPSpace s') = is_original_cap s" +proof - + from valid_ioc + have no_cap_not_orig: + "\p. caps_of_state s p = None \ is_original_cap s p = False" + and null_cap_not_orig: + "\p. caps_of_state s p = Some cap.NullCap \ is_original_cap s p = False" + by (fastforce simp: valid_ioc_def2 null_filter_def)+ + + have cnp: + "\a b. caps_of_state s (a, b) \ None \ + (cteMap (gsCNodes s')) (cte_map (a, b)) = (a, b)" + using cteMap_correct[OF rel valid_objs pspace_aligned pspace_distinct + pspace_aligned' pspace_distinct'] + by (clarsimp simp: dom_def) + + show ?thesis + apply (subgoal_tac "revokable_relation (is_original_cap s) + (null_filter (caps_of_state s)) (ctes_of s') \ + pspace_relation (kheap s) (ksPSpace s')") + prefer 2 + using rel + apply (clarsimp simp add: state_relation_def) + apply (rule ext) + apply (clarsimp simp add: revokable_relation_def + null_filter_def absIsOriginalCap_def + split: if_split_asm) + apply (erule_tac x=a in allE) + apply (erule_tac x=b in allE) + apply (case_tac "caps_of_state s (a, b)") + apply (clarsimp simp: no_cap_not_orig) + apply (frule (1) pspace_relation_cte_wp_atI[OF _ _ valid_objs]) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (subgoal_tac "(a,b) = (aa,ba)", simp) + apply (cut_tac a=aa and b=ba in cnp[rule_format], simp) + apply (simp add: cte_map_def) + apply simp + apply (case_tac "aa = cap.NullCap") + apply (clarsimp simp add: null_cap_not_orig) + apply (frule (1) pspace_relation_ctes_ofI + [OF _ caps_of_state_cteD pspace_aligned' pspace_distinct']) + apply clarsimp + apply (frule (1) pspace_relation_ctes_ofI + [OF _ caps_of_state_cteD pspace_aligned' pspace_distinct']) + apply (clarsimp simp add: dom_def) + apply (cut_tac a=a and b=b in cnp[rule_format], simp+) + apply (case_tac cte, clarsimp) + apply (case_tac aa, simp_all) + apply (rename_tac arch_cap) + apply (case_tac arch_cap, simp_all) + done +qed + +text \ + In the executable specification, + a linked list connects all children of a certain node. + More specifically, the predicate @{term "subtree h c c'"} holds iff + the map @{term h} from addresses to CTEs contains capabilities + at the addresses @{term c} and @{term c'} and + the latter is a child of the former. 
+ + In the abstract specification, the capability-derivation tree @{term "cdt s"} + maps the address of each capability to the address of its immediate parent. + + The definition below takes a binary predicate @{term ds} as parameter, + which represents a childhood relation like @{term "subtree h"}, + and converts this into an optional function to the immediate parent + in the same format as @{term "cdt s"}. +\ +definition + "parent_of' ds \ \x. + if \p. \ ds p x + then None + else Some (THE p. ds p x \ (\q. ds p q \ ds q x \ p = q))" + +definition + "absCDT cnp h \ \(oref,cref). + if cnp (cte_map (oref, cref)) = (oref, cref) + then map_option cnp (parent_of' (subtree h) (cte_map (oref, cref))) + else None" + +lemma valid_mdb_mdb_cte_at: + "valid_mdb s \ mdb_cte_at (\p. \c. caps_of_state s p = Some c \ cap.NullCap \ c) (cdt s)" + by (simp add: valid_mdb_def2) + +lemma absCDT_correct': + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + assumes pspace_aligned': "pspace_aligned' s'" + assumes pspace_distinct': "pspace_distinct' s'" + assumes valid_objs: "valid_objs s" + assumes valid_mdb: "valid_mdb s" + assumes rel: "(s,s') \ state_relation" + shows + "absCDT (cteMap (gsCNodes s')) (ctes_of s') = cdt s" (is ?P) + "(case (cdt s x) of None \ caps_of_state s x \ None \ (\q. \(ctes_of s' \ q \ cte_map x)) | + Some p \ + ctes_of s' \ cte_map p \ cte_map x \ + (\q. ctes_of s' \ cte_map p \ q \ + ctes_of s' \ q \ cte_map x \ + cte_map p = q))" (is ?Q) +proof - + have cnp: + "\a b. caps_of_state s (a, b) \ None \ + (cteMap (gsCNodes s')) (cte_map (a, b)) = (a, b)" + using cteMap_correct[OF rel valid_objs pspace_aligned pspace_distinct + pspace_aligned' pspace_distinct'] + by (clarsimp simp: dom_def) + + from rel + have descs_eq: + "\a b. cte_wp_at (\_. True) (a, b) s \ + {y. \x\descendants_of (a, b) (cdt s). y = cte_map x} = + descendants_of' (cte_map (a, b)) (ctes_of s')" + apply (clarsimp simp add: state_relation_def) + apply (clarsimp simp add: swp_def cdt_relation_def image_def) + done + + from rel + have pspace_relation: "pspace_relation (kheap s) (ksPSpace s')" + by (clarsimp simp add: state_relation_def) + + note cdt_has_caps = mdb_cte_atD[OF _ valid_mdb_mdb_cte_at[OF valid_mdb]] + note descendants_of_simps = descendants_of_def cdt_parent_rel_def is_cdt_parent_def + + have descendants_implies: + "\p p'. p' \ descendants_of p (cdt s) \ + \cap cap'. caps_of_state s p = Some cap \ caps_of_state s p' = Some cap'" + apply (clarsimp simp: descendants_of_simps) + apply (frule tranclD2, drule tranclD) + apply (auto dest: cdt_has_caps) + done + + let ?cnp = "cteMap (gsCNodes s')" + have subtree_implies: + "\p p'. subtree (ctes_of s') p p' \ + \cap cap'. ?cnp p' \ descendants_of (?cnp p) (cdt s) \ + caps_of_state s (?cnp p) = Some cap \ + caps_of_state s (?cnp p') = Some cap' \ + (\cte cte'. ctes_of s' p = Some cte \ ctes_of s' p' = Some cte')" + apply (subgoal_tac "(ctes_of s') \ p parentOf p'") + prefer 2 + apply (erule subtree.cases, simp+) + apply (clarsimp simp add: parentOf_def) + apply (frule_tac x=p in pspace_relation_cte_wp_atI[OF pspace_relation _ valid_objs]) + apply clarsimp + apply (frule descs_eq[rule_format, OF cte_wp_at_weakenE], simp) + apply (simp add: descendants_of'_def Collect_eq) + apply (drule spec, drule(1) iffD2) + apply (clarsimp simp: cnp cte_wp_at_caps_of_state) + apply (frule descendants_implies) + apply (clarsimp simp: cnp) + done + have is_parent: + "\a b p cap cap' a' b' c. 
+ \cdt s (a, b) = Some (a', b')\ + \ ctes_of s' \ cte_map (a', b') \ cte_map (a, b) \ + (\q. ctes_of s' \ cte_map (a', b') \ q \ + ctes_of s' \ q \ cte_map (a, b) \ + cte_map (a', b') = q)" + apply (frule cdt_has_caps) + using descs_eq pspace_relation + apply (frule_tac x=a' in spec, erule_tac x=b' in allE) + apply (simp add: cte_wp_at_caps_of_state Collect_eq descendants_of_simps + descendants_of'_def) + apply (rule conjI) + apply fastforce + apply clarsimp + apply (drule subtree_implies)+ + apply (clarsimp simp: cnp) + using valid_mdb + apply (clarsimp simp: cnp descendants_of_simps valid_mdb_def no_mloop_def) + apply (drule_tac x="?cnp q" and y="(a, b)" in tranclD2) + apply clarsimp + apply (fastforce intro: trancl_rtrancl_trancl) + done + + + show ?P + apply (rule ext) + using descs_eq pspace_relation + apply (simp add: absCDT_def) + apply (rule conjI[rotated]) + apply clarsimp + apply (rule sym, rule ccontr, clarsimp) + apply (frule cdt_has_caps) + using cnp + apply fastforce + apply clarsimp + + apply (clarsimp simp: parent_of'_def) + apply (rule conjI) + apply clarsimp + apply (rule sym, rule ccontr, clarsimp) + apply (simp add: descendants_of_simps descendants_of'_def) + apply (rename_tac a' b') + apply (erule_tac x=a' in allE, erule_tac x=b' in allE) + apply (erule_tac x="cte_map (a', b')" in allE, erule notE) + apply (frule cdt_has_caps) + apply (clarsimp simp: cte_wp_at_caps_of_state Collect_eq) + apply fastforce + apply clarsimp + apply (drule subtree_implies) + apply clarsimp + apply (case_tac "cdt s (a, b)") + apply (simp add: descendants_of_simps descendants_of'_def) + apply (drule tranclD2) + apply clarsimp + apply clarsimp + apply (rename_tac a' b') + apply (frule cdt_has_caps) + apply clarsimp + apply (rule trans[rotated]) + apply (rule cnp[rule_format], simp) + apply (rule arg_cong[where f="?cnp"]) + apply (rule the_equality) + apply (rule is_parent,assumption) + apply clarsimp + apply (rule ccontr) + apply (drule_tac x="cte_map (a', b')" in spec, drule mp) + apply simp_all + apply (drule subtree_implies) + apply clarsimp + apply (drule_tac p=pa in ctes_of_cte_wpD) + apply (drule pspace_relation_cte_wp_atI'[OF pspace_relation _ valid_objs]) + apply (clarsimp simp add: cte_wp_at_caps_of_state cnp) + apply (thin_tac "(a, b) \ descendants_of (?cnp p) (cdt s)", + thin_tac "caps_of_state s (?cnp p) = Some cap") + apply (unfold descendants_of'_def) + apply (erule_tac x=a' in allE) + apply (erule_tac x=b' in allE) + apply (simp add: Collect_eq) + apply (erule_tac x="cte_map (a, b)" in allE) + apply (drule iffD1) + apply (rule_tac x="(a, b)" in bexI, simp) + apply (clarsimp simp: cnp descendants_of_simps) + apply (rule trancl.intros(1)) + apply simp_all + apply (rule descs_eq[simplified descendants_of'_def Collect_eq, + rule_format, THEN iffD1]) + apply (clarsimp simp add: cte_wp_at_caps_of_state) + apply (rule_tac x="(a', b')" in bexI, simp) + apply (clarsimp simp: descendants_of_simps) + apply (drule_tac x="(aa,ba)" and y="(a, b)" in tranclD2) + apply clarsimp + apply (drule rtranclD, erule disjE, simp_all)[1] + done + thus ?Q + apply (case_tac x) + apply (case_tac "cdt s (a, b)") + apply (drule sym) + apply (simp add: mdb_cte_at_def) + apply (simp add: absCDT_def split_def) + apply (simp add: parent_of'_def split: if_split_asm) + apply (intro impI) + apply (frule_tac a=a and b=b in cnp[simplified,rule_format]) + apply simp + apply simp + apply (clarsimp simp: is_parent) + done +qed + +lemmas absCDT_correct = absCDT_correct'(1) +lemmas cdt_simple_rel = absCDT_correct'(2) + + 
+(* Produce a cdt_list from a cdt by sorting the children + sets by reachability via mdbNext. We then demonstrate + that a list satisfying the state relation must + already be sorted in the same way and therefore is + equivalent. *) + +definition sort_cdt_list where + "sort_cdt_list cd m = + (\p. THE xs. set xs = {c. cd c = Some p} \ + partial_sort.psorted (\x y. m \ cte_map x \\<^sup>* cte_map y) xs \ distinct xs)" + +end + +locale partial_sort_cdt = + partial_sort "\ x y. m' \ cte_map x \\<^sup>* cte_map y" + "\ x y. cte_at x (s::det_state) \ cte_at y s \ + (\p. m' \ p \ cte_map x \ m' \ p \ cte_map y)" for m' s + + fixes s'::"kernel_state" + fixes m t + defines "m \ (cdt s)" + defines "t \ (cdt_list s)" + assumes m'_def : "m' = (ctes_of s')" + assumes rel:"(s,s') \ state_relation" + assumes valid_mdb: "valid_mdb s" + assumes assms' : "pspace_aligned s" "pspace_distinct s" "pspace_aligned' s'" + "pspace_distinct' s'" "valid_objs s" "valid_mdb s" "valid_list s" +begin + +interpretation Arch . (*FIXME: arch_split*) + +lemma valid_list_2 : "valid_list_2 t m" + apply (insert assms') + apply (simp add: t_def m_def) + done + +lemma has_next_not_child_is_descendant: + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "next_not_child slot t m = Some slot2 \ (\p. slot \ descendants_of p m)" + apply (drule next_not_childD) + apply (simp add: m_def finite_depth assms')+ + using assms' + apply (simp add: valid_mdb_def) + apply (elim disjE) + apply (drule next_sib_same_parent[OF valid_list_2]) + apply (elim exE) + apply (rule_tac x=p in exI) + apply (rule child_descendant) + apply simp + apply (elim conjE exE) + apply force + done + +lemma has_next_slot_is_descendant : + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "next_slot slot t m = Some slot2 \ m slot2 = Some slot \ (\p. slot \ descendants_of p m)" + apply (insert valid_list_2) + apply (simp add: next_slot_def next_child_def split: if_split_asm) + apply (case_tac "t slot",simp+) + apply (simp add: valid_list_2_def) + apply (rule disjI1) + apply force + apply (rule disjI2) + apply (erule has_next_not_child_is_descendant) + done + +lemma descendant_has_parent: + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "slot \ descendants_of p m \ \q. m slot = Some q" + apply (simp add: descendants_of_def) + apply (drule tranclD2) + apply (simp add: cdt_parent_of_def) + apply force + done + +lemma next_slot_cte_at: + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "next_slot slot t m = Some slot2 \ cte_at slot s" + apply (cut_tac valid_mdb_mdb_cte_at) + prefer 2 + apply (cut_tac assms') + apply simp + apply (fold m_def) + apply (simp add: mdb_cte_at_def) + apply (simp add: cte_wp_at_caps_of_state) + apply (drule has_next_slot_is_descendant) + apply (elim disjE) + apply force + apply (elim exE) + apply (drule descendant_has_parent) + apply force + done + +lemma cte_at_has_cap: + "cte_at slot s \ \c. 
cte_wp_at ((=) c) slot s" + apply (drule cte_at_get_cap_wp) + apply force + done + +lemma next_slot_mdb_next: + notes split_paired_All[simp del] + shows "next_slot slot t m = Some slot2 \ m' \ (cte_map slot) \ (cte_map slot2)" + apply (frule cte_at_has_cap[OF next_slot_cte_at]) + apply (elim exE) + apply (cut_tac s=s and s'=s' in pspace_relation_ctes_ofI) + apply (fold m'_def) + using rel + apply (simp add: state_relation_def) + apply simp + using assms' + apply simp + using assms' + apply simp + apply (subgoal_tac "cdt_list_relation t m m'") + apply (simp add: cdt_list_relation_def) + apply (elim exE) + apply (case_tac cte) + apply (simp add: mdb_next_rel_def mdb_next_def) + apply force + using rel + apply (simp add: state_relation_def m_def t_def m'_def) + done + +lemma next_sib_2_reachable: + "next_sib_2 slot p s = Some slot2 \ m' \ (cte_map slot) \\<^sup>* (cte_map slot2)" + apply (induct slot rule: next_sib_2_pinduct[where s=s and p=p]) + apply (cut_tac slot=slot and s=s and p=p in next_sib_2.psimps[OF next_sib_2_termination]; + simp add: assms') + apply (fold m_def t_def) + apply (simp split: if_split_asm) + apply (case_tac "next_slot slot t m") + apply simp + apply (simp split: if_split_asm) + apply (rule r_into_rtrancl) + apply (erule next_slot_mdb_next) + apply (rule trans) + apply (rule r_into_rtrancl) + apply (rule next_slot_mdb_next) + apply (simp add: assms' valid_list_2)+ + done + +lemma next_sib_reachable: + "next_sib slot t m = Some slot2 \ m slot = Some p \ m' \ (cte_map slot) \\<^sup>* (cte_map slot2)" + apply (rule next_sib_2_reachable) + apply (insert assms') + apply (simp add: t_def m_def) + apply (subst next_sib_def2,simp+) + done + +lemma after_in_list_next_reachable: + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "after_in_list (t p) slot = Some slot2 \ m' \ (cte_map slot) \\<^sup>* (cte_map slot2)" + apply (subgoal_tac "m slot = Some p") + apply (rule next_sib_reachable) + apply (simp add: next_sib_def)+ + apply (drule after_in_list_in_list') + apply (insert valid_list_2) + apply (simp add: valid_list_2_def) + done + +lemma sorted_lists: + "psorted (t p)" + apply (rule after_order_sorted) + apply (rule after_in_list_next_reachable) + apply simp + apply (insert assms') + apply (simp add: valid_list_def t_def del: split_paired_All) + done + +lemma finite_children: + notes split_paired_All[simp del] + shows "finite {c. m c = Some p}" + apply (insert assms') + apply(subgoal_tac "{x. x \ descendants_of p (cdt s)} \ {x. cte_wp_at (\_. True) x s}") + prefer 2 + apply(fastforce simp: descendants_of_cte_at) + apply(drule finite_subset) + apply(simp add: cte_wp_at_set_finite) + apply(subgoal_tac "{c. m c = Some p} \ {c. c \ descendants_of p (cdt s)}") + apply (drule finite_subset) + apply simp + apply simp + apply clarsimp + apply (simp add: m_def child_descendant) + done + +lemma ex1_sorted_cdt: + "\!xs. set xs = {c. 
m c = Some p} \ psorted xs \ distinct xs" + apply (rule psorted_set[OF finite_children]) + apply (simp add: R_set_def) + apply (intro impI conjI allI) + apply (simp add: has_parent_cte_at[OF valid_mdb] m_def) + apply (simp add: has_parent_cte_at[OF valid_mdb] m_def) + + apply (cut_tac s=s and s'=s' and x="(a,b)" in cdt_simple_rel, simp_all add: assms' rel) + apply (simp add: m_def) + apply (cut_tac s=s and s'=s' and x="(aa,ba)" in cdt_simple_rel, simp_all add: assms' rel) + apply (rule_tac x="cte_map p" in exI) + apply (simp add: m'_def) + done + +lemma sort_cdt_list_correct: + "sort_cdt_list m m' = t" + apply (rule ext) + apply (simp add: sort_cdt_list_def) + apply (rule the1_equality) + apply (rule ex1_sorted_cdt) + apply (simp add: sorted_lists) + apply (insert assms') + apply (simp add: valid_list_def t_def m_def del: split_paired_All) + done + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition absCDTList where + "absCDTList cnp h \ sort_cdt_list (absCDT cnp h) h" + +lemma no_loops_sym_eq: "no_loops m \ m \ a \\<^sup>* b \ m \ b \\<^sup>* a \ a = b" + apply (rule ccontr) + apply (subgoal_tac "m \ a \\<^sup>+ a") + apply (simp add: no_loops_def) + apply (simp add: rtrancl_eq_or_trancl) + done + +lemma mdb_next_single_valued: "single_valued (mdb_next_rel m)" + apply (simp add: single_valued_def mdb_next_rel_def) + done + +lemma substring_next: "m \ a \\<^sup>* b \ m \ a \\<^sup>* c \ m \ b \\<^sup>* c \ m \ c \\<^sup>* b" + apply (rule single_valued_confluent) + apply (rule mdb_next_single_valued) + apply simp+ + done + +lemma ancestor_comparable: "\m \ a \ x; m \ a \ y\ \ m \ x \\<^sup>* y \ m \ y \\<^sup>* x" + apply (rule substring_next) + apply (erule subtree_mdb_next[THEN trancl_into_rtrancl])+ + done + +lemma valid_mdb'_no_loops: "valid_mdb' s \ no_loops (ctes_of s)" + apply (rule mdb_chain_0_no_loops) + apply (simp add: valid_mdb'_def valid_mdb_ctes_def)+ + done + +lemma absCDTList_correct: + notes split_paired_All[simp del] split_paired_Ex[simp del] + assumes valid_mdb: "valid_mdb s" + assumes valid_mdb': "valid_mdb' s'" + assumes valid_list: "valid_list s" + assumes valid_objs: "valid_objs s" + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_aligned': "pspace_aligned' s'" + assumes pspace_distinct: "pspace_distinct s" + assumes pspace_distinct': "pspace_distinct' s'" + assumes rel: "(s,s') \ state_relation" + shows "absCDTList (cteMap (gsCNodes s')) (ctes_of s') = cdt_list s" + apply (simp add: absCDTList_def) + apply (subst absCDT_correct[where s=s]) + apply (simp add: assms)+ + apply (rule partial_sort_cdt.sort_cdt_list_correct[where s'=s']) + apply (simp add: partial_sort_cdt_def) + apply (rule context_conjI') + apply unfold_locales + apply (simp add: assms)+ + apply (simp add: partial_sort_cdt_axioms_def) + apply (elim conjE exE) + apply (rule ancestor_comparable,assumption+) + apply (elim conjE) + apply (rule cte_map_inj_eq) + apply (rule no_loops_sym_eq[where m="ctes_of s'"]) + apply (rule valid_mdb'_no_loops[OF valid_mdb']) + apply (simp add: assms)+ + done + +definition + "absInterruptIRQNode is' \ \irq. 
+ case is' of InterruptState node irqs' \ + node + (ucast irq << cte_level_bits)" + +definition + "irq_state_map s \ case s of + irq_state.IRQInactive \ irqstate.IRQInactive + | irq_state.IRQSignal \ irqstate.IRQSignal + | irq_state.IRQTimer \ irqstate.IRQTimer + | irq_state.IRQReserved \ irqstate.IRQReserved" + +definition + "IRQStateMap s \ case s of + irqstate.IRQInactive \ irq_state.IRQInactive + | irqstate.IRQSignal \ irq_state.IRQSignal + | irqstate.IRQTimer \ irq_state.IRQTimer + | irqstate.IRQReserved \ irq_state.IRQReserved" + +definition + "absInterruptStates is' \ case is' of InterruptState node m \ IRQStateMap \ m" + +lemma absInterruptIRQNode_correct: + "interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s') \ + absInterruptIRQNode (ksInterruptState s') = interrupt_irq_node s" + by (rule ext) (clarsimp simp add: absInterruptIRQNode_def interrupt_state_relation_def) + +lemma absInterruptStates_correct: + "interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s') \ + absInterruptStates (ksInterruptState s') = interrupt_states s" + apply (rule ext) + apply (clarsimp simp : absInterruptStates_def IRQStateMap_def interrupt_state_relation_def + irq_state_relation_def) + apply (erule_tac x=x in allE)+ + apply (clarsimp split: irq_state.splits irqstate.splits) + done + +definition + "absArchState s' \ + case s' of + ARMKernelState asid_tbl kvspace vmid_tab next_vmid global_us_vspace current_vcpu + num_list_regs gs_pt_types \ + \ arm_asid_table = asid_tbl \ ucast, + arm_kernel_vspace = kvspace, + arm_vmid_table = map_option ucast \ vmid_tab, + arm_next_vmid = next_vmid, + arm_us_global_vspace = global_us_vspace, + arm_current_vcpu = current_vcpu, + arm_gicvcpu_numlistregs = num_list_regs \" + +lemma absArchState_correct: + "(s,s') \ state_relation \ absArchState (ksArchState s') = arch_state s" + apply (prop_tac "(arch_state s, ksArchState s') \ arch_state_relation") + apply (simp add: state_relation_def) + apply (clarsimp simp: arch_state_relation_def absArchState_def + split: AARCH64_H.kernel_state.splits) + apply (simp add: o_assoc flip: map_option_comp2) + apply (simp add: o_def ucast_up_ucast_id is_up map_option.identity) + done + +definition absSchedulerAction where + "absSchedulerAction action \ + case action of ResumeCurrentThread \ resume_cur_thread + | SwitchToThread t \ switch_thread t + | ChooseNewThread \ choose_new_thread" + +lemma absSchedulerAction_correct: + "sched_act_relation action action' \ absSchedulerAction action' = action" + by (cases action; simp add: absSchedulerAction_def) + +definition + "absExst s \ + \work_units_completed_internal = ksWorkUnitsCompleted s, + scheduler_action_internal = absSchedulerAction (ksSchedulerAction s), + ekheap_internal = absEkheap (ksPSpace s), + domain_list_internal = ksDomSchedule s, + domain_index_internal = ksDomScheduleIdx s, + cur_domain_internal = ksCurDomain s, + domain_time_internal = ksDomainTime s, + ready_queues_internal = (\d p. 
heap_walk (tcbSchedNexts_of s) (tcbQueueHead (ksReadyQueues s (d, p))) []), + cdt_list_internal = absCDTList (cteMap (gsCNodes s)) (ctes_of s)\" + +lemma absExst_correct: + assumes invs: "einvs s" and invs': "invs' s'" + assumes rel: "(s, s') \ state_relation" + shows "absExst s' = exst s" + apply (rule det_ext.equality) + using rel invs invs' + apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct + absCDTList_correct[THEN fun_cong] state_relation_def invs_def + valid_state_def ready_queues_relation_def ready_queue_relation_def + invs'_def valid_state'_def + valid_pspace_def valid_sched_def valid_pspace'_def curry_def + fun_eq_iff) + apply (fastforce simp: absEkheap_correct) + apply (fastforce simp: list_queue_relation_def Let_def dest: heap_ls_is_walk) + done + + +definition + "absKState s \ + \kheap = absHeap (gsUserPages s) (gsCNodes s) (gsPTTypes (ksArchState s)) (ksPSpace s), + cdt = absCDT (cteMap (gsCNodes s)) (ctes_of s), + is_original_cap = absIsOriginalCap (cteMap (gsCNodes s)) (ksPSpace s), + cur_thread = ksCurThread s, idle_thread = ksIdleThread s, + machine_state = observable_memory (ksMachineState s) (user_mem' s), + interrupt_irq_node = absInterruptIRQNode (ksInterruptState s), + interrupt_states = absInterruptStates (ksInterruptState s), + arch_state = absArchState (ksArchState s), + exst = absExst s\" + + +definition checkActiveIRQ :: "(kernel_state, bool) nondet_monad" where + "checkActiveIRQ \ + do irq \ doMachineOp (getActiveIRQ False); + return (irq \ None) + od" + +definition check_active_irq_H :: + "((user_context \ kernel_state) \ bool \ (user_context \ kernel_state)) set" where + "check_active_irq_H \ {((tc, s), irq, (tc, s')). (irq, s') \ fst (checkActiveIRQ s)}" + +definition doUserOp :: + "user_transition \ user_context \ (kernel_state, event option \ user_context) nondet_monad" + where + "doUserOp uop tc \ + do t \ getCurThread; + trans \ gets (ptable_lift t \ absKState); + perms \ gets (ptable_rights t \ absKState); + + um \ gets (\s. user_mem' s \ ptrFromPAddr); + dm \ gets (\s. device_mem' s \ ptrFromPAddr); + + ds \ gets (device_state \ ksMachineState); + assert (dom (um \ addrFromPPtr) \ - dom ds); + assert (dom (dm \ addrFromPPtr) \ dom ds); + + (e, tc',um',ds') \ select (fst (uop t (restrict_map trans {pa. perms pa \ {}}) perms + (tc, restrict_map um + {pa. \va. trans va = Some pa \ AllowRead \ perms va} + ,(ds \ ptrFromPAddr) |` {pa. \va. trans va = Some pa \ AllowRead \ perms va} ) + )); + doMachineOp (user_memory_update + ((um' |` {pa. \va. trans va = Some pa \ AllowWrite \ perms va} + \ addrFromPPtr) |` (- dom ds))); + doMachineOp (device_memory_update + ((ds' |` {pa. \va. trans va = Some pa \ AllowWrite \ perms va} + \ addrFromPPtr )|` (dom ds))); + return (e, tc') + od" + +definition do_user_op_H :: + "user_transition \ + ((user_context \ kernel_state) \ (event option \ user_context \ kernel_state)) set" where + "do_user_op_H uop \ monad_to_transition (doUserOp uop)" + +definition + "kernelEntry e tc \ do + t \ getCurThread; + threadSet (\tcb. tcb \ tcbArch := atcbContextSet tc (tcbArch tcb) \) t; + callKernel e; + t' \ getCurThread; + threadGet (atcbContextGet o tcbArch) t' + od" + +definition kernel_call_H :: + "event \ ((user_context \ kernel_state) \ mode \ (user_context \ kernel_state)) set" + where + "kernel_call_H e \ + {(s, m, s'). 
s' \ fst (split (kernelEntry e) s) \ + m = (if ct_running' (snd s') then UserMode else IdleMode)}" + +definition ADT_H :: + "user_transition \ (kernel_state global_state, det_ext observable, unit) data_type" + where + "ADT_H uop \ + \Init = \s. Init_H, + Fin = \((tc,s),m,e). ((tc, absKState s),m,e), + Step = (\u. global_automaton check_active_irq_H (do_user_op_H uop) kernel_call_H)\" + +end + +end diff --git a/proof/refine/AARCH64/ArchAcc_R.thy b/proof/refine/AARCH64/ArchAcc_R.thy new file mode 100644 index 0000000000..c21322d823 --- /dev/null +++ b/proof/refine/AARCH64/ArchAcc_R.thy @@ -0,0 +1,1241 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Lemmas on arch get/set object etc +*) + +theory ArchAcc_R +imports SubMonad_R +begin + +unbundle l4v_word_context + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare if_cong[cong] (* FIXME: if_cong *) + +lemma asid_pool_at_ko: + "asid_pool_at p s \ \pool. ko_at (ArchObj (AARCH64_A.ASIDPool pool)) p s" + by (clarsimp simp: asid_pools_at_eq obj_at_def elim!: opt_mapE) + +lemma corres_gets_asid: + "corres (\a c. a = c o ucast) \ \ (gets asid_table) (gets (armKSASIDTable \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma asid_low_bits [simp]: + "asidLowBits = asid_low_bits" + by (simp add: asid_low_bits_def asidLowBits_def) + +lemma pteBits_pte_bits[simp]: + "pteBits = pte_bits" + by (simp add: bit_simps pteBits_def) + +lemma cte_map_in_cnode1: + "\ x \ x + 2 ^ (cte_level_bits + length y) - 1 \ \ x \ cte_map (x, y)" + apply (simp add: cte_map_def) + apply (rule word_plus_mono_right2[where b="mask (cte_level_bits + length y)"]) + apply (simp add: mask_def add_diff_eq) + apply (rule leq_high_bits_shiftr_low_bits_leq_bits) + apply (rule of_bl_max) + done + +lemma pspace_aligned_cross: + "\ pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ pspace_aligned' s'" + apply (clarsimp simp: pspace_aligned'_def pspace_aligned_def pspace_relation_def) + apply (rename_tac p' ko') + apply (prop_tac "p' \ pspace_dom (kheap s)", fastforce) + apply (thin_tac "pspace_dom k = p" for k p) + apply (clarsimp simp: pspace_dom_def) + apply (drule bspec, fastforce)+ + apply clarsimp + apply (rename_tac ko' a a' P ko) + apply (erule (1) obj_relation_cutsE; clarsimp simp: objBits_simps) + \\CNode\ + apply (clarsimp simp: cte_map_def) + apply (simp only: cteSizeBits_def cte_level_bits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp) + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self, simp) + \\TCB\ + apply (clarsimp simp: tcbBlockSizeBits_def elim!: is_aligned_weaken) + \\PageTable\ + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (simp add: bit_simps) + apply (rule is_aligned_shift) + \\DataPage\ + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (rule pbfs_atleast_pageBits) + apply (rule is_aligned_shift) + apply (simp add: other_obj_relation_def) + apply (clarsimp simp: bit_simps' tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def + split: kernel_object.splits Structures_A.kernel_object.splits) + apply (fastforce simp: archObjSize_def split: arch_kernel_object.splits arch_kernel_obj.splits) + done + +lemma of_bl_shift_cte_level_bits: + "(of_bl z :: machine_word) << cte_level_bits \ mask (cte_level_bits + length z)" + by word_bitwise + (simp add: test_bit_of_bl bit_simps word_size cte_level_bits_def 
rev_bl_order_simps) + +lemma obj_relation_cuts_range_limit: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ + \ \x n. p' = p + x \ is_aligned x n \ n \ obj_bits ko \ x \ mask (obj_bits ko)" + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (drule (1) wf_cs_nD) + apply (clarsimp simp: cte_map_def) + apply (rule_tac x=cte_level_bits in exI) + apply (simp add: is_aligned_shift of_bl_shift_cte_level_bits) + apply (rule_tac x=tcbBlockSizeBits in exI) + apply (simp add: tcbBlockSizeBits_def) + apply (rule_tac x=pte_bits in exI) + apply (simp add: is_aligned_shift mask_def) + apply (rule shiftl_less_t2n) + apply (simp add: table_size_def) + apply (simp add: bit_simps) + apply (rule_tac x=pageBits in exI) + apply (simp add: is_aligned_shift pbfs_atleast_pageBits) + apply (simp add: mask_def shiftl_t2n mult_ac) + apply (erule word_less_power_trans2, rule pbfs_atleast_pageBits) + apply (simp add: pbfs_less_wb'[unfolded word_bits_def, simplified]) + apply fastforce + done + +lemma obj_relation_cuts_range_mask_range: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko'; is_aligned p (obj_bits ko) \ + \ p' \ mask_range p (obj_bits ko)" + apply (drule (1) obj_relation_cuts_range_limit, clarsimp) + apply (rule conjI) + apply (rule word_plus_mono_right2; assumption?) + apply (simp add: is_aligned_no_overflow_mask) + apply (erule word_plus_mono_right) + apply (simp add: is_aligned_no_overflow_mask) + done + +lemma obj_relation_cuts_obj_bits: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ \ objBitsKO ko' \ obj_bits ko" + apply (erule (1) obj_relation_cutsE; + clarsimp simp: objBits_simps objBits_defs bit_simps cte_level_bits_def + pbfs_atleast_pageBits[simplified bit_simps]) + apply (cases ko; simp add: other_obj_relation_def objBits_defs split: kernel_object.splits) + apply (rename_tac ako, case_tac ako; clarsimp; + rename_tac ako', case_tac ako'; clarsimp simp: archObjSize_def) + done + +lemmas is_aligned_add_step_le' = is_aligned_add_step_le[simplified mask_2pm1 add_diff_eq] + +lemma pspace_distinct_cross: + "\ pspace_distinct s; pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ + pspace_distinct' s'" + apply (frule (1) pspace_aligned_cross) + apply (clarsimp simp: pspace_distinct'_def) + apply (rename_tac p' ko') + apply (rule pspace_dom_relatedE; assumption?) + apply (rename_tac p ko P) + apply (frule (1) pspace_alignedD') + apply (frule (1) pspace_alignedD) + apply (rule ps_clearI, assumption) + apply (case_tac ko'; simp add: objBits_simps objBits_defs bit_simps') + apply (simp split: arch_kernel_object.splits add: bit_simps') + apply (rule ccontr, clarsimp) + apply (rename_tac x' ko_x') + apply (frule_tac x=x' in pspace_alignedD', assumption) + apply (rule_tac x=x' in pspace_dom_relatedE; assumption?) + apply (rename_tac x ko_x P') + apply (frule_tac p=x in pspace_alignedD, assumption) + apply (case_tac "p = x") + apply clarsimp + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (clarsimp simp: cte_relation_def cte_map_def objBits_simps) + apply (rule_tac n=cte_level_bits in is_aligned_add_step_le'; assumption?) + apply (rule is_aligned_add; (rule is_aligned_shift)?) + apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (rule is_aligned_add; (rule is_aligned_shift)?) + apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (simp add: cte_level_bits_def cteSizeBits_def) + apply (clarsimp simp: pte_relation_def objBits_simps) + apply (rule_tac n=pte_bits in is_aligned_add_step_le'; assumption?) 
+ apply (simp add: objBitsKO_Data) + apply (rule_tac n=pageBits in is_aligned_add_step_le'; assumption?) + apply (case_tac ko; simp split: if_split_asm add: is_other_obj_relation_type_CapTable) + apply (rename_tac ako, case_tac ako; simp add: is_other_obj_relation_type_def split: if_split_asm) + apply (frule (1) obj_relation_cuts_obj_bits) + apply (drule (2) obj_relation_cuts_range_mask_range)+ + apply (prop_tac "x' \ mask_range p' (objBitsKO ko')", simp add: mask_def add_diff_eq) + apply (frule_tac x=p and y=x in pspace_distinctD; assumption?) + apply (drule (4) mask_range_subsetD) + apply (erule (2) in_empty_interE) + done + +lemma asid_pool_at_cross: + "\ asid_pool_at p s; pspace_relation (kheap s) (ksPSpace s'); + pspace_aligned s; pspace_distinct s \ + \ asid_pool_at' p s'" + apply (drule (2) pspace_distinct_cross) + apply (clarsimp simp: obj_at_def typ_at'_def ko_wp_at'_def) + apply (prop_tac "p \ pspace_dom (kheap s)") + apply (clarsimp simp: pspace_dom_def) + apply (rule bexI) + prefer 2 + apply fastforce + apply clarsimp + apply (clarsimp simp: pspace_relation_def) + apply (drule bspec, fastforce) + apply (clarsimp simp: other_obj_relation_def split: kernel_object.splits arch_kernel_object.splits) + apply (clarsimp simp: objBits_simps) + apply (frule (1) pspace_alignedD) + apply (rule conjI, simp add: bit_simps) + apply (clarsimp simp: pspace_distinct'_def) + apply (drule bspec, fastforce) + apply (simp add: objBits_simps) + done + +lemma corres_cross_over_asid_pool_at: + "\ \s. P s \ asid_pool_at p s \ pspace_distinct s \ pspace_aligned s; + corres r P (Q and asid_pool_at' p) f g \ \ + corres r P Q f g" + apply (rule corres_cross_over_guard[where Q="Q and asid_pool_at' p"]) + apply (drule meta_spec, drule (1) meta_mp, clarsimp) + apply (erule asid_pool_at_cross, clarsimp simp: state_relation_def; assumption) + apply assumption + done + +lemma getObject_ASIDPool_corres: + "p' = p \ + corres asid_pool_relation + (asid_pool_at p and pspace_aligned and pspace_distinct) \ + (get_asid_pool p) (getObject p')" + apply (rule corres_cross_over_asid_pool_at, fastforce) + apply (simp add: getObject_def gets_map_def split_def) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, simp_all)[1] + apply (clarsimp simp: lookupAround2_known1) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (erule (1) ps_clear_lookupAround2) + apply simp + apply (erule is_aligned_no_overflow) + apply simp + apply (clarsimp simp add: objBits_simps + split: option.split) + apply (clarsimp simp: in_monad loadObject_default_def) + apply (simp add: bind_assoc exec_gets) + apply (drule asid_pool_at_ko) + apply (clarsimp simp: obj_at_def assert_opt_def fail_def return_def in_omonad + split: option.split) + apply (simp add: in_magnitude_check objBits_simps pageBits_def) + apply (clarsimp simp: state_relation_def pspace_relation_def) + apply (drule bspec, blast) + apply (clarsimp simp: other_obj_relation_def) + done + +lemma storePTE_cte_wp_at'[wp]: + "storePTE ptr val \\s. P (cte_wp_at' P' p s)\" + apply (simp add: storePTE_def) + apply (wp setObject_cte_wp_at2'[where Q="\"]) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply (rule equals0I) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply simp + done + +lemma storePTE_state_refs_of[wp]: + "storePTE ptr val \\s. 
P (state_refs_of' s)\" + unfolding storePTE_def + apply (wp setObject_state_refs_of_eq; + clarsimp simp: updateObject_default_def in_monad) + done + +lemma storePTE_state_hyp_refs_of[wp]: + "\\s. P (state_hyp_refs_of' s)\ + storePTE ptr val + \\rv s. P (state_hyp_refs_of' s)\" + by (wpsimp wp: hoare_drop_imps setObject_state_hyp_refs_of_eq + simp: storePTE_def updateObject_default_def in_monad) + +crunch cte_wp_at'[wp]: setIRQState "\s. P (cte_wp_at' P' p s)" +crunch inv[wp]: getIRQSlot "P" + +lemma setObject_ASIDPool_corres[corres]: + "\ p = p'; a = map_option abs_asid_entry o inv ASIDPool a' o ucast \ \ + corres dc (asid_pool_at p and pspace_aligned and pspace_distinct) \ + (set_asid_pool p a) (setObject p' a')" + apply (simp add: set_asid_pool_def) + apply (rule corres_underlying_symb_exec_l[where P=P and Q="\_. P" for P]) + apply (rule corres_no_failI; clarsimp) + apply (clarsimp simp: gets_map_def bind_def simpler_gets_def assert_opt_def fail_def return_def + obj_at_def in_omonad + split: option.splits) + prefer 2 + apply wpsimp + apply (rule corres_cross_over_asid_pool_at, fastforce) + apply (rule corres_guard_imp) + apply (rule setObject_other_corres [where P="\ko::asidpool. True"]) + apply simp + apply (clarsimp simp: obj_at'_def) + apply (erule map_to_ctes_upd_other, simp, simp) + apply (simp add: a_type_def is_other_obj_relation_type_def) + apply (simp add: objBits_simps) + apply simp + apply (simp add: objBits_simps pageBits_def) + apply (simp add: other_obj_relation_def asid_pool_relation_def) + apply (simp add: typ_at'_def obj_at'_def ko_wp_at'_def) + apply clarsimp + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp) + apply (clarsimp simp: obj_at_def exs_valid_def assert_def a_type_def return_def fail_def) + apply (auto split: Structures_A.kernel_object.split_asm arch_kernel_obj.split_asm if_split_asm)[1] + apply (simp add: typ_at_to_obj_at_arches) + done + +lemma p_le_table_base: + "is_aligned p pte_bits \ p + mask pte_bits \ table_base pt_t p + mask (table_size pt_t)" + apply (simp add: is_aligned_mask word_plus_and_or_coroll table_size_def pt_bits_def) + apply (subst word_plus_and_or_coroll, word_eqI_solve) + apply word_bitwise + apply (simp add: word_size bit_simps) + done + +lemma table_index_in_table: + "table_index pt_t p \ mask (ptTranslationBits pt_t)" + by (simp add: pt_bits_def table_size_def word_bool_le_funs flip: shiftr_then_mask_commute) + +lemma pte_at_cross: + "\ pte_at pt_t p s; pspace_relation (kheap s) (ksPSpace s'); pspace_aligned s; pspace_distinct s \ + \ pte_at' p s'" + apply (drule (2) pspace_distinct_cross) + apply (clarsimp simp: pte_at_def ptes_of_def in_omonad obj_at_def typ_at'_def ko_wp_at'_def) + apply (simp split: if_split_asm) + apply (prop_tac "p \ pspace_dom (kheap s)") + apply (clarsimp simp: pspace_dom_def) + apply (rule bexI) + prefer 2 + apply fastforce + apply (clarsimp simp: ran_def image_iff) + apply (rule_tac x="table_index pt_t p" in bexI) + apply (simp add: table_base_index_eq) + apply (simp add: table_index_in_table) + apply (clarsimp simp: pspace_relation_def) + apply (drule bspec, fastforce) + apply clarsimp + apply (drule_tac x="table_index pt_t p" in bspec) + apply (simp add: table_index_in_table) + apply (simp add: table_base_index_eq) + apply (clarsimp simp: pte_relation_def) + apply (clarsimp simp: objBits_simps) + apply (clarsimp simp: pspace_distinct'_def) + apply (drule bspec, fastforce) + apply (simp add: objBits_simps) + done + +lemma corres_cross_over_pte_at: + "\ \s. 
P s \ pte_at pt_t p s \ pspace_distinct s \ pspace_aligned s; + corres r P (P' and pte_at' p) f g\ \ + corres r P P' f g" + apply (rule corres_cross_over_guard[where Q="P' and pte_at' p"]) + apply (drule meta_spec, drule (1) meta_mp, clarsimp) + apply (erule pte_at_cross; assumption?) + apply (simp add: state_relation_def) + apply assumption + done + +lemma getObject_PTE_corres: + "corres pte_relation' (pte_at pt_t p and pspace_aligned and pspace_distinct) \ + (get_pte pt_t p) (getObject p)" + apply (rule corres_cross_over_pte_at, fastforce) + apply (simp add: getObject_def gets_map_def split_def bind_assoc) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: ko_wp_at'_def typ_at'_def lookupAround2_known1) + apply (case_tac ko, simp_all)[1] + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp) + apply (clarsimp simp: objBits_def cong: option.case_cong) + apply (erule (1) ps_clear_lookupAround2) + apply simp + apply (erule is_aligned_no_overflow) + apply (simp add: objBits_simps word_bits_def) + apply simp + apply (clarsimp simp: in_monad loadObject_default_def) + apply (simp add: bind_assoc exec_gets fst_assert_opt) + apply (clarsimp simp: pte_at_eq) + apply (clarsimp simp: ptes_of_def) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def in_magnitude_check objBits_simps pte_bits_def word_size_bits_def) + apply (clarsimp simp: state_relation_def pspace_relation_def elim!: opt_mapE) + apply (drule bspec, blast) + apply (clarsimp simp: other_obj_relation_def pte_relation_def) + apply (drule_tac x="table_index pt_t p" in bspec) + apply (simp add: table_index_in_table) + apply (clarsimp simp: table_base_index_eq[simplified bit_simps] bit_simps) + done + +lemmas aligned_distinct_pte_atI' + = aligned_distinct_obj_atI'[where 'a=pte, + simplified, OF _ _ _ refl] + +lemma one_less_2p_pte_bits[simp]: + "(1::machine_word) < 2 ^ pte_bits" + by (simp add: bit_simps) + +lemma pt_apply_upd_eq': + "idx \ mask (ptTranslationBits (pt_type pt)) \ + pt_apply (pt_upd pt (table_index (pt_type pt) p) pte) idx = + (if table_index (pt_type pt) p = idx then pte else pt_apply pt idx)" + unfolding pt_apply_def pt_upd_def + using table_index_mask_eq[where pt_t=NormalPT_T] table_index_mask_eq[where pt_t=VSRootPT_T] + by (cases pt; clarsimp simp: ucast_eq_mask vs_index_ptTranslationBits pt_index_ptTranslationBits + word_le_mask_eq) + +\ \setObject_other_corres unfortunately doesn't work here\ +lemma setObject_PT_corres: + "pte_relation' pte pte' \ + corres dc ((\s. 
pts_of s (table_base pt_t p) = Some pt) and K (is_aligned p pte_bits \ pt_type pt = pt_t) and + pspace_aligned and pspace_distinct) \ + (set_pt (table_base pt_t p) (pt_upd pt (table_index pt_t p) pte)) + (setObject p pte')" + apply (rule corres_cross_over_pte_at[where p=p]) + apply (fastforce simp: pte_at_eq ptes_of_def in_omonad) + apply (simp add: set_pt_def get_object_def bind_assoc set_object_def gets_map_def) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply simp + apply (clarsimp simp: obj_at'_def ko_wp_at'_def typ_at'_def lookupAround2_known1) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp) + apply (simp add: objBits_simps word_bits_def) + apply (clarsimp simp: setObject_def in_monad split_def updateObject_default_def) + apply (simp add: in_magnitude_check objBits_simps a_type_simps) + apply (clarsimp simp: obj_at_def exec_gets) + apply (clarsimp simp: exec_get put_def elim!: opt_mapE) + apply (clarsimp simp: state_relation_def) + apply (rule conjI) + apply (clarsimp simp: pspace_relation_def split del: if_split) + apply (rule conjI) + apply (subst pspace_dom_update, assumption) + apply (simp add: a_type_def) + apply (auto simp: dom_def)[1] + apply (rule conjI) + apply (drule bspec, blast) + apply clarsimp + apply (drule_tac x = x in bspec) + apply simp + apply (rule conjI; clarsimp) + apply (clarsimp simp: pte_relation_def pt_apply_upd_eq') + apply (metis more_pt_inner_beauty) + apply (clarsimp simp: pte_relation_def table_base_index_eq pt_apply_upd_eq' + dest!: more_pt_inner_beauty) + apply (rule ballI) + apply (drule (1) bspec) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: pte_relation_def pt_apply_upd_eq') + apply (metis more_pt_inner_beauty table_base_index_eq) + apply clarsimp + apply (drule bspec, assumption) + apply clarsimp + apply (erule (1) obj_relation_cutsE) + apply simp + apply clarsimp + apply clarsimp + apply (smt (verit, best) pspace_aligned_pts_ofD pts_of_Some pts_of_type_unique aobjs_of_Some + table_base_plus) + apply ((simp split: if_split_asm)+)[2] + apply (simp add: other_obj_relation_def + split: Structures_A.kernel_object.splits arch_kernel_obj.splits) + apply (rule conjI) + apply (clarsimp simp: ekheap_relation_def pspace_relation_def) + apply (drule_tac x=p in bspec, erule domI) + apply (simp add: other_obj_relation_def + split: Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _ _" \ -\) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x="p && ~~ mask (pt_bits (pt_type pt))" in allE)+ + apply fastforce + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pte')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + subgoal by (fastforce dest: tcbs_of'_non_tcb_update) + apply (simp add: map_to_ctes_upd_other) + apply (simp add: fun_upd_def) + apply (simp add: caps_of_state_after_update obj_at_def swp_cte_at_caps_of) + done + +lemma storePTE_corres: + "pte_relation' pte pte' \ + corres dc (pte_at pt_t p and pspace_aligned and pspace_distinct) \ (store_pte pt_t p pte) (storePTE p pte')" + apply (simp add: store_pte_def storePTE_def) + apply (rule corres_assume_pre) + apply (rule corres_symb_exec_l) + apply (rule corres_symb_exec_l[where P="pte_at pt_t p and pspace_aligned and pspace_distinct"]) + apply (rule corres_symb_exec_l) + apply (erule setObject_PT_corres) + prefer 2 + apply (rule assert_inv) + apply 
wpsimp + apply wpsimp + prefer 2 + apply (wpsimp simp: ptes_of_def in_omonad obj_at_def pte_at_def split: if_split_asm) + apply (clarsimp simp: exs_valid_def gets_map_def fst_assert_opt in_omonad ptes_of_def + exec_gets pte_at_def) + apply (wpsimp simp: pte_at_def ptes_of_def in_omonad) + apply (wpsimp simp: pte_at_def2) + apply wpsimp + apply (wpsimp simp: pte_at_def2) + done + +lemmas tableBitSimps[simplified bit_simps pteBits_pte_bits, simplified] = ptBits_def +lemmas bitSimps = tableBitSimps + +lemma bit_simps_corres[simp]: + "ptBits pt_t = pt_bits pt_t" + by (simp add: bit_simps bitSimps) + +defs checkPTAt_def: + "checkPTAt pt_t p \ stateAssert (\s. page_table_at' pt_t p s \ + gsPTTypes (ksArchState s) p = Some pt_t) []" + +lemma pte_relation_must_pte: + "pte_relation m (ArchObj (PageTable pt)) ko \ \pte. ko = (KOArch (KOPTE pte))" + apply (case_tac ko) + apply (simp_all add:pte_relation_def) + apply clarsimp + done + +lemma page_table_at_cross: + "\ pt_at pt_t p s; pspace_aligned s; pspace_distinct s; pspace_relation (kheap s) (ksPSpace s') \ \ + page_table_at' pt_t p s'" + apply (clarsimp simp: page_table_at'_def) + apply (rule context_conjI) + apply (clarsimp simp: obj_at_def) + apply (frule (1) pspace_alignedD) + apply (clarsimp simp: bit_simps split: if_splits) + apply clarsimp + apply (rule pte_at_cross; assumption?) + apply (erule (2) page_table_pte_atI_nicer) + done + +lemma getPTE_wp: + "\\s. \ko. ko_at' (ko::pte) p s \ Q ko s\ getObject p \Q\" + by (clarsimp simp: getObject_def split_def loadObject_default_def in_magnitude_check + in_monad valid_def obj_at'_def objBits_simps) + +lemma check_pt_at_lift: + "corres_inst_eq ptr ptr' \ \s s'. (s, s') \ state_relation \ True \ + (pspace_aligned s \ pspace_distinct s \ pt_at pt_t ptr s \ ptr = ptr') \ + \ s' \ page_table_at' pt_t ptr' s' \ gsPTTypes (ksArchState s') ptr' = Some pt_t" + apply clarsimp + apply (rule conjI, fastforce intro!: page_table_at_cross) + by (fastforce simp: ghost_relation_def obj_at_def elim!: state_relationE) + +lemmas checkPTAt_corres[corresK] = + corres_stateAssert_implied_frame[OF check_pt_at_lift, folded checkPTAt_def] + +lemma lookupPTSlotFromLevel_inv: + "lookupPTSlotFromLevel level pt_ptr vptr \P\" + apply (induct level arbitrary: pt_ptr) + apply (subst lookupPTSlotFromLevel.simps) + apply (wpsimp simp: pteAtIndex_def wp: getPTE_wp) + apply (subst lookupPTSlotFromLevel.simps) + apply (wpsimp simp: pteAtIndex_def checkPTAt_def wp: getPTE_wp|assumption)+ + done + +declare lookupPTSlotFromLevel_inv[wp] + +lemma lookupPTFromLevel_inv[wp]: + "lookupPTFromLevel level pt vptr target_pt \P\" +proof (induct level arbitrary: pt) + case 0 show ?case + by (subst lookupPTFromLevel.simps, simp add: checkPTAt_def, wpsimp) +next + case (Suc level) + show ?case + by (subst lookupPTFromLevel.simps, simp add: checkPTAt_def) + (wpsimp wp: Suc getPTE_wp simp: pteAtIndex_def) +qed + +lemma size_maxPTLevel[simp]: + "size max_pt_level = maxPTLevel" + by (simp add: maxPTLevel_def level_defs) + +lemma ptBitsLeft_0[simp]: + "ptBitsLeft 0 = pageBits" + by (simp add: ptBitsLeft_def) + +lemma ptBitsLeft_eq[simp]: + "ptBitsLeft (size level) = pt_bits_left level" + unfolding ptBitsLeft_def pt_bits_left_def + by (clarsimp simp flip: vm_level.size_less_eq + simp: asid_pool_level_size ptTranslationBits_def maxPTLevel_def + split: if_splits) + +lemma ptIndex_eq[simp]: + "ptIndex (size level) p = pt_index level p" + by (clarsimp simp: ptIndex_def pt_index_def levelType_def + simp flip: size_maxPTLevel level_type_eq(1)) + +lemma 
ptSlotIndex_eq[simp]: + "ptSlotIndex (size level) = pt_slot_offset level" + by (clarsimp intro!: ext simp: ptSlotIndex_def pt_slot_offset_def) + +lemmas ptSlotIndex_0[simp] = ptSlotIndex_eq[where level=0, simplified] + +lemma pteAtIndex_corres: + "level' = size level \ + corres pte_relation' + (pte_at pt_t (pt_slot_offset level pt vptr) and pspace_aligned and pspace_distinct) + \ + (get_pte pt_t (pt_slot_offset level pt vptr)) + (pteAtIndex level' pt vptr)" + by (simp add: pteAtIndex_def) (rule getObject_PTE_corres) + +lemma user_region_or: + "\ vref \ user_region; vref' \ user_region \ \ vref || vref' \ user_region" + by (simp add: user_region_def canonical_user_def le_mask_high_bits word_size) + +lemma lookupPTSlotFromLevel_corres: + "\ level' = size level; pt' = pt; level \ max_pt_level \ \ + corres (\(level, p) (bits, p'). bits = pt_bits_left level \ p' = p) + (pspace_aligned and pspace_distinct and valid_vspace_objs and valid_asid_table and + \\ (level, pt) and K (vptr \ user_region \ level \ max_pt_level)) + \ + (gets_the (pt_lookup_slot_from_level level 0 pt vptr \ ptes_of)) + (lookupPTSlotFromLevel level' pt' vptr)" +proof (induct level arbitrary: pt pt' level') + case 0 + thus ?case by (simp add: lookupPTSlotFromLevel.simps pt_bits_left_def) +next + case (minus level) + from `0 < level` + obtain nlevel where nlevel: "level = nlevel + 1" by (auto intro: that[of "level-1"]) + with `0 < level` + have nlevel1: "nlevel < nlevel + 1" using bit1.pred by fastforce + with nlevel + have level: "size level = Suc (size nlevel)" by simp + + from `0 < level` `level \ max_pt_level` + have level_m1: "level - 1 \ max_pt_level" + by blast + + from level + have levelType[simp]: + "levelType (Suc (size nlevel)) = level_type level" + unfolding levelType_def using vm_level.size_inj + by fastforce + + define vref_step where + "vref_step vref \ vref_for_level vref (level+1) || (pt_index level vptr << pt_bits_left level)" + for vref + + from `level \ max_pt_level` + have vref_for_level_step[simp]: + "vref_for_level (vref_step vref) (level + 1) = vref_for_level vref (level + 1)" + for vref + unfolding vref_step_def + using vref_for_level_pt_index_idem[of level level level vref vptr] by simp + + from `level \ max_pt_level` + have pt_walk_vref[simp]: + "pt_walk max_pt_level level pt (vref_step vref) = + pt_walk max_pt_level level pt vref" for pt vref + by - (rule pt_walk_vref_for_level_eq; simp) + + from `level \ max_pt_level` + have vref_step_user_region[simp]: + "\ vref \ user_region; vptr \ user_region \ \ vref_step vref \ user_region" + for vref + unfolding vref_step_def + using nlevel1 nlevel + by (auto intro!: user_region_or vref_for_level_user_region + simp: pt_bits_left_def bit_simps user_region_def + pt_index_def canonical_user_def word_eqI_simps + dest!: max_pt_level_enum) + + have pt_slot_offset_step[simp]: + "\ is_aligned pt (pt_bits level); vref \ user_region \ \ + pt_slot_offset level pt (vref_step vref) = pt_slot_offset level pt vptr" for vref + unfolding vref_step_def using nlevel1 nlevel + apply simp + apply (clarsimp simp: pt_slot_offset_or_def user_region_def canonical_user_def) + apply (simp add: pt_index_def pt_bits_left_def) + apply (rule conjI; clarsimp) + apply (simp add: plus_one_eq_asid_pool vref_for_level_def pt_bits_left_def) + apply (rule conjI, simp add: max_pt_level_def) + apply (clarsimp simp: level_defs bit_simps maxPTLevel_def) + apply word_eqI_solve + apply (clarsimp simp: vref_for_level_def pt_bits_left_def) + apply (rule conjI; clarsimp) + apply (subgoal_tac "nlevel = 
max_pt_level - 1") + apply (clarsimp simp: level_defs bit_simps maxPTLevel_def split: if_split_asm) + apply word_eqI_solve + apply (subst (asm) add.commute[where a=2]) + apply (drule add_implies_diff) + apply (simp add: max_pt_level_def) + apply (simp add: pt_bits_def) + apply (prop_tac "level_type (nlevel + 1) = NormalPT_T") + apply (drule max_pt_level_enum) + apply (auto simp: level_defs split: if_split_asm)[1] + apply (simp add: bit_simps) + apply word_eqI + apply (drule max_pt_level_enum) + by (auto split: if_split_asm) + + from `0 < level` `level' = size level` `pt' = pt` level `level \ max_pt_level` level_m1 + show ?case + apply (subst pt_lookup_slot_from_level_rec) + apply (simp add: lookupPTSlotFromLevel.simps Let_def obind_comp_dist if_comp_dist + gets_the_if_distrib checkPTAt_def gets_the_oapply2_comp) + apply (rule corres_guard_imp, rule corres_split[where r'=pte_relation']) + apply (rule pteAtIndex_corres, simp) + apply (rule corres_if3) + apply (rename_tac pte pte', case_tac pte; (simp add: isPageTablePTE_def)) + apply (rule corres_stateAssert_implied[where P'=\]) + apply (rule minus(1)) + apply (simp add: nlevel) + apply (clarsimp simp: AARCH64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromPTE_def + paddr_from_ppn_def isPagePTE_def) + apply simp + apply clarsimp + apply (prop_tac "pt_at NormalPT_T (getPPtrFromPTE pte) s") + apply (drule (2) valid_vspace_objs_strongD; assumption?) + apply simp + apply (simp add: state_relation_def) + apply (clarsimp simp: pt_at_eq in_omonad AARCH64_A.is_PageTablePTE_def pptr_from_pte_def + getPPtrFromPTE_def isPagePTE_def paddr_from_ppn_def) + apply (frule page_table_at_cross; assumption?) + apply (fastforce simp: state_relation_def) + apply (fastforce simp: ghost_relation_def obj_at_def elim!: state_relationE) + apply (rule corres_inst[where P=\ and P'=\]) + apply (clarsimp simp: ptSlotIndex_def pt_slot_offset_def pt_index_def pt_bits_left_def + ptIndex_def ptBitsLeft_def) + apply (rule conjI; clarsimp) + apply (metis vm_level.size_less_eq size_maxPTLevel) + apply wpsimp+ + apply (frule (5) vs_lookup_table_is_aligned) + apply (rule conjI) + apply (drule (5) valid_vspace_objs_strongD) + apply (clarsimp simp: pte_at_def obj_at_def ptes_of_def in_omonad) + apply (simp add: pt_slot_offset_def) + apply (rule conjI, fastforce) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (simp add: bit_simps) + apply (rule is_aligned_shiftl, simp) + apply clarsimp + apply (rule_tac x=asid in exI) + apply (rule_tac x="vref_step vref" in exI) + apply (clarsimp simp: vs_lookup_table_def in_omonad split: if_split_asm) + apply (rule conjI) + apply (clarsimp simp: level_defs) + apply (subst pt_walk_split_Some[where level'=level]; simp?) + apply (drule vm_level.pred) + apply simp + apply (subst pt_walk.simps) + apply (simp add: in_omonad) + apply simp + done +qed + +lemma lookupPTSlot_corres: + "corres (\(level, p) (bits, p'). 
bits = pt_bits_left level \ p' = p) + (pspace_aligned and pspace_distinct and valid_vspace_objs + and valid_asid_table and \\(max_pt_level,pt) + and K (vptr \ user_region)) + \ + (gets_the (pt_lookup_slot pt vptr \ ptes_of)) (lookupPTSlot pt vptr)" + unfolding lookupPTSlot_def pt_lookup_slot_def + apply (corresKsimp corres: lookupPTSlotFromLevel_corres) + apply (fastforce simp: vspace_pt_at_eq pt_type_def + dest!: valid_vspace_objs_strongD + split: pt.splits) + done + +lemma lookupPTFromLevel_corres: + "\ level' = size level; pt' = pt \ \ + corres (lfr \ ((=) \ fst)) + (pspace_aligned and pspace_distinct and valid_vspace_objs + and valid_asid_table and \\(level,pt) + and K (vptr \ user_region \ level \ max_pt_level \ pt \ target)) + \ + (pt_lookup_from_level level pt vptr target) + (lookupPTFromLevel level' pt' vptr target)" +proof (induct level arbitrary: level' pt pt') + case 0 + then show ?case + apply (subst lookupPTFromLevel.simps, subst pt_lookup_from_level_simps) + apply simp + apply (rule corres_gen_asm) + apply (simp add: lookup_failure_map_def) + done +next + case (minus level) + + (* FIXME: unfortunate duplication from lookupPTSlotFromLevel_corres *) + from `0 < level` + obtain nlevel where nlevel: "level = nlevel + 1" by (auto intro: that[of "level-1"]) + with `0 < level` + have nlevel1: "nlevel < nlevel + 1" using vm_level.pred by fastforce + with nlevel + have level: "size level = Suc (size nlevel)" by simp + + define vref_step where + "vref_step vref \ + vref_for_level vref (level+1) || (pt_index level vptr << pt_bits_left level)" + for vref + + have vref_for_level_step[simp]: + "level \ max_pt_level \ + vref_for_level (vref_step vref) (level + 1) = vref_for_level vref (level + 1)" + for vref + unfolding vref_step_def + using vref_for_level_pt_index_idem[of level level level vref vptr] by simp + + have pt_walk_vref[simp]: + "level \ max_pt_level \ + pt_walk max_pt_level level pt (vref_step vref) = + pt_walk max_pt_level level pt vref" for pt vref + by (rule pt_walk_vref_for_level_eq; simp) + + have vref_step_user_region[simp]: + "\ vref \ user_region; vptr \ user_region; level \ max_pt_level \ + \ vref_step vref \ user_region" + for vref + unfolding vref_step_def + using nlevel1 nlevel + by (auto intro!: user_region_or vref_for_level_user_region + simp: pt_bits_left_def bit_simps user_region_def + pt_index_def canonical_user_def word_eqI_simps + dest!: max_pt_level_enum) + + have pt_slot_offset_step[simp]: + "\ is_aligned pt (pt_bits level); vref \ user_region \ \ + pt_slot_offset level pt (vref_step vref) = pt_slot_offset level pt vptr" for vref + unfolding vref_step_def using nlevel1 nlevel + apply simp + apply (clarsimp simp: pt_slot_offset_or_def user_region_def canonical_user_def) + apply (simp add: pt_index_def pt_bits_left_def) + apply (rule conjI; clarsimp) + apply (simp add: plus_one_eq_asid_pool vref_for_level_def pt_bits_left_def) + apply (rule conjI, simp add: max_pt_level_def) + apply (clarsimp simp: level_defs bit_simps maxPTLevel_def) + apply word_eqI_solve + apply (clarsimp simp: vref_for_level_def pt_bits_left_def) + apply (rule conjI; clarsimp) + apply (subgoal_tac "nlevel = max_pt_level - 1") + apply (clarsimp simp: level_defs bit_simps maxPTLevel_def split: if_split_asm) + apply word_eqI_solve + apply (subst (asm) add.commute[where a=2]) + apply (drule add_implies_diff) + apply (simp add: max_pt_level_def) + apply (simp add: pt_bits_def) + apply (prop_tac "level_type (nlevel + 1) = NormalPT_T") + apply (drule max_pt_level_enum) + apply (auto simp: 
level_defs split: if_split_asm)[1] + apply (simp add: bit_simps) + apply word_eqI + apply (drule max_pt_level_enum) + by (auto split: if_split_asm) + + note vm_level.size_minus_one[simp] + from minus.prems + show ?case + apply (subst lookupPTFromLevel.simps, subst pt_lookup_from_level_simps) + apply (simp add: unlessE_whenE not_less) + apply (rule corres_gen_asm, simp) + apply (rule corres_initial_splitE[where r'=dc]) + apply (corresKsimp simp: lookup_failure_map_def) + apply (rule corres_splitEE[where r'=pte_relation']) + apply (simp, rule getObject_PTE_corres) + apply (rule whenE_throwError_corres) + apply (simp add: lookup_failure_map_def) + apply (rename_tac pte pte', case_tac pte; simp add: isPageTablePTE_def) + apply (rule corres_if) + apply (clarsimp simp: AARCH64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromPTE_def + paddr_from_ppn_def isPagePTE_def) + apply (rule corres_returnOk[where P=\ and P'=\], simp) + apply (clarsimp simp: checkPTAt_def) + apply (subst liftE_bindE, rule corres_stateAssert_implied[where P'=\]) + apply (rule minus.hyps) + apply (simp add: minus.hyps(2)) + apply (clarsimp simp: AARCH64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromPTE_def + paddr_from_ppn_def isPagePTE_def) + apply clarsimp + apply (prop_tac "pt_at NormalPT_T (pptr_from_pte pte) s") + apply (drule (2) valid_vspace_objs_strongD; assumption?) + apply simp + apply (simp add: state_relation_def) + apply (clarsimp simp: pt_at_eq in_omonad AARCH64_A.is_PageTablePTE_def pptr_from_pte_def + getPPtrFromPTE_def isPagePTE_def paddr_from_ppn_def) + apply (frule page_table_at_cross; assumption?) + apply (fastforce simp: state_relation_def) + apply (clarsimp simp: AARCH64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromPTE_def + paddr_from_ppn_def isPagePTE_def) + apply (fastforce simp: ghost_relation_def obj_at_def elim!: state_relationE) + apply wpsimp+ + apply (simp add: vm_level.neq_0_conv) + apply (frule (5) vs_lookup_table_is_aligned) + apply (rule conjI) + apply (drule (5) valid_vspace_objs_strongD) + apply (clarsimp simp: pte_at_def obj_at_def ptes_of_def in_omonad) + apply (rule conjI, fastforce) + apply (simp add: pt_slot_offset_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (simp add: bit_simps) + apply (rule is_aligned_shiftl, simp) + apply clarsimp + apply (rule_tac x=asid in exI) + apply (rule_tac x="vref_step vref" in exI) + apply (clarsimp simp: vs_lookup_table_def in_omonad split: if_split_asm) + apply (rule conjI) + apply (clarsimp simp: level_defs) + apply (subst pt_walk_split_Some[where level'=level]; simp?) + apply (drule vm_level.pred) + apply simp + apply (subst pt_walk.simps) + apply (simp add: in_omonad) + apply wpsimp + done +qed + +declare in_set_zip_refl[simp] + +crunch typ_at' [wp]: storePTE "\s. P (typ_at' T p s)" + (wp: crunch_wps mapM_x_wp' simp: crunch_simps ignore_del: setObject) + +lemmas storePTE_typ_ats[wp] = typ_at_lifts [OF storePTE_typ_at'] + +lemma setObject_asid_typ_at' [wp]: + "\\s. P (typ_at' T p s)\ setObject p' (v::asidpool) \\_ s. P (typ_at' T p s)\" + by (wp setObject_typ_at') + +lemmas setObject_asid_typ_ats' [wp] = typ_at_lifts [OF setObject_asid_typ_at'] + +lemma getObject_pte_inv[wp]: + "\P\ getObject p \\rv :: pte. 
P\" + by (simp add: getObject_inv loadObject_default_inv) + +lemma corres_gets_global_pt [corres]: + "corres (=) valid_global_arch_objs \ + (gets global_pt) (gets (armKSGlobalUserVSpace \ ksArchState))" + by (clarsimp simp add: state_relation_def arch_state_relation_def) + +lemmas getObject_PTE_corres'[corres] = getObject_PTE_corres[@lift_corres_args] +lemmas storePTE_corres'[corres] = storePTE_corres[@lift_corres_args] + +lemma arch_cap_rights_update: + "acap_relation c c' \ + cap_relation (cap.ArchObjectCap (acap_rights_update (acap_rights c \ msk) c)) + (Arch.maskCapRights (rights_mask_map msk) c')" + apply (cases c, simp_all add: AARCH64_H.maskCapRights_def + acap_rights_update_def Let_def isCap_simps) + apply (simp add: maskVMRights_def vmrights_map_def rights_mask_map_def + validate_vm_rights_def vm_read_write_def vm_read_only_def + vm_kernel_only_def ) + done + +lemma arch_deriveCap_inv: + "\P\ Arch.deriveCap arch_cap u \\rv. P\" + apply (simp add: AARCH64_H.deriveCap_def + cong: if_cong + split del: if_split) + apply (wp undefined_valid) + apply (cases u; simp add: isCap_defs) + done + +lemma arch_deriveCap_valid: + "\valid_cap' (ArchObjectCap arch_cap)\ + Arch.deriveCap u arch_cap + \\rv. valid_cap' rv\,-" + apply (simp add: AARCH64_H.deriveCap_def split del: if_split) + apply (wp undefined_validE_R) + apply (cases arch_cap; simp add: isCap_defs) + apply (simp add: valid_cap'_def capAligned_def capUntypedPtr_def AARCH64_H.capUntypedPtr_def) + done + +lemma mdata_map_simps[simp]: + "mdata_map None = None" + "mdata_map (Some (asid, ref)) = Some (ucast asid, ref)" + by (auto simp add: mdata_map_def) + +lemma arch_deriveCap_corres: + "cap_relation (cap.ArchObjectCap c) (ArchObjectCap c') \ + corres (ser \ (\c c'. cap_relation c c')) + \ \ + (arch_derive_cap c) + (Arch.deriveCap slot c')" + unfolding arch_derive_cap_def AARCH64_H.deriveCap_def Let_def + apply (cases c, simp_all add: isCap_simps split: option.splits split del: if_split) + apply (clarify?, rule corres_noopE; wpsimp)+ + done + +definition + "vmattributes_map \ \R. 
VMAttributes (Execute \ R) (Device \ R)" + +lemma pte_relation'_Invalid_inv [simp]: + "pte_relation' x AARCH64_H.pte.InvalidPTE = (x = AARCH64_A.pte.InvalidPTE)" + by (cases x) auto + +lemma asidHighBitsOf [simp]: + "asidHighBitsOf asid = ucast (asid_high_bits_of (ucast asid))" + by (word_eqI_solve simp: asidHighBitsOf_def asid_high_bits_of_def asidHighBits_def asid_low_bits_def) + +lemma le_mask_asidBits_asid_wf: + "asid_wf asid \ asid \ mask asidBits" + by (simp add: asidBits_def asidHighBits_def asid_wf_def asid_bits_defs mask_def) + +lemma asid_le_mask_asidBits[simp]: + "UCAST(asid_len \ machine_word_len) asid \ mask asidBits" + by (rule ucast_leq_mask, simp add: asidBits_def asidHighBits_def asid_low_bits_def) + +lemma asid_case_zero[simp]: + "0 < asid \ 0 < UCAST(asid_len \ machine_word_len) asid" + by word_bitwise + +lemma find_vspace_for_asid_rewite: + "find_vspace_for_asid asid = + doE + unlessE (0 < asid) $ throwError ExceptionTypes_A.InvalidRoot; + entry_opt \ liftE $ gets (entry_for_asid asid); + case entry_opt of + Some entry \ returnOk (ap_vspace entry) + | None \ throwError ExceptionTypes_A.InvalidRoot + odE" + unfolding find_vspace_for_asid_def vspace_for_asid_def + apply (cases "0 < asid") + apply simp (* rewrite unlessE before unfolding things *) + apply (fastforce simp: bindE_def throw_opt_def liftE_def simpler_gets_def bind_def return_def + obind_None_eq + split: option.splits) + apply (simp add: liftE_def simpler_gets_def bindE_def bind_def return_def throw_opt_def + throwError_def) + done + +lemma getPoolPtr_corres: + "corres (=) (K (0 < asid)) \ (gets (pool_for_asid asid)) (getPoolPtr (ucast asid))" + unfolding pool_for_asid_def getPoolPtr_def asidRange_def + apply simp + apply corres_pre + apply (rule corres_assert_gen_asm) + apply (rule corres_assert_gen_asm) + apply (rule corres_trivial) + apply (clarsimp simp: gets_return_gets_eq state_relation_def arch_state_relation_def + ucast_up_ucast_id is_up) + apply (simp flip: mask_eq_exp_minus_1) + apply simp + done + +lemma getASIDPoolEntry_corres: + "corres (\r r'. 
r = map_option abs_asid_entry r') + (valid_vspace_objs and valid_asid_table and pspace_aligned and pspace_distinct + and K (0 < asid)) + (no_0_obj') + (gets (entry_for_asid asid)) + (getASIDPoolEntry (ucast asid))" + unfolding entry_for_asid_def getASIDPoolEntry_def K_def + apply (rule corres_gen_asm) + apply (clarsimp simp: gets_obind_bind_eq entry_for_pool_def obind_comp_dist + cong: option.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="(=)"]) + apply (rule getPoolPtr_corres) + apply (rule_tac x=pool_ptr and x'=poolPtr in option_corres) + apply (rule corres_trivial, simp) + apply clarsimp + apply (rule monadic_rewrite_corres_l) + apply (monadic_rewrite_l gets_oapply_liftM_rewrite) + apply (rule monadic_rewrite_refl) + apply (clarsimp simp: liftM_def) + apply (rule corres_split[OF getObject_ASIDPool_corres[OF refl]]) + apply (rule corres_trivial) + apply (case_tac rv', clarsimp) + apply (clarsimp simp: asid_pool_relation_def asid_low_bits_of_def ucast_ucast_mask2 + is_down asid_low_bits_def ucast_and_mask) + apply wpsimp+ + apply (drule (1) pool_for_asid_validD) + apply (simp add: asid_pools_at_eq) + apply simp + done + +lemma no_0_page_table: + "\ no_0_obj' s; page_table_at' pt_t 0 s \ \ False" + apply (clarsimp simp: page_table_at'_def) + apply (erule_tac x=0 in allE) + apply simp + done + +crunches getASIDPoolEntry + for no_0_obj'[wp]: no_0_obj' + (wp: getObject_inv simp: loadObject_default_def) + +lemma findVSpaceForASID_corres: + assumes "asid' = ucast asid" + shows "corres (lfr \ (=)) + (valid_vspace_objs and valid_asid_table + and pspace_aligned and pspace_distinct + and K (0 < asid)) + (no_0_obj') + (find_vspace_for_asid asid) (findVSpaceForASID asid')" (is "corres _ ?P ?Q _ _") + using assms + apply (simp add: findVSpaceForASID_def) + apply (rule corres_gen_asm) + apply (subst find_vspace_for_asid_rewite) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_initial_splitE[where r'="\r r'. r = map_option abs_asid_entry r'"]) + apply simp + apply (rule getASIDPoolEntry_corres) + apply (rule_tac Q="\entry s. pspace_aligned s \ pspace_distinct s \ + vspace_pt_at (ap_vspace entry) s" + in option_corres[where P=\ and P'=\ and Q'="\_. no_0_obj'"]) + apply (clarsimp simp: lookup_failure_map_def) + apply (rename_tac entry entry') + apply (case_tac entry') + apply (clarsimp simp: checkPTAt_def abs_asid_entry_def) + apply (rename_tac p) + apply (rule_tac Q="\s. page_table_at' VSRootPT_T p s \ + gsPTTypes (ksArchState s) p = Some VSRootPT_T \ + no_0_obj' s" in corres_cross_over_guard) + apply clarsimp + apply (rule conjI) + apply (erule (2) page_table_at_cross, simp add: state_relation_def) + apply (fastforce simp: ghost_relation_def obj_at_def elim!: state_relationE) + apply (simp add: liftE_bindE assertE_liftE) + apply (rule corres_assert_assume) + apply (rule corres_stateAssert_assume) + apply (rule corres_returnOk, simp) + apply clarsimp + apply (fastforce dest: no_0_page_table) + apply simp + apply wpsimp + apply (clarsimp simp: entry_for_asid_def) + apply (drule (2) pool_for_asid_valid_vspace_objs) + apply (fastforce simp: entry_for_pool_def) + apply (wpsimp wp: hoare_drop_imps)+ + done + +lemma setObject_arch: + assumes X: "\p q n ko. \\s. P (ksArchState s)\ updateObject val p q n ko \\rv s. P (ksArchState s)\" + shows "\\s. P (ksArchState s)\ setObject t val \\rv s. P (ksArchState s)\" + apply (simp add: setObject_def split_def) + apply (wp X | simp)+ + done + +lemma setObject_ASID_arch [wp]: + "\\s. 
P (ksArchState s)\ setObject p (v::asidpool) \\_ s. P (ksArchState s)\" + apply (rule setObject_arch) + apply (simp add: updateObject_default_def) + apply wp + apply simp + done + +lemma setObject_PTE_arch [wp]: + "\\s. P (ksArchState s)\ setObject p (v::pte) \\_ s. P (ksArchState s)\" + apply (rule setObject_arch) + apply (simp add: updateObject_default_def) + apply wp + apply simp + done + +lemma setObject_ASID_valid_arch [wp]: + "setObject p (v::asidpool) \valid_arch_state'\" + by (wpsimp wp: valid_arch_state_lift' setObject_ko_wp_at) + (auto simp: objBits_simps pageBits_def is_vcpu'_def ko_wp_at'_def obj_at'_def) + +lemma setObject_PTE_valid_arch [wp]: + "\valid_arch_state'\ setObject p (v::pte) \\_. valid_arch_state'\" + by (wpsimp wp: valid_arch_state_lift' setObject_typ_at' setObject_ko_wp_at) + (auto simp: objBits_simps pageBits_def is_vcpu'_def ko_wp_at'_def obj_at'_def) + +lemma setObject_ASID_ct [wp]: + "\\s. P (ksCurThread s)\ setObject p (e::asidpool) \\_ s. P (ksCurThread s)\" + apply (simp add: setObject_def updateObject_default_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_pte_ct [wp]: + "\\s. P (ksCurThread s)\ setObject p (e::pte) \\_ s. P (ksCurThread s)\" + apply (simp add: setObject_def updateObject_default_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_ASID_cur_tcb' [wp]: + "\\s. cur_tcb' s\ setObject p (e::asidpool) \\_ s. cur_tcb' s\" + apply (simp add: cur_tcb'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply wp+ + done + +lemma setObject_pte_cur_tcb' [wp]: + "\\s. cur_tcb' s\ setObject p (e::pte) \\_ s. cur_tcb' s\" + apply (simp add: cur_tcb'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply wp+ + done + +lemma getASID_wp: + "\\s. \ko. ko_at' (ko::asidpool) p s \ Q ko s\ getObject p \Q\" + by (clarsimp simp: getObject_def split_def loadObject_default_def + in_magnitude_check pageBits_def in_monad valid_def obj_at'_def objBits_simps) + +lemma storePTE_ctes [wp]: + "\\s. P (ctes_of s)\ storePTE p pte \\_ s. P (ctes_of s)\" + apply (rule ctes_of_from_cte_wp_at [where Q=\, simplified]) + apply (rule storePTE_cte_wp_at') + done + +lemma setObject_ASID_cte_wp_at'[wp]: + "\\s. P (cte_wp_at' P' p s)\ + setObject ptr (asid::asidpool) + \\rv s. P (cte_wp_at' P' p s)\" + apply (wp setObject_cte_wp_at2'[where Q="\"]) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply (rule equals0I) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply simp + done + +lemma setObject_ASID_ctes_of'[wp]: + "\\s. P (ctes_of s)\ + setObject ptr (asid::asidpool) + \\rv s. P (ctes_of s)\" + by (rule ctes_of_from_cte_wp_at [where Q=\, simplified]) wp + +lemma clearMemory_vms': + "valid_machine_state' s \ + \x\fst (clearMemory ptr bits (ksMachineState s)). + valid_machine_state' (s\ksMachineState := snd x\)" + apply (clarsimp simp: valid_machine_state'_def + disj_commute[of "pointerInUserData p s" for p s]) + apply (drule_tac x=p in spec, simp) + apply (drule_tac P4="\m'. underlying_memory m' p = 0" + in use_valid[where P=P and Q="\_. P" for P], simp_all) + apply (rule clearMemory_um_eq_0) + done + +lemma dmo_clearMemory_invs'[wp]: + "\invs'\ doMachineOp (clearMemory w sz) \\_. 
invs'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def) + apply (rule conjI) + apply (simp add: valid_irq_masks'_def, elim allEI, clarsimp) + apply (drule use_valid) + apply (rule no_irq_clearMemory[simplified no_irq_def, rule_format]) + apply simp_all + apply (drule clearMemory_vms') + apply fastforce + done + +end +end diff --git a/proof/refine/AARCH64/ArchMove_R.thy b/proof/refine/AARCH64/ArchMove_R.thy new file mode 100644 index 0000000000..7779fdb2bc --- /dev/null +++ b/proof/refine/AARCH64/ArchMove_R.thy @@ -0,0 +1,43 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch specific lemmas that should be moved into theory files before Refine *) + +theory ArchMove_R +imports + Move_R +begin + +(* Use one of these forms everywhere, rather than choosing at random. *) +lemmas cte_index_repair = mult.commute[where a="(2::'a::len word) ^ cte_level_bits"] +lemmas cte_index_repair_sym = cte_index_repair[symmetric] + +lemma invs_valid_ioc[elim!]: "invs s \ valid_ioc s" + by (clarsimp simp add: invs_def valid_state_def) + +context begin interpretation Arch . + +lemma get_pt_mapM_x_lower: + assumes g: "\P pt x. \ \s. P (kheap s pt_ptr) \ g pt x \ \_ s. P (kheap s pt_ptr) \" + assumes y: "ys \ []" + notes [simp] = gets_map_def get_object_def gets_def get_def bind_def return_def + assert_opt_def fail_def opt_map_def + shows "do pt \ get_pt pt_ptr; mapM_x (g pt) ys od + = mapM_x (\y. get_pt pt_ptr >>= (\pt. g pt y)) ys" + apply (rule get_mapM_x_lower + [where P="\opt_pt s. case kheap s pt_ptr of + Some (ArchObj (PageTable pt)) \ opt_pt = Some pt + | _ \ opt_pt = None", + OF _ _ _ y]) + apply (wp g) + apply (case_tac "kheap s pt_ptr"; simp; rename_tac ko; case_tac ko; simp; + rename_tac ako; case_tac ako; simp)+ + done + +end + +end diff --git a/proof/refine/AARCH64/Arch_R.thy b/proof/refine/AARCH64/Arch_R.thy new file mode 100644 index 0000000000..a7496ad8a3 --- /dev/null +++ b/proof/refine/AARCH64/Arch_R.thy @@ -0,0 +1,2136 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Top level architecture related proofs. +*) + +theory Arch_R +imports Untyped_R Finalise_R +begin + +unbundle l4v_word_context + +lemmas [datatype_schematic] = cap.sel list.sel(1) list.sel(3) + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare arch_cap.sel [datatype_schematic] +declare is_aligned_shiftl [intro!] +declare is_aligned_shiftr [intro!] + +definition + "asid_ci_map i \ + case i of AARCH64_A.MakePool frame slot parent base \ + AARCH64_H.MakePool frame (cte_map slot) (cte_map parent) (ucast base)" + +definition + "valid_aci' aci \ case aci of MakePool frame slot parent base \ + \s. cte_wp_at' (\c. cteCap c = NullCap) slot s \ + cte_wp_at' (\cte. \idx. cteCap cte = UntypedCap False frame pageBits idx) parent s \ + descendants_of' parent (ctes_of s) = {} \ + slot \ parent \ + ex_cte_cap_to' slot s \ + sch_act_simple s \ + is_aligned base asid_low_bits \ asid_wf base" + +lemma vp_strgs': + "valid_pspace' s \ pspace_distinct' s" + "valid_pspace' s \ pspace_aligned' s" + "valid_pspace' s \ valid_mdb' s" + by auto + +lemma safe_parent_strg': + "cte_wp_at' (\cte. 
cteCap cte = UntypedCap False frame pageBits idx) p s \ + descendants_of' p (ctes_of s) = {} \ + valid_pspace' s + \ safe_parent_for' (ctes_of s) p (ArchObjectCap (ASIDPoolCap frame base))" + apply (clarsimp simp: safe_parent_for'_def cte_wp_at_ctes_of) + apply (case_tac cte) + apply (simp add: isCap_simps) + apply (subst conj_comms) + apply (rule context_conjI) + apply (drule ctes_of_valid_cap', fastforce) + apply (clarsimp simp: valid_cap'_def capAligned_def) + apply (drule is_aligned_no_overflow) + apply (clarsimp simp: capRange_def asid_low_bits_def bit_simps) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps capRange_def asid_low_bits_def bit_simps) + done + +lemma descendants_of'_helper: + "\P\ f \\r s. Q (descendants_of' t (null_filter' (ctes_of s)))\ + \ \P\ f \\r s. Q (descendants_of' t (ctes_of s))\" + apply (clarsimp simp:valid_def) + apply (subst null_filter_descendants_of') + prefer 2 + apply fastforce + apply simp + done + +lemma createObject_typ_at': + "\\s. koTypeOf ty = otype \ is_aligned ptr (objBitsKO ty) \ + pspace_aligned' s \ pspace_no_overlap' ptr (objBitsKO ty) s\ + createObjects' ptr (Suc 0) ty 0 + \\rv s. typ_at' otype ptr s\" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (clarsimp simp:createObjects'_def alignError_def split_def | wp unless_wp | wpc )+ + apply (clarsimp simp:obj_at'_def ko_wp_at'_def typ_at'_def pspace_distinct'_def)+ + apply (subgoal_tac "ps_clear ptr (objBitsKO ty) + (s\ksPSpace := \a. if a = ptr then Some ty else ksPSpace s a\)") + apply (simp add:ps_clear_def)+ + apply (rule ccontr) + apply (drule int_not_emptyD) + apply clarsimp + apply (unfold pspace_no_overlap'_def) + apply (erule allE)+ + apply (erule(1) impE) + apply (subgoal_tac "x \ mask_range x (objBitsKO y)") + apply (fastforce simp: is_aligned_neg_mask_eq) + apply (drule(1) pspace_alignedD') + apply (clarsimp simp: is_aligned_no_overflow_mask) + done + +lemma retype_region2_ext_retype_region_ArchObject: + "retype_region ptr n us (ArchObject x)= + retype_region2 ptr n us (ArchObject x)" + apply (rule ext) + apply (simp add: retype_region_def retype_region2_def bind_assoc + retype_region2_ext_def retype_region_ext_def default_ext_def) + apply (rule ext) + apply (intro monad_eq_split_tail ext)+ + apply simp + apply simp + apply (simp add:gets_def get_def bind_def return_def simpler_modify_def ) + apply (rule_tac x = xc in fun_cong) + apply (rule_tac f = do_extended_op in arg_cong) + apply (rule ext) + apply simp + apply simp + done + +lemma set_cap_device_and_range_aligned: + "is_aligned ptr sz \ \\_. True\ + set_cap + (cap.UntypedCap dev ptr sz idx) + aref + \\rv s. + \slot. + cte_wp_at + (\c. 
cap_is_device c = dev \ + up_aligned_area ptr sz \ cap_range c) + slot s\" + apply (subst is_aligned_neg_mask_eq[symmetric]) + apply simp + apply (wp set_cap_device_and_range) + done + +lemma performASIDControlInvocation_corres: + "asid_ci_map i = i' \ + corres dc + (einvs and ct_active and valid_aci i and schact_is_rct) + (invs' and ct_active' and valid_aci' i') + (perform_asid_control_invocation i) + (performASIDControlInvocation i')" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (cases i) + apply (rename_tac word1 prod1 prod2 word2) + apply (clarsimp simp: asid_ci_map_def) + apply (simp add: perform_asid_control_invocation_def placeNewObject_def2 + performASIDControlInvocation_def) + apply (rule corres_name_pre) + apply (clarsimp simp:valid_aci_def valid_aci'_def cte_wp_at_ctes_of cte_wp_at_caps_of_state) + apply (subgoal_tac "valid_cap' (capability.UntypedCap False word1 pageBits idx) s'") + prefer 2 + apply (case_tac ctea) + apply clarsimp + apply (erule ctes_of_valid_cap') + apply fastforce + apply (frule valid_capAligned) + apply (clarsimp simp: capAligned_def) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (erule deleteObjects_corres) + apply (simp add:pageBits_def) + apply (rule corres_split[OF getSlotCap_corres], simp) + apply (rule_tac F = " pcap = (cap.UntypedCap False word1 pageBits idxa)" in corres_gen_asm) + apply (rule corres_split[OF updateFreeIndex_corres]) + apply (clarsimp simp:is_cap_simps) + apply (simp add: free_index_of_def) + apply (rule corres_split) + apply (simp add: retype_region2_ext_retype_region_ArchObject ) + apply (rule corres_retype [where ty="Inl (KOArch (KOASIDPool F))" for F, + unfolded APIType_map2_def makeObjectKO_def, + THEN createObjects_corres',simplified, + where val = "makeObject::asidpool"]) + apply simp + apply (simp add: objBits_simps obj_bits_api_def arch_kobj_size_def + default_arch_object_def bit_simps)+ + apply (simp add: obj_relation_retype_def default_object_def + default_arch_object_def objBits_simps) + apply (simp add: other_obj_relation_def asid_pool_relation_def) + apply (simp add: makeObject_asidpool const_def inv_def) + apply (rule range_cover_full) + apply (simp add: obj_bits_api_def arch_kobj_size_def default_arch_object_def bit_simps + word_bits_def)+ + apply (rule corres_split) + apply (rule cteInsert_simple_corres, simp, rule refl, rule refl) + apply (rule_tac F="asid_low_bits_of word2 = 0" in corres_gen_asm) + apply (simp add: is_aligned_mask dc_def[symmetric]) + apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. 
t = t' o ucast"]) + apply (clarsimp simp: state_relation_def arch_state_relation_def) + apply (rule corres_trivial) + apply (rule corres_modify) + apply (thin_tac "x \ state_relation" for x) + apply (clarsimp simp: state_relation_def arch_state_relation_def o_def) + apply (rule ext) + apply (clarsimp simp: up_ucast_inj_eq) + apply wp+ + apply (strengthen safe_parent_strg[where idx = "2^pageBits"]) + apply (strengthen invs_valid_objs invs_distinct + invs_psp_aligned invs_mdb + | simp cong:conj_cong)+ + apply (wp retype_region_plain_invs[where sz = pageBits] + retype_cte_wp_at[where sz = pageBits])+ + apply (strengthen vp_strgs' + safe_parent_strg'[where idx = "2^pageBits"]) + apply (simp cong: conj_cong) + apply (wp createObjects_valid_pspace' + [where sz = pageBits and ty="Inl (KOArch (KOASIDPool undefined))"]) + apply (simp add: makeObjectKO_def)+ + apply (simp add:objBits_simps range_cover_full valid_cap'_def canonical_address_and)+ + apply (clarsimp simp:valid_cap'_def) + apply (wp createObject_typ_at' + createObjects_orig_cte_wp_at'[where sz = pageBits]) + apply (rule descendants_of'_helper) + apply (wp createObjects_null_filter' + [where sz = pageBits and ty="Inl (KOArch (KOASIDPool undefined))"]) + apply (clarsimp simp: conj_comms obj_bits_api_def arch_kobj_size_def + objBits_simps default_arch_object_def pred_conj_def) + apply (clarsimp simp: conj_comms + | strengthen invs_mdb invs_valid_pspace)+ + apply (simp add:region_in_kernel_window_def) + apply (wp set_untyped_cap_invs_simple[where sz = pageBits] + set_cap_cte_wp_at + set_cap_caps_no_overlap[where sz = pageBits] + set_cap_no_overlap + set_cap_device_and_range_aligned[where dev = False,simplified] + set_untyped_cap_caps_overlap_reserved[where sz = pageBits])+ + apply (clarsimp simp: conj_comms obj_bits_api_def arch_kobj_size_def + objBits_simps default_arch_object_def pred_conj_def + makeObjectKO_def range_cover_full + simp del: capFreeIndex_update.simps + | strengthen invs_valid_pspace' invs_pspace_aligned' + invs_pspace_distinct' + exI[where x="makeObject :: asidpool"])+ + apply (wp updateFreeIndex_forward_invs' + updateFreeIndex_pspace_no_overlap' + updateFreeIndex_caps_no_overlap'' + updateFreeIndex_descendants_of2 + updateFreeIndex_cte_wp_at + updateFreeIndex_caps_overlap_reserved + | simp add: descendants_of_null_filter' split del: if_split)+ + apply (wp get_cap_wp)+ + apply (subgoal_tac "word1 && ~~ mask pageBits = word1 \ pageBits \ word_bits \ word_size_bits \ pageBits") + prefer 2 + apply (clarsimp simp:bit_simps word_bits_def is_aligned_neg_mask_eq) + apply (simp only:delete_objects_rewrite) + apply wp+ + apply (clarsimp simp: conj_comms) + apply (clarsimp simp: conj_comms ex_disj_distrib + | strengthen invs_valid_pspace' invs_pspace_aligned' + invs_pspace_distinct')+ + apply (wp deleteObjects_invs'[where p="makePoolParent i'"] + deleteObjects_cte_wp_at' + deleteObjects_descendants[where p="makePoolParent i'"]) + apply (clarsimp split del: if_split simp:valid_cap'_def) + apply (wp hoare_vcg_ex_lift + deleteObjects_caps_no_overlap''[where slot="makePoolParent i'"] + deleteObject_no_overlap + deleteObjects_ct_active'[where cref="makePoolParent i'"]) + apply (clarsimp simp: is_simple_cap_def valid_cap'_def max_free_index_def is_cap_simps + cong: conj_cong) + apply (strengthen empty_descendants_range_in') + apply (wp deleteObjects_descendants[where p="makePoolParent i'"] + deleteObjects_cte_wp_at' + deleteObjects_null_filter[where p="makePoolParent i'"]) + apply (clarsimp simp:invs_mdb max_free_index_def 
invs_untyped_children) + apply (subgoal_tac "detype_locale x y sa" for x y) + prefer 2 + apply (simp add:detype_locale_def cte_wp_at_caps_of_state) + apply (thin_tac "caps_of_state s p = Some cap.NullCap" for s p) + apply (fastforce simp: cte_wp_at_caps_of_state descendants_range_def2 + empty_descendants_range_in invs_untyped_children) + apply (intro conjI) + apply (clarsimp) + apply (erule(1) caps_of_state_valid) + subgoal by (fastforce simp:cte_wp_at_caps_of_state descendants_range_def2 empty_descendants_range_in) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems by (clarsimp simp:invs_def valid_state_def)+ + apply (clarsimp simp: schact_is_rct_def) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (drule detype_locale.non_null_present) + apply (fastforce simp: cte_wp_at_caps_of_state) + apply simp + apply (frule_tac ptr = "(aa,ba)" in detype_invariants [rotated 3]) + apply fastforce + apply simp + apply (simp add: cte_wp_at_caps_of_state) + apply (simp add: is_cap_simps) + apply (simp add:empty_descendants_range_in descendants_range_def2) + apply (frule intvl_range_conv[where bits = pageBits]) + apply (clarsimp simp:pageBits_def word_bits_def) + apply (clarsimp simp: invs_valid_objs cte_wp_at_caps_of_state range_cover_full + invs_psp_aligned invs_distinct cap_master_cap_simps is_cap_simps + is_simple_cap_def) + apply (clarsimp simp: conj_comms) + apply (rule conjI, clarsimp simp: is_aligned_asid_low_bits_of_zero) + apply (frule ex_cte_cap_protects) + apply (simp add:cte_wp_at_caps_of_state) + apply (simp add:empty_descendants_range_in) + apply fastforce + apply (rule subset_refl) + apply fastforce + apply (clarsimp simp: is_simple_cap_arch_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp simp: clear_um_def) + apply (simp add:detype_clear_um_independent) + apply (rule conjI) + apply clarsimp + apply (drule_tac p = "(aa,ba)" in cap_refs_in_kernel_windowD2[OF caps_of_state_cteD]) + apply fastforce + apply (clarsimp simp: region_in_kernel_window_def valid_cap_def + cap_aligned_def is_aligned_neg_mask_eq detype_def clear_um_def) + apply fastforce + apply (rule conjI,erule caps_no_overlap_detype[OF descendants_range_caps_no_overlapI]) + apply (clarsimp simp:is_aligned_neg_mask_eq cte_wp_at_caps_of_state) + apply (simp add:empty_descendants_range_in)+ + apply (rule conjI, rule pspace_no_overlap_subset, + rule pspace_no_overlap_detype[OF caps_of_state_valid]) + apply (simp add:invs_psp_aligned invs_valid_objs is_aligned_neg_mask_eq)+ + apply (clarsimp simp: detype_def clear_um_def detype_ext_def valid_sched_def valid_etcbs_def + st_tcb_at_kh_def obj_at_kh_def st_tcb_at_def obj_at_def is_etcb_at_def + wrap_ext_det_ext_ext_def) + apply (simp add: detype_def clear_um_def) + apply (drule_tac x = "cte_map (aa,ba)" in pspace_relation_cte_wp_atI[OF state_relation_pspace_relation]) + apply (simp add:invs_valid_objs)+ + apply clarsimp + apply (drule cte_map_inj_eq) + apply ((fastforce simp:cte_wp_at_caps_of_state)+)[5] + apply (clarsimp simp:cte_wp_at_caps_of_state invs_valid_pspace' conj_comms cte_wp_at_ctes_of + valid_cap_simps') + apply (strengthen refl) + apply clarsimp + apply (frule empty_descendants_range_in') + apply (intro conjI, + simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2 + null_filter_descendants_of'[OF null_filter_simp'] + capAligned_def asid_low_bits_def) + apply (erule descendants_range_caps_no_overlapI') + apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq) + apply (simp add:empty_descendants_range_in') 
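+    (* The goals remaining at this point are, roughly, the Haskell-side (primed) side conditions for ASIDControlMakePool: alignment facts and, via cte_cap_in_untyped_range, that no CTE caps point into the region being retyped. *)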
+ apply (simp add:word_bits_def bit_simps) + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) + apply (simp add:pageBits_def) + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range) + apply (fastforce simp: cte_wp_at_ctes_of) + apply assumption+ + apply fastforce + apply simp + apply clarsimp + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) + apply assumption+ + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) + apply fastforce + apply simp + apply clarsimp + done + +definition vcpu_invocation_map :: "vcpu_invocation \ vcpuinvocation" where + "vcpu_invocation_map vcpui \ case vcpui of + vcpu_invocation.VCPUSetTCB v t \ VCPUSetTCB v t + | vcpu_invocation.VCPUInjectIRQ obj n vreg \ VCPUInjectIRQ obj n vreg + | vcpu_invocation.VCPUReadRegister obj vreg \ VCPUReadRegister obj vreg + | vcpu_invocation.VCPUWriteRegister obj vreg word \ VCPUWriteRegister obj vreg word + | vcpu_invocation.VCPUAckVPPI obj irq \ VCPUAckVPPI obj irq" + +(* FIXME AARCH64: move to VSpace_R where page_table_invocation_map is *) +definition + "vspace_invocation_map vsi vsi' \ + case vsi of + AARCH64_A.VSpaceNothing \ vsi' = VSpaceNothing + | AARCH64_A.VSpaceFlush ty start end pstart space asid \ + vsi' = VSpaceFlush ty start end pstart space (ucast asid)" + +(* FIXME AARCH64: move to VSpace_R where valid_psi is *) +definition + "valid_vsi' vsi \ + case vsi of + VSpaceNothing \ \ + | VSpaceFlush ty start end pstart space asid \ \" + +definition + archinv_relation :: "arch_invocation \ Arch.invocation \ bool" +where + "archinv_relation ai ai' \ case ai of + arch_invocation.InvokeVSpace vsi \ + \vsi'. ai' = InvokeVSpace vsi' \ vspace_invocation_map vsi vsi' + | arch_invocation.InvokePageTable pti \ + \pti'. ai' = InvokePageTable pti' \ page_table_invocation_map pti pti' + | arch_invocation.InvokePage pgi \ + \pgi'. ai' = InvokePage pgi' \ page_invocation_map pgi pgi' + | arch_invocation.InvokeASIDControl aci \ + \aci'. ai' = InvokeASIDControl aci' \ aci' = asid_ci_map aci + | arch_invocation.InvokeASIDPool ap \ + \ap'. ai' = InvokeASIDPool ap' \ ap' = asid_pool_invocation_map ap + | arch_invocation.InvokeVCPU vcpui \ + \vcpui'. 
ai' = InvokeVCPU vcpui' \ vcpui' = vcpu_invocation_map vcpui" + +definition + valid_arch_inv' :: "Arch.invocation \ kernel_state \ bool" +where + "valid_arch_inv' ai \ case ai of + InvokeVSpace vsi \ valid_vsi' vsi + | InvokePageTable pti \ valid_pti' pti + | InvokePage pgi \ valid_page_inv' pgi + | InvokeASIDControl aci \ valid_aci' aci + | InvokeASIDPool ap \ valid_apinv' ap + | InvokeVCPU v \ valid_vcpuinv' v" + +lemma mask_vmrights_corres: + "maskVMRights (vmrights_map R) (rightsFromWord d) = + vmrights_map (mask_vm_rights R (data_to_rights d))" + by (clarsimp simp: rightsFromWord_def data_to_rights_def + vmrights_map_def Let_def maskVMRights_def + mask_vm_rights_def nth_ucast + validate_vm_rights_def vm_read_write_def + vm_kernel_only_def vm_read_only_def + split: bool.splits) + +lemma vm_attributes_corres: + "vmattributes_map (attribs_from_word w) = attribsFromWord w" + by (clarsimp simp: attribsFromWord_def attribs_from_word_def + Let_def vmattributes_map_def) + +lemma checkVPAlignment_corres: + "corres (ser \ dc) \ \ + (check_vp_alignment sz w) + (checkVPAlignment sz w)" + apply (simp add: check_vp_alignment_def checkVPAlignment_def) + apply (cases sz, simp_all add: corres_returnOk unlessE_whenE is_aligned_mask) + apply ((rule corres_guard_imp, rule corres_whenE, rule refl, auto)[1])+ + done + +lemma checkVP_wpR [wp]: + "\\s. vmsz_aligned w sz \ P () s\ + checkVPAlignment sz w \P\, -" + apply (simp add: checkVPAlignment_def unlessE_whenE cong: vmpage_size.case_cong) + apply (rule hoare_pre) + apply (wp whenE_wp|wpc)+ + apply (simp add: is_aligned_mask vmsz_aligned_def) + done + +lemma asidHighBits [simp]: + "asidHighBits = asid_high_bits" + by (simp add: asidHighBits_def asid_high_bits_def) + +declare word_unat_power [symmetric, simp del] + +lemma ARMMMU_improve_cases: + "(if isFrameCap cap then Q + else if isPageTableCap cap \ capPTType cap = NormalPT_T then R + else if isPageTableCap cap \ capPTType cap = VSRootPT_T then S + else if isASIDControlCap cap then T + else if isASIDPoolCap cap then U + else if isVCPUCap cap then V + else undefined) + = + (if isFrameCap cap then Q + else if isPageTableCap cap \ capPTType cap = NormalPT_T then R + else if isPageTableCap cap \ capPTType cap = VSRootPT_T then S + else if isASIDControlCap cap then T + else if isASIDPoolCap cap then U + else V)" + apply (cases cap; simp add: isCap_simps) + apply (rename_tac pt_t m) + apply (case_tac pt_t; simp) + done + +crunch inv[wp]: "AARCH64_H.decodeInvocation" "P" + (wp: crunch_wps mapME_x_inv_wp getASID_wp hoare_vcg_imp_lift' + simp: crunch_simps ARMMMU_improve_cases) + +lemma case_option_corresE: + assumes nonec: "corres r Pn Qn (nc >>=E f) (nc' >>=E g)" + and somec: "\v'. corres r (Ps v') (Qs v') (sc v' >>=E f) (sc' v' >>=E g)" + shows "corres r (case_option Pn Ps v) (case_option Qn Qs v) (case_option nc sc v >>=E f) (case_option nc' sc' v >>=E g)" + apply (cases v) + apply simp + apply (rule nonec) + apply simp + apply (rule somec) + done + + +lemma cap_relation_Untyped_eq: + "cap_relation c (UntypedCap d p sz f) = (c = cap.UntypedCap d p sz f)" + by (cases c) auto + +declare check_vp_alignment_inv[wp del] + +lemma select_ext_fa: + "free_asid_select asid_tbl \ S + \ ((select_ext (\_. free_asid_select asid_tbl) S) :: _ det_ext_monad) + = return (free_asid_select asid_tbl)" + by (simp add: select_ext_def get_def gets_def bind_def assert_def return_def fail_def) + +lemma select_ext_fap: + "free_asid_pool_select p b \ S + \ ((select_ext (\_. 
free_asid_pool_select p b) S) :: _ det_ext_monad) + = return (free_asid_pool_select p b)" + by (simp add: select_ext_def get_def gets_def bind_def assert_def return_def) + +lemmas vmsz_aligned_imp_aligned + = vmsz_aligned_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN is_aligned_weaken] + +lemma vmrights_map_vm_kernel_only[simp]: + "vmrights_map vm_kernel_only = VMKernelOnly" + by (simp add: vmrights_map_def vm_kernel_only_def) + +lemma not_in_vm_kernel_only[simp]: + "x \ vm_kernel_only" + by (simp add: vm_kernel_only_def) + +lemma vmrights_map_VMKernelOnly: + "vmrights_map (mask_vm_rights R r) = VMKernelOnly \ mask_vm_rights R r = vm_kernel_only" + by (auto simp: vmrights_map_def mask_vm_rights_def validate_vm_rights_def vm_read_write_def + vm_read_only_def split: if_splits) + +lemma vmrights_map_empty[simp]: + "vmrights_map {} = VMKernelOnly" + by (simp add: vmrights_map_def) + +lemma pte_relation_make_user[simp]: + "pte_relation' + (make_user_pte (addrFromPPtr p) + (attribs_from_word a) + (mask_vm_rights R (data_to_rights r)) + sz) + (makeUserPTE (addrFromPPtr p) + (maskVMRights (vmrights_map R) (rightsFromWord r)) + (attribsFromWord a) + sz)" + by (auto simp: make_user_pte_def makeUserPTE_def attribs_from_word_def + attribsFromWord_def mask_vmrights_corres) + +lemma below_user_vtop_in_user_region: + "p \ user_vtop \ p \ user_region" + by (simp add: user_region_def canonical_user_def user_vtop_def pptrUserTop_def bit_simps) + +lemma vmsz_aligned_user_region: + "\ vmsz_aligned p sz; p + mask (pageBitsForSize sz) \ user_vtop \ \ p \ user_region" + apply (simp add: vmsz_aligned_def) + apply (rule below_user_vtop_in_user_region) + apply (drule is_aligned_no_overflow_mask) + apply simp + done + +lemma checkVSpaceRoot_corres[corres]: + "\ cap_relation cap cap'; n' = n \ \ + corres (ser \ (\(pt, asid) (pt', asid'). pt' = pt \ asid' = ucast asid)) + \ \ + (check_vspace_root cap n) (checkVSpaceRoot cap' n')" + unfolding check_vspace_root_def checkVSpaceRoot_def + apply (corres_cases_both simp: cap_relation_def) (* takes a while, quadratic cap cases *) + apply (corres_cases_both simp: mdata_map_def)+ + apply (rule corres_trivial, rule corres_returnOk, simp) + apply clarsimp + apply clarsimp + done + +lemma labelToFlushType_corres: + "labelToFlushType l = label_to_flush_type l" + by (simp add: labelToFlushType_def label_to_flush_type_def + split: invocation_label.split arch_invocation_label.split) + +lemma decodeARMFrameInvocationFlush_corres[corres]: + "corres (ser \ archinv_relation) + (valid_vspace_objs and valid_asid_table and pspace_aligned and pspace_distinct and + K (\asid vref. 
opt = Some (asid, vref) \ 0 < asid)) + no_0_obj' + (decode_fr_inv_flush l args slot (arch_cap.FrameCap p R sz d opt) excaps) + (decodeARMFrameInvocationFlush l args (FrameCap p (vmrights_map R) sz d (mdata_map opt)))" + unfolding decode_fr_inv_flush_def decodeARMFrameInvocationFlush_def + apply (cases args; clarsimp) + apply (clarsimp simp: Let_def neq_Nil_conv) + apply (corres corres: corres_lookup_error findVSpaceForASID_corres corres_returnOkTT + term_simp: AARCH64_H.fromPAddr_def AARCH64.paddrTop_def AARCH64_H.paddrTop_def + AARCH64.pptrTop_def AARCH64_H.pptrTop_def + | corres_cases_both simp: mdata_map_def)+ + apply (fastforce simp: archinv_relation_def page_invocation_map_def mdata_map_def + labelToFlushType_corres) + apply wpsimp+ + done + +lemma valid_FrameCap_user_region_assert: + "\ s \ cap.ArchObjectCap (arch_cap.FrameCap p R sz d opt); + pspace_in_kernel_window s; valid_uses s; pspace_aligned s \ + \ pptrBase \ p \ p < pptrTop" + by (fastforce simp: kernel_window_range_def pptr_base_def AARCH64.pptrTop_def valid_cap_def + AARCH64_H.pptrTop_def obj_at_def + dest!: pspace_in_kw_bounded + split: if_splits) + +lemma decodeARMFrameInvocation_corres: + "\cap = arch_cap.FrameCap p R sz d opt; acap_relation cap cap'; + list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap cap) and + cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and + (\s. \x\set excaps. s \ fst x \ cte_wp_at (\_. True) (snd x) s)) + (invs' and valid_cap' (capability.ArchObjectCap cap') and + (\s. \x\set excaps'. valid_cap' (fst x) s \ cte_wp_at' (\_. True) (snd x) s)) + (decode_frame_invocation l args slot cap excaps) + (decodeARMFrameInvocation l args (cte_map slot) cap' excaps')" + apply (simp add: decode_frame_invocation_def decodeARMFrameInvocation_def Let_def isCap_simps + split del: if_split) + apply (cases "invocation_type l = ArchInvocationLabel ARMPageMap") + apply (case_tac "\(2 < length args \ excaps \ [])") + apply (auto simp: decode_fr_inv_map_def split: list.split)[1] + apply (simp add: decode_fr_inv_map_def Let_def neq_Nil_conv) + apply (elim exE conjE) + apply (simp split: list.split, intro conjI impI allI, simp_all)[1] + apply (simp add: decodeARMFrameInvocationMap_def) + apply (corres corres: corres_lookup_error findVSpaceForASID_corres checkVPAlignment_corres + corres_assert_gen_asm + simp: assertE_liftE + term_simp: mask_def user_vtop_def + | corres_cases_both)+ + apply (simp add: mask_def user_vtop_def) + apply (corres corres: lookupPTSlot_corres[@lift_corres_args] + term_simp: lookup_failure_map_def + | corres_cases_both)+ + apply (rule corres_trivial, rule corres_returnOk) + apply (simp add: archinv_relation_def page_invocation_map_def mapping_map_def) + apply (wpsimp+)[3] + apply corres_cases_both + apply (corres simp: up_ucast_inj_eq) + apply (rule corres_trivial) + apply simp + apply (corres corres: lookupPTSlot_corres[@lift_corres_args]) + apply corres_cases_both + apply (corres term_simp: lookup_failure_map_def) + apply (rule corres_trivial) + apply (rule corres_returnOk) + apply (simp add: archinv_relation_def page_invocation_map_def mapping_map_def) + apply wpsimp+ + subgoal + by (fastforce simp: valid_cap_def wellformed_mapdata_def vmsz_aligned_user_region not_less + dest: valid_FrameCap_user_region_assert + intro: vspace_for_asid_vs_lookup) + apply clarsimp + \ \PageUnmap\ + apply (simp split del: if_split) + apply (cases 
"invocation_type l = ArchInvocationLabel ARMPageUnmap") + apply simp + apply (rule corres_returnOk) + apply (clarsimp simp: archinv_relation_def page_invocation_map_def) + \ \PageGetAddress\ + apply (cases "invocation_type l = ArchInvocationLabel ARMPageGetAddress") + apply simp + apply (rule corres_returnOk) + apply (clarsimp simp: archinv_relation_def page_invocation_map_def) + \ \isPageFlushLabel\ + apply (cases "isPageFlushLabel (invocation_type l)") + apply simp + apply (corres_cases_right; + corres_cases_right?; + (solves \rule corres_trivial, simp add: isPageFlushLabel_def\)?; + corres_cases_right?) + apply corres+ + apply (fastforce simp: valid_cap_def wellformed_mapdata_def) + apply fastforce + \ \error cases\ + apply (fastforce split: invocation_label.splits arch_invocation_label.splits + simp: isPageFlushLabel_def) + done + +lemma VMReadWrite_vmrights_map[simp]: "vmrights_map vm_read_write = VMReadWrite" + by (simp add: vmrights_map_def vm_read_write_def) + +lemma gets_vspace_for_asid_is_catch: + "gets (vspace_for_asid a) = ((liftME Some (find_vspace_for_asid a)) const (return None))" + apply (simp add: find_vspace_for_asid_def liftME_def liftE_bindE catch_def) + apply (rule ext) + apply (clarsimp simp: bind_def simpler_gets_def throw_opt_def bindE_def throwError_def return_def + returnOk_def + split: option.splits) + done + +lemma maybeVSpaceForASID_corres: + "a' = ucast a \ + corres (=) + (valid_vspace_objs and valid_asid_table and pspace_aligned and pspace_distinct + and K (0 < a)) + no_0_obj' + (gets (vspace_for_asid a)) (maybeVSpaceForASID a')" + apply (simp add: maybeVSpaceForASID_def gets_vspace_for_asid_is_catch) + apply (rule corres_guard_imp) + apply (rule corres_split_catch) + apply (simp add: o_def) + apply (rule findVSpaceForASID_corres, simp) + apply (rule corres_trivial, simp) + apply wpsimp+ + done + +(* FIXME AARCH64: move to ArchAcc_R *) +lemma pageBits_leq_table_size[simp, intro!]: + "pageBits \ table_size (pt_type pt)" + by (simp add: bit_simps) + +lemma decodeARMPageTableInvocation_corres: + "\cap = arch_cap.PageTableCap p pt_t opt; acap_relation cap cap'; + list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap cap) and + cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and + (\s. \x\set excaps. s \ fst x \ cte_wp_at (\_. True) (snd x) s)) + (invs' and valid_cap' (capability.ArchObjectCap cap') and + (\s. \x\set excaps'. valid_cap' (fst x) s \ cte_wp_at' (\_. True) (snd x) s)) + (decode_page_table_invocation l args slot cap excaps) + (decodeARMPageTableInvocation l args (cte_map slot) cap' excaps')" + supply option.case_cong[cong] + apply (simp add: decode_page_table_invocation_def decodeARMPageTableInvocation_def Let_def + isCap_simps + split del: if_split) + \ \PageTableMap\ + apply (cases "invocation_type l = ArchInvocationLabel ARMPageTableMap") + apply (simp add: decode_pt_inv_map_def + split: invocation_label.split arch_invocation_label.splits split del: if_split) + apply (simp split: list.split, intro conjI impI allI, simp_all)[1] + apply (clarsimp simp: neq_Nil_conv Let_def decodeARMPageTableInvocationMap_def assertE_liftE) + apply (rule whenE_throwError_corres_initial; (fastforce simp: mdata_map_def)?) 
+ apply (corres' \fastforce\ + term_simp: user_vtop_def + corres: corres_lookup_error findVSpaceForASID_corres + corres_assert_gen_asm + lookupPTSlot_corres[@lift_corres_args] + corres_returnOk[where P="pspace_aligned and pt_at pt_t p and + pspace_in_kernel_window and valid_uses" + and P'=\] + | corres_cases_both)+ + apply (clarsimp simp: archinv_relation_def page_table_invocation_map_def + ppn_from_pptr_def obj_at_def) + apply (frule (1) pspace_alignedD) + apply (rule kernel_window_addrFromPPtr[symmetric]) + apply (erule (3) pspace_in_kw_bounded) + apply (erule is_aligned_weaken) + apply simp + apply wpsimp+ + apply (prop_tac "pptrBase \ p \ p < pptrTop") + apply (clarsimp simp: valid_cap_def obj_at_def simp flip: pptr_base_def) + apply (fastforce dest: pspace_in_kw_bounded + simp: kernel_window_range_def pptrTop_def AARCH64.pptrTop_def) + apply (fastforce simp: valid_cap_def wellformed_mapdata_def below_user_vtop_in_user_region + not_less pt_lookup_slot_pte_at + intro!: vspace_for_asid_vs_lookup) + apply fastforce + \ \PageTableUnmap\ + apply (clarsimp simp: isCap_simps)+ + apply (cases "invocation_type l = ArchInvocationLabel ARMPageTableUnmap") + apply (clarsimp simp: unlessE_whenE liftE_bindE) + apply (rule stronger_corres_guard_imp) + apply (rule corres_symb_exec_r_conj) + apply (rule_tac F="isArchCap isPageTableCap (cteCap cteVal)" + in corres_gen_asm2) + apply (rule corres_split[OF isFinalCapability_corres[where ptr=slot]]) + apply (drule mp) + apply (clarsimp simp: isCap_simps final_matters'_def) + apply (rule whenE_throwError_corres; simp) + apply (rule option_corres) + apply (cases opt; simp add: mdata_map_def) + apply (rule corres_trivial, simp add: returnOk_def archinv_relation_def + page_table_invocation_map_def) + apply (cases opt, clarsimp simp: mdata_map_def) + apply (clarsimp simp: bind_bindE_assoc) + apply datatype_schem + apply (rule corres_trivial, simp add: returnOk_def archinv_relation_def + page_table_invocation_map_def) + apply (cases opt; simp add: mdata_map_def) + apply (simp | wp getCTE_wp' | wp (once) hoare_drop_imps)+ + apply (clarsimp) + apply (rule no_fail_pre, rule no_fail_getCTE) + apply (erule conjunct2) + apply (clarsimp simp: cte_wp_at_caps_of_state invs_vspace_objs + invs_valid_asid_table invs_psp_aligned invs_distinct) + apply (clarsimp simp: valid_cap_def wellformed_mapdata_def) + apply (clarsimp simp: cte_wp_at_ctes_of cap_rights_update_def acap_rights_update_def + cte_wp_at_caps_of_state) + apply (drule pspace_relation_ctes_ofI[OF _ caps_of_state_cteD, rotated], + erule invs_pspace_aligned', clarsimp+) + apply (simp add: isCap_simps invs_no_0_obj') + apply (simp add: isCap_simps split del: if_split) + by (clarsimp split: invocation_label.splits arch_invocation_label.splits) + +lemma list_all2_Cons: "list_all2 f (x#xs) b \ \y ys. b = y # ys" + by (induct b; simp) + +lemma corres_gets_numlistregs[corres]: + "corres (=) \ \ + (gets (arm_gicvcpu_numlistregs \ arch_state)) (gets (armKSGICVCPUNumListRegs \ ksArchState))" + by (clarsimp simp: state_relation_def arch_state_relation_def) + +theorem corres_throwError_ser[corres]: + "corres (ser \ r) (\_. b = syscall_error_map a) (\_. True) (throwError a) (throwError b)" + by simp + +lemmas corres_liftE_rel_sumI = corres_liftE_rel_sum[THEN iffD2] +lemmas corres_liftMI = corres_liftM_simp[THEN iffD2] +lemmas corres_liftM2I = corres_liftM2_simp[THEN iffD2] + +lemma get_vcpu_LR_corres[corres]: + "corres (r \ (\vcpu lr. 
vgic_lr (vcpu_vgic vcpu) = lr)) (vcpu_at v) (vcpu_at' v) + (liftE (get_vcpu v)) (liftE (liftM (vgicLR \ vcpuVGIC) (getObject v)))" + apply simp + apply (rule corres_rel_imp, rule getObject_vcpu_corres) + apply (rename_tac vcpu', case_tac vcpu') + apply (clarsimp simp: vcpu_relation_def vgic_map_def) + done + +lemma decodeARMVCPUInvocation_corres: + "\acap_relation arch_cap arch_cap'; list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps')\ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap arch_cap) + and (\s. \x\set excaps. s \ fst x \ cte_wp_at (\_. True) (snd x) s)) + (invs' and valid_cap' (capability.ArchObjectCap arch_cap') + and (\s. \x\set excaps'. valid_cap' (fst x) s \ cte_wp_at' (\_. True) (snd x) s)) + (decode_vcpu_invocation label args arch_cap excaps) + (decodeARMVCPUInvocation label args cptr' cte arch_cap' excaps')" + apply (simp add: decode_vcpu_invocation_def decodeARMVCPUInvocation_def) + apply (cases arch_cap; cases "invocation_type label"; simp add: isVCPUCap_def) + apply (rename_tac vcpui) + apply (case_tac vcpui; simp split del: if_split) + (* set_tcb *) + apply (simp add: decode_vcpu_set_tcb_def decodeVCPUSetTCB_def Let_def isVCPUCap_def) + apply (cases excaps; simp add: null_def) + apply (frule list_all2_Cons) + apply clarsimp + apply (case_tac a; clarsimp simp add: cap_relation_def) + apply (corresK corres: corres_returnOkTT) + apply (clarsimp simp: archinv_relation_def vcpu_invocation_map_def) + (* inject_irq *) + apply (simp add: decode_vcpu_inject_irq_def decodeVCPUInjectIRQ_def isVCPUCap_def) + apply (cases args; clarsimp) + apply (clarsimp simp add: rangeCheck_def range_check_def unlessE_whenE) + apply (clarsimp simp: shiftL_nat whenE_bindE_throwError_to_if) + apply (corresKsimp wp: get_vcpu_wp) + apply (clarsimp simp: archinv_relation_def vcpu_invocation_map_def + valid_cap'_def valid_cap_def isVIRQActive_def is_virq_active_def + virqType_def virq_type_def + make_virq_def makeVIRQ_def) + (* read register *) + apply (clarsimp simp: decode_vcpu_read_register_def decodeVCPUReadReg_def) + apply (cases args; clarsimp simp: isCap_simps whenE_def split: if_split) + apply (rule corres_returnOk) + apply (simp add: archinv_relation_def vcpu_invocation_map_def) + (* write register *) + apply (clarsimp simp: decode_vcpu_write_register_def decodeVCPUWriteReg_def) + apply (cases args; clarsimp simp: isCap_simps) + apply (case_tac list; clarsimp) + apply (rule corres_returnOk) + apply (simp add: archinv_relation_def vcpu_invocation_map_def) + (* ack vppi *) + apply (simp add: decode_vcpu_ack_vppi_def decodeVCPUAckVPPI_def isVCPUCap_def) + apply (cases args; clarsimp simp: isCap_simps) + apply (simp add: arch_check_irq_def rangeCheck_def ucast_nat_def minIRQ_def unlessE_def + word_le_not_less) + apply (case_tac "a > ucast maxIRQ"; simp add: ucast_nat_def word_le_not_less) + apply (clarsimp simp: irq_vppi_event_index_def irqVPPIEventIndex_def maxIRQ_def + word_le_not_less[symmetric] word_le_nat_alt) + apply (fastforce simp: archinv_relation_def vcpu_invocation_map_def ucast_nat_def IRQ_def + intro: corres_returnOk + split: if_splits) + done + +lemma lookupPTSlot_gets_corres[@lift_corres_args, corres]: + "corres (\fr (bits, b'). 
case fr of + Some (level, b) \ bits = pt_bits_left level \ b' = b + | _ \ False) + (pspace_aligned and pspace_distinct and valid_vspace_objs + and valid_asid_table and \\(max_pt_level,pt) + and K (vptr \ user_region)) + \ + (gets (pt_lookup_slot pt vptr \ ptes_of)) (lookupPTSlot pt vptr)" + apply (rule corres_rrel_pre) + apply (rule corres_gets_the_gets) + apply (rule lookupPTSlot_corres) + apply clarsimp + done + +lemma max_page_le_max_pt_level: + "max_page_level \ max_pt_level" + by (simp add: level_defs) + +lemma lookupFrame_corres[@lift_corres_args, corres]: + "corres (\fr fr'. case (fr, fr') of + (Some (vmsz, b), Some (bits, b')) \ bits = pageBitsForSize vmsz \ b' = b + | (None, None) \ True + | _ \ False) + (invs and \\ (max_pt_level, vspace) and K (vaddr \ user_region)) + \ + (gets (lookup_frame vspace vaddr \ ptes_of)) (lookupFrame vspace vaddr)" + unfolding lookup_frame_def lookupFrame_def + apply (simp add: gets_obind_bind_eq obind_comp_dist) + apply corres + apply corres_cases_left + apply (rule corres_trivial, simp) + apply corres_cases_right + apply (simp add: gets_obind_bind_eq prod_o_comp gets_prod_comp obind_comp_dist + cong: corres_weaker_cong) + apply corres_cases_left + apply (rename_tac level slot) + apply corres_split + apply (rule corres_gets_the_gets) + apply (simp add: gets_the_oapply2_comp cong: corres_weaker_cong) + apply corres + apply corres_cases_left + apply (rule corres_trivial, simp) + apply (rule corres_if_r') + apply (rename_tac pte) + apply (prop_tac "AARCH64_A.is_PagePTE pte") + apply (case_tac pte; simp add: isPagePTE_def) + apply (clarsimp cong: corres_weaker_cong) + apply (rename_tac slot level pte' pte) + apply (rule_tac F="AARCH64_A.is_PagePTE pte \ level \ max_page_level" in corres_gen_asm) (* FIXME AARCH64: 2 -> max_page_level in spec *) + apply (rule_tac P'="\s. valid_vspace_objs s \ valid_asid_table s \ pspace_aligned s \ + (\asid. 
vs_lookup_slot level asid vaddr s = Some (level, slot)) \ + vaddr \ user_region \ + ptes_of s level slot = Some pte" and Q'=\ + in corres_assert_gen_asm_cross) + apply (fold is_aligned_mask)[1] + apply (cut_tac max_page_le_max_pt_level) + apply (fastforce simp: AARCH64_A.is_PagePTE_def fromPAddr_def is_aligned_ptrFromPAddr_n_eq + dest!: data_at_aligned valid_vspace_objs_strong_slotD) + apply (rule corres_trivial) + apply (clarsimp simp: max_page_level_def AARCH64_A.is_PagePTE_def pte_base_addr_def) + apply (rule corres_inst[where P'=\]) + apply (rename_tac pte) + apply (prop_tac "\ (AARCH64_A.is_PagePTE pte)") + apply (case_tac pte; simp add: isPagePTE_def) + apply simp (* needs separate step to get ofail *) + apply (simp add: ofail_def) + apply (wpsimp wp: getPTE_wp)+ + apply (clarsimp simp: invs_implies invs_valid_asid_table) + apply (frule vs_lookup_table_asid_not_0, simp, assumption, fastforce) + apply (frule pt_lookup_slot_vs_lookup_slotI[rotated]) + apply (clarsimp simp: vspace_for_asid_def entry_for_asid_def vspace_for_pool_def in_omonad + vs_lookup_table_def word_neq_0_conv) + apply (erule conjI[rotated]) + apply fastforce + apply (fastforce simp: pte_at_def AARCH64_A.is_PagePTE_def dest: valid_vspace_objs_strong_slotD) + apply simp + done + +lemma data_at_is_frame_cap: + "\ caps_of_state s cref = Some cap; obj_refs cap = {p}; data_at pgsz p s; + valid_objs s \ \ + is_frame_cap cap \ cap_bits cap = pageBitsForSize pgsz" + apply (drule (1) caps_of_state_valid_cap) + apply (clarsimp simp: valid_cap_def obj_at_def data_at_def is_ep is_ntfn is_cap_table is_tcb + valid_arch_cap_ref_def + split: cap.splits arch_cap.splits option.splits if_splits) + done + +lemma decodeARMVSpaceInvocation_corres[corres]: + "\ cap = arch_cap.PageTableCap pt VSRootPT_T map_data; acap_relation cap cap'; + list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap cap) and + cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and + (\s. \x\set excaps. s \ fst x \ cte_at (snd x) s)) + (invs' and valid_cap' (ArchObjectCap cap') and + (\s. \x\set excaps'. valid_cap' (fst x) s \ cte_at' (snd x) s)) + (decode_vspace_invocation (mi_label mi) args slot cap excaps) + (decodeARMVSpaceInvocation (mi_label mi) args cap')" + unfolding decodeARMVSpaceInvocation_def decode_vspace_invocation_def + apply (clarsimp simp: Let_def isCap_simps split del: if_split) + apply (cases "isVSpaceFlushLabel (invocation_type (mi_label mi))"; simp) + apply (clarsimp simp: decode_vs_inv_flush_def split del: if_split) + apply (cases args; clarsimp) + apply (clarsimp simp: neq_Nil_conv) + apply (corres corres: corres_lookup_error findVSpaceForASID_corres corres_returnOkTT + simp: checkValidMappingSize_def + term_simp: archinv_relation_def vspace_invocation_map_def + | corres_cases_both)+ + apply clarsimp + apply (rename_tac pgsz paddr) + apply (rule_tac P'="\s. \level asid vref. 
+ vs_lookup_target level asid vref s = Some (level, ptrFromPAddr paddr) \ + vref \ user_region \ data_at pgsz (ptrFromPAddr paddr) s \ + valid_vs_lookup s \ valid_objs s" and + Q'="pspace_aligned' and pspace_distinct' and valid_global_refs'" + in corres_stateAssert_r_cross) + apply clarsimp + apply (drule valid_vs_lookupD; simp) + apply clarsimp + apply (frule (3) data_at_is_frame_cap) + apply (fastforce dest!: valid_global_refsD_with_objSize pspace_relation_cte_wp_at[rotated] + caps_of_state_cteD + simp: is_frame_cap_eq cte_wp_at_ctes_of) + apply (corres corres: corres_returnOkTT + term_simp: archinv_relation_def vspace_invocation_map_def + labelToFlushType_corres page_base_def pageBase_def + pageBitsForSize_pt_bits_left) + apply wpsimp + apply (wpsimp wp: hoare_drop_imp) + apply wpsimp + apply (wpsimp wp: hoare_drop_imps) + apply wpsimp + apply wpsimp + apply (rename_tac arg0 arg1 args' s s') + apply clarsimp + apply (rule conjI, fastforce simp: valid_cap_def wellformed_mapdata_def)+ + apply clarsimp + apply (rule conjI, fastforce intro!: vspace_for_asid_vs_lookup) + apply (rule context_conjI, + fastforce simp: not_less user_vtop_def intro!: below_user_vtop_in_user_region) + apply (clarsimp simp: lookup_frame_def) + apply (rename_tac level p pte) + apply (drule (1) pt_lookup_slot_vs_lookup_slotI) + apply (rule_tac x=level in exI) + apply (rule_tac x=asid in exI) + apply (rule_tac x=arg0 in exI) + apply (simp add: vs_lookup_target_def obind_def) + apply (rule conjI, solves clarsimp) + apply (clarsimp simp: AARCH64_A.is_PagePTE_def pte_base_addr_def opt_map_red) + apply (fastforce dest!: valid_vspace_objs_strong_slotD) + apply fastforce + done + +lemma dom_ucast_eq: + "is_aligned y asid_low_bits \ + (- dom (\a::asid_low_index. map_option abs_asid_entry (p (ucast a :: machine_word))) \ + {x. ucast x + (y::AARCH64_A.asid) \ 0} = {}) = + (- dom p \ {x. x \ 2 ^ asid_low_bits - 1 \ x + ucast y \ 0} = {})" + apply safe + apply clarsimp + apply (rule ccontr) + apply (erule_tac x="ucast x" in in_emptyE) + apply (clarsimp simp: p2_low_bits_max) + apply (rule conjI) + apply (clarsimp simp: ucast_ucast_mask) + apply (subst (asm) less_mask_eq) + apply (rule word_less_sub_le [THEN iffD1]) + apply (simp add: word_bits_def) + apply (simp add: asid_low_bits_def) + apply simp + apply (clarsimp simp: mask_2pm1[symmetric] ucast_ucast_mask2 is_down is_aligned_mask) + apply (frule and_mask_eq_iff_le_mask[THEN iffD2]) + apply (simp add: asid_low_bits_def) + apply (erule notE) + apply (subst word_plus_and_or_coroll) + apply word_eqI_solve + apply (subst (asm) word_plus_and_or_coroll; word_bitwise, clarsimp simp: word_size) + apply (clarsimp simp: p2_low_bits_max) + apply (rule ccontr) + apply simp + apply (erule_tac x="ucast x" in in_emptyE) + apply clarsimp + apply (rule conjI, blast) + apply (rule conjI) + apply (rule word_less_sub_1) + apply (rule order_less_le_trans) + apply (rule ucast_less, simp) + apply (simp add: asid_low_bits_def) + apply clarsimp + apply (erule notE) + apply (simp add: is_aligned_mask asid_low_bits_def) + apply (subst word_plus_and_or_coroll) + apply word_eqI_solve + apply (subst (asm) word_plus_and_or_coroll) + apply (word_bitwise, clarsimp simp: word_size) + apply (word_bitwise) + done + +lemma assocs_map_option: + "assocs (\x. map_option f (pool x)) = map (\(x,y). (x, map_option f y)) (assocs pool)" + by (simp add: assocs_def) + +lemma fst_hd_map_eq: + "xs \ [] \ fst (hd (map (\p. 
(fst p, f (snd p))) xs)) = fst (hd xs)" + by (induct xs; simp) + +lemma assocs_dom_comp_split: + "set (map fst (filter (\x. P (fst x) \ snd x = None) (assocs f))) = (- dom f \ Collect P)" + apply (clarsimp simp: in_assocs_is_fun) + apply (rule set_eqI) + apply clarsimp + apply (rule iffI, clarsimp) + apply (erule conjE) + apply (drule not_in_domD) + apply (rule_tac x="(x,None)" in image_eqI) + apply simp + apply simp + done + +lemma arch_decodeInvocation_corres: + "\ acap_relation arch_cap arch_cap'; + list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap arch_cap) and + cte_wp_at ((=) (cap.ArchObjectCap arch_cap)) slot and + (\s. \x\set excaps. s \ fst x \ cte_at (snd x) s)) + (invs' and valid_cap' (capability.ArchObjectCap arch_cap') and + (\s. \x\set excaps'. s \' fst x \ cte_at' (snd x) s)) + (arch_decode_invocation (mi_label mi) args (to_bl cptr') slot arch_cap excaps) + (Arch.decodeInvocation (mi_label mi) args cptr' (cte_map slot) arch_cap' excaps')" + (* FIXME: check_vp_inv shadowed check_vp_wpR. Instead, + check_vp_wpR should probably be generalised to replace check_vp_inv. *) + supply check_vp_inv[wp del] check_vp_wpR[wp] + apply (simp add: arch_decode_invocation_def + AARCH64_H.decodeInvocation_def + decodeARMMMUInvocation_def + split del: if_split) + apply (cases arch_cap) + \ \ASIDPoolCap\ + apply (simp add: isCap_simps decodeARMMMUInvocation_def decode_asid_pool_invocation_def + decodeARMASIDPoolInvocation_def Let_def + split del: if_split) + apply (cases "invocation_type (mi_label mi) \ ArchInvocationLabel ARMASIDPoolAssign") + apply (simp split: invocation_label.split arch_invocation_label.split) + apply (rename_tac ap asid) + apply (cases "excaps", simp) + apply (cases "excaps'", simp) + apply clarsimp + apply (rename_tac excap0 exslot0 excaps0 excap0' exslot0' excaps0') + apply (case_tac excap0; simp) + apply (rename_tac exarch_cap) + apply (case_tac exarch_cap; simp) + apply (rename_tac pt pt_t map_data) + apply (case_tac "map_data \ None") + apply (clarsimp simp add: mdata_map_def split: pt_type.splits) + apply clarsimp + apply (case_tac pt_t; simp add: mdata_map_def isVTableRoot_def cong: pt_type.case_cong) + apply (corres term_simp: lookup_failure_map_def) + apply (rule_tac F="is_aligned asid asid_low_bits" in corres_gen_asm) + apply (corres' \fastforce\ simp: liftME_def bind_bindE_assoc) + apply (clarsimp simp: asid_pool_relation_def) + apply (subst conj_assoc [symmetric]) + apply (subst assocs_empty_dom_comp [symmetric]) + apply (case_tac rv, simp) + apply (clarsimp simp: o_def dom_ucast_eq) + apply (frule dom_hd_assocsD) + apply (simp add: select_ext_fap[simplified free_asid_pool_select_def] + free_asid_pool_select_def cong: corres_weaker_cong) + apply (simp add: returnOk_liftE[symmetric]) + apply (rule corres_returnOkTT) + apply (simp add: archinv_relation_def asid_pool_invocation_map_def) + apply (case_tac rv, simp add: asid_pool_relation_def) + apply (subst ucast_fst_hd_assocs) + apply (clarsimp simp: o_def dom_map_option) + apply simp + apply (simp add: o_def assocs_map_option filter_map split_def) + apply (subst fst_hd_map_eq; simp?) 
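+    (* ASIDPoolAssign case: both sides pick the first free slot of the pool; the assocs/dom lemmas relate the abstract pool to its Haskell counterpart so that the selected low ASID agrees. *)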
+ apply (clarsimp simp: dom_map_option) + apply (drule arg_cong[where f="map fst" and y="[]"]) + apply (drule arg_cong[where f=set and y="map fst []"]) + apply (subst (asm) assocs_dom_comp_split) + apply (clarsimp simp: split_def) + apply wpsimp+ + apply (fastforce simp: valid_cap_def) + apply simp + \ \ASIDControlCap\ + apply (simp add: isCap_simps decodeARMMMUInvocation_def decode_asid_control_invocation_def + Let_def decodeARMASIDControlInvocation_def + split del: if_split) + apply (cases "invocation_type (mi_label mi) \ ArchInvocationLabel ARMASIDControlMakePool") + apply (simp split: invocation_label.split arch_invocation_label.split) + apply (subgoal_tac "length excaps' = length excaps") + prefer 2 + apply (simp add: list_all2_iff) + apply (cases args, simp) + apply (rename_tac a0 as) + apply (case_tac as, simp) + apply (rename_tac a1 as') + apply (cases excaps, simp) + apply (rename_tac excap0 exs) + apply (case_tac exs) + apply (auto split: list.split)[1] + apply (rename_tac excap1 exss) + apply (case_tac excap0) + apply (rename_tac c0 slot0) + apply (case_tac excap1) + apply (rename_tac c1 slot1) + apply (clarsimp simp: Let_def split del: if_split) + apply (cases excaps', simp) + apply (case_tac list, simp) + apply (rename_tac c0' exs' c1' exss') + apply (clarsimp split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[where r'="\p p'. p = p' o ucast"]) + apply (rule corres_trivial) + apply (clarsimp simp: state_relation_def arch_state_relation_def) + apply (rule corres_splitEE) + apply (rule corres_whenE) + apply (subst assocs_empty_dom_comp [symmetric]) + apply (simp add: o_def) + apply (rule dom_ucast_eq_8) + apply (rule corres_trivial, simp, simp) + apply (simp split del: if_split) + apply (rule_tac F="- dom (asidTable \ ucast) \ {x. 
x \ 2 ^ asid_high_bits - 1} \ {}" in corres_gen_asm) + apply (drule dom_hd_assocsD) + apply (simp add: select_ext_fa[simplified free_asid_select_def] + free_asid_select_def o_def returnOk_liftE[symmetric] split del: if_split) + apply (thin_tac "fst a \ b \ P" for a b P) + apply (case_tac "isUntypedCap a \ capBlockSize a = objBits (makeObject::asidpool) \ \ capIsDevice a") + prefer 2 + apply (rule corres_guard_imp) + apply (rule corres_trivial) + apply (case_tac ad; simp add: isCap_simps split del: if_split) + apply (case_tac x21; simp split del: if_split) + apply (clarsimp simp: objBits_simps split del: if_split) + apply clarsimp + apply (rule TrueI)+ + apply (clarsimp simp: isCap_simps cap_relation_Untyped_eq lookupTargetSlot_def + objBits_simps bindE_assoc split_def) + apply (rule corres_splitEE) + apply (rule ensureNoChildren_corres, rule refl) + apply (rule corres_splitEE) + apply (erule lookupSlotForCNodeOp_corres, rule refl) + apply (rule corres_splitEE) + apply (rule ensureEmptySlot_corres) + apply clarsimp + apply (rule corres_returnOk[where P="\"]) + apply (clarsimp simp add: archinv_relation_def asid_ci_map_def split_def) + apply (clarsimp simp add: ucast_assocs[unfolded o_def] split_def + filter_map asid_high_bits_def) + apply (simp add: ord_le_eq_trans [OF word_n1_ge]) + apply (wp hoare_drop_imps)+ + apply (simp add: o_def validE_R_def) + apply clarsimp + (* for some reason it takes significantly longer if we don't split off the first conjuncts *) + apply (rule conjI, fastforce)+ + apply (fastforce simp: asid_high_bits_def) + apply clarsimp + apply (simp add: null_def split_def asid_high_bits_def word_le_make_less) + apply (subst hd_map, assumption) + (* need abstract guard to show list nonempty *) + apply (simp add: word_le_make_less) + apply (simp add: ucast_ucast_mask2 is_down) + apply (frule hd_in_set) + apply clarsimp + apply (prop_tac "\x::machine_word. 
x < 2^asid_high_bits \ x && mask asid_high_bits = x") + apply (clarsimp simp: and_mask_eq_iff_le_mask le_mask_iff_lt_2n[THEN iffD1] asid_high_bits_def) + apply (simp add: asid_high_bits_def) + apply (erule allE, erule (1) impE) + apply (simp add: ucast_shiftl) + apply (subst ucast_ucast_len) + apply (drule hd_in_set) + apply (rule shiftl_less_t2n; simp add: asid_low_bits_def) + apply (fastforce) + + \ \FrameCap\ + apply (rename_tac word cap_rights vmpage_size option) + apply (simp add: isCap_simps decodeARMMMUInvocation_def Let_def split del: if_split) + apply (rule decodeARMFrameInvocation_corres; simp) + + \ \PageTableCap\ + apply (rename_tac pt_t map_data) + apply (simp add: isCap_simps decodeARMMMUInvocation_def Let_def split del: if_split) + apply (case_tac pt_t; clarsimp simp: isCap_simps) + apply (rule decodeARMVSpaceInvocation_corres; simp) + apply (rule decodeARMPageTableInvocation_corres; simp) + + \ \VCPU\ + apply (simp add: isCap_simps acap_relation_def) + apply (rule corres_guard_imp[OF decodeARMVCPUInvocation_corres]; simp) + done + +lemma invokeVCPUInjectIRQ_corres: + "corres (=) (vcpu_at v and pspace_distinct and pspace_aligned) \ + (do y \ invoke_vcpu_inject_irq v index virq; + return [] + od) + (invokeVCPUInjectIRQ v index virq)" + unfolding invokeVCPUInjectIRQ_def invoke_vcpu_inject_irq_def + supply corres_machine_op_Id_eq[corres_term del] + apply (corres corres: corres_machine_op_Id_dc simp: bind_assoc) + apply (fastforce dest: vcpu_at_cross) + done + +lemma invokeVCPUReadReg_corres: + "corres (=) (vcpu_at v and pspace_distinct and pspace_aligned) (no_0_obj') + (invoke_vcpu_read_register v r) + (invokeVCPUReadReg v r)" + unfolding invoke_vcpu_read_register_def invokeVCPUReadReg_def read_vcpu_register_def readVCPUReg_def + apply (rule corres_discard_r) + apply (corres simp: bind_assoc | corres_cases_both)+ + apply (fastforce dest: vcpu_at_cross) + apply (wpsimp simp: getCurThread_def)+ + done + +lemma invokeVCPUWriteReg_corres: + "corres (=) (vcpu_at vcpu and pspace_distinct and pspace_aligned) (no_0_obj') + (do y \ invoke_vcpu_write_register vcpu r v; + return [] + od) + (invokeVCPUWriteReg vcpu r v)" + unfolding invokeVCPUWriteReg_def invoke_vcpu_write_register_def write_vcpu_register_def + writeVCPUReg_def + apply (rule corres_discard_r) + apply (corres simp: bind_assoc | corres_cases_both)+ + apply (fastforce dest: vcpu_at_cross) + apply wpsimp+ + done + +lemma archThreadSet_VCPU_Some_corres[corres]: + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_set (tcb_vcpu_update (\_. Some v)) t) (archThreadSet (atcbVCPUPtr_update (\_. Some v)) t)" + apply (rule archThreadSet_corres) + apply (simp add: arch_tcb_relation_def) + done + +crunches dissociateVCPUTCB + for no_0_obj'[wp]: no_0_obj' + and ksCurThread[wp]: "\s. P (ksCurThread s)" + (simp: crunch_simps wp: crunch_wps) + +lemma vcpuSwitch_corres'': + "vcpu' = vcpu + \ corres dc (\s. 
(vcpu \ None \ vcpu_at (the vcpu) s) \ valid_arch_state s) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (vcpu_switch vcpu) + (vcpuSwitch vcpu')" + apply (corres corres: vcpuSwitch_corres') + apply (clarsimp simp: valid_arch_state_def is_vcpu_def obj_at_def cur_vcpu_def in_omonad) + apply fastforce + done + +lemma associateVCPUTCB_corres: + "corres (=) (invs and vcpu_at v and tcb_at t) invs' + (do y \ associate_vcpu_tcb v t; + return [] + od) + (associateVCPUTCB v t)" + unfolding associate_vcpu_tcb_def associateVCPUTCB_def + apply (corres simp: bind_assoc term_simp: vcpu_relation_def + corres: getObject_vcpu_corres setObject_VCPU_corres vcpuSwitch_corres'' + wp: hoare_drop_imps get_vcpu_wp getVCPU_wp + | corres_cases_both simp: vcpu_relation_def)+ + apply (rule_tac Q="\_. invs and tcb_at t" in hoare_strengthen_post) + apply wp + apply clarsimp + apply (rule conjI) + apply (frule (1) sym_refs_vcpu_tcb, fastforce) + apply (clarsimp simp: obj_at_def in_omonad) + apply (fastforce simp: obj_at_def in_omonad) + apply wpsimp+ + apply (rule_tac Q="\_. invs' and tcb_at' t and vcpu_at' v" in hoare_strengthen_post) + apply wpsimp + apply fastforce + apply (wpsimp wp: arch_thread_get_wp archThreadGet_wp)+ + apply (clarsimp simp: invs_implies) + apply (rule conjI; clarsimp) + apply (frule (1) sym_refs_vcpu_tcb, fastforce) + apply (clarsimp simp: obj_at_def in_omonad) + apply (frule (1) sym_refs_tcb_vcpu, fastforce) + apply (clarsimp simp: obj_at_def) + apply clarsimp + apply (fastforce dest: vcpu_at_cross tcb_at_cross) + done + +lemma invokeVCPUAckVPPI_corres: + "corres (=) (vcpu_at vcpu and pspace_distinct and pspace_aligned) \ + (do y \ invoke_vcpu_ack_vppi vcpu vppi; + return [] + od) + (invokeVCPUAckVPPI vcpu vppi)" + unfolding invokeVCPUAckVPPI_def invoke_vcpu_ack_vppi_def write_vcpu_register_def + writeVCPUReg_def + by (corresKsimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) + (auto simp: vcpu_relation_def dest: vcpu_at_cross split: option.splits) + +lemma performARMVCPUInvocation_corres: + notes inv_corres = invokeVCPUInjectIRQ_corres invokeVCPUReadReg_corres + invokeVCPUWriteReg_corres associateVCPUTCB_corres + invokeVCPUAckVPPI_corres + shows "corres (=) (einvs and ct_active and valid_vcpu_invocation iv) + (invs' and ct_active' and valid_vcpuinv' (vcpu_invocation_map iv)) + (perform_vcpu_invocation iv) (performARMVCPUInvocation (vcpu_invocation_map iv))" + unfolding perform_vcpu_invocation_def performARMVCPUInvocation_def + apply (cases iv; simp add: vcpu_invocation_map_def valid_vcpu_invocation_def valid_vcpuinv'_def) + apply (rule inv_corres [THEN corres_guard_imp]; simp add: invs_no_0_obj' invs_implies)+ + done + +lemma arch_performInvocation_corres: + "archinv_relation ai ai' \ + corres (dc \ (=)) + (einvs and ct_active and valid_arch_inv ai and schact_is_rct) + (invs' and ct_active' and valid_arch_inv' ai') + (arch_perform_invocation ai) (Arch.performInvocation ai')" + apply (clarsimp simp: arch_perform_invocation_def + AARCH64_H.performInvocation_def + performARMMMUInvocation_def) + apply (clarsimp simp: archinv_relation_def) + apply (cases ai) + + \ \InvokeVSpace\ + apply (clarsimp simp: performARMMMUInvocation_def perform_vspace_invocation_def + performVSpaceInvocation_def) + apply ((corres simp: perform_flush_def do_flush_def doFlush_def + corres: corres_machine_op_Id_dc + term_simp: vspace_invocation_map_def + | corres_cases_both simp: vspace_invocation_map_def)+)[1] + + \ \InvokePageTable\ + apply (clarsimp simp: archinv_relation_def 
performARMMMUInvocation_def) + apply (rule corres_guard_imp, rule corres_split_nor) + apply (rule performPageTableInvocation_corres; wpsimp) + apply (rule corres_trivial, simp) + apply wpsimp+ + apply (fastforce simp: valid_arch_inv_def) + apply (fastforce simp: valid_arch_inv'_def) + + \ \InvokePage\ + apply (clarsimp simp: archinv_relation_def performARMMMUInvocation_def) + apply (rule corres_guard_imp) + apply (rule performPageInvocation_corres) + apply (simp add: page_invocation_map_def) + apply (fastforce simp: valid_arch_inv_def) + apply (fastforce simp: valid_arch_inv'_def) + + \ \InvokeASIDControl\ + apply (clarsimp simp: archinv_relation_def performARMMMUInvocation_def) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule performASIDControlInvocation_corres; wpsimp) + apply (rule corres_trivial, simp) + apply wpsimp+ + apply (fastforce simp: valid_arch_inv_def) + apply (fastforce simp: valid_arch_inv'_def) + apply (clarsimp simp: archinv_relation_def) + + \ \InvokeASIDPool\ + apply (clarsimp simp: archinv_relation_def performARMMMUInvocation_def) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule performASIDPoolInvocation_corres; wpsimp) + apply (rule corres_trivial, simp) + apply wpsimp+ + apply (fastforce simp: valid_arch_inv_def) + apply (fastforce simp: valid_arch_inv'_def) + + \ \InvokeVCPU\ + apply (clarsimp simp: archinv_relation_def) + apply (rule corres_guard_imp[OF performARMVCPUInvocation_corres]; + clarsimp simp: valid_arch_inv_def valid_arch_inv'_def)+ + done + +lemma asid_pool_typ_at_ext': + "asid_pool_at' = obj_at' (\::asidpool \ bool)" + apply (rule ext)+ + apply (simp add: typ_at_to_obj_at_arches) + done + +lemma st_tcb_strg': + "st_tcb_at' P p s \ tcb_at' p s" + by (auto simp: pred_tcb_at') + +lemma performASIDControlInvocation_tcb_at': + "\st_tcb_at' active' p and invs' and ct_active' and valid_aci' aci\ + performASIDControlInvocation aci + \\y. 
tcb_at' p\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) + apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ + apply (clarsimp simp: projectKO_opts_defs) + apply (strengthen st_tcb_strg' [where P=\]) + apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"] + hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp + updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+ + apply (case_tac ctea) + apply (clarsimp) + apply (frule ctes_of_valid_cap') + apply (simp add:invs_valid_objs')+ + apply (clarsimp simp:valid_cap'_def capAligned_def cte_wp_at_ctes_of) + apply (strengthen refl order_refl + pred_tcb'_weakenE[mk_strg I E]) + apply (clarsimp simp: conj_comms invs_valid_pspace' isCap_simps + descendants_range'_def2 empty_descendants_range_in') + apply (frule ctes_of_valid', clarsimp, simp, + drule capFreeIndex_update_valid_cap'[where fb="2 ^ pageBits", rotated -1], + simp_all) + apply (simp add: pageBits_def is_aligned_def untypedBits_defs) + apply (simp add: valid_cap_simps' range_cover_def objBits_simps untypedBits_defs + capAligned_def unat_eq_0 and_mask_eq_iff_shiftr_0[symmetric] + word_bw_assocs) + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range, + fastforce simp add: cte_wp_at_ctes_of, assumption, simp_all) + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) + apply clarsimp + done + +crunches performVSpaceInvocation, performARMVCPUInvocation + for tcb_at'[wp]: "\s. tcb_at' p s" + +lemma invokeArch_tcb_at': + "\invs' and valid_arch_inv' ai and ct_active' and st_tcb_at' active' p\ + Arch.performInvocation ai + \\rv. tcb_at' p\" + apply (simp add: AARCH64_H.performInvocation_def performARMMMUInvocation_def) + apply (wpsimp simp: performARMMMUInvocation_def pred_tcb_at' valid_arch_inv'_def + wp: performASIDControlInvocation_tcb_at') + done + +crunch pspace_no_overlap'[wp]: setThreadState "pspace_no_overlap' w s" + (simp: unless_def) + +lemma sts_cte_cap_to'[wp]: + "\ex_cte_cap_to' p\ setThreadState st t \\rv. ex_cte_cap_to' p\" + by (wp ex_cte_cap_to'_pres) + + +lemma sts_valid_arch_inv': (* FIXME AARCH64 cleanup *) + "\valid_arch_inv' ai\ setThreadState st t \\rv. valid_arch_inv' ai\" + apply (cases ai, simp_all add: valid_arch_inv'_def) + apply (clarsimp simp: valid_vsi'_def split: vspace_invocation.splits) + apply (rule conjI|clarsimp|wpsimp)+ + apply (clarsimp simp: valid_pti'_def split: page_table_invocation.splits) + apply (rule conjI|clarsimp|wpsimp)+ + apply (rename_tac page_invocation) + apply (case_tac page_invocation, simp_all add: valid_page_inv'_def)[1] + apply ((wp|simp)+)[2] + apply (clarsimp simp: isCap_simps pred_conj_def) + apply wpsimp + apply wpsimp + apply (clarsimp simp: valid_aci'_def split: asidcontrol_invocation.splits) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule hoare_pre, wp) + apply clarsimp + apply (clarsimp simp: valid_apinv'_def split: asidpool_invocation.splits) + apply (rule hoare_pre, wp) + apply simp + apply (rename_tac vcpui) + apply (case_tac vcpui; wpsimp simp: valid_vcpuinv'_def) + done + +lemma inv_ASIDPool: + "inv ASIDPool = (\v. 
case v of ASIDPool a \ a)" + by (rule ext) + (simp split: asidpool.splits) + +lemma eq_arch_update': + "ArchObjectCap cp = cteCap cte \ is_arch_update' (ArchObjectCap cp) cte" + by (clarsimp simp: is_arch_update'_def isCap_simps) + +lemma decodeARMFrameInvocationFlush_valid_arch_inv'[wp]: + "\\\ + decodeARMFrameInvocationFlush label args (FrameCap word vmrights vmpage_size d option) + \valid_arch_inv'\, -" + unfolding decodeARMFrameInvocationFlush_def + by (wpsimp simp: valid_arch_inv'_def valid_page_inv'_def cong: if_cong) + +lemma decodeARMFrameInvocationMap_valid_arch_inv'[wp]: + "\invs' and valid_cap' (ArchObjectCap (FrameCap word vmrights vmpage_size d option)) and + cte_wp_at' ((=) (ArchObjectCap (FrameCap word vmrights vmpage_size d option)) \ cteCap) slot and + valid_cap' vspaceCap\ + decodeARMFrameInvocationMap slot (FrameCap word vmrights vmpage_size d option) + vptr rightsMask attr vspaceCap + \valid_arch_inv'\, -" + unfolding valid_arch_inv'_def decodeARMFrameInvocationMap_def + supply checkVPAlignment_inv[wp del] checkVP_wpR[wp] + apply (wpsimp wp: lookupPTSlot_inv getASID_wp + simp: checkVSpaceRoot_def if_apply_def2 valid_page_inv'_def valid_cap'_def + capAligned_def + split_del: if_split cong: if_cong + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule_tac t="cteCap cte" in sym) + apply (clarsimp simp: valid_cap'_def wellformed_mapdata'_def is_arch_update'_def capAligned_def + isCap_simps not_less makeUserPTE_def isPagePTE_def) + apply (fastforce simp: wellformed_mapdata'_def vmsz_aligned_user_region user_vtop_def mask_def) + done + +lemma decode_page_inv_wf[wp]: + "cap = (arch_capability.FrameCap word vmrights vmpage_size d option) \ + \invs' and valid_cap' (capability.ArchObjectCap cap ) and + cte_wp_at' ((=) (capability.ArchObjectCap cap) \ cteCap) slot and + (\s. \x\set excaps. cte_wp_at' ((=) (fst x) \ cteCap) (snd x) s) and + sch_act_simple\ + decodeARMFrameInvocation label args slot cap excaps + \valid_arch_inv'\, -" + apply (simp add: decodeARMFrameInvocation_def Let_def isCap_simps + cong: if_cong split del: if_split) + apply (wpsimp simp: valid_arch_inv'_def valid_page_inv'_def) + apply (clarsimp simp: isCap_simps cte_wp_at_ctes_of is_arch_update'_def) + apply (drule_tac t="cteCap _" in sym)+ + apply clarsimp + apply (drule ctes_of_valid', fastforce)+ + apply clarsimp + done + +lemma below_pptrUserTop_in_user_region: + "p \ pptrUserTop \ p \ user_region" + apply (simp add: user_region_def canonical_user_def pptrUserTop_def) + apply (simp add: bit_simps is_aligned_mask) + done + +lemma checkVSpaceRoot_wp[wp]: + "\\s. \vspace asid x. cap = ArchObjectCap (PageTableCap vspace VSRootPT_T (Some (asid, x))) \ + Q (vspace, asid) s\ + checkVSpaceRoot cap n + \Q\, -" + unfolding checkVSpaceRoot_def + by wpsimp + +lemma phys_canonical_in_kernel_window: + "\ pptrBase \ p; p < pptrTop \ \ canonical_address (addrFromPPtr p >> pageBits)" + apply (simp add: addrFromPPtr_def pptrBaseOffset_def paddrBase_def canonical_address_mask_eq + canonical_bit_def pptrBase_def pageBits_def pptrTop_def) + by word_bitwise clarsimp + +lemma decode_page_table_inv_wf[wp]: + "arch_cap = PageTableCap word pt_t option \ + \invs' and valid_cap' (capability.ArchObjectCap arch_cap) and + cte_wp_at' ((=) (capability.ArchObjectCap arch_cap) \ cteCap) slot and + (\s. \x\set excaps. 
cte_wp_at' ((=) (fst x) \ cteCap) (snd x) s) and + sch_act_simple\ + decodeARMPageTableInvocation label args slot arch_cap excaps + \valid_arch_inv'\, - " + supply if_cong[cong] if_split [split del] + apply (clarsimp simp: decodeARMPageTableInvocation_def Let_def isCap_simps) + apply (wpsimp simp: decodeARMPageTableInvocationMap_def valid_arch_inv'_def valid_pti'_def + maybeVSpaceForASID_def o_def if_apply_def2 + wp: getPTE_wp hoare_vcg_all_lift hoare_vcg_const_imp_lift + lookupPTSlot_inv isFinalCapability_inv + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: not_le isCap_simps cte_wp_at_ctes_of eq_arch_update') + apply (drule_tac t="cteCap cte" in sym) + apply (simp add: valid_cap'_def capAligned_def) + apply (clarsimp simp: is_arch_update'_def isCap_simps + split: if_split) + apply (drule_tac t="cteCap ctea" in sym) + apply (drule ctes_of_valid', fastforce)+ + apply (clarsimp simp: valid_cap'_def) + apply (simp add: wellformed_mapdata'_def below_pptrUserTop_in_user_region neg_mask_user_region + phys_canonical_in_kernel_window) + done + +lemma capMaster_isPageTableCap: + "capMasterCap cap' = capMasterCap cap \ + isArchCap isPageTableCap cap' = isArchCap isPageTableCap cap" + by (simp add: capMasterCap_def isArchCap_def isPageTableCap_def + split: capability.splits arch_capability.splits) + +lemma decodeARMVCPUInvocation_valid_arch_inv'[wp]: + "\invs' and valid_cap' (ArchObjectCap (VCPUCap vcpu)) and + cte_wp_at' ((=) (ArchObjectCap (VCPUCap vcpu)) \ cteCap) slot and + (\s. \x\set excaps. cte_wp_at' ((=) (fst x) \ cteCap) (snd x) s) and + (\s. \x\set excaps. \r\cte_refs' (fst x) (irq_node' s). ex_cte_cap_wp_to' (\_. True) r s) and + (\s. \x\set excaps. valid_cap' (fst x) s) and + sch_act_simple\ + decodeARMVCPUInvocation label args cap_index slot (VCPUCap vcpu) excaps + \valid_arch_inv'\, -" + unfolding decodeARMVCPUInvocation_def + apply (wpsimp simp: decodeVCPUSetTCB_def decodeVCPUInjectIRQ_def Let_def decodeVCPUReadReg_def + decodeVCPUWriteReg_def decodeVCPUAckVPPI_def + wp: getVCPU_wp + split_del: if_split) + apply (clarsimp simp: valid_arch_inv'_def valid_vcpuinv'_def isCap_simps null_def neq_Nil_conv) + apply (rename_tac t_slot excaps0 t) + apply (rule conjI) + apply (clarsimp simp: valid_cap'_def) + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (rule conjI) + apply (drule_tac t="cteCap cte" for cte in sym) + apply fastforce + apply (rename_tac tcb_cte) + apply (drule_tac t="cteCap tcb_cte" in sym) + apply clarsimp + apply (rule_tac x=t_slot in exI) + apply fastforce + done + +lemma decodeARMVSpaceInvocation_valid_arch_inv'[wp]: + "\\\ + decodeARMVSpaceInvocation label args (PageTableCap vspace VSRootPT_T map_data) + \valid_arch_inv'\, -" + unfolding decodeARMVSpaceInvocation_def + by (wpsimp simp: Let_def valid_arch_inv'_def valid_vsi'_def + cong: if_cong + split_del: if_split) + +lemma arch_decodeInvocation_wf[wp]: + shows "\invs' and valid_cap' (ArchObjectCap arch_cap) and + cte_wp_at' ((=) (ArchObjectCap arch_cap) o cteCap) slot and + (\s. \x \ set excaps. cte_wp_at' ((=) (fst x) o cteCap) (snd x) s) and + (\s. \x \ set excaps. \r \ cte_refs' (fst x) (irq_node' s). ex_cte_cap_to' r s) and + (\s. \x \ set excaps. 
s \' fst x) and + sch_act_simple\ + Arch.decodeInvocation label args cap_index slot arch_cap excaps + \valid_arch_inv'\,-" + apply (cases arch_cap) + apply (simp add: decodeARMMMUInvocation_def AARCH64_H.decodeInvocation_def + Let_def split_def isCap_simps decodeARMASIDControlInvocation_def + cong: if_cong invocation_label.case_cong arch_invocation_label.case_cong list.case_cong prod.case_cong + split del: if_split) + apply (rule hoare_pre) + apply ((wp whenE_throwError_wp ensureEmptySlot_stronger| + wpc| + simp add: valid_arch_inv'_def valid_aci'_def is_aligned_shiftl_self + split del: if_split)+)[1] + apply (rule_tac Q'= + "\rv. K (fst (hd [p\assocs asidTable . fst p \ 2 ^ asid_high_bits - 1 \ snd p = None]) + << asid_low_bits \ 2 ^ asid_bits - 1) and + real_cte_at' rv and + ex_cte_cap_to' rv and + cte_wp_at' (\cte. \idx. cteCap cte = (UntypedCap False frame pageBits idx)) (snd (excaps!0)) and + sch_act_simple and + (\s. descendants_of' (snd (excaps!0)) (ctes_of s) = {}) " + in hoare_strengthen_postE_R) + apply (simp add: lookupTargetSlot_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of asid_wf_def mask_def) + apply (simp split del: if_split) + apply (wp ensureNoChildren_sp whenE_throwError_wp|wpc)+ + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: null_def neq_Nil_conv) + apply (drule filter_eq_ConsD) + apply clarsimp + apply (rule shiftl_less_t2n) + apply (simp add: asid_bits_def asid_low_bits_def asid_high_bits_def) + apply unat_arith + apply (simp add: asid_bits_def) + apply clarsimp + apply (rule conjI, fastforce) + apply (clarsimp simp: cte_wp_at_ctes_of objBits_simps) + + \ \ASIDPool cap\ + apply (simp add: decodeARMMMUInvocation_def AARCH64_H.decodeInvocation_def + Let_def split_def isCap_simps decodeARMASIDPoolInvocation_def + cong: if_cong split del: if_split) + apply (wpsimp simp: valid_arch_inv'_def valid_apinv'_def wp: getASID_wp cong: if_cong) + apply (clarsimp simp: word_neq_0_conv valid_cap'_def valid_arch_inv'_def valid_apinv'_def) + apply (rule conjI) + apply (erule cte_wp_at_weakenE') + apply (simp, drule_tac t="cteCap c" in sym, simp add: isCap_simps) + apply (subst (asm) conj_assoc [symmetric]) + apply (subst (asm) assocs_empty_dom_comp [symmetric]) + apply (drule dom_hd_assocsD) + apply (simp add: capAligned_def asid_wf_def mask_def) + apply (elim conjE) + apply (subst field_simps, erule is_aligned_add_less_t2n) + apply assumption + apply (simp add: asid_low_bits_def asid_bits_def) + apply assumption + + \ \PageCap\ + apply (simp add: decodeARMMMUInvocation_def isCap_simps AARCH64_H.decodeInvocation_def + cong: if_cong split del: if_split) + apply (wp decode_page_inv_wf, rule refl) + apply clarsimp + + \ \PageTableCap\ + apply (simp add: decodeARMMMUInvocation_def isCap_simps AARCH64_H.decodeInvocation_def + cong: if_cong split del: if_split) + apply (rename_tac pt_t map_data) + apply (case_tac pt_t; clarsimp) + apply wp + apply (wp decode_page_table_inv_wf, rule refl) + apply clarsimp + + \ \VCPUCap\ + apply (clarsimp simp: AARCH64_H.decodeInvocation_def) + apply wp + done + +crunch nosch[wp]: setMRs "\s. P (ksSchedulerAction s)" + (ignore: getRestartPC setRegister transferCapsToSlots + wp: hoare_drop_imps hoare_vcg_split_case_option + mapM_wp' + simp: split_def zipWithM_x_mapM) + +crunch nosch [wp]: performARMMMUInvocation "\s. 
P (ksSchedulerAction s)" + (simp: crunch_simps + wp: crunch_wps getObject_cte_inv getASID_wp) + +lemmas setObject_cte_st_tcb_at' [wp] = setCTE_pred_tcb_at' [unfolded setCTE_def] + +crunch st_tcb_at': performPageTableInvocation, + performPageInvocation, + performASIDPoolInvocation "st_tcb_at' P t" + (wp: crunch_wps getASID_wp getObject_cte_inv simp: crunch_simps pteAtIndex_def) + +lemma performASIDControlInvocation_st_tcb_at': + "\st_tcb_at' (P and (\) Inactive and (\) IdleThreadState) t and + valid_aci' aci and invs' and ct_active'\ + performASIDControlInvocation aci + \\y. st_tcb_at' P t\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) + apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) + apply (rule hoare_pre) + apply (wp createObjects_orig_obj_at'[where P="P \ tcbState", folded st_tcb_at'_def] + updateFreeIndex_pspace_no_overlap' getSlotCap_wp + hoare_vcg_ex_lift + deleteObjects_cte_wp_at' deleteObjects_invs_derivatives + deleteObjects_st_tcb_at' + hoare_weak_lift_imp + | simp add: placeNewObject_def2)+ + apply (case_tac ctea) + apply (clarsimp) + apply (frule ctes_of_valid_cap') + apply (simp add:invs_valid_objs')+ + apply (clarsimp simp:valid_cap'_def capAligned_def cte_wp_at_ctes_of) + apply (rule conjI) + apply clarsimp + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) + apply assumption+ + subgoal by (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) + subgoal by fastforce + apply simp + apply (rule conjI,assumption) + apply (clarsimp simp:invs_valid_pspace' objBits_simps range_cover_full descendants_range'_def2 + isCap_simps) + apply (intro conjI) + apply (fastforce simp:empty_descendants_range_in')+ + apply clarsimp + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) + apply assumption+ + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) + apply fastforce + apply simp + apply auto + done + +lemmas arch_finalise_cap_aligned' = ArchRetypeDecls_H_AARCH64_H_finaliseCap_aligned' + +lemmas arch_finalise_cap_distinct' = ArchRetypeDecls_H_AARCH64_H_finaliseCap_distinct' + +crunch st_tcb_at' [wp]: "Arch.finaliseCap" "st_tcb_at' P t" + (wp: crunch_wps getASID_wp simp: crunch_simps) + +lemma archThreadSet_ex_nonz_cap_to'[wp]: + "archThreadSet f t \ex_nonz_cap_to' v\" + unfolding ex_nonz_cap_to'_def cte_wp_at_ctes_of by wp + +lemma assoc_invs': + "\invs' and + ko_at' (vcpu\vcpuTCBPtr:= None\) v and + obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = None) t and + ex_nonz_cap_to' v and ex_nonz_cap_to' t\ + do y \ archThreadSet (atcbVCPUPtr_update (\_. Some v)) t; + setObject v (vcpuTCBPtr_update (\_. Some t) vcpu) + od + \\_. 
invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_tcb_valid_objs setObject_vcpu_valid_objs' + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift sym_heap_sched_pointers_lift + setVCPU_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb valid_arch_tcb'_def + | wp (once) hoare_vcg_imp_lift)+ + apply (rule conjI) + apply (clarsimp simp: typ_at_to_obj_at_arches obj_at'_def) + apply (rule conjI) + apply (clarsimp simp: valid_vcpu'_def obj_at'_def objBits_simps) + apply (rule conjI) + apply (clarsimp simp: typ_at_tcb' obj_at'_def) + apply (rule_tac rfs'="state_hyp_refs_of' s" in delta_sym_refs, assumption) + supply fun_upd_apply[simp] + apply (clarsimp simp: hyp_live'_def arch_live'_def) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: state_hyp_refs_of'_def obj_at'_def tcb_vcpu_refs'_def + split: option.splits if_split_asm) + apply (clarsimp simp: hyp_live'_def arch_live'_def) + done + +lemma asUser_obj_at_vcpu[wp]: + "\obj_at' (P :: vcpu \ bool) t\ + asUser t' f + \\rv. obj_at' P t\" + apply (simp add: asUser_def threadGet_stateAssert_gets_asUser) + apply (wpsimp wp: threadSet_ko_wp_at2' simp: obj_at'_real_def) + done + +lemma archThreadSet_obj_at'_vcpu[wp]: + "archThreadSet f t \obj_at' (P::vcpu \ bool) p\" + unfolding archThreadSet_def + by (wpsimp wp: obj_at_setObject2 simp: updateObject_default_def in_monad) + +lemma asUser_atcbVCPUPtr[wp]: + "asUser t' f \obj_at' (\t. P (atcbVCPUPtr (tcbArch t))) t\" + unfolding asUser_def threadGet_stateAssert_gets_asUser + by (wpsimp simp: asUser_fetch_def obj_at'_def atcbContextGet_def atcbContextSet_def) + +lemma dissociateVCPUTCB_no_vcpu[wp]: + "\\s. t \ t' \ obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = None) t s\ + dissociateVCPUTCB vcpu t' \\rv. obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = None) t\" + unfolding dissociateVCPUTCB_def + by (wpsimp wp: getVCPU_wp setObject_tcb_strongest simp: archThreadSet_def archThreadGet_def) + +lemma dissociateVCPUTCB_no_tcb[wp]: + "\ko_at' v vcpu\ dissociateVCPUTCB vcpu tcb \\rv. ko_at' (vcpuTCBPtr_update Map.empty v) vcpu\" + unfolding dissociateVCPUTCB_def + apply (wpsimp wp: obj_at_setObject3 getVCPU_wp + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def archThreadGet_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma dissociateVCPUTCB_ex_nonz_cap_to'[wp]: + "dissociateVCPUTCB v' t \ex_nonz_cap_to' v\" + unfolding ex_nonz_cap_to'_def cte_wp_at_ctes_of by wp + +lemma vcpuTCBPtr_update_Some_vcpu_live[wp]: + "\if vcpuPtr = vcpuPtr' + then ko_wp_at' is_vcpu' vcpuPtr + else ko_wp_at' (is_vcpu' and hyp_live') vcpuPtr\ + setObject vcpuPtr' (vcpuTCBPtr_update (\_. Some tcbPtr) vcpu) + \\_. 
ko_wp_at' (is_vcpu' and hyp_live') vcpuPtr\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + by (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def hyp_live'_def + arch_live'_def + split: if_splits) + +lemma vcpuTCBPtr_update_Some_valid_arch_state'[wp]: + "setObject vcpuPtr (vcpuTCBPtr_update (\_. Some tptr) vcpu) \valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift + | rule hoare_lift_Pf[where f=ksArchState]) + by (auto simp: pred_conj_def o_def ko_wp_at'_def) + +definition associateVCPUTCB_helper where + "associateVCPUTCB_helper vcpu v t = do + y \ archThreadSet (atcbVCPUPtr_update (\_. Some v)) t; + setObject v (vcpuTCBPtr_update (\_. Some t) vcpu) + od" + +lemma associateVCPUTCB_invs'[wp]: + "\invs' and ex_nonz_cap_to' vcpu and ex_nonz_cap_to' tcb and vcpu_at' vcpu\ + associateVCPUTCB vcpu tcb + \\_. invs'\" + apply (clarsimp simp: associateVCPUTCB_def) + apply (subst bind_assoc[symmetric], fold associateVCPUTCB_helper_def) + apply wpsimp + apply (rule_tac Q="\_ s. invs' s \ ko_wp_at' (is_vcpu' and hyp_live') vcpu s" in hoare_post_imp) + apply simp + apply (rule hoare_vcg_conj_lift) + apply (wpsimp wp: assoc_invs'[folded associateVCPUTCB_helper_def]) + apply (clarsimp simp: associateVCPUTCB_helper_def) + apply (wpsimp simp: vcpu_at_is_vcpu'[symmetric])+ + apply (wpsimp wp: getVCPU_wp) + apply (rule_tac Q="\_. invs' and obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = None) tcb and + ex_nonz_cap_to' vcpu and ex_nonz_cap_to' tcb and vcpu_at' vcpu" + in hoare_strengthen_post) + apply wpsimp + apply (clarsimp simp: obj_at'_def) + apply (rename_tac v obj) + apply (case_tac v, simp) + apply (wpsimp wp: getObject_tcb_wp simp: archThreadGet_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma invokeVCPUInjectIRQ_invs'[wp]: + "invokeVCPUInjectIRQ v ir idx \invs'\" + unfolding invokeVCPUInjectIRQ_def + apply (wpsimp wp: dmo_invs' + simp: set_gic_vcpu_ctrl_lr_def machine_op_lift_def machine_rest_lift_def) + apply (clarsimp simp: in_monad select_f_def) + done + +lemma invokeVCPUAckVPPI_invs'[wp]: + "invokeVCPUAckVPPI vcpu_ptr irq \invs'\" + unfolding invokeVCPUAckVPPI_def + by (wpsimp wp: dmo_invs' setVCPU_VPPIMasked_invs' + simp: set_gic_vcpu_ctrl_lr_def machine_op_lift_def machine_rest_lift_def vcpuUpdate_def) + +lemma invokeVCPUReadReg_inv[wp]: + "invokeVCPUReadReg vcpu r \P\" + unfolding invokeVCPUReadReg_def readVCPUReg_def vcpuReadReg_def + by (wpsimp wp: dmo_inv' simp: readVCPUHardwareReg_def getSCTLR_def) + +lemma invokeVCPUWriteReg_invs'[wp]: + "invokeVCPUWriteReg vcpu r v \invs'\" + unfolding invokeVCPUWriteReg_def writeVCPUReg_def vcpuWriteReg_def vcpuUpdate_def + by (wpsimp wp: dmo_machine_op_lift_invs' setVCPU_regs_invs') + +lemma performARMVCPUInvocation_invs'[wp]: + "\invs' and valid_vcpuinv' i\ performARMVCPUInvocation i \\_. invs'\" + unfolding performARMVCPUInvocation_def valid_vcpuinv'_def by wpsimp + + +lemma invs_asid_table_strengthen': + "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ + invs' (s\ksArchState := + armKSASIDTable_update (\_. 
((armKSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" + apply (clarsimp simp: invs'_def valid_state'_def) + apply (rule conjI) + apply (clarsimp simp: valid_global_refs'_def global_refs'_def) + apply (clarsimp simp: valid_arch_state'_def) + apply (clarsimp simp: valid_asid_table'_def ran_def mask_def) + apply (rule conjI) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: mask_def) + apply (rule conjI) + apply (clarsimp simp: valid_pspace'_def) + apply (simp add: valid_machine_state'_def split: option.splits prod.splits) + done + +lemma ex_cte_not_in_untyped_range: + "\(ctes_of s) cref = Some (CTE (capability.UntypedCap d ptr bits idx) mnode); + descendants_of' cref (ctes_of s) = {}; invs' s; + ex_cte_cap_wp_to' (\_. True) x s; valid_global_refs' s\ + \ x \ mask_range ptr bits" + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range) + apply (fastforce simp:cte_wp_at_ctes_of)+ + done + +lemma makeObject_ASIDPool_not_live[simp]: + "\ (live' (KOArch (KOASIDPool makeObject)))" + by (simp add: makeObject_asidpool live'_def hyp_live'_def arch_live'_def) + +lemma performASIDControlInvocation_invs' [wp]: + "\invs' and ct_active' and valid_aci' aci\ + performASIDControlInvocation aci + \\y. invs'\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: performASIDControlInvocation_def valid_aci'_def + placeNewObject_def2 cte_wp_at_ctes_of + split: asidcontrol_invocation.splits) + apply (rename_tac w1 w2 w3 w4 cte ctea idx) + apply (case_tac ctea) + apply (clarsimp) + apply (frule ctes_of_valid_cap') + apply fastforce + apply (rule hoare_pre) + apply (wp hoare_vcg_const_imp_lift) + apply (strengthen invs_asid_table_strengthen') + apply (wp cteInsert_simple_invs) + apply (wp createObjects'_wp_subst[OF + createObjects_no_cte_invs[where sz = pageBits and ty="Inl (KOArch (KOASIDPool pool))" for pool]] + createObjects_orig_cte_wp_at'[where sz = pageBits] hoare_vcg_const_imp_lift + |simp add: makeObjectKO_def asid_pool_typ_at_ext' valid_cap'_def cong: rev_conj_cong + |strengthen safe_parent_strg'[where idx= "2^ pageBits"])+ + apply (rule hoare_vcg_conj_lift) + apply (rule descendants_of'_helper) + apply (wp createObjects_null_filter' + [where sz = pageBits and ty="Inl (KOArch (KOASIDPool ap))" for ap] + createObjects_valid_pspace' + [where sz = pageBits and ty="Inl (KOArch (KOASIDPool ap))" for ap] + | simp add: makeObjectKO_def asid_pool_typ_at_ext' valid_cap'_def + cong: rev_conj_cong)+ + apply (simp add: objBits_simps valid_cap'_def capAligned_def range_cover_full) + apply (wp createObjects'_wp_subst[OF createObjects_ex_cte_cap_to[where sz = pageBits]] + createObjects_orig_cte_wp_at'[where sz = pageBits] + hoare_vcg_const_imp_lift + |simp add: makeObjectKO_def asid_pool_typ_at_ext' valid_cap'_def + isCap_simps canonical_address_and + cong: rev_conj_cong + |strengthen safe_parent_strg'[where idx = "2^ pageBits"] + | simp add: bit_simps)+ + apply (simp add:asid_pool_typ_at_ext'[symmetric]) + apply (wp createObject_typ_at') + apply (simp add: objBits_simps valid_cap'_def + capAligned_def range_cover_full makeObjectKO_def + asid_pool_typ_at_ext' + cong: rev_conj_cong) + apply (clarsimp simp:conj_comms + descendants_of_null_filter' + | strengthen invs_pspace_aligned' invs_pspace_distinct' + invs_pspace_aligned' invs_valid_pspace')+ + apply (wp updateFreeIndex_forward_invs' + updateFreeIndex_cte_wp_at + updateFreeIndex_pspace_no_overlap' + updateFreeIndex_caps_no_overlap'' + updateFreeIndex_descendants_of2 + updateFreeIndex_caps_overlap_reserved + updateCap_cte_wp_at_cases 
hoare_weak_lift_imp + getSlotCap_wp)+ + apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask + | strengthen invs_valid_pspace' invs_pspace_aligned' + invs_pspace_distinct' empty_descendants_range_in')+ + apply (wp deleteObjects_invs'[where p="makePoolParent aci"] + hoare_vcg_ex_lift + deleteObjects_caps_no_overlap''[where slot="makePoolParent aci"] + deleteObject_no_overlap + deleteObjects_cap_to'[where p="makePoolParent aci"] + deleteObjects_ct_active'[where cref="makePoolParent aci"] + deleteObjects_descendants[where p="makePoolParent aci"] + deleteObjects_cte_wp_at' + deleteObjects_null_filter[where p="makePoolParent aci"]) + apply (frule valid_capAligned) + apply (clarsimp simp: invs_mdb' invs_valid_pspace' capAligned_def + cte_wp_at_ctes_of is_simple_cap'_def isCap_simps) + apply (strengthen refl ctes_of_valid_cap'[mk_strg I E]) + apply (clarsimp simp: conj_comms invs_valid_objs') + apply (frule_tac ptr="w1" in descendants_range_caps_no_overlapI'[where sz = pageBits]) + apply (fastforce simp: cte_wp_at_ctes_of) + apply (simp add:empty_descendants_range_in') + apply (frule(1) if_unsafe_then_capD'[OF _ invs_unsafe_then_cap',rotated]) + apply (fastforce simp:cte_wp_at_ctes_of) + apply (drule ex_cte_not_in_untyped_range[rotated -2]) + apply (simp add:invs_valid_global')+ + apply (drule ex_cte_not_in_untyped_range[rotated -2]) + apply (simp add:invs_valid_global')+ + apply (subgoal_tac "is_aligned (2 ^ pageBits) minUntypedSizeBits") + prefer 2 + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1, simplified]) + apply (simp add: pageBits_def untypedBits_defs) + apply (frule_tac cte="CTE (capability.UntypedCap False a b c) m" for a b c m in valid_global_refsD', clarsimp) + apply (simp add: Int_commute) + by (auto simp:empty_descendants_range_in' objBits_simps max_free_index_def + asid_low_bits_def word_bits_def + range_cover_full descendants_range'_def2 is_aligned_mask + null_filter_descendants_of'[OF null_filter_simp'] bit_simps + valid_cap_simps' mask_def) + +lemma performVSpaceInvocation_invs[wp]: + "performVSpaceInvocation vspace \invs'\" + unfolding performVSpaceInvocation_def + by wpsimp + +lemma arch_performInvocation_invs': + "\invs' and ct_active' and valid_arch_inv' invocation\ + Arch.performInvocation invocation + \\rv. invs'\" + unfolding AARCH64_H.performInvocation_def + apply (cases invocation; clarsimp simp: performARMMMUInvocation_def valid_arch_inv'_def) + apply wpsimp+ + done + +end + +end diff --git a/proof/refine/AARCH64/Bits_R.thy b/proof/refine/AARCH64/Bits_R.thy new file mode 100644 index 0000000000..8fd5064679 --- /dev/null +++ b/proof/refine/AARCH64/Bits_R.thy @@ -0,0 +1,464 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Bits_R +imports Corres +begin + +crunch_ignore (add: + bind withoutFailure throw catchFailure rethrowFailure capFaultOnFailure lookupErrorOnFailure + nullCapOnFailure nothingOnFailure withoutPreemption preemptionPoint maskInterrupt unifyFailure + ignoreFailure emptyOnFailure clearMemoryVM assertDerived + setObject getObject updateObject loadObject) + +context Arch +begin + +crunch_ignore (add: lookupPTSlotFromLevel lookupPTFromLevel) + +end + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma throwE_R: "\\\ throw f \P\,-" + by (simp add: validE_R_def) wp + +lemma withoutFailure_wp [wp]: + "\P\ f \Q\ \ \P\ withoutFailure f \Q\,\E\" + "\P\ f \Q\ \ \P\ withoutFailure f \Q\,-" + "\\\ withoutFailure f -,\E\" + by (auto simp: validE_R_def validE_E_def valid_def) + +lemma no_fail_typeError [simp, wp]: + "no_fail \ (typeError xs ko)" + by (simp add: typeError_def) + +lemma isCap_simps: + "isZombie v = (\v0 v1 v2. v = Zombie v0 v1 v2)" + "isArchObjectCap v = (\v0. v = ArchObjectCap v0)" + "isThreadCap v = (\v0. v = ThreadCap v0)" + "isCNodeCap v = (\v0 v1 v2 v3. v = CNodeCap v0 v1 v2 v3)" + "isNotificationCap v = (\v0 v1 v2 v3. v = NotificationCap v0 v1 v2 v3)" + "isEndpointCap v = (\v0 v1 v2 v3 v4 v5. v = EndpointCap v0 v1 v2 v3 v4 v5)" + "isUntypedCap v = (\d v0 v1 f. v = UntypedCap d v0 v1 f)" + "isReplyCap v = (\v0 v1 v2. v = ReplyCap v0 v1 v2)" + "isIRQControlCap v = (v = IRQControlCap)" + "isIRQHandlerCap v = (\v0. v = IRQHandlerCap v0)" + "isNullCap v = (v = NullCap)" + "isDomainCap v = (v = DomainCap)" + "isFrameCap w = (\v0 v1 v2 v3 v4. w = FrameCap v0 v1 v2 v3 v4)" + "isArchFrameCap v = (\v0 v1 v2 v3 v4. v = ArchObjectCap (FrameCap v0 v1 v2 v3 v4))" + "isPageTableCap w = (\v0 v1 v2. w = PageTableCap v0 v1 v2)" + "isASIDControlCap w = (w = ASIDControlCap)" + "isASIDPoolCap w = (\v0 v1. w = ASIDPoolCap v0 v1)" + "isVCPUCap w = (\v. w = VCPUCap v)" + by (auto simp: isCap_defs split: capability.splits arch_capability.splits) + +lemma untyped_not_null [simp]: + "\ isUntypedCap NullCap" by (simp add: isCap_simps) + +text \Miscellaneous facts about low level constructs\ + +lemma projectKO_tcb: + "(projectKO_opt ko = Some t) = (ko = KOTCB t)" + by (cases ko) (auto simp: projectKO_opts_defs) + +lemma tcb_of'_TCB[simp]: + "tcb_of' (KOTCB tcb) = Some tcb" + by (simp add: projectKO_tcb) + +lemma projectKO_cte: + "(projectKO_opt ko = Some t) = (ko = KOCTE t)" + by (cases ko) (auto simp: projectKO_opts_defs) + +lemma projectKO_ep: + "(projectKO_opt ko = Some t) = (ko = KOEndpoint t)" + by (cases ko) (auto simp: projectKO_opts_defs) + +lemma projectKO_ntfn: + "(projectKO_opt ko = Some t) = (ko = KONotification t)" + by (cases ko) (auto simp: projectKO_opts_defs) + +lemma projectKO_ASID: + "(projectKO_opt ko = Some t) = (ko = KOArch (KOASIDPool t))" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemma projectKO_PTE: + "(projectKO_opt ko = Some t) = (ko = KOArch (KOPTE t))" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemma projectKO_user_data: + "(projectKO_opt ko = Some (t :: user_data)) = (ko = KOUserData)" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemma projectKO_user_data_device: + "(projectKO_opt ko = Some (t :: user_data_device)) = (ko = KOUserDataDevice)" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemma projectKO_VCPU: + "(projectKO_opt ko = Some t) = (ko = KOArch (KOVCPU t))" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemmas projectKOs[simp] = + projectKO_ntfn projectKO_ep projectKO_cte projectKO_tcb + projectKO_ASID projectKO_PTE projectKO_user_data projectKO_user_data_device projectKO_VCPU + projectKO_eq projectKO_eq2 + +lemma capAligned_epI: + "ep_at' p s \ capAligned (EndpointCap p a b c d e)" + apply (clarsimp simp: obj_at'_real_def capAligned_def + objBits_simps word_bits_def) + apply (drule ko_wp_at_norm) + apply clarsimp + apply (drule 
ko_wp_at_aligned) + apply (simp add: objBits_simps capUntypedPtr_def isCap_simps objBits_defs) + done + +lemma capAligned_ntfnI: + "ntfn_at' p s \ capAligned (NotificationCap p a b c)" + apply (clarsimp simp: obj_at'_real_def capAligned_def + objBits_simps word_bits_def capUntypedPtr_def isCap_simps) + apply (fastforce dest: ko_wp_at_norm + dest!: ko_wp_at_aligned simp: objBits_simps') + done + +lemma capAligned_tcbI: + "tcb_at' p s \ capAligned (ThreadCap p)" + apply (clarsimp simp: obj_at'_real_def capAligned_def + objBits_simps word_bits_def capUntypedPtr_def isCap_simps) + apply (fastforce dest: ko_wp_at_norm + dest!: ko_wp_at_aligned simp: objBits_simps') + done + +lemma capAligned_reply_tcbI: + "tcb_at' p s \ capAligned (ReplyCap p m r)" + apply (clarsimp simp: obj_at'_real_def capAligned_def + objBits_simps word_bits_def capUntypedPtr_def isCap_simps) + apply (fastforce dest: ko_wp_at_norm + dest!: ko_wp_at_aligned simp: objBits_simps') + done + +lemma ko_at_valid_objs': + assumes ko: "ko_at' k p s" + assumes vo: "valid_objs' s" + assumes k: "\ko. projectKO_opt ko = Some k \ injectKO k = ko" + shows "valid_obj' (injectKO k) s" using ko vo + by (clarsimp simp: valid_objs'_def obj_at'_def project_inject ranI) + +lemma obj_at_valid_objs': + "\ obj_at' P p s; valid_objs' s \ \ + \k. P k \ + ((\ko. projectKO_opt ko = Some k \ injectKO k = ko) + \ valid_obj' (injectKO k) s)" + apply (drule obj_at_ko_at') + apply clarsimp + apply (rule_tac x=ko in exI) + apply clarsimp + apply (erule (1) ko_at_valid_objs') + apply simp + done + +lemma tcb_in_valid_state': + "\ st_tcb_at' P t s; valid_objs' s \ \ \st. P st \ valid_tcb_state' st s" + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_valid_objs') + apply fastforce + apply (fastforce simp add: valid_obj'_def valid_tcb'_def) + done + +lemma getCurThread_corres: "corres (=) \ \ (gets cur_thread) getCurThread" + by (simp add: getCurThread_def curthread_relation) + +lemma gct_wp [wp]: "\\s. P (ksCurThread s) s\ getCurThread \P\" + by (unfold getCurThread_def, wp) + +lemma getIdleThread_corres: + "corres (=) \ \ (gets idle_thread) getIdleThread" + by (simp add: getIdleThread_def state_relation_def) + +lemma git_wp [wp]: "\\s. P (ksIdleThread s) s\ getIdleThread \P\" + by (unfold getIdleThread_def, wp) + +lemma gsa_wp [wp]: "\\s. P (ksSchedulerAction s) s\ getSchedulerAction \P\" + by (unfold getSchedulerAction_def, wp) + +text \Shorthand names for the relations between faults, errors and failures\ + +definition + fr :: "ExceptionTypes_A.fault \ Fault_H.fault \ bool" +where + fr_def[simp]: + "fr x y \ (y = fault_map x)" + +definition + ser :: "ExceptionTypes_A.syscall_error \ Fault_H.syscall_error \ bool" +where + ser_def[simp]: + "ser x y \ (y = syscall_error_map x)" + +definition + lfr :: "ExceptionTypes_A.lookup_failure \ Fault_H.lookup_failure \ bool" +where + lfr_def[simp]: + "lfr x y \ (y = lookup_failure_map x)" + +text \Correspondence and weakest precondition + rules for the "on failure" transformers\ + +lemma corres_injection: + assumes x: "t = injection_handler fn" + assumes y: "t' = injection_handler fn'" + assumes z: "\ft ft'. 
f' ft ft' \ f (fn ft) (fn' ft')" + shows "corres (f' \ r) P P' m m' + \ corres (f \ r) P P' (t m) (t' m')" + apply (simp add: injection_handler_def handleE'_def x y) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply assumption + apply (case_tac v, (clarsimp simp: z)+) + apply (rule wp_post_taut) + apply (rule wp_post_taut) + apply simp + apply simp + done + +lemma rethrowFailure_injection: + "rethrowFailure = injection_handler" + by (intro ext, simp add: rethrowFailure_def injection_handler_def o_def) + +lemma capFault_injection: + "capFaultOnFailure addr b = injection_handler (Fault_H.CapFault addr b)" + apply (rule ext) + apply (simp add: capFaultOnFailure_def rethrowFailure_injection) + done + +lemma lookupError_injection: + "lookupErrorOnFailure b = injection_handler (Fault_H.FailedLookup b)" + apply (rule ext) + apply (simp add: lookupErrorOnFailure_def rethrowFailure_injection) + done + +lemma corres_cap_fault: + "corres (lfr \ r) P P' f g \ + corres (fr \ r) P P' (cap_fault_on_failure addr b f) + (capFaultOnFailure addr b g)" + by (fastforce intro: corres_injection[where f'=lfr] + simp: cap_fault_injection capFault_injection) + +lemmas capFault_wp[wp] = injection_wp[OF capFault_injection] +lemmas capFault_wp_E[wp] = injection_wp_E[OF capFault_injection] + +lemmas capFault_bindE = injection_bindE[OF capFault_injection capFault_injection] + +lemmas capFault_liftE[simp] = injection_liftE[OF capFault_injection] + +lemma corres_lookup_error: + "\ corres (lfr \ r) P P' f g \ + \ corres (ser \ r) P P' (lookup_error_on_failure b f) (lookupErrorOnFailure b g)" + by (fastforce intro: corres_injection[where f'=lfr] + simp: lookup_error_injection lookupError_injection) + +lemmas lookupError_wp[wp] = injection_wp[OF lookupError_injection] +lemmas lookupError_wp_E[wp] = injection_wp_E[OF lookupError_injection] + +lemmas lookupError_bindE = injection_bindE[OF lookupError_injection lookupError_injection] + +lemmas lookupError_liftE[simp] = injection_liftE[OF lookupError_injection] + + +lemma unifyFailure_injection: + "unifyFailure = injection_handler (\x. ())" + by (rule ext, + simp add: unifyFailure_def injection_handler_def + rethrowFailure_def o_def) + +lemmas unifyFailure_injection_corres + = corres_injection [where f=dc, simplified, OF _ unifyFailure_injection] + +lemmas unifyFailure_discard + = unifyFailure_injection_corres [OF id_injection, simplified] + +lemmas unifyFailure_wp = injection_wp [OF unifyFailure_injection] + +lemmas unifyFailure_wp_E[wp] = injection_wp_E [OF unifyFailure_injection] + +lemmas corres_unify_failure = + corres_injection [OF unify_failure_injection unifyFailure_injection, rotated] + +lemma ignoreFailure_wp[wp_split]: + "\P\ v \\rv. Q ()\,\\rv. Q ()\ \ + \P\ ignoreFailure v \Q\" + by (simp add: ignoreFailure_def const_def) wp + +lemma ep'_cases_weak_wp: + assumes "\P_A\ a \Q\" + assumes "\q. \P_B\ b q \Q\" + assumes "\q. \P_C\ c q \Q\" + shows + "\P_A and P_B and P_C\ + case ts of + IdleEP \ a + | SendEP q \ b q + | RecvEP q \ c q \Q\" + apply (cases ts) + apply (simp, rule hoare_weaken_pre, rule assms, simp)+ + done + +lemma ntfn'_cases_weak_wp: + assumes "\P_A\ a \Q\" + assumes "\q. \P_B\ b q \Q\" + assumes "\bdg. \P_C\ c bdg \Q\" + shows + "\P_A and P_B and P_C\ + case ts of + IdleNtfn \ a + | WaitingNtfn q \ b q + | ActiveNtfn bdg \ c bdg \Q\" + apply (cases ts) + apply (simp, rule hoare_weaken_pre, rule assms, simp)+ + done + +lemma ko_at_imp_cte_wp_at': + fixes x :: cte + shows "\ ko_at' x ptr s \ \ cte_wp_at' (\cte. 
cte = x) ptr s" + apply (erule obj_atE') + apply (clarsimp simp: objBits_simps') + apply (erule cte_wp_at_cteI'; simp add: cte_level_bits_def) + done + +lemma modify_map_casesD: + "modify_map m p f p' = Some cte \ + (p \ p' \ m p' = Some cte) \ + (p = p' \ (\cap node. m p = Some (CTE cap node) \ f (CTE cap node) = cte))" + apply (simp add: modify_map_def split: if_split_asm) + apply clarsimp + apply (case_tac z) + apply auto + done + +lemma modify_map_casesE: + "\ modify_map m p f p' = Some cte; + \ p \ p'; m p' = Some cte \ \ P; + \cap node. \ p = p'; m p = Some (CTE cap node); cte = f (CTE cap node) \ \ P + \ \ P" + by (auto dest: modify_map_casesD) + + +lemma modify_map_cases: + "(modify_map m p f p' = Some cte) = + ((p \ p' \ m p' = Some cte) \ + (p = p' \ (\cap node. m p = Some (CTE cap node) \ f (CTE cap node) = cte)))" + apply (rule iffI) + apply (erule modify_map_casesD) + apply (clarsimp simp: modify_map_def) + done + + +lemma no_0_modify_map [simp]: + "no_0 (modify_map m p f) = no_0 m" + by (simp add: no_0_def modify_map_def) + + +lemma modify_map_0 [simp]: + "no_0 m \ modify_map m 0 f = m" + by (rule ext) (auto simp add: modify_map_def no_0_def) + + +lemma modify_map_exists: + "\cap node. m p = Some (CTE cap node) \ \cap' node'. modify_map m q f p = Some (CTE cap' node')" + apply clarsimp + apply (case_tac "f (CTE cap node)") + apply (cases "q=p") + apply (auto simp add: modify_map_cases) + done + + +lemma modify_map_exists_rev: + "modify_map m q f p = Some (CTE cap node) \ \cap' node'. m p = Some (CTE cap' node')" + apply (case_tac "f (CTE cap node)") + apply (cases "q=p") + apply (auto simp add: modify_map_cases) + done + + +lemma modify_map_if: + "(modify_map m p f p' = Some cte) = + (if p = p' + then \cap node. m p = Some (CTE cap node) \ f (CTE cap node) = cte + else \cap node. m p' = Some (CTE cap node) \ cte = CTE cap node)" + apply (cases cte) + apply (rule iffI) + apply (drule modify_map_casesD) + apply auto[1] + apply (auto simp: modify_map_def) + done + +lemma corres_empty_on_failure: + "corres ((\x y. r [] []) \ r) P P' m m' \ + corres r P P' (empty_on_failure m) (emptyOnFailure m')" + apply (simp add: empty_on_failure_def emptyOnFailure_def) + apply (rule corres_guard_imp) + apply (rule corres_split_catch) + apply assumption + apply (rule corres_trivial, simp) + apply wp+ + apply simp+ + done + + + +lemma emptyOnFailure_wp[wp]: + "\P\ m \Q\,\\rv. Q []\ \ \P\ emptyOnFailure m \Q\" + by (simp add: emptyOnFailure_def) wp + +lemma withoutPreemption_lift: + "\P\ f \Q\ \ \P\ withoutPreemption f \Q\, \E\" + by simp + +lemma withoutPreemption_R: + "\\\ withoutPreemption f -, \Q\" + by (wp withoutPreemption_lift) + +lemma ko_at_cte_ipcbuffer: + "ko_at' tcb p s \ cte_wp_at' (\x. x = tcbIPCBufferFrame tcb) (p + tcbIPCBufferSlot * 0x20) s" + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (erule (2) cte_wp_at_tcbI') + apply (fastforce simp add: tcb_cte_cases_def tcbIPCBufferSlot_def cteSizeBits_def) + apply simp + done + +lemma set_ep_arch': "\\s. P (ksArchState s)\ setEndpoint ntfn p \\_ s. P (ksArchState s)\" + apply (simp add: setEndpoint_def setObject_def split_def) + apply (wp updateObject_default_inv|simp)+ + done + +lemma corres_const_on_failure: + "corres ((\_ _. 
r x y) \ r) P P' m m' \ + corres r P P' (const_on_failure x m) (constOnFailure y m')" + apply (simp add: const_on_failure_def constOnFailure_def) + apply (rule corres_guard_imp) + apply (rule corres_split_catch) + apply assumption + apply (rule corres_trivial, simp) + apply (clarsimp simp: const_def) + apply wp+ + apply simp+ + done + +lemma constOnFailure_wp : + "\P\ m \Q\, \\rv. Q n\ \ \P\ constOnFailure n m \Q\" + apply (simp add: constOnFailure_def const_def) + apply (wp|simp)+ + done + +end +end diff --git a/proof/refine/AARCH64/CNodeInv_R.thy b/proof/refine/AARCH64/CNodeInv_R.thy new file mode 100644 index 0000000000..13d655e709 --- /dev/null +++ b/proof/refine/AARCH64/CNodeInv_R.thy @@ -0,0 +1,9061 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Results about CNode Invocations, particularly the + recursive revoke and delete operations. +*) + +theory CNodeInv_R +imports Ipc_R Invocations_R +begin + +unbundle l4v_word_context + +context begin interpretation Arch . (*FIXME: arch_split*) + +primrec + valid_cnode_inv' :: "Invocations_H.cnode_invocation \ kernel_state \ bool" +where + "valid_cnode_inv' (Insert cap ptr ptr') = + (valid_cap' cap and + (\s. cte_wp_at' (is_derived' (ctes_of s) ptr cap \ cteCap) ptr s) and + cte_wp_at' (untyped_derived_eq cap \ cteCap) ptr and + cte_wp_at' (\c. cteCap c = NullCap) ptr' and (\s. ptr \ ptr') and + ex_cte_cap_to' ptr')" +| "valid_cnode_inv' (Move cap ptr ptr') = + (cte_wp_at' (\c. weak_derived' cap (cteCap c)) ptr and + cte_wp_at' (\c. isUntypedCap (cteCap c) \ (cteCap c) = cap) ptr and + cte_wp_at' (\c. cteCap c \ NullCap) ptr and valid_cap' cap and + cte_wp_at' (\c. cteCap c = NullCap) ptr' and ex_cte_cap_to' ptr')" +| "valid_cnode_inv' (Revoke ptr) = cte_at' ptr" +| "valid_cnode_inv' (Delete ptr) = cte_at' ptr" +| "valid_cnode_inv' (Rotate s_cap p_cap src pivot dest) = + (valid_cap' s_cap and valid_cap' p_cap and + cte_wp_at' (\c. weak_derived' s_cap (cteCap c)) src and + cte_wp_at' (\c. isUntypedCap (cteCap c) \ (cteCap c) = s_cap) src and + cte_wp_at' (\c. weak_derived' p_cap (cteCap c)) pivot and + cte_wp_at' (\c. isUntypedCap (cteCap c) \ (cteCap c) = p_cap) pivot and + K (src \ pivot \ pivot \ dest \ s_cap \ capability.NullCap \ + p_cap \ capability.NullCap) and + (\s. src \ dest \ cte_wp_at' (\c. cteCap c = NullCap) dest s) and + (\s. ex_cte_cap_to' pivot s \ ex_cte_cap_to' dest s))" +| "valid_cnode_inv' (SaveCaller slot) = + (ex_cte_cap_to' slot and cte_wp_at' (\c. cteCap c = NullCap) slot)" +| "valid_cnode_inv' (CancelBadgedSends cap) = + (valid_cap' cap and K (hasCancelSendRights cap))" + +lemma rightsFromWord_correspondence: + "rightsFromWord w = rights_mask_map (data_to_rights w)" + by (simp add: rightsFromWord_def rights_mask_map_def data_to_rights_def Let_def) + +primrec + cnodeinv_relation :: "Invocations_A.cnode_invocation \ Invocations_H.cnode_invocation \ bool" +where + "cnodeinv_relation (InsertCall c cp1 cp2) x = ( + \c'. cap_relation c c' \ (x = + Insert c' (cte_map cp1) (cte_map cp2)))" +| "cnodeinv_relation (MoveCall c cp1 cp2) x = ( + \c'. cap_relation c c' \ (x = + Move c' (cte_map cp1) (cte_map cp2)))" +| "cnodeinv_relation (RevokeCall cp) x = (x = + Revoke (cte_map cp))" +| "cnodeinv_relation (DeleteCall cp) x = (x = + Delete (cte_map cp))" +| "cnodeinv_relation (RotateCall sc pc src pvt dst) x = (\sc' pc'. 
+ cap_relation sc sc' \ cap_relation pc pc' \ + x = Rotate sc' pc' (cte_map src) (cte_map pvt) (cte_map dst))" +| "cnodeinv_relation (SaveCall p) x = (x = SaveCaller (cte_map p))" +| "cnodeinv_relation (CancelBadgedSendsCall c) x = (\c'. cap_relation c c' \ x = CancelBadgedSends c')" + + +lemma cap_relation_NullCap: + "cap_relation cap cap' \ + (update_cap_data P x cap = cap.NullCap) = (RetypeDecls_H.updateCapData P x cap' = capability.NullCap)" + apply (cases cap) + apply (simp_all add: Let_def mask_cap_def cap_rights_update_def update_cap_data_closedform + arch_update_cap_data_def word_bits_def updateCapData_def isCap_simps + split del: if_split) + apply simp + apply simp + apply (clarsimp simp: word_size word_size_def cnode_padding_bits_def cnode_guard_size_bits_def + cteRightsBits_def cteGuardBits_def) + apply (clarsimp simp: AARCH64_H.updateCapData_def isCap_simps split del: if_split) + done + +(* Sometimes I need something about the state. This is neater (IMHO) and req *) +lemma whenE_throwError_corres': + assumes P: "frel f f'" + assumes Q: "\s s'. \(s, s') \ state_relation; R s; R' s'\ \ P = P'" + assumes R: "\ P \ corres (frel \ rvr) Q Q' m m'" + shows "corres (frel \ rvr) (R and Q) (R' and Q') + (whenE P (throwError f ) >>=E (\_. m )) + (whenE P' (throwError f') >>=E (\_. m'))" + unfolding whenE_def + apply (rule corres_req) + apply (erule Q) + apply simp + apply simp + apply (cases P) + apply (simp add: P) + apply simp + apply (erule corres_guard_imp [OF R]) + apply simp + apply simp + done + +(* FIXME: move *) +lemma corres_split_liftM2: + assumes corr: "corres (\x y. r' x (f y)) P P' a c" + and r1: "\rv rv'. r' rv rv' \ corres r (R rv) (R' rv') (b rv) (d rv')" + and h1: "\Q\ a \R\" and h2: "\Q'\ c \\x. R' (f x)\" + shows "corres r (P and Q) (P' and Q') (a >>= b) (liftM f c >>= d)" + apply (rule corres_guard_imp) + apply (rule corres_split[OF _ _ h1]) + apply (simp add: o_def) + apply (rule corr) + apply (erule r1) + apply wp + apply (simp add: o_def) + apply (rule h2) + apply simp + apply simp + done + +lemma cap_relation_NullCapI: + "cap_relation c c' \ (c = cap.NullCap) = (c' = NullCap)" + by (cases c, auto) + +lemma isCNodeCap_CNodeCap: + "isCNodeCap (CNodeCap a b c d)" + by (simp add: isCap_simps) + +lemma get_cap_corres': + "cte_ptr' = cte_map cte_ptr \ + corres (\x y. cap_relation x (cteCap y)) (cte_at cte_ptr) + (pspace_aligned' and pspace_distinct') (get_cap cte_ptr) + (getCTE cte_ptr')" + by (simp add: get_cap_corres) + +lemma cnode_invok_case_cleanup: + "i \ {CNodeRevoke, CNodeDelete, CNodeCancelBadgedSends, CNodeRotate, CNodeSaveCaller} + \ (case i of CNodeRevoke \ P | CNodeDelete \ Q | CNodeCancelBadgedSends \ R + | CNodeRotate \ S | CNodeSaveCaller \ T + | _ \ U) = U" + by (simp split: gen_invocation_labels.split) + +lemma cancelSendRightsEq: + "cap_relation cap cap' \ hasCancelSendRights cap' = has_cancel_send_rights cap" + by (auto simp: hasCancelSendRights_def has_cancel_send_rights_def all_rights_def + vmrights_map_def + split: cap.splits bool.splits if_splits | + case_tac x)+ + +lemma decodeCNodeInvocation_corres: + "\ cap_relation (cap.CNodeCap w n list) cap'; list_all2 cap_relation cs cs'; + length list \ 64 \ \ + corres + (ser \ cnodeinv_relation) + (invs and cap_table_at n w and K (n \ 0) and (\s. \x \ set cs. s \ x)) (invs' and valid_cap' cap' and (\s. \x \ set cs'. 
s \' x)) + (decode_cnode_invocation (mi_label mi) args + (cap.CNodeCap w n list) cs) + (decodeCNodeInvocation (mi_label mi) args + cap' cs')" + apply (rule decode_cnode_cases2[where args=args and exs=cs and label="mi_label mi"]) + \ \Move / Insert\ + apply (clarsimp simp: list_all2_Cons1 decode_cnode_invocation_def + decodeCNodeInvocation_def split_def Let_def + unlessE_whenE isCNodeCap_CNodeCap + cnode_invok_case_cleanup + split del: if_split + cong: if_cong list.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (rule corres_splitEE) + apply (rule ensureEmptySlot_corres; simp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp(no_asm) add: liftE_bindE del: de_Morgan_conj split del: if_split) + apply (rule corres_split[OF get_cap_corres']) + apply (simp add: split_def) + apply (rule whenE_throwError_corres) + apply (simp add: lookup_failure_map_def) + apply auto[1] + apply (rule_tac r'="\a b. fst b = rights_mask_map (fst a) + \ snd b = fst (snd a) + \ snd (snd a) = (gen_invocation_type (mi_label mi) + \ {CNodeMove, CNodeMutate})" + in corres_splitEE) + apply (rule corres_trivial) + subgoal by (auto split: list.split gen_invocation_labels.split, + auto simp: returnOk_def all_rights_def + rightsFromWord_correspondence) + apply (rule_tac r'=cap_relation in corres_splitEE) + apply (simp add: returnOk_def del: imp_disjL) + apply (rule conjI[rotated], rule impI) + apply (rule deriveCap_corres) + apply (clarsimp simp: cap_relation_mask + cap_map_update_data + split: option.split) + apply clarsimp + apply (clarsimp simp: cap_map_update_data + split: option.split) + apply (rule corres_trivial) + subgoal by (auto simp add: whenE_def, auto simp add: returnOk_def) + apply (wp | wpc | simp(no_asm))+ + apply (wp hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift + hoare_vcg_all_liftE_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps + | clarsimp)+ + subgoal by (auto elim!: valid_cnode_capI) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + \ \Revoke\ + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE del: ser_def split del: if_split) + apply (rule corres_guard_imp, rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp add: split_beta) + apply (rule corres_returnOkTT) + apply simp + apply wp+ + apply (auto elim!: valid_cnode_capI)[1] + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + \ \Delete\ + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE del: ser_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp add: split_beta) + apply (rule corres_returnOkTT) + apply simp + apply wp+ + apply (auto elim!: valid_cnode_capI)[1] + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + \ \SaveCall\ + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE del: ser_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp add: split_beta) + apply (rule corres_split_norE) + apply (rule ensureEmptySlot_corres) + apply simp + apply (rule corres_returnOkTT) + apply simp + apply (wp hoare_drop_imps)+ + apply (auto elim!: valid_cnode_capI)[1] + apply (clarsimp 
simp: invs'_def valid_state'_def valid_pspace'_def) + \ \CancelBadgedSends\ + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE del: ser_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp(no_asm) add: split_beta liftE_bindE) + apply (rule corres_split[OF get_cap_corres'], simp) + apply (rule corres_split_norE) + apply (simp add: cancelSendRightsEq) + apply (rule corres_trivial, auto simp add: whenE_def returnOk_def)[1] + apply (rule corres_trivial) + apply (clarsimp simp add: returnOk_def) + apply (wp get_cap_wp getCTE_wp | simp only: whenE_def | clarsimp)+ + apply (rule hoare_trivE_R[where P="\"]) + apply (wpsimp simp: cte_wp_at_ctes_of pred_conj_def) + apply (fastforce elim!: valid_cnode_capI simp: invs_def valid_state_def valid_pspace_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + \ \Rotate\ + apply (frule list_all2_lengthD) + apply (clarsimp simp: list_all2_Cons1) + apply (simp add: le_diff_conv2 split_def decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE whenE_whenE_body + del: disj_not1 ser_def split del: if_split) + apply (rule corres_guard_imp, rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (rename_tac dest_slot destSlot) + apply (rule corres_splitEE, (rule lookupSlotForCNodeOp_corres; simp))+ + apply (rule_tac R = "\s. cte_at pivot_slot s \ cte_at dest_slot s + \ cte_at src_slot s \ invs s" in + whenE_throwError_corres' [where R' = \]) + apply simp + apply (elim conjE) + apply rule + apply fastforce + apply (erule disjE) + apply (clarsimp simp add: split_def) + apply (drule (2) cte_map_inj_eq, clarsimp+)[1] + apply (clarsimp simp add: split_def) + apply (drule (2) cte_map_inj_eq, clarsimp+)[1] + apply (rule corres_split_norE) + apply (rule_tac F = "(src_slot \ dest_slot) = (srcSlot \ destSlot)" + and P = "\s. 
cte_at src_slot s \ cte_at dest_slot s \ invs s" and P' = invs' in corres_req) + apply simp + apply rule + apply clarsimp + apply clarsimp + apply (drule (2) cte_map_inj_eq, clarsimp+)[1] + apply (rule corres_guard_imp) + apply (erule corres_whenE) + apply (rule ensureEmptySlot_corres) + apply clarsimp + apply simp + apply clarsimp + apply clarsimp + apply (simp add: liftE_bindE del: de_Morgan_conj disj_not1 split del: if_split) + apply (rule corres_split_liftM2, simp only: split_beta, rule get_cap_corres) + apply (rule whenE_throwError_corres) + apply (simp add: lookup_failure_map_def) + apply (erule cap_relation_NullCapI) + apply (rule corres_split_liftM2, simp only: split_beta, rule get_cap_corres) + apply (rule whenE_throwError_corres) + apply (simp add: lookup_failure_map_def) + apply (erule cap_relation_NullCapI) + apply (rule whenE_throwError_corres) + apply simp + apply (simp add: cap_relation_NullCap) + apply (rule corres_returnOkTT) + apply simp + apply (intro conjI) + apply (erule cap_map_update_data)+ + apply (wp hoare_drop_imps)+ + apply simp + apply (wp lsfco_cte_at' lookup_cap_valid lookup_cap_valid') + apply (simp add: if_apply_def2) + apply (wp hoare_drop_imps) + apply wp + apply simp + apply (wp lsfco_cte_at' lookup_cap_valid lookup_cap_valid' hoare_drop_imps + | simp add: if_apply_def2 del: de_Morgan_conj split del: if_split)+ + apply (auto elim!: valid_cnode_capI)[1] + apply (clarsimp dest!: list_all2_lengthD simp: invs'_def valid_state'_def valid_pspace'_def) + \ \Errors\ + apply (elim disjE) + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap unlessE_whenE + split: list.split) + apply (clarsimp simp: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap unlessE_whenE) + apply (clarsimp simp: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap unlessE_whenE) + apply clarsimp + apply (elim disjE) + apply (clarsimp simp: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap split_def unlessE_whenE + cnode_invok_case_cleanup + split del: if_split cong: if_cong) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (rule corres_trivial, clarsimp split: list.split_asm) + apply wp+ + apply (auto elim!: valid_cnode_capI)[1] + apply fastforce + apply (clarsimp simp: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap split_def unlessE_whenE + split del: if_split cong: if_cong) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF lookupSlotForCNodeOp_corres _ wp_post_tautE wp_post_tautE]) + apply simp + apply simp + apply (clarsimp simp: list_all2_Cons1 list_all2_Nil + split: list.split_asm split del: if_split) + apply (auto elim!: valid_cnode_capI)[1] + apply fastforce + done + +lemma capBadge_updateCapData_True: + "updateCapData True x c \ NullCap \ capBadge (updateCapData True x c) = capBadge c" + apply (simp add: updateCapData_def isCap_simps Let_def + split: if_split_asm split del: if_split) + apply (simp add: AARCH64_H.updateCapData_def) + done + +lemma badge_derived_updateCapData: + "\ updateCapData False x cap \ NullCap; badge_derived' cap cap' \ + \ badge_derived' (updateCapData False x cap) cap'" + by (simp add: badge_derived'_def updateCapData_Master + updateCapData_ordering) + +lemma deriveCap_Null_helper: + assumes "\P\ deriveCap x cap \\rv s. rv \ NullCap \ Q rv s\,-" + shows "\\s. cap \ NullCap \ P s\ deriveCap x cap \\rv s. 
rv \ NullCap \ Q rv s\,-" + apply (cases "cap = NullCap") + apply (simp add: deriveCap_def isCap_simps) + apply (wp | simp)+ + apply (rule hoare_strengthen_postE_R, rule assms) + apply simp + done + +lemma hasCancelSendRights_not_Null: + "hasCancelSendRights cap \ isEndpointCap cap" + by (clarsimp simp: hasCancelSendRights_def isCap_simps split: capability.splits) + +declare if_split [split del] + +lemma untyped_derived_eq_maskCapRights: + "untyped_derived_eq (RetypeDecls_H.maskCapRights m cap) cap' + = untyped_derived_eq cap cap'" + apply (simp add: untyped_derived_eq_def) + apply (rule imp_cong) + apply (rule capMaster_isUntyped, simp) + apply (clarsimp simp: isCap_simps) + done + +lemma untyped_derived_eq_updateCapData: + "RetypeDecls_H.updateCapData x y cap \ NullCap + \ untyped_derived_eq (RetypeDecls_H.updateCapData x y cap) cap' + = untyped_derived_eq cap cap'" + apply (simp add: untyped_derived_eq_def) + apply (rule imp_cong) + apply (rule capMaster_isUntyped) + apply (erule updateCapData_Master) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: updateCapData_def isCap_simps) + done + +lemma untyped_derived_eq_refl: + "untyped_derived_eq c c" + by (simp add: untyped_derived_eq_def) + +lemma decodeCNodeInv_wf[wp]: + "\invs' and valid_cap' (CNodeCap w n w2 n2) + and (\s. \r\cte_refs' (CNodeCap w n w2 n2) (irq_node' s). + ex_cte_cap_to' r s) + and (\s. \cap \ set cs. s \' cap) + and (\s. \cap \ set cs. \r\cte_refs' cap (irq_node' s). ex_cte_cap_to' r s)\ + decodeCNodeInvocation label args + (CNodeCap w n w2 n2) cs + \valid_cnode_inv'\, -" + apply (rule decode_cnode_cases2[where label=label and args=args and exs=cs]) + \ \Move/Insert\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap + split_def cnode_invok_case_cleanup unlessE_whenE + cong: if_cong bool.case_cong list.case_cong) + apply (rule hoare_pre) + apply (wp whenE_throwError_wp) + apply (rule deriveCap_Null_helper) + apply (simp add: imp_conjR) + apply ((wp deriveCap_derived deriveCap_untyped_derived + | wp (once) hoare_drop_imps)+)[1] + apply (wp whenE_throwError_wp getCTE_wp | wpc | simp(no_asm))+ + apply (rule_tac Q'="\rv. invs' and cte_wp_at' (\cte. 
cteCap cte = NullCap) destSlot + and ex_cte_cap_to' destSlot" + in hoare_strengthen_postE_R, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule invs_valid_objs') + apply (simp add: ctes_of_valid' valid_updateCapDataI + weak_derived_updateCapData capBadge_updateCapData_True + badge_derived_updateCapData + badge_derived_mask untyped_derived_eq_maskCapRights + untyped_derived_eq_updateCapData + untyped_derived_eq_refl) + apply (auto simp:isCap_simps updateCapData_def)[1] + apply (wp ensureEmptySlot_stronger | simp | wp (once) hoare_drop_imps)+ + \ \Revoke\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_whenE + cong: if_cong bool.case_cong list.case_cong) + apply (rule hoare_pre) + apply (wp lsfco_cte_at' | simp)+ + apply clarsimp + \ \Delete\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_whenE + cong: if_cong bool.case_cong list.case_cong) + apply (rule hoare_pre) + apply (wp lsfco_cte_at' | simp)+ + apply clarsimp + \ \SaveCaller\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_whenE) + apply (rule hoare_pre) + apply (wp lsfco_cte_at' | simp | wp (once) hoare_drop_imps)+ + \ \CancelBadgedSends\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_whenE) + apply (rule hoare_pre) + apply (wp whenE_throwError_wp getCTE_wp | simp)+ + apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. True) rv s" in hoare_strengthen_postE_R) + apply (wp lsfco_cte_at') + apply (simp add: cte_wp_at_ctes_of imp_ex hasCancelSendRights_not_Null) + apply (clarsimp simp: ctes_of_valid' invs_valid_objs') + apply (simp add: invs_valid_objs') + \ \Rotate\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_def) + apply (rule hoare_pre) + apply (wp whenE_throwError_wp getCTE_wp ensureEmptySlot_stronger + | simp add: o_def)+ + apply (rule_tac Q'="\rv s. cte_at' rv s \ cte_at' destSlot s + \ cte_at' srcSlot s \ ex_cte_cap_to' rv s + \ ex_cte_cap_to' destSlot s + \ invs' s" in hoare_strengthen_postE_R) + apply (wp lsfco_cte_at') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule invs_valid_objs') + apply (simp add: weak_derived_updateCapData capBadge_updateCapData_True + valid_updateCapDataI ctes_of_valid') + apply (fastforce simp:isCap_simps updateCapData_def) + apply (wp lsfco_cte_at')+ + apply clarsimp + \ \Errors\ + apply (elim disjE exE conjE, + simp_all add: decodeCNodeInvocation_def isCNodeCap_CNodeCap + unlessE_whenE cnode_invok_case_cleanup + split: list.split_asm list.split) + by (auto simp: valid_def validE_def validE_R_def in_monad) + +lemma decodeCNodeInvocation_inv[wp]: + "\P\ decodeCNodeInvocation label args cap cs \\rv. 
P\" + apply (cases "\isCNodeCap cap") + apply (simp only: decodeCNodeInvocation_def Let_def split_def + fst_conv snd_conv, simp) + apply (rule decode_cnode_cases2[where label=label and args=args and exs=cs]) + apply (simp_all add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + Let_def whenE_def unlessE_def cnode_invok_case_cleanup + split del: if_split cong del: if_cong)[6] + apply (fold_subgoals (prefix))[6] + subgoal premises prems + by (safe intro!: hoare_pre[where P=P], + (wp hoare_drop_imps | simp | wpcw)+) + apply (elim disjE exE conjE, + simp_all add: decodeCNodeInvocation_def isCNodeCap_CNodeCap + cnode_invok_case_cleanup unlessE_whenE + split: list.split_asm split del: if_split) + apply (simp_all split: list.split add: unlessE_whenE) + apply safe + apply (wp | simp)+ + done + +text \Various proofs about the two recursive deletion operations. + These call out to various functions in Tcb and Ipc, and are + thus better proved here than in CSpace_R.\ + +text \Proving the termination of rec_del\ + +crunch typ_at[wp]: cancel_ipc "\s. P (typ_at T p s)" + (wp: crunch_wps hoare_vcg_if_splitE simp: crunch_simps) + +declare if_split [split] + +text \Proving desired properties about rec_del/cap_delete\ + +declare of_nat_power [simp del] + +text \Proving desired properties about recursiveDelete/cteDelete\ + +text \Proving the termination of finaliseSlot\ + +definition + not_recursive_ctes :: "kernel_state \ machine_word set" +where + "not_recursive_ctes s \ {ptr. \cap. cteCaps_of s ptr = Some cap + \ \ (isZombie cap \ capZombiePtr cap = ptr)}" + +lemma not_recursive_ctes_wu [simp]: + "not_recursive_ctes (ksWorkUnitsCompleted_update f s) = not_recursive_ctes s" + by (simp add: not_recursive_ctes_def) + +lemma not_recursive_ctes_irq_state_independent[simp, intro!]: + "not_recursive_ctes (s \ ksMachineState := ksMachineState s \ irq_state := x \\) = not_recursive_ctes s" + by (simp add: not_recursive_ctes_def) + +lemma capSwap_not_recursive: + "\\s. card (not_recursive_ctes s) \ n + \ cte_wp_at' (\cte. \ (isZombie (cteCap cte) \ capZombiePtr (cteCap cte) = p1)) p1 s + \ cte_wp_at' (\cte. isZombie (cteCap cte) \ capZombiePtr (cteCap cte) = p1) p2 s + \ p1 \ p2\ + capSwapForDelete p1 p2 + \\rv s. card (not_recursive_ctes s) < n\" + apply (simp add: not_recursive_ctes_def cteSwap_def capSwapForDelete_def) + apply (wp | simp add: o_def | rule getCTE_cteCap_wp)+ + apply (simp add: cte_wp_at_ctes_of modify_map_def cteCaps_of_def + cong: option.case_cong) + apply (elim conjE exE) + apply (simp cong: conj_cong) + apply (erule order_less_le_trans[rotated]) + apply (rule psubset_card_mono) + apply simp + apply (rule psubsetI) + apply clarsimp + apply (rule_tac f="\S. p1 \ S" in distinct_lemma) + apply simp + done + +lemma updateCap_not_recursive: + "\\s. card (not_recursive_ctes s) \ n + \ cte_wp_at' (\cte. isZombie (cteCap cte) \ capZombiePtr (cteCap cte) = ptr + \ isZombie cap \ capZombiePtr cap = ptr) + ptr s\ + updateCap ptr cap + \\rv s. card (not_recursive_ctes s) \ n\" + apply (simp add: not_recursive_ctes_def) + apply wp + apply clarsimp + apply (erule order_trans[rotated]) + apply (rule card_mono, simp) + apply clarsimp + apply (simp add: modify_map_def split: if_split_asm) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of) + done + +lemma suspend_ctes_of_thread: + "\\s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\ + suspend t + \\rv s. \node. 
ctes_of s x = Some (CTE (ThreadCap t) node)\" + apply (rule hoare_chain) + apply (rule suspend_cte_wp_at'[where P="(=) (ThreadCap t)" and p=x]) + apply (clarsimp simp add: finaliseCap_def Let_def isCap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, simp) + done + +lemma unbindNotification_ctes_of_thread: + "\\s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\ + unbindNotification t + \\rv s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\" + by wp + +lemma prepareThreadDelete_ctes_of_thread: + "\\s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\ + prepareThreadDelete t + \\rv s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\" + by (wpsimp simp: prepareThreadDelete_def fpuThreadDelete_def) + +lemma suspend_not_recursive_ctes: + "\\s. P (not_recursive_ctes s)\ + suspend t + \\rv s. P (not_recursive_ctes s)\" + apply (simp only: suspend_def not_recursive_ctes_def cteCaps_of_def updateRestartPC_def) + apply (wp threadSet_ctes_of | simp add: unless_def del: o_apply)+ + apply (fold cteCaps_of_def) + apply (wp cancelIPC_cteCaps_of) + apply (clarsimp elim!: rsubst[where P=P] intro!: set_eqI) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (auto simp: isCap_simps finaliseCap_def Let_def) + done + +lemma unbindNotification_not_recursive_ctes: + "\\s. P (not_recursive_ctes s)\ + unbindNotification t + \\rv s. P (not_recursive_ctes s)\" + apply (simp only: not_recursive_ctes_def cteCaps_of_def) + apply wp + done + +lemma prepareThreadDelete_not_recursive_ctes: + "\\s. P (not_recursive_ctes s)\ + prepareThreadDelete t + \\rv s. P (not_recursive_ctes s)\" + by (wpsimp simp: prepareThreadDelete_def not_recursive_ctes_def cteCaps_of_def fpuThreadDelete_def) + +definition + finaliseSlot_recset :: "((machine_word \ bool \ kernel_state) \ (machine_word \ bool \ kernel_state)) set" +where + "finaliseSlot_recset \ + wf_sum (\(slot, exposed, state). exposed) + (inv_image (less_than <*lex*> less_than) + (\(x, exp, s). case ctes_of s x of + Some (CTE NullCap node) \ (0, 0) + | Some (CTE (Zombie p zb n) node) \ + (if p = x then 1 else 2, n) + | _ \ (3, 0))) + (measure (\(x, exp, s). card (not_recursive_ctes s)))" + +lemma finaliseSlot_recset_wf: "wf finaliseSlot_recset" + unfolding finaliseSlot_recset_def + by (intro wf_sum_wf wf_rdcall_finalise_ord_lift wf_measure + wf_inv_image wf_lex_prod wf_less_than) + +lemma in_preempt': + "(Inr rv, s') \ fst (preemptionPoint s) \ + \f g. s' = ksWorkUnitsCompleted_update f + (s \ ksMachineState := ksMachineState s \ irq_state := g (irq_state (ksMachineState s)) \\)" + apply (simp add: preemptionPoint_def alternative_def in_monad + getActiveIRQ_def doMachineOp_def split_def + select_f_def select_def getWorkUnits_def setWorkUnits_def + modifyWorkUnits_def return_def returnOk_def + split: option.splits if_splits) + apply (erule disjE) + apply (cases "workUnitsLimit \ ksWorkUnitsCompleted s + 1", drule (1) mp, + rule exI[where x="\x. 0"], rule exI[where x=Suc], force, + rule exI[where x="\x. x + 1"], rule exI[where x=id], force)+ + apply (rule exI[where x="\x. 
x + 1"], rule exI[where x=id], force) + done + +lemma updateCap_implies_cte_at: + "(rv, s') \ fst (updateCap ptr cap s) + \ cte_at' ptr s" + apply (clarsimp simp: updateCap_def in_monad) + apply (frule in_inv_by_hoareD [OF getCTE_inv]) + apply (drule use_valid [OF _ getCTE_cte_wp_at], simp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma case_Zombie_assert_fold: + "(case cap of Zombie ptr zb n \ haskell_assertE (P ptr) str | _ \ returnOk ()) + = assertE (isZombie cap \ P (capZombiePtr cap))" + by (cases cap, simp_all add: isCap_simps assertE_def) + +termination finaliseSlot' + apply (rule finaliseSlot'.termination, + rule finaliseSlot_recset_wf) + apply (simp add: finaliseSlot_recset_def wf_sum_def) + apply (clarsimp simp: in_monad dest!: in_preempt') + apply (drule in_inv_by_hoareD [OF isFinalCapability_inv]) + apply (frule use_valid [OF _ getCTE_cte_wp_at, OF _ TrueI]) + apply (drule in_inv_by_hoareD [OF getCTE_inv]) + apply (clarsimp simp: in_monad split: if_split_asm) + apply (clarsimp simp: Let_def in_monad finaliseSlot_recset_def + wf_sum_def liftM_def + case_Zombie_assert_fold) + apply (frule use_valid [OF _ getCTE_cte_wp_at, OF _ TrueI]) + apply (drule in_inv_by_hoareD [OF getCTE_inv]) + apply clarsimp + apply (erule use_valid [OF _ capSwap_not_recursive]) + apply (simp add: cte_wp_at_ctes_of) + apply (frule updateCap_implies_cte_at) + apply (erule use_valid [OF _ hoare_vcg_conj_lift, + OF _ updateCap_not_recursive updateCap_ctes_of_wp]) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def) + apply (frule use_valid [OF _ finaliseCap_cases], simp) + apply (case_tac rv, simp) + apply (simp add: isCap_simps, elim conjE disjE exE) + apply simp + apply (clarsimp simp: finaliseCap_def Let_def isCap_simps in_monad + getThreadCSpaceRoot_def locateSlot_conv) + apply (frule(1) use_valid [OF _ unbindNotification_ctes_of_thread, OF _ exI]) + apply (frule(1) use_valid [OF _ suspend_ctes_of_thread]) + apply (frule(1) use_valid [OF _ prepareThreadDelete_ctes_of_thread]) + apply clarsimp + apply (erule use_valid [OF _ prepareThreadDelete_not_recursive_ctes]) + apply (erule use_valid [OF _ suspend_not_recursive_ctes]) + apply (erule use_valid [OF _ unbindNotification_not_recursive_ctes]) + apply simp + apply (clarsimp simp: finaliseCap_def Let_def isCap_simps in_monad) + apply (clarsimp simp: finaliseCap_def Let_def isCap_simps in_monad) + apply (clarsimp simp: in_monad Let_def locateSlot_conv + finaliseSlot_recset_def wf_sum_def + cte_wp_at_ctes_of cong: if_cong) + apply (clarsimp split: if_split_asm + simp: in_monad + dest!: in_getCTE) + apply (erule use_valid [OF _ updateCap_ctes_of_wp])+ + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def) + apply (case_tac ourCTE) + apply (rename_tac cap node) + apply (case_tac rv, simp) + apply (rename_tac cap' node') + apply (case_tac cap'; simp) + apply (erule use_valid [OF _ updateCap_ctes_of_wp])+ + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def) + apply (frule use_valid [OF _ finaliseCap_cases], simp) + apply (case_tac ourCTE, case_tac rv, + clarsimp simp: isCap_simps) + apply (elim disjE conjE exE, simp_all)[1] + apply (clarsimp simp: finaliseCap_def Let_def isCap_simps in_monad) + apply (frule use_valid [OF _ finaliseCap_cases], simp) + apply (case_tac rv, case_tac ourCTE) + apply (clarsimp simp: isCap_simps cte_wp_at_ctes_of) + apply (elim disjE conjE exE, simp_all)[1] + done + +lemmas finaliseSlot'_simps_ext = + finaliseSlot'.simps [THEN ext [where f="finaliseSlot' slot exp" for slot exp]] + +lemmas finalise_spec_induct = 
finaliseSlot'.induct[where P= + "\sl exp s. s \ \P sl exp\ finaliseSlot' sl exp \Q sl exp\,\E sl exp\" for P Q E] + +lemma finaliseSlot'_preservation: + assumes wp: + "\cap final. \P\ finaliseCap cap final False \\rv. P\" + "\sl opt. \P\ emptySlot sl opt \\rv. P\" + "\sl1 sl2. \P\ capSwapForDelete sl1 sl2 \\rv. P\" + "\sl cap. \P\ updateCap sl cap \\rv. P\" + "\f s. P (ksWorkUnitsCompleted_update f s) = P s" + assumes irq: "irq_state_independent_H P" + shows + "st \ \P\ finaliseSlot' slot exposed \\rv. P\, \\rv. P\" +proof (induct rule: finalise_spec_induct) + case (1 sl exp s) + show ?case + apply (rule hoare_pre_spec_validE) + apply (subst finaliseSlot'_simps_ext) + apply (simp only: split_def) + apply wp + apply (simp, wp wp) + apply (wp "1.hyps") + apply (unfold Let_def split_def fst_conv snd_conv + case_Zombie_assert_fold haskell_fail_def) + apply (wp wp preemptionPoint_inv| simp add: o_def irq)+ + apply (wp hoare_drop_imps) + apply (wp wp | simp)+ + apply (wp hoare_drop_imps | simp(no_asm))+ + apply (wp wp)[1] + apply (simp(no_asm)) + apply (rule "1.hyps", (assumption | rule refl)+) + apply (wp wp hoare_drop_imps isFinalCapability_inv + | simp add: locateSlot_conv)+ + done +qed + +lemmas finaliseSlot_preservation + = validE_valid [OF use_spec(2) [OF finaliseSlot'_preservation], + folded finaliseSlot_def] + +lemma cteDelete_preservation: + assumes wp: + "\cap final. \P\ finaliseCap cap final False \\rv. P\" + "\sl opt. \P\ emptySlot sl opt \\rv. P\" + "\sl1 sl2. \P\ capSwapForDelete sl1 sl2 \\rv. P\" + "\sl cap. \P\ updateCap sl cap \\rv. P\" + "\f s. P (ksWorkUnitsCompleted_update f s) = P s" + assumes irq: "irq_state_independent_H P" + shows + "\P\ cteDelete p e \\rv. P\" + apply (simp add: cteDelete_def whenE_def split_def) + apply (wp wp) + apply (simp only: simp_thms cases_simp) + apply (wpsimp wp: finaliseSlot_preservation wp simp: irq)+ + done + +crunch aligned'[wp]: capSwapForDelete pspace_aligned' +crunch distinct'[wp]: capSwapForDelete pspace_distinct' + +lemma cte_wp_at_ctes_ofI: + "\ cte_wp_at' ((=) cte) ptr s \ \ ctes_of s ptr = Some cte" + by (rule ctes_of_eq_cte_wp_at') + +declare modify_map_dom[simp] + +(* subsumes update_prev_next_trancl *) +lemma modify_map_next_trancl: + assumes nxt: "m \ x \\<^sup>+ y" + and inv: "\cte. mdbNext (cteMDBNode (f cte)) = mdbNext (cteMDBNode cte)" + shows "(modify_map m ptr f) \ x \\<^sup>+ y" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some cte) + let ?m = "m(ptr \ f cte)" + + from nxt have "?m \ x \\<^sup>+ y" + proof induct + case (base y) + thus ?case using Some inv r_into_trancl next_unfold' + by fastforce + next + case (step q r) + show ?case + proof (rule trancl_into_trancl) + show "?m \ q \ r" using step(2) Some inv + by (simp only: mdb_next_update, clarsimp simp: next_unfold') + qed fact+ + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + + +(* subsumes update_prev_next_trancl2 *) +lemma modify_map_next_trancl2: + assumes nxt: "(modify_map m ptr f) \ x \\<^sup>+ y" + and inv: "\cte. 
mdbNext (cteMDBNode (f cte)) = mdbNext (cteMDBNode cte)" + shows "m \ x \\<^sup>+ y" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some cte) + let ?m = "m(ptr \ f cte)" + + from nxt have "m \ x \\<^sup>+ y" + proof induct + case (base y) + thus ?case using Some inv + by (auto intro!: r_into_trancl + simp: modify_map_def mdb_next_update next_unfold' split: if_split_asm) + next + case (step q r) + show ?case + proof + show "m \ q \ r" using step(2) Some inv + by (auto simp: modify_map_def mdb_next_update next_unfold' split: if_split_asm) + qed fact+ + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma modify_map_next_trancl_iff: + assumes inv: "\cte. mdbNext (cteMDBNode (f cte)) = mdbNext (cteMDBNode cte)" + shows "(modify_map m ptr f) \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + using inv + by (auto intro: modify_map_next_trancl modify_map_next_trancl2) + +lemma mdb_chain_0_cap_update: + "mdb_chain_0 (modify_map ctemap ptr (cteCap_update f)) = + mdb_chain_0 ctemap" + unfolding mdb_chain_0_def + by (auto simp: modify_map_next_trancl_iff) + +lemma modify_map_dlist: + assumes nxt: "valid_dlist m" + and inv: "\cte. cteMDBNode (f cte) = cteMDBNode cte" + shows "valid_dlist (modify_map m ptr f)" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some ptrcte) + let ?m = "m(ptr \ f ptrcte)" + + have "valid_dlist ?m" + proof + fix p cte + assume cp: "?m p = Some cte" and n0: "mdbPrev (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'.(m(ptr \ f ptrcte)) (mdbPrev (cteMDBNode cte)) = Some cte' \ + mdbNext (cteMDBNode cte') = p" + + { + assume peq: "p = ptr" + + hence mdb: "cteMDBNode cte = cteMDBNode ptrcte" using cp Some + by (clarsimp simp: inv) + + hence "\cte'. m (mdbPrev (cteMDBNode cte)) = Some cte' \ mdbNext (cteMDBNode cte') = p" + using nxt Some n0 peq + by (auto elim: valid_dlistEp) + hence ?thesis using peq mdb cp Some + by (cases "ptr = mdbPrev (cteMDBNode cte)") simp_all + } moreover + { + assume pne: "p \ ptr" + hence ?thesis using cp Some nxt n0 + by (cases "(mdbPrev (cteMDBNode cte)) = ptr") (auto elim: valid_dlistEp simp: inv) + } + ultimately show ?thesis by (cases "p = ptr") auto + next + fix p cte + assume cp: "?m p = Some cte" and n0: "mdbNext (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'.(m(ptr \ f ptrcte)) (mdbNext (cteMDBNode cte)) = Some cte' \ + mdbPrev (cteMDBNode cte') = p" + + { + assume peq: "p = ptr" + + hence mdb: "cteMDBNode cte = cteMDBNode ptrcte" using cp Some + by (clarsimp simp: inv) + + hence "\cte'. m (mdbNext (cteMDBNode cte)) = Some cte' \ mdbPrev (cteMDBNode cte') = p" + using nxt Some n0 peq + by (auto elim: valid_dlistEn) + hence ?thesis using peq mdb cp Some + by (cases "ptr = mdbNext (cteMDBNode cte)") simp_all + } moreover + { + assume pne: "p \ ptr" + hence ?thesis using cp Some nxt n0 + by (cases "(mdbNext (cteMDBNode cte)) = ptr") (auto elim: valid_dlistEn simp: inv) + } + ultimately show ?thesis by (cases "p = ptr") auto + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma modify_map_dlist2: + assumes nxt: "valid_dlist (modify_map m ptr f)" + and inv: "\cte. 
cteMDBNode (f cte) = cteMDBNode cte" + shows "valid_dlist m" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some ptrcte) + let ?m = "modify_map m ptr f" + + have "valid_dlist m" + proof + fix p cte + assume cp: "m p = Some cte" and n0: "mdbPrev (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'. m (mdbPrev (cteMDBNode cte)) = Some cte' \ mdbNext (cteMDBNode cte') = p" + + { + assume peq: "p = ptr" + + hence mdb: "cteMDBNode cte = cteMDBNode ptrcte" using cp Some + by (clarsimp simp: inv) + + hence "\cte'. ?m (mdbPrev (cteMDBNode cte)) = Some cte' \ mdbNext (cteMDBNode cte') = p" + using nxt Some n0 peq + by (auto elim: valid_dlistEp [where p = ptr] simp: modify_map_same inv) + hence ?thesis using peq cp Some + by (cases "ptr = mdbPrev (cteMDBNode cte)") (clarsimp simp: inv modify_map_same modify_map_other)+ + } moreover + { + assume pne: "p \ ptr" + hence ?thesis using cp Some nxt n0 + by (cases "(mdbPrev (cteMDBNode cte)) = ptr") (auto elim!: valid_dlistEp simp: inv modify_map_apply) + } + ultimately show ?thesis by (cases "p = ptr") auto + next + fix p cte + assume cp: "m p = Some cte" and n0: "mdbNext (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'. m (mdbNext (cteMDBNode cte)) = Some cte' \ mdbPrev (cteMDBNode cte') = p" + + { + assume peq: "p = ptr" + + hence mdb: "cteMDBNode cte = cteMDBNode ptrcte" using cp Some + by (clarsimp simp: inv) + + hence "\cte'. ?m (mdbNext (cteMDBNode cte)) = Some cte' \ mdbPrev (cteMDBNode cte') = p" + using nxt Some n0 peq + by (auto elim: valid_dlistEn [where p = ptr] simp: modify_map_same inv) + hence ?thesis using peq cp Some + by (cases "ptr = mdbNext (cteMDBNode cte)") (clarsimp simp: inv modify_map_same modify_map_other)+ + } moreover + { + assume pne: "p \ ptr" + hence ?thesis using cp Some nxt n0 + by (cases "(mdbNext (cteMDBNode cte)) = ptr") (auto elim!: valid_dlistEn simp: inv modify_map_apply) + } + ultimately show ?thesis by (cases "p = ptr") auto + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma modify_map_dlist_iff: + assumes inv: "\cte. cteMDBNode (f cte) = cteMDBNode cte" + shows "valid_dlist (modify_map m ptr f) = valid_dlist m" + using inv + by (auto intro: modify_map_dlist modify_map_dlist2) + +lemma mdb_chain_0_modify_map_inv: + "\ mdb_chain_0 m; \cte. mdbNext (cteMDBNode (f cte)) = mdbNext (cteMDBNode cte) \ \ mdb_chain_0 (modify_map m ptr f)" + unfolding mdb_chain_0_def + by (auto simp: modify_map_next_trancl_iff) + +lemma mdb_chain_0_modify_map_replace: + assumes unf: "mdb_chain_0 (modify_map m p (cteMDBNode_update (mdbNext_update (%_. (mdbNext node)))))" + shows "mdb_chain_0 (modify_map m p (cteMDBNode_update (\m. node)))" +proof - + have "modify_map m p (cteMDBNode_update (\m. node)) = + modify_map (modify_map (modify_map (modify_map m p (cteMDBNode_update (mdbNext_update (%_. (mdbNext node))))) p + (cteMDBNode_update (mdbPrev_update (%_. (mdbPrev node))))) p + (cteMDBNode_update (mdbRevocable_update (%_. (mdbRevocable node))))) p + (cteMDBNode_update (mdbFirstBadged_update (%_. 
(mdbFirstBadged node))))" + apply (cases node) + apply (cases "m p") + apply (simp add: modify_map_None) + apply (case_tac a) + apply (rename_tac mdbnode) + apply (case_tac mdbnode) + apply (clarsimp simp add: next_update_is_modify [symmetric]) + done + + thus ?thesis + apply simp + apply (rule mdb_chain_0_modify_map_inv) + apply (rule mdb_chain_0_modify_map_inv) + apply (rule mdb_chain_0_modify_map_inv [OF unf]) + apply simp_all + done +qed + +lemmas mdb_chain_0_mm_rep_next = + mdb_chain_0_modify_map_replace [OF mdb_chain_0_modify_map_next] + +lemma setCTE_cte_wp_at_other: + "\cte_wp_at' P p and (\s. ptr \ p)\ + setCTE ptr cte + \\uu s. cte_wp_at' P p s\" + apply (simp add: cte_wp_at_ctes_of) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +(* CLAG from _next *) +lemma mdb_chain_0_modify_map_0: + assumes chain: "mdb_chain_0 m" + and no0: "no_0 m" + shows + "mdb_chain_0 (modify_map m ptr (cteMDBNode_update (mdbNext_update (%_. 0))))" + (is "mdb_chain_0 ?M") + unfolding mdb_chain_0_def +proof + fix x + assume "x \ dom ?M" + hence xd: "x \ dom m" + by (clarsimp simp: modify_map_def dom_def split: if_split_asm) + hence x0: "m \ x \\<^sup>+ 0" using chain unfolding mdb_chain_0_def by simp + + show "?M \ x \\<^sup>+ 0" + proof (cases "m ptr") + case None + thus ?thesis + by (simp add: modify_map_def) (rule x0) + next + case (Some cte) + show ?thesis + proof (cases "m \ x \\<^sup>* ptr") + case False + thus ?thesis + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule mdb_trancl_other_update [OF x0]) + done + next + case True + hence "?M \ x \\<^sup>* ptr" + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule next_rtrancl_tranclE) + apply simp + apply (rule trancl_into_rtrancl) + apply (erule no_loops_upd_last [OF mdb_chain_0_no_loops [OF chain no0]]) + done + moreover have "?M \ ptr \ 0" + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (simp add: mdb_next_update) + done + ultimately show ?thesis by simp + qed + qed +qed + +lemma no_0_lhs_tranclI: "\ no_0 m; dest \ 0 \ \ \ m \ 0 \\<^sup>* dest" + apply rule + apply (erule next_rtrancl_tranclE) + apply simp + apply (drule (1) no_0_lhs_trancl) + apply simp + done + +lemma no_next_prev_rtrancl: + assumes c0: "valid_mdb_ctes m" + and src: "m src = Some (CTE cap src_node)" + and "mdbPrev src_node \ 0" + shows "\ m \ mdbNext src_node \\<^sup>* mdbPrev src_node" +proof + assume asm: "m \ mdbNext src_node \\<^sup>* mdbPrev src_node" + + from c0 have n0: "no_0 m" .. + from c0 have chain: "mdb_chain_0 m" .. + + have "m \ src \\<^sup>+ mdbPrev src_node" + using src + by - (rule rtrancl_into_trancl2 [OF _ asm], clarsimp simp: next_unfold') + + moreover + from c0 have vd: "valid_dlist m" .. + have "m \ mdbPrev src_node \ src" by (rule prev_leadstoI [OF _ _ vd]) fact+ + ultimately have "m \ src \\<^sup>+ src" .. + thus False using mdb_chain_0_no_loops [OF chain n0] + by (simp add: no_loops_trancl_simp) +qed + +lemma ctes_of_strng: + "(\cte. ctes_of s ptr = Some cte \ P cte) + \ (\cte. cte_wp_at' ((=) cte) ptr s \ P cte)" + by (clarsimp simp: cte_wp_at_ctes_of) + +lemma updateCap_valid_cap [wp]: + "\valid_cap' cap\ updateCap ptr cap' \\r. 
valid_cap' cap\" + unfolding updateCap_def + by (wp setCTE_valid_cap getCTE_wp) (clarsimp dest!: cte_at_cte_wp_atD) + +lemma mdb_chain_0_trancl: + assumes chain: "mdb_chain_0 m" + and n0: "no_0 m" + and ab: "m \ a \\<^sup>+ b" + shows "m \ b \\<^sup>* 0" + using ab +proof induct + case (base y) + thus ?case using chain + by (clarsimp simp: next_unfold') (erule (1) mdb_chain_0_nextD) +next + case (step y z) + thus ?case using n0 + apply - + apply (erule next_rtrancl_tranclE) + apply (simp add: next_unfold') + apply (drule tranclD [where x = y]) + apply clarsimp + apply (drule (1) next_single_value) + apply simp + done +qed + +lemma mdb_chain_0_cases [consumes 3, case_names srcdest destsrc indep]: + assumes chain: "mdb_chain_0 m" + and no: "no_0 m" + and ds: "dest \ src" + and srcdest: "\ m \ src \\<^sup>+ dest; \ m \ dest \\<^sup>* src; m \ dest \\<^sup>* 0 \ \ R" + and destsrc: "\ m \ dest \\<^sup>+ src; \ m \ src \\<^sup>* dest; m \ src \\<^sup>* 0 \ \ R" + and neither: "\ \ m \ src \\<^sup>+ dest; \ m \ dest \\<^sup>+ src \ \ R" + shows "R" +proof (cases "m \ src \\<^sup>+ dest") + case True + + thus ?thesis + proof (rule srcdest) + show "\ m \ dest \\<^sup>* src" by (rule no_loops_tranclE [OF mdb_chain_0_no_loops]) fact+ + + show "m \ dest \\<^sup>* 0" + by (rule mdb_chain_0_trancl) fact+ + qed +next + case False + + note F = False + + show ?thesis + proof (cases "m \ dest \\<^sup>+ src") + case True + thus ?thesis + proof (rule destsrc) + show "\ m \ src \\<^sup>* dest" using False ds + by (clarsimp elim!: next_rtrancl_tranclE) + show "m \ src \\<^sup>* 0" + by (rule mdb_chain_0_trancl) fact+ + qed + next + case False + with F show ?thesis + by (rule neither) + qed +qed + +lemma next_fold: + "\ m a = Some cte; mdbNext (cteMDBNode cte) = b\ \ m \ a \ b" + by (clarsimp simp: next_unfold') + + +lemma cteMDBNode_update_comp [simp]: + "(cteMDBNode_update f \ cteMDBNode_update g) = cteMDBNode_update (f \ g)" + by rule (case_tac x, simp) + +lemma modify_map_lhs_trancl: + "\ m p = Some cte; \ m \ mdbNext (cteMDBNode (f cte)) \\<^sup>* p \ \ + modify_map m p f \ p \\<^sup>+ x = m \ mdbNext (cteMDBNode (f cte)) \\<^sup>* x" + by (clarsimp simp: next_update_is_modify [symmetric] intro!: next_update_lhs_trancl) + +lemma modify_map_lhs_rtrancl: + "\ m p = Some cte; \ m \ mdbNext (cteMDBNode (f cte)) \\<^sup>* p \ \ + modify_map m p f \ p \\<^sup>* x = (x = p \ m \ mdbNext (cteMDBNode (f cte)) \\<^sup>* x)" + apply rule + apply (erule next_rtrancl_tranclE) + apply simp + apply (drule (2) iffD1 [OF modify_map_lhs_trancl]) + apply simp + apply (erule disjE) + apply simp + apply (drule (2) iffD2 [OF modify_map_lhs_trancl]) + apply (erule trancl_into_rtrancl) + done + +lemma next_prev: + assumes cte: "m p = Some cte" + and vd: "valid_dlist m" + and no0: "no_0 m" + and nxt: "m \ q \ p" + shows "q = mdbPrev (cteMDBNode cte)" +proof - + from no0 have p0: "p \ 0" using cte unfolding no_0_def + by - (rule, clarsimp) + + thus ?thesis + using nxt vd cte + apply - + apply (simp add: next_unfold') + apply (erule exE conjE)+ + apply (erule (1) valid_dlistEn, fastforce) + apply simp + done +qed + +declare modify_map_ndom[simp] + +lemma mdb_trancl_other_update_iff: + "\ m \ x \\<^sup>* p \ m(p \ cte) \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + by (auto intro: mdb_trancl_other_update mdb_trancl_update_other) + + + +lemma modify_map_trancl_other_iff: + "\ m \ x \\<^sup>* p \ modify_map m p f \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + apply - + apply (cases "m p") + apply (simp add: modify_map_None) + apply (subst 
next_update_is_modify [symmetric]) + apply assumption + apply simp + apply (erule mdb_trancl_other_update_iff) + done + +lemma next_modify_map_trancl_last: + assumes chain: "mdb_chain_0 m" + and no0: "no_0 m" + and nxt: "m \ x \\<^sup>+ p" + shows "modify_map m p f \ x \\<^sup>+ p" +proof - + note noloop = mdb_chain_0_no_loops [OF chain no0] + + from noloop nxt have xp: "x \ p" + by (clarsimp dest!: neg_no_loopsI) + + from nxt show ?thesis using xp + proof (induct rule: converse_trancl_induct') + case (base y) + hence "modify_map m p f \ y \ p" + by (clarsimp simp: next_unfold' modify_map_other) + + thus ?case .. + next + case (step y z) + + from noloop step have xp: "z \ p" + by (clarsimp dest!: neg_no_loopsI) + + hence "modify_map m p f \ y \ z" using step + by (clarsimp simp: next_unfold' modify_map_other) + moreover from xp have "modify_map m p f \ z \\<^sup>+ p" using step.hyps by simp + ultimately show ?case by (rule trancl_into_trancl2) + qed +qed + +lemma next_modify_map_trancl_last2: + assumes chain: "mdb_chain_0 (modify_map m p f)" + and no0: "no_0 m" + and nxt: "modify_map m p f \ x \\<^sup>+ p" + shows "m \ x \\<^sup>+ p" +proof - + let ?m = "modify_map m p f" + have no0': "no_0 ?m" using no0 by simp + note noloop = mdb_chain_0_no_loops [OF chain no0'] + + from noloop nxt have xp: "x \ p" + by (clarsimp dest!: neg_no_loopsI) + + from nxt show ?thesis using xp + proof (induct rule: converse_trancl_induct') + case (base y) + hence "m \ y \ p" + by (clarsimp simp: next_unfold' modify_map_other) + + thus ?case .. + next + case (step y z) + + from noloop step have xp: "z \ p" + by (clarsimp dest!: neg_no_loopsI) + + hence "m \ y \ z" using step + by (clarsimp simp: next_unfold' modify_map_other) + moreover from xp have "m \ z \\<^sup>+ p" using step.hyps by simp + ultimately show ?case by (rule trancl_into_trancl2) + qed +qed + +lemma next_modify_map_trancl_last_iff: + assumes c1: "mdb_chain_0 m" + and chain: "mdb_chain_0 (modify_map m p f)" + and no0: "no_0 m" + shows "modify_map m p f \ x \\<^sup>+ p = m \ x \\<^sup>+ p" + using c1 chain no0 + by (auto intro: next_modify_map_trancl_last next_modify_map_trancl_last2) + +lemma next_modify_map_last: + shows "x \ p \ modify_map m p f \ x \ p = m \ x \ p" + by (clarsimp simp: next_unfold' modify_map_other) + +lemma next_rtrancl_nx: + assumes node: "m ptr = Some (CTE cap node)" + and nl: "m \ ptr \\<^sup>+ ptr'" + shows "m \ mdbNext node \\<^sup>* ptr'" + using nl node + by (clarsimp dest!: tranclD elim!: next_rtrancl_tranclE simp: next_unfold') + +lemma next_trancl_nx: + assumes node: "m ptr = Some (CTE cap node)" + and nl: "m \ ptr \\<^sup>+ ptr'" + and neq: "mdbNext node \ ptr'" + shows "m \ mdbNext node \\<^sup>+ ptr'" + using nl node neq + by (clarsimp dest!: tranclD elim!: next_rtrancl_tranclE simp: next_unfold') + +lemma next_rtrancl_xp: + assumes node: "m ptr' = Some (CTE cap node)" + and vd: "valid_dlist m" + and no0: "no_0 m" + and nl: "m \ ptr \\<^sup>+ ptr'" + shows "m \ ptr \\<^sup>* mdbPrev node" + using nl node + apply - + apply (drule tranclD2) + apply clarsimp + apply (drule (1) next_prev [OF _ vd no0]) + apply simp + done + +lemma next_trancl_xp: + assumes node: "m ptr' = Some (CTE cap node)" + and vd: "valid_dlist m" + and no0: "no_0 m" + and neq: "mdbPrev node \ ptr" + and nl: "m \ ptr \\<^sup>+ ptr'" + shows "m \ ptr \\<^sup>+ mdbPrev node" + using neq node nl + apply - + apply (drule (1) next_rtrancl_xp [OF _ vd no0]) + apply (erule next_rtrancl_tranclE) + apply simp + apply simp + done + +lemma next_trancl_np: + 
assumes node: "m ptr = Some (CTE cap node)" + and node': "m ptr' = Some (CTE cap' node')" + and vd: "valid_dlist m" + and no0: "no_0 m" + and neq: "mdbPrev node' \ ptr" + and neq': "mdbNext node \ mdbPrev node'" + and nl: "m \ ptr \\<^sup>+ ptr'" + shows "m \ mdbNext node \\<^sup>+ mdbPrev node'" + by (rule next_trancl_nx [OF _ next_trancl_xp]) fact+ + +lemma neg_next_trancl_nx: + assumes node: "m ptr = Some (CTE cap node)" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ mdbNext node \\<^sup>+ ptr'" + using nl +proof (rule contrapos_nn) + assume "m \ mdbNext node \\<^sup>+ ptr'" + show "m \ ptr \\<^sup>+ ptr'" + proof (rule trancl_into_trancl2) + show "m \ ptr \ mdbNext node" using node by (rule next_fold, simp) + qed fact+ +qed + +lemma neg_next_rtrancl_nx: + assumes node: "m ptr = Some (CTE cap node)" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ mdbNext node \\<^sup>* ptr'" + using nl +proof (rule contrapos_nn) + assume "m \ mdbNext node \\<^sup>* ptr'" + show "m \ ptr \\<^sup>+ ptr'" + proof (rule rtrancl_into_trancl2) + show "m \ ptr \ mdbNext node" using node by (rule next_fold, simp) + qed fact+ +qed + +lemma dom_into_not0 [intro?]: + "\ no_0 m; p \ dom m \ \ p \ 0" + by (rule, clarsimp) + +lemma neg_next_trancl_xp: + assumes node: "m ptr' = Some (CTE cap node)" + and dom: "mdbPrev node \ dom m" + and no0: "no_0 m" + and vd: "valid_dlist m" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ ptr \\<^sup>+ mdbPrev node" + using nl +proof (rule contrapos_nn) + assume "m \ ptr \\<^sup>+ mdbPrev node" + + show "m \ ptr \\<^sup>+ ptr'" + proof (rule trancl_into_trancl) + have "mdbPrev node \ 0" using assms by auto + thus "m \ mdbPrev node \ ptr'" using vd node + apply - + apply (erule (1) valid_dlistEp) + apply simp + apply (rule next_fold) + apply simp + apply simp + done + qed fact+ +qed + +lemma neg_next_trancl_np: + assumes node: "m ptr = Some (CTE cap node)" + and node': "m ptr' = Some (CTE cap' node')" + and dom: "mdbPrev node' \ dom m" + and no0: "no_0 m" + and vd: "valid_dlist m" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ mdbNext node \\<^sup>+ mdbPrev node'" + by (rule neg_next_trancl_nx [OF _ neg_next_trancl_xp]) fact+ + +lemma neg_next_rtrancl_np: + assumes node: "m ptr = Some (CTE cap node)" + and node': "m ptr' = Some (CTE cap' node')" + and dom: "mdbPrev node' \ dom m" + and no0: "no_0 m" + and vd: "valid_dlist m" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ mdbNext node \\<^sup>* mdbPrev node'" + by (rule neg_next_rtrancl_nx [OF _ neg_next_trancl_xp]) fact+ + +lemma neg_next_trancl_trancl: + assumes nxt: "m \ a \\<^sup>* a'" + and ab: "\ m \ b \\<^sup>* a'" + and nl: "\ m \ a' \\<^sup>* b" + shows "\ m \ a \\<^sup>+ b" + using nl nxt + apply - + apply (erule contrapos_nn) + apply (erule next_rtrancl_tranclE) + apply simp + apply (erule (1) next_trancl_split_tt [OF _ _ ab]) + done + +declare domE[elim?] + +lemma ndom_is_0D: + "\ mdbNext node \ dom m; mdb_chain_0 m; no_0 m; m ptr = Some (CTE cap node) \ + \ mdbNext node = 0" + apply - + apply (frule (1) mdb_chain_0_nextD) + apply simp + apply (erule next_rtrancl_tranclE) + apply simp + apply (drule tranclD) + apply (clarsimp simp: next_unfold') + done + +end + +(* almost exactly 1000 lines --- yuck. There is a lot of redundancy here, but I doubt it is worth + exploiting above the cut'n'paste already here. + *) + +lemma (in mdb_swap) cteSwap_chain: + "mdb_chain_0 n" +proof - + have chain: "mdb_chain_0 m" using valid .. 
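+  (* Informally: mdb_chain_0 m says that every slot in the domain of m reaches
+     0 by repeatedly following mdbNext.  The swapped mapping is written below
+     as a stack of modify_map updates that redirect the prev/next pointers of
+     src, dest and their neighbours, so the goal roughly amounts to showing
+     that each of these pointer updates preserves reachability of 0. *)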
+ + let ?m = "(modify_map + (modify_map + (modify_map + (modify_map (modify_map m (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) + (mdbNext src_node) (cteMDBNode_update (mdbPrev_update (%_. dest)))) + src (cteMDBNode_update (\m. dest2_node))) + dest (cteMDBNode_update (\m. src_node))) + (mdbPrev dest2_node) (cteMDBNode_update (mdbNext_update (%_. src))))" + + let ?n' = "modify_map m src (cteMDBNode_update (mdbNext_update (%_. (mdbNext dest_node))))" + + have [simp]: "src \ dom m" by (rule domI, rule src) + have [simp]: "dest \ dom m" by (rule domI, rule dest) + + have dn: "m \ dest \ mdbNext dest_node" using dest by (rule next_fold, simp) + + have dp: "mdbPrev dest_node \ dom m + \ m \ mdbPrev dest_node \ dest" + proof - + assume "mdbPrev dest_node \ dom m" + hence "mdbPrev dest_node \ 0" using no_0 by - (rule, clarsimp) + thus ?thesis using dest + apply - + apply (clarsimp dest!: dest_prev [where p = "mdbPrev dest_node", simplified]) + apply (erule next_fold) + apply simp + done + qed + + have [simp]: "\ m \ dest \\<^sup>+ dest" + using mdb_chain_0_no_loops [OF chain no_0] + by (simp add: no_loops_trancl_simp) + + have [simp]: "\ m \ src \\<^sup>+ src" + using mdb_chain_0_no_loops [OF chain no_0] + by (simp add: no_loops_trancl_simp) + + have [simp]: "\ m \ mdbNext src_node \\<^sup>* src" + by (rule neg_next_rtrancl_nx, rule src, simp) + + + have sn: "mdbPrev src_node \ dom m + \ m \ mdbPrev src_node \ src" + proof - + assume "mdbPrev src_node \ dom m" + hence "mdbPrev src_node \ 0" using no_0 by - (rule, clarsimp) + thus ?thesis using src + apply - + apply (clarsimp dest!: src_prev [where p = "mdbPrev src_node", simplified]) + apply (erule next_fold) + apply simp + done + qed + + from chain no_0 neq [symmetric] + have "mdb_chain_0 ?m" + proof (cases rule: mdb_chain_0_cases) + case srcdest + + note [simp] = neg_rtrancl_into_trancl [OF srcdest(2)] + note [simp] = srcdest(2) + + have dsneq: "dest \ mdbPrev src_node" + proof + assume "dest = mdbPrev src_node" + hence "m \ dest \\<^sup>* src" + by - (rule r_into_rtrancl, rule next_fold [where m = m, OF dest], simp) + + thus False using srcdest by simp + qed + + from dest have n1 [simp]:"\ m \ mdbNext dest_node \\<^sup>* src" + by (rule neg_next_rtrancl_nx [OF _ neg_rtrancl_into_trancl]) fact+ + + have chain_n': "mdb_chain_0 ?n'" + proof (cases "mdbNext dest_node \ dom m") + case True + thus ?thesis using n1 + by (rule mdb_chain_0_modify_map_next [OF chain no_0]) + next + case False + thus ?thesis using dest chain no_0 + by - (drule (3) ndom_is_0D, simp, erule (1) mdb_chain_0_modify_map_0) + qed + + from dest src + have n4: "mdbPrev src_node \ dom m \ \ m \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + using neg_next_rtrancl_np [OF _ _ _ no_0 dlist neg_rtrancl_into_trancl] + by auto + + hence n2 [simp]: "\ ?n' \ src \\<^sup>* dest" + using dn src + by (auto dest: rtrancl_into_trancl2 simp: modify_map_lhs_rtrancl) + + hence n3: "mdbPrev src_node \ dom m + \ \ modify_map ?n' dest (cteMDBNode_update (mdbNext_update (%_. 
src))) \ dest \\<^sup>* mdbPrev src_node" + using dest dsneq src n1 + by (simp add: modify_map_lhs_rtrancl modify_map_app) (rule n4) + + from srcdest(1) + show ?thesis + proof (cases rule: tranclE2') + case base + hence ds: "dest = mdbNext src_node" by (clarsimp simp: next_unfold' src) + hence d2: "dest2_node = MDB (mdbNext dest_node) dest (mdbRevocable dest_node) (mdbFirstBadged dest_node)" + using dsneq + unfolding dest2_node_def by clarsimp + + let ?m' = "(modify_map + (modify_map ?n' dest (cteMDBNode_update (mdbNext_update (%_. src)))) + (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest))))" + + let ?goal = "mdb_chain_0 ?m'" + { + assume d1: "mdbPrev src_node \ dom m" and d2: "mdbNext dest_node \ dom m" + hence ?goal + apply (intro mdb_chain_0_modify_map_next) + apply (auto simp: no_0 chain n1 n2 n3[OF d1]) + done + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and "mdbNext dest_node \ dom m" + hence ?goal + by simp ((rule mdb_chain_0_modify_map_next)+, simp_all add: no_0 chain n1 n2) + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and "mdbNext dest_node \ dom m" + hence m0: "mdbNext dest_node = 0" + by (clarsimp dest!: dest_next [where p = "mdbNext dest_node", simplified]) + + have ?goal using chain_n' d1 src dest + apply - + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_next [OF chain_n']) + apply (simp_all add: no_0 chain n1 n2 n3 [OF d1]) + done + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and "mdbNext dest_node \ dom m" + hence m0: "mdbNext dest_node = 0" + by (clarsimp dest!: dest_next [where p = "mdbNext dest_node", simplified]) + + have ?goal using d1 chain_n' + apply simp + apply (rule mdb_chain_0_modify_map_next) + apply (simp_all add: no_0 chain n1 n2) + done + } + ultimately have ?goal + apply (cases "mdbPrev src_node \ dom m") + apply (cases "mdbNext dest_node \ dom m") + apply (auto)[2] + apply (cases "mdbNext dest_node \ dom m") + apply auto + done + + thus ?thesis using ds [symmetric] d2 neqs dsneq + apply simp + apply (subst modify_map_addr_com [OF neqs(2)]) + apply (subst modify_map_comp [symmetric]) + apply (subst modify_map_comp [symmetric]) + apply (simp) + apply (simp add: o_def) + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (subst modify_map_addr_com [where x = src]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (subst modify_map_addr_com [OF dsneq [symmetric]]) + apply (subst modify_map_addr_com [where y = src], simp)+ + apply assumption + done + next + case (trancl c) + hence dsneq': "dest \ mdbNext src_node" using src + apply - + apply rule + apply simp + apply (drule next_fold) + apply simp + apply (drule (1) next_single_value) + apply simp + done + + hence d2n: "dest2_node = dest_node" + unfolding dest2_node_def + by (cases dest_node, simp add: dsneq) + + from trancl obtain d where dnext: "m \ d \ dest" and ncd: "m \ c \\<^sup>* d" + by (clarsimp dest!: tranclD2) + + have ddest: "d = mdbPrev (cteMDBNode (CTE dest_cap dest_node))" + using dest dlist no_0 dnext + by (rule next_prev) + + hence d2: "mdbPrev dest_node \ dom m" using dnext + by (clarsimp simp: next_unfold') + + have dnz: "mdbPrev dest_node \ 0" + by (rule dom_into_not0 [OF no_0 d2]) + + have n5 [simp]: "\ ?n' \ src \\<^sup>* mdbPrev dest_node" + proof - + have "src \ mdbPrev dest_node" + by (simp add: dsneq' [symmetric]) + hence "?n' \ mdbPrev dest_node \ dest" using dp [OF d2] + by (clarsimp simp: next_unfold' modify_map_other) + thus ?thesis using n2 + by - (erule 
contrapos_nn, erule (1) rtrancl_into_rtrancl) + qed + + let ?n2 = "modify_map ?n' (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. src)))" + have chain_n2: "mdb_chain_0 ?n2" + by ((rule chain_n' | rule mdb_chain_0_modify_map_next)+, simp_all add: no_0) + + have r [simp]: "\ m \ mdbNext dest_node \\<^sup>* mdbPrev dest_node" + by (rule neg_next_rtrancl_np [OF _ _ d2 no_0 dlist], rule dest, rule dest, simp) + + have r3 [simp]: "\ m \ mdbNext dest_node \\<^sup>* src" + by (rule neg_next_rtrancl_nx, rule dest, simp) + + have r4 [simp]: "\ m \ dest \\<^sup>+ mdbPrev dest_node" + by (rule neg_next_trancl_xp [OF _ d2 no_0 dlist], rule dest, simp) + + let ?m'' = + "(modify_map (modify_map + (modify_map ?n' (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. src)))) + (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) + dest (cteMDBNode_update (mdbNext_update (%_. (mdbNext src_node)))))" + + have n2_2 [simp]: + "?n2 \ mdbNext src_node \\<^sup>* mdbPrev dest_node" + apply (cases "mdbNext src_node = mdbPrev dest_node") + apply simp + apply (rule trancl_into_rtrancl) + apply (rule next_modify_map_trancl_last [OF chain_n'], simp add: no_0) + apply (subst modify_map_trancl_other_iff) + apply simp + apply (rule next_trancl_np [OF _ _ dlist no_0]) + apply (rule src, rule dest) + apply (simp add: dsneq' [symmetric]) + apply assumption + apply (rule srcdest(1)) + done + + hence n2_3 [simp]: "\ ?n2 \ mdbNext src_node \\<^sup>+ dest" + proof (rule neg_next_trancl_trancl) + show "\ ?n2 \ dest \\<^sup>* mdbPrev dest_node" + apply (rule neg_rtranclI) + apply simp + apply (subst next_modify_map_trancl_last_iff [OF chain_n' chain_n2]) + apply (simp add: no_0) + apply (simp add: modify_map_trancl_other_iff) + done + + show "\ ?n2 \ mdbPrev dest_node \\<^sup>* dest" using d2 + by (clarsimp simp: modify_map_lhs_rtrancl modify_map_other dsneq' [symmetric]) + qed + + have r5 [simp]: "mdbPrev src_node \ dom m \ \ m \ dest \\<^sup>+ mdbPrev src_node" + by (rule neg_next_trancl_xp [OF _ _ no_0 dlist], rule src, simp_all) + + have n2_4 [simp]: + "mdbPrev src_node \ dom m \ \ ?n2 \ dest \\<^sup>* mdbPrev src_node" + apply - + apply (rule neg_rtranclI [OF dsneq]) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply (simp_all add: modify_map_trancl_other_iff) + done + + let ?goal = "mdb_chain_0 ?m''" + { + assume d1: "mdbPrev src_node \ dom m" and d3: "mdbNext src_node \ dom m" + + have r2 [simp]: "\ m \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + using dest src + by (rule neg_next_rtrancl_np [OF _ _ _ no_0 dlist neg_rtrancl_into_trancl]) fact+ + + have ?goal + proof ((rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + + have n2_1: + "\ ?n2 \ mdbPrev dest_node \\<^sup>* mdbPrev src_node" using d2 dsneq' [symmetric] + apply - + apply (erule domE) + apply (subst modify_map_lhs_rtrancl) + apply (clarsimp simp: modify_map_other) + apply simp + apply simp + apply (simp add: dom_into_not0 [OF no_0 d2]) + apply (subst modify_map_lhs_rtrancl, rule src) + apply simp + apply (simp) + done + + have "\ ?n' \ mdbPrev src_node \\<^sup>+ mdbPrev dest_node" + apply (rule neg_next_rtrancl_trancl [where y = src]) + apply (subst modify_map_lhs_rtrancl) + apply (rule src) + apply simp + apply (simp add: dsneq' [symmetric]) + apply (subst next_modify_map_last) + apply simp + apply (rule sn [OF d1]) + done + hence "mdbPrev src_node \ 0 \ \ ?n2 \ mdbPrev src_node \\<^sup>* mdbPrev dest_node" + apply - + apply (rule neg_rtranclI) + 
apply simp + apply (subst next_modify_map_trancl_last_iff [OF chain_n' chain_n2]) + apply (simp add: no_0) + apply assumption + done + moreover from no_0 have "mdbPrev src_node \ 0" using d1 by auto + ultimately show + "\ modify_map ?n2 (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest))) \ mdbNext src_node \\<^sup>* dest" using n2_1 + apply - + apply (rule neg_rtranclI) + apply (simp add: dsneq' [symmetric]) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply simp + apply (rule neg_next_trancl_trancl [OF n2_2]) + apply auto + done + qed fact+ + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and d3: "mdbNext src_node \ dom m" + + have ?goal + proof (simp add: d1, (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1) + show "\ ?n2 \ mdbNext src_node \\<^sup>* dest" + by (rule neg_rtranclI [OF _ n2_3], simp add: dsneq' [symmetric]) + qed fact+ + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and d3: "mdbNext src_node \ dom m" + hence m0: "mdbNext src_node = 0" + by (clarsimp dest!: src_next [where p = "mdbNext src_node", simplified]) + + have ?goal + by (simp add: m0, + (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_0 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and d3: "mdbNext src_node \ dom m" + hence m0: "mdbNext src_node = 0" + by (clarsimp dest!: src_next [where p = "mdbNext src_node", simplified]) + + have ?goal + by (simp add: m0 d1, + (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_0 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + } ultimately have ?goal + apply (cases "mdbPrev src_node \ dom m") + apply (cases "mdbNext src_node \ dom m") + apply (auto)[2] + apply (cases "mdbNext src_node \ dom m") + apply auto + done + + thus ?thesis using no_0 d2n + apply simp + apply (subst modify_map_addr_com [where y = "mdbPrev dest_node"]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [where x = src]) + apply (simp add: dsneq' [symmetric]) + apply (subst modify_map_addr_com [where x = src]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [where y = dest], simp add: dsneq [symmetric] dsneq')+ + apply (subst modify_map_addr_com [where y = "mdbPrev src_node"], simp add: dsneq) + apply (subst modify_map_addr_com [where y = "mdbPrev dest_node"], simp add: dsneq dnz)+ + apply (subst modify_map_addr_com [where y = src], simp add: dsneq dsneq' [symmetric] dnz)+ + apply assumption + done + qed + next + case destsrc (* Dual of srcdest *) + + let ?n' = "modify_map m dest (cteMDBNode_update (mdbNext_update (%_. 
(mdbNext src_node))))" + + note [simp] = neg_rtrancl_into_trancl [OF destsrc(2)] + note [simp] = destsrc(2) + + have dsneq: "src \ mdbPrev dest_node" + proof + assume "src = mdbPrev dest_node" + hence "m \ src \\<^sup>* dest" + by - (rule r_into_rtrancl, rule next_fold [where m = m, OF src], simp) + + thus False using destsrc by simp + qed + + from src have n1 [simp]:"\ m \ mdbNext src_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx [OF _ neg_rtrancl_into_trancl]) fact+ + + have chain_n': "mdb_chain_0 ?n'" + proof (cases "mdbNext src_node \ dom m") + case True + thus ?thesis using n1 + by (rule mdb_chain_0_modify_map_next [OF chain no_0]) + next + case False + thus ?thesis using src chain no_0 + by - (drule (3) ndom_is_0D, simp, erule (1) mdb_chain_0_modify_map_0) + qed + + from src dest + have n4: "mdbPrev dest_node \ dom m \ \ m \ mdbNext src_node \\<^sup>* mdbPrev dest_node" + using neg_next_rtrancl_np [OF _ _ _ no_0 dlist neg_rtrancl_into_trancl] + by auto + + hence n2 [simp]: "\ ?n' \ dest \\<^sup>* src" + using sn dest + by (auto dest: rtrancl_into_trancl2 simp: modify_map_lhs_rtrancl) + + hence n3: "mdbPrev dest_node \ dom m + \ \ modify_map ?n' src (cteMDBNode_update (mdbNext_update (%_. dest))) \ src \\<^sup>* mdbPrev dest_node" + using dest dsneq src n1 + by (simp add: modify_map_lhs_rtrancl modify_map_app) (rule n4) + + from destsrc(1) + show ?thesis + proof (cases rule: tranclE2') + case base + hence ds: "src = mdbNext dest_node" by (clarsimp simp: next_unfold' dest) + hence d2: "dest2_node = MDB dest (mdbPrev dest_node) (mdbRevocable dest_node) (mdbFirstBadged dest_node)" + using dsneq + unfolding dest2_node_def by simp + + let ?m' = "(modify_map + (modify_map ?n' src (cteMDBNode_update (mdbNext_update (%_. dest)))) + (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. 
src))))" + + let ?goal = "mdb_chain_0 ?m'" + { + assume d1: "mdbPrev dest_node \ dom m" and "mdbNext src_node \ dom m" + hence ?goal + apply (intro mdb_chain_0_modify_map_next) + apply (auto simp: no_0 chain n1 n2 n3 [OF d1]) + done + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and "mdbNext src_node \ dom m" + hence ?goal + by simp ((rule mdb_chain_0_modify_map_next)+, simp_all add: no_0 chain n1 n2) + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and "mdbNext src_node \ dom m" + hence m0: "mdbNext src_node = 0" + by (clarsimp dest!: src_next [where p = "mdbNext src_node", simplified]) + + have ?goal using chain_n' d1 src dest + apply - + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_next [OF chain_n']) + apply (simp_all add: no_0 chain n1 n2 n3 [OF d1]) + done + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and "mdbNext src_node \ dom m" + hence m0: "mdbNext src_node = 0" + by (clarsimp dest!: src_next [where p = "mdbNext src_node", simplified]) + + have ?goal using d1 chain_n' + apply simp + apply (rule mdb_chain_0_modify_map_next) + apply (simp_all add: no_0 chain n1 n2) + done + } + ultimately have ?goal + apply (cases "mdbPrev dest_node \ dom m") + apply (cases "mdbNext src_node \ dom m") + apply (auto)[2] + apply (cases "mdbNext src_node \ dom m") + apply auto + done + thus ?thesis using ds [symmetric] d2 neqs dsneq + apply simp + apply (subst modify_map_addr_com [where x = "mdbNext src_node"], simp)+ + apply (subst modify_map_addr_com [OF neqs(1)]) + apply (subst modify_map_comp [symmetric]) + apply (simp) + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [where x = src]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (subst modify_map_addr_com [where x = dest], simp)+ + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [where y = src], simp)+ + apply (subst modify_map_addr_com [where y = dest], simp)+ + apply assumption + done + next + case (trancl c) + hence dsneq': "src \ mdbNext dest_node" using dest + apply - + apply rule + apply simp + apply (drule next_fold) + apply simp + apply (drule (1) next_single_value) + apply simp + done + + hence d2n: "dest2_node = dest_node" + unfolding dest2_node_def using dsneq + by simp + + from trancl obtain d where dnext: "m \ d \ src" and ncd: "m \ c \\<^sup>* d" + by (clarsimp dest!: tranclD2) + + have ddest: "d = mdbPrev (cteMDBNode (CTE src_cap src_node))" + using src dlist no_0 dnext + by (rule next_prev) + + hence d2: "mdbPrev src_node \ dom m" using dnext + by (clarsimp simp: next_unfold') + + have dnz: "mdbPrev src_node \ 0" + by (rule dom_into_not0 [OF no_0 d2]) + + have n5 [simp]: "\ ?n' \ dest \\<^sup>* mdbPrev src_node" + proof - + have "dest \ mdbPrev src_node" + by (simp add: dsneq' [simplified, symmetric]) + hence "?n' \ mdbPrev src_node \ src" using sn [OF d2] + by (clarsimp simp: next_unfold' modify_map_other) + thus ?thesis using n2 + by - (erule contrapos_nn, erule (1) rtrancl_into_rtrancl) + qed + + let ?n2 = "modify_map ?n' (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. 
dest)))" + have chain_n2: "mdb_chain_0 ?n2" + by ((rule chain_n' | rule mdb_chain_0_modify_map_next)+, simp_all add: no_0) + + have r [simp]: "\ m \ mdbNext src_node \\<^sup>* mdbPrev src_node" + by (rule neg_next_rtrancl_np [OF _ _ d2 no_0 dlist], rule src, rule src, simp) + + have r3 [simp]: "\ m \ mdbNext src_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx, rule src, simp) + + have r5 [simp]: "\ m \ mdbNext dest_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx, rule dest, simp) + + have r4 [simp]: "\ m \ src \\<^sup>+ mdbPrev src_node" + by (rule neg_next_trancl_xp [OF _ d2 no_0 dlist], rule src, simp) + + let ?m'' = + "(modify_map (modify_map + (modify_map ?n' (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) + (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. src)))) + src (cteMDBNode_update (mdbNext_update (%_. (mdbNext dest_node)))))" + + have n2_2 [simp]: + "?n2 \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + apply (cases "mdbNext dest_node = mdbPrev src_node") + apply simp + apply (rule trancl_into_rtrancl) + apply (rule next_modify_map_trancl_last [OF chain_n'], simp add: no_0) + apply (subst modify_map_trancl_other_iff) + apply simp + apply (rule next_trancl_np [OF _ _ dlist no_0]) + apply (rule dest, rule src) + apply (simp add: dsneq' [simplified]) + apply assumption + apply (rule destsrc(1)) + done + + hence n2_3 [simp]: "\ ?n2 \ mdbNext dest_node \\<^sup>+ src" + proof (rule neg_next_trancl_trancl) + show "\ ?n2 \ src \\<^sup>* mdbPrev src_node" + apply (rule neg_rtranclI) + apply simp + apply (subst next_modify_map_trancl_last_iff [OF chain_n' chain_n2]) + apply (simp add: no_0) + apply (simp add: modify_map_trancl_other_iff) + done + + show "\ ?n2 \ mdbPrev src_node \\<^sup>* src" using d2 + by (clarsimp simp: modify_map_lhs_rtrancl modify_map_other dsneq' [simplified, symmetric]) + qed + + have r6 [simp]: "mdbPrev dest_node \ dom m \ \ m \ src \\<^sup>+ mdbPrev dest_node" + by (rule neg_next_trancl_xp [OF _ _ no_0 dlist], rule dest, simp_all) + + have n2_4 [simp]: + "mdbPrev dest_node \ dom m \ \ ?n2 \ src \\<^sup>* mdbPrev dest_node" + apply - + apply (rule neg_rtranclI [OF dsneq]) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply (simp_all add: modify_map_trancl_other_iff) + done + + let ?goal = "mdb_chain_0 ?m''" + { + assume d1: "mdbPrev dest_node \ dom m" and d3: "mdbNext dest_node \ dom m" + + have r2 [simp]: "\ m \ mdbNext src_node \\<^sup>* mdbPrev dest_node" + using src dest + by (rule neg_next_rtrancl_np [OF _ _ _ no_0 dlist neg_rtrancl_into_trancl]) fact+ + + have ?goal + proof ((rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + + have n2_1: + "\ ?n2 \ mdbPrev src_node \\<^sup>* mdbPrev dest_node" using d2 dsneq' [symmetric] + apply - + apply (erule domE) + apply (subst modify_map_lhs_rtrancl) + apply (clarsimp simp: modify_map_other) + apply simp + apply simp + apply (simp add: dom_into_not0 [OF no_0 d2]) + apply (subst modify_map_lhs_rtrancl, rule dest) + apply simp + apply (simp) + done + have "\ ?n' \ mdbPrev dest_node \\<^sup>+ mdbPrev src_node" + apply (rule neg_next_rtrancl_trancl [where y = dest]) + apply (subst modify_map_lhs_rtrancl) + apply (rule dest) + apply simp + apply (simp add: dsneq' [simplified]) + apply (subst next_modify_map_last) + apply simp + apply (rule dp [OF d1]) + done + hence "mdbPrev dest_node \ 0 \ \ ?n2 \ mdbPrev dest_node \\<^sup>* mdbPrev src_node" + apply - + apply (rule neg_rtranclI) + apply simp + apply 
(subst next_modify_map_trancl_last_iff [OF chain_n' chain_n2]) + apply (simp add: no_0) + apply assumption + done + moreover from no_0 have "mdbPrev dest_node \ 0" using d1 by auto + ultimately show + "\ modify_map ?n2 (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. src))) \ mdbNext dest_node \\<^sup>* src" using n2_1 dsneq' [symmetric] + apply - + apply (rule neg_rtranclI) + apply (simp) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply simp + apply (rule neg_next_trancl_trancl [OF n2_2]) + apply auto + done + qed fact+ + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and d3: "mdbNext dest_node \ dom m" + + have ?goal + proof (simp add: d1, (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1) + show "\ ?n2 \ mdbNext dest_node \\<^sup>* src" + by (rule neg_rtranclI [OF _ n2_3], simp add: dsneq' [simplified]) + qed fact+ + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and d3: "mdbNext dest_node \ dom m" + hence m0: "mdbNext dest_node = 0" + by (clarsimp dest!: dest_next [where p = "mdbNext dest_node", simplified]) + + have ?goal + by (simp add: m0, + (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_0 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and d3: "mdbNext dest_node \ dom m" + hence m0: "mdbNext dest_node = 0" + by (clarsimp dest!: dest_next [where p = "mdbNext dest_node", simplified]) + + have ?goal + by (simp add: m0 d1, + (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_0 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + } ultimately have ?goal + apply (cases "mdbPrev dest_node \ dom m") + apply (cases "mdbNext dest_node \ dom m") + apply (auto)[2] + apply (cases "mdbNext dest_node \ dom m") + apply auto + done + thus ?thesis using no_0 d2n dsneq dsneq' + apply simp + apply (subst modify_map_addr_com [where y = "mdbPrev dest_node"]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [where x = src], simp)+ + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [where y = src], simp)+ + apply (subst modify_map_addr_com [where y = "mdbPrev dest_node"], simp add: dnz)+ + apply (subst modify_map_addr_com [where y = "mdbPrev src_node"], simp add: dnz)+ + apply (subst modify_map_addr_com [where y = dest], simp add: dnz)+ + apply assumption + done + qed + next + case indep + + have indep_rt1: "\ m \ src \\<^sup>* dest" + by (rule neg_rtranclI, simp) fact+ + + have indep_rt2: "\ m \ dest \\<^sup>* src" + by (rule neg_rtranclI, simp) fact+ + + have dsneq: "src \ mdbPrev dest_node" + proof + assume "src = mdbPrev dest_node" + hence "m \ src \\<^sup>+ dest" + by - (rule r_into_trancl, rule next_fold [where m = m, OF src], simp) + + thus False using indep by simp + qed + + note [simp] = dsneq [simplified] + + have sdneq: "dest \ mdbPrev src_node" + proof + assume "dest = mdbPrev src_node" + hence "m \ dest \\<^sup>+ src" + by - (rule r_into_trancl, rule next_fold [where m = m, OF dest], simp) + + thus False using indep by simp + qed + + note [simp] = sdneq [simplified] + + have dsneq' [simp]: "dest \ mdbNext src_node" + proof + assume "dest = mdbNext src_node" + hence "m \ src \\<^sup>+ dest" + apply - + apply (rule r_into_trancl) + apply (rule next_fold) + apply (rule src) + apply simp + done + thus False using indep by simp + qed + + 
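+ (* Added informal note, not part of the original proof script: in this "indep" case src and
+    dest are not adjacent in the mdb list, so the swap should rewire the next-pointers to
+    mdbPrev src_node \<leadsto> dest \<leadsto> mdbNext src_node  and  mdbPrev dest_node \<leadsto> src \<leadsto> mdbNext dest_node.
+    The proof below builds this in stages: ?n' points dest at mdbNext src_node, ?n2 additionally
+    points mdbPrev src_node at dest, and chain_n3 then points src at mdbNext dest_node; each
+    stage is shown to preserve mdb_chain_0, case-splitting on whether mdbPrev src_node,
+    mdbPrev dest_node and the relevant next pointers lie in dom m.  The closing apply script
+    commutes the modify_map updates (modify_map_addr_com, mdb_chain_0_modify_map_replace,
+    mdb_chain_0_modify_map_prev) into the shape of the stated goal. *)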
have dsnp: "mdbPrev src_node \ dom m \ mdbNext dest_node \ mdbPrev src_node" + proof + assume "mdbPrev src_node \ dom m" and "mdbNext dest_node = mdbPrev src_node" + hence "m \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + by simp + moreover have "m \ dest \ mdbNext dest_node" using dest by (rule next_fold, simp) + moreover have "m \ mdbPrev src_node \ src" by (rule sn) fact+ + ultimately have "m \ dest \\<^sup>+ src" by auto + thus False using indep by simp + qed + + have d2n: "dest2_node = dest_node" + unfolding dest2_node_def by (cases dest_node, simp) + + let ?n' = "modify_map m dest (cteMDBNode_update (mdbNext_update (%_. (mdbNext src_node))))" + + let ?n2 = "modify_map ?n' (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))" + + from src have n1 [simp]:"\ m \ mdbNext src_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx [OF _ neg_rtrancl_into_trancl]) (rule indep_rt1) + + have chain_n': "mdb_chain_0 ?n'" + proof (cases "mdbNext src_node \ dom m") + case True + thus ?thesis using n1 + by (rule mdb_chain_0_modify_map_next [OF chain no_0]) + next + case False + thus ?thesis using src chain no_0 + by - (drule (3) ndom_is_0D, simp, erule (1) mdb_chain_0_modify_map_0) + qed + + have chain_n2: "mdb_chain_0 ?n2" + apply (cases "mdbPrev src_node \ dom m") + apply ((rule chain_n' | rule mdb_chain_0_modify_map_next)+, simp_all add: no_0) + apply (subst modify_map_lhs_rtrancl) + apply (rule dest) + apply simp + apply (simp add: sdneq [symmetric]) + apply (rule neg_next_rtrancl_np [OF _ _ _ no_0 dlist]) + apply (rule src, rule src) + apply assumption + apply simp + apply (rule chain_n') + done + + let ?m' = "(modify_map + (modify_map ?n2 + src (cteMDBNode_update (mdbNext_update (%_. (mdbNext dest_node))))) + (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. 
src))))" + + have r1 [simp]: "mdbPrev src_node \ dom m \ \ m \ src \\<^sup>+ mdbPrev src_node" + apply (rule neg_next_trancl_xp) + apply (rule src, assumption, rule no_0, rule dlist) + apply simp + done + + have r [simp]: "mdbPrev src_node \ dom m \ \ ?n' \ src \\<^sup>+ mdbPrev src_node" + by (simp add: modify_map_trancl_other_iff [OF indep_rt1]) + + have r2 [simp]: "mdbPrev dest_node \ dom m \ \ m \ mdbNext src_node \\<^sup>* mdbPrev dest_node" + using src dest indep neg_next_rtrancl_np [OF _ _ _ no_0 dlist] + by auto + + have n2 [simp]: "\ ?n' \ dest \\<^sup>* src" + using sn dest + by (auto dest: rtrancl_into_trancl2 simp: modify_map_lhs_rtrancl) + + have n5 [simp]: "mdbPrev src_node \ dom m \ \ ?n' \ dest \\<^sup>* mdbPrev src_node" + proof - + assume d2: "mdbPrev src_node \ dom m" + have "?n' \ mdbPrev src_node \ src" using sn [OF d2] + by (clarsimp simp: next_unfold' modify_map_other) + thus ?thesis using n2 + by - (erule contrapos_nn, erule (1) rtrancl_into_rtrancl) + qed + + have r4 [simp]: "mdbPrev src_node \ dom m \ \ m \ mdbNext dest_node \\<^sup>+ mdbPrev src_node" + apply (rule neg_next_trancl_np [OF _ _ _ no_0 dlist]) + apply (rule dest) + apply (rule src) + apply assumption + apply (rule indep(2)) + done + + have r5 [simp]: "\ m \ mdbNext dest_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx, rule dest, simp) + have r6 [simp]: " \ m \ mdbNext dest_node \\<^sup>+ src" + by (rule neg_next_trancl_nx, rule dest, rule indep(2)) + have r7 [simp]: " mdbPrev dest_node \ dom m \ \ m \ mdbNext dest_node \\<^sup>+ mdbPrev dest_node" + apply (rule neg_next_trancl_np [OF _ _ _ no_0 dlist]) + apply (rule dest) + apply (rule dest) + apply assumption + apply simp + done + + have n6 [simp]: "\ ?n' \ mdbNext dest_node \\<^sup>+ src" + by (subst modify_map_trancl_other_iff) simp_all + + have n6_r [simp]: "\ ?n' \ mdbNext dest_node \\<^sup>* src" + by (rule neg_rtranclI) (simp_all add: sdneq [symmetric]) + + have n2_3 [simp]: "mdbPrev src_node \ dom m \ \ ?n2 \ mdbNext dest_node \\<^sup>+ src" + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply (simp add: dsnp) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_next_rtrancl_nx) + apply (rule dest) + apply simp_all + done + + have n7 [simp]: "mdbPrev src_node \ dom m \ \ ?n' \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + apply (rule neg_rtranclI) + apply (erule dsnp) + apply (subst modify_map_trancl_other_iff) + apply simp_all + done + + have n8 [simp]: "mdbPrev dest_node \ dom m + \ \ ?n' \ mdbNext dest_node \\<^sup>+ mdbPrev dest_node" + by (simp add: modify_map_trancl_other_iff) + + have n2_5 [simp]: "mdbPrev dest_node \ dom m \ \ ?n2 \ mdbNext dest_node \\<^sup>+ mdbPrev dest_node" + by (cases "mdbPrev src_node \ dom m", simp_all add: modify_map_trancl_other_iff) + + have n2_4 [simp]: "mdbPrev dest_node \ dom m \ \ ?n2 \ mdbNext dest_node \\<^sup>* mdbPrev dest_node" + apply (frule dom_into_not0 [OF no_0]) + apply (cases "mdbPrev src_node \ dom m") + apply (rule neg_rtranclI) + apply (drule dom_into_not0 [OF no_0]) + apply simp + apply simp + apply simp + apply (rule neg_rtranclI) + apply simp + apply simp + done + + have n9 [simp]: "mdbPrev dest_node \ dom m \ + \ modify_map ?n' src (cteMDBNode_update (mdbNext_update (%_. 
(mdbNext dest_node)))) \ src \\<^sup>* mdbPrev dest_node" + apply (subst modify_map_lhs_rtrancl) + apply (simp add: src modify_map_other) + apply simp + apply simp + apply (rule neg_rtranclI) + apply (drule dom_into_not0 [OF no_0]) + apply simp + apply simp + done + + have chain_n3: "mdbPrev src_node \ dom m \ mdb_chain_0 + (modify_map + (modify_map (modify_map m dest (cteMDBNode_update (mdbNext_update (%_. (mdbNext src_node))))) + (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) + src (cteMDBNode_update (mdbNext_update (%_. (mdbNext dest_node)))))" + apply - + apply (cases "mdbNext dest_node \ dom m") + apply (rule mdb_chain_0_modify_map_next [OF chain_n2]) + apply (simp add: no_0) + apply simp + apply (rule neg_rtranclI) + apply (simp add: sdneq [symmetric]) + apply simp + apply (frule ndom_is_0D [OF _ chain no_0]) + apply (rule dest) + apply simp + apply (rule mdb_chain_0_modify_map_0 [OF chain_n2]) + apply (simp_all add: no_0) + done + + have "mdb_chain_0 ?m'" + proof (cases rule: cases2 [of "mdbPrev src_node \ dom m" "mdbPrev dest_node \ dom m"]) + case pos_pos + + thus ?thesis + apply - + apply (rule mdb_chain_0_modify_map_next [OF chain_n3]) + apply (simp_all add: no_0) + apply (subst modify_map_lhs_rtrancl) + apply (simp add: modify_map_other src) + apply simp + apply (rule neg_rtranclI) + apply (simp add: sdneq [symmetric]) + apply simp + apply simp + done + next + case pos_neg + thus ?thesis + by simp (rule chain_n3) + next + case neg_pos + thus ?thesis using no_0 + apply - + apply simp + apply (cases "mdbNext dest_node \ dom m") + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_next [OF chain_n']) + apply simp_all + apply (drule ndom_is_0D [OF _ chain no_0], rule dest) + apply simp + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_0 [OF chain_n']) + apply simp_all + apply (subst modify_map_lhs_rtrancl) + apply (simp add: modify_map_other src) + apply simp_all + apply (rule no_0_no_0_lhs_rtrancl) + apply simp + apply (erule (1) dom_into_not0) + done + next + case neg_neg + thus ?thesis using no_0 + apply - + apply (cases "mdbNext dest_node \ dom m") + apply simp + apply (rule mdb_chain_0_modify_map_next [OF chain_n']) + apply simp + apply simp + apply simp + apply (drule ndom_is_0D [OF _ chain no_0], rule dest) + apply simp + apply (rule mdb_chain_0_modify_map_0 [OF chain_n']) + apply simp + done + qed + + thus ?thesis using d2n + apply simp + apply (subst modify_map_addr_com [where x = dest], simp)+ + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [where x = src], simp)+ + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [where y = dest], simp add: sdneq [symmetric])+ + apply (subst modify_map_addr_com [where y = src], simp) + apply assumption + done + qed + thus ?thesis + unfolding n_def n'_def + apply (simp add: const_def) + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_com [where g = "cteCap_update (%_. scap)"], case_tac x, simp)+ + apply (rule mdb_chain_0_modify_map_inv) + apply (subst modify_map_com [where g = "cteCap_update (%_. 
dcap)"], case_tac x, simp)+ + apply (rule mdb_chain_0_modify_map_inv) + apply simp_all + done +qed + +lemma (in mdb_swap) next_m_n2: + "n \ p \ p' = m \ s_d_swp p \ s_d_swp p'" + by (simp add: next_m_n) + +lemma (in mdb_swap) n_src [simp]: + "n src = Some (CTE dcap dest2_node)" + unfolding n_def n'_def + apply (simp) + apply (subst modify_map_same | subst modify_map_other, simp add: dest2_node_def)+ + apply (simp add: src) + done + +lemma (in mdb_swap) swap_cases [case_names src_dest dest_src other]: + assumes src_dest: + "\mdbNext src_node = dest; mdbPrev dest_node = src; mdbNext dest_node \ src; mdbPrev src_node \ dest\ \ P" + and dest_src: + "\mdbNext dest_node = src; mdbPrev src_node = dest; mdbNext src_node \ dest; mdbPrev dest_node \ src\ \ P" + and other: + "\mdbNext src_node \ dest; mdbPrev dest_node \ src; mdbNext dest_node \ src; mdbPrev src_node \ dest \ \ P" + shows "P" +proof (cases "mdbNext src_node = dest") + case True + thus ?thesis + proof (rule src_dest) + from True show "mdbPrev dest_node = src" + by simp + show "mdbNext dest_node \ src" + proof + assume "mdbNext dest_node = src" + hence "m \ dest \ src" using dest + by - (rule next_fold, simp+) + moreover have "m \ src \ dest" using src True + by - (rule next_fold, simp+) + finally show False by simp + qed + show "mdbPrev src_node \ dest" + proof + assume "mdbPrev src_node = dest" + hence "mdbNext dest_node = src" using src + by (clarsimp elim: dlistEp) + hence "m \ dest \ src" using dest + by - (rule next_fold, simp+) + moreover have "m \ src \ dest" using src True + by - (rule next_fold, simp+) + finally show False by simp + qed + qed +next + case False + + note firstFalse = False + + show ?thesis + proof (cases "mdbNext dest_node = src") + case True + thus ?thesis + proof (rule dest_src) + from True show "mdbPrev src_node = dest" by simp + show "mdbPrev dest_node \ src" + proof + assume "mdbPrev dest_node = src" + hence "mdbNext src_node = dest" using dest + by (clarsimp elim: dlistEp) + hence "m \ src \ dest" using src + by - (rule next_fold, simp+) + moreover have "m \ dest \ src" using dest True + by - (rule next_fold, simp+) + finally show False by simp + qed + qed fact+ + next + case False + from firstFalse show ?thesis + proof (rule other) + show "mdbPrev dest_node \ src" and "mdbPrev src_node \ dest" using False firstFalse + by simp+ + qed fact+ + qed +qed + +lemma (in mdb_swap) src_prev_next [intro?]: + "mdbPrev src_node \ 0 \ m \ mdbPrev src_node \ src" + using src + apply - + apply (erule dlistEp) + apply simp + apply (rule next_fold) + apply simp + apply simp + done + +lemma (in mdb_swap) dest_prev_next [intro?]: + "mdbPrev dest_node \ 0 \ m \ mdbPrev dest_node \ dest" + using dest + apply - + apply (erule dlistEp) + apply simp + apply (rule next_fold) + apply simp + apply simp + done + +lemma (in mdb_swap) n_dest: + "n dest = Some (CTE scap (MDB (if mdbNext src_node = dest then src else mdbNext src_node) (if mdbPrev src_node = dest then src else mdbPrev src_node) (mdbRevocable src_node) (mdbFirstBadged src_node)))" + unfolding n_def n'_def using dest p_0 + apply (simp only: dest2_next dest2_prev) + apply (cases "mdbPrev src_node = dest") + apply (subgoal_tac "dest \ mdbNext src_node") + apply (simp add: modify_map_same modify_map_other) + apply (cases src_node, simp) + apply clarsimp + apply (cases "mdbNext src_node = dest") + apply (simp add: modify_map_same modify_map_other) + apply (cases src_node, simp) + apply (simp add: modify_map_same modify_map_other) + done + +lemma (in mdb_swap) n_dest_prev: + 
assumes md: "m (mdbPrev dest_node) = Some cte" + shows "\cte'. n (mdbPrev dest_node) = Some cte' + \ mdbNext (cteMDBNode cte') = (if dest = mdbNext src_node then mdbNext dest_node else src) + \ mdbPrev (cteMDBNode cte') = + (if (mdbNext src_node = mdbPrev dest_node \ dest = mdbNext src_node) then dest else + mdbPrev (cteMDBNode cte))" +proof - + have nz: "(mdbPrev dest_node) \ 0" using md + by (rule dom_into_not0 [OF no_0 domI]) + + show ?thesis + proof (cases rule: cases2 [of "dest = mdbNext src_node" "mdbNext src_node = mdbPrev dest_node"]) + case pos_pos thus ?thesis by simp + next + case neg_pos + thus ?thesis using nz md + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + next + case pos_neg + + hence "(mdbPrev dest_node) = src" by simp + thus ?thesis using pos_neg md p_0 + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (simp add: modify_map_same modify_map_other del: dest2_parts ) + apply (simp only: next_unfold' dest2_next dest2_prev) + apply (subst if_not_P) + apply simp+ + done + next + case neg_neg + thus ?thesis using md nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + qed +qed + +(* Dual of above *) +lemma (in mdb_swap) n_dest_next: + assumes md: "m (mdbNext dest_node) = Some cte" + shows "\cte'. n (mdbNext dest_node) = Some cte' + \ mdbNext (cteMDBNode cte') = (if (src = mdbNext dest_node \ mdbNext dest_node = mdbPrev src_node) then dest else mdbNext (cteMDBNode cte)) + \ mdbPrev (cteMDBNode cte') = (if src = mdbNext dest_node then mdbPrev dest_node else src)" +proof - + have nz: "(mdbNext dest_node) \ 0" using md + by (rule dom_into_not0 [OF no_0 domI]) + + show ?thesis + proof (cases rule: cases2 [of "src = mdbNext dest_node" "mdbNext dest_node = mdbPrev src_node"]) + case pos_pos thus ?thesis by simp + next + case neg_pos + hence "(mdbPrev src_node) \ dest" + by - (rule, simp add: next_dest_prev_src_sym) + thus ?thesis using nz md neg_pos + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + next + case pos_neg + hence pd: "mdbPrev src_node = dest" by simp + + have "mdbNext src_node \ dest" + proof + assume a: "mdbNext src_node = dest" + from pd have "mdbPrev src_node \ 0" by simp + hence "m \ mdbPrev src_node \ src" .. + also have "m \ src \ dest" using src next_fold a + by auto + finally show False using pd by simp + qed + thus ?thesis using md p_0 pd pos_neg nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (simp add: modify_map_same modify_map_other del: dest2_parts ) + apply (simp only: dest2_next dest2_prev) + apply (subst if_P [OF refl]) + apply simp+ + done + next + case neg_neg + thus ?thesis using md nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + qed +qed + +lemma (in mdb_swap) n_src_prev: + assumes md: "m (mdbPrev src_node) = Some cte" + shows "\cte'. 
n (mdbPrev src_node) = Some cte' + \ mdbNext (cteMDBNode cte') = (if src = mdbNext dest_node then mdbNext src_node else dest) + \ mdbPrev (cteMDBNode cte') = + (if (mdbNext dest_node = mdbPrev src_node \ src = mdbNext dest_node) then src else + mdbPrev (cteMDBNode cte))" +proof - + have nz: "(mdbPrev src_node) \ 0" using md + by (rule dom_into_not0 [OF no_0 domI]) + + show ?thesis + proof (cases rule: cases2 [of "dest = mdbNext src_node" "mdbNext src_node = mdbPrev dest_node"]) + case pos_pos thus ?thesis by simp + next + case neg_pos + thus ?thesis using nz md + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + next + case pos_neg + + hence "(mdbPrev dest_node) = src" by simp + thus ?thesis using pos_neg md p_0 + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other del: dest2_parts ) + done + next + case neg_neg + thus ?thesis using md nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + by (clarsimp simp add: modify_map_same modify_map_other) + qed +qed + +(* Dual of above *) +lemma (in mdb_swap) n_src_next: + assumes md: "m (mdbNext src_node) = Some cte" + shows "\cte'. n (mdbNext src_node) = Some cte' + \ mdbNext (cteMDBNode cte') = (if (dest = mdbNext src_node \ mdbNext src_node = mdbPrev dest_node) then src else mdbNext (cteMDBNode cte)) + \ mdbPrev (cteMDBNode cte') = (if dest = mdbNext src_node then mdbPrev src_node else dest)" +proof - + have nz: "(mdbNext src_node) \ 0" using md + by (rule dom_into_not0 [OF no_0 domI]) + + show ?thesis + proof (cases rule: cases2 [of "src = mdbNext dest_node" "mdbNext dest_node = mdbPrev src_node"]) + case pos_pos thus ?thesis by simp + next + case neg_pos + hence "(mdbPrev src_node) \ dest" + by - (rule, simp add: next_dest_prev_src_sym) + thus ?thesis using nz md neg_pos + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + by (clarsimp simp add: modify_map_same modify_map_other) + next + case pos_neg + hence pd: "mdbPrev src_node = dest" by simp + + have "mdbNext src_node \ dest" + proof + assume a: "mdbNext src_node = dest" + from pd have "mdbPrev src_node \ 0" by simp + hence "m \ mdbPrev src_node \ src" .. 
+ also have "m \ src \ dest" using src using a next_fold by auto + finally show False using pd by simp + qed + thus ?thesis using md p_0 pd pos_neg nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + by (clarsimp simp add: modify_map_same modify_map_other del: dest2_parts ) + next + case neg_neg + thus ?thesis using md nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + by (clarsimp simp add: modify_map_same modify_map_other) + qed +qed + +lemma (in mdb_swap) dest2_node_next: + "mdbNext dest2_node = (if dest = mdbPrev src_node then dest else mdbNext dest_node)" + unfolding dest2_node_def + by simp + +lemma (in mdb_swap) dest2_node_prev: + "mdbPrev dest2_node = (if dest = mdbNext src_node then dest else mdbPrev dest_node)" + unfolding dest2_node_def + by simp + +lemma (in mdb_swap) n_other: + assumes other: "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" + shows "n p = m p" + using other + unfolding n_def n'_def + by (simp add: modify_map_other dest2_node_next dest2_node_prev) + +lemma (in mdb_swap) dom_n_m: + "dom n = dom m" + unfolding n_def n'_def by simp + +lemma (in mdb_swap) other_src_next_dest_src: + fixes cte + defines "p \ mdbNext (cteMDBNode cte)" + assumes dest_src: "mdbNext dest_node = src" + and ps: "m (mdbNext src_node) = Some cte" + and p0: "p \ 0" + shows "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" +proof - + have sn: "m \ src \ mdbNext src_node" .. + also have pn: "m \ mdbNext src_node \ p" using ps + by (simp add: next_unfold' p_def) + finally have sp [intro?]: "m \ src \\<^sup>+ p" . + + have "m \ dest \ mdbNext dest_node" .. + also have "mdbNext dest_node = src" by fact+ + finally have ds [intro?]: "m \ dest \ src" . + + show "p \ mdbPrev src_node" + proof + assume a: "p = mdbPrev src_node" + hence "mdbPrev src_node \ 0" using p0 by simp + hence "m \ mdbPrev src_node \ src" .. + hence "m \ p \ src" using a by simp + thus False using sp by - (drule (1) trancl_into_trancl2, simp) + qed + + show "p \ src" + proof + assume "p = src" + also have "m \ src \ mdbNext src_node" .. + also have "m \ mdbNext src_node \ p" by (rule pn) + finally show False by simp + qed + + show "p \ mdbNext src_node" using pn + by clarsimp + + show "p \ mdbPrev dest_node" + proof + assume a: "p = mdbPrev dest_node" + hence "mdbPrev dest_node \ 0" using p0 by simp + hence "m \ mdbPrev dest_node \ dest" .. + also have "m \ dest \ src" .. + also have "m \ src \\<^sup>+ p" .. + finally show False using a by simp + qed + + show "p \ dest" + proof + assume "p = dest" + also have "m \ dest \ src" .. + also have "m \ src \\<^sup>+ p" .. + finally show False by simp + qed + + show "p \ mdbNext dest_node" + proof + assume "p = mdbNext dest_node" + also have "mdbNext dest_node = src" by fact+ + also have "m \ src \\<^sup>+ p" .. 
+ finally show False by simp + qed +qed + +lemma (in mdb_swap) other_src_prev_src_dest: + fixes cte + defines "p \ mdbPrev (cteMDBNode cte)" + assumes src_dest: "mdbNext src_node = dest" + and ps: "m (mdbPrev src_node) = Some cte" + and p0: "p \ 0" + shows "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" +proof - + note really_annoying_simps [simp del] = word_neq_0_conv + + have pp: "m \ p \ mdbPrev src_node" + using p0 ps unfolding p_def + by (cases cte, simp) (erule (1) prev_leadstoI [OF _ _ dlist]) + also have "mdbPrev src_node \ 0" using ps no_0 + by (rule no_0_neq) + hence "m \ mdbPrev src_node \ src" .. + finally have ps' [intro?]: "m \ p \\<^sup>+ src" . + + from src_dest src have sd [intro?]: "m \ src \ dest" + by (simp add: next_unfold') + + from ps' sd have pd [intro?]: "m \ p \\<^sup>+ dest" .. + + show "p \ mdbPrev src_node" using pp + by clarsimp + + show "p \ src" using ps' by clarsimp + + show "p \ mdbNext src_node" + proof + assume a: "p = mdbNext src_node" + also have "m \ src \ mdbNext src_node" .. + also have "m \ p \\<^sup>+ src" .. + finally show False by simp + qed + + from src_dest have "mdbPrev dest_node = src" by simp + hence "mdbPrev dest_node \ 0" using mdb_ptr_src.p_0 + by (rule ssubst) + thus "p \ mdbPrev dest_node" + unfolding p_def using ps src_dest + by (cases cte, auto simp add: p_prev_qe) + + show "p \ dest" + proof + assume "p = dest" + hence "dest = p" .. + also have "m \ p \\<^sup>+ src" .. + also have "m \ src \ dest" .. + finally show False by simp + qed + + show "p \ mdbNext dest_node" + proof + assume "p = mdbNext dest_node" + also have "m \ dest \ mdbNext dest_node" .. + also have "m \ p \\<^sup>+ src" .. + also have "m \ src \ dest" .. + finally show False by simp + qed +qed + +lemma (in mdb_swap) other_dest_next_src_dest: + fixes cte + defines "p \ mdbNext (cteMDBNode cte)" + assumes src_dest: "mdbNext src_node = dest" + and ps: "m (mdbNext dest_node) = Some cte" + and p0: "p \ 0" + shows "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" +proof - + have sn: "m \ dest \ mdbNext dest_node" .. + also have pn: "m \ mdbNext dest_node \ p" using ps + by (simp add: next_unfold' p_def) + finally have sp [intro?]: "m \ dest \\<^sup>+ p" . + + have "m \ src \ mdbNext src_node" .. + also have "mdbNext src_node = dest" by fact+ + finally have ds [intro?]: "m \ src \ dest" . + + show "p \ mdbPrev dest_node" + proof + assume a: "p = mdbPrev dest_node" + hence "mdbPrev dest_node \ 0" using p0 by simp + hence "m \ mdbPrev dest_node \ dest" .. + hence "m \ p \ dest" using a by simp + thus False using sp by - (drule (1) trancl_into_trancl2, simp) + qed + + show "p \ dest" + proof + assume "p = dest" + also have "m \ dest \ mdbNext dest_node" .. + also have "m \ mdbNext dest_node \ p" by (rule pn) + finally show False by simp + qed + + show "p \ mdbNext dest_node" using pn + by clarsimp + + show "p \ mdbPrev src_node" + proof + assume a: "p = mdbPrev src_node" + hence "mdbPrev src_node \ 0" using p0 by simp + hence "m \ mdbPrev src_node \ src" .. + also have "m \ src \ dest" .. + also have "m \ dest \\<^sup>+ p" .. + finally show False using a by simp + qed + + show "p \ src" + proof + assume "p = src" + also have "m \ src \ dest" .. + also have "m \ dest \\<^sup>+ p" .. 
+ finally show False by simp + qed + + show "p \ mdbNext src_node" + proof + assume "p = mdbNext src_node" + also have "mdbNext src_node = dest" by fact+ + also have "m \ dest \\<^sup>+ p" .. + finally show False by simp + qed +qed + +lemma (in mdb_swap) other_dest_prev_dest_src: + fixes cte + defines "p \ mdbPrev (cteMDBNode cte)" + assumes dest_src: "mdbNext dest_node = src" + and ps: "m (mdbPrev dest_node) = Some cte" + and p0: "p \ 0" + shows "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" +proof - + note really_annoying_simps [simp del] = word_neq_0_conv + + have pp: "m \ p \ mdbPrev dest_node" + using p0 ps unfolding p_def + by (cases cte, simp) (erule (1) prev_leadstoI [OF _ _ dlist]) + also have "mdbPrev dest_node \ 0" using ps no_0 + by (rule no_0_neq) + hence "m \ mdbPrev dest_node \ dest" .. + finally have ps' [intro?]: "m \ p \\<^sup>+ dest" . + + from dest_src dest have sd [intro?]: "m \ dest \ src" + by (simp add: next_unfold') + + from ps' sd have pd [intro?]: "m \ p \\<^sup>+ src" .. + + show "p \ mdbPrev dest_node" using pp + by clarsimp + + show "p \ dest" using ps' by clarsimp + + show "p \ mdbNext dest_node" + proof + assume a: "p = mdbNext dest_node" + also have "m \ dest \ mdbNext dest_node" .. + also have "m \ p \\<^sup>+ dest" .. + finally show False by simp + qed + + from dest_src have "mdbPrev src_node = dest" by simp + hence s0: "mdbPrev src_node \ 0" using p_0 + by (rule ssubst) + have sn: "mdbNext src_node \ dest" using dest_src + by (clarsimp simp: s0) + show "p \ mdbPrev src_node" + unfolding p_def using ps dest_src + by (cases cte) (clarsimp simp: mdb_ptr_src.p_prev_qe sn s0) + + show "p \ src" + proof + assume "p = src" + hence "src = p" .. + also have "m \ p \\<^sup>+ dest" .. + also have "m \ dest \ src" .. + finally show False by simp + qed + + show "p \ mdbNext src_node" + proof + assume "p = mdbNext src_node" + also have "m \ src \ mdbNext src_node" .. + also have "m \ p \\<^sup>+ dest" .. + also have "m \ dest \ src" .. 
+ finally show False by simp + qed +qed + +lemma (in mdb_swap) swap_ptr_cases [case_names p_src_prev p_src p_src_next p_dest_prev p_dest p_dest_next p_other]: + "\p = mdbPrev src_node \ P; p = src \ P; p = mdbNext src_node \ P; + p = mdbPrev dest_node \ P; p = dest \ P; p = mdbNext dest_node \ P; + \p \ mdbPrev src_node; p \ src; p \ mdbNext src_node; + p \ mdbPrev dest_node; p \ dest; p \ mdbNext dest_node\ \ P\ \ P" + by auto + +lemma (in mdb_swap) prev_not0_into_dom: + assumes np: "n p = Some cte" + and n0: "mdbPrev (cteMDBNode cte) \ 0" + shows "mdbPrev (cteMDBNode cte) \ dom m" +proof - + note p_next_qe_src = mdb_ptr_src.p_next_qe + + note annoying_simps [simp del] + = next_dest_prev_src next_dest_prev_src_sym prev_dest_next_src prev_dest_next_src_sym + + note really_annoying_simps [simp del] = word_neq_0_conv + + from np have "p \ dom n" by (rule domI) + then obtain ctep where mp: "m p = Some ctep" + by (clarsimp simp add: dom_n_m) + + show ?thesis + proof (cases rule: swap_ptr_cases [where p = p]) + case p_src_prev + thus ?thesis using mp np n0 src dest + apply simp + apply (frule n_src_prev) + apply (auto simp: elim: dlistEp) + done + next + case p_src + thus ?thesis using mp np n0 src dest + apply (clarsimp simp add: dest2_node_prev) + apply safe + apply simp+ + apply (erule dlistEp, fastforce) + apply simp + done + next + case p_src_next + thus ?thesis using mp np n0 src dest + apply simp + apply (frule n_src_next) + apply (auto simp: elim: dlistEp) + done + next + case p_dest_prev + thus ?thesis using mp np n0 src dest + apply simp + apply (frule n_dest_prev) + apply (auto elim: dlistEp) + done + next + case p_dest + thus ?thesis using mp np n0 src dest + apply (clarsimp simp: n_dest) + apply (erule dlistEp, fastforce) + apply simp + done + next + case p_dest_next + thus ?thesis using mp np n0 src dest + apply simp + apply (frule n_dest_next) + apply (auto simp: elim: dlistEp) + done + next + case p_other + thus ?thesis using mp np n0 src dest + by (auto simp: n_other elim: dlistEp) + qed +qed + +lemma (in mdb_swap) cteSwap_dlist_helper: + shows "valid_dlist n" +proof + fix p cte + assume np: "n p = Some cte" and n0: "mdbPrev (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'. n (mdbPrev (cteMDBNode cte)) = Some cte' \ mdbNext (cteMDBNode cte') = p" + let ?mn = "mdbPrev (cteMDBNode cte)" + + note p_prev_qe_src = mdb_ptr_src.p_prev_qe + + note annoying_simps [simp del] + = next_dest_prev_src next_dest_prev_src_sym prev_dest_next_src prev_dest_next_src_sym + + note really_annoying_simps [simp del] = word_neq_0_conv + + from np have domn: "p \ dom n" by (rule domI) + then obtain ctep where mp: "m p = Some ctep" + by (clarsimp simp add: dom_n_m) + + have dd: "mdbPrev (cteMDBNode cte) \ dom n" + by (subst dom_n_m, rule prev_not0_into_dom) fact+ + then obtain cte' where mmn: "m (mdbPrev (cteMDBNode cte)) = Some cte'" + by (clarsimp simp add: dom_n_m) + + have dest_src_pn: "\mdbPrev src_node \ 0; mdbNext src_node = dest \ + \ mdbNext dest_node \ mdbPrev src_node" + proof (rule not_sym, rule) + assume "mdbPrev src_node = mdbNext dest_node" and "mdbPrev src_node \ 0" + and msd: "mdbNext src_node = dest" + hence "m \ mdbNext dest_node \ src" + by (auto dest!: src_prev intro: next_fold) + also have "m \ src \ dest" using src next_fold msd by auto + also have "m \ dest \ mdbNext dest_node" .. 
+ finally show False by simp + qed + + have src_dest_pn': "\ mdbPrev dest_node \ 0; mdbNext dest_node = src \ + \ mdbNext src_node \ mdbPrev dest_node" + proof (rule not_sym, rule) + assume a: "mdbPrev dest_node = mdbNext src_node" and "mdbPrev dest_node \ 0" + and msd: "mdbNext dest_node = src" + hence a': "mdbPrev dest_node \ 0" by simp + have "m \ src \ mdbPrev dest_node" by (rule next_fold, rule src, simp add: a) + also have "m \ mdbPrev dest_node \ dest" using a' .. + also have "m \ dest \ src" using dest msd + by - (rule next_fold, simp+) + finally show False by simp + qed + + from domn have domm: "p \ dom m" by (simp add: dom_n_m) + with no_0 have p0: "p \ 0" + by (rule dom_into_not0) + + show ?thesis + proof (cases rule: swap_ptr_cases [where p = p]) + case p_src_prev + + hence psrc [intro?]: "m \ p \ src" using p0 + by (clarsimp intro!: src_prev_next) + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = src" using p_src_prev dest src np n0 + using [[hypsubst_thin = true]] + apply clarsimp + apply (drule n_src_prev) + apply (clarsimp simp: dest_src ) + done + thus ?thesis using p_src_prev mmn dest_src + by (simp add: dest2_node_def) + next + case src_dest + + hence "mdbNext dest_node \ mdbPrev src_node" using p_src_prev p0 + by - (rule dest_src_pn, simp) + hence "?mn = mdbPrev (cteMDBNode ctep)" using p_src_prev src np mp p0 dest src_dest + by simp (drule n_src_prev, clarsimp) + thus ?thesis using p_src_prev src_dest mmn n0 mp + apply simp + apply (subst n_other [OF other_src_prev_src_dest]) + apply simp+ + apply (erule dlistEp [OF mp, simplified]) + apply simp + done + next + case other + + show ?thesis + proof (cases "mdbPrev src_node = mdbNext dest_node") + case True thus ?thesis using p_src_prev mmn other np mp other + by simp (drule n_dest_next, simp add: dest2_node_next split: if_split_asm) + next + let ?mn' = "mdbPrev (cteMDBNode ctep)" + case False + hence mnmn: "?mn = ?mn'" using p_src_prev src np mp p0 dest other + by simp (drule n_src_prev, clarsimp) + + have mnp: "m \ ?mn' \ p" using mp mnmn n0 dlist + by (cases ctep, auto intro!: prev_leadstoI) + + note superFalse = False + + show ?thesis + proof (cases "?mn' = mdbNext dest_node") + case True + thus ?thesis using mmn p_src_prev superFalse n0 mp + by (simp add: mnmn) (frule n_dest_next, auto elim: dlistEp simp: other [symmetric]) + next + case False + + have eq: "n ?mn' = m ?mn'" + proof (rule n_other) + + show "?mn' \ mdbPrev dest_node" using mp other p_src_prev n0 mnmn + by (cases ctep, simp add: p_prev_qe) + + show "?mn' \ dest" + proof + assume "?mn' = dest" + hence "mdbNext dest_node = mdbPrev src_node" using mnp dest p_src_prev + by (simp add: next_unfold') + thus False using superFalse by simp + qed + + show "?mn' \ mdbNext dest_node" by fact+ + + show "?mn' \ mdbPrev src_node" using mp other p_src_prev n0 mnmn + by (cases ctep, simp add: p_prev_qe_src) + + show "?mn' \ src" using src mnp p_src_prev p0 + by (clarsimp simp add: next_unfold') + + show "?mn' \ mdbNext src_node" + proof + assume a: "?mn' = mdbNext src_node" + have "m \ ?mn' \ p" using mnp . + also have "m \ p \ src" .. + also have "m \ src \ mdbNext src_node" .. 
+ finally show False using a by simp + qed + qed + thus ?thesis using mnmn mmn mp p_src_prev n0 + by - (erule dlistEp [where p = p], simp+) + qed + qed + qed + next + case p_src + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = dest" using p_src src dest np + by (cases cte, clarsimp simp add: dest2_node_def) + thus ?thesis using p_src src_dest + by (simp add: n_dest) + next + case dest_src + hence "?mn = mdbPrev dest_node" using p_src src np + by (clarsimp simp: dest2_node_def) + thus ?thesis using p_src mmn dest_src + apply (simp add: n_dest dest2_node_prev) + apply (drule n_dest_prev) + apply clarsimp + done + next + case other + hence "?mn = mdbPrev dest_node" using p_src src np + by (clarsimp simp add: dest2_node_def) + thus ?thesis using p_src mmn other + by simp (drule n_dest_prev, clarsimp) + qed + next + case p_src_next + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = mdbPrev src_node" using p_src_next src dest np mp + by (clarsimp simp: n_dest) + thus ?thesis using p_src_next mmn src_dest + by simp (drule n_src_prev, clarsimp) + next + case dest_src + hence "?mn = dest" using p_src_next src np mp + by simp (drule n_src_next, simp) + thus ?thesis using p_src_next dest_src + by (simp add: n_dest) + next + case other + hence "?mn = dest" using p_src_next src np mp + by simp (drule n_src_next, simp) + thus ?thesis using p_src_next mmn other + by (simp add: n_dest) + qed + next + case p_dest_prev + + hence pdest [intro?]: "m \ p \ dest" using p0 + by (clarsimp intro!: dest_prev_next) + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = dest" using p_dest_prev src dest np n0 + using [[hypsubst_thin = true]] + apply clarsimp + apply (drule n_dest_prev) + apply (clarsimp simp: src_dest ) + done + thus ?thesis using p_dest_prev mmn src_dest + by (simp add: n_dest) + next + case dest_src + + hence "mdbNext src_node \ mdbPrev dest_node" using p_dest_prev p0 + by - (rule src_dest_pn', simp) + hence "?mn = mdbPrev (cteMDBNode ctep)" using p_dest_prev dest np mp p0 src dest_src + by simp (drule n_dest_prev, clarsimp) + thus ?thesis using p_dest_prev dest_src mmn n0 mp + apply simp + apply (subst n_other [OF other_dest_prev_dest_src]) + apply simp+ + apply (erule dlistEp [OF mp, simplified]) + apply simp + done + next + case other + + show ?thesis + proof (cases "mdbNext src_node = mdbPrev dest_node") + case True thus ?thesis using p_dest_prev mmn other np mp other + by simp (drule n_dest_prev, simp add: n_dest) + next + let ?mn' = "mdbPrev (cteMDBNode ctep)" + case False + hence mnmn: "?mn = ?mn'" using p_dest_prev src np mp p0 dest other + by simp (drule n_dest_prev, clarsimp) + + have mnp: "m \ ?mn' \ p" using mp mnmn n0 dlist + by (cases ctep, auto intro!: prev_leadstoI) + + note superFalse = False + + show ?thesis + proof (cases "?mn' = mdbNext src_node") + case True + thus ?thesis using mmn p_dest_prev superFalse n0 mp + by (simp add: mnmn) (frule n_src_next, auto elim: dlistEp simp: other [symmetric]) + next + case False + + have eq: "n ?mn' = m ?mn'" + proof (rule n_other) + show "?mn' \ mdbPrev src_node" using mp other p_dest_prev n0 mnmn + by (cases ctep, simp add: p_prev_qe_src) + + show "?mn' \ src" + proof + assume "?mn' = src" + hence "mdbNext src_node = mdbPrev dest_node" using mnp src p_dest_prev + by (simp add: next_unfold') + thus False using superFalse by simp + qed + + show "?mn' \ mdbNext src_node" by fact+ + + show "?mn' \ mdbPrev dest_node" using mp other p_dest_prev n0 mnmn + by 
(cases ctep, simp add: p_prev_qe) + + show "?mn' \ dest" using dest mnp p_dest_prev p0 + by (clarsimp simp add: next_unfold') + + show "?mn' \ mdbNext dest_node" + proof + assume a: "?mn' = mdbNext dest_node" + have "m \ ?mn' \ p" using mnp . + also have "m \ p \ dest" .. + also have "m \ dest \ mdbNext dest_node" .. + finally show False using a by simp + qed + qed + thus ?thesis using mnmn mmn mp p_dest_prev n0 + by - (erule dlistEp [where p = p], simp+) + qed + qed + qed + next + case p_dest + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = src" using p_dest dest src np + by (cases cte, clarsimp simp add: n_dest) + thus ?thesis using p_dest dest_src + by (simp add: dest2_node_next) + next + case src_dest + hence "?mn = mdbPrev src_node" using p_dest dest np + by (clarsimp simp: n_dest) + thus ?thesis using p_dest mmn src_dest + apply (simp add: n_src n_dest) + apply (drule n_src_prev) + apply clarsimp + done + next + case other + hence "?mn = mdbPrev src_node" using p_dest dest np + by (clarsimp simp add: n_dest) + thus ?thesis using p_dest mmn other + by simp (drule n_src_prev, clarsimp) + qed + next + case p_dest_next + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = mdbPrev dest_node" using p_dest_next dest src np mp + by (clarsimp simp: dest2_node_def) + thus ?thesis using p_dest_next mmn dest_src + by simp (drule n_dest_prev, clarsimp) + next + case src_dest + hence "?mn = src" using p_dest_next dest np mp + by simp (drule n_dest_next, simp) + thus ?thesis using p_dest_next src_dest + by (simp add: dest2_node_def) + next + case other + hence "?mn = src" using p_dest_next dest np mp + by simp (drule n_dest_next, simp) + thus ?thesis using p_dest_next mmn other + by (simp add: dest2_node_def) + qed + next + case p_other + hence eq: "n p = m p" by (rule n_other) + hence eq': "cte = ctep" using mp np by simp + + have mns: "?mn \ src" + proof + assume "?mn = src" + hence "p = mdbNext src_node" using mp mmn src eq' n0 + by (auto elim: dlistEp) + thus False using p_other by simp + qed + + have mnsn: "?mn \ mdbPrev src_node" + proof + assume "?mn = mdbPrev src_node" + hence "src = p" using mp eq' n0 + by (cases ctep, clarsimp dest!: p_prev_qe_src) + thus False using p_other by simp + qed + + have mnd: "?mn \ dest" + proof + assume "?mn = dest" + hence "p = mdbNext dest_node" using mp mmn dest eq' n0 + by (auto elim: dlistEp) + thus False using p_other by simp + qed + + have mndn: "?mn \ mdbPrev dest_node" + proof + assume "?mn = mdbPrev dest_node" + hence "dest = p" using mp eq' n0 + by (cases ctep, clarsimp dest!: p_prev_qe) + thus False using p_other by simp + qed + + from dd obtain cten where nmn: "n ?mn = Some cten" by auto + + have mnext: "mdbNext (cteMDBNode cte') = p" using mp mmn + by - (erule dlistEp, rule dom_into_not0 [OF no_0], (clarsimp simp: eq')+) + + show ?thesis + proof (cases rule: cases2 [of "?mn = mdbNext src_node" "?mn = mdbNext dest_node"]) + case pos_pos + thus ?thesis using n0 by simp + next + case pos_neg + thus ?thesis using mmn nmn mnd mndn + by simp (drule n_src_next, simp add: mnext eq' next_dest_prev_src_sym) + next + case neg_pos + thus ?thesis using mmn nmn mns mnsn + by simp (drule n_dest_next, simp add: mnext eq' annoying_simps) + next + case neg_neg + thus ?thesis using mmn nmn mns mnsn mnd mndn mnext + by (simp add: n_other) + qed + qed +next + fix p cte + assume np: "n p = Some cte" and n0: "mdbNext (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'. 
n (mdbNext (cteMDBNode cte)) = Some cte' \ mdbPrev (cteMDBNode cte') = p" + let ?mn = "mdbNext (cteMDBNode cte)" + + note p_next_qe_src = mdb_ptr_src.p_next_qe + + note annoying_simps [simp del] + = next_dest_prev_src next_dest_prev_src_sym prev_dest_next_src prev_dest_next_src_sym + + from np have domn: "p \ dom n" by (rule domI) + then obtain ctep where mp: "m p = Some ctep" + by (clarsimp simp add: dom_n_m) + + from n0 have dd: "mdbNext (cteMDBNode cte) \ dom n" using np + apply - + apply (erule contrapos_pp) + apply (cases cte) + apply (drule ndom_is_0D [OF _ cteSwap_chain no_0_n, where ptr = p]) + apply simp+ + done + + then obtain cte' where mmn: "m (mdbNext (cteMDBNode cte)) = Some cte'" + by (clarsimp simp add: dom_n_m) + + have src_dest_pn: "\mdbNext dest_node \ 0; mdbNext src_node = dest \ + \ mdbPrev src_node \ mdbNext dest_node" + proof + assume "mdbPrev src_node = mdbNext dest_node" and "mdbNext dest_node \ 0" + and msd: "mdbNext src_node = dest" + hence "m \ mdbNext dest_node \ src" + by (auto dest!: src_prev intro: next_fold) + also have "m \ src \ dest" using src using msd next_fold by auto + also have "m \ dest \ mdbNext dest_node" .. + finally show False by simp + qed + + have src_dest_pn': "\ mdbNext src_node \ 0; mdbNext dest_node = src \ + \ mdbPrev dest_node \ mdbNext src_node" + proof + assume a: "mdbPrev dest_node = mdbNext src_node" and "mdbNext src_node \ 0" + and msd: "mdbNext dest_node = src" + hence a': "mdbPrev dest_node \ 0" by simp + have "m \ src \ mdbPrev dest_node" by (rule next_fold, rule src, simp add: a) + also have "m \ mdbPrev dest_node \ dest" using a' .. + also have "m \ dest \ src" using dest msd + by - (rule next_fold, simp+) + finally show False by simp + qed + + from domn have domm: "p \ dom m" by (simp add: dom_n_m) + with no_0 have p0: "p \ 0" + by (rule dom_into_not0) + + from np have npp: "n \ p \ mdbNext (cteMDBNode cte)" + by (simp add: next_fold) + hence swp: "m \ s_d_swp p \ s_d_swp (mdbNext (cteMDBNode cte))" + by (simp add: next_m_n) + + show ?thesis + proof (cases rule: swap_ptr_cases [where p = p]) + case p_src_prev + + hence p0': "mdbPrev src_node \ 0" using p0 by simp + hence stp: "m \ mdbPrev src_node \ src" .. 
+ + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = dest" using stp np mp p_src_prev + by (simp add: next_m_n s_d_swap_def next_unfold') (drule n_src_prev, clarsimp) + thus ?thesis using p_src_prev n_dest src_dest + by auto + next + case dest_src + hence "?mn = mdbNext src_node" using stp np mp p_src_prev + by (clarsimp simp add: next_m_n s_d_swap_def next_unfold' n_dest) + thus ?thesis using p_src_prev mmn dest_src + by simp (drule n_src_next, clarsimp) + next + case other + hence "?mn = dest" using stp np mp p_src_prev + by (clarsimp simp add: next_m_n s_d_swap_def next_unfold' annoying_simps + dest!: n_src_prev) + thus ?thesis using p_src_prev other + by (simp add: n_dest) + qed + next + case p_src + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = mdbNext dest_node" using p_src src np + by (cases cte, clarsimp simp add: dest2_node_def) + thus ?thesis using p_src mmn src_dest + by simp (drule n_dest_next, clarsimp) + next + case dest_src + hence "?mn = dest" using p_src src np + by (cases cte, clarsimp simp add: dest2_node_def) + thus ?thesis using p_src mmn dest_src + by (simp add: n_dest) + next + case other + hence "?mn = mdbNext dest_node" using p_src src np + by (cases cte, clarsimp simp add: dest2_node_def) + thus ?thesis using p_src mmn other + by simp (drule n_dest_next, clarsimp) + qed + next + case p_src_next + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = src" using p_src_next dest np + by (cases cte, clarsimp simp: n_dest) + thus ?thesis using p_src_next mmn src_dest + by (simp add: dest2_node_def) + next + case dest_src + + hence "mdbPrev dest_node \ mdbNext src_node" using p_src_next p0 + by - (rule src_dest_pn', simp+) + hence "?mn = mdbNext (cteMDBNode ctep)" using p_src_next src np mp p0 dest dest_src + by simp (drule n_src_next, clarsimp) + thus ?thesis using p_src_next dest_src mmn n0 mp + apply simp + apply (subst n_other [OF other_src_next_dest_src]) + apply simp+ + apply (erule dlistEn [OF mp, simplified]) + apply simp + done + next + case other + + show ?thesis + proof (cases "mdbNext src_node = mdbPrev dest_node") + case True thus ?thesis using p_src_next mmn other np mp other + by simp (drule n_dest_prev, simp add: dest2_node_prev split: if_split_asm) + next + let ?mn' = "mdbNext (cteMDBNode ctep)" + case False + hence mnmn: "?mn = ?mn'" using p_src_next src np mp p0 dest other + by simp (drule n_src_next, clarsimp) + + note superFalse = False + + show ?thesis + proof (cases "?mn' = mdbPrev dest_node") + case True + thus ?thesis using mmn p_src_next superFalse n0 mp + by (simp add: mnmn) (frule n_dest_prev, auto elim: dlistEn) + next + case False + + have eq: "n ?mn' = m ?mn'" + proof (rule n_other) + have "m \ src \ mdbNext src_node" .. + hence sp [intro?]: "m \ src \ p" by (simp add: p_src_next) + also have mmn'[intro?]: "m \ p \ ?mn'" using mp by (simp add: next_unfold') + finally have smn [intro?]: "m \ src \\<^sup>+ ?mn'" . + (* Sigh *) + + show "?mn' \ mdbPrev src_node" + proof + assume a: "?mn' = mdbPrev src_node" + also have "mdbPrev src_node \ 0" using mmn + by - (rule dom_into_not0 [OF no_0 domI], simp add: a [symmetric] mnmn) + hence "m \ mdbPrev src_node \ src" .. + also have "m \ src \\<^sup>+ ?mn'" .. + finally show False by simp + qed + + show "?mn' \ src" using smn + by clarsimp + + show "?mn' \ mdbNext src_node" + proof + assume "?mn' = mdbNext src_node" + also have "mdbNext src_node = p" by (simp add: p_src_next) + also have "m \ p \ ?mn'" .. 
+ finally show False by simp + qed + + show "?mn' \ mdbPrev dest_node" by fact+ + show "?mn' \ dest" using src mp p_src_next mnmn swp + by (clarsimp simp add: next_unfold' s_d_swap_def split: if_split_asm) + show "?mn' \ mdbNext dest_node" using mnmn mp p_src_next swp False superFalse other n0 + by (cases ctep, clarsimp simp add: next_unfold' s_d_swap_def dest!: p_next_eq) + qed + thus ?thesis using mnmn mmn mp p_src_next n0 + by - (erule dlistEn [where p = p], simp+) + qed + qed + qed + next + case p_dest_prev + hence p0': "mdbPrev dest_node \ 0" using p0 by simp + hence stp: "m \ mdbPrev dest_node \ dest" .. + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = src" using stp np mp p_dest_prev + by (simp add: next_m_n s_d_swap_def next_unfold') (drule n_dest_prev, clarsimp) + thus ?thesis using p_dest_prev dest_src + by (simp add: n_src dest2_node_prev) + next + case src_dest + hence "?mn = mdbNext dest_node" using stp np mp p_dest_prev + by (simp add: annoying_simps) (drule n_dest_prev, clarsimp) + thus ?thesis using p_dest_prev mmn src_dest + by simp (drule n_dest_next, clarsimp) + next + case other + hence "?mn = src" using stp np mp p_dest_prev + by simp (drule n_dest_prev, simp) + thus ?thesis using p_dest_prev other + by (simp add: n_src dest2_node_prev) + qed + next + case p_dest + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = mdbNext src_node" using p_dest dest src np + by (cases cte, clarsimp simp add: n_dest) + thus ?thesis using p_dest mmn dest_src + by simp (drule n_src_next, clarsimp) + next + case src_dest + hence "?mn = src" using p_dest dest np + by (cases cte, clarsimp simp add: n_dest) + thus ?thesis using p_dest mmn src_dest + by (simp add: n_src dest2_node_prev) + next + case other + hence "?mn = mdbNext src_node" using p_dest dest np + by (cases cte, clarsimp simp add: n_dest) + thus ?thesis using p_dest mmn other + by simp (drule n_src_next, clarsimp) + qed + next + case p_dest_next + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = dest" using p_dest_next src np + by (cases cte, clarsimp simp: n_src dest2_node_def) + thus ?thesis using p_dest_next mmn dest_src + by (simp add: dest2_node_def n_dest) + next + case src_dest + + hence "mdbPrev src_node \ mdbNext dest_node" using p_dest_next p0 + by - (rule src_dest_pn, simp+) + hence "?mn = mdbNext (cteMDBNode ctep)" using p_dest_next dest np mp p0 src src_dest + by simp (drule n_dest_next, clarsimp) + thus ?thesis using p_dest_next src_dest mmn n0 mp + apply simp + apply (subst n_other [OF other_dest_next_src_dest]) + apply simp+ + apply (erule dlistEn [OF mp, simplified]) + apply simp + done + next + case other + + show ?thesis + proof (cases "mdbNext dest_node = mdbPrev src_node") + case True thus ?thesis using p_dest_next mmn other np mp other + by simp (drule n_src_prev, simp add: dest2_node_prev n_dest ) + next + let ?mn' = "mdbNext (cteMDBNode ctep)" + case False + hence mnmn: "?mn = ?mn'" using p_dest_next src np mp p0 dest other + by simp (drule n_dest_next, clarsimp) + + note superFalse = False + + show ?thesis + proof (cases "?mn' = mdbPrev src_node") + case True + thus ?thesis using mmn p_dest_next superFalse n0 mp + by (simp add: mnmn) (frule n_src_prev, auto elim: dlistEn) + next + case False + + have eq: "n ?mn' = m ?mn'" + proof (rule n_other) + have "m \ dest \ mdbNext dest_node" .. 
+ hence sp [intro?]: "m \ dest \ p" by (simp add: p_dest_next) + also have mmn'[intro?]: "m \ p \ ?mn'" using mp by (simp add: next_unfold') + finally have smn [intro?]: "m \ dest \\<^sup>+ ?mn'" . + (* Sigh *) + + show "?mn' \ mdbPrev dest_node" + proof + assume a: "?mn' = mdbPrev dest_node" + also have "mdbPrev dest_node \ 0" using mmn + by - (rule dom_into_not0 [OF no_0 domI], simp add: a [symmetric] mnmn) + hence "m \ mdbPrev dest_node \ dest" .. + also have "m \ dest \\<^sup>+ ?mn'" .. + finally show False by simp + qed + + show "?mn' \ dest" using smn + by clarsimp + + show "?mn' \ mdbNext dest_node" + proof + assume "?mn' = mdbNext dest_node" + also have "mdbNext dest_node = p" by (simp add: p_dest_next) + also have "m \ p \ ?mn'" .. + finally show False by simp + qed + + show "?mn' \ mdbPrev src_node" by fact+ + show "?mn' \ src" using dest mp p_dest_next mnmn swp + by (clarsimp simp add: next_unfold' s_d_swap_def split: if_split_asm) + show "?mn' \ mdbNext src_node" using mnmn mp p_dest_next swp False superFalse other n0 + by (cases ctep, clarsimp simp add: next_unfold' s_d_swap_def + dest!: p_next_qe_src) + qed + thus ?thesis using mnmn mmn mp p_dest_next n0 + by - (erule dlistEn [where p = p], simp+) + qed + qed + qed + next + case p_other + hence eq: "n p = m p" by (rule n_other) + hence eq': "cte = ctep" using mp np by simp + + have mns: "?mn \ src" + proof + assume "?mn = src" + hence "p = mdbPrev src_node" using mp mmn src eq' n0 + by (auto elim: dlistEn) + thus False using p_other by simp + qed + + have mnsn: "?mn \ mdbNext src_node" + proof + assume "?mn = mdbNext src_node" + hence "src = p" using mp eq' n0 + by (cases ctep, clarsimp dest!: p_next_qe_src) + thus False using p_other by simp + qed + + have mnd: "?mn \ dest" + proof + assume "?mn = dest" + hence "p = mdbPrev dest_node" using mp mmn dest eq' n0 + by (auto elim: dlistEn) + thus False using p_other by simp + qed + + have mndn: "?mn \ mdbNext dest_node" + proof + assume "?mn = mdbNext dest_node" + hence "dest = p" using mp eq' n0 + by (cases ctep, clarsimp dest!: p_next_qe) + thus False using p_other by simp + qed + + from dd obtain cten where nmn: "n ?mn = Some cten" by auto + + have mprev: "mdbPrev (cteMDBNode cte') = p" using mp mmn + by - (erule dlistEn, rule dom_into_not0 [OF no_0], (clarsimp simp: eq')+) + + show ?thesis + proof (cases rule: cases2 [of "?mn = mdbPrev src_node" "?mn = mdbPrev dest_node"]) + case pos_pos + thus ?thesis using n0 by simp + next + case pos_neg + thus ?thesis using mmn nmn mnd mndn + by simp (drule n_src_prev, simp add: mprev eq' next_dest_prev_src_sym) + next + case neg_pos + thus ?thesis using mmn nmn mns mnsn + by simp (drule n_dest_prev, simp add: mprev eq' annoying_simps) + next + case neg_neg + thus ?thesis using mmn nmn mns mnsn mnd mndn mprev + by (simp add: n_other) + qed + qed +qed + +lemma sameRegionAs_eq_child: + "\ sameRegionAs cap c; weak_derived' c c' \ + \ sameRegionAs cap c'" + by (clarsimp simp: weak_derived'_def sameRegionAs_def2) + +lemma sameRegionAs_eq_parent: + "\ sameRegionAs c cap; weak_derived' c c' \ + \ sameRegionAs c' cap" + by (clarsimp simp: weak_derived'_def sameRegionAs_def2) + +context mdb_swap +begin + +lemma sameRegionAs_dcap_parent: + "sameRegionAs dcap cap = sameRegionAs dest_cap cap" + apply (rule iffI) + apply (erule sameRegionAs_eq_parent, rule weak_derived_sym', rule dest_derived) + apply (erule sameRegionAs_eq_parent, rule dest_derived) + done + +lemma sameRegionAs_dcap_child: + "sameRegionAs cap dcap = sameRegionAs cap dest_cap" + apply 
(rule iffI) + apply (erule sameRegionAs_eq_child, rule weak_derived_sym', rule dest_derived) + apply (erule sameRegionAs_eq_child, rule dest_derived) + done + +lemma sameRegionAs_scap_parent: + "sameRegionAs scap cap = sameRegionAs src_cap cap" + apply (rule iffI) + apply (erule sameRegionAs_eq_parent, rule weak_derived_sym', rule src_derived) + apply (erule sameRegionAs_eq_parent, rule src_derived) + done + +lemma sameRegionAs_scap_child: + "sameRegionAs cap scap = sameRegionAs cap src_cap" + apply (rule iffI) + apply (erule sameRegionAs_eq_child, rule weak_derived_sym', rule src_derived) + apply (erule sameRegionAs_eq_child, rule src_derived) + done + +lemmas region_simps = + sameRegionAs_scap_child sameRegionAs_scap_parent + sameRegionAs_dcap_child sameRegionAs_dcap_parent + +lemma master_srcI: + "\ \cap. F (capMasterCap cap) = F cap \ + \ F scap = F src_cap" + using src_derived + by (clarsimp simp: weak_derived'_def elim!: master_eqI) + +lemma isEPsrc: + "isEndpointCap scap = isEndpointCap src_cap" + by (rule master_srcI, rule isCap_Master) + +lemma isEPbadge_src: + "isEndpointCap src_cap \ capEPBadge scap = capEPBadge src_cap" + using src_derived + by (clarsimp simp: isCap_simps weak_derived'_def) + +lemma isNTFNsrc: + "isNotificationCap scap = isNotificationCap src_cap" + by (rule master_srcI, rule isCap_Master) + +lemma isNTFNbadge_src: + "isNotificationCap src_cap \ capNtfnBadge scap = capNtfnBadge src_cap" + using src_derived + by (clarsimp simp: isCap_simps weak_derived'_def) + +lemma isEPdest: + "isEndpointCap dcap = isEndpointCap dest_cap" + using dest_derived by (fastforce simp: isCap_simps weak_derived'_def) + +lemma isEPbadge_dest: + "isEndpointCap dest_cap \ capEPBadge dcap = capEPBadge dest_cap" + using dest_derived by (auto simp: weak_derived'_def isCap_simps) + +lemma isNTFNdest: + "isNotificationCap dcap = isNotificationCap dest_cap" + using dest_derived by (auto simp: weak_derived'_def isCap_simps) + +lemma isNTFNbadge_dest: + "isNotificationCap dest_cap \ capNtfnBadge dcap = capNtfnBadge dest_cap" + using dest_derived by (auto simp: weak_derived'_def isCap_simps) + +lemmas ep_simps = + isEPsrc isEPbadge_src isNTFNsrc isNTFNbadge_src + isEPdest isEPbadge_dest isNTFNdest isNTFNbadge_dest + +end + +lemma sameRegion_ep: + "\ sameRegionAs cap cap'; isEndpointCap cap \ \ isEndpointCap cap'" + by (auto simp: isCap_simps sameRegionAs_def3) + +lemma sameRegion_ntfn: + "\ sameRegionAs cap cap'; isNotificationCap cap \ \ isNotificationCap cap'" + by (auto simp: isCap_simps sameRegionAs_def3) + +lemma (in mdb_swap) cteSwap_valid_badges: + "valid_badges n" +proof - + from valid + have "valid_badges m" .. 
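+  (* Badge validity transfers across the swap: n_cap relates the swapped caps back to the originals, and ep_simps/region_simps show that weak_derived' caps agree on endpoint/notification badges and on sameRegionAs. *)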
+ thus ?thesis using src dest + apply (clarsimp simp add: valid_badges_def next_m_n2) + apply (frule_tac p=p in n_cap) + apply (frule_tac p=p' in n_cap) + apply (drule badge_n)+ + apply (clarsimp simp: s_d_swap_def sameRegion_ntfn sameRegion_ep + ep_simps region_simps + split: if_split_asm) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + done +qed + +lemma (in mdb_swap) m_trancl: + assumes "m \ p \\<^sup>+ p'" + shows "n \ s_d_swp p \\<^sup>+ s_d_swp p'" + using assms +proof induct + case (base x) + thus ?case by (fastforce simp: next_m_n) +next + case (step x y) + thus ?case by (fastforce simp: next_m_n elim: trancl_trans) +qed + +lemma (in mdb_swap) n_trancl: + "n \ p \\<^sup>+ p' = m \ s_d_swp p \\<^sup>+ s_d_swp p'" +proof + assume "n \ p \\<^sup>+ p'" + thus "m \ s_d_swp p \\<^sup>+ s_d_swp p'" + by induct (auto simp: next_m_n2 elim!: trancl_trans) +next + assume "m \ s_d_swp p \\<^sup>+ s_d_swp p'" + thus "n \ p \\<^sup>+ p'" + by (fastforce dest: m_trancl) +qed + +lemma (in mdb_swap) n_rtrancl: + "n \ p \\<^sup>* p' = m \ s_d_swp p \\<^sup>* s_d_swp p'" + by (simp add: rtrancl_eq_or_trancl n_trancl) + +lemma (in mdb_swap) n_cap_eq': + "(\n'. n p = Some (CTE cap n')) = + (if p = src + then cap = dcap + else if p = dest + then cap = scap + else \n'. m p = Some (CTE cap n'))" + using src dest + apply simp + apply (rule conjI, clarsimp) + apply (rule iffI) + apply (fastforce dest: n_cap) + subgoal by (simp add: n_def modify_map_if dest2_node_def n'_def, auto) + apply clarsimp + apply (rule conjI, fastforce) + apply clarsimp + apply (rule iffI) + apply (fastforce dest: n_cap) + apply (simp add: n_def modify_map_cases n'_def) + apply (simp add: dest2_node_def) + apply auto[1] + apply (cases "mdbNext dest_node = 0") + apply (cases "mdbNext src_node = 0") + apply simp + apply simp + apply (cases "mdbPrev dest_node = mdbNext src_node") + apply simp + apply simp + apply simp + apply (cases "mdbPrev dest_node = mdbNext src_node") + apply simp + apply simp + apply (cases "mdbNext dest_node = p") + apply simp + apply fastforce + apply simp + apply (cases "mdbPrev dest_node = p") + apply simp + apply simp + apply (cases "mdbNext dest_node = p") + apply simp + apply (cases "mdbPrev dest_node = p") + apply simp + apply fastforce + apply simp + apply (cases "mdbPrev src_node = p", simp) + apply simp + apply simp + apply (cases "mdbPrev dest_node = p", simp) + apply fastforce + apply simp + apply (cases "mdbPrev src_node = p", simp) + apply simp + apply (cases "mdbNext dest_node = p") + apply simp + apply (cases "mdbPrev dest_node = p") + apply simp + apply fastforce + apply simp + apply (cases "mdbPrev src_node = p", simp) + apply simp + apply simp + apply (cases "mdbPrev dest_node = p", simp) + apply fastforce + apply simp + apply (cases "mdbPrev src_node = p", simp) + apply simp + done + +lemma (in mdb_swap) n_cap_eq: + "(\n'. n p = Some (CTE cap n')) = + (\n'. if p = src then m (s_d_swp p) = Some (CTE dest_cap n') \ cap = dcap + else if p = dest then m (s_d_swp p) = Some (CTE src_cap n') \ cap = scap + else m (s_d_swp p) = Some (CTE cap n'))" + apply (simp add: s_d_swp_def n_cap_eq' src dest) + apply (auto simp: s_d_swap_def) + done + +lemma (in mdb_swap) cteSwap_chunked: + "mdb_chunked n" +proof - + from valid + have "mdb_chunked m" .. 
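+  (* mdb_chunked is preserved because next/trancl in n correspond to next/trancl in m under s_d_swap (n_trancl, n_rtrancl, n_cap_eq); the proof then case-splits on whether p, p' and the intermediate p'' coincide with src or dest. *)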
+ thus ?thesis + apply (clarsimp simp add: mdb_chunked_def is_chunk_def n_trancl n_rtrancl n_cap_eq) + apply (case_tac "p = dest") + apply simp + apply (case_tac "p' = src") + apply (clarsimp simp add: region_simps) + apply (erule_tac x=src in allE) + apply (erule_tac x=dest in allE) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (case_tac "p'' = dest", simp) + apply simp + apply (case_tac "p'' = src") + apply (clarsimp simp: region_simps) + apply simp + apply clarsimp + apply (drule (1) trancl_trans) + apply simp + apply simp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans) + apply simp + apply clarsimp + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (case_tac "p'' = dest") + apply (clarsimp simp: region_simps) + apply simp + apply (case_tac "p'' = src", simp) + apply simp + apply (clarsimp simp: region_simps) + apply (erule_tac x=src in allE) + apply clarsimp + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply clarsimp + apply (erule impE) + apply (clarsimp simp: s_d_swap_def) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest", simp) + apply clarsimp + apply (case_tac "p''=src") + apply (clarsimp simp: dest) + apply (clarsimp simp: region_simps) + apply (erule_tac x=dest in allE) + apply (clarsimp simp: dest) + apply clarsimp + apply clarsimp + apply (drule (1) trancl_trans, simp) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, simp) + apply clarsimp + apply (case_tac "p''=dest") + apply (clarsimp simp: region_simps) + apply (erule_tac x=src in allE) + apply clarsimp + apply clarsimp + apply (case_tac "p''=src") + apply (simp add: dest region_simps) + apply (erule_tac x=dest in allE) + apply (clarsimp simp: dest) + apply simp + apply clarsimp + apply (case_tac "p'=dest") + apply clarsimp + apply (case_tac "p=src") + apply (clarsimp simp: region_simps) + apply (erule_tac x=dest in allE) + apply (erule_tac x=src in allE) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: region_simps) + apply simp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply simp + apply clarsimp + apply (drule (1) trancl_trans) + apply simp + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans) + apply simp + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: region_simps) + apply simp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (erule_tac x="dest" in allE) + apply simp + apply simp + apply clarsimp + apply (erule_tac x="s_d_swap p src dest" in allE) + apply (erule_tac x="src" in allE) + apply (clarsimp simp: region_simps) + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: region_simps) + apply (case_tac "p''=src") + apply (simp add: region_simps dest) + apply (erule_tac x=dest in allE) + apply (clarsimp simp: dest) + apply simp + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: region_simps) + apply (case_tac "p''=src") + apply (simp add: region_simps dest) + apply (erule_tac x=dest in allE) + apply (clarsimp simp: dest) + apply simp + apply clarsimp + apply (case_tac "p'=src") + apply clarsimp + apply (erule_tac x="s_d_swap p src dest" in allE) + apply (erule_tac x=dest in allE) + apply 
(clarsimp simp: region_simps) + apply (erule impE) + apply (clarsimp simp: s_d_swap_def) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply clarsimp + apply clarsimp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply clarsimp + apply clarsimp + apply (case_tac "p=src") + apply clarsimp + apply (erule_tac x="dest" in allE) + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply (clarsimp simp: region_simps) + apply (erule impE) + apply (clarsimp simp: s_d_swap_def) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply simp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply simp + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply simp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (erule_tac x=dest in allE) + apply (simp add: dest) + apply simp + apply clarsimp + apply (erule_tac x="s_d_swap p src dest" in allE) + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (erule_tac x=dest in allE) + apply (simp add: dest) + apply simp + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (erule_tac x=dest in allE) + apply (simp add: dest) + apply simp + done +qed + +(* FIXME: make this a locale from the start *) +locale weak_der' = + fixes old new + assumes derived: "weak_derived' new old" +begin + +lemma isUntyped_new: + "isUntypedCap new = isUntypedCap old" + using derived by (auto simp: weak_derived'_def isCap_simps) + +lemma capRange_new: + "capRange new = capRange old" + using derived + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule capRange_Master) + apply simp + done + +lemma untypedRange_new: + "untypedRange new = untypedRange old" + using derived + apply (clarsimp simp add: weak_derived'_def) + apply (rule master_eqI, rule untypedRange_Master) + apply simp + done + +lemmas range_simps [simp] = + isUntyped_new capRange_new untypedRange_new + +lemma isReplyMaster_eq: + "(isReplyCap new \ capReplyMaster new) + = (isReplyCap old \ capReplyMaster old)" + using derived + by (fastforce simp: weak_derived'_def isCap_simps) + +end + +lemma master_eqE: + "\ capMasterCap cap = capMasterCap cap'; + \cap. 
F (capMasterCap cap) = F cap \ + \ F cap = F cap'" + by (rule master_eqI, assumption, simp) + +lemma weak_derived_Null' [simp]: + "weak_derived' cap NullCap = (cap = NullCap)" + by (auto simp add: weak_derived'_def) + +lemma Null_weak_derived_Null' [simp]: + "weak_derived' NullCap cap = (cap = NullCap)" + by (auto simp add: weak_derived'_def) + + + +lemma distinct_zombies_switchE: + "\ distinct_zombies m; m x = Some old_x; m y = Some old_y; + capMasterCap (cteCap old_x) = capMasterCap (cteCap new_y); + capMasterCap (cteCap old_y) = capMasterCap (cteCap new_x) \ + \ distinct_zombies (m(x \ new_x, y \ new_y))" + apply (cases "x = y") + apply clarsimp + apply (erule(1) distinct_zombies_sameMasterE) + apply simp + apply (drule_tac F="\cap. (isUntypedCap cap, isZombie cap, isArchFrameCap cap, + capClass cap, capUntypedPtr cap, capBits cap)" + in master_eqE, + simp add: isCap_Master capClass_Master capUntyped_Master capBits_Master)+ + apply (simp add: distinct_zombies_def distinct_zombie_caps_def + split del: if_split) + apply (intro allI) + apply (drule_tac x="(id (x := y, y := x)) ptr" in spec) + apply (drule_tac x="(id (x := y, y := x)) ptr'" in spec) + apply (clarsimp split del: if_split) + apply (clarsimp simp: isCap_Master + capBits_Master + capClass_Master + capUntyped_Master + split: if_split_asm ) + done + +context mdb_swap +begin + +lemma weak_der_src: + "weak_der' src_cap scap" + apply unfold_locales + apply (rule weak_derived_sym') + apply (rule src_derived) + done + +lemma weak_der_dest: + "weak_der' dest_cap dcap" + apply unfold_locales + apply (rule weak_derived_sym') + apply (rule dest_derived) + done + +lemmas src_range_simps [simp] = weak_der'.range_simps [OF weak_der_src] +lemmas dest_range_simps [simp] = weak_der'.range_simps [OF weak_der_dest] + +lemma caps_contained: + "caps_contained' n" + using valid + apply (clarsimp simp: valid_mdb_ctes_def caps_contained'_def) + apply (drule n_cap)+ + apply (simp split: if_split_asm) + apply (clarsimp dest!: capRange_untyped) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + apply (clarsimp dest!: capRange_untyped) + apply fastforce + apply fastforce + apply fastforce + done + +lemma untyped_mdb_n: + "untyped_mdb' n" + using untyped_mdb + apply (simp add: n_cap_eq untyped_mdb'_def descendants_of'_def parency) + apply clarsimp + apply (case_tac "p=dest") + apply clarsimp + apply (case_tac "p'=dest", simp) + apply (case_tac "p'=src", simp) + apply clarsimp + apply clarsimp + apply (case_tac "p'=dest") + apply clarsimp + apply (case_tac "p=src", simp) + apply clarsimp + apply clarsimp + apply (case_tac "p=src") + apply clarsimp + apply (case_tac "p'=src",simp) + apply clarsimp + apply clarsimp + apply (case_tac "p'=src",simp) + apply clarsimp + done + + +lemma untyped_inc_n: + assumes untyped_eq: "isUntypedCap src_cap \ scap = src_cap" + "isUntypedCap dest_cap \ dcap = dest_cap" + shows "untyped_inc' n" + using untyped_inc + apply (simp add: n_cap_eq untyped_inc'_def descendants_of'_def parency) + apply clarsimp + apply (erule_tac x="s_d_swap p src dest" in allE) + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply (case_tac "p=dest") + apply simp + apply (case_tac "p'=src", simp) + apply (clarsimp simp:untyped_eq) + apply (case_tac "p'=dest", simp) + apply (clarsimp simp: s_d_swap_def untyped_eq) + apply clarsimp + apply (case_tac "p=src") + apply clarsimp + apply (case_tac "p'=dest", simp) + apply (clarsimp simp:untyped_eq) + apply (case_tac "p'=src", simp) + apply (clarsimp simp:untyped_eq) + apply 
clarsimp + apply (case_tac "p'=src") + apply (clarsimp simp:untyped_eq) + apply simp + apply (case_tac "p'=dest", clarsimp simp:untyped_eq) + apply (clarsimp simp:untyped_eq) + done + +lemma n_next: + "n p = Some cte \ \z. m (s_d_swp p) = Some z \ s_d_swp (mdbNext (cteMDBNode cte)) = mdbNext (cteMDBNode z)" + apply (drule conjI [THEN exI [THEN next_m_n2 [THEN iffD1, unfolded mdb_next_unfold]]]) + apply (rule refl) + apply assumption + done + +lemma n_prevD: + notes if_cong [cong] option.case_cong [cong] + shows "n \ p \ p' \ m \ s_d_swp p \ s_d_swp p'" + apply (cases "p'=0") + apply (simp add: mdb_prev_def) + apply (cases "p=0") + apply (clarsimp simp: mdb_prev_def s_d_swap_def) + apply (rule conjI) + apply clarsimp + apply (simp add: n_dest) + apply (case_tac z) + apply (clarsimp simp: src split: if_split_asm) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: dest) + apply (simp add: dest2_node_def split: if_split_asm) + apply clarsimp + apply (case_tac z) + apply clarsimp + apply (simp add: n_def n'_def modify_map_if dest2_node_def) + apply (insert src dest)[1] + apply (clarsimp split: if_split_asm) + apply (simp add: Invariants_H.valid_dlist_prevD [OF cteSwap_dlist_helper, symmetric]) + apply (simp add: Invariants_H.valid_dlist_prevD [OF dlist, symmetric] next_m_n2) + done + +lemma n_prev: + "n p = Some cte \ \z. m (s_d_swp p) = Some z \ s_d_swp (mdbPrev (cteMDBNode cte)) = mdbPrev (cteMDBNode z)" + apply (drule conjI [THEN exI [THEN n_prevD [unfolded mdb_prev_def]]]) + apply (rule refl) + apply assumption + done + +lemma nullcaps_n: "valid_nullcaps n" +proof - + from valid have "valid_nullcaps m" .. + thus ?thesis using dest_derived src_derived + apply (clarsimp simp: valid_nullcaps_def) + apply (frule n_cap) + apply (frule revokable) + apply (frule badge_n) + apply (frule n_prev) + apply (drule n_next) + apply (insert src dest) + apply (frule_tac x=src in spec) + apply (frule_tac x=dest in spec) + apply (erule_tac x=p in allE) + apply simp + apply (case_tac n) + apply (clarsimp simp: s_d_swap_def nullMDBNode_def AARCH64_H.nullPointer_def split: if_split_asm) + done +qed + +lemma ut_rev_n: "ut_revocable' n" +proof - + from valid have "ut_revocable' m" .. 
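+  (* ut_revocable' carries over: revokable relates the revocable flags of n and m, and weak_derived' caps agree on being untyped. *)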
+ thus ?thesis using dest_derived src_derived src dest + apply (clarsimp simp: ut_revocable'_def) + + apply (frule n_cap) + apply (frule revokable) + by (auto simp: weak_derived'_def dest2_node_def + split: if_split_asm) +qed + +lemma scap_class[simp]: + "capClass scap = capClass src_cap" + using src_derived + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule capClass_Master) + apply simp + done + +lemma dcap_class[simp]: + "capClass dcap = capClass dest_cap" + using dest_derived + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule capClass_Master) + apply simp + done + +lemma class_links_n: "class_links n" +proof - + from valid have "class_links m" + by (simp add: valid_mdb_ctes_def) + thus ?thesis + apply (clarsimp simp: class_links_def) + apply (case_tac cte, case_tac cte', clarsimp) + apply (drule n_cap)+ + apply (simp add: imp_conjL[symmetric]) + apply (subst(asm) conj_commute) + apply (simp add: imp_conjL) + apply (simp add: imp_conjL[symmetric]) + apply (subst(asm) conj_commute) + apply (simp add: imp_conjL next_m_n2) + apply (elim allE, drule(1) mp) + apply (auto simp: s_d_swap_def src dest + split: if_split_asm) + done +qed + +lemma irq_control_n: "irq_control n" + using src dest dest_derived src_derived + apply (clarsimp simp: irq_control_def) + apply (frule revokable) + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: weak_derived'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (clarsimp simp: weak_derived'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (clarsimp simp: weak_derived'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (drule (1) irq_controlD, rule irq_control) + apply clarsimp + apply (drule (1) irq_controlD, rule irq_control) + apply clarsimp + done + +lemma distinct_zombies_m: + "distinct_zombies m" + using valid by auto + +lemma distinct_zombies_n: + "distinct_zombies n" + using distinct_zombies_m + apply (simp add: n_def distinct_zombies_nonCTE_modify_map) + apply (simp add: n'_def distinct_zombies_nonCTE_modify_map) + apply (simp add: modify_map_apply src dest) + apply (erule distinct_zombies_switchE, rule src, rule dest) + apply (cut_tac weak_der_src) + apply (clarsimp simp: weak_der'_def weak_derived'_def) + apply (cut_tac weak_der_dest) + apply (clarsimp simp: weak_der'_def weak_derived'_def) + done + +lemma reply_masters_rvk_fb_m: + "reply_masters_rvk_fb m" + using valid by auto + +lemma reply_masters_rvk_fb_n: + "reply_masters_rvk_fb n" + using reply_masters_rvk_fb_m + weak_der'.isReplyMaster_eq[OF weak_der_src] + weak_der'.isReplyMaster_eq[OF weak_der_dest] + apply (simp add: reply_masters_rvk_fb_def) + apply (frule bspec, rule ranI, rule m_p) + apply (frule bspec, rule ranI, rule mdb_ptr_src.m_p) + apply (clarsimp simp: ball_ran_eq) + apply (case_tac cte, clarsimp) + apply (frule n_cap, frule revokable, frule badge_n) + apply (simp split: if_split_asm) + apply clarsimp + apply (elim allE, 
drule(1) mp) + apply simp + done + +lemma cteSwap_valid_mdb_helper: + assumes untyped_eq: "isUntypedCap src_cap \ scap = src_cap" + "isUntypedCap dest_cap \ dcap = dest_cap" + shows "valid_mdb_ctes n" + using cteSwap_chain cteSwap_dlist_helper cteSwap_valid_badges + cteSwap_chunked caps_contained untyped_mdb_n untyped_inc_n + nullcaps_n ut_rev_n class_links_n irq_control_n + distinct_zombies_n reply_masters_rvk_fb_n + by (auto simp:untyped_eq) + +end + +lemma cteSwap_ifunsafe'[wp]: + "\if_unsafe_then_cap' and ex_cte_cap_to' c1 and ex_cte_cap_to' c2 + and cte_wp_at' (\cte. cte_refs' (cteCap cte) = cte_refs' c) c1 + and cte_wp_at' (\cte. cte_refs' (cteCap cte) = cte_refs' c') c2\ + cteSwap c c1 c' c2 + \\rv. if_unsafe_then_cap'\" + apply (simp add: ifunsafe'_def3 cteSwap_def) + apply (wp | simp add: o_def | rule getCTE_wp)+ + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (subgoal_tac "ex_cte_cap_to' cref s") + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="(id (c1 := c2, c2 := c1)) crefc" in exI) + apply (clarsimp simp: modify_map_def) + apply fastforce + apply (clarsimp dest!: modify_map_K_D + split: if_split_asm) + apply (drule_tac x=cref in spec) + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply fastforce + done + +lemma cteSwap_iflive'[wp]: + "\if_live_then_nonz_cap' + and cte_wp_at' (\cte. zobj_refs' (cteCap cte) = zobj_refs' c) c1 + and cte_wp_at' (\cte. zobj_refs' (cteCap cte) = zobj_refs' c') c2\ + cteSwap c c1 c' c2 + \\rv. if_live_then_nonz_cap'\" + apply (simp add: cteSwap_def) + apply (wp | simp)+ + apply (rule hoare_post_imp, + simp only: if_live_then_nonz_cap'_def imp_conv_disj + ex_nonz_cap_to'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) if_live_then_nonz_capE') + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="(id (c1 := c2, c2 := c1)) cref" in exI) + apply auto + done + +lemmas tcbSlots = + tcbCTableSlot_def tcbVTableSlot_def + tcbReplySlot_def tcbCallerSlot_def tcbIPCBufferSlot_def + +lemma cteSwap_valid_pspace'[wp]: + "\valid_pspace' and + cte_wp_at' (weak_derived' c o cteCap) c1 and + cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c) c1 and + cte_wp_at' (weak_derived' c' o cteCap) c2 and + cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c') c2 and + valid_cap' c and valid_cap' c' and + K (c1 \ c2)\ + cteSwap c c1 c' c2 + \\rv. valid_pspace'\" + unfolding cteSwap_def + apply (simp add: pred_conj_def valid_pspace'_def valid_mdb'_def) + apply (rule hoare_pre) + apply wp + apply (wp getCTE_inv getCTE_wp) + apply (strengthen imp_consequent, strengthen ctes_of_strng) + apply ((wp sch_act_wf_lift valid_queues_lift + cur_tcb_lift updateCap_no_0 updateCap_ctes_of_wp + hoare_vcg_ex_lift getCTE_wp + | simp add: cte_wp_at_ctes_ofI o_def + | rule hoare_drop_imps)+)[6] + apply (clarsimp simp: valid_pspace_no_0[unfolded valid_pspace'_def valid_mdb'_def] + cte_wp_at_ctes_of) + apply (subgoal_tac "c2 \ dom (modify_map + (modify_map + (modify_map + (modify_map (ctes_of s) c1 (cteCap_update (%_. c'))) c2 + (cteCap_update (%_. c))) + (mdbPrev (cteMDBNode cte)) + (cteMDBNode_update (mdbNext_update (%_. c2)))) + (mdbNext (cteMDBNode cte)) + (cteMDBNode_update (mdbPrev_update (%_. 
c2))))") + apply (erule domE) + apply (intro exI) + apply (rule conjI) + apply (clarsimp simp: modify_map_def cte_wp_at_ctes_of) + apply (rule refl) + apply (case_tac cte) + apply (case_tac cteb) + apply (rule_tac dest_node = "cteMDBNode cteb" in + mdb_swap.cteSwap_valid_mdb_helper [simplified const_def]) + apply (rule mdb_swap.intro) + apply (rule mdb_ptr.intro) + apply (erule vmdb.intro) + apply (rule mdb_ptr_axioms.intro) + apply simp + apply (rule mdb_ptr.intro) + apply (erule vmdb.intro) + apply (rule mdb_ptr_axioms.intro) + apply (simp add: cte_wp_at_ctes_of) + apply (erule mdb_swap_axioms.intro) + apply clarsimp + apply (erule weak_derived_sym') + apply clarsimp + apply (erule weak_derived_sym') + apply (simp) + apply clarsimp+ + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunch tcb_at [wp]: cteSwap "tcb_at' t" +crunch sch [wp]: cteSwap "\s. P (ksSchedulerAction s)" +crunch inQ [wp]: cteSwap "obj_at' (inQ d p) tcb" +crunch ksQ [wp]: cteSwap "\s. P (ksReadyQueues s)" +crunch sym [wp]: cteSwap "\s. sym_refs (state_refs_of' s)" +crunch sym_hyp [wp]: cteSwap "\s. sym_refs (state_hyp_refs_of' s)" +crunch cur [wp]: cteSwap "\s. P (ksCurThread s)" +crunch ksCurDomain [wp]: cteSwap "\s. P (ksCurDomain s)" +crunch ksDomSchedule [wp]: cteSwap "\s. P (ksDomSchedule s)" +crunch it [wp]: cteSwap "\s. P (ksIdleThread s)" +crunch tcbDomain_obj_at'[wp]: cteSwap "obj_at' (\tcb. x = tcbDomain tcb) t" + +lemma cteSwap_idle'[wp]: + "\valid_idle'\ + cteSwap c c1 c' c2 + \\rv s. valid_idle' s\" + apply (simp add: cteSwap_def) + apply (wp updateCap_idle' | simp)+ + done + +lemma weak_derived_zobj: + "weak_derived' c c' \ zobj_refs' c' = zobj_refs' c" + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule zobj_refs_Master) + apply simp + done + +lemma weak_derived_cte_refs: + "weak_derived' c c' \ cte_refs' c' = cte_refs' c" + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule cte_refs_Master) + apply simp + done + +lemma weak_derived_capRange_capBits: + "weak_derived' c c' \ capRange c' = capRange c \ capBits c' = capBits c" + apply (clarsimp simp: weak_derived'_def) + apply (metis capRange_Master capBits_Master) + done + +lemma cteSwap_refs[wp]: + "\valid_global_refs' and cte_wp_at' (weak_derived' c \ cteCap) c1 + and cte_wp_at' (weak_derived' c' \ cteCap) c2\ + cteSwap c c1 c' c2 + \\rv. valid_global_refs'\" + apply (simp add: cteSwap_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) valid_global_refsD_with_objSize)+ + apply (drule weak_derived_capRange_capBits)+ + apply (clarsimp simp: global_refs'_def Int_Un_distrib2) + done + +crunch ksInterrupt[wp]: cteSwap "\s. P (ksInterruptState s)" + +crunch ksArch[wp]: cteSwap "\s. P (ksArchState s)" + +crunch typ_at'[wp]: cteSwap "\s. P (typ_at' T p s)" + +lemma cteSwap_valid_irq_handlers[wp]: + "\valid_irq_handlers' and cte_wp_at' (weak_derived' c \ cteCap) c1 + and cte_wp_at' (weak_derived' c' \ cteCap) c2\ + cteSwap c c1 c' c2 + \\rv. 
valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def irq_issued'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=ksInterruptState, OF cteSwap_ksInterrupt]) + apply (simp add: cteSwap_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def ran_def) + apply (clarsimp simp add: modify_map_def split: if_split_asm) + apply (auto simp add: weak_derived'_def isCap_simps) + done + +lemma weak_derived_untypedZeroRange: + "\ weak_derived' c c'; isUntypedCap c' \ c' = c \ + \ untypedZeroRange c = untypedZeroRange c'" + apply (clarsimp simp: untypedZeroRange_def isCap_simps) + apply (clarsimp simp: weak_derived'_def) + done + +lemma cteSwap_urz[wp]: + "\untyped_ranges_zero' and valid_pspace' + and cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c) c1 + and cte_wp_at' (weak_derived' c' o cteCap) c2 + and cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c') c2 + and cte_wp_at' (weak_derived' c \ cteCap) c1 + and K (c1 \ c2)\ + cteSwap c c1 c' c2 + \\rv. untyped_ranges_zero'\" + apply (simp add: cteSwap_def) + apply (rule hoare_pre) + apply (rule untyped_ranges_zero_lift) + apply wp+ + apply clarsimp + apply (erule untyped_ranges_zero_delta[where xs="[c1, c2]"]) + apply (simp add: modify_map_def) + apply clarsimp + apply clarsimp + apply (clarsimp simp: ran_restrict_map_insert cte_wp_at_ctes_of + cteCaps_of_def modify_map_def) + apply (drule(1) weak_derived_untypedZeroRange)+ + apply auto + done + +crunch valid_arch_state'[wp]: cteSwap "valid_arch_state'" + +crunch irq_states'[wp]: cteSwap "valid_irq_states'" + +crunch ksqsL1[wp]: cteSwap "\s. P (ksReadyQueuesL1Bitmap s)" + +crunch ksqsL2[wp]: cteSwap "\s. P (ksReadyQueuesL2Bitmap s)" + +crunch st_tcb_at'[wp]: cteSwap "st_tcb_at' P t" + +crunch vms'[wp]: cteSwap "valid_machine_state'" + +crunch pspace_domain_valid[wp]: cteSwap "pspace_domain_valid" + +crunch ct_not_inQ[wp]: cteSwap "ct_not_inQ" + +crunch ksDomScheduleIdx [wp]: cteSwap "\s. P (ksDomScheduleIdx s)" + +crunches cteSwap + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +lemma cteSwap_invs'[wp]: + "\invs' and valid_cap' c and valid_cap' c' and + ex_cte_cap_to' c1 and ex_cte_cap_to' c2 and + cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c) c1 and + cte_wp_at' (weak_derived' c' o cteCap) c2 and + cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c') c2 and + cte_wp_at' (weak_derived' c \ cteCap) c1 and + K (c1 \ c2)\ + cteSwap c c1 c' c2 + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def pred_conj_def) + apply (rule hoare_pre) + apply (wp hoare_vcg_conj_lift sch_act_wf_lift + valid_queues_lift cur_tcb_lift + valid_irq_node_lift irqs_masked_lift tcb_in_cur_domain'_lift + ct_idle_or_in_cur_domain'_lift2) + apply (clarsimp simp: cte_wp_at_ctes_of weak_derived_zobj weak_derived_cte_refs + weak_derived_capRange_capBits) + done + +lemma capSwap_invs'[wp]: + "\invs' and ex_cte_cap_to' c1 and ex_cte_cap_to' c2\ + capSwapForDelete c1 c2 + \\rv. 
invs'\" + apply (simp add: capSwapForDelete_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (auto dest!: ctes_of_valid') + done + +lemma Zombie_isZombie[simp]: + "isZombie (Zombie x y z)" + by (simp add: isZombie_def) + +lemmas sameObject_sameRegion = sameObjectAs_sameRegionAs + +lemma mdb_next_cap_upd: + "m sl = Some (CTE cap' mdbnode) \ + m (sl \ CTE cap mdbnode) \ p \ p' = m \ p \ p'" + by (simp add: mdb_next_unfold) + +lemma trancl_cap_upd: + "m sl = Some (CTE cap' mdbnode) \ + m (sl \ CTE cap mdbnode) \ p \\<^sup>+ p' = m \ p \\<^sup>+ p'" + apply (rule iffI) + apply (erule trancl_induct) + apply (fastforce simp: mdb_next_cap_upd simp del: fun_upd_apply) + apply (fastforce simp: mdb_next_cap_upd simp del: fun_upd_apply elim: trancl_trans) + apply (erule trancl_induct) + apply (fastforce simp: mdb_next_cap_upd simp del: fun_upd_apply) + apply (fastforce simp: mdb_next_cap_upd simp del: fun_upd_apply elim: trancl_trans) + done + +lemma rtrancl_cap_upd: + "m sl = Some (CTE cap' mdbnode) \ + m (sl \ CTE cap mdbnode) \ p \\<^sup>* p' = m \ p \\<^sup>* p'" + by (simp add: trancl_cap_upd rtrancl_eq_or_trancl) + +lemma no_loops_tranclD: + "\ m \ p \\<^sup>+ p'; no_loops m \ \ \ m \ p' \\<^sup>+ p" + apply clarsimp + apply (drule (1) trancl_trans) + apply (simp add: no_loops_def) + done + +lemmas mdb_chain_0_tranclD = no_loops_tranclD [OF _ mdb_chain_0_no_loops] + +lemma caps_contained_subrange: + "\ caps_contained' m; m sl = Some (CTE cap n'); capRange cap' \ capRange cap; \isUntypedCap cap; \ isUntypedCap cap' \ + \ caps_contained' (modify_map m sl (cteCap_update (%_. cap')))" + apply (simp add: caps_contained'_def modify_map_apply notUntypedRange) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=sl in allE) + apply simp + apply blast + done +lemma ex_cte_cap_to'_cteCap: + "ex_cte_cap_to' p = (\s. \p' c. cteCaps_of s p' = Some c \ p \ cte_refs' c (irq_node' s))" + apply (simp add: ex_cte_cap_to'_def cte_wp_at_ctes_of cteCaps_of_def) + apply (fastforce intro!: ext) + done + +lemma updateCap_ifunsafe': + "\\s. if_unsafe_then_cap' s \ valid_objs' s + \ cte_wp_at' (\cte. \r\cte_refs' (cteCap cte) (irq_node' s) - cte_refs' cap (irq_node' s). + cte_wp_at' (\cte. cteCap cte = NullCap) r s + \ (r = sl \ cap = NullCap)) sl s + \ (cap \ NullCap \ ex_cte_cap_to' sl s)\ + updateCap sl cap + \\rv. if_unsafe_then_cap'\" + apply (simp add: ifunsafe'_def3 o_def) + apply wp + apply clarsimp + apply (subgoal_tac "ex_cte_cap_to' cref s") + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x=crefa in exI) + apply (clarsimp simp: cteCaps_of_def modify_map_def) + apply (rule ccontr, drule bspec, clarsimp, erule(1) conjI) + apply (clarsimp split: if_split_asm) + apply (drule_tac x=cref in spec) + apply (clarsimp dest!: modify_map_K_D + simp: ex_cte_cap_to'_cteCap + split: if_split_asm) + done + +lemma valid_vmdb [elim!]: + "valid_mdb' s \ vmdb (ctes_of s)" + by unfold_locales (simp add: valid_mdb'_def) + +lemma class_links_update: + "\ class_links m; \cte. 
m x = Some cte + \ mdbNext (cteMDBNode cte) = mdbNext (cteMDBNode cte') + \ capClass (cteCap cte) = capClass (cteCap cte') \ + \ class_links (m(x \ cte'))" + apply clarsimp + apply (unfold class_links_def) + apply (erule allEI, erule allEI) + apply (clarsimp simp: mdb_next_unfold split del: if_split split: if_split_asm) + done + +lemma sameRegionAs_Zombie[simp]: + "\ sameRegionAs (Zombie p zb n) cap" + by (simp add: sameRegionAs_def3 isCap_simps) + +lemma descendants_of_subset_untyped: + assumes adj: "\x. ((m x = None) = (m' x = None)) + \ (\cte cte'. m x = Some cte \ m' x = Some cte' + \ (isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte')) + \ (capRange (cteCap cte) = capRange (cteCap cte')) + \ (isUntypedCap (cteCap cte) \ cteCap cte = cteCap cte'))" + and desc: "\x. descendants_of' x m \ descendants_of' x m'" + shows "(untyped_inc' m \ untyped_inc' m') + \ (untyped_mdb' m \ untyped_mdb' m')" +proof + have P: "\x cte. \ m' x = Some cte; isUntypedCap (cteCap cte) \ + \ \node. m x = Some (CTE (cteCap cte) node) \ m' x = Some cte" + apply (cut_tac x=x in adj) + apply clarsimp + apply (case_tac y, simp) + done + + show "untyped_inc' m \ untyped_inc' m'" + unfolding untyped_inc'_def + apply (rule impI, erule allEI, erule allEI) + apply clarsimp + apply (drule P | simp)+ + apply clarsimp + apply (cut_tac x=p in desc) + apply (cut_tac x=p' in desc) + by blast + + have Q: "\x cte. m' x = Some cte + \ \cap node. m x = Some (CTE cap node) + \ isUntypedCap cap = isUntypedCap (cteCap cte) + \ capRange cap = capRange (cteCap cte)" + apply (cut_tac x=x in adj) + apply clarsimp + apply (case_tac y, simp) + done + + show "untyped_mdb' m \ untyped_mdb' m'" + unfolding untyped_mdb'_def + apply (rule impI, erule allEI, erule allEI) + apply clarsimp + apply (drule_tac x=p in P, simp) + apply (drule_tac x=p' in Q, simp) + apply clarsimp + apply (cut_tac x=p in desc) + apply blast + done + +qed + +lemma invalid_Thread_CNode: + "\ isThreadCap cap; isCNodeCap cap'; s \' cap; s \' cap' \ + \ capUntypedPtr cap \ capUntypedPtr cap'" + apply (clarsimp simp: valid_cap'_def isCap_simps) + apply (drule_tac x=0 in spec) + apply (clarsimp simp: obj_at'_def) + done + +lemma Final_notUntyped_capRange_disjoint: + "\ isFinal cap sl (cteCaps_of s); cteCaps_of s sl' = Some cap'; + sl \ sl'; capUntypedPtr cap = capUntypedPtr cap'; capBits cap = capBits cap'; + isThreadCap cap \ isCNodeCap cap; s \' cap; + \ isUntypedCap cap'; \ isArchFrameCap cap'; \ isZombie cap'; + capClass cap' = PhysicalClass; valid_objs' s \ + \ P" + apply (clarsimp simp add: isFinal_def) + apply (drule_tac x=sl' in spec) + apply (clarsimp simp: cteCaps_of_def) + apply (drule(1) ctes_of_valid') + apply (elim disjE isCapDs[elim_format]) + apply (clarsimp simp: valid_cap'_def valid_arch_cap'_def valid_arch_cap_ref'_def bit_simps + obj_at'_def objBits_simps + typ_at'_def ko_wp_at'_def + page_table_at'_def + split: capability.split_asm zombie_type.split_asm + arch_capability.split_asm option.split_asm + dest!: spec[where x=0], + (clarsimp simp: sameObjectAs_def3 isCap_simps)?)+ + done + +lemma capBits_capUntyped_capRange: + "\ capBits cap = capBits cap'; + capUntypedPtr cap = capUntypedPtr cap'; + capClass cap = capClass cap' \ + \ capRange cap = capRange cap'" + by (simp add: capRange_def) + +lemma ztc_phys: + "\ isCNodeCap cap \ isThreadCap cap \ isZombie cap \ + \ capClass cap = PhysicalClass" + by (auto simp: isCap_simps) + +lemma ztc_sameRegion: + "\ isCNodeCap cap \ isThreadCap cap \ isZombie cap \ + \ sameRegionAs cap cap' = sameObjectAs cap cap'" 
+ apply (subgoal_tac "\ isUntypedCap cap \ \ isArchFrameCap cap + \ \ isIRQControlCap cap") + apply (simp add: sameRegionAs_def3 sameObjectAs_def3) + apply (auto simp: isCap_simps) + done + +lemma distinct_zombies_seperate_if_zombiedE: + "\ distinct_zombies m; m x = Some cte; + isUntypedCap (cteCap cte) \ isUntypedCap (cteCap cte'); + isArchFrameCap (cteCap cte) \ isArchFrameCap (cteCap cte'); + capClass (cteCap cte') = capClass (cteCap cte); + capBits (cteCap cte') = capBits (cteCap cte); + capUntypedPtr (cteCap cte') = capUntypedPtr (cteCap cte); + \y cte''. \ m y = Some cte''; x \ y; + isZombie (cteCap cte'); \ isZombie (cteCap cte); + \ isZombie (cteCap cte''); + \ isUntypedCap (cteCap cte''); \ isArchFrameCap (cteCap cte''); + capClass (cteCap cte'') = PhysicalClass; + capUntypedPtr (cteCap cte'') = capUntypedPtr (cteCap cte); + capBits (cteCap cte'') = capBits (cteCap cte) + \ \ False \ + \ distinct_zombies (m (x \ cte'))" + apply (cases "isZombie (cteCap cte') \ \ isZombie (cteCap cte)") + apply (subgoal_tac "\y cte''. m y = Some cte'' \ y \ x + \ capUntypedPtr (cteCap cte'') = capUntypedPtr (cteCap cte) + \ capBits (cteCap cte'') = capBits (cteCap cte) + \ \ isZombie (cteCap cte'')") + apply (erule distinct_zombies_seperateE) + apply (drule_tac x=y in spec, clarsimp) + apply auto[1] + apply (clarsimp simp add: distinct_zombies_def distinct_zombie_caps_def) + apply (drule_tac x=y in spec, drule_tac x=x in spec) + apply (frule isZombie_capClass[where cap="cteCap cte'"]) + apply clarsimp + apply (auto simp: isCap_simps)[1] + apply clarsimp + apply (erule(7) distinct_zombies_unzombieE) + done + +lemma mdb_chunked_update_final: + assumes chunked: "mdb_chunked m" + and slot: "m slot = Some (CTE cap node)" + and Fin1: "\x cte. m x = Some cte \ x \ slot + \ \ sameRegionAs cap (cteCap cte)" + and Fin2: "\x cte. m x = Some cte \ x \ slot + \ \ sameRegionAs cap' (cteCap cte)" + and Fin3: "\x cte. m x = Some cte \ x \ slot + \ sameRegionAs (cteCap cte) cap + \ isUntypedCap (cteCap cte)" + and Fin4: "\x cte. m x = Some cte \ x \ slot + \ sameRegionAs (cteCap cte) cap' + \ isUntypedCap (cteCap cte)" + and capR: "capRange cap = capRange cap'" + shows "mdb_chunked (m (slot \ CTE cap' node))" + (is "mdb_chunked ?m'") +proof - + note trancl[simp] = trancl_cap_upd [where m=m, OF slot] + note rtrancl[simp] = rtrancl_cap_upd [where m=m, OF slot] + + have sameRegionAs: + "\x cte. \ m x = Some cte; x \ slot; sameRegionAs (cteCap cte) cap' \ + \ sameRegionAs (cteCap cte) cap" + apply (frule(2) Fin4) + apply (clarsimp simp: sameRegionAs_def3 capR) + apply (clarsimp simp: isCap_simps) + done + + have is_chunk: + "\x cap n p p'. \ is_chunk m cap p p'; m x = Some (CTE cap n); x \ slot \ \ + is_chunk ?m' cap p p'" + apply (simp add: is_chunk_def split del: if_split) + apply (erule allEI) + apply (clarsimp simp: slot) + apply (frule(1) Fin3, simp) + apply (clarsimp simp: sameRegionAs_def3 capR) + apply (clarsimp simp: isCap_simps) + done + + have not_chunk: "\p. 
\ m \ slot \\<^sup>+ p; p \ slot \ \ \ is_chunk m cap slot p" + apply (simp add: is_chunk_def) + apply (rule_tac x=p in exI) + apply clarsimp + apply (frule(1) Fin1) + apply simp + done + + show ?thesis using chunked + apply (simp add: mdb_chunked_def split del: if_split) + apply (erule allEI, erule allEI) + apply (clarsimp split del: if_split) + apply (clarsimp simp: slot split: if_split_asm) + apply (frule(1) Fin2[OF _ not_sym], simp) + apply (frule(1) sameRegionAs, clarsimp+) + apply (simp add: not_chunk is_chunk) + apply (simp add: is_chunk) + done +qed + +lemma distinct_zombiesD: + "\ m x = Some cte; distinct_zombies m; isZombie (cteCap cte); + y \ x; m y = Some cte'; capBits (cteCap cte') = capBits (cteCap cte); + capUntypedPtr (cteCap cte') = capUntypedPtr (cteCap cte); + \ isUntypedCap (cteCap cte'); \ isArchFrameCap (cteCap cte'); + capClass (cteCap cte') = PhysicalClass \ + \ False" + apply (simp add: distinct_zombies_def distinct_zombie_caps_def) + apply (drule_tac x=x in spec, drule_tac x=y in spec) + apply clarsimp + apply auto + done + +lemma ztc_replace_update_final: + assumes chunk: "mdb_chunked m" + and slot: "m x = Some (CTE cap node)" + and ztc1: "isCNodeCap cap \ isThreadCap cap \ isZombie cap" + and ztc2: "isCNodeCap cap' \ isThreadCap cap' \ isZombie cap'" + and unt: "capUntypedPtr cap = capUntypedPtr cap'" + and bits: "capBits cap = capBits cap'" + and distz: "distinct_zombies m" + and Fin: "isFinal cap x (option_map cteCap \ m)" + and valid: "s \' cap" "s \' cap'" + shows "mdb_chunked (m (x \ CTE cap' node))" +proof (rule mdb_chunked_update_final [OF chunk, OF slot]) + have cases: "capMasterCap cap = capMasterCap cap' + \ isZombie cap \ isZombie cap'" + using bits unt ztc1 ztc2 + invalid_Thread_CNode [OF _ _ valid] + invalid_Thread_CNode [OF _ _ valid(2) valid(1)] + by (auto simp: isCap_simps) + + have Fin': "\y cte. \ m y = Some cte; y \ x \ \ \ sameObjectAs cap (cteCap cte)" + using Fin + apply (clarsimp simp: isFinal_def) + apply (drule_tac x=y in spec) + apply (clarsimp simp: sameObjectAs_def3) + done + + show Fin1: "\y cte. \ m y = Some cte; y \ x \ \ \ sameRegionAs cap (cteCap cte)" + by (clarsimp simp: ztc_sameRegion [OF ztc1] Fin') + + show capR: "capRange cap = capRange cap'" + using unt bits ztc_phys[OF ztc1] ztc_phys[OF ztc2] + by (simp add: capRange_def) + + have capR_neq: "capRange cap' \ {}" + using capAligned_capUntypedPtr [OF valid_capAligned, OF valid(2)] + by (clarsimp simp add: ztc_phys [OF ztc2]) + + have zombie_case_helper: + "\y cte. \ m y = Some cte; y \ x; isZombie cap \ + \ \ sameObjectAs cap' (cteCap cte)" + apply (clarsimp simp: ztc_sameRegion ztc1 ztc2 + elim!: sameObjectAsE) + apply (rule distinct_zombiesD [OF slot distz], simp_all)[1] + apply (drule master_eqE, rule capBits_Master) + apply (simp add: bits) + apply (drule arg_cong[where f=capUntypedPtr]) + apply (simp add: capUntyped_Master unt) + apply (drule arg_cong[where f=isUntypedCap]) + apply (simp add: isCap_Master) + apply (drule arg_cong[where f=isArchFrameCap]) + apply (clarsimp simp add: isCap_Master) + apply (cut_tac ztc2, clarsimp simp: isCap_simps) + apply (drule arg_cong[where f=capClass]) + apply (simp add: capClass_Master ztc_phys[OF ztc2]) + done + + show Fin2: "\y cte. \ m y = Some cte; y \ x \ \ \ sameRegionAs cap' (cteCap cte)" + using capR + apply clarsimp + apply (frule(1) Fin1) + apply (rule disjE [OF cases]) + apply (clarsimp simp: ztc_sameRegion ztc1 ztc2 sameObjectAs_def3) + apply (drule_tac F="\cap. 
(isNullCap cap, isZombie cap, + isUntypedCap cap, isArchFrameCap cap, + capRange cap)" in master_eqE, + simp add: isCap_Master capRange_Master del: isNullCap)+ + using valid apply (auto simp: isCap_Master capRange_Master)[1] + apply (erule disjE) + apply (drule(2) zombie_case_helper) + apply (simp add: ztc_sameRegion ztc1 ztc2) + apply (clarsimp simp: ztc_sameRegion ztc1 ztc2 + elim!: sameObjectAsE) + done + + have untyped_helper: + "\cap cap'. \ isCNodeCap cap' \ isThreadCap cap' \ isZombie cap'; + sameRegionAs cap cap' \ + \ isUntypedCap cap \ sameRegionAs cap' cap" + apply (erule sameRegionAsE) + apply (clarsimp simp: ztc_sameRegion sameObjectAs_def3) + apply (drule_tac F="\cap. (isNullCap cap, isZombie cap, + isUntypedCap cap, isArchFrameCap cap, + capRange cap)" in master_eqE, + simp add: isCap_Master capRange_Master del: isNullCap)+ + apply (auto simp: isCap_Master capRange_Master isCap_simps)[1] + apply simp + apply (clarsimp simp: isCap_simps)+ + done + + show Fin3: "\y cte. \ m y = Some cte; y \ x; sameRegionAs (cteCap cte) cap \ + \ isUntypedCap (cteCap cte)" + apply (frule(1) Fin1) + apply (drule untyped_helper[OF ztc1]) + apply simp + done + + show Fin4: "\y cte. \ m y = Some cte; y \ x; sameRegionAs (cteCap cte) cap' \ + \ isUntypedCap (cteCap cte)" + apply (frule(1) Fin2) + apply (drule untyped_helper[OF ztc2]) + apply simp + done + +qed + +lemma updateCap_untyped_ranges_zero_simple: + "\cte_wp_at' ((\cp. untypedZeroRange cp = untypedZeroRange cap) o cteCap) sl and untyped_ranges_zero'\ + updateCap sl cap + \\_. untyped_ranges_zero'\" + apply (rule hoare_pre, rule untyped_ranges_zero_lift, wp+) + apply (clarsimp simp: modify_map_def cteCaps_of_def cte_wp_at_ctes_of) + apply (simp add: untyped_ranges_zero_inv_def) + apply (rule arg_cong[where f=ran]) + apply (simp add: fun_eq_iff map_comp_def) + done + +crunch tcb_in_cur_domain'[wp]: updateCap "tcb_in_cur_domain' t" + (wp: crunch_wps simp: crunch_simps rule: tcb_in_cur_domain'_lift) + +crunches updateCap + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +lemma make_zombie_invs': + "\\s. invs' s \ s \' cap \ + cte_wp_at' (\cte. isFinal (cteCap cte) sl (cteCaps_of s)) sl s \ + cte_wp_at' (\cte. capClass (cteCap cte) = PhysicalClass \ + capUntypedPtr (cteCap cte) = capUntypedPtr cap \ + capBits (cteCap cte) = capBits cap \ + (\r\cte_refs' (cteCap cte) (irq_node' s) - cte_refs' cap (irq_node' s). + cte_wp_at' (\cte. cteCap cte = NullCap) r s) \ + (isZombie cap \ isThreadCap cap \ isCNodeCap cap) \ + final_matters' (cteCap cte) \ + (isThreadCap (cteCap cte) \ isCNodeCap (cteCap cte) + \ isZombie (cteCap cte)) \ \ isUntypedCap (cteCap cte) \ + (\p \ threadCapRefs (cteCap cte). + st_tcb_at' ((=) Inactive) p s + \ bound_tcb_at' ((=) None) p s + \ obj_at' (Not \ tcbQueued) p s + \ ko_wp_at' (Not \ hyp_live') p s + \ obj_at' (\tcb. tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p s)) sl s\ + updateCap sl cap + \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_irq_handlers'_def irq_issued'_def) + apply (wp updateCap_ctes_of_wp sch_act_wf_lift valid_queues_lift cur_tcb_lift + updateCap_iflive' updateCap_ifunsafe' updateCap_idle' + valid_irq_node_lift ct_idle_or_in_cur_domain'_lift2 + updateCap_untyped_ranges_zero_simple + | simp split del: if_split)+ + apply clarsimp + apply (intro conjI[rotated]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (auto simp: untypedZeroRange_def isCap_simps)[1] + apply (clarsimp simp: modify_map_def ran_def split del: if_split + split: if_split_asm) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of isCap_simps) + apply auto[1] + + apply (clarsimp simp: disj_comms cte_wp_at_ctes_of + dest!: ztc_phys capBits_capUntyped_capRange) + apply (frule(1) capBits_capUntyped_capRange, simp) + apply (clarsimp dest!: valid_global_refsD_with_objSize) + + apply (clarsimp simp: disj_comms cte_wp_at_ctes_of + dest!: ztc_phys capBits_capUntyped_capRange) + apply (frule(1) capBits_capUntyped_capRange, simp) + apply (clarsimp dest!: valid_global_refsD_with_objSize) + + apply (auto elim: if_unsafe_then_capD' simp: isCap_simps)[1] + + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule bspec[where x=sl], simp) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (subgoal_tac "st_tcb_at' ((=) Inactive) p' s + \ obj_at' (Not \ tcbQueued) p' s + \ bound_tcb_at' ((=) None) p' s + \ ko_wp_at' (Not \ hyp_live') p' s + \ obj_at' (\tcb. tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p' s") + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def ko_wp_at'_def live'_def hyp_live'_def) + apply (auto dest!: isCapDs)[1] + apply (clarsimp simp: cte_wp_at_ctes_of disj_ac + dest!: isCapDs) + apply (frule ztc_phys[where cap=cap]) + apply (frule(1) capBits_capUntyped_capRange, simp) + apply (case_tac cte) + apply clarsimp + apply (simp add: valid_mdb_ctes_def) + apply (rule conjI) + apply (subst modify_map_dlist_iff) + apply (case_tac cte, simp) + apply simp + apply (rule conjI) + apply (rule mdb_chain_0_modify_map_inv, simp) + apply simp + apply (rule conjI) + apply (clarsimp simp: modify_map_apply) + apply (simp add: valid_badges_def del: fun_upd_apply) + apply clarify + apply (thin_tac "\ isUntypedCap cap" for cap) + apply (clarsimp simp: isCap_simps split: if_split_asm) + subgoal by ((elim disjE | clarsimp simp: isCap_simps)+) + subgoal by (fastforce simp: isCap_simps sameRegionAs_def3) + apply (clarsimp simp: mdb_next_unfold) + apply (erule_tac x=p in allE) + apply (erule_tac x="mdbNext node" in allE) + subgoal by simp + apply (rule conjI) + apply clarsimp + apply (erule (1) caps_contained_subrange, simp) + subgoal by (clarsimp simp: isCap_simps) + apply (clarsimp simp add: isCap_simps) + apply (subgoal_tac "valid_mdb' s") + prefer 2 + apply (simp add: valid_mdb'_def valid_mdb_ctes_def) + apply (rule conjI) + defer + apply (cut_tac m="ctes_of s" + and m'="(modify_map (ctes_of s) sl + (cteCap_update (\_. 
cap)))" + in descendants_of_subset_untyped) + apply (clarsimp simp: modify_map_def) + apply (rule conjI, clarsimp simp: isCap_simps) + apply clarsimp + apply (simp only: modify_map_apply) + apply (rule use_update_ztc_two [OF descendants_of_update_ztc]) + apply (rule exEI, rule vmdb.isFinal_untypedParent) + apply (rule vmdb.intro, simp add: valid_mdb'_def) + apply assumption + apply (simp add: cteCaps_of_def) + apply (clarsimp simp: isCap_simps) + apply assumption + apply (clarsimp simp: isCap_simps) + apply assumption + apply (simp add: disj_comms) + apply (simp add: capRange_def) + apply (simp add: capRange_def) + apply (rule valid_capAligned) + apply (erule(1) ctes_of_valid') + apply (simp add: disj_comms) + apply clarsimp + apply (erule(1) mdb_chain_0_no_loops) + apply (erule (3) isFinal_no_descendants) + apply (clarsimp simp: modify_map_apply) + apply (rule conjI, clarsimp simp: valid_nullcaps_def isCap_simps) + apply (rule conjI, clarsimp simp: ut_revocable'_def isCap_simps) + apply (rule conjI, clarsimp elim!: class_links_update) + apply (rule conjI) + apply (erule(1) distinct_zombies_seperate_if_zombiedE) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply simp + apply simp + apply simp + apply (erule_tac sl'=y in Final_notUntyped_capRange_disjoint, + simp add: cteCaps_of_def, + simp_all add: disj_ac)[1] + apply (erule(1) ctes_of_valid_cap') + apply (rule conjI) + apply (subgoal_tac "cap \ IRQControlCap") + apply (clarsimp simp: irq_control_def) + apply (clarsimp simp: isCap_simps) + apply (simp add: reply_masters_rvk_fb_def, erule ball_ran_fun_updI) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: modify_map_apply) + apply (erule(1) ztc_replace_update_final, simp_all) + apply (simp add: cteCaps_of_def) + apply (erule(1) ctes_of_valid_cap') + done + +lemma isFinal_Zombie: + "isFinal (Zombie p' b n) p cs" + by (simp add: isFinal_def sameObjectAs_def isCap_simps) + +lemma shrink_zombie_invs': + "\invs' and (K (isZombie cap)) + and cte_wp_at' (\cte. cteCap cte = Zombie (capZombiePtr cap) (capZombieType cap) (capZombieNumber cap + 1)) sl + and cte_wp_at' (\cte. cteCap cte = NullCap) (capZombiePtr cap + 2^cteSizeBits * (of_nat (capZombieNumber cap)))\ + updateCap sl cap + \\rv. invs'\" + apply (wp make_zombie_invs') + apply (clarsimp simp: cte_wp_at_ctes_of isFinal_Zombie isCap_simps final_matters'_def) + apply (rule context_conjI) + apply (drule ctes_of_valid', clarsimp) + apply (clarsimp simp: valid_cap'_def capAligned_def) + apply clarsimp + apply (rule ccontr, erule notE, rule imageI) + apply (drule word_le_minus_one_leq) + apply (rule ccontr, simp add: linorder_not_less mult.commute mult.left_commute shiftl_t2n) + done + +lemma cte_wp_at_cteCap_norm: + "(cte_wp_at' (\c. P (cteCap c)) p s) = (\cap. cte_wp_at' (\c. cteCap c = cap) p s \ P cap)" + by (auto simp add: cte_wp_at'_def) + +lemma cte_wp_at_conj_eq': + "cte_wp_at' (\c. P c \ Q c) p s = (cte_wp_at' P p s \ cte_wp_at' Q p s)" + by (auto simp add: cte_wp_at'_def) + +lemma cte_wp_at_disj_eq': + "cte_wp_at' (\c. 
P c \ Q c) p s = (cte_wp_at' P p s \ cte_wp_at' Q p s)" + by (auto simp add: cte_wp_at'_def) + +lemma valid_Zombie_cte_at': + "\ s \' Zombie p zt m; n < zombieCTEs zt \ \ cte_at' (p + (of_nat n * 2^cteSizeBits)) s" + apply (clarsimp simp: valid_cap'_def split: zombie_type.split_asm) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (subgoal_tac "tcb_cte_cases (of_nat n * 2^cteSizeBits) \ None") + apply clarsimp + apply (erule(2) cte_wp_at_tcbI') + apply fastforce + apply simp + apply (thin_tac "a < word_bits" for a) + apply ((clarsimp | erule less_handy_casesE | fastforce simp: objBits_defs)+)[1] + apply (drule spec[where x="of_nat n"]) + apply (subst(asm) less_mask_eq) + apply (rule order_less_le_trans) + apply (erule of_nat_mono_maybe [rotated]) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def) + apply simp + apply simp + apply (clarsimp simp: mult.commute mult.left_commute real_cte_at') + done + +lemma cteSwap_cte_wp_cteCap: + "\\s. p \ sl \ + (p = p' \ cte_at' p' s \ P cap') \ + (p \ p' \ cte_wp_at' (\c. P (cteCap c)) p s)\ + cteSwap cap p' cap' sl + \\rv. cte_wp_at' (\c. P (cteCap c)) p\" + apply (simp add: cteSwap_def) + apply (rule hoare_pre) + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' + hoare_vcg_all_lift) + apply simp + apply (wp hoare_drop_imps)[1] + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ + apply simp + apply (clarsimp simp: o_def) + done + +lemma capSwap_cte_wp_cteCap: + "\\s. p \ sl \ + (p = p' \ cte_wp_at' (\c. P (cteCap c)) sl s) \ + (p \ p' \ cte_wp_at' (\c. P (cteCap c)) p s)\ + capSwapForDelete p' sl + \\rv. cte_wp_at' (\c. P (cteCap c)) p\" + apply(simp add: capSwapForDelete_def) + apply(wp) + apply(rule cteSwap_cte_wp_cteCap) + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ + apply(clarsimp) + apply(rule conjI) + apply(simp add: cte_at_cte_wp_atD) + apply(clarsimp simp: cte_wp_at_cteCap_norm) + apply(unfold cte_at'_def cte_wp_at'_def) + apply(clarsimp) + apply(clarsimp) + done + +lemma cteSwap_cteCaps_of [wp]: + "\\s. P ((cteCaps_of s) ( a := Some cap2, b := Some cap1 ))\ + cteSwap cap1 a cap2 b + \\rv s. P (cteCaps_of s)\" + apply (simp add: cteSwap_def) + apply (wp getCTE_cteCap_wp | simp)+ + apply (clarsimp split: option.split) + apply (erule rsubst[where P=P], intro ext) + apply (clarsimp simp: modify_map_def split: if_split_asm) + done + +lemma capSwap_cteCaps_of [wp]: + notes if_cong [cong] + shows + "\\s. P ((cteCaps_of s) \ (id ( a := b, b := a )))\ + capSwapForDelete a b + \\rv s. P (cteCaps_of s)\" + apply(simp add: capSwapForDelete_def) + apply(wp getCTE_wp getCTE_cteCap_wp) + apply(clarsimp) + apply(rule conjI) + prefer 2 + apply(clarsimp simp: o_def) + apply(clarsimp simp: cte_wp_at_ctes_of o_def) + apply(erule rsubst [where P=P]) + apply(rule ext) + apply(clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + done + +lemma cte_wp_final_cteCaps_of: + "(cte_wp_at' (\c. isFinal (cteCap c) p (cteCaps_of s)) p s) = + (\cap. cteCaps_of s p = Some cap \ isFinal cap p (cteCaps_of s))" + by (auto simp add: cteCaps_of_def cte_wp_at_ctes_of) + +lemma updateCap_cap_to': + "\\s. ex_cte_cap_to' p s \ + cte_wp_at' (\cte. p \ cte_refs' (cteCap cte) (irq_node' s) - cte_refs' cap (irq_node' s)) sl s\ + updateCap sl cap + \\rv. 
ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_cteCap) + apply wp + apply clarsimp + apply (rule_tac x=p' in exI) + apply (clarsimp simp: modify_map_def cte_wp_at_ctes_of cteCaps_of_def) + done + +lemma cteDeleteOne_cap_to'[wp]: + "\ex_cte_cap_wp_to' P p\ cteDeleteOne slot \\rv. ex_cte_cap_wp_to' P p\" + apply (simp add: ex_cte_cap_wp_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node'[OF cteDeleteOne_irq_node']) + apply (wp hoare_vcg_ex_lift cteDeleteOne_cte_wp_at_preserved) + apply (case_tac cap, simp_all add: finaliseCap_def Let_def isCap_simps)[1] + apply simp + done + +lemmas setNotification_cap_to'[wp] + = ex_cte_cap_to'_pres [OF setNotification_cte_wp_at' set_ntfn_ksInterrupt] + +lemmas setEndpoint_cap_to'[wp] + = ex_cte_cap_to'_pres [OF setEndpoint_cte_wp_at' setEndpoint_ksInterruptState] + +lemmas setThreadState_cap_to'[wp] + = ex_cte_cap_to'_pres [OF setThreadState_cte_wp_at' setThreadState_ksInterruptState] + +crunch cap_to'[wp]: cancelSignal "ex_cte_cap_wp_to' P p" + (simp: crunch_simps wp: crunch_wps) + +lemma cancelIPC_cap_to'[wp]: + "\ex_cte_cap_wp_to' P p\ cancelIPC t \\rv. ex_cte_cap_wp_to' P p\" + apply (simp add: cancelIPC_def Let_def) + apply (rule bind_wp [OF _ gts_sp']) + apply (case_tac state, simp_all add: getThreadReplySlot_def locateSlot_conv) + apply (wp ex_cte_cap_to'_pres [OF threadSet_cte_wp_at'] + | simp add: o_def if_apply_def2 + | wpcw | wp (once) hoare_drop_imps)+ + done + +lemma emptySlot_deletes [wp]: + "\\\ emptySlot p opt \\rv s. cte_wp_at' (\c. cteCap c = NullCap) p s\" + apply (simp add: emptySlot_def case_Null_If) + apply (subst tree_cte_cteCap_eq [unfolded o_def]) + apply (wp getCTE_cteCap_wp opt_return_pres_lift) + apply (clarsimp split: option.splits simp: modify_map_def) + done + +lemma capCylicZombieD[dest!]: + "capCyclicZombie cap slot \ \zb n. cap = Zombie slot zb n" + by (clarsimp simp: capCyclicZombie_def split: capability.split_asm) + +crunches finaliseCap + for typ_at'[wp]: "\s. P (typ_at' T p s)" + (wp: getASID_wp crunch_wps simp: crunch_simps) + +lemma finaliseSlot_abort_cases': + "s \ \\\ + finaliseSlot' sl ex + \\rv s. fst rv \ (\ ex \ cte_wp_at' (\cte. isZombie (cteCap cte) + \ capZombiePtr (cteCap cte) = sl) sl s)\,\\\\" +proof (induct rule: finalise_spec_induct) + case (1 slot exp) + show ?case + apply (subst finaliseSlot'_simps_ext) + apply (simp only: split_def) + apply (rule hoare_pre_spec_validE) + apply (wp | simp)+ + apply ((wp "1.hyps" updateCap_cte_wp_at_cases)+, + (assumption | rule refl | simp only: split_def fst_conv snd_conv)+) + apply (wp | simp)+ + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift[where Q="\rv. cte_at' slot"]) + apply (wp typ_at_lifts [OF finaliseCap_typ_at'])[1] + apply (rule finaliseCap_cases) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp getCTE_wp isFinalCapability_inv | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done +qed + +lemmas finaliseSlot_abort_cases + = use_spec(2) [OF finaliseSlot_abort_cases', + folded validE_R_def finaliseSlot_def] + +crunch it [wp]: capSwapForDelete "\s. P (ksIdleThread s)" + +lemma cteDelete_delete_cases: + "\\\ + cteDelete slot e + \\rv. cte_wp_at' (\c. 
cteCap c = NullCap + \ \ e \ isZombie (cteCap c) + \ capZombiePtr (cteCap c) = slot) slot\, -" + apply (simp add: cteDelete_def whenE_def split_def) + apply wp + apply (rule hoare_strengthen_post [OF emptySlot_deletes]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply wp+ + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + done + +lemmas cteDelete_deletes = cteDelete_delete_cases[where e=True, simplified] + +lemma cteSwap_cap_to'[wp]: + "\ex_cte_cap_to' p\ capSwapForDelete c1 c2 \\rv. ex_cte_cap_to' p\" + apply (simp add: cteSwap_def capSwapForDelete_def ex_cte_cap_to'_cteCap) + apply (wp getCTE_cteCap_wp | simp add: o_def)+ + apply (clarsimp split: option.splits) + apply (rule_tac x="(id (c1 := c2, c2 := c1)) p'" in exI) + apply (clarsimp simp: modify_map_def | rule conjI)+ + done + +lemma zombieCTEs_le: + "zombieCTEs zb \ 2 ^ zBits zb" + by (cases zb, simp_all add: objBits_defs) + +lemma valid_cap'_handy_bits: + "s \' Zombie r zb n \ n \ 2 ^ (zBits zb)" + "s \' Zombie r zb n \ n < 2 ^ word_bits" + "\ s \' Zombie r zb n; n \ 0 \ \ of_nat n - 1 < (2 ^ (zBits zb) :: machine_word)" + "s \' Zombie r zb n \ zBits zb < word_bits" + apply (insert zombieCTEs_le[where zb=zb], + simp_all add: valid_cap'_def) + apply (clarsimp elim!: order_le_less_trans) + apply (clarsimp simp: word_less_nat_alt) + apply (subgoal_tac "n \ unats (len_of TYPE (machine_word_len))") + apply (subst unat_minus_one) + apply (drule of_nat_mono_maybe[rotated, where 'a=machine_word_len]) + apply (simp add: unats_def) + apply simp + apply (simp add: word_unat.Abs_inverse) + apply (simp only: unats_def mem_simps) + apply (erule order_le_less_trans) + apply (erule order_le_less_trans) + apply (rule power_strict_increasing) + apply (simp only: word_bits_len_of) + apply simp + done + +lemma ex_Zombie_to: + "\ ctes_of s p = Some cte; cteCap cte = Zombie p' b n; + n \ 0; valid_objs' s \ + \ ex_cte_cap_to' p' s" + apply (simp add: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (intro exI, rule conjI, assumption) + apply (simp add: image_def) + apply (rule bexI[where x=0]) + apply simp + apply simp + apply (frule(1) ctes_of_valid') + apply (drule of_nat_mono_maybe[rotated, where 'a=machine_word_len]) + apply (simp only: word_bits_len_of) + apply (erule valid_cap'_handy_bits) + apply simp + done + +lemma ex_Zombie_to2: + "\ ctes_of s p = Some cte; cteCap cte = Zombie p' b n; + n \ 0; valid_objs' s \ + \ ex_cte_cap_to' (p' + (2^cteSizeBits * of_nat n - 2^cteSizeBits)) s" + apply (simp add: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (intro exI, rule conjI, assumption) + apply (simp add: image_def shiftl_t2n) + apply (rule bexI[where x="of_nat n - 1"]) + apply (fastforce simp: objBits_defs) + apply (subgoal_tac "n \ unats (len_of TYPE(machine_word_len))") + apply (simp add: word_less_nat_alt) + apply (subst unat_minus_one) + apply (simp add: word_neq_0_conv) + apply (drule of_nat_mono_maybe[rotated, where 'a=machine_word_len]) + apply (simp add: unats_def) + apply simp + apply (simp add: word_unat.Abs_inverse) + apply (simp only: unats_def mem_simps word_bits_len_of) + apply (drule(1) ctes_of_valid', simp) + apply (erule valid_cap'_handy_bits) + done + +declare word_to_1_set[simp] + +lemmas finalise_spec_induct2 = finaliseSlot'.induct[where P= + "\sl exp s. P sl (finaliseSlot' sl exp) (\P. exp \ P) s" for P] + +lemma cteSwap_sch_act_simple[wp]: + "\sch_act_simple\ cteSwap cap1 slot1 cap2 slot2 \\_. 
sch_act_simple\" + by (simp add: cteSwap_def sch_act_simple_def, wp) + +crunch sch_act_simple[wp]: capSwapForDelete sch_act_simple + +lemma updateCap_sch_act_simple[wp]: + "\sch_act_simple\ updateCap slot newCap \\_. sch_act_simple\" + by (simp add: sch_act_simple_def, wp) + +definition + "no_cte_prop P = (if \sl cte. \P\ setCTE sl cte \\_. P\ then P else \)" + +lemma no_cte_prop_top: + "no_cte_prop \ = \" + by (simp add: no_cte_prop_def) + +definition + "finalise_prop_stuff P + = ((\s f. P (ksWorkUnitsCompleted_update f s) = P s) + \ irq_state_independent_H P + \ (\s f. P (gsUntypedZeroRanges_update f s) = P s) + \ (\s f. P (ksInterruptState_update f s) = P s) + \ (\s f. P (ksMachineState_update (irq_state_update f) s) = P s) + \ (\s f. P (ksMachineState_update (irq_masks_update f) s) = P s))" + +lemma setCTE_no_cte_prop: + "\no_cte_prop P\ setCTE sl cte \\_. no_cte_prop P\" + by (simp add: no_cte_prop_def hoare_vcg_prop) + +lemma setInterruptState_no_cte_prop: + "\no_cte_prop P and K (finalise_prop_stuff P)\ setInterruptState st \\_. no_cte_prop P\" + apply (simp add: setInterruptState_def, wp) + apply (clarsimp simp: finalise_prop_stuff_def no_cte_prop_def) + done + +lemma dmo_maskInterrupt_no_cte_prop: + "\no_cte_prop P and K (finalise_prop_stuff P)\ + doMachineOp (maskInterrupt m irq) \\_. no_cte_prop P\" + apply (wp dmo_maskInterrupt) + apply (clarsimp simp: no_cte_prop_def finalise_prop_stuff_def) + done + +lemma updateTrackedFreeIndex_no_cte_prop[wp]: + "\no_cte_prop P and K (finalise_prop_stuff P)\ + updateTrackedFreeIndex ptr idx \\_. no_cte_prop P\" + apply (simp add: updateTrackedFreeIndex_def getSlotCap_def) + apply (wp getCTE_wp') + apply (clarsimp simp: no_cte_prop_def finalise_prop_stuff_def) + done + +crunches emptySlot, capSwapForDelete + for no_cte_prop[wp]: "no_cte_prop P" + (ignore: doMachineOp wp: dmo_maskInterrupt_no_cte_prop) + +lemma reduceZombie_invs'': + assumes fin: + "\s'' rv. \\ (isZombie cap \ capZombieNumber cap = 0); \ (isZombie cap \ \ exposed); isZombie cap \ exposed; + (Inr rv, s'') + \ fst ((withoutPreemption $ locateSlotCap cap (fromIntegral (capZombieNumber cap - 1))) st)\ + \ s'' \ \\s. no_cte_prop Q s \ invs' s \ sch_act_simple s + \ cte_wp_at' (\cte. isZombie (cteCap cte)) slot s + \ ex_cte_cap_to' rv s\ + finaliseSlot rv False + \\rva s. no_cte_prop Q s \ invs' s \ sch_act_simple s + \ (fst rva \ cte_wp_at' (\cte. removeable' rv s (cteCap cte)) rv s) + \ (snd rva \ NullCap \ post_cap_delete_pre' (snd rva) rv (cteCaps_of s))\, + \\rv s. no_cte_prop Q s \ invs' s \ sch_act_simple s\" + assumes stuff: + "finalise_prop_stuff Q" + shows + "st \ \\s. + no_cte_prop Q s \ invs' s \ sch_act_simple s + \ (exposed \ ex_cte_cap_to' slot s) + \ cte_wp_at' (\cte. cteCap cte = cap) slot s + \ (exposed \ p = slot \ + cte_wp_at' (\cte. (P and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie p zb n + \ P cp \ (isZombie cp \ capZombiePtr cp \ p))) p s)\ + reduceZombie cap slot exposed + \\rv s. + no_cte_prop Q s \ invs' s \ sch_act_simple s + \ (exposed \ ex_cte_cap_to' slot s) + \ (exposed \ p = slot \ + cte_wp_at' (\cte. (P and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie p zb n + \ P cp \ (isZombie cp \ capZombiePtr cp \ p))) p s)\, + \\rv s. 
no_cte_prop Q s \ invs' s \ sch_act_simple s\" + apply (unfold reduceZombie_def cteDelete_def Let_def + split_def fst_conv snd_conv haskell_fail_def + case_Zombie_assert_fold) + apply (rule hoare_pre_spec_validE) + apply (wp hoare_vcg_disj_lift | simp)+ + apply (wp capSwap_cte_wp_cteCap getCTE_wp' | simp)+ + apply (wp shrink_zombie_invs')[1] + apply (wp | simp)+ + apply (rule getCTE_wp) + apply (wp | simp)+ + apply (rule_tac Q="\cte s. rv = capZombiePtr cap + + of_nat (capZombieNumber cap) * 2^cteSizeBits - 2^cteSizeBits + \ cte_wp_at' (\c. c = cte) slot s \ invs' s + \ no_cte_prop Q s \ sch_act_simple s" + in hoare_post_imp) + apply (clarsimp simp: cte_wp_at_ctes_of mult.commute mult.left_commute dest!: isCapDs) + apply (simp add: field_simps) + apply (wp getCTE_cte_wp_at)+ + apply simp + apply wp + apply (rule spec_strengthen_postE) + apply (rule_tac Q="\fc s. rv = capZombiePtr cap + + of_nat (capZombieNumber cap) * 2^cteSizeBits - 2^cteSizeBits" + in spec_valid_conj_liftE1) + apply wp[1] + apply (rule fin, assumption+) + apply (clarsimp simp: stuff) + apply (simp add: locateSlot_conv) + apply ((wp | simp)+)[2] + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp dest!: isCapDs) + apply (rule conjI) + apply (erule(1) ex_Zombie_to) + apply clarsimp + apply clarsimp + apply clarsimp + apply (clarsimp dest!: isCapDs) + apply (fastforce dest!: ex_Zombie_to2 simp: cte_level_bits_def objBits_defs) + done + +lemmas preemptionPoint_invR = + valid_validE_R [OF preemptionPoint_inv] + +lemmas preemptionPoint_invE = + valid_validE_E [OF preemptionPoint_inv] + +lemma finaliseSlot_invs': + assumes finaliseCap: + "\cap final sl. \no_cte_prop Pr and invs' and sch_act_simple + and cte_wp_at' (\cte. cteCap cte = cap) sl\ finaliseCap cap final False \\rv. no_cte_prop Pr\" + and stuff: "finalise_prop_stuff Pr" + shows + "st \ \\s. + no_cte_prop Pr s \ invs' s \ sch_act_simple s + \ (exposed \ ex_cte_cap_to' slot s) + \ (exposed \ p = slot \ + cte_wp_at' (\cte. (P and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie p zb n + \ P cp \ (isZombie cp \ capZombiePtr cp \ p))) p s)\ + finaliseSlot' slot exposed + \\rv s. + no_cte_prop Pr s \ invs' s \ sch_act_simple s + \ (exposed \ p = slot \ + cte_wp_at' (\cte. (P and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie p zb n + \ P cp \ (isZombie cp \ capZombiePtr cp \ p))) p s) + \ (fst rv \ cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s) + \ (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\, + \\rv s. no_cte_prop Pr s \ invs' s \ sch_act_simple s\" +proof (induct arbitrary: P p rule: finalise_spec_induct2) + case (1 sl exp s Q q) + let ?P = "\cte. (Q and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie q zb n + \ Q cp \ (isZombie cp \ capZombiePtr cp \ q))" + note hyps = "1.hyps"[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def]] + have Q: "\x y n. {x :: machine_word} = (\x. y + (x << cteSizeBits)) ` {0 ..< n} \ n = 1" + apply (simp only: shiftl_t2n mult_ac) + apply (drule sym) + apply (case_tac "1 < n") + apply (frule_tac x = "y + 0 * 2^cteSizeBits" in eqset_imp_iff) + apply (frule_tac x = "y + 1 * 2^cteSizeBits" in eqset_imp_iff) + apply (subst(asm) imageI, simp) + apply (erule order_less_trans[rotated], simp) + apply (subst(asm) imageI, simp) + apply simp + apply (simp add: linorder_not_less objBits_defs) + apply (case_tac "n < 1") + apply simp + apply simp + done + have R: "\n. n \ 0 \ {0 .. 
n - 1} = {0 ..< n :: machine_word}" + apply safe + apply simp + apply (erule(1) word_leq_minus_one_le) + apply simp + apply (erule word_le_minus_one_leq) + done + have final_IRQHandler_no_copy: + "\irq sl sl' s. \ isFinal (IRQHandlerCap irq) sl (cteCaps_of s); sl \ sl' \ \ cteCaps_of s sl' \ Some (IRQHandlerCap irq)" + apply (clarsimp simp: isFinal_def sameObjectAs_def2 isCap_simps) + apply fastforce + done + from stuff have stuff': + "finalise_prop_stuff (no_cte_prop Pr)" + by (simp add: no_cte_prop_def finalise_prop_stuff_def) + note stuff'[unfolded finalise_prop_stuff_def, simp] + show ?case + apply (subst finaliseSlot'.simps) + apply (fold reduceZombie_def[unfolded cteDelete_def finaliseSlot_def]) + apply (unfold split_def) + apply (rule hoare_pre_spec_validE) + apply (wp | simp)+ + apply (wp make_zombie_invs' updateCap_cte_wp_at_cases + hoare_vcg_disj_lift)[1] + apply (wp hyps) + apply ((wp preemptionPoint_invE preemptionPoint_invR + | clarsimp simp: sch_act_simple_def + | simp cong: kernel_state.fold_congs machine_state.fold_congs)+)[1] + apply (rule spec_strengthen_postE [OF reduceZombie_invs''[OF _ stuff]]) + prefer 2 + apply fastforce + apply (rule hoare_pre_spec_validE, + rule spec_strengthen_postE) + apply (unfold finaliseSlot_def)[1] + apply (rule hyps[where P="\" and p=sl], (assumption | rule refl)+) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp, simp) + apply (wp make_zombie_invs' updateCap_ctes_of_wp updateCap_cap_to' + hoare_vcg_disj_lift updateCap_cte_wp_at_cases)+ + apply simp + apply (rule hoare_strengthen_post) + apply (rule_tac Q="\fin s. invs' s \ sch_act_simple s \ s \' (fst fin) + \ (exp \ ex_cte_cap_to' sl s) + \ no_cte_prop Pr s + \ cte_wp_at' (\cte. cteCap cte = cteCap rv) sl s + \ (q = sl \ exp \ cte_wp_at' (?P) q s)" + in hoare_vcg_conj_lift) + apply (wp hoare_vcg_disj_lift finaliseCap finaliseCap_invs[where sl=sl]) + apply (rule finaliseCap_zombie_cap') + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_cte_refs) + apply (rule finaliseCap_replaceable[where slot=sl]) + apply clarsimp + apply (erule disjE[where P="F \ G" for F G]) + apply (clarsimp simp: capRemovable_def cte_wp_at_ctes_of) + apply (rule conjI, clarsimp) + apply (case_tac b; case_tac "cteCap rv"; simp add: post_cap_delete_pre'_def) + apply (clarsimp simp: final_IRQHandler_no_copy) + apply (drule (1) ctes_of_valid'[OF _ invs_valid_objs']) + apply (clarsimp simp: valid_cap'_def) + apply (clarsimp dest!: isCapDs) + apply (rule conjI) + apply (clarsimp simp: capRemovable_def) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI, clarsimp) + apply (case_tac "cteCap rv", + simp_all add: isCap_simps removeable'_def + fun_eq_iff[where f="cte_refs' cap" for cap] + fun_eq_iff[where f=tcb_cte_cases] + tcb_cte_cases_def + word_neq_0_conv[symmetric])[1] + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI, clarsimp) + apply (case_tac "cteCap rv", + simp_all add: isCap_simps removeable'_def + fun_eq_iff[where f="cte_refs' cap" for cap] + fun_eq_iff[where f=tcb_cte_cases] + tcb_cte_cases_def cteSizeBits_def)[1] + apply (frule Q[unfolded cteSizeBits_def, simplified]) + apply clarsimp + apply (simp add: mask_def) + apply (subst(asm) R) + apply (drule valid_capAligned [OF ctes_of_valid']) + apply fastforce + apply (simp add: capAligned_def word_bits_def) + apply (frule Q[unfolded cteSizeBits_def, simplified]) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of capRemovable_def) + apply (subgoal_tac "final_matters' (cteCap rv) \ \ 
isUntypedCap (cteCap rv)") + apply clarsimp + apply (rule conjI) + apply clarsimp + apply clarsimp + apply (case_tac "cteCap rv", + simp_all add: isCap_simps final_matters'_def)[1] + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI, clarsimp simp: removeable'_def) + apply (clarsimp simp: conj_comms) + apply (rule conjI, erule ctes_of_valid', clarsimp) + apply (rule conjI, clarsimp) + apply fastforce + done +qed + +lemma finaliseSlot_invs'': + "\\s. invs' s \ sch_act_simple s \ (\ exposed \ ex_cte_cap_to' slot s)\ + finaliseSlot slot exposed + \\rv s. invs' s \ sch_act_simple s \ (fst rv \ cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s) + \ (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\, + \\rv s. invs' s \ sch_act_simple s\" + unfolding finaliseSlot_def + apply (rule hoare_pre, rule hoare_strengthen_postE, rule use_spec) + apply (rule finaliseSlot_invs'[where P="\" and Pr="\" and p=slot]) + apply (simp_all add: no_cte_prop_top) + apply wp + apply (simp add: finalise_prop_stuff_def) + apply clarsimp + done + +lemma finaliseSlot_invs: + "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. invs'\" + apply (rule validE_valid, rule hoare_strengthen_postE) + apply (rule finaliseSlot_invs'') + apply simp+ + done + +lemma finaliseSlot_sch_act_simple: + "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. sch_act_simple\" + apply (rule validE_valid, rule hoare_strengthen_postE) + apply (rule finaliseSlot_invs'') + apply simp+ + done + +lemma finaliseSlot_removeable: + "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ + finaliseSlot slot e + \\rv s. fst rv \ cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s\,-" + apply (rule validE_validE_R, rule hoare_strengthen_postE) + apply (rule finaliseSlot_invs'') + apply simp+ + done + +lemma finaliseSlot_irqs: + "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ + finaliseSlot slot e + \\rv s. (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\,-" + apply (rule validE_validE_R, rule hoare_strengthen_postE) + apply (rule finaliseSlot_invs'') + apply simp+ + done + +lemma finaliseSlot_cte_wp_at: + "\ \cap. P cap \ isZombie cap; p \ slot \ \ + \\s. invs' s \ sch_act_simple s \ ex_cte_cap_to' slot s + \ cte_wp_at' (\cte. P (cteCap cte)) p s\ + finaliseSlot slot False + \\rv s. cte_wp_at' (\cte. P (cteCap cte) \ + (\zb n cp. cteCap cte = Zombie p zb n \ + P cp \ capZombiePtr cp \ p)) p s\,-" + unfolding finaliseSlot_def + apply (rule hoare_pre, unfold validE_R_def) + apply (rule hoare_strengthen_postE, rule use_spec) + apply (rule finaliseSlot_invs'[where P=P and Pr=\ and p=p]) + apply (simp_all add: no_cte_prop_top finalise_prop_stuff_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemmas reduceZombie_invs' + = reduceZombie_invs''[where Q=\, simplified no_cte_prop_top simp_thms + finalise_prop_stuff_def irq_state_independent_H_def, + OF drop_spec_validE TrueI, + OF hoare_weaken_preE, + OF finaliseSlot_invs'', + THEN use_specE'] + +lemma reduceZombie_invs: + "\\s. invs' s \ sch_act_simple s \ (\ exposed \ ex_cte_cap_to' slot s) + \ cte_wp_at' (\cte. cteCap cte = cap) slot s\ + reduceZombie cap slot exposed + \\rv s. 
invs' s\" + apply (rule validE_valid) + apply (rule hoare_strengthen_postE, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) + apply clarsimp+ + done + +lemma reduceZombie_cap_to: + "\\s. invs' s \ sch_act_simple s \ (\ exposed \ ex_cte_cap_to' slot s) + \ cte_wp_at' (\cte. cteCap cte = cap) slot s\ + reduceZombie cap slot exposed + \\rv s. \ exposed \ ex_cte_cap_to' slot s\, -" + apply (rule validE_validE_R, rule hoare_pre, + rule hoare_strengthen_postE) + apply (rule reduceZombie_invs'[where p=slot]) + apply clarsimp+ + done + +lemma reduceZombie_sch_act_simple: + "\\s. invs' s \ sch_act_simple s \ (\ exposed \ ex_cte_cap_to' slot s) + \ cte_wp_at' (\cte. cteCap cte = cap) slot s\ + reduceZombie cap slot exposed + \\rv. sch_act_simple\" + apply (rule validE_valid, rule hoare_pre, + rule hoare_strengthen_postE) + apply (rule reduceZombie_invs'[where p=slot]) + apply clarsimp+ + done + +lemma cteDelete_invs': + "\invs' and sch_act_simple and K ex\ cteDelete ptr ex \\rv. invs'\" + apply (rule hoare_gen_asm) + apply (simp add: cteDelete_def whenE_def split_def) + apply (rule hoare_pre, wp finaliseSlot_invs) + apply (rule hoare_strengthen_postE_R) + apply (unfold validE_R_def) + apply (rule use_spec) + apply (rule spec_valid_conj_liftE1) + apply (rule finaliseSlot_removeable) + apply (rule spec_valid_conj_liftE1) + apply (rule finaliseSlot_irqs) + apply (rule finaliseSlot_abort_cases'[folded finaliseSlot_def]) + apply simp + apply simp + done + +declare cases_simp_conj[simp] + +crunch typ_at'[wp]: capSwapForDelete "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +lemma cteDelete_typ_at' [wp]: + "\\s. P (typ_at' T p s)\ cteDelete slot exposed \\_ s. P (typ_at' T p s)\" + by (wp cteDelete_preservation | simp | fastforce)+ + +lemmas cteDelete_typ_at'_lifts [wp] = typ_at_lifts [OF cteDelete_typ_at'] + +lemma cteDelete_cte_at: + "\\\ cteDelete slot bool \\rv. cte_at' slot\" + apply (rule_tac Q="\s. cte_at' slot s \ \ cte_at' slot s" + in hoare_pre(1)) + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_disj_lift) + apply (rule typ_at_lifts, rule cteDelete_typ_at') + apply (simp add: cteDelete_def finaliseSlot_def split_def) + apply (rule validE_valid, rule bindE_wp_fwd) + apply (subst finaliseSlot'_simps_ext) + apply (rule bindE_wp_fwd) + apply simp + apply (rule getCTE_sp) + apply (rule hoare_pre, rule hoare_FalseE) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule hoare_FalseE) + apply auto + done + +lemma cteDelete_cte_wp_at_invs: + "\ \cap. P cap \ isZombie cap \ \ + \\s. invs' s \ sch_act_simple s \ ex_cte_cap_to' slot s \ + cte_wp_at' (\cte. P (cteCap cte)) p s\ + cteDelete slot False + \\rv. cte_at' slot and invs' and sch_act_simple + and cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte = NullCap \ + (\zb n cp. cteCap cte = capability.Zombie p zb n \ P cp + \ (capZombiePtr cp \ p \ p = slot))) p\, -" + apply (rule hoare_pre) + apply (wp cteDelete_cte_at) + prefer 2 + apply (erule_tac Q="invs' s \ R" for s R in conjI[rotated]) + apply simp + apply (simp only: cteDelete_def withoutPreemption_def fun_app_def split_def) + apply (cases "p = slot") + apply (cases "\cp. P cp") + apply (simp add: whenE_def) + apply wp + apply (rule hoare_strengthen_post [OF emptySlot_deletes]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply wp + apply (simp add: imp_conjR conj_comms) + apply (rule_tac Q="\rv s. invs' s \ sch_act_simple s \ + (fst rv \ + cte_wp_at' (\cte. 
removeable' slot s (cteCap cte)) slot s) \ + (fst rv \ + (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))) \ + (\ fst rv \ + cte_wp_at' (\cte. P (cteCap cte) \ + cteCap cte = NullCap \ + (\zb n. cteCap cte = Zombie slot zb n)) + slot s)" + and E="\rv. \" in hoare_strengthen_postE) + apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple + hoare_drop_imps(2)[OF finaliseSlot_irqs]) + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) + apply (clarsimp simp: cte_wp_at_ctes_of dest!: isCapDs) + apply simp + apply simp + apply simp + apply (simp add: cte_wp_at_ctes_of validE_R_def) + apply (simp add: whenE_def) + apply (wp emptySlot_cte_wp_cap_other) + apply (rule_tac Q'="\rv s. invs' s \ sch_act_simple s \ + (fst rv \ + cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s) \ + (fst rv \ + (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))) \ + cte_wp_at' (\cte. P (cteCap cte) \ + cteCap cte = NullCap \ + (\zb n. cteCap cte = Zombie p zb n) \ + (\cp. P cp \ capZombiePtr cp \ p)) + p s" + in hoare_strengthen_postE_R) + apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple + hoare_drop_imps(2)[OF finaliseSlot_irqs]) + apply (rule hoare_strengthen_postE_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) + apply simp+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + apply simp + done + + +lemma cteDelete_sch_act_simple: + "\invs' and sch_act_simple and (\s. \ exposed \ ex_cte_cap_to' slot s)\ + cteDelete slot exposed \\rv. sch_act_simple\" + apply (simp add: cteDelete_def whenE_def split_def) + apply (wp hoare_drop_imps | simp)+ + apply (rule_tac hoare_strengthen_postE [where Q="\rv. sch_act_simple" + and E="\rv. sch_act_simple"]) + apply (rule valid_validE) + apply (wp finaliseSlot_sch_act_simple) + apply simp+ + done + +crunch st_tcb_at'[wp]: "Arch.finaliseCap", unbindMaybeNotification, prepareThreadDelete "st_tcb_at' P t" + (simp: crunch_simps pteAtIndex_def + wp: crunch_wps getObject_inv loadObject_default_inv) +end + + +lemma finaliseCap2_st_tcb_at': + assumes x[simp]: "\st. simple' st \ P st" + shows "\st_tcb_at' P t\ + finaliseCap cap final flag + \\rv. st_tcb_at' P t\" + apply (simp add: finaliseCap_def Let_def + getThreadCSpaceRoot deletingIRQHandler_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply ((wp cancelAllIPC_st_tcb_at cancelAllSignals_st_tcb_at + prepareThreadDelete_st_tcb_at' + suspend_st_tcb_at' cteDeleteOne_st_tcb_at getCTE_wp' + | simp add: isCap_simps getSlotCap_def getIRQSlot_def + locateSlot_conv getInterruptState_def + split del: if_split + | wpc))+ + done + +crunch st_tcb_at'[wp]: capSwapForDelete "st_tcb_at' P t" + +lemma cteDelete_st_tcb_at': + assumes x[simp]: "\st. simple' st \ P st" + shows "\st_tcb_at' P t\ + cteDelete slot ex + \\rv. st_tcb_at' P t\" + apply (rule cteDelete_preservation) + apply (rule finaliseCap2_st_tcb_at' [OF x]) + apply assumption + apply wp+ + apply auto + done + +definition + capToRPO :: "capability \ machine_word option \ nat" +where + "capToRPO cap \ case cap of + NullCap \ (None, 0) + | Zombie p zt n \ (Some p, 2) + | _ \ (None, 3)" + +lemma emptySlot_rvk_prog: + "\\s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\ + emptySlot sl opt + \\rv s. 
revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\" + apply (simp add: emptySlot_def case_Null_If) + apply (wp getCTE_cteCap_wp opt_return_pres_lift) + apply (clarsimp simp: o_def split: option.split) + apply (erule rpo_trans) + apply (rule rpo_delta[where S="{sl}"], simp_all) + apply (simp add: modify_map_def) + apply (simp add: Int_insert_left dom_def modify_map_def) + apply (clarsimp simp: capToRPO_def split: capability.split) + done + +lemma rvk_prog_modify_map: + "\ \x. Some x = m p \ + capToRPO (f x) = capToRPO x + \ rpo_measure p (Some (capToRPO (f x))) + < rpo_measure p (Some (capToRPO x)) \ + \ revoke_progress_ord (option_map capToRPO \ m) (option_map capToRPO \ (modify_map m p f))" + apply (cases "m p") + apply (simp add: modify_map_def fun_upd_idem) + apply (simp add: revoke_progress_ord_def) + apply simp + apply (erule disjE) + apply (simp add: modify_map_def fun_upd_idem) + apply (simp add: revoke_progress_ord_def) + apply (rule rpo_delta[where S="{p}"], + simp_all add: modify_map_def dom_def) + done + +lemma capSwap_rvk_prog: + "\\s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s) + \ cte_wp_at' (\cte. \n. (capToRPO (cteCap cte)) = (Some p1, Suc n)) p2 s + \ cte_wp_at' (\cte. fst (capToRPO (cteCap cte)) \ Some p1) p1 s\ + capSwapForDelete p1 p2 + \\rv s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\" + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (cases "p1 = p2") + apply simp + apply (erule rpo_trans) + apply (rule rpo_delta[where S="{p1, p2}"], simp_all) + apply (simp add: Int_insert_left dom_def) + apply (case_tac "capToRPO (cteCap ctea)") + apply simp + apply arith + done + +lemmas setObject_ASID_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF setObject_ASID_ctes_of'] +lemmas cancelAllIPC_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF cancelAllIPC_ctes_of] +lemmas cancelAllSignals_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF cancelAllSignals_ctes_of] +lemmas setEndpoint_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ep_ctes_of] +lemmas setNotification_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ntfn_ctes_of] + +lemmas emptySlot_rvk_prog' = emptySlot_rvk_prog[unfolded o_def] +lemmas threadSet_ctesCaps_of = cteCaps_of_ctes_of_lift[OF threadSet_ctes_of] + +lemmas storePTE_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF storePTE_ctes] + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma vcpuSwitch_rvk_prog': + "vcpuSwitch v \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma vcpuFinalise_rvk_prog': + "vcpuFinalise v \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma dissociateVCPUTCB_rvk_prog': + "dissociateVCPUTCB v t \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma vcpuUpdate_rvk_prog': + "vcpuUpdate p f \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma loadVMID_rvk_prog': + "loadVMID p \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma archThreadSet_rvk_prog': + "archThreadSet f p \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +crunch rvk_prog': finaliseCap + "\s. revoke_progress_ord m (\x. 
option_map capToRPO (cteCaps_of s x))" + (wp: crunch_wps emptySlot_rvk_prog' threadSet_ctesCaps_of + getObject_inv loadObject_default_inv + simp: crunch_simps unless_def o_def pteAtIndex_def setBoundNotification_def + ignore: setCTE threadSet) + +lemmas finalise_induct3 = finaliseSlot'.induct[where P= + "\sl exp s. P sl (finaliseSlot' sl exp) s" for P] + +lemma finaliseSlot_rvk_prog: + "s \ \\s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\ + finaliseSlot' slot e + \\rv s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\,\\\\" +proof (induct rule: finalise_induct3) + case (1 sl ex st) + show ?case + apply (subst finaliseSlot'.simps) + apply (unfold split_def) + apply (rule hoare_pre_spec_validE) + apply wp + apply ((wp | simp)+)[1] + apply (wp "1.hyps") + apply (unfold Let_def split_def fst_conv + snd_conv haskell_fail_def + case_Zombie_assert_fold) + apply (wp capSwap_rvk_prog | simp only: withoutPreemption_def)+ + apply (wp preemptionPoint_inv)[1] + apply force + apply force + apply (wp capSwap_rvk_prog | simp only: withoutPreemption_def)+ + apply (wp getCTE_wp | simp)+ + apply (rule hoare_strengthen_post [OF emptySlot_rvk_prog[where m=m]]) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def o_def + dest!: isCapDs) + apply (erule rpo_trans) + apply (rule rvk_prog_modify_map[unfolded o_def]) + apply (clarsimp simp: capToRPO_def) + apply (rule spec_strengthen_postE, + rule "1.hyps", (assumption | rule refl)+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule rpo_trans) + apply (rule rvk_prog_modify_map[unfolded o_def]) + apply (clarsimp simp: cteCaps_of_def capToRPO_def dest!: isCapDs) + apply ((wp | simp add: locateSlot_conv)+)[2] + apply (rule drop_spec_validE) + apply simp + apply (rule_tac Q="\rv s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s) + \ cte_wp_at' (\cte. cteCap cte = fst rvb) sl s" + in hoare_post_imp) + apply (clarsimp simp: o_def cte_wp_at_ctes_of capToRPO_def + dest!: isCapDs) + apply (simp split: capability.split_asm) + apply (wp updateCap_cte_wp_at_cases | simp)+ + apply (rule hoare_strengthen_post) + apply (rule_tac Q="\fc s. cte_wp_at' (\cte. cteCap cte = cteCap rv) sl s + \ revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)" + in hoare_vcg_conj_lift) + apply (wp finaliseCap_rvk_prog'[folded o_def])[1] + apply (rule finaliseCap_cases) + apply (clarsimp simp: o_def cte_wp_at_ctes_of cteCaps_of_def) + apply (strengthen imp_consequent, simp) + apply (erule rpo_trans) + apply (rule rvk_prog_modify_map[unfolded o_def]) + apply (erule disjE, simp add: capRemovable_def) + apply (auto dest!: isCapDs simp: capToRPO_def split: if_split if_split_asm)[1] + apply (wp isFinalCapability_inv getCTE_wp | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of o_def) + done +qed + +lemma cteDelete_rvk_prog: + "\\s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\ + cteDelete slot e + \\rv s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\,-" + including no_pre + apply (simp add: cteDelete_def whenE_def split_def) + apply (wp emptySlot_rvk_prog) + apply (simp only: cases_simp) + apply (simp add: finaliseSlot_def) + apply (rule use_spec, rule finaliseSlot_rvk_prog) + done + +text \Proving correspondence between the delete functions.\ + +definition + "spec_corres s r P P' f f' \ corres r (P and ((=) s)) P' f f'" + +lemma use_spec_corres': + assumes x: "\s. 
Q s \ spec_corres s r P P' f f'" + shows "corres r (P and Q) P' f f'" + apply (clarsimp simp: corres_underlying_def) + apply (frule x) + apply (clarsimp simp: spec_corres_def corres_underlying_def) + apply (erule(1) my_BallE, simp)+ + done + +lemmas use_spec_corres = use_spec_corres'[where Q="\", simplified] + +lemma drop_spec_corres: + "corres r P P' f f' \ spec_corres s r P P' f f'" + unfolding spec_corres_def + apply (erule corres_guard_imp) + apply simp + apply assumption + done + +lemma spec_corres_split: + assumes x: "spec_corres s r' P P' f f'" + assumes y: "\rv rv' s'. \ (rv, s') \ fst (f s); r' rv rv' \ \ + spec_corres s' r (R rv) (R' rv') (g rv) (g' rv')" + assumes z: "\Q\ f \R\" "\Q'\ f' \R'\" + shows "spec_corres s r (P and Q) (P' and Q') (f >>= g) (f' >>= g')" +proof - + have w: "\rv rv'. r' rv rv' \ corres r (R rv and (\s'. (rv, s') \ fst (f s))) (R' rv') (g rv) (g' rv')" + apply (rule use_spec_corres') + apply (erule(1) y) + done + show ?thesis + unfolding spec_corres_def + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule x[unfolded spec_corres_def]) + apply (erule w) + apply (wp z) + apply (rule univ_wp) + apply (rule z) + apply simp + apply assumption + done +qed + +lemma spec_corres_splitE: + assumes x: "spec_corres s (e \ r') P P' f f'" + assumes y: "\rv rv' s'. \ (Inr rv, s') \ fst (f s); r' rv rv' \ \ + spec_corres s' (e \ r) (R rv) (R' rv') (g rv) (g' rv')" + assumes z: "\Q\ f \R\,-" "\Q'\ f' \R'\,-" + shows "spec_corres s (e \ r) (P and Q) (P' and Q') (f >>=E g) (f' >>=E g')" +proof - + note w = z[unfolded validE_R_def validE_def] + show ?thesis + unfolding bindE_def + apply (rule spec_corres_split [OF x _ w(1) w(2)]) + apply (case_tac rv) + apply (clarsimp simp: lift_def spec_corres_def) + apply (clarsimp simp: lift_def) + apply (erule(1) y) + done +qed + +lemmas spec_corres_split' = spec_corres_split [OF drop_spec_corres] +lemmas spec_corres_splitE' = spec_corres_splitE [OF drop_spec_corres] + +lemma spec_corres_guard_imp: + assumes x: "spec_corres s r Q Q' f f'" + assumes y: "P s \ Q s" "\s'. P' s' \ Q' s'" + shows "spec_corres s r P P' f f'" + unfolding spec_corres_def + apply (rule corres_guard_imp) + apply (rule x[unfolded spec_corres_def]) + apply (clarsimp elim!: y) + apply (erule y) + done + +lemma spec_corres_returns[simp]: + "spec_corres s r P P' (return x) (return y) = (\s'. (P s \ P' s' \ (s, s') \ state_relation) \ r x y)" + "spec_corres s r' P P' (returnOk x) (returnOk y) = (\s'. 
(P s \ P' s' \ (s, s') \ state_relation) \ r' (Inr x) (Inr y))" + by (simp add: spec_corres_def returnOk_def)+ + +lemma cte_map_replicate: + "cte_map (ptr, replicate bits False) = ptr" + by (simp add: cte_map_def) + +lemma spec_corres_locate_Zombie: + "\ P s \ valid_cap (cap.Zombie ptr bits (Suc n)) s; + spec_corres s r P P' f (f' (cte_map (ptr, nat_to_cref (zombie_cte_bits bits) n))) \ + \ spec_corres s r P P' f (locateSlotCap (Zombie ptr (zbits_map bits) (Suc n)) (of_nat n) >>= f')" + unfolding spec_corres_def + apply (simp add: locateSlot_conv cte_level_bits_def stateAssert_def bind_assoc) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_assume_pre, clarsimp) + apply (frule cte_at_nat_to_cref_zbits, rule lessI) + apply (subst(asm) cte_map_nat_to_cref) + apply (drule valid_Zombie_n_less_cte_bits) + apply simp + apply (clarsimp simp: valid_cap_def cap_aligned_def word_bits_def + split: option.split_asm) + apply (simp add: mult.commute cte_level_bits_def) + apply (clarsimp simp: isCap_simps valid_cap_def) + apply (simp only: assert_def, subst if_P) + apply (cases bits, simp_all add: zbits_map_def) + apply (clarsimp simp: cap_table_at_gsCNodes isCap_simps + zbits_map_def) + apply (rule word_of_nat_less) + apply (simp add: cap_aligned_def) + apply (erule corres_guard_imp, simp_all) + apply wp+ + done + +lemma spec_corres_req: + "\ \s'. \ P s; P' s'; (s, s') \ state_relation \ \ F; + F \ spec_corres s r P P' f f' \ + \ spec_corres s r P P' f f'" + unfolding spec_corres_def + apply (rule corres_assume_pre, erule meta_mp) + apply simp + done + +lemma zombie_alignment_oddity: + "\ cte_wp_at (\c. c = cap.Zombie (cte_map slot) zb n) slot s; + invs s \ \ (cte_map slot, replicate (zombie_cte_bits zb) False) = slot" + apply (frule cte_wp_at_valid_objs_valid_cap, clarsimp+) + apply (rule cte_map_inj_eq) + apply (simp only: cte_map_replicate) + apply (erule cte_at_replicate_zbits) + apply (erule cte_wp_at_weakenE, simp) + apply clarsimp+ + done + +primrec + rec_del_concrete :: "rec_del_call \ (bool \ capability) kernel_p set" +where + "rec_del_concrete (CTEDeleteCall ptr ex) + = {liftME (\x. (True, NullCap)) (cteDelete (cte_map ptr) ex)}" +| "rec_del_concrete (FinaliseSlotCall ptr ex) + = {finaliseSlot (cte_map ptr) ex}" +| "rec_del_concrete (ReduceZombieCall cap slot ex) + = (if red_zombie_will_fail cap then {} else + (\cap. liftME (\x. (True, NullCap)) (reduceZombie cap (cte_map slot) ex)) ` {cap'. 
cap_relation cap cap'})" + +lemma rec_del_concrete_empty: + "red_zombie_will_fail cap \ rec_del_concrete (ReduceZombieCall cap slot ex) = {}" + by simp + +lemmas rec_del_concrete_unfold + = rec_del_concrete.simps red_zombie_will_fail.simps + if_True if_False ball_simps simp_thms + +lemma cap_relation_removables: + "\ cap_relation cap cap'; isNullCap cap' \ isZombie cap'; + s \ cap; cte_at slot s; invs s \ + \ cap_removeable cap slot = capRemovable cap' (cte_map slot) + \ cap_cyclic_zombie cap slot = capCyclicZombie cap' (cte_map slot)" + apply (clarsimp simp: capRemovable_def isCap_simps + capCyclicZombie_def cap_cyclic_zombie_def + split: cap_relation_split_asm arch_cap.split_asm) + apply (rule iffD1 [OF conj_commute], rule context_conjI) + apply (rule iffI) + apply (clarsimp simp: cte_map_replicate) + apply clarsimp + apply (frule(1) cte_map_inj_eq [rotated, OF _ cte_at_replicate_zbits]) + apply clarsimp+ + apply (simp add: cte_map_replicate) + apply simp + apply simp + done + +lemma spec_corres_add_asm: + "spec_corres s r P Q f g \ spec_corres s r (P and F) Q f g" + unfolding spec_corres_def + apply (erule corres_guard_imp) + apply simp+ + done + +lemma spec_corres_gen_asm2: + "(F \ spec_corres s r Q P' f g) \ spec_corres s r Q (P' and (\s. F)) f g" + unfolding spec_corres_def + by (auto intro: corres_gen_asm2) + +crunch typ_at'[wp]: reduceZombie "\s. P (typ_at' T p s)" + (simp: crunch_simps wp: crunch_wps) + +lemmas reduceZombie_typ_ats[wp] = typ_at_lifts [OF reduceZombie_typ_at'] + +lemma spec_corres_if: + "\ G = G'; G \ spec_corres s r P P' a c; \ G \ spec_corres s r Q Q' b d\ + \ spec_corres s r (\x. (G \ P x) \ (\ G \ Q x)) (\x. (G' \ P' x) \ (\ G' \ Q' x)) + (if G then a else b) (if G' then c else d)" + by simp + +lemma spec_corres_liftME2: + "spec_corres s (f \ r) P P' m (liftME fn m') + = spec_corres s (f \ (\x. r x \ fn)) P P' m m'" + by (simp add: spec_corres_def) + + +lemma rec_del_ReduceZombie_emptyable: + "\invs + and (cte_wp_at ((=) cap) slot and is_final_cap' cap + and (\y. is_zombie cap)) and + (\s. \ ex \ ex_cte_cap_wp_to (\cp. cap_irqs cp = {}) slot s) and + emptyable slot and + (\s. \ cap_removeable cap slot \ (\t\obj_refs cap. halted_if_tcb t s))\ + rec_del (ReduceZombieCall cap slot ex) \\rv. emptyable slot\, -" + by (rule rec_del_emptyable [where args="ReduceZombieCall cap slot ex", simplified]) + +crunch sch_act_simple[wp]: cteDelete sch_act_simple + +lemmas preemption_point_valid_list = preemption_point_inv'[where P="valid_list", simplified] + +lemma finaliseSlot_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ finaliseSlot ptr exposed \\_ s. P (typ_at' T p s)\" + by (rule finaliseSlot_preservation, (wp | simp)+) + +lemmas finaliseSlot_typ_ats[wp] = typ_at_lifts[OF finaliseSlot_typ_at'] + +lemmas rec_del_valid_list_irq_state_independent[wp] = + rec_del_preservation[OF cap_swap_for_delete_valid_list set_cap_valid_list empty_slot_valid_list finalise_cap_valid_list preemption_point_valid_list] + +lemma rec_del_corres: + "\C \ rec_del_concrete args. + spec_corres s (dc \ (case args of + FinaliseSlotCall _ _ \ (\r r'. fst r = fst r' + \ cap_relation (snd r) (snd r') ) + | _ \ dc)) + (einvs and simple_sched_action + and valid_rec_del_call args + and cte_at (slot_rdcall args) + and emptyable (slot_rdcall args) + and (\s. \ exposed_rdcall args \ ex_cte_cap_wp_to (\cp. cap_irqs cp = {}) (slot_rdcall args) s) + and (\s. case args of ReduceZombieCall cap sl ex \ + \t\obj_refs cap. 
halted_if_tcb t s + | _ \ True)) + (invs' and sch_act_simple and cte_at' (cte_map (slot_rdcall args)) and + (\s. \ exposed_rdcall args \ ex_cte_cap_to' (cte_map (slot_rdcall args)) s) + and (\s. case args of ReduceZombieCall cap sl ex \ + \cp'. cap_relation cap cp' + \ ((cte_wp_at' (\cte. cteCap cte = cp') (cte_map sl)) + and (\s. \ capRemovable cp' (cte_map sl) + \ (\ ex \ \ capCyclicZombie cp' (cte_map sl)))) s + | _ \ True)) + (rec_del args) C" +proof (induct rule: rec_del.induct, + simp_all only: rec_del_fails rec_del_concrete_empty + red_zombie_will_fail.simps ball_simps(5)) + case (1 slot exposed) + show ?case + apply (clarsimp simp: cteDelete_def liftME_def bindE_assoc + split_def) + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_splitE) + apply (rule "1.hyps"[simplified rec_del_concrete_unfold dc_def]) + apply (rule drop_spec_corres) + apply (simp(no_asm) add: dc_def[symmetric] liftME_def[symmetric] + whenE_liftE) + apply (rule corres_when, simp) + apply simp + apply (rule emptySlot_corres) + apply (wp rec_del_invs rec_del_valid_list rec_del_cte_at finaliseSlot_invs hoare_drop_imps + preemption_point_inv' + | simp)+ + done +next + case (2 slot exposed) + have prove_imp: + "\P Q. \ P \ Q \ \ (P \ Q) = True" + by simp + show ?case + apply (simp only: rec_del_concrete_unfold finaliseSlot_def) + apply (subst rec_del_simps_ext) + apply (subst finaliseSlot'_simps_ext) + apply (fold reduceZombie_def[unfolded cteDelete_def finaliseSlot_def]) + apply (unfold fun_app_def unlessE_whenE K_bind_def split_def) + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_splitE') + apply simp + apply (rule get_cap_corres) + apply (rule spec_corres_if) + apply auto[1] + apply (rule drop_spec_corres, rule corres_trivial, + simp add: returnOk_def) + apply (rule spec_corres_splitE') + apply simp + apply (rule isFinalCapability_corres[where ptr=slot]) + apply (rule spec_corres_splitE') + apply simp + apply (rule finaliseCap_corres[where sl=slot]) + apply simp + apply simp + apply simp + + apply (rule_tac F="isZombie (fst rv'b) \ isNullCap (fst rv'b)" + in spec_corres_gen_asm2) + apply (rule spec_corres_req[rotated]) + apply (rule_tac F="\s. invs s \ cte_at slot s \ s \ fst rvb" + in spec_corres_add_asm) + apply (rule spec_corres_if) + apply (erule conjunct1) + apply (rule drop_spec_corres, rule corres_trivial, + simp add: returnOk_def) + apply (rule spec_corres_if) + apply (erule conjunct2) + apply (rule drop_spec_corres, + simp add: liftME_def[symmetric] o_def dc_def[symmetric]) + apply (rule updateCap_corres) + apply simp + apply (simp(no_asm_use) add: cap_cyclic_zombie_def split: cap.split_asm) + apply (simp add: is_cap_simps) + apply (rule spec_corres_splitE') + apply simp + apply (rule updateCap_corres, erule conjunct1) + apply (case_tac "fst rvb", auto simp: isCap_simps is_cap_simps)[1] + apply (rule spec_corres_splitE) + apply (rule iffD1 [OF spec_corres_liftME2[where fn="\v. 
(True, NullCap)"]]) + apply (rule bspec [OF "2.hyps"(1), unfolded fun_app_def], assumption+) + apply (case_tac "fst rvb", simp_all add: isCap_simps is_cap_simps)[1] + apply (rename_tac nat) + apply (case_tac nat, simp_all)[1] + apply clarsimp + apply (rule spec_corres_splitE'[OF preemptionPoint_corres]) + apply (rule "2.hyps"(2)[unfolded fun_app_def rec_del_concrete_unfold + finaliseSlot_def], + assumption+) + apply (wp preemption_point_inv')[1] + apply clarsimp+ + apply (wp preemptionPoint_invR) + apply simp + apply clarsimp + apply simp + apply (wp rec_del_invs rec_del_cte_at reduce_zombie_cap_somewhere + rec_del_ReduceZombie_emptyable + reduceZombie_invs reduce_zombie_cap_to | simp)+ + apply (wp reduceZombie_cap_to reduceZombie_sch_act_simple) + apply simp + apply (wp replace_cap_invs final_cap_same_objrefs + set_cap_cte_wp_at set_cap_cte_cap_wp_to + hoare_vcg_const_Ball_lift hoare_weak_lift_imp + | simp add: conj_comms + | erule finalise_cap_not_reply_master [simplified])+ + apply (elim conjE, strengthen exI[mk_strg I], + strengthen asm_rl[where psi="(cap_relation cap cap')" for cap cap', mk_strg I E]) + apply (wp make_zombie_invs' updateCap_cap_to' + updateCap_cte_wp_at_cases + hoare_vcg_ex_lift hoare_weak_lift_imp) + apply clarsimp + apply (drule_tac cap=a in cap_relation_removables, + clarsimp, assumption+) + apply (clarsimp simp: conj_comms) + apply (wp | simp)+ + apply (rule hoare_strengthen_post) + apply (rule_tac Q="\fin s. einvs s \ simple_sched_action s + \ replaceable s slot (fst fin) rv + \ cte_wp_at ((=) rv) slot s \ s \ fst fin + \ emptyable slot s + \ (\t\obj_refs (fst fin). halted_if_tcb t s)" + in hoare_vcg_conj_lift) + apply (wp finalise_cap_invs finalise_cap_replaceable + finalise_cap_makes_halted + hoare_vcg_disj_lift hoare_vcg_ex_lift)[1] + apply (rule finalise_cap_cases[where slot=slot]) + apply clarsimp + apply (frule if_unsafe_then_capD, clarsimp, clarsimp) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule disjE[where P="c = cap.NullCap \ P" for c P]) + apply clarsimp + apply (clarsimp simp: conj_comms dest!: is_cap_simps [THEN iffD1]) + apply (frule trans [OF _ appropriate_Zombie, OF sym]) + apply (case_tac rv, simp_all add: fst_cte_ptrs_def is_cap_simps + is_final_cap'_def)[1] + apply (wp | simp)+ + apply (rule hoare_strengthen_post) + apply (rule_tac Q="\fin s. invs' s \ sch_act_simple s \ s \' fst fin + \ (exposed \ ex_cte_cap_to' (cte_map slot) s) + \ cte_wp_at' (\cte. 
cteCap cte = cteCap rv') (cte_map slot) s" + in hoare_vcg_conj_lift) + apply (wp hoare_vcg_disj_lift finaliseCap_invs[where sl="cte_map slot"])[1] + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_replaceable[where slot="cte_map slot"]) + apply (rule finaliseCap_cte_refs) + apply clarsimp + apply (erule disjE[where P="F \ G" for F G]) + apply (clarsimp simp: capRemovable_def cte_wp_at_ctes_of) + apply (clarsimp dest!: isCapDs simp: cte_wp_at_ctes_of) + apply (case_tac "cteCap rv'", + auto simp add: isCap_simps is_cap_simps final_matters'_def)[1] + apply (wp isFinalCapability_inv hoare_weak_lift_imp + | simp add: is_final_cap_def conj_comms cte_wp_at_eq_simp)+ + apply (rule isFinal[where x="cte_map slot"]) + apply (wp get_cap_wp| simp add: conj_comms)+ + apply (wp getCTE_wp') + apply clarsimp + apply (frule cte_wp_at_valid_objs_valid_cap[where P="(=) cap" for cap]) + apply fastforce + apply (fastforce simp: cte_wp_at_caps_of_state) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid', clarsimp) + apply ((clarsimp | rule conjI)+)[1] + done + +next + case (3 ptr bits n slot) + show ?case + apply simp + apply (rule drop_spec_corres) + apply (simp add: reduceZombie_def case_Zombie_assert_fold) + apply (rule stronger_corres_guard_imp[rotated]) + apply assumption + apply (rule conjI) + apply clarsimp + apply (drule cte_wp_valid_cap, clarsimp) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (drule cte_at_replicate_zbits) + apply (drule cte_at_get_cap_wp, clarsimp) + apply (rule cte_wp_at_weakenE') + apply (erule(1) pspace_relation_cte_wp_at[OF state_relation_pspace_relation]) + apply clarsimp+ + apply (rule TrueI) + apply assumption + apply (rule_tac F="(ptr, replicate (zombie_cte_bits bits) False) \ slot" in corres_req) + apply (clarsimp simp: capCyclicZombie_def cte_map_replicate) + apply (rule_tac F="ptr \ cte_map slot" in corres_req) + apply (elim conjE exE) + apply (frule cte_wp_valid_cap, clarsimp) + apply (drule cte_map_inj) + apply (erule cte_at_replicate_zbits) + apply (erule cte_wp_at_weakenE, simp) + apply clarsimp+ + apply (simp add: cte_map_replicate) + apply (simp add: liftM_def liftME_def[symmetric]) + apply (simp add: liftE_bindE) + apply (rule corres_symb_exec_r [OF _ getCTE_sp]) + apply (rule_tac F="isZombie (cteCap x) \ capZombiePtr (cteCap x) \ ptr" + in corres_req) + apply (clarsimp simp: state_relation_def dest!: isCapDs) + apply (drule pspace_relation_cte_wp_atI') + apply (subst(asm) eq_commute, assumption) + apply clarsimp + apply clarsimp + apply (case_tac c, simp_all)[1] + apply (clarsimp simp: cte_wp_at_def) + apply (drule(1) zombies_finalD2, clarsimp+) + apply (fold dc_def) + apply (rule corres_guard_imp, rule capSwapForDelete_corres) + apply (simp add: cte_map_replicate) + apply simp + apply clarsimp + apply (rule conjI, clarsimp)+ + apply (rule conjI, rule cte_at_replicate_zbits, erule cte_wp_valid_cap) + apply clarsimp + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule tcb_valid_nonspecial_cap, fastforce) + apply (clarsimp simp: ran_tcb_cap_cases is_cap_simps is_nondevice_page_cap_simps + split: Structures_A.thread_state.split) + apply (simp add: ran_tcb_cap_cases is_cap_simps is_nondevice_page_cap_simps) + apply fastforce + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_map_replicate) + done +next + note if_cong [cong] option.case_cong [cong] + case (4 ptr bits n slot) + let ?target = "(ptr, nat_to_cref (zombie_cte_bits bits) n)" + note hyps = "4.hyps"[simplified rec_del_concrete_unfold 
spec_corres_liftME2] + show ?case + apply (simp only: rec_del_concrete_unfold cap_relation.simps) + apply (simp add: reduceZombie_def Let_def + liftE_bindE + del: inf_apply) + apply (subst rec_del_simps_ext) + apply (rule_tac F="ptr + 2 ^ cte_level_bits * of_nat n + = cte_map ?target" + in spec_corres_req) + apply clarsimp + apply (drule cte_wp_valid_cap, clarsimp) + apply (subst cte_map_nat_to_cref) + apply (drule valid_Zombie_n_less_cte_bits, simp) + apply (clarsimp simp: valid_cap_def cap_aligned_def word_bits_def + split: option.split_asm) + apply (simp add: cte_level_bits_def) + apply (simp add: spec_corres_liftME2 pred_conj_assoc) + apply (rule spec_corres_locate_Zombie) + apply (auto dest: cte_wp_valid_cap)[1] + apply (rule_tac F="n < 2 ^ (word_bits - cte_level_bits)" in spec_corres_req) + apply clarsimp + apply (drule cte_wp_valid_cap, clarsimp) + apply (frule valid_Zombie_n_less_cte_bits) + apply (drule Suc_le_lessD) + apply (erule order_less_le_trans) + apply (rule power_increasing) + apply (clarsimp simp: valid_cap_def cap_aligned_def + split: option.split_asm) + apply (simp add: cte_level_bits_def word_bits_def) + apply simp + apply simp + apply (rule spec_corres_gen_asm2) + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_splitE) + apply (rule hyps) + apply (simp add: in_monad) + apply (rule drop_spec_corres) + apply (simp add: liftE_bindE del: rec_del.simps) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac F="cteCap ourCTE = Zombie ptr (zbits_map bits) (Suc n) + \ cteCap ourCTE = NullCap + \ (\zb n cp. cteCap ourCTE = Zombie (cte_map slot) zb n + \ cp = Zombie ptr (zbits_map bits) (Suc n) + \ capZombiePtr cp \ cte_map slot)" + in corres_gen_asm2) + apply (rule_tac P="invs and cte_wp_at (\c. c = new_cap) slot + and cte_wp_at (\c. c = cap.NullCap \ \ False \ is_zombie c + \ ?target \ fst_cte_ptrs c) ?target" + and P'="invs' and sch_act_simple + and cte_wp_at' (\c. c = ourCTE) (cte_map slot) + and cte_at' (cte_map ?target)" + in corres_inst) + apply (erule disjE) + apply (case_tac new_cap, simp_all split del: if_split)[1] + apply (simp add: liftME_def[symmetric]) + apply (rule stronger_corres_guard_imp) + apply (rule corres_symb_exec_r) + apply (rule_tac F="cteCap endCTE = capability.NullCap" + in corres_gen_asm2, simp) + apply (rule updateCap_corres) + apply simp + apply (simp add: is_cap_simps) + apply (rule_tac R="\rv. 
cte_at' (cte_map ?target)" in hoare_post_add) + apply (wp, (wp getCTE_wp)+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule no_fail_pre, wp, simp) + apply clarsimp + apply (frule zombies_finalD, clarsimp) + apply (clarsimp simp: is_cap_simps) + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule cte_wp_valid_cap[unfolded cte_wp_at_eq_simp], clarsimp) + apply (drule cte_wp_at_norm[where p="?target"], clarsimp) + apply (erule disjE) + apply (drule(1) pspace_relation_cte_wp_at + [OF state_relation_pspace_relation], + clarsimp+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: is_cap_simps fst_cte_ptrs_def + cte_wp_at_ctes_of) + apply (frule cte_at_cref_len [rotated, OF cte_at_replicate_zbits]) + apply (fastforce simp add: cte_wp_at_caps_of_state) + apply clarsimp + apply (drule(1) nat_to_cref_replicate_Zombie) + apply simp + apply (clarsimp simp: capRemovable_def cte_wp_at_def) + apply (drule(1) zombies_finalD2, clarsimp+) + apply (simp add: is_cap_simps) + apply (erule disjE) + apply (case_tac new_cap, simp_all split del: if_split)[1] + apply (simp add: assertE_def returnOk_def) + apply (elim exE conjE) + apply (case_tac new_cap, simp_all)[1] + apply (clarsimp simp add: is_zombie_def) + apply (simp add: assertE_def liftME_def[symmetric] + split del: if_split) + apply (rule corres_req[rotated], subst if_P, assumption) + apply (simp add: returnOk_def) + apply (clarsimp simp: zombie_alignment_oddity cte_map_replicate) + apply (wp get_cap_cte_wp_at getCTE_wp' rec_del_cte_at + rec_del_invs rec_del_delete_cases)+ + apply (rule hoare_strengthen_postE_R) + apply (rule_tac P="\cp. cp = Zombie ptr (zbits_map bits) (Suc n)" + in cteDelete_cte_wp_at_invs[where p="cte_map slot"]) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of | rule conjI)+ + apply (clarsimp simp: capRemovable_def shiftl_t2n[symmetric]) + apply (drule arg_cong[where f="\x. 
x >> cte_level_bits"], + subst(asm) shiftl_shiftr_id) + apply (clarsimp simp: cte_level_bits_def word_bits_def) + apply (rule order_less_le_trans) + apply (erule of_nat_mono_maybe [rotated]) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def cte_level_bits_def) + apply simp + apply (simp add: word_bits_def) + apply simp + apply (erule(1) notE [rotated, OF _ of_nat_neq_0]) + apply (erule order_less_le_trans) + apply (rule power_increasing) + apply (simp add: word_bits_def cte_level_bits_def) + apply simp + apply clarsimp + apply (frule cte_wp_valid_cap, clarsimp) + apply (rule conjI, erule cte_at_nat_to_cref_zbits) + apply simp + apply (simp add: halted_emptyable) + apply (erule(1) zombie_is_cap_toE) + apply simp + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid', clarsimp+) + apply (frule valid_Zombie_cte_at'[where n=n]) + apply (clarsimp simp: valid_cap'_def) + apply (intro conjI) + apply (fastforce simp: cte_wp_at_ctes_of cte_level_bits_def objBits_defs + mult.commute mult.left_commute) + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="cte_map slot" in exI) + apply (clarsimp simp: image_def) + apply (rule_tac x="of_nat n" in bexI) + apply (fastforce simp: cte_level_bits_def objBits_defs mult.commute mult.left_commute shiftl_t2n) + apply simp + apply (subst field_simps, rule plus_one_helper2) + apply simp + apply (frule of_nat_mono_maybe[rotated, where 'a=machine_word_len]) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def cte_level_bits_def) + apply simp + apply clarsimp + apply (drule_tac f="\x. x - 1" and y=0 in arg_cong) + apply (clarsimp simp: word_bits_def cte_level_bits_def) + done +qed + +lemma cteDelete_corres: + "corres (dc \ dc) + (einvs and simple_sched_action and cte_at ptr and emptyable ptr) + (invs' and sch_act_simple and cte_at' (cte_map ptr)) + (cap_delete ptr) (cteDelete (cte_map ptr) True)" + unfolding cap_delete_def + using rec_del_corres[where args="CTEDeleteCall ptr True"] + apply (simp add: spec_corres_liftME2 liftME_def[symmetric]) + apply (erule use_spec_corres) + done + + +text \The revoke functions, and their properties, are + slightly easier to deal with than the delete + function. However, their termination arguments + are complex, requiring that the delete functions + reduce the number of non-null capabilities.\ + +definition + cteRevoke_recset :: "((machine_word \ kernel_state) \ (machine_word \ kernel_state)) set" +where + "cteRevoke_recset \ measure (\(sl, s). (\mp. \x \ dom mp. rpo_measure x (mp x)) + (option_map capToRPO \ cteCaps_of s))" + +lemma wf_cteRevoke_recset: + "wf cteRevoke_recset" + by (simp add: cteRevoke_recset_def) + +termination cteRevoke + apply (rule cteRevoke.termination) + apply (rule wf_cteRevoke_recset) + apply (clarsimp simp add: cteRevoke_recset_def in_monad + dest!: in_getCTE in_preempt') + apply (frule use_validE_R [OF _ cteDelete_rvk_prog]) + apply (rule rpo_sym) + apply (frule use_validE_R [OF _ cteDelete_deletes]) + apply simp + apply (simp add: revoke_progress_ord_def) + apply (erule disjE) + apply (drule_tac f="\f. f (mdbNext (cteMDBNode rv))" in arg_cong) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def capToRPO_def) + apply (simp split: capability.split_asm) + apply (case_tac rvb, clarsimp) + apply assumption + done + +lemma cteRevoke_preservation': + assumes x: "\ptr. \P\ cteDelete ptr True \\rv. P\" + assumes y: "\f s. 
P (ksWorkUnitsCompleted_update f s) = P s" + assumes irq: "irq_state_independent_H P" + shows "s \ \P\ cteRevoke ptr \\rv. P\,\\rv. P\" +proof (induct rule: cteRevoke.induct) + case (1 p s') + show ?case + apply (subst cteRevoke.simps) + apply (wp "1.hyps") + apply (wp x y preemptionPoint_inv hoare_drop_imps irq | clarsimp)+ + done +qed + +lemmas cteRevoke_preservation = + validE_valid [OF use_spec(2) [OF cteRevoke_preservation']] + +lemma cteRevoke_typ_at': + "\\s. P (typ_at' T p s)\ cteRevoke ptr \\rv s. P (typ_at' T p s)\" + by (wp cteRevoke_preservation | clarsimp)+ + +lemma cteRevoke_invs': + "\invs' and sch_act_simple\ cteRevoke ptr \\rv. invs'\" + apply (rule_tac Q="\rv. invs' and sch_act_simple" in hoare_strengthen_post) + apply (wp cteRevoke_preservation cteDelete_invs' cteDelete_sch_act_simple)+ + apply simp_all + done + +declare cteRevoke.simps[simp del] + +lemma spec_corres_symb_exec_l_Ex: + assumes x: "\rv. (rv, s) \ fst (f s) \ spec_corres s r (Q rv) P' (g rv) h" + shows "spec_corres s r (\s. \rv. Q rv s \ (rv, s) \ fst (f s)) P' + (do rv \ f; g rv od) h" +proof - + have y: "\rv. corres r (\s'. s' = s \ Q rv s \ (rv, s) \ fst (f s)) P' (g rv) h" + apply (rule corres_req) + defer + apply (rule corres_guard_imp, + erule x[unfolded spec_corres_def]) + apply simp+ + done + show ?thesis + unfolding spec_corres_def + apply (rule corres_guard_imp, + rule corres_symb_exec_l_Ex, + rule y) + apply simp+ + done +qed + +lemma spec_corres_symb_exec_l_Ex2: + assumes y: "P s \ \rv. (rv, s) \ fst (f s)" + assumes x: "\rv. (rv, s) \ fst (f s) \ + spec_corres s r (\s. \s'. (rv, s) \ fst (f s') \ P s') P' (g rv) h" + shows "spec_corres s r P P' (do rv \ f; g rv od) h" + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_symb_exec_l_Ex) + apply (erule x) + apply (frule y) + apply fastforce + apply assumption + done + +lemma spec_corres_symb_exec_r_All: + assumes nf: "\rv. no_fail (Q' rv) g" + assumes x: "\rv. spec_corres s r P (Q' rv) f (h rv)" + shows "spec_corres s r P (\s. (\p \ fst (g s). snd p = s \ Q' (fst p) s) \ (\rv. Q' rv s)) + f (do rv \ g; h rv od)" + unfolding spec_corres_def + apply (rule corres_guard_imp, + rule corres_symb_exec_r_All, + rule nf, + rule x[unfolded spec_corres_def]) + apply simp+ + done + +lemma spec_corres_symb_exec_r_Ex: + assumes y: "\s. P' s \ \p \ fst (g s). snd p = s" + assumes z: "\s. P' s \ \p \ fst (g s). snd p = s" + assumes nf: "no_fail P' g" + assumes x: "\rv. spec_corres s r P (\s. \s'. 
(rv, s) \ fst (g s') \ P' s') f (h rv)" + shows "spec_corres s r P P' f (do rv \ g; h rv od)" + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_symb_exec_r_All) + prefer 2 + apply (rule x) + apply (insert nf)[1] + apply (clarsimp simp: no_fail_def) + apply (frule y) + apply (drule(1) bspec) + apply fastforce + apply assumption + apply (frule y) + apply (rule conjI) + apply clarsimp + apply (drule(1) bspec) + apply fastforce + apply (frule z) + apply fastforce + done + +lemma in_getCTE_cte_wp_at': + "(rv, s') \ fst (getCTE p s) = (s = s' \ cte_wp_at' ((=) rv) p s)" + apply (rule iffI) + apply (clarsimp dest!: in_getCTE simp: cte_wp_at'_def) + apply (clarsimp simp: cte_wp_at'_def getCTE_def) + done + +lemma state_relation_cap_relation: + "\ (s, s') \ state_relation; cte_wp_at ((=) cap) p s; + cte_wp_at' ((=) cte) (cte_map p) s'; + valid_objs s; pspace_distinct' s'; pspace_aligned' s' \ + \ cap_relation cap (cteCap cte)" + apply (cases p, clarsimp simp: state_relation_def) + apply (drule(3) pspace_relation_cte_wp_at) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma descendants_of_empty_state_relation: + "\ (s, s') \ state_relation; cte_at p s \ \ + (descendants_of p (cdt s) = {}) = (descendants_of' (cte_map p) (ctes_of s') = {})" + apply (clarsimp simp only: state_relation_def cdt_relation_def swp_def) + apply (drule spec, drule(1) mp) + apply (fastforce) + done + +lemma subtree_first_step: + "\ ctes_of s p = Some cte; ctes_of s \ p \ p' \ + \ mdbNext (cteMDBNode cte) \ nullPointer \ + (\cte'. ctes_of s (mdbNext (cteMDBNode cte)) = Some cte' + \ isMDBParentOf cte cte')" + apply (erule subtree.induct) + apply (clarsimp simp: mdb_next_unfold nullPointer_def parentOf_def) + apply clarsimp + done + +lemma cap_revoke_mdb_stuff1: + "\ (s, s') \ state_relation; cte_wp_at ((=) cap) p s; + cte_wp_at' ((=) cte) (cte_map p) s'; invs s; invs' s'; + cap \ cap.NullCap; cteCap cte \ NullCap \ + \ (descendants_of p (cdt s) = {}) + = (\ (mdbNext (cteMDBNode cte) \ nullPointer + \ cte_wp_at' (isMDBParentOf cte) (mdbNext (cteMDBNode cte)) s'))" + apply (subst descendants_of_empty_state_relation) + apply assumption + apply (clarsimp elim!: cte_wp_at_weakenE) + apply (simp add: descendants_of'_def) + apply safe + apply (drule spec[where x="mdbNext (cteMDBNode cte)"]) + apply (erule notE, rule subtree.direct_parent) + apply (clarsimp simp: mdb_next_unfold cte_wp_at_ctes_of) + apply (simp add: nullPointer_def) + apply (clarsimp simp: parentOf_def cte_wp_at_ctes_of) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) subtree_first_step) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) subtree_first_step) + apply clarsimp + done + +lemma select_bind_spec_corres': + "\P sa \ x \ S; spec_corres sa r P P' (f x) g\ +\ spec_corres sa r P P' (select S >>= f) g" + apply (clarsimp simp add: spec_corres_def + corres_underlying_def bind_def + select_def + | drule(1) bspec | erule rev_bexI | rule conjI)+ + done + +lemma cap_revoke_mdb_stuff4: + "\ (s, s') \ state_relation; cte_wp_at ((=) cap) p s; + cte_wp_at' ((=) cte) (cte_map p) s'; invs s; valid_list s; invs' s'; + cap \ cap.NullCap; cteCap cte \ NullCap; + descendants_of p (cdt s) \ {} \ + \ \p'. 
mdbNext (cteMDBNode cte) = cte_map p' + \ next_child p (cdt_list s) = Some p'" + apply(subgoal_tac "descendants_of p (cdt s) \ {}") + prefer 2 + apply simp + apply (subst(asm) cap_revoke_mdb_stuff1) + apply assumption+ + apply (clarsimp simp: cte_wp_at_ctes_of state_relation_def) + apply (drule(1) pspace_relation_cte_wp_atI[where x="mdbNext c" for c]) + apply clarsimp + apply clarsimp + apply (intro exI, rule conjI [OF refl]) + apply(simp add: cdt_list_relation_def) + apply(erule_tac x="fst p" in allE, erule_tac x="snd p" in allE) + apply(case_tac "cte", simp) + apply(case_tac "next_slot p (cdt_list s) (cdt s)") + apply(simp add: next_slot_def empty_list_empty_desc next_child_None_empty_desc) + apply(frule cte_at_next_slot') + apply(erule invs_mdb) + apply(simp add: invs_def valid_state_def finite_depth) + apply(assumption) + apply(simp add: next_slot_def empty_list_empty_desc) + apply(frule invs_valid_pspace, simp add: valid_pspace_def) + apply(rule cte_map_inj_eq) + apply(simp add: cte_wp_at_def)+ + done + +lemma cteRevoke_corres': + "spec_corres s (dc \ dc) + (einvs and simple_sched_action and cte_at ptr) + (invs' and sch_act_simple and cte_at' (cte_map ptr)) + (cap_revoke ptr) (\s. cteRevoke (cte_map ptr) s)" +proof (induct rule: cap_revoke.induct) + case (1 slot s') + show ?case + apply (subst cap_revoke.simps) + apply (subst cteRevoke.simps[abs_def]) + apply (simp add: liftE_bindE next_revoke_cap_def select_ext_def bind_assoc) + apply (rule spec_corres_symb_exec_l_Ex2) + apply (clarsimp simp: cte_wp_at_def) + apply (rule spec_corres_symb_exec_l_Ex2) + apply (simp add: in_monad) + apply (rule spec_corres_symb_exec_r_Ex) + apply (clarsimp elim!: use_valid [OF _ getCTE_inv]) + apply (clarsimp simp: cte_at'_def getCTE_def) + apply (rule no_fail_pre, wp) + apply clarsimp + apply (simp add: in_monad in_get_cap_cte_wp_at + in_getCTE_cte_wp_at') + apply (rule_tac F="cap_relation cap (cteCap cte)" + in spec_corres_req) + apply (clarsimp | erule(2) state_relation_cap_relation)+ + apply (case_tac "cap = cap.NullCap") + apply (simp add: whenE_def) + apply (case_tac "cteCap cte = NullCap") + apply (simp add: whenE_def) + apply (case_tac "descendants_of slot (cdt s') = {}") + apply (case_tac "mdbNext (cteMDBNode cte) = nullPointer") + apply (simp add: whenE_def) + apply (simp add: whenE_def[where P=True]) + apply (rule spec_corres_symb_exec_r_Ex) + apply (clarsimp elim!: use_valid [OF _ getCTE_inv]) + apply clarsimp + apply (subgoal_tac "cte_at' (mdbNext (cteMDBNode cte)) s") + apply (clarsimp simp: getCTE_def cte_at'_def) + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def nullPointer_def) + apply (erule (2) valid_dlistEn) + apply simp + apply (rule no_fail_pre, wp) + apply clarsimp + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def nullPointer_def) + apply (erule (2) valid_dlistEn) + apply simp + apply (rule_tac F="\ isMDBParentOf cte nextCTE" + in spec_corres_req) + apply (clarsimp simp: in_getCTE_cte_wp_at') + apply (subst(asm) cap_revoke_mdb_stuff1, assumption+) + apply (clarsimp simp: cte_wp_at'_def) + apply (simp add: whenE_def) + apply (rule_tac F="mdbNext (cteMDBNode cte) \ nullPointer" + in spec_corres_req) + apply clarsimp + apply (subst(asm) cap_revoke_mdb_stuff1, assumption+) + apply (clarsimp simp: cte_wp_at'_def) + apply (simp add: whenE_def[where P=True]) + apply (rule spec_corres_symb_exec_r_Ex) + apply (clarsimp elim!: use_valid [OF _ getCTE_inv]) + apply (subgoal_tac "cte_at' 
(mdbNext (cteMDBNode cte)) s") + apply (clarsimp simp: getCTE_def cte_at'_def) + apply clarsimp + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def nullPointer_def) + apply (erule (2) valid_dlistEn) + apply simp + apply (rule no_fail_pre, wp) + apply clarsimp + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def nullPointer_def) + apply (erule (2) valid_dlistEn) + apply simp + apply (simp add: in_monad in_get_cap_cte_wp_at + in_getCTE_cte_wp_at') + apply(case_tac "next_child slot (cdt_list s')") + apply(rule_tac F="False" in spec_corres_req) + apply(clarsimp) + apply(frule next_child_NoneD) + apply(simp add: empty_list_empty_desc) + apply(simp) + apply (rule_tac F="valid_list s'" in spec_corres_req,simp) + apply (frule next_child_child_set, assumption) + apply simp + apply (rule spec_corres_symb_exec_l_Ex2) + apply (simp add: in_monad) + apply (rule spec_corres_symb_exec_l_Ex2) + apply (simp add: in_monad) + apply (drule next_childD, simp) + apply (simp add: child_descendant) + apply (rule spec_corres_symb_exec_l_Ex2) + apply (clarsimp simp: in_monad) + apply (drule next_childD, simp) + apply (clarsimp) + apply (drule child_descendant) + apply (drule descendants_of_cte_at, erule invs_mdb) + apply (clarsimp simp: cte_wp_at_def) + apply (simp add: in_monad) + apply(case_tac "capa = cap.NullCap") + apply(rule_tac F="False" in spec_corres_req) + apply(clarsimp) + apply(drule next_childD, simp) + apply(clarsimp) + apply(drule child_descendant) + apply(drule cap_revoke_mdb_stuff3) + apply(erule invs_mdb) + apply(clarsimp simp: cte_wp_at_def) + apply(simp) + apply (simp) + apply (rule_tac F="isMDBParentOf cte nextCTE" + in spec_corres_req) + apply clarsimp + apply(frule cap_revoke_mdb_stuff1, (simp add: in_get_cap_cte_wp_at)+) + apply (clarsimp simp: cte_wp_at'_def) + + apply (rule spec_corres_req) + apply clarsimp + apply (rule cap_revoke_mdb_stuff4, (simp add: in_get_cap_cte_wp_at)+) + apply (clarsimp simp: whenE_def) + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_splitE' [OF cteDelete_corres]) + apply (rule spec_corres_splitE' [OF preemptionPoint_corres]) + apply (rule "1.hyps", + (simp add: cte_wp_at_def in_monad select_def next_revoke_cap_def select_ext_def + | assumption | rule conjI refl)+)[1] + apply (wp cap_delete_cte_at cteDelete_invs' cteDelete_sch_act_simple + preemptionPoint_invR preemption_point_inv' | clarsimp)+ + apply (clarsimp simp: cte_wp_at_cte_at) + apply(drule next_childD, simp) + apply(clarsimp, drule child_descendant) + apply (fastforce simp: emptyable_def dest: reply_slot_not_descendant) + apply (clarsimp elim!: cte_wp_at_weakenE') + done +qed + +lemmas cteRevoke_corres = use_spec_corres [OF cteRevoke_corres'] + +lemma arch_recycleCap_improve_cases: + "\ \ isFrameCap cap; \ isPageTableCap cap; \ isVCPUCap cap; \ isASIDControlCap cap \ + \ (if isASIDPoolCap cap then v else undefined) = v" + by (cases cap, simp_all add: isCap_simps) + +crunch typ_at'[wp]: invokeCNode "\s. P (typ_at' T p s)" + (ignore: finaliseSlot + simp: crunch_simps filterM_mapM unless_def + arch_recycleCap_improve_cases + wp: crunch_wps undefined_valid finaliseSlot_preservation) + +lemmas invokeCNode_typ_ats [wp] = typ_at_lifts [OF invokeCNode_typ_at'] + +crunch st_tcb_at'[wp]: cteMove "st_tcb_at' P t" + (wp: crunch_wps) + +lemma threadSet_st_tcb_at2: + assumes x: "\tcb. P (tcbState tcb) \ P (tcbState (f tcb))" + shows "\st_tcb_at' P t\ threadSet f t' \\rv. 
st_tcb_at' P t\" + including no_pre + apply (simp add: threadSet_def pred_tcb_at'_def) + apply (wp setObject_tcb_strongest) + apply (rule hoare_strengthen_post, rule getObject_tcb_sp) + apply (clarsimp simp: obj_at'_def x) + done + +crunch st_tcb_at_simplish[wp]: "cancelBadgedSends" "st_tcb_at' (\st. P st \ simple' st) t" + (wp: crunch_wps threadSet_st_tcb_at2 + simp: crunch_simps filterM_mapM makeObject_tcb unless_def) + +lemma cancelBadgedSends_st_tcb_at': + assumes x: "\st. simple' st \ P st" + shows "\st_tcb_at' P t\ cancelBadgedSends a b \\_. st_tcb_at' P t\" + apply (rule hoare_chain) + apply (rule cancelBadgedSends_st_tcb_at_simplish[where P=P and t=t]) + apply (auto simp: x elim!: pred_tcb'_weakenE) + done + +lemmas cteRevoke_st_tcb_at' + = cteRevoke_preservation [OF cteDelete_st_tcb_at'] +lemmas cteRevoke_st_tcb_at_simplish + = cteRevoke_st_tcb_at'[where P="\st. Q st \ simple' st", + simplified] for Q + +lemmas finaliseSlot_st_tcb_at' + = finaliseSlot_preservation [OF finaliseCap2_st_tcb_at' + emptySlot_pred_tcb_at' + capSwapForDelete_st_tcb_at' + updateCap_pred_tcb_at'] +lemmas finaliseSlot_st_tcb_at_simplish + = finaliseSlot_st_tcb_at'[where P="\st. Q st \ simple' st", + simplified] for Q + +lemma updateCap_valid_objs [wp]: + "\\s. valid_objs' s \ s \' cap\ + updateCap ptr cap + \\r. valid_objs'\" + unfolding updateCap_def + apply (wp setCTE_valid_objs getCTE_wp) + apply clarsimp + apply (erule cte_at_cte_wp_atD) + done + +end + +lemma (in mdb_move) [intro!]: + shows "mdb_chain_0 m" using valid + by (auto simp: valid_mdb_ctes_def) + +lemma (in mdb_move) m'_badged: + "m' p = Some (CTE cap node) + \ if p = dest then mdbFirstBadged node = mdbFirstBadged src_node \ cap = cap' + else if p = src then \ mdbFirstBadged node \ cap = NullCap + else \node'. m p = Some (CTE cap node') \ mdbFirstBadged node = mdbFirstBadged node'" + using src dest neq + apply (clarsimp simp: m'_def n_def modify_map_cases nullMDBNode_def) + apply (rule conjI, clarsimp) + apply clarsimp + apply auto + done + +lemma (in mdb_move) m'_next: + "m' \ p \ p' \ + if p = src then p' = 0 + else if p = dest then m \ src \ p' + else if p' = dest then m \ p \ src + else m \ p \ p'" + using src dest src_0 dest_0 dlist neq src_neq_prev + apply (simp add: m'_def n_def) + apply (simp add: mdb_next_unfold) + apply (elim exE conjE) + apply (case_tac z) + apply (rename_tac cap node) + apply simp + apply (simp add: modify_map_cases) + apply (cases "mdbPrev src_node = p") + apply clarsimp + apply (erule_tac p=src in valid_dlistEp, assumption) + apply clarsimp + apply clarsimp + apply simp + apply (cases "p=src", simp) + apply clarsimp + apply (case_tac "mdbNext node = p") + apply clarsimp + apply clarsimp + apply (erule_tac p=p in valid_dlistEn, assumption) + apply clarsimp + apply (clarsimp simp: prev) + done + +lemma (in mdb_move) sameRegionAs_parent_eq: + "sameRegionAs cap cap' = sameRegionAs cap src_cap" + using parency unfolding weak_derived'_def + by (simp add: sameRegionAs_def2) + +lemma (in mdb_move) m'_cap: + "m' p = Some (CTE c node) \ + if p = src then c = NullCap + else if p = dest then c = cap' + else \node'. m p = Some (CTE c node')" + using src dest neq + apply (simp add: m'_def n_def) + apply (auto simp add: modify_map_if split: if_split_asm) + done + +context mdb_move +begin + +interpretation Arch . 
(*FIXME: arch_split*) + +lemma m_to_src: + "m \ p \ src = (p \ 0 \ p = mdbPrev src_node)" + apply (insert src) + apply (rule iffI) + apply (clarsimp simp add: mdb_next_unfold) + apply (rule conjI, clarsimp) + apply (case_tac z) + apply clarsimp + apply (erule_tac p=p in dlistEn, clarsimp) + apply clarsimp + apply (clarsimp simp add: mdb_next_unfold) + apply (erule dlistEp, clarsimp) + apply clarsimp + done + +lemma m_from_prev_src: + "m \ mdbPrev src_node \ p = (mdbPrev src_node \ 0 \ p = src)" + apply (insert src) + apply (rule iffI) + apply (clarsimp simp: mdb_next_unfold) + apply (rule conjI, clarsimp) + apply (erule dlistEp, clarsimp) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + apply (erule dlistEp, clarsimp) + apply clarsimp + done + +lemma m'_nextD: + "m' \ p \ p' \ + (if p = src then p' = 0 + else if p = dest then m \ src \ p' + else if p = mdbPrev src_node then p' = dest \ p \ 0 + else m \ p \ p')" + using src dest src_0 dest_0 dlist neq src_neq_prev + apply (simp add: m'_def n_def) + apply (simp add: mdb_next_unfold) + apply (elim exE conjE) + apply (case_tac z) + apply simp + apply (simp add: modify_map_cases) + apply (cases "mdbPrev src_node = p") + apply clarsimp + apply simp + apply (cases "p=src", simp) + apply clarsimp + done + + +lemmas prev_src = prev_p_next + +lemma m'_next_eq: + notes if_cong [cong] + shows + "m' \ p \ p' = + (if p = src then p' = 0 + else if p = dest then m \ src \ p' + else if p = mdbPrev src_node then p' = dest \ p \ 0 + else m \ p \ p')" + apply (insert src dest) + apply (rule iffI) + apply (drule m'_nextD, simp) + apply (cases "p=0") + apply (clarsimp simp: mdb_next_unfold split: if_split_asm) + apply (simp split: if_split_asm) + apply (simp add: mdb_next_unfold m'_def n_def modify_map_cases) + apply (simp add: mdb_next_unfold m'_def n_def modify_map_cases neq) + apply (simp add: mdb_next_unfold m'_def n_def modify_map_cases neq) + apply clarsimp + apply (drule prev_src) + apply (clarsimp simp: mdb_next_unfold) + apply (case_tac z) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold m'_def n_def modify_map_cases) + apply (cases "mdbNext src_node = p") + apply (clarsimp) + apply (case_tac z) + apply clarsimp + apply clarsimp + done + +declare dest_0 [simp] + +lemma m'_swp_eq: + "m' \ p \ p' = m \ s_d_swap p src dest \ s_d_swap p' src dest" + by (auto simp add: m'_next_eq s_d_swap_def m_to_src m_from_prev_src) + +lemma m'_tranclD: + "m' \ p \\<^sup>+ p' \ m \ s_d_swap p src dest \\<^sup>+ s_d_swap p' src dest" + apply (erule trancl.induct) + apply (fastforce simp: m'_swp_eq) + apply (fastforce simp: m'_swp_eq intro: trancl_trans) + done + +lemma m_tranclD: + "m \ p \\<^sup>+ p' \ m' \ s_d_swap p src dest \\<^sup>+ s_d_swap p' src dest" + apply (erule trancl.induct) + apply (fastforce simp: m'_swp_eq) + apply (fastforce simp: m'_swp_eq intro: trancl_trans) + done + +lemma m'_trancl_eq: + "m' \ p \\<^sup>+ p' = m \ s_d_swap p src dest \\<^sup>+ s_d_swap p' src dest" + by (auto dest: m_tranclD m'_tranclD) + +lemma m'_rtrancl_eq: + "m' \ p \\<^sup>* p' = m \ s_d_swap p src dest \\<^sup>* s_d_swap p' src dest" + by (auto simp: rtrancl_eq_or_trancl m'_trancl_eq s_d_swap_def) + +lemma m_cap: + "m p = Some (CTE c node) \ + if p = src then \node'. c = src_cap \ m' dest = Some (CTE cap' node') + else if p = dest then \node'. c = NullCap \ m' src = Some (CTE NullCap node') + else \node'. 
m' p = Some (CTE c node')" + apply (auto simp: src dest) + apply (auto simp: m'_def n_def src dest modify_map_if neq) + done + +lemma sameRegion_cap'_src [simp]: + "sameRegionAs cap' c = sameRegionAs src_cap c" + using parency unfolding weak_derived'_def + apply (case_tac "isReplyCap src_cap"; clarsimp) + apply (clarsimp simp: capMasterCap_def split: capability.splits arch_capability.splits + ; fastforce simp: sameRegionAs_def AARCH64_H.sameRegionAs_def isCap_simps split: if_split_asm)+ + done + +lemma chunked': + "mdb_chunked m'" + using chunked + apply (clarsimp simp: mdb_chunked_def) + apply (drule m'_cap)+ + apply (clarsimp simp: m'_trancl_eq sameRegion_cap'_src split: if_split_asm) + apply (erule_tac x=src in allE) + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply (clarsimp simp: src s_d_swap_other) + apply (rule conjI) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def split: if_split_asm) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + apply (simp add: s_d_swap_other) + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (clarsimp simp: src sameRegionAs_parent_eq) + apply (rule conjI) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + apply (simp add: s_d_swap_other) + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + done + +lemma isUntypedCap': + "isUntypedCap cap' = isUntypedCap src_cap" + using parency unfolding weak_derived'_def + by (clarsimp simp: weak_derived'_def dest!: capMaster_isUntyped) + +lemma capRange': + "capRange cap' = capRange src_cap" + using parency unfolding weak_derived'_def + by (clarsimp simp: weak_derived'_def dest!: capMaster_capRange) + +lemma untypedRange': + "untypedRange cap' = untypedRange src_cap" + using parency unfolding weak_derived'_def + by (clarsimp simp: weak_derived'_def dest!: capMaster_untypedRange) + +lemmas ut' = isUntypedCap' capRange' untypedRange' + +lemma m'_revocable: + "m' p = Some (CTE c node) \ + if p = src then \mdbRevocable node + else if p = 
dest then mdbRevocable node = mdbRevocable src_node + else \node'. m p = Some (CTE c node') \ mdbRevocable node = mdbRevocable node'" + apply (insert src dest neq) + apply (frule m'_cap) + apply (clarsimp simp: m'_def n_def modify_map_if nullMDBNode_def split: if_split_asm) + done + +lemma cteMove_valid_mdb_helper: + "(isUntypedCap cap' \ cap' = src_cap) \valid_mdb_ctes m'" +proof + note sameRegion_cap'_src [simp del] + note dest_0 [simp del] src_0 [simp del] + note src_next [simp del] + note rtrancl0 [simp del] + + show "valid_dlist m'" by (rule dlist') + show "no_0 m'" by (rule no_0') + + have chain: "mdb_chain_0 m" .. + + have mp: "cte_mdb_prop m dest (\m. mdbPrev m = nullPointer \ mdbNext m = nullPointer)" using dest prev nxt + unfolding cte_mdb_prop_def + by (simp add: nullPointer_def) + hence nsd: "\ m \ mdbNext src_node \\<^sup>* dest" using dlist + by (auto elim: next_rtrancl_tranclE dest: null_mdb_no_trancl [OF _ no_0]) + + have sd: "mdbNext src_node \ 0 \ mdbNext src_node \ dom m" + proof - + assume T: "mdbNext src_node \ 0" + have "m \ src \ mdbNext src_node" by (rule m_p_next) + moreover have "m \ src \\<^sup>+ 0" using chain src unfolding mdb_chain_0_def by (clarsimp simp: dom_def) + ultimately have "m \ mdbNext src_node \\<^sup>+ 0" using T + by (auto elim: tranclE2' simp: next_unfold') + thus "mdbNext src_node \ dom m" + by - (erule tranclE2', (clarsimp simp: next_unfold')+) + qed + + let ?m = "(modify_map + (modify_map (modify_map m (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) src + (cteMDBNode_update (mdbNext_update (%_. (mdbNext nullMDBNode))))) + dest (cteMDBNode_update (mdbNext_update (%_. (mdbNext src_node)))))" + + let ?goal = "mdb_chain_0 ?m" + { + assume "mdbPrev src_node = 0" and T: "mdbNext src_node = 0" + hence ms: "m (mdbPrev src_node) = None" using no_0 by (simp add: no_0_def) + hence ?goal using T + by (auto simp: modify_map_None [where m = m, OF ms] nullPointer_def + intro!: mdb_chain_0_modify_map_0) + } moreover + { + assume "mdbPrev src_node \ 0" and "mdbNext src_node = 0" + hence ?goal + apply - + apply (simp add: nullMDBNode_def nullPointer_def) + apply (subst modify_map_addr_com [where y = dest], simp add: neq)+ + apply (rule mdb_chain_0_modify_map_0) + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_0 [OF chain no_0]) + apply clarsimp + apply (clarsimp simp: dest) + apply (subst next_update_is_modify [symmetric], rule dest) + apply simp + apply (subst next_update_lhs_rtrancl) + apply simp + apply (rule no_0_lhs_tranclI [OF no_0 dest_0]) + apply simp + apply (rule no_0_lhs_tranclI [OF no_0]) + apply simp + apply clarsimp + done + } moreover + { + assume "mdbPrev src_node = 0" and T: "mdbNext src_node \ 0" + hence ms: "m (mdbPrev src_node) = None" using no_0 by (simp add: no_0_def) + hence ?goal using T + apply (simp add: modify_map_None nullPointer_def) + apply (subst modify_map_addr_com [OF neq]) + apply (rule mdb_chain_0_modify_map_0) + apply (rule mdb_chain_0_modify_map_next [OF chain no_0 sd, OF T nsd]) + apply clarsimp + done + } moreover + { + assume U: "mdbPrev src_node \ 0" and T: "mdbNext src_node \ 0" + hence ?goal using dlist + apply - + apply (simp add: nullPointer_def) + apply (subst modify_map_addr_com [where y = dest], simp add: neq)+ + apply (rule mdb_chain_0_modify_map_0) + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_next [OF chain no_0 sd nsd, OF T]) + apply clarsimp + apply (clarsimp simp: dest) + apply (subst next_update_is_modify [symmetric], rule 
dest) + apply simp + apply (subst next_update_lhs_rtrancl) + apply simp + apply (rule nsd) + apply simp + apply (rule no_next_prev_rtrancl [OF valid], rule src, rule U) + apply clarsimp + done + } + ultimately have ?goal + apply (cases "mdbPrev src_node = 0") + apply (cases "mdbNext src_node = 0") + apply auto[2] + apply (cases "mdbNext src_node = 0") + apply auto + done + + thus "mdb_chain_0 m'" + unfolding m'_def n_def + apply - + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [OF src_neq_prev]) + apply (subst modify_map_addr_com [OF prev_neq_dest2]) + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [OF neq_sym])+ + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_com [ where g = "(cteCap_update (%_. cap'))"], + case_tac x, simp)+ + apply (rule mdb_chain_0_modify_map_inv) + apply (subst modify_map_com [ where g = "(cteCap_update (%_. capability.NullCap))"], + case_tac x, simp)+ + apply (erule mdb_chain_0_modify_map_inv) + apply simp + apply simp + done + + from valid + have "valid_badges m" .. + thus "valid_badges m'" using src dest parency + apply (clarsimp simp: valid_badges_def2) + apply (drule m'_badged)+ + apply (drule m'_next) + apply (clarsimp simp add: weak_derived'_def split: if_split_asm) + apply (erule_tac x=src in allE, erule_tac x=p' in allE, + erule allE, erule impE, erule exI) + apply clarsimp + apply (erule_tac x=p in allE, erule_tac x=src in allE, + erule allE, erule impE, erule exI) + apply clarsimp + by fastforce + + from valid + have "caps_contained' m" by (simp add: valid_mdb_ctes_def) + with src dest neq parency + show "caps_contained' m'" + apply (clarsimp simp: caps_contained'_def) + apply (drule m'_cap)+ + apply (clarsimp split: if_split_asm) + apply (clarsimp dest!: capRange_untyped) + apply (erule_tac x=src in allE, erule_tac x=p' in allE) + apply (clarsimp simp add: weak_derived'_def) + apply (drule capMaster_untypedRange) + apply clarsimp + apply blast + apply (erule_tac x=p in allE, erule_tac x=src in allE) + apply (clarsimp simp: weak_derived'_def) + apply (frule capMaster_isUntyped) + apply (drule capMaster_capRange) + apply clarsimp + apply blast + by fastforce + + show "mdb_chunked m'" by (rule chunked') + + from untyped_mdb + show "untyped_mdb' m'" + apply (simp add: untyped_mdb'_def) + apply clarsimp + apply (drule m'_cap)+ + apply (clarsimp simp: descendants split: if_split_asm) + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply (simp add: src ut') + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (simp add: src ut') + done + + assume isUntypedCap_eq:"isUntypedCap cap' \ cap' = src_cap" + from untyped_inc + show "untyped_inc' m'" + using isUntypedCap_eq + apply (simp add: untyped_inc'_def) + apply clarsimp + apply (drule m'_cap)+ + apply (clarsimp simp: descendants split: if_split_asm) + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply (clarsimp simp add: src ut') + apply (intro conjI impI) + apply clarsimp+ + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (clarsimp simp add: src ut') + apply (intro conjI impI) + apply clarsimp+ + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + done + + note if_cong [cong] + + from not_null parency + have "src_cap \ NullCap \ cap' \ NullCap" + by (clarsimp simp: weak_derived'_def) + moreover + from valid + have "valid_nullcaps m" .. 
+ ultimately + show vn': "valid_nullcaps m'" + apply (clarsimp simp: valid_nullcaps_def) + apply (frule m'_cap) + apply (insert src dest) + apply (frule spec, erule allE, erule (1) impE) + apply (clarsimp split: if_split_asm) + apply (simp add: n_def m'_def) + apply (simp add: modify_map_if) + apply (simp add: n_def m'_def) + apply (simp add: modify_map_if) + apply (clarsimp split: if_split_asm) + apply (erule disjE) + apply clarsimp + apply (erule allE, erule allE, erule (1) impE) + apply clarsimp + apply (insert dlist) + apply (erule_tac p=src in valid_dlistEn, assumption) + apply clarsimp + apply (clarsimp simp: nullMDBNode_def nullPointer_def) + apply (erule allE, erule allE, erule (1) impE) + apply clarsimp + apply (erule_tac p=src in valid_dlistEp, assumption) + apply clarsimp + apply (clarsimp simp: nullMDBNode_def nullPointer_def) + done + + from valid + have "ut_revocable' m" .. + thus "ut_revocable' m'" using src dest parency + apply (clarsimp simp: ut_revocable'_def) + apply (frule m'_cap) + apply (frule m'_revocable) + apply (clarsimp split: if_split_asm) + apply (subgoal_tac "isUntypedCap src_cap") + apply simp + apply (clarsimp simp: weak_derived'_def dest!: capMaster_isUntyped) + done + + from src + have src': "m' src = Some (CTE NullCap nullMDBNode)" + by (simp add: m'_def n_def modify_map_if) + with dlist' no_0' + have no_prev_of_src': "\p. \m' \ p \ src" + apply clarsimp + apply (frule (3) vdlist_nextD) + apply (simp add: mdb_prev_def mdb_next_unfold nullPointer_def) + done + + from valid + have "class_links m" .. + thus "class_links m'" using src dest parency + apply (clarsimp simp: class_links_def weak_derived'_def) + apply (case_tac cte) + apply (case_tac cte') + apply clarsimp + apply (case_tac "p'=src") + apply (simp add: no_prev_of_src') + apply (drule m'_next) + apply (drule m'_cap)+ + apply (clarsimp split: if_split_asm) + apply (fastforce dest!: capMaster_capClass) + apply (fastforce dest!: capMaster_capClass) + apply fastforce + done + + show "irq_control m'" using src dest parency + apply (clarsimp simp: irq_control_def) + apply (frule m'_revocable) + apply (drule m'_cap) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp add: weak_derived'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule m'_cap) + apply (clarsimp split: if_split_asm) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule m'_cap) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: weak_derived'_def) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (erule (1) irq_controlD, rule irq_control) + done + + have distz: "distinct_zombies m" + using valid by (simp add: valid_mdb_ctes_def) + + thus "distinct_zombies m'" + apply (simp add: m'_def distinct_zombies_nonCTE_modify_map) + apply (simp add: n_def distinct_zombies_nonCTE_modify_map + modify_map_apply src dest neq) + apply (erule distinct_zombies_switchE, rule dest, rule src) + apply simp + apply (cut_tac parency) + apply (clarsimp simp: weak_derived'_def) + done + + have "reply_masters_rvk_fb m" using valid .. 
+ thus "reply_masters_rvk_fb m'" using neq parency + apply (simp add: m'_def n_def reply_masters_rvk_fb_def + ball_ran_modify_map_eq) + apply (simp add: modify_map_apply m_p dest) + apply (intro ball_ran_fun_updI, simp_all) + apply (frule bspec, rule ranI, rule m_p) + apply (clarsimp simp: weak_derived'_def) + apply (drule master_eqE[where F=isReplyCap], simp add: isCap_Master) + apply (simp add: isCap_simps)+ + done + +qed + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cteMove_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ cte_wp_at' (\c. weak_derived' (cteCap c) cap) src s + \ cte_wp_at' (\c. cteCap c \ NullCap) src s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteMove cap src dest + \\rv. if_live_then_nonz_cap'\" + unfolding cteMove_def + apply simp + apply wp + apply (simp only: if_live_then_nonz_cap'_def imp_conv_disj + ex_nonz_cap_to'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + hoare_vcg_ex_lift updateCap_cte_wp_at_cases + getCTE_wp hoare_weak_lift_imp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) if_live_then_nonz_capE') + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (drule_tac x="(id (src := dest, dest := src)) cref" in spec) + apply (clarsimp dest!: weak_derived_zobj split: if_split_asm) + done + +lemma cteMove_valid_pspace' [wp]: + "\\x. valid_pspace' x \ + cte_wp_at' (\c. weak_derived' (cteCap c) capability) word1 x \ + cte_wp_at' (\c. isUntypedCap (cteCap c) \ capability = cteCap c) word1 x \ + cte_wp_at' (\c. cteCap c \ NullCap) word1 x \ + x \' capability \ + cte_wp_at' (\c. cteCap c = capability.NullCap) word2 x\ + cteMove capability word1 word2 + \\y. valid_pspace'\" + unfolding cteMove_def + apply (simp add: pred_conj_def valid_pspace'_def valid_mdb'_def) + apply (wp sch_act_wf_lift valid_queues_lift + cur_tcb_lift updateCap_no_0 updateCap_ctes_of_wp getCTE_wp | simp)+ + apply (clarsimp simp: invs'_def valid_state'_def)+ + apply (clarsimp dest!: cte_at_cte_wp_atD) + apply (rule_tac x = cte in exI) + apply clarsimp + apply (clarsimp dest!: cte_at_cte_wp_atD) + apply (rule_tac x = ctea in exI) + apply (clarsimp simp: isCap_simps) + apply rule + apply (clarsimp elim!: valid_mdb_ctesE) + apply (case_tac ctea) + apply (case_tac cte) + apply (rule_tac old_dest_node = "cteMDBNode cte" and src_cap = "cteCap ctea" in + mdb_move.cteMove_valid_mdb_helper) + prefer 2 + apply (clarsimp simp: cte_wp_at_ctes_of weak_derived'_def isCap_simps simp del: not_ex) + apply unfold_locales + apply (simp_all add: valid_mdb'_def cte_wp_at_ctes_of nullPointer_def weak_derived'_def) + apply clarsimp + done + +lemma cteMove_ifunsafe': + "\if_unsafe_then_cap' + and cte_wp_at' (\c. cteCap c = capability.NullCap) dest + and ex_cte_cap_to' dest + and cte_wp_at' (\c. weak_derived' (cteCap c) cap) src\ + cteMove cap src dest + \\rv. if_unsafe_then_cap'\" + apply (rule hoare_pre) + apply (simp add: ifunsafe'_def3 cteMove_def o_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (subgoal_tac "ex_cte_cap_to' cref s") + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="(id (dest := src, src := dest)) crefb" + in exI) + apply (auto simp: modify_map_def dest!: weak_derived_cte_refs + split: if_split_asm)[1] + apply (case_tac "cref = dest") + apply simp + apply (rule if_unsafe_then_capD'[where P="\cte. 
cteCap cte \ NullCap"]) + apply (clarsimp simp add: cte_wp_at_ctes_of modify_map_def + split: if_split_asm) + apply simp+ + done + +lemma cteMove_idle'[wp]: + "\\s. valid_idle' s\ + cteMove cap src dest + \\rv. valid_idle'\" + apply (simp add: cteMove_def) + apply (wp updateCap_idle' | simp)+ + apply (wp getCTE_wp') + apply (clarsimp simp: valid_idle'_def cte_wp_at_ctes_of weak_derived'_def) + done + +crunch ksInterrupt[wp]: cteMove "\s. P (ksInterruptState s)" + (wp: crunch_wps) + +crunch ksArch[wp]: cteMove "\s. P (ksArchState s)" + (wp: crunch_wps) + +lemma cteMove_irq_handlers' [wp]: + "\\s. valid_irq_handlers' s + \ cte_wp_at' (\c. weak_derived' (cteCap c) cap) src s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteMove cap src dest + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def irq_issued'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=ksInterruptState, OF cteMove_ksInterrupt]) + apply (simp add: cteMove_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of ran_def) + apply (subst(asm) imp_ex, subst(asm) all_comm) + apply (drule_tac x="(id (src := dest, dest := src)) a" in spec) + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (auto simp: cteCaps_of_def weak_derived'_def) + done + +lemmas cteMove_valid_irq_node'[wp] + = valid_irq_node_lift[OF cteMove_ksInterrupt cteMove_typ_at'] + +crunch valid_arch_state'[wp]: cteMove "valid_arch_state'" + (wp: crunch_wps) + +crunch global_refs_noop[wp]: cteMove "\s. P (global_refs' s)" + (wp: crunch_wps) +crunch gsMaxObjectSize[wp]: cteMove "\s. P (gsMaxObjectSize s)" + (wp: crunch_wps) + +lemma cteMove_global_refs' [wp]: + "\\s. valid_global_refs' s + \ cte_wp_at' (\c. weak_derived' (cteCap c) cap) src s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteMove cap src dest + \\rv. valid_global_refs'\" + apply (rule hoare_name_pre_state, clarsimp simp: valid_global_refs'_def) + apply (frule_tac p=src and cte="the (ctes_of s src)" in cte_at_valid_cap_sizes_0) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (simp add: valid_refs'_cteCaps valid_cap_sizes_cteCaps) + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=global_refs', OF cteMove_global_refs_noop]) + apply (rule hoare_use_eq [where f=gsMaxObjectSize], wp) + apply (simp add: cteMove_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of ran_def all_conj_distrib[symmetric] + imp_conjR[symmetric]) + apply (subst(asm) imp_ex, subst(asm) all_comm) + apply (drule_tac x="(id (dest := src, src := dest)) a" in spec) + apply (clarsimp simp: modify_map_def cteCaps_of_def + split: if_split_asm dest!: weak_derived_capRange_capBits) + apply auto? + done + +lemma cteMove_urz [wp]: + "\\s. untyped_ranges_zero' s + \ valid_pspace' s + \ cte_wp_at' (\c. weak_derived' (cteCap c) cap) src s + \ cte_wp_at' (\c. isUntypedCap (cteCap c) \ cap = cteCap c) src s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteMove cap src dest + \\rv. 
untyped_ranges_zero'\" + apply (clarsimp simp: cteMove_def) + apply (rule hoare_pre) + apply (wp untyped_ranges_zero_lift getCTE_wp' | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of + split del: if_split) + apply (erule untyped_ranges_zero_delta[where xs="[src, dest]"], + (clarsimp simp: modify_map_def)+) + apply (clarsimp simp: ran_restrict_map_insert modify_map_def + cteCaps_of_def untypedZeroRange_def[where ?x0.0=NullCap]) + apply (drule weak_derived_untypedZeroRange[OF weak_derived_sym'], clarsimp) + apply auto + done + +crunches updateMDB + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +(* FIXME: arch_split *) +lemma haskell_assert_inv: + "haskell_assert Q L \P\" + by wpsimp + +lemma cteMove_invs' [wp]: + "\\x. invs' x \ ex_cte_cap_to' word2 x \ + cte_wp_at' (\c. weak_derived' (cteCap c) capability) word1 x \ + cte_wp_at' (\c. isUntypedCap (cteCap c) \ capability = cteCap c) word1 x \ + cte_wp_at' (\c. (cteCap c) \ NullCap) word1 x \ + x \' capability \ + cte_wp_at' (\c. cteCap c = capability.NullCap) word2 x\ + cteMove capability word1 word2 + \\y. invs'\" + apply (simp add: invs'_def valid_state'_def pred_conj_def) + apply (rule hoare_pre) + apply ((rule hoare_vcg_conj_lift, (wp cteMove_ifunsafe')[1]) + | rule hoare_vcg_conj_lift[rotated])+ + apply (unfold cteMove_def) + apply (wp cur_tcb_lift valid_queues_lift haskell_assert_inv + sch_act_wf_lift ct_idle_or_in_cur_domain'_lift2 tcb_in_cur_domain'_lift)+ + apply clarsimp + done + +lemma cteMove_cte_wp_at: + "\\s. cte_at' ptr s \ (if p = ptr then (Q capability.NullCap) else (if p' = ptr then Q cap else cte_wp_at' (Q \ cteCap) ptr s))\ + cteMove cap p p' + \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" + unfolding cteMove_def + apply (fold o_def) + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma cteMove_ex: + "\ex_cte_cap_to' ptr and + cte_wp_at' (weak_derived' cap o cteCap) p and + cte_wp_at' ((=) NullCap o cteCap) p' and + K (p \ p') \ + cteMove cap p p' + \\_. ex_cte_cap_to' ptr\" + unfolding ex_cte_cap_to'_def + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF cteMove_ksInterrupt]) + apply (wp hoare_vcg_ex_lift cteMove_cte_wp_at) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac "cref = p") + apply simp + apply (rule_tac x=p' in exI) + apply (clarsimp simp: weak_derived'_def dest!: capMaster_same_refs) + apply (rule_tac x=cref in exI) + apply clarsimp + done + +lemmas cteMove_typ_at_lifts [wp] = typ_at_lifts [OF cteMove_typ_at'] + +lemmas finalise_slot_corres' + = rec_del_corres[where args="FinaliseSlotCall slot exp", + simplified rec_del_concrete.simps, + simplified, folded finalise_slot_def] for slot exp +lemmas finalise_slot_corres = use_spec_corres [OF finalise_slot_corres'] + +lemma corres_disj_abs: + "\ corres rv P R f g; corres rv Q R f g \ + \ corres rv (\s. P s \ Q s) R f g" + by (auto simp: corres_underlying_def) + +crunch ksMachine[wp]: updateCap "\s. P (ksMachineState s)" + +lemma cap_relation_same: + "\ cap_relation cap cap'; cap_relation cap cap'' \ + \ cap' = cap''" + by (clarsimp split: cap_relation_split_asm + arch_cap.split_asm) + +crunch gsUserPages[wp]: updateCap "\s. P (gsUserPages s)" +crunch gsCNodes[wp]: updateCap "\s. P (gsCNodes s)" +crunch ksWorkUnitsCompleted[wp]: updateCap "\s. P (ksWorkUnitsCompleted s)" +crunch ksDomSchedule[wp]: updateCap "\s. P (ksDomSchedule s)" +crunch ksDomScheduleIdx[wp]: updateCap "\s. 
P (ksDomScheduleIdx s)" +crunch ksDomainTime[wp]: updateCap "\s. P (ksDomainTime s)" + +crunches updateCap + for rdyq_projs[wp]: + "\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) (\d p. inQ d p |< tcbs_of' s)" + +lemma corres_null_cap_update: + "cap_relation cap cap' \ + corres dc (invs and cte_wp_at ((=) cap) slot) + (invs' and cte_at' (cte_map slot)) + (set_cap cap slot) (updateCap (cte_map slot) cap')" + apply (rule corres_caps_decomposition[rotated]) + apply (wp updateCap_ctes_of_wp)+ + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_apply + fun_upd_def[symmetric]) + apply (frule state_relation_pspace_relation) + apply (frule(1) pspace_relation_ctes_ofI, clarsimp+) + apply (drule(1) cap_relation_same) + apply (case_tac cte) + apply (clarsimp simp: cte_wp_at_caps_of_state fun_upd_idem) + apply (clarsimp simp: state_relation_def) + apply (erule_tac P="\caps. cdt_relation caps m ctes" for m ctes in rsubst) + apply (rule ext, clarsimp simp: cte_wp_at_caps_of_state eq_commute) + apply(clarsimp simp: cdt_list_relation_def state_relation_def) + apply(case_tac "next_slot (a, b) (cdt_list s) (cdt s)") + apply(simp) + apply(clarsimp) + apply(erule_tac x=a in allE, erule_tac x=b in allE) + apply(simp) + apply(clarsimp simp: modify_map_def split: if_split_asm) + apply(case_tac z) + apply(clarsimp) + apply (simp add: state_relation_def) + apply (simp add: state_relation_def) + apply (clarsimp simp: state_relation_def fun_upd_def[symmetric] + cte_wp_at_caps_of_state fun_upd_idem) + apply (clarsimp simp: state_relation_def) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap pt_types_of_heap_eq o_def) + apply (subst return_bind[where x="()", symmetric], subst updateCap_def, + rule corres_split_forwards') + apply (rule corres_guard_imp, rule getCTE_symb_exec_r, simp+) + prefer 3 + apply clarsimp + apply (rule setCTE_corres) + apply (wp | simp)+ + apply (fastforce elim!: cte_wp_at_weakenE) + apply wp + apply fastforce + done + +declare corres_False' [simp] + +lemma invokeCNode_corres: + "cnodeinv_relation ci ci' \ + corres (dc \ dc) + (einvs and simple_sched_action and valid_cnode_inv ci) + (invs' and sch_act_simple and valid_cnode_inv' ci') + (invoke_cnode ci) (invokeCNode ci')" + apply (simp add: invoke_cnode_def invokeCNode_def) + apply (cases ci, simp_all) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule cteInsert_corres) + apply simp+ + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def + elim!: cte_wp_at_cte_at) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply clarsimp + apply (rule corres_guard_imp) + apply (erule cteMove_corres) + apply (clarsimp simp: cte_wp_at_caps_of_state real_cte_tcb_valid) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule cteRevoke_corres) + apply (rule corres_guard_imp [OF cteDelete_corres]) + apply (clarsimp simp: cte_at_typ cap_table_at_typ halted_emptyable) + apply simp + apply (rename_tac cap1 cap2 p1 p2 p3) + apply (elim conjE exE) + apply (intro impI conjI) + apply simp + apply (rule corres_guard_imp) + apply (rule_tac F="wellformed_cap cap1 \ wellformed_cap cap2" + in corres_gen_asm) + apply (erule (1) cteSwap_corres [OF refl refl], simp+) + apply (simp add: invs_def valid_state_def valid_pspace_def + real_cte_tcb_valid valid_cap_def2) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + cte_wp_at_ctes_of 
weak_derived'_def) + apply (simp split del: if_split) + apply (rule_tac F = "cte_map p1 \ cte_map p3" in corres_req) + apply clarsimp + apply (drule (2) cte_map_inj_eq [OF _ cte_wp_at_cte_at cte_wp_at_cte_at]) + apply clarsimp + apply clarsimp + apply clarsimp + apply simp + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (erule cteMove_corres) + apply (erule cteMove_corres) + apply wp + apply (simp add: cte_wp_at_caps_of_state) + apply (wp cap_move_caps_of_state cteMove_cte_wp_at [simplified o_def])+ + apply (simp add: real_cte_tcb_valid invs_def valid_state_def valid_pspace_def) + apply (elim conjE exE) + apply (drule(3) real_cte_weak_derived_not_reply_masterD)+ + apply (clarsimp simp: cte_wp_at_caps_of_state + ex_cte_cap_to_cnode_always_appropriate_strg + cte_wp_at_conj) + apply (simp add: cte_wp_at_ctes_of) + apply (elim conjE exE) + apply (intro impI conjI) + apply fastforce + apply (fastforce simp: weak_derived'_def) + apply simp + apply (erule weak_derived_sym') + apply clarsimp + apply simp + apply clarsimp + apply simp + apply clarsimp + apply clarsimp + apply (rename_tac prod) + apply (simp add: getThreadCallerSlot_def locateSlot_conv objBits_simps) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (subgoal_tac "thread + 2^cte_level_bits * tcbCallerSlot = cte_map (thread, tcb_cnode_index 3)") + prefer 2 + apply (simp add: cte_map_def tcb_cnode_index_def tcbCallerSlot_def cte_level_bits_def) + apply (rule corres_split[OF getSlotCap_corres], simp) + apply (rule_tac P="\s. (is_reply_cap cap \ cap = cap.NullCap) \ + (is_reply_cap cap \ + (einvs and cte_at (threada, tcb_cnode_index 3) and + cte_wp_at (\c. c = cap.NullCap) prod and + real_cte_at prod and valid_cap cap and + K ((threada, tcb_cnode_index 3) \ prod)) s)" and + P'="\s. (isReplyCap rv' \ \ capReplyMaster rv') \ (invs' and + cte_wp_at' + (\c. weak_derived' rv' (cteCap c) \ + cteCap c \ capability.NullCap) + (cte_map (threada, tcb_cnode_index 3)) and + cte_wp_at' (\c. 
cteCap c = capability.NullCap) (cte_map prod)) s" in corres_inst) + apply (case_tac cap, simp_all add: isCap_simps is_cap_simps split: bool.split)[1] + apply clarsimp + apply (rule corres_guard_imp) + apply (rule cteMove_corres) + apply (simp add: real_cte_tcb_valid)+ + apply (wp get_cap_wp) + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp)+ + apply clarsimp + apply (rule conjI) + apply (rule tcb_at_cte_at) + apply fastforce + apply (simp add: tcb_cap_cases_def) + apply (clarsimp simp: cte_wp_at_cte_at) + apply (rule conjI) + apply (frule tcb_at_invs) + apply (frule_tac ref="tcb_cnode_index 3" and Q="is_reply_cap or (=) cap.NullCap" + in tcb_cap_wp_at) + apply (clarsimp split: Structures_A.thread_state.split_asm)+ + apply (clarsimp simp: cte_wp_at_def is_cap_simps all_rights_def) + apply clarsimp + apply (rule conjI, simp add: cte_wp_valid_cap invs_valid_objs) + apply (clarsimp simp: cte_wp_at_def is_cap_simps all_rights_def) + apply clarsimp + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply clarsimp + apply (case_tac "has_cancel_send_rights x7", + frule has_cancel_send_rights_ep_cap, + simp add: is_cap_simps) + apply (clarsimp simp: when_def unless_def isCap_simps) + apply (rule corres_guard_imp) + apply (rule cancelBadgedSends_corres) + apply (simp add: valid_cap_def) + apply (simp add: valid_cap'_def) + apply (clarsimp) + done + +lemma updateCap_noop_irq_handlers: + "\valid_irq_handlers' and cte_wp_at' (\cte. cteCap cte = cap) slot\ + updateCap slot cap + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def irq_issued'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq[where f=ksInterruptState, OF updateCap_ksInterruptState]) + apply wp + apply (simp, subst(asm) tree_cte_cteCap_eq[unfolded o_def]) + apply (simp split: option.split_asm + add: modify_map_apply fun_upd_idem) + done + +crunch ct_idle_or_in_cur_domain'[wp]: updateCap ct_idle_or_in_cur_domain' + (rule: ct_idle_or_in_cur_domain'_lift2) + +lemma updateCap_noop_invs: + "\invs' and cte_wp_at' (\cte. cteCap cte = cap) slot\ + updateCap slot cap + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def + valid_pspace'_def valid_mdb'_def) + apply (rule hoare_pre) + apply (wp updateCap_ctes_of_wp updateCap_iflive' + updateCap_ifunsafe' updateCap_idle' + valid_irq_node_lift + updateCap_noop_irq_handlers sch_act_wf_lift + untyped_ranges_zero_lift) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_apply) + apply (strengthen untyped_ranges_zero_delta[where xs=Nil, mk_strg I E]) + apply (case_tac cte) + apply (clarsimp simp: fun_upd_idem cteCaps_of_def modify_map_apply + valid_mdb'_def) + apply (frule(1) ctes_of_valid') + apply (frule(1) valid_global_refsD_with_objSize) + apply clarsimp + apply (rule_tac P="(=) cte" for cte in if_unsafe_then_capD') + apply (simp add: cte_wp_at_ctes_of) + apply assumption + apply clarsimp + done + +lemmas make_zombie_or_noop_or_arch_invs + = hoare_vcg_disj_lift [OF updateCap_noop_invs + hoare_vcg_disj_lift [OF make_zombie_invs' arch_update_updateCap_invs], + simplified] + +lemma invokeCNode_invs' [wp]: + "\invs' and sch_act_simple and valid_cnode_inv' cinv\ + invokeCNode cinv \\y. 
invs'\" + unfolding invokeCNode_def + apply (cases cinv) + apply (wp cteRevoke_invs' cteInsert_invs | simp split del: if_split)+ + apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def isCap_simps badge_derived'_def) + apply (erule(1) valid_irq_handlers_ctes_ofD) + apply (clarsimp simp: invs'_def valid_state'_def) + defer + apply (wp cteRevoke_invs' | simp)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (erule weak_derived_sym') + defer + apply (simp add: getSlotCap_def getThreadCallerSlot_def locateSlot_conv) + apply (rule hoare_pre) + apply (wp haskell_fail_wp getCTE_wp|wpc)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac ctea) + apply clarsimp + apply (erule ctes_of_valid_cap') + apply fastforce + apply ((wp cteDelete_invs'|simp split del: if_split)+) + apply (wp cteMove_ex cteMove_cte_wp_at)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (fastforce simp: isCap_simps weak_derived'_def) + apply (rule hoare_pre) + apply simp + apply (wp | wpc | simp add: unless_def)+ + done + +declare withoutPreemption_lift [wp] + +crunch irq_states' [wp]: capSwapForDelete valid_irq_states' + +lemma setVCPU_valid_irq_states' [wp]: + "setObject p (vcpu::vcpu) \valid_irq_states'\" + by (wp valid_irq_states_lift') + +crunches writeVCPUHardwareReg, readVCPUHardwareReg + for irq_masks[wp]: "\s. P (irq_masks s)" + +crunches vcpuUpdate, vcpuWriteReg, vcpuSaveReg, vcpuRestoreReg, vcpuReadReg + for irq_states'[wp]: valid_irq_states' + and ksInterrupt[wp]: "\s. P (ksInterruptState s)" + (ignore: getObject setObject) + +lemma saveVirtTimer_irq_states'[wp]: + "saveVirtTimer vcpu_ptr \valid_irq_states'\" + unfolding saveVirtTimer_def + by (wpsimp simp: read_cntpct_def + wp: doMachineOp_irq_states') + +lemma restoreVirtTimer_irq_states'[wp]: + "restoreVirtTimer vcpu_ptr \valid_irq_states'\" + unfolding restoreVirtTimer_def isIRQActive_def + by (simp add: liftM_bind) + (wpsimp wp: maskInterrupt_irq_states' getIRQState_wp hoare_vcg_imp_lift' doMachineOp_irq_states' + simp: if_apply_def2 read_cntpct_def) + +crunches + vcpuDisable, vcpuEnable, vcpuRestore, vcpuRestoreReg, vcpuSaveReg, + vcpuUpdate, vgicUpdateLR, vcpuSave + for irq_states' [wp]: valid_irq_states' + (wp: crunch_wps maskInterrupt_irq_states'[where b=True, simplified] no_irq no_irq_mapM_x + simp: crunch_simps no_irq_isb no_irq_dsb + set_gic_vcpu_ctrl_hcr_def setSCTLR_def setHCR_def get_gic_vcpu_ctrl_hcr_def + getSCTLR_def get_gic_vcpu_ctrl_lr_def get_gic_vcpu_ctrl_apr_def + get_gic_vcpu_ctrl_vmcr_def + set_gic_vcpu_ctrl_vmcr_def set_gic_vcpu_ctrl_apr_def uncurry_def + set_gic_vcpu_ctrl_lr_def + ignore: saveVirtTimer) + +crunch irq_states' [wp]: finaliseCap valid_irq_states' + (wp: crunch_wps unless_wp getASID_wp no_irq_setVSpaceRoot + simp: crunch_simps o_def pteAtIndex_def) + +lemma finaliseSlot_IRQInactive': + "s \ \valid_irq_states'\ finaliseSlot' a b + \\_. valid_irq_states'\, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" +proof (induct rule: finalise_spec_induct) + case (1 sl exp s) + show ?case + apply (rule hoare_pre_spec_validE) + apply (subst finaliseSlot'_simps_ext) + apply (simp only: split_def) + apply (wp "1.hyps") + apply (unfold Let_def split_def fst_conv snd_conv + case_Zombie_assert_fold haskell_fail_def) + apply (wp getCTE_wp' preemptionPoint_invR| simp add: o_def irq_state_independent_HI)+ + apply (rule hoare_post_imp [where Q="\_. 
valid_irq_states'"]) + apply simp + apply wp[1] + apply (rule spec_strengthen_postE) + apply (rule "1.hyps", (assumption|rule refl)+) + apply simp + apply (wp hoare_drop_imps hoare_vcg_all_lift | simp add: locateSlot_conv)+ + done +qed + +lemma finaliseSlot_IRQInactive: + "\valid_irq_states'\ finaliseSlot a b + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (unfold validE_E_def) + apply (rule hoare_strengthen_postE) + apply (rule use_spec(2) [OF finaliseSlot_IRQInactive', folded finaliseSlot_def]) + apply (rule TrueI) + apply assumption + done + +lemma finaliseSlot_irq_states': + "\valid_irq_states'\ finaliseSlot a b \\rv. valid_irq_states'\" + by (wp finaliseSlot_preservation | clarsimp)+ + +lemma cteDelete_IRQInactive: + "\valid_irq_states'\ cteDelete x y + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: cteDelete_def split_def) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) + apply (rule validE_E_validE) + apply (rule finaliseSlot_IRQInactive) + apply simp + apply simp + apply assumption + done + +lemma cteDelete_irq_states': + "\valid_irq_states'\ cteDelete x y + \\rv. valid_irq_states'\" + apply (simp add: cteDelete_def split_def) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) + apply (rule hoare_valid_validE) + apply (rule finaliseSlot_irq_states') + apply simp + apply simp + apply assumption + done + +lemma preemptionPoint_IRQInactive_spec: + "s \ \valid_irq_states'\ preemptionPoint + \\_. valid_irq_states'\, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply wp + apply (rule hoare_pre, wp preemptionPoint_invR) + apply clarsimp+ + done + +lemma cteRevoke_IRQInactive': + "s \ \valid_irq_states'\ cteRevoke x + \\_. \\, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" +proof (induct rule: cteRevoke.induct) + case (1 p s') + show ?case + apply (subst cteRevoke.simps) + apply (wp "1.hyps" unlessE_wp whenE_wp preemptionPoint_IRQInactive_spec + cteDelete_IRQInactive cteDelete_irq_states' getCTE_wp')+ + apply clarsimp + done +qed + +lemma cteRevoke_IRQInactive: + "\valid_irq_states'\ cteRevoke x + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (unfold validE_E_def) + apply (rule use_spec) + apply (rule cteRevoke_IRQInactive') + done + +lemma inv_cnode_IRQInactive: + "\valid_irq_states'\ invokeCNode cnode_inv + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: invokeCNode_def) + apply (rule hoare_pre) + apply (wp cteRevoke_IRQInactive finaliseSlot_IRQInactive + cteDelete_IRQInactive + whenE_wp + | wpc + | simp add: split_def)+ + done + +end + +end \ No newline at end of file diff --git a/proof/refine/AARCH64/CSpace1_R.thy b/proof/refine/AARCH64/CSpace1_R.thy new file mode 100644 index 0000000000..73709b9e1c --- /dev/null +++ b/proof/refine/AARCH64/CSpace1_R.thy @@ -0,0 +1,7242 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + CSpace refinement +*) + +theory CSpace1_R +imports + CSpace_I +begin + +context Arch begin global_naming AARCH64_A (*FIXME: arch_split*) + +lemmas final_matters_def = final_matters_def[simplified final_matters_arch_def] + +declare final_matters_simps[simp del] + +lemmas final_matters_simps[simp] + = final_matters_def[split_simps cap.split arch_cap.split] + +end + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma isMDBParentOf_CTE1: + "isMDBParentOf (CTE cap node) cte = + (\cap' node'. cte = CTE cap' node' \ sameRegionAs cap cap' + \ mdbRevocable node + \ (isEndpointCap cap \ capEPBadge cap \ 0 \ + capEPBadge cap = capEPBadge cap' \ \ mdbFirstBadged node') + \ (isNotificationCap cap \ capNtfnBadge cap \ 0 \ + capNtfnBadge cap = capNtfnBadge cap' \ \ mdbFirstBadged node'))" + apply (simp add: isMDBParentOf_def Let_def split: cte.splits split del: if_split) + apply (clarsimp simp: Let_def) + apply (fastforce simp: isCap_simps) + done + +lemma isMDBParentOf_CTE: + "isMDBParentOf (CTE cap node) cte = + (\cap' node'. cte = CTE cap' node' \ sameRegionAs cap cap' + \ mdbRevocable node + \ (capBadge cap, capBadge cap') \ capBadge_ordering (mdbFirstBadged node'))" + apply (simp add: isMDBParentOf_CTE1) + apply (intro arg_cong[where f=Ex] ext conj_cong refl) + apply (cases cap, simp_all add: isCap_simps) + apply (auto elim!: sameRegionAsE simp: isCap_simps) + done + +lemma isMDBParentOf_trans: + "\ isMDBParentOf a b; isMDBParentOf b c \ \ isMDBParentOf a c" + apply (cases a) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (frule(1) sameRegionAs_trans, simp) + apply (erule(1) capBadge_ordering_trans) + done + +lemma parentOf_trans: + "\ s \ a parentOf b; s \ b parentOf c \ \ s \ a parentOf c" + by (auto simp: parentOf_def elim: isMDBParentOf_trans) + +lemma subtree_parent: + "s \ a \ b \ s \ a parentOf b" + by (erule subtree.induct) auto + +lemma leadsto_is_prev: + "\ m \ p \ c; m c = Some (CTE cap node); + valid_dlist m; no_0 m \ \ + p = mdbPrev node" + by (fastforce simp add: next_unfold' valid_dlist_def Let_def no_0_def) + +lemma subtree_target_Some: + "m \ a \ b \ m b \ None" + by (erule subtree.cases) (auto simp: parentOf_def) + +lemma subtree_prev_loop: + "\ m p = Some (CTE cap node); no_loops m; valid_dlist m; no_0 m \ \ + m \ p \ mdbPrev node = False" + apply clarsimp + apply (frule subtree_target_Some) + apply (drule subtree_mdb_next) + apply (subgoal_tac "m \ p \\<^sup>+ p") + apply (simp add: no_loops_def) + apply (erule trancl_into_trancl) + apply (clarsimp simp: mdb_next_unfold) + apply (fastforce simp: next_unfold' valid_dlist_def no_0_def Let_def) + done + +lemma subtree_trans_lemma: + assumes "s \ b \ c" + shows "s \ a \ b \ s \ a \ c" + using assms +proof induct + case direct_parent + thus ?case + by (blast intro: trans_parent parentOf_trans subtree_parent) +next + case (trans_parent y z) + have IH: "s \ a \ b \ s \ a \ y" by fact+ + have step: "s \ y \ z" "z \ 0" "s \ b parentOf z" by fact+ + + have "s \ a \ b" by fact+ + hence "s \ a \ y" and "s \ a parentOf b" by (auto intro: IH subtree_parent) + moreover + with step + have "s \ a parentOf z" by - (rule parentOf_trans) + ultimately + show ?case using step by - (rule subtree.trans_parent) +qed + +lemma subtree_trans: "\ s \ a \ b; s \ b \ c \ \ s \ a \ c" + by (rule subtree_trans_lemma) + +lemma same_arch_region_as_relation: + "\acap_relation c d; acap_relation c' d'\ \ + arch_same_region_as c c' = + sameRegionAs (ArchObjectCap d) (ArchObjectCap d')" + by (cases c; cases c') + (auto simp: AARCH64_H.sameRegionAs_def sameRegionAs_def Let_def isCap_simps mask_def + add_diff_eq) + +lemma is_phyiscal_relation: + "cap_relation c c' \ is_physical c = isPhysicalCap c'" + by (auto simp: is_physical_def arch_is_physical_def + split: cap.splits arch_cap.splits) + +lemma obj_ref_of_relation: + "\ cap_relation c c'; capClass c' = PhysicalClass \ \ + obj_ref_of c = capUntypedPtr c'" + by (cases c; simp) (rename_tac 
arch_cap, case_tac arch_cap, auto) + +lemma obj_size_relation: + "\ cap_relation c c'; capClass c' = PhysicalClass \ \ + obj_size c = capUntypedSize c'" + apply (cases c, simp_all add: objBits_simps' zbits_map_def + cte_level_bits_def + split: option.splits sum.splits) + apply (rename_tac arch_cap) + apply (case_tac arch_cap; simp add: objBits_def AARCH64_H.capUntypedSize_def bit_simps') + done + +lemma same_region_as_relation: + "\ cap_relation c d; cap_relation c' d' \ \ + same_region_as c c' = sameRegionAs d d'" + apply (cases c) + apply clarsimp + apply (clarsimp simp: sameRegionAs_def isCap_simps Let_def is_phyiscal_relation) + apply (auto simp: obj_ref_of_relation obj_size_relation cong: conj_cong)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def bits_of_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply simp + apply (cases c') + apply (clarsimp simp: same_arch_region_as_relation| + clarsimp simp: sameRegionAs_def isCap_simps Let_def)+ + done + +lemma can_be_is: + "\ cap_relation c (cteCap cte); cap_relation c' (cteCap cte'); + mdbRevocable (cteMDBNode cte) = r; + mdbFirstBadged (cteMDBNode cte') = r' \ \ + should_be_parent_of c r c' r' = isMDBParentOf cte cte'" + unfolding should_be_parent_of_def isMDBParentOf_def + apply (cases cte) + apply (rename_tac cap mdbnode) + apply (cases cte') + apply (rename_tac cap' mdbnode') + apply (clarsimp split del: if_split) + apply (case_tac "mdbRevocable mdbnode") + prefer 2 + apply simp + apply (clarsimp split del: if_split) + apply (case_tac "RetypeDecls_H.sameRegionAs cap cap'") + prefer 2 + apply (simp add: same_region_as_relation) + apply (simp add: same_region_as_relation split del: if_split) + apply (cases c, simp_all add: isCap_simps) + apply (cases c', auto simp: sameRegionAs_def Let_def isCap_simps)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps is_cap_simps)[1] + apply (auto simp: Let_def)[1] + done + +lemma no_fail_getCTE [wp]: + "no_fail (cte_at' p) (getCTE p)" + apply (simp add: getCTE_def getObject_def split_def + loadObject_cte alignCheck_def unless_def + alignError_def is_aligned_mask[symmetric] + cong: kernel_object.case_cong) + apply (rule no_fail_pre, (wp | wpc)+) + apply (clarsimp simp: cte_wp_at'_def getObject_def + loadObject_cte split_def in_monad + dest!: in_singleton + split del: if_split) + apply (clarsimp simp: in_monad typeError_def objBits_simps + magnitudeCheck_def + split: kernel_object.split_asm if_split_asm option.split_asm + split del: if_split) + apply simp+ + done + +lemma tcb_cases_related: + "tcb_cap_cases ref = Some (getF, setF, restr) \ + \getF' setF'. (\x. tcb_cte_cases (cte_map (x, ref) - x) = Some (getF', setF')) + \ (\tcb tcb'. 
tcb_relation tcb tcb' \ cap_relation (getF tcb) (cteCap (getF' tcb')))" + by (simp add: tcb_cap_cases_def tcb_cnode_index_def to_bl_1 + cte_map_def' tcb_relation_def + split: if_split_asm) + +lemma pspace_relation_cte_wp_at: + "\ pspace_relation (kheap s) (ksPSpace s'); + cte_wp_at ((=) c) (cref, oref) s; pspace_aligned' s'; + pspace_distinct' s' \ + \ cte_wp_at' (\cte. cap_relation c (cteCap cte)) (cte_map (cref, oref)) s'" + apply (simp add: cte_wp_at_cases) + apply (erule disjE) + apply clarsimp + apply (drule(1) pspace_relation_absD) + apply (simp add: unpleasant_helper) + apply (drule spec, drule mp, erule domI) + apply (clarsimp simp: cte_relation_def) + apply (drule(2) aligned_distinct_obj_atI'[where 'a=cte]) + apply simp + apply (drule ko_at_imp_cte_wp_at') + apply (clarsimp elim!: cte_wp_at_weakenE') + apply clarsimp + apply (drule(1) pspace_relation_absD) + apply (clarsimp simp: tcb_relation_cut_def) + apply (simp split: kernel_object.split_asm) + apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb]) + apply simp + apply (drule tcb_cases_related) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (erule(2) cte_wp_at_tcbI') + apply fastforce + apply simp + done + +lemma pspace_relation_ctes_ofI: + "\ pspace_relation (kheap s) (ksPSpace s'); + cte_wp_at ((=) c) slot s; pspace_aligned' s'; + pspace_distinct' s' \ + \ \cte. ctes_of s' (cte_map slot) = Some cte \ cap_relation c (cteCap cte)" + apply (cases slot, clarsimp) + apply (drule(3) pspace_relation_cte_wp_at) + apply (simp add: cte_wp_at_ctes_of) + done + +lemma get_cap_corres_P: + "corres (\x y. cap_relation x (cteCap y) \ P x) + (cte_wp_at P cslot_ptr) + (pspace_aligned' and pspace_distinct') + (get_cap cslot_ptr) (getCTE (cte_map cslot_ptr))" + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp) + apply clarsimp + apply (drule cte_wp_at_norm) + apply (clarsimp simp: state_relation_def) + apply (drule (3) pspace_relation_ctes_ofI) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (cases cslot_ptr) + apply (rename_tac oref cref) + apply (clarsimp simp: cte_wp_at_def) + apply (frule in_inv_by_hoareD[OF getCTE_inv]) + apply (drule use_valid [where P="\", OF _ getCTE_sp TrueI]) + apply (clarsimp simp: state_relation_def) + apply (drule pspace_relation_ctes_ofI) + apply (simp add: cte_wp_at_def) + apply assumption+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemmas get_cap_corres = get_cap_corres_P[where P="\", simplified] + +lemma cap_relation_masks: + "cap_relation c c' \ cap_relation + (cap_rights_update (cap_rights c \ rmask) c) + (RetypeDecls_H.maskCapRights (rights_mask_map rmask) c')" + apply (case_tac c, simp_all add: isCap_defs maskCapRights_def Let_def + rights_mask_map_def maskVMRights_def + AllowSend_def AllowRecv_def + cap_rights_update_def + split del: if_split) + apply (clarsimp simp add: isCap_defs) + by (rule ArchAcc_R.arch_cap_rights_update + [simplified, simplified rights_mask_map_def]) + +lemma getCTE_wp: + "\\s. cte_at' p s \ (\cte. cte_wp_at' ((=) cte) p s \ Q cte s)\ getCTE p \Q\" + apply (clarsimp simp add: getCTE_def valid_def cte_wp_at'_def) + apply (drule getObject_cte_det) + apply clarsimp + done + +lemma getCTE_ctes_of: + "\\s. ctes_of s p \ None \ P (the (ctes_of s p)) (ctes_of s)\ getCTE p \\rv s. P rv (ctes_of s)\" + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma getCTE_wp': + "\\s. \cte. 
cte_wp_at' ((=) cte) p s \ Q cte s\ getCTE p \Q\" + apply (clarsimp simp add: getCTE_def valid_def cte_wp_at'_def) + apply (drule getObject_cte_det) + apply clarsimp + done + +lemma getSlotCap_corres: + "cte_ptr' = cte_map cte_ptr \ + corres cap_relation + (cte_at cte_ptr) + (pspace_distinct' and pspace_aligned') + (get_cap cte_ptr) + (getSlotCap cte_ptr')" + apply (simp add: getSlotCap_def) + apply (subst bind_return [symmetric]) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_trivial, simp) + apply (wp | simp)+ + done + +lemma maskCapRights [simp]: + "cap_relation c c' \ + cap_relation (mask_cap msk c) (maskCapRights (rights_mask_map msk) c')" + by (simp add: mask_cap_def cap_relation_masks) + +lemma maskCap_valid [simp]: + "s \' RetypeDecls_H.maskCapRights R cap = s \' cap" + by (clarsimp simp: valid_cap'_def maskCapRights_def isCap_simps + capAligned_def AARCH64_H.maskCapRights_def + split: capability.split arch_capability.split) + +lemma getSlotCap_valid_cap: + "\valid_objs'\ getSlotCap t \\r. valid_cap' r and cte_at' t\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_valid_cap | simp)+ + done + +lemmas getSlotCap_valid_cap1 [wp] = getSlotCap_valid_cap [THEN hoare_conjD1] +lemmas getSlotCap_valid_cap2 [wp] = getSlotCap_valid_cap [THEN hoare_conjD2] + +lemma resolveAddressBits_real_cte_at': + "\ valid_objs' and valid_cap' cap \ + resolveAddressBits cap addr depth + \\rv. real_cte_at' (fst rv)\, -" +proof (induct rule: resolveAddressBits.induct) + case (1 cap addr depth) + show ?case + apply (clarsimp simp: validE_def validE_R_def valid_def split: sum.split) + apply (subst (asm) resolveAddressBits.simps) + apply (simp only: Let_def split: if_split_asm) + prefer 2 + apply (simp add: in_monad) + apply (simp only: in_bindE_R K_bind_def) + apply (elim exE conjE) + apply (simp only: split: if_split_asm) + apply (clarsimp simp: in_monad locateSlot_conv stateAssert_def) + apply (cases cap) + apply (simp_all add: isCap_defs)[12] + apply (clarsimp simp add: valid_cap'_def objBits_simps' cte_level_bits_def + split: option.split_asm) + apply (simp only: in_bindE_R K_bind_def) + apply (elim exE conjE) + apply (simp only: cap_case_CNodeCap split: if_split_asm) + apply (drule_tac cap=nextCap in isCapDs(4), elim exE) + apply (simp only: in_bindE_R K_bind_def) + apply (frule (12) 1 [OF refl], (assumption | rule refl)+) + apply (clarsimp simp: in_monad locateSlot_conv objBits_simps stateAssert_def) + apply (cases cap) + apply (simp_all add: isCap_defs)[12] + apply (frule in_inv_by_hoareD [OF getSlotCap_inv]) + apply simp + apply (frule (1) post_by_hoare [OF getSlotCap_valid_cap]) + apply (simp add: valid_def validE_def validE_R_def) + apply (erule allE, erule impE, blast) + apply (drule (1) bspec) + apply simp + apply (clarsimp simp: in_monad locateSlot_conv objBits_simps stateAssert_def) + apply (cases cap) + apply (simp_all add: isCap_defs)[12] + apply (frule in_inv_by_hoareD [OF getSlotCap_inv]) + apply (clarsimp simp: valid_cap'_def cte_level_bits_def objBits_defs) + done +qed + +lemma resolveAddressBits_cte_at': + "\ valid_objs' and valid_cap' cap \ + resolveAddressBits cap addr depth + \\rv. cte_at' (fst rv)\, \\rv s. 
True\" + apply (fold validE_R_def) + apply (rule hoare_strengthen_postE_R) + apply (rule resolveAddressBits_real_cte_at') + apply (erule real_cte_at') + done + +declare AllowSend_def[simp] +declare AllowRecv_def[simp] + +lemma cap_map_update_data: + assumes "cap_relation c c'" + shows "cap_relation (update_cap_data p x c) (updateCapData p x c')" +proof - + note simps = update_cap_data_def updateCapData_def word_size + isCap_defs is_cap_defs Let_def badge_bits_def + cap_rights_update_def badge_update_def + { fix x :: machine_word + define y where "y \ (x >> 6) && mask 58" (* guard_bits *) + define z where "z \ unat (x && mask 6)" (* cnode_guard_size_bits *) + have "of_bl (to_bl (y && mask z)) = (of_bl (replicate (64-z) False @ drop (64-z) (to_bl y))::machine_word)" + by (simp add: bl_and_mask) + then + have "y && mask z = of_bl (drop (64 - z) (to_bl y))" + apply simp + apply (subst test_bit_eq_iff [symmetric]) + apply (rule ext) + apply (clarsimp simp: test_bit_of_bl nth_append) + done + } note x = this + from assms + show ?thesis + apply (cases c) + apply (simp_all add: simps)[5] + defer + apply (simp_all add: simps)[4] + apply (clarsimp simp: simps the_arch_cap_def) + apply (rename_tac arch_cap) + apply (case_tac arch_cap; simp add: simps arch_update_cap_data_def + AARCH64_H.updateCapData_def) + \ \CNodeCap\ + apply (simp add: simps word_bits_def the_cnode_cap_def andCapRights_def + rightsFromWord_def data_to_rights_def nth_ucast cteRightsBits_def cteGuardBits_def) + apply (insert x) + apply (subgoal_tac "unat (x && mask 6) < unat (2^6::machine_word)") + prefer 2 + apply (fold word_less_nat_alt)[1] + apply (rule and_mask_less_size) + apply (simp add: word_size) + apply (simp add: word_bw_assocs cnode_padding_bits_def cnode_guard_size_bits_def) + done +qed + + +lemma cte_map_shift: + assumes bl: "to_bl cref' = zs @ cref" + assumes pre: "guard \ cref" + assumes len: "cbits + length guard \ length cref" + assumes aligned: "is_aligned ptr (5 + cbits)" (* cte_level_bits *) + assumes cbits: "cbits \ word_bits - cte_level_bits" + shows + "ptr + 32 * ((cref' >> length cref - (cbits + length guard)) && mask cbits) = \ \2^cte_level_bits\ + cte_map (ptr, take cbits (drop (length guard) cref))" +proof - + let ?l = "length cref - (cbits + length guard)" + from pre obtain xs where + xs: "cref = guard @ xs" by (auto simp: prefix_def less_eq_list_def) + hence len_c: "length cref = length guard + length xs" by simp + with len have len_x: "cbits \ length xs" by simp + + from bl xs + have cref': "to_bl cref' = zs @ guard @ xs" by simp + hence "length (to_bl cref') = length \" by simp + hence 64: "64 = length zs + length guard + length xs" by simp + + have len_conv [simp]: "size ptr = word_bits" + by (simp add: word_size word_bits_def) + + have "to_bl ((cref' >> ?l) && mask cbits) = (replicate (64 - cbits) False) @ + drop (64 - cbits) (to_bl (cref' >> ?l))" + by (simp add: bl_shiftl word_size bl_and_mask + cte_level_bits_def word_bits_def) + also + from len_c len_x cref' 64 + have "\ = (replicate (64 - cbits) False) @ take cbits xs" + by (simp add: bl_shiftr word_size add.commute add.left_commute) + also + from len_x len_c 64 + have "\ = to_bl (of_bl (take cbits (drop (length guard) cref)) :: machine_word)" + by (simp add: xs word_rev_tf takefill_alt rev_take rev_drop) + + finally + show ?thesis + by (simp add: cte_map_def') +qed + +lemma cte_map_shift': + "\ to_bl cref' = zs @ cref; guard \ cref; length cref = cbits + length guard; + is_aligned ptr (5 + cbits); cbits \ word_bits - cte_level_bits \ \ + ptr + 
32 * (cref' && mask cbits) = cte_map (ptr, drop (length guard) cref)" + by (auto dest: cte_map_shift) + +lemma cap_relation_Null2 [simp]: + "cap_relation c NullCap = (c = cap.NullCap)" + by (cases c) auto + +lemmas cnode_cap_case_if = cap_case_CNodeCap + +lemma corres_stateAssert_assume_stronger: + "\ corres_underlying sr nf nf' r P Q f (g ()); + \s s'. \ (s, s') \ sr; P s; Q s' \ \ P' s' \ \ + corres_underlying sr nf nf' r P Q f (stateAssert P' [] >>= g)" + apply (clarsimp simp: bind_assoc stateAssert_def) + apply (rule corres_symb_exec_r [OF _ get_sp]) + apply (rule_tac F="P' x" in corres_req) + apply clarsimp + apply (auto elim: corres_guard_imp)[1] + apply wp+ + done + +lemma cap_table_at_gsCNodes: + "cap_table_at bits ptr s \ (s, s') \ state_relation + \ gsCNodes s' ptr = Some bits" + apply (clarsimp simp: state_relation_def ghost_relation_def + obj_at_def is_cap_table) + apply blast + done + +lemma rab_corres': + "\ cap_relation (fst a) c'; drop (64-bits) (to_bl cref') = snd a; + bits = length (snd a) \ \ + corres (lfr \ (\(cte, bits) (cte', bits'). + cte' = cte_map cte \ length bits = bits')) + (valid_objs and pspace_aligned and valid_cap (fst a)) + (valid_objs' and pspace_distinct' and pspace_aligned' and valid_cap' c') + (resolve_address_bits a) + (resolveAddressBits c' cref' bits)" +unfolding resolve_address_bits_def +proof (induct a arbitrary: c' cref' bits rule: resolve_address_bits'.induct) + case (1 z cap cref) + show ?case + proof (cases "isCNodeCap c'") + case True + with "1.prems" + obtain ptr guard cbits where caps: + "cap = cap.CNodeCap ptr cbits guard" + "c' = CNodeCap ptr cbits (of_bl guard) (length guard)" + apply (cases cap, simp_all add: isCap_defs) + apply auto + done + with "1.prems" + have IH: "\vd vc c' f' cref' bits. + \ cbits + length guard \ 0; \ length cref < cbits + length guard; guard \ cref; + vc = drop (cbits + length guard) cref; vc \ []; vd \ cap.NullCap; + cap_relation vd c'; bits = length vc; is_cnode_cap vd; + drop (64 - bits) (to_bl cref') = vc \ + \ corres (lfr \ (\(cte, bits) (cte', bits'). + cte' = cte_map cte \ length bits = bits')) + (valid_objs and pspace_aligned and (\s. s \ fst (vd,vc))) + (valid_objs' and pspace_distinct' and pspace_aligned' and (\s. 
s \' c')) + (resolve_address_bits' z (vd, vc)) + (CSpace_H.resolveAddressBits c' cref' bits)" + apply - + apply (rule "1.hyps" [of _ cbits guard, OF caps(1)]) + prefer 7 + apply (clarsimp simp: in_monad) + apply (rule get_cap_success) + apply (auto simp: in_monad intro!: get_cap_success) (* takes time *) + done + note if_split [split del] + { assume "cbits + length guard = 0 \ cbits = 0 \ guard = []" + hence ?thesis + apply (simp add: caps isCap_defs + resolveAddressBits.simps resolve_address_bits'.simps) + apply (rule corres_fail) + apply (clarsimp simp: valid_cap_def) + done + } + moreover + { assume "cbits + length guard \ 0 \ \(cbits = 0 \ guard = [])" + hence [simp]: "((cbits + length guard = 0) = False) \ + ((cbits = 0 \ guard = []) = False) \ + (0 < cbits \ guard \ []) " by simp + from "1.prems" + have ?thesis + apply - + apply (rule corres_assume_pre) + apply (subgoal_tac "is_aligned ptr (5 + cbits) \ cbits \ word_bits - cte_level_bits") (*cte_level_bits *) + prefer 2 + apply (clarsimp simp: caps) + apply (erule valid_CNodeCapE) + apply fastforce + apply fastforce + apply (fastforce simp: word_bits_def cte_level_bits_def) + apply (thin_tac "t \ state_relation" for t) + apply (erule conjE) + apply (subst resolveAddressBits.simps) + apply (subst resolve_address_bits'.simps) + apply (simp add: caps isCap_defs Let_def) + apply (simp add: linorder_not_less drop_postfix_eq) + apply (simp add: liftE_bindE[where a="locateSlotCap a b" for a b]) + apply (simp add: locateSlot_conv) + apply (rule corres_stateAssert_assume_stronger[rotated]) + apply (clarsimp simp: valid_cap_def cap_table_at_gsCNodes isCap_simps) + apply (rule and_mask_less_size, simp add: word_bits_def word_size cte_level_bits_def) + apply (erule exE) + apply (cases "guard \ cref") + prefer 2 + apply (clarsimp simp: guard_mask_shift lookup_failure_map_def unlessE_whenE) + apply (clarsimp simp: guard_mask_shift unlessE_whenE) + apply (cases "length cref < cbits + length guard") + apply (simp add: lookup_failure_map_def) + apply simp + apply (cases "length cref = cbits + length guard") + apply clarsimp + apply (rule corres_noopE) + prefer 2 + apply wp + apply wp + apply (clarsimp simp: objBits_simps cte_level_bits_def) + apply (erule (2) valid_CNodeCapE) + apply (erule (3) cte_map_shift') + apply simp + apply simp + apply (subgoal_tac "cbits + length guard < length cref"; simp) + apply (rule corres_initial_splitE) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule getSlotCap_corres) + apply (simp add: objBits_simps cte_level_bits_def) + apply (erule (1) cte_map_shift) + apply simp + apply assumption + apply (simp add: cte_level_bits_def) + apply clarsimp + apply (clarsimp simp: valid_cap_def) + apply (erule cap_table_at_cte_at) + apply simp + apply clarsimp + apply (case_tac "is_cnode_cap rv") + prefer 2 + apply (simp add: cnode_cap_case_if) + apply (rule corres_noopE) + prefer 2 + apply (rule no_fail_pre, rule no_fail_returnOK) + apply (rule TrueI) + prefer 2 + apply (simp add: unlessE_whenE cnode_cap_case_if) + apply (rule IH, (simp_all)[9]) + apply clarsimp + apply (drule postfix_dropD) + apply clarsimp + apply (subgoal_tac "64 + (cbits + length guard) - length cref = (cbits + length guard) + (64 - length cref)") + prefer 2 + apply (drule len_drop_lemma) + apply simp + apply arith + apply simp + apply (subst drop_drop [symmetric]) + apply simp + apply wp + apply (clarsimp simp: objBits_simps cte_level_bits_def) + apply (erule (1) cte_map_shift) + apply simp + apply assumption + apply (simp add: cte_level_bits_def) 
+ apply (wp get_cap_wp) + apply clarsimp + apply (erule (1) cte_wp_valid_cap) + apply wpsimp + done + } + ultimately + show ?thesis by fast + next + case False + with "1.prems" + show ?thesis + by (cases cap) + (auto simp: resolve_address_bits'.simps resolveAddressBits.simps + isCap_defs lookup_failure_map_def) + qed +qed + +lemma getThreadCSpaceRoot: + "getThreadCSpaceRoot t = return t" + by (simp add: getThreadCSpaceRoot_def locateSlot_conv + tcbCTableSlot_def) + +lemma getThreadVSpaceRoot: + "getThreadVSpaceRoot t = return (t+2^cteSizeBits)" (*2^cte_level_bits*) + by (simp add: getThreadVSpaceRoot_def locateSlot_conv objBits_simps' + tcbVTableSlot_def shiftl_t2n cte_level_bits_def) + +lemma getSlotCap_tcb_corres: + "corres (\t c. cap_relation (tcb_ctable t) c) + (tcb_at t and valid_objs and pspace_aligned) + (pspace_distinct' and pspace_aligned') + (gets_the (get_tcb t)) + (getSlotCap t)" + (is "corres ?r ?P ?Q ?f ?g") + using get_cap_corres [where cslot_ptr = "(t, tcb_cnode_index 0)"] + apply (simp add: getSlotCap_def liftM_def[symmetric]) + apply (drule corres_guard_imp [where P="?P" and P'="?Q"]) + apply (clarsimp simp: cte_at_cases tcb_at_def + dest!: get_tcb_SomeD) + apply simp + apply (subst(asm) corres_cong [OF refl refl gets_the_tcb_get_cap[symmetric] refl refl]) + apply simp + apply (simp add: o_def cte_map_def tcb_cnode_index_def) + done + +lemma lookupSlotForThread_corres: + "corres (lfr \ (\(cref, bits) cref'. cref' = cte_map cref)) + (valid_objs and pspace_aligned and tcb_at t) + (valid_objs' and pspace_aligned' and pspace_distinct' and tcb_at' t) + (lookup_slot_for_thread t (to_bl cptr)) + (lookupSlotForThread t cptr)" + apply (unfold lookup_slot_for_thread_def lookupSlotForThread_def) + apply (simp add: const_def) + apply (simp add: getThreadCSpaceRoot) + apply (fold returnOk_liftE) + apply simp + apply (rule corres_initial_splitE) + apply (subst corres_liftE_rel_sum) + apply (rule corres_guard_imp) + apply (rule getSlotCap_tcb_corres) + apply simp + apply simp + apply (subst bindE_returnOk[symmetric]) + apply (rule corres_initial_splitE) + apply (rule rab_corres') + apply simp + apply (simp add: word_size) + apply simp + apply (clarsimp simp: word_size) + prefer 4 + apply wp + apply clarsimp + apply (erule (1) objs_valid_tcb_ctable) + prefer 4 + apply wp + apply clarsimp + apply simp + prefer 2 + apply (rule hoare_weaken_preE) + apply (rule resolve_address_bits_cte_at [unfolded validE_R_def]) + apply clarsimp + prefer 2 + apply (rule hoare_weaken_preE) + apply (rule resolveAddressBits_cte_at') + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (simp add: returnOk_def split_def) + done + +lemmas rab_cte_at' [wp] = resolveAddressBits_cte_at' [folded validE_R_def] + +lemma lookupSlot_cte_at_wp[wp]: + "\valid_objs'\ lookupSlotForThread t addr \\rv. cte_at' rv\, \\r. \\" + apply (simp add: lookupSlotForThread_def) + apply (simp add: getThreadCSpaceRoot_def locateSlot_conv tcbCTableSlot_def) + apply (wp | simp add: split_def)+ + done + +lemma lookupSlot_inv[wp]: + "\P\ lookupSlotForThread t addr \\_. 
P\" + apply (simp add: lookupSlotForThread_def) + apply (simp add: getThreadCSpaceRoot_def locateSlot_conv tcbCTableSlot_def) + apply (wp | simp add: split_def)+ + done + +lemma lookupCap_corres: + "corres (lfr \ cap_relation) + (valid_objs and pspace_aligned and tcb_at t) + (valid_objs' and pspace_aligned' and pspace_distinct' and tcb_at' t) + (lookup_cap t (to_bl ref)) (lookupCap t ref)" + apply (simp add: lookup_cap_def lookupCap_def bindE_assoc + lookupCapAndSlot_def liftME_def split_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF lookupSlotForThread_corres]) + apply (simp add: split_def getSlotCap_def liftM_def[symmetric] o_def) + apply (rule get_cap_corres) + apply (rule hoare_pre, wp lookup_slot_cte_at_wp|simp)+ + done + +lemma setObject_cte_obj_at_tcb': + assumes x: "\tcb f. P (tcbCTable_update f tcb) = P tcb" + "\tcb f. P (tcbVTable_update f tcb) = P tcb" + "\tcb f. P (tcbReply_update f tcb) = P tcb" + "\tcb f. P (tcbCaller_update f tcb) = P tcb" + "\tcb f. P (tcbIPCBufferFrame_update f tcb) = P tcb" + shows + "\\s. P' (obj_at' (P :: tcb \ bool) p s)\ + setObject c (cte::cte) + \\_ s. P' (obj_at' P p s)\" + apply (clarsimp simp: setObject_def in_monad split_def + valid_def lookupAround2_char1 + obj_at'_def ps_clear_upd) + apply (clarsimp elim!: rsubst[where P=P']) + apply (clarsimp simp: updateObject_cte in_monad objBits_simps + tcbCTableSlot_def tcbVTableSlot_def x + typeError_def + split: if_split_asm + Structures_H.kernel_object.split_asm) + done + +lemma setCTE_typ_at': + "\\s. P (typ_at' T p s)\ setCTE c cte \\_ s. P (typ_at' T p s)\" + by (clarsimp simp add: setCTE_def) (wp setObject_typ_at') + +lemmas setObject_typ_at [wp] = setObject_typ_at' [where P=id, simplified] + +lemma setCTE_typ_at [wp]: + "\typ_at' T p\ setCTE c cte \\_. typ_at' T p\" + by (clarsimp simp add: setCTE_def) wp + +lemmas setCTE_typ_ats [wp] = typ_at_lifts [OF setCTE_typ_at'] + +lemma setObject_cte_ksCurDomain[wp]: + "\\s. P (ksCurDomain s)\ setObject ptr (cte::cte) \\_ s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_cte_inv | simp)+ + done + +lemma setCTE_tcb_in_cur_domain': + "\tcb_in_cur_domain' t\ + setCTE c cte + \\_. tcb_in_cur_domain' t\" + unfolding tcb_in_cur_domain'_def setCTE_def + apply (rule_tac f="\s. ksCurDomain s" in hoare_lift_Pf) + apply (wp setObject_cte_obj_at_tcb' | simp)+ + done + +lemma setCTE_ctes_of_wp [wp]: + "\\s. P ((ctes_of s) (p \ cte))\ + setCTE p cte + \\rv s. P (ctes_of s)\" + by (simp add: setCTE_def ctes_of_setObject_cte) + +lemma setCTE_weak_cte_wp_at: + "\\s. (if p = ptr then P (cteCap cte) else cte_wp_at' (\c. P (cteCap c)) p s)\ + setCTE ptr cte + \\uu s. cte_wp_at'(\c. P (cteCap c)) p s\" + apply (simp add: cte_wp_at_ctes_of) + apply wp + apply clarsimp + done + +lemma updateMDB_weak_cte_wp_at: + "\cte_wp_at' (\c. P (cteCap c)) p\ + updateMDB m f + \\uu. cte_wp_at'(\c. P (cteCap c)) p\" + unfolding updateMDB_def + apply simp + apply safe + apply (wp setCTE_weak_cte_wp_at getCTE_wp) + apply (auto simp: cte_wp_at'_def) + done + +lemma cte_wp_at_extract': + "\ cte_wp_at' (\c. 
c = x) p s; cte_wp_at' P p s \ \ P x" + by (clarsimp simp: cte_wp_at_ctes_of) + +lemmas setCTE_valid_objs = setCTE_valid_objs' + +lemma capFreeIndex_update_valid_cap': + "\fa \ fb; fb \ 2 ^ bits; is_aligned (of_nat fb :: machine_word) minUntypedSizeBits; + s \' capability.UntypedCap d v bits fa\ + \ s \' capability.UntypedCap d v bits fb" + apply (clarsimp simp:valid_cap'_def capAligned_def valid_untyped'_def ko_wp_at'_def) + apply (intro conjI impI allI) + apply (elim allE) + apply (erule(1) impE)+ + apply (erule disjE) + apply simp_all + apply (rule disjI1) + apply clarsimp + apply (erule disjoint_subset2[rotated]) + apply (clarsimp) + apply (rule word_plus_mono_right) + apply (rule of_nat_mono_maybe_le[THEN iffD1]) + apply (subst word_bits_def[symmetric]) + apply (erule less_le_trans[OF _ power_increasing]) + apply simp + apply simp + apply (subst word_bits_def[symmetric]) + apply (erule le_less_trans) + apply (erule less_le_trans[OF _ power_increasing]) + apply simp+ + apply (erule is_aligned_no_wrap') + apply (rule word_of_nat_less) + apply simp + apply (erule allE)+ + apply (erule(1) impE)+ + apply simp + done + +lemma maxFreeIndex_update_valid_cap'[simp]: + "s \' capability.UntypedCap d v0a v1a fa \ + s \' capability.UntypedCap d v0a v1a (maxFreeIndex v1a)" + apply (rule capFreeIndex_update_valid_cap'[rotated -1]) + apply assumption + apply (clarsimp simp: valid_cap'_def capAligned_def ko_wp_at'_def + maxFreeIndex_def shiftL_nat)+ + apply (erule is_aligned_weaken[OF is_aligned_triv]) + done + +lemma ctes_of_valid_cap'': + "\ ctes_of s p = Some r; valid_objs' s\ \ s \' (cteCap r)" + apply (rule cte_wp_at_valid_objs_valid_cap'[where P="(=) r", simplified]) + apply (simp add: cte_wp_at_ctes_of) + apply assumption + done + +lemma cap_insert_objs' [wp]: + "\valid_objs' + and valid_cap' cap\ + cteInsert cap src dest \\rv. valid_objs'\" + including no_pre + apply (simp add: cteInsert_def updateCap_def setUntypedCapAsFull_def bind_assoc split del: if_split) + apply (wp setCTE_valid_objs) + apply simp + apply wp+ + apply (clarsimp simp: updateCap_def) + apply (wp|simp)+ + apply (rule hoare_drop_imp)+ + apply wp+ + apply (rule hoare_strengthen_post[OF getCTE_sp]) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps + dest!: ctes_of_valid_cap'') + done + +lemma cteInsert_weak_cte_wp_at: + "\\s. if p = dest then P cap else p \ src \ + cte_wp_at' (\c. P (cteCap c)) p s\ + cteInsert cap src dest + \\uu. cte_wp_at'(\c. P (cteCap c)) p\" + unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def + apply (simp add: bind_assoc split del: if_split) + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ + apply (wp getCTE_ctes_wp)+ + apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ +done + +lemma setCTE_valid_cap: + "\valid_cap' c\ setCTE ptr cte \\r. valid_cap' c\" + by (rule typ_at_lifts, rule setCTE_typ_at') + +lemma updateMDB_valid_cap: + "\valid_cap' c\ updateMDB ptr f \\_. valid_cap' c\" + unfolding updateMDB_def + apply simp + apply rule + apply (wp setCTE_valid_cap) + done + +lemma set_is_modify: + "m p = Some cte \ + m (p \ cteMDBNode_update (\_. (f (cteMDBNode cte))) cte) = + m (p \ cteMDBNode_update f cte)" + apply (case_tac cte) + apply (rule ext) + apply clarsimp + done + +lemma updateMDB_ctes_of_wp: + "\\s. (p \ 0 \ P (modify_map (ctes_of s) p (cteMDBNode_update f))) \ + (p = 0 \ P (ctes_of s))\ + updateMDB p f + \\rv s. 
P (ctes_of s)\" + apply (simp add: updateMDB_def) + apply safe + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (simp add: modify_map_def set_is_modify) + done + +lemma updateMDB_ctes_of_no_0 [wp]: + "\\s. no_0 (ctes_of s) \ + P (modify_map (ctes_of s) p (cteMDBNode_update f))\ + updateMDB p f + \\rv s. P (ctes_of s)\" + by (wp updateMDB_ctes_of_wp) clarsimp + +lemma updateMDB_no_0 [wp]: + "\\s. no_0 (ctes_of s)\ + updateMDB p f + \\rv s. no_0 (ctes_of s)\" + by wp simp + +lemma isMDBParentOf_next_update [simp]: + "isMDBParentOf (cteMDBNode_update (mdbNext_update f) cte) cte' = + isMDBParentOf cte cte'" + "isMDBParentOf cte (cteMDBNode_update (mdbNext_update f) cte') = + isMDBParentOf cte cte'" + apply (cases cte) + apply (cases cte') + apply (simp add: isMDBParentOf_def) + apply (cases cte) + apply (cases cte') + apply (clarsimp simp: isMDBParentOf_def) + done + +lemma isMDBParentOf_next_update_cte [simp]: + "isMDBParentOf (CTE cap (mdbNext_update f node)) cte' = + isMDBParentOf (CTE cap node) cte'" + "isMDBParentOf cte (CTE cap (mdbNext_update f node)) = + isMDBParentOf cte (CTE cap node)" + apply (cases cte') + apply (simp add: isMDBParentOf_def) + apply (cases cte) + apply (clarsimp simp: isMDBParentOf_def) + done + +lemma valid_mdbD1': + "\ ctes_of s p = Some cte; mdbNext (cteMDBNode cte) \ 0; valid_mdb' s \ \ + \c. ctes_of s (mdbNext (cteMDBNode cte)) = Some c \ mdbPrev (cteMDBNode c) = p" + by (clarsimp simp add: valid_mdb'_def valid_mdb_ctes_def valid_dlist_def Let_def) + +lemma valid_mdbD2': + "\ ctes_of s p = Some cte; mdbPrev (cteMDBNode cte) \ 0; valid_mdb' s \ \ + \c. ctes_of s (mdbPrev (cteMDBNode cte)) = Some c \ mdbNext (cteMDBNode c) = p" + by (clarsimp simp add: valid_mdb'_def valid_mdb_ctes_def valid_dlist_def Let_def) + +lemma prev_next_update: + "cteMDBNode_update (mdbNext_update f) (cteMDBNode_update (mdbPrev_update f') x) = + cteMDBNode_update (mdbPrev_update f') (cteMDBNode_update (mdbNext_update f) x)" + apply (cases x) + apply (rename_tac cap mdbnode) + apply (case_tac mdbnode) + apply simp + done + +lemmas modify_map_prev_next_up [simp] = + modify_map_com [where f="cteMDBNode_update (mdbNext_update f)" and + g="cteMDBNode_update (mdbPrev_update f')" for f f', + OF prev_next_update] + +lemma update_prev_next_trancl: + assumes nxt: "m \ x \\<^sup>+ y" + shows "(modify_map m ptr (cteMDBNode_update (mdbPrev_update z))) \ x \\<^sup>+ y" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some cte) + let ?m = "m(ptr \ cteMDBNode_update (mdbPrev_update z) cte)" + + from nxt have "?m \ x \\<^sup>+ y" + proof induct + case (base y) + thus ?case using Some + by - (rule r_into_trancl, clarsimp simp: next_unfold') + next + case (step q r) + show ?case + proof (rule trancl_into_trancl) + show "?m \ q \ r" using step(2) Some + by (simp only: mdb_next_update, clarsimp simp: next_unfold') + qed fact+ + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma update_prev_next_trancl2: + assumes nxt: "(modify_map m ptr (cteMDBNode_update (mdbPrev_update z))) \ x \\<^sup>+ y" + shows "m \ x \\<^sup>+ y" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some cte) + let ?m = "m(ptr \ cteMDBNode_update (mdbPrev_update z) cte)" + + from nxt have "m \ x \\<^sup>+ y" + proof induct + case (base y) + thus ?case using Some + by (fastforce simp: modify_map_def mdb_next_update next_unfold' split: if_split_asm) + next 
+ case (step q r) + show ?case + proof + show "m \ q \ r" using step(2) Some + by (auto simp: modify_map_def mdb_next_update next_unfold' split: if_split_asm) + qed fact+ + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma next_update_lhs: + "(m(p \ cte) \ p \ x) = (x = mdbNext (cteMDBNode cte))" + by (auto simp: mdb_next_update) + +lemma next_update_lhs_trancl: + assumes np: "\ m \ mdbNext (cteMDBNode cte) \\<^sup>* p" + shows "(m(p \ cte) \ p \\<^sup>+ x) = (m \ mdbNext (cteMDBNode cte) \\<^sup>* x)" +proof + assume "m(p \ cte) \ p \\<^sup>+ x" + thus "m \ mdbNext (cteMDBNode cte) \\<^sup>* x" + proof (cases rule: tranclE2') + case base + thus ?thesis + by (simp add: next_update_lhs) + next + case (trancl q) + hence "m(p \ cte) \ mdbNext (cteMDBNode cte) \\<^sup>+ x" + by (simp add: next_update_lhs) + thus ?thesis + by (rule trancl_into_rtrancl [OF mdb_trancl_update_other]) fact+ + qed +next + assume "m \ mdbNext (cteMDBNode cte) \\<^sup>* x" + hence "m(p \ cte) \ mdbNext (cteMDBNode cte) \\<^sup>* x" + by (rule mdb_rtrancl_other_update) fact+ + moreover + have "m(p \ cte) \ p \ mdbNext (cteMDBNode cte)" by (simp add: next_update_lhs) + ultimately show "m(p \ cte) \ p \\<^sup>+ x" by simp +qed + +lemma next_update_lhs_rtrancl: + assumes np: "\ m \ mdbNext (cteMDBNode cte) \\<^sup>* p" + shows "(m(p \ cte) \ p \\<^sup>* x) = (p = x \ m \ mdbNext (cteMDBNode cte) \\<^sup>* x)" + apply rule + apply (erule next_rtrancl_tranclE) + apply (auto simp add: next_update_lhs_trancl [OF np, symmetric]) + done + +definition + cte_mdb_prop :: "(machine_word \ cte) \ machine_word \ (mdbnode \ bool) \ bool" +where + "cte_mdb_prop m p P \ (\cte. m p = Some cte \ P (cteMDBNode cte))" + +lemma cte_mdb_prop_no_0: + "\ no_0 m; cte_mdb_prop m p P \ \ p \ 0" + unfolding cte_mdb_prop_def no_0_def + by auto + +lemma mdb_chain_0_modify_map_prev: + "mdb_chain_0 m \ mdb_chain_0 (modify_map m ptr (cteMDBNode_update (mdbPrev_update f)))" + unfolding mdb_chain_0_def + apply rule + apply (rule update_prev_next_trancl) + apply (clarsimp simp: modify_map_def dom_def split: option.splits if_split_asm) + done + +lemma mdb_chain_0_modify_map_next: + assumes chain: "mdb_chain_0 m" + and no0: "no_0 m" + and dom: "target \ dom m" + and npath: "\ m \ target \\<^sup>* ptr" + shows + "mdb_chain_0 (modify_map m ptr (cteMDBNode_update (mdbNext_update (\_. 
target))))" + (is "mdb_chain_0 ?M") + unfolding mdb_chain_0_def +proof + fix x + assume "x \ dom ?M" + hence xd: "x \ dom m" + by (clarsimp simp: modify_map_def dom_def split: if_split_asm) + hence x0: "m \ x \\<^sup>+ 0" using chain unfolding mdb_chain_0_def by simp + + from dom have t0: "m \ target \\<^sup>+ 0" + using chain unfolding mdb_chain_0_def by simp + + show "?M \ x \\<^sup>+ 0" + proof (cases "m ptr") + case None + thus ?thesis + by (simp add: modify_map_def) (rule x0) + next + case (Some cte) + show ?thesis + proof (cases "m \ x \\<^sup>* ptr") + case False + thus ?thesis + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule mdb_trancl_other_update [OF x0]) + done + next + case True + hence "?M \ x \\<^sup>* ptr" + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule next_rtrancl_tranclE) + apply simp + apply (rule trancl_into_rtrancl) + apply (erule no_loops_upd_last [OF mdb_chain_0_no_loops [OF chain no0]]) + done + moreover have "?M \ ptr \ target" + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (simp add: mdb_next_update) + done + moreover have "?M \ target \\<^sup>+ 0" using t0 + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule mdb_trancl_other_update [OF _ npath]) + done + ultimately show ?thesis by simp + qed + qed +qed + +lemma mdb_chain_0D: + "\ mdb_chain_0 m; p \ dom m \ \ m \ p \\<^sup>+ 0" + unfolding mdb_chain_0_def by auto + +lemma mdb_chain_0_nextD: + "\ mdb_chain_0 m; m p = Some cte \ \ m \ mdbNext (cteMDBNode cte) \\<^sup>* 0" + apply (drule mdb_chain_0D) + apply (erule domI) + apply (erule tranclE2) + apply (simp add: next_unfold') + apply (simp add: next_unfold') + done + +lemma null_mdb_no_next: + "\ valid_dlist m; no_0 m; + cte_mdb_prop m target (\m. mdbPrev m = nullPointer \ mdbNext m = nullPointer) \ + \ \ m \ x \ target" + unfolding cte_mdb_prop_def + by (auto elim: valid_dlistE elim!: valid_mdb_ctesE + simp: nullPointer_def no_0_def next_unfold') + +lemma null_mdb_no_trancl: + "\ valid_dlist m; no_0 m; + cte_mdb_prop m target (\m. mdbPrev m = nullPointer \ mdbNext m = nullPointer) \ + \ \ m \ x \\<^sup>+ target" + by (auto dest: null_mdb_no_next elim: tranclE) + +lemma null_mdb_no_next2: + "\ no_0 m; x \ 0; + cte_mdb_prop m target (\m. 
mdbPrev m = nullPointer \ mdbNext m = nullPointer) \ + \ \ m \ target \ x" + unfolding cte_mdb_prop_def + by (auto elim!: valid_mdb_ctesE simp: nullPointer_def no_0_def next_unfold') + +definition + "capASID cap \ case cap of + ArchObjectCap (FrameCap _ _ _ _ (Some (asid, _))) \ Some asid + | ArchObjectCap (PageTableCap _ _ (Some (asid, _))) \ Some asid + | _ \ None" + +lemmas capASID_simps [simp] = + capASID_def [split_simps capability.split arch_capability.split option.split prod.split] + +definition + "cap_asid_base' cap \ case cap of + ArchObjectCap (ASIDPoolCap _ asid) \ Some asid + | _ \ None" + +lemmas cap_asid_base'_simps [simp] = + cap_asid_base'_def [split_simps capability.split arch_capability.split option.split prod.split] + +definition + "cap_vptr' cap \ case cap of + ArchObjectCap (FrameCap _ _ _ _ (Some (_, vptr))) \ Some vptr + | ArchObjectCap (PageTableCap _ _ (Some (_, vptr))) \ Some vptr + | _ \ None" + +lemmas cap_vptr'_simps [simp] = + cap_vptr'_def [split_simps capability.split arch_capability.split option.split prod.split] + +definition + "weak_derived' cap cap' \ + capMasterCap cap = capMasterCap cap' \ + capBadge cap = capBadge cap' \ + capASID cap = capASID cap' \ + cap_asid_base' cap = cap_asid_base' cap' \ + cap_vptr' cap = cap_vptr' cap' \ + \ \check all fields of ReplyCap except capReplyCanGrant\ + (isReplyCap cap \ capTCBPtr cap = capTCBPtr cap' \ + capReplyMaster cap = capReplyMaster cap')" + +lemma capASID_update [simp]: + "capASID (RetypeDecls_H.updateCapData P x c) = capASID c" + unfolding capASID_def + apply (cases c, simp_all add: updateCapData_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: updateCapData_def + AARCH64_H.updateCapData_def + isCap_simps Let_def) + done + +lemma cap_vptr_update' [simp]: + "cap_vptr' (RetypeDecls_H.updateCapData P x c) = cap_vptr' c" + unfolding capASID_def + apply (cases c, simp_all add: updateCapData_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: updateCapData_def + AARCH64_H.updateCapData_def + isCap_simps Let_def) + done + +lemma cap_asid_base_update' [simp]: + "cap_asid_base' (RetypeDecls_H.updateCapData P x c) = cap_asid_base' c" + unfolding cap_asid_base'_def + apply (cases c, simp_all add: updateCapData_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: updateCapData_def + AARCH64_H.updateCapData_def + isCap_simps Let_def) + done + +lemma updateCapData_Master: + "updateCapData P d cap \ NullCap \ + capMasterCap (updateCapData P d cap) = capMasterCap cap" + apply (cases cap, simp_all add: updateCapData_def isCap_simps Let_def + split: if_split_asm) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, simp_all add: AARCH64_H.updateCapData_def) + done + +lemma updateCapData_Reply: + "isReplyCap (updateCapData P x c) = isReplyCap c" + apply (cases "updateCapData P x c = NullCap") + apply (clarsimp simp: isCap_simps) + apply (simp add: updateCapData_def isCap_simps Let_def) + apply (drule updateCapData_Master) + apply (rule master_eqI, rule isCap_Master) + apply simp + done + +lemma weak_derived_updateCapData: + "\ (updateCapData P x c) \ NullCap; weak_derived' c c'; + capBadge (updateCapData P x c) = capBadge c' \ + \ weak_derived' (updateCapData P x c) c'" + apply (clarsimp simp add: weak_derived'_def updateCapData_Master) + apply (clarsimp elim: impE dest!: iffD1[OF updateCapData_Reply]) + apply (clarsimp 
simp: isCap_simps) + apply (clarsimp simp: Let_def isCap_simps updateCapData_def) + done + +lemma maskCapRights_Reply[simp]: + "isReplyCap (maskCapRights r c) = isReplyCap c" + apply (insert capMasterCap_maskCapRights) + apply (rule master_eqI, rule isCap_Master) + apply simp + done + +lemma capASID_mask [simp]: + "capASID (maskCapRights x c) = capASID c" + unfolding capASID_def + apply (cases c, simp_all add: maskCapRights_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: maskCapRights_def AARCH64_H.maskCapRights_def isCap_simps Let_def) + done + +lemma cap_vptr_mask' [simp]: + "cap_vptr' (maskCapRights x c) = cap_vptr' c" + unfolding cap_vptr'_def + apply (cases c, simp_all add: maskCapRights_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: maskCapRights_def AARCH64_H.maskCapRights_def isCap_simps Let_def) + done + +lemma cap_asid_base_mask' [simp]: + "cap_asid_base' (maskCapRights x c) = cap_asid_base' c" + unfolding cap_vptr'_def + apply (cases c, simp_all add: maskCapRights_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: maskCapRights_def AARCH64_H.maskCapRights_def isCap_simps Let_def) + done + +lemmas cteInsert_valid_objs = cap_insert_objs' + +lemma subtree_not_Null: + assumes null: "m p = Some (CTE capability.NullCap node)" + assumes sub: "m \ c \ p" + shows "False" using sub null + by induct (auto simp: parentOf_def) + +lemma Null_not_subtree: + assumes null: "m c = Some (CTE capability.NullCap node)" + assumes sub: "m \ c \ p" + shows "False" using sub null + by induct (auto simp: parentOf_def) + +lemma subtree_Null_update: + assumes "no_0 m" "valid_dlist m" + assumes null: "m p = Some (CTE NullCap node)" + assumes node: "mdbPrev node = 0" + assumes init: "mdbNext (cteMDBNode cte) = 0" + shows "m \ c \ c' = m (p \ cte) \ c \ c'" +proof + assume "m \ c \ c'" + thus "m (p \ cte) \ c \ c'" using null init + proof induct + case direct_parent + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp add: mdb_next_unfold parentOf_def) + apply assumption + apply (simp add: parentOf_def) + apply (rule conjI) + apply clarsimp + apply clarsimp + done + next + case (trans_parent y z) + have "m \ c \ y" "m \ y \ z" "z \ 0" "m \ c parentOf z" by fact+ + with trans_parent.prems + show ?case + apply - + apply (rule subtree.trans_parent) + apply (erule (1) trans_parent.hyps) + apply (clarsimp simp: mdb_next_unfold parentOf_def) + apply (drule (1) subtree_not_Null) + apply simp + apply assumption + apply (fastforce simp: parentOf_def) + done + qed +next + assume m: "m (p \ cte) \ c \ c'" + thus "m \ c \ c'" using assms m + proof induct + case (direct_parent x) + thus ?case + apply - + apply (cases "c=p") + apply (clarsimp simp: mdb_next_unfold) + apply (rule subtree.direct_parent) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (cases "p\x") + apply (clarsimp simp: parentOf_def split: if_split_asm) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + apply (case_tac z) + apply clarsimp + apply (clarsimp simp: no_0_def valid_dlist_def Let_def) + apply (erule_tac x=c in allE) + apply clarsimp + done + next + case (trans_parent x y) + have "m(p \ cte) \ c \ x" "m(p \ cte) \ x \ y" + "y \ 0" "m(p \ cte) \ c parentOf y" by fact+ + with trans_parent.prems + show ?case + apply - + apply (cases "p=x") + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + 
apply (frule (5) trans_parent.hyps) + apply (rule subtree.trans_parent) + apply assumption + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (clarsimp simp: parentOf_def simp del: fun_upd_apply) + apply (cases "p=y") + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + apply (clarsimp simp: valid_dlist_def Let_def) + apply (erule_tac x=x in allE) + apply (clarsimp simp: no_0_def) + apply (case_tac "p\c") + apply clarsimp + apply clarsimp + apply (erule (1) Null_not_subtree) + done + qed +qed + + +corollary descendants_of_Null_update': + assumes "no_0 m" "valid_dlist m" + assumes "m p = Some (CTE NullCap node)" + assumes "mdbPrev node = 0" + assumes "mdbNext (cteMDBNode cte) = 0" + shows "descendants_of' c (m (p \ cte)) = descendants_of' c m" using assms + by (simp add: descendants_of'_def subtree_Null_update [symmetric]) + +lemma ps_clear_32: + "\ ps_clear p tcbBlockSizeBits s; is_aligned p tcbBlockSizeBits \ + \ ksPSpace s (p + 2^cteSizeBits) = None" + apply (simp add: ps_clear_def) + apply (drule equals0D[where a="p + 2^cteSizeBits"]) + apply (simp add: dom_def field_simps objBits_defs) + apply (erule impE) + apply (rule word_plus_mono_right) + apply (simp add: mask_def) + apply (erule is_aligned_no_wrap') + apply (simp add: mask_def) + apply (erule mp) + apply (erule is_aligned_no_wrap') + apply simp + done + +lemma cte_at_cte_map_in_obj_bits: + "\ cte_at p s; pspace_aligned s; valid_objs s \ + \ cte_map p \ {fst p .. fst p + 2 ^ (obj_bits (the (kheap s (fst p)))) - 1} + \ kheap s (fst p) \ None" + apply (simp add: cte_at_cases) + apply (elim disjE conjE exE) + apply (clarsimp simp: well_formed_cnode_n_def) + apply (drule(1) pspace_alignedD[rotated]) + apply (erule(1) valid_objsE) + apply (frule arg_cong[where f="\S. snd p \ S"]) + apply (simp(no_asm_use) add: domIff) + apply (clarsimp simp: cte_map_def split_def + well_formed_cnode_n_def length_set_helper ex_with_length + valid_obj_def valid_cs_size_def valid_cs_def) + apply (subgoal_tac "of_bl (snd p) * 2^cte_level_bits < 2 ^ (cte_level_bits + length (snd p))") + apply (rule conjI) + apply (erule is_aligned_no_wrap') + apply (simp add: shiftl_t2n mult_ac) + apply (subst add_diff_eq[symmetric]) + apply (rule word_plus_mono_right) + apply (simp add: shiftl_t2n mult_ac) + apply (erule is_aligned_no_wrap') + apply (rule word_power_less_1) + apply (simp add: cte_level_bits_def word_bits_def) + apply (simp add: power_add) + apply (subst mult.commute, rule word_mult_less_mono1) + apply (rule of_bl_length) + apply (simp add: word_bits_def) + apply (simp add: cte_level_bits_def) + apply (simp add: cte_level_bits_def word_bits_def) + apply (drule power_strict_increasing [where a="2 :: nat"]) + apply simp + apply simp + apply (clarsimp simp: cte_map_def split_def field_simps) + apply (subgoal_tac "of_bl (snd p) * 2^cte_level_bits < (2^tcb_bits :: machine_word)") + apply (drule(1) pspace_alignedD[rotated]) + apply (rule conjI) + apply (erule is_aligned_no_wrap') + apply (simp add: word_bits_conv shiftl_t2n mult_ac) + apply simp + apply (rule word_plus_mono_right) + apply (simp add: shiftl_t2n mult_ac) + apply (drule word_le_minus_one_leq) + apply simp + apply (erule is_aligned_no_wrap') + apply simp + apply (simp add: tcb_cap_cases_def tcb_cnode_index_def to_bl_1 cte_level_bits_def + split: if_split_asm) + done + +lemma cte_map_inj: + assumes neq: "p \ p'" + assumes c1: "cte_at p s" + assumes c2: "cte_at p' s" + assumes vo: "valid_objs s" + assumes al: "pspace_aligned s" + assumes pd: "pspace_distinct s" + shows "cte_map 
p \ cte_map p'" + using cte_at_cte_map_in_obj_bits [OF c1 al vo] + cte_at_cte_map_in_obj_bits [OF c2 al vo] + pd + apply (clarsimp simp: pspace_distinct_def + simp del: atLeastAtMost_iff Int_atLeastAtMost) + apply (elim allE, drule mp) + apply (erule conjI)+ + defer + apply (simp add: field_simps + del: atLeastAtMost_iff Int_atLeastAtMost) + apply blast + apply (clarsimp simp: cte_map_def split_def) + apply (thin_tac "b \ a" for b a)+ + apply (rule notE[OF neq]) + apply (insert cte_at_length_limit [OF c1 vo]) + apply (simp add: shiftl_t2n[where n=5, simplified, simplified mult.commute, symmetric] + word_bits_def cte_level_bits_def prod_eq_iff) + apply (insert cte_at_cref_len[where p="fst p" and c="snd p" and c'="snd p'", simplified, OF c1]) + apply (simp add: c2 prod_eqI) + apply (subst rev_is_rev_conv[symmetric]) + apply (rule nth_equalityI) + apply simp + apply clarsimp + apply (drule_tac x="i + 5" in word_eqD) + apply (simp add: nth_shiftl test_bit_of_bl nth_rev) + done + +lemma cte_map_inj_ps: + assumes "p \ p'" + assumes "cte_at p s" + assumes "cte_at p' s" + assumes "valid_pspace s" + shows "cte_map p \ cte_map p'" using assms + apply - + apply (rule cte_map_inj) + apply (auto simp: valid_pspace_def) + done + +lemma cte_map_inj_eq: + "\cte_map p = cte_map p'; + cte_at p s; cte_at p' s; + valid_objs s; pspace_aligned s; pspace_distinct s\ + \ p = p'" + apply (rule classical) + apply (drule (5) cte_map_inj) + apply simp + done + +lemma tcb_cases_related2: + "tcb_cte_cases (v - x) = Some (getF, setF) \ + \getF' setF' restr. tcb_cap_cases (tcb_cnode_index (unat ((v - x) >> cte_level_bits))) = Some (getF', setF', restr) + \ cte_map (x, tcb_cnode_index (unat ((v - x) >> cte_level_bits))) = v + \ (\tcb tcb'. tcb_relation tcb tcb' \ cap_relation (getF' tcb) (cteCap (getF tcb'))) + \ (\tcb tcb' cap cte. tcb_relation tcb tcb' \ cap_relation cap (cteCap cte) + \ tcb_relation (setF' (\x. cap) tcb) (setF (\x. cte) tcb'))" + apply (clarsimp simp: tcb_cte_cases_def tcb_relation_def cte_level_bits_def cteSizeBits_def + tcb_cap_cases_simps[simplified] + split: if_split_asm) + apply (simp_all add: tcb_cnode_index_def cte_level_bits_def cte_map_def field_simps to_bl_1) + done + +lemma other_obj_relation_KOCTE[simp]: + "\ other_obj_relation ko (KOCTE cte)" + by (simp add: other_obj_relation_def + split: Structures_A.kernel_object.splits + AARCH64_A.arch_kernel_obj.splits) + +lemma cte_map_pulls_tcb_to_abstract: + "\ y = cte_map z; pspace_relation (kheap s) (ksPSpace s'); + ksPSpace s' x = Some (KOTCB tcb); + pspace_aligned s; pspace_distinct s; valid_objs s; + cte_at z s; (y - x) \ dom tcb_cte_cases \ + \ \tcb'. 
kheap s x = Some (TCB tcb') \ tcb_relation tcb' tcb + \ (z = (x, tcb_cnode_index (unat ((y - x) >> cte_level_bits))))" + apply (rule pspace_dom_relatedE, assumption+) + apply (erule(1) obj_relation_cutsE; + clarsimp simp: other_obj_relation_def + split: Structures_A.kernel_object.split_asm + AARCH64_A.arch_kernel_obj.split_asm if_split_asm) + apply (drule tcb_cases_related2) + apply clarsimp + apply (frule(1) cte_wp_at_tcbI [OF _ _ TrueI, where t="(a, b)" for a b, simplified]) + apply (erule(5) cte_map_inj_eq [OF sym]) + done + +lemma pspace_relation_update_tcbs: + "\ pspace_relation s s'; s x = Some (TCB otcb); s' x = Some (KOTCB otcb'); + tcb_relation tcb tcb' \ + \ pspace_relation (s(x \ TCB tcb)) (s'(x \ KOTCB tcb'))" + apply (simp add: pspace_relation_def pspace_dom_update + dom_fun_upd2 + del: dom_fun_upd) + apply (erule conjE) + apply (rule ballI, drule(1) bspec) + apply (clarsimp simp: tcb_relation_cut_def split: Structures_A.kernel_object.split_asm) + apply (drule bspec, fastforce) + apply clarsimp + apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) + done + +lemma cte_map_pulls_cte_to_abstract: + "\ y = cte_map z; pspace_relation (kheap s) (ksPSpace s'); + ksPSpace s' y = Some (KOCTE cte); + valid_objs s; pspace_aligned s; pspace_distinct s; cte_at z s \ + \ \sz cs cap. kheap s (fst z) = Some (CNode sz cs) \ cs (snd z) = Some cap + \ cap_relation cap (cteCap cte)" + apply (rule pspace_dom_relatedE, assumption+) + apply (erule(1) obj_relation_cutsE, simp_all) + apply clarsimp + apply (frule(1) cte_map_inj_eq[OF sym], simp_all) + apply (rule cte_wp_at_cteI, (fastforce split: if_split_asm)+) + done + +lemma pspace_relation_update_ctes: + assumes ps: "pspace_relation s s'" + and octe: "s' z = Some (KOCTE octe)" + and s'': "\x. s'' x = (case (s x) of None \ None | Some ko \ + (case ko of CNode sz cs \ + Some (CNode sz (\y. if y \ dom cs \ cte_map (x, y) = z + then Some cap else cs y)) + | _ \ Some ko))" + and rel: "cap_relation cap (cteCap cte')" + shows "pspace_relation s'' (s'(z \ KOCTE cte'))" +proof - + have funny_update_no_dom: + "\fun P v. dom (\y. if y \ dom fun \ P y then Some v else fun y) + = dom fun" + by (rule set_eqI, simp add: domIff) + + have funny_update_well_formed_cnode: + "\sz fun P v. + well_formed_cnode_n sz (\y. if y \ dom fun \ P y then Some v else fun y) + = well_formed_cnode_n sz fun" + by (simp add: well_formed_cnode_n_def funny_update_no_dom) + + have obj_relation_cuts1: + "\ko x. obj_relation_cuts (the (case ko of CNode sz cs \ + Some (CNode sz (\y. if y \ dom cs \ cte_map (x, y) = z + then Some cap else cs y)) + | _ \ Some ko)) x + = obj_relation_cuts ko x" + by (simp split: Structures_A.kernel_object.split + add: funny_update_well_formed_cnode funny_update_no_dom) + + have domEq[simp]: + "dom s'' = dom s" + using s'' + apply (intro set_eqI) + apply (simp add: domIff split: option.split Structures_A.kernel_object.split) + done + + have obj_relation_cuts2: + "\x. 
x \ dom s'' \ obj_relation_cuts (the (s'' x)) x = obj_relation_cuts (the (s x)) x" + using s'' + by (clarsimp simp add: obj_relation_cuts1 dest!: domD) + + show ?thesis using ps octe + apply (clarsimp simp add: pspace_relation_def dom_fun_upd2 + simp del: dom_fun_upd split del: if_split) + apply (rule conjI) + apply (erule subst[where t="dom s'"]) + apply (simp add: pspace_dom_def obj_relation_cuts2) + apply (simp add: obj_relation_cuts2) + apply (rule ballI, drule(1) bspec)+ + apply clarsimp + apply (intro conjI impI) + apply (simp add: s'') + apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] + apply (clarsimp simp: cte_relation_def rel) + apply (rule obj_relation_cutsE, assumption+, simp_all add: s'') + apply (clarsimp simp: cte_relation_def) + apply (clarsimp simp: is_other_obj_relation_type other_obj_relation_def + split: Structures_A.kernel_object.split_asm) + done +qed + +definition pspace_relations where + "pspace_relations ekh kh kh' \ pspace_relation kh kh' \ ekheap_relation ekh kh'" + +lemma set_cap_not_quite_corres_prequel: + assumes cr: + "pspace_relations (ekheap s) (kheap s) (ksPSpace s')" + "(x,t') \ fst (setCTE p' c' s')" + "valid_objs s" "pspace_aligned s" "pspace_distinct s" "cte_at p s" + "pspace_aligned' s'" "pspace_distinct' s'" + assumes c: "cap_relation c (cteCap c')" + assumes p: "p' = cte_map p" + shows "\t. ((),t) \ fst (set_cap c p s) \ + pspace_relations (ekheap t) (kheap t) (ksPSpace t')" + using cr + apply (clarsimp simp: setCTE_def setObject_def in_monad split_def) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (elim disjE exE conjE) + apply (clarsimp simp: lookupAround2_char1 pspace_relations_def) + apply (frule(5) cte_map_pulls_tcb_to_abstract[OF p]) + apply (simp add: domI) + apply (frule tcb_cases_related2) + apply (clarsimp simp: set_cap_def2 split_def bind_def get_object_def + simpler_gets_def assert_def fail_def return_def + set_object_def get_def put_def) + apply (rule conjI) + apply (erule(2) pspace_relation_update_tcbs) + apply (simp add: c) + apply (clarsimp simp: ekheap_relation_def pspace_relation_def) + apply (drule bspec, erule domI) + apply (clarsimp simp: etcb_relation_def tcb_cte_cases_def split: if_split_asm) + apply (clarsimp simp: pspace_relations_def) + apply (frule(5) cte_map_pulls_cte_to_abstract[OF p]) + apply (clarsimp simp: set_cap_def split_def bind_def get_object_def + simpler_gets_def assert_def a_type_def fail_def return_def + set_object_def get_def put_def domI) + apply (erule(1) valid_objsE) + apply (clarsimp simp: valid_obj_def valid_cs_def valid_cs_size_def exI) + apply (rule conjI, clarsimp) + apply (rule conjI) + apply (erule(1) pspace_relation_update_ctes[where cap=c]) + apply clarsimp + apply (intro conjI impI) + apply (rule ext, clarsimp simp add: domI p) + apply (drule cte_map_inj_eq [OF _ _ cr(6) cr(3-5)]) + apply (simp add: cte_at_cases domI) + apply (simp add: prod_eq_iff) + apply (insert p)[1] + apply (clarsimp split: option.split Structures_A.kernel_object.split + intro!: ext) + apply (drule cte_map_inj_eq [OF _ _ cr(6) cr(3-5)]) + apply (simp add: cte_at_cases domI well_formed_cnode_invsI[OF cr(3)]) + apply clarsimp + apply (simp add: c) + apply (clarsimp simp: ekheap_relation_def pspace_relation_def) + apply (drule bspec, erule domI) + apply (clarsimp simp: etcb_relation_def tcb_cte_cases_def split: if_split_asm) + apply (simp add: wf_cs_insert) + done + +lemma setCTE_pspace_only: + "(rv, s') \ fst (setCTE p v s) \ \ps'. s' = ksPSpace_update (\s. 
ps') s" + apply (clarsimp simp: setCTE_def setObject_def in_monad split_def + dest!: in_inv_by_hoareD [OF updateObject_cte_inv]) + apply (rule exI, rule refl) + done + +lemma set_cap_not_quite_corres: + assumes cr: + "pspace_relations (ekheap s) (kheap s) (ksPSpace s')" + "cur_thread s = ksCurThread s'" + "idle_thread s = ksIdleThread s'" + "machine_state s = ksMachineState s'" + "work_units_completed s = ksWorkUnitsCompleted s'" + "domain_index s = ksDomScheduleIdx s'" + "domain_list s = ksDomSchedule s'" + "cur_domain s = ksCurDomain s'" + "domain_time s = ksDomainTime s'" + "(x,t') \ fst (updateCap p' c' s')" + "valid_objs s" "pspace_aligned s" "pspace_distinct s" "cte_at p s" + "pspace_aligned' s'" "pspace_distinct' s'" + "interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s')" + "(arch_state s, ksArchState s') \ arch_state_relation" + assumes c: "cap_relation c c'" + assumes p: "p' = cte_map p" + shows "\t. ((),t) \ fst (set_cap c p s) \ + pspace_relations (ekheap t) (kheap t) (ksPSpace t') \ + cdt t = cdt s \ + cdt_list t = cdt_list s \ + ekheap t = ekheap s \ + scheduler_action t = scheduler_action s \ + ready_queues t = ready_queues s \ + is_original_cap t = is_original_cap s \ + interrupt_state_relation (interrupt_irq_node t) (interrupt_states t) + (ksInterruptState t') \ + (arch_state t, ksArchState t') \ arch_state_relation \ + cur_thread t = ksCurThread t' \ + idle_thread t = ksIdleThread t' \ + machine_state t = ksMachineState t' \ + work_units_completed t = ksWorkUnitsCompleted t' \ + domain_index t = ksDomScheduleIdx t' \ + domain_list t = ksDomSchedule t' \ + cur_domain t = ksCurDomain t' \ + domain_time t = ksDomainTime t'" + using cr + apply (clarsimp simp: updateCap_def in_monad) + apply (drule use_valid [OF _ getCTE_sp[where P="\s. 
s2 = s" for s2], OF _ refl]) + apply clarsimp + apply (drule(7) set_cap_not_quite_corres_prequel) + apply simp + apply (rule c) + apply (rule p) + apply (erule exEI) + apply clarsimp + apply (frule setCTE_pspace_only) + apply (clarsimp simp: set_cap_def split_def in_monad set_object_def + get_object_def + split: Structures_A.kernel_object.split_asm if_split_asm) + done + +lemma descendants_of_eq': + assumes "cte_at p s" + assumes "cte_at src s" + assumes "cdt_relation (swp cte_at s) (cdt s) m'" + assumes "valid_mdb s" + assumes "valid_objs s" "pspace_aligned s" "pspace_distinct s" + shows "(cte_map src \ descendants_of' (cte_map p) m') = (src \ descendants_of p (cdt s))" + using assms + apply (simp add: cdt_relation_def del: split_paired_All) + apply (rule iffI) + prefer 2 + apply (auto simp del: split_paired_All)[1] + apply (erule_tac x=p in allE) + apply simp + apply (drule sym) + apply clarsimp + apply (frule (1) descendants_of_cte_at) + apply (drule (5) cte_map_inj_eq) + apply simp + done + +lemma setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedPrevs_of s)" + shows "P (ps |> tcb_of' |> tcbSchedPrev)" + using use_valid[OF step setObject_cte_tcbSchedPrevs_of(1)] pre + by auto + +lemma setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedNexts_of s)" + shows "P (ps |> tcb_of' |> tcbSchedNext)" + using use_valid[OF step setObject_cte_tcbSchedNexts_of(1)] pre + by auto + +lemma setObject_cte_inQ_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (inQ domain priority |< tcbs_of' s)" + shows "P (inQ domain priority |< (ps |> tcb_of'))" + using use_valid[OF step setObject_cte_inQ(1)] pre + by auto + +lemma updateCap_stuff: + assumes "(x, s'') \ fst (updateCap p cap s')" + shows "(ctes_of s'' = modify_map (ctes_of s') p (cteCap_update (K cap))) \ + gsUserPages s'' = gsUserPages s' \ + gsCNodes s'' = gsCNodes s' \ + ksMachineState s'' = ksMachineState s' \ + ksWorkUnitsCompleted s'' = ksWorkUnitsCompleted s' \ + ksCurThread s'' = ksCurThread s' \ + ksIdleThread s'' = ksIdleThread s' \ + ksReadyQueues s'' = ksReadyQueues s' \ + ksSchedulerAction s'' = ksSchedulerAction s' \ + (ksArchState s'' = ksArchState s') \ + (pspace_aligned' s' \ pspace_aligned' s'') \ + (pspace_distinct' s' \ pspace_distinct' s'') \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" + using assms + apply (clarsimp simp: updateCap_def in_monad) + apply (drule use_valid [where P="\s. 
s2 = s" for s2, OF _ getCTE_sp refl]) + apply (rule conjI) + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_apply) + apply (frule setCTE_pspace_only) + apply (clarsimp simp: setCTE_def) + apply (intro conjI impI) + apply (erule(1) use_valid [OF _ setObject_aligned]) + apply (erule(1) use_valid [OF _ setObject_distinct]) + apply (erule setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace; simp) + apply (erule setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace; simp) + apply (fastforce elim: setObject_cte_inQ_of_use_valid_ksPSpace) + done + +(* FIXME: move *) +lemma pspace_relation_cte_wp_atI': + "\ pspace_relation (kheap s) (ksPSpace s'); + cte_wp_at' ((=) cte) x s'; valid_objs s \ + \ \c slot. cte_wp_at ((=) c) slot s \ cap_relation c (cteCap cte) \ x = cte_map slot" + apply (simp add: cte_wp_at_cases') + apply (elim disjE conjE exE) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm)[1] + apply (intro exI, rule conjI[OF _ conjI [OF _ refl]]) + apply (simp add: cte_wp_at_cases domI well_formed_cnode_invsI) + apply (simp split: if_split_asm) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) + apply (subgoal_tac "n = x - y", clarsimp) + apply (drule tcb_cases_related2, clarsimp) + apply (intro exI, rule conjI) + apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) + apply fastforce + apply simp + apply clarsimp + apply (simp add: other_obj_relation_def + split: Structures_A.kernel_object.split_asm + AARCH64_A.arch_kernel_obj.split_asm) + done + +lemma pspace_relation_cte_wp_atI: + "\ pspace_relation (kheap s) (ksPSpace s'); + ctes_of (s' :: kernel_state) x = Some cte; valid_objs s \ + \ \c slot. cte_wp_at ((=) c) slot s \ cap_relation c (cteCap cte) \ x = cte_map slot" + apply (erule pspace_relation_cte_wp_atI'[where x=x]) + apply (simp add: cte_wp_at_ctes_of) + apply assumption + done + +lemma sameRegion_corres: + "\ sameRegionAs c' d'; cap_relation c c'; cap_relation d d' \ + \ same_region_as c d" + by (simp add: same_region_as_relation) + +lemma is_final_cap_unique: + assumes cte: "ctes_of s' (cte_map slot) = Some cte" + assumes fin: "cte_wp_at (\c. 
final_matters c \ is_final_cap' c s) slot s" + assumes psr: "pspace_relation (kheap s) (ksPSpace s')" + assumes cte': "ctes_of s' x = Some cte'" + assumes neq: "x \ cte_map slot" + assumes region: "sameRegionAs (cteCap cte) (cteCap cte')" + assumes valid: "pspace_aligned s" "valid_objs s" "pspace_aligned' s'" "pspace_distinct' s'" + shows "False" +proof - + from fin obtain c where + c: "cte_wp_at ((=) c) slot s" and + final: "is_final_cap' c s" and + fm: "final_matters c" + by (auto simp add: cte_wp_at_cases) + with valid psr cte + have cr: "cap_relation c (cteCap cte)" + by (auto dest!: pspace_relation_ctes_ofI) + from cte' psr valid + obtain slot' c' where + c': "cte_wp_at ((=) c') slot' s" and + cr': "cap_relation c' (cteCap cte')" and + x: "x = cte_map slot'" + by (auto dest!: pspace_relation_cte_wp_atI) + with neq + have s: "slot \ slot'" by clarsimp + from region cr cr' + have reg: "same_region_as c c'" by (rule sameRegion_corres) + hence fm': "final_matters c'" using fm + apply - + apply (rule ccontr) + apply (simp add: final_matters_def split: cap.split_asm arch_cap.split_asm) + done + hence ref: "obj_refs c = obj_refs c'" using fm reg + apply (simp add: final_matters_def split: cap.split_asm arch_cap.split_asm) + done + have irq: "cap_irqs c = cap_irqs c'" using reg fm fm' + by (simp add: final_matters_def split: cap.split_asm) + have arch_ref: "arch_gen_refs c = arch_gen_refs c'" using fm reg + by (clarsimp simp: final_matters_def is_cap_simps + split: cap.split_asm arch_cap.split_asm) + + from final have refs_non_empty: "obj_refs c \ {} \ cap_irqs c \ {} \ arch_gen_refs c \ {}" + by (clarsimp simp add: is_final_cap'_def gen_obj_refs_def) + + define S where "S \ {cref. \cap'. fst (get_cap cref s) = {(cap', s)} \ + (gen_obj_refs c \ gen_obj_refs cap' \ {})}" + + have "is_final_cap' c s = (\cref. S = {cref})" + by (simp add: is_final_cap'_def S_def) + moreover + from c refs_non_empty + have "slot \ S" by (simp add: S_def cte_wp_at_def gen_obj_refs_def) + moreover + from c' refs_non_empty ref irq arch_ref + have "slot' \ S" by (simp add: S_def cte_wp_at_def gen_obj_refs_def) + ultimately + show False using s final by auto +qed + +lemma obj_refs_relation_Master: + "cap_relation cap cap' \ + obj_refs cap = (if isUntypedCap (capMasterCap cap') then {} + else if capClass (capMasterCap cap') = PhysicalClass + then {capUntypedPtr (capMasterCap cap')} + else {})" + by (simp add: isCap_simps + split: cap_relation_split_asm arch_cap.split_asm) + +lemma cap_irqs_relation_Master: + "cap_relation cap cap' \ + cap_irqs cap = (case capMasterCap cap' of IRQHandlerCap irq \ {irq} | _ \ {})" + by (simp split: cap_relation_split_asm arch_cap.split_asm) + +lemma arch_gen_refs_relation_Master: + "cap_relation cap cap' \ arch_gen_refs cap = {}" + by (simp split: cap_relation_split_asm arch_cap.split_asm) + +lemma is_final_cap_unique_sym: + assumes cte: "ctes_of s' (cte_map slot) = Some cte" + assumes fin: "cte_wp_at (\c. 
is_final_cap' c s) slot s" + assumes psr: "pspace_relation (kheap s) (ksPSpace s')" + assumes cte': "ctes_of s' x = Some cte'" + assumes neq: "x \ cte_map slot" + assumes master: "capMasterCap (cteCap cte') = capMasterCap (cteCap cte)" + assumes valid: "pspace_aligned s" "valid_objs s" "pspace_aligned' s'" "pspace_distinct' s'" + shows "False" +proof - + from fin obtain c where + c: "cte_wp_at ((=) c) slot s" and + final: "is_final_cap' c s" + by (auto simp add: cte_wp_at_cases) + with valid psr cte + have cr: "cap_relation c (cteCap cte)" + by (auto dest!: pspace_relation_ctes_ofI) + from cte' psr valid + obtain slot' c' where + c': "cte_wp_at ((=) c') slot' s" and + cr': "cap_relation c' (cteCap cte')" and + x: "x = cte_map slot'" + by (auto dest!: pspace_relation_cte_wp_atI) + with neq + have s: "slot \ slot'" by clarsimp + have irq: "cap_irqs c = cap_irqs c'" + using master cr cr' + by (simp add: cap_irqs_relation_Master) + have ref: "obj_refs c = obj_refs c'" + using master cr cr' + by (simp add: obj_refs_relation_Master) + have arch_ref: "arch_gen_refs c = arch_gen_refs c'" + using master cr cr' + by (clarsimp simp: arch_gen_refs_relation_Master) + + from final have refs_non_empty: "obj_refs c \ {} \ cap_irqs c \ {} \ arch_gen_refs c \ {}" + by (clarsimp simp add: is_final_cap'_def gen_obj_refs_def) + + define S where "S \ {cref. \cap'. fst (get_cap cref s) = {(cap', s)} \ + (gen_obj_refs c \ gen_obj_refs cap' \ {})}" + + have "is_final_cap' c s = (\cref. S = {cref})" + by (simp add: is_final_cap'_def S_def) + moreover + from c refs_non_empty + have "slot \ S" by (simp add: S_def cte_wp_at_def gen_obj_refs_def) + moreover + from c' refs_non_empty ref irq arch_ref + have "slot' \ S" by (simp add: S_def cte_wp_at_def gen_obj_refs_def) + ultimately + show False using s final by auto +qed + +lemma isMDBParent_sameRegion: + "isMDBParentOf cte cte' \ sameRegionAs (cteCap cte) (cteCap cte')" + by (simp add: isMDBParentOf_def split: cte.split_asm if_split_asm) + +lemma no_loops_no_subtree: + "no_loops m \ m \ x \ x = False" + apply clarsimp + apply (drule subtree_mdb_next) + apply (simp add: no_loops_def) + done + +definition + "caps_contained2 m \ + \c c' cap n cap' n'. + m c = Some (CTE cap n) \ m c' = Some (CTE cap' n') \ + (isCNodeCap cap' \ isThreadCap cap') \ + capUntypedPtr cap' \ untypedRange cap \ + capUntypedPtr cap' + capUntypedSize cap' - 1 \ untypedRange cap" + +lemma capUntypedPtr_capRange: + "\ ctes_of s p = Some (CTE cap node); + capClass cap = PhysicalClass; + valid_objs' s \ \ + capUntypedPtr cap \ capRange cap" + apply (erule capAligned_capUntypedPtr[rotated]) + apply (drule (1) ctes_of_valid_cap') + apply (erule valid_capAligned) + done + +lemma descendants_of_update_ztc: + assumes c: "\x. \ m \ x \ slot; \ P \ \ + \cte'. m x = Some cte' + \ capMasterCap (cteCap cte') \ capMasterCap (cteCap cte) + \ sameRegionAs (cteCap cte') (cteCap cte)" + assumes m: "m slot = Some cte" + assumes z: "isZombie cap \ isCNodeCap cap \ isThreadCap cap" + defines "cap' \ cteCap cte" + assumes F: "\x cte'. \ m x = Some cte'; x \ slot; P \ + \ isUntypedCap (cteCap cte') \ capClass (cteCap cte') \ PhysicalClass + \ capUntypedPtr (cteCap cte') \ capUntypedPtr cap'" + assumes pu: "capRange cap' = capRange cap \ capUntypedPtr cap' = capUntypedPtr cap" + assumes a: "capAligned cap'" + assumes t: "isZombie cap' \ isCNodeCap cap' \ isThreadCap cap'" + assumes n: "no_loops m" + defines "m' \ m(slot \ cteCap_update (\_. 
cap) cte)" + shows "((c \ slot \ P) \ descendants_of' c m \ descendants_of' c m') + \ (P \ descendants_of' c m' \ descendants_of' c m)" +proof (simp add: descendants_of'_def subset_iff, + simp only: all_simps(6)[symmetric], intro conjI allI) + note isMDBParentOf_CTE[simp] + + have utp: "capUntypedPtr cap' \ capRange cap'" + using t a + by (auto elim!: capAligned_capUntypedPtr simp: isCap_simps) + + have ztc_parent: "\cap cap'. isZombie cap \ isCNodeCap cap \ isThreadCap cap + \ sameRegionAs cap cap' + \ capUntypedPtr cap = capUntypedPtr cap' + \ capClass cap' = PhysicalClass \ \ isUntypedCap cap'" + by (auto simp: isCap_simps sameRegionAs_def3) + + have ztc_child: "\cap cap'. isZombie cap \ isCNodeCap cap \ isThreadCap cap + \ sameRegionAs cap' cap + \ capClass cap' = PhysicalClass \ + (isUntypedCap cap' \ capUntypedPtr cap' = capUntypedPtr cap)" + by (auto simp: isCap_simps sameRegionAs_def3) + + have notparent: "\x cte'. \ m x = Some cte'; x \ slot; P \ + \ \ isMDBParentOf cte cte'" + using t utp + apply clarsimp + apply (drule_tac cte'=cte' in F, simp+) + apply (simp add: cap'_def) + apply (cases cte, case_tac cte', clarsimp) + apply (frule(1) ztc_parent, clarsimp) + done + + have notparent2: "\x cte'. \ m x = Some cte'; x \ slot; P \ + \ \ isMDBParentOf (cteCap_update (\_. cap) cte) cte'" + using z utp + apply clarsimp + apply (drule_tac cte'=cte' in F, simp+) + apply (cases cte, case_tac cte', clarsimp) + apply (frule(1) ztc_parent) + apply (clarsimp simp: pu) + done + + fix x + { assume cx: "m \ c \ x" and cP: "c \ slot \ P" + hence c_neq_x [simp]: "c \ x" + by (clarsimp simp: n no_loops_no_subtree) + from cx c_neq_x cP m + have s_neq_c [simp]: "c \ slot" + apply (clarsimp simp del: c_neq_x) + apply (drule subtree_parent) + apply (clarsimp simp: parentOf_def notparent) + done + + have parent: "\x cte'. \ m x = Some cte'; isMDBParentOf cte' cte; m \ x \ slot; x \ slot \ + \ isMDBParentOf cte' (cteCap_update (\_. cap) cte)" + using t z pu + apply - + apply (cases P) + apply (frule(2) F) + apply (clarsimp simp: cap'_def) + apply (case_tac cte') + apply (rename_tac capability mdbnode) + apply (case_tac cte) + apply clarsimp + apply (frule(1) ztc_child) + apply (case_tac "isUntypedCap capability") + apply (simp add: isCap_simps) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply simp + apply (frule(1) c, clarsimp) + apply (clarsimp simp: cap'_def) + apply (case_tac cte') + apply (case_tac cte) + apply clarsimp + apply (erule sameRegionAsE) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps)+ + done + + from cx + have "m' \ c \ x" + proof induct + case (direct_parent c') + hence "m \ c \ c'" by (rule subtree.direct_parent) + with direct_parent m + show ?case + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp add: mdb_next_unfold m'_def m) + apply assumption + apply (clarsimp simp: parentOf_def) + apply (clarsimp simp add: m'_def) + apply (erule(2) parent) + apply simp + done + next + case (trans_parent c' c'') + moreover + from trans_parent + have "m \ c \ c''" by - (rule subtree.trans_parent) + ultimately + show ?case using z m pu t + apply - + apply (erule subtree.trans_parent) + apply (clarsimp simp: mdb_next_unfold m'_def m) + apply assumption + apply (clarsimp simp: parentOf_def m'_def) + apply (erule(2) parent) + apply simp + done + qed + } + thus "(c = slot \ P) \ m \ c \ x \ m' \ c \ x" + by blast + + { assume subcx: "m' \ c \ x" and P: "P" + + have mdb_next_eq: "\x y. 
m' \ x \ y = m \ x \ y" + by (simp add: mdb_next_unfold m'_def m) + have mdb_next_eq_trans: "\x y. m' \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + apply (rule arg_cong[where f="\S. v \ S\<^sup>+" for v]) + apply (simp add: set_eq_iff mdb_next_eq) + done + + have subtree_neq: "\x y. m' \ x \ y \ x \ y" + apply clarsimp + apply (drule subtree_mdb_next) + apply (clarsimp simp: mdb_next_eq_trans n no_loops_trancl_simp) + done + + have parent2: "\x cte'. \ m x = Some cte'; isMDBParentOf cte' (cteCap_update (\_. cap) cte); + x \ slot \ + \ isMDBParentOf cte' cte" + using t z pu P + apply (drule_tac cte'=cte' in F, simp, simp) + apply (simp add: cap'_def) + apply (cases cte) + apply (case_tac cte') + apply (rename_tac cap' node') + apply (clarsimp) + apply (frule(1) ztc_child) + apply (case_tac "isUntypedCap cap'") + apply (simp add:isCap_simps) + apply (clarsimp simp: isCap_simps sameRegionAs_def3) + apply clarsimp + done + + from subcx have "m \ c \ x" + proof induct + case (direct_parent c') + thus ?case + using subtree_neq [OF subtree.direct_parent [OF direct_parent(1-3)]] + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp: mdb_next_unfold m'_def m split: if_split_asm) + apply assumption + apply (insert z m t pu) + apply (simp add: cap'_def) + apply (simp add: m'_def parentOf_def split: if_split_asm) + apply (clarsimp simp: parent2) + apply (clarsimp simp add: notparent2 [OF _ _ P]) + done + next + case (trans_parent c' c'') + thus ?case + using subtree_neq [OF subtree.trans_parent [OF trans_parent(1, 3-5)]] + apply - + apply (erule subtree.trans_parent) + apply (clarsimp simp: mdb_next_unfold m'_def m split: if_split_asm) + apply assumption + apply (insert z m t pu) + apply (simp add: cap'_def) + apply (simp add: m'_def parentOf_def split: if_split_asm) + apply (clarsimp simp: parent2) + apply (clarsimp simp: notparent2 [OF _ _ P]) + done + qed + } + thus "P \ m' \ c \ x \ m \ c \ x" + by simp +qed + +lemma use_update_ztc_one: + "((c \ slot \ True) \ descendants_of' c m \ descendants_of' c m') + \ (True \ descendants_of' c m' \ descendants_of' c m) + \ descendants_of' c m = descendants_of' c m'" + by clarsimp + +lemma use_update_ztc_two: + "((c \ slot \ False) \ descendants_of' c m \ descendants_of' c m') + \ (False \ descendants_of' c m' \ descendants_of' c m) + \ descendants_of' slot m = {} + \ descendants_of' c m \ descendants_of' c m'" + by auto + +lemmas cte_wp_at'_obj_at' = cte_wp_at_obj_cases' + +lemma cte_at'_obj_at': + "cte_at' addr s = (obj_at' (\_ :: cte. True) addr s + \ (\n \ dom tcb_cte_cases. 
tcb_at' (addr - n) s))" + by (simp add: cte_wp_at'_obj_at') + +lemma ctes_of_valid: + "\ cte_wp_at' ((=) cte) p s; valid_objs' s \ + \ s \' cteCap cte" + apply (simp add: cte_wp_at'_obj_at') + apply (erule disjE) + apply (subgoal_tac "ko_at' cte p s") + apply (drule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def valid_cte'_def) + apply (simp add: obj_at'_def cte_level_bits_def objBits_simps) + apply clarsimp + apply (drule obj_at_ko_at') + apply clarsimp + apply (drule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def valid_tcb'_def) + apply (fastforce) + done + +lemma no_fail_setCTE [wp]: + "no_fail (cte_at' p) (setCTE p c)" + apply (clarsimp simp: setCTE_def setObject_def split_def unless_def + updateObject_cte alignCheck_def alignError_def + typeError_def is_aligned_mask[symmetric] + cong: kernel_object.case_cong) + apply (wp|wpc)+ + apply (clarsimp simp: cte_wp_at'_def getObject_def split_def + in_monad loadObject_cte + dest!: in_singleton + split del: if_split) + apply (clarsimp simp: typeError_def alignCheck_def alignError_def + in_monad is_aligned_mask[symmetric] objBits_simps + magnitudeCheck_def + split: kernel_object.split_asm if_split_asm option.splits + split del: if_split) + apply simp_all + done + +lemma no_fail_updateCap [wp]: + "no_fail (cte_at' p) (updateCap p cap')" + apply (simp add: updateCap_def) + apply (rule no_fail_pre, wp) + apply simp + done + +lemma capRange_cap_relation: + "\ cap_relation cap cap'; cap_relation cap cap' \ capClass cap' = PhysicalClass \ + \ capRange cap' = {obj_ref_of cap .. obj_ref_of cap + obj_size cap - 1}" + by (simp add: capRange_def objBits_simps' cte_level_bits_def + asid_low_bits_def zbits_map_def bit_simps + split: cap_relation_split_asm arch_cap.split_asm + option.split sum.split) + +lemma cap_relation_untyped_ptr_obj_refs: + "cap_relation cap cap' \ capClass cap' = PhysicalClass \ \ isUntypedCap cap' + \ capUntypedPtr cap' \ obj_refs cap" + by (clarsimp simp add: isCap_simps + simp del: not_ex + split: cap_relation_split_asm arch_cap.split_asm) + +lemma obj_refs_cap_relation_untyped_ptr: + "\ cap_relation cap cap'; obj_refs cap \ {} \ \ capUntypedPtr cap' \ obj_refs cap" + by (clarsimp split: cap_relation_split_asm arch_cap.split_asm) + +lemma is_final_untyped_ptrs: + "\ ctes_of (s' :: kernel_state) (cte_map slot) = Some cte; ctes_of s' y = Some cte'; cte_map slot \ y; + pspace_relation (kheap s) (ksPSpace s'); valid_objs s; pspace_aligned s; pspace_distinct s; + cte_wp_at (\cap. 
is_final_cap' cap s \ obj_refs cap \ {}) slot s \ + \ capClass (cteCap cte') \ PhysicalClass + \ isUntypedCap (cteCap cte') + \ capUntypedPtr (cteCap cte) \ capUntypedPtr (cteCap cte')" + apply clarsimp + apply (drule(2) pspace_relation_cte_wp_atI[rotated])+ + apply clarsimp + apply (drule_tac s=s in cte_map_inj_eq, + (clarsimp elim!: cte_wp_at_weakenE[OF _ TrueI])+) + apply (clarsimp simp: cte_wp_at_def) + apply (erule(3) final_cap_duplicate [where r="ObjRef (capUntypedPtr (cteCap cte))", + OF _ _ distinct_lemma[where f=cte_map]]) + apply (rule obj_ref_is_gen_obj_ref) + apply (erule(1) obj_refs_cap_relation_untyped_ptr) + apply (rule obj_ref_is_gen_obj_ref) + apply (erule(1) obj_refs_cap_relation_untyped_ptr) + apply (rule obj_ref_is_gen_obj_ref) + apply (drule(2) cap_relation_untyped_ptr_obj_refs)+ + apply simp + done + +lemma capClass_ztc_relation: + "\ is_zombie c \ is_cnode_cap c \ is_thread_cap c; + cap_relation c c' \ \ capClass c' = PhysicalClass" + by (auto simp: is_cap_simps) + +lemma pspace_relationsD: + "\pspace_relation kh kh'; ekheap_relation ekh kh'\ \ pspace_relations ekh kh kh'" + by (simp add: pspace_relations_def) + +lemma updateCap_corres: + "\cap_relation cap cap'; + is_zombie cap \ is_cnode_cap cap \ is_thread_cap cap \ + \ corres dc (\s. invs s \ + cte_wp_at (\c. (is_zombie c \ is_cnode_cap c \ + is_thread_cap c) \ + is_final_cap' c s \ + obj_ref_of c = obj_ref_of cap \ + obj_size c = obj_size cap) slot s) + invs' + (set_cap cap slot) (updateCap (cte_map slot) cap')" + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp) + apply clarsimp + apply (drule cte_wp_at_norm) + apply (clarsimp simp: state_relation_def) + apply (drule (1) pspace_relation_ctes_ofI) + apply fastforce + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp add: state_relation_def) + apply (drule(1) pspace_relationsD) + apply (frule (3) set_cap_not_quite_corres; fastforce?) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply clarsimp + apply (rule bexI) + prefer 2 + apply simp + apply (clarsimp simp: in_set_cap_cte_at_swp pspace_relations_def) + apply (drule updateCap_stuff) + apply simp + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _ _" \ -\) + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (rule conjI) + prefer 2 + apply (rule conjI) + apply (unfold cdt_list_relation_def)[1] + apply (intro allI impI) + apply (erule_tac x=c in allE) + apply (auto elim!: modify_map_casesE)[1] + apply (unfold revokable_relation_def)[1] + apply (drule set_cap_caps_of_state_monad) + apply (simp add: cte_wp_at_caps_of_state del: split_paired_All) + apply (intro allI impI) + apply (erule_tac x=c in allE) + apply (erule impE[where P="\y. 
v = Some y" for v]) + apply (clarsimp simp: null_filter_def is_zombie_def split: if_split_asm) + apply (auto elim!: modify_map_casesE del: disjE)[1] (* slow *) + apply (case_tac "ctes_of b (cte_map slot)") + apply (simp add: modify_map_None) + apply (simp add: modify_map_apply) + apply (simp add: cdt_relation_def del: split_paired_All) + apply (intro allI impI) + apply (rule use_update_ztc_one [OF descendants_of_update_ztc]) + apply simp + apply assumption + apply (auto simp: is_cap_simps isCap_simps)[1] + apply (frule(3) is_final_untyped_ptrs [OF _ _ not_sym], clarsimp+) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (simp add: is_cap_simps, elim disjE exE, simp_all)[1] + apply (simp add: eq_commute) + apply (drule cte_wp_at_norm, clarsimp) + apply (drule(1) pspace_relation_ctes_ofI, clarsimp+) + apply (drule(1) capClass_ztc_relation)+ + apply (simp add: capRange_cap_relation obj_ref_of_relation[symmetric]) + apply (rule valid_capAligned, rule ctes_of_valid) + apply (simp add: cte_wp_at_ctes_of) + apply clarsimp + apply (drule cte_wp_at_norm, clarsimp) + apply (drule(1) pspace_relation_ctes_ofI, clarsimp+) + apply (simp add: is_cap_simps, elim disjE exE, simp_all add: isCap_simps)[1] + apply clarsimp + done + +lemma exst_set_cap: + "(x,s') \ fst (set_cap p c s) \ exst s' = exst s" + by (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + +lemma updateMDB_eqs: + assumes "(x, s'') \ fst (updateMDB p f s')" + shows "ksMachineState s'' = ksMachineState s' \ + ksWorkUnitsCompleted s'' = ksWorkUnitsCompleted s' \ + ksCurThread s'' = ksCurThread s' \ + ksIdleThread s'' = ksIdleThread s' \ + ksReadyQueues s'' = ksReadyQueues s' \ + ksInterruptState s'' = ksInterruptState s' \ + ksArchState s'' = ksArchState s' \ + ksSchedulerAction s'' = ksSchedulerAction s' \ + gsUserPages s'' = gsUserPages s' \ + gsCNodes s'' = gsCNodes s' \ + ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ + ksDomSchedule s'' = ksDomSchedule s' \ + ksCurDomain s'' = ksCurDomain s' \ + ksDomainTime s'' = ksDomainTime s'" using assms + apply (clarsimp simp: updateMDB_def Let_def in_monad split: if_split_asm) + apply (drule in_inv_by_hoareD [OF getCTE_inv]) + apply (clarsimp simp: setCTE_def setObject_def in_monad split_def) + apply (drule in_inv_by_hoareD [OF updateObject_cte_inv]) + apply simp + done + +lemma updateMDB_pspace_relation: + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "pspace_relation (kheap s) (ksPSpace s')" + assumes "pspace_aligned' s'" "pspace_distinct' s'" + shows "pspace_relation (kheap s) (ksPSpace s'')" using assms + apply (clarsimp simp: updateMDB_def Let_def in_monad split: if_split_asm) + apply (drule_tac P="(=) s'" in use_valid [OF _ getCTE_sp], rule refl) + apply clarsimp + apply (clarsimp simp: setCTE_def setObject_def in_monad + split_def) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (elim disjE conjE exE) + apply (clarsimp simp: cte_wp_at_cases' lookupAround2_char1) + apply (erule disjE) + apply (clarsimp simp: tcb_ctes_clear cte_level_bits_def objBits_defs) + apply clarsimp + apply (rule pspace_dom_relatedE, assumption+) + apply (rule obj_relation_cutsE, assumption+; + clarsimp split: Structures_A.kernel_object.split_asm + AARCH64_A.arch_kernel_obj.split_asm if_split_asm + simp: other_obj_relation_def) + apply (frule(1) tcb_cte_cases_aligned_helpers(1)) + apply (frule(1) tcb_cte_cases_aligned_helpers(2)) + apply (clarsimp simp del: diff_neg_mask) + apply (subst 
map_upd_triv[symmetric, where t="kheap s"], assumption) + apply (erule(2) pspace_relation_update_tcbs) + apply (case_tac tcba) + apply (simp add: tcb_cte_cases_def tcb_relation_def del: diff_neg_mask + split: if_split_asm) + apply (clarsimp simp: cte_wp_at_cases') + apply (erule disjE) + apply (rule pspace_dom_relatedE, assumption+) + apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] + apply (clarsimp simp: cte_relation_def) + apply (simp add: pspace_relation_def dom_fun_upd2 + del: dom_fun_upd) + apply (erule conjE) + apply (rule ballI, drule(1) bspec) + apply (rule ballI, drule(1) bspec) + apply clarsimp + apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] + apply (clarsimp simp: cte_relation_def) + apply clarsimp + apply (drule_tac y=p in tcb_ctes_clear[rotated], assumption+) + apply fastforce + apply fastforce + done + +lemma updateMDB_ekheap_relation: + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "ekheap_relation (ekheap s) (ksPSpace s')" + shows "ekheap_relation (ekheap s) (ksPSpace s'')" using assms + apply (clarsimp simp: updateMDB_def Let_def setCTE_def setObject_def in_monad ekheap_relation_def etcb_relation_def split_def split: if_split_asm) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (drule_tac P="(=) s'" in use_valid [OF _ getCTE_sp], rule refl) + apply (drule bspec, erule domI) + apply (clarsimp simp: tcb_cte_cases_def lookupAround2_char1 split: if_split_asm) + done + +lemma updateMDB_pspace_relations: + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "pspace_relations (ekheap s) (kheap s) (ksPSpace s')" + assumes "pspace_aligned' s'" "pspace_distinct' s'" + shows "pspace_relations (ekheap s) (kheap s) (ksPSpace s'')" using assms + by (simp add: pspace_relations_def updateMDB_pspace_relation updateMDB_ekheap_relation) + +lemma updateMDB_ctes_of: + assumes "(x, s') \ fst (updateMDB p f s)" + assumes "no_0 (ctes_of s)" + shows "ctes_of s' = modify_map (ctes_of s) p (cteMDBNode_update f)" + using assms + apply (clarsimp simp: valid_def) + apply (drule use_valid) + apply (rule updateMDB_ctes_of_no_0) + prefer 2 + apply assumption + apply simp + done + +crunch aligned[wp]: updateMDB "pspace_aligned'" +crunch pdistinct[wp]: updateMDB "pspace_distinct'" +crunch tcbSchedPrevs_of[wp]: updateMDB "\s. P (tcbSchedPrevs_of s)" +crunch tcbSchedNexts_of[wp]: updateMDB "\s. P (tcbSchedNexts_of s)" +crunch inQ_opt_pred[wp]: updateMDB "\s. P (inQ d p |< tcbs_of' s)" +crunch inQ_opt_pred'[wp]: updateMDB "\s. P (\d p. inQ d p |< tcbs_of' s)" +crunch ksReadyQueues[wp]: updateMDB "\s. P (ksReadyQueues s)" + (wp: crunch_wps simp: crunch_simps setObject_def updateObject_cte) + +lemma setCTE_rdyq_projs[wp]: + "setCTE p f \\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< tcbs_of' s)\" + apply (rule hoare_lift_Pf2[where f=ksReadyQueues]) + apply (rule hoare_lift_Pf2[where f=tcbSchedNexts_of]) + apply (rule hoare_lift_Pf2[where f=tcbSchedPrevs_of]) + apply wpsimp+ + done + +crunches updateMDB + for rdyq_projs[wp]:"\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. 
inQ d p |< tcbs_of' s)" + +lemma updateMDB_the_lot: + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "pspace_relations (ekheap s) (kheap s) (ksPSpace s')" + assumes "pspace_aligned' s'" "pspace_distinct' s'" "no_0 (ctes_of s')" + shows "ctes_of s'' = modify_map (ctes_of s') p (cteMDBNode_update f) \ + ksMachineState s'' = ksMachineState s' \ + ksWorkUnitsCompleted s'' = ksWorkUnitsCompleted s' \ + ksCurThread s'' = ksCurThread s' \ + ksIdleThread s'' = ksIdleThread s' \ + ksReadyQueues s'' = ksReadyQueues s' \ + ksSchedulerAction s'' = ksSchedulerAction s' \ + ksInterruptState s'' = ksInterruptState s' \ + ksArchState s'' = ksArchState s' \ + gsUserPages s'' = gsUserPages s' \ + gsCNodes s'' = gsCNodes s' \ + pspace_relations (ekheap s) (kheap s) (ksPSpace s'') \ + pspace_aligned' s'' \ pspace_distinct' s'' \ + no_0 (ctes_of s'') \ + ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ + ksDomSchedule s'' = ksDomSchedule s' \ + ksCurDomain s'' = ksCurDomain s' \ + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" +using assms + apply (simp add: updateMDB_eqs updateMDB_pspace_relations split del: if_split) + apply (frule (1) updateMDB_ctes_of) + apply clarsimp + apply (rule conjI) + apply (erule use_valid) + apply wp + apply simp + apply (erule use_valid, wpsimp wp: hoare_vcg_all_lift) + apply (simp add: comp_def) + done + +lemma is_cap_revocable_eq: + "\ cap_relation c c'; cap_relation src_cap src_cap'; sameRegionAs src_cap' c'; + is_untyped_cap src_cap \ \ is_ep_cap c \ \ is_ntfn_cap c\ + \ is_cap_revocable c src_cap = isCapRevocable c' src_cap'" + apply (clarsimp simp: isCap_simps objBits_simps bit_simps arch_is_cap_revocable_def + bits_of_def is_cap_revocable_def Retype_H.isCapRevocable_def + sameRegionAs_def3 isCapRevocable_def + split: cap_relation_split_asm arch_cap.split_asm) + done + +lemma isMDBParentOf_prev_update [simp]: + "isMDBParentOf (cteMDBNode_update (mdbPrev_update f) cte) cte' = + isMDBParentOf cte cte'" + "isMDBParentOf cte (cteMDBNode_update (mdbPrev_update f) cte') = + isMDBParentOf cte cte'" + apply (cases cte) + apply (cases cte') + apply (simp add: isMDBParentOf_def) + apply (cases cte) + apply (cases cte') + apply (clarsimp simp: isMDBParentOf_def) + done + +lemma prev_update_subtree [simp]: + "modify_map m' x (cteMDBNode_update (mdbPrev_update f)) \ a \ b = m' \ a \ b" + (is "?m' = ?m") +proof + assume "?m" + thus ?m' + proof induct + case (direct_parent c) + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp add: mdb_next_unfold modify_map_def) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def) + apply fastforce + done + next + case (trans_parent c c') + thus ?case + apply - + apply (rule subtree.trans_parent) + apply (rule trans_parent.hyps) + apply (clarsimp simp add: mdb_next_unfold modify_map_def) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def) + apply fastforce + done + qed +next + assume "?m'" + thus ?m + proof induct + case (direct_parent c) + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp add: mdb_next_unfold modify_map_def split: if_split_asm) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def split: if_split_asm) + done + next + case (trans_parent c c') + thus ?case + apply - + apply (rule subtree.trans_parent) + apply (rule 
trans_parent.hyps) + apply (clarsimp simp add: mdb_next_unfold modify_map_def split: if_split_asm) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def split: if_split_asm) + done + qed +qed + +lemma prev_update_modify_mdb_relation: + "cdt_relation c m (modify_map m' x (cteMDBNode_update (mdbPrev_update f))) + = cdt_relation c m m'" + by (fastforce simp: cdt_relation_def descendants_of'_def) + +lemma subtree_prev_0: + assumes s: "m \ a \ b" + assumes n: "m b = Some cte" "mdbPrev (cteMDBNode cte) = 0" + assumes d: "valid_dlist m" + assumes 0: "no_0 m" + shows "False" using s n +proof induct + case (direct_parent c) + have "m \ a \ c" by fact+ + then obtain cte' where a: "m a = Some cte'" and "mdbNext (cteMDBNode cte') = c" + by (auto simp add: mdb_next_unfold) + moreover + have "m c = Some cte" by fact+ + moreover + have "c \ 0" by fact+ + ultimately + have "mdbPrev (cteMDBNode cte) = a" using d + by (fastforce simp add: valid_dlist_def Let_def) + moreover + have "mdbPrev (cteMDBNode cte) = 0" by fact+ + moreover + from a have "a \ 0" using assms by auto + ultimately + show False by simp +next + case (trans_parent c' c) + have "m \ c' \ c" by fact+ + then obtain cte' where c': "m c' = Some cte'" and "mdbNext (cteMDBNode cte') = c" + by (auto simp add: mdb_next_unfold) + moreover + have "m c = Some cte" by fact+ + moreover + have "c \ 0" by fact+ + ultimately + have "mdbPrev (cteMDBNode cte) = c'" using d + by (fastforce simp add: valid_dlist_def Let_def) + moreover + have "mdbPrev (cteMDBNode cte) = 0" by fact+ + moreover + from c' have "c' \ 0" using assms by auto + ultimately + show False by simp +qed + +lemma subtree_next_0: + assumes s: "m \ a \ b" + assumes n: "m a = Some cte" "mdbNext (cteMDBNode cte) = 0" + shows "False" using s n + by induct (auto simp: mdb_next_unfold) + +definition + "isArchCap P cap \ case cap of ArchObjectCap acap \ P acap | _ \ False" + +lemma isArchCap_simps[simp]: + "isArchCap P (capability.ThreadCap xc) = False" + "isArchCap P capability.NullCap = False" + "isArchCap P capability.DomainCap = False" + "isArchCap P (capability.NotificationCap xca xba xaa xd) = False" + "isArchCap P (capability.EndpointCap xda xcb xbb xab xe xi) = False" + "isArchCap P (capability.IRQHandlerCap xf) = False" + "isArchCap P (capability.Zombie xbc xac xg) = False" + "isArchCap P (capability.ArchObjectCap xh) = P xh" + "isArchCap P (capability.ReplyCap xad xi xia) = False" + "isArchCap P (capability.UntypedCap d xae xj f) = False" + "isArchCap P (capability.CNodeCap xfa xea xdb xcc) = False" + "isArchCap P capability.IRQControlCap = False" + by (simp add: isArchCap_def)+ + +definition + "badge_derived' cap' cap \ + capMasterCap cap = capMasterCap cap' \ + (capBadge cap, capBadge cap') \ capBadge_ordering False" + +definition vs_cap_ref_arch' :: "arch_capability \ (asid \ vspace_ref) option" where + "vs_cap_ref_arch' acap \ + case acap of + ASIDPoolCap _ asid \ Some (asid, 0) + | ASIDControlCap \ None + | FrameCap _ _ _ _ m \ m + | PageTableCap _ _ m \ m" + +lemmas vs_cap_ref_arch'_simps[simp] = vs_cap_ref_arch'_def[split_simps arch_capability.split] + +definition + "vs_cap_ref' = arch_cap'_fun_lift vs_cap_ref_arch' None" + +lemmas vs_cap_ref'_simps[simp] = + vs_cap_ref'_def[THEN fun_cong, unfolded arch_cap'_fun_lift_def, split_simps capability.split] + +definition + "is_derived' m p cap' cap \ + cap' \ NullCap \ + \ isZombie cap \ + \ isIRQControlCap cap' \ + badge_derived' cap' cap \ + (isUntypedCap cap \ descendants_of' p m = {}) \ + (isReplyCap cap = 
isReplyCap cap') \ + (isReplyCap cap \ capReplyMaster cap) \ + (isReplyCap cap' \ \ capReplyMaster cap') \ + (vs_cap_ref' cap = vs_cap_ref' cap' \ isArchFrameCap cap) \ + (isArchCap isPageTableCap cap \ capASID cap = capASID cap' \ capASID cap \ None)" + +lemma zbits_map_eq[simp]: + "(zbits_map zbits = zbits_map zbits') = (zbits = zbits')" + by (simp add: zbits_map_def split: option.split sum.split) + +lemma master_cap_relation: + "\ cap_relation c c'; cap_relation d d' \ \ + (capMasterCap c' = capMasterCap d') = + (cap_master_cap c = cap_master_cap d)" + by (auto simp add: cap_master_cap_def capMasterCap_def split: cap.splits arch_cap.splits) + +lemma cap_badge_relation: + "\ cap_relation c c'; cap_relation d d' \ \ + (capBadge c' = capBadge d') = + (cap_badge c = cap_badge d)" + by (auto simp add: cap_badge_def split: cap.splits arch_cap.splits) + +lemma capBadge_ordering_relation: + "\ cap_relation c c'; cap_relation d d' \ \ + ((capBadge c', capBadge d') \ capBadge_ordering f) = + ((cap_badge c, cap_badge d) \ capBadge_ordering f)" + apply (cases c) + by (auto simp add: cap_badge_def capBadge_ordering_def split: cap.splits) + +lemma is_reply_cap_relation: + "cap_relation c c' \ is_reply_cap c = (isReplyCap c' \ \ capReplyMaster c')" + by (cases c, auto simp: is_cap_simps isCap_simps) + +lemma is_reply_master_relation: + "cap_relation c c' \ + is_master_reply_cap c = (isReplyCap c' \ capReplyMaster c')" + by (cases c, auto simp add: is_cap_simps isCap_simps) + +lemma cap_asid_cap_relation: + "cap_relation c c' \ capASID c' = map_option ucast (cap_asid c)" + by (auto simp: capASID_def cap_asid_def split: cap.splits arch_cap.splits option.splits) + +lemma isArchCapE[elim!]: + "\ isArchCap P cap; \arch_cap. cap = ArchObjectCap arch_cap \ P arch_cap \ Q \ \ Q" + by (cases cap, simp_all) + +lemma is_derived_eq: + "\ cap_relation c c'; cap_relation d d'; + cdt_relation (swp cte_at s) (cdt s) (ctes_of s'); cte_at p s \ \ + is_derived (cdt s) p c d = is_derived' (ctes_of s') (cte_map p) c' d'" + unfolding cdt_relation_def + apply (erule allE, erule impE, simp) + apply (clarsimp simp: is_derived_def is_derived'_def badge_derived'_def) + apply (rule conjI) + apply (clarsimp simp: is_cap_simps isCap_simps) + apply (cases c, auto simp: isCap_simps cap_master_cap_def capMasterCap_def)[1] + apply (case_tac "isIRQControlCap d'") + apply (frule(1) master_cap_relation) + apply (clarsimp simp: isCap_simps cap_master_cap_def + is_zombie_def is_reply_cap_def is_master_reply_cap_def + split: cap_relation_split_asm arch_cap.split_asm)[1] + apply (frule(1) master_cap_relation) + apply (frule(1) cap_badge_relation) + apply (frule cap_asid_cap_relation) + apply (frule(1) capBadge_ordering_relation) + apply (case_tac d) + apply (simp_all add: isCap_simps is_cap_simps cap_master_cap_def + capMasterCap_def + split: cap_relation_split_asm arch_cap.split_asm) + apply fastforce + apply (auto simp: up_ucast_inj_eq split:arch_cap.splits arch_capability.splits option.splits) + done +end + +locale masterCap = + fixes cap cap' + assumes master: "capMasterCap cap = capMasterCap cap'" +begin +interpretation Arch . 
(*FIXME: arch_split*) + +lemma isZombie [simp]: + "isZombie cap' = isZombie cap" using master + by (simp add: capMasterCap_def isZombie_def split: capability.splits) + +lemma isUntypedCap [simp]: + "isUntypedCap cap' = isUntypedCap cap" using master + by (simp add: capMasterCap_def isUntypedCap_def split: capability.splits) + +lemma isArchFrameCap [simp]: + "isArchFrameCap cap' = isArchFrameCap cap" using master + by (simp add: capMasterCap_def isArchFrameCap_def + split: capability.splits arch_capability.splits) + +lemma isIRQHandlerCap [simp]: + "isIRQHandlerCap cap' = isIRQHandlerCap cap" using master + by (simp add: capMasterCap_def isIRQHandlerCap_def split: capability.splits) + +lemma isEndpointCap [simp]: + "isEndpointCap cap' = isEndpointCap cap" using master + by (simp add: capMasterCap_def isEndpointCap_def split: capability.splits) + +lemma isNotificationCap [simp]: + "isNotificationCap cap' = isNotificationCap cap" using master + by (simp add: capMasterCap_def isNotificationCap_def split: capability.splits) + +lemma isIRQControlCap [simp]: + "isIRQControlCap cap' = isIRQControlCap cap" using master + by (simp add: capMasterCap_def isIRQControlCap_def split: capability.splits) + +lemma isReplyCap [simp]: + "isReplyCap cap' = isReplyCap cap" using master + by (simp add: capMasterCap_def isReplyCap_def split: capability.splits) + +lemma capRange [simp]: + "capRange cap' = capRange cap" using master + by (simp add: capRange_def capMasterCap_def split: capability.splits arch_capability.splits) + +lemma isDomain1: + "(cap' = DomainCap) = (cap = DomainCap)" using master + by (simp add: capMasterCap_def split: capability.splits) + +lemma isDomain2: + "(DomainCap = cap') = (DomainCap = cap)" using master + by (simp add: capMasterCap_def split: capability.splits) + +lemma isNull1: + "(cap' = NullCap) = (cap = NullCap)" using master + by (simp add: capMasterCap_def split: capability.splits) + +lemma isNull2: + "(NullCap = cap') = (NullCap = cap)" using master + by (simp add: capMasterCap_def split: capability.splits) + +lemmas isNull [simp] = isNull1 isNull2 + +lemmas isDomain [simp] = isDomain1 isDomain2 + +lemma sameRegionAs1: + "sameRegionAs c cap' = sameRegionAs c cap" using master + by (simp add: sameRegionAs_def3) + +lemma sameRegionAs2: + "sameRegionAs cap' c = sameRegionAs cap c" using master + by (simp add: sameRegionAs_def3) + +lemmas sameRegionAs [simp] = sameRegionAs1 sameRegionAs2 + +lemma isMDBParentOf1: + assumes "\isReplyCap cap" + assumes "\isEndpointCap cap" + assumes "\isNotificationCap cap" + shows "isMDBParentOf c (CTE cap' m) = isMDBParentOf c (CTE cap m)" +proof - + from assms + have c': + "\isReplyCap cap'" "\isEndpointCap cap'" + "\isNotificationCap cap'" by auto + note isReplyCap [simp del] isEndpointCap [simp del] isNotificationCap [simp del] + from c' assms + show ?thesis + apply (cases c, clarsimp) + apply (simp add: isMDBParentOf_CTE) + apply (rule iffI) + apply clarsimp + apply (clarsimp simp: capBadge_ordering_def capBadge_def isCap_simps sameRegionAs_def3 + split: if_split_asm) + apply clarsimp + apply (clarsimp simp: capBadge_ordering_def capBadge_def isCap_simps sameRegionAs_def3 + split: if_split_asm) + done +qed + +lemma isMDBParentOf2: + assumes "\isReplyCap cap" + assumes "\isEndpointCap cap" + assumes "\isNotificationCap cap" + shows "isMDBParentOf (CTE cap' m) c = isMDBParentOf (CTE cap m) c" +proof - + from assms + have c': + "\isReplyCap cap'" "\isEndpointCap cap'" + "\isNotificationCap cap'" by auto + note isReplyCap [simp del] isEndpointCap 
[simp del] isNotificationCap [simp del] + from c' assms + show ?thesis + apply (cases c, clarsimp) + apply (simp add: isMDBParentOf_CTE) + apply (auto simp: capBadge_ordering_def capBadge_def isCap_simps sameRegionAs_def3 + split: if_split_asm)[1] + done +qed + +lemmas isMDBParentOf = isMDBParentOf1 isMDBParentOf2 + +end + + +lemma same_master_descendants: + assumes slot: "m slot = Some cte" + assumes master: "capMasterCap (cteCap cte) = capMasterCap cap'" + assumes c': "\isReplyCap cap'" "\isEndpointCap cap'" "\isNotificationCap cap'" + defines "m' \ m(slot \ cteCap_update (\_. cap') cte)" + shows "descendants_of' p m' = descendants_of' p m" +proof (rule set_eqI, simp add: descendants_of'_def) + obtain cap n where cte: "cte = CTE cap n" by (cases cte) + then + interpret masterCap cap cap' + using master by (simp add: masterCap_def) + + from c' + have c: "\isReplyCap cap" + "\isEndpointCap cap" + "\isNotificationCap cap" by auto + + note parent [simp] = isMDBParentOf [OF c] + + { fix a b + from slot + have "m' \ a \ b = m \ a \ b" + by (simp add: m'_def mdb_next_unfold) + } note this [simp] + + { fix a b + from slot cte + have "m' \ a parentOf b = m \ a parentOf b" + by (simp add: m'_def parentOf_def) + } note this [simp] + + fix x + { assume "m \ p \ x" + hence "m' \ p \ x" + proof induct + case (direct_parent c') + thus ?case + by (auto intro: subtree.direct_parent) + next + case trans_parent + thus ?case + by (auto elim: subtree.trans_parent) + qed + } + moreover { + assume "m' \ p \ x" + hence "m \ p \ x" + proof induct + case (direct_parent c') + thus ?case + by (auto intro: subtree.direct_parent) + next + case trans_parent + thus ?case + by (auto elim: subtree.trans_parent) + qed + } + ultimately + show "m' \ p \ x = m \ p \ x" by blast +qed + +lemma is_ep_cap_relation: + "cap_relation c c' \ isEndpointCap c' = is_ep_cap c" + apply (simp add: isCap_simps is_cap_simps) + apply (cases c, auto) + done + +lemma is_ntfn_cap_relation: + "cap_relation c c' \ isNotificationCap c' = is_ntfn_cap c" + apply (simp add: isCap_simps is_cap_simps) + apply (cases c, auto) + done + +(* Just for convenience like free_index_update *) +definition freeIndex_update where + "freeIndex_update c' g \ case c' of capability.UntypedCap d ref sz f \ capability.UntypedCap d ref sz (g f) | _ \ c'" + +lemma freeIndex_update_not_untyped[simp]: "\isUntypedCap c \ freeIndex_update c g = c" + by (case_tac c,simp_all add:freeIndex_update_def isCap_simps) + +locale mdb_insert = + mdb_ptr_src?: mdb_ptr m _ _ src src_cap src_node + + mdb_ptr_dest?: mdb_ptr m _ _ dest dest_cap dest_node + for m src src_cap src_node dest dest_cap dest_node + + + fixes c' :: capability + + assumes dest_cap: "dest_cap = NullCap" + assumes dest_prev: "mdbPrev dest_node = 0" + assumes dest_next: "mdbNext dest_node = 0" + + assumes valid_badges: "valid_badges m" + assumes ut_rev: "ut_revocable' m" + + fixes n + + defines "n \ + modify_map + (modify_map + (modify_map m dest (cteCap_update (\_. c'))) + dest + (cteMDBNode_update + (\m. mdbFirstBadged_update (\a. isCapRevocable c' src_cap) + (mdbRevocable_update (\a. isCapRevocable c' src_cap) + (mdbPrev_update (\a. src) src_node))))) + src + (cteMDBNode_update (mdbNext_update (\a. 
dest)))" + + assumes neq: "src \ dest" + +locale mdb_insert_der = mdb_insert + + assumes partial_is_derived': "is_derived' m src c' src_cap" + + +context mdb_insert +begin + +lemmas src = mdb_ptr_src.m_p +lemmas dest = mdb_ptr_dest.m_p + + +lemma no_0_n [intro!]: "no_0 n" by (auto simp: n_def) +lemmas n_0_simps [iff] = no_0_simps [OF no_0_n] + +lemmas neqs [simp] = neq neq [symmetric] + +definition + "new_dest \ CTE c' (mdbFirstBadged_update (\a. isCapRevocable c' src_cap) + (mdbRevocable_update (\a. isCapRevocable c' src_cap) + (mdbPrev_update (\a. src) src_node)))" + +definition + "new_src \ CTE src_cap (mdbNext_update (\a. dest) src_node)" + +lemma n: "n = m (dest \ new_dest, src \ new_src)" + using src dest + by (simp add: n_def modify_map_apply new_dest_def new_src_def) + +lemma dest_no_parent [iff]: + "m \ dest \ x = False" using dest dest_next + by (auto dest: subtree_next_0) + +lemma dest_no_child [iff]: + "m \ x \ dest = False" using dest dest_prev + by (auto dest: subtree_prev_0) + +lemma dest_no_descendants: "descendants_of' dest m = {}" + by (simp add: descendants_of'_def) + +lemma descendants_not_dest: "dest \ descendants_of' p m \ False" + by (simp add: descendants_of'_def) + +lemma src_next: "m \ src \ mdbNext src_node" + by (simp add: src mdb_next_unfold) + +lemma src_next_rtrancl_conv [simp]: + "m \ mdbNext src_node \\<^sup>* dest = m \ src \\<^sup>+ dest" + apply (rule iffI) + apply (insert src_next) + apply (erule (1) rtrancl_into_trancl2) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma dest_no_next [iff]: + "m \ x \ dest = False" using dest dest_prev dlist + apply clarsimp + apply (simp add: mdb_next_unfold) + apply (elim exE conjE) + apply (case_tac z) + apply simp + apply (rule dlistEn [where p=x], assumption) + apply clarsimp + apply clarsimp + done + +lemma dest_no_next_trans [iff]: + "m \ x \\<^sup>+ dest = False" + by (clarsimp dest!: tranclD2) + +lemma dest_no_prev [iff]: + "m \ dest \ p = (p = 0)" using dest dest_next + by (simp add: mdb_next_unfold) + +lemma dest_no_prev_trancl [iff]: + "m \ dest \\<^sup>+ p = (p = 0)" + apply (rule iffI) + apply (drule tranclD) + apply (clarsimp simp: dest_next) + apply simp + apply (insert chain dest) + apply (simp add: mdb_chain_0_def) + apply auto + done + +lemma chain_n: + "mdb_chain_0 n" +proof - + from chain + have "m \ mdbNext src_node \\<^sup>* 0" using dlist src + apply (cases "mdbNext src_node = 0") + apply simp + apply (erule dlistEn, simp) + apply (auto simp: mdb_chain_0_def) + done + moreover + have "\m \ mdbNext src_node \\<^sup>* src" + using src_next + apply clarsimp + apply (drule (1) rtrancl_into_trancl2) + apply simp + done + moreover + have "\ m \ 0 \\<^sup>* dest" using no_0 dest + by (auto elim!: next_rtrancl_tranclE dest!: no_0_lhs_trancl) + moreover + have "\ m \ 0 \\<^sup>* src" using no_0 src + by (auto elim!: next_rtrancl_tranclE dest!: no_0_lhs_trancl) + moreover + note chain + ultimately + show "mdb_chain_0 n" using no_0 src dest + apply (simp add: n new_src_def new_dest_def) + apply (auto intro!: mdb_chain_0_update no_0_update simp: next_update_lhs_rtrancl) + done +qed + +lemma no_loops_n: "no_loops n" using chain_n no_0_n + by (rule mdb_chain_0_no_loops) + +lemma irrefl_trancl_simp [iff]: + "n \ x \\<^sup>+ x = False" + using no_loops_n by (rule no_loops_trancl_simp) + +lemma n_direct_eq: + "n \ p \ p' = (if p = src then p' = dest else + if p = dest then m \ src \ p' + else m \ p \ p')" + using src dest dest_prev + by (auto simp: mdb_next_update n new_src_def 
new_dest_def + src_next mdb_next_unfold) + +lemma n_dest: + "n dest = Some new_dest" + by (simp add: n) + +end + +lemma revokable_plus_orderD: + "\ isCapRevocable new old; (capBadge old, capBadge new) \ capBadge_ordering P; + capMasterCap old = capMasterCap new \ + \ (isUntypedCap new \ (\x. capBadge old = Some 0 \ capBadge new = Some x \ x \ 0))" + by (clarsimp simp: Retype_H.isCapRevocable_def AARCH64_H.isCapRevocable_def isCap_simps + AARCH64_H.arch_capability.simps + split: if_split_asm capability.split_asm AARCH64_H.arch_capability.split_asm) + +lemma valid_badges_def2: + "valid_badges m = + (\p p' cap node cap' node'. + m p = Some (CTE cap node) \ + m p' = Some (CTE cap' node') \ + m \ p \ p' \ + capMasterCap cap = capMasterCap cap' \ + capBadge cap \ None \ + capBadge cap \ capBadge cap' \ + capBadge cap' \ Some 0 \ + mdbFirstBadged node')" + apply (simp add: valid_badges_def) + apply (intro arg_cong[where f=All] ext imp_cong [OF refl]) + apply (case_tac cap, simp_all add: isCap_simps cong: weak_imp_cong) + apply (fastforce simp: sameRegionAs_def3 isCap_simps)+ + done + +lemma sameRegionAs_update_untyped: + "RetypeDecls_H.sameRegionAs (capability.UntypedCap d a b c) = + RetypeDecls_H.sameRegionAs (capability.UntypedCap d a b c')" + apply (rule ext) + apply (case_tac x) + apply (clarsimp simp:sameRegionAs_def isCap_simps)+ + done + +lemma sameRegionAs_update_untyped': + "RetypeDecls_H.sameRegionAs cap (capability.UntypedCap d a b f) = + RetypeDecls_H.sameRegionAs cap (capability.UntypedCap d a b f')" + apply (case_tac cap) + apply (clarsimp simp:sameRegionAs_def isCap_simps)+ + done + +(*The newly inserted cap should never have children.*) +lemma (in mdb_insert_der) dest_no_parent_n: + "n \ dest \ p = False" + using src partial_is_derived' valid_badges ut_rev + apply clarsimp + apply (erule subtree.induct) + prefer 2 + apply simp + apply (clarsimp simp: parentOf_def mdb_next_unfold n_dest new_dest_def n) + apply (cases "mdbNext src_node = dest") + apply (subgoal_tac "m \ src \ dest") + apply simp + apply (subst mdb_next_unfold) + apply (simp add: src) + apply (case_tac "isUntypedCap src_cap") + apply (clarsimp simp: isCap_simps isMDBParentOf_CTE is_derived'_def + badge_derived'_def freeIndex_update_def capMasterCap_def + split: capability.splits) + apply (simp add: ut_revocable'_def) + apply (drule spec[where x=src], simp add: isCap_simps) + apply (simp add: descendants_of'_def) + apply (drule spec[where x="mdbNext src_node"]) + apply (erule notE, rule direct_parent) + apply (simp add: mdb_next_unfold) + apply simp + apply (simp add: parentOf_def src isMDBParentOf_CTE isCap_simps + cong: sameRegionAs_update_untyped) + apply (clarsimp simp: isMDBParentOf_CTE is_derived'_def badge_derived'_def) + apply (drule(2) revokable_plus_orderD) + apply (erule sameRegionAsE, simp_all) + apply (simp add: valid_badges_def2) + apply (erule_tac x=src in allE) + apply (erule_tac x="mdbNext src_node" in allE) + apply (clarsimp simp: src mdb_next_unfold) + apply (case_tac "capBadge cap'", simp_all) + apply (clarsimp simp add: isCap_simps capMasterCap_def + simp del: not_ex + split: capability.splits) + apply (clarsimp simp: isCap_simps)+ + done + +locale mdb_insert_child = mdb_insert_der + + assumes child: + "isMDBParentOf + (CTE src_cap src_node) + (CTE c' (mdbFirstBadged_update (\a. isCapRevocable c' src_cap) + (mdbRevocable_update (\a. isCapRevocable c' src_cap) + (mdbPrev_update (\a. 
src) src_node))))" + +context mdb_insert_child +begin + +lemma new_child [simp]: + "isMDBParentOf new_src new_dest" + by (simp add: new_src_def new_dest_def) (rule child) + +lemma n_dest_child: + "n \ src \ dest" + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def src dest n) + done + +lemma parent_m_n: + assumes "m \ p \ p'" + shows "if p' = src then n \ p \ dest \ n \ p \ p' else n \ p \ p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply (cases "p = src") + apply simp + apply (rule conjI, clarsimp) + apply clarsimp + apply (rule subtree.trans_parent [where c'=dest]) + apply (rule n_dest_child) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (clarsimp simp: new_src_def src) + apply simp + apply (subgoal_tac "n \ p \ c") + prefer 2 + apply (rule subtree.direct_parent) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: new_src_def src) + apply clarsimp + apply (erule subtree_trans) + apply (rule n_dest_child) + done +next + case (trans_parent c d) + thus ?case + apply - + apply (cases "c = dest", simp) + apply (cases "d = dest", simp) + apply (cases "c = src") + apply clarsimp + apply (erule subtree.trans_parent [where c'=dest]) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (rule conjI, clarsimp) + apply (clarsimp simp: new_src_def src) + apply clarsimp + apply (subgoal_tac "n \ p \ d") + apply clarsimp + apply (erule subtree_trans, rule n_dest_child) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: src new_src_def) + done +qed + +lemma n_to_dest [simp]: + "n \ p \ dest = (p = src)" + by (simp add: n_direct_eq) + +lemma parent_n_m: + assumes "n \ p \ p'" + shows "if p' = dest then p \ src \ m \ p \ src else m \ p \ p'" +proof - + from assms have [simp]: "p \ dest" by (clarsimp simp: dest_no_parent_n) + from assms + show ?thesis + proof induct + case (direct_parent c) + thus ?case + apply simp + apply (rule conjI) + apply clarsimp + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply simp + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + done + next + case (trans_parent c d) + thus ?case + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp split: if_split_asm) + apply (simp add: n_direct_eq) + apply (cases "p=src") + apply simp + apply (rule subtree.direct_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply clarsimp + apply (erule subtree.trans_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + done + qed +qed + +lemma descendants: + "descendants_of' p n = + (if src \ descendants_of' p m \ p = src + then descendants_of' p m \ {dest} else descendants_of' p m)" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (fastforce dest!: parent_n_m dest: parent_m_n simp: n_dest_child split: if_split_asm) + done + +end + +locale mdb_insert_sib = mdb_insert_der + + assumes no_child: + "\isMDBParentOf + (CTE src_cap src_node) + (CTE 
c' (mdbFirstBadged_update (\a. isCapRevocable c' src_cap) + (mdbRevocable_update (\a. isCapRevocable c' src_cap) + (mdbPrev_update (\a. src) src_node))))" +begin +interpretation Arch . (*FIXME: arch_split*) + +(* If dest is inserted as sibling, src can not have had children. + If it had had children, then dest_node which is just a derived copy + of src_node would be a child as well. *) +lemma src_no_mdb_parent: + "isMDBParentOf (CTE src_cap src_node) cte = False" + using no_child partial_is_derived' + apply clarsimp + apply (case_tac cte) + apply (clarsimp simp: isMDBParentOf_CTE is_derived'_def badge_derived'_def) + apply (erule sameRegionAsE) + apply (clarsimp simp add: sameRegionAs_def3) + subgoal by (cases src_cap; auto simp: capMasterCap_def Retype_H.isCapRevocable_def AARCH64_H.isCapRevocable_def + freeIndex_update_def isCap_simps + split: capability.split_asm arch_capability.split_asm) (* long *) + apply (clarsimp simp: isCap_simps sameRegionAs_def3 capMasterCap_def freeIndex_update_def + split:capability.splits arch_capability.splits) + apply (clarsimp simp: isCap_simps sameRegionAs_def3 freeIndex_update_def + capRange_def split:capability.splits + simp del: Int_atLeastAtMost atLeastAtMost_iff) + apply auto[1] + apply (clarsimp simp: isCap_simps sameRegionAs_def3)+ + done + +lemma src_no_parent: + "m \ src \ p = False" + by (clarsimp dest!: subtree_parent simp: src_no_mdb_parent parentOf_def src) + +lemma parent_preserved: + "isMDBParentOf cte (CTE src_cap src_node) \ isMDBParentOf cte new_dest" + using partial_is_derived' + apply (cases cte) + apply (case_tac "isUntypedCap src_cap") + apply (clarsimp simp:isCap_simps isMDBParentOf_CTE freeIndex_update_def new_dest_def) + apply (clarsimp simp:is_derived'_def isCap_simps badge_derived'_def capMasterCap_def split:capability.splits) + apply (clarsimp simp:sameRegionAs_def2 capMasterCap_def isCap_simps split:capability.splits) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (clarsimp simp: new_dest_def) + apply (rename_tac cap node) + apply (clarsimp simp: is_derived'_def badge_derived'_def) + apply (rule conjI) + apply (simp add: sameRegionAs_def2) + apply (cases "isCapRevocable c' src_cap") + apply simp + apply (drule(2) revokable_plus_orderD) + apply (erule disjE) + apply (clarsimp simp: isCap_simps) + by ((fastforce elim: capBadge_ordering_trans simp: isCap_simps)+) + +lemma src_no_parent_n [simp]: + "n \ src \ p = False" + apply clarsimp + apply (erule subtree.induct) + apply (simp add: n_direct_eq) + apply (clarsimp simp: parentOf_def n src dest new_src_def + new_dest_def no_child) + apply simp + done + +lemma parent_n: + "n \ p \ p' \ if p' = dest then m \ p \ src else m \ p \ p'" + apply (cases "p=dest", simp add: dest_no_parent_n) + apply (cases "p=src", simp) + apply (erule subtree.induct) + apply simp + apply (rule conjI) + apply (clarsimp simp: n_direct_eq) + apply clarsimp + apply (rule direct_parent) + apply (simp add: n_direct_eq) + apply assumption + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply simp + apply (rule conjI) + apply (clarsimp simp: n_direct_eq split: if_split_asm) + apply (clarsimp simp: n_direct_eq split: if_split_asm) + apply (erule trans_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply (erule trans_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + done + +lemma parent_m: + "m \ p \ p' \ n \ p \ p' \ (p' = src \ n \ p \ dest)" + apply (cases 
"p=src", simp add: src_no_parent) + apply (erule subtree.induct) + apply (rule conjI) + apply (rule direct_parent) + apply (clarsimp simp: n_direct_eq) + apply assumption + apply (fastforce simp add: parentOf_def n src new_src_def) + apply clarsimp + apply (rule trans_parent [where c'=src]) + apply (rule direct_parent) + apply (simp add: n_direct_eq) + apply (rule notI, simp) + apply simp + apply (simp add: parentOf_def n src new_src_def) + apply (clarsimp simp: dest dest_cap) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def dest src n) + apply (rule conjI, clarsimp simp: dest dest_cap) + apply (clarsimp intro!: parent_preserved) + apply clarsimp + apply (case_tac "c'=src") + apply simp + apply (erule trans_parent [where c'=dest]) + apply (clarsimp simp: n_direct_eq) + apply clarsimp + apply (fastforce simp: parentOf_def dest src n) + apply clarsimp + apply (rule conjI) + apply (erule trans_parent) + apply (simp add: n_direct_eq) + apply clarsimp + apply assumption + apply (fastforce simp: parentOf_def dest src n new_src_def) + apply clarsimp + apply (rule trans_parent [where c'=src]) + apply (erule trans_parent) + apply (simp add: n_direct_eq) + apply clarsimp + apply simp + apply (fastforce simp: parentOf_def dest src n new_src_def) + apply (simp add: n_direct_eq) + apply simp + apply (fastforce simp: parentOf_def dest src n new_src_def + intro!: parent_preserved) + done + +lemma parent_n_eq: + "n \ p \ p' = (if p' = dest then m \ p \ src else m \ p \ p')" + apply (rule iffI) + apply (erule parent_n) + apply (simp split: if_split_asm) + apply (drule parent_m, simp) + apply (drule parent_m, clarsimp) + done + +lemma descendants: + "descendants_of' p n = + descendants_of' p m \ (if src \ descendants_of' p m then {dest} else {})" + by (rule set_eqI) (simp add: descendants_of'_def parent_n_eq) + +end +context begin interpretation Arch . (*FIXME: arch_split*) +lemma mdb_None: + assumes F: "\p'. cte_map p \ descendants_of' p' m' \ False" + assumes R: "cdt_relation (swp cte_at s) (cdt s) m'" + assumes "valid_mdb s" + shows "cdt s p = None" + apply (simp add: descendants_of_None [symmetric]) + apply clarsimp + apply (frule descendants_of_cte_at2, rule assms) + apply (insert R) + apply (simp add: cdt_relation_def) + apply (erule allE, erule allE, erule (1) impE) + apply (rule_tac p'="cte_map (a,b)" in F) + apply (drule sym) + apply simp + done + +declare if_split [split del] + +lemma derived_sameRegionAs: + "\ is_derived' m p cap' cap; s \' cap' \ + \ sameRegionAs cap cap'" + unfolding is_derived'_def badge_derived'_def + apply (simp add: sameRegionAs_def3) + apply (cases "isUntypedCap cap \ isArchFrameCap cap") + apply (rule disjI2, rule disjI1) + apply (erule disjE) + apply (clarsimp simp: isCap_simps valid_cap'_def capAligned_def + is_aligned_no_overflow capRange_def + split: capability.splits arch_capability.splits option.splits) + apply (clarsimp simp: isCap_simps valid_cap'_def capAligned_def + is_aligned_no_overflow capRange_def + split: capability.splits arch_capability.splits option.splits) + apply (clarsimp simp: isCap_simps valid_cap'_def + is_aligned_no_overflow capRange_def vs_cap_ref_arch'_def + split: capability.splits arch_capability.splits option.splits) + done + +lemma no_fail_updateMDB [wp]: + "no_fail (\s. p \ 0 \ cte_at' p s) (updateMDB p f)" + apply (simp add: updateMDB_def) + apply (rule no_fail_pre, wp) + apply (simp split: if_split) + done + +lemma updateMDB_cte_at' [wp]: + "\cte_at' p\ + updateMDB x y + \\_. 
cte_at' p\" + by (wpsimp wp: updateMDB_weak_cte_wp_at) + +lemma updateCap_cte_at' [wp]: + "\cte_at' p\ updateCap c p' \\_. cte_at' p\" + unfolding updateCap_def by wp + +lemma nullMDBNode_pointers[simp]: + "mdbPrev nullMDBNode = nullPointer" + "mdbNext nullMDBNode = nullPointer" + by (simp add: nullMDBNode_def)+ + +lemma maxFreeIndex_eq[simp]: "maxFreeIndex nat1 = max_free_index nat1" + by (clarsimp simp:maxFreeIndex_def max_free_index_def shiftL_nat) + +definition maskedAsFull :: "capability \ capability \ capability" +where "maskedAsFull srcCap newCap \ + if isUntypedCap srcCap \ isUntypedCap newCap \ + capPtr srcCap = capPtr newCap \ capBlockSize srcCap = capBlockSize newCap + then capFreeIndex_update (\_. maxFreeIndex (capBlockSize srcCap)) srcCap + else srcCap" + +lemma is_derived_maskedAsFull[simp]: + "is_derived' m slot c (maskedAsFull src_cap' a) = + is_derived' m slot c src_cap'" + apply (clarsimp simp: maskedAsFull_def isCap_simps split:if_splits) + apply (case_tac c) + apply (clarsimp simp:is_derived'_def isCap_simps badge_derived'_def)+ + done + + +lemma maskedAsFull_revokable: + "is_derived' x y c' src_cap' \ + isCapRevocable c' (maskedAsFull src_cap' a) = isCapRevocable c' src_cap'" + apply (case_tac src_cap') + apply (simp_all add:maskedAsFull_def isCap_simps) + apply (case_tac c') + apply (simp_all add:maskedAsFull_def is_derived'_def isCap_simps) + apply (simp_all add:badge_derived'_def capMasterCap_simps split:arch_capability.splits) + apply (clarsimp split:if_splits simp:Retype_H.isCapRevocable_def AARCH64_H.isCapRevocable_def isCap_simps)+ + done + +lemma parentOf_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ cteMDBNode cte = cteMDBNode cte'" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows "(m \ p parentOf x) \ (m' \ p parentOf x)" + apply (clarsimp simp:parentOf_def) + apply (frule iffD1[OF dom,OF domI]) + apply (frule iffD1[OF dom[where x = p],OF domI]) + apply clarsimp + apply (frule_tac x1 = p in conjunct1[OF sameRegion]) + apply assumption + apply (frule_tac x1 = x in conjunct2[OF sameRegion]) + apply assumption + apply (drule_tac x = "cteCap y" in fun_cong) + apply (drule_tac x = "cteCap cte'" in fun_cong) + apply (drule_tac x = p in misc) + apply assumption + apply (drule_tac x = x in misc) + apply assumption + apply ((simp only: isMDBParentOf_def split_def split: cte.splits if_split_asm); clarsimp) + by (clarsimp simp: sameRegionAs_def isCap_simps Let_def split: if_split_asm)+ (* long *) + +lemma parentOf_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes misc:"\x cte cte'. 
\m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ cteMDBNode cte = cteMDBNode cte'" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows "(m \ p parentOf x) = (m' \ p parentOf x)" + apply (rule iffI) + apply (rule parentOf_preserve_oneway[OF dom sameRegion misc node]) + apply (assumption)+ + apply (rule parentOf_preserve_oneway) + apply (auto simp:dom sameRegion misc node) +done + +lemma updateUntypedCap_descendants_of: + "\m src = Some cte; isUntypedCap (cteCap cte)\ + \ descendants_of' slot (m(src \ cteCap_update + (\_. (capFreeIndex_update (\_. idx) (cteCap cte))) cte)) = + descendants_of' slot m" + apply (rule set_eqI) + + apply (clarsimp simp:descendants_of'_def subtree_def) + apply (rule_tac x = x in fun_cong) + apply (rule_tac f = lfp in arg_cong) + apply (rule ext)+ + apply (cut_tac x = xa in parentOf_preserve + [where m = "m(src \ cteCap_update (\_. capFreeIndex_update (\_. idx) (cteCap cte)) cte)" + and m' = m and p = slot]) + apply (clarsimp,rule iffI,fastforce+) + apply (clarsimp simp:isCap_simps split:if_splits) + apply (clarsimp simp:sameRegionAs_def isCap_simps split:if_splits) + apply (rule ext) + apply (clarsimp simp:sameRegionAs_def isCap_simps split:if_splits)+ + apply (simp add:mdb_next_def split:if_splits) + apply (simp del:fun_upd_apply) + apply (subgoal_tac "\p. m(src \ cteCap_update (\_. capFreeIndex_update (\_. idx) (cteCap cte)) cte) \ p \ xa + = m \ p \ xa") + apply simp + apply (clarsimp simp:mdb_next_rel_def mdb_next_def split:if_splits) + done + +crunches setCTE + for tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + +lemma setCTE_UntypedCap_corres: + "\cap_relation cap (cteCap cte); is_untyped_cap cap; idx' = idx\ + \ corres dc (cte_wp_at ((=) cap) src and valid_objs and + pspace_aligned and pspace_distinct) + (cte_wp_at' ((=) cte) (cte_map src) and + pspace_distinct' and pspace_aligned') + (set_cap (free_index_update (\_. idx) cap) src) + (setCTE (cte_map src) (cteCap_update + (\cap. (capFreeIndex_update (\_. 
idx') (cteCap cte))) cte))" + apply (case_tac cte) + apply (clarsimp simp:is_cap_simps) + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply clarsimp + apply (clarsimp simp add: state_relation_def split_def) + apply (drule (1) pspace_relationsD) + apply (frule_tac c = "cap.UntypedCap dev r bits idx" + in set_cap_not_quite_corres_prequel) + apply assumption+ + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption+ + apply simp+ + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply (clarsimp simp: pspace_relations_def) + apply (subst conj_assoc[symmetric]) + apply clarsimp + apply (rule conjI) + apply (frule setCTE_pspace_only) + apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ready_queues_relation _ _" \ -\) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (rule use_valid[OF _ setCTE_tcbSchedPrevs_of], assumption) + apply (rule use_valid[OF _ setCTE_tcbSchedNexts_of], assumption) + apply (rule use_valid[OF _ setCTE_ksReadyQueues], assumption) + apply (rule use_valid[OF _ setCTE_inQ_opt_pred], assumption) + apply (rule use_valid[OF _ set_cap_exst], assumption) + apply clarsimp + apply (rule conjI) + apply (frule setCTE_pspace_only) + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (rule conjI) + prefer 2 + apply (rule conjI) + apply (frule mdb_set_cap, frule exst_set_cap) + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (clarsimp simp: cdt_list_relation_def cte_wp_at_ctes_of split: if_split_asm) + apply (rule conjI) + prefer 2 + apply (frule setCTE_pspace_only) + apply clarsimp + apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + apply (frule set_cap_caps_of_state_monad) + apply (drule is_original_cap_set_cap) + apply clarsimp + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (clarsimp simp: revokable_relation_def simp del: fun_upd_apply) + apply (clarsimp split: if_split_asm) + apply (frule cte_map_inj_eq) + prefer 2 + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (simp add: null_filter_def split: if_split_asm) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule caps_of_state_cte_at) + apply fastforce + apply fastforce + apply fastforce + apply clarsimp + apply (simp add: null_filter_def split: if_split_asm) + apply (erule_tac x=aa in allE, erule_tac x=bb in allE) + apply (case_tac cte) + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps isCap_simps cte_wp_at_ctes_of) + apply (simp add: null_filter_def cte_wp_at_caps_of_state split: if_split_asm) + apply (erule_tac x=aa in allE, erule_tac x=bb in allE) + apply (clarsimp) + apply (clarsimp simp: cdt_relation_def) + apply (frule set_cap_caps_of_state_monad) + apply (frule mdb_set_cap) + apply clarsimp + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (frule cte_wp_at_norm) + apply (clarsimp simp:cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (drule_tac slot = "cte_map (aa,bb)" in updateUntypedCap_descendants_of) + apply (clarsimp simp:isCap_simps) + apply (drule_tac x = aa in spec) + apply (drule_tac x = bb in spec) + apply (erule impE) + apply (clarsimp simp: cte_wp_at_caps_of_state split:if_splits) + apply auto + done + +lemma getCTE_get: + "\cte_wp_at' P src\ getCTE src \\rv s. 
P rv\" + apply (wp getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma setUntypedCapAsFull_corres: + "\cap_relation c c'; cap_relation src_cap (cteCap srcCTE)\ + \ corres dc (cte_wp_at ((=) src_cap) src and valid_objs and + pspace_aligned and pspace_distinct) + (cte_wp_at' ((=) srcCTE) (cte_map src) and + pspace_aligned' and pspace_distinct') + (set_untyped_cap_as_full src_cap c src) + (setUntypedCapAsFull (cteCap srcCTE) c' (cte_map src))" + apply (clarsimp simp:set_untyped_cap_as_full_def setUntypedCapAsFull_def + split:if_splits) + apply (intro conjI impI) + apply (clarsimp simp del:capFreeIndex_update.simps simp:updateCap_def) + apply (rule corres_guard_imp) + apply (rule corres_symb_exec_r) + apply (rule_tac F="cte = srcCTE" in corres_gen_asm2) + apply (simp) + apply (rule setCTE_UntypedCap_corres) + apply simp+ + apply (clarsimp simp:free_index_update_def isCap_simps is_cap_simps) + apply (subst identity_eq) + apply (wp getCTE_sp getCTE_get)+ + apply (clarsimp simp:cte_wp_at_ctes_of)+ + apply (clarsimp simp:is_cap_simps isCap_simps)+ + apply (case_tac c,simp_all) + apply (case_tac src_cap,simp_all) + done + +(* FIXME: SELFOUR-421 move *) +lemma isUntypedCap_simps[simp]: + "isUntypedCap (capability.UntypedCap uu uv uw ux) = True" + "isUntypedCap (capability.NullCap) = False" + "isUntypedCap (capability.EndpointCap v va vb vc vd ve) = False" + "isUntypedCap (capability.NotificationCap v va vb vc) = False" + "isUntypedCap (capability.ReplyCap v1 v2 v3) = False" + "isUntypedCap (capability.CNodeCap x1 x2 x3 x4) = False" + "isUntypedCap (capability.ThreadCap v) = False" + "isUntypedCap (capability.DomainCap) = False" + "isUntypedCap (capability.IRQControlCap) = False" + "isUntypedCap (capability.IRQHandlerCap y1) = False" + "isUntypedCap (capability.Zombie v va1 vb1) = False" + "isUntypedCap (capability.ArchObjectCap z) = False" + by (simp_all add: isUntypedCap_def split: capability.splits) + +lemma cap_relation_masked_as_full: + "\cap_relation src_cap src_cap';cap_relation c c'\ \ + cap_relation (masked_as_full src_cap c) (maskedAsFull src_cap' c')" + apply (clarsimp simp: masked_as_full_def maskedAsFull_def + split: if_splits) + apply (case_tac src_cap; clarsimp) + by (case_tac c; clarsimp) + +lemma setUntypedCapAsFull_pspace_distinct[wp]: + "\pspace_distinct' and cte_wp_at' ((=) srcCTE) slot\ + setUntypedCapAsFull (cteCap srcCTE) c slot \\r. pspace_distinct'\" + apply (clarsimp simp: setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (clarsimp simp:valid_def) + apply (drule updateCap_stuff) + apply simp + apply (wp|clarsimp)+ +done + +lemma setUntypedCapAsFull_pspace_aligned[wp]: + "\pspace_aligned' and cte_wp_at' ((=) srcCTE) slot\ + setUntypedCapAsFull (cteCap srcCTE) c slot + \\r. pspace_aligned'\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (clarsimp simp:valid_def) + apply (drule updateCap_stuff) + apply simp + apply (wp|clarsimp)+ +done + +(* wp rules about setFreeIndex and setUntypedCapAsFull *) +lemma setUntypedCapAsFull_ctes_of: + "\\s. src \ dest \ P (ctes_of s dest) \ + src = dest \ P (Some (CTE (maskedAsFull (cteCap srcCTE) cap) + (cteMDBNode srcCTE))) \ + cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. 
P (ctes_of s dest)\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (simp add:updateCap_def) + apply (wp getCTE_wp) + apply (clarsimp split:if_splits simp:cte_wp_at_ctes_of if_distrib) + apply (case_tac "src = dest") + apply (case_tac srcCTE) + apply (clarsimp simp:maskedAsFull_def) + apply clarsimp + apply wp + apply (case_tac srcCTE) + apply (fastforce simp:maskedAsFull_def cte_wp_at_ctes_of split: if_splits) + done + +lemma setUntypedCapAsFull_ctes_of_no_0: + "\\s. no_0 ((ctes_of s)(a:=b)) \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. no_0 ((ctes_of s)(a:=b)) \" + apply (clarsimp simp:no_0_def split:if_splits) + apply (wp hoare_vcg_imp_lift setUntypedCapAsFull_ctes_of[where dest = 0]) + apply (auto simp:cte_wp_at_ctes_of) + done + +lemma valid_dlist_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte'" + shows "valid_dlist m \ valid_dlist m'" + apply (clarsimp simp:valid_dlist_def Let_def) + apply (frule domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (elim allE impE) + apply assumption + apply (intro conjI impI) + apply clarsimp + apply (frule(1) misc) + apply (clarsimp) + apply (frule_tac b = cte' in domI[where m = m]) + apply (drule iffD1[OF dom]) + apply clarsimp + apply (drule(1) misc)+ + apply simp + apply clarsimp + apply (frule(1) misc) + apply (clarsimp) + apply (frule_tac b = cte' in domI[where m = m]) + apply (drule iffD1[OF dom]) + apply clarsimp + apply (drule(1) misc)+ + apply simp +done + +lemma valid_dlist_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte'" + shows "valid_dlist m = valid_dlist m'" + apply (rule iffI) + apply (rule valid_dlist_preserve_oneway[OF dom misc]) + apply simp+ + apply (rule valid_dlist_preserve_oneway) + apply (simp add:dom misc)+ +done + +lemma ut_revocable_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte')" + shows "ut_revocable' m \ ut_revocable' m'" + apply (clarsimp simp:ut_revocable'_def Let_def) + apply (drule_tac x = p in spec) + apply (frule domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (case_tac r) + apply clarsimp + apply (elim allE impE) + apply (frule(1) misc) + apply (clarsimp) + apply (drule(1) misc)+ + apply simp +done + +lemma ut_revocable_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte')" + shows "ut_revocable' m = ut_revocable' m'" + apply (rule iffI) + apply (rule ut_revocable_preserve_oneway[OF dom misc]) + apply (assumption)+ + apply (rule ut_revocable_preserve_oneway[OF dom[symmetric]]) + apply (simp add:misc)+ +done + +lemma class_links_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ capClass (cteCap cte) = capClass (cteCap cte')" + assumes node:"\p. 
mdb_next m p = mdb_next m' p" + shows "class_links m \ class_links m'" + apply (clarsimp simp:class_links_def Let_def) + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (frule domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (case_tac r) + apply clarsimp + apply (frule_tac b = cte' in domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (elim allE impE) + apply simp + apply (frule(1) misc) + apply (clarsimp simp:mdb_next_rel_def node) + apply (drule(1) misc)+ + apply simp +done + +lemma class_links_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ capClass (cteCap cte) = capClass (cteCap cte')" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows "class_links m = class_links m'" + apply (rule iffI) + apply (rule class_links_preserve_oneway[OF dom misc]) + apply (simp add:node)+ + apply (rule class_links_preserve_oneway) + apply (simp add:dom misc node)+ +done + +lemma distinct_zombies_preserve_oneway: + assumes dom: "\x. (x \ dom m) = (x \ dom m')" + assumes misc: + "\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isZombie (cteCap cte) = isZombie (cteCap cte') \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') \ + isArchFrameCap (cteCap cte) = isArchFrameCap (cteCap cte') \ + capBits (cteCap cte) = capBits (cteCap cte') \ + capUntypedPtr (cteCap cte) = capUntypedPtr (cteCap cte') \ + capClass (cteCap cte) = capClass (cteCap cte')" + assumes node: "\p. mdb_next m p = mdb_next m' p" + shows "distinct_zombies m \ distinct_zombies m'" + apply (clarsimp simp:distinct_zombies_def distinct_zombie_caps_def Let_def) + apply (drule_tac x = ptr in spec) + apply (drule_tac x = ptr' in spec) + apply (frule domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (case_tac r) + apply clarsimp + apply (frule_tac a=ptr' in domI[where m = m'],drule iffD2[OF dom],erule domE) + apply clarsimp + apply (drule(1) misc)+ + apply clarsimp + done + +lemma distinct_zombies_preserve: + assumes dom: "\x. (x \ dom m) = (x \ dom m')" + assumes misc: + "\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isZombie (cteCap cte) = isZombie (cteCap cte') \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') \ + isArchFrameCap (cteCap cte) = isArchFrameCap (cteCap cte') \ + capBits (cteCap cte) = capBits (cteCap cte') \ + capUntypedPtr (cteCap cte) = capUntypedPtr (cteCap cte') \ + capClass (cteCap cte) = capClass (cteCap cte')" + assumes node: "\p. mdb_next m p = mdb_next m' p" + shows "distinct_zombies m = distinct_zombies m'" + apply (rule iffI) + apply (rule distinct_zombies_preserve_oneway[OF dom misc node]) + apply (assumption)+ + apply (rule distinct_zombies_preserve_oneway) + apply (simp add:dom misc node)+ + done + +lemma caps_contained'_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. 
\m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ capRange (cteCap cte) = capRange (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + shows "caps_contained' m \ caps_contained' m'" + apply (clarsimp simp:caps_contained'_def) + apply (frule iffD2[OF dom,OF domI]) + apply (frule_tac x1 = p' in iffD2[OF dom,OF domI]) + apply clarsimp + apply (case_tac y,case_tac ya) + apply (drule_tac x= p in spec) + apply (drule_tac x= p' in spec) + apply (frule_tac x = p in misc) + apply assumption + apply (frule_tac x = p' in misc) + apply assumption + apply (elim allE impE) + apply fastforce+ + done + +lemma caps_contained'_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ capRange (cteCap cte) = capRange (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + shows "caps_contained' m = caps_contained' m'" + apply (rule iffI) + apply (rule caps_contained'_preserve_oneway[OF dom misc]) + apply (assumption)+ + apply (rule caps_contained'_preserve_oneway) + apply (auto simp:dom misc) + done + +lemma is_chunk_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows " \m x =Some (CTE a b);m' x = Some (CTE c d)\ \ is_chunk m a p p' \ is_chunk m' c p p'" + apply (clarsimp simp:is_chunk_def) + apply (drule_tac x= p'' in spec) + apply (subgoal_tac "m \ p \\<^sup>+ p'' = m' \ p \\<^sup>+ p''") + apply (subgoal_tac "m \ p'' \\<^sup>* p' = m' \ p'' \\<^sup>* p'") + apply (frule iffD1[OF dom,OF domI]) + apply (clarsimp) + apply (frule_tac x1 = p'' in iffD1[OF dom,OF domI]) + apply clarsimp + apply (frule_tac x = p'' in sameRegion,assumption) + apply clarsimp + apply (frule_tac x = x in sameRegion,assumption) + apply clarsimp + apply (case_tac y) + apply (drule_tac fun_cong)+ + apply fastforce + apply simp + apply (erule iffD1[OF connect_eqv_singleE',rotated -1]) + apply (clarsimp simp: mdb_next_rel_def node) + apply (rule connect_eqv_singleE) + apply (clarsimp simp: mdb_next_rel_def node) + done + +lemma is_chunk_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows " \m x =Some (CTE a b);m' x = Some (CTE c d)\ \ is_chunk m a p p' = is_chunk m' c p p'" + apply (rule iffI) + apply (rule is_chunk_preserve_oneway[OF dom sameRegion node],assumption+) + apply (rule is_chunk_preserve_oneway) + apply (auto simp:dom sameRegion node) + done + +lemma mdb_chunked_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes node:"\p. 
mdb_next m p = mdb_next m' p" + shows + "mdb_chunked m \ mdb_chunked m'" + apply (clarsimp simp:mdb_chunked_def) + apply (drule_tac x=p in spec) + apply (drule_tac x=p' in spec) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply clarsimp + apply (case_tac ya) + apply (case_tac y) + apply (frule_tac x = p in sameRegion,assumption) + apply (frule_tac x = p' in sameRegion,assumption) + apply clarsimp + apply (erule impE) + apply (drule fun_cong)+ + apply fastforce + apply (subgoal_tac "m \ p \\<^sup>+ p' = m' \ p \\<^sup>+ p'") + apply (subgoal_tac "m \ p' \\<^sup>+ p = m' \ p' \\<^sup>+ p") + apply (frule_tac m = m and + x = p and c = cap and p = p and p'=p' in is_chunk_preserve[rotated -1]) + apply (simp add:dom) + apply (rule sameRegion) + apply simp+ + apply (rule node) + apply assumption + apply (frule_tac x = p' and c = cap' and p = p' and p'=p in is_chunk_preserve[rotated -1]) + apply (rule dom) + apply (rule sameRegion) + apply assumption+ + apply (rule node) + apply assumption + apply clarsimp + apply (rule connect_eqv_singleE) + apply (clarsimp simp:mdb_next_rel_def node) + apply (rule connect_eqv_singleE) + apply (clarsimp simp:mdb_next_rel_def node) + done + +lemma mdb_chunked_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows + "mdb_chunked m = mdb_chunked m'" + apply (rule iffI) + apply (erule mdb_chunked_preserve_oneway[rotated -1]) + apply (simp add:dom sameRegion node)+ + apply (erule mdb_chunked_preserve_oneway[rotated -1]) + apply (simp add:dom[symmetric]) + apply (frule sameRegion) + apply assumption + apply simp + apply (simp add:node) + done + +lemma valid_badges_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" + shows "valid_badges m \ valid_badges m'" + apply (clarsimp simp:valid_badges_def) + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply clarsimp + apply (case_tac y,case_tac ya) + apply clarsimp + apply (erule impE) + apply (simp add: mdb_next mdb_next_rel_def) + apply (erule impE) + apply (drule(1) sameRegion)+ + apply clarsimp + apply (drule fun_cong)+ + apply fastforce + apply (drule(1) misc)+ + apply (clarsimp simp:isCap_simps sameRegionAs_def split:if_splits) + done + +lemma valid_badges_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. 
\m x =Some cte;m' x = Some cte'\ \ + isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" + shows "valid_badges m = valid_badges m'" + apply (rule iffI) + apply (rule valid_badges_preserve_oneway[OF dom misc sameRegion mdb_next]) + apply assumption+ + apply (rule valid_badges_preserve_oneway) + apply (simp add:dom misc sameRegion mdb_next)+ + done + +lemma mdb_untyped'_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ capRange (cteCap cte) = capRange (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" + shows + "untyped_mdb' m \ untyped_mdb' m'" + apply (clarsimp simp:untyped_mdb'_def) + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply clarsimp + apply (case_tac y,case_tac ya) + apply (frule misc) + apply fastforce + apply clarsimp + apply (frule_tac x = p' in misc) + apply fastforce + apply (frule_tac x = p in misc) + apply assumption + apply clarsimp + apply (clarsimp simp: descendants_of'_def Invariants_H.subtree_def) + apply (erule_tac f1 = "\x. lfp x y" for y in iffD1[OF arg_cong,rotated]) + apply (rule ext)+ + apply (subgoal_tac "\p p'. (m \ p \ p') = (m' \ p \ p')") + apply (thin_tac "P" for P)+ + apply (subgoal_tac "(m \ p parentOf x) = (m' \ p parentOf x)") + apply fastforce + apply (rule parentOf_preserve[OF dom]) + apply (simp add:misc sameRegion mdb_next mdb_next_rel_def)+ + done + + +lemma untyped_mdb'_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ capRange (cteCap cte) = capRange (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. 
\m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" + shows + "untyped_mdb' m = untyped_mdb' m'" + apply (rule iffI) + apply (erule mdb_untyped'_preserve_oneway[rotated -1]) + apply (simp add:dom misc sameRegion range mdb_next)+ + apply (erule mdb_untyped'_preserve_oneway[rotated -1]) + apply (simp add:dom[symmetric]) + apply (frule(1) misc,simp) + apply (frule(1) sameRegion,simp) + apply (simp add:mdb_next[symmetric])+ +done + +lemma irq_control_preserve_oneway: + assumes dom: "\x. (x \ dom m) = (x \ dom m')" + assumes misc: + "\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isIRQControlCap (cteCap cte) = isIRQControlCap (cteCap cte') \ + cteMDBNode cte = cteMDBNode cte'" + shows "irq_control m \ irq_control m'" + apply (clarsimp simp:irq_control_def) + apply (frule iffD2[OF dom,OF domI]) + apply clarsimp + apply (frule(1) misc) + apply (clarsimp simp:isCap_simps) + apply (case_tac y) + apply (elim allE impE) + apply fastforce + apply clarsimp + apply (drule_tac x = p' in spec) + apply (erule impE) + apply (frule_tac x1 = p' in iffD2[OF dom,OF domI]) + apply clarsimp + apply (drule(1) misc)+ + apply (case_tac y) + apply (simp add:isCap_simps)+ + done + +lemma irq_control_preserve: + assumes dom: "\x. (x \ dom m) = (x \ dom m')" + assumes misc: + "\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isIRQControlCap (cteCap cte) = isIRQControlCap (cteCap cte') \ + cteMDBNode cte = cteMDBNode cte'" + shows "irq_control m = irq_control m'" + apply (rule iffI[OF irq_control_preserve_oneway[OF dom misc]]) + apply (assumption)+ + apply (rule irq_control_preserve_oneway) + apply (simp add:dom misc)+ + done + +end + +locale mdb_inv_preserve = + fixes m m' + assumes dom: "\x. (x\ dom m) = (x\ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ isNullCap (cteCap cte) = isNullCap (cteCap cte') + \ isReplyCap (cteCap cte) = isReplyCap (cteCap cte') + \ (isReplyCap (cteCap cte) \ capReplyMaster (cteCap cte) = capReplyMaster (cteCap cte')) + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ capClass (cteCap cte) = capClass (cteCap cte') + \ isZombie (cteCap cte) = isZombie (cteCap cte') + \ isArchFrameCap (cteCap cte) = isArchFrameCap (cteCap cte') + \ capBits (cteCap cte) = capBits (cteCap cte') + \ RetypeDecls_H.capUntypedPtr (cteCap cte) = RetypeDecls_H.capUntypedPtr (cteCap cte') + \ capRange (cteCap cte) = capRange (cteCap cte') + \ isIRQControlCap (cteCap cte) = isIRQControlCap (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" +begin +interpretation Arch . 
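+(* mdb_inv_preserve m m' collects the pointwise conditions under which the CTE
+   heaps m and m' are indistinguishable to the MDB invariants: equal domains,
+   agreement on every cap attribute the invariants inspect (untypedness, badges,
+   ranges, classes, zombie/frame status, MDB nodes), the same sameRegionAs
+   relation in both argument positions, and identical mdb_next links.  The lemmas
+   preserve_stuff, untyped_inc', descendants_of and by_products below transfer the
+   corresponding invariants from m to m'; mdb_inv_preserve_updateCap later provides
+   an instance of the locale for a free-index update of an untyped cap. *)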
(*FIXME: arch_split*) +lemma preserve_stuff: + "valid_dlist m = valid_dlist m' + \ ut_revocable' m = ut_revocable' m' + \ class_links m = class_links m' + \ distinct_zombies m = distinct_zombies m' + \ caps_contained' m = caps_contained' m' + \ mdb_chunked m = mdb_chunked m' + \ valid_badges m = valid_badges m' + \ untyped_mdb' m = untyped_mdb' m' + \ irq_control m = irq_control m'" + apply (intro conjI) + apply (rule valid_dlist_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule ut_revocable_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule class_links_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule distinct_zombies_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule caps_contained'_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule mdb_chunked_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule valid_badges_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule untyped_mdb'_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule irq_control_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + done + +lemma untyped_inc': + assumes subeq: "\x cte cte'. \m x =Some cte;m' x = Some cte';isUntypedCap (cteCap cte)\ \ + usableUntypedRange (cteCap cte') \ usableUntypedRange (cteCap cte)" + shows "untyped_inc' m \ untyped_inc' m'" + apply (clarsimp simp:untyped_inc'_def) + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply clarsimp + apply (rename_tac cte cte') + apply (case_tac cte) + apply (rename_tac cap node) + apply (case_tac cte') + apply (drule_tac x = cap in spec) + apply clarsimp + apply (frule_tac x = p' in misc) + apply assumption + apply (frule_tac x = p in misc) + apply assumption + apply clarsimp + apply (drule(1) subeq,simp)+ + apply (subgoal_tac "\p p'. (p' \descendants_of' p m) = (p' \ descendants_of' p m')") + apply clarsimp + apply (intro conjI impI) + apply clarsimp + apply (drule(1) disjoint_subset2[rotated],clarsimp+)+ + apply (erule disjE) + apply clarsimp+ + apply (thin_tac "P" for P)+ + apply (clarsimp simp: descendants_of'_def Invariants_H.subtree_def) + apply (rule_tac f = "\x. lfp x c" for c in arg_cong) + apply (subgoal_tac "\p p'. (m \ p \ p') = (m' \ p \ p')") + apply (rule ext)+ + apply clarsimp + apply (subgoal_tac "(m \ pa parentOf x) = (m' \ pa parentOf x)") + apply fastforce + apply (rule parentOf_preserve[OF dom]) + apply (simp add:misc sameRegion mdb_next mdb_next_rel_def)+ + done + +lemma descendants_of: + "descendants_of' p m = descendants_of' p m'" + apply (rule set_eqI) + apply (clarsimp simp:descendants_of'_def Invariants_H.subtree_def) + apply (rule_tac f = "\x. lfp x c" for c in arg_cong) + apply (rule ext)+ + apply (subgoal_tac "\p p'. 
(m \ p \ p') = (m' \ p \ p')") + apply clarsimp + apply (subgoal_tac "(m \ p parentOf xa) = (m' \ p parentOf xa)") + apply fastforce + apply (rule parentOf_preserve[OF dom]) + apply (simp add:misc sameRegion mdb_next mdb_next_rel_def)+ + done + +lemma by_products: + "reply_masters_rvk_fb m = reply_masters_rvk_fb m' + \ no_0 m = no_0 m' \ mdb_chain_0 m = mdb_chain_0 m' + \ valid_nullcaps m = valid_nullcaps m'" +apply (intro conjI) + apply (simp add:ran_dom reply_masters_rvk_fb_def mdb_inv_preserve_def dom misc sameRegion mdb_next) + apply (rule iffI) + apply clarsimp + apply (drule_tac x = y in bspec) + apply (rule iffD2[OF dom]) + apply clarsimp + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (clarsimp simp:misc)+ + apply (drule_tac x = y in bspec) + apply (rule iffD1[OF dom]) + apply clarsimp + apply (frule iffD1[OF dom,OF domI],rotate_tac) + apply (clarsimp simp:misc)+ + apply (clarsimp simp:no_0_def) + apply (rule ccontr) + apply (simp add:dom_in) + apply (subst (asm) dom[symmetric]) + apply fastforce + apply (rule iffI) + apply (clarsimp simp:mdb_chain_0_def) + apply (drule_tac x =x in bspec) + apply (rule iffD2[OF dom],clarsimp) + apply (erule_tac iffD1[OF connect_eqv_singleE,rotated]) + apply (cut_tac p = p in mdb_next) + apply (clarsimp simp: mdb_next_rel_def) + apply (clarsimp simp:mdb_chain_0_def) + apply (drule_tac x =x in bspec) + apply (rule iffD1[OF dom],clarsimp) + apply (erule_tac iffD1[OF connect_eqv_singleE,rotated]) + apply (cut_tac p = p in mdb_next) + apply (clarsimp simp: mdb_next_rel_def) + apply (simp add:valid_nullcaps_def) + apply (rule forall_eq,clarsimp)+ + apply (rule iffI) + apply clarsimp + apply (frule iffD2[OF dom,OF domI]) + apply (clarsimp) + apply (case_tac y) + apply (drule misc) + apply assumption + apply (clarsimp simp:isCap_simps) + apply clarsimp + apply (frule iffD1[OF dom,OF domI]) + apply (clarsimp) + apply (case_tac y) + apply (drule misc) + apply assumption + apply (clarsimp simp:isCap_simps) +done + +end + +lemma mdb_inv_preserve_modify_map: + "mdb_inv_preserve m m' \ + mdb_inv_preserve (modify_map m slot (cteMDBNode_update f)) + (modify_map m' slot (cteMDBNode_update f))" + apply (clarsimp simp:mdb_inv_preserve_def split:if_splits) + apply (intro conjI) + apply (clarsimp simp:modify_map_dom) + apply (clarsimp simp:modify_map_def split:if_splits)+ + apply (clarsimp simp:option_map_def o_def split:option.splits if_splits) + apply (drule_tac x = p in spec)+ + apply (intro allI conjI impI) + apply (clarsimp simp:mdb_next_def split:if_splits)+ + done + +lemma mdb_inv_preserve_updateCap: + "\m slot = Some cte;isUntypedCap (cteCap cte)\ \ + mdb_inv_preserve m (modify_map m slot + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap cte))))" + apply (clarsimp simp:mdb_inv_preserve_def modify_map_dom isCap_simps modify_map_def split:if_splits) + apply (intro conjI impI allI) + apply fastforce + apply (simp add:sameRegionAs_update_untyped) + apply (rule ext,simp add:sameRegionAs_update_untyped') + apply (simp add:mdb_next_def split:if_splits) + done + +lemma mdb_inv_preserve_fun_upd: + "mdb_inv_preserve m m' \ mdb_inv_preserve (m(a \ b)) (m'(a \ b))" + by (clarsimp simp:mdb_inv_preserve_def mdb_next_def split:if_splits) + +lemma updateCap_ctes_of_wp: + "\\s. P (modify_map (ctes_of s) ptr (cteCap_update (\_. cap)))\ + updateCap ptr cap + \\r s. P (ctes_of s)\" + by (rule validI, simp add: updateCap_stuff) + +lemma updateCap_cte_wp_at': + "\\s. cte_at' ptr s \ Q (cte_wp_at' (\cte. 
if p = ptr then P' (CTE cap (cteMDBNode cte)) else P' cte) p s)\ + updateCap ptr cap \\rv s. Q (cte_wp_at' P' p s)\" + apply (simp add:updateCap_def cte_wp_at_ctes_of) + apply (wp setCTE_ctes_of_wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, auto split: if_split) + done + +lemma updateCapFreeIndex_mdb_chain_0: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (mdb_chain_0 (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (mdb_chain_0 (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.by_products) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_valid_badges: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (valid_badges (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (valid_badges (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_caps_contained: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (caps_contained' (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (caps_contained' (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_valid_nullcaps: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (valid_nullcaps (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (valid_nullcaps (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.by_products) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_ut_revocable: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (ut_revocable'(Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. 
P (ut_revocable' (Q (ctes_of s)))\<rbrace>"
+ apply (wp updateCap_ctes_of_wp)
+ apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src
+ (cteCap_update (\<lambda>_. capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE)))))")
+ apply (drule mdb_inv_preserve.preserve_stuff)
+ apply simp
+ apply (rule preserve)
+ apply (rule mdb_inv_preserve_updateCap)
+ apply (clarsimp simp:cte_wp_at_ctes_of)+
+done
+
+lemma updateCapFreeIndex_class_links:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (class_links (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE \<and> isUntypedCap (cteCap c)) src s\<rbrace>
+ updateCap src (capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE))
+ \<lbrace>\<lambda>r s. P (class_links (Q (ctes_of s)))\<rbrace>"
+ apply (wp updateCap_ctes_of_wp)
+ apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src
+ (cteCap_update (\<lambda>_. capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE)))))")
+ apply (drule mdb_inv_preserve.preserve_stuff)
+ apply simp
+ apply (rule preserve)
+ apply (rule mdb_inv_preserve_updateCap)
+ apply (clarsimp simp:cte_wp_at_ctes_of)+
+done
+
+lemma updateCapFreeIndex_reply_masters_rvk_fb:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (reply_masters_rvk_fb (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE \<and> isUntypedCap (cteCap c)) src s\<rbrace>
+ updateCap src (capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE))
+ \<lbrace>\<lambda>r s. P (reply_masters_rvk_fb (Q (ctes_of s)))\<rbrace>"
+ apply (wp updateCap_ctes_of_wp)
+ apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src
+ (cteCap_update (\<lambda>_. capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE)))))")
+ apply (drule mdb_inv_preserve.by_products)
+ apply simp
+ apply (rule preserve)
+ apply (rule mdb_inv_preserve_updateCap)
+ apply (clarsimp simp:cte_wp_at_ctes_of)+
+done
+
+lemma updateCapFreeIndex_distinct_zombies:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (distinct_zombies (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE \<and> isUntypedCap (cteCap c)) src s\<rbrace>
+ updateCap src (capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE))
+ \<lbrace>\<lambda>r s. P (distinct_zombies (Q (ctes_of s)))\<rbrace>"
+ apply (wp updateCap_ctes_of_wp)
+ apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src
+ (cteCap_update (\<lambda>_. capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE)))))")
+ apply (drule mdb_inv_preserve.preserve_stuff)
+ apply simp
+ apply (rule preserve)
+ apply (rule mdb_inv_preserve_updateCap)
+ apply (clarsimp simp:cte_wp_at_ctes_of)+
+done
+
+lemma updateCapFreeIndex_mdb_chunked:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (mdb_chunked (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE \<and> isUntypedCap (cteCap c)) src s\<rbrace>
+ updateCap src (capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE))
+ \<lbrace>\<lambda>r s. P (mdb_chunked (Q (ctes_of s)))\<rbrace>"
+ apply (wp updateCap_ctes_of_wp)
+ apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src
+ (cteCap_update (\<lambda>_. capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE)))))")
+ apply (drule mdb_inv_preserve.preserve_stuff)
+ apply simp
+ apply (rule preserve)
+ apply (rule mdb_inv_preserve_updateCap)
+ apply (clarsimp simp:cte_wp_at_ctes_of)+
+done
+
+lemma updateCapFreeIndex_untyped_mdb':
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (untyped_mdb' (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE \<and> isUntypedCap (cteCap c)) src s\<rbrace>
+ updateCap src (capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE))
+ \<lbrace>\<lambda>r s. P (untyped_mdb' (Q (ctes_of s)))\<rbrace>"
+ apply (wp updateCap_ctes_of_wp)
+ apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src
+ (cteCap_update (\<lambda>_. capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE)))))")
+ apply (drule mdb_inv_preserve.preserve_stuff)
+ apply simp
+ apply (rule preserve)
+ apply (rule mdb_inv_preserve_updateCap)
+ apply (clarsimp simp:cte_wp_at_ctes_of)+
+done
+
+lemma updateCapFreeIndex_irq_control:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (irq_control (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE \<and> isUntypedCap (cteCap c)) src s\<rbrace>
+ updateCap src (capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE))
+ \<lbrace>\<lambda>r s. P (irq_control (Q (ctes_of s)))\<rbrace>"
+ apply (wp updateCap_ctes_of_wp)
+ apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src
+ (cteCap_update (\<lambda>_. capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE)))))")
+ apply (drule mdb_inv_preserve.preserve_stuff)
+ apply simp
+ apply (rule preserve)
+ apply (rule mdb_inv_preserve_updateCap)
+ apply (clarsimp simp:cte_wp_at_ctes_of)+
+done
+
+lemma setUntypedCapAsFull_mdb_chunked:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (mdb_chunked (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (mdb_chunked (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_mdb_chunked)
+ apply (clarsimp simp:preserve cte_wp_at_ctes_of)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_untyped_mdb':
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (untyped_mdb' (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (untyped_mdb' (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_untyped_mdb')
+ apply (clarsimp simp:cte_wp_at_ctes_of preserve)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_mdb_chain_0:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (mdb_chain_0 (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (mdb_chain_0 (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_mdb_chain_0)
+ apply (clarsimp simp:preserve cte_wp_at_ctes_of)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_irq_control:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (irq_control (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (irq_control (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_irq_control)
+ apply (clarsimp simp:cte_wp_at_ctes_of preserve)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_valid_badges:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (valid_badges (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (valid_badges (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_valid_badges)
+ apply (clarsimp simp:cte_wp_at_ctes_of preserve)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_caps_contained:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (caps_contained' (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (caps_contained' (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_caps_contained)
+ apply (clarsimp simp:cte_wp_at_ctes_of preserve)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_valid_nullcaps:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (valid_nullcaps (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (valid_nullcaps (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_valid_nullcaps)
+ apply (clarsimp simp:cte_wp_at_ctes_of preserve)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_ut_revocable:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (ut_revocable' (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (ut_revocable' (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_ut_revocable)
+ apply (clarsimp simp:cte_wp_at_ctes_of preserve)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_class_links:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (class_links(Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (class_links (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_class_links)
+ apply (clarsimp simp:cte_wp_at_ctes_of preserve)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_distinct_zombies:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (distinct_zombies (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (distinct_zombies (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_distinct_zombies)
+ apply (clarsimp simp:cte_wp_at_ctes_of preserve)+
+ apply wp
+ apply clarsimp
+done
+
+lemma setUntypedCapAsFull_reply_masters_rvk_fb:
+ assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+ shows
+ "\<lbrace>\<lambda>s. P (reply_masters_rvk_fb (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+setUntypedCapAsFull (cteCap srcCTE) cap src
+ \<lbrace>\<lambda>r s. P (reply_masters_rvk_fb (Q (ctes_of s)))\<rbrace>"
+ apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI)
+ apply (wp updateCapFreeIndex_reply_masters_rvk_fb)
+ apply (clarsimp simp:cte_wp_at_ctes_of preserve)+
+ apply wp
+ apply clarsimp
+done
+
+lemma modify_map_eq[simp]:
+ "\<lbrakk>m slot = Some srcCTE; cap = cteCap srcCTE\<rbrakk>
+ \<Longrightarrow>(modify_map m slot (cteCap_update (\<lambda>_.
cap))) = m" + apply (rule ext) + apply (case_tac srcCTE) + apply (auto simp:modify_map_def split:if_splits) + done + +lemma setUntypedCapAsFull_ctes: + "\\s. cte_wp_at' (\c. c = srcCTE) src s \ + P (modify_map (ctes_of s) src (cteCap_update (\_. maskedAsFull (cteCap srcCTE) cap))) + \ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (ctes_of s)\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCap_ctes_of_wp) + apply (clarsimp simp:isCap_simps max_free_index_def maskedAsFull_def) + apply wp + apply (clarsimp simp:isCap_simps cte_wp_at_ctes_of + max_free_index_def maskedAsFull_def split:if_splits) + done + +lemma setUntypedCapAsFull_valid_cap: + "\valid_cap' cap and cte_wp_at' ((=) srcCTE) slot\ + setUntypedCapAsFull (cteCap srcCTE) c slot + \\r. valid_cap' cap\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (clarsimp simp:updateCap_def) + apply (wp|clarsimp)+ +done + +lemma cteCap_update_simps: + "cteCap_update f srcCTE = CTE (f (cteCap srcCTE)) (cteMDBNode srcCTE)" + apply (case_tac srcCTE) + apply auto +done + +lemma setUntypedCapAsFull_cte_wp_at: + "\cte_wp_at' ((=) srcCTE) slot and + (\s. cte_wp_at' (\c. P c) dest s \ dest \ slot \ + dest = slot \ cte_wp_at' (\c. P (CTE (maskedAsFull (cteCap c) c') + (cteMDBNode c))) slot s) \ + setUntypedCapAsFull (cteCap srcCTE) c' slot + \\r s. cte_wp_at' (\c. P c) dest s\" + apply (clarsimp simp:setUntypedCapAsFull_def cte_wp_at_ctes_of split:if_splits) + apply (case_tac "dest = slot") + apply (intro conjI impI) + apply (clarsimp simp:updateCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp:maskedAsFull_def cte_wp_at_ctes_of cteCap_update_simps) + apply wp + apply (case_tac srcCTE) + apply (fastforce simp:maskedAsFull_def cte_wp_at_ctes_of) + apply (intro conjI impI) + apply (clarsimp simp:updateCap_def | wp setCTE_weak_cte_wp_at getCTE_wp)+ + apply (simp add:cte_wp_at'_def) + apply (clarsimp simp:updateCap_def | wp setCTE_weak_cte_wp_at getCTE_wp)+ + done + +lemma mdb_inv_preserve_sym:"mdb_inv_preserve a b \ mdb_inv_preserve b a" + by (simp add:mdb_inv_preserve_def) + + +lemma mdb_inv_preserve_refl[simp]: + "mdb_inv_preserve m m" + by (simp add:mdb_inv_preserve_def) + +lemma setUntypedCapAsFull_mdb[wp]: + "\\s. valid_mdb' s \ cte_wp_at' (\c. c = srcCTE) slot s \ + setUntypedCapAsFull (cteCap srcCTE) cap slot + \\rv s. valid_mdb' s\" + apply (clarsimp simp:valid_mdb'_def) + apply (wp setUntypedCapAsFull_ctes) + apply (subgoal_tac "mdb_inv_preserve (ctes_of s) (modify_map (ctes_of s) slot + (cteCap_update (\_. 
maskedAsFull (cteCap srcCTE) cap)))") + apply (frule mdb_inv_preserve.untyped_inc') + apply (clarsimp simp:modify_map_def max_free_index_def + maskedAsFull_def isCap_simps cte_wp_at_ctes_of + split:if_splits) + apply (clarsimp simp:valid_mdb_ctes_def mdb_inv_preserve.preserve_stuff)+ + apply (clarsimp simp:mdb_inv_preserve.by_products[OF mdb_inv_preserve_sym]) + apply (clarsimp simp:maskedAsFull_def cte_wp_at_ctes_of split:if_splits) + apply (erule(1) mdb_inv_preserve_updateCap) + done + +lemma (in mdb_insert_abs_sib) next_slot_no_parent': + "\valid_list_2 t m; finite_depth m; no_mloop m; m src = None\ + \ next_slot p t (m(dest := None)) = next_slot p t m" + by (insert next_slot_no_parent, simp add: n_def) + +lemma no_parent_next_not_child_None: + "\m p = None; finite_depth m\ \ next_not_child p t m = None" + apply(rule next_not_child_NoneI) + apply(fastforce simp: descendants_of_def cdt_parent_defs dest: tranclD2) + apply(simp add: next_sib_def) + apply(simp) + done + +lemma (in mdb_insert_abs_sib) next_slot': + "\valid_list_2 t m; finite_depth m; no_mloop m; m src = Some src_p; t src = []\ + \ next_slot p (t(src_p := list_insert_after (t src_p) src dest)) + (m(dest := Some src_p)) = + (if p = src then Some dest + else if p = dest then next_slot src t m else next_slot p t m)" + by (insert next_slot, simp add: n_def) + +lemmas valid_list_def = valid_list_2_def + +crunch valid_list[wp]: set_untyped_cap_as_full valid_list + +lemma updateMDB_the_lot': + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "pspace_relations (ekheap sa) (kheap s) (ksPSpace s')" + assumes "pspace_aligned' s'" "pspace_distinct' s'" "no_0 (ctes_of s')" "ekheap s = ekheap sa" + shows "ctes_of s'' = modify_map (ctes_of s') p (cteMDBNode_update f) \ + ksMachineState s'' = ksMachineState s' \ + ksWorkUnitsCompleted s'' = ksWorkUnitsCompleted s' \ + ksCurThread s'' = ksCurThread s' \ + ksIdleThread s'' = ksIdleThread s' \ + ksReadyQueues s'' = ksReadyQueues s' \ + ksSchedulerAction s'' = ksSchedulerAction s' \ + ksInterruptState s'' = ksInterruptState s' \ + ksArchState s'' = ksArchState s' \ + gsUserPages s'' = gsUserPages s' \ + gsCNodes s'' = gsCNodes s' \ + pspace_relations (ekheap s) (kheap s) (ksPSpace s'') \ + pspace_aligned' s'' \ pspace_distinct' s'' \ + no_0 (ctes_of s'') \ + ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ + ksDomSchedule s'' = ksDomSchedule s' \ + ksCurDomain s'' = ksCurDomain s' \ + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" + apply (rule updateMDB_the_lot) + using assms + apply (fastforce simp: pspace_relations_def)+ + done + +lemma cte_map_inj_eq': + "\(cte_map p = cte_map p'); + cte_at p s \ cte_at p' s \ + valid_objs s \ pspace_aligned s \ pspace_distinct s\ + \ p = p'" + apply (rule cte_map_inj_eq; fastforce) + done + +context begin interpretation Arch . (*FIXME: arch_split*) +lemma cteInsert_corres: + notes split_paired_All[simp del] split_paired_Ex[simp del] + trans_state_update'[symmetric,simp] + assumes "cap_relation c c'" "src' = cte_map src" "dest' = cte_map dest" + shows "corres dc + (valid_objs and pspace_distinct and pspace_aligned and + valid_mdb and valid_list and K (src\dest) and + cte_wp_at (\c. c=Structures_A.NullCap) dest and + (\s. cte_wp_at (is_derived (cdt s) src c) src s)) + (pspace_distinct' and pspace_aligned' and valid_mdb' and valid_cap' c' and + cte_wp_at' (\c. 
cteCap c=NullCap) dest') + (cap_insert c src dest) + (cteInsert c' src' dest')" + (is "corres _ (?P and (\s. cte_wp_at _ _ s)) (?P' and cte_wp_at' _ _) _ _") + using assms + unfolding cap_insert_def cteInsert_def + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac F="cteCap rv' = NullCap" in corres_gen_asm2) + apply simp + apply (rule_tac P="?P and cte_at dest and + (\s. cte_wp_at (is_derived (cdt s) src c) src s) and + cte_wp_at ((=) src_cap) src" and + Q="?P' and + cte_wp_at' ((=) rv') (cte_map dest) and + cte_wp_at' ((=) srcCTE) (cte_map src)" + in corres_assert_assume) + prefer 2 + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def valid_nullcaps_def) + apply (case_tac rv') + apply (simp add: initMDBNode_def) + apply (erule allE)+ + apply (erule (1) impE) + apply (simp add: nullPointer_def) + apply (rule corres_guard_imp) + apply (rule_tac R="\r. ?P and cte_at dest and + (\s. (is_derived (cdt s) src c) src_cap) and + cte_wp_at ((=) (masked_as_full src_cap c)) src" and + R'="\r. ?P' and cte_wp_at' ((=) rv') (cte_map dest) and + cte_wp_at' ((=) (CTE (maskedAsFull (cteCap srcCTE) c') (cteMDBNode srcCTE))) + (cte_map src)" + in corres_split[where r'=dc]) + apply (rule setUntypedCapAsFull_corres; simp) + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (erule_tac valid_dlistEn[where p = "cte_map src"]) + apply (simp+)[3] + apply (clarsimp simp: corres_underlying_def state_relation_def + in_monad valid_mdb'_def valid_mdb_ctes_def) + apply (drule (1) pspace_relationsD) + apply (drule (18) set_cap_not_quite_corres) + apply (rule refl) + apply (elim conjE exE) + apply (rule bind_execI, assumption) + apply (subgoal_tac "mdb_insert_abs (cdt a) src dest") + prefer 2 + apply (erule mdb_insert_abs.intro) + apply (rule mdb_Null_None) + apply (simp add: op_equal) + apply simp + apply (rule mdb_Null_descendants) + apply (simp add: op_equal) + apply simp + apply (subgoal_tac "no_mloop (cdt a)") + prefer 2 + apply (simp add: valid_mdb_def) + apply (clarsimp simp: exec_gets update_cdt_def bind_assoc set_cdt_def + exec_get exec_put set_original_def modify_def + simp del: fun_upd_apply + | (rule bind_execI[where f="cap_insert_ext x y z i p" for x y z i p], clarsimp simp: exec_gets exec_get put_def mdb_insert_abs.cap_insert_ext_det_def2 update_cdt_list_def set_cdt_list_def, rule refl))+ + apply (clarsimp simp: put_def state_relation_def) + apply (drule updateCap_stuff) + apply clarsimp + apply (drule (3) updateMDB_the_lot', simp, simp, elim conjE) + apply (drule (3) updateMDB_the_lot', simp, simp, elim conjE) + apply (drule (3) updateMDB_the_lot', simp, simp, elim conjE) + apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def + prev_update_modify_mdb_relation) + apply (subgoal_tac "cte_map dest \ 0") + prefer 2 + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "cte_map src \ 0") + prefer 2 + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def no_0_def) + apply (thin_tac "ksMachineState t = p" for p t)+ + apply (thin_tac "ksCurThread t = p" for p t)+ + apply (thin_tac "ksIdleThread t = p" for p t)+ + apply (thin_tac "ksSchedulerAction t = p" for p t)+ + apply (clarsimp simp: pspace_relations_def) + + apply (rule conjI) + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply 
(rule conjI) + defer + apply(rule conjI) + apply (thin_tac "ctes_of s = t" for s t)+ + apply (thin_tac "pspace_relation s t" for s t)+ + apply (thin_tac "machine_state t = s" for s t)+ + apply (case_tac "srcCTE") + apply (rename_tac src_cap' src_node) + apply (case_tac "rv'") + apply (rename_tac dest_node) + apply (clarsimp simp: in_set_cap_cte_at_swp) + apply (subgoal_tac "cte_at src a \ is_derived (cdt a) src c src_cap") + prefer 2 + apply (fastforce simp: cte_wp_at_def) + apply (erule conjE) + apply (subgoal_tac "mdb_insert (ctes_of b) (cte_map src) (maskedAsFull src_cap' c') src_node + (cte_map dest) NullCap dest_node") + prefer 2 + apply (rule mdb_insert.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_insert_axioms.intro) + apply (rule refl) + apply assumption + apply assumption + apply assumption + apply assumption + apply (erule (5) cte_map_inj) + apply (frule mdb_insert_der.intro) + apply (rule mdb_insert_der_axioms.intro) + apply (simp add: is_derived_eq) + apply (simp (no_asm_simp) add: cdt_relation_def split: if_split) + apply (subgoal_tac "descendants_of dest (cdt a) = {}") + prefer 2 + apply (drule mdb_insert.dest_no_descendants) + apply (fastforce simp add: cdt_relation_def) + apply (subgoal_tac "mdb_insert_abs (cdt a) src dest") + prefer 2 + apply (erule mdb_insert_abs.intro) + apply (rule mdb_None) + apply (erule(1) mdb_insert.descendants_not_dest) + apply assumption + apply assumption + apply assumption + apply(simp add: cdt_list_relation_def) + apply(subgoal_tac "no_mloop (cdt a) \ finite_depth (cdt a)") + prefer 2 + apply(simp add: finite_depth valid_mdb_def) + apply(intro conjI impI allI) + apply(simp cong: option.case_cong) + apply(simp split: option.split) + apply(subgoal_tac "\aa. 
cdt a src = Some aa \ src \ aa") + prefer 2 + apply(fastforce simp: no_mloop_weaken) + apply(simp add: fun_upd_twist) + apply(rule allI) + apply(case_tac "next_child src (cdt_list (a))") + apply(frule next_child_NoneD) + apply(subst mdb_insert_abs.next_slot) + apply(simp_all)[5] + apply(case_tac "ca=src") + apply(simp) + apply(clarsimp simp: modify_map_def) + apply(fastforce split: if_split_asm) + apply(case_tac "ca = dest") + apply(simp) + apply(rule impI) + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x=src in allE)+ + subgoal by(fastforce) + apply(simp) + apply(rule impI) + apply(subgoal_tac "cte_at ca a") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(clarsimp) + apply(case_tac z) + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + + apply(frule(1) next_childD) + apply(simp add: mdb_insert_abs.next_slot) + apply(case_tac "ca=src") + apply(simp) + apply(clarsimp simp: modify_map_def) + subgoal by(fastforce split: if_split_asm) + apply(case_tac "ca = dest") + apply(simp) + apply(rule impI) + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + apply(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x=src in allE)+ + subgoal by(fastforce) + apply(simp) + apply(rule impI) + apply(subgoal_tac "cte_at ca a") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(clarsimp) + apply(case_tac z) + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + + apply(subgoal_tac "mdb_insert_sib (ctes_of b) (cte_map src) (maskedAsFull src_cap' c') + src_node (cte_map dest) capability.NullCap dest_node c'") + prefer 2 + apply(simp add: mdb_insert_sib_def) + apply(rule mdb_insert_sib_axioms.intro) + apply (subst can_be_is [symmetric]) + apply simp + apply (rule cap_relation_masked_as_full) + apply (simp+)[3] + apply simp + apply simp + apply simp + apply (subst (asm) is_cap_revocable_eq, assumption, assumption) + apply (rule derived_sameRegionAs) + apply (subst is_derived_eq[symmetric]; assumption) + apply assumption + subgoal by (clarsimp simp: cte_wp_at_def is_derived_def is_cap_simps cap_master_cap_simps + 
dest!:cap_master_cap_eqDs) + apply (subgoal_tac "is_original_cap a src = mdbRevocable src_node") + apply (frule(4) iffD1[OF is_derived_eq]) + apply (drule_tac src_cap' = src_cap' in + maskedAsFull_revokable[where a = c',symmetric]) + subgoal by(simp) + apply (simp add: revokable_relation_def) + apply (erule_tac x=src in allE)+ + apply simp + apply (erule impE) + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state split: if_splits) + subgoal by (clarsimp simp: masked_as_full_def is_cap_simps free_index_update_def split: if_splits) + apply(simp) + + apply(subgoal_tac "cdt_list (a) src = []") + prefer 2 + apply(rule ccontr) + apply(simp add: empty_list_empty_desc) + apply(simp add: no_children_empty_desc[symmetric]) + apply(erule exE) + apply(drule_tac p="cte_map caa" in mdb_insert_sib.src_no_parent) + apply(subgoal_tac "cte_map caa\descendants_of' (cte_map src) (ctes_of b)") + subgoal by(simp add: descendants_of'_def) + apply(simp add: cdt_relation_def) + apply(erule_tac x=src in allE) + apply(drule child_descendant)+ + apply(drule_tac x=caa and f=cte_map in imageI) + subgoal by(simp) + + apply(case_tac "cdt a src") + apply(simp) + apply(subst mdb_insert_abs_sib.next_slot_no_parent') + apply(simp add: mdb_insert_abs_sib_def) + apply(simp_all add: fun_upd_idem)[5] + + apply(case_tac "ca=src") + subgoal by(simp add: next_slot_def no_parent_next_not_child_None) + apply(case_tac "ca = dest") + subgoal by(simp add: next_slot_def no_parent_next_not_child_None + mdb_insert_abs.dest empty_list_empty_desc) + apply(case_tac "next_slot ca (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(simp) + apply(subgoal_tac "cte_at ca a") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(clarsimp) + apply(case_tac z) + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + + apply(simp add: fun_upd_idem) + apply(subst mdb_insert_abs_sib.next_slot') + subgoal by(simp add: mdb_insert_abs_sib_def) + apply(simp_all)[5] + apply(case_tac "ca=src") + apply(clarsimp simp: modify_map_def) + subgoal by(fastforce split: if_split_asm) + apply(case_tac "ca = dest") + apply(simp) + apply(case_tac "next_slot src (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(simp) + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + apply(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x=src in allE)+ + subgoal by(fastforce) + apply(simp) + apply(case_tac "next_slot ca (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(simp) + apply(subgoal_tac "cte_at ca a") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + 
apply(clarsimp) + apply(case_tac z) + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply (thin_tac "ctes_of t = t'" for t t')+ + apply (clarsimp simp: modify_map_apply) + apply (clarsimp simp: revokable_relation_def split: if_split) + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "mdbRevocable node = isCapRevocable c' (cteCap srcCTE)") + prefer 2 + apply (case_tac rv') + subgoal by (clarsimp simp add: const_def modify_map_def split: if_split_asm) + apply simp + apply (rule is_cap_revocable_eq, assumption, assumption) + apply (rule derived_sameRegionAs) + apply (drule(3) is_derived_eq[THEN iffD1,rotated -1]) + subgoal by (simp add: cte_wp_at_def) + apply assumption + apply assumption + subgoal by (clarsimp simp: cap_master_cap_simps cte_wp_at_def is_derived_def is_cap_simps + split:if_splits dest!:cap_master_cap_eqDs) + apply clarsimp + apply (case_tac srcCTE) + apply (case_tac rv') + apply clarsimp + apply (subgoal_tac "\cap' node'. ctes_of b (cte_map (aa,bb)) = Some (CTE cap' node')") + prefer 2 + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (case_tac z) + subgoal by clarsimp + apply clarsimp + apply (drule set_cap_caps_of_state_monad)+ + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + subgoal by (clarsimp simp: cte_wp_at_caps_of_state null_filter_def split: if_splits) + + apply clarsimp + apply (subgoal_tac "cte_at (aa,bb) a") + prefer 2 + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (subgoal_tac "mdbRevocable node = mdbRevocable node'") + subgoal by clarsimp + apply (subgoal_tac "cte_map (aa,bb) \ cte_map dest") + subgoal by (clarsimp simp: modify_map_def split: if_split_asm) + apply (erule (5) cte_map_inj) + apply (wp set_untyped_cap_full_valid_objs set_untyped_cap_as_full_valid_mdb + set_untyped_cap_as_full_cte_wp_at setUntypedCapAsFull_valid_cap + setUntypedCapAsFull_cte_wp_at | clarsimp simp: cte_wp_at_caps_of_state| wps)+ + apply (case_tac rv',clarsimp simp:cte_wp_at_ctes_of maskedAsFull_def) + apply (wp getCTE_wp' get_cap_wp)+ + apply clarsimp + subgoal by (fastforce elim: cte_wp_at_weakenE) + apply (clarsimp simp: cte_wp_at'_def) + apply (thin_tac "ctes_of s = t" for s t)+ + apply (thin_tac "pspace_relation s t" for s t)+ + apply (thin_tac "machine_state t = s" for s t)+ + apply (case_tac "srcCTE") + apply (rename_tac src_cap' src_node) + apply (case_tac "rv'") + apply (rename_tac dest_node) + apply (clarsimp simp: in_set_cap_cte_at_swp) + apply (subgoal_tac "cte_at src a \ is_derived (cdt a) src c src_cap") + prefer 2 + subgoal by (fastforce simp: cte_wp_at_def) + apply (erule conjE) + apply (subgoal_tac "mdb_insert (ctes_of b) (cte_map src) (maskedAsFull src_cap' c') src_node + (cte_map dest) NullCap dest_node") + prefer 2 + apply (rule mdb_insert.intro) + apply (rule mdb_ptr.intro) + subgoal by (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_ptr.intro) + subgoal by (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_insert_axioms.intro) + apply (rule refl) + apply assumption + apply assumption + apply assumption + apply assumption + apply (erule (5) cte_map_inj) + apply (frule mdb_insert_der.intro) + apply (rule 
mdb_insert_der_axioms.intro) + apply (simp add: is_derived_eq) + apply (simp (no_asm_simp) add: cdt_relation_def split: if_split) + apply (subgoal_tac "descendants_of dest (cdt a) = {}") + prefer 2 + apply (drule mdb_insert.dest_no_descendants) + subgoal by (fastforce simp add: cdt_relation_def simp del: split_paired_All) + apply (subgoal_tac "mdb_insert_abs (cdt a) src dest") + prefer 2 + apply (erule mdb_insert_abs.intro) + apply (rule mdb_None) + apply (erule(1) mdb_insert.descendants_not_dest) + apply assumption + apply assumption + apply assumption + apply (rule conjI) + apply (intro impI allI) + apply (unfold const_def) + apply (frule(4) iffD1[OF is_derived_eq]) + apply (drule_tac src_cap' = src_cap' in + maskedAsFull_revokable[where a = c',symmetric]) + apply simp + apply (subst mdb_insert_child.descendants) + apply (rule mdb_insert_child.intro) + apply simp + apply (rule mdb_insert_child_axioms.intro) + apply (subst can_be_is [symmetric]) + apply simp + apply (rule cap_relation_masked_as_full) + apply (simp+)[3] + apply simp + apply simp + apply (subst (asm) is_cap_revocable_eq, assumption, assumption) + apply (rule derived_sameRegionAs) + apply (subst is_derived_eq[symmetric], assumption, assumption, + assumption, assumption, assumption) + apply assumption + subgoal by (clarsimp simp: cte_wp_at_def is_derived_def is_cap_simps cap_master_cap_simps + dest!:cap_master_cap_eqDs) + apply (subgoal_tac "is_original_cap a src = mdbRevocable src_node") + prefer 2 + apply (simp add: revokable_relation_def del: split_paired_All) + apply (erule_tac x=src in allE) + apply (erule impE) + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state cap_master_cap_simps + split: if_splits dest!:cap_master_cap_eqDs) + subgoal by (clarsimp simp: masked_as_full_def is_cap_simps free_index_update_def split: if_splits) + subgoal by simp + subgoal by clarsimp + apply (subst mdb_insert_abs.descendants_child, assumption) + apply (frule_tac p=ca in in_set_cap_cte_at) + apply (subst descendants_of_eq') + prefer 2 + apply assumption + apply (simp_all)[6] + apply (simp add: cdt_relation_def split: if_split del: split_paired_All) + apply clarsimp + apply (drule (5) cte_map_inj)+ + apply simp + apply clarsimp + apply (subst mdb_insert_abs_sib.descendants, erule mdb_insert_abs_sib.intro) + apply (frule(4) iffD1[OF is_derived_eq]) + apply (drule_tac src_cap' = src_cap' in maskedAsFull_revokable[where a = c',symmetric]) + apply simp + apply (subst mdb_insert_sib.descendants) + apply (rule mdb_insert_sib.intro, assumption) + apply (rule mdb_insert_sib_axioms.intro) + apply (subst can_be_is [symmetric]) + apply simp + apply (rule cap_relation_masked_as_full) + apply (simp+)[3] + apply simp + apply simp + apply simp + apply (subst (asm) is_cap_revocable_eq, assumption, assumption) + apply (rule derived_sameRegionAs) + apply (subst is_derived_eq[symmetric], assumption, assumption, + assumption, assumption, assumption) + apply assumption + subgoal by (clarsimp simp: cte_wp_at_def is_derived_def is_cap_simps cap_master_cap_simps + dest!:cap_master_cap_eqDs) + apply (subgoal_tac "is_original_cap a src = mdbRevocable src_node") + subgoal by simp + apply (simp add: revokable_relation_def del: split_paired_All) + apply (erule_tac x=src in allE) + apply (erule impE) + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state split: if_splits) + subgoal by (clarsimp simp: masked_as_full_def is_cap_simps free_index_update_def split: if_splits) + subgoal by simp + apply (simp split: if_split) + apply (frule_tac p="(aa, 
bb)" in in_set_cap_cte_at) + apply (rule conjI) + apply (clarsimp simp: descendants_of_eq') + subgoal by (simp add: cdt_relation_def del: split_paired_All) + apply (clarsimp simp: descendants_of_eq') + subgoal by (simp add: cdt_relation_def del: split_paired_All) + done + + +declare if_split [split] + +lemma updateCap_no_0: + "\\s. no_0 (ctes_of s)\ updateCap cap ptr \\_ s. no_0 (ctes_of s)\" + apply (simp add: updateCap_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of no_0_def) + done + +lemma revokable_relation_prev [simp]: + "revokable_relation revo cs (modify_map m p (cteMDBNode_update (mdbPrev_update f))) = + revokable_relation revo cs m" + apply (rule iffI) + apply (clarsimp simp add: revokable_relation_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (clarsimp simp add: revokable_relation_def modify_map_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (case_tac z) + apply auto + done + +lemma revokable_relation_next [simp]: + "revokable_relation revo cs (modify_map m p (cteMDBNode_update (mdbNext_update f))) = + revokable_relation revo cs m" + apply (rule iffI) + apply (clarsimp simp add: revokable_relation_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (clarsimp simp add: revokable_relation_def modify_map_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (case_tac z) + apply auto + done + +lemma revokable_relation_cap [simp]: + "revokable_relation revo cs (modify_map m p (cteCap_update f)) = + revokable_relation revo cs m" + apply (rule iffI) + apply (clarsimp simp add: revokable_relation_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (clarsimp simp add: revokable_relation_def modify_map_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (case_tac z) + apply auto + done + +lemma mdb_cap_update: + "cteMDBNode_update f (cteCap_update f' x) = + cteCap_update f' (cteMDBNode_update f x)" + by (cases x) simp + +lemmas modify_map_mdb_cap = + modify_map_com [where f="cteMDBNode_update f" and + g="cteCap_update f'" for f f', + OF mdb_cap_update] + +lemma prev_leadstoD: + "\ m \ mdbPrev node \ c; m p = Some (CTE cap node); + valid_dlist m; no_0 m \ \ + c = p" + by (fastforce simp add: next_unfold' valid_dlist_def Let_def no_0_def) + +lemma prev_leadstoI: + "\ m p = Some (CTE cap node); mdbPrev node \ 0; valid_dlist m\ + \ m \ mdbPrev node \ p" + by (fastforce simp add: valid_dlist_def Let_def next_unfold') + +lemma mdb_next_modify_prev: + "modify_map m x (cteMDBNode_update (mdbPrev_update f)) \ a \ b = m \ a \ b" + by (auto simp add: next_unfold' modify_map_def) + +lemma mdb_next_modify_revocable: + "modify_map m x (cteMDBNode_update (mdbRevocable_update f)) \ a \ b = m \ a \ b" + by (auto simp add: next_unfold' modify_map_def) + +lemma mdb_next_modify_cap: + "modify_map m x (cteCap_update f) \ a \ b = m \ a \ b" + by (auto simp add: next_unfold' modify_map_def) + +lemmas mdb_next_modify [simp] = + mdb_next_modify_prev + mdb_next_modify_revocable + mdb_next_modify_cap + +lemma in_getCTE: + "(cte, s') \ fst (getCTE p s) \ s' = s \ cte_wp_at' ((=) cte) p s" + apply (frule in_inv_by_hoareD [OF getCTE_inv]) + apply (drule use_valid [OF _ getCTE_cte_wp_at], rule TrueI) + apply (simp add: cte_wp_at'_def) + done + +lemma isMDBParentOf_eq_parent: + "\ isMDBParentOf c cte; + 
weak_derived' (cteCap c) (cteCap c'); + mdbRevocable (cteMDBNode c') = mdbRevocable (cteMDBNode c) \ + \ isMDBParentOf c' cte" + apply (cases c) + apply (cases c') + apply (cases cte) + apply clarsimp + apply (clarsimp simp: weak_derived'_def isMDBParentOf_CTE) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps) + done + +lemma isMDBParentOf_eq_child: + "\ isMDBParentOf cte c; + weak_derived' (cteCap c) (cteCap c'); + mdbFirstBadged (cteMDBNode c') = mdbFirstBadged (cteMDBNode c) \ + \ isMDBParentOf cte c'" + apply (cases c) + apply (cases c') + apply (cases cte) + apply clarsimp + apply (clarsimp simp: weak_derived'_def isMDBParentOf_CTE) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps) + done + +lemma isMDBParentOf_eq: + "\ isMDBParentOf c d; + weak_derived' (cteCap c) (cteCap c'); + mdbRevocable (cteMDBNode c') = mdbRevocable (cteMDBNode c); + weak_derived' (cteCap d) (cteCap d'); + mdbFirstBadged (cteMDBNode d') = mdbFirstBadged (cteMDBNode d) \ + \ isMDBParentOf c' d'" + apply (drule (2) isMDBParentOf_eq_parent) + apply (erule (2) isMDBParentOf_eq_child) + done + +lemma weak_derived_refl' [intro!, simp]: + "weak_derived' c c" + by (simp add: weak_derived'_def) + +lemma weak_derived_sym': + "weak_derived' c d \ weak_derived' d c" + by (clarsimp simp: weak_derived'_def isCap_simps) +end +locale mdb_swap = + mdb_ptr_src?: mdb_ptr m _ _ src src_cap src_node + + mdb_ptr_dest?: mdb_ptr m _ _ dest dest_cap dest_node + for m src src_cap src_node dest dest_cap dest_node + + + assumes neq: "src \ dest" + + fixes scap dcap + + assumes src_derived: "weak_derived' src_cap scap" + assumes dest_derived: "weak_derived' dest_cap dcap" + + fixes n' + defines "n' \ + modify_map + (modify_map + (modify_map + (modify_map m src (cteCap_update (\_. dcap))) + dest + (cteCap_update (\_. scap))) + (mdbPrev src_node) + (cteMDBNode_update (mdbNext_update (\_. dest)))) + (mdbNext src_node) + (cteMDBNode_update (mdbPrev_update (\_. dest)))" + + fixes dest2 + assumes dest2: "n' dest = Some dest2" + + fixes n + defines "n \ + (modify_map + (modify_map + (modify_map + (modify_map n' + src (cteMDBNode_update (const (cteMDBNode dest2)))) + dest (cteMDBNode_update (const src_node))) + (mdbPrev (cteMDBNode dest2)) (cteMDBNode_update (mdbNext_update (\_. src)))) + (mdbNext (cteMDBNode dest2)) (cteMDBNode_update (mdbPrev_update (\_. src))))" + +begin + +lemma no_0_n' [intro!]: "no_0 n'" by (auto simp: n'_def) +lemma no_0_n [intro!]: "no_0 n" by (auto simp: n_def) + +lemmas n_0_simps [iff] = no_0_simps [OF no_0_n] +lemmas n'_0_simps [iff] = no_0_simps [OF no_0_n'] + +lemmas neqs [simp] = neq neq [symmetric] + +lemma src: "m src = Some (CTE src_cap src_node)" .. +lemma dest: "m dest = Some (CTE dest_cap dest_node)" .. + +lemma src_prev: + "\ mdbPrev src_node = p; p \ 0\ \ \cap node. m p = Some (CTE cap node) \ mdbNext node = src" + using src + apply - + apply (erule dlistEp, simp) + apply (case_tac cte') + apply simp + done + +lemma src_next: + "\ mdbNext src_node = p; p \ 0\ \ \cap node. m p = Some (CTE cap node) \ mdbPrev node = src" + using src + apply - + apply (erule dlistEn, simp) + apply (case_tac cte') + apply simp + done + +lemma dest_prev: + "\ mdbPrev dest_node = p; p \ 0\ \ \cap node. m p = Some (CTE cap node) \ mdbNext node = dest" + using dest + apply - + apply (erule dlistEp, simp) + apply (case_tac cte') + apply simp + done + +lemma dest_next: + "\ mdbNext dest_node = p; p \ 0\ \ \cap node. 
m p = Some (CTE cap node) \ mdbPrev node = dest" + using dest + apply - + apply (erule dlistEn, simp) + apply (case_tac cte') + apply simp + done + +lemma next_dest_prev_src [simp]: + "(mdbNext dest_node = src) = (mdbPrev src_node = dest)" + apply (rule iffI) + apply (drule dest_next, simp) + apply (clarsimp simp: src) + apply (drule src_prev, simp) + apply (clarsimp simp: dest) + done + +lemmas next_dest_prev_src_sym [simp] = next_dest_prev_src [THEN x_sym] + +lemma prev_dest_next_src [simp]: + "(mdbPrev dest_node = src) = (mdbNext src_node = dest)" + apply (rule iffI) + apply (drule dest_prev, simp) + apply (clarsimp simp: src) + apply (drule src_next, simp) + apply (clarsimp simp: dest) + done + +lemmas prev_dest_next_src_sym [simp] = prev_dest_next_src [THEN x_sym] + +lemma revokable_n': + "\ n' p = Some (CTE cap node) \ \ + \cap' node'. m p = Some (CTE cap' node') \ mdbRevocable node = mdbRevocable node'" + by (fastforce simp add: n'_def elim!: modify_map_casesE) + +lemma badge_n': + "\ n' p = Some (CTE cap node) \ \ + \cap' node'. m p = Some (CTE cap' node') \ mdbFirstBadged node = mdbFirstBadged node'" + by (fastforce simp add: n'_def elim!: modify_map_casesE) + +lemma cteMDBNode_update_split_asm: + "P (cteMDBNode_update f cte) = (\ (\cap mdb. cte = CTE cap mdb \ \ P (CTE cap (f mdb))))" + by (cases cte, simp) + +lemma revokable: + "n p = Some (CTE cap node) \ + if p = src then mdbRevocable node = mdbRevocable dest_node + else if p = dest then mdbRevocable node = mdbRevocable src_node + else \cap' node'. m p = Some (CTE cap' node') \ + mdbRevocable node = mdbRevocable node'" + apply (drule sym) + apply (insert src dest dest2 [symmetric])[1] + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def const_def split: if_split_asm) + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def const_def split: if_split_asm) + apply (clarsimp simp: n_def) + apply (clarsimp simp add: modify_map_def map_option_case split: if_split_asm option.splits) + apply (auto split: cteMDBNode_update_split_asm elim: revokable_n' revokable_n'[OF sym]) + done + +lemma badge_n: + "n p = Some (CTE cap node) \ + if p = src then mdbFirstBadged node = mdbFirstBadged dest_node + else if p = dest then mdbFirstBadged node = mdbFirstBadged src_node + else \cap' node'. m p = Some (CTE cap' node') \ + mdbFirstBadged node = mdbFirstBadged node'" + apply (drule sym) + apply (insert src dest dest2 [symmetric])[1] + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def const_def split: if_split_asm) + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def const_def split: if_split_asm) + apply (clarsimp simp: n_def) + apply (clarsimp simp add: modify_map_def map_option_case split: if_split_asm option.splits) + apply (auto split: cteMDBNode_update_split_asm elim: badge_n' badge_n'[OF sym]) + done + +lemma n'_cap: + "n' p = Some (CTE cap node) \ + if p = src then cap = dcap + else if p = dest then cap = scap + else \node'. 
m p = Some (CTE cap node')" + apply clarsimp + apply (rule conjI) + apply (fastforce simp add: n'_def modify_map_cases) + apply clarsimp + apply (rule conjI) + apply (fastforce simp add: n'_def modify_map_cases) + apply clarsimp + apply (simp add: n'_def modify_map_cases) + apply fastforce + done + +lemma n_cap: + "n p = Some (CTE cap node) \ + if p = src then cap = dcap + else if p = dest then cap = scap + else \node'. m p = Some (CTE cap node')" + apply clarsimp + apply (rule conjI, clarsimp) + apply (drule sym) + apply (insert src dest dest2 [symmetric])[1] + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def split: if_split_asm) + apply clarsimp + apply (rule conjI, clarsimp) + apply (drule sym) + apply (insert src dest dest2 [symmetric])[1] + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def split: if_split_asm) + apply clarsimp + apply (insert src dest dest2) + apply (clarsimp simp: n_def modify_map_cases) + apply (auto dest: n'_cap) + done + +lemma dest2_cap [simp]: + "cteCap dest2 = scap" + using dest2 by (cases dest2) (simp add: n'_cap) + +lemma n'_next: + "n' p = Some (CTE cap node) \ + if p = mdbPrev src_node then mdbNext node = dest + else \cap' node'. m p = Some (CTE cap' node') \ mdbNext node = mdbNext node'" + apply (simp add: n'_def) + apply (rule conjI) + apply clarsimp + apply (simp add: modify_map_cases) + apply clarsimp + apply clarsimp + apply (auto simp add: modify_map_cases) + done + +lemma dest2_next: + "mdbNext (cteMDBNode dest2) = + (if dest = mdbPrev src_node then dest else mdbNext dest_node)" + using dest2 dest by (cases dest2) (clarsimp dest!: n'_next) + +lemma n'_prev: + "n' p = Some (CTE cap node) \ + if p = mdbNext src_node then mdbPrev node = dest + else \cap' node'. 
m p = Some (CTE cap' node') \ mdbPrev node = mdbPrev node'" + apply (simp add: n'_def) + apply (rule conjI) + apply clarsimp + apply (simp add: modify_map_cases) + apply clarsimp + apply clarsimp + by (auto simp add: modify_map_cases) + +lemma dest2_prev: + "mdbPrev (cteMDBNode dest2) = + (if dest = mdbNext src_node then dest else mdbPrev dest_node)" + using dest2 dest by (cases dest2) (clarsimp dest!: n'_prev) + +lemma dest2_rev [simp]: + "mdbRevocable (cteMDBNode dest2) = mdbRevocable dest_node" + using dest2 dest by (cases dest2) (clarsimp dest!: revokable_n') + +lemma dest2_bdg [simp]: + "mdbFirstBadged (cteMDBNode dest2) = mdbFirstBadged dest_node" + using dest2 dest by (cases dest2) (clarsimp dest!: badge_n') + +definition + "dest2_node \ MDB (if dest = mdbPrev src_node then dest else mdbNext dest_node) + (if dest = mdbNext src_node then dest else mdbPrev dest_node) + (mdbRevocable dest_node) + (mdbFirstBadged dest_node)" + +lemma dest2_parts [simp]: + "dest2 = CTE scap dest2_node" + unfolding dest2_node_def + apply (subst dest2_prev [symmetric]) + apply (subst dest2_next [symmetric]) + apply (subst dest2_rev [symmetric]) + apply (subst dest2_bdg [symmetric]) + apply (subst dest2_cap [symmetric]) + apply (cases dest2) + apply (rename_tac mdbnode) + apply (case_tac mdbnode) + apply (simp del: dest2_cap) + done + +lemma prev_dest_src [simp]: + "(mdbPrev dest_node = mdbPrev src_node) = (mdbPrev dest_node = 0 \ mdbPrev src_node = 0)" + apply (subst mdb_ptr.p_prev_eq) + apply (rule mdb_ptr_axioms) + apply rule + apply simp + done + +lemmas prev_dest_src_sym [simp] = prev_dest_src [THEN x_sym] + +lemma next_dest_src [simp]: + "(mdbNext dest_node = mdbNext src_node) = (mdbNext dest_node = 0 \ mdbNext src_node = 0)" + apply (subst mdb_ptr.p_next_eq) + apply (rule mdb_ptr_axioms) + apply rule + apply simp + done + +lemmas next_dest_src_sym [simp] = next_dest_src [THEN x_sym] + +definition + s_d_swp :: "machine_word \ machine_word" +where + "s_d_swp p \ s_d_swap p src dest" + +declare s_d_swp_def [simp] + + +lemma n_exists: + "m p = Some (CTE cap node) \ \cap' node'. n p = Some (CTE cap' node')" + apply (simp add: n_def n'_def) + apply (intro modify_map_exists) + apply simp + done + +lemma m_exists: + "n p = Some (CTE cap node) \ \cap' node'. m p = Some (CTE cap' node')" + apply (simp add: n_def n'_def) + apply (drule modify_map_exists_rev, clarsimp)+ + done + +lemma next_src_node [simp]: + "(m (mdbNext src_node) = Some (CTE cap src_node)) = False" + apply clarsimp + apply (subgoal_tac "m \ mdbNext src_node \ mdbNext src_node") + apply simp + apply (simp add: mdb_next_unfold) + done + +lemma mdbNext_update_self [simp]: + "(mdbNext_update (\_. 
x) node = node) = (mdbNext node = x)" + by (cases node) auto + +lemmas p_next_eq_src = mdb_ptr_src.p_next_eq + +lemma next_m_n: + shows "m \ p \ p' = n \ s_d_swp p \ s_d_swp p'" + using src dest + apply (simp add: n_def n'_def modify_map_mdb_cap const_def) + apply (simp add: s_d_swap_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (clarsimp simp: mdb_next_unfold modify_map_cases dest2_node_def + split: if_split_asm) + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: mdb_next_unfold modify_map_cases) + apply (auto simp add: dest2_node_def split: if_split_asm)[1] + apply clarsimp + apply (simp add: mdb_next_unfold modify_map_cases) + apply (simp add: dest2_node_def const_def) + apply (intro impI) + apply (rule conjI, clarsimp) + apply (rule iffI) + apply clarsimp + apply clarsimp + apply clarsimp + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (clarsimp simp: mdb_next_unfold modify_map_cases dest2_node_def) + apply (rule conjI) + apply clarsimp + apply (rule_tac x="CTE dest_cap (mdbNext_update (\_. src) src_node)" + in exI) + apply simp + apply (rule_tac x=dest_node in exI) + apply clarsimp + apply clarsimp + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: mdb_next_unfold modify_map_cases dest2_node_def + split: if_split_asm) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold modify_map_cases dest2_node_def) + apply (rule conjI, clarsimp) + apply clarsimp + apply (rule iffI) + apply clarsimp + apply (rule_tac x="CTE dest_cap src_node" in exI) + apply simp + apply (case_tac "mdbPrev src_node = dest") + apply clarsimp + apply clarsimp + apply clarsimp + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: mdb_next_unfold modify_map_cases dest2_node_def) + apply (rule conjI, clarsimp) + apply (case_tac "m p", simp) + apply (case_tac a) + apply (rename_tac cap node) + apply clarsimp + apply (frule p_next_eq_src [where p'=p]) + apply simp + apply (case_tac "mdbNext src_node = 0", simp) + apply simp + apply (rule allI) + apply (rule disjCI2) + apply simp + apply (erule disjE) + apply clarsimp + apply (rule disjCI2) + apply (clarsimp del: notI) + apply (rule notI) + apply (erule dlistEn [where p=p]) + apply clarsimp + apply clarsimp + apply clarsimp + apply (case_tac "m p", simp) + apply (case_tac a) + apply (rename_tac cap node) + apply clarsimp + apply (case_tac "mdbPrev dest_node = p") + apply simp + apply (frule dest_prev, clarsimp) + apply (elim exE conjE) + apply simp + apply (case_tac "mdbNext src_node = p") + apply fastforce + apply fastforce + apply (subgoal_tac "dest \ mdbNext node") + prefer 2 + apply (rule notI) + apply (erule dlistEn [where p=p]) + apply clarsimp + apply clarsimp + apply simp + apply (rule allI) + apply (cases "mdbNext src_node = p") + apply simp + apply (subgoal_tac "mdbPrev src_node \ p") + prefer 2 + apply clarsimp + apply simp + apply (subgoal_tac "src \ mdbNext node") + apply clarsimp + apply (rule notI) + apply (erule dlistEn [where p=p]) + apply clarsimp + apply clarsimp + apply simp + apply (subgoal_tac "src \ mdbPrev node") + prefer 2 + apply (rule notI) + apply (erule dlistEp [where p=p]) + apply clarsimp + apply clarsimp + apply (rule disjCI2) + apply simp + apply (erule disjE) + apply clarsimp + apply simp + apply (rule disjCI) + apply simp + apply (erule dlistEn [where p=p]) + apply clarsimp + apply clarsimp + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: mdb_next_unfold modify_map_cases dest2_node_def) + apply (case_tac "m p", 
simp) + apply (case_tac a) + apply (rename_tac cap node) + apply simp + apply (rule conjI) + apply (rule impI) + apply simp + apply (rule iffI) + apply simp + apply (rule dlistEn [where p=p], assumption, clarsimp) + apply clarsimp + apply (elim exE conjE) + apply (case_tac "mdbPrev src_node = p") + apply simp + apply (drule src_prev, clarsimp) + apply clarsimp + apply clarsimp + apply (drule p_next_eq_src [where p'=p]) + apply simp + apply clarsimp + apply (rule iffI) + apply simp + apply (subgoal_tac "mdbPrev src_node = p") + prefer 2 + apply (erule dlistEn [where p=p], clarsimp) + apply clarsimp + apply fastforce + apply (elim exE conjE) + apply simp + apply (case_tac "mdbPrev dest_node = p") + apply (frule dest_prev) + apply clarsimp + apply hypsubst_thin + apply clarsimp + apply simp + apply (case_tac "mdbNext src_node = p") + apply simp + apply (elim exE conjE) + apply (frule src_next, clarsimp) + apply simp + apply (case_tac "mdbPrev src_node = p") + apply clarsimp + apply (subgoal_tac "mdbNext (cteMDBNode z) = mdbNext node") + prefer 2 + apply (case_tac nodea) + apply (case_tac z) + apply (rename_tac capability mdbnode) + apply (case_tac mdbnode) + apply clarsimp + apply simp + apply (rule dlistEn [where p=p], assumption, clarsimp) + apply clarsimp + apply simp + apply (case_tac "mdbPrev src_node = p") + apply simp + apply (frule src_prev, clarsimp) + apply simp + apply simp + apply (rule dlistEn [where p=p], assumption, clarsimp) + apply clarsimp + apply clarsimp + apply (simp add: mdb_next_unfold modify_map_cases dest2_node_def) + apply (rule conjI) + apply (rule impI) + apply simp + apply (case_tac "m p", simp) + apply (case_tac a) + apply (rename_tac cap node) + apply simp + apply (case_tac "mdbPrev src_node \ p") + apply simp + apply simp + apply (frule src_prev, clarsimp) + apply simp + apply clarsimp + apply (case_tac "m p", simp) + apply (case_tac a) + apply (rename_tac cap node) + apply simp + apply (case_tac "mdbPrev dest_node = p") + apply simp + apply (frule dest_prev, clarsimp) + apply clarsimp + apply simp + apply (case_tac "mdbPrev src_node = p") + apply simp + apply (frule src_prev, clarsimp) + apply fastforce + apply simp + apply (case_tac "mdbNext src_node = p") + apply simp + apply simp + done + +lemma n_next: + "n p = Some (CTE cap node) \ + if p = dest then + (if mdbNext src_node = dest then mdbNext node = src + else mdbNext node = mdbNext src_node) + else if p = src then + (if mdbNext dest_node = src then mdbNext node = dest + else mdbNext node = mdbNext dest_node) + else if p = mdbPrev src_node then mdbNext node = dest + else if p = mdbPrev dest_node then mdbNext node = src + else \cap' node'. 
m p = Some (CTE cap' node') \ mdbNext node = mdbNext node'" + apply (simp add: n_def del: dest2_parts split del: if_split) + apply (simp only: dest2_next dest2_prev split del: if_split) + apply (simp add: dest2_node_def split del: if_split) + apply (simp add: n'_def const_def cong: if_cong split del: if_split) + apply(case_tac "p=dest") + apply(clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(case_tac "p=src") + apply(simp split del: if_split) + apply(clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(case_tac "p=mdbPrev src_node") + apply(simp split del: if_split) + apply(clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(fastforce) + apply(fastforce) + apply(case_tac "p=mdbPrev dest_node") + apply(simp split del: if_split) + apply(clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(fastforce) + apply(simp split del: if_split) + apply (clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(fastforce)+ + done + +lemma parent_of_m_n: + "m \ p parentOf c = + n \ s_d_swp p parentOf s_d_swp c" + apply (clarsimp simp add: parentOf_def) + apply (rule iffI) + apply clarsimp + apply (case_tac cte, case_tac cte') + apply (rename_tac cap0 node0 cap1 node1) + apply clarsimp + apply (subgoal_tac "\cap0' node0'. n (s_d_swap c src dest) = Some (CTE cap0' node0')") + prefer 2 + apply (simp add: s_d_swap_def) + apply (fastforce intro: n_exists) + apply (subgoal_tac "\cap1' node1'. n (s_d_swap p src dest) = Some (CTE cap1' node1')") + prefer 2 + apply (simp add: s_d_swap_def) + apply (fastforce intro: n_exists) + apply clarsimp + apply (insert src_derived dest_derived)[1] + apply (erule isMDBParentOf_eq) + apply simp + apply (drule n_cap)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule revokable)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule n_cap)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule badge_n)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply clarsimp + apply (case_tac cte, case_tac cte') + apply (rename_tac cap0 node0 cap1 node1) + apply clarsimp + apply (subgoal_tac "\cap0' node0' cap1' node1'. 
+ m c = Some (CTE cap0' node0') \ + m p = Some (CTE cap1' node1')") + prefer 2 + apply (drule m_exists)+ + apply clarsimp + apply (simp add: s_d_swap_def src dest split: if_split_asm) + apply clarsimp + apply (insert src_derived dest_derived)[1] + apply (erule isMDBParentOf_eq) + apply simp + apply (rule weak_derived_sym') + apply (drule n_cap)+ + apply (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule revokable)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (rule weak_derived_sym') + apply (drule n_cap)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule badge_n)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + done + +lemma parency_m_n: + assumes "m \ p \ p'" + shows "n \ s_d_swp p \ s_d_swp p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (subst (asm) next_m_n, assumption) + apply simp + apply (subst (asm) parent_of_m_n, assumption) + done +next + case (trans_parent c c') + thus ?case + apply - + apply (erule subtree.trans_parent) + apply (subst (asm) next_m_n, assumption) + apply simp + apply (subst (asm) parent_of_m_n, assumption) + done +qed + +lemma parency_n_m: + assumes "n \ p \ p'" + shows "m \ s_d_swp p \ s_d_swp p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (subst next_m_n, simp) + apply simp + apply (subst parent_of_m_n, simp) + done +next + case (trans_parent c c') + thus ?case + apply - + apply (erule subtree.trans_parent) + apply (subst next_m_n, simp) + apply simp + apply (subst parent_of_m_n, simp) + done +qed + +lemma parency: + "n \ p \ p' = m \ s_d_swp p \ s_d_swp p'" + apply (rule iffI) + apply (erule parency_n_m) + apply (drule parency_m_n) + apply simp + done + +lemma descendants: + "descendants_of' p n = + (let swap = \S. 
S - {src,dest} \ + (if src \ S then {dest} else {}) \ + (if dest \ S then {src} else {}) in + swap (descendants_of' (s_d_swp p) m))" + apply (simp add: Let_def parency descendants_of'_def s_d_swap_def) + apply auto + done + +end + +lemma inj_on_descendants_cte_map: + "\ valid_mdb s; + valid_objs s; pspace_distinct s; pspace_aligned s \ \ + inj_on cte_map (descendants_of p (cdt s))" + apply (clarsimp simp add: inj_on_def) + apply (drule (1) descendants_of_cte_at)+ + apply (drule (5) cte_map_inj_eq) + apply simp + done + +lemmas revokable_relation_simps [simp del] = + revokable_relation_cap revokable_relation_next revokable_relation_prev + +declare if_split [split del] + +(* +lemma corres_bind_ext: +"corres_underlying srel nf rrel G G' g (g') \ +corres_underlying srel nf rrel G G' (do do_extended_op (return ()); g od) (g')" + apply (simp add: corres_underlying_def do_extended_op_def return_def gets_def get_def put_def bind_def select_f_def modify_def mk_ef_def wrap_ext_op_det_ext_ext_def wrap_ext_op_unit_def) + done +*) + +(* consider putting in AINVS or up above cteInsert_corres *) +lemma next_slot_eq: + "\next_slot p t' m' = x; t' = t; m' = m\ \ next_slot p t m = x" + by simp + +lemma inj_on_image_set_diff15 : (* for compatibility of assumptions *) + "\inj_on f C; A \ C; B \ C\ \ f ` (A - B) = f ` A - f ` B" +by (rule inj_on_image_set_diff; auto) + +lemma cteSwap_corres: + assumes srcdst: "src' = cte_map src" "dest' = cte_map dest" + assumes scr: "cap_relation scap scap'" + assumes dcr: "cap_relation dcap dcap'" + assumes wf_caps: "wellformed_cap scap" "wellformed_cap dcap" + notes trans_state_update'[symmetric,simp] + shows "corres dc + (valid_objs and pspace_aligned and pspace_distinct and + valid_mdb and valid_list and + (\s. cte_wp_at (weak_derived scap) src s \ + cte_wp_at (weak_derived dcap) dest s \ + src \ dest \ (\cap. tcb_cap_valid cap src s) + \ (\cap. tcb_cap_valid cap dest s))) + (valid_mdb' and pspace_aligned' and pspace_distinct' and + (\s. cte_wp_at' (weak_derived' scap' o cteCap) src' s \ + cte_wp_at' (weak_derived' dcap' o cteCap) dest' s)) + (cap_swap scap src dcap dest) (cteSwap scap' src' dcap' dest')" + (is "corres _ ?P ?P' _ _") using assms including no_pre + supply None_upd_eq[simp del] + apply (unfold cap_swap_def cteSwap_def) + apply (cases "src=dest") + apply (rule corres_assume_pre) + apply simp + apply (rule corres_assume_pre) + apply (subgoal_tac "cte_map src \ cte_map dest") + prefer 2 + apply (erule cte_map_inj) + apply (fastforce simp: cte_wp_at_def) + apply (fastforce simp: cte_wp_at_def) + apply simp + apply simp + apply simp + apply (thin_tac "t : state_relation" for t) + apply (thin_tac "(P and (\s. Q s)) s" for Q P) + apply (thin_tac "(P and (\s. 
Q s)) s'" for Q P) + apply clarsimp + apply (rule corres_symb_exec_r) + prefer 2 + apply (rule getCTE_sp) + defer + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at'_def) + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply (wp hoare_weak_lift_imp getCTE_wp' updateCap_no_0 updateCap_ctes_of_wp| + simp add: cte_wp_at_ctes_of)+ + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def modify_map_exists_eq) + apply (rule conjI) + apply clarsimp + apply (erule (2) valid_dlistEp) + apply simp + apply (rule conjI) + apply clarsimp + apply (erule (2) valid_dlistEn) + apply simp + apply clarsimp + apply (case_tac cte) + apply (rename_tac cap node) + apply (case_tac cte1) + apply (rename_tac src_cap src_node) + apply (case_tac ctea) + apply (rename_tac dest_cap dest_node) + apply clarsimp + apply (rule conjI, clarsimp) + apply (subgoal_tac "mdbPrev node = mdbNext src_node \ + mdbPrev node = mdbPrev dest_node") + apply (erule disjE) + apply simp + apply (erule (1) valid_dlistEn, simp) + apply simp + apply (erule_tac p="cte_map dest" in valid_dlistEp, assumption, simp) + apply simp + apply (auto simp: modify_map_if split: if_split_asm)[1] + apply clarsimp + apply (subgoal_tac "mdbNext node = mdbPrev src_node \ + mdbNext node = mdbNext dest_node") + apply (erule disjE) + apply simp + apply (erule (1) valid_dlistEp, simp) + apply simp + apply (erule_tac p="cte_map dest" in valid_dlistEn, assumption, simp) + apply simp + apply (auto simp: modify_map_if split: if_split_asm)[1] + apply (clarsimp simp: corres_underlying_def in_monad + state_relation_def) + apply (clarsimp simp: valid_mdb'_def) + apply (drule(1) pspace_relationsD) + apply (drule (12) set_cap_not_quite_corres) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption+ + apply (rule refl) + apply (elim exE conjE) + apply (rule bind_execI, assumption) + apply (drule updateCap_stuff, elim conjE, erule(1) impE) + apply (subgoal_tac "valid_objs t \ pspace_distinct t \ pspace_aligned t \ cte_at dest t") + prefer 2 + apply (rule conjI) + apply (erule use_valid, rule set_cap_valid_objs) + apply simp + apply (drule_tac p=dest in cte_wp_at_norm, clarsimp) + apply (drule (1) cte_wp_valid_cap) + apply (erule (2) weak_derived_valid_cap) + apply (fastforce elim: use_valid [OF _ set_cap_aligned] + use_valid [OF _ set_cap_cte_at] + use_valid [OF _ set_cap_distinct] + cte_wp_at_weakenE) + apply (elim conjE) + apply (drule (14) set_cap_not_quite_corres) + apply simp + apply assumption+ + apply (rule refl) + apply (elim exE conjE) + apply (rule bind_execI, assumption) + apply (clarsimp simp: exec_gets) + apply (clarsimp simp: set_cdt_def bind_assoc) + + apply (clarsimp simp: set_original_def bind_assoc exec_get exec_put exec_gets modify_def cap_swap_ext_def + update_cdt_list_def set_cdt_list_def simp del: fun_upd_apply + | rule refl | clarsimp simp: put_def simp del: fun_upd_apply )+ + apply (simp cong: option.case_cong) + apply (drule updateCap_stuff, elim conjE, erule(1) impE) + apply (drule (2) updateMDB_the_lot') + apply (erule (1) impE, assumption) + apply (fastforce simp only: no_0_modify_map) + apply assumption + apply (elim conjE TrueE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply (drule in_getCTE, elim conjE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply (elim conjE TrueE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply 
(elim conjE TrueE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply (elim conjE TrueE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply (simp only: pspace_relations_def refl) + apply (rule conjI, rule TrueI)+ + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + apply (thin_tac "machine_state t = p" for t p)+ + apply (thin_tac "cur_thread t = p" for t p)+ + apply (thin_tac "ksDomScheduleIdx t = p" for t p)+ + apply (thin_tac "ksDomSchedule t = p" for t p)+ + apply (thin_tac "ksCurDomain t = p" for t p)+ + apply (thin_tac "ksDomainTime t = p" for t p)+ + apply (simp only: simp_thms no_0_modify_map) + apply (clarsimp simp: cte_wp_at_ctes_of cong: if_cong) + apply (thin_tac "ctes_of x = y" for x y)+ + apply (case_tac cte1) + apply (rename_tac src_cap src_node) + apply (case_tac cte) + apply (rename_tac dest_cap dest_node) + apply clarsimp + apply (subgoal_tac "mdb_swap (ctes_of b) (cte_map src) src_cap src_node + (cte_map dest) dest_cap dest_node scap' dcap' cte2") + prefer 2 + apply (rule mdb_swap.intro) + apply (rule mdb_ptr.intro) + apply (erule vmdb.intro) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_ptr.intro) + apply (erule vmdb.intro) + apply (erule mdb_ptr_axioms.intro) + apply (erule mdb_swap_axioms.intro) + apply (erule weak_derived_sym') + apply (erule weak_derived_sym') + apply assumption + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + apply (thin_tac "ready_queues t = p" for t p)+ + apply (thin_tac "cur_domain t = p" for t p)+ + apply (thin_tac "ksDomScheduleIdx t = p" for t p)+ + apply (thin_tac "ksDomSchedule t = p" for t p)+ + apply (thin_tac "ksCurDomain t = p" for t p)+ + apply (thin_tac "ksDomainTime t = p" for t p)+ + apply (thin_tac "idle_thread t = p" for t p)+ + apply (thin_tac "work_units_completed t = p" for t p)+ + apply (thin_tac "domain_index t = p" for t p)+ + apply (thin_tac "domain_list t = p" for t p)+ + apply (thin_tac "domain_time t = p" for t p)+ + apply (thin_tac "ekheap t = p" for t p)+ + apply (thin_tac "scheduler_action t = p" for t p)+ + apply (thin_tac "ksArchState t = p" for t p)+ + apply (thin_tac "gsCNodes t = p" for t p)+ + apply (thin_tac "ksWorkUnitsCompleted t = p" for t p)+ + apply (thin_tac "ksInterruptState t = p" for t p)+ + apply (thin_tac "ksIdleThread t = p" for t p)+ + apply (thin_tac "gsUserPages t = p" for t p)+ + apply (thin_tac "pspace_relation s s'" for s s')+ + apply (thin_tac "ekheap_relation e p" for e p)+ + apply (thin_tac "interrupt_state_relation n s s'" for n s s')+ + apply (thin_tac "(s,s') \ arch_state_relation" for s s')+ + apply (rule conjI) + subgoal by (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv AARCH64.data_at_def) + apply(subst conj_assoc[symmetric]) + apply (rule conjI) + prefer 2 + apply (clarsimp simp add: revokable_relation_def in_set_cap_cte_at + simp del: split_paired_All) + apply (drule set_cap_caps_of_state_monad)+ + apply (simp del: split_paired_All split: if_split) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_caps_of_state simp del: split_paired_All) + apply (drule(1) mdb_swap.revokable) + apply (erule_tac x="dest" in allE) + apply (erule impE) + subgoal 
by (clarsimp simp: null_filter_def weak_derived_Null split: if_splits) + apply simp + apply (clarsimp simp del: split_paired_All) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_caps_of_state simp del: split_paired_All) + apply (drule (1) mdb_swap.revokable) + apply (subgoal_tac "cte_map (aa,bb) \ cte_map src") + apply (simp del: split_paired_All) + apply (erule_tac x="src" in allE) + apply (erule impE) + subgoal by (clarsimp simp: null_filter_def weak_derived_Null split: if_splits) + subgoal by simp + apply (drule caps_of_state_cte_at)+ + apply (erule (5) cte_map_inj) + apply (clarsimp simp: cte_wp_at_caps_of_state simp del: split_paired_All) + apply (drule (1) mdb_swap.revokable) + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + subgoal by (clarsimp simp: null_filter_def split: if_splits) + apply clarsimp + apply (subgoal_tac "cte_map (aa,bb) \ cte_map src") + apply (subgoal_tac "cte_map (aa,bb) \ cte_map dest") + subgoal by (clarsimp simp del: split_paired_All) + apply (drule caps_of_state_cte_at)+ + apply (drule null_filter_caps_of_stateD)+ + apply (erule cte_map_inj, erule cte_wp_cte_at, assumption+) + apply (drule caps_of_state_cte_at)+ + apply (drule null_filter_caps_of_stateD)+ + apply (erule cte_map_inj, erule cte_wp_cte_at, assumption+) + apply (subgoal_tac "no_loops (ctes_of b)") + prefer 2 + subgoal by (simp add: valid_mdb_ctes_def mdb_chain_0_no_loops) + apply (subgoal_tac "mdb_swap_abs (cdt a) src dest a") + prefer 2 + apply (erule mdb_swap_abs.intro) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (rule refl) + apply assumption + apply (frule mdb_swap_abs''.intro) + apply (drule_tac t="cdt_list (a)" in mdb_swap_abs'.intro) + subgoal by (simp add: mdb_swap_abs'_axioms_def) + apply (thin_tac "modify_map m f p p' = t" for m f p p' t) + apply(rule conjI) + apply (simp add: cdt_relation_def del: split_paired_All) + apply (intro allI impI) + apply (subst mdb_swap.descendants, assumption) + apply (subst mdb_swap_abs.descendants, assumption) + apply (simp add: mdb_swap_abs.s_d_swp_def mdb_swap.s_d_swp_def + del: split_paired_All) + apply (subst image_Un)+ + apply (subgoal_tac "cte_at (s_d_swap c src dest) a") + prefer 2 + apply (simp add: s_d_swap_def split: if_split) + apply (rule conjI, clarsimp simp: cte_wp_at_caps_of_state) + apply (rule impI, rule conjI, clarsimp simp: cte_wp_at_caps_of_state) + apply (fastforce dest: in_set_cap_cte_at) + apply (subgoal_tac "s_d_swap (cte_map c) (cte_map src) (cte_map dest) = + cte_map (s_d_swap c src dest)") + prefer 2 + apply (simp add: s_d_swap_def split: if_splits) + apply (drule cte_map_inj, + erule cte_wp_at_weakenE, rule TrueI, + erule cte_wp_at_weakenE, rule TrueI, + assumption+)+ + apply simp + apply (subgoal_tac "descendants_of' (cte_map (s_d_swap c src dest)) (ctes_of b) = + cte_map ` descendants_of (s_d_swap c src dest) (cdt a)") + prefer 2 + apply (simp del: split_paired_All) + apply simp + apply (simp split: if_split) + apply (frule_tac p="s_d_swap c src dest" in inj_on_descendants_cte_map, assumption+) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (subst inj_on_image_set_diff15, assumption) + apply (rule subset_refl) + apply simp + apply simp + apply clarsimp + apply (rule conjI, clarsimp) + apply (drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule (1) descendants_of_cte_at) + apply assumption+ + apply simp + apply (subst insert_minus_eq, assumption) + apply clarsimp + apply (subst 
insert_minus_eq [where x="cte_map dest"], assumption) + apply (subst inj_on_image_set_diff15) + apply (erule (3) inj_on_descendants_cte_map) + apply (rule subset_refl) + apply clarsimp + subgoal by auto + apply clarsimp + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule (1) descendants_of_cte_at) + apply assumption+ + apply simp + apply clarsimp + apply (subst inj_on_image_set_diff15) + apply (erule (3) inj_on_descendants_cte_map) + apply (rule subset_refl) + apply clarsimp + apply simp + apply clarsimp + apply (rule conjI, clarsimp) + apply (drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule (1) descendants_of_cte_at) + apply assumption+ + apply simp + apply clarsimp + apply (drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule (1) descendants_of_cte_at) + apply assumption+ + apply simp + apply(clarsimp simp: cdt_list_relation_def) + apply(subst next_slot_eq[OF mdb_swap_abs'.next_slot]) + apply(assumption) + apply(fastforce split: option.split) + apply(simp) + apply(frule finite_depth) + apply(frule mdb_swap.n_next) + apply(simp) + apply(case_tac "(aa, bb)=src") + apply(case_tac "next_slot dest (cdt_list (a)) (cdt a) = Some src") + apply(simp) + apply(erule_tac x="fst dest" in allE, erule_tac x="snd dest" in allE) + apply(simp) + apply(simp) + apply(case_tac "next_slot dest (cdt_list (a)) (cdt a)") + apply(simp) + apply(simp) + apply(erule_tac x="fst dest" in allE, erule_tac x="snd dest" in allE) + apply(simp) + apply(subgoal_tac "mdbNext dest_node \ cte_map src") + apply(simp) + apply(simp) + apply(rule_tac s=a in cte_map_inj) + apply(simp) + apply(rule cte_at_next_slot') + apply(simp) + apply(simp) + apply(simp) + apply(simp) + apply(erule cte_wp_at_weakenE, rule TrueI) + apply(simp_all)[3] + apply(case_tac "(aa, bb)=dest") + apply(case_tac "next_slot src (cdt_list (a)) (cdt a) = Some dest") + apply(simp) + apply(erule_tac x="fst src" in allE, erule_tac x="snd src" in allE) + apply(simp) + apply(simp) + apply(case_tac "next_slot src (cdt_list (a)) (cdt a)") + apply(simp) + apply(simp) + apply(erule_tac x="fst src" in allE, erule_tac x="snd src" in allE) + apply(simp) + apply(subgoal_tac "mdbNext src_node \ cte_map dest") + apply(simp) + apply(simp) + apply(rule_tac s=a in cte_map_inj) + apply(simp) + apply(rule cte_at_next_slot') + apply(simp) + apply(simp) + apply(simp) + apply(simp) + apply(erule cte_wp_at_weakenE, rule TrueI) + apply(simp_all)[3] + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a) = Some src") + apply(simp) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(simp) + apply(subgoal_tac "cte_at (aa, bb) a") + apply(subgoal_tac "cte_map (aa, bb) \ cte_map dest \ + cte_map (aa, bb) \ cte_map src \ + cte_map (aa, bb) = mdbPrev src_node") + apply(clarsimp) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(frule mdb_swap.m_exists) + apply(simp) + apply(clarsimp) + apply(frule_tac cte="CTE cap' node'" in valid_mdbD1') + apply(clarsimp) + apply(simp add: valid_mdb'_def) + apply(clarsimp) + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a) = Some dest") + apply(simp) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(simp) + apply(subgoal_tac "cte_at (aa, 
bb) a") + apply(subgoal_tac "cte_map (aa, bb) \ cte_map dest \ + cte_map (aa, bb) \ cte_map src \ + cte_map (aa, bb) = mdbPrev dest_node") + apply(subgoal_tac "cte_map (aa, bb) \ mdbPrev src_node") + apply(clarsimp) + apply(clarsimp simp: mdb_swap.prev_dest_src) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(frule mdb_swap.m_exists) + apply(simp) + apply(clarsimp) + apply(frule_tac cte="CTE cap' node'" in valid_mdbD1') + apply(clarsimp) + apply(simp add: valid_mdb'_def) + apply(clarsimp) + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(simp) + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a)") + apply(simp) + apply(clarsimp) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(simp) + apply(subgoal_tac "cte_at (aa, bb) a") + apply(subgoal_tac "cte_map (aa, bb) \ cte_map dest \ + cte_map (aa, bb) \ cte_map src \ + cte_map (aa, bb) \ mdbPrev src_node \ + cte_map (aa, bb) \ mdbPrev dest_node") + apply(clarsimp) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(rule conjI) + apply(rule cte_map_inj) + apply simp_all[6] + apply(erule cte_wp_at_weakenE, simp) + apply(rule conjI) + apply(frule mdb_swap.m_exists) + apply(simp) + apply(clarsimp) + apply(frule_tac cte="CTE src_cap src_node" in valid_mdbD2') + subgoal by (clarsimp) + apply(simp add: valid_mdb'_def) + apply(clarsimp) + apply(drule cte_map_inj_eq) + apply(rule cte_at_next_slot') + apply(simp_all)[9] + apply(erule cte_wp_at_weakenE, simp) + apply(frule mdb_swap.m_exists) + apply(simp) + apply(clarsimp) + apply(frule_tac cte="CTE dest_cap dest_node" in valid_mdbD2') + apply(clarsimp) + apply(simp add: valid_mdb'_def) + apply(clarsimp) + apply(drule cte_map_inj_eq) + apply(rule cte_at_next_slot') + apply(simp_all)[9] + apply(erule cte_wp_at_weakenE, simp) + by (rule cte_at_next_slot; simp) + + +lemma capSwapForDelete_corres: + assumes "src' = cte_map src" "dest' = cte_map dest" + shows "corres dc + (valid_objs and pspace_aligned and pspace_distinct and + valid_mdb and valid_list and cte_at src and cte_at dest + and (\s. \cap. tcb_cap_valid cap src s) + and (\s. \cap. tcb_cap_valid cap dest s)) + (valid_mdb' and pspace_distinct' and pspace_aligned') + (cap_swap_for_delete src dest) (capSwapForDelete src' dest')" + using assms + apply (simp add: cap_swap_for_delete_def capSwapForDelete_def) + apply (cases "src = dest") + apply (clarsimp simp: when_def) + apply (rule corres_assume_pre) + apply clarsimp + apply (frule_tac s=s in cte_map_inj) + apply (simp add: caps_of_state_cte_at)+ + apply (simp add: when_def liftM_def) + apply (rule corres_guard_imp) + apply (rule_tac P1=wellformed_cap in corres_split[OF get_cap_corres_P]) + apply (rule_tac P1=wellformed_cap in corres_split[OF get_cap_corres_P]) + apply (rule cteSwap_corres, rule refl, rule refl, clarsimp+) + apply (wp get_cap_wp getCTE_wp')+ + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (drule (1) caps_of_state_valid_cap)+ + apply (simp add: valid_cap_def2) + apply (clarsimp simp: cte_wp_at_ctes_of) +done + +declare if_split [split] +declare revokable_relation_simps [simp] + +definition + "no_child' s cte \ + let next = mdbNext (cteMDBNode cte) in + (next \ 0 \ cte_at' next s \ cte_wp_at' (\cte'. 
\isMDBParentOf cte cte') next s)" + +definition + "child_save' s cte' cte \ + let cap = cteCap cte; cap' = cteCap cte' in + sameRegionAs cap cap' \ + (isEndpointCap cap \ (capEPBadge cap = capEPBadge cap' \ no_child' s cte)) \ + (isNotificationCap cap \ (capNtfnBadge cap = capNtfnBadge cap' \ no_child' s cte))" + +lemma subtree_no_parent: + assumes "m \ p \ x" + assumes "mdbNext (cteMDBNode cte) \ 0" + assumes "\ isMDBParentOf cte next" + assumes "m p = Some cte" + assumes "m (mdbNext (cteMDBNode cte)) = Some next" + shows "False" using assms + by induct (auto simp: parentOf_def mdb_next_unfold) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma ensureNoChildren_corres: + "p' = cte_map p \ + corres (ser \ dc) (cte_at p) (pspace_aligned' and pspace_distinct' and cte_at' p' and valid_mdb') + (ensure_no_children p) (ensureNoChildren p')" + apply (simp add: ensureNoChildren_def ensure_no_children_descendants + liftE_bindE nullPointer_def) + apply (rule corres_symb_exec_r) + defer + apply (rule getCTE_sp) + apply wp + apply (rule no_fail_pre, wp) + apply simp + apply (case_tac "mdbNext (cteMDBNode cte) = 0") + apply (simp add: whenE_def) + apply (clarsimp simp: returnOk_def corres_underlying_def return_def) + apply (erule notE) + apply (clarsimp simp: state_relation_def cdt_relation_def + simp del: split_paired_All) + apply (erule allE, erule (1) impE) + apply (subgoal_tac "descendants_of' (cte_map p) (ctes_of b) = {}") + apply simp + apply (clarsimp simp: descendants_of'_def) + apply (subst (asm) cte_wp_at_ctes_of) + apply clarsimp + apply (erule (2) subtree_next_0) + apply (simp add: whenE_def) + apply (rule corres_symb_exec_r) + defer + apply (rule getCTE_sp) + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (erule (2) valid_dlistEn) + apply simp + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: corres_underlying_def + throwError_def returnOk_def return_def) + apply (subgoal_tac "pspace_aligned' b \ pspace_distinct' b") + prefer 2 + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: state_relation_def cdt_relation_def + simp del: split_paired_All) + apply (erule allE, erule (1) impE) + apply (clarsimp simp: descendants_of'_def) + apply (subgoal_tac "ctes_of b \ cte_map p \ mdbNext (cteMDBNode cte)") + apply simp + apply (rule direct_parent) + apply (simp add: mdb_next_unfold) + apply assumption + apply (simp add: parentOf_def) + apply clarsimp + apply (clarsimp simp: corres_underlying_def + throwError_def returnOk_def return_def) + apply (subgoal_tac "pspace_aligned' b \ pspace_distinct' b") + prefer 2 + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: state_relation_def cdt_relation_def + simp del: split_paired_All) + apply (erule allE, erule (1) impE) + apply (subgoal_tac "descendants_of' (cte_map p) (ctes_of b) = {}") + apply simp + apply (clarsimp simp: descendants_of'_def) + apply (erule (4) subtree_no_parent) + done + +end +end diff --git a/proof/refine/AARCH64/CSpace_I.thy b/proof/refine/AARCH64/CSpace_I.thy new file mode 100644 index 0000000000..549e4686d2 --- /dev/null +++ b/proof/refine/AARCH64/CSpace_I.thy @@ -0,0 +1,2033 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + CSpace invariants +*) + +theory CSpace_I +imports ArchAcc_R +begin + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma capUntypedPtr_simps [simp]: + "capUntypedPtr (ThreadCap r) = r" + "capUntypedPtr (NotificationCap r badge a b) = r" + "capUntypedPtr (EndpointCap r badge a b c d) = r" + "capUntypedPtr (Zombie r bits n) = r" + "capUntypedPtr (ArchObjectCap x) = Arch.capUntypedPtr x" + "capUntypedPtr (UntypedCap d r n f) = r" + "capUntypedPtr (CNodeCap r n g n2) = r" + "capUntypedPtr (ReplyCap r m a) = r" + "Arch.capUntypedPtr (ASIDPoolCap r asid) = r" + "Arch.capUntypedPtr (FrameCap r rghts sz d mapdata) = r" + "Arch.capUntypedPtr (PageTableCap r pt_t mapdata2) = r" + "Arch.capUntypedPtr (VCPUCap r) = r" + by (auto simp: capUntypedPtr_def AARCH64_H.capUntypedPtr_def) + +lemma rights_mask_map_UNIV [simp]: + "rights_mask_map UNIV = allRights" + by (simp add: rights_mask_map_def allRights_def) + +declare insert_UNIV[simp] + +lemma maskCapRights_allRights [simp]: + "maskCapRights allRights c = c" + unfolding maskCapRights_def isCap_defs allRights_def + AARCH64_H.maskCapRights_def maskVMRights_def + by (cases c) (simp_all add: Let_def split: arch_capability.split vmrights.split) + +lemma getCTE_inv [wp]: "\P\ getCTE addr \\rv. P\" + by (simp add: getCTE_def) wp + +lemma getEndpoint_inv [wp]: + "\P\ getEndpoint ptr \\rv. P\" + by (simp add: getEndpoint_def getObject_inv loadObject_default_inv) + +lemma getNotification_inv [wp]: + "\P\ getNotification ptr \\rv. P\" + by (simp add: getNotification_def getObject_inv loadObject_default_inv) + +lemma getSlotCap_inv [wp]: "\P\ getSlotCap addr \\rv. P\" + by (simp add: getSlotCap_def, wp) + +declare resolveAddressBits.simps[simp del] + +lemma cap_case_CNodeCap: + "(case cap of CNodeCap a b c d \ P + | _ \ Q) + = (if isCNodeCap cap then P else Q)" + by (cases cap, simp_all add: isCap_simps) + +lemma resolveAddressBits_inv_induct: + shows + "s \ \P\ + resolveAddressBits cap cref depth + \\rv. P\,\\rv. P\" +proof (induct arbitrary: s rule: resolveAddressBits.induct) + case (1 cap fn cref depth) + show ?case + apply (subst resolveAddressBits.simps) + apply (simp add: Let_def split_def cap_case_CNodeCap[unfolded isCap_simps] + split del: if_split cong: if_cong) + apply (rule hoare_pre_spec_validE) + apply ((elim exE | wp (once) spec_strengthen_postE[OF "1.hyps"])+, + (rule refl conjI | simp add: in_monad split del: if_split)+) + apply (wp | simp add: locateSlot_conv split del: if_split + | wp (once) hoare_drop_imps)+ + done +qed + +lemma rab_inv' [wp]: + "\P\ resolveAddressBits cap addr depth \\rv. P\" + by (rule validE_valid, rule use_specE', rule resolveAddressBits_inv_induct) + +lemmas rab_inv'' [wp] = rab_inv' [folded resolveAddressBits_decl_def] + +crunch inv [wp]: lookupCap P + +lemma updateObject_cte_inv: + "\P\ updateObject (cte :: cte) ko x y n \\rv. P\" + apply (simp add: updateObject_cte) + apply (cases ko, simp_all add: typeError_def unless_def + split del: if_split + cong: if_cong) + apply (wp | simp)+ + done + +definition + "no_mdb cte \ mdbPrev (cteMDBNode cte) = 0 \ mdbNext (cteMDBNode cte) = 0" + +lemma mdb_next_update: + "m (x \ y) \ a \ b = + (if a = x then mdbNext (cteMDBNode y) = b else m \ a \ b)" + by (simp add: mdb_next_rel_def mdb_next_def) + +lemma neg_no_loopsI: + "m \ c \\<^sup>+ c \ \ no_loops m" + unfolding no_loops_def by auto + +lemma valid_dlistEp [elim?]: + "\ valid_dlist m; m p = Some cte; mdbPrev (cteMDBNode cte) \ 0; + \cte'. 
\ m (mdbPrev (cteMDBNode cte)) = Some cte'; + mdbNext (cteMDBNode cte') = p \ \ P \ \ + P" + unfolding valid_dlist_def Let_def by blast + +lemma valid_dlistEn [elim?]: + "\ valid_dlist m; m p = Some cte; mdbNext (cteMDBNode cte) \ 0; + \cte'. \ m (mdbNext (cteMDBNode cte)) = Some cte'; + mdbPrev (cteMDBNode cte') = p \ \ P \ \ + P" + unfolding valid_dlist_def Let_def by blast + +lemmas valid_dlistE = valid_dlistEn valid_dlistEp + +lemma mdb_next_update_other: + "\ m (x \ y) \ a \ b; x \ a \ \ m \ a \ b" + by (simp add: mdb_next_rel_def mdb_next_def) + +lemma mdb_trancl_update_other: + assumes upd: "m(p \ cte) \ x \\<^sup>+ y" + and nopath: "\ m \ x \\<^sup>* p" + shows "m \ x \\<^sup>+ y" + using upd nopath +proof induct + case (base y) + + have "m \ x \ y" + proof (rule mdb_next_update_other) + from base show "p \ x" by clarsimp + qed fact+ + + thus ?case .. +next + case (step y z) + hence ih: "m \ x \\<^sup>+ y" by auto + + from ih show ?case + proof + show "m \ y \ z" + proof (rule mdb_next_update_other) + show "p \ y" + proof (cases "x = p") + case True thus ?thesis using step.prems by simp + next + case False thus ?thesis using step.prems ih + by - (erule contrapos_nn, rule trancl_into_rtrancl, simp) + qed + qed fact+ + qed +qed + +lemma next_unfold': + "m \ c \ y = (\cte. m c = Some cte \ mdbNext (cteMDBNode cte) = y)" + unfolding mdb_next_rel_def + by (simp add: next_unfold split: option.splits) + +lemma no_self_loop_next_noloop: + assumes no_loop: "no_loops m" + and lup: "m ptr = Some cte" + shows "mdbNext (cteMDBNode cte) \ ptr" +proof - + from no_loop have "\ m \ ptr \ ptr" + unfolding no_loops_def + by - (drule spec, erule contrapos_nn, erule r_into_trancl) + + thus ?thesis using lup + by (simp add: next_unfold') +qed + + +lemma valid_dlistI [intro?]: + defines "nxt \ \cte. mdbNext (cteMDBNode cte)" + and "prv \ \cte. mdbPrev (cteMDBNode cte)" + assumes r1: "\p cte. \ m p = Some cte; prv cte \ 0 \ \ \cte'. m (prv cte) = Some cte' \ nxt cte' = p" + and r2: "\p cte. \ m p = Some cte; nxt cte \ 0 \ \ \cte'. 
m (nxt cte) = Some cte' \ prv cte' = p" + shows "valid_dlist m" + unfolding valid_dlist_def + by (auto dest: r1 r2 simp: Let_def prv_def nxt_def) + +lemma no_loops_tranclE: + assumes nl: "no_loops m" + and nxt: "m \ x \\<^sup>+ y" + shows "\ m \ y \\<^sup>* x" +proof + assume "m \ y \\<^sup>* x" + hence "m \ x \\<^sup>+ x" using nxt + by simp + + thus False using nl + unfolding no_loops_def by auto +qed + +lemma neg_next_rtrancl_trancl: + "\ \ m \ y \\<^sup>* r; m \ x \ y \ \ \ m \ x \\<^sup>+ r" + apply (erule contrapos_nn) + apply (drule tranclD) + apply (clarsimp simp: next_unfold') + done + +lemma next_trancl_split_tt: + assumes p1: "m \ x \\<^sup>+ y" + and p2: "m \ x \\<^sup>+ p" + and nm: "\ m \ p \\<^sup>* y" + shows "m \ y \\<^sup>* p" + using p2 p1 nm +proof induct + case base thus ?case + by (clarsimp dest!: tranclD) (drule (1) next_single_value, simp) +next + case (step q r) + + show ?case + proof (cases "q = y") + case True thus ?thesis using step + by fastforce + next + case False + have "m \ y \\<^sup>* q" + proof (rule step.hyps) + have "\ m \ q \\<^sup>+ y" + by (rule neg_next_rtrancl_trancl) fact+ + thus "\ m \ q \\<^sup>* y" using False + by (clarsimp dest!: rtranclD) + qed fact+ + thus ?thesis by (rule rtrancl_into_rtrancl) fact+ + qed +qed + +lemma no_loops_upd_last: + assumes noloop: "no_loops m" + and nxt: "m \ x \\<^sup>+ p" + shows "m (p \ cte) \ x \\<^sup>+ p" +proof - + from noloop nxt have xp: "x \ p" + by (clarsimp dest!: neg_no_loopsI) + + from nxt show ?thesis using xp + proof (induct rule: converse_trancl_induct') + case (base y) + hence "m (p \ cte) \ y \ p" using noloop + by (auto simp add: mdb_next_update) + thus ?case .. + next + case (step y z) + + from noloop step have xp: "z \ p" + by (clarsimp dest!: neg_no_loopsI) + + hence "m (p \ cte) \ y \ z" using step + by (simp add: mdb_next_update) + + moreover from xp have "m (p \ cte) \ z \\<^sup>+ p" using step.hyps assms + by (auto simp del: fun_upd_apply) + ultimately show ?case by (rule trancl_into_trancl2) + qed +qed + + +lemma no_0_neq [intro?]: + "\m c = Some cte; no_0 m\ \ c \ 0" + by (auto simp add: no_0_def) + +lemma no_0_update: + assumes no0: "no_0 m" + and pnz: "p \ 0" + shows "no_0 (m(p \ cte))" + using no0 pnz unfolding no_0_def by simp + +lemma has_loop_update: + assumes lp: "m(p \ cte) \ c \\<^sup>+ c'" + and cn0: "c' \ 0" + and mnext: "mdbNext (cteMDBNode cte) = 0" + and mn0: "no_0 m" + and pn0: "p \ 0" + shows "m \ c \\<^sup>+ c'" + using lp cn0 +proof induct + case (base y) + have "m \ c \ y" + proof (rule mdb_next_update_other) + show "p \ c" using base + by (clarsimp intro: contrapos_nn simp: mdb_next_update mnext) + qed fact+ + + thus ?case .. 
+next + case (step y z) + + show ?case + proof + have "y \ 0" by (rule no_0_lhs [OF _ no_0_update]) fact+ + thus "m \ c \\<^sup>+ y" using step by simp + next + have "z \ 0" by fact+ + hence "p \ y" using step.hyps mnext + by (clarsimp simp: mdb_next_update) + thus "m \ y \ z" + by (rule mdb_next_update_other [OF step.hyps(2)]) + qed +qed + +lemma mdb_rtrancl_update_other: + assumes upd: "m(p \ cte) \ x \\<^sup>* y" + and nopath: "\ m \ x \\<^sup>* p" + shows "m \ x \\<^sup>* y" + using upd +proof (cases rule: next_rtrancl_tranclE) + case eq thus ?thesis by simp +next + case trancl thus ?thesis + by (auto intro: trancl_into_rtrancl elim: mdb_trancl_update_other [OF _ nopath]) +qed + +lemma mdb_trancl_other_update: + assumes upd: "m \ x \\<^sup>+ y" + and np: "\ m \ x \\<^sup>* p" + shows "m(p \ cte) \ x \\<^sup>+ y" + using upd +proof induct + case (base q) + from np have "x \ p" by clarsimp + hence"m (p \ cte) \ x \ q" + using base by (simp add: mdb_next_update del: fun_upd_apply) + thus ?case .. +next + case (step q r) + + show ?case + proof + from step.hyps(1) np have "q \ p" + by (auto elim!: contrapos_nn) + + thus x: "m(p \ cte) \ q \ r" + using step by (simp add: mdb_next_update del: fun_upd_apply) + qed fact+ +qed + +lemma mdb_rtrancl_other_update: + assumes upd: "m \ x \\<^sup>* y" + and nopath: "\ m \ x \\<^sup>* p" + shows "m(p \ cte) \ x \\<^sup>* y" + using upd +proof (cases rule: next_rtrancl_tranclE) + case eq thus ?thesis by simp +next + case trancl thus ?thesis + by (auto intro: trancl_into_rtrancl elim: mdb_trancl_other_update [OF _ nopath]) +qed + +lemma mdb_chain_0_update: + assumes x: "m \ mdbNext (cteMDBNode cte) \\<^sup>* 0" + and np: "\ m \ mdbNext (cteMDBNode cte) \\<^sup>* p" + assumes p: "p \ 0" + assumes 0: "no_0 m" + assumes n: "mdb_chain_0 m" + shows "mdb_chain_0 (m(p \ cte))" + unfolding mdb_chain_0_def +proof rule + fix x + assume "x \ dom (m(p \ cte))" + hence x: "x = p \ x \ dom m" by simp + + have cnxt: "m(p \ cte) \ mdbNext (cteMDBNode cte) \\<^sup>* 0" + by (rule mdb_rtrancl_other_update) fact+ + + from x show "m(p \ cte) \ x \\<^sup>+ 0" + proof + assume xp: "x = p" + show ?thesis + proof (rule rtrancl_into_trancl2 [OF _ cnxt]) + show "m(p \ cte) \ x \ mdbNext (cteMDBNode cte)" using xp + by (simp add: mdb_next_update) + qed + next + assume x: "x \ dom m" + + show ?thesis + proof (cases "m \ x \\<^sup>* p") + case False + from n have "m \ x \\<^sup>+ 0" + unfolding mdb_chain_0_def + using x by auto + + thus ?thesis + by (rule mdb_trancl_other_update) fact+ + next + case True + hence "m(p \ cte) \ x \\<^sup>* p" + proof (cases rule: next_rtrancl_tranclE) + case eq thus ?thesis by simp + next + case trancl + have "no_loops m" by (rule mdb_chain_0_no_loops) fact+ + thus ?thesis + by (rule trancl_into_rtrancl [OF no_loops_upd_last]) fact+ + qed + moreover + have "m(p \ cte) \ p \ mdbNext (cteMDBNode cte)" by (simp add: mdb_next_update) + ultimately show ?thesis using cnxt by simp + qed + qed +qed + +lemma mdb_chain_0_update_0: + assumes x: "mdbNext (cteMDBNode cte) = 0" + assumes p: "p \ 0" + assumes 0: "no_0 m" + assumes n: "mdb_chain_0 m" + shows "mdb_chain_0 (m(p \ cte))" + using x 0 p + apply - + apply (rule mdb_chain_0_update [OF _ _ p 0 n]) + apply (auto elim: next_rtrancl_tranclE dest: no_0_lhs_trancl) + done + +lemma valid_badges_0_update: + assumes nx: "mdbNext (cteMDBNode cte) = 0" + assumes pv: "mdbPrev (cteMDBNode cte) = 0" + assumes p: "m p = Some cte'" + assumes m: "no_mdb cte'" + assumes 0: "no_0 m" + assumes d: "valid_dlist m" + assumes v: 
"valid_badges m" + shows "valid_badges (m(p \ cte))" +proof (unfold valid_badges_def, clarify) + fix c c' cap cap' n n' + assume c: "(m(p \ cte)) c = Some (CTE cap n)" + assume c': "(m(p \ cte)) c' = Some (CTE cap' n')" + assume nxt: "m(p \ cte) \ c \ c'" + assume r: "sameRegionAs cap cap'" + + from p 0 have p0: "p \ 0" by (clarsimp simp: no_0_def) + + from c' p0 0 + have "c' \ 0" by (clarsimp simp: no_0_def) + with nx nxt + have cp: "c \ p" by (clarsimp simp add: mdb_next_unfold) + moreover + from pv nx nxt p p0 c d m 0 + have "c' \ p" + apply clarsimp + apply (simp add: mdb_next_unfold split: if_split_asm) + apply (erule (1) valid_dlistEn, simp) + apply (clarsimp simp: no_mdb_def no_0_def) + done + moreover + with nxt c c' cp + have "m \ c \ c'" by (simp add: mdb_next_unfold) + ultimately + show "(isEndpointCap cap \ + capEPBadge cap \ capEPBadge cap' \ + capEPBadge cap' \ 0 \ + mdbFirstBadged n') \ + (isNotificationCap cap \ + capNtfnBadge cap \ capNtfnBadge cap' \ + capNtfnBadge cap' \ 0 \ + mdbFirstBadged n')" + using r c c' v by (fastforce simp: valid_badges_def) +qed + +definition + "caps_no_overlap' m S \ + \p c n. m p = Some (CTE c n) \ capRange c \ S = {}" + +definition + fresh_virt_cap_class :: "capclass \ cte_heap \ bool" +where + "fresh_virt_cap_class C m \ + C \ PhysicalClass \ C \ (capClass \ cteCap) ` ran m" + +lemma fresh_virt_cap_class_Physical[simp]: + "fresh_virt_cap_class PhysicalClass = \" + by (rule ext, simp add: fresh_virt_cap_class_def)+ + +lemma fresh_virt_cap_classD: + "\ m x = Some cte; fresh_virt_cap_class C m \ + \ C \ PhysicalClass \ capClass (cteCap cte) \ C" + by (auto simp: fresh_virt_cap_class_def) + +lemma capRange_untyped: + "capRange cap' \ untypedRange cap \ {} \ isUntypedCap cap" + by (cases cap, auto simp: isCap_simps) + +lemma capRange_of_untyped [simp]: + "capRange (UntypedCap d r n f) = untypedRange (UntypedCap d r n f)" + by (simp add: capRange_def isCap_simps capUntypedSize_def) + +lemma caps_contained_no_overlap: + "\ caps_no_overlap' m (capRange cap); caps_contained' m\ + \ caps_contained' (m(p \ CTE cap n))" + apply (clarsimp simp add: caps_contained'_def) + apply (rule conjI) + apply clarsimp + apply (rule conjI, clarsimp dest!: capRange_untyped) + apply clarsimp + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=p' in allE, erule allE, erule impE, erule exI) + apply (frule capRange_untyped) + apply (clarsimp simp add: isCap_simps) + apply clarsimp + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=pa in allE, erule allE, erule impE, erule exI) + apply (frule capRange_untyped) + apply (clarsimp simp: isCap_simps) + apply blast + done + +lemma no_mdb_next: + "\ m p = Some cte; no_mdb cte; valid_dlist m; no_0 m \ \ \ m \ x \ p" + apply clarsimp + apply (frule vdlist_nextD0) + apply clarsimp + apply assumption + apply (clarsimp simp: mdb_prev_def no_mdb_def mdb_next_unfold) + done + +lemma no_mdb_rtrancl: + "\ m p = Some cte; no_mdb cte; p \ x; valid_dlist m; no_0 m \ \ \ m \ x \\<^sup>* p" + apply (clarsimp dest!: rtranclD) + apply (drule tranclD2) + apply clarsimp + apply (drule (3) no_mdb_next) + apply fastforce + done + +lemma isNullCap [simp]: + "isNullCap cap = (cap = NullCap)" + by (simp add: isCap_simps) + +lemma isDomainCap [simp]: + "isDomainCap cap = (cap = DomainCap)" + by (simp add: isCap_simps) + +lemma isPhysicalCap[simp]: + "isPhysicalCap cap = (capClass cap = PhysicalClass)" + by (simp add: isPhysicalCap_def AARCH64_H.isPhysicalCap_def + split: capability.split arch_capability.split) + +definition 
capMasterCap :: "capability \ capability" where + "capMasterCap cap \ case cap of + EndpointCap ref bdg s r g gr \ EndpointCap ref 0 True True True True + | NotificationCap ref bdg s r \ NotificationCap ref 0 True True + | CNodeCap ref bits gd gs \ CNodeCap ref bits 0 0 + | ThreadCap ref \ ThreadCap ref + | ReplyCap ref master g \ ReplyCap ref True True + | UntypedCap d ref n f \ UntypedCap d ref n 0 + | ArchObjectCap acap \ ArchObjectCap (case acap of + FrameCap ref rghts sz d mapdata \ + FrameCap ref VMReadWrite sz d None + | ASIDPoolCap pool asid \ + ASIDPoolCap pool 0 + | PageTableCap ptr pt_t data \ + PageTableCap ptr pt_t None + | VCPUCap ptr \ + VCPUCap ptr + | _ \ acap) + | _ \ cap" + +lemmas capMasterCap_simps[simp] = capMasterCap_def[split_simps capability.split arch_capability.split] + +lemma capMasterCap_eqDs1: + "capMasterCap cap = EndpointCap ref bdg s r g gr + \ bdg = 0 \ s \ r \ g \ gr + \ (\bdg s r g gr. cap = EndpointCap ref bdg s r g gr)" + "capMasterCap cap = NotificationCap ref bdg s r + \ bdg = 0 \ s \ r + \ (\bdg s r. cap = NotificationCap ref bdg s r)" + "capMasterCap cap = CNodeCap ref bits gd gs + \ gd = 0 \ gs = 0 \ (\gd gs. cap = CNodeCap ref bits gd gs)" + "capMasterCap cap = ThreadCap ref + \ cap = ThreadCap ref" + "capMasterCap cap = NullCap + \ cap = NullCap" + "capMasterCap cap = DomainCap + \ cap = DomainCap" + "capMasterCap cap = IRQControlCap + \ cap = IRQControlCap" + "capMasterCap cap = IRQHandlerCap irq + \ cap = IRQHandlerCap irq" + "capMasterCap cap = Zombie ref tp n + \ cap = Zombie ref tp n" + "capMasterCap cap = UntypedCap d ref bits 0 + \ \f. cap = UntypedCap d ref bits f" + "capMasterCap cap = ReplyCap ref master g + \ master \ g \ (\master g. cap = ReplyCap ref master g)" + "capMasterCap cap = ArchObjectCap (FrameCap ref rghts sz d mapdata) + \ rghts = VMReadWrite \ mapdata = None + \ (\rghts mapdata. cap = ArchObjectCap (FrameCap ref rghts sz d mapdata))" + "capMasterCap cap = ArchObjectCap ASIDControlCap + \ cap = ArchObjectCap ASIDControlCap" + "capMasterCap cap = ArchObjectCap (ASIDPoolCap pool asid) + \ asid = 0 \ (\asid. cap = ArchObjectCap (ASIDPoolCap pool asid))" + "capMasterCap cap = ArchObjectCap (PageTableCap ptr pt_t data) + \ data = None \ (\data. cap = ArchObjectCap (PageTableCap ptr pt_t data))" + "capMasterCap cap = ArchObjectCap (VCPUCap v) + \ cap = ArchObjectCap (VCPUCap v)" + by (clarsimp simp: capMasterCap_def + split: capability.split_asm arch_capability.split_asm)+ + +lemmas capMasterCap_eqDs[dest!] 
= capMasterCap_eqDs1 capMasterCap_eqDs1 [OF sym] + +definition + capBadge :: "capability \ machine_word option" +where + "capBadge cap \ if isEndpointCap cap then Some (capEPBadge cap) + else if isNotificationCap cap then Some (capNtfnBadge cap) + else None" + +lemma capBadge_simps[simp]: + "capBadge (UntypedCap d p n f) = None" + "capBadge (NullCap) = None" + "capBadge (DomainCap) = None" + "capBadge (EndpointCap ref badge s r g gr) = Some badge" + "capBadge (NotificationCap ref badge s r) = Some badge" + "capBadge (CNodeCap ref bits gd gs) = None" + "capBadge (ThreadCap ref) = None" + "capBadge (Zombie ref b n) = None" + "capBadge (ArchObjectCap cap) = None" + "capBadge (IRQControlCap) = None" + "capBadge (IRQHandlerCap irq) = None" + "capBadge (ReplyCap tcb master g) = None" + by (simp add: capBadge_def isCap_defs)+ + +lemma capClass_Master: + "capClass (capMasterCap cap) = capClass cap" + by (simp add: capMasterCap_def split: capability.split arch_capability.split) + +lemma capRange_Master: + "capRange (capMasterCap cap) = capRange cap" + by (simp add: capMasterCap_def split: capability.split arch_capability.split, + simp add: capRange_def) + +lemma master_eqI: + "\ \cap. F (capMasterCap cap) = F cap; F (capMasterCap cap) = F (capMasterCap cap') \ + \ F cap = F cap'" + by simp + +lemmas isArchFrameCap_simps[simp] = + isArchFrameCap_def[split_simps capability.split arch_capability.split] + +lemma isCap_Master: + "isZombie (capMasterCap cap) = isZombie cap" + "isArchObjectCap (capMasterCap cap) = isArchObjectCap cap" + "isThreadCap (capMasterCap cap) = isThreadCap cap" + "isCNodeCap (capMasterCap cap) = isCNodeCap cap" + "isNotificationCap (capMasterCap cap) = isNotificationCap cap" + "isEndpointCap (capMasterCap cap) = isEndpointCap cap" + "isUntypedCap (capMasterCap cap) = isUntypedCap cap" + "isReplyCap (capMasterCap cap) = isReplyCap cap" + "isIRQControlCap (capMasterCap cap) = isIRQControlCap cap" + "isIRQHandlerCap (capMasterCap cap) = isIRQHandlerCap cap" + "isNullCap (capMasterCap cap) = isNullCap cap" + "isDomainCap (capMasterCap cap) = isDomainCap cap" + "isArchFrameCap (capMasterCap cap) = isArchFrameCap cap" + by (simp add: isCap_simps capMasterCap_def + split: capability.split arch_capability.split)+ + +lemma capUntypedSize_capBits: + "capClass cap = PhysicalClass \ capUntypedSize cap = 2 ^ (capBits cap)" + apply (simp add: capUntypedSize_def objBits_simps' + AARCH64_H.capUntypedSize_def bit_simps' + split: capability.splits arch_capability.splits + zombie_type.splits) + apply fastforce + done + +lemma sameRegionAs_def2: + "sameRegionAs cap cap' = (\cap cap'. 
+ (cap = cap' + \ (\ isNullCap cap \ \ isZombie cap + \ \ isUntypedCap cap \ \ isArchFrameCap cap) + \ (\ isNullCap cap' \ \ isZombie cap' + \ \ isUntypedCap cap' \ \ isArchFrameCap cap')) + \ (capRange cap' \ {} \ capRange cap' \ capRange cap + \ (isUntypedCap cap \ (isArchFrameCap cap \ isArchFrameCap cap'))) + \ (isIRQControlCap cap \ isIRQHandlerCap cap')) + (capMasterCap cap) (capMasterCap cap')" + apply (cases "isUntypedCap cap") + apply (clarsimp simp: sameRegionAs_def Let_def + isCap_Master capRange_Master capClass_Master) + apply (clarsimp simp: isCap_simps + capMasterCap_def[where cap="UntypedCap d p n f" for d p n f]) + apply (simp add: capRange_def capUntypedSize_capBits) + apply (intro impI iffI) + apply (clarsimp del: subsetI intro!: range_subsetI) + apply clarsimp + apply (simp cong: conj_cong) + apply (simp add: capMasterCap_def sameRegionAs_def isArchFrameCap_def + split: capability.split + split del: if_split cong: if_cong) + apply (simp add: AARCH64_H.sameRegionAs_def isCap_simps + split: arch_capability.split + split del: if_split cong: if_cong) + apply (clarsimp simp: capRange_def Let_def isCap_simps) + apply (simp add: range_subset_eq2 cong: conj_cong) + apply (simp add: conj_comms mask_def add_diff_eq) + done + +lemma sameObjectAs_def2: + "sameObjectAs cap cap' = (\cap cap'. + (cap = cap' + \ (\ isNullCap cap \ \ isZombie cap \ \ isUntypedCap cap) + \ (\ isNullCap cap' \ \ isZombie cap' \ \ isUntypedCap cap') + \ (isArchFrameCap cap \ capRange cap \ {}) + \ (isArchFrameCap cap' \ capRange cap' \ {}))) + (capMasterCap cap) (capMasterCap cap')" + apply (simp add: sameObjectAs_def sameRegionAs_def2 + isCap_simps capMasterCap_def + split: capability.split) + apply (clarsimp simp: AARCH64_H.sameObjectAs_def isCap_simps + split: arch_capability.split cong: if_cong) + apply (clarsimp simp: AARCH64_H.sameRegionAs_def isCap_simps + split del: if_split cong: if_cong) + apply (simp add: capRange_def isCap_simps mask_def add_diff_eq + split del: if_split) + apply fastforce + done + +lemmas sameRegionAs_def3 = + sameRegionAs_def2 [simplified capClass_Master capRange_Master isCap_Master] + +lemmas sameObjectAs_def3 = + sameObjectAs_def2 [simplified capClass_Master capRange_Master isCap_Master] + +lemma sameRegionAsE: + "\ sameRegionAs cap cap'; + \ capMasterCap cap = capMasterCap cap'; \ isNullCap cap; \ isZombie cap; + \ isUntypedCap cap; \ isArchFrameCap cap\ \ R; + \ capRange cap' \ {}; capRange cap' \ capRange cap; isUntypedCap cap \ \ R; + \ capRange cap' \ {}; capRange cap' \ capRange cap; isArchFrameCap cap; + isArchFrameCap cap' \ \ R; + \ isIRQControlCap cap; isIRQHandlerCap cap' \ \ R + \ \ R" + by (simp add: sameRegionAs_def3, fastforce) + +lemma sameObjectAsE: + "\ sameObjectAs cap cap'; + \ capMasterCap cap = capMasterCap cap'; \ isNullCap cap; \ isZombie cap; + \ isUntypedCap cap; + isArchFrameCap cap \ capRange cap \ {} \ \ R \ \ R" + by (clarsimp simp add: sameObjectAs_def3) + +lemma sameObjectAs_sameRegionAs: + "sameObjectAs cap cap' \ sameRegionAs cap cap'" + by (clarsimp simp add: sameObjectAs_def2 sameRegionAs_def2 isCap_simps) + +lemma sameObjectAs_sym: + "sameObjectAs c d = sameObjectAs d c" + by (simp add: sameObjectAs_def2 eq_commute conj_comms) + +lemma untypedRange_Master: + "untypedRange (capMasterCap cap) = untypedRange cap" + by (simp add: capMasterCap_def split: capability.split) + +lemma sameObject_capRange: + "sameObjectAs cap cap' \ capRange cap' = capRange cap" + apply (rule master_eqI, rule capRange_Master) + apply (clarsimp simp: 
sameObjectAs_def2) + done + +lemma sameRegionAs_Null [simp]: + "sameRegionAs c NullCap = False" + "sameRegionAs NullCap c = False" + by (simp add: sameRegionAs_def3 capRange_def isCap_simps)+ + +lemma isMDBParent_Null [simp]: + "isMDBParentOf c (CTE NullCap m) = False" + "isMDBParentOf (CTE NullCap m) c = False" + unfolding isMDBParentOf_def by (auto split: cte.splits) + +lemma capUntypedSize_simps [simp]: + "capUntypedSize (ThreadCap r) = 1 << objBits (undefined::tcb)" + "capUntypedSize (NotificationCap r badge a b) = 1 << objBits (undefined::Structures_H.notification)" + "capUntypedSize (EndpointCap r badge a b c d) = 1 << objBits (undefined::endpoint)" + "capUntypedSize (Zombie r zs n) = 1 << (zBits zs)" + "capUntypedSize NullCap = 0" + "capUntypedSize DomainCap = 1" + "capUntypedSize (ArchObjectCap x) = Arch.capUntypedSize x" + "capUntypedSize (UntypedCap d r n f) = 1 << n" + "capUntypedSize (CNodeCap r n g n2) = 1 << (objBits (undefined::cte) + n)" + "capUntypedSize (ReplyCap r m a) = 1 << objBits (undefined :: tcb)" + "capUntypedSize IRQControlCap = 1" + "capUntypedSize (IRQHandlerCap irq) = 1" + by (auto simp add: capUntypedSize_def isCap_simps objBits_simps' + split: zombie_type.splits) + +lemma sameRegionAs_classes: + "sameRegionAs cap cap' \ capClass cap = capClass cap'" + apply (erule sameRegionAsE) + apply (rule master_eqI, rule capClass_Master) + apply simp + apply (simp add: capRange_def split: if_split_asm) + apply (clarsimp simp: isCap_simps)+ + done + +lemma capAligned_capUntypedPtr: + "\ capAligned cap; capClass cap = PhysicalClass \ \ + capUntypedPtr cap \ capRange cap" + by (simp add: capRange_def capAligned_def is_aligned_no_overflow) + +lemma sameRegionAs_capRange_Int: + "\ sameRegionAs cap cap'; capClass cap = PhysicalClass \ capClass cap' = PhysicalClass; + capAligned cap; capAligned cap' \ + \ capRange cap' \ capRange cap \ {}" + apply (frule sameRegionAs_classes, simp) + apply (drule(1) capAligned_capUntypedPtr)+ + apply (erule sameRegionAsE) + apply (subgoal_tac "capRange (capMasterCap cap') \ capRange (capMasterCap cap) \ {}") + apply (simp(no_asm_use) add: capRange_Master) + apply (clarsimp simp: capRange_Master) + apply blast + apply blast + apply (clarsimp simp: isCap_simps) + done + +lemma sameRegionAs_trans: + "\ sameRegionAs a b; sameRegionAs b c \ \ sameRegionAs a c" + apply (simp add: sameRegionAs_def2, elim conjE disjE, simp_all) + by (auto simp: isCap_simps capRange_def) (* long *) + +lemma capMasterCap_maskCapRights[simp]: + "capMasterCap (maskCapRights msk cap) + = capMasterCap cap" + apply (cases cap; + simp add: maskCapRights_def Let_def isCap_simps capMasterCap_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; + simp add: AARCH64_H.maskCapRights_def Let_def isCap_simps) + done + +lemma capBadge_maskCapRights[simp]: + "capBadge (maskCapRights msk cap) = capBadge cap" + apply (cases cap; + simp add: maskCapRights_def Let_def isCap_simps capBadge_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; + simp add: AARCH64_H.maskCapRights_def Let_def isCap_simps) + done + +lemma getObject_cte_det: + "(r::cte,s') \ fst (getObject p s) \ fst (getObject p s) = {(r,s)} \ s' = s" + apply (clarsimp simp add: getObject_def bind_def get_def gets_def + return_def loadObject_cte split_def) + apply (clarsimp split: kernel_object.split_asm if_split_asm option.split_asm + simp: in_monad typeError_def alignError_def magnitudeCheck_def) + apply (simp_all add: bind_def return_def assert_opt_def split_def + alignCheck_def 
is_aligned_mask[symmetric] + unless_def when_def magnitudeCheck_def) + done + +lemma cte_wp_at_obj_cases': + "cte_wp_at' P p s = + (obj_at' P p s \ (\n \ dom tcb_cte_cases. obj_at' (P \ (fst (the (tcb_cte_cases n)))) (p - n) s))" + apply (simp add: cte_wp_at_cases' obj_at'_def) + apply (rule iffI) + apply (erule disjEI + | clarsimp simp: objBits_simps' cte_level_bits_def + | rule rev_bexI, erule domI)+ + apply fastforce + done + +lemma cte_wp_at_valid_objs_valid_cap': + "\ cte_wp_at' P p s; valid_objs' s \ \ \cte. P cte \ s \' (cteCap cte)" + apply (simp add: cte_wp_at_obj_cases') + apply (elim disjE bexE conjE) + apply (drule(1) obj_at_valid_objs') + apply (clarsimp simp: valid_obj'_def valid_cte'_def) + apply (drule(1) obj_at_valid_objs') + apply (clarsimp simp: valid_obj'_def valid_cte'_def valid_tcb'_def) + apply (fastforce dest: bspec [OF _ ranI]) + done + +lemma getCTE_valid_cap: + "\valid_objs'\ getCTE t \\cte s. s \' (cteCap cte) \ cte_at' t s\" + apply (clarsimp simp add: getCTE_def valid_def) + apply (frule in_inv_by_hoareD [OF getObject_cte_inv], clarsimp) + apply (subst conj_commute) + apply (subgoal_tac "cte_wp_at' ((=) a) t s") + apply (rule conjI) + apply (clarsimp elim!: cte_wp_at_weakenE') + apply (drule(1) cte_wp_at_valid_objs_valid_cap') + apply clarsimp + apply (drule getObject_cte_det) + apply (simp add: cte_wp_at'_def) + done + +lemmas getCTE_valid_cap' [wp] = + getCTE_valid_cap [THEN hoare_conjD1 [unfolded pred_conj_def]] + +lemma ctes_of_valid_cap': + "\ ctes_of s p = Some (CTE c n); valid_objs' s\ \ s \' c" + apply (rule cte_wp_at_valid_objs_valid_cap'[where P="(=) (CTE c n)", simplified]) + apply (simp add: cte_wp_at_ctes_of) + apply assumption + done + +lemma valid_capAligned: + "valid_cap' c s \ capAligned c" + by (simp add: valid_cap'_def) + +lemma caps_no_overlap'_no_region: + "\ caps_no_overlap' m (capRange cap); valid_objs' s; + m = ctes_of s; s \' cap; fresh_virt_cap_class (capClass cap) m \ \ + \c n p. 
m p = Some (CTE c n) \ + \ sameRegionAs c cap \ \ sameRegionAs cap c" + apply (clarsimp simp add: caps_no_overlap'_def) + apply (erule allE)+ + apply (erule impE, erule exI) + apply (frule (1) ctes_of_valid_cap') + apply (drule valid_capAligned)+ + apply (case_tac "capClass cap = PhysicalClass") + apply (auto dest: sameRegionAs_capRange_Int)[1] + apply (drule(1) fresh_virt_cap_classD) + apply (auto dest: sameRegionAs_classes) + done + +definition + "initMDBNode \ MDB nullPointer nullPointer True True" + +lemma init_next [simp]: + "mdbNext initMDBNode = 0" + by (simp add: initMDBNode_def nullPointer_def) + +lemma init_prev [simp]: + "mdbPrev initMDBNode = 0" + by (simp add: initMDBNode_def nullPointer_def) + +lemma mdb_chunked_init: + assumes x: "m x = Some cte" + assumes no_m: "no_mdb cte" + assumes no_c: "caps_no_overlap' m (capRange cap)" + assumes no_v: "fresh_virt_cap_class (capClass cap) m" + assumes no_0: "no_0 m" + assumes dlist: "valid_dlist m" + assumes chain: "mdb_chain_0 m" + assumes chunked: "mdb_chunked m" + assumes valid: "valid_objs' s" "m = ctes_of s" "s \' cap" + shows "mdb_chunked (m(x \ CTE cap initMDBNode))" + unfolding mdb_chunked_def +proof clarify + fix p p' c c' n n' + define m' where "m' \ m (x \ CTE cap initMDBNode)" + assume p: "m' p = Some (CTE c n)" + assume p': "m' p' = Some (CTE c' n')" + assume r: "sameRegionAs c c'" + assume neq: "p \ p'" + + note no_region = caps_no_overlap'_no_region [OF no_c valid no_v] + + from chain x no_0 + have chain': "mdb_chain_0 m'" + unfolding m'_def + apply - + apply (rule mdb_chain_0_update, clarsimp) + apply clarsimp + apply (drule rtranclD) + apply (erule disjE, clarsimp) + apply clarsimp + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + apply clarsimp + apply assumption + apply assumption + done + moreover + from x no_0 + have x0 [simp]: "x \ 0" by clarsimp + with no_0 + have "no_0 m'" + unfolding m'_def + by (rule no_0_update) + ultimately + have nl: "no_loops m'" by (rule mdb_chain_0_no_loops) + + from p p' r neq no_region + have px: "p \ x" + by (clarsimp simp: m'_def) blast + moreover + from p p' r neq no_region + have p'x: "p' \ x" + by (clarsimp simp: m'_def) blast + ultimately + have m: + "(m \ p \\<^sup>+ p' \ m \ p' \\<^sup>+ p) \ + (m \ p \\<^sup>+ p' \ is_chunk m c p p') \ + (m \ p' \\<^sup>+ p \ is_chunk m c' p' p)" + using chunked p p' neq r + unfolding mdb_chunked_def m'_def + by simp + + from x no_m px [symmetric] dlist no_0 + have npx: "\ m \ p \\<^sup>* x" by (rule no_mdb_rtrancl) + + from x no_m p'x [symmetric] dlist no_0 + have np'x: "\ m \ p' \\<^sup>* x" by (rule no_mdb_rtrancl) + + show "(m' \ p \\<^sup>+ p' \ m' \ p' \\<^sup>+ p) \ + (m' \ p \\<^sup>+ p' \ is_chunk m' c p p') \ + (m' \ p' \\<^sup>+ p \ is_chunk m' c' p' p)" + proof (cases "m \ p \\<^sup>+ p'") + case True + with m + have ch: "is_chunk m c p p'" by simp + + from True npx + have "m' \ p \\<^sup>+ p'" + unfolding m'_def + by (rule mdb_trancl_other_update) + moreover + with nl + have "\ m' \ p' \\<^sup>+ p" + apply clarsimp + apply (drule (1) trancl_trans) + apply (simp add: no_loops_def) + done + moreover + have "is_chunk m' c p p'" + unfolding is_chunk_def + proof clarify + fix p'' + assume "m' \ p \\<^sup>+ p''" + with npx + have "m \ p \\<^sup>+ p''" + unfolding m'_def + by - (rule mdb_trancl_update_other) + moreover + then + have p''x: "p'' \ x" + using dlist x no_m no_0 + apply clarsimp + apply (drule tranclD2) + apply clarsimp + apply (frule vdlist_nextD0, simp, assumption) + apply (clarsimp simp: mdb_prev_def 
mdb_next_unfold no_mdb_def) + done + moreover + assume "m' \ p'' \\<^sup>* p'" + { + moreover + from x no_m p''x [symmetric] dlist no_0 + have "\m \ p'' \\<^sup>* x" by (rule no_mdb_rtrancl) + ultimately + have "m \ p'' \\<^sup>* p'" + unfolding m'_def + by (rule mdb_rtrancl_update_other) + } + ultimately + have "\cap'' n''. + m p'' = Some (CTE cap'' n'') \ sameRegionAs c cap''" + using ch + by (simp add: is_chunk_def) + with p''x + show "\cap'' n''. + m' p'' = Some (CTE cap'' n'') \ sameRegionAs c cap''" + by (simp add: m'_def) + qed + ultimately + show ?thesis by simp + next + case False + with m + have p'p: "m \ p' \\<^sup>+ p" by simp + with m + have ch: "is_chunk m c' p' p" by simp + from p'p np'x + have "m' \ p' \\<^sup>+ p" + unfolding m'_def + by (rule mdb_trancl_other_update) + moreover + with nl + have "\ m' \ p \\<^sup>+ p'" + apply clarsimp + apply (drule (1) trancl_trans) + apply (simp add: no_loops_def) + done + moreover + have "is_chunk m' c' p' p" + unfolding is_chunk_def + proof clarify + fix p'' + assume "m' \ p' \\<^sup>+ p''" + with np'x + have "m \ p' \\<^sup>+ p''" + unfolding m'_def + by - (rule mdb_trancl_update_other) + moreover + then + have p''x: "p'' \ x" + using dlist x no_m no_0 + apply clarsimp + apply (drule tranclD2) + apply clarsimp + apply (frule vdlist_nextD0, simp, assumption) + apply (clarsimp simp: mdb_prev_def mdb_next_unfold no_mdb_def) + done + moreover + assume "m' \ p'' \\<^sup>* p" + { + moreover + from x no_m p''x [symmetric] dlist no_0 + have "\m \ p'' \\<^sup>* x" by (rule no_mdb_rtrancl) + ultimately + have "m \ p'' \\<^sup>* p" + unfolding m'_def + by (rule mdb_rtrancl_update_other) + } + ultimately + have "\cap'' n''. + m p'' = Some (CTE cap'' n'') \ sameRegionAs c' cap''" + using ch + by (simp add: is_chunk_def) + with p''x + show "\cap'' n''. + m' p'' = Some (CTE cap'' n'') \ sameRegionAs c' cap''" + by (simp add: m'_def) + qed + ultimately + show ?thesis by simp + qed +qed + +lemma cte_refs_capRange: + "\ s \' c; \irq. 
c \ IRQHandlerCap irq \ \ cte_refs' c x \ capRange c" + apply (cases c; simp add: capRange_def isCap_simps) + apply (clarsimp dest!: valid_capAligned + simp: capAligned_def objBits_simps field_simps) + apply (frule tcb_cte_cases_small) + apply (intro conjI) + apply (erule(1) is_aligned_no_wrap') + apply (rule word_plus_mono_right[where z="2^tcbBlockSizeBits - 1", simplified field_simps]) + apply (drule word_le_minus_one_leq, simp) + apply (erule is_aligned_no_wrap'[where off="2^tcbBlockSizeBits - 1", simplified field_simps]) + apply (drule word_le_minus_one_leq) + apply simp + defer + \ \CNodeCap\ + apply (clarsimp simp: objBits_simps capAligned_def dest!: valid_capAligned) + apply (rename_tac word1 nat1 word2 nat2 x) + apply (subgoal_tac "x * 2^cteSizeBits < 2 ^ (cteSizeBits + nat1)") + apply (intro conjI) + apply (simp add: shiftl_t2n mult_ac) + apply (erule(1) is_aligned_no_wrap') + apply (simp add: add_diff_eq[symmetric]) + apply (rule word_plus_mono_right) + apply simp + apply (simp add: shiftl_t2n mult_ac) + apply (erule is_aligned_no_wrap') + apply simp + apply (simp add: power_add field_simps mask_def) + apply (erule word_mult_less_mono1) + apply (simp add: objBits_defs) + apply (frule power_strict_increasing [where a="2 :: nat" and n="y + z" for y z]) + apply simp + apply (simp only: power_add) + apply (simp add: word_bits_def) + \ \Zombie\ + apply (rename_tac word zombie_type nat) + apply (clarsimp simp: capAligned_def valid_cap'_def objBits_simps) + apply (subgoal_tac "xa * 2^cteSizeBits < 2 ^ zBits zombie_type") + apply (intro conjI) + apply (simp add: shiftl_t2n mult_ac) + apply (erule(1) is_aligned_no_wrap') + apply (simp add: add_diff_eq[symmetric]) + apply (rule word_plus_mono_right) + apply (simp add: shiftl_t2n mult_ac) + apply (erule is_aligned_no_wrap') + apply simp + apply (case_tac zombie_type) + apply simp + apply (rule div_lt_mult) + apply (simp add: objBits_defs) + apply (erule order_less_le_trans) + apply (simp add: word_le_nat_alt) + apply (subst le_unat_uoi[where z=5]) + apply simp + apply simp + apply (simp add: objBits_defs) + apply (simp add: objBits_simps' power_add mult.commute) + apply (rule word_mult_less_mono1) + apply (erule order_less_le_trans) + apply (simp add: word_le_nat_alt) + apply (subst le_unat_uoi) + apply (subst unat_power_lower) + prefer 2 + apply assumption + apply (simp add: word_bits_def) + apply (simp add: word_bits_def) + apply simp + apply (frule power_strict_increasing [where a="2 :: nat" and n="y + z" for y z]) + apply simp + apply (simp only: power_add) + apply (simp add: word_bits_def) + done + +lemma untypedCapRange: + "isUntypedCap cap \ capRange cap = untypedRange cap" + by (clarsimp simp: isCap_simps) + +lemma no_direct_loop [simp]: + "no_loops m \ m (mdbNext node) \ Some (CTE cap node)" + by (fastforce simp: mdb_next_rel_def mdb_next_def no_loops_def) + +lemma no_loops_direct_simp: + "no_loops m \ m \ x \ x = False" + by (auto simp add: no_loops_def) + +lemma no_loops_trancl_simp: + "no_loops m \ m \ x \\<^sup>+ x = False" + by (auto simp add: no_loops_def) + +lemma subtree_mdb_next: + "m \ a \ b \ m \ a \\<^sup>+ b" + by (erule subtree.induct) (auto simp: mdb_next_rel_def intro: trancl_into_trancl) +end + +context mdb_order +begin + +lemma no_loops: "no_loops m" + using chain no_0 by (rule mdb_chain_0_no_loops) + +lemma irrefl_direct_simp [iff]: + "m \ x \ x = False" + using no_loops by (rule no_loops_direct_simp) + +lemma irrefl_trancl_simp [iff]: + "m \ x \\<^sup>+ x = False" + using no_loops by (rule no_loops_trancl_simp) 
+ +lemma irrefl_subtree [iff]: + "m \ x \ x = False" + by (clarsimp dest!: subtree_mdb_next) + +end (* of context mdb_order *) + +lemma no_loops_prev_next_0: + fixes m :: cte_heap + assumes src: "m src = Some (CTE src_cap src_node)" + assumes no_loops: "no_loops m" + assumes dlist: "valid_dlist m" + shows "(mdbPrev src_node = mdbNext src_node) = + (mdbPrev src_node = 0 \ mdbNext src_node = 0)" +proof - + { assume "mdbPrev src_node = mdbNext src_node" + moreover + assume "mdbNext src_node \ 0" + ultimately + obtain cte where + "m (mdbNext src_node) = Some cte" + "mdbNext (cteMDBNode cte) = src" + using src dlist + by (fastforce simp add: valid_dlist_def Let_def) + hence "m \ src \\<^sup>+ src" using src + apply - + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: next_unfold') + apply (rule r_into_trancl) + apply (simp add: next_unfold') + done + with no_loops + have False by (simp add: no_loops_def) + } + thus ?thesis by auto blast +qed + +lemma no_loops_next_prev_0: + fixes m :: cte_heap + assumes "m src = Some (CTE src_cap src_node)" + assumes "no_loops m" + assumes "valid_dlist m" + shows "(mdbNext src_node = mdbPrev src_node) = + (mdbPrev src_node = 0 \ mdbNext src_node = 0)" + apply (rule iffI) + apply (drule sym) + apply (simp add: no_loops_prev_next_0 [OF assms]) + apply clarsimp + done + +locale vmdb = mdb_next + + assumes valid: "valid_mdb_ctes m" + +sublocale vmdb < mdb_order + using valid + by (auto simp: greater_def greater_eq_def mdb_order_def valid_mdb_ctes_def) + +context vmdb +begin + +declare no_0 [intro!] +declare no_loops [intro!] + +lemma dlist [intro!]: "valid_dlist m" + using valid by (simp add: valid_mdb_ctes_def) + +lemmas m_0_simps [iff] = no_0_simps [OF no_0] + +lemma prev_next_0_p: + assumes "m p = Some (CTE cap node)" + shows "(mdbPrev node = mdbNext node) = + (mdbPrev node = 0 \ mdbNext node = 0)" + using assms by (rule no_loops_prev_next_0) auto + +lemma next_prev_0_p: + assumes "m p = Some (CTE cap node)" + shows "(mdbNext node = mdbPrev node) = + (mdbPrev node = 0 \ mdbNext node = 0)" + using assms by (rule no_loops_next_prev_0) auto + +lemmas dlistEn = valid_dlistEn [OF dlist] +lemmas dlistEp = valid_dlistEp [OF dlist] + +lemmas dlist_prevD = vdlist_prevD [OF _ _ dlist no_0] +lemmas dlist_nextD = vdlist_nextD [OF _ _ dlist no_0] +lemmas dlist_prevD0 = vdlist_prevD0 [OF _ _ dlist] +lemmas dlist_nextD0 = vdlist_nextD0 [OF _ _ dlist] +lemmas dlist_prev_src_unique = vdlist_prev_src_unique [OF _ _ _ dlist] +lemmas dlist_next_src_unique = vdlist_next_src_unique [OF _ _ _ dlist] + +lemma subtree_not_0 [simp]: + "\m \ p \ 0" + apply clarsimp + apply (erule subtree.cases) + apply auto + done + +lemma not_0_subtree [simp]: + "\m \ 0 \ p" + apply clarsimp + apply (erule subtree.induct) + apply (auto simp: mdb_next_unfold) + done + +lemma not_0_next [simp]: + "\ m \ 0 \ p" + by (clarsimp simp: mdb_next_unfold) + +lemma not_0_trancl [simp]: + "\ m \ 0 \\<^sup>+ p" + by (clarsimp dest!: tranclD) + +lemma rtrancl0 [simp]: + "m \ 0 \\<^sup>* p = (p = 0)" + by (auto dest!: rtranclD) + +lemma valid_badges: "valid_badges m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma nullcaps: "valid_nullcaps m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma + caps_contained: "caps_contained' m" and + chunked: "mdb_chunked m" and + untyped_mdb: "untyped_mdb' m" and + untyped_inc: "untyped_inc' m" and + class_links: "class_links m" and + irq_control: "irq_control m" + using valid by (simp add: valid_mdb_ctes_def)+ + +end (* of context vmdb *) + 
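+(* Outside the locales: in a valid MDB no CTE's next or prev pointer refers
+   back to its own slot.  The next case follows from no_loops; the prev case
+   additionally uses no_0 and valid_dlist. *)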
+lemma no_self_loop_next: + assumes vmdb: "valid_mdb_ctes m" + and lup: "m ptr = Some cte" + shows "mdbNext (cteMDBNode cte) \ ptr" +proof - + from vmdb have "no_loops m" .. + thus ?thesis + by (rule no_self_loop_next_noloop) fact+ +qed + +lemma no_self_loop_prev: + assumes vmdb: "valid_mdb_ctes m" + and lup: "m ptr = Some cte" + shows "mdbPrev (cteMDBNode cte) \ ptr" +proof + assume prev: "mdbPrev (cteMDBNode cte) = ptr" + + from vmdb have "no_0 m" .. + with lup have "ptr \ 0" + by (rule no_0_neq) + + moreover have "mdbNext (cteMDBNode cte) \ ptr" + by (rule no_self_loop_next) fact+ + + moreover from vmdb have "valid_dlist m" .. + + ultimately show False using lup prev + by - (erule (1) valid_dlistEp, simp_all) +qed + + +locale mdb_ptr = vmdb + + fixes p cap node + assumes m_p [intro!]: "m p = Some (CTE cap node)" +begin + +lemma p_not_next [simp]: + "(p = mdbNext node) = False" + using valid m_p by (fastforce dest: no_self_loop_next) + +lemma p_not_prev [simp]: + "(p = mdbPrev node) = False" + using valid m_p by (fastforce dest: no_self_loop_prev) + +lemmas next_not_p [simp] = p_not_next [THEN x_sym] +lemmas prev_not_p [simp] = p_not_prev [THEN x_sym] + +lemmas prev_next_0 [simp] = prev_next_0_p [OF m_p] next_prev_0_p [OF m_p] + +lemma p_0 [simp]: "p \ 0" using m_p by clarsimp + +lemma p_nextD: + assumes p': "m p' = Some (CTE cap' node')" + assumes eq: "mdbNext node = mdbNext node'" + shows "p = p' \ mdbNext node = 0 \ mdbNext node' = 0" +proof (cases "mdbNext node = 0") + case True thus ?thesis using eq by simp +next + case False + with eq have n': "mdbNext node' \ 0" by simp + + have "p = p'" + apply (rule dlistEn [OF m_p, simplified, OF False]) + apply (simp add: eq) + apply (rule dlistEn [OF p', simplified, OF n']) + apply clarsimp + done + + thus ?thesis by blast +qed + +lemma p_next_eq: + assumes "m p' = Some (CTE cap' node')" + shows "(mdbNext node = mdbNext node') = + (p = p' \ mdbNext node = 0 \ mdbNext node' = 0)" + using assms m_p + apply - + apply (rule iffI) + apply (erule (1) p_nextD) + apply auto + done + +lemma p_prevD: + assumes p': "m p' = Some (CTE cap' node')" + assumes eq: "mdbPrev node = mdbPrev node'" + shows "p = p' \ mdbPrev node = 0 \ mdbPrev node' = 0" +proof (cases "mdbPrev node = 0") + case True thus ?thesis using eq by simp +next + case False + with eq have n': "mdbPrev node' \ 0" by simp + + have "p = p'" + apply (rule dlistEp [OF m_p, simplified, OF False]) + apply (simp add: eq) + apply (rule dlistEp [OF p', simplified, OF n']) + apply clarsimp + done + + thus ?thesis by blast +qed + +lemma p_prev_eq: + assumes "m p' = Some (CTE cap' node')" + shows "(mdbPrev node = mdbPrev node') = + (p = p' \ mdbPrev node = 0 \ mdbPrev node' = 0)" + using assms m_p + apply - + apply (rule iffI) + apply (erule (1) p_prevD) + apply auto + done + +lemmas p_prev_qe = p_prev_eq [THEN x_sym] +lemmas p_next_qe = p_next_eq [THEN x_sym] + +lemma m_p_prev [intro!]: + "m \ mdbPrev node \ p" + using m_p by (clarsimp simp: mdb_prev_def) + +lemma m_p_next [intro!]: + "m \ p \ mdbNext node" + using m_p by (clarsimp simp: mdb_next_unfold) + +lemma next_p_prev: + "mdbNext node \ 0 \ m \ p \ mdbNext node" + by (rule dlist_nextD0 [OF m_p_next]) + +lemma prev_p_next: + "mdbPrev node \ 0 \ m \ mdbPrev node \ p" + by (rule dlist_prevD0 [OF m_p_prev]) + +lemma p_next: + "(m \ p \ p') = (p' = mdbNext node)" + using m_p by (auto simp: mdb_next_unfold) + +end (* of locale mdb_ptr *) + +lemma no_mdb_not_target: + "\ m \ c \ c'; m p = Some cte; no_mdb cte; valid_dlist m; no_0 m \ + \ c' \ p" + 
apply clarsimp + apply (subgoal_tac "c \ 0") + prefer 2 + apply (clarsimp simp: mdb_next_unfold) + apply (drule (3) vdlist_nextD) + apply (clarsimp simp: mdb_prev_def) + apply (simp add: no_mdb_def) + done + +context begin interpretation Arch . (*FIXME: arch_split*) +lemma valid_dlist_init: + "\ valid_dlist m; m p = Some cte; no_mdb cte \ \ + valid_dlist (m (p \ CTE cap initMDBNode))" + apply (simp add: initMDBNode_def Let_def nullPointer_def) + apply (clarsimp simp: no_mdb_def valid_dlist_def Let_def) + apply fastforce + done +end + +lemma (in mdb_ptr) descendants_of_init': + assumes n: "no_mdb (CTE cap node)" + shows + "descendants_of' p' (m(p \ CTE c initMDBNode)) = + descendants_of' p' m" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (rule iffI) + apply (erule subtree.induct) + apply (frule no_mdb_not_target [where p=p]) + apply simp + apply (simp add: no_mdb_def) + apply (rule valid_dlist_init[OF dlist, OF m_p n]) + apply (insert no_0)[1] + apply (clarsimp simp: no_0_def) + apply (clarsimp simp: mdb_next_unfold split: if_split_asm) + apply (rule direct_parent) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (clarsimp simp: parentOf_def split: if_split_asm) + apply (frule no_mdb_not_target [where p=p]) + apply simp + apply (simp add: no_mdb_def) + apply (rule valid_dlist_init[OF dlist, OF m_p n]) + apply (insert no_0)[1] + apply (clarsimp simp: no_0_def) + apply (subgoal_tac "p' \ p") + apply (erule trans_parent) + apply (clarsimp simp: mdb_next_unfold split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def m_p split: if_split_asm) + apply clarsimp + apply (drule subtree_mdb_next)+ + apply (drule tranclD)+ + apply clarsimp + apply (insert n)[1] + apply (clarsimp simp: mdb_next_unfold m_p no_mdb_def) + apply (erule subtree.induct) + apply (frule no_mdb_not_target [where p=p], rule m_p, rule n) + apply (rule dlist) + apply (rule no_0) + apply (subgoal_tac "p'\p") + prefer 2 + apply (insert n)[1] + apply (clarsimp simp: mdb_next_unfold m_p no_mdb_def) + apply (rule direct_parent) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (clarsimp simp: parentOf_def) + apply (frule no_mdb_not_target [where p=p], rule m_p, rule n) + apply (rule dlist) + apply (rule no_0) + apply (subgoal_tac "c'\p") + prefer 2 + apply (insert n)[1] + apply (clarsimp simp: mdb_next_unfold m_p no_mdb_def) + apply (subgoal_tac "p'\p") + apply (erule trans_parent) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (clarsimp simp: parentOf_def) + apply clarsimp + apply (drule subtree_mdb_next) + apply (drule tranclD) + apply clarsimp + apply (insert n) + apply (clarsimp simp: mdb_next_unfold no_mdb_def m_p) + done + +lemma untyped_mdb_init: + "\ valid_mdb_ctes m; m p = Some cte; no_mdb cte; + caps_no_overlap' m (capRange cap); untyped_mdb' m; + valid_objs' s; s \' cap; + m = ctes_of s\ + \ untyped_mdb' (m(p \ CTE cap initMDBNode))" + apply (clarsimp simp add: untyped_mdb'_def) + apply (rule conjI) + apply clarsimp + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=p' in allE, erule allE, erule impE, erule exI) + apply (drule (1) ctes_of_valid_cap')+ + apply (drule valid_capAligned)+ + apply (drule untypedCapRange)+ + apply simp + apply (cases cte) + apply (rename_tac capability mdbnode) + apply clarsimp + apply (subgoal_tac "mdb_ptr (ctes_of s) p capability mdbnode") + prefer 2 + apply (simp add: vmdb_def mdb_ptr_def mdb_ptr_axioms_def) + apply (clarsimp simp: mdb_ptr.descendants_of_init') + apply (simp add: 
caps_no_overlap'_def) + apply (erule_tac x=pa in allE, erule allE, erule impE, erule exI) + apply (drule (1) ctes_of_valid_cap')+ + apply (drule valid_capAligned untypedCapRange)+ + apply simp + apply blast + done + +lemma aligned_untypedRange_non_empty: + "\capAligned c; isUntypedCap c\ \ untypedRange c \ {}" + apply (frule untypedCapRange) + apply (drule capAligned_capUntypedPtr) + apply (clarsimp simp: isCap_simps) + apply blast + done + +lemma untypedRange_not_emptyD: "untypedRange c' \ {} \ isUntypedCap c'" + by (case_tac c'; simp add: isCap_simps) + +lemma usableRange_subseteq: + "\capAligned c';isUntypedCap c'\ \ usableUntypedRange c' \ untypedRange c'" + apply (clarsimp simp:isCap_simps capAligned_def mask_def add_diff_eq split:if_splits) + apply (erule order_trans[OF is_aligned_no_wrap']) + apply (erule of_nat_power) + apply (simp add:word_bits_def)+ + done + +lemma untypedRange_in_capRange: "untypedRange x \ capRange x" + by (case_tac x; simp add: capRange_def) + +lemma untyped_inc_init: + "\ valid_mdb_ctes m; m p = Some cte; no_mdb cte; + caps_no_overlap' m (capRange cap); + valid_objs' s; s \' cap; + m = ctes_of s\ + \ untyped_inc' (m(p \ CTE cap initMDBNode))" + apply (clarsimp simp add: valid_mdb_ctes_def untyped_inc'_def) + apply (intro conjI impI) + apply clarsimp + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=p' in allE, erule allE, erule impE, erule exI) + apply (drule (1) ctes_of_valid_cap')+ + apply (drule valid_capAligned)+ + apply (frule usableRange_subseteq[OF _ untypedRange_not_emptyD]) + apply (drule (1) aligned_untypedRange_non_empty) + apply assumption + apply (frule_tac c' = c' in usableRange_subseteq) + apply (drule (1) aligned_untypedRange_non_empty) + apply assumption + apply (drule(1) aligned_untypedRange_non_empty)+ + apply (thin_tac "All P" for P) + apply (subgoal_tac "untypedRange cap \ untypedRange c' = {}") + apply (intro conjI) + apply simp + apply (drule(2) set_inter_not_emptyD2) + apply fastforce + apply (drule(2) set_inter_not_emptyD1) + apply fastforce + apply (drule(2) set_inter_not_emptyD3) + apply simp+ + apply (rule disjoint_subset2[OF _ disjoint_subset]) + apply (rule untypedRange_in_capRange)+ + apply (simp add:Int_ac) + apply clarsimp + apply (cases cte) + apply (rename_tac capability mdbnode) + apply clarsimp + apply (subgoal_tac "mdb_ptr (ctes_of s) p capability mdbnode") + prefer 2 + apply (simp add: vmdb_def mdb_ptr_def mdb_ptr_axioms_def valid_mdb_ctes_def untyped_inc'_def) + apply (clarsimp simp: mdb_ptr.descendants_of_init') + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=pa in allE, erule allE, erule impE, erule exI) + apply (drule (1) ctes_of_valid_cap')+ + apply (drule valid_capAligned)+ + apply (frule usableRange_subseteq[OF _ untypedRange_not_emptyD]) + apply (drule (1) aligned_untypedRange_non_empty) + apply assumption + apply (frule_tac c' = c in usableRange_subseteq) + apply (drule (1) aligned_untypedRange_non_empty) + apply assumption + apply (drule (1) aligned_untypedRange_non_empty)+ + apply (drule untypedCapRange)+ + apply (thin_tac "All P" for P) + apply (subgoal_tac "untypedRange cap \ untypedRange c = {}") + apply (intro conjI) + apply simp + apply (drule(2) set_inter_not_emptyD1) + apply fastforce + apply (drule(2) set_inter_not_emptyD2) + apply fastforce + apply (drule(2) set_inter_not_emptyD3) + apply simp+ + apply (rule disjoint_subset2[OF _ disjoint_subset]) + apply (rule untypedRange_in_capRange)+ + apply (simp add:Int_ac) + done +context begin interpretation Arch . 
(*FIXME: arch_split*) +lemma valid_nullcaps_init: + "\ valid_nullcaps m; cap \ NullCap \ \ valid_nullcaps (m(p \ CTE cap initMDBNode))" + by (simp add: valid_nullcaps_def initMDBNode_def nullPointer_def) +end + +lemma class_links_init: + "\ class_links m; no_0 m; m p = Some cte; + no_mdb cte; valid_dlist m \ + \ class_links (m(p \ CTE cap initMDBNode))" + apply (simp add: class_links_def split del: if_split) + apply (erule allEI, erule allEI) + apply simp + apply (intro conjI impI) + apply clarsimp + apply (drule no_mdb_not_target[where p=p], simp) + apply (simp add: no_mdb_def) + apply (erule(2) valid_dlist_init) + apply (clarsimp simp add: no_0_def) + apply simp + apply (clarsimp simp: mdb_next_unfold) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma distinct_zombies_copyE: + "\ distinct_zombies m; m x = Some cte; + capClass (cteCap cte') = PhysicalClass + \ isZombie (cteCap cte) = isZombie (cteCap cte'); + \ capClass (cteCap cte') = PhysicalClass; isUntypedCap (cteCap cte) \ + \ isUntypedCap (cteCap cte'); + \ capClass (cteCap cte') = PhysicalClass; isArchFrameCap (cteCap cte) \ + \ isArchFrameCap (cteCap cte'); + isZombie (cteCap cte') \ x = y; + capClass (cteCap cte') = PhysicalClass \ + capBits (cteCap cte') = capBits (cteCap cte); + capClass (cteCap cte') = PhysicalClass \ capClass (cteCap cte) = PhysicalClass; + capClass (cteCap cte') = PhysicalClass \ + capUntypedPtr (cteCap cte') = capUntypedPtr (cteCap cte) \ + \ distinct_zombies (m (y \ cte'))" + apply (simp add: distinct_zombies_def distinct_zombie_caps_def) + apply clarsimp + apply (intro allI conjI impI) + apply clarsimp + apply (drule_tac x=y in spec) + apply (drule_tac x=ptr' in spec) + apply (clarsimp simp: isCap_simps) + apply clarsimp + apply (drule_tac x=ptr in spec) + apply (drule_tac x=x in spec) + apply clarsimp + apply auto[1] + apply clarsimp + apply (drule_tac x=ptr in spec) + apply (drule_tac x=ptr' in spec) + apply auto[1] + done + +lemmas distinct_zombies_sameE + = distinct_zombies_copyE [where y=x and x=x for x, simplified, + OF _ _ _ _ _] +context begin interpretation Arch . 
(*FIXME: arch_split*) +lemma capBits_Master: + "capBits (capMasterCap cap) = capBits cap" + by (clarsimp simp: capMasterCap_def split: capability.split arch_capability.split) + +lemma capUntyped_Master: + "capUntypedPtr (capMasterCap cap) = capUntypedPtr cap" + by (clarsimp simp: capMasterCap_def AARCH64_H.capUntypedPtr_def split: capability.split arch_capability.split) + +lemma distinct_zombies_copyMasterE: + "\ distinct_zombies m; m x = Some cte; + capClass (cteCap cte') = PhysicalClass + \ capMasterCap (cteCap cte) = capMasterCap (cteCap cte'); + isZombie (cteCap cte') \ x = y \ + \ distinct_zombies (m (y \ cte'))" + apply (erule(1) distinct_zombies_copyE, simp_all) + apply (rule master_eqI, rule isCap_Master, simp) + apply (drule_tac f=isUntypedCap in arg_cong) + apply (simp add: isCap_Master) + apply (drule_tac f=isArchFrameCap in arg_cong) + apply (simp add: isCap_Master) + apply (rule master_eqI, rule capBits_Master, simp) + apply clarsimp + apply (drule_tac f=capClass in arg_cong, simp add: capClass_Master) + apply (drule_tac f=capUntypedPtr in arg_cong, simp add: capUntyped_Master) + done + +lemmas distinct_zombies_sameMasterE + = distinct_zombies_copyMasterE[where x=x and y=x for x, simplified, + OF _ _ _] + +lemma isZombie_capClass: "isZombie cap \ capClass cap = PhysicalClass" + by (clarsimp simp: isCap_simps) + +lemma distinct_zombies_unzombieE: + "\ distinct_zombies m; m x = Some cte; + isZombie (cteCap cte') \ isZombie (cteCap cte); + isUntypedCap (cteCap cte) \ isUntypedCap (cteCap cte'); + isArchFrameCap (cteCap cte) \ isArchFrameCap (cteCap cte'); + capClass (cteCap cte') = capClass (cteCap cte); + capBits (cteCap cte') = capBits (cteCap cte); + capUntypedPtr (cteCap cte') = capUntypedPtr (cteCap cte) \ + \ distinct_zombies (m(x \ cte'))" + apply (simp add: distinct_zombies_def distinct_zombie_caps_def + split del: if_split) + apply (erule allEI, erule allEI) + apply clarsimp + done + +lemma distinct_zombies_seperateE: + "\ distinct_zombies m; + \y cte. m y = Some cte \ x \ y + \ \ isUntypedCap (cteCap cte) + \ \ isArchFrameCap (cteCap cte) + \ capClass (cteCap cte) = PhysicalClass + \ capClass (cteCap cte') = PhysicalClass + \ capUntypedPtr (cteCap cte) = capUntypedPtr (cteCap cte') + \ capBits (cteCap cte) = capBits (cteCap cte') \ False \ + \ distinct_zombies (m (x \ cte'))" + apply (simp add: distinct_zombies_def distinct_zombie_caps_def) + apply (intro impI allI conjI) + apply (clarsimp simp: isZombie_capClass) + apply fastforce + apply clarsimp + apply (frule isZombie_capClass) + apply (subgoal_tac "\ isUntypedCap (cteCap z) \ \ isArchFrameCap (cteCap z)") + apply fastforce + apply (clarsimp simp: isCap_simps) + apply clarsimp + apply (erule notE[rotated], elim allE, erule mp) + apply auto[1] + done + +lemma distinct_zombies_init: + "\ distinct_zombies m; caps_no_overlap' m (capRange (cteCap cte)); + capAligned (cteCap cte); \x cte. m x = Some cte \ capAligned (cteCap cte) \ + \ distinct_zombies (m (p \ cte))" + apply (erule distinct_zombies_seperateE) + apply (rename_tac y cte') + apply (clarsimp simp: caps_no_overlap'_def) + apply (drule_tac x=y in spec)+ + apply (case_tac cte') + apply (rename_tac capability mdbnode) + apply clarsimp + apply (subgoal_tac "capRange capability \ capRange (cteCap cte)") + apply (clarsimp simp: capRange_def) + apply (drule(1) capAligned_capUntypedPtr)+ + apply clarsimp + done + +definition + "no_irq' m \ \p cte. 
m p = Some cte \ cteCap cte \ IRQControlCap" + +lemma no_irqD': + "\ m p = Some (CTE IRQControlCap n); no_irq' m \ \ False" + unfolding no_irq'_def + apply (erule allE, erule allE, erule (1) impE) + apply auto + done + +lemma irq_control_init: + assumes no_irq: "cap = IRQControlCap \ no_irq' m" + assumes ctrl: "irq_control m" + shows "irq_control (m(p \ CTE cap initMDBNode))" + using no_irq + apply (clarsimp simp: irq_control_def) + apply (rule conjI) + apply (clarsimp simp: initMDBNode_def) + apply (erule (1) no_irqD') + apply clarsimp + apply (frule irq_revocable, rule ctrl) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule (1) no_irqD') + apply clarsimp + apply (erule (1) irq_controlD, rule ctrl) + done + +lemma valid_mdb_ctes_init: + "\ valid_mdb_ctes m; m p = Some cte; no_mdb cte; + caps_no_overlap' m (capRange cap); s \' cap; + valid_objs' s; m = ctes_of s; cap \ NullCap; + fresh_virt_cap_class (capClass cap) (ctes_of s); + cap = capability.IRQControlCap \ no_irq' (ctes_of s) \ \ + valid_mdb_ctes (m (p \ CTE cap initMDBNode))" + apply (simp add: valid_mdb_ctes_def) + apply (rule conjI, rule valid_dlist_init, simp+) + apply (subgoal_tac "p \ 0") + prefer 2 + apply (erule no_0_neq, clarsimp) + apply (clarsimp simp: no_0_update) + apply (rule conjI, rule mdb_chain_0_update_0, simp+) + apply (rule conjI, rule valid_badges_0_update, simp+) + apply (rule conjI, erule (1) caps_contained_no_overlap) + apply (rule conjI, rule mdb_chunked_init, simp+) + apply (rule conjI) + apply (rule untyped_mdb_init, (simp add: valid_mdb_ctes_def)+) + apply (rule conjI) + apply (rule untyped_inc_init, (simp add: valid_mdb_ctes_def)+) + apply (rule conjI) + apply (erule(1) valid_nullcaps_init) + apply (rule conjI, simp add: ut_revocable'_def initMDBNode_def) + apply (rule conjI, erule(4) class_links_init) + apply (rule conjI) + apply (erule distinct_zombies_init, simp+) + apply (erule valid_capAligned) + apply clarsimp + apply (case_tac ctea, clarsimp) + apply (rule valid_capAligned, erule(1) ctes_of_valid_cap') + apply (rule conjI) + apply (erule (1) irq_control_init) + apply (simp add: ran_def reply_masters_rvk_fb_def) + apply (auto simp: initMDBNode_def)[1] + done + +lemma setCTE_state_refs_of'[wp]: + "\\s. P (state_refs_of' s)\ setCTE p cte \\rv s. P (state_refs_of' s)\" + unfolding setCTE_def + apply (rule setObject_state_refs_of_eq) + apply (clarsimp simp: updateObject_cte in_monad typeError_def + in_magnitude_check objBits_simps + split: kernel_object.split_asm if_split_asm) + done + +lemma setCTE_valid_mdb: + fixes cap + defines "cte \ CTE cap initMDBNode" + shows + "\\s. valid_mdb' s \ cte_wp_at' no_mdb ptr s \ + s \' cap \ valid_objs' s \ cap \ NullCap \ + caps_no_overlap' (ctes_of s) (capRange cap) \ + fresh_virt_cap_class (capClass cap) (ctes_of s) \ + (cap = capability.IRQControlCap \ no_irq' (ctes_of s))\ + setCTE ptr cte + \\r. valid_mdb'\" + apply (simp add: valid_mdb'_def setCTE_def cte_def cte_wp_at_ctes_of) + apply (wp ctes_of_setObject_cte) + apply (clarsimp simp del: fun_upd_apply) + apply (erule (8) valid_mdb_ctes_init [OF _ _ _ _ _ _ refl]) + done + +lemma setCTE_valid_objs'[wp]: + "\valid_objs' and (valid_cap' (cteCap cte)) \ + setCTE p cte \\rv. 
valid_objs'\" + unfolding setCTE_def + apply (rule setObject_valid_objs') + apply (clarsimp simp: prod_eq_iff lookupAround2_char1 updateObject_cte objBits_simps) + apply (clarsimp simp: prod_eq_iff lookupAround2_char1 + updateObject_cte in_monad typeError_def + valid_obj'_def valid_tcb'_def valid_cte'_def + tcb_cte_cases_def cteSizeBits_def + split: kernel_object.split_asm if_split_asm) + done + +lemma getCTE_cte_wp_at: + "\\\ getCTE p \\rv. cte_wp_at' (\c. c = rv) p\" + apply (clarsimp simp: valid_def cte_wp_at'_def getCTE_def) + apply (frule state_unchanged [OF getObject_cte_inv]) + apply simp + apply (drule getObject_cte_det, simp) + done + +lemma getCTE_sp: + "\P\ getCTE p \\rv. cte_wp_at' (\c. c = rv) p and P\" + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule getCTE_cte_wp_at) + apply (rule getCTE_inv) + apply (rule conjI, rule TrueI, assumption) + apply simp + done + +lemmas setCTE_ad[wp] = + setObject_aligned[where 'a=cte, folded setCTE_def] + setObject_distinct[where 'a=cte, folded setCTE_def] +lemmas setCTE_map_to_ctes = + ctes_of_setObject_cte[folded setCTE_def] + +lemma getCTE_ctes_wp: + "\\s. \cte. ctes_of s ptr = Some cte \ P cte s\ getCTE ptr \P\" + apply (rule hoare_strengthen_post, rule getCTE_sp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma updateMDB_valid_objs'[wp]: + "\valid_objs'\ updateMDB m p \\rv. valid_objs'\" + apply (clarsimp simp add: updateMDB_def) + apply (wp | simp)+ + done + +lemma cte_overwrite: + "cteMDBNode_update (\x. m) (cteCap_update (\x. c) v) = CTE c m" + by (cases v, simp) + +lemma setCTE_no_0_obj' [wp]: + "\no_0_obj'\ setCTE p c \\_. no_0_obj'\" + by (simp add: setCTE_def) wp + +crunches setCTE + for pspace_canonical'[wp]: pspace_canonical' + +declare mresults_fail[simp] + +end + +end (* of theory *) diff --git a/proof/refine/AARCH64/CSpace_R.thy b/proof/refine/AARCH64/CSpace_R.thy new file mode 100644 index 0000000000..484b7fc7bc --- /dev/null +++ b/proof/refine/AARCH64/CSpace_R.thy @@ -0,0 +1,6264 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + CSpace refinement +*) + +theory CSpace_R +imports CSpace1_R +begin + +lemma setCTE_pred_tcb_at': + "\pred_tcb_at' proj P t\ + setCTE c cte + \\rv. pred_tcb_at' proj P t\" + unfolding pred_tcb_at'_def setCTE_def + apply (rule setObject_cte_obj_at_tcb') + apply (simp add: tcb_to_itcb'_def)+ + done + +locale mdb_move = + mdb_ptr m _ _ src src_cap src_node + for m src src_cap src_node + + + fixes dest cap' + + fixes old_dest_node + assumes dest: "m dest = Some (CTE NullCap old_dest_node)" + assumes prev: "mdbPrev old_dest_node = 0" + assumes nxt: "mdbNext old_dest_node = 0" + + assumes parency: "weak_derived' src_cap cap'" + assumes not_null: "src_cap \ NullCap" + assumes neq: "src \ dest" + + fixes n + defines "n \ + modify_map + (modify_map + (modify_map + (modify_map + (modify_map m dest (cteCap_update (\_. cap'))) + src (cteCap_update (\_. NullCap))) + dest (cteMDBNode_update (\m. src_node))) + src (cteMDBNode_update (\m. nullMDBNode))) + (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (\_. dest)))" + + fixes m' + defines "m' \ + modify_map n (mdbNext src_node) + (cteMDBNode_update (mdbPrev_update (\_. dest)))" +begin +interpretation Arch . 
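+(* Here n moves src's capability (as cap') and MDB node to dest, clears src,
+   and redirects the next pointer of src's predecessor to dest; m' additionally
+   redirects the prev pointer of src's successor to dest.  These are the
+   intermediate and final maps used below to reason about cteMove. *)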
(*FIXME: arch_split*) + + +lemmas src = m_p + +lemma [intro?]: + shows src_0: "src \ 0" + and dest_0: "dest \ 0" + using no_0 src dest + by (auto simp: no_0_def) + +lemma src_neq_next: + "src \ mdbNext src_node" + by simp + +lemma src_neq_prev: + "src \ mdbPrev src_node" + by simp + +lemmas src_neq_next2 = src_neq_next [symmetric] +lemmas src_neq_prev2 = src_neq_prev [symmetric] + +lemma n: + "n = modify_map (m(dest \ CTE cap' src_node, + src \ CTE capability.NullCap nullMDBNode)) + (mdbPrev src_node) + (cteMDBNode_update (mdbNext_update (\_. dest)))" + using neq src dest no_0 + by (simp add: n_def modify_map_apply) + +lemma dest_no_parent [iff]: + "m \ dest \ x = False" using dest nxt + by (auto dest: subtree_next_0) + +lemma dest_no_child [iff]: + "m \ x \ dest = False" using dest prev + by (auto dest: subtree_prev_0) + +lemma src_no_parent [iff]: + "n \ src \ x = False" + apply clarsimp + apply (erule subtree_next_0) + apply (auto simp add: n modify_map_def nullPointer_def) + done + +lemma no_0_n: "no_0 n" by (simp add: n_def no_0) +lemma no_0': "no_0 m'" by (simp add: m'_def no_0_n) + +lemma next_neq_dest [iff]: + "mdbNext src_node \ dest" + using dlist src dest prev dest_0 no_0 + by (fastforce simp add: valid_dlist_def no_0_def Let_def) + +lemma prev_neq_dest [simp]: + "mdbPrev src_node \ dest" + using dlist src dest nxt dest_0 no_0 + by (fastforce simp add: valid_dlist_def no_0_def Let_def) + +lemmas next_neq_dest2 [simp] = next_neq_dest [symmetric] +lemmas prev_neq_dest2 [simp] = prev_neq_dest [symmetric] + +lemma dlist': + "valid_dlist m'" + using src dest prev neq nxt dlist no_0 + apply (simp add: m'_def n no_0_def) + apply (simp add: valid_dlist_def Let_def) + apply clarsimp + apply (case_tac cte) + apply (rename_tac cap node) + apply (rule conjI) + apply (clarsimp simp: modify_map_def nullPointer_def split: if_split_asm) + apply (case_tac z) + apply fastforce + apply (case_tac z) + apply (rename_tac capability mdbnode) + apply clarsimp + apply (rule conjI) + apply fastforce + apply clarsimp + apply (rule conjI, fastforce) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "mdbNext mdbnode = mdbPrev src_node") + prefer 2 + apply fastforce + apply (subgoal_tac "mdbNext mdbnode = src") + prefer 2 + apply fastforce + apply fastforce + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + apply fastforce + subgoal by fastforce + subgoal by fastforce + apply (rule conjI, clarsimp) + apply fastforce + apply (clarsimp, rule conjI, fastforce) + apply (clarsimp, rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + subgoal by fastforce + apply (clarsimp simp: modify_map_def nullPointer_def split: if_split_asm) + apply (case_tac z) + apply (clarsimp, rule conjI, fastforce) + apply (clarsimp, rule conjI, fastforce) + apply (clarsimp, rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + apply (clarsimp, rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + subgoal by fastforce + apply (case_tac z) + subgoal by fastforce + subgoal by fastforce + apply (rule conjI) + apply clarsimp + apply fastforce + apply clarsimp + apply (rule conjI, fastforce) + apply (clarsimp, rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + apply (clarsimp, rule conjI) + apply 
clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + subgoal by fastforce + done + +lemma src_no_child [iff]: + "n \ x \ src = False" +proof - + from src_neq_next + have "m' src = Some (CTE capability.NullCap nullMDBNode)" + by (simp add: m'_def n modify_map_def) + hence "m' \ x \ src = False" using dlist' no_0' + by (auto elim!: subtree_prev_0 simp: nullPointer_def) + thus ?thesis by (simp add: m'_def) +qed + +lemma dest_not_parentOf_c[iff]: + "m \ dest parentOf c = False" + using dest by (simp add: parentOf_def) + +lemma dest_source [iff]: + "(m \ dest \ x) = (x = 0)" + using dest nxt by (simp add: next_unfold') + +lemma dest_no_target [iff]: + "m \ p \ dest = False" + using dlist no_0 prev dest + by (fastforce simp: valid_dlist_def Let_def no_0_def next_unfold') + +lemma parent_preserved: + "isMDBParentOf cte' (CTE cap' src_node) = + isMDBParentOf cte' (CTE src_cap src_node)" + using parency unfolding weak_derived'_def + apply (cases cte') + apply (simp add: isMDBParentOf_CTE sameRegionAs_def2) + done + +lemma children_preserved: + "isMDBParentOf (CTE cap' src_node) cte' = + isMDBParentOf (CTE src_cap src_node) cte'" + using parency unfolding weak_derived'_def + apply (cases cte') + apply (simp add: isMDBParentOf_CTE sameRegionAs_def2) + done + +lemma no_src_subtree_n_m: + assumes no_src: "\ m \ p \ src" "p \ src" "p \ dest" + assumes px: "n \ p \ x" + shows "m \ p \ x" using px +proof induct + case (direct_parent c) + thus ?case using neq no_src no_loops + apply - + apply (case_tac "c=dest") + apply (cases "m (mdbPrev src_node)") + apply (unfold n)[1] + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp: mdb_next_update) + apply (rename_tac cte') + apply clarsimp + apply (subgoal_tac "p = mdbPrev src_node") + prefer 2 + apply (simp add: n) + apply (subst (asm) modify_map_apply, simp) + apply (clarsimp simp:_mdb_next_update split: if_split_asm) + apply clarsimp + apply (simp add: n) + apply (subst (asm) modify_map_apply, simp)+ + apply (insert dest)[1] + apply (clarsimp simp add: parentOf_def mdb_next_unfold) + apply (subgoal_tac "m \ mdbPrev src_node \ src") + apply simp + apply (rule subtree.direct_parent) + apply (rule prev_leadstoI) + apply (rule src) + apply (insert no_0, clarsimp simp: no_0_def)[1] + apply (rule dlist) + apply (rule src_0) + apply (simp add: parentOf_def src parent_preserved) + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (simp add: mdb_next_update) + apply (subst (asm) modify_map_apply, simp)+ + apply (simp add: mdb_next_update split: if_split_asm) + apply assumption + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp add: parentOf_def split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp add: parentOf_def split: if_split_asm) + done +next + case (trans_parent c c') + thus ?case using neq no_src + apply - + apply (case_tac "c' = dest") + apply clarsimp + apply (subgoal_tac "c = mdbPrev src_node") + prefer 2 + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp: mdb_next_update split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp: mdb_next_update split: if_split_asm) + apply clarsimp + apply (cases "m (mdbPrev src_node)") + apply (unfold n)[1] + apply (subst (asm) modify_map_None, 
simp)+ + apply (clarsimp simp: mdb_next_update) + apply (subgoal_tac "m \ p \ src") + apply simp + apply (rule subtree.trans_parent, assumption) + apply (rule prev_leadstoI) + apply (rule src) + apply (insert no_0, clarsimp simp: no_0_def)[1] + apply (rule dlist) + apply (rule src_0) + apply (clarsimp simp: n) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp: parentOf_def src parent_preserved + split: if_split_asm) + apply (rule subtree.trans_parent, assumption) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (simp add: mdb_next_update split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (simp add: mdb_next_update split: if_split_asm) + apply assumption + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp: parentOf_def split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp: parentOf_def split: if_split_asm) + done +qed + +lemma subtree_m_n: + assumes p_neq: "p \ dest" "p \ src" + assumes px: "m \ p \ x" + shows "if x = src then n \ p \ dest else n \ p \ x" using px +proof induct + case (direct_parent c) + thus ?case using p_neq + apply - + apply simp + apply (rule conjI) + apply clarsimp + apply (drule leadsto_is_prev) + apply (rule src) + apply (rule dlist) + apply (rule no_0) + apply (clarsimp simp: parentOf_def) + apply (rule subtree.direct_parent) + apply (simp add: n modify_map_apply mdb_next_update) + apply (rule dest_0) + apply (clarsimp simp: n modify_map_apply parentOf_def + neq [symmetric] src parent_preserved) + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (simp add: mdb_next_update) + apply (subst modify_map_apply, simp) + apply (clarsimp simp: mdb_next_update) + apply (drule prev_leadstoD) + apply (rule src) + apply (rule dlist) + apply (rule no_0) + apply simp + apply assumption + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (clarsimp simp add: parentOf_def) + apply (subst modify_map_apply, simp) + apply (clarsimp simp add: parentOf_def) + apply fastforce + done +next + case (trans_parent c c') + thus ?case using p_neq + apply - + apply (clarsimp split: if_split_asm) + apply (erule subtree.trans_parent) + apply (clarsimp simp: next_unfold' src n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (clarsimp simp add: neq [symmetric] src split: option.splits) + apply (subst modify_map_apply, simp) + apply (clarsimp simp add: neq [symmetric] src split: option.splits) + apply assumption + apply (clarsimp simp: mdb_next_unfold src n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (simp add: parentOf_def) + apply (subst modify_map_apply, simp) + apply (fastforce simp add: parentOf_def) + apply (rule conjI) + apply clarsimp + apply (cases "m c", simp add: mdb_next_unfold) + apply (drule leadsto_is_prev) + apply (rule src) + apply (rule dlist) + apply (rule no_0) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n modify_map_apply mdb_next_update) + apply (rule dest_0) + apply (clarsimp simp: n modify_map_apply parentOf_def neq [symmetric] src) + apply (rule conjI, clarsimp) + apply (clarsimp simp: parent_preserved) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n) + apply (cases 
"m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (clarsimp simp add: mdb_next_update) + apply (subst modify_map_apply, simp) + apply (clarsimp simp add: mdb_next_update) + apply (rule conjI, clarsimp) + apply clarsimp + apply (drule prev_leadstoD, rule src, rule dlist, rule no_0) + apply simp + apply assumption + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (clarsimp simp add: parentOf_def) + apply (subst modify_map_apply, simp) + apply (fastforce simp add: parentOf_def) + done +qed + +lemmas neq_sym [simp] = neq [symmetric] + +lemmas src_prev_loop [simp] = + subtree_prev_loop [OF src no_loops dlist no_0] + +lemma subtree_src_dest: + "m \ src \ x \ n \ dest \ x" + apply (erule subtree.induct) + apply (clarsimp simp: mdb_next_unfold src) + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (simp add: mdb_next_update) + apply (subst modify_map_apply, simp) + apply (simp add: mdb_next_update) + apply assumption + apply (simp add: n) + apply (clarsimp simp add: modify_map_def parentOf_def src children_preserved) + apply (subgoal_tac "c'' \ src") + prefer 2 + apply (drule (3) subtree.trans_parent) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (simp add: mdb_next_update) + apply fastforce + apply (subst modify_map_apply, simp) + apply (simp add: mdb_next_update) + apply fastforce + apply assumption + apply (fastforce simp: n modify_map_def parentOf_def src children_preserved) + done + +lemma src_next [simp]: + "m \ src \ mdbNext src_node" + by (simp add: next_unfold' src) + +lemma dest_no_trancl_target [simp]: + "m \ x \\<^sup>+ dest = False" + by (clarsimp dest!: tranclD2) + +lemma m'_next: + "\m' p = Some (CTE cte' node'); m p = Some (CTE cte node)\ + \ mdbNext node' + = (if p = src then 0 + else if p = dest then mdbNext src_node + else if mdbNext node = src then dest + else mdbNext node)" + apply(simp, intro conjI impI) + apply(clarsimp simp: n m'_def modify_map_def split: if_split_asm) + apply(clarsimp simp: n m'_def modify_map_def nullPointer_def) + apply(subgoal_tac "mdbPrev src_node = p") + prefer 2 + apply(erule dlistEn) + apply(simp) + apply(case_tac "cte'a") + apply(clarsimp simp: src) + apply(clarsimp simp: n m'_def modify_map_def split: if_split_asm) + apply(clarsimp simp: dest n m'_def modify_map_def) + apply(clarsimp simp: n m'_def modify_map_def nullPointer_def) + apply(clarsimp simp: n m'_def modify_map_def split: if_split_asm) + apply(insert m_p no_0) + apply(erule_tac p=src in dlistEp) + apply(clarsimp simp: no_0_def) + apply(clarsimp) + done + + +lemma mdb_next_from_dest: + "n \ dest \\<^sup>+ x \ m \ src \\<^sup>+ x" + apply (erule trancl_induct) + apply (rule r_into_trancl) + apply (simp add: n modify_map_def next_unfold' src) + apply (cases "m (mdbPrev src_node)") + apply (simp add: n) + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp: mdb_next_update split: if_split_asm) + apply (fastforce intro: trancl_into_trancl) + apply (simp add: n) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp: mdb_next_update split: if_split_asm) + apply (subgoal_tac "m \ src \\<^sup>+ src") + apply simp + apply (erule trancl_into_trancl) + apply (rule prev_leadstoI, rule src) + apply (insert no_0)[1] + apply (clarsimp simp add: no_0_def) + apply (rule dlist) + apply (fastforce 
intro: trancl_into_trancl) + done + +lemma dest_loop: + "n \ dest \ dest = False" + apply clarsimp + apply (drule subtree_mdb_next) + apply (drule mdb_next_from_dest) + apply simp + done + + +lemma subtree_dest_src: + "n \ dest \ x \ m \ src \ x" + apply (erule subtree.induct) + apply (clarsimp simp: mdb_next_unfold src) + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp add: mdb_next_update next_unfold src) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp add: mdb_next_update next_unfold src) + apply assumption + apply (simp add: n) + apply (simp add: modify_map_def parentOf_def) + apply (clarsimp simp: src children_preserved) + apply (subgoal_tac "c' \ dest") + prefer 2 + apply clarsimp + apply (subgoal_tac "c'' \ dest") + prefer 2 + apply clarsimp + apply (drule (3) trans_parent) + apply (simp add: dest_loop) + apply (subgoal_tac "c' \ mdbPrev src_node") + prefer 2 + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (simp add: mdb_next_update nullPointer_def split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (simp add: mdb_next_update nullPointer_def split: if_split_asm) + apply assumption + apply (clarsimp simp add: n modify_map_def parentOf_def src children_preserved + split: if_split_asm) + done + +lemma subtree_n_m: + assumes p_neq: "p \ dest" "p \ src" + assumes px: "n \ p \ x" + shows "if x = dest then m \ p \ src else m \ p \ x" using px +proof induct + case (direct_parent c) + thus ?case using p_neq + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "p = mdbPrev src_node") + prefer 2 + apply (drule mdb_next_modify_prev [where x="mdbNext src_node" and f="\_. dest", THEN iffD2]) + apply (fold m'_def) + apply (drule leadsto_is_prev) + apply (fastforce simp: n m'_def modify_map_def) + apply (rule dlist') + apply (rule no_0') + apply simp + apply clarsimp + apply (rule subtree.direct_parent) + apply (rule prev_leadstoI) + apply (rule src) + apply (insert no_0)[1] + apply (clarsimp simp add: next_unfold' n modify_map_def no_0_def split: if_split_asm) + apply (rule dlist) + apply (rule src_0) + apply (clarsimp simp: parentOf_def n modify_map_def src parent_preserved) + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (simp add: next_unfold' mdb_next_unfold) + apply (subst (asm) modify_map_apply, simp)+ + apply (simp add: mdb_next_update split: if_split_asm) + apply assumption + apply (simp add: n) + apply (clarsimp simp add: parentOf_def modify_map_def split: if_split_asm) + done +next + case (trans_parent c c') + thus ?case using p_neq + apply - + apply (simp split: if_split_asm) + apply clarsimp + apply (subgoal_tac "c' = mdbNext src_node") + prefer 2 + apply (clarsimp simp add: mdb_next_unfold n modify_map_def) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: mdb_next_unfold src) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def n split: if_split_asm) + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "c = mdbPrev src_node") + prefer 2 + apply (drule mdb_next_modify_prev [where x="mdbNext src_node" and f="\_. 
dest", THEN iffD2]) + apply (fold m'_def) + apply (drule leadsto_is_prev) + apply (fastforce simp: n m'_def modify_map_def) + apply (rule dlist') + apply (rule no_0') + apply simp + apply clarsimp + apply (erule subtree.trans_parent) + apply (rule prev_leadstoI) + apply (rule src) + apply (insert no_0)[1] + apply (clarsimp simp: next_unfold' no_0_def n modify_map_def) + apply (rule dlist) + apply (rule src_0) + apply (clarsimp simp: parentOf_def n modify_map_def src + parent_preserved split: if_split_asm) + apply clarsimp + apply (erule subtree.trans_parent) + apply (clarsimp simp add: n modify_map_def mdb_next_unfold nullPointer_def split: if_split_asm) + apply assumption + apply (clarsimp simp add: n modify_map_def parentOf_def split: if_split_asm) + done +qed + +lemma descendants: + "descendants_of' p m' = + (if p = src + then {} + else if p = dest + then descendants_of' src m + else descendants_of' p m - {src} \ + (if src \ descendants_of' p m then {dest} else {}))" + apply (rule set_eqI) + apply (simp add: descendants_of'_def m'_def) + apply (auto simp: subtree_m_n intro: subtree_src_dest subtree_dest_src no_src_subtree_n_m) + apply (auto simp: subtree_n_m) + done +end + +context mdb_move_abs +begin + +end + +context mdb_move +begin + +end + +lemma updateCap_dynamic_duo: + "\ (rv, s') \ fst (updateCap x cap s); pspace_aligned' s; pspace_distinct' s \ + \ pspace_aligned' s' \ pspace_distinct' s'" + unfolding updateCap_def + apply (rule conjI) + apply (erule use_valid | wp | assumption)+ + done + +declare const_apply[simp] + +lemma next_slot_eq2: + "\case n q of None \ next_slot p t' m' = x | Some q' \ next_slot p (t'' q') (m'' q') = x; + case n q of None \ (t' = t \ m' = m) | Some q' \ t'' q' = t \ m'' q' = m\ + \ next_slot p t m = x" + apply(simp split: option.splits) + done + +lemma set_cap_not_quite_corres': + assumes cr: + "pspace_relations (ekheap (a)) (kheap s) (ksPSpace s')" + "ekheap (s) = ekheap (a)" + "cur_thread s = ksCurThread s'" + "idle_thread s = ksIdleThread s'" + "machine_state s = ksMachineState s'" + "work_units_completed s = ksWorkUnitsCompleted s'" + "domain_index s = ksDomScheduleIdx s'" + "domain_list s = ksDomSchedule s'" + "cur_domain s = ksCurDomain s'" + "domain_time s = ksDomainTime s'" + "(x,t') \ fst (updateCap p' c' s')" + "valid_objs s" "pspace_aligned s" "pspace_distinct s" "cte_at p s" + "pspace_aligned' s'" "pspace_distinct' s'" + "interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s')" + "(arch_state s, ksArchState s') \ arch_state_relation" + assumes c: "cap_relation c c'" + assumes p: "p' = cte_map p" + shows "\t. ((),t) \ fst (set_cap c p s) \ + pspace_relations (ekheap t) (kheap t) (ksPSpace t') \ + cdt t = cdt s \ + cdt_list t = cdt_list (s) \ + ekheap t = ekheap (s) \ + scheduler_action t = scheduler_action (s) \ + ready_queues t = ready_queues (s) \ + is_original_cap t = is_original_cap s \ + interrupt_state_relation (interrupt_irq_node t) (interrupt_states t) + (ksInterruptState t') \ + (arch_state t, ksArchState t') \ arch_state_relation \ + cur_thread t = ksCurThread t' \ + idle_thread t = ksIdleThread t' \ + machine_state t = ksMachineState t' \ + work_units_completed t = ksWorkUnitsCompleted t' \ + domain_index t = ksDomScheduleIdx t' \ + domain_list t = ksDomSchedule t' \ + cur_domain t = ksCurDomain t' \ + domain_time t = ksDomainTime t'" + apply (rule set_cap_not_quite_corres) + using cr + apply (fastforce simp: c p pspace_relations_def)+ + done + +context begin interpretation Arch . 
(*FIXME: arch_split*) +lemma cteMove_corres: + assumes cr: "cap_relation cap cap'" + notes trans_state_update'[symmetric,simp] + shows + "corres dc (einvs and + cte_at ptr and + cte_wp_at (\c. c = cap.NullCap) ptr' and + valid_cap cap and tcb_cap_valid cap ptr' and K (ptr \ ptr')) + (invs' and + cte_wp_at' (\c. weak_derived' cap' (cteCap c) \ cteCap c \ NullCap) (cte_map ptr) and + cte_wp_at' (\c. cteCap c = NullCap) (cte_map ptr')) + (cap_move cap ptr ptr') (cteMove cap' (cte_map ptr) (cte_map ptr'))" + (is "corres _ ?P ?P' _ _") + supply subst_all [simp del] + apply (simp add: cap_move_def cteMove_def const_def) + apply (rule corres_symb_exec_r) + defer + apply (rule getCTE_sp) + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp add: cte_wp_at_ctes_of) + apply (rule corres_assert_assume) + prefer 2 + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule corres_assert_assume) + prefer 2 + apply clarsimp + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (case_tac oldCTE) + apply (clarsimp simp: valid_nullcaps_def initMDBNode_def) + apply (erule allE)+ + apply (erule (1) impE) + apply (clarsimp simp: nullPointer_def) + apply (rule corres_symb_exec_r) + defer + apply (rule getCTE_sp) + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp add: cte_wp_at_ctes_of) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) + apply (clarsimp simp add: cte_wp_at_ctes_of) + apply (drule invs_mdb') + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (rule conjI) + apply clarsimp + apply (erule (2) valid_dlistEp, simp) + apply clarsimp + apply (erule (2) valid_dlistEn, simp) + apply (clarsimp simp: in_monad state_relation_def) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac oldCTE) + apply (rename_tac x old_dest_node) + apply (case_tac cte) + apply (rename_tac src_cap src_node) + apply clarsimp + apply (subgoal_tac "\c. 
caps_of_state a ptr = Some c") + prefer 2 + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply clarsimp + apply (subgoal_tac "cap_relation c src_cap") + prefer 2 + apply (drule caps_of_state_cteD) + apply (drule (1) pspace_relation_ctes_ofI) + apply fastforce + apply fastforce + apply fastforce + apply (drule (1) pspace_relationsD) + apply (drule_tac p=ptr' in set_cap_not_quite_corres, assumption+) + apply fastforce + apply fastforce + apply fastforce + apply (erule cte_wp_at_weakenE, rule TrueI) + apply fastforce + apply fastforce + apply assumption + apply fastforce + apply (rule cr) + apply (rule refl) + apply (clarsimp simp: split_def) + apply (rule bind_execI, assumption) + apply (drule_tac p=ptr and c="cap.NullCap" in set_cap_not_quite_corres') + apply assumption+ + apply (frule use_valid [OF _ set_cap_valid_objs]) + apply fastforce + apply assumption + apply (frule use_valid [OF _ set_cap_aligned]) + apply fastforce + apply assumption + apply (frule use_valid [OF _ set_cap_distinct]) + apply fastforce + apply assumption + apply (frule use_valid [OF _ set_cap_cte_at]) + prefer 2 + apply assumption + apply assumption + apply (drule updateCap_stuff) + apply (elim conjE mp, fastforce) + apply (drule updateCap_stuff) + apply (elim conjE mp, fastforce) + apply assumption + apply simp + apply simp + apply (rule refl) + apply clarsimp + apply (rule bind_execI, assumption) + apply(subgoal_tac "mdb_move_abs ptr ptr' (cdt a) a") + apply (frule mdb_move_abs'.intro) + prefer 2 + apply(rule mdb_move_abs.intro) + apply(clarsimp) + apply(fastforce elim!: cte_wp_at_weakenE) + apply(simp) + apply(simp) + apply (clarsimp simp: exec_gets exec_get exec_put set_cdt_def + set_original_def bind_assoc modify_def + |(rule bind_execI[where f="cap_move_ext x y z x'" for x y z x'], clarsimp simp: mdb_move_abs'.cap_move_ext_det_def2 update_cdt_list_def set_cdt_list_def put_def) | rule refl )+ + apply (clarsimp simp: put_def) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (frule updateCap_dynamic_duo, fastforce, fastforce) + apply (frule(2) updateCap_dynamic_duo [OF _ conjunct1 conjunct2]) + apply (subgoal_tac "no_0 (ctes_of b)") + prefer 2 + apply fastforce + apply (frule(1) use_valid [OF _ updateCap_no_0]) + apply (frule(2) use_valid [OF _ updateCap_no_0, OF _ use_valid [OF _ updateCap_no_0]]) + apply (elim conjE) + apply (drule (5) updateMDB_the_lot', elim conjE) + apply (drule (4) updateMDB_the_lot, elim conjE) + apply (drule (4) updateMDB_the_lot, elim conjE) + apply (drule (4) updateMDB_the_lot, elim conjE) + apply (drule updateCap_stuff, elim conjE, erule (1) impE) + apply (drule updateCap_stuff, clarsimp) + apply (subgoal_tac "pspace_distinct' b \ pspace_aligned' b") + prefer 2 + subgoal by fastforce + apply (thin_tac "ctes_of t = s" for t s)+ + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksIdleThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + apply (subgoal_tac "\p. 
cte_at p ta = cte_at p a") + prefer 2 + apply (simp add: set_cap_cte_eq) + apply (clarsimp simp add: swp_def cte_wp_at_ctes_of simp del: split_paired_All) + apply (subgoal_tac "cte_at ptr' a") + prefer 2 + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (subgoal_tac "cte_map ptr \ cte_map ptr'") + prefer 2 + apply (erule (2) cte_map_inj) + apply fastforce + apply fastforce + apply fastforce + apply (clarsimp simp: pspace_relations_def) + apply (rule conjI) + subgoal by (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (thin_tac "gsCNodes t = p" for t p)+ + apply (thin_tac "ksWorkUnitsCompleted t = p" for t p)+ + apply (thin_tac "cur_thread t = p" for t p)+ + apply (thin_tac "domain_index t = p" for t p)+ + apply (thin_tac "domain_time t = p" for t p)+ + apply (thin_tac "cur_domain t = p" for t p)+ + apply (thin_tac "scheduler_action t = p" for t p)+ + apply (thin_tac "ready_queues t = p" for t p)+ + apply (thin_tac "idle_thread t = p" for t p)+ + apply (thin_tac "machine_state t = p" for t p)+ + apply (thin_tac "work_units_completed t = p" for t p)+ + apply (thin_tac "ksArchState t = p" for t p)+ + apply (thin_tac "gsUserPages t = p" for t p)+ + apply (thin_tac "ksCurDomain t = p" for t p)+ + apply (thin_tac "ksInterruptState t = p" for t p)+ + apply (thin_tac "ksDomScheduleIdx t = p" for t p)+ + apply (thin_tac "ksDomainTime t = p" for t p)+ + apply (thin_tac "ksDomSchedule t = p" for t p)+ + apply (thin_tac "ekheap_relation t p" for t p)+ + apply (thin_tac "pspace_relation t p" for t p)+ + apply (thin_tac "interrupt_state_relation s t p" for s t p)+ + apply (thin_tac "ghost_relation s t p q" for s t p q)+ + apply (thin_tac "sched_act_relation t p" for t p)+ + apply (thin_tac "ready_queues_relation t p" for t p)+ + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + defer + apply (drule set_cap_caps_of_state_monad)+ + apply (simp add: modify_map_mdb_cap) + apply (simp add: modify_map_apply) + apply (clarsimp simp add: revokable_relation_def simp del: fun_upd_apply) + apply simp + apply (rule conjI) + apply clarsimp + apply (erule_tac x="fst ptr" in allE) + apply (erule_tac x="snd ptr" in allE) + apply simp + apply (erule impE) + subgoal by (clarsimp simp: cte_wp_at_caps_of_state null_filter_def split: if_split_asm) + apply simp + apply clarsimp + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + subgoal by (clarsimp simp only: null_filter_def cap.simps option.simps + fun_upd_def simp_thms + split: if_splits) + apply clarsimp + apply (subgoal_tac "cte_at (aa,bb) a") + prefer 2 + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_cte_at) + apply (frule_tac p="(aa,bb)" and p'="ptr'" in cte_map_inj, assumption+) + apply fastforce + apply fastforce + apply fastforce + apply (clarsimp split: if_split_asm) + apply (subgoal_tac "(aa,bb) \ ptr") + apply (frule_tac p="(aa,bb)" and p'="ptr" in cte_map_inj, assumption+) + apply fastforce + apply fastforce + apply fastforce + apply clarsimp + subgoal by (simp add: null_filter_def split: if_splits) (*long *) + apply (subgoal_tac "mdb_move (ctes_of b) (cte_map ptr) src_cap src_node (cte_map ptr') cap' old_dest_node") + prefer 2 + apply (rule mdb_move.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro) + apply (simp add: valid_pspace'_def valid_mdb'_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_move_axioms.intro) + apply assumption + apply (simp add: nullPointer_def) + apply (simp add: nullPointer_def) + apply (erule weak_derived_sym') + subgoal by 
clarsimp + apply assumption + apply (rule conjI) + apply (simp (no_asm) add: cdt_relation_def) + apply clarsimp + apply (subst mdb_move.descendants, assumption) + apply (subst mdb_move_abs.descendants[simplified fun_upd_apply]) + apply (rule mdb_move_abs.intro) + apply fastforce + apply (fastforce elim!: cte_wp_at_weakenE) + subgoal by simp + subgoal by simp + apply (case_tac "(aa,bb) = ptr", simp) + apply (subgoal_tac "cte_map (aa,bb) \ cte_map ptr") + prefer 2 + apply (erule (2) cte_map_inj, fastforce, fastforce, fastforce) + apply (case_tac "(aa,bb) = ptr'") + subgoal by (simp add: cdt_relation_def del: split_paired_All) + apply (subgoal_tac "cte_map (aa,bb) \ cte_map ptr'") + prefer 2 + apply (erule (2) cte_map_inj, fastforce, fastforce, fastforce) + apply (simp only: if_False) + apply simp + apply (subgoal_tac "descendants_of' (cte_map (aa, bb)) (ctes_of b) = + cte_map ` descendants_of (aa, bb) (cdt a)") + prefer 2 + apply (simp add: cdt_relation_def del: split_paired_All) + apply simp + apply (rule conjI) + apply clarsimp + apply (subst inj_on_image_set_diff15) + apply (rule inj_on_descendants_cte_map) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + apply (rule subset_refl) + subgoal by simp + apply simp + apply clarsimp + apply (drule (1) cte_map_inj_eq) + apply (erule descendants_of_cte_at) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + subgoal by simp + apply(clarsimp simp: cdt_list_relation_def) + apply(subst next_slot_eq2) + apply(simp split: option.splits) + apply(intro conjI impI) + apply(rule mdb_move_abs'.next_slot_no_parent) + apply(simp, fastforce, simp) + apply(intro allI impI) + apply(rule mdb_move_abs'.next_slot) + apply(simp, fastforce, simp) + subgoal by (fastforce split: option.splits) + apply(case_tac "ctes_of b (cte_map (aa, bb))") + subgoal by (clarsimp simp: modify_map_def split: if_split_asm) + apply(case_tac ab) + apply(frule mdb_move.m'_next) + apply(simp, fastforce) + apply(case_tac "(aa, bb) = ptr") + apply(simp) + apply(case_tac "(aa, bb) = ptr'") + apply(case_tac "next_slot ptr (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(simp) + apply(erule_tac x="fst ptr" in allE) + apply(erule_tac x="snd ptr" in allE) + subgoal by(clarsimp split: if_split_asm) + apply(frule invs_mdb, frule invs_valid_pspace) + apply(frule finite_depth) + apply simp + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a) = Some ptr") + apply(frule(3) cte_at_next_slot) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + subgoal by (clarsimp simp: cte_map_inj_eq valid_pspace_def split: if_split_asm) + apply(simp) + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(frule(3) cte_at_next_slot) + apply(frule(3) cte_at_next_slot') + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + by(clarsimp simp: cte_map_inj_eq valid_pspace_def split: if_split_asm) + +lemmas cur_tcb_lift = + hoare_lift_Pf [where f = ksCurThread and P = tcb_at', folded cur_tcb'_def] + +lemma valid_bitmapQ_lift: + assumes prq: "\P. \\s. P (ksReadyQueues s) \ f \\_ s. P (ksReadyQueues s)\" + and prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" + and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" + shows "\Invariants_H.valid_bitmapQ\ f \\_. 
Invariants_H.valid_bitmapQ\<rbrace>"
+  unfolding valid_bitmapQ_def bitmapQ_def
+  apply (wp hoare_vcg_all_lift)
+   apply (wps prq prqL1 prqL2)
+   apply (rule hoare_vcg_prop, assumption)
+  done
+
+lemma bitmapQ_no_L1_orphans_lift:
+  assumes prq: "\<And>P. \<lbrace>\<lambda>s. P (ksReadyQueues s)\<rbrace> f \<lbrace>\<lambda>_ s. P (ksReadyQueues s)\<rbrace>"
+  and prqL1: "\<And>P. \<lbrace>\<lambda>s. P (ksReadyQueuesL1Bitmap s)\<rbrace> f \<lbrace>\<lambda>_ s. P (ksReadyQueuesL1Bitmap s)\<rbrace>"
+  and prqL2: "\<And>P. \<lbrace>\<lambda>s. P (ksReadyQueuesL2Bitmap s)\<rbrace> f \<lbrace>\<lambda>_ s. P (ksReadyQueuesL2Bitmap s)\<rbrace>"
+  shows "\<lbrace> bitmapQ_no_L1_orphans \<rbrace> f \<lbrace>\<lambda>_. bitmapQ_no_L1_orphans \<rbrace>"
+  unfolding valid_bitmapQ_def bitmapQ_def bitmapQ_no_L1_orphans_def
+  apply (wp hoare_vcg_all_lift)
+   apply (wps prq prqL1 prqL2)
+   apply (rule hoare_vcg_prop, assumption)
+  done
+
+lemma bitmapQ_no_L2_orphans_lift:
+  assumes prq: "\<And>P. \<lbrace>\<lambda>s. P (ksReadyQueues s)\<rbrace> f \<lbrace>\<lambda>_ s. P (ksReadyQueues s)\<rbrace>"
+  and prqL1: "\<And>P. \<lbrace>\<lambda>s. P (ksReadyQueuesL1Bitmap s)\<rbrace> f \<lbrace>\<lambda>_ s. P (ksReadyQueuesL1Bitmap s)\<rbrace>"
+  and prqL2: "\<And>P. \<lbrace>\<lambda>s. P (ksReadyQueuesL2Bitmap s)\<rbrace> f \<lbrace>\<lambda>_ s. P (ksReadyQueuesL2Bitmap s)\<rbrace>"
+  shows "\<lbrace> bitmapQ_no_L2_orphans \<rbrace> f \<lbrace>\<lambda>_. bitmapQ_no_L2_orphans \<rbrace>"
+  unfolding valid_bitmapQ_def bitmapQ_def bitmapQ_no_L2_orphans_def
+  apply (wp hoare_vcg_all_lift)
+   apply (wps prq prqL1 prqL2)
+   apply (rule hoare_vcg_prop, assumption)
+  done
+
+lemma setCTE_norqL1 [wp]:
+  "\<lbrace>\<lambda>s. P (ksReadyQueuesL1Bitmap s)\<rbrace> setCTE ptr cte \<lbrace>\<lambda>r s. P (ksReadyQueuesL1Bitmap s) \<rbrace>"
+  by (clarsimp simp: valid_def dest!: setCTE_pspace_only)
+
+lemma setCTE_norqL2 [wp]:
+  "\<lbrace>\<lambda>s. P (ksReadyQueuesL2Bitmap s)\<rbrace> setCTE ptr cte \<lbrace>\<lambda>r s. P (ksReadyQueuesL2Bitmap s) \<rbrace>"
+  by (clarsimp simp: valid_def dest!: setCTE_pspace_only)
+
+crunches cteInsert
+  for nosch[wp]: "\<lambda>s. P (ksSchedulerAction s)"
+  and norq[wp]: "\<lambda>s. P (ksReadyQueues s)"
+  and norqL1[wp]: "\<lambda>s. P (ksReadyQueuesL1Bitmap s)"
+  and norqL2[wp]: "\<lambda>s. P (ksReadyQueuesL2Bitmap s)"
+  and typ_at'[wp]: "\<lambda>s. P (typ_at' T p s)"
+  (wp: updateObject_cte_inv crunch_wps ignore_del: setObject)
+
+lemmas updateMDB_typ_ats [wp] = typ_at_lifts [OF updateMDB_typ_at']
+lemmas updateCap_typ_ats [wp] = typ_at_lifts [OF updateCap_typ_at']
+lemmas cteInsert_typ_ats [wp] = typ_at_lifts [OF cteInsert_typ_at']
+
+lemma setObject_cte_ct:
+  "\<lbrace>\<lambda>s. P (ksCurThread s)\<rbrace> setObject t (v::cte) \<lbrace>\<lambda>rv s. P (ksCurThread s)\<rbrace>"
+  by (clarsimp simp: valid_def setCTE_def[symmetric] dest!: setCTE_pspace_only)
+
+crunch ct[wp]: cteInsert "\<lambda>s. P (ksCurThread s)"
+  (wp: setObject_cte_ct hoare_drop_imps)
+end
+context mdb_insert
+begin
+interpretation Arch .
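+(* Informal summary of the mdb_insert development below: m is the CTE heap
+   before the insert and n is the heap after the new cap c' has been written
+   to dest and linked into the MDB next-chain immediately after src (lemma
+   n_src_dest).  The lemmas m_tranclD, n_trancl_eq and n_rtrancl_eq reduce
+   reachability over n to reachability over m, while n_cap and m_cap relate
+   the capabilities stored at each slot.  chunked_n then transfers mdb_chunked
+   from m to n under the assumption that c' is badge-derived from src_cap. *)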
(*FIXME: arch_split*) +lemma n_src_dest: + "n \ src \ dest" + by (simp add: n_direct_eq) + +lemma dest_chain_0 [simp, intro!]: + "n \ dest \\<^sup>+ 0" + using chain_n n_dest + by (simp add: mdb_chain_0_def) blast + +lemma m_tranclD: + "m \ p \\<^sup>+ p' \ p' \ dest \ (p = dest \ p' = 0) \ n \ p \\<^sup>+ p'" + apply (erule trancl_induct) + apply (rule context_conjI, clarsimp) + apply (rule context_conjI, clarsimp) + apply (cases "p = src") + apply simp + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (rule n_src_dest) + apply (rule r_into_trancl) + apply (simp add: n_direct_eq) + apply (cases "p = dest", simp) + apply (rule r_into_trancl) + apply (simp add: n_direct_eq) + apply clarsimp + apply (rule context_conjI, clarsimp) + apply (rule context_conjI, clarsimp simp: mdb_next_unfold) + apply (case_tac "y = src") + apply clarsimp + apply (erule trancl_trans) + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (rule n_src_dest) + apply (rule r_into_trancl) + apply (simp add: n_direct_eq) + apply (case_tac "y = dest", simp) + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n_direct_eq) + done + +lemma n_trancl_eq': + "n \ p \\<^sup>+ p' = + (if p' = dest then m \ p \\<^sup>* src + else if p = dest then m \ src \\<^sup>+ p' + else m \ p \\<^sup>+ p')" + apply (rule iffI) + apply (erule trancl_induct) + apply (clarsimp simp: n_direct_eq) + apply (fastforce split: if_split_asm) + apply (clarsimp simp: n_direct_eq split: if_split_asm) + apply fastforce + apply fastforce + apply (fastforce intro: trancl_trans) + apply (fastforce intro: trancl_trans) + apply (simp split: if_split_asm) + apply (drule rtranclD) + apply (erule disjE) + apply (fastforce intro: n_src_dest) + apply (clarsimp dest!: m_tranclD) + apply (erule trancl_trans) + apply (fastforce intro: n_src_dest) + apply (drule m_tranclD, clarsimp) + apply (drule tranclD) + apply clarsimp + apply (insert n_src_dest)[1] + apply (drule (1) next_single_value) + subgoal by (clarsimp dest!: rtrancl_eq_or_trancl[THEN iffD1]) + apply (drule m_tranclD) + apply clarsimp + done + +lemma n_trancl_eq: + "n \ p \\<^sup>+ p' = + (if p' = dest then p = src \ m \ p \\<^sup>+ src + else if p = dest then m \ src \\<^sup>+ p' + else m \ p \\<^sup>+ p')" + by (safe; clarsimp simp: n_trancl_eq' + dest!: rtrancl_eq_or_trancl[THEN iffD1] + intro!: rtrancl_eq_or_trancl[THEN iffD2]) + +lemma n_rtrancl_eq: + "n \ p \\<^sup>* p' = + (if p' = dest then p = dest \ p \ dest \ m \ p \\<^sup>* src + else if p = dest then p' \ src \ m \ src \\<^sup>* p' + else m \ p \\<^sup>* p')" + apply clarsimp + by (safe; clarsimp simp: n_trancl_eq' + dest!: rtrancl_eq_or_trancl[THEN iffD1] + intro!: rtrancl_eq_or_trancl[THEN iffD2]) + +lemma n_cap: + "n p = Some (CTE cap node) \ + \node'. if p = dest then cap = c' \ m p = Some (CTE dest_cap node') + else m p = Some (CTE cap node')" + by (simp add: n src dest new_src_def new_dest_def split: if_split_asm) + +lemma m_cap: + "m p = Some (CTE cap node) \ + \node'. 
if p = dest then cap = dest_cap \ n p = Some (CTE c' node') + else n p = Some (CTE cap node')" + apply (simp add: n new_src_def new_dest_def) + apply (cases "p=dest") + apply (auto simp: src dest) + done + +lemma chunked_m: + "mdb_chunked m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma derived_region1 [simp]: + "badge_derived' c' src_cap \ + sameRegionAs c' cap = sameRegionAs src_cap cap" + by (clarsimp simp add: badge_derived'_def sameRegionAs_def2) + +lemma derived_region2 [simp]: + "badge_derived' c' src_cap \ + sameRegionAs cap c' = sameRegionAs cap src_cap" + by (clarsimp simp add: badge_derived'_def sameRegionAs_def2) + +lemma chunked_n: + assumes b: "badge_derived' c' src_cap" + shows "mdb_chunked n" + using chunked_m src b + apply (clarsimp simp: mdb_chunked_def) + apply (drule n_cap)+ + apply clarsimp + apply (simp split: if_split_asm) + apply clarsimp + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply simp + apply (case_tac "src=p'") + apply (clarsimp simp: n_trancl_eq) + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (drule (1) trancl_rtrancl_trancl) + apply simp + apply (clarsimp simp: n_trancl_eq) + apply (rule conjI) + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply (clarsimp split: if_split_asm) + apply clarsimp + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (rule conjI) + apply clarsimp + apply (erule_tac x=src in allE) + apply simp + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: n_trancl_eq) + apply (case_tac "p=src") + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (drule (1) trancl_rtrancl_trancl) + apply simp + apply simp + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply clarsimp + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (clarsimp simp: n_trancl_eq) + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (simp add: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule sameRegionAsE, simp_all add: sameRegionAs_def3)[1] + apply blast + apply blast + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply fastforce + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply clarsimp + apply (simp add: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule_tac x=p in allE, simp, erule(1) sameRegionAs_trans) + apply fastforce + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + done + +end + +context mdb_insert_der +begin + +lemma 
untyped_c': + "untypedRange c' = untypedRange src_cap" + "isUntypedCap c' = isUntypedCap src_cap" + using partial_is_derived' + apply - + apply (case_tac "isUntypedCap src_cap") + by (clarsimp simp:isCap_simps freeIndex_update_def is_derived'_def + badge_derived'_def capMasterCap_def split:if_splits capability.splits)+ + +lemma capRange_c': + "capRange c' = capRange src_cap" + using partial_is_derived' untyped_c' + apply - + apply (case_tac "isUntypedCap src_cap") + apply (clarsimp simp:untypedCapRange) + apply (rule master_eqI, rule capRange_Master) + apply simp + apply (rule arg_cong) + apply (auto simp:isCap_simps freeIndex_update_def is_derived'_def + badge_derived'_def capMasterCap_def split:if_splits capability.splits) + done + +lemma untyped_no_parent: + "isUntypedCap src_cap \ \ m \ src \ p" + using partial_is_derived' untyped_c' + by (clarsimp simp: is_derived'_def isCap_simps freeIndex_update_def descendants_of'_def) + +end + +lemma (in mdb_insert) n_revocable: + "n p = Some (CTE cap node) \ + \node'. if p = dest then mdbRevocable node = isCapRevocable c' src_cap + else mdbRevocable node = mdbRevocable node' \ m p = Some (CTE cap node')" + using src dest + by (clarsimp simp: n new_src_def new_dest_def split: if_split_asm) + +lemma (in mdb_insert_der) irq_control_n: + "irq_control n" + using src dest partial_is_derived' + apply (clarsimp simp: irq_control_def) + apply (frule n_cap) + apply (drule n_revocable) + apply (clarsimp split: if_split_asm) + apply (simp add: is_derived'_def isCap_simps) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (erule disjE) + apply (clarsimp simp: is_derived'_def isCap_simps) + apply (erule (1) irq_controlD, rule irq_control) + done + +context mdb_insert_child +begin + +lemma untyped_mdb_n: + shows "untyped_mdb' n" + using untyped_mdb + apply (clarsimp simp add: untyped_mdb'_def descendants split del: if_split) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (erule disjE, clarsimp) + apply (simp add: descendants_of'_def) + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (simp add: src untyped_c' capRange_c') + apply (erule disjE) + apply clarsimp + apply (simp add: descendants_of'_def untyped_c') + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply (fastforce simp: src dest: untyped_no_parent) + apply (case_tac "p=src", simp) + apply simp + done + +lemma parent_untyped_must_not_usable: + "\ptr \ src; m ptr = Some (CTE ccap node'); + untypedRange ccap = untypedRange src_cap; capAligned src_cap; + isUntypedCap src_cap \ + \ usableUntypedRange ccap = {}" + using untyped_inc src + apply (clarsimp simp:untyped_inc'_def) + apply (erule_tac x = ptr in allE) + apply (erule_tac x = src in allE) + apply clarsimp + apply (subgoal_tac "isUntypedCap ccap") + apply clarsimp + apply (drule_tac p = ptr in untyped_no_parent) + apply (simp add:descendants_of'_def) + apply (drule (1) aligned_untypedRange_non_empty) + apply (case_tac ccap,simp_all add:isCap_simps) + done + +lemma untyped_inc_n: + "\capAligned src_cap;isUntypedCap src_cap \ usableUntypedRange src_cap = {}\ + \ untyped_inc' n" + using untyped_inc + apply (clarsimp simp add: untyped_inc'_def descendants split del: if_split) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (case_tac "p=dest", simp) + apply (simp add: descendants_of'_def untyped_c') + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (simp add: 
src) + apply (frule_tac p=p in untyped_no_parent) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (case_tac "p = src") + using src + apply clarsimp + apply (drule(4) parent_untyped_must_not_usable) + apply simp + apply (intro conjI) + apply clarsimp + apply clarsimp + using src + apply clarsimp + apply clarsimp + apply (case_tac "p=dest") + apply (simp add: descendants_of'_def untyped_c') + apply (erule_tac x=p' in allE) + apply (erule_tac x=src in allE) + apply (clarsimp simp:src) + apply (frule_tac p=p' in untyped_no_parent) + apply (case_tac "p' = src") + apply (clarsimp simp:src) + apply (elim disjE) + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply simp+ + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply simp+ + apply (clarsimp simp:Int_ac) + apply (erule_tac x=p' in allE) + apply (erule_tac x=p in allE) + apply (case_tac "p' = src") + apply (clarsimp simp:src descendants_of'_def untyped_c') + apply (elim disjE) + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply (simp,intro conjI,clarsimp+) + apply (intro conjI) + apply clarsimp+ + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply (simp,intro conjI,clarsimp+) + apply (intro conjI) + apply clarsimp+ + apply (clarsimp simp:Int_ac,intro conjI,clarsimp+) + apply (clarsimp simp:descendants_of'_def) + apply (case_tac "p = src") + apply simp + apply (elim disjE) + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply (simp,intro conjI,clarsimp+) + apply (intro conjI) + apply clarsimp+ + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply clarsimp+ + apply fastforce + apply (clarsimp simp:Int_ac,intro conjI,clarsimp+) + apply (intro conjI) + apply (elim disjE) + apply (simp add:Int_ac)+ + apply clarsimp + done + +end + +context mdb_insert_sib +begin + +lemma untyped_mdb_n: + shows "untyped_mdb' n" + using untyped_mdb + apply (clarsimp simp add: untyped_mdb'_def descendants split del: if_split) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm simp: descendants_of'_def capRange_c' untyped_c') + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply (fastforce simp: src dest: untyped_no_parent) + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (simp add: src) + done + +lemma not_untyped: "capAligned c' \ \isUntypedCap src_cap" + using no_child partial_is_derived' ut_rev src + apply (clarsimp simp: ut_revocable'_def isMDBParentOf_CTE) + apply (erule_tac x=src in allE) + apply simp + apply (clarsimp simp: is_derived'_def freeIndex_update_def isCap_simps capAligned_def + badge_derived'_def) + apply (clarsimp simp: sameRegionAs_def3 capMasterCap_def isCap_simps + is_aligned_no_overflow split:capability.splits) + done + +lemma untyped_inc_n: + assumes c': "capAligned c'" + shows "untyped_inc' n" + using untyped_inc not_untyped [OF c'] + apply (clarsimp simp add: untyped_inc'_def descendants split del: if_split) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (simp add: descendants_of'_def untyped_c') + apply (case_tac "p = dest") + apply (clarsimp simp: untyped_c') + apply simp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply simp + done + +end + +lemma trancl_prev_update: + "modify_map m ptr (cteMDBNode_update (mdbPrev_update z)) \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + apply (rule iffI) + apply (erule update_prev_next_trancl2) + apply (erule update_prev_next_trancl) + done + +lemma rtrancl_prev_update: + "modify_map m ptr (cteMDBNode_update (mdbPrev_update 
z)) \ x \\<^sup>* y = m \ x \\<^sup>* y" + by (simp add: trancl_prev_update rtrancl_eq_or_trancl) + +lemma mdb_chunked_prev_update: + "mdb_chunked (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = mdb_chunked m" + apply (simp add: mdb_chunked_def trancl_prev_update rtrancl_prev_update is_chunk_def) + apply (rule iffI) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=cap in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply (erule impE, blast) + apply (erule allE, erule impE, blast) + apply clarsimp + apply blast + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=cap in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply (erule impE, blast) + apply clarsimp + apply blast + apply (erule allE, erule impE, blast) + apply clarsimp + apply blast + apply clarsimp + apply blast + done + +lemma descendants_of_prev_update: + "descendants_of' p (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = + descendants_of' p m" + by (simp add: descendants_of'_def) + +lemma untyped_mdb_prev_update: + "untyped_mdb' (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = untyped_mdb' m" + apply (simp add: untyped_mdb'_def descendants_of_prev_update) + apply (rule iffI) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=c in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply (erule impE, blast) + apply (erule allE, erule impE, blast) + apply clarsimp + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=c in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply clarsimp + done + +lemma untyped_inc_prev_update: + "untyped_inc' (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = untyped_inc' m" + apply (simp add: untyped_inc'_def descendants_of_prev_update) + apply (rule iffI) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=c in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply (erule impE, blast) + apply (erule allE, erule impE, blast) + apply clarsimp + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=c in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply clarsimp + done + +lemma is_derived_badge_derived': + "is_derived' m src cap cap' \ badge_derived' cap cap'" + by (simp add: is_derived'_def) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cteInsert_mdb_chain_0: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. 
mdb_chain_0 (ctes_of s)\" + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map + setUntypedCapAsFull_mdb_chain_0 mdb_inv_preserve_fun_upd | simp del:fun_upd_apply)+ + apply (wp getCTE_wp)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (subgoal_tac "src \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "dest \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (rule conjI) + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (case_tac cte) + apply (rename_tac s_cap s_node) + apply (case_tac x) + apply (simp add: nullPointer_def) + apply (subgoal_tac "mdb_insert (ctes_of s) src s_cap s_node dest NullCap node" for node) + apply (drule mdb_insert.chain_n) + apply (rule mdb_chain_0_modify_map_prev) + apply (simp add:modify_map_apply) + apply (clarsimp simp: valid_badges_def) + apply unfold_locales + apply (assumption|rule refl)+ + apply (simp add: valid_mdb_ctes_def) + apply (simp add: valid_mdb_ctes_def) + apply assumption + done + +lemma cteInsert_mdb_chunked: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. mdb_chunked (ctes_of s)\" + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map + setUntypedCapAsFull_mdb_chunked mdb_inv_preserve_fun_upd,simp) + apply (wp getCTE_wp)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (subgoal_tac "src \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "dest \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (rule conjI) + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (case_tac cte) + apply (rename_tac s_cap s_node) + apply (case_tac cteb) + apply (rename_tac d_cap d_node) + apply (simp add: nullPointer_def) + apply (subgoal_tac "mdb_insert (ctes_of s) src s_cap s_node dest NullCap d_node") + apply (drule mdb_insert.chunked_n, erule is_derived_badge_derived') + apply (clarsimp simp: modify_map_apply mdb_chunked_prev_update fun_upd_def) + apply unfold_locales + apply (assumption|rule refl)+ + apply (simp add: valid_mdb_ctes_def) + apply (simp add: valid_mdb_ctes_def) + apply assumption + done + +lemma cteInsert_untyped_mdb: + "\valid_mdb' and pspace_distinct' and pspace_aligned' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. 
untyped_mdb' (ctes_of s)\" + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map + setUntypedCapAsFull_untyped_mdb' mdb_inv_preserve_fun_upd,simp) + apply (wp getCTE_wp)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (subgoal_tac "src \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "dest \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (rule conjI) + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (case_tac cte) + apply (rename_tac s_cap s_node) + apply (case_tac cteb) + apply (rename_tac d_cap d_node) + apply (simp add: nullPointer_def) + apply (subgoal_tac "mdb_insert_der (ctes_of s) src s_cap s_node dest NullCap d_node cap") + prefer 2 + apply unfold_locales[1] + apply (assumption|rule refl)+ + apply (simp add: valid_mdb_ctes_def) + apply (simp add: valid_mdb_ctes_def) + apply assumption + apply assumption + apply (case_tac "isMDBParentOf (CTE s_cap s_node) (CTE cap + (mdbFirstBadged_update (\a. isCapRevocable cap s_cap) + (mdbRevocable_update (\a. isCapRevocable cap s_cap) (mdbPrev_update (\a. src) s_node))))") + apply (subgoal_tac "mdb_insert_child (ctes_of s) src s_cap s_node dest NullCap d_node cap") + prefer 2 + apply (simp add: mdb_insert_child_def mdb_insert_child_axioms_def) + apply (drule mdb_insert_child.untyped_mdb_n) + apply (clarsimp simp: modify_map_apply untyped_mdb_prev_update + descendants_of_prev_update fun_upd_def) + apply (subgoal_tac "mdb_insert_sib (ctes_of s) src s_cap s_node dest NullCap d_node cap") + prefer 2 + apply (simp add: mdb_insert_sib_def mdb_insert_sib_axioms_def) + apply (drule mdb_insert_sib.untyped_mdb_n) + apply (clarsimp simp: modify_map_apply untyped_mdb_prev_update + descendants_of_prev_update fun_upd_def) + done + +lemma valid_mdb_ctes_maskedAsFull: + "\valid_mdb_ctes m;m src = Some (CTE s_cap s_node)\ + \ valid_mdb_ctes (m(src \ CTE (maskedAsFull s_cap cap) s_node))" + apply (clarsimp simp: maskedAsFull_def) + apply (intro conjI impI) + apply (frule mdb_inv_preserve_updateCap + [where m = m and slot = src and index = "max_free_index (capBlockSize cap)"]) + apply simp + apply (drule mdb_inv_preserve_sym) + apply (clarsimp simp:valid_mdb_ctes_def modify_map_def) + apply (frule mdb_inv_preserve.preserve_stuff,simp) + apply (frule mdb_inv_preserve.by_products,simp) + apply (rule mdb_inv_preserve.untyped_inc') + apply (erule mdb_inv_preserve_sym) + apply (clarsimp split:if_split_asm simp: isCap_simps max_free_index_def) + apply simp + apply (subgoal_tac "m = m(src \ CTE s_cap s_node)") + apply simp + apply (rule ext) + apply clarsimp + done + +lemma capAligned_maskedAsFull: + "capAligned s_cap \ capAligned (maskedAsFull s_cap cap)" + apply (case_tac s_cap) + apply (clarsimp simp:isCap_simps capAligned_def maskedAsFull_def max_free_index_def)+ + done + +lemma maskedAsFull_derived': + "\m src = Some (CTE s_cap s_node); is_derived' m ptr b c\ + \ is_derived' (m(src \ CTE (maskedAsFull s_cap cap) s_node)) ptr b c" + apply (subgoal_tac "m(src \ CTE (maskedAsFull s_cap cap) s_node) + = (modify_map m src (cteCap_update (\_. 
maskedAsFull s_cap cap)))") + apply simp + apply (clarsimp simp:maskedAsFull_def is_derived'_def) + apply (intro conjI impI) + apply (simp add:modify_map_def del:cteCap_update.simps) + apply (subst same_master_descendants) + apply simp + apply (clarsimp simp:isCap_simps capASID_def )+ + apply (clarsimp simp:modify_map_def) + done + +lemma maskedAsFull_usable_empty: + "\capMasterCap cap = capMasterCap s_cap; + isUntypedCap (maskedAsFull s_cap cap)\ + \ usableUntypedRange (maskedAsFull s_cap cap) = {}" + apply (simp add:isCap_simps maskedAsFull_def max_free_index_def split:if_split_asm) + apply fastforce+ + done + +lemma capAligned_master: + "\capAligned cap; capMasterCap cap = capMasterCap ncap\ \ capAligned ncap" + apply (case_tac cap) + apply (clarsimp simp:capAligned_def)+ + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + apply (clarsimp simp:capAligned_def)+ + done + +lemma cteInsert_untyped_inc': + "\valid_mdb' and pspace_distinct' and pspace_aligned' and valid_objs' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. untyped_inc' (ctes_of s)\" + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map + setUntypedCapAsFull_untyped_mdb' mdb_inv_preserve_fun_upd) + apply (wp getCTE_wp setUntypedCapAsFull_ctes)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (subgoal_tac "src \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "dest \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (rule conjI) + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (case_tac cte) + apply (rename_tac s_cap s_node) + apply (case_tac cteb) + apply (rename_tac d_cap d_node) + apply (simp add: nullPointer_def) + apply (subgoal_tac "mdb_insert_der + (modify_map (ctes_of s) src (cteCap_update (\_. maskedAsFull s_cap cap))) + src (maskedAsFull s_cap cap) s_node dest NullCap d_node cap") + prefer 2 + apply unfold_locales[1] + apply (clarsimp simp:modify_map_def valid_mdb_ctes_maskedAsFull)+ + apply (erule(2) valid_mdb_ctesE[OF valid_mdb_ctes_maskedAsFull]) + apply (clarsimp simp:modify_map_def) + apply (erule(2) valid_mdb_ctesE[OF valid_mdb_ctes_maskedAsFull]) + apply simp + apply (clarsimp simp:modify_map_def maskedAsFull_derived') + apply (case_tac "isMDBParentOf (CTE (maskedAsFull s_cap cap) s_node) (CTE cap + (mdbFirstBadged_update (\a. isCapRevocable cap (maskedAsFull s_cap cap)) + (mdbRevocable_update (\a. isCapRevocable cap (maskedAsFull s_cap cap)) + (mdbPrev_update (\a. src) s_node))))") + apply (subgoal_tac "mdb_insert_child + (modify_map (ctes_of s) src (cteCap_update (\_. 
maskedAsFull s_cap cap))) + src (maskedAsFull s_cap cap) s_node dest NullCap d_node cap") + prefer 2 + apply (simp add: mdb_insert_child_def mdb_insert_child_axioms_def) + apply (drule mdb_insert_child.untyped_inc_n) + apply (rule capAligned_maskedAsFull[OF valid_capAligned]) + apply (erule(1) ctes_of_valid_cap') + apply (intro impI maskedAsFull_usable_empty) + apply (clarsimp simp:is_derived'_def badge_derived'_def) + apply simp + apply (clarsimp simp: modify_map_apply untyped_inc_prev_update maskedAsFull_revokable + descendants_of_prev_update) + apply (subgoal_tac "mdb_insert_sib + (modify_map (ctes_of s) src (cteCap_update (\_. maskedAsFull s_cap cap))) + src (maskedAsFull s_cap cap) s_node dest NullCap d_node cap") + prefer 2 + apply (simp add: mdb_insert_sib_def mdb_insert_sib_axioms_def) + apply (drule mdb_insert_sib.untyped_inc_n) + apply (rule capAligned_master[OF valid_capAligned]) + apply (erule(1) ctes_of_valid_cap') + apply (clarsimp simp:is_derived'_def badge_derived'_def) + apply (clarsimp simp: modify_map_apply untyped_inc_prev_update maskedAsFull_revokable + descendants_of_prev_update) + done + +lemma irq_control_prev_update: + "irq_control (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = irq_control m" + apply (simp add: irq_control_def) + apply (rule iffI) + apply clarsimp + apply (simp only: modify_map_if) + apply (erule_tac x=p in allE) + apply (simp (no_asm_use) split: if_split_asm) + apply (case_tac "x=p") + apply fastforce + apply clarsimp + apply (erule_tac x=p' in allE) + apply simp + apply (case_tac "x=p'") + apply simp + apply fastforce + apply clarsimp + apply (erule_tac x=p in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply clarsimp + apply (case_tac "x=p'") + apply clarsimp + apply clarsimp + apply clarsimp + apply (case_tac "x=p'") + apply clarsimp + apply clarsimp + done + +lemma cteInsert_irq_control: + "\valid_mdb' and pspace_distinct' and pspace_aligned' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. 
irq_control (ctes_of s)\<rbrace>"
+  apply (unfold cteInsert_def updateCap_def)
+  apply (simp add: valid_mdb'_def split del: if_split)
+  apply (wp updateMDB_ctes_of_no_0 getCTE_wp')
+     apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply)
+     apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of
+       setUntypedCapAsFull_ctes_of_no_0 setUntypedCapAsFull_irq_control mdb_inv_preserve_fun_upd
+       mdb_inv_preserve_modify_map,simp)
+    apply (wp getCTE_wp)+
+  apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply)
+  apply (subgoal_tac "src \<noteq> 0")
+   prefer 2
+   apply (fastforce simp: valid_mdb_ctes_def no_0_def)
+  apply (subgoal_tac "dest \<noteq> 0")
+   prefer 2
+   apply (fastforce simp: valid_mdb_ctes_def no_0_def)
+  apply (rule conjI)
+   apply (fastforce simp: valid_mdb_ctes_def no_0_def)
+  apply (case_tac cte)
+  apply (rename_tac s_cap s_node)
+  apply (case_tac cteb)
+  apply (rename_tac d_cap d_node)
+  apply (simp add: nullPointer_def)
+  apply (subgoal_tac "mdb_insert_der (ctes_of s) src s_cap s_node dest NullCap d_node cap")
+   prefer 2
+   apply unfold_locales[1]
+        apply (assumption|rule refl)+
+     apply (simp add: valid_mdb_ctes_def)
+    apply (simp add: valid_mdb_ctes_def)
+   apply assumption+
+  apply (drule mdb_insert_der.irq_control_n)
+  apply (clarsimp simp: modify_map_apply irq_control_prev_update fun_upd_def)
+  done
+
+lemma capMaster_isUntyped:
+  "capMasterCap c = capMasterCap c' \<Longrightarrow> isUntypedCap c = isUntypedCap c'"
+  by (simp add: capMasterCap_def isCap_simps split: capability.splits)
+
+lemma capMaster_capRange:
+  "capMasterCap c = capMasterCap c' \<Longrightarrow> capRange c = capRange c'"
+  by (simp add: capMasterCap_def capRange_def split: capability.splits arch_capability.splits)
+
+lemma capMaster_untypedRange:
+  "capMasterCap c = capMasterCap c' \<Longrightarrow> untypedRange c = untypedRange c'"
+  by (simp add: capMasterCap_def capRange_def split: capability.splits arch_capability.splits)
+
+lemma capMaster_capClass:
+  "capMasterCap c = capMasterCap c' \<Longrightarrow> capClass c = capClass c'"
+  by (simp add: capMasterCap_def split: capability.splits arch_capability.splits)
+
+lemma distinct_zombies_nonCTE_modify_map:
+  "\<And>m x f. \<lbrakk> \<forall>cte. cteCap (f cte) = cteCap cte \<rbrakk>
+      \<Longrightarrow> distinct_zombies (modify_map m x f) = distinct_zombies m"
+  apply (simp add: distinct_zombies_def modify_map_def o_def)
+  apply (rule_tac f=distinct_zombie_caps in arg_cong)
+  apply (rule ext)
+  apply simp
+  apply (simp add: map_option.compositionality o_def)
+  done
+
+lemma updateCapFreeIndex_dlist:
+  assumes preserve:"\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+  shows
+  "\<lbrace>\<lambda>s. P (valid_dlist (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE \<and> isUntypedCap (cteCap c)) src s\<rbrace>
+   updateCap src (capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE))
+   \<lbrace>\<lambda>r s. P (valid_dlist (Q (ctes_of s)))\<rbrace>"
+  apply (wp updateCap_ctes_of_wp)
+  apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src
+    (cteCap_update (\<lambda>_. capFreeIndex_update (\<lambda>_. index) (cteCap srcCTE)))))")
+   apply (drule mdb_inv_preserve.preserve_stuff)
+   apply simp
+  apply (rule preserve)
+  apply (rule mdb_inv_preserve_updateCap)
+  apply (clarsimp simp:cte_wp_at_ctes_of)+
+done
+
+lemma setUntypedCapAsFull_valid_dlist:
+  assumes preserve:
+    "\<And>m m'. mdb_inv_preserve m m' \<Longrightarrow> mdb_inv_preserve (Q m) (Q m')"
+  shows
+  "\<lbrace>\<lambda>s. P (valid_dlist (Q (ctes_of s))) \<and> cte_wp_at' (\<lambda>c. c = srcCTE) src s\<rbrace>
+   setUntypedCapAsFull (cteCap srcCTE) cap src
+   \<lbrace>\<lambda>r s.
P (valid_dlist (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_dlist) + apply (clarsimp simp:preserve cte_wp_at_ctes_of)+ + apply wp + apply clarsimp + done + +lemma valid_dlist_prevD: + "\m p = Some cte;valid_dlist m;mdbPrev (cteMDBNode cte) \ 0\ + \ (\cte'. m (mdbPrev (cteMDBNode cte)) = Some cte' \ + mdbNext (cteMDBNode cte') = p)" + by (clarsimp simp:valid_dlist_def Let_def) + +lemma valid_dlist_nextD: + "\m p = Some cte;valid_dlist m;mdbNext (cteMDBNode cte) \ 0\ + \ (\cte'. m (mdbNext (cteMDBNode cte)) = Some cte' \ + mdbPrev (cteMDBNode cte') = p)" + by (clarsimp simp:valid_dlist_def Let_def) + +lemma no_loops_no_l2_loop: + "\valid_dlist m; no_loops m; m p = Some cte;mdbPrev (cteMDBNode cte) = mdbNext (cteMDBNode cte)\ + \ mdbNext (cteMDBNode cte) = 0" + apply (rule ccontr) + apply (subgoal_tac "m \ p \ (mdbNext (cteMDBNode cte))") + prefer 2 + apply (clarsimp simp:mdb_next_rel_def mdb_next_def) + apply (subgoal_tac "m \ (mdbNext (cteMDBNode cte)) \ p") + prefer 2 + apply (clarsimp simp:mdb_next_rel_def mdb_next_def) + apply (frule(2) valid_dlist_nextD) + apply clarsimp + apply (frule(1) valid_dlist_prevD) + apply simp+ + apply (drule(1) transitive_closure_trans) + apply (simp add:no_loops_def) + done + +lemma cteInsert_no_0: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and + (\s. src \ dest) and K (capAligned cap) and valid_objs' and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. no_0 (ctes_of s) \" + apply (rule hoare_name_pre_state) + apply clarsimp + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map getCTE_wp + setUntypedCapAsFull_valid_dlist mdb_inv_preserve_fun_upd | simp)+ + apply (intro conjI impI) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (clarsimp simp:valid_mdb_ctes_def no_0_def) + done + +lemma cteInsert_valid_dlist: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and + (\s. src \ dest) and K (capAligned cap) and valid_objs' and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. 
valid_dlist (ctes_of s) \" + apply (rule hoare_name_pre_state) + apply clarsimp + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map getCTE_wp + setUntypedCapAsFull_valid_dlist mdb_inv_preserve_fun_upd | simp)+ + apply (intro conjI impI) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (intro conjI) + apply (clarsimp simp:valid_mdb_ctes_def no_0_def)+ + apply (frule mdb_chain_0_no_loops) + apply (simp add:no_0_def) + apply (rule valid_dlistI) + apply (case_tac "p = dest") + apply (clarsimp simp:modify_map_def nullPointer_def split:if_split_asm)+ + apply (frule(2) valid_dlist_prevD) + apply simp + apply (subgoal_tac "mdbPrev (cteMDBNode ctea) \ mdbNext (cteMDBNode ctea)") + prefer 2 + apply (clarsimp) + apply (drule(3) no_loops_no_l2_loop[rotated -1],simp) + apply (subgoal_tac "mdbPrev (cteMDBNode ctea) \ dest") + apply clarsimp+ + apply (frule_tac p = p and m = "ctes_of sa" in valid_dlist_prevD) + apply simp+ + apply fastforce + apply (case_tac "p = dest") + apply (clarsimp simp:modify_map_def nullPointer_def split:if_split_asm)+ + apply (frule(2) valid_dlist_nextD,clarsimp) + apply (clarsimp simp:modify_map_def nullPointer_def split:if_split_asm) + apply (frule(2) valid_dlist_nextD) + apply simp + apply (subgoal_tac "mdbPrev (cteMDBNode ctea) \ mdbNext (cteMDBNode ctea)") + prefer 2 + apply (clarsimp) + apply (drule(3) no_loops_no_l2_loop[rotated -1],simp) + apply clarsimp + apply (intro conjI impI) + apply clarsimp+ + apply (drule_tac cte = cte' in no_loops_no_l2_loop,simp) + apply simp+ + apply (frule(2) valid_dlist_nextD) + apply clarsimp + apply (frule_tac p = p and m = "ctes_of sa" in valid_dlist_nextD) + apply clarsimp+ + apply (rule conjI) + apply fastforce + apply (intro conjI impI,clarsimp+) + apply (frule_tac valid_dlist_nextD) + apply clarsimp+ + apply (frule_tac valid_dlist_nextD) + apply clarsimp+ + done + +lemma cteInsert_mdb' [wp]: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and (\s. src \ dest) and K (capAligned cap) and valid_objs' and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s) \ + cteInsert cap src dest + \\_. valid_mdb'\" + apply (simp add:valid_mdb'_def valid_mdb_ctes_def) + apply (rule_tac Q = "\r s. 
valid_dlist (ctes_of s) \ irq_control (ctes_of s) \ + no_0 (ctes_of s) \ mdb_chain_0 (ctes_of s) \ + mdb_chunked (ctes_of s) \ untyped_mdb' (ctes_of s) \ untyped_inc' (ctes_of s) \ + Q s" for Q + in hoare_strengthen_post) + prefer 2 + apply clarsimp + apply assumption + apply (rule hoare_name_pre_state) + apply (wp cteInsert_no_0 cteInsert_valid_dlist cteInsert_mdb_chain_0 cteInsert_untyped_inc' + cteInsert_mdb_chunked cteInsert_untyped_mdb cteInsert_irq_control) + apply (unfold cteInsert_def) + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 + setUntypedCapAsFull_valid_dlist setUntypedCapAsFull_distinct_zombies + setUntypedCapAsFull_valid_badges setUntypedCapAsFull_caps_contained + setUntypedCapAsFull_valid_nullcaps setUntypedCapAsFull_ut_revocable + setUntypedCapAsFull_class_links setUntypedCapAsFull_reply_masters_rvk_fb + mdb_inv_preserve_fun_upd + mdb_inv_preserve_modify_map getCTE_wp| simp del:fun_upd_apply)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + defer + apply (clarsimp simp:valid_mdb_ctes_def valid_mdb'_def simp del:fun_upd_apply)+ + apply (case_tac cte) + apply (rename_tac cap1 node1) + apply (case_tac x) + apply (rename_tac cap2 node2) + apply (case_tac node1) + apply (case_tac node2) + apply (clarsimp simp:valid_mdb_ctes_def no_0_def nullPointer_def) + apply (intro conjI impI) + apply clarsimp + apply (rename_tac s src_cap word1 word2 bool1a bool2a bool1 bool2) +proof - + fix s :: kernel_state + fix bool1 bool2 src_cap word1 word2 bool1a bool2a + let ?c1 = "(CTE src_cap (MDB word1 word2 bool1a bool2a))" + let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" + let ?C = "(modify_map + (modify_map + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (cteMDBNode_update (\a. MDB word1 src (isCapRevocable cap src_cap) (isCapRevocable cap src_cap)))) + src (cteMDBNode_update (mdbNext_update (\_. dest)))) + word1 (cteMDBNode_update (mdbPrev_update (\_. dest))))" + let ?m = "ctes_of s" + let ?prv = "\cte. mdbPrev (cteMDBNode cte)" + let ?nxt = "\cte. mdbNext (cteMDBNode cte)" + + assume "pspace_distinct' s" and "pspace_aligned' s" and srcdest: "src \ dest" + and dest0: "dest \ 0" + and cofs: "ctes_of s src = Some ?c1" and cofd: "ctes_of s dest = Some ?c2" + and is_der: "is_derived' (ctes_of s) src cap src_cap" + and aligned: "capAligned cap" + and vd: "valid_dlist ?m" + and no0: "?m 0 = None" + and chain: "mdb_chain_0 ?m" + and badges: "valid_badges ?m" + and chunk: "mdb_chunked ?m" + and contained: "caps_contained' ?m" + and untyped_mdb: "untyped_mdb' ?m" + and untyped_inc: "untyped_inc' ?m" + and class_links: "class_links ?m" + and distinct_zombies: "distinct_zombies ?m" + and irq: "irq_control ?m" + and reply_masters_rvk_fb: "reply_masters_rvk_fb ?m" + and vn: "valid_nullcaps ?m" + and ut_rev:"ut_revocable' ?m" + + have no_loop: "no_loops ?m" + apply (rule mdb_chain_0_no_loops[OF chain]) + apply (simp add:no_0_def no0) + done + + have badge: "badge_derived' cap src_cap" + using is_der + by (clarsimp simp:is_derived'_def) + + have vmdb: "valid_mdb_ctes ?m" + by (auto simp: vmdb_def valid_mdb_ctes_def no_0_def, fact+) + + have src0: "src \ 0" + using cofs no0 by clarsimp + + have destnull: + "cte_mdb_prop ?m dest (\m. 
mdbPrev m = 0 \ mdbNext m = 0)" + using cofd unfolding cte_mdb_prop_def + by auto + + have srcwd: "?m \ src \ word1" + using cofs by (simp add: next_unfold') + + have w1ned[simp]: "word1 \ dest" + proof (cases "word1 = 0") + case True thus ?thesis using dest0 by auto + next + case False + thus ?thesis using cofs cofd src0 dest0 False vd + by - (erule (1) valid_dlistEn, (clarsimp simp: nullPointer_def)+) + qed + + have w2ned[simp]: "word2 \ dest" + proof (cases "word2 = 0") + case True thus ?thesis using dest0 by auto + next + case False + thus ?thesis using cofs cofd src0 dest0 False vd + by - (erule (1) valid_dlistEp, (clarsimp simp: nullPointer_def)+) + qed + + have w1nes[simp]: "word1 \ src" using vmdb cofs + by - (drule (1) no_self_loop_next, simp) + + have w2nes[simp]: "word2 \ src" using vmdb cofs + by - (drule (1) no_self_loop_prev, simp) + + from is_der have notZomb1: "\ isZombie cap" + by (clarsimp simp: isCap_simps is_derived'_def badge_derived'_def) + + from is_der have notZomb2: "\ isZombie src_cap" + by (clarsimp simp: isCap_simps is_derived'_def) + + from badge have masters: "capMasterCap cap = capMasterCap src_cap" + by (clarsimp simp: badge_derived'_def) + + note blah[simp] = w2nes[symmetric] w1nes[symmetric] w1ned[symmetric] + w2ned[symmetric] srcdest srcdest[symmetric] + + have mdb_next_disj: + "\p p'. (?C \ p \ p' \ + ?m \ p \ p' \ p \ src \ p'\ dest \ (p' = word1 \ p' = 0) + \ p = src \ p' = dest \ p = dest \ p' = word1)" + apply (case_tac "p = src") + apply (clarsimp simp:mdb_next_unfold modify_map_cases) + apply (case_tac "p = dest") + apply (clarsimp simp:mdb_next_unfold modify_map_cases)+ + using cofs cofd vd no0 + apply - + apply (case_tac "p = word1") + apply clarsimp + apply (intro conjI) + apply clarsimp + apply (frule_tac p = "word1" and m = "?m" in valid_dlist_nextD) + apply clarsimp+ + apply (frule_tac p = "mdbNext node" and m = "?m" in valid_dlist_nextD) + apply clarsimp+ + apply (frule_tac p = "mdbNext node" in no_loops_no_l2_loop[OF _ no_loop]) + apply simp+ + apply (intro conjI) + apply clarsimp + apply (frule_tac p = p and m = "?m" in valid_dlist_nextD) + apply (clarsimp+)[3] + apply (intro impI) + apply (rule ccontr) + apply clarsimp + apply (frule_tac p = src and m = "?m" in valid_dlist_nextD) + apply clarsimp+ + apply (frule_tac p = p and m = "?m" in valid_dlist_nextD) + apply clarsimp+ + done + + have ctes_ofD: + "\p cte. \?C p = Some cte; p\ dest; p\ src\ \ \cteb. 
(?m p = Some cteb \ cteCap cte = cteCap cteb)" + by (clarsimp simp:modify_map_def split:if_splits) + + + show "valid_badges ?C" + using srcdest badge cofs badges cofd + unfolding valid_badges_def + apply (intro impI allI) + apply (drule mdb_next_disj) + apply (elim disjE) + defer + apply (clarsimp simp:modify_map_cases dest0 src0) + apply (clarsimp simp: Retype_H.isCapRevocable_def isCapRevocable_def badge_derived'_def) + subgoal by (case_tac src_cap,auto simp:isCap_simps sameRegionAs_def) + apply (clarsimp simp:modify_map_cases valid_badges_def) + apply (frule_tac x=src in spec, erule_tac x=word1 in allE, erule allE, erule impE) + apply fastforce + apply simp + apply (clarsimp simp:mdb_next_unfold badge_derived'_def split: if_split_asm) + apply (thin_tac "All P" for P) + subgoal by (cases src_cap, + auto simp:mdb_next_unfold isCap_simps sameRegionAs_def Let_def split: if_splits) + apply (case_tac "word1 = p'") + apply (clarsimp simp:modify_map_cases valid_badges_def mdb_next_unfold src0 dest0 no0)+ + apply (case_tac "p = dest") + apply (clarsimp simp:dest0 src0 no0)+ + apply (case_tac z) + apply (rename_tac capability mdbnode) + apply clarsimp + apply (drule_tac x = p in spec,drule_tac x = "mdbNext mdbnode" in spec) + by (auto simp:isCap_simps sameRegionAs_def) + + from badge + have isUntyped_eq: "isUntypedCap cap = isUntypedCap src_cap" + apply (clarsimp simp:badge_derived'_def) + apply (case_tac cap,auto simp:isCap_simps) + done + + from badge + have [simp]: "capRange cap = capRange src_cap" + apply (clarsimp simp:badge_derived'_def) + apply (case_tac cap) + apply (clarsimp simp:isCap_simps capRange_def)+ + (* 5 subgoals *) + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + (* 9 subgoals *) + apply (clarsimp simp:isCap_simps capRange_def)+ + done + + have [simp]: "untypedRange cap = untypedRange src_cap" + using badge + apply (clarsimp simp:badge_derived'_def dest!:capMaster_untypedRange) + done + + from contained badge srcdest cofs cofd is_der no0 + show "caps_contained' ?C" + apply (clarsimp simp add: caps_contained'_def) + apply (case_tac "p = dest") + apply (case_tac "p' = dest") + apply (clarsimp simp:modify_map_def split:if_splits) + apply (case_tac src_cap,auto)[1] + apply (case_tac "p' = src") + apply (clarsimp simp:modify_map_def split:if_splits) + apply (clarsimp simp:badge_derived'_def) + apply (case_tac src_cap,auto)[1] + apply (drule(2) ctes_ofD) + apply (clarsimp simp:modify_map_def split:if_splits) + apply (frule capRange_untyped) + apply (erule_tac x=src in allE, erule_tac x=p' in allE, simp) + apply (case_tac cteb) + apply (clarsimp) + apply blast + apply (case_tac "p' = dest") + apply (case_tac "p = src") + apply (clarsimp simp:modify_map_def split:if_splits) + apply (drule capRange_untyped) + subgoal by (case_tac cap,auto simp:isCap_simps badge_derived'_def) + apply (clarsimp simp:modify_map_def split:if_splits) + apply (drule_tac x = word1 in spec) + apply (drule_tac x = src in spec) + apply (case_tac z) + apply (clarsimp simp:isUntyped_eq) + apply blast + apply (drule_tac x = p in spec) + apply (drule_tac x = src in spec) + apply (frule capRange_untyped) + apply (clarsimp simp:isUntyped_eq) + apply blast + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (clarsimp simp:modify_map_def split:if_splits) + apply ((case_tac z,fastforce)+)[5] + by fastforce+ + + show "valid_nullcaps ?C" + using is_der vn cofs vd no0 + apply (simp add: valid_nullcaps_def) + apply (clarsimp simp:modify_map_def is_derived'_def) + apply (rule 
conjI) + apply (clarsimp simp: is_derived'_def badge_derived'_def)+ + apply (drule_tac x = word1 in spec) + apply (case_tac z) + apply (clarsimp simp:nullMDBNode_def) + apply (drule(1) valid_dlist_nextD) + apply simp + apply clarsimp + apply (simp add:nullPointer_def src0) + done + + from vmdb srcdest cofs ut_rev + show "ut_revocable' ?C" + apply (clarsimp simp: valid_mdb_ctes_def ut_revocable'_def modify_map_def) + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: Retype_H.isCapRevocable_def isCapRevocable_def isCap_simps)+ + apply auto + apply (drule_tac x= src in spec) + apply clarsimp + apply (case_tac z) + apply clarsimp + done + + from class_links srcdest badge cofs cofd no0 vd + show "class_links ?C" + unfolding class_links_def + apply (intro allI impI) + apply (drule mdb_next_disj) + apply (elim disjE) + apply (clarsimp simp:modify_map_def mdb_next_unfold split:if_split_asm) + apply (clarsimp simp: badge_derived'_def modify_map_def + split: if_split_asm) + apply (erule capMaster_capClass) + apply (clarsimp simp:modify_map_def split:if_splits) + apply (drule_tac x = src in spec) + apply (drule_tac x = word1 in spec) + apply (clarsimp simp:mdb_next_unfold) + apply (case_tac z) + apply (clarsimp simp:badge_derived'_def) + apply (drule capMaster_capClass) + apply simp + done + + from distinct_zombies badge + show "distinct_zombies ?C" + apply (simp add:distinct_zombies_nonCTE_modify_map) + apply (erule_tac distinct_zombies_copyMasterE[where x=src]) + apply (rule cofs) + apply (simp add: masters) + apply (simp add: notZomb1 notZomb2) + done + + from reply_masters_rvk_fb is_der + show "reply_masters_rvk_fb ?C" + apply (clarsimp simp:reply_masters_rvk_fb_def) + apply (erule ranE) + apply (clarsimp simp:modify_map_def split:if_split_asm) + apply fastforce+ + apply (clarsimp simp:is_derived'_def isCap_simps) + apply fastforce + done +qed + +crunch state_refs_of'[wp]: cteInsert "\s. P (state_refs_of' s)" + (wp: crunch_wps) + +lemma setCTE_state_hyp_refs_of'[wp]: + "\\s. P (state_hyp_refs_of' s)\ setCTE p cte \\rv s. P (state_hyp_refs_of' s)\" + unfolding setCTE_def + apply (rule setObject_state_hyp_refs_of_eq) + apply (clarsimp simp: updateObject_cte in_monad typeError_def + in_magnitude_check objBits_simps + split: kernel_object.split_asm if_split_asm) + done + +crunches cteInsert + for state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" + and aligned'[wp]: pspace_aligned' + and distinct'[wp]: pspace_distinct' + and pspace_canonical'[wp]: pspace_canonical' + and no_0_obj'[wp]: no_0_obj' + (wp: crunch_wps) + +lemma cteInsert_valid_pspace: + "\valid_pspace' and valid_cap' cap and (\s. src \ dest) and valid_objs' and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_. valid_pspace'\" + unfolding valid_pspace'_def + apply (rule hoare_pre) + apply (wp cteInsert_valid_objs) + apply (fastforce elim: valid_capAligned) + done + +lemma setCTE_ko_wp_at_live[wp]: + "\\s. P (ko_wp_at' live' p' s)\ + setCTE p v + \\rv s. P (ko_wp_at' live' p' s)\" + apply (clarsimp simp: setCTE_def setObject_def split_def + valid_def in_monad ko_wp_at'_def + split del: if_split + elim!: rsubst[where P=P]) + apply (drule(1) updateObject_cte_is_tcb_or_cte [OF _ refl, rotated]) + apply (elim exE conjE disjE) + apply (clarsimp simp: ps_clear_upd objBits_simps live'_def hyp_live'_def + lookupAround2_char1) + apply (simp add: tcb_cte_cases_def split: if_split_asm) + apply (clarsimp simp: ps_clear_upd objBits_simps live'_def) + done + +lemma setCTE_iflive': + "\\s. 
cte_wp_at' (\cte'. \p'\zobj_refs' (cteCap cte') + - zobj_refs' (cteCap cte). + ko_wp_at' (Not \ live') p' s) p s + \ if_live_then_nonz_cap' s\ + setCTE p cte + \\rv s. if_live_then_nonz_cap' s\" + unfolding if_live_then_nonz_cap'_def ex_nonz_cap_to'_def + apply (rule hoare_pre) + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + hoare_vcg_ex_lift setCTE_weak_cte_wp_at) + apply clarsimp + apply (drule spec, drule(1) mp) + apply clarsimp + apply (rule_tac x=cref in exI) + apply (clarsimp simp: cte_wp_at'_def) + apply (rule ccontr) + apply (drule bspec, fastforce) + apply (clarsimp simp: ko_wp_at'_def) + done + +lemma updateMDB_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s\ + updateMDB p m + \\rv s. if_live_then_nonz_cap' s\" + apply (clarsimp simp: updateMDB_def) + apply (rule bind_wp [OF _ getCTE_sp]) + apply (wp setCTE_iflive') + apply (clarsimp elim!: cte_wp_at_weakenE') + done + +lemma updateCap_iflive': + "\\s. cte_wp_at' (\cte'. \p'\zobj_refs' (cteCap cte') + - zobj_refs' cap. + ko_wp_at' (Not \ live') p' s) p s + \ if_live_then_nonz_cap' s\ + updateCap p cap + \\rv s. if_live_then_nonz_cap' s\" + apply (simp add: updateCap_def) + apply (rule bind_wp [OF _ getCTE_sp]) + apply (wp setCTE_iflive') + apply (clarsimp elim!: cte_wp_at_weakenE') + done + +lemma setCTE_ko_wp_at_not_live[wp]: + "\\s. P (ko_wp_at' (Not \ live') p' s)\ + setCTE p v + \\rv s. P (ko_wp_at' (Not \ live') p' s)\" + apply (clarsimp simp: setCTE_def setObject_def split_def + valid_def in_monad ko_wp_at'_def + split del: if_split + elim!: rsubst[where P=P]) + apply (drule(1) updateObject_cte_is_tcb_or_cte [OF _ refl, rotated]) + apply (elim exE conjE disjE) + apply (clarsimp simp: ps_clear_upd objBits_simps live'_def hyp_live'_def + lookupAround2_char1) + apply (simp add: tcb_cte_cases_def split: if_split_asm) + apply (clarsimp simp: ps_clear_upd objBits_simps live'_def) + done + +lemma setUntypedCapAsFull_ko_wp_not_at'[wp]: + "\\s. P (ko_wp_at' (Not \ live') p' s)\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P ( ko_wp_at' (Not \ live') p' s)\" + apply (clarsimp simp:setUntypedCapAsFull_def updateCap_def) + apply (wp setCTE_ko_wp_at_live setCTE_ko_wp_at_not_live) +done + +lemma setUntypedCapAsFull_ko_wp_at'[wp]: + "\\s. P (ko_wp_at' live' p' s)\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P ( ko_wp_at' live' p' s)\" + apply (clarsimp simp:setUntypedCapAsFull_def updateCap_def) + apply (wp setCTE_ko_wp_at_live setCTE_ko_wp_at_live) + done + +(*FIXME:MOVE*) +lemma zobj_refs'_capFreeIndex_update[simp]: + "isUntypedCap ctecap \ + zobj_refs' (capFreeIndex_update f (ctecap)) = zobj_refs' ctecap" + by (case_tac ctecap,auto simp:isCap_simps) + +lemma setUntypedCapAsFull_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\rv s. 
if_live_then_nonz_cap' s\" + apply (clarsimp simp:if_live_then_nonz_cap'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift) + apply (clarsimp simp:setUntypedCapAsFull_def split del: if_split) + apply (wp hoare_vcg_if_split) + apply (clarsimp simp:ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (wp updateCap_ctes_of_wp)+ + apply clarsimp + apply (elim allE impE) + apply (assumption) + apply (clarsimp simp:ex_nonz_cap_to'_def cte_wp_at_ctes_of modify_map_def split:if_splits) + apply (rule_tac x = cref in exI) + apply (intro conjI impI; clarsimp) + done + + +lemma maskedAsFull_simps[simp]: + "maskedAsFull capability.NullCap cap = capability.NullCap" + by (auto simp:maskedAsFull_def) + +lemma cteInsert_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteInsert cap src dest + \\rv. if_live_then_nonz_cap'\" + apply (simp add: cteInsert_def split del: if_split) + apply (wp updateCap_iflive' hoare_drop_imps) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (wp hoare_vcg_conj_lift hoare_vcg_ex_lift hoare_vcg_ball_lift getCTE_wp + setUntypedCapAsFull_ctes_of setUntypedCapAsFull_if_live_then_nonz_cap')+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (intro conjI) + apply (rule_tac x = "case (ctes_of s dest) of Some a \a" in exI) + apply (clarsimp) + apply (case_tac cte,simp) + apply clarsimp+ + done + +lemma ifunsafe'_def2: + "if_unsafe_then_cap' = + (\s. \cref cte. ctes_of s cref = Some cte \ cteCap cte \ NullCap + \ (\cref' cte'. ctes_of s cref' = Some cte' + \ cref \ cte_refs' (cteCap cte') (irq_node' s)))" + by (fastforce simp: if_unsafe_then_cap'_def cte_wp_at_ctes_of ex_cte_cap_to'_def) + +lemma ifunsafe'_def3: + "if_unsafe_then_cap' = + (\s. \cref cap. cteCaps_of s cref = Some cap \ cap \ NullCap + \ (\cref' cap'. cteCaps_of s cref' = Some cap' + \ cref \ cte_refs' cap' (irq_node' s)))" + by (fastforce simp: cteCaps_of_def o_def ifunsafe'_def2) + +lemma tree_cte_cteCap_eq: + "cte_wp_at' (P \ cteCap) p s = (case_option False P (cteCaps_of s p))" + apply (simp add: cte_wp_at_ctes_of cteCaps_of_def) + apply (cases "ctes_of s p", simp_all) + done + +lemma updateMDB_cteCaps_of: + "\\s. P (cteCaps_of s)\ updateMDB ptr f \\rv s. P (cteCaps_of s)\" + apply (simp add: cteCaps_of_def) + apply (wp updateMDB_ctes_of_wp) + apply (safe elim!: rsubst [where P=P] intro!: ext) + apply (case_tac "ctes_of s x") + apply (clarsimp simp: modify_map_def)+ + done + +lemma setCTE_ksInterruptState[wp]: + "\\s. P (ksInterruptState s)\ setCTE param_a param_b \\_ s. P (ksInterruptState s)\" + by (wp setObject_ksInterrupt updateObject_cte_inv | simp add: setCTE_def)+ + +crunch ksInterruptState[wp]: cteInsert "\s. P (ksInterruptState s)" + (wp: crunch_wps) + +lemmas updateMDB_cteCaps_of_ksInt[wp] + = hoare_use_eq [where f=ksInterruptState, OF updateMDB_ksInterruptState updateMDB_cteCaps_of] + +lemma updateCap_cteCaps_of: + "\\s. P (modify_map (cteCaps_of s) ptr (K cap))\ updateCap ptr cap \\rv s. P (cteCaps_of s)\" + apply (simp add: cteCaps_of_def) + apply (wp updateCap_ctes_of_wp) + apply (erule rsubst [where P=P]) + apply (case_tac "ctes_of s ptr"; fastforce simp: modify_map_def) + done + +lemmas updateCap_cteCaps_of_int[wp] + = hoare_use_eq[where f=ksInterruptState, OF updateCap_ksInterruptState updateCap_cteCaps_of] + +lemma getCTE_cteCap_wp: + "\\s. case (cteCaps_of s ptr) of None \ True | Some cap \ Q cap s\ getCTE ptr \\rv. 
Q (cteCap rv)\" + apply (wp getCTE_wp) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of) + done + +lemma capFreeIndex_update_cte_refs'[simp]: + "isUntypedCap a \ cte_refs' (capFreeIndex_update f a) = cte_refs' a " + apply (rule ext) + apply (clarsimp simp:isCap_simps) + done + +lemma cteInsert_ifunsafe'[wp]: + "\if_unsafe_then_cap' and cte_wp_at' (\c. cteCap c = NullCap) dest + and ex_cte_cap_to' dest\ + cteInsert cap src dest + \\rv s. if_unsafe_then_cap' s\" + apply (simp add: ifunsafe'_def3 cteInsert_def setUntypedCapAsFull_def + split del: if_split) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of ex_cte_cap_to'_def + cteCaps_of_def + dest!: modify_map_K_D + split: if_split_asm) + apply (intro conjI) + apply clarsimp + apply (erule_tac x = crefa in allE) + apply (clarsimp simp:modify_map_def split:if_split_asm) + apply (rule_tac x = cref in exI) + apply fastforce + apply (clarsimp simp:isCap_simps) + apply (rule_tac x = cref' in exI) + apply fastforce + apply (intro conjI impI) + apply clarsimp + apply (rule_tac x = cref' in exI) + apply fastforce + apply (clarsimp simp:modify_map_def) + apply (erule_tac x = crefa in allE) + apply (intro conjI impI) + apply clarsimp + apply (rule_tac x = cref in exI) + apply fastforce + apply (clarsimp simp:isCap_simps) + apply (rule_tac x = cref' in exI) + apply fastforce +done + +lemma setCTE_inQ[wp]: + "\\s. P (obj_at' (inQ d p) t s)\ setCTE ptr v \\rv s. P (obj_at' (inQ d p) t s)\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb') + apply (simp_all add: inQ_def) + done + +crunch inQ[wp]: cteInsert "\s. P (obj_at' (inQ d p) t s)" + (wp: crunch_wps) + +lemma setCTE_it'[wp]: + "\\s. P (ksIdleThread s)\ setCTE c p \\_ s. P (ksIdleThread s)\" + apply (simp add: setCTE_def setObject_def split_def updateObject_cte) + by (wpsimp+; auto) + +lemma setCTE_idle [wp]: + "\valid_idle'\ setCTE p cte \\rv. valid_idle'\" + apply (simp add: valid_idle'_def) + apply (rule hoare_lift_Pf [where f="ksIdleThread"]) + apply (intro hoare_vcg_conj_lift; (solves \wpsimp\)?) + apply (clarsimp simp: setCTE_def) + apply (rule setObject_cte_obj_at_tcb'[where P="idle_tcb'", simplified]) + apply wpsimp + done + +lemma getCTE_no_idle_cap: + "\valid_global_refs'\ + getCTE p + \\rv s. ksIdleThread s \ capRange (cteCap rv)\" + apply (wp getCTE_wp) + apply (clarsimp simp: valid_global_refs'_def valid_refs'_def cte_wp_at_ctes_of) + apply blast + done + +lemma updateMDB_idle'[wp]: + "\valid_idle'\ updateMDB p m \\rv. valid_idle'\" + apply (clarsimp simp add: updateMDB_def) + apply (rule hoare_pre) + apply (wp | simp add: valid_idle'_def)+ + by fastforce + +lemma updateCap_idle': + "\valid_idle'\ updateCap p c \\rv. valid_idle'\" + apply (simp add: updateCap_def) + apply (wp | simp)+ + done + +crunch idle [wp]: setUntypedCapAsFull "valid_idle'" + (wp: crunch_wps simp: cte_wp_at_ctes_of) + +lemma cteInsert_idle'[wp]: + "\valid_idle'\ cteInsert cap src dest \\rv. valid_idle'\" + apply (simp add: cteInsert_def) + apply (wp updateMDB_idle' updateCap_idle' | rule hoare_drop_imp | simp)+ + done + +lemma setCTE_arch [wp]: + "\\s. P (ksArchState s)\ setCTE p c \\_ s. P (ksArchState s)\" + apply (simp add: setCTE_def setObject_def split_def updateObject_cte) + apply (wpsimp+; auto) + done + +lemma setCTE_valid_arch[wp]: + "\valid_arch_state'\ setCTE p c \\_. 
valid_arch_state'\" + apply (wp valid_arch_state_lift' setCTE_typ_at') + apply (simp add: setCTE_def) + apply (clarsimp simp: setObject_def split_def valid_def in_monad) + apply (rule_tac P=P in rsubst, assumption) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (erule disjE) + apply (clarsimp simp: ko_wp_at'_def lookupAround2_char1 is_vcpu'_def ps_clear_upd) + apply (clarsimp simp: ko_wp_at'_def lookupAround2_char1 is_vcpu'_def ps_clear_upd) + apply assumption + done + +lemma setCTE_global_refs[wp]: + "\\s. P (global_refs' s)\ setCTE p c \\_ s. P (global_refs' s)\" + apply (simp add: setCTE_def setObject_def split_def updateObject_cte global_refs'_def) + apply (wpsimp+; auto) + done + +lemma setCTE_gsMaxObjectSize[wp]: + "\\s. P (gsMaxObjectSize s)\ setCTE p c \\_ s. P (gsMaxObjectSize s)\" + apply (simp add: setCTE_def setObject_def split_def updateObject_cte) + apply (wpsimp+; auto) + done + +lemma setCTE_valid_globals[wp]: + "\valid_global_refs' and (\s. kernel_data_refs \ capRange (cteCap c) = {}) + and (\s. 2 ^ capBits (cteCap c) \ gsMaxObjectSize s)\ + setCTE p c + \\_. valid_global_refs'\" + apply (simp add: valid_global_refs'_def valid_refs'_def pred_conj_def) + apply (rule hoare_lift_Pf2 [where f=global_refs']) + apply (rule hoare_lift_Pf2 [where f=gsMaxObjectSize]) + apply wp + apply (clarsimp simp: ran_def valid_cap_sizes'_def) + apply metis + apply wp+ + done + +lemma updateMDB_global_refs [wp]: + "\valid_global_refs'\ updateMDB p m \\rv. valid_global_refs'\" + apply (clarsimp simp add: updateMDB_def) + apply (rule hoare_pre) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_global_refs'_def valid_refs'_def valid_cap_sizes'_def) + apply blast + done + +lemma updateCap_global_refs [wp]: + "\valid_global_refs' and (\s. kernel_data_refs \ capRange cap = {}) + and (\s. 2 ^ capBits cap \ gsMaxObjectSize s)\ + updateCap p cap + \\rv. valid_global_refs'\" + apply (clarsimp simp add: updateCap_def) + apply (rule hoare_pre) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +crunch arch [wp]: cteInsert "\s. P (ksArchState s)" + (wp: crunch_wps simp: cte_wp_at_ctes_of) + +crunches cteInsert + for valid_arch[wp]: valid_arch_state' + (wp: crunch_wps) + +lemma cteInsert_valid_irq_handlers'[wp]: + "\\s. valid_irq_handlers' s \ (\irq. cap = IRQHandlerCap irq \ irq_issued' irq s)\ + cteInsert cap src dest + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def cteInsert_def irq_issued'_def setUntypedCapAsFull_def) + apply (wp getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (intro conjI impI) + apply (clarsimp simp:ran_dom modify_map_dom) + apply (drule bspec) + apply fastforce + apply (clarsimp simp:isCap_simps modify_map_def split:if_splits) + apply (clarsimp simp:ran_dom modify_map_dom) + apply (drule bspec) + apply fastforce + apply (clarsimp simp:modify_map_def split:if_splits) + done + +lemma setCTE_arch_ctes_of_wp [wp]: + "\\s. P (ksArchState s) ((ctes_of s)(p \ cte))\ + setCTE p cte + \\rv s. P (ksArchState s) (ctes_of s)\" + apply (simp add: setCTE_def ctes_of_setObject_cte) + apply (clarsimp simp: setObject_def split_def valid_def in_monad) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (elim exE conjE disjE rsubst[where P="P (ksArchState s)" for s]) + apply (clarsimp simp: lookupAround2_char1) + apply (subst map_to_ctes_upd_tcb; assumption?) 
+ apply (clarsimp simp: mask_def objBits_defs field_simps ps_clear_def3) + apply (clarsimp simp: tcb_cte_cases_change) + apply (erule rsubst[where P="P (ksArchState s)" for s]) + apply (rule ext, clarsimp) + apply (intro conjI impI) + apply (clarsimp simp: tcb_cte_cases_def split: if_split_asm) + apply (drule(1) cte_wp_at_tcbI'[where P="(=) cte"]) + apply (simp add: ps_clear_def3 field_simps) + apply assumption+ + apply (simp add: cte_wp_at_ctes_of) + by (clarsimp simp: map_to_ctes_upd_cte ps_clear_def3 field_simps mask_def) + +lemma setCTE_irq_states' [wp]: + "\valid_irq_states'\ setCTE x y \\_. valid_irq_states'\" + apply (rule valid_irq_states_lift') + apply wp + apply (simp add: setCTE_def) + apply (wp setObject_ksMachine) + apply (simp add: updateObject_cte) + apply (rule hoare_pre) + apply (wp unless_wp|wpc|simp)+ + apply fastforce + apply assumption + done + +crunch irq_states' [wp]: cteInsert valid_irq_states' + (wp: crunch_wps) + +crunch pred_tcb_at'[wp]: cteInsert "pred_tcb_at' proj P t" + (wp: crunch_wps) + +crunch state_hyp_refs_of'[wp]: setupReplyMaster "\s. P (state_hyp_refs_of' s)" + (wp: crunch_wps) + +lemma setCTE_cteCaps_of[wp]: + "\\s. P ((cteCaps_of s)(p \ cteCap cte))\ + setCTE p cte + \\rv s. P (cteCaps_of s)\" + apply (simp add: cteCaps_of_def) + apply wp + apply (fastforce elim!: rsubst[where P=P]) + done + +crunches setupReplyMaster + for inQ[wp]: "\s. P (obj_at' (inQ d p) t s)" + and norq[wp]: "\s. P (ksReadyQueues s)" + and ct[wp]: "\s. P (ksCurThread s)" + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and it[wp]: "\s. P (ksIdleThread s)" + and nosch[wp]: "\s. P (ksSchedulerAction s)" + and irq_node'[wp]: "\s. P (irq_node' s)" + and pspace_canonical'[wp]: pspace_canonical' + (wp: crunch_wps) + +lemmas setCTE_cteCap_wp_irq[wp] = + hoare_use_eq_irq_node' [OF setCTE_ksInterruptState setCTE_cteCaps_of] + +crunch global_refs'[wp]: setUntypedCapAsFull "\s. P (global_refs' s) " + (simp: crunch_simps) + + +lemma setUntypedCapAsFull_valid_refs'[wp]: + "\\s. valid_refs' R (ctes_of s) \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\yb s. valid_refs' R (ctes_of s)\" + apply (clarsimp simp:valid_refs'_def setUntypedCapAsFull_def split del:if_split) + apply (wp updateCap_ctes_of_wp) + apply (clarsimp simp:ran_dom) + apply (drule_tac x = y in bspec) + apply (drule_tac a = y in domI) + apply (simp add:modify_map_dom) + apply (clarsimp simp:modify_map_def cte_wp_at_ctes_of isCap_simps split:if_splits) + done + +crunch gsMaxObjectSize[wp]: setUntypedCapAsFull "\s. P (gsMaxObjectSize s)" + +lemma setUntypedCapAsFull_sizes[wp]: + "\\s. valid_cap_sizes' sz (ctes_of s) \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\rv s. valid_cap_sizes' sz (ctes_of s)\" + apply (clarsimp simp:valid_cap_sizes'_def setUntypedCapAsFull_def split del:if_split) + apply (rule hoare_pre) + apply (wp updateCap_ctes_of_wp | wps)+ + apply (clarsimp simp:ran_dom) + apply (drule_tac x = y in bspec) + apply (drule_tac a = y in domI) + apply (simp add:modify_map_dom) + apply (clarsimp simp:modify_map_def cte_wp_at_ctes_of isCap_simps split:if_splits) + done + +lemma setUntypedCapAsFull_valid_global_refs'[wp]: + "\\s. valid_global_refs' s \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\yb s. 
valid_global_refs' s\" + apply (clarsimp simp: valid_global_refs'_def) + apply (rule hoare_pre,wps) + apply wp + apply simp +done + +lemma capMaster_eq_capBits_eq: + "capMasterCap cap = capMasterCap cap' \ capBits cap = capBits cap'" + by (metis capBits_Master) + +lemma valid_global_refsD_with_objSize: + "\ ctes_of s p = Some cte; valid_global_refs' s \ \ + kernel_data_refs \ capRange (cteCap cte) = {} \ global_refs' s \ kernel_data_refs + \ 2 ^ capBits (cteCap cte) \ gsMaxObjectSize s" + by (clarsimp simp: valid_global_refs'_def valid_refs'_def valid_cap_sizes'_def ran_def) blast + +lemma cteInsert_valid_globals [wp]: + "\valid_global_refs' and (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\rv. valid_global_refs'\" + apply (simp add: cteInsert_def) + apply (rule hoare_pre) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def badge_derived'_def) + apply (frule capMaster_eq_capBits_eq) + apply (drule capMaster_capRange) + apply (drule (1) valid_global_refsD_with_objSize) + apply simp + done + +lemma setCTE_ksMachine[wp]: + "\\s. P (ksMachineState s)\ setCTE x y \\_ s. P (ksMachineState s)\" + apply (clarsimp simp: setCTE_def) + apply (wp setObject_ksMachine) + apply (clarsimp simp: updateObject_cte + split: Structures_H.kernel_object.splits) + apply (safe, (wp unless_wp | simp)+) + done + +crunch ksMachine[wp]: cteInsert "\s. P (ksMachineState s)" + (wp: crunch_wps) + +lemma cteInsert_vms'[wp]: + "\valid_machine_state'\ cteInsert cap src dest \\rv. valid_machine_state'\" + apply (simp add: cteInsert_def valid_machine_state'_def pointerInDeviceData_def + pointerInUserData_def) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (wp setObject_typ_at_inv setObject_ksMachine updateObject_default_inv | + intro hoare_drop_imp|assumption)+ + done + +crunch pspace_domain_valid[wp]: cteInsert "pspace_domain_valid" + (wp: crunch_wps) + +lemma setCTE_ct_not_inQ[wp]: + "\ct_not_inQ\ setCTE ptr cte \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF setCTE_nosch]) + apply (simp add: setCTE_def ct_not_inQ_def) + apply (rule hoare_weaken_pre) + apply (wps setObject_cte_ct) + apply (rule setObject_cte_obj_at_tcb') + apply (clarsimp simp add: obj_at'_def)+ + done + +crunch ct_not_inQ[wp]: cteInsert "ct_not_inQ" + (simp: crunch_simps wp: hoare_drop_imp) + +lemma setCTE_ksCurDomain[wp]: + "\\s. P (ksCurDomain s)\ + setCTE p cte + \\rv s. P (ksCurDomain s)\" + apply (simp add: setCTE_def) + apply wp + done + +lemma setObject_cte_ksDomSchedule[wp]: "\ \s. P (ksDomSchedule s) \ setObject ptr (v::cte) \ \_ s. P (ksDomSchedule s) \" + apply (simp add: setObject_def split_def) + apply (wp updateObject_cte_inv | simp)+ + done + +lemma setCTE_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ + setCTE p cte + \\rv s. P (ksDomSchedule s)\" + apply (simp add: setCTE_def) + apply wp + done + +crunch ksCurDomain[wp]: cteInsert "\s. P (ksCurDomain s)" + (wp: crunch_wps ) + +crunch ksIdleThread[wp]: cteInsert "\s. P (ksIdleThread s)" + (wp: crunch_wps) + +crunch ksDomSchedule[wp]: cteInsert "\s. P (ksDomSchedule s)" + (wp: crunch_wps) + +lemma setCTE_tcbDomain_inv[wp]: + "\obj_at' (\tcb. P (tcbDomain tcb)) t\ setCTE ptr v \\_. obj_at' (\tcb. P (tcbDomain tcb)) t\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + +crunch tcbDomain_inv[wp]: cteInsert "obj_at' (\tcb. P (tcbDomain tcb)) t" + (wp: crunch_simps hoare_drop_imps) + +lemma setCTE_tcbPriority_inv[wp]: + "\obj_at' (\tcb. 
P (tcbPriority tcb)) t\ setCTE ptr v \\_. obj_at' (\tcb. P (tcbPriority tcb)) t\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + +crunch tcbPriority_inv[wp]: cteInsert "obj_at' (\tcb. P (tcbPriority tcb)) t" + (wp: crunch_simps hoare_drop_imps) + + +lemma cteInsert_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ cteInsert a b c \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift)+ + done + +lemma setObject_cte_domIdx: + "\\s. P (ksDomScheduleIdx s)\ setObject t (v::cte) \\rv s. P (ksDomScheduleIdx s)\" + by (clarsimp simp: valid_def setCTE_def[symmetric] dest!: setCTE_pspace_only) + +crunch ksDomScheduleIdx[wp]: cteInsert "\s. P (ksDomScheduleIdx s)" + (wp: setObject_cte_domIdx hoare_drop_imps) + +crunch gsUntypedZeroRanges[wp]: cteInsert "\s. P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_cte_inv crunch_wps) + +definition + "untyped_derived_eq cap cap' + = (isUntypedCap cap \ cap = cap')" + +lemma ran_split: + "inj_on m (dom m) + \ ran (\x. if P x then m' x else m x) + = ((ran m - (ran (restrict_map m (Collect P)))) + \ (ran (restrict_map m' (Collect P))))" + apply (clarsimp simp: ran_def restrict_map_def set_eq_iff) + apply (safe, simp_all) + apply (auto dest: inj_onD[OF _ trans[OF _ sym]]) + done + +lemma ran_split_eq: + "inj_on m (dom m) + \ \x. \ P x \ m' x = m x + \ ran m' + = ((ran m - (ran (restrict_map m (Collect P)))) + \ (ran (restrict_map m' (Collect P))))" + apply (rule trans[rotated], erule ran_split) + apply (rule arg_cong[where f=ran]) + apply auto + done + +lemma usableUntypedRange_uniq: + "cteCaps_of s x = Some cp + \ cteCaps_of s y = Some cp' + \ isUntypedCap cp + \ isUntypedCap cp' + \ capAligned cp + \ capAligned cp' + \ untyped_inc' (ctes_of s) + \ usableUntypedRange cp = usableUntypedRange cp' + \ usableUntypedRange cp \ {} + \ x = y" + apply (cases "the (ctes_of s x)") + apply (cases "the (ctes_of s y)") + apply (clarsimp simp: cteCaps_of_def) + apply (frule untyped_incD'[where p=x and p'=y], simp+) + apply (drule(1) usableRange_subseteq)+ + apply blast + done + +lemma usableUntypedRange_empty: + "valid_cap' cp s \ isUntypedCap cp + \ (usableUntypedRange cp = {}) = (capFreeIndex cp = maxFreeIndex (capBlockSize cp))" + apply (clarsimp simp: isCap_simps max_free_index_def valid_cap_simps' capAligned_def) + apply (rule order_trans, rule word_plus_mono_right) + apply (rule_tac x="2 ^ capBlockSize cp - 1" in word_of_nat_le) + apply (simp add: unat_2p_sub_1 untypedBits_defs) + apply (simp add: field_simps is_aligned_no_overflow) + apply (simp add: field_simps mask_def) + done + +lemma restrict_map_is_map_comp: + "restrict_map m S = m \\<^sub>m (\x. if x \ S then Some x else None)" + by (simp add: restrict_map_def map_comp_def fun_eq_iff) + +lemma untypedZeroRange_to_usableCapRange: + "untypedZeroRange c = Some (x, y) \ valid_cap' c s + \ isUntypedCap c \ usableUntypedRange c = {x .. y} + \ x \ y" + apply (clarsimp simp: untypedZeroRange_def split: if_split_asm) + apply (frule(1) usableUntypedRange_empty) + apply (clarsimp simp: isCap_simps valid_cap_simps' max_free_index_def) + apply (simp add: getFreeRef_def mask_def add_diff_eq) + done + +lemma untyped_ranges_zero_delta: + assumes urz: "untyped_ranges_zero' s" + and other: "\p. 
p \ set xs \ cps' p = cteCaps_of s p" + and vmdb: "valid_mdb' s" + and vobj: "valid_objs' s" + and eq: "ran (restrict_map (untypedZeroRange \\<^sub>m cteCaps_of s) (set xs)) + \ gsUntypedZeroRanges s + \ utr' = ((gsUntypedZeroRanges s - ran (restrict_map (untypedZeroRange \\<^sub>m cteCaps_of s) (set xs))) + \ ran (restrict_map (untypedZeroRange \\<^sub>m cps') (set xs)))" + notes Collect_const[simp del] + shows "untyped_ranges_zero_inv cps' utr'" + apply (subst eq) + apply (clarsimp simp: urz[unfolded untyped_ranges_zero_inv_def]) + apply (fastforce simp: map_comp_Some_iff restrict_map_Some_iff elim!: ranE)[1] + apply (simp add: untyped_ranges_zero_inv_def urz[unfolded untyped_ranges_zero_inv_def]) + apply (rule sym, rule trans, rule_tac P="\x. x \ set xs" + and m="untypedZeroRange \\<^sub>m cteCaps_of s" in ran_split_eq) + apply (rule_tac B="dom (untypedZeroRange \\<^sub>m (\cp. if valid_cap' cp s + then Some cp else None) \\<^sub>m cteCaps_of s)" in subset_inj_on[rotated]) + apply (clarsimp simp: map_comp_Some_iff cteCaps_of_def) + apply (case_tac "the (ctes_of s x)", clarsimp) + apply (frule ctes_of_valid_cap'[OF _ vobj]) + apply blast + apply (cut_tac vmdb) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (clarsimp intro!: inj_onI simp: map_comp_Some_iff + split: if_split_asm) + apply (drule(1) untypedZeroRange_to_usableCapRange)+ + apply (clarsimp) + apply (drule(2) usableUntypedRange_uniq, (simp add: valid_capAligned)+) + apply (simp add: map_comp_def other) + apply (simp add: restrict_map_is_map_comp) + done + +lemma ran_restrict_map_insert: + "ran (restrict_map m (insert x S)) = (set_option (m x) \ ran (restrict_map m S))" + by (auto simp add: ran_def restrict_map_Some_iff) + +lemmas untyped_ranges_zero_fun_upd + = untyped_ranges_zero_delta[where xs="[x]" and cps'="cps(x \ cp)", + simplified ran_restrict_map_insert list.simps, simplified] for x cps cp + +lemma cteInsert_untyped_ranges_zero[wp]: + "\untyped_ranges_zero' and (\s. src \ dest) and valid_mdb' + and valid_objs' + and cte_wp_at' (untyped_derived_eq cap o cteCap) src\ + cteInsert cap src dest + \\rv. untyped_ranges_zero'\" + apply (rule hoare_pre) + apply (rule untyped_ranges_zero_lift, wp) + apply (simp add: cteInsert_def setUntypedCapAsFull_def) + apply (wp getCTE_wp' | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def cteCaps_of_def + fun_upd_def[symmetric]) + apply (intro impI conjI allI; erule + untyped_ranges_zero_delta[where xs="[src, dest]", unfolded cteCaps_of_def], + simp_all add: ran_restrict_map_insert) + apply (clarsimp simp: isCap_simps untypedZeroRange_def + untyped_derived_eq_def badge_derived'_def + split: if_split_asm) + apply blast + apply (case_tac "isUntypedCap cap", simp_all add: untyped_derived_eq_def) + apply (clarsimp simp: isCap_simps untypedZeroRange_def + untyped_derived_eq_def badge_derived'_def + split: if_split_asm) + apply blast + done + +crunches cteInsert + for tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: crunch_wps rule: valid_bitmaps_lift) + +lemma cteInsert_invs: + "\invs' and cte_wp_at' (\c. cteCap c=NullCap) dest and valid_cap' cap and + (\s. src \ dest) and (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s) + and cte_wp_at' (untyped_derived_eq cap o cteCap) src + and ex_cte_cap_to' dest and (\s. \irq. 
cap = IRQHandlerCap irq \ irq_issued' irq s)\ + cteInsert cap src dest + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift + valid_irq_node_lift irqs_masked_lift cteInsert_norq + sym_heap_sched_pointers_lift) + apply (auto simp: invs'_def valid_state'_def valid_pspace'_def elim: valid_capAligned) + done + +lemma deriveCap_corres: + "\cap_relation c c'; cte = cte_map slot \ \ + corres (ser \ cap_relation) + (cte_at slot) + (pspace_aligned' and pspace_distinct' and cte_at' cte and valid_mdb') + (derive_cap slot c) (deriveCap cte c')" + apply (unfold derive_cap_def deriveCap_def) + apply (case_tac c) + apply (simp_all add: returnOk_def Let_def is_zombie_def isCap_simps + split: sum.splits) + apply (rule_tac Q="\_ _. True" and Q'="\_ _. True" in + corres_initial_splitE [OF ensureNoChildren_corres]) + apply simp + apply clarsimp + apply wp+ + apply clarsimp + apply (rule corres_rel_imp) + apply (rule corres_guard_imp) + apply (rule arch_deriveCap_corres) + apply (clarsimp simp: o_def)+ + done + +crunch inv[wp]: deriveCap "P" + (simp: crunch_simps wp: crunch_wps arch_deriveCap_inv) + +lemma valid_NullCap: + "valid_cap' NullCap = \" + by (rule ext, simp add: valid_cap_simps' capAligned_def word_bits_def) + +lemma deriveCap_valid [wp]: + "\\s. s \' c\ + deriveCap slot c + \\rv s. s \' rv\,-" + apply (simp add: deriveCap_def split del: if_split) + apply (rule hoare_pre) + apply (wp arch_deriveCap_valid | simp add: o_def)+ + apply (simp add: valid_NullCap) + apply (clarsimp simp: isCap_simps) + done + +lemma lookup_cap_valid': + "\valid_objs'\ lookupCap t c \valid_cap'\, -" + apply (simp add: lookupCap_def lookupCapAndSlot_def + lookupSlotForThread_def split_def) + apply (wp | simp)+ + done + +lemma capAligned_Null [simp]: + "capAligned NullCap" + by (simp add: capAligned_def is_aligned_def word_bits_def) + +lemma cte_wp_at'_conjI: + "\ cte_wp_at' P p s; cte_wp_at' Q p s \ \ cte_wp_at' (\c. P c \ Q c) p s" + by (auto simp add: cte_wp_at'_def) + +crunch inv'[wp]: rangeCheck "P" + (simp: crunch_simps) + +lemma lookupSlotForCNodeOp_inv'[wp]: + "\P\ lookupSlotForCNodeOp src croot ptr depth \\rv. P\" + apply (simp add: lookupSlotForCNodeOp_def split_def unlessE_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp hoare_drop_imps) + apply simp + done + +(* FIXME: move *) +lemma loadWordUser_inv [wp]: + "\P\ loadWordUser p \\rv. P\" + unfolding loadWordUser_def + by (wp dmo_inv' loadWord_inv) + +lemma capTransferFromWords_inv: + "\P\ capTransferFromWords buffer \\_. P\" + apply (simp add: capTransferFromWords_def) + apply wp + done + +lemma lct_inv' [wp]: + "\P\ loadCapTransfer b \\rv. 
P\" + unfolding loadCapTransfer_def + apply (wp capTransferFromWords_inv) + done + +lemma maskCapRightsNull [simp]: + "maskCapRights R NullCap = NullCap" + by (simp add: maskCapRights_def isCap_defs) + +lemma maskCapRightsUntyped [simp]: + "maskCapRights R (UntypedCap d r n f) = UntypedCap d r n f" + by (simp add: maskCapRights_def isCap_defs Let_def) + +declare if_option_Some[simp] + +lemma lookup_cap_corres: + "\ epcptr = to_bl epcptr' \ \ + corres (lfr \ cap_relation) + (valid_objs and pspace_aligned and tcb_at thread) + (valid_objs' and pspace_distinct' and pspace_aligned' and tcb_at' thread) + (lookup_cap thread epcptr) + (lookupCap thread epcptr')" + apply (simp add: lookup_cap_def lookupCap_def lookupCapAndSlot_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF lookupSlotForThread_corres]) + apply (simp add: split_def) + apply (subst bindE_returnOk[symmetric]) + apply (rule corres_splitEE) + apply simp + apply (rule getSlotCap_corres, rule refl) + apply (rule corres_returnOk [of _ \ \]) + apply simp + apply wp+ + apply auto + done + +lemma ensureEmptySlot_corres: + "q = cte_map p \ + corres (ser \ dc) (invs and cte_at p) invs' + (ensure_empty p) (ensureEmptySlot q)" + apply (clarsimp simp add: ensure_empty_def ensureEmptySlot_def unlessE_whenE liftE_bindE) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_trivial) + apply (case_tac cap, auto simp add: whenE_def returnOk_def)[1] + apply wp+ + apply (clarsimp simp: invs_valid_objs invs_psp_aligned) + apply fastforce + done + +lemma ensureEmpty_inv[wp]: + "\P\ ensureEmptySlot p \\rv. P\" + by (simp add: ensureEmptySlot_def unlessE_whenE whenE_def | wp)+ + +lemma lookupSlotForCNodeOp_corres: + "\cap_relation c c'; ptr = to_bl ptr'\ + \ corres (ser \ (\cref cref'. cref' = cte_map cref)) + (valid_objs and pspace_aligned and valid_cap c) + (valid_objs' and pspace_aligned' and pspace_distinct' and valid_cap' c') + (lookup_slot_for_cnode_op s c ptr depth) + (lookupSlotForCNodeOp s c' ptr' depth)" + apply (simp add: lookup_slot_for_cnode_op_def lookupSlotForCNodeOp_def) + apply (clarsimp simp: lookup_failure_map_def split_def word_size) + apply (clarsimp simp: rangeCheck_def[unfolded fun_app_def unlessE_def] whenE_def + word_bits_def toInteger_nat fromIntegral_def fromInteger_nat) + apply (rule corres_lookup_error) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule rab_corres'; simp) + apply (rule corres_trivial) + apply (clarsimp simp: returnOk_def lookup_failure_map_def + split: list.split) + apply wp+ + apply clarsimp + apply clarsimp + done + +lemma ensureNoChildren_wp: + "\\s. (descendants_of' p (ctes_of s) \ {} \ Q s) + \ (descendants_of' p (ctes_of s) = {} \ P () s)\ + ensureNoChildren p + \P\,\\_. Q\" + apply (simp add: ensureNoChildren_def whenE_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def descendants_of'_def) + apply (intro conjI impI allI) + apply clarsimp + apply (drule spec, erule notE, rule subtree.direct_parent) + apply (simp add:mdb_next_rel_def mdb_next_def) + apply simp + apply (simp add: parentOf_def) + apply clarsimp + apply (erule (4) subtree_no_parent) + apply clarsimp + apply (erule (2) subtree_next_0) + done + +lemma deriveCap_derived: + "\\s. c'\ capability.NullCap \ cte_wp_at' (\cte. 
badge_derived' c' (cteCap cte) + \ capASID c' = capASID (cteCap cte) + \ cap_asid_base' c' = cap_asid_base' (cteCap cte) + \ cap_vptr' c' = cap_vptr' (cteCap cte)) slot s + \ valid_objs' s\ + deriveCap slot c' + \\rv s. rv \ NullCap \ + cte_wp_at' (is_derived' (ctes_of s) slot rv \ cteCap) slot s\, -" + unfolding deriveCap_def badge_derived'_def + apply (cases c'; (solves \(wp ensureNoChildren_wp | simp add: isCap_simps Let_def + | clarsimp simp: badge_derived'_def + | erule cte_wp_at_weakenE' disjE + | rule is_derived'_def[THEN meta_eq_to_obj_eq, THEN iffD2])+\)?) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; + simp add: AARCH64_H.deriveCap_def Let_def isCap_simps + split: if_split, + safe) + apply ((wp throwError_validE_R undefined_validE_R + | clarsimp simp: isCap_simps capAligned_def cte_wp_at_ctes_of + | drule valid_capAligned + | drule(1) bits_low_high_eq + | simp add: capBadge_def sameObjectAs_def + is_derived'_def isCap_simps up_ucast_inj_eq + is_aligned_no_overflow badge_derived'_def + capAligned_def capASID_def + | clarsimp split: option.split_asm)+) + done + +lemma untyped_derived_eq_ArchObjectCap: + "untyped_derived_eq (capability.ArchObjectCap cap) = \" + by (rule ext, simp add: untyped_derived_eq_def isCap_simps) + +lemma arch_deriveCap_untyped_derived[wp]: + "\\s. cte_wp_at' (\cte. untyped_derived_eq c' (cteCap cte)) slot s\ + AARCH64_H.deriveCap slot (capCap c') + \\rv s. cte_wp_at' (untyped_derived_eq rv o cteCap) slot s\, -" + apply (wpsimp simp: AARCH64_H.deriveCap_def Let_def untyped_derived_eq_ArchObjectCap split_del: if_split + wp: undefined_validE_R) + apply(clarsimp simp: cte_wp_at_ctes_of isCap_simps untyped_derived_eq_def) + by (case_tac "capCap c'"; fastforce) + +lemma deriveCap_untyped_derived: + "\\s. cte_wp_at' (\cte. untyped_derived_eq c' (cteCap cte)) slot s\ + deriveCap slot c' + \\rv s. cte_wp_at' (untyped_derived_eq rv o cteCap) slot s\, -" + apply (simp add: deriveCap_def split del: if_split) + apply (rule hoare_pre) + apply (wp arch_deriveCap_inv | simp add: o_def untyped_derived_eq_ArchObjectCap)+ + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps untyped_derived_eq_def) + done + +lemma setCTE_corres: + "cap_relation cap (cteCap cte) \ + corres_underlying {(s, s'). 
pspace_relations (ekheap (s)) (kheap s) (ksPSpace s')} False True dc + (pspace_distinct and pspace_aligned and valid_objs and cte_at p) + (pspace_aligned' and pspace_distinct' and cte_at' (cte_map p)) + (set_cap cap p) + (setCTE (cte_map p) cte)" + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply simp + apply clarsimp + apply (drule(8) set_cap_not_quite_corres_prequel) + apply simp + apply fastforce + done + +locale_abbrev + "pt_types_of s \ pts_of s ||> pt_type" + +(* oldish-style, but still needed for the heap-only form below *) +definition pt_types_of_heap :: "(obj_ref \ Structures_A.kernel_object) \ obj_ref \ pt_type" where + "pt_types_of_heap h \ h |> aobj_of |> pt_of ||> pt_type" + +lemma pt_types_of_heap_eq: + "pt_types_of_heap (kheap s) = pt_types_of s" + by (simp add: pt_types_of_heap_def) + +(* FIXME: move to StateRelation *) +lemma ghost_relation_of_heap: + "ghost_relation h ups cns pt_types \ + ups_of_heap h = ups \ cns_of_heap h = cns \ pt_types_of_heap h = pt_types" + apply (rule iffI) + apply (rule conjI) + apply (rule ext) + apply (clarsimp simp add: ghost_relation_def ups_of_heap_def) + apply (drule_tac x=x in spec) + apply (auto simp: ghost_relation_def ups_of_heap_def + split: option.splits Structures_A.kernel_object.splits + arch_kernel_obj.splits)[1] + subgoal for x dev sz + by (drule_tac x = sz in spec,simp) + apply (rule conjI) + apply (rule ext) + apply (clarsimp simp add: ghost_relation_def cns_of_heap_def) + apply (drule_tac x=x in spec)+ + apply (rule ccontr) + apply (simp split: option.splits Structures_A.kernel_object.splits + arch_kernel_obj.splits)[1] + apply (simp split: if_split_asm) + apply force + apply (drule not_sym) + apply clarsimp + apply (erule_tac x=y in allE) + apply simp + apply (rule ext) + apply (clarsimp simp: ghost_relation_def cns_of_heap_def) + apply (thin_tac P for P) \ \DataPages\ + apply (thin_tac P for P) \ \CNodes\ + apply (simp add: pt_types_of_heap_def) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (clarsimp?, rule conjI, clarsimp, rule sym, rule ccontr, force)+ + apply force + apply (auto simp: ghost_relation_def ups_of_heap_def cns_of_heap_def pt_types_of_heap_def in_omonad + split: option.splits Structures_A.kernel_object.splits + arch_kernel_obj.splits if_split_asm)[1] + done + +lemma corres_caps_decomposition: + assumes x: "corres_underlying {(s, s'). pspace_relations (ekheap (s)) (kheap s) (ksPSpace s')} False True r P P' f g" + assumes u: "\P. \\s. P (new_caps s)\ f \\rv s. P (caps_of_state s)\" + "\P. \\s. P (new_mdb s)\ f \\rv s. P (cdt s)\" + "\P. \\s. P (new_list s)\ f \\rv s. P (cdt_list (s))\" + "\P. \\s. P (new_rvk s)\ f \\rv s. P (is_original_cap s)\" + "\P. \\s. P (new_ctes s)\ g \\rv s. P (ctes_of s)\" + "\P. \\s. P (new_ms s)\ f \\rv s. P (machine_state s)\" + "\P. \\s. P (new_ms' s)\ g \\rv s. P (ksMachineState s)\" + "\P. \\s. P (new_wuc s)\ f \\rv s. P (work_units_completed s)\" + "\P. \\s. P (new_wuc' s)\ g \\rv s. P (ksWorkUnitsCompleted s)\" + "\P. \\s. P (new_ct s)\ f \\rv s. P (cur_thread s)\" + "\P. \\s. P (new_ct' s)\ g \\rv s. P (ksCurThread s)\" + "\P. \\s. P (new_as s)\ f \\rv s. P (arch_state s)\" + "\P. \\s. P (new_as' s)\ g \\rv s. P (ksArchState s)\" + "\P. \\s. P (new_id s)\ f \\rv s. P (idle_thread s)\" + "\P. \\s. P (new_id' s)\ g \\rv s. P (ksIdleThread s)\" + "\P. \\s. P (new_irqn s)\ f \\rv s. P (interrupt_irq_node s)\" + "\P. \\s. P (new_irqs s)\ f \\rv s. P (interrupt_states s)\" + "\P. \\s. P (new_irqs' s)\ g \\rv s. P (ksInterruptState s)\" + "\P. 
\\s. P (new_ups s)\ f \\rv s. P (ups_of_heap (kheap s))\" + "\P. \\s. P (new_ups' s)\ g \\rv s. P (gsUserPages s)\" + "\P. \\s. P (new_cns s)\ f \\rv s. P (cns_of_heap (kheap s))\" + "\P. \\s. P (new_cns' s)\ g \\rv s. P (gsCNodes s)\" + "\P. \\s. P (new_pt_types s)\ f \\rv s. P (pt_types_of s)\" + "\P. \\s. P (new_ready_queues s)\ f \\rv s. P (ready_queues s)\" + "\P. \\s. P (new_action s)\ f \\rv s. P (scheduler_action s)\" + "\P. \\s. P (new_sa' s)\ g \\rv s. P (ksSchedulerAction s)\" + "\P. \\s. P (new_ksReadyQueues s) (new_tcbSchedNexts_of s) (new_tcbSchedPrevs_of s) + (\d p. new_inQs d p s)\ + g \\rv s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< tcbs_of' s)\" + "\P. \\s. P (new_di s)\ f \\rv s. P (domain_index s)\" + "\P. \\s. P (new_dl s)\ f \\rv s. P (domain_list s)\" + "\P. \\s. P (new_cd s)\ f \\rv s. P (cur_domain s)\" + "\P. \\s. P (new_dt s)\ f \\rv s. P (domain_time s)\" + "\P. \\s. P (new_dsi' s)\ g \\rv s. P (ksDomScheduleIdx s)\" + "\P. \\s. P (new_ds' s)\ g \\rv s. P (ksDomSchedule s)\" + "\P. \\s. P (new_cd' s)\ g \\rv s. P (ksCurDomain s)\" + "\P. \\s. P (new_dt' s)\ g \\rv s. P (ksDomainTime s)\" + assumes z: "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ cdt_relation ((\) None \ new_caps s) (new_mdb s) (new_ctes s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ cdt_list_relation (new_list s) (new_mdb s) (new_ctes s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ sched_act_relation (new_action s) (new_sa' s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ ready_queues_relation_2 (new_ready_queues s) (new_ksReadyQueues s') + (new_tcbSchedNexts_of s') (new_tcbSchedPrevs_of s') + (\d p. new_inQs d p s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ revokable_relation (new_rvk s) (null_filter (new_caps s)) (new_ctes s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ (new_as s, new_as' s') \ arch_state_relation + \ interrupt_state_relation (new_irqn s) (new_irqs s) (new_irqs' s') + \ new_ct s = new_ct' s' \ new_id s = new_id' s' + \ new_ms s = new_ms' s' \ new_di s = new_dsi' s' + \ new_dl s = new_ds' s' \ new_cd s = new_cd' s' \ new_dt s = new_dt' s' \ new_wuc s = new_wuc' s'" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ new_ups s = new_ups' s'" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ new_cns s = new_cns' s'" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ new_pt_types s = gsPTTypes (new_as' s')" + shows "corres r P P' f g" +proof - + have all_ext: "\f f'. (\p. f p = f' p) = (f = f')" + by fastforce + have mdb_wp': + "\ctes. \\s. cdt_relation ((\) None \ new_caps s) (new_mdb s) ctes\ + f + \\rv s. \m ca. (\p. ca p = ((\) None \ caps_of_state s) p) \ m = cdt s + \ cdt_relation ca m ctes\" + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift u) + apply (subst all_ext) + apply (simp add: o_def) + done + note mdb_wp = mdb_wp' [simplified all_ext simp_thms] + have list_wp': + "\ctes. \\s. cdt_list_relation (new_list s) (new_mdb s) ctes\ + f + \\rv s. \m t. t = cdt_list s \ m = cdt s + \ cdt_list_relation t m ctes\" + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift u) + apply (simp add: o_def) + done + note list_wp = list_wp' [simplified all_ext simp_thms] + have rvk_wp': + "\ctes. \\s. revokable_relation (new_rvk s) (null_filter (new_caps s)) ctes\ + f + \\rv s. 
revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) ctes\" + unfolding revokable_relation_def + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift hoare_vcg_disj_lift u) + done + have exs_wp': + "\ctes. \\s. revokable_relation (new_rvk s) (null_filter (new_caps s)) ctes\ + f + \\rv s. revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) ctes\" + unfolding revokable_relation_def + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift hoare_vcg_disj_lift u) + done + note rvk_wp = rvk_wp' [simplified all_ext simp_thms] + have swp_cte_at: + "\s. swp cte_at s = ((\) None \ caps_of_state s)" + by (rule ext, simp, subst neq_commute, simp add: cte_wp_at_caps_of_state) + have abs_irq_together': + "\P. \\s. P (new_irqn s) (new_irqs s)\ f + \\rv s. \irn. interrupt_irq_node s = irn \ P irn (interrupt_states s)\" + by (wp hoare_vcg_ex_lift u, simp) + note abs_irq_together = abs_irq_together'[simplified] + show ?thesis + unfolding state_relation_def swp_cte_at + apply (subst conj_assoc[symmetric]) + apply (subst pspace_relations_def[symmetric]) + apply (rule corres_underlying_decomposition [OF x]) + apply (simp add: ghost_relation_of_heap) + apply (wpsimp wp: hoare_vcg_conj_lift mdb_wp rvk_wp list_wp u abs_irq_together simp: pt_types_of_heap_eq)+ + apply (intro z[simplified o_def] conjI | simp add: state_relation_def pspace_relations_def swp_cte_at + | (clarsimp, drule (1) z(6), simp add: state_relation_def pspace_relations_def swp_cte_at))+ + done +qed + +lemma getCTE_symb_exec_r: + "corres_underlying sr False nf' dc \ (cte_at' p) (return ()) (getCTE p)" + apply (rule corres_no_failI, wp) + apply (clarsimp simp: return_def + elim!: use_valid [OF _ getCTE_inv]) + done + +lemma updateMDB_symb_exec_r: + "corres_underlying {(s, s'). pspace_relations (ekheap s) (kheap s) (ksPSpace s')} False nf' dc + \ (pspace_aligned' and pspace_distinct' and (no_0 \ ctes_of) and (\s. p \ 0 \ cte_at' p s)) + (return ()) (updateMDB p m)" + using no_fail_updateMDB [of p m] + apply (clarsimp simp: corres_underlying_def return_def no_fail_def) + apply (drule(1) updateMDB_the_lot, simp, assumption+) + apply clarsimp + done + +lemma updateMDB_ctes_of_cases: + "\\s. P (modify_map (ctes_of s) p (if p = 0 then id else cteMDBNode_update f))\ + updateMDB p f \\rv s. P (ctes_of s)\" + apply (simp add: updateMDB_def split del: if_split) + apply (rule hoare_pre, wp getCTE_ctes_of) + apply (clarsimp simp: modify_map_def map_option_case + split: option.split + | rule conjI ext | erule rsubst[where P=P])+ + apply (case_tac y, simp) + done + +lemma setCTE_state_bits[wp]: + "\\s. P (ksMachineState s)\ setCTE p v \\rv s. P (ksMachineState s)\" + "\\s. Q (ksIdleThread s)\ setCTE p v \\rv s. Q (ksIdleThread s)\" + "\\s. R (ksArchState s)\ setCTE p v \\rv s. R (ksArchState s)\" + "\\s. S (ksInterruptState s)\ setCTE p v \\rv s. 
S (ksInterruptState s)\" + apply (simp_all add: setCTE_def setObject_def split_def) + apply (wp updateObject_cte_inv | simp)+ + done + +lemma cte_map_eq_subst: + "\ cte_at p s; cte_at p' s; valid_objs s; pspace_aligned s; pspace_distinct s \ + \ (cte_map p = cte_map p') = (p = p')" + by (fastforce elim!: cte_map_inj_eq) + +lemma revokable_relation_simp: + "\ (s, s') \ state_relation; null_filter (caps_of_state s) p = Some c; ctes_of s' (cte_map p) = Some (CTE cap node) \ + \ mdbRevocable node = is_original_cap s p" + by (cases p, clarsimp simp: state_relation_def revokable_relation_def) + +crunches setCTE + for gsUserPages[wp]: "\s. P (gsUserPages s)" + and gsCNodes[wp]: "\s. P (gsCNodes s)" + and domain_time[wp]: "\s. P (ksDomainTime s)" + and work_units_completed[wp]: "\s. P (ksWorkUnitsCompleted s)" + (simp: setObject_def wp: updateObject_cte_inv) + +lemma set_original_symb_exec_l': + "corres_underlying {(s, s'). f (ekheap s) (kheap s) s'} False nf' dc P P' (set_original p b) (return x)" + by (simp add: corres_underlying_def return_def set_original_def in_monad Bex_def) + +lemma create_reply_master_corres: + "\ sl' = cte_map sl ; AllowGrant \ rights \ \ + corres dc + (cte_wp_at ((=) cap.NullCap) sl and valid_pspace and valid_mdb and valid_list) + (cte_wp_at' (\c. cteCap c = NullCap \ mdbPrev (cteMDBNode c) = 0) sl' + and valid_mdb' and valid_pspace') + (do + y \ set_original sl True; + set_cap (cap.ReplyCap thread True rights) sl + od) + (setCTE sl' (CTE (capability.ReplyCap thread True True) initMDBNode))" + apply clarsimp + apply (rule corres_caps_decomposition) + defer + apply (wp|simp)+ + apply (clarsimp simp: o_def cdt_relation_def cte_wp_at_ctes_of + split del: if_split cong: if_cong simp del: id_apply) + apply (case_tac cte, clarsimp) + apply (fold fun_upd_def) + apply (subst descendants_of_Null_update') + apply fastforce + apply fastforce + apply assumption + apply assumption + apply (simp add: nullPointer_def) + apply (subgoal_tac "cte_at (a, b) s") + prefer 2 + apply (drule not_sym, clarsimp simp: cte_wp_at_caps_of_state + split: if_split_asm) + apply (simp add: state_relation_def cdt_relation_def) + apply (clarsimp simp: o_def cdt_list_relation_def cte_wp_at_ctes_of + split del: if_split cong: if_cong simp del: id_apply) + apply (case_tac cte, clarsimp) + apply (clarsimp simp: state_relation_def cdt_list_relation_def) + apply (simp split: if_split_asm) + apply (erule_tac x=a in allE, erule_tac x=b in allE) + apply clarsimp + apply(case_tac "next_slot (a, b) (cdt_list s) (cdt s)") + apply(simp) + apply(simp) + apply(fastforce simp: valid_mdb'_def valid_mdb_ctes_def valid_nullcaps_def) + apply (clarsimp simp: state_relation_def) + apply (clarsimp simp: state_relation_def) + apply (clarsimp simp add: revokable_relation_def cte_wp_at_ctes_of + split del: if_split) + apply simp + apply (rule conjI) + apply (clarsimp simp: initMDBNode_def) + apply clarsimp + apply (subgoal_tac "null_filter (caps_of_state s) (a, b) \ None") + prefer 2 + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state + split: if_split_asm) + apply (subgoal_tac "cte_at (a,b) s") + prefer 2 + apply clarsimp + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_cte_at) + apply (clarsimp split: if_split_asm cong: conj_cong + simp: cte_map_eq_subst revokable_relation_simp + cte_wp_at_cte_at valid_pspace_def) + apply (clarsimp simp: state_relation_def) + apply (clarsimp elim!: state_relationE simp: ghost_relation_of_heap pt_types_of_heap_eq o_def)+ + apply (rule corres_guard_imp) + apply (rule 
corres_underlying_symb_exec_l [OF set_original_symb_exec_l']) + apply (rule setCTE_corres) + apply simp + apply wp + apply (clarsimp simp: cte_wp_at_cte_at valid_pspace_def) + apply (clarsimp simp: valid_pspace'_def cte_wp_at'_def) + done + +lemma cte_map_nat_to_cref: + "\ n < 2 ^ b; b < word_bits \ \ + cte_map (p, nat_to_cref b n) = p + (of_nat n * 2^cte_level_bits)" + apply (clarsimp simp: cte_map_def nat_to_cref_def shiftl_t2n + dest!: less_is_drop_replicate) + apply (subst mult_ac) + apply (rule arg_cong [where f="\x. x * 2^cte_level_bits"]) + apply (subst of_drop_to_bl) + apply (simp add: word_bits_def) + apply (subst mask_eq_iff_w2p) + apply (simp add: word_size) + apply (simp add: word_less_nat_alt word_size word_bits_def) + apply (rule order_le_less_trans; assumption?) + apply (subst unat_of_nat) + apply (rule mod_less_eq_dividend) + done + +lemma valid_nullcapsE: + "\ valid_nullcaps m; m p = Some (CTE NullCap n); + \ mdbPrev n = 0; mdbNext n = 0 \ \ P \ + \ P" + by (fastforce simp: valid_nullcaps_def nullMDBNode_def nullPointer_def) + +lemma valid_nullcaps_prev: + "\ m (mdbPrev n) = Some (CTE NullCap n'); m p = Some (CTE c n); + no_0 m; valid_dlist m; valid_nullcaps m \ \ False" + apply (erule (1) valid_nullcapsE) + apply (erule_tac p=p in valid_dlistEp, assumption) + apply clarsimp + apply clarsimp + done + +lemma valid_nullcaps_next: + "\ m (mdbNext n) = Some (CTE NullCap n'); m p = Some (CTE c n); + no_0 m; valid_dlist m; valid_nullcaps m \ \ False" + apply (erule (1) valid_nullcapsE) + apply (erule_tac p=p in valid_dlistEn, assumption) + apply clarsimp + apply clarsimp + done + +defs noReplyCapsFor_def: + "noReplyCapsFor \ \t s. \sl m r. \ cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) sl s" + +lemma pspace_relation_no_reply_caps: + assumes pspace: "pspace_relation (kheap s) (ksPSpace s')" + and invs: "invs s" + and tcb: "tcb_at t s" + and m_cte': "cte_wp_at' ((=) cte) sl' s'" + and m_null: "cteCap cte = capability.NullCap" + and m_sl: "sl' = cte_map (t, tcb_cnode_index 2)" + shows "noReplyCapsFor t s'" +proof - + from tcb have m_cte: "cte_at (t, tcb_cnode_index 2) s" + by (clarsimp elim!: tcb_at_cte_at) + have m_cte_null: + "cte_wp_at (\c. c = cap.NullCap) (t, tcb_cnode_index 2) s" + using pspace invs + apply (frule_tac pspace_relation_cte_wp_atI') + apply (rule assms) + apply clarsimp + apply (clarsimp simp: m_sl) + apply (frule cte_map_inj_eq) + apply (rule m_cte) + apply (erule cte_wp_cte_at) + apply clarsimp+ + apply (clarsimp elim!: cte_wp_at_weakenE simp: m_null) + done + have no_reply_caps: + "\sl m r. \ cte_wp_at (\c. c = cap.ReplyCap t m r) sl s" + by (rule no_reply_caps_for_thread [OF invs tcb m_cte_null]) + hence noReplyCaps: + "\sl m r. \ cte_wp_at' (\cte. 
cteCap cte = ReplyCap t m r) sl s'" + apply (intro allI) + apply (clarsimp simp: cte_wp_at_neg2 cte_wp_at_ctes_of simp del: split_paired_All) + apply (frule pspace_relation_cte_wp_atI [OF pspace _ invs_valid_objs [OF invs]]) + apply (clarsimp simp: cte_wp_at_neg2 simp del: split_paired_All) + apply (drule_tac x="(a, b)" in spec) + apply (clarsimp simp: cte_wp_cte_at cte_wp_at_caps_of_state) + apply (case_tac c, simp_all) + apply fastforce + done + thus ?thesis + by (simp add: noReplyCapsFor_def) +qed + +lemma setupReplyMaster_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + (setup_reply_master t) (setupReplyMaster t)" + apply (simp add: setupReplyMaster_def setup_reply_master_def) + apply (simp add: locateSlot_conv tcbReplySlot_def objBits_def objBitsKO_def) + apply (simp add: nullMDBNode_def, fold initMDBNode_def) + apply (rule_tac F="t + 2*2^cte_level_bits = cte_map (t, tcb_cnode_index 2)" in corres_req) + apply (clarsimp simp: tcb_cnode_index_def2 cte_map_nat_to_cref word_bits_def cte_level_bits_def) + apply (clarsimp simp: cte_level_bits_def) + apply (rule stronger_corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_when) + apply fastforce + apply (rule_tac P'="einvs and tcb_at t" in corres_stateAssert_implied) + apply (rule create_reply_master_corres; simp) + apply (subgoal_tac "\cte. cte_wp_at' ((=) cte) (cte_map (t, tcb_cnode_index 2)) s' + \ cteCap cte = capability.NullCap") + apply (fastforce dest: pspace_relation_no_reply_caps + state_relation_pspace_relation) + apply (clarsimp simp: cte_map_def tcb_cnode_index_def cte_wp_at_ctes_of) + apply (rule_tac Q="\rv. einvs and tcb_at t and + cte_wp_at ((=) rv) (t, tcb_cnode_index 2)" + in hoare_strengthen_post) + apply (wp hoare_drop_imps get_cap_wp) + apply (clarsimp simp: invs_def valid_state_def elim!: cte_wp_at_weakenE) + apply (rule_tac Q="\rv. valid_pspace' and valid_mdb' and + cte_wp_at' ((=) rv) (cte_map (t, tcb_cnode_index 2))" + in hoare_strengthen_post) + apply (wp hoare_drop_imps getCTE_wp') + apply (rename_tac rv s) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (case_tac rv, fastforce elim: valid_nullcapsE) + apply (fastforce elim: tcb_at_cte_at) + apply (clarsimp simp: cte_at'_obj_at' tcb_cte_cases_def cte_map_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + done + +crunch tcb'[wp]: setupReplyMaster "tcb_at' t" + (wp: crunch_wps) + +crunch idle'[wp]: setupReplyMaster "valid_idle'" + +(* Levity: added (20090126 19:32:14) *) +declare stateAssert_wp [wp] + +lemma setupReplyMaster_valid_mdb: + "slot = t + 2 ^ objBits (undefined :: cte) * tcbReplySlot \ + \valid_mdb' and valid_pspace' and tcb_at' t\ + setupReplyMaster t + \\rv. 
valid_mdb'\" + apply (clarsimp simp: setupReplyMaster_def locateSlot_conv + nullMDBNode_def) + apply (fold initMDBNode_def) + apply (wp setCTE_valid_mdb getCTE_wp') + apply clarsimp + apply (intro conjI) + apply (case_tac cte) + apply (fastforce simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def + no_mdb_def + elim: valid_nullcapsE) + apply (frule obj_at_aligned') + apply (simp add: valid_cap'_def capAligned_def + objBits_simps' word_bits_def)+ + apply (clarsimp simp: valid_pspace'_def) + apply (clarsimp simp: caps_no_overlap'_def capRange_def) + apply (clarsimp simp: fresh_virt_cap_class_def + elim!: ranE) + apply (clarsimp simp add: noReplyCapsFor_def cte_wp_at_ctes_of) + apply (case_tac x) + apply (rename_tac capability mdbnode) + apply (case_tac capability; simp) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; simp) + apply fastforce + done + +lemma setupReplyMaster_valid_objs [wp]: + "\ valid_objs' and pspace_aligned' and pspace_distinct' and tcb_at' t\ + setupReplyMaster t + \\_. valid_objs'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (wp setCTE_valid_objs getCTE_wp') + apply (clarsimp) + apply (frule obj_at_aligned') + apply (simp add: valid_cap'_def capAligned_def + objBits_simps' word_bits_def)+ + done + +lemma setupReplyMaster_wps[wp]: + "\pspace_aligned'\ setupReplyMaster t \\rv. pspace_aligned'\" + "\pspace_distinct'\ setupReplyMaster t \\rv. pspace_distinct'\" + "slot = cte_map (t, tcb_cnode_index 2) \ + \\s. P ((cteCaps_of s)(slot \ (capability.ReplyCap t True True))) \ P (cteCaps_of s)\ + setupReplyMaster t + \\rv s. P (cteCaps_of s)\" + apply (simp_all add: setupReplyMaster_def locateSlot_conv) + apply (wp getCTE_wp | simp add: o_def cte_wp_at_ctes_of)+ + apply clarsimp + apply (rule_tac x=cte in exI) + apply (clarsimp simp: tcbReplySlot_def objBits_simps' fun_upd_def word_bits_def + tcb_cnode_index_def2 cte_map_nat_to_cref cte_level_bits_def) + done + +crunch no_0_obj'[wp]: setupReplyMaster no_0_obj' + (wp: crunch_wps simp: crunch_simps) + +lemma setupReplyMaster_valid_pspace': + "\valid_pspace' and tcb_at' t\ + setupReplyMaster t + \\rv. valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (wp setupReplyMaster_valid_mdb) + apply (simp_all add: valid_pspace'_def) + done + +lemma setupReplyMaster_ifunsafe'[wp]: + "slot = t + 2 ^ objBits (undefined :: cte) * tcbReplySlot \ + \if_unsafe_then_cap' and ex_cte_cap_to' slot\ + setupReplyMaster t + \\rv s. if_unsafe_then_cap' s\" + apply (simp add: ifunsafe'_def3 setupReplyMaster_def locateSlot_conv) + apply (wp getCTE_wp') + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of cteCaps_of_def + cte_level_bits_def objBits_simps') + apply (drule_tac x=crefa in spec) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=cref in exI, fastforce) + apply clarsimp + apply (rule_tac x=cref' in exI, fastforce) + done + + +lemma setupReplyMaster_iflive'[wp]: + "\if_live_then_nonz_cap'\ setupReplyMaster t \\rv. if_live_then_nonz_cap'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (wp setCTE_iflive' getCTE_wp') + apply (clarsimp elim!: cte_wp_at_weakenE') + done + +declare azobj_refs'_only_vcpu[simp] + +lemma setupReplyMaster_global_refs[wp]: + "\\s. valid_global_refs' s \ thread \ global_refs' s \ tcb_at' thread s + \ ex_nonz_cap_to' thread s \ valid_objs' s\ + setupReplyMaster thread + \\rv. 
valid_global_refs'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (wp getCTE_wp') + apply (clarsimp simp: capRange_def cte_wp_at_ctes_of objBits_simps) + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (rename_tac "prev_cte") + apply (case_tac prev_cte, simp) + apply (frule(1) ctes_of_valid_cap') + apply (drule(1) valid_global_refsD_with_objSize)+ + apply (clarsimp simp: valid_cap'_def objBits_simps' obj_at'_def projectKOs + split: capability.split_asm) + done + +crunch valid_arch'[wp]: setupReplyMaster "valid_arch_state'" + (wp: crunch_wps simp: crunch_simps) + +lemma ex_nonz_tcb_cte_caps': + "\ex_nonz_cap_to' t s; tcb_at' t s; valid_objs' s; sl \ dom tcb_cte_cases\ \ + ex_cte_cap_to' (t + sl) s" + apply (clarsimp simp: ex_nonz_cap_to'_def ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (subgoal_tac "s \' cteCap cte") + apply (rule_tac x=cref in exI, rule_tac x=cte in exI) + apply (clarsimp simp: valid_cap'_def obj_at'_def dom_def typ_at_to_obj_at_arches + split: cte.split_asm capability.split_asm) + apply (case_tac cte) + apply (clarsimp simp: ctes_of_valid_cap') + done + +lemma ex_nonz_cap_not_global': + "\ex_nonz_cap_to' t s; valid_objs' s; valid_global_refs' s\ \ + t \ global_refs' s" + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (frule(1) valid_global_refsD') + apply clarsimp + apply (drule orthD1, erule (1) subsetD) + apply (subgoal_tac "s \' cteCap cte") + apply (fastforce simp: valid_cap'_def capRange_def capAligned_def + is_aligned_no_overflow + split: cte.split_asm capability.split_asm) + apply (case_tac cte) + apply (clarsimp simp: ctes_of_valid_cap') + done + +crunch typ_at'[wp]: setupReplyMaster "\s. P (typ_at' T p s)" + (wp: crunch_wps simp: crunch_simps) + +lemma setCTE_irq_handlers': + "\\s. valid_irq_handlers' s \ (\irq. cteCap cte = IRQHandlerCap irq \ irq_issued' irq s)\ + setCTE ptr cte + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def cteCaps_of_def irq_issued'_def) + apply (wp hoare_use_eq [where f=ksInterruptState, OF setCTE_ksInterruptState setCTE_ctes_of_wp]) + apply (auto simp: ran_def) + done + +lemma setupReplyMaster_irq_handlers'[wp]: + "\valid_irq_handlers'\ setupReplyMaster t \\rv. valid_irq_handlers'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (wp setCTE_irq_handlers' getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +crunches setupReplyMaster + for irq_states'[wp]: valid_irq_states' + and irqs_masked' [wp]: irqs_masked' + and pred_tcb_at' [wp]: "pred_tcb_at' proj P t" + and ksMachine[wp]: "\s. P (ksMachineState s)" + and pspace_domain_valid[wp]: "pspace_domain_valid" + and ct_not_inQ[wp]: "ct_not_inQ" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and ksIdlethread[wp]: "\s. P (ksIdleThread s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and scheduler_action[wp]: "\s. P (ksSchedulerAction s)" + and obj_at'_inQ[wp]: "obj_at' (inQ d p) t" + and tcbDomain_inv[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t" + and tcbPriority_inv[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t" + and ready_queues[wp]: "\s. P (ksReadyQueues s)" + and ready_queuesL1[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ready_queuesL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. 
P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps simp: crunch_simps rule: irqs_masked_lift) + +lemma setupReplyMaster_vms'[wp]: + "\valid_machine_state'\ setupReplyMaster t \\_. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def ) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift) + apply wp+ + done + +lemma setupReplyMaster_urz[wp]: + "\untyped_ranges_zero' and valid_mdb' and valid_objs'\ + setupReplyMaster t + \\rv. untyped_ranges_zero'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (rule hoare_pre) + apply (wp untyped_ranges_zero_lift getCTE_wp' | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) + apply (subst untyped_ranges_zero_fun_upd, assumption, simp_all) + apply (clarsimp simp: cteCaps_of_def untypedZeroRange_def Let_def isCap_simps) + done + +lemma setupReplyMaster_invs'[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + setupReplyMaster t + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp setupReplyMaster_valid_pspace' sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift + valid_queues_lift cur_tcb_lift hoare_vcg_disj_lift sym_heap_sched_pointers_lift + valid_bitmaps_lift + valid_irq_node_lift | simp)+ + apply (clarsimp simp: ex_nonz_tcb_cte_caps' valid_pspace'_def + objBits_simps' tcbReplySlot_def + ex_nonz_cap_not_global' dom_def) + done + +lemma setupReplyMaster_cte_wp_at'': + "\cte_wp_at' (\cte. P (cteCap cte)) p and K (\ P NullCap)\ + setupReplyMaster t + \\rv s. cte_wp_at' (P \ cteCap) p s\" + apply (simp add: setupReplyMaster_def locateSlot_conv tree_cte_cteCap_eq) + apply (wp getCTE_wp') + apply (fastforce simp: cte_wp_at_ctes_of cteCaps_of_def) + done + +lemmas setupReplyMaster_cte_wp_at' = setupReplyMaster_cte_wp_at''[unfolded o_def] + +lemma setupReplyMaster_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setupReplyMaster t \\rv. ex_nonz_cap_to' p\" + apply (simp add: ex_nonz_cap_to'_def) + apply (rule hoare_pre) + apply (wp hoare_vcg_ex_lift setupReplyMaster_cte_wp_at') + apply clarsimp + done + +definition + is_arch_update' :: "capability \ cte \ bool" +where + "is_arch_update' cap cte \ isArchObjectCap cap \ capMasterCap cap = capMasterCap (cteCap cte)" + +lemma mdb_next_pres: + "\ m p = Some v; + mdbNext (cteMDBNode x) = mdbNext (cteMDBNode v) \ \ + m(p \ x) \ a \ b = m \ a \ b" + by (simp add: mdb_next_unfold) + +lemma mdb_next_trans_next_pres: + "\ m p = Some v; mdbNext (cteMDBNode x) = mdbNext (cteMDBNode v) \ \ + m(p \ x) \ a \\<^sup>+ b = m \ a \\<^sup>+ b" + apply (rule iffI) + apply (erule trancl_induct) + apply (fastforce simp: mdb_next_pres) + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (fastforce simp: mdb_next_pres) + apply (erule trancl_induct) + apply (rule r_into_trancl) + apply (simp add: mdb_next_pres del: fun_upd_apply) + apply (erule trancl_trans) + apply (fastforce simp: mdb_next_pres simp del: fun_upd_apply) + done + +lemma mdb_next_rtrans_next_pres: + "\ m p = Some v; mdbNext (cteMDBNode x) = mdbNext (cteMDBNode v) \ \ + m(p \ x) \ a \\<^sup>* b = m \ a \\<^sup>* b" + by (safe; clarsimp simp: mdb_next_trans_next_pres + dest!: rtrancl_eq_or_trancl[THEN iffD1] + intro!: rtrancl_eq_or_trancl[THEN iffD2] mdb_next_trans_next_pres[THEN iffD1]) + + +lemma arch_update_descendants': + "\ is_arch_update' cap oldcte; m p = Some oldcte\ \ + descendants_of' x (m(p \ cteCap_update (\_. 
cap) oldcte)) = descendants_of' x m" + apply (erule same_master_descendants) + apply (auto simp: is_arch_update'_def isCap_simps) + done + +lemma arch_update_setCTE_mdb: + "\cte_wp_at' (is_arch_update' cap) p and cte_wp_at' ((=) oldcte) p and valid_mdb'\ + setCTE p (cteCap_update (\_. cap) oldcte) + \\rv. valid_mdb'\" + apply (simp add: valid_mdb'_def) + apply wp + apply (clarsimp simp: valid_mdb_ctes_def cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (rule conjI) + apply (rule valid_dlistI) + apply (fastforce split: if_split_asm elim: valid_dlistE) + apply (fastforce split: if_split_asm elim: valid_dlistE) + apply (rule conjI) + apply (clarsimp simp: no_0_def) + apply (rule conjI) + apply (simp add: mdb_chain_0_def mdb_next_trans_next_pres) + apply blast + apply (rule conjI) + apply (cases oldcte) + apply (clarsimp simp: valid_badges_def mdb_next_pres simp del: fun_upd_apply) + apply (clarsimp simp: is_arch_update'_def) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: isCap_simps) + prefer 2 + subgoal by fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p in allE) + apply simp + apply (simp add: sameRegionAs_def3) + apply (rule conjI) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply (rule conjI) + apply (clarsimp simp: caps_contained'_def simp del: fun_upd_apply) + apply (cases oldcte) + apply (clarsimp simp: is_arch_update'_def) + apply (frule capMaster_untypedRange) + apply (frule capMaster_capRange) + apply (drule sym [where s="capMasterCap cap"]) + apply (frule masterCap.intro) + apply (clarsimp simp: masterCap.isUntypedCap split: if_split_asm) + subgoal by fastforce + subgoal by fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p in allE) + apply fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p' in allE) + subgoal by fastforce + apply (rule conjI) + apply (cases oldcte) + apply (clarsimp simp: is_arch_update'_def) + apply (clarsimp simp: mdb_chunked_def mdb_next_trans_next_pres simp del: fun_upd_apply) + apply (drule sym [where s="capMasterCap cap"]) + apply (frule masterCap.intro) + apply (clarsimp split: if_split_asm) + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (clarsimp simp: masterCap.sameRegionAs) + apply (simp add: masterCap.sameRegionAs is_chunk_def mdb_next_trans_next_pres + mdb_next_rtrans_next_pres) + subgoal by fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p in allE) + apply (clarsimp simp: masterCap.sameRegionAs) + apply (simp add: masterCap.sameRegionAs is_chunk_def mdb_next_trans_next_pres + mdb_next_rtrans_next_pres) + subgoal by fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (simp add: masterCap.sameRegionAs is_chunk_def mdb_next_trans_next_pres + mdb_next_rtrans_next_pres) + subgoal by fastforce + apply (rule conjI) + apply (clarsimp simp: is_arch_update'_def untyped_mdb'_def arch_update_descendants' + simp del: fun_upd_apply) + apply (cases oldcte) + apply clarsimp + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: isCap_simps) + apply (frule capMaster_isUntyped) + apply (drule capMaster_capRange) + apply simp + apply (rule conjI) + apply (clarsimp simp: untyped_inc'_def arch_update_descendants' + simp del: fun_upd_apply) + apply (cases oldcte) + apply (clarsimp simp: is_arch_update'_def) + apply (drule capMaster_untypedRange) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply (erule_tac x=pa 
in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (rule conjI) + apply (cases oldcte) + apply (clarsimp simp: valid_nullcaps_def is_arch_update'_def isCap_simps) + apply (rule conjI) + apply (cases oldcte) + apply (clarsimp simp: ut_revocable'_def is_arch_update'_def isCap_simps) + apply (rule conjI) + apply (clarsimp simp: class_links_def simp del: fun_upd_apply) + apply (cases oldcte) + apply (clarsimp simp: is_arch_update'_def mdb_next_pres) + apply (drule capMaster_capClass) + apply (clarsimp split: if_split_asm) + apply fastforce + apply (rule conjI) + apply (erule(1) distinct_zombies_sameMasterE) + apply (clarsimp simp: is_arch_update'_def) + apply (clarsimp simp: irq_control_def) + apply (cases oldcte) + apply (subgoal_tac "cap \ IRQControlCap") + prefer 2 + apply (clarsimp simp: is_arch_update'_def isCap_simps) + apply (rule conjI) + apply clarsimp + apply (simp add: reply_masters_rvk_fb_def) + apply (erule ball_ran_fun_updI) + apply (clarsimp simp add: is_arch_update'_def isCap_simps) + done + +lemma capMaster_zobj_refs: + "capMasterCap c = capMasterCap c' \ zobj_refs' c = zobj_refs' c'" + by (simp add: capMasterCap_def split: capability.splits arch_capability.splits) + +lemma cte_refs_Master: + "cte_refs' (capMasterCap cap) = cte_refs' cap" + by (rule ext, simp add: capMasterCap_def split: capability.split) + +lemma zobj_refs_Master: + "zobj_refs' (capMasterCap cap) = zobj_refs' cap" + by (simp add: capMasterCap_def split: capability.split arch_capability.split) + +lemma capMaster_same_refs: + "capMasterCap a = capMasterCap b \ cte_refs' a = cte_refs' b \ zobj_refs' a = zobj_refs' b" + apply (rule conjI) + apply (rule master_eqI, rule cte_refs_Master, simp) + apply (rule master_eqI, rule zobj_refs_Master, simp) + done + +lemma arch_update_setCTE_iflive: + "\cte_wp_at' (is_arch_update' cap) p and cte_wp_at' ((=) oldcte) p and if_live_then_nonz_cap'\ + setCTE p (cteCap_update (\_. cap) oldcte) + \\rv. if_live_then_nonz_cap'\" + apply (wp setCTE_iflive') + apply (clarsimp simp: cte_wp_at_ctes_of is_arch_update'_def dest!: capMaster_zobj_refs) + done + +lemma arch_update_setCTE_ifunsafe: + "\cte_wp_at' (is_arch_update' cap) p and cte_wp_at' ((=) oldcte) p and if_unsafe_then_cap'\ + setCTE p (cteCap_update (\_. cap) oldcte) + \\rv s. if_unsafe_then_cap' s\" + apply (clarsimp simp: ifunsafe'_def2 cte_wp_at_ctes_of pred_conj_def) + apply (rule hoare_lift_Pf2 [where f=irq_node']) + prefer 2 + apply wp + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of is_arch_update'_def) + apply (frule capMaster_same_refs) + apply clarsimp + apply (rule conjI, clarsimp) + apply (erule_tac x=p in allE) + apply clarsimp + apply (erule impE) + apply clarsimp + apply clarsimp + apply (rule_tac x=cref' in exI) + apply clarsimp + apply clarsimp + apply (erule_tac x=cref in allE) + apply clarsimp + apply (rule_tac x=cref' in exI) + apply clarsimp + done + +lemma setCTE_cur_tcb[wp]: + "\cur_tcb'\ setCTE ptr val \\rv. cur_tcb'\" + by (wp cur_tcb_lift) + +lemma setCTE_vms'[wp]: + "\valid_machine_state'\ setCTE ptr val \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def ) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift) + apply wp+ + done + +lemma arch_update_setCTE_invs: + "\cte_wp_at' (is_arch_update' cap) p and cte_wp_at' ((=) oldcte) p and invs' and valid_cap' cap\ + setCTE p (cteCap_update (\_. cap) oldcte) + \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wp arch_update_setCTE_mdb valid_queues_lift sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift + arch_update_setCTE_iflive arch_update_setCTE_ifunsafe + valid_irq_node_lift setCTE_typ_at' setCTE_irq_handlers' + setCTE_pred_tcb_at' irqs_masked_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift valid_bitmaps_lift + | simp add: pred_tcb_at'_def)+ + apply (clarsimp simp: valid_global_refs'_def is_arch_update'_def fun_upd_def[symmetric] + cte_wp_at_ctes_of isCap_simps untyped_ranges_zero_fun_upd) + apply (frule capMaster_eq_capBits_eq) + apply (frule capMaster_isUntyped) + apply (frule capMaster_capRange) + apply (clarsimp simp: valid_refs'_def valid_cap_sizes'_def) + apply (subst untyped_ranges_zero_delta[where xs="[p]"], assumption, simp_all) + apply (clarsimp simp: ran_restrict_map_insert cteCaps_of_def + untypedZeroRange_def Let_def + isCap_simps(1-11)[where v="ArchObjectCap ac" for ac]) + apply (fastforce simp: ran_def) + done + +definition + "safe_parent_for' m p cap \ + \parent node. m p = Some (CTE parent node) \ + sameRegionAs parent cap \ + ((\irq. cap = IRQHandlerCap irq) \ + parent = IRQControlCap \ + (\p n'. m p \ Some (CTE cap n')) + \ + isUntypedCap parent \ descendants_of' p m = {} \ capRange cap \ {} + \ capBits cap \ capBits parent)" + +definition + "is_simple_cap' cap \ + cap \ NullCap \ + cap \ IRQControlCap \ + \ isUntypedCap cap \ + \ isReplyCap cap \ + \ isEndpointCap cap \ + \ isNotificationCap cap \ + \ isThreadCap cap \ + \ isCNodeCap cap \ + \ isZombie cap \ + \ isArchFrameCap cap" + +end + +(* FIXME: duplicated *) +locale mdb_insert_simple = mdb_insert + + assumes safe_parent: "safe_parent_for' m src c'" + assumes simple: "is_simple_cap' c'" +begin + +interpretation Arch . 
(*FIXME: arch_split*) + +lemma dest_no_parent_n: + "n \ dest \ p = False" + using src simple safe_parent + apply clarsimp + apply (erule subtree.induct) + prefer 2 + apply simp + apply (clarsimp simp: parentOf_def mdb_next_unfold n_dest new_dest_def n) + apply (cases "mdbNext src_node = dest") + apply (subgoal_tac "m \ src \ dest") + apply simp + apply (subst mdb_next_unfold) + apply (simp add: src) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (clarsimp simp: is_simple_cap'_def Retype_H.isCapRevocable_def AARCH64_H.isCapRevocable_def + split: capability.splits arch_capability.splits) + apply (cases c', auto simp: isCap_simps)[1] + apply (clarsimp simp add: sameRegionAs_def2) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: safe_parent_for'_def isCap_simps) + apply (cases c', auto simp: isCap_simps)[1] + done + +lemma src_node_revokable [simp]: + "mdbRevocable src_node" + using safe_parent ut_rev src + apply (clarsimp simp add: safe_parent_for'_def) + apply (erule disjE) + apply clarsimp + apply (erule irq_revocable, rule irq_control) + apply (clarsimp simp: ut_revocable'_def) + done + +lemma new_child [simp]: + "isMDBParentOf new_src new_dest" + using safe_parent ut_rev src + apply (simp add: new_src_def new_dest_def isMDBParentOf_def) + apply (clarsimp simp: safe_parent_for'_def) + apply (auto simp: isCap_simps) + done + +lemma n_dest_child: + "n \ src \ dest" + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def src dest n) + done + +lemma parent_m_n: + assumes "m \ p \ p'" + shows "if p' = src then n \ p \ dest \ n \ p \ p' else n \ p \ p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply (cases "p = src") + apply simp + apply (rule conjI, clarsimp) + apply clarsimp + apply (rule subtree.trans_parent [where c'=dest]) + apply (rule n_dest_child) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (clarsimp simp: new_src_def src) + apply simp + apply (subgoal_tac "n \ p \ c") + prefer 2 + apply (rule subtree.direct_parent) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: new_src_def src) + apply clarsimp + apply (erule subtree_trans) + apply (rule n_dest_child) + done +next + case (trans_parent c d) + thus ?case + apply - + apply (cases "c = dest", simp) + apply (cases "d = dest", simp) + apply (cases "c = src") + apply clarsimp + apply (erule subtree.trans_parent [where c'=dest]) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (rule conjI, clarsimp) + apply (clarsimp simp: new_src_def src) + apply clarsimp + apply (subgoal_tac "n \ p \ d") + apply clarsimp + apply (erule subtree_trans, rule n_dest_child) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: src new_src_def) + done +qed + +lemma n_to_dest [simp]: + "n \ p \ dest = (p = src)" + by (simp add: n_direct_eq) + +lemma parent_n_m: + assumes "n \ p \ p'" + shows "if p' = dest then p \ src \ m \ p \ src else m \ p \ p'" +proof - + from assms have [simp]: "p \ dest" by (clarsimp simp: dest_no_parent_n) + from assms + show ?thesis + proof induct + case (direct_parent c) + thus ?case + apply simp + apply (rule conjI) + apply clarsimp + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply simp + apply (clarsimp simp: 
parentOf_def n src new_src_def split: if_split_asm) + done + next + case (trans_parent c d) + thus ?case + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp split: if_split_asm) + apply (simp add: n_direct_eq) + apply (cases "p=src") + apply simp + apply (rule subtree.direct_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply clarsimp + apply (erule subtree.trans_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + done + qed +qed + + +lemma descendants: + "descendants_of' p n = + (if src \ descendants_of' p m \ p = src + then descendants_of' p m \ {dest} else descendants_of' p m)" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (fastforce dest!: parent_n_m dest: parent_m_n simp: n_dest_child split: if_split_asm) + done + +end + +declare if_split [split del] + +lemma setUntypedCapAsFull_safe_parent_for': + "\\s. safe_parent_for' (ctes_of s) slot a \ cte_wp_at' ((=) srcCTE) slot s\ + setUntypedCapAsFull (cteCap srcCTE) c' slot + \\rv s. safe_parent_for' (ctes_of s) slot a\" + apply (clarsimp simp:safe_parent_for'_def setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (ctes_of s) + (modify_map (ctes_of s) slot + (cteCap_update (\_. capFreeIndex_update (\_. max_free_index (capBlockSize c')) (cteCap srcCTE))))") + apply (frule mdb_inv_preserve.descendants_of[where p = slot]) + apply (clarsimp simp:isCap_simps modify_map_def cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (clarsimp cong:sameRegionAs_update_untyped) + apply (rule mdb_inv_preserve_updateCap) + apply (simp add:cte_wp_at_ctes_of) + apply simp + apply wp + apply simp + done + +lemma maskedAsFull_revokable_safe_parent: + "\is_simple_cap' c'; safe_parent_for' m p c'; m p = Some cte; + cteCap cte = (maskedAsFull src_cap' a)\ + \ isCapRevocable c' (maskedAsFull src_cap' a) = isCapRevocable c' src_cap'" + apply (clarsimp simp:isCapRevocable_def AARCH64_H.isCapRevocable_def maskedAsFull_def + split:if_splits capability.splits) + apply (intro allI impI conjI) + apply (clarsimp simp:isCap_simps is_simple_cap'_def)+ + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cteInsert_simple_corres: + assumes "cap_relation c c'" "src' = cte_map src" "dest' = cte_map dest" + notes trans_state_update'[symmetric,simp] + shows "corres dc + (valid_objs and pspace_distinct and pspace_aligned and + valid_mdb and valid_list and K (src\dest) and + cte_wp_at (\c. c=cap.NullCap) dest and + K (is_simple_cap c) and + (\s. cte_wp_at (safe_parent_for (cdt s) src c) src s)) + (pspace_distinct' and pspace_aligned' and valid_mdb' and valid_cap' c' and + K (is_simple_cap' c') and + cte_wp_at' (\c. cteCap c=NullCap) dest' and + (\s. safe_parent_for' (ctes_of s) src' c')) + (cap_insert c src dest) + (cteInsert c' src' dest')" + (is "corres _ (?P and (\s. 
cte_wp_at _ _ s)) (?P' and cte_wp_at' _ _ and _) _ _") + using assms + unfolding cap_insert_def cteInsert_def + supply subst_all [simp del] + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac F="cteCap rv' = NullCap" in corres_gen_asm2) + apply simp + apply (rule_tac P="?P and cte_at dest and + (\s. cte_wp_at (safe_parent_for (cdt s) src c) src s) and + cte_wp_at ((=) src_cap) src" and + Q="?P' and + cte_wp_at' ((=) rv') (cte_map dest) and + cte_wp_at' ((=) srcCTE) (cte_map src) and + (\s. safe_parent_for' (ctes_of s) src' c')" + in corres_assert_assume) + prefer 2 + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def valid_nullcaps_def) + apply (case_tac rv') + apply (simp add: initMDBNode_def) + apply (erule allE)+ + apply (erule (1) impE) + apply (simp add: nullPointer_def) + apply (rule corres_guard_imp) + apply (rule_tac R="\r. ?P and cte_at dest and + (\s. cte_wp_at (safe_parent_for (cdt s) src c) src s) and + cte_wp_at ((=) (masked_as_full src_cap c)) src" and + R'="\r. ?P' and cte_wp_at' ((=) rv') (cte_map dest) + and cte_wp_at' ((=) (CTE (maskedAsFull (cteCap srcCTE) c') (cteMDBNode srcCTE))) (cte_map src) + and (\s. safe_parent_for' (ctes_of s) src' c')" + in corres_split[where r'=dc]) + apply (rule setUntypedCapAsFull_corres; simp) + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (erule_tac valid_dlistEn[where p = "cte_map src"]) + apply (simp+)[3] + apply (clarsimp simp: corres_underlying_def state_relation_def + in_monad valid_mdb'_def valid_mdb_ctes_def) + apply (drule (1) pspace_relationsD) + apply (drule (18) set_cap_not_quite_corres) + apply (rule refl) + apply (elim conjE exE) + apply (rule bind_execI, assumption) + apply (subgoal_tac "mdb_insert_abs (cdt a) src dest") + prefer 2 + apply (clarsimp simp: cte_wp_at_caps_of_state valid_mdb_def2) + apply (rule mdb_insert_abs.intro) + apply clarsimp + apply (erule (1) mdb_cte_at_Null_None) + apply (erule (1) mdb_cte_at_Null_descendants) + apply (subgoal_tac "no_mloop (cdt a)") + prefer 2 + apply (simp add: valid_mdb_def) + apply (clarsimp simp: exec_gets update_cdt_def bind_assoc set_cdt_def + exec_get exec_put set_original_def modify_def + simp del: fun_upd_apply + + | (rule bind_execI[where f="cap_insert_ext x y z x' y'" for x y z x' y'], clarsimp simp: mdb_insert_abs.cap_insert_ext_det_def2 update_cdt_list_def set_cdt_list_def put_def simp del: fun_upd_apply) | rule refl)+ + + apply (clarsimp simp: put_def state_relation_def simp del: fun_upd_apply) + apply (drule updateCap_stuff) + apply clarsimp + apply (drule (3) updateMDB_the_lot', simp only: no_0_modify_map, simp only:, elim conjE) + apply (drule (3) updateMDB_the_lot', simp only: no_0_modify_map, simp only:, elim conjE) + apply (drule (3) updateMDB_the_lot', simp only: no_0_modify_map, simp only:, elim conjE) + apply (clarsimp simp: pspace_relations_def) + apply (rule conjI) + subgoal by (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (thin_tac "gsCNodes t = p" for t p)+ + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksWorkUnitsCompleted t = p" for t p)+ + apply (thin_tac "ksIdleThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + apply 
(thin_tac "cur_thread t = p" for t p)+ + apply (thin_tac "domain_index t = p" for t p)+ + apply (thin_tac "domain_time t = p" for t p)+ + apply (thin_tac "cur_domain t = p" for t p)+ + apply (thin_tac "scheduler_action t = p" for t p)+ + apply (thin_tac "ready_queues t = p" for t p)+ + apply (thin_tac "idle_thread t = p" for t p)+ + apply (thin_tac "machine_state t = p" for t p)+ + apply (thin_tac "work_units_completed t = p" for t p)+ + apply (thin_tac "ksArchState t = p" for t p)+ + apply (thin_tac "gsUserPages t = p" for t p)+ + apply (thin_tac "ksCurDomain t = p" for t p)+ + apply (thin_tac "ksInterruptState t = p" for t p)+ + apply (thin_tac "ksDomScheduleIdx t = p" for t p)+ + apply (thin_tac "ksDomainTime t = p" for t p)+ + apply (thin_tac "ksDomSchedule t = p" for t p)+ + apply (thin_tac "ctes_of t = p" for t p)+ + apply (thin_tac "ekheap_relation t p" for t p)+ + apply (thin_tac "pspace_relation t p" for t p)+ + apply (thin_tac "interrupt_state_relation s t p" for s t p)+ + apply (thin_tac "sched_act_relation t p" for t p)+ + apply (thin_tac "ready_queues_relation t p" for t p)+ + apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def prev_update_modify_mdb_relation) + apply (subgoal_tac "cte_map dest \ 0") + prefer 2 + apply (clarsimp simp: valid_mdb'_def + valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "cte_map src \ 0") + prefer 2 + apply (clarsimp simp: valid_mdb'_def + valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "should_be_parent_of src_cap (is_original_cap a src) c (is_cap_revocable c src_cap) = True") + prefer 2 + apply (subst should_be_parent_of_masked_as_full[symmetric]) + apply (subst safe_parent_is_parent) + apply ((simp add: cte_wp_at_caps_of_state)+)[4] + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + defer + apply (clarsimp simp: modify_map_apply) + apply (clarsimp simp: revokable_relation_def simp del: fun_upd_apply) + apply (simp split: if_split) + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "mdbRevocable node = isCapRevocable c' (cteCap srcCTE)") + prefer 2 + apply (case_tac rv') + apply (clarsimp simp add: const_def modify_map_def split: if_split_asm) + apply clarsimp + apply (rule is_cap_revocable_eq, assumption, assumption) + apply (subst same_region_as_relation [symmetric]) + prefer 3 + apply (rule safe_parent_same_region) + apply (simp add: cte_wp_at_caps_of_state) + apply assumption + apply assumption + apply (clarsimp simp: cte_wp_at_def is_simple_cap_def) + apply clarsimp + apply (case_tac srcCTE) + apply (case_tac rv') + apply clarsimp + apply (subgoal_tac "\cap' node'. 
ctes_of b (cte_map (aa,bb)) = Some (CTE cap' node')") + prefer 2 + subgoal by (clarsimp simp: modify_map_def split: if_split_asm) + apply clarsimp + apply (drule set_cap_caps_of_state_monad)+ + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + subgoal by (clarsimp simp: cte_wp_at_caps_of_state null_filter_def split: if_splits) + apply clarsimp + apply (subgoal_tac "cte_at (aa,bb) a") + prefer 2 + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (subgoal_tac "mdbRevocable node = mdbRevocable node'") + apply clarsimp + apply (subgoal_tac "cte_map (aa,bb) \ cte_map dest") + subgoal by (clarsimp simp: modify_map_def split: if_split_asm) + apply (erule (5) cte_map_inj) + apply (wp set_untyped_cap_full_valid_objs set_untyped_cap_as_full_valid_mdb set_untyped_cap_as_full_valid_list + set_untyped_cap_as_full_cte_wp_at setUntypedCapAsFull_valid_cap + setUntypedCapAsFull_cte_wp_at setUntypedCapAsFull_safe_parent_for' | clarsimp | wps)+ + apply (clarsimp simp:cte_wp_at_caps_of_state ) + apply (case_tac rv',clarsimp simp:cte_wp_at_ctes_of maskedAsFull_def) + apply (wp getCTE_wp' get_cap_wp)+ + apply clarsimp + subgoal by (fastforce elim: cte_wp_at_weakenE) + subgoal by (clarsimp simp: cte_wp_at'_def) + apply (case_tac "srcCTE") + apply (rename_tac src_cap' src_node) + apply (case_tac "rv'") + apply (rename_tac dest_node) + apply (clarsimp simp: in_set_cap_cte_at_swp) + apply (subgoal_tac "cte_at src a \ safe_parent_for (cdt a) src c src_cap") + prefer 2 + subgoal by (fastforce simp: cte_wp_at_def) + apply (erule conjE) + apply (subgoal_tac "mdb_insert (ctes_of b) (cte_map src) (maskedAsFull src_cap' c') src_node + (cte_map dest) NullCap dest_node") + prefer 2 + apply (rule mdb_insert.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro; assumption) + apply (rule mdb_insert_axioms.intro; assumption?) 
+ apply (rule refl)
+ apply (erule (5) cte_map_inj)
+ apply (rule conjI)
+ apply (simp (no_asm_simp) add: cdt_relation_def split: if_split)
+ apply (intro impI allI)
+ apply (frule mdb_insert_simple_axioms.intro)
+ apply(clarsimp simp:cte_wp_at_ctes_of)
+ apply (drule (1) mdb_insert_simple.intro)
+ apply (drule_tac src_cap' = src_cap' in maskedAsFull_revokable_safe_parent[symmetric])
+ apply simp+
+ apply (subst mdb_insert_simple.descendants)
+ apply simp
+ apply (subst mdb_insert_abs.descendants_child, assumption)
+ apply (frule set_cap_caps_of_state_monad)
+ apply (subgoal_tac "cte_at (aa,bb) a")
+ prefer 2
+ subgoal by (clarsimp simp: cte_wp_at_caps_of_state split: if_split_asm)
+ apply (simp add: descendants_of_eq' cdt_relation_def split: if_split del: split_paired_All)
+ apply clarsimp
+ apply (drule (5) cte_map_inj)+
+ apply simp
+ (* exact reproduction of proof in cteInsert_corres,
+ as it does not use is_derived *)
+ apply(simp add: cdt_list_relation_def del: split_paired_All split_paired_Ex)
+ apply(subgoal_tac "no_mloop (cdt a) \ finite_depth (cdt a)")
+ prefer 2
+ apply(simp add: finite_depth valid_mdb_def)
+ apply(intro impI allI)
+ apply(simp add: fun_upd_twist)
+
+ apply(subst next_slot_eq[OF mdb_insert_abs.next_slot])
+ apply(simp_all del: fun_upd_apply)
+ apply(simp split: option.splits del: fun_upd_apply add: fun_upd_twist)
+ apply(intro allI impI)
+ apply(subgoal_tac "src \ (aa, bb)")
+ prefer 2
+ apply(rule notI)
+ apply(simp add: valid_mdb_def no_mloop_weaken)
+ apply(subst fun_upd_twist, simp, simp)
+
+ apply(case_tac "ca=src")
+ apply(simp)
+ apply(clarsimp simp: modify_map_def)
+ subgoal by(fastforce split: if_split_asm)
+ apply(case_tac "ca = dest")
+ apply(simp)
+ apply(case_tac "next_slot src (cdt_list (a)) (cdt a)")
+ apply(simp)
+ apply(simp)
+ apply(clarsimp simp: modify_map_def const_def)
+ apply(simp split: if_split_asm)
+ apply(drule_tac p="cte_map src" in valid_mdbD1')
+ apply(simp)
+ apply(simp add: valid_mdb'_def valid_mdb_ctes_def)
+ apply(clarsimp)
+ apply(drule cte_map_inj_eq)
+ apply(simp_all)[6]
+ apply(erule_tac x="fst src" in allE)
+ apply(erule_tac x="snd src" in allE)
+ apply(fastforce)
+ apply(simp)
+ apply(case_tac "next_slot ca (cdt_list (a)) (cdt a)")
+ apply(simp)
+ apply(simp)
+ apply(subgoal_tac "cte_at ca a")
+ prefer 2
+ subgoal by (rule cte_at_next_slot; simp)
+ apply(clarsimp simp: modify_map_def const_def)
+ apply(simp split: if_split_asm)
+ subgoal by (drule cte_map_inj_eq; simp)
+ apply(drule_tac p="cte_map src" in valid_mdbD1')
+ apply(simp)
+ apply(simp add: valid_mdb'_def valid_mdb_ctes_def)
+ apply(clarsimp)
+ apply(clarsimp)
+ apply(case_tac z)
+ apply(erule_tac x=aa in allE)
+ apply(erule_tac x=bb in allE)
+ apply(fastforce)
+ subgoal by (drule cte_map_inj_eq; simp)
+ subgoal by (drule cte_map_inj_eq; simp)
+ subgoal by (drule cte_map_inj_eq; simp)
+ by(fastforce)
+
+declare if_split [split]
+
+lemma sameRegion_capRange_sub:
+ "sameRegionAs cap cap' \ capRange cap' \ capRange cap"
+ apply (clarsimp simp: sameRegionAs_def2 isCap_Master capRange_Master)
+ apply (erule disjE, fastforce dest!: capMaster_capRange)
+ apply (erule disjE, fastforce)
+ apply (clarsimp simp: isCap_simps capRange_def split: if_split_asm)
+ done
+
+lemma safe_parent_for_capRange_capBits:
+ "\ safe_parent_for' m p cap; m p = Some cte \ \ capRange cap \ capRange (cteCap cte)
+ \ capBits cap \ capBits (cteCap cte)"
+ apply (clarsimp simp: safe_parent_for'_def)
+ apply (erule disjE)
+ apply (clarsimp simp: capRange_def)
+ by (auto simp: sameRegionAs_def2 
isCap_simps capRange_def + capMasterCap_def capRange_Master objBits_simps + split:capability.splits arch_capability.splits) + +lemma safe_parent_Null: + "\ m src = Some (CTE NullCap n); safe_parent_for' m src c' \ \ False" + by (simp add: safe_parent_for'_def) + +lemma notUntypedRange: + "\isUntypedCap cap \ untypedRange cap = {}" + by (cases cap) (auto simp: isCap_simps) + +lemma safe_parent_for_untypedRange: + "\ safe_parent_for' m p cap; m p = Some cte \ \ untypedRange cap \ untypedRange (cteCap cte)" + apply (clarsimp simp: safe_parent_for'_def) + apply (erule disjE) + apply clarsimp + apply clarsimp + apply (simp add: sameRegionAs_def2) + apply (erule disjE) + apply clarsimp + apply (drule capMaster_untypedRange) + apply blast + apply (erule disjE) + apply (clarsimp simp: capRange_Master untypedCapRange) + apply (cases "isUntypedCap cap") + apply (clarsimp simp: capRange_Master untypedCapRange) + apply blast + apply (drule notUntypedRange) + apply simp + apply (clarsimp simp: isCap_Master isCap_simps) + done + +lemma safe_parent_for_capUntypedRange: + "\ safe_parent_for' m p cap; m p = Some cte \ \ capRange cap \ untypedRange (cteCap cte)" + apply (clarsimp simp: safe_parent_for'_def) + apply (erule disjE) + apply (clarsimp simp: capRange_def) + apply clarsimp + apply (simp add: sameRegionAs_def2) + apply (erule disjE) + apply clarsimp + apply (frule capMaster_capRange) + apply (clarsimp simp: capRange_Master untypedCapRange) + apply (erule disjE) + apply (clarsimp simp: capRange_Master untypedCapRange) + apply blast + apply (clarsimp simp: isCap_Master isCap_simps) + done + +lemma safe_parent_for_descendants': + "\ safe_parent_for' m p cap; m p = Some (CTE pcap n); isUntypedCap pcap \ \ descendants_of' p m = {}" + by (auto simp: safe_parent_for'_def isCap_simps) + +lemma safe_parent_not_ep': + "\ safe_parent_for' m p cap; m p = Some (CTE src_cap n) \ \ \isEndpointCap src_cap" + by (auto simp: safe_parent_for'_def isCap_simps) + +lemma safe_parent_not_ntfn': + "\ safe_parent_for' m p cap; m p = Some (CTE src_cap n) \ \ \isNotificationCap src_cap" + by (auto simp: safe_parent_for'_def isCap_simps) + +lemma safe_parent_capClass: + "\ safe_parent_for' m p cap; m p = Some (CTE src_cap n) \ \ capClass cap = capClass src_cap" + by (auto simp: safe_parent_for'_def isCap_simps sameRegionAs_def2 capRange_Master capRange_def + capMasterCap_def + split: capability.splits arch_capability.splits) +end +locale mdb_insert_simple' = mdb_insert_simple + + fixes n' + defines "n' \ modify_map n (mdbNext src_node) (cteMDBNode_update (mdbPrev_update (\_. dest)))" +begin +interpretation Arch . 
(*FIXME: arch_split*) +lemma no_0_n' [intro!]: "no_0 n'" by (auto simp: n'_def) +lemmas n_0_simps' [iff] = no_0_simps [OF no_0_n'] + +lemmas no_0_m_prev [iff] = no_0_prev [OF no_0] +lemmas no_0_n_prev [iff] = no_0_prev [OF no_0_n'] + +lemma chain_n': "mdb_chain_0 n'" + unfolding n'_def + by (rule mdb_chain_0_modify_map_prev) (rule chain_n) + +lemma no_loops_n': "no_loops n'" using chain_n' no_0_n' + by (rule mdb_chain_0_no_loops) + +lemma n_direct_eq': + "n' \ p \ p' = (if p = src then p' = dest else + if p = dest then m \ src \ p' + else m \ p \ p')" + by (simp add: n'_def n_direct_eq) + +lemma dest_no_next_p: + "m p = Some cte \ mdbNext (cteMDBNode cte) \ dest" + using dest dest_prev + apply (cases cte) + apply (rule notI) + apply (rule dlistEn, assumption) + apply clarsimp + apply clarsimp + done + +lemma dest_no_src_next [iff]: + "mdbNext src_node \ dest" + using src by (clarsimp dest!: dest_no_next_p) + +lemma n_dest': + "n' dest = Some new_dest" + by (simp add: n'_def n modify_map_if new_dest_def) + +lemma n'_trancl_eq: + "n' \ p \\<^sup>+ p' = + (if p' = dest then p = src \ m \ p \\<^sup>+ src + else if p = dest then m \ src \\<^sup>+ p' + else m \ p \\<^sup>+ p')" + unfolding n'_def trancl_prev_update + by (simp add: n_trancl_eq) + +lemma n_rtrancl_eq': + "n' \ p \\<^sup>* p' = + (if p' = dest then p = dest \ p \ dest \ m \ p \\<^sup>* src + else if p = dest then p' \ src \ m \ src \\<^sup>* p' + else m \ p \\<^sup>* p')" + unfolding n'_def rtrancl_prev_update + by (simp add: n_rtrancl_eq) + +lemma n'_cap: + "n' p = Some (CTE cap node) \ + \node'. if p = dest then cap = c' \ m p = Some (CTE dest_cap node') + else m p = Some (CTE cap node')" + by (auto simp add: n'_def n src dest new_src_def new_dest_def modify_map_if split: if_split_asm) + +lemma n'_rev: + "n' p = Some (CTE cap node) \ + \node'. if p = dest then mdbRevocable node = isCapRevocable c' src_cap \ m p = Some (CTE dest_cap node') + else m p = Some (CTE cap node') \ mdbRevocable node = mdbRevocable node'" + by (auto simp add: n'_def n src dest new_src_def new_dest_def modify_map_if split: if_split_asm) + +lemma m_cap': + "m p = Some (CTE cap node) \ + \node'. 
if p = dest then cap = dest_cap \ n' p = Some (CTE c' node') + else n' p = Some (CTE cap node')" + apply (simp add: n'_def n new_src_def new_dest_def modify_map_if) + apply (cases "p=dest") + apply (auto simp: src dest) + done + +lemma descendants': + "descendants_of' p n' = + (if src \ descendants_of' p m \ p = src + then descendants_of' p m \ {dest} else descendants_of' p m)" + by (simp add: n'_def descendants descendants_of_prev_update) + +lemma ut_revocable_n' [simp]: + "ut_revocable' n'" + using dest + apply (clarsimp simp: ut_revocable'_def) + apply (frule n'_cap) + apply (drule n'_rev) + apply clarsimp + apply (clarsimp simp: n_dest' new_dest_def split: if_split_asm) + apply (clarsimp simp: Retype_H.isCapRevocable_def isCap_simps) + apply (drule_tac p=p and m=m in ut_revocableD', assumption) + apply (rule ut_rev) + apply simp + done + +lemma valid_nc' [simp]: + "valid_nullcaps n'" + unfolding valid_nullcaps_def + using src dest dest_prev dest_next simple safe_parent + apply (clarsimp simp: n'_def n_def modify_map_if) + apply (rule conjI) + apply (clarsimp simp: is_simple_cap'_def) + apply clarsimp + apply (rule conjI) + apply (fastforce dest!: safe_parent_Null) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) valid_nullcaps_next, rule no_0, rule dlist, rule nullcaps) + apply simp + apply clarsimp + apply (erule nullcapsD', rule nullcaps) + done + +lemma n'_prev_eq: + "n' \ p \ p' = + (if p' = mdbNext src_node \ p' \ 0 then p = dest + else if p' = dest then p = src + else m \ p \ p')" + using src dest dest_prev dest_next + apply (cases "p' = 0", simp) + apply (simp split del: if_split) + apply (cases "p' = mdbNext src_node") + apply (clarsimp simp: modify_map_apply n'_def n_def mdb_prev_def) + apply (clarsimp simp: modify_map_if) + apply (rule iffI, clarsimp) + apply clarsimp + apply (rule dlistEn, assumption, simp) + apply clarsimp + apply (case_tac cte') + apply clarsimp + apply (cases "p' = dest") + apply (clarsimp simp: modify_map_if n'_def n_def mdb_prev_def) + apply clarsimp + apply (clarsimp simp: modify_map_if n'_def n_def mdb_prev_def) + apply (cases "p' = src", simp) + apply clarsimp + apply (rule iffI, clarsimp) + apply clarsimp + apply (case_tac z) + apply clarsimp + done + +lemma m_prev_of_next: + "m \ p \ mdbNext src_node = (p = src \ mdbNext src_node \ 0)" + using src + apply (clarsimp simp: mdb_prev_def) + apply (rule iffI) + apply clarsimp + apply (rule dlistEn, assumption, clarsimp) + apply clarsimp + apply clarsimp + apply (rule dlistEn, assumption, clarsimp) + apply clarsimp + done + +lemma src_next_eq: + "m \ p \ mdbNext src_node = (if mdbNext src_node \ 0 then p = src else m \ p \ 0)" + using src + apply - + apply (rule iffI) + prefer 2 + apply (clarsimp split: if_split_asm) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (frule (1) dlist_nextD0) + apply (clarsimp simp: m_prev_of_next) + apply clarsimp + done + +lemma src_next_eq': + "m (mdbNext src_node) = Some cte \ m \ p \ mdbNext src_node = (p = src)" + by (subst src_next_eq) auto + +lemma dest_no_prev [iff]: + "\ m \ dest \ p" + using dest dest_next + apply (clarsimp simp: mdb_prev_def) + apply (rule dlistEp [where p=p], assumption, clarsimp) + apply clarsimp + done + +lemma src_prev [iff]: + "m \ src \ p = (p = mdbNext src_node \ p \ 0)" + using src + apply - + apply (rule iffI) + prefer 2 + apply (clarsimp simp: mdb_ptr_src.next_p_prev) + apply (clarsimp simp: mdb_prev_def) + apply (rule conjI) + prefer 2 + apply clarsimp + apply (rule dlistEp [where p=p], 
assumption, clarsimp) + apply simp + done + +lemma dlist' [simp]: + "valid_dlist n'" + using src dest + apply (unfold valid_dlist_def3 n_direct_eq' n'_prev_eq) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply simp + apply (intro conjI impI allI notI) + apply (fastforce simp: src_next_eq') + apply (clarsimp simp: src_next_eq split: if_split_asm) + apply (simp add: mdb_ptr_src.p_next) + apply (erule (1) dlist_nextD0) + apply clarsimp + apply clarsimp + apply clarsimp + apply (erule (1) dlist_prevD0) + done + +lemma utRange_c': + "untypedRange c' \ untypedRange src_cap" + using safe_parent src + by - (drule (1) safe_parent_for_untypedRange, simp) + +lemma capRange_c': + "capRange c' \ capRange src_cap" + using safe_parent src + by - (drule (1) safe_parent_for_capRange_capBits, simp) + +lemma not_ut_c' [simp]: + "\isUntypedCap c'" + using simple + by (simp add: is_simple_cap'_def) + +lemma utCapRange_c': + "capRange c' \ untypedRange src_cap" + using safe_parent src + by - (drule (1) safe_parent_for_capUntypedRange, simp) + +lemma ut_descendants: + "isUntypedCap src_cap \ descendants_of' src m = {}" + using safe_parent src + by (rule safe_parent_for_descendants') + +lemma ut_mdb' [simp]: + "untyped_mdb' n'" + using src dest utRange_c' capRange_c' utCapRange_c' + apply (clarsimp simp: untyped_mdb'_def) + apply (drule n'_cap)+ + apply (clarsimp simp: descendants') + apply (clarsimp split: if_split_asm) + apply (cases "isUntypedCap src_cap") + prefer 2 + apply (drule_tac p=p and p'=src and m=m in untyped_mdbD', assumption+) + apply blast + apply (rule untyped_mdb) + apply simp + apply (frule ut_descendants) + apply (drule (3) untyped_incD', rule untyped_inc) + apply clarsimp + apply blast + apply (fastforce elim: untyped_mdbD' intro!: untyped_mdb) + done + +lemma n'_badge: + "n' p = Some (CTE cap node) \ + \node'. 
if p = dest then mdbFirstBadged node = isCapRevocable c' src_cap \ m p = Some (CTE dest_cap node') + else m p = Some (CTE cap node') \ mdbFirstBadged node = mdbFirstBadged node'" + by (auto simp add: n'_def n src dest new_src_def new_dest_def modify_map_if split: if_split_asm) + +lemma src_not_ep [simp]: + "\isEndpointCap src_cap" + using safe_parent src by (rule safe_parent_not_ep') + +lemma src_not_ntfn [simp]: + "\isNotificationCap src_cap" + using safe_parent src by (rule safe_parent_not_ntfn') + +lemma c_not_ep [simp]: + "\isEndpointCap c'" + using simple by (simp add: is_simple_cap'_def) + +lemma c_not_ntfn [simp]: + "\isNotificationCap c'" + using simple by (simp add: is_simple_cap'_def) + +lemma valid_badges' [simp]: + "valid_badges n'" + using simple src dest + apply (clarsimp simp: valid_badges_def) + apply (simp add: n_direct_eq') + apply (frule_tac p=p in n'_badge) + apply (frule_tac p=p' in n'_badge) + apply (drule n'_cap)+ + apply (clarsimp split: if_split_asm) + apply (insert valid_badges) + apply (simp add: valid_badges_def) + apply blast + done + +lemma caps_contained' [simp]: + "caps_contained' n'" + using src dest capRange_c' utCapRange_c' + apply (clarsimp simp: caps_contained'_def) + apply (drule n'_cap)+ + apply clarsimp + apply (clarsimp split: if_split_asm) + apply (drule capRange_untyped) + apply simp + apply (drule capRange_untyped) + apply clarsimp + apply (cases "isUntypedCap src_cap") + prefer 2 + apply (drule_tac p=p and p'=src in caps_containedD', assumption+) + apply blast + apply (rule caps_contained) + apply blast + apply (frule capRange_untyped) + apply (drule (3) untyped_incD', rule untyped_inc) + apply (clarsimp simp: ut_descendants) + apply blast + apply (drule (3) caps_containedD', rule caps_contained) + apply blast + done + +lemma capClass_c' [simp]: + "capClass c' = capClass src_cap" + using safe_parent src by (rule safe_parent_capClass) + +lemma class_links' [simp]: + "class_links n'" + using src dest + apply (clarsimp simp: class_links_def) + apply (simp add: n_direct_eq') + apply (case_tac cte, case_tac cte') + apply clarsimp + apply (drule n'_cap)+ + apply clarsimp + apply (clarsimp split: if_split_asm) + apply (drule (2) class_linksD, rule class_links) + apply simp + apply (drule (2) class_linksD, rule class_links) + apply simp + done + +lemma untyped_inc' [simp]: + "untyped_inc' n'" + using src dest + apply (clarsimp simp: untyped_inc'_def) + apply (drule n'_cap)+ + apply (clarsimp simp: descendants') + apply (clarsimp split: if_split_asm) + apply (rule conjI) + apply clarsimp + apply (drule (3) untyped_incD', rule untyped_inc) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (frule_tac p=src and p'=p' in untyped_incD', assumption+, rule untyped_inc) + apply (clarsimp simp: ut_descendants) + apply (intro conjI, clarsimp+) + apply (drule (3) untyped_incD', rule untyped_inc) + apply clarsimp + done + +lemma sameRegion_src [simp]: + "sameRegionAs src_cap c'" + using safe_parent src + apply (simp add: safe_parent_for'_def) + done + +lemma sameRegion_src_c': + "sameRegionAs cap src_cap \ sameRegionAs cap c'" + using safe_parent simple src capRange_c' + apply (simp add: safe_parent_for'_def) + apply (erule disjE) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps capRange_def) + apply (clarsimp simp: sameRegionAs_def2 isCap_Master capRange_Master) + apply (erule disjE) + apply (elim conjE) + apply (erule disjE) + apply blast + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + done + +lemma irq_c'_new: + 
assumes irq_src: "isIRQControlCap src_cap" + shows "m p = Some (CTE cap node) \ \ sameRegionAs c' cap" + using safe_parent irq_src src + apply (clarsimp simp: safe_parent_for'_def isCap_simps) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps) + done + +lemma ut_capRange_non_empty: + "isUntypedCap src_cap \ capRange c' \ {}" + using safe_parent src unfolding safe_parent_for'_def + by (clarsimp simp: isCap_simps) + + +lemma ut_sameRegion_non_empty: + "\ isUntypedCap src_cap; sameRegionAs c' cap \ \ capRange cap \ {}" + using simple safe_parent src + apply (clarsimp simp: is_simple_cap'_def sameRegionAs_def2 isCap_Master) + apply (erule disjE) + apply (clarsimp simp: ut_capRange_non_empty dest!: capMaster_capRange) + apply clarsimp + apply (clarsimp simp: safe_parent_for'_def) + apply (erule disjE, clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps capRange_def) + done + +lemma ut_c'_new: + assumes ut_src: "isUntypedCap src_cap" + shows "m p = Some (CTE cap node) \ \ sameRegionAs c' cap" + using src simple + apply clarsimp + apply (drule untyped_mdbD', rule ut_src, assumption) + apply (clarsimp simp: is_simple_cap'_def sameRegionAs_def2 isCap_Master capRange_Master) + apply (fastforce simp: isCap_simps) + apply (frule sameRegion_capRange_sub) + apply (drule ut_sameRegion_non_empty [OF ut_src]) + apply (insert utCapRange_c') + apply blast + apply (rule untyped_mdb) + apply (simp add: ut_descendants [OF ut_src]) + done + +lemma c'_new: + "m p = Some (CTE cap node) \ \ sameRegionAs c' cap" + using safe_parent src unfolding safe_parent_for'_def + apply (elim exE conjE) + apply (erule disjE) + apply (erule irq_c'_new [rotated]) + apply (clarsimp simp: isCap_simps) + apply clarsimp + apply (drule (1) ut_c'_new) + apply simp + done + +lemma irq_control_src: + "\ isIRQControlCap src_cap; + m p = Some (CTE cap node); + sameRegionAs cap c' \ \ p = src" + using safe_parent src unfolding safe_parent_for'_def + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: sameRegionAs_def2 isCap_Master) + apply (erule disjE, clarsimp simp: isCap_simps) + apply (erule disjE, clarsimp simp: isCap_simps capRange_def) + apply (clarsimp simp: isCap_simps) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + done + +lemma not_irq_parentD: + "\ isIRQControlCap src_cap \ + isUntypedCap src_cap \ descendants_of' src m = {} \ capRange c' \ {}" + using src safe_parent unfolding safe_parent_for'_def + by (clarsimp simp: isCap_simps) + +lemma ut_src_only_ut_c_parents: + "\ isUntypedCap src_cap; sameRegionAs cap c'; m p = Some (CTE cap node) \ \ isUntypedCap cap" + using safe_parent src unfolding safe_parent_for'_def + apply clarsimp + apply (erule disjE, clarsimp simp: isCap_simps) + apply clarsimp + apply (rule ccontr) + apply (drule (3) untyped_mdbD') + apply (frule sameRegion_capRange_sub) + apply (insert utCapRange_c')[1] + apply blast + apply (rule untyped_mdb) + apply simp + done + +lemma ut_src: + "\ isUntypedCap src_cap; sameRegionAs cap c'; m p = Some (CTE cap node) \ \ + isUntypedCap cap \ untypedRange cap \ untypedRange src_cap \ {}" + apply (frule (2) ut_src_only_ut_c_parents) + apply simp + apply (frule sameRegion_capRange_sub) + apply (insert utCapRange_c')[1] + apply (simp add: untypedCapRange) + apply (drule ut_capRange_non_empty) + apply blast + done + + +lemma chunked' [simp]: + "mdb_chunked n'" + using src dest + apply (clarsimp simp: mdb_chunked_def) + apply (drule n'_cap)+ + apply (clarsimp simp: n'_trancl_eq) + apply (clarsimp split: if_split_asm) + prefer 3 + apply 
(frule (3) mdb_chunkedD, rule chunked) + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: is_chunk_def n'_trancl_eq n_rtrancl_eq' n_dest' new_dest_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply clarsimp + apply (erule_tac x=src in allE) + apply simp + apply (erule sameRegion_src_c') + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (frule_tac p=p'' in m_cap') + apply clarsimp + apply clarsimp + apply (clarsimp simp: is_chunk_def n'_trancl_eq n_rtrancl_eq' n_dest' new_dest_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply clarsimp + apply (erule_tac x=src in allE) + apply simp + apply (erule sameRegion_src_c') + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (frule_tac p=p'' in m_cap') + apply clarsimp + apply (case_tac "p' = src") + apply simp + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n_rtrancl_eq') + apply (erule disjE) + apply (simp add: n_dest' new_dest_def) + apply clarsimp + apply (drule (1) trancl_rtrancl_trancl) + apply simp + apply clarsimp + apply (drule c'_new) + apply (erule (1) notE) + apply (case_tac "p=src") + apply clarsimp + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n_rtrancl_eq') + apply (erule disjE) + apply (clarsimp simp: n_dest' new_dest_def) + apply clarsimp + apply (drule (1) trancl_rtrancl_trancl) + apply simp + apply (case_tac "isIRQControlCap src_cap") + apply (drule (2) irq_control_src) + apply simp + apply (drule not_irq_parentD) + apply clarsimp + apply (frule (2) ut_src) + apply clarsimp + apply (subgoal_tac "src \ descendants_of' p m") + prefer 2 + apply (drule (3) untyped_incD', rule untyped_inc) + apply clarsimp + apply fastforce + apply (frule_tac m=m and p=p and p'=src in mdb_chunkedD, assumption+) + apply (clarsimp simp: descendants_of'_def) + apply (drule subtree_parent) + apply (clarsimp simp: parentOf_def isMDBParentOf_def split: if_split_asm) + apply simp + apply (rule chunked) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (rule conjI) + prefer 2 + apply clarsimp + apply (drule (1) trancl_trans, simp) + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n_rtrancl_eq' split: if_split_asm) + apply (clarsimp simp: n_dest' new_dest_def) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap') + apply clarsimp + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, simp) + apply (clarsimp simp: descendants_of'_def) + apply (drule subtree_mdb_next) + apply (drule (1) trancl_trans) + apply simp + done + +lemma distinct_zombies_m: + "distinct_zombies m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma untyped_rangefree: + "\ isUntypedCap src_cap; m x = Some cte; x \ src; \ isUntypedCap (cteCap cte) \ + \ capRange (cteCap cte) \ capRange c'" + apply (frule ut_descendants) + apply (cases cte, clarsimp) + apply (frule(2) untyped_mdbD' [OF src _ _ _ _ untyped_mdb]) + apply (simp add: untypedCapRange[symmetric]) + apply (frule ut_capRange_non_empty) + apply (cut_tac capRange_c') + apply blast + apply simp + done + +lemma notZomb: + "\ isZombie src_cap" "\ isZombie c'" + using sameRegion_src simple + by (auto simp: isCap_simps sameRegionAs_def3 + simp del: sameRegion_src, + auto simp: is_simple_cap'_def isCap_simps) + +lemma notArchPage: + "\ isArchFrameCap c'" + using simple + by (clarsimp simp: isCap_simps is_simple_cap'_def) + +lemma distinct_zombies[simp]: + "distinct_zombies n'" 
+ using distinct_zombies_m + apply (simp add: n'_def distinct_zombies_nonCTE_modify_map) + apply (simp add: n_def modify_map_apply src dest) + apply (rule distinct_zombies_sameE[rotated]) + apply (simp add: src) + apply simp+ + apply (cases "isUntypedCap src_cap") + apply (erule distinct_zombies_seperateE) + apply (case_tac "y = src") + apply (clarsimp simp add: src) + apply (frule(3) untyped_rangefree) + apply (simp add: capRange_def) + apply (rule sameRegionAsE [OF sameRegion_src], simp_all) + apply (erule distinct_zombies_copyMasterE, rule src) + apply simp + apply (simp add: notZomb) + apply (simp add: notArchPage) + apply (clarsimp simp: isCap_simps) + apply (erule distinct_zombies_sameMasterE, rule dest) + apply (clarsimp simp: isCap_simps) + done + +lemma irq' [simp]: + "irq_control n'" using simple + apply (clarsimp simp: irq_control_def) + apply (frule n'_cap) + apply (drule n'_rev) + apply (clarsimp split: if_split_asm) + apply (simp add: is_simple_cap'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n'_cap) + apply (clarsimp split: if_split_asm) + apply (erule disjE) + apply (clarsimp simp: is_simple_cap'_def) + apply (erule (1) irq_controlD, rule irq_control) + done + +lemma reply_masters_rvk_fb: + "reply_masters_rvk_fb m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma reply_masters_rvk_fb' [simp]: + "reply_masters_rvk_fb n'" + using reply_masters_rvk_fb simple + apply (simp add: reply_masters_rvk_fb_def n'_def + n_def ball_ran_modify_map_eq) + apply (subst ball_ran_modify_map_eq) + apply (clarsimp simp: modify_map_def m_p is_simple_cap'_def) + apply (simp add: ball_ran_modify_map_eq m_p is_simple_cap'_def + dest_cap isCap_simps) + done + +lemma mdb: + "valid_mdb_ctes n'" + by (simp add: valid_mdb_ctes_def no_0_n' chain_n') + +end + +lemma updateCapFreeIndex_no_0: + assumes preserve:"\m m'. mdb_inv_preserve m m' + \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (no_0(Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (no_0 (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.by_products) + apply simp + apply (rule preserve) + apply (simp add:cte_wp_at_ctes_of)+ + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cteInsert_simple_mdb': + "\valid_mdb' and pspace_aligned' and pspace_distinct' and (\s. src \ dest) and K (capAligned cap) and + (\s. safe_parent_for' (ctes_of s) src cap) and K (is_simple_cap' cap) \ + cteInsert cap src dest + \\_. 
valid_mdb'\" + unfolding cteInsert_def valid_mdb'_def + apply simp + apply (rule hoare_name_pre_state) + apply (rule hoare_pre) + apply (wp updateCap_ctes_of_wp getCTE_wp' setUntypedCapAsFull_ctes + mdb_inv_preserve_updateCap mdb_inv_preserve_modify_map | clarsimp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp simp: valid_mdb_ctes_def) + apply (case_tac cte) + apply (rename_tac src_cap src_node) + apply (case_tac ctea) + apply (rename_tac dest_cap dest_node) + apply clarsimp + apply (subst modify_map_eq) + apply simp+ + apply (clarsimp simp:maskedAsFull_def is_simple_cap'_def) + apply (subgoal_tac "mdb_insert_simple' + (ctes_of sa) src src_cap src_node dest NullCap dest_node cap") + prefer 2 + apply (intro mdb_insert_simple'.intro + mdb_insert_simple.intro mdb_insert_simple_axioms.intro + mdb_ptr.intro mdb_insert.intro vmdb.intro + mdb_ptr_axioms.intro mdb_insert_axioms.intro) + apply (simp add:modify_map_def valid_mdb_ctes_maskedAsFull)+ + apply (clarsimp simp:nullPointer_def)+ + apply ((clarsimp simp:valid_mdb_ctes_def)+) + apply (drule mdb_insert_simple'.mdb) + apply (clarsimp simp:valid_mdb_ctes_def) + done + +lemma cteInsert_valid_globals_simple: + "\valid_global_refs' and (\s. safe_parent_for' (ctes_of s) src cap)\ + cteInsert cap src dest + \\rv. valid_global_refs'\" + apply (simp add: cteInsert_def) + apply (rule hoare_pre) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) safe_parent_for_capRange_capBits) + apply (drule (1) valid_global_refsD_with_objSize) + apply (auto elim: order_trans[rotated]) + done + +lemma cteInsert_simple_invs: + "\invs' and cte_wp_at' (\c. cteCap c=NullCap) dest and valid_cap' cap and + (\s. src \ dest) and (\s. safe_parent_for' (ctes_of s) src cap) + and (\s. \irq. cap = IRQHandlerCap irq \ irq_issued' irq s) + and cte_at' src + and ex_cte_cap_to' dest and K (is_simple_cap' cap)\ + cteInsert cap src dest + \\rv. invs'\" + apply (rule hoare_pre) + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wp cur_tcb_lift sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift + valid_irq_node_lift irqs_masked_lift sym_heap_sched_pointers_lift + cteInsert_simple_mdb' cteInsert_valid_globals_simple + cteInsert_norq | simp add: pred_tcb_at'_def)+ + apply (auto simp: invs'_def valid_state'_def valid_pspace'_def + is_simple_cap'_def untyped_derived_eq_def o_def + elim: valid_capAligned) + done + +lemma ensureEmptySlot_stronger [wp]: + "\\s. cte_wp_at' (\c. cteCap c = NullCap) p s \ P s\ ensureEmptySlot p \\rv. P\, -" + apply (simp add: ensureEmptySlot_def whenE_def unlessE_whenE) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at'_def) + done + +lemma lookupSlotForCNodeOp_real_cte_at'[wp]: + "\valid_objs' and valid_cap' rootCap\ + lookupSlotForCNodeOp isSrc rootCap cref depth + \\rv. real_cte_at' rv\,-" + apply (simp add: lookupSlotForCNodeOp_def split_def unlessE_def + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp resolveAddressBits_real_cte_at' | simp | wp (once) hoare_drop_imps)+ + done + +lemma cte_refs_maskCapRights[simp]: + "cte_refs' (maskCapRights rghts cap) = cte_refs' cap" + by (rule ext, cases cap, + simp_all add: maskCapRights_def isCap_defs Let_def + AARCH64_H.maskCapRights_def + split del: if_split + split: arch_capability.split) + +lemma getSlotCap_cap_to'[wp]: + "\\\ getSlotCap cp \\rv s. \r\cte_refs' rv (irq_node' s). 
ex_cte_cap_to' r s\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (fastforce simp: cte_wp_at_ctes_of ex_cte_cap_to'_def) + done + +lemma getSlotCap_cap_to2: + "\\ and K (\cap. P cap \ Q cap)\ + getSlotCap slot + \\rv s. P rv \ (\x \ cte_refs' rv (irq_node' s). ex_cte_cap_wp_to' Q x s)\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of ex_cte_cap_wp_to'_def) + apply fastforce + done + +lemma locateSlot_cap_to'[wp]: + "\\s. isCNodeCap cap \ (\r \ cte_refs' cap (irq_node' s). ex_cte_cap_wp_to' P r s)\ + locateSlotCNode (capCNodePtr cap) n (v && mask (capCNodeBits cap)) + \ex_cte_cap_wp_to' P\" + apply (simp add: locateSlot_conv) + apply wp + apply (clarsimp dest!: isCapDs valid_capAligned + simp: objBits_simps' mult.commute capAligned_def cte_level_bits_def shiftl_t2n) + apply (erule bspec) + apply (clarsimp intro!: word_and_le1) + done + +lemma rab_cap_to'': + assumes P: "\cap. isCNodeCap cap \ P cap" + shows + "s \ \\s. isCNodeCap cap \ (\r\cte_refs' cap (irq_node' s). ex_cte_cap_wp_to' P r s)\ + resolveAddressBits cap cref depth + \\rv s. ex_cte_cap_wp_to' P (fst rv) s\,\\\\" +proof (induct arbitrary: s rule: resolveAddressBits.induct) + case (1 cap fn cref depth) + show ?case + apply (subst resolveAddressBits.simps) + apply (simp add: Let_def split_def cap_case_CNodeCap[unfolded isCap_simps] + split del: if_split cong: if_cong) + apply (rule hoare_pre_spec_validE) + apply ((elim exE | wp (once) spec_strengthen_postE[OF "1.hyps"])+, + (rule refl conjI | simp add: in_monad split del: if_split del: cte_refs'.simps)+) + apply (wp getSlotCap_cap_to2 + | simp add: assertE_def split_def whenE_def locateSlotCap_def + split del: if_split | simp add: imp_conjL[symmetric] + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: P) + done +qed + +lemma rab_cap_to'[wp]: + "\(\s. isCNodeCap cap \ (\r\cte_refs' cap (irq_node' s). ex_cte_cap_wp_to' P r s)) + and K (\cap. isCNodeCap cap \ P cap)\ + resolveAddressBits cap cref depth + \\rv s. ex_cte_cap_wp_to' P (fst rv) s\,-" + apply (rule hoare_gen_asmE) + apply (unfold validE_R_def) + apply (rule use_spec, rule rab_cap_to'') + apply simp + done + +lemma lookupCNode_cap_to'[wp]: + "\\s. \r\cte_refs' rootCap (irq_node' s). ex_cte_cap_to' r s\ + lookupSlotForCNodeOp isSrc rootCap cref depth + \\p. ex_cte_cap_to' p\,-" + apply (simp add: lookupSlotForCNodeOp_def Let_def split_def unlessE_def + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps | simp)+ + done + +lemma badge_derived'_refl[simp]: "badge_derived' c c" + by (simp add: badge_derived'_def) + +lemma derived'_not_Null: + "\ is_derived' m p c capability.NullCap" + "\ is_derived' m p capability.NullCap c" + by (clarsimp simp: is_derived'_def badge_derived'_def)+ + +lemma getSlotCap_wp: + "\\s. (\cap. cte_wp_at' (\c. cteCap c = cap) p s \ Q cap s)\ + getSlotCap p \Q\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at'_def) + done + +lemma storeWordUser_typ_at' : + "\\s. P (typ_at' T p s)\ storeWordUser v w \\_ s. P (typ_at' T p s)\" + unfolding storeWordUser_def by wpsimp + +lemma arch_update_updateCap_invs: + "\cte_wp_at' (is_arch_update' cap) p and invs' and valid_cap' cap\ + updateCap p cap + \\_. 
invs'\" + apply (simp add: updateCap_def) + apply (wp arch_update_setCTE_invs getCTE_wp') + apply clarsimp + done + +lemma setCTE_set_cap_ready_queues_relation_valid_corres: + assumes pre: "ready_queues_relation s s'" + assumes step_abs: "(x, t) \ fst (set_cap cap slot s)" + assumes step_conc: "(y, t') \ fst (setCTE slot' cap' s')" + shows "ready_queues_relation t t'" + apply (clarsimp simp: ready_queues_relation_def) + apply (insert pre) + apply (rule use_valid[OF step_abs set_cap_exst]) + apply (rule use_valid[OF step_conc setCTE_ksReadyQueues]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedNexts_of]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedPrevs_of]) + apply (clarsimp simp: ready_queues_relation_def Let_def) + using use_valid[OF step_conc setCTE_inQ_opt_pred] + by fast + +lemma updateCap_same_master: + "\ cap_relation cap cap' \ \ + corres dc (valid_objs and pspace_aligned and pspace_distinct and + cte_wp_at (\c. cap_master_cap c = cap_master_cap cap \ + \is_reply_cap c \ \is_master_reply_cap c \ + \is_ep_cap c \ \is_ntfn_cap c) slot) + (pspace_aligned' and pspace_distinct' and cte_at' (cte_map slot)) + (set_cap cap slot) + (updateCap (cte_map slot) cap')" (is "_ \ corres _ ?P ?P' _ _") + apply (unfold updateCap_def) + apply (rule corres_guard_imp) + apply (rule_tac Q="?P" and R'="\cte. ?P' and (\s. ctes_of s (cte_map slot) = Some cte)" + in corres_symb_exec_r_conj) + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply clarsimp + apply (clarsimp simp add: state_relation_def) + apply (drule (1) pspace_relationsD) + apply (frule (4) set_cap_not_quite_corres_prequel) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption + apply assumption + apply simp + apply (rule refl) + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply (clarsimp simp: pspace_relations_def) + apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ready_queues_relation a b" for a b \ -\) + subgoal by (erule setCTE_set_cap_ready_queues_relation_valid_corres; assumption) + apply (rule conjI) + apply (frule setCTE_pspace_only) + apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + apply (rule conjI) + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (intro allI conjI) + apply (frule use_valid[OF _ setCTE_gsUserPages]) + prefer 2 + apply simp+ + apply (frule use_valid[OF _ setCTE_gsCNodes]) + prefer 2 + apply simp+ + apply (rule use_valid[OF _ setCTE_arch]) + prefer 2 + apply simp+ + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + prefer 2 + apply (rule conjI) + prefer 2 + apply (frule setCTE_pspace_only) + apply clarsimp + apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + apply (frule set_cap_caps_of_state_monad) + apply (drule is_original_cap_set_cap) + apply clarsimp + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (clarsimp simp: revokable_relation_def simp del: fun_upd_apply) + apply (clarsimp split: if_split_asm) + apply (drule cte_map_inj_eq) + prefer 2 + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (simp add: null_filter_def split: if_split_asm) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule caps_of_state_cte_at) + apply fastforce + apply fastforce + apply fastforce + apply clarsimp + apply (simp add: null_filter_def 
split: if_split_asm) + apply (erule_tac x=aa in allE, erule_tac x=bb in allE) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule disjE) + apply (clarsimp simp: cap_master_cap_simps dest!: cap_master_cap_eqDs) + apply (case_tac rv) + apply clarsimp + apply (subgoal_tac "(aa,bb) \ slot") + prefer 2 + apply clarsimp + apply (simp add: null_filter_def cte_wp_at_caps_of_state split: if_split_asm) + apply (clarsimp simp: cdt_relation_def) + apply (frule set_cap_caps_of_state_monad) + apply (frule mdb_set_cap, frule exst_set_cap) + apply clarsimp + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (frule cte_wp_at_norm) + apply (clarsimp simp del: fun_upd_apply) + apply (frule (1) pspace_relation_ctes_ofI) + apply fastforce + apply fastforce + apply (clarsimp simp del: fun_upd_apply) + apply (subst same_master_descendants) + apply assumption + apply (clarsimp simp: master_cap_relation) + apply (frule_tac d=c in master_cap_relation [symmetric], assumption) + apply (frule is_reply_cap_relation[symmetric], + drule is_reply_master_relation[symmetric])+ + apply simp + apply (drule masterCap.intro) + apply (drule masterCap.isReplyCap) + apply simp + apply (drule is_ep_cap_relation)+ + apply (drule master_cap_ep) + apply simp + apply (drule is_ntfn_cap_relation)+ + apply (drule master_cap_ntfn) + apply simp + apply (simp add: in_set_cap_cte_at) + apply(simp add: cdt_list_relation_def split del: if_split) + apply(intro allI impI) + apply(erule_tac x=aa in allE)+ + apply(erule_tac x=bb in allE)+ + apply(clarsimp split: if_split_asm) + apply(case_tac rv, clarsimp) + apply (wp getCTE_wp')+ + apply clarsimp + apply (rule no_fail_pre, wp) + apply clarsimp + apply assumption + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma updateCapFreeIndex_valid_mdb_ctes: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + and coin :"\m cte. \m src = Some cte\ \ (\cte'. (Q m) src = Some cte' \ cteCap cte = cteCap cte')" + and assoc :"\m f. Q (modify_map m src (cteCap_update f)) = modify_map (Q m) src (cteCap_update f)" + shows + "\\s. usableUntypedRange (capFreeIndex_update (\_. index) cap) \ usableUntypedRange cap \ isUntypedCap cap + \ valid_mdb_ctes (Q (ctes_of s)) \ cte_wp_at' (\c. cteCap c = cap) src s\ + updateCap src (capFreeIndex_update (\_. index) cap) + \\r s. (valid_mdb_ctes (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. 
index) cap))))") + apply (clarsimp simp:valid_mdb_ctes_def) + apply (intro conjI) + apply ((simp add:mdb_inv_preserve.preserve_stuff mdb_inv_preserve.by_products)+)[7] + apply (rule mdb_inv_preserve.untyped_inc') + apply assumption + apply (clarsimp simp:assoc cte_wp_at_ctes_of) + apply (clarsimp simp:modify_map_def split:if_splits) + apply (drule coin) + apply clarsimp + apply (erule(1) subsetD) + apply simp + apply (simp_all add:mdb_inv_preserve.preserve_stuff mdb_inv_preserve.by_products) + apply (rule preserve) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ + done + +lemma usableUntypedRange_mono1: + "is_aligned ptr sz \ idx \ 2 ^ sz \ idx' \ 2 ^ sz + \ sz < word_bits + \ idx \ idx' + \ usableUntypedRange (UntypedCap dev ptr sz idx) + \ usableUntypedRange (UntypedCap dev' ptr sz idx')" + apply clarsimp + apply (rule word_plus_mono_right) + apply (rule of_nat_mono_maybe_le[THEN iffD1]) + apply (subst word_bits_def[symmetric]) + apply (erule less_le_trans[OF _ power_increasing]) + apply simp + apply simp + apply (subst word_bits_def[symmetric]) + apply (erule le_less_trans) + apply (erule less_le_trans[OF _ power_increasing]) + apply simp+ + apply (erule is_aligned_no_wrap') + apply (rule word_of_nat_less) + apply simp + done + +lemma usableUntypedRange_mono2: + "isUntypedCap cap + \ isUntypedCap cap' + \ capAligned cap \ capFreeIndex cap \ 2 ^ capBlockSize cap + \ capFreeIndex cap' \ 2 ^ capBlockSize cap' + \ capFreeIndex cap \ capFreeIndex cap' + \ capPtr cap' = capPtr cap + \ capBlockSize cap' = capBlockSize cap + \ usableUntypedRange cap \ usableUntypedRange cap'" + apply (clarsimp simp only: isCap_simps capability.sel del: subsetI) + apply (rule usableUntypedRange_mono1, auto simp: capAligned_def) + done + +lemma ctes_of_cte_wpD: + "ctes_of s p = Some cte \ cte_wp_at' ((=) cte) p s" + by (simp add: cte_wp_at_ctes_of) + +lemma updateFreeIndex_forward_valid_objs': + "\\s. valid_objs' s \ cte_wp_at' ((\cap. isUntypedCap cap + \ capFreeIndex cap \ idx \ idx \ 2 ^ capBlockSize cap + \ is_aligned (of_nat idx :: machine_word) minUntypedSizeBits) o cteCap) src s\ + updateFreeIndex src idx + \\r s. valid_objs' s\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def updateCap_def getSlotCap_def) + apply (wp getCTE_wp') + apply clarsimp + apply (frule(1) CSpace1_R.ctes_of_valid) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps capAligned_def + valid_cap_simps' is_aligned_weaken[OF is_aligned_triv]) + apply (clarsimp simp add: valid_untyped'_def + simp del: usableUntypedRange.simps) + apply (erule allE, erule notE, erule ko_wp_at'_weakenE) + apply (rule disjCI2, simp only: simp_thms) + apply (rule notI, erule notE, erule disjoint_subset2[rotated]) + apply (rule usableUntypedRange_mono1, simp_all) + done + +crunches updateFreeIndex + for pspace_aligned'[wp]: "pspace_aligned'" + and pspace_distinct'[wp]: "pspace_distinct'" + and no_0_obj[wp]: "no_0_obj'" + and pspace_canonical'[wp]: pspace_canonical' + +lemma updateFreeIndex_forward_valid_mdb': + "\\s. valid_mdb' s \ valid_objs' s \ cte_wp_at' ((\cap. isUntypedCap cap + \ capFreeIndex cap \ idx \ idx \ 2 ^ capBlockSize cap) o cteCap) src s\ + updateFreeIndex src idx + \\r s. 
valid_mdb' s\" + apply (simp add: valid_mdb'_def updateFreeIndex_def + updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateCapFreeIndex_valid_mdb_ctes getCTE_wp' | simp)+ + apply clarsimp + apply (frule(1) CSpace1_R.ctes_of_valid) + apply (clarsimp simp: cte_wp_at_ctes_of del: subsetI) + apply (rule usableUntypedRange_mono2, + auto simp add: isCap_simps valid_cap_simps' capAligned_def) + done + +lemma updateFreeIndex_forward_invs': + "\\s. invs' s \ cte_wp_at' ((\cap. isUntypedCap cap + \ capFreeIndex cap \ idx \ idx \ 2 ^ capBlockSize cap + \ is_aligned (of_nat idx :: machine_word) minUntypedSizeBits) o cteCap) src s\ + updateFreeIndex src idx + \\r s. invs' s\" + apply (clarsimp simp:invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (rule hoare_vcg_conj_lift) + apply (simp add: valid_pspace'_def, wp updateFreeIndex_forward_valid_objs' + updateFreeIndex_forward_valid_mdb') + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def) + apply (wp sch_act_wf_lift valid_queues_lift updateCap_iflive' tcb_in_cur_domain'_lift + | simp add: pred_tcb_at'_def)+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: ifunsafe'_def3 cteInsert_def setUntypedCapAsFull_def + split del: if_split) + apply wp+ + apply (wp valid_irq_node_lift) + apply (rule hoare_vcg_conj_lift) + apply (simp add:updateCap_def) + apply (wp setCTE_irq_handlers' getCTE_wp) + apply (simp add:updateCap_def) + apply (wp irqs_masked_lift cur_tcb_lift ct_idle_or_in_cur_domain'_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp + sym_heap_sched_pointers_lift valid_bitmaps_lift + | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] + | simp add: getSlotCap_def)+ + apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) + apply (clarsimp simp: isCap_simps valid_pspace'_def) + apply (frule(1) valid_global_refsD_with_objSize) + apply clarsimp + apply (intro conjI allI impI) + apply (clarsimp simp: modify_map_def cteCaps_of_def ifunsafe'_def3 split:if_splits) + apply (drule_tac x=src in spec) + apply (clarsimp simp:isCap_simps) + apply (rule_tac x = cref' in exI) + apply clarsimp + apply (drule_tac x = cref in spec) + apply clarsimp + apply (rule_tac x = cref' in exI) + apply clarsimp + apply (erule untyped_ranges_zero_fun_upd, simp_all) + apply (clarsimp simp: untypedZeroRange_def cteCaps_of_def isCap_simps) + done + +lemma no_fail_getSlotCap: + "no_fail (cte_at' p) (getSlotCap p)" + apply (rule no_fail_pre) + apply (simp add: getSlotCap_def | wp)+ + done + +end +end diff --git a/proof/refine/AARCH64/Corres.thy b/proof/refine/AARCH64/Corres.thy new file mode 100644 index 0000000000..01c1985822 --- /dev/null +++ b/proof/refine/AARCH64/Corres.thy @@ -0,0 +1,15 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Corres +imports StateRelation "CorresK.CorresK_Lemmas" +begin + +text \Instantiating the corres framework to this particular state relation.\ +abbreviation + "corres \ corres_underlying state_relation False True" + +end diff --git a/proof/refine/AARCH64/Detype_R.thy b/proof/refine/AARCH64/Detype_R.thy new file mode 100644 index 0000000000..ca15168e2b --- /dev/null +++ b/proof/refine/AARCH64/Detype_R.thy @@ -0,0 +1,5229 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Detype_R +imports Retype_R +begin + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +text \Establishing that the invariants are maintained + when a region of memory is detyped, that is, + removed from the model.\ + +definition + "descendants_range_in' S p \ + \m. \p' \ descendants_of' p m. \c n. m p' = Some (CTE c n) \ capRange c \ S = {}" + +lemma null_filter_simp'[simp]: + "null_filter' (null_filter' x) = null_filter' x" + apply (rule ext) + apply (auto simp:null_filter'_def split:if_splits) + done + +lemma descendants_range_in'_def2: + "descendants_range_in' S p = (\m. \p'\descendants_of' p (null_filter' m). + \c n. (null_filter' m) p' = Some (CTE c n) \ capRange c \ S = {})" + apply (clarsimp simp:descendants_range_in'_def + split:if_splits) + apply (rule ext) + apply (rule subst[OF null_filter_descendants_of']) + apply simp + apply (rule iffI) + apply (clarsimp simp:null_filter'_def)+ + apply (drule(1) bspec) + apply (elim allE impE ballE) + apply (rule ccontr) + apply (clarsimp split:if_splits simp:descendants_of'_def) + apply (erule(1) subtree_not_Null) + apply fastforce + apply simp + done + +definition + "descendants_range' cap p \ + \m. \p' \ descendants_of' p m. \c n. m p' = Some (CTE c n) \ capRange c \ capRange cap = {}" + +lemma descendants_rangeD': + "\ descendants_range' cap p m; m \ p \ p'; m p' = Some (CTE c n) \ + \ capRange c \ capRange cap = {}" + by (simp add: descendants_range'_def descendants_of'_def) + +lemma descendants_range_in_lift': + assumes st: "\P. \\s. Q s \ P ((swp descendants_of') (null_filter' (ctes_of s)))\ + f \\r s. P ((swp descendants_of') (null_filter' (ctes_of s)))\" + assumes cap_range: + "\P p. \\s. Q' s \ cte_wp_at' (\c. P (capRange (cteCap c))) p s\ f \\r s. cte_wp_at' (\c. P (capRange (cteCap c))) p s\" + shows "\\s. Q s \ Q' s \ descendants_range_in' S slot (ctes_of s)\ f \\r s. descendants_range_in' S slot (ctes_of s)\" + apply (clarsimp simp:descendants_range_in'_def2) + apply (subst swp_def[where f = descendants_of', THEN meta_eq_to_obj_eq, + THEN fun_cong, THEN fun_cong, symmetric])+ + apply (simp only: Ball_def[unfolded imp_conv_disj]) + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift st cap_range) + apply (rule_tac Q = "\r s. cte_wp_at' (\c. capRange (cteCap c) \ S = {}) x s" + in hoare_strengthen_post) + apply (wp cap_range) + apply (clarsimp simp:cte_wp_at_ctes_of null_filter'_def) + apply clarsimp + apply (drule spec, drule(1) mp) + apply (subst (asm) null_filter_descendants_of') + apply simp + apply (case_tac "(ctes_of s) x") + apply (clarsimp simp:descendants_of'_def null_filter'_def subtree_target_Some) + apply (case_tac a) + apply (clarsimp simp:cte_wp_at_ctes_of null_filter'_def split:if_splits) + done + +lemma descendants_range_inD': + "\descendants_range_in' S p ms; p'\descendants_of' p ms; ms p' = Some cte\ + \ capRange (cteCap cte) \ S = {}" + apply (case_tac cte) + apply (auto simp:descendants_range_in'_def cte_wp_at_ctes_of dest!:bspec) + done +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma descendants_range'_def2: + "descendants_range' cap p = descendants_range_in' (capRange cap) p" + by (simp add: descendants_range_in'_def descendants_range'_def) + + +defs deletionIsSafe_def: + "deletionIsSafe \ \ptr bits s. \p t m r. + (cte_wp_at' (\cte. cteCap cte = capability.ReplyCap t m r) p s \ + t \ mask_range ptr bits) \ + (\ko. ksPSpace s p = Some (KOArch ko) \ p \ mask_range ptr bits \ 6 \ bits)" + +defs deletionIsSafe_delete_locale_def: + "deletionIsSafe_delete_locale \ \ptr bits s. \p. 
ko_wp_at' live' p s \ p \ mask_range ptr bits" + +defs ksASIDMapSafe_def: + "ksASIDMapSafe \ \s. True" + +defs cNodePartialOverlap_def: + "cNodePartialOverlap \ \cns inRange. \p n. cns p = Some n + \ (\ is_aligned p (cte_level_bits + n) + \ cte_level_bits + n \ word_bits + \ (\ mask_range p (cte_level_bits + n) \ {p. inRange p} + \ \ mask_range p (cte_level_bits + n) \ {p. \ inRange p}))" + +defs pTablePartialOverlap_def: + "pTablePartialOverlap \ \pts inRange. + \p pt_t. pts p = Some pt_t \ + (\ is_aligned p (pt_bits pt_t) \ + (\ mask_range p (pt_bits pt_t) \ {p. inRange p} \ + \ mask_range p (pt_bits pt_t) \ {p. \ inRange p}))" + + +(* FIXME: move *) +lemma deleteObjects_def2: + "is_aligned ptr bits \ + deleteObjects ptr bits = do + stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; + doMachineOp (freeMemory ptr bits); + stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ mask_range ptr bits)) []; + stateAssert (\s. \ pTablePartialOverlap (gsPTTypes (ksArchState s)) (\x. x \ mask_range ptr bits)) []; + modify (\s. s \ ksPSpace := \x. if x \ mask_range ptr bits + then None else ksPSpace s x, + gsUserPages := \x. if x \ mask_range ptr bits + then None else gsUserPages s x, + gsCNodes := \x. if x \ mask_range ptr bits + then None else gsCNodes s x, + ksArchState := gsPTTypes_update (\_ x. if x \ mask_range ptr bits + then Nothing + else gsPTTypes (ksArchState s) x) + (ksArchState s)\); + stateAssert ksASIDMapSafe [] + od" + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def o_def) + apply (rule bind_eqI, rule ext) + apply (rule bind_eqI, rule ext) + apply (rule bind_eqI, rule ext) + apply (simp add: bind_assoc[symmetric]) + apply (rule bind_cong[rotated], rule refl) + apply (simp add: bind_assoc modify_modify deleteRange_def gets_modify_def) + apply (rule ext, simp add: exec_modify stateAssert_def assert_def bind_assoc exec_get + NOT_eq[symmetric] neg_mask_in_mask_range exec_gets) + apply (clarsimp simp: simpler_modify_def) + apply (simp add: data_map_filterWithKey_def split: if_split_asm) + apply (rule arg_cong2[where f=ksArchState_update]) + apply (rule ext) + apply clarsimp + apply (rename_tac s, case_tac s, clarsimp) + apply (rename_tac ksArch ksMachine, case_tac ksArch, clarsimp) + apply (simp add: NOT_eq[symmetric] mask_in_range ext) + apply (rule arg_cong2[where f=gsCNodes_update]) + apply (simp add: NOT_eq[symmetric] mask_in_range ext) + apply (rule arg_cong2[where f=gsUserPages_update]) + apply (simp add: NOT_eq[symmetric] mask_in_range ext) + apply (rule arg_cong[where f="\f. ksPSpace_update f s" for s]) + apply (simp add: NOT_eq[symmetric] mask_in_range ext split: option.split) + done + +lemma deleteObjects_def3: + "deleteObjects ptr bits = + do + assert (is_aligned ptr bits); + stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; + doMachineOp (freeMemory ptr bits); + stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ mask_range ptr bits)) []; + stateAssert (\s. \ pTablePartialOverlap (gsPTTypes (ksArchState s)) (\x. x \ mask_range ptr bits)) []; + modify (\s. s \ ksPSpace := \x. if x \ mask_range ptr bits + then None else ksPSpace s x, + gsUserPages := \x. if x \ mask_range ptr bits + then None else gsUserPages s x, + gsCNodes := \x. if x \ mask_range ptr bits + then None else gsCNodes s x, + ksArchState := gsPTTypes_update (\_ x. 
if x \ mask_range ptr bits + then Nothing + else gsPTTypes (ksArchState s) x) + (ksArchState s) \); + stateAssert ksASIDMapSafe [] + od" + apply (cases "is_aligned ptr bits") + apply (simp add: deleteObjects_def2) + apply (simp add: deleteObjects_def is_aligned_mask + unless_def alignError_def) + done + +lemma obj_relation_cuts_in_obj_range: + "\ (y, P) \ obj_relation_cuts ko x; x \ obj_range x ko; + kheap s x = Some ko; valid_objs s; pspace_aligned s \ \ y \ obj_range x ko" + apply (cases ko; simp) + apply (clarsimp split: if_split_asm) + apply (subgoal_tac "cte_at (x, ya) s") + apply (drule(2) cte_at_cte_map_in_obj_bits) + apply (simp add: obj_range_def) + apply (fastforce intro: cte_wp_at_cteI) + apply (frule(1) pspace_alignedD) + apply (frule valid_obj_sizes, erule ranI) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp) + apply (clarsimp simp only: obj_range_def atLeastAtMost_iff + obj_bits.simps arch_kobj_size.simps) + apply (rule context_conjI) + apply (erule is_aligned_no_wrap') + apply (simp add: table_size_def) + apply (rule shiftl_less_t2n) + apply (erule order_le_less_trans) + apply (simp add: bit_simps mask_def) + apply (simp add: bit_simps) + apply (subst add_diff_eq[symmetric]) + apply (rule word_plus_mono_right) + apply (subst word_less_sub_le, simp add: bit_simps) + apply (rule shiftl_less_t2n) + apply (erule order_le_less_trans) + apply (simp add: bit_simps mask_def) + apply (simp add: bit_simps) + apply (simp add: field_simps) + apply (clarsimp simp only: obj_range_def atLeastAtMost_iff) + apply (rule conjI) + apply (erule is_aligned_no_wrap') + apply (simp add: shiftl_t2n mult_ac) + apply (erule word_less_power_trans2) + apply (rule pbfs_atleast_pageBits) + using pbfs_less_wb' + apply (simp add: word_bits_def) + apply (subst add_diff_eq[symmetric]) + apply (rule word_plus_mono_right; simp add: add_diff_eq) + apply (simp add: shiftl_t2n mult_ac) + apply (rule word_less_power_trans2; (simp add: pbfs_atleast_pageBits)?) + using pbfs_less_wb' + apply (simp add: word_bits_def) + done + +lemma obj_relation_cuts_eqv_base_in_detype_range: + "\ (y, P) \ obj_relation_cuts ko x; kheap s x = Some ko; + valid_objs s; pspace_aligned s; + valid_untyped (cap.UntypedCap d base bits idx) s \ + \ (x \ mask_range base bits) = (y \ mask_range base bits)" + apply (simp add: valid_untyped_def mask_def add_diff_eq del: atLeastAtMost_iff) + apply (subgoal_tac "x \ obj_range x ko") + apply (subgoal_tac "y \ obj_range x ko") + apply blast + apply (erule(4) obj_relation_cuts_in_obj_range) + apply (simp add: obj_range_def) + apply (rule is_aligned_no_overflow) + apply (erule(1) pspace_alignedD) + done + +lemma detype_pspace_relation: + assumes psp: "pspace_relation (kheap s) (ksPSpace s')" + and bwb: "bits < word_bits" + and al: "is_aligned base bits" + and vs: "valid_pspace s" + and vu: "valid_untyped (cap.UntypedCap d base bits idx) s" + shows "pspace_relation (kheap (detype (mask_range base bits) s)) + (\x. if x \ mask_range base bits then None else ksPSpace s' x)" + (is "pspace_relation ?ps ?ps'") +proof - + let ?range = "mask_range base bits" + let ?ps'' = "(kheap s |` (-?range))" + + have pa: "pspace_aligned s" and vo: "valid_objs s" + using vs by (simp add: valid_pspace_def)+ + + have pspace: + "\x. \ x \ ?range; x \ dom (kheap s) \ \ ?ps x = kheap s x" + by (clarsimp simp add: detype_def field_simps) + + have pspace'': + "\x. 
\ x \ ?range; x \ dom (kheap s) \ \ ?ps'' x = kheap s x" + by (clarsimp simp add: detype_def) + + have psdom_pre: "dom ?ps = (dom (kheap s) - ?range)" + by (fastforce simp:field_simps) + + show ?thesis + unfolding pspace_relation_def + proof (intro conjI) + + have domeq': "dom (ksPSpace s') = pspace_dom (kheap s)" + using psp by (simp add: pspace_relation_def) + + note eqv_base_in = obj_relation_cuts_eqv_base_in_detype_range + [OF _ _ vo pa vu] + + note atLeastAtMost_iff[simp del] + show domeq: "pspace_dom ?ps = dom ?ps'" + apply (simp add: dom_if_None domeq') + apply (simp add: pspace_dom_def detype_def dom_if_None) + apply (intro set_eqI iffI, simp_all) + apply (clarsimp simp: eqv_base_in field_simps) + apply (rule rev_bexI, erule domI) + apply (simp add: image_def, erule rev_bexI, simp) + apply (elim exE bexE DiffE conjE domE) + apply (rule bexI, assumption) + apply (clarsimp simp add: eqv_base_in field_simps) + done + + show "\x\dom ?ps. + \(y, P)\obj_relation_cuts (the (?ps x)) x. + P (the (?ps x)) + (the (if y \ ?range then None else ksPSpace s' y))" + using psp + apply (simp add: pspace_relation_def psdom_pre split del: if_split) + apply (erule conjE, rule ballI, erule DiffE, drule(1) bspec) + apply (erule domE) + apply (simp add: field_simps detype_def cong: conj_cong) + apply (erule ballEI, clarsimp) + apply (simp add: eqv_base_in) + done + qed +qed + +declare plus_Collect_helper2[simp] + +lemma cte_map_obj_range_helper: + "\ cte_at cref s; pspace_aligned s; valid_objs s \ + \ \ko. kheap s (fst cref) = Some ko \ cte_map cref \ obj_range (fst cref) ko" + apply (drule(2) cte_at_cte_map_in_obj_bits) + apply (clarsimp simp: obj_range_def) + done + +lemma cte_map_untyped_range: + "\ s \ cap; cte_at cref s; pspace_aligned s; valid_objs s \ + \ (cte_map cref \ untyped_range cap) = (fst cref \ untyped_range cap)" + apply (cases cap, simp_all) + apply (drule(2) cte_map_obj_range_helper) + apply (clarsimp simp: valid_cap_def valid_untyped_def) + apply (elim allE, drule(1) mp) + apply (rule iffI) + apply (erule impE) + apply (rule notemptyI[where x="cte_map cref"]) + apply simp + apply clarsimp + apply (drule subsetD [OF _ p_in_obj_range]) + apply simp+ + apply (erule impE) + apply (rule notemptyI[where x="fst cref"]) + apply (simp add: p_in_obj_range) + apply clarsimp + apply (drule(1) subsetD) + apply simp + done + +lemma pspace_aligned'_cut: + "pspace_aligned' s \ + pspace_aligned' (s \ ksPSpace := \x. if P x then None else ksPSpace s x\)" + by (simp add: pspace_aligned'_def dom_if_None) + +lemma pspace_distinct'_cut: + "pspace_distinct' s \ + pspace_distinct' (s \ ksPSpace := \x. if P x then None else ksPSpace s x\)" + by (simp add: pspace_distinct'_def dom_if_None ps_clear_def Diff_Int_distrib) + +lemma ko_wp_at_delete': + "pspace_distinct' s \ + ko_wp_at' P p (s \ ksPSpace := \x. if base \ x \ x \ base + mask magnitude then None else ksPSpace s x \) + = (\ (base \ p \ p \ base + mask magnitude) \ ko_wp_at' P p s)" + apply (simp add: ko_wp_at'_def ps_clear_def dom_if_None) + apply (intro impI iffI) + apply clarsimp + apply (drule(1) pspace_distinctD') + apply (simp add: ps_clear_def) + apply (clarsimp simp: Diff_Int_distrib) + done + +lemma obj_at_delete': + "pspace_distinct' s \ + obj_at' P p (s \ ksPSpace := \x. 
if base \ x \ x \ base + mask magnitude then None else ksPSpace s x \) + = (\ (base \ p \ p \ base + mask magnitude) \ obj_at' P p s)" + unfolding obj_at'_real_def + by (rule ko_wp_at_delete') + +lemma cte_wp_at_delete': + "\ s \' UntypedCap d base magnitude idx; pspace_distinct' s \ \ + cte_wp_at' P p (s \ ksPSpace := \x. if base \ x \ x \ base + mask magnitude then None else ksPSpace s x \) + = (\ (base \ p \ p \ base + mask magnitude) \ cte_wp_at' P p s)" + apply (simp add: cte_wp_at_obj_cases' obj_at_delete') + apply (subgoal_tac "\Q n. obj_at' Q (p - n) s \ tcb_cte_cases n \ None \ + ((p - n) \ mask_range base magnitude) + = (p \ mask_range base magnitude)") + apply auto[1] + apply (clarsimp simp: obj_at'_real_def valid_cap'_def + valid_untyped'_def + simp del: atLeastAtMost_iff) + apply (drule_tac x="p - n" in spec) + apply (clarsimp simp: ko_wp_at'_def capAligned_def + simp del: atLeastAtMost_iff) + apply (thin_tac "is_aligned x minUntypedSizeBits" for x) + apply (drule(1) aligned_ranges_subset_or_disjoint) + apply (subgoal_tac "{p, p - n} \ obj_range' (p - n) (KOTCB obj)") + apply (clarsimp simp del: atLeastAtMost_iff + simp: field_simps objBits_simps obj_range'_def mask_def) + apply fastforce + apply (simp add: obj_range'_def neg_mask_in_mask_range[symmetric] + del: atLeastAtMost_iff) + apply (simp add: objBits_simps) + apply (frule(1) tcb_cte_cases_aligned_helpers) + apply simp + done + +lemma map_to_ctes_delete: + assumes vc: "s \' UntypedCap d base magnitude idx" + and vs: "pspace_distinct' s" + shows + "map_to_ctes (\x. if base \ x \ x \ base + mask magnitude then None else ksPSpace s x) + = (\x. if base \ x \ x \ base + mask magnitude then None else ctes_of s x)" + using cte_wp_at_delete' [where P="(=) cte" for cte, OF vc vs] + arg_cong [where f=Not, OF cte_wp_at_delete' [OF vc vs, where P="\"]] + apply (simp (no_asm_use) add: cte_wp_at_ctes_of) + apply (rule ext) + apply (case_tac "map_to_ctes (\x. if base \ x \ x \ base + mask magnitude then None else ksPSpace s x) x") + apply (fastforce split: if_split_asm) + apply simp + done + +lemma word_range_card: + "base \base + h \ card {base..base + (h::machine_word)} = (unat h) + 1" +proof (induct h rule: word_induct2) + case zero show ?case by simp +next + case (suc h) + have interval_plus_one_word32: + "\base ceil. \base \ ceil + 1;ceil \ ceil + 1\ \ + {base..ceil + 1} = {base .. ceil } \ {ceil + (1::machine_word)}" + by (auto intro:order_antisym simp:not_le inc_le) + show ?case + using suc plus_one_helper2[where n = h and x = h,simplified] + apply (subst add.commute[where a = 1]) + apply (subst add.assoc[symmetric]) + apply (subst interval_plus_one_word32) + apply (simp add: field_simps) + apply (subst add.assoc) + apply (rule word_plus_mono_right) + apply (simp add: field_simps) + apply (simp add: field_simps) + apply (subst card_Un_disjoint; simp) + apply (clarsimp simp: field_simps) + apply (subst suc) + apply (erule word_plus_mono_right2) + apply (simp add: field_simps) + apply simp + apply (simp add: unatSuc) + done +qed + +end + +locale detype_locale' = detype_locale + constrains s::"det_state" + +lemma (in detype_locale') deletionIsSafe: + assumes sr: "(s, s') \ state_relation" + and cap: "cap = cap.UntypedCap d base magnitude idx" + and vs: "valid_pspace s" + and al: "is_aligned base magnitude" + and vu: "valid_untyped (cap.UntypedCap d base magnitude idx) s" + shows "deletionIsSafe base magnitude s'" +proof - + interpret Arch . 
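+ (* Proof sketch: the reply-cap conjunct of deletionIsSafe holds because the abstract reply caps are valid, so their thread pointers lie outside the untyped region; the arch-object conjunct follows from object-size and cardinality bounds, which force 6 <= magnitude whenever an arch object lies inside the region. *)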
(* FIXME: arch_split *) + note [simp del] = atLeastatMost_subset_iff atLeastLessThan_iff atLeastAtMost_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + have "\t m r. \ptr. cte_wp_at ((=) (cap.ReplyCap t m r)) ptr s + \ t \ mask_range base magnitude" + by (fastforce dest!: valid_cap2 simp: cap obj_reply_refs_def mask_def add_diff_eq) + hence "\ptr t m r. cte_wp_at ((=) (cap.ReplyCap t m r)) ptr s + \ t \ mask_range base magnitude" + by (fastforce simp del: split_paired_All) + hence "\t. t \ mask_range base magnitude \ + (\ptr m r. \ cte_wp_at ((=) (cap.ReplyCap t m r)) ptr s)" + by fastforce + hence cte: "\t. t \ mask_range base magnitude \ + (\ptr m r. \ cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) ptr s')" + unfolding deletionIsSafe_def + apply - + apply (erule allEI) + apply (rule impI, drule(1) mp) + apply (thin_tac "t \ S" for S) + apply (intro allI) + apply (clarsimp simp: cte_wp_at_neg2 cte_wp_at_ctes_of + simp del: split_paired_All) + apply (frule pspace_relation_cte_wp_atI [rotated]) + apply (rule invs_valid_objs [OF invs]) + apply (rule state_relation_pspace_relation [OF sr]) + apply (clarsimp simp: cte_wp_at_neg2 simp del: split_paired_All) + apply (drule_tac x="(a,b)" in spec) + apply (clarsimp simp: cte_wp_cte_at cte_wp_at_caps_of_state) + apply (case_tac c, simp_all) + apply fastforce + done + + have arch: + "\ ko p. \ ksPSpace s' p = Some (KOArch ko); p \ mask_range base magnitude \ \ 6 \ magnitude" + using sr vs vu + apply (clarsimp simp: state_relation_def) + apply (erule(1) pspace_dom_relatedE) + apply (frule obj_relation_cuts_eqv_base_in_detype_range[symmetric]) + apply simp + apply (clarsimp simp:valid_pspace_def)+ + apply simp + apply (clarsimp simp:valid_untyped_def add_mask_fold cong: if_cong) + apply (drule spec)+ + apply (erule(1) impE) + apply (erule impE) + apply (drule p_in_obj_range) + apply (clarsimp)+ + apply blast + apply clarsimp + apply (drule card_mono[rotated]) + apply fastforce + apply (clarsimp simp:valid_pspace_def obj_range_def p_assoc_help) + apply (subst (asm) word_range_card) + apply (rule is_aligned_no_overflow') + apply (erule(1) pspace_alignedD) + apply (subst (asm) word_range_card) + apply (rule is_aligned_no_overflow_mask[OF al]) + apply (rule ccontr) + apply (simp add:not_le) + apply (subgoal_tac "obj_bits koa < word_bits") + prefer 2 + apply (case_tac koa; simp add:objBits_simps word_bits_def) + apply (drule(1) valid_cs_size_objsI) + apply (clarsimp simp:valid_cs_size_def word_bits_def cte_level_bits_def) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add:bit_simps word_bits_def) + apply (simp add:pageBitsForSize_def bit_simps split:vmpage_size.splits) + apply (subgoal_tac "6 \ obj_bits koa") + apply (simp add: unat_mask_word64 mask_2pm1[symmetric] le_diff_iff) + apply (case_tac koa, simp_all add: other_obj_relation_def + objBits_simps cte_relation_def + split: if_splits) + apply (rename_tac ako, + case_tac ako; + simp add: arch_kobj_size_def bit_simps pageBitsForSize_def + split: vmpage_size.splits) + apply (rename_tac ako, + case_tac ako; + simp add: arch_kobj_size_def bit_simps pageBitsForSize_def + split: vmpage_size.splits) + done + thus ?thesis using cte by (auto simp: deletionIsSafe_def) +qed + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +text \Invariant preservation across concrete deletion\ + +lemma caps_containedD': + "\ ctes_of s p = Some cte; ctes_of s p' = Some cte'; + \ isUntypedCap (cteCap cte); capRange (cteCap cte) \ untypedRange (cteCap cte') \ {}; + caps_contained' (ctes_of s) \ \ + capRange (cteCap cte) \ untypedRange (cteCap cte')" + apply (cases cte, cases cte') + apply (simp add: caps_contained'_def) + apply blast + done + +lemma untyped_mdbD': + "\ ctes p = Some cte; ctes p' = Some cte'; + isUntypedCap (cteCap cte); capRange (cteCap cte') \ untypedRange (cteCap cte) \ {}; + \ isUntypedCap (cteCap cte'); + untyped_mdb' ctes \ \ p' \ descendants_of' p ctes" + by (cases cte, cases cte', simp add: untyped_mdb'_def) + +lemma ko_wp_at_state_refs_ofD: + "\ ko_wp_at' P p s \ \ (\ko. P ko \ state_refs_of' s p = refs_of' ko)" + by (fastforce simp: ko_wp_at'_def state_refs_of'_def) + +lemma sym_refs_ko_wp_atD: + "\ ko_wp_at' P p s; sym_refs (state_refs_of' s) \ + \ (\ko. P ko \ state_refs_of' s p = refs_of' ko + \ (\(x, tp) \ refs_of' ko. (p, symreftype tp) \ state_refs_of' s x))" + apply (clarsimp dest!: ko_wp_at_state_refs_ofD) + apply (rule exI, erule conjI) + apply (drule sym) + apply clarsimp + apply (erule(1) sym_refsD) + done + +lemma ko_wp_at_state_hyp_refs_ofD: + "\ ko_wp_at' P p s \ \ (\ko. P ko \ state_hyp_refs_of' s p = hyp_refs_of' ko)" + by (fastforce simp: ko_wp_at'_def state_hyp_refs_of'_def) + +lemma sym_hyp_refs_ko_wp_atD: + "\ ko_wp_at' P p s; sym_refs (state_hyp_refs_of' s) \ + \ (\ko. P ko \ state_hyp_refs_of' s p = hyp_refs_of' ko + \ (\(x, tp) \ hyp_refs_of' ko. (p, symreftype tp) \ state_hyp_refs_of' s x))" + apply (clarsimp dest!: ko_wp_at_state_hyp_refs_ofD) + apply (rule exI, erule conjI) + apply (drule sym) + apply clarsimp + apply (erule(1) sym_refsD) + done + +lemma zobj_refs_capRange: + "capAligned c \ zobj_refs' c \ capRange c" + apply (cases c; simp add: capAligned_def capRange_def is_aligned_no_overflow) + apply (rename_tac ac) + apply (case_tac ac; simp) + apply clarsimp + apply (drule is_aligned_no_overflow) + apply simp + done + +end + +locale delete_locale = + fixes s' and base and bits and ptr and idx and d + assumes cap: "cte_wp_at' (\cte. cteCap cte = UntypedCap d base bits idx) ptr s'" + and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s')" + and invs: "invs' s'" + and ct_act: "ct_active' s'" + and sa_simp: "sch_act_simple s'" + and al: "is_aligned base bits" + and safe: "deletionIsSafe base bits s'" + +context delete_locale begin interpretation Arch . 
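+ (* Re-enter the delete_locale to unpack the individual conjuncts of invs' (valid_objs', alignment, distinctness, mdb facts, etc.) as named facts, and to abbreviate the truncated heap pspace' and the post-deletion state state' used throughout the remaining lemmas. *)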
(*FIXME: arch_split*) + +lemma valid_objs: "valid_objs' s'" + and pa: "pspace_aligned' s'" + and pc: "pspace_canonical' s'" + and pd: "pspace_distinct' s'" + and vbm: "valid_bitmaps s'" + and sym_sched: "sym_heap_sched_pointers s'" + and vsp: "valid_sched_pointers s'" + and sym_refs: "sym_refs (state_refs_of' s')" + and sym_hyp_refs: "sym_refs (state_hyp_refs_of' s')" + and iflive: "if_live_then_nonz_cap' s'" + and ifunsafe: "if_unsafe_then_cap' s'" + and dlist: "valid_dlist (ctes_of s')" + and no_0: "no_0 (ctes_of s')" + and chain_0: "mdb_chain_0 (ctes_of s')" + and badges: "valid_badges (ctes_of s')" + and contained: "caps_contained' (ctes_of s')" + and chunked: "mdb_chunked (ctes_of s')" + and umdb: "untyped_mdb' (ctes_of s')" + and uinc: "untyped_inc' (ctes_of s')" + and nullcaps: "valid_nullcaps (ctes_of s')" + and ut_rev: "ut_revocable' (ctes_of s')" + and dist_z: "distinct_zombies (ctes_of s')" + and irq_ctrl: "irq_control (ctes_of s')" + and clinks: "class_links (ctes_of s')" + and rep_r_fb: "reply_masters_rvk_fb (ctes_of s')" + and idle: "valid_idle' s'" + and refs: "valid_global_refs' s'" + and arch: "valid_arch_state' s'" + and virq: "valid_irq_node' (irq_node' s') s'" + and virqh: "valid_irq_handlers' s'" + and virqs: "valid_irq_states' s'" + and no_0_objs: "no_0_obj' s'" + and ctnotinQ: "ct_not_inQ s'" + and irqs_masked: "irqs_masked' s'" + and ctcd: "ct_idle_or_in_cur_domain' s'" + and cdm: "ksCurDomain s' \ maxDomain" + and vds: "valid_dom_schedule' s'" + using invs + by (auto simp: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + +abbreviation + "base_bits \ mask_range base bits" + +abbreviation pspace' :: pspace where + "pspace' \ \x. if base \ x \ x \ base + mask bits then None else ksPSpace s' x" + +abbreviation state' :: kernel_state where + "state' \ (s' \ ksPSpace := pspace' \)" + +lemma ko_wp_at'[simp]: + "\P p. (ko_wp_at' P p state') = (ko_wp_at' P p s' \ p \ base_bits)" + by (fastforce simp add: ko_wp_at_delete'[OF pd]) + +lemma obj_at'[simp]: + "\P p. (obj_at' P p state') = (obj_at' P p s' \ p \ base_bits)" + by (fastforce simp add: obj_at'_real_def) + +lemma typ_at'[simp]: + "typ_at' P p state' = (typ_at' P p s' \ p \ base_bits)" + by (simp add: typ_at'_def) + +lemma valid_untyped[simp]: + "s' \' UntypedCap d base bits idx" + using cte_wp_at_valid_objs_valid_cap' [OF cap valid_objs] + by clarsimp + +lemma cte_wp_at'[simp]: + "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s' \ p \ base_bits)" + by (fastforce simp:cte_wp_at_delete'[where idx = idx,OF valid_untyped pd ]) + +(* the bits of caps they need for validity argument are within their capRanges *) +lemma valid_cap_ctes_pre: + "\c. s' \' c \ case c of CNodeCap ref bits g gs \ + \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c + | Zombie ref (ZombieCNode bits) n \ + \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c + | ArchObjectCap (PageTableCap ref pt_t data) \ + \x. x \ mask (ptTranslationBits pt_t) \ ref + (x << pte_bits) \ capRange c + | ArchObjectCap (FrameCap ref r sz d m) \ + \p<2 ^ (pageBitsForSize sz - pageBits). 
ref + (p << pageBits) \ capRange c + | _ \ True" + apply (drule valid_capAligned) + apply (simp split: capability.split zombie_type.split arch_capability.split, safe) + using pre_helper[where a=cteSizeBits] + apply (clarsimp simp add: capRange_def capAligned_def objBits_simps field_simps) + apply (clarsimp simp add: capRange_def capAligned_def shiftl_t2n) + apply (frule pre_helper2[where bits=pageBits]; simp add: pbfs_atleast_pageBits mult_ac) + using pbfs_less_wb' apply (simp add: word_bits_conv) + apply (clarsimp simp add: capRange_def capAligned_def shiftl_t2n + simp del: atLeastAtMost_iff capBits.simps) + apply (simp del: atLeastAtMost_iff) + apply (drule_tac bits="pte_bits" and x="ucast x" in pre_helper2; simp add: mult_ac) + apply (simp add: bit_simps) + apply (simp add: table_size_def) + apply (erule order_le_less_trans) + apply (simp add: mask_def bit_simps) + apply (clarsimp simp add: capRange_def capAligned_def + simp del: atLeastAtMost_iff capBits.simps) + using pre_helper[where a=cteSizeBits] + apply (clarsimp simp add: capRange_def capAligned_def objBits_simps field_simps) + done + +lemma replycap_argument: + "\p t m r. cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s' + \ t \ mask_range base bits" + using safe + by (force simp: deletionIsSafe_def cte_wp_at_ctes_of) + +lemma valid_cap': + "\p c. \ s' \' c; cte_wp_at' (\cte. cteCap cte = c) p s'; + capRange c \ mask_range base bits = {} \ \ state' \' c" + apply (subgoal_tac "capClass c = PhysicalClass \ capUntypedPtr c \ capRange c") + apply (subgoal_tac "capClass c = PhysicalClass \ + capUntypedPtr c \ mask_range base bits") + apply (frule valid_cap_ctes_pre) + apply (case_tac c, simp_all add: valid_cap'_def replycap_argument + del: atLeastAtMost_iff + split: zombie_type.split_asm) + apply (simp add: field_simps del: atLeastAtMost_iff) + apply blast + defer + apply (simp add: valid_untyped'_def) + apply (simp add: field_simps bit_simps word_size_def del: atLeastAtMost_iff) + apply blast + apply blast + apply (clarsimp simp: capAligned_capUntypedPtr) + apply (rename_tac arch_cap) + apply (case_tac arch_cap; simp del: atLeastAtMost_iff add: frame_at'_def page_table_at'_def) + apply blast + apply blast + done + +lemma objRefs_notrange: + assumes asms: "ctes_of s' p = Some c" "\ isUntypedCap (cteCap c)" + shows "capRange (cteCap c) \ base_bits = {}" +proof - + from cap obtain node + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, simp) + done + + show ?thesis using asms cap + apply - + apply (rule ccontr) + apply (drule untyped_mdbD' [OF ctes_of _ _ _ _ umdb]) + apply (simp add: isUntypedCap_def) + apply (simp add: add_mask_fold) + apply assumption + using nodesc + apply (simp add:descendants_range'_def2) + apply (drule(1) descendants_range_inD') + apply (simp add:asms) + apply (simp add: add_mask_fold) + done +qed + +lemma ctes_of_valid [elim!]: + "ctes_of s' p = Some cte \ s' \' cteCap cte" + by (case_tac cte, simp add: ctes_of_valid_cap' [OF _ valid_objs]) + +lemma valid_cap2: + "\ cte_wp_at' (\cte. 
cteCap cte = c) p s' \ \ state' \' c" + apply (case_tac "isUntypedCap c") + apply (drule cte_wp_at_valid_objs_valid_cap' [OF _ valid_objs]) + apply (clarsimp simp: valid_cap'_def isCap_simps valid_untyped'_def) + apply (rule valid_cap'[rotated], assumption) + apply (clarsimp simp: cte_wp_at_ctes_of dest!: objRefs_notrange) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma ex_nonz_cap_notRange: + "ex_nonz_cap_to' p s' \ p \ base_bits" + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (case_tac "isUntypedCap (cteCap cte)") + apply (clarsimp simp: isCap_simps) + apply (drule subsetD[OF zobj_refs_capRange, rotated]) + apply (rule valid_capAligned, erule ctes_of_valid) + apply (drule(1) objRefs_notrange) + apply (drule_tac a=p in equals0D) + apply simp + done + +lemma live_notRange: + "\ ko_wp_at' P p s'; \ko. P ko \ live' ko \ \ p \ base_bits" + apply (drule if_live_then_nonz_capE' [OF iflive ko_wp_at'_weakenE]) + apply simp + apply (erule ex_nonz_cap_notRange) + done + +lemma deletionIsSafe_delete_locale_holds: + "deletionIsSafe_delete_locale base bits s'" + by (fastforce dest: live_notRange simp: deletionIsSafe_delete_locale_def) + +lemma refs_notRange: + "(x, tp) \ state_refs_of' s' y \ y \ base_bits" + apply (drule state_refs_of'_elemD) + apply (erule live_notRange) + apply (rule refs_of_live') + apply clarsimp + done + +lemma hyp_refs_notRange: + "(x, tp) \ state_hyp_refs_of' s' y \ y \ base_bits" + apply (drule state_hyp_refs_of'_elemD) + apply (erule live_notRange) + apply (rule hyp_refs_of_live') + apply clarsimp + done + +lemma sym_refs_VCPU_hyp_live': + "\ko_wp_at' ((=) (KOArch (KOVCPU v))) p s'; sym_refs (state_hyp_refs_of' s'); vcpuTCBPtr v = Some t\ + \ ko_wp_at' (\ko. koTypeOf ko = TCBT \ hyp_live' ko) t s'" + apply (drule (1) sym_hyp_refs_ko_wp_atD) + apply (clarsimp) + apply (drule state_hyp_refs_of'_elemD) + apply (simp add: ko_wp_at'_def) + apply (clarsimp simp: hyp_refs_of_rev' hyp_live'_def) + done + +lemma sym_refs_TCB_hyp_live': + "\ko_wp_at' ((=) (KOTCB t)) p s'; sym_refs (state_hyp_refs_of' s'); atcbVCPUPtr (tcbArch t) = Some v\ + \ ko_wp_at' (\ko. koTypeOf ko = ArchT VCPUT \ hyp_live' ko) v s'" + apply (drule (1) sym_hyp_refs_ko_wp_atD) + apply (clarsimp) + apply (drule state_hyp_refs_of'_elemD) + apply (simp add: ko_wp_at'_def) + apply (clarsimp simp: hyp_refs_of_rev' hyp_live'_def arch_live'_def) + done +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) +(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) +(* FIXME: move *) +lemma corres_machine_op: + assumes P: "corres_underlying Id False True r P Q x x'" + shows "corres r (P \ machine_state) (Q \ ksMachineState) + (do_machine_op x) (doMachineOp x')" + apply (rule corres_submonad3 + [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) + apply (simp_all add: state_relation_def swp_def) + done + +lemma ekheap_relation_detype: + "ekheap_relation ekh kh \ + ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" + by (fastforce simp add: ekheap_relation_def split: if_split_asm) + +lemma cap_table_at_gsCNodes_eq: + "(s, s') \ state_relation + \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" + apply (clarsimp simp: state_relation_def ghost_relation_def + obj_at_def is_cap_table) + apply (drule_tac x = ptr in spec)+ + apply (drule_tac x = bits in spec)+ + apply fastforce + done + +lemma cNodeNoPartialOverlap: + "corres dc (\s. \cref. 
cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ valid_objs s \ pspace_aligned s) + \ + (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) + (\x. base \ x \ x \ base + mask magnitude)) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: cNodePartialOverlap_def) + apply (drule(1) cte_wp_valid_cap) + apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq + obj_at_def is_cap_table) + apply (frule(1) pspace_alignedD) + apply (simp add: add_mask_fold) + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow_mask add_mask_fold) + apply (blast intro: order_trans) + apply (simp add: is_aligned_no_overflow_mask power_overflow word_bits_def) + apply wp+ + done + +lemma state_rel_ghost: + "(s,s') \ state_relation \ + ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s'))" + by (erule state_relationE) + +lemma ghost_PTTypes: + "\ ghost_relation kh gsu gsc pt_Ts; pt_Ts p = Some pt_t \ \ + (\pt. kh p = Some (ArchObj (PageTable pt)) \ pt_t = pt_type pt)" + by (clarsimp simp: ghost_relation_def) + +lemma pTableNoPartialOverlap: + "corres dc + (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ + valid_objs s \ pspace_aligned s) + \ + (return x) + (stateAssert (\s. \ pTablePartialOverlap (gsPTTypes (ksArchState s)) + (\x. base \ x \ x \ base + mask magnitude)) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: pTablePartialOverlap_def) + apply (frule state_rel_ghost) + apply (drule (1) ghost_PTTypes) + apply clarsimp + apply (drule(1) cte_wp_valid_cap) + apply (clarsimp simp: valid_cap_def valid_untyped_def) + apply (frule(1) pspace_alignedD) + apply (simp add: add_mask_fold) + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (clarsimp simp: pt_bits_def) + apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow_mask add_mask_fold) + apply (blast intro: order_trans) + apply (simp add: is_aligned_no_overflow_mask power_overflow word_bits_def) + apply wp+ + done + +lemma corres_return_bind: (* FIXME AARCH64: move to Corres_UL *) + "corres_underlying sr nf nf' r P P' (do return (); f od) g \ corres_underlying sr nf nf' r P P' f g" + by simp + +lemma corres_return_bind2: (* FIXME AARCH64: move to Corres_UL *) + "corres_underlying sr nf nf' r P P' f (do return (); g od) \ corres_underlying sr nf nf' r P P' f g" + by simp + +crunches doMachineOp + for gsCNodes[wp]: "\s. P (gsCNodes s)" + and deletionIsSafe_delete_locale[wp]: "deletionIsSafe_delete_locale base magnitude" + (simp: deletionIsSafe_delete_locale_def) + +lemma detype_tcbSchedNexts_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. 
if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedNext) + = tcbSchedNexts_of s'" + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def live'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_tcbSchedPrevs_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedPrev) + = tcbSchedPrevs_of s'" + using pspace_alignedD' pspace_distinctD' + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def live'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_inQ: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ \d p. (inQ d p |< ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of')) + = (inQ d p |< tcbs_of' s')" + using pspace_alignedD' pspace_distinctD' + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: inQ_def opt_pred_def ko_wp_at'_def live'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_ready_queues_relation: + "\pspace_aligned' s'; pspace_distinct' s'; + \p. p \ {lower..upper} \ \ ko_wp_at' live' p s'; + ready_queues_relation s s'; upper = upper'\ + \ ready_queues_relation_2 + (ready_queues (detype {lower..upper'} s)) + (ksReadyQueues s') + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedNext) + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedPrev) + (\d p. inQ d p |< ((\x. if lower \ x \ x \ upper then None else ksPSpace s' x) |> tcb_of'))" + apply (clarsimp simp: detype_ext_def ready_queues_relation_def Let_def) + apply (frule (1) detype_tcbSchedNexts_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_tcbSchedPrevs_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_inQ[where S="{lower..upper}"]; simp) + apply (fastforce simp add: detype_def detype_ext_def wrap_ext_det_ext_ext_def) + done + +lemma deleteObjects_corres: + "is_aligned base magnitude \ magnitude \ 3 \ + corres dc + (\s. einvs s + \ s \ (cap.UntypedCap d base magnitude idx) + \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) + \ untyped_children_in_mdb s \ if_unsafe_then_cap s + \ valid_mdb s \ valid_global_refs s \ ct_active s + \ schact_is_rct s) + (\s'. invs' s' + \ cte_wp_at' (\cte. 
cteCap cte = UntypedCap d base magnitude idx) ptr s' + \ descendants_range' (UntypedCap d base magnitude idx) ptr (ctes_of s') + \ ct_active' s' + \ s' \' (UntypedCap d base magnitude idx)) + (delete_objects base magnitude) (deleteObjects base magnitude)" + apply (simp add: deleteObjects_def2) + apply (rule corres_stateAssert_implied[where P'=\, simplified]) + prefer 2 + apply clarsimp + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add: valid_cap_simps) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (rule_tac ptr=ptr and idx=idx and d=d in delete_locale.deletionIsSafe_delete_locale_holds) + apply (clarsimp simp: delete_locale_def) + apply (intro conjI) + apply (fastforce simp: sch_act_simple_def schact_is_rct_def state_relation_def) + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add: valid_cap_simps) + apply (simp add: ksASIDMapSafe_def) + apply (simp add: delete_objects_def) + apply (rule corres_underlying_split[where r'=dc]) + apply (rule corres_guard_imp[where r=dc]) + apply (rule corres_machine_op[OF corres_Id]; simp) + apply (rule no_fail_freeMemory; simp) + apply simp + apply (auto elim: is_aligned_weaken)[1] + apply (rule corres_return_bind) + apply (rule corres_split[OF cNodeNoPartialOverlap]) + apply (rule corres_return_bind) + apply (rule corres_split[OF pTableNoPartialOverlap]) + apply simp + apply (rule_tac P="\s. valid_objs s \ valid_list s \ + (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ + descendants_range (cap.UntypedCap d base magnitude idx) cref s ) \ + s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ + valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ + zombies_final s \ sym_refs (state_refs_of s) \ sym_refs (state_hyp_refs_of s) \ + untyped_children_in_mdb s \ if_unsafe_then_cap s \ + valid_global_refs s" and + P'="\s. 
s \' capability.UntypedCap d base magnitude idx \ + valid_pspace' s \ + deletionIsSafe_delete_locale base magnitude s" in corres_modify) + apply (simp add: valid_pspace'_def) + apply (rule state_relation_null_filterE, assumption, + simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] + apply (simp add: detype_def, rule state.equality; + simp add: detype_ext_def wrap_ext_det_ext_ext_def) + apply (intro exI, fastforce) + apply (rule ext, clarsimp simp add: null_filter_def) + apply (rule sym, rule ccontr, clarsimp) + apply (drule(4) cte_map_not_null_outside') + apply (fastforce simp add: cte_wp_at_caps_of_state) + apply simp + apply (rule ext, clarsimp simp: null_filter'_def map_to_ctes_delete) + apply (rule sym, rule ccontr, clarsimp) + apply (frule(2) pspace_relation_cte_wp_atI + [OF state_relation_pspace_relation]) + apply (elim exE) + apply (frule(4) cte_map_not_null_outside') + apply (rule cte_wp_at_weakenE, erule conjunct1) + apply (case_tac y, clarsimp) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def + valid_nullcaps_def) + apply clarsimp + apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, + erule cte_wp_at_weakenE[OF _ TrueI], assumption+) + apply (simp add: add_mask_fold) + apply (simp add: add_mask_fold) + apply (rule detype_pspace_relation[simplified], + simp_all add: state_relation_pspace_relation valid_pspace_def)[1] + apply (simp add: valid_cap'_def capAligned_def) + apply (clarsimp simp: valid_cap_def, assumption) + apply (fastforce simp: detype_def detype_ext_def add_mask_fold wrap_ext_det_ext_ext_def + intro!: ekheap_relation_detype) + apply (rule detype_ready_queues_relation; blast?) + apply (clarsimp simp: deletionIsSafe_delete_locale_def) + apply (erule state_relation_ready_queues_relation) + apply (simp add: add_mask_fold) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap + detype_def) + apply (drule_tac t="gsUserPages s'" in sym) + apply (drule_tac t="gsCNodes s'" in sym) + apply (drule_tac t="gsPTTypes (ksArchState s')" in sym) + apply (auto simp: ups_of_heap_def cns_of_heap_def ext pt_types_of_heap_def add_mask_fold + opt_map_def + split: option.splits kernel_object.splits)[1] + apply (simp add: valid_mdb_def) + apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | + simp add: invs_def valid_state_def valid_pspace_def + descendants_range_def | wp (once) hoare_drop_imps)+ + apply fastforce + apply (wpsimp wp: hoare_vcg_op_lift) + done +end + +context delete_locale begin interpretation Arch . 
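+ (* Back in the delete_locale: live objects, global references, the idle thread and all reachable CTEs lie outside the deleted region, so validity of the remaining objects and of the MDB structure carries over to the truncated state state'. *)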
(*FIXME: arch_split*) + +lemma live_idle_untyped_range': + "ko_wp_at' live' p s' \ p = idle_thread_ptr \ p \ base_bits" + apply (case_tac "ko_wp_at' live' p s'") + apply (drule if_live_then_nonz_capE'[OF iflive ko_wp_at'_weakenE]) + apply simp + apply (erule ex_nonz_cap_notRange) + apply clarsimp + apply (insert invs_valid_global'[OF invs] cap invs_valid_idle'[OF invs]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_global_refsD') + apply (clarsimp simp: valid_idle'_def) + using atLeastAtMost_iff apply (simp add: p_assoc_help mask_eq_exp_minus_1) + by fastforce + +lemma untyped_range_live_idle': + "p \ base_bits \ \ (ko_wp_at' live' p s' \ p = idle_thread_ptr)" + using live_idle_untyped_range' by blast + +lemma valid_obj': + "\ valid_obj' obj s'; ko_wp_at' ((=) obj) p s'; sym_heap_sched_pointers s' \ + \ valid_obj' obj state'" + apply (case_tac obj, simp_all add: valid_obj'_def) + apply (rename_tac endpoint) + apply (case_tac endpoint, simp_all add: valid_ep'_def)[1] + apply (clarsimp dest!: sym_refs_ko_wp_atD [OF _ sym_refs]) + apply (drule(1) bspec)+ + apply (clarsimp dest!: refs_notRange) + apply (clarsimp dest!: sym_refs_ko_wp_atD [OF _ sym_refs]) + apply (drule(1) bspec)+ + apply (clarsimp dest!: refs_notRange) + apply (rename_tac notification) + apply (case_tac notification, simp_all add: valid_ntfn'_def valid_bound_tcb'_def)[1] + apply (rename_tac ntfn bound) + apply (case_tac ntfn, simp_all split:option.splits)[1] + apply ((clarsimp dest!: sym_refs_ko_wp_atD [OF _ sym_refs] refs_notRange)+)[4] + apply (drule(1) bspec)+ + apply (clarsimp dest!: refs_notRange) + apply (clarsimp dest!: sym_refs_ko_wp_atD [OF _ sym_refs] refs_notRange) + apply (frule sym_refs_ko_wp_atD [OF _ sym_refs]) + apply (clarsimp simp: valid_tcb'_def ko_wp_at'_def + objBits_simps) + apply (rule conjI) + apply (erule ballEI, clarsimp elim!: ranE) + apply (rule_tac p="p + x" in valid_cap2) + apply (erule(2) cte_wp_at_tcbI') + apply fastforce + apply simp + apply (intro conjI) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; clarsimp simp: valid_tcb_state'_def dest!: refs_notRange) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; + clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def + dest!: refs_notRange split: option.splits) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac prev) + apply (cut_tac P=live' and p=prev in live_notRange; fastforce?) + apply (fastforce dest: sym_heapD2[where p'=p] + simp: opt_map_def ko_wp_at'_def obj_at'_def live'_def) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac "next") + apply (cut_tac P=live' and p="next" in live_notRange; fastforce?) 
+ apply (fastforce dest!: sym_heapD1[where p=p] + simp: opt_map_def ko_wp_at'_def obj_at'_def live'_def) + using sym_hyp_refs + apply (clarsimp simp add: valid_arch_tcb'_def split: option.split_asm) + apply (drule (1) sym_refs_TCB_hyp_live'[rotated]) + apply (clarsimp simp: ko_wp_at'_def objBits_simps; (rule conjI|assumption)+) + apply (drule live_notRange, clarsimp simp: live'_def) + apply (case_tac ko; simp) + apply clarsimp + apply (clarsimp simp: valid_cte'_def) + apply (rule_tac p=p in valid_cap2) + apply (clarsimp simp: ko_wp_at'_def objBits_simps' cte_level_bits_def[symmetric]) + apply (erule(2) cte_wp_at_cteI') + apply simp + done + +lemma tcbSchedNexts_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedNext) = tcbSchedNexts_of s'" + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def live'_def) + apply (fastforce simp: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + +lemma tcbSchedPrevs_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedPrev) = tcbSchedPrevs_of s'" + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def live'_def) + apply (fastforce simp: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + +lemma st_tcb: + "\P p. \ st_tcb_at' P p s'; \ P Inactive; \ P IdleThreadState \ \ st_tcb_at' P p state'" + by (fastforce simp: pred_tcb_at'_def obj_at'_real_def live'_def hyp_live'_def dest: live_notRange) + +lemma irq_nodes_global: + "\irq :: irq. irq_node' s' + (ucast irq << cteSizeBits) \ global_refs' s'" + by (simp add: global_refs'_def) + +lemma global_refs: + "global_refs' s' \ base_bits = {}" + using cap + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule valid_global_refsD' [OF _ refs]) + apply (fastforce simp add: field_simps mask_def) + done + +lemma global_refs2: + "global_refs' s' \ (- base_bits)" + using global_refs by blast + +lemma irq_nodes_range: + "\irq :: irq. irq_node' s' + (ucast irq << cteSizeBits) \ base_bits" + using irq_nodes_global global_refs + by blast + +lemma cte_refs_notRange: + assumes asms: "ctes_of s' p = Some c" + shows "cte_refs' (cteCap c) (irq_node' s') \ base_bits = {}" +proof - + from cap obtain node + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, simp) + done + + show ?thesis using asms + apply - + apply (rule ccontr) + apply (clarsimp elim!: nonemptyE) + apply (frule ctes_of_valid) + apply (frule valid_capAligned) + apply (case_tac "\irq. 
cteCap c = IRQHandlerCap irq") + apply (insert irq_nodes_range)[1] + apply clarsimp + apply (frule subsetD [OF cte_refs_capRange]) + apply simp + apply assumption + apply (frule caps_containedD' [OF _ ctes_of _ _ contained]) + apply (clarsimp dest!: isCapDs) + apply (rule_tac x=x in notemptyI) + apply (simp add: field_simps mask_def) + apply (simp add: add_mask_fold) + apply (drule objRefs_notrange) + apply (clarsimp simp: isCap_simps) + apply blast + done +qed + +lemma non_null_present: + "cte_wp_at' (\c. cteCap c \ NullCap) p s' \ p \ base_bits" + apply (drule (1) if_unsafe_then_capD' [OF _ ifunsafe]) + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of + dest!: cte_refs_notRange simp del: atLeastAtMost_iff) + apply blast + done + +lemma cte_cap: + "ex_cte_cap_to' p s' \ ex_cte_cap_to' p state'" + apply (clarsimp simp: ex_cte_cap_to'_def) + apply (frule non_null_present [OF cte_wp_at_weakenE']) + apply clarsimp + apply fastforce + done + +lemma idle_notRange: + "\cref. \ cte_wp_at' (\c. ksIdleThread s' \ capRange (cteCap c)) cref s' + \ ksIdleThread s' \ base_bits" + apply (insert cap) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule_tac x=ptr in allE, clarsimp simp: field_simps mask_def) + done + +abbreviation + "ctes' \ map_to_ctes (\x. if base \ x \ x \ base + mask bits then None else ksPSpace s' x)" + +lemmas tree_to_ctes = map_to_ctes_delete [OF valid_untyped pd] + +lemma map_to_ctesE[elim!]: + "\ ctes' x = Some cte; \ ctes_of s' x = Some cte; x \ base_bits \ \ P \ \ P" + by (clarsimp simp: tree_to_ctes split: if_split_asm) + +lemma not_nullMDBNode: + "\ ctes_of s' x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" + using nullcaps + apply (cases cte) + apply (simp add: valid_nullcaps_def) + done + +lemma mdb_src: "\ ctes_of s' \ x \ y; y \ 0 \ \ x \ base_bits" + apply (rule non_null_present) + apply (clarsimp simp: next_unfold' cte_wp_at_ctes_of) + apply (erule(1) not_nullMDBNode) + apply (simp add: nullMDBNode_def nullPointer_def) + done + +lemma mdb_dest: "\ ctes_of s' \ x \ y; y \ 0 \ \ y \ base_bits" + apply (case_tac "x = 0") + apply (insert no_0, simp add: next_unfold')[1] + apply (drule(1) vdlist_nextD0 [OF _ _ dlist]) + apply (rule non_null_present) + apply (clarsimp simp: next_unfold' cte_wp_at_ctes_of mdb_prev_def) + apply (erule(1) not_nullMDBNode) + apply (simp add: nullMDBNode_def nullPointer_def) + done + +lemma trancl_next[elim]: + "\ ctes_of s' \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" + apply (erule rev_mp, erule converse_trancl_induct) + apply clarsimp + apply (rule r_into_trancl) + apply (simp add: next_unfold' tree_to_ctes) + apply clarsimp + apply (rule_tac b=z in trancl_into_trancl2) + apply (simp add: next_unfold' tree_to_ctes) + apply (case_tac "z = 0") + apply (insert no_0)[1] + apply (erule tranclE2) + apply (simp add: next_unfold') + apply (simp add: next_unfold') + apply (drule(1) mdb_dest) + apply (simp add: next_unfold') + done + +lemma mdb_parent_notrange: + "ctes_of s' \ x \ y \ x \ base_bits \ y \ base_bits" + apply (erule subtree.induct) + apply (frule(1) mdb_src, drule(1) mdb_dest, simp) + apply (drule(1) mdb_dest, simp) + done + +lemma mdb_parent: + "ctes_of s' \ x \ y \ ctes' \ x \ y" + apply (erule subtree.induct) + apply (frule(1) mdb_src, frule(1) mdb_dest) + apply (rule subtree.direct_parent) + apply (simp add: next_unfold' tree_to_ctes) + apply assumption + apply (simp add: parentOf_def tree_to_ctes) + apply (frule(1) mdb_src, frule(1) mdb_dest) + apply (erule subtree.trans_parent) + 
apply (simp add: next_unfold' tree_to_ctes) + apply assumption + apply (frule mdb_parent_notrange) + apply (simp add: parentOf_def tree_to_ctes) + done + +lemma trancl_next_rev: + "ctes' \ x \\<^sup>+ y \ ctes_of s' \ x \\<^sup>+ y" + apply (erule converse_trancl_induct) + apply (rule r_into_trancl) + apply (clarsimp simp: next_unfold') + apply (rule_tac b=z in trancl_into_trancl2) + apply (clarsimp simp: next_unfold') + apply assumption + done + +lemma is_chunk[elim!]: + "is_chunk (ctes_of s') cap x y \ is_chunk ctes' cap x y" + apply (simp add: is_chunk_def) + apply (erule allEI) + apply (clarsimp dest!: trancl_next_rev) + apply (drule rtranclD, erule disjE) + apply (clarsimp simp: tree_to_ctes) + apply (cut_tac p=y in non_null_present) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + apply (clarsimp dest!: trancl_next_rev simp: trancl_into_rtrancl) + apply (clarsimp simp: tree_to_ctes) + apply (cut_tac p=p'' in non_null_present) + apply (clarsimp simp add: cte_wp_at_ctes_of) + apply simp + done + +end + +lemma exists_disj: + "((\a. P a \ Q a)\(\a. P a \ Q' a)) + = (\a. P a \ (Q a \ Q' a))" + by auto + +lemma (in delete_locale) delete_invs': + "invs' (ksMachineState_update + (\ms. underlying_memory_update + (\m x. if base \ x \ x \ base + (2 ^ bits - 1) then 0 else m x) ms) + state')" (is "invs' (?state'')") +using vds +proof (simp add: invs'_def valid_state'_def valid_pspace'_def + valid_mdb'_def valid_mdb_ctes_def, + safe) + interpret Arch . (*FIXME: arch_split*) + let ?s = state' + let ?ran = base_bits + + show "pspace_aligned' ?s" using pa + by (simp add: pspace_aligned'_def dom_def) + + show "pspace_canonical' ?s" using pc + by (simp add: pspace_canonical'_def dom_def) + + show pspace_distinct'_state': "pspace_distinct' ?s" using pd + by (clarsimp simp add: pspace_distinct'_def ps_clear_def + dom_if_None Diff_Int_distrib) + + show "valid_objs' ?s" using valid_objs sym_sched + apply (clarsimp simp: valid_objs'_def ran_def) + apply (rule_tac p=a in valid_obj') + apply fastforce + apply (frule pspace_alignedD'[OF _ pa]) + apply (frule pspace_distinctD'[OF _ pd]) + apply (clarsimp simp: ko_wp_at'_def) + apply fastforce + done + + from sym_refs show "sym_refs (state_refs_of' ?s)" + apply - + apply (clarsimp simp: state_refs_ko_wp_at_eq + elim!: rsubst[where P=sym_refs]) + apply (rule ext) + apply safe + apply (simp add: refs_notRange[simplified] state_refs_ko_wp_at_eq) + done + + from sym_hyp_refs show "sym_refs (state_hyp_refs_of' ?s)" + apply - + apply (clarsimp simp: state_hyp_refs_ko_wp_at_eq + elim!: rsubst[where P=sym_refs]) + apply (rule ext) + apply safe + apply (simp add: hyp_refs_notRange[simplified] state_hyp_refs_ko_wp_at_eq) + done + + show "if_live_then_nonz_cap' ?s" using iflive + apply (clarsimp simp: if_live_then_nonz_cap'_def) + apply (drule spec, drule(1) mp) + apply (clarsimp simp: ex_nonz_cap_to'_def) + apply (rule exI, rule conjI, assumption) + apply (drule non_null_present [OF cte_wp_at_weakenE']) + apply clarsimp + apply simp + done + + from ifunsafe show "if_unsafe_then_cap' ?s" + by (clarsimp simp: if_unsafe_then_cap'_def + intro!: cte_cap) + + from idle_notRange refs + have "ksIdleThread s' \ ?ran" + apply (simp add: cte_wp_at_ctes_of valid_global_refs'_def valid_refs'_def) + apply blast + done + with idle show "valid_idle' ?s" + apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def) + apply (clarsimp simp add: ps_clear_def dom_if_None Diff_Int_distrib) + done + + from tcb_at_invs' [OF invs] ct_act + show "cur_tcb' ?s" unfolding 
cur_tcb'_def + apply (clarsimp simp: cur_tcb'_def ct_in_state'_def) + apply (drule st_tcb) + apply simp + apply simp + apply (simp add: pred_tcb_at'_def) + done + + let ?ctes' = ctes' + + from no_0 show no_0': "no_0 ?ctes'" + by (simp add: no_0_def tree_to_ctes) + + from dlist show "valid_dlist ?ctes'" + apply (simp only: valid_dlist_def3) + apply (rule conjI) + apply (drule conjunct1) + apply (elim allEI) + apply (clarsimp simp: mdb_prev_def next_unfold' + tree_to_ctes) + apply (rule ccontr, clarsimp) + apply (cut_tac p="mdbNext (cteMDBNode cte)" in non_null_present) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) not_nullMDBNode) + apply (simp add: nullMDBNode_def nullPointer_def no_0) + apply simp + apply (drule conjunct2) + apply (elim allEI) + apply (clarsimp simp: mdb_prev_def next_unfold' + tree_to_ctes) + apply (rule ccontr, clarsimp) + apply (cut_tac p="mdbPrev (cteMDBNode z)" in non_null_present) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) not_nullMDBNode) + apply (simp add: nullMDBNode_def nullPointer_def no_0) + apply simp + done + + from chain_0 show "mdb_chain_0 ?ctes'" + by (fastforce simp: mdb_chain_0_def Ball_def) + + from umdb show "untyped_mdb' ?ctes'" + apply (simp add: untyped_mdb'_def) + apply (erule allEI)+ + apply (clarsimp simp: descendants_of'_def) + apply (rule mdb_parent) + apply (clarsimp simp: tree_to_ctes split: if_split_asm) + done + + from badges show "valid_badges ?ctes'" + by (simp add: valid_badges_def tree_to_ctes next_unfold') + + from contained show "caps_contained' ?ctes'" + by (simp add: caps_contained'_def tree_to_ctes) + + from chunked show "mdb_chunked ?ctes'" + apply (simp add: mdb_chunked_def) + apply (elim allEI) + apply clarsimp + apply (intro conjI impI) + apply (erule disjEI) + apply fastforce + apply fastforce + apply (clarsimp dest!: trancl_next_rev) + apply (clarsimp dest!: trancl_next_rev) + done + + from uinc show "untyped_inc' ?ctes'" + apply (simp add: untyped_inc'_def) + apply (elim allEI) + apply clarsimp + apply (safe del: impCE, simp_all add: descendants_of'_def + mdb_parent) + done + + from nullcaps show "valid_nullcaps ?ctes'" + by (clarsimp simp: valid_nullcaps_def) + + from ut_rev + show "ut_revocable' ?ctes'" + by (clarsimp simp: ut_revocable'_def) + + show "class_links ?ctes'" using clinks + by (simp add: class_links_def tree_to_ctes mdb_next_unfold) + + show "valid_global_refs' ?s" using refs + by (simp add: valid_global_refs'_def tree_to_ctes valid_cap_sizes'_def + global_refs'_def valid_refs'_def ball_ran_eq) + + show "valid_arch_state' ?s" + using arch global_refs2 + apply (simp add: valid_arch_state'_def global_refs'_def) + apply (case_tac "armHSCurVCPU (ksArchState s')"; clarsimp simp add: split_def) + apply (drule live_notRange, clarsimp, case_tac ko; simp add: is_vcpu'_def live'_def) + done + + show "valid_irq_node' (irq_node' s') ?s" + using virq irq_nodes_range + by (simp add: valid_irq_node'_def mult.commute mult.left_commute ucast_ucast_mask_8) + + show "valid_irq_handlers' ?s" using virqh + apply (simp add: valid_irq_handlers'_def irq_issued'_def + cteCaps_of_def tree_to_ctes Ball_def) + apply (erule allEI) + apply (clarsimp simp: ran_def) + done + + from irq_ctrl + show "irq_control ?ctes'" + by (clarsimp simp: irq_control_def) + + from dist_z + show "distinct_zombies ?ctes'" + apply (simp add: tree_to_ctes distinct_zombies_def + distinct_zombie_caps_def + split del: if_split) + apply (erule allEI, erule allEI) + apply clarsimp + done + + show "reply_masters_rvk_fb ?ctes'" + using 
rep_r_fb + by (simp add: tree_to_ctes reply_masters_rvk_fb_def + ball_ran_eq) + + from virqs + show "valid_irq_states' s'" . + + from no_0_objs + show "no_0_obj' state'" + by (simp add: no_0_obj'_def) + + from irqs_masked + show "irqs_masked' state'" + by (simp add: irqs_masked'_def) + + from sa_simp ct_act + show "sch_act_wf (ksSchedulerAction s') state'" + apply (simp add: sch_act_simple_def) + apply (case_tac "ksSchedulerAction s'", simp_all add: ct_in_state'_def) + apply (fastforce dest!: st_tcb elim!: pred_tcb'_weakenE) + done + + from invs + have "pspace_domain_valid s'" by (simp add: invs'_def valid_state'_def) + thus "pspace_domain_valid state'" + by (simp add: pspace_domain_valid_def) + + from invs + have "valid_machine_state' s'" by (simp add: invs'_def valid_state'_def) + thus "valid_machine_state' ?state''" + apply (clarsimp simp: valid_machine_state'_def) + apply (drule_tac x=p in spec) + apply (simp add: pointerInUserData_def pointerInDeviceData_def typ_at'_def) + apply (simp add: ko_wp_at'_def exists_disj) + apply (elim exE conjE) + apply (cut_tac ptr'=p in mask_in_range) + apply fastforce + using valid_untyped[simplified valid_cap'_def capability.simps] + apply (simp add: valid_untyped'_def capAligned_def) + apply (elim conjE) + apply (drule_tac x="p && ~~ mask pageBits" in spec) + apply (cut_tac x=p in is_aligned_neg_mask[OF le_refl]) + apply (clarsimp simp: mask_2pm1 ko_wp_at'_def obj_range'_def objBitsKO_def) + apply (frule is_aligned_no_overflow'[of base bits]) + apply (frule is_aligned_no_overflow'[of _ pageBits]) + apply (frule (1) aligned_ranges_subset_or_disjoint + [where n=bits and n'=pageBits]) + apply (case_tac ko, simp_all add: objBits_simps) + apply (auto simp add: x_power_minus_1) + done + + from sa_simp ctnotinQ + show "ct_not_inQ state'" + apply (clarsimp simp: ct_not_inQ_def pred_tcb_at'_def) + apply (drule obj_at'_and + [THEN iffD2, OF conjI, + OF ct_act [unfolded ct_in_state'_def pred_tcb_at'_def]]) + apply (clarsimp simp: obj_at'_real_def) + apply (frule if_live_then_nonz_capE'[OF iflive, OF ko_wp_at'_weakenE]) + apply clarsimp + apply (case_tac "tcbState obj"; clarsimp simp: live'_def) + apply (clarsimp dest!: ex_nonz_cap_notRange) + done + + from ctcd show "ct_idle_or_in_cur_domain' state'" + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (intro impI) + apply (elim disjE impE) + apply simp+ + apply (intro impI) + apply (rule disjI2) + apply (drule obj_at'_and + [THEN iffD2, OF conjI, + OF ct_act [unfolded ct_in_state'_def st_tcb_at'_def]]) + apply (clarsimp simp: obj_at'_real_def) + apply (frule if_live_then_nonz_capE'[OF iflive, OF ko_wp_at'_weakenE]) + apply (clarsimp simp: live'_def) + apply (case_tac "tcbState obj"; clarsimp) + apply (clarsimp dest!: ex_nonz_cap_notRange elim!: ko_wp_at'_weakenE) + done + + from cdm show "ksCurDomain s' \ maxDomain" . 
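+ (* Remaining goals of delete_invs': untyped_ranges_zero', valid_bitmaps, the sym_heap property of the tcbSchedNext/tcbSchedPrev pointers, and valid_sched_pointers. *)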
+ + from invs + have urz: "untyped_ranges_zero' s'" by (simp add: invs'_def valid_state'_def) + show "untyped_ranges_zero_inv (cteCaps_of state') (gsUntypedZeroRanges s')" + apply (simp add: untyped_zero_ranges_cte_def + urz[unfolded untyped_zero_ranges_cte_def, rule_format, symmetric]) + apply (clarsimp simp: fun_eq_iff intro!: arg_cong[where f=Ex]) + apply safe + apply (drule non_null_present[OF cte_wp_at_weakenE']) + apply (clarsimp simp: untypedZeroRange_def) + apply simp + done + + from vbm + show "valid_bitmaps state'" + by (simp add: valid_bitmaps_def bitmapQ_defs) + + from sym_sched + show "sym_heap (pspace' |> tcb_of' |> tcbSchedNext) (pspace' |> tcb_of' |> tcbSchedPrev)" + using pa pd pspace_distinct'_state' + by (fastforce simp: tcbSchedNexts_of_pspace' tcbSchedPrevs_of_pspace') + + from vsp show "valid_sched_pointers_2 (pspace' |> tcb_of' |> tcbSchedPrev) + (pspace' |> tcb_of' |> tcbSchedNext) + (tcbQueued |< (pspace' |> tcb_of'))" + by (clarsimp simp: valid_sched_pointers_def opt_pred_def opt_map_def) + +qed (clarsimp) + +lemma (in delete_locale) delete_ko_wp_at': + assumes objs: "ko_wp_at' P p s' \ ex_nonz_cap_to' p s'" + shows "ko_wp_at' P p state'" + using objs + by (clarsimp simp: ko_wp_at'_def ps_clear_def dom_if_None Diff_Int_distrib + dest!: ex_nonz_cap_notRange) + +lemma (in delete_locale) null_filter': + assumes descs: "Q (null_filter' (ctes_of s'))" + shows "Q (null_filter' (ctes_of state'))" + using descs ifunsafe + apply (clarsimp elim!: rsubst[where P=Q]) + apply (rule ext) + apply (clarsimp simp:null_filter'_def tree_to_ctes) + apply (rule ccontr) + apply (clarsimp) + apply (cut_tac p = x in non_null_present) + apply (simp add:cte_wp_at_ctes_of) + apply (rule ccontr) + apply simp + apply (erule(1) not_nullMDBNode) + apply (case_tac y,simp) + apply simp + done + +lemma (in delete_locale) delete_ex_cte_cap_to': + assumes exc: "ex_cte_cap_to' p s'" + shows "ex_cte_cap_to' p state'" + using exc + by (clarsimp elim!: cte_cap) + + +lemma deleteObjects_null_filter: + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and (\s. P (null_filter' (ctes_of s))) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv s. P (null_filter' (ctes_of s))\" + apply (simp add: deleteObjects_def3) + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subgoal_tac "delete_locale s ptr bits p idx d") + apply (drule_tac Q = P in delete_locale.null_filter') + apply assumption + apply (clarsimp simp:p_assoc_help) + apply (simp add: eq_commute field_simps mask_def) + apply (subgoal_tac "ksPSpace (s\ksMachineState := snd ((), b)\) = + ksPSpace s", simp only:, simp) + apply (unfold_locales, simp_all) + done + +lemma deleteObjects_descendants: + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and (\s. descendants_range_in' H p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv s. 
descendants_range_in' H p (ctes_of s)\" + apply (simp add:descendants_range_in'_def2) + apply (wp deleteObjects_null_filter) + apply fastforce + done + +lemma doMachineOp_modify: + "doMachineOp (modify g) = modify (ksMachineState_update g)" + apply (simp add: doMachineOp_def split_def select_f_returns) + apply (rule ext) + apply (simp add: simpler_gets_def simpler_modify_def bind_def) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma deleteObjects_invs': + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. invs'\" +proof - + show ?thesis + apply (rule hoare_pre) + apply (rule_tac G="is_aligned ptr bits \ 3 \ bits \ bits \ word_bits" in hoare_grab_asm) + apply (clarsimp simp add: deleteObjects_def2) + apply (simp add: freeMemory_def bind_assoc doMachineOp_bind) + apply (simp add: bind_assoc[where f="\_. modify f" for f, symmetric]) + apply (simp add: mapM_x_storeWord_step[simplified word_size_bits_def] + doMachineOp_modify modify_modify) + apply (simp add: bind_assoc intvl_range_conv'[where 'a=machine_word_len, folded word_bits_def] mask_def field_simps) + apply (wp) + apply (simp cong: if_cong) + apply (subgoal_tac "is_aligned ptr bits \ 3 \ bits \ bits < word_bits",simp) + apply clarsimp + apply (frule(2) delete_locale.intro, simp_all)[1] + apply (simp add: ksASIDMapSafe_def invs'_gsTypes_update) + apply (rule subst[rotated, where P=invs'], erule delete_locale.delete_invs') + apply (simp add: field_simps mask_def) + apply clarsimp + apply (drule invs_valid_objs') + apply (drule (1) cte_wp_at_valid_objs_valid_cap') + apply (clarsimp simp add: valid_cap'_def capAligned_def untypedBits_defs) + done +qed + +lemma deleteObjects_st_tcb_at': + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and st_tcb_at' (P and (\) Inactive and (\) IdleThreadState) t + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. st_tcb_at' P t\" + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subgoal_tac "delete_locale s ptr bits p idx d") + apply (drule delete_locale.delete_ko_wp_at' + [where p = t and + P="case_option False (P \ tcbState) \ projectKO_opt", + simplified eq_commute]) + apply (simp add: pred_tcb_at'_def obj_at'_real_def) + apply (rule conjI) + apply (fastforce elim: ko_wp_at'_weakenE simp: projectKO_opt_tcb) + apply (erule if_live_then_nonz_capD' [rotated]) + apply (clarsimp simp: live'_def) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_real_def + field_simps ko_wp_at'_def ps_clear_def + cong:if_cong + split: option.splits) + apply (simp add: delete_locale_def) + done + +lemma deleteObjects_cap_to': + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and ex_cte_cap_to' p' + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. 
ex_cte_cap_to' p'\" + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subgoal_tac "delete_locale s ptr bits p idx d") + apply (drule delete_locale.delete_ex_cte_cap_to', assumption) + apply (simp cong:if_cong) + apply (subgoal_tac + "s\ksMachineState := b, + ksPSpace := \x. if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\ = + ksMachineState_update (\_. b) + (s\ksPSpace := \x. if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\)",erule ssubst) + apply (simp add: field_simps ex_cte_cap_wp_to'_def cong:if_cong) + apply simp + apply (simp add: delete_locale_def) + done + +lemma valid_untyped_no_overlap: + "\ valid_untyped' d ptr bits idx s; is_aligned ptr bits; valid_pspace' s \ + \ pspace_no_overlap' ptr bits (s\ksPSpace := ksPSpace s |` (- mask_range ptr bits)\)" + apply (clarsimp simp del: atLeastAtMost_iff + simp: pspace_no_overlap'_def valid_cap'_def valid_untyped'_def) + apply (drule_tac x=x in spec) + apply (drule restrict_map_Some_iff[THEN iffD1]) + apply clarsimp + apply (frule pspace_alignedD') + apply (simp add: valid_pspace'_def) + apply (frule pspace_distinctD') + apply (simp add: valid_pspace'_def) + apply (unfold ko_wp_at'_def obj_range'_def) + apply (drule (1) aligned_ranges_subset_or_disjoint) + apply (clarsimp simp del: Int_atLeastAtMost atLeastAtMost_iff atLeastatMost_subset_iff) + apply (elim disjE) + apply (subgoal_tac "ptr \ mask_range x (objBitsKO ko)") + apply (clarsimp simp:p_assoc_help mask_def) + apply (clarsimp simp:p_assoc_help mask_def) + apply (fastforce simp: mask_def add_diff_eq)+ + done + +lemma deleteObject_no_overlap[wp]: + "\valid_cap' (UntypedCap d ptr bits idx) and valid_pspace'\ + deleteObjects ptr bits + \\rv s. pspace_no_overlap' ptr bits s\" + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply (clarsimp simp: valid_cap'_def cong:if_cong) + apply (drule (2) valid_untyped_no_overlap) + apply (subgoal_tac + "s\ksMachineState := b, + ksPSpace := \x. if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\ = + ksMachineState_update (\_. b) + (s\ksPSpace := ksPSpace s |` (- mask_range ptr bits)\)", simp) + apply (case_tac s, simp) + apply (rule ext) + apply simp + done + +lemma deleteObjects_cte_wp_at': + "\\s. cte_wp_at' P p s \ p \ mask_range ptr bits + \ s \' (UntypedCap d ptr bits idx) \ valid_pspace' s\ + deleteObjects ptr bits + \\rv s. cte_wp_at' P p s\" + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply (clarsimp simp: valid_pspace'_def cong:if_cong) + apply (subgoal_tac + "s\ksMachineState := b, + ksPSpace := \x. if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\ = + ksMachineState_update (\_. b) + (s\ksPSpace := \x. if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\)", erule ssubst) + apply (simp add: cte_wp_at_delete' x_power_minus_1) + apply (case_tac s, simp) + done + +lemma deleteObjects_invs_derivatives: + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. valid_pspace'\" + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. valid_mdb'\" + "\cte_wp_at' (\c. 
cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. pspace_aligned'\" + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. pspace_distinct'\" + by (safe intro!: hoare_strengthen_post [OF deleteObjects_invs']) + +lemma deleteObjects_nosch: + "\\s. P (ksSchedulerAction s)\ + deleteObjects ptr sz + \\rv s. P (ksSchedulerAction s)\" + by (simp add: deleteObjects_def3 | wp hoare_drop_imp)+ + +(* Proving the reordering lemmas here *) + +lemma createObjects'_wp_subst: + "\\P\createObjects a b c d\\r. Q\\ \ \P\createObjects' a b c d\\r. Q\" + apply (clarsimp simp:createObjects_def valid_def return_def bind_def) + apply (drule_tac x = s in spec) + apply (clarsimp simp:split_def) + apply auto + done + +definition pspace_no_overlap_cell' where + "pspace_no_overlap_cell' p \ \kh. + \x ko. kh x = Some ko \ p \ mask_range x (objBitsKO ko)" + +lemma pspace_no_overlap'_lift: + assumes typ_at:"\slot P Q. \\s. P (typ_at' Q slot s)\ f \\r s. P (typ_at' Q slot s) \" + assumes ps :"\Q\ f \\r s. pspace_aligned' s \ pspace_distinct' s \" + shows "\Q and pspace_no_overlap' ptr sz \ f \\r. pspace_no_overlap' ptr sz\" +proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + apply (clarsimp simp:valid_def pspace_no_overlap'_def) + apply (drule_tac x = x in spec) + apply (subgoal_tac "\ko'. ksPSpace s x = Some ko' \ koTypeOf ko = koTypeOf ko'") + apply (clarsimp dest!:objBits_type) + apply (rule ccontr) + apply clarsimp + apply (frule_tac slot1 = x and Q1 = "koTypeOf ko" and P1 = "\a. \ a" in use_valid[OF _ typ_at]) + apply (clarsimp simp:typ_at'_def ko_wp_at'_def)+ + apply (frule(1) use_valid[OF _ ps]) + apply (clarsimp simp:valid_pspace'_def) + apply (frule(1) pspace_alignedD') + apply (drule(1) pspace_distinctD') + apply simp + done +qed + +lemma setCTE_pspace_no_overlap': + "\pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz\ + setCTE cte src + \\r. pspace_no_overlap' ptr sz\" + apply (rule pspace_no_overlap'_lift; wp setCTE_typ_at') + apply auto + done + +lemma getCTE_commute: + assumes cte_at_modify: + "\Q. \\s. P s \ cte_wp_at' Q dest s \ f \\a s. cte_wp_at' Q dest s\" + shows "monad_commute (P and cte_at' dest) (getCTE dest) f" + proof - + have getsame: "\x y s. 
(x,y)\ fst (getCTE dest s) \ y = s" + apply (drule use_valid) + prefer 3 + apply (simp|wp)+ + done + show ?thesis + apply (simp add:monad_commute_def bind_assoc getCTE_def split_def cte_at'_def) + apply (clarsimp simp:bind_def split_def return_def) + apply (rule conjI) + apply (rule set_eqI) + apply (rule iffI) + apply clarsimp + apply (rule bexI[rotated], assumption) + apply (drule_tac Q1 ="(=) cte" in use_valid[OF _ cte_at_modify]) + apply (simp add:cte_wp_at'_def) + apply (simp add:cte_wp_at'_def) + apply clarsimp + apply (rule conjI) + apply (frule_tac Q1 = "(=) cte" in use_valid[OF _ cte_at_modify]) + apply (clarsimp simp:cte_wp_at'_def ko_wp_at'_def) + apply (clarsimp simp:cte_wp_at'_def) + apply (rule bexI[rotated], assumption) + apply (metis fst_eqD getObject_cte_det snd_eqD) + apply (cut_tac no_failD[OF no_fail_getCTE[unfolded getCTE_def]]) + prefer 2 + apply (simp add:cte_wp_at'_def) + apply fastforce + apply simp + apply (rule iffI) + apply clarsimp+ + apply (cut_tac s = b in no_failD[OF no_fail_getCTE[unfolded getCTE_def]]) + prefer 2 + apply fastforce + apply (drule_tac Q1 = "(=) cte" in use_valid[OF _ cte_at_modify]) + apply (simp add:cte_wp_at'_def) + apply (simp add:cte_wp_at_ctes_of) + done +qed + +definition "cte_check \ \b src a next. (case b of + KOTCB tcb \ (is_aligned a (objBits tcb) + \ (case next of None \ True | Some z \ 2^(objBits tcb) \ z - a)) \ + (src - a = tcbVTableSlot << cteSizeBits + \ src - a = tcbCTableSlot << cteSizeBits + \ src - a = tcbReplySlot << cteSizeBits + \ src - a = tcbCallerSlot << cteSizeBits + \ src - a = tcbIPCBufferSlot << cteSizeBits ) + | KOCTE v1 \ ( src = a \ (is_aligned a (objBits (makeObject::cte))) + \ (case next of None \ True | Some z \ 2^(objBits (makeObject::cte)) \ z - a)) + | _ \ False)" + +definition locateCTE where + "locateCTE src \ + (do ps \ gets ksPSpace; + (before, after) \ return (lookupAround2 src ps); + (ptr,val) \ maybeToMonad before; + assert (cte_check val src ptr after); + return ptr + od)" + +definition cte_update where + "cte_update \ \cte b src a. (case b of + KOTCB tcb \ if (src - a = tcbVTableSlot << cteSizeBits) then KOTCB (tcbVTable_update (\_. cte) tcb) + else if (src - a = tcbCTableSlot << cteSizeBits) then KOTCB (tcbCTable_update (\_. cte) tcb) + else if (src - a = tcbReplySlot << cteSizeBits) then KOTCB (tcbReply_update (\_. cte) tcb) + else if (src - a = tcbCallerSlot << cteSizeBits) then KOTCB (tcbCaller_update (\_. cte) tcb) + else if (src - a = tcbIPCBufferSlot << cteSizeBits) then KOTCB (tcbIPCBufferFrame_update (\_. cte) tcb) + else KOTCB tcb + | KOCTE v1 \ KOCTE cte + | x \ x)" + +lemma simpler_updateObject_def: + "updateObject (cte::cte) b src a next = + (\s. (if (cte_check b src a next) then ({(cte_update cte b src a,s)}, False) + else fail s))" + apply (rule ext) + apply (clarsimp simp:ObjectInstances_H.updateObject_cte objBits_simps) + apply (case_tac b) + apply (simp_all add:cte_check_def typeError_def fail_def + tcbIPCBufferSlot_def + tcbCallerSlot_def tcbReplySlot_def + tcbCTableSlot_def tcbVTableSlot_def) + by (intro conjI impI; + clarsimp simp:alignCheck_def unless_def when_def not_less[symmetric] + alignError_def is_aligned_mask magnitudeCheck_def + cte_update_def return_def tcbIPCBufferSlot_def + tcbCallerSlot_def tcbReplySlot_def + tcbCTableSlot_def tcbVTableSlot_def objBits_simps + cteSizeBits_def split:option.splits; + fastforce simp:return_def fail_def bind_def)+ + + +lemma setCTE_def2: + "(setCTE src cte) = + (do ptr \ locateCTE src; + modify (ksPSpace_update (\ps. 
ps(ptr \ (cte_update cte (the (ps ptr)) src ptr )))) od)" + apply (clarsimp simp:setCTE_def setObject_def split_def locateCTE_def bind_assoc) + apply (rule ext) + apply (rule_tac Q = "\r s'. s'= x \ r = ksPSpace x " in monad_eq_split) + apply (rule_tac Q = "\ptr s'. s' = x \ snd ptr = the ((ksPSpace x) (fst ptr) ) " in monad_eq_split) + apply (clarsimp simp:assert_def return_def fail_def bind_def simpler_modify_def) + apply (clarsimp simp:simpler_updateObject_def fail_def) + apply (wp|clarsimp simp:)+ + apply (simp add:lookupAround2_char1) + apply wp + apply simp + done + +lemma singleton_locateCTE: + "a \ fst (locateCTE src s) = ({a} = fst (locateCTE src s))" + apply (clarsimp simp:locateCTE_def assert_opt_def assert_def + gets_def get_def bind_def return_def split_def) + apply (clarsimp simp:return_def fail_def + split:if_splits option.splits)+ + done + +lemma locateCTE_inv: + "\P\locateCTE s\\r. P\" + apply (simp add:locateCTE_def split_def) + apply wp + apply clarsimp + done + +lemma locateCTE_case: + "\\\ locateCTE src + \\r s. \obj. ksPSpace s r = Some obj \ + (case obj of KOTCB tcb \ True | KOCTE v \ True | _ \ False)\" + apply (clarsimp simp:locateCTE_def split_def | wp)+ + apply (clarsimp simp: lookupAround2_char1) + apply (case_tac b) + apply (simp_all add:cte_check_def) + done + +lemma cte_wp_at_top: + "(cte_wp_at' \ src s) + = (\a b. ( fst (lookupAround2 src (ksPSpace s)) = Some (a, b) \ + cte_check b src a (snd (lookupAround2 src (ksPSpace s)))))" + apply (simp add: cte_wp_at'_def getObject_def gets_def get_def bind_def return_def split_def + assert_opt_def fail_def + split: option.splits) + apply (clarsimp simp:loadObject_cte) + apply (rename_tac obj) + apply (case_tac obj; simp) + apply ((simp add: typeError_def fail_def cte_check_def + split: Structures_H.kernel_object.splits)+)[5] + apply (simp add: loadObject_cte cte_check_def tcbIPCBufferSlot_def tcbCallerSlot_def + tcbReplySlot_def tcbCTableSlot_def tcbVTableSlot_def objBits_simps + cteSizeBits_def) + apply (simp add: alignCheck_def bind_def alignError_def fail_def return_def objBits_simps + magnitudeCheck_def in_monad is_aligned_mask when_def unless_def + split: option.splits) + apply (intro conjI impI allI; simp add: not_le) + apply (clarsimp simp:cte_check_def) + apply (simp add: alignCheck_def bind_def alignError_def fail_def return_def objBits_simps + magnitudeCheck_def in_monad is_aligned_mask when_def unless_def + split: option.splits) + apply (intro conjI impI allI; simp add:not_le) + apply (simp add: typeError_def fail_def cte_check_def split: Structures_H.kernel_object.splits) + done + +lemma locateCTE_monad: + assumes ko_wp_at: "\Q dest. + \\s. P1 s \ ko_wp_at' (\obj. Q (objBitsKO obj)) dest s \ f + \\a s. ko_wp_at' (\obj. Q (objBitsKO obj)) dest s\" + assumes cte_wp_at: "\ dest. + \\s. P2 s \ cte_wp_at' \ dest s \ f + \\a s. cte_wp_at' \ dest s\" + assumes psp_distinct: + "\\s. P3 s \ f \\a s. pspace_distinct' s\" + assumes psp_aligned: + "\\s. P4 s \ f \\a s. pspace_aligned' s\" + shows + "\{(ptr, s)} = fst (locateCTE src s); + (r, s') \ fst (f s);pspace_aligned' s;pspace_distinct' s;(P1 and P2 and P3 and P4) s\ + \ {(ptr,s')} = fst (locateCTE src s')" +proof - + + have src_in_range: + "\obj src a m s'. \cte_check obj src a m;ksPSpace s' a = Some obj\ \ src \ {a..a + 2 ^ objBitsKO obj - 1}" + proof - + fix obj src a m + show "\s'. 
\cte_check obj src a m; ksPSpace s' a = Some obj\ \ src \ {a..a + 2 ^ objBitsKO obj - 1}" + by (case_tac obj) + (auto simp add: cte_check_def objBits_simps' diff_eq_eq + add.commute[where b=a] + word_plus_mono_right is_aligned_no_wrap' + tcbVTableSlot_def tcbCTableSlot_def tcbReplySlot_def + tcbCallerSlot_def tcbIPCBufferSlot_def ) + qed + + note blah[simp del] = usableUntypedRange.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + have step1: + "\(ptr, s) \ fst (locateCTE src s); + (r, s') \ fst (f s); pspace_aligned' s; pspace_distinct' s; (P1 and P2 and P3 and P4) s\ + \ (ptr,s') \ fst (locateCTE src s')" + apply (frule use_valid[OF _ locateCTE_case]) + apply simp + apply (clarsimp simp: locateCTE_def gets_def split_def + get_def bind_def return_def assert_opt_def fail_def assert_def + split: option.splits if_split_asm) + apply (frule_tac dest1 = src in use_valid[OF _ cte_wp_at]) + apply simp + apply (subst cte_wp_at_top) + apply simp + apply (clarsimp simp add:cte_wp_at_top) + apply (clarsimp simp:lookupAround2_char1) + apply (frule_tac dest1 = ptr and Q1 = "\x. x = objBitsKO b" in use_valid[OF _ ko_wp_at]) + apply (frule(1) pspace_alignedD') + apply (frule(1) pspace_distinctD') + apply (auto simp add:ko_wp_at'_def)[1] + apply (clarsimp simp add:ko_wp_at'_def) + apply (rule ccontr) + apply (frule use_valid[OF _ psp_distinct]) + apply simp + apply (frule use_valid[OF _ psp_aligned]) + apply simp + apply (frule_tac x = a in pspace_distinctD') + apply simp + apply (frule_tac s = s' and a = ptr in rule_out_intv[rotated]) + apply simp+ + apply (frule_tac s = s' and b = ptr and a = a in rule_out_intv) + apply simp+ + apply (thin_tac "\x. P x \ Q x" for P Q)+ + apply (drule_tac p = ptr and p' = a in aligned_ranges_subset_or_disjoint) + apply (erule(1) pspace_alignedD') + apply (drule(1) src_in_range)+ + apply (drule base_member_set[OF pspace_alignedD']) + apply simp + apply (simp add:objBitsKO_bounded2[unfolded word_bits_def,simplified]) + apply (drule base_member_set[OF pspace_alignedD']) + apply simp + apply (simp add:objBitsKO_bounded2[unfolded word_bits_def,simplified]) + apply (clarsimp simp:field_simps mask_def) + apply blast + done + assume + "{(ptr, s)} = fst (locateCTE src s)" + "(r, s') \ fst (f s)" + "pspace_aligned' s" + "pspace_distinct' s" + "(P1 and P2 and P3 and P4) s" + thus ?thesis + using assms step1 + by (clarsimp simp:singleton_locateCTE) +qed + +lemma empty_fail_locateCTE: + "empty_fail (locateCTE src)" + by (fastforce simp: locateCTE_def bind_assoc split_def) + +lemma fail_empty_locateCTE: + "snd (locateCTE src s) \ fst (locateCTE src s) = {}" + by (auto simp: assert_def fail_def locateCTE_def bind_assoc return_def split_def gets_def + get_def bind_def assert_opt_def image_def + split:option.splits if_split_asm)+ + +lemma locateCTE_commute: + assumes nf: "no_fail P0 f" "no_fail P1 (locateCTE src)" + and psp_distinct: "\\s. P2 s \ f \\a s. pspace_distinct' s\" + and psp_aligned: "\\s. P3 s \ f \\a s. pspace_aligned' s\" + assumes ko_wp_at: "\Q dest. + \\s. (P0 and P1 and P2 and P3) s \ ko_wp_at' (\obj. Q (objBitsKO obj)) dest s \ f + \\a s. ko_wp_at' (\obj. Q (objBitsKO obj)) dest s\" + and cte_wp_at: "\ dest. + \\s. (P0 and P1 and P2 and P3) s \ cte_wp_at' \ dest s \ f + \\a s. cte_wp_at' \ dest s\" + shows "monad_commute (P0 and P1 and P2 and P3 and P4 and P5 and pspace_aligned' and pspace_distinct') + (locateCTE src) f" +proof - + have same: + "\ptr val next s s'. 
(ptr, s') \ fst (locateCTE src s) + \ s' = s" + by (erule use_valid[OF _ locateCTE_inv],simp) + show ?thesis + apply (clarsimp simp:monad_commute_def) + apply (clarsimp simp:bind_def return_def) + apply (intro conjI iffI set_eqI) + apply (clarsimp) + apply (frule same) + apply (clarsimp) + apply (rule bexI[rotated], assumption) + apply (frule singleton_locateCTE[THEN iffD1]) + apply (frule locateCTE_monad [OF ko_wp_at cte_wp_at psp_distinct psp_aligned]) + apply assumption+ + apply simp + apply (clarsimp) + apply (rule bexI[rotated]) + apply (fastforce) + apply clarsimp + apply clarsimp + apply (frule empty_failD2[OF empty_fail_locateCTE no_failD[OF nf(2)]]) + apply clarsimp + apply (rule bexI[rotated],assumption) + apply (clarsimp) + apply (frule_tac s = bb in same) + apply (frule_tac s = s in same) + apply clarsimp + apply (frule_tac s1 = s in singleton_locateCTE[THEN iffD1]) + apply (frule locateCTE_monad [OF ko_wp_at cte_wp_at psp_distinct psp_aligned]) + apply assumption+ + apply simp + apply (rule bexI[rotated],assumption) + apply (drule sym) + apply (clarsimp simp:singleton_locateCTE singleton_iff) + apply fastforce + apply (clarsimp simp:split_def image_def) + apply (elim disjE) + apply clarsimp + apply (drule same) + apply simp + apply (frule no_failD[OF nf(2)]) + apply simp + apply (clarsimp simp:split_def image_def) + apply (elim disjE) + apply clarsimp + apply (frule empty_failD2[OF empty_fail_locateCTE no_failD[OF nf(2)]]) + apply clarsimp + apply (frule same) + apply simp + apply (frule singleton_locateCTE[THEN iffD1]) + apply (frule locateCTE_monad [OF ko_wp_at cte_wp_at psp_distinct psp_aligned]) + apply assumption+ + apply simp + apply (clarsimp) + apply (simp add: fail_empty_locateCTE) + apply (simp add: no_failD[OF nf(1)]) + done +qed + +lemmas getObjSize_simps = AARCH64_H.getObjectSize_def[split_simps AARCH64_H.object_type.split apiobject_type.split] + +lemma arch_toAPIType_simps: + "toAPIType ty = Some a \ ty = APIObjectType a" + by (case_tac ty,auto simp:AARCH64_H.toAPIType_def) + +lemma createObject_cte_wp_at': + "\\s. Types_H.getObjectSize ty us < word_bits \ + is_aligned ptr (Types_H.getObjectSize ty us) \ + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) s \ + cte_wp_at' (\c. P c) slot s \ pspace_aligned' s \ + pspace_distinct' s\ + RetypeDecls_H.createObject ty ptr us d + \\r s. cte_wp_at' (\c. P c) slot s \" + apply (simp add:createObject_def) + apply (rule hoare_pre) + apply (wpc + | wp createObjects_orig_cte_wp_at'[where sz = "(Types_H.getObjectSize ty us)"] + threadSet_cte_wp_at' + | simp add: AARCH64_H.createObject_def placeNewDataObject_def + unless_def placeNewObject_def2 objBits_simps range_cover_full + curDomain_def bit_simps + getObjSize_simps apiGetObjectSize_def tcbBlockSizeBits_def + epSizeBits_def ntfnSizeBits_def cteSizeBits_def updatePTType_def + | intro conjI impI | clarsimp dest!: arch_toAPIType_simps)+ + done + +lemma createObject_getCTE_commute: + "monad_commute + (cte_wp_at' (\_. 
True) dests and pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + K (ptr \ dests) and K (Types_H.getObjectSize ty us < word_bits) and + K (is_aligned ptr (Types_H.getObjectSize ty us))) + (RetypeDecls_H.createObject ty ptr us d) (getCTE dests)" + apply (rule monad_commute_guard_imp[OF commute_commute]) + apply (rule getCTE_commute) + apply (rule hoare_pre) + apply (wp createObject_cte_wp_at') + apply (clarsimp simp:cte_wp_at_ctes_of) + apply assumption + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma simpler_placeNewObject_def: + "\us < word_bits;is_aligned ptr (objBitsKO (injectKOS val) + us); + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s; pspace_aligned' s \ \ placeNewObject ptr val us s = + modify (ksPSpace_update + (\_. foldr (\addr map. map(addr \ injectKOS val)) (new_cap_addrs (2 ^ us) ptr (injectKOS val)) + (ksPSpace s))) s" + apply (clarsimp simp:placeNewObject_def2) + apply (clarsimp simp:createObjects'_def) + apply (simp add:bind_def in_monad when_def is_aligned_mask[THEN iffD1]) + apply (clarsimp simp:return_def bind_def gets_def assert_def fail_def get_def split_def + split:option.splits) + apply (clarsimp simp: new_cap_addrs_fold' word_1_le_power[where 'a=machine_word_len, folded word_bits_def] lookupAround2_char1 not_less) + apply (drule(1) pspace_no_overlapD'[rotated]) + apply (drule_tac x = a in in_empty_interE) + apply clarsimp + apply (drule(1) pspace_alignedD') + apply (simp add:is_aligned_no_overflow) + apply (clarsimp simp: shiftL_nat p_assoc_help) + apply simp + done + +lemma fail_set: "fst (fail s) = {}" + by (clarsimp simp: fail_def) + +lemma locateCTE_cte_no_fail: + "no_fail (cte_at' src) (locateCTE src)" + apply (clarsimp simp:no_fail_def cte_wp_at'_def getObject_def + locateCTE_def return_def gets_def get_def bind_def split_def + assert_opt_def assert_def in_fail fail_set split:option.splits) + apply (clarsimp simp:cte_check_def ObjectInstances_H.loadObject_cte) + apply (drule in_singleton) + by (auto simp: objBits_simps cteSizeBits_def alignError_def + alignCheck_def in_monad is_aligned_mask magnitudeCheck_def + typeError_def + cong: if_cong split: if_splits option.splits kernel_object.splits) + +lemma not_in_new_cap_addrs: + "\is_aligned ptr (objBitsKO obj + us); + objBitsKO obj + us < word_bits; + pspace_no_overlap' ptr (objBitsKO obj + us) s; + ksPSpace s dest = Some ko;pspace_aligned' s\ + \ dest \ set (new_cap_addrs (2 ^ us) ptr obj)" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (rule ccontr) + apply simp + apply (drule(1) pspace_no_overlapD'[rotated]) + apply (erule_tac x = dest in in_empty_interE) + apply (clarsimp) + apply (erule(1) is_aligned_no_overflow[OF pspace_alignedD']) + apply (erule subsetD[rotated]) + apply (simp add:p_assoc_help) + apply (rule new_cap_addrs_subset[unfolded ptr_add_def,simplified]) + apply (rule range_cover_rel[OF range_cover_full]) + apply simp+ + done + +lemma placeNewObject_pspace_aligned': + "\K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + objBitsKO (injectKOS val) + us < word_bits) and + pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us)\ + placeNewObject ptr val us + \\r s. 
pspace_aligned' s\" + apply (clarsimp simp:valid_def) + apply (simp add:simpler_placeNewObject_def simpler_modify_def) + apply (subst data_map_insert_def[symmetric])+ + apply (erule(2) Retype_R.retype_aligned_distinct' [unfolded data_map_insert_def[symmetric]]) + apply (rule range_cover_rel[OF range_cover_full]) + apply simp+ + done + +lemma placeNewObject_pspace_distinct': + "\\s. objBitsKO (injectKOS val) + us < word_bits \ + is_aligned ptr (objBitsKO (injectKOS val) + us) \ + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s \ + pspace_aligned' s \ pspace_distinct' s\ + placeNewObject ptr val us + \\a. pspace_distinct'\" + apply (clarsimp simp:valid_def) + apply (simp add:simpler_placeNewObject_def simpler_modify_def) + apply (subst data_map_insert_def[symmetric])+ + apply (erule(2) Retype_R.retype_aligned_distinct' + [unfolded data_map_insert_def[symmetric]]) + apply (rule range_cover_rel[OF range_cover_full]) + apply simp+ + done + +lemma placeNewObject_ko_wp_at': + "\\s. (if slot \ set (new_cap_addrs (2 ^ us) ptr (injectKOS val)) + then P (injectKOS val) + else ko_wp_at' P slot s) \ + objBitsKO (injectKOS val) + us < word_bits \ + is_aligned ptr (objBitsKO (injectKOS val) + us) \ + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s \ + pspace_aligned' s \ pspace_distinct' s\ + placeNewObject ptr val us + \\a. ko_wp_at' P slot\" + apply (clarsimp simp:valid_def split del:if_split) + apply (simp add:simpler_placeNewObject_def simpler_modify_def) + apply (subst data_map_insert_def[symmetric])+ + apply (subst retype_ko_wp_at') + apply simp+ + apply (rule range_cover_rel[OF range_cover_full]) + apply simp+ + done + +lemma cte_wp_at_cases_mask': + "cte_wp_at' P p = (\s. + (obj_at' P p s + \ p && mask tcbBlockSizeBits \ dom tcb_cte_cases + \ obj_at' (P \ fst (the (tcb_cte_cases (p && mask tcbBlockSizeBits)))) + (p && ~~ mask tcbBlockSizeBits) s))" + apply (rule ext) + apply (simp add:cte_wp_at_obj_cases_mask) + done + +lemma not_in_new_cap_addrs': + "\dest \ set (new_cap_addrs (2 ^ us) ptr obj); + is_aligned ptr (objBitsKO obj + us); + objBitsKO obj + us < word_bits; + pspace_no_overlap' ptr (objBitsKO obj + us) s; + pspace_aligned' s \ + \ ksPSpace s dest = None" + apply (rule ccontr) + apply clarsimp + apply (drule not_in_new_cap_addrs) + apply simp+ + done + +lemma placeNewObject_cte_wp_at': + "\K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + objBitsKO (injectKOS val) + us < word_bits) and + K (ptr \ src) and cte_wp_at' P src and + pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us)\ + placeNewObject ptr val us + \\r s. cte_wp_at' P src s\" + apply (clarsimp simp:placeNewObject_def2) + apply (wp createObjects_orig_cte_wp_at') + apply (auto simp:range_cover_full) + done + + +lemma placeNewObject_cte_wp_at'': + "\\s. cte_wp_at' P slot s \ + objBitsKO (injectKOS val) + us < word_bits \ + is_aligned ptr (objBitsKO (injectKOS val) + us) \ + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s \ + pspace_aligned' s \ pspace_distinct' s\ + placeNewObject ptr val us \\a s. 
cte_wp_at' P slot s\" + apply (simp add:cte_wp_at_cases_mask' obj_at'_real_def) + apply (wp hoare_vcg_disj_lift placeNewObject_ko_wp_at') + apply (clarsimp simp:conj_comms) + apply (intro conjI impI allI impI) + apply (drule(4) not_in_new_cap_addrs') + apply (clarsimp simp:ko_wp_at'_def) + apply (drule (4)not_in_new_cap_addrs')+ + apply (clarsimp simp:ko_wp_at'_def) + apply (elim disjE) + apply simp + apply clarsimp + apply (drule (4)not_in_new_cap_addrs')+ + apply (clarsimp simp:ko_wp_at'_def) + done + +lemma no_fail_placeNewObject: + "no_fail (\s. us < word_bits \ + is_aligned ptr (objBitsKO (injectKOS val) + us) \ + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s \ + pspace_aligned' s) + (placeNewObject ptr val us)" + by (clarsimp simp:no_fail_def simpler_modify_def simpler_placeNewObject_def) + +lemma placeNewObject_locateCTE_commute: + "monad_commute + (K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + (objBitsKO (injectKOS val) + us) < word_bits \ ptr \ src) and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) and + pspace_aligned' and pspace_distinct' and cte_at' src) + (placeNewObject ptr val us) (locateCTE src)" + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF locateCTE_commute]) + apply (wp no_fail_placeNewObject locateCTE_cte_no_fail + placeNewObject_pspace_aligned' + placeNewObject_pspace_distinct' + placeNewObject_ko_wp_at' | simp)+ + apply (clarsimp simp:ko_wp_at'_def) + apply (drule(3) not_in_new_cap_addrs) + apply fastforce+ + apply (wp placeNewObject_cte_wp_at'') + apply clarsimp + apply fastforce + done + +lemma update_ksPSpaceI: + "kh = kh' \ s\ksPSpace := kh\ = s\ksPSpace := kh'\" + by simp + +lemma placeNewObject_modify_commute: + "monad_commute + (K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + objBitsKO (injectKOS val) + us < word_bits) and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) and + pspace_aligned' and ko_wp_at' (\a. objBitsKO (f (Some a)) = objBitsKO a) ptr') + (placeNewObject ptr val us) + (modify (ksPSpace_update (\ps. ps(ptr' \ f (ps ptr')))))" + apply (clarsimp simp:monad_commute_def simpler_modify_def bind_def split_def return_def) + apply (subst simpler_placeNewObject_def; (simp add:range_cover_def)?) + apply (clarsimp simp: simpler_modify_def) + apply (frule(1) range_cover_full) + apply (simp add: simpler_placeNewObject_def) + apply (subgoal_tac "pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) + (ksPSpace_update (\ps. ps(ptr' \ f (ps ptr'))) s)") + prefer 2 + apply (clarsimp simp:ko_wp_at'_def) + apply (subst pspace_no_overlap'_def) + apply (intro allI impI) + apply (case_tac "x = ptr'") + apply (subgoal_tac "objBitsKO koa = objBitsKO ko") + apply (drule(1) pspace_no_overlapD') + apply (clarsimp simp:field_simps mask_def) + apply (clarsimp) + apply (drule_tac x = x and s = s in pspace_no_overlapD'[rotated]) + apply (simp) + apply (clarsimp simp:field_simps mask_def) + apply (subgoal_tac "pspace_aligned' (ksPSpace_update (\ps. 
ps(ptr' \ f (ps ptr'))) s)") + prefer 2 + apply (subst pspace_aligned'_def) + apply (rule ballI) + apply (erule domE) + apply (clarsimp simp:ko_wp_at'_def split:if_split_asm) + apply (drule(1) pspace_alignedD')+ + apply simp + apply (simp add:simpler_placeNewObject_def) + apply (clarsimp simp:simpler_modify_def Fun.comp_def singleton_iff image_def) + apply (intro conjI update_ksPSpaceI ext) + apply (clarsimp simp:ko_wp_at'_def foldr_upd_app_if) + apply (frule(1) pspace_no_overlapD') + apply (drule subsetD[rotated]) + apply (rule new_cap_addrs_subset) + apply (erule range_cover_rel) + apply simp + apply simp + apply (drule_tac x = ptr' in in_empty_interE) + apply (clarsimp simp:is_aligned_no_overflow) + apply (clarsimp simp:range_cover_def ptr_add_def obj_range'_def p_assoc_help) + apply simp + done + +lemma cte_update_objBits[simp]: + "(objBitsKO (cte_update cte b src a)) = objBitsKO b" + by (case_tac b, + (simp add:objBits_simps cte_update_def)+) + +lemma locateCTE_ret_neq: + "\ko_wp_at' (\x. koTypeOf x \ TCBT \ koTypeOf x \ CTET) ptr\ + locateCTE src \\r s. ptr \ r\" + apply (clarsimp simp add:valid_def) + apply (frule use_valid[OF _ locateCTE_case]) + apply simp + apply (frule(1) use_valid[OF _ locateCTE_inv]) + apply (clarsimp simp:ko_wp_at'_def koTypeOf_def) + apply (auto split:Structures_H.kernel_object.split_asm) + done + +lemma locateCTE_ko_wp_at': + "\cte_at' src and pspace_distinct' \ + locateCTE src + \\rv. ko_wp_at' \ rv \" + apply (clarsimp simp:locateCTE_def split_def) + apply wp + apply (clarsimp simp: cte_wp_at'_def getObject_def gets_def split_def get_def bind_def return_def + ko_wp_at'_def lookupAround2_char1 assert_opt_def) + apply (clarsimp split:option.splits + simp:fail_def return_def lookupAround2_char1) + apply (rename_tac ko) + apply (case_tac ko; + clarsimp simp: cte_check_def objBits_simps cte_update_def dest!: pspace_distinctD') + done + + +lemma setCTE_placeNewObject_commute: + "monad_commute + (K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + objBitsKO (injectKOS val) + us < word_bits) and + K(ptr \ src) and cte_wp_at' (\_. 
True) src and + pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us)) + (setCTE src cte) (placeNewObject ptr val us)" + apply (clarsimp simp: setCTE_def2 split_def) + apply (rule commute_commute) + apply (rule monad_commute_guard_imp) + apply (rule monad_commute_split[OF placeNewObject_modify_commute]) + apply (rule placeNewObject_locateCTE_commute) + apply (wp locateCTE_inv locateCTE_ko_wp_at' | simp)+ + done + +lemma doMachineOp_upd_heap_commute: + "monad_commute \ (doMachineOp x) (modify (ksPSpace_update P))" + apply (clarsimp simp:doMachineOp_def split_def simpler_modify_def + gets_def get_def return_def bind_def select_f_def) + apply (clarsimp simp:monad_commute_def bind_def return_def) + apply fastforce + done + +lemma magnitudeCheck_det: + "\ksPSpace s ptr = Some ko; is_aligned ptr (objBitsKO ko); + ps_clear ptr (objBitsKO ko) s\ + \ magnitudeCheck ptr (snd (lookupAround2 ptr (ksPSpace s))) + (objBitsKO ko) s = + ({((), s)},False)" + apply (frule in_magnitude_check'[THEN iffD2]) + apply (case_tac ko) + apply (simp add: objBits_simps' pageBits_def)+ + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object) + apply (simp add:archObjSize_def pageBits_def vcpuBits_def)+ + apply (subgoal_tac + "\ snd (magnitudeCheck ptr (snd (lookupAround2 ptr (ksPSpace s))) (objBitsKO ko) s)") + apply (drule singleton_in_magnitude_check) + apply (drule_tac x = s in spec) + apply (case_tac + "(magnitudeCheck ptr (snd (lookupAround2 ptr (ksPSpace s))) (objBitsKO ko) s)") + apply simp + apply (rule ccontr) + apply (clarsimp simp:magnitudeCheck_assert assert_def fail_def return_def + split:if_splits option.splits) + done + +lemma getPTE_det: + "ko_wp_at' ((=) (KOArch (KOPTE pte))) p s + \ getObject p s = ({((pte::pte),s)},False)" + apply (clarsimp simp: ko_wp_at'_def getObject_def split_def + bind_def gets_def return_def get_def assert_opt_def + split: if_splits) + apply (clarsimp simp: fail_def return_def lookupAround2_known1) + apply (simp add: loadObject_default_def) + apply (clarsimp simp: projectKO_def projectKO_opt_pte alignCheck_def + objBits_simps unless_def) + apply (clarsimp simp: bind_def return_def is_aligned_mask) + apply (intro conjI) + apply (intro set_eqI iffI) + apply clarsimp + apply (subst (asm) in_magnitude_check') + apply (simp add:archObjSize_def is_aligned_mask)+ + apply (rule bexI[rotated]) + apply (rule in_magnitude_check'[THEN iffD2]) + apply (simp add:is_aligned_mask)+ + apply (clarsimp simp:image_def) + apply (clarsimp simp: magnitudeCheck_assert assert_def objBits_def archObjSize_def + return_def fail_def lookupAround2_char2 + split: option.splits if_split_asm) + apply (rule ccontr) + apply (simp add: ps_clear_def flip: is_aligned_mask) + apply (erule_tac x = x2 in in_empty_interE) + apply (clarsimp simp:less_imp_le) + apply (rule conjI) + apply (subst add.commute) + apply (rule word_diff_ls') + apply (clarsimp simp:field_simps not_le plus_one_helper mask_def) + apply (simp add: is_aligned_no_overflow_mask add_ac) + apply simp + apply blast + done + +lemma in_dom_eq: + "m a = Some obj \ dom (\b. if b = a then Some g else m b) = dom m" + by (rule set_eqI,clarsimp simp:dom_def) + +lemma setCTE_pte_at': + "\ko_wp_at' ((=) (KOArch (KOPTE pte))) ptr and + cte_wp_at' (\_. True) src and pspace_distinct'\ + setCTE src cte + \\x s. 
ko_wp_at' ((=) (KOArch (KOPTE pte))) ptr s\" + apply (clarsimp simp:setCTE_def2) + including no_pre apply wp + apply (simp add:split_def) + apply (clarsimp simp:valid_def) + apply (subgoal_tac "b = s") + prefer 2 + apply (erule use_valid[OF _ locateCTE_inv]) + apply simp + apply (subgoal_tac "ptr \ a") + apply (frule use_valid[OF _ locateCTE_ko_wp_at']) + apply simp + apply (clarsimp simp:ko_wp_at'_def ps_clear_def) + apply (simp add:in_dom_eq) + apply (drule use_valid[OF _ locateCTE_case]) + apply simp + apply (clarsimp simp:ko_wp_at'_def objBits_simps) + done + +lemma storePTE_det: + "ko_wp_at' ((=) (KOArch (KOPTE pte))) ptr s + \ storePTE ptr (new_pte::pte) s = + modify (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPTE new_pte)))) s" + apply (clarsimp simp:ko_wp_at'_def storePTE_def split_def + bind_def gets_def return_def + get_def setObject_def + assert_opt_def split:if_splits) + apply (clarsimp simp:lookupAround2_known1 return_def alignCheck_def + updateObject_default_def split_def + unless_def projectKO_def + projectKO_opt_pte bind_def when_def + is_aligned_mask[symmetric] objBits_simps) + apply (drule magnitudeCheck_det; simp add:objBits_simps) + done + +lemma modify_obj_commute: + "monad_commute (K (ptr\ ptr')) + (modify (ksPSpace_update (\ps. ps(ptr \ ko)))) + (modify (ksPSpace_update (\ps. ps(ptr' \ ko'))))" + apply (clarsimp simp:monad_commute_def return_def bind_def simpler_modify_def) + apply (case_tac s) + apply auto + done + +lemma modify_specify: + "(\s. modify (ksPSpace_update (\_. P (ksPSpace s))) s) = + modify (ksPSpace_update (\ps. P ps))" + by (auto simp: simpler_modify_def) + +lemma modify_specify2: + "(modify (ksPSpace_update (\_. P (ksPSpace s))) >>= g) s = + (modify (ksPSpace_update (\ps. P ps)) >>=g) s" + apply (clarsimp simp:simpler_modify_def bind_def) + apply (rule arg_cong[where f = "\x. g () x"],simp) + done + +lemma modify_pte_pte_at': + "\pte_at' ptr\ + modify (ksPSpace_update (\ps. ps(ptr \ KOArch (KOPTE new_pte)))) + \\a. pte_at' ptr\" + apply wp + apply (clarsimp simp del: fun_upd_apply + simp: typ_at'_def ko_wp_at'_def objBits_simps) + apply (clarsimp simp:ps_clear_def) + apply (case_tac ko,simp_all) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + apply (clarsimp simp:archObjSize_def) + done + +lemma modify_pte_pspace_distinct': + "\pte_at' ptr and pspace_distinct'\ + modify (ksPSpace_update (\ps. ps(ptr \ KOArch (KOPTE new_pte)))) + \\a. pspace_distinct'\" + apply (clarsimp simp: simpler_modify_def ko_wp_at'_def valid_def typ_at'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + apply (subst pspace_distinct'_def) + apply (intro ballI) + apply (erule domE) + apply (clarsimp split:if_splits) + apply (drule(1) pspace_distinctD') + apply (simp add:objBits_simps) + apply (simp add:ps_clear_def) + apply (drule_tac x = x in pspace_distinctD') + apply simp + unfolding ps_clear_def + apply (erule disjoint_subset2[rotated]) + apply clarsimp + done + +lemma modify_pte_pspace_aligned': + "\pte_at' ptr and pspace_aligned'\ + modify (ksPSpace_update (\ps. ps(ptr \ KOArch (KOPTE new_pte)))) + \\a. 
pspace_aligned'\" + apply (clarsimp simp: simpler_modify_def ko_wp_at'_def valid_def typ_at'_def) + apply (case_tac ko,simp_all) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + apply (subst pspace_aligned'_def) + apply (intro ballI) + apply (erule domE) + apply (clarsimp split:if_splits) + apply (drule(1) pspace_alignedD') + apply (simp add:objBits_simps) + apply (simp add:ps_clear_def) + apply (drule_tac x = x in pspace_alignedD') + apply simp + apply simp + done + +lemma modify_pte_psp_no_overlap': + "\pte_at' ptr and pspace_no_overlap' ptr' sz\ + modify (ksPSpace_update (\ps. ps(ptr \ KOArch (KOPTE new_pte)))) + \\a. pspace_no_overlap' ptr' sz\" +proof - + note [simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + apply (clarsimp simp:simpler_modify_def ko_wp_at'_def valid_def typ_at'_def) + apply (case_tac ko,simp_all) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + apply (subst pspace_no_overlap'_def) + apply (intro allI impI) + apply (clarsimp split:if_splits) + apply (drule(1) pspace_no_overlapD') + apply (simp add:objBits_simps field_simps mask_def) + apply (drule(1) pspace_no_overlapD')+ + apply (simp add:field_simps mask_def) + done +qed + +lemma koTypeOf_pte: + "koTypeOf ko = ArchT PTET \ \pte. ko = KOArch (KOPTE pte)" + apply (case_tac ko,simp_all) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + done + +lemma modify_mapM_x: + "(modify (ksPSpace_update (foldr (\addr map. map(addr \ obj)) list))) = + (mapM_x (\x. modify (ksPSpace_update (\m. m(x\ obj)))) (rev list))" + apply (induct list) + apply (clarsimp simp:mapM_x_Nil) + apply (rule ext) + apply (simp add:simpler_modify_def return_def) + apply (clarsimp simp:mapM_x_append mapM_x_singleton simpler_modify_def) + apply (drule sym) + apply (rule ext) + apply (simp add:Fun.comp_def bind_def) + done + +lemma modify_obj_commute': + "monad_commute (K (ptr\ ptr') and ko_wp_at' \ ptr') + (modify (ksPSpace_update (\ps. ps(ptr \ ko)))) + (modify (ksPSpace_update (\ps. ps(ptr' \ f (the (ps ptr'))))))" + apply (clarsimp simp:monad_commute_def return_def + bind_def simpler_modify_def ko_wp_at'_def) + apply (case_tac s) + apply clarsimp + apply (rule ext) + apply clarsimp + done + +lemma setCTE_doMachineOp_commute: + assumes nf: "no_fail Q (doMachineOp x)" + shows "monad_commute (cte_at' dest and pspace_aligned' and pspace_distinct' and Q) + (setCTE dest cte) + (doMachineOp x)" + apply (simp add:setCTE_def2 split_def) + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF monad_commute_split]) + apply (rule doMachineOp_upd_heap_commute) + apply (rule commute_commute[OF locateCTE_commute]) + apply (wp nf locateCTE_cte_no_fail)+ + apply clarsimp + apply (wp|clarsimp|fastforce)+ + done + +lemma placeNewObject_valid_arch_state: + "\valid_arch_state' and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) and + pspace_aligned' and pspace_distinct' and + K (is_aligned ptr (objBitsKO (injectKOS val) + us)) and + K ( (objBitsKO (injectKOS val)+ us)< word_bits)\ + placeNewObject ptr val us + \\rv s. 
valid_arch_state' s\" + apply (simp add:placeNewObject_def2 split_def) + apply (rule createObjects'_wp_subst) + apply (wp createObjects_valid_arch) + apply clarsimp + apply (intro conjI,simp) + apply (erule(1) range_cover_full) + done + +lemma setCTE_modify_gsCNode_commute: + "monad_commute P (setCTE src (cte::cte)) + (modify (%ks. ks\gsCNodes := f (gsCNodes ks)\))" + by (auto simp: monad_commute_def setCTE_def setObject_def split_def bind_def + return_def simpler_modify_def simpler_gets_def assert_opt_def + fail_def simpler_updateObject_def + split: option.splits if_split_asm) + +lemma setCTE_modify_gsUserPages_commute: + "monad_commute P (setCTE src (cte::cte)) + (modify (%ks. ks\gsUserPages := f (gsUserPages ks)\))" + by (auto simp: monad_commute_def setCTE_def setObject_def split_def bind_def + return_def simpler_modify_def simpler_gets_def assert_opt_def + fail_def simpler_updateObject_def + split: option.splits if_split_asm) + +lemma setCTE_updatePTType_commute: + "monad_commute \ (setCTE src cte) (updatePTType p pt_t)" + unfolding updatePTType_def + apply (clarsimp simp: monad_commute_def) + apply (clarsimp simp: setCTE_def setObject_def bind_assoc exec_gets exec_modify) + apply (case_tac "lookupAround2 src (ksPSpace s)"; clarsimp simp: bind_assoc) + apply (simp add: assert_opt_def bind_assoc simpler_updateObject_def + simpler_modify_def simpler_gets_def return_def split_def fail_def + split: option.splits) + apply (clarsimp simp: bind_def fail_def) + apply (case_tac s, rename_tac arch mach, case_tac arch, simp) + apply fastforce + done + +lemma getTCB_det: + "ko_wp_at' ((=) (KOTCB tcb)) p s + \ getObject p s = ({(tcb,s)},False)" + apply (clarsimp simp:ko_wp_at'_def getObject_def split_def + bind_def gets_def return_def get_def + assert_opt_def split:if_splits) + apply (clarsimp simp: fail_def return_def lookupAround2_known1) + apply (simp add:loadObject_default_def) + apply (clarsimp simp:projectKO_def projectKO_opt_tcb alignCheck_def is_aligned_mask + objBits_simps' unless_def) + apply (clarsimp simp:bind_def return_def) + apply (intro conjI) + apply (intro set_eqI iffI) + apply clarsimp + apply (subst (asm) in_magnitude_check') + apply (simp add:archObjSize_def is_aligned_mask)+ + apply (rule bexI[rotated]) + apply (rule in_magnitude_check'[THEN iffD2]) + apply (simp add:is_aligned_mask)+ + apply (clarsimp simp:image_def) + apply (clarsimp simp: magnitudeCheck_assert assert_def objBits_def archObjSize_def + return_def fail_def lookupAround2_char2 + split:option.splits if_split_asm) + apply (rule ccontr) + apply (simp add:ps_clear_def field_simps) + apply (erule_tac x = x2 in in_empty_interE) + apply (clarsimp simp:less_imp_le) + apply (rule conjI) + apply (subst add.commute) + apply (rule word_diff_ls') + apply (clarsimp simp:field_simps not_le plus_one_helper mask_def) + apply (simp add:field_simps is_aligned_no_overflow_mask flip: is_aligned_mask) + apply simp + apply auto + done + +lemma threadSet_det: + "tcb_at' ptr s + \ threadSet f ptr s = + modify (ksPSpace_update + (\ps. ps(ptr \ (\t. 
case t of Some (KOTCB tcb) \ KOTCB (f tcb)) (ps ptr)))) s" + apply (clarsimp simp add: threadSet_def bind_def obj_at'_def) + apply (subst getTCB_det, simp add: ko_wp_at'_def)+ + apply (clarsimp simp: setObject_def gets_def get_def) + apply (subst bind_def) + apply (clarsimp simp: split_def) + apply (simp add: lookupAround2_known1 bind_assoc projectKO_def assert_opt_def + updateObject_default_def projectKO_opt_tcb) + apply (clarsimp simp: alignCheck_def unless_def when_def is_aligned_mask objBits_simps) + apply (clarsimp simp: magnitudeCheck_det bind_def) + apply (cut_tac ko = "KOTCB obj" in magnitudeCheck_det) + apply (simp add: objBits_simps is_aligned_mask)+ + apply (clarsimp simp: modify_def get_def put_def bind_def) + done + + +lemma setCTE_modify_tcbDomain_commute: + "monad_commute + (tcb_at' ptr and cte_wp_at' (\_. True) src and pspace_distinct' and pspace_aligned') (setCTE src cte) + (threadSet (tcbDomain_update (\_. ra)) ptr)" + proof - + note blah[simp del] = atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + atLeastAtMost_iff + + have hint: + "\P ptr a cte b src ra. monad_commute (tcb_at' ptr and ko_wp_at' P a ) + (threadSet (tcbDomain_update (\_. ra)) ptr) + (modify (ksPSpace_update (\ps. ps(a \ cte_update cte (the (ps a)) src a))))" + apply (clarsimp simp: monad_commute_def bind_def simpler_modify_def return_def) + apply (clarsimp simp: threadSet_det simpler_modify_def) + apply (subgoal_tac "tcb_at' ptr (ksPSpace_update (\ps. ps(a \ cte_update cte (the (ps a)) src a)) s)") + prefer 2 + apply (clarsimp simp:obj_at'_def) + apply (intro conjI impI) + apply simp + apply (clarsimp simp: projectKO_opt_tcb split:Structures_H.kernel_object.split_asm) + apply (simp add:cte_update_def) + apply (clarsimp simp: projectKO_opt_tcb split:Structures_H.kernel_object.split_asm) + apply (simp add:ps_clear_def) + apply (simp add:ps_clear_def) + apply (rule ccontr,simp) + apply (erule in_emptyE) + apply (clarsimp simp:ko_wp_at'_def) + apply blast + apply (simp add:threadSet_det simpler_modify_def) + apply (subst (asm) obj_at'_def) + apply (thin_tac "tcb_at' ptr P" for P) + apply (clarsimp simp: obj_at'_def projectKO_opt_tcb, + simp split: Structures_H.kernel_object.split_asm) + apply (case_tac s,clarsimp) + apply (intro conjI) + apply clarsimp + apply (rule ext,clarsimp) + apply (case_tac obj) + apply (simp add:cte_update_def) + apply clarsimp + apply (rule ext) + apply simp + done + + show ?thesis + apply (rule commute_name_pre_state) + apply (clarsimp simp add: setCTE_def2) + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF monad_commute_split]) + apply (rule hint) + apply (rule commute_commute) + apply (rule locateCTE_commute) + apply (wp locateCTE_cte_no_fail)+ + apply (wp threadSet_ko_wp_at2') + apply (clarsimp simp:objBits_simps) + apply (wp|simp)+ + apply (wp locateCTE_inv locateCTE_ko_wp_at') + apply clarsimp + apply fastforce + done +qed + +lemma curDomain_commute: + assumes cur:"\P. \\s. P (ksCurDomain s)\ f \\r s. P (ksCurDomain s)\" + shows "monad_commute \ f curDomain" + apply (clarsimp simp add:monad_commute_def curDomain_def get_def return_def + gets_def bind_def) + apply (rule conjI) + apply (rule set_eqI) + apply (rule iffI) + apply clarsimp + apply (rule bexI[rotated], assumption) + apply clarsimp + apply (frule_tac P1 = "\x. x = ksCurDomain s" in use_valid[OF _ cur]) + apply simp+ + apply clarsimp + apply (rule bexI[rotated], assumption) + apply clarsimp + apply (frule_tac P1 = "\x. 
x = ksCurDomain s" in use_valid[OF _ cur]) + apply simp+ + apply auto + done + +crunch inv[wp]: curDomain P + +lemma placeNewObject_tcb_at': + "\pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr (objBits (makeObject::tcb)) + and K (is_aligned ptr (objBits (makeObject::tcb)))\ + placeNewObject ptr (makeObject::tcb) 0 + \\_ s. tcb_at' ptr s \" + apply (simp add: placeNewObject_def placeNewObject'_def split_def alignError_def) + apply wpsimp + apply (clarsimp simp: obj_at'_def objBits_simps ps_clear_def) + apply (fastforce intro!: set_eqI dest: pspace_no_overlap_disjoint' simp: add_mask_fold) + done + +lemma monad_commute_if_weak_r: + "\ monad_commute P1 f h1; monad_commute P2 f h2\ \ + monad_commute (P1 and P2) f (if d then h1 else h2)" + apply (clarsimp) + apply (intro conjI impI) + apply (erule monad_commute_guard_imp,simp)+ + done + +lemma createObject_setCTE_commute: + "monad_commute + (cte_wp_at' (\_. True) src and + pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_arch_state' and K (ptr \ src) and + K (is_aligned ptr (Types_H.getObjectSize ty us)) and + K (Types_H.getObjectSize ty us < word_bits)) + (RetypeDecls_H.createObject ty ptr us d) + (setCTE src cte)" + apply (rule commute_grab_asm)+ + apply (subgoal_tac "ptr && mask (Types_H.getObjectSize ty us) = 0") + prefer 2 + apply (clarsimp simp: range_cover_def is_aligned_mask) + apply (clarsimp simp: createObject_def) + apply (case_tac ty, + simp_all add: AARCH64_H.toAPIType_def) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (simp_all add: + AARCH64_H.getObjectSize_def apiGetObjectSize_def + tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def + cteSizeBits_def) + \ \Untyped\ + apply (simp add: monad_commute_guard_imp[OF return_commute]) + \ \TCB, EP, NTFN\ + apply (rule monad_commute_guard_imp[OF commute_commute]) + apply (rule monad_commute_split[OF monad_commute_split]) + apply (rule monad_commute_split[OF commute_commute[OF return_commute]]) + apply (rule setCTE_modify_tcbDomain_commute) + apply wp + apply (rule curDomain_commute) + apply wp+ + apply (rule setCTE_placeNewObject_commute) + apply (wp placeNewObject_tcb_at' placeNewObject_cte_wp_at' + placeNewObject_pspace_distinct' + placeNewObject_pspace_aligned' + | clarsimp simp: objBits_simps')+ + apply (rule monad_commute_guard_imp[OF commute_commute] + ,rule monad_commute_split[OF commute_commute[OF return_commute]] + ,rule setCTE_placeNewObject_commute + ,(wp|clarsimp simp: objBits_simps')+)+ + \ \CNode\ + apply (rule monad_commute_guard_imp[OF commute_commute]) + apply (rule monad_commute_split)+ + apply (rule return_commute[THEN commute_commute]) + apply (rule setCTE_modify_gsCNode_commute[of \]) + apply (rule hoare_triv[of \]) + apply wp + apply (rule setCTE_placeNewObject_commute) + apply (wp|clarsimp simp: objBits_simps')+ + \ \Arch Objects\ + apply ((rule monad_commute_guard_imp[OF commute_commute] + , rule monad_commute_split[OF commute_commute[OF return_commute]] + , clarsimp simp: AARCH64_H.createObject_def + placeNewDataObject_def bind_assoc + split del: if_split + ,(rule monad_commute_split return_commute[THEN commute_commute] + setCTE_modify_gsUserPages_commute[of \] + modify_wp[of "%_. 
\"] + setCTE_doMachineOp_commute + setCTE_placeNewObject_commute + setCTE_updatePTType_commute + monad_commute_if_weak_r + | wp placeNewObject_pspace_distinct' + placeNewObject_pspace_aligned' + placeNewObject_cte_wp_at' + placeNewObject_valid_arch_state + | erule is_aligned_weaken + | simp add: objBits_simps word_bits_def mult_2 add.assoc + pageBits_less_word_bits[unfolded word_bits_def, simplified])+)+) + apply (simp add: bit_simps) + done + + +lemma createObject_updateMDB_commute: + "monad_commute + ((\s. src \ 0 \ cte_wp_at' (\_. True) src s) and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + pspace_aligned' and pspace_distinct' and valid_arch_state' and + K (ptr \ src) and + K (is_aligned ptr (Types_H.getObjectSize ty us)) and + K ((Types_H.getObjectSize ty us)< word_bits)) + (updateMDB src f) (RetypeDecls_H.createObject ty ptr us d)" + apply (clarsimp simp:updateMDB_def split:if_split_asm) + apply (intro conjI impI) + apply (simp add: monad_commute_guard_imp[OF return_commute]) + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF monad_commute_split]) + apply (rule createObject_setCTE_commute) + apply (rule createObject_getCTE_commute) + apply wp + apply (auto simp:range_cover_full) + done + +lemma updateMDB_pspace_no_overlap': + "\pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz\ + updateMDB slot f + \\rv s. pspace_no_overlap' ptr sz s\" + apply (rule hoare_pre) + apply (clarsimp simp: updateMDB_def split del: if_split) + apply (wp setCTE_pspace_no_overlap') + apply clarsimp + done + +lemma ctes_of_ko_at: + "ctes_of s p = Some a \ + (\ptr ko. (ksPSpace s ptr = Some ko \ p \ obj_range' ptr ko))" + apply (clarsimp simp: map_to_ctes_def Let_def split: if_split_asm) + apply (intro exI conjI, assumption) + apply (simp add: obj_range'_def objBits_simps' is_aligned_no_overflow_mask) + apply (intro exI conjI, assumption) + apply (clarsimp simp: objBits_simps' obj_range'_def word_and_le2) + apply (thin_tac "P" for P)+ + apply (simp add: mask_def) + apply word_bitwise + done + +lemma pspace_no_overlapD2': + "\is_aligned ptr sz; pspace_no_overlap' ptr sz s;sz < word_bits; + ctes_of s slot = Some cte\ + \ slot \ ptr" + apply (drule ctes_of_ko_at) + apply clarsimp + apply (drule(1) pspace_no_overlapD') + apply (erule in_empty_interE) + apply (simp add:obj_range'_def add_mask_fold) + apply clarsimp + apply (subst is_aligned_neg_mask_eq[symmetric]) + apply simp + apply (simp add: is_aligned_no_overflow) + done + +lemma caps_overlap_reserved'_subseteq: + "\caps_overlap_reserved' B s; A\ B\ \ caps_overlap_reserved' A s" + apply (clarsimp simp:caps_overlap_reserved'_def) + apply (drule(1) bspec) + apply (erule disjoint_subset2) + apply simp + done + +definition weak_valid_dlist where + "weak_valid_dlist \ \m. + (\p cte. + m p = Some cte \ + (let next = mdbNext (cteMDBNode cte) + in (next \ 0 \ (\cte'. m next = Some cte' \ cteCap cte'\ capability.NullCap))))" + +lemma valid_arch_state'_updateMDB: + "\valid_arch_state' \ updateMDB a b \\rv. valid_arch_state'\" + by (clarsimp simp:updateMDB_def valid_arch_state_def,wp) + +lemma fail_commute: + "monad_commute \ fail f = empty_fail f" + apply (simp add: monad_commute_def empty_fail_def) + apply (simp add: fail_def bind_def del: split_paired_Ex) + apply blast + done + +lemma modify_commute: + "monad_commute P (modify f) (modify g) + = (\s. 
P s \ f (g s) = g (f s))" + apply (simp add: monad_commute_def exec_modify) + apply (simp add: return_def eq_commute) + done + +lemma createObjects_gsUntypedZeroRanges_commute': + "monad_commute \ + (createObjects' ptr n ko us) + (modify (\s. s \ gsUntypedZeroRanges := f (gsUntypedZeroRanges s) \ ))" + apply (simp add: createObjects'_def unless_def when_def alignError_def + fail_commute) + apply clarsimp + apply (rule commute_commute) + apply (strengthen monad_commute_guard_imp[OF monad_commute_split[where P="\" and Q="\\"], OF _ _ hoare_vcg_prop] + | simp add: modify_commute split: option.split prod.split)+ + apply (simp add: monad_commute_def exec_modify exec_gets assert_def) + done + +lemma assert_commute2: "empty_fail f + \ monad_commute \ (assert G) f" + apply (clarsimp simp:assert_def monad_commute_def) + apply (simp add: fail_def bind_def empty_fail_def del: split_paired_Ex) + apply blast + done + +lemma monad_commute_gsUntyped_updatePTType: + "monad_commute \ (modify (\s. s\gsUntypedZeroRanges := f (gsUntypedZeroRanges s)\)) + (updatePTType ptr pt_t)" + unfolding updatePTType_def + apply (clarsimp simp: monad_commute_def exec_gets exec_modify bind_assoc) + apply (clarsimp simp: return_def) + apply (case_tac s, rename_tac arch mach, case_tac arch) + apply fastforce + done + +lemma threadSet_gsUntypedZeroRanges_commute': + "monad_commute \ + (threadSet fn ptr) + (modify (\s. s \ gsUntypedZeroRanges := f (gsUntypedZeroRanges s) \ ))" + apply (simp add: threadSet_def getObject_def setObject_def) + apply (rule commute_commute) + apply (strengthen monad_commute_guard_imp[OF monad_commute_split[where P="\" and Q="\\"], OF _ _ hoare_vcg_prop] + | simp add: modify_commute updateObject_default_def alignCheck_assert + magnitudeCheck_assert return_commute return_commute[THEN commute_commute] + projectKO_def2 assert_commute2 assert_commute2[THEN commute_commute] + assert_opt_def2 loadObject_default_def + split: option.split prod.split)+ + apply (simp add: monad_commute_def exec_gets exec_modify) + done + +lemma createObject_gsUntypedZeroRanges_commute: + "monad_commute + \ + (RetypeDecls_H.createObject ty ptr us dev) + (modify (\s. s \ gsUntypedZeroRanges := f (gsUntypedZeroRanges s) \ ))" + apply (simp add: createObject_def AARCH64_H.createObject_def + placeNewDataObject_def + placeNewObject_def2 bind_assoc fail_commute + return_commute toAPIType_def + split: option.split apiobject_type.split object_type.split) + apply (strengthen monad_commute_guard_imp[OF monad_commute_split[where P="\" and Q="\\"], + OF _ _ hoare_vcg_prop, THEN commute_commute] + monad_commute_guard_imp[OF monad_commute_split[where P="\" and Q="\\"], + OF _ _ hoare_vcg_prop] + | simp add: modify_commute createObjects_gsUntypedZeroRanges_commute' + createObjects_gsUntypedZeroRanges_commute'[THEN commute_commute] + return_commute return_commute[THEN commute_commute] + threadSet_gsUntypedZeroRanges_commute'[THEN commute_commute] + monad_commute_gsUntyped_updatePTType + split: option.split prod.split cong: if_cong)+ + apply (simp add: curDomain_def monad_commute_def exec_modify exec_gets) + done + +lemma monad_commute_If_rhs: + "monad_commute P a b \ monad_commute Q a c + \ monad_commute (\s. (R \ P s) \ (\ R \ Q s)) a (if R then b else c)" + by simp + +lemma case_eq_if_isUntypedCap: + "(case c of UntypedCap _ _ _ _ \ x | _ \ y) + = (if isUntypedCap c then x else y)" + by (cases c, simp_all add: isCap_simps) + +lemma createObject_updateTrackedFreeIndex_commute: + "monad_commute + (cte_wp_at' (\_. 
True) slot and pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_arch_state' and + K (ptr \ slot) and K (Types_H.getObjectSize ty us < word_bits) and + K (is_aligned ptr (Types_H.getObjectSize ty us))) + (RetypeDecls_H.createObject ty ptr us dev) (updateTrackedFreeIndex slot idx)" + apply (simp add: updateTrackedFreeIndex_def getSlotCap_def updateCap_def) + apply (rule monad_commute_guard_imp) + apply (rule monad_commute_split[OF _ createObject_getCTE_commute] + monad_commute_split[OF _ createObject_gsUntypedZeroRanges_commute] + createObject_gsUntypedZeroRanges_commute)+ + apply (wp getCTE_wp')+ + apply (clarsimp simp: pspace_no_overlap'_def) + done + +lemma createObject_updateNewFreeIndex_commute: + "monad_commute + (cte_wp_at' (\_. True) slot and pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_arch_state' and + K (ptr \ slot) and K (Types_H.getObjectSize ty us < word_bits) and + K (is_aligned ptr (Types_H.getObjectSize ty us))) + (RetypeDecls_H.createObject ty ptr us dev) (updateNewFreeIndex slot)" + apply (simp add: updateNewFreeIndex_def getSlotCap_def case_eq_if_isUntypedCap + updateTrackedFreeIndex_def) + apply (rule monad_commute_guard_imp) + apply (rule monad_commute_split[OF _ createObject_getCTE_commute]) + apply (rule monad_commute_If_rhs) + apply (rule createObject_updateTrackedFreeIndex_commute) + apply (rule commute_commute[OF return_commute]) + apply (wp getCTE_wp') + apply clarsimp + done + +lemma new_cap_object_comm_helper: + "monad_commute + (pspace_aligned' and pspace_distinct' and (\s. no_0 (ctes_of s)) and + (\s. weak_valid_dlist (ctes_of s)) and + (\s. valid_nullcaps (ctes_of s)) and + cte_wp_at' (\c. isUntypedCap (cteCap c)) parent and + cte_wp_at' (\c. cteCap c = capability.NullCap) slot and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_arch_state' and + K (Types_H.getObjectSize ty us capability.NullCap) and + K (is_aligned ptr (Types_H.getObjectSize ty us) \ ptr \ 0 \ parent \ 0)) + (RetypeDecls_H.createObject ty ptr us d) (insertNewCap parent slot cap)" + apply (clarsimp simp:insertNewCap_def bind_assoc liftM_def) + apply (rule monad_commute_guard_imp) + apply (rule monad_commute_split[OF _ createObject_getCTE_commute])+ + apply (rule monad_commute_split[OF _ commute_commute[OF assert_commute]]) + apply (rule monad_commute_split[OF _ createObject_setCTE_commute]) + apply (rule monad_commute_split[OF _ commute_commute[OF createObject_updateMDB_commute]]) + apply (rule monad_commute_split[OF _ commute_commute[OF createObject_updateMDB_commute]]) + apply (rule createObject_updateNewFreeIndex_commute) + apply (wp getCTE_wp hoare_vcg_imp_lift hoare_vcg_disj_lift valid_arch_state'_updateMDB + updateMDB_pspace_no_overlap' setCTE_pspace_no_overlap' + | clarsimp simp:conj_comms)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (frule_tac slot = slot in pspace_no_overlapD2') + apply simp+ + apply (frule_tac slot = parent in pspace_no_overlapD2') + apply simp+ + apply (case_tac ctea,clarsimp) + apply (frule_tac p = slot in nullcapsD') + apply simp+ + apply (subgoal_tac "(mdbNext (cteMDBNode cte) = 0 \ + (\ctea. 
ctes_of s (mdbNext (cteMDBNode cte)) = Some ctea))") + apply (elim disjE) + apply clarsimp+ + apply (frule_tac slot = "(mdbNext (cteMDBNode cte))" + in pspace_no_overlapD2') + apply simp+ + apply (clarsimp simp:weak_valid_dlist_def) + apply (drule_tac x = "parent " in spec) + apply clarsimp + done + +crunches updateNewFreeIndex + for pspace_aligned'[wp]: "pspace_aligned'" + and pspace_distinct'[wp]: "pspace_distinct'" + and pspace_canonical'[wp]: "pspace_canonical'" + and valid_arch_state'[wp]: "valid_arch_state'" + and pspace_no_overlap'[wp]: "pspace_no_overlap' ptr n" + and ctes_of[wp]: "\s. P (ctes_of s)" + +lemma updateNewFreeIndex_cte_wp_at[wp]: + "\\s. P (cte_wp_at' P' p s)\ updateNewFreeIndex slot \\rv s. P (cte_wp_at' P' p s)\" + by (simp add: cte_wp_at_ctes_of, wp) + +lemma new_cap_object_commute: + "monad_commute + (cte_wp_at' (\c. isUntypedCap (cteCap c)) parent and + (\s. \slot\set list. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s) and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_pspace' and valid_arch_state' and + K (distinct (map fst (zip list caps))) and + K (\cap \ set caps. cap \ capability.NullCap) and + K (Types_H.getObjectSize ty us ptr \ 0)) + (RetypeDecls_H.createObject ty ptr us d) + (zipWithM_x (insertNewCap parent) list caps)" + apply (clarsimp simp:zipWithM_x_mapM_x) + apply (rule monad_commute_guard_imp) + apply (rule mapM_x_commute[where f = fst]) + apply (simp add:split_def) + apply (rule new_cap_object_comm_helper) + apply (clarsimp simp:insertNewCap_def split_def) + apply (wp updateMDB_weak_cte_wp_at updateMDB_pspace_no_overlap' + getCTE_wp valid_arch_state'_updateMDB + setCTE_weak_cte_wp_at setCTE_pspace_no_overlap') + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (case_tac "parent \ aa") + prefer 2 + apply simp + apply (clarsimp simp: conj_comms) + apply (intro conjI exI) + apply (clarsimp simp: no_0_def) + apply (clarsimp simp: weak_valid_dlist_def modify_map_def Let_def) + subgoal by (intro conjI impI; fastforce) + apply (clarsimp simp:valid_nullcaps_def) + apply (frule_tac x = "p" in spec) + apply (case_tac ctec) + apply (case_tac cte) + apply (rename_tac cap' node') + apply (case_tac node') + apply (rename_tac word1 word2 bool1 bool2) + apply (clarsimp simp:modify_map_def split:if_split_asm) + apply (case_tac z) + apply (drule_tac x = word1 in spec) + apply (clarsimp simp:weak_valid_dlist_def) + apply (drule_tac x = parent in spec) + apply clarsimp + apply (clarsimp simp:valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + apply (intro conjI) + apply (clarsimp simp:weak_valid_dlist_def Let_def) + apply (frule(2) valid_dlist_nextD) + apply clarsimp + apply (case_tac cte') + apply clarsimp + apply (drule_tac m = "ctes_of s" in nullcapsD') + apply simp + apply (clarsimp simp: no_0_def nullPointer_def) + apply (erule in_set_zipE) + apply clarsimp + apply (erule in_set_zipE) + apply clarsimp + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma createObjects'_pspace_no_overlap: + "gz = (objBitsKO val) + us \ + \pspace_no_overlap' (ptr + (1 + of_nat n << gz)) gz and + K (range_cover ptr sz gz (Suc (Suc n)) \ ptr \ 0)\ + createObjects' ptr (Suc n) val us + \\addrs s. 
pspace_no_overlap' (ptr + (1 + of_nat n << gz)) gz s\" +proof - + note simps [simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + assume "gz = (objBitsKO val) + us" + thus ?thesis + apply - + apply (rule hoare_gen_asm) + apply (clarsimp simp:createObjects'_def split_def new_cap_addrs_fold') + apply (subst new_cap_addrs_fold') + apply clarsimp + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) + apply simp+ + apply (simp add:word_le_sub1) + apply (wp haskell_assert_wp unless_wp | wpc + | simp add:alignError_def if_apply_def2 del: fun_upd_apply hoare_fail_any)+ + apply (rule impI) + apply (subgoal_tac + "pspace_no_overlap' (ptr + (1 + of_nat n << objBitsKO val + us)) + (objBitsKO val + us) + (s\ksPSpace := foldr (\addr map. map(addr \ val)) + (new_cap_addrs (unat (1 + of_nat n << us)) ptr val) (ksPSpace s)\)") + apply (intro conjI impI allI) + apply assumption+ + apply (subst pspace_no_overlap'_def) + apply (intro allI impI) + apply (subst (asm) foldr_upd_app_if) + apply (subst is_aligned_neg_mask_eq) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def) + apply simp + apply (clarsimp split:if_splits) + apply (drule obj_range'_subset_strong[rotated]) + apply (rule range_cover_rel[OF range_cover_le[where n = "Suc n"]],assumption) + apply simp + apply simp + apply (drule range_cover.unat_of_nat_n_shift + [OF range_cover_le[where n = "Suc n"],where gbits = us]) + apply simp + apply (simp add:shiftl_t2n field_simps)+ + apply (simp add:obj_range'_def) + apply (erule disjoint_subset) + apply (clarsimp simp: simps) + apply (thin_tac "x \ y" for x y) + apply (subst (asm) le_m1_iff_lt[THEN iffD1]) + apply (drule_tac range_cover_no_0[rotated,where p = "Suc n"]) + apply simp + apply simp + apply (simp add:field_simps) + apply (simp add: power_add[symmetric]) + apply (simp add: word_neq_0_conv) + apply (simp add: power_add[symmetric] field_simps) + apply (frule range_cover_subset[where p = "Suc n"]) + apply simp + apply simp + apply (drule(1) pspace_no_overlapD') + apply (subst (asm) is_aligned_neg_mask_eq) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def) + apply simp + apply (simp add:word_le_sub1 shiftl_t2n mask_def field_simps) + done +qed + +lemma createNewCaps_not_nc: + "\\\ + createNewCaps ty ptr (Suc (length as)) us d + \\r s. (\cap\set r. cap \ capability.NullCap)\" + apply (clarsimp simp:simp:createNewCaps_def Arch_createNewCaps_def split del: if_split) + apply (rule hoare_pre) + apply wpc + apply wp + apply (simp add:Arch_createNewCaps_def split del: if_split) + apply (wpc|wp|clarsimp)+ + done + +lemma doMachineOp_psp_no_overlap: + "\\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s \ + doMachineOp f + \\y s. pspace_no_overlap' ptr sz s\" + by (wp pspace_no_overlap'_lift,simp) + +lemma createObjects'_psp_distinct: + "\ pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and + K (range_cover ptr sz ((objBitsKO ko) + us) n \ n \ 0 \ + is_aligned ptr (objBitsKO ko + us) \ objBitsKO ko + us < word_bits) \ + createObjects' ptr n ko us + \\rv s. 
pspace_distinct' s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp:createObjects'_def split_def) + apply (subst new_cap_addrs_fold') + apply (drule range_cover_not_zero_shift[where gbits = us,rotated]) + apply simp+ + apply unat_arith + apply (rule hoare_pre) + apply (wpc|wp|simp add: unless_def alignError_def del: fun_upd_apply hoare_fail_any)+ + apply clarsimp + apply (subst data_map_insert_def[symmetric])+ + apply (simp add: range_cover.unat_of_nat_n_shift) + apply (drule(2) retype_aligned_distinct'(1)[where ko = ko and n= "n*2^us" ]) + apply (erule range_cover_rel) + apply simp + apply clarsimp + apply (simp add: range_cover.unat_of_nat_n_shift) + done + +lemma createObjects'_psp_aligned: + "\ pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and + K (range_cover ptr sz ((objBitsKO ko) + us) n \ n \ 0 \ + is_aligned ptr (objBitsKO ko + us) \ objBitsKO ko + us < word_bits)\ + createObjects' ptr n ko us + \\rv s. pspace_aligned' s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: createObjects'_def split_def) + apply (subst new_cap_addrs_fold') + apply (drule range_cover_not_zero_shift[where gbits = us,rotated]) + apply simp+ + apply unat_arith + apply (rule hoare_pre) + apply (wpc|wp|simp add: unless_def alignError_def del: fun_upd_apply hoare_fail_any)+ + apply clarsimp + apply (frule(2) retype_aligned_distinct'(2)[where ko = ko and n= "n*2^us" ]) + apply (erule range_cover_rel) + apply simp + apply clarsimp + apply (subst data_map_insert_def[symmetric])+ + apply (simp add: range_cover.unat_of_nat_n_shift) + done + +lemma pspace_no_overlap'_le: + assumes psp: "pspace_no_overlap' ptr sz s" "sz'\ sz" + assumes b: "sz < word_bits" + shows "pspace_no_overlap' ptr sz' s" +proof - + note no_simps [simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + have diff_cancel: "\a b c. (a::machine_word) + b - c = b + (a - c)" + by simp + have bound: "(ptr && ~~ mask sz') - (ptr && ~~ mask sz) \ mask sz - mask sz'" + using neg_mask_diff_bound[OF psp(2)] + by (simp add: mask_def) + show ?thesis + using psp + apply (clarsimp simp:pspace_no_overlap'_def) + apply (drule_tac x = x in spec) + apply clarsimp + apply (erule disjoint_subset2[rotated]) + apply (clarsimp simp: no_simps) + apply (rule word_plus_mcs[OF _ is_aligned_no_overflow_mask]) + apply (simp add:diff_cancel p_assoc_help) + apply (rule le_plus) + apply (rule bound) + apply (erule mask_mono) + apply simp + done +qed + +lemma pspace_no_overlap'_le2: + assumes "pspace_no_overlap' ptr sz s" "ptr \ ptr'" "ptr' &&~~ mask sz = ptr && ~~ mask sz" + shows "pspace_no_overlap' ptr' sz s" + proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + using assms + apply (clarsimp simp:pspace_no_overlap'_def) + apply (drule_tac x = x in spec) + apply clarsimp + apply (erule disjoint_subset2[rotated]) + apply (clarsimp simp:blah) + done +qed + +lemma pspace_no_overlap'_tail: + "\range_cover ptr sz us (Suc (Suc n)); pspace_aligned' s; pspace_distinct' s; + pspace_no_overlap' ptr sz s; ptr \ 0\ + \ pspace_no_overlap' (ptr + (1 + of_nat n << us)) sz s" + apply (erule pspace_no_overlap'_le2) + apply (erule(1) range_cover_ptr_le) + apply (erule(1) range_cover_tail_mask) + done + +lemma createNewCaps_pspace_no_overlap': + "\\s. 
range_cover ptr sz (Types_H.getObjectSize ty us) (Suc (Suc n)) \ + pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s \ + ptr \ 0\ + createNewCaps ty ptr (Suc n) us d + \\r s. pspace_no_overlap' + (ptr + (1 + of_nat n << Types_H.getObjectSize ty us)) + (Types_H.getObjectSize ty us) s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: createNewCaps_def) + apply (subgoal_tac "pspace_no_overlap' (ptr + (1 + of_nat n << (Types_H.getObjectSize ty us))) + (Types_H.getObjectSize ty us) s") + prefer 2 + apply (rule pspace_no_overlap'_le[where sz = sz]) + apply (rule pspace_no_overlap'_tail) + apply simp+ + apply (simp add:range_cover_def) + apply (simp add:range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]) + apply (rule_tac Q = "\r. pspace_no_overlap' (ptr + (1 + of_nat n << Types_H.getObjectSize ty us)) + (Types_H.getObjectSize ty us) and + pspace_aligned' and pspace_distinct'" in hoare_strengthen_post) + apply (case_tac ty) + apply (simp_all add: apiGetObjectSize_def + AARCH64_H.toAPIType_def + AARCH64_H.getObjectSize_def objBits_simps objBits_defs + pageBits_def ptBits_def + createObjects_def) + apply (rule hoare_pre) + apply wpc + apply (clarsimp simp: apiGetObjectSize_def curDomain_def + AARCH64_H.toAPIType_def + AARCH64_H.getObjectSize_def objBits_simps objBits_defs + pageBits_def ptBits_def + createObjects_def Arch_createNewCaps_def + split: apiobject_type.splits + | wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap[where sz = sz] + createObjects'_psp_aligned[where sz = sz] createObjects'_psp_distinct[where sz = sz] + mapM_x_wp_inv + | assumption)+ + apply (intro conjI range_cover_le[where n = "Suc n"] | simp)+ + apply ((simp add:objBits_simps pageBits_def range_cover_def word_bits_def)+)[5] + by ((clarsimp simp: apiGetObjectSize_def bit_simps toAPIType_def + getObjectSize_def objBits_simps + createObjects_def Arch_createNewCaps_def unless_def + split: apiobject_type.splits + | wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap + createObjects'_psp_aligned createObjects'_psp_distinct + mapM_x_wp_inv + | assumption | clarsimp simp: word_bits_def + | intro conjI range_cover_le[where n = "Suc n"] range_cover.aligned)+) + +lemma objSize_eq_capBits: + "Types_H.getObjectSize ty us = APIType_capBits ty us" + by (cases ty; + clarsimp simp: getObjectSize_def objBits_simps bit_simps + APIType_capBits_def apiGetObjectSize_def ptBits_def + split: apiobject_type.splits) + +lemma createNewCaps_ret_len: + "\K (n < 2 ^ word_bits \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv s. n = length rv\" + including no_pre + apply (rule hoare_name_pre_state) + apply clarsimp + apply (case_tac ty) + apply (simp_all add:createNewCaps_def AARCH64_H.toAPIType_def) + apply (rule hoare_pre) + apply wpc + apply ((wp+)|simp add:Arch_createNewCaps_def AARCH64_H.toAPIType_def + unat_of_nat_minus_1 + [where 'a=machine_word_len, folded word_bits_def] | + erule hoare_strengthen_post[OF createObjects_ret],clarsimp+ | intro conjI impI)+ + apply (rule hoare_pre, + ((wp+) + | simp add: Arch_createNewCaps_def toAPIType_def unat_of_nat_minus_1 + | erule hoare_strengthen_post[OF createObjects_ret],clarsimp+ + | intro conjI impI)+)+ + done + +lemma no_overlap_check: + "\range_cover ptr sz bits n; pspace_no_overlap' ptr sz s; + pspace_aligned' s;n\ 0\ + \ case_option (return ()) + (\x. 
assert (fst x < ptr)) + (fst (lookupAround2 (ptr + of_nat (shiftL n bits - Suc 0)) + (ksPSpace s))) s = + return () s" + apply (clarsimp split:option.splits simp:assert_def lookupAround2_char1 not_less) + apply (rule ccontr) + apply (frule(1) pspace_no_overlapD') + apply (erule_tac x = a in in_empty_interE) + apply clarsimp + apply (drule(1) pspace_alignedD') + apply (erule is_aligned_no_overflow) + apply clarsimp + apply (erule order_trans) + apply (frule range_cover_cell_subset[where x = "of_nat n - 1"]) + apply (rule gt0_iff_gem1[THEN iffD1]) + apply (simp add:word_gt_0) + apply (rule range_cover_not_zero) + apply simp + apply assumption + apply (clarsimp simp:shiftL_nat field_simps) + apply (erule impE) + apply (frule range_cover_subset_not_empty[rotated,where x = "of_nat n - 1"]) + apply (rule gt0_iff_gem1[THEN iffD1]) + apply (simp add:word_gt_0) + apply (rule range_cover_not_zero) + apply simp + apply assumption + apply (clarsimp simp:field_simps) + apply simp + done + +lemma new_caps_addrs_append: + "\range_cover ptr sz (objBitsKO va + us) (Suc n)\ \ + new_cap_addrs (unat (of_nat n + (1::machine_word) << us)) ptr val = + new_cap_addrs (unat (((of_nat n)::machine_word) << us)) ptr val @ + new_cap_addrs (unat ((2::machine_word) ^ us)) + ((((of_nat n)::machine_word) << objBitsKO val + us) + ptr) val" + apply (subst add.commute) + apply (clarsimp simp:new_cap_addrs_def) + apply (subst upt_add_eq_append'[where j="unat (((of_nat n)::machine_word) << us)"]) + prefer 3 + apply simp + apply (subst upt_lhs_sub_map) + apply (simp add:Fun.comp_def field_simps) + apply (subst unat_sub[symmetric]) + apply (simp add:shiftl_t2n) + apply (subst mult.commute) + apply (subst mult.commute[where a = "2 ^ us"])+ + apply (rule word_mult_le_mono1) + apply (simp add:word_le_nat_alt) + apply (subst of_nat_Suc[symmetric]) + apply (frule range_cover.unat_of_nat_n) + apply (drule range_cover.unat_of_nat_n[OF range_cover_le[where n = n]]) + apply simp + apply simp + apply (simp add: p2_gt_0) + apply (simp add:range_cover_def word_bits_def) + apply (subst word_bits_def[symmetric]) + apply (subst of_nat_Suc[symmetric]) + apply (subst range_cover.unat_of_nat_n) + apply simp + apply (subst unat_power_lower) + apply (simp add:range_cover_def) + apply (frule range_cover.range_cover_n_le(2)) + apply (subst mult.commute) + apply (rule le_less_trans[OF nat_le_power_trans[where m = sz]]) + apply (erule le_trans) + apply simp + apply (simp add:range_cover_def) + apply (simp add:range_cover_def[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: power_add [symmetric] shiftl_t2n field_simps) + apply simp + apply (frule range_cover_le[where n = n]) + apply simp + apply (drule range_cover_rel[where sbit'= "objBitsKO va"]) + apply simp+ + apply (drule range_cover_rel[where sbit'= "objBitsKO va"]) + apply simp+ + apply (drule range_cover.unat_of_nat_n)+ + apply (simp add:shiftl_t2n) + apply (clarsimp simp: power_add[symmetric] shiftl_t2n field_simps ) + done + +lemma modify_comp: + "modify (ksPSpace_update (\a. f (g a))) = + (do modify (ksPSpace_update (\a. (g a))); + modify (ksPSpace_update (\a. f a)) + od)" + by (clarsimp simp:simpler_modify_def bind_def Fun.comp_def) + +lemma modify_objs_commute: + "monad_commute (K ((set lst1) \ (set lst2) = {})) + (modify (ksPSpace_update (foldr (\addr map. map(addr \ val)) lst1))) + (modify (ksPSpace_update (foldr (\addr map. 
map(addr \ val)) lst2)))" + apply (clarsimp simp:monad_commute_def simpler_modify_def bind_def return_def) + apply (case_tac s,simp) + apply (rule ext) + apply (clarsimp simp:foldr_upd_app_if) + done + +lemma new_cap_addrs_disjoint: + "\range_cover ptr sz (objBitsKO val + us) (Suc (Suc n))\ + \ set (new_cap_addrs (2^us) + (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) val) \ + set (new_cap_addrs (unat ((1::machine_word) + of_nat n << us)) ptr val) = {}" + apply (frule range_cover.unat_of_nat_n_shift[where gbits = us,symmetric]) + apply simp + apply (frule range_cover_rel[where sbit' = "objBitsKO val"]) + apply (simp add:field_simps)+ + apply (frule new_cap_addrs_distinct) + apply (subst (asm) add.commute[where b = 2])+ + apply (subst (asm) new_caps_addrs_append[where n = "Suc n",simplified]) + apply (simp add:field_simps) + apply (clarsimp simp:field_simps Int_ac range_cover_def) + done + +lemma pspace_no_overlap'_modify: + "\K (range_cover ptr sz (objBitsKO val + us) (Suc (Suc n)) \ ptr \ 0) and + pspace_no_overlap' (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) + (objBitsKO val + us)\ + modify (ksPSpace_update + (foldr (\addr map. map(addr \ val)) + (new_cap_addrs (unat ((1::machine_word) + of_nat n << us)) ptr val))) + \\r. pspace_no_overlap' + (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) + (objBitsKO val + us)\" + proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + apply (clarsimp simp:simpler_modify_def valid_def pspace_no_overlap'_def) + apply (frule(1) range_cover_tail_mask) + apply (simp add:field_simps) + apply (drule_tac x = x in spec) + apply (clarsimp simp:foldr_upd_app_if split:if_splits) + apply (frule obj_range'_subset_strong[rotated]) + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (rule range_cover_rel,assumption) + apply simp + apply clarsimp + apply (frule range_cover.unat_of_nat_n_shift[where gbits = us,symmetric]) + apply simp+ + apply (simp add:field_simps) + apply (simp add:obj_range'_def) + apply (erule disjoint_subset) + apply (frule(1) range_cover_ptr_le) + apply (subgoal_tac + "\ ptr + (1 + of_nat n << us + objBitsKO val) \ ptr + (1 + of_nat n << us) * 2 ^ objBitsKO val - 1") + apply (clarsimp simp:blah field_simps) + apply (clarsimp simp: not_le) + apply (rule word_leq_le_minus_one) + apply (clarsimp simp: power_add[symmetric] shiftl_t2n field_simps objSize_eq_capBits ) + apply (rule neq_0_no_wrap) + apply (clarsimp simp: power_add[symmetric] shiftl_t2n field_simps objSize_eq_capBits ) + apply simp + done +qed + +lemma createObjects_Cons: + "\range_cover ptr sz (objBitsKO val + us) (Suc (Suc n)); + pspace_distinct' s;pspace_aligned' s; + pspace_no_overlap' ptr sz s;pspace_aligned' s; ptr \ 0\ + \ createObjects' ptr (Suc (Suc n)) val us s = + (do createObjects' ptr (Suc n) val us; + createObjects' (((1 + of_nat n) << (objBitsKO val + us)) + ptr) + (Suc 0) val us + od) s" + supply option.case_cong[cong] subst_all [simp del] + apply (clarsimp simp:createObjects'_def split_def bind_assoc) + apply (subgoal_tac "is_aligned (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) (objBitsKO val + us)") + prefer 2 + apply (clarsimp simp:field_simps) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def) + apply (rule monad_eq_split[where Q ="\x s'. 
s' = s \ ptr && mask (objBitsKO val + us) = 0"]) + apply (clarsimp simp:is_aligned_mask[symmetric]) + apply (subst new_cap_addrs_fold') + apply (drule range_cover_not_zero_shift[rotated,where gbits = us]) + apply simp+ + apply (simp add:word_le_sub1) + apply (subst new_cap_addrs_fold') + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (drule range_cover_not_zero_shift[rotated,where gbits = us]) + apply simp+ + apply (simp add:word_le_sub1) + apply (subst new_cap_addrs_fold') + apply (rule word_1_le_power) + apply (simp add:range_cover_def) + apply (rule monad_eq_split[where Q ="\r s'. r = ksPSpace s \ s' = s"]) + apply (rule monad_eq_split2[where Q = "\r s'. s' = s"]) + apply (simp add:field_simps) + apply (subst no_overlap_check) + apply (erule range_cover_le) + apply simp+ + apply (subst no_overlap_check) + apply (erule range_cover_le) + apply simp+ + apply clarsimp + apply (simp add:new_caps_addrs_append[where n = "Suc n",simplified]) + apply (subst modify_specify2[where g = return,simplified]) + apply (subst modify_specify2) + apply (subst modify_specify) + apply (simp add:modify_comp) + apply (subst monad_commute_simple[OF modify_objs_commute,where g= "\x y. return ()",simplified]) + apply (frule range_cover.sz(1)) + apply (frule range_cover.sz(2)) + apply clarsimp + apply (erule new_cap_addrs_disjoint) + apply (rule monad_eq_split2[where Q = + "\r. pspace_no_overlap' (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) + (objBitsKO val + us) and pspace_aligned'"]) + apply (simp add:shiftl_t2n field_simps) + apply (clarsimp) + apply (rule sym) + apply (clarsimp simp:gets_def get_def) + apply (subst bind_def,simp) + apply (subst monad_eq) + apply (rule no_overlap_check) + apply (erule range_cover_full) + apply (simp add:range_cover_def word_bits_def) + apply (simp add:field_simps) + apply simp+ + apply (clarsimp simp:simpler_modify_def) + apply wp + apply (clarsimp simp del:fun_upd_apply) + apply (rule conjI) + apply (rule use_valid[OF _ pspace_no_overlap'_modify[where sz = sz]]) + apply (simp add:simpler_modify_def) + apply (clarsimp simp:field_simps) + apply (rule pspace_no_overlap'_le) + apply (erule pspace_no_overlap'_tail) + apply simp+ + apply (simp add:range_cover_def) + apply (erule range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]) + apply (subst data_map_insert_def[symmetric]) + apply (drule(2) retype_aligned_distinct'(2)) + prefer 2 + apply (simp cong: kernel_state.fold_congs) + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (rule range_cover_le[OF range_cover_rel,OF _ _ _ le_refl]) + apply simp+ + apply (drule range_cover.unat_of_nat_n_shift[where gbits = us]) + apply simp + apply simp + apply (wp haskell_assert_wp | wpc)+ + apply simp + apply (wp unless_wp |clarsimp)+ + apply (drule range_cover.aligned) + apply (simp add:is_aligned_mask) + done + +lemma doMachineOp_ksArchState_commute: + "monad_commute \ (doMachineOp f) (gets (g \ ksArchState))" + apply (clarsimp simp:monad_commute_def gets_def return_def get_def bind_def) + apply (intro conjI set_eqI iffI) + apply (clarsimp simp: doMachineOp_def select_f_def gets_def get_def bind_def + return_def simpler_modify_def) + apply (erule bexI[rotated]) + apply clarsimp + apply (clarsimp simp: doMachineOp_def select_f_def gets_def get_def bind_def return_def + simpler_modify_def) + apply (erule bexI[rotated]) + apply clarsimp+ + done + +lemma gsCNodes_upd_createObjects'_comm: + "do _ \ modify (gsCNodes_update f); + x \ createObjects' ptr n obj us; + m x + od = + 
do x \ createObjects' ptr n obj us; + _ \ modify (gsCNodes_update f); + m x + od" + apply (rule ext) + apply (case_tac x) + by (auto simp: createObjects'_def split_def bind_assoc return_def unless_def + when_def simpler_gets_def alignError_def fail_def assert_def + simpler_modify_def bind_def + split: option.splits) + +lemma gsUserPages_upd_createObjects'_comm: + "do _ \ modify (gsUserPages_update f); + x \ createObjects' ptr n obj us; + m x + od = + do x \ createObjects' ptr n obj us; + _ \ modify (gsUserPages_update f); + m x + od" + apply (rule ext) + apply (case_tac x) + by (auto simp: createObjects'_def split_def bind_assoc return_def unless_def + when_def simpler_gets_def alignError_def fail_def assert_def + simpler_modify_def bind_def + split: option.splits) + +lemma ksArchState_upd_createObjects'_comm: + "do _ \ modify (\s. ksArchState_update (f (ksArchState s)) s); + x \ createObjects' ptr n obj us; + m x + od = + do x \ createObjects' ptr n obj us; + _ \ modify (\s. ksArchState_update (f (ksArchState s)) s); + m x + od" + apply (rule ext) + apply (case_tac x) + by (auto simp: createObjects'_def split_def bind_assoc return_def unless_def + when_def simpler_gets_def alignError_def fail_def assert_def + simpler_modify_def bind_def + split: option.splits) + +(* FIXME: move *) +lemma ef_dmo': + "empty_fail f \ empty_fail (doMachineOp f)" + by (auto simp: empty_fail_def doMachineOp_def split_def select_f_def + simpler_modify_def simpler_gets_def return_def bind_def image_def) + +(* FIXME: move *) +lemma dmo'_when_fail_comm: + assumes "empty_fail f" + shows "doMachineOp f >>= (\x. when P fail >>= (\_. m x)) = + when P fail >>= (\_. doMachineOp f >>= m)" + apply (rule ext) + apply (cut_tac ef_dmo'[OF assms]) + apply (auto simp add: empty_fail_def when_def fail_def return_def + bind_def split_def image_def, fastforce) + done + +(* FIXME: move *) +lemma dmo'_gets_ksPSpace_comm: + "doMachineOp f >>= (\_. gets ksPSpace >>= m) = + gets ksPSpace >>= (\x. doMachineOp f >>= (\_. m x))" + apply (rule ext) + apply (clarsimp simp: doMachineOp_def simpler_modify_def simpler_gets_def + return_def select_f_def bind_def split_def image_def) + apply (rule conjI) + apply (rule set_eqI, clarsimp) + apply (rule iffI; clarsimp) + apply (metis eq_singleton_redux prod_injects(2)) + apply (intro exI conjI bexI[rotated], simp+)[1] + apply (rule iffI; clarsimp; intro exI conjI bexI[rotated], simp+)[1] + done + +lemma dmo'_ksPSpace_update_comm': + assumes "empty_fail f" + shows "doMachineOp f >>= (\x. modify (ksPSpace_update g) >>= (\_. m x)) = + modify (ksPSpace_update g) >>= (\_. doMachineOp f >>= m)" +proof - + have ksMachineState_ksPSpace_update: + "\s. ksMachineState (ksPSpace_update g s) = ksMachineState s" + by simp + have updates_independent: + "\f. 
ksPSpace_update g \ ksMachineState_update f = + ksMachineState_update f \ ksPSpace_update g" + by (rule ext) simp + from assms + show ?thesis + apply (simp add: doMachineOp_def split_def bind_assoc) + apply (simp add: gets_modify_comm2[OF ksMachineState_ksPSpace_update]) + apply (rule arg_cong_bind1) + apply (simp add: empty_fail_def select_f_walk[OF empty_fail_modify] + modify_modify_bind updates_independent) + done +qed + +lemma dmo'_createObjects'_comm: + assumes ef: "empty_fail f" + shows "do _ \ doMachineOp f; x \ createObjects' ptr n obj us; m x od = + do x \ createObjects' ptr n obj us; _ \ doMachineOp f; m x od" + apply (simp add: createObjects'_def bind_assoc split_def unless_def + alignError_def dmo'_when_fail_comm[OF ef] + dmo'_gets_ksPSpace_comm + dmo'_ksPSpace_update_comm'[OF ef, symmetric]) + apply (rule arg_cong_bind1) + apply (rule arg_cong_bind1) + apply (rename_tac u w) + apply (case_tac "fst (lookupAround2 (ptr + of_nat (shiftL n (objBitsKO obj + + us) - Suc 0)) w)", clarsimp+) + apply (simp add: assert_into_when dmo'_when_fail_comm[OF ef]) + done + +lemma dmo'_gsUserPages_upd_comm: + assumes "empty_fail f" + shows "doMachineOp f >>= (\x. modify (gsUserPages_update g) >>= (\_. m x)) = + modify (gsUserPages_update g) >>= (\_. doMachineOp f >>= m)" +proof - + have ksMachineState_ksPSpace_update: + "\s. ksMachineState (gsUserPages_update g s) = ksMachineState s" + by simp + have updates_independent: + "\f. gsUserPages_update g \ ksMachineState_update f = + ksMachineState_update f \ gsUserPages_update g" + by (rule ext) simp + from assms + show ?thesis + apply (simp add: doMachineOp_def split_def bind_assoc) + apply (simp add: gets_modify_comm2[OF ksMachineState_ksPSpace_update]) + apply (rule arg_cong_bind1) + apply (simp add: empty_fail_def select_f_walk[OF empty_fail_modify] + modify_modify_bind updates_independent) + done +qed + +lemma rewrite_step: + assumes rewrite: "\s. P s \ f s = f' s" + shows "P s \ ( f >>= g ) s = (f' >>= g ) s" + by (simp add:bind_def rewrite) + +lemma rewrite_through_step: + assumes rewrite: "\s r. P s \ f r s = f' r s" + assumes hoare: "\Q\ g \\r. P\" + shows "Q s \ + (do x \ g; + y \ f x; + h x y od) s = + (do x \ g; + y \ f' x; + h x y od) s" + apply (rule monad_eq_split[where Q = "\r. P"]) + apply (simp add:bind_def rewrite) + apply (rule hoare) + apply simp + done + +lemma threadSet_commute: + assumes preserve: "\P and tcb_at' ptr \ f \\r. tcb_at' ptr\" + assumes commute: "monad_commute P' f + ( modify (ksPSpace_update + (\ps. ps(ptr \ + case ps ptr of Some (KOTCB tcb) \ KOTCB (tcbDomain_update (\_. r) tcb)))))" + shows "monad_commute (tcb_at' ptr and P and P') f (threadSet (tcbDomain_update (\_. r)) ptr)" + apply (clarsimp simp add: monad_commute_def) + apply (subst rewrite_through_step[where h = "\x y. return (x,())",simplified bind_assoc]) + apply (erule threadSet_det) + apply (rule preserve) + apply simp + apply (subst rewrite_step[OF threadSet_det]) + apply assumption + apply simp + using commute + apply (simp add:monad_commute_def) + done + +lemma createObjects_setDomain_commute: + "monad_commute + (\s. range_cover ptr' (objBitsKO (KOTCB makeObject)) + (objBitsKO (KOTCB makeObject) + 0) (Suc 0) \ + pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr' (objBitsKO (KOTCB makeObject)) s \ + tcb_at' ptr s \ is_aligned ptr' (objBitsKO (KOTCB makeObject))) + (createObjects' ptr' (Suc 0) (KOTCB makeObject) 0) + (threadSet (tcbDomain_update (\_. 
r)) ptr)" + apply (rule monad_commute_guard_imp) + apply (rule threadSet_commute) + apply (wp createObjects_orig_obj_at'[where sz = "(objBitsKO (KOTCB makeObject))"]) + apply clarsimp + apply assumption + apply (simp add:placeNewObject_def2[where val = "makeObject::tcb",simplified,symmetric]) + apply (rule placeNewObject_modify_commute) + apply (clarsimp simp: objBits_simps' typ_at'_def word_bits_def + obj_at'_def ko_wp_at'_def projectKO_opt_tcb) + apply (clarsimp split:Structures_H.kernel_object.splits) + done + + +lemma createObjects_setDomains_commute: + "monad_commute + (\s. \x\ set xs. tcb_at' (f x) s \ + range_cover ptr (objBitsKO (KOTCB makeObject)) (objBitsKO (KOTCB makeObject)) (Suc 0) \ + pspace_aligned' s \ + pspace_distinct' s \ + pspace_no_overlap' ptr (objBitsKO (KOTCB makeObject)) s \ + is_aligned ptr (objBitsKO (KOTCB makeObject))) + (mapM_x (threadSet (tcbDomain_update (\_. r))) (map f xs)) + (createObjects' ptr (Suc 0) (KOTCB makeObject) 0)" +proof (induct xs) + case Nil + show ?case + apply (simp add:monad_commute_def mapM_x_Nil) + done +next + case (Cons x xs) + show ?case + apply (simp add:mapM_x_Cons) + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF monad_commute_split]) + apply (rule commute_commute[OF Cons.hyps]) + apply (rule createObjects_setDomain_commute) + apply (wp hoare_vcg_ball_lift) + apply clarsimp + done +qed + +lemma createObjects'_pspace_no_overlap2: + "\pspace_no_overlap' (ptr + (1 + of_nat n << gz)) sz + and K (gz = (objBitsKO val) + us) + and K (range_cover ptr sz gz (Suc (Suc n)) \ ptr \ 0)\ + createObjects' ptr (Suc n) val us + \\addrs s. pspace_no_overlap' (ptr + (1 + of_nat n << gz)) sz s\" +proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + apply (rule hoare_gen_asm)+ + apply (clarsimp simp:createObjects'_def split_def new_cap_addrs_fold') + apply (subst new_cap_addrs_fold') + apply clarsimp + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) + apply simp+ + apply (simp add:word_le_sub1) + apply (wp haskell_assert_wp unless_wp |wpc + |simp add:alignError_def del:fun_upd_apply)+ + apply (rule conjI) + apply (rule impI) + apply (subgoal_tac + "pspace_no_overlap' (ptr + (1 + of_nat n << objBitsKO val + us)) + sz + (s\ksPSpace := foldr (\addr map. 
map(addr \ val)) + (new_cap_addrs (unat (1 + of_nat n << us)) ptr val) (ksPSpace s)\)") + apply (intro conjI impI allI) + apply assumption+ + apply (subst pspace_no_overlap'_def) + apply (intro allI impI) + apply (subst (asm) foldr_upd_app_if) + apply (subst range_cover_tail_mask) + apply simp+ + apply (clarsimp split:if_splits) + apply (drule obj_range'_subset_strong[rotated]) + apply (rule range_cover_rel[OF range_cover_le[where n = "Suc n"]],assumption) + apply simp+ + apply (drule range_cover.unat_of_nat_n_shift + [OF range_cover_le[where n = "Suc n"],where gbits = us]) + apply simp+ + apply (simp add:shiftl_t2n field_simps)+ + apply (simp add:obj_range'_def) + apply (erule disjoint_subset) + apply (clarsimp simp:blah) + apply (thin_tac "x \ y" for x y) + apply (subst (asm) le_m1_iff_lt[THEN iffD1]) + apply (drule_tac range_cover_no_0[rotated,where p = "Suc n"]) + apply simp + apply simp + apply (simp add:field_simps) + apply (simp add: power_add[symmetric]) + apply (simp add: word_neq_0_conv) + apply (simp add: power_add[symmetric] field_simps) + apply (frule range_cover_subset[where p = "Suc n"]) + apply simp + apply simp + apply (drule(1) pspace_no_overlapD') + apply (subst (asm) range_cover_tail_mask) + apply simp+ + apply (simp add:word_le_sub1 shiftl_t2n field_simps mask_def) + apply auto + done +qed + +lemma new_cap_addrs_def2: + "n < 2^64 \ new_cap_addrs (Suc n) ptr obj = map (\n. ptr + (n << objBitsKO obj)) [0.e.of_nat n]" + by (simp add:new_cap_addrs_def upto_enum_word unat_of_nat Fun.comp_def) + +lemma createTCBs_tcb_at': + "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz + (objBitsKO (KOTCB makeObject)) (Suc n) \ + createObjects' ptr (Suc n) (KOTCB makeObject) 0 + \\rv s. + (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s)\" + apply (simp add:createObjects'_def split_def alignError_def) + apply (wp unless_wp |wpc)+ + apply (subst data_map_insert_def[symmetric])+ + apply clarsimp + apply (subgoal_tac "(\x\of_nat n. + tcb_at' (ptr + x * 2^tcbBlockSizeBits) (s\ksPSpace := + foldr (\addr. 
data_map_insert addr (KOTCB makeObject)) + (new_cap_addrs (Suc n) ptr (KOTCB makeObject)) + (ksPSpace s)\))") + apply (subst (asm) new_cap_addrs_def2) + apply (drule range_cover.weak) + apply simp + apply simp + apply (clarsimp simp: retype_obj_at_disj') + apply (clarsimp simp: new_cap_addrs_def image_def) + apply (drule_tac x = "unat x" in bspec) + apply (simp add:objBits_simps' shiftl_t2n) + apply (rule unat_less_helper) + apply (rule ccontr) + apply simp + apply (simp add: objBits_simps shiftl_t2n) + done + +lemma createNewCaps_Cons: + assumes cover:"range_cover ptr sz (Types_H.getObjectSize ty us) (Suc (Suc n))" + and "valid_pspace' s" "valid_arch_state' s" + and "pspace_no_overlap' ptr sz s" + and "ptr \ 0" + shows "createNewCaps ty ptr (Suc (Suc n)) us d s + = (do x \ createNewCaps ty ptr (Suc n) us d; + r \ RetypeDecls_H.createObject ty + (((1 + of_nat n) << Types_H.getObjectSize ty us) + ptr) us d; + return (x @ [r]) + od) s" +proof - + have append :"[0.e.(1::machine_word) + of_nat n] = [0.e.of_nat n] @ [1 + of_nat n]" + using cover + apply - + apply (frule range_cover_not_zero[rotated]) + apply simp + apply (frule range_cover.unat_of_nat_n) + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (frule range_cover_not_zero[rotated]) + apply simp + apply (frule range_cover.unat_of_nat_n) + apply (subst upto_enum_red'[where X = "2 + of_nat n",simplified]) + apply (simp add:field_simps word_le_sub1) + apply clarsimp + apply (subst upto_enum_red'[where X = "1 + of_nat n",simplified]) + apply (simp add:field_simps word_le_sub1) + apply simp + done + + have conj_impI: + "\A B C. \C;C\B\ \ B \ C" + by simp + + have suc_of_nat: "(1::machine_word) + of_nat n = of_nat (1 + n)" + by simp + + have gsUserPages_update[simp]: + "\f. (\ks. ks \gsUserPages := f (gsUserPages ks)\) = gsUserPages_update f" + by (rule ext) simp + have gsCNodes_update[simp]: + "\f. (\ks. ks \gsCNodes := f (gsCNodes ks)\) = gsCNodes_update f" + by (rule ext) simp + have ksArchState_update[simp]: + "\f. (\ks. ks \ksArchState := f (ksArchState ks)\) = ksArchState_update f" + by (rule ext) simp + + have if_eq[simp]: + "!!x a b pgsz. (if a = ptr + (1 + of_nat n << b) then Some pgsz + else if a \ (\n. ptr + (n << b)) ` {x. x \ of_nat n} + then Just pgsz else x a) = + (if a \ (\n. ptr + (n << b)) ` {x. x \ 1 + of_nat n} + then Just pgsz else x a)" + apply (simp only: Just_def if3_fold2) + apply (rule_tac x="x a" in fun_cong) + apply (rule arg_cong2[where f=If, OF _ refl]) + apply (subgoal_tac "{x. x \ (1::machine_word) + of_nat n} = + {1 + of_nat n} \ {x. 
x \ of_nat n}") + apply (simp add: add.commute) + apply safe + apply (clarsimp simp: word_le_less_eq[of _ "1 + of_nat n"]) + apply (metis plus_one_helper add.commute) + using cover + apply - + apply (drule range_cover_le[where n = "Suc n"], simp) + apply (simp only: suc_of_nat word_le_nat_alt Suc_eq_plus1) + apply (frule range_cover.unat_of_nat_n) + apply simp + apply (drule range_cover_le[where n=n], simp) + apply (frule range_cover.unat_of_nat_n, simp) + done + + show ?thesis + using assms + apply (clarsimp simp:valid_pspace'_def) + apply (frule range_cover.aligned) + apply (frule(3) pspace_no_overlap'_tail) + apply simp + apply (drule_tac ptr = "ptr + x" for x + in pspace_no_overlap'_le[where sz' = "Types_H.getObjectSize ty us"]) + apply (simp add:range_cover_def word_bits_def) + apply (erule range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]) + apply (simp add: createNewCaps_def) + apply (case_tac ty) + apply (simp add: AARCH64_H.toAPIType_def Arch_createNewCaps_def) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (simp_all add: bind_assoc AARCH64_H.toAPIType_def) + \ \Untyped\ + apply (simp add: bind_assoc AARCH64_H.getObjectSize_def + mapM_def sequence_def Retype_H.createObject_def + AARCH64_H.toAPIType_def + createObjects_def AARCH64_H.createObject_def + Arch_createNewCaps_def comp_def + apiGetObjectSize_def shiftl_t2n field_simps + shiftL_nat mapM_x_def sequence_x_def append + fromIntegral_def integral_inv[unfolded Fun.comp_def]) + \ \TCB, EP, NTFN\ + apply (simp add: bind_assoc + AARCH64_H.getObjectSize_def + sequence_def Retype_H.createObject_def + AARCH64_H.toAPIType_def + createObjects_def AARCH64_H.createObject_def + Arch_createNewCaps_def comp_def + apiGetObjectSize_def shiftl_t2n field_simps + shiftL_nat append mapM_x_append2 + fromIntegral_def integral_inv[unfolded Fun.comp_def])+ + apply (subst monad_eq) + apply (rule createObjects_Cons) + apply (simp add: field_simps shiftl_t2n bind_assoc pageBits_def + objBits_simps placeNewObject_def2)+ + apply (rule_tac Q = "\r s. pspace_aligned' s \ + pspace_distinct' s \ + pspace_no_overlap' (ptr + (2^tcbBlockSizeBits + of_nat n * 2^tcbBlockSizeBits)) (objBitsKO (KOTCB makeObject)) s \ + range_cover (ptr + 2^tcbBlockSizeBits) sz + (objBitsKO (KOTCB makeObject)) (Suc n) + \ (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s)" + in monad_eq_split2) + apply simp + apply (subst monad_commute_simple[symmetric]) + apply (rule commute_commute[OF curDomain_commute]) + apply wpsimp+ + apply (rule_tac Q = "\r s. r = (ksCurDomain s) \ + pspace_aligned' s \ + pspace_distinct' s \ + pspace_no_overlap' (ptr + (2^tcbBlockSizeBits + of_nat n * 2^tcbBlockSizeBits)) (objBitsKO (KOTCB makeObject)) s \ + range_cover (ptr + 2^tcbBlockSizeBits) sz + (objBitsKO (KOTCB makeObject)) (Suc n) + \ (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s) + " in monad_eq_split) + apply (subst monad_commute_simple[symmetric]) + apply (rule createObjects_setDomains_commute) + apply (clarsimp simp:objBits_simps) + apply (rule conj_impI) + apply (erule aligned_add_aligned) + apply (rule aligned_add_aligned[where n = tcbBlockSizeBits]) + apply (simp add:is_aligned_def objBits_defs) + apply (cut_tac is_aligned_shift[where m = tcbBlockSizeBits and k = "of_nat n", + unfolded shiftl_t2n,simplified]) + apply (simp add:field_simps)+ + apply (erule range_cover_full) + apply (simp add: word_bits_conv objBits_defs) + apply (rule_tac Q = "\x s. 
(ksCurDomain s) = r" in monad_eq_split2) + apply simp + apply (rule_tac Q = "\x s. (ksCurDomain s) = r" in monad_eq_split) + apply (subst rewrite_step[where f = curDomain and + P ="\s. ksCurDomain s = r" and f' = "return r"]) + apply (simp add:curDomain_def bind_def gets_def get_def) + apply simp + apply (simp add:mapM_x_singleton) + apply wp + apply simp + apply (wp mapM_x_wp') + apply simp + apply (simp add:curDomain_def,wp) + apply simp + apply (wp createObjects'_psp_aligned[where sz = sz] + createObjects'_psp_distinct[where sz = sz]) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_post_imp[OF _ + createObjects'_pspace_no_overlap[unfolded shiftl_t2n, + where gz = tcbBlockSizeBits and sz = sz, simplified]]) + apply (simp add:objBits_simps field_simps) + apply (simp add: objBits_simps) + apply (wp createTCBs_tcb_at'[where sz = sz]) + apply (clarsimp simp:objBits_simps word_bits_def field_simps) + apply (frule range_cover_le[where n = "Suc n"],simp+) + apply (drule range_cover_offset[where p = 1,rotated]) + apply simp + apply (simp add: objBits_defs) + apply (((simp add: bind_assoc + AARCH64_H.getObjectSize_def + mapM_def sequence_def Retype_H.createObject_def + AARCH64_H.toAPIType_def + createObjects_def AARCH64_H.createObject_def + Arch_createNewCaps_def comp_def + apiGetObjectSize_def shiftl_t2n field_simps + shiftL_nat mapM_x_def sequence_x_def append + fromIntegral_def integral_inv[unfolded Fun.comp_def])+ + , subst monad_eq, rule createObjects_Cons + , (simp add: field_simps shiftl_t2n bind_assoc pageBits_def + objBits_simps placeNewObject_def2)+)+)[2] + + apply (in_case "CapTableObject") + apply (simp add: bind_assoc + AARCH64_H.getObjectSize_def + mapM_def sequence_def Retype_H.createObject_def + AARCH64_H.toAPIType_def + createObjects_def AARCH64_H.createObject_def + Arch_createNewCaps_def comp_def + apiGetObjectSize_def shiftl_t2n field_simps + shiftL_nat mapM_x_def sequence_x_def append + fromIntegral_def integral_inv[unfolded Fun.comp_def])+ + apply (subst monad_eq, rule createObjects_Cons) + apply (simp add: field_simps shiftl_t2n bind_assoc pageBits_def + objBits_simps placeNewObject_def2)+ + apply (subst gsCNodes_update gsCNodes_upd_createObjects'_comm)+ + apply (simp add: modify_modify_bind) + apply (rule fun_cong[where x=s]) + apply (rule arg_cong_bind1)+ + apply (rule arg_cong_bind[OF _ refl]) + apply (rule arg_cong[where f=modify, OF ext], simp) + apply (rule arg_cong2[where f=gsCNodes_update, OF ext refl]) + apply (rule ext) + apply simp + + apply (in_case "HugePageObject") + apply (simp add: Arch_createNewCaps_def + Retype_H.createObject_def createObjects_def bind_assoc + AARCH64_H.toAPIType_def + AARCH64_H.createObject_def placeNewDataObject_def) + apply (intro conjI impI) + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps + getObjectSize_def bit_simps + add.commute append) + apply ((subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+)[1] + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps + getObjectSize_def + pageBits_def add.commute append) + apply (subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm 
+ dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+ + + apply (in_case "VSpaceObject") + apply (simp add: Arch_createNewCaps_def Retype_H.createObject_def createObjects_def + bind_assoc AARCH64_H.toAPIType_def AARCH64_H.createObject_def) + apply (subst monad_eq, rule createObjects_Cons) + apply ((simp add: field_simps shiftl_t2n + getObjectSize_def bit_simps objBits_simps ptBits_def)+)[6] + apply (simp add: bind_assoc placeNewObject_def2) + apply (simp add: field_simps updatePTType_def bind_assoc gets_modify_def + getObjectSize_def placeNewObject_def2 objBits_simps append) + apply (subst ksArchState_update ksArchState_upd_createObjects'_comm + | simp add: modify_modify_bind o_def + | simp only: o_def cong: if_cong)+ + apply (rule bind_apply_cong, simp) + apply (rule bind_apply_cong, simp) + apply (rule monad_eq_split_tail, simp) + apply (rule fun_cong, rule arg_cong[where f=modify]) + apply (simp flip: if_eq) + apply (simp cong: if_cong del: if_eq) + apply (rule ext) + apply (rename_tac s', case_tac s') + apply (rename_tac ksArch ksMachine, case_tac ksArch) + apply fastforce + + apply (in_case "SmallPageObject") + apply (simp add: Arch_createNewCaps_def + Retype_H.createObject_def createObjects_def bind_assoc + toAPIType_def + AARCH64_H.createObject_def placeNewDataObject_def) + apply (intro conjI impI) + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n bit_simps + getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps bit_simps + getObjectSize_def add.commute append) + apply ((subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+)[1] + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + AARCH64_H.getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps + AARCH64_H.getObjectSize_def + pageBits_def add.commute append) + apply (subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+ + + apply (in_case "LargePageObject") + apply (simp add: Arch_createNewCaps_def + Retype_H.createObject_def createObjects_def bind_assoc + toAPIType_def AARCH64_H.createObject_def placeNewDataObject_def) + apply (intro conjI impI) + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps bit_simps + getObjectSize_def add.commute append) + apply ((subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+)[1] + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + AARCH64_H.getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps + getObjectSize_def bit_simps add.commute append) + apply (subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+ + + apply (in_case "PageTableObject") + apply (simp add: Arch_createNewCaps_def Retype_H.createObject_def createObjects_def bind_assoc + 
AARCH64_H.toAPIType_def AARCH64_H.createObject_def) + apply (subst monad_eq, rule createObjects_Cons) + apply ((simp add: field_simps shiftl_t2n + getObjectSize_def bit_simps objBits_simps ptBits_def)+)[6] + apply (simp add: bind_assoc placeNewObject_def2) + apply (simp add: field_simps updatePTType_def bind_assoc gets_modify_def + getObjectSize_def placeNewObject_def2 objBits_simps append) + apply (subst ksArchState_update ksArchState_upd_createObjects'_comm + | simp add: modify_modify_bind o_def + | simp only: o_def cong: if_cong)+ + apply (rule bind_apply_cong, simp) + apply (rule bind_apply_cong, simp) + apply (rule monad_eq_split_tail, simp) + apply (rule fun_cong, rule arg_cong[where f=modify]) + apply (simp flip: if_eq) + apply (simp cong: if_cong del: if_eq) + apply (rule ext) + apply (rename_tac s', case_tac s') + apply (rename_tac ksArch ksMachine, case_tac ksArch) + apply fastforce + apply (in_case "VCPUObject") + apply (simp add: Arch_createNewCaps_def Retype_H.createObject_def + createObjects_def bind_assoc AARCH64_H.toAPIType_def + AARCH64_H.createObject_def) + apply (subst monad_eq, rule createObjects_Cons) + apply ((simp add: field_simps shiftl_t2n getObjectSize_def + bit_simps objBits_simps ptBits_def)+)[6] + apply (simp add: bind_assoc placeNewObject_def2) + apply (simp add: add_ac bit_simps getObjectSize_def objBits_simps append) + done +qed + +lemma createObject_def2: + "(RetypeDecls_H.createObject ty ptr us dev >>= (\x. return [x])) = + createNewCaps ty ptr (Suc 0) us dev" + apply (clarsimp simp:createObject_def createNewCaps_def placeNewObject_def2) + apply (case_tac ty; simp add: toAPIType_def) + defer + apply ((clarsimp simp: Arch_createNewCaps_def createObjects_def shiftL_nat + AARCH64_H.createObject_def placeNewDataObject_def + placeNewObject_def2 objBits_simps bind_assoc + clearMemory_def clearMemoryVM_def fun_upd_def[symmetric] + word_size mapM_x_singleton storeWordVM_def + updatePTType_def gets_modify_def)+)[6] + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (clarsimp simp: Arch_createNewCaps_def createObjects_def shiftL_nat + AARCH64_H.createObject_def placeNewObject_def2 objBits_simps bind_assoc + clearMemory_def clearMemoryVM_def word_size mapM_x_singleton + storeWordVM_def)+ + done + + +lemma createNewObjects_def2: + "\dslots \ []; length ( dslots ) < 2^word_bits; + cte_wp_at' (\c. isUntypedCap (cteCap c)) parent s; + \slot \ set dslots. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s; + pspace_no_overlap' ptr sz s; + caps_no_overlap'' ptr sz s; + caps_overlap_reserved' + {ptr..ptr + of_nat (length dslots) * 2 ^ Types_H.getObjectSize ty us - 1} s; + valid_pspace' s; + distinct dslots; + valid_arch_state' s; + range_cover ptr sz (Types_H.getObjectSize ty us) (length dslots); + ptr \ 0; sz \ maxUntypedSizeBits; canonical_address (ptr && ~~ mask sz); + ksCurDomain s \ maxDomain\ + \ createNewObjects ty parent dslots ptr us d s = + insertNewCaps ty parent dslots ptr us d s" + apply (clarsimp simp:insertNewCaps_def createNewObjects_def neq_Nil_conv) + proof - + fix y ys + have list_inc: "\n. [0.e.Suc n] = [0 .e. n] @ [n+1]" + by simp + assume le: "Suc (length (ys::machine_word list)) < 2 ^ word_bits" + assume list_nc: "\slot \ set ys. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s" + assume dist: "distinct ys" + assume extra: "y\ set ys" "cte_wp_at' (\c. 
cteCap c = capability.NullCap) y s" + assume not_0: "ptr \ 0" + assume sz_limit: "sz \ maxUntypedSizeBits" + assume ptr_cn: "canonical_address (ptr && ~~ mask sz)" + assume kscd: "ksCurDomain s \ maxDomain" + assume valid_psp: "valid_pspace' s" + assume valid_arch_state: "valid_arch_state' s" + assume psp_no_overlap: "pspace_no_overlap' ptr sz s" + assume caps_no_overlap: "caps_no_overlap'' ptr sz s" + assume caps_reserved: "caps_overlap_reserved' + {ptr..ptr + (1 + of_nat (length ys)) * 2 ^ (Types_H.getObjectSize ty us) - 1} s" + assume range_cover: "range_cover ptr sz (Types_H.getObjectSize ty us) (Suc (length ys))" + assume unt_at: "cte_wp_at' (\c. isUntypedCap (cteCap c)) parent s" + show "zipWithM_x + (\num slot. + RetypeDecls_H.createObject ty ((num << Types_H.getObjectSize ty us) + ptr) us d >>= + insertNewCap parent slot) + [0.e.of_nat (length ys)] (y # ys) s = + (createNewCaps ty ptr (Suc (length ys)) us d >>= zipWithM_x (insertNewCap parent) (y # ys)) s" + using le list_nc dist extra range_cover not_0 sz_limit ptr_cn caps_reserved + proof (induct ys arbitrary: y rule:rev_induct) + case Nil + show ?case + by (clarsimp simp:zipWithM_x_def zipWith_def + sequence_x_def createObject_def2[symmetric]) + next + case (snoc a as b) + have caps_r:"caps_overlap_reserved' + {ptr..ptr + (1 + of_nat (length as)) * 2 ^ Types_H.getObjectSize ty us - 1} s" + using snoc.prems + apply - + apply (erule caps_overlap_reserved'_subseteq) + apply (cut_tac is_aligned_no_overflow + [where ptr = "ptr + ((1 + of_nat (length as)) << APIType_capBits ty us)" + and sz = " Types_H.getObjectSize ty us"]) + apply (clarsimp simp: power_add[symmetric] shiftl_t2n field_simps objSize_eq_capBits ) + apply (rule order_trans[OF word_sub_1_le]) + apply (drule(1) range_cover_no_0[where p = "Suc (length as)"]) + apply simp + apply (simp add:word_arith_nat_Suc power_add[symmetric] field_simps) + apply (simp add:shiftl_t2n) + apply (rule aligned_add_aligned[OF range_cover.aligned]) + apply (simp add:objSize_eq_capBits)+ + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def objSize_eq_capBits)+ + done + show ?case + apply simp + using snoc.prems + apply (subst upto_enum_inc_1_len) + apply (rule word_of_nat_less) + apply (simp add:word_bits_def minus_one_norm) + apply (subst append_Cons[symmetric]) + apply (subst zipWithM_x_append1) + apply (clarsimp simp:unat_of_nat64 bind_assoc) + apply (subst monad_eq) + apply (rule snoc.hyps) + apply (simp add:caps_r | rule range_cover_le)+ + apply (simp add:snoc.hyps bind_assoc) + apply (rule sym) + apply (subst monad_eq) + apply (erule createNewCaps_Cons[OF _ valid_psp valid_arch_state psp_no_overlap not_0]) + apply (rule sym) + apply (simp add:bind_assoc del:upto_enum_nat) + apply (rule_tac Q = "(\r s. (\cap\set r. cap \ capability.NullCap) \ + cte_wp_at' (\c. isUntypedCap (cteCap c)) parent s \ + cte_wp_at' (\c. cteCap c = capability.NullCap) b s \ + (\slot\set as. cte_wp_at' (\c. 
cteCap c = capability.NullCap) slot s) \ + pspace_no_overlap' (ptr + (1 + of_nat (length as) << Types_H.getObjectSize ty us)) + (Types_H.getObjectSize ty us) s + \ valid_pspace' s \ valid_arch_state' s \ Q r s)" for Q in monad_eq_split) + apply (subst append_Cons[symmetric]) + apply (subst zipWithM_x_append1) + apply clarsimp + apply assumption + apply (clarsimp simp:field_simps) + apply (subst monad_commute_simple[OF commute_commute]) + apply (rule new_cap_object_commute) + apply (clarsimp) + apply (frule_tac p = "1 + length as" in range_cover_no_0[rotated]) + apply clarsimp + apply simp + apply (subst (asm) Abs_fnat_hom_add[symmetric]) + apply (intro conjI) + apply (simp add:range_cover_def word_bits_def) + apply (rule aligned_add_aligned[OF range_cover.aligned],simp) + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def) + apply (simp add:range_cover_def) + apply (clarsimp simp:field_simps shiftl_t2n) + apply (clarsimp simp:createNewCaps_def) + apply (wp createNewCaps_not_nc createNewCaps_pspace_no_overlap'[where sz = sz] + createNewCaps_cte_wp_at'[where sz = sz] hoare_vcg_ball_lift + createNewCaps_valid_pspace[where sz = sz] + createNewCaps_obj_at'[where sz=sz]) + apply simp + apply (rule range_cover_le) + apply (simp add:objSize_eq_capBits caps_r)+ + apply (wp createNewCaps_ret_len createNewCaps_valid_arch_state) + apply (frule range_cover_le[where n = "Suc (length as)"]) + apply simp+ + using psp_no_overlap caps_r valid_psp unt_at caps_no_overlap valid_arch_state + apply (clarsimp simp: valid_pspace'_def objSize_eq_capBits) + apply (auto simp: kscd) + done + qed +qed + +lemma createNewObjects_corres_helper: +assumes check: "distinct dslots" + and cover: "range_cover ptr sz (Types_H.getObjectSize ty us) (length dslots)" + and not_0: "ptr \ 0" "length dslots \ 0" + and sz_limit: "sz \ maxUntypedSizeBits" + and ptr_cn: "canonical_address (ptr && ~~ mask sz)" + and c: "corres r P P' f (insertNewCaps ty parent dslots ptr us d)" + and imp: "\s. P' s \ (cte_wp_at' (\c. isUntypedCap (cteCap c)) parent s + \ (\slot \ set dslots. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s) + \ pspace_no_overlap' ptr sz s + \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr..ptr + of_nat (length dslots) * + 2^ (Types_H.getObjectSize ty us) - 1} s + \ valid_pspace' s \ valid_arch_state' s \ ksCurDomain s \ maxDomain)" + shows "corres r P P' f (createNewObjects ty parent dslots ptr us d)" + using check cover not_0 sz_limit ptr_cn + apply (clarsimp simp:corres_underlying_def) + apply (frule imp) + apply (frule range_cover.range_cover_le_n_less(1)[where 'a=machine_word_len, folded word_bits_def, OF _ le_refl]) + apply clarsimp + apply (simp add:createNewObjects_def2) + using c + apply (clarsimp simp:corres_underlying_def) + apply (drule(1) bspec) + apply clarsimp + done + +lemma createNewObjects_wp_helper: + assumes check: "distinct dslots" + and cover: "range_cover ptr sz (Types_H.getObjectSize ty us) (length dslots)" + and not_0: "ptr \ 0" "length dslots \ 0" + and ptr_cn: "canonical_address (ptr && ~~ mask sz)" + and sz_limit: "sz \ maxUntypedSizeBits" + shows "\P\ insertNewCaps ty parent dslots ptr us d \Q\ + \ \P and (cte_wp_at' (\c. isUntypedCap (cteCap c)) parent + and (\s. \slot \ set dslots. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s) + and pspace_no_overlap' ptr sz + and caps_no_overlap'' ptr sz + and valid_pspace' + and valid_arch_state' + and caps_overlap_reserved' + {ptr..ptr + of_nat (length dslots) * 2^ (Types_H.getObjectSize ty us) - 1} and (\s. 
ksCurDomain s \ maxDomain)) + \ (createNewObjects ty parent dslots ptr us d) \Q\" + using assms + apply (clarsimp simp:valid_def) + apply (drule_tac x = s in spec) + apply (frule range_cover.range_cover_le_n_less(1)[where 'a=machine_word_len, folded word_bits_def, OF _ le_refl]) + apply (simp add:createNewObjects_def2[symmetric]) + apply (drule(1) bspec) + apply clarsimp + done + +lemma createObject_def3: + "createObject = + (\ty ptr us d. createNewCaps ty ptr (Suc 0) us d >>= (\m. return (hd m)))" + apply (rule ext)+ + apply (simp add:createObject_def2[symmetric]) + done + +crunches updatePTType + for pspace_no_overlap'[wp]: "pspace_no_overlap' p n" + +lemma ArchCreateObject_pspace_no_overlap': + "\\s. pspace_no_overlap' + (ptr + (of_nat n << APIType_capBits ty userSize)) sz s \ + pspace_aligned' s \ pspace_distinct' s \ + range_cover ptr sz (APIType_capBits ty userSize) (n + 2) \ ptr \ 0\ + AARCH64_H.createObject ty + (ptr + (of_nat n << APIType_capBits ty userSize)) userSize d + \\archCap. pspace_no_overlap' + (ptr + (1 + of_nat n << APIType_capBits ty userSize)) sz\" + apply (rule hoare_pre) + apply (clarsimp simp:AARCH64_H.createObject_def) + apply wpc + apply (wp doMachineOp_psp_no_overlap + createObjects'_pspace_no_overlap2 hoare_when_weak_wp + createObjects'_psp_aligned[where sz = sz] + createObjects'_psp_distinct[where sz = sz] + | simp add: placeNewObject_def2 word_shiftl_add_distrib + | simp add: placeNewObject_def2 word_shiftl_add_distrib + | simp add: placeNewDataObject_def placeNewObject_def2 word_shiftl_add_distrib + field_simps split del: if_split + | clarsimp simp add: add.assoc[symmetric],wp createObjects'_pspace_no_overlap2[where n =0 and sz = sz,simplified] + | clarsimp simp add: APIType_capBits_def objBits_simps pageBits_def)+ + apply (clarsimp simp: conj_comms) + apply (frule(1) range_cover_no_0[where p = n]) + apply simp + apply (subgoal_tac "is_aligned (ptr + (of_nat n << APIType_capBits ty userSize)) + (APIType_capBits ty userSize) ") + prefer 2 + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (simp add:is_aligned_shiftl_self range_cover_sz') + apply (simp add: APIType_capBits_def) + apply (frule range_cover_offset[rotated,where p = n]) + apply simp+ + apply (frule range_cover_le[where n = "Suc (Suc 0)"]) + apply simp + apply (frule pspace_no_overlap'_le2) + apply (rule range_cover_compare_offset) + apply simp+ + apply (clarsimp simp:word_shiftl_add_distrib + ,simp add:field_simps) + apply (clarsimp simp:add.assoc[symmetric]) + apply (rule range_cover_tail_mask[where n =0,simplified]) + apply (drule range_cover_offset[rotated,where p = n]) + apply simp + apply (clarsimp simp:shiftl_t2n field_simps) + apply (metis numeral_2_eq_2) + apply (simp add:shiftl_t2n field_simps) + apply (intro conjI allI) + apply (clarsimp simp: field_simps word_bits_conv + APIType_capBits_def shiftl_t2n objBits_simps bit_simps + | rule conjI | erule range_cover_le,simp)+ + done + +lemma to_from_apiTypeD: "toAPIType ty = Some x \ ty = fromAPIType x" + by (cases ty) (auto simp add: fromAPIType_def + toAPIType_def) + +lemma createObject_pspace_no_overlap': + "\\s. pspace_no_overlap' + (ptr + (of_nat n << APIType_capBits ty userSize)) sz s \ + pspace_aligned' s \ pspace_distinct' s + \ range_cover ptr sz (APIType_capBits ty userSize) (n + 2) + \ ptr \ 0\ + createObject ty (ptr + (of_nat n << APIType_capBits ty userSize)) userSize d + \\rv s. 
pspace_no_overlap' + (ptr + (1 + of_nat n << APIType_capBits ty userSize)) sz s\" + apply (rule hoare_pre) + apply (clarsimp simp:createObject_def) + apply wpc + apply (wp ArchCreateObject_pspace_no_overlap') + apply wpc + apply wp + apply (simp add:placeNewObject_def2) + apply (wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap2 + | simp add: placeNewObject_def2 curDomain_def word_shiftl_add_distrib + field_simps)+ + apply (simp add:add.assoc[symmetric]) + apply (wp createObjects'_pspace_no_overlap2 + [where n =0 and sz = sz,simplified]) + apply (simp add:placeNewObject_def2) + apply (wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap2 + | simp add: placeNewObject_def2 word_shiftl_add_distrib + field_simps)+ + apply (simp add:add.assoc[symmetric]) + apply (wp createObjects'_pspace_no_overlap2 + [where n =0 and sz = sz,simplified]) + apply (simp add:placeNewObject_def2) + apply (wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap2 + | simp add: placeNewObject_def2 word_shiftl_add_distrib + field_simps)+ + apply (simp add:add.assoc[symmetric]) + apply (wp createObjects'_pspace_no_overlap2 + [where n =0 and sz = sz,simplified]) + apply (simp add:placeNewObject_def2) + apply (wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap2 + | simp add: placeNewObject_def2 word_shiftl_add_distrib + field_simps)+ + apply (simp add:add.assoc[symmetric]) + apply (wp createObjects'_pspace_no_overlap2 + [where n =0 and sz = sz,simplified]) + apply clarsimp + apply (frule(1) range_cover_no_0[where p = n]) + apply simp + apply (frule pspace_no_overlap'_le2) + apply (rule range_cover_compare_offset) + apply simp+ + apply (clarsimp simp:word_shiftl_add_distrib + ,simp add:field_simps) + apply (clarsimp simp:add.assoc[symmetric]) + apply (rule range_cover_tail_mask[where n =0,simplified]) + apply (drule range_cover_offset[rotated,where p = n]) + apply simp + apply (clarsimp simp:shiftl_t2n field_simps) + apply (metis numeral_2_eq_2) + apply (simp add:shiftl_t2n field_simps) + apply (frule range_cover_offset[rotated,where p = n]) + apply simp+ + apply (auto simp: word_shiftl_add_distrib field_simps shiftl_t2n elim: range_cover_le, + auto simp add: APIType_capBits_def fromAPIType_def objBits_def + dest!: to_from_apiTypeD) + done + +crunches updatePTType + for aligned'[wp]: pspace_aligned' + and distinct'[wp]: pspace_distinct' + +lemma createObject_pspace_aligned_distinct': + "\pspace_aligned' and K (is_aligned ptr (APIType_capBits ty us)) + and pspace_distinct' and pspace_no_overlap' ptr (APIType_capBits ty us) + and K (ty = APIObjectType apiobject_type.CapTableObject \ us < 28)\ + createObject ty ptr us d + \\xa s. 
pspace_aligned' s \ pspace_distinct' s\" + apply (rule hoare_pre) + apply (wp placeNewObject_pspace_aligned' unless_wp + placeNewObject_pspace_distinct' + | simp add: AARCH64_H.createObject_def Retype_H.createObject_def objBits_simps + curDomain_def placeNewDataObject_def + split del: if_split + | wpc | intro conjI impI)+ + apply (auto simp: APIType_capBits_def objBits_simps' bit_simps word_bits_def + AARCH64_H.toAPIType_def + split: AARCH64_H.object_type.splits apiobject_type.splits) + done + +declare objSize_eq_capBits [simp] + +lemma createNewObjects_Cons: + assumes dlength: "length dest < 2 ^ word_bits" + shows "createNewObjects ty src (dest @ [lt]) ptr us d = + do createNewObjects ty src dest ptr us d; + (RetypeDecls_H.createObject ty ((of_nat (length dest) << APIType_capBits ty us) + ptr) us d + >>= insertNewCap src lt) + od" + proof - + from dlength + have expand:"dest\[] \ [(0::machine_word) .e. of_nat (length dest)] + = [0.e.of_nat (length dest - 1)] @ [of_nat (length dest)]" + apply (cases dest) + apply clarsimp+ + apply (rule upto_enum_inc_1_len) + apply (rule word_of_nat_less) + apply (simp add: word_bits_conv minus_one_norm) + done + + have length:"\length dest < 2 ^ word_bits;dest \ []\ + \ length [(0::machine_word) .e. of_nat (length dest - 1)] = length dest" + proof (induct dest) + case Nil thus ?case by simp + next + case (Cons x xs) + thus ?case by (simp add:unat_of_nat64) + qed + + show ?thesis + using dlength + apply (case_tac "dest = []") + apply (simp add: zipWithM_x_def createNewObjects_def + sequence_x_def zipWith_def) + apply (clarsimp simp:createNewObjects_def) + apply (subst expand) + apply simp + apply (subst zipWithM_x_append1) + apply (rule length) + apply (simp add:field_simps)+ + done +qed + +lemma updateNewFreeIndex_cteCaps_of[wp]: + "\\s. P (cteCaps_of s)\ updateNewFreeIndex slot \\rv s. P (cteCaps_of s)\" + by (simp add: cteCaps_of_def, wp) + +lemma insertNewCap_wps[wp]: + "\pspace_aligned'\ insertNewCap parent slot cap \\rv. pspace_aligned'\" + "\pspace_distinct'\ insertNewCap parent slot cap \\rv. pspace_distinct'\" + "\\s. P ((cteCaps_of s)(slot \ cap))\ + insertNewCap parent slot cap + \\rv s. P (cteCaps_of s)\" + apply (simp_all add: insertNewCap_def) + apply (wp hoare_drop_imps + | simp add: o_def)+ + apply (fastforce elim!: rsubst[where P=P]) + done + +crunch typ_at'[wp]: insertNewCap "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +end +end diff --git a/proof/refine/AARCH64/EmptyFail.thy b/proof/refine/AARCH64/EmptyFail.thy new file mode 100644 index 0000000000..3744f2b4a7 --- /dev/null +++ b/proof/refine/AARCH64/EmptyFail.thy @@ -0,0 +1,137 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory EmptyFail +imports Bits_R +begin + +(* Collect empty_fail lemmas here. naming convention is empty_fail_NAME. 
+ Unless there is a good reason, they should all be [intro!, wp, simp] *) + +lemma empty_fail_projectKO [simp, intro!]: + "empty_fail (projectKO v)" + unfolding empty_fail_def projectKO_def + by (simp add: return_def fail_def split: option.splits) + +lemma empty_fail_alignCheck [intro!, wp, simp]: + "empty_fail (alignCheck a b)" + unfolding alignCheck_def + by (fastforce simp: alignError_def) + +lemma empty_fail_magnitudeCheck [intro!, wp, simp]: + "empty_fail (magnitudeCheck a b c)" + unfolding magnitudeCheck_def + by (fastforce split: option.splits) + +lemma empty_fail_loadObject_default [intro!, wp, simp]: + shows "empty_fail (loadObject_default x b c d)" + by (auto simp: loadObject_default_def + split: option.splits) + +lemma empty_fail_threadGet [intro!, wp, simp]: + "empty_fail (threadGet f p)" + by (fastforce simp: threadGet_def getObject_def split_def) + +lemma empty_fail_getCTE [intro!, wp, simp]: + "empty_fail (getCTE slot)" + apply (simp add: getCTE_def getObject_def split_def) + apply (intro empty_fail_bind, simp_all) + apply (simp add: loadObject_cte typeError_def alignCheck_def alignError_def + magnitudeCheck_def + split: Structures_H.kernel_object.split) + apply (auto split: option.split) + done + +lemma empty_fail_updateObject_cte [intro!, wp, simp]: + "empty_fail (updateObject (v :: cte) ko a b c)" + by (fastforce simp: updateObject_cte typeError_def unless_def split: kernel_object.splits ) + +lemma empty_fail_setCTE [intro!, wp, simp]: + "empty_fail (setCTE p cte)" + unfolding setCTE_def + by (fastforce simp: setObject_def split_def) + +lemma empty_fail_updateCap [intro!, wp, simp]: + "empty_fail (updateCap p f)" + unfolding updateCap_def by auto + +lemma empty_fail_updateMDB [intro!, wp, simp]: + "empty_fail (updateMDB a b)" + unfolding updateMDB_def Let_def by auto + +lemma empty_fail_getSlotCap [intro!, wp, simp]: + "empty_fail (getSlotCap a)" + unfolding getSlotCap_def by fastforce + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma empty_fail_getObject: + assumes "\b c d. 
empty_fail (loadObject x b c d::'a :: pspace_storable kernel)" + shows "empty_fail (getObject x :: 'a :: pspace_storable kernel)" + apply (simp add: getObject_def split_def) + apply (safe intro!: assms) + done + +lemma empty_fail_updateTrackedFreeIndex [intro!, wp, simp]: + shows "empty_fail (updateTrackedFreeIndex p idx)" + by (fastforce simp add: updateTrackedFreeIndex_def) + +lemma empty_fail_updateNewFreeIndex [intro!, wp, simp]: + shows "empty_fail (updateNewFreeIndex p)" + apply (simp add: updateNewFreeIndex_def) + apply safe + apply (simp split: capability.split) + done + +lemma empty_fail_insertNewCap [intro!, wp, simp]: + "empty_fail (insertNewCap p p' cap)" + unfolding insertNewCap_def by fastforce + +lemma empty_fail_getIRQSlot [intro!, wp, simp]: + "empty_fail (getIRQSlot irq)" + by (fastforce simp: getIRQSlot_def getInterruptState_def locateSlot_conv) + +lemma empty_fail_getObject_ntfn [intro!, wp, simp]: + "empty_fail (getObject p :: Structures_H.notification kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_getNotification [intro!, wp, simp]: + "empty_fail (getNotification ep)" + by (simp add: getNotification_def) + +lemma empty_fail_lookupIPCBuffer [intro!, wp, simp]: + "empty_fail (lookupIPCBuffer a b)" + by (clarsimp simp: lookupIPCBuffer_def Let_def getThreadBufferSlot_def locateSlot_conv + split: capability.splits arch_capability.splits | wp | wpc | safe)+ + +lemma empty_fail_updateObject_default [intro!, wp, simp]: + "empty_fail (updateObject_default v ko a b c)" + by (fastforce simp: updateObject_default_def typeError_def unless_def split: kernel_object.splits ) + +lemma empty_fail_threadSet [intro!, wp, simp]: + "empty_fail (threadSet f p)" + by (fastforce simp: threadSet_def getObject_def setObject_def split_def) + +lemma empty_fail_getThreadState[iff]: + "empty_fail (getThreadState t)" + by (simp add: getThreadState_def) + +declare empty_fail_stateAssert [wp] + +lemma empty_fail_getSchedulerAction [intro!, wp, simp]: + "empty_fail getSchedulerAction" + by (simp add: getSchedulerAction_def getObject_def split_def) + +lemma empty_fail_scheduleSwitchThreadFastfail [intro!, wp, simp]: + "empty_fail (scheduleSwitchThreadFastfail a b c d)" + by (simp add: scheduleSwitchThreadFastfail_def split: if_splits) + +lemma empty_fail_curDomain [intro!, wp, simp]: + "empty_fail curDomain" + by (simp add: curDomain_def) + +end +end diff --git a/proof/refine/AARCH64/EmptyFail_H.thy b/proof/refine/AARCH64/EmptyFail_H.thy new file mode 100644 index 0000000000..5b3b6380bf --- /dev/null +++ b/proof/refine/AARCH64/EmptyFail_H.thy @@ -0,0 +1,324 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory EmptyFail_H +imports Refine +begin + +crunch_ignore (empty_fail) + (add: handleE' getCTE getObject updateObject + CSpaceDecls_H.resolveAddressBits + doMachineOp suspend restart schedule) + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemmas forM_empty_fail[intro!, wp, simp] = empty_fail_mapM[simplified forM_def[symmetric]] +lemmas forM_x_empty_fail[intro!, wp, simp] = empty_fail_mapM_x[simplified forM_x_def[symmetric]] +lemmas forME_x_empty_fail[intro!, wp, simp] = empty_fail_mapME_x[simplified forME_x_def[symmetric]] + +lemma withoutPreemption_empty_fail[intro!, wp, simp]: + "empty_fail m \ empty_fail (withoutPreemption m)" + by simp + +lemma withoutFailure_empty_fail[intro!, wp, simp]: + "empty_fail m \ empty_fail (withoutFailure m)" + by simp + +lemma catchFailure_empty_fail[intro!, wp, simp]: + "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catchFailure f g)" + by (simp add: empty_fail_catch) + +lemma emptyOnFailure_empty_fail[intro!, wp, simp]: + "empty_fail m \ empty_fail (emptyOnFailure m)" + by (simp add: emptyOnFailure_def empty_fail_catch) + +lemma rethrowFailure_empty_fail [intro!, wp, simp]: + "empty_fail m \ empty_fail (rethrowFailure f m)" + by (wpsimp simp:rethrowFailure_def o_def) + +lemma unifyFailure_empty_fail [intro!, wp, simp]: + "empty_fail f \ empty_fail (unifyFailure f)" + by (simp add: unifyFailure_def) + +lemma lookupErrorOnFailure_empty_fail [intro!, wp, simp]: + "empty_fail f \ empty_fail (lookupErrorOnFailure isSource f)" + by (simp add: lookupErrorOnFailure_def) + +lemma setObject_empty_fail [intro!, wp, simp]: + assumes x: "(\a b c. empty_fail (updateObject v a x b c))" + shows "empty_fail (setObject x v)" + by (wpsimp simp: setObject_def split_def wp: x) + +lemma asUser_empty_fail [intro!, wp, simp]: + "empty_fail f \ empty_fail (asUser t f)" + unfolding asUser_def + by (wpsimp | simp add: empty_fail_def)+ + +lemma capFaultOnFailure_empty_fail [intro!, wp, simp]: + "empty_fail m \ empty_fail (capFaultOnFailure cptr rp m)" + by (simp add: capFaultOnFailure_def) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: locateSlotCap + +lemma resolveAddressBits_spec_empty_fail: + notes spec_empty_fail_bindE'[wp_split] + shows + "spec_empty_fail (CSpace_H.resolveAddressBits a b c) s" +proof (induct arbitrary: s rule: resolveAddressBits.induct) + case (1 a b c s) + show ?case + apply (simp add: resolveAddressBits.simps) + apply (wp | simp | wpc | intro impI conjI | rule drop_spec_empty_fail)+ + apply (rule use_spec_empty_fail) + apply (rule 1 | simp add: in_monad | rule drop_spec_empty_fail | force)+ + done + qed + +lemmas resolveAddressBits_empty_fail[intro!, wp, simp] = + resolveAddressBits_spec_empty_fail[THEN use_spec_empty_fail] + +declare ef_dmo'[intro!, wp, simp] + +lemma empty_fail_getObject_ep [intro!, wp, simp]: + "empty_fail (getObject p :: endpoint kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_getObject_tcb [intro!, wp, simp]: + shows "empty_fail (getObject x :: tcb kernel)" + by (auto intro: empty_fail_getObject) + +lemma getEndpoint_empty_fail [intro!, wp, simp]: + "empty_fail (getEndpoint ep)" + by (simp add: getEndpoint_def) + +lemma constOnFailure_empty_fail[intro!, wp, simp]: + "empty_fail m \ empty_fail (constOnFailure x m)" + by (simp add: constOnFailure_def const_def empty_fail_catch) + +lemma ArchRetypeDecls_H_deriveCap_empty_fail[intro!, wp, simp]: + "isPageTableCap y \ isFrameCap y \ isASIDControlCap y \ isASIDPoolCap y \ isVCPUCap y + \ empty_fail (Arch.deriveCap x y)" + apply (simp add: AARCH64_H.deriveCap_def) + by (auto simp: isCap_simps) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: ensureNoChildren + +lemma deriveCap_empty_fail[intro!, wp, simp]: + "empty_fail (RetypeDecls_H.deriveCap slot y)" + apply (simp 
add: Retype_H.deriveCap_def) + apply (clarsimp simp: empty_fail_bindE) + apply (case_tac "capCap y") + apply (simp_all add: isCap_simps) + done + +crunch (empty_fail) empty_fail[intro!, wp, simp]: setExtraBadge, cteInsert + +lemma transferCapsToSlots_empty_fail[intro!, wp, simp]: + "empty_fail (transferCapsToSlots ep buffer n caps slots mi)" + apply (induct caps arbitrary: slots n mi) + apply simp + apply (simp add: Let_def split_def + split del: if_split) + apply (simp | wp | wpc | safe)+ + done + +crunch (empty_fail) empty_fail[intro!, wp, simp]: lookupTargetSlot, ensureEmptySlot, lookupSourceSlot, lookupPivotSlot + +lemma decodeCNodeInvocation_empty_fail[intro!, wp, simp]: + "empty_fail (decodeCNodeInvocation label args cap exs)" + apply (rule_tac label=label and args=args and exs=exs in decode_cnode_cases2) + apply (simp_all add: decodeCNodeInvocation_def + split_def cnode_invok_case_cleanup unlessE_whenE + cong: if_cong bool.case_cong list.case_cong) + by (simp | wp | wpc | safe)+ + +lemma empty_fail_getObject_ap [intro!, wp, simp]: + "empty_fail (getObject p :: asidpool kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_getObject_pte [intro!, wp, simp]: + "empty_fail (getObject p :: pte kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_getObject_vcpu [intro!, wp, simp]: + "empty_fail (getObject p :: vcpu kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_lookupPTSlotFromLevel[intro!, wp, simp]: + "empty_fail (lookupPTSlotFromLevel level pt vPtr)" +proof (induct level arbitrary: pt) + case 0 + then show ?case by (subst lookupPTSlotFromLevel.simps, simp) +next + case (Suc level) + then show ?case + by (subst lookupPTSlotFromLevel.simps) (wpsimp simp: checkPTAt_def pteAtIndex_def) +qed + +(* FIXME AARCH64 this and empty_fail_pt_type_exhausted are needed to effectively crunch decodeARMMMUInvocation, + so should be moved much higher and then deployed to other crunches of decodeARMMMUInvocation, + which are hand-held at present *) +lemma empty_fail_arch_cap_exhausted: + "\\ isFrameCap cap; \ isPageTableCap cap; \ isASIDControlCap cap; \ isASIDPoolCap cap; + \ isVCPUCap cap\ + \ empty_fail undefined" + by (cases cap; simp add: isCap_simps) + +(* FIXME AARCH64 move somewhere high up, see empty_fail_arch_cap_exhausted *) +lemma empty_fail_pt_type_exhausted: + "\ pt_t \ NormalPT_T; pt_t \ VSRootPT_T \ + \ False" + by (case_tac pt_t; simp) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: decodeARMMMUInvocation + (simp: Let_def pteAtIndex_def + wp: empty_fail_catch empty_fail_pt_type_exhausted empty_fail_arch_cap_exhausted) + +lemma ignoreFailure_empty_fail[intro!, wp, simp]: + "empty_fail x \ empty_fail (ignoreFailure x)" + by (simp add: ignoreFailure_def empty_fail_catch) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: cancelIPC, setThreadState, tcbSchedDequeue, setupReplyMaster, isStopped, possibleSwitchTo, tcbSchedAppend +(simp: Let_def setNotification_def setBoundNotification_def wp: empty_fail_getObject) + +crunch (empty_fail) "_H_empty_fail"[intro!, wp, simp]: "ThreadDecls_H.suspend" + (ignore_del: ThreadDecls_H.suspend) + +lemma ThreadDecls_H_restart_empty_fail[intro!, wp, simp]: + "empty_fail (ThreadDecls_H.restart target)" + by (fastforce simp: restart_def) + +lemma vcpuUpdate_empty_fail[intro!, wp, simp]: + "empty_fail (vcpuUpdate p f)" + by (fastforce simp: vcpuUpdate_def) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: vcpuEnable, vcpuRestore + (simp: uncurry_def) + +lemma empty_fail_lookupPTFromLevel[intro!, wp, simp]: 
+ "empty_fail (lookupPTFromLevel level ptPtr vPtr target)" + by (induct level arbitrary: ptPtr; subst lookupPTFromLevel.simps; simp; wpsimp) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: finaliseCap, preemptionPoint, capSwapForDelete +(wp: empty_fail_catch simp: Let_def ignore: lookupPTFromLevel) + +lemmas finalise_spec_empty_fail_induct = finaliseSlot'.induct[where P= + "\sl exp s. spec_empty_fail (finaliseSlot' sl exp) s"] + +lemma spec_empty_fail_If: + "\ P \ spec_empty_fail f s; \ P \ spec_empty_fail g s \ + \ spec_empty_fail (if P then f else g) s" + by (simp split: if_split) + +lemma spec_empty_whenE': + "\ P \ spec_empty_fail f s \ \ spec_empty_fail (whenE P f) s" + by (simp add: whenE_def spec_empty_returnOk) + +lemma finaliseSlot_spec_empty_fail: + notes spec_empty_fail_bindE'[rotated, wp_split] + shows "spec_empty_fail (finaliseSlot x b) s" +unfolding finaliseSlot_def +proof (induct rule: finalise_spec_empty_fail_induct) + case (1 x b s) + show ?case + apply (subst finaliseSlot'_simps_ext) + apply (simp only: split_def Let_def K_bind_def fun_app_def) + apply (wp spec_empty_whenE' spec_empty_fail_If | wpc + | rule 1[unfolded Let_def K_bind_def split_def fun_app_def, + simplified], (simp | intro conjI)+ + | rule drop_spec_empty_fail | simp)+ + done +qed + +lemmas finaliseSlot_empty_fail[intro!, wp, simp] = + finaliseSlot_spec_empty_fail[THEN use_spec_empty_fail] + +lemma checkCapAt_empty_fail[intro!, wp, simp]: + "empty_fail action \ empty_fail (checkCapAt cap ptr action)" + by (fastforce simp: checkCapAt_def) + +lemma assertDerived_empty_fail[intro!, wp, simp]: + "empty_fail f \ empty_fail (assertDerived src cap f)" + by (fastforce simp: assertDerived_def) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: cteDelete + +lemma spec_empty_fail_unlessE': + "\ \ P \ spec_empty_fail f s \ \ spec_empty_fail (unlessE P f) s" + by (simp add:unlessE_def spec_empty_returnOk) + +lemma cteRevoke_spec_empty_fail: + notes spec_empty_fail_bindE'[wp_split] + shows "spec_empty_fail (cteRevoke p) s" +proof (induct rule: cteRevoke.induct) + case (1 p s) + show ?case + apply (simp add: cteRevoke.simps) + apply (wp spec_empty_whenE' spec_empty_fail_unlessE' | rule drop_spec_empty_fail, wp)+ + apply (rule 1, auto simp add: in_monad) + done +qed + +lemmas cteRevoke_empty_fail[intro!, wp, simp] = + cteRevoke_spec_empty_fail[THEN use_spec_empty_fail] + +lemma Syscall_H_syscall_empty_fail[intro!, wp, simp]: + "\empty_fail a; \x. empty_fail (b x); \x. empty_fail (c x); + \x. empty_fail (d x); \x. empty_fail (e x)\ + \ empty_fail (syscall a b c d e)" + apply (simp add:syscall_def) + apply (wp | wpc | simp)+ + done + +lemma catchError_empty_fail[intro!, wp, simp]: + "\ empty_fail f; \x. 
empty_fail (g x) \ \ empty_fail (catchError f g)" + by fastforce + +crunch (empty_fail) empty_fail[intro!, wp, simp]: + chooseThread, getDomainTime, nextDomain, isHighestPrio + (wp: empty_fail_catch) + +lemma ThreadDecls_H_schedule_empty_fail[intro!, wp, simp]: + "empty_fail schedule" + apply (simp add: schedule_def) + apply (clarsimp simp: scheduleChooseNewThread_def split: if_split | wp | wpc)+ + done + +crunch (empty_fail) empty_fail[wp, simp]: setMRs, setMessageInfo +(wp: empty_fail_catch simp: const_def Let_def) + +crunch (empty_fail) empty_fail: decodeVCPUInjectIRQ, decodeVCPUWriteReg, decodeVCPUReadReg, doFlush, + decodeVCPUAckVPPI + (simp: Let_def) + +crunch (empty_fail) empty_fail[wp, simp]: handleFault + +lemma handleHypervisorFault_empty_fail[intro!, wp, simp]: + "empty_fail (handleHypervisorFault t f)" + by (cases f, simp add: handleHypervisorFault_def isFpuEnable_def split del: if_split) + wpsimp + +crunch (empty_fail) empty_fail: callKernel + (wp: empty_fail_catch) + +theorem call_kernel_serial: + "\ (einvs and (\s. event \ Interrupt \ ct_running s) and (ct_running or ct_idle) and + schact_is_rct and + (\s. 0 < domain_time s \ valid_domain_list s)) s; + \s'. (s, s') \ state_relation \ + (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and + (\s. ksSchedulerAction s = ResumeCurrentThread)) s' \ + \ fst (call_kernel event s) \ {}" + apply (cut_tac m = "call_kernel event" in corres_underlying_serial) + apply (rule kernel_corres) + apply (rule callKernel_empty_fail) + apply auto + done + +end + +end diff --git a/proof/refine/AARCH64/Finalise_R.thy b/proof/refine/AARCH64/Finalise_R.thy new file mode 100644 index 0000000000..653e599282 --- /dev/null +++ b/proof/refine/AARCH64/Finalise_R.thy @@ -0,0 +1,4077 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Finalise_R +imports + IpcCancel_R + InterruptAcc_R + Retype_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare doUnbindNotification_def[simp] + +text \Properties about empty_slot/emptySlot\ + +lemma case_Null_If: + "(case c of NullCap \ a | _ \ b) = (if c = NullCap then a else b)" + by (case_tac c, simp_all) + +crunches emptySlot + for aligned'[wp]: pspace_aligned' + and distinct'[wp]: pspace_distinct' + and pspace_canonical'[wp]: pspace_canonical' + (simp: case_Null_If) + +lemma updateCap_cte_wp_at_cases: + "\\s. (ptr = ptr' \ cte_wp_at' (P \ cteCap_update (K cap)) ptr' s) \ (ptr \ ptr' \ cte_wp_at' P ptr' s)\ + updateCap ptr cap + \\rv. cte_wp_at' P ptr'\" + apply (clarsimp simp: valid_def) + apply (drule updateCap_stuff) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def) + done + +crunches postCapDeletion, updateTrackedFreeIndex + for cte_wp_at'[wp]: "cte_wp_at' P p" + +lemma updateFreeIndex_cte_wp_at: + "\\s. cte_at' p s \ P (cte_wp_at' (if p = p' then P' + o (cteCap_update (capFreeIndex_update (K idx))) else P') p' s)\ + updateFreeIndex p idx + \\rv s. P (cte_wp_at' P' p' s)\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def + split del: if_split) + apply (rule hoare_pre) + apply (wp updateCap_cte_wp_at' getSlotCap_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (cases "p' = p", simp_all) + apply (case_tac cte, simp) + done + +lemma emptySlot_cte_wp_cap_other: + "\(\s. cte_wp_at' (\c. P (cteCap c)) p s) and K (p \ p')\ + emptySlot p' opt + \\rv s. cte_wp_at' (\c. 
P (cteCap c)) p s\" + apply (rule hoare_gen_asm) + apply (simp add: emptySlot_def clearUntypedFreeIndex_def getSlotCap_def) + apply (rule hoare_pre) + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases + updateFreeIndex_cte_wp_at getCTE_wp' hoare_vcg_all_lift + | simp add: | wpc + | wp (once) hoare_drop_imps)+ + done + +lemmas clearUntypedFreeIndex_typ_ats[wp] = typ_at_lifts[OF clearUntypedFreeIndex_typ_at'] + +crunch tcb_at'[wp]: postCapDeletion "tcb_at' t" +crunch ct[wp]: emptySlot "\s. P (ksCurThread s)" +crunch cur_tcb'[wp]: clearUntypedFreeIndex "cur_tcb'" + (wp: cur_tcb_lift) + +crunch ksRQ[wp]: emptySlot "\s. P (ksReadyQueues s)" +crunch ksRQL1[wp]: emptySlot "\s. P (ksReadyQueuesL1Bitmap s)" +crunch ksRQL2[wp]: emptySlot "\s. P (ksReadyQueuesL2Bitmap s)" +crunch obj_at'[wp]: postCapDeletion "obj_at' P p" + +crunch inQ[wp]: clearUntypedFreeIndex "\s. P (obj_at' (inQ d p) t s)" +crunch tcbDomain[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbDomain tcb)) t" +crunch tcbPriority[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbPriority tcb)) t" + +crunch nosch[wp]: emptySlot "\s. P (ksSchedulerAction s)" +crunch ksCurDomain[wp]: emptySlot "\s. P (ksCurDomain s)" + +lemma emptySlot_sch_act_wf [wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + emptySlot sl opt + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: emptySlot_def case_Null_If) + apply (wp sch_act_wf_lift tcb_in_cur_domain'_lift | wpcw | simp)+ + done + +lemma updateCap_valid_objs' [wp]: + "\valid_objs' and valid_cap' cap\ + updateCap ptr cap \\r. valid_objs'\" + unfolding updateCap_def + by (wp setCTE_valid_objs getCTE_wp) (clarsimp dest!: cte_at_cte_wp_atD) + +lemma updateFreeIndex_valid_objs' [wp]: + "\valid_objs'\ clearUntypedFreeIndex ptr \\r. valid_objs'\" + apply (simp add: clearUntypedFreeIndex_def getSlotCap_def) + apply (wp getCTE_wp' | wpc | simp add: updateTrackedFreeIndex_def)+ + done + +crunch valid_objs'[wp]: emptySlot "valid_objs'" + +crunch state_refs_of'[wp]: setInterruptState "\s. P (state_refs_of' s)" + (simp: state_refs_of'_pspaceI) +crunch state_refs_of'[wp]: emptySlot "\s. P (state_refs_of' s)" + (wp: crunch_wps) +crunch state_hyp_refs_of'[wp]: setInterruptState "\s. P (state_hyp_refs_of' s)" + (simp: state_hyp_refs_of'_pspaceI) +crunch state_hyp_refs_of'[wp]: emptySlot "\s. P (state_hyp_refs_of' s)" + (wp: crunch_wps) + +lemma mdb_chunked2D: + "\ mdb_chunked m; m \ p \ p'; m \ p' \ p''; + m p = Some (CTE cap nd); m p'' = Some (CTE cap'' nd''); + sameRegionAs cap cap''; p \ p'' \ + \ \cap' nd'. m p' = Some (CTE cap' nd') \ sameRegionAs cap cap'" + apply (subgoal_tac "\cap' nd'. 
m p' = Some (CTE cap' nd')") + apply (clarsimp simp add: mdb_chunked_def) + apply (drule spec[where x=p]) + apply (drule spec[where x=p'']) + apply clarsimp + apply (drule mp, erule trancl_into_trancl2) + apply (erule trancl.intros(1)) + apply (simp add: is_chunk_def) + apply (drule spec, drule mp, erule trancl.intros(1)) + apply (drule mp, rule trancl_into_rtrancl) + apply (erule trancl.intros(1)) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + apply (case_tac z, simp) + done + +lemma nullPointer_eq_0_simp[simp]: + "(nullPointer = 0) = True" + "(0 = nullPointer) = True" + by (simp add: nullPointer_def)+ + +lemma no_0_no_0_lhs_trancl [simp]: + "no_0 m \ \ m \ 0 \\<^sup>+ x" + by (rule, drule tranclD, clarsimp simp: next_unfold') + +lemma no_0_no_0_lhs_rtrancl [simp]: + "\ no_0 m; x \ 0 \ \ \ m \ 0 \\<^sup>* x" + by (clarsimp dest!: rtranclD) + +end +locale mdb_empty = + mdb_ptr?: mdb_ptr m _ _ slot s_cap s_node + for m slot s_cap s_node + + + fixes n + defines "n \ + modify_map + (modify_map + (modify_map + (modify_map m (mdbPrev s_node) + (cteMDBNode_update (mdbNext_update (%_. (mdbNext s_node))))) + (mdbNext s_node) + (cteMDBNode_update + (\mdb. mdbFirstBadged_update (%_. (mdbFirstBadged mdb \ mdbFirstBadged s_node)) + (mdbPrev_update (%_. (mdbPrev s_node)) mdb)))) + slot (cteCap_update (%_. capability.NullCap))) + slot (cteMDBNode_update (const nullMDBNode))" +begin +interpretation Arch . (*FIXME: arch_split*) + +lemmas m_slot_prev = m_p_prev +lemmas m_slot_next = m_p_next +lemmas prev_slot_next = prev_p_next +lemmas next_slot_prev = next_p_prev + +lemma n_revokable: + "n p = Some (CTE cap node) \ + (\cap' node'. m p = Some (CTE cap' node') \ + (if p = slot + then \ mdbRevocable node + else mdbRevocable node = mdbRevocable node'))" + by (auto simp add: n_def modify_map_if nullMDBNode_def split: if_split_asm) + +lemma m_revokable: + "m p = Some (CTE cap node) \ + (\cap' node'. n p = Some (CTE cap' node') \ + (if p = slot + then \ mdbRevocable node' + else mdbRevocable node' = mdbRevocable node))" + apply (clarsimp simp add: n_def modify_map_if nullMDBNode_def split: if_split_asm) + apply (cases "p=slot", simp) + apply (cases "p=mdbNext s_node", simp) + apply (cases "p=mdbPrev s_node", simp) + apply clarsimp + apply simp + apply (cases "p=mdbPrev s_node", simp) + apply simp + done + +lemma no_0_n: + "no_0 n" + using no_0 by (simp add: n_def) + +lemma n_next: + "n p = Some (CTE cap node) \ + (\cap' node'. m p = Some (CTE cap' node') \ + (if p = slot + then mdbNext node = 0 + else if p = mdbPrev s_node + then mdbNext node = mdbNext s_node + else mdbNext node = mdbNext node'))" + apply (subgoal_tac "p \ 0") + prefer 2 + apply (insert no_0_n)[1] + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + apply (cases "p = mdbPrev s_node") + apply (auto simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + done + +lemma n_prev: + "n p = Some (CTE cap node) \ + (\cap' node'. 
m p = Some (CTE cap' node') \ + (if p = slot + then mdbPrev node = 0 + else if p = mdbNext s_node + then mdbPrev node = mdbPrev s_node + else mdbPrev node = mdbPrev node'))" + apply (subgoal_tac "p \ 0") + prefer 2 + apply (insert no_0_n)[1] + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + apply (cases "p = mdbNext s_node") + apply (auto simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + done + +lemma n_cap: + "n p = Some (CTE cap node) \ + \cap' node'. m p = Some (CTE cap' node') \ + (if p = slot + then cap = NullCap + else cap' = cap)" + apply (clarsimp simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + apply (cases node) + apply auto + done + +lemma m_cap: + "m p = Some (CTE cap node) \ + \cap' node'. n p = Some (CTE cap' node') \ + (if p = slot + then cap' = NullCap + else cap' = cap)" + apply (clarsimp simp: n_def modify_map_cases initMDBNode_def) + apply (cases node) + apply clarsimp + apply (cases "p=slot", simp) + apply clarsimp + apply (cases "mdbNext s_node = p", simp) + apply fastforce + apply simp + apply (cases "mdbPrev s_node = p", simp) + apply fastforce + done + +lemma n_badged: + "n p = Some (CTE cap node) \ + \cap' node'. m p = Some (CTE cap' node') \ + (if p = slot + then \ mdbFirstBadged node + else if p = mdbNext s_node + then mdbFirstBadged node = (mdbFirstBadged node' \ mdbFirstBadged s_node) + else mdbFirstBadged node = mdbFirstBadged node')" + apply (subgoal_tac "p \ 0") + prefer 2 + apply (insert no_0_n)[1] + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + apply (cases "p = mdbNext s_node") + apply (auto simp: n_def modify_map_if nullMDBNode_def split: if_split_asm) + done + +lemma m_badged: + "m p = Some (CTE cap node) \ + \cap' node'. n p = Some (CTE cap' node') \ + (if p = slot + then \ mdbFirstBadged node' + else if p = mdbNext s_node + then mdbFirstBadged node' = (mdbFirstBadged node \ mdbFirstBadged s_node) + else mdbFirstBadged node' = mdbFirstBadged node)" + apply (subgoal_tac "p \ 0") + prefer 2 + apply (insert no_0_n)[1] + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if nullMDBNode_def split: if_split_asm) + apply (cases "p = mdbNext s_node") + apply (clarsimp simp: n_def modify_map_if nullMDBNode_def split: if_split_asm) + apply clarsimp + apply (cases "p = mdbPrev s_node") + apply (auto simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + done + +lemmas slot = m_p + +lemma m_next: + "m p = Some (CTE cap node) \ + \cap' node'. n p = Some (CTE cap' node') \ + (if p = slot + then mdbNext node' = 0 + else if p = mdbPrev s_node + then mdbNext node' = mdbNext s_node + else mdbNext node' = mdbNext node)" + apply (subgoal_tac "p \ 0") + prefer 2 + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if) + apply (cases "p = mdbPrev s_node") + apply (simp add: n_def modify_map_if) + apply simp + apply (simp add: n_def modify_map_if) + apply (cases "mdbNext s_node = p") + apply fastforce + apply fastforce + done + +lemma m_prev: + "m p = Some (CTE cap node) \ + \cap' node'. 
n p = Some (CTE cap' node') \ + (if p = slot + then mdbPrev node' = 0 + else if p = mdbNext s_node + then mdbPrev node' = mdbPrev s_node + else mdbPrev node' = mdbPrev node)" + apply (subgoal_tac "p \ 0") + prefer 2 + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if) + apply (cases "p = mdbPrev s_node") + apply (simp add: n_def modify_map_if) + apply simp + apply (simp add: n_def modify_map_if) + apply (cases "mdbNext s_node = p") + apply fastforce + apply fastforce + done + +lemma n_nextD: + "n \ p \ p' \ + if p = slot then p' = 0 + else if p = mdbPrev s_node + then m \ p \ slot \ p' = mdbNext s_node + else m \ p \ p'" + apply (clarsimp simp: mdb_next_unfold split del: if_split cong: if_cong) + apply (case_tac z) + apply (clarsimp split del: if_split) + apply (drule n_next) + apply (elim exE conjE) + apply (simp split: if_split_asm) + apply (frule dlist_prevD [OF m_slot_prev]) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma n_next_eq: + "n \ p \ p' = + (if p = slot then p' = 0 + else if p = mdbPrev s_node + then m \ p \ slot \ p' = mdbNext s_node + else m \ p \ p')" + apply (rule iffI) + apply (erule n_nextD) + apply (clarsimp simp: mdb_next_unfold split: if_split_asm) + apply (simp add: n_def modify_map_if slot) + apply hypsubst_thin + apply (case_tac z) + apply simp + apply (drule m_next) + apply clarsimp + apply (case_tac z) + apply simp + apply (drule m_next) + apply clarsimp + done + +lemma n_prev_eq: + "n \ p \ p' = + (if p' = slot then p = 0 + else if p' = mdbNext s_node + then m \ slot \ p' \ p = mdbPrev s_node + else m \ p \ p')" + apply (rule iffI) + apply (clarsimp simp: mdb_prev_def split del: if_split cong: if_cong) + apply (case_tac z) + apply (clarsimp split del: if_split) + apply (drule n_prev) + apply (elim exE conjE) + apply (simp split: if_split_asm) + apply (frule dlist_nextD [OF m_slot_next]) + apply (clarsimp simp: mdb_prev_def) + apply (clarsimp simp: mdb_prev_def split: if_split_asm) + apply (simp add: n_def modify_map_if slot) + apply hypsubst_thin + apply (case_tac z) + apply clarsimp + apply (drule m_prev) + apply clarsimp + apply (case_tac z) + apply simp + apply (drule m_prev) + apply clarsimp + done + +lemma valid_dlist_n: + "valid_dlist n" using dlist + apply (clarsimp simp: valid_dlist_def2 [OF no_0_n]) + apply (simp add: n_next_eq n_prev_eq m_slot_next m_slot_prev cong: if_cong) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp simp: next_slot_prev prev_slot_next) + apply (fastforce dest!: dlist_prev_src_unique) + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: valid_dlist_def2 [OF no_0]) + apply (case_tac "mdbNext s_node = 0") + apply simp + apply (subgoal_tac "m \ slot \ c'") + prefer 2 + apply fastforce + apply (clarsimp simp: mdb_next_unfold slot) + apply (frule next_slot_prev) + apply (drule (1) dlist_prev_src_unique, simp) + apply simp + apply clarsimp + apply (rule conjI, clarsimp) + apply (fastforce dest: dlist_next_src_unique) + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: valid_dlist_def2 [OF no_0]) + apply (clarsimp simp: mdb_prev_def slot) + apply (clarsimp simp: valid_dlist_def2 [OF no_0]) + done + +lemma caps_contained_n: + "caps_contained' n" + using valid + apply (clarsimp simp: valid_mdb_ctes_def caps_contained'_def) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (erule disjE, clarsimp) + apply clarsimp + apply fastforce + done + +lemma chunked: + "mdb_chunked m" + using valid by (simp add: valid_mdb_ctes_def) + 
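The preservation lemmas that follow all reduce to the definition of n above, which is the usual doubly-linked-list unlinking: the predecessor of slot has its mdbNext redirected to mdbNext s_node, the successor has its mdbPrev redirected to mdbPrev s_node and absorbs the slot's mdbFirstBadged flag, and slot itself is left holding NullCap with the null MDB node. As a reading aid only, a minimal Haskell sketch of that surgery on just the node fields is given below; it is not the seL4 Haskell spec, and Node, unlink and the Data.Map representation are invented purely for illustration.

import qualified Data.Map as Map

type Ptr = Integer     -- 0 plays the role of nullPointer

data Node = Node { prevP :: Ptr, nextP :: Ptr, firstBadged :: Bool }
  deriving (Show, Eq)

-- Mirror of the nested modify_maps in the definition of n:
-- redirect the predecessor, patch the successor, then null out the slot.
unlink :: Ptr -> Map.Map Ptr Node -> Map.Map Ptr Node
unlink slot m = case Map.lookup slot m of
  Nothing -> m
  Just s  -> Map.insert slot (Node 0 0 False)                      -- slot keeps no links; its cap becomes NullCap in n
           . Map.adjust (\q -> q { prevP = prevP s
                                 , firstBadged = firstBadged q || firstBadged s })
                        (nextP s)                                   -- successor points back past slot
           . Map.adjust (\p -> p { nextP = nextP s }) (prevP s)     -- predecessor points forward past slot
           $ m

The lemmas n_next, n_prev, n_cap and n_badged above are the pointwise reading of exactly this update.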
+lemma valid_badges: + "valid_badges m" + using valid .. + +lemma valid_badges_n: + "valid_badges n" +proof - + from valid_badges + show ?thesis + apply (simp add: valid_badges_def2) + apply clarsimp + apply (drule_tac p=p in n_cap) + apply (frule n_cap) + apply (drule n_badged) + apply (clarsimp simp: n_next_eq) + apply (case_tac "p=slot", simp) + apply clarsimp + apply (case_tac "p'=slot", simp) + apply clarsimp + apply (case_tac "p = mdbPrev s_node") + apply clarsimp + apply (insert slot)[1] + (* using mdb_chunked to show cap in between is same as on either side *) + apply (subgoal_tac "capMasterCap s_cap = capMasterCap cap'") + prefer 2 + apply (thin_tac "\p. P p" for P) + apply (drule mdb_chunked2D[OF chunked]) + apply (fastforce simp: mdb_next_unfold) + apply assumption+ + apply (simp add: sameRegionAs_def3) + apply (intro disjI1) + apply (fastforce simp:isCap_simps capMasterCap_def split:capability.splits) + apply clarsimp + apply clarsimp + apply (erule sameRegionAsE, auto simp: isCap_simps capMasterCap_def split:capability.splits)[1] + (* instantiating known valid_badges on both sides to transitively + give the link we need *) + apply (frule_tac x="mdbPrev s_node" in spec) + apply simp + apply (drule spec, drule spec, drule spec, + drule(1) mp, drule(1) mp) + apply simp + apply (drule_tac x=slot in spec) + apply (drule_tac x="mdbNext s_node" in spec) + apply simp + apply (drule mp, simp(no_asm) add: mdb_next_unfold) + apply simp + apply (cases "capBadge s_cap", simp_all)[1] + apply clarsimp + apply (case_tac "p' = mdbNext s_node") + apply clarsimp + apply (frule vdlist_next_src_unique[where y=slot]) + apply (simp add: mdb_next_unfold slot) + apply clarsimp + apply (rule dlist) + apply clarsimp + apply clarsimp + apply fastforce + done +qed + +lemma to_slot_eq [simp]: + "m \ p \ slot = (p = mdbPrev s_node \ p \ 0)" + apply (rule iffI) + apply (frule dlist_nextD0, simp) + apply (clarsimp simp: mdb_prev_def slot mdb_next_unfold) + apply (clarsimp intro!: prev_slot_next) + done + +lemma n_parent_of: + "\ n \ p parentOf p'; p \ slot; p' \ slot \ \ m \ p parentOf p'" + apply (clarsimp simp: parentOf_def) + apply (case_tac cte, case_tac cte') + apply clarsimp + apply (frule_tac p=p in n_cap) + apply (frule_tac p=p in n_badged) + apply (drule_tac p=p in n_revokable) + apply (clarsimp) + apply (frule_tac p=p' in n_cap) + apply (frule_tac p=p' in n_badged) + apply (drule_tac p=p' in n_revokable) + apply (clarsimp split: if_split_asm; + clarsimp simp: isMDBParentOf_def isCap_simps split: if_split_asm cong: if_cong) + done + +lemma m_parent_of: + "\ m \ p parentOf p'; p \ slot; p' \ slot; p\p'; p'\mdbNext s_node \ \ n \ p parentOf p'" + apply (clarsimp simp add: parentOf_def) + apply (case_tac cte, case_tac cte') + apply clarsimp + apply (frule_tac p=p in m_cap) + apply (frule_tac p=p in m_badged) + apply (drule_tac p=p in m_revokable) + apply clarsimp + apply (frule_tac p=p' in m_cap) + apply (frule_tac p=p' in m_badged) + apply (drule_tac p=p' in m_revokable) + apply clarsimp + apply (simp split: if_split_asm; + clarsimp simp: isMDBParentOf_def isCap_simps split: if_split_asm cong: if_cong) + done + +lemma m_parent_of_next: + "\ m \ p parentOf mdbNext s_node; m \ p parentOf slot; p \ slot; p\mdbNext s_node \ + \ n \ p parentOf mdbNext s_node" + using slot + apply (clarsimp simp add: parentOf_def) + apply (case_tac cte'a, case_tac cte) + apply clarsimp + apply (frule_tac p=p in m_cap) + apply (frule_tac p=p in m_badged) + apply (drule_tac p=p in m_revokable) + apply (frule_tac p="mdbNext 
s_node" in m_cap) + apply (frule_tac p="mdbNext s_node" in m_badged) + apply (drule_tac p="mdbNext s_node" in m_revokable) + apply (frule_tac p="slot" in m_cap) + apply (frule_tac p="slot" in m_badged) + apply (drule_tac p="slot" in m_revokable) + apply (clarsimp simp: isMDBParentOf_def isCap_simps split: if_split_asm cong: if_cong) + done + +lemma parency_n: + assumes "n \ p \ p'" + shows "m \ p \ p' \ p \ slot \ p' \ slot" +using assms +proof induct + case (direct_parent c') + moreover + hence "p \ slot" + by (clarsimp simp: n_next_eq) + moreover + from direct_parent + have "c' \ slot" + by (clarsimp simp add: n_next_eq split: if_split_asm) + ultimately + show ?case + apply simp + apply (simp add: n_next_eq split: if_split_asm) + prefer 2 + apply (erule (1) subtree.direct_parent) + apply (erule (2) n_parent_of) + apply clarsimp + apply (frule n_parent_of, simp, simp) + apply (rule subtree.trans_parent[OF _ m_slot_next], simp_all) + apply (rule subtree.direct_parent) + apply (erule prev_slot_next) + apply simp + apply (clarsimp simp: parentOf_def slot) + apply (case_tac cte'a) + apply (case_tac ctea) + apply clarsimp + apply (frule(2) mdb_chunked2D [OF chunked prev_slot_next m_slot_next]) + apply (clarsimp simp: isMDBParentOf_CTE) + apply simp + apply (simp add: slot) + apply (clarsimp simp add: isMDBParentOf_CTE) + apply (insert valid_badges) + apply (simp add: valid_badges_def2) + apply (drule spec[where x=slot]) + apply (drule spec[where x="mdbNext s_node"]) + apply (simp add: slot m_slot_next) + apply (insert valid_badges) + apply (simp add: valid_badges_def2) + apply (drule spec[where x="mdbPrev s_node"]) + apply (drule spec[where x=slot]) + apply (simp add: slot prev_slot_next) + apply (case_tac cte, case_tac cte') + apply (rename_tac cap'' node'') + apply (clarsimp simp: isMDBParentOf_CTE) + apply (frule n_cap, drule n_badged) + apply (frule n_cap, drule n_badged) + apply clarsimp + apply (case_tac cap'', simp_all add: isCap_simps)[1] + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + done +next + case (trans_parent c c') + moreover + hence "p \ slot" + by (clarsimp simp: n_next_eq) + moreover + from trans_parent + have "c' \ slot" + by (clarsimp simp add: n_next_eq split: if_split_asm) + ultimately + show ?case + apply clarsimp + apply (simp add: n_next_eq split: if_split_asm) + prefer 2 + apply (erule (2) subtree.trans_parent) + apply (erule n_parent_of, simp, simp) + apply clarsimp + apply (rule subtree.trans_parent) + apply (rule subtree.trans_parent, assumption) + apply (rule prev_slot_next) + apply clarsimp + apply clarsimp + apply (frule n_parent_of, simp, simp) + apply (clarsimp simp: parentOf_def slot) + apply (case_tac cte'a) + apply (rename_tac cap node) + apply (case_tac ctea) + apply clarsimp + apply (subgoal_tac "sameRegionAs cap s_cap") + prefer 2 + apply (insert chunked)[1] + apply (simp add: mdb_chunked_def) + apply (erule_tac x="p" in allE) + apply (erule_tac x="mdbNext s_node" in allE) + apply simp + apply (drule isMDBParent_sameRegion)+ + apply clarsimp + apply (subgoal_tac "m \ p \\<^sup>+ slot") + prefer 2 + apply (rule trancl_trans) + apply (erule subtree_mdb_next) + apply (rule r_into_trancl) + apply (rule prev_slot_next) + apply clarsimp + apply (subgoal_tac "m \ p \\<^sup>+ mdbNext s_node") + prefer 2 + apply (erule trancl_trans) + apply fastforce + apply simp + apply (erule impE) + apply clarsimp + apply clarsimp + apply (thin_tac "s \ t" for s t) + apply (simp add: is_chunk_def) + apply 
(erule_tac x=slot in allE) + apply (erule impE, fastforce) + apply (erule impE, fastforce) + apply (clarsimp simp: slot) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (insert valid_badges, simp add: valid_badges_def2) + apply (drule spec[where x=slot], drule spec[where x="mdbNext s_node"]) + apply (simp add: slot m_slot_next) + apply (case_tac cte, case_tac cte') + apply (rename_tac cap'' node'') + apply (clarsimp simp: isMDBParentOf_CTE) + apply (frule n_cap, drule n_badged) + apply (frule n_cap, drule n_badged) + apply (clarsimp split: if_split_asm) + apply (drule subtree_mdb_next) + apply (drule no_loops_tranclE[OF no_loops]) + apply (erule notE, rule trancl_into_rtrancl) + apply (rule trancl.intros(2)[OF _ m_slot_next]) + apply (rule trancl.intros(1), rule prev_slot_next) + apply simp + apply (case_tac cap'', simp_all add: isCap_simps)[1] + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply (rule m_slot_next) + apply simp + apply (erule n_parent_of, simp, simp) + done +qed + +lemma parency_m: + assumes "m \ p \ p'" + shows "p \ slot \ (if p' \ slot then n \ p \ p' else m \ p \ mdbNext s_node \ n \ p \ mdbNext s_node)" +using assms +proof induct + case (direct_parent c) + thus ?case + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_next_eq) + apply clarsimp + apply (subgoal_tac "mdbPrev s_node \ 0") + prefer 2 + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (erule m_parent_of, simp, simp) + apply clarsimp + apply clarsimp + apply (drule dlist_next_src_unique) + apply fastforce + apply clarsimp + apply simp + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_next_eq) + apply (drule subtree_parent) + apply (clarsimp simp: parentOf_def) + apply (drule subtree_parent) + apply (erule (1) m_parent_of_next) + apply clarsimp + apply clarsimp + done +next + case (trans_parent c c') + thus ?case + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (cases "c=slot") + apply simp + apply (erule impE) + apply (erule subtree.trans_parent) + apply fastforce + apply (clarsimp simp: slot mdb_next_unfold) + apply (clarsimp simp: slot mdb_next_unfold) + apply (clarsimp simp: slot mdb_next_unfold) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n_next_eq) + apply clarsimp + apply (subgoal_tac "mdbPrev s_node \ 0") + prefer 2 + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (erule m_parent_of, simp, simp) + apply clarsimp + apply (drule subtree_mdb_next) + apply (drule trancl_trans) + apply (erule r_into_trancl) + apply simp + apply clarsimp + apply (drule dlist_next_src_unique) + apply fastforce + apply clarsimp + apply simp + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n_next_eq) + apply clarsimp + apply (rule m_parent_of_next, erule subtree_parent, assumption, assumption) + apply clarsimp + done +qed + +lemma parency: + "n \ p \ p' = (p \ slot \ p' \ slot \ m \ p \ p')" + by (auto dest!: parency_n parency_m) + +lemma descendants: + "descendants_of' p n = + (if p = slot then {} else descendants_of' p m - {slot})" + by (auto simp add: parency descendants_of'_def) + +lemma n_tranclD: + "n \ p \\<^sup>+ p' \ m \ p \\<^sup>+ p' \ p' \ slot" + apply (erule trancl_induct) + apply (clarsimp simp add: n_next_eq split: 
if_split_asm) + apply (rule mdb_chain_0D) + apply (rule chain) + apply (clarsimp simp: slot) + apply (blast intro: trancl_trans prev_slot_next) + apply fastforce + apply (clarsimp simp: n_next_eq split: if_split_asm) + apply (erule trancl_trans) + apply (blast intro: trancl_trans prev_slot_next) + apply (fastforce intro: trancl_trans) + done + +lemma m_tranclD: + "m \ p \\<^sup>+ p' \ + if p = slot then n \ mdbNext s_node \\<^sup>* p' + else if p' = slot then n \ p \\<^sup>+ mdbNext s_node + else n \ p \\<^sup>+ p'" + using no_0_n + apply - + apply (erule trancl_induct) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule r_into_trancl) + apply (clarsimp simp: n_next_eq) + apply clarsimp + apply (rule conjI) + apply (insert m_slot_next)[1] + apply (clarsimp simp: mdb_next_unfold) + apply clarsimp + apply (rule r_into_trancl) + apply (clarsimp simp: n_next_eq) + apply (rule context_conjI) + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule prev_slot_next) + apply (drule trancl_trans, erule r_into_trancl) + apply simp + apply clarsimp + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n_next_eq) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule rtrancl_trans) + apply (rule r_into_rtrancl) + apply (simp add: n_next_eq) + apply (rule conjI) + apply clarsimp + apply (rule context_conjI) + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + apply clarsimp + apply clarsimp + apply (simp split: if_split_asm) + apply (clarsimp simp: mdb_next_unfold slot) + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (clarsimp simp add: n_next_eq) + apply (rule context_conjI) + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma n_trancl_eq: + "n \ p \\<^sup>+ p' = (m \ p \\<^sup>+ p' \ (p = slot \ p' = 0) \ p' \ slot)" + using no_0_n + apply - + apply (rule iffI) + apply (frule n_tranclD) + apply clarsimp + apply (drule tranclD) + apply (clarsimp simp: n_next_eq) + apply (simp add: rtrancl_eq_or_trancl) + apply clarsimp + apply (drule m_tranclD) + apply (simp split: if_split_asm) + apply (rule r_into_trancl) + apply (simp add: n_next_eq) + done + +lemma n_rtrancl_eq: + "n \ p \\<^sup>* p' = + (m \ p \\<^sup>* p' \ + (p = slot \ p' = 0 \ p' = slot) \ + (p' = slot \ p = slot))" + by (auto simp: rtrancl_eq_or_trancl n_trancl_eq) + +lemma mdb_chain_0_n: + "mdb_chain_0 n" + using chain + apply (clarsimp simp: mdb_chain_0_def) + apply (drule bspec) + apply (fastforce simp: n_def modify_map_if split: if_split_asm) + apply (simp add: n_trancl_eq) + done + +lemma mdb_chunked_n: + "mdb_chunked n" + using chunked + apply (clarsimp simp: mdb_chunked_def) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (case_tac "p=slot", clarsimp) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (clarsimp simp: is_chunk_def) + apply (simp add: n_trancl_eq n_rtrancl_eq) + apply (rule conjI) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + done + +lemma untyped_mdb_n: + "untyped_mdb' n" + using untyped_mdb + apply 
(simp add: untyped_mdb'_def descendants_of'_def parency) + apply clarsimp + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (case_tac "p=slot", simp) + apply clarsimp + done + +lemma untyped_inc_n: + "untyped_inc' n" + using untyped_inc + apply (simp add: untyped_inc'_def descendants_of'_def parency) + apply clarsimp + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (case_tac "p=slot", simp) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply simp + done + +lemmas vn_prev [dest!] = valid_nullcaps_prev [OF _ slot no_0 dlist nullcaps] +lemmas vn_next [dest!] = valid_nullcaps_next [OF _ slot no_0 dlist nullcaps] + +lemma nullcaps_n: "valid_nullcaps n" +proof - + from valid have "valid_nullcaps m" .. + thus ?thesis + apply (clarsimp simp: valid_nullcaps_def nullMDBNode_def nullPointer_def) + apply (frule n_cap) + apply (frule n_next) + apply (frule n_badged) + apply (frule n_revokable) + apply (drule n_prev) + apply (case_tac n) + apply (insert slot) + apply (fastforce split: if_split_asm) + done +qed + +lemma ut_rev_n: "ut_revocable' n" + apply(insert valid) + apply(clarsimp simp: ut_revocable'_def) + apply(frule n_cap) + apply(drule n_revokable) + apply(clarsimp simp: isCap_simps split: if_split_asm) + apply(simp add: valid_mdb_ctes_def ut_revocable'_def) + done + +lemma class_links_n: "class_links n" + using valid slot + apply (clarsimp simp: valid_mdb_ctes_def class_links_def) + apply (case_tac cte, case_tac cte') + apply (drule n_nextD) + apply (clarsimp simp: split: if_split_asm) + apply (simp add: no_0_n) + apply (drule n_cap)+ + apply clarsimp + apply (frule spec[where x=slot], + drule spec[where x="mdbNext s_node"], + simp, simp add: m_slot_next) + apply (drule spec[where x="mdbPrev s_node"], + drule spec[where x=slot], simp) + apply (drule n_cap)+ + apply clarsimp + apply (fastforce split: if_split_asm) + done + +lemma distinct_zombies_m: "distinct_zombies m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma distinct_zombies_n[simp]: + "distinct_zombies n" + using distinct_zombies_m + apply (simp add: n_def distinct_zombies_nonCTE_modify_map) + apply (subst modify_map_apply[where p=slot]) + apply (simp add: modify_map_def slot) + apply simp + apply (rule distinct_zombies_sameMasterE) + apply (simp add: distinct_zombies_nonCTE_modify_map) + apply (simp add: modify_map_def slot) + apply simp + done + +lemma irq_control_n [simp]: "irq_control n" + using slot + apply (clarsimp simp: irq_control_def) + apply (frule n_revokable) + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp simp: if_split_asm) + apply (erule (1) irq_controlD, rule irq_control) + done + +lemma reply_masters_rvk_fb_m: "reply_masters_rvk_fb m" + using valid by auto + +lemma reply_masters_rvk_fb_n [simp]: "reply_masters_rvk_fb n" + using reply_masters_rvk_fb_m + apply (simp add: reply_masters_rvk_fb_def n_def + ball_ran_modify_map_eq + modify_map_comp[symmetric]) + apply (subst ball_ran_modify_map_eq) + apply (frule bspec, rule ranI, rule slot) + apply (simp add: nullMDBNode_def isCap_simps modify_map_def + slot) + apply (subst ball_ran_modify_map_eq) + apply (clarsimp simp add: modify_map_def) + apply fastforce + apply (simp add: ball_ran_modify_map_eq) + done + +lemma vmdb_n: "valid_mdb_ctes n" + by (simp add: valid_mdb_ctes_def valid_dlist_n + no_0_n mdb_chain_0_n valid_badges_n + caps_contained_n mdb_chunked_n + 
untyped_mdb_n untyped_inc_n + nullcaps_n ut_rev_n class_links_n) + +end + +context begin interpretation Arch . +crunches postCapDeletion, clearUntypedFreeIndex + for ctes_of[wp]: "\s. P (ctes_of s)" + +lemma emptySlot_mdb [wp]: + "\valid_mdb'\ + emptySlot sl opt + \\_. valid_mdb'\" + unfolding emptySlot_def + apply (simp only: case_Null_If valid_mdb'_def) + apply (wp updateCap_ctes_of_wp getCTE_wp' + opt_return_pres_lift | simp add: cte_wp_at_ctes_of)+ + apply (clarsimp) + apply (case_tac cte) + apply (rename_tac cap node) + apply (simp) + apply (subgoal_tac "mdb_empty (ctes_of s) sl cap node") + prefer 2 + apply (rule mdb_empty.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro) + apply (simp add: valid_mdb_ctes_def) + apply (rule mdb_ptr_axioms.intro) + apply (simp add: cte_wp_at_ctes_of) + apply (rule conjI, clarsimp simp: valid_mdb_ctes_def) + apply (erule mdb_empty.vmdb_n[unfolded const_def]) + done +end + +lemma if_live_then_nonz_cap'_def2: + "if_live_then_nonz_cap' = + (\s. \ptr. ko_wp_at' live' ptr s \ + (\p zr. (option_map zobj_refs' o cteCaps_of s) p = Some zr \ ptr \ zr))" + by (fastforce simp: if_live_then_nonz_cap'_def ex_nonz_cap_to'_def cte_wp_at_ctes_of + cteCaps_of_def) + +lemma updateMDB_ko_wp_at_live[wp]: + "\\s. P (ko_wp_at' live' p' s)\ + updateMDB p m + \\rv s. P (ko_wp_at' live' p' s)\" + unfolding updateMDB_def Let_def + apply (rule hoare_pre, wp) + apply simp + done + +lemma updateCap_ko_wp_at_live[wp]: + "\\s. P (ko_wp_at' live' p' s)\ + updateCap p cap + \\rv s. P (ko_wp_at' live' p' s)\" + unfolding updateCap_def + by wp + +primrec + threadCapRefs :: "capability \ machine_word set" +where + "threadCapRefs (ThreadCap r) = {r}" +| "threadCapRefs (ReplyCap t m x) = {}" +| "threadCapRefs NullCap = {}" +| "threadCapRefs (UntypedCap d r n i) = {}" +| "threadCapRefs (EndpointCap r badge x y z t) = {}" +| "threadCapRefs (NotificationCap r badge x y) = {}" +| "threadCapRefs (CNodeCap r b g gsz) = {}" +| "threadCapRefs (Zombie r b n) = {}" +| "threadCapRefs (ArchObjectCap ac) = {}" +| "threadCapRefs (IRQHandlerCap irq) = {}" +| "threadCapRefs (IRQControlCap) = {}" +| "threadCapRefs (DomainCap) = {}" + +definition + "isFinal cap p m \ + \isUntypedCap cap \ + (\p' c. m p' = Some c \ + p \ p' \ \isUntypedCap c \ + \ sameObjectAs cap c)" + +lemma not_FinalE: + "\ \ isFinal cap sl cps; isUntypedCap cap \ P; + \p c. \ cps p = Some c; p \ sl; \ isUntypedCap c; sameObjectAs cap c \ \ P + \ \ P" + by (fastforce simp: isFinal_def) + +definition + "removeable' sl \ \s cap. + (\p. p \ sl \ cte_wp_at' (\cte. capMasterCap (cteCap cte) = capMasterCap cap) p s) + \ ((\p \ cte_refs' cap (irq_node' s). p \ sl \ cte_wp_at' (\cte. cteCap cte = NullCap) p s) + \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s))" + +lemma not_Final_removeable: + "\ isFinal cap sl (cteCaps_of s) + \ removeable' sl s cap" + apply (erule not_FinalE) + apply (clarsimp simp: removeable'_def isCap_simps) + apply (clarsimp simp: cteCaps_of_def sameObjectAs_def2 removeable'_def + cte_wp_at_ctes_of) + apply fastforce + done + +context begin interpretation Arch . +crunch ko_wp_at'[wp]: postCapDeletion "\s. P (ko_wp_at' P' p s)" +crunch cteCaps_of[wp]: postCapDeletion "\s. P (cteCaps_of s)" + (simp: cteCaps_of_def o_def) +end + +crunch ko_at_live[wp]: clearUntypedFreeIndex "\s. P (ko_wp_at' live' ptr s)" + +lemma clearUntypedFreeIndex_cteCaps_of[wp]: + "\\s. P (cteCaps_of s)\ + clearUntypedFreeIndex sl \\y s. P (cteCaps_of s)\" + by (simp add: cteCaps_of_def, wp) + +lemma emptySlot_iflive'[wp]: + "\\s. 
if_live_then_nonz_cap' s \ cte_wp_at' (\cte. removeable' sl s (cteCap cte)) sl s\ + emptySlot sl opt + \\rv. if_live_then_nonz_cap'\" + apply (simp add: emptySlot_def case_Null_If if_live_then_nonz_cap'_def2 + del: comp_apply) + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + getCTE_wp opt_return_pres_lift + clearUntypedFreeIndex_ctes_of + clearUntypedFreeIndex_cteCaps_of + hoare_vcg_ex_lift + | wp (once) hoare_vcg_imp_lift + | simp add: cte_wp_at_ctes_of del: comp_apply)+ + apply (clarsimp simp: modify_map_same imp_conjR[symmetric]) + apply (drule spec, drule(1) mp) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def split: if_split_asm) + apply (case_tac "p \ sl") + apply blast + apply (simp add: removeable'_def cteCaps_of_def) + apply (erule disjE) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def + dest!: capMaster_same_refs) + apply fastforce + apply clarsimp + apply (drule(1) bspec) + apply (clarsimp simp: ko_wp_at'_def) + done + +lemma setIRQState_irq_node'[wp]: + "\\s. P (irq_node' s)\ setIRQState state irq \\_ s. P (irq_node' s)\" + apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def) + apply wp + apply simp + done + +context begin interpretation Arch . +crunch irq_node'[wp]: emptySlot "\s. P (irq_node' s)" +end + +lemma emptySlot_ifunsafe'[wp]: + "\\s. if_unsafe_then_cap' s \ cte_wp_at' (\cte. removeable' sl s (cteCap cte)) sl s\ + emptySlot sl opt + \\rv. if_unsafe_then_cap'\" + apply (simp add: ifunsafe'_def3) + apply (rule hoare_pre, rule hoare_use_eq_irq_node'[OF emptySlot_irq_node']) + apply (simp add: emptySlot_def case_Null_If) + apply (wp opt_return_pres_lift | simp add: o_def)+ + apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of)+ + apply (clarsimp simp: tree_cte_cteCap_eq[unfolded o_def] + modify_map_same + modify_map_comp[symmetric] + split: option.split_asm if_split_asm + dest!: modify_map_K_D) + apply (clarsimp simp: modify_map_def) + apply (drule_tac x=cref in spec, clarsimp) + apply (case_tac "cref' \ sl") + apply (rule_tac x=cref' in exI) + apply (clarsimp simp: modify_map_def) + apply (simp add: removeable'_def) + apply (erule disjE) + apply (clarsimp simp: modify_map_def) + apply (subst(asm) tree_cte_cteCap_eq[unfolded o_def]) + apply (clarsimp split: option.split_asm dest!: capMaster_same_refs) + apply fastforce + apply clarsimp + apply (drule(1) bspec) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + done + +lemmas ctes_of_valid'[elim] = ctes_of_valid_cap''[where r=cte for cte] + +crunch valid_idle'[wp]: setInterruptState "valid_idle'" + (simp: valid_idle'_def) + +context begin interpretation Arch . +crunch valid_idle'[wp]: emptySlot "valid_idle'" +crunches deletedIRQHandler, getSlotCap, clearUntypedFreeIndex, updateMDB, getCTE, updateCap + for ksArch[wp]: "\s. P (ksArchState s)" +crunch ksIdle[wp]: emptySlot "\s. P (ksIdleThread s)" +crunch gsMaxObjectSize[wp]: emptySlot "\s. P (gsMaxObjectSize s)" +end + +lemma emptySlot_cteCaps_of: + "\\s. P ((cteCaps_of s)(p \ NullCap))\ + emptySlot p opt + \\rv s. P (cteCaps_of s)\" + apply (simp add: emptySlot_def case_Null_If) + apply (wp opt_return_pres_lift getCTE_cteCap_wp + clearUntypedFreeIndex_cteCaps_of) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of) + apply (auto elim!: rsubst[where P=P] + simp: modify_map_def fun_upd_def[symmetric] o_def + fun_upd_idem cteCaps_of_def + split: option.splits) + done + +context begin interpretation Arch . + +crunch cteCaps_of[wp]: deletedIRQHandler "\s. 
P (cteCaps_of s)" + +lemma deletedIRQHandler_valid_global_refs[wp]: + "\valid_global_refs'\ deletedIRQHandler irq \\rv. valid_global_refs'\" + apply (clarsimp simp: valid_global_refs'_def global_refs'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF deletedIRQHandler_irq_node']) + apply (rule hoare_use_eq [where f=ksIdleThread, OF deletedIRQHandler_ksIdle]) + apply (rule hoare_use_eq [where f=ksArchState, OF deletedIRQHandler_ksArch]) + apply (rule hoare_use_eq[where f="gsMaxObjectSize"], wp) + apply (simp add: valid_refs'_cteCaps valid_cap_sizes_cteCaps) + apply (rule deletedIRQHandler_cteCaps_of) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: valid_refs'_cteCaps valid_cap_sizes_cteCaps ball_ran_eq) + done + +lemma clearUntypedFreeIndex_valid_global_refs[wp]: + "\valid_global_refs'\ clearUntypedFreeIndex irq \\rv. valid_global_refs'\" + apply (clarsimp simp: valid_global_refs'_def global_refs'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF clearUntypedFreeIndex_irq_node']) + apply (rule hoare_use_eq [where f=ksIdleThread, OF clearUntypedFreeIndex_ksIdle]) + apply (rule hoare_use_eq [where f=ksArchState, OF clearUntypedFreeIndex_ksArch]) + apply (rule hoare_use_eq[where f="gsMaxObjectSize"], wp) + apply (simp add: valid_refs'_cteCaps valid_cap_sizes_cteCaps) + apply (rule clearUntypedFreeIndex_cteCaps_of) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: valid_refs'_cteCaps valid_cap_sizes_cteCaps ball_ran_eq) + done + +crunch valid_global_refs[wp]: global.postCapDeletion "valid_global_refs'" + +lemma emptySlot_valid_global_refs[wp]: + "\valid_global_refs' and cte_at' sl\ emptySlot sl opt \\rv. valid_global_refs'\" + apply (clarsimp simp: emptySlot_def) + apply (wpsimp wp: getCTE_wp hoare_drop_imps hoare_vcg_ex_lift simp: cte_wp_at_ctes_of) + apply (clarsimp simp: valid_global_refs'_def global_refs'_def) + apply (frule(1) cte_at_valid_cap_sizes_0) + apply (clarsimp simp: valid_refs'_cteCaps valid_cap_sizes_cteCaps ball_ran_eq) + done +end + +lemmas doMachineOp_irq_handlers[wp] + = valid_irq_handlers_lift'' [OF doMachineOp_ctes doMachineOp_ksInterruptState] + +lemma deletedIRQHandler_irq_handlers'[wp]: + "\\s. valid_irq_handlers' s \ (IRQHandlerCap irq \ ran (cteCaps_of s))\ + deletedIRQHandler irq + \\rv. valid_irq_handlers'\" + apply (simp add: deletedIRQHandler_def setIRQState_def setInterruptState_def getInterruptState_def) + apply wp + apply (clarsimp simp: valid_irq_handlers'_def irq_issued'_def ran_def cteCaps_of_def) + done + +context begin interpretation Arch . + +lemma postCapDeletion_irq_handlers'[wp]: + "\\s. valid_irq_handlers' s \ (cap \ NullCap \ cap \ ran (cteCaps_of s))\ + postCapDeletion cap + \\rv. valid_irq_handlers'\" + by (wpsimp simp: Retype_H.postCapDeletion_def AARCH64_H.postCapDeletion_def) + +definition + "post_cap_delete_pre' cap sl cs \ case cap of + IRQHandlerCap irq \ irq \ maxIRQ \ (\sl'. sl \ sl' \ cs sl' \ Some cap) + | _ \ False" + +end + +crunch ksInterruptState[wp]: clearUntypedFreeIndex "\s. P (ksInterruptState s)" + +lemma emptySlot_valid_irq_handlers'[wp]: + "\\s. valid_irq_handlers' s + \ (\sl'. info \ NullCap \ sl' \ sl \ cteCaps_of s sl' \ Some info)\ + emptySlot sl info + \\rv. 
valid_irq_handlers'\" + apply (simp add: emptySlot_def case_Null_If) + apply (wp | wpc)+ + apply (unfold valid_irq_handlers'_def irq_issued'_def) + apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of + | wps clearUntypedFreeIndex_ksInterruptState)+ + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of ran_def modify_map_def + split: option.split) + apply auto + done + +declare setIRQState_irq_states' [wp] + +context begin interpretation Arch . +crunch irq_states' [wp]: emptySlot valid_irq_states' + +crunch no_0_obj' [wp]: emptySlot no_0_obj' + (wp: crunch_wps) + +end + +lemma deletedIRQHandler_irqs_masked'[wp]: + "\irqs_masked'\ deletedIRQHandler irq \\_. irqs_masked'\" + apply (simp add: deletedIRQHandler_def setIRQState_def getInterruptState_def setInterruptState_def) + apply (wp dmo_maskInterrupt) + apply (simp add: irqs_masked'_def) + done + +context begin interpretation Arch . (*FIXME: arch_split*) +crunch irqs_masked'[wp]: emptySlot "irqs_masked'" + +lemma setIRQState_umm: + "\\s. P (underlying_memory (ksMachineState s))\ + setIRQState irqState irq + \\_ s. P (underlying_memory (ksMachineState s))\" + by (simp add: setIRQState_def maskInterrupt_def + setInterruptState_def getInterruptState_def + | wp dmo_lift')+ + +crunch umm[wp]: emptySlot "\s. P (underlying_memory (ksMachineState s))" + (wp: setIRQState_umm) + +lemma emptySlot_vms'[wp]: + "\valid_machine_state'\ emptySlot slot irq \\_. valid_machine_state'\" + by (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + +crunch pspace_domain_valid[wp]: emptySlot "pspace_domain_valid" + +crunch ksDomSchedule[wp]: emptySlot "\s. P (ksDomSchedule s)" +crunch ksDomScheduleIdx[wp]: emptySlot "\s. P (ksDomScheduleIdx s)" + +lemma deletedIRQHandler_ct_not_inQ[wp]: + "\ct_not_inQ\ deletedIRQHandler irq \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF deletedIRQHandler_nosch]) + apply (rule hoare_weaken_pre) + apply (wps deletedIRQHandler_ct) + apply (simp add: deletedIRQHandler_def setIRQState_def) + apply (wp) + apply (simp add: comp_def) + done + +crunch ct_not_inQ[wp]: emptySlot "ct_not_inQ" + +crunch tcbDomain[wp]: emptySlot "obj_at' (\tcb. P (tcbDomain tcb)) t" + +lemma emptySlot_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ emptySlot sl opt \\_. ct_idle_or_in_cur_domain'\" + by (wp ct_idle_or_in_cur_domain'_lift2 tcb_in_cur_domain'_lift | simp)+ + +crunch gsUntypedZeroRanges[wp]: postCapDeletion "\s. P (gsUntypedZeroRanges s)" + (wp: crunch_wps simp: crunch_simps) + +lemma untypedZeroRange_modify_map_isUntypedCap: + "m sl = Some v \ \ isUntypedCap v \ \ isUntypedCap (f v) + \ (untypedZeroRange \\<^sub>m modify_map m sl f) = (untypedZeroRange \\<^sub>m m)" + by (simp add: modify_map_def map_comp_def fun_eq_iff untypedZeroRange_def) + +lemma emptySlot_untyped_ranges[wp]: + "\untyped_ranges_zero' and valid_objs' and valid_mdb'\ + emptySlot sl opt \\rv. 
untyped_ranges_zero'\" + apply (simp add: emptySlot_def case_Null_If) + apply (rule hoare_pre) + apply (rule bind_wp) + apply (rule untyped_ranges_zero_lift) + apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of + | wpc | simp add: clearUntypedFreeIndex_def updateTrackedFreeIndex_def + getSlotCap_def + split: option.split)+ + apply (clarsimp simp: modify_map_comp[symmetric] modify_map_same) + apply (case_tac "\ isUntypedCap (the (cteCaps_of s sl))") + apply (case_tac "the (cteCaps_of s sl)", + simp_all add: untyped_ranges_zero_inv_def + untypedZeroRange_modify_map_isUntypedCap isCap_simps)[1] + apply (clarsimp simp: isCap_simps untypedZeroRange_def modify_map_def) + apply (strengthen untyped_ranges_zero_fun_upd[mk_strg I E]) + apply simp + apply (simp add: untypedZeroRange_def isCap_simps) + done + +crunch valid_arch'[wp]: emptySlot valid_arch_state' + (wp: crunch_wps) + +crunches deletedIRQHandler, updateMDB, updateCap, clearUntypedFreeIndex + for valid_arch'[wp]: valid_arch_state' + (wp: valid_arch_state_lift' crunch_wps) + +crunches global.postCapDeletion + for valid_arch'[wp]: valid_arch_state' + +crunches emptySlot + for valid_bitmaps[wp]: valid_bitmaps + and tcbQueued_opt_pred[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and sched_projs[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + (wp: valid_bitmaps_lift) + +lemma emptySlot_invs'[wp]: + "\\s. invs' s \ cte_wp_at' (\cte. removeable' sl s (cteCap cte)) sl s + \ (info \ NullCap \ post_cap_delete_pre' info sl (cteCaps_of s) )\ + emptySlot sl info + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift cur_tcb_lift) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: post_cap_delete_pre'_def cteCaps_of_def + split: capability.split_asm arch_capability.split_asm) + by auto + +lemma deletedIRQHandler_corres: + "corres dc \ \ + (deleted_irq_handler irq) + (deletedIRQHandler irq)" + apply (simp add: deleted_irq_handler_def deletedIRQHandler_def) + apply (rule setIRQState_corres) + apply (simp add: irq_state_relation_def) + done + +lemma arch_postCapDeletion_corres: + "acap_relation cap cap' \ corres dc \ \ (arch_post_cap_deletion cap) (AARCH64_H.postCapDeletion cap')" + by (clarsimp simp: arch_post_cap_deletion_def AARCH64_H.postCapDeletion_def) + +lemma postCapDeletion_corres: + "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" + apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) + +lemma set_cap_trans_state: + "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" + apply (cases p) + apply (clarsimp simp add: set_cap_def in_monad set_object_def get_object_def) + apply (case_tac y) + apply (auto simp add: in_monad set_object_def well_formed_cnode_n_def split: if_split_asm) + done + +lemma clearUntypedFreeIndex_noop_corres: + "corres dc \ (cte_at' (cte_map slot)) + (return ()) (clearUntypedFreeIndex (cte_map slot))" + apply (simp add: clearUntypedFreeIndex_def) + apply (rule corres_guard_imp) + apply (rule corres_bind_return2) + apply (rule corres_symb_exec_r_conj[where P'="cte_at' (cte_map slot)"]) + apply (rule corres_trivial, simp) + apply (wp getCTE_wp' | wpc + | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ + apply (clarsimp simp: state_relation_def) 
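+ (* what remains is a no_fail obligation for clearUntypedFreeIndex, discharged via no_fail_getSlotCap below *)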
+ apply (rule no_fail_pre) + apply (wp no_fail_getSlotCap getCTE_wp' + | wpc | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ + done + +lemma clearUntypedFreeIndex_valid_pspace'[wp]: + "\valid_pspace'\ clearUntypedFreeIndex slot \\rv. valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (rule hoare_pre) + apply (wp | simp add: valid_mdb'_def)+ + done + +lemma emptySlot_corres: + "cap_relation info info' \ corres dc (einvs and cte_at slot) (invs' and cte_at' (cte_map slot)) + (empty_slot slot info) (emptySlot (cte_map slot) info')" + unfolding emptySlot_def empty_slot_def + apply (simp add: case_Null_If) + apply (rule corres_guard_imp) + apply (rule corres_split_noop_rhs[OF clearUntypedFreeIndex_noop_corres]) + apply (rule_tac R="\cap. einvs and cte_wp_at ((=) cap) slot" and + R'="\cte. valid_pspace' and cte_wp_at' ((=) cte) (cte_map slot)" in + corres_split[OF get_cap_corres]) + defer + apply (wp get_cap_wp getCTE_wp')+ + apply (simp add: cte_wp_at_ctes_of) + apply (wp hoare_vcg_imp_lift' clearUntypedFreeIndex_valid_pspace') + apply fastforce + apply (fastforce simp: cte_wp_at_ctes_of) + apply simp + apply (rule conjI, clarsimp) + defer + apply clarsimp + apply (rule conjI, clarsimp) + apply clarsimp + apply (simp only: bind_assoc[symmetric]) + apply (rule corres_underlying_split[where r'=dc, OF _ postCapDeletion_corres]) + defer + apply wpsimp+ + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (rule conjI, clarsimp) + apply (erule (2) valid_dlistEp) + apply simp + apply clarsimp + apply (erule (2) valid_dlistEn) + apply simp + apply (clarsimp simp: in_monad bind_assoc exec_gets) + apply (subgoal_tac "mdb_empty_abs a") + prefer 2 + apply (rule mdb_empty_abs.intro) + apply (rule vmdb_abs.intro) + apply fastforce + apply (frule mdb_empty_abs'.intro) + apply (simp add: mdb_empty_abs'.empty_slot_ext_det_def2 update_cdt_list_def set_cdt_list_def exec_gets set_cdt_def bind_assoc exec_get exec_put set_original_def modify_def del: fun_upd_apply | subst bind_def, simp, simp add: mdb_empty_abs'.empty_slot_ext_det_def2)+ + apply (simp add: put_def) + apply (simp add: exec_gets exec_get exec_put del: fun_upd_apply | subst bind_def)+ + apply (clarsimp simp: state_relation_def) + apply (drule updateMDB_the_lot, fastforce simp: pspace_relations_def, fastforce, fastforce) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + valid_mdb'_def valid_mdb_ctes_def) + apply (elim conjE) + apply (drule (4) updateMDB_the_lot, elim conjE) + apply clarsimp + apply (drule_tac s'=s''a and c=cap.NullCap in set_cap_not_quite_corres) + subgoal by simp + subgoal by simp + subgoal by simp + subgoal by fastforce + subgoal by fastforce + subgoal by fastforce + subgoal by fastforce + subgoal by fastforce + apply fastforce + subgoal by fastforce + subgoal by fastforce + subgoal by fastforce + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption + subgoal by simp + subgoal by simp + subgoal by simp + subgoal by simp + apply (rule refl) + apply clarsimp + apply (drule updateCap_stuff, elim conjE, erule (1) impE) + apply clarsimp + apply (drule updateMDB_the_lot, force simp: pspace_relations_def, assumption+, simp) + apply (rule bexI) + prefer 2 + apply (simp only: trans_state_update[symmetric]) + apply (rule set_cap_trans_state) + apply (rule set_cap_revokable_update) + apply (erule set_cap_cdt_update) + apply clarsimp + apply 
(thin_tac "ctes_of t = s" for t s)+ + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac rv') + apply (rename_tac s_cap s_node) + apply (subgoal_tac "cte_at slot a") + prefer 2 + apply (fastforce elim: cte_wp_at_weakenE) + apply (subgoal_tac "mdb_empty (ctes_of b) (cte_map slot) s_cap s_node") + prefer 2 + apply (rule mdb_empty.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro) + subgoal by (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def) + apply (rule mdb_ptr_axioms.intro) + subgoal by simp + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv) + apply (simp add: pspace_relations_def) + apply (rule conjI) + apply (clarsimp simp: data_at_def ghost_relation_typ_at set_cap_a_type_inv) + apply (rule conjI) + prefer 2 + apply (rule conjI) + apply (clarsimp simp: cdt_list_relation_def) + apply(frule invs_valid_pspace, frule invs_mdb) + apply(subgoal_tac "no_mloop (cdt a) \ finite_depth (cdt a)") + prefer 2 + subgoal by(simp add: finite_depth valid_mdb_def) + apply(subgoal_tac "valid_mdb_ctes (ctes_of b)") + prefer 2 + subgoal by(simp add: mdb_empty_def mdb_ptr_def vmdb_def) + apply(clarsimp simp: valid_pspace_def) + + apply(case_tac "cdt a slot") + apply(simp add: next_slot_eq[OF mdb_empty_abs'.next_slot_no_parent]) + apply(case_tac "next_slot (aa, bb) (cdt_list a) (cdt a)") + subgoal by (simp) + apply(clarsimp) + apply(frule(1) mdb_empty.n_next) + apply(clarsimp) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq) + apply(drule cte_at_next_slot) + apply(assumption)+ + apply(simp) + apply(subgoal_tac "(ab, bc) = slot") + prefer 2 + apply(drule_tac cte="CTE s_cap s_node" in valid_mdbD2') + subgoal by (clarsimp simp: valid_mdb_ctes_def no_0_def) + subgoal by (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply(clarsimp) + apply(rule cte_map_inj_eq) + apply(assumption) + apply(drule(3) cte_at_next_slot', assumption) + apply(assumption)+ + apply(simp) + apply(drule_tac p="(aa, bb)" in no_parent_not_next_slot) + apply(assumption)+ + apply(clarsimp) + + apply(simp add: next_slot_eq[OF mdb_empty_abs'.next_slot] split del: if_split) + apply(case_tac "next_slot (aa, bb) (cdt_list a) (cdt a)") + subgoal by (simp) + apply(case_tac "(aa, bb) = slot", simp) + apply(case_tac "next_slot (aa, bb) (cdt_list a) (cdt a) = Some slot") + apply(simp) + apply(case_tac "next_slot ac (cdt_list a) (cdt a)", simp) + apply(simp) + apply(frule(1) mdb_empty.n_next) + apply(clarsimp) + apply(erule_tac x=aa in allE', erule_tac x=bb in allE) + apply(erule_tac x=ac in allE, erule_tac x=bd in allE) + apply(clarsimp split: if_split_asm) + apply(drule(1) no_self_loop_next) + apply(simp) + apply(drule_tac cte="CTE cap' node'" in valid_mdbD1') + apply(fastforce simp: valid_mdb_ctes_def no_0_def) + subgoal by (simp add: valid_mdb'_def) + apply(clarsimp) + apply(simp) + apply(frule(1) mdb_empty.n_next) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(clarsimp split: if_split_asm) + apply(drule(1) no_self_loop_prev) + apply(clarsimp) + apply(drule_tac cte="CTE s_cap s_node" in valid_mdbD2') + apply(clarsimp simp: valid_mdb_ctes_def no_0_def) + apply clarify + apply(clarsimp) + apply(drule cte_map_inj_eq) + apply(drule(3) cte_at_next_slot') + apply(assumption)+ + apply(simp) + 
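(* case split on the remaining disjunction; cte_map injectivity (cte_map_inj_eq) finishes the nontrivial case *) +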
apply(erule disjE) + apply(drule cte_map_inj_eq) + apply(drule(3) cte_at_next_slot) + apply(assumption)+ + apply(simp) + subgoal by (simp) + apply (simp add: revokable_relation_def) + apply (clarsimp simp: in_set_cap_cte_at) + apply (rule conjI) + apply clarsimp + apply (drule(1) mdb_empty.n_revokable) + subgoal by clarsimp + apply clarsimp + apply (drule (1) mdb_empty.n_revokable) + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + apply (drule set_cap_caps_of_state_monad) + subgoal by (force simp: null_filter_def) + apply clarsimp + apply (subgoal_tac "cte_at (aa, bb) a") + prefer 2 + apply (drule null_filter_caps_of_stateD, erule cte_wp_cte_at) + apply (drule (2) cte_map_inj_ps, fastforce) + subgoal by simp + apply (clarsimp simp add: cdt_relation_def) + apply (subst mdb_empty_abs.descendants, assumption) + apply (subst mdb_empty.descendants, assumption) + apply clarsimp + apply (frule_tac p="(aa, bb)" in in_set_cap_cte_at) + apply clarsimp + apply (frule (2) cte_map_inj_ps, fastforce) + apply simp + apply (case_tac "slot \ descendants_of (aa,bb) (cdt a)") + apply (subst inj_on_image_set_diff) + apply (rule inj_on_descendants_cte_map) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + apply fastforce + subgoal by simp + subgoal by simp + apply simp + apply (subgoal_tac "cte_map slot \ descendants_of' (cte_map (aa,bb)) (ctes_of b)") + subgoal by simp + apply (erule_tac x=aa in allE, erule allE, erule (1) impE) + apply (drule_tac s="cte_map ` u" for u in sym) + apply clarsimp + apply (drule cte_map_inj_eq, assumption) + apply (erule descendants_of_cte_at, fastforce) + apply fastforce + apply fastforce + apply fastforce + apply simp + done + + + +text \Some facts about is_final_cap/isFinalCapability\ + +lemma isFinalCapability_inv: + "\P\ isFinalCapability cap \\_. P\" + apply (simp add: isFinalCapability_def Let_def + split del: if_split cong: if_cong) + apply (rule hoare_pre, wp) + apply (rule hoare_post_imp [where Q="\s. 
P"], simp) + apply wp + apply simp + done + +definition + final_matters' :: "capability \ bool" +where + "final_matters' cap \ case cap of + EndpointCap ref bdg s r g gr \ True + | NotificationCap ref bdg s r \ True + | ThreadCap ref \ True + | CNodeCap ref bits gd gs \ True + | Zombie ptr zb n \ True + | IRQHandlerCap irq \ True + | ArchObjectCap acap \ (case acap of + FrameCap ref rghts sz d mapdata \ False + | ASIDControlCap \ False + | _ \ True) + | _ \ False" + +lemma final_matters_Master: + "final_matters' (capMasterCap cap) = final_matters' cap" + by (simp add: capMasterCap_def split: capability.split arch_capability.split, + simp add: final_matters'_def) + +lemma final_matters_sameRegion_sameObject: + "final_matters' cap \ sameRegionAs cap cap' = sameObjectAs cap cap'" + apply (rule iffI) + apply (erule sameRegionAsE) + apply (simp add: sameObjectAs_def3) + apply (clarsimp simp: isCap_simps sameObjectAs_sameRegionAs final_matters'_def + split:capability.splits arch_capability.splits)+ + done + +lemma final_matters_sameRegion_sameObject2: + "\ final_matters' cap'; \ isUntypedCap cap; \ isIRQHandlerCap cap'; \ isArchIOPortCap cap' \ + \ sameRegionAs cap cap' = sameObjectAs cap cap'" + apply (rule iffI) + apply (erule sameRegionAsE) + apply (simp add: sameObjectAs_def3) + apply (fastforce simp: isCap_simps final_matters'_def) + apply simp + apply (clarsimp simp: final_matters'_def isCap_simps) + apply (clarsimp simp: final_matters'_def isCap_simps) + apply (clarsimp simp: final_matters'_def isCap_simps) + apply (erule sameObjectAs_sameRegionAs) + done + +lemma notFinal_prev_or_next: + "\ \ isFinal cap x (cteCaps_of s); mdb_chunked (ctes_of s); + valid_dlist (ctes_of s); no_0 (ctes_of s); + ctes_of s x = Some (CTE cap node); final_matters' cap \ + \ (\cap' node'. ctes_of s (mdbPrev node) = Some (CTE cap' node') + \ sameObjectAs cap cap') + \ (\cap' node'. ctes_of s (mdbNext node) = Some (CTE cap' node') + \ sameObjectAs cap cap')" + apply (erule not_FinalE) + apply (clarsimp simp: isCap_simps final_matters'_def) + apply (clarsimp simp: mdb_chunked_def cte_wp_at_ctes_of cteCaps_of_def + del: disjCI) + apply (erule_tac x=x in allE, erule_tac x=p in allE) + apply simp + apply (case_tac z, simp add: sameObjectAs_sameRegionAs) + apply (elim conjE disjE, simp_all add: is_chunk_def) + apply (rule disjI2) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + apply (drule spec[where x="mdbNext node"]) + apply simp + apply (drule mp[where P="ctes_of s \ x \\<^sup>+ mdbNext node"]) + apply (rule trancl.intros(1), simp add: mdb_next_unfold) + apply clarsimp + apply (drule rtranclD) + apply (erule disjE, clarsimp+) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold final_matters_sameRegion_sameObject) + apply (rule disjI1) + apply clarsimp + apply (drule tranclD2) + apply clarsimp + apply (frule vdlist_nextD0) + apply clarsimp + apply assumption + apply (clarsimp simp: mdb_prev_def) + apply (drule rtranclD) + apply (erule disjE, clarsimp+) + apply (drule spec, drule(1) mp) + apply (drule mp, rule trancl_into_rtrancl, erule trancl.intros(1)) + apply clarsimp + apply (drule iffD1 [OF final_matters_sameRegion_sameObject, rotated]) + apply (subst final_matters_Master[symmetric]) + apply (subst(asm) final_matters_Master[symmetric]) + apply (clarsimp simp: sameObjectAs_def3) + apply (clarsimp simp: sameObjectAs_def3) + done + +lemma isFinal: + "\\s. 
valid_mdb' s \ cte_wp_at' ((=) cte) x s + \ final_matters' (cteCap cte) + \ Q (isFinal (cteCap cte) x (cteCaps_of s)) s\ + isFinalCapability cte + \Q\" + unfolding isFinalCapability_def + apply (cases cte) + apply (rename_tac cap node) + apply (unfold Let_def) + apply (simp only: if_False) + apply (wp getCTE_wp') + apply (cases "mdbPrev (cteMDBNode cte) = nullPointer") + apply simp + apply (clarsimp simp: valid_mdb_ctes_def valid_mdb'_def + cte_wp_at_ctes_of) + apply (rule conjI, clarsimp simp: nullPointer_def) + apply (erule rsubst[where P="\x. Q x s" for s], simp) + apply (rule classical) + apply (drule(5) notFinal_prev_or_next) + apply clarsimp + apply (clarsimp simp: nullPointer_def) + apply (erule rsubst[where P="\x. Q x s" for s]) + apply (rule sym, rule iffI) + apply (rule classical) + apply (drule(5) notFinal_prev_or_next) + apply clarsimp + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (case_tac cte) + apply clarsimp + apply (clarsimp simp add: isFinal_def) + apply (erule_tac x="mdbNext node" in allE) + apply simp + apply (erule impE) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (drule (1) mdb_chain_0_no_loops) + apply simp + apply (clarsimp simp: sameObjectAs_def3 isCap_simps) + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of + valid_mdb_ctes_def valid_mdb'_def) + apply (case_tac cte) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule rsubst[where P="\x. Q x s" for s]) + apply clarsimp + apply (clarsimp simp: isFinal_def cteCaps_of_def) + apply (erule_tac x="mdbPrev node" in allE) + apply simp + apply (erule impE) + apply clarsimp + apply (drule (1) mdb_chain_0_no_loops) + apply (subgoal_tac "ctes_of s (mdbNext node) = Some (CTE cap node)") + apply clarsimp + apply (erule (1) valid_dlistEp) + apply clarsimp + apply (case_tac cte') + apply clarsimp + apply (clarsimp simp add: sameObjectAs_def3 isCap_simps) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule rsubst[where P="\x. Q x s" for s], simp) + apply (rule classical, drule(5) notFinal_prev_or_next) + apply (clarsimp simp: sameObjectAs_sym nullPointer_def) + apply (clarsimp simp: nullPointer_def) + apply (erule rsubst[where P="\x. 
Q x s" for s]) + apply (rule sym, rule iffI) + apply (rule classical, drule(5) notFinal_prev_or_next) + apply (clarsimp simp: sameObjectAs_sym) + apply auto[1] + apply (clarsimp simp: isFinal_def cteCaps_of_def) + apply (case_tac cte) + apply (erule_tac x="mdbNext node" in allE) + apply simp + apply (erule impE) + apply clarsimp + apply (drule (1) mdb_chain_0_no_loops) + apply simp + apply clarsimp + apply (clarsimp simp: isCap_simps sameObjectAs_def3) + done +end + +lemma (in vmdb) isFinal_no_subtree: + "\ m \ sl \ p; isFinal cap sl (option_map cteCap o m); + m sl = Some (CTE cap n); final_matters' cap \ \ False" + apply (erule subtree.induct) + apply (case_tac "c'=sl", simp) + apply (clarsimp simp: isFinal_def parentOf_def mdb_next_unfold cteCaps_of_def) + apply (erule_tac x="mdbNext n" in allE) + apply simp + apply (clarsimp simp: isMDBParentOf_CTE final_matters_sameRegion_sameObject) + apply (clarsimp simp: isCap_simps sameObjectAs_def3) + apply clarsimp + done + +lemma isFinal_no_descendants: + "\ isFinal cap sl (cteCaps_of s); ctes_of s sl = Some (CTE cap n); + valid_mdb' s; final_matters' cap \ + \ descendants_of' sl (ctes_of s) = {}" + apply (clarsimp simp add: descendants_of'_def cteCaps_of_def) + apply (erule(3) vmdb.isFinal_no_subtree[rotated]) + apply unfold_locales[1] + apply (simp add: valid_mdb'_def) + done + +lemma (in vmdb) isFinal_untypedParent: + assumes x: "m slot = Some cte" "isFinal (cteCap cte) slot (option_map cteCap o m)" + "final_matters' (cteCap cte) \ \ isIRQHandlerCap (cteCap cte)" + shows + "m \ x \ slot \ + (\cte'. m x = Some cte' \ isUntypedCap (cteCap cte') \ RetypeDecls_H.sameRegionAs (cteCap cte') (cteCap cte))" + apply (cases "x=slot", simp) + apply (insert x) + apply (frule subtree_mdb_next) + apply (drule subtree_parent) + apply (drule tranclD) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold parentOf_def isFinal_def) + apply (case_tac cte') + apply (rename_tac c' n') + apply (cases cte) + apply (rename_tac c n) + apply simp + apply (erule_tac x=x in allE) + apply clarsimp + apply (drule isMDBParent_sameRegion) + apply simp + apply (rule classical, simp) + apply (simp add: final_matters_sameRegion_sameObject2 + sameObjectAs_sym) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma no_fail_isFinalCapability [wp]: + "no_fail (valid_mdb' and cte_wp_at' ((=) cte) p) (isFinalCapability cte)" + apply (simp add: isFinalCapability_def) + apply (clarsimp simp: Let_def split del: if_split) + apply (rule no_fail_pre, wp getCTE_wp') + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def cte_wp_at_ctes_of nullPointer_def) + apply (rule conjI) + apply clarsimp + apply (erule (2) valid_dlistEp) + apply simp + apply clarsimp + apply (rule conjI) + apply (erule (2) valid_dlistEn) + apply simp + apply clarsimp + apply (rule valid_dlistEn, assumption+) + apply (erule (2) valid_dlistEp) + apply simp + done + +lemma corres_gets_lift: + assumes inv: "\P. \P\ g \\_. P\" + assumes res: "\Q'\ g \\r s. r = g' s\" + assumes Q: "\s. 
Q s \ Q' s" + assumes nf: "no_fail Q g" + shows "corres r P Q f (gets g') \ corres r P Q f g" + apply (clarsimp simp add: corres_underlying_def simpler_gets_def) + apply (drule (1) bspec) + apply (rule conjI) + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply simp + apply (frule in_inv_by_hoareD [OF inv]) + apply simp + apply (drule use_valid, rule res) + apply (erule Q) + apply simp + apply (insert nf) + apply (clarsimp simp: no_fail_def) + done + +lemma obj_refs_Master: + "\ cap_relation cap cap'; P cap \ + \ obj_refs cap = + (if capClass (capMasterCap cap') = PhysicalClass + \ \ isUntypedCap (capMasterCap cap') + then {capUntypedPtr (capMasterCap cap')} else {})" + by (clarsimp simp: isCap_simps + split: cap_relation_split_asm arch_cap.split_asm) + +lemma isFinalCapability_corres': + "final_matters' (cteCap cte) \ + corres (=) (invs and cte_wp_at ((=) cap) ptr) + (invs' and cte_wp_at' ((=) cte) (cte_map ptr)) + (is_final_cap cap) (isFinalCapability cte)" + apply (rule corres_gets_lift) + apply (rule isFinalCapability_inv) + apply (rule isFinal[where x="cte_map ptr"]) + apply clarsimp + apply (rule conjI, clarsimp) + apply (rule refl) + apply (rule no_fail_pre, wp, fastforce) + apply (simp add: is_final_cap_def) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def state_relation_def) + apply (frule (1) pspace_relation_ctes_ofI) + apply fastforce + apply fastforce + apply clarsimp + apply (rule iffI) + apply (simp add: is_final_cap'_def2 isFinal_def) + apply clarsimp + apply (subgoal_tac "obj_refs cap \ {} \ cap_irqs cap \ {} \ arch_gen_refs cap \ {}") + prefer 2 + apply (erule_tac x=a in allE) + apply (erule_tac x=b in allE) + apply (clarsimp simp: cte_wp_at_def gen_obj_refs_Int) + apply (subgoal_tac "ptr = (a,b)") + prefer 2 + apply (erule_tac x="fst ptr" in allE) + apply (erule_tac x="snd ptr" in allE) + apply (clarsimp simp: cte_wp_at_def gen_obj_refs_Int) + apply clarsimp + apply (rule context_conjI) + apply (clarsimp simp: isCap_simps) + apply (cases cap, auto)[1] + apply clarsimp + apply (drule_tac x=p' in pspace_relation_cte_wp_atI, assumption) + apply fastforce + apply clarsimp + apply (erule_tac x=aa in allE) + apply (erule_tac x=ba in allE) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (clarsimp simp: sameObjectAs_def3 obj_refs_Master cap_irqs_relation_Master + arch_gen_refs_relation_Master gen_obj_refs_Int + cong: if_cong + split: capability.split_asm) + apply (clarsimp simp: isFinal_def is_final_cap'_def3) + apply (rule_tac x="fst ptr" in exI) + apply (rule_tac x="snd ptr" in exI) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_def final_matters'_def + gen_obj_refs_Int + split: cap_relation_split_asm arch_cap.split_asm) + apply clarsimp + apply (drule_tac p="(a,b)" in cte_wp_at_norm) + apply clarsimp + apply (frule_tac slot="(a,b)" in pspace_relation_ctes_ofI, assumption) + apply fastforce + apply fastforce + apply clarsimp + apply (frule_tac p="(a,b)" in cte_wp_valid_cap, fastforce) + apply (erule_tac x="cte_map (a,b)" in allE) + apply simp + apply (erule impCE, simp, drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply fastforce + apply fastforce + apply (erule invs_distinct) + apply simp + apply (frule_tac p=ptr in cte_wp_valid_cap, fastforce) + apply (clarsimp simp: cte_wp_at_def gen_obj_refs_Int) + apply (rule conjI) + apply (rule classical) + apply (frule(1) zombies_finalD2[OF _ _ _ invs_zombies], + simp?, clarsimp, assumption+) + subgoal by (clarsimp simp: 
sameObjectAs_def3 isCap_simps valid_cap_def valid_arch_cap_def
+                        valid_arch_cap_ref_def obj_at_def is_obj_defs a_type_def
+                        final_matters'_def
+                 split: cap.split_asm arch_cap.split_asm option.split_asm if_split_asm,
+              simp_all add: is_cap_defs)
+  apply (rule classical)
+  apply (clarsimp simp: cap_irqs_def cap_irq_opt_def sameObjectAs_def3 isCap_simps
+                        acap_relation_def
+                 split: cap.split_asm arch_cap.split_asm)
+  done
+
+lemma isFinalCapability_corres:
+  "corres (\<lambda>rv rv'. final_matters' (cteCap cte) \<longrightarrow> rv = rv')
+     (invs and cte_wp_at ((=) cap) ptr)
+     (invs' and cte_wp_at' ((=) cte) (cte_map ptr))
+     (is_final_cap cap) (isFinalCapability cte)"
+  apply (cases "final_matters' (cteCap cte)")
+   apply simp
+   apply (erule isFinalCapability_corres')
+  apply (subst bind_return[symmetric],
+         rule corres_symb_exec_r)
+     apply (rule corres_no_failI)
+      apply wp
+     apply (clarsimp simp: in_monad is_final_cap_def simpler_gets_def)
+    apply (wp isFinalCapability_inv)+
+  apply fastforce
+  done
+
+text \<open>Facts about finalise_cap/finaliseCap and
+      cap_delete_one/cteDelete in no particular order\<close>
+
+
+definition
+  finaliseCapTrue_standin_simple_def:
+  "finaliseCapTrue_standin cap fin \<equiv> finaliseCap cap fin True"
+
+context
+begin
+
+declare if_cong [cong]
+
+lemmas finaliseCapTrue_standin_def
+  = finaliseCapTrue_standin_simple_def
+      [unfolded finaliseCap_def, simplified]
+
+lemmas cteDeleteOne_def'
+  = eq_reflection [OF cteDeleteOne_def]
+lemmas cteDeleteOne_def
+  = cteDeleteOne_def'[folded finaliseCapTrue_standin_simple_def]
+
+crunches cteDeleteOne, suspend, prepareThreadDelete
+  for typ_at'[wp]: "\<lambda>s. P (typ_at' T p s)"
+  (wp: crunch_wps getObject_inv loadObject_default_inv
+   simp: crunch_simps unless_def o_def
+   ignore_del: setObject)
+
+end
+
+lemmas cancelAllIPC_typs[wp] = typ_at_lifts [OF cancelAllIPC_typ_at']
+lemmas cancelAllSignals_typs[wp] = typ_at_lifts [OF cancelAllSignals_typ_at']
+lemmas suspend_typs[wp] = typ_at_lifts [OF suspend_typ_at']
+
+definition
+  cap_has_cleanup' :: "capability \<Rightarrow> bool"
+where
+  "cap_has_cleanup' cap \<equiv> case cap of
+     IRQHandlerCap _ \<Rightarrow> True
+   | ArchObjectCap acap \<Rightarrow> False
+   | _ \<Rightarrow> False"
+
+lemmas cap_has_cleanup'_simps[simp] = cap_has_cleanup'_def[split_simps capability.split]
+
+lemma finaliseCap_cases[wp]:
+  "\<lbrace>\<top>\<rbrace>
+     finaliseCap cap final flag
+   \<lbrace>\<lambda>rv s. fst rv = NullCap \<and> (snd rv \<noteq> NullCap \<longrightarrow> final \<and> cap_has_cleanup' cap \<and> snd rv = cap)
+         \<or>
+       isZombie (fst rv) \<and> final \<and> \<not> flag \<and> snd rv = NullCap
+        \<and> capUntypedPtr (fst rv) = capUntypedPtr cap
+        \<and> (isThreadCap cap \<or> isCNodeCap cap \<or> isZombie cap)\<rbrace>"
+  apply (simp add: finaliseCap_def AARCH64_H.finaliseCap_def Let_def
+                   getThreadCSpaceRoot
+             cong: if_cong split del: if_split)
+  apply (rule hoare_pre)
+   apply ((wp | simp add: isCap_simps split del: if_split
+              | wpc
+              | simp only: valid_NullCap fst_conv snd_conv)+)[1]
+  apply (simp only: simp_thms fst_conv snd_conv option.simps if_cancel
+                    o_def)
+  apply (intro allI impI conjI TrueI)
+  apply (auto simp add: isCap_simps cap_has_cleanup'_def)
+  done
+
+crunch aligned'[wp]: finaliseCap "pspace_aligned'"
+  (simp: crunch_simps assertE_def unless_def o_def
+   wp: getObject_inv loadObject_default_inv crunch_wps)
+
+crunch distinct'[wp]: finaliseCap "pspace_distinct'"
+  (simp: crunch_simps assertE_def unless_def o_def
+   wp: getObject_inv loadObject_default_inv crunch_wps)
+
+crunch typ_at'[wp]: finaliseCap "\<lambda>s.
P (typ_at' T p s)" + (simp: crunch_simps assertE_def + wp: getObject_inv loadObject_default_inv crunch_wps) +lemmas finaliseCap_typ_ats[wp] = typ_at_lifts[OF finaliseCap_typ_at'] + +lemma unmapPageTable_it'[wp]: + "unmapPageTable asid vaddr pt \\s. P (ksIdleThread s)\" + unfolding unmapPageTable_def by wpsimp + +crunch it'[wp]: finaliseCap "\s. P (ksIdleThread s)" + (wp: mapM_x_wp_inv mapM_wp' hoare_drop_imps getObject_inv loadObject_default_inv + simp: crunch_simps updateObject_default_def o_def) + +lemma ntfn_q_refs_of'_mult: + "ntfn_q_refs_of' ntfn = (case ntfn of Structures_H.WaitingNtfn q \ set q | _ \ {}) \ {NTFNSignal}" + by (cases ntfn, simp_all) + +lemma tcb_st_not_Bound: + "(p, NTFNBound) \ tcb_st_refs_of' ts" + "(p, TCBBound) \ tcb_st_refs_of' ts" + by (auto simp: tcb_st_refs_of'_def split: Structures_H.thread_state.split) + +crunches setBoundNotification + for valid_bitmaps[wp]: valid_bitmaps + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: valid_bitmaps_lift) + +lemma unbindNotification_invs[wp]: + "\invs'\ unbindNotification tcb \\rv. invs'\" + apply (simp add: unbindNotification_def invs'_def valid_state'_def) + apply (rule bind_wp[OF _ gbn_sp']) + apply (case_tac ntfnPtr, clarsimp, wp, clarsimp) + apply clarsimp + apply (rule bind_wp[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' valid_irq_node_lift + irqs_masked_lift setBoundNotification_ct_not_inQ sym_heap_sched_pointers_lift + untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ + apply (rule conjI) + apply (clarsimp elim!: obj_atE' + dest!: pred_tcb_at') + apply (clarsimp simp: pred_tcb_at' conj_comms) + apply (frule bound_tcb_ex_cap'', clarsimp+) + apply (frule(1) sym_refs_bound_tcb_atD') + apply (frule(1) sym_refs_obj_atD') + apply (clarsimp simp: refs_of_rev') + apply normalise_obj_at' + apply (subst delta_sym_refs, assumption) + apply (auto split: if_split_asm)[1] + apply (auto simp: tcb_st_not_Bound ntfn_q_refs_of'_mult split: if_split_asm)[1] + apply (frule obj_at_valid_objs', clarsimp+) + apply (simp add: valid_ntfn'_def valid_obj'_def live'_def + split: ntfn.splits) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: obj_at'_def ko_wp_at'_def live'_def) + done + +lemma ntfn_bound_tcb_at': + "\sym_refs (state_refs_of' s); valid_objs' s; ko_at' ntfn ntfnptr s; + ntfnBoundTCB ntfn = Some tcbptr; P (Some ntfnptr)\ + \ bound_tcb_at' P tcbptr s" + apply (drule_tac x=ntfnptr in sym_refsD[rotated]) + apply (clarsimp simp: obj_at'_def) + apply (fastforce simp: state_refs_of'_def) + apply (auto simp: pred_tcb_at'_def obj_at'_def valid_obj'_def valid_ntfn'_def + state_refs_of'_def refs_of_rev' + simp del: refs_of_simps + split: option.splits if_split_asm) + done + + +lemma unbindMaybeNotification_invs[wp]: + "\invs'\ unbindMaybeNotification ntfnptr \\rv. 
invs'\" + apply (simp add: unbindMaybeNotification_def invs'_def valid_state'_def) + apply (rule bind_wp[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sym_heap_sched_pointers_lift valid_irq_node_lift + irqs_masked_lift setBoundNotification_ct_not_inQ + untyped_ranges_zero_lift + | wpc | clarsimp simp: cteCaps_of_def o_def)+ + apply safe[1] + defer 3 + defer 7 + apply (fold_subgoals (prefix))[8] + subgoal premises prems using prems + by (auto simp: pred_tcb_at' valid_pspace'_def valid_obj'_def valid_ntfn'_def + ko_wp_at'_def live'_def + elim!: obj_atE' valid_objsE' if_live_then_nonz_capE' + split: option.splits ntfn.splits) + apply (rule delta_sym_refs, assumption) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems by (fastforce simp: symreftype_inverse' ntfn_q_refs_of'_def + split: ntfn.splits if_split_asm + dest!: ko_at_state_refs_ofD')+ + apply (rule delta_sym_refs, assumption) + apply (clarsimp split: if_split_asm) + apply (frule ko_at_state_refs_ofD', simp) + apply (clarsimp split: if_split_asm) + apply (frule_tac P="(=) (Some ntfnptr)" in ntfn_bound_tcb_at', simp_all add: valid_pspace'_def)[1] + subgoal by (fastforce simp: ntfn_q_refs_of'_def state_refs_of'_def tcb_ntfn_is_bound'_def + tcb_st_refs_of'_def + dest!: bound_tcb_at_state_refs_ofD' + split: ntfn.splits thread_state.splits) + apply (frule ko_at_state_refs_ofD', simp) + done + +(* Ugh, required to be able to split out the abstract invs *) +lemma finaliseCap_True_invs[wp]: + "\invs'\ finaliseCap cap final True \\rv. invs'\" + apply (simp add: finaliseCap_def Let_def) + apply safe + apply (wp irqs_masked_lift| simp | wpc)+ + done + +lemma invalidateASIDEntry_invs'[wp]: + "invalidateASIDEntry asid \invs'\" + unfolding invalidateASIDEntry_def + by wpsimp + +lemma invs_asid_update_strg': + "invs' s \ tab = armKSASIDTable (ksArchState s) \ + invs' (s\ksArchState := armKSASIDTable_update + (\_. tab (asid := None)) (ksArchState s)\)" + apply (simp add: invs'_def) + apply (simp add: valid_state'_def) + apply (simp add: valid_global_refs'_def global_refs'_def valid_arch_state'_def + valid_asid_table'_def valid_machine_state'_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def + cong: option.case_cong) + apply (auto simp add: ran_def split: if_split_asm) + done + +crunches invalidateTLBByASID + for asidTable[wp]: "\s. P (armKSASIDTable (ksArchState s))" + +lemma deleteASIDPool_invs[wp]: + "\invs'\ deleteASIDPool asid pool \\rv. invs'\" + apply (simp add: deleteASIDPool_def) + apply wp + apply (simp del: fun_upd_apply) + apply (strengthen invs_asid_update_strg') + apply (wp mapM_wp' getObject_inv loadObject_default_inv + | simp)+ + done + +lemma deleteASID_invs'[wp]: + "deleteASID asid pd \invs'\" + unfolding deleteASID_def + by (wpsimp wp: getASID_wp hoare_drop_imps simp: getPoolPtr_def) + +lemma valid_objs_valid_tcb': + "\ valid_objs' s ; ko_at' (t :: tcb) p s \ \ valid_tcb' t s" + by (fastforce simp add: obj_at'_def ran_def valid_obj'_def valid_objs'_def) + +lemmas archThreadSet_typ_ats[wp] = typ_at_lifts [OF archThreadSet_typ_at'] + +lemma archThreadSet_valid_objs'[wp]: + "\valid_objs' and (\s. \tcb. ko_at' tcb t s \ valid_arch_tcb' (f (tcbArch tcb)) s)\ + archThreadSet f t \\_. 
valid_objs'\" + unfolding archThreadSet_def + apply (wp setObject_tcb_valid_objs getObject_tcb_wp) + apply clarsimp + apply normalise_obj_at' + apply (drule (1) valid_objs_valid_tcb') + apply (clarsimp simp: valid_obj'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +crunch no_0_obj'[wp]: archThreadSet no_0_obj' + +lemma archThreadSet_ctes_of[wp]: + "archThreadSet f t \\s. P (ctes_of s)\" + unfolding archThreadSet_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (auto simp: tcb_cte_cases_def cteSizeBits_def) + done + +crunch ksCurDomain[wp]: archThreadSet "\s. P (ksCurDomain s)" + (wp: setObject_cd_inv) + +lemma archThreadSet_obj_at': + "(\tcb. P tcb \ P (tcb \ tcbArch:= f (tcbArch tcb)\)) \ archThreadSet f t \obj_at' P t'\" + unfolding archThreadSet_def + apply (wpsimp wp: getObject_tcb_wp setObject_tcb_strongest) + apply normalise_obj_at' + apply auto + done + +lemma archThreadSet_tcbDomain[wp]: + "archThreadSet f t \obj_at' (\tcb. x = tcbDomain tcb) t'\" + by (wpsimp wp: archThreadSet_obj_at') + +lemma archThreadSet_inQ[wp]: + "archThreadSet f t' \\s. P (obj_at' (inQ d p) t s)\" + unfolding obj_at'_real_def archThreadSet_def + apply (wpsimp wp: setObject_ko_wp_at getObject_tcb_wp + simp: objBits_simps' archObjSize_def vcpuBits_def pageBits_def + | simp)+ + apply (auto simp: obj_at'_def ko_wp_at'_def) + done + +crunch ct[wp]: archThreadSet "\s. P (ksCurThread s)" + (wp: setObject_ct_inv) + +crunch sched[wp]: archThreadSet "\s. P (ksSchedulerAction s)" + (wp: setObject_sa_unchanged) + +crunch L1[wp]: archThreadSet "\s. P (ksReadyQueuesL1Bitmap s)" + (wp: setObject_sa_unchanged) + +crunch L2[wp]: archThreadSet "\s. P (ksReadyQueuesL2Bitmap s)" + (wp: setObject_sa_unchanged) + +crunch ksArch[wp]: archThreadSet "\s. P (ksArchState s)" + +crunch ksDomSchedule[wp]: archThreadSet "\s. P (ksDomSchedule s)" + (wp: setObject_ksDomSchedule_inv) + +crunch ksDomScheduleIdx[wp]: archThreadSet "\s. P (ksDomScheduleIdx s)" + +lemma setObject_tcb_ksInterruptState[wp]: + "setObject t (v :: tcb) \\s. P (ksInterruptState s)\" + by (wpsimp simp: setObject_def wp: updateObject_default_inv) + +lemma setObject_tcb_gsMaxObjectSize[wp]: + "setObject t (v :: tcb) \\s. P (gsMaxObjectSize s)\" + by (wpsimp simp: setObject_def wp: updateObject_default_inv) + +crunches archThreadSet + for pspace_canonical'[wp]: pspace_canonical' + and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + and ksInterruptState[wp]: "\s. P (ksInterruptState s)" + and ksMachineState[wp]: "\s. P (ksMachineState s)" + (wp: setObject_ksMachine updateObject_default_inv) + +lemma archThreadSet_state_refs_of'[wp]: + "archThreadSet f t \\s. P (state_refs_of' s)\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_state_refs_of' getObject_tcb_wp) + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (auto simp: state_refs_of'_def obj_at'_def) + done + +lemma archThreadSet_state_hyp_refs_of'[wp]: + "\\s. \tcb. ko_at' tcb t s \ P ((state_hyp_refs_of' s)(t := tcb_hyp_refs' (f (tcbArch tcb))))\ + archThreadSet f t \\_ s. P (state_hyp_refs_of' s)\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_state_hyp_refs_of' getObject_tcb_wp simp: objBits_simps') + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply auto + done + +lemma archThreadSet_if_live'[wp]: + "\\s. if_live_then_nonz_cap' s \ + (\tcb. ko_at' tcb t s \ atcbVCPUPtr (f (tcbArch tcb)) \ None \ ex_nonz_cap_to' t s)\ + archThreadSet f t \\_. 
if_live_then_nonz_cap'\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_iflive' getObject_tcb_wp) + apply normalise_obj_at' + apply (clarsimp simp: tcb_cte_cases_def if_live_then_nonz_cap'_def cteSizeBits_def) + apply (erule_tac x=t in allE) + apply (erule impE) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def live'_def hyp_live'_def) + apply simp + done + +lemma archThreadSet_ifunsafe'[wp]: + "archThreadSet f t \if_unsafe_then_cap'\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_ifunsafe' getObject_tcb_wp) + apply normalise_obj_at' + apply (auto simp: tcb_cte_cases_def if_live_then_nonz_cap'_def cteSizeBits_def) + done + +lemma archThreadSet_valid_idle'[wp]: + "archThreadSet f t \valid_idle'\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_idle' getObject_tcb_wp) + apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + done + +lemma archThreadSet_ko_wp_at_no_vcpu[wp]: + "archThreadSet f t \ko_wp_at' (is_vcpu' and hyp_live') p\" + unfolding archThreadSet_def + apply (wpsimp wp: getObject_tcb_wp setObject_ko_wp_at simp: objBits_simps' | rule refl)+ + apply normalise_obj_at' + apply (auto simp: ko_wp_at'_def obj_at'_real_def is_vcpu'_def) + done + +lemma archThreadSet_valid_arch_state'[wp]: + "archThreadSet f t \valid_arch_state'\" + unfolding valid_arch_state'_def valid_asid_table'_def option_case_all_conv split_def + apply (rule hoare_lift_Pf[where f=ksArchState]; wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift) + apply (clarsimp simp: pred_conj_def) + done + +lemma archThreadSet_ct_not_inQ[wp]: + "archThreadSet f t \ct_not_inQ\" + unfolding ct_not_inQ_def + apply (rule hoare_lift_Pf[where f=ksCurThread]; wp?) + apply (wpsimp wp: hoare_vcg_imp_lift simp: o_def) + done + +lemma archThreadSet_obj_at'_pte[wp]: + "archThreadSet f t \obj_at' (P::pte \ bool) p\" + unfolding archThreadSet_def + by (wpsimp wp: obj_at_setObject2 simp: updateObject_default_def in_monad) + +crunch pspace_domain_valid[wp]: archThreadSet pspace_domain_valid + +lemma setObject_tcb_gsUntypedZeroRanges[wp]: + "setObject ptr (tcb::tcb) \\s. P (gsUntypedZeroRanges s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +crunch gsUntypedZeroRanges[wp]: archThreadSet "\s. P (gsUntypedZeroRanges s)" + +lemma archThreadSet_untyped_ranges_zero'[wp]: + "archThreadSet f t \untyped_ranges_zero'\" + by (rule hoare_lift_Pf[where f=cteCaps_of]; wp cteCaps_of_ctes_of_lift) + +lemma archThreadSet_tcb_at'[wp]: + "\\\ archThreadSet f t \\_. tcb_at' t\" + unfolding archThreadSet_def + by (wpsimp wp: getObject_tcb_wp simp: obj_at'_def) + +lemma setObject_tcb_tcbs_of'[wp]: + "\\s. P ((tcbs_of' s) (t \ tcb))\ + setObject t tcb + \\_ s. P (tcbs_of' s)\" + unfolding setObject_def + apply (wpsimp simp: updateObject_default_def) + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + done + +lemma archThreadSet_tcbSchedPrevs_of[wp]: + "archThreadSet f t \\s. P (tcbSchedPrevs_of s)\" + unfolding archThreadSet_def + apply (wp getObject_tcb_wp) + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def obj_at'_def split: option.splits) + done + +lemma archThreadSet_tcbSchedNexts_of[wp]: + "archThreadSet f t \\s. 
P (tcbSchedNexts_of s)\" + unfolding archThreadSet_def + apply (wp getObject_tcb_wp) + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def obj_at'_def split: option.splits) + done + +lemma archThreadSet_tcbQueued[wp]: + "archThreadSet f t \\s. P (tcbQueued |< tcbs_of' s)\" + unfolding archThreadSet_def + apply (wp getObject_tcb_wp) + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_pred_def opt_map_def obj_at'_def split: option.splits) + done + +lemma archThreadSet_valid_sched_pointers[wp]: + "archThreadSet f t \valid_sched_pointers\" + by (wp_pre, wps, wp, assumption) + +lemma dissoc_invs': + "\invs' and (\s. \p. (\a. armHSCurVCPU (ksArchState s) = Some (p, a)) \ p \ v) and + ko_at' vcpu v and K (vcpuTCBPtr vcpu = Some t) and + obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = Some v) t\ + do + archThreadSet (atcbVCPUPtr_update (\_. Nothing)) t; + setObject v $ vcpuTCBPtr_update (\_. Nothing) vcpu + od \\_. invs' and tcb_at' t\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_tcb_valid_objs setObject_vcpu_valid_objs' + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift + setVCPU_valid_arch' archThreadSet_if_live' sym_heap_sched_pointers_lift + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb valid_arch_tcb'_def + | clarsimp simp: live'_def hyp_live'_def arch_live'_def)+ + supply fun_upd_apply[simp] + apply (clarsimp simp: state_hyp_refs_of'_def obj_at'_def tcb_vcpu_refs'_def valid_vcpu'_def + split: option.splits if_split_asm) + apply safe + apply (rule_tac rfs'="state_hyp_refs_of' s" in delta_sym_refs) + apply (clarsimp simp: state_hyp_refs_of'_def obj_at'_def tcb_vcpu_refs'_def + split: option.splits if_split_asm)+ + done + +lemma setVCPU_archThreadSet_None_eq: + "do + archThreadSet (atcbVCPUPtr_update (\_. Nothing)) t; + setObject v $ vcpuTCBPtr_update (\_. Nothing) vcpu; + f + od = do + do + archThreadSet (atcbVCPUPtr_update (\_. Nothing)) t; + setObject v $ vcpuTCBPtr_update (\_. Nothing) vcpu + od; + f + od" by (simp add: bind_assoc) + +lemma vcpuInvalidateActive_inactive[wp]: + "\\\ vcpuInvalidateActive \\rv s. \p. (\a. armHSCurVCPU (ksArchState s) = Some (p, a)) \ P p rv s\" + unfolding vcpuInvalidateActive_def modifyArchState_def by wpsimp + +lemma vcpuDisableNone_obj_at'[wp]: + "vcpuDisable None \\s. P (obj_at' P' p s)\" + unfolding vcpuDisable_def by wpsimp + +lemma vcpuInvalidateActive_obj_at'[wp]: + "vcpuInvalidateActive \\s. 
P (obj_at' P' p s)\" + unfolding vcpuInvalidateActive_def modifyArchState_def by wpsimp + +lemma when_assert_eq: + "(when P $ haskell_fail xs) = assert (\P)" + by (simp add: assert_def when_def) + +lemma dissociateVCPUTCB_invs'[wp]: + "dissociateVCPUTCB vcpu tcb \invs'\" + unfolding dissociateVCPUTCB_def setVCPU_archThreadSet_None_eq when_assert_eq + apply (wpsimp wp: dissoc_invs' getVCPU_wp | wpsimp wp: getObject_tcb_wp simp: archThreadGet_def)+ + apply (drule tcb_ko_at') + apply clarsimp + apply (rule exI, rule conjI, assumption) + apply clarsimp + apply (rule conjI) + apply normalise_obj_at' + apply (rule conjI) + apply normalise_obj_at' + apply (clarsimp simp: obj_at'_def) + done + +lemma vcpuFinalise_invs'[wp]: "vcpuFinalise vcpu \invs'\" + unfolding vcpuFinalise_def by wpsimp + +lemma arch_finaliseCap_invs[wp]: + "\invs' and valid_cap' (ArchObjectCap cap)\ Arch.finaliseCap cap fin \\rv. invs'\" + unfolding AARCH64_H.finaliseCap_def Let_def by wpsimp + +lemma setObject_tcb_unlive[wp]: + "\\s. vr \ t \ ko_wp_at' (Not \ live') vr s\ + setObject t (tcbArch_update (\_. atcbVCPUPtr_update Map.empty (tcbArch tcb)) tcb) + \\_. ko_wp_at' (Not \ live') vr\" + apply (rule wp_pre) + apply (wpsimp wp: setObject_ko_wp_at simp: objBits_simps', simp+) + apply (clarsimp simp: tcb_at_typ_at' typ_at'_def ko_wp_at'_def ) + done + +lemma setVCPU_unlive[wp]: + "\\\ setObject vr (vcpuTCBPtr_update Map.empty vcpu) \\_. ko_wp_at' (Not \ live') vr\" + apply (rule wp_pre) + apply (wpsimp wp: setObject_ko_wp_at + simp: objBits_def objBitsKO_def archObjSize_def vcpuBits_def pageBits_def) + apply simp+ + apply (clarsimp simp: live'_def hyp_live'_def arch_live'_def ko_wp_at'_def obj_at'_def) + done + +lemma asUser_unlive[wp]: + "\ko_wp_at' (Not \ live') vr\ asUser t f \\_. ko_wp_at' (Not \ live') vr\" + unfolding asUser_def + apply (wpsimp simp: threadSet_def atcbContextSet_def objBits_simps' split_def + wp: setObject_ko_wp_at) + apply (rule refl, simp) + apply (wpsimp simp: atcbContextGet_def wp: getObject_tcb_wp threadGet_wp)+ + apply (clarsimp simp: tcb_at_typ_at' typ_at'_def ko_wp_at'_def[where p=t]) + apply (case_tac ko; simp) + apply (rename_tac tcb) + apply (rule_tac x=tcb in exI) + apply (clarsimp simp: obj_at'_def ko_wp_at'_def live'_def hyp_live'_def) + done + +lemma dissociateVCPUTCB_unlive: + "\ \ \ dissociateVCPUTCB vcpu tcb \ \_. ko_wp_at' (Not o live') vcpu \" + unfolding dissociateVCPUTCB_def setVCPU_archThreadSet_None_eq when_assert_eq + by (wpsimp wp: getVCPU_wp[where p=vcpu] | + wpsimp wp: getObject_tcb_wp hoare_vcg_conj_lift hoare_vcg_ex_lift + getVCPU_wp[where p=vcpu] setVCPU_unlive[simplified o_def] + setObject_tcb_unlive hoare_drop_imp setObject_tcb_strongest + simp: archThreadGet_def archThreadSet_def)+ + +lemma vcpuFinalise_unlive[wp]: + "\ \ \ vcpuFinalise v \ \_. ko_wp_at' (Not o live') v \" + apply (wpsimp simp: vcpuFinalise_def wp: dissociateVCPUTCB_unlive getVCPU_wp) + apply (frule state_hyp_refs_of'_vcpu_absorb) + apply (auto simp: ko_wp_at'_def) + apply (rule_tac x="KOArch (KOVCPU ko)" in exI) + apply (clarsimp simp: live'_def hyp_live'_def arch_live'_def obj_at'_def) + done + +crunches setVMRoot, deleteASIDPool, invalidateTLBByASID, invalidateASIDEntry, vcpuFinalise + for ctes_of[wp]: "\s. P (ctes_of s)" + (wp: crunch_wps getObject_inv loadObject_default_inv getASID_wp simp: crunch_simps) + +lemma deleteASID_ctes_of[wp]: + "deleteASID a ptr \\s. 
P (ctes_of s)\" + unfolding deleteASID_def by (wpsimp wp: getASID_wp hoare_drop_imps hoare_vcg_all_lift) + +lemma arch_finaliseCap_removeable[wp]: + "\\s. s \' ArchObjectCap cap \ invs' s + \ (final_matters' (ArchObjectCap cap) + \ (final = isFinal (ArchObjectCap cap) slot (cteCaps_of s))) \ + Arch.finaliseCap cap final + \\rv s. isNullCap (fst rv) \ removeable' slot s (ArchObjectCap cap) \ isNullCap (snd rv)\" + unfolding AARCH64_H.finaliseCap_def + apply (wpsimp wp: hoare_vcg_op_lift simp: removeable'_def isCap_simps cte_wp_at_ctes_of) + apply (fastforce simp: final_matters'_def isFinal_def cte_wp_at_ctes_of cteCaps_of_def + sameObjectAs_def3) + done + +lemma isZombie_Null: + "\ isZombie NullCap" + by (simp add: isCap_simps) + +lemma prepares_delete_helper'': + assumes x: "\P\ f \\rv. ko_wp_at' (Not \ live') p\" + shows "\P and K ((\x. cte_refs' cap x = {}) + \ zobj_refs' cap = {p} + \ threadCapRefs cap = {})\ + f \\rv s. removeable' sl s cap\" + apply (rule hoare_gen_asm) + apply (rule hoare_strengthen_post [OF x]) + apply (clarsimp simp: removeable'_def) + done + +crunches finaliseCapTrue_standin, unbindNotification + for ctes_of[wp]: "\s. P (ctes_of s)" + (wp: crunch_wps getObject_inv loadObject_default_inv simp: crunch_simps) + +lemma cteDeleteOne_cteCaps_of: + "\\s. (cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ + P ((cteCaps_of s)(p \ NullCap)))\ + cteDeleteOne p + \\rv s. P (cteCaps_of s)\" + apply (simp add: cteDeleteOne_def unless_def split_def) + apply (rule bind_wp [OF _ getCTE_sp]) + apply (case_tac "\final. finaliseCap (cteCap cte) final True = fail") + apply (simp add: finaliseCapTrue_standin_simple_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def + finaliseCap_def isCap_simps) + apply (drule_tac x=s in fun_cong) + apply (simp add: return_def fail_def) + apply (wp emptySlot_cteCaps_of) + apply (simp add: cteCaps_of_def) + apply (wp (once) hoare_drop_imps) + apply (wp isFinalCapability_inv getCTE_wp')+ + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of) + apply (auto simp: fun_upd_idem fun_upd_def[symmetric] o_def) + done + +lemma cteDeleteOne_isFinal: + "\\s. isFinal cap slot (cteCaps_of s)\ + cteDeleteOne p + \\rv s. isFinal cap slot (cteCaps_of s)\" + apply (wp cteDeleteOne_cteCaps_of) + apply (clarsimp simp: isFinal_def sameObjectAs_def2) + done + +lemmas setEndpoint_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ep_ctes_of] +lemmas setNotification_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ntfn_ctes_of] +lemmas threadSet_cteCaps_of = cteCaps_of_ctes_of_lift [OF threadSet_ctes_of] + +crunches archThreadSet, vcpuUpdate, dissociateVCPUTCB + for isFinal: "\s. isFinal cap slot (cteCaps_of s)" + (wp: cteCaps_of_ctes_of_lift) + +crunch isFinal: suspend, prepareThreadDelete "\s. isFinal cap slot (cteCaps_of s)" + (ignore: threadSet + wp: threadSet_cteCaps_of crunch_wps + simp: crunch_simps unless_def o_def) + +lemma isThreadCap_threadCapRefs_tcbptr: + "isThreadCap cap \ threadCapRefs cap = {capTCBPtr cap}" + by (clarsimp simp: isCap_simps) + +lemma isArchObjectCap_Cap_capCap: + "isArchObjectCap cap \ ArchObjectCap (capCap cap) = cap" + by (clarsimp simp: isCap_simps) + +lemma cteDeleteOne_deletes[wp]: + "\\\ cteDeleteOne p \\rv s. cte_wp_at' (\c. cteCap c = NullCap) p s\" + apply (subst tree_cte_cteCap_eq[unfolded o_def]) + apply (wp cteDeleteOne_cteCaps_of) + apply clarsimp + done + +crunch irq_node'[wp]: finaliseCap "\s. 
P (irq_node' s)" + (wp: crunch_wps getObject_inv loadObject_default_inv + updateObject_default_inv setObject_ksInterrupt + simp: crunch_simps o_def) + +lemma deletingIRQHandler_removeable': + "\invs' and (\s. isFinal (IRQHandlerCap irq) slot (cteCaps_of s)) + and K (cap = IRQHandlerCap irq)\ + deletingIRQHandler irq + \\rv s. removeable' slot s cap\" + apply (rule hoare_gen_asm) + apply (simp add: deletingIRQHandler_def getIRQSlot_def locateSlot_conv + getInterruptState_def getSlotCap_def) + apply (simp add: removeable'_def tree_cte_cteCap_eq[unfolded o_def]) + apply (subst tree_cte_cteCap_eq[unfolded o_def])+ + apply (wp hoare_use_eq_irq_node' [OF cteDeleteOne_irq_node' cteDeleteOne_cteCaps_of] + getCTE_wp') + apply (clarsimp simp: cte_level_bits_def ucast_nat_def shiftl_t2n mult_ac cteSizeBits_def + split: option.split_asm) + done + +lemma finaliseCap_cte_refs: + "\\s. s \' cap\ + finaliseCap cap final flag + \\rv s. fst rv \ NullCap \ cte_refs' (fst rv) = cte_refs' cap\" + apply (simp add: finaliseCap_def Let_def getThreadCSpaceRoot + AARCH64_H.finaliseCap_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp | wpc | simp only: o_def)+ + apply (frule valid_capAligned) + apply (cases cap, simp_all add: isCap_simps) + apply (clarsimp simp: tcb_cte_cases_def word_count_from_top objBits_defs) + apply clarsimp + apply (rule ext, simp) + apply (rule image_cong [OF _ refl]) + apply (fastforce simp: mask_def capAligned_def objBits_simps shiftL_nat) + done + +lemma deletingIRQHandler_final: + "\\s. isFinal cap slot (cteCaps_of s) + \ (\final. finaliseCap cap final True = fail)\ + deletingIRQHandler irq + \\rv s. isFinal cap slot (cteCaps_of s)\" + apply (simp add: deletingIRQHandler_def isFinal_def getIRQSlot_def + getInterruptState_def locateSlot_conv getSlotCap_def) + apply (wp cteDeleteOne_cteCaps_of getCTE_wp') + apply (auto simp: sameObjectAs_def3) + done + +declare suspend_unqueued [wp] + +lemma unbindNotification_valid_objs'_helper: + "valid_tcb' tcb s \ valid_tcb' (tcbBoundNotification_update (\_. None) tcb) s " + by (clarsimp simp: valid_bound_ntfn'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def + split: option.splits ntfn.splits) + +lemma unbindNotification_valid_objs'_helper': + "valid_ntfn' tcb s \ valid_ntfn' (ntfnBoundTCB_update (\_. None) tcb) s " + by (clarsimp simp: valid_bound_tcb'_def valid_ntfn'_def + split: option.splits ntfn.splits) + +lemmas setNotification_valid_tcb' = typ_at'_valid_tcb'_lift [OF setNotification_typ_at'] + +lemma unbindNotification_valid_objs'[wp]: + "\valid_objs'\ + unbindNotification t + \\rv. valid_objs'\" + apply (simp add: unbindNotification_def) + apply (rule hoare_pre) + apply (wp threadSet_valid_objs' gbn_wp' set_ntfn_valid_objs' hoare_vcg_all_lift + setNotification_valid_tcb' getNotification_wp + | wpc | clarsimp simp: setBoundNotification_def unbindNotification_valid_objs'_helper)+ + apply (clarsimp elim!: obj_atE') + apply (rule valid_objsE', assumption+) + apply (clarsimp simp: valid_obj'_def unbindNotification_valid_objs'_helper') + done + +lemma unbindMaybeNotification_valid_objs'[wp]: + "\valid_objs'\ + unbindMaybeNotification t + \\rv. 
valid_objs'\" + apply (simp add: unbindMaybeNotification_def) + apply (rule bind_wp[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp threadSet_valid_objs' gbn_wp' set_ntfn_valid_objs' hoare_vcg_all_lift + setNotification_valid_tcb' getNotification_wp + | wpc | clarsimp simp: setBoundNotification_def unbindNotification_valid_objs'_helper)+ + apply (clarsimp elim!: obj_atE') + apply (rule valid_objsE', assumption+) + apply (clarsimp simp: valid_obj'_def unbindNotification_valid_objs'_helper') + done + +lemma unbindMaybeNotification_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ unbindMaybeNotification t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: unbindMaybeNotification_def) + apply (rule hoare_pre) + apply (wp sbn_sch_act' | wpc | simp)+ + done + +lemma valid_cong: + "\ \s. P s = P' s; \s. P' s \ f s = f' s; + \rv s' s. \ (rv, s') \ fst (f' s); P' s \ \ Q rv s' = Q' rv s' \ + \ \P\ f \Q\ = \P'\ f' \Q'\" + by (clarsimp simp add: valid_def, blast) + +lemma sym_refs_ntfn_bound_eq: "sym_refs (state_refs_of' s) + \ obj_at' (\ntfn. ntfnBoundTCB ntfn = Some t) x s + = bound_tcb_at' (\st. st = Some x) t s" + apply (rule iffI) + apply (drule (1) sym_refs_obj_atD') + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def ko_wp_at'_def refs_of_rev') + apply (drule(1) sym_refs_bound_tcb_atD') + apply (clarsimp simp: obj_at'_def ko_wp_at'_def refs_of_rev') + done + +lemma unbindMaybeNotification_obj_at'_bound: + "\\\ + unbindMaybeNotification r + \\_ s. obj_at' (\ntfn. ntfnBoundTCB ntfn = None) r s\" + apply (simp add: unbindMaybeNotification_def) + apply (rule bind_wp[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp obj_at_setObject2 + | wpc + | simp add: setBoundNotification_def threadSet_def updateObject_default_def in_monad)+ + apply (simp add: setNotification_def obj_at'_real_def cong: valid_cong) + apply (wp setObject_ko_wp_at, (simp add: objBits_simps')+) + apply (clarsimp simp: obj_at'_def ko_wp_at'_def) + done + +crunches unbindNotification, unbindMaybeNotification + for isFinal[wp]: "\s. isFinal cap slot (cteCaps_of s)" + (wp: sts_bound_tcb_at' threadSet_cteCaps_of crunch_wps getObject_inv + loadObject_default_inv + ignore: threadSet + simp: setBoundNotification_def) + +crunches cancelSignal, cancelAllIPC + for bound_tcb_at'[wp]: "bound_tcb_at' P t" + (wp: sts_bound_tcb_at' threadSet_cteCaps_of crunch_wps getObject_inv + loadObject_default_inv + ignore: threadSet) + +lemma finaliseCapTrue_standin_bound_tcb_at': + "\\s. bound_tcb_at' P t s \ (\tt b r. cap = ReplyCap tt b r) \ + finaliseCapTrue_standin cap final + \\_. bound_tcb_at' P t\" + apply (case_tac cap, simp_all add:finaliseCapTrue_standin_def) + apply (clarsimp simp: isNotificationCap_def) + apply (wp, clarsimp) + done + +lemma capDeleteOne_bound_tcb_at': + "\bound_tcb_at' P tptr and cte_wp_at' (isReplyCap \ cteCap) callerCap\ + cteDeleteOne callerCap \\rv. bound_tcb_at' P tptr\" + apply (simp add: cteDeleteOne_def unless_def) + apply (rule hoare_pre) + apply (wp finaliseCapTrue_standin_bound_tcb_at' hoare_vcg_all_lift + hoare_vcg_if_lift2 getCTE_cteCap_wp + | wpc | simp | wp (once) hoare_drop_imp)+ + apply (clarsimp simp: cteCaps_of_def isReplyCap_def cte_wp_at_ctes_of + split: option.splits) + apply (case_tac "cteCap cte", simp_all) + done + +lemma cancelIPC_bound_tcb_at'[wp]: + "\bound_tcb_at' P tptr\ cancelIPC t \\rv. 
bound_tcb_at' P tptr\" + apply (simp add: cancelIPC_def Let_def) + apply (rule bind_wp[OF _ gts_sp']) + apply (case_tac "state", simp_all) + defer 2 + apply (rule hoare_pre) + apply ((wp sts_bound_tcb_at' getEndpoint_wp | wpc | simp)+)[8] + apply (simp add: getThreadReplySlot_def locateSlot_conv liftM_def) + apply (rule hoare_pre) + apply (wp capDeleteOne_bound_tcb_at' getCTE_ctes_of) + apply (rule_tac Q="\_. bound_tcb_at' P tptr" in hoare_post_imp) + apply (clarsimp simp: capHasProperty_def cte_wp_at_ctes_of) + apply (wp threadSet_pred_tcb_no_state | simp)+ + done + +lemma archThreadSet_bound_tcb_at'[wp]: + "archThreadSet f t \bound_tcb_at' P t'\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_strongest getObject_tcb_wp simp: pred_tcb_at'_def) + by (auto simp: obj_at'_def objBits_simps) + +lemmas asUser_bound_obj_at'[wp] = asUser_pred_tcb_at' [of itcbBoundNotification] + +lemmas setObject_vcpu_pred_tcb_at'[wp] = + setObject_vcpu_obj_at'_no_vcpu [of _ "\ko. tst (pr (tcb_to_itcb' ko))" for tst pr, folded pred_tcb_at'_def] + +crunches dissociateVCPUTCB, vgicUpdateLR + for bound_tcb_at'[wp]: "bound_tcb_at' P t" + (wp: sts_bound_tcb_at' getVCPU_wp crunch_wps hoare_vcg_all_lift hoare_vcg_if_lift3 + ignore: archThreadSet) + +crunches suspend, prepareThreadDelete + for bound_tcb_at'[wp]: "bound_tcb_at' P t" + (wp: sts_bound_tcb_at' cancelIPC_bound_tcb_at' + ignore: threadSet) + +lemma unbindNotification_bound_tcb_at': + "\\_. True\ unbindNotification t \\rv. bound_tcb_at' ((=) None) t\" + apply (simp add: unbindNotification_def) + apply (wp setBoundNotification_bound_tcb gbn_wp' | wpc | simp)+ + done + +crunches unbindNotification, unbindMaybeNotification + for weak_sch_act_wf[wp]: "\s. weak_sch_act_wf (ksSchedulerAction s) s" + +lemma unbindNotification_tcb_at'[wp]: + "\tcb_at' t'\ unbindNotification t \\rv. tcb_at' t'\" + apply (simp add: unbindNotification_def) + apply (wp gbn_wp' | wpc | simp)+ + done + +lemma unbindMaybeNotification_tcb_at'[wp]: + "\tcb_at' t'\ unbindMaybeNotification t \\rv. tcb_at' t'\" + apply (simp add: unbindMaybeNotification_def) + apply (wp gbn_wp' | wpc | simp)+ + done + +lemma dissociateVCPUTCB_cte_wp_at'[wp]: + "dissociateVCPUTCB v t \cte_wp_at' P p\" + unfolding cte_wp_at_ctes_of by wp + +lemmas dissociateVCPUTCB_typ_ats'[wp] = typ_at_lifts[OF dissociateVCPUTCB_typ_at'] + +crunch cte_wp_at'[wp]: prepareThreadDelete "cte_wp_at' P p" +crunch valid_cap'[wp]: prepareThreadDelete "valid_cap' cap" + +lemma unset_vcpu_hyp_unlive[wp]: + "\\\ archThreadSet (atcbVCPUPtr_update Map.empty) t \\_. ko_wp_at' (Not \ hyp_live') t\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_ko_wp_at' getObject_tcb_wp; (simp add: objBits_simps')?)+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def hyp_live'_def) + done + + lemma unset_tcb_hyp_unlive[wp]: + "\\\ setObject vr (vcpuTCBPtr_update Map.empty vcpu) \\_. ko_wp_at' (Not \ hyp_live') vr\" + apply (wpsimp wp: setObject_ko_wp_at' getObject_tcb_wp + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + | simp)+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def hyp_live'_def arch_live'_def) + done + +lemma setObject_vcpu_hyp_unlive[wp]: + "\\s. t \ vr \ ko_wp_at' (Not \ hyp_live') t s\ + setObject vr (vcpuTCBPtr_update Map.empty vcpu) + \\_. 
ko_wp_at' (Not \ hyp_live') t\" + apply (rule wp_pre) + apply (wpsimp wp: setObject_ko_wp_at + simp: objBits_def objBitsKO_def archObjSize_def vcpuBits_def pageBits_def + | simp)+ + apply (clarsimp simp: tcb_at_typ_at' typ_at'_def ko_wp_at'_def ) + done + +lemma asUser_hyp_unlive[wp]: + "asUser f t \ko_wp_at' (Not \ hyp_live') t'\" + unfolding asUser_def + apply (wpsimp wp: threadSet_ko_wp_at2' threadGet_wp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def hyp_live'_def atcbContextSet_def) + done + +lemma dissociateVCPUTCB_hyp_unlive[wp]: + "\\\ dissociateVCPUTCB v t \\_. ko_wp_at' (Not \ hyp_live') t\" + unfolding dissociateVCPUTCB_def + by (cases "v = t"; wpsimp wp: unset_tcb_hyp_unlive unset_vcpu_hyp_unlive[simplified comp_def]) + +lemma prepareThreadDelete_hyp_unlive[wp]: + "\\\ prepareThreadDelete t \\_. ko_wp_at' (Not \ hyp_live') t\" + unfolding prepareThreadDelete_def archThreadGet_def fpuThreadDelete_def + apply (wpsimp wp: getObject_tcb_wp hoare_vcg_imp_lift' hoare_vcg_ex_lift) + apply (auto simp: ko_wp_at'_def obj_at'_def hyp_live'_def) + done + +lemma fpuThreadDeleteOp_invs'[wp]: + "\invs'\ doMachineOp (fpuThreadDeleteOp t) \\rv. invs'\" + apply (wp dmo_invs' no_irq_fpuThreadDeleteOp no_irq) + apply clarsimp + apply (drule_tac Q="\_ m'. underlying_memory m' p = underlying_memory m p" + in use_valid) + apply wpsimp+ + done + +crunch invs[wp]: prepareThreadDelete "invs'" (ignore: doMachineOp) + +end + +lemma tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\\s. \ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + tcbQueueRemove q t + \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + by (fastforce dest!: heap_ls_last_None + simp: list_queue_relation_def prev_queue_head_def queue_end_valid_def + obj_at'_def opt_map_def ps_clear_def objBits_simps + split: if_splits) + +lemma tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\valid_sched_pointers\ + tcbSchedDequeue t + \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + unfolding tcbSchedDequeue_def + by (wpsimp wp: tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at' threadGet_wp) + (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def + valid_sched_pointers_def opt_pred_def opt_map_def + split: option.splits) + +crunches updateRestartPC, cancelIPC + for valid_sched_pointers[wp]: valid_sched_pointers + (simp: crunch_simps wp: crunch_wps) + +lemma suspend_tcbSchedNext_tcbSchedPrev_None: + "\invs'\ suspend t \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + unfolding suspend_def + by (wpsimp wp: hoare_drop_imps tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at') + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma archThreadSet_tcbSchedPrevNext[wp]: + "archThreadSet f t' \obj_at' (\tcb. P (tcbSchedNext tcb) (tcbSchedPrev tcb)) t\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_strongest getObject_tcb_wp) + apply normalise_obj_at' + apply auto + done + +crunches prepareThreadDelete + for tcbSchedPrevNext[wp]: "obj_at' (\tcb. P (tcbSchedNext tcb) (tcbSchedPrev tcb)) t" + (wp: threadGet_wp getVCPU_wp archThreadGet_wp crunch_wps simp: crunch_simps) + +end + +lemma (in delete_one_conc_pre) finaliseCap_replaceable: + "\\s. invs' s \ cte_wp_at' (\cte. 
cteCap cte = cap) slot s + \ (final_matters' cap \ (final = isFinal cap slot (cteCaps_of s))) + \ weak_sch_act_wf (ksSchedulerAction s) s\ + finaliseCap cap final flag + \\rv s. (isNullCap (fst rv) \ removeable' slot s cap + \ (snd rv \ NullCap \ snd rv = cap \ cap_has_cleanup' cap + \ isFinal cap slot (cteCaps_of s))) + \ + (isZombie (fst rv) \ snd rv = NullCap + \ isFinal cap slot (cteCaps_of s) + \ capClass cap = capClass (fst rv) + \ capUntypedPtr (fst rv) = capUntypedPtr cap + \ capBits (fst rv) = capBits cap + \ capRange (fst rv) = capRange cap + \ (isThreadCap cap \ isCNodeCap cap \ isZombie cap) + \ (\p \ threadCapRefs cap. st_tcb_at' ((=) Inactive) p s + \ obj_at' (Not \ tcbQueued) p s + \ bound_tcb_at' ((=) None) p s + \ ko_wp_at' (Not \ hyp_live') p s + \ obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) p s))\" + apply (simp add: finaliseCap_def Let_def getThreadCSpaceRoot + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp prepares_delete_helper'' [OF cancelAllIPC_unlive] + prepares_delete_helper'' [OF cancelAllSignals_unlive] + suspend_isFinal prepareThreadDelete_unqueued + prepareThreadDelete_inactive prepareThreadDelete_isFinal + suspend_makes_inactive + deletingIRQHandler_removeable' + deletingIRQHandler_final[where slot=slot ] + unbindMaybeNotification_obj_at'_bound + getNotification_wp + suspend_bound_tcb_at' + unbindNotification_bound_tcb_at' + suspend_tcbSchedNext_tcbSchedPrev_None + | simp add: isZombie_Null isThreadCap_threadCapRefs_tcbptr + isArchObjectCap_Cap_capCap + | (rule hoare_strengthen_post [OF arch_finaliseCap_removeable[where slot=slot]], + clarsimp simp: isCap_simps) + | wpc)+ + apply clarsimp + apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp+) + apply (case_tac "cteCap cte", + simp_all add: isCap_simps capRange_def cap_has_cleanup'_def + final_matters'_def objBits_simps + not_Final_removeable finaliseCap_def, + simp_all add: removeable'_def) + (* thread *) + apply (frule capAligned_capUntypedPtr [OF valid_capAligned], simp) + apply (clarsimp simp: valid_cap'_def) + apply (drule valid_globals_cte_wpD'[rotated], clarsimp) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (clarsimp simp: obj_at'_def | rule conjI)+ + done + +lemma cteDeleteOne_cte_wp_at_preserved: + assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" + shows "\\s. cte_wp_at' (\cte. P (cteCap cte)) p s\ + cteDeleteOne ptr + \\rv s. cte_wp_at' (\cte. P (cteCap cte)) p s\" + apply (simp add: tree_cte_cteCap_eq[unfolded o_def]) + apply (rule hoare_pre, wp cteDeleteOne_cteCaps_of) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of x) + done + +crunch ctes_of[wp]: cancelSignal "\s. P (ctes_of s)" + (simp: crunch_simps wp: crunch_wps) + +lemma cancelIPC_cteCaps_of: + "\\s. (\p. cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ + P ((cteCaps_of s)(p \ NullCap))) \ + P (cteCaps_of s)\ + cancelIPC t + \\rv s. 
P (cteCaps_of s)\" + apply (simp add: cancelIPC_def Let_def capHasProperty_def + getThreadReplySlot_def locateSlot_conv) + apply (rule hoare_pre) + apply (wp cteDeleteOne_cteCaps_of getCTE_wp' | wpcw + | simp add: cte_wp_at_ctes_of + | wp (once) hoare_drop_imps cteCaps_of_ctes_of_lift)+ + apply (wp hoare_convert_imp hoare_vcg_all_lift + threadSet_ctes_of threadSet_cteCaps_of + | clarsimp)+ + apply (wp cteDeleteOne_cteCaps_of getCTE_wp' | wpcw | simp + | wp (once) hoare_drop_imps cteCaps_of_ctes_of_lift)+ + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (drule_tac x="mdbNext (cteMDBNode x)" in spec) + apply clarsimp + apply (auto simp: o_def map_option_case fun_upd_def[symmetric]) + done + +lemma cancelIPC_cte_wp_at': + assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" + shows "\\s. cte_wp_at' (\cte. P (cteCap cte)) p s\ + cancelIPC t + \\rv s. cte_wp_at' (\cte. P (cteCap cte)) p s\" + apply (simp add: tree_cte_cteCap_eq[unfolded o_def]) + apply (rule hoare_pre, wp cancelIPC_cteCaps_of) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of x) + done + +crunches tcbSchedDequeue + for cte_wp_at'[wp]: "cte_wp_at' P p" + (wp: crunch_wps) + +lemma suspend_cte_wp_at': + assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" + shows "\cte_wp_at' (\cte. P (cteCap cte)) p\ + suspend t + \\rv. cte_wp_at' (\cte. P (cteCap cte)) p\" + apply (simp add: suspend_def updateRestartPC_def) + apply (rule hoare_pre) + apply (wp threadSet_cte_wp_at' cancelIPC_cte_wp_at' + | simp add: x)+ + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunch cte_wp_at'[wp]: deleteASIDPool "cte_wp_at' P p" + (simp: crunch_simps assertE_def + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma deleteASID_cte_wp_at'[wp]: + "\cte_wp_at' P p\ deleteASID param_a param_b \\_. cte_wp_at' P p\" + apply (simp add: deleteASID_def + cong: option.case_cong) + apply (wp setObject_cte_wp_at'[where Q="\"] getObject_inv + loadObject_default_inv setVMRoot_cte_wp_at' + | clarsimp simp: updateObject_default_def in_monad + | rule equals0I + | wpc)+ + done + +crunches unmapPageTable, unmapPage, unbindNotification, finaliseCapTrue_standin + for cte_wp_at'[wp]: "cte_wp_at' P p" + (simp: crunch_simps wp: crunch_wps getObject_inv loadObject_default_inv) + +crunch cte_wp_at'[wp]: vcpuFinalise "cte_wp_at' P p" + (wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma arch_finaliseCap_cte_wp_at[wp]: + "\cte_wp_at' P p\ Arch.finaliseCap cap fin \\rv. cte_wp_at' P p\" + apply (simp add: AARCH64_H.finaliseCap_def) + apply (wpsimp wp: unmapPage_cte_wp_at') + done + +lemma deletingIRQHandler_cte_preserved: + assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" + shows "\cte_wp_at' (\cte. P (cteCap cte)) p\ + deletingIRQHandler irq + \\rv. cte_wp_at' (\cte. P (cteCap cte)) p\" + apply (simp add: deletingIRQHandler_def getSlotCap_def + getIRQSlot_def locateSlot_conv getInterruptState_def) + apply (wpsimp wp: cteDeleteOne_cte_wp_at_preserved getCTE_wp' simp: x) + done + +lemma finaliseCap_equal_cap[wp]: + "\cte_wp_at' (\cte. cteCap cte = cap) sl\ + finaliseCap cap fin flag + \\rv. cte_wp_at' (\cte. 
cteCap cte = cap) sl\" + apply (simp add: finaliseCap_def Let_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp suspend_cte_wp_at' deletingIRQHandler_cte_preserved + | clarsimp simp: finaliseCap_def | wpc)+ + apply (case_tac cap) + apply auto + done + +lemma setThreadState_st_tcb_at_simplish': + "simple' st \ + \st_tcb_at' (P or simple') t\ + setThreadState st t' + \\rv. st_tcb_at' (P or simple') t\" + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + done + +lemmas setThreadState_st_tcb_at_simplish + = setThreadState_st_tcb_at_simplish'[unfolded pred_disj_def] + +crunch st_tcb_at_simplish: cteDeleteOne + "st_tcb_at' (\st. P st \ simple' st) t" + (wp: crunch_wps getObject_inv loadObject_default_inv threadSet_pred_tcb_no_state + simp: crunch_simps unless_def ignore: threadSet) + +lemma cteDeleteOne_st_tcb_at[wp]: + assumes x[simp]: "\st. simple' st \ P st" shows + "\st_tcb_at' P t\ cteDeleteOne slot \\rv. st_tcb_at' P t\" + apply (subgoal_tac "\Q. P = (Q or simple')") + apply (clarsimp simp: pred_disj_def) + apply (rule cteDeleteOne_st_tcb_at_simplish) + apply (rule_tac x=P in exI) + apply auto + done + +lemma cteDeleteOne_reply_pred_tcb_at: + "\\s. pred_tcb_at' proj P t s \ (\t' r. cte_wp_at' (\cte. cteCap cte = ReplyCap t' False r) slot s)\ + cteDeleteOne slot + \\rv. pred_tcb_at' proj P t\" + apply (simp add: cteDeleteOne_def unless_def isFinalCapability_def) + apply (rule bind_wp [OF _ getCTE_sp]) + apply (rule hoare_assume_pre) + apply (clarsimp simp: cte_wp_at_ctes_of when_def isCap_simps + Let_def finaliseCapTrue_standin_def) + apply (intro impI conjI, (wp | simp)+) + done + +lemmas setNotification_typ_at'[wp] = typ_at_lifts[OF setNotification_typ_at'] + +crunches setBoundNotification, setNotification + for sch_act_simple[wp]: sch_act_simple + (wp: sch_act_simple_lift) + +crunches cteDeleteOne, unbindNotification + for sch_act_simple[wp]: sch_act_simple + (wp: crunch_wps ssa_sch_act_simple sts_sch_act_simple getObject_inv + loadObject_default_inv + simp: crunch_simps + rule: sch_act_simple_lift) + +lemma rescheduleRequired_sch_act_not[wp]: + "\\\ rescheduleRequired \\rv. sch_act_not t\" + apply (simp add: rescheduleRequired_def setSchedulerAction_def) + apply (wp hoare_TrueI | simp)+ + done + +crunch sch_act_not[wp]: cteDeleteOne "sch_act_not t" + (simp: crunch_simps case_Null_If unless_def + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma cancelAllIPC_mapM_x_weak_sch_act: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (rule mapM_x_wp_inv) + apply (wp) + apply (clarsimp) + done + +lemma cancelAllIPC_mapM_x_valid_objs': + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\_. valid_objs'\" + apply (rule hoare_strengthen_post) + apply (rule mapM_x_wp') + apply (wpsimp wp: sts_valid_objs') + apply (clarsimp simp: valid_tcb_state'_def)+ + done + +lemma cancelAllIPC_mapM_x_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + by (wp mapM_x_wp' | simp)+ + +lemma rescheduleRequired_oa_queued': + "rescheduleRequired \obj_at' (\tcb. 
Q (tcbDomain tcb) (tcbPriority tcb)) t\" + unfolding rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + by wpsimp + +lemma cancelAllIPC_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ + cancelAllIPC epptr + \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (simp add: cancelAllIPC_def) + apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift + rescheduleRequired_oa_queued' cancelAllIPC_mapM_x_tcbDomain_obj_at' + getEndpoint_wp + | wpc + | simp)+ + done + +lemma cancelAllSignals_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ + cancelAllSignals epptr + \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" +apply (simp add: cancelAllSignals_def) +apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift + rescheduleRequired_oa_queued' cancelAllIPC_mapM_x_tcbDomain_obj_at' + getNotification_wp + | wpc + | simp)+ +done + +lemma unbindMaybeNotification_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ + unbindMaybeNotification r + \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding unbindMaybeNotification_def + by (wpsimp wp: getNotification_wp gbn_wp' simp: setBoundNotification_def)+ + +crunch sch_act[wp]: isFinalCapability "\s. sch_act_wf (ksSchedulerAction s) s" + (simp: crunch_simps) + +crunch weak_sch_act[wp]: + isFinalCapability "\s. weak_sch_act_wf (ksSchedulerAction s) s" + (simp: crunch_simps) + +crunch ksCurDomain[wp]: cteDeleteOne "\s. P (ksCurDomain s)" + (wp: crunch_wps simp: crunch_simps unless_def) + +lemma cteDeleteOne_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cteDeleteOne slot \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (simp add: cteDeleteOne_def unless_def split_def) + apply (wp emptySlot_tcbDomain cancelAllIPC_tcbDomain_obj_at' cancelAllSignals_tcbDomain_obj_at' + isFinalCapability_inv getCTE_wp + unbindMaybeNotification_tcbDomain_obj_at' + | rule hoare_drop_imp + | simp add: finaliseCapTrue_standin_def Let_def + split del: if_split + | wpc)+ + apply (clarsimp simp: cte_wp_at'_def) + done + +end + +global_interpretation delete_one_conc_pre + by (unfold_locales, wp) + (wp cteDeleteOne_tcbDomain_obj_at' cteDeleteOne_typ_at' cteDeleteOne_reply_pred_tcb_at | simp)+ + +lemma cteDeleteOne_invs[wp]: + "\invs'\ cteDeleteOne ptr \\rv. invs'\" + apply (simp add: cteDeleteOne_def unless_def + split_def finaliseCapTrue_standin_simple_def) + apply wp + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_True_invs) + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_replaceable[where slot=ptr]) + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_cte_refs) + apply (rule finaliseCap_equal_cap[where sl=ptr]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule disjE) + apply simp + apply (clarsimp dest!: isCapDs simp: capRemovable_def) + apply (clarsimp simp: removeable'_def fun_eq_iff[where f="cte_refs' cap" for cap] + del: disjCI) + apply (rule disjI2) + apply (rule conjI) + subgoal by auto + subgoal by (auto dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def + live'_def hyp_live'_def ko_wp_at'_def) + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp + | wp (once) isFinal[where x=ptr])+ + apply (fastforce simp: cte_wp_at_ctes_of) + done + +global_interpretation delete_one_conc_fr: delete_one_conc + by unfold_locales wp + +declare cteDeleteOne_invs[wp] + +lemma deletingIRQHandler_invs' [wp]: + "\invs'\ deletingIRQHandler i \\_. 
invs'\" + apply (simp add: deletingIRQHandler_def getSlotCap_def + getIRQSlot_def locateSlot_conv getInterruptState_def) + apply (wp getCTE_wp') + apply simp + done + +lemma finaliseCap_invs: + "\invs' and sch_act_simple and valid_cap' cap + and cte_wp_at' (\cte. cteCap cte = cap) sl\ + finaliseCap cap fin flag + \\rv. invs'\" + apply (simp add: finaliseCap_def Let_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp hoare_drop_imps hoare_vcg_all_lift | simp only: o_def | wpc)+ + apply clarsimp + apply (intro conjI impI) + apply (clarsimp dest!: isCapDs simp: valid_cap'_def) + apply (drule invs_valid_global', drule(1) valid_globals_cte_wpD') + apply (drule valid_capAligned, drule capAligned_capUntypedPtr) + apply (clarsimp dest!: isCapDs) + apply (clarsimp dest!: isCapDs) + apply (clarsimp dest!: isCapDs) + done + +lemma finaliseCap_zombie_cap[wp]: + "\cte_wp_at' (\cte. (P and isZombie) (cteCap cte)) sl\ + finaliseCap cap fin flag + \\rv. cte_wp_at' (\cte. (P and isZombie) (cteCap cte)) sl\" + apply (simp add: finaliseCap_def Let_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp suspend_cte_wp_at' + deletingIRQHandler_cte_preserved + | clarsimp simp: finaliseCap_def isCap_simps | wpc)+ + done + +lemma finaliseCap_zombie_cap': + "\cte_wp_at' (\cte. (P and isZombie) (cteCap cte)) sl\ + finaliseCap cap fin flag + \\rv. cte_wp_at' (\cte. P (cteCap cte)) sl\" + apply (rule hoare_strengthen_post) + apply (rule finaliseCap_zombie_cap) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma finaliseCap_cte_cap_wp_to[wp]: + "\ex_cte_cap_wp_to' P sl\ finaliseCap cap fin flag \\rv. ex_cte_cap_wp_to' P sl\" + apply (simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre, rule hoare_use_eq_irq_node' [OF finaliseCap_irq_node']) + apply (simp add: finaliseCap_def Let_def + cong: if_cong split del: if_split) + apply (wp suspend_cte_wp_at' + deletingIRQHandler_cte_preserved + hoare_vcg_ex_lift + | clarsimp simp: finaliseCap_def isCap_simps + | rule conjI + | wpc)+ + apply fastforce + done + +crunch valid_cap'[wp]: unbindNotification "valid_cap' cap" + +lemma finaliseCap_valid_cap[wp]: + "\valid_cap' cap\ finaliseCap cap final flag \\rv. valid_cap' (fst rv)\" + apply (simp add: finaliseCap_def Let_def + getThreadCSpaceRoot + AARCH64_H.finaliseCap_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp | simp only: valid_NullCap o_def fst_conv | wpc)+ + apply simp + apply (intro conjI impI) + apply (clarsimp simp: valid_cap'_def isCap_simps capAligned_def + objBits_simps shiftL_nat)+ + done + +lemma no_idle_thread_cap: + "\ cte_wp_at ((=) (cap.ThreadCap (idle_thread s))) sl s; valid_global_refs s \ \ False" + apply (cases sl) + apply (clarsimp simp: valid_global_refs_def valid_refs_def cte_wp_at_caps_of_state) + apply ((erule allE)+, erule (1) impE) + apply (clarsimp simp: cap_range_def) + done + +lemmas getCTE_no_0_obj'_helper + = getCTE_inv + hoare_strengthen_post[where Q="\_. no_0_obj'" and P=no_0_obj' and a="getCTE slot" for slot] + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunches invalidateTLBByASID + for nosch[wp]: "\s. P (ksSchedulerAction s)" + +crunch nosch[wp]: dissociateVCPUTCB, unmapPageTable "\s. P (ksSchedulerAction s)" + (wp: crunch_wps getVCPU_wp getObject_inv hoare_vcg_all_lift hoare_vcg_if_lift3 + simp: loadObject_default_def updateObject_default_def) + +crunch nosch[wp]: "Arch.finaliseCap" "\s. 
P (ksSchedulerAction s)" + (wp: crunch_wps getObject_inv simp: loadObject_default_def updateObject_default_def) + +crunch sch_act_simple[wp]: finaliseCap sch_act_simple + (simp: crunch_simps + rule: sch_act_simple_lift + wp: getObject_inv loadObject_default_inv crunch_wps) + +end + + +lemma interrupt_cap_null_or_ntfn: + "invs s + \ cte_wp_at (\cp. is_ntfn_cap cp \ cp = cap.NullCap) (interrupt_irq_node s irq, []) s" + apply (frule invs_valid_irq_node) + apply (clarsimp simp: valid_irq_node_def) + apply (drule_tac x=irq in spec) + apply (drule cte_at_0) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (drule caps_of_state_cteD) + apply (frule if_unsafe_then_capD, clarsimp+) + apply (clarsimp simp: ex_cte_cap_wp_to_def cte_wp_at_caps_of_state) + apply (frule cte_refs_obj_refs_elem, erule disjE) + apply (clarsimp | drule caps_of_state_cteD valid_global_refsD[rotated] + | rule irq_node_global_refs[where irq=irq])+ + apply (simp add: cap_range_def) + apply (clarsimp simp: appropriate_cte_cap_def + split: cap.split_asm) + done + +lemma (in delete_one) deletingIRQHandler_corres: + "corres dc (einvs) (invs') + (deleting_irq_handler irq) (deletingIRQHandler irq)" + apply (simp add: deleting_irq_handler_def deletingIRQHandler_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQSlot_corres]) + apply simp + apply (rule_tac P'="cte_at' (cte_map slot)" in corres_symb_exec_r_conj) + apply (rule_tac F="isNotificationCap rv \ rv = capability.NullCap" + and P="cte_wp_at (\cp. is_ntfn_cap cp \ cp = cap.NullCap) slot + and einvs" + and P'="invs' and cte_wp_at' (\cte. cteCap cte = rv) + (cte_map slot)" in corres_req) + apply (clarsimp simp: cte_wp_at_caps_of_state state_relation_def) + apply (drule caps_of_state_cteD) + apply (drule(1) pspace_relation_cte_wp_at, clarsimp+) + apply (auto simp: cte_wp_at_ctes_of is_cap_simps isCap_simps)[1] + apply simp + apply (rule corres_guard_imp, rule delete_one_corres[unfolded dc_def]) + apply (auto simp: cte_wp_at_caps_of_state is_cap_simps can_fast_finalise_def)[1] + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp getCTE_wp' | simp add: getSlotCap_def)+ + apply (wp | simp add: get_irq_slot_def getIRQSlot_def + locateSlot_conv getInterruptState_def)+ + apply (clarsimp simp: ex_cte_cap_wp_to_def interrupt_cap_null_or_ntfn) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma sym_refs_vcpu_tcb: (* FIXME: move to AInvs *) + "\ vcpus_of s v = Some vcpu; vcpu_tcb vcpu = Some t; sym_refs (state_hyp_refs_of s) \ \ + \tcb. ko_at (TCB tcb) t s \ tcb_vcpu (tcb_arch tcb) = Some v" + apply (frule hyp_sym_refs_obj_atD[where p=v and P="(=) (ArchObj (VCPU vcpu))", rotated]) + apply (fastforce simp: obj_at_def in_omonad) + apply (clarsimp simp: obj_at_def hyp_refs_of_def) + apply (rename_tac ko) + apply (case_tac ko; simp add: tcb_vcpu_refs_def split: option.splits) + apply (rename_tac koa) + apply (case_tac koa; simp add: refs_of_ao_def vcpu_tcb_refs_def split: option.splits) + done + +lemma vcpuFinalise_corres[corres]: + "vcpu' = vcpu \ + corres dc (invs and vcpu_at vcpu) no_0_obj' (vcpu_finalise vcpu) (vcpuFinalise vcpu')" + apply (simp add: vcpuFinalise_def vcpu_finalise_def) + apply (corres corres: getObject_vcpu_corres + simp: vcpu_relation_def + wp: get_vcpu_wp getVCPU_wp + | corres_cases_both)+ + apply (fastforce simp: obj_at_def in_omonad dest: sym_refs_vcpu_tcb) + apply (fastforce elim: vcpu_at_cross) + done + +lemma return_NullCap_pair_corres[corres]: + "corres (\r r'. 
cap_relation (fst r) (fst r') \ cap_relation (snd r) (snd r')) + \ \ + (return (cap.NullCap, cap.NullCap)) (return (NullCap, NullCap))" + by (corres corres: corres_returnTT) + +lemma arch_finaliseCap_corres: + "\ final_matters' (ArchObjectCap cap') \ final = final'; acap_relation cap cap' \ + \ corres (\r r'. cap_relation (fst r) (fst r') \ cap_relation (snd r) (snd r')) + (\s. invs s \ valid_etcbs s + \ s \ cap.ArchObjectCap cap + \ (final_matters (cap.ArchObjectCap cap) + \ final = is_final_cap' (cap.ArchObjectCap cap) s) + \ cte_wp_at ((=) (cap.ArchObjectCap cap)) sl s) + (\s. invs' s \ s \' ArchObjectCap cap' \ + (final_matters' (ArchObjectCap cap') \ + final' = isFinal (ArchObjectCap cap') (cte_map sl) (cteCaps_of s))) + (arch_finalise_cap cap final) (Arch.finaliseCap cap' final')" + apply (simp add: arch_finalise_cap_def AARCH64_H.finaliseCap_def) + apply (corres_cases_both simp: final_matters'_def acap_relation_def mdata_map_def | + corres corres: deleteASIDPool_corres[@lift_corres_args] unmapPageTable_corres)+ + apply (clarsimp simp: valid_cap_def) + apply (rule conjI, clarsimp simp: wellformed_mapdata_def valid_unmap_def vmsz_aligned_def)+ + apply (fastforce dest: vspace_for_asid_not_normal_pt simp: wellformed_mapdata_def) + apply (clarsimp simp: cap_aligned_def cte_wp_at_caps_of_state) + apply fastforce + done + +lemma unbindNotification_corres: + "corres dc + (invs and tcb_at t) + invs' + (unbind_notification t) + (unbindNotification t)" + apply (simp add: unbind_notification_def unbindNotification_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getBoundNotification_corres]) + apply (rule corres_option_split) + apply simp + apply (rule corres_return_trivial) + apply (rule corres_split[OF getNotification_corres]) + apply clarsimp + apply (rule corres_split[OF setNotification_corres]) + apply (clarsimp simp: ntfn_relation_def split:Structures_A.ntfn.splits) + apply (rule setBoundNotification_corres) + apply (wp gbn_wp' gbn_wp)+ + apply (clarsimp elim!: obj_at_valid_objsE + dest!: bound_tcb_at_state_refs_ofD invs_valid_objs + simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def obj_at_def + valid_tcb_def valid_bound_ntfn_def invs_psp_aligned invs_distinct + split: option.splits) + apply (clarsimp dest!: obj_at_valid_objs' bound_tcb_at_state_refs_ofD' invs_valid_objs' + simp: valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def tcb_ntfn_is_bound'_def + split: option.splits) + done + +lemma unbindMaybeNotification_corres: + "corres dc + (invs and ntfn_at ntfnptr) (invs' and ntfn_at' ntfnptr) + (unbind_maybe_notification ntfnptr) + (unbindMaybeNotification ntfnptr)" + apply (simp add: unbind_maybe_notification_def unbindMaybeNotification_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getNotification_corres]) + apply (rule corres_option_split) + apply (clarsimp simp: ntfn_relation_def split: Structures_A.ntfn.splits) + apply (rule corres_return_trivial) + apply simp + apply (rule corres_split[OF setNotification_corres]) + apply (clarsimp simp: ntfn_relation_def split: Structures_A.ntfn.splits) + apply (rule setBoundNotification_corres) + apply (wp get_simple_ko_wp getNotification_wp)+ + apply (clarsimp elim!: obj_at_valid_objsE + dest!: bound_tcb_at_state_refs_ofD invs_valid_objs + simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def invs_psp_aligned invs_distinct + valid_tcb_def valid_bound_ntfn_def valid_ntfn_def + split: option.splits) + apply (clarsimp dest!: obj_at_valid_objs' bound_tcb_at_state_refs_ofD' invs_valid_objs' + simp: valid_obj'_def 
valid_tcb'_def valid_bound_ntfn'_def + tcb_ntfn_is_bound'_def valid_ntfn'_def + split: option.splits) + done + +lemma fast_finaliseCap_corres: + "\ final_matters' cap' \ final = final'; cap_relation cap cap'; + can_fast_finalise cap \ + \ corres dc + (\s. invs s \ valid_sched s \ s \ cap + \ cte_wp_at ((=) cap) sl s) + (\s. invs' s \ s \' cap') + (fast_finalise cap final) + (do + p \ finaliseCap cap' final' True; + assert (capRemovable (fst p) (cte_map ptr) \ snd p = NullCap) + od)" + apply (cases cap, simp_all add: finaliseCap_def isCap_simps + corres_liftM2_simp[unfolded liftM_def] + o_def dc_def[symmetric] when_def + can_fast_finalise_def capRemovable_def + split del: if_split cong: if_cong) + apply (clarsimp simp: final_matters'_def) + apply (rule corres_guard_imp) + apply (rule corres_rel_imp) + apply (rule ep_cancel_corres) + apply simp + apply (simp add: valid_cap_def) + apply (simp add: valid_cap'_def) + apply (clarsimp simp: final_matters'_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF unbindMaybeNotification_corres]) + apply (rule cancelAllSignals_corres) + apply (wp abs_typ_at_lifts unbind_maybe_notification_invs typ_at_lifts hoare_drop_imps getNotification_wp + | wpc)+ + apply (clarsimp simp: valid_cap_def) + apply (clarsimp simp: valid_cap'_def valid_obj'_def + dest!: invs_valid_objs' obj_at_valid_objs' ) + done + +lemma cap_delete_one_corres: + "corres dc (einvs and cte_wp_at can_fast_finalise ptr) + (invs' and cte_at' (cte_map ptr)) + (cap_delete_one ptr) (cteDeleteOne (cte_map ptr))" + apply (simp add: cap_delete_one_def cteDeleteOne_def' + unless_def when_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac F="can_fast_finalise cap" in corres_gen_asm) + apply (rule corres_if) + apply fastforce + apply (rule corres_split[OF isFinalCapability_corres[where ptr=ptr]]) + apply (simp add: split_def bind_assoc [THEN sym]) + apply (rule corres_split[OF fast_finaliseCap_corres[where sl=ptr]]) + apply simp+ + apply (rule emptySlot_corres, simp) + apply (wp hoare_drop_imps)+ + apply (wp isFinalCapability_inv | wp (once) isFinal[where x="cte_map ptr"])+ + apply (rule corres_trivial, simp) + apply (wp get_cap_wp getCTE_wp)+ + apply (clarsimp simp: cte_wp_at_caps_of_state can_fast_finalise_Null + elim!: caps_of_state_valid_cap) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply fastforce + done + +context +notes option.case_cong_weak[cong] +begin +crunches ThreadDecls_H.suspend, unbindNotification + for no_0_obj'[wp]: no_0_obj' + (simp: crunch_simps wp: crunch_wps getCTE_no_0_obj'_helper) +end + +end +(* FIXME: strengthen locale instead *) + +global_interpretation delete_one + apply unfold_locales + apply (rule corres_guard_imp) + apply (rule cap_delete_one_corres) + apply auto + done + +lemma finaliseCap_corres: + "\ final_matters' cap' \ final = final'; cap_relation cap cap'; + flag \ can_fast_finalise cap \ + \ corres (\x y. cap_relation (fst x) (fst y) \ cap_relation (snd x) (snd y)) + (\s. einvs s \ s \ cap \ (final_matters cap \ final = is_final_cap' cap s) + \ cte_wp_at ((=) cap) sl s) + (\s. 
invs' s \ s \' cap' \ sch_act_simple s \ + (final_matters' cap' \ + final' = isFinal cap' (cte_map sl) (cteCaps_of s))) + (finalise_cap cap final) (finaliseCap cap' final' flag)" + supply invs_no_0_obj'[simp] + apply (cases cap, simp_all add: finaliseCap_def isCap_simps + corres_liftM2_simp[unfolded liftM_def] + o_def dc_def[symmetric] when_def + can_fast_finalise_def + split del: if_split cong: if_cong) + apply (clarsimp simp: final_matters'_def) + apply (rule corres_guard_imp) + apply (rule ep_cancel_corres) + apply (simp add: valid_cap_def) + apply (simp add: valid_cap'_def) + apply (clarsimp simp add: final_matters'_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF unbindMaybeNotification_corres]) + apply (rule cancelAllSignals_corres) + apply (wp abs_typ_at_lifts unbind_maybe_notification_invs typ_at_lifts hoare_drop_imps hoare_vcg_all_lift | wpc)+ + apply (clarsimp simp: valid_cap_def) + apply (clarsimp simp: valid_cap'_def) + apply (fastforce simp: final_matters'_def shiftL_nat zbits_map_def) + apply (clarsimp simp add: final_matters'_def getThreadCSpaceRoot + liftM_def[symmetric] o_def zbits_map_def + dc_def[symmetric]) + apply (rename_tac t) + apply (rule_tac P="\s. t \ idle_thread s" and P'="\s. t \ ksIdleThread s" in corres_add_guard) + apply clarsimp + apply (rule context_conjI) + apply (clarsimp dest!: no_idle_thread_cap) + apply (clarsimp simp: state_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF unbindNotification_corres]) + apply (rule corres_split[OF suspend_corres]) + apply (clarsimp simp: liftM_def[symmetric] o_def dc_def[symmetric] zbits_map_def) + apply (rule prepareThreadDelete_corres, simp) + apply (wp unbind_notification_invs unbind_notification_simple_sched_action + delete_one_conc_fr.suspend_objs')+ + apply (clarsimp simp add: valid_cap_def) + apply (clarsimp simp add: valid_cap'_def) + apply (simp add: final_matters'_def liftM_def[symmetric] + o_def dc_def[symmetric]) + apply (intro impI, rule corres_guard_imp) + apply (rule deletingIRQHandler_corres) + apply simp + apply simp + apply (clarsimp simp: final_matters'_def) + apply (rule_tac F="False" in corres_req) + apply clarsimp + apply (frule zombies_finalD, (clarsimp simp: is_cap_simps)+) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply simp + apply (clarsimp split del: if_split simp: o_def) + apply (rule corres_guard_imp [OF arch_finaliseCap_corres], (fastforce simp: valid_sched_def)+) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma threadSet_ct_idle_or_in_cur_domain': + "\ct_idle_or_in_cur_domain' and (\s. \tcb. tcbDomain tcb = ksCurDomain s \ tcbDomain (F tcb) = ksCurDomain s)\ + threadSet F t + \\_. ct_idle_or_in_cur_domain'\" + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (wp hoare_vcg_disj_lift hoare_vcg_imp_lift) + apply wps + apply wp + apply wps + apply wp + apply (auto simp: obj_at'_def) + done + +lemma cte_wp_at_norm_eq': + "cte_wp_at' P p s = (\cte. cte_wp_at' ((=) cte) p s \ P cte)" + by (simp add: cte_wp_at_ctes_of) + +lemma isFinal_cte_wp_def: + "isFinal cap p (cteCaps_of s) = + (\isUntypedCap cap \ + (\p'. p \ p' \ + cte_at' p' s \ + cte_wp_at' (\cte'. \ isUntypedCap (cteCap cte') \ + \ sameObjectAs cap (cteCap cte')) p' s))" + apply (simp add: isFinal_def cte_wp_at_ctes_of cteCaps_of_def) + apply (rule iffI) + apply clarsimp + apply (case_tac cte) + apply fastforce + apply fastforce + done + +lemma valid_cte_at_neg_typ': + assumes T: "\P T p. \\s. P (typ_at' T p s)\ f \\_ s. 
P (typ_at' T p s)\" + shows "\\s. \ cte_at' p' s\ f \\rv s. \ cte_at' p' s\" + apply (simp add: cte_at_typ') + apply (rule hoare_vcg_conj_lift [OF T]) + apply (simp only: imp_conv_disj) + apply (rule hoare_vcg_all_lift) + apply (rule hoare_vcg_disj_lift [OF T]) + apply (rule hoare_vcg_prop) + done + +lemma isFinal_lift: + assumes x: "\P p. \cte_wp_at' P p\ f \\_. cte_wp_at' P p\" + assumes y: "\P T p. \\s. P (typ_at' T p s)\ f \\_ s. P (typ_at' T p s)\" + shows "\\s. cte_wp_at' (\cte. isFinal (cteCap cte) sl (cteCaps_of s)) sl s\ + f + \\r s. cte_wp_at' (\cte. isFinal (cteCap cte) sl (cteCaps_of s)) sl s\" + apply (subst cte_wp_at_norm_eq') + apply (subst cte_wp_at_norm_eq' [where P="\cte. isFinal (cteCap cte) sl m" for sl m]) + apply (simp only: isFinal_cte_wp_def imp_conv_disj de_Morgan_conj) + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift x hoare_vcg_disj_lift + valid_cte_at_neg_typ' [OF y]) + done + +lemmas final_matters'_simps = final_matters'_def [split_simps capability.split arch_capability.split] + +crunch idle_thread[wp]: deleteCallerCap "\s. P (ksIdleThread s)" + (wp: crunch_wps) +crunch sch_act_simple: deleteCallerCap sch_act_simple + (wp: crunch_wps) +crunch sch_act_not[wp]: deleteCallerCap "sch_act_not t" + (wp: crunch_wps) +crunch typ_at'[wp]: deleteCallerCap "\s. P (typ_at' T p s)" + (wp: crunch_wps) +lemmas deleteCallerCap_typ_ats[wp] = typ_at_lifts [OF deleteCallerCap_typ_at'] + +lemma setEndpoint_sch_act_not_ct[wp]: + "\\s. sch_act_not (ksCurThread s) s\ + setEndpoint ptr val \\_ s. sch_act_not (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps setEndpoint_ct', wp, simp) + +lemma sbn_ct_in_state'[wp]: + "\ct_in_state' P\ setBoundNotification ntfn t \\_. ct_in_state' P\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_pre) + apply (wps setBoundNotification_ct') + apply (wp sbn_st_tcb', clarsimp) + done + +lemma set_ntfn_ct_in_state'[wp]: + "\ct_in_state' P\ setNotification a ntfn \\_. ct_in_state' P\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_pre) + apply (wps setNotification_ksCurThread, wp, clarsimp) + done + +lemma unbindMaybeNotification_ct_in_state'[wp]: + "\ct_in_state' P\ unbindMaybeNotification t \\_. ct_in_state' P\" + apply (simp add: unbindMaybeNotification_def) + apply (wp | wpc | simp)+ + done + +lemma setNotification_sch_act_sane: + "\sch_act_sane\ setNotification a ntfn \\_. sch_act_sane\" + by (wp sch_act_sane_lift) + + +lemma unbindMaybeNotification_sch_act_sane[wp]: + "\sch_act_sane\ unbindMaybeNotification t \\_. 
sch_act_sane\" + apply (simp add: unbindMaybeNotification_def) + apply (wp setNotification_sch_act_sane sbn_sch_act_sane | wpc | clarsimp)+ + done + +end + +end diff --git a/proof/refine/AARCH64/IncKernelInit.thy b/proof/refine/AARCH64/IncKernelInit.thy new file mode 100644 index 0000000000..93c1390f7a --- /dev/null +++ b/proof/refine/AARCH64/IncKernelInit.thy @@ -0,0 +1,13 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory IncKernelInit +imports ADT_H Tcb_R Arch_R +begin + +(* Dummy include file for kernel init *) + +end diff --git a/proof/refine/AARCH64/InitLemmas.thy b/proof/refine/AARCH64/InitLemmas.thy new file mode 100644 index 0000000000..d469761d28 --- /dev/null +++ b/proof/refine/AARCH64/InitLemmas.thy @@ -0,0 +1,28 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* General lemmas removed from KernelInit *) + +theory InitLemmas +imports IncKernelInit +begin + +declare headM_tailM_Cons[simp] + +declare cart_singletons[simp] + +declare less_1_simp[simp] + +declare is_aligned_no_overflow[simp] + +declare unless_True[simp] + +declare maybe_fail_bind_fail[simp] + +crunch cte_wp_at'[wp]: setPriority "cte_wp_at' P p" (simp: crunch_simps) +crunch irq_node'[wp]: setPriority "\s. P (irq_node' s)" (simp: crunch_simps) + +end diff --git a/proof/refine/AARCH64/Init_R.thy b/proof/refine/AARCH64/Init_R.thy new file mode 100644 index 0000000000..ce9e5cbc2b --- /dev/null +++ b/proof/refine/AARCH64/Init_R.thy @@ -0,0 +1,136 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2021, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Init_R +imports + KHeap_R + +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +(* + This provides a very simple witness that the state relation used in the first refinement proof is + non-trivial, by exhibiting a pair of related states. This helps guard against silly mistakes in + the state relation, since we currently assume that the system starts in a state satisfying + invariants and state relations. + + Note that the states we exhibit are not intended to be useful states. They are just the simplest + possible states that prove non-triviality of the state relation. In particular, these states do + not satisfy the respective invariant conditions. In future, this could be improved by exhibiting + a tuple of more realistic states that are related across all levels of the refinement, and that + also satisfy respective invariant. Ultimately, we would like to prove functional correctness of + kernel initialisation. That would allow us to start from a minimal but real configuration that + would allow us to make a much smaller set of assumptions about the initial configuration of the + system. +*) + +definition zeroed_arch_abstract_state :: + arch_state + where + "zeroed_arch_abstract_state \ \ + arm_asid_table = Map.empty, + arm_kernel_vspace = K ArmVSpaceUserRegion, + arm_vmid_table = Map.empty, + arm_next_vmid = 0, + arm_us_global_vspace = 0, + arm_current_vcpu = None, + arm_gicvcpu_numlistregs = 0 + \" + +definition zeroed_main_abstract_state :: + abstract_state + where + "zeroed_main_abstract_state \ \ + kheap = Map.empty, + cdt = Map.empty, + is_original_cap = \, + cur_thread = 0, + idle_thread = 0, + machine_state = init_machine_state, + interrupt_irq_node = (\irq. 
ucast irq << cte_level_bits), + interrupt_states = (K irq_state.IRQInactive), + arch_state = zeroed_arch_abstract_state + \" + +definition zeroed_extended_state :: + det_ext + where + "zeroed_extended_state \ \ + work_units_completed_internal = 0, + scheduler_action_internal = resume_cur_thread, + ekheap_internal = Map.empty, + domain_list_internal = [], + domain_index_internal = 0, + cur_domain_internal = 0, + domain_time_internal = 0, + ready_queues_internal = (\_ _. []), + cdt_list_internal = K [] + \" + +definition zeroed_abstract_state :: + det_state + where + "zeroed_abstract_state \ abstract_state.extend zeroed_main_abstract_state + (state.fields zeroed_extended_state)" + +definition zeroed_arch_intermediate_state :: + Arch.kernel_state + where + "zeroed_arch_intermediate_state \ + ARMKernelState Map.empty (K ArmVSpaceUserRegion) + Map.empty 0 0 None 0 Map.empty" + +definition zeroed_intermediate_state :: + global.kernel_state + where + "zeroed_intermediate_state \ \ + ksPSpace = Map.empty, + gsUserPages = Map.empty, + gsCNodes = Map.empty, + gsUntypedZeroRanges = {}, + gsMaxObjectSize = 0, + ksDomScheduleIdx = 0, + ksDomSchedule = [], + ksCurDomain = 0, + ksDomainTime = 0, + ksReadyQueues = K (TcbQueue None None), + ksReadyQueuesL1Bitmap = K 0, + ksReadyQueuesL2Bitmap = K 0, + ksCurThread = 0, + ksIdleThread = 0, + ksSchedulerAction = ResumeCurrentThread, + ksInterruptState = (InterruptState 0 (K IRQInactive)), + ksWorkUnitsCompleted = 0, + ksArchState = zeroed_arch_intermediate_state, + ksMachineState = init_machine_state + \" + +lemmas zeroed_state_defs = zeroed_main_abstract_state_def zeroed_abstract_state_def + zeroed_arch_abstract_state_def zeroed_extended_state_def + zeroed_intermediate_state_def abstract_state.defs + zeroed_arch_intermediate_state_def + +lemma non_empty_refine_state_relation: + "(zeroed_abstract_state, zeroed_intermediate_state) \ state_relation" + apply (clarsimp simp: state_relation_def zeroed_state_defs state.defs) + apply (intro conjI) + apply (clarsimp simp: pspace_relation_def pspace_dom_def) + apply (clarsimp simp: ekheap_relation_def) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def queue_end_valid_def + opt_pred_def list_queue_relation_def tcbQueueEmpty_def + prev_queue_head_def) + apply (clarsimp simp: ghost_relation_def) + apply (fastforce simp: cdt_relation_def swp_def dest: cte_wp_at_domI) + apply (clarsimp simp: cdt_list_relation_def map_to_ctes_def) + apply (clarsimp simp: revokable_relation_def map_to_ctes_def) + apply (clarsimp simp: zeroed_state_defs arch_state_relation_def) + apply (clarsimp simp: interrupt_state_relation_def irq_state_relation_def cte_level_bits_def) + done + +end +end diff --git a/proof/refine/AARCH64/InterruptAcc_R.thy b/proof/refine/AARCH64/InterruptAcc_R.thy new file mode 100644 index 0000000000..a377906d78 --- /dev/null +++ b/proof/refine/AARCH64/InterruptAcc_R.thy @@ -0,0 +1,170 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory InterruptAcc_R +imports TcbAcc_R +begin + +lemma getIRQSlot_corres: + "corres (\sl sl'. 
sl' = cte_map sl) \ \ (get_irq_slot irq) (getIRQSlot irq)" + apply (simp add: getIRQSlot_def get_irq_slot_def locateSlot_conv + liftM_def[symmetric]) + apply (simp add: getInterruptState_def) + apply (clarsimp simp: state_relation_def interrupt_state_relation_def) + apply (simp add: cte_map_def cte_level_bits_def + ucast_nat_def shiftl_t2n) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma setIRQState_corres: + "irq_state_relation state state' \ + corres dc \ \ (set_irq_state state irq) (setIRQState state' irq)" + apply (simp add: set_irq_state_def setIRQState_def + bind_assoc[symmetric]) + apply (subgoal_tac "(state = irq_state.IRQInactive) = (state' = irqstate.IRQInactive)") + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (simp add: getInterruptState_def setInterruptState_def + simpler_gets_def simpler_modify_def bind_def) + apply (simp add: simpler_modify_def[symmetric]) + apply (rule corres_trivial, rule corres_modify) + apply (simp add: state_relation_def swp_def) + apply (clarsimp simp: interrupt_state_relation_def) + apply (rule corres_machine_op) + apply (rule corres_Id | simp)+ + apply wpsimp+ + apply (clarsimp simp: irq_state_relation_def + split: irq_state.split_asm irqstate.split_asm) + done + +lemma setIRQState_invs[wp]: + "\\s. invs' s \ (state \ IRQSignal \ IRQHandlerCap irq \ ran (cteCaps_of s)) \ + (state \ IRQInactive \ irq \ maxIRQ)\ + setIRQState state irq + \\rv. invs'\" + apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def) + apply (wp dmo_maskInterrupt) + apply (clarsimp simp: invs'_def valid_state'_def cur_tcb'_def + valid_idle'_def valid_irq_node'_def + valid_arch_state'_def valid_global_refs'_def + global_refs'_def valid_machine_state'_def + if_unsafe_then_cap'_def ex_cte_cap_to'_def + valid_irq_handlers'_def irq_issued'_def + cteCaps_of_def valid_irq_masks'_def + bitmapQ_defs valid_bitmaps_def + split: option.splits) + apply (rule conjI, clarsimp) + apply (clarsimp simp: irqs_masked'_def ct_not_inQ_def) + apply (rule conjI, fastforce) + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (rule conjI, clarsimp) + apply (clarsimp simp: irqs_masked'_def ct_not_inQ_def) + apply (rule conjI) + apply fastforce + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + done + +lemma getIRQSlot_real_cte[wp]: + "\invs'\ getIRQSlot irq \real_cte_at'\" + apply (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def valid_irq_node'_def + cte_level_bits_def ucast_nat_def cteSizeBits_def shiftl_t2n) + done + +lemma getIRQSlot_cte_at[wp]: + "\invs'\ getIRQSlot irq \cte_at'\" + apply (rule hoare_strengthen_post [OF getIRQSlot_real_cte]) + apply (clarsimp simp: real_cte_at') + done + +lemma work_units_updated_state_relationI[intro!]: + "(s,s') \ state_relation \ + (work_units_completed_update (\_. 
work_units_completed s + 1) s, s'\ksWorkUnitsCompleted := ksWorkUnitsCompleted s' + 1\) \ state_relation" + apply (simp add: state_relation_def) + done + +lemma work_units_and_irq_state_state_relationI [intro!]: + "(s, s') \ state_relation \ + (s \ work_units_completed := n, machine_state := machine_state s \ irq_state := f (irq_state (machine_state s)) \\, + s' \ ksWorkUnitsCompleted := n, ksMachineState := ksMachineState s' \ irq_state := f (irq_state (ksMachineState s')) \\) + \ state_relation" + by (simp add: state_relation_def swp_def) + +lemma preemptionPoint_corres: + "corres (dc \ dc) \ \ preemption_point preemptionPoint" + apply (simp add: preemption_point_def preemptionPoint_def) + by (auto simp: preemption_point_def preemptionPoint_def o_def gets_def liftE_def whenE_def getActiveIRQ_def + corres_underlying_def select_def bind_def get_def bindE_def select_f_def modify_def + alternative_def throwError_def returnOk_def return_def lift_def doMachineOp_def split_def + put_def getWorkUnits_def setWorkUnits_def modifyWorkUnits_def do_machine_op_def + update_work_units_def wrap_ext_bool_det_ext_ext_def work_units_limit_def workUnitsLimit_def + work_units_limit_reached_def OR_choiceE_def reset_work_units_def mk_ef_def + elim: state_relationE) + (* what? *) + (* who says our proofs are not automatic.. *) + +lemma preemptionPoint_inv: + assumes "(\f s. P (ksWorkUnitsCompleted_update f s) = P s)" + "irq_state_independent_H P" + shows "\P\ preemptionPoint \\_. P\" using assms + apply (simp add: preemptionPoint_def setWorkUnits_def getWorkUnits_def modifyWorkUnits_def) + apply (wpc + | wp whenE_wp bind_wp [OF _ select_inv] hoare_drop_imps + | simp)+ + done + +lemma ct_running_irq_state_independent[intro!, simp]: + "ct_running (s \machine_state := machine_state s \irq_state := f (irq_state (machine_state s)) \ \) + = ct_running s" + by (simp add: ct_in_state_def) + +lemma ct_idle_irq_state_independent[intro!, simp]: + "ct_idle (s \machine_state := machine_state s \irq_state := f (irq_state (machine_state s)) \ \) + = ct_idle s" + by (simp add: ct_in_state_def) + +lemma typ_at'_irq_state_independent[simp, intro!]: + "P (typ_at' T p (s \ksMachineState := ksMachineState s \ irq_state := f (irq_state (ksMachineState s)) \\)) + = P (typ_at' T p s)" + by (simp add: typ_at'_def) + +lemma sch_act_simple_irq_state_independent[intro!, simp]: + "sch_act_simple (s \ ksMachineState := ksMachineState s \ irq_state := f (irq_state (ksMachineState s)) \ \) = + sch_act_simple s" + by (simp add: sch_act_simple_def) + +lemma invs'_irq_state_independent [simp, intro!]: + "invs' (s\ksMachineState := ksMachineState s + \irq_state := f (irq_state (ksMachineState s))\\) = + invs' s" + apply (clarsimp simp: irq_state_independent_H_def invs'_def valid_state'_def + valid_pspace'_def sch_act_wf_def + valid_queues_def sym_refs_def state_refs_of'_def + if_live_then_nonz_cap'_def if_unsafe_then_cap'_def + valid_idle'_def valid_global_refs'_def + valid_arch_state'_def valid_irq_node'_def + valid_irq_handlers'_def valid_irq_states'_def + irqs_masked'_def bitmapQ_defs valid_bitmaps_def + pspace_domain_valid_def cur_tcb'_def + valid_machine_state'_def tcb_in_cur_domain'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def + cong: if_cong option.case_cong) + apply (rule iffI[rotated]) + apply (clarsimp) + apply (case_tac "ksSchedulerAction s", simp_all) + apply clarsimp + apply (case_tac "ksSchedulerAction s", simp_all) + done + +lemma preemptionPoint_invs [wp]: + "\invs'\ preemptionPoint \\_. 
invs'\" + by (wp preemptionPoint_inv | clarsimp)+ + +end +end diff --git a/proof/refine/AARCH64/Interrupt_R.thy b/proof/refine/AARCH64/Interrupt_R.thy new file mode 100644 index 0000000000..fb63b89d88 --- /dev/null +++ b/proof/refine/AARCH64/Interrupt_R.thy @@ -0,0 +1,1153 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Refinement for interrupt controller operations +*) + +theory Interrupt_R +imports Ipc_R Invocations_R +begin + +context Arch begin + +(*FIXME: arch_split: move up *) +requalify_types + irqcontrol_invocation + +lemmas [crunch_def] = decodeIRQControlInvocation_def performIRQControl_def + +context begin global_naming global + +(*FIXME: arch_split: move up *) +requalify_types + Invocations_H.irqcontrol_invocation + +(*FIXME: arch_split*) +requalify_facts + Interrupt_H.decodeIRQControlInvocation_def + Interrupt_H.performIRQControl_def + +end +end + +primrec + irq_handler_inv_relation :: "irq_handler_invocation \ irqhandler_invocation \ bool" +where + "irq_handler_inv_relation (Invocations_A.ACKIrq irq) x = (x = AckIRQ irq)" +| "irq_handler_inv_relation (Invocations_A.ClearIRQHandler irq) x = (x = ClearIRQHandler irq)" +| "irq_handler_inv_relation (Invocations_A.SetIRQHandler irq cap ptr) x = + (\cap'. x = SetIRQHandler irq cap' (cte_map ptr) \ cap_relation cap cap')" + +primrec + arch_irq_control_inv_relation :: "arch_irq_control_invocation \ Arch.irqcontrol_invocation \ bool" +where + "arch_irq_control_inv_relation (AARCH64_A.ARMIRQControlInvocation i ptr ptr' t) x = + (x = AARCH64_H.IssueIRQHandler i (cte_map ptr) (cte_map ptr') t)" + +primrec + irq_control_inv_relation :: "irq_control_invocation \ irqcontrol_invocation \ bool" +where + "irq_control_inv_relation (Invocations_A.IRQControl irq slot slot') x + = (x = IssueIRQHandler irq (cte_map slot) (cte_map slot'))" +| "irq_control_inv_relation (Invocations_A.ArchIRQControl ivk) x + = (\ivk'. x = ArchIRQControl ivk' \ arch_irq_control_inv_relation ivk ivk')" + +primrec + irq_handler_inv_valid' :: "irqhandler_invocation \ kernel_state \ bool" +where + "irq_handler_inv_valid' (AckIRQ irq) = (\s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive)" +| "irq_handler_inv_valid' (ClearIRQHandler irq) = \" +| "irq_handler_inv_valid' (SetIRQHandler irq cap cte_ptr) + = (valid_cap' cap and valid_cap' (IRQHandlerCap irq) + and K (isNotificationCap cap) + and cte_wp_at' (badge_derived' cap \ cteCap) cte_ptr + and (\s. \ptr'. cte_wp_at' (\cte. cteCap cte = IRQHandlerCap irq) ptr' s) + and ex_cte_cap_wp_to' isCNodeCap cte_ptr)" + +primrec + arch_irq_control_inv_valid' :: "Arch.irqcontrol_invocation \ kernel_state \ bool" +where + "arch_irq_control_inv_valid' (AARCH64_H.IssueIRQHandler irq ptr ptr' t) = + (cte_wp_at' (\cte. cteCap cte = NullCap) ptr and + cte_wp_at' (\cte. cteCap cte = IRQControlCap) ptr' and + ex_cte_cap_to' ptr and real_cte_at' ptr and + (Not o irq_issued' irq) and K (irq \ maxIRQ))" + +primrec + irq_control_inv_valid' :: "irqcontrol_invocation \ kernel_state \ bool" +where + "irq_control_inv_valid' (ArchIRQControl ivk) = arch_irq_control_inv_valid' ivk" +| "irq_control_inv_valid' (IssueIRQHandler irq ptr ptr') = + (cte_wp_at' (\cte. cteCap cte = NullCap) ptr and + cte_wp_at' (\cte. cteCap cte = IRQControlCap) ptr' and + ex_cte_cap_to' ptr and real_cte_at' ptr and + (Not o irq_issued' irq) and K (irq \ maxIRQ))" + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma decodeIRQHandlerInvocation_corres: + "\ list_all2 cap_relation (map fst caps) (map fst caps'); + list_all2 (\p pa. snd pa = cte_map (snd p)) caps caps' \ \ + corres (ser \ irq_handler_inv_relation) invs invs' + (decode_irq_handler_invocation label irq caps) + (decodeIRQHandlerInvocation label irq caps')" + apply (simp add: decode_irq_handler_invocation_def decodeIRQHandlerInvocation_def + split del: if_split) + apply (cases caps) + apply (simp add: returnOk_def split: invocation_label.split gen_invocation_labels.split list.splits split del: if_split) + defer + apply (clarsimp simp: list_all2_Cons1 split del: if_split) + apply (simp add: returnOk_def split: invocation_label.split gen_invocation_labels.split list.splits) + apply (clarsimp split: cap_relation_split_asm arch_cap.split_asm simp: returnOk_def) + done + +crunch inv[wp]: decodeIRQHandlerInvocation "P" + (simp: crunch_simps) + +lemma decode_irq_handler_valid'[wp]: + "\\s. invs' s \ (\cap \ set caps. s \' fst cap) + \ (\ptr'. cte_wp_at' (\cte. cteCap cte = IRQHandlerCap irq) ptr' s) + \ (\cap \ set caps. \r \ cte_refs' (fst cap) (irq_node' s). ex_cte_cap_to' r s) + \ (\cap \ set caps. ex_cte_cap_wp_to' isCNodeCap (snd cap) s) + \ (\cap \ set caps. cte_wp_at' (badge_derived' (fst cap) \ cteCap) (snd cap) s) + \ s \' IRQHandlerCap irq\ + decodeIRQHandlerInvocation label irq caps + \irq_handler_inv_valid'\,-" + apply (simp add: decodeIRQHandlerInvocation_def Let_def split_def + split del: if_split) + apply (rule hoare_pre) + apply (wp | wpc | simp)+ + apply (clarsimp simp: neq_Nil_conv isCap_simps) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_irq_handlers_ctes_ofD) + apply (simp add: invs'_def valid_state'_def) + apply (simp add: irq_issued'_def) + apply clarsimp + done + +lemma is_irq_active_corres: + "corres (=) \ \ (is_irq_active irq) (isIRQActive irq)" + apply (simp add: is_irq_active_def isIRQActive_def get_irq_state_def + getIRQState_def getInterruptState_def) + apply (clarsimp simp: state_relation_def interrupt_state_relation_def) + apply (drule_tac x=irq in spec)+ + apply (simp add: irq_state_relation_def + split: irqstate.split_asm irq_state.split_asm) + done + +crunch inv: isIRQActive "P" + +lemma isIRQActive_wp: + "\\s. \rv. (irq_issued' irq s \ rv) \ Q rv s\ isIRQActive irq \Q\" + apply (simp add: isIRQActive_def getIRQState_def + getInterruptState_def) + apply wp + apply (clarsimp simp: irq_issued'_def) + done + +lemma checkIRQ_corres: + "corres (ser \ dc) \ \ (arch_check_irq irq) (checkIRQ irq)" + unfolding arch_check_irq_def checkIRQ_def rangeCheck_def + apply (rule corres_guard_imp) + apply (clarsimp simp: minIRQ_def unlessE_whenE not_le) + apply (rule corres_whenE) + apply (fastforce simp: ucast_nat_def)+ + done + +lemma whenE_rangeCheck_eq: + "(rangeCheck (x :: 'a :: {linorder, integral}) y z) = + (whenE (x < fromIntegral y \ fromIntegral z < x) + (throwError (RangeError (fromIntegral y) (fromIntegral z))))" + by (simp add: rangeCheck_def unlessE_whenE linorder_not_le[symmetric]) + +lemmas irq_const_defs = maxIRQ_def minIRQ_def + +crunches arch_check_irq, checkIRQ + for inv: "P" + (simp: crunch_simps) + +lemma arch_check_irq_valid: + "\\\ arch_check_irq y \\_. (\s. unat y \ unat maxIRQ)\, -" + unfolding arch_check_irq_def + apply (wpsimp simp: validE_R_def wp: whenE_throwError_wp) + by (meson le_trans unat_ucast_le word_le_not_less word_less_eq_iff_unsigned) + +lemma arch_check_irq_valid': + "\\\ arch_check_irq y \\_ _. 
unat y \ unat maxIRQ\, \\_. \\" + by (wp arch_check_irq_valid) + +lemma arch_decodeIRQControlInvocation_corres: + "list_all2 cap_relation caps caps' \ + corres (ser \ arch_irq_control_inv_relation) + (invs and (\s. \cp \ set caps. s \ cp)) + (invs' and (\s. \cp \ set caps'. s \' cp)) + (arch_decode_irq_control_invocation label args slot caps) + (AARCH64_H.decodeIRQControlInvocation label args (cte_map slot) caps')" + apply (clarsimp simp: arch_decode_irq_control_invocation_def + AARCH64_H.decodeIRQControlInvocation_def Let_def) + apply (rule conjI; clarsimp) + prefer 2 + apply (cases caps + ; fastforce split: arch_invocation_label.splits list.splits invocation_label.splits + simp: length_Suc_conv list_all2_Cons1 whenE_rangeCheck_eq liftE_bindE) + apply (cases caps, simp split: list.split) + apply (case_tac "\n. length args = Suc (Suc (Suc (Suc n)))", + clarsimp simp: length_Suc_conv list_all2_Cons1 whenE_rangeCheck_eq liftE_bindE) + prefer 2 apply (fastforce split: list.split) + \\ARMIRQIssueIRQHandler\ + apply (rule conjI, clarsimp) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF checkIRQ_corres]) + apply (rule_tac F="unat y \ unat maxIRQ" in corres_gen_asm) + apply (clarsimp simp add: minIRQ_def maxIRQ_def ucast_nat_def) + apply (rule corres_split_eqr[OF is_irq_active_corres]) + apply (rule whenE_throwError_corres, clarsimp, clarsimp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; clarsimp) + apply (rule corres_splitEE[OF ensureEmptySlot_corres], simp) + apply (rule corres_returnOkTT) + apply (clarsimp simp: arch_irq_control_inv_relation_def) + apply (wp del: arch_check_irq_inv + | wpsimp wp: isIRQActive_inv checkIRQ_inv arch_check_irq_valid' + simp: invs_valid_objs invs_psp_aligned invs_valid_objs' + invs_pspace_aligned' invs_pspace_distinct' + | strengthen invs_valid_objs invs_psp_aligned + | wp (once) hoare_drop_imps arch_check_irq_inv)+ + apply (auto split: arch_invocation_label.splits invocation_label.splits) + done + +lemma irqhandler_simp[simp]: + "gen_invocation_type label \ IRQIssueIRQHandler \ + (case gen_invocation_type label of IRQIssueIRQHandler \ b | _ \ c) = c" + by (clarsimp split: gen_invocation_labels.splits) + +lemma decodeIRQControlInvocation_corres: + "list_all2 cap_relation caps caps' \ + corres (ser \ irq_control_inv_relation) + (invs and (\s. \cp \ set caps. s \ cp)) (invs' and (\s. \cp \ set caps'. s \' cp)) + (decode_irq_control_invocation label args slot caps) + (decodeIRQControlInvocation label args (cte_map slot) caps')" + apply (clarsimp simp: decode_irq_control_invocation_def decodeIRQControlInvocation_def + arch_check_irq_def AARCH64_H.checkIRQ_def + split del: if_split cong: if_cong) + apply clarsimp + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (cases caps, simp split: list.split) + apply (case_tac "\n. 
length args = Suc (Suc (Suc n))") + apply (clarsimp simp: list_all2_Cons1 Let_def split_def liftE_bindE + length_Suc_conv checkIRQ_def) + defer + apply (prop_tac "length args \ 2", arith) + apply (clarsimp split: list.split) + apply (simp add: minIRQ_def o_def) + apply (auto intro!: corres_guard_imp[OF arch_decodeIRQControlInvocation_corres])[1] + apply (auto intro!: corres_guard_imp[OF arch_decodeIRQControlInvocation_corres] + dest!: not_le_imp_less + simp: minIRQ_def o_def length_Suc_conv whenE_rangeCheck_eq ucast_nat_def + split: list.splits)[1] + apply (rule corres_guard_imp) + apply (simp add: whenE_rangeCheck_eq) + apply (rule whenE_throwError_corres, clarsimp, fastforce) + apply (rule_tac F="unat y \ unat maxIRQ" in corres_gen_asm) + apply (clarsimp simp add: minIRQ_def maxIRQ_def ucast_nat_def) + apply (rule corres_split_eqr[OF is_irq_active_corres]) + apply (rule whenE_throwError_corres, clarsimp, clarsimp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; clarsimp) + apply (rule corres_splitEE[OF ensureEmptySlot_corres], simp) + apply (rule corres_returnOkTT) + apply (clarsimp simp: arch_irq_control_inv_relation_def) + apply (wpsimp wp: isIRQActive_inv arch_check_irq_valid' checkIRQ_inv + simp: invs_valid_objs invs_psp_aligned invs_valid_objs' + invs_pspace_aligned' invs_pspace_distinct' + | strengthen invs_valid_objs invs_psp_aligned + | wp (once) hoare_drop_imps arch_check_irq_inv)+ + apply (auto split: arch_invocation_label.splits invocation_label.splits + simp: not_less unat_le_helper) + done + +crunch inv[wp]: "InterruptDecls_H.decodeIRQControlInvocation" "P" + (simp: crunch_simps wp: crunch_wps) + +(* Levity: added (20090201 10:50:27) *) +declare ensureEmptySlot_stronger [wp] + +lemma arch_decode_irq_control_valid'[wp]: + "\\s. invs' s \ (\cap \ set caps. s \' cap) + \ (\cap \ set caps. \r \ cte_refs' cap (irq_node' s). ex_cte_cap_to' r s) + \ cte_wp_at' (\cte. cteCap cte = IRQControlCap) slot s\ + AARCH64_H.decodeIRQControlInvocation label args slot caps + \arch_irq_control_inv_valid'\,-" + apply (clarsimp simp add: AARCH64_H.decodeIRQControlInvocation_def Let_def split_def + rangeCheck_def unlessE_whenE + split del: if_split + cong: if_cong list.case_cong prod.case_cong arch_invocation_label.case_cong) + apply (rule hoare_pre) + apply (simp add: rangeCheck_def unlessE_whenE checkIRQ_def + cong: list.case_cong prod.case_cong + | wp whenE_throwError_wp isIRQActive_wp ensureEmptySlot_stronger + | wpc + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: invs_valid_objs' irq_const_defs unat_word_ariths word_le_nat_alt + not_less unat_le_helper unat_of_nat unat_ucast_mask) + apply (meson le_trans word_and_le2 word_less_eq_iff_unsigned) + done + +lemma decode_irq_control_valid'[wp]: + "\\s. invs' s \ (\cap \ set caps. s \' cap) + \ (\cap \ set caps. \r \ cte_refs' cap (irq_node' s). ex_cte_cap_to' r s) + \ cte_wp_at' (\cte. 
cteCap cte = IRQControlCap) slot s\ + decodeIRQControlInvocation label args slot caps + \irq_control_inv_valid'\,-" + apply (simp add: decodeIRQControlInvocation_def Let_def split_def checkIRQ_def + rangeCheck_def unlessE_whenE + split del: if_split cong: if_cong list.case_cong + gen_invocation_labels.case_cong) + apply (wpsimp wp: ensureEmptySlot_stronger isIRQActive_wp whenE_throwError_wp + simp: o_def + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: invs_valid_objs' irq_const_defs unat_word_ariths word_le_nat_alt + not_less unat_le_helper unat_of_nat unat_ucast_mask) + apply (meson le_trans word_and_le2 word_less_eq_iff_unsigned) + done + +lemma valid_globals_ex_cte_cap_irq: + "\ ex_cte_cap_wp_to' isCNodeCap ptr s; valid_global_refs' s; + valid_objs' s \ + \ ptr \ intStateIRQNode (ksInterruptState s) + 2 ^ cte_level_bits * ucast (irq :: irq)" + apply (clarsimp simp: cte_wp_at_ctes_of ex_cte_cap_wp_to'_def) + apply (drule(1) ctes_of_valid'[rotated]) + apply (drule(1) valid_global_refsD') + apply (drule subsetD[rotated], erule cte_refs_capRange) + apply (clarsimp simp: isCap_simps) + apply (subgoal_tac "irq_node' s + 2 ^ cte_level_bits * ucast irq \ global_refs' s") + apply blast + apply (simp add: global_refs'_def cte_level_bits_def cteSizeBits_def shiftl_t2n mult.commute mult.left_commute) + done + +lemma no_fail_plic_complete_claim [simp, wp]: + "no_fail \ (AARCH64.plic_complete_claim irw)" + unfolding AARCH64.plic_complete_claim_def + by (rule no_fail_machine_op_lift) + +lemma arch_invokeIRQHandler_corres: + "irq_handler_inv_relation i i' \ + corres dc \ \ (arch_invoke_irq_handler i) (AARCH64_H.invokeIRQHandler i')" + apply (cases i; clarsimp simp: AARCH64_H.invokeIRQHandler_def) + apply (rule corres_machine_op, rule corres_Id; simp?) + done + + +lemma invokeIRQHandler_corres: + "irq_handler_inv_relation i i' \ + corres dc (einvs and irq_handler_inv_valid i) + (invs' and irq_handler_inv_valid' i') + (invoke_irq_handler i) + (InterruptDecls_H.invokeIRQHandler i')" + supply arch_invoke_irq_handler.simps[simp del] + apply (cases i; simp add: Interrupt_H.invokeIRQHandler_def) + apply (rule corres_guard_imp, rule arch_invokeIRQHandler_corres; simp) + apply (rename_tac word cap prod) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQSlot_corres]) + apply simp + apply (rule corres_split_nor[OF cap_delete_one_corres]) + apply (rule cteInsert_corres, simp+) + apply (rule_tac Q="\rv s. einvs s \ cte_wp_at (\c. c = cap.NullCap) irq_slot s + \ (a, b) \ irq_slot + \ cte_wp_at (is_derived (cdt s) (a, b) cap) (a, b) s" + in hoare_post_imp) + apply fastforce + apply (wp cap_delete_one_still_derived)+ + apply (strengthen invs_mdb_strengthen') + apply wp+ + apply (simp add: conj_comms eq_commute) + apply (wp get_irq_slot_different hoare_drop_imps)+ + apply (clarsimp simp: valid_state_def invs_def) + apply (erule cte_wp_at_weakenE, simp add: is_derived_use_interrupt) + apply fastforce + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQSlot_corres]) + apply simp + apply (rule cap_delete_one_corres) + apply wp+ + apply simp+ + done + +lemma ntfn_badge_derived_enough_strg: + "cte_wp_at' (\cte. isNotificationCap cap \ badge_derived' cap (cteCap cte)) ptr s + \ cte_wp_at' (is_derived' ctes ptr cap \ cteCap) ptr s" + by (clarsimp simp: cte_wp_at_ctes_of isCap_simps + badge_derived'_def is_derived'_def) + +lemma cteDeleteOne_ex_cte_cap_to'[wp]: + "\ex_cte_cap_wp_to' P p\ cteDeleteOne ptr \\rv. 
ex_cte_cap_wp_to' P p\" + apply (simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF cteDeleteOne_irq_node']) + apply (wp hoare_vcg_ex_lift cteDeleteOne_cte_wp_at_preserved) + apply (case_tac cap, simp_all add: finaliseCap_def isCap_simps) + done + +lemma cteDeleteOne_other_cap: + "\(\s. cte_wp_at' (P o cteCap) p s) and K (p \ p')\ + cteDeleteOne p' + \\rv s. cte_wp_at' (P o cteCap) p s\" + apply (rule hoare_gen_asm) + apply (simp add: tree_cte_cteCap_eq) + apply (wp cteDeleteOne_cteCaps_of) + apply simp + done + +lemma isnt_irq_handler_strg: + "(\ isIRQHandlerCap cap) \ (\irq. cap = IRQHandlerCap irq \ P irq)" + by (clarsimp simp: isCap_simps) + +lemma dmo_plic_complete_claim_invs'[wp]: + "doMachineOp (AARCH64.plic_complete_claim irq) \invs'\" + apply (wp dmo_invs') + apply (clarsimp simp: in_monad AARCH64.plic_complete_claim_def machine_op_lift_def machine_rest_lift_def select_f_def) + done + +lemma doMachineOp_maskInterrupt_False[wp]: + "\ \s. invs' s \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive \ + doMachineOp (maskInterrupt False irq) + \\_. invs'\" + apply (wp dmo_maskInterrupt) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (simp add: valid_irq_masks'_def valid_machine_state'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + done + +lemma invoke_arch_irq_handler_invs'[wp]: + "\invs' and irq_handler_inv_valid' i\ AARCH64_H.invokeIRQHandler i \\rv. invs'\" + by (cases i; wpsimp simp: AARCH64_H.invokeIRQHandler_def) + +lemma invoke_irq_handler_invs'[wp]: + "\invs' and irq_handler_inv_valid' i\ + InterruptDecls_H.invokeIRQHandler i \\rv. invs'\" + apply (cases i; simp add: Interrupt_H.invokeIRQHandler_def) + apply wpsimp + apply (wp cteInsert_invs)+ + apply (strengthen ntfn_badge_derived_enough_strg isnt_irq_handler_strg) + apply (wp cteDeleteOne_other_cap cteDeleteOne_other_cap[unfolded o_def]) + apply (rename_tac word1 cap word2) + apply (simp add: getInterruptState_def getIRQSlot_def locateSlot_conv) + apply wp + apply (rename_tac word1 cap word2 s) + apply (clarsimp simp: ucast_nat_def) + apply (drule_tac irq=word1 in valid_globals_ex_cte_cap_irq) + apply clarsimp+ + apply (clarsimp simp: cte_wp_at_ctes_of ex_cte_cap_to'_def + isCap_simps untyped_derived_eq_def) + apply (fastforce simp: cte_level_bits_def cteSizeBits_def shiftl_t2n)+ + done + +lemma IRQHandler_valid': + "(s' \' IRQHandlerCap irq) = (irq \ maxIRQ)" + by (simp add: valid_cap'_def capAligned_def word_bits_conv) + +crunch valid_mdb'[wp]: setIRQState "valid_mdb'" + +method do_machine_op_corres + = (rule corres_machine_op, rule corres_Id, rule refl, simp, wp) + +lemma no_fail_setIRQTrigger: "no_fail \ (setIRQTrigger irq trig)" + by (simp add: setIRQTrigger_def) + +lemma setIRQTrigger_corres: + "corres dc \ \ (do_machine_op (setIRQTrigger irq t)) (doMachineOp (setIRQTrigger irq t))" + apply (rule corres_machine_op) + apply (rule corres_guard_imp) + apply (rule corres_rel_imp) + apply (wp + | rule corres_underlying_trivial + | rule no_fail_setIRQTrigger + | simp add: dc_def)+ + done + +lemma arch_performIRQControl_corres: + "arch_irq_control_inv_relation x2 ivk' \ corres (dc \ dc) + (einvs and arch_irq_control_inv_valid x2) + (invs' and arch_irq_control_inv_valid' ivk') + (arch_invoke_irq_control x2) + (Arch.performIRQControl ivk')" + apply (cases x2; simp add: AARCH64_H.performIRQControl_def invoke_irq_control.cases IRQ_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (rule 
setIRQTrigger_corres) + apply (rule corres_split_nor) + apply (rule setIRQState_corres) + apply (simp add: irq_state_relation_def) + apply (rule cteInsert_simple_corres; simp) + apply (wp | simp add: irq_state_relation_def IRQHandler_valid IRQHandler_valid')+ + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def cte_wp_at_caps_of_state + is_simple_cap_def is_cap_simps arch_irq_control_inv_valid_def + safe_parent_for_def is_simple_cap_arch_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def IRQHandler_valid + IRQHandler_valid' is_simple_cap'_def isCap_simps IRQ_def) + apply (clarsimp simp: safe_parent_for'_def cte_wp_at_ctes_of) + apply (case_tac ctea) + apply (clarsimp simp: isCap_simps sameRegionAs_def3) + apply (auto dest: valid_irq_handlers_ctes_ofD)[1] + done + +lemma performIRQControl_corres: + "irq_control_inv_relation i i' \ + corres (dc \ dc) (einvs and irq_control_inv_valid i) + (invs' and irq_control_inv_valid' i') + (invoke_irq_control i) + (performIRQControl i')" + apply (cases i, simp_all add: performIRQControl_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF setIRQState_corres]) + apply (simp add: irq_state_relation_def) + apply (rule cteInsert_simple_corres) + apply (wp | simp add: IRQHandler_valid IRQHandler_valid')+ + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def + cte_wp_at_caps_of_state is_simple_cap_def is_simple_cap_arch_def + is_cap_simps safe_parent_for_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + IRQHandler_valid IRQHandler_valid' is_simple_cap'_def + isCap_simps) + apply (clarsimp simp: safe_parent_for'_def cte_wp_at_ctes_of) + apply (case_tac ctea) + apply (clarsimp simp: isCap_simps sameRegionAs_def3) + apply (auto dest: valid_irq_handlers_ctes_ofD)[1] + by (clarsimp simp: arch_performIRQControl_corres) + +crunch valid_cap'[wp]: setIRQState "valid_cap' cap" + +lemma setIRQState_cte_cap_to'[wp]: + "\ex_cte_cap_to' p\ setIRQState st irq \\_. ex_cte_cap_to' p\" + apply (simp add: setIRQState_def doMachineOp_def + split_def setInterruptState_def getInterruptState_def) + apply wp + apply (clarsimp simp: ex_cte_cap_to'_def) + done + +lemma setIRQState_issued[wp]: + "\K (st = IRQSignal)\ setIRQState st irq \\rv. irq_issued' irq\" + apply (simp add: setIRQState_def irq_issued'_def setInterruptState_def + getInterruptState_def) + apply wp + apply clarsimp + done + +lemma dmo_setIRQTrigger_invs'[wp]: + "\invs'\ doMachineOp (setIRQTrigger irq t) \\r. invs'\" + apply (wp dmo_invs' no_irq_setIRQTrigger no_irq) + apply clarsimp + apply (drule_tac P4="\m'. underlying_memory m' p = underlying_memory m p" + in use_valid[where P=P and Q="\_. P" for P]) + apply (wpsimp simp: setIRQTrigger_def machine_op_lift_def machine_rest_lift_def split_def)+ + done + +lemma arch_invoke_irq_control_invs'[wp]: + "\invs' and arch_irq_control_inv_valid' i\ AARCH64_H.performIRQControl i \\rv. invs'\" + apply (simp add: AARCH64_H.performIRQControl_def) + apply (rule hoare_pre) + apply (wpsimp wp: cteInsert_simple_invs simp: cte_wp_at_ctes_of isCap_simps IRQ_def) + apply (clarsimp simp: cte_wp_at_ctes_of IRQHandler_valid' is_simple_cap'_def isCap_simps + safe_parent_for'_def sameRegionAs_def3) + apply (rule conjI, clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac ctea) + apply (auto dest: valid_irq_handlers_ctes_ofD + simp: invs'_def valid_state'_def IRQ_def) + done + +lemma invoke_irq_control_invs'[wp]: + "\invs' and irq_control_inv_valid' i\ performIRQControl i \\rv. 
invs'\" + apply (cases i, simp_all add: performIRQControl_def) + apply (rule hoare_pre) + apply (wp cteInsert_simple_invs | simp add: cte_wp_at_ctes_of)+ + apply (clarsimp simp: cte_wp_at_ctes_of IRQHandler_valid' + is_simple_cap'_def isCap_simps + safe_parent_for'_def sameRegionAs_def3) + apply (case_tac ctea) + apply (auto dest: valid_irq_handlers_ctes_ofD + simp: invs'_def valid_state'_def) + done + +lemma getIRQState_corres: + "corres irq_state_relation \ \ + (get_irq_state irq) (getIRQState irq)" + apply (simp add: get_irq_state_def getIRQState_def getInterruptState_def) + apply (clarsimp simp: state_relation_def interrupt_state_relation_def) + done + +lemma getIRQState_prop: + "\\s. P (intStateIRQTable (ksInterruptState s) irq)\ + getIRQState irq + \\rv s. P rv\" + apply (simp add: getIRQState_def getInterruptState_def) + apply wp + apply simp + done + +lemma decDomainTime_corres: + "corres dc \ \ dec_domain_time decDomainTime" + apply (simp add:dec_domain_time_def corres_underlying_def decDomainTime_def simpler_modify_def) + apply (clarsimp simp:state_relation_def) + done + +lemma thread_state_case_if: + "(case state of Structures_A.thread_state.Running \ f | _ \ g) = + (if state = Structures_A.thread_state.Running then f else g)" + by (case_tac state,auto) + +lemma threadState_case_if: + "(case state of Structures_H.thread_state.Running \ f | _ \ g) = + (if state = Structures_H.thread_state.Running then f else g)" + by (case_tac state,auto) + +lemma ready_qs_distinct_domain_time_update[simp]: + "ready_qs_distinct (domain_time_update f s) = ready_qs_distinct s" + by (clarsimp simp: ready_qs_distinct_def) + +lemma timerTick_corres: + "corres dc + (cur_tcb and valid_sched and pspace_aligned and pspace_distinct) invs' + timer_tick timerTick" + apply (simp add: timerTick_def timer_tick_def) + apply (simp add: thread_state_case_if threadState_case_if) + apply (rule_tac Q="cur_tcb and valid_sched and pspace_aligned and pspace_distinct" + and Q'=invs' + in corres_guard_imp) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply simp + apply (rule corres_split[OF getThreadState_corres]) + apply (rename_tac state state') + apply (rule corres_split[where r' = dc]) + apply (rule corres_if[where Q = \ and Q' = \]) + apply (case_tac state,simp_all)[1] + apply (rule_tac r'="(=)" in corres_split[OF ethreadget_corres]) + apply (simp add:etcb_relation_def) + apply (rename_tac ts ts') + apply (rule_tac R="1 < ts" in corres_cases) + apply (simp) + apply (unfold thread_set_time_slice_def) + apply (rule ethread_set_corres, simp+) + apply (clarsimp simp: etcb_relation_def) + apply simp + apply (rule corres_split[OF ethread_set_corres]) + apply (simp add: sch_act_wf_weak etcb_relation_def pred_conj_def)+ + apply (rule corres_split[OF tcbSchedAppend_corres], simp) + apply (rule rescheduleRequired_corres) + apply wp + apply ((wpsimp wp: tcbSchedAppend_sym_heap_sched_pointers + tcbSchedAppend_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply ((wp thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply wpsimp+ + apply (rule corres_when, simp) + apply (rule corres_split[OF decDomainTime_corres]) + apply (rule corres_split[OF getDomainTime_corres]) + apply (rule corres_when,simp) + apply (rule rescheduleRequired_corres) + apply (wp 
hoare_drop_imp)+ + apply (wpsimp simp: dec_domain_time_def) + apply (wpsimp simp: decDomainTime_def) + apply (wpsimp wp: hoare_weak_lift_imp threadSet_timeslice_invs + tcbSchedAppend_valid_objs' + threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf + rescheduleRequired_weak_sch_act_wf)+ + apply (strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_time_slice_valid_queues) + apply ((wpsimp wp: thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+)[1] + apply wpsimp + apply wpsimp + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs' + | wp (once) hoare_drop_imp)+)[1] + apply (wpsimp wp: gts_wp gts_wp')+ + apply (clarsimp simp: cur_tcb_def) + apply (frule valid_sched_valid_etcbs) + apply (frule (1) tcb_at_is_etcb_at) + apply (frule valid_sched_valid_queues) + apply (fastforce simp: pred_tcb_at_def obj_at_def valid_sched_weak_strg) + apply (clarsimp simp: etcb_at_def split: option.splits) + apply fastforce + apply (fastforce simp: valid_state'_def ct_not_inQ_def) + apply fastforce + done + +lemma corres_return_VGICMaintenance [corres]: + "corres ((=) o arch_fault_map) (K (a=b)) \ + (return (AARCH64_A.VGICMaintenance a)) (return (AARCH64_H.VGICMaintenance b))" + by simp + +lemmas corres_gets_numlistregs [corres] = corres_gets_gicvcpu_numlistregs (* FIXME AARCH64: de-duplicate *) + +lemmas corres_eq_trivial = corres_Id[where f = h and g = h for h, simplified] + +lemma countTrailingZeros_simp[simp]: + "countTrailingZeros = word_ctz" + unfolding countTrailingZeros_def word_ctz_def + by (simp add: to_bl_upt) + +crunches doMachineOp + for sch_act_ct_rq[wp]: "\s. P (ksSchedulerAction s) (ksCurThread s) (ksReadyQueues s)" + and pred_tcb_at'_ct[wp]: "\s. pred_tcb_at' proj test (ksCurThread s) s" + and ex_nonz_cap_to'[wp]: "\s. P (ex_nonz_cap_to' (ksCurThread s) s)" + +lemma dmo_wp_no_rest: + "\K((\s f. P s = (P (machine_state_update (machine_state_rest_update f) s)))) and P\ + do_machine_op (machine_op_lift f) + \\_. P\" + apply (simp add: do_machine_op_def machine_op_lift_def bind_assoc) + apply wpsimp + apply (clarsimp simp add: machine_rest_lift_def in_monad select_f_def ignore_failure_def) + apply (clarsimp split: if_splits) + apply (drule_tac x=s in spec) + apply (drule_tac x="\_. b" in spec) + apply simp + apply (erule rsubst[OF _ arg_cong[where f=P]]) + apply clarsimp + done + +lemma dmo_gets_wp: + "\\s. P (f (machine_state s)) s\ do_machine_op (gets f) \P\" + by (wpsimp simp: submonad_do_machine_op.gets) + +crunches vgicUpdateLR + for ksCurThread[wp]: "\s. P (ksCurThread s)" + +lemma virqType_eq[simp]: + "virqType = virq_type" + unfolding virqType_def virq_type_def + by simp + +lemma virqSetEOIIRQEN_eq[simp]: + "AARCH64_H.virqSetEOIIRQEN = AARCH64_A.virqSetEOIIRQEN" + unfolding virqSetEOIIRQEN_def AARCH64_A.virqSetEOIIRQEN_def + by auto + +lemma not_pred_tcb': + "(\pred_tcb_at' proj P t s) = (\tcb_at' t s \ pred_tcb_at' proj (\a. \P a) t s)" + by (auto simp: pred_tcb_at'_def obj_at'_def) + +lemma vgic_maintenance_corres [corres]: + "corres dc einvs + (\s. 
invs' s \ sch_act_not (ksCurThread s) s) + vgic_maintenance vgicMaintenance" +proof - + (* hoare_lift_Pf-style rules match too often, slowing down proof unless specialised *) + note vilr = hoare_lift_Pf2[where f=cur_thread and m="vgic_update_lr v i virq" for v i virq] + note vilr' = hoare_lift_Pf2[where f=ksCurThread and m="vgicUpdateLR v i virq" for v i virq] + note wplr = vilr[where P="st_tcb_at active"] + vilr[where P="ex_nonz_cap_to"] + note wplr' = vilr'[where P="sch_act_not"] + vilr'[where P="ex_nonz_cap_to'"] + vilr'[where P="st_tcb_at' simple'"] + show ?thesis + unfolding vgic_maintenance_def vgicMaintenance_def isRunnable_def Let_def + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_gets_current_vcpu], simp, rename_tac hsCurVCPU) + (* we only care about the one case we do something: active current vcpu *) + apply (rule_tac R="hsCurVCPU = None" in corres_cases') + apply (rule corres_trivial, simp) + apply (clarsimp, rename_tac vcpu_ptr active) + apply wpfix + apply (rule_tac R="\ active" in corres_cases') + apply (rule corres_trivial, simp) + apply clarsimp + + apply (rule corres_split_eqr[OF corres_machine_op], + (rule corres_Id; wpsimp simp: get_gic_vcpu_ctrl_misr_def + get_gic_vcpu_ctrl_eisr1_def + get_gic_vcpu_ctrl_eisr0_def))+ + apply (rename_tac eisr0 eisr1 flags) + apply (rule corres_split[OF corres_gets_numlistregs]) + apply (rule corres_split[where r'="\rv rv'. rv' = arch_fault_map rv"]) + apply (rule corres_if[rotated -1]) + apply (rule corres_trivial, simp) + apply clarsimp + apply (rule corres_if, simp) + apply (rule corres_trivial, simp) + supply if_split[split del] + apply (clarsimp simp: bind_assoc cong: if_cong) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply clarsimp + apply (rule corres_split_dc[OF vgicUpdateLR_corres]) + apply (rule corres_trivial, simp) + apply wpsimp+ + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split[OF getThreadState_corres]) + apply (fold dc_def) + apply (rule corres_when) + apply clarsimp + apply (rename_tac threadState threadState') + apply (case_tac threadState; simp) + apply (rule handleFault_corres) + apply clarsimp + apply clarsimp + apply (wp gts_wp) + apply (wp gts_wp') + apply (rule_tac + Q="\rv. tcb_at rv and einvs + and (\_. valid_fault (ExceptionTypes_A.fault.ArchFault rva))" + in hoare_post_imp) + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb runnable_eq pred_conj_def) + apply (strengthen st_tcb_ex_cap'[where P=active], clarsimp) + apply (fastforce simp: pred_tcb_at_def obj_at_def) + apply wp + apply clarsimp + apply (rule_tac Q="\rv x. 
tcb_at' rv x + \ invs' x + \ sch_act_not rv x" + in hoare_post_imp) + apply (rename_tac rv s) + apply clarsimp + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') + apply (clarsimp simp: pred_tcb_at'_def) + apply (rule conjI, erule_tac p=rv in obj_at'_weakenE, + fastforce split: thread_state.splits) + apply (erule_tac p=rv in obj_at'_weakenE, fastforce split: thread_state.splits) + apply wp + apply (wpsimp wp: wplr wplr' hoare_vcg_all_lift + hoare_vcg_imp_lift' dmo_gets_wp dmo'_gets_wp + simp: get_gic_vcpu_ctrl_misr_def if_apply_def2 + get_gic_vcpu_ctrl_eisr1_def + get_gic_vcpu_ctrl_eisr0_def + | strengthen tcb_at_invs tcb_at_invs')+ + + apply (frule invs_arch_state) + apply (clarsimp simp: valid_arch_state_def valid_fault_def obj_at_def cur_vcpu_def in_omonad) + apply (clarsimp simp: tcb_at_invs') + apply (frule invs_arch_state') + apply (clarsimp simp: valid_arch_state'_def vcpu_at_is_vcpu') + apply (erule ko_wp_at'_weakenE, simp) + done +qed + +lemma vppiEvent_corres: + "corres dc einvs + (\s. invs' s \ sch_act_not (ksCurThread s) s) + (vppi_event irq) (vppiEvent irq)" + unfolding vppi_event_def vppiEvent_def isRunnable_def + supply [[simproc del: defined_all]] + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_gets_current_vcpu]) + apply (clarsimp simp del: subst_all (* avoid destroying useful name of rv *)) + (* we only care about the one case we do something: active current vcpu *) + apply (rule_tac R="hsCurVCPU = None" in corres_cases') + apply (rule corres_trivial, simp) + apply (clarsimp, rename_tac vcpu_ptr active) + apply wpfix + apply (rule_tac R="\ active" in corres_cases') + apply (rule corres_trivial, simp) + apply clarsimp + + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_dc[OF vcpuUpdate_corres]) + apply (fastforce simp: vcpu_relation_def irq_vppi_event_index_def + irqVPPIEventIndex_def IRQ_def) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split[OF getThreadState_corres], rename_tac gts gts') + apply (fold dc_def) + apply (rule corres_when) + apply (case_tac gts; fastforce) + apply (rule handleFault_corres, simp) + apply (wp gts_st_tcb_at hoare_vcg_imp_lift') + apply (wp gts_st_tcb_at' hoare_vcg_imp_lift') + (* on both sides, we check that the current thread is runnable, then have to know it + is runnable directly afterwards, which is obvious and should not propagate further; + clean up the postconditions of the thread_get and threadGet *) + apply (rule_tac + Q="\rv. tcb_at rv and einvs + and (\_. valid_fault (ExceptionTypes_A.fault.ArchFault + (AARCH64_A.VPPIEvent irq)))" + in hoare_post_imp) + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb runnable_eq pred_conj_def) + apply (strengthen st_tcb_ex_cap'[where P=active], fastforce) + apply wp + apply (clarsimp cong: imp_cong conj_cong simp: pred_conj_def) + apply (rule_tac Q="\rv x. 
tcb_at' rv x + \ invs' x + \ sch_act_not rv x" in hoare_post_imp) + apply (rename_tac rv s) + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') + apply (clarsimp simp: pred_tcb_at'_def) + apply (rule conjI, erule_tac p=rv in obj_at'_weakenE, fastforce split: thread_state.splits) + apply (erule_tac p=rv in obj_at'_weakenE, fastforce split: thread_state.splits) + apply wp + apply (wpsimp wp: vcpu_update_tcb_at hoare_vcg_all_lift hoare_vcg_imp_lift' + cong: vcpu.fold_congs)+ + apply (strengthen tcb_at_invs) + apply (wpsimp wp: dmo_maskInterrupt_True maskInterrupt_invs + setVCPU_VPPIMasked_invs' simp: vcpuUpdate_def + | wps)+ + apply (frule invs_arch_state) + apply (simp add: valid_arch_state_def valid_fault_def tcb_at_invs) + apply (clarsimp simp: obj_at_def cur_vcpu_def in_omonad) + apply clarsimp + apply (frule invs_arch_state') + apply (rule conjI) + apply (clarsimp simp: valid_arch_state'_def vcpu_at_is_vcpu') + apply (erule ko_wp_at'_weakenE, simp) + apply (simp add: tcb_at_invs') + done + +lemma handle_reserved_irq_corres[corres]: + "corres dc einvs + (\s. invs' s \ (irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) + (handle_reserved_irq irq) (handleReservedIRQ irq)" + apply (clarsimp simp: handle_reserved_irq_def handleReservedIRQ_def irqVPPIEventIndex_def + irq_vppi_event_index_def non_kernel_IRQs_def IRQ_def irqVGICMaintenance_def + irqVTimerEvent_def) + apply (rule conjI; clarsimp) + apply (rule corres_guard_imp, rule vppiEvent_corres, assumption, fastforce) + apply (rule corres_guard_imp) + apply (rule corres_when) + apply (fastforce intro: vgic_maintenance_corres simp: unat_arith_simps)+ + done + +lemma handleInterrupt_corres: + "corres dc + (einvs) (invs' and (\s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive)) + (handle_interrupt irq) (handleInterrupt irq)" + (is "corres dc (einvs) ?P' ?f ?g") + apply (simp add: handle_interrupt_def handleInterrupt_def ) + apply (rule conjI[rotated]; rule impI) + + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQState_corres, + where R="\rv. einvs" + and R'="\rv. invs' and (\s. rv \ IRQInactive)"]) + defer + apply (wp getIRQState_prop getIRQState_inv do_machine_op_bind doMachineOp_bind | simp add: do_machine_op_bind doMachineOp_bind )+ + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule corres_machine_op, rule corres_eq_trivial ; (simp add: dc_def no_fail_bind)+)+ + apply ((wp | simp)+)[4] + + apply (rule corres_gen_asm2) + apply (case_tac st, simp_all add: irq_state_relation_def split: irqstate.split_asm) + apply (simp add: getSlotCap_def bind_assoc) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQSlot_corres]) + apply simp + apply (rule corres_split[OF get_cap_corres, + where R="\rv. einvs and valid_cap rv" + and R'="\rv. 
invs' and valid_cap' (cteCap rv)"]) + apply (rule corres_underlying_split[where r'=dc]) + apply (case_tac xb, simp_all add: doMachineOp_return)[1] + apply (clarsimp simp add: when_def doMachineOp_return) + apply (rule corres_guard_imp, rule sendSignal_corres) + apply (clarsimp simp: valid_cap_def valid_cap'_def arch_mask_irq_signal_def + maskIrqSignal_def do_machine_op_bind doMachineOp_bind)+ + apply corres + apply (rule corres_machine_op, rule corres_eq_trivial; simp)+ + apply wpsimp+ + apply fastforce + apply (rule corres_guard_imp) + apply (rule corres_split) + apply simp + apply (rule corres_split[OF timerTick_corres corres_machine_op]) + apply (rule corres_eq_trivial, wpsimp+) + apply (rule corres_machine_op) + apply (rule corres_eq_trivial; simp) + apply wp+ + apply (clarsimp simp: invs_distinct invs_psp_aligned schact_is_rct_def) + apply clarsimp + done + +crunch ksDomainTime[wp]: rescheduleRequired "\s. P (ksDomainTime s)" +(simp:tcbSchedEnqueue_def wp:unless_wp) + +crunch ksDomainTime[wp]: tcbSchedAppend "\s. P (ksDomainTime s)" +(simp:tcbSchedEnqueue_def wp:unless_wp) + +lemma updateTimeSlice_valid_pspace[wp]: + "\valid_pspace'\ threadSet (tcbTimeSlice_update (\_. ts')) thread + \\r. valid_pspace'\" + apply (wp threadSet_valid_pspace'T) + apply (auto simp:tcb_cte_cases_def cteSizeBits_def) + done + +lemma dom_upd_eq: + "f t = Some y \ dom (\a. if a = t then Some x else f a) = dom f" + by (auto split: if_split_asm) + +lemma updateTimeSlice_hyp_refs[wp]: + "\\s. P (state_hyp_refs_of' s)\ + threadSet (tcbTimeSlice_update f) thread + \\r s. P (state_hyp_refs_of' s)\" + unfolding threadSet_def + apply (clarsimp simp: setObject_def split_def) + apply (wp getObject_tcb_wp | simp add: updateObject_default_def)+ + apply (clarsimp simp: state_hyp_refs_of'_def obj_at'_def) + apply (erule subst[where P=P, rotated]) + apply (rule ext) + apply (clarsimp simp: objBitsKO_def ps_clear_def dom_upd_eq split: option.splits) + done + +crunches tcbSchedAppend + for irq_handlers'[wp]: valid_irq_handlers' + and irqs_masked'[wp]: irqs_masked' + and ct[wp]: cur_tcb' + (simp: unless_def tcb_cte_cases_def cteSizeBits_def wp: crunch_wps cur_tcb_lift) + +lemma timerTick_invs'[wp]: + "timerTick \invs'\" + apply (simp add: timerTick_def) + apply (wpsimp wp: threadSet_invs_trivial threadSet_pred_tcb_no_state + rescheduleRequired_all_invs_but_ct_not_inQ + simp: tcb_cte_cases_def) + apply (rule_tac Q="\rv. invs'" in hoare_post_imp) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (simp add: decDomainTime_def) + apply wp + apply simp + apply wpc + apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs + rescheduleRequired_all_invs_but_ct_not_inQ + hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain')+ + apply (rule hoare_strengthen_post[OF tcbSchedAppend_all_invs_but_ct_not_inQ']) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ + apply (rule_tac Q="\_. invs'" in hoare_strengthen_post) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv + threadSet_valid_objs' threadSet_timeslice_invs)+ + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ + apply (wp gts_wp')+ + apply (auto simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def cong: conj_cong) + done + +lemma resetTimer_invs'[wp]: + "\invs'\ doMachineOp resetTimer \\_. invs'\" + apply (wp dmo_invs' no_irq no_irq_resetTimer) + apply clarsimp + apply (drule_tac Q="%_ b. 
underlying_memory b p = underlying_memory m p" + in use_valid) + apply (simp add: resetTimer_def + machine_op_lift_def machine_rest_lift_def split_def) + apply wp + apply clarsimp+ + done + +lemma dmo_ackInterrupt[wp]: +"\invs'\ doMachineOp (ackInterrupt irq) \\y. invs'\" + apply (wp dmo_invs' no_irq no_irq_ackInterrupt) + apply safe + apply (drule_tac Q="\_ m'. underlying_memory m' p = underlying_memory m p" + in use_valid) + apply ((clarsimp simp: ackInterrupt_def machine_op_lift_def + machine_rest_lift_def split_def | wp)+)[3] + done + +lemma runnable'_eq: + "runnable' st = (st = Running \ st = Restart)" + by (cases st; simp) + +lemma vgicMaintenance_invs'[wp]: + "\invs' and (\s. sch_act_not (ksCurThread s) s)\ + vgicMaintenance + \\_. invs'\" + supply if_split[split del] + apply (clarsimp simp: vgicMaintenance_def get_gic_vcpu_ctrl_lr_def set_gic_vcpu_ctrl_lr_def + get_gic_vcpu_ctrl_misr_def get_gic_vcpu_ctrl_eisr1_def get_gic_vcpu_ctrl_eisr0_def + doMachineOp_bind) + apply (wpsimp simp: if_apply_def2 wp: hoare_vcg_const_imp_lift) + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: imp_cong conj_cong simp: pred_conj_def) + apply (rule_tac Q="\_ s. tcb_at' (ksCurThread s) s + \ invs' s + \ sch_act_not (ksCurThread s) s" + in hoare_post_imp) + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') + apply (clarsimp simp: st_tcb_at'_def obj_at'_def runnable'_eq) + apply (rule conjI) + apply (fastforce elim!: st_tcb_ex_cap'' simp: valid_state'_def valid_pspace'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def idle_tcb'_def) + apply wps + apply (wpsimp simp: if_apply_def2 + wp: hoare_vcg_const_imp_lift hoare_drop_imps dmo'_gets_wp + | wps)+ + apply (clarsimp cong: conj_cong imp_cong split: if_split) + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: conj_cong imp_cong split: if_split) + apply (rule conjI) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def runnable'_eq) + apply (rule conjI) + apply (fastforce elim!: st_tcb_ex_cap'' simp: valid_state'_def valid_pspace'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def idle_tcb'_def) + apply clarsimp + done + +lemma vppiEvent_invs'[wp]: + "\invs' and (\s. sch_act_not (ksCurThread s) s)\ + vppiEvent irq \\y. invs'\" + supply if_split[split del] + apply (clarsimp simp: vppiEvent_def doMachineOp_bind) + apply (wpsimp simp: if_apply_def2 wp: hoare_vcg_const_imp_lift) + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: imp_cong conj_cong simp: pred_conj_def) + apply (rule_tac Q="\_ s. tcb_at' (ksCurThread s) s + \ invs' s + \ sch_act_not (ksCurThread s) s" + in hoare_post_imp) + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') + apply (clarsimp simp: st_tcb_at'_def obj_at'_def runnable'_eq) + apply (rule conjI) + apply (fastforce elim!: st_tcb_ex_cap'' simp: valid_state'_def valid_pspace'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def idle_tcb'_def) + apply wps + apply (wpsimp simp: if_apply_def2 vcpuUpdate_def + wp: hoare_vcg_const_imp_lift hoare_drop_imps + setVCPU_VPPIMasked_invs' dmo_maskInterrupt_True + | wps)+ + done + +lemma hint_invs[wp]: + "\invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)\ + handleInterrupt irq \\rv. 
invs'\<rbrace>"
+  apply (simp add: handleInterrupt_def getSlotCap_def cong: irqstate.case_cong)
+  apply (rule conjI; rule impI)
+  apply (wp dmo_maskInterrupt_True getCTE_wp' | wpc | simp add: doMachineOp_bind maskIrqSignal_def)+
+  apply (rule_tac Q="\<lambda>rv. invs'" in hoare_post_imp)
+  apply (clarsimp simp: cte_wp_at_ctes_of ex_nonz_cap_to'_def)
+  apply fastforce
+  apply (wpsimp wp: threadSet_invs_trivial getIRQState_wp
+              simp: inQ_def handleReservedIRQ_def if_apply_def2 irqVPPIEventIndex_def
+                    IRQ_def irqVTimerEvent_def irqVGICMaintenance_def unat_arith_simps
+         split_del: if_split)+
+  apply (clarsimp split: if_split_asm)+
+  apply (clarsimp simp: non_kernel_IRQs_def irqVTimerEvent_def irqVGICMaintenance_def
+                        unat_arith_simps)
+  done
+
+crunch st_tcb_at'[wp]: timerTick "st_tcb_at' P t"
+  (wp: threadSet_pred_tcb_no_state)
+
+end
+
+end
diff --git a/proof/refine/AARCH64/InvariantUpdates_H.thy b/proof/refine/AARCH64/InvariantUpdates_H.thy
new file mode 100644
index 0000000000..899ed9627d
--- /dev/null
+++ b/proof/refine/AARCH64/InvariantUpdates_H.thy
@@ -0,0 +1,381 @@
+(*
+ * Copyright 2021, Data61, CSIRO (ABN 41 687 119 230)
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *)
+
+theory InvariantUpdates_H
+imports Invariants_H
+begin
+
+(* FIXME: use locales to shorten this work *)
+
+lemma ps_clear_domE[elim?]:
+  "\<lbrakk> ps_clear x n s; dom (ksPSpace s) = dom (ksPSpace s') \<rbrakk> \<Longrightarrow> ps_clear x n s'"
+  by (simp add: ps_clear_def)
+
+lemma ps_clear_upd:
+  "ksPSpace s y = Some v \<Longrightarrow>
+   ps_clear x n (ksPSpace_update (\<lambda>a. (ksPSpace s)(y \<mapsto> v')) s') = ps_clear x n s"
+  by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+
+
+lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated]
+
+lemma ct_not_inQ_ksMachineState_update[simp]:
+  "ct_not_inQ (ksMachineState_update f s) = ct_not_inQ s"
+  by (simp add: ct_not_inQ_def)
+
+lemma ct_in_current_domain_ksMachineState[simp]:
+  "ct_idle_or_in_cur_domain' (ksMachineState_update p s) = ct_idle_or_in_cur_domain' s"
+  by (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def)
+
+lemma invs'_machine:
+  assumes mask: "irq_masks (f (ksMachineState s)) =
+                 irq_masks (ksMachineState s)"
+  assumes vms: "valid_machine_state' (ksMachineState_update f s) =
+                valid_machine_state' s"
+  shows "invs' (ksMachineState_update f s) = invs' s"
+proof -
+  show ?thesis
+    apply (cases "ksSchedulerAction s")
+    apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def
+                         ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def
+                         valid_bitmaps_def bitmapQ_defs
+                         vms ct_not_inQ_def
+                         state_refs_of'_def ps_clear_def
+                         valid_irq_node'_def mask
+                   cong: option.case_cong)
+    done
+qed
+
+lemma invs_no_cicd'_machine:
+  assumes mask: "irq_masks (f (ksMachineState s)) =
+                 irq_masks (ksMachineState s)"
+  assumes vms: "valid_machine_state' (ksMachineState_update f s) =
+                valid_machine_state' s"
+  shows "invs_no_cicd' (ksMachineState_update f s) = invs_no_cicd' s"
+proof -
+  show ?thesis
+    apply (cases "ksSchedulerAction s")
+    apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def
+                         cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def
+                         tcb_in_cur_domain'_def valid_bitmaps_def bitmapQ_defs
+                         vms ct_not_inQ_def
+                         state_refs_of'_def ps_clear_def
+                         valid_irq_node'_def mask
+                   cong: option.case_cong)
+    done
+qed
+
+lemma pspace_no_overlap_queues [simp]:
+  "pspace_no_overlap' w sz (ksReadyQueues_update f s) = pspace_no_overlap' w sz s"
+  by (simp add: pspace_no_overlap'_def)
+
+lemma pspace_no_overlap'_ksSchedulerAction[simp]:
+  "pspace_no_overlap' a b 
(ksSchedulerAction_update f s) = + pspace_no_overlap' a b s" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap'_ksArchState_update[simp]: + "pspace_no_overlap' p n (ksArchState_update f s) = + pspace_no_overlap' p n s" + by (simp add: pspace_no_overlap'_def) + +lemma ksReadyQueues_update_id[simp]: + "ksReadyQueues_update id s = s" + by simp + +lemma ct_not_inQ_ksReadyQueues_update[simp]: + "ct_not_inQ (ksReadyQueues_update f s) = ct_not_inQ s" + by (simp add: ct_not_inQ_def) + +lemma inQ_context[simp]: + "inQ d p (tcbArch_update f tcb) = inQ d p tcb" + by (cases tcb, simp add: inQ_def) + +lemma valid_tcb'_tcbQueued[simp]: + "valid_tcb' (tcbQueued_update f tcb) = valid_tcb' tcb" + by (cases tcb, rule ext, simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + +lemma valid_tcb'_tcbFault_update[simp]: + "valid_tcb' tcb s \ valid_tcb' (tcbFault_update f tcb) s" + by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + +lemma valid_tcb'_tcbTimeSlice_update[simp]: + "valid_tcb' (tcbTimeSlice_update f tcb) s = valid_tcb' tcb s" + by (simp add:valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + +lemma valid_bitmaps_ksSchedulerAction_update[simp]: + "valid_bitmaps (ksSchedulerAction_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma ex_cte_cap_wp_to'_gsCNodes_update[simp]: + "ex_cte_cap_wp_to' P p (gsCNodes_update f s') = ex_cte_cap_wp_to' P p s'" + by (simp add: ex_cte_cap_wp_to'_def) +lemma ex_cte_cap_wp_to'_gsUserPages_update[simp]: + "ex_cte_cap_wp_to' P p (gsUserPages_update f s') = ex_cte_cap_wp_to' P p s'" + by (simp add: ex_cte_cap_wp_to'_def) + +lemma pspace_no_overlap'_gsCNodes_update[simp]: + "pspace_no_overlap' p b (gsCNodes_update f s') = pspace_no_overlap' p b s'" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap'_gsUserPages_update[simp]: + "pspace_no_overlap' p b (gsUserPages_update f s') = pspace_no_overlap' p b s'" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap'_ksMachineState_update[simp]: + "pspace_no_overlap' p n (ksMachineState_update f s) = + pspace_no_overlap' p n s" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap_gsUntypedZeroRanges[simp]: + "pspace_no_overlap' ptr n (gsUntypedZeroRanges_update f s) + = pspace_no_overlap' ptr n s" + by (simp add: pspace_no_overlap'_def) + +lemma vms'_ct[simp]: + "valid_machine_state' (ksCurThread_update f s) = valid_machine_state' s" + by (simp add: valid_machine_state'_def) + +lemma tcb_in_cur_domain_ct[simp]: + "tcb_in_cur_domain' t (ksCurThread_update f s) = tcb_in_cur_domain' t s" + by (fastforce simp: tcb_in_cur_domain'_def) + +lemma valid_bitmaps_ksCurDomain[simp]: + "valid_bitmaps (ksCurDomain_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_bitmaps_ksDomScheduleIdx[simp]: + "valid_bitmaps (ksDomScheduleIdx_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_bitmaps_ksDomSchedule[simp]: + "valid_bitmaps (ksDomSchedule_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_bitmaps_ksDomainTime[simp]: + "valid_bitmaps (ksDomainTime_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_bitmaps_ksWorkUnitsCompleted[simp]: + "valid_bitmaps (ksWorkUnitsCompleted_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_irq_node'_ksCurDomain[simp]: + "valid_irq_node' w (ksCurDomain_update f s) = valid_irq_node' w 
s" + by (simp add: valid_irq_node'_def) + +lemma valid_irq_node'_ksDomScheduleIdx[simp]: + "valid_irq_node' w (ksDomScheduleIdx_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma valid_irq_node'_ksDomSchedule[simp]: + "valid_irq_node' w (ksDomSchedule_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma valid_irq_node'_ksDomainTime[simp]: + "valid_irq_node' w (ksDomainTime_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma valid_irq_node'_ksWorkUnitsCompleted[simp]: + "valid_irq_node' w (ksWorkUnitsCompleted_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma ex_cte_cap_wp_to_work_units[simp]: + "ex_cte_cap_wp_to' P slot (ksWorkUnitsCompleted_update f s) + = ex_cte_cap_wp_to' P slot s" + by (simp add: ex_cte_cap_wp_to'_def) + +add_upd_simps "ct_in_state' P (gsUntypedZeroRanges_update f s)" +declare upd_simps[simp] + +lemma ct_not_inQ_ksArchState_update[simp]: + "ct_not_inQ (ksArchState_update f s) = ct_not_inQ s" + by (simp add: ct_not_inQ_def) + +lemma ct_in_current_domain_ArchState_update[simp]: + "ct_idle_or_in_cur_domain' (ksArchState_update f s) = ct_idle_or_in_cur_domain' s" + by (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + +lemma pspace_no_overlap_queuesL1 [simp]: + "pspace_no_overlap' w sz (ksReadyQueuesL1Bitmap_update f s) = pspace_no_overlap' w sz s" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap_queuesL2 [simp]: + "pspace_no_overlap' w sz (ksReadyQueuesL2Bitmap_update f s) = pspace_no_overlap' w sz s" + by (simp add: pspace_no_overlap'_def) + +lemma tcb_in_cur_domain'_ksSchedulerAction_update[simp]: + "tcb_in_cur_domain' t (ksSchedulerAction_update f s) = tcb_in_cur_domain' t s" + by (simp add: tcb_in_cur_domain'_def) + +lemma ct_idle_or_in_cur_domain'_ksSchedulerAction_update[simp]: + "b \ ResumeCurrentThread \ + ct_idle_or_in_cur_domain' (s\ksSchedulerAction := b\)" + apply (clarsimp simp add: ct_idle_or_in_cur_domain'_def) + done + +lemma sch_act_simple_wu [simp, intro!]: + "sch_act_simple (ksWorkUnitsCompleted_update f s) = sch_act_simple s" + by (simp add: sch_act_simple_def) + +lemma sch_act_simple_ksPSpace_update[simp]: + "sch_act_simple (ksPSpace_update f s) = sch_act_simple s" + apply (simp add: sch_act_simple_def) + done + +lemma ps_clear_ksReadyQueue[simp]: + "ps_clear x n (ksReadyQueues_update f s) = ps_clear x n s" + by (simp add: ps_clear_def) + +lemma inQ_tcbIPCBuffer_update_idem[simp]: + "inQ d p (tcbIPCBuffer_update f ko) = inQ d p ko" + by (clarsimp simp: inQ_def) + +lemma valid_mdb_interrupts'[simp]: + "valid_mdb' (ksInterruptState_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma valid_mdb'_ksReadyQueues_update[simp]: + "valid_mdb' (ksReadyQueues_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma vms_ksReadyQueues_update[simp]: + "valid_machine_state' (ksReadyQueues_update f s) = valid_machine_state' s" + by (simp add: valid_machine_state'_def) + +lemma ct_in_state'_ksMachineState_update[simp]: + "ct_in_state' x (ksMachineState_update f s) = ct_in_state' x s" + by (simp add: ct_in_state'_def)+ + +lemma ex_cte_cap_wp_to'_ksMachineState_update[simp]: + "ex_cte_cap_wp_to' x y (ksMachineState_update f s) = ex_cte_cap_wp_to' x y s" + by (simp add: ex_cte_cap_wp_to'_def)+ + +lemma ps_clear_ksMachineState_update[simp]: + "ps_clear a b (ksMachineState_update f s) = ps_clear a b s" + by (simp add: ps_clear_def) + +lemma ct_in_state_ksSched[simp]: + "ct_in_state' P 
(ksSchedulerAction_update f s) = ct_in_state' P s" + unfolding ct_in_state'_def + apply auto + done + +lemma invs'_wu [simp]: + "invs' (ksWorkUnitsCompleted_update f s) = invs' s" + apply (simp add: invs'_def cur_tcb'_def valid_state'_def valid_bitmaps_def + valid_irq_node'_def valid_machine_state'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + bitmapQ_defs) + done + +lemma valid_arch_state'_interrupt[simp]: + "valid_arch_state' (ksInterruptState_update f s) = valid_arch_state' s" + by (simp add: valid_arch_state'_def cong: option.case_cong) + +lemma valid_bitmapQ_ksSchedulerAction_upd[simp]: + "valid_bitmapQ (ksSchedulerAction_update f s) = valid_bitmapQ s" + unfolding bitmapQ_defs by simp + +lemma bitmapQ_no_L1_orphans_ksSchedulerAction_upd[simp]: + "bitmapQ_no_L1_orphans (ksSchedulerAction_update f s) = bitmapQ_no_L1_orphans s" + unfolding bitmapQ_defs by simp + +lemma bitmapQ_no_L2_orphans_ksSchedulerAction_upd[simp]: + "bitmapQ_no_L2_orphans (ksSchedulerAction_update f s) = bitmapQ_no_L2_orphans s" + unfolding bitmapQ_defs by simp + +lemma cur_tcb'_ksReadyQueuesL1Bitmap_upd[simp]: + "cur_tcb' (ksReadyQueuesL1Bitmap_update f s) = cur_tcb' s" + unfolding cur_tcb'_def by simp + +lemma cur_tcb'_ksReadyQueuesL2Bitmap_upd[simp]: + "cur_tcb' (ksReadyQueuesL2Bitmap_update f s) = cur_tcb' s" + unfolding cur_tcb'_def by simp + +lemma ex_cte_cap_wp_to'_ksReadyQueuesL1Bitmap[simp]: + "ex_cte_cap_wp_to' P p (ksReadyQueuesL1Bitmap_update f s) = ex_cte_cap_wp_to' P p s" + unfolding ex_cte_cap_wp_to'_def by simp + +lemma ex_cte_cap_wp_to'_ksReadyQueuesL2Bitmap[simp]: + "ex_cte_cap_wp_to' P p (ksReadyQueuesL2Bitmap_update f s) = ex_cte_cap_wp_to' P p s" + unfolding ex_cte_cap_wp_to'_def by simp + +lemma sch_act_simple_readyQueue[simp]: + "sch_act_simple (ksReadyQueues_update f s) = sch_act_simple s" + apply (simp add: sch_act_simple_def) + done + +lemma sch_act_simple_ksReadyQueuesL1Bitmap[simp]: + "sch_act_simple (ksReadyQueuesL1Bitmap_update f s) = sch_act_simple s" + apply (simp add: sch_act_simple_def) + done + +lemma sch_act_simple_ksReadyQueuesL2Bitmap[simp]: + "sch_act_simple (ksReadyQueuesL2Bitmap_update f s) = sch_act_simple s" + apply (simp add: sch_act_simple_def) + done + +lemma ksDomainTime_invs[simp]: + "invs' (ksDomainTime_update f s) = invs' s" + by (simp add: invs'_def valid_state'_def cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_machine_state'_def bitmapQ_defs) + +lemma valid_machine_state'_ksDomainTime[simp]: + "valid_machine_state' (ksDomainTime_update f s) = valid_machine_state' s" + by (simp add:valid_machine_state'_def) + +lemma cur_tcb'_ksDomainTime[simp]: + "cur_tcb' (ksDomainTime_update f s) = cur_tcb' s" + by (simp add:cur_tcb'_def) + +lemma ct_idle_or_in_cur_domain'_ksDomainTime[simp]: + "ct_idle_or_in_cur_domain' (ksDomainTime_update f s) = ct_idle_or_in_cur_domain' s" + by (simp add:ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + +lemma sch_act_sane_ksMachineState[simp]: + "sch_act_sane (ksMachineState_update f s) = sch_act_sane s" + by (simp add: sch_act_sane_def) + +lemma ct_not_inQ_update_cnt[simp]: + "ct_not_inQ (s\ksSchedulerAction := ChooseNewThread\)" + by (simp add: ct_not_inQ_def) + +lemma ct_not_inQ_update_stt[simp]: + "ct_not_inQ (s\ksSchedulerAction := SwitchToThread t\)" + by (simp add: ct_not_inQ_def) + +lemma invs'_update_cnt[elim!]: + "invs' s \ invs' (s\ksSchedulerAction := ChooseNewThread\)" + by (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_irq_node'_def 
cur_tcb'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def bitmapQ_defs) + + +context begin interpretation Arch . + +lemma valid_arch_state'_vmid_next_update[simp]: + "valid_arch_state' (s\ksArchState := armKSNextVMID_update f (ksArchState s)\) = + valid_arch_state' s" + by (auto simp: valid_arch_state'_def split: option.split) + +lemma invs'_armKSNextVMID_update[simp]: + "invs' (s\ksArchState := armKSNextVMID_update f s'\) = invs' (s\ksArchState := s'\)" + by (simp add: invs'_def valid_state'_def valid_global_refs'_def global_refs'_def table_refs'_def + valid_machine_state'_def valid_arch_state'_def cong: option.case_cong) + +lemma invs_no_cicd'_armKSNextVMID_update[simp]: + "invs_no_cicd' (s\ksArchState := armKSNextVMID_update f s'\) = invs_no_cicd' (s\ksArchState := s'\)" + by (simp add: invs_no_cicd'_def valid_state'_def valid_global_refs'_def global_refs'_def table_refs'_def + valid_machine_state'_def valid_arch_state'_def cong: option.case_cong) + +lemma invs'_gsTypes_update: + "ksA' = ksArchState s \ invs' (s \ksArchState := gsPTTypes_update f ksA'\) = invs' s" + by (simp add: invs'_def valid_state'_def valid_global_refs'_def global_refs'_def + valid_machine_state'_def valid_arch_state'_def + cong: option.case_cong) + +end + +end \ No newline at end of file diff --git a/proof/refine/AARCH64/Invariants_H.thy b/proof/refine/AARCH64/Invariants_H.thy new file mode 100644 index 0000000000..f03c85e5f1 --- /dev/null +++ b/proof/refine/AARCH64/Invariants_H.thy @@ -0,0 +1,3579 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Invariants_H +imports + LevityCatch + "AInvs.ArchDetSchedSchedule_AI" + "Lib.Heap_List" +begin + +(* global data and code of the kernel, not covered by any cap *) +axiomatization + kernel_data_refs :: "word64 set" + +context Arch begin + +declare lookupPTSlotFromLevel.simps[simp del] +declare lookupPTFromLevel.simps[simp del] + +lemmas haskell_crunch_def [crunch_def] = + deriveCap_def finaliseCap_def + hasCancelSendRights_def sameRegionAs_def isPhysicalCap_def + sameObjectAs_def updateCapData_def maskCapRights_def + createObject_def capUntypedPtr_def capUntypedSize_def + performInvocation_def decodeInvocation_def + +context begin global_naming global +requalify_facts + Retype_H.deriveCap_def Retype_H.finaliseCap_def + Retype_H.hasCancelSendRights_def Retype_H.sameRegionAs_def Retype_H.isPhysicalCap_def + Retype_H.sameObjectAs_def Retype_H.updateCapData_def Retype_H.maskCapRights_def + Retype_H.createObject_def Retype_H.capUntypedPtr_def Retype_H.capUntypedSize_def + Retype_H.performInvocation_def Retype_H.decodeInvocation_def +end + +end + +\ \---------------------------------------------------------------------------\ + +section "Invariants on Executable Spec" + +context begin interpretation Arch . + +definition ps_clear :: "obj_ref \ nat \ kernel_state \ bool" where + "ps_clear p n s \ (mask_range p n - {p}) \ dom (ksPSpace s) = {}" + +definition pspace_no_overlap' :: "obj_ref \ nat \ kernel_state \ bool" where + "pspace_no_overlap' ptr bits \ + \s. \x ko. ksPSpace s x = Some ko \ + (mask_range x (objBitsKO ko)) \ {ptr .. (ptr && ~~ mask bits) + mask bits} = {}" + +definition ko_wp_at' :: "(kernel_object \ bool) \ obj_ref \ kernel_state \ bool" where + "ko_wp_at' P p s \ \ko. 
ksPSpace s p = Some ko \ is_aligned p (objBitsKO ko) \ P ko \ + ps_clear p (objBitsKO ko) s" + +definition obj_at' :: "('a::pspace_storable \ bool) \ machine_word \ kernel_state \ bool" where + obj_at'_real_def: + "obj_at' P p s \ ko_wp_at' (\ko. \obj. projectKO_opt ko = Some obj \ P obj) p s" + +definition typ_at' :: "kernel_object_type \ machine_word \ kernel_state \ bool" where + "typ_at' T \ ko_wp_at' (\ko. koTypeOf ko = T)" + +abbreviation ep_at' :: "obj_ref \ kernel_state \ bool" where + "ep_at' \ obj_at' ((\x. True) :: endpoint \ bool)" + +abbreviation ntfn_at' :: "obj_ref \ kernel_state \ bool" where + "ntfn_at' \ obj_at' ((\x. True) :: notification \ bool)" + +abbreviation tcb_at' :: "obj_ref \ kernel_state \ bool" where + "tcb_at' \ obj_at' ((\x. True) :: tcb \ bool)" + +abbreviation real_cte_at' :: "obj_ref \ kernel_state \ bool" where + "real_cte_at' \ obj_at' ((\x. True) :: cte \ bool)" + +abbreviation ko_at' :: "'a::pspace_storable \ obj_ref \ kernel_state \ bool" where + "ko_at' v \ obj_at' (\k. k = v)" + +abbreviation + "vcpu_at' \ typ_at' (ArchT VCPUT)" + +abbreviation pte_at' :: "obj_ref \ kernel_state \ bool" where + "pte_at' \ typ_at' (ArchT PTET)" + +end + +record itcb' = + itcbState :: thread_state + itcbFaultHandler :: cptr + itcbIPCBuffer :: vptr + itcbBoundNotification :: "machine_word option" + itcbPriority :: priority + itcbFault :: "fault option" + itcbTimeSlice :: nat + itcbMCP :: priority + +definition tcb_to_itcb' :: "tcb \ itcb'" where + "tcb_to_itcb' tcb \ \ itcbState = tcbState tcb, + itcbFaultHandler = tcbFaultHandler tcb, + itcbIPCBuffer = tcbIPCBuffer tcb, + itcbBoundNotification = tcbBoundNotification tcb, + itcbPriority = tcbPriority tcb, + itcbFault = tcbFault tcb, + itcbTimeSlice = tcbTimeSlice tcb, + itcbMCP = tcbMCP tcb\" + +lemma itcb_simps[simp]: + "itcbState (tcb_to_itcb' tcb) = tcbState tcb" + "itcbFaultHandler (tcb_to_itcb' tcb) = tcbFaultHandler tcb" + "itcbIPCBuffer (tcb_to_itcb' tcb) = tcbIPCBuffer tcb" + "itcbBoundNotification (tcb_to_itcb' tcb) = tcbBoundNotification tcb" + "itcbPriority (tcb_to_itcb' tcb) = tcbPriority tcb" + "itcbFault (tcb_to_itcb' tcb) = tcbFault tcb" + "itcbTimeSlice (tcb_to_itcb' tcb) = tcbTimeSlice tcb" + "itcbMCP (tcb_to_itcb' tcb) = tcbMCP tcb" + by (auto simp: tcb_to_itcb'_def) + +definition pred_tcb_at' :: "(itcb' \ 'a) \ ('a \ bool) \ machine_word \ kernel_state \ bool" + where + "pred_tcb_at' proj test \ obj_at' (\ko. test (proj (tcb_to_itcb' ko)))" + +abbreviation st_tcb_at' :: "(thread_state \ bool) \ obj_ref \ kernel_state \ bool" where + "st_tcb_at' \ pred_tcb_at' itcbState" + +abbreviation bound_tcb_at' :: "(obj_ref option \ bool) \ obj_ref \ kernel_state \ bool" where + "bound_tcb_at' \ pred_tcb_at' itcbBoundNotification" + +abbreviation mcpriority_tcb_at' :: "(priority \ bool) \ obj_ref \ kernel_state \ bool" where + "mcpriority_tcb_at' \ pred_tcb_at' itcbMCP" + +lemma st_tcb_at'_def: + "st_tcb_at' test \ obj_at' (test \ tcbState)" + by (simp add: pred_tcb_at'_def o_def) + +text \ cte with property at \ +definition cte_wp_at' :: "(cte \ bool) \ obj_ref \ kernel_state \ bool" where + "cte_wp_at' P p s \ \cte::cte. 
fst (getObject p s) = {(cte,s)} \ P cte" + +abbreviation cte_at' :: "obj_ref \ kernel_state \ bool" where + "cte_at' \ cte_wp_at' \" + +abbreviation tcb_of' :: "kernel_object \ tcb option" where + "tcb_of' \ projectKO_opt" + +abbreviation tcbs_of' :: "kernel_state \ obj_ref \ tcb option" where + "tcbs_of' s \ ksPSpace s |> tcb_of'" + +abbreviation tcbSchedPrevs_of :: "kernel_state \ obj_ref \ obj_ref option" where + "tcbSchedPrevs_of s \ tcbs_of' s |> tcbSchedPrev" + +abbreviation tcbSchedNexts_of :: "kernel_state \ obj_ref \ obj_ref option" where + "tcbSchedNexts_of s \ tcbs_of' s |> tcbSchedNext" + +abbreviation sym_heap_sched_pointers :: "global.kernel_state \ bool" where + "sym_heap_sched_pointers s \ sym_heap (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + +definition tcb_cte_cases :: "machine_word \ ((tcb \ cte) \ ((cte \ cte) \ tcb \ tcb))" where + "tcb_cte_cases \ [ 0 << cteSizeBits \ (tcbCTable, tcbCTable_update), + 1 << cteSizeBits \ (tcbVTable, tcbVTable_update), + 2 << cteSizeBits \ (tcbReply, tcbReply_update), + 3 << cteSizeBits \ (tcbCaller, tcbCaller_update), + 4 << cteSizeBits \ (tcbIPCBufferFrame, tcbIPCBufferFrame_update) ]" + +definition max_ipc_words :: machine_word where + "max_ipc_words \ capTransferDataSize + msgMaxLength + msgMaxExtraCaps + 2" + +definition tcb_st_refs_of' :: "thread_state \ (obj_ref \ reftype) set" where + "tcb_st_refs_of' st \ case st of + (BlockedOnReceive x _) => {(x, TCBBlockedRecv)} + | (BlockedOnSend x _ _ _ _) => {(x, TCBBlockedSend)} + | (BlockedOnNotification x) => {(x, TCBSignal)} + | _ => {}" + +definition ep_q_refs_of' :: "endpoint \ (obj_ref \ reftype) set" where + "ep_q_refs_of' ep \ case ep of + IdleEP => {} + | (RecvEP q) => set q \ {EPRecv} + | (SendEP q) => set q \ {EPSend}" + +definition ntfn_q_refs_of' :: "Structures_H.ntfn \ (obj_ref \ reftype) set" where + "ntfn_q_refs_of' ntfn \ case ntfn of + IdleNtfn => {} + | (WaitingNtfn q) => set q \ {NTFNSignal} + | (ActiveNtfn b) => {}" + +definition ntfn_bound_refs' :: "obj_ref option \ (obj_ref \ reftype) set" where + "ntfn_bound_refs' t \ set_option t \ {NTFNBound}" + +definition tcb_bound_refs' :: "obj_ref option \ (obj_ref \ reftype) set" where + "tcb_bound_refs' a \ set_option a \ {TCBBound}" + +definition refs_of' :: "kernel_object \ (obj_ref \ reftype) set" where + "refs_of' ko \ case ko of + (KOTCB tcb) => tcb_st_refs_of' (tcbState tcb) \ tcb_bound_refs' (tcbBoundNotification tcb) + | (KOEndpoint ep) => ep_q_refs_of' ep + | (KONotification ntfn) => ntfn_q_refs_of' (ntfnObj ntfn) \ ntfn_bound_refs' (ntfnBoundTCB ntfn) + | _ => {}" + +definition state_refs_of' :: "kernel_state \ obj_ref \ (obj_ref \ reftype) set" where + "state_refs_of' s \ \x. + case ksPSpace s x of + None \ {} + | Some ko \ if is_aligned x (objBitsKO ko) \ ps_clear x (objBitsKO ko) s + then refs_of' ko else {}" + +(* the non-hyp, non-arch part of live' *) +primrec live0' :: "Structures_H.kernel_object \ bool" where + "live0' (KOTCB tcb) = + (bound (tcbBoundNotification tcb) + \ tcbSchedPrev tcb \ None \ tcbSchedNext tcb \ None + \ tcbQueued tcb + \ (tcbState tcb \ Inactive \ tcbState tcb \ IdleThreadState))" +| "live0' (KOCTE cte) = False" +| "live0' (KOEndpoint ep) = (ep \ IdleEP)" +| "live0' (KONotification ntfn) = (bound (ntfnBoundTCB ntfn) \ (\ts. 
ntfnObj ntfn = WaitingNtfn ts))" +| "live0' (KOUserData) = False" +| "live0' (KOUserDataDevice) = False" +| "live0' (KOKernelData) = False" +| "live0' (KOArch ako) = False" + +(* hyp_refs *) + +definition tcb_vcpu_refs' :: "machine_word option \ (obj_ref \ reftype) set" where + "tcb_vcpu_refs' t \ set_option t \ {TCBHypRef}" + +definition tcb_hyp_refs' :: "arch_tcb \ (obj_ref \ reftype) set" where + "tcb_hyp_refs' t \ tcb_vcpu_refs' (AARCH64_H.atcbVCPUPtr t)" + +definition vcpu_tcb_refs' :: "obj_ref option \ (obj_ref \ reftype) set" where + "vcpu_tcb_refs' t \ set_option t \ {HypTCBRef}" + +definition refs_of_a' :: "arch_kernel_object \ (obj_ref \ reftype) set" where + "refs_of_a' x \ case x of + AARCH64_H.KOASIDPool asidpool \ {} + | AARCH64_H.KOPTE pte \ {} + | AARCH64_H.KOVCPU vcpu \ vcpu_tcb_refs' (AARCH64_H.vcpuTCBPtr vcpu)" + +definition hyp_refs_of' :: "kernel_object \ (obj_ref \ reftype) set" where + "hyp_refs_of' x \ case x of + (KOTCB tcb) \ tcb_hyp_refs' (tcbArch tcb) + | (KOCTE cte) \ {} + | (KOEndpoint ep) \ {} + | (KONotification ntfn) \ {} + | (KOUserData) \ {} + | (KOUserDataDevice) \ {} + | (KOKernelData) \ {} + | (KOArch ako) \ refs_of_a' ako" + +definition state_hyp_refs_of' :: "kernel_state \ obj_ref \ (obj_ref \ reftype) set" where + "state_hyp_refs_of' s \ + (\p. case (ksPSpace s p) of + None \ {} + | Some ko \ (if is_aligned p (objBitsKO ko) \ ps_clear p (objBitsKO ko) s + then hyp_refs_of' ko + else {}))" + +definition arch_live' :: "arch_kernel_object \ bool" where + "arch_live' ao \ case ao of + AARCH64_H.KOVCPU vcpu \ bound (AARCH64_H.vcpuTCBPtr vcpu) + | _ \ False" + +definition hyp_live' :: "kernel_object \ bool" where + "hyp_live' ko \ case ko of + (KOTCB tcb) \ bound (AARCH64_H.atcbVCPUPtr (tcbArch tcb)) + | (KOArch ako) \ arch_live' ako + | _ \ False" + +definition live' :: "kernel_object \ bool" where + "live' ko \ case ko of + KOTCB tcb => live0' ko \ hyp_live' ko + | KOCTE cte => False + | KOEndpoint ep => live0' ko + | KONotification ntfn => live0' ko + | KOUserData => False + | KOUserDataDevice => False + | KOKernelData => False + | KOArch ako => hyp_live' ko" + +context begin interpretation Arch . (*FIXME: arch_split*) + +primrec azobj_refs' :: "arch_capability \ obj_ref set" where + "azobj_refs' (ASIDPoolCap _ _) = {}" +| "azobj_refs' ASIDControlCap = {}" +| "azobj_refs' (FrameCap _ _ _ _ _) = {}" +| "azobj_refs' (PageTableCap _ _ _) = {}" +| "azobj_refs' (VCPUCap v) = {v}" + +lemma azobj_refs'_only_vcpu: + "(x \ azobj_refs' acap) = (acap = VCPUCap x)" + by (cases acap) auto +end + +fun zobj_refs' :: "capability \ obj_ref set" where + "zobj_refs' NullCap = {}" +| "zobj_refs' DomainCap = {}" +| "zobj_refs' (UntypedCap d r n f) = {}" +| "zobj_refs' (EndpointCap r badge x y z t) = {r}" +| "zobj_refs' (NotificationCap r badge x y) = {r}" +| "zobj_refs' (CNodeCap r b g gsz) = {}" +| "zobj_refs' (ThreadCap r) = {r}" +| "zobj_refs' (Zombie r b n) = {}" +| "zobj_refs' (ArchObjectCap ac) = azobj_refs' ac" +| "zobj_refs' (IRQControlCap) = {}" +| "zobj_refs' (IRQHandlerCap irq) = {}" +| "zobj_refs' (ReplyCap tcb m x) = {}" + +definition ex_nonz_cap_to' :: "obj_ref \ kernel_state \ bool" where + "ex_nonz_cap_to' ref \ \s. \cref. cte_wp_at' (\c. ref \ zobj_refs' (cteCap c)) cref s" + +definition if_live_then_nonz_cap' :: "kernel_state \ bool" where + "if_live_then_nonz_cap' s \ \ptr. ko_wp_at' live' ptr s \ ex_nonz_cap_to' ptr s" + +fun cte_refs' :: "capability \ obj_ref \ obj_ref set" where + "cte_refs' (CNodeCap ref bits _ _) x = (\x. 
ref + (x << cteSizeBits)) ` {0 .. mask bits}" +| "cte_refs' (ThreadCap ref) x = (\x. ref + x) ` dom tcb_cte_cases" +| "cte_refs' (Zombie ref _ n) x = (\x. ref + (x << cteSizeBits)) ` {0 ..< of_nat n}" +| "cte_refs' (IRQHandlerCap irq) x = {x + (ucast irq << cteSizeBits)}" +| "cte_refs' _ _ = {}" + + +abbreviation irq_node' :: "kernel_state \ obj_ref" where + "irq_node' s \ intStateIRQNode (ksInterruptState s)" + +definition ex_cte_cap_wp_to' :: "(capability \ bool) \ obj_ref \ kernel_state \ bool" where + "ex_cte_cap_wp_to' P ptr \ + \s. \cref. cte_wp_at' (\c. P (cteCap c) \ ptr \ cte_refs' (cteCap c) (irq_node' s)) cref s" + +abbreviation ex_cte_cap_to' :: "obj_ref \ kernel_state \ bool" where + "ex_cte_cap_to' \ ex_cte_cap_wp_to' \" + +definition if_unsafe_then_cap' :: "kernel_state \ bool" where + "if_unsafe_then_cap' s \ + \cref. cte_wp_at' (\c. cteCap c \ NullCap) cref s \ ex_cte_cap_to' cref s" + + +section "Valid caps and objects (design spec)" + +context begin interpretation Arch . + +primrec acapBits :: "arch_capability \ nat" where + "acapBits (ASIDPoolCap _ _) = asidLowBits + word_size_bits" +| "acapBits ASIDControlCap = asidHighBits + word_size_bits" +| "acapBits (FrameCap _ _ sz _ _) = pageBitsForSize sz" +| "acapBits (PageTableCap _ pt_t _) = table_size pt_t" +| "acapBits (VCPUCap v) = vcpuBits" + +end + +primrec zBits :: "zombie_type \ nat" where + "zBits (ZombieCNode n) = objBits (undefined::cte) + n" +| "zBits (ZombieTCB) = tcbBlockSizeBits" + +primrec capBits :: "capability \ nat" where + "capBits NullCap = 0" +| "capBits DomainCap = 0" +| "capBits (UntypedCap _ _ b _) = b" +| "capBits (EndpointCap _ _ _ _ _ _) = objBits (undefined::endpoint)" +| "capBits (NotificationCap _ _ _ _) = objBits (undefined::Structures_H.notification)" +| "capBits (CNodeCap _ b _ _) = objBits (undefined::cte) + b" +| "capBits (ThreadCap _) = objBits (undefined::tcb)" +| "capBits (Zombie _ z _) = zBits z" +| "capBits (IRQControlCap) = 0" +| "capBits (IRQHandlerCap _) = 0" +| "capBits (ReplyCap _ _ _) = objBits (undefined :: tcb)" +| "capBits (ArchObjectCap x) = acapBits x" + +definition capAligned :: "capability \ bool" where + "capAligned c \ is_aligned (capUntypedPtr c) (capBits c) \ capBits c < word_bits" + +definition obj_range' :: "machine_word \ kernel_object \ machine_word set" where + "obj_range' p ko \ mask_range p (objBitsKO ko)" + +primrec (nonexhaustive) usableUntypedRange :: "capability \ machine_word set" where + "usableUntypedRange (UntypedCap _ p n f) = (if f < 2^n then {p+of_nat f .. p + mask n} else {})" + +definition valid_untyped' :: "bool \ obj_ref \ nat \ nat \ kernel_state \ bool" where + "valid_untyped' d ptr bits idx s \ + \ptr'. \ ko_wp_at' (\ko. mask_range ptr bits \ obj_range' ptr' ko + \ obj_range' ptr' ko \ + usableUntypedRange (UntypedCap d ptr bits idx) \ {}) ptr' s" + +primrec zombieCTEs :: "zombie_type \ nat" where + "zombieCTEs ZombieTCB = 5" +| "zombieCTEs (ZombieCNode n) = 2 ^ n" + +context begin interpretation Arch . + +definition page_table_at' :: "pt_type \ obj_ref \ kernel_state \ bool" where + "page_table_at' pt_t p \ \s. + is_aligned p (ptBits pt_t) \ + (\i \ mask (ptTranslationBits pt_t). 
pte_at' (p + (i << pte_bits)) s)" + +lemmas vspace_table_at'_defs = page_table_at'_def + +abbreviation asid_pool_at' :: "obj_ref \ kernel_state \ bool" where + "asid_pool_at' \ typ_at' (ArchT ASIDPoolT)" + +definition asid_wf :: "asid \ bool" where + "asid_wf asid \ asid \ mask asid_bits" + +definition wellformed_mapdata' :: "asid \ vspace_ref \ bool" where + "wellformed_mapdata' \ \(asid, vref). 0 < asid \ asid_wf asid \ vref \ user_region" + +definition wellformed_acap' :: "arch_capability \ bool" where + "wellformed_acap' ac \ + case ac of + ASIDPoolCap r asid \ is_aligned asid asid_low_bits \ asid_wf asid + | FrameCap r rghts sz dev mapdata \ + case_option True wellformed_mapdata' mapdata \ + case_option True (swp vmsz_aligned sz \ snd) mapdata + | PageTableCap pt_t r (Some mapdata) \ wellformed_mapdata' mapdata + | _ \ True" + +lemmas wellformed_acap'_simps[simp] = wellformed_acap'_def[split_simps arch_capability.split] + +definition frame_at' :: "obj_ref \ vmpage_size \ bool \ kernel_state \ bool" where + "frame_at' r sz dev s \ + \p < 2 ^ (pageBitsForSize sz - pageBits). + typ_at' (if dev then UserDataDeviceT else UserDataT) (r + (p << pageBits)) s" + +definition valid_arch_cap_ref' :: "arch_capability \ kernel_state \ bool" where + "valid_arch_cap_ref' ac s \ case ac of + ASIDPoolCap r as \ typ_at' (ArchT ASIDPoolT) r s + | ASIDControlCap \ True + | FrameCap r rghts sz dev mapdata \ frame_at' r sz dev s + | PageTableCap r pt_t mapdata \ page_table_at' pt_t r s + | VCPUCap r \ vcpu_at' r s" + +lemmas valid_arch_cap_ref'_simps[simp] = + valid_arch_cap_ref'_def[split_simps arch_capability.split] + +definition valid_arch_cap' :: "arch_capability \ kernel_state \ bool" where + "valid_arch_cap' cap \ \s. wellformed_acap' cap \ valid_arch_cap_ref' cap s" + +lemmas valid_arch_cap'_simps[simp] = + valid_arch_cap'_def[unfolded wellformed_acap'_def valid_arch_cap_ref'_def, + split_simps arch_capability.split, simplified] + +definition arch_cap'_fun_lift :: "(arch_capability \ 'a) \ 'a \ capability \ 'a" where + "arch_cap'_fun_lift P F c \ case c of ArchObjectCap ac \ P ac | _ \ F" + +lemmas arch_cap'_fun_lift_simps[simp] = arch_cap'_fun_lift_def[split_simps capability.split] + +definition valid_acap' :: "capability \ kernel_state \ bool" where + "valid_acap' \ arch_cap'_fun_lift valid_arch_cap' \" + +definition + valid_cap' :: "capability \ kernel_state \ bool" +where valid_cap'_def: + "valid_cap' c s \ capAligned c \ + (case c of + NullCap \ True + | DomainCap \ True + | UntypedCap d r n f \ + valid_untyped' d r n f s \ r \ 0 \ minUntypedSizeBits \ n \ n \ maxUntypedSizeBits + \ f \ 2^n \ is_aligned (of_nat f :: machine_word) minUntypedSizeBits + \ canonical_address r + | EndpointCap r badge x y z t \ ep_at' r s + | NotificationCap r badge x y \ ntfn_at' r s + | CNodeCap r bits guard guard_sz \ + bits \ 0 \ bits + guard_sz \ word_bits \ guard && mask guard_sz = guard \ + (\addr. real_cte_at' (r + 2^cteSizeBits * (addr && mask bits)) s) + | ThreadCap r \ tcb_at' r s + | ReplyCap r m x \ tcb_at' r s + | IRQControlCap \ True + | IRQHandlerCap irq \ irq \ maxIRQ + | Zombie r b n \ n \ zombieCTEs b \ zBits b < word_bits + \ (case b of ZombieTCB \ tcb_at' r s | ZombieCNode n \ n \ 0 + \ (\addr. 
real_cte_at' (r + 2^cteSizeBits * (addr && mask n)) s)) + | ArchObjectCap ac \ valid_arch_cap' ac s)" + +(* Use abbreviation, not syntax, so that it can be input-only *) +abbreviation (input) valid_cap'_syn :: + "kernel_state \ capability \ bool" ("_ \'' _" [60, 60] 61) where + "s \' c \ valid_cap' c s" + +definition valid_cte' :: "cte \ kernel_state \ bool" where + "valid_cte' cte s \ s \' (cteCap cte)" + +definition valid_tcb_state' :: "thread_state \ kernel_state \ bool" where + "valid_tcb_state' ts s \ case ts of + BlockedOnReceive ref a \ ep_at' ref s + | BlockedOnSend ref a b d c \ ep_at' ref s + | BlockedOnNotification ref \ ntfn_at' ref s + | _ \ True" + +definition valid_ipc_buffer_ptr' :: "machine_word \ kernel_state \ bool" where + "valid_ipc_buffer_ptr' a s \ + is_aligned a msg_align_bits \ typ_at' UserDataT (a && ~~ mask pageBits) s" + +definition valid_bound_ntfn' :: "machine_word option \ kernel_state \ bool" where + "valid_bound_ntfn' ntfn_opt s \ + case ntfn_opt of None \ True | Some a \ ntfn_at' a s" + +definition is_device_frame_cap' :: "capability \ bool" where + "is_device_frame_cap' cap \ case cap of ArchObjectCap (FrameCap _ _ _ dev _) \ dev | _ \ False" + +definition valid_arch_tcb' :: "Structures_H.arch_tcb \ kernel_state \ bool" where + "valid_arch_tcb' \ \t s. \v. atcbVCPUPtr t = Some v \ vcpu_at' v s " + +abbreviation opt_tcb_at' :: "machine_word option \ kernel_state \ bool" where + "opt_tcb_at' \ none_top tcb_at'" + +lemmas opt_tcb_at'_def = none_top_def + +definition valid_tcb' :: "tcb \ kernel_state \ bool" where + "valid_tcb' t s \ (\(getF, setF) \ ran tcb_cte_cases. s \' cteCap (getF t)) + \ valid_tcb_state' (tcbState t) s + \ is_aligned (tcbIPCBuffer t) msg_align_bits + \ valid_bound_ntfn' (tcbBoundNotification t) s + \ tcbDomain t \ maxDomain + \ tcbPriority t \ maxPriority + \ tcbMCP t \ maxPriority + \ opt_tcb_at' (tcbSchedPrev t) s + \ opt_tcb_at' (tcbSchedNext t) s + \ valid_arch_tcb' (tcbArch t) s" + +definition valid_ep' :: "Structures_H.endpoint \ kernel_state \ bool" where + "valid_ep' ep s \ case ep of + IdleEP \ True + | SendEP ts \ (ts \ [] \ (\t \ set ts. tcb_at' t s) \ distinct ts) + | RecvEP ts \ (ts \ [] \ (\t \ set ts. tcb_at' t s) \ distinct ts)" + +definition valid_bound_tcb' :: "machine_word option \ kernel_state \ bool" where + "valid_bound_tcb' tcb_opt s \ case tcb_opt of None \ True | Some t \ tcb_at' t s" + +definition valid_ntfn' :: "Structures_H.notification \ kernel_state \ bool" where + "valid_ntfn' ntfn s \ (case ntfnObj ntfn of + IdleNtfn \ True + | WaitingNtfn ts \ + (ts \ [] \ (\t \ set ts. tcb_at' t s) \ distinct ts + \ (case ntfnBoundTCB ntfn of Some tcb \ ts = [tcb] | _ \ True)) + | ActiveNtfn b \ True) + \ valid_bound_tcb' (ntfnBoundTCB ntfn) s" + +(* FIXME AARCH64: remove if unused at the end *) +definition valid_mapping' :: "machine_word \ vmpage_size \ kernel_state \ bool" where + "valid_mapping' x sz s \ is_aligned x (pageBitsForSize sz) \ ptrFromPAddr x \ 0" + +definition valid_vcpu' :: "vcpu \ bool" where + "valid_vcpu' v \ case vcpuTCBPtr v of None \ True | Some vt \ is_aligned vt tcbBlockSizeBits" + +(* This is a slight abuse of "canonical_address". What we really need to know for ADT_C in CRefine + is that the top pageBits bits of TablePTEs have a known value, because we shift left by pageBits. + What we actually know is that this is a physical address, so it is bound by the physical address + space size, which depending on config can be 40, 44, or 48. 
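+   (For example, with a 48-bit physical address space and pageBits = 12, a table PPN fits in
+   48 - 12 = 36 bits, so shifting it back left by pageBits gives a value that still fits in 48 bits.)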
48 happens to also be the bound for + the virtual address space, which canonical_address is for. This is good enough for our purposes. *) +definition ppn_bounded :: "pte \ bool" where + "ppn_bounded pte \ case pte of PageTablePTE ppn \ canonical_address ppn | _ \ True" + +definition valid_arch_obj' :: "arch_kernel_object \ bool" where + "valid_arch_obj' ako \ case ako of + KOPTE pte \ ppn_bounded pte + | KOVCPU vcpu \ valid_vcpu' vcpu + | _ \ True" + +definition valid_obj' :: "Structures_H.kernel_object \ kernel_state \ bool" where + "valid_obj' ko s \ case ko of + KOEndpoint endpoint \ valid_ep' endpoint s + | KONotification notification \ valid_ntfn' notification s + | KOKernelData \ False + | KOUserData \ True + | KOUserDataDevice \ True + | KOTCB tcb \ valid_tcb' tcb s + | KOCTE cte \ valid_cte' cte s + | KOArch ako \ valid_arch_obj' ako" + +definition + pspace_aligned' :: "kernel_state \ bool" +where + "pspace_aligned' s \ + \x \ dom (ksPSpace s). is_aligned x (objBitsKO (the (ksPSpace s x)))" + +definition + pspace_distinct' :: "kernel_state \ bool" +where + "pspace_distinct' s \ + \x \ dom (ksPSpace s). ps_clear x (objBitsKO (the (ksPSpace s x))) s" + +definition pspace_canonical' :: "kernel_state \ bool" where + "pspace_canonical' s \ \p \ dom (ksPSpace s). canonical_address p" + +definition + valid_objs' :: "kernel_state \ bool" +where + "valid_objs' s \ \obj \ ran (ksPSpace s). valid_obj' obj s" + + +type_synonym cte_heap = "machine_word \ cte option" + +definition + map_to_ctes :: "(machine_word \ kernel_object) \ cte_heap" +where + "map_to_ctes m \ \x. + let cte_bits = objBitsKO (KOCTE undefined); + tcb_bits = objBitsKO (KOTCB undefined); + y = (x && (~~ mask tcb_bits)) + in + if \cte. m x = Some (KOCTE cte) \ is_aligned x cte_bits + \ {x + 1 .. x + (1 << cte_bits) - 1} \ dom m = {} + then case m x of Some (KOCTE cte) \ Some cte + else if \tcb. m y = Some (KOTCB tcb) + \ {y + 1 .. y + (1 << tcb_bits) - 1} \ dom m = {} + then case m y of Some (KOTCB tcb) \ + option_map (\(getF, setF). getF tcb) (tcb_cte_cases (x - y)) + else None" + +abbreviation + "ctes_of s \ map_to_ctes (ksPSpace s)" + +definition + mdb_next :: "cte_heap \ machine_word \ machine_word option" +where + "mdb_next s c \ option_map (mdbNext o cteMDBNode) (s c)" + +definition + mdb_next_rel :: "cte_heap \ (machine_word \ machine_word) set" +where + "mdb_next_rel m \ {(x, y). mdb_next m x = Some y}" + +abbreviation + mdb_next_direct :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \ _" [60,0,60] 61) +where + "m \ a \ b \ (a, b) \ mdb_next_rel m" + +abbreviation + mdb_next_trans :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \\<^sup>+ _" [60,0,60] 61) +where + "m \ a \\<^sup>+ b \ (a,b) \ (mdb_next_rel m)\<^sup>+" + +abbreviation + mdb_next_rtrans :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \\<^sup>* _" [60,0,60] 61) +where + "m \ a \\<^sup>* b \ (a,b) \ (mdb_next_rel m)\<^sup>*" + +definition + "valid_badges m \ + \p p' cap node cap' node'. + m p = Some (CTE cap node) \ + m p' = Some (CTE cap' node') \ + (m \ p \ p') \ + (sameRegionAs cap cap') \ + (isEndpointCap cap \ + capEPBadge cap \ capEPBadge cap' \ + capEPBadge cap' \ 0 \ + mdbFirstBadged node') + \ + (isNotificationCap cap \ + capNtfnBadge cap \ capNtfnBadge cap' \ + capNtfnBadge cap' \ 0 \ + mdbFirstBadged node')" + +fun (sequential) + untypedRange :: "capability \ machine_word set" +where + "untypedRange (UntypedCap d p n f) = {p .. 
p + 2 ^ n - 1}" +| "untypedRange c = {}" + +primrec + acapClass :: "arch_capability \ capclass" +where + "acapClass (ASIDPoolCap _ _) = PhysicalClass" +| "acapClass ASIDControlCap = ASIDMasterClass" +| "acapClass (FrameCap _ _ _ _ _) = PhysicalClass" +| "acapClass (PageTableCap _ _ _) = PhysicalClass" +| "acapClass (VCPUCap _) = PhysicalClass" + +primrec + capClass :: "capability \ capclass" +where + "capClass (NullCap) = NullClass" +| "capClass (DomainCap) = DomainClass" +| "capClass (UntypedCap d p n f) = PhysicalClass" +| "capClass (EndpointCap ref badge s r g gr) = PhysicalClass" +| "capClass (NotificationCap ref badge s r) = PhysicalClass" +| "capClass (CNodeCap ref bits g gs) = PhysicalClass" +| "capClass (ThreadCap ref) = PhysicalClass" +| "capClass (Zombie r b n) = PhysicalClass" +| "capClass (IRQControlCap) = IRQClass" +| "capClass (IRQHandlerCap irq) = IRQClass" +| "capClass (ReplyCap tcb m g) = ReplyClass tcb" +| "capClass (ArchObjectCap cap) = acapClass cap" + +definition + "capRange cap \ + if capClass cap \ PhysicalClass then {} + else {capUntypedPtr cap .. capUntypedPtr cap + 2 ^ capBits cap - 1}" + +definition + "caps_contained' m \ + \p p' c n c' n'. + m p = Some (CTE c n) \ + m p' = Some (CTE c' n') \ + \isUntypedCap c' \ + capRange c' \ untypedRange c \ {} \ + capRange c' \ untypedRange c" + +definition + valid_dlist :: "cte_heap \ bool" +where + "valid_dlist m \ + \p cte. m p = Some cte \ + (let prev = mdbPrev (cteMDBNode cte); + next = mdbNext (cteMDBNode cte) + in (prev \ 0 \ (\cte'. m prev = Some cte' \ mdbNext (cteMDBNode cte') = p)) \ + (next \ 0 \ (\cte'. m next = Some cte' \ mdbPrev (cteMDBNode cte') = p)))" + +definition + "no_0 m \ m 0 = None" +definition + "no_loops m \ \c. \ m \ c \\<^sup>+ c" +definition + "mdb_chain_0 m \ \x \ dom m. m \ x \\<^sup>+ 0" + +definition + "class_links m \ \p p' cte cte'. + m p = Some cte \ + m p' = Some cte' \ + m \ p \ p' \ + capClass (cteCap cte) = capClass (cteCap cte')" + +definition + "is_chunk m cap p p' \ + \p''. m \ p \\<^sup>+ p'' \ m \ p'' \\<^sup>* p' \ + (\cap'' n''. m p'' = Some (CTE cap'' n'') \ sameRegionAs cap cap'')" + +definition + "mdb_chunked m \ \p p' cap cap' n n'. + m p = Some (CTE cap n) \ + m p' = Some (CTE cap' n') \ + sameRegionAs cap cap' \ + p \ p' \ + (m \ p \\<^sup>+ p' \ m \ p' \\<^sup>+ p) \ + (m \ p \\<^sup>+ p' \ is_chunk m cap p p') \ + (m \ p' \\<^sup>+ p \ is_chunk m cap' p' p)" + +definition + parentOf :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ parentOf _" [60,0,60] 61) +where + "s \ c' parentOf c \ + \cte' cte. s c = Some cte \ s c' = Some cte' \ isMDBParentOf cte' cte" + + +context +notes [[inductive_internals =true]] +begin + +inductive + subtree :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \ _" [60,0,60] 61) + for s :: cte_heap and c :: machine_word +where + direct_parent: + "\ s \ c \ c'; c' \ 0; s \ c parentOf c'\ \ s \ c \ c'" + | + trans_parent: + "\ s \ c \ c'; s \ c' \ c''; c'' \ 0; s \ c parentOf c'' \ \ s \ c \ c''" + +end + +definition + "descendants_of' c s \ {c'. s \ c \ c'}" + + +definition + "untyped_mdb' m \ + \p p' c n c' n'. + m p = Some (CTE c n) \ isUntypedCap c \ + m p' = Some (CTE c' n') \ \ isUntypedCap c' \ + capRange c' \ untypedRange c \ {} \ + p' \ descendants_of' p m" + +definition + "untyped_inc' m \ + \p p' c c' n n'. 
+ m p = Some (CTE c n) \ isUntypedCap c \ + m p' = Some (CTE c' n') \ isUntypedCap c' \ + (untypedRange c \ untypedRange c' \ + untypedRange c' \ untypedRange c \ + untypedRange c \ untypedRange c' = {}) \ + (untypedRange c \ untypedRange c' \ (p \ descendants_of' p' m \ untypedRange c \ usableUntypedRange c' ={})) \ + (untypedRange c' \ untypedRange c \ (p' \ descendants_of' p m \ untypedRange c' \ usableUntypedRange c ={})) \ + (untypedRange c = untypedRange c' \ (p' \ descendants_of' p m \ usableUntypedRange c={} + \ p \ descendants_of' p' m \ usableUntypedRange c' = {} \ p = p'))" + +definition + "valid_nullcaps m \ \p n. m p = Some (CTE NullCap n) \ n = nullMDBNode" + +definition + "ut_revocable' m \ \p cap n. m p = Some (CTE cap n) \ isUntypedCap cap \ mdbRevocable n" + +definition + "irq_control m \ + \p n. m p = Some (CTE IRQControlCap n) \ + mdbRevocable n \ + (\p' n'. m p' = Some (CTE IRQControlCap n') \ p' = p)" + +definition + isArchFrameCap :: "capability \ bool" +where + "isArchFrameCap cap \ case cap of ArchObjectCap (FrameCap _ _ _ _ _) \ True | _ \ False" + +definition + distinct_zombie_caps :: "(machine_word \ capability option) \ bool" +where + "distinct_zombie_caps caps \ \ptr ptr' cap cap'. caps ptr = Some cap + \ caps ptr' = Some cap' \ ptr \ ptr' \ isZombie cap + \ capClass cap' = PhysicalClass \ \ isUntypedCap cap' \ \ isArchFrameCap cap' + \ capBits cap = capBits cap' \ capUntypedPtr cap \ capUntypedPtr cap'" + +definition + distinct_zombies :: "cte_heap \ bool" +where + "distinct_zombies m \ distinct_zombie_caps (option_map cteCap \ m)" + +definition + reply_masters_rvk_fb :: "cte_heap \ bool" +where + "reply_masters_rvk_fb ctes \ \cte \ ran ctes. + isReplyCap (cteCap cte) \ capReplyMaster (cteCap cte) + \ mdbRevocable (cteMDBNode cte) \ mdbFirstBadged (cteMDBNode cte)" + +definition + valid_mdb_ctes :: "cte_heap \ bool" +where + "valid_mdb_ctes \ \m. valid_dlist m \ no_0 m \ mdb_chain_0 m \ + valid_badges m \ caps_contained' m \ + mdb_chunked m \ untyped_mdb' m \ + untyped_inc' m \ valid_nullcaps m \ + ut_revocable' m \ class_links m \ distinct_zombies m + \ irq_control m \ reply_masters_rvk_fb m" + +definition + valid_mdb' :: "kernel_state \ bool" +where + "valid_mdb' \ \s. valid_mdb_ctes (ctes_of s)" + +definition + "no_0_obj' \ \s. ksPSpace s 0 = None" + +definition + valid_pspace' :: "kernel_state \ bool" +where + "valid_pspace' \ valid_objs' and + pspace_aligned' and + pspace_canonical' and + pspace_distinct' and + no_0_obj' and + valid_mdb'" + +primrec + runnable' :: "Structures_H.thread_state \ bool" +where + "runnable' (Structures_H.Running) = True" +| "runnable' (Structures_H.Inactive) = False" +| "runnable' (Structures_H.Restart) = True" +| "runnable' (Structures_H.IdleThreadState) = False" +| "runnable' (Structures_H.BlockedOnReceive a b) = False" +| "runnable' (Structures_H.BlockedOnReply) = False" +| "runnable' (Structures_H.BlockedOnSend a b c d e) = False" +| "runnable' (Structures_H.BlockedOnNotification x) = False" + +definition inQ :: "domain \ priority \ tcb \ bool" where + "inQ d p tcb \ tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d" + +lemma inQ_implies_tcbQueueds_of: + "(inQ domain priority |< tcbs_of' s') tcbPtr \ (tcbQueued |< tcbs_of' s') tcbPtr" + by (clarsimp simp: opt_map_def opt_pred_def inQ_def split: option.splits) + +defs ready_qs_runnable_def: + "ready_qs_runnable s \ \t. 
obj_at' tcbQueued t s \ st_tcb_at' runnable' t s" + +definition + (* for given domain and priority, the scheduler bitmap indicates a thread is in the queue *) + (* second level of the bitmap is stored in reverse for better cache locality in common case *) + bitmapQ :: "domain \ priority \ kernel_state \ bool" +where + "bitmapQ d p s \ ksReadyQueuesL1Bitmap s d !! prioToL1Index p + \ ksReadyQueuesL2Bitmap s (d, invertL1Index (prioToL1Index p)) + !! unat (p && mask wordRadix)" +definition + (* A priority is used as a two-part key into the bitmap structure. If an L2 bitmap entry + is set without an L1 entry, updating the L1 entry (shared by many priorities) may make + unexpected threads schedulable *) + bitmapQ_no_L2_orphans :: "kernel_state \ bool" +where + "bitmapQ_no_L2_orphans \ \s. + \d i j. ksReadyQueuesL2Bitmap s (d, invertL1Index i) !! j \ i < l2BitmapSize + \ (ksReadyQueuesL1Bitmap s d !! i)" + +definition + (* If the scheduler finds a set bit in L1 of the bitmap, it must find some bit set in L2 + when it looks there. This lets it omit a check. + L2 entries have wordBits bits each. That means the L1 word only indexes + a small number of L2 entries, despite being stored in a wordBits word. + We allow only bits corresponding to L2 indices to be set. + *) + bitmapQ_no_L1_orphans :: "kernel_state \ bool" +where + "bitmapQ_no_L1_orphans \ \s. + \d i. ksReadyQueuesL1Bitmap s d !! i \ ksReadyQueuesL2Bitmap s (d, invertL1Index i) \ 0 \ + i < l2BitmapSize" + +definition valid_bitmapQ :: "kernel_state \ bool" where + "valid_bitmapQ \ \s. \d p. bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p))" + +definition valid_bitmaps :: "kernel_state \ bool" where + "valid_bitmaps \ \s. valid_bitmapQ s \ bitmapQ_no_L2_orphans s \ bitmapQ_no_L1_orphans s" + +lemma valid_bitmaps_valid_bitmapQ[elim!]: + "valid_bitmaps s \ valid_bitmapQ s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_bitmapQ_no_L2_orphans[elim!]: + "valid_bitmaps s \ bitmapQ_no_L2_orphans s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_bitmapQ_no_L1_orphans[elim!]: + "valid_bitmaps s \ bitmapQ_no_L1_orphans s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_lift: + assumes prq: "\P. f \\s. P (ksReadyQueues s)\" + assumes prqL1: "\P. f \\s. P (ksReadyQueuesL1Bitmap s)\" + assumes prqL2: "\P. f \\s. P (ksReadyQueuesL2Bitmap s)\" + shows "f \valid_bitmaps\" + unfolding valid_bitmaps_def valid_bitmapQ_def bitmapQ_def + bitmapQ_no_L1_orphans_def bitmapQ_no_L2_orphans_def + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +(* when a thread gets added to / removed from a queue, but before bitmap updated *) +definition valid_bitmapQ_except :: "domain \ priority \ kernel_state \ bool" where + "valid_bitmapQ_except d' p' \ \s. + \d p. (d \ d' \ p \ p') \ (bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p)))" + +lemmas bitmapQ_defs = valid_bitmapQ_def valid_bitmapQ_except_def bitmapQ_def + bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def + +\ \ + The tcbSchedPrev and tcbSchedNext fields of a TCB are used only to indicate membership in + one of the ready queues. \ +definition valid_sched_pointers_2 :: + "(obj_ref \ obj_ref) \ (obj_ref \ obj_ref) \ (obj_ref \ bool) \ bool " + where + "valid_sched_pointers_2 prevs nexts ready \ + \ptr. 
prevs ptr \ None \ nexts ptr \ None \ ready ptr" + +abbreviation valid_sched_pointers :: "kernel_state \ bool" where + "valid_sched_pointers s \ + valid_sched_pointers_2 (tcbSchedPrevs_of s) (tcbSchedNexts_of s) (tcbQueued |< tcbs_of' s)" + +lemmas valid_sched_pointers_def = valid_sched_pointers_2_def + +lemma valid_sched_pointersD: + "\valid_sched_pointers s; \ (tcbQueued |< tcbs_of' s) t\ + \ tcbSchedPrevs_of s t = None \ tcbSchedNexts_of s t = None" + by (fastforce simp: valid_sched_pointers_def in_opt_pred opt_map_red) + +definition tcb_in_cur_domain' :: "machine_word \ kernel_state \ bool" where + "tcb_in_cur_domain' t \ \s. obj_at' (\tcb. ksCurDomain s = tcbDomain tcb) t s" + +definition + ct_idle_or_in_cur_domain' :: "kernel_state \ bool" where + "ct_idle_or_in_cur_domain' \ \s. ksSchedulerAction s = ResumeCurrentThread \ + ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s" + +definition + "ct_in_state' test \ \s. st_tcb_at' test (ksCurThread s) s" + +definition + "ct_not_inQ \ \s. ksSchedulerAction s = ResumeCurrentThread + \ obj_at' (Not \ tcbQueued) (ksCurThread s) s" + +abbreviation + "idle' \ \st. st = Structures_H.IdleThreadState" + +abbreviation + "activatable' st \ runnable' st \ idle' st" + +primrec + sch_act_wf :: "scheduler_action \ kernel_state \ bool" +where + "sch_act_wf ResumeCurrentThread = ct_in_state' activatable'" +| "sch_act_wf ChooseNewThread = \" +| "sch_act_wf (SwitchToThread t) = (\s. st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s)" + +definition sch_act_simple :: "kernel_state \ bool" where + "sch_act_simple \ \s. (ksSchedulerAction s = ResumeCurrentThread) \ + (ksSchedulerAction s = ChooseNewThread)" + +definition sch_act_sane :: "kernel_state \ bool" where + "sch_act_sane \ \s. \t. ksSchedulerAction s = SwitchToThread t \ t \ ksCurThread s" + +abbreviation + "sch_act_not t \ \s. ksSchedulerAction s \ SwitchToThread t" + +definition idle_tcb'_2 :: "Structures_H.thread_state \ machine_word option \ bool" where + "idle_tcb'_2 \ \(st, ntfn_opt). (idle' st \ ntfn_opt = None)" + +abbreviation + "idle_tcb' tcb \ idle_tcb'_2 (tcbState tcb, tcbBoundNotification tcb)" + +lemmas idle_tcb'_def = idle_tcb'_2_def + +definition valid_idle' :: "kernel_state \ bool" where + "valid_idle' \ \s. obj_at' idle_tcb' (ksIdleThread s) s \ idle_thread_ptr = ksIdleThread s" + +lemma valid_idle'_tcb_at': + "valid_idle' s \ obj_at' idle_tcb' (ksIdleThread s) s" + by (clarsimp simp: valid_idle'_def) + +definition valid_irq_node' :: "machine_word \ kernel_state \ bool" where + "valid_irq_node' x \ + \s. is_aligned x (size (0::irq) + cteSizeBits) \ + (\irq :: irq. real_cte_at' (x + (ucast irq << cteSizeBits)) s)" + +definition valid_refs' :: "machine_word set \ cte_heap \ bool" where + "valid_refs' R \ \m. \c \ ran m. R \ capRange (cteCap c) = {}" + +(* Addresses of all PTEs in a VSRoot table at p *) +definition table_refs' :: "machine_word \ machine_word set" where + "table_refs' p \ (\i. p + (i << pte_bits)) ` mask_range 0 (ptTranslationBits VSRootPT_T)" + +definition global_refs' :: "kernel_state \ obj_ref set" where + "global_refs' \ \s. + {ksIdleThread s} \ + table_refs' (armKSGlobalUserVSpace (ksArchState s)) \ + range (\irq :: irq. irq_node' s + (ucast irq << cteSizeBits))" + +definition valid_cap_sizes' :: "nat \ cte_heap \ bool" where + "valid_cap_sizes' n hp \ \cte \ ran hp. 2 ^ capBits (cteCap cte) \ n" + +definition valid_global_refs' :: "kernel_state \ bool" where + "valid_global_refs' \ \s. 
+ valid_refs' kernel_data_refs (ctes_of s) + \ global_refs' s \ kernel_data_refs + \ valid_cap_sizes' (gsMaxObjectSize s) (ctes_of s)" + +definition pspace_domain_valid :: "kernel_state \ bool" where + "pspace_domain_valid \ \s. + \x ko. ksPSpace s x = Some ko \ mask_range x (objBitsKO ko) \ kernel_data_refs = {}" + +definition valid_asid_table' :: "(asid \ machine_word) \ bool" where + "valid_asid_table' table \ dom table \ mask_range 0 asid_high_bits \ 0 \ ran table" + +definition "is_vcpu' \ \ko. \vcpu. ko = (KOArch (KOVCPU vcpu))" + +definition max_armKSGICVCPUNumListRegs :: nat where + "max_armKSGICVCPUNumListRegs \ 63" + +definition valid_arch_state' :: "kernel_state \ bool" where + "valid_arch_state' \ \s. + valid_asid_table' (armKSASIDTable (ksArchState s)) \ + 0 \ ran (armKSVMIDTable (ksArchState s)) \ + (case armHSCurVCPU (ksArchState s) of + Some (v, b) \ ko_wp_at' (is_vcpu' and hyp_live') v s + | _ \ True) \ + armKSGICVCPUNumListRegs (ksArchState s) \ max_armKSGICVCPUNumListRegs \ + canonical_address (addrFromKPPtr (armKSGlobalUserVSpace (ksArchState s)))" + +definition irq_issued' :: "irq \ kernel_state \ bool" where + "irq_issued' irq \ \s. intStateIRQTable (ksInterruptState s) irq = IRQSignal" + +definition cteCaps_of :: "kernel_state \ machine_word \ capability option" where + "cteCaps_of s \ option_map cteCap \ ctes_of s" + +definition valid_irq_handlers' :: "kernel_state \ bool" where + "valid_irq_handlers' \ \s. \cap \ ran (cteCaps_of s). \irq. + cap = IRQHandlerCap irq \ irq_issued' irq s" + +definition + "irqs_masked' \ \s. \irq > maxIRQ. intStateIRQTable (ksInterruptState s) irq = IRQInactive" + +definition + "valid_irq_masks' table masked \ \irq. table irq = IRQInactive \ masked irq" + +abbreviation + "valid_irq_states' s \ + valid_irq_masks' (intStateIRQTable (ksInterruptState s)) (irq_masks (ksMachineState s))" + +defs pointerInUserData_def: + "pointerInUserData p \ typ_at' UserDataT (p && ~~ mask pageBits)" + +(* pointerInDeviceData is not defined in spec but is necessary for valid_machine_state' *) +definition pointerInDeviceData :: "machine_word \ kernel_state \ bool" where + "pointerInDeviceData p \ typ_at' UserDataDeviceT (p && ~~ mask pageBits)" + +definition + "valid_machine_state' \ + \s. \p. pointerInUserData p s \ pointerInDeviceData p s \ underlying_memory (ksMachineState s) p = 0" + +definition + "untyped_ranges_zero_inv cps urs \ urs = ran (untypedZeroRange \\<^sub>m cps)" + +abbreviation + "untyped_ranges_zero' s \ untyped_ranges_zero_inv (cteCaps_of s) (gsUntypedZeroRanges s)" + +(* FIXME: this really should be a definition like the above. *) +(* The schedule is invariant. *) +abbreviation + "valid_dom_schedule' \ + \s. ksDomSchedule s \ [] \ (\x\set (ksDomSchedule s). dschDomain x \ maxDomain \ 0 < dschLength x) + \ ksDomSchedule s = ksDomSchedule (newKernelState undefined) + \ ksDomScheduleIdx s < length (ksDomSchedule (newKernelState undefined))" + +definition valid_state' :: "kernel_state \ bool" where + "valid_state' \ \s. 
valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s + \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ valid_idle' s + \ valid_global_refs' s \ valid_arch_state' s + \ valid_irq_node' (irq_node' s) s + \ valid_irq_handlers' s + \ valid_irq_states' s + \ valid_machine_state' s + \ irqs_masked' s + \ sym_heap_sched_pointers s + \ valid_sched_pointers s + \ valid_bitmaps s + \ ct_not_inQ s + \ ct_idle_or_in_cur_domain' s + \ pspace_domain_valid s + \ ksCurDomain s \ maxDomain + \ valid_dom_schedule' s + \ untyped_ranges_zero' s" + +definition + "cur_tcb' s \ tcb_at' (ksCurThread s) s" + +definition + invs' :: "kernel_state \ bool" where + "invs' \ valid_state' and cur_tcb'" + + +subsection "Derived concepts" + +abbreviation + "awaiting_reply' ts \ ts = Structures_H.BlockedOnReply" + + (* x-symbol doesn't have a reverse leadsto.. *) +definition + mdb_prev :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \ _" [60,0,60] 61) +where + "m \ c \ c' \ (\z. m c' = Some z \ c = mdbPrev (cteMDBNode z))" + +definition + makeObjectT :: "kernel_object_type \ kernel_object" + where + "makeObjectT tp \ case tp of + EndpointT \ injectKO (makeObject :: endpoint) + | NotificationT \ injectKO (makeObject :: Structures_H.notification) + | CTET \ injectKO (makeObject :: cte) + | TCBT \ injectKO (makeObject :: tcb) + | UserDataT \ injectKO (makeObject :: user_data) + | UserDataDeviceT \ injectKO (makeObject :: user_data_device) + | KernelDataT \ KOKernelData + | ArchT atp \ (case atp of + PTET \ injectKO (makeObject :: pte) + | ASIDPoolT \ injectKO (makeObject :: asidpool) + | VCPUT \ injectKO (makeObject :: vcpu))" + +definition + objBitsT :: "kernel_object_type \ nat" + where + "objBitsT tp \ objBitsKO (makeObjectT tp)" + + +abbreviation + "active' st \ st = Structures_H.Running \ st = Structures_H.Restart" + +lemma runnable_eq_active': "runnable' = active'" + apply (rule ext) + apply (case_tac st, simp_all) + done + +abbreviation + "simple' st \ st = Structures_H.Inactive \ + st = Structures_H.Running \ + st = Structures_H.Restart \ + idle' st \ awaiting_reply' st" + +abbreviation + "ct_active' \ ct_in_state' active'" + +abbreviation + "ct_running' \ ct_in_state' (\st. st = Structures_H.Running)" + +abbreviation(input) + "all_invs_but_sym_refs_ct_not_inQ' + \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s + \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s + \ valid_irq_states' s \ irqs_masked' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ valid_machine_state' s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s + \ pspace_domain_valid s + \ ksCurDomain s \ maxDomain + \ valid_dom_schedule' s \ untyped_ranges_zero' s" + +abbreviation(input) + "all_invs_but_ct_not_inQ' + \ \s. 
valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s + \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s + \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s + \ valid_irq_states' s \ irqs_masked' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ valid_machine_state' s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s + \ pspace_domain_valid s + \ ksCurDomain s \ maxDomain + \ valid_dom_schedule' s \ untyped_ranges_zero' s" + +lemma all_invs_but_sym_refs_not_ct_inQ_check': + "(all_invs_but_sym_refs_ct_not_inQ' and sym_refs \ state_refs_of' and sym_refs \ state_hyp_refs_of' and ct_not_inQ) = invs'" + by (simp add: pred_conj_def conj_commute conj_left_commute invs'_def valid_state'_def) + +lemma all_invs_but_not_ct_inQ_check': + "(all_invs_but_ct_not_inQ' and ct_not_inQ) = invs'" + by (simp add: pred_conj_def conj_commute conj_left_commute invs'_def valid_state'_def) + +definition + "all_invs_but_ct_idle_or_in_cur_domain' + \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s + \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s + \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s + \ valid_irq_states' s \ irqs_masked' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ valid_machine_state' s + \ cur_tcb' s \ ct_not_inQ s + \ pspace_domain_valid s + \ ksCurDomain s \ maxDomain + \ valid_dom_schedule' s \ untyped_ranges_zero' s" + +lemmas invs_no_cicd'_def = all_invs_but_ct_idle_or_in_cur_domain'_def + +lemma all_invs_but_ct_idle_or_in_cur_domain_check': + "(all_invs_but_ct_idle_or_in_cur_domain' and ct_idle_or_in_cur_domain') = invs'" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def pred_conj_def + conj_left_commute conj_commute invs'_def valid_state'_def) + +abbreviation (input) + "invs_no_cicd' \ all_invs_but_ct_idle_or_in_cur_domain'" + +lemma invs'_to_invs_no_cicd'_def: + "invs' = (all_invs_but_ct_idle_or_in_cur_domain' and ct_idle_or_in_cur_domain')" + by (fastforce simp: invs'_def all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def ) +end + +locale mdb_next = + fixes m :: cte_heap + + fixes greater_eq + defines "greater_eq a b \ m \ a \\<^sup>* b" + + fixes greater + defines "greater a b \ m \ a \\<^sup>+ b" + +locale mdb_order = mdb_next + + assumes no_0: "no_0 m" + assumes chain: "mdb_chain_0 m" + +\ \---------------------------------------------------------------------------\ +section "Alternate split rules for preserving subgoal order" +context begin interpretation Arch . (*FIXME: arch_split*) +lemma ntfn_splits[split]: + " P (case ntfn of Structures_H.ntfn.IdleNtfn \ f1 + | Structures_H.ntfn.ActiveNtfn x \ f2 x + | Structures_H.ntfn.WaitingNtfn x \ f3 x) = + ((ntfn = Structures_H.ntfn.IdleNtfn \ P f1) \ + (\x2. ntfn = Structures_H.ntfn.ActiveNtfn x2 \ + P (f2 x2)) \ + (\x3. ntfn = Structures_H.ntfn.WaitingNtfn x3 \ + P (f3 x3)))" + "P (case ntfn of Structures_H.ntfn.IdleNtfn \ f1 + | Structures_H.ntfn.ActiveNtfn x \ f2 x + | Structures_H.ntfn.WaitingNtfn x \ f3 x) = + (\ (ntfn = Structures_H.ntfn.IdleNtfn \ \ P f1 \ + (\x2. ntfn = Structures_H.ntfn.ActiveNtfn x2 \ + \ P (f2 x2)) \ + (\x3. 
ntfn = Structures_H.ntfn.WaitingNtfn x3 \ + \ P (f3 x3))))" + by (case_tac ntfn; simp)+ +\ \---------------------------------------------------------------------------\ + +section "Lemmas" + +schematic_goal wordBits_def': "wordBits = numeral ?n" (* arch-specific consequence *) + by (simp add: wordBits_def word_size) + +lemma valid_bound_ntfn'_None[simp]: + "valid_bound_ntfn' None = \" + by (auto simp: valid_bound_ntfn'_def) + +lemma valid_bound_ntfn'_Some[simp]: + "valid_bound_ntfn' (Some x) = ntfn_at' x" + by (auto simp: valid_bound_ntfn'_def) + +lemma valid_bound_tcb'_None[simp]: + "valid_bound_tcb' None = \" + by (auto simp: valid_bound_tcb'_def) + +lemma valid_bound_tcb'_Some[simp]: + "valid_bound_tcb' (Some x) = tcb_at' x" + by (auto simp: valid_bound_tcb'_def) + +lemma objBitsKO_Data: + "objBitsKO (if dev then KOUserDataDevice else KOUserData) = pageBits" + by (simp add: objBits_def objBitsKO_def word_size_def) + +lemmas objBits_defs = tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def cteSizeBits_def vcpuBits_def +lemmas untypedBits_defs = minUntypedSizeBits_def maxUntypedSizeBits_def +lemmas objBits_simps = objBits_def objBitsKO_def word_size_def archObjSize_def +lemmas objBits_simps' = objBits_simps objBits_defs + +lemmas wordRadix_def' = wordRadix_def[simplified] + +lemma ps_clear_def2: + "p \ p + 1 \ ps_clear p n s = ({p + 1 .. p + (1 << n) - 1} \ dom (ksPSpace s) = {})" + apply (simp add: ps_clear_def mask_def add_diff_eq) + apply safe + apply (drule_tac a=x in equals0D) + apply clarsimp + apply (drule mp, simp) + apply (erule disjE) + apply simp + apply clarsimp + apply (drule_tac a=x in equals0D) + apply clarsimp + apply (case_tac "p + 1 \ x") + apply clarsimp + apply (simp add: linorder_not_le) + apply (drule plus_one_helper, simp) + done + +lemma projectKO_stateI: + "fst (projectKO e s) = {(obj, s)} \ fst (projectKO e s') = {(obj, s')}" + unfolding projectKO_def + by (auto simp: fail_def return_def valid_def split: option.splits) + +lemma singleton_in_magnitude_check: + "(x, s) \ fst (magnitudeCheck a b c s') \ \s'. fst (magnitudeCheck a b c s') = {(x, s')}" + by (simp add: magnitudeCheck_def when_def in_monad return_def + split: if_split_asm option.split_asm) + +lemma wordSizeCase_simp [simp]: "wordSizeCase a b = b" + by (simp add: wordSizeCase_def wordBits_def word_size) + +lemma projectKO_eq: + "(fst (projectKO ko c) = {(obj, c)}) = (projectKO_opt ko = Some obj)" + by (simp add: projectKO_def fail_def return_def split: option.splits) + +lemma obj_at'_def': + "obj_at' P p s = (\ko obj. ksPSpace s p = Some ko \ is_aligned p (objBitsKO ko) + \ fst (projectKO ko s) = {(obj,s)} \ P obj + \ ps_clear p (objBitsKO ko) s)" + apply (simp add: obj_at'_real_def ko_wp_at'_def projectKO_eq + True_notin_set_replicate_conv objBits_def) + apply fastforce + done + +lemma obj_at'_def: + "obj_at' P p s \ \ko obj. ksPSpace s p = Some ko \ is_aligned p (objBitsKO ko) + \ fst (projectKO ko s) = {(obj,s)} \ P obj + \ ps_clear p (objBitsKO ko) s" + by (simp add: obj_at'_def') + +lemma obj_atE' [elim?]: + assumes objat: "obj_at' P ptr s" + and rl: "\ko obj. 
+ \ ksPSpace s ptr = Some ko; is_aligned ptr (objBitsKO ko); + fst (projectKO ko s) = {(obj,s)}; P obj; + ps_clear ptr (objBitsKO ko) s \ \ R" + shows "R" + using objat unfolding obj_at'_def by (auto intro!: rl) + +lemma obj_atI' [intro?]: + "\ ksPSpace s ptr = Some ko; is_aligned ptr (objBitsKO ko); + fst (projectKO ko s) = {(obj, s)}; P obj; + ps_clear ptr (objBitsKO ko) s \ + \ obj_at' P ptr s" + unfolding obj_at'_def by (auto) + +lemma vcpu_at_is_vcpu': + "vcpu_at' v = ko_wp_at' is_vcpu' v" + apply (rule ext) + apply (clarsimp simp: typ_at'_def is_vcpu'_def ko_wp_at'_def) + apply (rule iffI; clarsimp?) + apply (case_tac ko; simp; rename_tac ako; case_tac ako; simp) + done + +lemma cte_at'_def: + "cte_at' p s \ \cte::cte. fst (getObject p s) = {(cte,s)}" + by (simp add: cte_wp_at'_def) + + +lemma tcb_cte_cases_simps[simp]: + "tcb_cte_cases 0 = Some (tcbCTable, tcbCTable_update)" + "tcb_cte_cases 32 = Some (tcbVTable, tcbVTable_update)" + "tcb_cte_cases 64 = Some (tcbReply, tcbReply_update)" + "tcb_cte_cases 96 = Some (tcbCaller, tcbCaller_update)" + "tcb_cte_cases 128 = Some (tcbIPCBufferFrame, tcbIPCBufferFrame_update)" + by (simp add: tcb_cte_cases_def cteSizeBits_def)+ + +lemma refs_of'_simps[simp]: + "refs_of' (KOTCB tcb) = tcb_st_refs_of' (tcbState tcb) \ tcb_bound_refs' (tcbBoundNotification tcb)" + "refs_of' (KOCTE cte) = {}" + "refs_of' (KOEndpoint ep) = ep_q_refs_of' ep" + "refs_of' (KONotification ntfn) = ntfn_q_refs_of' (ntfnObj ntfn) \ ntfn_bound_refs' (ntfnBoundTCB ntfn)" + "refs_of' (KOUserData) = {}" + "refs_of' (KOUserDataDevice) = {}" + "refs_of' (KOKernelData) = {}" + "refs_of' (KOArch ako) = {}" + by (auto simp: refs_of'_def) + +lemma tcb_st_refs_of'_simps[simp]: + "tcb_st_refs_of' (Running) = {}" + "tcb_st_refs_of' (Inactive) = {}" + "tcb_st_refs_of' (Restart) = {}" + "tcb_st_refs_of' (BlockedOnReceive x'' a') = {(x'', TCBBlockedRecv)}" + "tcb_st_refs_of' (BlockedOnSend x a b c d) = {(x, TCBBlockedSend)}" + "tcb_st_refs_of' (BlockedOnNotification x') = {(x', TCBSignal)}" + "tcb_st_refs_of' (BlockedOnReply) = {}" + "tcb_st_refs_of' (IdleThreadState) = {}" + by (auto simp: tcb_st_refs_of'_def) + +lemma ep_q_refs_of'_simps[simp]: + "ep_q_refs_of' IdleEP = {}" + "ep_q_refs_of' (RecvEP q) = set q \ {EPRecv}" + "ep_q_refs_of' (SendEP q) = set q \ {EPSend}" + by (auto simp: ep_q_refs_of'_def) + +lemma ntfn_q_refs_of'_simps[simp]: + "ntfn_q_refs_of' IdleNtfn = {}" + "ntfn_q_refs_of' (WaitingNtfn q) = set q \ {NTFNSignal}" + "ntfn_q_refs_of' (ActiveNtfn b) = {}" + by (auto simp: ntfn_q_refs_of'_def) + +lemma ntfn_bound_refs'_simps[simp]: + "ntfn_bound_refs' (Some t) = {(t, NTFNBound)}" + "ntfn_bound_refs' None = {}" + by (auto simp: ntfn_bound_refs'_def) + +lemma tcb_bound_refs'_simps[simp]: + "tcb_bound_refs' (Some a) = {(a, TCBBound)}" + "tcb_bound_refs' None = {}" + by (auto simp: tcb_bound_refs'_def) + +lemma refs_of_rev': + "(x, TCBBlockedRecv) \ refs_of' ko = + (\tcb. ko = KOTCB tcb \ (\a. tcbState tcb = BlockedOnReceive x a))" + "(x, TCBBlockedSend) \ refs_of' ko = + (\tcb. ko = KOTCB tcb \ (\a b c d. tcbState tcb = BlockedOnSend x a b c d))" + "(x, TCBSignal) \ refs_of' ko = + (\tcb. ko = KOTCB tcb \ tcbState tcb = BlockedOnNotification x)" + "(x, EPRecv) \ refs_of' ko = + (\ep. ko = KOEndpoint ep \ (\q. ep = RecvEP q \ x \ set q))" + "(x, EPSend) \ refs_of' ko = + (\ep. ko = KOEndpoint ep \ (\q. ep = SendEP q \ x \ set q))" + "(x, NTFNSignal) \ refs_of' ko = + (\ntfn. ko = KONotification ntfn \ (\q. 
ntfnObj ntfn = WaitingNtfn q \ x \ set q))" + "(x, TCBBound) \ refs_of' ko = + (\tcb. ko = KOTCB tcb \ (tcbBoundNotification tcb = Some x))" + "(x, NTFNBound) \ refs_of' ko = + (\ntfn. ko = KONotification ntfn \ (ntfnBoundTCB ntfn = Some x))" + by (auto simp: refs_of'_def + tcb_st_refs_of'_def + ep_q_refs_of'_def + ntfn_q_refs_of'_def + ntfn_bound_refs'_def + tcb_bound_refs'_def + split: Structures_H.kernel_object.splits + Structures_H.thread_state.splits + Structures_H.endpoint.splits + Structures_H.notification.splits + Structures_H.ntfn.splits)+ + +lemma tcb_hyp_refs_of'_simps[simp]: + "tcb_hyp_refs' atcb = tcb_vcpu_refs' (atcbVCPUPtr atcb)" + by (auto simp: tcb_hyp_refs'_def) + +lemma tcb_vcpu_refs_of'_simps[simp]: + "tcb_vcpu_refs' (Some vc) = {(vc, TCBHypRef)}" + "tcb_vcpu_refs' None = {}" + by (auto simp: tcb_vcpu_refs'_def) + +lemma vcpu_tcb_refs_of'_simps[simp]: + "vcpu_tcb_refs' (Some tcb) = {(tcb, HypTCBRef)}" + "vcpu_tcb_refs' None = {}" + by (auto simp: vcpu_tcb_refs'_def) + +lemma refs_of_a'_simps[simp]: + "refs_of_a' (KOASIDPool p) = {}" + "refs_of_a' (KOPTE pt) = {}" + "refs_of_a' (KOVCPU v) = vcpu_tcb_refs' (vcpuTCBPtr v)" + by (auto simp: refs_of_a'_def) + +lemma hyp_refs_of'_simps[simp]: + "hyp_refs_of' (KOCTE cte) = {}" + "hyp_refs_of' (KOTCB tcb) = tcb_hyp_refs' (tcbArch tcb)" + "hyp_refs_of' (KOEndpoint ep) = {}" + "hyp_refs_of' (KONotification ntfn) = {}" + "hyp_refs_of' (KOUserData) = {}" + "hyp_refs_of' (KOUserDataDevice) = {}" + "hyp_refs_of' (KOKernelData) = {}" + "hyp_refs_of' (KOArch ao) = refs_of_a' ao" + by (auto simp: hyp_refs_of'_def) + +lemma hyp_refs_of_rev': + "(x, TCBHypRef) \ hyp_refs_of' ko = + (\tcb. ko = KOTCB tcb \ (atcbVCPUPtr (tcbArch tcb) = Some x))" + "(x, HypTCBRef) \ hyp_refs_of' ko = + (\v. ko = KOArch (KOVCPU v) \ (vcpuTCBPtr v = Some x))" + by (auto simp: hyp_refs_of'_def tcb_hyp_refs'_def tcb_vcpu_refs'_def + vcpu_tcb_refs'_def refs_of_a'_def + split: kernel_object.splits arch_kernel_object.splits option.split) + +lemma ko_wp_at'_weakenE: + "\ ko_wp_at' P p s; \ko. P ko \ Q ko \ \ ko_wp_at' Q p s" + by (clarsimp simp: ko_wp_at'_def) + +lemma projectKO_opt_tcbD: + "projectKO_opt ko = Some (tcb :: tcb) \ ko = KOTCB tcb" + by (cases ko, simp_all add: projectKO_opt_tcb) + +lemma st_tcb_at_refs_of_rev': + "ko_wp_at' (\ko. (x, TCBBlockedRecv) \ refs_of' ko) t s + = st_tcb_at' (\ts. \a. ts = BlockedOnReceive x a) t s" + "ko_wp_at' (\ko. (x, TCBBlockedSend) \ refs_of' ko) t s + = st_tcb_at' (\ts. \a b c d. ts = BlockedOnSend x a b c d) t s" + "ko_wp_at' (\ko. (x, TCBSignal) \ refs_of' ko) t s + = st_tcb_at' (\ts. ts = BlockedOnNotification x) t s" + by (fastforce simp: refs_of_rev' pred_tcb_at'_def obj_at'_real_def + projectKO_opt_tcb[where e="KOTCB y" for y] + elim!: ko_wp_at'_weakenE + dest!: projectKO_opt_tcbD)+ + +lemma state_refs_of'_elemD: + "\ ref \ state_refs_of' s x \ \ ko_wp_at' (\obj. ref \ refs_of' obj) x s" + by (clarsimp simp add: state_refs_of'_def ko_wp_at'_def + split: option.splits if_split_asm) + +lemma obj_at_state_refs_ofD': + "obj_at' P p s \ \obj. 
P obj \ state_refs_of' s p = refs_of' (injectKO obj)" + apply (clarsimp simp: obj_at'_real_def project_inject ko_wp_at'_def conj_commute) + apply (rule exI, erule conjI) + apply (clarsimp simp: state_refs_of'_def) + done + +lemma ko_at_state_refs_ofD': + "ko_at' ko p s \ state_refs_of' s p = refs_of' (injectKO ko)" + by (clarsimp dest!: obj_at_state_refs_ofD') + +definition + tcb_ntfn_is_bound' :: "machine_word option \ tcb \ bool" +where + "tcb_ntfn_is_bound' ntfn tcb \ tcbBoundNotification tcb = ntfn" + +lemma st_tcb_at_state_refs_ofD': + "st_tcb_at' P t s \ \ts ntfnptr. P ts \ obj_at' (tcb_ntfn_is_bound' ntfnptr) t s + \ state_refs_of' s t = (tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr)" + by (auto simp: pred_tcb_at'_def tcb_ntfn_is_bound'_def obj_at'_def projectKO_eq + project_inject state_refs_of'_def) + +lemma bound_tcb_at_state_refs_ofD': + "bound_tcb_at' P t s \ \ts ntfnptr. P ntfnptr \ obj_at' (tcb_ntfn_is_bound' ntfnptr) t s + \ state_refs_of' s t = (tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr)" + by (auto simp: pred_tcb_at'_def obj_at'_def tcb_ntfn_is_bound'_def projectKO_eq + project_inject state_refs_of'_def) + +lemma sym_refs_obj_atD': + "\ obj_at' P p s; sym_refs (state_refs_of' s) \ \ + \obj. P obj \ state_refs_of' s p = refs_of' (injectKO obj) + \ (\(x, tp)\refs_of' (injectKO obj). ko_wp_at' (\ko. (p, symreftype tp) \ refs_of' ko) x s)" + apply (drule obj_at_state_refs_ofD') + apply (erule exEI, clarsimp) + apply (drule sym, simp) + apply (drule(1) sym_refsD) + apply (erule state_refs_of'_elemD) + done + +lemma sym_refs_ko_atD': + "\ ko_at' ko p s; sym_refs (state_refs_of' s) \ \ + state_refs_of' s p = refs_of' (injectKO ko) \ + (\(x, tp)\refs_of' (injectKO ko). ko_wp_at' (\ko. (p, symreftype tp) \ refs_of' ko) x s)" + by (drule(1) sym_refs_obj_atD', simp) + +lemma sym_refs_st_tcb_atD': + "\ st_tcb_at' P t s; sym_refs (state_refs_of' s) \ \ + \ts ntfnptr. P ts \ obj_at' (tcb_ntfn_is_bound' ntfnptr) t s + \ state_refs_of' s t = tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr + \ (\(x, tp)\tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr. ko_wp_at' (\ko. (t, symreftype tp) \ refs_of' ko) x s)" + apply (drule st_tcb_at_state_refs_ofD') + apply (erule exE)+ + apply (rule_tac x=ts in exI) + apply (rule_tac x=ntfnptr in exI) + apply clarsimp + apply (frule obj_at_state_refs_ofD') + apply (drule (1)sym_refs_obj_atD') + apply auto + done + +lemma sym_refs_bound_tcb_atD': + "\ bound_tcb_at' P t s; sym_refs (state_refs_of' s) \ \ + \ts ntfnptr. P ntfnptr \ obj_at' (tcb_ntfn_is_bound' ntfnptr) t s + \ state_refs_of' s t = tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr + \ (\(x, tp)\tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr. ko_wp_at' (\ko. (t, symreftype tp) \ refs_of' ko) x s)" + apply (drule bound_tcb_at_state_refs_ofD') + apply (erule exE)+ + apply (rule_tac x=ts in exI) + apply (rule_tac x=ntfnptr in exI) + apply clarsimp + apply (frule obj_at_state_refs_ofD') + apply (drule (1)sym_refs_obj_atD') + apply auto + done + +lemma state_hyp_refs_of'_elemD: + "\ ref \ state_hyp_refs_of' s x \ \ ko_wp_at' (\obj. ref \ hyp_refs_of' obj) x s" + by (clarsimp simp add: state_hyp_refs_of'_def ko_wp_at'_def + split: option.splits if_split_asm) + +lemma obj_at_state_hyp_refs_ofD': + "obj_at' P p s \ \ko. 
P ko \ state_hyp_refs_of' s p = hyp_refs_of' (injectKO ko)" + apply (clarsimp simp: obj_at'_real_def project_inject ko_wp_at'_def conj_commute) + apply (rule exI, erule conjI) + apply (clarsimp simp: state_hyp_refs_of'_def) + done + +lemma ko_at_state_hyp_refs_ofD': + "ko_at' ko p s \ state_hyp_refs_of' s p = hyp_refs_of' (injectKO ko)" + by (clarsimp dest!: obj_at_state_hyp_refs_ofD') + +lemma hyp_sym_refs_obj_atD': + "\ obj_at' P p s; sym_refs (state_hyp_refs_of' s) \ \ + \ko. P ko \ state_hyp_refs_of' s p = hyp_refs_of' (injectKO ko) \ + (\(x, tp)\hyp_refs_of' (injectKO ko). ko_wp_at' (\ko. (p, symreftype tp) \ hyp_refs_of' ko) x s)" + apply (drule obj_at_state_hyp_refs_ofD') + apply (erule exEI, clarsimp) + apply (drule sym, simp) + apply (drule(1) sym_refsD) + apply (erule state_hyp_refs_of'_elemD) + done + +lemma refs_of_live': + "refs_of' ko \ {} \ live' ko" + apply (cases ko, simp_all add: live'_def) + apply clarsimp + apply (rename_tac notification) + apply (case_tac "ntfnObj notification"; simp) + apply fastforce+ + done + +lemma hyp_refs_of_hyp_live': + "hyp_refs_of' ko \ {} \ hyp_live' ko" + apply (cases ko, simp_all) + apply (rename_tac tcb_ext) + apply (simp add: tcb_hyp_refs'_def hyp_live'_def) + apply (case_tac "atcbVCPUPtr (tcbArch tcb_ext)"; clarsimp) + apply (clarsimp simp: hyp_live'_def arch_live'_def refs_of_a'_def vcpu_tcb_refs'_def + split: arch_kernel_object.splits option.splits) + done + +lemma hyp_refs_of_live': + "hyp_refs_of' ko \ {} \ live' ko" + by (cases ko, simp_all add: live'_def hyp_refs_of_hyp_live') + +lemma if_live_then_nonz_capE': + "\ if_live_then_nonz_cap' s; ko_wp_at' live' p s \ + \ ex_nonz_cap_to' p s" + by (fastforce simp: if_live_then_nonz_cap'_def) + +lemma if_live_then_nonz_capD': + assumes x: "if_live_then_nonz_cap' s" "ko_wp_at' P p s" + assumes y: "\obj. \ P obj; ksPSpace s p = Some obj; is_aligned p (objBitsKO obj) \ \ live' obj" + shows "ex_nonz_cap_to' p s" using x + by (clarsimp elim!: if_live_then_nonz_capE' y + simp: ko_wp_at'_def) + +lemma if_live_state_refsE: + "\ if_live_then_nonz_cap' s; + state_refs_of' s p \ {} \ \ ex_nonz_cap_to' p s" + by (clarsimp simp: state_refs_of'_def ko_wp_at'_def + split: option.splits if_split_asm + elim!: refs_of_live' if_live_then_nonz_capE') + +lemmas ex_cte_cap_to'_def = ex_cte_cap_wp_to'_def + +lemma if_unsafe_then_capD': + "\ cte_wp_at' P p s; if_unsafe_then_cap' s; \cte. 
P cte \ cteCap cte \ NullCap \ + \ ex_cte_cap_to' p s" + unfolding if_unsafe_then_cap'_def + apply (erule allE, erule mp) + apply (clarsimp simp: cte_wp_at'_def) + done + +lemmas valid_cap_simps' = + valid_cap'_def[split_simps capability.split arch_capability.split] + +lemma max_ipc_words: + "max_ipc_words = 0x80" + unfolding max_ipc_words_def + by (simp add: msgMaxLength_def msgLengthBits_def msgMaxExtraCaps_def msgExtraCapBits_def capTransferDataSize_def) + +lemma valid_objsE' [elim]: + "\ valid_objs' s; ksPSpace s x = Some obj; valid_obj' obj s \ R \ \ R" + unfolding valid_objs'_def by auto + +lemma pspace_distinctD': + "\ ksPSpace s x = Some v; pspace_distinct' s \ \ ps_clear x (objBitsKO v) s" + apply (simp add: pspace_distinct'_def) + apply (drule bspec, erule domI) + apply simp + done + +lemma pspace_alignedD': + "\ ksPSpace s x = Some v; pspace_aligned' s \ \ is_aligned x (objBitsKO v)" + apply (simp add: pspace_aligned'_def) + apply (drule bspec, erule domI) + apply simp + done + +lemma next_unfold: + "mdb_next s c = + (case s c of Some cte \ Some (mdbNext (cteMDBNode cte)) | None \ None)" + by (simp add: mdb_next_def split: option.split) + +lemma is_physical_cases: + "(capClass cap = PhysicalClass) = + (case cap of NullCap \ False + | DomainCap \ False + | IRQControlCap \ False + | IRQHandlerCap irq \ False + | ReplyCap r m cr \ False + | ArchObjectCap ASIDControlCap \ False + | _ \ True)" + by (simp split: capability.splits arch_capability.splits zombie_type.splits) + +lemma sch_act_sane_not: + "sch_act_sane s = sch_act_not (ksCurThread s) s" + by (auto simp: sch_act_sane_def) + +lemma objBits_cte_conv: "objBits (cte :: cte) = cteSizeBits" + by (simp add: objBits_simps word_size) + +lemmas valid_irq_states'_def = valid_irq_masks'_def + +lemma valid_pspaceE' [elim]: + "\valid_pspace' s; + \ valid_objs' s; pspace_aligned' s; pspace_distinct' s; pspace_canonical' s; no_0_obj' s; + valid_mdb' s \ \ R \ \ R" + unfolding valid_pspace'_def by simp + +lemma idle'_no_refs: + "valid_idle' s \ state_refs_of' s (ksIdleThread s) = {}" + by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def tcb_ntfn_is_bound'_def + projectKO_eq project_inject state_refs_of'_def idle_tcb'_def) + +lemma idle'_not_queued': + "\valid_idle' s; sym_refs (state_refs_of' s); + state_refs_of' s ptr = insert t queue \ {rt}\\ + ksIdleThread s \ queue" + by (frule idle'_no_refs, fastforce simp: valid_idle'_def sym_refs_def) + +lemma idle'_not_queued: + "\valid_idle' s; sym_refs (state_refs_of' s); + state_refs_of' s ptr = queue \ {rt}\ \ + ksIdleThread s \ queue" + by (frule idle'_no_refs, fastforce simp: valid_idle'_def sym_refs_def) + + +lemma obj_at_conj': + "\ obj_at' P p s; obj_at' Q p s \ \ obj_at' (\k. P k \ Q k) p s" + by (auto simp: obj_at'_def) + +lemma pred_tcb_at_conj': + "\ pred_tcb_at' proj P t s; pred_tcb_at' proj Q t s \ \ pred_tcb_at' proj (\a. P a \ Q a) t s" + apply (simp add: pred_tcb_at'_def) + apply (erule (1) obj_at_conj') + done + +lemma obj_at_False' [simp]: + "obj_at' (\k. False) t s = False" + by (simp add: obj_at'_def) + +lemma pred_tcb_at_False' [simp]: + "pred_tcb_at' proj (\st. 
False) t s = False" + by (simp add: pred_tcb_at'_def obj_at'_def) + +lemma obj_at'_pspaceI: + "obj_at' t ref s \ ksPSpace s = ksPSpace s' \ obj_at' t ref s'" + by (auto intro!: projectKO_stateI simp: obj_at'_def ps_clear_def) + +lemma cte_wp_at'_pspaceI: + "\cte_wp_at' P p s; ksPSpace s = ksPSpace s'\ \ cte_wp_at' P p s'" + apply (clarsimp simp add: cte_wp_at'_def getObject_def) + apply (drule equalityD2) + apply (clarsimp simp: in_monad loadObject_cte gets_def + get_def bind_def return_def split_def) + apply (case_tac b) + apply (simp_all add: in_monad typeError_def) + prefer 2 + apply (simp add: in_monad return_def alignError_def assert_opt_def + alignCheck_def magnitudeCheck_def when_def bind_def + split: if_split_asm option.splits) + apply (clarsimp simp: in_monad return_def alignError_def fail_def assert_opt_def + alignCheck_def bind_def when_def + objBits_cte_conv tcbCTableSlot_def tcbVTableSlot_def + tcbReplySlot_def objBits_defs + split: if_split_asm cong: image_cong + dest!: singleton_in_magnitude_check) + done + +lemma valid_untyped'_pspaceI: + "\ksPSpace s = ksPSpace s'; valid_untyped' d p n idx s\ + \ valid_untyped' d p n idx s'" + by (simp add: valid_untyped'_def ko_wp_at'_def ps_clear_def) + +lemma typ_at'_pspaceI: + "typ_at' T p s \ ksPSpace s = ksPSpace s' \ typ_at' T p s'" + by (simp add: typ_at'_def ko_wp_at'_def ps_clear_def) + +lemma frame_at'_pspaceI: + "frame_at' p sz d s \ ksPSpace s = ksPSpace s' \ frame_at' p sz d s'" + by (simp add: frame_at'_def typ_at'_def ko_wp_at'_def ps_clear_def) + +lemma valid_cap'_pspaceI: + "s \' cap \ ksPSpace s = ksPSpace s' \ s' \' cap" + unfolding valid_cap'_def + by (cases cap) + (auto intro: obj_at'_pspaceI[rotated] + cte_wp_at'_pspaceI valid_untyped'_pspaceI + typ_at'_pspaceI[rotated] frame_at'_pspaceI[rotated] + simp: vspace_table_at'_defs valid_arch_cap'_def valid_arch_cap_ref'_def + split: arch_capability.split zombie_type.split option.splits) + +lemma valid_obj'_pspaceI: + "valid_obj' obj s \ ksPSpace s = ksPSpace s' \ valid_obj' obj s'" + unfolding valid_obj'_def + by (cases obj) + (auto simp: valid_ep'_def valid_ntfn'_def valid_tcb'_def valid_cte'_def + valid_tcb_state'_def valid_bound_tcb'_def + valid_bound_ntfn'_def valid_arch_tcb'_def + split: Structures_H.endpoint.splits Structures_H.notification.splits + Structures_H.thread_state.splits ntfn.splits option.splits + intro: obj_at'_pspaceI valid_cap'_pspaceI typ_at'_pspaceI) + +lemma pred_tcb_at'_pspaceI: + "pred_tcb_at' proj P t s \ ksPSpace s = ksPSpace s' \ pred_tcb_at' proj P t s'" + unfolding pred_tcb_at'_def by (fast intro: obj_at'_pspaceI) + +lemma valid_mdb'_pspaceI: + "valid_mdb' s \ ksPSpace s = ksPSpace s' \ valid_mdb' s'" + unfolding valid_mdb'_def by simp + +lemma state_refs_of'_pspaceI: + "P (state_refs_of' s) \ ksPSpace s = ksPSpace s' \ P (state_refs_of' s')" + unfolding state_refs_of'_def ps_clear_def by (simp cong: option.case_cong) + +lemma state_hyp_refs_of'_pspaceI: + "P (state_hyp_refs_of' s) \ ksPSpace s = ksPSpace s' \ P (state_hyp_refs_of' s')" + unfolding state_hyp_refs_of'_def ps_clear_def by (simp cong: option.case_cong) + +lemma valid_pspace': + "valid_pspace' s \ ksPSpace s = ksPSpace s' \ valid_pspace' s'" + by (auto simp add: valid_pspace'_def valid_objs'_def pspace_aligned'_def + pspace_distinct'_def ps_clear_def no_0_obj'_def ko_wp_at'_def + typ_at'_def pspace_canonical'_def + intro: valid_obj'_pspaceI valid_mdb'_pspaceI) + +lemma ex_cte_cap_to_pspaceI'[elim]: + "ex_cte_cap_to' p s \ ksPSpace s = ksPSpace s' \ + intStateIRQNode 
(ksInterruptState s) = intStateIRQNode (ksInterruptState s') + \ ex_cte_cap_to' p s'" + by (fastforce simp: ex_cte_cap_to'_def elim: cte_wp_at'_pspaceI) + +lemma valid_idle'_pspace_itI[elim]: + "\ valid_idle' s; ksPSpace s = ksPSpace s'; ksIdleThread s = ksIdleThread s' \ + \ valid_idle' s'" + apply (clarsimp simp: valid_idle'_def ex_nonz_cap_to'_def) + apply (erule obj_at'_pspaceI, assumption) + done + +lemma obj_at'_weaken: + assumes x: "obj_at' P t s" + assumes y: "\obj. P obj \ P' obj" + shows "obj_at' P' t s" + by (insert x, clarsimp simp: obj_at'_def y) + +lemma cte_wp_at_weakenE': + "\cte_wp_at' P t s; \c. P c \ P' c\ \ cte_wp_at' P' t s" + by (fastforce simp: cte_wp_at'_def) + +lemma obj_at'_weakenE: + "\ obj_at' P p s; \k. P k \ P' k \ \ obj_at' P' p s" + by (clarsimp simp: obj_at'_def) + +lemma pred_tcb'_weakenE: + "\ pred_tcb_at' proj P t s; \st. P st \ P' st \ \ pred_tcb_at' proj P' t s" + apply (simp add: pred_tcb_at'_def) + apply (erule obj_at'_weakenE) + apply clarsimp + done + +lemma lookupAround2_char1: + "(fst (lookupAround2 x s) = Some (y, v)) = + (y \ x \ s y = Some v \ (\z. y < z \ z \ x \ s z = None))" + apply (simp add: lookupAround2_def Let_def split_def lookupAround_def + split del: if_split + split: option.split) + apply (intro conjI impI iffI) + apply (clarsimp split: if_split_asm) + apply (rule Max_prop) + apply (simp add: order_less_imp_le) + apply fastforce + apply (clarsimp split: if_split_asm) + apply (rule Max_prop) + apply clarsimp + apply fastforce + apply (clarsimp split: if_split_asm) + apply (subst(asm) Max_less_iff) + apply simp + apply fastforce + apply (fastforce intro: order_neq_le_trans) + apply (clarsimp cong: conj_cong) + apply (rule conjI) + apply fastforce + apply (rule order_antisym) + apply (subst Max_le_iff) + apply simp + apply fastforce + apply clarsimp + apply (rule ccontr) + apply (fastforce simp add: linorder_not_le) + apply (rule Max_ge) + apply simp + apply fastforce + apply (intro allI impI iffI) + apply clarsimp + apply simp + apply clarsimp + apply (drule spec[where x=x]) + apply simp + done + +lemma lookupAround2_None1: + "(fst (lookupAround2 x s) = None) = (\y \ x. s y = None)" + apply (simp add: lookupAround2_def Let_def split_def lookupAround_def + split del: if_split + split: option.split) + apply safe + apply (fastforce split: if_split_asm) + apply (clarsimp simp: order_less_imp_le) + apply fastforce + done + +lemma lookupAround2_None2: + "(snd (lookupAround2 x s) = None) = (\y. x < y \ s y = None)" + apply (simp add: lookupAround2_def Let_def split_def del: maybe_def + split: option.splits) + apply (simp add: o_def map_option_is_None [where f=fst, unfolded map_option_case]) + apply (simp add: lookupAround_def Let_def) + apply fastforce + done + +lemma lookupAround2_char2: + "(snd (lookupAround2 x s) = Some y) = (x < y \ s y \ None \ (\z. x < z \ z < y \ s z = None))" + apply (simp add: lookupAround2_def Let_def split_def o_def + del: maybe_def + split: option.splits) + apply (simp add: o_def map_option_is_None [where f=fst, unfolded map_option_case]) + apply (simp add: lookupAround_def Let_def) + apply (rule conjI) + apply fastforce + apply clarsimp + apply (rule iffI) + apply (frule subst[where P="\x. 
x \ y2" for y2, OF _ Min_in]) + apply simp + apply fastforce + apply clarsimp + apply (subst(asm) Min_gr_iff, simp, fastforce, simp(no_asm_use), fastforce) + apply clarsimp + apply (rule order_antisym) + apply (fastforce intro: Min_le) + apply (subst Min_ge_iff) + apply simp + apply fastforce + apply clarsimp + apply (rule ccontr, simp add: linorder_not_le) + done + +lemma ps_clearI: + "\ is_aligned p n; (1 :: machine_word) < 2 ^ n; + \x. \ x > p; x \ p + 2 ^ n - 1 \ \ ksPSpace s x = None \ + \ ps_clear p n s" + apply (subgoal_tac "p \ p + 1") + apply (simp add: ps_clear_def2) + apply (rule ccontr, erule nonemptyE, clarsimp) + apply (drule word_leq_le_minus_one[where x="z + 1" for z]) + apply clarsimp + apply simp + apply (erule is_aligned_get_word_bits) + apply (erule(1) is_aligned_no_wrap') + apply simp + done + +lemma ps_clear_lookupAround2: + "\ ps_clear p' n s; ksPSpace s p' = Some x; + p' \ p; p \ p' + 2 ^ n - 1; + \ fst (lookupAround2 p (ksPSpace s)) = Some (p', x); + case_option True (\x. x - p' >= 2 ^ n) (snd (lookupAround2 p (ksPSpace s))) + \ \ P (lookupAround2 p (ksPSpace s)) \ \ P (lookupAround2 p (ksPSpace s))" + apply (drule meta_mp) + apply (cases "fst (lookupAround2 p (ksPSpace s))") + apply (simp add: lookupAround2_None1) + apply clarsimp + apply (clarsimp simp: lookupAround2_char1) + apply (frule spec[where x=p']) + apply (simp add: linorder_not_less ps_clear_def mask_def add_diff_eq) + apply (drule_tac f="\S. a \ S" in arg_cong) + apply (simp add: domI) + apply (frule(1) order_trans, simp) + apply (erule meta_mp) + apply (clarsimp split: option.split) + apply (clarsimp simp: lookupAround2_char2 ps_clear_def mask_def add_diff_eq) + apply (drule_tac a=x2 in equals0D) + apply (simp add: domI) + apply (subst(asm) order_less_imp_le[OF order_le_less_trans[where y=p]], + assumption, assumption) + apply simp + apply (erule impCE, simp_all) + apply (simp add: linorder_not_le) + apply (subst(asm) add_diff_eq[symmetric], + subst(asm) add.commute, + drule word_l_diffs(2), + fastforce simp only: field_simps) + apply (rule ccontr, simp add: linorder_not_le) + apply (drule word_le_minus_one_leq, fastforce) + done + +lemma in_magnitude_check: + "\ is_aligned x n; (1 :: machine_word) < 2 ^ n; ksPSpace s x = Some y \ \ + ((v, s') \ fst (magnitudeCheck x (snd (lookupAround2 x (ksPSpace s))) n s)) + = (s' = s \ ps_clear x n s)" + apply (rule iffI) + apply (clarsimp simp: magnitudeCheck_def in_monad lookupAround2_None2 + lookupAround2_char2 + split: option.split_asm) + apply (erule(1) ps_clearI) + apply simp + apply (erule(1) ps_clearI) + apply (simp add: linorder_not_less) + apply (drule word_leq_le_minus_one[where x="2 ^ n"]) + apply (clarsimp simp: power_overflow) + apply (drule word_l_diffs) + apply simp + apply (simp add: field_simps) + apply clarsimp + apply (erule is_aligned_get_word_bits) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (simp add: is_aligned_no_overflow) + apply (clarsimp simp add: magnitudeCheck_def in_monad + split: option.split_asm) + apply simp + apply (simp add: power_overflow) + done + +lemma in_magnitude_check3: + "\ \z. 
x < z \ z \ y \ ksPSpace s z = None; is_aligned x n; + (1 :: machine_word) < 2 ^ n; ksPSpace s x = Some v; x \ y; y - x < 2 ^ n \ \ + fst (magnitudeCheck x (snd (lookupAround2 y (ksPSpace s))) n s) + = (if ps_clear x n s then {((), s)} else {})" + apply (rule set_eqI, rule iffI) + apply (clarsimp simp: magnitudeCheck_def lookupAround2_char2 + lookupAround2_None2 in_monad + split: option.split_asm) + apply (drule(1) range_convergence1) + apply (erule(1) ps_clearI) + apply simp + apply (erule is_aligned_get_word_bits) + apply (drule(1) range_convergence2) + apply (erule(1) ps_clearI) + apply (simp add: linorder_not_less) + apply (drule word_leq_le_minus_one[where x="2 ^ n" for n], simp) + apply (drule word_l_diffs, simp) + apply (simp add: field_simps) + apply (simp add: power_overflow) + apply (clarsimp split: if_split_asm) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (drule word_le_minus_one_leq[where x="y - x"]) + apply (drule word_plus_mono_right[where x=x and y="y - x"]) + apply (erule is_aligned_get_word_bits) + apply (simp add: field_simps is_aligned_no_overflow) + apply simp + apply (simp add: field_simps) + apply (simp add: magnitudeCheck_def return_def + iffD2[OF linorder_not_less] when_def + split: option.split_asm) + done + +lemma in_alignCheck[simp]: + "((v, s') \ fst (alignCheck x n s)) = (s' = s \ is_aligned x n)" + by (simp add: alignCheck_def in_monad is_aligned_mask[symmetric] + alignError_def conj_comms + cong: conj_cong) + +lemma tcb_space_clear: + "\ tcb_cte_cases (y - x) = Some (getF, setF); + is_aligned x tcbBlockSizeBits; ps_clear x tcbBlockSizeBits s; + ksPSpace s x = Some (KOTCB tcb); ksPSpace s y = Some v; + \ x = y; getF = tcbCTable; setF = tcbCTable_update \ \ P + \ \ P" + apply (cases "x = y") + apply simp + apply (clarsimp simp: ps_clear_def mask_def add_diff_eq) + apply (drule_tac a=y in equals0D) + apply (simp add: domI) + apply (subgoal_tac "\z. y = x + z \ z < 2 ^ tcbBlockSizeBits") + apply (elim exE conjE) + apply (frule(1) is_aligned_no_wrap'[rotated, rotated]) + apply (simp add: word_bits_conv objBits_defs) + apply (erule notE, subst field_simps, rule word_plus_mono_right) + apply (drule word_le_minus_one_leq,simp,erule is_aligned_no_wrap') + apply (simp add: word_bits_conv) + apply (simp add: objBits_defs) + apply (rule_tac x="y - x" in exI) + apply (simp add: tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + done + +lemma tcb_ctes_clear: + "\ tcb_cte_cases (y - x) = Some (getF, setF); + is_aligned x tcbBlockSizeBits; ps_clear x tcbBlockSizeBits s; + ksPSpace s x = Some (KOTCB tcb) \ + \ \ ksPSpace s y = Some (KOCTE cte)" + apply clarsimp + apply (erule(4) tcb_space_clear) + apply simp + done + +lemma cte_wp_at_cases': + shows "cte_wp_at' P p s = + ((\cte. ksPSpace s p = Some (KOCTE cte) \ is_aligned p cte_level_bits + \ P cte \ ps_clear p cteSizeBits s) \ + (\n tcb getF setF. 
ksPSpace s (p - n) = Some (KOTCB tcb) \ is_aligned (p - n) tcbBlockSizeBits + \ tcb_cte_cases n = Some (getF, setF) \ P (getF tcb) \ ps_clear (p - n) tcbBlockSizeBits s))" + (is "?LHS = ?RHS") + apply (rule iffI) + apply (clarsimp simp: cte_wp_at'_def split_def + getObject_def bind_def simpler_gets_def + assert_opt_def return_def fail_def + split: option.splits + del: disjCI) + apply (clarsimp simp: loadObject_cte typeError_def alignError_def + fail_def return_def objBits_simps' + is_aligned_mask[symmetric] alignCheck_def + tcbVTableSlot_def field_simps tcbCTableSlot_def + tcbReplySlot_def tcbCallerSlot_def + tcbIPCBufferSlot_def + lookupAround2_char1 + cte_level_bits_def Ball_def + unless_def when_def bind_def + split: kernel_object.splits if_split_asm option.splits + del: disjCI) + apply (subst(asm) in_magnitude_check3, simp+, + simp split: if_split_asm, (rule disjI2)?, intro exI, rule conjI, + erule rsubst[where P="\x. ksPSpace s x = v" for s v], + fastforce simp add: field_simps, simp)+ + apply (subst(asm) in_magnitude_check3, simp+) + apply (simp split: if_split_asm + add: ) + apply (simp add: cte_wp_at'_def getObject_def split_def + bind_def simpler_gets_def return_def + assert_opt_def fail_def objBits_defs + split: option.splits) + apply (elim disjE conjE exE) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (simp add: field_simps) + apply (erule is_aligned_no_wrap') + apply (simp add: cte_level_bits_def word_bits_conv) + apply (simp add: cte_level_bits_def) + apply (simp add: loadObject_cte unless_def alignCheck_def + is_aligned_mask[symmetric] objBits_simps' + cte_level_bits_def magnitudeCheck_def + return_def fail_def) + apply (clarsimp simp: bind_def return_def when_def fail_def + split: option.splits) + apply simp + apply (erule(1) ps_clear_lookupAround2) + prefer 3 + apply (simp add: loadObject_cte unless_def alignCheck_def + is_aligned_mask[symmetric] objBits_simps' + cte_level_bits_def magnitudeCheck_def + return_def fail_def tcbCTableSlot_def tcbVTableSlot_def + tcbIPCBufferSlot_def tcbReplySlot_def tcbCallerSlot_def + split: option.split_asm) + apply (clarsimp simp: bind_def tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + apply (clarsimp simp: bind_def tcb_cte_cases_def iffD2[OF linorder_not_less] + return_def cteSizeBits_def + split: if_split_asm) + apply (subgoal_tac "p - n \ (p - n) + n", simp) + apply (erule is_aligned_no_wrap') + apply (simp add: word_bits_conv) + apply (simp add: tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + apply (subgoal_tac "(p - n) + n \ (p - n) + 0x7FF") + apply (simp add: field_simps) + apply (rule word_plus_mono_right) + apply (simp add: tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + apply (erule is_aligned_no_wrap') + apply simp + done + +lemma tcb_at_cte_at': + "tcb_at' t s \ cte_at' t s" + apply (clarsimp simp add: cte_wp_at_cases' obj_at'_def projectKO_def + del: disjCI) + apply (case_tac ko) + apply (simp_all add: projectKO_opt_tcb fail_def) + apply (rule exI[where x=0]) + apply (clarsimp simp add: return_def objBits_simps) + done + +lemma cte_wp_atE' [consumes 1, case_names CTE TCB]: + assumes cte: "cte_wp_at' P ptr s" + and r1: "\cte. + \ ksPSpace s ptr = Some (KOCTE cte); ps_clear ptr cte_level_bits s; + is_aligned ptr cte_level_bits; P cte \ \ R" + and r2: "\ tcb ptr' getF setF. 
+ \ ksPSpace s ptr' = Some (KOTCB tcb); ps_clear ptr' tcbBlockSizeBits s; is_aligned ptr' tcbBlockSizeBits; + tcb_cte_cases (ptr - ptr') = Some (getF, setF); P (getF tcb) \ \ R" + shows "R" + by (rule disjE [OF iffD1 [OF cte_wp_at_cases' cte]]) (auto intro: r1 r2 simp: cte_level_bits_def objBits_defs) + +lemma cte_wp_at_cteI': + assumes "ksPSpace s ptr = Some (KOCTE cte)" + assumes "is_aligned ptr cte_level_bits" + assumes "ps_clear ptr cte_level_bits s" + assumes "P cte" + shows "cte_wp_at' P ptr s" + using assms by (simp add: cte_wp_at_cases' cte_level_bits_def objBits_defs) + +lemma cte_wp_at_tcbI': + assumes "ksPSpace s ptr' = Some (KOTCB tcb)" + assumes "is_aligned ptr' tcbBlockSizeBits" + assumes "ps_clear ptr' tcbBlockSizeBits s" + and "tcb_cte_cases (ptr - ptr') = Some (getF, setF)" + and "P (getF tcb)" + shows "cte_wp_at' P ptr s" + using assms + apply (simp add: cte_wp_at_cases') + apply (rule disjI2, rule exI[where x="ptr - ptr'"]) + apply simp + done + +lemma obj_at_ko_at': + "obj_at' P p s \ \ko. ko_at' ko p s \ P ko" + by (auto simp add: obj_at'_def) + +lemma obj_at_aligned': + fixes P :: "('a :: pspace_storable) \ bool" + assumes oat: "obj_at' P p s" + and oab: "\(v :: 'a) (v' :: 'a). objBits v = objBits v'" + shows "is_aligned p (objBits (obj :: 'a))" + using oat + apply (clarsimp simp add: obj_at'_def) + apply (clarsimp simp add: projectKO_def fail_def return_def + project_inject objBits_def[symmetric] + split: option.splits) + apply (erule subst[OF oab]) + done + +(* locateSlot *) +lemma locateSlot_conv: + "locateSlotBasic A B = return (A + 2 ^ cte_level_bits * B)" + "locateSlotTCB = locateSlotBasic" + "locateSlotCNode A bits B = (do + x \ stateAssert (\s. case (gsCNodes s A) of None \ False | Some n \ n = bits \ B < 2 ^ n) []; + locateSlotBasic A B od)" + "locateSlotCap c B = (do + x \ stateAssert (\s. 
((isCNodeCap c \ (isZombie c \ capZombieType c \ ZombieTCB)) + \ (case gsCNodes s (capUntypedPtr c) of None \ False + | Some n \ (isCNodeCap c \ n = capCNodeBits c + \ isZombie c \ n = zombieCTEBits (capZombieType c)) \ B < 2 ^ n)) + \ isThreadCap c \ (isZombie c \ capZombieType c = ZombieTCB)) []; + locateSlotBasic (capUntypedPtr c) B od)" + apply (simp_all add: locateSlotCap_def locateSlotTCB_def fun_eq_iff) + apply (simp add: locateSlotBasic_def objBits_simps cte_level_bits_def objBits_defs) + apply (simp add: locateSlotCNode_def stateAssert_def) + apply (cases c, simp_all add: locateSlotCNode_def isZombie_def isThreadCap_def + isCNodeCap_def capUntypedPtr_def stateAssert_def + bind_assoc exec_get locateSlotTCB_def + objBits_simps + split: zombie_type.split cong: option.case_cong) + done + +lemma typ_at_tcb': + "typ_at' TCBT = tcb_at'" + apply (rule ext)+ + apply (simp add: obj_at'_real_def typ_at'_def) + apply (simp add: ko_wp_at'_def) + apply (rule iffI) + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_tcb)[9] + apply (case_tac ko) + apply (auto simp: projectKO_opt_tcb) + done + +lemma typ_at_ep: + "typ_at' EndpointT = ep_at'" + apply (rule ext)+ + apply (simp add: obj_at'_real_def typ_at'_def) + apply (simp add: ko_wp_at'_def) + apply (rule iffI) + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_ep)[9] + apply (case_tac ko) + apply (auto simp: projectKO_opt_ep) + done + +lemma typ_at_ntfn: + "typ_at' NotificationT = ntfn_at'" + apply (rule ext)+ + apply (simp add: obj_at'_real_def typ_at'_def) + apply (simp add: ko_wp_at'_def) + apply (rule iffI) + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_ntfn)[8] + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_ntfn) + done + +lemma typ_at_cte: + "typ_at' CTET = real_cte_at'" + apply (rule ext)+ + apply (simp add: obj_at'_real_def typ_at'_def) + apply (simp add: ko_wp_at'_def) + apply (rule iffI) + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_cte)[8] + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_cte) + done + +lemma cte_at_typ': + "cte_at' c = (\s. typ_at' CTET c s \ (\n. typ_at' TCBT (c - n) s \ n \ dom tcb_cte_cases))" +proof - + have P: "\ko. (koTypeOf ko = CTET) = (\cte. ko = KOCTE cte)" + "\ko. (koTypeOf ko = TCBT) = (\tcb. ko = KOTCB tcb)" + by (case_tac ko, simp_all)+ + have Q: "\P f. (\x. (\y. x = f y) \ P x) = (\y. P (f y))" + by fastforce + show ?thesis + by (fastforce simp: cte_wp_at_cases' obj_at'_real_def typ_at'_def + ko_wp_at'_def objBits_simps' P Q conj_comms cte_level_bits_def) +qed + +lemma typ_at_lift_tcb': + "\typ_at' TCBT p\ f \\_. typ_at' TCBT p\ \ \tcb_at' p\ f \\_. tcb_at' p\" + by (simp add: typ_at_tcb') + +lemma typ_at_lift_ep': + "\typ_at' EndpointT p\ f \\_. typ_at' EndpointT p\ \ \ep_at' p\ f \\_. ep_at' p\" + by (simp add: typ_at_ep) + +lemma typ_at_lift_ntfn': + "\typ_at' NotificationT p\ f \\_. typ_at' NotificationT p\ \ \ntfn_at' p\ f \\_. ntfn_at' p\" + by (simp add: typ_at_ntfn) + +lemma typ_at_lift_cte': + "\typ_at' CTET p\ f \\_. typ_at' CTET p\ \ \real_cte_at' p\ f \\_. real_cte_at' p\" + by (simp add: typ_at_cte) + +lemma typ_at_lift_cte_at': + assumes x: "\T p. \typ_at' T p\ f \\rv. typ_at' T p\" + shows "\cte_at' c\ f \\rv. cte_at' c\" + apply (simp only: cte_at_typ') + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift x) + done + +lemma typ_at_lift_page_table_at': + assumes x: "\T p. 
f \typ_at' T p\" + shows "f \page_table_at' pt_t p\" + unfolding page_table_at'_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift' x) + +lemma ko_wp_typ_at': + "ko_wp_at' P p s \ \T. typ_at' T p s" + by (clarsimp simp: typ_at'_def ko_wp_at'_def) + +lemma koType_obj_range': + "koTypeOf k = koTypeOf k' \ obj_range' p k = obj_range' p k'" + apply (rule ccontr) + apply (simp add: obj_range'_def objBitsKO_def archObjSize_def + split: kernel_object.splits arch_kernel_object.splits) + done + +lemma typ_at_lift_valid_untyped': + assumes P: "\T p. \\s. \typ_at' T p s\ f \\rv s. \typ_at' T p s\" + shows "\\s. valid_untyped' d p n idx s\ f \\rv s. valid_untyped' d p n idx s\" + apply (clarsimp simp: valid_untyped'_def split del:if_split) + apply (rule hoare_vcg_all_lift) + apply (clarsimp simp: valid_def split del:if_split) + apply (frule ko_wp_typ_at') + apply clarsimp + apply (cut_tac T=T and p=ptr' in P) + apply (simp add: valid_def) + apply (erule_tac x=s in allE) + apply (erule impE) + prefer 2 + apply (drule (1) bspec) + apply simp + apply (clarsimp simp: typ_at'_def ko_wp_at'_def simp del:atLeastAtMost_iff) + apply (elim disjE) + apply (clarsimp simp:psubset_eq simp del:atLeastAtMost_iff) + apply (drule_tac p=ptr' in koType_obj_range') + apply (erule impE) + apply simp + apply simp + apply (drule_tac p = ptr' in koType_obj_range') + apply (clarsimp split:if_splits) + done + +lemma typ_at_lift_asid_at': + "(\T p. \typ_at' T p\ f \\_. typ_at' T p\) \ \asid_pool_at' p\ f \\_. asid_pool_at' p\" + by assumption + +lemma typ_at_lift_vcpu_at': + "(\T p. \typ_at' T p\ f \\_. typ_at' T p\) \ \vcpu_at' p\ f \\_. vcpu_at' p\" + by assumption + +lemma typ_at_lift_frame_at': + assumes "\T p. f \typ_at' T p\" + shows "f \frame_at' p sz d\" + unfolding frame_at'_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_const_imp_lift assms split_del: if_split) + +lemma typ_at_lift_valid_cap': + assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" + shows "\\s. valid_cap' cap s\ f \\rv s. valid_cap' cap s\" + including no_pre + apply (simp add: valid_cap'_def) + apply wp + apply (case_tac cap; + simp add: valid_cap'_def P[of id, simplified] typ_at_lift_tcb' + hoare_vcg_prop typ_at_lift_ep' + typ_at_lift_ntfn' typ_at_lift_cte_at' + hoare_vcg_conj_lift [OF typ_at_lift_cte_at']) + apply (rename_tac zombie_type nat) + apply (case_tac zombie_type; simp) + apply (wp typ_at_lift_tcb' P hoare_vcg_all_lift typ_at_lift_cte')+ + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: P[of id, simplified] vspace_table_at'_defs + hoare_vcg_prop All_less_Ball + split del: if_split) + apply (wp hoare_vcg_const_Ball_lift P typ_at_lift_valid_untyped' + hoare_vcg_all_lift typ_at_lift_cte' typ_at_lift_frame_at')+ + done + + +lemma typ_at_lift_valid_irq_node': + assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" + shows "\valid_irq_node' p\ f \\_. valid_irq_node' p\" + apply (simp add: valid_irq_node'_def) + apply (wp hoare_vcg_all_lift P typ_at_lift_cte') + done + +lemma valid_bound_tcb_lift: + "(\T p. \typ_at' T p\ f \\_. typ_at' T p\) \ + \valid_bound_tcb' tcb\ f \\_. valid_bound_tcb' tcb\" + by (auto simp: valid_bound_tcb'_def valid_def typ_at_tcb'[symmetric] split: option.splits) + +lemma valid_arch_tcb_lift': + assumes x: "\T p. \typ_at' T p\ f \\rv. typ_at' T p\" + shows "\\s. valid_arch_tcb' tcb s\ f \\rv s. 
valid_arch_tcb' tcb s\" + apply (clarsimp simp add: valid_arch_tcb'_def) + apply (cases "atcbVCPUPtr tcb"; simp) + apply (wp x)+ + done + +lemmas typ_at_lifts = typ_at_lift_tcb' typ_at_lift_ep' + typ_at_lift_ntfn' typ_at_lift_cte' + typ_at_lift_cte_at' + typ_at_lift_page_table_at' + typ_at_lift_asid_at' + typ_at_lift_vcpu_at' + typ_at_lift_valid_untyped' + typ_at_lift_valid_cap' + valid_bound_tcb_lift + valid_arch_tcb_lift' + +lemma valid_arch_state_armKSGlobalUserVSpace: + "valid_arch_state' s \ canonical_address (addrFromKPPtr (armKSGlobalUserVSpace (ksArchState s)))" + by (simp add: valid_arch_state'_def) + +lemma mdb_next_unfold: + "s \ c \ c' = (\z. s c = Some z \ c' = mdbNext (cteMDBNode z))" + by (auto simp add: mdb_next_rel_def mdb_next_def) + +lemma valid_dlist_prevD: + "\ valid_dlist m; c \ 0; c' \ 0 \ \ m \ c \ c' = m \ c \ c'" + by (fastforce simp add: valid_dlist_def Let_def mdb_next_unfold mdb_prev_def) + + +lemma no_0_simps [simp]: + assumes "no_0 m" + shows "((m 0 = Some cte) = False) \ ((Some cte = m 0) = False)" + using assms by (simp add: no_0_def) + +lemma valid_dlist_def2: + "no_0 m \ valid_dlist m = (\c c'. c \ 0 \ c' \ 0 \ m \ c \ c' = m \ c \ c')" + apply (rule iffI) + apply (simp add: valid_dlist_prevD) + apply (clarsimp simp: valid_dlist_def Let_def mdb_next_unfold mdb_prev_def) + apply (subgoal_tac "p\0") + prefer 2 + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule_tac x="mdbPrev (cteMDBNode cte)" in allE) + apply simp + apply (erule_tac x=p in allE) + apply clarsimp + apply clarsimp + apply (erule_tac x=p in allE) + apply simp + apply (erule_tac x="mdbNext (cteMDBNode cte)" in allE) + apply clarsimp + done + +lemma valid_dlist_def3: + "valid_dlist m = ((\c c'. m \ c \ c' \ c' \ 0 \ m \ c \ c') \ + (\c c'. m \ c \ c' \ c \ 0 \ m \ c \ c'))" + apply (rule iffI) + apply (simp add: valid_dlist_def Let_def mdb_next_unfold mdb_prev_def) + apply fastforce + apply (clarsimp simp add: valid_dlist_def Let_def mdb_next_unfold mdb_prev_def) + apply fastforce + done + +lemma vdlist_prevD: + "\ m \ c \ c'; m c = Some cte; valid_dlist m; no_0 m \ \ m \ c \ c'" + by (fastforce simp: valid_dlist_def3) + +lemma vdlist_nextD: + "\ m \ c \ c'; m c' = Some cte; valid_dlist m; no_0 m \ \ m \ c \ c'" + by (fastforce simp: valid_dlist_def3) + +lemma vdlist_prevD0: + "\ m \ c \ c'; c \ 0; valid_dlist m \ \ m \ c \ c'" + by (fastforce simp: valid_dlist_def3) + +lemma vdlist_nextD0: + "\ m \ c \ c'; c' \ 0; valid_dlist m \ \ m \ c \ c'" + by (fastforce simp: valid_dlist_def3) + +lemma vdlist_prev_src_unique: + "\ m \ p \ x; m \ p \ y; p \ 0; valid_dlist m \ \ x = y" + by (drule (2) vdlist_prevD0)+ (clarsimp simp: mdb_next_unfold) + +lemma vdlist_next_src_unique: + "\ m \ x \ p; m \ y \ p; p \ 0; valid_dlist m \ \ x = y" + by (drule (2) vdlist_nextD0)+ (clarsimp simp: mdb_prev_def) + +lemma cte_at_cte_wp_atD: + "cte_at' p s \ \cte. 
cte_wp_at' ((=) cte) p s" + by (clarsimp simp add: cte_wp_at'_def) + +lemma valid_pspace_no_0 [elim]: + "valid_pspace' s \ no_0 (ctes_of s)" + by (auto simp: valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + +lemma valid_pspace_dlist [elim]: + "valid_pspace' s \ valid_dlist (ctes_of s)" + by (auto simp: valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + +lemma next_rtrancl_tranclE [consumes 1, case_names eq trancl]: + assumes major: "m \ x \\<^sup>* y" + and r1: "x = y \ P" + and r2: "\ x \ y; m \ x \\<^sup>+ y \ \ P" + shows "P" + using major + by (auto dest: rtranclD intro: r1 r2) + +lemmas trancl_induct' [induct set] = trancl_induct [consumes 1, case_names base step] + +lemma next_single_value: + "\ m \ x \ y; m \ x \ z \ \ y = z" + unfolding mdb_next_rel_def by simp + +lemma loop_split: + assumes loop: "m \ c \\<^sup>+ c" + and split: "m \ c \\<^sup>+ c'" + shows "m \ c' \\<^sup>+ c" + using split loop +proof induct + case base + thus ?case + by (auto dest: next_single_value elim: tranclE2) +next + case (step y z) + hence "m \ y \\<^sup>+ c" by simp + hence "m \ z \\<^sup>* c" using step.hyps + by (metis next_single_value tranclD) + + thus ?case using step.prems + by (cases rule: next_rtrancl_tranclE, simp_all) +qed + +lemma no_0_lhs: + "\ m \ c \ y; no_0 m \ \ c \ 0" + unfolding no_0_def + by (erule contrapos_pn, simp add: mdb_next_unfold) + +lemma no_0_lhs_trancl: + "\ m \ c \\<^sup>+ y; no_0 m \ \ c \ 0" + by (erule tranclE2, (rule no_0_lhs, simp_all)+) + +lemma mdb_chain_0_no_loops: + assumes asm: "mdb_chain_0 m" + and no0: "no_0 m" + shows "no_loops m" +proof - + { + fix c + assume mc: "m \ c \\<^sup>+ c" + + with asm have "m \ c \\<^sup>+ 0" + unfolding mdb_chain_0_def + apply - + apply (erule bspec, erule tranclE2) + apply (auto intro: domI simp: mdb_next_unfold) + done + + with mc have "m \ 0 \\<^sup>+ c" by (rule loop_split) + hence False using no0 + by (clarsimp dest!: no_0_lhs_trancl) + } + thus "no_loops m" unfolding no_loops_def by auto +qed + +lemma valid_mdb_ctesE [elim]: + "\valid_mdb_ctes m; + \ valid_dlist m; no_0 m; mdb_chain_0 m; valid_badges m; + caps_contained' m; mdb_chunked m; untyped_mdb' m; + untyped_inc' m; valid_nullcaps m; ut_revocable' m; + class_links m; distinct_zombies m; irq_control m; + reply_masters_rvk_fb m \ + \ P\ \ P" + unfolding valid_mdb_ctes_def by auto + +lemma valid_mdb_ctesI [intro]: + "\valid_dlist m; no_0 m; mdb_chain_0 m; valid_badges m; + caps_contained' m; mdb_chunked m; untyped_mdb' m; + untyped_inc' m; valid_nullcaps m; ut_revocable' m; + class_links m; distinct_zombies m; irq_control m; + reply_masters_rvk_fb m \ + \ valid_mdb_ctes m" + unfolding valid_mdb_ctes_def by auto + +end +locale PSpace_update_eq = + fixes f :: "kernel_state \ kernel_state" + assumes pspace: "ksPSpace (f s) = ksPSpace s" +begin + +lemma state_refs_of'_eq[iff]: + "state_refs_of' (f s) = state_refs_of' s" + by (rule state_refs_of'_pspaceI [OF _ pspace], rule refl) + +lemma state_hyp_refs_of'_eq[iff]: + "state_hyp_refs_of' (f s) = state_hyp_refs_of' s" + by (rule state_hyp_refs_of'_pspaceI [OF _ pspace], rule refl) + +lemma valid_space_update [iff]: + "valid_pspace' (f s) = valid_pspace' s" + by (fastforce simp: valid_pspace' pspace) + +lemma obj_at_update [iff]: + "obj_at' P p (f s) = obj_at' P p s" + by (fastforce intro: obj_at'_pspaceI simp: pspace) + +lemma ko_wp_at_update [iff]: + "ko_wp_at' P p (f s) = ko_wp_at' P p s" + by (simp add: pspace ko_wp_at'_def ps_clear_def) + +lemma cte_wp_at_update [iff]: + "cte_wp_at' P p (f s) = cte_wp_at' P p s" + 
by (fastforce intro: cte_wp_at'_pspaceI simp: pspace) + +lemma ex_nonz_cap_to_eq'[iff]: + "ex_nonz_cap_to' p (f s) = ex_nonz_cap_to' p s" + by (simp add: ex_nonz_cap_to'_def) + +lemma iflive_update [iff]: + "if_live_then_nonz_cap' (f s) = if_live_then_nonz_cap' s" + by (simp add: if_live_then_nonz_cap'_def ex_nonz_cap_to'_def) + +lemma valid_objs_update [iff]: + "valid_objs' (f s) = valid_objs' s" + apply (simp add: valid_objs'_def pspace) + apply (fastforce intro: valid_obj'_pspaceI simp: pspace) + done + +lemma pspace_aligned_update [iff]: + "pspace_aligned' (f s) = pspace_aligned' s" + by (simp add: pspace pspace_aligned'_def) + +lemma pspace_distinct_update [iff]: + "pspace_distinct' (f s) = pspace_distinct' s" + by (simp add: pspace pspace_distinct'_def ps_clear_def) + +lemma pspace_canonical_update [iff]: + "pspace_canonical' (f s) = pspace_canonical' s" + by (simp add: pspace pspace_canonical'_def ps_clear_def) + +lemma pred_tcb_at_update [iff]: + "pred_tcb_at' proj P p (f s) = pred_tcb_at' proj P p s" + by (simp add: pred_tcb_at'_def) + +lemma valid_cap_update [iff]: + "(f s) \' c = s \' c" + by (auto intro: valid_cap'_pspaceI simp: pspace) + +lemma typ_at_update' [iff]: + "typ_at' T p (f s) = typ_at' T p s" + by (simp add: typ_at'_def) + +lemma page_table_at_update' [iff]: + "page_table_at' pt_t p (f s) = page_table_at' pt_t p s" + by (simp add: page_table_at'_def) + +lemma frame_at_update' [iff]: + "frame_at' p sz d (f s) = frame_at' p sz d s" + by (simp add: frame_at'_def) + +lemma no_0_obj'_update [iff]: + "no_0_obj' (f s) = no_0_obj' s" + by (simp add: no_0_obj'_def pspace) + +lemma pointerInUserData_update[iff]: + "pointerInUserData p (f s) = pointerInUserData p s" + by (simp add: pointerInUserData_def) + +lemma pointerInDeviceData_update[iff]: + "pointerInDeviceData p (f s) = pointerInDeviceData p s" + by (simp add: pointerInDeviceData_def) + +lemma pspace_domain_valid_update [iff]: + "pspace_domain_valid (f s) = pspace_domain_valid s" + by (simp add: pspace_domain_valid_def pspace) + +end + +locale Arch_Idle_update_eq = + fixes f :: "kernel_state \ kernel_state" + assumes arch: "ksArchState (f s) = ksArchState s" + assumes idle: "ksIdleThread (f s) = ksIdleThread s" + assumes int_nd: "intStateIRQNode (ksInterruptState (f s)) + = intStateIRQNode (ksInterruptState s)" + assumes maxObj: "gsMaxObjectSize (f s) = gsMaxObjectSize s" +begin + +lemma global_refs_update' [iff]: + "global_refs' (f s) = global_refs' s" + by (simp add: global_refs'_def arch idle int_nd) + +end + +locale P_Arch_Idle_update_eq = PSpace_update_eq + Arch_Idle_update_eq +begin + +lemma valid_global_refs_update' [iff]: + "valid_global_refs' (f s) = valid_global_refs' s" + by (simp add: valid_global_refs'_def pspace arch idle maxObj) + +lemma valid_arch_state_update' [iff]: + "valid_arch_state' (f s) = valid_arch_state' s" + by (simp add: valid_arch_state'_def arch cong: option.case_cong) + +lemma valid_idle_update' [iff]: + "valid_idle' (f s) = valid_idle' s" + by (auto simp: pspace idle) + +lemma ifunsafe_update [iff]: + "if_unsafe_then_cap' (f s) = if_unsafe_then_cap' s" + by (simp add: if_unsafe_then_cap'_def ex_cte_cap_to'_def int_nd) + +end + +locale Int_update_eq = + fixes f :: "kernel_state \ kernel_state" + assumes int: "ksInterruptState (f s) = ksInterruptState s" +begin + +lemma irqs_masked_update [iff]: + "irqs_masked' (f s) = irqs_masked' s" + by (simp add: irqs_masked'_def int) + +lemma irq_issued_update'[iff]: + "irq_issued' irq (f s) = irq_issued' irq s" + by (simp add: irq_issued'_def int) + 
+end + +locale P_Cur_update_eq = PSpace_update_eq + + assumes curt: "ksCurThread (f s) = ksCurThread s" + assumes curd: "ksCurDomain (f s) = ksCurDomain s" +begin + +lemma sch_act_wf[iff]: + "sch_act_wf ks (f s) = sch_act_wf ks s" +apply (cases ks) +apply (simp_all add: ct_in_state'_def st_tcb_at'_def tcb_in_cur_domain'_def curt curd) +done + +end + +locale P_Int_update_eq = PSpace_update_eq + Int_update_eq +begin + +lemma valid_irq_handlers_update'[iff]: + "valid_irq_handlers' (f s) = valid_irq_handlers' s" + by (simp add: valid_irq_handlers'_def cteCaps_of_def pspace) + +end + +locale P_Int_Cur_update_eq = + P_Int_update_eq + P_Cur_update_eq + +locale P_Arch_Idle_Int_update_eq = P_Arch_Idle_update_eq + P_Int_update_eq + +locale P_Arch_Idle_Int_Cur_update_eq = + P_Arch_Idle_Int_update_eq + P_Cur_update_eq + +interpretation sa_update: + P_Arch_Idle_Int_Cur_update_eq "ksSchedulerAction_update f" + by unfold_locales auto + +interpretation ready_queue_update: + P_Arch_Idle_Int_Cur_update_eq "ksReadyQueues_update f" + by unfold_locales auto + +interpretation ready_queue_bitmap1_update: + P_Arch_Idle_Int_Cur_update_eq "ksReadyQueuesL1Bitmap_update f" + by unfold_locales auto + +interpretation ready_queue_bitmap2_update: + P_Arch_Idle_Int_Cur_update_eq "ksReadyQueuesL2Bitmap_update f" + by unfold_locales auto + +interpretation cur_thread_update': + P_Arch_Idle_Int_update_eq "ksCurThread_update f" + by unfold_locales auto + +interpretation machine_state_update': + P_Arch_Idle_Int_Cur_update_eq "ksMachineState_update f" + by unfold_locales auto + +interpretation interrupt_state_update': + P_Cur_update_eq "ksInterruptState_update f" + by unfold_locales auto + +interpretation idle_update': + P_Int_Cur_update_eq "ksIdleThread_update f" + by unfold_locales auto + +interpretation arch_state_update': + P_Int_Cur_update_eq "ksArchState_update f" + by unfold_locales auto + +interpretation wu_update': + P_Arch_Idle_Int_Cur_update_eq "ksWorkUnitsCompleted_update f" + by unfold_locales auto + +interpretation gsCNodes_update: P_Arch_Idle_update_eq "gsCNodes_update f" + by unfold_locales simp_all + +interpretation gsUserPages_update: P_Arch_Idle_update_eq "gsUserPages_update f" + by unfold_locales simp_all +lemma ko_wp_at_aligned: + "ko_wp_at' ((=) ko) p s \ is_aligned p (objBitsKO ko)" + by (simp add: ko_wp_at'_def) + +interpretation ksCurDomain: + P_Arch_Idle_Int_update_eq "ksCurDomain_update f" + by unfold_locales auto + +interpretation ksDomScheduleIdx: + P_Arch_Idle_Int_Cur_update_eq "ksDomScheduleIdx_update f" + by unfold_locales auto + +interpretation ksDomSchedule: + P_Arch_Idle_Int_Cur_update_eq "ksDomSchedule_update f" + by unfold_locales auto + +interpretation ksDomainTime: + P_Arch_Idle_Int_Cur_update_eq "ksDomainTime_update f" + by unfold_locales auto + +interpretation gsUntypedZeroRanges: + P_Arch_Idle_Int_Cur_update_eq "gsUntypedZeroRanges_update f" + by unfold_locales auto + +lemma ko_wp_at_norm: + "ko_wp_at' P p s \ \ko. P ko \ ko_wp_at' ((=) ko) p s" + by (auto simp add: ko_wp_at'_def) + +lemma valid_mdb_machine_state [iff]: + "valid_mdb' (ksMachineState_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma cte_wp_at_norm': + "cte_wp_at' P p s \ \cte. 
cte_wp_at' ((=) cte) p s \ P cte" + by (simp add: cte_wp_at'_def) + +lemma pred_tcb_at' [elim!]: + "pred_tcb_at' proj P t s \ tcb_at' t s" + by (auto simp add: pred_tcb_at'_def obj_at'_def) + +lemma valid_pspace_mdb' [elim!]: + "valid_pspace' s \ valid_mdb' s" + by (simp add: valid_pspace'_def) + +lemmas hoare_use_eq_irq_node' = hoare_use_eq[where f=irq_node'] + +lemma ex_cte_cap_to'_pres: + "\ \P p. \cte_wp_at' P p\ f \\rv. cte_wp_at' P p\; + \P. \\s. P (irq_node' s)\ f \\rv s. P (irq_node' s)\ \ + \ \ex_cte_cap_wp_to' P p\ f \\rv. ex_cte_cap_wp_to' P p\" + apply (simp add: ex_cte_cap_wp_to'_def) + apply (rule hoare_pre) + apply (erule hoare_use_eq_irq_node') + apply (rule hoare_vcg_ex_lift) + apply assumption + apply simp + done + +section "Relationship of Executable Spec to Kernel Configuration" + +text \ + Some values are set per kernel configuration (e.g. number of domains), but other related + values (e.g. maximum domain) are derived from storage constraints (e.g. bytes used). + To relate the two, we must look at the values of kernel configuration constants. + To allow the proofs to work for all permitted values of these constants, their definitions + should only be unfolded in this section, and the derived properties kept to a minimum.\ + +lemma le_maxDomain_eq_less_numDomains: + shows "x \ unat maxDomain \ x < Kernel_Config.numDomains" + "y \ maxDomain \ unat y < Kernel_Config.numDomains" + by (auto simp: Kernel_Config.numDomains_def maxDomain_def word_le_nat_alt) + + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma page_table_pte_atI': + "\ page_table_at' pt_t p s; i \ mask (ptTranslationBits pt_t) \ \ + pte_at' (p + (i << pte_bits)) s" + by (simp add: page_table_at'_def) + +lemma valid_global_refsD': + "\ ctes_of s p = Some cte; valid_global_refs' s \ \ + kernel_data_refs \ capRange (cteCap cte) = {} \ global_refs' s \ kernel_data_refs" + by (clarsimp simp: valid_global_refs'_def valid_refs'_def ran_def) blast + +lemma no_0_prev: + "no_0 m \ \ m \ p \ 0" + by (simp add: mdb_prev_def) + +lemma ut_revocableD': + "\m p = Some (CTE cap n); isUntypedCap cap; ut_revocable' m \ \ mdbRevocable n" + unfolding ut_revocable'_def by blast + +lemma nullcapsD': + "\m p = Some (CTE NullCap n); valid_nullcaps m \ \ n = nullMDBNode" + unfolding valid_nullcaps_def by blast + +lemma untyped_mdbD': + "\m p = Some (CTE c n); isUntypedCap c; + m p' = Some (CTE c' n'); \isUntypedCap c'; + capRange c' \ untypedRange c \ {}; untyped_mdb' m \ \ + p' \ descendants_of' p m" + unfolding untyped_mdb'_def by blast + +lemma untyped_incD': + "\ m p = Some (CTE c n); isUntypedCap c; + m p' = Some (CTE c' n'); isUntypedCap c'; untyped_inc' m \ \ + (untypedRange c \ untypedRange c' \ untypedRange c' \ untypedRange c \ untypedRange c \ untypedRange c' = {}) \ + (untypedRange c \ untypedRange c' \ (p \ descendants_of' p' m \ untypedRange c \ usableUntypedRange c' = {})) \ + (untypedRange c' \ untypedRange c \ (p' \ descendants_of' p m \ untypedRange c' \ usableUntypedRange c = {})) \ + (untypedRange c = untypedRange c' \ (p' \ descendants_of' p m \ usableUntypedRange c = {} + \ p \ descendants_of' p' m \ usableUntypedRange c' = {} \ p = p'))" + unfolding untyped_inc'_def + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (elim allE impE) + apply simp+ + done + +lemma caps_containedD': + "\ m p = Some (CTE c n); m p' = Some (CTE c' n'); + \ isUntypedCap c'; capRange c' \ untypedRange c \ {}; + caps_contained' m\ + \ capRange c' \ untypedRange c" + unfolding 
caps_contained'_def by blast + +lemma class_linksD: + "\ m p = Some cte; m p' = Some cte'; m \ p \ p'; class_links m \ \ + capClass (cteCap cte) = capClass (cteCap cte')" + using class_links_def by blast + +lemma mdb_chunkedD: + "\ m p = Some (CTE cap n); m p' = Some (CTE cap' n'); + sameRegionAs cap cap'; p \ p'; mdb_chunked m \ + \ (m \ p \\<^sup>+ p' \ m \ p' \\<^sup>+ p) \ + (m \ p \\<^sup>+ p' \ is_chunk m cap p p') \ + (m \ p' \\<^sup>+ p \ is_chunk m cap' p' p)" + using mdb_chunked_def by blast + +lemma irq_controlD: + "\ m p = Some (CTE IRQControlCap n); m p' = Some (CTE IRQControlCap n'); + irq_control m \ \ p' = p" + unfolding irq_control_def by blast + +lemma irq_revocable: + "\ m p = Some (CTE IRQControlCap n); irq_control m \ \ mdbRevocable n" + unfolding irq_control_def by blast + +lemma sch_act_wf_arch [simp]: + "sch_act_wf sa (ksArchState_update f s) = sch_act_wf sa s" + by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) + +lemma valid_bitmaps_arch[simp]: + "valid_bitmaps (ksArchState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma if_unsafe_then_cap_arch' [simp]: + "if_unsafe_then_cap' (ksArchState_update f s) = if_unsafe_then_cap' s" + by (simp add: if_unsafe_then_cap'_def ex_cte_cap_to'_def) + +lemma valid_idle_arch' [simp]: + "valid_idle' (ksArchState_update f s) = valid_idle' s" + by (simp add: valid_idle'_def) + +lemma valid_irq_node_arch' [simp]: + "valid_irq_node' w (ksArchState_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma sch_act_wf_machine_state [simp]: + "sch_act_wf sa (ksMachineState_update f s) = sch_act_wf sa s" + by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) + +lemma valid_irq_node'_machine_state [simp]: + "valid_irq_node' x (ksMachineState_update f s) = valid_irq_node' x s" + by (simp add: valid_irq_node'_def) + +lemma valid_bitmaps_machine_state[simp]: + "valid_bitmaps (ksMachineState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +(* these should be reasonable safe for automation because of the 0 pattern *) +lemma no_0_ko_wp' [elim!]: + "\ ko_wp_at' Q 0 s; no_0_obj' s \ \ P" + by (simp add: ko_wp_at'_def no_0_obj'_def) + +lemma no_0_obj_at' [elim!]: + "\ obj_at' Q 0 s; no_0_obj' s \ \ P" + by (simp add: obj_at'_def no_0_obj'_def) + +lemma no_0_typ_at' [elim!]: + "\ typ_at' T 0 s; no_0_obj' s \ \ P" + by (clarsimp simp: typ_at'_def) + +lemma no_0_ko_wp'_eq [simp]: + "no_0_obj' s \ ko_wp_at' P 0 s = False" + by (simp add: ko_wp_at'_def no_0_obj'_def) + +lemma no_0_obj_at'_eq [simp]: + "no_0_obj' s \ obj_at' P 0 s = False" + by (simp add: obj_at'_def no_0_obj'_def) + +lemma no_0_typ_at'_eq [simp]: + "no_0_obj' s \ typ_at' P 0 s = False" + by (simp add: typ_at'_def) + +lemma valid_pspace_valid_objs'[elim!]: + "valid_pspace' s \ valid_objs' s" + by (simp add: valid_pspace'_def) + +declare badgeBits_def [simp] + +lemma simple_sane_strg: + "sch_act_simple s \ sch_act_sane s" + by (simp add: sch_act_sane_def sch_act_simple_def) + +lemma sch_act_wf_cases: + "sch_act_wf action = (case action of + ResumeCurrentThread \ ct_in_state' activatable' + | ChooseNewThread \ \ + | SwitchToThread t \ \s. 
st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s)" +by (cases action) auto +end + +lemma (in PSpace_update_eq) cteCaps_of_update[iff]: "cteCaps_of (f s) = cteCaps_of s" + by (simp add: cteCaps_of_def pspace) + +lemma vms_sch_act_update'[iff]: + "valid_machine_state' (ksSchedulerAction_update f s) = + valid_machine_state' s" + by (simp add: valid_machine_state'_def ) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemmas bit_simps' = pteBits_def asidHighBits_def asidPoolBits_def asid_low_bits_def + asid_high_bits_def bit_simps + +lemma objBitsT_simps: + "objBitsT EndpointT = epSizeBits" + "objBitsT NotificationT = ntfnSizeBits" + "objBitsT CTET = cteSizeBits" + "objBitsT TCBT = tcbBlockSizeBits" + "objBitsT UserDataT = pageBits" + "objBitsT UserDataDeviceT = pageBits" + "objBitsT KernelDataT = pageBits" + "objBitsT (ArchT PTET) = word_size_bits" + "objBitsT (ArchT ASIDPoolT) = pageBits" + "objBitsT (ArchT VCPUT) = vcpuBits" + unfolding objBitsT_def makeObjectT_def + by (simp add: makeObject_simps objBits_simps bit_simps')+ + + +lemma objBitsT_koTypeOf : + "(objBitsT (koTypeOf ko)) = objBitsKO ko" + apply (cases ko; simp add: objBits_simps objBitsT_simps) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp add: archObjSize_def objBitsT_simps bit_simps') + done + +lemma typ_at_aligned': + "\ typ_at' tp p s \ \ is_aligned p (objBitsT tp)" + by (clarsimp simp add: typ_at'_def ko_wp_at'_def objBitsT_koTypeOf) + +lemma obj_at'_and: + "obj_at' (P and P') t s = (obj_at' P t s \ obj_at' P' t s)" + by (rule iffI, (clarsimp simp: obj_at'_def)+) + +lemma obj_at'_activatable_st_tcb_at': + "obj_at' (activatable' \ tcbState) t = st_tcb_at' activatable' t" + by (rule ext, clarsimp simp: st_tcb_at'_def) + +lemma st_tcb_at'_runnable_is_activatable: + "st_tcb_at' runnable' t s \ st_tcb_at' activatable' t s" + by (simp add: st_tcb_at'_def) + (fastforce elim: obj_at'_weakenE) + +lemma tcb_at'_has_tcbPriority: + "tcb_at' t s \ \p. obj_at' (\tcb. tcbPriority tcb = p) t s" + by (clarsimp simp add: obj_at'_def) + +lemma pred_tcb_at'_Not: + "pred_tcb_at' f (Not o P) t s = (tcb_at' t s \ \ pred_tcb_at' f P t s)" + by (auto simp: pred_tcb_at'_def obj_at'_def) + +lemma obj_at'_conj_distrib: + "obj_at' (\ko. P ko \ Q ko) p s \ obj_at' P p s \ obj_at' Q p s" + by (auto simp: obj_at'_def) + +lemma obj_at'_conj: + "obj_at' (\ko. P ko \ Q ko) p s = (obj_at' P p s \ obj_at' Q p s)" + using obj_at'_conj_distrib obj_at_conj' by blast + +lemma not_obj_at'_strengthen: + "obj_at' (Not \ P) p s \ \ obj_at' P p s" + by (clarsimp simp: obj_at'_def) + +lemma not_pred_tcb_at'_strengthen: + "pred_tcb_at' f (Not \ P) p s \ \ pred_tcb_at' f P p s" + by (clarsimp simp: pred_tcb_at'_def obj_at'_def) + +lemma obj_at'_ko_at'_prop: + "ko_at' ko t s \ obj_at' P t s = P ko" + by (drule obj_at_ko_at', clarsimp simp: obj_at'_def) + +lemma valid_refs'_cteCaps: + "valid_refs' S (ctes_of s) = (\c \ ran (cteCaps_of s). S \ capRange c = {})" + by (fastforce simp: valid_refs'_def cteCaps_of_def elim!: ranE) + +lemma valid_cap_sizes_cteCaps: + "valid_cap_sizes' n (ctes_of s) = (\c \ ran (cteCaps_of s). 
2 ^ capBits c \ n)" + apply (simp add: valid_cap_sizes'_def cteCaps_of_def) + apply (fastforce elim!: ranE) + done + +lemma cte_at_valid_cap_sizes_0: + "valid_cap_sizes' n ctes \ ctes p = Some cte \ 0 < n" + apply (clarsimp simp: valid_cap_sizes'_def) + apply (drule bspec, erule ranI) + apply (rule Suc_le_lessD, erule order_trans[rotated]) + apply simp + done + +lemma invs_valid_stateI' [elim!]: + "invs' s \ valid_state' s" + by (simp add: invs'_def) + +lemma tcb_at_invs' [elim!]: + "invs' s \ tcb_at' (ksCurThread s) s" + by (simp add: invs'_def cur_tcb'_def) + +lemma invs_valid_objs' [elim!]: + "invs' s \ valid_objs' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma invs_pspace_aligned' [elim!]: + "invs' s \ pspace_aligned' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma invs_pspace_distinct' [elim!]: + "invs' s \ pspace_distinct' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma invs_valid_pspace' [elim!]: + "invs' s \ valid_pspace' s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_arch_state' [elim!]: + "invs' s \ valid_arch_state' s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_cur' [elim!]: + "invs' s \ cur_tcb' s" + by (simp add: invs'_def) + +lemma invs_mdb' [elim!]: + "invs' s \ valid_mdb' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma valid_mdb_no_loops [elim!]: + "valid_mdb_ctes m \ no_loops m" + by (auto intro: mdb_chain_0_no_loops) + +lemma invs_no_loops [elim!]: + "invs' s \ no_loops (ctes_of s)" + apply (rule valid_mdb_no_loops) + apply (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def) + done + +lemma invs_iflive'[elim!]: + "invs' s \ if_live_then_nonz_cap' s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_unsafe_then_cap' [elim!]: + "invs' s \ if_unsafe_then_cap' s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sym' [elim!]: + "invs' s \ sym_refs (state_refs_of' s)" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sym_hyp' [elim!]: + "invs' s \ sym_refs (state_hyp_refs_of' s)" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sch_act_wf' [elim!]: + "invs' s \ sch_act_wf (ksSchedulerAction s) s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_valid_bitmaps[elim!]: + "invs' s \ valid_bitmaps s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sym_heap_sched_pointers[elim!]: + "invs' s \ sym_heap_sched_pointers s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_valid_sched_pointers[elim!]: + "invs' s \ valid_sched_pointers s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_valid_idle'[elim!]: + "invs' s \ valid_idle' s" + by (fastforce simp: invs'_def valid_state'_def) + +lemma invs_valid_global'[elim!]: + "invs' s \ valid_global_refs' s" + by (fastforce simp: invs'_def valid_state'_def) + +lemma invs_pspace_canonical'[elim!]: + "invs' s \ pspace_canonical' s" + by (fastforce dest!: invs_valid_pspace' simp: valid_pspace'_def) + +lemma valid_pspace_canonical'[elim!]: + "valid_pspace' s \ pspace_canonical' s" + by (rule valid_pspaceE') + +lemma invs'_invs_no_cicd: + "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" + by (simp add: invs'_to_invs_no_cicd'_def) + +lemma einvs_valid_etcbs: "einvs s \ valid_etcbs s" + by (clarsimp simp: valid_sched_def) + +lemma invs'_bitmapQ_no_L1_orphans: + "invs' s \ bitmapQ_no_L1_orphans s" + by (simp add: invs'_def valid_state'_def valid_bitmaps_def) + +lemma invs_ksCurDomain_maxDomain' [elim!]: + "invs' s \ ksCurDomain s 
\ maxDomain" + by (simp add: invs'_def valid_state'_def) + +lemma simple_st_tcb_at_state_refs_ofD': + "st_tcb_at' simple' t s \ bound_tcb_at' (\x. tcb_bound_refs' x = state_refs_of' s t) t s" + by (fastforce simp: pred_tcb_at'_def obj_at'_def state_refs_of'_def + projectKO_eq project_inject) + +lemma cur_tcb_arch' [iff]: + "cur_tcb' (ksArchState_update f s) = cur_tcb' s" + by (simp add: cur_tcb'_def) + +lemma cur_tcb'_machine_state [simp]: + "cur_tcb' (ksMachineState_update f s) = cur_tcb' s" + by (simp add: cur_tcb'_def) + +lemma invs_no_0_obj'[elim!]: + "invs' s \ no_0_obj' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma invs'_gsCNodes_update[simp]: + "invs' (gsCNodes_update f s') = invs' s'" + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) + done + +lemma invs'_gsUserPages_update[simp]: + "invs' (gsUserPages_update f s') = invs' s'" + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) + done + +lemma pred_tcb'_neq_contra: + "\ pred_tcb_at' proj P p s; pred_tcb_at' proj Q p s; \st. P st \ Q st \ \ False" + by (clarsimp simp: pred_tcb_at'_def obj_at'_def) + +lemma invs'_ksDomSchedule: + "invs' s \ KernelStateData_H.ksDomSchedule s = KernelStateData_H.ksDomSchedule (newKernelState undefined)" +unfolding invs'_def valid_state'_def by clarsimp + +lemma invs'_ksDomScheduleIdx: + "invs' s \ KernelStateData_H.ksDomScheduleIdx s < length (KernelStateData_H.ksDomSchedule (newKernelState undefined))" +unfolding invs'_def valid_state'_def by clarsimp + +lemma valid_bitmap_valid_bitmapQ_exceptE: + "\ valid_bitmapQ_except d p s; bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p)); + bitmapQ_no_L2_orphans s \ + \ valid_bitmapQ s" + unfolding valid_bitmapQ_def valid_bitmapQ_except_def + by force + +lemma valid_bitmap_valid_bitmapQ_exceptI[intro]: + "valid_bitmapQ s \ valid_bitmapQ_except d p s" + unfolding valid_bitmapQ_except_def valid_bitmapQ_def + by simp + +lemma mask_wordRadix_less_wordBits: + assumes sz: "wordRadix \ size w" + shows "unat ((w::'a::len word) && mask wordRadix) < wordBits" + using word_unat_mask_lt[where m=wordRadix and w=w] assms + by (simp add: wordRadix_def wordBits_def') + +lemma priority_mask_wordRadix_size: + "unat ((w::priority) && mask wordRadix) < wordBits" + by (rule mask_wordRadix_less_wordBits, simp add: wordRadix_def word_size) + +lemma canonical_address_mask_eq: + "canonical_address p = (p && mask (Suc canonical_bit) = p)" + by (simp add: canonical_address_def canonical_address_of_def ucast_ucast_mask canonical_bit_def) + +lemma canonical_address_and: + "canonical_address p \ canonical_address (p && b)" + by (simp add: canonical_address_range word_and_le) + +lemma canonical_address_add: + assumes "is_aligned p n" + assumes "f < 2 ^ n" + assumes "n \ canonical_bit" + assumes "canonical_address p" + shows "canonical_address (p + f)" +proof - + from `f < 2 ^ n` + have "f \ mask n" + by (simp add: mask_plus_1 plus_one_helper) + + from `canonical_address p` + have "p && mask 
(Suc canonical_bit) = p" + by (simp add: canonical_address_mask_eq) + moreover + from `f \ mask n` `is_aligned p n` + have "p + f = p || f" + by (simp add: word_and_or_mask_aligned) + moreover + from `f \ mask n` `n \ canonical_bit` + have "f \ mask (Suc canonical_bit)" + using le_smaller_mask by fastforce + hence "f && mask (Suc canonical_bit) = f" + by (simp add: le_mask_imp_and_mask) + ultimately + have "(p + f) && mask (Suc canonical_bit) = p + f" + by (simp add: word_ao_dist) + thus ?thesis + by (simp add: canonical_address_mask_eq) +qed + +lemma range_cover_canonical_address: + "\ range_cover ptr sz us n ; p < n ; + canonical_address (ptr && ~~ mask sz) ; sz \ maxUntypedSizeBits \ + \ canonical_address (ptr + of_nat p * 2 ^ us)" + apply (subst word_plus_and_or_coroll2[symmetric, where w = "mask sz"]) + apply (subst add.commute) + apply (subst add.assoc) + apply (rule canonical_address_add[where n=sz] ; simp add: untypedBits_defs is_aligned_neg_mask) + apply (drule (1) range_cover.range_cover_compare) + apply (clarsimp simp: word_less_nat_alt) + apply unat_arith + apply (simp add: canonical_bit_def) + done + +end +(* The normalise_obj_at' tactic was designed to simplify situations similar to: + ko_at' ko p s \ + obj_at' (complicated_P (obj_at' (complicated_Q (obj_at' ...)) p s)) p s + + It seems to also offer assistance in cases where there is lots of st_tcb_at', ko_at', obj_at' + confusion. If your goal looks like that kind of mess, try it out. It can help to not unfold + obj_at'_def which speeds up proofs. + *) +context begin + +private definition + "ko_at'_defn v \ ko_at' v" + +private lemma ko_at_defn_rewr: + "ko_at'_defn ko p s \ (obj_at' P p s = P ko)" + unfolding ko_at'_defn_def + by (auto simp: obj_at'_def) + +private lemma ko_at_defn_uniqueD: + "ko_at'_defn ko p s \ ko_at'_defn ko' p s \ ko' = ko" + unfolding ko_at'_defn_def + by (auto simp: obj_at'_def) + +private lemma ko_at_defn_pred_tcb_at': + "ko_at'_defn ko p s \ (pred_tcb_at' proj P p s = P (proj (tcb_to_itcb' ko)))" + by (auto simp: pred_tcb_at'_def ko_at_defn_rewr) + +private lemma ko_at_defn_ko_wp_at': + "ko_at'_defn ko p s \ (ko_wp_at' P p s = P (injectKO ko))" + by (clarsimp simp: ko_at'_defn_def obj_at'_real_def + ko_wp_at'_def project_inject) + +method normalise_obj_at' = + (clarsimp?, elim obj_at_ko_at'[folded ko_at'_defn_def, elim_format], + clarsimp simp: ko_at_defn_rewr ko_at_defn_pred_tcb_at' ko_at_defn_ko_wp_at', + ((drule(1) ko_at_defn_uniqueD)+)?, + clarsimp simp: ko_at'_defn_def) + +end + +add_upd_simps "invs' (gsUntypedZeroRanges_update f s)" + (obj_at'_real_def) +declare upd_simps[simp] + +lemma neq_out_intv: + "\ a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" + by simp + +lemma rule_out_intv: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a \ b \ + \ b \ mask_range a (objBitsKO obj)" + apply (drule(1) pspace_distinctD') + apply (subst (asm) ps_clear_def) + apply (drule_tac x = b in orthD2) + apply fastforce + apply (drule neq_out_intv) + apply (simp add: mask_def add_diff_eq) + apply (simp add: mask_def add_diff_eq) + done + +lemma ptr_range_mask_range: + "{ptr..ptr + 2 ^ bits - 1} = mask_range ptr bits" + unfolding mask_def + by simp + +lemma distinct_obj_range'_not_subset: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ \ obj_range' b obj' \ obj_range' a obj" + unfolding obj_range'_def + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply 
assumption + apply (frule (3) rule_out_intv) + using is_aligned_no_overflow_mask + by fastforce + +lemma obj_range'_disjoint: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ obj_range' a obj \ obj_range' b obj' = {}" + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule_tac p=a and p'=b in aligned_mask_range_cases) + apply assumption + apply (fastforce dest: distinct_obj_range'_not_subset + simp: obj_range'_def) + done + +qualify AARCH64_H (in Arch) + +(* + Then idea with this class is to be able to generically constrain + predicates over pspace_storable values to are not of type VCPU, + this is useful for invariants such as obj_at' that are trivially + true (sort of) if the predicate and the function (in the Hoare triple) + manipulate different types of objects +*) + +class no_vcpu = pspace_storable + + assumes not_vcpu: "koType TYPE('a) \ ArchT AARCH64_H.VCPUT" + +instance tcb :: no_vcpu by intro_classes auto +instance endpoint :: no_vcpu by intro_classes auto +instance notification :: no_vcpu by intro_classes auto +instance cte :: no_vcpu by intro_classes auto +instance user_data :: no_vcpu by intro_classes auto +instance user_data_device :: no_vcpu by intro_classes auto + +end_qualify + +instantiation AARCH64_H.asidpool :: no_vcpu +begin +interpretation Arch . +instance by intro_classes auto +end + +instantiation AARCH64_H.pte :: no_vcpu +begin +interpretation Arch . +instance by intro_classes auto +end + +end diff --git a/proof/refine/AARCH64/Invocations_R.thy b/proof/refine/AARCH64/Invocations_R.thy new file mode 100644 index 0000000000..0bc901c2ef --- /dev/null +++ b/proof/refine/AARCH64/Invocations_R.thy @@ -0,0 +1,26 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Invocations_R +imports Bits_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma invocationType_eq[simp]: + "invocationType = invocation_type" + unfolding invocationType_def invocation_type_def Let_def + by (rule ext, simp) (metis from_to_enum maxBound_is_bound') + +lemma genInvocationType_eq[simp]: + "genInvocationType = gen_invocation_type" + by (rule ext) (simp add: genInvocationType_def gen_invocation_type_def) + +end + +declare resolveAddressBits.simps[simp del] + +end diff --git a/proof/refine/AARCH64/IpcCancel_R.thy b/proof/refine/AARCH64/IpcCancel_R.thy new file mode 100644 index 0000000000..534c7b9a00 --- /dev/null +++ b/proof/refine/AARCH64/IpcCancel_R.thy @@ -0,0 +1,2351 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory IpcCancel_R +imports + Schedule_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunch aligned'[wp]: cancelAllIPC pspace_aligned' + (wp: crunch_wps mapM_x_wp' simp: unless_def) +crunch distinct'[wp]: cancelAllIPC pspace_distinct' + (wp: crunch_wps mapM_x_wp' simp: unless_def) + +crunch aligned'[wp]: cancelAllSignals pspace_aligned' + (wp: crunch_wps mapM_x_wp') +crunch distinct'[wp]: cancelAllSignals pspace_distinct' + (wp: crunch_wps mapM_x_wp') + +lemma cancelSignal_simple[wp]: + "\\\ cancelSignal t ntfn \\rv. 
st_tcb_at' simple' t\" + apply (simp add: cancelSignal_def Let_def) + apply (wp setThreadState_st_tcb | simp)+ + done + +lemma cancelSignal_pred_tcb_at': + "\pred_tcb_at' proj P t' and K (t \ t')\ + cancelSignal t ntfnptr + \\rv. pred_tcb_at' proj P t'\" + apply (simp add: cancelSignal_def) + apply (wp sts_pred_tcb_neq' getNotification_wp | wpc | clarsimp)+ + done + +crunch pred_tcb_at'[wp]: emptySlot "pred_tcb_at' proj P t" + (wp: setCTE_pred_tcb_at') + +defs capHasProperty_def: + "capHasProperty ptr P \ cte_wp_at' (\c. P (cteCap c)) ptr" + +end + +(* Assume various facts about cteDeleteOne, proved in Finalise_R *) +locale delete_one_conc_pre = + assumes delete_one_st_tcb_at: + "\P. (\st. simple' st \ P st) \ + \st_tcb_at' P t\ cteDeleteOne slot \\rv. st_tcb_at' P t\" + assumes delete_one_typ_at: + "\P. \\s. P (typ_at' T p s)\ cteDeleteOne slot \\rv s. P (typ_at' T p s)\" + assumes delete_one_aligned: + "\pspace_aligned'\ cteDeleteOne slot \\rv. pspace_aligned'\" + assumes delete_one_distinct: + "\pspace_distinct'\ cteDeleteOne slot \\rv. pspace_distinct'\" + assumes delete_one_it: + "\P. \\s. P (ksIdleThread s)\ cteDeleteOne cap \\rv s. P (ksIdleThread s)\" + assumes delete_one_sch_act_simple: + "\sch_act_simple\ cteDeleteOne sl \\rv. sch_act_simple\" + assumes delete_one_sch_act_not: + "\t. \sch_act_not t\ cteDeleteOne sl \\rv. sch_act_not t\" + assumes delete_one_reply_st_tcb_at: + "\P t. \\s. st_tcb_at' P t s \ (\t' r. cte_wp_at' (\cte. cteCap cte = ReplyCap t' False r) slot s)\ + cteDeleteOne slot + \\rv. st_tcb_at' P t\" + assumes delete_one_ksCurDomain: + "\P. \\s. P (ksCurDomain s)\ cteDeleteOne sl \\_ s. P (ksCurDomain s)\" + assumes delete_one_tcbDomain_obj_at': + "\P. \obj_at' (\tcb. P (tcbDomain tcb)) t'\ cteDeleteOne slot \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + +lemma (in delete_one_conc_pre) cancelIPC_simple[wp]: + "\\\ cancelIPC t \\rv. st_tcb_at' simple' t\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def + cong: Structures_H.thread_state.case_cong list.case_cong) + apply (rule bind_wp [OF _ gts_sp']) + apply (rule hoare_pre) + apply (wpc + | wp sts_st_tcb_at'_cases hoare_vcg_conj_lift + hoare_vcg_const_imp_lift delete_one_st_tcb_at + threadSet_pred_tcb_no_state + hoare_strengthen_post [OF cancelSignal_simple] + | simp add: o_def if_fun_split + | rule hoare_drop_imps + | clarsimp elim!: pred_tcb'_weakenE)+ + apply (auto simp: pred_tcb_at' + elim!: pred_tcb'_weakenE) + done + +lemma (in delete_one_conc_pre) cancelIPC_st_tcb_at': + "\st_tcb_at' P t' and K (t \ t')\ + cancelIPC t + \\rv. st_tcb_at' P t'\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def locateSlot_conv + capHasProperty_def isCap_simps) + apply (wp sts_pred_tcb_neq' hoare_drop_imps delete_one_reply_st_tcb_at + | wpc | clarsimp)+ + apply (wp getCTE_wp | clarsimp)+ + apply (wp hoare_vcg_ex_lift threadSet_cte_wp_at' hoare_vcg_imp_lift + cancelSignal_pred_tcb_at' sts_pred_tcb_neq' getEndpoint_wp gts_wp' + threadSet_pred_tcb_no_state + | wpc | clarsimp)+ + apply (auto simp: cte_wp_at_ctes_of isCap_simps) + done + +context begin interpretation Arch . +crunch typ_at'[wp]: emptySlot "\s. P (typ_at' T p s)" +end + +crunch tcb_at'[wp]: cancelSignal "tcb_at' t" + (wp: crunch_wps simp: crunch_simps) + +context delete_one_conc_pre +begin + +lemmas delete_one_typ_ats[wp] = typ_at_lifts [OF delete_one_typ_at] + +lemma cancelIPC_tcb_at'[wp]: + "\tcb_at' t\ cancelIPC t' \\_. 
tcb_at' t\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def) + apply (wp delete_one_typ_ats hoare_drop_imps + | simp add: o_def if_apply_def2 | wpc | assumption)+ + done + +end + +declare delete_remove1 [simp] +declare delete.simps [simp del] + +lemma invs_weak_sch_act_wf[elim!]: + "invs' s \ weak_sch_act_wf (ksSchedulerAction s) s" + apply (drule invs_sch_act_wf') + apply (clarsimp simp: weak_sch_act_wf_def) + done + +lemma blocked_cancelIPC_corres: + "\ st = Structures_A.BlockedOnReceive epPtr p' \ + st = Structures_A.BlockedOnSend epPtr p; thread_state_relation st st' \ \ + corres dc (invs and st_tcb_at ((=) st) t) (invs' and st_tcb_at' ((=) st') t) + (blocked_cancel_ipc st t) + (do ep \ getEndpoint epPtr; + y \ assert (\ (case ep of IdleEP \ True | _ \ False)); + ep' \ + if remove1 t (epQueue ep) = [] then return IdleEP + else + return $ epQueue_update (%_. (remove1 t (epQueue ep))) ep; + y \ setEndpoint epPtr ep'; + setThreadState Structures_H.thread_state.Inactive t + od)" + apply (simp add: blocked_cancel_ipc_def gbep_ret) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres]) + apply (rule_tac F="ep \ IdleEP" in corres_gen_asm2) + apply (rule corres_assert_assume[rotated]) + apply (clarsimp split: endpoint.splits) + apply (rule_tac P="invs and st_tcb_at ((=) st) t" and + P'="invs' and st_tcb_at' ((=) st') t" in corres_inst) + apply (case_tac rv) + apply (simp add: ep_relation_def) + apply (simp add: get_ep_queue_def ep_relation_def split del: if_split) + apply (rename_tac list) + apply (case_tac "remove1 t list") + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (simp add: valid_tcb_state_def pred_conj_def) + apply (wp weak_sch_act_wf_lift)+ + apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply (clarsimp simp: pred_tcb_at') + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (wp)+ + apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply (clarsimp simp: pred_tcb_at') + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply (simp add: get_ep_queue_def ep_relation_def split del: if_split) + apply (rename_tac list) + apply (case_tac "remove1 t list") + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (simp add: valid_tcb_state_def pred_conj_def) + apply (wp weak_sch_act_wf_lift)+ + apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def 
obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply (clarsimp simp: pred_tcb_at') + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (wp)+ + apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply (clarsimp simp: pred_tcb_at') + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply (wp getEndpoint_wp)+ + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply (fastforce simp: ko_wp_at'_def obj_at'_def dest: sym_refs_st_tcb_atD') + done + +lemma cancelSignal_corres: + "corres dc + (invs and st_tcb_at ((=) (Structures_A.BlockedOnNotification ntfn)) t) + (invs' and st_tcb_at' ((=) (BlockedOnNotification ntfn)) t) + (cancel_signal t ntfn) + (cancelSignal t ntfn)" + apply (simp add: cancel_signal_def cancelSignal_def Let_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getNotification_corres]) + apply (rule_tac F="isWaitingNtfn (ntfnObj ntfnaa)" in corres_gen_asm2) + apply (case_tac "ntfn_obj ntfna") + apply (simp add: ntfn_relation_def isWaitingNtfn_def) + apply (simp add: isWaitingNtfn_def ntfn_relation_def split del: if_split) + apply (rename_tac list) + apply (rule_tac R="remove1 t list = []" in corres_cases) + apply (simp del: dc_simp) + apply (rule corres_split[OF setNotification_corres]) + apply (simp add: ntfn_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (wp)+ + apply (simp add: list_case_If del: dc_simp) + apply (rule corres_split[OF setNotification_corres]) + apply (clarsimp simp add: ntfn_relation_def neq_Nil_conv) + apply (rule setThreadState_corres) + apply simp + apply (wp)+ + apply (simp add: isWaitingNtfn_def ntfn_relation_def) + apply (wp getNotification_wp)+ + apply (clarsimp simp: conj_comms st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (erule pspace_valid_objsE) + apply fastforce + apply (clarsimp simp: valid_obj_def valid_tcb_def valid_tcb_state_def) + apply (drule sym, simp add: obj_at_def) + apply (clarsimp simp: conj_comms pred_tcb_at' cong: conj_cong) + apply (rule conjI) + apply (simp add: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarsimp + apply (frule ko_at_valid_objs') + apply fastforce + apply simp + apply (clarsimp simp: valid_obj'_def valid_tcb'_def valid_tcb_state'_def) + 
apply (drule sym, simp) + apply (clarsimp simp: invs_weak_sch_act_wf) + apply (drule sym_refs_st_tcb_atD', fastforce) + apply (fastforce simp: isWaitingNtfn_def ko_wp_at'_def obj_at'_def + ntfn_bound_refs'_def + split: Structures_H.notification.splits ntfn.splits option.splits) + done + +lemma cte_map_tcb_2: + "cte_map (t, tcb_cnode_index 2) = t + 2*2^cte_level_bits" + by (simp add: cte_map_def tcb_cnode_index_def to_bl_1 shiftl_t2n) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cte_wp_at_master_reply_cap_to_ex_rights: + "cte_wp_at (is_master_reply_cap_to t) ptr + = (\s. \rights. cte_wp_at ((=) (cap.ReplyCap t True rights)) ptr s)" + by (rule ext, rule iffI; clarsimp simp: cte_wp_at_def is_master_reply_cap_to_def) + +lemma cte_wp_at_reply_cap_to_ex_rights: + "cte_wp_at (is_reply_cap_to t) ptr + = (\s. \rights. cte_wp_at ((=) (cap.ReplyCap t False rights)) ptr s)" + by (rule ext, rule iffI; clarsimp simp: cte_wp_at_def is_reply_cap_to_def) + +lemma reply_no_descendants_mdbNext_null: + assumes descs: "descendants_of (t, tcb_cnode_index 2) (cdt s) = {}" + and sr: "(s, s') \ state_relation" + and invs: "valid_reply_caps s" "valid_reply_masters s" + "valid_objs s" "valid_mdb s" "valid_mdb' s'" "pspace_aligned' s'" + "pspace_distinct' s'" + and tcb: "st_tcb_at (Not \ halted) t s" + and cte: "ctes_of s' (t + 2*2^cte_level_bits) = Some cte" + shows "mdbNext (cteMDBNode cte) = nullPointer" +proof - + from invs st_tcb_at_reply_cap_valid[OF tcb] + have "cte_wp_at (is_master_reply_cap_to t) (t, tcb_cnode_index 2) s" + by (fastforce simp: cte_wp_at_caps_of_state is_cap_simps is_master_reply_cap_to_def) + + hence "\r. cteCap cte = capability.ReplyCap t True r" + using invs sr + by (fastforce simp: cte_wp_at_master_reply_cap_to_ex_rights shiftl_t2n cte_index_repair + cte_wp_at_ctes_of cte cte_map_def tcb_cnode_index_def + dest: pspace_relation_cte_wp_at state_relation_pspace_relation) + + hence class_link: + "\cte'. ctes_of s' (mdbNext (cteMDBNode cte)) = Some cte' \ + capClass (cteCap cte') = ReplyClass t" + using invs + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (drule class_linksD[where m="ctes_of s'", OF cte]) + apply (simp add: mdb_next_unfold cte) + apply assumption + apply simp + done + + from invs tcb descs have "\ptr m g. + cte_wp_at ((=) (cap.ReplyCap t m g)) ptr s \ ptr = (t, tcb_cnode_index 2)" + apply (intro allI impI) + apply (case_tac m) + apply (fastforce simp: invs_def valid_state_def valid_reply_masters_def + cte_wp_at_master_reply_cap_to_ex_rights) + apply (fastforce simp: has_reply_cap_def cte_wp_at_reply_cap_to_ex_rights + dest: reply_master_no_descendants_no_reply elim: st_tcb_at_tcb_at) + done + hence "\ptr m mdb r. + ctes_of s' ptr = Some (CTE (capability.ReplyCap t m r) mdb) \ ptr = t + 2*2^cte_level_bits" + using sr invs + apply (intro allI impI) + apply (drule(2) pspace_relation_cte_wp_atI + [OF state_relation_pspace_relation]) + apply (elim exE, case_tac c, simp_all del: split_paired_All) + apply (elim allE, erule impE, fastforce) + apply (clarsimp simp: cte_map_def tcb_cnode_index_def shiftl_t2n) + done + hence class_unique: + "\ptr cte'. 
ctes_of s' ptr = Some cte' \ + capClass (cteCap cte') = ReplyClass t \ + ptr = t + 2*2^cte_level_bits" + apply (intro allI impI) + apply (case_tac cte', rename_tac cap node, case_tac cap, simp_all) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, simp_all) + done + + from invs have no_null: "ctes_of s' nullPointer = None" + by (clarsimp simp: no_0_def nullPointer_def valid_mdb'_def valid_mdb_ctes_def) + + from invs cte have no_loop: "mdbNext (cteMDBNode cte) \ t + 2*2^cte_level_bits" + by (fastforce simp: mdb_next_rel_def mdb_next_def + valid_mdb'_def + dest: valid_mdb_no_loops no_loops_direct_simp) + + from invs cte have + "mdbNext (cteMDBNode cte) \ nullPointer \ + (\cte'. ctes_of s' (mdbNext (cteMDBNode cte)) = Some cte')" + by (fastforce simp: valid_mdb'_def valid_mdb_ctes_def nullPointer_def + elim: valid_dlistEn) + hence + "mdbNext (cteMDBNode cte) \ nullPointer \ + mdbNext (cteMDBNode cte) = t + 2*2^cte_level_bits" + using class_link class_unique + by clarsimp + thus ?thesis + by (simp add: no_loop) +qed + +lemma reply_descendants_mdbNext_nonnull: + assumes descs: "descendants_of (t, tcb_cnode_index 2) (cdt s) \ {}" + and sr: "(s, s') \ state_relation" + and tcb: "st_tcb_at (Not \ halted) t s" + and cte: "ctes_of s' (t + 2*2^cte_level_bits) = Some cte" + shows "mdbNext (cteMDBNode cte) \ nullPointer" +proof - + from tcb have "cte_at (t, tcb_cnode_index 2) s" + by (simp add: st_tcb_at_tcb_at tcb_at_cte_at dom_tcb_cap_cases) + hence "descendants_of' (t + 2*2^cte_level_bits) (ctes_of s') \ {}" + using sr descs + by (fastforce simp: state_relation_def cdt_relation_def cte_map_def tcb_cnode_index_def shiftl_t2n mult_ac) + thus ?thesis + using cte unfolding nullPointer_def + by (fastforce simp: descendants_of'_def dest: subtree_next_0) +qed + +lemma reply_descendants_of_mdbNext: + "\ (s, s') \ state_relation; valid_reply_caps s; valid_reply_masters s; + valid_objs s; valid_mdb s; valid_mdb' s'; pspace_aligned' s'; + pspace_distinct' s'; st_tcb_at (Not \ halted) t s; + ctes_of s' (t + 2*2^cte_level_bits) = Some cte \ \ + (descendants_of (t, tcb_cnode_index 2) (cdt s) = {}) = + (mdbNext (cteMDBNode cte) = nullPointer)" + apply (case_tac "descendants_of (t, tcb_cnode_index 2) (cdt s) = {}") + apply (simp add: reply_no_descendants_mdbNext_null) + apply (simp add: reply_descendants_mdbNext_nonnull) + done + +lemma reply_mdbNext_is_descendantD: + assumes sr: "(s, s') \ state_relation" + and invs: "invs' s'" + and tcb: "tcb_at t s" + and cte: "ctes_of s' (t + 2*2^cte_level_bits) = Some cte" + and desc: "descendants_of (t, tcb_cnode_index 2) (cdt s) = {sl}" + shows "mdbNext (cteMDBNode cte) = cte_map sl" +proof - + from tcb have "cte_at (t, tcb_cnode_index 2) s" + by (simp add: tcb_at_cte_at dom_tcb_cap_cases) + hence "descendants_of' (t + 2*2^cte_level_bits) (ctes_of s') = {cte_map sl}" + using sr desc + by (fastforce simp: state_relation_def cdt_relation_def cte_map_def tcb_cnode_index_def shiftl_t2n mult_ac) + thus ?thesis + using cte invs + apply (clarsimp simp: descendants_of'_def) + apply (frule singleton_eqD, drule CollectD) + apply (erule subtree.cases) + apply (clarsimp simp: mdb_next_rel_def mdb_next_def) + apply (subgoal_tac "c' = cte_map sl") + apply (fastforce dest: invs_no_loops no_loops_direct_simp) + apply fastforce + done +qed +end + +locale delete_one_conc = delete_one_conc_pre + + assumes delete_one_invs: + "\p. \invs'\ cteDeleteOne p \\rv. 
invs'\" + +locale delete_one = delete_one_conc + delete_one_abs + + assumes delete_one_corres: + "corres dc (einvs and cte_wp_at can_fast_finalise ptr) + (invs' and cte_at' (cte_map ptr)) + (cap_delete_one ptr) (cteDeleteOne (cte_map ptr))" + +lemma (in delete_one) cancelIPC_ReplyCap_corres: + "corres dc (einvs and st_tcb_at awaiting_reply t) + (invs' and st_tcb_at' awaiting_reply' t) + (reply_cancel_ipc t) + (do y \ threadSet (\tcb. tcb \ tcbFault := None \) t; + slot \ getThreadReplySlot t; + callerCap \ liftM (mdbNext \ cteMDBNode) (getCTE slot); + when (callerCap \ nullPointer) (do + y \ stateAssert (capHasProperty callerCap (\cap. isReplyCap cap + \ \ capReplyMaster cap)) + []; + cteDeleteOne callerCap + od) + od)" + proof - + interpret Arch . (*FIXME: arch_split*) + show ?thesis + apply (simp add: reply_cancel_ipc_def getThreadReplySlot_def + locateSlot_conv liftM_def tcbReplySlot_def + del: split_paired_Ex) + apply (rule_tac Q="\_. invs and valid_list and valid_sched and st_tcb_at awaiting_reply t" + and Q'="\_. invs' and st_tcb_at' awaiting_reply' t" + in corres_underlying_split) + apply (rule corres_guard_imp) + apply (rule threadset_corresT; simp?) + apply (simp add: tcb_relation_def fault_rel_optionation_def) + apply (simp add: tcb_cap_cases_def) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: exst_same_def) + apply (fastforce simp: st_tcb_at_tcb_at) + apply clarsimp + defer + apply (wp thread_set_invs_trivial thread_set_no_change_tcb_state + threadSet_invs_trivial threadSet_pred_tcb_no_state thread_set_not_state_valid_sched + | fastforce simp: tcb_cap_cases_def inQ_def + | wp (once) sch_act_simple_lift)+ + apply (rule corres_underlying_split) + apply (rule corres_guard_imp) + apply (rule get_cap_corres [where cslot_ptr="(t, tcb_cnode_index 2)", + simplified cte_map_tcb_2 cte_index_repair_sym]) + apply (clarsimp dest!: st_tcb_at_tcb_at + tcb_at_cte_at [where ref="tcb_cnode_index 2"]) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + defer + apply (rule hoare_vcg_conj_lift [OF get_cap_inv get_cap_cte_wp_at, simplified]) + apply (rule hoare_vcg_conj_lift [OF getCTE_inv getCTE_cte_wp_at, simplified]) + apply (rename_tac cte) + apply (rule corres_symb_exec_l [OF _ _ gets_sp]) + apply (rule_tac F="\r. cap = cap.ReplyCap t True r \ + cteCap cte = capability.ReplyCap t True (AllowGrant \ r)" in corres_req) + apply (fastforce simp: cte_wp_at_caps_of_state is_cap_simps + dest!: st_tcb_at_reply_cap_valid) + apply (rule_tac F="(descs = {}) = (mdbNext (cteMDBNode cte) = nullPointer)" + in corres_req) + apply (fastforce simp: st_tcb_at_tcb_at cte_wp_at_ctes_of st_tcb_def2 cte_index_repair + dest: reply_descendants_of_mdbNext) + apply (elim exE) + apply (case_tac "descs = {}", simp add: when_def) + apply (rule_tac F="\sl. descs = {sl}" in corres_req) + apply (fastforce intro: st_tcb_at_tcb_at dest: reply_master_one_descendant) + apply (erule exE, frule singleton_eqD) + apply (rule_tac F="mdbNext (cteMDBNode cte) = cte_map sl" in corres_req) + apply (clarsimp dest!: st_tcb_at_tcb_at) + apply (fastforce simp: cte_wp_at_ctes_of cte_level_bits_def + elim!: reply_mdbNext_is_descendantD) + apply (simp add: when_def getSlotCap_def capHasProperty_def + del: split_paired_Ex) + apply (rule corres_guard_imp) + apply (rule_tac P'="\s. \r'. 
cte_wp_at ((=) (cap.ReplyCap t False r')) sl s" + in corres_stateAssert_implied [OF delete_one_corres]) + apply (fastforce dest: pspace_relation_cte_wp_at + state_relation_pspace_relation + simp: cte_wp_at_ctes_of isCap_simps) + apply (fastforce simp: invs_def valid_state_def valid_mdb_def reply_mdb_def + reply_masters_mdb_def cte_wp_at_caps_of_state + can_fast_finalise_def) + apply (fastforce simp: valid_mdb'_def valid_mdb_ctes_def + cte_wp_at_ctes_of nullPointer_def + elim: valid_dlistEn dest: invs_mdb') + apply (simp add: exs_valid_def gets_def get_def return_def bind_def + del: split_paired_Ex split_paired_All) + apply (wp) + done +qed + +lemma (in delete_one) cancel_ipc_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + (cancel_ipc t) (cancelIPC t)" + apply (simp add: cancel_ipc_def cancelIPC_def Let_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac P="einvs and st_tcb_at ((=) state) t" and + P'="invs' and st_tcb_at' ((=) statea) t" in corres_inst) + apply (case_tac state, simp_all add: isTS_defs list_case_If)[1] + apply (rule corres_guard_imp) + apply (rule blocked_cancelIPC_corres) + apply fastforce + apply fastforce + apply simp + apply simp + apply (clarsimp simp add: isTS_defs list_case_If) + apply (rule corres_guard_imp) + apply (rule blocked_cancelIPC_corres) + apply fastforce + apply fastforce + apply simp + apply simp + apply (rule corres_guard_imp) + apply (rule cancelIPC_ReplyCap_corres) + apply (clarsimp elim!: st_tcb_weakenE) + apply (clarsimp elim!: pred_tcb'_weakenE) + apply (rule corres_guard_imp [OF cancelSignal_corres], simp+) + apply (wp gts_sp[where P="\",simplified])+ + apply (rule hoare_strengthen_post) + apply (rule gts_sp'[where P="\"]) + apply (clarsimp elim!: pred_tcb'_weakenE) + apply fastforce + apply simp + done + +lemma setNotification_utr[wp]: + "\untyped_ranges_zero'\ setNotification ntfn nobj \\rv. untyped_ranges_zero'\" + apply (simp add: cteCaps_of_def) + apply (rule hoare_pre, wp untyped_ranges_zero_lift) + apply (simp add: o_def) + done + +crunch gsUntypedZeroRanges[wp]: setEndpoint "\s. P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) + +lemma setEndpoint_utr[wp]: + "\untyped_ranges_zero'\ setEndpoint p ep \\rv. untyped_ranges_zero'\" + apply (simp add: cteCaps_of_def) + apply (rule hoare_pre, wp untyped_ranges_zero_lift) + apply (simp add: o_def) + done + +declare cart_singleton_empty [simp] +declare cart_singleton_empty2[simp] + +crunch ksQ[wp]: setNotification "\s. P (ksReadyQueues s p)" + (wp: setObject_queues_unchanged_tcb updateObject_default_inv) + +lemma sch_act_simple_not_t[simp]: "sch_act_simple s \ sch_act_not t s" + by (clarsimp simp: sch_act_simple_def) + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunches setNotification + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift) + +lemma cancelSignal_invs': + "\invs' and st_tcb_at' (\st. st = BlockedOnNotification ntfn) t and sch_act_not t\ + cancelSignal t ntfn \\rv. invs'\" + proof - + have NTFNSN: "\ntfn ntfn'. + \\s. sch_act_not (ksCurThread s) s \ setNotification ntfn ntfn' + \\_ s. 
sch_act_not (ksCurThread s) s\" + apply (rule hoare_weaken_pre) + apply (wps setNotification_ksCurThread) + apply (wp, simp) + done + show ?thesis + apply (simp add: cancelSignal_def invs'_def valid_state'_def Let_def) + apply (wp valid_irq_node_lift sts_sch_act' irqs_masked_lift + hoare_vcg_all_lift + setThreadState_ct_not_inQ NTFNSN + hoare_vcg_all_lift + | simp add: valid_tcb_state'_def list_case_If split del: if_split)+ + prefer 2 + apply assumption + apply (rule hoare_strengthen_post) + apply (rule get_ntfn_sp') + apply (rename_tac rv s) + apply (clarsimp simp: pred_tcb_at') + apply (rule conjI) + apply (clarsimp simp: valid_ntfn'_def) + apply (case_tac "ntfnObj rv", simp_all add: isWaitingNtfn_def) + apply (frule ko_at_valid_objs') + apply (simp add: valid_pspace_valid_objs') + apply (clarsimp simp: projectKO_opt_ntfn split: kernel_object.splits) + apply (simp add: valid_obj'_def valid_ntfn'_def) + apply (frule st_tcb_at_state_refs_ofD') + apply (frule ko_at_state_refs_ofD') + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp simp: ntfn_bound_refs'_def split: if_split_asm) + apply (clarsimp split: if_split_asm) + subgoal + by (safe; simp add: ntfn_bound_refs'_def tcb_bound_refs'_def + obj_at'_def tcb_ntfn_is_bound'_def + split: option.splits) + subgoal + by (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def + tcb_bound_refs'_def) + subgoal + by (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def + tcb_bound_refs'_def ntfn_q_refs_of'_def remove1_empty + split: ntfn.splits) + apply (rule conjI, clarsimp elim!: if_live_state_refsE) + apply (fastforce simp: sym_refs_def dest!: idle'_no_refs) + apply (case_tac "ntfnObj rv", simp_all) + apply (frule obj_at_valid_objs', clarsimp) + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) + apply (rule conjI, clarsimp split: option.splits) + apply (frule st_tcb_at_state_refs_ofD') + apply (frule ko_at_state_refs_ofD') + apply (rule conjI) + apply (erule delta_sym_refs) + apply (fastforce simp: ntfn_bound_refs'_def split: if_split_asm) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def + set_eq_subset) + apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def + set_eq_subset) + apply (clarsimp simp: valid_pspace'_def) + apply (rule conjI, clarsimp elim!: if_live_state_refsE) + apply (rule conjI) + apply (case_tac "ntfnBoundTCB rv") + apply (clarsimp elim!: if_live_state_refsE)+ + apply (clarsimp dest!: idle'_no_refs) + done + qed + +lemmas setEndpoint_valid_arch[wp] + = valid_arch_state_lift' [OF setEndpoint_typ_at' set_ep_arch'] + +lemma ep_redux_simps3: + "ep_q_refs_of' (case xs of [] \ IdleEP | y # ys \ RecvEP (y # ys)) + = (set xs \ {EPRecv})" + "ep_q_refs_of' (case xs of [] \ IdleEP | y # ys \ SendEP (y # ys)) + = (set xs \ {EPSend})" + by (fastforce split: list.splits simp: valid_ep_def valid_ntfn_def)+ + +declare setEndpoint_ksMachine [wp] +declare setEndpoint_valid_irq_states' [wp] + +lemma setEndpoint_vms[wp]: + "\valid_machine_state'\ setEndpoint epptr ep' \\_. valid_machine_state'\" + by (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + +crunch ksQ[wp]: setEndpoint "\s. P (ksReadyQueues s p)" + (wp: setObject_queues_unchanged_tcb updateObject_default_inv) + +crunch ksCurDomain[wp]: setEndpoint "\s. P (ksCurDomain s)" + (wp: setObject_ep_cur_domain) + +lemma setEndpoint_ksDomSchedule[wp]: + "\\s. 
P (ksDomSchedule s)\ setEndpoint ptr ep \\_ s. P (ksDomSchedule s)\" + apply (simp add: setEndpoint_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ setEndpoint ptr ep \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift hoare_vcg_imp_lift setObject_ep_ct + | rule obj_at_setObject2 + | clarsimp simp: updateObject_default_def in_monad setEndpoint_def)+ + done + +lemma setEndpoint_ct_not_inQ[wp]: + "\ct_not_inQ\ setEndpoint eeptr ep' \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF setEndpoint_nosch]) + apply (simp add: setEndpoint_def) + apply (rule hoare_weaken_pre) + apply (wps setObject_ep_ct) + apply (wp obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad)+ + done + +lemma setEndpoint_ksDomScheduleIdx[wp]: + "setEndpoint ptr ep \\s. P (ksDomScheduleIdx s)\" + apply (simp add: setEndpoint_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +end + +crunches setEndpoint + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift simp: updateObject_default_def) + +lemma (in delete_one_conc) cancelIPC_invs[wp]: + shows "\tcb_at' t and invs'\ cancelIPC t \\rv. invs'\" +proof - + have P: "\xs v f. (case xs of [] \ return v | y # ys \ return (f (y # ys))) + = return (case xs of [] \ v | y # ys \ f xs)" + by (clarsimp split: list.split) + have EPSCHN: "\eeptr ep'. \\s. sch_act_not (ksCurThread s) s\ + setEndpoint eeptr ep' + \\_ s. sch_act_not (ksCurThread s) s\" + apply (rule hoare_weaken_pre) + apply (wps setEndpoint_ct') + apply (wp, simp) + done + have Q: + "\epptr. \st_tcb_at' (\st. \a. (st = BlockedOnReceive epptr a) + \ (\a b c d. st = BlockedOnSend epptr a b c d)) t + and invs'\ + do ep \ getEndpoint epptr; + y \ assert (\ (case ep of IdleEP \ True | _ \ False)); + ep' \ case remove1 t (epQueue ep) + of [] \ return Structures_H.endpoint.IdleEP + | x # xs \ return (epQueue_update (%_. x # xs) ep); + y \ setEndpoint epptr ep'; + setThreadState Inactive t + od \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (subst P) + apply (wp valid_irq_node_lift valid_global_refs_lift' + irqs_masked_lift sts_sch_act' + setThreadState_ct_not_inQ EPSCHN + hoare_vcg_all_lift getEndpoint_wp + | simp add: valid_tcb_state'_def split del: if_split + | wpc)+ + apply (clarsimp simp: pred_tcb_at' fun_upd_def[symmetric] conj_comms + split del: if_split cong: if_cong) + apply (rule conjI, clarsimp simp: valid_pspace'_def) + apply (rule conjI, clarsimp simp: valid_pspace'_def) + apply (rule conjI, clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + apply (frule obj_at_valid_objs', clarsimp) + apply (clarsimp simp: valid_obj'_def) + apply (rule conjI) + apply (clarsimp simp: obj_at'_def valid_ep'_def + dest!: pred_tcb_at') + apply (clarsimp, rule conjI) + apply (auto simp: pred_tcb_at'_def obj_at'_def)[1] + apply (rule conjI) + apply (clarsimp split: Structures_H.endpoint.split_asm list.split + simp: valid_ep'_def) + apply (rename_tac list x xs) + apply (frule distinct_remove1[where x=t]) + apply (cut_tac xs=list in set_remove1_subset[where x=t]) + apply auto[1] + apply (rename_tac list x xs) + apply (frule distinct_remove1[where x=t]) + apply (cut_tac xs=list in set_remove1_subset[where x=t]) + apply auto[1] + apply (thin_tac "sym_refs (state_hyp_refs_of' s)" for s) + apply (frule(1) sym_refs_ko_atD') + apply (rule conjI) + apply (clarsimp elim!: if_live_state_refsE split: Structures_H.endpoint.split_asm) + apply (drule st_tcb_at_state_refs_ofD') + apply (clarsimp simp: ep_redux_simps3 valid_ep'_def + split: Structures_H.endpoint.split_asm + cong: list.case_cong) + apply (frule_tac x=t in distinct_remove1) + apply (frule_tac x=t in set_remove1_eq) + by (auto elim!: delta_sym_refs + simp: symreftype_inverse' tcb_st_refs_of'_def tcb_bound_refs'_def + split: thread_state.splits if_split_asm) + have R: + "\invs' and tcb_at' t\ + do y \ threadSet (\tcb. tcb \ tcbFault := None \) t; + slot \ getThreadReplySlot t; + callerCap \ liftM (mdbNext \ cteMDBNode) (getCTE slot); + when (callerCap \ nullPointer) (do + y \ stateAssert (capHasProperty callerCap (\cap. isReplyCap cap + \ \ capReplyMaster cap)) + []; + cteDeleteOne callerCap + od) + od + \\rv. invs'\" + unfolding getThreadReplySlot_def + by (wp valid_irq_node_lift delete_one_invs hoare_drop_imps + threadSet_invs_trivial irqs_masked_lift + | simp add: o_def if_apply_def2 + | fastforce simp: inQ_def)+ + show ?thesis + apply (simp add: cancelIPC_def crunch_simps + cong: if_cong list.case_cong) + apply (rule bind_wp [OF _ gts_sp']) + apply (case_tac state, + simp_all add: isTS_defs) + apply (safe intro!: hoare_weaken_pre[OF Q] + hoare_weaken_pre[OF R] + hoare_weaken_pre[OF return_wp] + hoare_weaken_pre[OF cancelSignal_invs'] + elim!: pred_tcb'_weakenE) + apply (auto simp: pred_tcb_at'_def obj_at'_def + dest: invs_sch_act_wf') + done +qed + +lemma (in delete_one_conc_pre) cancelIPC_sch_act_simple[wp]: + "\sch_act_simple\ + cancelIPC t + \\rv. sch_act_simple\" + apply (simp add: cancelIPC_def cancelSignal_def Let_def + cong: if_cong Structures_H.thread_state.case_cong) + apply (wp hoare_drop_imps delete_one_sch_act_simple + | simp add: getThreadReplySlot_def | wpcw + | rule sch_act_simple_lift + | (rule_tac Q="\rv. sch_act_simple" in hoare_post_imp, simp))+ + done + +lemma cancelSignal_st_tcb_at: + assumes x[simp]: "P Inactive" shows + "\st_tcb_at' P t\ + cancelSignal t' ntfn + \\rv. 
st_tcb_at' P t\" + apply (simp add: cancelSignal_def Let_def list_case_If) + apply (wp sts_st_tcb_at'_cases hoare_vcg_const_imp_lift + hoare_drop_imp[where R="%rv s. P' rv" for P']) + apply clarsimp+ + done + +lemma (in delete_one_conc_pre) cancelIPC_st_tcb_at: + assumes x[simp]: "\st. simple' st \ P st" shows + "\st_tcb_at' P t\ + cancelIPC t' + \\rv. st_tcb_at' P t\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def + cong: if_cong Structures_H.thread_state.case_cong) + apply (rule bind_wp [OF _ gts_sp']) + apply (case_tac rv, simp_all add: isTS_defs list_case_If) + apply (wp sts_st_tcb_at'_cases delete_one_st_tcb_at + threadSet_pred_tcb_no_state + cancelSignal_st_tcb_at hoare_drop_imps + | clarsimp simp: o_def if_fun_split)+ + done + +lemma weak_sch_act_wf_lift_linear: + "\ \t. \\s. sa s \ SwitchToThread t\ f \\rv s. sa s \ SwitchToThread t\; + \t. \st_tcb_at' runnable' t\ f \\rv. st_tcb_at' runnable' t\; + \t. \tcb_in_cur_domain' t\ f \\rv. tcb_in_cur_domain' t\ \ + \ \\s. weak_sch_act_wf (sa s) s\ f \\rv s. weak_sch_act_wf (sa s) s\" + apply (simp only: weak_sch_act_wf_def imp_conv_disj) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_conj_lift) + apply simp_all + done + +lemma sts_sch_act_not[wp]: + "\sch_act_not t\ setThreadState st t' \\rv. sch_act_not t\" + apply (simp add: setThreadState_def rescheduleRequired_def) + apply (wp hoare_drop_imps | simp | wpcw)+ + done + +crunches cancelSignal, setBoundNotification + for sch_act_not[wp]: "sch_act_not t" + (wp: crunch_wps) + +lemma cancelSignal_tcb_at_runnable': + "t \ t' \ + \st_tcb_at' runnable' t'\ cancelSignal t ntfnptr \\_. st_tcb_at' runnable' t'\" + unfolding cancelSignal_def + by (wpsimp wp: sts_pred_tcb_neq' hoare_drop_imp) + +lemma cancelAllIPC_tcb_at_runnable': + "\st_tcb_at' runnable' t\ cancelAllIPC epptr \\_. st_tcb_at' runnable' t\" + unfolding cancelAllIPC_def + by (wpsimp wp: mapM_x_wp' sts_st_tcb' hoare_drop_imp) + +lemma cancelAllSignals_tcb_at_runnable': + "\st_tcb_at' runnable' t\ cancelAllSignals ntfnptr \\_. st_tcb_at' runnable' t\" + unfolding cancelAllSignals_def + by (wpsimp wp: mapM_x_wp' sts_st_tcb' hoare_drop_imp) + +crunches unbindNotification, bindNotification, unbindMaybeNotification + for st_tcb_at'[wp]: "st_tcb_at' P p" + (wp: threadSet_pred_tcb_no_state ignore: threadSet) + +lemma (in delete_one_conc_pre) finaliseCap_tcb_at_runnable': + "\st_tcb_at' runnable' t\ finaliseCap cap final True \\_. st_tcb_at' runnable' t\" + apply (clarsimp simp add: finaliseCap_def Let_def) + apply (rule conjI | clarsimp | wp cancelAllIPC_tcb_at_runnable' getObject_ntfn_inv + cancelAllSignals_tcb_at_runnable' + | wpc)+ + done + +crunch pred_tcb_at'[wp]: isFinalCapability "pred_tcb_at' proj st t" + (simp: crunch_simps) + +lemma (in delete_one_conc_pre) cteDeleteOne_tcb_at_runnable': + "\st_tcb_at' runnable' t\ cteDeleteOne callerCap \\_. st_tcb_at' runnable' t\" + apply (simp add: cteDeleteOne_def unless_def) + apply (wp finaliseCap_tcb_at_runnable' hoare_drop_imps | clarsimp)+ + done + +crunches getThreadReplySlot, getEndpoint + for pred_tcb_at'[wp]: "pred_tcb_at' proj st t" + +lemma (in delete_one_conc_pre) cancelIPC_tcb_at_runnable': + "\st_tcb_at' runnable' t'\ cancelIPC t \\_. st_tcb_at' runnable' t'\" + (is "\?PRE\ _ \_\") + apply (clarsimp simp: cancelIPC_def Let_def) + apply (case_tac "t'=t") + apply (rule_tac Q'="\st. 
st_tcb_at' runnable' t and K (runnable' st)" in bind_wp) + apply (case_tac rv; simp) + apply (wp sts_pred_tcb_neq' | simp | wpc)+ + apply (clarsimp) + apply (rule_tac Q="\rv. ?PRE" in hoare_post_imp, fastforce) + apply (wp cteDeleteOne_tcb_at_runnable' + threadSet_pred_tcb_no_state + cancelSignal_tcb_at_runnable' + sts_pred_tcb_neq' hoare_drop_imps + | wpc | simp add: o_def if_fun_split)+ + done + +crunch ksCurDomain[wp]: cancelSignal "\s. P (ksCurDomain s)" + (wp: crunch_wps) + +lemma (in delete_one_conc_pre) cancelIPC_ksCurDomain[wp]: + "\\s. P (ksCurDomain s)\ cancelIPC t \\_ s. P (ksCurDomain s)\" + unfolding cancelIPC_def Let_def + by (wpsimp wp: hoare_vcg_conj_lift delete_one_ksCurDomain hoare_drop_imps + simp: getThreadReplySlot_def o_def if_fun_split) + +(* FIXME move *) +lemma setBoundNotification_not_ntfn: + "(\tcb ntfn. P (tcb\tcbBoundNotification := ntfn\) \ P tcb) + \ \obj_at' P t'\ setBoundNotification ntfn t \\_. obj_at' P t'\" + apply (simp add: setBoundNotification_def) + apply (wp hoare_vcg_conj_lift + | wpc + | rule hoare_drop_imps + | simp)+ + done + +lemma setBoundNotification_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ setBoundNotification st t \\_. tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule hoare_pre) + apply wps + apply (wp setBoundNotification_not_ntfn | simp)+ + done + +lemma setThreadState_tcbDomain_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding setThreadState_def + by wpsimp + +crunches cancelSignal + for tcbDomain_obj_at'[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t'" + (wp: crunch_wps) + +lemma (in delete_one_conc_pre) cancelIPC_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelIPC t \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (simp add: cancelIPC_def Let_def) + apply (wp hoare_vcg_conj_lift + delete_one_tcbDomain_obj_at' + | wpc + | rule hoare_drop_imps + | simp add: getThreadReplySlot_def o_def if_fun_split)+ + done + +lemma (in delete_one_conc_pre) cancelIPC_tcb_in_cur_domain': + "\tcb_in_cur_domain' t'\ cancelIPC t \\_. tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule hoare_pre) + apply wps + apply (wp cancelIPC_tcbDomain_obj_at' | simp)+ + done + +lemma (in delete_one_conc_pre) cancelIPC_sch_act_not: + "\sch_act_not t'\ cancelIPC t \\_. sch_act_not t'\" + apply (simp add: cancelIPC_def Let_def) + apply (wp hoare_vcg_conj_lift + delete_one_sch_act_not + | wpc + | simp add: getThreadReplySlot_def o_def if_apply_def2 + split del: if_split + | rule hoare_drop_imps)+ + done + +lemma (in delete_one_conc_pre) cancelIPC_weak_sch_act_wf: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + cancelIPC t + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (rule weak_sch_act_wf_lift_linear) + apply (wp cancelIPC_sch_act_not cancelIPC_tcb_in_cur_domain' cancelIPC_tcb_at_runnable')+ + done + +text \The suspend operation, significant as called from delete\ + +lemma rescheduleRequired_weak_sch_act_wf: + "\\\ rescheduleRequired \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: rescheduleRequired_def setSchedulerAction_def) + apply (wp hoare_TrueI | simp add: weak_sch_act_wf_def)+ + done + +lemma sts_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s + \ (ksSchedulerAction s = SwitchToThread t \ runnable' st)\ + setThreadState st t + \\_ s. 
weak_sch_act_wf (ksSchedulerAction s) s\" + including classic_wp_pre + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_weak_sch_act_wf) + apply (rule_tac Q="\_ s. weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp, simp) + apply (simp add: weak_sch_act_wf_def) + apply (wp hoare_vcg_all_lift) + apply (wps threadSet_nosch) + apply (wp hoare_vcg_const_imp_lift threadSet_pred_tcb_at_state threadSet_tcbDomain_triv | simp)+ + done + +lemma sbn_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ setBoundNotification ntfn t \\rv s. P (ksSchedulerAction s)\" + by (simp add: setBoundNotification_def, wp threadSet_nosch) + + +lemma sbn_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setBoundNotification ntfn t + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + by (wp weak_sch_act_wf_lift sbn_st_tcb') + + +lemma set_ep_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setEndpoint epptr ep + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (wp weak_sch_act_wf_lift) + done + +lemma setObject_ntfn_sa_unchanged[wp]: + "\\s. P (ksSchedulerAction s)\ + setObject ptr (ntfn::Structures_H.notification) + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setObject_def split_def) + apply (wp | simp add: updateObject_default_def)+ + done + +lemma setObject_oa_unchanged[wp]: + "\\s. obj_at' (\tcb::tcb. P tcb) t s\ + setObject ptr (ntfn::Structures_H.notification) + \\rv s. obj_at' P t s\" + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_type + updateObject_default_def + in_monad) + done + +lemma setNotification_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setNotification ntfnptr ntfn + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (wp hoare_vcg_all_lift hoare_convert_imp hoare_vcg_conj_lift + | simp add: setNotification_def weak_sch_act_wf_def st_tcb_at'_def tcb_in_cur_domain'_def)+ + apply (rule hoare_pre) + apply (wps setObject_ntfn_cur_domain) + apply (wp setObject_ntfn_obj_at'_tcb | simp add: o_def)+ + done + +lemmas ipccancel_weak_sch_act_wfs + = weak_sch_act_wf_lift[OF _ setCTE_pred_tcb_at'] + +lemma updateObject_ep_inv: + "\P\ updateObject (obj::endpoint) ko p q n \\rv. P\" + by simp (rule updateObject_default_inv) + +lemma asUser_tcbQueued_inv[wp]: + "\obj_at' (\tcb. P (tcbQueued tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbQueued tcb)) t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ + done + +context begin interpretation Arch . 
+ +crunches cancel_ipc + for pspace_aligned[wp]: "pspace_aligned :: det_state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_state \ _" + (simp: crunch_simps wp: crunch_wps) + +end + +crunches asUser + for valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps) + +crunches set_thread_state + for in_correct_ready_q[wp]: in_correct_ready_q + (wp: crunch_wps) + +crunches set_thread_state_ext + for ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps) + +lemma set_thread_state_ready_qs_distinct[wp]: + "set_thread_state ref ts \ready_qs_distinct\" + unfolding set_thread_state_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) + +lemma as_user_ready_qs_distinct[wp]: + "as_user tptr f \ready_qs_distinct\" + unfolding as_user_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) + +lemma (in delete_one) suspend_corres: + "corres dc (einvs and tcb_at t) invs' + (IpcCancel_A.suspend t) (ThreadDecls_H.suspend t)" + apply (rule corres_cross_over_guard[where P'=P' and Q="tcb_at' t and P'" for P']) + apply (fastforce dest!: tcb_at_cross state_relation_pspace_relation) + apply (simp add: IpcCancel_A.suspend_def Thread_H.suspend_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF cancel_ipc_corres]) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule corres_split_nor) + apply (rule corres_if) + apply (case_tac state; simp) + apply (simp add: update_restart_pc_def updateRestartPC_def) + apply (rule asUser_corres') + apply (simp add: AARCH64.nextInstructionRegister_def AARCH64.faultRegister_def + AARCH64_H.nextInstructionRegister_def AARCH64_H.faultRegister_def) + apply (simp add: AARCH64_H.Register_def) + apply (subst unit_dc_is_eq) + apply (rule corres_underlying_trivial) + apply (wpsimp simp: AARCH64.setRegister_def AARCH64.getRegister_def) + apply (rule corres_return_trivial) + apply (rule corres_split_nor[OF setThreadState_corres]) + apply wpsimp + apply (rule tcbSchedDequeue_corres, simp) + apply wp + apply (wpsimp wp: sts_valid_objs') + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def valid_tcb_state'_def)+ + apply (rule hoare_post_imp[where Q = "\rv s. einvs s \ tcb_at t s"]) + apply (simp add: invs_implies invs_strgs valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct valid_sched_def) + apply wp + apply (rule hoare_post_imp[where Q = "\_ s. invs' s \ tcb_at' t s"]) + apply (fastforce simp: invs'_def valid_tcb_state'_def) + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ + apply fastforce+ + done + +context begin interpretation Arch . + +lemma archThreadGet_corres: + "(\a a'. arch_tcb_relation a a' \ f a = f' a') \ + corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_get f t) (archThreadGet f' t)" + unfolding arch_thread_get_def archThreadGet_def + apply (corresKsimp corres: getObject_TCB_corres) + apply (clarsimp simp: tcb_relation_def) + done + +lemma tcb_vcpu_relation: + "arch_tcb_relation a a' \ tcb_vcpu a = atcbVCPUPtr a'" + unfolding arch_tcb_relation_def by auto + +lemma archThreadGet_VCPU_corres[corres]: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_get tcb_vcpu t) (archThreadGet atcbVCPUPtr t)" + by (rule archThreadGet_corres) (erule tcb_vcpu_relation) + +lemma when_fail_assert: + "when P fail = assert (\P)" + by (simp add: when_def assert_def) + +lemma opt_case_when: + "(case x of None \ return () | Some (c, _) \ when (c = v) f) = + when (\a. 
x = Some (v, a)) f" + by (cases x) (auto simp add: split_def) + +lemma corres_gets_current_vcpu[corres]: + "corres (=) \ \ (gets (arm_current_vcpu \ arch_state)) + (gets (armHSCurVCPU \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma vcpuInvalidateActive_corres[corres]: + "corres dc \ no_0_obj' vcpu_invalidate_active vcpuInvalidateActive" + unfolding vcpuInvalidateActive_def vcpu_invalidate_active_def + apply (corresKsimp corres: vcpuDisable_corres + corresK: corresK_modifyT + simp: modifyArchState_def) + apply (clarsimp simp: state_relation_def arch_state_relation_def) + done + +lemma tcb_ko_at': + "tcb_at' t s \ \ta::tcb. ko_at' ta t s" + by (clarsimp simp: obj_at'_def) + +lemma archThreadSet_corres: + assumes "\a a'. arch_tcb_relation a a' \ arch_tcb_relation (f a) (f' a')" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_set f t) (archThreadSet f' t)" +proof - + from assms + have tcb_rel: + "\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation (tcb\tcb_arch := f (tcb_arch tcb)\) + (tcbArch_update (\_. f' (tcbArch tcb')) tcb')" + by (simp add: tcb_relation_def) + show ?thesis + unfolding arch_thread_set_def archThreadSet_def + by (corres' \(rotate_tac, erule tcb_rel) | + (rule ball_tcb_cte_casesI; simp) | + simp add: exst_same_def tcb_cap_cases_def\ + corres: getObject_TCB_corres setObject_update_TCB_corres') +qed + +lemma archThreadSet_VCPU_None_corres[corres]: + "t = t' \ corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_set (tcb_vcpu_update Map.empty) t) (archThreadSet (atcbVCPUPtr_update Map.empty) t')" + apply simp + apply (rule archThreadSet_corres) + apply (simp add: arch_tcb_relation_def) + done + +lemmas corresK_as_user' = + asUser_corres'[atomized, THEN corresK_lift_rule, THEN mp] + +crunch typ_at'[wp]: vcpuInvalidateActive "\s. P (typ_at' T p s)" + +lemma getVCPU_wp: + "\\s. \ko. ko_at' (ko::vcpu) p s \ Q ko s\ getObject p \Q\" + by (clarsimp simp: getObject_def split_def loadObject_default_def + in_magnitude_check pageBits_def vcpuBits_def + in_monad valid_def obj_at'_def objBits_simps) + +lemma imp_drop_strg: + "Q \ P \ Q" + by simp + +lemma dissociateVCPUTCB_corres[corres]: + "\ v' = v; t' = t \ \ + corres dc (obj_at (\ko. \tcb. ko = TCB tcb \ tcb_vcpu (tcb_arch tcb) = Some v) t and + obj_at (\ko. \vcpu. ko = ArchObj (VCPU vcpu) \ vcpu_tcb vcpu = Some t) v and + pspace_aligned and pspace_distinct) + (no_0_obj') + (dissociate_vcpu_tcb v t) (dissociateVCPUTCB v' t')" + unfolding dissociate_vcpu_tcb_def dissociateVCPUTCB_def sanitiseRegister_def sanitise_register_def + apply (clarsimp simp: bind_assoc when_fail_assert opt_case_when) + apply (corres corres: getObject_vcpu_corres setObject_VCPU_corres asUser_corres' + simp: vcpu_relation_def archThreadSet_def tcb_ko_at' tcb_at_typ_at') + apply (wpsimp simp: tcb_at_typ_at' archThreadGet_def + wp: get_vcpu_wp getVCPU_wp arch_thread_get_wp getObject_tcb_wp)+ + apply (clarsimp simp: obj_at_def is_tcb in_omonad) + apply normalise_obj_at' + apply (rule context_conjI) + apply (rule vcpu_at_cross; assumption?) 
+ apply (clarsimp simp: obj_at_def) + apply (clarsimp simp: obj_at_def) + apply (rename_tac tcb vcpu) + apply (prop_tac "ko_at (TCB tcb) t s", clarsimp simp: obj_at_def) + apply (drule (3) ko_tcb_cross) + apply (prop_tac "ako_at (VCPU vcpu) v s", clarsimp simp: obj_at_def) + apply (drule (3) ko_vcpu_cross) + apply normalise_obj_at' + apply (clarsimp simp: tcb_relation_def arch_tcb_relation_def vcpu_relation_def) + done + +lemma sym_refs_tcb_vcpu: + "\ ko_at (TCB tcb) t s; tcb_vcpu (tcb_arch tcb) = Some v; sym_refs (state_hyp_refs_of s) \ \ + \vcpu. ko_at (ArchObj (VCPU vcpu)) v s \ vcpu_tcb vcpu = Some t" + apply (drule (1) hyp_sym_refs_obj_atD) + apply (clarsimp simp: obj_at_def hyp_refs_of_def) + apply (rename_tac ko) + apply (case_tac ko; simp add: tcb_vcpu_refs_def split: option.splits) + apply (rename_tac koa) + apply (case_tac koa; simp add: vcpu_tcb_refs_def split: option.splits) + done + +lemma fpuThreadDelete_corres[corres]: + "t' = t \ corres dc \ \ (fpu_thread_delete t) (fpuThreadDelete t')" + by (corres simp: fpu_thread_delete_def fpuThreadDelete_def) + +crunches fpu_thread_delete + for aligned[wp]: pspace_aligned + and distinct[wp]: pspace_distinct + and obj_at[wp]: "\s. P (obj_at Q p s)" + +crunches fpuThreadDelete + for obj_at'[wp]: "\s. P (obj_at' Q p s)" + and no_0_obj'[wp]: no_0_obj' + +lemma prepareThreadDelete_corres[corres]: + "t' = t \ + corres dc (invs and tcb_at t) no_0_obj' + (prepare_thread_delete t) (prepareThreadDelete t')" + apply (simp add: prepare_thread_delete_def prepareThreadDelete_def) + apply (corres corres: archThreadGet_corres + wp: arch_thread_get_wp getObject_tcb_wp hoare_vcg_op_lift + simp: archThreadGet_def + | corres_cases_both)+ + apply (fastforce dest: sym_refs_tcb_vcpu simp: obj_at_def) + apply (clarsimp simp: tcb_ko_at') + done + +end + +lemma no_refs_simple_strg': + "st_tcb_at' simple' t s' \ P {} \ st_tcb_at' (\st. P (tcb_st_refs_of' st)) t s'" + by (fastforce elim!: pred_tcb'_weakenE)+ + +crunch it[wp]: cancelSignal "\s. P (ksIdleThread s)" + (wp: crunch_wps simp: crunch_simps) + +lemma (in delete_one_conc_pre) cancelIPC_it[wp]: + "\\s. P (ksIdleThread s)\ + cancelIPC t + \\_ s. P (ksIdleThread s)\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def) + apply (wp hoare_drop_imps delete_one_it | wpc | simp add:if_apply_def2 Fun.comp_def)+ + done + +crunch ct_idle_or_in_cur_domain'[wp]: tcbSchedDequeue ct_idle_or_in_cur_domain' + (wp: crunch_wps) + +lemma asUser_sch_act_simple[wp]: + "\sch_act_simple\ asUser s t \\_. sch_act_simple\" + unfolding sch_act_simple_def + apply (rule asUser_nosch) + done + +lemma (in delete_one_conc) suspend_invs'[wp]: + "\invs' and sch_act_simple and tcb_at' t and (\s. t \ ksIdleThread s)\ + ThreadDecls_H.suspend t \\rv. invs'\" + apply (simp add: suspend_def) + apply (wpsimp wp: sts_invs_minor' gts_wp' simp: updateRestartPC_def + | strengthen no_refs_simple_strg')+ + apply (rule_tac Q="\_. invs' and sch_act_simple and st_tcb_at' simple' t + and (\s. t \ ksIdleThread s)" + in hoare_post_imp) + apply clarsimp + apply wpsimp + apply (fastforce elim: pred_tcb'_weakenE) + done + +lemma (in delete_one_conc_pre) suspend_tcb'[wp]: + "\tcb_at' t'\ ThreadDecls_H.suspend t \\rv. tcb_at' t'\" + apply (simp add: suspend_def) + apply (wpsimp simp: updateRestartPC_def) + done + +lemma (in delete_one_conc_pre) suspend_sch_act_simple[wp]: + "\sch_act_simple\ + ThreadDecls_H.suspend t \\rv. 
sch_act_simple\" + apply (simp add: suspend_def when_def updateRestartPC_def) + apply (wp cancelIPC_sch_act_simple | simp add: unless_def + | rule sch_act_simple_lift)+ + apply (simp add: updateRestartPC_def) + apply (rule asUser_nosch) + apply wpsimp+ + done + +lemma (in delete_one_conc) suspend_objs': + "\invs' and sch_act_simple and tcb_at' t and (\s. t \ ksIdleThread s)\ + suspend t \\rv. valid_objs'\" + apply (rule_tac Q="\_. invs'" in hoare_strengthen_post) + apply (wp suspend_invs') + apply fastforce + done + +lemma (in delete_one_conc_pre) suspend_st_tcb_at': + assumes x[simp]: "\st. simple' st \ P st" shows + "\st_tcb_at' P t\ + suspend thread + \\rv. st_tcb_at' P t\" + apply (simp add: suspend_def) + unfolding updateRestartPC_def + apply (wp sts_st_tcb_at'_cases threadSet_pred_tcb_no_state + cancelIPC_st_tcb_at hoare_drop_imps + | simp)+ + apply clarsimp + done + +lemmas (in delete_one_conc_pre) suspend_makes_simple' = + suspend_st_tcb_at' [where P=simple', simplified] + +lemma suspend_makes_inactive: + "\K (t = t')\ suspend t \\rv. st_tcb_at' ((=) Inactive) t'\" + apply (cases "t = t'", simp_all) + apply (simp add: suspend_def unless_def) + apply (wp threadSet_pred_tcb_no_state setThreadState_st_tcb | simp)+ + done + +declare threadSet_sch_act_sane [wp] +declare sts_sch_act_sane [wp] + +lemma tcbSchedEnqueue_sch_act_not_ct[wp]: + "\\s. sch_act_not (ksCurThread s) s\ tcbSchedEnqueue t \\_ s. sch_act_not (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps, wp, simp) + +lemma sts_sch_act_not_ct[wp]: + "\\s. sch_act_not (ksCurThread s) s\ + setThreadState st t \\_ s. sch_act_not (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps, wp, simp) + +text \Cancelling all IPC in an endpoint or notification object\ + +lemma ep_cancel_corres_helper: + "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs and valid_queues + and pspace_aligned and pspace_distinct) + (valid_objs' and sym_heap_sched_pointers and valid_sched_pointers) + (mapM_x (\t. do + y \ set_thread_state t Structures_A.Restart; + tcb_sched_action tcb_sched_enqueue t + od) list) + (mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) list)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (rule_tac S="{t. (fst t = snd t) \ fst t \ set list}" + in corres_mapM_x) + apply clarsimp + apply (rule corres_guard_imp) + apply (subst bind_return_unit, rule corres_split[OF _ tcbSchedEnqueue_corres]) + apply simp + apply (rule corres_guard_imp [OF setThreadState_corres]) + apply simp + apply (simp add: valid_tcb_state_def) + apply simp + apply simp + apply (wpsimp wp: sts_st_tcb_at') + apply (wpsimp wp: sts_valid_objs' | strengthen valid_objs'_valid_tcbs')+ + apply fastforce + apply (wpsimp wp: hoare_vcg_const_Ball_lift set_thread_state_runnable_valid_queues + sts_st_tcb_at' sts_valid_objs' + simp: valid_tcb_state'_def)+ + done + +crunches set_simple_ko + for ready_qs_distinct[wp]: ready_qs_distinct + and in_correct_ready_q[wp]: in_correct_ready_q + (rule: ready_qs_distinct_lift wp: crunch_wps) + +lemma ep_cancel_corres: + "corres dc (invs and valid_sched and ep_at ep) (invs' and ep_at' ep) + (cancel_all_ipc ep) (cancelAllIPC ep)" +proof - + have P: + "\list. + corres dc (\s. (\t \ set list. tcb_at t s) \ valid_pspace s \ ep_at ep s + \ valid_etcbs s \ weak_valid_sched_action s \ valid_queues s) + (\s. 
(\t \ set list. tcb_at' t s) \ valid_pspace' s + \ ep_at' ep s \ weak_sch_act_wf (ksSchedulerAction s) s + \ valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s) + (do x \ set_endpoint ep Structures_A.IdleEP; + x \ mapM_x (\t. do + y \ set_thread_state t Structures_A.Restart; + tcb_sched_action tcb_sched_enqueue t + od) list; + reschedule_required + od) + (do x \ setEndpoint ep IdleEP; + x \ mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) list; + rescheduleRequired + od)" + apply (rule corres_underlying_split) + apply (rule corres_guard_imp [OF setEndpoint_corres]) + apply (simp add: ep_relation_def)+ + apply (rule corres_split[OF _ rescheduleRequired_corres]) + apply (rule ep_cancel_corres_helper) + apply (rule mapM_x_wp') + apply (wp weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ + apply (rule_tac R="\_ s. \x\set list. tcb_at' x s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_add) + apply (rule mapM_x_wp') + apply ((wpsimp wp: hoare_vcg_const_Ball_lift mapM_x_wp' sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+)[3] + apply fastforce + apply (wp hoare_vcg_const_Ball_lift set_ep_valid_objs' + | (clarsimp simp: valid_ep'_def) + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def + | strengthen valid_objs'_valid_tcbs'))+ + done + + show ?thesis + apply (simp add: cancel_all_ipc_def cancelAllIPC_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ep_sp']) + apply (rule corres_guard_imp [OF getEndpoint_corres], simp+) + apply (case_tac epa, simp_all add: ep_relation_def + get_ep_queue_def) + apply (rule corres_guard_imp [OF P] + | clarsimp simp: valid_obj_def valid_ep_def + valid_obj'_def valid_ep'_def + invs_valid_pspace + valid_sched_def valid_sched_action_def + | erule obj_at_valid_objsE + | drule ko_at_valid_objs' + | rule conjI | clarsimp simp: invs'_def valid_state'_def)+ + done +qed + +(* FIXME move *) +lemma set_ntfn_tcb_obj_at' [wp]: + "\obj_at' (P::tcb \ bool) t\ + setNotification ntfn v + \\_. obj_at' P t\" + apply (clarsimp simp: setNotification_def, wp) + done + +lemma cancelAllSignals_corres: + "corres dc (invs and valid_sched and ntfn_at ntfn) (invs' and ntfn_at' ntfn) + (cancel_all_signals ntfn) (cancelAllSignals ntfn)" + apply (simp add: cancel_all_signals_def cancelAllSignals_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ntfn_sp']) + apply (rule corres_guard_imp [OF getNotification_corres]) + apply simp+ + apply (case_tac "ntfn_obj ntfna", simp_all add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setNotification_corres]) + apply (simp add: ntfn_relation_def) + apply (rule corres_split[OF _ rescheduleRequired_corres]) + apply (rule ep_cancel_corres_helper) + apply (wp mapM_x_wp'[where 'b="det_ext state"] + weak_sch_act_wf_lift_linear + set_thread_state_runnable_weak_valid_sched_action + | simp)+ + apply (rename_tac list) + apply (rule_tac R="\_ s. (\x\set list. 
tcb_at' x s) \ valid_objs' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_objs' s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_add) + apply (rule mapM_x_wp') + apply (rule hoare_name_pre_state) + apply (wpsimp wp: hoare_vcg_const_Ball_lift sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+ + apply (wp hoare_vcg_const_Ball_lift set_ntfn_aligned' set_ntfn_valid_objs' + weak_sch_act_wf_lift_linear + | simp)+ + apply (clarsimp simp: invs'_def valid_state'_def invs_valid_pspace valid_obj_def valid_ntfn_def + invs_weak_sch_act_wf valid_ntfn'_def valid_pspace'_def valid_sched_def + valid_sched_action_def valid_obj'_def + | erule obj_at_valid_objsE | drule ko_at_valid_objs' + | fastforce)+ + done + +lemma ep'_Idle_case_helper: + "(case ep of IdleEP \ a | _ \ b) = (if (ep = IdleEP) then a else b)" + by (cases ep, simp_all) + +lemma rescheduleRequired_notresume: + "\\s. ksSchedulerAction s \ ResumeCurrentThread\ + rescheduleRequired \\_ s. ksSchedulerAction s = ChooseNewThread\" + proof - + have ssa: "\\\ setSchedulerAction ChooseNewThread + \\_ s. ksSchedulerAction s = ChooseNewThread\" + by (simp add: setSchedulerAction_def | wp)+ + show ?thesis + by (simp add: rescheduleRequired_def, wp ssa) + qed + +lemma setThreadState_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + setThreadState st t + \\_ s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + (is "\?PRE\ _ \_\") +proof - + have nrct: + "\\s. ksSchedulerAction s \ ResumeCurrentThread\ + rescheduleRequired + \\_ s. ksSchedulerAction s \ ResumeCurrentThread\" + by (rule hoare_strengthen_post [OF rescheduleRequired_notresume], simp) + show ?thesis + apply (simp add: setThreadState_def) + apply (wpsimp wp: hoare_vcg_imp_lift [OF nrct]) + apply (rule_tac Q="\_. ?PRE" in hoare_post_imp) + apply (clarsimp) + apply (rule hoare_convert_imp [OF threadSet_nosch threadSet_ct]) + apply assumption + done +qed + +lemma tcbSchedEnqueue_valid_pspace'[wp]: + "tcbSchedEnqueue tcbPtr \valid_pspace'\" + unfolding valid_pspace'_def + by wpsimp + +lemma cancel_all_invs'_helper: + "\all_invs_but_sym_refs_ct_not_inQ' and (\s. \x \ set q. tcb_at' x s) + and (\s. sym_refs (\x. if x \ set q then {r \ state_refs_of' s x. snd r = TCBBound} + else state_refs_of' s x) + \ sym_refs (\x. state_hyp_refs_of' s x) + \ (\x \ set q. ex_nonz_cap_to' x s))\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\rv. all_invs_but_ct_not_inQ'\" + apply (rule mapM_x_inv_wp2) + apply clarsimp + apply (rule hoare_pre) + apply (wp valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + hoare_vcg_const_Ball_lift untyped_ranges_zero_lift sts_st_tcb' sts_valid_objs' + | simp add: cteCaps_of_def o_def)+ + apply (unfold fun_upd_apply Invariants_H.tcb_st_refs_of'_simps) + apply clarsimp + apply (intro conjI) + apply (clarsimp simp: valid_tcb_state'_def global'_no_ex_cap + elim!: rsubst[where P=sym_refs] + dest!: set_mono_suffix + intro!: ext + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def))+ + done + +lemma ep_q_refs_max: + "\ ko_at' r p s; sym_refs (state_refs_of' s); r \ IdleEP \ + \ (state_refs_of' s p \ (set (epQueue r) \ {EPSend, EPRecv})) + \ (\x\set (epQueue r). \ntfnptr. 
state_refs_of' s x \ + {(p, TCBBlockedSend), (p, TCBBlockedRecv), (ntfnptr, TCBBound)})" + apply (frule(1) sym_refs_ko_atD') + apply (drule ko_at_state_refs_ofD') + apply (case_tac r) + apply (clarsimp simp: st_tcb_at_refs_of_rev' tcb_bound_refs'_def + | rule conjI | drule(1) bspec | drule st_tcb_at_state_refs_ofD' + | case_tac ntfnptr)+ + done + +lemma rescheduleRequired_invs'[wp]: + "rescheduleRequired \invs'\" + apply (simp add: rescheduleRequired_def) + apply (wpsimp wp: ssa_invs') + done + +lemma invs_rct_ct_activatable': + "\ invs' s; ksSchedulerAction s = ResumeCurrentThread \ + \ st_tcb_at' activatable' (ksCurThread s) s" + by (simp add: invs'_def valid_state'_def ct_in_state'_def) + +lemma not_in_epQueue: + assumes ko_at: "ko_at' r ep_ptr s" and + srefs: "sym_refs (state_refs_of' s)" and + nidle: "r \ IdleEP" and + st_act: "st_tcb_at' simple' t s" + shows "t \ set (epQueue r)" +proof + assume t_epQ: "t \ set (epQueue r)" + + with ko_at nidle + have "(t, EPRecv) \ state_refs_of' s ep_ptr + \ (t, EPSend) \ state_refs_of' s ep_ptr" + by - (drule ko_at_state_refs_ofD', case_tac r, (clarsimp)+) + + with ko_at srefs + have "(ep_ptr, TCBBlockedRecv) \ state_refs_of' s t + \ (ep_ptr, TCBBlockedSend) \ state_refs_of' s t" + apply - + apply (frule(1) sym_refs_ko_atD') + apply (drule ko_at_state_refs_ofD') + apply (case_tac r) + apply (clarsimp simp: st_tcb_at_refs_of_rev' + | drule(1) bspec | drule st_tcb_at_state_refs_ofD')+ + done + + with ko_at have "st_tcb_at' (Not \ simple') t s" + apply - + apply (erule disjE) + apply (drule state_refs_of'_elemD) + apply (simp add: st_tcb_at_refs_of_rev') + apply (erule pred_tcb'_weakenE) + apply (clarsimp) + apply (drule state_refs_of'_elemD) + apply (simp add: st_tcb_at_refs_of_rev') + apply (erule pred_tcb'_weakenE) + apply (clarsimp) + done + + with st_act show False + by (rule pred_tcb'_neq_contra) simp +qed + +lemma ct_not_in_epQueue: + assumes "ko_at' r ep_ptr s" and + "sym_refs (state_refs_of' s)" and + "r \ IdleEP" and + "ct_in_state' simple' s" + shows "ksCurThread s \ set (epQueue r)" + using assms unfolding ct_in_state'_def + by (rule not_in_epQueue) + +lemma not_in_ntfnQueue: + assumes ko_at: "ko_at' r ntfn_ptr s" and + srefs: "sym_refs (state_refs_of' s)" and + nidle: "ntfnObj r \ IdleNtfn \ (\b m. ntfnObj r \ ActiveNtfn b)" and + st_act: "st_tcb_at' simple' t s" + shows "t \ set (ntfnQueue (ntfnObj r))" +proof + assume t_epQ: "t \ set (ntfnQueue (ntfnObj r))" + + with ko_at nidle + have "(t, NTFNSignal) \ state_refs_of' s ntfn_ptr" + by - (drule ko_at_state_refs_ofD', case_tac "ntfnObj r", (clarsimp)+) + with ko_at srefs + have "(ntfn_ptr, TCBSignal) \ state_refs_of' s t" + apply - + apply (frule(1) sym_refs_ko_atD') + apply (drule ko_at_state_refs_ofD') + apply (case_tac "ntfnObj r") + apply (clarsimp simp: st_tcb_at_refs_of_rev' ntfn_bound_refs'_def + | drule st_tcb_at_state_refs_ofD')+ + apply (drule_tac x="(t, NTFNSignal)" in bspec, clarsimp) + apply (clarsimp simp: st_tcb_at_refs_of_rev' dest!: st_tcb_at_state_refs_ofD') + done + + with ko_at have "st_tcb_at' (Not \ simple') t s" + apply - + apply (drule state_refs_of'_elemD) + apply (simp add: st_tcb_at_refs_of_rev') + apply (erule pred_tcb'_weakenE) + apply (clarsimp) + done + + with st_act show False + by (rule pred_tcb'_neq_contra) simp +qed + +lemma ct_not_in_ntfnQueue: + assumes ko_at: "ko_at' r ntfn_ptr s" and + srefs: "sym_refs (state_refs_of' s)" and + nidle: "ntfnObj r \ IdleNtfn \ (\b m. 
ntfnObj r \ ActiveNtfn b)" and + st_act: "ct_in_state' simple' s" + shows "ksCurThread s \ set (ntfnQueue (ntfnObj r))" + using assms unfolding ct_in_state'_def + by (rule not_in_ntfnQueue) + +crunch valid_pspace'[wp]: rescheduleRequired "valid_pspace'" +crunch valid_global_refs'[wp]: rescheduleRequired "valid_global_refs'" +crunch valid_machine_state'[wp]: rescheduleRequired "valid_machine_state'" + +lemma sch_act_wf_weak[elim!]: + "sch_act_wf sa s \ weak_sch_act_wf sa s" + by (case_tac sa, (simp add: weak_sch_act_wf_def)+) + +lemma rescheduleRequired_all_invs_but_ct_not_inQ: + "\all_invs_but_ct_not_inQ'\ rescheduleRequired \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp rescheduleRequired_ct_not_inQ + valid_irq_node_lift valid_irq_handlers_lift'' + irqs_masked_lift cur_tcb_lift + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def)+ + apply (auto simp: sch_act_wf_weak) + done + +lemma cancelAllIPC_invs'[wp]: + "\invs'\ cancelAllIPC ep_ptr \\rv. invs'\" + apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (wp rescheduleRequired_all_invs_but_ct_not_inQ + cancel_all_invs'_helper hoare_vcg_const_Ball_lift + valid_global_refs_lift' valid_arch_state_lift' + valid_irq_node_lift ssa_invs' sts_sch_act' + irqs_masked_lift + | simp only: sch_act_wf.simps forM_x_def | simp)+ + prefer 2 + apply assumption + apply (rule hoare_strengthen_post [OF get_ep_sp']) + apply (rename_tac rv s) + apply (clarsimp simp: invs'_def valid_state'_def valid_ep'_def) + apply (frule obj_at_valid_objs', fastforce) + apply (clarsimp simp: valid_obj'_def) + apply (rule conjI) + apply (case_tac rv, simp_all add: valid_ep'_def)[1] + apply (rule conjI[rotated]) + apply (drule(1) sym_refs_ko_atD') + apply (case_tac rv, simp_all add: st_tcb_at_refs_of_rev')[1] + apply (clarsimp elim!: if_live_state_refsE + | drule(1) bspec | drule st_tcb_at_state_refs_ofD')+ + apply (drule(2) ep_q_refs_max) + apply (erule delta_sym_refs) + apply (clarsimp dest!: symreftype_inverse' split: if_split_asm | drule(1) bspec subsetD)+ + done + +lemma cancelAllSignals_invs'[wp]: + "\invs'\ cancelAllSignals ntfn \\rv. 
invs'\" + apply (simp add: cancelAllSignals_def) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj ntfna", simp_all) + apply (wp, simp) + apply (wp, simp) + apply (rule hoare_pre) + apply (wp rescheduleRequired_all_invs_but_ct_not_inQ + cancel_all_invs'_helper hoare_vcg_const_Ball_lift + valid_irq_node_lift ssa_invs' irqs_masked_lift + | simp only: sch_act_wf.simps)+ + apply (clarsimp simp: invs'_def valid_state'_def valid_ntfn'_def) + apply (frule obj_at_valid_objs', clarsimp) + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) + apply (drule(1) sym_refs_ko_atD') + apply (rule conjI, clarsimp elim!: if_live_state_refsE) + apply (rule conjI[rotated]) + apply (clarsimp elim!: if_live_state_refsE) + apply (drule_tac x="(x, NTFNSignal)" in bspec) + apply (clarsimp simp: st_tcb_at_refs_of_rev')+ + apply (drule st_tcb_at_state_refs_ofD') + apply clarsimp + apply (erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def) + apply (drule_tac x="(x, NTFNSignal)" in bspec) + apply (clarsimp simp: st_tcb_at_refs_of_rev')+ + apply (drule st_tcb_at_state_refs_ofD') + apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def) + done + +lemma cancelAllIPC_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllIPC ep \\rv. valid_objs'\" + apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) + apply (rule hoare_pre) + apply (wp set_ep_valid_objs' setSchedulerAction_valid_objs') + apply (rule_tac Q="\_ s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ (\x\set (epQueue ep). tcb_at' x s)" + in hoare_post_imp) + apply simp + apply (simp add: Ball_def) + apply (wp mapM_x_wp' sts_valid_objs' + hoare_vcg_all_lift hoare_vcg_const_imp_lift)+ + apply simp + apply (simp add: valid_tcb_state'_def) + apply (wp set_ep_valid_objs' hoare_vcg_all_lift hoare_vcg_const_imp_lift) + apply (clarsimp) + apply (frule(1) ko_at_valid_objs') + apply simp + apply (clarsimp simp: valid_obj'_def valid_ep'_def) + apply (case_tac epa, simp_all) + done + +lemma cancelAllSignals_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllSignals ntfn \\rv. valid_objs'\" + apply (simp add: cancelAllSignals_def) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj ntfna", simp_all) + apply (wp, simp) + apply (wp, simp) + apply (rename_tac list) + apply (rule_tac Q="\rv s. valid_objs' s \ (\x\set list. tcb_at' x s)" + in hoare_post_imp) + apply (simp add: valid_ntfn'_def) + apply (simp add: Ball_def) + apply (wp setSchedulerAction_valid_objs' mapM_x_wp' + sts_valid_objs' hoare_vcg_all_lift hoare_vcg_const_imp_lift + | simp)+ + apply (simp add: valid_tcb_state'_def) + apply (wp set_ntfn_valid_objs' hoare_vcg_all_lift hoare_vcg_const_imp_lift) + apply clarsimp + apply (frule(1) ko_at_valid_objs') + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) + done + +lemma cancelAllIPC_st_tcb_at: + assumes x[simp]: "P Restart" shows + "\st_tcb_at' P t\ cancelAllIPC epptr \\rv. 
st_tcb_at' P t\" + unfolding cancelAllIPC_def + by (wp ep'_cases_weak_wp mapM_x_wp' sts_st_tcb_at'_cases | clarsimp)+ + +lemmas cancelAllIPC_makes_simple[wp] = + cancelAllIPC_st_tcb_at [where P=simple', simplified] + +lemma cancelAllSignals_st_tcb_at: + assumes x[simp]: "P Restart" shows + "\st_tcb_at' P t\ cancelAllSignals epptr \\rv. st_tcb_at' P t\" + unfolding cancelAllSignals_def + by (wp ntfn'_cases_weak_wp mapM_x_wp' sts_st_tcb_at'_cases | clarsimp)+ + +lemmas cancelAllSignals_makes_simple[wp] = + cancelAllSignals_st_tcb_at [where P=simple', simplified] + +lemma threadSet_not_tcb[wp]: + "\ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\ + threadSet f t + \\rv. ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\" + by (clarsimp simp: threadSet_def valid_def getObject_def + setObject_def in_monad loadObject_default_def + ko_wp_at'_def split_def in_magnitude_check + objBits_simps' updateObject_default_def + ps_clear_upd projectKO_opt_tcb) + +lemma setThreadState_not_tcb[wp]: + "\ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\ + setThreadState st t + \\rv. ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\" + by (wpsimp wp: isRunnable_inv threadGet_wp hoare_drop_imps + simp: setThreadState_def setQueue_def + rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + unless_def bitmap_fun_defs)+ + +lemma tcbSchedEnqueue_unlive: + "\ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p + and tcb_at' t\ + tcbSchedEnqueue t + \\_. ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) + apply (wp | simp add: setQueue_def bitmap_fun_defs)+ + done + +lemma cancelAll_unlive_helper: + "\\s. (\x\set xs. tcb_at' x s) \ + ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p s\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) xs + \\rv. ko_wp_at' (Not \ live') p\" + apply (rule hoare_strengthen_post) + apply (rule mapM_x_wp') + apply (rule hoare_pre) + apply (wp tcbSchedEnqueue_unlive hoare_vcg_const_Ball_lift) + apply clarsimp + apply (clarsimp elim!: ko_wp_at'_weakenE) + done + +context begin interpretation Arch . (*FIXME: arch_split*) +lemma setObject_ko_wp_at': + fixes v :: "'a :: pspace_storable" + assumes x: "\v :: 'a. updateObject v = updateObject_default v" + assumes n: "\v :: 'a. objBits v = n" + assumes v: "(1 :: machine_word) < 2 ^ n" + shows + "\\s. P (injectKO v)\ setObject p v \\rv. ko_wp_at' P p\" + by (clarsimp simp: setObject_def valid_def in_monad + ko_wp_at'_def x split_def n + updateObject_default_def + objBits_def[symmetric] ps_clear_upd + in_magnitude_check v) + +lemma threadSet_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + threadSet f t + \\rv. ko_wp_at' (Not \ live') p\" + by (clarsimp simp: threadSet_def valid_def getObject_def + setObject_def in_monad loadObject_default_def + ko_wp_at'_def split_def in_magnitude_check + objBits_simps' updateObject_default_def + ps_clear_upd) + +lemma tcbSchedEnqueue_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + tcbSchedEnqueue t + \\_. 
ko_wp_at' (Not \ live') p\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def) + apply (wpsimp wp: threadGet_wp threadSet_unlive_other simp: bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (frule (1) tcbQueueHead_ksReadyQueues) + apply (drule_tac x=p in spec) + apply (fastforce dest!: inQ_implies_tcbQueueds_of + simp: tcbQueueEmpty_def ko_wp_at'_def opt_pred_def opt_map_def live'_def + split: option.splits) + done + +lemma rescheduleRequired_unlive[wp]: + "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ + rescheduleRequired + \\_. ko_wp_at' (Not \ live') p\" + supply comp_apply[simp del] + unfolding rescheduleRequired_def + apply (wpsimp wp: tcbSchedEnqueue_unlive_other) + done + +lemmas setEndpoint_ko_wp_at' + = setObject_ko_wp_at'[where 'a=endpoint, folded setEndpoint_def, simplified] + +lemma cancelAllIPC_unlive: + "\valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s)\ + cancelAllIPC ep \\rv. ko_wp_at' (Not \ live') ep\" + apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) + apply (rule hoare_pre) + apply (wp cancelAll_unlive_helper setEndpoint_ko_wp_at' + hoare_vcg_const_Ball_lift rescheduleRequired_unlive + mapM_x_wp' + | simp add: objBits_simps')+ + apply (clarsimp simp: projectKO_opt_tcb) + apply (frule(1) obj_at_valid_objs') + apply (intro conjI impI) + apply (clarsimp simp: valid_obj'_def valid_ep'_def obj_at'_def pred_tcb_at'_def ko_wp_at'_def + live'_def + split: endpoint.split_asm)+ + done + +lemma cancelAllSignals_unlive: + "\\s. valid_objs' s \ sch_act_wf (ksSchedulerAction s) s + \ obj_at' (\ko. ntfnBoundTCB ko = None) ntfnptr s\ + cancelAllSignals ntfnptr \\rv. ko_wp_at' (Not \ live') ntfnptr\" + apply (simp add: cancelAllSignals_def) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj ntfn", simp_all add: setNotification_def) + apply wp + apply (fastforce simp: obj_at'_real_def live'_def + dest: obj_at_conj' + elim: ko_wp_at'_weakenE) + apply wp + apply (fastforce simp: obj_at'_real_def live'_def + dest: obj_at_conj' + elim: ko_wp_at'_weakenE) + apply (wp rescheduleRequired_unlive) + apply (wp cancelAll_unlive_helper) + apply ((wp mapM_x_wp' setObject_ko_wp_at' hoare_vcg_const_Ball_lift)+, + simp_all add: objBits_simps', simp_all) + apply (fold setNotification_def, wp) + apply (intro conjI[rotated]) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (fastforce simp: projectKO_opt_tcb ko_wp_at'_def valid_obj'_def valid_ntfn'_def + obj_at'_def live'_def)+ + done + +crunch ep_at'[wp]: tcbSchedEnqueue "ep_at' epptr" + (simp: unless_def) + +declare if_cong[cong] + +lemma insert_eqD: + "A = insert a B \ a \ A" + by blast + +lemma cancelBadgedSends_filterM_helper': + notes if_cong[cong del] + shows + "\ys. + \\s. all_invs_but_sym_refs_ct_not_inQ' s + \ ex_nonz_cap_to' epptr s \ ep_at' epptr s + \ sym_refs ((state_refs_of' s) (epptr := set (xs @ ys) \ {EPSend})) + \ (\y \ set (xs @ ys). state_refs_of' s y = {(epptr, TCBBlockedSend)} + \ {r \ state_refs_of' s y. snd r = TCBBound}) + \ sym_refs (state_hyp_refs_of' s) + \ distinct (xs @ ys)\ + filterM (\t. 
do st \ getThreadState t; + if blockingIPCBadge st = badge then + do y \ setThreadState Structures_H.thread_state.Restart t; + y \ tcbSchedEnqueue t; + return False + od + else return True + od) xs + \\rv s. all_invs_but_sym_refs_ct_not_inQ' s + \ ex_nonz_cap_to' epptr s \ ep_at' epptr s + \ sym_refs ((state_refs_of' s) (epptr := (set rv \ set ys) \ {EPSend})) + \ sym_refs (state_hyp_refs_of' s) + \ (\y \ set ys. state_refs_of' s y = {(epptr, TCBBlockedSend)} + \ {r \ state_refs_of' s y. snd r = TCBBound}) + \ distinct rv \ distinct (xs @ ys) \ set rv \ set xs \ (\x \ set xs. tcb_at' x s)\" + apply (rule_tac xs=xs in rev_induct) + apply clarsimp + apply wp + apply clarsimp + apply (clarsimp simp: filterM_append bind_assoc simp del: set_append distinct_append) + apply (drule spec, erule bind_wp_fwd) + apply (rule bind_wp [OF _ gts_inv']) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift hoare_vcg_const_Ball_lift sts_sch_act' + sch_act_wf_lift valid_irq_handlers_lift'' cur_tcb_lift irqs_masked_lift + sts_st_tcb' untyped_ranges_zero_lift + | clarsimp simp: cteCaps_of_def o_def)+ + apply (frule insert_eqD, frule state_refs_of'_elemD) + apply (clarsimp simp: valid_tcb_state'_def st_tcb_at_refs_of_rev') + apply (frule pred_tcb_at') + apply (rule conjI[rotated], blast) + apply (clarsimp simp: valid_pspace'_def cong: conj_cong) + apply (intro conjI) + apply (fastforce simp: valid_tcb'_def dest!: st_tcb_ex_cap'') + apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + apply (erule delta_sym_refs) + by (fastforce elim!: obj_atE' + simp: state_refs_of'_def tcb_bound_refs'_def subsetD symreftype_inverse' + split: if_split_asm)+ + +lemmas cancelBadgedSends_filterM_helper + = spec [where x=Nil, OF cancelBadgedSends_filterM_helper', simplified] + +lemma cancelBadgedSends_invs[wp]: + notes if_cong[cong del] + shows + "\invs'\ cancelBadgedSends epptr badge \\rv. invs'\" + apply (simp add: cancelBadgedSends_def) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp'], rename_tac ep) + apply (case_tac ep, simp_all) + apply ((wp | simp)+)[2] + apply (subst bind_assoc [where g="\_. rescheduleRequired", + symmetric])+ + apply (rule bind_wp + [OF rescheduleRequired_all_invs_but_ct_not_inQ]) + apply (simp add: list_case_return cong: list.case_cong) + apply (rule hoare_pre, wp valid_irq_node_lift irqs_masked_lift) + apply simp + apply (rule hoare_strengthen_post, + rule cancelBadgedSends_filterM_helper[where epptr=epptr]) + apply (clarsimp simp: ep_redux_simps3 fun_upd_def[symmetric]) + apply (clarsimp simp add: valid_ep'_def split: list.split) + apply blast + apply (wp valid_irq_node_lift irqs_masked_lift | wp (once) sch_act_sane_lift)+ + apply (clarsimp simp: invs'_def valid_state'_def state_hyp_refs_of'_ep + valid_ep'_def fun_upd_def[symmetric] + obj_at'_weakenE[OF _ TrueI]) + apply (frule obj_at_valid_objs', clarsimp) + apply (clarsimp simp: valid_obj'_def valid_ep'_def) + apply (frule if_live_then_nonz_capD', simp add: obj_at'_real_def) + apply (clarsimp simp: live'_def) + apply (frule(1) sym_refs_ko_atD') + apply (clarsimp simp add: fun_upd_idem + st_tcb_at_refs_of_rev') + apply (drule (1) bspec, drule st_tcb_at_state_refs_ofD', clarsimp) + apply (fastforce simp: set_eq_subset tcb_bound_refs'_def) + done + +crunch state_refs_of[wp]: tcb_sched_action "\s. 
P (state_refs_of s)" + +lemma setEndpoint_valid_tcbs'[wp]: + "setEndpoint ePtr val \valid_tcbs'\" + unfolding setEndpoint_def + apply (wpsimp wp: setObject_valid_tcbs'[where P=\]) + apply (clarsimp simp: updateObject_default_def monad_simps) + apply fastforce + done + +lemma cancelBadgedSends_corres: + "corres dc (invs and valid_sched and ep_at epptr) (invs' and ep_at' epptr) + (cancel_badged_sends epptr bdg) (cancelBadgedSends epptr bdg)" + apply (simp add: cancel_badged_sends_def cancelBadgedSends_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres _ get_simple_ko_sp get_ep_sp', + where Q="invs and valid_sched" and Q'=invs']) + apply simp_all + apply (case_tac ep, simp_all add: ep_relation_def) + apply (simp add: filterM_mapM list_case_return cong: list.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule corres_split_eqr[OF _ _ _ hoare_post_add + [where R="\_. valid_objs' and pspace_aligned' + and pspace_distinct'"]]) + apply (rule_tac S="(=)" + and Q="\xs s. (\x \ set xs. (epptr, TCBBlockedSend) \ state_refs_of s x) \ + distinct xs \ valid_etcbs s \ + in_correct_ready_q s \ ready_qs_distinct s \ + pspace_aligned s \ pspace_distinct s" + and Q'="\_ s. valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in corres_mapM_list_all2[where r'="(=)"], + simp_all add: list_all2_refl)[1] + apply (clarsimp simp: liftM_def[symmetric] o_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac F="\pl. st = Structures_A.BlockedOnSend epptr pl" + in corres_gen_asm) + apply (clarsimp simp: o_def dc_def[symmetric] liftM_def) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) + apply (rule corres_trivial) + apply simp + apply wp+ + apply simp + apply (wp sts_st_tcb_at' gts_st_tcb_at sts_valid_objs' + | strengthen valid_objs'_valid_tcbs')+ + apply (clarsimp simp: valid_tcb_state_def tcb_at_def st_tcb_def2 + st_tcb_at_refs_of_rev + dest!: state_refs_of_elemD elim!: tcb_at_is_etcb_at[rotated]) + apply (simp add: valid_tcb_state'_def) + apply (wp hoare_vcg_const_Ball_lift gts_wp | clarsimp)+ + apply (wp hoare_vcg_imp_lift sts_st_tcb' sts_valid_objs' + | clarsimp simp: valid_tcb_state'_def)+ + apply (rule corres_split[OF _ rescheduleRequired_corres]) + apply (rule setEndpoint_corres) + apply (simp split: list.split add: ep_relation_def) + apply (wp weak_sch_act_wf_lift_linear)+ + apply (wpsimp wp: mapM_wp' set_thread_state_runnable_weak_valid_sched_action + simp: valid_tcb_state'_def) + apply ((wpsimp wp: hoare_vcg_imp_lift mapM_wp' sts_valid_objs' simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: set_ep_valid_objs')+ + apply (clarsimp simp: conj_comms) + apply (frule sym_refs_ko_atD, clarsimp+) + apply (rule obj_at_valid_objsE, assumption+, clarsimp+) + apply (clarsimp simp: valid_obj_def valid_ep_def valid_sched_def valid_sched_action_def) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (rule conjI, erule obj_at_weakenE, clarsimp simp: is_ep) + apply (rule conjI, fastforce) + apply (clarsimp simp: st_tcb_at_refs_of_rev) + apply (drule(1) bspec, drule st_tcb_at_state_refs_ofD, clarsimp) + apply (simp add: 
set_eq_subset) + apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI]) + apply (fastforce simp: valid_ep'_def) + done + +crunches updateRestartPC + for tcb_at'[wp]: "tcb_at' t" + (simp: crunch_simps) + +lemma suspend_unqueued: + "\\\ suspend t \\rv. obj_at' (Not \ tcbQueued) t\" + unfolding suspend_def + by (wpsimp simp: comp_def wp: tcbSchedDequeue_not_tcbQueued) + +crunch no_vcpu[wp]: vcpuInvalidateActive "obj_at' (P::'a:: no_vcpu \ bool) t" + +lemma asUser_tcbQueued[wp]: + "asUser t' f \obj_at' (P \ tcbQueued) t\" + unfolding asUser_def threadGet_stateAssert_gets_asUser + by (wpsimp simp: asUser_fetch_def obj_at'_def) + +lemma archThreadSet_tcbQueued[wp]: + "archThreadSet f tcb \obj_at' (P \ tcbQueued) t\" + unfolding archThreadSet_def + by (wp setObject_tcb_strongest getObject_tcb_wp) (fastforce simp: obj_at'_def) + +lemma dissociateVCPUTCB_unqueued[wp]: + "dissociateVCPUTCB vcpu tcb \obj_at' (Not \ tcbQueued) t\" + unfolding dissociateVCPUTCB_def archThreadGet_def by wpsimp + +lemmas asUser_st_tcb_at'[wp] = asUser_obj_at [folded st_tcb_at'_def] +lemmas setObject_vcpu_st_tcb_at'[wp] = + setObject_vcpu_obj_at'_no_vcpu [where P'="P o tcbState" for P, folded st_tcb_at'_def] +lemmas vcpuInvalidateActive_st_tcb_at'[wp] = + vcpuInvalidateActive_no_vcpu [where P="P o tcbState" for P, folded st_tcb_at'_def] + +lemma archThreadSet_st_tcb_at'[wp]: + "archThreadSet f tcb \st_tcb_at' P t\" + unfolding archThreadSet_def st_tcb_at'_def + by (wp setObject_tcb_strongest getObject_tcb_wp) (fastforce simp: obj_at'_def) + +lemma dissociateVCPUTCB_st_tcb_at'[wp]: + "dissociateVCPUTCB vcpu tcb \st_tcb_at' P t'\" + unfolding dissociateVCPUTCB_def archThreadGet_def by wpsimp + +crunch ksQ[wp]: dissociateVCPUTCB "\s. P (ksReadyQueues s)" + (wp: crunch_wps setObject_queues_unchanged_tcb simp: crunch_simps) + +(* FIXME AARCH64: move to TcbAcc_R *) +lemma archThreadGet_wp: + "\\s. \tcb. ko_at' tcb t s \ Q (f (tcbArch tcb)) s\ archThreadGet f t \Q\" + unfolding archThreadGet_def + by (wpsimp wp: getObject_tcb_wp simp: obj_at'_def) + +crunch unqueued: prepareThreadDelete "obj_at' (Not \ tcbQueued) t" + (simp: o_def wp: dissociateVCPUTCB_unqueued[simplified o_def] archThreadGet_wp) +crunch inactive: prepareThreadDelete "st_tcb_at' ((=) Inactive) t'" + +end +end diff --git a/proof/refine/AARCH64/Ipc_R.thy b/proof/refine/AARCH64/Ipc_R.thy new file mode 100644 index 0000000000..0d1e575954 --- /dev/null +++ b/proof/refine/AARCH64/Ipc_R.thy @@ -0,0 +1,4255 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Ipc_R +imports Finalise_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemmas lookup_slot_wrapper_defs'[simp] = + lookupSourceSlot_def lookupTargetSlot_def lookupPivotSlot_def + +lemma getMessageInfo_corres: "corres ((=) \ message_info_map) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_message_info t) (getMessageInfo t)" + apply (rule corres_guard_imp) + apply (unfold get_message_info_def getMessageInfo_def fun_app_def) + apply (simp add: AARCH64_H.msgInfoRegister_def + AARCH64.msgInfoRegister_def AARCH64_A.msg_info_register_def) + apply (rule corres_split_eqr[OF asUser_getRegister_corres]) + apply (rule corres_trivial, simp add: message_info_from_data_eqv) + apply (wp | simp)+ + done + + +lemma get_mi_inv'[wp]: "\I\ getMessageInfo a \\x. I\" + by (simp add: getMessageInfo_def, wp) + +definition + "get_send_cap_relation rv rv' \ + (case rv of Some (c, cptr) \ (\c' cptr'. 
rv' = Some (c', cptr') \ + cte_map cptr = cptr' \ + cap_relation c c') + | None \ rv' = None)" + +lemma cap_relation_mask: + "\ cap_relation c c'; msk' = rights_mask_map msk \ \ + cap_relation (mask_cap msk c) (maskCapRights msk' c')" + by simp + +lemma lsfco_cte_at': + "\valid_objs' and valid_cap' cap\ + lookupSlotForCNodeOp f cap idx depth + \\rv. cte_at' rv\, -" + apply (simp add: lookupSlotForCNodeOp_def) + apply (rule conjI) + prefer 2 + apply clarsimp + apply (wp) + apply (clarsimp simp: split_def unlessE_def + split del: if_split) + apply (wp hoare_drop_imps throwE_R) + done + +declare unifyFailure_wp [wp] + +(* FIXME: move *) +lemma unifyFailure_wp_E [wp]: + "\P\ f -, \\_. E\ \ \P\ unifyFailure f -, \\_. E\" + unfolding validE_E_def + by (erule unifyFailure_wp)+ + +(* FIXME: move *) +lemma unifyFailure_wp2 [wp]: + assumes x: "\P\ f \\_. Q\" + shows "\P\ unifyFailure f \\_. Q\" + by (wp x, simp) + +definition + ct_relation :: "captransfer \ cap_transfer \ bool" +where + "ct_relation ct ct' \ + ct_receive_root ct = to_bl (ctReceiveRoot ct') + \ ct_receive_index ct = to_bl (ctReceiveIndex ct') + \ ctReceiveDepth ct' = unat (ct_receive_depth ct)" + +(* MOVE *) +lemma valid_ipc_buffer_ptr_aligned_word_size_bits: + "\valid_ipc_buffer_ptr' a s; is_aligned y word_size_bits \ \ is_aligned (a + y) word_size_bits" + unfolding valid_ipc_buffer_ptr'_def + apply clarsimp + apply (erule (1) aligned_add_aligned) + apply (simp add: msg_align_bits word_size_bits_def) + done + +(* MOVE *) +lemma valid_ipc_buffer_ptr'D2: + "\valid_ipc_buffer_ptr' a s; y < max_ipc_words * word_size; is_aligned y word_size_bits\ \ typ_at' UserDataT (a + y && ~~ mask pageBits) s" + unfolding valid_ipc_buffer_ptr'_def + apply clarsimp + apply (subgoal_tac "(a + y) && ~~ mask pageBits = a && ~~ mask pageBits") + apply simp + apply (rule mask_out_first_mask_some [where n = msg_align_bits]) + apply (erule is_aligned_add_helper [THEN conjunct2]) + apply (erule order_less_le_trans) + apply (simp add: msg_align_bits max_ipc_words word_size_def) + apply simp + done + +lemma loadCapTransfer_corres: + notes msg_max_words_simps = max_ipc_words_def msgMaxLength_def msgMaxExtraCaps_def msgLengthBits_def + capTransferDataSize_def msgExtraCapBits_def + shows + "corres ct_relation \ (valid_ipc_buffer_ptr' buffer) (load_cap_transfer buffer) (loadCapTransfer buffer)" + apply (simp add: load_cap_transfer_def loadCapTransfer_def + captransfer_from_words_def + capTransferDataSize_def capTransferFromWords_def + msgExtraCapBits_def word_size add.commute add.left_commute + msg_max_length_def msg_max_extra_caps_def word_size_def + msgMaxLength_def msgMaxExtraCaps_def msgLengthBits_def wordSize_def wordBits_def + del: upt.simps) + apply (rule corres_guard_imp) + apply (rule corres_split[OF load_word_corres]) + apply (rule corres_split[OF load_word_corres]) + apply (rule corres_split[OF load_word_corres]) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply (clarsimp simp: ct_relation_def) + apply (wp no_irq_loadWord)+ + apply simp + apply (simp add: conj_comms) + apply safe + apply (erule valid_ipc_buffer_ptr_aligned_word_size_bits, simp add: is_aligned_def word_size_bits_def)+ + apply (erule valid_ipc_buffer_ptr'D2, + simp add: msg_max_words_simps word_size_def word_size_bits_def, + simp add: word_size_bits_def is_aligned_def)+ + done + +lemma getReceiveSlots_corres: + "corres (\xs ys. 
ys = map cte_map xs) + (tcb_at receiver and valid_objs and pspace_aligned) + (tcb_at' receiver and valid_objs' and pspace_aligned' and pspace_distinct' and + case_option \ valid_ipc_buffer_ptr' recv_buf) + (get_receive_slots receiver recv_buf) + (getReceiveSlots receiver recv_buf)" + apply (cases recv_buf) + apply (simp add: getReceiveSlots_def) + apply (simp add: getReceiveSlots_def split_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF loadCapTransfer_corres]) + apply (rule corres_empty_on_failure) + apply (rule corres_splitEE) + apply (rule corres_unify_failure) + apply (rule lookup_cap_corres) + apply (simp add: ct_relation_def) + apply simp + apply (rule corres_splitEE) + apply (rule corres_unify_failure) + apply (simp add: ct_relation_def) + apply (erule lookupSlotForCNodeOp_corres [OF _ refl]) + apply simp + apply (simp add: split_def liftE_bindE unlessE_whenE) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply (case_tac cap, auto)[1] + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_trivial, simp add: returnOk_def) + apply (wp lookup_cap_valid lookup_cap_valid' lsfco_cte_at | simp)+ + done + +lemma get_recv_slot_inv'[wp]: + "\ P \ getReceiveSlots receiver buf \\rv'. P \" + apply (case_tac buf) + apply (simp add: getReceiveSlots_def) + apply (simp add: getReceiveSlots_def + split_def unlessE_def) + apply (wp | simp)+ + done + +lemma get_rs_cte_at'[wp]: + "\\\ + getReceiveSlots receiver recv_buf + \\rv s. \x \ set rv. cte_wp_at' (\c. cteCap c = capability.NullCap) x s\" + apply (cases recv_buf) + apply (simp add: getReceiveSlots_def) + apply (wp,simp) + apply (clarsimp simp add: getReceiveSlots_def + split_def whenE_def unlessE_whenE) + apply wp + apply simp + apply (rule getCTE_wp) + apply (simp add: cte_wp_at_ctes_of cong: conj_cong) + apply wp+ + apply simp + done + +lemma get_rs_real_cte_at'[wp]: + "\valid_objs'\ + getReceiveSlots receiver recv_buf + \\rv s. \x \ set rv. real_cte_at' x s\" + apply (cases recv_buf) + apply (simp add: getReceiveSlots_def) + apply (wp,simp) + apply (clarsimp simp add: getReceiveSlots_def + split_def whenE_def unlessE_whenE) + apply wp + apply simp + apply (wp hoare_drop_imps)[1] + apply simp + apply (wp lookup_cap_valid')+ + apply simp + done + +declare word_div_1 [simp] +declare word_minus_one_le [simp] +declare word64_minus_one_le [simp] + +lemma loadWordUser_corres': + "\ y < unat max_ipc_words; y' = of_nat y * 8 \ \ + corres (=) \ (valid_ipc_buffer_ptr' a) (load_word_offs a y) (loadWordUser (a + y'))" + apply simp + apply (erule loadWordUser_corres) + done + +declare loadWordUser_inv [wp] + +lemma getExtraCptrs_inv[wp]: + "\P\ getExtraCPtrs buf mi \\rv. P\" + apply (cases mi, cases buf, simp_all add: getExtraCPtrs_def) + apply (wp dmo_inv' mapM_wp' loadWord_inv) + done + +lemma getSlotCap_cte_wp_at_rv: + "\cte_wp_at' (\cte. P (cteCap cte) cte) p\ + getSlotCap p + \\rv. 
cte_wp_at' (P rv) p\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_ctes_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma badge_derived_mask [simp]: + "badge_derived' (maskCapRights R c) c' = badge_derived' c c'" + by (simp add: badge_derived'_def) + +declare derived'_not_Null [simp] + +lemma maskCapRights_vs_cap_ref'[simp]: + "vs_cap_ref' (maskCapRights msk cap) = vs_cap_ref' cap" + unfolding vs_cap_ref'_def + apply (cases cap, simp_all add: maskCapRights_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; + simp add: maskCapRights_def AARCH64_H.maskCapRights_def isCap_simps Let_def) + done + +lemma corres_set_extra_badge: + "b' = b \ + corres dc (in_user_frame buffer) + (valid_ipc_buffer_ptr' buffer and + (\_. msg_max_length + 2 + n < unat max_ipc_words)) + (set_extra_badge buffer b n) (setExtraBadge buffer b' n)" + apply (rule corres_gen_asm2) + apply (drule storeWordUser_corres [where a=buffer and w=b]) + apply (simp add: set_extra_badge_def setExtraBadge_def buffer_cptr_index_def + bufferCPtrOffset_def Let_def) + apply (simp add: word_size word_size_def wordSize_def wordBits_def + bufferCPtrOffset_def buffer_cptr_index_def msgMaxLength_def + msg_max_length_def msgLengthBits_def store_word_offs_def + add.commute add.left_commute) + done + +crunch typ_at': setExtraBadge "\s. P (typ_at' T p s)" +lemmas setExtraBadge_typ_ats' [wp] = typ_at_lifts [OF setExtraBadge_typ_at'] +crunch valid_pspace' [wp]: setExtraBadge valid_pspace' +crunch cte_wp_at' [wp]: setExtraBadge "cte_wp_at' P p" +crunch ipc_buffer' [wp]: setExtraBadge "valid_ipc_buffer_ptr' buffer" + +crunch inv'[wp]: getExtraCPtr P (wp: dmo_inv' loadWord_inv) + +lemmas unifyFailure_discard2 + = corres_injection[OF id_injection unifyFailure_injection, simplified] + +lemma deriveCap_not_null: + "\\\ deriveCap slot cap \\rv. K (rv \ NullCap \ cap \ NullCap)\,-" + apply (simp add: deriveCap_def split del: if_split) + by (case_tac cap; wpsimp simp: isCap_simps) + +lemma deriveCap_derived_foo: + "\\s. \cap'. (cte_wp_at' (\cte. badge_derived' cap (cteCap cte) + \ capASID cap = capASID (cteCap cte) \ cap_asid_base' cap = cap_asid_base' (cteCap cte) + \ cap_vptr' cap = cap_vptr' (cteCap cte)) slot s + \ valid_objs' s \ cap' \ NullCap \ cte_wp_at' (is_derived' (ctes_of s) slot cap' \ cteCap) slot s) + \ (cte_wp_at' (untyped_derived_eq cap \ cteCap) slot s + \ cte_wp_at' (untyped_derived_eq cap' \ cteCap) slot s) + \ (s \' cap \ s \' cap') \ (cap' \ NullCap \ cap \ NullCap) \ Q cap' s\ + deriveCap slot cap \Q\,-" + using deriveCap_derived[where slot=slot and c'=cap] deriveCap_valid[where slot=slot and c=cap] + deriveCap_untyped_derived[where slot=slot and c'=cap] deriveCap_not_null[where slot=slot and cap=cap] + apply (clarsimp simp: validE_R_def validE_def valid_def split: sum.split) + apply (frule in_inv_by_hoareD[OF deriveCap_inv]) + apply (clarsimp simp: o_def) + apply (drule spec, erule mp) + apply safe + apply fastforce + apply (drule spec, drule(1) mp) + apply fastforce + apply (drule spec, drule(1) mp) + apply fastforce + apply (drule spec, drule(1) bspec, simp) + done + +lemma valid_mdb_untyped_incD': + "valid_mdb' s \ untyped_inc' (ctes_of s)" + by (simp add: valid_mdb'_def valid_mdb_ctes_def) + +lemma cteInsert_cte_wp_at: + "\\s. cte_wp_at' (\c. is_derived' (ctes_of s) src cap (cteCap c)) src s + \ valid_mdb' s \ valid_objs' s + \ (if p = dest then P cap + else cte_wp_at' (\c. P (maskedAsFull (cteCap c) cap)) p s)\ + cteInsert cap src dest + \\uu. cte_wp_at' (\c. 
P (cteCap c)) p\" + apply (simp add: cteInsert_def) + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp + | clarsimp simp: comp_def + | unfold setUntypedCapAsFull_def)+ + apply (drule cte_at_cte_wp_atD) + apply (elim exE) + apply (rule_tac x=cte in exI) + apply clarsimp + apply (drule cte_at_cte_wp_atD) + apply (elim exE) + apply (rule_tac x=ctea in exI) + apply clarsimp + apply (cases "p=dest") + apply (clarsimp simp: cte_wp_at'_def) + apply (cases "p=src") + apply clarsimp + apply (intro conjI impI) + apply ((clarsimp simp: cte_wp_at'_def maskedAsFull_def split: if_split_asm)+)[2] + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: maskedAsFull_def cte_wp_at_ctes_of split:if_split_asm) + apply (erule disjE) prefer 2 apply simp + apply (clarsimp simp: is_derived'_def isCap_simps) + apply (drule valid_mdb_untyped_incD') + apply (case_tac cte, case_tac cteb, clarsimp) + apply (drule untyped_incD', (simp add: isCap_simps)+) + apply (frule(1) ctes_of_valid'[where p = p]) + apply (clarsimp simp:valid_cap'_def capAligned_def split:if_splits) + apply (drule_tac y ="of_nat fb" in word_plus_mono_right[OF _ is_aligned_no_overflow',rotated]) + apply simp+ + apply (rule word_of_nat_less) + apply simp + apply (simp add:p_assoc_help mask_def) + apply (simp add: max_free_index_def) + apply (clarsimp simp: maskedAsFull_def is_derived'_def badge_derived'_def + isCap_simps capMasterCap_def cte_wp_at_ctes_of + split: if_split_asm capability.splits) + done + +lemma cteInsert_weak_cte_wp_at3: + assumes imp:"\c. P c \ \ isUntypedCap c" + shows " \\s. if p = dest then P cap + else cte_wp_at' (\c. P (cteCap c)) p s\ + cteInsert cap src dest + \\uu. cte_wp_at' (\c. P (cteCap c)) p\" + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp + | clarsimp simp: comp_def cteInsert_def + | unfold setUntypedCapAsFull_def + | auto simp: cte_wp_at'_def dest!: imp)+ + +lemma maskedAsFull_null_cap[simp]: + "(maskedAsFull x y = capability.NullCap) = (x = capability.NullCap)" + "(capability.NullCap = maskedAsFull x y) = (x = capability.NullCap)" + by (case_tac x, auto simp:maskedAsFull_def isCap_simps ) + +lemma maskCapRights_eq_null: + "(RetypeDecls_H.maskCapRights r xa = capability.NullCap) = + (xa = capability.NullCap)" + apply (cases xa; simp add: maskCapRights_def isCap_simps) + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + apply (simp_all add: AARCH64_H.maskCapRights_def isCap_simps) + done + +lemma cte_refs'_maskedAsFull[simp]: + "cte_refs' (maskedAsFull a b) = cte_refs' a" + apply (rule ext)+ + apply (case_tac a) + apply (clarsimp simp:maskedAsFull_def isCap_simps)+ + done + + +lemma transferCapsToSlots_corres: + "\ list_all2 (\(cap, slot) (cap', slot'). cap_relation cap cap' + \ slot' = cte_map slot) caps caps'; + mi' = message_info_map mi \ \ + corres ((=) \ message_info_map) + (\s. valid_objs s \ pspace_aligned s \ pspace_distinct s \ valid_mdb s + \ valid_list s + \ (case ep of Some x \ ep_at x s | _ \ True) + \ (\x \ set slots. cte_wp_at (\cap. cap = cap.NullCap) x s \ + real_cte_at x s) + \ (\(cap, slot) \ set caps. valid_cap cap s \ + cte_wp_at (\cp'. (cap \ cap.NullCap \ cp'\cap \ cp' = masked_as_full cap cap )) slot s ) + \ distinct slots + \ in_user_frame buffer s) + (\s. valid_pspace' s + \ (case ep of Some x \ ep_at' x s | _ \ True) + \ (\x \ set (map cte_map slots). + cte_wp_at' (\cte. 
cteCap cte = NullCap) x s + \ real_cte_at' x s) + \ distinct (map cte_map slots) + \ valid_ipc_buffer_ptr' buffer s + \ (\(cap, slot) \ set caps'. valid_cap' cap s \ + cte_wp_at' (\cte. cap \ NullCap \ cteCap cte \ cap \ cteCap cte = maskedAsFull cap cap) slot s) + \ 2 + msg_max_length + n + length caps' < unat max_ipc_words) + (transfer_caps_loop ep buffer n caps slots mi) + (transferCapsToSlots ep buffer n caps' + (map cte_map slots) mi')" + (is "\ list_all2 ?P caps caps'; ?v \ \ ?corres") +proof (induct caps caps' arbitrary: slots n mi mi' rule: list_all2_induct) + case Nil + show ?case using Nil.prems by (case_tac mi, simp) +next + case (Cons x xs y ys slots n mi mi') + note if_weak_cong[cong] if_cong [cong del] + assume P: "?P x y" + show ?case using Cons.prems P + apply (clarsimp split del: if_split) + apply (simp add: Let_def split_def word_size liftE_bindE + word_bits_conv[symmetric] split del: if_split) + apply (rule corres_const_on_failure) + apply (simp add: dc_def[symmetric] split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_if2) + apply (case_tac "fst x", auto simp add: isCap_simps)[1] + apply (rule corres_split[OF corres_set_extra_badge]) + apply (clarsimp simp: is_cap_simps) + apply (drule conjunct1) + apply simp + apply (rule corres_rel_imp, rule Cons.hyps, simp_all)[1] + apply (case_tac mi, simp) + apply (simp add: split_def) + apply (wp hoare_vcg_const_Ball_lift) + apply (subgoal_tac "obj_ref_of (fst x) = capEPPtr (fst y)") + prefer 2 + apply (clarsimp simp: is_cap_simps) + apply (simp add: split_def) + apply (wp hoare_vcg_const_Ball_lift) + apply (rule_tac P="slots = []" and Q="slots \ []" in corres_disj_division) + apply simp + apply (rule corres_trivial, simp add: returnOk_def) + apply (case_tac mi, simp) + apply (simp add: list_case_If2 split del: if_split) + apply (rule corres_splitEE) + apply (rule unifyFailure_discard2) + apply (case_tac mi, clarsimp) + apply (rule deriveCap_corres) + apply (simp add: remove_rights_def) + apply clarsimp + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply (case_tac cap', auto)[1] + apply (rule corres_trivial, simp) + apply (case_tac mi, simp) + apply simp + apply (simp add: liftE_bindE) + apply (rule corres_split_nor) + apply (rule cteInsert_corres, simp_all add: hd_map)[1] + apply (simp add: tl_map) + apply (rule corres_rel_imp, rule Cons.hyps, simp_all)[1] + apply (wp valid_case_option_post_wp hoare_vcg_const_Ball_lift + hoare_vcg_const_Ball_lift cap_insert_weak_cte_wp_at) + apply (wp hoare_vcg_const_Ball_lift | simp add:split_def del: imp_disj1)+ + apply (wp cap_insert_cte_wp_at) + apply (wp valid_case_option_post_wp hoare_vcg_const_Ball_lift + cteInsert_valid_pspace + | simp add: split_def)+ + apply (wp cteInsert_weak_cte_wp_at hoare_valid_ipc_buffer_ptr_typ_at')+ + apply (wpsimp wp: hoare_vcg_const_Ball_lift cteInsert_cte_wp_at valid_case_option_post_wp + simp: split_def) + apply (unfold whenE_def) + apply wp+ + apply (clarsimp simp: conj_comms ball_conj_distrib split del: if_split) + apply (rule_tac Q' ="\cap' s. (cap'\ cap.NullCap \ + cte_wp_at (is_derived (cdt s) (a, b) cap') (a, b) s + \ QM s cap')" for QM + in hoare_strengthen_postE_R) + prefer 2 + apply clarsimp + apply assumption + apply (subst imp_conjR) + apply (rule hoare_vcg_conj_liftE_R) + apply (rule derive_cap_is_derived) + apply (wp derive_cap_is_derived_foo)+ + apply (simp split del: if_split) + apply (rule_tac Q' ="\cap' s. (cap'\ capability.NullCap \ + cte_wp_at' (\c. 
is_derived' (ctes_of s) (cte_map (a, b)) cap' (cteCap c)) (cte_map (a, b)) s + \ QM s cap')" for QM + in hoare_strengthen_postE_R) + prefer 2 + apply clarsimp + apply assumption + apply (subst imp_conjR) + apply (rule hoare_vcg_conj_liftE_R) + apply (rule hoare_strengthen_postE_R[OF deriveCap_derived]) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (wp deriveCap_derived_foo) + apply (clarsimp simp: cte_wp_at_caps_of_state remove_rights_def + real_cte_tcb_valid if_apply_def2 + split del: if_split) + apply (rule conjI, (clarsimp split del: if_split)+) + apply (clarsimp simp:conj_comms split del:if_split) + apply (intro conjI allI) + apply (clarsimp split:if_splits) + apply (case_tac "cap = fst x",simp+) + apply (clarsimp simp:masked_as_full_def is_cap_simps cap_master_cap_simps) + apply (clarsimp split del: if_split) + apply (intro conjI) + apply (clarsimp simp:neq_Nil_conv) + apply (drule hd_in_set) + apply (drule(1) bspec) + apply (clarsimp split:if_split_asm) + apply (fastforce simp:neq_Nil_conv) + apply (intro ballI conjI) + apply (clarsimp simp:neq_Nil_conv) + apply (intro impI) + apply (drule(1) bspec[OF _ subsetD[rotated]]) + apply (clarsimp simp:neq_Nil_conv) + apply (clarsimp split:if_splits) + apply clarsimp + apply (intro conjI) + apply (drule(1) bspec,clarsimp)+ + subgoal for \ aa _ _ capa + by (case_tac "capa = aa"; clarsimp split:if_splits simp:masked_as_full_def is_cap_simps) + apply (case_tac "isEndpointCap (fst y) \ capEPPtr (fst y) = the ep \ (\y. ep = Some y)") + apply (clarsimp simp:conj_comms split del:if_split) + apply (subst if_not_P) + apply clarsimp + apply (clarsimp simp:valid_pspace'_def cte_wp_at_ctes_of split del:if_split) + apply (intro conjI) + apply (case_tac "cteCap cte = fst y",clarsimp simp: badge_derived'_def) + apply (clarsimp simp: maskCapRights_eq_null maskedAsFull_def badge_derived'_def isCap_simps + split: if_split_asm) + apply (clarsimp split del: if_split) + apply (case_tac "fst y = capability.NullCap") + apply (clarsimp simp: neq_Nil_conv split del: if_split)+ + apply (intro allI impI conjI) + apply (clarsimp split:if_splits) + apply (clarsimp simp:image_def)+ + apply (thin_tac "\x\set ys. Q x" for Q) + apply (drule(1) bspec)+ + apply clarsimp+ + apply (drule(1) bspec) + apply (rule conjI) + apply clarsimp+ + apply (case_tac "cteCap cteb = ab") + by (clarsimp simp: isCap_simps maskedAsFull_def split:if_splits)+ +qed + +declare constOnFailure_wp [wp] + +lemma transferCapsToSlots_pres1[crunch_rules]: + assumes x: "\cap src dest. \P\ cteInsert cap src dest \\rv. P\" + assumes eb: "\b n. \P\ setExtraBadge buffer b n \\_. P\" + shows "\P\ transferCapsToSlots ep buffer n caps slots mi \\rv. P\" + apply (induct caps arbitrary: slots n mi) + apply simp + apply (simp add: Let_def split_def whenE_def + cong: if_cong list.case_cong + split del: if_split) + apply (rule hoare_pre) + apply (wp x eb | assumption | simp split del: if_split | wpc + | wp (once) hoare_drop_imps)+ + done + +lemma cteInsert_cte_cap_to': + "\ex_cte_cap_to' p and cte_wp_at' (\cte. cteCap cte = NullCap) dest\ + cteInsert cap src dest + \\rv. 
ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) + apply (clarsimp simp:cteInsert_def) + apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (rule_tac x = "cref" in exI) + apply (rule conjI) + apply clarsimp+ + done + +declare maskCapRights_eq_null[simp] + +crunch ex_cte_cap_wp_to' [wp]: setExtraBadge "ex_cte_cap_wp_to' P p" + (rule: ex_cte_cap_to'_pres) + +crunch valid_objs' [wp]: setExtraBadge valid_objs' +crunch aligned' [wp]: setExtraBadge pspace_aligned' +crunch distinct' [wp]: setExtraBadge pspace_distinct' + +lemma cteInsert_assume_Null: + "\P\ cteInsert cap src dest \Q\ \ + \\s. cte_wp_at' (\cte. cteCap cte = NullCap) dest s \ P s\ + cteInsert cap src dest + \Q\" + apply (rule hoare_name_pre_state) + apply (erule impCE) + apply (simp add: cteInsert_def) + apply (rule bind_wp[OF _ getCTE_sp])+ + apply (rule hoare_name_pre_state) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule hoare_pre(1)) + apply simp + done + +crunch mdb'[wp]: setExtraBadge valid_mdb' + +lemma cteInsert_weak_cte_wp_at2: + assumes weak:"\c cap. P (maskedAsFull c cap) = P c" + shows + "\\s. if p = dest then P cap else cte_wp_at' (\c. P (cteCap c)) p s\ + cteInsert cap src dest + \\uu. cte_wp_at' (\c. P (cteCap c)) p\" + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) + apply (clarsimp simp:cteInsert_def) + apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) + apply (clarsimp simp:cte_wp_at_ctes_of weak) + apply auto + done + +lemma transferCapsToSlots_presM: + assumes x: "\cap src dest. \\s. P s \ (emx \ cte_wp_at' (\cte. cteCap cte = NullCap) dest s \ ex_cte_cap_to' dest s) + \ (vo \ valid_objs' s \ valid_cap' cap s \ real_cte_at' dest s) + \ (drv \ cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s + \ cte_wp_at' (untyped_derived_eq cap o cteCap) src s + \ valid_mdb' s) + \ (pad \ pspace_aligned' s \ pspace_distinct' s)\ + cteInsert cap src dest \\rv. P\" + assumes eb: "\b n. \P\ setExtraBadge buffer b n \\_. P\" + shows "\\s. P s + \ (emx \ (\x \ set slots. ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = NullCap) x s) \ distinct slots) + \ (vo \ valid_objs' s \ (\x \ set slots. real_cte_at' x s \ cte_wp_at' (\cte. cteCap cte = NullCap) x s) + \ (\x \ set caps. s \' fst x ) \ distinct slots) + \ (pad \ pspace_aligned' s \ pspace_distinct' s) + \ (drv \ vo \ pspace_aligned' s \ pspace_distinct' s \ valid_mdb' s + \ length slots \ 1 + \ (\x \ set caps. s \' fst x \ (slots \ [] + \ cte_wp_at' (\cte. fst x \ NullCap \ cteCap cte = fst x) (snd x) s)))\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. 
P\" + apply (induct caps arbitrary: slots n mi) + apply (simp, wp, simp) + apply (simp add: Let_def split_def whenE_def + cong: if_cong list.case_cong split del: if_split) + apply (rule hoare_pre) + apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift + | assumption | wpc)+ + apply (rule cteInsert_assume_Null) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) + apply (rule cteInsert_weak_cte_wp_at2,clarsimp) + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ + apply (rule cteInsert_weak_cte_wp_at2,clarsimp) + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp + deriveCap_derived_foo)+ + apply (thin_tac "\slots. PROP P slots" for P) + apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def + real_cte_tcb_valid if_apply_def2 + split del: if_split) + apply (rule conjI) + apply (clarsimp simp:cte_wp_at_ctes_of untyped_derived_eq_def) + apply (intro conjI allI) + apply (clarsimp simp:Fun.comp_def cte_wp_at_ctes_of)+ + apply (clarsimp simp:valid_capAligned) + done + +lemmas transferCapsToSlots_pres2 + = transferCapsToSlots_presM[where vo=False and emx=True + and drv=False and pad=False, simplified] + +crunches transferCapsToSlots + for pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + and pspace_canonical'[wp]: pspace_canonical' + +lemma transferCapsToSlots_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ + transferCapsToSlots ep buffer n caps slots mi + \\rv s. P (typ_at' T p s)\" + by (wp transferCapsToSlots_pres1 setExtraBadge_typ_at') + +lemma transferCapsToSlots_valid_objs[wp]: + "\valid_objs' and valid_mdb' and (\s. \x \ set slots. real_cte_at' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + and (\s. \x \ set caps. s \' fst x) and K(distinct slots)\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. valid_objs'\" + apply (rule hoare_pre) + apply (rule transferCapsToSlots_presM[where vo=True and emx=False and drv=False and pad=False]) + apply (wp | simp)+ + done + +abbreviation(input) + "transferCaps_srcs caps s \ \x\set caps. cte_wp_at' (\cte. fst x \ NullCap \ cteCap cte = fst x) (snd x) s" + +lemma transferCapsToSlots_mdb[wp]: + "\\s. valid_pspace' s \ distinct slots + \ length slots \ 1 + \ (\x \ set slots. ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + \ (\x \ set slots. real_cte_at' x s) + \ transferCaps_srcs caps s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. valid_mdb'\" + apply (wpsimp wp: transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) + apply (frule valid_capAligned) + apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def badge_derived'_def) + apply wp + apply (clarsimp simp: valid_pspace'_def) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule(1) bspec,clarify) + apply (case_tac cte) + apply (clarsimp dest!:ctes_of_valid_cap' split:if_splits) + apply (fastforce simp:valid_cap'_def) + done + +crunch no_0' [wp]: setExtraBadge no_0_obj' + +lemma transferCapsToSlots_no_0_obj' [wp]: + "\no_0_obj'\ transferCapsToSlots ep buffer n caps slots mi \\rv. no_0_obj'\" + by (wp transferCapsToSlots_pres1) + +lemma transferCapsToSlots_vp[wp]: + "\\s. valid_pspace' s \ distinct slots + \ length slots \ 1 + \ (\x \ set slots. ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + \ (\x \ set slots. real_cte_at' x s) + \ transferCaps_srcs caps s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. 
valid_pspace'\" + apply (rule hoare_pre) + apply (simp add: valid_pspace'_def | wp)+ + apply (fastforce simp: cte_wp_at_ctes_of dest: ctes_of_valid') + done + +crunches setExtraBadge, doIPCTransfer + for sch_act [wp]: "\s. P (ksSchedulerAction s)" + (wp: crunch_wps mapME_wp' simp: zipWithM_x_mapM) +crunches setExtraBadge + for pred_tcb_at' [wp]: "\s. pred_tcb_at' proj P p s" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and obj_at' [wp]: "\s. P' (obj_at' P p s)" + and queues [wp]: "\s. P (ksReadyQueues s)" + and queuesL1 [wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and queuesL2 [wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: storeWordUser_def) + + +lemma tcts_sch_act[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + by (wp sch_act_wf_lift tcb_in_cur_domain'_lift transferCapsToSlots_pres1) + +crunches setExtraBadge + for state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" + +lemma tcts_state_refs_of'[wp]: + "\\s. P (state_refs_of' s)\ + transferCapsToSlots ep buffer n caps slots mi + \\rv s. P (state_refs_of' s)\" + by (wp transferCapsToSlots_pres1) + +lemma tcts_state_hyp_refs_of'[wp]: + "transferCapsToSlots ep buffer n caps slots mi \\s. P (state_hyp_refs_of' s)\" + by (wp transferCapsToSlots_pres1) + +crunch if_live' [wp]: setExtraBadge if_live_then_nonz_cap' + +lemma tcts_iflive[wp]: + "\\s. if_live_then_nonz_cap' s \ distinct slots \ + (\x\set slots. + ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s)\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. if_live_then_nonz_cap'\" + by (wp transferCapsToSlots_pres2 | simp)+ + +crunch if_unsafe' [wp]: setExtraBadge if_unsafe_then_cap' + +lemma tcts_ifunsafe[wp]: + "\\s. if_unsafe_then_cap' s \ distinct slots \ + (\x\set slots. cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s \ + ex_cte_cap_to' x s)\ transferCapsToSlots ep buffer n caps slots mi + \\rv. if_unsafe_then_cap'\" + by (wp transferCapsToSlots_pres2 | simp)+ + +crunch valid_idle' [wp]: setExtraBadge valid_idle' + +lemma tcts_idle'[wp]: + "\\s. valid_idle' s\ transferCapsToSlots ep buffer n caps slots mi + \\rv. valid_idle'\" + apply (rule hoare_pre) + apply (wp transferCapsToSlots_pres1) + apply simp + done + +lemma tcts_ct[wp]: + "\cur_tcb'\ transferCapsToSlots ep buffer n caps slots mi \\rv. cur_tcb'\" + by (wp transferCapsToSlots_pres1 cur_tcb_lift) + +crunch valid_arch_state' [wp]: setExtraBadge valid_arch_state' + +lemma transferCapsToSlots_valid_arch [wp]: + "\valid_arch_state'\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_arch_state'\" + by (rule transferCapsToSlots_pres1; wp) + +crunch valid_global_refs' [wp]: setExtraBadge valid_global_refs' + +lemma transferCapsToSlots_valid_globals [wp]: + "\valid_global_refs' and valid_objs' and valid_mdb' and pspace_distinct' and pspace_aligned' and K (distinct slots) + and K (length slots \ 1) + and (\s. \x \ set slots. real_cte_at' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + and transferCaps_srcs caps\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. 
valid_global_refs'\" + apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=True] | clarsimp)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule(1) bspec,clarsimp) + apply (case_tac cte,clarsimp) + apply (frule(1) CSpace_I.ctes_of_valid_cap') + apply (fastforce simp:valid_cap'_def) + done + +crunch irq_node' [wp]: setExtraBadge "\s. P (irq_node' s)" + +lemma transferCapsToSlots_irq_node'[wp]: + "\\s. P (irq_node' s)\ transferCapsToSlots ep buffer n caps slots mi \\rv s. P (irq_node' s)\" + by (wp transferCapsToSlots_pres1) + +lemma valid_irq_handlers_ctes_ofD: + "\ ctes_of s p = Some cte; cteCap cte = IRQHandlerCap irq; valid_irq_handlers' s \ + \ irq_issued' irq s" + by (auto simp: valid_irq_handlers'_def cteCaps_of_def ran_def) + +crunch valid_irq_handlers' [wp]: setExtraBadge valid_irq_handlers' + +lemma transferCapsToSlots_irq_handlers[wp]: + "\valid_irq_handlers' and valid_objs' and valid_mdb' and pspace_distinct' and pspace_aligned' + and K(distinct slots \ length slots \ 1) + and (\s. \x \ set slots. real_cte_at' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + and transferCaps_srcs caps\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. valid_irq_handlers'\" + apply (wpsimp wp: transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) + apply (clarsimp simp: is_derived'_def cte_wp_at_ctes_of badge_derived'_def) + apply (erule(2) valid_irq_handlers_ctes_ofD) + apply wp + apply (clarsimp simp:cte_wp_at_ctes_of | intro ballI conjI)+ + apply (drule(1) bspec,clarsimp) + apply (case_tac cte,clarsimp) + apply (frule(1) CSpace_I.ctes_of_valid_cap') + apply (fastforce simp:valid_cap'_def) + done + +crunch irq_state' [wp]: setExtraBadge "\s. P (ksInterruptState s)" + +lemma setExtraBadge_irq_states'[wp]: + "\valid_irq_states'\ setExtraBadge buffer b n \\_. valid_irq_states'\" + apply (wp valid_irq_states_lift') + apply (simp add: setExtraBadge_def storeWordUser_def) + apply (wpsimp wp: no_irq dmo_lift' no_irq_storeWord) + apply assumption + done + +lemma transferCapsToSlots_irq_states' [wp]: + "\valid_irq_states'\ transferCapsToSlots ep buffer n caps slots mi \\_. valid_irq_states'\" + by (wp transferCapsToSlots_pres1) + +lemma transferCapsToSlots_irqs_masked'[wp]: + "\irqs_masked'\ transferCapsToSlots ep buffer n caps slots mi \\rv. irqs_masked'\" + by (wp transferCapsToSlots_pres1 irqs_masked_lift) + +lemma storeWordUser_vms'[wp]: + "\valid_machine_state'\ storeWordUser a w \\_. valid_machine_state'\" +proof - + have aligned_offset_ignore: + "\(l::machine_word) (p::machine_word) sz. 
l<8 \ p && mask 3 = 0 \ + p+l && ~~ mask pageBits = p && ~~ mask pageBits" + proof - + fix l p sz + assume al: "(p::machine_word) && mask 3 = 0" + assume "(l::machine_word) < 8" hence less: "l<2^3" by simp + have le: "3 \ pageBits" by (simp add: pageBits_def) + show "?thesis l p sz" + by (rule is_aligned_add_helper[simplified is_aligned_mask, + THEN conjunct2, THEN mask_out_first_mask_some, + where n=3, OF al less le]) + qed + + show ?thesis + apply (simp add: valid_machine_state'_def storeWordUser_def + doMachineOp_def split_def) + apply wp + apply clarsimp + apply (drule use_valid) + apply (rule_tac x=p in storeWord_um_inv, simp+) + apply (drule_tac x=p in spec) + apply (erule disjE, simp_all) + apply (erule conjE) + apply (erule disjE, simp) + apply (simp add: pointerInUserData_def word_size) + apply (subgoal_tac "a && ~~ mask pageBits = p && ~~ mask pageBits", simp) + apply (simp only: is_aligned_mask[of _ 3]) + apply (elim disjE, simp_all) + apply (rule aligned_offset_ignore[symmetric], simp+)+ + done +qed + +lemma setExtraBadge_vms'[wp]: + "\valid_machine_state'\ setExtraBadge buffer b n \\_. valid_machine_state'\" +by (simp add: setExtraBadge_def) wp + +lemma transferCapsToSlots_vms[wp]: + "\\s. valid_machine_state' s\ + transferCapsToSlots ep buffer n caps slots mi + \\_ s. valid_machine_state' s\" + by (wp transferCapsToSlots_pres1) + +crunches setExtraBadge, transferCapsToSlots + for pspace_domain_valid[wp]: "pspace_domain_valid" + +crunch ct_not_inQ[wp]: setExtraBadge "ct_not_inQ" + +lemma tcts_ct_not_inQ[wp]: + "\ct_not_inQ\ + transferCapsToSlots ep buffer n caps slots mi + \\_. ct_not_inQ\" + by (wp transferCapsToSlots_pres1) + +crunch gsUntypedZeroRanges[wp]: setExtraBadge "\s. P (gsUntypedZeroRanges s)" +crunch ctes_of[wp]: setExtraBadge "\s. P (ctes_of s)" + +lemma tcts_zero_ranges[wp]: + "\\s. untyped_ranges_zero' s \ valid_pspace' s \ distinct slots + \ (\x \ set slots. ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + \ (\x \ set slots. real_cte_at' x s) + \ length slots \ 1 + \ transferCaps_srcs caps s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. untyped_ranges_zero'\" + apply (wpsimp wp: transferCapsToSlots_presM[where emx=True and vo=True + and drv=True and pad=True]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (simp add: cteCaps_of_def) + apply (rule hoare_pre, wp untyped_ranges_zero_lift) + apply (simp add: o_def) + apply (clarsimp simp: valid_pspace'_def ball_conj_distrib[symmetric]) + apply (drule(1) bspec) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, clarsimp) + apply (frule(1) ctes_of_valid_cap') + apply auto[1] + done + +crunch ct_idle_or_in_cur_domain'[wp]: setExtraBadge ct_idle_or_in_cur_domain' +crunch ct_idle_or_in_cur_domain'[wp]: transferCapsToSlots ct_idle_or_in_cur_domain' +crunch ksCurDomain[wp]: transferCapsToSlots "\s. P (ksCurDomain s)" +crunch ksDomSchedule[wp]: setExtraBadge "\s. P (ksDomSchedule s)" +crunch ksDomScheduleIdx[wp]: setExtraBadge "\s. P (ksDomScheduleIdx s)" +crunch ksDomSchedule[wp]: transferCapsToSlots "\s. P (ksDomSchedule s)" +crunch ksDomScheduleIdx[wp]: transferCapsToSlots "\s. P (ksDomScheduleIdx s)" + +crunches transferCapsToSlots + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift) + +lemma transferCapsToSlots_invs[wp]: + "\\s. invs' s \ distinct slots + \ (\x \ set slots. cte_wp_at' (\cte. 
cteCap cte = NullCap) x s) + \ (\x \ set slots. ex_cte_cap_to' x s) + \ (\x \ set slots. real_cte_at' x s) + \ length slots \ 1 + \ transferCaps_srcs caps s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (wp valid_irq_node_lift) + apply fastforce + done + +lemma grs_distinct'[wp]: + "\\\ getReceiveSlots t buf \\rv s. distinct rv\" + apply (cases buf, simp_all add: getReceiveSlots_def + split_def unlessE_def) + apply (wp, simp) + apply (wp | simp only: distinct.simps list.simps empty_iff)+ + apply simp + done + +lemma transferCaps_corres: + "\ info' = message_info_map info; + list_all2 (\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x)) + caps caps' \ + \ + corres ((=) \ message_info_map) + (tcb_at receiver and valid_objs and + pspace_aligned and pspace_distinct and valid_mdb + and valid_list + and (\s. case ep of Some x \ ep_at x s | _ \ True) + and case_option \ in_user_frame recv_buf + and (\s. valid_message_info info) + and transfer_caps_srcs caps) + (tcb_at' receiver and valid_objs' and + pspace_aligned' and pspace_distinct' and pspace_canonical' + and no_0_obj' and valid_mdb' + and (\s. case ep of Some x \ ep_at' x s | _ \ True) + and case_option \ valid_ipc_buffer_ptr' recv_buf + and transferCaps_srcs caps' + and (\s. length caps' \ msgMaxExtraCaps)) + (transfer_caps info caps ep receiver recv_buf) + (transferCaps info' caps' ep receiver recv_buf)" + apply (simp add: transfer_caps_def transferCaps_def + getThreadCSpaceRoot) + apply (rule corres_assume_pre) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getReceiveSlots_corres]) + apply (rule_tac x=recv_buf in option_corres) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply (case_tac info, simp) + apply simp + apply (rule corres_rel_imp, rule transferCapsToSlots_corres, + simp_all add: split_def)[1] + apply (case_tac info, simp) + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp + | simp only: ball_conj_distrib)+ + apply (simp add: cte_map_def tcb_cnode_index_def split_def) + apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 + split_def + cong: option.case_cong) + apply (drule(1) bspec) + apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (frule(1) Invariants_AI.caps_of_state_valid) + apply (fastforce simp:valid_cap_def) + apply (cases info) + apply (clarsimp simp: msg_max_extra_caps_def valid_message_info_def + max_ipc_words msg_max_length_def + msgMaxExtraCaps_def msgExtraCapBits_def + shiftL_nat valid_pspace'_def) + apply (drule(1) bspec) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (case_tac cte,clarsimp) + apply (frule(1) ctes_of_valid_cap') + apply (fastforce simp:valid_cap'_def) + done + +crunch typ_at'[wp]: transferCaps "\s. 
P (typ_at' T p s)" + +lemmas transferCaps_typ_ats[wp] = typ_at_lifts [OF transferCaps_typ_at'] + +lemma isIRQControlCap_mask [simp]: + "isIRQControlCap (maskCapRights R c) = isIRQControlCap c" + apply (case_tac c) + apply (clarsimp simp: isCap_simps maskCapRights_def Let_def)+ + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + apply (clarsimp simp: isCap_simps AARCH64_H.maskCapRights_def + maskCapRights_def Let_def)+ + done + +lemma isFrameCap_maskCapRights[simp]: +" isArchCap isFrameCap (RetypeDecls_H.maskCapRights R c) = isArchCap isFrameCap c" + apply (case_tac c; simp add: isCap_simps isArchCap_def maskCapRights_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; simp add: isCap_simps AARCH64_H.maskCapRights_def) + done + +lemma capReplyMaster_mask[simp]: + "isReplyCap c \ capReplyMaster (maskCapRights R c) = capReplyMaster c" + by (clarsimp simp: isCap_simps maskCapRights_def) + +lemma is_derived_mask' [simp]: + "is_derived' m p (maskCapRights R c) = is_derived' m p c" + apply (rule ext) + apply (simp add: is_derived'_def badge_derived'_def) + done + +lemma updateCapData_ordering: + "\ (x, capBadge cap) \ capBadge_ordering P; updateCapData p d cap \ NullCap \ + \ (x, capBadge (updateCapData p d cap)) \ capBadge_ordering P" + apply (cases cap, simp_all add: updateCapData_def isCap_simps Let_def + capBadge_def AARCH64_H.updateCapData_def + split: if_split_asm) + apply fastforce+ + done + +lemma updateCapData_capReplyMaster: + "isReplyCap cap \ capReplyMaster (updateCapData p d cap) = capReplyMaster cap" + by (clarsimp simp: isCap_simps updateCapData_def split del: if_split) + +lemma updateCapData_is_Reply[simp]: + "(updateCapData p d cap = ReplyCap x y z) = (cap = ReplyCap x y z)" + by (rule ccontr, + clarsimp simp: isCap_simps updateCapData_def Let_def + AARCH64_H.updateCapData_def + split del: if_split + split: if_split_asm) + +lemma updateCapDataIRQ: + "updateCapData p d cap \ NullCap \ + isIRQControlCap (updateCapData p d cap) = isIRQControlCap cap" + apply (cases cap, simp_all add: updateCapData_def isCap_simps Let_def + AARCH64_H.updateCapData_def + split: if_split_asm) + done + +lemma updateCapData_vs_cap_ref'[simp]: + "vs_cap_ref' (updateCapData pr D c) = vs_cap_ref' c" + by (rule ccontr, + clarsimp simp: isCap_simps updateCapData_def Let_def + AARCH64_H.updateCapData_def + vs_cap_ref'_def + split del: if_split + split: if_split_asm) + +lemma isFrameCap_updateCapData[simp]: + "isArchCap isFrameCap (updateCapData pr D c) = isArchCap isFrameCap c" + apply (case_tac c; simp add:updateCapData_def isCap_simps isArchCap_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; simp add: AARCH64_H.updateCapData_def isCap_simps isArchCap_def) + apply (clarsimp split:capability.splits simp:Let_def) + done + +lemma lookup_cap_to'[wp]: + "\\\ lookupCap t cref \\rv s. \r\cte_refs' rv (irq_node' s). ex_cte_cap_to' r s\,-" + by (simp add: lookupCap_def lookupCapAndSlot_def | wp)+ + +lemma grs_cap_to'[wp]: + "\\\ getReceiveSlots t buf \\rv s. \x \ set rv. ex_cte_cap_to' x s\" + apply (cases buf; simp add: getReceiveSlots_def split_def unlessE_def) + apply (wp, simp) + apply (wp | simp | rule hoare_drop_imps)+ + done + +lemma grs_length'[wp]: + "\\s. 1 \ n\ getReceiveSlots receiver recv_buf \\rv s. 
length rv \ n\" + apply (simp add: getReceiveSlots_def split_def unlessE_def) + apply (rule hoare_pre) + apply (wp | wpc | simp)+ + done + +lemma transferCaps_invs' [wp]: + "\invs' and transferCaps_srcs caps\ + transferCaps mi caps ep receiver recv_buf + \\rv. invs'\" + apply (simp add: transferCaps_def Let_def split_def) + apply (wp get_rs_cte_at' hoare_vcg_const_Ball_lift + | wpcw | clarsimp)+ + done + +lemma get_mrs_inv'[wp]: + "\P\ getMRs t buf info \\rv. P\" + by (simp add: getMRs_def load_word_offs_def getRegister_def + | wp dmo_inv' loadWord_inv mapM_wp' + asUser_inv det_mapM[where S=UNIV] | wpc)+ + + +lemma copyMRs_typ_at': + "\\s. P (typ_at' T p s)\ copyMRs s sb r rb n \\rv s. P (typ_at' T p s)\" + by (simp add: copyMRs_def | wp mapM_wp [where S=UNIV, simplified] | wpc)+ + +lemmas copyMRs_typ_at_lifts[wp] = typ_at_lifts [OF copyMRs_typ_at'] + +lemma copy_mrs_invs'[wp]: + "\ invs' and tcb_at' s and tcb_at' r \ copyMRs s sb r rb n \\rv. invs' \" + including classic_wp_pre + apply (simp add: copyMRs_def) + apply (wp dmo_invs' no_irq_mapM no_irq_storeWord| + simp add: split_def) + apply (case_tac sb, simp_all)[1] + apply wp+ + apply (case_tac rb, simp_all)[1] + apply (wp mapM_wp dmo_invs' no_irq_mapM no_irq_storeWord no_irq_loadWord) + apply blast + apply (rule hoare_strengthen_post) + apply (rule mapM_wp) + apply (wp | simp | blast)+ + done + +crunches transferCaps, setMRs, copyMRs, setMessageInfo + for aligned'[wp]: pspace_aligned' + and distinct'[wp]: pspace_distinct' + and pspace_canonical'[wp]: pspace_canonical' + (wp: crunch_wps simp: crunch_simps) + +lemma set_mrs_valid_objs' [wp]: + "\valid_objs'\ setMRs t a msgs \\rv. valid_objs'\" + apply (simp add: setMRs_def zipWithM_x_mapM split_def) + apply (wp asUser_valid_objs crunch_wps) + done + +crunch valid_objs'[wp]: copyMRs valid_objs' + (wp: crunch_wps simp: crunch_simps) + +lemma setMRs_invs_bits[wp]: + "\valid_pspace'\ setMRs t buf mrs \\rv. valid_pspace'\" + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setMRs t buf mrs \\rv s. sch_act_wf (ksSchedulerAction s) s\" + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setMRs t buf mrs \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + "\P. setMRs t buf mrs \\s. P (state_refs_of' s)\" + "\P. setMRs t buf mrs \\s. P (state_hyp_refs_of' s)\" + "\if_live_then_nonz_cap'\ setMRs t buf mrs \\rv. if_live_then_nonz_cap'\" + "\ex_nonz_cap_to' p\ setMRs t buf mrs \\rv. ex_nonz_cap_to' p\" + "\cur_tcb'\ setMRs t buf mrs \\rv. cur_tcb'\" + "\if_unsafe_then_cap'\ setMRs t buf mrs \\rv. if_unsafe_then_cap'\" + by (simp add: setMRs_def zipWithM_x_mapM split_def storeWordUser_def | wp crunch_wps)+ + +crunch no_0_obj'[wp]: setMRs no_0_obj' + (wp: crunch_wps simp: crunch_simps) + +lemma copyMRs_invs_bits[wp]: + "\valid_pspace'\ copyMRs s sb r rb n \\rv. valid_pspace'\" + "\\s. sch_act_wf (ksSchedulerAction s) s\ copyMRs s sb r rb n + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + "\P. copyMRs s sb r rb n \\s. P (state_refs_of' s)\" + "\P. copyMRs s sb r rb n \\s. P (state_hyp_refs_of' s)\" + "\if_live_then_nonz_cap'\ copyMRs s sb r rb n \\rv. if_live_then_nonz_cap'\" + "\ex_nonz_cap_to' p\ copyMRs s sb r rb n \\rv. ex_nonz_cap_to' p\" + "\cur_tcb'\ copyMRs s sb r rb n \\rv. cur_tcb'\" + "\if_unsafe_then_cap'\ copyMRs s sb r rb n \\rv. 
if_unsafe_then_cap'\" + by (simp add: copyMRs_def storeWordUser_def | wp mapM_wp' | wpc)+ + +crunch no_0_obj'[wp]: copyMRs no_0_obj' + (wp: crunch_wps simp: crunch_simps) + +lemma mi_map_length[simp]: "msgLength (message_info_map mi) = mi_length mi" + by (cases mi, simp) + +crunch cte_wp_at'[wp]: copyMRs "cte_wp_at' P p" + (wp: crunch_wps) + +lemma lookupExtraCaps_srcs[wp]: + "\\\ lookupExtraCaps thread buf info \transferCaps_srcs\,-" + apply (simp add: lookupExtraCaps_def lookupCapAndSlot_def + split_def lookupSlotForThread_def + getSlotCap_def) + apply (wp mapME_set[where R=\] getCTE_wp') + apply (rule_tac P=\ in hoare_trivE_R) + apply (simp add: cte_wp_at_ctes_of) + apply (wp | simp)+ + done + +crunch inv[wp]: lookupExtraCaps "P" + (wp: crunch_wps mapME_wp' simp: crunch_simps) + +lemma invs_mdb_strengthen': + "invs' s \ valid_mdb' s" by auto + +lemma lookupExtraCaps_length: + "\\s. unat (msgExtraCaps mi) \ n\ lookupExtraCaps thread send_buf mi \\rv s. length rv \ n\,-" + apply (simp add: lookupExtraCaps_def getExtraCPtrs_def) + apply (rule hoare_pre) + apply (wp mapME_length | wpc)+ + apply (clarsimp simp: upto_enum_step_def Suc_unat_diff_1 word_le_sub1) + done + +lemma getMessageInfo_msgExtraCaps[wp]: + "\\\ getMessageInfo t \\rv s. unat (msgExtraCaps rv) \ msgMaxExtraCaps\" + apply (simp add: getMessageInfo_def) + apply wp + apply (simp add: messageInfoFromWord_def Let_def msgMaxExtraCaps_def + shiftL_nat) + apply (subst nat_le_Suc_less_imp) + apply (rule unat_less_power) + apply (simp add: word_bits_def msgExtraCapBits_def) + apply (rule and_mask_less'[unfolded mask_2pm1]) + apply (simp add: msgExtraCapBits_def) + apply wpsimp+ + done + +lemma lookupCapAndSlot_corres: + "cptr = to_bl cptr' \ + corres (lfr \ (\a b. cap_relation (fst a) (fst b) \ snd b = cte_map (snd a))) + (valid_objs and pspace_aligned and tcb_at thread) + (valid_objs' and pspace_distinct' and pspace_aligned' and tcb_at' thread) + (lookup_cap_and_slot thread cptr) (lookupCapAndSlot thread cptr')" + unfolding lookup_cap_and_slot_def lookupCapAndSlot_def + apply (simp add: liftE_bindE split_def) + apply (rule corres_guard_imp) + apply (rule_tac r'="\rv rv'. rv' = cte_map (fst rv)" + in corres_splitEE) + apply (rule corres_rel_imp, rule lookupSlotForThread_corres) + apply (simp add: split_def) + apply (rule corres_split[OF getSlotCap_corres]) + apply simp + apply (rule corres_returnOkTT, simp) + apply wp+ + apply (wp | simp add: liftE_bindE[symmetric])+ + done + +lemma lookupExtraCaps_corres: + "\ info' = message_info_map info; buffer = buffer'\ \ + corres (fr \ list_all2 (\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x))) + (valid_objs and pspace_aligned and tcb_at thread and (\_. 
valid_message_info info)) + (valid_objs' and pspace_distinct' and pspace_aligned' and tcb_at' thread + and case_option \ valid_ipc_buffer_ptr' buffer') + (lookup_extra_caps thread buffer info) (lookupExtraCaps thread buffer' info')" + unfolding lookupExtraCaps_def lookup_extra_caps_def + apply (rule corres_gen_asm) + apply (cases "mi_extra_caps info = 0") + apply (cases info) + apply (simp add: Let_def returnOk_def getExtraCPtrs_def + liftE_bindE upto_enum_step_def mapM_def + sequence_def doMachineOp_return mapME_Nil + split: option.split) + apply (cases info) + apply (rename_tac w1 w2 w3 w4) + apply (simp add: Let_def liftE_bindE) + apply (cases buffer') + apply (simp add: getExtraCPtrs_def mapME_Nil) + apply (rule corres_returnOk) + apply simp + apply (simp add: msgLengthBits_def msgMaxLength_def word_size field_simps + getExtraCPtrs_def upto_enum_step_def upto_enum_word + word_size_def msg_max_length_def liftM_def + Suc_unat_diff_1 word_le_sub1 mapM_map_simp + upt_lhs_sub_map[where x=buffer_cptr_index] + wordSize_def wordBits_def + del: upt.simps) + apply (rule corres_guard_imp) + apply (rule corres_underlying_split) + + apply (rule_tac S = "\x y. x = y \ x < unat w2" + in corres_mapM_list_all2 + [where Q = "\_. valid_objs and pspace_aligned and tcb_at thread" and r = "(=)" + and Q' = "\_. valid_objs' and pspace_aligned' and pspace_distinct' and tcb_at' thread + and case_option \ valid_ipc_buffer_ptr' buffer'" and r'="(=)" ]) + apply simp + apply simp + apply simp + apply (rule corres_guard_imp) + apply (rule loadWordUser_corres') + apply (clarsimp simp: buffer_cptr_index_def msg_max_length_def + max_ipc_words valid_message_info_def + msg_max_extra_caps_def word_le_nat_alt) + apply (simp add: buffer_cptr_index_def msg_max_length_def) + apply simp + apply simp + apply (simp add: load_word_offs_word_def) + apply (wp | simp)+ + apply (subst list_all2_same) + apply (clarsimp simp: max_ipc_words field_simps) + apply (simp add: mapME_def, fold mapME_def)[1] + apply (rule corres_mapME [where S = Id and r'="(\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x))"]) + apply simp + apply simp + apply simp + apply (rule corres_cap_fault [OF lookupCapAndSlot_corres]) + apply simp + apply simp + apply (wp | simp)+ + apply (simp add: set_zip_same Int_lower1) + apply (wp mapM_wp [OF _ subset_refl] | simp)+ + done + +crunch ctes_of[wp]: copyMRs "\s. P (ctes_of s)" + (ignore: threadSet + wp: threadSet_ctes_of crunch_wps) + +lemma copyMRs_valid_mdb[wp]: + "\valid_mdb'\ copyMRs t buf t' buf' n \\rv. valid_mdb'\" + by (simp add: valid_mdb'_def copyMRs_ctes_of) + +lemma doNormalTransfer_corres: + "corres dc + (tcb_at sender and tcb_at receiver and (pspace_aligned:: det_state \ bool) + and valid_objs and cur_tcb and valid_mdb and valid_list and pspace_distinct + and (\s. case ep of Some x \ ep_at x s | _ \ True) + and case_option \ in_user_frame send_buf + and case_option \ in_user_frame recv_buf) + (tcb_at' sender and tcb_at' receiver and valid_objs' + and pspace_aligned' and pspace_distinct' and pspace_canonical' and cur_tcb' + and valid_mdb' and no_0_obj' + and (\s. 
case ep of Some x \ ep_at' x s | _ \ True) + and case_option \ valid_ipc_buffer_ptr' send_buf + and case_option \ valid_ipc_buffer_ptr' recv_buf) + (do_normal_transfer sender send_buf ep badge can_grant receiver recv_buf) + (doNormalTransfer sender send_buf ep badge can_grant receiver recv_buf)" + apply (simp add: do_normal_transfer_def doNormalTransfer_def) + apply (rule corres_guard_imp) + apply (rule corres_split_mapr[OF getMessageInfo_corres]) + apply (rule_tac F="valid_message_info mi" in corres_gen_asm) + apply (rule_tac r'="list_all2 (\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x))" + in corres_split) + apply (rule corres_if[OF refl]) + apply (rule corres_split_catch) + apply (rule lookupExtraCaps_corres; simp) + apply (rule corres_trivial, simp) + apply wp+ + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_split_eqr[OF copyMRs_corres]) + apply (rule corres_split) + apply (rule transferCaps_corres; simp) + apply (rename_tac mi' mi'') + apply (rule_tac F="mi_label mi' = mi_label mi" + in corres_gen_asm) + apply (rule corres_split_nor[OF setMessageInfo_corres]) + apply (case_tac mi', clarsimp) + apply (simp add: badge_register_def badgeRegister_def) + apply (fold dc_def) + apply (rule asUser_setRegister_corres) + apply wp + apply simp+ + apply ((wp valid_case_option_post_wp hoare_vcg_const_Ball_lift + hoare_case_option_wp + hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' + hoare_vcg_const_Ball_lift lookupExtraCaps_length + | simp add: if_apply_def2)+) + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ + apply clarsimp + apply auto + done + +lemma corres_liftE_lift: + "corres r1 P P' m m' \ + corres (f1 \ r1) P P' (liftE m) (withoutFailure m')" + by simp + +lemmas corres_ipc_thread_helper = + corres_split_eqrE[OF corres_liftE_lift [OF getCurThread_corres]] + +lemmas corres_ipc_info_helper = + corres_split_maprE [where f = message_info_map, OF _ + corres_liftE_lift [OF getMessageInfo_corres]] + +crunch typ_at'[wp]: doNormalTransfer "\s. P (typ_at' T p s)" + +lemmas doNormal_lifts[wp] = typ_at_lifts [OF doNormalTransfer_typ_at'] + +lemma doNormal_invs'[wp]: + "\tcb_at' sender and tcb_at' receiver and invs'\ + doNormalTransfer sender send_buf ep badge + can_grant receiver recv_buf \\r. invs'\" + apply (simp add: doNormalTransfer_def) + apply (wp hoare_vcg_const_Ball_lift | simp)+ + done + +crunch aligned'[wp]: doNormalTransfer pspace_aligned' + (wp: crunch_wps) +crunch distinct'[wp]: doNormalTransfer pspace_distinct' + (wp: crunch_wps) + +lemma transferCaps_urz[wp]: + "\untyped_ranges_zero' and valid_pspace' + and (\s. (\x\set caps. cte_wp_at' (\cte. fst x \ capability.NullCap \ cteCap cte = fst x) (snd x) s))\ + transferCaps tag caps ep receiver recv_buf + \\r. untyped_ranges_zero'\" + apply (simp add: transferCaps_def) + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift hoare_vcg_const_imp_lift + | wpc + | simp add: ball_conj_distrib)+ + apply clarsimp + done + +crunch gsUntypedZeroRanges[wp]: doNormalTransfer "\s. 
P (gsUntypedZeroRanges s)" + (wp: crunch_wps transferCapsToSlots_pres1 ignore: constOnFailure) + +lemmas asUser_urz = untyped_ranges_zero_lift[OF asUser_gsUntypedZeroRanges] + +crunch urz[wp]: doNormalTransfer "untyped_ranges_zero'" + (ignore: asUser wp: crunch_wps asUser_urz hoare_vcg_const_Ball_lift) + +lemma msgFromLookupFailure_map[simp]: + "msgFromLookupFailure (lookup_failure_map lf) + = msg_from_lookup_failure lf" + by (cases lf, simp_all add: lookup_failure_map_def msgFromLookupFailure_def) + +lemma asUser_getRestartPC_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t getRestartPC) (asUser t getRestartPC)" + apply (rule asUser_corres') + apply (rule corres_Id, simp, simp) + apply (rule no_fail_getRestartPC) + done + +lemma asUser_mapM_getRegister_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (mapM getRegister regs)) + (asUser t (mapM getRegister regs))" + apply (rule asUser_corres') + apply (rule corres_Id [OF refl refl]) + apply (rule no_fail_mapM) + apply (simp add: getRegister_def) + done + +lemma makeArchFaultMessage_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (make_arch_fault_msg f t) + (makeArchFaultMessage (arch_fault_map f) t)" + apply (cases f; clarsimp simp: makeArchFaultMessage_def ucast_nat_def split: arch_fault.split) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) + apply (rule corres_trivial, simp) + apply (wp+, auto) + done + +lemma makeFaultMessage_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (make_fault_msg ft t) + (makeFaultMessage (fault_map ft) t)" + apply (cases ft, simp_all add: makeFaultMessage_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) + apply (rule corres_trivial, simp add: fromEnum_def enum_bool) + apply (wp | simp)+ + apply (simp add: AARCH64_H.syscallMessage_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_mapM_getRegister_corres]) + apply (rule corres_trivial, simp) + apply (wp | simp)+ + apply (simp add: AARCH64_H.exceptionMessage_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_mapM_getRegister_corres]) + apply (rule corres_trivial, simp) + apply (wp | simp)+ + apply (rule makeArchFaultMessage_corres) + done + +lemma makeFaultMessage_inv[wp]: + "\P\ makeFaultMessage ft t \\rv. P\" + apply (cases ft, simp_all add: makeFaultMessage_def) + apply (wp asUser_inv mapM_wp' det_mapM[where S=UNIV] + det_getRestartPC getRestartPC_inv + | clarsimp simp: getRegister_def makeArchFaultMessage_def + split: arch_fault.split)+ + done + +lemmas threadget_fault_corres = + threadGet_corres [where r = fault_rel_optionation + and f = tcb_fault and f' = tcbFault, + simplified tcb_relation_def, simplified] + +lemma make_fault_msg_in_user_frame[wp]: + "make_fault_msg f t \in_user_frame p\" + supply if_split[split del] + apply (cases f; wpsimp) + apply (rename_tac af; case_tac af; wpsimp) + done + +lemma doFaultTransfer_corres: + "corres dc + (obj_at (\ko. \tcb ft. 
ko = TCB tcb \ tcb_fault tcb = Some ft) sender + and tcb_at receiver and case_option \ in_user_frame recv_buf + and pspace_aligned and pspace_distinct) + (case_option \ valid_ipc_buffer_ptr' recv_buf) + (do_fault_transfer badge sender receiver recv_buf) + (doFaultTransfer badge sender receiver recv_buf)" + apply (clarsimp simp: do_fault_transfer_def doFaultTransfer_def split_def + AARCH64_H.badgeRegister_def badge_register_def) + apply (rule_tac Q="\fault. K (\f. fault = Some f) and + tcb_at sender and tcb_at receiver and + case_option \ in_user_frame recv_buf and + pspace_aligned and pspace_distinct" + and Q'="\fault'. case_option \ valid_ipc_buffer_ptr' recv_buf" + in corres_underlying_split) + apply (rule corres_guard_imp) + apply (rule threadget_fault_corres) + apply (clarsimp simp: obj_at_def is_tcb)+ + apply (rule corres_assume_pre) + apply (fold assert_opt_def | unfold haskell_fail_def)+ + apply (rule corres_assert_opt_assume) + apply (clarsimp split: option.splits + simp: fault_rel_optionation_def assert_opt_def + map_option_case) + defer + defer + apply (clarsimp simp: fault_rel_optionation_def) + apply (wp thread_get_wp) + apply (clarsimp simp: obj_at_def is_tcb) + apply wp + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF makeFaultMessage_corres]) + apply (rule corres_split_eqr[OF setMRs_corres [OF refl]]) + apply (rule corres_split_nor[OF setMessageInfo_corres]) + apply simp + apply (rule asUser_setRegister_corres) + apply (wp | simp)+ + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF makeFaultMessage_corres]) + apply (rule corres_split_eqr[OF setMRs_corres [OF refl]]) + apply (rule corres_split_nor[OF setMessageInfo_corres]) + apply simp + apply (rule asUser_setRegister_corres) + apply (wp | simp)+ + done + +lemma doFaultTransfer_invs[wp]: + "\invs' and tcb_at' receiver\ + doFaultTransfer badge sender receiver recv_buf + \\rv. 
invs'\" + by (simp add: doFaultTransfer_def split_def | wp + | clarsimp split: option.split)+ + +lemma lookupIPCBuffer_valid_ipc_buffer [wp]: + "\valid_objs'\ VSpace_H.lookupIPCBuffer b s \case_option \ valid_ipc_buffer_ptr'\" + unfolding lookupIPCBuffer_def AARCH64_H.lookupIPCBuffer_def + apply (simp add: Let_def getSlotCap_def getThreadBufferSlot_def + locateSlot_conv threadGet_def comp_def) + apply (wp getCTE_wp getObject_tcb_wp | wpc)+ + apply (clarsimp simp del: imp_disjL) + apply (drule obj_at_ko_at') + apply (clarsimp simp del: imp_disjL) + apply (rule_tac x = ko in exI) + apply (frule ko_at_cte_ipcbuffer) + apply (clarsimp simp: cte_wp_at_ctes_of simp del: imp_disjL) + apply (rename_tac ref rg sz d m) + apply (clarsimp simp: valid_ipc_buffer_ptr'_def) + apply (frule (1) ko_at_valid_objs') + apply (clarsimp simp: projectKO_opts_defs split: kernel_object.split_asm) + apply (clarsimp simp add: valid_obj'_def valid_tcb'_def + isCap_simps cte_level_bits_def field_simps) + apply (drule bspec [OF _ ranI [where a = "4 << cteSizeBits"]]) + apply (simp add: cteSizeBits_def) + apply (clarsimp simp add: valid_cap'_def frame_at'_def) + apply (rule conjI) + apply (rule aligned_add_aligned) + apply (clarsimp simp add: capAligned_def) + apply assumption + apply (erule is_aligned_andI1) + apply (rule order_trans[rotated]) + apply (rule pbfs_atleast_pageBits) + apply (simp add: bit_simps msg_align_bits) + apply (clarsimp simp: capAligned_def) + apply (drule_tac x = "(tcbIPCBuffer ko && mask (pageBitsForSize sz)) >> pageBits" in spec) + apply (simp add: shiftr_shiftl1 ) + apply (subst (asm) mask_out_add_aligned) + apply (erule is_aligned_weaken [OF _ pbfs_atleast_pageBits]) + apply (erule mp) + apply (rule shiftr_less_t2n) + apply (clarsimp simp: pbfs_atleast_pageBits) + apply (rule and_mask_less') + apply (simp add: word_bits_conv pbfs_less_wb'[unfolded word_bits_conv]) + done + +lemma doIPCTransfer_corres: + "corres dc + (tcb_at s and tcb_at r and valid_objs and pspace_aligned + and valid_list + and pspace_distinct and valid_mdb and cur_tcb + and (\s. case ep of Some x \ ep_at x s | _ \ True)) + (tcb_at' s and tcb_at' r and valid_pspace' and cur_tcb' + and (\s. case ep of Some x \ ep_at' x s | _ \ True)) + (do_ipc_transfer s ep bg grt r) + (doIPCTransfer s ep bg grt r)" + apply (simp add: do_ipc_transfer_def doIPCTransfer_def) + apply (rule_tac Q="%receiveBuffer sa. tcb_at s sa \ valid_objs sa \ + pspace_aligned sa \ tcb_at r sa \ + cur_tcb sa \ valid_mdb sa \ valid_list sa \ pspace_distinct sa \ + (case ep of None \ True | Some x \ ep_at x sa) \ + case_option (\_. True) in_user_frame receiveBuffer sa \ + obj_at (\ko. \tcb. ko = TCB tcb + \ \\ft. 
tcb_fault tcb = Some ft\) s sa" + in corres_underlying_split) + apply (rule corres_guard_imp) + apply (rule lookupIPCBuffer_corres') + apply auto[2] + apply (rule corres_underlying_split [OF _ _ thread_get_sp threadGet_inv]) + apply (rule corres_guard_imp) + apply (rule threadget_fault_corres) + apply simp + defer + apply (rule corres_guard_imp) + apply (subst case_option_If)+ + apply (rule corres_if2) + apply (simp add: fault_rel_optionation_def) + apply (rule corres_split_eqr[OF lookupIPCBuffer_corres']) + apply (simp add: dc_def[symmetric]) + apply (rule doNormalTransfer_corres) + apply (wp | simp add: valid_pspace'_def)+ + apply (simp add: dc_def[symmetric]) + apply (rule doFaultTransfer_corres) + apply (clarsimp simp: obj_at_def) + apply (erule ignore_if) + apply (wp|simp add: obj_at_def is_tcb valid_pspace'_def)+ + done + + +crunch ifunsafe[wp]: doIPCTransfer "if_unsafe_then_cap'" + (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' ignore: transferCapsToSlots + simp: zipWithM_x_mapM ball_conj_distrib ) +crunch iflive[wp]: doIPCTransfer "if_live_then_nonz_cap'" + (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' ignore: transferCapsToSlots + simp: zipWithM_x_mapM ball_conj_distrib ) +crunch vp[wp]: doIPCTransfer "valid_pspace'" + (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' wp: transferCapsToSlots_vp simp:ball_conj_distrib ) +crunch sch_act_wf[wp]: doIPCTransfer "\s. sch_act_wf (ksSchedulerAction s) s" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch state_refs_of[wp]: doIPCTransfer "\s. P (state_refs_of' s)" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch state_hyp_refs_of[wp]: doIPCTransfer "\s. P (state_hyp_refs_of' s)" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch ct[wp]: doIPCTransfer "cur_tcb'" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch idle'[wp]: doIPCTransfer "valid_idle'" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) + +crunch typ_at'[wp]: doIPCTransfer "\s. P (typ_at' T p s)" + (wp: crunch_wps simp: zipWithM_x_mapM) +lemmas dit'_typ_ats[wp] = typ_at_lifts [OF doIPCTransfer_typ_at'] + +crunch irq_node'[wp]: doIPCTransfer "\s. P (irq_node' s)" + (wp: crunch_wps simp: crunch_simps) + +lemmas dit_irq_node'[wp] + = valid_irq_node_lift [OF doIPCTransfer_irq_node' doIPCTransfer_typ_at'] + +crunch valid_arch_state'[wp]: doIPCTransfer "valid_arch_state'" + (wp: crunch_wps simp: crunch_simps) + +(* Levity: added (20090126 19:32:26) *) +declare asUser_global_refs' [wp] + +lemma lec_valid_cap' [wp]: + "\valid_objs'\ lookupExtraCaps thread xa mi \\rv s. (\x\set rv. s \' fst x)\, -" + apply (rule hoare_pre, rule hoare_strengthen_postE_R) + apply (rule hoare_vcg_conj_lift_R[where R=valid_objs' and S="\_. 
valid_objs'"]) + apply (rule lookupExtraCaps_srcs) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply fastforce + apply simp + done + +crunch objs'[wp]: doIPCTransfer "valid_objs'" + ( wp: crunch_wps hoare_vcg_const_Ball_lift + transferCapsToSlots_valid_objs + simp: zipWithM_x_mapM ball_conj_distrib ) + +crunch global_refs'[wp]: doIPCTransfer "valid_global_refs'" + (wp: crunch_wps hoare_vcg_const_Ball_lift threadSet_global_refsT + transferCapsToSlots_valid_globals + simp: zipWithM_x_mapM ball_conj_distrib) + +declare asUser_irq_handlers' [wp] + +crunch irq_handlers'[wp]: doIPCTransfer "valid_irq_handlers'" + (wp: crunch_wps hoare_vcg_const_Ball_lift threadSet_irq_handlers' + transferCapsToSlots_irq_handlers + simp: zipWithM_x_mapM ball_conj_distrib ) + +crunch irq_states'[wp]: doIPCTransfer "valid_irq_states'" + (wp: crunch_wps no_irq no_irq_mapM no_irq_storeWord no_irq_loadWord + no_irq_case_option simp: crunch_simps zipWithM_x_mapM) + +crunch irqs_masked'[wp]: doIPCTransfer "irqs_masked'" + (wp: crunch_wps simp: crunch_simps rule: irqs_masked_lift) + +lemma doIPCTransfer_invs[wp]: + "\invs' and tcb_at' s and tcb_at' r\ + doIPCTransfer s ep bg grt r + \\rv. invs'\" + apply (simp add: doIPCTransfer_def) + apply (wpsimp wp: hoare_drop_imp) + done + + +lemma arch_getSanitiseRegisterInfo_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_get_sanitise_register_info t) + (getSanitiseRegisterInfo t)" + unfolding arch_get_sanitise_register_info_def getSanitiseRegisterInfo_def + apply (fold archThreadGet_def) + apply corres + done + +crunch tcb_at'[wp]: getSanitiseRegisterInfo "tcb_at' t" + +crunches arch_get_sanitise_register_info + for pspace_distinct[wp]: pspace_distinct + and pspace_aligned[wp]: pspace_aligned + +lemma handle_fault_reply_registers_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (do t' \ arch_get_sanitise_register_info t; + y \ as_user t + (zipWithM_x + (\r v. setRegister r + (sanitise_register t' r v)) + msg_template msg); + return (label = 0) + od) + (do t' \ getSanitiseRegisterInfo t; + y \ asUser t + (zipWithM_x + (\r v. setRegister r (sanitiseRegister t' r v)) + msg_template msg); + return (label = 0) + od)" + apply (rule corres_guard_imp) + apply (rule corres_split[OF arch_getSanitiseRegisterInfo_corres]) + apply (rule corres_split) + apply (rule asUser_corres') + apply(simp add: setRegister_def sanitise_register_def + sanitiseRegister_def syscallMessage_def Let_def cong: register.case_cong) + apply(subst zipWithM_x_modify)+ + apply(rule corres_modify') + apply (simp|wp)+ + done + +lemma handleFaultReply_corres: + "ft' = fault_map ft \ + corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (handle_fault_reply ft t label msg) + (handleFaultReply ft' t label msg)" + apply (cases ft) + apply(simp_all add: handleFaultReply_def + handle_arch_fault_reply_def handleArchFaultReply_def + syscallMessage_def exceptionMessage_def + split: arch_fault.split) + by (rule handle_fault_reply_registers_corres)+ + +crunch typ_at'[wp]: handleFaultReply "\s. P (typ_at' T p s)" + +lemmas hfr_typ_ats[wp] = typ_at_lifts [OF handleFaultReply_typ_at'] + +crunch ct'[wp]: handleFaultReply "\s. P (ksCurThread s)" + +lemma doIPCTransfer_sch_act_simple [wp]: + "\sch_act_simple\ doIPCTransfer sender endpoint badge grant receiver \\_. sch_act_simple\" + by (simp add: sch_act_simple_def, wp) + +lemma possibleSwitchTo_invs'[wp]: + "\invs' and st_tcb_at' runnable' t + and (\s. 
ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + possibleSwitchTo t \\_. invs'\" + apply (simp add: possibleSwitchTo_def curDomain_def) + apply (wp tcbSchedEnqueue_invs' ssa_invs') + apply (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt]) + apply (wpsimp wp: ssa_invs' threadGet_wp)+ + apply (clarsimp dest!: obj_at_ko_at' simp: tcb_in_cur_domain'_def obj_at'_def) + done + +crunch cur' [wp]: isFinalCapability "\s. P (cur_tcb' s)" + (simp: crunch_simps unless_when + wp: crunch_wps getObject_inv loadObject_default_inv) + +crunch ct' [wp]: deleteCallerCap "\s. P (ksCurThread s)" + (simp: crunch_simps unless_when + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma getThreadCallerSlot_inv: + "\P\ getThreadCallerSlot t \\_. P\" + by (simp add: getThreadCallerSlot_def, wp) + +lemma finaliseCapTrue_standin_tcb_at' [wp]: + "\tcb_at' x\ finaliseCapTrue_standin cap v2 \\_. tcb_at' x\" + apply (simp add: finaliseCapTrue_standin_def Let_def) + apply (safe) + apply (wp getObject_ntfn_inv + | wpc + | simp)+ + done + +lemma finaliseCapTrue_standin_cur': + "\\s. cur_tcb' s\ finaliseCapTrue_standin cap v2 \\_ s'. cur_tcb' s'\" + apply (simp add: cur_tcb'_def) + apply (rule hoare_lift_Pf2 [OF _ finaliseCapTrue_standin_ct']) + apply (wp) + done + +lemma cteDeleteOne_cur' [wp]: + "\\s. cur_tcb' s\ cteDeleteOne slot \\_ s'. cur_tcb' s'\" + apply (simp add: cteDeleteOne_def unless_def when_def) + apply (wp hoare_drop_imps finaliseCapTrue_standin_cur' isFinalCapability_cur' + | simp add: split_def | wp (once) cur_tcb_lift)+ + done + +lemma handleFaultReply_cur' [wp]: + "\\s. cur_tcb' s\ handleFaultReply x0 thread label msg \\_ s'. cur_tcb' s'\" + apply (clarsimp simp add: cur_tcb'_def) + apply (rule hoare_lift_Pf2 [OF _ handleFaultReply_ct']) + apply (wp) + done + +lemma capClass_Reply: + "capClass cap = ReplyClass tcb \ isReplyCap cap \ capTCBPtr cap = tcb" + apply (cases cap, simp_all add: isCap_simps) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, simp_all) + done + +lemma reply_cap_end_mdb_chain: + "\ cte_wp_at (is_reply_cap_to t) slot s; invs s; + invs' s'; + (s, s') \ state_relation; ctes_of s' (cte_map slot) = Some cte \ + \ (mdbPrev (cteMDBNode cte) \ nullPointer + \ mdbNext (cteMDBNode cte) = nullPointer) + \ cte_wp_at' (\cte. isReplyCap (cteCap cte) \ capReplyMaster (cteCap cte)) + (mdbPrev (cteMDBNode cte)) s'" + apply (clarsimp simp only: cte_wp_at_reply_cap_to_ex_rights) + apply (frule(1) pspace_relation_ctes_ofI[OF state_relation_pspace_relation], + clarsimp+) + apply (subgoal_tac "\slot' rights'. caps_of_state s slot' = Some (cap.ReplyCap t True rights') + \ descendants_of slot' (cdt s) = {slot}") + apply (elim state_relationE exE) + apply (clarsimp simp: cdt_relation_def + simp del: split_paired_All) + apply (drule spec, drule(1) mp[OF _ caps_of_state_cte_at]) + apply (frule(1) pspace_relation_cte_wp_at[OF _ caps_of_state_cteD], + clarsimp+) + apply (clarsimp simp: descendants_of'_def cte_wp_at_ctes_of) + apply (frule_tac f="\S. 
cte_map slot \ S" in arg_cong, simp(no_asm_use)) + apply (frule invs_mdb'[unfolded valid_mdb'_def]) + apply (rule context_conjI) + apply (clarsimp simp: nullPointer_def valid_mdb_ctes_def) + apply (erule(4) subtree_prev_0) + apply (rule conjI) + apply (rule ccontr) + apply (frule valid_mdb_no_loops, simp add: no_loops_def) + apply (drule_tac x="cte_map slot" in spec) + apply (erule notE, rule r_into_trancl, rule ccontr) + apply (clarsimp simp: mdb_next_unfold valid_mdb_ctes_def nullPointer_def) + apply (rule valid_dlistEn, assumption+) + apply (subgoal_tac "ctes_of s' \ cte_map slot \ mdbNext (cteMDBNode cte)") + apply (frule(3) class_linksD) + apply (clarsimp simp: isCap_simps dest!: capClass_Reply[OF sym]) + apply (drule_tac f="\S. mdbNext (cteMDBNode cte) \ S" in arg_cong) + apply (simp, erule notE, rule subtree.trans_parent, assumption+) + apply (case_tac ctea, case_tac cte') + apply (clarsimp simp add: parentOf_def isMDBParentOf_CTE) + apply (simp add: sameRegionAs_def2 isCap_simps) + apply (erule subtree.cases) + apply (clarsimp simp: parentOf_def isMDBParentOf_CTE) + apply (clarsimp simp: parentOf_def isMDBParentOf_CTE) + apply (simp add: mdb_next_unfold) + apply (erule subtree.cases) + apply (clarsimp simp: valid_mdb_ctes_def) + apply (erule_tac cte=ctea in valid_dlistEn, assumption) + apply (simp add: mdb_next_unfold) + apply (clarsimp simp: mdb_next_unfold isCap_simps) + apply (drule_tac f="\S. c' \ S" in arg_cong) + apply (clarsimp simp: no_loops_direct_simp valid_mdb_no_loops) + apply (frule invs_mdb) + apply (drule invs_valid_reply_caps) + apply (clarsimp simp: valid_mdb_def reply_mdb_def + valid_reply_caps_def reply_caps_mdb_def + cte_wp_at_caps_of_state + simp del: split_paired_All) + apply (erule_tac x=slot in allE, erule_tac x=t in allE, erule impE, fast) + apply (elim exEI) + apply clarsimp + apply (subgoal_tac "P" for P, rule sym, rule equalityI, assumption) + apply clarsimp + apply (erule(4) unique_reply_capsD) + apply (simp add: descendants_of_def) + apply (rule r_into_trancl) + apply (simp add: cdt_parent_rel_def is_cdt_parent_def) + done + +lemma unbindNotification_valid_objs'_strengthen: + "valid_tcb' tcb s \ valid_tcb' (tcbBoundNotification_update Map.empty tcb) s" + "valid_ntfn' ntfn s \ valid_ntfn' (ntfnBoundTCB_update Map.empty ntfn) s" + by (simp_all add: unbindNotification_valid_objs'_helper' unbindNotification_valid_objs'_helper) + +crunch valid_objs'[wp]: cteDeleteOne "valid_objs'" + (simp: crunch_simps unless_def + wp: crunch_wps getObject_inv loadObject_default_inv) + +crunch nosch[wp]: handleFaultReply "\s. P (ksSchedulerAction s)" + +lemma emptySlot_weak_sch_act[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + emptySlot slot irq + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + by (wp weak_sch_act_wf_lift tcb_in_cur_domain'_lift) + +lemma cancelAllIPC_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + cancelAllIPC epptr + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: cancelAllIPC_def) + apply (wp rescheduleRequired_weak_sch_act_wf hoare_drop_imp | wpc | simp)+ + done + +lemma cancelAllSignals_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + cancelAllSignals ntfnptr + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: cancelAllSignals_def) + apply (wp rescheduleRequired_weak_sch_act_wf hoare_drop_imp | wpc | simp)+ + done + +crunch weak_sch_act_wf[wp]: finaliseCapTrue_standin "\s. 
weak_sch_act_wf (ksSchedulerAction s) s" + (ignore: setThreadState + simp: crunch_simps + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma cteDeleteOne_weak_sch_act[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + cteDeleteOne sl + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: cteDeleteOne_def unless_def) + apply (wp hoare_drop_imps finaliseCapTrue_standin_cur' isFinalCapability_cur' + | simp add: split_def)+ + done + +crunch pred_tcb_at'[wp]: handleFaultReply "pred_tcb_at' proj P t" +crunch tcb_in_cur_domain'[wp]: handleFaultReply "tcb_in_cur_domain' t" + +crunch sch_act_wf[wp]: unbindNotification "\s. sch_act_wf (ksSchedulerAction s) s" +(wp: sbn_sch_act') + +crunch valid_objs'[wp]: handleFaultReply valid_objs' + +lemma cte_wp_at_is_reply_cap_toI: + "cte_wp_at ((=) (cap.ReplyCap t False rights)) ptr s + \ cte_wp_at (is_reply_cap_to t) ptr s" + by (fastforce simp: cte_wp_at_reply_cap_to_ex_rights) + +crunches handle_fault_reply + for pspace_alignedp[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +crunches cteDeleteOne, doIPCTransfer, handleFaultReply + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + +lemma doReplyTransfer_corres: + "corres dc + (einvs and tcb_at receiver and tcb_at sender + and cte_wp_at ((=) (cap.ReplyCap receiver False rights)) slot) + (invs' and tcb_at' sender and tcb_at' receiver + and valid_pspace' and cte_at' (cte_map slot)) + (do_reply_transfer sender receiver slot grant) + (doReplyTransfer sender receiver (cte_map slot) grant)" + apply (simp add: do_reply_transfer_def doReplyTransfer_def cong: option.case_cong) + apply (rule corres_underlying_split [OF _ _ gts_sp gts_sp']) + apply (rule corres_guard_imp) + apply (rule getThreadState_corres, (clarsimp simp add: st_tcb_at_tcb_at invs_distinct invs_psp_aligned)+) + apply (rule_tac F = "awaiting_reply state" in corres_req) + apply (clarsimp simp add: st_tcb_at_def obj_at_def is_tcb) + apply (fastforce simp: invs_def valid_state_def intro: has_reply_cap_cte_wpD + dest: has_reply_cap_cte_wpD + dest!: valid_reply_caps_awaiting_reply cte_wp_at_is_reply_cap_toI) + apply (case_tac state, simp_all add: bind_assoc) + apply (simp add: isReply_def liftM_def) + apply (rule corres_symb_exec_r[OF _ getCTE_sp getCTE_inv, rotated]) + apply (rule no_fail_pre, wp) + apply clarsimp + apply (rename_tac mdbnode) + apply (rule_tac P="Q" and Q="Q" and P'="Q'" and Q'="(\s. 
Q' s \ R' s)" for Q Q' R' + in stronger_corres_guard_imp[rotated]) + apply assumption + apply (rule conjI, assumption) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule cte_wp_at_is_reply_cap_toI) + apply (erule(4) reply_cap_end_mdb_chain) + apply (rule corres_assert_assume[rotated], simp) + apply (simp add: getSlotCap_def) + apply (rule corres_symb_exec_r[OF _ getCTE_sp getCTE_inv, rotated]) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule corres_assert_assume[rotated]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule corres_guard_imp) + apply (rule corres_split[OF threadget_fault_corres]) + apply (case_tac rv, simp_all add: fault_rel_optionation_def bind_assoc)[1] + apply (rule corres_split[OF doIPCTransfer_corres]) + apply (rule corres_split[OF cap_delete_one_corres]) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule possibleSwitchTo_corres) + apply (wp set_thread_state_runnable_valid_sched + set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' + sts_valid_objs' delete_one_tcbDomain_obj_at' + | simp add: valid_tcb_state'_def + | strengthen valid_queues_in_correct_ready_q valid_sched_valid_queues + valid_queues_ready_qs_distinct)+ + apply (strengthen cte_wp_at_reply_cap_can_fast_finalise) + apply (wp hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post [OF do_ipc_transfer_non_null_cte_wp_at]) + prefer 2 + apply (erule cte_wp_at_weakenE) + apply (fastforce) + apply (clarsimp simp:is_cap_simps) + apply (wp weak_valid_sched_action_lift)+ + apply (rule_tac Q="\_ s. valid_objs' s \ cur_tcb' s \ tcb_at' receiver s + \ sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp, simp add: sch_act_wf_weak) + apply (wp tcb_in_cur_domain'_lift) + defer + apply (simp) + apply (wp)+ + apply (clarsimp simp: invs_psp_aligned invs_distinct) + apply (rule conjI, erule invs_valid_objs) + apply (rule conjI, clarsimp)+ + apply (rule conjI) + apply (erule cte_wp_at_weakenE) + apply (clarsimp) + apply (rule conjI, rule refl) + apply (fastforce) + apply (clarsimp simp: invs_def valid_sched_def valid_sched_action_def invs_psp_aligned invs_distinct) + apply (simp) + apply (auto simp: invs'_def valid_state'_def)[1] + + apply (rule corres_guard_imp) + apply (rule corres_split[OF cap_delete_one_corres]) + apply (rule corres_split_mapr[OF getMessageInfo_corres]) + apply (rule corres_split_eqr[OF lookupIPCBuffer_corres']) + apply (rule corres_split_eqr[OF getMRs_corres]) + apply (simp(no_asm) del: dc_simp) + apply (rule corres_split_eqr[OF handleFaultReply_corres]) + apply simp + apply (rule corres_split) + apply (rule threadset_corresT; + clarsimp simp add: tcb_relation_def fault_rel_optionation_def cteSizeBits_def + tcb_cap_cases_def tcb_cte_cases_def exst_same_def) + apply (rule_tac Q="valid_sched and cur_tcb and tcb_at receiver and pspace_aligned and pspace_distinct" + and Q'="tcb_at' receiver and cur_tcb' + and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" + in corres_guard_imp) + apply (case_tac rvb, simp_all)[1] + apply (rule corres_guard_imp) + apply (rule corres_split[OF setThreadState_corres]) + apply (clarsimp simp: tcb_relation_def) + apply (fold dc_def, rule possibleSwitchTo_corres) + apply simp + apply (wp hoare_weak_lift_imp hoare_weak_lift_imp_conj set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' + | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ + apply (rule corres_guard_imp) + apply (rule setThreadState_corres) + apply (clarsimp simp: tcb_relation_def) + apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state + thread_set_not_state_valid_sched + threadSet_tcbDomain_triv threadSet_valid_objs' + threadSet_sched_pointers threadSet_valid_sched_pointers + | simp add: valid_tcb_state'_def)+ + apply (rule_tac Q="\_. valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and + valid_objs and pspace_aligned and pspace_distinct" + in hoare_strengthen_post [rotated], clarsimp) + apply (wp) + apply (rule hoare_chain [OF cap_delete_one_invs]) + apply (assumption) + apply (rule conjI, clarsimp) + apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def) + apply (rule_tac Q="\_. tcb_at' sender and tcb_at' receiver and invs'" + in hoare_strengthen_post [rotated]) + apply (solves\auto simp: invs'_def valid_state'_def\) + apply wp + apply clarsimp + apply (rule conjI) + apply (erule cte_wp_at_weakenE) + apply (clarsimp simp add: can_fast_finalise_def) + apply (erule(1) emptyable_cte_wp_atD) + apply (rule allI, rule impI) + apply (clarsimp simp add: is_master_reply_cap_def) + apply (clarsimp) + done + +(* when we cannot talk about reply cap rights explicitly (for instance, when a schematic ?rights + would be generated too early) *) +lemma doReplyTransfer_corres': + "corres dc + (einvs and tcb_at receiver and tcb_at sender + and cte_wp_at (is_reply_cap_to receiver) slot) + (invs' and tcb_at' sender and tcb_at' receiver + and valid_pspace' and cte_at' (cte_map slot)) + (do_reply_transfer sender receiver slot grant) + (doReplyTransfer sender receiver (cte_map slot) grant)" + using doReplyTransfer_corres[of receiver sender _ slot] + by (fastforce simp add: cte_wp_at_reply_cap_to_ex_rights corres_underlying_def) + +lemma valid_pspace'_splits[elim!]: (* FIXME AARCH64: clean up duplicates *) + "valid_pspace' s \ valid_objs' s" + "valid_pspace' s \ pspace_aligned' s" + "valid_pspace' s \ pspace_distinct' s" + "valid_pspace' s \ valid_mdb' s" + "valid_pspace' s \ no_0_obj' s" + by (simp add: valid_pspace'_def)+ + +lemma sts_valid_pspace_hangers: + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. valid_objs'\" + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. pspace_distinct'\" + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. pspace_aligned'\" + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. pspace_canonical'\" + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. valid_mdb'\" + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. 
no_0_obj'\" + by (safe intro!: hoare_strengthen_post [OF sts'_valid_pspace'_inv]) + +declare no_fail_getSlotCap [wp] + +lemma setupCallerCap_corres: + "corres dc + (st_tcb_at (Not \ halted) sender and tcb_at receiver and + st_tcb_at (Not \ awaiting_reply) sender and valid_reply_caps and + valid_objs and pspace_distinct and pspace_aligned and valid_mdb + and valid_list and + valid_reply_masters and cte_wp_at (\c. c = cap.NullCap) (receiver, tcb_cnode_index 3)) + (tcb_at' sender and tcb_at' receiver and valid_pspace' + and (\s. weak_sch_act_wf (ksSchedulerAction s) s)) + (setup_caller_cap sender receiver grant) + (setupCallerCap sender receiver grant)" + supply if_split[split del] + apply (simp add: setup_caller_cap_def setupCallerCap_def + getThreadReplySlot_def locateSlot_conv + getThreadCallerSlot_def) + apply (rule stronger_corres_guard_imp) + apply (rule corres_split_nor) + apply (rule setThreadState_corres) + apply (simp split: option.split) + apply (rule corres_symb_exec_r) + apply (rule_tac F="\r. cteCap masterCTE = capability.ReplyCap sender True r + \ mdbNext (cteMDBNode masterCTE) = nullPointer" + in corres_gen_asm2, clarsimp simp add: isCap_simps) + apply (rule corres_symb_exec_r) + apply (rule_tac F="rv = capability.NullCap" + in corres_gen_asm2, simp) + apply (rule cteInsert_corres) + apply (simp split: if_splits) + apply (simp add: cte_map_def tcbReplySlot_def + tcb_cnode_index_def cte_level_bits_def) + apply (simp add: cte_map_def tcbCallerSlot_def + tcb_cnode_index_def cte_level_bits_def) + apply (rule_tac R="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" + in hoare_post_add) + + apply (wp, (wp getSlotCap_wp)+) + apply blast + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at'_def cte_at'_def) + apply (rule_tac R="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" + in hoare_post_add) + apply (wp, (wp getCTE_wp')+) + apply blast + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp sts_valid_pspace_hangers + | simp add: cte_wp_at_ctes_of)+ + apply (clarsimp simp: valid_tcb_state_def st_tcb_at_reply_cap_valid + st_tcb_at_tcb_at st_tcb_at_caller_cap_null + split: option.split) + apply (clarsimp simp: valid_tcb_state'_def valid_cap'_def capAligned_reply_tcbI) + apply (frule(1) st_tcb_at_reply_cap_valid, simp, clarsimp) + apply (clarsimp simp: cte_wp_at_ctes_of cte_wp_at_caps_of_state) + apply (drule pspace_relation_cte_wp_at[rotated, OF caps_of_state_cteD], + erule valid_pspace'_splits, clarsimp+)+ + apply (clarsimp simp: cte_wp_at_ctes_of cte_map_def tcbReplySlot_def + tcbCallerSlot_def tcb_cnode_index_def + is_cap_simps) + apply (auto intro: reply_no_descendants_mdbNext_null[OF not_waiting_reply_slot_no_descendants] + simp: cte_level_bits_def) + done + +crunch tcb_at'[wp]: getThreadCallerSlot "tcb_at' t" + +lemma getThreadReplySlot_tcb_at'[wp]: + "\tcb_at' t\ getThreadReplySlot tcb \\_. tcb_at' t\" + by (simp add: getThreadReplySlot_def, wp) + +lemma setupCallerCap_tcb_at'[wp]: + "\tcb_at' t\ setupCallerCap sender receiver grant \\_. tcb_at' t\" + by (simp add: setupCallerCap_def, wp hoare_drop_imp) + +crunch ct'[wp]: setupCallerCap "\s. P (ksCurThread s)" + (wp: crunch_wps) + +lemma cteInsert_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + cteInsert newCap srcSlot destSlot + \\_ s. sch_act_wf (ksSchedulerAction s) s\" +by (wp sch_act_wf_lift tcb_in_cur_domain'_lift) + +lemma setupCallerCap_sch_act [wp]: + "\\s. sch_act_not t s \ sch_act_wf (ksSchedulerAction s) s\ + setupCallerCap t r g \\_ s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setupCallerCap_def getSlotCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv) + apply (wp getCTE_wp' sts_sch_act' hoare_drop_imps hoare_vcg_all_lift) + apply clarsimp + done + +lemma possibleSwitchTo_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s \ st_tcb_at' runnable' t s\ + possibleSwitchTo t \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: possibleSwitchTo_def setSchedulerAction_def threadGet_def curDomain_def + bitmap_fun_defs) + apply (wp rescheduleRequired_weak_sch_act_wf + weak_sch_act_wf_lift_linear[where f="tcbSchedEnqueue t"] + getObject_tcb_wp hoare_weak_lift_imp + | wpc)+ + apply (clarsimp simp: obj_at'_def weak_sch_act_wf_def ps_clear_def tcb_in_cur_domain'_def) + done + +lemmas transferCapsToSlots_pred_tcb_at' = + transferCapsToSlots_pres1 [OF cteInsert_pred_tcb_at'] + +crunches doIPCTransfer, possibleSwitchTo + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + (wp: mapM_wp' crunch_wps simp: zipWithM_x_mapM) + +lemma setSchedulerAction_ct_in_domain: + "\\s. ct_idle_or_in_cur_domain' s + \ p \ ResumeCurrentThread \ setSchedulerAction p + \\_. ct_idle_or_in_cur_domain'\" + by (simp add:setSchedulerAction_def | wp)+ + +crunches setupCallerCap, doIPCTransfer, possibleSwitchTo + for ct_idle_or_in_cur_domain'[wp]: ct_idle_or_in_cur_domain' + (wp: crunch_wps setSchedulerAction_ct_in_domain simp: zipWithM_x_mapM) +crunches setupCallerCap, doIPCTransfer, possibleSwitchTo + for ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + (wp: crunch_wps simp: zipWithM_x_mapM) + +crunch tcbDomain_obj_at'[wp]: doIPCTransfer "obj_at' (\tcb. P (tcbDomain tcb)) t" + (wp: crunch_wps constOnFailure_wp simp: crunch_simps) + +crunch tcb_at'[wp]: possibleSwitchTo "tcb_at' t" + (wp: crunch_wps) + +crunch valid_pspace'[wp]: possibleSwitchTo valid_pspace' + (wp: crunch_wps) + +lemma sendIPC_corres: +(* call is only true if called in handleSyscall SysCall, which + is always blocking. *) + assumes "call \ bl" + shows + "corres dc (einvs and st_tcb_at active t and ep_at ep and ex_nonz_cap_to t) + (invs' and sch_act_not t and tcb_at' t and ep_at' ep) + (send_ipc bl call bg cg cgr t ep) (sendIPC bl call bg cg cgr t ep)" +proof - + show ?thesis + apply (insert assms) + apply (unfold send_ipc_def sendIPC_def Let_def) + apply (case_tac bl) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres, + where + R="\rv. einvs and st_tcb_at active t and ep_at ep and + valid_ep rv and obj_at (\ob. ob = Endpoint rv) ep + and ex_nonz_cap_to t" + and + R'="\rv'. 
invs' and tcb_at' t and sch_act_not t + and ep_at' ep and valid_ep' rv'"]) + apply (case_tac rv) + apply (simp add: ep_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setEndpoint_corres) + apply (simp add: ep_relation_def) + apply wp+ + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_distinct) + apply clarsimp + \ \concludes IdleEP if bl branch\ + apply (simp add: ep_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setEndpoint_corres) + apply (simp add: ep_relation_def) + apply wp+ + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_distinct) + apply clarsimp + \ \concludes SendEP if bl branch\ + apply (simp add: ep_relation_def) + apply (rename_tac list) + apply (rule_tac F="list \ []" in corres_req) + apply (simp add: valid_ep_def) + apply (case_tac list) + apply simp + apply (clarsimp split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def split: list.split) + apply (simp add: isReceive_def split del:if_split) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac + F="\data. recv_state = Structures_A.BlockedOnReceive ep data" + in corres_gen_asm) + apply (clarsimp simp: case_bool_If case_option_If if3_fold + simp del: dc_simp split del: if_split cong: if_cong) + apply (rule corres_split[OF doIPCTransfer_corres]) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule corres_split[OF possibleSwitchTo_corres]) + apply (fold when_def)[1] + apply (rule_tac P="call" and P'="call" + in corres_symmetric_bool_cases, blast) + apply (simp add: when_def dc_def[symmetric] split del: if_split) + apply (rule corres_if2, simp) + apply (rule setupCallerCap_corres) + apply (rule setThreadState_corres, simp) + apply (rule corres_trivial) + apply (simp add: when_def dc_def[symmetric] split del: if_split) + apply (simp split del: if_split add: if_apply_def2) + apply (wp hoare_drop_imps)[1] + apply (simp split del: if_split add: if_apply_def2) + apply (wp hoare_drop_imps)[1] + apply (wp | simp)+ + apply (wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases) + apply (wp sts_weak_sch_act_wf sts_valid_objs' + sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases)[1] + apply (simp add: valid_tcb_state_def pred_conj_def) + apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues)+ + apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift + | clarsimp simp: is_cap_simps)+)[1] + apply (simp add: pred_conj_def) + apply (strengthen sch_act_wf_weak) + apply (simp add: valid_tcb_state'_def) + apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift hoare_drop_imps)[1] + apply (wp gts_st_tcb_at)+ + apply (simp add: pred_conj_def cong: conj_cong) + apply (wp hoare_TrueI) + apply (simp) + apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb')+ + apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def ep_redux_simps + ep_redux_simps' st_tcb_at_tcb_at valid_ep_def + cong: list.case_cong) + apply (drule(1) sym_refs_obj_atD[where P="\ob. 
ob = e" for e]) + apply (clarsimp simp: st_tcb_at_refs_of_rev st_tcb_at_reply_cap_valid + st_tcb_def2 valid_sched_def valid_sched_action_def) + apply (force simp: st_tcb_def2 dest!: st_tcb_at_caller_cap_null[simplified,rotated]) + subgoal by (auto simp: valid_ep'_def invs'_def valid_state'_def split: list.split) + apply wp+ + apply (clarsimp simp: ep_at_def2)+ + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres, + where + R="\rv. einvs and st_tcb_at active t and ep_at ep and + valid_ep rv and obj_at (\k. k = Endpoint rv) ep" + and + R'="\rv'. invs' and tcb_at' t and sch_act_not t + and ep_at' ep and valid_ep' rv'"]) + apply (rename_tac rv rv') + apply (case_tac rv) + apply (simp add: ep_relation_def) + \ \concludes IdleEP branch if not bl and no ft\ + apply (simp add: ep_relation_def) + \ \concludes SendEP branch if not bl and no ft\ + apply (simp add: ep_relation_def) + apply (rename_tac list) + apply (rule_tac F="list \ []" in corres_req) + apply (simp add: valid_ep_def) + apply (case_tac list) + apply simp + apply (rule_tac F="a \ t" in corres_req) + apply (clarsimp simp: invs_def valid_state_def + valid_pspace_def) + apply (drule(1) sym_refs_obj_atD[where P="\ob. ob = e" for e]) + apply (clarsimp simp: st_tcb_at_def obj_at_def tcb_bound_refs_def2) + apply fastforce + apply (clarsimp split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def split: list.split) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac + F="\data. recv_state = Structures_A.BlockedOnReceive ep data" + in corres_gen_asm) + apply (clarsimp simp: isReceive_def case_bool_If + split del: if_split cong: if_cong) + apply (rule corres_split[OF doIPCTransfer_corres]) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule possibleSwitchTo_corres) + apply (simp add: if_apply_def2) + apply ((wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases | + simp add: if_apply_def2 split del: if_split)+)[1] + apply (wp sts_weak_sch_act_wf sts_valid_objs' + sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases) + apply (simp add: valid_tcb_state_def pred_conj_def) + apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift + | clarsimp simp: is_cap_simps + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues )+)[1] + apply (simp add: valid_tcb_state'_def pred_conj_def) + apply (strengthen sch_act_wf_weak) + apply (wp weak_sch_act_wf_lift_linear hoare_drop_imps) + apply (wp gts_st_tcb_at)+ + apply (simp add: pred_conj_def cong: conj_cong) + apply (wp hoare_TrueI) + apply simp + apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb') + apply (clarsimp simp add: invs_def valid_state_def + valid_pspace_def ep_redux_simps ep_redux_simps' + st_tcb_at_tcb_at + cong: list.case_cong) + apply (clarsimp simp: valid_ep_def) + apply (drule(1) sym_refs_obj_atD[where P="\ob. 
ob = e" for e]) + apply (clarsimp simp: st_tcb_at_refs_of_rev st_tcb_at_reply_cap_valid + st_tcb_at_caller_cap_null) + apply (fastforce simp: st_tcb_def2 valid_sched_def valid_sched_action_def) + subgoal by (auto simp: valid_ep'_def + split: list.split; + clarsimp simp: invs'_def valid_state'_def) + apply wp+ + apply (clarsimp simp: ep_at_def2)+ + done +qed + +lemmas setMessageInfo_typ_ats[wp] = typ_at_lifts [OF setMessageInfo_typ_at'] + +(* Annotation added by Simon Winwood (Thu Jul 1 20:54:41 2010) using taint-mode *) +declare tl_drop_1[simp] + +crunch cur[wp]: cancel_ipc "cur_tcb" + (wp: crunch_wps simp: crunch_simps) + +lemma valid_sched_weak_strg: + "valid_sched s \ weak_valid_sched_action s" + by (simp add: valid_sched_def valid_sched_action_def) + +lemma sendSignal_corres: + "corres dc (einvs and ntfn_at ep) (invs' and ntfn_at' ep) + (send_signal ep bg) (sendSignal ep bg)" + apply (simp add: send_signal_def sendSignal_def Let_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getNotification_corres, + where + R = "\rv. einvs and ntfn_at ep and valid_ntfn rv and + ko_at (Structures_A.Notification rv) ep" and + R' = "\rv'. invs' and ntfn_at' ep and + valid_ntfn' rv' and ko_at' rv' ep"]) + defer + apply (wp get_simple_ko_ko_at get_ntfn_ko')+ + apply (simp add: invs_valid_objs)+ + apply (case_tac "ntfn_obj ntfn") + \ \IdleNtfn\ + apply (clarsimp simp add: ntfn_relation_def) + apply (case_tac "ntfnBoundTCB nTFN") + apply clarsimp + apply (rule corres_guard_imp[OF setNotification_corres]) + apply (clarsimp simp add: ntfn_relation_def)+ + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule corres_if) + apply (fastforce simp: receive_blocked_def receiveBlocked_def + thread_state_relation_def + split: Structures_A.thread_state.splits + Structures_H.thread_state.splits) + apply (rule corres_split[OF cancel_ipc_corres]) + apply (rule corres_split[OF setThreadState_corres]) + apply (clarsimp simp: thread_state_relation_def) + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule possibleSwitchTo_corres) + apply wp + apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' hoare_disjI2 + cancel_ipc_cte_wp_at_not_reply_state + | strengthen invs_vobjs_strgs invs_psp_aligned_strg valid_sched_weak_strg + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues + | simp add: valid_tcb_state_def)+ + apply (rule_tac Q="\rv. 
invs' and tcb_at' a" in hoare_strengthen_post) + apply wp + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak valid_tcb_state'_def) + apply (rule setNotification_corres) + apply (clarsimp simp add: ntfn_relation_def) + apply (wp gts_wp gts_wp' | clarsimp)+ + apply (auto simp: valid_ntfn_def receive_blocked_def valid_sched_def invs_cur + elim: pred_tcb_weakenE + intro: st_tcb_at_reply_cap_valid + split: Structures_A.thread_state.splits)[1] + apply (clarsimp simp: valid_ntfn'_def invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak) + \ \WaitingNtfn\ + apply (clarsimp simp add: ntfn_relation_def Let_def) + apply (simp add: update_waiting_ntfn_def) + apply (rename_tac list) + apply (case_tac "tl list = []") + \ \tl list = []\ + apply (rule corres_guard_imp) + apply (rule_tac F="list \ []" in corres_gen_asm) + apply (simp add: list_case_helper split del: if_split) + apply (rule corres_split[OF setNotification_corres]) + apply (simp add: ntfn_relation_def) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule possibleSwitchTo_corres) + apply ((wp | simp)+)[1] + apply (rule_tac Q="\_. (\s. sch_act_wf (ksSchedulerAction s) s) and + cur_tcb' and + st_tcb_at' runnable' (hd list) and valid_objs' and + sym_heap_sched_pointers and valid_sched_pointers and + pspace_aligned' and pspace_distinct'" + in hoare_post_imp, clarsimp simp: pred_tcb_at') + apply (wp | simp)+ + apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action + | simp)+ + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb + | simp)+ + apply (wp set_simple_ko_valid_objs set_ntfn_aligned' set_ntfn_valid_objs' + hoare_vcg_disj_lift weak_sch_act_wf_lift_linear + | simp add: valid_tcb_state_def valid_tcb_state'_def)+ + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def + valid_sched_action_def) + apply (auto simp: valid_ntfn'_def )[1] + apply (clarsimp simp: invs'_def valid_state'_def) + + \ \tl list \ []\ + apply (rule corres_guard_imp) + apply (rule_tac F="list \ []" in corres_gen_asm) + apply (simp add: list_case_helper) + apply (rule corres_split[OF setNotification_corres]) + apply (simp add: ntfn_relation_def split:list.splits) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule possibleSwitchTo_corres) + apply (wp cur_tcb_lift | simp)+ + apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action + | simp)+ + apply (wpsimp wp: sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb) + apply (wp set_ntfn_aligned' set_simple_ko_valid_objs set_ntfn_valid_objs' + hoare_vcg_disj_lift weak_sch_act_wf_lift_linear + | simp add: valid_tcb_state_def valid_tcb_state'_def)+ + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def neq_Nil_conv + ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def + split: option.splits) + apply (auto simp: valid_ntfn'_def neq_Nil_conv invs'_def valid_state'_def + weak_sch_act_wf_def + split: option.splits)[1] + \ \ActiveNtfn\ + apply (clarsimp simp add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (rule setNotification_corres) + apply (simp add: ntfn_relation_def combine_ntfn_badges_def + combine_ntfn_msgs_def) + apply (simp add: invs_def 
valid_state_def valid_ntfn_def) + apply (simp add: invs'_def valid_state'_def valid_ntfn'_def) + done + +lemma valid_Running'[simp]: + "valid_tcb_state' Running = \" + by (rule ext, simp add: valid_tcb_state'_def) + +crunch typ'[wp]: setMRs "\s. P (typ_at' T p s)" + (wp: crunch_wps simp: zipWithM_x_mapM) + +lemma possibleSwitchTo_sch_act[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s \ st_tcb_at' runnable' t s\ + possibleSwitchTo t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) + apply (wp hoare_weak_lift_imp threadSet_sch_act setQueue_sch_act threadGet_wp + | simp add: unless_def | wpc)+ + apply (auto simp: obj_at'_def tcb_in_cur_domain'_def) + done + +crunch st_refs_of'[wp]: possibleSwitchTo "\s. P (state_refs_of' s)" + (wp: crunch_wps) +crunch st_hyp_refs_of'[wp]: possibleSwitchTo "\s. P (state_hyp_refs_of' s)" + (wp: crunch_wps) +crunch cap_to'[wp]: possibleSwitchTo "ex_nonz_cap_to' p" + (wp: crunch_wps) +crunch objs'[wp]: possibleSwitchTo valid_objs' + (wp: crunch_wps) +crunch ct[wp]: possibleSwitchTo cur_tcb' + (wp: cur_tcb_lift crunch_wps) + +lemma possibleSwitchTo_iflive[wp]: + "\if_live_then_nonz_cap' and ex_nonz_cap_to' t and (\s. sch_act_wf (ksSchedulerAction s) s) + and pspace_aligned' and pspace_distinct'\ + possibleSwitchTo t + \\rv. if_live_then_nonz_cap'\" + unfolding possibleSwitchTo_def curDomain_def + by (wpsimp wp: threadGet_wp) + +crunch ifunsafe[wp]: possibleSwitchTo if_unsafe_then_cap' + (wp: crunch_wps) +crunch idle'[wp]: possibleSwitchTo valid_idle' + (wp: crunch_wps) +crunch global_refs'[wp]: possibleSwitchTo valid_global_refs' + (wp: crunch_wps) +crunch arch_state'[wp]: possibleSwitchTo valid_arch_state' + (wp: crunch_wps) +crunch irq_node'[wp]: possibleSwitchTo "\s. P (irq_node' s)" + (wp: crunch_wps) +crunch typ_at'[wp]: possibleSwitchTo "\s. P (typ_at' T p s)" + (wp: crunch_wps) +crunch irq_handlers'[wp]: possibleSwitchTo valid_irq_handlers' + (simp: unless_def tcb_cte_cases_def cteSizeBits_def wp: crunch_wps) +crunch irq_states'[wp]: possibleSwitchTo valid_irq_states' + (wp: crunch_wps) +crunch ct'[wp]: sendSignal "\s. P (ksCurThread s)" + (wp: crunch_wps simp: crunch_simps o_def) +crunch it'[wp]: sendSignal "\s. P (ksIdleThread s)" + (wp: crunch_wps simp: crunch_simps) + +crunch irqs_masked'[wp]: setBoundNotification "irqs_masked'" + (wp: irqs_masked_lift) + +crunch irqs_masked'[wp]: sendSignal "irqs_masked'" + (wp: crunch_wps getObject_inv loadObject_default_inv + simp: crunch_simps unless_def o_def + rule: irqs_masked_lift) + +lemma ct_in_state_activatable_imp_simple'[simp]: + "ct_in_state' activatable' s \ ct_in_state' simple' s" + apply (simp add: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply (case_tac st; simp) + done + +lemma setThreadState_nonqueued_state_update: + "\\s. invs' s \ st_tcb_at' simple' t s + \ st \ {Inactive, Running, Restart, IdleThreadState} + \ (st \ Inactive \ ex_nonz_cap_to' t s) + \ (t = ksIdleThread s \ idle' st) + \ (\ runnable' st \ sch_act_simple s)\ + setThreadState st t + \\_. 
invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wp valid_irq_node_lift setThreadState_ct_not_inQ) + apply (clarsimp simp: pred_tcb_at') + apply (rule conjI, fastforce simp: valid_tcb_state'_def) + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (rule conjI) + apply clarsimp + apply (erule delta_sym_refs) + apply (fastforce split: if_split_asm) + apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def split: if_split_asm) + apply fastforce + done + +lemma cteDeleteOne_reply_cap_to'[wp]: + "\ex_nonz_cap_to' p and + cte_wp_at' (\c. isReplyCap (cteCap c)) slot\ + cteDeleteOne slot + \\rv. ex_nonz_cap_to' p\" + apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) + apply (rule bind_wp [OF _ getCTE_sp]) + apply (rule hoare_assume_pre) + apply (subgoal_tac "isReplyCap (cteCap cte)") + apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv + | clarsimp simp: finaliseCap_def isCap_simps + | wp (once) hoare_drop_imps)+ + apply (fastforce simp: cte_wp_at_ctes_of) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + done + +crunches setupCallerCap, possibleSwitchTo, asUser, doIPCTransfer + for vms'[wp]: "valid_machine_state'" + (wp: crunch_wps simp: zipWithM_x_mapM_x) + +crunch nonz_cap_to'[wp]: cancelSignal "ex_nonz_cap_to' p" + (wp: crunch_wps simp: crunch_simps) + +lemma cancelIPC_nonz_cap_to'[wp]: + "\ex_nonz_cap_to' p\ cancelIPC t \\rv. ex_nonz_cap_to' p\" + apply (simp add: cancelIPC_def getThreadReplySlot_def Let_def + capHasProperty_def) + apply (wp threadSet_cap_to' + | wpc + | simp + | clarsimp elim!: cte_wp_at_weakenE' + | rule hoare_post_imp[where Q="\rv. ex_nonz_cap_to' p"])+ + done + + +crunches activateIdleThread, getThreadReplySlot, isFinalCapability + for nosch[wp]: "\s. P (ksSchedulerAction s)" + (simp: Let_def) + +crunches setupCallerCap, asUser, setMRs, doIPCTransfer, possibleSwitchTo + for pspace_domain_valid[wp]: "pspace_domain_valid" + (wp: crunch_wps simp: zipWithM_x_mapM_x) + +crunches setupCallerCap, doIPCTransfer, possibleSwitchTo + for ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + (wp: crunch_wps simp: zipWithM_x_mapM) + +lemma setThreadState_not_rct[wp]: + "\\s. ksSchedulerAction s \ ResumeCurrentThread \ + setThreadState st t + \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" + apply (simp add: setThreadState_def) + apply (wp) + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply (simp) + apply (wp)+ + apply simp + done + +lemma cancelAllIPC_not_rct[wp]: + "\\s. ksSchedulerAction s \ ResumeCurrentThread \ + cancelAllIPC epptr + \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" + apply (simp add: cancelAllIPC_def) + apply (wp | wpc)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wp)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ + done + +lemma cancelAllSignals_not_rct[wp]: + "\\s. ksSchedulerAction s \ ResumeCurrentThread \ + cancelAllSignals epptr + \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" + apply (simp add: cancelAllSignals_def) + apply (wp | wpc)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ + done + +crunch not_rct[wp]: finaliseCapTrue_standin "\s. 
ksSchedulerAction s \ ResumeCurrentThread" +(simp: Let_def) + +declare setEndpoint_ct' [wp] + +lemma cancelIPC_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + cancelIPC t + \\_ s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + (is "\?PRE t'\ _ \_\") +proof - + have aipc: "\t t' ntfn. + \\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + cancelSignal t ntfn + \\_ s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + apply (simp add: cancelSignal_def) + apply (wp)[1] + apply (wp hoare_convert_imp)+ + apply (rule_tac P="\s. ksSchedulerAction s \ ResumeCurrentThread" + in hoare_weaken_pre) + apply (wpc) + apply (wp | simp)+ + apply (wpc, wp+) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp) + apply simp + done + have cdo: "\t t' slot. + \\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + cteDeleteOne slot + \\_ s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + apply (simp add: cteDeleteOne_def unless_def split_def) + apply (wp) + apply (wp hoare_convert_imp)[1] + apply (wp) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp hoare_convert_imp | simp)+ + done + show ?thesis + apply (simp add: cancelIPC_def Let_def) + apply (wp, wpc) + prefer 4 \ \state = Running\ + apply wp + prefer 7 \ \state = Restart\ + apply wp + apply (wp)+ + apply (wp hoare_convert_imp)[1] + apply (wpc, wp+) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp cdo)+ + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply ((wp aipc hoare_convert_imp)+)[6] + apply (wp) + apply (wp hoare_convert_imp)[1] + apply (wpc, wp+) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp) + apply simp + done +qed + +lemma sai_invs'[wp]: + "\invs' and ex_nonz_cap_to' ntfnptr\ + sendSignal ntfnptr badge \\y. invs'\" + unfolding sendSignal_def + apply (rule bind_wp[OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj nTFN", simp_all) + prefer 3 + apply (rename_tac list) + apply (case_tac list, + simp_all split del: if_split + add: setMessageInfo_def)[1] + apply (wp hoare_convert_imp [OF asUser_nosch] + hoare_convert_imp [OF setMRs_sch_act])+ + apply (clarsimp simp:conj_comms) + apply (simp add: invs'_def valid_state'_def) + apply (wp valid_irq_node_lift sts_valid_objs' setThreadState_ct_not_inQ + set_ntfn_valid_objs' cur_tcb_lift sts_st_tcb' + hoare_convert_imp [OF setNotification_nosch] + | simp split del: if_split)+ + + apply (intro conjI[rotated]; + (solves \clarsimp simp: invs'_def valid_state'_def valid_pspace'_def\)?) 
+ apply (clarsimp simp: invs'_def valid_state'_def split del: if_split) + apply (drule(1) ct_not_in_ntfnQueue, simp+) + apply clarsimp + apply (frule ko_at_valid_objs', clarsimp) + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def + split: list.splits) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (clarsimp simp: st_tcb_at_refs_of_rev' valid_idle'_def pred_tcb_at'_def idle_tcb'_def + dest!: sym_refs_ko_atD' sym_refs_st_tcb_atD' sym_refs_obj_atD' + split: list.splits) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (frule(1) ko_at_valid_objs') + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def + split: list.splits option.splits) + apply (clarsimp elim!: if_live_then_nonz_capE' simp:invs'_def valid_state'_def) + apply (drule(1) sym_refs_ko_atD') + apply (clarsimp elim!: ko_wp_at'_weakenE + intro!: refs_of_live') + apply (clarsimp split del: if_split)+ + apply (frule ko_at_valid_objs', clarsimp) + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def split del: if_split) + apply (frule invs_sym') + apply (drule(1) sym_refs_obj_atD') + apply (clarsimp split del: if_split cong: if_cong + simp: st_tcb_at_refs_of_rev' ep_redux_simps' ntfn_bound_refs'_def) + apply (frule st_tcb_at_state_refs_ofD') + apply (erule delta_sym_refs) + apply (fastforce simp: split: if_split_asm) + apply (fastforce simp: tcb_bound_refs'_def set_eq_subset symreftype_inverse' + split: if_split_asm) + apply (clarsimp simp:invs'_def) + apply (frule ko_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def valid_state'_def) + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def split del: if_split) + apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def) + apply (frule(1) ko_at_valid_objs') + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def + split: list.splits option.splits) + apply (case_tac "ntfnBoundTCB nTFN", simp_all) + apply (wp set_ntfn_minor_invs') + apply (fastforce simp: valid_ntfn'_def invs'_def valid_state'_def + elim!: obj_at'_weakenE + dest!: global'_no_ex_cap) + apply (wp add: hoare_convert_imp [OF asUser_nosch] + hoare_convert_imp [OF setMRs_sch_act] + setThreadState_nonqueued_state_update sts_st_tcb' + del: cancelIPC_simple) + apply (clarsimp | wp cancelIPC_ct')+ + apply (wp set_ntfn_minor_invs' gts_wp' | clarsimp)+ + apply (frule pred_tcb_at') + by (wp set_ntfn_minor_invs' + | rule conjI + | clarsimp elim!: st_tcb_ex_cap'' + | fastforce simp: receiveBlocked_def pred_tcb_at'_def obj_at'_def + dest!: invs_rct_ct_activatable' + split: thread_state.splits + | fastforce simp: invs'_def valid_state'_def receiveBlocked_def + valid_obj'_def valid_ntfn'_def + split: thread_state.splits + dest!: global'_no_ex_cap st_tcb_ex_cap'' ko_at_valid_objs')+ + +lemma replyFromKernel_corres: + "corres dc (tcb_at t and invs) (invs') + (reply_from_kernel t r) (replyFromKernel t r)" + apply (case_tac r) + apply (clarsimp simp: replyFromKernel_def reply_from_kernel_def + badge_register_def badgeRegister_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF lookupIPCBuffer_corres]) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule corres_split_eqr[OF setMRs_corres]) + apply clarsimp + apply (rule setMessageInfo_corres) + apply (wp hoare_case_option_wp hoare_valid_ipc_buffer_ptr_typ_at' + | clarsimp simp: invs_distinct invs_psp_aligned)+ + apply fastforce + done + +lemma rfk_invs': + "\invs' and tcb_at' t\ replyFromKernel t r \\rv. 
invs'\" + apply (simp add: replyFromKernel_def) + apply (cases r) + apply (wp | clarsimp)+ + done + +crunch nosch[wp]: replyFromKernel "\s. P (ksSchedulerAction s)" + +lemma completeSignal_corres: + "corres dc (ntfn_at ntfnptr and tcb_at tcb and pspace_aligned and valid_objs and pspace_distinct) + (ntfn_at' ntfnptr and tcb_at' tcb and valid_pspace' and obj_at' isActive ntfnptr) + (complete_signal ntfnptr tcb) (completeSignal ntfnptr tcb)" + apply (simp add: complete_signal_def completeSignal_def) + apply (rule corres_guard_imp) + apply (rule_tac R'="\ntfn. ntfn_at' ntfnptr and tcb_at' tcb and valid_pspace' + and valid_ntfn' ntfn and (\_. isActive ntfn)" + in corres_split[OF getNotification_corres]) + apply (rule corres_gen_asm2) + apply (case_tac "ntfn_obj rv") + apply (clarsimp simp: ntfn_relation_def isActive_def + split: ntfn.splits Structures_H.notification.splits)+ + apply (rule corres_guard2_imp) + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres setNotification_corres]) + apply (clarsimp simp: ntfn_relation_def) + apply (wp set_simple_ko_valid_objs get_simple_ko_wp getNotification_wp | clarsimp simp: valid_ntfn'_def)+ + apply (clarsimp simp: valid_pspace'_def) + apply (frule_tac P="(\k. k = ntfn)" in obj_at_valid_objs', assumption) + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def obj_at'_def) + done + + +lemma doNBRecvFailedTransfer_corres: + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) \ + (do_nbrecv_failed_transfer thread) + (doNBRecvFailedTransfer thread)" + unfolding do_nbrecv_failed_transfer_def doNBRecvFailedTransfer_def + by (simp add: badgeRegister_def badge_register_def, rule asUser_setRegister_corres) + +lemma receiveIPC_corres: + assumes "is_ep_cap cap" and "cap_relation cap cap'" + shows " + corres dc (einvs and valid_sched and tcb_at thread and valid_cap cap and ex_nonz_cap_to thread + and cte_wp_at (\c. c = cap.NullCap) (thread, tcb_cnode_index 3)) + (invs' and tcb_at' thread and valid_cap' cap') + (receive_ipc thread cap isBlocking) (receiveIPC thread cap' isBlocking)" + apply (insert assms) + apply (simp add: receive_ipc_def receiveIPC_def + split del: if_split) + apply (case_tac cap, simp_all add: isEndpointCap_def) + apply (rename_tac word1 word2 right) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres]) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getBoundNotification_corres]) + apply (rule_tac r'="ntfn_relation" in corres_split) + apply (rule corres_option_split[rotated 2]) + apply (rule getNotification_corres) + apply clarsimp + apply (rule corres_trivial, simp add: ntfn_relation_def default_notification_def + default_ntfn_def) + apply (rule corres_if) + apply (clarsimp simp: ntfn_relation_def Ipc_A.isActive_def Endpoint_H.isActive_def + split: Structures_A.ntfn.splits Structures_H.notification.splits) + apply clarsimp + apply (rule completeSignal_corres) + apply (rule_tac P="einvs and valid_sched and tcb_at thread and + ep_at word1 and valid_ep ep and + obj_at (\k. k = Endpoint ep) word1 + and cte_wp_at (\c. 
c = cap.NullCap) (thread, tcb_cnode_index 3) + and ex_nonz_cap_to thread" and + P'="invs' and tcb_at' thread and ep_at' word1 and + valid_ep' epa" + in corres_inst) + apply (case_tac ep) + \ \IdleEP\ + apply (simp add: ep_relation_def) + apply (rule corres_guard_imp) + apply (case_tac isBlocking; simp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setEndpoint_corres) + apply (simp add: ep_relation_def) + apply wp+ + apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp) + apply simp + apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def + valid_tcb_state_def st_tcb_at_tcb_at) + apply auto[1] + \ \SendEP\ + apply (simp add: ep_relation_def) + apply (rename_tac list) + apply (rule_tac F="list \ []" in corres_req) + apply (clarsimp simp: valid_ep_def) + apply (case_tac list, simp_all split del: if_split)[1] + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (case_tac lista, simp_all add: ep_relation_def)[1] + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac + F="\data. + sender_state = + Structures_A.thread_state.BlockedOnSend word1 data" + in corres_gen_asm) + apply (clarsimp simp: isSend_def case_bool_If + case_option_If if3_fold + split del: if_split cong: if_cong) + apply (rule corres_split[OF doIPCTransfer_corres]) + apply (simp split del: if_split cong: if_cong) + apply (fold dc_def)[1] + apply (rule_tac P="valid_objs and valid_mdb and valid_list + and valid_sched + and cur_tcb + and valid_reply_caps + and pspace_aligned and pspace_distinct + and st_tcb_at (Not \ awaiting_reply) a + and st_tcb_at (Not \ halted) a + and tcb_at thread and valid_reply_masters + and cte_wp_at (\c. c = cap.NullCap) + (thread, tcb_cnode_index 3)" + and P'="tcb_at' a and tcb_at' thread and cur_tcb' + and valid_pspace' + and valid_objs' + and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" + in corres_guard_imp [OF corres_if]) + apply (simp add: fault_rel_optionation_def) + apply (rule corres_if2 [OF _ setupCallerCap_corres setThreadState_corres]) + apply simp + apply simp + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule possibleSwitchTo_corres) + apply (wpsimp wp: sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action)+ + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb + | simp)+ + apply (fastforce simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def + valid_sched_action_def) + apply (clarsimp split: if_split_asm) + apply (clarsimp | wp do_ipc_transfer_tcb_caps)+ + apply (rule_tac Q="\_ s. sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp) + apply (fastforce elim: sch_act_wf_weak) + apply (wp sts_st_tcb' gts_st_tcb_at | simp)+ + apply (simp cong: list.case_cong) + apply wp + apply simp + apply (wp weak_sch_act_wf_lift_linear setEndpoint_valid_mdb' set_ep_valid_objs') + apply (clarsimp split: list.split) + apply (clarsimp simp add: invs_def valid_state_def st_tcb_at_tcb_at) + apply (clarsimp simp add: valid_ep_def valid_pspace_def) + apply (drule(1) sym_refs_obj_atD[where P="\ko. 
ko = Endpoint e" for e]) + apply (fastforce simp: st_tcb_at_refs_of_rev elim: st_tcb_weakenE) + apply (auto simp: valid_ep'_def invs'_def valid_state'_def split: list.split)[1] + \ \RecvEP\ + apply (simp add: ep_relation_def) + apply (rule_tac corres_guard_imp) + apply (case_tac isBlocking; simp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setEndpoint_corres) + apply (simp add: ep_relation_def) + apply wp+ + apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp) + apply simp + apply (clarsimp simp: valid_tcb_state_def invs_distinct) + apply (clarsimp simp add: valid_tcb_state'_def) + apply (wp get_simple_ko_wp[where f=Notification] getNotification_wp gbn_wp gbn_wp' + hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_if_lift + | wpc | simp add: ep_at_def2[symmetric, simplified] | clarsimp)+ + apply (clarsimp simp: valid_cap_def invs_psp_aligned invs_valid_objs pred_tcb_at_def + valid_obj_def valid_tcb_def valid_bound_ntfn_def invs_distinct + dest!: invs_valid_objs + elim!: obj_at_valid_objsE + split: option.splits) + apply clarsimp + apply (auto simp: valid_cap'_def invs_valid_pspace' valid_obj'_def valid_tcb'_def + valid_bound_ntfn'_def obj_at'_def pred_tcb_at'_def + dest!: invs_valid_objs' obj_at_valid_objs' + split: option.splits)[1] + done + +lemma receiveSignal_corres: + "\ is_ntfn_cap cap; cap_relation cap cap' \ \ + corres dc (invs and st_tcb_at active thread and valid_cap cap and ex_nonz_cap_to thread) + (invs' and tcb_at' thread and valid_cap' cap') + (receive_signal thread cap isBlocking) (receiveSignal thread cap' isBlocking)" + apply (simp add: receive_signal_def receiveSignal_def) + apply (case_tac cap, simp_all add: isEndpointCap_def) + apply (rename_tac word1 word2 rights) + apply (rule corres_guard_imp) + apply (rule_tac R="\rv. invs and tcb_at thread and st_tcb_at active thread and + ntfn_at word1 and ex_nonz_cap_to thread and + valid_ntfn rv and + obj_at (\k. k = Notification rv) word1" and + R'="\rv'. 
invs' and tcb_at' thread and ntfn_at' word1 and + valid_ntfn' rv'" + in corres_split[OF getNotification_corres]) + apply clarsimp + apply (case_tac "ntfn_obj rv") + \ \IdleNtfn\ + apply (simp add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (case_tac isBlocking; simp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setNotification_corres) + apply (simp add: ntfn_relation_def) + apply wp+ + apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres; simp) + apply (clarsimp simp: invs_distinct) + apply simp + \ \WaitingNtfn\ + apply (simp add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (case_tac isBlocking; simp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setNotification_corres) + apply (simp add: ntfn_relation_def) + apply wp+ + apply (rule corres_guard_imp) + apply (rule doNBRecvFailedTransfer_corres; simp) + apply (clarsimp simp: invs_distinct)+ + \ \ActiveNtfn\ + apply (simp add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule setNotification_corres) + apply (simp add: ntfn_relation_def) + apply wp+ + apply (fastforce simp: invs_def valid_state_def valid_pspace_def + elim!: st_tcb_weakenE) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply wp+ + apply (clarsimp simp add: ntfn_at_def2 valid_cap_def st_tcb_at_tcb_at) + apply (clarsimp simp add: valid_cap'_def) + done + +lemma tg_sp': + "\P\ threadGet f p \\t. obj_at' (\t'. f t' = t) p and P\" + including no_pre + apply (simp add: threadGet_def) + apply wp + apply (rule hoare_strengthen_post) + apply (rule getObject_tcb_sp) + apply clarsimp + apply (erule obj_at'_weakenE) + apply simp + done + +declare lookup_cap_valid' [wp] + +lemma sendFaultIPC_corres: + "valid_fault f \ fr f f' \ + corres (fr \ dc) + (einvs and st_tcb_at active thread and ex_nonz_cap_to thread) + (invs' and sch_act_not thread and tcb_at' thread) + (send_fault_ipc thread f) (sendFaultIPC thread f')" + apply (simp add: send_fault_ipc_def sendFaultIPC_def + liftE_bindE Let_def) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="\fh fh'. 
fh = to_bl fh'"]) + apply (rule threadGet_corres) + apply (simp add: tcb_relation_def) + apply simp + apply (rule corres_splitEE) + apply (rule corres_cap_fault) + apply (rule lookup_cap_corres, rule refl) + apply (rule_tac P="einvs and st_tcb_at active thread + and valid_cap handler_cap and ex_nonz_cap_to thread" + and P'="invs' and tcb_at' thread and sch_act_not thread + and valid_cap' handlerCap" + in corres_inst) + apply (case_tac handler_cap, + simp_all add: isCap_defs lookup_failure_map_def + case_bool_If If_rearrage + split del: if_split cong: if_cong)[1] + apply (rule corres_guard_imp) + apply (rule corres_if2 [OF refl]) + apply (simp add: dc_def[symmetric]) + apply (rule corres_split[OF threadset_corres sendIPC_corres], simp_all)[1] + apply (simp add: tcb_relation_def fault_rel_optionation_def exst_same_def)+ + apply (wp thread_set_invs_trivial thread_set_no_change_tcb_state + thread_set_typ_at ep_at_typ_at ex_nonz_cap_to_pres + thread_set_cte_wp_at_trivial thread_set_not_state_valid_sched + | simp add: tcb_cap_cases_def)+ + apply ((wp threadSet_invs_trivial threadSet_tcb' + | simp add: tcb_cte_cases_def + | wp (once) sch_act_sane_lift)+)[1] + apply (rule corres_trivial, simp add: lookup_failure_map_def) + apply (clarsimp simp: st_tcb_at_tcb_at split: if_split) + apply (clarsimp simp: valid_cap_def invs_distinct) + apply (clarsimp simp: valid_cap'_def inQ_def) + apply auto[1] + apply (clarsimp simp: lookup_failure_map_def) + apply wp+ + apply (fastforce elim: st_tcb_at_tcb_at) + apply fastforce + done + +lemma gets_the_noop_corres: + assumes P: "\s. P s \ f s \ None" + shows "corres dc P P' (gets_the f) (return x)" + apply (clarsimp simp: corres_underlying_def gets_the_def + return_def gets_def bind_def get_def) + apply (clarsimp simp: assert_opt_def return_def dest!: P) + done + +lemma handleDoubleFault_corres: + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) + \ + (handle_double_fault thread f ft) + (handleDoubleFault thread f' ft')" + apply (rule corres_cross_over_guard[where Q="tcb_at' thread"]) + apply (fastforce intro!: tcb_at_cross) + apply (simp add: handle_double_fault_def handleDoubleFault_def) + apply (rule corres_guard_imp) + apply (subst bind_return [symmetric], + rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule corres_noop2) + apply (simp add: exs_valid_def return_def) + apply (rule hoare_eq_P) + apply wp + apply (rule asUser_inv) + apply (rule getRestartPC_inv) + apply (wp no_fail_getRestartPC)+ + apply (wp|simp)+ + done + +crunch tcb' [wp]: sendFaultIPC "tcb_at' t" (wp: crunch_wps) + +crunch typ_at'[wp]: receiveIPC "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +lemmas receiveIPC_typ_ats[wp] = typ_at_lifts [OF receiveIPC_typ_at'] + +crunch typ_at'[wp]: receiveSignal "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +lemmas receiveAIPC_typ_ats[wp] = typ_at_lifts [OF receiveSignal_typ_at'] + +crunch aligned'[wp]: setupCallerCap "pspace_aligned'" + (wp: crunch_wps) +crunch distinct'[wp]: setupCallerCap "pspace_distinct'" + (wp: crunch_wps) +crunch cur_tcb[wp]: setupCallerCap "cur_tcb'" + (wp: crunch_wps) + +lemma setupCallerCap_state_refs_of[wp]: + "\\s. P ((state_refs_of' s) (sender := {r \ state_refs_of' s sender. snd r = TCBBound}))\ + setupCallerCap sender rcvr grant + \\rv s. 
P (state_refs_of' s)\" + apply (simp add: setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def) + apply (wp hoare_drop_imps) + apply (simp add: fun_upd_def cong: if_cong) + done + +lemma setupCallerCap_state_hyp_refs_of[wp]: + "setupCallerCap sender rcvr canGrant \\s. P (state_hyp_refs_of' s)\" + apply (simp add: setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def) + apply (wp hoare_drop_imps) + done + +lemma is_derived_ReplyCap' [simp]: + "\m p g. is_derived' m p (capability.ReplyCap t False g) = + (\c. \ g. c = capability.ReplyCap t True g)" + apply (subst fun_eq_iff) + apply clarsimp + apply (case_tac x, simp_all add: is_derived'_def isCap_simps + badge_derived'_def + vs_cap_ref'_def) + done + +lemma unique_master_reply_cap': + "\c t. isReplyCap c \ capReplyMaster c \ capTCBPtr c = t \ + (\g . c = capability.ReplyCap t True g)" + by (fastforce simp: isCap_simps conj_comms) + +lemma getSlotCap_cte_wp_at: + "\\\ getSlotCap sl \\rv. cte_wp_at' (\c. cteCap c = rv) sl\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma setupCallerCap_vp[wp]: + "\valid_pspace' and tcb_at' sender and tcb_at' rcvr\ + setupCallerCap sender rcvr grant \\rv. valid_pspace'\" + apply (simp add: valid_pspace'_def setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv getSlotCap_def) + apply (wp getCTE_wp) + apply (rule_tac Q="\_. valid_pspace' and + tcb_at' sender and tcb_at' rcvr" + in hoare_post_imp) + apply (clarsimp simp: valid_cap'_def o_def cte_wp_at_ctes_of isCap_simps + valid_pspace'_def) + apply (frule(1) ctes_of_valid', simp add: valid_cap'_def capAligned_def) + apply clarsimp + apply (wp | simp add: valid_pspace'_def valid_tcb_state'_def)+ + done + +declare haskell_assert_inv[wp del] + +lemma setupCallerCap_iflive[wp]: + "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender and pspace_aligned' and pspace_distinct'\ + setupCallerCap sender rcvr grant + \\rv. if_live_then_nonz_cap'\" + unfolding setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv + by (wp getSlotCap_cte_wp_at + | simp add: unique_master_reply_cap' + | strengthen eq_imp_strg + | wp (once) hoare_drop_imp[where f="getCTE rs" for rs])+ + +lemma setupCallerCap_ifunsafe[wp]: + "\if_unsafe_then_cap' and valid_objs' and + ex_nonz_cap_to' rcvr and tcb_at' rcvr and pspace_aligned' and pspace_distinct'\ + setupCallerCap sender rcvr grant + \\rv. if_unsafe_then_cap'\" + unfolding setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv + apply (wp getSlotCap_cte_wp_at + | simp add: unique_master_reply_cap' | strengthen eq_imp_strg + | wp (once) hoare_drop_imp[where f="getCTE rs" for rs])+ + apply (rule_tac Q="\rv. valid_objs' and tcb_at' rcvr and ex_nonz_cap_to' rcvr" + in hoare_post_imp) + apply (clarsimp simp: ex_nonz_tcb_cte_caps' tcbCallerSlot_def + objBits_def objBitsKO_def dom_def cte_level_bits_def) + apply (wp sts_valid_objs' | simp)+ + apply (clarsimp simp: valid_tcb_state'_def)+ + done + +lemma setupCallerCap_global_refs'[wp]: + "\valid_global_refs'\ + setupCallerCap sender rcvr grant + \\rv. 
valid_global_refs'\" + unfolding setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv + by (wp + | simp add: o_def unique_master_reply_cap' + | strengthen eq_imp_strg + | wp (once) getCTE_wp + | wp (once) hoare_vcg_imp_lift' hoare_vcg_ex_lift | clarsimp simp: cte_wp_at_ctes_of)+ + +crunch valid_arch'[wp]: setupCallerCap "valid_arch_state'" + (wp: hoare_drop_imps) + +crunch irq_node'[wp]: setupCallerCap "\s. P (irq_node' s)" + (wp: hoare_drop_imps) + +lemma setupCallerCap_irq_handlers'[wp]: + "\valid_irq_handlers'\ + setupCallerCap sender rcvr grant + \\rv. valid_irq_handlers'\" + unfolding setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv + by (wp hoare_drop_imps | simp)+ + +lemma cteInsert_cap_to': + "\ex_nonz_cap_to' p and cte_wp_at' (\c. cteCap c = NullCap) dest\ + cteInsert cap src dest + \\rv. ex_nonz_cap_to' p\" + apply (simp add: cteInsert_def ex_nonz_cap_to'_def + updateCap_def setUntypedCapAsFull_def + split del: if_split) + apply (rule hoare_pre, rule hoare_vcg_ex_lift) + apply (wp updateMDB_weak_cte_wp_at + setCTE_weak_cte_wp_at + | simp + | rule hoare_drop_imps)+ + apply (wp getCTE_wp) + apply clarsimp + apply (rule_tac x=cref in exI) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_ctes_of)+ + done + +crunch cap_to'[wp]: setExtraBadge "ex_nonz_cap_to' p" + +crunch cap_to'[wp]: doIPCTransfer "ex_nonz_cap_to' p" + (ignore: transferCapsToSlots + wp: crunch_wps transferCapsToSlots_pres2 cteInsert_cap_to' hoare_vcg_const_Ball_lift + simp: zipWithM_x_mapM ball_conj_distrib) + +lemma st_tcb_idle': + "\valid_idle' s; st_tcb_at' P t s\ \ + (t = ksIdleThread s) \ P IdleThreadState" + by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + +crunch idle'[wp]: getThreadCallerSlot "valid_idle'" +crunch idle'[wp]: getThreadReplySlot "valid_idle'" + +crunch it[wp]: setupCallerCap "\s. P (ksIdleThread s)" + (simp: updateObject_cte_inv wp: crunch_wps) + +lemma setupCallerCap_idle'[wp]: + "\valid_idle' and valid_pspace' and + (\s. st \ ksIdleThread s \ rt \ ksIdleThread s)\ + setupCallerCap st rt gr + \\_. valid_idle'\" + by (simp add: setupCallerCap_def capRange_def | wp hoare_drop_imps)+ + +crunch it[wp]: setExtraBadge "\s. P (ksIdleThread s)" +crunch it[wp]: receiveIPC "\s. P (ksIdleThread s)" + (ignore: transferCapsToSlots + wp: transferCapsToSlots_pres2 crunch_wps hoare_vcg_const_Ball_lift + simp: crunch_simps ball_conj_distrib) + +crunch irq_states' [wp]: setupCallerCap valid_irq_states' + (wp: crunch_wps) + +crunch irqs_masked' [wp]: receiveIPC "irqs_masked'" + (wp: crunch_wps rule: irqs_masked_lift) + +crunch ct_not_inQ[wp]: getThreadCallerSlot "ct_not_inQ" +crunch ct_not_inQ[wp]: getThreadReplySlot "ct_not_inQ" + +lemma setupCallerCap_ct_not_inQ[wp]: + "\ct_not_inQ\ setupCallerCap sender receiver grant \\_. ct_not_inQ\" + apply (simp add: setupCallerCap_def) + apply (wp hoare_drop_imp setThreadState_ct_not_inQ) + done + +crunch ksQ'[wp]: copyMRs "\s. P (ksReadyQueues s)" + (wp: mapM_wp' hoare_drop_imps simp: crunch_simps) + +crunch ksQ[wp]: doIPCTransfer "\s. P (ksReadyQueues s)" + (wp: hoare_drop_imps hoare_vcg_split_case_option + mapM_wp' + simp: split_def zipWithM_x_mapM) + +crunch ct'[wp]: doIPCTransfer "\s. P (ksCurThread s)" + (wp: hoare_drop_imps hoare_vcg_split_case_option + mapM_wp' + simp: split_def zipWithM_x_mapM) + +lemma asUser_ct_not_inQ[wp]: + "\ct_not_inQ\ asUser t m \\rv. 
ct_not_inQ\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps threadSet_not_inQ | simp)+ + done + +crunch ct_not_inQ[wp]: copyMRs "ct_not_inQ" + (wp: mapM_wp' hoare_drop_imps simp: crunch_simps) + +crunch ct_not_inQ[wp]: doIPCTransfer "ct_not_inQ" + (ignore: transferCapsToSlots + wp: hoare_drop_imps hoare_vcg_split_case_option + mapM_wp' + simp: split_def zipWithM_x_mapM) + +lemma ntfn_q_refs_no_bound_refs': "rf : ntfn_q_refs_of' (ntfnObj ob) \ rf \ ntfn_bound_refs' (ntfnBoundTCB ob')" + by (auto simp add: ntfn_q_refs_of'_def ntfn_bound_refs'_def + split: Structures_H.ntfn.splits) + +lemma completeSignal_invs: + "\invs' and tcb_at' tcb\ + completeSignal ntfnptr tcb + \\_. invs'\" + apply (simp add: completeSignal_def) + apply (rule bind_wp[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp set_ntfn_minor_invs' | wpc | simp)+ + apply (rule_tac Q="\_ s. (state_refs_of' s ntfnptr = ntfn_bound_refs' (ntfnBoundTCB ntfn)) + \ ntfn_at' ntfnptr s + \ valid_ntfn' (ntfnObj_update (\_. Structures_H.ntfn.IdleNtfn) ntfn) s + \ ((\y. ntfnBoundTCB ntfn = Some y) \ ex_nonz_cap_to' ntfnptr s) + \ ntfnptr \ ksIdleThread s" + in hoare_strengthen_post) + apply ((wp hoare_vcg_ex_lift hoare_weak_lift_imp | wpc | simp add: valid_ntfn'_def)+)[1] + apply (clarsimp simp: obj_at'_def state_refs_of'_def typ_at'_def ko_wp_at'_def live'_def + split: option.splits) + apply (blast dest: ntfn_q_refs_no_bound_refs') + apply wp + apply (subgoal_tac "valid_ntfn' ntfn s") + apply (subgoal_tac "ntfnptr \ ksIdleThread s") + apply (fastforce simp: valid_ntfn'_def valid_bound_tcb'_def ko_at_state_refs_ofD' live'_def + elim: obj_at'_weakenE + if_live_then_nonz_capD'[OF invs_iflive' + obj_at'_real_def[THEN meta_eq_to_obj_eq, + THEN iffD1]]) + apply (fastforce simp: valid_idle'_def pred_tcb_at'_def obj_at'_def dest!: invs_valid_idle') + apply (fastforce dest: invs_valid_objs' ko_at_valid_objs' simp: valid_obj'_def) + done + +lemma setupCallerCap_urz[wp]: + "\untyped_ranges_zero' and valid_pspace' and tcb_at' sender\ + setupCallerCap sender t g \\rv. untyped_ranges_zero'\" + apply (simp add: setupCallerCap_def getSlotCap_def + getThreadCallerSlot_def getThreadReplySlot_def + locateSlot_conv) + apply (wp getCTE_wp') + apply (rule_tac Q="\_. untyped_ranges_zero' and valid_mdb' and valid_objs'" in hoare_post_imp) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def untyped_derived_eq_def + isCap_simps) + apply (wp sts_valid_pspace_hangers) + apply (clarsimp simp: valid_tcb_state'_def) + done + +lemmas threadSet_urz = untyped_ranges_zero_lift[where f="cteCaps_of", OF _ threadSet_cteCaps_of] + +crunch urz[wp]: doIPCTransfer "untyped_ranges_zero'" + (ignore: threadSet wp: threadSet_urz crunch_wps simp: zipWithM_x_mapM) + +crunch gsUntypedZeroRanges[wp]: receiveIPC "\s. P (gsUntypedZeroRanges s)" + (wp: crunch_wps transferCapsToSlots_pres1 simp: zipWithM_x_mapM ignore: constOnFailure) + +crunch ctes_of[wp]: possibleSwitchTo "\s. P (ctes_of s)" + (wp: crunch_wps ignore: constOnFailure) + +lemmas possibleSwitchToTo_cteCaps_of[wp] + = cteCaps_of_ctes_of_lift[OF possibleSwitchTo_ctes_of] + +crunches possibleSwitchTo + for ksArch[wp]: "\s. 
P (ksArchState s)" + (wp: possibleSwitchTo_ctes_of crunch_wps ignore: constOnFailure) + +crunches asUser + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift wp: crunch_wps) + +crunches setupCallerCap, possibleSwitchTo, doIPCTransfer + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + +(* t = ksCurThread s *) +lemma ri_invs' [wp]: + "\invs' and sch_act_not t + and ct_in_state' simple' + and st_tcb_at' simple' t + and ex_nonz_cap_to' t + and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s)\ + receiveIPC t cap isBlocking + \\_. invs'\" (is "\?pre\ _ \_\") + apply (clarsimp simp: receiveIPC_def) + apply (rule bind_wp [OF _ get_ep_sp']) + apply (rule bind_wp [OF _ gbn_sp']) + apply (rule bind_wp) + (* set up precondition for old proof *) + apply (rule_tac R="ko_at' ep (capEPPtr cap) and ?pre" in hoare_vcg_if_split) + apply (wp completeSignal_invs) + apply (case_tac ep) + \ \endpoint = RecvEP\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wpc, wp valid_irq_node_lift) + apply (simp add: valid_ep'_def) + apply (wp sts_sch_act' hoare_vcg_const_Ball_lift valid_irq_node_lift + setThreadState_ct_not_inQ + asUser_urz + | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ + apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at' o_def) + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (frule obj_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def) + apply (drule(1) sym_refs_ko_atD') + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (clarsimp simp: st_tcb_at_refs_of_rev' valid_ep'_def + valid_obj'_def tcb_bound_refs'_def + dest!: isCapDs) + apply (rule conjI, clarsimp) + apply (drule (1) bspec) + apply (clarsimp dest!: st_tcb_at_state_refs_ofD') + apply (clarsimp simp: set_eq_subset) + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply ((case_tac tp; fastforce elim: nonempty_cross_distinct_singleton_elim)+)[2] + apply (clarsimp split: if_split_asm) + apply (fastforce simp: valid_pspace'_def global'_no_ex_cap idle'_not_queued) + \ \endpoint = IdleEP\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wpc, wp valid_irq_node_lift) + apply (simp add: valid_ep'_def) + apply (wp sts_sch_act' valid_irq_node_lift + setThreadState_ct_not_inQ + asUser_urz + | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ + apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def o_def) + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (subgoal_tac "t \ capEPPtr cap") + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule ko_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (clarsimp dest!: isCapDs) + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: tcb_bound_refs'_def + dest: symreftype_inverse' + split: if_split_asm) + apply (fastforce simp: global'_no_ex_cap) + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) + \ \endpoint = SendEP\ + apply (simp add: invs'_def valid_state'_def) + apply (rename_tac list) + apply (case_tac list, simp_all split del: if_split) + apply (rename_tac sender queue) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift hoare_drop_imps setEndpoint_valid_mdb' + set_ep_valid_objs' sts_st_tcb' sts_sch_act' + setThreadState_ct_not_inQ + 
possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift + setEndpoint_ksQ setEndpoint_ct' + | simp add: valid_tcb_state'_def case_bool_If + case_option_If + split del: if_split cong: if_cong + | wp (once) sch_act_sane_lift hoare_vcg_conj_lift hoare_vcg_all_lift + untyped_ranges_zero_lift)+ + apply (clarsimp split del: if_split simp: pred_tcb_at') + apply (frule obj_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def) + apply (frule(1) ct_not_in_epQueue, clarsimp, clarsimp) + apply (drule(1) sym_refs_ko_atD') + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (clarsimp simp: valid_obj'_def valid_ep'_def st_tcb_at_refs_of_rev' conj_ac + split del: if_split + cong: if_cong) + apply (subgoal_tac "sch_act_not sender s") + prefer 2 + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (drule st_tcb_at_state_refs_ofD') + apply (simp only: conj_ac(1, 2)[where Q="sym_refs R" for R]) + apply (subgoal_tac "distinct (ksIdleThread s # capEPPtr cap # t # sender # queue)") + apply (rule conjI) + apply (clarsimp simp: ep_redux_simps' cong: if_cong) + apply (erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: tcb_bound_refs'_def + dest: symreftype_inverse' + split: if_split_asm) + apply (clarsimp simp: singleton_tuple_cartesian split: list.split + | rule conjI | drule(1) bspec + | drule st_tcb_at_state_refs_ofD' bound_tcb_at_state_refs_ofD' + | clarsimp elim!: if_live_state_refsE)+ + apply (case_tac cap, simp_all add: isEndpointCap_def) + apply (clarsimp simp: global'_no_ex_cap) + apply (rule conjI + | clarsimp simp: singleton_tuple_cartesian split: list.split + | clarsimp elim!: if_live_state_refsE + | clarsimp simp: global'_no_ex_cap idle'_not_queued' idle'_no_refs tcb_bound_refs'_def + | drule(1) bspec | drule st_tcb_at_state_refs_ofD' + | clarsimp simp: set_eq_subset dest!: bound_tcb_at_state_refs_ofD' )+ + apply (rule hoare_pre) + apply (wp getNotification_wp | wpc | clarsimp)+ + done + +(* t = ksCurThread s *) +lemma rai_invs'[wp]: + "\invs' and sch_act_not t + and st_tcb_at' simple' t + and ex_nonz_cap_to' t + and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s) + and (\s. \ntfnptr. isNotificationCap cap + \ capNtfnPtr cap = ntfnptr + \ obj_at' (\ko. ntfnBoundTCB ko = None \ ntfnBoundTCB ko = Some t) + ntfnptr s)\ + receiveSignal t cap isBlocking + \\_. 
invs'\" + apply (simp add: receiveSignal_def) + apply (rule bind_wp [OF _ get_ntfn_sp']) + apply (rename_tac ep) + apply (case_tac "ntfnObj ep") + \ \ep = IdleNtfn\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift sts_sch_act' typ_at_lifts + setThreadState_ct_not_inQ + asUser_urz + | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def live'_def | wpc)+ + apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def) + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (subgoal_tac "capNtfnPtr cap \ t") + apply (frule valid_pspace_valid_objs') + apply (frule (1) ko_at_valid_objs') + apply clarsimp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) + apply (rule conjI, clarsimp simp: obj_at'_def split: option.split) + apply (drule simple_st_tcb_at_state_refs_ofD' + ko_at_state_refs_ofD' bound_tcb_at_state_refs_ofD')+ + apply (clarsimp dest!: isCapDs) + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse' + split: if_split_asm) + apply (fastforce dest!: global'_no_ex_cap) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + \ \ep = ActiveNtfn\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts hoare_weak_lift_imp + asUser_urz + | simp add: valid_ntfn'_def)+ + apply (clarsimp simp: pred_tcb_at' valid_pspace'_def) + apply (frule (1) ko_at_valid_objs') + apply clarsimp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def isCap_simps) + apply (drule simple_st_tcb_at_state_refs_ofD' + ko_at_state_refs_ofD')+ + apply (erule delta_sym_refs) + apply (clarsimp split: if_split_asm simp: global'_no_ex_cap)+ + \ \ep = WaitingNtfn\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' + setThreadState_ct_not_inQ typ_at_lifts + asUser_urz + | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def live'_def | wpc)+ + apply (clarsimp simp: valid_tcb_state'_def) + apply (frule_tac t=t in not_in_ntfnQueue) + apply (simp) + apply (simp) + apply (erule pred_tcb'_weakenE, clarsimp) + apply (frule ko_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def) + apply simp + apply (clarsimp simp: valid_obj'_def) + apply (clarsimp simp: valid_ntfn'_def pred_tcb_at') + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (rule conjI, clarsimp simp: obj_at'_def split: option.split) + apply (drule(1) sym_refs_ko_atD') + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (clarsimp simp: st_tcb_at_refs_of_rev' + dest!: isCapDs) + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (rename_tac list one two three four five six seven eight nine) + apply (subgoal_tac "set list \ {NTFNSignal} \ {}") + apply safe[1] + apply (auto simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def)[5] + apply (fastforce simp: tcb_bound_refs'_def + split: if_split_asm) + apply (fastforce dest!: global'_no_ex_cap) + done + +lemma getCTE_cap_to_refs[wp]: + "\\\ getCTE p \\rv s. \r\zobj_refs' (cteCap rv). ex_nonz_cap_to' r s\" + apply (rule hoare_strengthen_post [OF getCTE_sp]) + apply (clarsimp simp: ex_nonz_cap_to'_def) + apply (fastforce elim: cte_wp_at_weakenE') + done + +lemma lookupCap_cap_to_refs[wp]: + "\\\ lookupCap t cref \\rv s. \r\zobj_refs' rv. 
ex_nonz_cap_to' r s\,-" + apply (simp add: lookupCap_def lookupCapAndSlot_def split_def + getSlotCap_def) + apply (wp | simp)+ + done + +crunches setVMRoot + for valid_objs'[wp]: valid_objs' + (wp: getASID_wp crunch_wps findVSpaceForASID_vs_at_wp + simp: getPoolPtr_def getThreadVSpaceRoot_def if_distribR) + +lemma arch_stt_objs' [wp]: + "\valid_objs'\ Arch.switchToThread t \\rv. valid_objs'\" + apply (simp add: AARCH64_H.switchToThread_def) + apply wp + done + +lemma possibleSwitchTo_sch_act_not: + "\sch_act_not t' and K (t \ t')\ possibleSwitchTo t \\rv. sch_act_not t'\" + apply (simp add: possibleSwitchTo_def setSchedulerAction_def curDomain_def) + apply (wp hoare_drop_imps | wpc | simp)+ + done + +crunch urz[wp]: possibleSwitchTo "untyped_ranges_zero'" + (simp: crunch_simps unless_def wp: crunch_wps) + +declare zipWithM_x_mapM[simp] (* FIXME AARCH64: remove? *) + +crunches possibleSwitchTo + for pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + +lemma si_invs'[wp]: + "\invs' and st_tcb_at' simple' t + and sch_act_not t + and ex_nonz_cap_to' ep and ex_nonz_cap_to' t\ + sendIPC bl call ba cg cgr t ep + \\rv. invs'\" + supply if_split[split del] + apply (simp add: sendIPC_def) + apply (rule bind_wp [OF _ get_ep_sp']) + apply (case_tac epa) + \ \epa = RecvEP\ + apply simp + apply (rename_tac list) + apply (case_tac list) + apply simp + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (rule_tac P="a\t" in hoare_gen_asm) + apply (wp valid_irq_node_lift + sts_valid_objs' set_ep_valid_objs' setEndpoint_valid_mdb' sts_st_tcb' sts_sch_act' + possibleSwitchTo_sch_act_not setThreadState_ct_not_inQ + possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift + hoare_convert_imp [OF doIPCTransfer_sch_act doIPCTransfer_ct'] + hoare_convert_imp [OF setEndpoint_nosch setEndpoint_ct'] + hoare_drop_imp [where f="threadGet tcbFault t"] + | rule_tac f="getThreadState a" in hoare_drop_imp + | wp (once) hoare_drop_imp[where R="\_ _. call"] + hoare_drop_imp[where R="\_ _. \ call"] + hoare_drop_imp[where R="\_ _. 
cg"] + | simp add: valid_tcb_state'_def case_bool_If + case_option_If + cong: if_cong + | wp (once) sch_act_sane_lift tcb_in_cur_domain'_lift hoare_vcg_const_imp_lift)+ + apply (clarsimp simp: pred_tcb_at' cong: conj_cong imp_cong) + apply (frule obj_at_valid_objs', clarsimp) + apply (frule(1) sym_refs_ko_atD') + apply (clarsimp simp: valid_obj'_def valid_ep'_def + st_tcb_at_refs_of_rev' pred_tcb_at' + conj_comms fun_upd_def[symmetric]) + apply (frule pred_tcb_at') + apply (drule simple_st_tcb_at_state_refs_ofD' st_tcb_at_state_refs_ofD')+ + apply (clarsimp simp: valid_pspace'_splits) + apply (subst fun_upd_idem[where x=t]) + apply (clarsimp split: if_split) + apply (rule conjI, clarsimp simp: obj_at'_def) + apply (drule bound_tcb_at_state_refs_ofD') + apply (fastforce simp: tcb_bound_refs'_def) + apply (subgoal_tac "ex_nonz_cap_to' a s") + prefer 2 + apply (clarsimp elim!: if_live_state_refsE) + apply clarsimp + apply (rule conjI) + apply (drule bound_tcb_at_state_refs_ofD') + apply (fastforce simp: tcb_bound_refs'_def set_eq_subset) + apply (clarsimp simp: conj_ac) + apply (rule conjI, clarsimp simp: idle'_no_refs) + apply (rule conjI, clarsimp simp: global'_no_ex_cap) + apply (rule conjI) + apply (rule impI) + apply (frule(1) ct_not_in_epQueue, clarsimp, clarsimp) + apply (clarsimp) + apply (simp add: ep_redux_simps') + apply (rule conjI, clarsimp split: if_split) + apply (rule conjI, fastforce simp: tcb_bound_refs'_def set_eq_subset) + apply (clarsimp, erule delta_sym_refs; + solves\auto simp: symreftype_inverse' tcb_bound_refs'_def split: if_split_asm\) + apply (solves\clarsimp split: list.splits\) + \ \epa = IdleEP\ + apply (cases bl) + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wp valid_irq_node_lift) + apply (simp add: valid_ep'_def) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) + apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (subgoal_tac "ep \ t") + apply (drule simple_st_tcb_at_state_refs_ofD' ko_at_state_refs_ofD' + bound_tcb_at_state_refs_ofD')+ + apply (rule conjI, erule delta_sym_refs) + apply (auto simp: tcb_bound_refs'_def symreftype_inverse' + split: if_split_asm)[2] + apply (fastforce simp: global'_no_ex_cap) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply simp + apply wp + apply simp + \ \epa = SendEP\ + apply (cases bl) + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wp valid_irq_node_lift) + apply (simp add: valid_ep'_def) + apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) + apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (frule obj_at_valid_objs', clarsimp) + apply (frule(1) sym_refs_ko_atD') + apply (frule pred_tcb_at') + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (clarsimp simp: valid_obj'_def valid_ep'_def st_tcb_at_refs_of_rev') + apply (rule conjI, clarsimp) + apply (drule (1) bspec) + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' bound_tcb_at_state_refs_ofD' + simp: tcb_bound_refs'_def) + apply (clarsimp simp: set_eq_subset) + apply (rule conjI, erule delta_sym_refs) + subgoal by (fastforce simp: obj_at'_def symreftype_inverse' + split: if_split_asm) + apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse' + split: if_split_asm) + apply (fastforce simp: global'_no_ex_cap idle'_not_queued) + apply (simp | wp)+ + done + 
+lemma sfi_invs_plus': + "\invs' and st_tcb_at' simple' t + and sch_act_not t + and ex_nonz_cap_to' t\ + sendFaultIPC t f + \\_. invs'\, \\_. invs' and st_tcb_at' simple' t and sch_act_not t and (\s. ksIdleThread s \ t)\" + apply (simp add: sendFaultIPC_def) + apply (wp threadSet_invs_trivial threadSet_pred_tcb_no_state + threadSet_cap_to' + | wpc | simp)+ + apply (rule_tac Q'="\rv s. invs' s \ sch_act_not t s + \ st_tcb_at' simple' t s + \ ex_nonz_cap_to' t s + \ t \ ksIdleThread s + \ (\r\zobj_refs' rv. ex_nonz_cap_to' r s)" + in hoare_strengthen_postE_R) + apply wp + apply (clarsimp simp: inQ_def pred_tcb_at') + apply (wp | simp)+ + apply (clarsimp simp: eq_commute) + apply (subst(asm) global'_no_ex_cap, auto) + done + +crunches send_fault_ipc + for pspace_aligned[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" + (simp: crunch_simps wp: crunch_wps) + +lemma handleFault_corres: + "fr f f' \ + corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread + and (\_. valid_fault f)) + (invs' and sch_act_not thread + and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) + (handle_fault thread f) (handleFault thread f')" + apply (simp add: handle_fault_def handleFault_def) + apply (rule corres_guard_imp) + apply (subst return_bind [symmetric], + rule corres_split[where P="tcb_at thread", + OF gets_the_noop_corres [where x="()"]]) + apply (simp add: tcb_at_def) + apply (rule corres_split_catch) + apply (rule_tac F="valid_fault f" in corres_gen_asm) + apply (rule sendFaultIPC_corres, assumption) + apply simp + apply (rule handleDoubleFault_corres) + apply wpsimp+ + apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 invs_def valid_state_def valid_idle_def) + apply auto + done + +lemma sts_invs_minor'': + "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st + \ (st \ Inactive \ \ idle' st \ + st' \ Inactive \ \ idle' st')) t + and (\s. t = ksIdleThread s \ idle' st) + and (\s. \ runnable' st \ sch_act_not t s) + and invs'\ + setThreadState st t + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) + apply clarsimp + apply (rule conjI) + apply fastforce + apply (rule conjI) + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def) + apply (clarsimp simp: valid_obj'_def valid_tcb'_def) + subgoal by (cases st, auto simp: valid_tcb_state'_def + split: Structures_H.thread_state.splits)[1] + apply (rule conjI) + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' + elim!: rsubst[where P=sym_refs] + intro!: ext) + apply (fastforce elim!: st_tcb_ex_cap'') + done + +lemma hf_invs' [wp]: + "\invs' and sch_act_not t + and st_tcb_at' simple' t + and ex_nonz_cap_to' t and (\s. t \ ksIdleThread s)\ + handleFault t f \\r. invs'\" + apply (simp add: handleFault_def) + apply wp + apply (simp add: handleDoubleFault_def) + apply (wp sts_invs_minor'' dmo_invs')+ + apply (rule hoare_strengthen_postE, rule sfi_invs_plus', + simp_all) + apply (strengthen no_refs_simple_strg') + apply clarsimp + done + +declare zipWithM_x_mapM [simp del] + +lemma gts_st_tcb': + "\\\ getThreadState t \\r. st_tcb_at' (\st. st = r) t\" + apply (rule hoare_strengthen_post) + apply (rule gts_sp') + apply simp + done + +declare setEndpoint_ct' [wp] + +lemma setupCallerCap_pred_tcb_unchanged: + "\pred_tcb_at' proj P t and K (t \ t')\ + setupCallerCap t' t'' g + \\rv. 
pred_tcb_at' proj P t\" + apply (simp add: setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def) + apply (wp sts_pred_tcb_neq' hoare_drop_imps) + apply clarsimp + done + +lemma si_blk_makes_simple': + "\st_tcb_at' simple' t and K (t \ t')\ + sendIPC True call bdg x x' t' ep + \\rv. st_tcb_at' simple' t\" + apply (simp add: sendIPC_def) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) + apply (rename_tac list) + apply (case_tac list, simp_all add: case_bool_If case_option_If + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp sts_st_tcb_at'_cases setupCallerCap_pred_tcb_unchanged + hoare_drop_imps) + apply (clarsimp simp: pred_tcb_at' del: disjCI) + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + done + +lemma si_blk_makes_runnable': + "\st_tcb_at' runnable' t and K (t \ t')\ + sendIPC True call bdg x x' t' ep + \\rv. st_tcb_at' runnable' t\" + apply (simp add: sendIPC_def) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) + apply (rename_tac list) + apply (case_tac list, simp_all add: case_bool_If case_option_If + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp sts_st_tcb_at'_cases setupCallerCap_pred_tcb_unchanged + hoare_vcg_const_imp_lift hoare_drop_imps + | simp)+ + apply (clarsimp del: disjCI simp: pred_tcb_at' elim!: pred_tcb'_weakenE) + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + done + +lemma sfi_makes_simple': + "\st_tcb_at' simple' t and K (t \ t')\ + sendFaultIPC t' ft + \\rv. st_tcb_at' simple' t\" + apply (rule hoare_gen_asm) + apply (simp add: sendFaultIPC_def + cong: if_cong capability.case_cong bool.case_cong) + apply (wpsimp wp: si_blk_makes_simple' threadSet_pred_tcb_no_state hoare_drop_imps + hoare_vcg_all_liftE_R) + done + +lemma sfi_makes_runnable': + "\st_tcb_at' runnable' t and K (t \ t')\ + sendFaultIPC t' ft + \\rv. st_tcb_at' runnable' t\" + apply (rule hoare_gen_asm) + apply (simp add: sendFaultIPC_def + cong: if_cong capability.case_cong bool.case_cong) + apply (wpsimp wp: si_blk_makes_runnable' threadSet_pred_tcb_no_state hoare_drop_imps + hoare_vcg_all_liftE_R) + done + +lemma hf_makes_runnable_simple': + "\st_tcb_at' P t' and K (t \ t') and K (P = runnable' \ P = simple')\ + handleFault t ft + \\rv. st_tcb_at' P t'\" + apply (safe intro!: hoare_gen_asm) + apply (simp_all add: handleFault_def handleDoubleFault_def) + apply (wp sfi_makes_runnable' sfi_makes_simple' sts_st_tcb_at'_cases + | simp add: handleDoubleFault_def)+ + done + +crunches possibleSwitchTo, completeSignal + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + +lemma ri_makes_runnable_simple': + "\st_tcb_at' P t' and K (t \ t') and K (P = runnable' \ P = simple')\ + receiveIPC t cap isBlocking + \\rv. 
st_tcb_at' P t'\" + including no_pre + apply (rule hoare_gen_asm)+ + apply (simp add: receiveIPC_def) + apply (case_tac cap, simp_all add: isEndpointCap_def) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (rule bind_wp [OF _ gbn_sp']) + apply wp + apply (rename_tac ep q r) + apply (case_tac ep, simp_all) + apply (wp sts_st_tcb_at'_cases | wpc | simp add: doNBRecvFailedTransfer_def)+ + apply (rename_tac list) + apply (case_tac list, simp_all add: case_bool_If case_option_If + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp sts_st_tcb_at'_cases setupCallerCap_pred_tcb_unchanged + hoare_vcg_const_imp_lift)+ + apply (simp, simp only: imp_conv_disj) + apply (wp hoare_vcg_disj_lift)+ + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (fastforce simp: pred_tcb_at'_def obj_at'_def isSend_def + split: Structures_H.thread_state.split_asm) + apply (rule hoare_pre) + apply wpsimp+ + done + +lemma rai_makes_runnable_simple': + "\st_tcb_at' P t' and K (t \ t') and K (P = runnable' \ P = simple')\ + receiveSignal t cap isBlocking + \\rv. st_tcb_at' P t'\" + apply (rule hoare_gen_asm) + apply (simp add: receiveSignal_def) + apply (rule hoare_pre) + by (wp sts_st_tcb_at'_cases getNotification_wp | wpc | simp add: doNBRecvFailedTransfer_def)+ + +lemma sendSignal_st_tcb'_Running: + "\st_tcb_at' (\st. st = Running \ P st) t\ + sendSignal ntfnptr bdg + \\_. st_tcb_at' (\st. st = Running \ P st) t\" + apply (simp add: sendSignal_def) + apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp hoare_weak_lift_imp + | wpc | clarsimp simp: pred_tcb_at')+ + done + +lemma sai_st_tcb': + "\st_tcb_at' P t and K (P Running)\ + sendSignal ntfn bdg + \\rv. st_tcb_at' P t\" + apply (rule hoare_gen_asm) + apply (subgoal_tac "\Q. P = (\st. st = Running \ Q st)") + apply (clarsimp intro!: sendSignal_st_tcb'_Running) + apply (fastforce intro!: exI[where x=P]) + done + +end + +end diff --git a/proof/refine/AARCH64/KHeap_R.thy b/proof/refine/AARCH64/KHeap_R.thy new file mode 100644 index 0000000000..75bb3e4964 --- /dev/null +++ b/proof/refine/AARCH64/KHeap_R.thy @@ -0,0 +1,2340 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory KHeap_R +imports + Machine_R +begin + +lemma lookupAround2_known1: + "m x = Some y \ fst (lookupAround2 x m) = Some (x, y)" + by (fastforce simp: lookupAround2_char1) + +lemma koTypeOf_injectKO: + fixes v :: "'a :: pspace_storable" + shows "koTypeOf (injectKO v) = koType TYPE('a)" + apply (cut_tac v1=v in iffD2 [OF project_inject, OF refl]) + apply (simp add: project_koType[symmetric]) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma setObject_modify_variable_size: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; obj_at' (\obj. objBits v = objBits obj) p s\ + \ setObject p v s = modify (ksPSpace_update (\ps. 
ps (p \ injectKO v))) s" + apply (clarsimp simp: setObject_def split_def exec_gets obj_at'_def lookupAround2_known1 + assert_opt_def updateObject_default_def bind_assoc) + apply (simp add: projectKO_def alignCheck_assert) + apply (simp add: project_inject objBits_def) + apply (clarsimp simp only: koTypeOf_injectKO) + apply (frule in_magnitude_check[where s'=s]) + apply blast + apply fastforce + apply (simp add: magnitudeCheck_assert in_monad bind_def gets_def oassert_opt_def + get_def return_def) + apply (simp add: simpler_modify_def) + done + +lemma setObject_modify: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; \ko. P ko \ objBits ko = objBits v \ + \ setObject p v s = modify (ksPSpace_update (\ps. ps (p \ injectKO v))) s" + apply (rule setObject_modify_variable_size) + apply fastforce + apply fastforce + apply fastforce + unfolding obj_at'_def + by fastforce + +lemma obj_at_getObject: + assumes R: + "\a b n ko s obj::'a::pspace_storable. + \ (a, b) \ fst (loadObject t t n ko s); projectKO_opt ko = Some obj \ \ a = obj" + shows "\obj_at' P t\ getObject t \\(rv::'a::pspace_storable) s. P rv\" + by (auto simp: getObject_def obj_at'_def in_monad valid_def + split_def lookupAround2_known1 + dest: R) + +declare projectKO_inv [wp] + +lemma loadObject_default_inv: + "\P\ loadObject_default addr addr' next obj \\rv. P\" + apply (simp add: loadObject_default_def magnitudeCheck_def + alignCheck_def unless_def alignError_def + | wp hoare_vcg_split_case_option + hoare_drop_imps hoare_vcg_all_lift)+ + done + +lemma getObject_inv: + assumes x: "\p q n ko. \P\ loadObject p q n ko \\(rv :: 'a :: pspace_storable). P\" + shows "\P\ getObject p \\(rv :: 'a :: pspace_storable). P\" + by (simp add: getObject_def split_def | wp x)+ + +lemma getObject_inv_vcpu [wp]: "\P\ getObject l \\(rv :: ArchStructures_H.vcpu). P\" + apply (rule getObject_inv) + apply simp + apply (rule loadObject_default_inv) + done + +lemma getObject_inv_tcb [wp]: "\P\ getObject l \\(rv :: Structures_H.tcb). P\" + apply (rule getObject_inv) + apply simp + apply (rule loadObject_default_inv) + done +end + +(* FIXME: this should go somewhere in spec *) +translations + (type) "'a kernel" <=(type) "kernel_state \ ('a \ kernel_state) set \ bool" + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma no_fail_loadObject_default [wp]: + "no_fail (\s. \obj. projectKO_opt ko = Some (obj::'a) \ + is_aligned p (objBits obj) \ q = p + \ case_option True (\x. 
2 ^ (objBits obj) \ x - p) n) + (loadObject_default p q n ko :: ('a::pre_storable) kernel)" + apply (simp add: loadObject_default_def split_def projectKO_def + alignCheck_def alignError_def magnitudeCheck_def + unless_def) + apply (rule no_fail_pre) + apply (wp case_option_wp) + apply (clarsimp simp: is_aligned_mask) + apply (clarsimp split: option.split_asm) + apply (clarsimp simp: is_aligned_mask[symmetric]) + apply simp + done + +lemma no_fail_getObject_tcb [wp]: + "no_fail (tcb_at' t) (getObject t :: tcb kernel)" + apply (simp add: getObject_def split_def) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp add: obj_at'_def objBits_simps' + cong: conj_cong) + apply (rule ps_clear_lookupAround2, assumption+) + apply simp + apply (simp add: field_simps) + apply (erule is_aligned_no_wrap') + apply simp + apply (fastforce split: option.split_asm simp: objBits_simps') + done + +lemma typ_at_to_obj_at': + "typ_at' (koType (TYPE ('a :: pspace_storable))) p s + = obj_at' (\ :: 'a \ bool) p s" + by (simp add: typ_at'_def obj_at'_real_def project_koType[symmetric]) + +lemmas typ_at_to_obj_at_arches + = typ_at_to_obj_at'[where 'a=pte, simplified] + typ_at_to_obj_at'[where 'a=asidpool, simplified] + typ_at_to_obj_at'[where 'a=user_data, simplified] + typ_at_to_obj_at'[where 'a=user_data_device, simplified] + typ_at_to_obj_at'[where 'a=vcpu, simplified] + +lemmas page_table_at_obj_at' + = page_table_at'_def[unfolded typ_at_to_obj_at_arches] + +lemma no_fail_getObject_vcpu[wp]: "no_fail (vcpu_at' vcpu) (getObject vcpu :: vcpu kernel)" + apply (simp add: getObject_def split_def) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp add: obj_at'_def objBits_simps typ_at_to_obj_at_arches + cong: conj_cong option.case_cong) + apply (rule ps_clear_lookupAround2; assumption?) + apply simp + apply (erule is_aligned_no_overflow) + apply clarsimp + done + +lemma vcpu_at_ko: "typ_at (AArch AVCPU) p s \ \vcpu. ko_at (ArchObj (arch_kernel_obj.VCPU vcpu)) p s " + by (clarsimp simp add: obj_at_def) + +lemma corres_get_tcb: + "corres (tcb_relation \ the) (tcb_at t) (tcb_at' t) (gets (get_tcb t)) (getObject t)" + apply (rule corres_no_failI) + apply wp + apply (clarsimp simp add: gets_def get_def return_def bind_def get_tcb_def) + apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) + apply (clarsimp simp add: obj_at_def is_tcb obj_at'_def projectKO_def + projectKO_opt_tcb split_def + getObject_def loadObject_default_def in_monad) + apply (case_tac koa) + apply (simp_all add: fail_def return_def) + apply (case_tac bb) + apply (simp_all add: fail_def return_def) + apply (clarsimp simp add: state_relation_def pspace_relation_def) + apply (drule bspec) + apply clarsimp + apply blast + apply (clarsimp simp: tcb_relation_cut_def lookupAround2_known1) + done + +lemma lookupAround2_same1[simp]: + "(fst (lookupAround2 x s) = Some (x, y)) = (s x = Some y)" + apply (rule iffI) + apply (simp add: lookupAround2_char1) + apply (simp add: lookupAround2_known1) + done + + (* If we ever copy this: consider lifting Haskell precondition to \ here first. Not strictly + necessary since the rest of the proofs manage to lift later, but might be more convenient + for new proofs. 
*) +lemma getObject_vcpu_corres: + "corres vcpu_relation (vcpu_at vcpu) (vcpu_at' vcpu) + (get_vcpu vcpu) (getObject vcpu)" + apply (simp add: getObject_def get_vcpu_def get_object_def split_def) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, simp_all)[1] + apply (clarsimp simp: lookupAround2_known1) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (erule (1) ps_clear_lookupAround2) + apply simp + apply (erule is_aligned_no_overflow) + apply simp + apply (clarsimp simp: objBits_simps split: option.split) + apply (clarsimp simp: in_monad loadObject_default_def) + apply (simp add: bind_assoc exec_gets gets_map_def assert_opt_def fail_def split: option.split) + apply (drule vcpu_at_ko) + apply (clarsimp simp: obj_at_def in_omonad) + apply (simp add: return_def) + apply (simp add: in_magnitude_check objBits_simps pageBits_def) + apply (frule in_inv_by_hoareD [OF magnitudeCheck_inv]) + apply (clarsimp simp: state_relation_def pspace_relation_def) + apply (drule bspec, blast) + apply (clarsimp simp: other_obj_relation_def) + done + +lemma getObject_tcb_at': + "\ \ \ getObject t \\r::tcb. tcb_at' t\" + by (clarsimp simp: valid_def getObject_def in_monad + loadObject_default_def obj_at'_def split_def + in_magnitude_check objBits_simps') + +text \updateObject_cte lemmas\ + +lemma koType_objBitsKO: + "koTypeOf k = koTypeOf k' \ objBitsKO k = objBitsKO k'" + by (auto simp: objBitsKO_def archObjSize_def + split: kernel_object.splits arch_kernel_object.splits) + +lemma updateObject_objBitsKO: + "(ko', t') \ fst (updateObject (val :: 'a :: pspace_storable) ko p q n t) + \ objBitsKO ko' = objBitsKO ko" + apply (drule updateObject_type) + apply (erule koType_objBitsKO) + done + +lemma updateObject_cte_is_tcb_or_cte: + fixes cte :: cte and ptr :: machine_word + shows "\ fst (lookupAround2 p (ksPSpace s)) = Some (q, ko); + snd (lookupAround2 p (ksPSpace s)) = n; + (ko', s') \ fst (updateObject cte ko p q n s) \ \ + (\tcb getF setF. ko = KOTCB tcb \ s' = s \ tcb_cte_cases (p - q) = Some (getF, setF) + \ ko' = KOTCB (setF (\x. cte) tcb) \ is_aligned q tcbBlockSizeBits \ ps_clear q tcbBlockSizeBits s) \ + (\cte'. ko = KOCTE cte' \ ko' = KOCTE cte \ s' = s + \ p = q \ is_aligned p cte_level_bits \ ps_clear p cte_level_bits s)" + apply (clarsimp simp: updateObject_cte typeError_def alignError_def + tcbVTableSlot_def tcbCTableSlot_def to_bl_1 rev_take objBits_simps' + in_monad map_bits_to_bl cte_level_bits_def in_magnitude_check + lookupAround2_char1 + split: kernel_object.splits) + apply (subst(asm) in_magnitude_check3, simp+) + apply (simp add: in_monad tcbCTableSlot_def tcbVTableSlot_def + tcbReplySlot_def tcbCallerSlot_def tcbIPCBufferSlot_def + split: if_split_asm) + apply (simp add: in_monad tcbCTableSlot_def tcbVTableSlot_def + tcbReplySlot_def tcbCallerSlot_def tcbIPCBufferSlot_def + split: if_split_asm) + done + +declare plus_1_less[simp] + +lemma updateObject_default_result: + "(x, s'') \ fst (updateObject_default e ko p q n s) \ x = injectKO e" + by (clarsimp simp add: updateObject_default_def in_monad) + +lemma obj_at_setObject1: + assumes R: "\(v::'a::pspace_storable) p q n ko s x s''. + (x, s'') \ fst (updateObject v ko p q n s) \ x = injectKO v" + assumes Q: "\(v::'a::pspace_storable) (v'::'a). objBits v = objBits v'" + shows + "\ obj_at' (\x::'a::pspace_storable. 
True) t \ + setObject p (v::'a::pspace_storable) + \ \rv. obj_at' (\x::'a::pspace_storable. True) t \" + apply (simp add: setObject_def split_def) + apply (rule bind_wp [OF _ hoare_gets_sp]) + apply (clarsimp simp: valid_def in_monad obj_at'_def lookupAround2_char1 project_inject + dest!: R) + apply (subgoal_tac "objBitsKO (injectKO v) = objBitsKO (injectKO obj)") + apply (intro conjI impI, simp_all) + apply fastforce+ + apply (fold objBits_def) + apply (rule Q) + done + +lemma obj_at_setObject2: + fixes v :: "'a::pspace_storable" + fixes P :: "'b::pspace_storable \ bool" + assumes R: "\ko s' (v :: 'a) oko x y n s. (ko, s') \ fst (updateObject v oko x y n s) + \ koTypeOf ko \ koType TYPE('b)" + shows + "\ obj_at' P t \ + setObject p (v::'a) + \ \rv. obj_at' P t \" + apply (simp add: setObject_def split_def) + apply (rule bind_wp [OF _ hoare_gets_sp]) + apply (clarsimp simp: valid_def in_monad) + apply (frule updateObject_type) + apply (drule R) + apply (clarsimp simp: obj_at'_def) + apply (rule conjI) + apply (clarsimp simp: lookupAround2_char1) + apply (drule iffD1 [OF project_koType, OF exI]) + apply simp + apply (clarsimp simp: ps_clear_upd lookupAround2_char1) + done + +lemma updateObject_ep_eta: + "updateObject (v :: endpoint) = updateObject_default v" + by ((rule ext)+, simp) + +lemma updateObject_tcb_eta: + "updateObject (v :: tcb) = updateObject_default v" + by ((rule ext)+, simp) + +lemma updateObject_ntfn_eta: + "updateObject (v :: Structures_H.notification) = updateObject_default v" + by ((rule ext)+, simp) + +lemmas updateObject_eta = + updateObject_ep_eta updateObject_tcb_eta updateObject_ntfn_eta + +lemma objBits_type: + "koTypeOf k = koTypeOf k' \ objBitsKO k = objBitsKO k'" + by (erule koType_objBitsKO) + +lemma setObject_typ_at_inv: + "\typ_at' T p'\ setObject p v \\r. typ_at' T p'\" + apply (clarsimp simp: setObject_def split_def) + apply (clarsimp simp: valid_def typ_at'_def ko_wp_at'_def in_monad + lookupAround2_char1 ps_clear_upd) + apply (drule updateObject_type) + apply clarsimp + apply (drule objBits_type) + apply (simp add: ps_clear_upd) + done + +lemma setObject_typ_at_not: + "\\s. \ (typ_at' T p' s)\ setObject p v \\r s. \ (typ_at' T p' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def) + apply (erule notE) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def lookupAround2_char1 + split: if_split_asm) + apply (drule updateObject_type) + apply clarsimp + apply (drule objBits_type) + apply (clarsimp elim!: ps_clear_domE) + apply fastforce + apply (clarsimp elim!: ps_clear_domE) + apply fastforce + done + +lemma setObject_typ_at': + "\\s. P (typ_at' T p' s)\ setObject p v \\r s. P (typ_at' T p' s)\" + by (blast intro: P_bool_lift setObject_typ_at_inv setObject_typ_at_not) + +lemmas setObject_typ_ats [wp] = typ_at_lifts [OF setObject_typ_at'] + +lemma setObject_cte_wp_at2': + assumes x: "\x n tcb s t. \ t \ fst (updateObject v (KOTCB tcb) ptr x n s); Q s; + lookupAround2 ptr (ksPSpace s) = (Some (x, KOTCB tcb), n) \ + \ \tcb'. t = (KOTCB tcb', s) \ (\(getF, setF) \ ran tcb_cte_cases. getF tcb' = getF tcb)" + assumes y: "\x n cte s. fst (updateObject v (KOCTE cte) ptr x n s) = {}" + shows "\\s. P' (cte_wp_at' P p s) \ Q s\ setObject ptr v \\rv s. 
P' (cte_wp_at' P p s)\" + apply (clarsimp simp add: setObject_def valid_def in_monad split_def) + apply (simp add: cte_wp_at_cases' split del: if_split) + apply (erule rsubst[where P=P']) + apply (rule iffI) + apply (erule disjEI) + apply (clarsimp simp: ps_clear_upd lookupAround2_char1 y) + apply (erule exEI [where 'a=machine_word]) + apply (clarsimp simp: ps_clear_upd lookupAround2_char1) + apply (drule(1) x) + apply (clarsimp simp: lookupAround2_char1 prod_eqI) + apply (fastforce dest: bspec [OF _ ranI]) + apply (erule disjEI) + apply (clarsimp simp: ps_clear_upd lookupAround2_char1 + split: if_split_asm) + apply (frule updateObject_type) + apply (case_tac ba, simp_all add: y)[1] + apply (erule exEI) + apply (clarsimp simp: ps_clear_upd lookupAround2_char1 + split: if_split_asm) + apply (frule updateObject_type) + apply (case_tac ba, simp_all) + apply (drule(1) x) + apply (clarsimp simp: prod_eqI lookupAround2_char1) + apply (fastforce dest: bspec [OF _ ranI]) + done + +lemma setObject_cte_wp_at': + assumes x: "\x n tcb s t. \ t \ fst (updateObject v (KOTCB tcb) ptr x n s); Q s; + lookupAround2 ptr (ksPSpace s) = (Some (x, KOTCB tcb), n) \ + \ \tcb'. t = (KOTCB tcb', s) \ (\(getF, setF) \ ran tcb_cte_cases. getF tcb' = getF tcb)" + assumes y: "\x n cte s. fst (updateObject v (KOCTE cte) ptr x n s) = {}" + shows "\cte_wp_at' P p and Q\ setObject ptr v \\rv. cte_wp_at' P p\" + unfolding pred_conj_def + by (rule setObject_cte_wp_at2'[OF x y], assumption+) + +lemma setObject_ep_pre: + assumes "\P and ep_at' p\ setObject p (e::endpoint) \Q\" + shows "\P\ setObject p (e::endpoint) \Q\" using assms + apply (clarsimp simp: valid_def setObject_def in_monad + split_def updateObject_default_def in_magnitude_check objBits_simps') + apply (drule spec, drule mp, erule conjI) + apply (simp add: obj_at'_def objBits_simps') + apply (simp add: split_paired_Ball) + apply (drule spec, erule mp) + apply (clarsimp simp: in_monad in_magnitude_check) + done + +lemma setObject_ntfn_pre: + assumes "\P and ntfn_at' p\ setObject p (e::Structures_H.notification) \Q\" + shows "\P\ setObject p (e::Structures_H.notification) \Q\" using assms + apply (clarsimp simp: valid_def setObject_def in_monad + split_def updateObject_default_def in_magnitude_check objBits_simps') + apply (drule spec, drule mp, erule conjI) + apply (simp add: obj_at'_def objBits_simps') + apply (simp add: split_paired_Ball) + apply (drule spec, erule mp) + apply (clarsimp simp: in_monad in_magnitude_check) + done + +lemma setObject_tcb_pre: + assumes "\P and tcb_at' p\ setObject p (t::tcb) \Q\" + shows "\P\ setObject p (t::tcb) \Q\" using assms + apply (clarsimp simp: valid_def setObject_def in_monad + split_def updateObject_default_def in_magnitude_check objBits_simps') + apply (drule spec, drule mp, erule conjI) + apply (simp add: obj_at'_def objBits_simps') + apply (simp add: split_paired_Ball) + apply (drule spec, erule mp) + apply (clarsimp simp: in_monad in_magnitude_check) + done + +lemma setObject_tcb_ep_at: + shows + "\ ep_at' t \ + setObject p (x::tcb) + \ \rv. ep_at' t \" + apply (rule obj_at_setObject2) + apply (auto dest: updateObject_default_result) + done + +lemma obj_at_setObject3: + fixes Q::"'a::pspace_storable \ bool" + fixes P::"'a::pspace_storable \ bool" + assumes R: "\ko s y n. (updateObject v ko p y n s) + = (updateObject_default v ko p y n s)" + assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" + shows "\(\s. P v)\ setObject p v \\rv. 
obj_at' P p\" + apply (clarsimp simp add: valid_def in_monad obj_at'_def + setObject_def split_def project_inject objBits_def[symmetric] + R updateObject_default_def in_magnitude_check P ps_clear_upd) + apply fastforce + done + +lemma setObject_tcb_strongest: + "\\s. if t = t' then P tcb else obj_at' P t' s\ + setObject t (tcb :: tcb) + \\rv. obj_at' P t'\" + apply (cases "t = t'") + apply simp + apply (rule hoare_weaken_pre) + apply (rule obj_at_setObject3) + apply simp + apply (simp add: objBits_simps') + apply simp + apply (simp add: setObject_def split_def) + apply (clarsimp simp: valid_def obj_at'_def split_def in_monad + updateObject_default_def ps_clear_upd) + done + +method setObject_easy_cases = + clarsimp simp: setObject_def in_monad split_def valid_def lookupAround2_char1, + erule rsubst[where P=P'], rule ext, + clarsimp simp: updateObject_cte updateObject_default_def in_monad + typeError_def opt_map_def opt_pred_def projectKO_opts_defs + split: if_split_asm + Structures_H.kernel_object.split_asm + +lemma setObject_endpoint_tcbs_of'[wp]: + "setObject c (endpoint :: endpoint) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_notification_tcbs_of'[wp]: + "setObject c (notification :: notification) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedNexts_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedNexts_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedPrevs_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedPrevs_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbQueued[wp]: + "setObject c (cte :: cte) \\s. P' (tcbQueued |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + +lemma setObject_cte_inQ[wp]: + "setObject c (cte :: cte) \\s. P' (inQ d p |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + +lemma getObject_obj_at': + assumes x: "\q n ko. loadObject p q n ko = + (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" + assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" + shows "\ \ \ getObject p \\r::'a::pspace_storable. obj_at' ((=) r) p\" + by (clarsimp simp: valid_def getObject_def in_monad + loadObject_default_def obj_at'_def + split_def in_magnitude_check lookupAround2_char1 + x P project_inject objBits_def[symmetric]) + +lemma getObject_valid_obj: + assumes x: "\p q n ko. loadObject p q n ko = + (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" + assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" + shows "\ valid_objs' \ getObject p \\rv::'a::pspace_storable. valid_obj' (injectKO rv) \" + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule getObject_obj_at' [OF x P]) + apply (rule getObject_inv) + apply (subst x) + apply (rule loadObject_default_inv) + apply (clarsimp, assumption) + apply clarsimp + apply (drule(1) obj_at_valid_objs') + apply (clarsimp simp: project_inject) + done + +declare fail_inv[simp] + +lemma typeError_inv [wp]: + "\P\ typeError x y \\rv. P\" + by (simp add: typeError_def|wp)+ + +lemma getObject_cte_inv [wp]: "\P\ (getObject addr :: cte kernel) \\rv. P\" + apply (simp add: getObject_def loadObject_cte split_def) + apply (clarsimp simp: valid_def in_monad) + apply (clarsimp simp: typeError_def in_monad magnitudeCheck_def + split: kernel_object.split_asm if_split_asm option.split_asm) + done + +lemma getObject_ko_at: + assumes x: "\q n ko. 
loadObject p q n ko = + (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" + assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" + shows "\ \ \ getObject p \\r::'a::pspace_storable. ko_at' r p\" + by (subst eq_commute, rule getObject_obj_at' [OF x P]) + +lemma getObject_ko_at_tcb [wp]: + "\\\ getObject p \\rv::tcb. ko_at' rv p\" + by (rule getObject_ko_at | simp add: objBits_simps')+ + +lemma OMG_getObject_tcb: + "\obj_at' P t\ getObject t \\(tcb :: tcb) s. P tcb\" + apply (rule obj_at_getObject) + apply (clarsimp simp: loadObject_default_def in_monad) + done + +lemma setObject_nosch: + assumes x: "\p q n ko. \\s. P (ksSchedulerAction s)\ updateObject val p q n ko \\rv s. P (ksSchedulerAction s)\" + shows "\\s. P (ksSchedulerAction s)\ setObject t val \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +lemma getObject_ep_inv: "\P\ (getObject addr :: endpoint kernel) \\rv. P\" + apply (rule getObject_inv) + apply (simp add: loadObject_default_inv) + done + +lemma getObject_ntfn_inv: + "\P\ (getObject addr :: Structures_H.notification kernel) \\rv. P\" + apply (rule getObject_inv) + apply (simp add: loadObject_default_inv) + done + +lemma get_ep_inv'[wp]: "\P\ getEndpoint ep \\rv. P\" + by (simp add: getEndpoint_def getObject_ep_inv) + +lemma get_ntfn_inv'[wp]: "\P\ getNotification ntfn \\rv. P\" + by (simp add: getNotification_def getObject_ntfn_inv) + +lemma get_ep'_valid_ep[wp]: + "\ invs' and ep_at' ep \ getEndpoint ep \ valid_ep' \" + apply (simp add: getEndpoint_def) + apply (rule hoare_chain) + apply (rule getObject_valid_obj) + apply simp + apply (simp add: objBits_simps') + apply clarsimp + apply (simp add: valid_obj'_def) + done + +lemma get_ntfn'_valid_ntfn[wp]: + "\ invs' and ntfn_at' ep \ getNotification ep \ valid_ntfn' \" + apply (simp add: getNotification_def) + apply (rule hoare_chain) + apply (rule getObject_valid_obj) + apply simp + apply (simp add: objBits_simps') + apply clarsimp + apply (simp add: valid_obj'_def) + done + +lemma setObject_distinct[wp]: + shows "\pspace_distinct'\ setObject p val \\rv. pspace_distinct'\" + apply (clarsimp simp: setObject_def split_def valid_def in_monad lookupAround2_char1 + pspace_distinct'_def ps_clear_upd objBits_def[symmetric] + split: if_split_asm + dest!: updateObject_objBitsKO) + apply (fastforce dest: bspec[OF _ domI]) + apply (fastforce dest: bspec[OF _ domI]) + done + +lemma setObject_aligned[wp]: + shows "\pspace_aligned'\ setObject p val \\rv. pspace_aligned'\" + apply (clarsimp simp: setObject_def split_def valid_def in_monad lookupAround2_char1 + pspace_aligned'_def ps_clear_upd objBits_def[symmetric] + split: if_split_asm + dest!: updateObject_objBitsKO) + apply (fastforce dest: bspec[OF _ domI]) + apply (fastforce dest: bspec[OF _ domI]) + done + +lemma setObject_canonical[wp]: + shows "\pspace_canonical'\ setObject p val \\rv. pspace_canonical'\" + apply (clarsimp simp: setObject_def split_def valid_def in_monad lookupAround2_char1 + pspace_canonical'_def ps_clear_upd objBits_def[symmetric] + split: if_split_asm + dest!: updateObject_objBitsKO) + apply (fastforce dest: bspec[OF _ domI]) + apply (fastforce dest: bspec[OF _ domI]) + done + +lemma set_ep_aligned' [wp]: + "\pspace_aligned'\ setEndpoint ep v \\rv. pspace_aligned'\" + unfolding setEndpoint_def by wp + +lemma set_ep_distinct' [wp]: + "\pspace_distinct'\ setEndpoint ep v \\rv. 
pspace_distinct'\" + unfolding setEndpoint_def by wp + +lemma setEndpoint_cte_wp_at': + "\cte_wp_at' P p\ setEndpoint ptr v \\rv. cte_wp_at' P p\" + unfolding setEndpoint_def + apply (rule setObject_cte_wp_at'[where Q="\", simplified]) + apply (clarsimp simp add: updateObject_default_def in_monad + intro!: set_eqI)+ + done + +lemma setEndpoint_pred_tcb_at'[wp]: + "\pred_tcb_at' proj P t\ setEndpoint ptr val \\rv. pred_tcb_at' proj P t\" + apply (simp add: pred_tcb_at'_def setEndpoint_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma get_ntfn_ko': + "\\\ getNotification ep \\rv. ko_at' rv ep\" + apply (simp add: getNotification_def) + apply (rule getObject_ko_at) + apply simp + apply (simp add: objBits_simps') + done + +lemma set_ntfn_aligned'[wp]: + "\pspace_aligned'\ setNotification p ntfn \\rv. pspace_aligned'\" + unfolding setNotification_def by wp + +lemma set_ntfn_distinct'[wp]: + "\pspace_distinct'\ setNotification p ntfn \\rv. pspace_distinct'\" + unfolding setNotification_def by wp + +lemma setNotification_cte_wp_at': + "\cte_wp_at' P p\ setNotification ptr v \\rv. cte_wp_at' P p\" + unfolding setNotification_def + apply (rule setObject_cte_wp_at'[where Q="\", simplified]) + apply (clarsimp simp add: updateObject_default_def in_monad + intro!: set_eqI)+ + done + +lemma setObject_ep_tcb': + "\tcb_at' t\ setObject p (e::endpoint) \\_. tcb_at' t\" + by (rule setObject_typ_ats) + +lemma setObject_ntfn_tcb': + "\tcb_at' t\ setObject p (e::Structures_H.notification) \\_. tcb_at' t\" + by (rule setObject_typ_ats) + +lemma set_ntfn_tcb' [wp]: + "\ tcb_at' t \ setNotification ntfn v \ \rv. tcb_at' t \" + by (simp add: setNotification_def setObject_ntfn_tcb') + +lemma pspace_dom_update: + "\ ps ptr = Some x; a_type x = a_type v \ \ pspace_dom (ps(ptr \ v)) = pspace_dom ps" + apply (simp add: pspace_dom_def dom_fun_upd2 del: dom_fun_upd) + apply (rule SUP_cong [OF refl]) + apply clarsimp + apply (simp add: obj_relation_cuts_def3) + done + +lemmas ps_clear_def3 = ps_clear_def2 [OF order_less_imp_le [OF aligned_less_plus_1]] + + +declare diff_neg_mask[simp del] + +lemma cte_wp_at_ctes_of: + "cte_wp_at' P p s = (\cte. ctes_of s p = Some cte \ P cte)" + apply (simp add: cte_wp_at_cases' map_to_ctes_def Let_def + cte_level_bits_def objBits_simps' + split del: if_split) + apply (safe del: disjCI) + apply (clarsimp simp: ps_clear_def3 field_simps) + apply (clarsimp simp: ps_clear_def3 field_simps + split del: if_split) + apply (frule is_aligned_sub_helper) + apply (clarsimp simp: tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + apply (case_tac "n = 0") + apply (clarsimp simp: field_simps) + apply (subgoal_tac "ksPSpace s p = None") + apply clarsimp + apply (clarsimp simp: field_simps) + apply (elim conjE) + apply (subst(asm) mask_in_range, assumption) + apply (drule arg_cong[where f="\S. 
p \ S"]) + apply (simp add: dom_def field_simps) + apply (erule mp) + apply (rule ccontr, simp add: linorder_not_le) + apply (drule word_le_minus_one_leq) + apply clarsimp + apply (simp add: field_simps) + apply (clarsimp split: if_split_asm del: disjCI) + apply (simp add: ps_clear_def3 field_simps) + apply (rule disjI2, rule exI[where x="(p - (p && ~~ mask tcb_bits))"]) + apply (clarsimp simp: ps_clear_def3[where na=tcb_bits] is_aligned_mask add_ac + word_bw_assocs) + done + +lemma ctes_of_canonical: + assumes canonical: "pspace_canonical' s" + assumes ctes_of: "ctes_of s p = Some cte" + shows "canonical_address p" +proof - + from ctes_of have "cte_wp_at' ((=) cte) p s" + by (simp add: cte_wp_at_ctes_of) + thus ?thesis using canonical canonical_bit_def + by (fastforce simp: pspace_canonical'_def tcb_cte_cases_def field_simps objBits_defs take_bit_Suc + split: if_splits + elim: cte_wp_atE' canonical_address_add) +qed + +lemma tcb_cte_cases_small: + "\ tcb_cte_cases v = Some (getF, setF) \ + \ v < 2 ^ tcbBlockSizeBits" + by (simp add: tcb_cte_cases_def objBits_defs split: if_split_asm) + +lemmas tcb_cte_cases_aligned_helpers = + is_aligned_add_helper [OF _ tcb_cte_cases_small] + is_aligned_sub_helper [OF _ tcb_cte_cases_small] + +lemma ctes_of_from_cte_wp_at: + assumes x: "\P P' p. \\s. P (cte_wp_at' P' p s) \ Q s\ f \\r s. P (cte_wp_at' P' p s)\" + shows "\\s. P (ctes_of s) \ Q s\ f \\rv s. P (ctes_of s)\" + apply (clarsimp simp: valid_def + elim!: rsubst[where P=P] + intro!: ext) + apply (case_tac "ctes_of s x", simp_all) + apply (drule_tac P1=Not and P'1="\" and p1=x in use_valid [OF _ x], + simp_all add: cte_wp_at_ctes_of) + apply (drule_tac P1=id and P'1="(=) aa" and p1=x in use_valid [OF _ x], + simp_all add: cte_wp_at_ctes_of) + done + +lemmas setObject_ctes_of = ctes_of_from_cte_wp_at [OF setObject_cte_wp_at2'] + +lemma map_to_ctes_upd_cte: + "\ s p = Some (KOCTE cte'); is_aligned p cte_level_bits; + {p + 1..p + mask cte_level_bits} \ dom s = {} \ \ + map_to_ctes (s (p \ (KOCTE cte))) = ((map_to_ctes s) (p \ cte))" + apply (rule ext) + apply (simp add: map_to_ctes_def Let_def dom_fun_upd2 + split del: if_split del: dom_fun_upd) + apply (case_tac "x = p") + apply (simp add: objBits_simps' cte_level_bits_def mask_def field_simps) + apply (case_tac "(x && ~~ mask (objBitsKO (KOTCB undefined))) = p") + apply clarsimp + apply (simp del: dom_fun_upd split del: if_split cong: if_cong + add: dom_fun_upd2 field_simps objBits_simps) + done + +declare overflow_plus_one_self[simp] + +lemma map_to_ctes_upd_tcb: + "\ s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; {p + 1..p + mask tcbBlockSizeBits} \ dom s = {} \ \ + map_to_ctes (s (p \ (KOTCB tcb))) = + (\x. if \getF setF. 
tcb_cte_cases (x - p) = Some (getF, setF) + \ getF tcb \ getF tcb' + then (case tcb_cte_cases (x - p) of Some (getF, setF) \ Some (getF tcb)) + else map_to_ctes s x)" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (subgoal_tac "p && ~~ (mask tcbBlockSizeBits) = p") + apply (rule ext) + apply (simp add: map_to_ctes_def Let_def dom_fun_upd2 + split del: if_split del: dom_fun_upd + cong: option.case_cong if_cong) + apply (case_tac "x = p") + apply (simp add: objBits_simps' field_simps map_to_ctes_def mask_def) + apply (case_tac "x && ~~ mask (objBitsKO (KOTCB undefined)) = p") + apply (case_tac "tcb_cte_cases (x - p)") + apply (simp split del: if_split cong: if_cong option.case_cong) + apply (subgoal_tac "s x = None") + apply (simp add: field_simps objBits_simps' mask_def split del: if_split + cong: if_cong option.case_cong) + apply clarsimp + apply (subst(asm) mask_in_range[where bits="objBitsKO v" for v]) + apply (simp add: objBits_simps') + apply (drule_tac a=x in equals0D) + apply (simp add: dom_def objBits_simps' mask_def field_simps) + apply (erule mp) + apply (rule ccontr, simp add: linorder_not_le) + apply (drule word_le_minus_one_leq, simp) + apply (case_tac "tcb_cte_cases (x - p)") + apply (simp split del: if_split cong: if_cong option.case_cong) + apply (rule FalseE) + apply (subst(asm) mask_in_range[where bits="objBitsKO v" for v]) + apply (simp add: objBitsKO_def) + apply (subgoal_tac "x - p < 2 ^ tcbBlockSizeBits") + apply (frule word_le_minus_one_leq) + apply (frule(1) is_aligned_no_wrap') + apply (drule word_plus_mono_right[where x=p]) + apply (simp only: field_simps) + apply (erule is_aligned_no_overflow) + apply (simp add: objBits_simps' field_simps) + apply (clarsimp simp: tcb_cte_cases_def objBits_simps' mask_def field_simps + split: if_split_asm) + apply (subst mask_in_range, assumption) + apply (simp only: atLeastAtMost_iff order_refl simp_thms) + apply (erule is_aligned_no_overflow) + done + +lemma map_to_ctes_upd_other: + "\ s p = Some ko; case ko of KOTCB tcb \ False | KOCTE cte \ False | _ \ True; + case ko' of KOTCB tcb \ False | KOCTE cte \ False | _ \ True \ \ + map_to_ctes (s (p \ ko')) = (map_to_ctes s)" + apply (rule ext) + apply (simp add: map_to_ctes_def Let_def dom_fun_upd2 + split del: if_split del: dom_fun_upd + cong: if_cong) + apply (rule if_cong) + apply clarsimp + apply fastforce + apply clarsimp + apply (rule if_cong) + apply clarsimp + apply fastforce + apply clarsimp + apply (rule refl) + done + +lemma ctes_of_eq_cte_wp_at': + "cte_wp_at' ((=) cte) x s \ ctes_of s x = Some cte" + by (simp add: cte_wp_at_ctes_of) + +lemma tcb_cte_cases_change: + "tcb_cte_cases x = Some (getF, setF) \ + (\getF. (\setF. tcb_cte_cases y = Some (getF, setF)) \ getF (setF f tcb) \ getF tcb) + = (x = y \ f (getF tcb) \ getF tcb)" + apply (rule iffI) + apply (clarsimp simp: tcb_cte_cases_def split: if_split_asm) + apply (clarsimp simp: tcb_cte_cases_def split: if_split_asm) + done + +lemma cte_level_bits_nonzero [simp]: "0 < cte_level_bits" + by (simp add: cte_level_bits_def) + +lemma ctes_of_setObject_cte: + "\\s. P ((ctes_of s) (p \ cte))\ setObject p (cte :: cte) \\rv s. P (ctes_of s)\" + apply (clarsimp simp: setObject_def split_def valid_def in_monad) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (elim exE conjE disjE rsubst[where P=P]) + apply (clarsimp simp: lookupAround2_char1) + apply (subst map_to_ctes_upd_tcb; assumption?) 
+ apply (clarsimp simp: mask_def objBits_defs field_simps ps_clear_def3) + apply (clarsimp simp: tcb_cte_cases_change) + apply (rule ext, clarsimp) + apply (intro conjI impI) + apply (clarsimp simp: tcb_cte_cases_def split: if_split_asm) + apply (drule(1) cte_wp_at_tcbI'[where P="(=) cte"]) + apply (simp add: ps_clear_def3 field_simps) + apply assumption+ + apply (simp add: cte_wp_at_ctes_of) + apply (clarsimp simp: map_to_ctes_upd_cte ps_clear_def3 field_simps mask_def) + done + +declare foldl_True[simp] + +lemma real_cte_at': + "real_cte_at' p s \ cte_at' p s" + by (clarsimp simp add: cte_wp_at_cases' obj_at'_def objBits_simps' cte_level_bits_def + del: disjCI) + +lemma no_fail_getEndpoint [wp]: + "no_fail (ep_at' ptr) (getEndpoint ptr)" + apply (simp add: getEndpoint_def getObject_def split_def) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp add: obj_at'_def objBits_simps' lookupAround2_known1) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (simp add: field_simps) + apply (erule is_aligned_no_wrap') + apply (simp add: word_bits_conv) + apply (clarsimp split: option.split_asm simp: objBits_simps') + done + +lemma getEndpoint_corres: + "corres ep_relation (ep_at ptr) (ep_at' ptr) + (get_endpoint ptr) (getEndpoint ptr)" + apply (rule corres_no_failI) + apply wp + apply (simp add: get_simple_ko_def getEndpoint_def get_object_def + getObject_def bind_assoc ep_at_def2) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def return_def) + apply (clarsimp simp: assert_def fail_def obj_at_def return_def is_ep partial_inv_def) + apply (clarsimp simp: loadObject_default_def in_monad in_magnitude_check objBits_simps') + apply (clarsimp simp add: state_relation_def pspace_relation_def) + apply (drule bspec) + apply blast + apply (simp add: other_obj_relation_def) + done + +declare magnitudeCheck_inv [wp] + +declare alignCheck_inv [wp] + +lemma setObject_ct_inv: + "\\s. P (ksCurThread s)\ setObject t (v::tcb) \\rv s. P (ksCurThread s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_cd_inv: + "\\s. P (ksCurDomain s)\ setObject t (v::tcb) \\rv s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_it_inv: +"\\s. P (ksIdleThread s)\ setObject t (v::tcb) \\rv s. P (ksIdleThread s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_ksDomSchedule_inv: + "\\s. P (ksDomSchedule s)\ setObject t (v::tcb) \\rv s. P (ksDomSchedule s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma projectKO_def2: + "projectKO ko = assert_opt (projectKO_opt ko)" + by (simp add: projectKO_def assert_opt_def) + +lemma no_fail_magnitudeCheck[wp]: + "no_fail (\s. case y of None \ True | Some z \ 2 ^ n \ z - x) + (magnitudeCheck x y n)" + apply (clarsimp simp add: magnitudeCheck_def split: option.splits) + apply (rule no_fail_pre, wp) + apply simp + done + +lemma no_fail_setObject_other [wp]: + fixes ob :: "'a :: pspace_storable" + assumes x: "updateObject ob = updateObject_default ob" + shows "no_fail (obj_at' (\k::'a. 
objBits k = objBits ob) ptr) + (setObject ptr ob)" + apply (simp add: setObject_def x split_def updateObject_default_def + projectKO_def2 alignCheck_def alignError_def) + apply (rule no_fail_pre) + apply (wp ) + apply (clarsimp simp: is_aligned_mask[symmetric] obj_at'_def + objBits_def[symmetric] project_inject lookupAround2_known1) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (erule is_aligned_get_word_bits) + apply (subst add_diff_eq[symmetric]) + apply (erule is_aligned_no_wrap') + apply simp + apply simp + apply fastforce + done + +lemma obj_relation_cut_same_type: + "\ (y, P) \ obj_relation_cuts ko x; P ko z; + (y', P') \ obj_relation_cuts ko' x'; P' ko' z \ + \ (a_type ko = a_type ko') + \ (\n n'. a_type ko = ACapTable n \ a_type ko' = ACapTable n') + \ (\sz sz'. a_type ko = AArch (AUserData sz) \ a_type ko' = AArch (AUserData sz')) + \ (\sz sz'. a_type ko = AArch (ADeviceData sz) \ a_type ko' = AArch (ADeviceData sz')) + \ (\pt_t pt_t'. a_type ko = AArch (APageTable pt_t) \ a_type ko' = AArch (APageTable pt_t'))" + apply (rule ccontr) + apply (simp add: obj_relation_cuts_def2 a_type_def) + apply (auto simp: other_obj_relation_def tcb_relation_cut_def cte_relation_def pte_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + Structures_H.kernel_object.split_asm + arch_kernel_obj.split_asm arch_kernel_object.split_asm) + done + +definition exst_same :: "Structures_H.tcb \ Structures_H.tcb \ bool" +where + "exst_same tcb tcb' \ tcbPriority tcb = tcbPriority tcb' + \ tcbTimeSlice tcb = tcbTimeSlice tcb' + \ tcbDomain tcb = tcbDomain tcb'" + +fun exst_same' :: "Structures_H.kernel_object \ Structures_H.kernel_object \ bool" +where + "exst_same' (KOTCB tcb) (KOTCB tcb') = exst_same tcb tcb'" | + "exst_same' _ _ = True" + +lemma tcbs_of'_non_tcb_update: + "\typ_at' (koTypeOf ko) ptr s'; koTypeOf ko \ TCBT\ + \ tcbs_of' (s'\ksPSpace := (ksPSpace s')(ptr \ ko)\) = tcbs_of' s'" + by (fastforce simp: typ_at'_def ko_wp_at'_def opt_map_def projectKO_opts_defs + split: kernel_object.splits) + +lemma typ_at'_koTypeOf: + "ko_at' ob' ptr b \ typ_at' (koTypeOf (injectKO ob')) ptr b" + by (auto simp: typ_at'_def ko_wp_at'_def obj_at'_def project_inject) + +lemma setObject_other_corres: + fixes ob' :: "'a :: pspace_storable" + assumes x: "updateObject ob' = updateObject_default ob'" + assumes z: "\s. obj_at' P ptr s + \ map_to_ctes ((ksPSpace s) (ptr \ injectKO ob')) = map_to_ctes (ksPSpace s)" + assumes t: "is_other_obj_relation_type (a_type ob)" + assumes b: "\ko. P ko \ objBits ko = objBits ob'" + assumes e: "\ko. P ko \ exst_same' (injectKO ko) (injectKO ob')" + assumes P: "\v::'a::pspace_storable. (1 :: machine_word) < 2 ^ objBits v" + shows "other_obj_relation ob (injectKO (ob' :: 'a :: pspace_storable)) \ + corres dc (obj_at (\ko. 
a_type ko = a_type ob) ptr and obj_at (same_caps ob) ptr) + (obj_at' (P :: 'a \ bool) ptr) + (set_object ptr ob) (setObject ptr ob')" + supply image_cong_simp [cong del] projectKOs[simp del] + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (rule x) + apply (clarsimp simp: b elim!: obj_at'_weakenE) + apply (unfold set_object_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def x + projectKOs obj_at_def + updateObject_default_def in_magnitude_check [OF _ P]) + apply (rename_tac ko) + apply (clarsimp simp add: state_relation_def z) + apply (clarsimp simp add: caps_of_state_after_update cte_wp_at_after_update + swp_def fun_upd_def obj_at_def) + apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _ _" \ -\) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x=ptr in allE)+ + apply (clarsimp simp: obj_at_def a_type_def + split: Structures_A.kernel_object.splits if_split_asm) + apply (simp split: arch_kernel_obj.splits if_splits) + apply (fold fun_upd_def) + apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply (prop_tac "typ_at' (koTypeOf (injectKO ob')) ptr b") + subgoal + by (clarsimp simp: typ_at'_def ko_wp_at'_def obj_at'_def projectKO_opts_defs + is_other_obj_relation_type_def a_type_def other_obj_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm kernel_object.split_asm + arch_kernel_object.split_asm) + apply clarsimp + apply (rule conjI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: is_other_obj_relation_type t) + apply (drule(1) bspec) + apply clarsimp + apply (frule_tac ko'=ko and x'=ptr in obj_relation_cut_same_type, + (fastforce simp add: is_other_obj_relation_type t)+) + apply (insert t) + apply ((erule disjE + | clarsimp simp: is_other_obj_relation_type is_other_obj_relation_type_def a_type_def)+)[1] + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (insert e) + apply atomize + apply (clarsimp simp: obj_at'_def) + apply (erule_tac x=obj in allE) + apply (clarsimp simp: projectKO_eq project_inject) + apply (case_tac ob; + simp_all add: a_type_def other_obj_relation_def etcb_relation_def + is_other_obj_relation_type t exst_same_def)[1] + apply (clarsimp simp: is_other_obj_relation_type t exst_same_def + split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits + arch_kernel_obj.splits)+ + \ \ready_queues_relation\ + apply (prop_tac "koTypeOf (injectKO ob') \ TCBT") + subgoal + by (clarsimp simp: other_obj_relation_def; cases ob; cases "injectKO ob'"; + simp split: arch_kernel_obj.split_asm) + by (fastforce dest: tcbs_of'_non_tcb_update) + +lemmas obj_at_simps = obj_at_def obj_at'_def map_to_ctes_upd_other + is_other_obj_relation_type_def + a_type_def objBits_simps other_obj_relation_def pageBits_def + +lemma setEndpoint_corres: + "ep_relation e e' \ + corres dc (ep_at ptr) (ep_at' ptr) + (set_endpoint ptr e) (setEndpoint ptr e')" + apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) + apply (corresK_search search: setObject_other_corres[where P="\_. 
True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ + by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) + +lemma setNotification_corres: + "ntfn_relation ae ae' \ + corres dc (ntfn_at ptr) (ntfn_at' ptr) + (set_notification ptr ae) (setNotification ptr ae')" + apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ + by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) + +lemma no_fail_getNotification [wp]: + "no_fail (ntfn_at' ptr) (getNotification ptr)" + apply (simp add: getNotification_def getObject_def split_def) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp add: obj_at'_def objBits_simps' lookupAround2_known1) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (simp add: field_simps) + apply (erule is_aligned_no_wrap') + apply (simp add: word_bits_conv) + apply (clarsimp split: option.split_asm simp: objBits_simps') + done + +lemma getNotification_corres: + "corres ntfn_relation (ntfn_at ptr) (ntfn_at' ptr) + (get_notification ptr) (getNotification ptr)" + apply (rule corres_no_failI) + apply wp + apply (simp add: get_simple_ko_def getNotification_def get_object_def + getObject_def bind_assoc) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def return_def) + apply (clarsimp simp: assert_def fail_def obj_at_def return_def is_ntfn partial_inv_def) + apply (clarsimp simp: loadObject_default_def in_monad in_magnitude_check objBits_simps') + apply (clarsimp simp add: state_relation_def pspace_relation_def) + apply (drule bspec) + apply blast + apply (simp add: other_obj_relation_def) + done + +lemma setObject_ko_wp_at: + fixes v :: "'a :: pspace_storable" + assumes R: "\ko s y n. (updateObject v ko p y n s) + = (updateObject_default v ko p y n s)" + assumes n: "\v' :: 'a. objBits v' = n" + assumes m: "(1 :: machine_word) < 2 ^ n" + shows "\\s. obj_at' (\x :: 'a. True) p s \ + P (ko_wp_at' (if p = p' then K (P' (injectKO v)) else P')p' s)\ + setObject p v + \\rv s. P (ko_wp_at' P' p' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad + ko_wp_at'_def split_def + R updateObject_default_def + obj_at'_real_def + split del: if_split) + apply (clarsimp simp: project_inject objBits_def[symmetric] n + in_magnitude_check [OF _ m] + elim!: rsubst[where P=P] + split del: if_split) + apply (rule iffI) + apply (clarsimp simp: n ps_clear_upd objBits_def[symmetric] + split: if_split_asm) + apply (clarsimp simp: n project_inject objBits_def[symmetric] + ps_clear_upd + split: if_split_asm) + done + +lemma typ_at'_valid_obj'_lift: + assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" + notes [wp] = hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_const_Ball_lift typ_at_lifts [OF P] + shows "\\s. valid_obj' obj s\ f \\rv s. valid_obj' obj s\" + apply (cases obj; simp add: valid_obj'_def hoare_TrueI) + apply (rename_tac endpoint) + apply (case_tac endpoint; simp add: valid_ep'_def, wp) + apply (rename_tac notification) + apply (case_tac "ntfnObj notification"; + simp add: valid_ntfn'_def split: option.splits, + (wpsimp|rule conjI)+) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; + simp add: valid_tcb'_def valid_tcb_state'_def split_def opt_tcb_at'_def + valid_bound_ntfn'_def; + wpsimp wp: hoare_case_option_wp hoare_case_option_wp2; + (clarsimp split: option.splits)?) 
+ apply (wpsimp simp: valid_cte'_def) + apply wp + done + +lemmas setObject_valid_obj = typ_at'_valid_obj'_lift [OF setObject_typ_at'] + +lemma setObject_valid_objs': + assumes x: "\x n ko s ko' s'. + \ (ko', s') \ fst (updateObject val ko ptr x n s); P s; + valid_obj' ko s; lookupAround2 ptr (ksPSpace s) = (Some (x, ko), n) \ + \ valid_obj' ko' s" + shows "\valid_objs' and P\ setObject ptr val \\rv. valid_objs'\" + apply (clarsimp simp: valid_def) + apply (subgoal_tac "\ko. valid_obj' ko s \ valid_obj' ko b") + defer + apply clarsimp + apply (erule(1) use_valid [OF _ setObject_valid_obj]) + apply (clarsimp simp: setObject_def split_def in_monad + lookupAround2_char1) + apply (simp add: valid_objs'_def) + apply clarsimp + apply (drule spec, erule mp) + apply (drule(1) x) + apply (simp add: ranI) + apply (simp add: prod_eqI lookupAround2_char1) + apply (clarsimp elim!: ranE split: if_split_asm simp: ranI) + done + +lemma setObject_iflive': + fixes v :: "'a :: pspace_storable" + assumes R: "\ko s x y n. (updateObject v ko ptr y n s) + = (updateObject_default v ko ptr y n s)" + assumes n: "\x :: 'a. objBits x = n" + assumes m: "(1 :: machine_word) < 2 ^ n" + assumes x: "\x n tcb s t. \ t \ fst (updateObject v (KOTCB tcb) ptr x n s); P s; + lookupAround2 ptr (ksPSpace s) = (Some (x, KOTCB tcb), n) \ + \ \tcb'. t = (KOTCB tcb', s) \ (\(getF, setF) \ ran tcb_cte_cases. getF tcb' = getF tcb)" + assumes y: "\x n cte s. fst (updateObject v (KOCTE cte) ptr x n s) = {}" + shows "\\s. if_live_then_nonz_cap' s \ (live' (injectKO v) \ ex_nonz_cap_to' ptr s) \ P s\ + setObject ptr v + \\rv s. if_live_then_nonz_cap' s\" + unfolding if_live_then_nonz_cap'_def ex_nonz_cap_to'_def + apply (rule hoare_pre) + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (rule setObject_ko_wp_at [OF R n m]) + apply (rule hoare_vcg_ex_lift) + apply (rule setObject_cte_wp_at'[where Q = P, OF x y]) + apply assumption+ + apply clarsimp + apply (clarsimp simp: ko_wp_at'_def) + done + +lemma setObject_qs[wp]: + assumes x: "\q n obj. \\s. P (ksReadyQueues s)\ updateObject v obj p q n \\rv s. P (ksReadyQueues s)\" + shows "\\s. P (ksReadyQueues s)\ setObject p v \\rv s. P (ksReadyQueues s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +lemma setObject_qsL1[wp]: + assumes x: "\q n obj. \\s. P (ksReadyQueuesL1Bitmap s)\ updateObject v obj p q n \\rv s. P (ksReadyQueuesL1Bitmap s)\" + shows "\\s. P (ksReadyQueuesL1Bitmap s)\ setObject p v \\rv s. P (ksReadyQueuesL1Bitmap s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +lemma setObject_qsL2[wp]: + assumes x: "\q n obj. \\s. P (ksReadyQueuesL2Bitmap s)\ updateObject v obj p q n \\rv s. P (ksReadyQueuesL2Bitmap s)\" + shows "\\s. P (ksReadyQueuesL2Bitmap s)\ setObject p v \\rv s. P (ksReadyQueuesL2Bitmap s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +lemma setObject_ifunsafe': + fixes v :: "'a :: pspace_storable" + assumes x: "\x n tcb s t. \ t \ fst (updateObject v (KOTCB tcb) ptr x n s); P s; + lookupAround2 ptr (ksPSpace s) = (Some (x, KOTCB tcb), n) \ + \ \tcb'. t = (KOTCB tcb', s) \ (\(getF, setF) \ ran tcb_cte_cases. getF tcb' = getF tcb)" + assumes y: "\x n cte s. fst (updateObject v (KOCTE cte) ptr x n s) = {}" + assumes z: "\P. \\s. P (intStateIRQNode (ksInterruptState s))\ + setObject ptr v \\rv s. P (intStateIRQNode (ksInterruptState s))\" + shows "\\s. if_unsafe_then_cap' s \ P s\ + setObject ptr v + \\rv s. 
if_unsafe_then_cap' s\" + apply (simp only: if_unsafe_then_cap'_def ex_cte_cap_to'_def + cte_wp_at_ctes_of) + apply (rule hoare_use_eq_irq_node' [OF z]) + apply (rule setObject_ctes_of [OF x y], assumption+) + done + +lemma setObject_it[wp]: + assumes x: "\p q n ko. \\s. P (ksIdleThread s)\ updateObject val p q n ko \\rv s. P (ksIdleThread s)\" + shows "\\s. P (ksIdleThread s)\ setObject t val \\rv s. P (ksIdleThread s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +\\ + `idle_tcb_ps val` asserts that `val` is a pspace_storable value + which corresponds to an idle TCB. +\ +definition idle_tcb_ps :: "('a :: pspace_storable) \ bool" where + "idle_tcb_ps val \ (\tcb. projectKO_opt (injectKO val) = Some tcb \ idle_tcb' tcb)" + +lemma setObject_idle': + fixes v :: "'a :: pspace_storable" + assumes R: "\ko s y n. (updateObject v ko ptr y n s) + = (updateObject_default v ko ptr y n s)" + assumes n: "\x :: 'a. objBits x = n" + assumes m: "(1 :: machine_word) < 2 ^ n" + assumes z: "\P p q n ko. + \\s. P (ksIdleThread s)\ updateObject v p q n ko + \\rv s. P (ksIdleThread s)\" + shows "\\s. valid_idle' s \ + (ptr = ksIdleThread s + \ (\val :: 'a. idle_tcb_ps val) + \ idle_tcb_ps v)\ + setObject ptr v + \\rv s. valid_idle' s\" + apply (simp add: valid_idle'_def pred_tcb_at'_def o_def) + apply (rule hoare_pre) + apply (rule hoare_lift_Pf2 [where f="ksIdleThread"]) + apply (simp add: pred_tcb_at'_def obj_at'_real_def) + apply (rule setObject_ko_wp_at [OF R n m]) + apply (wp z) + apply (clarsimp simp add: pred_tcb_at'_def obj_at'_real_def ko_wp_at'_def idle_tcb_ps_def) + apply (clarsimp simp add: project_inject) + done + +lemma setObject_no_0_obj' [wp]: + "\no_0_obj'\ setObject p v \\r. no_0_obj'\" + apply (clarsimp simp: setObject_def split_def) + apply (clarsimp simp: valid_def no_0_obj'_def ko_wp_at'_def in_monad + lookupAround2_char1 ps_clear_upd) + done + +lemma valid_updateCapDataI: + "s \' c \ s \' updateCapData b x c" + apply (unfold updateCapData_def Let_def AARCH64_H.updateCapData_def) + apply (cases c) + apply (simp_all add: isCap_defs valid_cap'_def capUntypedPtr_def isCap_simps + capAligned_def word_size word_bits_def word_bw_assocs + split: arch_capability.splits capability.splits) + done + +lemma no_fail_threadGet [wp]: + "no_fail (tcb_at' t) (threadGet f t)" + by (simp add: threadGet_def, wp) + +lemma no_fail_getThreadState [wp]: + "no_fail (tcb_at' t) (getThreadState t)" + by (simp add: getThreadState_def, wp) + +lemma no_fail_setObject_tcb [wp]: + "no_fail (tcb_at' t) (setObject t (t'::tcb))" + apply (rule no_fail_pre, wp) + apply (rule ext)+ + apply simp + apply (simp add: objBits_simps) + done + +lemma no_fail_threadSet [wp]: + "no_fail (tcb_at' t) (threadSet f t)" + apply (simp add: threadSet_def) + apply (rule no_fail_pre, wp) + apply simp + done + +lemma dmo_return' [simp]: + "doMachineOp (return x) = return x" + apply (simp add: doMachineOp_def select_f_def return_def gets_def get_def + bind_def modify_def put_def) + done + +lemma dmo_storeWordVM' [simp]: + "doMachineOp (storeWordVM x y) = return ()" + by (simp add: storeWordVM_def) + +declare mapM_x_return [simp] + +lemma no_fail_dmo' [wp]: + "no_fail P f \ no_fail (P o ksMachineState) (doMachineOp f)" + apply (simp add: doMachineOp_def split_def) + apply (rule no_fail_pre, wp) + apply simp + apply (simp add: no_fail_def) + done + +lemma setEndpoint_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ + setEndpoint val ptr + \\rv s. 
P (ksSchedulerAction s)\" + apply (simp add: setEndpoint_def) + apply (rule setObject_nosch) + apply (simp add: updateObject_default_def) + apply wp + apply simp + done + +lemma setNotification_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ + setNotification val ptr + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setNotification_def) + apply (rule setObject_nosch) + apply (simp add: updateObject_default_def) + apply wp + apply simp + done + +lemma set_ep_valid_objs': + "\valid_objs' and valid_ep' ep\ + setEndpoint epptr ep + \\r s. valid_objs' s\" + apply (simp add: setEndpoint_def) + apply (rule setObject_valid_objs') + apply (clarsimp simp: updateObject_default_def in_monad valid_obj'_def) + done + +lemma set_ep_ctes_of[wp]: + "\\s. P (ctes_of s)\ setEndpoint p val \\rv s. P (ctes_of s)\" + apply (simp add: setEndpoint_def) + apply (rule setObject_ctes_of[where Q="\", simplified]) + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def bind_def) + done + +lemma set_ep_valid_mdb' [wp]: + "\valid_mdb'\ + setObject epptr (ep::endpoint) + \\_. valid_mdb'\" + apply (simp add: valid_mdb'_def) + apply (rule set_ep_ctes_of[simplified setEndpoint_def]) + done + +lemma setEndpoint_valid_mdb': + "\valid_mdb'\ setEndpoint p v \\rv. valid_mdb'\" + unfolding setEndpoint_def + by (rule set_ep_valid_mdb') + +crunches setEndpoint, setNotification + for pspace_canonoical'[wp]: pspace_canonical' + +lemma set_ep_valid_pspace'[wp]: + "\valid_pspace' and valid_ep' ep\ + setEndpoint epptr ep + \\r. valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (wp set_ep_aligned' [simplified] set_ep_valid_objs') + apply (wp hoare_vcg_conj_lift) + apply (simp add: setEndpoint_def) + apply (wp setEndpoint_valid_mdb')+ + apply auto + done + +lemma set_ep_valid_bitmapQ[wp]: + "\Invariants_H.valid_bitmapQ\ setEndpoint epptr ep \\rv. Invariants_H.valid_bitmapQ\" + apply (unfold setEndpoint_def) + apply (rule setObject_ep_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma set_ep_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ setEndpoint epptr ep \\rv. bitmapQ_no_L1_orphans \" + apply (unfold setEndpoint_def) + apply (rule setObject_ep_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma set_ep_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ setEndpoint epptr ep \\rv. bitmapQ_no_L2_orphans \" + apply (unfold setEndpoint_def) + apply (rule setObject_ep_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma ct_in_state_thread_state_lift': + assumes ct: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes st: "\t. \st_tcb_at' P t\ f \\_. st_tcb_at' P t\" + shows "\ct_in_state' P\ f \\_. ct_in_state' P\" + apply (clarsimp simp: ct_in_state'_def) + apply (clarsimp simp: valid_def) + apply (frule (1) use_valid [OF _ ct]) + apply (drule (1) use_valid [OF _ st], assumption) + done + +lemma sch_act_wf_lift: + assumes tcb: "\P t. \st_tcb_at' P t\ f \\rv. st_tcb_at' P t\" + assumes tcb_cd: "\P t. \ tcb_in_cur_domain' t\ f \\_ . tcb_in_cur_domain' t \" + assumes kCT: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes ksA: "\P. \\s. 
P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + shows + "\\s. sch_act_wf (ksSchedulerAction s) s\ + f + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (clarsimp simp: valid_def) + apply (frule (1) use_valid [OF _ ksA]) + apply (case_tac "ksSchedulerAction b", simp_all) + apply (drule (2) use_valid [OF _ ct_in_state_thread_state_lift' [OF kCT tcb]]) + apply (clarsimp) + apply (rule conjI) + apply (drule (2) use_valid [OF _ tcb]) + apply (drule (2) use_valid [OF _ tcb_cd]) + done + +lemma tcb_in_cur_domain'_lift: + assumes a: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes b: "\x. \obj_at' (\tcb. x = tcbDomain tcb) t\ f \\_. obj_at' (\tcb. x = tcbDomain tcb) t\" + shows "\ tcb_in_cur_domain' t \ f \ \_. tcb_in_cur_domain' t \" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (rule b) + apply (rule a) + done + +lemma ct_idle_or_in_cur_domain'_lift: + assumes a: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes b: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + assumes c: "\P. \\s. P (ksIdleThread s)\ f \\_ s. P (ksIdleThread s)\" + assumes d: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes e: "\d a t t'. \\s. t = t' \ obj_at' (\tcb. d = tcbDomain tcb) t s\ + f + \\_ s. t = t' \ obj_at' (\tcb. d = tcbDomain tcb) t s\" + shows "\ ct_idle_or_in_cur_domain' \ f \ \_. ct_idle_or_in_cur_domain' \" + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (rule_tac f="ksCurThread" in hoare_lift_Pf) + apply (rule_tac f="ksIdleThread" in hoare_lift_Pf) + apply (rule_tac f="ksSchedulerAction" in hoare_lift_Pf) + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp hoare_vcg_imp_lift) + apply (rule e) + apply simp + apply (rule a) + apply (rule b) + apply (rule c) + apply (rule d) + done + + +lemma setObject_ep_obj_at'_tcb[wp]: + "\obj_at' (P :: tcb \ bool) t \ setObject ptr (e::endpoint) \\_. obj_at' (P :: tcb \ bool) t\" + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma setObject_ep_cur_domain[wp]: + "\\s. P (ksCurDomain s)\ setObject ptr (e::endpoint) \\_ s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ setEndpoint epptr ep \\_. tcb_in_cur_domain' t\" + apply (clarsimp simp: setEndpoint_def) + apply (rule tcb_in_cur_domain'_lift; wp) + done + +lemma setEndpoint_obj_at'_tcb[wp]: + "\obj_at' (P :: tcb \ bool) t \ setEndpoint ptr (e::endpoint) \\_. obj_at' (P :: tcb \ bool) t\" + by (clarsimp simp: setEndpoint_def, wp) + +lemma set_ep_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setEndpoint epptr ep + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (wp sch_act_wf_lift) + apply (simp add: setEndpoint_def split_def setObject_def + | wp updateObject_default_inv)+ + done + +lemma setObject_state_refs_of': + assumes x: "updateObject val = updateObject_default val" + assumes y: "(1 :: machine_word) < 2 ^ objBits val" + shows + "\\s. P ((state_refs_of' s) (ptr := refs_of' (injectKO val)))\ + setObject ptr val + \\rv s. 
P (state_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def x in_magnitude_check y + elim!: rsubst[where P=P] intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (clarsimp simp: state_refs_of'_def objBits_def[symmetric] + ps_clear_upd + cong: if_cong option.case_cong) + done + +lemma setObject_state_refs_of_eq: + assumes x: "\s s' obj obj' ptr' ptr''. + (obj', s') \ fst (updateObject val obj ptr ptr' ptr'' s) + \ refs_of' obj' = refs_of' obj" + shows + "\\s. P (state_refs_of' s)\ + setObject ptr val + \\rv s. P (state_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def in_magnitude_check lookupAround2_char1 + elim!: rsubst[where P=P] + intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (frule x, drule updateObject_objBitsKO) + apply (simp add: state_refs_of'_def ps_clear_upd + cong: option.case_cong if_cong) + done + +lemma set_ep_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (epptr := ep_q_refs_of' ep))\ + setEndpoint epptr ep + \\rv s. P (state_refs_of' s)\" + unfolding setEndpoint_def + by (wp setObject_state_refs_of', + simp_all add: objBits_simps' fun_upd_def[symmetric]) + +lemma setObject_state_hyp_refs_of': + assumes x: "updateObject val = updateObject_default val" + assumes y: "(1 :: machine_word) < 2 ^ objBits val" + shows + "\\s. P ((state_hyp_refs_of' s) (ptr := hyp_refs_of' (injectKO val)))\ + setObject ptr val + \\rv s. P (state_hyp_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def x in_magnitude_check y + elim!: rsubst[where P=P] intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (clarsimp simp: state_hyp_refs_of'_def objBits_def[symmetric] + ps_clear_upd + cong: if_cong option.case_cong) + done + +lemma setObject_state_hyp_refs_of_eq: + assumes x: "\s s' obj obj' ptr' ptr''. + (obj', s') \ fst (updateObject val obj ptr ptr' ptr'' s) + \ hyp_refs_of' obj' = hyp_refs_of' obj" + shows + "\\s. P (state_hyp_refs_of' s)\ + setObject ptr val + \\rv s. P (state_hyp_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def in_magnitude_check + lookupAround2_char1 + elim!: rsubst[where P=P] intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (frule x, drule updateObject_objBitsKO) + apply (simp add: state_hyp_refs_of'_def ps_clear_upd + cong: option.case_cong if_cong) + done + +lemma state_hyp_refs_of'_ep: + "ep_at' epptr s \ (state_hyp_refs_of' s)(epptr := {}) = state_hyp_refs_of' s" + by (rule ext) (clarsimp simp: state_hyp_refs_of'_def obj_at'_def) + +lemma setObject_gen_obj_at: + fixes v :: "'a :: pspace_storable" + assumes R: "\ko s y n. updateObject v ko p y n s = updateObject_default v ko p y n s" + assumes n: "\v' :: 'a. objBits v' = n" + assumes m: "(1 :: machine_word) < 2 ^ n" + assumes o: "\\s. obj_at' (\x :: 'a. True) p s \ P s\ setObject p v \Q\" + shows "\P\ setObject p v \Q\" + using o + apply (clarsimp simp: setObject_def valid_def in_monad split_def split_paired_Ball + R updateObject_default_def project_inject objBits_def[symmetric] n + in_magnitude_check [OF _ m]) + apply (erule allE, erule impE) + apply (fastforce simp: obj_at'_def objBits_def[symmetric] n project_inject) + apply (auto simp: project_inject objBits_def[symmetric] n in_magnitude_check [OF _ m]) + done + +lemma set_ep_state_hyp_refs_of'[wp]: + "setEndpoint epptr ep \\s. 
P (state_hyp_refs_of' s)\" + unfolding setEndpoint_def + apply (rule setObject_gen_obj_at, simp, simp add: objBits_simps', simp) + apply (wp setObject_state_hyp_refs_of'; simp add: objBits_simps' state_hyp_refs_of'_ep) + done + +lemma set_ntfn_ctes_of[wp]: + "\\s. P (ctes_of s)\ setNotification p val \\rv s. P (ctes_of s)\" + apply (simp add: setNotification_def) + apply (rule setObject_ctes_of[where Q="\", simplified]) + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def bind_def) + done + +lemma set_ntfn_valid_mdb' [wp]: + "\valid_mdb'\ + setObject epptr (ntfn::Structures_H.notification) + \\_. valid_mdb'\" + apply (simp add: valid_mdb'_def) + apply (rule set_ntfn_ctes_of[simplified setNotification_def]) + done + +lemma set_ntfn_valid_objs': + "\valid_objs' and valid_ntfn' ntfn\ + setNotification p ntfn + \\r s. valid_objs' s\" + apply (simp add: setNotification_def) + apply (rule setObject_valid_objs') + apply (clarsimp simp: updateObject_default_def in_monad + valid_obj'_def) + done + +lemma set_ntfn_valid_pspace'[wp]: + "\valid_pspace' and valid_ntfn' ntfn\ + setNotification p ntfn + \\r. valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (wp set_ntfn_aligned' [simplified] set_ntfn_valid_objs') + apply (simp add: setNotification_def,wp) + apply auto + done + +lemma set_ntfn_valid_bitmapQ[wp]: + "\Invariants_H.valid_bitmapQ\ setNotification p ntfn \\rv. Invariants_H.valid_bitmapQ\" + apply (unfold setNotification_def) + apply (rule setObject_ntfn_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ + done + +lemma set_ntfn_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ setNotification p ntfn \\rv. bitmapQ_no_L1_orphans \" + apply (unfold setNotification_def) + apply (rule setObject_ntfn_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ + done + +lemma set_ntfn_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ setNotification p ntfn \\rv. bitmapQ_no_L2_orphans \" + apply (unfold setNotification_def) + apply (rule setObject_ntfn_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ + done + +lemma set_ntfn_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (epptr := ntfn_q_refs_of' (ntfnObj ntfn) + \ ntfn_bound_refs' (ntfnBoundTCB ntfn)))\ + setNotification epptr ntfn + \\rv s. P (state_refs_of' s)\" + unfolding setNotification_def + by (wp setObject_state_refs_of', + simp_all add: objBits_simps' fun_upd_def) + +lemma state_hyp_refs_of'_ntfn: + "ntfn_at' ntfn s \ (state_hyp_refs_of' s) (ntfn := {}) = state_hyp_refs_of' s" + by (rule ext) (clarsimp simp: state_hyp_refs_of'_def obj_at'_def) + +lemma set_ntfn_state_hyp_refs_of'[wp]: + "setNotification epptr ntfn \\s. P (state_hyp_refs_of' s)\" + unfolding setNotification_def + apply (rule setObject_gen_obj_at, simp, simp add: objBits_simps', simp) + apply (wp setObject_state_hyp_refs_of'; simp add: objBits_simps' state_hyp_refs_of'_ntfn) + done + +lemma setNotification_pred_tcb_at'[wp]: + "\pred_tcb_at' proj P t\ setNotification ptr val \\rv. pred_tcb_at' proj P t\" + apply (simp add: pred_tcb_at'_def setNotification_def) + apply (rule obj_at_setObject2) + apply simp + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma setObject_ntfn_cur_domain[wp]: + "\ \s. 
P (ksCurDomain s) \ setObject ptr (ntfn::Structures_H.notification) \ \_s . P (ksCurDomain s) \" + apply (clarsimp simp: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_ntfn_obj_at'_tcb[wp]: + "\obj_at' (P :: tcb \ bool) t \ setObject ptr (ntfn::Structures_H.notification) \\_. obj_at' (P :: tcb \ bool) t\" + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma setNotification_ksCurDomain[wp]: + "\ \s. P (ksCurDomain s) \ setNotification ptr (ntfn::Structures_H.notification) \ \_s . P (ksCurDomain s) \" + apply (simp add: setNotification_def) + apply wp + done + +lemma setNotification_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ setNotification epptr ep \\_. tcb_in_cur_domain' t\" + apply (clarsimp simp: setNotification_def) + apply (rule tcb_in_cur_domain'_lift; wp) + done + +lemma set_ntfn_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setNotification ntfnptr ntfn + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (wp sch_act_wf_lift | clarsimp simp: setNotification_def)+ + apply (simp add: setNotification_def split_def setObject_def + | wp updateObject_default_inv)+ + done + +lemmas cur_tcb_lift = + hoare_lift_Pf [where f = ksCurThread and P = tcb_at', folded cur_tcb'_def] + +lemma set_ntfn_cur_tcb'[wp]: + "\cur_tcb'\ setNotification ptr ntfn \\rv. cur_tcb'\" + apply (wp cur_tcb_lift) + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ setEndpoint ptr val \\rv s. P (typ_at' T p s)\" + unfolding setEndpoint_def + by (rule setObject_typ_at') + +lemmas setEndpoint_typ_ats[wp] = typ_at_lifts [OF setEndpoint_typ_at'] + +lemma get_ep_sp': + "\P\ getEndpoint r \\t. P and ko_at' t r\" + by (clarsimp simp: getEndpoint_def getObject_def loadObject_default_def + in_monad valid_def obj_at'_def objBits_simps' in_magnitude_check split_def) + +lemma setEndpoint_cur_tcb'[wp]: + "\cur_tcb'\ setEndpoint p v \\rv. cur_tcb'\" + apply (wp cur_tcb_lift) + apply (simp add: setEndpoint_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ (v \ IdleEP \ ex_nonz_cap_to' p s)\ + setEndpoint p v + \\rv. if_live_then_nonz_cap'\" + unfolding setEndpoint_def + apply (wp setObject_iflive'[where P="\"]) + apply simp + apply (simp add: objBits_simps') + apply simp + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def in_monad bind_def) + apply (clarsimp simp: live'_def) + done + +declare setEndpoint_cte_wp_at'[wp] + +lemma ex_nonz_cap_to_pres': + assumes y: "\P p. \cte_wp_at' P p\ f \\rv. cte_wp_at' P p\" + shows "\ex_nonz_cap_to' p\ f \\rv. ex_nonz_cap_to' p\" + apply (simp only: ex_nonz_cap_to'_def) + apply (intro hoare_vcg_disj_lift hoare_vcg_ex_lift + y hoare_vcg_all_lift) + done + +lemma setEndpoint_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setEndpoint p' v \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma setEndpoint_ifunsafe'[wp]: + "\if_unsafe_then_cap'\ setEndpoint p v \\rv. if_unsafe_then_cap'\" + unfolding setEndpoint_def + apply (rule setObject_ifunsafe'[where P="\", simplified]) + apply (clarsimp simp: updateObject_default_def in_monad + intro!: equals0I)+ + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_idle'[wp]: + "\\s. 
valid_idle' s\ + setEndpoint p v + \\_. valid_idle'\" + unfolding setEndpoint_def + apply (wp setObject_idle') + apply (simp add: objBits_simps' updateObject_default_inv idle_tcb_ps_def)+ + done + +crunch it[wp]: setEndpoint "\s. P (ksIdleThread s)" + (simp: updateObject_default_inv) + +lemma setObject_ksPSpace_only: + "\ \p q n ko. \P\ updateObject val p q n ko \\rv. P \; + \f s. P (ksPSpace_update f s) = P s \ + \ \P\ setObject ptr val \\rv. P\" + apply (simp add: setObject_def split_def) + apply (wp | simp | assumption)+ + done + +lemma setObject_ksMachine: + "\ \p q n ko. \\s. P (ksMachineState s)\ updateObject val p q n ko \\rv s. P (ksMachineState s)\ \ + \ \\s. P (ksMachineState s)\ setObject ptr val \\rv s. P (ksMachineState s)\" + by (simp add: setObject_ksPSpace_only) + +lemma setObject_ksInterrupt: + "\ \p q n ko. \\s. P (ksInterruptState s)\ updateObject val p q n ko \\rv s. P (ksInterruptState s)\ \ + \ \\s. P (ksInterruptState s)\ setObject ptr val \\rv s. P (ksInterruptState s)\" + by (simp add: setObject_ksPSpace_only) + +lemma valid_irq_handlers_lift': + assumes x: "\P. \\s. P (cteCaps_of s)\ f \\rv s. P (cteCaps_of s)\" + assumes y: "\P. \\s. P (ksInterruptState s)\ f \\rv s. P (ksInterruptState s)\" + shows "\valid_irq_handlers'\ f \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def irq_issued'_def) + apply (rule hoare_use_eq [where f=cteCaps_of, OF x y]) + done + +lemmas valid_irq_handlers_lift'' = valid_irq_handlers_lift' [unfolded cteCaps_of_def] + +crunch ksInterruptState[wp]: setEndpoint "\s. P (ksInterruptState s)" + (wp: setObject_ksInterrupt updateObject_default_inv) + +lemmas setEndpoint_irq_handlers[wp] + = valid_irq_handlers_lift'' [OF set_ep_ctes_of setEndpoint_ksInterruptState] + +declare set_ep_arch' [wp] + +lemma set_ep_maxObj [wp]: + "\\s. P (gsMaxObjectSize s)\ setEndpoint ptr val \\rv s. P (gsMaxObjectSize s)\" + by (simp add: setEndpoint_def | wp setObject_ksPSpace_only updateObject_default_inv)+ + +lemma valid_global_refs_lift': + assumes ctes: "\P. \\s. P (ctes_of s)\ f \\_ s. P (ctes_of s)\" + assumes arch: "\P. \\s. P (ksArchState s)\ f \\_ s. P (ksArchState s)\" + assumes idle: "\P. \\s. P (ksIdleThread s)\ f \\_ s. P (ksIdleThread s)\" + assumes irqn: "\P. \\s. P (irq_node' s)\ f \\_ s. P (irq_node' s)\" + assumes maxObj: "\P. \\s. P (gsMaxObjectSize s)\ f \\_ s. P (gsMaxObjectSize s)\" + shows "\valid_global_refs'\ f \\_. valid_global_refs'\" + apply (simp add: valid_global_refs'_def valid_refs'_def global_refs'_def valid_cap_sizes'_def) + apply (rule hoare_lift_Pf [where f="ksArchState"]) + apply (rule hoare_lift_Pf [where f="ksIdleThread"]) + apply (rule hoare_lift_Pf [where f="irq_node'"]) + apply (rule hoare_lift_Pf [where f="gsMaxObjectSize"]) + apply (wp ctes hoare_vcg_const_Ball_lift arch idle irqn maxObj)+ + done + +lemma valid_arch_state_lift': + assumes typs: "\T p P. f \\s. P (typ_at' T p s)\" + assumes arch: "\P. f \\s. P (ksArchState s)\" + assumes vcpu: "\P p. f \\s. P (ko_wp_at' (is_vcpu' and hyp_live') p s)\" + shows "f \valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def vspace_table_at'_defs) + apply (wp_pre, wps arch) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_const_imp_lift vcpu[unfolded pred_conj_def] + split: option.split) + apply (clarsimp simp: pred_conj_def) + done + +lemma setObject_ep_ct: + "\\s. P (ksCurThread s)\ setObject p (e::endpoint) \\_ s. 
P (ksCurThread s)\" + apply (simp add: setObject_def updateObject_ep_eta split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_ntfn_ct: + "\\s. P (ksCurThread s)\ setObject p (e::Structures_H.notification) + \\_ s. P (ksCurThread s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma get_ntfn_sp': + "\P\ getNotification r \\t. P and ko_at' t r\" + by (clarsimp simp: getNotification_def getObject_def loadObject_default_def + in_monad valid_def obj_at'_def objBits_simps' in_magnitude_check split_def) + +lemma set_ntfn_pred_tcb_at' [wp]: + "\ pred_tcb_at' proj P t \ + setNotification ep v + \ \rv. pred_tcb_at' proj P t \" + apply (simp add: setNotification_def pred_tcb_at'_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma set_ntfn_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ (live' (KONotification v) \ ex_nonz_cap_to' p s)\ + setNotification p v + \\rv. if_live_then_nonz_cap'\" + apply (simp add: setNotification_def) + apply (wp setObject_iflive'[where P="\"]) + apply simp + apply (simp add: objBits_simps) + apply (simp add: objBits_simps') + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def bind_def) + apply clarsimp + done + +declare setNotification_cte_wp_at'[wp] + +lemma set_ntfn_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setNotification p' v \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma setNotification_ifunsafe'[wp]: + "\if_unsafe_then_cap'\ setNotification p v \\rv. if_unsafe_then_cap'\" + unfolding setNotification_def + apply (rule setObject_ifunsafe'[where P="\", simplified]) + apply (clarsimp simp: updateObject_default_def in_monad + intro!: equals0I)+ + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setNotification_idle'[wp]: + "\\s. valid_idle' s\ setNotification p v \\rv. valid_idle'\" + unfolding setNotification_def + apply (wp setObject_idle') + apply (simp add: objBits_simps' updateObject_default_inv idle_tcb_ps_def)+ + done + +crunch it[wp]: setNotification "\s. P (ksIdleThread s)" + (wp: updateObject_default_inv) + +lemma set_ntfn_arch' [wp]: + "\\s. P (ksArchState s)\ setNotification ntfn p \\_ s. P (ksArchState s)\" + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv|simp)+ + done + +lemma set_ntfn_ksInterrupt[wp]: + "\\s. P (ksInterruptState s)\ setNotification ptr val \\rv s. P (ksInterruptState s)\" + by (simp add: setNotification_def | wp setObject_ksInterrupt updateObject_default_inv)+ + +lemma set_ntfn_ksMachine[wp]: + "\\s. P (ksMachineState s)\ setNotification ptr val \\rv s. P (ksMachineState s)\" + by (simp add: setNotification_def | wp setObject_ksMachine updateObject_default_inv)+ + +lemma set_ntfn_maxObj [wp]: + "\\s. P (gsMaxObjectSize s)\ setNotification ptr val \\rv s. P (gsMaxObjectSize s)\" + by (simp add: setNotification_def | wp setObject_ksPSpace_only updateObject_default_inv)+ + +lemma set_ntfn_global_refs' [wp]: + "\valid_global_refs'\ setNotification ptr val \\_. valid_global_refs'\" + by (rule valid_global_refs_lift'; wp) + +crunch typ_at' [wp]: setNotification "\s. P (typ_at' T p s)" (ignore_del: setObject) + +lemma set_ntfn_hyp[wp]: + "setNotification ptr val \\s. 
P (ko_wp_at' (is_vcpu' and hyp_live') p s)\" + unfolding setNotification_def + by (wpsimp wp: setObject_ko_wp_at simp: objBits_simps', rule refl, simp) + (clarsimp simp: is_vcpu'_def ko_wp_at'_def obj_at'_def) + +lemma set_ep_hyp[wp]: + "setEndpoint ptr val \\s. P (ko_wp_at' (is_vcpu' and hyp_live') p s)\" + unfolding setEndpoint_def + by (wpsimp wp: setObject_ko_wp_at simp: objBits_simps', rule refl, simp) + (clarsimp simp: is_vcpu'_def ko_wp_at'_def obj_at'_def) + +crunches setEndpoint, setNotification + for valid_arch'[wp]: valid_arch_state' + and pspace_canonoical'[wp]: pspace_canonical' + (wp: valid_arch_state_lift') + +lemmas valid_irq_node_lift = + hoare_use_eq_irq_node' [OF _ typ_at_lift_valid_irq_node'] + +lemmas untyped_ranges_zero_lift + = hoare_use_eq[where f="gsUntypedZeroRanges" + and Q="\v s. untyped_ranges_zero_inv (f s) v" for f] + +lemma valid_irq_states_lift': + assumes x: "\P. \\s. P (intStateIRQTable (ksInterruptState s))\ f \\rv s. P (intStateIRQTable (ksInterruptState s))\" + assumes y: "\P. \\s. P (irq_masks (ksMachineState s))\ f \\rv s. P (irq_masks (ksMachineState s))\" + shows "\valid_irq_states'\ f \\rv. valid_irq_states'\" + apply (rule hoare_use_eq [where f="\s. irq_masks (ksMachineState s)"], rule y) + apply (rule hoare_use_eq [where f="\s. intStateIRQTable (ksInterruptState s)"], rule x) + apply wp + done + +lemmas set_ntfn_irq_handlers'[wp] = valid_irq_handlers_lift'' [OF set_ntfn_ctes_of set_ntfn_ksInterrupt] + +lemmas set_ntfn_irq_states' [wp] = valid_irq_states_lift' [OF set_ntfn_ksInterrupt set_ntfn_ksMachine] + +lemma set_ntfn_vms'[wp]: + "\valid_machine_state'\ setNotification ptr val \\rv. valid_machine_state'\" + apply (simp add: setNotification_def valid_machine_state'_def pointerInDeviceData_def pointerInUserData_def) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift) + by (wp setObject_typ_at_inv setObject_ksMachine updateObject_default_inv | + simp)+ + +lemma irqs_masked_lift: + assumes "\P. \\s. P (intStateIRQTable (ksInterruptState s))\ f + \\rv s. P (intStateIRQTable (ksInterruptState s))\" + shows "\irqs_masked'\ f \\_. irqs_masked'\" + apply (simp add: irqs_masked'_def) + apply (wp assms) + done + +lemma setObject_pspace_domain_valid[wp]: + "\pspace_domain_valid\ + setObject ptr val + \\rv. pspace_domain_valid\" + apply (clarsimp simp: setObject_def split_def pspace_domain_valid_def + valid_def in_monad + split: if_split_asm) + apply (drule updateObject_objBitsKO) + apply (clarsimp simp: lookupAround2_char1) + done + +crunches setNotification, setEndpoint + for pspace_domain_valid[wp]: "pspace_domain_valid" + +lemma ct_not_inQ_lift: + assumes sch_act: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + and not_inQ: "\\s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\ + f \\_ s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" + shows "\ct_not_inQ\ f \\_. ct_not_inQ\" + unfolding ct_not_inQ_def + by (rule hoare_convert_imp [OF sch_act not_inQ]) + +lemma setNotification_ct_not_inQ[wp]: + "\ct_not_inQ\ setNotification ptr rval \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF setNotification_nosch]) + apply (simp add: setNotification_def ct_not_inQ_def) + apply (rule hoare_weaken_pre) + apply (wps setObject_ntfn_ct) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad)+ + done + +lemma setNotification_ksCurThread[wp]: + "\\s. P (ksCurThread s)\ setNotification a b \\rv s. 
P (ksCurThread s)\" + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setNotification_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ setNotification a b \\rv s. P (ksDomSchedule s)\" + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setNotification_ksDomScheduleId[wp]: + "\\s. P (ksDomScheduleIdx s)\ setNotification a b \\rv s. P (ksDomScheduleIdx s)\" + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setNotification_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ setNotification ptr ntfn \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift| rule obj_at_setObject2 + | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ + done + +crunch gsUntypedZeroRanges[wp]: setNotification "\s. P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) + +lemma sym_heap_sched_pointers_lift: + assumes prevs: "\P. f \\s. P (tcbSchedPrevs_of s)\" + assumes nexts: "\P. f \\s. P (tcbSchedNexts_of s)\" + shows "f \sym_heap_sched_pointers\" + by (rule_tac f=tcbSchedPrevs_of in hoare_lift_Pf2; wpsimp wp: assms) + +crunches setNotification + for tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: updateObject_default_def) + +lemma set_ntfn_minor_invs': + "\invs' and obj_at' (\ntfn. ntfn_q_refs_of' (ntfnObj ntfn) = ntfn_q_refs_of' (ntfnObj val) + \ ntfn_bound_refs' (ntfnBoundTCB ntfn) = ntfn_bound_refs' (ntfnBoundTCB val)) + ptr + and valid_ntfn' val + and (\s. live' (KONotification val) \ ex_nonz_cap_to' ptr s) + and (\s. ptr \ ksIdleThread s) \ + setNotification ptr val + \\rv. invs'\" + apply (clarsimp simp: invs'_def valid_state'_def cteCaps_of_def) + apply (wpsimp wp: irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift + sym_heap_sched_pointers_lift valid_bitmaps_lift + simp: o_def) + apply (clarsimp elim!: rsubst[where P=sym_refs] + intro!: ext + dest!: obj_at_state_refs_ofD')+ + done + +lemma getEndpoint_wp: + "\\s. \ep. ko_at' ep e s \ P ep s\ getEndpoint e \P\" + apply (rule hoare_strengthen_post) + apply (rule get_ep_sp') + apply simp + done + +lemma getNotification_wp: + "\\s. \ntfn. ko_at' ntfn e s \ P ntfn s\ getNotification e \P\" + apply (rule hoare_strengthen_post) + apply (rule get_ntfn_sp') + apply simp + done + +lemma ep_redux_simps': + "ep_q_refs_of' (case xs of [] \ IdleEP | y # ys \ SendEP xs) + = (set xs \ {EPSend})" + "ep_q_refs_of' (case xs of [] \ IdleEP | y # ys \ RecvEP xs) + = (set xs \ {EPRecv})" + "ntfn_q_refs_of' (case xs of [] \ IdleNtfn | y # ys \ WaitingNtfn xs) + = (set xs \ {NTFNSignal})" + by (fastforce split: list.splits + simp: valid_ep_def valid_ntfn_def)+ + + +(* There are two wp rules for preserving valid_ioc over set_object. + First, the more involved rule for CNodes and TCBs *) +(* Second, the simpler rule suitable for all objects except CNodes and TCBs. *) +lemma valid_refs'_def2: + "valid_refs' R (ctes_of s) = (\cref. \cte_wp_at' (\c. 
R \ capRange (cteCap c) \ {}) cref s)" + by (auto simp: valid_refs'_def cte_wp_at_ctes_of ran_def) + +lemma idle_is_global [intro!]: + "ksIdleThread s \ global_refs' s" + by (simp add: global_refs'_def) + +lemma aligned_distinct_obj_atI': + "\ ksPSpace s x = Some ko; pspace_aligned' s; pspace_distinct' s; ko = injectKO v \ + \ ko_at' v x s" + apply (simp add: obj_at'_def project_inject pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply (clarsimp simp: objBits_simps' word_bits_def + split: kernel_object.splits arch_kernel_object.splits) + done + +lemma aligned'_distinct'_ko_wp_at'I: + "\ksPSpace s' x = Some ko; P ko; pspace_aligned' s'; pspace_distinct' s'\ + \ ko_wp_at' P x s'" + apply (simp add: ko_wp_at'_def pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply (cases ko; force) + done + +lemma aligned'_distinct'_ko_at'I: + "\ksPSpace s' x = Some ko; pspace_aligned' s'; pspace_distinct' s'; + ko = injectKO (v:: 'a :: pspace_storable)\ + \ ko_at' v x s'" + by (fastforce elim: aligned'_distinct'_ko_wp_at'I simp: obj_at'_real_def project_inject) + +lemma valid_globals_cte_wpD': + "\ valid_global_refs' s; cte_wp_at' P p s \ + \ \cte. P cte \ ksIdleThread s \ capRange (cteCap cte)" + by (fastforce simp: valid_global_refs'_def valid_refs'_def cte_wp_at_ctes_of) + +lemma dmo_aligned'[wp]: + "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + done + +lemma dmo_distinct'[wp]: + "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + done + +lemma dmo_valid_objs'[wp]: + "\valid_objs'\ doMachineOp f \\_. valid_objs'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + done + +lemma dmo_inv': + assumes R: "\P. \P\ f \\_. P\" + shows "\P\ doMachineOp f \\_. P\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (drule in_inv_by_hoareD [OF R]) + apply simp + done + +crunch cte_wp_at'2[wp]: doMachineOp "\s. P (cte_wp_at' P' p s)" + +crunch typ_at'[wp]: doMachineOp "\s. P (typ_at' T p s)" + +lemmas doMachineOp_typ_ats[wp] = typ_at_lifts [OF doMachineOp_typ_at'] + +lemma doMachineOp_invs_bits[wp]: + "doMachineOp m \valid_pspace'\" + "doMachineOp m \\s. sch_act_wf (ksSchedulerAction s) s\" + "doMachineOp m \valid_bitmaps\" + "doMachineOp m \valid_sched_pointers\" + "doMachineOp m \\s. P (state_refs_of' s)\" + "doMachineOp m \\s. P (state_hyp_refs_of' s)\" + "doMachineOp m \if_live_then_nonz_cap'\" + "doMachineOp m \cur_tcb'\" + "doMachineOp m \if_unsafe_then_cap'\" + by (simp add: doMachineOp_def split_def + | wp + | fastforce elim: state_refs_of'_pspaceI)+ + +crunch obj_at'[wp]: doMachineOp "\s. P (obj_at' P' p s)" + +crunch it[wp]: doMachineOp "\s. P (ksIdleThread s)" +crunch idle'[wp]: doMachineOp "valid_idle'" + (wp: crunch_wps simp: crunch_simps valid_idle'_pspace_itI) + +lemma setEndpoint_ksMachine: + "\\s. P (ksMachineState s)\ setEndpoint ptr val \\rv s. P (ksMachineState s)\" + by (simp add: setEndpoint_def | wp setObject_ksMachine updateObject_default_inv)+ + +lemmas setEndpoint_valid_irq_states' = + valid_irq_states_lift' [OF setEndpoint_ksInterruptState setEndpoint_ksMachine] + +lemma setEndpoint_ct': + "\\s. P (ksCurThread s)\ setEndpoint a b \\rv s. 
  P (ksCurThread s)\<rbrace>"
+  apply (simp add: setEndpoint_def setObject_def split_def)
+  apply (wp updateObject_default_inv | simp)+
+  done
+
+lemma obj_at'_is_canonical:
+  "\<lbrakk>pspace_canonical' s; obj_at' P t s\<rbrakk> \<Longrightarrow> canonical_address t"
+  by (force simp: obj_at'_def pspace_canonical'_def)
+
+lemmas setEndpoint_valid_globals[wp]
+    = valid_global_refs_lift' [OF set_ep_ctes_of set_ep_arch'
+                                  setEndpoint_it setEndpoint_ksInterruptState]
+
+end
+end
diff --git a/proof/refine/AARCH64/KernelInit_R.thy b/proof/refine/AARCH64/KernelInit_R.thy
new file mode 100644
index 0000000000..517a1a8a83
--- /dev/null
+++ b/proof/refine/AARCH64/KernelInit_R.thy
@@ -0,0 +1,41 @@
+(*
+ * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *)
+
+(* Kernel init refinement. Currently axiomatised.
+*)
+
+theory KernelInit_R
+imports
+  IncKernelInit
+  "AInvs.KernelInit_AI"
+begin
+
+(* Axiomatisation of the rest of the initialisation code *)
+axiomatization where
+  init_refinement:
+  "Init_H \<subseteq> lift_state_relation state_relation `` Init_A"
+
+axiomatization where
+  ckernel_init_invs:
+  "\<forall>((tc,s),x) \<in> Init_H. invs' s"
+
+axiomatization where
+  ckernel_init_sch_norm:
+  "((tc,s),x) \<in> Init_H \<Longrightarrow> ksSchedulerAction s = ResumeCurrentThread"
+
+axiomatization where
+  ckernel_init_ctr:
+  "((tc,s),x) \<in> Init_H \<Longrightarrow> ct_running' s"
+
+axiomatization where
+  ckernel_init_domain_time:
+  "((tc,s),x) \<in> Init_H \<Longrightarrow> ksDomainTime s \<noteq> 0"
+
+axiomatization where
+  ckernel_init_domain_list:
+  "((tc,s),x) \<in> Init_H \<Longrightarrow> length (ksDomSchedule s) > 0 \<and> (\<forall>(d,time) \<in> set (ksDomSchedule s). time > 0)"
+
+end
diff --git a/proof/refine/AARCH64/LevityCatch.thy b/proof/refine/AARCH64/LevityCatch.thy
new file mode 100644
index 0000000000..29272dce95
--- /dev/null
+++ b/proof/refine/AARCH64/LevityCatch.thy
@@ -0,0 +1,57 @@
+(*
+ * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *)
+
+theory LevityCatch
+imports
+  "BaseRefine.Include"
+  "Lib.AddUpdSimps"
+  "Lib.LemmaBucket"
+  "Lib.SimpStrategy"
+  "Lib.Corres_Method"
+begin
+
+no_notation bind_drop (infixl ">>" 60)
+
+lemma magnitudeCheck_assert:
+  "magnitudeCheck x y n = assert (case y of None \<Rightarrow> True | Some z \<Rightarrow> 1 << n \<le> z - x)"
+  by (fastforce simp: magnitudeCheck_def assert_def when_def
+                split: option.split)
+
+lemma projectKO_inv: "projectKO ko \<lbrace>P\<rbrace>"
+  by (simp add: projectKO_def fail_def valid_def return_def
+         split: option.splits)
+
+lemma alignCheck_assert:
+  "alignCheck ptr n = assert (is_aligned ptr n)"
+  by (simp add: is_aligned_mask alignCheck_def assert_def
+                alignError_def unless_def when_def)
+
+lemma magnitudeCheck_inv:
+  "magnitudeCheck x y n \<lbrace>P\<rbrace>"
+  by (wpsimp simp: magnitudeCheck_def)
+
+lemma alignCheck_inv:
+  "alignCheck x n \<lbrace>P\<rbrace>"
+  by (wpsimp simp: alignCheck_def alignError_def)
+
+lemma updateObject_default_inv:
+  "updateObject_default obj ko ptr ptr' next \<lbrace>P\<rbrace>"
+  unfolding updateObject_default_def
+  by (wpsimp wp: magnitudeCheck_inv alignCheck_inv projectKO_inv)
+
+
+context begin interpretation Arch .
+
+lemmas makeObject_simps =
+  makeObject_endpoint makeObject_notification makeObject_cte
+  makeObject_tcb makeObject_user_data makeObject_pte makeObject_asidpool
+
+lemma to_from_apiType[simp]: "toAPIType (fromAPIType x) = Some x"
+  by (cases x) (auto simp: fromAPIType_def toAPIType_def)
+
+end
+
+end
diff --git a/proof/refine/AARCH64/Machine_R.thy b/proof/refine/AARCH64/Machine_R.thy
new file mode 100644
index 0000000000..34709d376a
--- /dev/null
+++ b/proof/refine/AARCH64/Machine_R.thy
@@ -0,0 +1,83 @@
+(*
+ * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *)
+
+(*
+  Properties of machine operations.
+*)
+
+theory Machine_R
+imports Bits_R
+begin
+
+definition "irq_state_independent_H (P :: kernel_state \<Rightarrow> bool)\<equiv>
+  \<forall>(f :: nat \<Rightarrow> nat) (s :: kernel_state). P s \<longrightarrow> P (s\<lparr>ksMachineState := ksMachineState s
+      \<lparr>irq_state := f (irq_state (ksMachineState s))\<rparr>\<rparr>)"
+
+lemma irq_state_independent_HI[intro!, simp]:
+  "\<lbrakk>\<And>s f. P (s\<lparr>ksMachineState := ksMachineState s
+               \<lparr>irq_state := f (irq_state (ksMachineState s))\<rparr>\<rparr>) = P s\<rbrakk>
+   \<Longrightarrow> irq_state_independent_H P"
+  by (simp add: irq_state_independent_H_def)
+
+context begin interpretation Arch . (*FIXME: arch_split*)
+
+lemma dmo_getirq_inv[wp]:
+  "irq_state_independent_H P \<Longrightarrow> \<lbrace>P\<rbrace> doMachineOp (getActiveIRQ in_kernel) \<lbrace>\<lambda>rv. P\<rbrace>"
+  apply (simp add: getActiveIRQ_def doMachineOp_def split_def exec_gets
+                   select_f_select[simplified liftM_def]
+                   select_modify_comm gets_machine_state_modify)
+  apply wp
+  apply (clarsimp simp: irq_state_independent_H_def in_monad return_def split: if_splits)
+  done
+
+lemma getActiveIRQ_masked:
+  "\<lbrace>\<lambda>s. valid_irq_masks' table (irq_masks s)\<rbrace> getActiveIRQ in_kernel
+   \<lbrace>\<lambda>rv s. \<forall>irq. rv = Some irq \<longrightarrow> table irq \<noteq> IRQInactive\<rbrace>"
+  apply (simp add: getActiveIRQ_def)
+  apply wp
+  apply (clarsimp simp: valid_irq_masks'_def)
+  done
+
+lemma dmo_maskInterrupt:
+  "\<lbrace>\<lambda>s. P (ksMachineState_update (irq_masks_update (\<lambda>t. t (irq := m))) s)\<rbrace>
+   doMachineOp (maskInterrupt m irq) \<lbrace>\<lambda>_. P\<rbrace>"
+  apply (simp add: doMachineOp_def split_def)
+  apply wp
+  apply (clarsimp simp: maskInterrupt_def in_monad)
+  apply (erule rsubst [where P=P])
+  apply simp
+  done
+
+lemma dmo_maskInterrupt_True:
+  "\<lbrace>invs'\<rbrace> doMachineOp (maskInterrupt True irq) \<lbrace>\<lambda>_. invs'\<rbrace>"
+  apply (wp dmo_maskInterrupt)
+  apply (clarsimp simp: invs'_def valid_state'_def)
+  apply (simp add: valid_irq_masks'_def valid_machine_state'_def
+                   ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def)
+  done
+
+lemma setIRQState_irq_states':
+  "\<lbrace>valid_irq_states'\<rbrace>
+   setIRQState state irq
+   \<lbrace>\<lambda>rv. valid_irq_states'\<rbrace>"
+  apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def)
+  apply (wp dmo_maskInterrupt)
+  apply (simp add: valid_irq_masks'_def)
+  done
+
+lemma getActiveIRQ_le_maxIRQ:
+  "\<lbrace>irqs_masked' and valid_irq_states'\<rbrace> doMachineOp (getActiveIRQ in_kernel) \<lbrace>\<lambda>rv s. \<forall>x.
     rv = Some x \<longrightarrow> x \<le> maxIRQ\<rbrace>"
+  apply (simp add: doMachineOp_def split_def)
+  apply wp
+  apply clarsimp
+  apply (drule use_valid, rule getActiveIRQ_le_maxIRQ')
+   prefer 2
+   apply simp
+  apply (simp add: irqs_masked'_def valid_irq_states'_def)
+  done
+
+end
+end
diff --git a/proof/refine/AARCH64/PageTableDuplicates.thy b/proof/refine/AARCH64/PageTableDuplicates.thy
new file mode 100644
index 0000000000..08eb06bcc3
--- /dev/null
+++ b/proof/refine/AARCH64/PageTableDuplicates.thy
@@ -0,0 +1,42 @@
+(*
+ * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *)
+
+theory PageTableDuplicates
+imports Syscall_R
+begin
+
+context begin interpretation Arch . (*FIXME: arch_split*)
+
+lemma doMachineOp_ksPSpace_inv[wp]:
+  "\<lbrace>\<lambda>s. P (ksPSpace s)\<rbrace> doMachineOp f \<lbrace>\<lambda>ya s. P (ksPSpace s)\<rbrace>"
+  by (simp add:doMachineOp_def split_def | wp)+
+
+lemma foldr_data_map_insert[simp]:
+  "foldr (\<lambda>addr map a. if a = addr then Some b else map a) = foldr (\<lambda>addr. data_map_insert addr b)"
+  apply (rule ext)+
+  apply (simp add:data_map_insert_def[abs_def] fun_upd_def)
+  done
+
+lemma mapM_x_mapM_valid:
+  "\<lbrace> P \<rbrace> mapM_x f xs \<lbrace>\<lambda>r. Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace>mapM f xs \<lbrace>\<lambda>r. Q\<rbrace>"
+  apply (simp add: mapM_x_mapM)
+  apply (clarsimp simp:valid_def return_def bind_def)
+  apply (drule spec)
+  apply (erule impE)
+   apply simp
+  apply (drule(1) bspec)
+  apply fastforce
+  done
+
+declare withoutPreemption_lift [wp del]
+
+crunch valid_cap'[wp]:
+  isFinalCapability "\<lambda>s. valid_cap' cap s"
+  (wp: crunch_wps filterM_preserved simp: crunch_simps unless_def)
+
+end
+
+end
diff --git a/proof/refine/AARCH64/RAB_FN.thy b/proof/refine/AARCH64/RAB_FN.thy
new file mode 100644
index 0000000000..cbe2a89c2f
--- /dev/null
+++ b/proof/refine/AARCH64/RAB_FN.thy
@@ -0,0 +1,147 @@
+(*
+ * Copyright 2014, General Dynamics C4 Systems
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *)
+
+theory RAB_FN
+
+imports
+  "CSpace1_R"
+  "Lib.MonadicRewrite"
+
+begin
+
+definition
+ "only_cnode_caps ctes =
+    option_map ((\<lambda>x. if isCNodeCap x then x else NullCap) o cteCap) o ctes"
+
+definition locateSlotFun_def:
+"locateSlotFun cnode offset \<equiv> cnode + 2 ^ cte_level_bits * offset"
+
+definition
+ "cnode_caps_gsCNodes cts cns
+    = (\<forall>cap \<in> ran cts. isCNodeCap cap
+        \<longrightarrow> cns (capCNodePtr cap) = Some (capCNodeBits cap))"
+
+abbreviation (input)
+  "cnode_caps_gsCNodes' s \<equiv> cnode_caps_gsCNodes (only_cnode_caps (ctes_of s)) (gsCNodes s)"
+
+function
+  resolveAddressBitsFn ::
+  "capability \<Rightarrow> cptr \<Rightarrow> nat \<Rightarrow> (machine_word \<Rightarrow> capability option)
+    \<Rightarrow> (lookup_failure + (machine_word * nat))"
+where
+ "resolveAddressBitsFn a b c =
+(\<lambda>x0 capptr bits caps.
(let nodeCap = x0 in + if isCNodeCap nodeCap + then (let + radixBits = capCNodeBits nodeCap; + guardBits = capCNodeGuardSize nodeCap; + levelBits = radixBits + guardBits; + offset = (fromCPtr capptr `~shiftR~` (bits-levelBits)) && + (mask radixBits); + guard = (fromCPtr capptr `~shiftR~` (bits-guardBits)) && + (mask guardBits); + bitsLeft = bits - levelBits; + slot = locateSlotFun (capCNodePtr nodeCap) offset + in + if levelBits = 0 then Inr (0, 0) + else if \ (guardBits \ bits \ guard = capCNodeGuard nodeCap) + then Inl $ GuardMismatch_ \ + guardMismatchBitsLeft= bits, + guardMismatchGuardFound= capCNodeGuard nodeCap, + guardMismatchGuardSize= guardBits \ + else if (levelBits > bits) then Inl $ DepthMismatch_ \ + depthMismatchBitsLeft= bits, + depthMismatchBitsFound= levelBits \ + else if (bitsLeft = 0) + then Inr (slot, 0) + else (case caps slot of Some NullCap + \ Inr (slot, bitsLeft) + | Some nextCap + \ resolveAddressBitsFn nextCap capptr bitsLeft caps + | None \ Inr (0, 0)) + ) + else Inl InvalidRoot + )) + +a b c" + by auto + +termination + apply (relation "measure (snd o snd)") + apply (auto split: if_split_asm) + done + +declare resolveAddressBitsFn.simps[simp del] + +lemma isCNodeCap_capUntypedPtr_capCNodePtr: + "isCNodeCap c \ capUntypedPtr c = capCNodePtr c" + by (clarsimp simp: isCap_simps) + +lemma resolveAddressBitsFn_eq: + "monadic_rewrite F E (\s. (isCNodeCap cap \ (\slot. cte_wp_at' (\cte. cteCap cte = cap) slot s)) + \ valid_objs' s \ cnode_caps_gsCNodes' s) + (resolveAddressBits cap capptr bits) + (gets (resolveAddressBitsFn cap capptr bits o only_cnode_caps o ctes_of))" + (is "monadic_rewrite F E (?P cap) (?f cap bits) (?g cap capptr bits)") +proof (induct cap capptr bits rule: resolveAddressBits.induct) + case (1 cap cref depth) + show ?case + apply (subst resolveAddressBits.simps, subst resolveAddressBitsFn.simps) + apply (simp only: Let_def haskell_assertE_def K_bind_def) + apply (rule monadic_rewrite_name_pre) + apply (rule monadic_rewrite_guard_imp) + apply (rule_tac P="(=) s" in monadic_rewrite_trans) + (* step 1, apply the induction hypothesis on the lhs *) + apply (rule monadic_rewrite_named_if monadic_rewrite_named_bindE + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="returnOk y" for y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="x $ y" for x y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="assertE P" for P s] + TrueI)+ + apply (rule_tac g="case nextCap of CNodeCap a b c d + \ ?g nextCap cref bitsLeft + | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_guard_imp) + apply (wpc | rule monadic_rewrite_refl "1.hyps" + | simp only: capability.case haskell_assertE_def simp_thms)+ + apply (clarsimp simp: in_monad locateSlot_conv getSlotCap_def + dest!: in_getCTE fst_stateAssertD) + apply (fastforce elim: cte_wp_at_weakenE') + apply (rule monadic_rewrite_refl[THEN monadic_rewrite_guard_imp], simp) + (* step 2, split and match based on the lhs structure *) + apply (simp add: locateSlot_conv liftE_bindE unlessE_def whenE_def + if_to_top_of_bindE assertE_def stateAssert_def bind_assoc + assert_def if_to_top_of_bind getSlotCap_def + split del: if_split cong: if_cong) + apply (rule monadic_rewrite_if_l monadic_rewrite_symb_exec_l'[OF _ get_wp, rotated] + empty_fail_get no_fail_get impI + monadic_rewrite_refl get_wp + | simp add: throwError_def returnOk_def locateSlotFun_def if_not_P + isCNodeCap_capUntypedPtr_capCNodePtr + cong: if_cong split del: if_split)+ + apply (rule monadic_rewrite_symb_exec_l'[OF _ getCTE_inv _ 
_ getCTE_cte_wp_at, rotated]) + apply simp + apply (rule impI, rule no_fail_getCTE) + apply (simp add: monadic_rewrite_def simpler_gets_def return_def returnOk_def + only_cnode_caps_def cte_wp_at_ctes_of isCap_simps + locateSlotFun_def isCNodeCap_capUntypedPtr_capCNodePtr + split: capability.split) + apply (rule monadic_rewrite_name_pre[where P="\_. False" and f=fail] + monadic_rewrite_refl get_wp + | simp add: throwError_def returnOk_def locateSlotFun_def if_not_P + isCNodeCap_capUntypedPtr_capCNodePtr + cong: if_cong split del: if_split)+ + (* step 3, prove the non-failure conditions *) + apply (clarsimp simp: isCap_simps) + apply (frule(1) cte_wp_at_valid_objs_valid_cap') + apply (clarsimp simp: cte_level_bits_def valid_cap_simps' + real_cte_at' isCap_simps cteSizeBits_def objBits_simps) + apply (clarsimp simp: cte_wp_at_ctes_of only_cnode_caps_def ball_Un + cnode_caps_gsCNodes_def ran_map_option o_def) + apply (drule bspec, rule IntI, erule ranI, simp add: isCap_simps) + apply (simp add: isCap_simps capAligned_def word_bits_def and_mask_less') + done +qed + +end diff --git a/proof/refine/AARCH64/Refine.thy b/proof/refine/AARCH64/Refine.thy new file mode 100644 index 0000000000..d7c1e8fcbc --- /dev/null +++ b/proof/refine/AARCH64/Refine.thy @@ -0,0 +1,1043 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + The main theorem +*) + +theory Refine +imports + KernelInit_R + ADT_H + InitLemmas + PageTableDuplicates +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +text \User memory content is the same on both levels\ +lemma typ_at_AUserDataI: + "\ typ_at (AArch (AUserData sz)) p s; pspace_relation (kheap s) (ksPSpace s'); + pspace_aligned' s'; pspace_distinct' s'; n < 2 ^ (pageBitsForSize sz - pageBits) \ + \ typ_at' UserDataT (p + n * 2 ^ pageBits) s'" + apply (clarsimp simp add: obj_at_def a_type_def) + apply (simp split: Structures_A.kernel_object.split_asm + arch_kernel_obj.split_asm split: if_split_asm) + apply (drule(1) pspace_relation_absD) + apply (clarsimp) + apply (drule_tac x = "p + n * 2 ^ pageBits" in spec) + apply (drule_tac x = "\_ obj. obj = KOUserData" in spec) + apply (clarsimp simp: obj_at'_def typ_at'_def ko_wp_at'_def) + apply (rule exI [where x = KOUserData]) + apply (drule mp) + apply (rule exI [where x = n]) + apply (simp add: shiftl_t2n) + apply (clarsimp simp: pspace_aligned'_def) + apply (drule (1) bspec [OF _ domI]) + apply (clarsimp simp: objBits_simps) + apply (fastforce dest!: pspace_distinctD' simp: objBits_simps) + done + +lemma typ_at_ADeviceDataI: + "\ typ_at (AArch (ADeviceData sz)) p s; pspace_relation (kheap s) (ksPSpace s'); + pspace_aligned' s'; pspace_distinct' s'; n < 2 ^ (pageBitsForSize sz - pageBits) \ + \ typ_at' UserDataDeviceT (p + n * 2 ^ pageBits) s'" + apply (clarsimp simp add: obj_at_def a_type_def ) + apply (simp split: Structures_A.kernel_object.split_asm + arch_kernel_obj.split_asm split: if_split_asm) + apply (drule(1) pspace_relation_absD) + apply (clarsimp) + apply (drule_tac x = "p + n * 2 ^ pageBits" in spec) + apply (drule_tac x = "\_ obj. 
obj = KOUserDataDevice" in spec) + apply (clarsimp simp: obj_at'_def typ_at'_def ko_wp_at'_def) + apply (rule exI [where x = KOUserDataDevice]) + apply (drule mp) + apply (rule exI [where x = n]) + apply (simp add: shiftl_t2n) + apply (clarsimp simp: pspace_aligned'_def) + apply (drule (1) bspec [OF _ domI]) + apply (clarsimp simp: objBits_simps) + apply (fastforce dest!: pspace_distinctD' simp: objBits_simps) + done + +lemma typ_at_UserDataI: + "\ typ_at' UserDataT (p && ~~ mask pageBits) s'; + pspace_relation (kheap s) (ksPSpace s'); pspace_aligned s \ + \ \sz. typ_at (AArch (AUserData sz)) (p && ~~ mask (pageBitsForSize sz)) s" + apply (clarsimp simp: exists_disj obj_at'_def typ_at'_def ko_wp_at'_def) + apply (frule (1) in_related_pspace_dom) + apply (clarsimp simp: pspace_dom_def) + apply (clarsimp simp: pspace_relation_def dom_def) + apply (erule allE, erule impE, blast) + apply clarsimp + apply (drule (1) bspec) + apply clarsimp + apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) + apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def + cte_relation_def other_obj_relation_def tcb_relation_cut_def + split: Structures_A.kernel_object.split_asm + Structures_H.kernel_object.split_asm + if_split_asm arch_kernel_obj.split_asm) + apply (rename_tac vmpage_size n) + apply (rule_tac x = vmpage_size in exI) + apply (subst conjunct2 [OF is_aligned_add_helper]) + apply (drule (1) pspace_alignedD) + apply simp + apply (simp add: shiftl_t2n mult_ac) + apply (erule word_less_power_trans2 [OF _ pbfs_atleast_pageBits]) + apply (case_tac vmpage_size, simp_all add: word_bits_conv bit_simps)[1] + apply (simp add: obj_at_def a_type_def) + done + +lemma typ_at_DeviceDataI: + "\ typ_at' UserDataDeviceT (p && ~~ mask pageBits) s'; + pspace_relation (kheap s) (ksPSpace s'); pspace_aligned s \ + \ \sz. 
typ_at (AArch (ADeviceData sz)) (p && ~~ mask (pageBitsForSize sz)) s" + apply (clarsimp simp: exists_disj obj_at'_def typ_at'_def ko_wp_at'_def) + apply (frule (1) in_related_pspace_dom) + apply (clarsimp simp: pspace_dom_def) + apply (clarsimp simp: pspace_relation_def dom_def) + apply (erule allE, erule impE, blast) + apply clarsimp + apply (drule (1) bspec) + apply clarsimp + apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) + apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def + cte_relation_def other_obj_relation_def tcb_relation_cut_def + split: Structures_A.kernel_object.split_asm + Structures_H.kernel_object.split_asm + if_split_asm arch_kernel_obj.split_asm) + apply (rename_tac vmpage_size n) + apply (rule_tac x = vmpage_size in exI) + apply (subst conjunct2 [OF is_aligned_add_helper]) + apply (drule (1) pspace_alignedD) + apply simp + apply (simp add: shiftl_t2n mult_ac) + apply (erule word_less_power_trans2 [OF _ pbfs_atleast_pageBits]) + apply (case_tac vmpage_size, simp_all add: word_bits_conv bit_simps)[1] + apply (simp add: obj_at_def a_type_def) + done + +lemma pointerInUserData_relation: + "\ (s,s') \ state_relation; valid_state' s'; valid_state s\ + \ pointerInUserData p s' = in_user_frame p s" + apply (simp add: pointerInUserData_def in_user_frame_def) + apply (rule iffI) + apply (erule typ_at_UserDataI, (clarsimp simp: valid_state_def)+)[1] + apply clarsimp + apply (drule_tac sz = sz and + n = "(p && mask (pageBitsForSize sz)) >> pageBits" + in typ_at_AUserDataI [where s = s and s' = s']) + apply (fastforce simp: valid_state'_def)+ + apply (rule shiftr_less_t2n') + apply (simp add: pbfs_atleast_pageBits mask_twice) + apply (case_tac sz, simp_all add: bit_simps)[1] + apply (subgoal_tac "(p && ~~ mask (pageBitsForSize sz)) + (p && mask (pageBitsForSize sz) >> pageBits) * 2 ^ pageBits = (p && ~~ mask pageBits)") + apply simp + apply (subst mult.commute) + apply (subst shiftl_t2n [symmetric]) + apply (simp add: shiftr_shiftl1) + apply (subst mask_out_add_aligned) + apply (rule is_aligned_neg_mask) + apply (simp add: pbfs_atleast_pageBits) + apply (subst add.commute) + apply (simp add: word_plus_and_or_coroll2) + done + +lemma pointerInDeviceData_relation: + "\ (s,s') \ state_relation; valid_state' s'; valid_state s\ + \ pointerInDeviceData p s' = in_device_frame p s" + apply (simp add: pointerInDeviceData_def in_device_frame_def) + apply (rule iffI) + apply (erule typ_at_DeviceDataI, (clarsimp simp: valid_state_def)+)[1] + apply clarsimp + apply (drule_tac sz = sz and + n = "(p && mask (pageBitsForSize sz)) >> pageBits" + in typ_at_ADeviceDataI [where s = s and s' = s']) + apply (fastforce simp: valid_state'_def)+ + apply (rule shiftr_less_t2n') + apply (simp add: pbfs_atleast_pageBits mask_twice) + apply (case_tac sz, simp_all add: bit_simps)[1] + apply (subgoal_tac "(p && ~~ mask (pageBitsForSize sz)) + (p && mask (pageBitsForSize sz) >> pageBits) * 2 ^ pageBits = (p && ~~ mask pageBits)") + apply simp + apply (subst mult.commute) + apply (subst shiftl_t2n [symmetric]) + apply (simp add: shiftr_shiftl1) + apply (subst mask_out_add_aligned) + apply (rule is_aligned_neg_mask) + apply (simp add: pbfs_atleast_pageBits) + apply (subst add.commute) + apply (simp add: word_plus_and_or_coroll2) + done + +lemma user_mem_relation: + "\(s,s') \ state_relation; valid_state' s'; valid_state s\ + \ user_mem' s' = user_mem s" + apply (rule ext) + apply (clarsimp simp: user_mem_def user_mem'_def pointerInUserData_relation 
pointerInDeviceData_relation) + apply (simp add: state_relation_def) + done + +lemma device_mem_relation: + "\(s,s') \ state_relation; valid_state' s'; valid_state s\ + \ device_mem' s' = device_mem s" + apply (rule ext) + apply (clarsimp simp: device_mem_def device_mem'_def pointerInUserData_relation + pointerInDeviceData_relation) + done + +lemma absKState_correct: + assumes invs: "einvs (s :: det_ext state)" and invs': "invs' s'" + assumes rel: "(s,s') \ state_relation" + shows "absKState s' = abs_state s" + using assms + apply (intro state.equality, simp_all add: absKState_def abs_state_def) + apply (rule absHeap_correct, clarsimp+) + apply (clarsimp elim!: state_relationE) + apply (rule absCDT_correct, clarsimp+) + apply (rule absIsOriginalCap_correct, clarsimp+) + apply (simp add: state_relation_def) + apply (simp add: state_relation_def) + apply (clarsimp simp: user_mem_relation invs_def invs'_def) + apply (simp add: state_relation_def) + apply (rule absInterruptIRQNode_correct, simp add: state_relation_def) + apply (rule absInterruptStates_correct, simp add: state_relation_def) + apply (rule absArchState_correct, simp) + apply (rule absExst_correct, simp+) + done + +text \The top-level invariance\ + +lemma set_thread_state_sched_act: + "\(\s. runnable state) and (\s. P (scheduler_action s))\ + set_thread_state thread state + \\rs s. P (scheduler_action (s::det_state))\" + apply (simp add: set_thread_state_def) + apply wp + apply (simp add: set_thread_state_ext_def) + apply wp + apply (rule hoare_pre_cont) + apply (rule_tac Q="\rv. (\s. runnable ts) and (\s. P (scheduler_action s))" + in hoare_strengthen_post) + apply wp + apply force + apply (wp gts_st_tcb_at)+ + apply (rule_tac Q="\rv. st_tcb_at ((=) state) thread and (\s. runnable state) and (\s. P (scheduler_action s))" in hoare_strengthen_post) + apply (simp add: st_tcb_at_def) + apply (wp obj_set_prop_at)+ + apply (force simp: st_tcb_at_def obj_at_def) + apply wp + apply clarsimp + done + +lemma activate_thread_sched_act: + "\ct_in_state activatable and (\s. P (scheduler_action s))\ + activate_thread + \\rs s. P (scheduler_action (s::det_state))\" + by (simp add: activate_thread_def set_thread_state_def arch_activate_idle_thread_def + | (wp set_thread_state_sched_act gts_wp)+ | wpc)+ + +lemma schedule_sched_act_rct[wp]: + "\\\ Schedule_A.schedule + \\rs (s::det_state). scheduler_action s = resume_cur_thread\" + unfolding Schedule_A.schedule_def + by (wpsimp) + +lemma call_kernel_sched_act_rct[wp]: + "\einvs and (\s. e \ Interrupt \ ct_running s) + and (\s. scheduler_action s = resume_cur_thread)\ + call_kernel e + \\rs (s::det_state). scheduler_action s = resume_cur_thread\" + apply (simp add: call_kernel_def) + apply (wp activate_thread_sched_act | simp)+ + apply (clarsimp simp: active_from_running) + done + +lemma kernel_entry_invs: + "\einvs and (\s. e \ Interrupt \ ct_running s) + and (\s. 0 < domain_time s) and valid_domain_list and (ct_running or ct_idle) + and (\s. scheduler_action s = resume_cur_thread)\ + kernel_entry e us + \\rv. einvs and (\s. ct_running s \ ct_idle s) + and (\s. 0 < domain_time s) and valid_domain_list + and (\s. scheduler_action s = resume_cur_thread)\" + apply (rule_tac Q="\rv. invs and (\s. ct_running s \ ct_idle s) and valid_sched and + (\s. 0 < domain_time s) and valid_domain_list and + valid_list and (\s. 
scheduler_action s = resume_cur_thread)" + in hoare_post_imp) + apply clarsimp + apply (simp add: kernel_entry_def) + apply (wp akernel_invs_det_ext call_kernel_valid_sched thread_set_invs_trivial + thread_set_ct_running thread_set_not_state_valid_sched + hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state + call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext + hoare_weak_lift_imp + | clarsimp simp add: tcb_cap_cases_def active_from_running)+ + done + +definition + "full_invs \ {((tc, s :: det_ext state), m, e). einvs s \ + (ct_running s \ ct_idle s) \ + (m = KernelMode \ e \ None) \ + (m = UserMode \ ct_running s) \ + (m = IdleMode \ ct_idle s) \ + (e \ None \ e \ Some Interrupt \ ct_running s) \ + 0 < domain_time s \ valid_domain_list s \ + (scheduler_action s = resume_cur_thread)}" + +lemma do_user_op_valid_list:"\valid_list\ do_user_op f tc \\_. valid_list\" + unfolding do_user_op_def + apply (wp | simp add: split_def)+ + done + +lemma do_user_op_valid_sched:"\valid_sched\ do_user_op f tc \\_. valid_sched\" + unfolding do_user_op_def + apply (wp | simp add: split_def)+ + done + +lemma do_user_op_sched_act: + "\\s. P (scheduler_action s)\ do_user_op f tc \\_ s. P (scheduler_action s)\" + unfolding do_user_op_def + apply (wp | simp add: split_def)+ + done + +lemma do_user_op_invs2: + "\einvs and ct_running and (\s. scheduler_action s = resume_cur_thread) + and (\s. 0 < domain_time s) and valid_domain_list \ + do_user_op f tc + \\_. (einvs and ct_running and (\s. scheduler_action s = resume_cur_thread)) + and (\s. 0 < domain_time s) and valid_domain_list \" + apply (rule_tac Q="\_. valid_list and valid_sched and + (\s. scheduler_action s = resume_cur_thread) and (invs and ct_running) and + (\s. 0 < domain_time s) and valid_domain_list" + in hoare_strengthen_post) + apply (wp do_user_op_valid_list do_user_op_valid_sched do_user_op_sched_act + do_user_op_invs | simp | force)+ + done + +lemmas ext_init_def = ext_init_det_ext_ext_def ext_init_unit_def + +lemma valid_list_init[simp]: + "valid_list init_A_st" + by (simp add: valid_list_2_def init_A_st_def ext_init_def init_cdt_def) + +lemmas valid_list_inits[simp] = valid_list_init[simplified] + +lemma valid_sched_init[simp]: + "valid_sched init_A_st" + apply (simp add: valid_sched_def init_A_st_def ext_init_def) + apply (clarsimp simp: valid_etcbs_def init_kheap_def st_tcb_at_kh_def obj_at_kh_def + obj_at_def is_etcb_at_def idle_thread_ptr_def + valid_queues_2_def ct_not_in_q_def not_queued_def + valid_sched_action_def is_activatable_def init_irq_node_ptr_def + arm_global_pt_ptr_def + ct_in_cur_domain_2_def valid_blocked_2_def valid_idle_etcb_def + etcb_at'_def default_etcb_def) + done + +lemma valid_domain_list_init[simp]: + "valid_domain_list init_A_st" + by (simp add: init_A_st_def ext_init_def valid_domain_list_def) + +lemma akernel_invariant: + "ADT_A uop \ full_invs" + unfolding full_invs_def + apply (rule invariantI) + apply (clarsimp simp: ADT_A_def subset_iff) + apply (frule bspec[OF akernel_init_invs]) + apply (simp add: Let_def Init_A_def) + apply (simp add: init_A_st_def ext_init_def) + apply (clarsimp simp: ADT_A_def global_automaton_def) + + apply (rename_tac tc' s' mode' e' tc s mode e) + apply (elim disjE) + apply ((clarsimp simp: kernel_call_A_def + | drule use_valid[OF _ kernel_entry_invs])+)[2] + apply ((clarsimp simp: do_user_op_A_def monad_to_transition_def + check_active_irq_A_def + | drule use_valid[OF _ do_user_op_invs2] + | drule use_valid[OF _ 
check_active_irq_invs_just_running])+)[2] + apply ((clarsimp simp add: check_active_irq_A_def + | drule use_valid[OF _ check_active_irq_invs])+)[1] + apply (clarsimp simp: ct_in_state_def st_tcb_at_def obj_at_def) + apply ((clarsimp simp add: do_user_op_A_def check_active_irq_A_def + | drule use_valid[OF _ do_user_op_invs2] + | drule use_valid[OF _ check_active_irq_invs_just_running])+)[1] + apply (clarsimp simp: ct_in_state_def st_tcb_at_def obj_at_def) + apply (clarsimp simp: ct_in_state_def st_tcb_at_def obj_at_def) + apply ((clarsimp simp add: check_active_irq_A_def + | drule use_valid[OF _ check_active_irq_invs])+)[1] + apply ((clarsimp simp add: check_active_irq_A_def + | drule use_valid[OF _ check_active_irq_invs_just_idle])+)[1] + apply ((clarsimp simp add: check_active_irq_A_def + | drule use_valid[OF _ check_active_irq_invs])+)[1] + done + +lemma dmo_getActiveIRQ_notin_non_kernel_IRQs[wp]: + "\\\ doMachineOp (getActiveIRQ True) \\irq _. irq \ Some ` non_kernel_IRQs\" + unfolding doMachineOp_def + by (wpsimp simp: getActiveIRQ_def in_monad split: if_split_asm) + +lemma non_kernel_IRQs_strg: + "invs' s \ irq \ Some ` non_kernel_IRQs \ Q \ + (\y. irq = Some y) \ invs' s \ (the irq \ non_kernel_IRQs \ P) \ Q" + by auto + +lemma ckernel_invs: + "\invs' and + (\s. e \ Interrupt \ ct_running' s) and + (\s. ksSchedulerAction s = ResumeCurrentThread)\ + callKernel e + \\rs. (\s. ksSchedulerAction s = ResumeCurrentThread) + and (invs' and (ct_running' or ct_idle'))\" + apply (simp add: callKernel_def) + apply (rule hoare_pre) + apply (wp activate_invs' activate_sch_act schedule_sch + schedule_sch_act_simple he_invs' schedule_invs' hoare_vcg_if_lift3 + hoare_drop_imp[where R="\_. kernelExitAssertions"] + | simp add: no_irq_getActiveIRQ + | strengthen non_kernel_IRQs_strg[where Q=True, simplified], simp cong: conj_cong)+ + done + +(* abstract and haskell have identical domain list fields *) +abbreviation valid_domain_list' :: "'a kernel_state_scheme \ bool" where + "valid_domain_list' \ \s. valid_domain_list_2 (ksDomSchedule s)" + +lemmas valid_domain_list'_def = valid_domain_list_2_def + +defs fastpathKernelAssertions_def: + "fastpathKernelAssertions \ \s. + (\asid_high ap. armKSASIDTable (ksArchState s) asid_high = Some ap + \ asid_pool_at' ap s)" + +lemma fastpathKernelAssertions_cross: + "\ (s,s') \ state_relation; invs s; valid_arch_state' s'\ \ fastpathKernelAssertions s'" + unfolding fastpathKernelAssertions_def + apply clarsimp + apply (rule asid_pool_at_cross; fastforce?) + apply (rule_tac x="ucast asid_high" in valid_asid_tableD[rotated], fastforce) + apply (clarsimp dest!: state_relationD + simp: arch_state_relation_def comp_def valid_arch_state'_def valid_asid_table'_def) + apply (subst ucast_ucast_len; simp) + apply (rule_tac y="mask asid_high_bits" in order_le_less_trans; + fastforce simp: mask_def asid_high_bits_def) + done + +(* this is only needed for callKernel, where we have invs' on concrete side *) +lemma corres_cross_over_fastpathKernelAssertions: + "\ \s. P s \ invs s; \s'. Q s' \ invs' s'; + corres r P (Q and fastpathKernelAssertions) f g \ \ + corres r P Q f g" + by (rule corres_cross_over_guard[where Q="Q and fastpathKernelAssertions"]) + (fastforce elim: fastpathKernelAssertions_cross)+ + +defs kernelExitAssertions_def: + "kernelExitAssertions s \ 0 < ksDomainTime s \ valid_domain_list' s" + +lemma callKernel_domain_time_left: + "\ \ \ callKernel e \\_ s. 
0 < ksDomainTime s \ valid_domain_list' s \" + unfolding callKernel_def kernelExitAssertions_def by wpsimp + +lemma doMachineOp_sch_act_simple: + "doMachineOp f \sch_act_simple\" + by (wp sch_act_simple_lift) + +lemma kernelEntry_invs': + "\ invs' and (\s. e \ Interrupt \ ct_running' s) and + (ct_running' or ct_idle') and + (\s. ksSchedulerAction s = ResumeCurrentThread) and + (\s. 0 < ksDomainTime s) and valid_domain_list' \ + kernelEntry e tc + \\rs. (\s. ksSchedulerAction s = ResumeCurrentThread) and + (invs' and (ct_running' or ct_idle')) and + (\s. 0 < ksDomainTime s) and valid_domain_list' \" + apply (simp add: kernelEntry_def) + apply (wp ckernel_invs callKernel_domain_time_left + threadSet_invs_trivial threadSet_ct_running' + TcbAcc_R.dmo_invs' hoare_weak_lift_imp + doMachineOp_ct_in_state' doMachineOp_sch_act_simple + callKernel_domain_time_left + | clarsimp simp: user_memory_update_def no_irq_def tcb_at_invs' + valid_domain_list'_def)+ + done + +lemma absKState_correct': + "\einvs s; invs' s'; (s,s') \ state_relation\ + \ absKState s' = abs_state s" + apply (intro state.equality, simp_all add: absKState_def abs_state_def) + apply (rule absHeap_correct) + apply (clarsimp simp: valid_state_def valid_pspace_def)+ + apply (clarsimp dest!: state_relationD) + apply (rule absCDT_correct) + apply (clarsimp simp: valid_state_def valid_pspace_def + valid_state'_def valid_pspace'_def)+ + apply (rule absIsOriginalCap_correct, clarsimp+) + apply (simp add: state_relation_def) + apply (simp add: state_relation_def) + apply (clarsimp simp: user_mem_relation invs_def invs'_def) + apply (simp add: state_relation_def) + apply (rule absInterruptIRQNode_correct, simp add: state_relation_def) + apply (rule absInterruptStates_correct, simp add: state_relation_def) + apply (erule absArchState_correct) + apply (rule absExst_correct, simp, assumption+) + done + +lemma ptable_lift_abs_state[simp]: + "ptable_lift t (abs_state s) = ptable_lift t s" + by (simp add: ptable_lift_def abs_state_def) + +lemma ptable_rights_abs_state[simp]: + "ptable_rights t (abs_state s) = ptable_rights t s" + by (simp add: ptable_rights_def abs_state_def) + +lemma ptable_rights_imp_UserData: + assumes invs: "einvs s" and invs': "invs' s'" + assumes rel: "(s,s') : state_relation" + assumes rights: "ptable_rights t (absKState s') x \ {}" + assumes trans: + "ptable_lift t (absKState s') x = Some (AARCH64.addrFromPPtr y)" + shows "pointerInUserData y s' \ pointerInDeviceData y s'" +proof - + from invs invs' rel have [simp]: "absKState s' = abs_state s" + by - (rule absKState_correct', simp_all) + from invs have valid: "valid_state s" by auto + from invs' have valid': "valid_state' s'" by auto + have "in_user_frame y s \ in_device_frame y s " + by (rule ptable_rights_imp_frame[OF valid rights[simplified] + trans[simplified]]) + thus ?thesis + by (auto simp add: pointerInUserData_relation[OF rel valid' valid] + pointerInDeviceData_relation[OF rel valid' valid]) +qed + +definition + "ex_abs G \ \s'. \s. ((s :: (det_ext) state),s') \ state_relation \ G s" + +lemma device_update_invs': + "\invs'\doMachineOp (device_memory_update ds) + \\_. invs'\" + apply (simp add: doMachineOp_def device_memory_update_def simpler_modify_def select_f_def + gets_def get_def bind_def valid_def return_def) + by (clarsimp simp: invs'_def valid_state'_def valid_irq_states'_def valid_machine_state'_def) + +crunches doMachineOp + for ksDomainTime[wp]: "\s. P (ksDomainTime s)" + +lemma doUserOp_invs': + "\invs' and ex_abs einvs and + (\s. 
ksSchedulerAction s = ResumeCurrentThread) and ct_running' and + (\s. 0 < ksDomainTime s) and valid_domain_list'\ + doUserOp f tc + \\_. invs' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and + (\s. 0 < ksDomainTime s) and valid_domain_list'\" + apply (simp add: doUserOp_def split_def ex_abs_def) + apply (wp device_update_invs' doMachineOp_ct_in_state' + | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def + user_memory_update_def))+ + apply (clarsimp simp: user_memory_update_def simpler_modify_def + restrict_map_def + split: option.splits) + apply (frule ptable_rights_imp_UserData[rotated 2], auto) + done + + +text \The top-level correspondence\ + +lemma None_drop: + "P \ x = None \ P" + by simp + +lemma Ex_Some_conv: + "((\y. x = Some y) \ P x) = (\y. x = Some y \ P (Some y))" + by auto + + +lemma kernel_corres': + "corres dc (einvs and (\s. event \ Interrupt \ ct_running s) and (ct_running or ct_idle) + and (\s. scheduler_action s = resume_cur_thread)) + (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (call_kernel event) + (do _ \ runExceptT $ + handleEvent event `~catchError~` + (\_. withoutPreemption $ do + irq <- doMachineOp (getActiveIRQ True); + when (isJust irq) $ handleInterrupt (fromJust irq) + od); + _ \ ThreadDecls_H.schedule; + activateThread + od)" + unfolding call_kernel_def callKernel_def + apply (simp add: call_kernel_def callKernel_def) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule corres_split_handle[OF handleEvent_corres]) + apply simp + apply (rule corres_split[OF corres_machine_op]) + apply (rule corres_underlying_trivial) + apply (rule no_fail_getActiveIRQ) + apply clarsimp + apply (rule_tac x=irq in option_corres) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply (simp add: when_def) + apply (rule corres_when[simplified dc_def], simp) + apply simp + apply (rule handleInterrupt_corres[simplified dc_def]) + apply simp + apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift simp: schact_is_rct_def)[1] + apply simp + apply (rule_tac Q="\irq s. invs' s \ + (\irq'. irq = Some irq' \ + intStateIRQTable (ksInterruptState s ) irq' \ + IRQInactive)" + in hoare_post_imp) + apply simp + apply (wp doMachineOp_getActiveIRQ_IRQ_active handle_event_valid_sched | simp)+ + apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_strengthen_postE) + apply wpsimp+ + apply (simp add: invs'_def valid_state'_def) + apply (rule corres_split[OF schedule_corres]) + apply (rule activateThread_corres) + apply (wp schedule_invs' hoare_vcg_if_lift2 dmo_getActiveIRQ_non_kernel + | simp cong: rev_conj_cong | strengthen None_drop | subst Ex_Some_conv)+ + apply (rule_tac Q="\_. valid_sched and invs and valid_list" and + E="\_. valid_sched and invs and valid_list" + in hoare_strengthen_postE) + apply (wp handle_event_valid_sched hoare_vcg_imp_lift' |simp)+ + apply (wp handle_event_valid_sched hoare_vcg_if_lift3 + | simp + | strengthen non_kernel_IRQs_strg[where Q=True, simplified], simp cong: conj_cong)+ + apply (clarsimp simp: active_from_running schact_is_rct_def) + apply (clarsimp simp: active_from_running') + done + +lemma kernel_corres: + "corres dc (einvs and (\s. event \ Interrupt \ ct_running s) and (ct_running or ct_idle) and + (\s. scheduler_action s = resume_cur_thread) and + (\s. 0 < domain_time s \ valid_domain_list s)) + (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and + (\s. 
ksSchedulerAction s = ResumeCurrentThread)) + (call_kernel event) (callKernel event)" + unfolding callKernel_def K_bind_def + apply (rule corres_cross_over_fastpathKernelAssertions, blast+) + apply (rule corres_stateAssert_r) + apply (rule corres_guard_imp) + apply (rule corres_add_noop_lhs2) + apply (simp only: bind_assoc[symmetric]) + apply (rule corres_split[where r'=dc and + R="\_ s. 0 < domain_time s \ valid_domain_list s" and + R'="\_. \"]) + apply (simp only: bind_assoc) + apply (rule kernel_corres') + apply (rule corres_bind_return2, rule corres_stateAssert_assume_stronger) + apply simp + apply (simp add: kernelExitAssertions_def state_relation_def) + apply (wp call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext) + apply wp + apply clarsimp + apply clarsimp + done + +lemma user_mem_corres: + "corres (=) invs invs' (gets (\x. g (user_mem x))) (gets (\x. g (user_mem' x)))" + by (clarsimp simp add: gets_def get_def return_def bind_def + invs_def invs'_def + corres_underlying_def user_mem_relation) + +lemma device_mem_corres: + "corres (=) invs invs' (gets (\x. g (device_mem x))) (gets (\x. g (device_mem' x)))" + by (clarsimp simp add: gets_def get_def return_def bind_def + invs_def invs'_def + corres_underlying_def device_mem_relation) + +lemma entry_corres: + "corres (=) (einvs and (\s. event \ Interrupt \ ct_running s) and + (\s. 0 < domain_time s) and valid_domain_list and (ct_running or ct_idle) and + (\s. scheduler_action s = resume_cur_thread)) + (invs' and (\s. event \ Interrupt \ ct_running' s) and + (\s. 0 < ksDomainTime s) and valid_domain_list' and (ct_running' or ct_idle') and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (kernel_entry event tc) (kernelEntry event tc)" + apply (simp add: kernel_entry_def kernelEntry_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split) + apply simp + apply (rule threadset_corresT; simp?) + apply (simp add: tcb_relation_def arch_tcb_relation_def + arch_tcb_context_set_def atcbContextSet_def) + apply (clarsimp simp: tcb_cap_cases_def cteSizeBits_def) + apply (clarsimp simp: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: exst_same_def) + apply (rule corres_split[OF kernel_corres]) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule threadGet_corres) + apply (simp add: tcb_relation_def arch_tcb_relation_def + arch_tcb_context_get_def atcbContextGet_def) + apply wp+ + apply (rule hoare_strengthen_post, rule akernel_invs_det_ext, + simp add: invs_def valid_state_def valid_pspace_def cur_tcb_def) + apply (rule hoare_strengthen_post, rule ckernel_invs, simp add: invs'_def cur_tcb'_def) + apply (wp thread_set_invs_trivial thread_set_ct_running + threadSet_invs_trivial threadSet_ct_running' + thread_set_not_state_valid_sched hoare_weak_lift_imp + hoare_vcg_disj_lift ct_in_state_thread_state_lift + | simp add: tcb_cap_cases_def ct_in_state'_def thread_set_no_change_tcb_state + schact_is_rct_def + | (wps, wp threadSet_st_tcb_at2) )+ + apply (clarsimp simp: invs_def cur_tcb_def valid_state_def valid_pspace_def) + apply (clarsimp simp: ct_in_state'_def) + done + +lemma corres_gets_machine_state: + "corres (=) \ \ (gets (f \ machine_state)) (gets (f \ ksMachineState))" + by (clarsimp simp: gets_def corres_underlying_def + in_monad bind_def get_def return_def state_relation_def) + +lemma do_user_op_corres: + "corres (=) (einvs and ct_running) + (invs' and (%s. 
ksSchedulerAction s = ResumeCurrentThread) and + ct_running') + (do_user_op f tc) (doUserOp f tc)" + apply (simp add: do_user_op_def doUserOp_def split_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule_tac r'="(=)" and P=einvs and P'=invs' in corres_split) + apply (fastforce dest: absKState_correct [rotated]) + apply (rule_tac r'="(=)" and P=einvs and P'=invs' in corres_split) + apply (fastforce dest: absKState_correct [rotated]) + apply (rule_tac r'="(=)" and P=invs and P'=invs' in corres_split) + apply (rule user_mem_corres) + apply (rule_tac r'="(=)" and P=invs and P'=invs' in corres_split) + apply (rule device_mem_corres) + apply (rule_tac r'="(=)" in corres_split) + apply (rule corres_gets_machine_state) + apply (rule_tac F = "dom (rvb \ addrFromPPtr) \ - dom rvd" in corres_gen_asm) + apply (rule_tac F = "dom (rvc \ addrFromPPtr) \ dom rvd" in corres_gen_asm) + apply simp + apply (rule_tac r'="(=)" in corres_split[OF corres_select]) + apply simp + apply (rule corres_underlying_split[OF corres_machine_op]) + apply simp + apply (rule corres_underlying_trivial) + apply (simp add: user_memory_update_def) + apply (wp | simp)+ + apply (rule corres_underlying_split[OF corres_machine_op,where Q = dc and Q'=dc]) + apply (rule corres_underlying_trivial) + apply (wp | simp add: dc_def device_memory_update_def)+ + apply (clarsimp simp: invs_def valid_state_def pspace_respects_device_region_def) + apply fastforce + done + +lemma ct_running_related: + "\ (a, c) \ state_relation; ct_running' c \ + \ ct_running a" + apply (clarsimp simp: ct_in_state_def ct_in_state'_def + curthread_relation) + apply (frule(1) st_tcb_at_coerce_abstract) + apply (erule st_tcb_weakenE) + apply (case_tac st, simp_all)[1] + done + +lemma ct_idle_related: + "\ (a, c) \ state_relation; ct_idle' c \ + \ ct_idle a" + apply (clarsimp simp: ct_in_state_def ct_in_state'_def + curthread_relation) + apply (frule(1) st_tcb_at_coerce_abstract) + apply (erule st_tcb_weakenE) + apply (case_tac st, simp_all)[1] + done + +definition + "full_invs' \ {((tc,s),m,e). invs' s \ + ex_abs (einvs::det_ext state \ bool) s \ + ksSchedulerAction s = ResumeCurrentThread \ + (ct_running' s \ ct_idle' s) \ + (m = KernelMode \ e \ None) \ + (m = UserMode \ ct_running' s) \ + (m = IdleMode \ ct_idle' s) \ + (e \ None \ e \ Some Interrupt \ ct_running' s) \ + 0 < ksDomainTime s \ valid_domain_list' s}" + +lemma check_active_irq_corres': + "corres (=) \ \ (check_active_irq) (checkActiveIRQ)" + apply (simp add: check_active_irq_def checkActiveIRQ_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_machine_op[OF corres_underlying_trivial], where R="\_. \" and R'="\_. \"]) + apply simp + apply (rule no_fail_getActiveIRQ) + apply (wp | simp )+ + done + +lemma check_active_irq_corres: + "corres (=) + (invs and (ct_running or ct_idle) and einvs and (\s. scheduler_action s = resume_cur_thread) + and (\s. 0 < domain_time s) and valid_domain_list) + (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) + and (\s. 0 < ksDomainTime s) and valid_domain_list' and (ct_running' or ct_idle')) + (check_active_irq) (checkActiveIRQ)" + apply (rule corres_guard_imp) + apply (rule check_active_irq_corres', auto) + done + +lemma checkActiveIRQ_just_running_corres: + "corres (=) + (invs and ct_running and einvs and (\s. scheduler_action s = resume_cur_thread) + and (\s. 0 < domain_time s) and valid_domain_list) + (invs' and ct_running' + and (\s. 
0 < ksDomainTime s) and valid_domain_list' + and (\s. ksSchedulerAction s = ResumeCurrentThread)) + (check_active_irq) (checkActiveIRQ)" + apply (rule corres_guard_imp) + apply (rule check_active_irq_corres', auto) + done + +lemma checkActiveIRQ_just_idle_corres: + "corres (=) + (invs and ct_idle and einvs and (\s. scheduler_action s = resume_cur_thread) + and (\s. 0 < domain_time s) and valid_domain_list) + (invs' and ct_idle' + and (\s. 0 < ksDomainTime s) and valid_domain_list' + and (\s. ksSchedulerAction s = ResumeCurrentThread)) + (check_active_irq) (checkActiveIRQ)" + apply (rule corres_guard_imp) + apply (rule check_active_irq_corres', auto) + done + +lemma checkActiveIRQ_invs': + "\invs' and ex_abs invs and (ct_running' or ct_idle') + and (\s. ksSchedulerAction s = ResumeCurrentThread)\ + checkActiveIRQ + \\_. invs' and (ct_running' or ct_idle') + and (\s. ksSchedulerAction s = ResumeCurrentThread)\" + apply (simp add: checkActiveIRQ_def ex_abs_def) + apply (wp dmo_invs' | simp)+ + done + +lemma checkActiveIRQ_invs'_just_running: + "\invs' and ex_abs invs and ct_running' + and (\s. ksSchedulerAction s = ResumeCurrentThread)\ + checkActiveIRQ + \\_. invs' and ct_running' + and (\s. ksSchedulerAction s = ResumeCurrentThread)\" + apply (simp add: checkActiveIRQ_def) + apply (wp | simp)+ + done + +lemma checkActiveIRQ_invs'_just_idle: + "\invs' and ex_abs invs and ct_idle' + and (\s. ksSchedulerAction s = ResumeCurrentThread)\ + checkActiveIRQ + \\_. invs' and ct_idle' + and (\s. ksSchedulerAction s = ResumeCurrentThread)\" + apply (simp add: checkActiveIRQ_def) + apply (wp | simp)+ + done + +lemma sched_act_rct_related: + "\ (a, c) \ state_relation; ksSchedulerAction c = ResumeCurrentThread\ + \ scheduler_action a = resume_cur_thread" + by (case_tac "scheduler_action a", simp_all add: state_relation_def) + +lemma domain_time_rel_eq: + "(a, c) \ state_relation \ P (ksDomainTime c) = P (domain_time a)" + by (clarsimp simp: state_relation_def) + +lemma domain_list_rel_eq: + "(a, c) \ state_relation \ P (ksDomSchedule c) = P (domain_list a)" + by (clarsimp simp: state_relation_def) + +crunch valid_objs': doUserOp, checkActiveIRQ valid_objs' + (wp: crunch_wps) + +lemma ckernel_invariant: + "ADT_H uop \ full_invs'" + unfolding full_invs'_def + supply word_neq_0_conv[simp] + supply domain_time_rel_eq[simp] domain_list_rel_eq[simp] + apply (rule invariantI) + apply (clarsimp simp add: ADT_H_def) + apply (subst conj_commute, simp) + apply (rule conjI) + apply (frule init_refinement[simplified subset_eq, THEN bspec]) + apply (clarsimp simp: ex_abs_def lift_state_relation_def) + apply (frule akernel_init_invs[THEN bspec]) + apply (rule_tac x = s in exI) + apply (clarsimp simp: Init_A_def) + apply (insert ckernel_init_invs)[1] + apply clarsimp + apply (frule ckernel_init_sch_norm) + apply (frule ckernel_init_ctr) + apply (frule ckernel_init_domain_time) + apply (frule ckernel_init_domain_list) + apply (fastforce simp: Init_H_def valid_domain_list'_def) + apply (clarsimp simp: ADT_A_def ADT_H_def global_automaton_def) + + apply (erule_tac P="a \ (\x. 
b x)" for a b in disjE) + + apply (clarsimp simp: kernel_call_H_def) + + apply (drule use_valid[OF _ valid_corres_combined + [OF kernel_entry_invs entry_corres], + OF _ kernelEntry_invs'[THEN hoare_weaken_pre]]) + apply fastforce + apply (fastforce simp: ex_abs_def sch_act_simple_def ct_running_related ct_idle_related + sched_act_rct_related) + apply (clarsimp simp: kernel_call_H_def) + apply (fastforce simp: ex_abs_def sch_act_simple_def ct_running_related ct_idle_related + sched_act_rct_related) + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp add: do_user_op_H_def monad_to_transition_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule doUserOp_valid_objs') + apply (rule valid_corres_combined[OF do_user_op_invs2 corres_guard_imp2[OF do_user_op_corres]]) + apply clarsimp + apply (rule doUserOp_invs'[THEN hoare_weaken_pre]) + apply (fastforce simp: ex_abs_def) + apply (clarsimp simp: invs_valid_objs' ex_abs_def, rule_tac x=s in exI, + clarsimp simp: ct_running_related sched_act_rct_related) + apply (clarsimp simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_running_related sched_act_rct_related) + + apply (erule_tac P="a \ b \ c \ (\x. d x)" for a b c d in disjE) + apply (clarsimp simp add: do_user_op_H_def monad_to_transition_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule doUserOp_valid_objs') + apply (rule valid_corres_combined[OF do_user_op_invs2 corres_guard_imp2[OF do_user_op_corres]]) + apply clarsimp + apply (rule doUserOp_invs'[THEN hoare_weaken_pre]) + apply (fastforce simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_running_related sched_act_rct_related) + apply (fastforce simp: ex_abs_def) + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: check_active_irq_H_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule checkActiveIRQ_valid_objs') + apply (rule valid_corres_combined[OF check_active_irq_invs_just_running checkActiveIRQ_just_running_corres]) + apply (rule checkActiveIRQ_invs'_just_running[THEN hoare_weaken_pre]) + apply (fastforce simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_running_related sched_act_rct_related) + apply (fastforce simp: ex_abs_def) + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: check_active_irq_H_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule checkActiveIRQ_valid_objs') + apply (rule valid_corres_combined[OF check_active_irq_invs_just_idle checkActiveIRQ_just_idle_corres]) + apply (rule checkActiveIRQ_invs'_just_idle[THEN hoare_weaken_pre]) + apply clarsimp + apply (fastforce simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_idle_related sched_act_rct_related) + apply (fastforce simp: ex_abs_def) + + apply (clarsimp simp: check_active_irq_H_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule checkActiveIRQ_valid_objs') + apply (rule valid_corres_combined[OF check_active_irq_invs check_active_irq_corres]) + apply (rule checkActiveIRQ_invs'[THEN hoare_weaken_pre]) + apply clarsimp + apply (fastforce simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_running_related ct_idle_related sched_act_rct_related) + apply (fastforce simp: ex_abs_def) + done + +text \The top-level theorem\ + +lemma fw_sim_A_H: + "LI (ADT_A uop) + (ADT_H uop) + (lift_state_relation state_relation) + (full_invs \ full_invs')" + apply (unfold LI_def full_invs_def full_invs'_def) + apply (simp add: ADT_H_def ADT_A_def) + apply (intro 
conjI) + apply (rule init_refinement) + apply (clarsimp simp: rel_semi_def relcomp_unfold in_lift_state_relation_eq) + apply (rename_tac tc ak m ev tc' ck' m' ev' ck) + apply (simp add: global_automaton_def) + + apply (erule_tac P="a \ (\x. b x)" for a b in disjE) + apply (clarsimp simp add: kernel_call_H_def kernel_call_A_def) + apply (rule rev_mp, rule_tac tc=tc and event=x in entry_corres) + apply (clarsimp simp: corres_underlying_def) + apply (drule (1) bspec) + apply (clarsimp simp: sch_act_simple_def) + apply (drule (1) bspec) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule_tac x=b in exI) + apply (rule conjI) + apply (rule impI, simp) + apply (frule (2) ct_running_related) + apply clarsimp + apply (rule_tac x=b in exI) + apply (drule use_valid, rule kernelEntry_invs') + apply (simp add: sch_act_simple_def) + apply clarsimp + apply (frule (1) ct_idle_related) + apply (clarsimp simp: ct_in_state_def st_tcb_at_def obj_at_def) + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: do_user_op_H_def do_user_op_A_def monad_to_transition_def) + apply (rule rev_mp, rule_tac tc1=tc and f1=uop and P="ct_running and einvs" in corres_guard_imp2[OF do_user_op_corres]) + apply simp + apply (clarsimp simp add: corres_underlying_def) + apply (drule (1) bspec, clarsimp) + apply (drule (1) bspec, clarsimp) + apply fastforce + + apply (erule_tac P="a \ b \ c \ (\x. d x)" for a b c d in disjE) + apply (clarsimp simp: do_user_op_H_def do_user_op_A_def monad_to_transition_def) + apply (rule rev_mp, rule_tac tc1=tc and f1=uop and P="ct_running and einvs" in corres_guard_imp2[OF do_user_op_corres]) + apply simp + apply (clarsimp simp add: corres_underlying_def) + apply (drule (1) bspec, clarsimp) + apply (drule (1) bspec, clarsimp) + apply fastforce + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: check_active_irq_H_def check_active_irq_A_def) + apply (rule rev_mp, rule check_active_irq_corres) + apply (clarsimp simp: corres_underlying_def) + apply fastforce + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: check_active_irq_H_def check_active_irq_A_def) + apply (rule rev_mp, rule check_active_irq_corres) + apply (clarsimp simp: corres_underlying_def) + apply fastforce + + apply (clarsimp simp: check_active_irq_H_def check_active_irq_A_def) + apply (rule rev_mp, rule check_active_irq_corres) + apply (clarsimp simp: corres_underlying_def) + apply fastforce + + apply (clarsimp simp: absKState_correct dest!: lift_state_relationD) + done + +theorem refinement: + "ADT_H uop \ ADT_A uop" + apply (rule sim_imp_refines) + apply (rule L_invariantI) + apply (rule akernel_invariant) + apply (rule ckernel_invariant) + apply (rule fw_sim_A_H) + done + +end + +end diff --git a/proof/refine/AARCH64/Retype_R.thy b/proof/refine/AARCH64/Retype_R.thy new file mode 100644 index 0000000000..990910c38f --- /dev/null +++ b/proof/refine/AARCH64/Retype_R.thy @@ -0,0 +1,5587 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Retype refinement +*) + +theory Retype_R +imports VSpace_R +begin + +context begin interpretation Arch . 
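+(* A minimal, hypothetical Haskell sketch of the address computation that
+   new_cap_addrs (defined below) performs: retyping places the i-th new object at
+   ptr + i * 2 ^ objBitsKO ko.  For intuition only -- this is not the seL4 Haskell
+   model, and it ignores machine-word wrap-around.
+
+     import Data.Bits (shiftL)
+
+     -- Hypothetical helper mirroring
+     --   new_cap_addrs n ptr ko = map (\p. ptr + (of_nat p << objBitsKO ko)) [0 ..< n]
+     newCapAddrs :: Int -> Integer -> Int -> [Integer]
+     newCapAddrs n ptr objBits =
+       [ ptr + (fromIntegral p `shiftL` objBits) | p <- [0 .. n - 1] ]
+
+     -- Example: newCapAddrs 4 0x1000 9 == [0x1000, 0x1200, 0x1400, 0x1600]
+*)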
(*FIXME: arch_split*) + +definition + APIType_map2 :: "kernel_object + AARCH64_H.object_type \ Structures_A.apiobject_type" +where + "APIType_map2 ty \ case ty of + Inr (APIObjectType ArchTypes_H.Untyped) \ Structures_A.Untyped + | Inr (APIObjectType ArchTypes_H.TCBObject) \ Structures_A.TCBObject + | Inr (APIObjectType ArchTypes_H.EndpointObject) \ Structures_A.EndpointObject + | Inr (APIObjectType ArchTypes_H.NotificationObject) \ Structures_A.NotificationObject + | Inr (APIObjectType ArchTypes_H.CapTableObject) \ Structures_A.CapTableObject + | Inr LargePageObject \ ArchObject LargePageObj + | Inr HugePageObject \ ArchObject HugePageObj + | Inr PageTableObject \ ArchObject PageTableObj + | Inr VSpaceObject \ ArchObject VSpaceObj + | Inl (KOArch (KOASIDPool _)) \ ArchObject ASIDPoolObj + | Inr VCPUObject \ ArchObject AARCH64_A.VCPUObj + | _ \ ArchObject SmallPageObj" + +lemma placeNewObject_def2: + "placeNewObject ptr val gb = createObjects' ptr 1 (injectKO val) gb" + apply (clarsimp simp:placeNewObject_def placeNewObject'_def + createObjects'_def shiftL_nat) + done + +lemma createObjects_ret: + "\n < 2^word_bits;n\ 0\ \ + \\\ createObjects y n ko gbits + \\r s. r = map (\p. ptr_add y (p * 2 ^ objBitsKO ko * 2 ^ gbits)) + [0..< n]\" + unfolding createObjects_def createObjects'_def + apply (simp add: split_def) + apply (wp|simp cong: if_cong)+ + apply (clarsimp simp: ptr_add_def upto_enum_def o_def + unat_sub word_le_nat_alt + power_sub[symmetric] + objBits_def[symmetric] + simp del: upt_Suc) + apply (clarsimp simp: unat_of_nat_minus_1 word_bits_def + shiftl_t2n power_add) + done + +lemma objBitsKO_bounded2[simp]: + "objBitsKO ko < word_bits" + by (simp add: objBits_simps' word_bits_def pageBits_def pte_bits_def word_size_bits_def + split: kernel_object.split arch_kernel_object.split) + +definition + APIType_capBits :: "AARCH64_H.object_type \ nat \ nat" +where + "APIType_capBits ty us \ case ty of + APIObjectType ArchTypes_H.Untyped \ us + | APIObjectType ArchTypes_H.TCBObject \ objBits (makeObject :: tcb) + | APIObjectType ArchTypes_H.EndpointObject \ objBits (makeObject :: endpoint) + | APIObjectType ArchTypes_H.NotificationObject \ objBits (makeObject :: Structures_H.notification) + | APIObjectType ArchTypes_H.CapTableObject \ objBits (makeObject :: cte) + us + | SmallPageObject \ pageBitsForSize ARMSmallPage + | LargePageObject \ pageBitsForSize ARMLargePage + | HugePageObject \ pageBitsForSize ARMHugePage + | PageTableObject \ ptBits NormalPT_T + | VSpaceObject \ ptBits VSRootPT_T + | VCPUObject \ vcpuBits" + +definition + makeObjectKO :: "bool \ (kernel_object + AARCH64_H.object_type) \ kernel_object" +where + "makeObjectKO dev ty \ case ty of + Inl KOUserData \ Some KOUserData + | Inl (KOArch (KOASIDPool _)) \ Some (KOArch (KOASIDPool makeObject)) + | Inl (KOArch (KOVCPU _)) \ Some (KOArch (KOVCPU makeObject)) + | Inr VCPUObject \ Some (KOArch (KOVCPU makeObject)) + | Inr (APIObjectType ArchTypes_H.TCBObject) \ Some (KOTCB makeObject) + | Inr (APIObjectType ArchTypes_H.EndpointObject) \ Some (KOEndpoint makeObject) + | Inr (APIObjectType ArchTypes_H.NotificationObject) \ Some (KONotification makeObject) + | Inr (APIObjectType ArchTypes_H.CapTableObject) \ Some (KOCTE makeObject) + | Inr PageTableObject \ Some (KOArch (KOPTE makeObject)) + | Inr VSpaceObject \ Some (KOArch (KOPTE makeObject)) + | Inr SmallPageObject \ Some (if dev then KOUserDataDevice else KOUserData) + | Inr LargePageObject \ Some(if dev then KOUserDataDevice else KOUserData) + | Inr HugePageObject \ Some (if dev 
then KOUserDataDevice else KOUserData) + | _ \ None" + +text \makeObject etc. lemmas\ + +lemma NullCap_valid' [iff]: "s \' capability.NullCap" + unfolding valid_cap'_def by simp + +lemma valid_obj_makeObject_cte [simp]: + "valid_obj' (KOCTE makeObject) s" + unfolding valid_obj'_def valid_cte'_def + by (clarsimp simp: makeObject_cte) + +lemma valid_obj_makeObject_tcb [simp]: + "valid_obj' (KOTCB makeObject) s" + unfolding valid_obj'_def valid_tcb'_def valid_tcb_state'_def valid_arch_tcb'_def + by (clarsimp simp: makeObject_tcb makeObject_cte tcb_cte_cases_def minBound_word newArchTCB_def + cteSizeBits_def) + +lemma valid_obj_makeObject_endpoint [simp]: + "valid_obj' (KOEndpoint makeObject) s" + unfolding valid_obj'_def valid_ep'_def + by (clarsimp simp: makeObject_endpoint) + +lemma valid_obj_makeObject_notification [simp]: + "valid_obj' (KONotification makeObject) s" + unfolding valid_obj'_def valid_ntfn'_def + by (clarsimp simp: makeObject_notification) + +lemma valid_obj_makeObject_user_data [simp]: + "valid_obj' (KOUserData) s" + unfolding valid_obj'_def by simp + +lemma valid_obj_makeObject_user_data_device [simp]: + "valid_obj' (KOUserDataDevice) s" + unfolding valid_obj'_def by simp + +lemma valid_obj_makeObject_pte[simp]: + "valid_obj' (KOArch (KOPTE makeObject)) s" + unfolding valid_obj'_def by (simp add: makeObject_pte) + +lemma valid_obj_makeObject_asid_pool[simp]: + "valid_obj' (KOArch (KOASIDPool makeObject)) s" + unfolding valid_obj'_def + by (simp add: makeObject_asidpool Let_def ran_def dom_def) + +lemma valid_obj_makeObject_vcpu[simp]: + "valid_obj' (KOArch (KOVCPU makeObject)) s" + unfolding valid_obj'_def + by (simp add: makeObject_vcpu makeVCPUObject_def valid_vcpu'_def) + +lemmas valid_obj_makeObject_rules = + valid_obj_makeObject_user_data valid_obj_makeObject_tcb + valid_obj_makeObject_endpoint valid_obj_makeObject_notification + valid_obj_makeObject_cte valid_obj_makeObject_pte + valid_obj_makeObject_asid_pool valid_obj_makeObject_user_data_device + valid_obj_makeObject_vcpu + +text \On the abstract side\ + +text \Lemmas for createNewObjects etc.\ + +lemma pspace_dom_upd: + assumes orth: "set as \ dom ps = {}" + shows "pspace_dom (foldr (\p ps. ps(p \ ko)) as ps) = + pspace_dom ps \ (\x \ set as. fst ` obj_relation_cuts ko x)" + using orth + apply (subst foldr_upd_app_if) + apply (rule set_eqI, simp add: pspace_dom_def) + apply (rule iffI) + apply (clarsimp split: if_split_asm) + apply (rule rev_bexI, erule domI) + apply (fastforce simp: image_def) + apply (erule disjE) + apply clarsimp + apply (rule rev_bexI) + apply (clarsimp simp: domIff) + apply (erule exI) + apply clarsimp + apply (intro conjI impI) + apply (drule equals0D, erule notE, erule IntI, erule domI) + apply (fastforce simp: image_def) + apply clarsimp + apply (rule rev_bexI) + apply (clarsimp simp: domIff) + apply (erule(1) notE) + apply clarsimp + apply (fastforce simp: image_def) + done + +definition + "new_cap_addrs \ \n ptr ko. map (\p. ptr + ((of_nat p :: machine_word) << (objBitsKO ko))) + [0 ..< n]" + +definition + null_filter' :: "('a \ cte) \ ('a \ cte)" +where + "null_filter' f \ \x. 
if f x = Some (CTE NullCap nullMDBNode) then None else f x" + +lemma across_null_filter_eq': + assumes eq: "null_filter' xs = null_filter' ys" + shows "\ xs x = Some v; ys x = Some v \ R; + \ v = CTE NullCap nullMDBNode; ys x = None \ \ R \ + \ R" + apply (cases "null_filter' xs x") + apply (subgoal_tac "null_filter' ys x = None") + apply (simp add: null_filter'_def split: if_split_asm) + apply (simp add: eq) + apply (subgoal_tac "null_filter' ys x = Some a") + apply (simp add: null_filter'_def split: if_split_asm) + apply (simp add: eq) + done + +lemma null_filter_parent_of'': + "\ null_filter' xs = null_filter' ys; xs \ x \ c; c \ 0 \ + \ ys \ x \ c" + apply (clarsimp simp add: mdb_next_unfold) + apply (drule arg_cong[where f="\xs. xs x"]) + apply (simp add: null_filter'_def nullPointer_def split: if_split_asm) + done + +lemma null_filter_parentOf: + "\ null_filter' xs = null_filter' ys; xs \ x parentOf y \ + \ ys \ x parentOf y" + apply (clarsimp simp add: parentOf_def) + apply (rule across_null_filter_eq'[where x=x], assumption+) + apply (erule(1) across_null_filter_eq') + apply clarsimp + apply simp + apply simp + done + +lemma null_filter_descendant: + "\ null_filter' xs = null_filter' ys; xs \ x \ y \ + \ ys \ x \ y" + apply (erule subtree.induct) + apply (rule subtree.direct_parent) + apply (erule(2) null_filter_parent_of'') + apply assumption + apply (erule(1) null_filter_parentOf) + apply (erule subtree.trans_parent) + apply (erule(2) null_filter_parent_of'') + apply assumption + apply (erule(1) null_filter_parentOf) + done + +lemma null_filter_descendants_of': + "null_filter' xs = null_filter' ys + \ descendants_of' x xs = descendants_of' x ys" + apply (simp add: descendants_of'_def) + apply (rule set_eqI, rule iffI) + apply simp + apply (erule(1) null_filter_descendant) + apply simp + apply (erule(1) null_filter_descendant[OF sym]) + done + +lemma descendants_of_cte_at': + "\ p \ descendants_of x (cdt s); valid_mdb s \ + \ cte_wp_at (\c. c \ cap.NullCap) p s" + apply (simp add: descendants_of_def) + apply (drule tranclD2) + apply (clarsimp simp: cdt_parent_defs valid_mdb_def mdb_cte_at_def + simp del: split_paired_All) + apply (fastforce elim: cte_wp_at_weakenE) + done + + +lemma descendants_of_cte_at2': + "\ p \ descendants_of x (cdt s); valid_mdb s \ + \ cte_wp_at (\c. c \ cap.NullCap) x s" + apply (simp add: descendants_of_def) + apply (drule tranclD) + apply (clarsimp simp: cdt_parent_defs valid_mdb_def mdb_cte_at_def + simp del: split_paired_All) + apply (fastforce elim: cte_wp_at_weakenE) + done + +lemma cte_at_next_slot'': + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "\valid_list s; valid_mdb s; finite_depth (cdt s)\ + \ next_slot p (cdt_list s) (cdt s) = Some n \ cte_wp_at (\c. 
c \ cap.NullCap) p s" + apply(simp add: next_slot_def) + apply(simp split: if_split_asm) + apply(drule next_childD, simp) + apply(rule_tac p=n in descendants_of_cte_at2') + apply(simp add: child_descendant) + apply(simp) + apply(subgoal_tac "next_not_child_dom (p, cdt_list s, cdt s)") + prefer 2 + apply(simp add: next_not_child_termination valid_mdb_def valid_list_def) + apply(simp split: if_split_asm) + apply(case_tac "cdt s p") + apply(simp) + apply(rule descendants_of_cte_at') + apply(simp add: descendants_of_def cdt_parent_defs) + apply(rule r_into_trancl, simp) + apply(simp) + apply(drule next_sibD) + apply(elim exE conjE) + apply(drule after_in_list_in_list) + apply(rule descendants_of_cte_at') + apply(simp add: descendants_of_def cdt_parent_defs) + apply(rule r_into_trancl, simp) + apply(simp) + done + + +lemma state_relation_null_filterE: + "\ (s, s') \ state_relation; t = kheap_update f (ekheap_update ef s); + \f' g' h' pt_fn'. + t' = s'\ksPSpace := f' (ksPSpace s'), gsUserPages := g' (gsUserPages s'), + gsCNodes := h' (gsCNodes s'), + ksArchState := (ksArchState s') \gsPTTypes := pt_fn' (gsPTTypes (ksArchState s'))\\; + null_filter (caps_of_state t) = null_filter (caps_of_state s); + null_filter' (ctes_of t') = null_filter' (ctes_of s'); + pspace_relation (kheap t) (ksPSpace t'); + ekheap_relation (ekheap t) (ksPSpace t'); ready_queues_relation t t'; + ghost_relation (kheap t) (gsUserPages t') (gsCNodes t') (gsPTTypes (ksArchState t')); + valid_list s; + pspace_aligned' s'; pspace_distinct' s'; valid_objs s; valid_mdb s; + pspace_aligned' t'; pspace_distinct' t'; + mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s) \ + \ (t, t') \ state_relation" + apply (clarsimp simp: state_relation_def) + apply (intro conjI) + apply (simp add: cdt_relation_def cte_wp_at_caps_of_state) + apply (elim allEI) + apply clarsimp + apply (erule(1) across_null_filter_eq) + apply simp + apply (rule null_filter_descendants_of', simp) + apply simp + apply (case_tac "cdt s (a, b)") + apply (subst mdb_cte_at_no_descendants, assumption) + apply (simp add: cte_wp_at_caps_of_state swp_def) + apply (cut_tac s="kheap_update f (ekheap_update ef s)" and + s'="s'\ksPSpace := f' (ksPSpace s'), + gsUserPages := g' (gsUserPages s'), + gsCNodes := h' (gsCNodes s'), + ksArchState := ksArchState s' \gsPTTypes := pt_fn' (gsPTTypes (ksArchState s'))\\" + in pspace_relation_ctes_ofI, simp_all)[1] + apply (simp add: trans_state_update[symmetric] del: trans_state_update) + apply (erule caps_of_state_cteD) + apply (clarsimp simp: descendants_of'_def) + apply (case_tac cte) + apply (erule Null_not_subtree[rotated]) + apply simp + apply (drule(1) mdb_cte_atD) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply(simp add: cdt_list_relation_def cte_wp_at_caps_of_state) + apply(elim allEI) + apply(clarsimp) + apply(case_tac "next_slot (a, b) (cdt_list (s)) (cdt s)") + apply(simp) + apply(subgoal_tac "cte_wp_at (\c. c \ cap.NullCap) (a, b) s") + apply(drule_tac f="\cs. cs (a, b)" in arg_cong) + apply(clarsimp simp: cte_wp_at_caps_of_state) + apply(clarsimp simp: null_filter_def split: if_split_asm) + apply(drule_tac f="\ctes. 
ctes (cte_map (a, b))" in arg_cong) + apply(simp add: null_filter'_def cte_wp_at_ctes_of split: if_split_asm) + apply(frule pspace_relation_cte_wp_at) + apply(simp add: cte_wp_at_caps_of_state) + apply(simp) + apply(simp) + apply(simp add: cte_wp_at_ctes_of) + apply (simp add: mdb_cte_at_def) + apply(frule finite_depth) + apply(frule(3) cte_at_next_slot'') + apply simp + apply (simp add: revokable_relation_def) + apply (elim allEI, rule impI, drule(1) mp, elim allEI) + apply (clarsimp elim!: null_filterE) + apply (drule(3) pspace_relation_cte_wp_at [OF _ caps_of_state_cteD]) + apply (drule_tac f="\ctes. ctes (cte_map (a, b))" in arg_cong) + apply (clarsimp simp: null_filter'_def cte_wp_at_ctes_of + split: if_split_asm) + apply (simp add: arch_state_relation_def) + done + +lemma lookupAround2_pspace_no: + "is_aligned ptr sz \ + (case fst (lookupAround2 (ptr + 2 ^ sz - 1) ps) of None \ return () + | Some (x, y) \ haskell_assert (x < fromPPtr ptr) []) + = assert ({ptr..ptr + 2 ^ sz - 1} \ dom ps = {})" + apply (simp add: assert_def split: option.split) + apply safe + apply (clarsimp simp: lookupAround2_None1) + apply (clarsimp simp: lookupAround2_char1) + apply (clarsimp simp: lookupAround2_char1) + apply (drule_tac a=a in equals0D) + apply (simp add: linorder_not_less) + apply fastforce + done + +lemma pspace_no_overlap_disjoint': + "\pspace_aligned' s;pspace_no_overlap' x n s\ + \ {x .. (x && ~~ mask n) + 2 ^ n - 1} \ dom (ksPSpace s) = {}" + unfolding pspace_no_overlap'_def + apply (rule disjointI) + apply (rule ccontr) + apply (clarsimp simp: mask_def add_diff_eq) + apply (elim allE impE notE) + apply (simp add:field_simps)+ + apply (erule(2) order_trans[OF _ is_aligned_no_overflow,OF _ pspace_alignedD']) + apply (erule(1) is_aligned_no_overflow[OF pspace_alignedD']) + apply (erule order_trans) + apply (simp add:p_assoc_help) +done + +lemma foldr_update_ko_wp_at': + assumes pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + shows + "ko_wp_at' P p (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s) + = (if p \ set addrs then P obj + else ko_wp_at' P p s)" + (is "ko_wp_at' P p ?s' = ?Q") + apply (clarsimp simp: ko_wp_at'_def al) + apply (intro conjI impI) + apply safe[1] + apply (rule pspace_distinctD' [OF _ pv'(2)]) + apply simp + apply safe[1] + apply (simp add: ps_clear_def dom_if_Some) + apply blast + apply simp + apply (rule pspace_distinctD' [OF _ pv'(2)]) + apply simp + done + +lemma foldr_update_obj_at': + assumes pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + shows + "obj_at' P p (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s) + = (if p \ set addrs then (\obj'. 
projectKO_opt obj = Some obj' \ P obj') + else obj_at' P p s)" + apply (simp only: obj_at'_real_def) + apply (rule foldr_update_ko_wp_at' [OF pv pv' al]) + done + +lemma makeObjectKO_eq: + assumes x: "makeObjectKO dev tp = Some v" + shows + "(v = KOCTE cte) = + (tp = Inr (APIObjectType ArchTypes_H.CapTableObject) \ cte = makeObject)" + "(v = KOTCB tcb) = + (tp = Inr (APIObjectType ArchTypes_H.TCBObject) \ tcb = makeObject)" + using x + by (simp add: makeObjectKO_def eq_commute + split: apiobject_type.split_asm sum.split_asm kernel_object.split_asm + AARCH64_H.object_type.split_asm arch_kernel_object.split_asm)+ + +lemma pspace_no_overlap_base': + "\pspace_aligned' s;pspace_no_overlap' x n s; is_aligned x n \ \ ksPSpace s x = None" + apply (drule(1) pspace_no_overlap_disjoint') + apply (drule equals0D[where a=x]) + apply (rule ccontr, clarsimp) + apply (erule is_aligned_get_word_bits) + apply (erule impE) + apply (frule mask_out_add_aligned[where q = 0,simplified,symmetric]) + apply (fastforce simp add: is_aligned_no_overflow) + apply clarsimp+ + done + +lemma the_ctes_makeObject: + "fst (the (tcb_cte_cases n)) makeObject + = (if tcb_cte_cases n = None + then fst (the None :: (Structures_H.tcb \ cte) \ ((cte \ cte) \ Structures_H.tcb \ Structures_H.tcb)) + (makeObject :: tcb) + else makeObject)" + apply (simp add: makeObject_tcb) + apply (clarsimp simp: tcb_cte_cases_def) + done + +lemma cte_wp_at_obj_cases_mask: + "cte_wp_at' P p s = + (obj_at' P p s \ + (p && mask tcbBlockSizeBits \ dom tcb_cte_cases + \ obj_at' (P \ fst (the (tcb_cte_cases (p && mask tcbBlockSizeBits)))) + (p && ~~ mask tcbBlockSizeBits) s))" + apply (simp add: cte_wp_at_obj_cases') + apply (rule arg_cong [where f="\x. F \ x" for F]) + apply (rule iffI) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (frule(1) tcb_cte_cases_aligned_helpers) + apply fastforce + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (rule bexI[where x="p && mask tcbBlockSizeBits"]) + apply (clarsimp simp: subtract_mask) + apply fastforce + done + +lemma ps_clearD: + "\ ps_clear x n s; ksPSpace s y = Some v; x < y; y \ x + 2 ^ n - 1 \ \ False" + apply (clarsimp simp: ps_clear_def) + apply (drule_tac a=y in equals0D) + apply (simp add: dom_def mask_def add_diff_eq) + apply fastforce + done + +lemma cte_wp_at_retype': + assumes ko: "makeObjectKO dev tp = Some obj" + and pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + and pn: "\x \ set addrs. ksPSpace s x = None" + shows + "cte_wp_at' P p (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s) + = (if tp = Inr (APIObjectType ArchTypes_H.CapTableObject) \ p \ set addrs + \ tp = Inr (APIObjectType ArchTypes_H.TCBObject) + \ (p && ~~ mask tcbBlockSizeBits \ set addrs) \ (p && mask tcbBlockSizeBits \ dom tcb_cte_cases) + then P (CTE NullCap nullMDBNode) + else cte_wp_at' P p s)" + (is "cte_wp_at' P p ?s' = ?Q") + apply (subgoal_tac "\p \ set addrs. \(P :: cte \ bool). \ obj_at' P p s") + apply (subgoal_tac "\p \ set addrs. \(P :: tcb \ bool). \ obj_at' P p s") + apply (subgoal_tac "(\P :: cte \ bool. obj_at' P p ?s') + \ (\ (\P :: tcb \ bool. 
obj_at' P (p && ~~ mask tcbBlockSizeBits) ?s'))") + apply (simp only: cte_wp_at_obj_cases_mask foldr_update_obj_at'[OF pv pv' al]) + apply (simp add: the_ctes_makeObject makeObjectKO_eq [OF ko] makeObject_cte dom_def + split del: if_split + cong: if_cong) + apply (insert al ko) + apply (simp, safe, simp_all) + apply fastforce + apply fastforce + apply (clarsimp elim!: obj_atE' simp: objBits_simps) + apply (drule ps_clearD[where y=p and n=tcbBlockSizeBits]) + apply simp + apply (rule order_trans_rules(17)) + apply (clarsimp cong: if_cong) + apply (rule word_and_le2) + apply (simp add: word_neg_and_le[simplified field_simps]) + apply simp + apply (clarsimp elim!: obj_atE' simp: pn) + apply (clarsimp elim!: obj_atE' simp: pn) + done + +lemma ctes_of_retype: + assumes ko: "makeObjectKO dev tp = Some obj" + and pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + and pn: "\x \ set addrs. ksPSpace s x = None" + shows + "map_to_ctes (\ xa. if xa \ set addrs then Some obj else ksPSpace s xa) + = (\x. if tp = Inr (APIObjectType ArchTypes_H.CapTableObject) \ x \ set addrs + \ tp = Inr (APIObjectType ArchTypes_H.TCBObject) + \ (x && ~~ mask tcbBlockSizeBits \ set addrs) \ (x && mask tcbBlockSizeBits \ dom tcb_cte_cases) + then Some (CTE NullCap nullMDBNode) + else map_to_ctes (ksPSpace s) x)" + (is "map_to_ctes ?ps' = ?map'") + using cte_wp_at_retype' [where P="(=) cte" for cte, OF ko pv pv' al pn] + arg_cong [where f=Not, OF cte_wp_at_retype' [OF ko pv pv' al pn, where P="\"]] + apply (simp(no_asm_use) add: cte_wp_at_ctes_of cong: if_cong) + apply (rule ext) + apply (case_tac "map_to_ctes ?ps' x") + apply (simp(no_asm_simp)) + apply (simp split: if_split_asm) + apply simp + done + +lemma None_ctes_of_cte_at: + "(None = ctes_of s x) = (\ cte_at' x s)" + by (fastforce simp add: cte_wp_at_ctes_of) + +lemma null_filter_ctes_retype: + assumes ko: "makeObjectKO dev tp = Some obj" + and pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + and pn: "\x \ set addrs. ksPSpace s x = None" + shows + "null_filter' (map_to_ctes (foldr (\addr. 
data_map_insert addr obj) addrs (ksPSpace s))) + = null_filter' (map_to_ctes (ksPSpace s))" + apply (subst foldr_upd_app_if[folded data_map_insert_def]) + apply (subst ctes_of_retype[OF ko pv pv' al pn]) + apply (rule ext) + apply (clarsimp simp: null_filter'_def None_ctes_of_cte_at) + apply (intro conjI impI notI) + apply (elim cte_wp_atE' disjE conjE) + apply (simp_all add: pn) + apply (cut_tac x="ptr'" and v="if ptr' \ set addrs then obj else KOTCB tcb" + in pspace_distinctD'[OF _ pv'(2)])[1] + apply simp + apply (insert ko[symmetric], + simp add: makeObjectKO_def objBits_simps pn + split: if_split_asm)[1] + apply (drule(2) tcb_ctes_clear[where s="ksPSpace_update f s" for f s]) + apply simp + apply fastforce + apply (cut_tac x="x && ~~ mask tcbBlockSizeBits" in pspace_distinctD'[OF _ pv'(2)])[1] + apply simp + apply (elim cte_wp_atE' disjE conjE) + apply (insert ko[symmetric], simp add: makeObjectKO_def objBits_simps) + apply clarsimp + apply (subst(asm) subtract_mask[symmetric], + erule_tac v="if x \ set addrs then KOTCB makeObject else KOCTE cte" + in tcb_space_clear) + apply (simp add: is_aligned_mask word_bw_assocs) + apply assumption + apply simp + apply simp + apply (simp add: pn) + apply (clarsimp simp: makeObjectKO_def) + apply (drule(1) tcb_cte_cases_aligned_helpers) + apply (clarsimp simp: pn) + done + +lemma new_cap_addrs_aligned: + "\ is_aligned ptr (objBitsKO ko) \ + \ \x \ set (new_cap_addrs n ptr ko). is_aligned x (objBitsKO ko)" + apply (clarsimp simp: new_cap_addrs_def) + apply (erule aligned_add_aligned[OF _ is_aligned_shift]) + apply simp + done + +lemma new_cap_addrs_distinct: + assumes cover: "range_cover ptr sz (objBitsKO ko) n" + shows "distinct (new_cap_addrs n ptr ko)" + unfolding new_cap_addrs_def + apply (simp add: distinct_map) + apply (rule comp_inj_on[where f=of_nat, unfolded o_def]) + apply (rule subset_inj_on) + apply (rule word_unat.Abs_inj_on) + apply (clarsimp simp only: unats_def atLeastLessThan_iff + dest!: less_two_pow_divD) + apply (insert cover) + apply (erule less_le_trans) + apply (insert range_cover.range_cover_n_le[OF cover]) + apply (erule le_trans) + apply (cases "objBitsKO ko = 0") + apply (simp add:word_bits_def) + apply (rule less_imp_le) + apply (rule power_strict_increasing) + apply (simp add:word_bits_def) + apply simp + apply (rule inj_onI) + apply clarsimp + apply (drule arg_cong[where f="\x. x >> objBitsKO ko"]) + apply (cases "objBitsKO ko = 0") + apply simp + apply (subst(asm) shiftl_shiftr_id, simp add: range_cover_def) + apply (subst word_unat_power, rule of_nat_mono_maybe) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def) + apply simp + apply (erule order_less_le_trans) + apply simp + apply (subst(asm) shiftl_shiftr_id, simp add: range_cover_def) + apply (subst word_unat_power, rule of_nat_mono_maybe) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def) + apply simp + apply (erule order_less_le_trans) + apply simp + apply assumption + done + +lemma new_cap_addrs_subset: + assumes range_cover:"range_cover ptr sz (objBitsKO ko) n" + shows "set (new_cap_addrs n ptr ko) \ {ptr .. 
ptr_add (ptr && ~~ mask sz) (2 ^ sz - 1)}" + apply (clarsimp simp add: new_cap_addrs_def shiftl_t2n + field_simps + dest!: less_two_pow_divD) + apply (intro conjI) + apply (insert range_cover) + apply (rule machine_word_plus_mono_right_split[OF range_cover.range_cover_compare]) + apply assumption + apply simp + apply (simp add:range_cover_def word_bits_def) + apply (clarsimp simp:ptr_add_def) + apply (subst word_plus_and_or_coroll2[symmetric,where w = "mask sz"]) + apply (subst add.commute) + apply (subst add.assoc) + apply (rule word_plus_mono_right) + apply (drule(1) range_cover.range_cover_compare) + apply (rule iffD1[OF le_m1_iff_lt,THEN iffD2]) + using range_cover + apply (simp add: p2_gt_0 range_cover_def word_bits_def) + apply (rule iffD2[OF word_less_nat_alt]) + apply (rule le_less_trans[OF unat_plus_gt]) + using range_cover + apply (clarsimp simp: range_cover_def) + apply (insert range_cover) + apply (rule is_aligned_no_wrap'[OF is_aligned_neg_mask,OF le_refl ]) + apply (simp add:range_cover_def)+ +done + +definition + obj_relation_retype :: "Structures_A.kernel_object \ + Structures_H.kernel_object \ bool" +where + "obj_relation_retype ko ko' \ + obj_bits ko \ objBitsKO ko' + \ (\p. fst ` obj_relation_cuts ko p + = {p + x * 2 ^ (objBitsKO ko') | x. x < 2 ^ (obj_bits ko - objBitsKO ko')} + \ (\x \ obj_relation_cuts ko p. snd x ko ko'))" + +lemma obj_relation_retype_cutsD: + "\ (x, P) \ obj_relation_cuts ko p; obj_relation_retype ko ko' \ + \ \y. x = p + y * 2 ^ (objBitsKO ko') \ y < 2 ^ (obj_bits ko - objBitsKO ko') + \ P ko ko'" + apply (clarsimp simp: obj_relation_retype_def) + apply (drule spec[where x=p]) + apply clarsimp + apply (drule(1) bspec) + apply (drule arg_cong[where f="\S. x \ S"]) + apply clarsimp + apply (fastforce simp: image_def) + done + +lemma APIType_map2_Untyped[simp]: + "(APIType_map2 tp = Structures_A.Untyped) + = (tp = Inr (APIObjectType ArchTypes_H.Untyped))" + by (simp add: APIType_map2_def + split: sum.split object_type.split kernel_object.split arch_kernel_object.splits + apiobject_type.split) + +lemma obj_relation_retype_leD: + "\ obj_relation_retype ko ko' \ + \ objBitsKO ko' \ obj_bits ko" + by (simp add: obj_relation_retype_def) + +lemma obj_relation_retype_default_leD: + "\ obj_relation_retype (default_object (APIType_map2 ty) dev us) ko; + ty \ Inr (APIObjectType ArchTypes_H.Untyped) \ + \ objBitsKO ko \ obj_bits_api (APIType_map2 ty) us" + by (simp add: obj_relation_retype_def objBits_def obj_bits_dev_irr) + +lemma makeObjectKO_Untyped: + "makeObjectKO dev ty = Some v \ ty \ Inr (APIObjectType ArchTypes_H.Untyped)" + by (clarsimp simp: makeObjectKO_def) + +lemma obj_relation_cuts_trivial: + "ptr \ fst ` obj_relation_cuts ty ptr" + apply (case_tac ty) + apply (rename_tac sz cs) + apply (clarsimp simp:image_def cte_map_def well_formed_cnode_n_def) + apply (rule_tac x = "replicate sz False" in exI) + apply clarsimp+ + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: image_def pageBits_def) + apply (rule exI, rule_tac x=0 in bexI, simp, simp) + apply (rule_tac x=0 in exI, simp) + apply (rule p2_gt_0[THEN iffD2]) + apply (rename_tac vmpage_size) + apply (case_tac vmpage_size; clarsimp simp:pageBitsForSize_def bit_simps) + done + +lemma obj_relation_retype_addrs_eq: + assumes not_unt:"ty \ Inr (APIObjectType ArchTypes_H.Untyped)" + assumes amp: "m = 2^ ((obj_bits_api (APIType_map2 ty) us) - (objBitsKO ko)) * n" + assumes orr: "obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + shows "\ range_cover ptr sz 
(obj_bits_api (APIType_map2 ty) us) n \ \ + (\x \ set (retype_addrs ptr (APIType_map2 ty) n us). + fst ` obj_relation_cuts (default_object (APIType_map2 ty) dev us) x) + = set (new_cap_addrs m ptr ko)" + apply (rule set_eqI, rule iffI) + apply (clarsimp simp: retype_addrs_def) + apply (rename_tac p a b) + apply (drule obj_relation_retype_cutsD[OF _ orr]) + apply (cut_tac obj_relation_retype_default_leD[OF orr not_unt]) + apply (clarsimp simp: new_cap_addrs_def image_def + dest!: less_two_pow_divD) + apply (rule_tac x="p * 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) + unat y" + in rev_bexI) + apply (simp add: amp obj_bits_api_default_object not_unt obj_bits_dev_irr) + apply (rule less_le_trans[OF nat_add_left_cancel_less[THEN iffD2]]) + apply (erule unat_mono) + apply (subst unat_power_lower) + apply (rule le_less_trans[OF diff_le_self]) + apply (clarsimp simp: range_cover_def + split: Structures_A.apiobject_type.splits) + apply (simp add:field_simps,subst mult_Suc[symmetric]) + apply (rule mult_le_mono1) + apply simp + apply (simp add: ptr_add_def shiftl_t2n field_simps + objBits_def[symmetric] word_unat_power[symmetric]) + apply (simp add: power_add[symmetric]) + apply (clarsimp simp: new_cap_addrs_def retype_addrs_def + dest!: less_two_pow_divD) + apply (rename_tac p) + apply (cut_tac obj_relation_retype_default_leD[OF orr not_unt]) + apply (cut_tac obj_relation_retype_leD[OF orr]) + apply (case_tac "n = 0") + apply (simp add:amp) + apply (case_tac "p = 0") + apply simp + apply (rule_tac x = 0 in rev_bexI) + apply simp+ + apply (rule obj_relation_cuts_trivial) + apply (rule_tac x="p div (2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko))" + in rev_bexI) + apply (simp add:amp) + apply (rule td_gal_lt[THEN iffD1]) + apply (simp add:field_simps)+ + using orr + apply (clarsimp simp: obj_relation_retype_def ptr_add_def) + apply (thin_tac "\x. 
P x" for P) + apply (rule_tac x="of_nat (p mod (2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko)))" in exI) + apply (simp only: word_unat_power Abs_fnat_homs shiftl_t2n) + apply (rule conjI) + apply (rule arg_cong[where f=of_nat]) + apply (subst mult_div_rearrange) + apply simp + apply (subst minus_mod_eq_mult_div[symmetric]) + apply (simp add:diff_mult_distrib2) + apply (rule of_nat_mono_maybe) + apply (rule power_strict_increasing) + apply (rule le_less_trans[OF diff_le_self]) + apply (clarsimp simp: range_cover_def obj_bits_api_default_object obj_bits_dev_irr + not_unt word_bits_def)+ +done + +lemma objBits_le_obj_bits_api: + "makeObjectKO dev ty = Some ko \ + objBitsKO ko \ obj_bits_api (APIType_map2 ty) us" + apply (case_tac ty) + apply (auto simp: default_arch_object_def bit_simps + makeObjectKO_def objBits_simps' APIType_map2_def obj_bits_api_def slot_bits_def + split: Structures_H.kernel_object.splits arch_kernel_object.splits object_type.splits + Structures_H.kernel_object.splits arch_kernel_object.splits apiobject_type.splits) + done + + +lemma obj_relation_retype_other_obj: + "\ is_other_obj_relation_type (a_type ko); other_obj_relation ko ko' \ + \ obj_relation_retype ko ko'" + apply (simp add: obj_relation_retype_def) + apply (subgoal_tac "objBitsKO ko' = obj_bits ko") + apply (clarsimp simp: is_other_obj_relation_type) + apply (fastforce simp: other_obj_relation_def objBits_simps' + split: Structures_A.kernel_object.split_asm + Structures_H.kernel_object.split_asm + Structures_H.kernel_object.split + arch_kernel_obj.split_asm arch_kernel_object.split) + done + +lemma retype_pspace_relation: + assumes sr: "pspace_relation (kheap s) (ksPSpace s')" + and vs: "valid_pspace s" "valid_mdb s" + and vs': "pspace_aligned' s'" "pspace_distinct' s'" + and pn: "pspace_no_overlap_range_cover ptr sz s" + and pn': "pspace_no_overlap' ptr sz s'" + and ko: "makeObjectKO dev ty = Some ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + and orr: "obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + and num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "pspace_relation (foldr (\p ps. ps(p \ default_object (APIType_map2 ty) dev us)) + (retype_addrs ptr (APIType_map2 ty) n us) (kheap s)) + (foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s'))" + (is "pspace_relation ?ps ?ps'") + unfolding pspace_relation_def +proof + have not_unt: "ty \ Inr (APIObjectType ArchTypes_H.Untyped)" + by (rule makeObjectKO_Untyped[OF ko]) + + have dom_not_ra: + "\x \ dom (kheap s). x \ set (retype_addrs ptr (APIType_map2 ty) n us)" + apply clarsimp + apply (erule(1) pspace_no_overlapC[OF pn _ _ cover vs(1)]) + done + + hence dom_Int_ra: + "set (retype_addrs ptr (APIType_map2 ty) n us) \ dom (kheap s) = {}" + by auto + + note pdom = pspace_dom_upd [OF dom_Int_ra, where ko="default_object (APIType_map2 ty) dev us"] + + have pdom': "dom ?ps' = dom (ksPSpace s') \ set (new_cap_addrs m ptr ko)" + by (clarsimp simp add: foldr_upd_app_if[folded data_map_insert_def] + dom_if_Some Un_commute + split del: if_split) + + note not_unt = makeObjectKO_Untyped [OF ko] + + have "pspace_dom (kheap s) = dom (ksPSpace s')" + using sr by (simp add: pspace_relation_def) + + thus "pspace_dom ?ps = dom ?ps'" + apply (simp add: pdom pdom') + apply (rule arg_cong[where f="\T. S \ T" for S]) + apply (rule obj_relation_retype_addrs_eq[OF not_unt num_r orr cover]) + done + + have dom_same: + "\x v. 
kheap s x = Some v \ ?ps x = Some v" + apply (frule bspec [OF dom_not_ra, OF domI]) + apply (simp add: foldr_upd_app_if) + done + have cover':"range_cover ptr sz (objBitsKO ko) m" + by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) + have dom_same': + "\x v. ksPSpace s' x = Some v \ ?ps' x = Some v" + apply (clarsimp simp:foldr_upd_app_if[folded data_map_insert_def]) + apply (drule domI[where m = "ksPSpace s'"]) + apply (drule(1) IntI) + apply (erule_tac A = "A \ B" for A B in in_emptyE[rotated]) + apply (rule disjoint_subset[OF new_cap_addrs_subset[OF cover']]) + apply (clarsimp simp:ptr_add_def field_simps) + apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn']) + done + + show "\x \ dom ?ps. \(y, P) \ obj_relation_cuts (the (?ps x)) x. + P (the (?ps x)) (the (?ps' y))" + using sr + apply (clarsimp simp: pspace_relation_def) + apply (simp add: foldr_upd_app_if split: if_split_asm) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def]) + apply (rule conjI) + apply (drule obj_relation_retype_cutsD [OF _ orr], clarsimp) + apply (rule impI, erule notE) + apply (simp add: obj_relation_retype_addrs_eq[OF not_unt num_r orr cover,symmetric]) + apply (erule rev_bexI) + apply (simp add: image_def) + apply (erule rev_bexI, simp) + apply (drule bspec, erule domI) + apply clarsimp + apply (drule(1) bspec, simp) + apply (subgoal_tac "a \ pspace_dom (kheap s)") + apply clarsimp + apply (frule dom_same', simp) + apply (simp(no_asm) add: pspace_dom_def) + apply (rule rev_bexI, erule domI) + apply (simp add: image_def) + apply (erule rev_bexI, simp) + done +qed + + +(*Clagged from Retype_AC*) +lemma foldr_upd_app_if': "foldr (\p ps. ps(p := f p)) as g = (\x. if x \ set as then (f x) else g x)" + apply (induct as) + apply simp + apply simp + apply (rule ext) + apply simp + done + +lemma etcb_rel_makeObject: "etcb_relation default_etcb makeObject" + apply (simp add: etcb_relation_def default_etcb_def) + apply (simp add: makeObject_tcb default_priority_def default_domain_def) + done + + +lemma ekh_at_tcb_at: "valid_etcbs_2 ekh kh \ ekh x = Some y \ \tcb. kh x = Some (TCB tcb)" + apply (simp add: valid_etcbs_2_def + st_tcb_at_kh_def obj_at_kh_def + is_etcb_at'_def obj_at_def) + apply force + done + +lemma default_etcb_default_domain_futz [simp]: + "default_etcb\tcb_domain := default_domain\ = default_etcb" +unfolding default_etcb_def by simp + +lemma retype_ekheap_relation: + assumes sr: "ekheap_relation (ekheap s) (ksPSpace s')" + and sr': "pspace_relation (kheap s) (ksPSpace s')" + and vs: "valid_pspace s" "valid_mdb s" + and et: "valid_etcbs s" + and vs': "pspace_aligned' s'" "pspace_distinct' s'" + and pn: "pspace_no_overlap_range_cover ptr sz s" + and pn': "pspace_no_overlap' ptr sz s'" + and ko: "makeObjectKO dev ty = Some ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + and orr: "obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + and num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ekheap_relation (foldr (\p ps. ps(p := default_ext (APIType_map2 ty) default_domain)) + (retype_addrs ptr (APIType_map2 ty) n us) (ekheap s)) + (foldr (\addr. 
data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s'))" + (is "ekheap_relation ?ps ?ps'") + proof - + have not_unt: "ty \ Inr (APIObjectType ArchTypes_H.Untyped)" + by (rule makeObjectKO_Untyped[OF ko]) + show ?thesis + apply (case_tac "ty \ Inr (APIObjectType apiobject_type.TCBObject)") + apply (insert ko) + apply (cut_tac retype_pspace_relation[OF sr' vs vs' pn pn' ko cover orr num_r]) + apply (simp add: foldr_upd_app_if' foldr_upd_app_if[folded data_map_insert_def]) + apply (simp add: obj_relation_retype_addrs_eq[OF not_unt num_r orr cover,symmetric]) + apply (insert sr) + apply (clarsimp simp add: ekheap_relation_def + pspace_relation_def default_ext_def cong: if_cong + split: if_split_asm) + subgoal by (clarsimp simp add: makeObjectKO_def APIType_map2_def cong: if_cong + split: sum.splits Structures_H.kernel_object.splits + arch_kernel_object.splits AARCH64_H.object_type.splits apiobject_type.splits) + + apply (frule ekh_at_tcb_at[OF et]) + apply (intro impI conjI) + apply clarsimp + apply (drule_tac x=a in bspec,force) + apply (clarsimp simp: tcb_relation_cut_def split: if_split_asm) + apply (case_tac ko,simp_all) + apply (clarsimp simp add: makeObjectKO_def cong: if_cong split: sum.splits Structures_H.kernel_object.splits + arch_kernel_object.splits AARCH64_H.object_type.splits + apiobject_type.splits if_split_asm) + apply (drule_tac x=xa in bspec,simp) + subgoal by force + subgoal by force + apply (simp add: foldr_upd_app_if' foldr_upd_app_if[folded data_map_insert_def]) + apply (simp add: obj_relation_retype_addrs_eq[OF not_unt num_r orr cover,symmetric]) + apply (clarsimp simp add: APIType_map2_def default_ext_def ekheap_relation_def + default_object_def makeObjectKO_def etcb_rel_makeObject + cong: if_cong + split: if_split_asm) + apply force + done +qed + +lemma pspace_no_overlapD': + "\ ksPSpace s x = Some ko; pspace_no_overlap' p bits s \ + \ {x .. x + 2 ^ objBitsKO ko - 1} \ {p .. (p && ~~ mask bits) + 2 ^ bits - 1} = {}" + by (simp add:pspace_no_overlap'_def mask_def add_diff_eq) + +lemma new_range_subset: + assumes + cover: "range_cover ptr sz (objBitsKO ko) n" + and addr: "x \ set (new_cap_addrs n ptr ko)" + shows "mask_range x (objBitsKO ko) \ {ptr .. 
(ptr && ~~ mask sz) + 2 ^ sz - 1}" + (is "?lhs \ ?rhs") +proof - + have base_in: "x \ {ptr..ptr_add (ptr && ~~ mask sz) (2 ^ sz - 1)}" + by (rule set_mp[OF new_cap_addrs_subset[OF cover] addr]) + have aligned: "is_aligned x (objBitsKO ko)" + apply (insert cover) + apply (clarsimp simp:range_cover_def) + apply (drule new_cap_addrs_aligned) + apply (erule bspec[OF _ addr]) + done + show ?thesis using base_in aligned addr + apply (intro range_subsetI) + apply (clarsimp simp:ptr_add_def field_simps)+ + apply (simp add:x_power_minus_1) + apply (clarsimp simp:new_cap_addrs_def) + apply (subst word_plus_and_or_coroll2[symmetric,where w = "mask sz"]) + apply (subst add.commute) + apply (subst add.assoc) + apply (subst add.assoc) + apply (rule word_plus_mono_right) + apply (simp add:mask_2pm1[symmetric]) + apply (rule iffD2[OF shiftr_mask_cmp[where c = "objBitsKO ko"]]) + apply (insert cover) + apply (simp add:range_cover_def) + apply (simp add:range_cover_def word_bits_def) + apply (subst aligned_shift') + apply (simp add:mask_lt_2pn range_cover_def word_bits_def ) + apply (drule is_aligned_addD1) + apply (simp add:range_cover_def) + apply (rule aligned_add_aligned) + apply (rule aligned_already_mask) + apply (fastforce simp:range_cover_def) + apply (simp_all add: range_cover_def)[3] + apply (subst shiftr_mask2[symmetric]) + apply (simp add:range_cover_def word_bits_def) + apply (rule le_shiftr) + apply (subst le_mask_iff_lt_2n[THEN iffD1]) + apply (simp add:range_cover_def word_bits_def) + apply (clarsimp simp:word_less_nat_alt) + apply (rule le_less_trans[OF unat_plus_gt]) + apply (frule(1) range_cover.range_cover_compare) + apply (clarsimp simp:shiftl_t2n mult.commute range_cover_def) + apply (rule is_aligned_no_wrap'[OF is_aligned_neg_mask]) + apply (rule le_refl) + apply (simp add:range_cover_def) + done +qed + +lemma retype_aligned_distinct': + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + and pn': "pspace_no_overlap' ptr sz s'" + and cover: "range_cover ptr sz (objBitsKO ko) n " + shows + "pspace_distinct' (s' \ksPSpace := foldr (\addr. data_map_insert addr ko) + (new_cap_addrs n ptr ko) (ksPSpace s')\)" + "pspace_aligned' (s' \ksPSpace := foldr (\addr. data_map_insert addr ko) + (new_cap_addrs n ptr ko) (ksPSpace s')\)" + (is "pspace_aligned' (s'\ksPSpace := ?ps\)") +proof - + have al: "is_aligned ptr (objBitsKO ko)" + using cover + by (simp add:cover range_cover_def) + let ?s' = "s'\ksPSpace := ?ps\" + note nc_al = bspec [OF new_cap_addrs_aligned [OF al]] + note nc_al' = nc_al[unfolded objBits_def] + + show pa': "pspace_aligned' ?s'" using vs'(1) + apply (subst foldr_upd_app_if[folded data_map_insert_def]) + apply (clarsimp simp add: pspace_aligned'_def nc_al' + split: if_split_asm) + apply (drule bspec, erule domI, simp) + done + + have okov: "objBitsKO ko < word_bits" + by (simp add: objBits_def) + + have new_range_disjoint: + "\x. x \ set (new_cap_addrs n ptr ko) \ + ({x .. 
x + 2 ^ (objBitsKO ko) - 1} - {x}) \ set (new_cap_addrs n ptr ko) = {}" + apply safe + apply (rule ccontr) + apply (frule(2) aligned_neq_into_no_overlap [OF _ nc_al nc_al]) + apply (drule_tac a=xa in equals0D) + apply (clarsimp simp: field_simps is_aligned_no_overflow [OF nc_al]) + done + note new_range_sub = new_range_subset [OF cover] + + show pd': "pspace_distinct' ?s'" using vs'(2) + apply (subst foldr_upd_app_if[folded data_map_insert_def]) + apply (simp add: pspace_distinct'_def dom_if_Some ball_Un) + apply (intro conjI ballI impI) + apply (simp add: ps_clear_def dom_if_Some Int_Un_distrib mask_def add_diff_eq + objBits_def[symmetric]) + apply (rule conjI) + apply (erule new_range_disjoint) + apply (rule disjoint_subset[OF Diff_subset]) + apply (simp only: add_mask_fold) + apply (erule disjoint_subset[OF new_range_sub]) + apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn']) + apply (clarsimp simp add: ps_clear_def dom_if_Some Int_Un_distrib mask_def add_diff_eq) + apply (rule conjI) + apply (erule new_range_disjoint) + apply (rule disjoint_subset[OF Diff_subset]) + apply (simp only: add_mask_fold) + apply (erule disjoint_subset[OF new_range_sub]) + apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn']) + apply (clarsimp simp add: ps_clear_def dom_if_Some Int_Un_distrib) + apply (subst Int_commute) + apply (rule disjoint_subset[OF new_cap_addrs_subset,OF cover]) + apply (subst Int_commute) + apply (simp add:ptr_add_def field_simps) + apply (rule disjoint_subset[OF Diff_subset]) + apply (drule pspace_no_overlapD' [OF _ pn']) + apply (simp add: mask_def add_diff_eq) + done +qed + +definition + update_gs :: "Structures_A.apiobject_type \ nat \ machine_word set + \ 'a kernel_state_scheme \ 'a kernel_state_scheme" +where + "update_gs ty us ptrs \ + case ty of + Structures_A.CapTableObject \ gsCNodes_update + (\cns x. if x \ ptrs then Some us else cns x) + | ArchObject SmallPageObj \ gsUserPages_update + (\ups x. if x \ ptrs then Some ARMSmallPage else ups x) + | ArchObject LargePageObj \ gsUserPages_update + (\ups x. if x \ ptrs then Some ARMLargePage else ups x) + | ArchObject HugePageObj \ gsUserPages_update + (\ups x. if x \ ptrs then Some ARMHugePage else ups x) + | ArchObject PageTableObj \ ksArchState_update + (\as. gsPTTypes_update (\pt_types x. if x \ ptrs then Some NormalPT_T else pt_types x) as) + | ArchObject VSpaceObj \ ksArchState_update + (\as. gsPTTypes_update (\pt_types x. if x \ ptrs then Some VSRootPT_T else pt_types x) as) + | _ \ id" + +lemma ksPSpace_update_gs_eq[simp]: + "ksPSpace (update_gs ty us ptrs s) = ksPSpace s" + by (simp add: update_gs_def + split: Structures_A.apiobject_type.splits aobject_type.splits) + +end + +global_interpretation update_gs: PSpace_update_eq "update_gs ty us ptrs" + by (simp add: PSpace_update_eq_def) + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma ksMachineState_update_gs[simp]: + "ksMachineState (update_gs tp us addrs s) = ksMachineState s" + by (simp add: update_gs_def + split: aobject_type.splits Structures_A.apiobject_type.splits) + +lemma ksReadyQueues_update_gs[simp]: + "ksReadyQueues (update_gs tp us addrs s) = ksReadyQueues s" + by (simp add: update_gs_def + split: aobject_type.splits Structures_A.apiobject_type.splits) + +lemma update_gs_ksMachineState_update_swap: + "update_gs tp us addrs (ksMachineState_update f s) = + ksMachineState_update f (update_gs tp us addrs s)" + by (simp add: update_gs_def + split: aobject_type.splits Structures_A.apiobject_type.splits) + +lemma update_gs_id: + "tp \ no_gs_types \ update_gs tp us addrs = id" + by (simp add: no_gs_types_def update_gs_def + split: Structures_A.apiobject_type.splits aobject_type.splits) + +lemma update_gs_simps[simp]: + "update_gs Structures_A.apiobject_type.CapTableObject us ptrs = + gsCNodes_update (\cns x. if x \ ptrs then Some us else cns x)" + "update_gs (ArchObject SmallPageObj) us ptrs = + gsUserPages_update (\ups x. if x \ ptrs then Some ARMSmallPage else ups x)" + "update_gs (ArchObject LargePageObj) us ptrs = + gsUserPages_update (\ups x. if x \ ptrs then Some ARMLargePage else ups x)" + "update_gs (ArchObject HugePageObj) us ptrs = + gsUserPages_update (\ups x. if x \ ptrs then Some ARMHugePage else ups x)" + "update_gs (ArchObject PageTableObj) us ptrs = ksArchState_update + (\as. gsPTTypes_update (\pt_types x. if x \ ptrs then Some NormalPT_T else pt_types x) as)" + "update_gs (ArchObject VSpaceObj) us ptrs = ksArchState_update + (\as. gsPTTypes_update (\pt_types x. if x \ ptrs then Some VSRootPT_T else pt_types x) as)" + by (simp_all add: update_gs_def) + +lemma retype_ksPSpace_dom_same: + fixes x v + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ksPSpace s' x = Some v \ + foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s') x + = Some v" +proof - + have cover':"range_cover ptr sz (objBitsKO ko) m" + by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) + assume "ksPSpace s' x = Some v" + thus ?thesis + apply (clarsimp simp:foldr_upd_app_if[folded data_map_insert_def]) + apply (drule domI[where m = "ksPSpace s'"]) + apply (drule(1) IntI) + apply (erule_tac A = "A \ B" for A B in in_emptyE[rotated]) + apply (rule disjoint_subset[OF new_cap_addrs_subset[OF cover']]) + apply (clarsimp simp:ptr_add_def field_simps) + apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn']) + done +qed + +lemma retype_ksPSpace_None: + assumes ad: "pspace_aligned' s" "pspace_distinct' s" "pspace_bounded' s" + assumes pn: "pspace_no_overlap' ptr sz s" + assumes cover: "range_cover ptr sz (objBitsKO val + gbits) n" + shows "\x. x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" +proof - + note cover' = range_cover_rel[where sbit' = "objBitsKO val",OF cover _ refl,simplified] + show "\x. 
x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" + apply (drule subsetD[OF new_cap_addrs_subset [OF cover' ]]) + apply (insert pspace_no_overlap_disjoint' [OF ad(1) pn]) + apply (fastforce simp: ptr_add_def p_assoc_help) + done +qed + +lemma retype_tcbSchedPrevs_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedPrevs_of + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedPrevs_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_tcbSchedNexts_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedNexts_of + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedNexts_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_inQ: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "\d p. + inQ d p |< tcbs_of' + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = inQ d p |< tcbs_of' s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (intro allI) + apply (rule ext) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) 
+ apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + fastforce simp add: makeObjectKO_def makeObject_tcb + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm + | fastforce)+ +qed + +lemma retype_ready_queues_relation: + assumes rlqr: "ready_queues_relation s s'" + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ready_queues_relation + (s \kheap := foldr (\p. data_map_insert p (default_object (APIType_map2 ty) dev us)) + (retype_addrs ptr (APIType_map2 ty) n us) (kheap s)\) + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\)" + using rlqr + unfolding ready_queues_relation_def Let_def + by (clarsimp simp: retype_tcbSchedNexts_of[OF vs' pn' ko cover num_r, simplified] + retype_tcbSchedPrevs_of[OF vs' pn' ko cover num_r, simplified] + retype_inQ[OF vs' pn' ko cover num_r, simplified]) + +lemma retype_state_relation: + notes data_map_insert_def[simp del] + assumes sr: "(s, s') \ state_relation" + and vs: "valid_pspace s" "valid_mdb s" + and et: "valid_etcbs s" "valid_list s" + and vs': "pspace_aligned' s'" "pspace_distinct' s'" + and pn: "pspace_no_overlap_range_cover ptr sz s" + and pn': "pspace_no_overlap' ptr sz s'" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + and ko: "makeObjectKO dev ty = Some ko" + and api: "obj_bits_api (APIType_map2 ty) us \ sz" + and orr: "obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + and num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "(ekheap_update + (\_. foldr (\p ekh a. if a = p then default_ext (APIType_map2 ty) default_domain else ekh a) + (retype_addrs ptr (APIType_map2 ty) n us) (ekheap s)) + s + \kheap := + foldr (\p. data_map_insert p (default_object (APIType_map2 ty) dev us)) + (retype_addrs ptr (APIType_map2 ty) n us) (kheap s)\, + update_gs (APIType_map2 ty) us (set (retype_addrs ptr (APIType_map2 ty) n us)) + (s'\ksPSpace := + foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) + (ksPSpace s')\)) + \ state_relation" + (is "(ekheap_update (\_. 
?eps) s\kheap := ?ps\, update_gs _ _ _ (s'\ksPSpace := ?ps'\)) + \ state_relation") + proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) + + have cover':"range_cover ptr sz (objBitsKO ko) m" + by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) + have al':"is_aligned ptr (objBitsKO ko)" + using cover' + by (simp add:range_cover_def) + have sz:"sz < word_bits" + using cover' + by (simp add:range_cover_def word_bits_def) + let ?t = "s\kheap := ?ps\" + let ?tp = "APIType_map2 ty" + let ?al = "retype_addrs ptr ?tp n us" + let ?t' = "update_gs ?tp us (set ?al) (s'\ksPSpace := ?ps'\)" + + note pad' = retype_aligned_distinct' [OF vs' pn' cover'] + thus pa': "pspace_aligned' (s'\ksPSpace := ?ps'\)" + and pd': "pspace_distinct' (s'\ksPSpace := ?ps'\)" + by simp_all + + note pa'' = pa'[simplified foldr_upd_app_if[folded data_map_insert_def]] + note pd'' = pd'[simplified foldr_upd_app_if[folded data_map_insert_def]] + + note not_unt = makeObjectKO_Untyped [OF ko] + show "null_filter (caps_of_state ?t) = null_filter (caps_of_state s)" + apply (rule null_filter_caps_of_state_foldr[folded data_map_insert_def]) + apply (simp add: not_unt) + apply (rule ballI) + apply (erule pspace_no_overlapD2 [OF pn _ cover vs(1)]) + done + + have nc_dis: "distinct (new_cap_addrs m ptr ko)" + by (rule new_cap_addrs_distinct [OF cover']) + + note nc_al = bspec [OF new_cap_addrs_aligned [OF al']] + note nc_al' = nc_al[unfolded objBits_def] + show "null_filter' (map_to_ctes ?ps') = null_filter' (ctes_of s')" + apply (rule null_filter_ctes_retype [OF ko vs' pa'' pd'']) + apply (simp add: nc_al) + apply clarsimp + apply (drule subsetD [OF new_cap_addrs_subset [OF cover']]) + apply (insert pspace_no_overlap_disjoint'[OF vs'(1) pn']) + apply (drule orthD1) + apply (simp add:ptr_add_def field_simps) + apply clarsimp + done + + show "valid_objs s" using vs + by (clarsimp simp: valid_pspace_def) + + show "valid_mdb s" using vs + by (clarsimp) + + show "valid_list s" using et + by (clarsimp) + + show "mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" using vs + by (clarsimp simp: valid_mdb_def) + + have pspr: "pspace_relation (kheap s) (ksPSpace s')" + using sr by (simp add: state_relation_def) + + thus "pspace_relation ?ps ?ps'" + by (rule retype_pspace_relation [OF _ vs vs' pn pn' ko cover orr num_r, + folded data_map_insert_def]) + + have "ekheap_relation (ekheap (s)) (ksPSpace s')" + using sr by (simp add: state_relation_def) + + thus "ekheap_relation ?eps ?ps'" + by (fold fun_upd_apply) (rule retype_ekheap_relation[OF _ pspr vs et(1) vs' pn pn' ko cover orr num_r]) + + have pn2: "\a\set ?al. 
kheap s a = None" + by (rule ccontr) (clarsimp simp: pspace_no_overlapD1[OF pn _ cover vs(1)]) + + from sr + have gr: "ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s'))" + by (rule state_relationE) + + show "ghost_relation ?ps (gsUserPages ?t') (gsCNodes ?t') (gsPTTypes (ksArchState ?t'))" + proof (cases ?tp) + case Untyped thus ?thesis by (simp add: not_unt) + next + note data_map_insert_def[simp] + + case TCBObject + from pn2 + have [simp]: "ups_of_heap ?ps = ups_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: ups_of_heap_def default_object_def TCBObject) + from pn2 + have [simp]: "cns_of_heap ?ps = cns_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: cns_of_heap_def default_object_def TCBObject) + from pn2 + have [simp]: "pt_types_of_heap ?ps = pt_types_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: pt_types_of_heap_def default_object_def TCBObject opt_map_def) + note data_map_insert_def[simp del] + from gr show ?thesis + by (simp add: ghost_relation_of_heap, simp add: TCBObject update_gs_def) + next + case EndpointObject + from pn2 + have [simp]: "ups_of_heap ?ps = ups_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: ups_of_heap_def default_object_def data_map_insert_def EndpointObject) + from pn2 + have [simp]: "cns_of_heap ?ps = cns_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: cns_of_heap_def default_object_def data_map_insert_def EndpointObject) + from pn2 + have [simp]: "pt_types_of_heap ?ps = pt_types_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: pt_types_of_heap_def default_object_def data_map_insert_def EndpointObject + opt_map_def) + from gr show ?thesis + by (simp add: ghost_relation_of_heap, + simp add: EndpointObject update_gs_def) + next + note data_map_insert_def[simp] + case NotificationObject + from pn2 + have [simp]: "ups_of_heap ?ps = ups_of_heap (kheap s)" + by - (rule ext, induct (?al), simp_all add: ups_of_heap_def + default_object_def NotificationObject) + from pn2 + have [simp]: "cns_of_heap ?ps = cns_of_heap (kheap s)" + by - (rule ext, induct (?al), simp_all add: cns_of_heap_def + default_object_def NotificationObject) + from pn2 + have [simp]: "pt_types_of_heap ?ps = pt_types_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: pt_types_of_heap_def default_object_def NotificationObject opt_map_def) + note data_map_insert_def[simp del] + from gr show ?thesis + by (simp add: ghost_relation_of_heap, + simp add: NotificationObject update_gs_def) + next + case CapTableObject + note data_map_insert_def[simp] + from pn2 + have [simp]: "ups_of_heap ?ps = ups_of_heap (kheap s)" + by - (rule ext, induct (?al), simp_all add: ups_of_heap_def + default_object_def CapTableObject) + have [simp]: "cns_of_heap ?ps = (\x. 
if x \ set ?al then Some us + else cns_of_heap (kheap s) x)" + by (rule ext, induct (?al), + simp_all add: cns_of_heap_def wf_empty_bits wf_unique default_object_def CapTableObject) + from pn2 + have [simp]: "pt_types_of_heap ?ps = pt_types_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: pt_types_of_heap_def default_object_def CapTableObject opt_map_def) + note data_map_insert_def[simp del] + from gr show ?thesis + by (simp add: ghost_relation_of_heap, + simp add: CapTableObject update_gs_def ext) + next + case (ArchObject ao) + from pn2 + have [simp]: "cns_of_heap ?ps = cns_of_heap (kheap s)" + by - (rule ext, induct (?al), simp_all add: cns_of_heap_def data_map_insert_def + default_object_def ArchObject) + from gr + have [simp]: "gsPTTypes (ksArchState s') = pt_types_of_heap (kheap s)" + by (clarsimp simp add: ghost_relation_of_heap) + from pn2 ArchObject + have [simp]: "pt_types_of_heap ?ps = gsPTTypes (ksArchState ?t')" + apply - + apply (rule ext) + apply (induct (?al)) + apply (simp add: update_gs_def ArchObject split: aobject_type.splits) + apply (cases ao; + simp add: data_map_insert_def pt_types_of_heap_def default_object_def + default_arch_object_def opt_map_def update_gs_def) + done + from pn2 gr show ?thesis + apply (clarsimp simp add: ghost_relation_of_heap) + apply (rule conjI[rotated]) + apply (simp add: ArchObject update_gs_def split: aobject_type.splits) + apply (thin_tac "cns_of_heap h = g" for h g) + apply (drule sym) + apply (rule ext) + apply (induct (?al)) + apply (simp add: update_gs_def ArchObject split: aobject_type.splits) + apply (simp add: update_gs_def ArchObject default_object_def + default_arch_object_def ups_of_heap_def + data_map_insert_def + split: aobject_type.splits) + done + qed + + have [simp]: "\s. gsPTTypes_update (\_. gsPTTypes s) s = s" + by (case_tac s, simp) + + show "\f' g' h' pt_fn'. ?t' = + s'\ksPSpace := f' (ksPSpace s'), gsUserPages := g' (gsUserPages s'), + gsCNodes := h' (gsCNodes s'), + ksArchState := (ksArchState s') \gsPTTypes := pt_fn' (gsPTTypes (ksArchState s'))\\" + apply (clarsimp simp: update_gs_def + split: Structures_A.apiobject_type.splits) + apply (intro conjI impI) + apply (subst ex_comm, rule_tac x=id in exI, + subst ex_comm, rule_tac x=id in exI, + subst ex_comm, rule_tac x=id in exI, fastforce)+ + apply (subst ex_comm, rule_tac x=id in exI) + apply (subst ex_comm) + apply (rule_tac x="\cns x. if x\set ?al then Some us else cns x" in exI, + simp) + apply (subst ex_comm, rule_tac x=id in exI) + apply (rule_tac x="\x. foldr (\addr. data_map_insert addr ko) + (new_cap_addrs m ptr ko) x" in exI, simp) + apply clarsimp + apply (rule_tac x="\x. foldr (\addr. data_map_insert addr ko) + (new_cap_addrs m ptr ko) x" in exI) + apply (subst ex_comm, rule_tac x=id in exI) + apply (simp split: aobject_type.splits) + apply (intro conjI impI) + apply (subst ex_comm, rule_tac x=id in exI) + apply (rule_tac x="\cns x. if x \ set ?al then Some ARMSmallPage + else cns x" in exI, simp) + apply (subst ex_comm, rule_tac x=id in exI) + apply (rule_tac x="\cns x. if x \ set ?al then Some ARMLargePage + else cns x" in exI, simp) + apply (subst ex_comm, rule_tac x=id in exI) + apply (rule_tac x="\cns x. if x \ set ?al then Some ARMHugePage + else cns x" in exI, simp) + apply (rule_tac x=id in exI) + apply (rule_tac x="\pt_types x. if x \ set ?al then Some NormalPT_T + else pt_types x" in exI) + apply (cases s', rename_tac arch machine, case_tac arch) + apply fastforce + apply (rule_tac x=id in exI) + apply (rule_tac x="\pt_types x. 
if x \ set ?al then Some VSRootPT_T + else pt_types x" in exI) + apply (cases s', rename_tac arch machine, case_tac arch) + apply fastforce + apply (rule_tac x=id in exI, simp)+ + done + + have rdyqrel: "ready_queues_relation s s'" + using sr by (simp add: state_relation_def) + + thus "ready_queues_relation_2 (ready_queues s) (ksReadyQueues s') + (?ps' |> tcb_of' |> tcbSchedNext) (?ps' |> tcb_of' |> tcbSchedPrev) + (\d p. inQ d p |< (?ps' |> tcb_of'))" + using retype_ready_queues_relation[OF _ vs' pn' ko cover num_r] + by (clarsimp simp: ready_queues_relation_def Let_def) + +qed + +lemma new_cap_addrs_fold': + "1 \ n \ + map (\n. ptr + (n << objBitsKO ko)) [0.e.n - 1] = + new_cap_addrs (unat n) ptr ko" + by (clarsimp simp:new_cap_addrs_def ptr_add_def upto_enum_red' + shiftl_t2n power_add field_simps) + +lemma objBitsKO_gt_0: "0 < objBitsKO ko" + apply (case_tac ko) + apply (simp_all add:objBits_simps' pageBits_def) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object) + apply (simp_all add:archObjSize_def bit_simps) + done + +lemma kheap_ekheap_double_gets: + "(\rv erv rv'. \pspace_relation rv rv'; ekheap_relation erv rv'\ + \ corres r (R rv erv) (R' rv') (b rv erv) (d rv')) \ + corres r (\s. R (kheap s) (ekheap s) s) (\s. R' (ksPSpace s) s) + (do x \ gets kheap; xa \ gets ekheap; b x xa od) (gets ksPSpace >>= d)" + apply (rule corres_symb_exec_l) + apply (rule corres_guard_imp) + apply (rule_tac r'= "\erv rv'. ekheap_relation erv rv' \ pspace_relation x rv'" + in corres_split) + apply (subst corres_gets[where P="\s. x = kheap s" and P'=\]) + apply clarsimp + apply (simp add: state_relation_def) + apply clarsimp + apply assumption + apply (wp gets_exs_valid | simp)+ + done + +(* + +Split out the extended operation that sets the etcb domains. + +This allows the existing corres proofs in this file to more-or-less go +through as they stand. + +A more principled fix would be to change the abstract spec and +generalise init_arch_objects to initialise other object types. + +*) + +definition retype_region2_ext :: "obj_ref list \ Structures_A.apiobject_type \ unit det_ext_monad" where + "retype_region2_ext ptrs type \ modify (\s. ekheap_update (foldr (\p ekh. (ekh(p := default_ext type default_domain))) ptrs) s)" + +crunch all_but_exst[wp]: retype_region2_ext "all_but_exst P" +crunch (empty_fail) empty_fail[wp]: retype_region2_ext + +end + +interpretation retype_region2_ext_extended: is_extended "retype_region2_ext ptrs type" + by (unfold_locales; wp) + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition + "retype_region2_extra_ext ptrs type \ + when (type = Structures_A.TCBObject) (do + cdom \ gets cur_domain; + mapM_x (ethread_set (\tcb. tcb\tcb_domain := cdom\)) ptrs + od)" + +crunch all_but_exst[wp]: retype_region2_extra_ext "all_but_exst P" (wp: mapM_x_wp) +crunch (empty_fail) empty_fail[wp]: retype_region2_extra_ext (wp: mapM_x_wp) + +end + +interpretation retype_region2_extra_ext_extended: is_extended "retype_region2_extra_ext ptrs type" + by (unfold_locales; wp) + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition + retype_region2 :: "obj_ref \ nat \ nat \ Structures_A.apiobject_type \ bool \ (obj_ref list,'z::state_ext) s_monad" +where + "retype_region2 ptr numObjects o_bits type dev \ do + obj_size \ return $ 2 ^ obj_bits_api type o_bits; + ptrs \ return $ map (\p. ptr_add ptr (p * obj_size)) [0..< numObjects]; + when (type \ Structures_A.Untyped) (do + kh \ gets kheap; + kh' \ return $ foldr (\p kh. 
kh(p \ default_object type dev o_bits)) ptrs kh; + do_extended_op (retype_region2_ext ptrs type); + modify $ kheap_update (K kh') + od); + return $ ptrs + od" + +lemma retype_region_ext_modify_kheap_futz: + "(retype_region2_extra_ext ptrs type :: (unit, det_ext) s_monad) >>= (\_. modify (kheap_update f)) + = (modify (kheap_update f) >>= (\_. retype_region2_extra_ext ptrs type))" + apply (clarsimp simp: retype_region_ext_def retype_region2_ext_def retype_region2_extra_ext_def when_def bind_assoc) + apply (subst oblivious_modify_swap) + defer + apply (simp add: bind_assoc) + apply (rule oblivious_bind) + apply simp + apply (rule oblivious_mapM_x) + apply (clarsimp simp: ethread_set_def set_eobject_def) + apply (rule oblivious_bind) + apply (simp add: gets_the_def) + apply (rule oblivious_bind) + apply (clarsimp simp: get_etcb_def) + apply simp + apply (simp add: modify_def[symmetric]) +done + +lemmas retype_region_ext_modify_kheap_futz' = fun_cong[OF arg_cong[where f=Nondet_Monad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] + +lemma foldr_upd_app_if_eta_futz: + "foldr (\p ps. ps(p \ f p)) as = (\g x. if x \ set as then Some (f x) else g x)" +apply (rule ext) +apply (rule foldr_upd_app_if) +done + +lemma modify_ekheap_update_comp_futz: + "modify (ekheap_update (f \ g)) = modify (ekheap_update g) >>= (K (modify (ekheap_update f)))" +by (simp add: o_def modify_def bind_def gets_def get_def put_def) + +lemma mapM_x_modify_futz: + assumes "\ptr\set ptrs. ekheap s ptr \ None" + shows "mapM_x (ethread_set F) (rev ptrs) s + = modify (ekheap_update (foldr (\p ekh. ekh(p := Some (F (the (ekh p))))) ptrs)) s" (is "?lhs ptrs s = ?rhs ptrs s") +using assms +proof(induct ptrs arbitrary: s) + case Nil thus ?case by (simp add: mapM_x_Nil return_def simpler_modify_def) +next + case (Cons ptr ptrs s) + have "?rhs (ptr # ptrs) s + = (do modify (ekheap_update (foldr (\p ekh. ekh(p \ F (the (ekh p)))) ptrs)); + modify (ekheap_update (\ekh. ekh(ptr \ F (the (ekh ptr))))) + od) s" + by (simp only: foldr_Cons modify_ekheap_update_comp_futz) simp + also have "... = (do ?lhs ptrs; + modify (ekheap_update (\ekh. ekh(ptr \ F (the (ekh ptr))))) + od) s" + apply (rule monad_eq_split_tail) + apply simp + apply (rule Cons.hyps[symmetric]) + using Cons.prems + apply force + done + also have "... = ?lhs (ptr # ptrs) s" + apply (simp add: mapM_x_append mapM_x_singleton) + apply (rule monad_eq_split2[OF refl, where + P="\s. \ptr\set (ptr # ptrs). ekheap s ptr \ None" + and Q="\_ s. ekheap s ptr \ None"]) + apply (simp add: ethread_set_def + assert_opt_def get_etcb_def gets_the_def gets_def get_def modify_def put_def set_eobject_def + bind_def fail_def return_def split_def + split: option.splits) + apply ((wp mapM_x_wp[OF _ subset_refl] | simp add: ethread_set_def set_eobject_def)+)[1] + using Cons.prems + apply force + done + finally show ?case by (rule sym) +qed + +lemma awkward_fold_futz: + "fold (\p ekh. ekh(p \ the (ekh p)\tcb_domain := cur_domain s\)) ptrs ekh + = (\x. if x \ set ptrs then Some ((the (ekh x))\tcb_domain := cur_domain s\) else ekh x)" +by (induct ptrs arbitrary: ekh) (simp_all add: fun_eq_iff) + +lemma retype_region2_ext_retype_region_ext_futz: + "retype_region2_ext ptrs type >>= (\_. retype_region2_extra_ext ptrs type) + = retype_region_ext ptrs type" +proof(cases type) + case TCBObject + have complete_futz: + "\F x. modify (ekheap_update (\_. F (cur_domain x) (ekheap x))) x = modify (ekheap_update (\ekh. 
F (cur_domain x) ekh)) x" + by (simp add: modify_def get_def get_etcb_def put_def bind_def return_def) + have second_futz: + "\f G. + do modify (ekheap_update f); + cdom \ gets (\s. cur_domain s); + G cdom + od = + do cdom \ gets (\s. cur_domain s); + modify (ekheap_update f); + G cdom + od" + by (simp add: bind_def gets_def get_def return_def simpler_modify_def) + from TCBObject show ?thesis + apply (clarsimp simp: retype_region_ext_def retype_region2_ext_def retype_region2_extra_ext_def when_def bind_assoc) + apply (clarsimp simp: exec_gets fun_eq_iff) + apply (subst complete_futz) + apply (simp add: second_futz[simplified] exec_gets) + apply (simp add: default_ext_def exec_modify) + apply (subst mapM_x_modify_futz[where ptrs="rev ptrs", simplified]) + apply (simp add: foldr_upd_app_if_eta_futz) + apply (simp add: modify_def exec_get put_def o_def) + apply (simp add: foldr_upd_app_if_eta_futz foldr_conv_fold awkward_fold_futz) + apply (simp cong: if_cong) + done +qed (auto simp: fun_eq_iff retype_region_ext_def retype_region2_ext_def retype_region2_extra_ext_def + put_def gets_def get_def bind_def return_def mk_ef_def modify_def foldr_upd_app_if' when_def default_ext_def) + +lemma retype_region2_ext_retype_region: + "(retype_region ptr numObjects o_bits type dev :: (obj_ref list, det_ext) s_monad) + = (do ptrs \ retype_region2 ptr numObjects o_bits type dev; + retype_region2_extra_ext ptrs type; + return ptrs + od)" +apply (clarsimp simp: retype_region_def retype_region2_def when_def bind_assoc) + apply safe + defer + apply (simp add: retype_region2_extra_ext_def) +apply (subst retype_region_ext_modify_kheap_futz'[simplified bind_assoc]) +apply (subst retype_region2_ext_retype_region_ext_futz[symmetric]) +apply (simp add: bind_assoc) +done + +lemma getObject_tcb_gets: + "getObject addr >>= (\x::tcb. gets proj >>= (\y. G x y)) + = gets proj >>= (\y. getObject addr >>= (\x. G x y))" +by (auto simp: exec_gets fun_eq_iff intro: bind_apply_cong dest!: in_inv_by_hoareD[OF getObject_inv_tcb]) + +lemma setObject_tcb_gets_ksCurDomain: + "setObject addr (tcb::tcb) >>= (\_. gets ksCurDomain >>= G) + = gets ksCurDomain >>= (\x. setObject addr tcb >>= (\_. G x))" +apply (clarsimp simp: exec_gets fun_eq_iff) +apply (rule bind_apply_cong) + apply simp +apply (drule_tac P1="\cdom. cdom = ksCurDomain x" in use_valid[OF _ setObject_cd_inv]) +apply (simp_all add: exec_gets) +done + +lemma curDomain_mapM_x_futz: + "curDomain >>= (\cdom. mapM_x (threadSet (F cdom)) addrs) + = mapM_x (\addr. curDomain >>= (\cdom. threadSet (F cdom) addr)) addrs" +proof(induct addrs) + case Nil thus ?case + by (simp add: curDomain_def mapM_x_def sequence_x_def bind_def gets_def get_def return_def) +next + case (Cons addr addrs) + have H: "\G. do cdom \ curDomain; + _ \ threadSet (F cdom) addr; + G cdom + od + = do cdom \ curDomain; + threadSet (F cdom) addr; + cdom \ curDomain; + G cdom + od" + by (simp add: bind_assoc curDomain_def threadSet_def setObject_tcb_gets_ksCurDomain + getObject_tcb_gets double_gets_drop_regets) + from Cons.hyps show ?case + apply (simp add: mapM_x_def sequence_x_def) + apply (simp add: bind_assoc foldr_map o_def) + apply (subst H) + apply (simp add: mapM_x_def sequence_x_def) + done +qed + +(* + +The existing proof continues below. + +*) + +lemma modify_ekheap_update_ekheap: + "modify (\s. ekheap_update f s) = do s \ gets ekheap; modify (\s'. 
s'\ekheap := f s\) od" +by (simp add: modify_def gets_def get_def put_def bind_def return_def split_def fun_eq_iff) + +lemma corres_retype': + assumes not_zero: "n \ 0" + and aligned: "is_aligned ptr (objBitsKO ko + gbits)" + and obj_bits_api: "obj_bits_api (APIType_map2 ty) us = + objBitsKO ko + gbits" + and check: "(sz < obj_bits_api (APIType_map2 ty) us) + = (sz < objBitsKO ko + gbits)" + and usv: "APIType_map2 ty = Structures_A.CapTableObject \ 0 < us" + and ko: "makeObjectKO dev ty = Some ko" + and orr: "obj_bits_api (APIType_map2 ty) us \ sz \ + obj_relation_retype + (default_object (APIType_map2 ty) dev us) ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + shows "corres (\rv rv'. rv' = g rv) + (\s. valid_pspace s \ pspace_no_overlap_range_cover ptr sz s + \ valid_mdb s \ valid_etcbs s \ valid_list s) + (\s. pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s) + (retype_region2 ptr n us (APIType_map2 ty) dev) + (do addrs \ createObjects ptr n ko gbits; + _ \ modify (update_gs (APIType_map2 ty) us (set addrs)); + return (g addrs) od)" + (is "corres ?r ?P ?P' ?C ?A") +proof - + note data_map_insert_def[simp del] + have not_zero':"((of_nat n)::machine_word) \ 0" + by (rule range_cover_not_zero[OF not_zero cover]) + have shiftr_not_zero:" ((of_nat n)::machine_word) << gbits \ 0" + apply (rule range_cover_not_zero_shift[OF not_zero cover]) + apply (simp add:obj_bits_api) + done + have unat_of_nat_shift:"unat (((of_nat n)::machine_word) << gbits) = + (n * 2^ gbits)" + apply (rule range_cover.unat_of_nat_n_shift[OF cover]) + using obj_bits_api + apply simp + done + have unat_of_nat_shift': + "unat (((of_nat n)::machine_word) * 2^(gbits + objBitsKO ko)) = + n * 2^(gbits + objBitsKO ko)" + apply (subst mult.commute) + apply (simp add:shiftl_t2n[symmetric]) + apply (rule range_cover.unat_of_nat_n_shift[OF cover]) + using obj_bits_api + apply simp + done + have unat_of_nat_n': + "unat (((of_nat n)::machine_word) * 2 ^ (gbits + objBitsKO ko)) \ 0" + by (simp add:unat_of_nat_shift' not_zero) + have bound:"obj_bits_api (APIType_map2 ty) us \ sz" + using cover + by (simp add:range_cover_def) + have n_estimate: "n < 2 ^ (word_bits - (objBitsKO ko + gbits))" + apply (rule le_less_trans) + apply (rule range_cover.range_cover_n_le(2)[OF cover]) + apply (rule power_strict_increasing) + apply (simp add:obj_bits_api ko) + apply (rule diff_less_mono) + using cover obj_bits_api + apply (simp_all add:range_cover_def ko word_bits_def) + done + + have set_retype_addrs_fold: + "image (\n. ptr + 2 ^ obj_bits_api (APIType_map2 ty) us * n) + {x. x \ of_nat n - 1} = + set (retype_addrs ptr (APIType_map2 ty) n us)" + apply (clarsimp simp: retype_addrs_def image_def Bex_def ptr_add_def + Collect_eq) + apply (rule iffI) + apply (clarsimp simp: field_simps word_le_nat_alt) + apply (rule_tac x="unat x" in exI) + apply (simp add: unat_sub_if_size range_cover.unat_of_nat_n[OF cover] + not_le not_zero + split: if_split_asm) + apply (clarsimp simp: field_simps word_le_nat_alt) + apply (rule_tac x="of_nat x" in exI) + apply (simp add: unat_sub_if_size range_cover.unat_of_nat_n[OF cover]) + apply (rule nat_le_Suc_less_imp) + apply (metis le_unat_uoi nat_less_le not_le_imp_less) + done + + have new_caps_adds_fold: + "map (\n. 
ptr + 2 ^ objBitsKO ko * n) [0.e.2 ^ gbits * of_nat n - 1] = + new_cap_addrs (2 ^ gbits * n) ptr ko" + apply (simp add: new_cap_addrs_def shiftl_t2n) + apply (subgoal_tac "1 \ (2::machine_word) ^ gbits * of_nat n") + apply (simp add: upto_enum_red' o_def) + apply (rule arg_cong2[where f=map, OF refl]) + apply (rule arg_cong2[where f=upt, OF refl]) + apply (metis mult.commute shiftl_t2n unat_of_nat_shift) + using shiftr_not_zero + apply (simp add: shiftl_t2n) + apply (metis word_less_1 word_not_le) + done + + from aligned + have al': "is_aligned ptr (obj_bits_api (APIType_map2 ty) us)" + by (simp add: obj_bits_api ko) + show ?thesis + apply (simp add: when_def retype_region2_def createObjects'_def + createObjects_def aligned obj_bits_api[symmetric] + ko[symmetric] al' shiftl_t2n data_map_insert_def[symmetric] + is_aligned_mask[symmetric] split_def unless_def + lookupAround2_pspace_no check + split del: if_split) + apply (subst retype_addrs_fold)+ + apply (subst if_P) + using ko + apply (clarsimp simp: makeObjectKO_def) + apply (simp add: bind_assoc retype_region2_ext_def) + apply (rule corres_guard_imp) + apply (subst modify_ekheap_update_ekheap) + apply (simp only: bind_assoc) + apply (rule kheap_ekheap_double_gets) + apply (rule corres_symb_exec_r) + apply (simp add: not_less modify_modify bind_assoc[symmetric] + obj_bits_api[symmetric] shiftl_t2n upto_enum_red' + range_cover.unat_of_nat_n[OF cover]) + apply (rule corres_split_nor[OF _ corres_trivial]) + apply (rename_tac x eps ps) + apply (rule_tac P="\s. x = kheap s \ eps = ekheap (s) \ ?P s" and + P'="\s. ps = ksPSpace s \ ?P' s" in corres_modify) + apply (simp add: set_retype_addrs_fold new_caps_adds_fold) + apply (erule retype_state_relation[OF _ _ _ _ _ _ _ _ _ cover _ _ orr], + simp_all add: ko not_zero obj_bits_api + bound[simplified obj_bits_api ko])[1] + apply (clarsimp simp: retype_addrs_fold[symmetric] ptr_add_def upto_enum_red' not_zero' + range_cover.unat_of_nat_n[OF cover] word_le_sub1) + apply (rule_tac f=g in arg_cong) + apply clarsimp + apply wp+ + apply (clarsimp split: option.splits) + apply (intro conjI impI) + apply (clarsimp|wp)+ + apply (clarsimp split: option.splits) + apply wpsimp + apply (clarsimp split: option.splits) + apply (intro conjI impI) + apply wp + apply (clarsimp simp:lookupAround2_char1) + apply wp + apply (clarsimp simp: obj_bits_api ko) + apply (drule(1) pspace_no_overlap_disjoint') + apply (rule_tac x1 = a in ccontr[OF in_empty_interE]) + apply simp + apply (clarsimp simp: not_less shiftL_nat) + apply (erule order_trans) + apply (subst p_assoc_help) + apply (subst word_plus_and_or_coroll2[symmetric,where w = "mask sz"]) + apply (subst add.commute) + apply (subst add.assoc) + apply (rule word_plus_mono_right) + using cover + apply - + apply (rule iffD2[OF word_le_nat_alt]) + apply (subst word_of_nat_minus) + using not_zero + apply simp + apply (rule le_trans[OF unat_plus_gt]) + apply simp + apply (subst unat_minus_one) + apply (subst mult.commute) + apply (rule word_power_nonzero_64) + apply (rule of_nat_less_pow_64[OF n_estimate]) + apply (simp add:word_bits_def objBitsKO_gt_0 ko) + apply (simp add:range_cover_def obj_bits_api ko word_bits_def) + apply (cut_tac not_zero',clarsimp simp:ko) + apply(clarsimp simp:field_simps ko) + apply (subst unat_sub[OF word_1_le_power]) + apply (simp add:range_cover_def) + apply (subst diff_add_assoc[symmetric]) + apply (cut_tac unat_of_nat_n',simp add:ko) + apply (clarsimp simp: obj_bits_api ko) + apply (rule diff_le_mono) + apply (frule 
range_cover.range_cover_compare_bound) + apply (cut_tac obj_bits_api unat_of_nat_shift') + apply (clarsimp simp:add.commute range_cover_def ko) + apply (rule is_aligned_no_wrap'[OF is_aligned_neg_mask,OF le_refl ]) + apply (simp add:range_cover_def domI)+ + done +qed + +lemma createObjects_corres': + "\corres r P P' f (createObjects a b ko d); ko = injectKO val\ + \ corres dc P P' f (createObjects' a b ko d)" + apply (clarsimp simp:corres_underlying_def createObjects_def return_def) + apply (rule conjI) + apply (clarsimp simp:bind_def split_def) + apply (drule(1) bspec) + apply (clarsimp simp:image_def) + apply (drule(1) bspec) + apply clarsimp + apply (erule bexI[rotated]) + apply simp + apply (clarsimp simp:bind_def split_def image_def) + apply (drule(1) bspec|clarsimp)+ + done + +lemmas retype_aligned_distinct'' = retype_aligned_distinct' + [unfolded foldr_upd_app_if[folded data_map_insert_def]] + +lemma retype_ko_wp_at': + assumes vs: "pspace_aligned' s" "pspace_distinct' s" + and pn: "pspace_no_overlap' ptr sz s" + and cover: "range_cover ptr sz (objBitsKO obj) n" + shows + "ko_wp_at' P p (s \ksPSpace := foldr (\addr. data_map_insert addr obj) + (new_cap_addrs n ptr obj) (ksPSpace s)\) + = (if p \ set (new_cap_addrs n ptr obj) then P obj + else ko_wp_at' P p s)" + apply (subst foldr_upd_app_if[folded data_map_insert_def]) + apply (rule foldr_update_ko_wp_at' [OF vs]) + apply (simp add: retype_aligned_distinct'' [OF vs pn cover])+ + apply (rule new_cap_addrs_aligned) + using cover + apply (simp add:range_cover_def cover) + done + +lemma retype_obj_at': + assumes vs: "pspace_aligned' s" "pspace_distinct' s" + and pn: "pspace_no_overlap' ptr sz s" + and cover: "range_cover ptr sz (objBitsKO obj) n" + shows + "obj_at' P p (s \ksPSpace := foldr (\addr. data_map_insert addr obj) + (new_cap_addrs n ptr obj) (ksPSpace s)\) + = (if p \ set (new_cap_addrs n ptr obj) then (\ko. projectKO_opt obj = Some ko \ P ko) + else obj_at' P p s)" + unfolding obj_at'_real_def + apply (rule retype_ko_wp_at'[OF vs pn cover]) +done + +lemma retype_obj_at_disj': + assumes vs: "pspace_aligned' s" "pspace_distinct' s" + and pn: "pspace_no_overlap' ptr sz s" + and cover: "range_cover ptr sz (objBitsKO obj) n" + shows + "obj_at' P p (s \ksPSpace := foldr (\addr. data_map_insert addr obj) + (new_cap_addrs n ptr obj) (ksPSpace s)\) + = (obj_at' P p s \ p \ set (new_cap_addrs n ptr obj) + \ (\ko. projectKO_opt obj = Some ko \ P ko))" + apply (simp add: retype_obj_at' [OF vs pn cover]) + apply (safe, simp_all) + apply (drule subsetD [OF new_cap_addrs_subset [OF cover]]) + apply (insert pspace_no_overlap_disjoint' [OF vs(1) pn ]) + apply (clarsimp simp: obj_at'_def) + apply (rule_tac x1 = p in ccontr[OF in_empty_interE]) + apply (simp add:ptr_add_def p_assoc_help domI)+ + done + +declare word_unat_power[symmetric,simp] + +lemma createObjects_ko_at_strg: + fixes ptr :: machine_word + assumes cover: "range_cover ptr sz ((objBitsKO ko) + gbits) n" + assumes not_0: "n\ 0" + assumes pi: "projectKO_opt ko = Some val" + shows "\\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ + createObjects ptr n ko gbits + \\r s. \x \ set r. \offs < 2 ^ gbits. 
ko_at' val (x + (offs << objBitsKO ko)) s\" +proof - + have shiftr_not_zero:" 1 \ ((of_nat n)::machine_word) << gbits" + using range_cover_not_zero_shift[OF not_0 cover,where gbits = gbits] + apply - + apply (simp add:word_le_sub1) + done + note unat_of_nat_shiftl = range_cover.unat_of_nat_n_shift[OF cover,where gbits = gbits,simplified] + have in_new:"\idx offs. \idx \ of_nat n - 1;offs<2 ^ gbits\ + \ ptr + (idx << objBitsKO ko + gbits) + (offs << objBitsKO ko) + \ set (new_cap_addrs (n * 2 ^ gbits) ptr ko)" + apply (insert range_cover_not_zero[OF not_0 cover] not_0) + apply (clarsimp simp:new_cap_addrs_def image_def) + apply (rule_tac x ="unat (2 ^ gbits * idx + offs)" in bexI) + apply (subst add.commute) + apply (simp add:shiftl_shiftl[symmetric]) + apply (simp add:shiftl_t2n distrib_left[symmetric]) + apply simp + apply (rule unat_less_helper) + apply (rule less_le_trans) + apply (erule word_plus_strict_mono_right) + apply (subst distrib_left[where c = "1 :: machine_word",symmetric,simplified]) + apply (subst mult.commute[where a = "2^gbits"])+ + apply (insert cover) + apply (rule word_mult_le_iff[THEN iffD2]) + apply (simp add:p2_gt_0) + apply (clarsimp simp:range_cover_def word_bits_def) + apply (drule range_cover_rel[where sbit' = "objBitsKO ko "]) + apply simp + apply simp + apply (rule less_le_trans) + apply (rule range_cover.range_cover_le_n_less) + apply simp + apply (subst unat_power_lower) + using cover + apply (clarsimp simp:range_cover_def) + apply (simp add:field_simps) + apply (rule unat_le_helper) + apply (erule order_trans[OF _ word_sub_1_le]) + apply (simp add:range_cover_not_zero[OF not_0 cover]) + apply (simp add:word_bits_def) + apply (drule range_cover_rel[where sbit' = "objBitsKO ko "]) + apply simp + apply simp + apply (erule less_le_trans[OF range_cover.range_cover_le_n_less(1)]) + apply (subst unat_power_lower) + using cover + apply (clarsimp simp:range_cover_def) + apply (simp add:field_simps) + apply (rule unat_le_helper[OF inc_le]) + apply (simp add:word_leq_minus_one_le) + apply (simp add:word_bits_def) + apply (rule no_plus_overflow_neg) + apply (rule less_le_trans[where y = "of_nat n"]) + apply unat_arith + using range_cover.range_cover_n_less[OF cover] + apply (simp add:word_bits_def) + apply (subst distrib_left[where c = "1 :: machine_word",symmetric,simplified]) + apply (subst mult.commute) + apply simp + apply (rule word_mult_le_iff[THEN iffD2]) + apply (simp add:p2_gt_0) + apply (simp add:range_cover_def word_bits_def) + apply (drule range_cover_rel[where sbit' = "objBitsKO ko "]) + apply simp + apply simp + apply (rule less_le_trans) + apply (rule range_cover.range_cover_le_n_less) + apply simp + apply (subst unat_power_lower) + using cover + apply (clarsimp simp:range_cover_def) + apply (simp add:field_simps) + apply (rule unat_le_helper) + apply unat_arith + apply (simp add:word_bits_def) + apply (drule range_cover_rel[where sbit' = "objBitsKO ko "]) + apply simp + apply simp + apply (rule less_le_trans) + apply (erule range_cover.range_cover_le_n_less) + apply (simp add:range_cover.unat_of_nat_n[OF cover]) + apply (simp add: unat_le_helper) + apply (simp add:word_bits_def) + apply unat_arith + done + show ?thesis + apply (simp add: split_def createObjects_def lookupAround2_pspace_no + alignError_def unless_def createObjects'_def) + apply (rule hoare_pre) + apply (wp|simp add:data_map_insert_def[symmetric] + cong: if_cong del: fun_upd_apply data_map_insert_def)+ + apply (wpc|wp|clarsimp simp del:fun_upd_apply)+ + apply (subst new_cap_addrs_fold'[OF 
shiftr_not_zero])+ + apply (subst data_map_insert_def[symmetric])+ + apply (subst retype_obj_at_disj') + apply (simp add:valid_pspace'_def unat_of_nat_shiftl)+ + apply (rule range_cover_rel[OF cover]) + apply simp+ + apply (subst retype_obj_at_disj') + apply (simp add:valid_pspace'_def unat_of_nat_shiftl)+ + apply (rule range_cover_rel[OF cover]) + apply simp+ + using range_cover.unat_of_nat_n_shift[OF cover,where gbits = gbits,simplified] pi + apply (simp add: in_new) + done +qed + +lemma createObjects_ko_at: + fixes ptr :: machine_word + assumes cover: "range_cover ptr sz ((objBitsKO ko) + gbits) n" + assumes not_0: "n\ 0" + assumes pi: "projectKO_opt ko = Some val" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s\ + createObjects ptr n ko gbits + \\r s. \x \ set r. \offs < 2 ^ gbits. ko_at' val (x + (offs << objBitsKO ko)) s\" + by (wp createObjects_ko_at_strg[OF cover not_0 pi],fastforce) + +lemma createObjects_obj_at: + fixes ptr :: machine_word and val :: "'a :: pspace_storable" + assumes cover:"range_cover ptr sz ((objBitsKO ko) + gbits) n" + and not_0:"n \ 0" + and pi: "\(val::'a). projectKO_opt ko = Some val" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s\ + createObjects ptr n ko gbits \\r s. \x \ set r. \offs < 2 ^ gbits. + obj_at' (\(x::'a). True) (x + (offs << objBitsKO ko)) s\" + apply (rule exE[OF pi]) + apply (erule_tac val1 = x in + hoare_post_imp [OF _ createObjects_ko_at [OF cover not_0 ],rotated]) + apply (intro allI ballI impI) + apply (drule(1) bspec) + apply (drule spec, drule(1) mp) + apply (clarsimp elim!: obj_at'_weakenE) + done + +(* until we figure out what we really need of page + mappings it's just alignment, which, fortunately, + is trivial *) +lemma createObjects_aligned: + assumes al: "is_aligned ptr (objBitsKO ko + gbits)" + and bound :"n < 2 ^ word_bits" "n\0" + and bound':"objBitsKO ko + gbits < word_bits" + shows "\\\ createObjects ptr n ko gbits + \\rv s. \x \ set rv. is_aligned x (objBitsKO ko + gbits)\" + apply (rule hoare_strengthen_post) + apply (rule createObjects_ret[OF bound]) + apply (clarsimp dest!: less_two_pow_divD) + apply (rule is_aligned_ptr_add_helper[OF al]) + apply (simp_all add:bound') + done + +lemma createObjects_aligned2: + "\\s. is_aligned ptr (objBitsKO ko + gbits) \ n < 2 ^ word_bits \ n \ 0 + \ aln < word_bits + \ aln = objBitsKO ko + gbits\ + createObjects ptr n ko gbits + \\rv s. \x \ set rv. is_aligned x aln\" + apply (rule hoare_name_pre_state) + apply simp + apply (rule hoare_pre, wp createObjects_aligned, simp_all) + done + +lemma range_cover_n_wb: + "range_cover (ptr :: obj_ref) sz us n \ n < 2 ^ word_bits" + apply (rule order_le_less_trans, erule range_cover.range_cover_n_le(2)) + apply (clarsimp simp: range_cover_def) + apply (simp add: word_bits_def) + done + +lemma createObjects_nonzero: + assumes not_0: "n \ 0" + assumes cover:"range_cover ptr sz ((objBitsKO ko) + bits) n" + shows "\\s. ptr \ 0\ + createObjects ptr n ko bits + \\rv s. \p \ set rv. 
p \ 0\" + apply (insert not_0) + apply (rule hoare_pre) + apply (rule hoare_gen_asm [where P = "ptr \ 0"]) + using cover + apply (clarsimp simp:range_cover_def) + apply (erule is_aligned_get_word_bits,simp_all) + apply (rule hoare_post_imp [OF _ createObjects_ret]) + apply (simp add: ptr_add_def) + apply (intro allI impI ballI) + apply (simp add:power_add[symmetric] mult.assoc) + apply (drule(1) range_cover_no_0[OF _ cover]) + apply (simp add: objBits_def) + apply (simp add: range_cover_n_wb[OF cover]) + apply simp + done + +lemma objBits_if_dev: + "objBitsKO (if dev then KOUserDataDevice else KOUserData) = pageBits" + by (simp add: objBitsKO_def) + +lemma cwo_ret: + assumes not_0: "n \ 0" + assumes cover: "range_cover ptr sz (pageBits + bs) n" + assumes sz: "bs = pageBitsForSize vmsz - pageBits" + shows "\pspace_no_overlap' ptr sz and valid_pspace'\ + createObjects ptr n (if dev then KOUserDataDevice else KOUserData) bs + \\rv s. \x\set rv. frame_at' x vmsz dev s\" +proof - + note create_objs_device = hoare_post_imp [OF _ hoare_conj [OF createObjects_ret + createObjects_ko_at[where val = UserDataDevice,simplified]]] + + note create_objs_normal = hoare_post_imp [OF _ hoare_conj [OF createObjects_ret + createObjects_ko_at[where val = UserData,simplified]]] + + show ?thesis + unfolding frame_at'_def + apply (cases dev) + apply (rule hoare_pre) + apply (rule create_objs_device) + apply (clarsimp simp add: sz pageBits_def) + apply (drule bspec, simp, drule spec, drule(1) mp) + apply (simp add: typ_at'_def obj_at'_real_def objBits_simps pageBits_def shiftl_t2n field_simps) + apply (erule ko_wp_at'_weakenE) + apply (clarsimp simp add: projectKO_opts_defs split: kernel_object.splits) + apply (rule le_less_trans[OF _ power_strict_increasing]) + apply (rule range_cover.range_cover_n_le(1)[OF cover]) + apply (simp add: word_bits_def pageBits_def not_0)+ + apply (rule range_cover_rel[OF cover]) + apply (simp add: objBitsKO_def pageBits_def not_0)+ + using not_0 + apply simp_all + apply (rule hoare_pre) + apply (rule create_objs_normal) + apply (clarsimp simp add: sz pageBits_def) + apply (drule bspec, simp, drule spec, drule(1) mp) + apply (simp add: typ_at'_def obj_at'_real_def objBits_simps pageBits_def shiftl_t2n field_simps) + apply (erule ko_wp_at'_weakenE) + apply (clarsimp simp add: projectKO_opts_defs split: kernel_object.splits) + apply (rule le_less_trans[OF _ power_strict_increasing]) + apply (rule range_cover.range_cover_n_le(1)[OF cover]) + apply (simp add: word_bits_def pageBits_def not_0)+ + apply (rule range_cover_rel[OF cover]) + apply (simp add: objBitsKO_def pageBits_def not_0)+ + done +qed + +lemmas capFreeIndex_update_valid_untyped' = + capFreeIndex_update_valid_cap'[unfolded valid_cap'_def,simplified,THEN conjunct2,THEN conjunct1] + +lemma range_cover_canonical_address': + "\ range_cover ptr sz us n; p < of_nat n; + canonical_address (ptr && ~~ mask sz); sz \ maxUntypedSizeBits \ + \ canonical_address (ptr + p * 2 ^ us)" + apply (frule range_cover_canonical_address[where p="unat p"]; simp?) 
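+  (* unat_less_helper turns the word bound p < of_nat n into the nat bound unat p < n needed by the frule above *)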
+ using unat_less_helper by blast + +lemma createNewCaps_valid_cap: + fixes ptr :: machine_word + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n " + assumes not_0: "n \ 0" + assumes ct: "ty = APIObjectType ArchTypes_H.CapTableObject \ 0 < us" + "ty = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits" + assumes ptr: "ptr \ 0" + + assumes ptr_cn: "canonical_address (ptr && ~~ mask sz)" + assumes sz_constrained: "sz \ maxUntypedSizeBits" + + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s\ + createNewCaps ty ptr n us dev + \\r s. (\cap \ set r. s \' cap)\" +proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + note if_split_def[split del] = if_split + note createObjects_nonzero' = createObjects_nonzero[OF not_0] + note cwo_ret' = cwo_ret[OF not_0] + show ?thesis + proof(cases "Types_H.toAPIType ty") + case None thus ?thesis + using not_0 + apply (clarsimp simp: createNewCaps_def Arch_createNewCaps_def) + using cover + apply (simp add: range_cover_def) + using cover + apply (clarsimp simp: AARCH64_H.toAPIType_def APIType_capBits_def + split: AARCH64_H.object_type.splits) + + apply (in_case "HugePageObject") + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits ball_conj_distrib) + apply (wp createObjects_aligned2 createObjects_nonzero' + cwo_ret'[where bs="2 * ptTranslationBits NormalPT_T", simplified] + | simp add: objBits_if_dev pageBits_def ptr range_cover_n_wb add.commute)+ + apply (simp add:pageBits_def ptr word_bits_def) + + apply (in_case "VSpaceObject") + apply wp + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits) + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_aligned[OF _ range_cover.range_cover_n_less(1) + [where 'a=64, unfolded word_bits_len_of, OF cover] + not_0]; + simp add: objBits_simps word_bits_def add.commute) + apply (rule createObjects_obj_at[where 'a=pte, OF _ not_0]; + simp add: objBits_simps) + apply simp + apply (clarsimp simp: objBits_simps page_table_at'_def typ_at_to_obj_at_arches) + apply (drule (1) bspec)+ + apply (clarsimp simp: pt_bits_def) + apply (erule_tac x="ucast i" in allE) + apply (erule impE) + apply (simp add: mask_def bit_simps split: if_splits) + apply unat_arith + apply clarsimp + + apply (in_case "SmallPageObject") + apply wp + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits ball_conj_distrib) + apply (wp createObjects_aligned2 createObjects_nonzero' + cwo_ret'[where bs=0, simplified] + | simp add: objBits_if_dev pageBits_def ptr range_cover_n_wb)+ + apply (simp add:pageBits_def ptr word_bits_def) + + apply (in_case \LargePageObject\) + apply wp + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits ball_conj_distrib) + apply (wp createObjects_aligned2 createObjects_nonzero' + cwo_ret'[where bs="ptTranslationBits NormalPT_T", simplified] + | simp add: objBits_if_dev pageBits_def ptr range_cover_n_wb)+ + apply (simp add:pageBits_def ptr word_bits_def) + + apply (in_case \PageTableObject\) + apply wp + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits) + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_aligned[OF _ range_cover.range_cover_n_less(1) + [where 'a=64, unfolded word_bits_len_of, OF cover] + not_0]; + simp add: objBits_simps bit_simps word_bits_def) + apply (rule 
createObjects_obj_at[where 'a=pte, OF _ not_0]; + simp add: objBits_simps bit_simps) + apply simp + apply (clarsimp simp: objBits_simps bit_simps page_table_at'_def typ_at_to_obj_at_arches) + apply (drule (1) bspec)+ + apply (erule_tac x="ucast i" in allE) + apply (erule impE) + apply (simp add: mask_def) + apply unat_arith + apply clarsimp + apply simp + + apply (in_case \VCPUObject\) + apply (wpsimp wp: hoare_vcg_const_Ball_lift simp: valid_cap'_def capAligned_def n_less_word_bits)+ + apply (simp only: imp_conv_disj typ_at_to_obj_at_arches pageBits_def) + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_aligned[OF _ range_cover.range_cover_n_less(1) + [where 'a=machine_word_len, unfolded word_bits_len_of, + OF cover] not_0]) + apply (simp add:objBits_simps)+ + apply (rule createObjects_obj_at [where 'a=vcpu, OF _ not_0]) + apply (simp add: objBits_simps) + apply simp + apply simp + apply simp + apply (clarsimp simp: objBits_simps) + apply simp + done + next + case (Some a) thus ?thesis + proof(cases a) + case Untyped with Some cover ct show ?thesis + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def fromIntegral_def + toInteger_nat fromInteger_nat APIType_capBits_def + split: AARCH64_H.object_type.splits) + apply wp + apply (intro ballI) + apply (clarsimp simp: image_def upto_enum_red' valid_cap'_def capAligned_def + split: capability.splits) + apply (drule word_leq_minus_one_le[rotated]) + apply (rule range_cover_not_zero[OF not_0 cover]) + apply (intro conjI) + apply (rule is_aligned_add_multI[OF _ le_refl refl]) + apply (fastforce simp:range_cover_def word_bits_def)+ + apply (clarsimp simp:valid_untyped'_def ko_wp_at'_def obj_range'_def) + apply (drule(1) pspace_no_overlapD'[rotated]) + apply (frule(1) range_cover_cell_subset) + apply (erule disjE) + apply (simp add: mask_def add_diff_eq) + apply (drule psubset_imp_subset) + apply (drule(1) disjoint_subset2[rotated]) + apply (drule(1) disjoint_subset) + apply (drule(1) range_cover_subset_not_empty) + apply clarsimp+ + apply (simp add: mask_def add_diff_eq) + apply blast + apply (drule(1) range_cover_no_0[OF ptr _ unat_less_helper]) + apply simp + apply (erule (1) range_cover_canonical_address') + apply (rule ptr_cn) + apply (rule sz_constrained) + done + next + case TCBObject with Some cover ct show ?thesis + including no_pre + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def + fromIntegral_def toInteger_nat fromInteger_nat APIType_capBits_def curDomain_def + split: AARCH64_H.object_type.splits) + apply (wp mapM_x_wp' hoare_vcg_const_Ball_lift)+ + apply (rule hoare_post_imp) + prefer 2 + apply (rule createObjects_obj_at [where 'a = "tcb",OF _ not_0]) + using cover + apply (clarsimp simp: AARCH64_H.toAPIType_def APIType_capBits_def objBits_simps + split: AARCH64_H.object_type.splits) + apply simp + apply (clarsimp simp: valid_cap'_def objBits_simps) + apply (fastforce intro: capAligned_tcbI) + done + next + case EndpointObject with Some cover ct show ?thesis + including no_pre + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def + fromIntegral_def toInteger_nat fromInteger_nat APIType_capBits_def + split: AARCH64_H.object_type.splits) + apply wp + apply (rule hoare_post_imp) + prefer 2 + apply (rule createObjects_obj_at [where 'a=endpoint, OF _ not_0]) + using cover + apply (clarsimp simp: AARCH64_H.toAPIType_def 
APIType_capBits_def objBits_simps + split: AARCH64_H.object_type.splits) + apply (simp) + apply (clarsimp simp: valid_cap'_def objBits_simps) + apply (fastforce intro: capAligned_epI) + done + next + case NotificationObject with Some cover ct show ?thesis + including no_pre + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def + fromIntegral_def toInteger_nat fromInteger_nat APIType_capBits_def + split: AARCH64_H.object_type.splits) + apply wp + apply (rule hoare_post_imp) + prefer 2 + apply (rule createObjects_obj_at [where 'a="notification", OF _ not_0]) + using cover + apply (clarsimp simp: AARCH64_H.toAPIType_def APIType_capBits_def objBits_simps + split: AARCH64_H.object_type.splits) + apply (simp) + apply (clarsimp simp: valid_cap'_def objBits_simps) + apply (fastforce intro: capAligned_ntfnI) + done + next + case CapTableObject with Some cover ct show ?thesis + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def + fromIntegral_def toInteger_nat fromInteger_nat APIType_capBits_def + split: AARCH64_H.object_type.splits) + apply wp + apply (clarsimp simp: AARCH64_H.toAPIType_def APIType_capBits_def objBits_simps + split: AARCH64_H.object_type.split object_type.splits) + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_aligned [OF _ _ not_0 ]) + apply ((clarsimp simp:objBits_simps range_cover_def range_cover.range_cover_n_less[where 'a=64, unfolded word_bits_len_of, OF cover])+)[3] + apply (simp add: word_bits_def) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_ret [OF range_cover.range_cover_n_less(1)[where 'a=64, unfolded word_bits_len_of, OF cover] not_0]) + apply (rule createObjects_obj_at [where 'a=cte, OF _ not_0]) + apply (simp add: objBits_simps APIType_capBits_def) + apply (simp) + apply simp + apply (clarsimp simp: valid_cap'_def capAligned_def objBits_simps + dest!: less_two_pow_divD) + apply (thin_tac "\x\S. 
is_aligned (p x) n" for S p n) + apply (intro conjI) + apply ((simp add:range_cover_def word_bits_def)+)[2] + apply (clarsimp simp: power_sub) + apply (drule bspec, simp) + apply (drule_tac x = "addr && mask us" in spec) + apply (drule mp) + apply simp + apply (rule and_mask_less') + apply (simp add: range_cover_def word_bits_def) + apply (clarsimp simp add: shiftl_t2n) + apply simp + done + qed + qed +qed + +lemma other_objs_default_relation: + "\ case ty of Structures_A.EndpointObject \ ko = injectKO (makeObject :: endpoint) + | Structures_A.NotificationObject \ ko = injectKO (makeObject :: Structures_H.notification) + | _ \ False \ \ + obj_relation_retype (default_object ty dev n) ko" + apply (rule obj_relation_retype_other_obj) + apply (clarsimp simp: default_object_def + is_other_obj_relation_type_def + split: Structures_A.apiobject_type.split_asm) + apply (clarsimp simp: other_obj_relation_def default_object_def + ep_relation_def ntfn_relation_def + tcb_relation_def default_tcb_def makeObject_tcb + makeObject_cte new_context_def newContext_def + default_ep_def makeObject_endpoint default_notification_def + makeObject_notification default_ntfn_def + fault_rel_optionation_def + initContext_def newFPUState_def + arch_tcb_context_get_def atcbContextGet_def + default_arch_tcb_def newArchTCB_def + arch_tcb_relation_def + split: Structures_A.apiobject_type.split_asm) + done + +lemma tcb_relation_retype: + "obj_relation_retype (default_object Structures_A.TCBObject dev n) (KOTCB makeObject)" + by (clarsimp simp: tcb_relation_cut_def default_object_def obj_relation_retype_def + tcb_relation_def default_tcb_def + makeObject_tcb makeObject_cte new_context_def newContext_def newFPUState_def + fault_rel_optionation_def initContext_def default_priority_def + default_arch_tcb_def newArchTCB_def arch_tcb_relation_def objBits_simps') + +lemma captable_relation_retype: + "n < word_bits \ + obj_relation_retype (default_object Structures_A.CapTableObject dev n) (KOCTE makeObject)" + apply (clarsimp simp: obj_relation_retype_def default_object_def + wf_empty_bits objBits_simps' + dom_empty_cnode ex_with_length cte_level_bits_def) + apply (rule conjI) + defer + apply (clarsimp simp: cte_relation_def empty_cnode_def makeObject_cte) + apply (rule set_eqI, rule iffI) + apply (clarsimp simp: cte_map_def') + apply (rule_tac x="of_bl y" in exI) + apply (simp add: of_bl_length[where 'a=64, folded word_bits_def]) + apply (clarsimp simp: image_def cte_map_def') + apply (rule_tac x="drop (word_bits - n) (to_bl xa)" in exI) + apply (simp add: of_drop_to_bl word_bits_def word_size) + apply (simp add: less_mask_eq) + done + +lemma pagetable_relation_retype: + "obj_relation_retype (default_object (ArchObject PageTableObj) dev n) + (KOArch (KOPTE makeObject))" + apply (simp add: default_object_def default_arch_object_def + makeObject_pte obj_relation_retype_def + objBits_simps pte_relation_def table_size_def) + apply (clarsimp simp: range_composition[symmetric] shiftl_t2n field_simps) + apply (fastforce simp add: image_iff le_mask_iff_lt_2n[THEN iffD1]) + done + +lemma vsroot_relation_retype: + "obj_relation_retype (default_object (ArchObject VSpaceObj) dev n) + (KOArch (KOPTE makeObject))" + apply (simp add: default_object_def default_arch_object_def + makeObject_pte obj_relation_retype_def + objBits_simps pte_relation_def table_size_def) + apply (clarsimp simp: range_composition[symmetric] shiftl_t2n field_simps) + apply (fastforce simp add: image_iff le_mask_iff_lt_2n[THEN iffD1]) + done + +lemmas makeObjectKO_simps 
= makeObjectKO_def[split_simps AARCH64_H.object_type.split + apiobject_type.split sum.split kernel_object.split ] + +lemma corres_retype: + assumes not_zero: "n \ 0" + and aligned: "is_aligned ptr (objBitsKO ko + gbits)" + and obj_bits_api: "obj_bits_api (APIType_map2 ty) us = objBitsKO ko + gbits" + and tp: "APIType_map2 ty \ no_gs_types" + and ko: "makeObjectKO dev ty = Some ko" + and orr: "obj_bits_api (APIType_map2 ty) us \ sz \ + obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + shows "corres (=) + (\s. valid_pspace s \ pspace_no_overlap_range_cover ptr sz s + \ valid_mdb s \ valid_etcbs s \ valid_list s) + (\s. pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s + \ (\val. ko = injectKO val)) + (retype_region2 ptr n us (APIType_map2 ty) dev) (createObjects ptr n ko gbits)" + apply (rule corres_guard_imp) + apply (rule_tac F = "(\val. ko = injectKO val)" in corres_gen_asm2) + apply (erule exE) + apply (rule corres_rel_imp) + apply (rule corres_retype'[where g=id and ty=ty and sz = sz,OF not_zero aligned _ _ _ ko + ,simplified update_gs_id[OF tp] modify_id_return,simplified]) + using assms + apply (simp_all add: objBits_def no_gs_types_def) + apply auto + done + +lemma init_arch_objects_APIType_map2: + "init_arch_objects (APIType_map2 (Inr ty)) ptr bits sz refs = + (case ty of APIObjectType _ \ return () + | _ \ init_arch_objects (APIType_map2 (Inr ty)) ptr bits sz refs)" + apply (clarsimp split: AARCH64_H.object_type.split) + apply (simp add: init_arch_objects_def APIType_map2_def + split: apiobject_type.split) + done + +lemmas object_splits = + apiobject_type.split_asm + AARCH64_H.object_type.split_asm + sum.split_asm kernel_object.split_asm + arch_kernel_object.split_asm + +declare hoare_in_monad_post[wp del] +declare univ_get_wp[wp del] + +lemma nullPointer_0_simp[simp]: + "(nullPointer = 0) = True" + by (simp add: nullPointer_def) + +lemma descendants_of_retype': + assumes P: "\p. P p \ m p = None" + shows "descendants_of' p (\p. if P p then Some makeObject else m p) = + descendants_of' p m" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (rule iffI) + apply (erule subtree.induct) + apply (rule direct_parent) + apply (clarsimp simp: mdb_next_unfold makeObject_cte split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def makeObject_cte split: if_split_asm) + apply (erule trans_parent) + apply (clarsimp simp: mdb_next_unfold makeObject_cte split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def makeObject_cte split: if_split_asm) + apply (erule subtree.induct) + apply (rule direct_parent) + apply (clarsimp simp: mdb_next_unfold dest!: P) + apply assumption + apply (fastforce simp: parentOf_def dest!: P) + apply (erule trans_parent) + apply (clarsimp simp: mdb_next_unfold dest!: P) + apply assumption + apply (fastforce simp: parentOf_def dest!: P) + done + +lemma capRange_Null [simp]: "capRange NullCap = {}" + by (simp add: capRange_def) + +end + +locale retype_mdb = vmdb + + fixes P n + assumes P: "\p. P p \ m p = None" + assumes 0: "\P 0" + defines "n \ \p. if P p then Some makeObject else m p" +begin + +interpretation Arch . 
(*FIXME: arch_split*) + +lemma no_0_n: "no_0 n" + using no_0 by (simp add: no_0_def n_def 0) + +lemma n_next: + "n \ c \ c' = (if P c then c' = 0 else m \ c \ c')" + by (simp add: mdb_next_unfold n_def makeObject_cte nullPointer_def) + +lemma n_prev: + "n \ c \ c' = (if P c' then c = 0 else m \ c \ c')" + by (simp add: mdb_prev_def n_def makeObject_cte nullPointer_def) + +lemma dlist_n: "valid_dlist n" + using dlist no_0 no_0_n + apply (simp add: valid_dlist_def2) + apply (clarsimp simp: n_prev n_next) + apply (rule conjI) + apply clarsimp + apply (erule allE, erule (1) impE) + apply (erule_tac x=c' in allE) + apply simp + apply (drule P) + apply (simp add: mdb_next_unfold) + apply clarsimp + apply (erule allE, erule (1) impE) + apply (erule_tac x=c' in allE) + apply simp + apply (drule P) + apply (simp add: mdb_prev_def) + done + +lemma n_next_trancl: + "n \ c \\<^sup>+ c' \ (if P c then c' = 0 else m \ c \\<^sup>+ c')" + apply (insert no_0_n chain) + apply (erule trancl_induct) + apply (fastforce simp: n_next) + apply (simp split: if_split_asm) + apply (clarsimp simp: mdb_next_unfold) + apply (simp add: n_next split: if_split_asm) + apply (simp add: mdb_chain_0_def) + apply (drule_tac x=c in bspec) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + done + +lemma next_not_P: + "m \ c \ c' \ \P c" + by (clarsimp simp: mdb_next_unfold dest!: P) + +lemma m_next_trancl: + "m \ c \\<^sup>+ c' \ n \ c \\<^sup>+ c'" + apply (erule trancl_induct) + apply (rule r_into_trancl) + apply (clarsimp simp: n_next) + apply (drule next_not_P) + apply simp + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (clarsimp simp: n_next) + apply (drule next_not_P) + apply simp + done + +lemma P_to_0: + "P c \ n \ c \\<^sup>+ 0" + by (rule r_into_trancl) (simp add: n_next) + +lemma n_trancl_eq: + "n \ c \\<^sup>+ c' = (if P c then c' = 0 else m \ c \\<^sup>+ c')" + by (auto dest: m_next_trancl n_next_trancl P_to_0) + +lemma n_rtrancl_eq: + "n \ c \\<^sup>* c' = (if P c then c' = 0 \ c = c' else m \ c \\<^sup>* c')" + by (auto simp: n_trancl_eq rtrancl_eq_or_trancl) + +lemma dom_n: + "dom n = dom m \ Collect P" + by (auto simp add: n_def) + +lemma mdb_chain_0_n: "mdb_chain_0 n" + using chain + by (auto simp: mdb_chain_0_def dom_n n_trancl_eq) + +lemma n_Some_eq: + "(n p = Some (CTE cap node)) = + (if P p then cap = NullCap \ node = nullMDBNode + else m p = Some (CTE cap node))" + by (auto simp: n_def makeObject_cte) + +lemma valid_badges_n: "valid_badges n" +proof - + from valid + have "valid_badges m" .. + thus ?thesis + apply (clarsimp simp: valid_badges_def) + apply (simp add: n_Some_eq n_next split: if_split_asm) + apply fastforce + done +qed + +lemma caps_contained_n: "caps_contained' n" +proof - + from valid + have "caps_contained' m" .. + thus ?thesis + apply (clarsimp simp: caps_contained'_def) + apply (simp add: n_Some_eq split: if_split_asm) + apply fastforce + done +qed + +lemma mdb_chunked_n: "mdb_chunked n" +proof - + from valid + have "mdb_chunked m" .. + thus ?thesis + apply (clarsimp simp: mdb_chunked_def) + apply (simp add: n_Some_eq split: if_split_asm) + apply (simp add: n_Some_eq n_trancl_eq n_rtrancl_eq is_chunk_def) + apply fastforce + done +qed + +lemma descendants [simp]: + "descendants_of' p n = descendants_of' p m" + apply (unfold n_def) + apply (subst descendants_of_retype') + apply (erule P) + apply (rule refl) + done + +lemma untyped_mdb_n: "untyped_mdb' n" +proof - + from valid + have "untyped_mdb' m" .. 
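+    (* the new slots hold makeObject CTEs, i.e. NullCap, so the untyped_mdb' conditions reduce to those already holding for m *)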
+ thus ?thesis + apply (clarsimp simp: untyped_mdb'_def) + apply (simp add: n_Some_eq split: if_split_asm) + done +qed + +lemma untyped_inc_n: "untyped_inc' n" +proof - + from valid + have "untyped_inc' m" .. + thus ?thesis + apply (clarsimp simp: untyped_inc'_def) + apply (simp add: n_Some_eq split: if_split_asm) + apply blast + done +qed + +lemma valid_nullcaps_n: "valid_nullcaps n" +proof - + from valid + have "valid_nullcaps m" .. + thus ?thesis + apply (clarsimp simp: valid_nullcaps_def) + apply (simp add: n_Some_eq split: if_split_asm) + done +qed + +lemma ut_rev_n: "ut_revocable' n" +proof - + from valid + have "ut_revocable' m" .. + thus ?thesis + apply (clarsimp simp: ut_revocable'_def) + apply (simp add: n_Some_eq split: if_split_asm) + done +qed + +lemma class_links_m: + "class_links m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma next_not_P2: + "\ m \ p \ p'; p' \ nullPointer \ \ \ P p'" + using dlist + apply (clarsimp simp: mdb_next_unfold) + apply (erule(1) valid_dlistE) + apply clarsimp + apply (clarsimp dest!: P) + done + +lemma class_links_n: + "class_links n" + using class_links_m + apply (simp add: class_links_def) + apply (elim allEI) + apply clarsimp + apply (subgoal_tac "p' \ nullPointer") + apply (simp add: n_next split: if_split_asm) + apply (case_tac cte, case_tac cte') + apply (clarsimp simp add: n_Some_eq split: if_split_asm) + apply (drule(1) next_not_P2) + apply simp + apply (clarsimp simp: no_0_n nullPointer_def) + done + +lemma irq_control_n: + "irq_control n" + apply (clarsimp simp add: irq_control_def) + apply (simp add: n_Some_eq split: if_split_asm) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (erule (1) irq_controlD, rule irq_control) + done + +lemma dist_z_m: "distinct_zombies m" + using valid by auto + +lemma dist_z_n: "distinct_zombies n" + using dist_z_m + apply (simp add: n_def distinct_zombies_def + distinct_zombie_caps_def + split del: if_split) + apply (erule allEI, erule allEI) + apply (clarsimp split del: if_split) + apply (clarsimp split: if_split_asm simp: makeObject_cte) + apply (clarsimp simp: isCap_simps) + done + +lemma reply_masters_rvk_fb_m: "reply_masters_rvk_fb m" + using valid by auto + +lemma reply_masters_rvk_fb_n: "reply_masters_rvk_fb n" + using reply_masters_rvk_fb_m + by (simp add: n_def reply_masters_rvk_fb_def + ball_ran_eq makeObject_cte isCap_simps) + +lemma valid_n: + "valid_mdb_ctes n" + by (simp add: valid_mdb_ctes_def dlist_n no_0_n mdb_chain_0_n + valid_badges_n caps_contained_n untyped_mdb_n + untyped_inc_n mdb_chunked_n valid_nullcaps_n ut_rev_n + class_links_n irq_control_n dist_z_n + reply_masters_rvk_fb_n) + +end + +definition + caps_no_overlap'' :: "machine_word \ nat \ kernel_state \ bool" +where + "caps_no_overlap'' ptr sz s \ \cte \ ran (ctes_of s). + untypedRange (cteCap cte) \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ {} + \ {ptr .. 
(ptr && ~~ mask sz) + 2 ^ sz - 1} \ untypedRange (cteCap cte)" + +lemma obj_range'_subset: + "\range_cover ptr sz (objBitsKO val) n; ptr' \ set (new_cap_addrs n ptr val)\ + \ obj_range' ptr' val \ {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1}" + unfolding obj_range'_def + by (rule new_range_subset, auto) + +lemma obj_range'_subset_strong: + assumes "range_cover ptr sz (objBitsKO val) n" + and "ptr' \ set (new_cap_addrs n ptr val)" + shows "obj_range' ptr' val \ {ptr..ptr + (of_nat n * 2 ^ objBitsKO val) - 1}" +proof - + { + assume cover: "range_cover ptr sz (objBitsKO val) n" + and mem_p: "ptr' \ set (new_cap_addrs n ptr val)" + and not_0: "n\ 0" + note n_less = range_cover.range_cover_n_less[OF cover] + have unat_of_nat_m1: "unat (of_nat n - (1::machine_word)) < n" + using not_0 n_less by (simp add:unat_of_nat_minus_1) + have decomp: + "of_nat n * 2 ^ objBitsKO val = + of_nat (n - 1) * 2 ^ objBitsKO val + (2 :: machine_word) ^ objBitsKO val" + apply (simp add:distrib_right[where b = "1 :: machine_word",simplified,symmetric]) + using not_0 n_less + apply simp + done + have "ptr' + 2 ^ objBitsKO val - 1 \ ptr + of_nat n * 2 ^ objBitsKO val - 1" + using cover + apply (subst decomp) + apply (simp add:add.assoc[symmetric]) + apply (simp add:p_assoc_help) + apply (rule order_trans[OF word_plus_mono_left word_plus_mono_right]) + using mem_p not_0 + apply (clarsimp simp:new_cap_addrs_def shiftl_t2n) + apply (rule word_plus_mono_right) + apply (subst mult.commute) + apply (rule word_mult_le_mono1[OF word_of_nat_le]) + using n_less not_0 + apply (simp add:unat_of_nat_minus_1) + apply (rule p2_gt_0[THEN iffD2]) + apply (simp add:word_bits_def range_cover_def) + apply (simp only: word_bits_def[symmetric]) + apply (clarsimp simp: unat_of_nat_minus_1[OF n_less(1) not_0]) + apply (rule nat_less_power_trans2 + [OF range_cover.range_cover_le_n_less(2),OF cover, folded word_bits_def]) + apply (simp add:unat_of_nat_m1 less_imp_le) + apply (simp add:range_cover_def word_bits_def) + apply (rule machine_word_plus_mono_right_split[where sz = sz]) + using range_cover.range_cover_compare[OF cover,where p = "unat (of_nat n - (1::machine_word))"] + apply (clarsimp simp:unat_of_nat_m1) + apply (simp add:range_cover_def word_bits_def) + apply (rule olen_add_eqv[THEN iffD2]) + apply (subst add.commute[where a = "2^objBitsKO val - 1"]) + apply (subst p_assoc_help[symmetric]) + apply (rule is_aligned_no_overflow) + apply (clarsimp simp:range_cover_def word_bits_def) + apply (erule aligned_add_aligned[OF _ is_aligned_mult_triv2]; simp) + apply simp + by (meson assms(1) is_aligned_add is_aligned_mult_triv2 is_aligned_no_overflow' range_cover_def) + } + with assms show ?thesis + unfolding obj_range'_def + apply - + apply (frule(1) obj_range'_subset) + apply (simp add: obj_range'_def) + apply (cases "n = 0"; clarsimp simp:new_cap_addrs_def mask_def field_simps) + done +qed + + +lemma caps_no_overlapD'': + "\cte_wp_at' (\cap. cteCap cap = c) q s;caps_no_overlap'' ptr sz s\ + \ untypedRange c \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ {} \ + {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ untypedRange c" + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps caps_no_overlap''_def + simp del:atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (drule_tac x = cte in bspec) + apply fastforce + apply (erule(1) impE) + apply blast +done + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma valid_untyped'_helper: + assumes valid : "valid_cap' c s" + and cte_at : "cte_wp_at' (\cap. cteCap cap = c) q s" + and cover : "range_cover ptr sz (objBitsKO val) n" + and range : "caps_no_overlap'' ptr sz s" + and pres : "isUntypedCap c \ usableUntypedRange c \ {ptr..ptr + of_nat n * 2 ^ objBitsKO val - 1} = {}" + shows "\pspace_aligned' s; pspace_distinct' s; pspace_no_overlap' ptr sz s\ + \ valid_cap' c (s\ksPSpace := foldr (\addr. data_map_insert addr val) (new_cap_addrs n ptr val) (ksPSpace s)\)" + proof - + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff + assume pn : "pspace_aligned' s" "pspace_distinct' s" + and no_overlap: "pspace_no_overlap' ptr sz s" + show ?thesis + using pn pres no_overlap valid cover cte_wp_at_ctes_of[THEN iffD1,OF cte_at] + caps_no_overlapD''[OF cte_at range] + apply (clarsimp simp:valid_cap'_def retype_ko_wp_at') + apply (case_tac "cteCap cte"; + simp add: valid_cap'_def cte_wp_at_obj_cases' valid_pspace'_def retype_obj_at_disj' + split: zombie_type.split_asm) + apply (clarsimp simp: valid_arch_cap'_def valid_arch_cap_ref'_def retype_obj_at_disj' + typ_at_to_obj_at_arches frame_at'_def page_table_at'_def + split: if_split_asm arch_capability.splits) + unfolding valid_untyped'_def + apply (intro allI) + apply (rule ccontr) + apply clarify + using cover[unfolded range_cover_def] + apply (clarsimp simp:isCap_simps retype_ko_wp_at' split:if_split_asm) + apply (thin_tac "\x. Q x" for Q) + apply (frule aligned_untypedRange_non_empty) + apply (simp add:isCap_simps) + apply (elim disjE) + apply (frule(1) obj_range'_subset) + apply (erule impE) + apply (drule(1) psubset_subset_trans) + apply (drule Int_absorb1[OF psubset_imp_subset]) + apply (drule aligned_untypedRange_non_empty) + apply (simp add:isCap_simps) + apply (simp add:Int_ac add_mask_fold) + apply (drule(1) subset_trans) + apply (simp only: add_mask_fold) + apply (frule(1) obj_range'_subset_strong) + apply (drule(1) non_disjoing_subset) + apply blast + apply (thin_tac "\x. Q x" for Q) + apply (frule aligned_untypedRange_non_empty) + apply (simp add:isCap_simps) + apply (frule(1) obj_range'_subset) + apply (drule(1) subset_trans) + apply (erule impE) + apply (clarsimp simp: add_mask_fold) + apply blast + apply (simp only: add_mask_fold) + apply blast + done +qed + +definition caps_overlap_reserved' :: "machine_word set \ kernel_state \ bool" +where + "caps_overlap_reserved' S s \ \cte \ ran (ctes_of s). + (isUntypedCap (cteCap cte) \ usableUntypedRange (cteCap cte) \ S = {})" + +lemma retype_canonical': + assumes pc': "pspace_canonical' s'" + and cover: "range_cover ptr sz (objBitsKO ko) n" + and sz_limit: "sz \ maxUntypedSizeBits" + and ptr_cn: "canonical_address (ptr && ~~ mask sz)" + shows + "pspace_canonical' (s' \ksPSpace := foldr (\addr. 
data_map_insert addr ko) + (new_cap_addrs n ptr ko) (ksPSpace s')\)" + (is "pspace_canonical' (s'\ksPSpace := ?ps\)") +proof - + show "pspace_canonical' (s'\ksPSpace := ?ps\)" using assms + apply (subst foldr_upd_app_if[folded data_map_insert_def]) + apply (clarsimp simp: pspace_canonical'_def split: if_split_asm) + apply (clarsimp simp add: new_cap_addrs_def shiftl_t2n) + apply (fastforce intro: range_cover_canonical_address[OF cover] simp: mult.commute)+ + done +qed + +lemma createObjects_valid_pspace': + assumes mko: "makeObjectKO dev ty = Some val" + and not_0: "n \ 0" + and cover: "range_cover ptr sz (objBitsKO val + gbits) n" + and sz_limit: "sz \ maxUntypedSizeBits" + and ptr_cn: "canonical_address (ptr && ~~ mask sz)" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat (n * 2^gbits * 2 ^ objBitsKO val ) - 1} s + \ ptr \ 0\ + createObjects' ptr n val gbits \\r. valid_pspace'\" + apply (cut_tac not_0) + apply (simp add: split_def createObjects'_def + lookupAround2_pspace_no + alignError_def unless_def) + apply (rule hoare_pre) + apply (wp|simp cong: if_cong del: data_map_insert_def del:fun_upd_apply)+ + apply (wpc|wp)+ + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift[OF _ cover]) + apply simp+ + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift[OF _ cover]) + apply simp+ + apply (subst data_map_insert_def[symmetric])+ + apply (rule impI) + apply (clarsimp simp: new_cap_addrs_fold' + valid_pspace'_def linorder_not_less + objBits_def[symmetric]) + apply (simp only: imp_disjL[symmetric] imp_conjL[symmetric] imp_ex[symmetric] + range_cover.unat_of_nat_n_shift[OF cover,where gbits=gbits,simplified]) +proof (intro conjI impI) + + fix s + + assume pn: "pspace_no_overlap' ptr sz s" + and vo: "valid_objs' s" + and ad: "pspace_aligned' s" "pspace_distinct' s" + and cn: "pspace_canonical' s" + and pc: "caps_no_overlap'' ptr sz s" + and mdb: "valid_mdb' s" + and p_0: "ptr \ 0" + and reserved : "caps_overlap_reserved' {ptr..ptr + of_nat n *2 ^ gbits * 2 ^ objBitsKO val - 1} s" + and no_0_obj': "no_0_obj' s" + have obj': "objBitsKO val \ sz" + using cover + by (simp add:range_cover_def) + + let ?s' = "s\ksPSpace := foldr (\addr. data_map_insert addr val) (new_cap_addrs (n * 2 ^ gbits) ptr val) (ksPSpace s)\" + + note cover' = range_cover_rel[where sbit' = "objBitsKO val",OF cover _ refl,simplified] + + note ad' = retype_aligned_distinct'[OF ad pn cover'] + + note shift = range_cover.unat_of_nat_n_shift[OF cover,where gbits=gbits,simplified] + + have al: "is_aligned ptr (objBitsKO val)" + using cover' + by (simp add:range_cover_def) + + show pspace_aligned: "pspace_aligned' ?s'" + using ad' shift + by (simp add:field_simps) + + show pspace_canonical: "pspace_canonical' ?s'" + using retype_canonical'[OF cn cover' sz_limit ptr_cn] + by (clarsimp simp: field_simps) + + show "pspace_distinct' ?s'" + using ad' shift + by (simp add:field_simps) + + note obj_at_disj = retype_obj_at_disj' [OF ad pn cover'] + + note obj_at_disj' = obj_at_disj [unfolded foldr_upd_app_if[folded data_map_insert_def]] + + have obj_atC: "\P x. 
x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ \ obj_at' P x s" + apply (clarsimp simp: obj_at'_def) + apply (drule subsetD [OF new_cap_addrs_subset [OF cover' ]]) + apply (insert pspace_no_overlap_disjoint' [OF ad(1) pn]) + apply (drule domI[where m = "ksPSpace s"]) + apply (drule(1) orthD2) + apply (clarsimp simp:ptr_add_def p_assoc_help) + done + + have valid_cap: "\cap q. \ s \' cap; cte_wp_at' (\cte. cteCap cte = cap) q s \ + \ ?s' \' cap" + apply (rule valid_untyped'_helper[OF _ _ _ pc _ ad pn ]) + apply simp+ + apply (subst mult.commute) + apply (rule cover') + using reserved + apply (clarsimp simp:caps_overlap_reserved'_def cte_wp_at_ctes_of) + apply (drule_tac x = cte in bspec) + apply fastforce + apply simp + done + + show valid_objs: "valid_objs' ?s'" using vo + apply (clarsimp simp: valid_objs'_def + foldr_upd_app_if[folded data_map_insert_def] + elim!: ranE + split: if_split_asm) + apply (insert sym[OF mko])[1] + apply (clarsimp simp: makeObjectKO_def + split: bool.split_asm sum.split_asm + AARCH64_H.object_type.split_asm + apiobject_type.split_asm + kernel_object.split_asm + arch_kernel_object.split_asm) + apply (drule bspec, erule ranI) + apply (subst mult.commute) + apply (case_tac obj; simp add: valid_obj'_def) + apply (rename_tac endpoint) + apply (case_tac endpoint; simp add: valid_ep'_def obj_at_disj') + apply (rename_tac notification) + apply (case_tac notification; simp add: valid_ntfn'_def valid_bound_tcb'_def obj_at_disj') + apply (rename_tac ntfn xa) + apply (case_tac ntfn, simp_all, (clarsimp simp: obj_at_disj' split:option.splits)+) + apply (rename_tac tcb) + apply (case_tac tcb, clarsimp simp add: valid_tcb'_def) + apply (frule pspace_alignedD' [OF _ ad(1)]) + apply (frule pspace_distinctD' [OF _ ad(2)]) + apply (simp add: objBits_simps) + apply (subst mult.commute) + apply (intro conjI ballI) + apply (clarsimp elim!: ranE) + apply (rule valid_cap[unfolded foldr_upd_app_if[folded data_map_insert_def]]) + apply (fastforce) + apply (rule_tac ptr="x + xa" in cte_wp_at_tcbI', assumption+) + apply fastforce + apply simp + apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound tcbprev tcbnext user_context) + apply (case_tac thread_state, simp_all add: valid_tcb_state'_def valid_bound_tcb'_def + valid_bound_ntfn'_def obj_at_disj' opt_tcb_at'_def + split: option.splits)[4] + apply (clarsimp simp add: valid_arch_tcb'_def typ_at_to_obj_at_arches obj_at_disj') + apply (simp add: valid_cte'_def) + apply (frule pspace_alignedD' [OF _ ad(1)]) + apply (frule pspace_distinctD' [OF _ ad(2)]) + apply (simp add: objBits_simps') + apply (subst mult.commute) + apply (erule valid_cap[unfolded foldr_upd_app_if[folded data_map_insert_def]]) + apply (erule(2) cte_wp_at_cteI'[unfolded cte_level_bits_def]) + apply simp + done + have not_0: "0 \ set (new_cap_addrs (2 ^ gbits * n) ptr val)" + using p_0 + apply clarsimp + apply (drule subsetD [OF new_cap_addrs_subset [OF cover'],rotated]) + apply (clarsimp simp:ptr_add_def) + done + show "valid_mdb' ?s'" + apply (simp add: valid_mdb'_def foldr_upd_app_if[folded data_map_insert_def]) + apply (subst mult.commute) + apply (subst ctes_of_retype [OF mko ad]) + apply (rule ad'[unfolded foldr_upd_app_if[folded data_map_insert_def]])+ + apply (simp add: objBits_def[symmetric] new_cap_addrs_aligned [OF al]) + apply (rule ballI, drule subsetD [OF new_cap_addrs_subset [OF cover']]) + apply (insert pspace_no_overlap_disjoint' [OF ad(1) pn]) + apply (drule_tac x = x in orthD1) + apply (simp add:ptr_add_def p_assoc_help) + apply 
fastforce + apply (fold makeObject_cte) + apply (rule retype_mdb.valid_n) + apply unfold_locales + apply (rule mdb[unfolded valid_mdb'_def]) + apply (rule iffD2 [OF None_ctes_of_cte_at[unfolded cte_wp_at_obj_cases'], THEN sym]) + apply (rule notI) + apply (elim disjE conjE, simp_all add: obj_atC)[1] + apply (thin_tac "S \ T = {}" for S T) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (drule pspace_no_overlapD' [OF _ pn]) + apply (drule subsetD [OF new_cap_addrs_subset[OF cover']]) + apply (frule_tac ptr'=p in mask_in_range) + apply (drule(1) tcb_cte_cases_aligned_helpers) + apply (drule_tac x = p in orthD1) + apply (clarsimp simp:objBits_simps) + apply (clarsimp simp:ptr_add_def p_assoc_help) + apply (frule new_range_subset[OF cover']) + apply (drule bspec [OF new_cap_addrs_aligned[OF al]]) + apply (drule(1) disjoint_subset[rotated]) + apply (drule_tac a=p in equals0D) + apply (frule_tac ptr'=p in mask_in_range) + apply (simp only: add_mask_fold) + apply (insert sym [OF mko], + clarsimp simp: objBits_simps makeObjectKO_def obj_at'_def)[1] + apply (insert sym[OF mko] cover', + clarsimp simp: obj_at'_def objBits_simps + makeObjectKO_def)[1] + apply (drule(1) tcb_cte_cases_aligned_helpers(2)) + apply clarsimp + apply (drule subsetD [OF new_cap_addrs_subset,rotated]) + apply (simp add:objBits_simps) + apply (drule orthD1) + apply (fastforce simp:p_assoc_help ptr_add_def) + apply fastforce + apply (simp add: not_0) + done + + have data_map_ext: "\x y. data_map_insert x y = (\m. m (x \ y))" + by (rule ext) simp + show no_0_obj: "no_0_obj' ?s'" + using not_0 no_0_obj' + by (simp add: no_0_obj'_def data_map_ext field_simps foldr_upd_app_other) + +qed + +abbreviation + "injectKOS \ (injectKO :: ('a :: pspace_storable) \ kernel_object)" + +lemma createObjects_valid_pspace_untyped': + assumes mko: "makeObjectKO dev ty = Some val" + and not_0: "n \ 0" + and cover: "range_cover ptr sz (objBitsKO val + gbits) n" + and sz_limit: "sz \ maxUntypedSizeBits" + and ptr_cn: "canonical_address (ptr && ~~ mask sz)" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s \ caps_no_overlap'' ptr sz s \ ptr \ 0 + \ caps_overlap_reserved' {ptr .. ptr + of_nat (n * 2^gbits * 2 ^ objBitsKO val ) - 1} s \ + createObjects' ptr n val gbits \\r. 
valid_pspace'\" + apply (wp createObjects_valid_pspace' [OF mko not_0 cover sz_limit ptr_cn]) + apply simp + done + +declare bleeding_obvious [simp] + +lemma range_cover_new_cap_addrs_compare: + assumes not_0: "n \ 0" + and cover: "range_cover ptr sz (objBitsKO val + gbits) n" + and ptr_in: "p \ set (new_cap_addrs (unat (((of_nat n)::machine_word) << gbits)) ptr val)" + shows "p \ ptr + of_nat (shiftL n (objBitsKO val + gbits) - Suc 0)" +proof - + note unat_of_nat_shift = range_cover.unat_of_nat_n_shift[OF cover,where gbits=gbits,simplified] + have cover' :"range_cover ptr sz (objBitsKO val) (n*2^gbits)" + by (rule range_cover_rel[OF cover],simp+) + have upbound:" unat ((((of_nat n)::machine_word) * 2 ^ gbits)) * unat ((2::machine_word) ^ objBitsKO val) < 2 ^ word_bits" + using range_cover.range_cover_le_n_less[OF cover' le_refl] cover' + apply - + apply (drule nat_less_power_trans) + apply (simp add:range_cover_def) + apply (fold word_bits_def) + using unat_of_nat_shift not_0 + apply (simp add:field_simps shiftl_t2n) + done + have not_0': "(2::machine_word) ^ (objBitsKO val + gbits) * of_nat n \ 0" + apply (rule range_cover_not_zero_shift[OF not_0,unfolded shiftl_t2n,OF _ le_refl]) + apply (rule range_cover_rel[OF cover]) + apply simp+ + done + have "gbits < word_bits" + using cover + by (simp add:range_cover_def word_bits_def) + thus ?thesis + apply - + apply (insert not_0 cover ptr_in) + apply (frule range_cover.range_cover_le_n_less[OF _ le_refl]) + apply (fold word_bits_def) + apply (simp add:shiftL_nat ) + apply (simp add:range_cover.unat_of_nat_n_shift) + apply (clarsimp simp:new_cap_addrs_def shiftl_t2n) + apply (rename_tac pa) + apply (rule word_plus_mono_right) + apply (rule order_trans) + apply (subst mult.commute) + apply (rule word_mult_le_iff[THEN iffD2]) + apply (clarsimp simp:p2_gt_0 range_cover_def word_bits_def) + apply (drule range_cover_rel[where sbit' = "0"]) + apply (simp+)[2] + apply (erule less_le_trans[OF range_cover.range_cover_le_n_less(2)]) + apply (clarsimp simp:field_simps power_add) + apply (rule unat_le_helper) + apply (rule of_nat_mono_maybe_le[THEN iffD1]) + using range_cover.range_cover_le_n_less[OF cover' le_refl] + apply (simp_all only:word_bits_def[symmetric]) + apply simp + apply (drule nat_less_power_trans) + apply (simp add:range_cover_def word_bits_def) + apply (rule less_le_trans[OF mult_less_mono1]) + apply (rule unat_mono) + apply (rule_tac y1= "pa" in of_nat_mono_maybe'[THEN iffD1,rotated -1]) + apply (assumption) + apply (simp add:word_bits_def) + apply (simp add:word_bits_def) + apply simp + using unat_of_nat_shift + apply (simp add:field_simps shiftl_t2n) + apply simp + apply (rule word_less_sub_1) + apply (simp add:power_add field_simps) + apply (subst mult.assoc[symmetric]) + apply (rule word_mult_less_mono1) + apply (rule word_of_nat_less) + using unat_of_nat_shift + apply (simp add:shiftl_t2n field_simps) + apply (meson less_exp objBitsKO_bounded2 of_nat_less_pow_64 word_gt_a_gt_0) + using upbound + apply (simp add:word_bits_def) + apply (rule machine_word_plus_mono_right_split[where sz = sz]) + apply (rule less_le_trans[rotated -1]) + apply (rule range_cover.range_cover_compare_bound[OF cover']) + apply (simp add: unat_minus_one[OF not_0']) + using range_cover.unat_of_nat_n_shift[OF cover le_refl] + apply (simp add:shiftl_t2n power_add field_simps) + apply (simp add:range_cover_def word_bits_def) + done +qed + +lemma createObjects_orig_ko_wp_at2': + "\\s. 
range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ P (ko_wp_at' P' p s) + \ (P' val \ P True) + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. P (ko_wp_at' P' p s)\" + apply (simp add: createObjects'_def lookupAround2_pspace_no + alignError_def unless_def split_def del:fun_upd_apply) + apply (rule hoare_grab_asm)+ + apply (subst new_cap_addrs_fold') + apply (drule range_cover_not_zero_shift[rotated]) + apply (rule le_add2) + apply (simp add:word_le_sub1 del:fun_upd_apply)+ + apply (rule hoare_pre) + apply (wp|simp cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (wpc|wp)+ + apply (clarsimp simp:valid_pspace'_def linorder_not_less simp del:fun_upd_apply) + apply (subgoal_tac " range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (subst data_map_insert_def[symmetric])+ + apply (subst retype_ko_wp_at',simp+)+ + apply clarsimp + apply (cases "P' val") + apply simp + apply clarsimp + apply (frule(1) subsetD [OF new_cap_addrs_subset]) + apply (drule(1) pspace_no_overlap_disjoint') + apply (simp add:lookupAround2_None1) + apply (intro conjI impI allI) + apply (drule_tac x = p in spec) + apply (erule impE) + apply (erule(1) range_cover_new_cap_addrs_compare[rotated]) + apply simp + apply (fastforce simp: ko_wp_at'_def) + apply (drule_tac x = p in orthD1) + apply (clarsimp simp:ptr_add_def p_assoc_help) + apply (simp add:dom_def) + apply (fastforce simp:ko_wp_at'_def) + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + + +lemma createObjects_orig_obj_at2': + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ pspace_aligned' s \ pspace_distinct' s + \ P (obj_at' P' p s) + \ \ (case_option False P' (projectKO_opt val)) + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. P (obj_at' P' p s)\" + unfolding obj_at'_real_def + by (wp createObjects_orig_ko_wp_at2') auto + +lemma createObjects_orig_cte_wp_at2': + "\\s. P (cte_wp_at' P' p s) + \ n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ pspace_aligned' s \ pspace_distinct' s + \ \ (case_option False P' (projectKO_opt val)) + \ (\(getF, setF) \ ran tcb_cte_cases. + \ (case_option False (P' \ getF) (projectKO_opt val))) + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. P (cte_wp_at' P' p s)\" + including classic_wp_pre + apply (simp add: cte_wp_at'_obj_at') + apply (rule handy_prop_divs) + apply (wp createObjects_orig_obj_at2'[where sz = sz], simp) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + apply (wp handy_prop_divs createObjects_orig_obj_at2'[where sz = sz] + | simp add: o_def cong: option.case_cong)+ + done + +lemma threadSet_cte_wp_at2'T: + assumes "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + shows "\\s. P (cte_wp_at' P' p s)\ threadSet F t \\rv s. P (cte_wp_at' P' p s)\" + using assms by (rule threadSet_cte_wp_at'T) + +lemmas threadSet_cte_wp_at2' = + threadSet_cte_wp_at2'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma createNewCaps_cte_wp_at2: + "\\s. P (cte_wp_at' P' p s) \ \ P' makeObject + \ n \ 0 + \ range_cover ptr sz (APIType_capBits ty objsz) n + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n objsz dev + \\rv s. 
P (cte_wp_at' P' p s)\" + including classic_wp_pre + apply (simp add: createNewCaps_def createObjects_def AARCH64_H.toAPIType_def + split del: if_split) + apply (case_tac ty; simp add: createNewCaps_def createObjects_def Arch_createNewCaps_def + split del: if_split cong: if_cong) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (wp, simp add:createObjects_def) + apply ((wp createObjects_orig_cte_wp_at2'[where sz = sz] + mapM_x_wp' threadSet_cte_wp_at2')+ + | assumption + | clarsimp simp: APIType_capBits_def projectKO_opts_defs + makeObject_tcb tcb_cte_cases_def cteSizeBits_def + archObjSize_def + createObjects_def curDomain_def + objBits_if_dev + split del: if_split + | simp add: objBits_simps field_simps mult_2_right)+ + done + +lemma createObjects_orig_obj_at': + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ pspace_aligned' s \ pspace_distinct' s + \ obj_at' P p s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r. obj_at' P p\" + apply (rule hoare_grab_asm)+ + apply (clarsimp simp: createObjects'_def) + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply simp+ + apply (wp|simp add:split_def cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (wpc|wp)+ + apply (clarsimp simp del:fun_upd_apply) + apply (simp add:range_cover_def is_aligned_mask) + apply (subst data_map_insert_def[symmetric])+ + apply clarsimp + apply (subgoal_tac "range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (subst retype_obj_at',simp+)+ + apply (intro conjI impI allI) + apply (clarsimp simp:obj_at'_real_def ko_wp_at'_def) + apply (frule(1) subsetD [OF new_cap_addrs_subset]) + apply (drule(1) pspace_no_overlap_disjoint') + apply (simp add:lookupAround2_None1) + apply (drule_tac x = p in spec) + apply (erule impE) + apply (erule(1) range_cover_new_cap_addrs_compare[rotated]) + apply simp + apply simp + apply (frule(1) subsetD [OF new_cap_addrs_subset]) + apply (drule(1) pspace_no_overlap_disjoint') + apply (drule_tac x = p in orthD1) + apply (clarsimp simp:ptr_add_def p_assoc_help) + apply (simp add:dom_def obj_at'_real_def ko_wp_at'_def) + apply simp+ + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + +crunch ko_wp_at'[wp]: doMachineOp "\s. P (ko_wp_at' P' p s)" + +lemma createObjects_orig_cte_wp_at': + "\\s. range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ cte_wp_at' P p s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. cte_wp_at' P p s\" + apply (simp add: cte_wp_at'_obj_at' tcb_cte_cases_def cteSizeBits_def) + apply (rule hoare_pre, wp hoare_vcg_disj_lift createObjects_orig_obj_at'[where sz = sz]) + apply clarsimp + done + +lemma createNewCaps_cte_wp_at': + "\\s. cte_wp_at' P p s + \ range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n us dev + \\rv. 
cte_wp_at' P p\" + apply (simp add: createNewCaps_def AARCH64_H.toAPIType_def + split del: if_split) + apply (case_tac ty; simp add: Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wp createObjects_orig_cte_wp_at'[where sz = sz] mapM_x_wp' + threadSet_cte_wp_at'T + | clarsimp simp: objBits_simps field_simps mult_2_right APIType_capBits_def + createObjects_def curDomain_def + | intro conjI impI + | force simp: tcb_cte_cases_def cteSizeBits_def)+ + done + +lemma createObjects_obj_at_other: + assumes cover: "range_cover ptr sz (objBitsKO val + gbits) n" + and not_0: "n\ 0" + shows "\\s. obj_at' P p s \ valid_pspace' s \ pspace_no_overlap' ptr sz s\ + createObjects ptr n val gbits \\_. obj_at' P p\" + apply (simp add: createObjects_def) + apply (wp createObjects_orig_obj_at'[where sz = sz]) + using cover not_0 + apply (clarsimp simp: cover not_0 valid_pspace'_def pspace_no_overlap'_def) + done + +lemma valid_cap'_range_no_overlap: + "\untypedRange c \ {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1} = {}; s \' c; + valid_pspace' s; pspace_no_overlap' ptr sz s; + range_cover ptr sz (objBitsKO val) n\ + \ s\ksPSpace := foldr (\addr. data_map_insert addr val) + (new_cap_addrs n ptr val) (ksPSpace s)\ \' c" + apply (cases c; simp add: valid_cap'_def valid_arch_cap'_def valid_arch_cap_ref'_def + cte_wp_at_obj_cases' valid_pspace'_def retype_obj_at_disj' + typ_at_to_obj_at_arches frame_at'_def page_table_at'_def + split: zombie_type.split_asm arch_capability.splits if_splits + del: Int_atLeastAtMost)[1] + apply (rename_tac word nat1 nat2) + apply (clarsimp simp:valid_untyped'_def retype_ko_wp_at' + simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (frule aligned_untypedRange_non_empty) + apply (simp add:isCap_simps) + apply (intro conjI impI) + apply (intro allI) + apply (drule_tac x = ptr' in spec) + apply (rule ccontr) + apply (clarsimp simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (erule disjE) + apply (drule(2) disjoint_subset2 [OF obj_range'_subset]) + apply (drule(1) disjoint_subset2[OF psubset_imp_subset]) + apply (simp add: Int_absorb ptr_add_def p_assoc_help mask_def + del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (drule(1) obj_range'_subset) + apply (drule_tac A'=" {word + of_nat nat2..word + 2 ^ nat1 - 1}" in disjoint_subset[rotated]) + apply clarsimp + apply (rule is_aligned_no_wrap') + apply (fastforce simp:capAligned_def) + apply (erule of_nat_less_pow_64) + apply (simp add:capAligned_def) + apply (drule(1) disjoint_subset2) + apply (simp add: add_mask_fold) + apply blast + apply (intro allI) + apply (drule_tac x = ptr' in spec) + apply (rule ccontr) + apply (clarsimp simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (drule(2) disjoint_subset2 [OF obj_range'_subset]) + apply (drule(1) disjoint_subset2) + apply (simp add: Int_absorb ptr_add_def p_assoc_help mask_def + del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + done + +lemma createObjects_valid_cap': + "\valid_cap' c and valid_pspace' and pspace_no_overlap' ptr sz and + K (untypedRange c \ {ptr .. 
(ptr && ~~ mask sz) + 2^sz - 1} = {} \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0)\ + createObjects' ptr n val gbits + \\_. valid_cap' c\" + apply (rule hoare_gen_asm) + apply (simp add: createObjects'_def lookupAround2_pspace_no + alignError_def unless_def split_def) + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply fastforce+ + apply (rule hoare_pre) + apply (wp|simp cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (clarsimp simp: linorder_not_less valid_pspace'_def) + apply (wpc|wp)+ + apply (subst data_map_insert_def[symmetric])+ + apply clarsimp + apply (subgoal_tac " range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (subst range_cover.unat_of_nat_n_shift,simp+)+ + apply (subst (asm) range_cover.unat_of_nat_n_shift,simp+)+ + apply (intro conjI impI allI) + apply (erule(4) valid_cap'_range_no_overlap)+ + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + +lemma createObjects_cte_wp_at': + "\range_cover ptr sz (objBitsKO val + gbits) n; n \ 0\ + \\\s. cte_wp_at' P p s \ valid_pspace' s \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\_. cte_wp_at' P p\" + apply (clarsimp simp: valid_def cte_wp_at_obj_cases') + apply (erule disjE) + apply (erule use_valid[OF _ ]) + apply (rule createObjects_orig_obj_at') + apply fastforce + apply clarsimp + apply (drule_tac x = na in bspec) + apply clarsimp + apply clarsimp + apply (drule use_valid[OF _ createObjects_orig_obj_at']) + apply fastforce + apply simp + done + +lemma createNewCaps_cte_wp_at: + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0 : "n \ 0" + shows "\\s. cte_wp_at' P p s \ valid_pspace' s \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n us dev + \\_. cte_wp_at' P p\" + apply (wp createNewCaps_cte_wp_at') + apply (auto simp: cover not_0) + done + +lemma createObjects_ret2: + "\(\s. P (map (\p. ptr_add y (p * 2 ^ (objBitsKO ko + gbits))) + [0.. n \ 0)\ + createObjects y n ko gbits \\rv s. P rv\" + apply (rule hoare_gen_asm) + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_ret) + apply simp+ + apply (rule hoare_vcg_prop) + defer + apply (clarsimp simp: power_add mult.commute mult.left_commute | assumption)+ + done + +lemma state_refs_ko_wp_at_eq: + "state_refs_of' s = (\x. {r. ko_wp_at' (\ko. r \ refs_of' ko) x s})" + apply (rule ext) + apply (simp add: state_refs_of'_def ko_wp_at'_def + split: option.split) + done + +lemma state_hyp_refs_ko_wp_at_eq: + "state_hyp_refs_of' s = (\x. {r. ko_wp_at' (\ko. r \ hyp_refs_of' ko) x s})" + apply (rule ext) + apply (simp add: state_hyp_refs_of'_def ko_wp_at'_def + split: option.split) + done + +lemma createObjects_state_refs_of'': + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ P (state_refs_of' s) \ refs_of' val = {} + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\rv s. P (state_refs_of' s)\" + apply (clarsimp simp:valid_def lookupAround2_pspace_no state_refs_ko_wp_at_eq) + apply (erule ssubst[where P = P,rotated]) + apply (rule ext) + apply (rule set_eqI) + apply clarsimp + apply (intro iffI,rule ccontr) + apply (drule_tac P1="\x. \ x" in use_valid[OF _ createObjects_orig_ko_wp_at2'[where sz = sz]]) + apply simp + apply (intro conjI) + apply simp+ + apply (drule_tac P1="\x. 
x" in use_valid[OF _ createObjects_orig_ko_wp_at2'[where sz = sz]]) + apply simp+ + done + +lemma createObjects_state_hyp_refs_of'': + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ P (state_hyp_refs_of' s) \ hyp_refs_of' val = {} + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\rv s. P (state_hyp_refs_of' s)\" + apply (clarsimp simp:valid_def lookupAround2_pspace_no state_hyp_refs_ko_wp_at_eq) + apply (erule ssubst[where P = P,rotated]) + apply (rule ext) + apply (rule set_eqI) + apply clarsimp + apply (intro iffI,rule ccontr) + apply (drule_tac P1="\x. \ x" in use_valid[OF _ createObjects_orig_ko_wp_at2'[where sz = sz]]) + apply simp + apply (intro conjI) + apply simp+ + apply (drule_tac P1="\x. x" in use_valid[OF _ createObjects_orig_ko_wp_at2'[where sz = sz]]) + apply simp+ + done + +lemma createNewCaps_state_refs_of': + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ P (state_refs_of' s)\ + createNewCaps ty ptr n us dev + \\rv s. P (state_refs_of' s)\" + unfolding createNewCaps_def + apply (clarsimp simp: AARCH64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (insert cover not_0) + apply (wp mapM_x_wp' createObjects_state_refs_of'' threadSet_state_refs_of' + | simp add: not_0 pspace_no_overlap'_def objBitsKO_def APIType_capBits_def + valid_pspace'_def makeObject_tcb makeObject_endpoint objBits_def + makeObject_notification archObjSize_def createObjects_def + curDomain_def field_simps mult_2_right + | intro conjI impI)+ + done + + +lemma createNewCaps_state_hyp_refs_of': + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ P (state_hyp_refs_of' s)\ + createNewCaps ty ptr n us dev + \\rv s. P (state_hyp_refs_of' s)\" + unfolding createNewCaps_def + apply (clarsimp simp: AARCH64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (insert cover not_0) + apply (wp mapM_x_wp' createObjects_state_hyp_refs_of'' threadSet_state_hyp_refs_of' + | simp add: not_0 pspace_no_overlap'_def objBitsKO_def APIType_capBits_def + valid_pspace'_def makeObject_tcb makeObject_vcpu objBits_def + newArchTCB_def vcpu_tcb_refs'_def makeVCPUObject_def field_simps + archObjSize_def createObjects_def curDomain_def mult_2_right + | intro conjI impI)+ + done + +lemma createObjects_iflive': + "\\s. if_live_then_nonz_cap' s \ \ live' val + \ n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\rv s. 
if_live_then_nonz_cap' s\" + apply (rule hoare_pre) + apply (simp only: if_live_then_nonz_cap'_def + ex_nonz_cap_to'_def imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + hoare_vcg_ex_lift createObjects_orig_ko_wp_at2' + createObjects_orig_cte_wp_at') + apply clarsimp + apply (intro conjI allI impI) + apply simp_all + apply (rule ccontr) + apply clarsimp + apply (drule(1) if_live_then_nonz_capE') + apply (fastforce simp: ex_nonz_cap_to'_def) + done + +lemma atcbVCPUPtr_new[simp]: + "atcbVCPUPtr newArchTCB = None" + by (simp add: newArchTCB_def) + +lemma arch_live'_KOPTE[simp]: + "arch_live' (KOPTE makeObject) = False" + by (simp add: makeObject_pte arch_live'_def) + +lemma arch_live'_KOVCPU[simp]: + "arch_live' (KOVCPU makeObject) = False" + by (simp add: makeObject_vcpu makeVCPUObject_def arch_live'_def) + +lemma createNewCaps_iflive'[wp]: + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ if_live_then_nonz_cap' s\ + createNewCaps ty ptr n us dev + \\rv s. if_live_then_nonz_cap' s\" + unfolding createNewCaps_def + apply (insert cover) + apply (clarsimp simp: toAPIType_def) + apply (cases ty, simp_all add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split)[1] + apply (rule hoare_pre, wp, simp) + apply (wp mapM_x_wp' createObjects_iflive' threadSet_iflive' + | simp add: not_0 pspace_no_overlap'_def createObjects_def live'_def hyp_live'_def + valid_pspace'_def makeObject_tcb makeObject_endpoint + makeObject_notification objBitsKO_def + APIType_capBits_def objBits_def + archObjSize_def field_simps mult_2_right + curDomain_def + split del:if_split + | simp split: if_split + | fastforce)+ + done + +lemma createObjects_pspace_only: + "\ \f s. P (ksPSpace_update f s) = P s \ + \ \P\ createObjects' ptr n val gbits \\rv. P\" + apply (simp add: createObjects_def createObjects'_def unless_def alignError_def + split_def lookupAround2_pspace_no) + apply wpsimp + done + +lemma createObjects'_qs[wp]: + "\\s. P (ksReadyQueues s)\ createObjects' ptr n val gbits \\rv s. P (ksReadyQueues s)\" + by (rule createObjects_pspace_only, simp) + +lemma createObjects'_qsL1[wp]: + "\\s. P (ksReadyQueuesL1Bitmap s)\ createObjects' ptr n val gbits \\rv s. P (ksReadyQueuesL1Bitmap s)\" + by (rule createObjects_pspace_only, simp) + +lemma createObjects'_qsL2[wp]: + "\\s. P (ksReadyQueuesL2Bitmap s)\ createObjects' ptr n val gbits \\rv s. P (ksReadyQueuesL2Bitmap s)\" + by (rule createObjects_pspace_only, simp) + +(* FIXME move these 2 to TcbAcc_R *) +lemma threadSet_qsL1[wp]: + "\\s. P (ksReadyQueuesL1Bitmap s)\ threadSet f t \\rv s. P (ksReadyQueuesL1Bitmap s)\" + by (simp add: threadSet_def | wp updateObject_default_inv)+ + +lemma threadSet_qsL2[wp]: + "\\s. P (ksReadyQueuesL2Bitmap s)\ threadSet f t \\rv s. P (ksReadyQueuesL2Bitmap s)\" + by (simp add: threadSet_def | wp updateObject_default_inv)+ + +crunches createObjects, createNewCaps + for qs[wp]: "\s. P (ksReadyQueues s)" + and qsL1[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and qsL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: crunch_simps wp: crunch_wps) + +lemma sch_act_wf_lift_asm: + assumes tcb: "\P t. \st_tcb_at' P t and Q \ f \\rv. st_tcb_at' P t\" + assumes tcbDomain: "\P t. \obj_at' (\tcb. P (tcbDomain tcb)) t and Q\ f \\rv. obj_at' (\tcb. P (tcbDomain tcb)) t\" + assumes kCT: "\P. \\s. P (ksCurThread s)\ f \\_ s. 
P (ksCurThread s)\" + assumes kCD: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes ksA: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + shows + "\\s. sch_act_wf (ksSchedulerAction s) s \ Q s\ + f + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (clarsimp simp: valid_def) + apply (rule use_valid [OF _ ksA], assumption) + apply (frule use_valid[OF _ kCT[of "(=) (ksCurThread s)" for s] refl]) + apply (frule use_valid[OF _ kCD[of "(=) (ksCurDomain s)" for s] refl]) + apply (case_tac "ksSchedulerAction s") + apply (simp add: ct_in_state'_def) + apply (drule use_valid [OF _ tcb]) + apply simp + apply simp + apply simp + apply (clarsimp simp: tcb_in_cur_domain'_def) + apply (frule use_valid [OF _ tcb], fastforce) + apply (frule use_valid [OF _ tcbDomain], fastforce) + apply auto + done + +lemma createObjects'_ct[wp]: + "\\s. P (ksCurThread s)\ createObjects' p n v us \\rv s. P (ksCurThread s)\" + by (rule createObjects_pspace_only, simp) + +crunches createObjects, doMachineOp, createNewCaps + for ct[wp]: "\s. P (ksCurThread s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + (simp: unless_def crunch_simps wp: crunch_wps) + +lemma threadSet_ko_wp_at2': + "\\s. P (ko_wp_at' P' p s) \ (\tcb_x :: tcb. P' (injectKO (F tcb_x)) = P' (injectKO tcb_x))\ + threadSet F ptr + \\_ s. P (ko_wp_at' P' p s)\" + apply (simp add: threadSet_def split del: if_split) + apply (wp setObject_ko_wp_at getObject_tcb_wp | simp add: objBits_simps')+ + apply (auto simp: ko_wp_at'_def obj_at'_def) + done + +lemma threadSet_ko_wp_at2'_futz: + "\\s. P (ko_wp_at' P' p s) \ obj_at' Q ptr s + \ (\tcb_x :: tcb. Q tcb_x \ P' (injectKO (F tcb_x)) = P' (injectKO tcb_x))\ + threadSet F ptr + \\_ s. P (ko_wp_at' P' p s)\" + apply (simp add: threadSet_def split del: if_split) + apply (wp setObject_ko_wp_at getObject_tcb_wp | simp add: objBits_simps')+ + apply (auto simp: ko_wp_at'_def obj_at'_def) + done + +lemma mapM_x_threadSet_createNewCaps_futz: + "\\s. P (ko_wp_at' P' p s) \ (\addr\set addrs. obj_at' (\tcb. \tcbQueued tcb \ tcbState tcb = Inactive) addr s) + \ (\tcb_x :: tcb. tcbQueued (F tcb_x) = tcbQueued tcb_x \ tcbState (F tcb_x) = tcbState tcb_x) + \ (\tcb_x :: tcb. \ tcbQueued tcb_x \ tcbState tcb_x = Inactive \ P' (injectKO (F tcb_x)) = P' (injectKO tcb_x))\ + mapM_x (threadSet F) addrs + \\_ s. P (ko_wp_at' P' p s)\" (is "\?PRE\ _ \\_. ?POST\") + apply (rule mapM_x_inv_wp[where P="?PRE"]) + apply simp + apply (rule hoare_pre) + apply (wp hoare_vcg_ball_lift threadSet_ko_wp_at2'[where P="id", simplified] + | wp (once) threadSet_ko_wp_at2'_futz[where Q="\tcb. \tcbQueued tcb \ tcbState tcb = Inactive"] + | simp)+ + done + +lemma createObjects_makeObject_not_tcbQueued: + assumes "range_cover ptr sz (objBitsKO tcb) n" + assumes "n \ 0" "tcb = injectKO (makeObject::tcb)" + shows "\\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ + createObjects ptr n tcb 0 + \\rv s. \addr\set rv. obj_at' (\tcb. \ tcbQueued tcb \ tcbState tcb = Structures_H.thread_state.Inactive) addr s\" + apply (rule hoare_strengthen_post[OF createObjects_ko_at_strg[where 'a=tcb]]) + using assms + apply (auto simp: obj_at'_def projectKO_opt_tcb objBitsKO_def objBits_def makeObject_tcb) + done + +lemma createObjects_ko_wp_at2: + "\\s. range_cover ptr sz (objBitsKO ko + gbits) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ P (ko_wp_at' P' p s) + \ (P' ko \ P True) + \ pspace_no_overlap' ptr sz s\ + createObjects ptr n ko gbits + \\_ s. 
P (ko_wp_at' P' p s)\" + apply (simp add: createObjects_def) + apply (wp createObjects_orig_ko_wp_at2') + apply auto + done + +lemma createNewCaps_ko_wp_atQ': + "\(\s. P (ko_wp_at' P' p s) + \ range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s) + and K (\d (tcb_x :: tcb). \tcbQueued tcb_x \ tcbState tcb_x = Inactive + \ P' (injectKO (tcb_x \ tcbDomain := d \)) = P' (injectKO tcb_x)) + and K (\v. makeObjectKO d (Inr ty) = Some v + \ P' v \ P True)\ + createNewCaps ty ptr n us d + \\rv s. P (ko_wp_at' P' p s)\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: createNewCaps_def AARCH64_H.toAPIType_def + split del: if_split) + apply (cases ty, simp_all add: Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split)[1] + apply (rule hoare_pre, wp, simp) + apply (wp mapM_x_threadSet_createNewCaps_futz + mapM_x_wp' + createObjects_obj_at + createObjects_ko_wp_at2 createObjects_makeObject_not_tcbQueued + | simp add: makeObjectKO_def objBitsKO_def archObjSize_def APIType_capBits_def + objBits_def curDomain_def field_simps mult_2_right + split del: if_split + | intro conjI impI | fastforce + | split if_split_asm)+ + done + +lemmas createNewCaps_ko_wp_at' + = createNewCaps_ko_wp_atQ'[simplified, unfolded fold_K] + +lemmas createNewCaps_obj_at2 = + createNewCaps_ko_wp_at' + [where P'="\ko. \obj :: ('a :: pspace_storable). + projectKO_opt ko = Some obj \ P' obj" for P', + folded obj_at'_real_def, + unfolded pred_conj_def, simplified] + +lemma createNewCaps_obj_at': + "\\s. obj_at' (P :: ('a :: pspace_storable) \ bool) p s + \ range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s + \ (\tcb d. \tcbQueued tcb \ tcbState tcb = Inactive \ + ((\obj :: 'a. injectKOS obj = KOTCB (tcb\tcbDomain := d\) \ P obj) \ + (\obj :: 'a. injectKOS obj = KOTCB tcb \ P obj)))\ + createNewCaps ty ptr n us d + \\rv s. obj_at' P p s\" + apply (simp add: obj_at'_real_def) + apply (wp createNewCaps_ko_wp_at') + apply (fastforce simp:project_inject) + done + +lemmas createNewCaps_pred_tcb_at' + = createNewCaps_obj_at'[where P="\ko. (Q :: 'a :: type \ bool) (proj (tcb_to_itcb' ko))" for Q proj, + folded pred_tcb_at'_def, simplified] + +lemma createNewCaps_cur: + "\range_cover ptr sz (APIType_capBits ty us) n ; n \ 0\ \ + \\s. valid_pspace' s \ + pspace_no_overlap' ptr sz s \ + cur_tcb' s\ + createNewCaps ty ptr n us d + \\rv. cur_tcb'\" + apply (rule hoare_post_imp [where Q="\rv s. \t. ksCurThread s = t \ tcb_at' t s"]) + apply (simp add: cur_tcb'_def) + apply (wp hoare_vcg_ex_lift createNewCaps_obj_at') + apply (clarsimp simp: pspace_no_overlap'_def cur_tcb'_def valid_pspace'_def) + apply auto + done + +crunch ksInterrupt[wp]: createNewCaps "\s. P (ksInterruptState s)" + (simp: crunch_simps unless_def + wp: setObject_ksInterrupt updateObject_default_inv crunch_wps) + +lemma createNewCaps_ifunsafe': + "\\s. valid_pspace' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 \ + if_unsafe_then_cap' s\ + createNewCaps ty ptr n us d + \\rv s. 
if_unsafe_then_cap' s\" + apply (simp only: if_unsafe_then_cap'_def ex_cte_cap_to'_def + imp_conv_disj) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF createNewCaps_ksInterrupt]) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + createNewCaps_cte_wp_at2 hoare_vcg_ex_lift) + apply (simp add: makeObject_cte pspace_no_overlap'_def + valid_pspace'_def) + apply auto + done + +lemma createObjects_nosch'[wp]: + "\\s. P (ksSchedulerAction s)\ + createObjects' ptr n val gbits + \\rv s. P (ksSchedulerAction s)\" + by (rule createObjects_pspace_only, simp) + +crunches createObjects, createNewCaps + for nosch[wp]: "\s. P (ksSchedulerAction s)" + and it[wp]: "\s. P (ksIdleThread s)" + (wp: setObject_ksPSpace_only updateObject_default_inv mapM_x_wp') + +lemma createObjects_idle': + "\valid_idle' and valid_pspace' and pspace_no_overlap' ptr sz + and (\s. \ case_option False (\cte. ksIdleThread s \ capRange (cteCap cte)) + (projectKO_opt val) + \ (\(getF, setF) \ ran tcb_cte_cases. + \ case_option False (\tcb. ksIdleThread s \ capRange (cteCap (getF tcb))) + (projectKO_opt val))) + and K (range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0)\ + createObjects' ptr n val gbits + \\rv. valid_idle'\" + apply (rule hoare_gen_asm) + apply (rule hoare_pre) + apply (clarsimp simp add: valid_idle'_def pred_tcb_at'_def) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_as_subst [OF createObjects'_it]) + apply (wp createObjects_orig_obj_at' + createObjects_orig_cte_wp_at2' + hoare_vcg_all_lift | simp)+ + apply (clarsimp simp: valid_idle'_def o_def pred_tcb_at'_def valid_pspace'_def + cong: option.case_cong) + apply auto + done + +lemma createNewCaps_idle'[wp]: + "\valid_idle' and valid_pspace' and pspace_no_overlap' ptr sz + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv. valid_idle'\" + apply (rule hoare_gen_asm) + apply (clarsimp simp: createNewCaps_def AARCH64_H.toAPIType_def + split del: if_split) + apply (cases ty, simp_all add: Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split)[1] + apply (wp, simp) + including classic_wp_pre + apply (wp mapM_x_wp' + createObjects_idle' + threadSet_idle' + | simp add: projectKO_opt_tcb projectKO_opt_cte mult_2 + makeObject_cte makeObject_tcb archObjSize_def + tcb_cte_cases_def objBitsKO_def APIType_capBits_def + objBits_def createObjects_def cteSizeBits_def + | simp add: field_simps + | intro conjI impI + | fastforce simp: curDomain_def)+ + done + +crunches createNewCaps + for asid_table[wp]: "\s. P (armKSASIDTable (ksArchState s))" + and vmid_table[wp]: "\s. P (armKSVMIDTable (ksArchState s))" + and cur_vcpu[wp]: "\s. P (armHSCurVCPU (ksArchState s))" + and num_list_regs[wp]: "\s. P (armKSGICVCPUNumListRegs (ksArchState s))" + and global_ksArch[wp]: "\s. P (armKSGlobalUserVSpace (ksArchState s))" + and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + (simp: crunch_simps wp: crunch_wps) + +lemma createNewCaps_global_refs': + "\\s. range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s \ valid_global_refs' s + \ 0 < gsMaxObjectSize s\ + createNewCaps ty ptr n us d + \\rv. valid_global_refs'\" + apply (simp add: valid_global_refs'_def valid_cap_sizes'_def valid_refs'_def) + apply (rule_tac Q="\rv s. \ptr. \ cte_wp_at' (\cte. 
(kernel_data_refs \ capRange (cteCap cte) \ {} + \ 2 ^ capBits (cteCap cte) > gsMaxObjectSize s)) ptr s \ global_refs' s \ kernel_data_refs" + in hoare_post_imp) + apply (auto simp: cte_wp_at_ctes_of linorder_not_less elim!: ranE)[1] + apply (rule hoare_pre) + apply (simp add: global_refs'_def) + apply (rule hoare_use_eq [where f="\s. armKSGlobalUserVSpace (ksArchState s)", + OF createNewCaps_global_ksArch]) + apply (rule hoare_use_eq [where f=ksIdleThread, OF createNewCaps_it]) + apply (rule hoare_use_eq [where f=irq_node', OF createNewCaps_ksInterrupt]) + apply (rule hoare_use_eq [where f=gsMaxObjectSize], wp) + apply (wp hoare_vcg_all_lift createNewCaps_cte_wp_at2[where sz=sz]) + apply (clarsimp simp: cte_wp_at_ctes_of global_refs'_def + makeObject_cte) + apply (auto simp: linorder_not_less ball_ran_eq) + done + +lemma koTypeOf_eq_UserDataT: + "(koTypeOf ko = UserDataT) = (ko = KOUserData)" + by (cases ko, simp_all) + +lemma createNewCaps_valid_arch_state: + "\(\s. valid_arch_state' s \ valid_pspace' s \ pspace_no_overlap' ptr sz s + \ (tp = APIObjectType ArchTypes_H.CapTableObject \ us > 0)) + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv. valid_arch_state'\" + unfolding valid_arch_state'_def valid_asid_table'_def vspace_table_at'_defs + apply (simp add: typ_at_to_obj_at_arches option_case_all_conv) + apply (wpsimp wp: hoare_vcg_const_Ball_lift createNewCaps_obj_at' + createNewCaps_ko_wp_at' hoare_vcg_all_lift + hoare_vcg_imp_lift') + apply (fastforce simp: pred_conj_def valid_pspace'_def o_def is_vcpu'_def) + done + +lemma valid_irq_handlers_cte_wp_at_form': + "valid_irq_handlers' = (\s. \irq. irq_issued' irq s \ + (\p. \ cte_wp_at' (\cte. cteCap cte = IRQHandlerCap irq) p s))" + by (auto simp: valid_irq_handlers'_def cteCaps_of_def cte_wp_at_ctes_of + fun_eq_iff ran_def) + +lemma createNewCaps_irq_handlers': + "\valid_irq_handlers' and pspace_no_overlap' ptr sz + and pspace_aligned' and pspace_distinct' + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers_cte_wp_at_form' irq_issued'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + createNewCaps_cte_wp_at2) + apply (clarsimp simp: makeObject_cte) + apply auto + done + +lemma createObjects'_irq_states' [wp]: + "\valid_irq_states'\ createObjects' a b c d \\_. valid_irq_states'\" + apply (simp add: createObjects'_def split_def) + apply (wp unless_wp|wpc|simp add: alignError_def)+ + apply fastforce + done + +crunch irq_states' [wp]: createNewCaps valid_irq_states' + (wp: crunch_wps no_irq no_irq_clearMemory simp: crunch_simps unless_def) + +crunch ksMachine[wp]: createObjects "\s. P (ksMachineState s)" + (simp: crunch_simps unless_def) + +lemma createObjects_valid_bitmaps: + "createObjects' ptr n val gbits \valid_bitmaps\" + apply (clarsimp simp: createObjects'_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. 
P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_bitmaps_def valid_bitmapQ_def bitmapQ_def bitmapQ_no_L2_orphans_def + bitmapQ_no_L1_orphans_def) + done + +lemma valid_bitmaps_gsCNodes_update[simp]: + "valid_bitmaps (gsCNodes_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_bitmaps_gsUserPages_update[simp]: + "valid_bitmaps (gsUserPages_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +crunches curDomain + for valid_bitmaps[wp]: valid_bitmaps + and sched_pointers[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + +lemma createNewCaps_valid_bitmaps: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_bitmaps s\ + createNewCaps ty ptr n us dev + \\_. valid_bitmaps\" + unfolding createNewCaps_def + apply (clarsimp simp: toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_bitmaps) + by (wpsimp wp: createObjects_valid_bitmaps[simplified o_def] mapM_x_wp + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ + +lemma createObjects_sched_queues: + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True) + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + (is "\ \s. _ \ _ \ ?Pre s \ _ \\_. _\") +proof (rule hoare_grab_asm)+ + assume not_0: "\ n = 0" + and cover: "range_cover ptr sz ((objBitsKO val) + gbits) n" + then show + "\\s. ?Pre s\ createObjects' ptr n val gbits \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + proof - + have shiftr_not_zero:" 1 \ ((of_nat n)::machine_word) << gbits" + using range_cover_not_zero_shift[OF not_0 cover,where gbits = gbits] + by (simp add:word_le_sub1) + show ?thesis + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: shiftL_nat data_map_insert_def[symmetric] + new_cap_addrs_fold'[OF shiftr_not_zero] + simp del: data_map_insert_def) + using range_cover.unat_of_nat_n_shift[OF cover, where gbits=gbits, simplified] + apply (clarsimp simp: foldr_upd_app_if) + apply (rule_tac a="tcbSchedNexts_of s" and b="tcbSchedPrevs_of s" + in rsubst2[rotated, OF sym sym, where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp split: kernel_object.splits option.splits) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp split: kernel_object.splits option.splits) + apply simp + done + qed +qed + +lemma createNewCaps_sched_queues: + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + assumes not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + createNewCaps ty ptr n us dev + \\_ s. 
P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + unfolding createNewCaps_def + apply (clarsimp simp: AARCH64_H.toAPIType_def split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (wp, simp) + apply (insert cover not_0) + apply (wpsimp wp: mapM_x_wp' createObjects_sched_queues threadSet_sched_pointers + simp: curDomain_def createObjects_def) + apply (simp add: valid_pspace'_def objBits_simps APIType_capBits_def makeObject_tcb) + by (wpsimp wp: mapM_x_wp' createObjects_sched_queues threadSet_sched_pointers + simp: createObjects_def valid_pspace'_def objBits_simps APIType_capBits_def + split_del: if_split, + fastforce simp add: mult_2 add_ac)+ + +lemma createObjects_valid_sched_pointers: + "\\s. valid_sched_pointers s + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True)\ + createObjects' ptr n val gbits + \\_. valid_sched_pointers\" + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_sched_pointers_def foldr_upd_app_if opt_pred_def opt_map_def comp_def) + apply (cases "tcb_of' val"; clarsimp) + done + +lemma createNewCaps_valid_sched_pointers: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_sched_pointers s\ + createNewCaps ty ptr n us dev + \\_. valid_sched_pointers\" + unfolding createNewCaps_def + apply (clarsimp simp: toAPIType_def split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_sched_pointers) + by (wpsimp wp: createObjects_valid_sched_pointers[simplified o_def] mapM_x_wp + threadSet_valid_sched_pointers + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ + +lemma mapM_x_threadSet_valid_pspace: + "\valid_pspace' and K (curdom \ maxDomain)\ + mapM_x (threadSet (tcbDomain_update (\_. curdom))) addrs \\y. valid_pspace'\" + apply (rule hoare_gen_asm) + apply (wp mapM_x_wp' threadSet_valid_pspace') + apply simp_all + done + +lemma createNewCaps_valid_pspace: + assumes not_0: "n \ 0" + and cover: "range_cover ptr sz (APIType_capBits ty us) n" + and sz_limit: "sz \ maxUntypedSizeBits" + and ptr_cn: "canonical_address (ptr && ~~ mask sz)" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s + \ caps_no_overlap'' ptr sz s \ ptr \ 0 \ caps_overlap_reserved' {ptr..ptr + of_nat n * 2^(APIType_capBits ty us) - 1} s \ ksCurDomain s \ maxDomain\ + createNewCaps ty ptr n us dev \\r. 
valid_pspace'\" + unfolding createNewCaps_def Arch_createNewCaps_def + using valid_obj_makeObject_rules sz_limit ptr_cn + apply (clarsimp simp: AARCH64_H.toAPIType_def + split del: if_split cong: option.case_cong) + apply (cases ty, simp_all split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split) + apply (rule hoare_pre, wp, clarsimp) + apply (insert cover) + apply (wp createObjects_valid_pspace_untyped' [OF _ not_0 , where ty="Inr ty" and sz = sz] + mapM_x_threadSet_valid_pspace mapM_x_wp' + | simp add: makeObjectKO_def APIType_capBits_def + objBits_simps not_0 createObjects_def curDomain_def + | intro conjI impI + | simp add: power_add field_simps mult_2_right + | simp add: bit_simps)+ + done + +lemma doMachineOp_return_foo: + "doMachineOp (do x\a;return () od) = (do (doMachineOp a); return () od)" + apply (clarsimp simp: doMachineOp_def bind_def gets_def + get_def return_def select_f_def split_def simpler_modify_def) + apply (rule ext)+ + apply simp + apply (rule set_eqI) + apply clarsimp + done + +lemma createNewCaps_vms: + "\pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and + K (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n) and + valid_machine_state'\ + createNewCaps ty ptr n us dev + \\archCaps. valid_machine_state'\" + apply (clarsimp simp: valid_machine_state'_def pointerInDeviceData_def + Arch_createNewCaps_def createNewCaps_def pointerInUserData_def + typ_at'_def createObjects_def doMachineOp_return_foo + split del: if_split) + apply (rule hoare_pre) + apply (wpc + | wp hoare_vcg_const_Ball_lift hoare_vcg_disj_lift + hoare_vcg_all_lift + doMachineOp_ko_wp_at' createObjects_orig_ko_wp_at2'[where sz = sz] + hoare_vcg_all_lift + dmo_lift' mapM_x_wp' threadSet_ko_wp_at2' + | clarsimp simp: createObjects_def Arch_createNewCaps_def curDomain_def Let_def + split del: if_split + | assumption)+ + apply (case_tac ty) + apply (auto simp: APIType_capBits_def objBits_simps toAPIType_def object_type.splits + field_simps mult_2_right) + done + +lemma createObjects_pspace_domain_valid': + "\\s. range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 + \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ kernel_data_refs = {} + \ pspace_domain_valid s\ + createObjects' ptr n val gbits + \\_. pspace_domain_valid\" + apply (simp add: createObjects'_def split_def unless_def) + apply (rule hoare_pre) + apply (wp | wpc | simp only: alignError_def haskell_assert_def)+ + apply (clarsimp simp: new_cap_addrs_fold' unat_1_0 unat_gt_0 + range_cover_not_zero_shift + caps_overlap_reserved'_def) + apply (simp add: pspace_domain_valid_def foldr_upd_app_if + fun_upd_def[symmetric]) + apply (subgoal_tac " \x \ set (new_cap_addrs (unat (of_nat n << gbits)) ptr val). + mask_range x (objBitsKO val) \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1}") + apply blast + + apply (rule ballI) + apply (rule new_range_subset) + apply (erule range_cover_rel, simp+) + apply (simp add: range_cover.unat_of_nat_n_shift field_simps) + done + +lemma createObjects_pspace_domain_valid: + "\\s. range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 + \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ kernel_data_refs = {} + \ pspace_domain_valid s\ + createObjects ptr n val gbits + \\_. pspace_domain_valid\" + apply (simp add: createObjects_def) + apply (wp createObjects_pspace_domain_valid'[where sz=sz]) + apply (simp add: objBits_def) + done + +lemma createNewCaps_pspace_domain_valid[wp]: + "\pspace_domain_valid and K ({ptr .. 
(ptr && ~~ mask sz) + 2 ^ sz - 1} + \ kernel_data_refs = {} + \ range_cover ptr sz (APIType_capBits ty us) n \ 0 < n)\ + createNewCaps ty ptr n us dev + \\rv. pspace_domain_valid\" + apply (simp add: createNewCaps_def) + apply (rule hoare_pre) + apply (wp createObjects_pspace_domain_valid[where sz=sz] + mapM_x_wp' + | wpc | simp add: Arch_createNewCaps_def curDomain_def Let_def + split del: if_split)+ + apply (simp add: AARCH64_H.toAPIType_def + split: object_type.splits) + apply (auto simp: objBits_simps APIType_capBits_def field_simps mult_2_right) + done + +(* FIXME: move *) +lemma ct_idle_or_in_cur_domain'_lift_futz: + assumes a: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes b: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + assumes c: "\P. \\s. P (ksIdleThread s)\ f \\_ s. P (ksIdleThread s)\" + assumes d: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes e: "\d t. \\s. obj_at' (\tcb. tcbState tcb \ Inactive \ d = tcbDomain tcb) t s \ Q s\ + f + \\_. obj_at' (\tcb. tcbState tcb \ Inactive \ d = tcbDomain tcb) t\" + shows "\ct_idle_or_in_cur_domain' and ct_active' and Q\ f \\_. ct_idle_or_in_cur_domain'\" +proof - + from e have e': + "\d t. \\s. obj_at' (\tcb. tcbState tcb \ Inactive \ d = tcbDomain tcb) t s \ Q s\ + f + \\_. obj_at' (\tcb. d = tcbDomain tcb) t\" + apply (rule hoare_strengthen_post) + apply (auto simp: obj_at'_def) + done + show ?thesis + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (rule hoare_pre) + apply (wps a b c d) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) + apply (auto simp: obj_at'_def ct_in_state'_def st_tcb_at'_def) + done +qed + +lemma createNewCaps_ct_idle_or_in_cur_domain': + "\ct_idle_or_in_cur_domain' and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and ct_active' and K (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n) \ + createNewCaps ty ptr n us dev + \\rv. ct_idle_or_in_cur_domain'\" + apply (wp ct_idle_or_in_cur_domain'_lift_futz createNewCaps_obj_at'[where sz=sz] | simp)+ + done + +lemma sch_act_wf_lift_asm_futz: + assumes tcb: "\P t. \st_tcb_at' P t and Q \ f \\rv. st_tcb_at' P t\" + assumes tcbDomain: "\P t. \obj_at' (\tcb. runnable' (tcbState tcb) \ P (tcbDomain tcb)) t and Q\ f \\rv. obj_at' (\tcb. runnable' (tcbState tcb) \ P (tcbDomain tcb)) t\" + assumes kCT: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes kCD: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes ksA: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + shows + "\\s. sch_act_wf (ksSchedulerAction s) s \ Q s\ + f + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (clarsimp simp: valid_def) + apply (rule use_valid [OF _ ksA], assumption) + apply (frule use_valid [OF _ kCT[of "(=) (ksCurThread s)" for s] refl]) + apply (frule use_valid [OF _ kCD[of "(=) (ksCurDomain s)" for s] refl]) + apply (case_tac "ksSchedulerAction s") + apply (simp add: ct_in_state'_def) + apply (drule use_valid [OF _ tcb]) + apply simp + apply simp + apply simp + apply (clarsimp simp: tcb_in_cur_domain'_def) + apply (frule use_valid [OF _ tcb], fastforce) + apply simp + apply (rename_tac word) + apply (subgoal_tac "(obj_at' (\tcb. runnable' (tcbState tcb) \ ksCurDomain b = tcbDomain tcb) word and Q) s") + apply (drule use_valid [OF _ tcbDomain], fastforce) + apply (auto simp: st_tcb_at'_def o_def obj_at'_def ko_wp_at'_def) + done + +lemma createNewCaps_sch_act_wf: + "\(\s. 
sch_act_wf (ksSchedulerAction s) s) and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and K (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n)\ + createNewCaps ty ptr n us dev + \\_ s. sch_act_wf (ksSchedulerAction s) s\" + apply (wp sch_act_wf_lift_asm_futz + createNewCaps_pred_tcb_at'[where sz=sz] + createNewCaps_obj_at'[where sz=sz] + | simp)+ + done + +lemma createObjects'_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ createObjects' ptr numObjects val gSize \\_ s. P (ksDomSchedule s)\" + apply (simp add: createObjects'_def unless_def alignError_def) + apply (wp | wpc)+ + apply simp + done + +lemma createObjects'_ksDomScheduleIdx[wp]: + "\\s. P (ksDomScheduleIdx s)\ createObjects' ptr numObjects val gSize \\_ s. P (ksDomScheduleIdx s)\" + apply (simp add: createObjects'_def unless_def alignError_def) + apply (wp | wpc)+ + apply simp + done + +crunch ksDomSchedule[wp]: createNewCaps "\s. P (ksDomSchedule s)" + (wp: mapM_x_wp' simp: crunch_simps) + +crunch ksDomScheduleIdx[wp]: createNewCaps "\s. P (ksDomScheduleIdx s)" + (wp: mapM_x_wp' simp: crunch_simps) + +lemma createObjects_null_filter': + "\\s. P (null_filter' (ctes_of s)) \ makeObjectKO dev ty = Some val \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\addrs a. P (null_filter' (ctes_of a))\" + apply (clarsimp simp: createObjects'_def split_def) + apply (wp unless_wp|wpc + | clarsimp simp: alignError_def split del: if_split simp del:fun_upd_apply)+ + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply fastforce+ + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply simp + apply assumption + apply simp + apply (subst data_map_insert_def[symmetric])+ + apply (frule(2) retype_aligned_distinct'[where ko = val]) + apply (erule range_cover_rel) + apply simp+ + apply (frule(2) retype_aligned_distinct'(2)[where ko = val]) + apply (erule range_cover_rel) + apply simp+ + apply (frule null_filter_ctes_retype + [where addrs = "(new_cap_addrs (unat (((of_nat n)::machine_word) << gbits)) ptr val)"]) + apply assumption+ + apply (clarsimp simp:field_simps foldr_upd_app_if[folded data_map_insert_def] shiftl_t2n range_cover.unat_of_nat_shift)+ + apply (rule new_cap_addrs_aligned[THEN bspec]) + apply (erule range_cover.aligned[OF range_cover_rel]) + apply simp+ + apply (clarsimp simp:shiftl_t2n field_simps range_cover.unat_of_nat_shift) + apply (drule subsetD[OF new_cap_addrs_subset,rotated]) + apply (erule range_cover_rel) + apply simp + apply simp + apply (rule ccontr) + apply clarify + apply (frule(1) pspace_no_overlapD') + apply (erule_tac B = "{x..x+2^objBitsKO y - 1}" in in_empty_interE[rotated]) + apply (drule(1) pspace_alignedD') + apply (clarsimp) + apply (erule is_aligned_no_overflow) + apply (simp del:atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff add:Int_ac ptr_add_def p_assoc_help) + apply (simp add:field_simps foldr_upd_app_if[folded data_map_insert_def] shiftl_t2n) + apply auto + done + +lemma createNewCaps_null_filter': + "\(\s. P (null_filter' (ctes_of s))) + and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0) \ + createNewCaps ty ptr n us dev + \\_ s. 
P (null_filter' (ctes_of s))\" + apply (rule hoare_gen_asm) + apply (simp add: createNewCaps_def toAPIType_def + Arch_createNewCaps_def + split del: if_split cong: option.case_cong) + apply (cases ty, simp_all split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split) + apply (rule hoare_pre, wp,simp) + apply (simp add: createObjects_def makeObjectKO_def APIType_capBits_def objBits_def + archObjSize_def curDomain_def objBits_if_dev bit_simps + split del: if_split + | wp createObjects_null_filter'[where ty = "Inr ty" and sz = sz and dev=dev] + threadSet_ctes_of mapM_x_wp' + | simp add: objBits_simps + | fastforce)+ + done + +crunch gsUntypedZeroRanges[wp]: createNewCaps "\s. P (gsUntypedZeroRanges s)" + (wp: mapM_x_wp' simp: crunch_simps) + +lemma untyped_ranges_zero_inv_null_filter: + "untyped_ranges_zero_inv (option_map cteCap o null_filter' ctes) + = untyped_ranges_zero_inv (option_map cteCap o ctes)" + apply (simp add: untyped_ranges_zero_inv_def fun_eq_iff null_filter'_def) + apply clarsimp + apply (rule_tac f="\caps. x = ran caps" for caps in arg_cong) + apply (clarsimp simp: fun_eq_iff map_comp_def untypedZeroRange_def) + done + +lemma untyped_ranges_zero_inv_null_filter_cteCaps_of: + "untyped_ranges_zero_inv (cteCaps_of s) + = untyped_ranges_zero_inv (option_map cteCap o null_filter' (ctes_of s))" + by (simp add: untyped_ranges_zero_inv_null_filter cteCaps_of_def) + +lemma createNewCaps_urz: + "\untyped_ranges_zero' + and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0) \ + createNewCaps ty ptr n us dev + \\archCaps. untyped_ranges_zero'\" + apply (simp add: untyped_ranges_zero_inv_null_filter_cteCaps_of) + apply (rule hoare_pre) + apply (rule untyped_ranges_zero_lift) + apply (wp createNewCaps_null_filter')+ + apply (auto simp: o_def) + done + +lemma createNewCaps_invs': + "\(\s. invs' s \ ct_active' s \ pspace_no_overlap' ptr sz s + \ caps_no_overlap'' ptr sz s \ ptr \ 0 + \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ kernel_data_refs = {} + \ caps_overlap_reserved' {ptr..ptr + of_nat n * 2^(APIType_capBits ty us) - 1} s + \ (ty = APIObjectType ArchTypes_H.CapTableObject \ us > 0) + \ gsMaxObjectSize s > 0) + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ sz \ maxUntypedSizeBits \ canonical_address (ptr && ~~ mask sz))\ + createNewCaps ty ptr n us dev + \\rv. invs'\" + (is "\?P and K ?Q\ ?f \\rv. invs'\") +proof (rule hoare_gen_asm, elim conjE) + assume cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0: "n \ 0" + and sz_limit: "sz \ maxUntypedSizeBits" + and ptr_cn: "canonical_address (ptr && ~~ mask sz)" + have cnc_ct_not_inQ: + "\ct_not_inQ and valid_pspace' and pspace_no_overlap' ptr sz\ + createNewCaps ty ptr n us dev \\_. ct_not_inQ\" + unfolding ct_not_inQ_def + apply (rule_tac Q="\s. ksSchedulerAction s = ResumeCurrentThread + \ (obj_at' (Not \ tcbQueued) (ksCurThread s) s + \ valid_pspace' s \ pspace_no_overlap' ptr sz s)" + in hoare_pre_imp, clarsimp) + apply (rule hoare_convert_imp [OF createNewCaps_nosch]) + apply (rule hoare_weaken_pre) + apply (wps createNewCaps_ct) + apply (wp createNewCaps_obj_at') + using cover not_0 + apply (fastforce simp: valid_pspace'_def) + done + show "\?P\ + createNewCaps ty ptr n us dev + \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def + pointerInUserData_def typ_at'_def) + apply (rule hoare_pre) + apply (wp createNewCaps_valid_pspace [OF not_0 cover sz_limit ptr_cn] + createNewCaps_state_refs_of' [OF cover not_0 ] + createNewCaps_state_hyp_refs_of' [OF cover not_0 ] + createNewCaps_iflive' [OF cover not_0 ] + irqs_masked_lift + createNewCaps_ifunsafe' + createNewCaps_cur [OF cover not_0] + createNewCaps_global_refs' + createNewCaps_valid_arch_state + valid_irq_node_lift_asm [unfolded pred_conj_def, OF _ createNewCaps_obj_at'] + createNewCaps_irq_handlers' createNewCaps_vms + createNewCaps_pred_tcb_at' cnc_ct_not_inQ + createNewCaps_ct_idle_or_in_cur_domain' + createNewCaps_sch_act_wf + createNewCaps_urz[where sz=sz] + createNewCaps_sched_queues[OF cover not_0] + createNewCaps_valid_sched_pointers + createNewCaps_valid_bitmaps + | simp)+ + using not_0 + apply (clarsimp simp: valid_pspace'_def) + using cover + apply (intro conjI) + apply simp_all + done +qed + +lemma createObjects_obj_ranges': + "\\s. (\x ko. ksPSpace s x = Some ko \ (obj_range' x ko) \ S = {}) \ + pspace_no_overlap' ptr sz s \ + pspace_aligned' s \ pspace_distinct' s \ + S \ {ptr..(ptr &&~~ mask sz) + 2^sz - 1} = {} \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ + createObjects' ptr n val gbits + \\r s. (\x ko. ksPSpace s x = Some ko \ (obj_range' x ko) \ S = {})\" + apply (simp add: createObjects'_def lookupAround2_pspace_no + alignError_def unless_def split_def del: fun_upd_apply) + apply (rule hoare_pre) + apply (wp|simp cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (subst new_cap_addrs_fold') + apply (simp add: unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply fastforce+ + apply (clarsimp simp: foldr_fun_upd_value) + apply (subgoal_tac "range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (erule(1) disjoint_subset[OF obj_range'_subset]) + apply (simp add: Int_commute) + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + +lemma createObjects_pred_tcb_at': + "\pred_tcb_at' proj P t and K (range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0) + and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz\ + createObjects ptr n val gbits \\rv. pred_tcb_at' proj P t\" + apply (simp add: pred_tcb_at'_def createObjects_def) + apply (wp createObjects_orig_obj_at') + apply auto + done + +lemma createObjects_ex_cte_cap_to [wp]: + "\\s. range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ pspace_aligned' s \ + pspace_distinct' s \ ex_cte_cap_to' p s \ pspace_no_overlap' ptr sz s\ + createObjects ptr n val gbits \\r. ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_def createObjects_def) + apply (rule hoare_lift_Pf2 [where f="irq_node'"]) + apply (wp hoare_vcg_ex_lift createObjects_orig_cte_wp_at'[where sz = sz]) + apply simp + apply wp + done + +lemma createObjects_orig_obj_at3: + "\\s. obj_at' P p s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + pspace_aligned' s \ + pspace_distinct' s \ pspace_no_overlap' ptr sz s\ + createObjects ptr n val gbits \\r. obj_at' P p\" + by (wp createObjects_orig_obj_at'[where sz = sz] | simp add: createObjects_def)+ + +lemma createObjects_sch: + "\(\s. sch_act_wf (ksSchedulerAction s) s) and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz + and K (range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0)\ + createObjects ptr n val gbits + \\rv s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (rule hoare_gen_asm) + apply (wp sch_act_wf_lift_asm createObjects_pred_tcb_at' createObjects_orig_obj_at3 | force)+ + done + +lemma createObjects_no_cte_ifunsafe': + assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" + assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" + shows + "\\s. valid_pspace' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + if_unsafe_then_cap' s\ + createObjects ptr n val gbits + \\rv s. if_unsafe_then_cap' s\" + apply (simp only: if_unsafe_then_cap'_def ex_cte_cap_to'_def + imp_conv_disj) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF createObjects_ksInterrupt]) + apply (simp add: createObjects_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_imp_lift + createObjects_orig_cte_wp_at2' hoare_vcg_ex_lift) + apply (simp add: valid_pspace'_def disj_imp) + using no_cte no_tcb + apply fastforce + done + +lemma createObjects_no_cte_valid_global: + assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" + assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" + shows "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + valid_global_refs' s\ + createObjects ptr n val gbits + \\rv s. valid_global_refs' s\" + apply (simp add: valid_global_refs'_def valid_cap_sizes'_def valid_refs'_def) + apply (rule_tac Q="\rv s. \ptr. \ cte_wp_at' (\cte. (kernel_data_refs \ capRange (cteCap cte) \ {} + \ 2 ^ capBits (cteCap cte) > gsMaxObjectSize s)) ptr s \ global_refs' s \ kernel_data_refs" + in hoare_post_imp) + apply (auto simp: cte_wp_at_ctes_of linorder_not_less elim!: ranE)[1] + apply (rule hoare_pre) + apply (simp add: global_refs'_def) + apply (rule hoare_use_eq [where f="\s. armKSGlobalUserVSpace (ksArchState s)", + OF createObjects_global_ksArch]) + apply (rule hoare_use_eq [where f=ksIdleThread, OF createObjects_it]) + apply (rule hoare_use_eq [where f=irq_node', OF createObjects_ksInterrupt]) + apply (rule hoare_use_eq [where f=gsMaxObjectSize], wp) + apply (simp add: createObjects_def) + apply (wp hoare_vcg_all_lift createObjects_orig_cte_wp_at2') + using no_cte no_tcb + apply (simp add: split_def cte_wp_at_ctes_of split: option.splits) + apply (clarsimp simp: global_refs'_def) + apply (auto simp: ball_ran_eq linorder_not_less[symmetric]) + done + +lemma createObjects'_typ_at: + "\\s. n \ 0 \ + range_cover ptr sz (objBitsKO val + gbits) n \ + typ_at' T p s \ + pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. 
typ_at' T p s\" + apply (rule hoare_grab_asm)+ + apply (simp add: createObjects'_def lookupAround2_pspace_no + alignError_def unless_def split_def typ_at'_def) + apply (subst new_cap_addrs_fold') + apply (simp add: unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply simp+ + apply (wp|wpc|simp cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (subst data_map_insert_def[symmetric]) + apply clarsimp + apply (subgoal_tac "range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (subst data_map_insert_def[symmetric])+ + apply (subst retype_ko_wp_at',simp+)+ + apply clarsimp + apply (frule(1) subsetD [OF new_cap_addrs_subset]) + apply (drule(1) pspace_no_overlap_disjoint') + apply (simp add: lookupAround2_None1) + apply (intro conjI impI allI) + apply (drule_tac x = p in spec) + apply (erule impE) + apply (erule(1) range_cover_new_cap_addrs_compare[rotated]) + apply simp + apply (fastforce simp: ko_wp_at'_def) + apply (drule_tac x = p in orthD1) + apply (clarsimp simp: ptr_add_def p_assoc_help) + apply (simp add: dom_def) + apply (fastforce simp: ko_wp_at'_def) + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + +lemma createObjects_valid_arch: + "\\s. valid_arch_state' s \ pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + createObjects ptr n val gbits + \\rv s. valid_arch_state' s\" + unfolding valid_arch_state'_def valid_asid_table'_def vspace_table_at'_defs createObjects_def + apply (simp add: typ_at_to_obj_at_arches option_case_all_conv) + apply (wpsimp wp: hoare_vcg_const_Ball_lift createNewCaps_obj_at' createObjects_orig_ko_wp_at2' + createNewCaps_ko_wp_at' hoare_vcg_all_lift + hoare_vcg_imp_lift') + apply (fastforce simp: pred_conj_def valid_pspace'_def o_def is_vcpu'_def) + done + +lemma createObjects_irq_state: + "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + valid_irq_node' (irq_node' s) s\ + createObjects ptr n val gbits + \\rv s. valid_irq_node' (irq_node' s) s\" + apply (wp valid_irq_node_lift_asm [unfolded pred_conj_def, OF _ createObjects_orig_obj_at3]) + apply auto + done + +lemma createObjects_no_cte_irq_handlers: + assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" + assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" + shows + "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + valid_irq_handlers' s\ + createObjects ptr n val gbits + \\rv s. valid_irq_handlers' s\" + apply (simp add: valid_irq_handlers_cte_wp_at_form' createObjects_def irq_issued'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + createObjects_orig_cte_wp_at2') + using no_cte no_tcb by (auto simp: split_def split: option.splits) + +lemma createObjects_cur': + "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + cur_tcb' s\ + createObjects ptr n val gbits + \\rv s. cur_tcb' s\" + apply (rule hoare_post_imp [where Q="\rv s. \t. ksCurThread s = t \ tcb_at' t s"]) + apply (simp add: cur_tcb'_def) + apply (wp hoare_vcg_ex_lift createObjects_orig_obj_at3) + apply (clarsimp simp: cur_tcb'_def) + apply auto + done + +lemma createObjects_vms'[wp]: + "\(\_. 
(range_cover ptr sz (objBitsKO val + gbits) n \ 0 < n)) and pspace_aligned' and + pspace_distinct' and pspace_no_overlap' ptr sz and valid_machine_state'\ + createObjects ptr n val gbits + \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + typ_at'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift createObjects_orig_ko_wp_at2' + | simp add: createObjects_def)+ + apply auto + done + +lemma createObjects_ct_idle_or_in_cur_domain': + "\ct_active' and valid_pspace' and pspace_no_overlap' ptr sz + and ct_idle_or_in_cur_domain' + and K (range_cover ptr sz (objBitsKO val + gSize) n \ n \ 0)\ + createObjects ptr n val gSize + \\_. ct_idle_or_in_cur_domain'\" + apply (rule hoare_gen_asm) + apply (wp ct_idle_or_in_cur_domain'_lift_futz createObjects_obj_at_other[where sz=sz]) + apply simp_all + done + +lemma untyped_zero_ranges_cte_def: + "untyped_ranges_zero_inv (cteCaps_of s) rs + = (\r. (\p. cte_wp_at' (\cte. untypedZeroRange (cteCap cte) = Some r) p s) + = (r \ rs))" + apply (clarsimp simp: untyped_ranges_zero_inv_def cte_wp_at_ctes_of + cteCaps_of_def set_eq_iff ran_def map_comp_Some_iff) + apply (safe, metis+) + done + +lemma createObjects_untyped_ranges_zero': + assumes moKO: "makeObjectKO dev ty = Some val" + shows + "\ct_active' and valid_pspace' and pspace_no_overlap' ptr sz + and untyped_ranges_zero' + and K (range_cover ptr sz (objBitsKO val + gSize) n \ n \ 0)\ + createObjects ptr n val gSize + \\_. untyped_ranges_zero'\" + apply (rule hoare_gen_asm) + apply (simp add: untyped_zero_ranges_cte_def iff_conv_conj_imp + createObjects_def) + apply (simp only: imp_conv_disj not_all not_ex) + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_conj_lift + hoare_vcg_disj_lift createObjects_orig_cte_wp_at2'[where sz=sz]) + apply (clarsimp simp: valid_pspace'_def) + apply (cut_tac moKO[symmetric]) + apply (simp add: makeObjectKO_def projectKO_opt_tcb projectKO_opt_cte + split: sum.split_asm kernel_object.split_asm + arch_kernel_object.split_asm + object_type.split_asm apiobject_type.split_asm) + apply (simp add: makeObject_tcb tcb_cte_cases_def cteSizeBits_def makeObject_cte + untypedZeroRange_def) + apply (simp add: makeObject_cte untypedZeroRange_def) + done + +lemma createObjects_no_cte_invs: + assumes moKO: "makeObjectKO dev ty = Some val" + assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" + assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" + shows + "\\s. range_cover ptr sz ((objBitsKO val) + gbits) n \ n \ 0 + \ sz \ maxUntypedSizeBits \ canonical_address (ptr && ~~ mask sz) + \ invs' s \ ct_active' s + \ pspace_no_overlap' ptr sz s \ ptr \ 0 + \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ kernel_data_refs = {} + \ caps_overlap_reserved' {ptr..ptr + of_nat (n * 2 ^ gbits * 2 ^ objBitsKO val) - 1} s + \ caps_no_overlap'' ptr sz s \ refs_of' val = {} \ hyp_refs_of' val = {} \ \ live' val\ + createObjects ptr n val gbits + \\rv. invs'\" +proof - + have co_ct_not_inQ: + "\range_cover ptr sz ((objBitsKO val) + gbits) n; n \ 0\ \ + \\s. ct_not_inQ s \ pspace_no_overlap' ptr sz s \ valid_pspace' s\ + createObjects ptr n val gbits \\_. ct_not_inQ\" + (is "\ _; _ \ \ \\s. ct_not_inQ s \ ?REST s\ _ \_\") + apply (simp add: ct_not_inQ_def) + apply (rule_tac Q="\s. 
(ksSchedulerAction s = ResumeCurrentThread) \ + (obj_at' (Not \ tcbQueued) (ksCurThread s) s \ ?REST s)" + in hoare_pre_imp, clarsimp) + apply (rule hoare_convert_imp [OF createObjects_nosch]) + apply (rule hoare_weaken_pre) + apply (wps createObjects_ct) + apply (wp createObjects_obj_at_other) + apply (simp)+ + done + show ?thesis + apply (rule hoare_grab_asm)+ + apply (clarsimp simp: invs'_def valid_state'_def) + apply wp + apply (rule hoare_pre) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def,wp createObjects_valid_pspace_untyped') + apply (wp assms | simp add: objBits_def)+ + apply (wp createObjects_sch) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_state_refs_of'') + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_state_hyp_refs_of'') + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_iflive') + apply (wp createObjects_no_cte_ifunsafe' assms) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_idle') + apply (wpsimp wp: irqs_masked_lift createObjects_no_cte_valid_global + createObjects_valid_arch createObjects_irq_state + createObjects_no_cte_irq_handlers assms) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_sched_queues) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_valid_sched_pointers) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_idle') + apply (wpsimp wp: createObjects_valid_bitmaps) + apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift + createObjects_idle' createObjects_no_cte_valid_global + createObjects_valid_arch createObjects_irq_state + createObjects_no_cte_irq_handlers createObjects_cur' + createObjects_pspace_domain_valid co_ct_not_inQ + createObjects_ct_idle_or_in_cur_domain' + createObjects_untyped_ranges_zero'[OF moKO] + assms + createObjects_sched_queues + | simp)+ + using no_cte no_tcb + apply (clarsimp simp: valid_pspace'_def) + apply (extract_conjunct \match conclusion in "pspace_no_overlap' ptr ?x _" \ -\, assumption)+ + apply (extract_conjunct \match conclusion in "range_cover ptr ?x ?y _" \ -\, assumption) + apply simp + apply (rule conjI, fastforce simp add: split_def split: option.splits) + by (auto simp: invs'_def no_tcb valid_state'_def no_cte + split: option.splits kernel_object.splits) +qed + +lemma corres_retype_update_gsI: + assumes not_zero: "n \ 0" + and aligned: "is_aligned ptr (objBitsKO ko + gbits)" + and obj_bits_api: "obj_bits_api (APIType_map2 ty) us = + objBitsKO ko + gbits" + and check: "sz < obj_bits_api (APIType_map2 ty) us \ + sz < objBitsKO ko + gbits" + and usv: "APIType_map2 ty = Structures_A.CapTableObject \ 0 < us" + and ko: "makeObjectKO dev ty = Some ko" + and orr: "obj_bits_api (APIType_map2 ty) us \ sz \ + obj_relation_retype + (default_object (APIType_map2 ty) dev us) ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + and f: "f = update_gs (APIType_map2 ty) us" + shows "corres (\rv rv'. rv' = g rv) + (\s. valid_pspace s \ pspace_no_overlap_range_cover ptr sz s + \ valid_mdb s \ valid_etcbs s \ valid_list s) + (\s. 
pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s) + (retype_region2 ptr n us (APIType_map2 ty) dev) + (do addrs \ createObjects ptr n ko gbits; + _ \ modify (f (set addrs)); + return (g addrs) + od)" + using corres_retype' [OF not_zero aligned obj_bits_api check usv ko orr cover] + by (simp add: f) + +lemma gcd_corres: "corres (=) \ \ (gets cur_domain) curDomain" + by (simp add: curDomain_def state_relation_def) + +lemma retype_region2_extra_ext_mapM_x_corres: + shows "corres dc + (valid_etcbs and (\s. \addr\set addrs. tcb_at addr s)) + (\s. \addr\set addrs. obj_at' (Not \ tcbQueued) addr s) + (retype_region2_extra_ext addrs Structures_A.apiobject_type.TCBObject) + (mapM_x (\addr. do cdom \ curDomain; + threadSet (tcbDomain_update (\_. cdom)) addr + od) + addrs)" + apply (rule corres_guard_imp) + apply (simp add: retype_region2_extra_ext_def curDomain_mapM_x_futz[symmetric] when_def) + apply (rule corres_split_eqr[OF gcd_corres]) + apply (rule_tac S="Id \ {(x, y). x \ set addrs}" + and P="\s. (\t \ set addrs. tcb_at t s) \ valid_etcbs s" + and P'="\s. \t \ set addrs. obj_at' (Not \ tcbQueued) t s" + in corres_mapM_x) + apply simp + apply (rule corres_guard_imp) + apply (rule ethread_set_corres, simp_all add: etcb_relation_def non_exst_same_def)[1] + apply (case_tac tcb') + apply simp + apply fastforce + apply (fastforce simp: obj_at'_def) + apply (wp hoare_vcg_ball_lift | simp)+ + apply (clarsimp simp: obj_at'_def) + apply fastforce + apply auto[1] + apply (wp | simp add: curDomain_def)+ + done + +lemma retype_region2_extra_ext_trivial: + "ty \ APIType_map2 (Inr (APIObjectType apiobject_type.TCBObject)) + \ retype_region2_extra_ext ptrs ty = return ()" +by (simp add: retype_region2_extra_ext_def when_def APIType_map2_def) + +lemma retype_region2_retype_region_PageTableObject: + "retype_region ptr n us (APIType_map2 (Inr PageTableObject)) dev = + (retype_region2 ptr n us (APIType_map2 (Inr PageTableObject)) dev :: obj_ref list det_ext_monad)" + by (simp add: retype_region2_ext_retype_region retype_region2_extra_ext_def when_def + APIType_map2_def) + +lemma retype_region2_valid_etcbs[wp]:"\valid_etcbs\ retype_region2 a b c d dev \\_. valid_etcbs\" + apply (simp add: retype_region2_def) + apply (simp add: retype_region2_ext_def bind_assoc) + apply wp + apply (clarsimp simp del: fun_upd_apply) + apply (blast intro: valid_etcb_fold_update) + done + +lemma retype_region2_obj_at: + assumes tytcb: "ty = Structures_A.apiobject_type.TCBObject" + shows "\\\ retype_region2 ptr n us ty dev \\rv s. \x \ set rv. tcb_at x s\" + using tytcb unfolding retype_region2_def + apply (simp only: return_bind bind_return foldr_upd_app_if fun_app_def K_bind_def) + apply (wp dxo_wp_weak | simp)+ + apply (auto simp: obj_at_def default_object_def is_tcb_def) + done + +lemma createObjects_Not_tcbQueued: + "\range_cover ptr sz (objBitsKO (injectKOS (makeObject::tcb))) n; n \ 0\ \ + \\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ + createObjects ptr n (KOTCB makeObject) 0 + \\ptrs s. \addr\set ptrs. 
obj_at' (Not \ tcbQueued) addr s\" + apply (rule hoare_strengthen_post[OF createObjects_ko_at_strg[where val = "(makeObject :: tcb)"]]) + apply (auto simp: obj_at'_def project_inject objBitsKO_def objBits_def makeObject_tcb) + done + +lemma init_arch_objects_APIType_map2_noop: + "init_arch_objects (APIType_map2 tp) ptr n m addrs = return ()" + apply (simp add: init_arch_objects_def APIType_map2_def) + done + +lemma data_page_relation_retype: + "obj_relation_retype (ArchObj (DataPage False pgsz)) KOUserData" + "obj_relation_retype (ArchObj (DataPage True pgsz)) KOUserDataDevice" + apply (simp_all add: obj_relation_retype_def shiftl_t2n mult_ac + objBits_simps pbfs_atleast_pageBits) + apply (clarsimp simp: image_def)+ + done + +lemma corres_retype_region_createNewCaps: + "corres ((\r r'. length r = length r' \ list_all2 cap_relation r r') + \ map (\ref. default_cap (APIType_map2 (Inr ty)) ref us dev)) + (\s. valid_pspace s \ valid_mdb s \ valid_etcbs s \ valid_list s \ valid_arch_state s + \ caps_no_overlap y sz s \ pspace_no_overlap_range_cover y sz s + \ caps_overlap_reserved {y..y + of_nat n * 2 ^ (obj_bits_api (APIType_map2 (Inr ty)) us) - 1} s + \ (\slot. cte_wp_at (\c. up_aligned_area y sz \ cap_range c \ cap_is_device c = dev) slot s) + \ (APIType_map2 (Inr ty) = Structures_A.CapTableObject \ 0 < us)) + (\s. pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' y sz s + \ valid_pspace' s \ valid_arch_state' s + \ range_cover y sz (obj_bits_api (APIType_map2 (Inr ty)) us) n \ n\ 0) + (do x \ retype_region y n us (APIType_map2 (Inr ty)) dev :: obj_ref list det_ext_monad; + init_arch_objects (APIType_map2 (Inr ty)) y n us x; + return x od) + (createNewCaps ty y n us dev)" + apply (rule_tac F="range_cover y sz (obj_bits_api (APIType_map2 (Inr ty)) us) n + \ n \ 0 \ (APIType_map2 (Inr ty) = Structures_A.CapTableObject \ 0 < us)" + in corres_req, simp) + apply (clarsimp simp add: createNewCaps_def toAPIType_def split del: if_split cong: if_cong) + apply (subst init_arch_objects_APIType_map2) + apply (cases ty, simp_all add: Arch_createNewCaps_def split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split) + \ \Untyped\ + apply (simp add: retype_region_def obj_bits_api_def APIType_map2_def + split del: if_split cong: if_cong) + apply (subst upto_enum_red') + apply (drule range_cover_not_zero[rotated]) + apply simp + apply unat_arith + apply (clarsimp simp: list_all2_same enum_word_def range_cover.unat_of_nat_n + list_all2_map1 list_all2_map2 ptr_add_def fromIntegral_def + toInteger_nat fromInteger_nat) + apply (subst unat_of_nat_minus_1) + apply (rule le_less_trans[OF range_cover.range_cover_n_le(2) power_strict_increasing]) + apply simp + apply (clarsimp simp: range_cover_def) + apply (arith+)[4] + \ \TCB, EP, NTFN\ + apply (simp_all add: retype_region2_ext_retype_region + bind_cong[OF curDomain_mapM_x_futz refl, unfolded bind_assoc] + split del: if_split)[8] + apply (rule corres_guard_imp) + apply (rule corres_split_eqr) + apply (rule corres_retype[where 'a = tcb], + simp_all add: obj_bits_api_def objBits_simps' pageBits_def + APIType_map2_def makeObjectKO_def + tcb_relation_retype)[1] + apply (fastforce simp: range_cover_def) + apply (simp add: tcb_relation_retype) + apply (rule corres_split_nor) + apply (simp add: APIType_map2_def) + apply (rule retype_region2_extra_ext_mapM_x_corres) + apply (rule corres_trivial, simp) + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 + objBits_simps APIType_map2_def) 
+ apply wp + apply wp + apply ((wp retype_region2_obj_at | simp add: APIType_map2_def)+)[1] + apply ((wp createObjects_Not_tcbQueued[where sz=sz] + | simp add: APIType_map2_def objBits_simps' obj_bits_api_def)+)[1] + apply simp + apply simp + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: liftM_def[symmetric] split del: if_split) + apply (rule corres_rel_imp) + apply (rule corres_guard_imp) + apply (rule corres_retype[where 'a = endpoint], + simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def + makeObjectKO_def other_objs_default_relation)[1] + apply (fastforce simp: range_cover_def) + apply simp + apply simp + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 objBits_simps + APIType_map2_def) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: liftM_def[symmetric] split del: if_split) + apply (rule corres_rel_imp) + apply (rule corres_guard_imp) + apply (rule corres_retype[where 'a = notification], + simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def + makeObjectKO_def other_objs_default_relation)[1] + apply (fastforce simp: range_cover_def) + apply simp + apply simp + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 objBits_simps + APIType_map2_def) + \ \CapTable\ + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (subst bind_assoc_return_reverse[of "createObjects y n (KOCTE makeObject) us"]) + apply (subst liftM_def[of "map (\addr. capability.CNodeCap addr us 0 0)", symmetric]) + apply simp + apply (rule corres_rel_imp) + apply (rule corres_guard_imp) + apply (rule corres_retype_update_gsI, + simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def + makeObjectKO_def slot_bits_def field_simps ext)[1] + apply (simp add: range_cover_def) + apply (rule captable_relation_retype,simp add: range_cover_def word_bits_def) + apply simp + apply simp + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 objBits_simps + allRights_def APIType_map2_def + split del: if_split) + apply (in_case \HugePageObject\) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def] split del: if_split) + apply (rule corres_rel_imp) + apply (simp add: init_arch_objects_APIType_map2_noop split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_retype_update_gsI; + clarsimp simp: obj_bits_api_def3 APIType_map2_def objBits_simps ext + default_object_def default_arch_object_def makeObjectKO_def + data_page_relation_retype bit_simps + elim!: range_cover.aligned; + assumption) + apply fastforce+ + apply (simp add: APIType_map2_def arch_default_cap_def vm_read_write_def vmrights_map_def + list_all2_map1 list_all2_map2 list_all2_same) + apply (in_case \VSpaceObject\) + apply (subst retype_region2_extra_ext_trivial, simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def]) + apply (rule corres_guard_imp) + apply (simp add: init_arch_objects_APIType_map2_noop) + apply (rule corres_rel_imp) + apply (rule corres_retype_update_gsI; + (simp add: APIType_map2_def objBits_simps makeObjectKO_def obj_bits_api_def + range_cover.aligned default_arch_object_def pt_bits_def)?) 
+ apply (rule vsroot_relation_retype) + apply (rule ext)+ + apply (rename_tac s, case_tac s, rename_tac arch machine, case_tac arch) + apply (fastforce simp: update_gs_def) + apply (clarsimp simp: list_all2_map1 list_all2_map2 list_all2_same + APIType_map2_def arch_default_cap_def) + apply fastforce+ + apply (in_case \SmallPageObject\) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def] split del: if_split) + apply (rule corres_rel_imp) + apply (simp add: init_arch_objects_APIType_map2_noop split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_retype_update_gsI; + clarsimp simp: obj_bits_api_def3 APIType_map2_def objBits_simps ext + default_object_def default_arch_object_def makeObjectKO_def + data_page_relation_retype + elim!: range_cover.aligned; + assumption) + apply fastforce+ + apply (simp add: APIType_map2_def arch_default_cap_def vm_read_write_def vmrights_map_def + list_all2_map1 list_all2_map2 list_all2_same) + apply (in_case \LargePageObject\) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def] split del: if_split) + apply (rule corres_rel_imp) + apply (simp add: init_arch_objects_APIType_map2_noop split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_retype_update_gsI; + clarsimp simp: obj_bits_api_def3 APIType_map2_def objBits_simps ext + default_object_def default_arch_object_def makeObjectKO_def + data_page_relation_retype + elim!: range_cover.aligned; + assumption) + apply fastforce+ + apply (simp add: APIType_map2_def arch_default_cap_def vm_read_write_def vmrights_map_def + list_all2_map1 list_all2_map2 list_all2_same) + apply (in_case \PageTableObject\) + apply (subst retype_region2_ext_retype_region) + apply (subst retype_region2_extra_ext_trivial, simp add: APIType_map2_def) + apply (simp_all add: corres_liftM2_simp[unfolded liftM_def]) + apply (rule corres_guard_imp) + apply (simp add: init_arch_objects_APIType_map2_noop) + apply (rule corres_rel_imp) + apply (rule corres_retype_update_gsI; + (simp add: APIType_map2_def objBits_simps makeObjectKO_def obj_bits_api_def + range_cover.aligned default_arch_object_def pt_bits_def)?) 
+ apply (rule pagetable_relation_retype) + apply (rule ext)+ + apply (rename_tac s, case_tac s, rename_tac arch machine, case_tac arch) + apply (fastforce simp: update_gs_def) + apply (clarsimp simp: list_all2_map1 list_all2_map2 list_all2_same + APIType_map2_def arch_default_cap_def) + apply fastforce+ + apply (in_case \VCPUObject\) + apply (subst retype_region2_ext_retype_region) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def] split del: if_split) + apply (rule corres_rel_imp) + apply (simp add: init_arch_objects_APIType_map2_noop split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_retype[where 'a = vcpu], + simp_all add: obj_bits_api_def objBits_simps pageBits_def default_arch_object_def + APIType_map2_def makeObjectKO_def other_objs_default_relation)[1] + apply (fastforce simp: range_cover_def) + apply (simp add: no_gs_types_def) + apply (auto simp add: obj_relation_retype_def range_cover_def objBitsKO_def arch_kobj_size_def default_object_def + archObjSize_def pageBits_def obj_bits_def cte_level_bits_def default_arch_object_def + other_obj_relation_def vcpu_relation_def default_vcpu_def makeObject_vcpu + makeVCPUObject_def default_gic_vcpu_interface_def vgic_map_def)[1] + apply simp+ + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 + objBits_simps APIType_map2_def arch_default_cap_def) + done + +end +end diff --git a/proof/refine/AARCH64/Schedule_R.thy b/proof/refine/AARCH64/Schedule_R.thy new file mode 100644 index 0000000000..0f2365dc4a --- /dev/null +++ b/proof/refine/AARCH64/Schedule_R.thy @@ -0,0 +1,2510 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Schedule_R +imports VSpace_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare hoare_weak_lift_imp[wp_split del] + +(* Levity: added (20090713 10:04:12) *) +declare sts_rel_idle [simp] + +lemma corres_if2: + "\ G = G'; G \ corres r P P' a c; \ G' \ corres r Q Q' b d \ + \ corres r (if G then P else Q) (if G' then P' else Q') (if G then a else b) (if G' then c else d)" + by simp + +lemma findM_awesome': + assumes x: "\x xs. suffix (x # xs) xs' \ + corres (\a b. if b then (\a'. a = Some a' \ r a' (Some x)) else a = None) + P (P' (x # xs)) + ((f >>= (\x. return (Some x))) \ (return None)) (g x)" + assumes y: "corres r P (P' []) f (return None)" + assumes z: "\x xs. suffix (x # xs) xs' \ + \P' (x # xs)\ g x \\rv s. \ rv \ P' xs s\" + assumes p: "suffix xs xs'" + shows "corres r P (P' xs) f (findM g xs)" +proof - + have P: "f = do x \ (do x \ f; return (Some x) od) \ return None; if x \ None then return (the x) else f od" + apply (rule ext) + apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) + done + have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. 
if rv \ None then \ else P\" + by (wp | simp)+ + show ?thesis using p + apply (induct xs) + apply (simp add: y del: dc_simp) + apply (simp only: findM.simps) + apply (subst P) + apply (rule corres_guard_imp) + apply (rule corres_split[OF x]) + apply assumption + apply (rule corres_if2) + apply (case_tac ra, clarsimp+)[1] + apply (rule corres_trivial, clarsimp) + apply (case_tac ra, simp_all)[1] + apply (erule(1) meta_mp [OF _ suffix_ConsD]) + apply (rule Q) + apply (rule hoare_post_imp [OF _ z]) + apply simp+ + done +qed + +lemmas findM_awesome = findM_awesome' [OF _ _ _ suffix_order.refl] + +(* Levity: added (20090721 10:56:29) *) +declare objBitsT_koTypeOf [simp] + +lemma vs_lookup_pages_vcpu_update: + "typ_at (AArch AVCPU) vcpuPtr s \ + vs_lookup_target level asid vref (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = + vs_lookup_target level asid vref s" + unfolding vs_lookup_target_def vs_lookup_slot_def vs_lookup_table_def + apply (prop_tac "asid_pools_of s vcpuPtr = None", clarsimp simp: opt_map_def obj_at_def) + apply (prop_tac "pts_of s vcpuPtr = None", clarsimp simp: opt_map_def obj_at_def) + apply (fastforce simp: obind_assoc intro!: obind_eqI) + done + +lemma valid_vs_lookup_vcpu_update: + "typ_at (AArch AVCPU) vcpuPtr s \ + valid_vs_lookup (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = valid_vs_lookup s" + by (clarsimp simp: valid_vs_lookup_def caps_of_state_VCPU_update vs_lookup_pages_vcpu_update) + +lemma set_vpcu_valid_vs_lookup[wp]: + "set_vcpu vcpuPtr vcpu \\s. P (valid_vs_lookup s)\" + by (wpsimp wp: set_vcpu_wp simp: valid_vs_lookup_vcpu_update) + +lemma set_vcpu_vmid_inv[wp]: + "set_vcpu vcpuPtr vcpu \\s. P (vmid_inv s)\" + unfolding vmid_inv_def + by (wp_pre, wps, wpsimp, simp) + +lemma vmid_inv_cur_vcpu[simp]: + "vmid_inv (s\arch_state := arch_state s\arm_current_vcpu := x\\) = vmid_inv s" + by (simp add: vmid_inv_def) + +lemma set_vcpu_valid_asid_table[wp]: + "set_vcpu ptr vcpu \valid_asid_table\" + apply (wpsimp wp: set_vcpu_wp) + apply (prop_tac "asid_pools_of s ptr = None") + apply (clarsimp simp: obj_at_def opt_map_def) + apply simp + done + +crunches vcpu_switch + for valid_vs_lookup[wp]: "\s. P (valid_vs_lookup s)" + and vmid_inv[wp]: vmid_inv + and valid_vmid_table[wp]: valid_vmid_table + and valid_asid_table[wp]: valid_asid_table + and global_pt[wp]: "\s. P (global_pt s)" + and valid_uses[wp]: valid_uses + (simp: crunch_simps wp: crunch_wps) + +lemma vcpu_switch_valid_global_arch_objs[wp]: + "vcpu_switch v \valid_global_arch_objs\" + by (wp valid_global_arch_objs_lift) + +crunches set_vm_root + for pspace_distinct[wp]: pspace_distinct + (simp: crunch_simps) + +(* FIXME AARCH64: move to TcbAcc_R *) +lemma ko_tcb_cross: + "\ ko_at (TCB tcb) t s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ \tcb'. ko_at' tcb' t s' \ tcb_relation tcb tcb'" + apply (frule (1) pspace_distinct_cross, fastforce simp: state_relation_def) + apply (frule pspace_aligned_cross, fastforce simp: state_relation_def) + apply (prop_tac "tcb_at t s", clarsimp simp: st_tcb_at_def obj_at_def is_tcb) + apply (drule (2) tcb_at_cross, fastforce simp: state_relation_def) + apply normalise_obj_at' + apply (clarsimp simp: state_relation_def pspace_relation_def obj_at_def) + apply (drule bspec, fastforce) + apply (clarsimp simp: tcb_relation_cut_def obj_at'_def) + done + +(* FIXME AARCH64: move *) +lemma ko_vcpu_cross: + "\ ko_at (ArchObj (VCPU vcpu)) p s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ \vcpu'. 
ko_at' vcpu' p s' \ vcpu_relation vcpu vcpu'" + apply (frule (1) pspace_distinct_cross, fastforce simp: state_relation_def) + apply (frule pspace_aligned_cross, fastforce simp: state_relation_def) + apply (clarsimp simp: obj_at_def) + apply (clarsimp simp: state_relation_def pspace_relation_def obj_at_def) + apply (drule bspec, fastforce) + apply (clarsimp simp: other_obj_relation_def + split: kernel_object.splits arch_kernel_object.splits) + apply (prop_tac "ksPSpace s' p \ None") + apply (prop_tac "p \ pspace_dom (kheap s)") + apply (fastforce intro!: set_mp[OF pspace_dom_dom]) + apply fastforce + apply (fastforce simp: obj_at'_def objBits_simps dest: pspace_alignedD pspace_distinctD') + done + +(* FIXME AARCH64: move *) +lemma vcpu_at_cross: + "\ vcpu_at p s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ vcpu_at' p s'" + apply (drule vcpu_at_ko, clarsimp) + apply (drule (3) ko_vcpu_cross) + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + done + +lemma arch_switchToThread_corres: + "corres dc (valid_arch_state and valid_objs and pspace_aligned and pspace_distinct + and valid_vspace_objs and pspace_in_kernel_window and tcb_at t) + (no_0_obj') + (arch_switch_to_thread t) (Arch.switchToThread t)" + unfolding arch_switch_to_thread_def AARCH64_H.switchToThread_def + apply (corres corres: getObject_TCB_corres vcpuSwitch_corres + term_simp: tcb_relation_def arch_tcb_relation_def) + apply (wpsimp wp: vcpu_switch_pred_tcb_at getObject_tcb_wp simp: tcb_at_st_tcb_at)+ + apply (clarsimp simp: valid_arch_state_def st_tcb_at_def obj_at_def get_tcb_def) + apply (rule conjI) + apply clarsimp + apply (erule (1) valid_objsE) + apply (clarsimp simp: valid_obj_def valid_tcb_def valid_arch_tcb_def obj_at_def) + apply (clarsimp simp: cur_vcpu_def in_omonad) + apply normalise_obj_at' + apply (clarsimp simp: st_tcb_at_def obj_at_def is_tcb) + apply (frule (2) ko_tcb_cross[rotated], simp add: obj_at_def) + apply normalise_obj_at' + apply (rule conjI; clarsimp) + apply (rule vcpu_at_cross; assumption?) + apply (erule (1) valid_objsE) + apply (clarsimp simp: valid_obj_def valid_tcb_def valid_arch_tcb_def tcb_relation_def + arch_tcb_relation_def) + apply (rule vcpu_at_cross; assumption?) + apply (prop_tac "cur_vcpu s", clarsimp simp: valid_arch_state_def) + apply (clarsimp simp: state_relation_def arch_state_relation_def cur_vcpu_def in_omonad obj_at_def) + done + +lemma schedule_choose_new_thread_sched_act_rct[wp]: + "\\\ schedule_choose_new_thread \\rs s. 
scheduler_action s = resume_cur_thread\" + unfolding schedule_choose_new_thread_def + by wp + +\ \This proof shares many similarities with the proof of @{thm tcbSchedEnqueue_corres}\ +lemma tcbSchedAppend_corres: + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_append tcb_ptr) (tcbSchedAppend tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (clarsimp simp: tcb_sched_action_def tcb_sched_append_def get_tcb_queue_def + tcbSchedAppend_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce + apply clarsimp + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) + + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. 
P'" and P'=P' for P P']) + + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueueAppend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: obj_at'_def) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) + + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp simp: setQueue_def tcbQueueAppend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply fast + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def opt_pred_def opt_map_def) + apply (metis inQ_def option.simps(5) tcb_of'_TCB) + apply (intro conjI impI; clarsimp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply clarsimp + apply (drule_tac x="the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority 
etcb)))" + in spec) + subgoal by (auto simp: in_opt_pred opt_map_red) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: tcbQueueEmpty_def) + apply (case_tac "t = tcbPtr") + apply (clarsimp simp: inQ_def fun_upd_apply split: if_splits) + apply (case_tac "t = the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: inQ_def opt_pred_def fun_upd_apply) + apply (clarsimp simp: inQ_def in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (intro conjI; clarsimp) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply opt_map_def obj_at'_def + queue_end_valid_def prev_queue_head_def + split: if_splits option.splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_append[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def opt_map_def split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def fun_upd_apply queue_end_valid_def split: if_splits) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply split: if_splits) + by (clarsimp simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def split: if_splits) + +lemma tcbQueueAppend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_objs'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + apply (clarsimp simp: tcbQueueEmpty_def valid_bound_tcb'_def split: option.splits) + done + +lemma tcbSchedAppend_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. 
valid_objs'\" + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: threadSet_valid_objs' threadGet_wp hoare_vcg_all_lift) + apply (normalise_obj_at', rename_tac tcb "end") + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: tcbQueueEmpty_def obj_at'_def) + done + +crunches tcbSchedAppend, tcbSchedDequeue + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + (wp: threadSet_pred_tcb_no_state simp: unless_def tcb_to_itcb'_def) + +(* FIXME move *) +lemmas obj_at'_conjI = obj_at_conj' + +crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue + for tcb_at'[wp]: "tcb_at' t" + and cap_to'[wp]: "ex_nonz_cap_to' p" + and ifunsafe'[wp]: if_unsafe_then_cap' + (wp: crunch_wps simp: crunch_simps) + +lemma tcbSchedAppend_iflive'[wp]: + "\if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbSchedAppend_def + apply (wpsimp wp: tcbQueueAppend_if_live_then_nonz_cap' threadGet_wp simp: bitmap_fun_defs) + apply (frule_tac p=tcbPtr in if_live_then_nonz_capE') + apply (fastforce simp: ko_wp_at'_def st_tcb_at'_def obj_at'_def runnable_eq_active' live'_def) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: obj_at'_tcbQueueEnd_ksReadyQueues + simp: ko_wp_at'_def inQ_def obj_at'_def tcbQueueEmpty_def live'_def) + done + +lemma tcbSchedDequeue_iflive'[wp]: + "\if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. if_live_then_nonz_cap'\" + apply (simp add: tcbSchedDequeue_def) + apply (wpsimp wp: tcbQueueRemove_if_live_then_nonz_cap' threadGet_wp) + apply (fastforce elim: if_live_then_nonz_capE' simp: obj_at'_def ko_wp_at'_def live'_def) + done + +crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue + for typ_at'[wp]: "\s. P (typ_at' T p s)" + and tcb_at'[wp]: "tcb_at' t" + and ctes_of[wp]: "\s. P (ctes_of s)" + and ksInterrupt[wp]: "\s. P (ksInterruptState s)" + and irq_states[wp]: valid_irq_states' + and irq_node'[wp]: "\s. P (irq_node' s)" + and ct'[wp]: "\s. P (ksCurThread s)" + and global_refs'[wp]: valid_global_refs' + and ifunsafe'[wp]: if_unsafe_then_cap' + and cap_to'[wp]: "ex_nonz_cap_to' p" + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" + and idle'[wp]: valid_idle' + (simp: unless_def crunch_simps obj_at'_def wp: getObject_tcb_wp) + +lemma tcbSchedEnqueue_vms'[wp]: + "\valid_machine_state'\ tcbSchedEnqueue t \\_. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedEnqueue_ksMachine) + done + +lemma tcbSchedEnqueue_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ tcbSchedEnqueue t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp + apply (clarsimp simp: tcbSchedEnqueue_def) + apply (wpsimp simp: unless_def)+ + done + +lemma ct_idle_or_in_cur_domain'_lift2: + "\ \t. \tcb_in_cur_domain' t\ f \\_. tcb_in_cur_domain' t\; + \P. \\s. P (ksCurThread s) \ f \\_ s. 
P (ksCurThread s) \; + \P. \\s. P (ksIdleThread s) \ f \\_ s. P (ksIdleThread s) \; + \P. \\s. P (ksSchedulerAction s) \ f \\_ s. P (ksSchedulerAction s) \\ + \ \ ct_idle_or_in_cur_domain'\ f \\_. ct_idle_or_in_cur_domain' \" + apply (unfold ct_idle_or_in_cur_domain'_def) + apply (rule hoare_lift_Pf2[where f=ksCurThread]) + apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) + including no_pre + apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift) + apply simp+ + done + +lemma threadSet_mdb': + "\valid_mdb' and obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF (f t)) t\ + threadSet f t + \\rv. valid_mdb'\" + apply (wpsimp wp: setObject_tcb_mdb' getTCB_wp simp: threadSet_def obj_at'_def) + apply fastforce + done + +lemma tcbSchedNext_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbSchedNext_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbSchedPrev_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueueRemove_valid_mdb': + "\\s. valid_mdb' s \ valid_objs' s\ tcbQueueRemove q tcbPtr \\_. valid_mdb'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def obj_at'_def) + done + +lemma tcbQueuePrepend_valid_mdb': + "\valid_mdb' and tcb_at' tcbPtr + and (\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_valid_mdb': + "\\s. valid_mdb' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueEnd queue)) s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueued_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbQueued_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma valid_mdb'_ksReadyQueuesL1Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL1Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma valid_mdb'_ksReadyQueuesL2Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL2Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma tcbSchedEnqueue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. valid_mdb'\" + apply (clarsimp simp: tcbSchedEnqueue_def setQueue_def) + apply (wpsimp wp: tcbQueuePrepend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + apply normalise_obj_at' + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + +crunches tcbSchedEnqueue + for cur_tcb'[wp]: cur_tcb' + (wp: threadSet_cur) + +lemma tcbSchedEnqueue_invs'[wp]: + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedEnqueue t + \\_. 
invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedEnqueue_ct_not_inQ + simp: cteCaps_of_def o_def) + done + +crunch ksMachine[wp]: tcbSchedAppend "\s. P (ksMachineState s)" + (simp: unless_def) + +lemma tcbSchedAppend_vms'[wp]: + "\valid_machine_state'\ tcbSchedAppend t \\_. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +crunch pspace_domain_valid[wp]: tcbSchedAppend "pspace_domain_valid" + (simp: unless_def) + +crunch ksCurDomain[wp]: tcbSchedAppend "\s. P (ksCurDomain s)" +(simp: unless_def) + +crunch ksIdleThread[wp]: tcbSchedAppend "\s. P (ksIdleThread s)" +(simp: unless_def) + +crunch ksDomSchedule[wp]: tcbSchedAppend "\s. P (ksDomSchedule s)" +(simp: unless_def) + +lemma tcbQueueAppend_tcbPriority_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbQueueAppend_tcbDomain_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbSchedAppend_tcbDomain[wp]: + "tcbSchedAppend t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (clarsimp simp: tcbSchedAppend_def) + by wpsimp + +lemma tcbSchedAppend_tcbPriority[wp]: + "tcbSchedAppend t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + apply (clarsimp simp: tcbSchedAppend_def) + by wpsimp + +lemma tcbSchedAppend_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ tcbSchedAppend t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp+ + done + +crunch ksDomScheduleIdx[wp]: tcbSchedAppend "\s. P (ksDomScheduleIdx s)" + (simp: unless_def) + +crunches tcbSchedAppend, tcbSchedDequeue + for gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + (simp: unless_def) + +crunches tcbSchedDequeue, tcbSchedAppend + for arch'[wp]: "\s. P (ksArchState s)" + +lemma tcbSchedAppend_sch_act_wf[wp]: + "tcbSchedAppend thread \\s. sch_act_wf (ksSchedulerAction s) s\" + by (wpsimp wp: sch_act_wf_lift) + +lemma tcbSchedAppend_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedAppend tcbPtr \\_. valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedAppend_def + apply (wpsimp simp: tcbQueueAppend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp hoare_vcg_if_lift2) + apply (clarsimp simp: ksReadyQueues_asrt_def split: if_splits) + apply normalise_obj_at' + apply (force dest: tcbQueueHead_iff_tcbQueueEnd + simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def) + done + +lemma tcbSchedAppend_valid_mdb'[wp]: + "\valid_mdb' and valid_tcbs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. 
valid_mdb'\" + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: tcbQueueAppend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + apply (fastforce dest: obj_at'_tcbQueueEnd_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + +lemma tcbSchedAppend_valid_bitmaps[wp]: + "tcbSchedAppend tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) + done + +lemma tcbSchedAppend_invs'[wp]: + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedAppend t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma tcbSchedAppend_all_invs_but_ct_not_inQ': + "\invs'\ + tcbSchedAppend t + \\_. all_invs_but_ct_not_inQ'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma tcbSchedEnqueue_invs'_not_ResumeCurrentThread: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s \ ResumeCurrentThread)\ + tcbSchedEnqueue t + \\_. invs'\" + by wpsimp + +lemma tcbSchedAppend_invs'_not_ResumeCurrentThread: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s \ ResumeCurrentThread)\ + tcbSchedAppend t + \\_. invs'\" + by wpsimp + +lemma tcb_at'_has_tcbDomain: + "tcb_at' t s \ \p. obj_at' (\tcb. tcbDomain tcb = p) t s" + by (clarsimp simp add: obj_at'_def) + +crunch ksMachine[wp]: tcbSchedDequeue "\s. P (ksMachineState s)" + (simp: unless_def) + +lemma tcbSchedDequeue_vms'[wp]: + "\valid_machine_state'\ tcbSchedDequeue t \\_. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +crunch pspace_domain_valid[wp]: tcbSchedDequeue "pspace_domain_valid" + +crunch ksCurDomain[wp]: tcbSchedDequeue "\s. P (ksCurDomain s)" +(simp: unless_def) + +crunch ksIdleThread[wp]: tcbSchedDequeue "\s. P (ksIdleThread s)" +(simp: unless_def) + +crunch ksDomSchedule[wp]: tcbSchedDequeue "\s. P (ksDomSchedule s)" +(simp: unless_def) + +lemma tcbSchedDequeue_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ tcbSchedDequeue t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp + apply (clarsimp simp: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: hoare_when_weak_wp getObject_tcb_wp threadGet_wp) + done + +crunch ksDomScheduleIdx[wp]: tcbSchedDequeue "\s. P (ksDomScheduleIdx s)" + (simp: unless_def) + +lemma tcbSchedDequeue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs'\ tcbSchedDequeue tcbPtr \\_. valid_mdb'\" + unfolding tcbSchedDequeue_def + apply (wpsimp simp: bitmap_fun_defs setQueue_def wp: threadSet_mdb' tcbQueueRemove_valid_mdb') + apply (rule_tac Q="\_. 
tcb_at' tcbPtr" in hoare_post_imp) + apply (fastforce simp: tcb_cte_cases_def cteSizeBits_def) + apply (wpsimp wp: threadGet_wp)+ + apply (fastforce simp: obj_at'_def) + done + +lemma tcbSchedDequeue_invs'[wp]: + "tcbSchedDequeue t \invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma ready_qs_runnable_cross: + "\(s, s') \ state_relation; pspace_aligned s; pspace_distinct s; valid_queues s\ + \ ready_qs_runnable s'" + apply (clarsimp simp: ready_qs_runnable_def) + apply normalise_obj_at' + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply clarsimp + apply (drule_tac x=t in bspec) + apply (fastforce simp: inQ_def in_opt_pred obj_at'_def opt_map_red) + apply (fastforce dest: st_tcb_at_runnable_cross simp: obj_at'_def st_tcb_at'_def) + done + +method add_ready_qs_runnable = + rule_tac Q'=ready_qs_runnable in corres_cross_add_guard, + (clarsimp simp: pred_conj_def)?, + (frule valid_sched_valid_queues)?, (frule invs_psp_aligned)?, (frule invs_distinct)?, + fastforce dest: ready_qs_runnable_cross + +defs idleThreadNotQueued_def: + "idleThreadNotQueued s \ obj_at' (Not \ tcbQueued) (ksIdleThread s) s" + +lemma idle_thread_not_queued: + "\valid_idle s; valid_queues s; valid_etcbs s\ + \ \ (\d p. idle_thread s \ set (ready_queues s d p))" + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (drule_tac x="idle_thread s" in bspec) + apply fastforce + apply (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def valid_etcbs_def) + done + +lemma valid_idle_tcb_at: + "valid_idle s \ tcb_at (idle_thread s) s" + by (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def is_tcb_def) + +lemma setCurThread_corres: + "corres dc (valid_idle and valid_queues and valid_etcbs and pspace_aligned and pspace_distinct) \ + (modify (cur_thread_update (\_. t))) (setCurThread t)" + apply (clarsimp simp: setCurThread_def) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (clarsimp simp: idleThreadNotQueued_def) + apply (frule (2) idle_thread_not_queued) + apply (frule state_relation_pspace_relation) + apply (frule state_relation_ready_queues_relation) + apply (frule state_relation_idle_thread) + apply (frule valid_idle_tcb_at) + apply (frule (3) tcb_at_cross) + apply (fastforce dest!: in_ready_q_tcbQueued_eq[THEN arg_cong_Not, THEN iffD1] + simp: obj_at'_def opt_pred_def opt_map_def) + apply (rule corres_modify) + apply (simp add: state_relation_def swp_def) + done + +crunches vcpuEnable, vcpuDisable, vcpuSave, vcpuRestore + for typ_at' [wp]: "\s. P (typ_at' T p s)" + (simp: crunch_simps + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma vcpuSwitch_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ vcpuSwitch param_a \\_ s. P (typ_at' T p s) \" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma arch_switch_thread_tcb_at'[wp]: + "\tcb_at' t\ Arch.switchToThread t \\_. 
tcb_at' t\" + by (unfold AARCH64_H.switchToThread_def, wp typ_at_lift_tcb') + +lemma updateASIDPoolEntry_pred_tcb_at'[wp]: + "updateASIDPoolEntry f asid \pred_tcb_at' proj P t'\" + unfolding updateASIDPoolEntry_def getPoolPtr_def + by (wpsimp wp: setASIDPool_pred_tcb_at' getASID_wp) + +crunches updateASIDPoolEntry + for tcbs_of'[wp]: "\s. P (tcbs_of' s)" + (wp: getASID_wp crunch_wps) + +crunches setVMRoot + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t'" + (simp: crunch_simps wp: crunch_wps) + +crunches vcpuSwitch + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t'" + (simp: crunch_simps wp: crunch_wps) + +crunches Arch.switchToThread + for typ_at'[wp]: "\s. P (typ_at' T p s)" + +lemma Arch_switchToThread_pred_tcb'[wp]: + "Arch.switchToThread t \\s. P (pred_tcb_at' proj P' t' s)\" +proof - + have pos: "\P t t'. Arch.switchToThread t \pred_tcb_at' proj P t'\" + by (wpsimp simp: AARCH64_H.switchToThread_def) + show ?thesis + apply (rule P_bool_lift [OF pos]) + by (rule lift_neg_pred_tcb_at' [OF ArchThreadDecls_H_AARCH64_H_switchToThread_typ_at' pos]) +qed + +crunches storeWordUser, setVMRoot, asUser, storeWordUser, Arch.switchToThread + for ksQ[wp]: "\s. P (ksReadyQueues s p)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_objs'[wp]: valid_objs' + (wp: crunch_wps threadSet_sched_pointers getObject_tcb_wp getASID_wp + simp: crunch_simps obj_at'_def) + +crunches arch_switch_to_thread, arch_switch_to_idle_thread + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + and ready_qs_distinct[wp]: ready_qs_distinct + and valid_idle[wp]: valid_idle + (wp: ready_qs_distinct_lift simp: crunch_simps) + +lemma valid_queues_in_correct_ready_q[elim!]: + "valid_queues s \ in_correct_ready_q s" + by (clarsimp simp: valid_queues_def in_correct_ready_q_def) + +lemma valid_queues_ready_qs_distinct[elim!]: + "valid_queues s \ ready_qs_distinct s" + by (clarsimp simp: valid_queues_def ready_qs_distinct_def) + +lemma switchToThread_corres: + "corres dc (valid_arch_state and valid_objs + and valid_vspace_objs and pspace_aligned and pspace_distinct + and valid_vs_lookup and valid_global_objs and pspace_in_kernel_window + and unique_table_refs + and st_tcb_at runnable t and valid_etcbs and (\s. sym_refs (state_hyp_refs_of s)) + and valid_queues and valid_idle) + (no_0_obj' and sym_heap_sched_pointers and valid_objs') + (switch_to_thread t) (switchToThread t)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply add_ready_qs_runnable + apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) + apply (rule corres_symb_exec_l[OF _ _ get_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_l[OF _ _ assert_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) 
+ apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce dest!: state_relation_ready_queues_relation intro: ksReadyQueues_asrt_cross) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply fastforce + apply (rule corres_guard_imp) + apply (rule corres_split[OF arch_switchToThread_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres setCurThread_corres]) + apply (wpsimp simp: is_tcb_def)+ + apply (fastforce intro!: st_tcb_at_tcb_at) + apply wpsimp + apply wpsimp + apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def) + apply wpsimp + apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def) + done + +lemma tcb_at_idle_thread_lift: + assumes T: "\T' t. \typ_at T' t\ f \\rv. typ_at T' t\" + assumes I: "\P. \\s. P (idle_thread s)\ f \\rv s. P (idle_thread s)\" + shows "\\s. tcb_at (idle_thread s) s \ f \\rv s. tcb_at (idle_thread s) s\" + apply (simp add: tcb_at_typ) + apply (rule hoare_lift_Pf[where f=idle_thread]) + by (wpsimp wp: T I)+ + +lemma tcb_at'_ksIdleThread_lift: + assumes T: "\T' t. \typ_at' T' t\ f \\rv. typ_at' T' t\" + assumes I: "\P. \\s. P (ksIdleThread s)\ f \\rv s. P (ksIdleThread s)\" + shows "\\s. tcb_at' (ksIdleThread s) s \ f \\rv s. tcb_at' (ksIdleThread s) s\" + apply (simp add: tcb_at_typ_at') + apply (rule hoare_lift_Pf[where f=ksIdleThread]) + by (wpsimp wp: T I)+ + +crunches vcpu_update, vgic_update, vcpu_disable, vcpu_restore, vcpu_enable + for valid_asid_map[wp]: valid_asid_map + (simp: crunch_simps wp: crunch_wps) + +lemma setGlobalUserVSpace_corres[corres]: + "corres dc valid_global_arch_objs \ set_global_user_vspace setGlobalUserVSpace" + unfolding set_global_user_vspace_def setGlobalUserVSpace_def + apply (subst o_def) (* unfold fun_comp on abstract side only to get global_pt abbrev *) + apply corres + done + +lemma arch_switchToIdleThread_corres: + "corres dc + (valid_arch_state and pspace_aligned and pspace_distinct) + (no_0_obj') + arch_switch_to_idle_thread Arch.switchToIdleThread" + unfolding arch_switch_to_idle_thread_def switchToIdleThread_def + apply (corres corres: vcpuSwitch_corres) + apply (clarsimp simp: valid_arch_state_def cur_vcpu_def in_omonad obj_at_def) + apply clarsimp + apply (rule vcpu_at_cross; assumption?) + apply (clarsimp simp: valid_arch_state_def cur_vcpu_def in_omonad obj_at_def state_relation_def + arch_state_relation_def) + done + +lemma switchToIdleThread_corres: + "corres dc + (invs and valid_queues and valid_etcbs) + invs_no_cicd' + switch_to_idle_thread switchToIdleThread" + apply (simp add: switch_to_idle_thread_def Thread_H.switchToIdleThread_def) + apply add_ready_qs_runnable + apply (rule corres_stateAssert_ignore, fastforce) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIdleThread_corres]) + apply (rule corres_split[OF arch_switchToIdleThread_corres]) + apply clarsimp + apply (rule setCurThread_corres) + apply wpsimp + apply (simp add: state_relation_def cdt_relation_def) + apply wpsimp+ + apply (simp add: invs_unique_refs invs_valid_vs_lookup invs_valid_objs invs_valid_asid_map + invs_arch_state invs_valid_global_objs invs_psp_aligned invs_distinct + invs_valid_idle invs_vspace_objs) + apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_pspace'_def) + done + +lemma gq_sp: "\P\ getQueue d p \\rv. P and (\s. ksReadyQueues s (d, p) = rv)\" + by (unfold getQueue_def, rule gets_sp) + +lemma sch_act_wf: + "sch_act_wf sa s = ((\t. 
sa = SwitchToThread t \ st_tcb_at' runnable' t s \ + tcb_in_cur_domain' t s) \ + (sa = ResumeCurrentThread \ ct_in_state' activatable' s))" + by (case_tac sa, simp_all add: ) + +declare gq_wp[wp] +declare setQueue_obj_at[wp] + +lemma threadSet_timeslice_invs: + "\invs' and tcb_at' t\ threadSet (tcbTimeSlice_update b) t \\rv. invs'\" + by (wp threadSet_invs_trivial, simp_all add: inQ_def cong: conj_cong) + +lemma setCurThread_invs_no_cicd': + "\invs_no_cicd' and st_tcb_at' activatable' t and obj_at' (\x. \ tcbQueued x) t and tcb_in_cur_domain' t\ + setCurThread t + \\rv. invs'\" +proof - + have ct_not_inQ_ct: "\s t . \ ct_not_inQ s; obj_at' (\x. \ tcbQueued x) t s\ \ ct_not_inQ (s\ ksCurThread := t \)" + apply (simp add: ct_not_inQ_def o_def) + done + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp add: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def + valid_state'_def sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def ct_not_inQ_ct + ct_idle_or_in_cur_domain'_def bitmapQ_defs valid_bitmaps_def + cong: option.case_cong) + done +qed + +(* Don't use this rule when considering the idle thread. The invariant ct_idle_or_in_cur_domain' + says that either "tcb_in_cur_domain' t" or "t = ksIdleThread s". + Use setCurThread_invs_idle_thread instead. *) +lemma setCurThread_invs: + "\invs' and st_tcb_at' activatable' t and obj_at' (\x. \ tcbQueued x) t and + tcb_in_cur_domain' t\ setCurThread t \\rv. invs'\" + by (rule hoare_pre, rule setCurThread_invs_no_cicd') + (simp add: invs'_to_invs_no_cicd'_def) + +lemma setCurThread_invs_no_cicd'_idle_thread: + "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\_. invs'\" + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def + valid_state'_def valid_idle'_def + sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_queues_def bitmapQ_defs valid_bitmaps_def pred_tcb_at'_def + cong: option.case_cong) + apply (clarsimp simp: idle_tcb'_def ct_not_inQ_def ps_clear_def obj_at'_def st_tcb_at'_def + idleThreadNotQueued_def) + done + +lemma setCurThread_invs_idle_thread: + "\invs' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" + by (rule hoare_pre, rule setCurThread_invs_no_cicd'_idle_thread) + (clarsimp simp: invs'_to_invs_no_cicd'_def all_invs_but_ct_idle_or_in_cur_domain'_def) + +lemma Arch_switchToThread_invs[wp]: + "\invs' and tcb_at' t\ Arch.switchToThread t \\rv. invs'\" + unfolding AARCH64_H.switchToThread_def by (wpsimp wp: getObject_tcb_hyp_sym_refs) + +crunch ksCurDomain[wp]: "Arch.switchToThread" "\s. P (ksCurDomain s)" + (simp: crunch_simps wp: crunch_wps getASID_wp) + +crunches Arch.switchToThread + for obj_at_tcb'[wp]: "obj_at' (\tcb::tcb. P tcb) t" + (wp: crunch_wps getASID_wp simp: crunch_simps) + +lemma Arch_switchToThread_tcb_in_cur_domain'[wp]: + "Arch.switchToThread t \tcb_in_cur_domain' t'\" + by (wp tcb_in_cur_domain'_lift) + +lemma tcbSchedDequeue_not_tcbQueued: + "\\\ tcbSchedDequeue t \\_. obj_at' (\x. \ tcbQueued x) t\" + apply (simp add: tcbSchedDequeue_def) + apply (wp|clarsimp)+ + apply (rule_tac Q="\queued. obj_at' (\x. 
tcbQueued x = queued) t" in hoare_post_imp) + apply (clarsimp simp: obj_at'_def) + apply (wpsimp wp: threadGet_wp)+ + apply (clarsimp simp: obj_at'_def) + done + +lemma asUser_obj_at[wp]: + "asUser t' f \obj_at' (P \ tcbState) t\" + apply (wpsimp simp: asUser_def threadGet_stateAssert_gets_asUser) + apply (simp add: asUser_fetch_def obj_at'_def) + done + +declare doMachineOp_obj_at[wp] + +crunch valid_arch_state'[wp]: asUser "valid_arch_state'" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_irq_states'[wp]: asUser "valid_irq_states'" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_machine_state'[wp]: asUser "valid_machine_state'" +(wp: crunch_wps simp: crunch_simps) + +lemma asUser_valid_irq_node'[wp]: + "asUser t (setRegister f r) \\s. valid_irq_node' (irq_node' s) s\" + apply (rule_tac valid_irq_node_lift) + apply (simp add: asUser_def) + apply (wpsimp wp: crunch_wps)+ + done + +crunch irq_masked'_helper: asUser "\s. P (intStateIRQTable (ksInterruptState s))" +(wp: crunch_wps simp: crunch_simps) + +lemma asUser_irq_masked'[wp]: + "\irqs_masked'\ asUser t (setRegister f r) + \\_ . irqs_masked'\" + apply (rule irqs_masked_lift) + apply (rule asUser_irq_masked'_helper) + done + +lemma asUser_ct_not_inQ[wp]: + "\ct_not_inQ\ asUser t (setRegister f r) + \\_ . ct_not_inQ\" + apply (clarsimp simp: submonad_asUser.fn_is_sm submonad_fn_def) + apply (rule bind_wp)+ + prefer 4 + apply (rule stateAssert_sp) + prefer 3 + apply (rule gets_inv) + defer + apply (rule select_f_inv) + apply (case_tac rv; simp) + apply (clarsimp simp: asUser_replace_def obj_at'_def fun_upd_def + split: option.split kernel_object.split) + apply wp + apply (clarsimp simp: ct_not_inQ_def obj_at'_def objBitsKO_def ps_clear_def dom_def) + apply (rule conjI; clarsimp; blast) + done + +crunch pspace_domain_valid[wp]: asUser "pspace_domain_valid" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_dom_schedule'[wp]: asUser "valid_dom_schedule'" +(wp: crunch_wps simp: crunch_simps) + +crunch gsUntypedZeroRanges[wp]: asUser "\s. P (gsUntypedZeroRanges s)" + (wp: crunch_wps simp: unless_def) + +crunch ctes_of[wp]: asUser "\s. P (ctes_of s)" + (wp: crunch_wps simp: unless_def) + +lemmas asUser_cteCaps_of[wp] = cteCaps_of_ctes_of_lift[OF asUser_ctes_of] + +lemma asUser_utr[wp]: + "\untyped_ranges_zero'\ asUser f t \\_. untyped_ranges_zero'\" + apply (simp add: cteCaps_of_def) + apply (rule hoare_pre, wp untyped_ranges_zero_lift) + apply (simp add: o_def) + done + +lemma threadSet_invs_no_cicd'_trivialT: + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + "\tcb. 
atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows "threadSet F t \invs_no_cicd'\" + apply (simp add: invs_no_cicd'_def valid_state'_def) + apply (wp threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_cur + untyped_ranges_zero_lift + | clarsimp simp: assms cteCaps_of_def valid_arch_tcb'_def | rule refl)+ + by (auto simp: o_def) + +lemmas threadSet_invs_no_cicd'_trivial = + threadSet_invs_no_cicd'_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] + +lemma asUser_invs_no_cicd'[wp]: + "\invs_no_cicd'\ asUser t m \\rv. invs_no_cicd'\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps | simp)+ + apply (wp threadSet_invs_no_cicd'_trivial hoare_drop_imps | simp)+ + done + +lemma Arch_switchToThread_invs_no_cicd': + "\invs_no_cicd'\ Arch.switchToThread t \\rv. invs_no_cicd'\" + apply (wpsimp wp: getObject_tcb_hyp_sym_refs setVMRoot_invs_no_cicd' + simp: AARCH64_H.switchToThread_def) + by (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def) + +lemma tcbSchedDequeue_invs_no_cicd'[wp]: + "tcbSchedDequeue t \invs_no_cicd'\" + unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_pspace'_def + apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def)+ + apply clarsimp + done + +lemma switchToThread_invs_no_cicd': + "\invs_no_cicd' and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" + apply (simp add: Thread_H.switchToThread_def) + apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued + Arch_switchToThread_invs_no_cicd' Arch_switchToThread_pred_tcb') + apply (auto elim!: pred_tcb'_weakenE) + done + +lemma switchToThread_invs[wp]: + "\invs' and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" + apply (simp add: Thread_H.switchToThread_def ) + apply (wp threadSet_timeslice_invs setCurThread_invs + Arch_switchToThread_invs dmo_invs' + doMachineOp_obj_at tcbSchedDequeue_not_tcbQueued) + by (auto elim!: pred_tcb'_weakenE) + +lemma setCurThread_ct_in_state: + "\obj_at' (P \ tcbState) t\ setCurThread t \\rv. ct_in_state' P\" +proof - + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (simp add: ct_in_state'_def pred_tcb_at'_def o_def) + done +qed + +lemma switchToThread_ct_in_state[wp]: + "\obj_at' (P \ tcbState) t\ switchToThread t \\rv. ct_in_state' P\" +proof - + show ?thesis + apply (simp add: Thread_H.switchToThread_def tcbSchedEnqueue_def unless_def) + apply (wp setCurThread_ct_in_state + | simp add: o_def cong: if_cong)+ + done +qed + +lemma setCurThread_obj_at[wp]: + "\obj_at' P addr\ setCurThread t \\rv. obj_at' P addr\" + apply (simp add: setCurThread_def) + apply wp + apply (fastforce intro: obj_at'_pspaceI) + done + +lemma dmo_cap_to'[wp]: + "\ex_nonz_cap_to' p\ + doMachineOp m + \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma sct_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setCurThread t \\rv. 
ex_nonz_cap_to' p\" + apply (simp add: setCurThread_def) + apply (wpsimp wp: ex_nonz_cap_to_pres') + done + +lemma setVCPU_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setObject p' (v::vcpu) \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +crunches + vcpuDisable, vcpuRestore, vcpuEnable, vcpuSaveRegRange, vgicUpdateLR, vcpuSave, vcpuSwitch + for cap_to'[wp]: "ex_nonz_cap_to' p" + (ignore: doMachineOp wp: crunch_wps) + +crunches updateASIDPoolEntry + for cap_to'[wp]: "ex_nonz_cap_to' p" + (wp: crunch_wps ex_nonz_cap_to_pres' getASID_wp) + +crunch cap_to'[wp]: "Arch.switchToThread" "ex_nonz_cap_to' p" + (simp: crunch_simps wp: crunch_wps) + +crunch cap_to'[wp]: switchToThread "ex_nonz_cap_to' p" + (simp: crunch_simps) + +lemma no_longer_inQ[simp]: + "\ inQ d p (tcbQueued_update (\x. False) tcb)" + by (simp add: inQ_def) + +lemma iflive_inQ_nonz_cap_strg: + "if_live_then_nonz_cap' s \ obj_at' (inQ d prio) t s + \ ex_nonz_cap_to' t s" + by (clarsimp simp: obj_at'_real_def inQ_def live'_def + elim!: if_live_then_nonz_capE' ko_wp_at'_weakenE) + +lemmas iflive_inQ_nonz_cap[elim] + = mp [OF iflive_inQ_nonz_cap_strg, OF conjI[rotated]] + +declare Cons_eq_tails[simp] + +crunch ksCurDomain[wp]: "ThreadDecls_H.switchToThread" "\s. P (ksCurDomain s)" + +(* FIXME move *) +lemma obj_tcb_at': + "obj_at' (\tcb::tcb. P tcb) t s \ tcb_at' t s" + by (clarsimp simp: obj_at'_def) + +lemma setThreadState_rct: + "\\s. (runnable' st \ ksCurThread s \ t) + \ ksSchedulerAction s = ResumeCurrentThread\ + setThreadState st t + \\_ s. ksSchedulerAction s = ResumeCurrentThread\" + apply (simp add: setThreadState_def) + apply (rule hoare_pre_disj') + apply (rule bind_wp [OF _ + hoare_vcg_conj_lift + [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] + threadSet_nosch]]) + apply (rule bind_wp [OF _ + hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) + apply (clarsimp simp: when_def) + apply (case_tac rv) + apply (clarsimp, wp)[1] + apply (clarsimp) + apply (rule bind_wp [OF _ + hoare_vcg_conj_lift + [OF threadSet_ct threadSet_nosch]]) + apply (rule bind_wp [OF _ isRunnable_inv]) + apply (rule bind_wp [OF _ + hoare_vcg_conj_lift + [OF gct_wp gct_wp]]) + apply (rename_tac ct) + apply (case_tac "ct\t") + apply (clarsimp simp: when_def) + apply (wp)[1] + apply (clarsimp) + done + +lemma bitmapQ_lookupBitmapPriority_simp: (* neater unfold, actual unfold is really ugly *) + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ \ + bitmapQ d (lookupBitmapPriority d s) s = + (ksReadyQueuesL1Bitmap s d !! word_log2 (ksReadyQueuesL1Bitmap s d) \ + ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d))) !! 
+ word_log2 (ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d)))))" + unfolding bitmapQ_def lookupBitmapPriority_def + apply (drule word_log2_nth_same, clarsimp) + apply (drule (1) bitmapQ_no_L1_orphansD, clarsimp) + apply (drule word_log2_nth_same, clarsimp) + apply (frule test_bit_size[where n="word_log2 (ksReadyQueuesL2Bitmap _ _)"]) + apply (clarsimp simp: numPriorities_def wordBits_def word_size) + apply (subst prioToL1Index_l1IndexToPrio_or_id) + apply (subst unat_of_nat_eq) + apply (fastforce intro: unat_less_helper word_log2_max[THEN order_less_le_trans] + simp: wordRadix_def word_size l2BitmapSize_def')+ + apply (subst prioToL1Index_l1IndexToPrio_or_id) + apply (fastforce intro: unat_less_helper word_log2_max of_nat_mono_maybe + simp: wordRadix_def word_size l2BitmapSize_def')+ + apply (simp add: word_ao_dist) + apply (subst less_mask_eq) + apply (rule word_of_nat_less) + apply (fastforce intro: word_of_nat_less simp: wordRadix_def' unat_of_nat word_size)+ + done + +lemma bitmapQ_from_bitmap_lookup: + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + valid_bitmapQ s ; bitmapQ_no_L1_orphans s + \ + \ bitmapQ d (lookupBitmapPriority d s) s" + apply (simp add: bitmapQ_lookupBitmapPriority_simp) + apply (drule word_log2_nth_same) + apply (drule (1) bitmapQ_no_L1_orphansD) + apply (fastforce dest!: word_log2_nth_same + simp: word_ao_dist lookupBitmapPriority_def word_size numPriorities_def + wordBits_def) + done + +lemma lookupBitmapPriority_obj_at': + "\ksReadyQueuesL1Bitmap s d \ 0; valid_bitmapQ s; bitmapQ_no_L1_orphans s; + ksReadyQueues_asrt s; ready_qs_runnable s; pspace_aligned' s; pspace_distinct' s\ + \ obj_at' (inQ d (lookupBitmapPriority d s) and runnable' \ tcbState) + (the (tcbQueueHead (ksReadyQueues s (d, lookupBitmapPriority d s)))) s" + apply (drule (2) bitmapQ_from_bitmap_lookup) + apply (simp add: valid_bitmapQ_bitmapQ_simp) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def tcbQueueEmpty_def) + apply (drule_tac x=d in spec) + apply (drule_tac x="lookupBitmapPriority d s" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (fastforce simp: obj_at'_and ready_qs_runnable_def obj_at'_def st_tcb_at'_def inQ_def + tcbQueueEmpty_def) + done + +lemma bitmapL1_zero_ksReadyQueues: + "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ + \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. 
tcbQueueEmpty (ksReadyQueues s (d, p)))" + apply (cases "ksReadyQueuesL1Bitmap s d = 0") + apply (force simp add: bitmapQ_def valid_bitmapQ_def) + apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) + done + +lemma prioToL1Index_le_mask: + "\ prioToL1Index p = prioToL1Index p' ; p && mask wordRadix \ p' && mask wordRadix \ + \ p \ p'" + unfolding prioToL1Index_def + apply (simp add: wordRadix_def word_le_nat_alt[symmetric]) + apply (drule shiftr_eq_neg_mask_eq) + apply (metis add.commute word_and_le2 word_plus_and_or_coroll2 word_plus_mono_left) + done + +lemma prioToL1Index_le_index: + "\ prioToL1Index p \ prioToL1Index p' ; prioToL1Index p \ prioToL1Index p' \ + \ p \ p'" + unfolding prioToL1Index_def + apply (simp add: wordRadix_def word_le_nat_alt[symmetric]) + apply (erule (1) le_shiftr') + done + +lemma bitmapL1_highest_lookup: + "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; + bitmapQ d p' s \ + \ p' \ lookupBitmapPriority d s" + apply (subgoal_tac "ksReadyQueuesL1Bitmap s d \ 0") + prefer 2 + apply (clarsimp simp add: bitmapQ_def) + apply (case_tac "prioToL1Index (lookupBitmapPriority d s) = prioToL1Index p'") + apply (rule prioToL1Index_le_mask, simp) + apply (frule (2) bitmapQ_from_bitmap_lookup) + apply (clarsimp simp: bitmapQ_lookupBitmapPriority_simp) + apply (clarsimp simp: bitmapQ_def lookupBitmapPriority_def) + apply (subst mask_or_not_mask[where n=wordRadix and x=p', symmetric]) + apply (subst word_bw_comms(2)) (* || commute *) + apply (simp add: word_ao_dist mask_AND_NOT_mask mask_twice) + apply (subst less_mask_eq[where x="of_nat _"]) + apply (subst word_less_nat_alt) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max]) + apply (simp add: word_size) + apply (rule order_less_le_trans[OF word_log2_max]) + apply (simp add: word_size wordRadix_def') + apply (subst word_le_nat_alt) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size) + apply (rule word_log2_highest) + apply (subst (asm) prioToL1Index_l1IndexToPrio_or_id) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size wordRadix_def') + apply (simp add: word_size wordRadix_def') + apply (drule (1) bitmapQ_no_L1_orphansD[where d=d and i="word_log2 _"]) + apply (simp add: l2BitmapSize_def') + apply simp + apply (rule prioToL1Index_le_index[rotated], simp) + apply (frule (2) bitmapQ_from_bitmap_lookup) + apply (clarsimp simp: bitmapQ_lookupBitmapPriority_simp) + apply (clarsimp simp: bitmapQ_def lookupBitmapPriority_def) + apply (subst prioToL1Index_l1IndexToPrio_or_id) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size wordRadix_def') + apply (fastforce dest: bitmapQ_no_L1_orphansD + simp: wordBits_def numPriorities_def word_size wordRadix_def' l2BitmapSize_def') + apply (erule word_log2_highest) + done + +lemma bitmapQ_ksReadyQueuesI: + "\ bitmapQ d p s ; valid_bitmapQ s \ \ \ tcbQueueEmpty (ksReadyQueues s (d, p))" + unfolding valid_bitmapQ_def by simp + +lemma getReadyQueuesL2Bitmap_inv[wp]: + "\ P \ getReadyQueuesL2Bitmap d i \\_. P\" + unfolding getReadyQueuesL2Bitmap_def by wp + +lemma switchToThread_lookupBitmapPriority_wp: + "\\s. 
invs_no_cicd' s \ bitmapQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) s \ + t = the (tcbQueueHead (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)))\ + ThreadDecls_H.switchToThread t + \\rv. invs'\" + apply (simp add: Thread_H.switchToThread_def) + apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued + Arch_switchToThread_invs_no_cicd') + apply (auto elim!: pred_tcb'_weakenE) + apply (prop_tac "valid_bitmapQ s") + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_bitmaps_def) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def valid_bitmapQ_bitmapQ_simp) + apply (drule_tac x="ksCurDomain s" in spec) + apply (drule_tac x="lookupBitmapPriority (ksCurDomain s) s" in spec) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) + done + +lemma switchToIdleThread_invs_no_cicd': + "\invs_no_cicd'\ switchToIdleThread \\rv. invs'\" + apply (clarsimp simp: Thread_H.switchToIdleThread_def AARCH64_H.switchToIdleThread_def) + apply (wp setCurThread_invs_no_cicd'_idle_thread setVMRoot_invs_no_cicd' vcpuSwitch_it') + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_idle'_def) + done + +crunch obj_at'[wp]: "Arch.switchToIdleThread" "obj_at' (P :: ('a :: no_vcpu) \ bool) t" + + +declare hoare_weak_lift_imp_conj[wp_split del] + +lemma setCurThread_const: + "\\_. P t \ setCurThread t \\_ s. P (ksCurThread s) \" + by (simp add: setCurThread_def | wp)+ + + + +crunch it[wp]: switchToIdleThread "\s. P (ksIdleThread s)" +crunch it[wp]: switchToThread "\s. P (ksIdleThread s)" + +lemma switchToIdleThread_curr_is_idle: + "\\\ switchToIdleThread \\rv s. ksCurThread s = ksIdleThread s\" + apply (rule hoare_weaken_pre) + apply (wps switchToIdleThread_it) + apply (simp add: Thread_H.switchToIdleThread_def) + apply (wp setCurThread_const) + apply (simp) + done + +lemma chooseThread_it[wp]: + "\\s. P (ksIdleThread s)\ chooseThread \\_ s. P (ksIdleThread s)\" + supply if_split[split del] + by (wpsimp simp: chooseThread_def curDomain_def bitmap_fun_defs) + +lemma threadGet_inv [wp]: "\P\ threadGet f t \\rv. P\" + apply (simp add: threadGet_def) + apply (wp | simp)+ + done + +lemma corres_split_sched_act: + "\sched_act_relation act act'; + corres r P P' f1 g1; + \t. corres r (Q t) (Q' t) (f2 t) (g2 t); + corres r R R' f3 g3\ + \ corres r (case act of resume_cur_thread \ P + | switch_thread t \ Q t + | choose_new_thread \ R) + (case act' of ResumeCurrentThread \ P' + | SwitchToThread t \ Q' t + | ChooseThread \ R') + (case act of resume_cur_thread \ f1 + | switch_thread t \ f2 t + | choose_new_thread \ f3) + (case act' of ResumeCurrentThread \ g1 + | ChooseNewThread \ g3 + | SwitchToThread t \ g2 t)" + apply (cases act) + apply (rule corres_guard_imp, force+)+ + done + +lemma corres_assert_ret: + "corres dc (\s. P) \ (assert P) (return ())" + apply (rule corres_no_failI) + apply simp + apply (simp add: assert_def return_def fail_def) + done + +lemma corres_assert_assume_r: + "corres dc P Q f (g ()) + \ corres dc P (Q and (\s. Q')) f (assert Q' >>= g)" + by (force simp: corres_underlying_def assert_def return_def bind_def fail_def) + +crunch cur[wp]: tcbSchedEnqueue cur_tcb' + (simp: unless_def) + +lemma thread_get_exs_valid[wp]: + "tcb_at t s \ \(=) s\ thread_get f t \\\r. 
(=) s\" + apply (clarsimp simp: get_thread_state_def assert_opt_def fail_def + thread_get_def gets_the_def exs_valid_def gets_def + get_def bind_def return_def split: option.splits) + apply (erule get_tcb_at) + done + +lemma gts_exs_valid[wp]: + "tcb_at t s \ \(=) s\ get_thread_state t \\\r. (=) s\" + apply (clarsimp simp: get_thread_state_def assert_opt_def fail_def + thread_get_def gets_the_def exs_valid_def gets_def + get_def bind_def return_def split: option.splits) + apply (erule get_tcb_at) + done + +lemma guarded_switch_to_corres: + "corres dc (valid_arch_state and valid_objs + and valid_vspace_objs and pspace_aligned and pspace_distinct + and valid_vs_lookup and valid_global_objs and pspace_in_kernel_window + and unique_table_refs + and st_tcb_at runnable t and valid_etcbs and (\s. sym_refs (state_hyp_refs_of s)) + and valid_queues and valid_idle) + (no_0_obj' and sym_heap_sched_pointers and valid_objs') + (guarded_switch_to t) (switchToThread t)" + apply (simp add: guarded_switch_to_def) + apply (rule corres_guard_imp) + apply (rule corres_symb_exec_l'[OF _ gts_exs_valid]) + apply (rule corres_assert_assume_l) + apply (rule switchToThread_corres) + apply (force simp: st_tcb_at_tcb_at) + apply (wp gts_st_tcb_at) + apply (force simp: st_tcb_at_tcb_at)+ + done + +abbreviation "enumPrio \ [0.e.maxPriority]" + +lemma enumPrio_word_div: + fixes v :: "8 word" + assumes vlt: "unat v \ unat maxPriority" + shows "\xs ys. enumPrio = xs @ [v] @ ys \ (\x\set xs. x < v) \ (\y\set ys. v < y)" + apply (subst upto_enum_word) + apply (subst upt_add_eq_append'[where j="unat v"]) + apply simp + apply (rule le_SucI) + apply (rule vlt) + apply (simp only: upt_conv_Cons vlt[simplified less_Suc_eq_le[symmetric]]) + apply (intro exI conjI) + apply fastforce + apply clarsimp + apply (drule of_nat_mono_maybe[rotated, where 'a="8"]) + apply (fastforce simp: vlt) + apply simp + apply (clarsimp simp: Suc_le_eq) + apply (erule disjE) + apply (drule of_nat_mono_maybe[rotated, where 'a="8"]) + apply (simp add: maxPriority_def numPriorities_def) + apply (clarsimp simp: unat_of_nat_eq) + apply (erule conjE) + apply (drule_tac y="unat v" and x="x" in of_nat_mono_maybe[rotated, where 'a="8"]) + apply (simp add: maxPriority_def numPriorities_def)+ + done + +lemma curDomain_corres: "corres (=) \ \ (gets cur_domain) (curDomain)" + by (simp add: curDomain_def state_relation_def) + +lemma curDomain_corres': + "corres (=) \ (\s. ksCurDomain s \ maxDomain) + (gets cur_domain) (if Suc 0 < numDomains then curDomain else return 0)" + apply (case_tac "1 < numDomains"; simp) + apply (rule corres_guard_imp[OF curDomain_corres]; solves simp) + (* if we have only one domain, then we are in it *) + apply (clarsimp simp: return_def simpler_gets_def bind_def maxDomain_def + state_relation_def corres_underlying_def) + done + +lemma lookupBitmapPriority_Max_eqI: + "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; ksReadyQueuesL1Bitmap s d \ 0 \ + \ lookupBitmapPriority d s = (Max {prio. \ tcbQueueEmpty (ksReadyQueues s (d, prio))})" + apply (rule Max_eqI[simplified eq_commute]; simp) + apply (fastforce simp: bitmapL1_highest_lookup valid_bitmapQ_bitmapQ_simp) + apply (metis valid_bitmapQ_bitmapQ_simp bitmapQ_from_bitmap_lookup) + done + +lemma corres_gets_queues_getReadyQueuesL1Bitmap: + "corres (\qs l1. (l1 = 0) = (\p. qs p = [])) \ valid_bitmaps + (gets (\s. 
ready_queues s d)) (getReadyQueuesL1Bitmap d)" + unfolding state_relation_def valid_bitmaps_def getReadyQueuesL1Bitmap_def + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x=d in spec) + apply (fastforce simp: bitmapL1_zero_ksReadyQueues list_queue_relation_def tcbQueueEmpty_def) + done + +lemma guarded_switch_to_chooseThread_fragment_corres: + "corres dc + (P and st_tcb_at runnable t and invs and valid_sched) + (P' and invs_no_cicd') + (guarded_switch_to t) + (do runnable \ isRunnable t; + y \ assert runnable; + ThreadDecls_H.switchToThread t + od)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + unfolding guarded_switch_to_def isRunnable_def + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule corres_assert_assume_l) + apply (rule corres_assert_assume_r) + apply (rule switchToThread_corres) + apply (wp gts_st_tcb_at)+ + apply (clarsimp simp: st_tcb_at_tcb_at invs_def valid_state_def valid_pspace_def valid_sched_def + invs_valid_vs_lookup invs_unique_refs) + apply (auto elim!: pred_tcb'_weakenE split: thread_state.splits + simp: pred_tcb_at' runnable'_def all_invs_but_ct_idle_or_in_cur_domain'_def) + done + +lemma Max_prio_helper: + "ready_queues_relation s s' + \ Max {prio. ready_queues s d prio \ []} + = Max {prio. \ tcbQueueEmpty (ksReadyQueues s' (d, prio))}" + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def tcbQueueEmpty_def) + apply (rule Max_eq_if) + apply fastforce + apply fastforce + apply (fastforce dest: heap_path_head) + apply clarsimp + apply (drule_tac x=d in spec) + apply (drule_tac x=b in spec) + apply force + done + +lemma bitmap_lookup_queue_is_max_non_empty: + "\ valid_bitmaps s'; (s, s') \ state_relation; invs s; + ksReadyQueuesL1Bitmap s' (ksCurDomain s') \ 0 \ + \ the (tcbQueueHead (ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s'))) + = hd (max_non_empty_queue (ready_queues s (cur_domain s)))" + apply (clarsimp simp: max_non_empty_queue_def valid_bitmaps_def lookupBitmapPriority_Max_eqI) + apply (frule curdomain_relation) + apply (drule state_relation_ready_queues_relation) + apply (simp add: Max_prio_helper) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (frule (2) bitmapL1_zero_ksReadyQueues[THEN arg_cong_Not, THEN iffD1]) + apply clarsimp + apply (cut_tac P="\x. \ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', x))" + in setcomp_Max_has_prop) + apply fastforce + apply (clarsimp simp: ready_queues_relation_def Let_def list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x="ksCurDomain s'" in spec) + apply (drule_tac x="Max {prio. \ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', prio))}" + in spec) + using heap_path_head tcbQueueEmpty_def + by fastforce + +lemma ksReadyQueuesL1Bitmap_return_wp: + "\\s. P (ksReadyQueuesL1Bitmap s d) s \ getReadyQueuesL1Bitmap d \\rv s. P rv s\" + unfolding getReadyQueuesL1Bitmap_def + by wp + +lemma curDomain_or_return_0: + "\ \P\ curDomain \\rv s. Q rv s \; \s. P s \ ksCurDomain s \ maxDomain \ + \ \P\ if 1 < numDomains then curDomain else return 0 \\rv s. 
Q rv s \" + apply (case_tac "1 < numDomains"; simp) + apply (simp add: valid_def curDomain_def simpler_gets_def return_def maxDomain_def) + done + +lemma invs_no_cicd_ksCurDomain_maxDomain': + "invs_no_cicd' s \ ksCurDomain s \ maxDomain" + unfolding invs_no_cicd'_def by simp + +crunches curDomain + for valid_bitmaps[wp]: valid_bitmaps + +lemma chooseThread_corres: + "corres dc (invs and valid_sched) invs_no_cicd' choose_thread chooseThread" + (is "corres _ ?PREI ?PREH _ _") +proof - + + (* if we only have one domain, we are in it *) + have one_domain_case: + "\s. \ invs_no_cicd' s; numDomains \ 1 \ \ ksCurDomain s = 0" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) + + show ?thesis + supply if_split[split del] + apply (clarsimp simp: choose_thread_def chooseThread_def) + apply add_ready_qs_runnable + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply fastforce + apply (simp only: return_bind Let_def) + apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) + apply (rule corres_guard_imp) + apply (rule corres_split[OF curDomain_corres']) + apply clarsimp + apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) + apply (erule corres_if2[OF sym]) + apply (rule switchToIdleThread_corres) + apply (rule corres_symb_exec_r) + apply (rule corres_symb_exec_r) + apply (rule_tac P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) + \ st_tcb_at runnable (hd (max_non_empty_queue queues)) s" + and P'="\s. ?PREH s \ l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) + \ l1 \ 0 + \ queue = ksReadyQueues s (ksCurDomain s, + lookupBitmapPriority (ksCurDomain s) s)" + and F="the (tcbQueueHead queue) = hd (max_non_empty_queue queues)" + in corres_req) + apply (fastforce simp: bitmap_lookup_queue_is_max_non_empty + all_invs_but_ct_idle_or_in_cur_domain'_def) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) + apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ + apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) + apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ + apply (clarsimp simp: valid_sched_def max_non_empty_queue_def valid_queues_def split: if_splits) + apply (erule_tac x="cur_domain s" in allE) + apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) + apply (case_tac "ready_queues s (cur_domain s) + (Max {prio. ready_queues s (cur_domain s) prio + \ []})") + apply (clarsimp) + apply (subgoal_tac "ready_queues s (cur_domain s) + (Max {prio. 
ready_queues s (cur_domain s) prio \ []}) + \ []") + apply fastforce + apply (fastforce elim!: setcomp_Max_has_prop) + apply fastforce + apply clarsimp + apply (frule invs_no_cicd_ksCurDomain_maxDomain') + apply (prop_tac "valid_bitmaps s") + apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def) + apply (fastforce dest: one_domain_case split: if_splits) + done +qed + +lemma thread_get_comm: "do x \ thread_get f p; y \ gets g; k x y od = + do y \ gets g; x \ thread_get f p; k x y od" + apply (rule ext) + apply (clarsimp simp add: gets_the_def assert_opt_def + bind_def gets_def get_def return_def + thread_get_def + fail_def split: option.splits) + done + +lemma schact_bind_inside: "do x \ f; (case act of resume_cur_thread \ f1 x + | switch_thread t \ f2 t x + | choose_new_thread \ f3 x) od + = (case act of resume_cur_thread \ (do x \ f; f1 x od) + | switch_thread t \ (do x \ f; f2 t x od) + | choose_new_thread \ (do x \ f; f3 x od))" + apply (case_tac act,simp_all) + done + +interpretation tcb_sched_action_extended: is_extended' "tcb_sched_action f a" + by (unfold_locales) + +lemma getDomainTime_corres: + "corres (=) \ \ (gets domain_time) getDomainTime" + by (simp add: getDomainTime_def state_relation_def) + +lemma nextDomain_corres: + "corres dc \ \ next_domain nextDomain" + apply (simp add: next_domain_def nextDomain_def) + apply (rule corres_modify) + apply (simp add: state_relation_def Let_def dschLength_def dschDomain_def) + done + +lemma next_domain_valid_sched[wp]: + "\ valid_sched and (\s. scheduler_action s = choose_new_thread)\ next_domain \ \_. valid_sched \" + apply (simp add: next_domain_def Let_def) + apply (wp, simp add: valid_sched_def valid_sched_action_2_def ct_not_in_q_2_def) + apply (simp add:valid_blocked_2_def) + done + +lemma nextDomain_invs_no_cicd': + "\ invs' and (\s. ksSchedulerAction s = ChooseNewThread)\ nextDomain \ \_. invs_no_cicd' \" + apply (simp add: nextDomain_def Let_def dschLength_def dschDomain_def) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def valid_machine_state'_def + ct_not_inQ_def cur_tcb'_def ct_idle_or_in_cur_domain'_def dschDomain_def + all_invs_but_ct_idle_or_in_cur_domain'_def) + done + +lemma scheduleChooseNewThread_fragment_corres: + "corres dc (invs and valid_sched and (\s. scheduler_action s = choose_new_thread)) (invs' and (\s. ksSchedulerAction s = ChooseNewThread)) + (do _ \ when (domainTime = 0) next_domain; + choose_thread + od) + (do _ \ when (domainTime = 0) nextDomain; + chooseThread + od)" + apply (subst bind_dummy_ret_val) + apply (subst bind_dummy_ret_val) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule corres_when, simp) + apply (rule nextDomain_corres) + apply simp + apply (rule chooseThread_corres) + apply (wp nextDomain_invs_no_cicd')+ + apply (clarsimp simp: valid_sched_def invs'_def valid_state'_def all_invs_but_ct_idle_or_in_cur_domain'_def)+ + done + +lemma scheduleSwitchThreadFastfail_corres: + "\ ct \ it \ (tp = tp' \ cp = cp') ; ct = ct' ; it = it' \ \ + corres ((=)) (is_etcb_at ct) (tcb_at' ct) + (schedule_switch_thread_fastfail ct it cp tp) + (scheduleSwitchThreadFastfail ct' it' cp' tp')" + by (clarsimp simp: schedule_switch_thread_fastfail_def scheduleSwitchThreadFastfail_def) + +lemma gets_is_highest_prio_expand: + "gets (is_highest_prio d p) \ do + q \ gets (\s. ready_queues s d); + return ((\p. q p = []) \ Max {prio. 
q prio \ []} \ p) + od" + by (clarsimp simp: is_highest_prio_def gets_def) + +lemma isHighestPrio_corres: + assumes "d' = d" + assumes "p' = p" + shows + "corres ((=)) \ valid_bitmaps + (gets (is_highest_prio d p)) + (isHighestPrio d' p')" + using assms + apply (clarsimp simp: gets_is_highest_prio_expand isHighestPrio_def) + apply (subst getHighestPrio_def') + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) + apply (rule corres_if_r'[where P'="\_. True",rotated]) + apply (rule_tac corres_symb_exec_r) + apply (rule_tac P="\s. q = ready_queues s d" + and P'="\s. valid_bitmaps s \ l1 = ksReadyQueuesL1Bitmap s d \ + l1 \ 0 \ hprio = lookupBitmapPriority d s" + and F="hprio = Max {prio. q prio \ []}" in corres_req) + apply (elim conjE) + apply (clarsimp simp: valid_bitmaps_def) + apply (subst lookupBitmapPriority_Max_eqI; blast?) + apply (fastforce dest: state_relation_ready_queues_relation Max_prio_helper[where d=d] + simp: tcbQueueEmpty_def) + apply fastforce + apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imps ksReadyQueuesL1Bitmap_return_wp)+ + done + +crunch valid_idle_etcb[wp]: set_scheduler_action valid_idle_etcb + +crunch inv[wp]: isHighestPrio P +crunch inv[wp]: curDomain P +crunch inv[wp]: scheduleSwitchThreadFastfail P + +lemma setSchedulerAction_invs': (* not in wp set, clobbered by ssa_wp *) + "setSchedulerAction ChooseNewThread \invs' \" + by (wpsimp simp: invs'_def cur_tcb'_def valid_state'_def valid_irq_node'_def ct_not_inQ_def + ct_idle_or_in_cur_domain'_def) + +lemma scheduleChooseNewThread_corres: + "corres dc + (\s. invs s \ valid_sched s \ scheduler_action s = choose_new_thread) + (\s. invs' s \ ksSchedulerAction s = ChooseNewThread) + schedule_choose_new_thread scheduleChooseNewThread" + unfolding schedule_choose_new_thread_def scheduleChooseNewThread_def + apply (rule corres_guard_imp) + apply (rule corres_split[OF getDomainTime_corres], clarsimp) + apply (rule corres_split[OF scheduleChooseNewThread_fragment_corres, simplified bind_assoc]) + apply (rule setSchedulerAction_corres) + apply (wp | simp)+ + apply (wp | simp add: getDomainTime_def)+ + apply auto + done + +lemma ethread_get_when_corres: + assumes x: "\etcb tcb'. etcb_relation etcb tcb' \ r (f etcb) (f' tcb')" + shows "corres (\rv rv'. 
b \ r rv rv') (is_etcb_at t) (tcb_at' t) + (ethread_get_when b f t) (threadGet f' t)" + apply (clarsimp simp: ethread_get_when_def) + apply (rule conjI; clarsimp) + apply (rule corres_guard_imp, rule ethreadget_corres; simp add: x) + apply (clarsimp simp: threadGet_def) + apply (rule corres_noop) + apply wpsimp+ + done + +lemma tcb_sched_enqueue_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_enqueue t \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_enqueue_def set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_append_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_append tcb_ptr \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_append_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_enqueue_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_enqueue t \ready_qs_distinct\ " + unfolding tcb_sched_action_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +lemma tcb_sched_append_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_append t \ready_qs_distinct\ " + unfolding tcb_sched_action_def tcb_sched_append_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +crunches set_scheduler_action + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps simp: in_correct_ready_q_def ready_qs_distinct_def) + +crunches reschedule_required + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (ignore: tcb_sched_action wp: crunch_wps) + +lemma schedule_corres: + "corres dc (invs and valid_sched and valid_list) invs' (Schedule_A.schedule) ThreadDecls_H.schedule" + supply ethread_get_wp[wp del] + supply ssa_wp[wp del] + supply tcbSchedEnqueue_invs'[wp del] + supply tcbSchedEnqueue_invs'_not_ResumeCurrentThread[wp del] + supply setSchedulerAction_direct[wp] + supply if_split[split del] + + apply (clarsimp simp: Schedule_A.schedule_def Thread_H.schedule_def) + apply (subst thread_get_test) + apply (subst thread_get_comm) + apply (subst schact_bind_inside) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres[THEN corres_rel_imp[where r="\x y. 
y = x"],simplified]]) + apply (rule corres_split[OF getSchedulerAction_corres]) + apply (rule corres_split_sched_act,assumption) + apply (rule_tac P="tcb_at ct" in corres_symb_exec_l') + apply (rule_tac corres_symb_exec_l) + apply simp + apply (rule corres_assert_ret) + apply ((wpsimp wp: thread_get_wp' gets_exs_valid)+) + prefer 2 + (* choose thread *) + apply clarsimp + apply (rule corres_split[OF thread_get_isRunnable_corres]) + apply (rule corres_split) + apply (rule corres_when, simp) + apply (rule tcbSchedEnqueue_corres, simp) + apply (rule scheduleChooseNewThread_corres, simp) + apply (wp thread_get_wp' tcbSchedEnqueue_invs' hoare_vcg_conj_lift hoare_drop_imps + | clarsimp)+ + (* switch to thread *) + apply (rule corres_split[OF thread_get_isRunnable_corres], + rename_tac was_running wasRunning) + apply (rule corres_split) + apply (rule corres_when, simp) + apply (rule tcbSchedEnqueue_corres, simp) + apply (rule corres_split[OF getIdleThread_corres], rename_tac it it') + apply (rule_tac F="was_running \ ct \ it" in corres_gen_asm) + apply (rule corres_split) + apply (rule ethreadget_corres[where r="(=)"]) + apply (clarsimp simp: etcb_relation_def) + apply (rename_tac tp tp') + apply (rule corres_split) + apply (rule ethread_get_when_corres[where r="(=)"]) + apply (clarsimp simp: etcb_relation_def) + apply (rename_tac cp cp') + apply (rule corres_split) + apply (rule scheduleSwitchThreadFastfail_corres; simp) + apply (rule corres_split[OF curDomain_corres]) + apply (rule corres_split[OF isHighestPrio_corres]; simp only:) + apply (rule corres_if, simp) + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) + apply (simp, fold dc_def) + apply (rule corres_split) + apply (rule setSchedulerAction_corres; simp) + apply (rule scheduleChooseNewThread_corres) + apply (wp | simp)+ + apply (simp add: valid_sched_def) + apply wp + apply (rule hoare_vcg_conj_lift) + apply (rule_tac t=t in set_scheduler_action_cnt_valid_blocked') + apply (wpsimp wp: setSchedulerAction_invs')+ + apply (wp tcb_sched_action_enqueue_valid_blocked hoare_vcg_all_lift enqueue_thread_queued) + apply (wp tcbSchedEnqueue_invs'_not_ResumeCurrentThread) + apply (rule corres_if, fastforce) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) + apply (simp, fold dc_def) + apply (rule corres_split) + apply (rule setSchedulerAction_corres; simp) + apply (rule scheduleChooseNewThread_corres) + apply (wp | simp)+ + apply (simp add: valid_sched_def) + apply wp + apply (rule hoare_vcg_conj_lift) + apply (rule_tac t=t in set_scheduler_action_cnt_valid_blocked') + apply (wpsimp wp: setSchedulerAction_invs')+ + apply (wp tcb_sched_action_append_valid_blocked hoare_vcg_all_lift append_thread_queued) + apply (wp tcbSchedAppend_invs'_not_ResumeCurrentThread) + + apply (rule corres_split[OF guarded_switch_to_corres], simp) + apply (rule setSchedulerAction_corres[simplified dc_def]) + apply (wp | simp)+ + + (* isHighestPrio *) + apply (clarsimp simp: if_apply_def2) + apply ((wp (once) hoare_drop_imp)+)[1] + + apply (simp add: if_apply_def2) + apply ((wp (once) hoare_drop_imp)+)[1] + apply wpsimp+ + + apply (clarsimp simp: conj_ac cong: conj_cong) + apply wp + apply (rule_tac Q="\_ s. 
valid_blocked_except t s \ scheduler_action s = switch_thread t" + in hoare_post_imp, fastforce) + apply (wp add: tcb_sched_action_enqueue_valid_blocked_except + tcbSchedEnqueue_invs'_not_ResumeCurrentThread thread_get_wp + del: gets_wp + | strengthen valid_objs'_valid_tcbs')+ + apply (clarsimp simp: conj_ac if_apply_def2 cong: imp_cong conj_cong del: hoare_gets) + apply (wp gets_wp)+ + + (* abstract final subgoal *) + apply clarsimp + + subgoal for s + apply (clarsimp split: Deterministic_A.scheduler_action.splits + simp: invs_psp_aligned invs_distinct invs_valid_objs invs_arch_state + invs_vspace_objs[simplified] tcb_at_invs) + apply (rule conjI, clarsimp) + apply (fastforce simp: invs_def + valid_sched_def valid_sched_action_def is_activatable_def + st_tcb_at_def obj_at_def valid_state_def only_idle_def + ) + apply (rule conjI, clarsimp) + subgoal for candidate + apply (clarsimp simp: valid_sched_def invs_def valid_state_def cur_tcb_def + valid_arch_caps_def valid_sched_action_def + weak_valid_sched_action_def tcb_at_is_etcb_at + tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]] + valid_blocked_except_def valid_blocked_def invs_hyp_sym_refs) + apply (fastforce simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) + done + (* choose new thread case *) + apply (intro impI conjI allI tcb_at_invs + | fastforce simp: invs_def cur_tcb_def valid_etcbs_def + valid_sched_def st_tcb_at_def obj_at_def valid_state_def + weak_valid_sched_action_def not_cur_thread_def)+ + done + + (* haskell final subgoal *) + apply (clarsimp simp: if_apply_def2 invs'_def valid_state'_def + cong: imp_cong split: scheduler_action.splits) + apply (fastforce simp: cur_tcb'_def valid_pspace'_def) + done + +lemma ssa_all_invs_but_ct_not_inQ': + "\all_invs_but_ct_not_inQ' and sch_act_wf sa and + (\s. sa = ResumeCurrentThread \ ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s)\ + setSchedulerAction sa \\rv. all_invs_but_ct_not_inQ'\" +proof - + show ?thesis + apply (simp add: setSchedulerAction_def) + apply wp + apply (clarsimp simp add: invs'_def valid_state'_def cur_tcb'_def + state_refs_of'_def ps_clear_def valid_irq_node'_def + tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def bitmapQ_defs + cong: option.case_cong) + done +qed + +lemma ssa_ct_not_inQ: + "\\s. sa = ResumeCurrentThread \ obj_at' (Not \ tcbQueued) (ksCurThread s) s\ + setSchedulerAction sa \\rv. ct_not_inQ\" + by (simp add: setSchedulerAction_def ct_not_inQ_def, wp, clarsimp) + +lemma ssa_all_invs_but_ct_not_inQ''[simplified]: + "\\s. (all_invs_but_ct_not_inQ' s \ sch_act_wf sa s) + \ (sa = ResumeCurrentThread \ ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s) + \ (sa = ResumeCurrentThread \ obj_at' (Not \ tcbQueued) (ksCurThread s) s)\ + setSchedulerAction sa \\rv. invs'\" + apply (simp only: all_invs_but_not_ct_inQ_check' [symmetric]) + apply (rule hoare_elim_pred_conj) + apply (wp hoare_vcg_conj_lift [OF ssa_all_invs_but_ct_not_inQ' ssa_ct_not_inQ]) + apply (clarsimp) + done + +lemma ssa_invs': + "\invs' and sch_act_wf sa and + (\s. sa = ResumeCurrentThread \ ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s) and + (\s. sa = ResumeCurrentThread \ obj_at' (Not \ tcbQueued) (ksCurThread s) s)\ + setSchedulerAction sa \\rv. invs'\" + apply (wp ssa_all_invs_but_ct_not_inQ'') + apply (clarsimp simp add: invs'_def valid_state'_def) + done + +lemma getDomainTime_wp[wp]: "\\s. 
P (ksDomainTime s) s \ getDomainTime \ P \" + unfolding getDomainTime_def + by wp + +lemma switchToThread_ct_not_queued_2: + "\invs_no_cicd' and tcb_at' t\ switchToThread t \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" + (is "\_\ _ \\_. ?POST\") + apply (simp add: Thread_H.switchToThread_def) + apply (wp) + apply (simp add: AARCH64_H.switchToThread_def setCurThread_def) + apply (wp tcbSchedDequeue_not_tcbQueued hoare_drop_imp | simp)+ + done + +lemma setCurThread_obj_at': + "\ obj_at' P t \ setCurThread t \\rv s. obj_at' P (ksCurThread s) s \" +proof - + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (simp add: ct_in_state'_def st_tcb_at'_def) + done +qed + +lemma switchToIdleThread_ct_not_queued_no_cicd': + "\invs_no_cicd'\ switchToIdleThread \\_ s. obj_at' (Not \ tcbQueued) (ksCurThread s) s \" + apply (simp add: Thread_H.switchToIdleThread_def) + apply (wp setCurThread_obj_at') + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x="ksIdleThread s" in spec) + apply (clarsimp simp: invs_no_cicd'_def valid_idle'_def st_tcb_at'_def idle_tcb'_def obj_at'_def) + done + +lemma switchToIdleThread_activatable_2[wp]: + "\invs_no_cicd'\ switchToIdleThread \\rv. ct_in_state' activatable'\" + apply (simp add: Thread_H.switchToIdleThread_def + AARCH64_H.switchToIdleThread_def) + apply (wp setCurThread_ct_in_state) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_idle'_def + pred_tcb_at'_def obj_at'_def idle_tcb'_def) + done + +lemma switchToThread_tcb_in_cur_domain': + "\tcb_in_cur_domain' thread\ + ThreadDecls_H.switchToThread thread + \\y s. tcb_in_cur_domain' (ksCurThread s) s\" + apply (simp add: Thread_H.switchToThread_def setCurThread_def) + apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued hoare_drop_imps) + done + +lemma chooseThread_invs_no_cicd'_posts: (* generic version *) + "\ invs_no_cicd' \ chooseThread + \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s \ + ct_in_state' activatable' s \ + (ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s) \" + (is "\_\ _ \\_. ?POST\") +proof - + note switchToThread_invs[wp del] + note switchToThread_invs_no_cicd'[wp del] + note switchToThread_lookupBitmapPriority_wp[wp] + note assert_wp[wp del] + note if_split[split del] + + (* if we only have one domain, we are in it *) + have one_domain_case: + "\s. \ invs_no_cicd' s; numDomains \ 1 \ \ ksCurDomain s = 0" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) + + show ?thesis + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ + apply (simp only: return_bind, simp) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. 
invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" in bind_wp) + apply (rename_tac l1) + apply (case_tac "l1 = 0") + (* switch to idle thread *) + apply simp + apply (rule hoare_pre) + apply (wp (once) switchToIdleThread_ct_not_queued_no_cicd') + apply (wp (once)) + apply ((wp hoare_disjI1 switchToIdleThread_curr_is_idle)+)[1] + apply simp + (* we have a thread to switch to *) + apply (clarsimp simp: bitmap_fun_defs) + apply (wp assert_inv switchToThread_ct_not_queued_2 assert_inv hoare_disjI2 + switchToThread_tcb_in_cur_domain') + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) + apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ + done +qed + +lemma chooseThread_activatable_2: + "\invs_no_cicd'\ chooseThread \\rv. ct_in_state' activatable'\" + apply (rule hoare_pre, rule hoare_strengthen_post) + apply (rule chooseThread_invs_no_cicd'_posts) + apply simp+ + done + +lemma chooseThread_ct_not_queued_2: + "\ invs_no_cicd'\ chooseThread \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" + (is "\_\ _ \\_. ?POST\") + apply (rule hoare_pre, rule hoare_strengthen_post) + apply (rule chooseThread_invs_no_cicd'_posts) + apply simp+ + done + +lemma chooseThread_invs_no_cicd': + "\ invs_no_cicd' \ chooseThread \\rv. invs' \" +proof - + note switchToThread_invs[wp del] + note switchToThread_invs_no_cicd'[wp del] + note switchToThread_lookupBitmapPriority_wp[wp] + note assert_wp[wp del] + note if_split[split del] + + (* if we only have one domain, we are in it *) + have one_domain_case: + "\s. \ invs_no_cicd' s; numDomains \ 1 \ \ ksCurDomain s = 0" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) + + (* NOTE: do *not* unfold numDomains in the rest of the proof, + it should work for any number *) + + (* FIXME this is almost identical to the chooseThread_invs_no_cicd'_posts proof, can generalise? *) + show ?thesis + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ + apply (simp only: return_bind, simp) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" in bind_wp) + apply (rename_tac l1) + apply (case_tac "l1 = 0") + (* switch to idle thread *) + apply (simp, wp switchToIdleThread_invs_no_cicd', simp) + (* we have a thread to switch to *) + apply (clarsimp simp: bitmap_fun_defs) + apply (wp assert_inv) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) + apply (fastforce elim: bitmapQ_from_bitmap_lookup simp: lookupBitmapPriority_def) + apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ + done +qed + +lemma chooseThread_in_cur_domain': + "\ invs_no_cicd' \ chooseThread \\rv s. 
ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s\" + apply (rule hoare_pre, rule hoare_strengthen_post) + apply (rule chooseThread_invs_no_cicd'_posts, simp_all) + done + +lemma scheduleChooseNewThread_invs': + "\ invs' and (\s. ksSchedulerAction s = ChooseNewThread) \ + scheduleChooseNewThread + \ \_ s. invs' s \" + unfolding scheduleChooseNewThread_def + apply (wpsimp wp: ssa_invs' chooseThread_invs_no_cicd' chooseThread_ct_not_queued_2 + chooseThread_activatable_2 chooseThread_invs_no_cicd' + chooseThread_in_cur_domain' nextDomain_invs_no_cicd' chooseThread_ct_not_queued_2) + apply (clarsimp simp: invs'_to_invs_no_cicd'_def) + done + +lemma schedule_invs': + "\invs'\ ThreadDecls_H.schedule \\rv. invs'\" + supply ssa_wp[wp del] + apply (simp add: schedule_def) + apply (rule_tac bind_wp, rename_tac t) + apply (wp, wpc) + \ \action = ResumeCurrentThread\ + apply (wp)[1] + \ \action = ChooseNewThread\ + apply (wp scheduleChooseNewThread_invs') + \ \action = SwitchToThread candidate\ + apply (wpsimp wp: scheduleChooseNewThread_invs' ssa_invs' + chooseThread_invs_no_cicd' setSchedulerAction_invs' setSchedulerAction_direct + switchToThread_tcb_in_cur_domain' switchToThread_ct_not_queued_2 + | wp hoare_disjI2[where R="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] + | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] + | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] + | strengthen invs'_invs_no_cicd + | wp hoare_vcg_imp_lift)+ + apply (frule invs_sch_act_wf') + apply (auto simp: invs_sch_act_wf' obj_at'_activatable_st_tcb_at' + st_tcb_at'_runnable_is_activatable) + done + +lemma setCurThread_nosch: + "\\s. P (ksSchedulerAction s)\ + setCurThread t + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setCurThread_def) + apply wp + apply simp + done + +lemma stt_nosch: + "\\s. P (ksSchedulerAction s)\ + switchToThread t + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: Thread_H.switchToThread_def AARCH64_H.switchToThread_def storeWordUser_def) + apply (wp setCurThread_nosch hoare_drop_imp |simp)+ + done + +lemma stit_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ + switchToIdleThread + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: Thread_H.switchToIdleThread_def + AARCH64_H.switchToIdleThread_def storeWordUser_def) + apply (wp setCurThread_nosch | simp add: getIdleThread_def)+ + done + +lemma schedule_sch: + "\\\ schedule \\rv s. ksSchedulerAction s = ResumeCurrentThread\" + by (wp setSchedulerAction_direct | wpc| simp add: schedule_def scheduleChooseNewThread_def)+ + +lemma schedule_sch_act_simple: + "\\\ schedule \\rv. sch_act_simple\" + apply (rule hoare_strengthen_post [OF schedule_sch]) + apply (simp add: sch_act_simple_def) + done + +lemma ssa_ct: + "\ct_in_state' P\ setSchedulerAction sa \\rv. ct_in_state' P\" +proof - + show ?thesis + apply (unfold setSchedulerAction_def) + apply wp + apply (clarsimp simp add: ct_in_state'_def pred_tcb_at'_def) + done +qed + +lemma scheduleChooseNewThread_ct_activatable'[wp]: + "\ invs' and (\s. ksSchedulerAction s = ChooseNewThread) \ + scheduleChooseNewThread + \\_. ct_in_state' activatable'\" + unfolding scheduleChooseNewThread_def + by (wpsimp simp: ct_in_state'_def + wp: ssa_invs' nextDomain_invs_no_cicd' + chooseThread_activatable_2[simplified ct_in_state'_def] + | (rule hoare_lift_Pf[where f=ksCurThread], solves wp) + | strengthen invs'_invs_no_cicd)+ + +lemma schedule_ct_activatable'[wp]: + "\invs'\ ThreadDecls_H.schedule \\_. 
ct_in_state' activatable'\" + supply ssa_wp[wp del] + apply (simp add: schedule_def) + apply (rule_tac bind_wp, rename_tac t) + apply (wp, wpc) + \ \action = ResumeCurrentThread\ + apply (wp)[1] + \ \action = ChooseNewThread\ + apply wpsimp + \ \action = SwitchToThread\ + apply (wpsimp wp: ssa_invs' setSchedulerAction_direct ssa_ct + | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] + | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] + | strengthen invs'_invs_no_cicd + | wp hoare_vcg_imp_lift)+ + apply (fastforce dest: invs_sch_act_wf' elim: pred_tcb'_weakenE + simp: sch_act_wf obj_at'_activatable_st_tcb_at') + done + +lemma threadSet_sch_act_sane: + "\sch_act_sane\ threadSet f t \\_. sch_act_sane\" + by (wp sch_act_sane_lift) + +lemma rescheduleRequired_sch_act_sane[wp]: + "\\\ rescheduleRequired \\rv. sch_act_sane\" + apply (simp add: rescheduleRequired_def sch_act_sane_def + setSchedulerAction_def) + by (wp | wpc | clarsimp)+ + +lemma sts_sch_act_sane: + "\sch_act_sane\ setThreadState st t \\_. sch_act_sane\" + apply (simp add: setThreadState_def) + including no_pre + apply (wp hoare_drop_imps + | simp add: threadSet_sch_act_sane)+ + done + +lemma sbn_sch_act_sane: + "\sch_act_sane\ setBoundNotification ntfn t \\_. sch_act_sane\" + apply (simp add: setBoundNotification_def) + apply (wp | simp add: threadSet_sch_act_sane)+ + done + +lemma possibleSwitchTo_corres: + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t + and in_correct_ready_q and ready_qs_distinct and pspace_aligned and pspace_distinct) + ((\s. weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers and valid_objs') + (possible_switch_to t) (possibleSwitchTo t)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + supply ethread_get_wp[wp del] + apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) + apply (clarsimp simp: state_relation_def) + apply (rule tcb_at_cross, erule st_tcb_at_tcb_at; assumption) + apply (simp add: possible_switch_to_def possibleSwitchTo_def cong: if_cong) + apply (rule corres_guard_imp) + apply (rule corres_split[OF curDomain_corres], simp) + apply (rule corres_split) + apply (rule ethreadget_corres[where r="(=)"]) + apply (clarsimp simp: etcb_relation_def) + apply (rule corres_split[OF getSchedulerAction_corres]) + apply (rule corres_if, simp) + apply (rule tcbSchedEnqueue_corres, simp) + apply (rule corres_if, simp) + apply (case_tac action; simp) + apply (rule corres_split[OF rescheduleRequired_corres]) + apply (rule tcbSchedEnqueue_corres, simp) + apply (wp reschedule_required_valid_queues | strengthen valid_objs'_valid_tcbs')+ + apply (rule setSchedulerAction_corres, simp) + apply (wpsimp simp: if_apply_def2 + wp: hoare_drop_imp[where f="ethread_get a b" for a b])+ + apply (wp hoare_drop_imps)[1] + apply wp+ + apply (clarsimp simp: valid_sched_def invs_def valid_state_def cur_tcb_def st_tcb_at_tcb_at + valid_sched_action_def weak_valid_sched_action_def + tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]]) + apply (fastforce simp: tcb_at_is_etcb_at) + done + +end +end diff --git a/proof/refine/AARCH64/StateRelation.thy b/proof/refine/AARCH64/StateRelation.thy new file mode 100644 index 0000000000..e8f13fae54 --- /dev/null +++ b/proof/refine/AARCH64/StateRelation.thy @@ -0,0 +1,681 @@ +(* + * Copyright 
2023, Proofcraft Pty Ltd
+ * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *)
+
+(*
+  The refinement relation between abstract and concrete states
+*)
+
+theory StateRelation
+imports InvariantUpdates_H
+begin
+
+context begin interpretation Arch .
+
+definition cte_map :: "cslot_ptr \<Rightarrow> machine_word" where
+  "cte_map \<equiv> \<lambda>(oref, cref). oref + (of_bl cref << cte_level_bits)"
+
+lemmas cte_map_def' = cte_map_def[simplified cte_level_bits_def shiftl_t2n mult_ac, simplified]
+
+definition lookup_failure_map :: "ExceptionTypes_A.lookup_failure \<Rightarrow> Fault_H.lookup_failure" where
+  "lookup_failure_map \<equiv> \<lambda>lf. case lf of
+     ExceptionTypes_A.InvalidRoot \<Rightarrow> Fault_H.InvalidRoot
+   | ExceptionTypes_A.MissingCapability n \<Rightarrow> Fault_H.MissingCapability n
+   | ExceptionTypes_A.DepthMismatch n m \<Rightarrow> Fault_H.DepthMismatch n m
+   | ExceptionTypes_A.GuardMismatch n g \<Rightarrow> Fault_H.GuardMismatch n (of_bl g) (length g)"
+
+primrec arch_fault_map :: "Machine_A.AARCH64_A.arch_fault \<Rightarrow> arch_fault" where
+  "arch_fault_map (Machine_A.AARCH64_A.VMFault ptr msg) = VMFault ptr msg"
+| "arch_fault_map (Machine_A.AARCH64_A.VGICMaintenance datalist) = VGICMaintenance datalist "
+| "arch_fault_map (Machine_A.AARCH64_A.VPPIEvent irq) = VPPIEvent irq"
+| "arch_fault_map (Machine_A.AARCH64_A.VCPUFault data) = VCPUFault data"
+
+primrec fault_map :: "ExceptionTypes_A.fault \<Rightarrow> Fault_H.fault" where
+  "fault_map (ExceptionTypes_A.CapFault ref bool failure) =
+     Fault_H.CapFault ref bool (lookup_failure_map failure)"
+| "fault_map (ExceptionTypes_A.ArchFault arch_fault) =
+     Fault_H.ArchFault (arch_fault_map arch_fault)"
+| "fault_map (ExceptionTypes_A.UnknownSyscallException n) =
+     Fault_H.UnknownSyscallException n"
+| "fault_map (ExceptionTypes_A.UserException x y) =
+     Fault_H.UserException x y"
+
+type_synonym obj_relation_cut = "Structures_A.kernel_object \<Rightarrow> Structures_H.kernel_object \<Rightarrow> bool"
+type_synonym obj_relation_cuts = "(machine_word \<times> obj_relation_cut) set"
+
+definition vmrights_map :: "rights set \<Rightarrow> vmrights" where
+  "vmrights_map S \<equiv> if AllowRead \<in> S
+                     then (if AllowWrite \<in> S then VMReadWrite else VMReadOnly)
+                     else VMKernelOnly"
+
+definition zbits_map :: "nat option \<Rightarrow> zombie_type" where
+  "zbits_map N \<equiv> case N of Some n \<Rightarrow> ZombieCNode n | None \<Rightarrow> ZombieTCB"
+
+definition mdata_map ::
+  "(Machine_A.AARCH64_A.asid \<times> vspace_ref) option \<Rightarrow> (asid \<times> vspace_ref) option" where
+  "mdata_map = map_option (\<lambda>(asid, ref).
(ucast asid, ref))" + +primrec acap_relation :: "arch_cap \ arch_capability \ bool" where + "acap_relation (arch_cap.ASIDPoolCap p asid) c = + (c = ASIDPoolCap p (ucast asid))" +| "acap_relation (arch_cap.ASIDControlCap) c = + (c = ASIDControlCap)" +| "acap_relation (arch_cap.FrameCap p rghts sz dev data) c = + (c = FrameCap p (vmrights_map rghts) sz dev (mdata_map data))" +| "acap_relation (arch_cap.PageTableCap p pt_t data) c = + (c = PageTableCap p pt_t (mdata_map data))" +| "acap_relation (arch_cap.VCPUCap vcpu) c = (c = + arch_capability.VCPUCap vcpu)" + +primrec cap_relation :: "cap \ capability \ bool" where + "cap_relation Structures_A.NullCap c = + (c = Structures_H.NullCap)" +| "cap_relation Structures_A.DomainCap c = + (c = Structures_H.DomainCap)" +| "cap_relation (Structures_A.UntypedCap dev ref n f) c = + (c = Structures_H.UntypedCap dev ref n f)" +| "cap_relation (Structures_A.EndpointCap ref b r) c = + (c = Structures_H.EndpointCap ref b (AllowSend \ r) (AllowRecv \ r) (AllowGrant \ r) + (AllowGrantReply \ r))" +| "cap_relation (Structures_A.NotificationCap ref b r) c = + (c = Structures_H.NotificationCap ref b (AllowSend \ r) (AllowRecv \ r))" +| "cap_relation (Structures_A.CNodeCap ref n L) c = + (c = Structures_H.CNodeCap ref n (of_bl L) (length L))" +| "cap_relation (Structures_A.ThreadCap ref) c = + (c = Structures_H.ThreadCap ref)" +| "cap_relation (Structures_A.ReplyCap ref master r) c = + (c = Structures_H.ReplyCap ref master (AllowGrant \ r))" +| "cap_relation (Structures_A.IRQControlCap) c = + (c = Structures_H.IRQControlCap)" +| "cap_relation (Structures_A.IRQHandlerCap irq) c = + (c = Structures_H.IRQHandlerCap irq)" +| "cap_relation (Structures_A.ArchObjectCap a) c = + (\a'. acap_relation a a' \ c = Structures_H.ArchObjectCap a')" +| "cap_relation (Structures_A.Zombie p b n) c = + (c = Structures_H.Zombie p (zbits_map b) n)" + + +definition cte_relation :: "cap_ref \ obj_relation_cut" where + "cte_relation y \ \ko ko'. \sz cs cte cap. ko = CNode sz cs \ ko' = KOCTE cte + \ cs y = Some cap \ cap_relation cap (cteCap cte)" + +definition abs_asid_entry :: "asidpool_entry \ asid_pool_entry" where + "abs_asid_entry ap = AARCH64_A.ASIDPoolVSpace (apVMID ap) (apVSpace ap)" + +definition asid_pool_relation :: "asid_pool \ asidpool \ bool" where + "asid_pool_relation \ \p p'. p = map_option abs_asid_entry \ inv ASIDPool p' \ ucast" + +lemma inj_ASIDPool[simp]: + "inj ASIDPool" + by (auto intro: injI) + +lemma asid_pool_relation_def': + "asid_pool_relation ap (ASIDPool ap') = + (\asid_low. ap asid_low = map_option abs_asid_entry (ap' (ucast asid_low)))" + by (auto simp add: asid_pool_relation_def) + +definition vgic_map :: "gic_vcpu_interface \ gicvcpuinterface" where + "vgic_map \ \v. VGICInterface (vgic_hcr v) (vgic_vmcr v) (vgic_apr v) (vgic_lr v)" + +definition vcpu_relation :: "AARCH64_A.vcpu \ vcpu \ bool" where + "vcpu_relation \ \v v'. vcpu_tcb v = vcpuTCBPtr v' \ + vgic_map (vcpu_vgic v) = vcpuVGIC v' \ + vcpu_regs v = vcpuRegs v' \ + vcpu_vppi_masked v = vcpuVPPIMasked v' \ + vcpu_vtimer v = vcpuVTimer v'" + +definition ntfn_relation :: "Structures_A.notification \ Structures_H.notification \ bool" where + "ntfn_relation \ \ntfn ntfn'. 
+ (case ntfn_obj ntfn of + Structures_A.IdleNtfn \ ntfnObj ntfn' = Structures_H.IdleNtfn + | Structures_A.WaitingNtfn q \ ntfnObj ntfn' = Structures_H.WaitingNtfn q + | Structures_A.ActiveNtfn b \ ntfnObj ntfn' = Structures_H.ActiveNtfn b) + \ ntfn_bound_tcb ntfn = ntfnBoundTCB ntfn'" + +definition ep_relation :: "Structures_A.endpoint \ Structures_H.endpoint \ bool" where + "ep_relation \ \ep ep'. case ep of + Structures_A.IdleEP \ ep' = Structures_H.IdleEP + | Structures_A.RecvEP q \ ep' = Structures_H.RecvEP q + | Structures_A.SendEP q \ ep' = Structures_H.SendEP q" + +definition fault_rel_optionation :: "ExceptionTypes_A.fault option \ Fault_H.fault option \ bool" + where + "fault_rel_optionation \ \f f'. f' = map_option fault_map f" + +primrec thread_state_relation :: "Structures_A.thread_state \ Structures_H.thread_state \ bool" + where + "thread_state_relation (Structures_A.Running) ts' + = (ts' = Structures_H.Running)" +| "thread_state_relation (Structures_A.Restart) ts' + = (ts' = Structures_H.Restart)" +| "thread_state_relation (Structures_A.Inactive) ts' + = (ts' = Structures_H.Inactive)" +| "thread_state_relation (Structures_A.IdleThreadState) ts' + = (ts' = Structures_H.IdleThreadState)" +| "thread_state_relation (Structures_A.BlockedOnReply) ts' + = (ts' = Structures_H.BlockedOnReply)" +| "thread_state_relation (Structures_A.BlockedOnReceive oref sp) ts' + = (ts' = Structures_H.BlockedOnReceive oref (receiver_can_grant sp))" +| "thread_state_relation (Structures_A.BlockedOnSend oref sp) ts' + = (ts' = Structures_H.BlockedOnSend oref (sender_badge sp) + (sender_can_grant sp) (sender_can_grant_reply sp) (sender_is_call sp))" +| "thread_state_relation (Structures_A.BlockedOnNotification oref) ts' + = (ts' = Structures_H.BlockedOnNotification oref)" + +definition arch_tcb_relation :: "Structures_A.arch_tcb \ Structures_H.arch_tcb \ bool" where + "arch_tcb_relation \ + \atcb atcb'. tcb_context atcb = atcbContext atcb' \ tcb_vcpu atcb = atcbVCPUPtr atcb'" + +definition tcb_relation :: "Structures_A.tcb \ Structures_H.tcb \ bool" where + "tcb_relation \ \tcb tcb'. + tcb_fault_handler tcb = to_bl (tcbFaultHandler tcb') + \ tcb_ipc_buffer tcb = tcbIPCBuffer tcb' + \ arch_tcb_relation (tcb_arch tcb) (tcbArch tcb') + \ thread_state_relation (tcb_state tcb) (tcbState tcb') + \ fault_rel_optionation (tcb_fault tcb) (tcbFault tcb') + \ cap_relation (tcb_ctable tcb) (cteCap (tcbCTable tcb')) + \ cap_relation (tcb_vtable tcb) (cteCap (tcbVTable tcb')) + \ cap_relation (tcb_reply tcb) (cteCap (tcbReply tcb')) + \ cap_relation (tcb_caller tcb) (cteCap (tcbCaller tcb')) + \ cap_relation (tcb_ipcframe tcb) (cteCap (tcbIPCBufferFrame tcb')) + \ tcb_bound_notification tcb = tcbBoundNotification tcb' + \ tcb_mcpriority tcb = tcbMCP tcb'" + +\ \ + A pair of objects @{term "(obj, obj')"} should satisfy the following relation when, under further + mild assumptions, a @{term corres_underlying} lemma for @{term "set_object obj"} + and @{term "setObject obj'"} can be stated: see setObject_other_corres in KHeap_R. 
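+ Every object covered by other_obj_relation has the single relation cut {(x, other_obj_relation)} in obj_relation_cuts below, so one heap update on each side is enough.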
+ + TCBs do not satisfy this relation because the tcbSchedPrev and tcbSchedNext fields of a TCB are + used to model the ready queues, and so an update to such a field would correspond to an update + to a ready queue (see ready_queues_relation below).\ +definition + other_obj_relation :: "Structures_A.kernel_object \ Structures_H.kernel_object \ bool" +where + "other_obj_relation obj obj' \ + (case (obj, obj') of + (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' + | (Notification ntfn, KONotification ntfn') \ ntfn_relation ntfn ntfn' + | (ArchObj (AARCH64_A.ASIDPool ap), KOArch (KOASIDPool ap')) \ asid_pool_relation ap ap' + | (ArchObj (AARCH64_A.VCPU vcpu), KOArch (KOVCPU vcpu')) \ vcpu_relation vcpu vcpu' + | _ \ False)" + + +primrec pte_relation' :: "AARCH64_A.pte \ AARCH64_H.pte \ bool" where + "pte_relation' AARCH64_A.InvalidPTE x = + (x = AARCH64_H.InvalidPTE)" +| "pte_relation' (AARCH64_A.PageTablePTE ppn) x = + (x = AARCH64_H.PageTablePTE (ucast ppn))" +| "pte_relation' (AARCH64_A.PagePTE page_addr is_small attrs rights) x = + (x = AARCH64_H.PagePTE page_addr is_small (Global \ attrs) (Execute \ attrs) (Device \ attrs) + (vmrights_map rights))" + +definition pte_relation :: "machine_word \ Structures_A.kernel_object \ kernel_object \ bool" where + "pte_relation y \ \ko ko'. \pt pte. ko = ArchObj (PageTable pt) \ ko' = KOArch (KOPTE pte) + \ pte_relation' (pt_apply pt y) pte" + +primrec aobj_relation_cuts :: "AARCH64_A.arch_kernel_obj \ machine_word \ obj_relation_cuts" where + "aobj_relation_cuts (DataPage dev sz) x = + { (x + (n << pageBits), \_ obj. obj = (if dev then KOUserDataDevice else KOUserData)) + | n. n < 2 ^ (pageBitsForSize sz - pageBits) }" +| "aobj_relation_cuts (AARCH64_A.ASIDPool pool) x = + {(x, other_obj_relation)}" +| "aobj_relation_cuts (PageTable pt) x = + (\y. (x + (y << pteBits), pte_relation y)) ` {0..mask (ptTranslationBits (pt_type pt))}" +| "aobj_relation_cuts (AARCH64_A.VCPU v) x = + {(x, other_obj_relation)}" + +definition tcb_relation_cut :: "Structures_A.kernel_object \ kernel_object \ bool" where + "tcb_relation_cut obj obj' \ + case (obj, obj') of + (TCB t, KOTCB t') \ tcb_relation t t' + | _ \ False" + +primrec obj_relation_cuts :: "Structures_A.kernel_object \ machine_word \ obj_relation_cuts" where + "obj_relation_cuts (CNode sz cs) x = + (if well_formed_cnode_n sz cs + then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} + else {(x, \\)})" +| "obj_relation_cuts (TCB tcb) x = {(x, tcb_relation_cut)}" +| "obj_relation_cuts (Endpoint ep) x = {(x, other_obj_relation)}" +| "obj_relation_cuts (Notification ntfn) x = {(x, other_obj_relation)}" +| "obj_relation_cuts (ArchObj ao) x = aobj_relation_cuts ao x" + +lemma obj_relation_cuts_def2: + "obj_relation_cuts ko x = + (case ko of CNode sz cs \ if well_formed_cnode_n sz cs + then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} + else {(x, \\)} + | TCB tcb \ {(x, tcb_relation_cut)} + | ArchObj (PageTable pt) \ (\y. (x + (y << pteBits), pte_relation y)) ` + {0..mask (ptTranslationBits (pt_type pt))} + | ArchObj (DataPage dev sz) \ + {(x + (n << pageBits), \_ obj. obj =(if dev then KOUserDataDevice else KOUserData)) + | n . n < 2 ^ (pageBitsForSize sz - pageBits) } + | _ \ {(x, other_obj_relation)})" + by (simp split: Structures_A.kernel_object.split + AARCH64_A.arch_kernel_obj.split) + +lemma obj_relation_cuts_def3: + "obj_relation_cuts ko x = + (case a_type ko of + ACapTable n \ {(cte_map (x, y), cte_relation y) | y. 
length y = n} + | ATCB \ {(x, tcb_relation_cut)} + | AArch (APageTable pt_t) \ (\y. (x + (y << pteBits), pte_relation y)) ` + {0..mask (ptTranslationBits pt_t)} + | AArch (AUserData sz) \ {(x + (n << pageBits), \_ obj. obj = KOUserData) + | n . n < 2 ^ (pageBitsForSize sz - pageBits) } + | AArch (ADeviceData sz) \ {(x + (n << pageBits), \_ obj. obj = KOUserDataDevice ) + | n . n < 2 ^ (pageBitsForSize sz - pageBits) } + | AGarbage _ \ {(x, \\)} + | _ \ {(x, other_obj_relation)})" + by (simp add: obj_relation_cuts_def2 a_type_def well_formed_cnode_n_def length_set_helper + split: Structures_A.kernel_object.split AARCH64_A.arch_kernel_obj.split) + +definition is_other_obj_relation_type :: "a_type \ bool" where + "is_other_obj_relation_type tp \ + case tp of + ACapTable n \ False + | ATCB \ False + | AArch (APageTable _) \ False + | AArch (AUserData _) \ False + | AArch (ADeviceData _) \ False + | AGarbage _ \ False + | _ \ True" + +lemma is_other_obj_relation_type_CapTable: + "\ is_other_obj_relation_type (ACapTable n)" + by (simp add: is_other_obj_relation_type_def) + +lemma is_other_obj_relation_type_PageTable: + "\ is_other_obj_relation_type (AArch (APageTable pt_t))" + unfolding is_other_obj_relation_type_def by simp + +lemma is_other_obj_relation_type_TCB: + "\ is_other_obj_relation_type ATCB" + by (simp add: is_other_obj_relation_type_def) + +lemma is_other_obj_relation_type_UserData: + "\ is_other_obj_relation_type (AArch (AUserData sz))" + unfolding is_other_obj_relation_type_def by simp + +lemma is_other_obj_relation_type_DeviceData: + "\ is_other_obj_relation_type (AArch (ADeviceData sz))" + unfolding is_other_obj_relation_type_def by simp + +lemma is_other_obj_relation_type: + "is_other_obj_relation_type (a_type ko) \ obj_relation_cuts ko x = {(x, other_obj_relation)}" + by (simp add: obj_relation_cuts_def3 is_other_obj_relation_type_def + split: a_type.splits aa_type.splits) + +definition pspace_dom :: "Structures_A.kheap \ machine_word set" where + "pspace_dom ps \ \x\dom ps. fst ` (obj_relation_cuts (the (ps x)) x)" + +definition pspace_relation :: + "Structures_A.kheap \ (machine_word \ Structures_H.kernel_object) \ bool" where + "pspace_relation ab con \ + (pspace_dom ab = dom con) \ + (\x \ dom ab. \(y, P) \ obj_relation_cuts (the (ab x)) x. P (the (ab x)) (the (con y)))" + +definition etcb_relation :: "etcb \ Structures_H.tcb \ bool" where + "etcb_relation \ \etcb tcb'. + tcb_priority etcb = tcbPriority tcb' + \ tcb_time_slice etcb = tcbTimeSlice tcb' + \ tcb_domain etcb = tcbDomain tcb'" + +definition ekheap_relation :: + "(obj_ref \ etcb option) \ (machine_word \ Structures_H.kernel_object) \ bool" where + "ekheap_relation ab con \ + \x \ dom ab. \tcb'. con x = Some (KOTCB tcb') \ etcb_relation (the (ab x)) tcb'" + +primrec sched_act_relation :: "Deterministic_A.scheduler_action \ scheduler_action \ bool" + where + "sched_act_relation resume_cur_thread a' = (a' = ResumeCurrentThread)" | + "sched_act_relation choose_new_thread a' = (a' = ChooseNewThread)" | + "sched_act_relation (switch_thread x) a' = (a' = SwitchToThread x)" + +definition queue_end_valid :: "obj_ref list \ tcb_queue \ bool" where + "queue_end_valid ts q \ + (ts = [] \ tcbQueueEnd q = None) \ (ts \ [] \ tcbQueueEnd q = Some (last ts))" + +definition prev_queue_head :: "tcb_queue \ (obj_ref \ 'a) \ bool" where + "prev_queue_head q prevs \ \head. 
tcbQueueHead q = Some head \ prevs head = None" + +lemma prev_queue_head_heap_upd: + "\prev_queue_head q prevs; Some r \ tcbQueueHead q\ \ prev_queue_head q (prevs(r := x))" + by (clarsimp simp: prev_queue_head_def) + +definition list_queue_relation :: + "obj_ref list \ tcb_queue \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) \ bool" + where + "list_queue_relation ts q nexts prevs \ + heap_ls nexts (tcbQueueHead q) ts \ queue_end_valid ts q \ prev_queue_head q prevs" + +lemma list_queue_relation_nil: + "list_queue_relation ts q nexts prevs \ ts = [] \ tcbQueueEmpty q" + by (fastforce dest: heap_path_head simp: tcbQueueEmpty_def list_queue_relation_def) + +definition ready_queue_relation :: + "Deterministic_A.domain \ Structures_A.priority + \ Deterministic_A.ready_queue \ ready_queue + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (obj_ref \ bool) \ bool" + where + "ready_queue_relation d p q q' nexts prevs flag \ + list_queue_relation q q' nexts prevs + \ (\t. flag t \ t \ set q) + \ (d > maxDomain \ p > maxPriority \ tcbQueueEmpty q')" + +definition ready_queues_relation_2 :: + "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) + \ (domain \ priority \ ready_queue) + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (domain \ priority \ obj_ref \ bool) \ bool" + where + "ready_queues_relation_2 qs qs' nexts prevs inQs \ + \d p. let q = qs d p; q' = qs' (d, p); flag = inQs d p in + ready_queue_relation d p q q' nexts prevs flag" + +abbreviation ready_queues_relation :: "det_state \ kernel_state \ bool" where + "ready_queues_relation s s' \ + ready_queues_relation_2 + (ready_queues s) (ksReadyQueues s') (tcbSchedNexts_of s') (tcbSchedPrevs_of s') + (\d p. inQ d p |< tcbs_of' s')" + +lemmas ready_queues_relation_def = ready_queues_relation_2_def + +definition ghost_relation :: + "Structures_A.kheap \ (machine_word \ vmpage_size) \ (machine_word \ nat) \ (machine_word \ pt_type) \ bool" where + "ghost_relation h ups cns pt_types \ + (\a sz. (\dev. h a = Some (ArchObj (DataPage dev sz))) \ ups a = Some sz) \ + (\a n. (\cs. h a = Some (CNode n cs) \ well_formed_cnode_n n cs) \ cns a = Some n) \ + (\a pt_t. (\pt. h a = Some (ArchObj (PageTable pt)) \ pt_t = pt_type pt) \ pt_types a = Some pt_t)" + +definition cdt_relation :: "(cslot_ptr \ bool) \ cdt \ cte_heap \ bool" where + "cdt_relation \ \cte_at m m'. + \c. cte_at c \ cte_map ` descendants_of c m = descendants_of' (cte_map c) m'" + +definition cdt_list_relation :: "cdt_list \ cdt \ cte_heap \ bool" where + "cdt_list_relation \ \t m m'. + \c cap node. m' (cte_map c) = Some (CTE cap node) + \ (case next_slot c t m of None \ True + | Some next \ mdbNext node = cte_map next)" + +definition revokable_relation :: + "(cslot_ptr \ bool) \ (cslot_ptr \ cap option) \ cte_heap \ bool" where + "revokable_relation revo cs m' \ + \c cap node. cs c \ None \ + m' (cte_map c) = Some (CTE cap node) \ + revo c = mdbRevocable node" + +definition irq_state_relation :: "irq_state \ irqstate \ bool" where + "irq_state_relation irq irq' \ case (irq, irq') of + (irq_state.IRQInactive, irqstate.IRQInactive) \ True + | (irq_state.IRQSignal, irqstate.IRQSignal) \ True + | (irq_state.IRQTimer, irqstate.IRQTimer) \ True + | _ \ False" + +definition interrupt_state_relation :: + "(irq \ obj_ref) \ (irq \ irq_state) \ interrupt_state \ bool" where + "interrupt_state_relation node_map irqs is \ + (\node irqs'. is = InterruptState node irqs' + \ (\irq. node_map irq = node + (ucast irq << cte_level_bits)) + \ (\irq. 
irq_state_relation (irqs irq) (irqs' irq)))" + +definition arch_state_relation :: "(arch_state \ AARCH64_H.kernel_state) set" where + "arch_state_relation \ {(s, s') . + arm_asid_table s = armKSASIDTable s' \ ucast + \ arm_us_global_vspace s = armKSGlobalUserVSpace s' + \ arm_next_vmid s = armKSNextVMID s' + \ map_option ucast \ arm_vmid_table s = armKSVMIDTable s' + \ arm_kernel_vspace s = armKSKernelVSpace s' + \ arm_current_vcpu s = armHSCurVCPU s' + \ arm_gicvcpu_numlistregs s = armKSGICVCPUNumListRegs s'}" + +definition rights_mask_map :: "rights set \ Types_H.cap_rights" where + "rights_mask_map \ + \rs. CapRights (AllowWrite \ rs) (AllowRead \ rs) (AllowGrant \ rs) (AllowGrantReply \ rs)" + + +lemma obj_relation_cutsE: + "\ (y, P) \ obj_relation_cuts ko x; P ko ko'; + \sz cs z cap cte. \ ko = CNode sz cs; well_formed_cnode_n sz cs; y = cte_map (x, z); + ko' = KOCTE cte; cs z = Some cap; cap_relation cap (cteCap cte) \ + \ R; + \tcb tcb'. \ y = x; ko = TCB tcb; ko' = KOTCB tcb'; tcb_relation tcb tcb' \ + \ R; + \pt z pte'. \ ko = ArchObj (PageTable pt); y = x + (z << pteBits); + z \ mask (ptTranslationBits (pt_type pt)); ko' = KOArch (KOPTE pte'); + pte_relation' (pt_apply pt z) pte' \ + \ R; + \sz dev n. \ ko = ArchObj (DataPage dev sz); + ko' = (if dev then KOUserDataDevice else KOUserData); + y = x + (n << pageBits); n < 2 ^ (pageBitsForSize sz - pageBits) \ \ R; + \ y = x; other_obj_relation ko ko'; is_other_obj_relation_type (a_type ko) \ \ R + \ \ R" + by (force simp: obj_relation_cuts_def2 is_other_obj_relation_type_def a_type_def + cte_relation_def pte_relation_def tcb_relation_cut_def + split: Structures_A.kernel_object.splits kernel_object.splits if_splits + AARCH64_A.arch_kernel_obj.splits) + +lemma eq_trans_helper: + "\ x = y; P y = Q \ \ P x = Q" + by simp + +lemma cap_relation_case': + "cap_relation cap cap' = (case cap of + cap.ArchObjectCap arch_cap.ASIDControlCap \ cap_relation cap cap' + | _ \ cap_relation cap cap')" + by (simp split: cap.split arch_cap.split) + +schematic_goal cap_relation_case: + "cap_relation cap cap' = ?P" + apply (subst cap_relation_case') + apply (clarsimp cong: cap.case_cong arch_cap.case_cong) + apply (rule refl) + done + +lemmas cap_relation_split = + eq_trans_helper [where P=P, OF cap_relation_case cap.split[where P=P]] for P +lemmas cap_relation_split_asm = + eq_trans_helper [where P=P, OF cap_relation_case cap.split_asm[where P=P]] for P + + + +text \ + Relations on other data types that aren't stored but used as intermediate values + in the specs. 
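+ These include message info, syscall errors and API object types; see message_info_map, syscall_error_map and APIType_map below.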
+\ +primrec message_info_map :: "Structures_A.message_info \ Types_H.message_info" where + "message_info_map (Structures_A.MI a b c d) = (Types_H.MI a b c d)" + +lemma mi_map_label[simp]: "msgLabel (message_info_map mi) = mi_label mi" + by (cases mi, simp) + +primrec syscall_error_map :: "ExceptionTypes_A.syscall_error \ Fault_H.syscall_error" where + "syscall_error_map (ExceptionTypes_A.InvalidArgument n) = Fault_H.InvalidArgument n" +| "syscall_error_map (ExceptionTypes_A.InvalidCapability n) = (Fault_H.InvalidCapability n)" +| "syscall_error_map ExceptionTypes_A.IllegalOperation = Fault_H.IllegalOperation" +| "syscall_error_map (ExceptionTypes_A.RangeError n m) = Fault_H.RangeError n m" +| "syscall_error_map ExceptionTypes_A.AlignmentError = Fault_H.AlignmentError" +| "syscall_error_map (ExceptionTypes_A.FailedLookup b lf) = Fault_H.FailedLookup b (lookup_failure_map lf)" +| "syscall_error_map ExceptionTypes_A.TruncatedMessage = Fault_H.TruncatedMessage" +| "syscall_error_map ExceptionTypes_A.DeleteFirst = Fault_H.DeleteFirst" +| "syscall_error_map ExceptionTypes_A.RevokeFirst = Fault_H.RevokeFirst" +| "syscall_error_map (ExceptionTypes_A.NotEnoughMemory n) = Fault_H.syscall_error.NotEnoughMemory n" + +definition APIType_map :: "Structures_A.apiobject_type \ AARCH64_H.object_type" where + "APIType_map ty \ + case ty of + Structures_A.Untyped \ APIObjectType ArchTypes_H.Untyped + | Structures_A.TCBObject \ APIObjectType ArchTypes_H.TCBObject + | Structures_A.EndpointObject \ APIObjectType ArchTypes_H.EndpointObject + | Structures_A.NotificationObject \ APIObjectType ArchTypes_H.NotificationObject + | Structures_A.CapTableObject \ APIObjectType ArchTypes_H.CapTableObject + | ArchObject ao \ (case ao of + SmallPageObj \ SmallPageObject + | LargePageObj \ LargePageObject + | HugePageObj \ HugePageObject + | PageTableObj \ PageTableObject + | AARCH64_A.VCPUObj \ VCPUObject)" + +definition state_relation :: "(det_state \ kernel_state) set" where + "state_relation \ {(s, s'). 
+ pspace_relation (kheap s) (ksPSpace s') + \ ekheap_relation (ekheap s) (ksPSpace s') + \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') + \ ready_queues_relation s s' + \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s')) + \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') + \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') + \ revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) (ctes_of s') + \ (arch_state s, ksArchState s') \ arch_state_relation + \ interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s') + \ (cur_thread s = ksCurThread s') + \ (idle_thread s = ksIdleThread s') + \ (machine_state s = ksMachineState s') + \ (work_units_completed s = ksWorkUnitsCompleted s') + \ (domain_index s = ksDomScheduleIdx s') + \ (domain_list s = ksDomSchedule s') + \ (cur_domain s = ksCurDomain s') + \ (domain_time s = ksDomainTime s')}" + +text \Rules for using states in the relation.\ + +lemma curthread_relation: + "(a, b) \ state_relation \ ksCurThread b = cur_thread a" + by (simp add: state_relation_def) + +lemma curdomain_relation[elim!]: + "(s, s') \ state_relation \ cur_domain s = ksCurDomain s'" + by (clarsimp simp: state_relation_def) + +lemma state_relation_pspace_relation[elim!]: + "(s,s') \ state_relation \ pspace_relation (kheap s) (ksPSpace s')" + by (simp add: state_relation_def) + +lemma state_relation_ekheap_relation[elim!]: + "(s,s') \ state_relation \ ekheap_relation (ekheap s) (ksPSpace s')" + by (simp add: state_relation_def) + +lemma state_relation_sched_act_relation[elim!]: + "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" + by (clarsimp simp: state_relation_def) + +lemma state_relation_ready_queues_relation[elim!]: + "(s, s') \ state_relation \ ready_queues_relation s s'" + by (simp add: state_relation_def) + +lemma state_relation_idle_thread[elim!]: + "(s, s') \ state_relation \ idle_thread s = ksIdleThread s'" + by (clarsimp simp: state_relation_def) + +lemma state_relationD: + "(s, s') \ state_relation \ + pspace_relation (kheap s) (ksPSpace s') \ + ekheap_relation (ekheap s) (ksPSpace s') \ + sched_act_relation (scheduler_action s) (ksSchedulerAction s') \ + ready_queues_relation s s' \ + ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s')) \ + cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ + cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') \ + revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) (ctes_of s') \ + (arch_state s, ksArchState s') \ arch_state_relation \ + interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s') \ + cur_thread s = ksCurThread s' \ + idle_thread s = ksIdleThread s' \ + machine_state s = ksMachineState s' \ + work_units_completed s = ksWorkUnitsCompleted s' \ + domain_index s = ksDomScheduleIdx s' \ + domain_list s = ksDomSchedule s' \ + cur_domain s = ksCurDomain s' \ + domain_time s = ksDomainTime s'" + unfolding state_relation_def by simp + +lemma state_relationE [elim?]: + assumes sr: "(s, s') \ state_relation" + and rl: "\ pspace_relation (kheap s) (ksPSpace s'); + ekheap_relation (ekheap s) (ksPSpace s'); + sched_act_relation (scheduler_action s) (ksSchedulerAction s'); + ready_queues_relation s s'; + ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s')); + cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ + revokable_relation (is_original_cap s) (null_filter 
(caps_of_state s)) (ctes_of s'); + cdt_list_relation (cdt_list s) (cdt s) (ctes_of s'); + (arch_state s, ksArchState s') \ arch_state_relation; + interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s'); + cur_thread s = ksCurThread s'; + idle_thread s = ksIdleThread s'; + machine_state s = ksMachineState s'; + work_units_completed s = ksWorkUnitsCompleted s'; + domain_index s = ksDomScheduleIdx s'; + domain_list s = ksDomSchedule s'; + cur_domain s = ksCurDomain s'; + domain_time s = ksDomainTime s' \ \ R" + shows "R" + using sr by (blast intro!: rl dest: state_relationD) + +lemmas isCap_defs = + isZombie_def isArchObjectCap_def + isThreadCap_def isCNodeCap_def isNotificationCap_def + isEndpointCap_def isUntypedCap_def isNullCap_def + isIRQHandlerCap_def isIRQControlCap_def isReplyCap_def + isFrameCap_def isPageTableCap_def + isASIDControlCap_def isASIDPoolCap_def + isDomainCap_def isArchFrameCap_def isVCPUCap_def + +lemma isCNodeCap_cap_map[simp]: + "cap_relation c c' \ isCNodeCap c' = is_cnode_cap c" + by (cases c) (auto simp: isCap_defs split: sum.splits) + +lemma sts_rel_idle : + "thread_state_relation st IdleThreadState = (st = Structures_A.IdleThreadState)" + by (cases st, auto) + +lemma pspace_relation_absD: + "\ ab x = Some y; pspace_relation ab con \ + \ \(x', P) \ obj_relation_cuts y x. \z. con x' = Some z \ P y z" + apply (clarsimp simp: pspace_relation_def) + apply (drule bspec, erule domI) + apply simp + apply (drule(1) bspec) + apply (subgoal_tac "a \ pspace_dom ab", clarsimp) + apply (simp (no_asm) add: pspace_dom_def) + apply (fastforce simp: image_def intro: rev_bexI) + done + +lemma ekheap_relation_absD: + "\ ab x = Some y; ekheap_relation ab con \ \ + \tcb'. con x = Some (KOTCB tcb') \ etcb_relation y tcb'" + by (force simp add: ekheap_relation_def) + +lemma in_related_pspace_dom: + "\ s' x = Some y; pspace_relation s s' \ \ x \ pspace_dom s" + by (clarsimp simp add: pspace_relation_def) + +lemma pspace_dom_revE: + "\ x \ pspace_dom ps; \ko y P. \ ps y = Some ko; (x, P) \ obj_relation_cuts ko y \ \ R \ \ R" + by (clarsimp simp add: pspace_dom_def) + +lemma pspace_dom_relatedE: + "\ s' x = Some ko'; pspace_relation s s'; + \y ko P. \ s y = Some ko; (x, P) \ obj_relation_cuts ko y; P ko ko' \ \ R \ \ R" + apply (rule pspace_dom_revE [OF in_related_pspace_dom]; assumption?) + apply (fastforce dest: pspace_relation_absD) + done + +lemma ghost_relation_typ_at: + "ghost_relation (kheap s) ups cns pt_types \ + (\a sz. data_at sz a s = (ups a = Some sz)) \ + (\a n. typ_at (ACapTable n) a s = (cns a = Some n)) \ + (\a pt_t. 
pt_at pt_t a s = (pt_types a = Some pt_t))" + apply (rule eq_reflection) + apply (clarsimp simp: ghost_relation_def typ_at_eq_kheap_obj data_at_def) + by (intro conjI impI iffI allI; force) + +end + +end diff --git a/proof/refine/AARCH64/SubMonad_R.thy b/proof/refine/AARCH64/SubMonad_R.thy new file mode 100644 index 0000000000..de45a90d91 --- /dev/null +++ b/proof/refine/AARCH64/SubMonad_R.thy @@ -0,0 +1,137 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory SubMonad_R +imports KHeap_R EmptyFail +begin + +(* SubMonadLib *) +lemma submonad_doMachineOp: + "submonad ksMachineState (ksMachineState_update \ K) \ doMachineOp" + apply (unfold_locales) + apply (clarsimp simp: ext stateAssert_def doMachineOp_def o_def gets_def + get_def bind_def return_def submonad_fn_def)+ + done + +interpretation submonad_doMachineOp: + submonad ksMachineState "(ksMachineState_update \ K)" \ doMachineOp + by (rule submonad_doMachineOp) + +lemma corres_machine_op: + assumes P: "corres_underlying Id False True r \ \ x x'" + shows "corres r \ \ (do_machine_op x) (doMachineOp x')" + apply (rule corres_submonad [OF submonad_do_machine_op submonad_doMachineOp _ _ P]) + apply (simp_all add: state_relation_def swp_def) + done + +lemma doMachineOp_mapM: + assumes "\x. empty_fail (m x)" + shows "doMachineOp (mapM m l) = mapM (doMachineOp \ m) l" + apply (rule submonad_mapM [OF submonad_doMachineOp submonad_doMachineOp, + simplified]) + apply (rule assms) + done + +lemma doMachineOp_mapM_x: + assumes "\x. empty_fail (m x)" + shows "doMachineOp (mapM_x m l) = mapM_x (doMachineOp \ m) l" + apply (rule submonad_mapM_x [OF submonad_doMachineOp submonad_doMachineOp, + simplified]) + apply (rule assms) + done + + +context begin interpretation Arch . (*FIXME: arch_split*) +definition + "asUser_fetch \ \t s. case (ksPSpace s t) of + Some (KOTCB tcb) \ (atcbContextGet o tcbArch) tcb + | None \ undefined" + +definition + "asUser_replace \ \t uc s. + let obj = case (ksPSpace s t) of + Some (KOTCB tcb) \ Some (KOTCB (tcb \tcbArch := atcbContextSet uc (tcbArch tcb)\)) + | obj \ obj + in s \ ksPSpace := (ksPSpace s) (t := obj) \" + + +lemma threadGet_stateAssert_gets_asUser: + "threadGet (atcbContextGet o tcbArch) t = do stateAssert (tcb_at' t) []; gets (asUser_fetch t) od" + apply (rule is_stateAssert_gets [OF _ _ empty_fail_threadGet no_fail_threadGet]) + apply (clarsimp simp: threadGet_def liftM_def, wp) + apply (simp add: threadGet_def liftM_def, wp getObject_tcb_at') + apply (simp add: threadGet_def liftM_def, wp) + apply (rule hoare_strengthen_post, rule getObject_obj_at') + apply (simp add: objBits_simps')+ + apply (clarsimp simp: obj_at'_def asUser_fetch_def atcbContextGet_def)+ + done + +lemma threadSet_modify_asUser: + "tcb_at' t st \ + threadSet (\tcb. tcb\ tcbArch := atcbContextSet uc (tcbArch tcb)\) t st = modify (asUser_replace t uc) st" + apply (rule is_modify [OF _ empty_fail_threadSet no_fail_threadSet]) + apply (clarsimp simp: threadSet_def setObject_def split_def + updateObject_default_def) + apply wp + apply (rule_tac Q="\rv. 
obj_at' ((=) rv) t and ((=) st)" in hoare_post_imp) + apply (clarsimp simp: asUser_replace_def Let_def obj_at'_def fun_upd_def + split: option.split kernel_object.split) + apply (wp getObject_obj_at' | clarsimp simp: objBits_simps' atcbContextSet_def)+ + done + +lemma atcbContext_get_eq[simp] : "atcbContextGet (atcbContextSet x atcb) = x" + by(simp add: atcbContextGet_def atcbContextSet_def) + +lemma atcbContext_set_eq[simp] : "atcbContextSet (atcbContextGet t) t = t" + by (cases t, simp add: atcbContextGet_def atcbContextSet_def) + + +lemma atcbContext_set_set[simp] : "atcbContextSet x (atcbContextSet y atcb) = atcbContextSet x atcb" + by (cases atcb ,simp add: atcbContextSet_def) + +lemma submonad_asUser: + "submonad (asUser_fetch t) (asUser_replace t) (tcb_at' t) (asUser t)" + apply (unfold_locales) + apply (clarsimp simp: asUser_fetch_def asUser_replace_def + Let_def obj_at'_def + split: kernel_object.split option.split) + apply (clarsimp simp: asUser_replace_def Let_def + split: kernel_object.split option.split) + apply (rename_tac tcb) + apply (case_tac tcb, simp) + apply (clarsimp simp: asUser_fetch_def asUser_replace_def Let_def + fun_upd_idem + split: kernel_object.splits option.splits) + apply (rename_tac tcb) + apply (case_tac tcb, simp add: map_upd_triv atcbContextSet_def) + apply (clarsimp simp: obj_at'_def asUser_replace_def + Let_def atcbContextSet_def + split: kernel_object.splits option.splits) + apply (rename_tac tcb) + apply (case_tac tcb, simp add: objBitsKO_def ps_clear_def) + apply (rule ext) + apply (clarsimp simp: submonad_fn_def asUser_def bind_assoc split_def) + apply (subst threadGet_stateAssert_gets_asUser, simp add: bind_assoc, rule ext) + apply (rule bind_apply_cong [OF refl])+ + apply (rule bind_apply_cong [OF threadSet_modify_asUser]) + apply (clarsimp simp: in_monad stateAssert_def select_f_def) + apply (rule refl) + done + +end + +global_interpretation submonad_asUser: + submonad "asUser_fetch t" "asUser_replace t" "tcb_at' t" "asUser t" + by (rule submonad_asUser) + +lemma doMachineOp_nosch [wp]: + "\\s. P (ksSchedulerAction s)\ doMachineOp m \\rv s. P (ksSchedulerAction s)\" + apply (simp add: doMachineOp_def split_def) + apply (wp select_f_wp) + apply simp + done + +end diff --git a/proof/refine/AARCH64/Syscall_R.thy b/proof/refine/AARCH64/Syscall_R.thy new file mode 100644 index 0000000000..9de40e6e4d --- /dev/null +++ b/proof/refine/AARCH64/Syscall_R.thy @@ -0,0 +1,2094 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Refinement for handleEvent and syscalls +*) + +theory Syscall_R +imports Tcb_R Arch_R Interrupt_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +(* +syscall has 5 sections: m_fault h_fault m_error h_error m_finalise + +run m_fault (faultable code) \ r_fault + failure, i.e. Inr somefault \ \somefault. h_fault; done + +success, i.e. Inl a + \ run \a. m_error a (errable code) \ r_error + failure, i.e. Inr someerror \ \someerror. h_error e; done + success, i.e. Inl b \ \b. m_finalise b + +One can clearly see this is simulating some kind of monadic Maybe sequence +trying to identify all possible errors before actually performing the syscall. +*) + +lemma syscall_corres: + assumes corres: + "corres (fr \ r_flt_rel) P P' m_flt m_flt'" + "\flt flt'. flt' = fault_map flt \ + corres r (P_flt flt) (P'_flt flt') (h_flt flt) (h_flt' flt')" + "\rv rv'. 
r_flt_rel rv rv' \ + corres (ser \ r_err_rel rv rv') + (P_no_flt rv) (P'_no_flt rv') + (m_err rv) (m_err' rv')" + "\rv rv' err err'. \r_flt_rel rv rv'; err' = syscall_error_map err \ + \ corres r (P_err rv err) + (P'_err rv' err') (h_err err) (h_err' err')" + "\rvf rvf' rve rve'. \r_flt_rel rvf rvf'; r_err_rel rvf rvf' rve rve'\ + \ corres (dc \ r) + (P_no_err rvf rve) (P'_no_err rvf' rve') + (m_fin rve) (m_fin' rve')" + + assumes wp: + "\rv. \Q_no_flt rv\ m_err rv \P_no_err rv\, \P_err rv\" + "\rv'. \Q'_no_flt rv'\ m_err' rv' \P'_no_err rv'\,\P'_err rv'\" + "\Q\ m_flt \\rv. P_no_flt rv and Q_no_flt rv\, \P_flt\" + "\Q'\ m_flt' \\rv. P'_no_flt rv and Q'_no_flt rv\, \P'_flt\" + + shows "corres (dc \ r) (P and Q) (P' and Q') + (Syscall_A.syscall m_flt h_flt m_err h_err m_fin) + (Syscall_H.syscall m_flt' h_flt' m_err' h_err' m_fin')" + apply (simp add: Syscall_A.syscall_def Syscall_H.syscall_def liftE_bindE) + apply (rule corres_split_bind_case_sum) + apply (rule corres_split_bind_case_sum | rule corres | rule wp | simp add: liftE_bindE)+ + done + +lemma syscall_valid': + assumes x: + "\ft. \P_flt ft\ h_flt ft \Q\" + "\err. \P_err err\ h_err err \Q\" + "\rv. \P_no_err rv\ m_fin rv \Q\,\E\" + "\rv. \P_no_flt rv\ m_err rv \P_no_err\, \P_err\" + "\P\ m_flt \P_no_flt\, \P_flt\" + shows "\P\ Syscall_H.syscall m_flt h_flt m_err h_err m_fin \Q\, \E\" + apply (simp add: Syscall_H.syscall_def liftE_bindE + cong: sum.case_cong) + apply (rule hoare_split_bind_case_sumE) + apply (wp x)[1] + apply (rule hoare_split_bind_case_sumE) + apply (wp x|simp)+ + done + + +text \Completing the relationship between abstract/haskell invocations\ + +primrec + inv_relation :: "Invocations_A.invocation \ Invocations_H.invocation \ bool" +where + "inv_relation (Invocations_A.InvokeUntyped i) x = + (\i'. untypinv_relation i i' \ x = InvokeUntyped i')" +| "inv_relation (Invocations_A.InvokeEndpoint w w2 b c) x = + (x = InvokeEndpoint w w2 b c)" +| "inv_relation (Invocations_A.InvokeNotification w w2) x = + (x = InvokeNotification w w2)" +| "inv_relation (Invocations_A.InvokeReply w ptr grant) x = + (x = InvokeReply w (cte_map ptr) grant)" +| "inv_relation (Invocations_A.InvokeTCB i) x = + (\i'. tcbinv_relation i i' \ x = InvokeTCB i')" +| "inv_relation (Invocations_A.InvokeDomain tptr domain) x = + (x = InvokeDomain tptr domain)" +| "inv_relation (Invocations_A.InvokeIRQControl i) x = + (\i'. irq_control_inv_relation i i' \ x = InvokeIRQControl i')" +| "inv_relation (Invocations_A.InvokeIRQHandler i) x = + (\i'. irq_handler_inv_relation i i' \ x = InvokeIRQHandler i')" +| "inv_relation (Invocations_A.InvokeCNode i) x = + (\i'. cnodeinv_relation i i' \ x = InvokeCNode i')" +| "inv_relation (Invocations_A.InvokeArchObject i) x = + (\i'. archinv_relation i i' \ x = InvokeArchObject i')" + +(* In order to assert conditions that must hold for the appropriate + handleInvocation and handle_invocation calls to succeed, we must have + some notion of what a valid invocation is. + This function defines that. + For example, a InvokeEndpoint requires an endpoint at its first + constructor argument. 
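+ The lemma decode_inv_wf' below shows that decodeInvocation establishes valid_invocation' for its result.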
*) + +primrec + valid_invocation' :: "Invocations_H.invocation \ kernel_state \ bool" +where + "valid_invocation' (Invocations_H.InvokeUntyped i) = valid_untyped_inv' i" +| "valid_invocation' (Invocations_H.InvokeEndpoint w w2 b c) = (ep_at' w and ex_nonz_cap_to' w)" +| "valid_invocation' (Invocations_H.InvokeNotification w w2) = (ntfn_at' w and ex_nonz_cap_to' w)" +| "valid_invocation' (Invocations_H.InvokeTCB i) = tcb_inv_wf' i" +| "valid_invocation' (Invocations_H.InvokeDomain thread domain) = + (tcb_at' thread and K (domain \ maxDomain))" +| "valid_invocation' (Invocations_H.InvokeReply thread slot grant) = + (tcb_at' thread and cte_wp_at' (\cte. \gr. cteCap cte = ReplyCap thread False gr) slot)" +| "valid_invocation' (Invocations_H.InvokeIRQControl i) = irq_control_inv_valid' i" +| "valid_invocation' (Invocations_H.InvokeIRQHandler i) = irq_handler_inv_valid' i" +| "valid_invocation' (Invocations_H.InvokeCNode i) = valid_cnode_inv' i" +| "valid_invocation' (Invocations_H.InvokeArchObject i) = valid_arch_inv' i" + + +(* FIXME: move *) +lemma decodeDomainInvocation_corres: + shows "\ list_all2 cap_relation (map fst cs) (map fst cs'); + list_all2 (\p pa. snd pa = cte_map (snd p)) cs cs' \ \ + corres (ser \ ((\x. inv_relation x \ uncurry Invocations_H.invocation.InvokeDomain) \ (\(x,y). Invocations_A.invocation.InvokeDomain x y))) \ \ + (decode_domain_invocation label args cs) + (decodeDomainInvocation label args cs')" + apply (simp add: decode_domain_invocation_def decodeDomainInvocation_def) + apply (rule whenE_throwError_corres_initial) + apply (simp+)[2] + apply (case_tac "args", simp_all) + apply (rule corres_guard_imp) + apply (rule_tac r'="\domain domain'. domain = domain'" and R="\_. \" and R'="\_. \" + in corres_splitEE) apply (rule whenE_throwError_corres) + apply (simp+)[2] + apply (rule corres_returnOkTT) + apply simp + apply (rule whenE_throwError_corres_initial) + apply simp + apply (case_tac "cs") + apply ((case_tac "cs'", ((simp add: null_def)+)[2])+)[2] + apply (subgoal_tac "cap_relation (fst (hd cs)) (fst (hd cs'))") + apply (case_tac "fst (hd cs)") + apply (case_tac "fst (hd cs')", simp+, rule corres_returnOkTT) + apply (simp add: inv_relation_def o_def uncurry_def) + apply (case_tac "fst (hd cs')", fastforce+) + apply (case_tac "cs") + apply (case_tac "cs'", ((simp add: list_all2_map2 list_all2_map1)+)[2]) + apply (case_tac "cs'", ((simp add: list_all2_map2 list_all2_map1)+)[2]) + apply (wp | simp)+ + done + +lemma decodeInvocation_corres: + "\cptr = to_bl cptr'; mi' = message_info_map mi; + slot' = cte_map slot; cap_relation cap cap'; + args = args'; list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\p pa. snd pa = cte_map (snd p)) excaps excaps' \ + \ + corres (ser \ inv_relation) + (invs and valid_sched and valid_list + and valid_cap cap and cte_at slot and cte_wp_at ((=) cap) slot + and (\s. \x\set excaps. s \ fst x \ cte_at (snd x) s) + and (\s. length args < 2 ^ word_bits)) + (invs' and valid_cap' cap' and cte_at' slot' + and (\s. \x\set excaps'. 
s \' fst x \ cte_at' (snd x) s)) + (decode_invocation (mi_label mi) args cptr slot cap excaps) + (RetypeDecls_H.decodeInvocation (mi_label mi) args' cptr' slot' cap' excaps')" + apply (rule corres_gen_asm) + apply (unfold decode_invocation_def decodeInvocation_def) + apply (case_tac cap, simp_all only: cap.simps) + \ \dammit, simp_all messes things up, must handle cases manually\ + \ \Null\ + apply (simp add: isCap_defs) + \ \Untyped\ + apply (simp add: isCap_defs Let_def o_def split del: if_split) + apply (rule corres_guard_imp, rule decodeUntypedInvocation_corres) + apply ((clarsimp simp:cte_wp_at_caps_of_state)+)[3] + \ \(Async)Endpoint\ + apply (simp add: isCap_defs returnOk_def) + apply (simp add: isCap_defs) + apply (clarsimp simp: returnOk_def neq_Nil_conv) + \ \ReplyCap\ + apply (simp add: isCap_defs Let_def returnOk_def) + \ \CNodeCap\ + apply (rename_tac word nat list) + apply (simp add: isCap_defs Let_def CanModify_def + split del: if_split cong: if_cong) + apply (clarsimp simp add: o_def) + apply (rule corres_guard_imp) + apply (rule_tac F="length list \ 64" in corres_gen_asm) + apply (rule decodeCNodeInvocation_corres, simp+) + apply (simp add: valid_cap_def word_bits_def) + apply simp + \ \ThreadCap\ + apply (simp add: isCap_defs Let_def CanModify_def + split del: if_split cong: if_cong) + apply (clarsimp simp add: o_def) + apply (rule corres_guard_imp) + apply (rule decodeTCBInvocation_corres, rule refl, + simp_all add: valid_cap_def valid_cap'_def)[3] + apply (simp add: split_def) + apply (rule list_all2_conj) + apply (simp add: list_all2_map2 list_all2_map1) + apply assumption + \ \DomainCap\ + apply (simp add: isCap_defs) + apply (rule corres_guard_imp) + apply (rule decodeDomainInvocation_corres) + apply (simp+)[4] + \ \IRQControl\ + apply (simp add: isCap_defs o_def) + apply (rule corres_guard_imp, rule decodeIRQControlInvocation_corres, simp+)[1] + \ \IRQHandler\ + apply (simp add: isCap_defs o_def) + apply (rule corres_guard_imp, rule decodeIRQHandlerInvocation_corres, simp+)[1] + \ \Zombie\ + apply (simp add: isCap_defs) + \ \Arch\ + apply (clarsimp simp only: cap_relation.simps) + apply (clarsimp simp add: isCap_defs Let_def o_def) + apply (rule corres_guard_imp [OF arch_decodeInvocation_corres]) + apply (simp_all add: list_all2_map2 list_all2_map1)+ + done + +declare mapME_Nil [simp] + +lemma hinv_corres_assist: + "\ info' = message_info_map info \ + \ corres (fr \ (\(p, cap, extracaps, buffer) (p', capa, extracapsa, buffera). + p' = cte_map p \ cap_relation cap capa \ buffer = buffera \ + list_all2 + (\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x)) + extracaps extracapsa)) + + (invs and tcb_at thread and (\_. 
valid_message_info info)) + (invs' and tcb_at' thread) + (doE (cap, slot) \ + cap_fault_on_failure cptr' False + (lookup_cap_and_slot thread (to_bl cptr')); + do + buffer \ lookup_ipc_buffer False thread; + doE extracaps \ lookup_extra_caps thread buffer info; + returnOk (slot, cap, extracaps, buffer) + odE + od + odE) + (doE (cap, slot) \ capFaultOnFailure cptr' False (lookupCapAndSlot thread cptr'); + do buffer \ VSpace_H.lookupIPCBuffer False thread; + doE extracaps \ lookupExtraCaps thread buffer info'; + returnOk (slot, cap, extracaps, buffer) + odE + od + odE)" + apply (clarsimp simp add: split_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF corres_cap_fault]) + \ \switched over to argument of corres_cap_fault\ + apply (rule lookupCapAndSlot_corres, simp) + apply (rule corres_split[OF lookupIPCBuffer_corres]) + apply (rule corres_splitEE) + apply (rule lookupExtraCaps_corres; simp) + apply (rule corres_returnOkTT) + apply (wp | simp)+ + apply auto + done + +lemma msg_from_syserr_map[simp]: + "msgFromSyscallError (syscall_error_map err) = msg_from_syscall_error err" + apply (simp add: msgFromSyscallError_def) + apply (case_tac err,clarsimp+) + done + +lemma threadSet_tcbDomain_update_ct_idle_or_in_cur_domain': + "\ct_idle_or_in_cur_domain' and (\s. ksSchedulerAction s \ ResumeCurrentThread) \ + threadSet (tcbDomain_update (\_. domain)) t + \\_. ct_idle_or_in_cur_domain'\" + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (wp hoare_vcg_disj_lift hoare_vcg_imp_lift) + apply (wp | wps)+ + apply (auto simp: obj_at'_def) + done + +lemma threadSet_tcbDomain_update_ct_not_inQ: + "\ct_not_inQ \ threadSet (tcbDomain_update (\_. domain)) t \\_. ct_not_inQ\" + apply (simp add: threadSet_def ct_not_inQ_def) + apply (wp) + apply (rule hoare_convert_imp [OF setObject_nosch]) + apply (rule updateObject_tcb_inv) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest getObject_tcb_wp)+ + apply (case_tac "t = ksCurThread s") + apply (clarsimp simp: obj_at'_def)+ + done + +(* FIXME: move *) +lemma setObject_F_ct_activatable': + "\\tcb f. tcbState (F f tcb) = tcbState tcb \ \ \ct_in_state' activatable' and obj_at' ((=) tcb) t\ + setObject t (F f tcb) + \\_. ct_in_state' activatable'\" + apply (clarsimp simp: ct_in_state'_def st_tcb_at'_def) + apply (rule hoare_pre) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest) + apply (clarsimp simp: obj_at'_def) + done + +lemmas setObject_tcbDomain_update_ct_activatable'[wp] = setObject_F_ct_activatable'[where F="tcbDomain_update", simplified] + +(* FIXME: move *) +lemma setObject_F_st_tcb_at': + "\\tcb f. tcbState (F f tcb) = tcbState tcb \ \ \st_tcb_at' P t' and obj_at' ((=) tcb) t\ + setObject t (F f tcb) + \\_. st_tcb_at' P t'\" + apply (simp add: st_tcb_at'_def) + apply (rule hoare_pre) + apply (wp setObject_tcb_strongest) + apply (clarsimp simp: obj_at'_def) + done + +lemmas setObject_tcbDomain_update_st_tcb_at'[wp] = setObject_F_st_tcb_at'[where F="tcbDomain_update", simplified] + +lemma threadSet_tcbDomain_update_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s \ sch_act_not t s\ + threadSet (tcbDomain_update (\_. domain)) t + \\_ s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: sch_act_wf_cases split: scheduler_action.split) + apply (wp hoare_vcg_conj_lift) + apply (simp add: threadSet_def) + apply wp + apply (wps setObject_sa_unchanged) + apply (wp hoare_weak_lift_imp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (rename_tac word) + apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ + st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" + in hoare_strengthen_post) + apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_vcg_imp_lift)+ + apply (simp add: threadSet_def) + apply (wp getObject_tcb_wp threadSet_tcbDomain_triv')+ + apply (auto simp: obj_at'_def) + done + +lemma setDomain_corres: + "corres dc + (valid_etcbs and valid_sched and tcb_at tptr and pspace_aligned and pspace_distinct) + (invs' and sch_act_simple and tcb_at' tptr and (\s. new_dom \ maxDomain)) + (set_domain tptr new_dom) (setDomain tptr new_dom)" + apply (rule corres_gen_asm2) + apply (simp add: set_domain_def setDomain_def thread_set_domain_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) + apply (rule corres_split) + apply (rule ethread_set_corres; simp) + apply (clarsimp simp: etcb_relation_def) + apply (rule corres_split[OF isRunnable_corres]) + apply simp + apply (rule corres_split) + apply clarsimp + apply (rule corres_when[OF refl]) + apply (rule tcbSchedEnqueue_corres, simp) + apply (rule corres_when[OF refl]) + apply (rule rescheduleRequired_corres) + apply (wpsimp wp: hoare_drop_imps) + apply ((wpsimp wp: hoare_drop_imps | strengthen valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: gts_wp) + apply wpsimp + apply ((wpsimp wp: hoare_vcg_imp_lift' ethread_set_not_queued_valid_queues hoare_vcg_all_lift + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply (rule_tac Q="\_. valid_objs' and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct' + and (\s. sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" + in hoare_strengthen_post[rotated]) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def) + apply (wpsimp wp: threadSet_valid_objs' threadSet_sched_pointers + threadSet_valid_sched_pointers)+ + apply (rule_tac Q="\_ s. valid_queues s \ not_queued tptr s + \ pspace_aligned s \ pspace_distinct s \ valid_etcbs s + \ weak_valid_sched_action s" + in hoare_post_imp) + apply (fastforce simp: pred_tcb_at_def obj_at_def) + apply (wpsimp wp: tcb_dequeue_not_queued) + apply (rule_tac Q = "\_ s. invs' s \ obj_at' (Not \ tcbQueued) tptr s \ sch_act_simple s + \ tcb_at' tptr s" + in hoare_strengthen_post[rotated]) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) + apply (clarsimp simp: valid_tcb'_def obj_at'_def) + apply (drule (1) bspec) + apply (clarsimp simp: tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply (wp hoare_vcg_all_lift tcbSchedDequeue_not_queued)+ + apply clarsimp + apply (frule tcb_at_is_etcb_at) + apply simp+ + apply (auto elim: tcb_at_is_etcb_at valid_objs'_maxDomain valid_objs'_maxPriority pred_tcb'_weakenE + simp: valid_sched_def valid_sched_action_def) + done + +lemma performInvocation_corres: + "\ inv_relation i i'; call \ block \ \ + corres (dc \ (=)) + (einvs and valid_invocation i + and schact_is_rct + and ct_active + and (\s. (\w w2 b c. 
i = Invocations_A.InvokeEndpoint w w2 b c) \ st_tcb_at simple (cur_thread s) s)) + (invs' and sch_act_simple and valid_invocation' i' and ct_active') + (perform_invocation block call i) (performInvocation block call i')" + apply (simp add: performInvocation_def) + apply (case_tac i) + apply (clarsimp simp: o_def liftE_bindE) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule corres_rel_imp, rule inv_untyped_corres) + apply simp + apply (case_tac x, simp_all)[1] + apply (rule corres_returnOkTT, simp) + apply wp+ + apply simp+ + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply simp + apply (rule corres_split[OF sendIPC_corres]) + apply simp + apply (rule corres_trivial) + apply simp + apply wp+ + apply (clarsimp simp: ct_in_state_def) + apply (fastforce elim: st_tcb_ex_cap) + apply (clarsimp simp: pred_conj_def invs'_def cur_tcb'_def simple_sane_strg + sch_act_simple_def) + apply (rule corres_guard_imp) + apply (simp add: liftE_bindE) + apply (rule corres_split[OF sendSignal_corres]) + apply (rule corres_trivial) + apply (simp add: returnOk_def) + apply wp+ + apply (simp+)[2] + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split_nor[OF doReplyTransfer_corres']) + apply (rule corres_trivial, simp) + apply wp+ + apply (clarsimp simp: tcb_at_invs) + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def) + apply (erule cte_wp_at_weakenE, fastforce simp: is_reply_cap_to_def) + apply (clarsimp simp: tcb_at_invs') + apply (fastforce elim!: cte_wp_at_weakenE') + apply (clarsimp simp: liftME_def) + apply (rule corres_guard_imp) + apply (erule invokeTCB_corres) + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] + \ \domain cap\ + apply (clarsimp simp: invoke_domain_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setDomain_corres]) + apply (rule corres_trivial, simp) + apply (wp)+ + apply ((clarsimp simp: invs_psp_aligned invs_distinct)+)[2] + \ \CNodes\ + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF invokeCNode_corres]) + apply assumption + apply (rule corres_trivial, simp add: returnOk_def) + apply wp+ + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] + apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) + apply (rule corres_guard_imp, rule performIRQControl_corres, simp+) + apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) + apply (rule corres_guard_imp, rule invokeIRQHandler_corres, simp+) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule arch_performInvocation_corres, assumption) + apply (clarsimp+)[2] + done + +lemma sendSignal_tcb_at'[wp]: + "\tcb_at' t\ + sendSignal ntfnptr bdg + \\rv. tcb_at' t\" + apply (simp add: sendSignal_def + cong: list.case_cong Structures_H.notification.case_cong) + apply (wp ntfn'_cases_weak_wp list_cases_weak_wp hoare_drop_imps | wpc | simp)+ + done + +lemmas checkCap_inv_typ_at' + = checkCap_inv[where P="\s. P (typ_at' T p s)" for P T p] + +crunches restart, bindNotification, performTransfer + for typ_at'[wp]: "\s. P (typ_at' T p s)" + +lemma invokeTCB_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ + invokeTCB tinv + \\rv s. 
P (typ_at' T p s)\" + apply (cases tinv, + simp_all add: invokeTCB_def + getThreadBufferSlot_def locateSlot_conv + split del: if_split) + apply (simp only: cases_simp if_cancel simp_thms conj_comms pred_conj_def + Let_def split_def getThreadVSpaceRoot + | (simp split del: if_split cong: if_cong) + | (wp mapM_x_wp[where S=UNIV, simplified] + checkCap_inv_typ_at' + case_options_weak_wp)[1] + | wpcw)+ + done + +lemmas invokeTCB_typ_ats[wp] = typ_at_lifts [OF invokeTCB_typ_at'] + +crunch typ_at'[wp]: doReplyTransfer "\s. P (typ_at' T p s)" + (wp: hoare_drop_imps) + +lemmas doReplyTransfer_typ_ats[wp] = typ_at_lifts [OF doReplyTransfer_typ_at'] + +crunch typ_at'[wp]: "performIRQControl" "\s. P (typ_at' T p s)" + +lemmas invokeIRQControl_typ_ats[wp] = + typ_at_lifts [OF performIRQControl_typ_at'] + +crunch typ_at'[wp]: InterruptDecls_H.invokeIRQHandler "\s. P (typ_at' T p s)" + +lemmas invokeIRQHandler_typ_ats[wp] = + typ_at_lifts [OF InterruptDecls_H_invokeIRQHandler_typ_at'] + +crunch tcb_at'[wp]: setDomain "tcb_at' tptr" + (simp: crunch_simps) + +lemma pinv_tcb'[wp]: + "\invs' and st_tcb_at' active' tptr + and valid_invocation' i and ct_active'\ + RetypeDecls_H.performInvocation block call i + \\rv. tcb_at' tptr\" + apply (simp add: performInvocation_def) + apply (case_tac i, simp_all) + apply (wp invokeArch_tcb_at' | clarsimp simp: pred_tcb_at')+ + done + +lemma sts_cte_at[wp]: + "\cte_at' p\ setThreadState st t \\rv. cte_at' p\" + apply (simp add: setThreadState_def) + apply (wp|simp)+ + done + +crunch obj_at_ntfn[wp]: setThreadState "obj_at' (\ntfn. P (ntfnBoundTCB ntfn) (ntfnObj ntfn)) ntfnptr" + (wp: obj_at_setObject2 crunch_wps + simp: crunch_simps updateObject_default_def in_monad) + +lemma sts_mcpriority_tcb_at'[wp]: + "\mcpriority_tcb_at' P t\ + setThreadState st t' + \\_. mcpriority_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setThreadState_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +lemma sts_valid_inv'[wp]: + "\valid_invocation' i\ setThreadState st t \\rv. 
valid_invocation' i\" + apply (case_tac i, simp_all add: sts_valid_untyped_inv' sts_valid_arch_inv') + apply (wp | simp)+ + defer + apply (rename_tac cnode_invocation) + apply (case_tac cnode_invocation, simp_all add: cte_wp_at_ctes_of) + apply (wp | simp)+ + apply (rename_tac irqcontrol_invocation) + apply (case_tac irqcontrol_invocation, simp_all add: arch_irq_control_inv_valid'_def) + apply (rename_tac archirq_inv) + apply (case_tac archirq_inv; simp) + apply (wp | simp add: irq_issued'_def)+ + apply (rename_tac irqhandler_invocation) + apply (case_tac irqhandler_invocation, simp_all) + apply (wp hoare_vcg_ex_lift ex_cte_cap_to'_pres | simp)+ + apply (rename_tac tcbinvocation) + apply (case_tac tcbinvocation, + simp_all add: setThreadState_tcb', + auto intro!: hoare_vcg_conj_lift hoare_vcg_disj_lift + simp only: imp_conv_disj simp_thms pred_conj_def, + auto intro!: hoare_vcg_prop + sts_cap_to' sts_cte_cap_to' + setThreadState_typ_ats + split: option.splits)[1] + apply (wp sts_bound_tcb_at' hoare_vcg_all_lift hoare_vcg_const_imp_lift)+ + done + +(* FIXME: move to TCB *) +crunch inv[wp]: decodeDomainInvocation P + (wp: crunch_wps simp: crunch_simps) + +lemma arch_cap_exhausted: + "\\ isFrameCap cap; \ isPageTableCap cap; \ isASIDControlCap cap; \ isASIDPoolCap cap; \ isVCPUCap cap\ + \ undefined \P\" + by (cases cap; simp add: isCap_simps) + +crunch inv[wp]: decodeInvocation P + (simp: crunch_simps wp: crunch_wps arch_cap_exhausted mapME_x_inv_wp getASID_wp) + +(* FIXME: move to TCB *) +lemma dec_dom_inv_wf[wp]: + "\invs' and (\s. \x \ set excaps. s \' fst x)\ + decodeDomainInvocation label args excaps + \\x s. tcb_at' (fst x) s \ snd x \ maxDomain\, -" + apply (simp add:decodeDomainInvocation_def) + apply (wp whenE_throwError_wp | wpc |simp)+ + apply clarsimp + apply (drule_tac x = "hd excaps" in bspec) + apply (rule hd_in_set) + apply (simp add:null_def) + apply (simp add:valid_cap'_def) + apply (simp add:not_le) + apply (simp del: Word.of_nat_unat flip: ucast_nat_def) + apply (rule word_of_nat_le) + apply (simp add: le_maxDomain_eq_less_numDomains) + done + +lemma decode_inv_wf'[wp]: + "\valid_cap' cap and invs' and sch_act_simple + and cte_wp_at' ((=) cap \ cteCap) slot and real_cte_at' slot + and (\s. \r\zobj_refs' cap. ex_nonz_cap_to' r s) + and (\s. \r\cte_refs' cap (irq_node' s). ex_cte_cap_to' r s) + and (\s. \cap \ set excaps. \r\cte_refs' (fst cap) (irq_node' s). ex_cte_cap_to' r s) + and (\s. \cap \ set excaps. \r\zobj_refs' (fst cap). ex_nonz_cap_to' r s) + and (\s. \x \ set excaps. cte_wp_at' ((=) (fst x) o cteCap) (snd x) s) + and (\s. \x \ set excaps. s \' fst x) + and (\s. \x \ set excaps. real_cte_at' (snd x) s) + and (\s. \x \ set excaps. ex_cte_cap_wp_to' isCNodeCap (snd x) s) + and (\s. \x \ set excaps. 
cte_wp_at' (badge_derived' (fst x) o cteCap) (snd x) s)\ + decodeInvocation label args cap_index slot cap excaps + \valid_invocation'\,-" + apply (case_tac cap, simp_all add: decodeInvocation_def Let_def isCap_defs uncurry_def split_def + split del: if_split + cong: if_cong) + apply (rule hoare_pre, + ((wp decodeTCBInv_wf | simp add: o_def)+)[1], + clarsimp simp: valid_cap'_def cte_wp_at_ctes_of + | (rule exI, rule exI, erule (1) conjI) + | drule_tac t="cteCap cte" in sym, simp)+ + done + +lemma ct_active_imp_simple'[elim!]: + "ct_active' s \ st_tcb_at' simple' (ksCurThread s) s" + by (clarsimp simp: ct_in_state'_def + elim!: pred_tcb'_weakenE) + +lemma ct_running_imp_simple'[elim!]: + "ct_running' s \ st_tcb_at' simple' (ksCurThread s) s" + by (clarsimp simp: ct_in_state'_def + elim!: pred_tcb'_weakenE) + +lemma active_ex_cap'[elim]: + "\ ct_active' s; if_live_then_nonz_cap' s \ + \ ex_nonz_cap_to' (ksCurThread s) s" + by (fastforce simp: ct_in_state'_def elim!: st_tcb_ex_cap'') + +crunch it[wp]: handleFaultReply "\s. P (ksIdleThread s)" + +lemma handleFaultReply_invs[wp]: + "\invs' and tcb_at' t\ handleFaultReply x t label msg \\rv. invs'\" + apply (simp add: handleFaultReply_def) + apply (case_tac x; wpsimp simp: handleArchFaultReply_def) + done + +crunch sch_act_simple[wp]: handleFaultReply sch_act_simple + (wp: crunch_wps) + +lemma transferCaps_non_null_cte_wp_at': + assumes PUC: "\cap. P cap \ \ isUntypedCap cap" + shows "\cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\ + transferCaps info caps ep rcvr rcvBuf + \\_. cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\" +proof - + have CTEF: "\P p s. \ cte_wp_at' P p s; \cte. P cte \ False \ \ False" + by (erule cte_wp_atE', auto) + show ?thesis + unfolding transferCaps_def + apply (wp | wpc)+ + apply (rule transferCapsToSlots_pres2) + apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) + apply (rule PUC,simp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ + done +qed + +crunch cte_wp_at' [wp]: setMessageInfo "cte_wp_at' P p" + +lemma copyMRs_cte_wp_at'[wp]: + "\cte_wp_at' P ptr\ copyMRs sender sendBuf receiver recvBuf n \\_. cte_wp_at' P ptr\" + unfolding copyMRs_def + apply (wp mapM_wp | wpc | simp add: split_def | rule equalityD1)+ + done + +lemma doNormalTransfer_non_null_cte_wp_at': + assumes PUC: "\cap. P cap \ \ isUntypedCap cap" + shows + "\cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\ + doNormalTransfer st send_buffer ep b gr rt recv_buffer + \\_. cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\" + unfolding doNormalTransfer_def + apply (wp transferCaps_non_null_cte_wp_at' | simp add:PUC)+ + done + +lemma setMRs_cte_wp_at'[wp]: + "\cte_wp_at' P ptr\ setMRs thread buffer messageData \\_. cte_wp_at' P ptr\" + by (simp add: setMRs_def zipWithM_x_mapM split_def, wp crunch_wps) + +lemma doFaultTransfer_cte_wp_at'[wp]: + "\cte_wp_at' P ptr\ + doFaultTransfer badge sender receiver receiverIPCBuffer + \\_. cte_wp_at' P ptr\" + unfolding doFaultTransfer_def + apply (wp | wpc | simp add: split_def)+ + done + +lemma doIPCTransfer_non_null_cte_wp_at': + assumes PUC: "\cap. P cap \ \ isUntypedCap cap" + shows + "\cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\ + doIPCTransfer sender endpoint badge grant receiver + \\_. cte_wp_at' (\cte. 
P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\" + unfolding doIPCTransfer_def + apply (wp doNormalTransfer_non_null_cte_wp_at' hoare_drop_imp hoare_allI | wpc | clarsimp simp:PUC)+ + done + +lemma doIPCTransfer_non_null_cte_wp_at2': + fixes P + assumes PNN: "\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap" + and PUC: "\cap. P cap \ \ isUntypedCap cap" + shows "\cte_wp_at' (\cte. P (cteCap cte)) ptr\ + doIPCTransfer sender endpoint badge grant receiver + \\_. cte_wp_at' (\cte. P (cteCap cte)) ptr\" + proof - + have PimpQ: "\P Q ptr s. \ cte_wp_at' (\cte. P (cteCap cte)) ptr s; + \cte. P (cteCap cte) \ Q (cteCap cte) \ + \ cte_wp_at' (\cte. P (cteCap cte) \ Q (cteCap cte)) ptr s" + by (erule cte_wp_at_weakenE', clarsimp) + show ?thesis + apply (rule hoare_chain [OF doIPCTransfer_non_null_cte_wp_at']) + apply (erule PUC) + apply (erule PimpQ) + apply (drule PNN, clarsimp) + apply (erule cte_wp_at_weakenE') + apply (clarsimp) + done + qed + +lemma st_tcb_at'_eqD: + "\ st_tcb_at' (\s. s = st) t s; st_tcb_at' (\s. s = st') t s \ \ st = st'" + by (clarsimp simp add: pred_tcb_at'_def obj_at'_def) + +lemma isReply_awaiting_reply': + "isReply st = awaiting_reply' st" + by (case_tac st, (clarsimp simp add: isReply_def)+) + +lemma doReply_invs[wp]: + "\tcb_at' t and tcb_at' t' and + cte_wp_at' (\cte. \grant. cteCap cte = ReplyCap t False grant) slot and + invs' and sch_act_simple\ + doReplyTransfer t' t slot grant + \\_. invs'\" + apply (simp add: doReplyTransfer_def liftM_def) + apply (rule bind_wp [OF _ gts_sp']) + apply (rule bind_wp [OF _ assert_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) + apply (wp, wpc) + apply wp + apply (wp (once) sts_invs_minor'') + apply simp + apply (wp (once) sts_st_tcb') + apply wp + apply (rule_tac Q="\_ s. invs' s \ t \ ksIdleThread s \ st_tcb_at' awaiting_reply' t s" + in hoare_post_imp) + apply clarsimp + apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) + apply (drule(1) pred_tcb_at_conj') + apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") + apply clarsimp + apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" + in pred_tcb'_weakenE) + apply (case_tac st, clarsimp+) + apply (wp cteDeleteOne_reply_pred_tcb_at)+ + apply clarsimp + apply (rule_tac Q="\_. (\s. t \ ksIdleThread s) + and cte_wp_at' (\cte. \grant. cteCap cte + = capability.ReplyCap t False grant) slot" + in hoare_strengthen_post [rotated]) + apply (fastforce simp: cte_wp_at'_def) + apply wp + apply (rule hoare_strengthen_post [OF doIPCTransfer_non_null_cte_wp_at']) + apply (erule conjE) + apply assumption + apply (erule cte_wp_at_weakenE') + apply (fastforce) + apply (wp sts_invs_minor'' sts_st_tcb' hoare_weak_lift_imp) + apply (rule_tac Q="\_ s. invs' s \ sch_act_simple s + \ st_tcb_at' awaiting_reply' t s + \ t \ ksIdleThread s" + in hoare_post_imp) + apply clarsimp + apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) + apply (drule(1) pred_tcb_at_conj') + apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") + apply clarsimp + apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" + in pred_tcb'_weakenE) + apply (case_tac st, clarsimp+) + apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 hoare_weak_lift_imp + | clarsimp simp add: inQ_def)+ + apply (rule_tac Q="\_. 
invs' and tcb_at' t + and sch_act_simple and st_tcb_at' awaiting_reply' t" + in hoare_strengthen_post [rotated]) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def + idle_tcb'_def pred_tcb_at'_def) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) + apply (erule pred_tcb'_weakenE, clarsimp) + apply (clarsimp simp : invs'_def valid_state'_def valid_idle'_def pred_tcb_at'_def + obj_at'_def idle_tcb'_def) + apply (wp cteDeleteOne_reply_pred_tcb_at hoare_drop_imp hoare_allI)+ + apply (clarsimp simp add: isReply_awaiting_reply' cte_wp_at_ctes_of) + apply (auto dest!: st_tcb_idle'[rotated] simp:isCap_simps) + done + +lemma ct_active_runnable' [simp]: + "ct_active' s \ ct_in_state' runnable' s" + by (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) + +crunches tcbSchedEnqueue + for valid_irq_node[wp]: "\s. valid_irq_node' (irq_node' s) s" + (rule: valid_irq_node_lift) + +lemma tcbSchedEnqueue_valid_action: + "\\s. \x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s\ + tcbSchedEnqueue ptr + \\rv s. \x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s\" + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift) + apply clarsimp + done + +abbreviation (input) "all_invs_but_sch_extra \ + \s. valid_pspace' s \ + sym_refs (state_refs_of' s) \ + sym_refs (state_hyp_refs_of' s) \ + if_live_then_nonz_cap' s \ + sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s \ + if_unsafe_then_cap' s \ + valid_idle' s \ + valid_global_refs' s \ + valid_arch_state' s \ + valid_irq_node' (irq_node' s) s \ + valid_irq_handlers' s \ + valid_irq_states' s \ + irqs_masked' s \ + valid_machine_state' s \ + cur_tcb' s \ + untyped_ranges_zero' s \ + pspace_domain_valid s \ + ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ + (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s)" + + +lemma rescheduleRequired_all_invs_but_extra: + "\\s. all_invs_but_sch_extra s\ + rescheduleRequired \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (wp add: rescheduleRequired_ct_not_inQ + rescheduleRequired_sch_act' + valid_irq_node_lift valid_irq_handlers_lift'' + irqs_masked_lift cur_tcb_lift) + apply auto + done + +lemma threadSet_all_invs_but_sch_extra: + shows "\ tcb_at' t and + all_invs_but_sch_extra and sch_act_simple and + K (ds \ maxDomain) \ + threadSet (tcbDomain_update (\_. ds)) t + \\rv. 
all_invs_but_sch_extra \" + apply (rule hoare_gen_asm) + apply (rule hoare_pre) + apply (wp threadSet_valid_pspace'T_P[where P = False and Q = \ and Q' = \]) + apply (simp add:tcb_cte_cases_def cteSizeBits_def)+ + apply (wp + threadSet_valid_pspace'T_P + threadSet_state_refs_of'T_P[where f'=id and P'=False and Q=\ and g'=id and Q'=\] + threadSet_state_hyp_refs_of' + threadSet_idle'T + threadSet_global_refsT + threadSet_cur + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_tcbDomain_update_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_iflive'T + threadSet_ifunsafe'T + untyped_ranges_zero_lift threadSet_sched_pointers threadSet_valid_sched_pointers + | simp add:tcb_cte_cases_def cteSizeBits_def cteCaps_of_def o_def)+ + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift threadSet_pred_tcb_no_state | simp)+ + apply (clarsimp simp:sch_act_simple_def o_def cteCaps_of_def) + apply (intro conjI) + apply fastforce+ + done + +lemma threadSet_not_curthread_ct_domain: + "\\s. ptr \ ksCurThread s \ ct_idle_or_in_cur_domain' s\ threadSet f ptr \\rv. ct_idle_or_in_cur_domain'\" + apply (simp add:ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (wp hoare_vcg_imp_lift hoare_vcg_disj_lift | wps)+ + apply clarsimp + done + +lemma setDomain_invs': + "\invs' and sch_act_simple and ct_active' and + (tcb_at' ptr and + (\s. sch_act_not ptr s) and + (\y. domain \ maxDomain))\ + setDomain ptr domain \\y. invs'\" + apply (simp add:setDomain_def ) + apply (wp add: when_wp hoare_weak_lift_imp hoare_weak_lift_imp_conj rescheduleRequired_all_invs_but_extra + tcbSchedEnqueue_valid_action hoare_vcg_if_lift2) + apply (rule_tac Q = "\r s. all_invs_but_sch_extra s \ curThread = ksCurThread s + \ (ptr \ curThread \ ct_not_inQ s \ sch_act_wf (ksSchedulerAction s) s \ ct_idle_or_in_cur_domain' s)" + in hoare_strengthen_post[rotated]) + apply (clarsimp simp:invs'_def valid_state'_def st_tcb_at'_def[symmetric] valid_pspace'_def) + apply simp + apply (rule hoare_strengthen_post[OF hoare_vcg_conj_lift]) + apply (rule threadSet_all_invs_but_sch_extra) + prefer 2 + apply clarsimp + apply assumption + apply (wp hoare_weak_lift_imp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain + threadSet_tcbDomain_update_ct_not_inQ | simp)+ + apply (rule_tac Q = "\r s. invs' s \ curThread = ksCurThread s \ sch_act_simple s + \ domain \ maxDomain + \ (ptr \ curThread \ ct_not_inQ s \ sch_act_not ptr s)" + in hoare_strengthen_post[rotated]) + apply (clarsimp simp:invs'_def valid_state'_def) + apply (wp hoare_vcg_imp_lift)+ + apply (clarsimp simp:invs'_def valid_pspace'_def valid_state'_def)+ + done + +lemma performInv_invs'[wp]: + "\invs' and sch_act_simple and ct_active' and valid_invocation' i\ + RetypeDecls_H.performInvocation block call i + \\_. invs'\" + unfolding performInvocation_def + apply (cases i) + apply (clarsimp simp: simple_sane_strg sch_act_simple_def sch_act_sane_def + | wp tcbinv_invs' arch_performInvocation_invs' setDomain_invs' + | rule conjI | erule active_ex_cap')+ + done + +lemma getSlotCap_to_refs[wp]: + "\\\ getSlotCap ref \\rv s. \r\zobj_refs' rv. ex_nonz_cap_to' r s\" + by (simp add: getSlotCap_def | wp)+ + +lemma lcs_valid' [wp]: + "\invs'\ lookupCapAndSlot t xs \\x s. s \' fst x\, -" + unfolding lookupCapAndSlot_def + apply (rule hoare_pre) + apply (wp|clarsimp simp: split_def)+ + done + +lemma lcs_ex_cap_to' [wp]: + "\invs'\ lookupCapAndSlot t xs \\x s. \r\cte_refs' (fst x) (irq_node' s). 
ex_cte_cap_to' r s\, -" + unfolding lookupCapAndSlot_def + apply (rule hoare_pre) + apply (wp | simp add: split_def)+ + done + +lemma lcs_ex_nonz_cap_to' [wp]: + "\invs'\ lookupCapAndSlot t xs \\x s. \r\zobj_refs' (fst x). ex_nonz_cap_to' r s\, -" + unfolding lookupCapAndSlot_def + apply (rule hoare_pre) + apply (wp | simp add: split_def)+ + done + +lemma lcs_cte_at' [wp]: + "\valid_objs'\ lookupCapAndSlot t xs \\rv s. cte_at' (snd rv) s\,-" + unfolding lookupCapAndSlot_def + apply (rule hoare_pre) + apply (wp|simp)+ + done + +lemma lec_ex_cap_to' [wp]: + "\invs'\ + lookupExtraCaps t xa mi + \\rv s. (\cap \ set rv. \r\cte_refs' (fst cap) (irq_node' s). ex_cte_cap_to' r s)\, -" + unfolding lookupExtraCaps_def + apply (cases "msgExtraCaps mi = 0") + apply simp + apply (wp mapME_set | simp)+ + done + +lemma lec_ex_nonz_cap_to' [wp]: + "\invs'\ + lookupExtraCaps t xa mi + \\rv s. (\cap \ set rv. \r\zobj_refs' (fst cap). ex_nonz_cap_to' r s)\, -" + unfolding lookupExtraCaps_def + apply (cases "msgExtraCaps mi = 0") + apply simp + apply (wp mapME_set | simp)+ + done + +(* FIXME: move *) +lemma getSlotCap_eq [wp]: + "\\\ getSlotCap slot + \\cap. cte_wp_at' ((=) cap \ cteCap) slot\" + by (wpsimp wp: getCTE_wp' simp: getSlotCap_def cte_wp_at_ctes_of) + +lemma lcs_eq [wp]: + "\\\ lookupCapAndSlot t cptr \\rv. cte_wp_at' ((=) (fst rv) o cteCap) (snd rv)\,-" + by (wpsimp simp: lookupCapAndSlot_def) + +lemma lec_eq[wp]: + "\\\ + lookupExtraCaps t buffer info + \\rv s. (\x\set rv. cte_wp_at' ((=) (fst x) o cteCap) (snd x) s)\,-" + by (wpsimp wp: mapME_set simp: lookupExtraCaps_def) + +lemma lookupExtras_real_ctes[wp]: + "\valid_objs'\ lookupExtraCaps t xs info \\rv s. \x \ set rv. real_cte_at' (snd x) s\,-" + apply (simp add: lookupExtraCaps_def Let_def split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp mapME_set) + apply (simp add: lookupCapAndSlot_def split_def) + apply (wp case_options_weak_wp mapM_wp' lsft_real_cte | simp)+ + done + +lemma lookupExtras_ctes[wp]: + "\valid_objs'\ lookupExtraCaps t xs info \\rv s. \x \ set rv. cte_at' (snd x) s\,-" + apply (rule hoare_strengthen_postE_R) + apply (rule lookupExtras_real_ctes) + apply (simp add: real_cte_at') + done + +lemma lsft_ex_cte_cap_to': + "\invs' and K (\cap. isCNodeCap cap \ P cap)\ + lookupSlotForThread t cref + \\rv s. ex_cte_cap_wp_to' P rv s\,-" + apply (simp add: lookupSlotForThread_def split_def) + apply (wp rab_cte_cap_to' getSlotCap_cap_to2 | simp)+ + done + +lemma lec_caps_to'[wp]: + "\invs' and K (\cap. isCNodeCap cap \ P cap)\ + lookupExtraCaps t buffer info + \\rv s. (\x\set rv. ex_cte_cap_wp_to' P (snd x) s)\,-" + apply (simp add: lookupExtraCaps_def split del: if_split) + apply (rule hoare_pre) + apply (wp mapME_set) + apply (simp add: lookupCapAndSlot_def split_def) + apply (wp lsft_ex_cte_cap_to' mapM_wp' + | simp | wpc)+ + done + +lemma getSlotCap_badge_derived[wp]: + "\\\ getSlotCap p \\cap. cte_wp_at' (badge_derived' cap \ cteCap) p\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma lec_derived'[wp]: + "\invs'\ + lookupExtraCaps t buffer info + \\rv s. (\x\set rv. cte_wp_at' (badge_derived' (fst x) o cteCap) (snd x) s)\,-" + apply (simp add: lookupExtraCaps_def split del: if_split) + apply (rule hoare_pre) + apply (wp mapME_set) + apply (simp add: lookupCapAndSlot_def split_def) + apply (wp | simp)+ + done + +lemma get_mrs_length_rv[wp]: + "\\s. \n. n \ msg_max_length \ P n\ get_mrs thread buf mi \\rv s. 
P (length rv)\" + supply if_split[split del] + apply (simp add: get_mrs_def) + apply (wp mapM_length | wpc | simp del: upt.simps)+ + apply (clarsimp simp: msgRegisters_unfold msg_max_length_def) + done + +lemma st_tcb_at_idle_thread': + "\ st_tcb_at' P (ksIdleThread s) s; valid_idle' s \ + \ P IdleThreadState" + by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + +crunch tcb_at'[wp]: replyFromKernel "tcb_at' t" + +lemma invs_weak_sch_act_wf_strg: + "invs' s \ weak_sch_act_wf (ksSchedulerAction s) s" + by clarsimp + +(* FIXME: move *) +lemma rct_sch_act_simple[simp]: + "ksSchedulerAction s = ResumeCurrentThread \ sch_act_simple s" + by (simp add: sch_act_simple_def) + +(* FIXME: move *) +lemma rct_sch_act_sane[simp]: + "ksSchedulerAction s = ResumeCurrentThread \ sch_act_sane s" + by (simp add: sch_act_sane_def) + +lemma lookupCapAndSlot_real_cte_at'[wp]: + "\valid_objs'\ lookupCapAndSlot thread ptr \\rv. real_cte_at' (snd rv)\, -" +apply (simp add: lookupCapAndSlot_def lookupSlotForThread_def) +apply (wp resolveAddressBits_real_cte_at' | simp add: split_def)+ +done + +lemmas set_thread_state_active_valid_sched = + set_thread_state_runnable_valid_sched[simplified runnable_eq_active] + +crunches reply_from_kernel + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +lemma handleInvocation_corres: + "c \ b \ + corres (dc \ dc) + (einvs and schact_is_rct and ct_active) + (invs' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') + (handle_invocation c b) + (handleInvocation c b)" + apply (simp add: handle_invocation_def handleInvocation_def liftE_bindE) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split[OF getMessageInfo_corres]) + apply clarsimp + apply (simp add: liftM_def cap_register_def capRegister_def) + apply (rule corres_split_eqr[OF asUser_getRegister_corres]) + apply (rule syscall_corres) + apply (rule hinv_corres_assist, simp) + apply (clarsimp simp add: when_def) + apply (rule handleFault_corres) + apply simp + apply (simp add: split_def) + apply (rule corres_split[OF getMRs_corres]) + apply (rule decodeInvocation_corres, simp_all)[1] + apply (fastforce simp: list_all2_map2 list_all2_map1 elim: list_all2_mono) + apply (fastforce simp: list_all2_map2 list_all2_map1 elim: list_all2_mono) + apply wp[1] + apply (drule sym[OF conjunct1]) + apply simp + apply wp[1] + apply (clarsimp simp: when_def) + apply (rule replyFromKernel_corres) + apply (rule corres_split[OF setThreadState_corres], simp) + apply (rule corres_splitEE[OF performInvocation_corres]) + apply simp+ + apply (rule corres_split[OF getThreadState_corres]) + apply (rename_tac state state') + apply (case_tac state, simp_all)[1] + apply (fold dc_def)[1] + apply (rule corres_split) + apply (rule corres_when [OF refl replyFromKernel_corres]) + apply (rule setThreadState_corres) + apply simp + apply (simp add: when_def) + apply (rule conjI, rule impI) + apply (wp reply_from_kernel_tcb_at) + apply (rule impI, wp+) + apply (wpsimp wp: hoare_drop_imps|strengthen invs_distinct invs_psp_aligned)+ + apply (rule_tac Q="\rv. einvs and schact_is_rct and valid_invocation rve + and (\s. thread = cur_thread s) + and st_tcb_at active thread" + in hoare_post_imp) + apply (clarsimp simp: simple_from_active ct_in_state_def + elim!: st_tcb_weakenE) + apply (wp sts_st_tcb_at' set_thread_state_simple_sched_action + set_thread_state_schact_is_rct set_thread_state_active_valid_sched) + apply (rule_tac Q="\rv. 
invs' and valid_invocation' rve' + and (\s. thread = ksCurThread s) + and st_tcb_at' active' thread + and (\s. ksSchedulerAction s = ResumeCurrentThread)" + in hoare_post_imp) + apply (clarsimp simp: ct_in_state'_def) + apply (clarsimp) + apply (wp setThreadState_nonqueued_state_update + setThreadState_st_tcb setThreadState_rct)[1] + apply (wp lec_caps_to lsft_ex_cte_cap_to + | simp add: split_def liftE_bindE[symmetric] + ct_in_state'_def ball_conj_distrib + | rule hoare_vcg_E_elim)+ + apply (clarsimp simp: tcb_at_invs invs_valid_objs + valid_tcb_state_def ct_in_state_def + simple_from_active invs_mdb + invs_distinct invs_psp_aligned) + apply (clarsimp simp: msg_max_length_def word_bits_def schact_is_rct_def) + apply (erule st_tcb_ex_cap, clarsimp+) + apply fastforce + apply (clarsimp) + apply (frule tcb_at_invs') + apply (clarsimp simp: invs'_def valid_state'_def + ct_in_state'_def ct_not_inQ_def) + apply (frule pred_tcb'_weakenE [where P=active' and P'=simple'], clarsimp) + apply (frule(1) st_tcb_ex_cap'', fastforce) + apply (clarsimp simp: valid_pspace'_def) + apply (frule(1) st_tcb_at_idle_thread') + apply (simp) + done + +lemma ts_Restart_case_helper': + "(case ts of Structures_H.Restart \ A | _ \ B) + = (if ts = Structures_H.Restart then A else B)" + by (cases ts, simp_all) + +lemma gts_imp': + "\Q\ getThreadState t \R\ \ + \\s. st_tcb_at' P t s \ Q s\ getThreadState t \\rv s. P rv \ R rv s\" + apply (simp only: imp_conv_disj) + apply (erule hoare_vcg_disj_lift[rotated]) + apply (rule hoare_strengthen_post [OF gts_sp']) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + done + +crunch st_tcb_at'[wp]: replyFromKernel "st_tcb_at' P t" +crunch cap_to'[wp]: replyFromKernel "ex_nonz_cap_to' p" +crunch it'[wp]: replyFromKernel "\s. P (ksIdleThread s)" +crunch sch_act_simple[wp]: replyFromKernel sch_act_simple + (rule: sch_act_simple_lift) + +lemma rfk_ksQ[wp]: + "\\s. P (ksReadyQueues s p)\ replyFromKernel t x1 \\_ s. P (ksReadyQueues s p)\" + apply (case_tac x1) + apply (simp add: replyFromKernel_def) + apply (wp) + done + +lemma hinv_invs'[wp]: + "\invs' and ct_active' and + (\s. ksSchedulerAction s = ResumeCurrentThread)\ + handleInvocation calling blocking + \\rv. invs'\" + apply (simp add: handleInvocation_def split_def + ts_Restart_case_helper') + apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' + hoare_vcg_all_lift hoare_weak_lift_imp) + apply simp + apply (intro conjI impI) + apply (wp gts_imp' | simp)+ + apply (rule_tac Q'="\rv. invs'" in hoare_strengthen_postE_R[rotated]) + apply clarsimp + apply (subgoal_tac "thread \ ksIdleThread s", simp_all)[1] + apply (fastforce elim!: pred_tcb'_weakenE st_tcb_ex_cap'') + apply (clarsimp simp: valid_idle'_def valid_state'_def + invs'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + apply wp+ + apply (rule_tac Q="\rv'. invs' and valid_invocation' rv + and (\s. ksSchedulerAction s = ResumeCurrentThread) + and (\s. ksCurThread s = thread) + and st_tcb_at' active' thread" + in hoare_post_imp) + apply (clarsimp simp: ct_in_state'_def) + apply (wp sts_invs_minor' setThreadState_st_tcb setThreadState_rct | simp)+ + apply (clarsimp) + apply (fastforce simp add: tcb_at_invs' ct_in_state'_def + simple_sane_strg + sch_act_simple_def + elim!: pred_tcb'_weakenE st_tcb_ex_cap'' + dest: st_tcb_at_idle_thread')+ + done + +crunch typ_at'[wp]: handleFault "\s. 
P (typ_at' T p s)" + (wp: crunch_wps) + +lemmas handleFault_typ_ats[wp] = typ_at_lifts [OF handleFault_typ_at'] + +lemma handleSend_corres: + "corres (dc \ dc) + (einvs and schact_is_rct and ct_active) + (invs' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') + (handle_send blocking) (handleSend blocking)" + by (simp add: handle_send_def handleSend_def handleInvocation_corres) + +lemma hs_invs'[wp]: + "\invs' and ct_active' and + (\s. ksSchedulerAction s = ResumeCurrentThread)\ + handleSend blocking \\r. invs'\" + apply (rule validE_valid) + apply (simp add: handleSend_def) + apply (wp | simp)+ + done + +lemma getThreadCallerSlot_map: + "getThreadCallerSlot t = return (cte_map (t, tcb_cnode_index 3))" + by (simp add: getThreadCallerSlot_def locateSlot_conv + cte_map_def tcb_cnode_index_def tcbCallerSlot_def + cte_level_bits_def) + +lemma tcb_at_cte_at_map: + "\ tcb_at' t s; offs \ dom tcb_cap_cases \ \ cte_at' (cte_map (t, offs)) s" + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (drule tcb_cases_related) + apply (auto elim: cte_wp_at_tcbI') + done + +lemma deleteCallerCap_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + (delete_caller_cap t) + (deleteCallerCap t)" + apply (simp add: delete_caller_cap_def deleteCallerCap_def + getThreadCallerSlot_map) + apply (rule corres_guard_imp) + apply (rule_tac P'="cte_at' (cte_map (t, tcb_cnode_index 3))" in corres_symb_exec_r_conj) + apply (rule_tac F="isReplyCap rv \ rv = capability.NullCap" + and P="cte_wp_at (\cap. is_reply_cap cap \ cap = cap.NullCap) (t, tcb_cnode_index 3) + and einvs" + and P'="invs' and cte_wp_at' (\cte. cteCap cte = rv) + (cte_map (t, tcb_cnode_index 3))" in corres_req) + apply (clarsimp simp: cte_wp_at_caps_of_state state_relation_def) + apply (drule caps_of_state_cteD) + apply (drule(1) pspace_relation_cte_wp_at, clarsimp+) + apply (clarsimp simp: cte_wp_at_ctes_of is_reply_cap_relation cap_relation_NullCapI) + apply simp + apply (rule corres_guard_imp, rule cap_delete_one_corres) + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) + apply (auto simp: can_fast_finalise_def)[1] + apply (clarsimp simp: cte_wp_at_ctes_of) + apply ((wp getCTE_wp')+ | simp add: getSlotCap_def)+ + apply clarsimp + apply (frule tcb_at_cte_at[where ref="tcb_cnode_index 3"]) + apply clarsimp + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (frule tcb_cap_valid_caps_of_stateD, clarsimp) + apply (drule(1) tcb_cnode_index_3_reply_or_null) + apply (auto simp: can_fast_finalise_def is_cap_simps + intro: tcb_at_cte_at_map tcb_at_cte_at)[1] + apply clarsimp + apply (frule_tac offs="tcb_cnode_index 3" in tcb_at_cte_at_map) + apply (simp add: tcb_cap_cases_def) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma deleteCallerCap_invs[wp]: + "\invs'\ deleteCallerCap t \\rv. invs'\" + apply (simp add: deleteCallerCap_def getThreadCallerSlot_def + locateSlot_conv) + apply (wp cteDeleteOne_invs hoare_drop_imps) + done + +lemma deleteCallerCap_simple[wp]: + "\st_tcb_at' simple' t\ deleteCallerCap t' \\rv. st_tcb_at' simple' t\" + apply (simp add: deleteCallerCap_def getThreadCallerSlot_def + locateSlot_conv) + apply (wp cteDeleteOne_st_tcb_at hoare_drop_imps | simp)+ + done + +lemma cteDeleteOne_reply_cap_to''[wp]: + "\ex_nonz_cap_to' p and + cte_wp_at' (\c. isReplyCap (cteCap c) \ isNullCap (cteCap c)) slot\ + cteDeleteOne slot + \\rv. 
ex_nonz_cap_to' p\" + apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) + apply (rule bind_wp [OF _ getCTE_sp]) + apply (rule hoare_assume_pre) + apply (subgoal_tac "isReplyCap (cteCap cte) \ isNullCap (cteCap cte)") + apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv + | clarsimp simp: finaliseCap_def isCap_simps | simp + | wp (once) hoare_drop_imps)+ + apply (fastforce simp: cte_wp_at_ctes_of) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + done + +lemma deleteCallerCap_nonz_cap: + "\ex_nonz_cap_to' p and tcb_at' t and valid_objs'\ + deleteCallerCap t + \\rv. ex_nonz_cap_to' p\" + apply (simp add: deleteCallerCap_def getSlotCap_def getThreadCallerSlot_map + locateSlot_conv ) + apply (rule hoare_pre) + apply (wp cteDeleteOne_reply_cap_to'' getCTE_wp') + apply clarsimp + apply (frule_tac offs="tcb_cnode_index 3" in tcb_at_cte_at_map) + apply (clarsimp simp: tcb_cap_cases_def) + apply (auto simp: ex_nonz_cap_to'_def isCap_simps cte_wp_at_ctes_of) + done + +crunch sch_act_sane[wp]: cteDeleteOne sch_act_sane + (wp: crunch_wps loadObject_default_inv getObject_inv + simp: crunch_simps unless_def + rule: sch_act_sane_lift) + +crunch sch_act_sane[wp]: deleteCallerCap sch_act_sane + (wp: crunch_wps) + +lemma delete_caller_cap_valid_ep_cap: + "\valid_cap (cap.EndpointCap r a b)\ delete_caller_cap thread \\rv. valid_cap (cap.EndpointCap r a b)\" + apply (clarsimp simp: delete_caller_cap_def cap_delete_one_def valid_cap_def) + apply (rule hoare_pre) + by (wp get_cap_wp fast_finalise_typ_at abs_typ_at_lifts(1) + | simp add: unless_def valid_cap_def)+ + +lemma handleRecv_isBlocking_corres': + "corres dc (einvs and ct_in_state active + and (\s. ex_nonz_cap_to (cur_thread s) s)) + (invs' and ct_in_state' simple' + and sch_act_sane + and (\s. ex_nonz_cap_to' (ksCurThread s) s)) + (handle_recv isBlocking) (handleRecv isBlocking)" + (is "corres dc (?pre1) (?pre2) (handle_recv _) (handleRecv _)") + apply (simp add: handle_recv_def handleRecv_def liftM_bind Let_def + cap_register_def capRegister_def + cong: if_cong cap.case_cong capability.case_cong bool.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split_eqr[OF asUser_getRegister_corres]) + apply (rule corres_split_catch) + apply (rule corres_cap_fault) + apply (rule corres_splitEE[OF lookupCap_corres]) + apply (rule_tac P="?pre1 and tcb_at thread + and (\s. 
(cur_thread s) = thread ) + and valid_cap rv" + and P'="?pre2 and tcb_at' thread and valid_cap' rv'" in corres_inst) + apply (clarsimp split: cap_relation_split_asm arch_cap.split_asm split del: if_split + simp: lookup_failure_map_def whenE_def) + apply (rule corres_guard_imp) + apply (rename_tac rights) + apply (case_tac "AllowRead \ rights"; simp) + apply (rule corres_split_nor[OF deleteCallerCap_corres]) + apply (rule receiveIPC_corres) + apply (clarsimp)+ + apply (wp delete_caller_cap_nonz_cap delete_caller_cap_valid_ep_cap)+ + apply (clarsimp)+ + apply (clarsimp simp: lookup_failure_map_def)+ + apply (clarsimp simp: valid_cap'_def capAligned_def) + apply (rule corres_guard_imp) + apply (rename_tac rights) + apply (case_tac "AllowRead \ rights"; simp) + apply (rule_tac r'=ntfn_relation in corres_splitEE) + apply clarsimp + apply (rule getNotification_corres) + apply (rule corres_if) + apply (clarsimp simp: ntfn_relation_def) + apply (clarsimp, rule receiveSignal_corres) + prefer 3 + apply (rule corres_trivial) + apply (clarsimp simp: lookup_failure_map_def)+ + apply (wp get_simple_ko_wp getNotification_wp | wpcw | simp)+ + apply (clarsimp simp: lookup_failure_map_def) + apply (clarsimp simp: valid_cap_def ct_in_state_def) + apply (clarsimp simp: valid_cap'_def capAligned_def) + apply wp+ + apply (rule handleFault_corres) + apply simp + apply (wp get_simple_ko_wp | wpcw | simp)+ + apply (rule hoare_vcg_E_elim) + apply (simp add: lookup_cap_def lookup_slot_for_thread_def) + apply wp + apply (simp add: split_def) + apply (wp resolve_address_bits_valid_fault2)+ + apply (wp getNotification_wp | wpcw | simp add: valid_fault_def whenE_def split del: if_split)+ + apply (clarsimp simp add: ct_in_state_def ct_in_state'_def conj_comms invs_valid_tcb_ctable + invs_valid_objs tcb_at_invs invs_psp_aligned invs_cur) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + ct_in_state'_def sch_act_sane_not) + done + +lemma handleRecv_isBlocking_corres: + "corres dc (einvs and ct_active) + (invs' and ct_active' and sch_act_sane) + (handle_recv isBlocking) (handleRecv isBlocking)" + apply (rule corres_guard_imp) + apply (rule handleRecv_isBlocking_corres') + apply (clarsimp simp: ct_in_state_def) + apply (fastforce elim!: st_tcb_weakenE st_tcb_ex_cap) + apply (clarsimp simp: ct_in_state'_def invs'_def valid_state'_def) + apply (frule(1) st_tcb_ex_cap'') + apply (auto elim: pred_tcb'_weakenE) + done + +lemma lookupCap_refs[wp]: + "\invs'\ lookupCap t ref \\rv s. \r\zobj_refs' rv. ex_nonz_cap_to' r s\,-" + by (simp add: lookupCap_def split_def | wp | simp add: o_def)+ + +lemma hw_invs'[wp]: + "\invs' and ct_in_state' simple' and sch_act_sane + and (\s. ex_nonz_cap_to' (ksCurThread s) s) + and (\s. ksCurThread s \ ksIdleThread s)\ + handleRecv isBlocking \\r. invs'\" + apply (simp add: handleRecv_def cong: if_cong) + apply (rule hoare_pre) + apply ((wp getNotification_wp | wpc | simp)+)[1] + apply (clarsimp simp: ct_in_state'_def) + apply ((wp deleteCallerCap_nonz_cap hoare_vcg_all_lift + hoare_lift_Pf2[OF deleteCallerCap_simple + deleteCallerCap_ct'] + | wpc | simp)+)[1] + apply simp + apply (wp deleteCallerCap_nonz_cap hoare_vcg_all_lift + hoare_lift_Pf2[OF deleteCallerCap_simple + deleteCallerCap_ct'] + | wpc | simp add: ct_in_state'_def whenE_def split del: if_split)+ + apply (rule validE_validE_R) + apply (rule_tac Q="\rv s. invs' s + \ sch_act_sane s + \ thread = ksCurThread s + \ ct_in_state' simple' s + \ ex_nonz_cap_to' thread s + \ thread \ ksIdleThread s + \ (\x \ zobj_refs' rv. 
ex_nonz_cap_to' x s)" + and E="\_ _. True" + in hoare_strengthen_postE[rotated]) + apply (clarsimp simp: isCap_simps ct_in_state'_def pred_tcb_at' invs_valid_objs' + sch_act_sane_not obj_at'_def pred_tcb_at'_def) + apply (assumption) + apply (wp)+ + apply (clarsimp) + apply (auto elim: st_tcb_ex_cap'' pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread' + simp: ct_in_state'_def sch_act_sane_def) + done + +lemma setSchedulerAction_obj_at'[wp]: + "\obj_at' P p\ setSchedulerAction sa \\rv. obj_at' P p\" + unfolding setSchedulerAction_def + by (wp, clarsimp elim!: obj_at'_pspaceI) + +lemma handleYield_corres: + "corres dc + (einvs and ct_active) + (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread)) + handle_yield handleYield" + apply (clarsimp simp: handle_yield_def handleYield_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply simp + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) + apply (rule rescheduleRequired_corres) + apply (wpsimp wp: weak_sch_act_wf_lift_linear + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+ + apply (simp add: invs_def valid_sched_def valid_sched_action_def cur_tcb_def + tcb_at_is_etcb_at valid_state_def valid_pspace_def ct_in_state_def + runnable_eq_active) + apply (fastforce simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def + valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def) + done + +lemma tcbSchedAppend_ct_in_state'[wp]: + "tcbSchedAppend t \ct_in_state' test\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]; wp) + done + +lemma hy_invs': + "\invs' and ct_active'\ handleYield \\r. invs' and ct_active'\" + apply (simp add: handleYield_def) + apply (wpsimp wp: ct_in_state_thread_state_lift' rescheduleRequired_all_invs_but_ct_not_inQ) + apply (rule_tac Q="\_. all_invs_but_ct_not_inQ' and ct_active'" in hoare_post_imp) + apply clarsimp + apply (subst pred_conj_def) + apply (rule hoare_vcg_conj_lift) + apply (rule tcbSchedAppend_all_invs_but_ct_not_inQ') + apply wpsimp + apply wpsimp + apply wpsimp + apply (simp add:ct_active_runnable'[unfolded ct_in_state'_def]) + done + + +lemma dmo_addressTranslateS1_invs'[wp]: + "doMachineOp (addressTranslateS1 addr) \ invs' \" + unfolding addressTranslateS1_def + by (wpsimp wp: dmo_machine_op_lift_invs' dmo'_gets_wp simp: doMachineOp_bind) + +lemma curVCPUActive_invs'[wp]: + "curVCPUActive \invs'\" + unfolding curVCPUActive_def + by wpsimp + +lemma getHSR_invs'[wp]: + "doMachineOp getHSR \invs'\" + by (simp add: getHSR_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma getESR_invs'[wp]: + "doMachineOp getESR \invs'\" + by (simp add: getESR_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma getFAR_invs'[wp]: + "doMachineOp getFAR \invs'\" + by (simp add: getFAR_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma hv_invs'[wp]: "\invs' and tcb_at' t'\ handleVMFault t' vptr \\r. invs'\" + apply (simp add: AARCH64_H.handleVMFault_def + cong: vmfault_type.case_cong) + apply (rule hoare_pre) + apply (wp | wpcw | simp)+ + done + +crunch nosch[wp]: handleVMFault "\s. 
P (ksSchedulerAction s)" + +lemma active_from_running': + "ct_running' s' \ ct_active' s'" + by (clarsimp elim!: pred_tcb'_weakenE + simp: ct_in_state'_def)+ + +lemma simple_from_running': + "ct_running' s' \ ct_in_state' simple' s'" + by (clarsimp elim!: pred_tcb'_weakenE + simp: ct_in_state'_def)+ + +lemma handleReply_corres: + "corres dc (einvs and ct_running) (invs' and ct_running') + handle_reply handleReply" + apply (simp add: handle_reply_def handleReply_def + getThreadCallerSlot_map + getSlotCap_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac P="einvs and cte_wp_at ((=) caller_cap) (thread, tcb_cnode_index 3) + and K (is_reply_cap caller_cap \ caller_cap = cap.NullCap) + and tcb_at thread and st_tcb_at active thread + and valid_cap caller_cap" + and P'="invs' and tcb_at' thread + and valid_cap' (cteCap rv') + and cte_at' (cte_map (thread, tcb_cnode_index 3))" + in corres_inst) + apply (auto split: cap_relation_split_asm arch_cap.split_asm bool.split + intro!: corres_guard_imp [OF deleteCallerCap_corres] + corres_guard_imp [OF doReplyTransfer_corres] + corres_fail + simp: valid_cap_def valid_cap'_def is_cap_simps assert_def is_reply_cap_to_def)[1] + apply (fastforce simp: invs_def valid_state_def + cte_wp_at_caps_of_state st_tcb_def2 + dest: valid_reply_caps_of_stateD) + apply (wp get_cap_cte_wp_at get_cap_wp | simp add: cte_wp_at_eq_simp)+ + apply (intro conjI impI allI, + (fastforce simp: invs_def valid_state_def + intro: tcb_at_cte_at)+) + apply (clarsimp, frule tcb_at_invs) + apply (fastforce dest: tcb_caller_cap simp: cte_wp_at_def) + apply clarsimp + apply (clarsimp simp: ct_in_state_def elim!: st_tcb_weakenE) + apply (fastforce intro: cte_wp_valid_cap elim: cte_wp_at_weakenE) + apply (fastforce intro: tcb_at_cte_at_map) + done + +lemma hr_invs'[wp]: + "\invs' and sch_act_simple\ handleReply \\rv. invs'\" + apply (simp add: handleReply_def getSlotCap_def + getThreadCallerSlot_map getCurThread_def) + apply (wp getCTE_wp | wpc | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule ctes_of_valid', clarsimp+) + apply (simp add: valid_cap'_def) + apply (simp add: invs'_def cur_tcb'_def) + done + +crunch ksCurThread[wp]: handleReply "\s. P (ksCurThread s)" + (wp: crunch_wps transferCapsToSlots_pres1 setObject_ep_ct + setObject_ntfn_ct + simp: unless_def crunch_simps + ignore: transferCapsToSlots) + +lemmas cteDeleteOne_st_tcb_at_simple'[wp] = + cteDeleteOne_st_tcb_at[where P=simple', simplified] + +crunch st_tcb_at_simple'[wp]: handleReply "st_tcb_at' simple' t'" + (wp: hoare_TrueI crunch_wps sts_st_tcb_at'_cases + threadSet_pred_tcb_no_state + ignore: setThreadState) + +lemmas handleReply_ct_in_state_simple[wp] = + ct_in_state_thread_state_lift' [OF handleReply_ksCurThread + handleReply_st_tcb_at_simple'] + + +(* FIXME: move *) +lemma doReplyTransfer_st_tcb_at_active: + "\st_tcb_at' active' t and tcb_at' t' and K (t \ t') and + cte_wp_at' (\cte. cteCap cte = (capability.ReplyCap t' False g)) sl\ + doReplyTransfer t t' sl g + \\rv. st_tcb_at' active' t\" + apply (simp add: doReplyTransfer_def liftM_def) + apply (wp setThreadState_st_tcb sts_pred_tcb_neq' cteDeleteOne_reply_pred_tcb_at + hoare_drop_imps threadSet_pred_tcb_no_state hoare_exI + doIPCTransfer_non_null_cte_wp_at2' | wpc | clarsimp simp:isCap_simps)+ + apply (fastforce) + done + +lemma hr_ct_active'[wp]: + "\invs' and ct_active'\ handleReply \\rv. 
ct_active'\" + apply (simp add: handleReply_def getSlotCap_def getCurThread_def + getThreadCallerSlot_def locateSlot_conv) + apply (rule bind_wp, rename_tac cur_thread) + apply (rule_tac t=cur_thread in ct_in_state'_decomp) + apply (wpsimp wp: getCTE_wp) + apply (fastforce simp: cte_wp_at_ctes_of) + apply (wpsimp wp: getCTE_wp doReplyTransfer_st_tcb_at_active)+ + apply (fastforce simp: ct_in_state'_def cte_wp_at_ctes_of valid_cap'_def + dest: ctes_of_valid') + done + +lemma handleCall_corres: + "corres (dc \ dc) (einvs and schact_is_rct and ct_active) + (invs' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and + ct_active') + handle_call handleCall" + by (simp add: handle_call_def handleCall_def liftE_bindE handleInvocation_corres) + +lemma hc_invs'[wp]: + "\invs' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and + ct_active'\ + handleCall + \\rv. invs'\" + apply (simp add: handleCall_def) + apply (wp) + apply (clarsimp) + done + +lemma cteInsert_sane[wp]: + "\sch_act_sane\ cteInsert newCap srcSlot destSlot \\_. sch_act_sane\" + apply (simp add: sch_act_sane_def) + apply (wp hoare_vcg_all_lift + hoare_convert_imp [OF cteInsert_nosch cteInsert_ct]) + done + +crunch sane [wp]: setExtraBadge sch_act_sane + +crunch sane [wp]: transferCaps "sch_act_sane" + (wp: transferCapsToSlots_pres1 crunch_wps + simp: crunch_simps + ignore: transferCapsToSlots) + +lemma possibleSwitchTo_sane: + "\\s. sch_act_sane s \ t \ ksCurThread s\ possibleSwitchTo t \\_. sch_act_sane\" + apply (simp add: possibleSwitchTo_def setSchedulerAction_def curDomain_def + cong: if_cong) + apply (wp hoare_drop_imps | wpc)+ + apply (simp add: sch_act_sane_def) + done + +crunch sane [wp]: handleFaultReply sch_act_sane + ( wp: threadGet_inv hoare_drop_imps crunch_wps + simp: crunch_simps + ignore: setSchedulerAction) + +crunch sane [wp]: doIPCTransfer sch_act_sane + ( wp: threadGet_inv hoare_drop_imps crunch_wps + simp: crunch_simps + ignore: setSchedulerAction) + +lemma doReplyTransfer_sane: + "\\s. sch_act_sane s \ t' \ ksCurThread s\ + doReplyTransfer t t' callerSlot g \\rv. sch_act_sane\" + apply (simp add: doReplyTransfer_def liftM_def) + apply (wp possibleSwitchTo_sane hoare_drop_imps hoare_vcg_all_lift|wpc)+ + apply simp + done + +lemma handleReply_sane: + "\sch_act_sane\ handleReply \\rv. sch_act_sane\" + apply (simp add: handleReply_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) + apply (rule hoare_pre) + apply (wp doReplyTransfer_sane getCTE_wp'| wpc)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma handleReply_nonz_cap_to_ct: + "\ct_active' and invs' and sch_act_simple\ + handleReply + \\rv s. ex_nonz_cap_to' (ksCurThread s) s\" + apply (rule_tac Q="\rv. ct_active' and invs'" + in hoare_post_imp) + apply (auto simp: ct_in_state'_def elim: st_tcb_ex_cap'')[1] + apply (wp | simp)+ + done + +crunch ksQ[wp]: handleFaultReply "\s. P (ksReadyQueues s p)" + +crunch valid_etcbs[wp]: handle_recv "valid_etcbs" + (wp: crunch_wps simp: crunch_simps) + +lemma handleReply_handleRecv_corres: + "corres dc (einvs and ct_running) + (invs' and ct_running' and (\s. 
ksSchedulerAction s = ResumeCurrentThread)) + (do x \ handle_reply; handle_recv True od) + (do x \ handleReply; handleRecv True od)" + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF handleReply_corres]) + apply (rule handleRecv_isBlocking_corres') + apply (wp handle_reply_nonz_cap_to_ct handleReply_sane + handleReply_nonz_cap_to_ct handle_reply_valid_sched)+ + apply (fastforce simp: ct_in_state_def ct_in_state'_def simple_sane_strg + elim!: st_tcb_weakenE st_tcb_ex_cap') + apply (clarsimp simp: ct_in_state'_def) + apply (fastforce elim: pred_tcb'_weakenE) + done + +lemma handleHypervisorFault_corres: + "corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread) + (invs' and sch_act_not thread + and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) + (handle_hypervisor_fault thread fault) (handleHypervisorFault thread fault)" + apply (cases fault; clarsimp simp: handleHypervisorFault_def isFpuEnable_def split del: if_split) + apply (corres corres: handleFault_corres simp: valid_fault_def) + done + +lemma dmo_machine_rest_lift: + "(\s m. P (s\ksMachineState := ksMachineState s\machine_state_rest := m\\) = P s) \ + \P\ doMachineOp (machine_op_lift f') \\rv. P\" + apply (wpsimp simp: doMachineOp_def machine_op_lift_def machine_rest_lift_def in_monad) + apply (clarsimp simp: select_f_def ignore_failure_def split: if_split_asm) + done + +lemma hvmf_invs_lift: + "(\s m. P (s\ksMachineState := ksMachineState s\machine_state_rest := m\\) = P s) \ + \P\ handleVMFault t flt \\_ _. True\, \\_. P\" + unfolding handleVMFault_def + by (wpsimp wp: dmo_machine_rest_lift asUser_inv dmo'_gets_wp + simp: getHSR_def addressTranslateS1_def getESR_def getFAR_def + curVCPUActive_def doMachineOp_bind getRestartPC_def getRegister_def) + +lemma hvmf_invs_etc: + "\invs' and sch_act_not t and st_tcb_at' simple' t and + ex_nonz_cap_to' t\ + handleVMFault t f + \\_ _. True\, + \\_. invs' and sch_act_not t and st_tcb_at' simple' t and ex_nonz_cap_to' t\" + apply (rule hvmf_invs_lift) + apply (clarsimp simp: invs'_def valid_state'_def valid_machine_state'_def) + done + +lemma handleEvent_corres: + "corres (dc \ dc) (einvs and (\s. event \ Interrupt \ ct_running s) and + schact_is_rct) + (invs' and (\s. event \ Interrupt \ ct_running' s) and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (handle_event event) (handleEvent event)" +proof - + have hw: + "\isBlocking. corres dc (einvs and ct_running and schact_is_rct) + (invs' and ct_running' + and (\s. 
ksSchedulerAction s = ResumeCurrentThread)) + (handle_recv isBlocking) (handleRecv isBlocking)" + apply (rule corres_guard_imp [OF handleRecv_isBlocking_corres]) + apply (clarsimp simp: ct_in_state_def ct_in_state'_def + elim!: st_tcb_weakenE pred_tcb'_weakenE)+ + done + show ?thesis + apply (case_tac event) + apply (simp_all add: handleEvent_def) + + apply (rename_tac syscall) + apply (case_tac syscall) + apply (auto intro: corres_guard_imp[OF handleSend_corres] + corres_guard_imp[OF hw] + corres_guard_imp [OF handleReply_corres] + corres_guard_imp[OF handleReply_handleRecv_corres] + corres_guard_imp[OF handleCall_corres] + corres_guard_imp[OF handleYield_corres] + active_from_running active_from_running' + simp: simple_sane_strg)[8] + apply (rule corres_underlying_split) + apply (rule corres_guard_imp[OF getCurThread_corres], simp+) + apply (rule handleFault_corres) + apply simp + apply (simp add: valid_fault_def) + apply wp + apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE + simp: ct_in_state_def) + apply wp + apply (clarsimp) + apply (auto simp: ct_in_state'_def sch_act_simple_def + sch_act_sane_def + elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] + apply (rule corres_underlying_split) + apply (rule corres_guard_imp, rule getCurThread_corres, simp+) + apply (rule handleFault_corres) + apply (simp add: valid_fault_def) + apply wp + apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE + simp: ct_in_state_def valid_fault_def) + apply wp + apply clarsimp + apply (auto simp: ct_in_state'_def sch_act_simple_def + sch_act_sane_def + elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[where R="\_. einvs" + and R'="\rv s. \x. rv = Some x \ R'' x s" + for R'']) + apply (rule corres_machine_op) + apply (rule corres_Id; wpsimp) + apply (case_tac rv, simp_all add: doMachineOp_return)[1] + apply (rule handleInterrupt_corres) + apply (wp hoare_vcg_all_lift + doMachineOp_getActiveIRQ_IRQ_active' + | simp + | simp add: imp_conjR | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def valid_queues_def) + apply (rule_tac corres_underlying_split) + apply (rule corres_guard_imp, rule getCurThread_corres, simp+) + apply (rule corres_split_catch) + apply (rule handleVMFault_corres) + apply (erule handleFault_corres) + apply (wp handle_vm_fault_valid_fault) + apply (wp hvmf_invs_etc) + apply wp + apply (clarsimp simp: simple_from_running tcb_at_invs) + apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE simp: ct_in_state_def) + apply wp + apply (clarsimp) + apply (fastforce simp: simple_sane_strg sch_act_simple_def ct_in_state'_def + elim: st_tcb_ex_cap'' pred_tcb'_weakenE) + apply (rule corres_underlying_split) + apply (rule corres_guard_imp[OF getCurThread_corres], simp+) + apply (rule handleHypervisorFault_corres) + apply wp + apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE + simp: ct_in_state_def) + apply wp + apply (clarsimp) + apply (auto simp: ct_in_state'_def sch_act_simple_def + sch_act_sane_def + elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] + done +qed + +crunches handleVMFault + for st_tcb_at'[wp]: "st_tcb_at' P t" + and cap_to'[wp]: "ex_nonz_cap_to' t" + and norq[wp]: "\s. P (ksReadyQueues s)" + and ksit[wp]: "\s. P (ksIdleThread s)" + +crunches handleHypervisorFault + for ksit[wp]: "\s. P (ksIdleThread s)" + (wp: undefined_valid haskell_assert_inv simp: isFpuEnable_def) + +lemma hh_invs'[wp]: + "\invs' and sch_act_not p and st_tcb_at' simple' p and ex_nonz_cap_to' p and (\s. 
p \ ksIdleThread s)\ + handleHypervisorFault p t + \\_. invs'\" + supply if_split[split del] + by (cases t; wpsimp simp: AARCH64_H.handleHypervisorFault_def isFpuEnable_def) + +lemma ct_not_idle': + fixes s + assumes vi: "valid_idle' s" + and cts: "ct_in_state' (\tcb. \idle' tcb) s" + shows "ksCurThread s \ ksIdleThread s" +proof + assume "ksCurThread s = ksIdleThread s" + with vi have "ct_in_state' idle' s" + unfolding ct_in_state'_def valid_idle'_def + by (clarsimp simp: pred_tcb_at'_def obj_at'_def idle_tcb'_def) + + with cts show False + unfolding ct_in_state'_def + by (fastforce dest: pred_tcb_at_conj') +qed + +lemma ct_running_not_idle'[simp]: + "\invs' s; ct_running' s\ \ ksCurThread s \ ksIdleThread s" + apply (rule ct_not_idle') + apply (fastforce simp: invs'_def valid_state'_def ct_in_state'_def + elim: pred_tcb'_weakenE)+ + done + +lemma ct_active_not_idle'[simp]: + "\invs' s; ct_active' s\ \ ksCurThread s \ ksIdleThread s" + apply (rule ct_not_idle') + apply (fastforce simp: invs'_def valid_state'_def ct_in_state'_def + elim: pred_tcb'_weakenE)+ + done + +lemma deleteCallerCap_st_tcb_at_runnable[wp]: + "\st_tcb_at' runnable' t\ deleteCallerCap t' \\rv. st_tcb_at' runnable' t\" + apply (simp add: deleteCallerCap_def getThreadCallerSlot_def + locateSlot_conv) + apply (wp cteDeleteOne_tcb_at_runnable' hoare_drop_imps | simp)+ + done + +crunches handleFault,receiveSignal,receiveIPC,asUser + for ksCurThread[wp]: "\s. P (ksCurThread s)" + (wp: hoare_drop_imps crunch_wps simp: crunch_simps) + +lemma handleRecv_ksCurThread[wp]: + "\\s. P (ksCurThread s) \ handleRecv b \\rv s. P (ksCurThread s) \" + unfolding handleRecv_def + by ((simp, wp hoare_drop_imps) | wpc | wpsimp wp: hoare_drop_imps)+ + +lemma he_invs'[wp]: + "\invs' and + (\s. event \ Interrupt \ ct_running' s) and + (\s. ksSchedulerAction s = ResumeCurrentThread)\ + handleEvent event + \\rv. invs'\" +proof - + have nidle: "\s. invs' s \ ct_active' s \ ksCurThread s \ ksIdleThread s" + by (clarsimp) + show ?thesis + apply (case_tac event, simp_all add: handleEvent_def) + apply (rename_tac syscall) + apply (case_tac syscall, + (wp handleReply_sane handleReply_nonz_cap_to_ct handleReply_ksCurThread + | clarsimp simp: active_from_running' simple_from_running' simple_sane_strg simp del: split_paired_All + | rule conjI active_ex_cap' + | strengthen nidle)+) + apply (rule hoare_strengthen_post, + rule hoare_weaken_pre, + rule hy_invs') + apply (simp add: active_from_running') + apply simp + apply (simp add: active_from_running') + apply (wp + | rule conjI + | erule pred_tcb'_weakenE st_tcb_ex_cap'' + | clarsimp simp: tcb_at_invs ct_in_state'_def simple_sane_strg sch_act_simple_def + | drule st_tcb_at_idle_thread' + | wpc | wp (once) hoare_drop_imps hoare_vcg_all_lift)+ + done +qed + +lemma inv_irq_IRQInactive: + "\\\ performIRQControl irqcontrol_invocation + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: performIRQControl_def) + apply (rule hoare_pre) + apply (wpc|wp|simp add: AARCH64_H.performIRQControl_def)+ + done + +lemma inv_arch_IRQInactive: + "\\\ Arch.performInvocation invocation + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (wpsimp simp: performARMMMUInvocation_def AARCH64_H.performInvocation_def) + done + +lemma retype_pi_IRQInactive: + "\valid_irq_states'\ RetypeDecls_H.performInvocation blocking call v + -, \\rv s. 
intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: Retype_H.performInvocation_def) + apply (rule hoare_pre) + apply (wpc | + wp inv_tcb_IRQInactive inv_cnode_IRQInactive inv_irq_IRQInactive + inv_untyped_IRQInactive inv_arch_IRQInactive | + simp)+ + done + +lemma hi_IRQInactive: + "\valid_irq_states'\ handleInvocation call blocking + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: handleInvocation_def split_def) + apply (wp syscall_valid' retype_pi_IRQInactive) + done + +lemma handleSend_IRQInactive: + "\invs'\ handleSend blocking + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: handleSend_def) + apply (rule hoare_pre) + apply (wp hi_IRQInactive) + apply (simp add: invs'_def valid_state'_def) + done + +lemma handleCall_IRQInactive: + "\invs'\ handleCall + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: handleCall_def) + apply (rule hoare_pre) + apply (wp hi_IRQInactive) + apply (simp add: invs'_def valid_state'_def) + done + +end + +end diff --git a/proof/refine/AARCH64/TcbAcc_R.thy b/proof/refine/AARCH64/TcbAcc_R.thy new file mode 100644 index 0000000000..179708427e --- /dev/null +++ b/proof/refine/AARCH64/TcbAcc_R.thy @@ -0,0 +1,6077 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory TcbAcc_R +imports CSpace_R ArchMove_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare if_weak_cong [cong] +declare hoare_in_monad_post[wp] +declare trans_state_update'[symmetric,simp] +declare storeWordUser_typ_at' [wp] + +(* Auxiliaries and basic properties of priority bitmap functions *) + +lemma countLeadingZeros_word_clz[simp]: + "countLeadingZeros w = word_clz w" + unfolding countLeadingZeros_def word_clz_def + by (simp add: to_bl_upt) + +lemma wordLog2_word_log2[simp]: + "wordLog2 = word_log2" + apply (rule ext) + unfolding wordLog2_def word_log2_def + by (simp add: word_size wordBits_def) + +lemmas bitmap_fun_defs = addToBitmap_def removeFromBitmap_def + modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def + getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def + +(* lookupBitmapPriority is a cleaner version of getHighestPrio *) +definition + "lookupBitmapPriority d \ \s. + l1IndexToPrio (word_log2 (ksReadyQueuesL1Bitmap s d)) || + of_nat (word_log2 (ksReadyQueuesL2Bitmap s (d, + invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d)))))" + +lemma getHighestPrio_def'[simp]: + "getHighestPrio d = gets (lookupBitmapPriority d)" + unfolding getHighestPrio_def gets_def + by (fastforce simp: gets_def get_bind_apply lookupBitmapPriority_def bitmap_fun_defs) + +(* isHighestPrio_def' is a cleaner version of isHighestPrio_def *) +lemma isHighestPrio_def': + "isHighestPrio d p = gets (\s. ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" + unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' + apply (rule ext) + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def + split: if_splits) + done + +lemma getHighestPrio_inv[wp]: + "\ P \ getHighestPrio d \\_. 
P \" + unfolding bitmap_fun_defs by simp + +lemma valid_bitmapQ_bitmapQ_simp: + "valid_bitmapQ s \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (simp add: valid_bitmapQ_def) + +lemma prioToL1Index_l1IndexToPrio_or_id: + "\ unat (w'::priority) < 2 ^ wordRadix ; w < 2^(size w' - wordRadix) \ + \ prioToL1Index ((l1IndexToPrio w) || w') = w" + unfolding l1IndexToPrio_def prioToL1Index_def + apply (simp add: shiftr_over_or_dist shiftr_le_0 wordRadix_def') + apply (subst shiftl_shiftr_id, simp, simp add: word_size) + apply (rule word_of_nat_less) + apply simp + apply (subst unat_of_nat_eq, simp_all add: word_size) + done + +lemma bitmapQ_no_L1_orphansD: + "\ bitmapQ_no_L1_orphans s ; ksReadyQueuesL1Bitmap s d !! i \ + \ ksReadyQueuesL2Bitmap s (d, invertL1Index i) \ 0 \ i < l2BitmapSize" + unfolding bitmapQ_no_L1_orphans_def by simp + +lemma l1IndexToPrio_wordRadix_mask[simp]: + "l1IndexToPrio i && mask wordRadix = 0" + unfolding l1IndexToPrio_def + by (simp add: wordRadix_def') + +lemma st_tcb_at_coerce_abstract: + assumes t: "st_tcb_at' P t c" + assumes sr: "(a, c) \ state_relation" + shows "st_tcb_at (\st. \st'. thread_state_relation st st' \ P st') t a" + using assms + apply (clarsimp simp: state_relation_def pred_tcb_at'_def obj_at'_def objBits_simps) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE, simp_all) + apply (fastforce simp: st_tcb_at_def obj_at_def other_obj_relation_def + tcb_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + AARCH64_A.arch_kernel_obj.split_asm)+ + done + +lemma st_tcb_at_runnable_coerce_concrete: + assumes t: "st_tcb_at runnable t a" + assumes sr: "(a, c) \ state_relation" + assumes tcb: "tcb_at' t c" + shows "st_tcb_at' runnable' t c" + using t + apply - + apply (rule ccontr) + apply (drule pred_tcb_at'_Not[THEN iffD2, OF conjI, OF tcb]) + apply (drule st_tcb_at_coerce_abstract[OF _ sr]) + apply (clarsimp simp: st_tcb_def2) + apply (case_tac "tcb_state tcb"; simp) + done + +lemma pspace_relation_tcb_at': + assumes p: "pspace_relation (kheap a) (ksPSpace c)" + assumes t: "tcb_at t a" + assumes aligned: "pspace_aligned' c" + assumes distinct: "pspace_distinct' c" + shows "tcb_at' t c" + using assms + apply (clarsimp simp: obj_at_def) + apply (drule(1) pspace_relation_absD) + apply (clarsimp simp: is_tcb tcb_relation_cut_def) + apply (simp split: kernel_object.split_asm) + apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb], simp) + apply (erule obj_at'_weakenE) + apply simp + done + +lemma tcb_at_cross: + "\tcb_at t s; pspace_aligned s; pspace_distinct s; pspace_relation (kheap s) (ksPSpace s')\ + \ tcb_at' t s'" + apply (drule (2) pspace_distinct_cross) + apply (drule (1) pspace_aligned_cross) + apply (erule (3) pspace_relation_tcb_at') + done + +lemma tcb_at'_cross: + assumes p: "pspace_relation (kheap s) (ksPSpace s')" + assumes t: "tcb_at' ptr s'" + shows "tcb_at ptr s" + using assms + apply (clarsimp simp: obj_at'_def) + apply (erule (1) pspace_dom_relatedE) + by (clarsimp simp: obj_relation_cuts_def2 obj_at_def cte_relation_def + other_obj_relation_def pte_relation_def is_tcb_def + split: Structures_A.kernel_object.split_asm if_split_asm arch_kernel_obj.split_asm) + +lemma st_tcb_at_runnable_cross: + "\ st_tcb_at runnable t s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ st_tcb_at' runnable' t s'" + apply (frule (1) pspace_distinct_cross, fastforce simp: state_relation_def) + apply (frule pspace_aligned_cross, fastforce simp: state_relation_def) + apply 
(prop_tac "tcb_at t s", clarsimp simp: st_tcb_at_def obj_at_def is_tcb) + apply (drule (2) tcb_at_cross, fastforce simp: state_relation_def) + apply (erule (2) st_tcb_at_runnable_coerce_concrete) + done + +lemma cur_tcb_cross: + "\ cur_tcb s; pspace_aligned s; pspace_distinct s; (s,s') \ state_relation \ \ cur_tcb' s'" + apply (clarsimp simp: cur_tcb'_def cur_tcb_def state_relation_def) + apply (erule (3) tcb_at_cross) + done + +lemma valid_objs_valid_tcbE': + assumes "valid_objs' s" + "tcb_at' t s" + "\tcb. ko_at' tcb t s \ valid_tcb' tcb s \ R s tcb" + shows "obj_at' (R s) t s" + using assms + apply (clarsimp simp add: valid_objs'_def ran_def typ_at'_def + ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) + apply (fastforce simp: projectKO_def projectKO_opt_tcb return_def valid_tcb'_def) + done + +lemma valid_tcb'_tcbDomain_update: + "new_dom \ maxDomain \ + \tcb. valid_tcb' tcb s \ valid_tcb' (tcbDomain_update (\_. new_dom) tcb) s" + unfolding valid_tcb'_def + apply (clarsimp simp: tcb_cte_cases_def objBits_simps') + done + +lemma valid_tcb'_tcbState_update: + "\valid_tcb_state' st s; valid_tcb' tcb s\ \ + valid_tcb' (tcbState_update (\_. st) tcb) s" + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def valid_tcb_state'_def objBits_simps') + done + +definition valid_tcbs' :: "kernel_state \ bool" where + "valid_tcbs' s' \ \ptr tcb. ksPSpace s' ptr = Some (KOTCB tcb) \ valid_tcb' tcb s'" + +lemma valid_objs'_valid_tcbs'[elim!]: + "valid_objs' s \ valid_tcbs' s" + by (auto simp: valid_objs'_def valid_tcbs'_def valid_obj'_def split: kernel_object.splits) + +lemma invs'_valid_tcbs'[elim!]: + "invs' s \ valid_tcbs' s" + by (fastforce intro: valid_objs'_valid_tcbs') + +lemma valid_tcbs'_maxDomain: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbDomain tcb \ maxDomain) t s" + apply (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def) + done + +lemmas valid_objs'_maxDomain = valid_tcbs'_maxDomain[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_maxPriority: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbPriority tcb \ maxPriority) t s" + apply (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def) + done + +lemmas valid_objs'_maxPriority = valid_tcbs'_maxPriority[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_obj_at': + assumes "valid_tcbs' s" + "tcb_at' t s" + "\tcb. ko_at' tcb t s \ valid_tcb' tcb s \ R s tcb" + shows "obj_at' (R s) t s" + using assms + apply (clarsimp simp add: valid_tcbs'_def ran_def typ_at'_def + ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) + done + +lemma update_valid_tcb'[simp]: + "\f. valid_tcb' tcb (ksReadyQueuesL1Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueuesL2Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueues_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksSchedulerAction_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksDomainTime_update f s) = valid_tcb' tcb s" + by (auto simp: valid_tcb'_def valid_tcb_state'_def valid_bound_tcb'_def valid_bound_ntfn'_def + opt_tcb_at'_def valid_arch_tcb'_def + split: option.splits thread_state.splits) + +lemma update_valid_tcbs'[simp]: + "\f. valid_tcbs' (ksReadyQueuesL1Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueuesL2Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueues_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksSchedulerAction_update f s) = valid_tcbs' s" + "\f. 
valid_tcbs' (ksDomainTime_update f s) = valid_tcbs' s" + by (simp_all add: valid_tcbs'_def) + +lemma doMachineOp_irq_states': + assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" + shows "\valid_irq_states'\ doMachineOp f \\rv. valid_irq_states'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (drule use_valid) + apply (rule_tac P="\m. m = irq_masks (ksMachineState s)" in masks) + apply simp + apply simp + done + +lemma dmo_invs': + assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" + shows "\(\s. \m. \(r,m')\fst (f m). \p. + pointerInUserData p s \ pointerInDeviceData p s \ + underlying_memory m' p = underlying_memory m p) and + invs'\ doMachineOp f \\r. invs'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subst invs'_machine) + apply (drule use_valid) + apply (rule_tac P="\m. m = irq_masks (ksMachineState s)" in masks, simp+) + apply (fastforce simp add: valid_machine_state'_def) + apply assumption + done + +lemma dmo_invs_no_cicd': + assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" + shows "\(\s. \m. \(r,m')\fst (f m). \p. + pointerInUserData p s \ pointerInDeviceData p s \ + underlying_memory m' p = underlying_memory m p) and + invs_no_cicd'\ doMachineOp f \\r. invs_no_cicd'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subst invs_no_cicd'_machine) + apply (drule use_valid) + apply (rule_tac P="\m. m = irq_masks (ksMachineState s)" in masks, simp+) + apply (fastforce simp add: valid_machine_state'_def) + apply assumption + done + +lemma dmo_lift': + assumes f: "\P\ f \Q\" + shows "\\s. P (ksMachineState s)\ doMachineOp f + \\rv s. Q rv (ksMachineState s)\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (erule (1) use_valid [OF _ f]) + done + +lemma doMachineOp_getActiveIRQ_IRQ_active: + "\valid_irq_states'\ + doMachineOp (getActiveIRQ in_kernel) + \\rv s. \irq. rv = Some irq \ intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" + apply (rule hoare_lift_Pf3 [where f="ksInterruptState"]) + prefer 2 + apply wp + apply (simp add: irq_state_independent_H_def) + apply assumption + apply (rule dmo_lift') + apply (rule getActiveIRQ_masked) + done + +lemma doMachineOp_getActiveIRQ_IRQ_active': + "\valid_irq_states'\ + doMachineOp (getActiveIRQ in_kernel) + \\rv s. rv = Some irq \ intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" + apply (rule hoare_post_imp) + prefer 2 + apply (rule doMachineOp_getActiveIRQ_IRQ_active) + apply simp + done + +lemma preemptionPoint_irq [wp]: + "\valid_irq_states'\ preemptionPoint -, + \\irq s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" + apply (simp add: preemptionPoint_def setWorkUnits_def modifyWorkUnits_def getWorkUnits_def) + apply (wp whenE_wp|wpc)+ + apply (rule hoare_post_imp) + prefer 2 + apply (rule doMachineOp_getActiveIRQ_IRQ_active) + apply clarsimp + apply wp+ + apply clarsimp + done + +lemmas doMachineOp_obj_at = doMachineOp_obj_at' + +lemma updateObject_tcb_inv: + "\P\ updateObject (obj::tcb) ko p q n \\rv. P\" + by simp (rule updateObject_default_inv) + +lemma setObject_update_TCB_corres': + assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'" + assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb" + assumes tables': "\(getF, v) \ ran tcb_cte_cases. 
getF new_tcb' = getF tcb'" + assumes sched_pointers: "tcbSchedPrev new_tcb' = tcbSchedPrev tcb'" + "tcbSchedNext new_tcb' = tcbSchedNext tcb'" + assumes flag: "tcbQueued new_tcb' = tcbQueued tcb'" + assumes r: "r () ()" + assumes exst: "exst_same tcb' new_tcb'" + shows + "corres r + (ko_at (TCB tcb) ptr) (ko_at' tcb' ptr) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" + apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' new_tcb'" in corres_req) + apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) + apply (frule(1) pspace_relation_absD) + apply (clarsimp simp: tcb_relation_cut_def exst) + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp: obj_at'_def) + apply (unfold set_object_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def projectKOs obj_at_def + updateObject_default_def in_magnitude_check obj_at'_def) + apply (rename_tac s s' t') + apply (prop_tac "t' = s'") + apply (clarsimp simp: magnitudeCheck_def in_monad split: option.splits) + apply (drule singleton_in_magnitude_check) + apply (prop_tac "map_to_ctes ((ksPSpace s') (ptr \ injectKO new_tcb')) + = map_to_ctes (ksPSpace s')") + apply (frule_tac tcb=new_tcb' and tcb=tcb' in map_to_ctes_upd_tcb) + apply (clarsimp simp: objBits_simps) + apply (clarsimp simp: objBits_simps ps_clear_def3 field_simps objBits_defs mask_def) + apply (insert tables')[1] + apply (rule ext) + apply (clarsimp split: if_splits) + apply blast + apply (prop_tac "obj_at (same_caps (TCB new_tcb)) ptr s") + using tables + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def assms) + apply (clarsimp simp add: state_relation_def) + apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _ _" \ -\) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x=ptr in allE)+ + apply clarsimp + apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply clarsimp + apply (rule conjI) + apply (simp only: pspace_relation_def simp_thms + pspace_dom_update[where x="kernel_object.TCB _" + and v="kernel_object.TCB _", + simplified a_type_def, simplified]) + apply (rule conjI) + using assms + apply (simp only: dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: tcb_relation_cut_def project_inject split: if_split_asm kernel_object.split_asm) + apply (rename_tac aa ba) + apply (drule_tac x="(aa, ba)" in bspec, simp) + apply clarsimp + apply (frule_tac ko'="kernel_object.TCB tcb" and x'=ptr in obj_relation_cut_same_type) + apply (simp add: tcb_relation_cut_def)+ + apply clarsimp + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule (1) bspec) + apply (insert exst) + apply (clarsimp simp: etcb_relation_def exst_same_def) + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (insert sched_pointers flag exst) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext new_tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev new_tcb') = tcbSchedPrevs_of s'") + apply 
(fastforce simp: opt_map_def) + apply (clarsimp simp: ready_queue_relation_def opt_pred_def opt_map_def exst_same_def inQ_def + split: option.splits) + apply (metis (mono_tags, opaque_lifting)) + apply (clarsimp simp: fun_upd_def caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def) + done + +lemma setObject_update_TCB_corres: + "\tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'; + \(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb; + \(getF, v) \ ran tcb_cte_cases. getF new_tcb' = getF tcb'; + tcbSchedPrev new_tcb' = tcbSchedPrev tcb'; tcbSchedNext new_tcb' = tcbSchedNext tcb'; + tcbQueued new_tcb' = tcbQueued tcb'; exst_same tcb' new_tcb'; + r () ()\ \ + corres r + (\s. get_tcb ptr s = Some tcb) (\s'. (tcb', s') \ fst (getObject ptr s')) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" + apply (rule corres_guard_imp) + apply (erule (7) setObject_update_TCB_corres') + apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def + loadObject_default_def objBits_simps' in_magnitude_check)+ + done + +lemma getObject_TCB_corres: + "corres tcb_relation (tcb_at t and pspace_aligned and pspace_distinct) \ + (gets_the (get_tcb t)) (getObject t)" + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_gets_the) + apply (rule corres_get_tcb) + apply (simp add: tcb_at_def) + apply assumption + done + +lemma threadGet_corres: + assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ r (f tcb) (f' tcb')" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get f t) (threadGet f' t)" + apply (simp add: thread_get_def threadGet_def) + apply (fold liftM_def) + apply simp + apply (rule corres_rel_imp) + apply (rule getObject_TCB_corres) + apply (simp add: x) + done + +lemma threadGet_inv [wp]: "\P\ threadGet f t \\rv. P\" + by (simp add: threadGet_def getObject_inv_tcb | wp)+ + +lemma ball_tcb_cte_casesI: + "\ P (tcbCTable, tcbCTable_update); + P (tcbVTable, tcbVTable_update); + P (tcbReply, tcbReply_update); + P (tcbCaller, tcbCaller_update); + P (tcbIPCBufferFrame, tcbIPCBufferFrame_update) \ + \ \x \ ran tcb_cte_cases. P x" + by (simp add: tcb_cte_cases_def cteSizeBits_def) + +lemma all_tcbI: + "\ \a b c d e f g h i j k l m n p q r s. P (Thread a b c d e f g h i j k l m n p q r s) \ + \ \tcb. P tcb" + by (rule allI, case_tac tcb, simp) + +lemma threadset_corresT: + assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation (f tcb) (f' tcb')" + assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" + assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (f' tcb) = getF tcb" + assumes sched_pointers: "\tcb. tcbSchedPrev (f' tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (f' tcb) = tcbSchedNext tcb" + assumes flag: "\tcb. tcbQueued (f' tcb) = tcbQueued tcb" + assumes e: "\tcb'. 
exst_same tcb' (f' tcb')" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) + \ + (thread_set f t) (threadSet f' t)" + apply (simp add: thread_set_def threadSet_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getObject_TCB_corres]) + apply (rule setObject_update_TCB_corres') + apply (erule x) + apply (rule y) + apply (clarsimp simp: bspec_split [OF spec [OF z]]) + apply fastforce + apply (rule sched_pointers) + apply (rule sched_pointers) + apply (rule flag) + apply simp + apply (rule e) + apply wp+ + apply (clarsimp simp add: tcb_at_def obj_at_def) + apply (drule get_tcb_SomeD) + apply fastforce + apply simp + done + +lemmas threadset_corres = + threadset_corresT [OF _ _ all_tcbI, OF _ ball_tcb_cap_casesI ball_tcb_cte_casesI] + +lemma pspace_relation_tcb_at: + assumes p: "pspace_relation (kheap a) (ksPSpace c)" + assumes t: "tcb_at' t c" + shows "tcb_at t a" using assms + apply (clarsimp simp: obj_at'_def) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE) + apply (clarsimp simp: other_obj_relation_def is_tcb obj_at_def + split: Structures_A.kernel_object.split_asm if_split_asm + AARCH64_A.arch_kernel_obj.split_asm)+ + done + +lemma threadSet_corres_noopT: + assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation tcb (fn tcb')" + assumes y: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (fn tcb) = getF tcb" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" + assumes e: "\tcb'. exst_same tcb' (fn tcb')" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (return v) (threadSet fn t)" +proof - + have S: "\t s. tcb_at t s \ return v s = (thread_set id t >>= (\x. return v)) s" + apply (clarsimp simp: tcb_at_def) + apply (simp add: return_def thread_set_def gets_the_def assert_def + assert_opt_def simpler_gets_def set_object_def get_object_def + put_def get_def bind_def) + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s", simp) + apply (simp add: map_upd_triv get_tcb_SomeD)+ + done + show ?thesis + apply (rule stronger_corres_guard_imp) + apply (subst corres_cong [OF refl refl S refl refl]) + defer + apply (subst bind_return [symmetric], + rule corres_underlying_split [OF threadset_corresT]) + apply (simp add: x) + apply simp + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) + apply (rule e) + apply (rule corres_noop [where P=\ and P'=\]) + apply simp + apply (rule no_fail_pre, wpsimp+)[1] + apply wpsimp+ + done +qed + +lemmas threadSet_corres_noop = + threadSet_corres_noopT [OF _ all_tcbI, OF _ ball_tcb_cte_casesI] + +lemma threadSet_corres_noop_splitT: + assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation tcb (fn tcb')" + assumes y: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (fn tcb) = getF tcb" + assumes z: "corres r P Q' m m'" + assumes w: "\P'\ threadSet fn t \\x. Q'\" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" + assumes e: "\tcb'. exst_same tcb' (fn tcb')" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct and P) P' + m (threadSet fn t >>= (\rv. 
m'))" + apply (rule corres_guard_imp) + apply (subst return_bind[symmetric]) + apply (rule corres_split_nor[OF threadSet_corres_noopT]) + apply (simp add: x) + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) + apply (rule e) + apply (rule z) + apply (wp w)+ + apply simp + apply simp + done + +lemmas threadSet_corres_noop_split = + threadSet_corres_noop_splitT [OF _ all_tcbI, OF _ ball_tcb_cte_casesI] + +lemma threadSet_tcb' [wp]: + "\tcb_at' t\ threadSet f t' \\rv. tcb_at' t\" + by (simp add: threadSet_def) wp + +lemma threadSet_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ threadSet f t \\rv s. P (ksSchedulerAction s)\" + unfolding threadSet_def + by (simp add: updateObject_default_def | wp setObject_nosch)+ + +(* The function "thread_set f p" updates a TCB at p using function f. + It should not be used to change capabilities, though. *) +lemma setObject_tcb_valid_objs: + "\valid_objs' and (tcb_at' t and valid_obj' (injectKO v))\ setObject t (v :: tcb) \\rv. valid_objs'\" + apply (rule setObject_valid_objs') + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma setObject_tcb_at': + "\tcb_at' t'\ setObject t (v :: tcb) \\rv. tcb_at' t'\" + apply (rule obj_at_setObject1) + apply (clarsimp simp: updateObject_default_def return_def in_monad) + apply (simp add: objBits_simps) + done + +lemma setObject_sa_unchanged: + "\\s. P (ksSchedulerAction s)\ setObject t (v :: tcb) \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setObject_def split_def) + apply (wp | simp add: updateObject_default_def)+ + done + +lemma setObject_queues_unchanged: + assumes inv: "\P p q n obj. \P\ updateObject v obj p q n \\r. P\" + shows "\\s. P (ksReadyQueues s)\ setObject t v \\rv s. P (ksReadyQueues s)\" + apply (simp add: setObject_def split_def) + apply (wp inv | simp)+ + done + +lemma setObject_queues_unchanged_tcb[wp]: + "\\s. P (ksReadyQueues s)\ setObject t (v :: tcb) \\rv s. P (ksReadyQueues s)\" + apply (rule setObject_queues_unchanged) + apply (wp|simp add: updateObject_default_def)+ + done + +lemma setObject_queuesL1_unchanged_tcb[wp]: + "\\s. P (ksReadyQueuesL1Bitmap s)\ setObject t (v :: tcb) \\rv s. P (ksReadyQueuesL1Bitmap s)\" + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma setObject_queuesL2_unchanged_tcb[wp]: + "\\s. P (ksReadyQueuesL2Bitmap s)\ setObject t (v :: tcb) \\rv s. P (ksReadyQueuesL2Bitmap s)\" + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma setObject_tcb_ctes_of[wp]: + "\\s. P (ctes_of s) \ + obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF v) t s\ + setObject t v + \\rv s. P (ctes_of s)\" + apply (rule setObject_ctes_of) + apply (clarsimp simp: updateObject_default_def in_monad prod_eq_iff + obj_at'_def objBits_simps' in_magnitude_check) + apply fastforce + apply (clarsimp simp: updateObject_default_def in_monad prod_eq_iff + obj_at'_def objBits_simps in_magnitude_check bind_def) + done + +lemma setObject_tcb_mdb' [wp]: + "\ valid_mdb' and + obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF v) t\ + setObject t (v :: tcb) + \\rv. valid_mdb'\" + unfolding valid_mdb'_def pred_conj_def + by (rule setObject_tcb_ctes_of) + +lemma setObject_tcb_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (t := tcb_st_refs_of' (tcbState v) + \ tcb_bound_refs' (tcbBoundNotification v)))\ + setObject t (v :: tcb) \\rv s. 
P (state_refs_of' s)\" + by (wp setObject_state_refs_of', + simp_all add: objBits_simps' fun_upd_def) + +lemma setObject_tcb_iflive': + "\\s. if_live_then_nonz_cap' s \ + (live' (injectKO v) \ ex_nonz_cap_to' t s) + \ obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF v) t s\ + setObject t (v :: tcb) + \\rv. if_live_then_nonz_cap'\" + apply (rule setObject_iflive') + apply (simp add: objBits_simps')+ + apply (clarsimp simp: updateObject_default_def in_monad obj_at'_def + in_magnitude_check objBits_simps' prod_eq_iff) + apply fastforce + apply (clarsimp simp: updateObject_default_def bind_def) + done + +lemma setObject_tcb_idle': + "\\s. valid_idle' s \ + (t = ksIdleThread s \ idle_tcb' v)\ + setObject t (v :: tcb) \\rv. valid_idle'\" + apply (rule hoare_pre) + apply (rule setObject_idle') + apply (simp add: objBits_simps')+ + apply (simp add: updateObject_default_inv) + apply (simp add: idle_tcb_ps_def) + done + +lemma setObject_tcb_irq_node'[wp]: + "\\s. P (irq_node' s)\ setObject t (v :: tcb) \\rv s. P (irq_node' s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_tcb_ifunsafe': + "\if_unsafe_then_cap' and obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF v) t\ + setObject t (v :: tcb) \\rv. if_unsafe_then_cap'\" + unfolding pred_conj_def + apply (rule setObject_ifunsafe') + apply (clarsimp simp: updateObject_default_def in_monad obj_at'_def + in_magnitude_check objBits_simps' prod_eq_iff) + apply fastforce + apply (clarsimp simp: updateObject_default_def bind_def) + apply wp + done + +lemma setObject_tcb_arch' [wp]: + "\\s. P (ksArchState s)\ setObject t (v :: tcb) \\rv s. P (ksArchState s)\" + apply (simp add: setObject_def split_def updateObject_default_def) + apply wp + apply simp + done + +lemma setObject_tcb_valid_arch' [wp]: + "\valid_arch_state'\ setObject t (v :: tcb) \\rv. valid_arch_state'\" + by (wpsimp wp: valid_arch_state_lift' setObject_typ_at' setObject_ko_wp_at + simp: objBits_simps', rule refl; simp add: pred_conj_def) + (clarsimp simp: is_vcpu'_def ko_wp_at'_def obj_at'_def) + +lemma setObject_tcb_refs' [wp]: + "\\s. P (global_refs' s)\ setObject t (v::tcb) \\rv s. P (global_refs' s)\" + apply (clarsimp simp: setObject_def split_def updateObject_default_def) + apply wp + apply (simp add: global_refs'_def) + done + +lemma setObject_tcb_valid_globals' [wp]: + "\valid_global_refs' and + obj_at' (\tcb. (\(getF, setF) \ ran tcb_cte_cases. getF tcb = getF v)) t\ + setObject t (v :: tcb) + \\rv. valid_global_refs'\" + unfolding pred_conj_def valid_global_refs'_def + apply (rule hoare_lift_Pf2 [where f="global_refs'"]) + apply (rule hoare_lift_Pf2 [where f="gsMaxObjectSize"]) + apply (rule setObject_ctes_of) + apply (clarsimp simp: updateObject_default_def in_monad obj_at'_def + in_magnitude_check objBits_simps' prod_eq_iff) + apply fastforce + apply (clarsimp simp: updateObject_default_def in_monad prod_eq_iff + obj_at'_def objBits_simps in_magnitude_check bind_def) + apply (wp | wp setObject_ksPSpace_only updateObject_default_inv | simp)+ + done + +lemma setObject_tcb_irq_states' [wp]: + "\valid_irq_states'\ setObject t (v :: tcb) \\rv. 
valid_irq_states'\" + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=ksInterruptState, OF setObject_ksInterrupt]) + apply (simp, rule updateObject_default_inv) + apply (rule hoare_use_eq [where f=ksMachineState, OF setObject_ksMachine]) + apply (simp, rule updateObject_default_inv) + apply wp + apply assumption + done + +lemma getObject_tcb_wp: + "\\s. tcb_at' p s \ (\t::tcb. ko_at' t p s \ Q t s)\ getObject p \Q\" + by (clarsimp simp: getObject_def valid_def in_monad split_def objBits_simps' + loadObject_default_def obj_at'_def in_magnitude_check) + +lemma setObject_tcb_pspace_no_overlap': + "\pspace_no_overlap' w s and tcb_at' t\ + setObject t (tcb::tcb) + \\rv. pspace_no_overlap' w s\" + apply (clarsimp simp: setObject_def split_def valid_def in_monad) + apply (clarsimp simp: obj_at'_def) + apply (erule (1) ps_clear_lookupAround2) + apply (rule order_refl) + apply (erule is_aligned_no_overflow) + apply simp + apply (clarsimp simp: updateObject_default_def in_monad objBits_simps in_magnitude_check) + apply (fastforce simp: pspace_no_overlap'_def objBits_simps) + done + +lemma threadSet_pspace_no_overlap' [wp]: + "\pspace_no_overlap' w s\ threadSet f t \\rv. pspace_no_overlap' w s\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_pspace_no_overlap' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma threadSet_global_refsT: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (F tcb) = getF tcb" + shows "\valid_global_refs'\ threadSet F t \\rv. valid_global_refs'\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_valid_globals' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def bspec_split [OF spec [OF x]]) + done + +lemmas threadSet_global_refs[wp] = + threadSet_global_refsT [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_valid_pspace'T_P: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + assumes z: "\tcb. (P \ Q (tcbState tcb)) \ + (\s. valid_tcb_state' (tcbState tcb) s + \ valid_tcb_state' (tcbState (F tcb)) s)" + assumes v: "\tcb. (P \ Q' (tcbBoundNotification tcb)) \ + (\s. valid_bound_ntfn' (tcbBoundNotification tcb) s + \ valid_bound_ntfn' (tcbBoundNotification (F tcb)) s)" + assumes p: "\tcb. (P \ Q'' (tcbSchedPrev tcb)) \ + (\s. opt_tcb_at' (tcbSchedPrev tcb) s + \ opt_tcb_at' (tcbSchedPrev (F tcb)) s)" + assumes n: "\tcb. (P \ Q''' (tcbSchedNext tcb)) \ + (\s. opt_tcb_at' (tcbSchedNext tcb) s + \ opt_tcb_at' (tcbSchedNext (F tcb)) s)" + assumes y: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits + \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + assumes u: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + assumes w: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + assumes w': "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes v': "\tcb s. valid_arch_tcb' (tcbArch tcb) s \ valid_arch_tcb' (tcbArch (F tcb)) s" + shows + "\valid_pspace' and (\s. P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s + \ obj_at' (\tcb. Q'' (tcbSchedPrev tcb)) t s + \ obj_at' (\tcb. Q''' (tcbSchedNext tcb)) t s)\ + threadSet F t + \\_. 
valid_pspace'\" + apply (simp add: valid_pspace'_def threadSet_def) + apply (rule hoare_pre, + wp setObject_tcb_valid_objs getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) + apply (erule(1) valid_objsE') + apply (clarsimp simp add: valid_obj'_def valid_tcb'_def + bspec_split [OF spec [OF x]] z + split_paired_Ball y u w v w' v' p n) + done + +lemmas threadSet_valid_pspace'T = + threadSet_valid_pspace'T_P[where P=False, simplified] + +lemmas threadSet_valid_pspace' = + threadSet_valid_pspace'T [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_ifunsafe'T: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + shows "\if_unsafe_then_cap'\ threadSet F t \\rv. if_unsafe_then_cap'\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_ifunsafe' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def bspec_split [OF spec [OF x]]) + done + +lemmas threadSet_ifunsafe' = + threadSet_ifunsafe'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_state_refs_of'_helper[simp]: + "{r. (r \ tcb_st_refs_of' ts \ + r \ tcb_bound_refs' ntfnptr) \ + snd r = TCBBound} = + tcb_bound_refs' ntfnptr" + by (auto simp: tcb_st_refs_of'_def tcb_bound_refs'_def + split: thread_state.splits) + +lemma threadSet_state_refs_of'_helper'[simp]: + "{r. (r \ tcb_st_refs_of' ts \ + r \ tcb_bound_refs' ntfnptr) \ + snd r \ TCBBound} = + tcb_st_refs_of' ts" + by (auto simp: tcb_st_refs_of'_def tcb_bound_refs'_def + split: thread_state.splits) + +lemma threadSet_state_refs_of'T_P: + assumes x: "\tcb. (P' \ Q (tcbState tcb)) \ + tcb_st_refs_of' (tcbState (F tcb)) + = f' (tcb_st_refs_of' (tcbState tcb))" + assumes y: "\tcb. (P' \ Q' (tcbBoundNotification tcb)) \ + tcb_bound_refs' (tcbBoundNotification (F tcb)) + = g' (tcb_bound_refs' (tcbBoundNotification tcb))" + shows + "\\s. P ((state_refs_of' s) (t := f' {r \ state_refs_of' s t. snd r \ TCBBound} + \ g' {r \ state_refs_of' s t. snd r = TCBBound})) + \ (P' \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s)\ + threadSet F t + \\rv s. P (state_refs_of' s)\" + apply (simp add: threadSet_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def + elim!: rsubst[where P=P] intro!: ext) + apply (cut_tac s=s and p=t and 'a=tcb in ko_at_state_refs_ofD') + apply (simp add: obj_at'_def) + apply (clarsimp simp: x y) + done + +lemmas threadSet_state_refs_of'T = + threadSet_state_refs_of'T_P [where P'=False, simplified] + +lemmas threadSet_state_refs_of' = + threadSet_state_refs_of'T [OF all_tcbI all_tcbI] + +lemma threadSet_state_hyp_refs_of': + assumes y: "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows "\\s. P (state_hyp_refs_of' s)\ threadSet F t \\rv s. P (state_hyp_refs_of' s)\" + apply (simp add: threadSet_def) + apply (wpsimp wp: setObject_state_hyp_refs_of' getObject_tcb_wp + simp: objBits_simps' obj_at'_def state_hyp_refs_of'_def) + apply (clarsimp simp:objBits_simps' y state_hyp_refs_of'_def + elim!: rsubst[where P=P] intro!: ext)+ + done + +lemma threadSet_iflive'T: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + shows + "\\s. if_live_then_nonz_cap' s + \ ((\tcb. \ bound (tcbBoundNotification tcb) \ bound (tcbBoundNotification (F tcb)) + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. (tcbState tcb = Inactive \ tcbState tcb = IdleThreadState) + \ tcbState (F tcb) \ Inactive + \ tcbState (F tcb) \ IdleThreadState + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. 
tcbSchedNext tcb = None \ tcbSchedNext (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. tcbSchedPrev tcb = None \ tcbSchedPrev (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb) + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. \ bound (atcbVCPUPtr (tcbArch tcb)) \ bound (atcbVCPUPtr (tcbArch (F tcb))) + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s)\ + threadSet F t + \\rv. if_live_then_nonz_cap'\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_iflive' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def live'_def hyp_live'_def) + apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric])+ + apply (rule conjI) + apply (rule impI, clarsimp) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ko_wp_at'_def live'_def hyp_live'_def) + apply (clarsimp simp: bspec_split [OF spec [OF x]]) + done + +lemmas threadSet_iflive' = + threadSet_iflive'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_cte_wp_at'T: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (F tcb) = getF tcb" + shows "\\s. P' (cte_wp_at' P p s)\ threadSet F t \\rv s. P' (cte_wp_at' P p s)\" + apply (simp add: threadSet_def) + apply (rule bind_wp [where Q'="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) + apply (rename_tac tcb) + apply (rule setObject_cte_wp_at2') + apply (clarsimp simp: updateObject_default_def in_monad objBits_simps' + obj_at'_def in_magnitude_check prod_eq_iff) + apply (case_tac tcb, clarsimp simp: bspec_split [OF spec [OF x]]) + apply (clarsimp simp: updateObject_default_def in_monad bind_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemmas threadSet_cte_wp_at' = + threadSet_cte_wp_at'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_ctes_ofT: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (F tcb) = getF tcb" + shows "\\s. P (ctes_of s)\ threadSet F t \\rv s. P (ctes_of s)\" + apply (simp add: threadSet_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + apply (case_tac obj) + apply (simp add: bspec_split [OF spec [OF x]]) + done + +lemmas threadSet_ctes_of = + threadSet_ctes_ofT [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemmas threadSet_cap_to' = ex_nonz_cap_to_pres' [OF threadSet_cte_wp_at'] + +lemma threadSet_cap_to: + "(\tcb. \(getF, v)\ran tcb_cte_cases. getF (f tcb) = getF tcb) + \ threadSet f tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: hoare_vcg_ex_lift threadSet_cte_wp_at' + simp: ex_nonz_cap_to'_def tcb_cte_cases_def objBits_simps') + +lemma threadSet_idle'T: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + shows + "\\s. valid_idle' s + \ (t = ksIdleThread s \ + (\tcb. ko_at' tcb t s \ idle_tcb' tcb \ idle_tcb' (F tcb)))\ + threadSet F t + \\rv. valid_idle'\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_idle' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def valid_idle'_def pred_tcb_at'_def) + done + +lemmas threadSet_idle' = + threadSet_idle'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma set_tcb_valid_bitmapQ[wp]: + "\ valid_bitmapQ \ setObject t (f tcb :: tcb) \\_. valid_bitmapQ \" + apply (rule setObject_tcb_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma set_tcb_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ setObject t (f tcb :: tcb) \\_. 
bitmapQ_no_L1_orphans \" + apply (rule setObject_tcb_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma set_tcb_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ setObject t (f tcb :: tcb) \\_. bitmapQ_no_L2_orphans \" + apply (rule setObject_tcb_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma threadSet_valid_bitmapQ[wp]: + "\ valid_bitmapQ \ threadSet f t \ \rv. valid_bitmapQ \" + unfolding bitmapQ_defs threadSet_def + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma threadSet_valid_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ threadSet f t \ \rv. bitmapQ_no_L1_orphans \" + unfolding bitmapQ_defs threadSet_def + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma threadSet_valid_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ threadSet f t \ \rv. bitmapQ_no_L2_orphans \" + unfolding bitmapQ_defs threadSet_def + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma threadSet_cur: + "\\s. cur_tcb' s\ threadSet f t \\rv s. cur_tcb' s\" + apply (simp add: threadSet_def cur_tcb'_def) + apply (wp hoare_lift_Pf [OF setObject_tcb_at'] setObject_ct_inv) + done + +lemma modifyReadyQueuesL1Bitmap_obj_at[wp]: + "\obj_at' P t\ modifyReadyQueuesL1Bitmap a b \\rv. obj_at' P t\" + apply (simp add: modifyReadyQueuesL1Bitmap_def getReadyQueuesL1Bitmap_def) + apply wp + apply (fastforce intro: obj_at'_pspaceI) + done + +crunches setThreadState, setBoundNotification + for valid_arch' [wp]: valid_arch_state' + (simp: unless_def crunch_simps wp: crunch_wps) + +crunch ksInterrupt'[wp]: threadSet "\s. P (ksInterruptState s)" + (wp: setObject_ksInterrupt updateObject_default_inv) + +crunch ksArchState[wp]: threadSet "\s. P (ksArchState s)" + +lemma threadSet_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ threadSet t F \\rv s. P (typ_at' T p s)\" + by (simp add: threadSet_def, wp setObject_typ_at') + +lemmas threadSet_typ_at_lifts[wp] = typ_at_lifts [OF threadSet_typ_at'] + +crunch irq_states' [wp]: threadSet valid_irq_states' + +crunch pspace_domain_valid [wp]: threadSet "pspace_domain_valid" + +lemma threadSet_obj_at'_really_strongest: + "\\s. tcb_at' t s \ obj_at' (\obj. if t = t' then P (f obj) else P obj) + t' s\ threadSet f t \\rv. obj_at' P t'\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_strongest) + apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) + apply (rule hoare_post_imp [where Q="\rv s. \ tcb_at' t s \ tcb_at' t s"]) + apply simp + apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) + apply (rule getObject_inv_tcb) + apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply simp + apply (simp add: objBits_simps') + apply (erule obj_at'_weakenE) + apply simp + apply (cases "t = t'", simp_all) + apply (rule OMG_getObject_tcb) + apply wp + done + +(* FIXME: move *) +lemma tcb_at_typ_at': + "tcb_at' p s = typ_at' TCBT p s" + unfolding typ_at'_def + apply (rule iffI) + apply (clarsimp simp add: obj_at'_def ko_wp_at'_def) + apply (clarsimp simp add: obj_at'_def ko_wp_at'_def) + apply (case_tac ko; simp) + done + +(* FIXME: move *) +lemma not_obj_at': + "(\obj_at' (\tcb::tcb. 
P tcb) t s) = (\typ_at' TCBT t s \ obj_at' (Not \ P) t s)" + apply (simp add: obj_at'_real_def typ_at'_def ko_wp_at'_def objBits_simps) + apply (rule iffI) + apply (clarsimp) + apply (case_tac ko) + apply (clarsimp)+ + done + +(* FIXME: move *) +lemma not_obj_at_elim': + assumes typat: "typ_at' TCBT t s" + and nobj: "\obj_at' (\tcb::tcb. P tcb) t s" + shows "obj_at' (Not \ P) t s" + using assms + apply - + apply (drule not_obj_at' [THEN iffD1]) + apply (clarsimp) + done + +(* FIXME: move *) +lemmas tcb_at_not_obj_at_elim' = not_obj_at_elim' [OF tcb_at_typ_at' [THEN iffD1]] + +(* FIXME: move *) +lemma lift_neg_pred_tcb_at': + assumes typat: "\P T p. \\s. P (typ_at' T p s)\ f \\_ s. P (typ_at' T p s)\" + and sttcb: "\S p. \pred_tcb_at' proj S p\ f \\_. pred_tcb_at' proj S p\" + shows "\\s. P (pred_tcb_at' proj S p s)\ f \\_ s. P (pred_tcb_at' proj S p s)\" + apply (rule_tac P=P in P_bool_lift) + apply (rule sttcb) + apply (simp add: pred_tcb_at'_def not_obj_at') + apply (wp hoare_convert_imp) + apply (rule typat) + prefer 2 + apply assumption + apply (rule hoare_chain [OF sttcb]) + apply (fastforce simp: pred_tcb_at'_def comp_def) + apply (clarsimp simp: pred_tcb_at'_def elim!: obj_at'_weakenE) + done + +lemma threadSet_obj_at'_strongish[wp]: + "\obj_at' (\obj. if t = t' then P (f obj) else P obj) t'\ + threadSet f t \\rv. obj_at' P t'\" + by (simp add: hoare_weaken_pre [OF threadSet_obj_at'_really_strongest]) + +lemma threadSet_pred_tcb_no_state: + assumes "\tcb. proj (tcb_to_itcb' (f tcb)) = proj (tcb_to_itcb' tcb)" + shows "\\s. P (pred_tcb_at' proj P' t' s)\ threadSet f t \\rv s. P (pred_tcb_at' proj P' t' s)\" +proof - + have pos: "\P' t' t. + \pred_tcb_at' proj P' t'\ threadSet f t \\rv. pred_tcb_at' proj P' t'\" + apply (simp add: pred_tcb_at'_def) + apply (wp threadSet_obj_at'_strongish) + apply clarsimp + apply (erule obj_at'_weakenE) + apply (insert assms) + apply clarsimp + done + show ?thesis + apply (rule_tac P=P in P_bool_lift) + apply (rule pos) + apply (rule_tac Q="\_ s. \ tcb_at' t' s \ pred_tcb_at' proj (\tcb. \ P' tcb) t' s" + in hoare_post_imp) + apply (erule disjE) + apply (clarsimp dest!: pred_tcb_at') + apply (clarsimp) + apply (frule_tac P=P' and Q="\tcb. \ P' tcb" in pred_tcb_at_conj') + apply (clarsimp)+ + apply (wp hoare_convert_imp) + apply (simp add: typ_at_tcb' [symmetric]) + apply (wp pos)+ + apply (clarsimp simp: pred_tcb_at'_def not_obj_at' elim!: obj_at'_weakenE) + done +qed + +lemma threadSet_ct[wp]: + "\\s. P (ksCurThread s)\ threadSet f t \\rv s. P (ksCurThread s)\" + apply (simp add: threadSet_def) + apply (wp setObject_ct_inv) + done + +lemma threadSet_cd[wp]: + "\\s. P (ksCurDomain s)\ threadSet f t \\rv s. P (ksCurDomain s)\" + apply (simp add: threadSet_def) + apply (wp setObject_cd_inv) + done + + +lemma threadSet_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ threadSet f t \\rv s. P (ksDomSchedule s)\" + apply (simp add: threadSet_def) + apply (wp setObject_ksDomSchedule_inv) + done + +lemma threadSet_it[wp]: + "\\s. P (ksIdleThread s)\ threadSet f t \\rv s. P (ksIdleThread s)\" + apply (simp add: threadSet_def) + apply (wp setObject_it_inv) + done + +lemma threadSet_sch_act: + "(\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb) \ + \\s. sch_act_wf (ksSchedulerAction s) s\ + threadSet F t + \\rv s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (wp sch_act_wf_lift threadSet_pred_tcb_no_state | simp add: tcb_in_cur_domain'_def)+ + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp threadSet_obj_at'_strongish | simp)+ + done + +lemma threadSet_sch_actT_P: + assumes z: "\ P \ (\tcb. tcbState (F tcb) = tcbState tcb + \ tcbDomain (F tcb) = tcbDomain tcb)" + assumes z': "P \ (\tcb. tcbState (F tcb) = Inactive \ tcbDomain (F tcb) = tcbDomain tcb ) + \ (\st. Q st \ st = Inactive)" + shows "\\s. sch_act_wf (ksSchedulerAction s) s \ (P \ st_tcb_at' Q t s)\ + threadSet F t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + using z z' + apply (case_tac P, simp_all add: threadSet_sch_act) + apply (clarsimp simp: valid_def) + apply (frule_tac P1="\sa. sch_act_wf sa s" + in use_valid [OF _ threadSet_nosch], assumption) + apply (frule_tac P1="(=) (ksCurThread s)" + in use_valid [OF _ threadSet_ct], rule refl) + apply (frule_tac P1="(=) (ksCurDomain s)" + in use_valid [OF _ threadSet_cd], rule refl) + apply (case_tac "ksSchedulerAction b", + simp_all add: ct_in_state'_def pred_tcb_at'_def) + apply (subgoal_tac "t \ ksCurThread s") + apply (drule_tac t'1="ksCurThread s" + and P1="activatable' \ tcbState" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp simp: o_def) + apply (clarsimp simp: o_def) + apply (fastforce simp: obj_at'_def) + apply (rename_tac word) + apply (subgoal_tac "t \ word") + apply (frule_tac t'1=word + and P1="runnable' \ tcbState" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp simp: o_def) + apply (rule conjI) + apply (clarsimp simp: o_def) + apply (clarsimp simp: tcb_in_cur_domain'_def) + apply (frule_tac t'1=word + and P1="\tcb. ksCurDomain b = tcbDomain tcb" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp simp: o_def)+ + apply (fastforce simp: obj_at'_def) + done + +lemma threadSet_ksMachine[wp]: + "\\s. P (ksMachineState s)\ threadSet F t \\_ s. P (ksMachineState s)\" + apply (simp add: threadSet_def) + by (wp setObject_ksMachine updateObject_default_inv | + simp)+ + +lemma threadSet_vms'[wp]: + "\valid_machine_state'\ threadSet F t \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + by (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) + +lemma threadSet_not_inQ: + "\ct_not_inQ and (\s. (\tcb. tcbQueued (F tcb) \ \ tcbQueued tcb) + \ ksSchedulerAction s = ResumeCurrentThread + \ t \ ksCurThread s)\ + threadSet F t \\_. ct_not_inQ\" + apply (simp add: threadSet_def ct_not_inQ_def) + apply (wp) + apply (rule hoare_convert_imp [OF setObject_nosch]) + apply (rule updateObject_tcb_inv) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest getObject_tcb_wp)+ + apply (case_tac "t = ksCurThread s") + apply (clarsimp simp: obj_at'_def)+ + done + +lemma threadSet_invs_trivial_helper[simp]: + "{r \ state_refs_of' s t. snd r \ TCBBound} + \ {r \ state_refs_of' s t. snd r = TCBBound} = state_refs_of' s t" + by auto + +lemma threadSet_ct_idle_or_in_cur_domain': + "(\tcb. tcbDomain (F tcb) = tcbDomain tcb) \ \ct_idle_or_in_cur_domain'\ threadSet F t \\_. ct_idle_or_in_cur_domain'\" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift| simp)+ + done + +crunch ksDomScheduleIdx[wp]: threadSet "\s. P (ksDomScheduleIdx s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) +crunch gsUntypedZeroRanges[wp]: threadSet "\s. 
P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) + +lemma setObject_tcb_ksDomScheduleIdx [wp]: + "\\s. P (ksDomScheduleIdx s) \ setObject t (v::tcb) \\_ s. P (ksDomScheduleIdx s)\" + apply (simp add:setObject_def updateObject_default_def in_monad) + apply wpsimp + done + +lemma threadSet_valid_dom_schedule': + "\ valid_dom_schedule'\ threadSet F t \\_. valid_dom_schedule'\" + unfolding threadSet_def + by (wp setObject_ksDomSchedule_inv hoare_Ball_helper) + +lemma threadSet_wp: + "\\s. \tcb. ko_at' tcb t s \ P (s\ksPSpace := (ksPSpace s)(t \ injectKO (f tcb))\)\ + threadSet f t + \\_. P\" + unfolding threadSet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (auto simp: obj_at'_def split: if_splits) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: fun_upd_def) + apply (prop_tac "\ptr. psMap (ksPSpace s) ptr = ksPSpace s ptr") + apply fastforce + apply metis + done + +lemma threadSet_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb\ + \ threadSet F tcbPtr \\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (fastforce simp: opt_map_def obj_at'_def elim: rsubst2[where P=P]) + done + +lemma threadSet_valid_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb; + \tcb. tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tcbPtr \valid_sched_pointers\" + unfolding valid_sched_pointers_def + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + by (fastforce simp: opt_pred_def opt_map_def obj_at'_def split: option.splits if_splits) + +lemma threadSet_tcbSchedNexts_of: + "(\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb) \ + threadSet F t \\s. P (tcbSchedNexts_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def) + done + +lemma threadSet_tcbSchedPrevs_of: + "(\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb) \ + threadSet F t \\s. P (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def) + done + +lemma threadSet_tcbQueued: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + threadSet F t \\s. P (tcbQueued |< tcbs_of' s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_pred_def opt_map_def obj_at'_def) + done + +crunches threadSet + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + +lemma threadSet_invs_trivialT: + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb" + "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits + \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbPriority (F tcb) = tcbPriority tcb" + "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + "\tcb. 
atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows "threadSet F t \invs'\" + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbQueued + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of valid_bitmaps_lift + | clarsimp simp: assms cteCaps_of_def valid_arch_tcb'_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: assms obj_at'_def) + +lemmas threadSet_invs_trivial = + threadSet_invs_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] + +lemma zobj_refs'_capRange: + "s \' cap \ zobj_refs' cap \ capRange cap" + apply (cases cap; simp add: valid_cap'_def capAligned_def capRange_def is_aligned_no_overflow) + apply (rename_tac aobj_cap) + apply (case_tac aobj_cap; clarsimp dest!: is_aligned_no_overflow) + done + +lemma global'_no_ex_cap: + "\valid_global_refs' s; valid_pspace' s\ \ \ ex_nonz_cap_to' (ksIdleThread s) s" + apply (clarsimp simp: ex_nonz_cap_to'_def valid_global_refs'_def valid_refs'_def2 valid_pspace'_def) + apply (drule cte_wp_at_norm', clarsimp) + apply (frule(1) cte_wp_at_valid_objs_valid_cap', clarsimp) + apply (clarsimp simp: cte_wp_at'_def dest!: zobj_refs'_capRange, blast) + done + +lemma getObject_tcb_sp: + "\P\ getObject r \\t::tcb. P and ko_at' t r\" + by (wp getObject_obj_at'; simp) + +lemma threadSet_valid_objs': + "\valid_objs' and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\rv. valid_objs'\" + apply (simp add: threadSet_def) + apply wp + prefer 2 + apply (rule getObject_tcb_sp) + apply (rule hoare_weaken_pre) + apply (rule setObject_tcb_valid_objs) + prefer 2 + apply assumption + apply (clarsimp simp: valid_obj'_def) + apply (frule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def) + apply (clarsimp elim!: obj_at'_weakenE) + done + +lemma atcbVCPUPtr_atcbContextSet_id[simp]: + "atcbVCPUPtr (atcbContextSet f (tcbArch tcb)) = atcbVCPUPtr (tcbArch tcb)" + by (simp add: atcbContextSet_def) + +lemmas typ_at'_valid_tcb'_lift = + typ_at'_valid_obj'_lift[where obj="KOTCB tcb" for tcb, unfolded valid_obj'_def, simplified] + +lemmas setObject_valid_tcb' = typ_at'_valid_tcb'_lift[OF setObject_typ_at'] + +lemma setObject_valid_tcbs': + assumes preserve_valid_tcb': "\s s' ko ko' x n tcb tcb'. + \ (ko', s') \ fst (updateObject val ko ptr x n s); P s; + lookupAround2 ptr (ksPSpace s) = (Some (x, ko), n); + projectKO_opt ko = Some tcb; projectKO_opt ko' = Some tcb'; + valid_tcb' tcb s \ \ valid_tcb' tcb' s" + shows "\valid_tcbs' and P\ setObject ptr val \\rv. valid_tcbs'\" + unfolding valid_tcbs'_def + apply (clarsimp simp: valid_def) + apply (rename_tac s s' ptr' tcb) + apply (prop_tac "\tcb'. 
valid_tcb' tcb s \ valid_tcb' tcb s'") + apply clarsimp + apply (erule (1) use_valid[OF _ setObject_valid_tcb']) + apply (drule spec, erule mp) + apply (clarsimp simp: setObject_def in_monad split_def lookupAround2_char1) + apply (rename_tac s ptr' new_tcb' ptr'' old_tcb_ko' s' f) + apply (case_tac "ptr'' = ptr'"; clarsimp) + apply (prop_tac "\old_tcb' :: tcb. projectKO_opt old_tcb_ko' = Some old_tcb'") + apply (frule updateObject_type) + apply (case_tac old_tcb_ko'; clarsimp simp: project_inject) + apply (erule exE) + apply (rule preserve_valid_tcb', assumption+) + apply (simp add: prod_eqI lookupAround2_char1) + apply force + apply (clarsimp simp: project_inject) + apply (clarsimp simp: project_inject) + done + +lemma setObject_tcb_valid_tcbs': + "\valid_tcbs' and (tcb_at' t and valid_tcb' v)\ setObject t (v :: tcb) \\rv. valid_tcbs'\" + apply (rule setObject_valid_tcbs') + apply (clarsimp simp: updateObject_default_def in_monad project_inject) + done + +lemma threadSet_valid_tcb': + "\valid_tcb' tcb and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. valid_tcb' tcb\" + apply (simp add: threadSet_def) + apply (wpsimp wp: setObject_valid_tcb') + done + +lemma threadSet_valid_tcbs': + "\valid_tcbs' and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. valid_tcbs'\" + apply (simp add: threadSet_def) + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (wpsimp wp: setObject_tcb_valid_tcbs') + apply (clarsimp simp: obj_at'_def valid_tcbs'_def) + done + +lemma asUser_valid_tcbs'[wp]: + "asUser t f \valid_tcbs'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_valid_tcbs' hoare_drop_imps + simp: valid_tcb'_def valid_arch_tcb'_def tcb_cte_cases_def objBits_simps') + done + +lemma asUser_corres': + assumes y: "corres_underlying Id False True r \ \ f g" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t f) (asUser t g)" +proof - + note arch_tcb_context_get_def[simp] + note atcbContextGet_def[simp] + note arch_tcb_context_set_def[simp] + note atcbContextSet_def[simp] + have L1: "corres (\tcb con. (arch_tcb_context_get o tcb_arch) tcb = con) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (gets_the (get_tcb t)) (threadGet (atcbContextGet o tcbArch) t)" + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_gets_the) + apply (simp add: threadGet_def) + apply (rule corres_rel_imp [OF corres_get_tcb]) + apply (simp add: tcb_relation_def arch_tcb_relation_def) + apply (simp add: tcb_at_def)+ + done + have L2: "\tcb tcb' con con'. \ tcb_relation tcb tcb'; con = con'\ + \ tcb_relation (tcb \ tcb_arch := arch_tcb_context_set con (tcb_arch tcb) \) + (tcb' \ tcbArch := atcbContextSet con' (tcbArch tcb') \)" + by (simp add: tcb_relation_def arch_tcb_relation_def) + have L3: "\r add tcb tcb' con con'. \ r () (); con = con'\ \ + corres r (\s. get_tcb add s = Some tcb) + (\s'. (tcb', s') \ fst (getObject add s')) + (set_object add (TCB (tcb \ tcb_arch := arch_tcb_context_set con (tcb_arch tcb) \))) + (setObject add (tcb' \ tcbArch := atcbContextSet con' (tcbArch tcb') \))" + by (rule setObject_update_TCB_corres [OF L2], + (simp add: tcb_cte_cases_def tcb_cap_cases_def cteSizeBits_def exst_same_def)+) + have L4: "\con con'. con = con' \ + corres (\(irv, nc) (irv', nc'). 
r irv irv' \ nc = nc') + \ \ (select_f (f con)) (select_f (g con'))" + using y + by (fastforce simp: corres_underlying_def select_f_def split_def Id_def) + show ?thesis + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (simp add: as_user_def asUser_def) + apply (rule corres_guard_imp) + apply (rule_tac r'="\tcb con. (arch_tcb_context_get o tcb_arch) tcb = con" + in corres_split) + apply simp + apply (rule L1[simplified]) + apply (rule corres_split[OF L4]) + apply simp + apply clarsimp + apply (rule corres_split_nor) + apply (simp add: threadSet_def) + apply (rule corres_symb_exec_r) + apply (rule L3[simplified]) + prefer 5 + apply (rule no_fail_pre_and, wp) + apply (wp select_f_inv | simp)+ + done +qed + +lemma asUser_corres: + assumes y: "corres_underlying Id False True r \ \ f g" + shows "corres r (tcb_at t and invs) (tcb_at' t and invs') (as_user t f) (asUser t g)" + apply (rule corres_guard_imp) + apply (rule asUser_corres' [OF y]) + apply (simp add: invs_def valid_state_def valid_pspace_def) + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + done + +lemma asUser_inv: + assumes x: "\P. \P\ f \\x. P\" + shows "\P\ asUser t f \\x. P\" +proof - + have P: "\a b input. (a, b) \ fst (f input) \ b = input" + by (rule use_valid [OF _ x], assumption, rule refl) + have R: "\x. tcbArch_update (\_. tcbArch x) x = x" + by (case_tac x, simp) + show ?thesis + apply (simp add: asUser_def split_def threadGet_def threadSet_def + liftM_def bind_assoc) + apply (clarsimp simp: valid_def in_monad getObject_def setObject_def + loadObject_default_def objBits_simps' + modify_def split_def updateObject_default_def + in_magnitude_check select_f_def + dest!: P) + apply (simp add: R map_upd_triv) + done +qed + +lemma asUser_getRegister_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (getRegister r)) (asUser t (getRegister r))" + apply (rule asUser_corres') + apply (clarsimp simp: getRegister_def) + done + +lemma user_getreg_inv'[wp]: + "\P\ asUser t (getRegister r) \\x. P\" + apply (rule asUser_inv) + apply (simp_all add: getRegister_def) + done + +lemma asUser_typ_at' [wp]: + "\\s. P (typ_at' T p s)\ asUser t' f \\rv s. P (typ_at' T p s)\" + by (simp add: asUser_def bind_assoc split_def, wp select_f_inv) + +lemmas asUser_typ_ats[wp] = typ_at_lifts [OF asUser_typ_at'] + +lemma asUser_invs[wp]: + "\invs' and tcb_at' t\ asUser t m \\rv. invs'\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps | simp)+ + apply (wp threadSet_invs_trivial hoare_drop_imps | simp)+ + done + +lemma asUser_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ asUser t m \\rv s. P (ksSchedulerAction s)\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps | simp)+ + done + +crunch aligned'[wp]: asUser pspace_aligned' + (simp: crunch_simps wp: crunch_wps) +crunch distinct'[wp]: asUser pspace_distinct' + (simp: crunch_simps wp: crunch_wps) + +lemma asUser_valid_objs [wp]: + "\valid_objs'\ asUser t f \\rv. valid_objs'\" + by (simp add: asUser_def split_def) + (wpsimp wp: threadSet_valid_objs' hoare_drop_imps + simp: valid_tcb'_def tcb_cte_cases_def valid_arch_tcb'_def cteSizeBits_def + atcbContextSet_def)+ + +lemma asUser_valid_pspace'[wp]: + "\valid_pspace'\ asUser t m \\rv. 
valid_pspace'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_valid_pspace' hoare_drop_imps + simp: atcbContextSet_def valid_arch_tcb'_def)+ + done + +lemma asUser_ifunsafe'[wp]: + "\if_unsafe_then_cap'\ asUser t m \\rv. if_unsafe_then_cap'\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_ifunsafe' hoare_drop_imps | simp)+ + done + +lemma asUser_st_refs_of'[wp]: + "\\s. P (state_refs_of' s)\ + asUser t m + \\rv s. P (state_refs_of' s)\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_state_refs_of' hoare_drop_imps | simp)+ + done + +lemma asUser_st_hyp_refs_of'[wp]: + "\\s. P (state_hyp_refs_of' s)\ + asUser t m + \\rv s. P (state_hyp_refs_of' s)\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_state_hyp_refs_of' hoare_drop_imps | simp add: atcbContextSet_def atcbVCPUPtr_atcbContext_update)+ + done + +lemma asUser_iflive'[wp]: + "\if_live_then_nonz_cap'\ asUser t m \\rv. if_live_then_nonz_cap'\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_iflive' hoare_drop_imps | clarsimp | auto)+ + done + +lemma asUser_cur_tcb[wp]: + "\cur_tcb'\ asUser t m \\rv. cur_tcb'\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_cur hoare_drop_imps | simp)+ + done + +lemma asUser_cte_wp_at'[wp]: + "\cte_wp_at' P p\ asUser t m \\rv. cte_wp_at' P p\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_cte_wp_at' hoare_drop_imps | simp)+ + done + +lemma asUser_cap_to'[wp]: + "\ex_nonz_cap_to' p\ asUser t m \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma asUser_pred_tcb_at' [wp]: + "\pred_tcb_at' proj P t\ asUser t' f \\_. pred_tcb_at' proj P t\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_pred_tcb_no_state) + apply (case_tac tcb) + apply (simp add: tcb_to_itcb'_def) + apply (wpsimp wp: select_f_inv)+ + done + +crunches asUser + for ct[wp]: "\s. P (ksCurThread s)" + and cur_domain[wp]: "\s. P (ksCurDomain s)" + (simp: crunch_simps wp: hoare_drop_imps getObject_inv_tcb setObject_ct_inv) + +lemma asUser_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ asUser t m \\_. tcb_in_cur_domain' t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp | wpc | simp)+ + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | simp)+ + apply (clarsimp simp: obj_at'_def) + done + +lemma asUser_tcbDomain_inv[wp]: + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ + done + +lemma asUser_tcbPriority_inv[wp]: + "\obj_at' (\tcb. P (tcbPriority tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbPriority tcb)) t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ + done + +lemma asUser_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + asUser t m \\rv s. sch_act_wf (ksSchedulerAction s) s\" + by (wp sch_act_wf_lift) + +lemma asUser_idle'[wp]: + "\valid_idle'\ asUser t m \\rv. 
valid_idle'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_idle' select_f_inv) + done + +lemma no_fail_asUser [wp]: + "no_fail \ f \ no_fail (tcb_at' t) (asUser t f)" + apply (simp add: asUser_def split_def) + apply wp + apply (simp add: no_fail_def) + apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+ + done + +lemma asUser_setRegister_corres: + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (setRegister r v)) + (asUser t (setRegister r v))" + apply (simp add: setRegister_def) + apply (rule asUser_corres') + apply (rule corres_modify'; simp) + done + +lemma getThreadState_corres: + "corres thread_state_relation (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_thread_state t) (getThreadState t)" + apply (simp add: get_thread_state_def getThreadState_def) + apply (rule threadGet_corres) + apply (simp add: tcb_relation_def) + done + +lemma gts_wf'[wp]: "\tcb_at' t and invs'\ getThreadState t \valid_tcb_state'\" + apply (simp add: getThreadState_def threadGet_def liftM_def) + apply (wp getObject_tcb_wp) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (frule ko_at_valid_objs', fastforce, simp) + apply (fastforce simp: valid_obj'_def valid_tcb'_def) + done + +lemma gts_st_tcb_at'[wp]: "\st_tcb_at' P t\ getThreadState t \\rv s. P rv\" + apply (simp add: getThreadState_def threadGet_def liftM_def) + apply wp + apply (rule hoare_chain) + apply (rule obj_at_getObject) + apply (clarsimp simp: loadObject_default_def in_monad) + apply assumption + apply simp + apply (simp add: pred_tcb_at'_def) + done + +lemma gts_inv'[wp]: "\P\ getThreadState t \\rv. P\" + by (simp add: getThreadState_def) wp + +lemma getBoundNotification_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_bound_notification t) (getBoundNotification t)" + apply (simp add: get_bound_notification_def getBoundNotification_def) + apply (rule threadGet_corres) + apply (simp add: tcb_relation_def) + done + +lemma gbn_bound_tcb_at'[wp]: "\bound_tcb_at' P t\ getBoundNotification t \\rv s. P rv\" + apply (simp add: getBoundNotification_def threadGet_def liftM_def) + apply wp + apply (rule hoare_strengthen_post) + apply (rule obj_at_getObject) + apply (clarsimp simp: loadObject_default_def in_monad) + apply simp + apply (simp add: pred_tcb_at'_def) + done + +lemma gbn_inv'[wp]: "\P\ getBoundNotification t \\rv. P\" + by (simp add: getBoundNotification_def) wp + +lemma isStopped_def2: + "isStopped t = liftM (Not \ activatable') (getThreadState t)" + apply (unfold isStopped_def fun_app_def) + apply (fold liftM_def) + apply (rule arg_cong [where f="\f. liftM f (getThreadState t)"]) + apply (rule ext) + apply (simp split: Structures_H.thread_state.split) + done + +lemma isRunnable_def2: + "isRunnable t = liftM runnable' (getThreadState t)" + apply (simp add: isRunnable_def isStopped_def2 liftM_def) + apply (rule bind_eqI, rule ext, rule arg_cong) + apply (case_tac state) + apply (clarsimp)+ + done + +lemma isStopped_inv[wp]: + "\P\ isStopped t \\rv. P\" + by (simp add: isStopped_def2 | wp gts_inv')+ + +lemma isRunnable_inv[wp]: + "\P\ isRunnable t \\rv. P\" + by (simp add: isRunnable_def2 | wp gts_inv')+ + +lemma isRunnable_wp[wp]: + "\\s. 
Q (st_tcb_at' (runnable') t s) s\ isRunnable t \Q\" + apply (simp add: isRunnable_def2) + apply (wpsimp simp: getThreadState_def threadGet_def wp: getObject_tcb_wp) + apply (clarsimp simp: getObject_def valid_def in_monad st_tcb_at'_def + loadObject_default_def obj_at'_def + split_def objBits_simps in_magnitude_check) + done + +lemma setQueue_obj_at[wp]: + "\obj_at' P t\ setQueue d p q \\rv. obj_at' P t\" + apply (simp add: setQueue_def) + apply wp + apply (fastforce intro: obj_at'_pspaceI) + done + +lemma setQueue_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ + setQueue d p ts + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setQueue_def) + apply wp + apply simp + done + +lemma gq_wp[wp]: "\\s. Q (ksReadyQueues s (d, p)) s\ getQueue d p \Q\" + by (simp add: getQueue_def, wp) + +lemma no_fail_getQueue [wp]: + "no_fail \ (getQueue d p)" + by (simp add: getQueue_def) + +lemma no_fail_setQueue [wp]: + "no_fail \ (setQueue d p xs)" + by (simp add: setQueue_def) + +lemma in_magnitude_check': + "\ is_aligned x n; (1 :: machine_word) < 2 ^ n; ksPSpace s x = Some y; ps = ksPSpace s \ + \ ((v, s') \ fst (magnitudeCheck x (snd (lookupAround2 x ps)) n s)) = + (s' = s \ ps_clear x n s)" + by (simp add: in_magnitude_check) + +lemma cdt_relation_trans_state[simp]: + "cdt_relation (swp cte_at (trans_state f s)) m m' = cdt_relation (swp cte_at s) m m'" + by (simp add: cdt_relation_def) + + +lemma getObject_obj_at_tcb: + "\obj_at' (\t. P t t) p\ getObject p \\t::tcb. obj_at' (P t) p\" + apply (wp getObject_tcb_wp) + apply (drule obj_at_ko_at') + apply clarsimp + apply (rule exI, rule conjI, assumption) + apply (erule obj_at'_weakenE) + apply simp + done + +lemma threadGet_obj_at': + "\obj_at' (\t. P (f t) t) t\ threadGet f t \\rv. obj_at' (P rv) t\" + by (simp add: threadGet_def o_def | wp getObject_obj_at_tcb)+ + +lemma fun_if_triv[simp]: + "(\x. if x = y then f y else f x) = f" + by (force) + +lemma corres_get_etcb: + "corres (etcb_relation) (is_etcb_at t) (tcb_at' t) + (gets_the (get_etcb t)) (getObject t)" + apply (rule corres_no_failI) + apply wp + apply (clarsimp simp add: get_etcb_def gets_the_def gets_def + get_def assert_opt_def bind_def + return_def fail_def + split: option.splits + ) + apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) + apply (clarsimp simp add: is_etcb_at_def obj_at'_def projectKO_def + projectKO_opt_tcb split_def + getObject_def loadObject_default_def in_monad) + apply (case_tac bb) + apply (simp_all add: fail_def return_def) + apply (clarsimp simp add: state_relation_def ekheap_relation_def) + apply (drule bspec) + apply clarsimp + apply blast + apply (clarsimp simp add: other_obj_relation_def lookupAround2_known1) + done + + +lemma ethreadget_corres: + assumes x: "\etcb tcb'. etcb_relation etcb tcb' \ r (f etcb) (f' tcb')" + shows "corres r (is_etcb_at t) (tcb_at' t) (ethread_get f t) (threadGet f' t)" + apply (simp add: ethread_get_def threadGet_def) + apply (fold liftM_def) + apply simp + apply (rule corres_rel_imp) + apply (rule corres_get_etcb) + apply (simp add: x) + done + +lemma getQueue_corres: + "corres (\ls q. 
(ls = [] \ tcbQueueEmpty q) \ (ls \ [] \ tcbQueueHead q = Some (hd ls)) + \ queue_end_valid ls q) + \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" + apply (clarsimp simp: get_tcb_queue_def getQueue_def tcbQueueEmpty_def) + apply (rule corres_bind_return2) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]) + apply (rule corres_symb_exec_r[OF _ gets_sp]) + apply clarsimp + apply (drule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x=qdom in spec) + apply (drule_tac x=prio in spec) + apply (fastforce dest: heap_path_head) + apply wpsimp+ + done + +lemma no_fail_return: + "no_fail x (return y)" + by wp + +lemma addToBitmap_noop_corres: + "corres dc \ \ (return ()) (addToBitmap d p)" + unfolding addToBitmap_def modifyReadyQueuesL1Bitmap_def getReadyQueuesL1Bitmap_def + modifyReadyQueuesL2Bitmap_def getReadyQueuesL2Bitmap_def + by (rule corres_noop) + (wp | simp add: state_relation_def | rule no_fail_pre)+ + +lemma addToBitmap_if_null_noop_corres: (* used this way in Haskell code *) + "corres dc \ \ (return ()) (if tcbQueueEmpty queue then addToBitmap d p else return ())" + by (cases "tcbQueueHead queue", simp_all add: addToBitmap_noop_corres) + +lemma removeFromBitmap_corres_noop: + "corres dc \ \ (return ()) (removeFromBitmap tdom prioa)" + unfolding removeFromBitmap_def + by (rule corres_noop) + (wp | simp add: bitmap_fun_defs state_relation_def | rule no_fail_pre)+ + +crunch typ_at'[wp]: addToBitmap "\s. P (typ_at' T p s)" + (wp: hoare_drop_imps setCTE_typ_at') + +crunch typ_at'[wp]: removeFromBitmap "\s. P (typ_at' T p s)" + (wp: hoare_drop_imps setCTE_typ_at') + +lemmas addToBitmap_typ_ats [wp] = typ_at_lifts [OF addToBitmap_typ_at'] +lemmas removeFromBitmap_typ_ats [wp] = typ_at_lifts [OF removeFromBitmap_typ_at'] + +lemma ekheap_relation_tcb_domain_priority: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s t = Some (tcb); + ksPSpace s' t = Some (KOTCB tcb')\ + \ tcbDomain tcb' = tcb_domain tcb \ tcbPriority tcb' = tcb_priority tcb" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=t in bspec, blast) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def) + done + +lemma no_fail_thread_get[wp]: + "no_fail (tcb_at tcb_ptr) (thread_get f tcb_ptr)" + unfolding thread_get_def + apply wpsimp + apply (clarsimp simp: tcb_at_def) + done + +lemma pspace_relation_tcb_relation: + "\pspace_relation (kheap s) (ksPSpace s'); kheap s ptr = Some (TCB tcb); + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ tcb_relation tcb tcb'" + apply (clarsimp simp: pspace_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: obj_at_def obj_at'_def tcb_relation_cut_def) + done + +lemma pspace_relation_update_concrete_tcb: + "\pspace_relation s s'; s ptr = Some (TCB tcb); s' ptr = Some (KOTCB otcb'); + tcb_relation tcb tcb'\ + \ pspace_relation s (s'(ptr \ KOTCB tcb'))" + by (fastforce dest: pspace_relation_update_tcbs simp: map_upd_triv) + +lemma threadSet_pspace_relation: + fixes s :: det_state + assumes tcb_rel: "(\tcb tcb'. tcb_relation tcb tcb' \ tcb_relation tcb (F tcb'))" + shows "threadSet F tcbPtr \\s'. 
pspace_relation (kheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply normalise_obj_at' + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply (clarsimp simp: obj_at_def is_tcb_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule pspace_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def) + apply (frule (1) pspace_relation_tcb_relation) + apply (fastforce simp: obj_at'_def) + apply (fastforce dest!: tcb_rel) + done + +lemma ekheap_relation_update_tcbs: + "\ ekheap_relation (ekheap s) (ksPSpace s'); ekheap s x = Some oetcb; + ksPSpace s' x = Some (KOTCB otcb'); etcb_relation etcb tcb' \ + \ ekheap_relation ((ekheap s)(x \ etcb)) ((ksPSpace s')(x \ KOTCB tcb'))" + by (simp add: ekheap_relation_def) + +lemma ekheap_relation_update_concrete_tcb: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB otcb'); + etcb_relation etcb tcb'\ + \ ekheap_relation (ekheap s) ((ksPSpace s')(ptr \ KOTCB tcb'))" + by (fastforce dest: ekheap_relation_update_tcbs simp: map_upd_triv) + +lemma ekheap_relation_etcb_relation: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ etcb_relation etcb tcb'" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: obj_at_def obj_at'_def) + done + +lemma threadSet_ekheap_relation: + fixes s :: det_state + assumes etcb_rel: "(\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation etcb (F tcb'))" + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet F tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_tcb_def is_etcb_at_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule ekheap_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def) + apply (frule (1) ekheap_relation_etcb_relation) + apply (fastforce simp: obj_at'_def) + apply (fastforce dest!: etcb_rel) + done + +lemma tcbQueued_update_pspace_relation[wp]: + fixes s :: det_state + shows "threadSet (tcbQueued_update f) tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueued_update_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet (tcbQueued_update f) tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_ekheap_relation simp: etcb_relation_def) + +lemma tcbQueueRemove_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueRemove queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueRemove_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. 
ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueRemove queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_ekheap_relation threadSet_pspace_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma threadSet_ghost_relation[wp]: + "threadSet f tcbPtr \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s'))\" + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma removeFromBitmap_ghost_relation[wp]: + "removeFromBitmap tdom prio + \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s'))\" + by (rule_tac f=gsUserPages in hoare_lift_Pf2; wpsimp simp: bitmap_fun_defs) + +lemma tcbQueued_update_ctes_of[wp]: + "threadSet (tcbQueued_update f) t \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_of) + +lemma removeFromBitmap_ctes_of[wp]: + "removeFromBitmap tdom prio \\s. P (ctes_of s)\" + by (wpsimp simp: bitmap_fun_defs) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for ghost_relation_projs[wp]: "\s. P (gsUserPages s) (gsCNodes s) (gsPTTypes (ksArchState s))" + and ksArchState[wp]: "\s. P (ksArchState s)" + and ksWorkUnitsCompleted[wp]: "\s. P (ksWorkUnitsCompleted s)" + and ksDomainTime[wp]: "\s. P (ksDomainTime s)" + (wp: crunch_wps getObject_tcb_wp simp: setObject_def updateObject_default_def obj_at'_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for tcb_at'[wp]: "\s. tcb_at' tcbPtr s" + (wp: crunch_wps ignore: threadSet) + +lemma set_tcb_queue_projs: + "set_tcb_queue d p queue + \\s. P (kheap s) (cdt s) (is_original_cap s) (cur_thread s) (idle_thread s) (scheduler_action s) + (domain_list s) (domain_index s) (cur_domain s) (domain_time s) (machine_state s) + (interrupt_irq_node s) (interrupt_states s) (arch_state s) (caps_of_state s) + (work_units_completed s) (cdt_list s) (ekheap s)\" + by (wpsimp simp: set_tcb_queue_def) + +lemma set_tcb_queue_cte_at: + "set_tcb_queue d p queue \\s. P (swp cte_at s)\" + unfolding set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: swp_def cte_wp_at_def) + done + +lemma set_tcb_queue_projs_inv: + "fst (set_tcb_queue d p queue s) = {(r, s')} \ + kheap s = kheap s' + \ ekheap s = ekheap s' + \ cdt s = cdt s' + \ is_original_cap s = is_original_cap s' + \ cur_thread s = cur_thread s' + \ idle_thread s = idle_thread s' + \ scheduler_action s = scheduler_action s' + \ domain_list s = domain_list s' + \ domain_index s = domain_index s' + \ cur_domain s = cur_domain s' + \ domain_time s = domain_time s' + \ machine_state s = machine_state s' + \ interrupt_irq_node s = interrupt_irq_node s' + \ interrupt_states s = interrupt_states s' + \ arch_state s = arch_state s' + \ caps_of_state s = caps_of_state s' + \ work_units_completed s = work_units_completed s' + \ cdt_list s = cdt_list s' + \ swp cte_at s = swp cte_at s'" + apply (drule singleton_eqD) + by (auto elim!: use_valid_inv[where E=\, simplified] + intro: set_tcb_queue_projs set_tcb_queue_cte_at) + +lemma set_tcb_queue_new_state: + "(rv, t) \ fst (set_tcb_queue d p queue s) \ + t = s\ready_queues := \dom prio. 
if dom = d \ prio = p then queue else ready_queues s dom prio\" + by (clarsimp simp: set_tcb_queue_def in_monad) + +lemma tcbQueuePrepend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueuePrepend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueuePrepend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueuePrepend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueAppend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueAppend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueueAppend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueAppend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueInsert_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueInsert tcbPtr afterPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueInsert_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueInsert tcbPtr afterPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma removeFromBitmap_pspace_relation[wp]: + fixes s :: det_state + shows "removeFromBitmap tdom prio \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding bitmap_fun_defs + by wpsimp + +crunches setQueue, removeFromBitmap + for valid_pspace'[wp]: valid_pspace' + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node'[wp]: "\s. P (irq_node' s)" + and typ_at'[wp]: "\s. P (typ_at' T p s)" + and valid_irq_states'[wp]: valid_irq_states' + and ksInterruptState[wp]: "\s. P (ksInterruptState s)" + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and valid_machine_state'[wp]: valid_machine_state' + and cur_tcb'[wp]: cur_tcb' + and ksPSpace[wp]: "\s. P (ksPSpace s)" + (wp: crunch_wps + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def cur_tcb'_def threadSet_cur + bitmap_fun_defs valid_machine_state'_def) + +crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue, setQueue + for pspace_aligned'[wp]: pspace_aligned' + and state_refs_of'[wp]: "\s. 
P (state_refs_of' s)" + and pspace_distinct'[wp]: pspace_distinct' + and pspace_canonical'[wp]: pspace_canonical' + and no_0_obj'[wp]: no_0_obj' + and ksSchedulerAction[wp]: "\s. P (ksSchedulerAction s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node[wp]: "\s. P (irq_node' s)" + and typ_at[wp]: "\s. P (typ_at' T p s)" + and interrupt_state[wp]: "\s. P (ksInterruptState s)" + and valid_irq_state'[wp]: valid_irq_states' + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and ctes_of[wp]: "\s. P (ctes_of s)" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and ksMachineState[wp]: "\s. P (ksMachineState s)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + (wp: crunch_wps threadSet_state_refs_of'[where f'=id and g'=id] + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def bitmap_fun_defs) + +lemma threadSet_ready_queues_relation: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + \\s'. ready_queues_relation s s' \ \ (tcbQueued |< tcbs_of' s') tcbPtr\ + threadSet F tcbPtr + \\_ s'. ready_queues_relation s s'\" + supply fun_upd_apply[simp del] + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: list_queue_relation_def obj_at'_def) + apply (rename_tac tcb' d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce intro: heap_path_heap_upd_not_in + simp: inQ_def opt_map_def opt_pred_def obj_at'_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (clarsimp simp: prev_queue_head_def) + apply (prop_tac "ready_queues s d p \ []", fastforce) + apply (fastforce dest: heap_path_head simp: inQ_def opt_pred_def opt_map_def fun_upd_apply) + apply (auto simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + done + +definition in_correct_ready_q_2 where + "in_correct_ready_q_2 queues ekh \ + \d p. \t \ set (queues d p). is_etcb_at' t ekh + \ etcb_at' (\t. tcb_priority t = p \ tcb_domain t = d) t ekh" + +abbreviation in_correct_ready_q :: "det_ext state \ bool" where + "in_correct_ready_q s \ in_correct_ready_q_2 (ready_queues s) (ekheap s)" + +lemmas in_correct_ready_q_def = in_correct_ready_q_2_def + +lemma in_correct_ready_q_lift: + assumes c: "\P. \\s. P (ekheap s)\ f \\rv s. P (ekheap s)\" + assumes r: "\P. f \\s. P (ready_queues s)\" + shows "f \in_correct_ready_q\" + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +definition ready_qs_distinct :: "det_ext state \ bool" where + "ready_qs_distinct s \ \d p. distinct (ready_queues s d p)" + +lemma ready_qs_distinct_lift: + assumes r: "\P. f \\s. 
P (ready_queues s)\" + shows "f \ready_qs_distinct\" + unfolding ready_qs_distinct_def + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +lemma ready_queues_disjoint: + "\in_correct_ready_q s; ready_qs_distinct s; d \ d' \ p \ p'\ + \ set (ready_queues s d p) \ set (ready_queues s d' p') = {}" + apply (clarsimp simp: ready_qs_distinct_def in_correct_ready_q_def) + apply (rule disjointI) + apply (frule_tac x=d in spec) + apply (drule_tac x=d' in spec) + apply (fastforce simp: etcb_at_def is_etcb_at_def split: option.splits) + done + +lemma isRunnable_sp: + "\P\ + isRunnable tcb_ptr + \\rv s. \tcb'. ko_at' tcb' tcb_ptr s + \ (rv = (tcbState tcb' = Running \ tcbState tcb' = Restart)) + \ P s\" + unfolding isRunnable_def getThreadState_def + apply (wpsimp wp: hoare_case_option_wp getObject_tcb_wp simp: threadGet_def) + apply (fastforce simp: obj_at'_def split: Structures_H.thread_state.splits) + done + +crunch (no_fail) no_fail[wp]: isRunnable + +defs ksReadyQueues_asrt_def: + "ksReadyQueues_asrt + \ \s'. \d p. \ts. ready_queue_relation d p ts (ksReadyQueues s' (d, p)) + (tcbSchedNexts_of s') (tcbSchedPrevs_of s') + (inQ d p |< tcbs_of' s')" + +lemma ksReadyQueues_asrt_cross: + "ready_queues_relation s s' \ ksReadyQueues_asrt s'" + by (fastforce simp: ready_queues_relation_def Let_def ksReadyQueues_asrt_def) + +crunches addToBitmap + for ko_at'[wp]: "\s. P (ko_at' ko ptr s)" + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueues_asrt[wp]: ksReadyQueues_asrt + and st_tcb_at'[wp]: "\s. P (st_tcb_at' Q tcbPtr s)" + and valid_tcbs'[wp]: valid_tcbs' + (simp: bitmap_fun_defs ksReadyQueues_asrt_def) + +lemma tcbQueueHead_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts\ + \ \ tcbQueueEmpty queue \ (inQ d p |< tcbs_of' s') (the (tcbQueueHead queue))" + by (fastforce dest: heap_path_head + simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def) + +lemma obj_at'_tcbQueueHead_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueHead queue)) s'" + by (fastforce dest!: tcbQueueHead_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma tcbQueueHead_iff_tcbQueueEnd: + "list_queue_relation ts q nexts prevs \ tcbQueueHead q \ None \ tcbQueueEnd q \ None" + apply (clarsimp simp: list_queue_relation_def queue_end_valid_def) + using heap_path_None + apply fastforce + done + +lemma tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts\ + \ \ tcbQueueEmpty queue \ (inQ d p |< tcbs_of' s') (the (tcbQueueEnd queue))" + apply (frule tcbQueueHead_iff_tcbQueueEnd) + by (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def) + +lemma obj_at'_tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. 
(inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueEnd queue)) s'" + by (fastforce dest!: tcbQueueEnd_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma thread_get_exs_valid[wp]: + "tcb_at tcb_ptr s \ \(=) s\ thread_get f tcb_ptr \\\_. (=) s\" + by (clarsimp simp: thread_get_def get_tcb_def gets_the_def gets_def return_def get_def + exs_valid_def tcb_at_def bind_def) + +lemma ethread_get_sp: + "\P\ ethread_get f ptr + \\rv. etcb_at (\tcb. f tcb = rv) ptr and P\" + apply wpsimp + apply (clarsimp simp: etcb_at_def split: option.splits) + done + +lemma ethread_get_exs_valid[wp]: + "\tcb_at tcb_ptr s; valid_etcbs s\ \ \(=) s\ ethread_get f tcb_ptr \\\_. (=) s\" + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: ethread_get_def get_etcb_def gets_the_def gets_def return_def get_def + is_etcb_at_def exs_valid_def bind_def) + done + +lemma no_fail_ethread_get[wp]: + "no_fail (tcb_at tcb_ptr and valid_etcbs) (ethread_get f tcb_ptr)" + unfolding ethread_get_def + apply wpsimp + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: is_etcb_at_def get_etcb_def) + done + +lemma threadGet_sp: + "\P\ threadGet f ptr \\rv s. \tcb :: tcb. ko_at' tcb ptr s \ f tcb = rv \ P s\" + unfolding threadGet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma in_set_ready_queues_inQ_eq: + "ready_queues_relation s s' \ t \ set (ready_queues s d p) \ (inQ d p |< tcbs_of' s') t" + by (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + +lemma in_ready_q_tcbQueued_eq: + "ready_queues_relation s s' + \ (\d p. t \ set (ready_queues s d p)) \ (tcbQueued |< tcbs_of' s') t" + apply (intro iffI) + apply clarsimp + apply (frule in_set_ready_queues_inQ_eq) + apply (fastforce simp: inQ_def opt_map_def opt_pred_def split: option.splits) + apply (fastforce simp: ready_queues_relation_def ready_queue_relation_def Let_def inQ_def + opt_pred_def + split: option.splits) + done + +lemma tcbSchedEnqueue_corres: + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_enqueue tcb_ptr) (tcbSchedEnqueue tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (clarsimp simp: tcb_sched_action_def tcb_sched_enqueue_def get_tcb_queue_def + tcbSchedEnqueue_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) 
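+    \<comment> \<open>the ksReadyQueues_asrt stateAssert in tcbSchedEnqueue holds in any state related to the
+        abstract one (ksReadyQueues_asrt_cross), so it can be discharged and ignored here\<close>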
+ apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce + apply clarsimp + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) + + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. P'" and P'=P' for P P']) + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueuePrepend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueHead_ksReadyQueues simp: obj_at'_def) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) 
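+    \<comment> \<open>of the state_relation conjuncts, only ready_queues_relation is affected by the queue
+        update; the remaining projections are unchanged by set_tcb_queue (set_tcb_queue_projs_inv)\<close>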
+ + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp simp: setQueue_def tcbQueuePrepend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" and s'=s' + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply auto[1] + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" and st="tcbQueueHead (ksReadyQueues s' (d, p))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (cut_tac xs="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + and st="tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "\ (d = tcb_domain etcb \ p = tcb_priority etcb)") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def opt_pred_def opt_map_def) + apply (metis inQ_def option.simps(5) tcb_of'_TCB) + apply (intro conjI impI; simp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + force simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: tcbQueueEmpty_def) + apply 
(case_tac "t = tcbPtr") + apply (clarsimp simp: inQ_def fun_upd_apply obj_at'_def split: if_splits) + apply (case_tac "t = the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def obj_at'_def fun_upd_apply + split: option.splits) + apply metis + apply (clarsimp simp: inQ_def in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain etcb \ p = tcb_priority etcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def prev_queue_head_def + opt_map_red obj_at'_def + split: if_splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_prepend[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def obj_at'_def fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def split: if_splits) + by (auto dest!: hd_in_set simp: inQ_def in_opt_pred opt_map_def fun_upd_apply + split: if_splits option.splits) + +definition + weak_sch_act_wf :: "scheduler_action \ kernel_state \ bool" +where + "weak_sch_act_wf sa = (\s. \t. 
sa = SwitchToThread t \ st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s)" + +lemma weak_sch_act_wf_updateDomainTime[simp]: + "weak_sch_act_wf m (ksDomainTime_update f s) = weak_sch_act_wf m s" + by (simp add:weak_sch_act_wf_def tcb_in_cur_domain'_def ) + +lemma setSchedulerAction_corres: + "sched_act_relation sa sa' + \ corres dc \ \ (set_scheduler_action sa) (setSchedulerAction sa')" + apply (simp add: setSchedulerAction_def set_scheduler_action_def) + apply (rule corres_no_failI) + apply wp + apply (clarsimp simp: in_monad simpler_modify_def state_relation_def) + done + +lemma getSchedulerAction_corres: + "corres sched_act_relation \ \ (gets scheduler_action) getSchedulerAction" + apply (simp add: getSchedulerAction_def) + apply (clarsimp simp: state_relation_def) + done + +lemma rescheduleRequired_corres: + "corres dc + (weak_valid_sched_action and in_correct_ready_q and ready_qs_distinct and valid_etcbs + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (reschedule_required) rescheduleRequired" + apply (simp add: rescheduleRequired_def reschedule_required_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getSchedulerAction_corres]) + apply (rule_tac P="case action of switch_thread t \ P t | _ \ \" + and P'="case actiona of SwitchToThread t \ P' t | _ \ \" for P P' + in corres_split[where r'=dc]) + apply (case_tac action) + apply simp + apply simp + apply (rule tcbSchedEnqueue_corres, simp) + apply simp + apply (rule setSchedulerAction_corres) + apply simp + apply (wp | wpc | simp)+ + apply (force dest: st_tcb_weakenE simp: in_monad weak_valid_sched_action_def valid_etcbs_def st_tcb_at_def obj_at_def is_tcb + split: Deterministic_A.scheduler_action.split) + apply (clarsimp split: scheduler_action.splits) + done + +lemma rescheduleRequired_corres_simple: + "corres dc \ sch_act_simple + (set_scheduler_action choose_new_thread) rescheduleRequired" + apply (simp add: rescheduleRequired_def) + apply (rule corres_symb_exec_r[where Q'="\rv s. rv = ResumeCurrentThread \ rv = ChooseNewThread"]) + apply (rule corres_symb_exec_r) + apply (rule setSchedulerAction_corres, simp) + apply (wp | clarsimp split: scheduler_action.split)+ + apply (wp | clarsimp simp: sch_act_simple_def split: scheduler_action.split)+ + apply (simp add: getSchedulerAction_def) + done + +lemma weak_sch_act_wf_lift: + assumes pre: "\P. \\s. P (sa s)\ f \\rv s. P (sa s)\" + "\t. \st_tcb_at' runnable' t\ f \\rv. st_tcb_at' runnable' t\" + "\t. \tcb_in_cur_domain' t\ f \\rv. tcb_in_cur_domain' t\" + shows "\\s. weak_sch_act_wf (sa s) s\ f \\rv s. weak_sch_act_wf (sa s) s\" + apply (simp only: weak_sch_act_wf_def imp_conv_disj) + apply (intro hoare_vcg_all_lift hoare_vcg_conj_lift hoare_vcg_disj_lift pre | simp)+ + done + +lemma asUser_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + asUser t m \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + by (wp weak_sch_act_wf_lift) + +lemma doMachineOp_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + doMachineOp m \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + by (simp add: doMachineOp_def split_def tcb_in_cur_domain'_def | wp weak_sch_act_wf_lift)+ + +lemma weak_sch_act_wf_setQueue[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s \ + setQueue qdom prio queue + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s \" + by (simp add: setQueue_def weak_sch_act_wf_def tcb_in_cur_domain'_def | wp)+ + +lemma threadSet_tcbDomain_triv: + assumes "\tcb. 
tcbDomain (f tcb) = tcbDomain tcb" + shows "\tcb_in_cur_domain' t'\ threadSet f t \\_. tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | simp add: assms)+ + done + +lemmas threadSet_weak_sch_act_wf + = weak_sch_act_wf_lift[OF threadSet_nosch threadSet_pred_tcb_no_state threadSet_tcbDomain_triv, simplified] + +lemma removeFromBitmap_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ removeFromBitmap d p \\rv s. P (ksSchedulerAction s)\" + unfolding removeFromBitmap_def + by (simp add: bitmap_fun_defs|wp setObject_nosch)+ + +lemma addToBitmap_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ addToBitmap d p \\rv s. P (ksSchedulerAction s)\" + unfolding addToBitmap_def + by (simp add: bitmap_fun_defs|wp setObject_nosch)+ + +lemmas removeFromBitmap_weak_sch_act_wf[wp] + = weak_sch_act_wf_lift[OF removeFromBitmap_nosch] + +lemmas addToBitmap_weak_sch_act_wf[wp] + = weak_sch_act_wf_lift[OF addToBitmap_nosch] + +crunch st_tcb_at'[wp]: removeFromBitmap "st_tcb_at' P t" +crunch pred_tcb_at'[wp]: removeFromBitmap "\s. Q (pred_tcb_at' proj P t s)" + +crunch not_st_tcb_at'[wp]: removeFromBitmap "\s. \ (st_tcb_at' P' t) s" + +crunch st_tcb_at'[wp]: addToBitmap "st_tcb_at' P' t" +crunch pred_tcb_at'[wp]: addToBitmap "\s. Q (pred_tcb_at' proj P t s)" + +crunch not_st_tcb_at'[wp]: addToBitmap "\s. \ (st_tcb_at' P' t) s" + +crunch obj_at'[wp]: removeFromBitmap "\s. Q (obj_at' P t s)" + +crunch obj_at'[wp]: addToBitmap "\s. Q (obj_at' P t s)" + +lemma removeFromBitmap_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ removeFromBitmap tdom prio \\ya. tcb_in_cur_domain' t\" + unfolding tcb_in_cur_domain'_def removeFromBitmap_def + apply (rule_tac f="\s. ksCurDomain s" in hoare_lift_Pf) + apply (wp setObject_cte_obj_at_tcb' | simp add: bitmap_fun_defs)+ + done + +lemma addToBitmap_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ addToBitmap tdom prio \\ya. tcb_in_cur_domain' t\" + unfolding tcb_in_cur_domain'_def addToBitmap_def + apply (rule_tac f="\s. ksCurDomain s" in hoare_lift_Pf) + apply (wp setObject_cte_obj_at_tcb' | simp add: bitmap_fun_defs)+ + done + +lemma tcbSchedDequeue_weak_sch_act_wf[wp]: + "tcbSchedDequeue tcbPtr \\s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wp threadSet_weak_sch_act_wf getObject_tcb_wp removeFromBitmap_weak_sch_act_wf + | simp add: crunch_simps threadGet_def)+ + apply (clarsimp simp: obj_at'_def) + done + +lemma dequeue_nothing_eq[simp]: + "t \ set list \ tcb_sched_dequeue t list = list" + apply (clarsimp simp: tcb_sched_dequeue_def) + apply (induct list) + apply simp + apply clarsimp + done + +lemma gets_the_exec: "f s \ None \ (do x \ gets_the f; g x od) s = g (the (f s)) s" + apply (clarsimp simp add: gets_the_def bind_def gets_def get_def + return_def assert_opt_def) + done + +lemma tcbQueueRemove_no_fail: + "no_fail (\s. tcb_at' tcbPtr s + \ (\ts. 
list_queue_relation ts queue (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ sym_heap_sched_pointers s \ valid_objs' s) + (tcbQueueRemove queue tcbPtr)" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (frule (1) ko_at_valid_objs') + apply fastforce + apply (clarsimp simp: list_queue_relation_def) + apply (prop_tac "tcbQueueHead queue \ Some tcbPtr \ tcbSchedPrevs_of s tcbPtr \ None") + apply (rule impI) + apply (frule not_head_prev_not_None[where p=tcbPtr]) + apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (fastforce dest: heap_path_head) + apply fastforce + apply (fastforce simp: opt_map_def obj_at'_def valid_tcb'_def valid_bound_tcb'_def) + by (fastforce dest!: not_last_next_not_None[where p=tcbPtr] + simp: queue_end_valid_def opt_map_def obj_at'_def valid_obj'_def valid_tcb'_def) + +crunch (no_fail) no_fail[wp]: removeFromBitmap + +crunches removeFromBitmap + for ready_queues_relation[wp]: "ready_queues_relation s" + and list_queue_relation[wp]: + "\s'. list_queue_relation ts (P (ksReadyQueues s')) + (tcbSchedNexts_of s') (tcbSchedPrevs_of s')" + (simp: bitmap_fun_defs ready_queues_relation_def) + +\ \ + A direct analogue of tcbQueueRemove, used in tcb_sched_dequeue' below, so that within the proof of + tcbQueueRemove_corres, we may reason in terms of the list operations used within this function + rather than @{term filter}.\ +definition tcb_queue_remove :: "'a \ 'a list \ 'a list" where + "tcb_queue_remove a ls \ + if ls = [a] + then [] + else if a = hd ls + then tl ls + else if a = last ls + then butlast ls + else list_remove ls a" + +definition tcb_sched_dequeue' :: "obj_ref \ unit det_ext_monad" where + "tcb_sched_dequeue' tcb_ptr \ do + d \ ethread_get tcb_domain tcb_ptr; + prio \ ethread_get tcb_priority tcb_ptr; + queue \ get_tcb_queue d prio; + when (tcb_ptr \ set queue) $ set_tcb_queue d prio (tcb_queue_remove tcb_ptr queue) + od" + +lemma filter_tcb_queue_remove: + "\a \ set ls; distinct ls \ \ filter ((\) a) ls = tcb_queue_remove a ls" + apply (clarsimp simp: tcb_queue_remove_def) + apply (intro conjI impI) + apply (fastforce elim: filter_hd_equals_tl) + apply (fastforce elim: filter_last_equals_butlast) + apply (fastforce elim: filter_hd_equals_tl) + apply (frule split_list) + apply (clarsimp simp: list_remove_middle_distinct) + apply (subst filter_True | clarsimp simp: list_remove_none)+ + done + +lemma tcb_sched_dequeue_monadic_rewrite: + "monadic_rewrite False True (is_etcb_at t and (\s. \d p. distinct (ready_queues s d p))) + (tcb_sched_action tcb_sched_dequeue t) (tcb_sched_dequeue' t)" + supply if_split[split del] + apply (clarsimp simp: tcb_sched_dequeue'_def tcb_sched_dequeue_def tcb_sched_action_def + set_tcb_queue_def) + apply (rule monadic_rewrite_bind_tail)+ + apply (clarsimp simp: when_def) + apply (rule monadic_rewrite_if_r) + apply (rule_tac P="\_. distinct queue" in monadic_rewrite_guard_arg_cong) + apply (frule (1) filter_tcb_queue_remove) + apply (metis (mono_tags, lifting) filter_cong) + apply (rule monadic_rewrite_modify_noop) + apply (wpsimp wp: thread_get_wp)+ + apply (clarsimp simp: etcb_at_def split: option.splits) + apply (prop_tac "(\d' p. if d' = tcb_domain x2 \ p = tcb_priority x2 + then filter (\x. 
x \ t) (ready_queues s (tcb_domain x2) (tcb_priority x2)) + else ready_queues s d' p) + = ready_queues s") + apply (subst filter_True) + apply fastforce + apply (clarsimp intro!: ext split: if_splits) + apply fastforce + done + +crunches removeFromBitmap + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + +lemma list_queue_relation_neighbour_in_set: + "\list_queue_relation ls q hp hp'; sym_heap hp hp'; p \ set ls\ + \ \nbr. (hp p = Some nbr \ nbr \ set ls) \ (hp' p = Some nbr \ nbr \ set ls)" + apply (rule heap_ls_neighbour_in_set) + apply (fastforce simp: list_queue_relation_def) + apply fastforce + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def) + apply fastforce + done + +lemma in_queue_not_head_or_not_tail_length_gt_1: + "\tcbPtr \ set ls; tcbQueueHead q \ Some tcbPtr \ tcbQueueEnd q \ Some tcbPtr; + list_queue_relation ls q nexts prevs\ + \ Suc 0 < length ls" + apply (clarsimp simp: list_queue_relation_def) + apply (cases ls; fastforce simp: queue_end_valid_def) + done + +lemma tcbSchedDequeue_corres: + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and tcb_at tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_objs') + (tcb_sched_action tcb_sched_dequeue tcb_ptr) (tcbSchedDequeue tcbPtr)" + supply heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + list_remove_append[simp del] + apply (rule_tac Q'="tcb_at' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: tcb_at_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (rule monadic_rewrite_guard_imp[OF tcb_sched_dequeue_monadic_rewrite]) + apply (fastforce dest: tcb_at_is_etcb_at simp: in_correct_ready_q_def ready_qs_distinct_def) + apply (clarsimp simp: tcb_sched_dequeue'_def get_tcb_queue_def tcbSchedDequeue_def getQueue_def + unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac dom) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac prio) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_if_strong'; fastforce?) + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + apply (fastforce simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; wpsimp?) 
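+    \<comment> \<open>reduce the remaining corres goal to showing that the concrete queue update re-establishes
+        the state relation: the abstract set_tcb_queue is a deterministic modify (det_wp_modify),
+        and the concrete removal cannot fail on a well-formed queue (tcbQueueRemove_no_fail)\<close>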
+ apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp wp: tcbQueueRemove_no_fail) + apply (fastforce dest: state_relation_ready_queues_relation + simp: ex_abs_underlying_def ready_queues_relation_def ready_queue_relation_def + Let_def inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) + + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp + simp: setQueue_def tcbQueueRemove_def + split_del: if_split) + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply normalise_obj_at' + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply clarsimp + apply (cut_tac p=tcbPtr and ls="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_neighbour_in_set) + apply (fastforce dest!: spec) + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: ready_queues_relation_def Let_def list_queue_relation_def) + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply fast + apply (clarsimp simp: tcbQueueEmpty_def) + apply (prop_tac "Some tcbPtr \ tcbQueueHead (ksReadyQueues s' (d, p))") + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply blast + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI; clarsimp) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (force intro!: heap_path_heap_upd_not_in simp: fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (force simp: prev_queue_head_heap_upd fun_upd_apply) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (force simp: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply assumption + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply split: if_splits) + apply (force simp: not_emptyI opt_map_red) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + 
apply (force simp: prev_queue_head_def fun_upd_apply opt_map_red opt_map_upd_triv) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply (force simp: not_emptyI opt_map_red) + apply fastforce + apply (clarsimp simp: opt_map_red opt_map_upd_triv) + apply (intro prev_queue_head_heap_upd) + apply (force dest!: spec) + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply fastforce + subgoal + by (clarsimp simp: inQ_def opt_map_def opt_pred_def fun_upd_apply + split: if_splits option.splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (frule heap_path_head') + apply (frule heap_ls_distinct) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply prev_queue_head_def) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: tcb_queue_remove_def inQ_def opt_pred_def fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fastforce + apply (fastforce simp: list_queue_relation_def) + apply (frule list_not_head) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_tail_nonempty) + apply (frule (2) heap_ls_next_of_hd) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI allI) + apply (drule (1) heap_ls_remove_head_not_singleton) + apply (clarsimp simp: opt_map_red opt_map_upd_triv fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply last_tl) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fast + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: queue_end_valid_def) + apply (frule list_not_last) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_gt_1_imp_butlast_nonempty) + apply (frule (3) heap_ls_prev_of_last) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI; clarsimp?) 
+ apply (drule (1) heap_ls_remove_last_not_singleton) + apply (force elim!: rsubst3[where P=heap_ls] simp: opt_map_def fun_upd_apply) + apply (clarsimp simp: opt_map_def fun_upd_apply) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (meson distinct_in_butlast_not_last in_set_butlastD last_in_set not_last_in_set_butlast) + + \ \tcbPtr is in the middle of the ready queue\ + apply (clarsimp simp: obj_at'_def) + apply (frule set_list_mem_nonempty) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []", fastforce simp: queue_end_valid_def) + apply clarsimp + apply (frule (2) ptr_in_middle_prev_next) + apply fastforce + apply (clarsimp simp: tcb_queue_remove_def) + apply (prop_tac "tcbPtr \ last xs") + apply (clarsimp simp: distinct_append) + apply (prop_tac "tcbPtr \ hd ys") + apply (fastforce dest: hd_in_set simp: distinct_append) + apply (prop_tac "last xs \ hd ys") + apply (metis distinct_decompose2 hd_Cons_tl last_in_set) + apply (prop_tac "list_remove (xs @ tcbPtr # ys) tcbPtr = xs @ ys") + apply (simp add: list_remove_middle_distinct del: list_remove_append) + apply (intro conjI impI allI; (solves \clarsimp simp: distinct_append\)?) + apply (fastforce elim!: rsubst3[where P=heap_ls] + dest!: heap_ls_remove_middle hd_in_set last_in_set + simp: distinct_append not_emptyI opt_map_def fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (case_tac xs; + fastforce simp: prev_queue_head_def opt_map_def fun_upd_apply distinct_append) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply distinct_append + split: option.splits) + done + +lemma thread_get_test: "do cur_ts \ get_thread_state cur; g (test cur_ts) od = + do t \ (thread_get (\tcb. test (tcb_state tcb)) cur); g t od" + apply (simp add: get_thread_state_def thread_get_def) + done + +lemma thread_get_isRunnable_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (\tcb. 
runnable (tcb_state tcb)) t) (isRunnable t)" + apply (simp add: isRunnable_def getThreadState_def threadGet_def + thread_get_def) + apply (fold liftM_def) + apply simp + apply (rule corres_rel_imp) + apply (rule getObject_TCB_corres) + apply (clarsimp simp add: tcb_relation_def thread_state_relation_def) + apply (case_tac "tcb_state x",simp_all) + done + +lemma setThreadState_corres: + "thread_state_relation ts ts' \ + corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (set_thread_state t ts) (setThreadState ts' t)" + (is "?tsr \ corres dc ?Pre ?Pre' ?sts ?sts'") + apply (simp add: set_thread_state_def setThreadState_def) + apply (simp add: set_thread_state_ext_def[abs_def]) + apply (subst bind_assoc[symmetric], subst thread_set_def[simplified, symmetric]) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'=dc]) + apply (rule threadset_corres, (simp add: tcb_relation_def exst_same_def)+) + apply (subst thread_get_test[where test="runnable"]) + apply (rule corres_split[OF thread_get_isRunnable_corres]) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split[OF getSchedulerAction_corres]) + apply (simp only: when_def) + apply (rule corres_if[where Q=\ and Q'=\]) + apply (rule iffI) + apply clarsimp+ + apply (case_tac rva,simp_all)[1] + apply (wp rescheduleRequired_corres_simple corres_return_trivial | simp)+ + apply (wp hoare_vcg_conj_lift[where Q'="\\"] | simp add: sch_act_simple_def)+ + done + +lemma setBoundNotification_corres: + "corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (set_bound_notification t ntfn) (setBoundNotification ntfn t)" + apply (simp add: set_bound_notification_def setBoundNotification_def) + apply (subst thread_set_def[simplified, symmetric]) + apply (rule threadset_corres, simp_all add:tcb_relation_def exst_same_def) + done + +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for tcb'[wp]: "tcb_at' addr" + +lemma tcbSchedNext_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedNext_update (\_. ptrOpt)) tcbPtr + \\_. valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedPrev_update (\_. ptrOpt)) tcbPtr + \\_. valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueuePrepend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. valid_objs'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift' simp: tcbQueueEmpty_def) + +crunches addToBitmap + for valid_objs'[wp]: valid_objs' + (simp: unless_def crunch_simps wp: crunch_wps) + +lemma tcbSchedEnqueue_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. 
valid_objs'\" + unfolding tcbSchedEnqueue_def setQueue_def + apply (wpsimp wp: threadSet_valid_objs' getObject_tcb_wp simp: threadGet_def) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + +crunches rescheduleRequired, removeFromBitmap + for valid_objs'[wp]: valid_objs' + (simp: crunch_simps) + +lemmas ko_at_valid_objs'_pre = + ko_at_valid_objs'[simplified project_inject, atomized, simplified, rule_format] + +lemmas ep_ko_at_valid_objs_valid_ep' = + ko_at_valid_objs'_pre[where 'a=endpoint, simplified injectKO_defs valid_obj'_def, simplified] + +lemmas ntfn_ko_at_valid_objs_valid_ntfn' = + ko_at_valid_objs'_pre[where 'a=notification, simplified injectKO_defs valid_obj'_def, + simplified] + +lemmas tcb_ko_at_valid_objs_valid_tcb' = + ko_at_valid_objs'_pre[where 'a=tcb, simplified injectKO_defs valid_obj'_def, simplified] + +lemma tcbQueueRemove_valid_objs'[wp]: + "tcbQueueRemove queue tcbPtr \valid_objs'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (fastforce dest!: tcb_ko_at_valid_objs_valid_tcb' + simp: valid_tcb'_def valid_bound_tcb'_def obj_at'_def) + done + +lemma tcbSchedDequeue_valid_objs'[wp]: + "tcbSchedDequeue t \valid_objs'\" + unfolding tcbSchedDequeue_def setQueue_def + by (wpsimp wp: threadSet_valid_objs') + +lemma sts_valid_objs': + "\valid_objs' and valid_tcb_state' st and pspace_aligned' and pspace_distinct'\ + setThreadState st t + \\_. valid_objs'\" + apply (wpsimp simp: setThreadState_def wp: threadSet_valid_objs') + apply (rule_tac Q="\_. valid_objs' and pspace_aligned' and pspace_distinct'" in hoare_post_imp) + apply fastforce + apply (wpsimp wp: threadSet_valid_objs') + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma sbn_valid_objs': + "\valid_objs' and valid_bound_ntfn' ntfn\ + setBoundNotification ntfn t + \\rv. valid_objs'\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_valid_objs') + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma ssa_wp[wp]: + "\\s. P (s \ksSchedulerAction := sa\)\ setSchedulerAction sa \\_. P\" + by (wpsimp simp: setSchedulerAction_def) + +crunches rescheduleRequired, tcbSchedDequeue + for aligned'[wp]: "pspace_aligned'" + and distinct'[wp]: "pspace_distinct'" + and ctes_of[wp]: "\s. P (ctes_of s)" + and no_0_obj'[wp]: "no_0_obj'" + and pspace_canonical'[wp]: pspace_canonical' + +lemma sts'_valid_pspace'_inv[wp]: + "\ valid_pspace' and tcb_at' t and valid_tcb_state' st \ + setThreadState st t + \ \rv. valid_pspace' \" + apply (simp add: valid_pspace'_def) + apply (rule hoare_pre) + apply (wp sts_valid_objs') + apply (simp add: setThreadState_def threadSet_def + setQueue_def bind_assoc valid_mdb'_def) + apply (wp getObject_obj_at_tcb | simp)+ + apply (clarsimp simp: valid_mdb'_def) + apply (drule obj_at_ko_at') + apply clarsimp + apply (erule obj_at'_weakenE) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + done + +crunch ct[wp]: setQueue "\s. P (ksCurThread s)" + +crunch cur_domain[wp]: setQueue "\s. P (ksCurDomain s)" + +crunch ct'[wp]: addToBitmap "\s. P (ksCurThread s)" +crunch ct'[wp]: removeFromBitmap "\s. P (ksCurThread s)" + +lemma setQueue_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ setQueue d p xs \\_. 
tcb_in_cur_domain' t\" + apply (simp add: setQueue_def tcb_in_cur_domain'_def) + apply wp + apply (simp add: ps_clear_def obj_at'_def) + done + +lemma sbn'_valid_pspace'_inv[wp]: + "\ valid_pspace' and tcb_at' t and valid_bound_ntfn' ntfn \ + setBoundNotification ntfn t + \ \rv. valid_pspace' \" + apply (simp add: valid_pspace'_def) + apply (rule hoare_pre) + apply (wp sbn_valid_objs') + apply (simp add: setBoundNotification_def threadSet_def bind_assoc valid_mdb'_def) + apply (wp getObject_obj_at_tcb | simp)+ + apply (clarsimp simp: valid_mdb'_def) + apply (drule obj_at_ko_at') + apply clarsimp + apply (erule obj_at'_weakenE) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + done + +crunch pred_tcb_at'[wp]: setQueue "\s. P (pred_tcb_at' proj P' t s)" + +lemma setQueue_sch_act: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setQueue d p xs + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + by (wp sch_act_wf_lift) + +lemma setQueue_valid_bitmapQ_except[wp]: + "\ valid_bitmapQ_except d p \ + setQueue d p ts + \\_. valid_bitmapQ_except d p \" + unfolding setQueue_def bitmapQ_defs + by (wp, clarsimp simp: bitmapQ_def) + +lemma setQueue_cur: + "\\s. cur_tcb' s\ setQueue d p ts \\rv s. cur_tcb' s\" + unfolding setQueue_def cur_tcb'_def + by (wp, clarsimp) + +lemma ssa_sch_act[wp]: + "\sch_act_wf sa\ setSchedulerAction sa + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + by (simp add: setSchedulerAction_def | wp)+ + +lemma threadSet_runnable_sch_act: + "(\tcb. runnable' (tcbState (F tcb)) \ tcbDomain (F tcb) = tcbDomain tcb \ tcbPriority (F tcb) = tcbPriority tcb) \ + \\s. sch_act_wf (ksSchedulerAction s) s\ + threadSet F t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (clarsimp simp: valid_def) + apply (frule_tac P1="(=) (ksSchedulerAction s)" + in use_valid [OF _ threadSet_nosch], + rule refl) + apply (frule_tac P1="(=) (ksCurThread s)" + in use_valid [OF _ threadSet_ct], + rule refl) + apply (frule_tac P1="(=) (ksCurDomain s)" + in use_valid [OF _ threadSet_cd], + rule refl) + apply (case_tac "ksSchedulerAction b", + simp_all add: sch_act_simple_def ct_in_state'_def pred_tcb_at'_def) + apply (drule_tac t'1="ksCurThread s" + and P1="activatable' \ tcbState" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp elim!: obj_at'_weakenE) + apply (simp add: o_def) + apply (rename_tac word) + apply (rule conjI) + apply (frule_tac t'1=word + and P1="runnable' \ tcbState" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp elim!: obj_at'_weakenE, clarsimp simp: obj_at'_def) + apply (simp add: tcb_in_cur_domain'_def) + apply (frule_tac t'1=word + and P1="\tcb. ksCurDomain b = tcbDomain tcb" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp simp: o_def tcb_in_cur_domain'_def) + apply clarsimp + done + +lemma threadSet_pred_tcb_at_state: + "\\s. tcb_at' t s \ (if p = t + then obj_at' (\tcb. P (proj (tcb_to_itcb' (f tcb)))) t s + else pred_tcb_at' proj P p s)\ + threadSet f t \\_. pred_tcb_at' proj P p\" + apply (rule hoare_chain) + apply (rule threadSet_obj_at'_really_strongest) + prefer 2 + apply (simp add: pred_tcb_at'_def) + apply (clarsimp split: if_splits simp: pred_tcb_at'_def o_def) + done + +lemma threadSet_tcbDomain_triv': + "\tcb_in_cur_domain' t' and K (t \ t')\ threadSet f t \\_. 
tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule hoare_assume_pre) + apply simp + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | simp)+ + done + +lemma threadSet_sch_act_wf: + "\\s. sch_act_wf (ksSchedulerAction s) s \ sch_act_not t s \ + (ksCurThread s = t \ \(\tcb. activatable' (tcbState (F tcb))) \ + ksSchedulerAction s \ ResumeCurrentThread) \ + threadSet F t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (rule hoare_lift_Pf2 [where f=ksSchedulerAction]) + prefer 2 + apply wp + apply (case_tac x, simp_all) + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf2 [where f=ksCurThread]) + prefer 2 + apply wp[1] + apply (wp threadSet_pred_tcb_at_state) + apply clarsimp + apply wp + apply (clarsimp) + apply (wp threadSet_pred_tcb_at_state threadSet_tcbDomain_triv' | clarsimp)+ + done + +lemma rescheduleRequired_sch_act'[wp]: + "\\\ + rescheduleRequired + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp)+ + done + +lemma setObject_queued_pred_tcb_at'[wp]: + "\pred_tcb_at' proj P t' and obj_at' ((=) tcb) t\ + setObject t (tcbQueued_update f tcb) + \\_. pred_tcb_at' proj P t'\" + apply (simp add: pred_tcb_at'_def) + apply (rule hoare_pre) + apply (wp setObject_tcb_strongest) + apply (clarsimp simp: obj_at'_def tcb_to_itcb'_def) + done + +lemma setObject_queued_ct_activatable'[wp]: + "\ct_in_state' activatable' and obj_at' ((=) tcb) t\ + setObject t (tcbQueued_update f tcb) + \\_. ct_in_state' activatable'\" + apply (clarsimp simp: ct_in_state'_def pred_tcb_at'_def) + apply (rule hoare_pre) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest) + apply (clarsimp simp: obj_at'_def) + done + +lemma threadSet_queued_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + threadSet (tcbQueued_update f) t + \\_ s. sch_act_wf (ksSchedulerAction s) s\" + including classic_wp_pre + apply (simp add: sch_act_wf_cases + split: scheduler_action.split) + apply (wp hoare_vcg_conj_lift) + apply (simp add: threadSet_def) + apply (wp hoare_weak_lift_imp) + apply (wps setObject_sa_unchanged) + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ + apply (clarsimp simp: obj_at'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ + apply (simp add: threadSet_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + apply (wp tcb_in_cur_domain'_lift | simp add: obj_at'_def)+ + done + +lemma tcbSchedNext_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedNext_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + +lemma tcbSchedPrev_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedPrev_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + +lemma tcbSchedEnqueue_pred_tcb_at'[wp]: + "\\s. pred_tcb_at' proj P' t' s \ tcbSchedEnqueue t \\_ s. pred_tcb_at' proj P' t' s\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def when_def unless_def) + apply (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + done + +lemma tcbSchedDequeue_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + tcbSchedDequeue t + \\_ s. 
sch_act_wf (ksSchedulerAction s) s\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wp setQueue_sch_act threadSet_tcbDomain_triv hoare_drop_imps + | wp sch_act_wf_lift | simp add: if_apply_def2)+ + +crunch nosch: tcbSchedDequeue "\s. P (ksSchedulerAction s)" + +lemma sts_sch_act': + "\\s. (\ runnable' st \ sch_act_not t s) \ sch_act_wf (ksSchedulerAction s) s\ + setThreadState st t \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setThreadState_def) + apply (wp | simp)+ + prefer 2 + apply assumption + apply (case_tac "runnable' st") + apply ((wp threadSet_runnable_sch_act hoare_drop_imps | simp)+)[1] + apply (rule_tac Q="\rv s. st_tcb_at' (Not \ runnable') t s \ + (ksCurThread s \ t \ ksSchedulerAction s \ ResumeCurrentThread \ + sch_act_wf (ksSchedulerAction s) s)" + in hoare_post_imp) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (simp only: imp_conv_disj) + apply (wp threadSet_pred_tcb_at_state threadSet_sch_act_wf + hoare_vcg_disj_lift|simp)+ + done + +lemma sts_sch_act[wp]: + "\\s. (\ runnable' st \ sch_act_simple s) \ sch_act_wf (ksSchedulerAction s) s\ + setThreadState st t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setThreadState_def) + apply wp + apply simp + prefer 2 + apply assumption + apply (case_tac "runnable' st") + apply (rule_tac Q="\s. sch_act_wf (ksSchedulerAction s) s" + in hoare_pre_imp, simp) + apply ((wp hoare_drop_imps threadSet_runnable_sch_act | simp)+)[1] + apply (rule_tac Q="\rv s. st_tcb_at' (Not \ runnable') t s \ + (ksCurThread s \ t \ ksSchedulerAction s \ ResumeCurrentThread \ + sch_act_wf (ksSchedulerAction s) s)" + in hoare_post_imp) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (simp only: imp_conv_disj) + apply (rule hoare_pre) + apply (wp threadSet_pred_tcb_at_state threadSet_sch_act_wf + hoare_vcg_disj_lift|simp)+ + apply (auto simp: sch_act_simple_def) + done + +lemma sbn_sch_act': + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setBoundNotification ntfn t \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_sch_act | simp)+ + done + +lemma ssa_sch_act_simple[wp]: + "sa = ResumeCurrentThread \ sa = ChooseNewThread \ + \\\ setSchedulerAction sa \\rv. sch_act_simple\" + unfolding setSchedulerAction_def sch_act_simple_def + by (wp | simp)+ + +lemma sch_act_simple_lift: + "(\P. \\s. P (ksSchedulerAction s)\ f \\rv s. P (ksSchedulerAction s)\) \ + \sch_act_simple\ f \\rv. sch_act_simple\" + by (simp add: sch_act_simple_def) assumption + +lemma rescheduleRequired_sch_act_simple[wp]: + "\sch_act_simple\ rescheduleRequired \\rv. sch_act_simple\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp)+ + done + +crunch no_sa[wp]: tcbSchedDequeue "\s. P (ksSchedulerAction s)" + +lemma sts_sch_act_simple[wp]: + "\sch_act_simple\ setThreadState st t \\rv. sch_act_simple\" + apply (simp add: setThreadState_def) + apply (wp hoare_drop_imps | rule sch_act_simple_lift | simp)+ + done + +lemma setQueue_after: + "(setQueue d p q >>= (\rv. threadSet f t)) = + (threadSet f t >>= (\rv. setQueue d p q))" + apply (simp add: setQueue_def) + apply (rule oblivious_modify_swap) + apply (simp add: threadSet_def getObject_def setObject_def + loadObject_default_def + split_def projectKO_def2 alignCheck_assert + magnitudeCheck_assert updateObject_default_def) + apply (intro oblivious_bind, simp_all) + done + +lemma tcbSchedEnqueue_sch_act[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + tcbSchedEnqueue t + \\_ s. 
sch_act_wf (ksSchedulerAction s) s\" + by (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) + (wp setQueue_sch_act threadSet_tcbDomain_triv | wp sch_act_wf_lift | clarsimp)+ + +lemma tcbSchedEnqueue_weak_sch_act[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + tcbSchedEnqueue t + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) + apply (wp setQueue_sch_act threadSet_weak_sch_act_wf | clarsimp)+ + done + +lemma threadGet_wp: + "\\s. \tcb. ko_at' tcb t s \ P (f tcb) s\ threadGet f t \P\" + apply (simp add: threadGet_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma threadGet_const: + "\\s. tcb_at' t s \ obj_at' (P \ f) t s\ threadGet f t \\rv s. P (rv)\" + apply (simp add: threadGet_def liftM_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +schematic_goal l2BitmapSize_def': (* arch specific consequence *) + "l2BitmapSize = numeral ?X" + by (simp add: l2BitmapSize_def wordBits_def word_size numPriorities_def) + +lemma prioToL1Index_size [simp]: + "prioToL1Index w < l2BitmapSize" + unfolding prioToL1Index_def wordRadix_def l2BitmapSize_def' + by (fastforce simp: shiftr_div_2n' nat_divide_less_eq + intro: order_less_le_trans[OF unat_lt2p]) + +lemma prioToL1Index_max: + "prioToL1Index p < 2 ^ wordRadix" + unfolding prioToL1Index_def wordRadix_def + by (insert unat_lt2p[where x=p], simp add: shiftr_div_2n') + +lemma prioToL1Index_bit_set: + "((2 :: machine_word) ^ prioToL1Index p) !! prioToL1Index p" + using l2BitmapSize_def' + by (fastforce simp: nth_w2p_same intro: order_less_le_trans[OF prioToL1Index_size]) + +lemma prioL2Index_bit_set: + fixes p :: priority + shows "((2::machine_word) ^ unat (ucast p && (mask wordRadix :: machine_word))) !! unat (p && mask wordRadix)" + apply (simp add: nth_w2p wordRadix_def ucast_and_mask[symmetric] unat_ucast_upcast is_up) + apply (rule unat_less_helper) + apply (insert and_mask_less'[where w=p and n=wordRadix], simp add: wordRadix_def) + done + +lemma addToBitmap_bitmapQ: + "\ \s. True \ addToBitmap d p \\_. bitmapQ d p \" + unfolding addToBitmap_def + modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def + getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def + by (wpsimp simp: bitmap_fun_defs bitmapQ_def prioToL1Index_bit_set prioL2Index_bit_set + simp_del: bit_exp_iff) + +crunch norq[wp]: addToBitmap "\s. P (ksReadyQueues s)" + (wp: updateObject_cte_inv hoare_drop_imps) +crunch norq[wp]: removeFromBitmap "\s. P (ksReadyQueues s)" + (wp: updateObject_cte_inv hoare_drop_imps) + +lemma prioToL1Index_lt: + "2 ^ wordRadix \ x \ prioToL1Index p < x" + unfolding prioToL1Index_def wordRadix_def + by (insert unat_lt2p[where x=p], simp add: shiftr_div_2n') + +lemma prioToL1Index_bits_low_high_eq: + "\ pa \ p; prioToL1Index pa = prioToL1Index (p::priority) \ + \ unat (pa && mask wordRadix) \ unat (p && mask wordRadix)" + unfolding prioToL1Index_def + by (fastforce simp: nth_w2p wordRadix_def is_up bits_low_high_eq) + +lemma prioToL1Index_bit_not_set: + "\ (~~ ((2 :: machine_word) ^ prioToL1Index p)) !! prioToL1Index p" + apply (subst word_ops_nth_size, simp_all add: prioToL1Index_bit_set del: bit_exp_iff) + apply (fastforce simp: prioToL1Index_def wordRadix_def word_size + intro: order_less_le_trans[OF word_shiftr_lt]) + done + +lemma prioToL1Index_complement_nth_w2p: + fixes p pa :: priority + shows "(~~ ((2 :: machine_word) ^ prioToL1Index p)) !! 
prioToL1Index p' + = (prioToL1Index p \ prioToL1Index p')" + by (fastforce simp: complement_nth_w2p prioToL1Index_lt wordRadix_def word_size)+ + +lemma valid_bitmapQ_exceptE: + "\ valid_bitmapQ_except d' p' s ; d \ d' \ p \ p' \ + \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (fastforce simp: valid_bitmapQ_except_def) + +lemma invertL1Index_eq_cancelD: + "\ invertL1Index i = invertL1Index j ; i < l2BitmapSize ; j < l2BitmapSize \ + \ i = j" + by (simp add: invertL1Index_def l2BitmapSize_def') + +lemma invertL1Index_eq_cancel: + "\ i < l2BitmapSize ; j < l2BitmapSize \ + \ (invertL1Index i = invertL1Index j) = (i = j)" + by (rule iffI, simp_all add: invertL1Index_eq_cancelD) + +lemma removeFromBitmap_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ removeFromBitmap d p \\_. bitmapQ_no_L1_orphans \" + unfolding bitmap_fun_defs + apply (wp | simp add: bitmap_fun_defs bitmapQ_no_L1_orphans_def)+ + apply (fastforce simp: invertL1Index_eq_cancel prioToL1Index_bit_not_set + prioToL1Index_complement_nth_w2p) + done + +lemma removeFromBitmap_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans \ + removeFromBitmap d p + \\_. bitmapQ_no_L2_orphans \" + unfolding bitmap_fun_defs + apply (wp, clarsimp simp: bitmap_fun_defs bitmapQ_no_L2_orphans_def)+ + apply (rule conjI, clarsimp) + apply (clarsimp simp: complement_nth_w2p l2BitmapSize_def') + apply clarsimp + apply metis + done + +lemma removeFromBitmap_valid_bitmapQ_except: + "\ valid_bitmapQ_except d p \ + removeFromBitmap d p + \\_. valid_bitmapQ_except d p \" +proof - + have unat_ucast_mask[simp]: + "\x. unat ((ucast (p::priority) :: machine_word) && mask x) = unat (p && mask x)" + by (simp add: ucast_and_mask[symmetric] unat_ucast_upcast is_up) + + note bit_exp_iff[simp del] bit_not_iff[simp del] bit_not_exp_iff[simp del] + show ?thesis + unfolding removeFromBitmap_def + apply (simp add: let_into_return[symmetric]) + unfolding bitmap_fun_defs when_def + apply wp + apply clarsimp + apply (rule conjI) + (* after clearing bit in L2, all bits in L2 field are clear *) + apply clarsimp + apply (subst valid_bitmapQ_except_def, clarsimp)+ + apply (clarsimp simp: bitmapQ_def) + apply (rule conjI; clarsimp) + apply (rename_tac p') + apply (rule conjI; clarsimp simp: invertL1Index_eq_cancel) + apply (drule_tac p=p' in valid_bitmapQ_exceptE[where d=d], clarsimp) + apply (clarsimp simp: bitmapQ_def) + apply (drule_tac n'="unat (p' && mask wordRadix)" in no_other_bits_set) + apply (erule (1) prioToL1Index_bits_low_high_eq) + apply (rule order_less_le_trans[OF word_unat_mask_lt]) + apply ((simp add: wordRadix_def' word_size)+)[2] + apply (rule order_less_le_trans[OF word_unat_mask_lt]) + apply ((simp add: wordRadix_def' word_size)+)[3] + apply (drule_tac p=p' and d=d in valid_bitmapQ_exceptE, simp) + apply (clarsimp simp: bitmapQ_def prioToL1Index_complement_nth_w2p) + apply (drule_tac p=pa and d=da in valid_bitmapQ_exceptE, simp) + apply (clarsimp simp: bitmapQ_def prioToL1Index_complement_nth_w2p) + (* after clearing bit in L2, some bits in L2 field are still set *) + apply clarsimp + apply (subst valid_bitmapQ_except_def, clarsimp)+ + apply (clarsimp simp: bitmapQ_def invertL1Index_eq_cancel) + apply (rule conjI; clarsimp) + apply (frule (1) prioToL1Index_bits_low_high_eq) + apply (drule_tac d=d and p=pa in valid_bitmapQ_exceptE, simp) + apply (clarsimp simp: bitmapQ_def) + apply (subst complement_nth_w2p) + apply (rule order_less_le_trans[OF word_unat_mask_lt]) + apply ((simp add: wordRadix_def' 
word_size)+)[3] + apply (clarsimp simp: valid_bitmapQ_except_def bitmapQ_def) + done +qed + +lemma addToBitmap_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ addToBitmap d p \\_. bitmapQ_no_L1_orphans \" + unfolding bitmap_fun_defs bitmapQ_defs + using word_unat_mask_lt[where w=p and m=wordRadix] + apply wp + apply (clarsimp simp: word_or_zero prioToL1Index_bit_set ucast_and_mask[symmetric] + unat_ucast_upcast is_up wordRadix_def' word_size nth_w2p + wordBits_def numPriorities_def) + done + +lemma addToBitmap_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ addToBitmap d p \\_. bitmapQ_no_L2_orphans \" + unfolding bitmap_fun_defs bitmapQ_defs + supply bit_exp_iff[simp del] + apply wp + apply clarsimp + apply (fastforce simp: invertL1Index_eq_cancel prioToL1Index_bit_set) + done + +lemma addToBitmap_valid_bitmapQ_except: + "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans \ + addToBitmap d p + \\_. valid_bitmapQ_except d p \" + unfolding bitmap_fun_defs bitmapQ_defs + apply wp + apply (clarsimp simp: bitmapQ_def invertL1Index_eq_cancel + ucast_and_mask[symmetric] unat_ucast_upcast is_up nth_w2p) + apply (fastforce simp: priority_mask_wordRadix_size[simplified wordBits_def'] + dest: prioToL1Index_bits_low_high_eq) + done + +lemma addToBitmap_valid_bitmapQ: + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans + and (\s. \ tcbQueueEmpty (ksReadyQueues s (d,p)))\ + addToBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. ?pre s \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done + +lemma threadGet_const_tcb_at: + "\\s. tcb_at' t s \ obj_at' (P s \ f) t s\ threadGet f t \\rv s. P s rv \" + apply (simp add: threadGet_def liftM_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma threadGet_const_tcb_at_imp_lift: + "\\s. tcb_at' t s \ obj_at' (P s \ f) t s \ obj_at' (Q s \ f) t s \ + threadGet f t + \\rv s. P s rv \ Q s rv \" + apply (simp add: threadGet_def liftM_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma setQueue_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ + setQueue d p ts + \\rv. bitmapQ_no_L1_orphans \" + unfolding setQueue_def bitmapQ_no_L1_orphans_def null_def + by (wp, auto) + +lemma setQueue_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ + setQueue d p ts + \\rv. bitmapQ_no_L2_orphans \" + unfolding setQueue_def bitmapQ_no_L2_orphans_def null_def + by (wp, auto) + +lemma setQueue_sets_queue[wp]: + "\d p ts P. \ \s. P ts \ setQueue d p ts \\rv s. P (ksReadyQueues s (d, p)) \" + unfolding setQueue_def + by (wp, simp) + +lemma rescheduleRequired_valid_bitmapQ_sch_act_simple: + "\ valid_bitmapQ and sch_act_simple\ + rescheduleRequired + \\_. valid_bitmapQ \" + including classic_wp_pre + apply (simp add: rescheduleRequired_def sch_act_simple_def) + apply (rule_tac Q'="\rv s. valid_bitmapQ s \ (rv = ResumeCurrentThread \ rv = ChooseNewThread)" + in bind_wp) + apply wpsimp + apply (case_tac rv; simp) + apply (wp, fastforce) + done + +lemma rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple: + "\ bitmapQ_no_L1_orphans and sch_act_simple\ + rescheduleRequired + \\_. bitmapQ_no_L1_orphans \" + including classic_wp_pre + apply (simp add: rescheduleRequired_def sch_act_simple_def) + apply (rule_tac Q'="\rv s. 
bitmapQ_no_L1_orphans s \ (rv = ResumeCurrentThread \ rv = ChooseNewThread)" + in bind_wp) + apply wpsimp + apply (case_tac rv; simp) + apply (wp, fastforce) + done + +lemma rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple: + "\ bitmapQ_no_L2_orphans and sch_act_simple\ + rescheduleRequired + \\_. bitmapQ_no_L2_orphans \" + including classic_wp_pre + apply (simp add: rescheduleRequired_def sch_act_simple_def) + apply (rule_tac Q'="\rv s. bitmapQ_no_L2_orphans s \ (rv = ResumeCurrentThread \ rv = ChooseNewThread)" + in bind_wp) + apply wpsimp + apply (case_tac rv; simp) + apply (wp, fastforce) + done + +lemma sts_valid_bitmapQ_sch_act_simple: + "\valid_bitmapQ and sch_act_simple\ + setThreadState st t + \\_. valid_bitmapQ \" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_valid_bitmapQ_sch_act_simple + threadSet_valid_bitmapQ [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def inQ_def)+ + done + +lemma sts_valid_bitmapQ_no_L2_orphans_sch_act_simple: + "\bitmapQ_no_L2_orphans and sch_act_simple\ + setThreadState st t + \\_. bitmapQ_no_L2_orphans\" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple + threadSet_valid_bitmapQ_no_L2_orphans [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def inQ_def)+ + done + +lemma sts_valid_bitmapQ_no_L1_orphans_sch_act_simple: + "\bitmapQ_no_L1_orphans and sch_act_simple\ + setThreadState st t + \\_. bitmapQ_no_L1_orphans\" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple + threadSet_valid_bitmapQ_no_L1_orphans [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def inQ_def)+ + done + +lemma setSchedulerAction_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ setSchedulerAction act \\_ s. P (ksReadyQueues s)\" + by (wp, simp) + +lemma threadSet_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ threadSet f t \\rv s. P (ksReadyQueues s)\" + by (simp add: threadSet_def | wp updateObject_default_inv)+ + +lemma sbn_ksQ: + "\\s. P (ksReadyQueues s p)\ setBoundNotification ntfn t \\rv s. P (ksReadyQueues s p)\" + by (simp add: setBoundNotification_def, wp) + +lemma setQueue_ksQ[wp]: + "\\s. P ((ksReadyQueues s)((d, p) := q))\ + setQueue d p q + \\rv s. P (ksReadyQueues s)\" + by (simp add: setQueue_def fun_upd_def[symmetric] + | wp)+ + +lemma threadSet_tcbState_st_tcb_at': + "\\s. P st \ threadSet (tcbState_update (\_. st)) t \\_. st_tcb_at' P t\" + apply (simp add: threadSet_def pred_tcb_at'_def) + apply (wpsimp wp: setObject_tcb_strongest) + done + +lemma isRunnable_const: + "\st_tcb_at' runnable' t\ isRunnable t \\runnable _. 
runnable \" + by (rule isRunnable_wp) + +lemma valid_ipc_buffer_ptr'D: + assumes yv: "y < unat max_ipc_words" + and buf: "valid_ipc_buffer_ptr' a s" + shows "pointerInUserData (a + of_nat y * 8) s" + using buf unfolding valid_ipc_buffer_ptr'_def pointerInUserData_def + apply clarsimp + apply (subgoal_tac + "(a + of_nat y * 8) && ~~ mask pageBits = a && ~~ mask pageBits") + apply simp + apply (rule mask_out_first_mask_some [where n = msg_align_bits]) + apply (erule is_aligned_add_helper [THEN conjunct2]) + apply (rule word_less_power_trans_ofnat [where k = 3, simplified]) + apply (rule order_less_le_trans [OF yv]) + apply (simp add: msg_align_bits max_ipc_words) + apply (simp add: msg_align_bits) + apply (simp_all add: msg_align_bits pageBits_def) + done + +lemma in_user_frame_eq: + assumes y: "y < unat max_ipc_words" + and al: "is_aligned a msg_align_bits" + shows "in_user_frame (a + of_nat y * 8) s = in_user_frame a s" +proof - + have "\sz. (a + of_nat y * 8) && ~~ mask (pageBitsForSize sz) = + a && ~~ mask (pageBitsForSize sz)" + apply (rule mask_out_first_mask_some [where n = msg_align_bits]) + apply (rule is_aligned_add_helper [OF al, THEN conjunct2]) + apply (rule word_less_power_trans_ofnat [where k = 3, simplified]) + apply (rule order_less_le_trans [OF y]) + apply (simp add: msg_align_bits max_ipc_words) + apply (simp add: msg_align_bits) + apply (simp add: msg_align_bits pageBits_def) + apply (case_tac sz, simp_all add: msg_align_bits bit_simps) + done + + thus ?thesis by (simp add: in_user_frame_def) +qed + +lemma loadWordUser_corres: + assumes y: "y < unat max_ipc_words" + shows "corres (=) \ (valid_ipc_buffer_ptr' a) (load_word_offs a y) (loadWordUser (a + of_nat y * 8))" + unfolding loadWordUser_def + apply (rule corres_stateAssert_assume [rotated]) + apply (erule valid_ipc_buffer_ptr'D[OF y]) + apply (rule corres_guard_imp) + apply (simp add: load_word_offs_def word_size_def) + apply (rule_tac F = "is_aligned a msg_align_bits" in corres_gen_asm2) + apply (rule corres_machine_op) + apply (rule corres_Id [OF refl refl]) + apply (rule no_fail_pre) + apply wp + apply (erule aligned_add_aligned) + apply (rule is_aligned_mult_triv2 [where n = 3, simplified]) + apply (simp add: word_bits_conv msg_align_bits)+ + apply (simp add: valid_ipc_buffer_ptr'_def msg_align_bits) + done + +lemma storeWordUser_corres: + assumes y: "y < unat max_ipc_words" + shows "corres dc (in_user_frame a) (valid_ipc_buffer_ptr' a) + (store_word_offs a y w) (storeWordUser (a + of_nat y * 8) w)" + apply (simp add: storeWordUser_def bind_assoc[symmetric] + store_word_offs_def word_size_def) + apply (rule corres_guard2_imp) + apply (rule_tac F = "is_aligned a msg_align_bits" in corres_gen_asm2) + apply (rule corres_guard1_imp) + apply (rule_tac r'=dc in corres_split) + apply (simp add: stateAssert_def) + apply (rule_tac r'=dc in corres_split) + apply (rule corres_trivial) + apply simp + apply (rule corres_assert) + apply wp+ + apply (rule corres_machine_op) + apply (rule corres_Id [OF refl]) + apply simp + apply (rule no_fail_pre) + apply (wp no_fail_storeWord) + apply (erule_tac n=msg_align_bits in aligned_add_aligned) + apply (rule is_aligned_mult_triv2 [where n = 3, simplified]) + apply (simp add: word_bits_conv msg_align_bits)+ + apply wp+ + apply (simp add: in_user_frame_eq[OF y]) + apply simp + apply (rule conjI) + apply (frule (1) valid_ipc_buffer_ptr'D [OF y]) + apply (simp add: valid_ipc_buffer_ptr'_def) + done + +lemma load_word_corres: + "corres (=) \ + (typ_at' UserDataT (a && ~~ mask pageBits) and 
(\s. is_aligned a word_size_bits)) + (do_machine_op (loadWord a)) (loadWordUser a)" + unfolding loadWordUser_def + apply (rule corres_gen_asm2) + apply (rule corres_stateAssert_assume [rotated]) + apply (simp add: pointerInUserData_def) + apply (rule corres_guard_imp) + apply simp + apply (rule corres_machine_op) + apply (rule corres_Id [OF refl refl]) + apply (rule no_fail_pre) + apply (wpsimp simp: word_size_bits_def)+ + done + +lemmas msgRegisters_unfold + = AARCH64_H.msgRegisters_def + msg_registers_def + AARCH64.msgRegisters_def + [unfolded upto_enum_def, simplified, + unfolded fromEnum_def enum_register, simplified, + unfolded toEnum_def enum_register, simplified] + +lemma thread_get_registers: + "thread_get (arch_tcb_get_registers \ tcb_arch) t = as_user t (gets user_regs)" + apply (simp add: thread_get_def as_user_def arch_tcb_get_registers_def + arch_tcb_context_get_def arch_tcb_context_set_def) + apply (rule bind_cong [OF refl]) + apply (clarsimp simp: gets_the_member) + apply (simp add: get_def the_run_state_def set_object_def get_object_def + put_def bind_def return_def gets_def) + apply (drule get_tcb_SomeD) + apply (clarsimp simp: map_upd_triv select_f_def image_def return_def) + done + +lemma getMRs_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) + (case_option \ valid_ipc_buffer_ptr' buf) + (get_mrs t buf mi) (getMRs t buf (message_info_map mi))" + proof - + have S: "get = gets id" + by (simp add: gets_def) + have T: "corres (\con regs. regs = map con msg_registers) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (arch_tcb_get_registers o tcb_arch) t) + (asUser t (mapM getRegister AARCH64_H.msgRegisters))" + apply (subst thread_get_registers) + apply (rule asUser_corres') + apply (subst mapM_gets) + apply (simp add: getRegister_def) + apply (simp add: S AARCH64_H.msgRegisters_def msg_registers_def) + done + show ?thesis + apply (case_tac mi, simp add: get_mrs_def getMRs_def split del: if_split) + apply (case_tac buf) + apply (rule corres_guard_imp) + apply (rule corres_split [where R = "\_. \" and R' = "\_. \", OF T]) + apply simp + apply wp+ + apply simp + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF T]) + apply (simp only: option.simps return_bind fun_app_def + load_word_offs_def doMachineOp_mapM loadWord_empty_fail) + apply (rule corres_split_eqr) + apply (simp only: mapM_map_simp msgMaxLength_def msgLengthBits_def + msg_max_length_def o_def upto_enum_word) + apply (rule corres_mapM [where r'="(=)" and S="{a. fst a = snd a \ fst a < unat max_ipc_words}"]) + apply simp + apply simp + apply (simp add: word_size wordSize_def wordBits_def) + apply (rule loadWordUser_corres) + apply simp + apply wp+ + apply simp + apply (unfold msgRegisters_unfold)[1] + apply simp + apply (clarsimp simp: set_zip) + apply (simp add: msgRegisters_unfold max_ipc_words nth_append) + apply (rule corres_trivial, simp) + apply (wp hoare_vcg_all_lift | simp add: valid_ipc_buffer_ptr'_def)+ + done +qed + +lemmas doMachineOp_return = submonad_doMachineOp.return + +lemma doMachineOp_bind: + "\ empty_fail a; \x. empty_fail (b x) \ \ doMachineOp (a >>= b) = (doMachineOp a >>= (\rv. doMachineOp (b rv)))" + by (blast intro: submonad_bind submonad_doMachineOp) + +lemma zipWithM_x_corres: + assumes x: "\x x' y y'. ((x, y), (x', y')) \ S \ corres dc P P' (f x y) (f' x' y')" + assumes y: "\x x' y y'. ((x, y), (x', y')) \ S \ \P\ f x y \\rv. P\" + and z: "\x x' y y'. ((x, y), (x', y')) \ S \ \P'\ f' x' y' \\rv. 
P'\" + and a: "set (zip (zip xs ys) (zip xs' ys')) \ S" + and b: "length (zip xs ys) = length (zip xs' ys')" + shows "corres dc P P' (zipWithM_x f xs ys) (zipWithM_x f' xs' ys')" + apply (simp add: zipWithM_x_mapM) + apply (rule corres_underlying_split) + apply (rule corres_mapM) + apply (rule dc_simp)+ + apply clarsimp + apply (rule x) + apply assumption + apply (clarsimp simp: y) + apply (clarsimp simp: z) + apply (rule b) + apply (rule a) + apply (rule corres_trivial, simp) + apply (rule hoare_TrueI)+ + done + + +lemma valid_ipc_buffer_ptr'_def2: + "valid_ipc_buffer_ptr' = (\p s. (is_aligned p msg_align_bits \ typ_at' UserDataT (p && ~~ mask pageBits) s))" + apply (rule ext, rule ext) + apply (simp add: valid_ipc_buffer_ptr'_def) + done + +lemma storeWordUser_valid_ipc_buffer_ptr' [wp]: + "\valid_ipc_buffer_ptr' p\ storeWordUser p' w \\_. valid_ipc_buffer_ptr' p\" + unfolding valid_ipc_buffer_ptr'_def2 + by (wp hoare_vcg_all_lift storeWordUser_typ_at') + +lemma thread_set_as_user_registers: + "thread_set (\tcb. tcb \ tcb_arch := arch_tcb_set_registers (f (arch_tcb_get_registers (tcb_arch tcb))) + (tcb_arch tcb) \) t + = as_user t (modify (modify_registers f))" +proof - + have P: "\f. det (modify f)" + by (simp add: modify_def) + thus ?thesis + apply (simp add: as_user_def P thread_set_def) + apply (clarsimp simp: select_f_def simpler_modify_def bind_def image_def modify_registers_def + arch_tcb_set_registers_def arch_tcb_get_registers_def + arch_tcb_context_set_def arch_tcb_context_get_def) + done +qed + +lemma UserContext_fold: + "UserContext (fpu_state s) (foldl (\s (x, y). s(x := y)) (user_regs s) xs) = + foldl (\s (r, v). UserContext (fpu_state s) ((user_regs s)(r := v))) s xs" + apply (induct xs arbitrary: s; simp) + apply (clarsimp split: prod.splits) + apply (metis user_context.sel) + done + +lemma setMRs_corres: + assumes m: "mrs' = mrs" + shows + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct and case_option \ in_user_frame buf) + (case_option \ valid_ipc_buffer_ptr' buf) + (set_mrs t buf mrs) (setMRs t buf mrs')" +proof - + have setRegister_def2: + "setRegister = (\r v. modify (\s. UserContext (fpu_state s) ((user_regs s)(r := v))))" + by ((rule ext)+, simp add: setRegister_def) + + have S: "\xs ys n m. 
m - n \ length xs \ (zip xs (drop n (take m ys))) = zip xs (drop n ys)" + by (simp add: zip_take_triv2 drop_take) + + note upt.simps[simp del] upt_rec_numeral[simp del] + + show ?thesis using m + unfolding setMRs_def set_mrs_def + apply (clarsimp cong: option.case_cong split del: if_split) + apply (subst bind_assoc[symmetric]) + apply (fold thread_set_def[simplified]) + apply (subst thread_set_as_user_registers) + apply (cases buf) + apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_modify + take_min_len zip_take_triv2 min.commute) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF asUser_corres']) + apply (rule corres_modify') + apply (fastforce simp: fold_fun_upd[symmetric] msgRegisters_unfold UserContext_fold + modify_registers_def + cong: if_cong simp del: the_index.simps) + apply simp + apply (rule corres_trivial, simp) + apply ((wp |simp)+)[4] + \ \buf = Some a\ + using if_split[split del] + apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_modify + take_min_len zip_take_triv2 min.commute + msgMaxLength_def msgLengthBits_def) + apply (simp add: msg_max_length_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF asUser_corres']) + apply (rule corres_modify') + apply (simp only: msgRegisters_unfold cong: if_cong) + apply (fastforce simp: fold_fun_upd[symmetric] modify_registers_def UserContext_fold) + apply simp + apply (rule corres_split_nor) + apply (rule_tac S="{((x, y), (x', y')). y = y' \ x' = (a + (of_nat x * 8)) \ x < unat max_ipc_words}" + in zipWithM_x_corres) + apply (fastforce intro: storeWordUser_corres) + apply wp+ + apply (clarsimp simp add: S msgMaxLength_def msg_max_length_def set_zip) + apply (simp add: wordSize_def wordBits_def word_size max_ipc_words + upt_Suc_append[symmetric] upto_enum_word) + apply simp + apply (rule corres_trivial, clarsimp simp: min.commute) + apply wp+ + apply (wp | clarsimp simp: valid_ipc_buffer_ptr'_def)+ + done +qed + +lemma copyMRs_corres: + "corres (=) (tcb_at s and tcb_at r and pspace_aligned and pspace_distinct + and case_option \ in_user_frame sb + and case_option \ in_user_frame rb + and K (unat n \ msg_max_length)) + (case_option \ valid_ipc_buffer_ptr' sb + and case_option \ valid_ipc_buffer_ptr' rb) + (copy_mrs s sb r rb n) (copyMRs s sb r rb n)" +proof - + have U: "unat n \ msg_max_length \ + map (toEnum :: nat \ machine_word) [7 ..< Suc (unat n)] = map of_nat [7 ..< Suc (unat n)]" + unfolding msg_max_length_def by auto + note R'=msgRegisters_unfold[THEN meta_eq_to_obj_eq, THEN arg_cong[where f=length]] + note R=R'[simplified] + + have as_user_bit: + "\v :: machine_word. + corres dc (tcb_at s and tcb_at r and pspace_aligned and pspace_distinct) + \ + (mapM + (\ra. do v \ as_user s (getRegister ra); + as_user r (setRegister ra v) + od) + (take (unat n) msg_registers)) + (mapM + (\ra. 
do v \ asUser s (getRegister ra); + asUser r (setRegister ra v) + od) + (take (unat n) msgRegisters))" + apply (rule corres_guard_imp) + apply (rule_tac S=Id in corres_mapM, simp+) + apply (rule corres_split_eqr[OF asUser_getRegister_corres asUser_setRegister_corres]) + apply (wp | clarsimp simp: msg_registers_def msgRegisters_def)+ + done + + have wordSize[simp]: "of_nat wordSize = 8" + by (simp add: wordSize_def wordBits_def word_size) + + show ?thesis + apply (rule corres_assume_pre) + apply (simp add: copy_mrs_def copyMRs_def word_size + cong: option.case_cong + split del: if_split del: upt.simps) + apply (cases sb) + apply (simp add: R) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF as_user_bit]) + apply (rule corres_trivial, simp) + apply wp+ + apply simp + apply simp + apply (cases rb) + apply (simp add: R) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF as_user_bit]) + apply (rule corres_trivial, simp) + apply wp+ + apply simp + apply simp + apply (simp add: R del: upt.simps) + apply (rule corres_guard_imp) + apply (rename_tac sb_ptr rb_ptr) + apply (rule corres_split_nor[OF as_user_bit]) + apply (rule corres_split_eqr) + apply (rule_tac S="{(x, y). y = of_nat x \ x < unat max_ipc_words}" + in corres_mapM, simp+) + apply (rule corres_split_eqr) + apply (rule loadWordUser_corres) + apply simp + apply (rule storeWordUser_corres) + apply simp + apply (wp hoare_vcg_all_lift | simp)+ + apply (clarsimp simp: upto_enum_def) + apply arith + apply (subst set_zip) + apply (simp add: upto_enum_def U del: upt.simps) + apply (clarsimp simp del: upt.simps) + apply (clarsimp simp: msg_max_length_def word_le_nat_alt nth_append + max_ipc_words) + apply (erule order_less_trans) + apply simp + apply (rule corres_trivial, simp) + apply (wp hoare_vcg_all_lift mapM_wp' + | simp add: valid_ipc_buffer_ptr'_def)+ + done +qed + +lemma cte_at_tcb_at_32': + "tcb_at' t s \ cte_at' (t + 32) s" + apply (simp add: cte_at'_obj_at') + apply (rule disjI2, rule bexI[where x=32]) + apply simp + apply fastforce + done + +lemma get_tcb_cap_corres: + "tcb_cap_cases ref = Some (getF, v) \ + corres cap_relation (tcb_at t and valid_objs) (tcb_at' t and pspace_aligned' and pspace_distinct') + (liftM getF (gets_the (get_tcb t))) + (getSlotCap (cte_map (t, ref)))" + apply (simp add: getSlotCap_def liftM_def[symmetric]) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply (cases v, simp) + apply (frule tcb_cases_related) + apply (clarsimp simp: cte_at'_obj_at') + apply (drule spec[where x=t]) + apply (drule bspec, erule domI) + apply simp + apply clarsimp + apply (clarsimp simp: gets_the_def simpler_gets_def + bind_def assert_opt_def tcb_at_def + return_def + dest!: get_tcb_SomeD) + apply (drule use_valid [OF _ getCTE_sp[where P="(=) s'" for s'], OF _ refl]) + apply (clarsimp simp: get_tcb_def return_def) + apply (drule pspace_relation_ctes_ofI[OF state_relation_pspace_relation]) + apply (rule cte_wp_at_tcbI[where t="(t, ref)"], fastforce+)[1] + apply assumption+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemmas get_vtable_cap_corres = + get_tcb_cap_corres[where ref="tcb_cnode_index 1", simplified, OF conjI [OF refl refl]] + +lemma pspace_dom_dom: + "dom ps \ pspace_dom ps" + unfolding pspace_dom_def + apply clarsimp + apply (rule rev_bexI [OF domI], assumption) + apply (simp add: obj_relation_cuts_def2 image_Collect cte_map_def range_composition [symmetric] + split: Structures_A.kernel_object.splits arch_kernel_obj.splits + cong: arch_kernel_obj.case_cong) + apply 
safe + (* CNode *) + apply (force dest: wf_cs_0 simp: of_bl_def) + (* PageTable *) + apply (fastforce simp add: image_Collect image_image intro: image_eqI[where x=0]) + (* DataPage *) + apply (rule exI[where x=0]) + apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.split) + done + +lemma no_0_obj_kheap: + assumes no0: "no_0_obj' s'" + and psr: "pspace_relation (kheap s) (ksPSpace s')" + shows "kheap s 0 = None" +proof (rule ccontr) + assume "kheap s 0 \ None" + hence "0 \ dom (kheap s)" .. + hence "0 \ pspace_dom (kheap s)" by (rule set_mp [OF pspace_dom_dom]) + moreover + from no0 have "0 \ dom (ksPSpace s')" + unfolding no_0_obj'_def by clarsimp + ultimately show False using psr + by (clarsimp simp: pspace_relation_def) +qed + +lemmas valid_ipc_buffer_cap_simps = valid_ipc_buffer_cap_def [split_simps cap.split arch_cap.split] + +lemma lookupIPCBuffer_corres': + "corres (=) + (tcb_at t and valid_objs and pspace_aligned and pspace_distinct) + (valid_objs' and no_0_obj') + (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" + apply (rule corres_cross_add_guard[where Q'="pspace_aligned' and pspace_distinct'"]) + apply (fastforce simp: pspace_aligned_cross pspace_distinct_cross state_relation_def) + apply (simp add: lookup_ipc_buffer_def AARCH64_H.lookupIPCBuffer_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF threadGet_corres]) + apply (simp add: tcb_relation_def) + apply (simp add: getThreadBufferSlot_def locateSlot_conv) + apply (rule corres_split[OF getSlotCap_corres]) + apply (simp add: cte_map_def tcb_cnode_index_def cte_level_bits_def tcbIPCBufferSlot_def) + apply (rule_tac F="valid_ipc_buffer_cap rv buffer_ptr" + in corres_gen_asm) + apply (rule_tac P="valid_cap rv" and Q="no_0_obj'" + in corres_assume_pre) + apply (simp add: Let_def split: cap.split arch_cap.split + split del: if_split cong: if_cong) + apply (safe, simp_all add: isCap_simps valid_ipc_buffer_cap_simps split:bool.split_asm)[1] + apply (rename_tac word rights vmpage_size d option) + apply (subgoal_tac "word + (buffer_ptr && + mask (pageBitsForSize vmpage_size)) \ 0") + apply (simp add: cap_aligned_def + valid_ipc_buffer_cap_def + vmrights_map_def vm_read_only_def vm_read_write_def) + apply auto[1] + apply (subgoal_tac "word \ 0") + apply (subgoal_tac "word \ word + (buffer_ptr && + mask (pageBitsForSize vmpage_size))") + apply fastforce + apply (rule_tac b="2 ^ (pageBitsForSize vmpage_size) - 1" + in word_plus_mono_right2) + apply (clarsimp simp: valid_cap_def cap_aligned_def + intro!: is_aligned_no_overflow') + apply (clarsimp simp: word_bits_def bit_simps + intro!: word_less_sub_1 and_mask_less') + apply (case_tac vmpage_size, simp_all add: bit_simps)[1] + apply (drule state_relation_pspace_relation) + apply (clarsimp simp: valid_cap_def obj_at_def no_0_obj_kheap + obj_relation_cuts_def3 no_0_obj'_def + split: if_split_asm) + apply (wp get_cap_valid_ipc get_cap_aligned)+ + apply (wp thread_get_obj_at_eq)+ + apply (clarsimp elim!: tcb_at_cte_at) + apply clarsimp + done + +lemma lookupIPCBuffer_corres: + "corres (=) (tcb_at t and invs) (valid_objs' and no_0_obj') + (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" + using lookupIPCBuffer_corres' + by (rule corres_guard_imp, auto simp: invs'_def valid_state'_def) + +crunch inv[wp]: lookupIPCBuffer P + (wp: crunch_wps simp: crunch_simps) + +crunch pred_tcb_at'[wp]: rescheduleRequired "pred_tcb_at' proj P t" + +lemma setThreadState_st_tcb': + "\\\ setThreadState st t \\rv. st_tcb_at' (\s. 
s = st) t\" + apply (simp add: setThreadState_def) + apply (wp threadSet_pred_tcb_at_state | simp add: if_apply_def2)+ + done + +lemma setThreadState_st_tcb: + "\\s. P st\ setThreadState st t \\rv. st_tcb_at' P t\" + apply (cases "P st") + apply simp + apply (rule hoare_post_imp [OF _ setThreadState_st_tcb']) + apply (erule pred_tcb'_weakenE, simp) + apply simp + done + +lemma setBoundNotification_bound_tcb': + "\\\ setBoundNotification ntfn t \\rv. bound_tcb_at' (\s. s = ntfn) t\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_pred_tcb_at_state | simp add: if_apply_def2)+ + done + +lemma setBoundNotification_bound_tcb: + "\\s. P ntfn\ setBoundNotification ntfn t \\rv. bound_tcb_at' P t\" + apply (cases "P ntfn") + apply simp + apply (rule hoare_post_imp [OF _ setBoundNotification_bound_tcb']) + apply (erule pred_tcb'_weakenE, simp) + apply simp + done + +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for ct'[wp]: "\s. P (ksCurThread s)" + +lemma ct_in_state'_decomp: + assumes x: "\\s. t = (ksCurThread s)\ f \\rv s. t = (ksCurThread s)\" + assumes y: "\Pre\ f \\rv. st_tcb_at' Prop t\" + shows "\\s. Pre s \ t = (ksCurThread s)\ f \\rv. ct_in_state' Prop\" + apply (rule hoare_post_imp [where Q="\rv s. t = ksCurThread s \ st_tcb_at' Prop t s"]) + apply (clarsimp simp add: ct_in_state'_def) + apply (rule hoare_weaken_pre) + apply (wp x y) + apply simp + done + +lemma ct_in_state'_set: + "\\s. tcb_at' t s \ P st \ t = ksCurThread s\ setThreadState st t \\rv. ct_in_state' P\" + apply (rule hoare_weaken_pre) + apply (rule ct_in_state'_decomp[where t=t]) + apply (wp setThreadState_ct') + apply (wp setThreadState_st_tcb) + apply clarsimp + done + +crunches setQueue, rescheduleRequired, tcbSchedDequeue + for idle'[wp]: "valid_idle'" + (simp: crunch_simps wp: crunch_wps) + +lemma sts_valid_idle'[wp]: + "\valid_idle' and valid_pspace' and + (\s. t = ksIdleThread s \ idle' ts)\ + setThreadState ts t + \\rv. valid_idle'\" + apply (simp add: setThreadState_def) + apply (wpsimp wp: threadSet_idle' simp: idle_tcb'_def) + done + +lemma sbn_valid_idle'[wp]: + "\valid_idle' and valid_pspace' and + (\s. t = ksIdleThread s \ \bound ntfn)\ + setBoundNotification ntfn t + \\rv. valid_idle'\" + apply (simp add: setBoundNotification_def) + apply (wpsimp wp: threadSet_idle' simp: idle_tcb'_def) + done + +lemma gts_sp': + "\P\ getThreadState t \\rv. st_tcb_at' (\st. st = rv) t and P\" + apply (simp add: getThreadState_def threadGet_def) + apply wp + apply (simp add: o_def pred_tcb_at'_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma gbn_sp': + "\P\ getBoundNotification t \\rv. bound_tcb_at' (\st. st = rv) t and P\" + apply (simp add: getBoundNotification_def threadGet_def) + apply wp + apply (simp add: o_def pred_tcb_at'_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma tcbSchedDequeue_tcbState_obj_at'[wp]: + "\obj_at' (P \ tcbState) t'\ tcbSchedDequeue t \\rv. obj_at' (P \ tcbState) t'\" + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: getObject_tcb_wp simp: o_def threadGet_def) + apply (clarsimp simp: obj_at'_def) + done + +crunch typ_at'[wp]: setQueue "\s. P' (typ_at' P t s)" + +lemma setQueue_pred_tcb_at[wp]: + "\\s. P' (pred_tcb_at' proj P t s)\ setQueue d p q \\rv s. P' (pred_tcb_at' proj P t s)\" + unfolding pred_tcb_at'_def + apply (rule_tac P=P' in P_bool_lift) + apply (rule setQueue_obj_at) + apply (rule_tac Q="\_ s. 
\typ_at' TCBT t s \ obj_at' (Not \ (P \ proj \ tcb_to_itcb')) t s" + in hoare_post_imp, simp add: not_obj_at' o_def) + apply (wp hoare_vcg_disj_lift) + apply (clarsimp simp: not_obj_at' o_def) + done + +lemma tcbSchedDequeue_pred_tcb_at'[wp]: + "\\s. P' (pred_tcb_at' proj P t' s)\ tcbSchedDequeue t \\_ s. P' (pred_tcb_at' proj P t' s)\" + apply (rule_tac P=P' in P_bool_lift) + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma sts_st_tcb': + "\if t = t' then K (P st) else st_tcb_at' P t\ + setThreadState st t' + \\_. st_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setThreadState_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +lemma sts_bound_tcb_at': + "\bound_tcb_at' P t\ + setThreadState st t' + \\_. bound_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setThreadState_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +lemma sbn_st_tcb': + "\st_tcb_at' P t\ + setBoundNotification ntfn t' + \\_. st_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setBoundNotification_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +lemma sbn_bound_tcb_at': + "\if t = t' then K (P ntfn) else bound_tcb_at' P t\ + setBoundNotification ntfn t' + \\_. bound_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setBoundNotification_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for typ_at'[wp]: "\s. P (typ_at' T p s)" + +lemmas setThreadState_typ_ats[wp] = typ_at_lifts [OF setThreadState_typ_at'] +lemmas setBoundNotification_typ_ats[wp] = typ_at_lifts [OF setBoundNotification_typ_at'] + +crunches setThreadState, setBoundNotification + for aligned'[wp]: pspace_aligned' + and distinct'[wp]: pspace_distinct' + and cte_wp_at'[wp]: "cte_wp_at' P p" + +crunch refs_of'[wp]: rescheduleRequired "\s. P (state_refs_of' s)" + (wp: threadSet_state_refs_of') + +lemma setThreadState_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (t := tcb_st_refs_of' st + \ {r \ state_refs_of' s t. snd r = TCBBound}))\ + setThreadState st t + \\rv s. P (state_refs_of' s)\" + by (simp add: setThreadState_def fun_upd_def + | wp threadSet_state_refs_of')+ + +crunch hyp_refs_of'[wp]: rescheduleRequired "\s. P (state_hyp_refs_of' s)" + (simp: unless_def crunch_simps wp: threadSet_state_hyp_refs_of' ignore: threadSet) + +lemma setThreadState_state_hyp_refs_of'[wp]: + "\\s. P ((state_hyp_refs_of' s))\ + setThreadState st t + \\rv s. P (state_hyp_refs_of' s)\" + apply (simp add: setThreadState_def fun_upd_def + | wp threadSet_state_hyp_refs_of')+ + done + +lemma setBoundNotification_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (t := tcb_bound_refs' ntfn + \ {r \ state_refs_of' s t. 
snd r \ TCBBound}))\ + setBoundNotification ntfn t + \\rv s. P (state_refs_of' s)\" + by (simp add: setBoundNotification_def Un_commute fun_upd_def + | wp threadSet_state_refs_of' )+ + +lemma setBoundNotification_state_hyp_refs_of'[wp]: + "\\s. P (state_hyp_refs_of' s)\ + setBoundNotification ntfn t + \\rv s. P (state_hyp_refs_of' s)\" + by (simp add: setBoundNotification_def fun_upd_def + | wp threadSet_state_hyp_refs_of')+ + +lemma sts_cur_tcb'[wp]: + "\cur_tcb'\ setThreadState st t \\rv. cur_tcb'\" + by (wp cur_tcb_lift) + +lemma sbn_cur_tcb'[wp]: + "\cur_tcb'\ setBoundNotification ntfn t \\rv. cur_tcb'\" + by (wp cur_tcb_lift) + +crunch iflive'[wp]: setQueue if_live_then_nonz_cap' +crunch nonz_cap[wp]: setQueue "ex_nonz_cap_to' t" +crunch iflive'[wp]: addToBitmap if_live_then_nonz_cap' +crunch nonz_cap[wp]: addToBitmap "ex_nonz_cap_to' t" +crunch iflive'[wp]: removeFromBitmap if_live_then_nonz_cap' +crunch nonz_cap[wp]: removeFromBitmap "ex_nonz_cap_to' t" + +crunches rescheduleRequired + for cap_to'[wp]: "ex_nonz_cap_to' p" + +lemma tcbQueued_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbQueued_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedNext_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedPrev_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedPrev_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_ctes_of[wp]: + "threadSet (tcbSchedNext_update f) tptr \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_ctes_of[wp]: + "threadSet (tcbSchedPrev_update f) tptr \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedNext_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedPrev_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedNext_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedPrev_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbQueued_update_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbQueued_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbQueued_update_tcb_cte_cases) + +lemma getTCB_wp: + "\\s. \ko :: tcb. ko_at' ko p s \ Q ko s\ getObject p \Q\" + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma tcbQueueRemove_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers and ex_nonz_cap_to' tcbPtr\ + tcbQueueRemove q tcbPtr + \\_. 
if_live_then_nonz_cap'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_imp_lift' getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + by (force dest: sym_heapD2[where p'=tcbPtr] sym_heapD1[where p=tcbPtr] + elim: if_live_then_nonz_capE' + simp: valid_tcb'_def opt_map_def obj_at'_def ko_wp_at'_def opt_tcb_at'_def live'_def) + +lemma tcbQueueRemove_ex_nonz_cap_to'[wp]: + "tcbQueueRemove q tcbPtr \ex_nonz_cap_to' tcbPtr'\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_cap_to' hoare_drop_imps getTCB_wp) + +(* We could write this one as "\t. tcbQueueHead t \ ..." instead, but we can't do the same in + tcbQueueAppend_if_live_then_nonz_cap', and it's nicer if the two lemmas are symmetric *) +lemma tcbQueuePrepend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueHead q)) s)\ + tcbQueuePrepend q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueEnd q)) s)\ + tcbQueueAppend q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive') + +lemma tcbQueueInsert_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcbPtr and valid_objs' and sym_heap_sched_pointers\ + tcbQueueInsert tcbPtr afterPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueInsert_def + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' getTCB_wp) + apply (intro conjI) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ko_wp_at'_def obj_at'_def live'_def) + apply (erule if_live_then_nonz_capE') + apply (frule_tac p'=afterPtr in sym_heapD2) + apply (fastforce simp: opt_map_def obj_at'_def) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def ko_wp_at'_def obj_at'_def opt_map_def live'_def) + done + +lemma tcbSchedEnqueue_iflive'[wp]: + "\if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbSchedEnqueue_def + apply (wpsimp wp: tcbQueuePrepend_if_live_then_nonz_cap' threadGet_wp) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule_tac p=tcbPtr in if_live_then_nonz_capE') + apply (fastforce simp: ko_wp_at'_def obj_at'_def live'_def) + apply clarsimp + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ko_wp_at'_def inQ_def opt_pred_def opt_map_def obj_at'_def live'_def + split: option.splits) + done + +crunches rescheduleRequired + for iflive'[wp]: if_live_then_nonz_cap' + +lemma sts_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s) + \ pspace_aligned' s \ pspace_distinct' s\ + setThreadState st t + \\rv. if_live_then_nonz_cap'\" + apply (simp add: setThreadState_def setQueue_def) + apply wpsimp + apply (rule_tac Q="\rv. 
if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) + apply clarsimp + apply (wpsimp wp: threadSet_iflive') + apply fastforce + done + +lemma sbn_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ (bound ntfn \ ex_nonz_cap_to' t s)\ + setBoundNotification ntfn t + \\rv. if_live_then_nonz_cap'\" + apply (simp add: setBoundNotification_def) + apply (rule hoare_pre) + apply (wp threadSet_iflive' | simp)+ + apply auto + done + +crunches setThreadState, setBoundNotification + for ifunsafe'[wp]: "if_unsafe_then_cap'" + +lemma st_tcb_ex_cap'': + "\ st_tcb_at' P t s; if_live_then_nonz_cap' s; + \st. P st \ st \ Inactive \ \ idle' st \ \ ex_nonz_cap_to' t s" + by (clarsimp simp: pred_tcb_at'_def obj_at'_real_def live'_def + elim!: ko_wp_at'_weakenE + if_live_then_nonz_capE') + +lemma bound_tcb_ex_cap'': + "\ bound_tcb_at' P t s; if_live_then_nonz_cap' s; + \ntfn. P ntfn \ bound ntfn \ \ ex_nonz_cap_to' t s" + by (clarsimp simp: pred_tcb_at'_def obj_at'_real_def live'_def + elim!: ko_wp_at'_weakenE + if_live_then_nonz_capE') + +crunches setThreadState, setBoundNotification + for arch' [wp]: "\s. P (ksArchState s)" + (simp: unless_def crunch_simps) + +crunches setThreadState, setBoundNotification + for it' [wp]: "\s. P (ksIdleThread s)" + (wp: getObject_inv_tcb + simp: updateObject_default_def unless_def crunch_simps) + +crunch it' [wp]: removeFromBitmap "\s. P (ksIdleThread s)" + +lemma sts_ctes_of [wp]: + "\\s. P (ctes_of s)\ setThreadState st t \\rv s. P (ctes_of s)\" + apply (simp add: setThreadState_def) + apply (wp threadSet_ctes_ofT | simp add: tcb_cte_cases_def cteSizeBits_def)+ + done + +lemma sbn_ctes_of [wp]: + "\\s. P (ctes_of s)\ setBoundNotification ntfn t \\rv s. P (ctes_of s)\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_ctes_ofT | simp add: tcb_cte_cases_def cteSizeBits_def)+ + done + +crunches setThreadState, setBoundNotification + for ksInterruptState[wp]: "\s. P (ksInterruptState s)" + (simp: unless_def crunch_simps) + +crunches setThreadState, setBoundNotification + for gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + (simp: unless_def crunch_simps wp: setObject_ksPSpace_only updateObject_default_inv) + +lemmas setThreadState_irq_handlers[wp] + = valid_irq_handlers_lift'' [OF sts_ctes_of setThreadState_ksInterruptState] + +lemmas setBoundNotification_irq_handlers[wp] + = valid_irq_handlers_lift'' [OF sbn_ctes_of setBoundNotification_ksInterruptState] + +lemma sts_global_reds' [wp]: + "\valid_global_refs'\ setThreadState st t \\_. valid_global_refs'\" + by (rule valid_global_refs_lift'; wp) + +lemma sbn_global_reds' [wp]: + "\valid_global_refs'\ setBoundNotification ntfn t \\_. valid_global_refs'\" + by (rule valid_global_refs_lift'; wp) + +crunches setThreadState, setBoundNotification + for irq_states' [wp]: valid_irq_states' + (simp: unless_def crunch_simps) + +lemma addToBitmap_ksMachine[wp]: + "\\s. P (ksMachineState s)\ addToBitmap d p \\rv s. P (ksMachineState s)\" + unfolding bitmap_fun_defs + by (wp, simp) + +lemma removeFromBitmap_ksMachine[wp]: + "\\s. P (ksMachineState s)\ removeFromBitmap d p \\rv s. P (ksMachineState s)\" + unfolding bitmap_fun_defs + by (wp|simp add: bitmap_fun_defs)+ + +lemma tcbSchedEnqueue_ksMachine[wp]: + "\\s. P (ksMachineState s)\ tcbSchedEnqueue x \\_ s. P (ksMachineState s)\" + by (simp add: tcbSchedEnqueue_def unless_def setQueue_def | wp)+ + +crunches setThreadState, setBoundNotification + for ksMachine[wp]: "\s. 
P (ksMachineState s)" + and pspace_domain_valid[wp]: "pspace_domain_valid" + +lemma setThreadState_vms'[wp]: + "\valid_machine_state'\ setThreadState F t \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) + done + +lemma ct_not_inQ_addToBitmap[wp]: + "\ ct_not_inQ \ addToBitmap d p \\_. ct_not_inQ \" + unfolding bitmap_fun_defs + by (wp, clarsimp simp: ct_not_inQ_def) + +lemma ct_not_inQ_removeFromBitmap[wp]: + "\ ct_not_inQ \ removeFromBitmap d p \\_. ct_not_inQ \" + unfolding bitmap_fun_defs + by (wp|simp add: bitmap_fun_defs ct_not_inQ_def comp_def)+ + +lemma setBoundNotification_vms'[wp]: + "\valid_machine_state'\ setBoundNotification ntfn t \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) + done + +lemma threadSet_ct_not_inQ: + "(\tcb. tcbQueued tcb = tcbQueued (F tcb)) + \ threadSet F tcbPtr \\s. P (ct_not_inQ s)\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (erule rsubst[where P=P]) + by (fastforce simp: ct_not_inQ_def obj_at'_def objBits_simps ps_clear_def split: if_splits) + +crunches tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, tcbQueueRemove, addToBitmap + for ct_not_inQ[wp]: ct_not_inQ + (wp: threadSet_ct_not_inQ crunch_wps) + +lemma tcbSchedEnqueue_ct_not_inQ: + "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedEnqueue t \\_. ct_not_inQ\" + (is "\?PRE\ _ \_\") + proof - + have ts: "\?PRE\ threadSet (tcbQueued_update (\_. True)) t \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (rule_tac Q="\s. ksSchedulerAction s = ResumeCurrentThread + \ obj_at' (Not \ tcbQueued) (ksCurThread s) s \ ksCurThread s \ t" + in hoare_pre_imp, clarsimp) + apply (rule hoare_convert_imp [OF threadSet_nosch]) + apply (rule hoare_weaken_pre) + apply (wps setObject_ct_inv) + apply (rule threadSet_obj_at'_strongish) + apply (clarsimp simp: comp_def) + done + have sq: "\d p q. \ct_not_inQ\ setQueue d p q \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def setQueue_def) + apply (wp) + apply (clarsimp) + done + show ?thesis + apply (simp add: tcbSchedEnqueue_def unless_def null_def) + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ + done + qed + +lemma tcbSchedAppend_ct_not_inQ: + "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedAppend t \\_. ct_not_inQ\" + (is "\?PRE\ _ \_\") + proof - + have ts: "\?PRE\ threadSet (tcbQueued_update (\_. True)) t \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (rule_tac Q="\s. ksSchedulerAction s = ResumeCurrentThread + \ obj_at' (Not \ tcbQueued) (ksCurThread s) s \ ksCurThread s \ t" + in hoare_pre_imp, clarsimp) + apply (rule hoare_convert_imp [OF threadSet_nosch]) + apply (rule hoare_weaken_pre) + apply (wps setObject_ct_inv) + apply (rule threadSet_obj_at'_strongish) + apply (clarsimp simp: comp_def) + done + have sq: "\d p q. \ct_not_inQ\ setQueue d p q \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def setQueue_def) + apply (wp) + apply (clarsimp) + done + show ?thesis + apply (simp add: tcbSchedAppend_def unless_def null_def) + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ + done + qed + +lemma setSchedulerAction_direct: + "\\\ setSchedulerAction sa \\_ s. 
ksSchedulerAction s = sa\" + by (wpsimp simp: setSchedulerAction_def) + +lemma rescheduleRequired_ct_not_inQ: + "\\\ rescheduleRequired \\_. ct_not_inQ\" + apply (simp add: rescheduleRequired_def ct_not_inQ_def) + apply (rule_tac Q="\_ s. ksSchedulerAction s = ChooseNewThread" + in hoare_post_imp, clarsimp) + apply (wp setSchedulerAction_direct) + done + +crunch nosch[wp]: tcbSchedEnqueue "\s. P (ksSchedulerAction s)" + (simp: unless_def) +crunch nosch[wp]: tcbSchedAppend "\s. P (ksSchedulerAction s)" + (simp: unless_def) + +lemma rescheduleRequired_sa_cnt[wp]: + "\\s. True \ rescheduleRequired \\_ s. ksSchedulerAction s = ChooseNewThread \" + unfolding rescheduleRequired_def setSchedulerAction_def + by wpsimp + +lemma possibleSwitchTo_ct_not_inQ: + "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + possibleSwitchTo t \\_. ct_not_inQ\" + apply (simp add: possibleSwitchTo_def curDomain_def) + apply (wpsimp wp: hoare_weak_lift_imp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ + threadGet_wp + | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ + done + +lemma threadSet_tcbState_update_ct_not_inQ[wp]: + "\ct_not_inQ\ threadSet (tcbState_update f) t \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (rule hoare_convert_imp [OF threadSet_nosch]) + apply (simp add: threadSet_def) + apply (wp) + apply (wps setObject_ct_inv) + apply (rule setObject_tcb_strongest) + prefer 2 + apply assumption + apply (clarsimp) + apply (rule hoare_conjI) + apply (rule hoare_weaken_pre) + apply (wps, wp hoare_weak_lift_imp) + apply (wp OMG_getObject_tcb)+ + apply (clarsimp simp: comp_def) + apply (wp hoare_drop_imp) + done + +lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: + "\ct_not_inQ\ threadSet (tcbBoundNotification_update f) t \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (rule hoare_convert_imp [OF threadSet_nosch]) + apply (simp add: threadSet_def) + apply (wp) + apply (wps setObject_ct_inv) + apply (rule setObject_tcb_strongest) + prefer 2 + apply assumption + apply (clarsimp) + apply (rule hoare_conjI) + apply (rule hoare_weaken_pre) + apply wps + apply (wp hoare_weak_lift_imp) + apply (wp OMG_getObject_tcb) + apply (clarsimp simp: comp_def) + apply (wp hoare_drop_imp) + done + +lemma setThreadState_ct_not_inQ: + "\ct_not_inQ\ setThreadState st t \\_. ct_not_inQ\" + (is "\?PRE\ _ \_\") + including no_pre + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_ct_not_inQ) + apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) + apply (wp) + done + +lemma setBoundNotification_ct_not_inQ: + "\ct_not_inQ\ setBoundNotification ntfn t \\_. ct_not_inQ\" + (is "\?PRE\ _ \_\") + by (simp add: setBoundNotification_def, wp) + +crunch ct_not_inQ[wp]: setQueue "ct_not_inQ" + +lemma tcbSchedDequeue_ct_not_inQ[wp]: + "\ct_not_inQ\ tcbSchedDequeue t \\_. ct_not_inQ\" + proof - + have TSNIQ: "\F t. + \ct_not_inQ and (\_. \tcb. \tcbQueued (F tcb))\ + threadSet F t \\_. 
ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (wp hoare_convert_imp [OF threadSet_nosch]) + apply (simp add: threadSet_def) + apply (wp) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest getObject_tcb_wp)+ + apply (case_tac "t = ksCurThread s") + apply (clarsimp simp: obj_at'_def)+ + done + show ?thesis + apply (simp add: tcbSchedDequeue_def) + apply (wp TSNIQ | simp cong: if_cong)+ + done + qed + +crunch ct_idle_or_in_cur_domain'[wp]: setQueue ct_idle_or_in_cur_domain' + (simp: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + +crunch ksDomSchedule[wp]: setQueue "\s. P (ksDomSchedule s)" + +crunch ksCurDomain[wp]: addToBitmap "\s. P (ksCurDomain s)" + (wp: crunch_wps ) +crunch ksDomSchedule[wp]: addToBitmap "\s. P (ksDomSchedule s)" + (wp: crunch_wps ) +crunch ksCurDomain[wp]: removeFromBitmap "\s. P (ksCurDomain s)" + (wp: crunch_wps ) +crunch ksDomSchedule[wp]: removeFromBitmap "\s. P (ksDomSchedule s)" + (wp: crunch_wps ) + +lemma addToBitmap_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ addToBitmap d p \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift| rule obj_at_setObject2 + | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ + done + +lemma removeFromBitmap_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ removeFromBitmap d p \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift| rule obj_at_setObject2 + | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ + done + +crunches tcbQueuePrepend + for ct_idle_or_in_cur_domain'[wp]: ct_idle_or_in_cur_domain' + +lemma tcbSchedEnqueue_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ tcbSchedEnqueue tptr \\_. ct_idle_or_in_cur_domain'\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wp threadSet_ct_idle_or_in_cur_domain' | simp)+ + done + +lemma setSchedulerAction_spec: + "\\\setSchedulerAction ChooseNewThread + \\rv. ct_idle_or_in_cur_domain'\" + apply (simp add:setSchedulerAction_def) + apply wp + apply (simp add:ct_idle_or_in_cur_domain'_def) + done + +lemma rescheduleRequired_ct_idle_or_in_cur_domain'[wp]: + "\\\ rescheduleRequired \\rv. ct_idle_or_in_cur_domain'\" + apply (simp add: rescheduleRequired_def) + apply (wp setSchedulerAction_spec) + done + +lemma rescheduleRequired_ksCurDomain[wp]: + "\ \s. P (ksCurDomain s) \ rescheduleRequired \\_ s. P (ksCurDomain s) \" + apply (simp add: rescheduleRequired_def) + apply wpsimp + done + +lemma rescheduleRequired_ksDomSchedule[wp]: + "\ \s. P (ksDomSchedule s) \ rescheduleRequired \\_ s. P (ksDomSchedule s) \" + by (simp add: rescheduleRequired_def) wpsimp + +lemma setThreadState_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ setThreadState st tptr \\rv. ct_idle_or_in_cur_domain'\" + apply (simp add: setThreadState_def) + apply (wp threadSet_ct_idle_or_in_cur_domain' hoare_drop_imps | simp)+ + done + +lemma setThreadState_ksCurDomain[wp]: + "\ \s. P (ksCurDomain s) \ setThreadState st tptr \\_ s. P (ksCurDomain s) \" + apply (simp add: setThreadState_def) + apply wpsimp + done + +lemma setThreadState_ksDomSchedule[wp]: + "\ \s. P (ksDomSchedule s) \ setThreadState st tptr \\_ s. P (ksDomSchedule s) \" + apply (simp add: setThreadState_def) + apply wpsimp + done + +lemma setBoundNotification_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ setBoundNotification t a \\rv. 
ct_idle_or_in_cur_domain'\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_ct_idle_or_in_cur_domain' hoare_drop_imps | simp)+ + done + +lemma setBoundNotification_ksCurDomain[wp]: + "\ \s. P (ksCurDomain s) \ setBoundNotification st tptr \\_ s. P (ksCurDomain s) \" + apply (simp add: setBoundNotification_def) + apply wpsimp + done + +lemma setBoundNotification_ksDomSchedule[wp]: + "\ \s. P (ksDomSchedule s) \ setBoundNotification st tptr \\_ s. P (ksDomSchedule s) \" + apply (simp add: setBoundNotification_def) + apply wpsimp + done + +crunches rescheduleRequired, setBoundNotification, setThreadState + for ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + +lemma sts_utr[wp]: + "\untyped_ranges_zero'\ setThreadState st t \\_. untyped_ranges_zero'\" + apply (simp add: cteCaps_of_def) + apply (wp untyped_ranges_zero_lift) + done + +lemma removeFromBitmap_bitmapQ: + "\\\ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" + unfolding bitmapQ_defs bitmap_fun_defs + by (wpsimp simp: bitmap_fun_defs) + +lemma removeFromBitmap_valid_bitmapQ[wp]: + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans + and (\s. tcbQueueEmpty (ksReadyQueues s (d,p)))\ + removeFromBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. ?pre s \ \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: removeFromBitmap_valid_bitmapQ_except removeFromBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done + +crunches tcbSchedDequeue + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + (wp: crunch_wps simp: crunch_simps) + +lemma setQueue_nonempty_valid_bitmapQ': + "\\s. valid_bitmapQ s \ \ tcbQueueEmpty (ksReadyQueues s (d, p))\ + setQueue d p queue + \\_ s. \ tcbQueueEmpty queue \ valid_bitmapQ s\" + apply (wpsimp simp: setQueue_def) + apply (fastforce simp: valid_bitmapQ_def bitmapQ_def) + done + +lemma threadSet_valid_bitmapQ_except[wp]: + "threadSet f tcbPtr \valid_bitmapQ_except d p\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (clarsimp simp: valid_bitmapQ_except_def bitmapQ_def) + done + +lemma threadSet_bitmapQ: + "threadSet F t \bitmapQ domain priority\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + by (clarsimp simp: bitmapQ_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend + for valid_bitmapQ_except[wp]: "valid_bitmapQ_except d p" + and valid_bitmapQ[wp]: valid_bitmapQ + and bitmapQ[wp]: "bitmapQ tdom prio" + (wp: crunch_wps) + +lemma tcbQueued_imp_queue_nonempty: + "\list_queue_relation ts (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)) nexts prevs; + \t. t \ set ts \ (inQ (tcbDomain tcb) (tcbPriority tcb) |< tcbs_of' s) t; + ko_at' tcb tcbPtr s; tcbQueued tcb\ + \ \ tcbQueueEmpty (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb))" + apply (clarsimp simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce dest: heap_path_head simp: inQ_def opt_map_def opt_pred_def obj_at'_def) + done + +lemma tcbSchedDequeue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedDequeue tcbPtr \\_. 
valid_bitmapQ\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + apply (wpsimp wp: setQueue_nonempty_valid_bitmapQ' hoare_vcg_conj_lift + hoare_vcg_if_lift2 hoare_vcg_const_imp_lift threadGet_wp + | wp (once) hoare_drop_imps)+ + by (fastforce dest!: tcbQueued_imp_queue_nonempty + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + +lemma tcbSchedDequeue_valid_bitmaps[wp]: + "tcbSchedDequeue tcbPtr \valid_bitmaps\" + by (wpsimp simp: valid_bitmaps_def) + +lemma setQueue_valid_bitmapQ': (* enqueue only *) + "\valid_bitmapQ_except d p and bitmapQ d p and K (\ tcbQueueEmpty q)\ + setQueue d p q + \\_. valid_bitmapQ\" + unfolding setQueue_def bitmapQ_defs + by (wpsimp simp: bitmapQ_def) + +lemma tcbSchedEnqueue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedEnqueue tcbPtr \\_. valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedEnqueue_def + apply (wpsimp simp: tcbQueuePrepend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp) + apply (fastforce simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def split: if_splits) + done + +crunches tcbSchedEnqueue, tcbSchedAppend + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + +lemma tcbSchedEnqueue_valid_bitmaps[wp]: + "tcbSchedEnqueue tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) + done + +crunches rescheduleRequired, threadSet, setThreadState + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +lemma tcbSchedEnqueue_valid_sched_pointers[wp]: + "tcbSchedEnqueue tcbPtr \valid_sched_pointers\" + apply (clarsimp simp: tcbSchedEnqueue_def getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueuePrepend_def + setQueue_def) + apply (clarsimp simp: valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: valid_sched_pointers_def list_queue_relation_def) + apply (case_tac "ts = []", fastforce simp: tcbQueueEmpty_def) + by (intro conjI impI; + force dest!: hd_in_set heap_path_head + simp: inQ_def opt_pred_def opt_map_def obj_at'_def split: if_splits) + +lemma tcbSchedAppend_valid_sched_pointers[wp]: + "tcbSchedAppend tcbPtr \valid_sched_pointers\" + apply (clarsimp simp: tcbSchedAppend_def getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueueAppend_def + setQueue_def) + apply (clarsimp simp: 
valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + by (intro conjI impI; + clarsimp dest: last_in_set + simp: valid_sched_pointers_def opt_map_def list_queue_relation_def tcbQueueEmpty_def + queue_end_valid_def inQ_def opt_pred_def obj_at'_def + split: if_splits option.splits; + fastforce) + +lemma tcbSchedDequeue_valid_sched_pointers[wp]: + "\valid_sched_pointers and sym_heap_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. valid_sched_pointers\" + supply if_split[split del] fun_upd_apply[simp del] + apply (clarsimp simp: tcbSchedDequeue_def getQueue_def setQueue_def) + apply (wpsimp wp: threadSet_wp getTCB_wp threadGet_wp simp: tcbQueueRemove_def) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp split: if_splits) + apply (frule (1) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI) + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (clarsimp simp: valid_sched_pointers_def) + apply (case_tac "ptr = tcbPtr") + apply (force dest!: heap_ls_last_None + simp: prev_queue_head_def queue_end_valid_def inQ_def opt_map_def obj_at'_def) + apply (simp add: fun_upd_def opt_pred_def) + \ \tcbPtr is the head of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def fun_upd_apply prev_queue_head_def + inQ_def opt_pred_def opt_map_def obj_at'_def + split: if_splits option.splits) + \ \tcbPtr is the end of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def queue_end_valid_def inQ_def opt_pred_def + opt_map_def fun_upd_apply obj_at'_def + split: if_splits option.splits) + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI impI allI) + by (clarsimp simp: valid_sched_pointers_def inQ_def opt_pred_def opt_map_def fun_upd_apply obj_at'_def + split: if_splits option.splits; + auto) + +lemma tcbQueueRemove_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts)\ + tcbQueueRemove q tcbPtr + \\_. 
sym_heap_sched_pointers\" + supply heap_path_append[simp del] + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + apply (rename_tac tcb ts) + + \ \tcbPtr is the head of q, which is not a singleton\ + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: list_queue_relation_def Let_def) + apply (prop_tac "tcbSchedNext tcb \ Some tcbPtr") + apply (fastforce dest: heap_ls_no_loops[where p=tcbPtr] simp: opt_map_def obj_at'_def) + apply (fastforce intro: sym_heap_remove_only' + simp: prev_queue_head_def opt_map_red opt_map_upd_triv obj_at'_def) + + \ \tcbPtr is the end of q, which is not a singleton\ + apply (intro impI) + apply (rule conjI) + apply clarsimp + apply (prop_tac "tcbSchedPrev tcb \ Some tcbPtr") + apply (fastforce dest!: heap_ls_prev_no_loops[where p=tcbPtr] + simp: list_queue_relation_def opt_map_def obj_at'_def) + apply (subst fun_upd_swap, fastforce) + apply (fastforce intro: sym_heap_remove_only simp: opt_map_red opt_map_upd_triv obj_at'_def) + + \ \tcbPtr is in the middle of q\ + apply (intro conjI impI allI) + apply (frule (2) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []") + apply (fastforce simp: list_queue_relation_def queue_end_valid_def) + apply (clarsimp simp: list_queue_relation_def) + apply (frule (3) ptr_in_middle_prev_next) + apply (frule heap_ls_distinct) + apply (rename_tac afterPtr beforePtr xs ys) + apply (frule_tac before=beforePtr and middle=tcbPtr and after=afterPtr + in sym_heap_remove_middle_from_chain) + apply (fastforce dest: last_in_set simp: opt_map_def obj_at'_def) + apply (fastforce dest: hd_in_set simp: opt_map_def obj_at'_def) + apply (rule_tac hp="tcbSchedNexts_of s" in sym_heapD2) + apply fastforce + apply (fastforce simp: opt_map_def obj_at'_def) + apply (fastforce simp: opt_map_def obj_at'_def) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def split: if_splits) + done + +lemma tcbQueuePrepend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueuePrepend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply if_split[split del] + apply (clarsimp simp: tcbQueuePrepend_def) + apply (wpsimp wp: threadSet_wp) + apply (prop_tac "tcbPtr \ the (tcbQueueHead q)") + apply (case_tac "ts = []"; + fastforce dest: heap_path_head simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac a=tcbPtr and b="the (tcbQueueHead q)" in sym_heap_connect) + apply assumption + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def tcbQueueEmpty_def) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def tcbQueueEmpty_def) + done + +lemma tcbQueueInsert_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueInsert tcbPtr afterPtr + \\_. 
sym_heap_sched_pointers\" + apply (clarsimp simp: tcbQueueInsert_def) + \ \forwards step in order to name beforePtr below\ + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (rule bind_wp[OF _ assert_sp]) + apply (rule hoare_ex_pre_conj[simplified conj_commute], rename_tac beforePtr) + apply (rule bind_wp[OF _ assert_sp]) + apply (wpsimp wp: threadSet_wp) + apply normalise_obj_at' + apply (prop_tac "tcbPtr \ afterPtr") + apply (clarsimp simp: list_queue_relation_def opt_map_red obj_at'_def) + apply (prop_tac "tcbPtr \ beforePtr") + apply (fastforce dest: sym_heap_None simp: opt_map_def obj_at'_def split: option.splits) + apply (prop_tac "tcbSchedNexts_of s beforePtr = Some afterPtr") + apply (fastforce intro: sym_heapD2 simp: opt_map_def obj_at'_def) + apply (fastforce dest: sym_heap_insert_into_middle_of_chain + simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def) + done + +lemma tcbQueueAppend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueAppend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply if_split[split del] + apply (clarsimp simp: tcbQueueAppend_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def obj_at'_def + split: if_splits) + apply fastforce + apply (drule_tac a="last ts" and b=tcbPtr in sym_heap_connect) + apply (fastforce dest: heap_ls_last_None) + apply assumption + apply (simp add: opt_map_red tcbQueueEmpty_def) + apply (subst fun_upd_swap, simp) + apply (fastforce simp: opt_map_red opt_map_upd_triv) + done + +lemma tcbQueued_update_sym_heap_sched_pointers[wp]: + "threadSet (tcbQueued_update f) tcbPtr \sym_heap_sched_pointers\" + by (rule sym_heap_sched_pointers_lift; + wpsimp wp: threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of) + +lemma tcbSchedEnqueue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedEnqueue tcbPtr + \\_. sym_heap_sched_pointers\" + unfolding tcbSchedEnqueue_def + apply (wpsimp wp: tcbQueuePrepend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def) + done + +lemma tcbSchedAppend_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedAppend tcbPtr + \\_. sym_heap_sched_pointers\" + unfolding tcbSchedAppend_def + apply (wpsimp wp: tcbQueueAppend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def) + done + +lemma tcbSchedDequeue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. 
sym_heap_sched_pointers\" + unfolding tcbSchedDequeue_def + apply (wpsimp wp: tcbQueueRemove_sym_heap_sched_pointers hoare_vcg_if_lift2 threadGet_wp + simp: bitmap_fun_defs) + apply (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def inQ_def opt_pred_def + opt_map_def obj_at'_def) + done + +crunches setThreadState + for valid_sched_pointers[wp]: valid_sched_pointers + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (simp: crunch_simps wp: crunch_wps threadSet_valid_sched_pointers threadSet_sched_pointers) + +lemma sts_invs_minor': + "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st + \ (st \ Inactive \ \ idle' st \ + st' \ Inactive \ \ idle' st')) t + and (\s. t = ksIdleThread s \ idle' st) + and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) + and sch_act_simple + and invs'\ + setThreadState st t + \\rv. invs'\" + including no_pre + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift irqs_masked_lift + setThreadState_ct_not_inQ + | simp add: cteCaps_of_def o_def)+ + apply (clarsimp simp: sch_act_simple_def) + apply (intro conjI) + apply clarsimp + defer + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' + elim!: rsubst[where P=sym_refs] + intro!: ext) + apply (clarsimp elim!: st_tcb_ex_cap'') + apply fastforce + apply fastforce + apply (frule tcb_in_valid_state', clarsimp+) + by (cases st; simp add: valid_tcb_state'_def split: Structures_H.thread_state.split_asm) + +lemma sts_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setThreadState st t \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma sts_pred_tcb_neq': + "\pred_tcb_at' proj P t and K (t \ t')\ + setThreadState st t' + \\_. pred_tcb_at' proj P t\" + apply (simp add: setThreadState_def) + apply (wp threadSet_pred_tcb_at_state | simp)+ + done + +lemma sbn_pred_tcb_neq': + "\pred_tcb_at' proj P t and K (t \ t')\ + setBoundNotification ntfn t' + \\_. pred_tcb_at' proj P t\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_pred_tcb_at_state | simp)+ + done + +lemmas isTS_defs = + isRunning_def isBlockedOnSend_def isBlockedOnReceive_def + isBlockedOnNotification_def isBlockedOnReply_def + isRestart_def isInactive_def + isIdleThreadState_def + +lemma sts_st_tcb_at'_cases: + "\\s. ((t = t') \ (P ts \ tcb_at' t' s)) \ ((t \ t') \ st_tcb_at' P t' s)\ + setThreadState ts t + \\rv. st_tcb_at' P t'\" + apply (wp sts_st_tcb') + apply fastforce + done + +lemma threadSet_ct_running': + "(\tcb. tcbState (f tcb) = tcbState tcb) \ + \ct_running'\ threadSet f t \\rv. ct_running'\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply (wp threadSet_pred_tcb_no_state; simp) + apply wp + done + +lemma tcbQueuePrepend_tcbPriority_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbQueuePrepend_tcbDomain_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbSchedDequeue_tcbPriority[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedDequeue_tcbDomain[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. 
P (tcbDomain tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedEnqueue_tcbPriority_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +lemma tcbSchedEnqueue_tcbDomain_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +crunches rescheduleRequired + for tcbPriority_obj_at'[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t'" + and tcbDomain_obj_at'[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t'" + +lemma setThreadState_tcbPriority_obj_at'[wp]: + "setThreadState ts tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding setThreadState_def + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: obj_at'_def objBits_simps ps_clear_def) + done + +lemma setThreadState_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ setThreadState st t \\_. tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule hoare_pre) + apply wps + apply (simp add: setThreadState_def) + apply (wpsimp wp: threadSet_ct_idle_or_in_cur_domain' hoare_drop_imps)+ + done + +lemma asUser_global_refs': "\valid_global_refs'\ asUser t f \\rv. valid_global_refs'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_global_refs select_f_inv) + done + +lemma sch_act_sane_lift: + assumes "\P. \\s. P (ksSchedulerAction s)\ f \\rv s. P (ksSchedulerAction s)\" + assumes "\P. \\s. P (ksCurThread s)\ f \\rv s. P (ksCurThread s)\" + shows "\sch_act_sane\ f \\rv. sch_act_sane\" + apply (simp add: sch_act_sane_def) + apply (rule hoare_vcg_all_lift) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply (wp assms)+ + done + +lemma storeWord_invs'[wp]: + "\pointerInUserData p and invs'\ doMachineOp (storeWord p w) \\rv. invs'\" +proof - + have aligned_offset_ignore: + "\l. l<8 \ p && mask word_size_bits = 0 \ p + l && ~~ mask 12 = p && ~~ mask 12" + proof - + fix l + assume al: "p && mask word_size_bits = 0" + assume "(l::machine_word) < 8" hence less: "l<2^word_size_bits" by (simp add: word_size_bits_def) + have le: "(word_size_bits::nat) \ 12" by (simp add: word_size_bits_def) + show "?thesis l" + by (rule is_aligned_add_helper[simplified is_aligned_mask, + THEN conjunct2, THEN mask_out_first_mask_some, OF al less le]) + qed + + show ?thesis + apply (wp dmo_invs' no_irq_storeWord no_irq) + apply (clarsimp simp: storeWord_def invs'_def valid_state'_def) + apply (clarsimp simp: valid_machine_state'_def pointerInUserData_def + assert_def simpler_modify_def fail_def bind_def return_def + aligned_offset_ignore bit_simps upto0_7_def + split: if_split_asm) + done +qed + +lemma storeWord_invs_no_cicd'[wp]: + "\pointerInUserData p and invs_no_cicd'\ doMachineOp (storeWord p w) \\rv. invs_no_cicd'\" +proof - + have aligned_offset_ignore: + "\l. 
l<8 \ p && mask 3 = 0 \ p + l && ~~ mask 12 = p && ~~ mask 12" + proof - + fix l + assume al: "p && mask 3 = 0" + assume "(l::machine_word) < 8" hence less: "l<2^3" by simp + have le: "(3::nat) \ 12" by simp + show "?thesis l" + by (rule is_aligned_add_helper[simplified is_aligned_mask, + THEN conjunct2, THEN mask_out_first_mask_some, OF al less le]) + qed + + show ?thesis + apply (wp dmo_invs_no_cicd' no_irq_storeWord no_irq) + apply (clarsimp simp: storeWord_def invs'_def valid_state'_def) + apply (clarsimp simp: valid_machine_state'_def pointerInUserData_def + assert_def simpler_modify_def fail_def bind_def return_def + pageBits_def aligned_offset_ignore upto0_7_def + split: if_split_asm) + done +qed + +lemma storeWordUser_invs[wp]: + "\invs'\ storeWordUser p w \\rv. invs'\" + by (simp add: storeWordUser_def | wp)+ + +lemma hoare_valid_ipc_buffer_ptr_typ_at': + "(\q. \typ_at' UserDataT q\ a \\_. typ_at' UserDataT q\) + \ \valid_ipc_buffer_ptr' p\ a \\_. valid_ipc_buffer_ptr' p\" + unfolding valid_ipc_buffer_ptr'_def2 including no_pre + apply wp + apply assumption + done + +lemma gts_wp': + "\\s. \st. st_tcb_at' ((=) st) t s \ P st s\ getThreadState t \P\" + apply (rule hoare_post_imp) + prefer 2 + apply (rule gts_sp') + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + done + +lemma gbn_wp': + "\\s. \ntfn. bound_tcb_at' ((=) ntfn) t s \ P ntfn s\ getBoundNotification t \P\" + apply (rule hoare_post_imp) + prefer 2 + apply (rule gbn_sp') + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + done + +lemmas threadSet_irq_handlers' = valid_irq_handlers_lift'' [OF threadSet_ctes_ofT] + +lemma get_cap_corres_all_rights_P: + "cte_ptr' = cte_map cte_ptr \ + corres (\x y. cap_relation x y \ P x) + (cte_wp_at P cte_ptr) (pspace_aligned' and pspace_distinct') + (get_cap cte_ptr) (getSlotCap cte_ptr')" + apply (simp add: getSlotCap_def mask_cap_def) + apply (subst bind_return [symmetric]) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres_P [where P=P]]) + apply (insert cap_relation_masks, simp) + apply (wp getCTE_wp')+ + apply simp + apply fastforce + done + +lemma asUser_irq_handlers': + "\valid_irq_handlers'\ asUser t f \\rv. valid_irq_handlers'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_irq_handlers' [OF all_tcbI, OF ball_tcb_cte_casesI] select_f_inv) + done + +(* the brave can try to move this up to near setObject_update_TCB_corres' *) + +definition non_exst_same :: "Structures_H.tcb \ Structures_H.tcb \ bool" +where + "non_exst_same tcb tcb' \ \d p ts. tcb' = tcb\tcbDomain := d, tcbPriority := p, tcbTimeSlice := ts\" + +fun non_exst_same' :: "Structures_H.kernel_object \ Structures_H.kernel_object \ bool" +where + "non_exst_same' (KOTCB tcb) (KOTCB tcb') = non_exst_same tcb tcb'" | + "non_exst_same' _ _ = True" + +lemma non_exst_same_prio_upd[simp]: + "non_exst_same tcb (tcbPriority_update f tcb)" + by (cases tcb, simp add: non_exst_same_def) + +lemma non_exst_same_timeSlice_upd[simp]: + "non_exst_same tcb (tcbTimeSlice_update f tcb)" + by (cases tcb, simp add: non_exst_same_def) + +lemma non_exst_same_domain_upd[simp]: + "non_exst_same tcb (tcbDomain_update f tcb)" + by (cases tcb, simp add: non_exst_same_def) + +lemma set_eobject_corres': + assumes e: "etcb_relation etcb tcb'" + assumes z: "\s. obj_at' P ptr s + \ map_to_ctes ((ksPSpace s) (ptr \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + shows + "corres dc + (tcb_at ptr and is_etcb_at ptr) + (obj_at' (\ko. non_exst_same ko tcb') ptr and obj_at' P ptr + and obj_at' (\tcb. 
(tcbDomain tcb \ tcbDomain tcb' \ tcbPriority tcb \ tcbPriority tcb') + \ \ tcbQueued tcb) ptr) + (set_eobject ptr etcb) (setObject ptr tcb')" + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp: obj_at'_def) + apply (unfold set_eobject_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def + updateObject_default_def in_magnitude_check objBits_simps') + apply (clarsimp simp add: state_relation_def z) + apply (clarsimp simp add: obj_at_def is_etcb_at_def) + apply (simp only: pspace_relation_def dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply (rule conjI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: is_other_obj_relation_type) + apply (drule(1) bspec) + apply (clarsimp simp: non_exst_same_def) + apply (case_tac bb; simp) + apply (clarsimp simp: obj_at'_def other_obj_relation_def tcb_relation_cut_def + cte_relation_def tcb_relation_def + split: if_split_asm)+ + apply (clarsimp simp: aobj_relation_cuts_def split: AARCH64_A.arch_kernel_obj.splits) + apply (rename_tac arch_kernel_obj obj d p ts) + apply (case_tac arch_kernel_obj; simp) + apply (clarsimp simp: pte_relation_def is_tcb_def + split: if_split_asm)+ + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: obj_at'_def) + apply (insert e) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type + split: Structures_A.kernel_object.splits kernel_object.splits arch_kernel_obj.splits) + apply (frule in_ready_q_tcbQueued_eq[where t=ptr]) + apply (rename_tac s' conctcb' abstcb exttcb) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def obj_at'_def non_exst_same_def split: option.splits) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev tcb') = tcbSchedPrevs_of s'") + apply (fastforce simp: opt_map_def obj_at'_def non_exst_same_def split: option.splits) + apply (clarsimp simp: ready_queue_relation_def opt_map_def opt_pred_def obj_at'_def inQ_def + non_exst_same_def + split: option.splits) + apply metis + done + +lemma set_eobject_corres: + assumes tcbs: "non_exst_same tcb' tcbu'" + assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" + assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" + assumes r: "r () ()" + shows + "corres r + (tcb_at add and (\s. ekheap s add = Some etcb)) + (ko_at' tcb' add + and obj_at' (\tcb. 
(tcbDomain tcb \ tcbDomain tcbu' \ tcbPriority tcb \ tcbPriority tcbu') + \ \ tcbQueued tcb) add) + (set_eobject add etcbu) (setObject add tcbu')" + apply (rule_tac F="non_exst_same tcb' tcbu' \ etcb_relation etcbu tcbu'" in corres_req) + apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) + apply (frule(1) pspace_relation_absD) + apply (clarsimp simp: other_obj_relation_def ekheap_relation_def e tcbs) + apply (drule bspec, erule domI) + apply (clarsimp simp: e) + apply (erule conjE) + apply (rule corres_guard_imp) + apply (rule corres_rel_imp) + apply (rule set_eobject_corres'[where P="(=) tcb'"]) + apply simp + defer + apply (simp add: r) + apply (fastforce simp: is_etcb_at_def elim!: obj_at_weakenE) + apply (subst(asm) eq_commute) + apply (clarsimp simp: obj_at'_def) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (subst map_to_ctes_upd_tcb, assumption+) + apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) + apply (subst if_not_P) + apply (fastforce dest: bspec [OF tables', OF ranI]) + apply simp + done + +lemma ethread_set_corresT: + assumes x: "\tcb'. non_exst_same tcb' (f' tcb')" + assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (f' tcb) = getF tcb" + assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation (f etcb) (f' tcb')" + shows + "corres dc + (tcb_at t and valid_etcbs) + (tcb_at' t + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain (f' tcb) + \ tcbPriority tcb \ tcbPriority (f' tcb)) + \ \ tcbQueued tcb) t) + (ethread_set f t) (threadSet f' t)" + apply (simp add: ethread_set_def threadSet_def bind_assoc) + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_get_etcb set_eobject_corres]) + apply (rule x) + apply (erule e) + apply (simp add: z)+ + apply (wp getObject_tcb_wp)+ + apply clarsimp + apply (simp add: valid_etcbs_def tcb_at_st_tcb_at[symmetric]) + apply (force simp: tcb_at_def get_etcb_def obj_at_def) + apply (clarsimp simp: obj_at'_def) + done + +lemmas ethread_set_corres = + ethread_set_corresT [OF _ all_tcbI, OF _ ball_tcb_cte_casesI] + +lemma archTcbUpdate_aux2: "(\tcb. tcb\ tcbArch := f (tcbArch tcb)\) = tcbArch_update f" + by (rule ext, case_tac tcb, simp) + +end +end diff --git a/proof/refine/AARCH64/Tcb_R.thy b/proof/refine/AARCH64/Tcb_R.thy new file mode 100644 index 0000000000..cf1c333592 --- /dev/null +++ b/proof/refine/AARCH64/Tcb_R.thy @@ -0,0 +1,2646 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Tcb_R +imports CNodeInv_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma asUser_setNextPC_corres: + "corres dc (tcb_at t and invs) (tcb_at' t and invs') + (as_user t (setNextPC v)) (asUser t (setNextPC v))" + apply (rule asUser_corres) + apply (rule corres_Id, simp, simp) + apply (rule no_fail_setNextPC) + done + +lemma activateIdleThread_corres: + "corres dc (invs and st_tcb_at idle t) + (invs' and st_tcb_at' idle' t) + (arch_activate_idle_thread t) (activateIdleThread t)" + by (simp add: arch_activate_idle_thread_def activateIdleThread_def) + +lemma activateThread_corres: + "corres dc (invs and ct_in_state activatable) (invs' and ct_in_state' activatable') + activate_thread activateThread" + supply subst_all [simp del] + apply (simp add: activate_thread_def activateThread_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule_tac R="\ts s. 
valid_tcb_state ts s \ (idle ts \ runnable ts) + \ invs s \ st_tcb_at ((=) ts) thread s" + and R'="\ts s. valid_tcb_state' ts s \ (idle' ts \ runnable' ts) + \ invs' s \ st_tcb_at' (\ts'. ts' = ts) thread s" + in corres_split[OF getThreadState_corres]) + apply (rule_tac F="idle rv \ runnable rv" in corres_req, simp) + apply (rule_tac F="idle' rv' \ runnable' rv'" in corres_req, simp) + apply (case_tac rv, simp_all add: + isRunning_def isRestart_def, + safe, simp_all)[1] + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) + apply (rule corres_split_nor[OF asUser_setNextPC_corres]) + apply (rule setThreadState_corres) + apply (simp | wp weak_sch_act_wf_lift_linear)+ + apply (clarsimp simp: st_tcb_at_tcb_at invs_distinct) + apply fastforce + apply (rule corres_guard_imp) + apply (rule activateIdleThread_corres) + apply (clarsimp elim!: st_tcb_weakenE) + apply (clarsimp elim!: pred_tcb'_weakenE) + apply (wp gts_st_tcb gts_st_tcb' gts_st_tcb_at)+ + apply (clarsimp simp: ct_in_state_def tcb_at_invs invs_distinct invs_psp_aligned + elim!: st_tcb_weakenE) + apply (clarsimp simp: tcb_at_invs' ct_in_state'_def + elim!: pred_tcb'_weakenE) + done + + +lemma bindNotification_corres: + "corres dc + (invs and tcb_at t and ntfn_at a) (invs' and tcb_at' t and ntfn_at' a) + (bind_notification t a) (bindNotification t a)" + apply (simp add: bind_notification_def bindNotification_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getNotification_corres]) + apply (rule corres_split[OF setNotification_corres]) + apply (clarsimp simp: ntfn_relation_def split: Structures_A.ntfn.splits) + apply (rule setBoundNotification_corres) + apply (wp)+ + apply auto + done + + +abbreviation + "ct_idle' \ ct_in_state' idle'" + +lemma gts_st_tcb': + "\tcb_at' t\ getThreadState t \\rv. st_tcb_at' (\st. st = rv) t\" + apply (rule hoare_weaken_pre) + apply (rule hoare_post_imp[where Q="\rv s. \rv'. rv = rv' \ st_tcb_at' (\st. st = rv') t s"]) + apply simp + apply (wp hoare_vcg_ex_lift) + apply (clarsimp simp add: pred_tcb_at'_def obj_at'_def) + done + +lemma activateIdle_invs: + "\invs' and ct_idle'\ + activateIdleThread thread + \\rv. invs' and ct_idle'\" + by (simp add: activateIdleThread_def) + +lemma activate_invs': + "\invs' and sch_act_simple and ct_in_state' activatable'\ + activateThread + \\rv. invs' and (ct_running' or ct_idle')\" + apply (simp add: activateThread_def) + apply (rule bind_wp) + apply (rule_tac Q'="\state s. invs' s \ sch_act_simple s + \ st_tcb_at' (\st. st = state) thread s + \ thread = ksCurThread s + \ (runnable' state \ idle' state)" in bind_wp) + apply (case_tac rv; simp add: isTS_defs split del: if_split cong: if_cong) + apply (wp) + apply (clarsimp simp: ct_in_state'_def) + apply (rule_tac Q="\rv. invs' and ct_idle'" in hoare_post_imp, simp) + apply (wp activateIdle_invs) + apply (clarsimp simp: ct_in_state'_def) + apply (rule_tac Q="\rv. invs' and ct_running' and sch_act_simple" + in hoare_post_imp, simp) + apply (rule hoare_weaken_pre) + apply (wp ct_in_state'_set asUser_ct sts_invs_minor' + | wp (once) sch_act_simple_lift)+ + apply (rule_tac Q="\_. st_tcb_at' runnable' thread + and sch_act_simple and invs' + and (\s. 
thread = ksCurThread s)" + in hoare_post_imp, clarsimp) + apply (wp sch_act_simple_lift)+ + apply (clarsimp simp: valid_idle'_def invs'_def valid_state'_def + pred_tcb_at'_def obj_at'_def idle_tcb'_def + elim!: pred_tcb'_weakenE) + apply (wp gts_st_tcb')+ + apply (clarsimp simp: tcb_at_invs' ct_in_state'_def + pred_disj_def) + done + +declare not_psubset_eq[dest!] + +lemma setThreadState_runnable_simp: + "runnable' ts \ setThreadState ts t = + threadSet (tcbState_update (\x. ts)) t" + apply (simp add: setThreadState_def isRunnable_def isStopped_def liftM_def) + apply (subst bind_return[symmetric], rule bind_cong[OF refl]) + apply (drule use_valid[OF _ threadSet_pred_tcb_at_state[where proj="itcbState" and p=t and P="(=) ts"]]) + apply simp + apply (subst bind_known_operation_eq) + apply wp+ + apply clarsimp + apply (subst eq_commute, erule conjI[OF _ refl]) + apply (rule empty_fail_getThreadState) + apply (simp add: getCurThread_def getSchedulerAction_def exec_gets) + apply (auto simp: when_def split: Structures_H.thread_state.split) + done + +lemma activate_sch_act: + "\ct_in_state' activatable' and (\s. P (ksSchedulerAction s))\ + activateThread \\rv s. P (ksSchedulerAction s)\" + apply (simp add: activateThread_def getCurThread_def + cong: if_cong Structures_H.thread_state.case_cong) + apply (rule bind_wp [OF _ gets_sp]) + apply (rule bind_wp[where Q'="\st s. (runnable' or idle') st \ P (ksSchedulerAction s)"]) + apply (rule hoare_pre) + apply (wp | wpc | simp add: setThreadState_runnable_simp)+ + apply (clarsimp simp: ct_in_state'_def cur_tcb'_def pred_tcb_at' + elim!: pred_tcb'_weakenE) + done + +lemma runnable_tsr: + "thread_state_relation ts ts' \ runnable' ts' = runnable ts" + by (case_tac ts, auto) + +lemma idle_tsr: + "thread_state_relation ts ts' \ idle' ts' = idle ts" + by (case_tac ts, auto) + +crunches cancelIPC, setupReplyMaster + for cur [wp]: cur_tcb' + (wp: crunch_wps simp: crunch_simps o_def) + +lemma setCTE_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setCTE c cte + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: weak_sch_act_wf_def) + apply (wp hoare_vcg_all_lift hoare_convert_imp setCTE_pred_tcb_at' setCTE_tcb_in_cur_domain') + done + +lemma setupReplyMaster_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setupReplyMaster thread + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setupReplyMaster_def) + apply (wp) + apply (rule_tac Q="\_ s. 
weak_sch_act_wf (ksSchedulerAction s) s" + in hoare_post_imp, clarsimp) + apply (wp)+ + apply assumption + done + +crunches setup_reply_master, Tcb_A.restart, arch_post_modify_registers + for pspace_aligned[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" + (wp: crunch_wps simp: crunch_simps) + +lemma restart_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and ex_nonz_cap_to' t) + (Tcb_A.restart t) (ThreadDecls_H.restart t)" + apply (simp add: Tcb_A.restart_def Thread_H.restart_def) + apply (simp add: isStopped_def2 liftM_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (clarsimp simp add: runnable_tsr idle_tsr when_def) + apply (rule corres_split_nor[OF cancel_ipc_corres]) + apply (rule corres_split_nor[OF setupReplyMaster_corres]) + apply (rule corres_split_nor[OF setThreadState_corres], simp) + apply (rule corres_split[OF tcbSchedEnqueue_corres possibleSwitchTo_corres]) + apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' + | clarsimp simp: valid_tcb_state'_def | strengthen valid_objs'_valid_tcbs')+ + apply (rule_tac Q="\rv. valid_sched and cur_tcb and pspace_aligned and pspace_distinct" + in hoare_strengthen_post) + apply wp + apply (fastforce simp: valid_sched_def valid_sched_action_def) + apply (rule_tac Q="\rv. invs' and ex_nonz_cap_to' t" in hoare_strengthen_post) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def + valid_tcb_state'_def) + apply wp+ + apply (simp add: valid_sched_def invs_def tcb_at_is_etcb_at invs_psp_aligned invs_distinct) + apply clarsimp + done + +lemma restart_invs': + "\invs' and ex_nonz_cap_to' t and (\s. t \ ksIdleThread s)\ + ThreadDecls_H.restart t \\rv. invs'\" + apply (simp add: restart_def isStopped_def2) + apply (wp setThreadState_nonqueued_state_update + cancelIPC_simple setThreadState_st_tcb + | wp (once) sch_act_simple_lift)+ + apply (wp hoare_convert_imp) + apply (wp setThreadState_nonqueued_state_update + setThreadState_st_tcb) + apply (clarsimp) + apply (wp hoare_convert_imp)[1] + apply (clarsimp) + apply (wp)+ + apply (clarsimp simp: comp_def) + apply (rule hoare_strengthen_post, rule gts_sp') + prefer 2 + apply assumption + apply (clarsimp simp: pred_tcb_at' invs'_def valid_state'_def + ct_in_state'_def) + apply (fastforce simp: pred_tcb_at'_def obj_at'_def) + done + +lemma restart_tcb'[wp]: + "\tcb_at' t'\ ThreadDecls_H.restart t \\rv. tcb_at' t'\" + apply (simp add: restart_def isStopped_def2) + apply wpsimp + done + +lemma no_fail_setRegister: "no_fail \ (setRegister r v)" + by (simp add: setRegister_def) + +lemma suspend_cap_to'[wp]: + "\ex_nonz_cap_to' p\ suspend t \\rv. 
ex_nonz_cap_to' p\" + apply (simp add: suspend_def) + unfolding updateRestartPC_def + apply (wp threadSet_cap_to' | simp)+ + done + +declare det_getRegister[simp] +declare det_setRegister[simp] + +lemma + no_fail_getRegister[wp]: "no_fail \ (getRegister r)" + by (simp add: getRegister_def) + +lemma invokeTCB_ReadRegisters_corres: + "corres (dc \ (=)) + (einvs and tcb_at src and ex_nonz_cap_to src) + (invs' and sch_act_simple and tcb_at' src and ex_nonz_cap_to' src) + (invoke_tcb (tcb_invocation.ReadRegisters src susp n arch)) + (invokeTCB (tcbinvocation.ReadRegisters src susp n arch'))" + apply (simp add: invokeTCB_def performTransfer_def genericTake_def + frame_registers_def gp_registers_def + frameRegisters_def gpRegisters_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (rule corres_when[OF refl]) + apply (rule suspend_corres) + apply (rule corres_split[OF getCurThread_corres]) + apply (simp add: liftM_def[symmetric]) + apply (rule asUser_corres) + apply (rule corres_Id) + apply simp + apply simp + apply (rule no_fail_mapM) + apply (simp add: no_fail_getRegister) + apply wp+ + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def + dest!: idle_no_ex_cap) + apply (clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) + done + +lemma asUser_postModifyRegisters_corres: + "corres dc (tcb_at t) (tcb_at' t and tcb_at' ct) + (arch_post_modify_registers ct t) + (asUser t $ postModifyRegisters ct t)" + apply (rule corres_guard_imp) + apply (clarsimp simp: arch_post_modify_registers_def postModifyRegisters_def when_def) + apply safe + apply (subst submonad_asUser.return) + apply (rule corres_stateAssert_assume) + by simp+ + +crunches restart + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + (simp: crunch_simps wp: crunch_wps threadSet_sched_pointers threadSet_valid_sched_pointers) + +lemma invokeTCB_WriteRegisters_corres: + "corres (dc \ (=)) (einvs and tcb_at dest and ex_nonz_cap_to dest) + (invs' and sch_act_simple and tcb_at' dest and ex_nonz_cap_to' dest) + (invoke_tcb (tcb_invocation.WriteRegisters dest resume values arch)) + (invokeTCB (tcbinvocation.WriteRegisters dest resume values arch'))" + apply (simp add: invokeTCB_def performTransfer_def arch_get_sanitise_register_info_def + sanitiseRegister_def sanitise_register_def getSanitiseRegisterInfo_def + frameRegisters_def gpRegisters_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (fold archThreadGet_def[simplified]) + apply (rule corres_split[OF archThreadGet_VCPU_corres]) + apply (rule corres_split_nor) + apply (rule asUser_corres) + apply (simp add: zipWithM_mapM getRestartPC_def setNextPC_def) + apply (rule corres_Id) + apply (clarsimp simp: mask_def user_vtop_def + cong: if_cong register.case_cong) + apply simp + apply (rule no_fail_pre, wp no_fail_mapM) + apply (clarsimp, (wp no_fail_setRegister | simp)+) + apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]]) + apply (rule corres_split_nor[OF corres_when[OF refl restart_corres]]) + apply (rule corres_split_nor[OF corres_when[OF refl rescheduleRequired_corres]]) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply simp + apply (wp+)[2] + apply ((wp hoare_weak_lift_imp restart_invs' + | strengthen valid_sched_weak_strg einvs_valid_etcbs + invs_weak_sch_act_wf + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues valid_objs'_valid_tcbs' invs_valid_objs' + | 
clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def + dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] + apply (rule_tac Q="\_. einvs and tcb_at dest and ex_nonz_cap_to dest" in hoare_post_imp) + apply (fastforce simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def + dest!: idle_no_ex_cap) + prefer 2 + apply (rule_tac Q="\_. invs' and tcb_at' dest and ex_nonz_cap_to' dest" in hoare_post_imp) + apply (fastforce simp: sch_act_wf_weak invs'_def valid_state'_def dest!: global'_no_ex_cap) + apply (wpsimp simp: archThreadGet_def)+ + apply fastforce + apply fastforce + done + +lemma tcbSchedDequeue_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + tcbSchedDequeue t + \\rv s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + by (wp hoare_convert_imp) + +lemma updateRestartPC_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + updateRestartPC t + \\rv s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + unfolding updateRestartPC_def + apply (wp hoare_convert_imp) + done + +lemma suspend_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + suspend t + \\rv s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + by (wpsimp simp: suspend_def) + +crunches restart, suspend + for cur_tcb'[wp]: cur_tcb' + (wp: crunch_wps threadSet_cur ignore: threadSet) + +lemma invokeTCB_CopyRegisters_corres: + "corres (dc \ (=)) + (einvs and simple_sched_action and tcb_at dest and tcb_at src and ex_nonz_cap_to src and + ex_nonz_cap_to dest) + (invs' and sch_act_simple and tcb_at' dest and tcb_at' src + and ex_nonz_cap_to' src and ex_nonz_cap_to' dest) + (invoke_tcb (tcb_invocation.CopyRegisters dest src susp resume frames ints arch)) + (invokeTCB (tcbinvocation.CopyRegisters dest src susp resume frames ints arch'))" +proof - + have Q: "\src src' des des' r r'. \ src = src'; des = des' \ \ + corres dc (tcb_at src and tcb_at des and invs) + (tcb_at' src' and tcb_at' des' and invs') + (do v \ as_user src (getRegister r); + as_user des (setRegister r' v) + od) + (do v \ asUser src' (getRegister r); + asUser des' (setRegister r' v) + od)" + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split_eqr) + apply (rule asUser_getRegister_corres) + apply (simp add: setRegister_def) + apply (rule asUser_corres) + apply (rule corres_modify') + apply simp + apply simp + apply (simp add: invs_distinct invs_psp_aligned | wp)+ + done + have R: "\src src' des des' xs ys. \ src = src'; des = des'; xs = ys \ \ + corres dc (tcb_at src and tcb_at des and invs) + (tcb_at' src' and tcb_at' des' and invs') + (mapM_x (\r. do v \ as_user src (getRegister r); + as_user des (setRegister r v) + od) xs) + (mapM_x (\r'. do v \ asUser src' (getRegister r'); + asUser des' (setRegister r' v) + od) ys)" + apply (rule corres_mapM_x [where S=Id]) + apply simp + apply (rule Q) + apply (clarsimp simp: set_zip_same | wp)+ + done + have U: "\t. 
corres dc (tcb_at t and invs) (tcb_at' t and invs') + (do pc \ as_user t getRestartPC; as_user t (setNextPC pc) od) + (do pc \ asUser t getRestartPC; asUser t (setNextPC pc) od)" + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) + apply (rule asUser_setNextPC_corres) + apply wp+ + apply (simp add: invs_distinct invs_psp_aligned)+ + done + show ?thesis + apply (simp add: invokeTCB_def performTransfer_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_when [OF refl suspend_corres]], simp) + apply (rule corres_split[OF corres_when [OF refl restart_corres]], simp) + apply (rule corres_split_nor) + apply (rule corres_when[OF refl]) + apply (rule corres_split_nor) + apply (rule R[OF refl refl]) + apply (simp add: frame_registers_def frameRegisters_def) + apply (simp add: getRestartPC_def setNextPC_def dc_def[symmetric]) + apply (rule Q[OF refl refl]) + apply (wp mapM_x_wp' | simp)+ + apply (rule corres_split_nor) + apply (rule corres_when[OF refl]) + apply (rule R[OF refl refl]) + apply (simp add: gpRegisters_def) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]]) + apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply simp + apply (solves \wp hoare_weak_lift_imp\)+ + apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_post_imp) + apply (fastforce simp: invs_def valid_state_def valid_pspace_def valid_sched_weak_strg valid_sched_def) + prefer 2 + apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_post_imp) + apply (fastforce simp: invs'_def valid_state'_def invs_weak_sch_act_wf cur_tcb'_def) + apply ((wp mapM_x_wp' hoare_weak_lift_imp | (simp add: cur_tcb'_def[symmetric])+)+)[8] + apply ((wp hoare_weak_lift_imp restart_invs' | wpc | clarsimp simp: if_apply_def2)+)[2] + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp | simp add: if_apply_def2)+ + apply (fastforce simp: invs_def valid_state_def valid_pspace_def + dest!: idle_no_ex_cap) + by (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) +qed + +lemma readreg_invs': + "\invs' and sch_act_simple and tcb_at' src and ex_nonz_cap_to' src\ + invokeTCB (tcbinvocation.ReadRegisters src susp n arch) + \\rv. invs'\" + by (simp add: invokeTCB_def performTransfer_def | wp + | clarsimp simp: invs'_def valid_state'_def + dest!: global'_no_ex_cap)+ + +crunches getSanitiseRegisterInfo + for invs'[wp]: invs' + and ex_nonz_cap_to'[wp]: "ex_nonz_cap_to' d" + and it'[wp]: "\s. P (ksIdleThread s)" + +lemma writereg_invs': + "\invs' and sch_act_simple and tcb_at' dest and ex_nonz_cap_to' dest\ + invokeTCB (tcbinvocation.WriteRegisters dest resume values arch) + \\rv. invs'\" + by (simp add: invokeTCB_def performTransfer_def | wp restart_invs' | rule conjI + | clarsimp + | clarsimp simp: invs'_def valid_state'_def + dest!: global'_no_ex_cap)+ + +lemma copyreg_invs'': + "\invs' and sch_act_simple and tcb_at' src and tcb_at' dest and ex_nonz_cap_to' src and ex_nonz_cap_to' dest\ + invokeTCB (tcbinvocation.CopyRegisters dest src susp resume frames ints arch) + \\rv. 
invs' and tcb_at' dest\" + apply (simp add: invokeTCB_def performTransfer_def if_apply_def2) + apply (wpsimp wp: mapM_x_wp' restart_invs' hoare_drop_imps + split_del: if_split + simp: if_apply_def2 invs_cur' cur_tcb'_def[symmetric] + cong: rev_conj_cong) + by (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) + +lemma copyreg_invs': + "\invs' and sch_act_simple and tcb_at' src and + tcb_at' dest and ex_nonz_cap_to' src and ex_nonz_cap_to' dest\ + invokeTCB (tcbinvocation.CopyRegisters dest src susp resume frames ints arch) + \\rv. invs'\" + by (rule hoare_strengthen_post, rule copyreg_invs'', simp) + +lemma isRunnable_corres: + "corres (\ts runn. runnable ts = runn) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_thread_state t) (isRunnable t)" + apply (simp add: isRunnable_def) + apply (subst bind_return[symmetric]) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (case_tac rv, clarsimp+) + apply (wp hoare_TrueI)+ + apply auto + done + +lemma tcbSchedDequeue_not_queued: + "\\\ tcbSchedDequeue t + \\rv. obj_at' (Not \ tcbQueued) t\" + apply (simp add: tcbSchedDequeue_def) + apply (wp | simp)+ + apply (rule_tac Q="\rv. obj_at' (\obj. tcbQueued obj = rv) t" + in hoare_post_imp) + apply (clarsimp simp: obj_at'_def) + apply (wp tg_sp' [where P=\, simplified] | simp)+ + done + +lemma threadSet_ct_in_state': + "(\tcb. tcbState (f tcb) = tcbState tcb) \ + \ct_in_state' test\ threadSet f t \\rv. ct_in_state' test\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply (wp threadSet_pred_tcb_no_state)+ + apply simp+ + apply wp + done + +lemma valid_tcb'_tcbPriority_update: + "\valid_tcb' tcb s; f (tcbPriority tcb) \ maxPriority \ \ + valid_tcb' (tcbPriority_update f tcb) s" + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma threadSet_valid_objs_tcbPriority_update: + "\valid_objs' and (\_. x \ maxPriority)\ + threadSet (tcbPriority_update (\_. x)) t + \\_. valid_objs'\" + including no_pre + apply (simp add: threadSet_def) + apply wp + prefer 2 + apply (rule getObject_tcb_sp) + apply (rule hoare_weaken_pre) + apply (rule setObject_tcb_valid_objs) + apply (clarsimp simp: valid_obj'_def) + apply (frule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def) + apply (subgoal_tac "tcb_at' t s") + apply simp + apply (rule valid_tcb'_tcbPriority_update) + apply (fastforce simp: obj_at'_def)+ + done + +lemma tcbSchedDequeue_ct_in_state'[wp]: + "\ct_in_state' test\ tcbSchedDequeue t \\rv. ct_in_state' test\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]; wp) + done + +crunch cur[wp]: tcbSchedDequeue cur_tcb' + +crunches tcbSchedDequeue + for st_tcb_at'[wp]: "\s. P (st_tcb_at' st tcbPtr s)" + +lemma sp_corres2: + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and tcb_at t + and valid_queues and pspace_aligned and pspace_distinct) + (tcb_at' t and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and (\_. 
x \ maxPriority) and sym_heap_sched_pointers and valid_sched_pointers) + (set_priority t x) (setPriority t x)" + apply (simp add: setPriority_def set_priority_def thread_set_priority_def) + apply (rule stronger_corres_guard_imp) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) + apply (rule corres_split[OF ethread_set_corres], simp_all)[1] + apply (simp add: etcb_relation_def) + apply (rule corres_split[OF isRunnable_corres]) + apply (erule corres_when) + apply(rule corres_split[OF getCurThread_corres]) + apply (wp corres_if; clarsimp) + apply (rule rescheduleRequired_corres) + apply (rule possibleSwitchTo_corres) + apply ((clarsimp + | wp hoare_weak_lift_imp hoare_vcg_if_lift hoare_wp_combs gts_wp + isRunnable_wp)+)[4] + apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift + ethread_set_not_queued_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+ + apply ((wp hoare_vcg_imp_lift' hoare_vcg_all_lift + isRunnable_wp threadSet_pred_tcb_no_state + threadSet_valid_objs_tcbPriority_update threadSet_sched_pointers + threadSet_valid_sched_pointers tcb_dequeue_not_queued tcbSchedDequeue_not_queued + threadSet_weak_sch_act_wf + | simp add: etcb_relation_def + | strengthen valid_objs'_valid_tcbs' + obj_at'_weakenE[where P="Not \ tcbQueued"] + | wps)+) + apply (force simp: valid_etcbs_def tcb_at_st_tcb_at[symmetric] state_relation_def + dest: pspace_relation_tcb_at intro: st_tcb_at_opeqI) + apply clarsimp + done + +lemma setPriority_corres: + "corres dc + (einvs and tcb_at t) + (invs' and tcb_at' t and valid_objs' and (\_. x \ maxPriority)) + (set_priority t x) (setPriority t x)" + apply (rule corres_guard_imp) + apply (rule sp_corres2) + apply (simp add: valid_sched_def valid_sched_action_def invs_psp_aligned invs_distinct invs_def) + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak) + done + +lemma setMCPriority_corres: + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (set_mcpriority t x) (setMCPriority t x)" + apply (rule corres_guard_imp) + apply (clarsimp simp: setMCPriority_def set_mcpriority_def) + apply (rule threadset_corresT) + by (clarsimp simp: tcb_relation_def tcb_cap_cases_tcb_mcpriority + tcb_cte_cases_def cteSizeBits_def exst_same_def)+ + +definition + "out_rel fn fn' v v' \ + ((v = None) = (v' = None)) \ + (\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation (case_option id fn v tcb) + (case_option id fn' v' tcb'))" + +lemma out_corresT: + assumes x: "\tcb v. \(getF, setF)\ran tcb_cap_cases. getF (fn v tcb) = getF tcb" + assumes y: "\v. \tcb. \(getF, setF)\ran tcb_cte_cases. getF (fn' v tcb) = getF tcb" + assumes sched_pointers: "\tcb v. tcbSchedPrev (fn' v tcb) = tcbSchedPrev tcb" + "\tcb v. tcbSchedNext (fn' v tcb) = tcbSchedNext tcb" + assumes flag: "\tcb v. tcbQueued (fn' v tcb) = tcbQueued tcb" + assumes e: "\tcb v. exst_same tcb (fn' v tcb)" + shows + "out_rel fn fn' v v' \ + corres dc (tcb_at t and pspace_aligned and pspace_distinct) + \ + (option_update_thread t fn v) + (case_option (return ()) (\x. 
threadSet (fn' x) t) v')" + apply (case_tac v, simp_all add: out_rel_def option_update_thread_def) + apply (clarsimp simp: threadset_corresT [OF _ x y sched_pointers flag e]) + done + +lemmas out_corres = out_corresT [OF _ all_tcbI, OF ball_tcb_cap_casesI ball_tcb_cte_casesI] + +lemma tcbSchedDequeue_sch_act_simple[wp]: + "tcbSchedDequeue t \sch_act_simple\" + by (wpsimp simp: sch_act_simple_def) + +lemma tcbSchedNext_update_tcb_cte_cases: + "(a, b) \ ran tcb_cte_cases \ a (tcbPriority_update f tcb) = a tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma threadSet_priority_invs': + "\invs' and tcb_at' t and K (p \ maxPriority)\ + threadSet (tcbPriority_update (\_. p)) t + \\_. invs'\" + apply (rule hoare_gen_asm) + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace' + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + | clarsimp simp: cteCaps_of_def tcbSchedNext_update_tcb_cte_cases | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: obj_at'_def) + +lemma setP_invs': + "\invs' and tcb_at' t and K (p \ maxPriority)\ setPriority t p \\rv. invs'\" + unfolding setPriority_def + by (wpsimp wp: rescheduleRequired_invs' threadSet_priority_invs') + +crunches setPriority, setMCPriority + for typ_at'[wp]: "\s. P (typ_at' T p s)" + (simp: crunch_simps) + +lemmas setPriority_typ_ats [wp] = typ_at_lifts [OF setPriority_typ_at'] + +crunches setPriority, setMCPriority + for valid_cap[wp]: "valid_cap' c" + (wp: getObject_inv_tcb) + + +definition + newroot_rel :: "(cap \ cslot_ptr) option \ (capability \ machine_word) option \ bool" +where + "newroot_rel \ opt_rel (\(cap, ptr) (cap', ptr'). + cap_relation cap cap' + \ ptr' = cte_map ptr)" + +function recursive :: "nat \ ((nat \ nat), unit) nondet_monad" +where + "recursive (Suc n) s = (do f \ gets fst; s \ gets snd; put ((f + s), n); recursive n od) s" +| "recursive 0 s = (modify (\(a, b). 
(a, 0))) s" + by (case_tac "fst x", fastforce+) + +termination recursive + apply (rule recursive.termination) + apply (rule wf_measure [where f=fst]) + apply simp + done + +lemma cte_map_tcb_0: + "cte_map (t, tcb_cnode_index 0) = t" + by (simp add: cte_map_def tcb_cnode_index_def) + +lemma cte_map_tcb_1: + "cte_map (t, tcb_cnode_index 1) = t + 2^cteSizeBits" + by (simp add: cte_map_def tcb_cnode_index_def to_bl_1 objBits_defs cte_level_bits_def) + +lemma sameRegion_corres2: + "\ cap_relation c c'; cap_relation d d' \ + \ same_region_as c d = sameRegionAs c' d'" + by (erule(1) same_region_as_relation) + +lemma sameObject_corres2: + "\ cap_relation c c'; cap_relation d d' \ + \ same_object_as c d = sameObjectAs c' d'" + apply (frule(1) sameRegion_corres2[symmetric, where c=c and d=d]) + apply (case_tac c; simp add: sameObjectAs_def same_object_as_def + isCap_simps is_cap_simps bits_of_def) + apply (case_tac d; simp) + apply (case_tac d'; simp) + apply (rename_tac arch_cap) + apply clarsimp + apply (case_tac d, (simp_all split: arch_cap.split)[11]) + apply (rename_tac arch_capa) + apply (clarsimp simp add: AARCH64_H.sameObjectAs_def Let_def) + apply (intro conjI impI) + apply (case_tac arch_cap; simp add: sameRegionAs_def isCap_simps) + apply (case_tac arch_capa; fastforce simp add: add_mask_fold) + apply (case_tac arch_cap; simp add: sameRegionAs_def isCap_simps) + apply (case_tac arch_capa; simp) + done + +lemma checkCapAt_corres: + assumes r: "cap_relation cap cap'" + assumes c: "corres dc Q Q' f f'" + assumes Q: "\s. P s \ cte_wp_at (same_object_as cap) slot s \ Q s" + assumes Q': "\s. P' s \ cte_wp_at' (sameObjectAs cap' o cteCap) (cte_map slot) s \ Q' s" + shows "corres dc (P and cte_at slot and invs) (P' and pspace_aligned' and pspace_distinct') + (check_cap_at cap slot f) + (checkCapAt cap' (cte_map slot) f')" using r c + apply (simp add: check_cap_at_def checkCapAt_def liftM_def when_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_if [unfolded if_apply_def2]) + apply (erule(1) sameObject_corres2) + apply assumption + apply (rule corres_trivial, simp) + apply (wp get_cap_wp getCTE_wp')+ + apply (fastforce elim: cte_wp_at_weakenE intro: Q) + apply (fastforce elim: cte_wp_at_weakenE' intro: Q') + done + +lemma checkCapAt_weak_corres: + assumes r: "cap_relation cap cap'" + assumes c: "corres dc P P' f f'" + shows "corres dc (P and cte_at slot and invs) (P' and pspace_aligned' and pspace_distinct') + (check_cap_at cap slot f) + (checkCapAt cap' (cte_map slot) f')" + apply (rule checkCapAt_corres, rule r, rule c) + apply auto + done + +defs + assertDerived_def: + "assertDerived src cap f \ + do stateAssert (\s. cte_wp_at' (is_derived' (ctes_of s) src cap o cteCap) src s) []; f od" + +lemma checkCapAt_cteInsert_corres: + "cap_relation new_cap newCap \ + corres dc (einvs and cte_wp_at (\c. c = cap.NullCap) (target, ref) + and cte_at slot and K (is_cnode_or_valid_arch new_cap + \ (is_pt_cap new_cap \ cap_asid new_cap \ None)) + and cte_wp_at (\c. obj_refs c = obj_refs new_cap + \ table_cap_ref c = table_cap_ref new_cap \ + vspace_asid c = vspace_asid new_cap) src_slot) + (invs' and cte_wp_at' (\cte. 
cteCap cte = NullCap) (cte_map (target, ref)) + and valid_cap' newCap) + (check_cap_at new_cap src_slot + (check_cap_at (cap.ThreadCap target) slot + (cap_insert new_cap src_slot (target, ref)))) + (checkCapAt newCap (cte_map src_slot) + (checkCapAt (ThreadCap target) (cte_map slot) + (assertDerived (cte_map src_slot) newCap (cteInsert newCap (cte_map src_slot) (cte_map (target, ref))))))" + apply (rule corres_guard_imp) + apply (rule_tac P="cte_wp_at (\c. c = cap.NullCap) (target, ref) and + cte_at slot and + cte_wp_at (\c. obj_refs c = obj_refs new_cap + \ table_cap_ref c = table_cap_ref new_cap \ vspace_asid c = vspace_asid new_cap) src_slot + and einvs and K (is_cnode_or_valid_arch new_cap + \ (is_pt_cap new_cap \ cap_asid new_cap \ None))" + and + P'="cte_wp_at' (\c. cteCap c = NullCap) (cte_map (target, ref)) + and invs' and valid_cap' newCap" + in checkCapAt_corres, assumption) + apply (rule checkCapAt_weak_corres, simp) + apply (unfold assertDerived_def)[1] + apply (rule corres_stateAssert_implied [where P'=\]) + apply simp + apply (erule cteInsert_corres [OF _ refl refl]) + apply clarsimp + apply (drule cte_wp_at_norm [where p=src_slot]) + apply (case_tac src_slot) + apply (clarsimp simp: state_relation_def) + apply (drule (1) pspace_relation_cte_wp_at) + apply fastforce + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule (2) is_derived_eq [THEN iffD1]) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption + apply clarsimp + apply (rule conjI, fastforce)+ + apply (cases src_slot) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (rule conjI) + apply (frule same_object_as_cap_master) + apply (clarsimp simp: cap_master_cap_simps is_cnode_or_valid_arch_def + is_cap_simps is_valid_vtable_root_def + dest!: cap_master_cap_eqDs) + apply (erule(1) checked_insert_is_derived) + apply simp + apply simp + apply fastforce + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply clarsimp + apply fastforce + done + +lemma capBadgeNone_masters: + "capMasterCap cap = capMasterCap cap' + \ (capBadge cap = None) = (capBadge cap' = None)" + apply (rule master_eqI) + apply (auto simp add: capBadge_def capMasterCap_def isCap_simps + split: capability.split) + done + +definition + "vspace_asid' cap \ case cap of + ArchObjectCap (PageTableCap _ _ (Some (asid, _))) \ Some asid + | _ \ None" + +lemma untyped_derived_eq_from_sameObjectAs: + "sameObjectAs cap cap2 + \ untyped_derived_eq cap cap2" + by (clarsimp simp: untyped_derived_eq_def sameObjectAs_def2 isCap_Master) + +lemmas vspace_asid'_simps [simp] = + vspace_asid'_def [split_simps capability.split arch_capability.split option.split prod.split] + +lemma checked_insert_tcb_invs'[wp]: + "\invs' and cte_wp_at' (\cte. cteCap cte = NullCap) slot + and valid_cap' new_cap + and K (capBadge new_cap = None) + and K (slot \ cte_refs' (ThreadCap target) 0) + and K (\ isReplyCap new_cap \ \ isIRQControlCap new_cap)\ + checkCapAt new_cap src_slot + (checkCapAt (ThreadCap target) slot' + (assertDerived src_slot new_cap (cteInsert new_cap src_slot slot))) \\rv. 
invs'\" + supply option.case_cong[cong] + apply (simp add: checkCapAt_def liftM_def assertDerived_def stateAssert_def) + apply (wp getCTE_cteCap_wp cteInsert_invs) + apply (clarsimp split: option.splits) + apply (subst(asm) tree_cte_cteCap_eq[unfolded o_def]) + apply (clarsimp split: option.splits) + apply (rule conjI) + apply (clarsimp simp: sameObjectAs_def3) + apply (clarsimp simp: tree_cte_cteCap_eq[unfolded o_def] + is_derived'_def untyped_derived_eq_from_sameObjectAs + ex_cte_cap_to'_cteCap) + apply (erule sameObjectAsE)+ + apply (clarsimp simp: badge_derived'_def) + apply (frule capBadgeNone_masters, simp) + apply (rule conjI) + apply (rule_tac x=slot' in exI) + subgoal by (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps cteCaps_of_def) + apply (erule(1) valid_irq_handlers_ctes_ofD) + apply (clarsimp simp: invs'_def valid_state'_def) + done + +lemma checkCap_inv: + assumes x: "\P\ f \\rv. P\" + shows "\P\ checkCapAt cap slot f \\rv. P\" + unfolding checkCapAt_def + by (wp x | simp)+ + +lemma isValidVTableRootD: + "isValidVTableRoot cap + \ isArchObjectCap cap \ isPageTableCap (capCap cap) + \ capPTMappedAddress (capCap cap) \ None" + by (simp add: isValidVTableRoot_def isVTableRoot_def isCap_simps + split: capability.split_asm arch_capability.split_asm + option.split_asm) + +lemma assertDerived_wp: + "\P and (\s. cte_wp_at' (is_derived' (ctes_of s) slot cap o cteCap) slot s)\ f \Q\ \ + \P\ assertDerived slot cap f \Q\" + unfolding assertDerived_def by wpsimp + +lemma setMCPriority_invs': + "\invs' and tcb_at' t and K (prio \ maxPriority)\ setMCPriority t prio \\rv. invs'\" + unfolding setMCPriority_def + apply (rule hoare_gen_asm) + apply (rule hoare_pre) + by (wp threadSet_invs_trivial, (clarsimp simp: inQ_def)+) + +lemma valid_tcb'_tcbMCP_update: + "\valid_tcb' tcb s \ f (tcbMCP tcb) \ maxPriority\ \ valid_tcb' (tcbMCP_update f tcb) s" + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma setMCPriority_valid_objs'[wp]: + "\valid_objs' and K (prio \ maxPriority)\ setMCPriority t prio \\rv. valid_objs'\" + unfolding setMCPriority_def + including no_pre + apply (simp add: threadSet_def) + apply wp + prefer 2 + apply (rule getObject_tcb_sp) + apply (rule hoare_weaken_pre) + apply (rule setObject_tcb_valid_objs) + apply (clarsimp simp: valid_obj'_def) + apply (frule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def) + apply (subgoal_tac "tcb_at' t s") + apply simp + apply (rule valid_tcb'_tcbMCP_update) + apply (fastforce simp: obj_at'_def)+ + done + +crunch sch_act_simple[wp]: setMCPriority sch_act_simple + (wp: ssa_sch_act_simple crunch_wps rule: sch_act_simple_lift simp: crunch_simps) + +abbreviation "valid_option_prio \ case_option True (\(p, auth). p \ maxPriority)" + +definition valid_tcb_invocation :: "tcbinvocation \ bool" where + "valid_tcb_invocation i \ case i of + ThreadControl _ _ _ mcp p _ _ _ \ valid_option_prio p \ valid_option_prio mcp + | _ \ True" + +lemma thread_set_ipc_weak_valid_sched_action: + "\ einvs and simple_sched_action\ + thread_set (tcb_ipc_buffer_update f) a + \\x. 
weak_valid_sched_action\" + apply (rule hoare_pre) + apply (simp add: thread_set_def) + apply (wp set_object_wp) + apply (simp | intro impI | elim exE conjE)+ + apply (frule get_tcb_SomeD) + apply (erule ssubst) + apply (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def + get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) + done + +lemma threadcontrol_corres_helper3: + "\einvs and simple_sched_action\ + check_cap_at cap p (check_cap_at (cap.ThreadCap cap') slot (cap_insert cap p (t, tcb_cnode_index 4))) + \\_ s. weak_valid_sched_action s \ in_correct_ready_q s \ ready_qs_distinct s \ valid_etcbs s + \ pspace_aligned s \ pspace_distinct s\" + apply (wpsimp + | strengthen valid_sched_valid_queues valid_queues_in_correct_ready_q + valid_sched_weak_strg[rule_format] valid_queues_ready_qs_distinct)+ + apply (wpsimp wp: check_cap_inv) + apply (fastforce simp: valid_sched_def) + done + +lemma threadcontrol_corres_helper4: + "isArchObjectCap ac \ + \invs' and cte_wp_at' (\cte. cteCap cte = capability.NullCap) (cte_map (a, tcb_cnode_index 4)) + and valid_cap' ac\ + checkCapAt ac (cte_map (ab, ba)) + (checkCapAt (capability.ThreadCap a) (cte_map slot) + (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) + \\_ s. sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_tcbs' s\" + apply (wpsimp wp: + | strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + invs_valid_objs' valid_objs'_valid_tcbs')+ + by (case_tac ac; + clarsimp simp: capBadge_def isCap_simps tcb_cnode_index_def cte_map_def cte_wp_at'_def + cte_level_bits_def) + +crunches cap_delete + for pspace_alinged[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" + (simp: crunch_simps preemption_point_def wp: crunch_wps OR_choiceE_weak_wp) + +lemmas check_cap_pspace_aligned[wp] = check_cap_inv[of pspace_aligned] +lemmas check_cap_pspace_distinct[wp] = check_cap_inv[of pspace_distinct] + +lemma is_valid_vtable_root_simp: + "is_valid_vtable_root cap = + (\r asid vref. cap = cap.ArchObjectCap (arch_cap.PageTableCap r VSRootPT_T (Some (asid, vref))))" + by (simp add: is_valid_vtable_root_def + split: cap.splits arch_cap.splits option.splits pt_type.splits) + +lemma threadSet_invs_trivialT2: + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows + "\\s. invs' s \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)\ + threadSet F t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (rule hoare_gen_asm [where P="\tcb. 
is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits"]) + apply (wp threadSet_valid_pspace'T + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_global_refsT + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_valid_dom_schedule' + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_idle'T + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + | clarsimp simp: assms cteCaps_of_def valid_arch_tcb'_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: obj_at'_def) + +lemma getThreadBufferSlot_dom_tcb_cte_cases: + "\\\ getThreadBufferSlot a \\rv s. rv \ (+) a ` dom tcb_cte_cases\" + by (wpsimp simp: tcb_cte_cases_def getThreadBufferSlot_def locateSlot_conv cte_level_bits_def + tcbIPCBufferSlot_def cteSizeBits_def) + +lemma tcb_at'_cteInsert[wp]: + "\\s. tcb_at' (ksCurThread s) s\ cteInsert t x y \\_ s. tcb_at' (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps cteInsert_ct, wp, simp) + +lemma tcb_at'_threadSet[wp]: + "\\s. tcb_at' (ksCurThread s) s\ threadSet (tcbIPCBuffer_update (\_. b)) a \\_ s. tcb_at' (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps threadSet_tcb', wp, simp) + +lemma cteDelete_it [wp]: + "\\s. P (ksIdleThread s)\ cteDelete slot e \\_ s. P (ksIdleThread s)\" + by (rule cteDelete_preservation) (wp | clarsimp)+ + +lemmas threadSet_invs_trivial2 = + threadSet_invs_trivialT2 [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] + +lemma valid_tcb_ipc_buffer_update: + "\buf s. is_aligned buf msg_align_bits + \ (\tcb. valid_tcb' tcb s \ valid_tcb' (tcbIPCBuffer_update (\_. buf) tcb) s)" + by (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + +lemma checkCap_wp: + assumes x: "\P\ f \\rv. Q\" + and PQ: "\s. P s \ Q s" + shows "\P\ checkCapAt cap slot f \\rv. Q\" + unfolding checkCapAt_def + apply (wp x) + apply (rule hoare_strengthen_post[rotated]) + apply clarsimp + apply (strengthen PQ) + apply assumption + apply simp + apply (wp x | simp)+ + done + +lemma assertDerived_wp_weak: + "\P\ f \Q\ \ \P\ assertDerived slot cap f \Q\" + apply (wpsimp simp: assertDerived_def) + done + +crunches option_update_thread + for aligned[wp]: "pspace_aligned" + and distinct[wp]: "pspace_distinct" + +lemma threadSet_invs_tcbIPCBuffer_update: + "\\s. invs' s \ (\tcb. is_aligned (tcbIPCBuffer (tcbIPCBuffer_update f tcb)) msg_align_bits)\ + threadSet (tcbIPCBuffer_update f) t + \\_. invs'\" + by (wp threadSet_invs_trivialT2; simp add: tcb_cte_cases_def cteSizeBits_def) + +lemma transferCaps_corres: + assumes x: "newroot_rel e e'" and y: "newroot_rel f f'" + and z: "(case g of None \ g' = None + | Some (vptr, g'') \ \g'''. g' = Some (vptr, g''') + \ newroot_rel g'' g''')" + and u: "{e, f, option_map undefined g} \ {None} \ sl' = cte_map slot" + shows + "corres (dc \ (=)) + (einvs and simple_sched_action and tcb_at a and + (\s. 
{e, f, option_map undefined g} \ {None} \ cte_at slot s) and + case_option \ (valid_cap o fst) e and + case_option \ (cte_at o snd) e and + case_option \ (no_cap_to_obj_dr_emp o fst) e and + K (case_option True (is_cnode_cap o fst) e) and + case_option \ (valid_cap o fst) f and + case_option \ (cte_at o snd) f and + case_option \ (no_cap_to_obj_dr_emp o fst) f and + K (case_option True (is_valid_vtable_root o fst) f) + and case_option \ (case_option \ (cte_at o snd) o snd) g + and case_option \ (case_option \ (no_cap_to_obj_dr_emp o fst) o snd) g + and case_option \ (case_option \ (valid_cap o fst) o snd) g + and K (case_option True ((\v. is_aligned v msg_align_bits) o fst) g) + and K (case_option True (\v. case_option True ((swp valid_ipc_buffer_cap (fst v) + and is_arch_cap and is_cnode_or_valid_arch) o fst) (snd v)) g) + and (\s. case_option True (\(pr, auth). mcpriority_tcb_at (\m. pr \ m) auth s) p_auth) \ \only set prio \ mcp\ + and (\s. case_option True (\(mcp, auth). mcpriority_tcb_at (\m. mcp \ m) auth s) mcp_auth) \ \only set mcp \ prev_mcp\) + (invs' and sch_act_simple and case_option \ (valid_cap' o fst) e' and + (\s. {e', f', option_map undefined g'} \ {None} \ cte_at' (cte_map slot) s) and + K (case_option True (isCNodeCap o fst) e') and + case_option \ (valid_cap' o fst) f' and + K (case_option True (isValidVTableRoot o fst) f') and + K (case_option True ((\v. is_aligned v msg_align_bits) o fst) g') and + K (case_option True (case_option True (isArchObjectCap o fst) o snd) g') and + case_option \ (case_option \ (valid_cap' o fst) o snd) g' and + tcb_at' a and ex_nonz_cap_to' a and K (valid_option_prio p_auth \ valid_option_prio mcp_auth) and + (\s. case_option True (\(pr, auth). mcpriority_tcb_at' ((\) pr) auth s) p_auth) and + (\s. case_option True (\(m, auth). mcpriority_tcb_at' ((\) m) auth s) mcp_auth)) + (invoke_tcb (tcb_invocation.ThreadControl a slot (option_map to_bl b') mcp_auth p_auth e f g)) + (invokeTCB (tcbinvocation.ThreadControl a sl' b' mcp_auth p_auth e' f' g'))" +proof - + have P: "\t v. corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (option_update_thread t (tcb_fault_handler_update o (%x _. x)) + (option_map to_bl v)) + (case v of None \ return () + | Some x \ threadSet (tcbFaultHandler_update (%_. x)) t)" + apply (rule out_corres, simp_all add: exst_same_def) + apply (case_tac v, simp_all add: out_rel_def) + apply (safe, case_tac tcb', simp add: tcb_relation_def split: option.split) + done + have R: "\t v. corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (option_update_thread t (tcb_ipc_buffer_update o (%x _. x)) v) + (case v of None \ return () + | Some x \ threadSet (tcbIPCBuffer_update (%_. x)) t)" + apply (rule out_corres, simp_all add: exst_same_def) + apply (case_tac v, simp_all add: out_rel_def) + apply (safe, case_tac tcb', simp add: tcb_relation_def) + done + have S: "\t x. corres dc (einvs and tcb_at t) (invs' and tcb_at' t and valid_objs' and K (valid_option_prio p_auth)) + (case_option (return ()) (\(p, auth). set_priority t p) p_auth) + (case_option (return ()) (\p'. setPriority t (fst p')) p_auth)" + apply (case_tac p_auth; clarsimp simp: setPriority_corres) + done + have S': "\t x. corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (case_option (return ()) (\(mcp, auth). set_mcpriority t mcp) mcp_auth) + (case_option (return ()) (\mcp'. setMCPriority t (fst mcp')) mcp_auth)" + apply(case_tac mcp_auth; clarsimp simp: setMCPriority_corres) + done + have T: "\x x' ref getfn target. 
+ \ newroot_rel x x'; getfn = return (cte_map (target, ref)); + x \ None \ {e, f, option_map undefined g} \ {None} \ \ + corres (dc \ dc) + (einvs and simple_sched_action and cte_at (target, ref) and emptyable (target, ref) and + (\s. \(sl, c) \ (case x of None \ {} | Some (c, sl) \ {(sl, c), (slot, c)}). + cte_at sl s \ no_cap_to_obj_dr_emp c s \ valid_cap c s) + and K (case x of None \ True + | Some (c, sl) \ is_cnode_or_valid_arch c)) + (invs' and sch_act_simple and cte_at' (cte_map (target, ref)) and + (\s. \cp \ (case x' of None \ {} | Some (c, sl) \ {c}). s \' cp)) + (case x of None \ returnOk () + | Some pr \ case_prod (\new_cap src_slot. + doE cap_delete (target, ref); + liftE $ check_cap_at new_cap src_slot $ + check_cap_at (cap.ThreadCap target) slot $ + cap_insert new_cap src_slot (target, ref) + odE) pr) + (case x' of + None \ returnOk () + | Some pr \ (\(newCap, srcSlot). + do slot \ getfn; + doE uu \ cteDelete slot True; + liftE (checkCapAt newCap srcSlot + (checkCapAt (capability.ThreadCap target) sl' + (assertDerived srcSlot newCap (cteInsert newCap srcSlot slot)))) + odE + od) pr)" + apply (case_tac "x = None") + apply (simp add: newroot_rel_def returnOk_def) + apply (drule(1) mp, drule mp [OF u]) + apply (clarsimp simp add: newroot_rel_def returnOk_def split_def) + apply (rule corres_gen_asm) + apply (rule corres_guard_imp) + apply (rule corres_split_norE[OF cteDelete_corres]) + apply (simp del: dc_simp) + apply (erule checkCapAt_cteInsert_corres) + apply (fold validE_R_def) + apply (wp cap_delete_deletes cap_delete_cte_at cap_delete_valid_cap + | strengthen use_no_cap_to_obj_asid_strg)+ + apply (wp cteDelete_invs' cteDelete_deletes) + apply (clarsimp dest!: is_cnode_or_valid_arch_cap_asid) + apply clarsimp + done + have U2: "getThreadBufferSlot a = return (cte_map (a, tcb_cnode_index 4))" + by (simp add: getThreadBufferSlot_def locateSlot_conv + cte_map_def tcb_cnode_index_def tcbIPCBufferSlot_def + cte_level_bits_def) + have T2: "corres (dc \ dc) + (einvs and simple_sched_action and tcb_at a and + (\s. \(sl, c) \ (case g of None \ {} | Some (x, v) \ {(slot, cap.NullCap)} \ + (case v of None \ {} | Some (c, sl) \ {(sl, c), (slot, c)})). + cte_at sl s \ no_cap_to_obj_dr_emp c s \ valid_cap c s) + and K (case g of None \ True | Some (x, v) \ (case v of + None \ True | Some (c, sl) \ is_cnode_or_valid_arch c + \ is_arch_cap c + \ valid_ipc_buffer_cap c x + \ is_aligned x msg_align_bits))) + (invs' and sch_act_simple and tcb_at' a and + (\s. \cp \ (case g' of None \ {} | Some (x, v) \ (case v of + None \ {} | Some (c, sl) \ {c})). s \' cp) and + K (case g' of None \ True | Some (x, v) \ is_aligned x msg_align_bits + \ (case v of None \ True | Some (ac, _) \ isArchObjectCap ac)) ) + (case_option (returnOk ()) + (case_prod + (\ptr frame. + doE cap_delete (a, tcb_cnode_index 4); + do y \ thread_set (tcb_ipc_buffer_update (\_. ptr)) a; + y \ case_option (return ()) + (case_prod + (\new_cap src_slot. + check_cap_at new_cap src_slot $ + check_cap_at (cap.ThreadCap a) slot $ + cap_insert new_cap src_slot (a, tcb_cnode_index 4))) + frame; + cur \ gets cur_thread; + liftE $ when (cur = a) (reschedule_required) + od + odE)) + g) + (case_option (returnOk ()) + (\(ptr, frame). + do bufferSlot \ getThreadBufferSlot a; + doE y \ cteDelete bufferSlot True; + do y \ threadSet (tcbIPCBuffer_update (\_. ptr)) a; + y \ (case_option (return ()) + (case_prod + (\newCap srcSlot. 
+ checkCapAt newCap srcSlot $ + checkCapAt + (capability.ThreadCap a) + sl' $ + assertDerived srcSlot newCap $ + cteInsert newCap srcSlot bufferSlot)) + frame); + cur \ getCurThread; + liftE $ when (cur = a) rescheduleRequired + od odE od) + g')" (is "corres _ ?T2_pre ?T2_pre' _ _") + using z u + apply - + apply (rule corres_guard_imp[where P=P and P'=P' + and Q="P and cte_at (a, tcb_cnode_index 4)" + and Q'="P' and cte_at' (cte_map (a, cap))" for P P' a cap]) + apply (cases g) + apply (simp, simp add: returnOk_def) + apply (clarsimp simp: liftME_def[symmetric] U2 liftE_bindE) + apply (case_tac b, simp_all add: newroot_rel_def) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule cteDelete_corres) + apply (rule_tac F="is_aligned aa msg_align_bits" in corres_gen_asm2) + apply (rule corres_split_nor) + apply (rule threadset_corres, + (simp add: tcb_relation_def), (simp add: exst_same_def)+)[1] + apply (rule corres_split[OF getCurThread_corres], clarsimp) + apply (rule corres_when[OF refl rescheduleRequired_corres]) + apply (wpsimp wp: gct_wp)+ + apply (strengthen valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_ipc_weak_valid_sched_action thread_set_valid_queues + hoare_drop_imp) + apply clarsimp + apply (strengthen valid_objs'_valid_tcbs' invs_valid_objs')+ + apply (wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers hoare_drop_imp + threadSet_invs_tcbIPCBuffer_update) + apply (clarsimp simp: pred_conj_def) + apply (strengthen einvs_valid_etcbs valid_queues_in_correct_ready_q + valid_sched_valid_queues)+ + apply wp + apply (clarsimp simp: pred_conj_def) + apply (strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + valid_objs'_valid_tcbs' invs_valid_objs') + apply (wpsimp wp: cteDelete_invs' hoare_vcg_conj_lift) + apply (fastforce simp: emptyable_def) + apply fastforce + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split_norE[OF cteDelete_corres]) + apply (rule_tac F="is_aligned aa msg_align_bits" + in corres_gen_asm) + apply (rule_tac F="isArchObjectCap ac" in corres_gen_asm2) + apply (rule corres_split_nor) + apply (rule threadset_corres, + simp add: tcb_relation_def, (simp add: exst_same_def)+) + apply (rule corres_split) + apply (erule checkCapAt_cteInsert_corres) + apply (rule corres_split[OF getCurThread_corres], clarsimp) + apply (rule corres_when[OF refl rescheduleRequired_corres]) + apply (wp gct_wp)+ + apply (wp hoare_drop_imp threadcontrol_corres_helper3)[1] + apply (wp hoare_drop_imp threadcontrol_corres_helper4)[1] + apply (wp thread_set_tcb_ipc_buffer_cap_cleared_invs + thread_set_cte_wp_at_trivial thread_set_not_state_valid_sched + | simp add: ran_tcb_cap_cases)+ + apply (wp threadSet_invs_trivial + threadSet_cte_wp_at' | simp)+ + apply (wp cap_delete_deletes cap_delete_cte_at + cap_delete_valid_cap cteDelete_deletes + cteDelete_invs' + | strengthen use_no_cap_to_obj_asid_strg + | clarsimp simp: inQ_def)+ + apply (clarsimp simp: cte_wp_at_caps_of_state + dest!: is_cnode_or_valid_arch_cap_asid) + apply (fastforce simp: emptyable_def) + apply (clarsimp simp: inQ_def) + apply (clarsimp simp: obj_at_def is_tcb) + apply (rule cte_wp_at_tcbI, simp, fastforce, simp) + apply (clarsimp simp: cte_map_def tcb_cnode_index_def obj_at'_def objBits_simps) + apply (erule(2) cte_wp_at_tcbI', fastforce simp: objBits_defs cte_level_bits_def, simp) + done + have U: "getThreadCSpaceRoot a = return (cte_map (a, tcb_cnode_index 0))" + apply (clarsimp simp add: getThreadCSpaceRoot) + apply (simp add: 
cte_map_def tcb_cnode_index_def + cte_level_bits_def word_bits_def) + done + have V: "getThreadVSpaceRoot a = return (cte_map (a, tcb_cnode_index 1))" + apply (clarsimp simp add: getThreadVSpaceRoot) + apply (simp add: cte_map_def tcb_cnode_index_def to_bl_1 objBits_defs + cte_level_bits_def word_bits_def) + done + have X: "\x P Q R M. (\y. x = Some y \ \P y\ M y \Q\,\R\) + \ \case_option (Q ()) P x\ case_option (returnOk ()) M x \Q\,\R\" + by (case_tac x, simp_all, wp) + have Y: "\x P Q M. (\y. x = Some y \ \P y\ M y \Q\,-) + \ \case_option (Q ()) P x\ case_option (returnOk ()) M x \Q\,-" + by (case_tac x, simp_all, wp) + have Z: "\P f R Q x. \P\ f \\rv. Q and R\ \ \P\ f \\rv. case_option Q (\y. R) x\" + apply (rule hoare_post_imp) + defer + apply assumption + apply (case_tac x, simp_all) + done + have A: "\x P Q M. (\y. x = Some y \ \P y\ M y \Q\) + \ \case_option (Q ()) P x\ case_option (return ()) M x \Q\" + by (case_tac x, simp_all, wp) + have B: "\t v. \invs' and tcb_at' t\ threadSet (tcbFaultHandler_update v) t \\rv. invs'\" + by (wp threadSet_invs_trivial | clarsimp simp: inQ_def)+ + note stuff = Z B out_invs_trivial hoare_case_option_wp + hoare_vcg_const_Ball_lift hoare_vcg_const_Ball_lift_R + cap_delete_deletes cap_delete_valid_cap out_valid_objs + cap_insert_objs + cteDelete_deletes cteDelete_sch_act_simple + out_valid_cap out_cte_at out_tcb_valid out_emptyable + CSpaceInv_AI.cap_insert_valid_cap cap_insert_cte_at cap_delete_cte_at + cap_delete_tcb cteDelete_invs' checkCap_inv [where P="valid_cap' c0" for c0] + check_cap_inv[where P="tcb_at p0" for p0] checkCap_inv [where P="tcb_at' p0" for p0] + check_cap_inv[where P="cte_at p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] + check_cap_inv[where P="valid_cap c" for c] checkCap_inv [where P="valid_cap' c" for c] + check_cap_inv[where P="tcb_cap_valid c p1" for c p1] + check_cap_inv[where P=valid_sched] + check_cap_inv[where P=simple_sched_action] + checkCap_inv [where P=sch_act_simple] + out_no_cap_to_trivial [OF ball_tcb_cap_casesI] + checked_insert_no_cap_to + note if_cong [cong] option.case_cong [cong] + \ \This proof is quite fragile and was written when bind_wp was added to the wp set later + in the theory dependencies, and so was matched with before alternatives. 
We re-add it here to + create a similar environment and avoid needing to rework the proof.\ + note bind_wp[wp] + show ?thesis + apply (simp add: invokeTCB_def liftE_bindE) + apply (simp only: eq_commute[where a= "a"]) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF P]) + apply (rule corres_split_nor[OF S', simplified]) + apply (rule corres_split_norE[OF T [OF x U], simplified]) + apply (rule corres_split_norE[OF T [OF y V], simplified]) + apply (rule corres_split_norE) + apply (rule T2[simplified]) + apply (rule corres_split_nor[OF S, simplified]) + apply (rule corres_returnOkTT, simp) + apply wp + apply wp + apply (wpsimp wp: hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift + hoare_vcg_all_liftE_R hoare_vcg_all_lift + as_user_invs thread_set_ipc_tcb_cap_valid + thread_set_tcb_ipc_buffer_cap_cleared_invs + thread_set_cte_wp_at_trivial + thread_set_valid_cap + reschedule_preserves_valid_sched + check_cap_inv[where P=valid_sched] (* from stuff *) + check_cap_inv[where P="tcb_at p0" for p0] + thread_set_not_state_valid_sched + check_cap_inv[where P=simple_sched_action] + cap_delete_deletes hoare_drop_imps + cap_delete_valid_cap + simp: ran_tcb_cap_cases + | strengthen simple_sched_action_sched_act_not)+ + apply (strengthen use_no_cap_to_obj_asid_strg) + apply (wpsimp wp: cap_delete_cte_at cap_delete_valid_cap) + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + threadSet_invs_tcbIPCBuffer_update threadSet_cte_wp_at' + | strengthen simple_sched_action_sched_act_not)+ + apply ((wpsimp wp: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift + hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift + threadSet_valid_objs' thread_set_not_state_valid_sched + thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial + thread_set_no_cap_to_trivial getThreadBufferSlot_dom_tcb_cte_cases + assertDerived_wp_weak threadSet_cap_to' out_pred_tcb_at_preserved + checkCap_wp assertDerived_wp_weak cap_insert_objs' + | simp add: ran_tcb_cap_cases split_def U V + emptyable_def + | strengthen tcb_cap_always_valid_strg + tcb_at_invs + use_no_cap_to_obj_asid_strg + | (erule exE, clarsimp simp: word_bits_def) | wp (once) hoare_drop_imps)+) + apply (strengthen valid_tcb_ipc_buffer_update) + apply (strengthen invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct') + apply (wpsimp wp: cteDelete_invs' hoare_vcg_imp_lift' hoare_vcg_all_lift) + apply wpsimp + apply wpsimp + apply (clarsimp cong: imp_cong conj_cong simp: emptyable_def) + apply (rule_tac Q'="\_. ?T2_pre" in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) + (* beginning to deal with is_nondevice_page_cap *) + apply (clarsimp simp: emptyable_def is_cap_simps + is_cnode_or_valid_arch_def obj_ref_none_no_asid cap_asid_def + cong: conj_cong imp_cong + split: option.split_asm) + apply (simp add: case_bool_If valid_ipc_buffer_cap_def + split: arch_cap.splits if_splits) + (* is_nondevice_page_cap discharged *) + apply ((wp stuff checkCap_wp assertDerived_wp_weak cap_insert_objs' + | simp add: ran_tcb_cap_cases split_def U V emptyable_def + | wpc | strengthen tcb_cap_always_valid_strg use_no_cap_to_obj_asid_strg)+)[1] + apply (clarsimp cong: imp_cong conj_cong) + apply (rule_tac Q'="\_. ?T2_pre' and (\s. 
valid_option_prio p_auth)" + in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) + apply (case_tac g'; clarsimp simp: isCap_simps ; clarsimp cong:imp_cong) + apply (wp add: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift + hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift setMCPriority_invs' + threadSet_valid_objs' thread_set_not_state_valid_sched setP_invs' + typ_at_lifts [OF setPriority_typ_at'] + typ_at_lifts [OF setMCPriority_typ_at'] + threadSet_cap_to' out_pred_tcb_at_preserved assertDerived_wp + del: cteInsert_invs + | simp add: ran_tcb_cap_cases split_def U V + emptyable_def + | wpc | strengthen tcb_cap_always_valid_strg + use_no_cap_to_obj_asid_strg + | wp add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs + | (erule exE, clarsimp simp: word_bits_def))+ + apply (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] tcb_at_st_tcb_at[symmetric] + tcb_cap_valid_def is_cnode_or_valid_arch_def invs_valid_objs emptyable_def + obj_ref_none_no_asid no_cap_to_obj_with_diff_ref_Null is_valid_vtable_root_simp + is_cap_simps cap_asid_def vs_cap_ref_def arch_cap_fun_lift_def + invs_psp_aligned invs_distinct + cong: conj_cong imp_cong + split: option.split_asm) + by (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def objBits_defs + cte_map_tcb_0 cte_map_tcb_1[simplified] tcb_at_cte_at' cte_at_tcb_at_32' + isCap_simps domIff valid_tcb'_def tcb_cte_cases_def + split: option.split_asm + dest!: isValidVTableRootD) +qed + + +lemmas threadSet_ipcbuffer_trivial + = threadSet_invs_trivial[where F="tcbIPCBuffer_update F'" for F', + simplified inQ_def, simplified] + +crunches setPriority, setMCPriority + for cap_to'[wp]: "ex_nonz_cap_to' a" + (simp: crunch_simps) + +lemma cteInsert_sa_simple[wp]: + "cteInsert newCap srcSlot destSlot \sch_act_simple\" + by (simp add: sch_act_simple_def, wp) + +lemma isReplyCapD: + "isReplyCap cap \ \ptr master grant. cap = capability.ReplyCap ptr master grant" + by (simp add: isCap_simps) + +lemma tc_invs': + "\invs' and sch_act_simple and tcb_at' a and ex_nonz_cap_to' a and + K (valid_option_prio d \ valid_option_prio mcp) and + case_option \ (valid_cap' o fst) e' and + K (case_option True (isCNodeCap o fst) e') and + case_option \ (valid_cap' o fst) f' and + K (case_option True (isValidVTableRoot o fst) f') and + case_option \ (valid_cap') (case_option None (case_option None (Some o fst) o snd) g) and + K (case_option True isArchObjectCap (case_option None (case_option None (Some o fst) o snd) g)) + and K (case_option True (swp is_aligned msg_align_bits o fst) g) \ + invokeTCB (tcbinvocation.ThreadControl a sl b' mcp d e' f' g) + \\rv. invs'\" + apply (rule hoare_gen_asm) + apply (simp add: split_def invokeTCB_def getThreadCSpaceRoot getThreadVSpaceRoot + getThreadBufferSlot_def locateSlot_conv + cong: option.case_cong) + apply (simp only: eq_commute[where a="a"]) + apply (rule hoare_walk_assmsE) + apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp + hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] + apply (rule hoare_walk_assmsE) + apply (clarsimp simp: pred_conj_def option.splits [where P="\x. 
x s" for s]) + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp setMCPriority_invs' + typ_at_lifts[OF setMCPriority_typ_at'] + hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] + apply (wp add: setP_invs' hoare_weak_lift_imp hoare_vcg_all_lift)+ + apply (rule case_option_wp_None_return[OF setP_invs'[simplified pred_conj_assoc]]) + apply clarsimp + apply wpfix + apply assumption + apply (rule case_option_wp_None_returnOk) + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift + checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + threadSet_invs_trivial2 threadSet_tcb' hoare_vcg_all_lift threadSet_cte_wp_at')+ + apply (wpsimp wp: hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + cteDelete_invs' cteDelete_invs' cteDelete_typ_at'_lifts)+ + apply (assumption | clarsimp cong: conj_cong imp_cong | (rule case_option_wp_None_returnOk) + | wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + hoare_vcg_imp_lift' hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] + checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] + hoare_vcg_const_imp_lift_R assertDerived_wp_weak hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + cteDelete_invs' cteDelete_typ_at'_lifts cteDelete_sch_act_simple)+ + apply (clarsimp simp: tcb_cte_cases_def cte_level_bits_def objBits_defs tcbIPCBufferSlot_def) + by (auto dest!: isCapDs isReplyCapD isValidVTableRootD simp: isCap_simps) + +lemma setSchedulerAction_invs'[wp]: + "\invs' and sch_act_wf sa + and (\s. sa = ResumeCurrentThread + \ obj_at' (Not \ tcbQueued) (ksCurThread s) s) + and (\s. sa = ResumeCurrentThread + \ ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s)\ + setSchedulerAction sa + \\rv. invs'\" + apply (simp add: setSchedulerAction_def) + apply wp + apply (clarsimp simp add: invs'_def valid_state'_def valid_irq_node'_def + valid_queues_def bitmapQ_defs cur_tcb'_def + ct_not_inQ_def) + apply (simp add: ct_idle_or_in_cur_domain'_def) + done + +end + +consts + copyregsets_map :: "arch_copy_register_sets \ Arch.copy_register_sets" + +context begin interpretation Arch . (*FIXME: arch_split*) + +primrec + tcbinv_relation :: "tcb_invocation \ tcbinvocation \ bool" +where + "tcbinv_relation (tcb_invocation.ReadRegisters a b c d) x + = (x = tcbinvocation.ReadRegisters a b c (copyregsets_map d))" +| "tcbinv_relation (tcb_invocation.WriteRegisters a b c d) x + = (x = tcbinvocation.WriteRegisters a b c (copyregsets_map d))" +| "tcbinv_relation (tcb_invocation.CopyRegisters a b c d e f g) x + = (x = tcbinvocation.CopyRegisters a b c d e f (copyregsets_map g))" +| "tcbinv_relation (tcb_invocation.ThreadControl a sl flt_ep mcp prio croot vroot buf) x + = (\flt_ep' croot' vroot' sl' buf'. flt_ep = option_map to_bl flt_ep' \ + newroot_rel croot croot' \ newroot_rel vroot vroot' \ + ({croot, vroot, option_map undefined buf} \ {None} + \ sl' = cte_map sl) \ + (case buf of None \ buf' = None + | Some (vptr, g'') \ \g'''. 
buf' = Some (vptr, g''') + \ newroot_rel g'' g''') \ + x = tcbinvocation.ThreadControl a sl' flt_ep' mcp prio croot' vroot' buf')" +| "tcbinv_relation (tcb_invocation.Suspend a) x + = (x = tcbinvocation.Suspend a)" +| "tcbinv_relation (tcb_invocation.Resume a) x + = (x = tcbinvocation.Resume a)" +| "tcbinv_relation (tcb_invocation.NotificationControl t ntfnptr) x + = (x = tcbinvocation.NotificationControl t ntfnptr)" +| "tcbinv_relation (tcb_invocation.SetTLSBase ref w) x + = (x = tcbinvocation.SetTLSBase ref w)" + +primrec + tcb_inv_wf' :: "tcbinvocation \ kernel_state \ bool" +where + "tcb_inv_wf' (tcbinvocation.Suspend t) + = (tcb_at' t and ex_nonz_cap_to' t)" +| "tcb_inv_wf' (tcbinvocation.Resume t) + = (tcb_at' t and ex_nonz_cap_to' t)" +| "tcb_inv_wf' (tcbinvocation.ThreadControl t sl fe mcp p croot vroot buf) + = (tcb_at' t and ex_nonz_cap_to' t and + K (valid_option_prio p \ valid_option_prio mcp) and + case_option \ (valid_cap' o fst) croot and + K (case_option True (isCNodeCap o fst) croot) and + case_option \ (valid_cap' o fst) vroot and + K (case_option True (isValidVTableRoot o fst) vroot) and + case_option \ (case_option \ (valid_cap' o fst) o snd) buf and + case_option \ (case_option \ (cte_at' o snd) o snd) buf and + K (case_option True (swp is_aligned msg_align_bits o fst) buf) and + K (case_option True (case_option True (isArchObjectCap o fst) o snd) buf) and + (\s. {croot, vroot, option_map undefined buf} \ {None} + \ cte_at' sl s) and + (\s. case_option True (\(pr, auth). mcpriority_tcb_at' ((\) pr) auth s) p) and + (\s. case_option True (\(m, auth). mcpriority_tcb_at' ((\) m) auth s) mcp))" +| "tcb_inv_wf' (tcbinvocation.ReadRegisters src susp n arch) + = (tcb_at' src and ex_nonz_cap_to' src)" +| "tcb_inv_wf' (tcbinvocation.WriteRegisters dest resume values arch) + = (tcb_at' dest and ex_nonz_cap_to' dest)" +| "tcb_inv_wf' (tcbinvocation.CopyRegisters dest src suspend_source resume_target + trans_frame trans_int trans_arch) + = (tcb_at' dest and tcb_at' src and ex_nonz_cap_to' src and ex_nonz_cap_to' dest)" +| "tcb_inv_wf' (tcbinvocation.NotificationControl t ntfn) + = (tcb_at' t and ex_nonz_cap_to' t + and (case ntfn of None \ \ + | Some ntfnptr \ obj_at' (\ko. ntfnBoundTCB ko = None + \ (\q. 
ntfnObj ko \ WaitingNtfn q)) ntfnptr + and ex_nonz_cap_to' ntfnptr + and bound_tcb_at' ((=) None) t) )" +| "tcb_inv_wf' (tcbinvocation.SetTLSBase ref w) + = (tcb_at' ref and ex_nonz_cap_to' ref)" + +lemma invokeTCB_corres: + "tcbinv_relation ti ti' \ + corres (dc \ (=)) + (einvs and simple_sched_action and Tcb_AI.tcb_inv_wf ti) + (invs' and sch_act_simple and tcb_inv_wf' ti') + (invoke_tcb ti) (invokeTCB ti')" + apply (case_tac ti, simp_all only: tcbinv_relation.simps valid_tcb_invocation_def) + apply (rule corres_guard_imp [OF invokeTCB_WriteRegisters_corres], simp+)[1] + apply (rule corres_guard_imp [OF invokeTCB_ReadRegisters_corres], simp+)[1] + apply (rule corres_guard_imp [OF invokeTCB_CopyRegisters_corres], simp+)[1] + apply (clarsimp simp del: invoke_tcb.simps) + apply (rename_tac word one t2 mcp t3 t4 t5 t6 t7 t8 t9 t10 t11) + apply (rule_tac F="is_aligned word 5" in corres_req) + apply (clarsimp simp add: is_aligned_weaken [OF tcb_aligned]) + apply (rule corres_guard_imp [OF transferCaps_corres], clarsimp+) + apply (clarsimp simp: is_cnode_or_valid_arch_def + split: option.split option.split_asm) + apply clarsimp + apply (auto split: option.split_asm simp: newroot_rel_def)[1] + apply (simp add: invokeTCB_def liftM_def[symmetric] + o_def dc_def[symmetric]) + apply (rule corres_guard_imp [OF suspend_corres], simp+) + apply (simp add: invokeTCB_def liftM_def[symmetric] + o_def dc_def[symmetric]) + apply (rule corres_guard_imp [OF restart_corres], simp+) + apply (simp add:invokeTCB_def) + apply (rename_tac option) + apply (case_tac option) + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF unbindNotification_corres]) + apply (rule corres_trivial, simp) + apply wp+ + apply (clarsimp) + apply clarsimp + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF bindNotification_corres]) + apply (rule corres_trivial, simp) + apply wp+ + apply clarsimp + apply (clarsimp simp: obj_at_def is_ntfn) + apply (clarsimp simp: obj_at'_def) + apply (simp add: invokeTCB_def tlsBaseRegister_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF TcbAcc_R.asUser_setRegister_corres]) + apply (rule corres_split[OF Bits_R.getCurThread_corres]) + apply (rule corres_split[OF Corres_UL.corres_when]) + apply simp + apply (rule TcbAcc_R.rescheduleRequired_corres) + apply (rule corres_trivial, simp) + apply (wpsimp wp: hoare_drop_imp)+ + apply (fastforce dest: valid_sched_valid_queues simp: valid_sched_weak_strg einvs_valid_etcbs) + apply fastforce + done + +lemma tcbBoundNotification_caps_safe[simp]: + "\(getF, setF)\ran tcb_cte_cases. + getF (tcbBoundNotification_update (\_. Some ntfnptr) tcb) = getF tcb" + by (case_tac tcb, simp add: tcb_cte_cases_def cteSizeBits_def) + +lemma valid_bound_ntfn_lift: + assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" + shows "\\s. valid_bound_ntfn' a s\ f \\rv s. valid_bound_ntfn' a s\" + apply (simp add: valid_bound_ntfn'_def, case_tac a, simp_all) + apply (wp typ_at_lifts[OF P])+ + done + +crunches setBoundNotification + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (ignore: threadSet wp: threadSet_sched_pointers) + +lemma bindNotification_invs': + "\bound_tcb_at' ((=) None) tcbptr + and ex_nonz_cap_to' ntfnptr + and ex_nonz_cap_to' tcbptr + and obj_at' (\ntfn. ntfnBoundTCB ntfn = None \ (\q. ntfnObj ntfn \ WaitingNtfn q)) ntfnptr + and invs'\ + bindNotification tcbptr ntfnptr + \\_. 
invs'\" + including no_pre + apply (simp add: bindNotification_def invs'_def valid_state'_def) + apply (rule bind_wp[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp set_ntfn_valid_pspace' sbn_sch_act' valid_irq_node_lift + setBoundNotification_ct_not_inQ valid_bound_ntfn_lift + untyped_ranges_zero_lift + | clarsimp dest!: global'_no_ex_cap simp: cteCaps_of_def)+ + apply (clarsimp simp: valid_pspace'_def) + apply (cases "tcbptr = ntfnptr") + apply (clarsimp dest!: pred_tcb_at' simp: obj_at'_def) + apply (clarsimp simp: pred_tcb_at' conj_comms o_def) + apply (subst delta_sym_refs, assumption) + apply (fastforce simp: ntfn_q_refs_of'_def obj_at'_def + dest!: symreftype_inverse' + split: ntfn.splits if_split_asm) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: tcb_st_refs_of'_def + dest!: bound_tcb_at_state_refs_ofD' + split: if_split_asm thread_state.splits) + apply (fastforce simp: obj_at'_def state_refs_of'_def + dest!: symreftype_inverse') + apply (clarsimp simp: valid_pspace'_def) + apply (frule_tac P="\k. k=ntfn" in obj_at_valid_objs', simp) + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def obj_at'_def + dest!: pred_tcb_at' + split: ntfn.splits) + done + +lemma tcbntfn_invs': + "\invs' and tcb_inv_wf' (tcbinvocation.NotificationControl tcb ntfnptr)\ + invokeTCB (tcbinvocation.NotificationControl tcb ntfnptr) + \\rv. invs'\" + apply (simp add: invokeTCB_def) + apply (case_tac ntfnptr, simp_all) + apply (wp unbindNotification_invs bindNotification_invs' | simp)+ + done + +lemma setTLSBase_invs'[wp]: + "\invs' and tcb_inv_wf' (tcbinvocation.SetTLSBase tcb tls_base)\ + invokeTCB (tcbinvocation.SetTLSBase tcb tls_base) + \\rv. invs'\" + by (wpsimp simp: invokeTCB_def) + +lemma tcbinv_invs': + "\invs' and sch_act_simple and ct_in_state' runnable' and tcb_inv_wf' ti\ + invokeTCB ti + \\rv. 
invs'\" + apply (case_tac ti, simp_all only:) + apply (simp add: invokeTCB_def) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def + dest!: global'_no_ex_cap) + apply (simp add: invokeTCB_def) + apply (wp restart_invs') + apply (clarsimp simp: invs'_def valid_state'_def + dest!: global'_no_ex_cap) + apply (wp tc_invs') + apply (clarsimp split: option.split dest!: isCapDs) + apply (wp writereg_invs' readreg_invs' copyreg_invs' tcbntfn_invs' + | simp)+ + done + +declare assertDerived_wp [wp] + +lemma copyregsets_map_only[simp]: + "copyregsets_map v = ARMNoExtraRegisters" + by (cases "copyregsets_map v", simp) + +lemma decodeReadRegisters_corres: + "corres (ser \ tcbinv_relation) (invs and tcb_at t) (invs' and tcb_at' t) + (decode_read_registers args (cap.ThreadCap t)) + (decodeReadRegisters args (ThreadCap t))" + apply (simp add: decode_read_registers_def decodeReadRegisters_def) + apply (cases args, simp_all) + apply (case_tac list, simp_all) + apply (simp add: decodeTransfer_def) + apply (simp add: range_check_def rangeCheck_def frameRegisters_def gpRegisters_def) + apply (simp add: unlessE_def split del: if_split, simp add: returnOk_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule corres_trivial) + apply (fastforce simp: returnOk_def) + apply (simp add: liftE_bindE) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_trivial) + apply (clarsimp simp: whenE_def) + apply (wp|simp)+ + done + +lemma decodeWriteRegisters_corres: + notes if_cong [cong] + shows + "\ length args < 2 ^ word_bits \ \ + corres (ser \ tcbinv_relation) (invs and tcb_at t) (invs' and tcb_at' t) + (decode_write_registers args (cap.ThreadCap t)) + (decodeWriteRegisters args (ThreadCap t))" + apply (simp add: decode_write_registers_def decodeWriteRegisters_def) + apply (cases args, simp_all) + apply (case_tac list, simp_all) + apply (simp add: decodeTransfer_def genericLength_def) + apply (simp add: word_less_nat_alt unat_of_nat64) + apply (simp add: whenE_def, simp add: returnOk_def) + apply (simp add: genericTake_def) + apply clarsimp + apply (rule corres_guard_imp) + apply (simp add: liftE_bindE) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split_norE) + apply (rule corres_trivial, simp) + apply (rule corres_trivial, simp) + apply (wp)+ + apply simp+ + done + +lemma decodeCopyRegisters_corres: + "\ list_all2 cap_relation extras extras'; length args < 2 ^ word_bits \ \ + corres (ser \ tcbinv_relation) (invs and tcb_at t) (invs' and tcb_at' t) + (decode_copy_registers args (cap.ThreadCap t) extras) + (decodeCopyRegisters args (ThreadCap t) extras')" + apply (simp add: decode_copy_registers_def decodeCopyRegisters_def) + apply (cases args, simp_all) + apply (cases extras, simp_all add: decodeTransfer_def null_def) + apply (clarsimp simp: list_all2_Cons1 null_def) + apply (case_tac aa, simp_all) + apply (simp add: returnOk_def) + apply clarsimp + done + +lemma decodeReadReg_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + decodeReadRegisters args (ThreadCap t) + \tcb_inv_wf'\,-" + apply (simp add: decodeReadRegisters_def decodeTransfer_def whenE_def + cong: list.case_cong) + apply (rule hoare_pre) + apply (wp | wpc)+ + apply simp + done + +lemma decodeWriteReg_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + decodeWriteRegisters args (ThreadCap t) + \tcb_inv_wf'\,-" + apply (simp add: decodeWriteRegisters_def whenE_def decodeTransfer_def + cong: list.case_cong) + apply (rule hoare_pre) + apply (wp | 
wpc)+ + apply simp + done + +lemma decodeCopyReg_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t + and (\s. \x \ set extras. s \' x + \ (\y \ zobj_refs' x. ex_nonz_cap_to' y s))\ + decodeCopyRegisters args (ThreadCap t) extras + \tcb_inv_wf'\,-" + apply (simp add: decodeCopyRegisters_def whenE_def decodeTransfer_def + cong: list.case_cong capability.case_cong bool.case_cong + split del: if_split) + apply (rule hoare_pre) + apply (wp | wpc)+ + apply (clarsimp simp: null_def neq_Nil_conv + valid_cap'_def[where c="ThreadCap t" for t]) + done + +lemma eq_ucast_word8[simp]: + "((ucast (x :: 8 word) :: machine_word) = ucast y) = (x = y)" + apply safe + apply (drule_tac f="ucast :: (machine_word \ 8 word)" in arg_cong) + apply (simp add: ucast_up_ucast_id is_up_def + source_size_def target_size_def word_size) + done + +lemma checkPrio_corres: + "corres (ser \ dc) (tcb_at auth and pspace_aligned and pspace_distinct) \ + (check_prio p auth) (checkPrio p auth)" + apply (simp add: check_prio_def checkPrio_def) + apply (rule corres_guard_imp) + apply (simp add: liftE_bindE) + apply (rule corres_split[OF threadGet_corres[where r="(=)"]]) + apply (clarsimp simp: tcb_relation_def) + apply (rule_tac rvr = dc and + R = \ and + R' = \ in + whenE_throwError_corres'[where m="returnOk ()" and m'="returnOk ()", simplified]) + apply (simp add: minPriority_def) + apply (clarsimp simp: minPriority_def) + apply (rule corres_returnOkTT) + apply (simp add: minPriority_def) + apply (wp gct_wp)+ + apply (simp add: cur_tcb_def cur_tcb'_def)+ + done + +lemma decodeSetPriority_corres: + "\ cap_relation cap cap'; is_thread_cap cap; + list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ + corres (ser \ tcbinv_relation) + (cur_tcb and valid_etcbs and (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) + (invs' and (\s. \x \ set extras'. s \' (fst x))) + (decode_set_priority args cap slot extras) + (decodeSetPriority args cap' extras')" + apply (cases args; cases extras; cases extras'; + clarsimp simp: decode_set_priority_def decodeSetPriority_def) + apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') + apply (rule corres_split_eqrE) + apply corresKsimp + apply (rule corres_splitEE[OF checkPrio_corres]) + apply (rule corres_returnOkTT) + apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) + by (wpsimp simp: valid_cap_def valid_cap'_def)+ + +lemma decodeSetMCPriority_corres: + "\ cap_relation cap cap'; is_thread_cap cap; + list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ + corres (ser \ tcbinv_relation) + (cur_tcb and valid_etcbs and (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) + (invs' and (\s. \x \ set extras'. s \' (fst x))) + (decode_set_mcpriority args cap slot extras) + (decodeSetMCPriority args cap' extras')" + apply (cases args; cases extras; cases extras'; + clarsimp simp: decode_set_mcpriority_def decodeSetMCPriority_def) + apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') + apply (rule corres_split_eqrE) + apply corresKsimp + apply (rule corres_splitEE[OF checkPrio_corres]) + apply (rule corres_returnOkTT) + apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) + by (wpsimp simp: valid_cap_def valid_cap'_def)+ + +lemma getMCP_sp: + "\P\ threadGet tcbMCP t \\rv. mcpriority_tcb_at' (\st. 
st = rv) t and P\" + apply (simp add: threadGet_def) + apply wp + apply (simp add: o_def pred_tcb_at'_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma getMCP_wp: "\\s. \mcp. mcpriority_tcb_at' ((=) mcp) t s \ P mcp s\ threadGet tcbMCP t \P\" + apply (rule hoare_post_imp) + prefer 2 + apply (rule getMCP_sp) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + done + +lemma checkPrio_wp: + "\ \s. mcpriority_tcb_at' (\mcp. prio \ ucast mcp) auth s \ P s \ + checkPrio prio auth + \ \rv. P \,-" + apply (simp add: checkPrio_def) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) + by (auto simp add: pred_tcb_at'_def obj_at'_def) + +lemma checkPrio_lt_ct: + "\\\ checkPrio prio auth \\rv s. mcpriority_tcb_at' (\mcp. prio \ ucast mcp) auth s\, -" + by (wp checkPrio_wp) simp + +lemma checkPrio_lt_ct_weak: + "\\\ checkPrio prio auth \\rv s. mcpriority_tcb_at' (\mcp. ucast prio \ mcp) auth s\, -" + apply (rule hoare_strengthen_postE_R) + apply (rule checkPrio_lt_ct) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + by (rule le_ucast_ucast_le) simp + +crunch inv: checkPrio "P" + +lemma decodeSetPriority_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t \ + decodeSetPriority args (ThreadCap t) extras \tcb_inv_wf'\,-" + unfolding decodeSetPriority_def + apply (wpsimp wp: checkPrio_lt_ct_weak | wp (once) checkPrio_inv)+ + apply (clarsimp simp: maxPriority_def numPriorities_def) + apply unat_arith + apply simp + done + +lemma decodeSetPriority_inv[wp]: + "\P\ decodeSetPriority args cap extras \\rv. P\" + apply (simp add: decodeSetPriority_def Let_def split del: if_split) + apply (rule hoare_pre) + apply (wp checkPrio_inv | simp add: whenE_def split del: if_split + | rule hoare_drop_imps + | wpcw)+ + done + +lemma decodeSetMCPriority_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t \ + decodeSetMCPriority args (ThreadCap t) extras \tcb_inv_wf'\,-" + unfolding decodeSetMCPriority_def Let_def + apply (rule hoare_pre) + apply (wp checkPrio_lt_ct_weak | wpc | simp | wp (once) checkPrio_inv)+ + apply (clarsimp simp: maxPriority_def numPriorities_def) + using max_word_max [of \UCAST(64 \ 8) x\ for x] + apply (simp add: max_word_mask numeral_eq_Suc mask_Suc) + done + +lemma decodeSetMCPriority_inv[wp]: + "\P\ decodeSetMCPriority args cap extras \\rv. P\" + apply (simp add: decodeSetMCPriority_def Let_def split del: if_split) + apply (rule hoare_pre) + apply (wp checkPrio_inv | simp add: whenE_def split del: if_split + | rule hoare_drop_imps + | wpcw)+ + done + +lemma decodeSetSchedParams_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t \ + decodeSetSchedParams args (ThreadCap t) extras + \tcb_inv_wf'\,-" + unfolding decodeSetSchedParams_def + apply (wpsimp wp: checkPrio_lt_ct_weak | wp (once) checkPrio_inv)+ + apply (clarsimp simp: maxPriority_def numPriorities_def) + using max_word_max [of \UCAST(64 \ 8) x\ for x] + apply (simp add: max_word_mask numeral_eq_Suc mask_Suc) + done + +lemma decodeSetSchedParams_corres: + "\ cap_relation cap cap'; is_thread_cap cap; + list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ + corres (ser \ tcbinv_relation) + (cur_tcb and valid_etcbs and + (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) + (invs' and (\s. \x \ set extras'. 
s \' (fst x))) + (decode_set_sched_params args cap slot extras) + (decodeSetSchedParams args cap' extras')" + apply (simp add: decode_set_sched_params_def decodeSetSchedParams_def) + apply (cases "length args < 2") + apply (clarsimp split: list.split) + apply (cases "length extras < 1") + apply (clarsimp split: list.split simp: list_all2_Cons2) + apply (clarsimp simp: list_all2_Cons1 neq_Nil_conv val_le_length_Cons linorder_not_less) + apply (rule corres_split_eqrE) + apply corresKsimp + apply (rule corres_split_norE[OF checkPrio_corres]) + apply (rule corres_splitEE[OF checkPrio_corres]) + apply (rule corres_returnOkTT) + apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) + apply (wpsimp wp: check_prio_inv checkPrio_inv + simp: valid_cap_def valid_cap'_def)+ + done + +lemma checkValidIPCBuffer_corres: + "cap_relation cap cap' \ + corres (ser \ dc) \ \ + (check_valid_ipc_buffer vptr cap) + (checkValidIPCBuffer vptr cap')" + apply (simp add: check_valid_ipc_buffer_def + checkValidIPCBuffer_def + unlessE_def Let_def + split: cap_relation_split_asm arch_cap.split_asm bool.splits) + apply (simp add: capTransferDataSize_def msgMaxLength_def + msg_max_length_def msgMaxExtraCaps_def + cap_transfer_data_size_def word_size ipcBufferSizeBits_def + msgLengthBits_def msgExtraCapBits_def msg_align_bits msgAlignBits_def + msg_max_extra_caps_def is_aligned_mask whenE_def split:vmpage_size.splits) + apply (auto simp add: returnOk_def) + done + +lemma checkValidIPCBuffer_ArchObject_wp: + "\\s. isArchObjectCap cap \ is_aligned x msg_align_bits \ P s\ + checkValidIPCBuffer x cap + \\rv s. P s\,-" + apply (simp add: checkValidIPCBuffer_def + whenE_def unlessE_def + cong: capability.case_cong + arch_capability.case_cong + split del: if_split) + apply (rule hoare_pre) + apply (wp whenE_throwError_wp + | wpc | clarsimp simp: ipcBufferSizeBits_def isCap_simps is_aligned_mask msg_align_bits msgAlignBits_def)+ + done + +lemma decodeSetIPCBuffer_corres: + notes if_cong [cong] + shows + "\ cap_relation cap cap'; is_thread_cap cap; + list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ + corres (ser \ tcbinv_relation) (\s. \x \ set extras. cte_at (snd x) s) + (\s. invs' s \ (\x \ set extras'. cte_at' (snd x) s)) + (decode_set_ipc_buffer args cap slot extras) + (decodeSetIPCBuffer args cap' (cte_map slot) extras')" + apply (simp add: decode_set_ipc_buffer_def decodeSetIPCBuffer_def + split del: if_split) + apply (cases args) + apply simp + apply (cases extras) + apply simp + apply (clarsimp simp: list_all2_Cons1 liftME_def[symmetric] + is_cap_simps + split del: if_split) + apply (clarsimp simp add: returnOk_def newroot_rel_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule deriveCap_corres; simp) + apply (simp add: o_def newroot_rel_def split_def dc_def[symmetric]) + apply (erule checkValidIPCBuffer_corres) + apply (wp hoareE_TrueI | simp)+ + apply fastforce + done + +lemma decodeSetIPC_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t and cte_at' slot + and (\s. \v \ set extras. 
s \' fst v \ cte_at' (snd v) s)\ + decodeSetIPCBuffer args (ThreadCap t) slot extras + \tcb_inv_wf'\,-" + apply (simp add: decodeSetIPCBuffer_def Let_def whenE_def + split del: if_split cong: list.case_cong prod.case_cong) + apply (rule hoare_pre) + apply (wp | wpc | simp)+ + apply (rule checkValidIPCBuffer_ArchObject_wp) + apply simp + apply (wp hoare_drop_imps) + apply clarsimp + done + +lemma decodeSetIPCBuffer_is_tc[wp]: + "\\\ decodeSetIPCBuffer args cap slot extras \\rv s. isThreadControl rv\,-" + apply (simp add: decodeSetIPCBuffer_def Let_def + split del: if_split cong: list.case_cong prod.case_cong) + apply (rule hoare_pre) + apply (wp | wpc)+ + apply (simp only: isThreadControl_def tcbinvocation.simps) + apply wp+ + apply (clarsimp simp: isThreadControl_def) + done + +lemma decodeSetPriority_is_tc[wp]: + "\\\ decodeSetPriority args cap extras \\rv s. isThreadControl rv\,-" + apply (simp add: decodeSetPriority_def) + apply wpsimp + apply (clarsimp simp: isThreadControl_def) + done + +lemma decodeSetMCPriority_is_tc[wp]: + "\\\ decodeSetMCPriority args cap extras \\rv s. isThreadControl rv\,-" + apply (simp add: decodeSetMCPriority_def) + apply wpsimp + apply (clarsimp simp: isThreadControl_def) + done + +crunch inv[wp]: decodeSetIPCBuffer "P" + (simp: crunch_simps) + +lemma slotCapLongRunningDelete_corres: + "cte_map ptr = ptr' \ + corres (=) (cte_at ptr and invs) invs' + (slot_cap_long_running_delete ptr) + (slotCapLongRunningDelete ptr')" + apply (clarsimp simp: slot_cap_long_running_delete_def + slotCapLongRunningDelete_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (auto split: cap_relation_split_asm arch_cap.split_asm + intro!: corres_rel_imp [OF isFinalCapability_corres[where ptr=ptr]] + simp: liftM_def[symmetric] final_matters'_def + long_running_delete_def + longRunningDelete_def isCap_simps)[1] + apply (wp get_cap_wp getCTE_wp)+ + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply fastforce + done + +lemma slot_long_running_inv'[wp]: + "\P\ slotCapLongRunningDelete ptr \\rv. P\" + apply (simp add: slotCapLongRunningDelete_def) + apply (rule bind_wp [OF _ getCTE_inv]) + apply (rule hoare_pre, wpcw, (wp isFinalCapability_inv)+) + apply simp + done + +lemma cap_CNode_case_throw: + "(case cap of CNodeCap a b c d \ m | _ \ throw x) + = (doE unlessE (isCNodeCap cap) (throw x); m odE)" + by (cases cap, simp_all add: isCap_simps unlessE_def) + +lemma isValidVTableRoot_eq: + "cap_relation cap cap' \ isValidVTableRoot cap' = is_valid_vtable_root cap" + apply (cases cap; simp add: isValidVTableRoot_def isVTableRoot_def is_valid_vtable_root_simp) + apply (rename_tac acap, case_tac acap; simp) + apply (auto split: pt_type.splits simp: mdata_map_def) + done + +lemma decodeSetSpace_corres: + notes if_cong [cong] + shows + "\ cap_relation cap cap'; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras'; + is_thread_cap cap \ \ + corres (ser \ tcbinv_relation) + (invs and valid_cap cap and (\s. \x \ set extras. cte_at (snd x) s)) + (invs' and valid_cap' cap' and (\s. \x \ set extras'. 
cte_at' (snd x) s)) + (decode_set_space args cap slot extras) + (decodeSetSpace args cap' (cte_map slot) extras')" + apply (simp add: decode_set_space_def decodeSetSpace_def + Let_def + split del: if_split) + apply (cases "3 \ length args \ 2 \ length extras'") + apply (clarsimp simp: val_le_length_Cons list_all2_Cons2 + split del: if_split) + apply (simp add: liftE_bindE liftM_def unlessE_throwError_returnOk unlessE_whenE + bindE_assoc cap_CNode_case_throw + getThreadCSpaceRoot getThreadVSpaceRoot + split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split[OF slotCapLongRunningDelete_corres]) + apply (clarsimp simp: is_cap_simps get_tcb_ctable_ptr_def cte_map_tcb_0) + apply (rule corres_split[OF slotCapLongRunningDelete_corres]) + apply (clarsimp simp: is_cap_simps get_tcb_vtable_ptr_def cte_map_tcb_1[simplified]) + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply simp + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_splitEE[OF deriveCap_corres]) + apply (fastforce dest: list_all2_nthD2[where p=0] simp: cap_map_update_data) + apply (fastforce dest: list_all2_nthD2[where p=0]) + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply simp + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_splitEE[OF deriveCap_corres]) + apply (clarsimp simp: cap_map_update_data) + apply simp + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply (simp add: isValidVTableRoot_eq) + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_trivial) + apply (clarsimp simp: returnOk_def newroot_rel_def is_cap_simps + list_all2_conv_all_nth split_def) + apply wp+ + apply ((simp only: simp_thms pred_conj_def | wp)+)[2] + apply (unfold whenE_def, wp+)[2] + apply ((simp split del: if_split | wp | rule hoare_drop_imps)+)[2] + apply (unfold whenE_def, wp+)[2] + apply simp + apply (wp hoare_drop_imps)+ + apply (clarsimp simp: get_tcb_ctable_ptr_def get_tcb_vtable_ptr_def + is_cap_simps valid_cap_def tcb_at_cte_at_0 + tcb_at_cte_at_1[simplified]) + apply fastforce + apply (frule list_all2_lengthD) + apply (clarsimp split: list.split) + done + +lemma decodeSetSpace_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t and cte_at' slot + and (\s. \x \ set extras. s \' fst x \ cte_at' (snd x) s \ t \ snd x \ t + 32 \ snd x)\ + decodeSetSpace args (ThreadCap t) slot extras + \tcb_inv_wf'\,-" + apply (simp add: decodeSetSpace_def Let_def split_def + unlessE_def getThreadVSpaceRoot getThreadCSpaceRoot + cap_CNode_case_throw + split del: if_split cong: if_cong list.case_cong) + apply (rule hoare_pre) + apply (wp + | simp add: o_def split_def + split del: if_split + | wpc + | rule hoare_drop_imps)+ + apply (clarsimp simp del: length_greater_0_conv + split del: if_split) + apply (simp del: length_greater_0_conv add: valid_updateCapDataI) + done + +lemma decodeSetSpace_inv[wp]: + "\P\ decodeSetSpace args cap slot extras \\rv. P\" + apply (simp add: decodeSetSpace_def Let_def split_def + unlessE_def getThreadVSpaceRoot getThreadCSpaceRoot + split del: if_split cong: if_cong list.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps + | simp add: o_def split_def split del: if_split + | wpcw)+ + done + +lemma decodeSetSpace_is_tc[wp]: + "\\\ decodeSetSpace args cap slot extras \\rv s. 
isThreadControl rv\,-" + apply (simp add: decodeSetSpace_def Let_def split_def + unlessE_def getThreadVSpaceRoot getThreadCSpaceRoot + split del: if_split cong: list.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps + | simp only: isThreadControl_def tcbinvocation.simps + | wpcw)+ + apply simp + done + +lemma decodeSetSpace_tc_target[wp]: + "\\s. P (capTCBPtr cap)\ decodeSetSpace args cap slot extras \\rv s. P (tcThread rv)\,-" + apply (simp add: decodeSetSpace_def Let_def split_def + unlessE_def getThreadVSpaceRoot getThreadCSpaceRoot + split del: if_split cong: list.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps + | simp only: tcbinvocation.sel + | wpcw)+ + apply simp + done + +lemma decodeSetSpace_tc_slot[wp]: + "\\s. P slot\ decodeSetSpace args cap slot extras \\rv s. P (tcThreadCapSlot rv)\,-" + apply (simp add: decodeSetSpace_def split_def unlessE_def + getThreadVSpaceRoot getThreadCSpaceRoot + cong: list.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps | wpcw | simp only: tcbinvocation.sel)+ + apply simp + done + +lemma decodeTCBConfigure_corres: + notes if_cong [cong] option.case_cong [cong] + shows + "\ cap_relation cap cap'; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras'; + is_thread_cap cap \ \ + corres (ser \ tcbinv_relation) (einvs and valid_cap cap and (\s. \x \ set extras. cte_at (snd x) s)) + (invs' and valid_cap' cap' and (\s. \x \ set extras'. cte_at' (snd x) s)) + (decode_tcb_configure args cap slot extras) + (decodeTCBConfigure args cap' (cte_map slot) extras')" + apply (clarsimp simp add: decode_tcb_configure_def decodeTCBConfigure_def) + apply (cases "length args < 4") + apply (clarsimp split: list.split) + apply (cases "length extras < 3") + apply (clarsimp split: list.split simp: list_all2_Cons2) + apply (clarsimp simp: linorder_not_less val_le_length_Cons list_all2_Cons1 + priorityBits_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule decodeSetIPCBuffer_corres; simp) + apply (rule corres_splitEE) + apply (rule decodeSetSpace_corres; simp) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_params" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_space" in corres_gen_asm) + apply (rule_tac F="tcThreadCapSlot setSpace = cte_map slot" in corres_gen_asm2) + apply (rule corres_trivial) + apply (clarsimp simp: tcb_invocation.is_ThreadControl_def returnOk_def is_cap_simps) + apply (wp | simp add: invs_def valid_sched_def)+ + done + +lemma isThreadControl_def2: + "isThreadControl tinv = (\a b c d e f g h. tinv = ThreadControl a b c d e f g h)" + by (cases tinv, simp_all add: isThreadControl_def) + +lemma decodeSetSpaceSome[wp]: + "\\\ decodeSetSpace xs cap y zs + \\rv s. tcNewCRoot rv \ None\,-" + apply (simp add: decodeSetSpace_def split_def cap_CNode_case_throw + cong: list.case_cong if_cong del: not_None_eq) + apply (rule hoare_pre) + apply (wp hoare_drop_imps | wpcw + | simp only: tcbinvocation.sel option.simps)+ + apply simp + done + +lemma decodeTCBConf_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t and cte_at' slot + and (\s. \x \ set extras. s \' fst x \ cte_at' (snd x) s \ t \ snd x \ t + 2^cteSizeBits \ snd x)\ + decodeTCBConfigure args (ThreadCap t) slot extras + \tcb_inv_wf'\,-" + apply (clarsimp simp add: decodeTCBConfigure_def Let_def + split del: if_split cong: list.case_cong) + apply (rule hoare_pre) + apply (wp | wpc)+ + apply (rule_tac Q'="\setSpace s. 
tcb_inv_wf' setSpace s \ tcb_inv_wf' setIPCParams s + \ isThreadControl setSpace \ isThreadControl setIPCParams + \ tcThread setSpace = t \ tcNewCRoot setSpace \ None" + in hoare_strengthen_postE_R) + apply wp + apply (clarsimp simp: isThreadControl_def2 cong: option.case_cong) + apply wpsimp + apply (fastforce simp: isThreadControl_def2 objBits_defs) + done + +lemma lsft_real_cte: + "\valid_objs'\ lookupSlotForThread t x \\rv. real_cte_at' rv\, -" + apply (simp add: lookupSlotForThread_def) + apply (wp resolveAddressBits_real_cte_at'|simp add: split_def)+ + done + +lemma tcb_real_cte_32: + "\ real_cte_at' (t + 2^cteSizeBits) s; tcb_at' t s \ \ False" + by (clarsimp simp: obj_at'_def objBitsKO_def ps_clear_32) + +lemma decodeBindNotification_corres: +notes if_cong[cong] shows + "\ list_all2 (\x y. cap_relation (fst x) (fst y)) extras extras' \ \ + corres (ser \ tcbinv_relation) + (invs and tcb_at t and (\s. \x \ set extras. s \ (fst x))) + (invs' and tcb_at' t and (\s. \x \ set extras'. s \' (fst x))) + (decode_bind_notification (cap.ThreadCap t) extras) + (decodeBindNotification (capability.ThreadCap t) extras')" + apply (simp add: decode_bind_notification_def decodeBindNotification_def) + apply (simp add: null_def returnOk_def) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule corres_trivial) + apply (auto simp: returnOk_def whenE_def)[1] + apply (rule_tac F="extras \ []" in corres_gen_asm) + apply (rule corres_split_eqrE) + apply simp + apply (rule getBoundNotification_corres) + apply (rule corres_split_norE) + apply (rule corres_trivial, simp split: option.splits add: returnOk_def) + apply (rule corres_splitEE_prod[where r'="\rv rv'. ((fst rv) = (fst rv')) \ ((snd rv') = (AllowRead \ (snd rv)))"]) + apply (rule corres_trivial, simp) + apply (case_tac extras, simp, clarsimp simp: list_all2_Cons1) + apply (fastforce split: cap.splits capability.splits simp: returnOk_def) + apply (rule corres_split_norE) + apply (rule corres_trivial, clarsimp simp: whenE_def returnOk_def) + apply (clarsimp split del: if_split) + apply (rule corres_splitEE[where r'=ntfn_relation]) + apply simp + apply (rule getNotification_corres) + apply (rule corres_trivial, simp split del: if_split) + apply (simp add: ntfn_relation_def + split: Structures_A.ntfn.splits Structures_H.ntfn.splits + option.splits) + apply wp+ + apply (wp | simp add: whenE_def split del: if_split)+ + apply (wp | wpc | simp)+ + apply (simp | wp gbn_wp gbn_wp')+ + apply (fastforce simp: valid_cap_def valid_cap'_def dest: hd_in_set)+ + done + +lemma decodeUnbindNotification_corres: + "corres (ser \ tcbinv_relation) + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (decode_unbind_notification (cap.ThreadCap t)) + (decodeUnbindNotification (capability.ThreadCap t))" + apply (simp add: decode_unbind_notification_def decodeUnbindNotification_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqrE) + apply simp + apply (rule getBoundNotification_corres) + apply (rule corres_trivial) + apply (simp split: option.splits) + apply (simp add: returnOk_def) + apply wp+ + apply auto + done + +lemma decodeSetTLSBase_corres: + "corres (ser \ tcbinv_relation) (tcb_at t) (tcb_at' t) + (decode_set_tls_base w (cap.ThreadCap t)) + (decodeSetTLSBase w (capability.ThreadCap t))" + by (clarsimp simp: decode_set_tls_base_def decodeSetTLSBase_def returnOk_def + split: list.split) + +lemma decodeTCBInvocation_corres: + "\ c = Structures_A.ThreadCap t; cap_relation c c'; + list_all2 (\(c, sl) (c', sl'). 
cap_relation c c' \ sl' = cte_map sl) extras extras'; + length args < 2 ^ word_bits \ \ + corres (ser \ tcbinv_relation) (einvs and tcb_at t and (\s. \x \ set extras. s \ fst x \ cte_at (snd x) s)) + (invs' and tcb_at' t and (\s. \x \ set extras'. s \' fst x \ cte_at' (snd x) s)) + (decode_tcb_invocation label args c slot extras) + (decodeTCBInvocation label args c' (cte_map slot) extras')" + apply (rule_tac F="cap_aligned c \ capAligned c'" in corres_req) + apply (clarsimp simp: cap_aligned_def capAligned_def objBits_simps word_bits_def) + apply (drule obj_at_aligned', simp_all add: objBits_simps') + apply (clarsimp simp: decode_tcb_invocation_def + decodeTCBInvocation_def + split del: if_split split: gen_invocation_labels.split) + apply (simp add: returnOk_def) + apply (intro conjI impI + corres_guard_imp[OF decodeReadRegisters_corres] + corres_guard_imp[OF decodeWriteRegisters_corres] + corres_guard_imp[OF decodeCopyRegisters_corres] + corres_guard_imp[OF decodeTCBConfigure_corres] + corres_guard_imp[OF decodeSetPriority_corres] + corres_guard_imp[OF decodeSetMCPriority_corres] + corres_guard_imp[OF decodeSetSchedParams_corres] + corres_guard_imp[OF decodeSetIPCBuffer_corres] + corres_guard_imp[OF decodeSetSpace_corres] + corres_guard_imp[OF decodeBindNotification_corres] + corres_guard_imp[OF decodeUnbindNotification_corres] + corres_guard_imp[OF decodeSetTLSBase_corres], + simp_all add: valid_cap_simps valid_cap_simps' invs_def valid_state_def + valid_pspace_def valid_sched_def) + apply (auto simp: list_all2_map1 list_all2_map2 + elim!: list_all2_mono) + done + +crunch inv[wp]: decodeTCBInvocation P + (simp: crunch_simps) + +lemma real_cte_at_not_tcb_at': + "real_cte_at' x s \ \ tcb_at' x s" + "real_cte_at' (x + 2^cteSizeBits) s \ \ tcb_at' x s" + apply (clarsimp simp: obj_at'_def) + apply (clarsimp elim!: tcb_real_cte_32) + done + +lemma decodeBindNotification_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t + and (\s. \x \ set extras. s \' (fst x) \ (\y \ zobj_refs' (fst x). ex_nonz_cap_to' y s))\ + decodeBindNotification (capability.ThreadCap t) extras + \tcb_inv_wf'\,-" + apply (simp add: decodeBindNotification_def whenE_def + cong: list.case_cong split del: if_split) + apply (rule hoare_pre) + apply (wp getNotification_wp getObject_tcb_wp + | wpc + | simp add: threadGet_def getBoundNotification_def)+ + apply (fastforce simp: valid_cap'_def[where c="capability.ThreadCap t"] + is_ntfn invs_def valid_state'_def valid_pspace'_def + null_def pred_tcb_at'_def obj_at'_def + dest!: global'_no_ex_cap hd_in_set) + done + +lemma decodeUnbindNotification_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + decodeUnbindNotification (capability.ThreadCap t) + \tcb_inv_wf'\,-" + apply (simp add: decodeUnbindNotification_def) + apply (wp getObject_tcb_wp | wpc | simp add: threadGet_def getBoundNotification_def)+ + apply (auto simp: obj_at'_def pred_tcb_at'_def) + done + +lemma decodeSetTLSBase_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + decodeSetTLSBase w (capability.ThreadCap t) + \tcb_inv_wf'\,-" + apply (simp add: decodeSetTLSBase_def + cong: list.case_cong) + by wpsimp + +lemma decodeTCBInv_wf: + "\invs' and tcb_at' t and cte_at' slot and ex_nonz_cap_to' t + and (\s. \x \ set extras. real_cte_at' (snd x) s + \ s \' fst x \ (\y \ zobj_refs' (fst x). 
ex_nonz_cap_to' y s))\ + decodeTCBInvocation label args (capability.ThreadCap t) slot extras + \tcb_inv_wf'\,-" + apply (simp add: decodeTCBInvocation_def Let_def + cong: if_cong gen_invocation_labels.case_cong split del: if_split) + apply (rule hoare_pre) + apply (wpc, (wp decodeTCBConf_wf decodeReadReg_wf decodeWriteReg_wf decodeSetTLSBase_wf + decodeCopyReg_wf decodeBindNotification_wf decodeUnbindNotification_wf)+) + apply (clarsimp simp: real_cte_at') + apply (fastforce simp: real_cte_at_not_tcb_at' objBits_defs) + done + +lemma restart_makes_simple': + "\st_tcb_at' simple' t\ + restart t' + \\rv. st_tcb_at' simple' t\" + apply (simp add: restart_def) + apply (wp sts_st_tcb_at'_cases cancelIPC_simple + cancelIPC_st_tcb_at hoare_weak_lift_imp | simp)+ + apply (rule hoare_strengthen_post [OF isStopped_inv]) + prefer 2 + apply assumption + apply clarsimp + done + +lemma setPriority_st_tcb_at'[wp]: + "\st_tcb_at' P t\ setPriority t' p \\rv. st_tcb_at' P t\" + apply (simp add: setPriority_def) + apply (wp threadSet_pred_tcb_no_state | simp)+ + done + +lemma setMCPriority_st_tcb_at'[wp]: + "\st_tcb_at' P t\ setMCPriority t' p \\rv. st_tcb_at' P t\" + apply (simp add: setMCPriority_def) + apply (wp threadSet_pred_tcb_no_state | simp)+ + done + +lemma cteDelete_makes_simple': + "\st_tcb_at' simple' t\ cteDelete slot v \\rv. st_tcb_at' simple' t\" + by (wp cteDelete_st_tcb_at' | simp)+ + +crunches getThreadBufferSlot, setPriority, setMCPriority + for irq_states'[wp]: valid_irq_states' + (simp: crunch_simps) + +lemma inv_tcb_IRQInactive: + "\valid_irq_states'\ invokeTCB tcb_inv + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + including classic_wp_pre + apply (simp add: invokeTCB_def) + apply (rule hoare_pre) + apply (wpc | + wp withoutPreemption_R cteDelete_IRQInactive checkCap_inv + hoare_vcg_const_imp_lift_R cteDelete_irq_states' + hoare_vcg_const_imp_lift | + simp add: split_def)+ + done + +end + +end diff --git a/proof/refine/AARCH64/Untyped_R.thy b/proof/refine/AARCH64/Untyped_R.thy new file mode 100644 index 0000000000..c18e0b4b48 --- /dev/null +++ b/proof/refine/AARCH64/Untyped_R.thy @@ -0,0 +1,5688 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Proofs about untyped invocations. *) + +theory Untyped_R +imports Detype_R Invocations_R InterruptAcc_R +begin + +unbundle l4v_word_context + +context begin interpretation Arch . (*FIXME: arch_split*) + +primrec + untypinv_relation :: "Invocations_A.untyped_invocation \ + Invocations_H.untyped_invocation \ bool" +where + "untypinv_relation + (Invocations_A.Retype c reset ob n ao n2 cl d) x = (\ao'. x = + (Invocations_H.Retype (cte_map c) reset ob n ao' n2 + (map cte_map cl) d) + \ ao = APIType_map2 (Inr ao'))" + +primrec + valid_untyped_inv_wcap' :: "Invocations_H.untyped_invocation + \ capability option \ kernel_state \ bool" +where + "valid_untyped_inv_wcap' (Invocations_H.Retype slot reset ptr_base ptr ty us slots d) + = (\co s. \sz idx. (cte_wp_at' (\cte. 
cteCap cte = UntypedCap d ptr_base sz idx + \ (co = None \ co = Some (cteCap cte))) slot s + \ range_cover ptr sz (APIType_capBits ty us) (length slots) + \ ((\ reset \ idx \ unat (ptr - ptr_base)) \ (reset \ ptr = ptr_base)) + \ (ptr && ~~ mask sz) = ptr_base) + \ (reset \ descendants_of' slot (ctes_of s) = {}) + \ distinct (slot # slots) + \ (ty = APIObjectType ArchTypes_H.CapTableObject \ us > 0) + \ (ty = APIObjectType ArchTypes_H.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits) + \ (\slot \ set slots. cte_wp_at' (\c. cteCap c = NullCap) slot s) + \ (\slot \ set slots. ex_cte_cap_to' slot s) + \ sch_act_simple s \ 0 < length slots + \ (d \ ty = APIObjectType ArchTypes_H.Untyped \ isFrameType ty) + \ APIType_capBits ty us \ maxUntypedSizeBits)" + +abbreviation + "valid_untyped_inv' ui \ valid_untyped_inv_wcap' ui None" + +lemma valid_untyped_inv_wcap': + "valid_untyped_inv' ui + = (\s. \sz idx. valid_untyped_inv_wcap' ui + (Some (case ui of Invocations_H.Retype slot reset ptr_base ptr ty us slots d + \ UntypedCap d (ptr && ~~ mask sz) sz idx)) s)" + by (cases ui, auto simp: fun_eq_iff cte_wp_at_ctes_of) + +lemma whenE_rangeCheck_eq: + "(rangeCheck (x :: 'a :: {linorder, integral}) y z) = + (whenE (x < fromIntegral y \ fromIntegral z < x) + (throwError (RangeError (fromIntegral y) (fromIntegral z))))" + by (simp add: rangeCheck_def unlessE_whenE linorder_not_le[symmetric]) + +lemma APIType_map2_CapTable[simp]: + "(APIType_map2 ty = Structures_A.CapTableObject) + = (ty = Inr (APIObjectType ArchTypes_H.CapTableObject))" + by (simp add: APIType_map2_def + split: sum.split AARCH64_H.object_type.split + apiobject_type.split + kernel_object.split arch_kernel_object.splits) + +lemma alignUp_H[simp]: + "Untyped_H.alignUp = More_Word_Operations.alignUp" + apply (rule ext)+ + apply (clarsimp simp:Untyped_H.alignUp_def More_Word_Operations.alignUp_def mask_def) + done + +(* FIXME: MOVE *) +lemma corres_check_no_children: + "corres (\x y. x = y) (cte_at slot) + (pspace_aligned' and pspace_distinct' and valid_mdb' and + cte_wp_at' (\_. True) (cte_map slot)) + (const_on_failure x + (doE z \ ensure_no_children slot; + returnOk y + odE)) + (constOnFailure x + (doE z \ ensureNoChildren (cte_map slot); + returnOk y + odE))" + apply (clarsimp simp:const_on_failure_def constOnFailure_def) + apply (rule corres_guard_imp) + apply (rule corres_split_catch[where E = dc and E'=dc]) + apply (rule corres_guard_imp[OF corres_splitEE]) + apply (rule ensureNoChildren_corres) + apply simp + apply (rule corres_returnOkTT) + apply simp + apply wp+ + apply simp+ + apply (clarsimp simp:dc_def,wp)+ + apply simp + apply simp + done + +lemma mapM_x_stateAssert: + "mapM_x (\x. stateAssert (f x) (ss x)) xs + = stateAssert (\s. \x \ set xs. f x s) []" + apply (induct xs) + apply (simp add: mapM_x_Nil) + apply (simp add: mapM_x_Cons) + apply (simp add: fun_eq_iff stateAssert_def bind_assoc exec_get assert_def) + done + +lemma mapM_locate_eq: + "isCNodeCap cap + \ mapM (\x. locateSlotCap cap x) xs + = (do stateAssert (\s. case gsCNodes s (capUntypedPtr cap) of None \ xs = [] | Some n + \ \x \ set xs. n = capCNodeBits cap \ x < 2 ^ n) []; + return (map (\x. 
(capCNodePtr cap) + 2 ^ cte_level_bits * x) xs) od)" + apply (clarsimp simp: isCap_simps) + apply (simp add: locateSlot_conv objBits_simps cte_level_bits_def + liftM_def[symmetric] mapM_liftM_const isCap_simps) + apply (simp add: liftM_def mapM_discarded mapM_x_stateAssert) + apply (intro bind_cong refl arg_cong2[where f=stateAssert] ext) + apply (simp add: isCap_simps split: option.split) + done + +lemmas is_frame_type_defs = is_frame_type_def isFrameType_def arch_is_frame_type_def + +lemma is_frame_type_isFrameType_eq[simp]: + "(is_frame_type (APIType_map2 (Inr (toEnum (unat arg0))))) = + (Types_H.isFrameType (toEnum (unat arg0)))" + by (simp add: APIType_map2_def is_frame_type_defs split: apiobject_type.splits object_type.splits)+ + +(* FIXME: remove *) +lemmas APIType_capBits = objSize_eq_capBits + +(* FIXME: move *) +lemma corres_whenE_throw_merge: + "corres r P P' f (doE _ \ whenE (A \ B) (throwError e); h odE) + \ corres r P P' f (doE _ \ whenE A (throwError e); _ \ whenE B (throwError e); h odE)" + by (auto simp: whenE_def split: if_splits) + +lemma decodeUntypedInvocation_corres: + assumes cap_rel: "list_all2 cap_relation cs cs'" + shows "corres + (ser \ untypinv_relation) + (invs and cte_wp_at ((=) (cap.UntypedCap d w n idx)) slot and (\s. \x \ set cs. s \ x)) + (invs' + and (\s. \x \ set cs'. s \' x)) + (decode_untyped_invocation label args slot (cap.UntypedCap d w n idx) cs) + (decodeUntypedInvocation label args (cte_map slot) + (capability.UntypedCap d w n idx) cs')" +proof (cases "6 \ length args \ cs \ [] + \ gen_invocation_type label = UntypedRetype") + case False + show ?thesis using False cap_rel + apply (clarsimp simp: decode_untyped_invocation_def + decodeUntypedInvocation_def + whenE_whenE_body unlessE_whenE + split del: if_split cong: list.case_cong) + apply (auto split: list.split) + done +next + case True + have val_le_length_Cons: (* clagged from Tcb_R *) + "\n xs. n \ 0 \ (n \ length xs) = (\y ys. xs = y # ys \ (n - 1) \ length ys)" + apply (case_tac xs, simp_all) + apply (case_tac n, simp_all) + done + + obtain arg0 arg1 arg2 arg3 arg4 arg5 argsmore cap cap' csmore csmore' + where args: "args = arg0 # arg1 # arg2 # arg3 # arg4 # arg5 # argsmore" + and cs: "cs = cap # csmore" + and cs': "cs' = cap' # csmore'" + and crel: "cap_relation cap cap'" + using True cap_rel + by (clarsimp simp: neq_Nil_conv list_all2_Cons1 val_le_length_Cons) + + have il: "gen_invocation_type label = UntypedRetype" + using True by simp + + have word_unat_power2: + "\bits. \ bits < 64 \ bits < word_bits \ \ unat (2 ^ bits :: machine_word) = 2 ^ bits" + by (simp add: word_bits_def) + + have P: "\P. corres (ser \ dc) \ \ + (whenE P (throwError ExceptionTypes_A.syscall_error.TruncatedMessage)) + (whenE P (throwError Fault_H.syscall_error.TruncatedMessage))" + by (simp add: whenE_def returnOk_def) + have Q: "\v. corres (ser \ (\a b. APIType_map2 (Inr (toEnum (unat v))) = a)) \ \ + (data_to_obj_type v) + (whenE (fromEnum (maxBound :: AARCH64_H.object_type) < unat v) + (throwError (Fault_H.syscall_error.InvalidArgument 0)))" + apply (simp only: data_to_obj_type_def returnOk_bindE fun_app_def) + apply (simp add: maxBound_def enum_apiobject_type + fromEnum_def whenE_def) + apply (simp add: returnOk_def APIType_map2_def toEnum_def + enum_apiobject_type enum_object_type) + apply (intro conjI impI) + apply (subgoal_tac "unat v - 5 > 5") + apply (simp add: arch_data_to_obj_type_def) + apply simp + apply (subgoal_tac "\n. 
unat v = n + 5") + apply (clarsimp simp: arch_data_to_obj_type_def returnOk_def) + apply (rule_tac x="unat v - 5" in exI) + apply arith + done + have S: "\x (y :: ('g :: len) word) (z :: 'g word) bits. \ bits < len_of TYPE('g); x < 2 ^ bits \ \ toEnum x = (of_nat x :: 'g word)" + apply (rule toEnum_of_nat) + apply (erule order_less_trans) + apply simp + done + obtain xs where xs: "xs = [unat arg4..ref bits. + \ is_aligned ref bits; + Suc (unat arg4 + unat arg5 - Suc 0) \ 2 ^ bits; + bits < 64; 1 \ arg4 + arg5; + arg4 \ arg4 + arg5 \ \ + (map (\x. ref + 2 ^ cte_level_bits * x) [arg4 .e. arg4 + arg5 - 1]) + = map cte_map + (map (Pair ref) + (map (nat_to_cref bits) xs))" + apply (subgoal_tac "Suc (unat (arg4 + arg5 - 1)) = unat arg4 + unat arg5") + apply (simp add: upto_enum_def xs del: upt.simps) + apply (clarsimp simp: cte_map_def) + apply (subst of_bl_nat_to_cref) + apply simp + apply (simp add: word_bits_def) + apply (subst S) + apply simp + apply simp + apply (simp add: cte_level_bits_def shiftl_t2n) + apply unat_arith + done + have another: + "\bits a. \ (a::machine_word) \ 2 ^ bits; bits < word_bits\ + \ 2 ^ bits - a = of_nat (2 ^ bits - unat a)" + apply (subst of_nat_diff) + apply (subst (asm) word_le_nat_alt) + apply (simp add: word_unat_power2) + apply simp + done + have ty_size: + "\x y. (obj_bits_api (APIType_map2 (Inr x)) y) = (Types_H.getObjectSize x y)" + apply (clarsimp simp:obj_bits_api_def APIType_map2_def getObjectSize_def simp del: APIType_capBits) + apply (case_tac x) + apply (simp_all add:arch_kobj_size_def default_arch_object_def pageBits_def ptBits_def) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (simp_all add: apiGetObjectSize_def tcbBlockSizeBits_def epSizeBits_def + ntfnSizeBits_def slot_bits_def cteSizeBits_def bit_simps) + done + obtain if_res where if_res_def: "\reset. if_res reset = (if reset then 0 else idx)" + by auto + have if_res_2n: + "\d res. (\s. s \ cap.UntypedCap d w n idx) \ if_res res \ 2 ^ n" + by (simp add: if_res_def valid_cap_def) + + note word_unat_power [symmetric, simp del] + show ?thesis + apply (rule corres_name_pre) + apply clarsimp + apply (subgoal_tac "cte_wp_at' (\cte. cteCap cte = (capability.UntypedCap d w n idx)) (cte_map slot) s'") + prefer 2 + apply (drule state_relation_pspace_relation) + apply (case_tac slot) + apply simp + apply (drule(1) pspace_relation_cte_wp_at) + apply fastforce+ + apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (frule caps_of_state_valid_cap[unfolded valid_cap_def]) + apply fastforce + apply (clarsimp simp:cap_aligned_def) +(* ugh yuck. who likes a word proof? furthermore, some more restriction of + the returnOk_bindE stuff needs to be done in order to give you a single + target to do the word proof against or else it needs repeating. ugh. + maybe could seperate out the equality Isar-style? 
*) + apply (simp add: decodeUntypedInvocation_def decode_untyped_invocation_def + args cs cs' xs[symmetric] il whenE_rangeCheck_eq + cap_case_CNodeCap unlessE_whenE case_bool_If lookupTargetSlot_def + untypedBits_defs untyped_min_bits_def untyped_max_bits_def + del: upt.simps + split del: if_split + cong: if_cong list.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF Q]) + apply (rule corres_whenE_throw_merge) + apply (rule whenE_throwError_corres) + apply (simp add: word_bits_def word_size) + apply (clarsimp simp: word_size word_bits_def fromIntegral_def ty_size + toInteger_nat fromInteger_nat wordBits_def) + apply (simp add: not_le) + apply (rule whenE_throwError_corres, simp) + apply (clarsimp simp: fromAPIType_def) + apply (rule whenE_throwError_corres, simp) + apply (clarsimp simp: fromAPIType_def) + apply (rule_tac r' = "\cap cap'. cap_relation cap cap'" + in corres_splitEE[OF corres_if]) + apply simp + apply (rule corres_returnOkTT) + apply (rule crel) + apply simp + apply (rule corres_splitEE[OF lookupSlotForCNodeOp_corres]) + apply (rule crel) + apply simp + apply simp + apply (rule getSlotCap_corres,simp) + apply wp+ + apply (rule_tac corres_split_norE) + apply (rule corres_if) + apply simp + apply (rule corres_returnOkTT,clarsimp) + apply (rule corres_trivial) + apply (clarsimp simp: fromAPIType_def lookup_failure_map_def) + apply (rule_tac F="is_cnode_cap rva \ cap_aligned rva" in corres_gen_asm) + apply (subgoal_tac "is_aligned (obj_ref_of rva) (bits_of rva) \ bits_of rva < 64") + prefer 2 + apply (clarsimp simp: is_cap_simps bits_of_def cap_aligned_def word_bits_def + is_aligned_weaken) + apply (rule whenE_throwError_corres) + apply (clarsimp simp:Kernel_Config.retypeFanOutLimit_def is_cap_simps bits_of_def)+ + apply (simp add: unat_arith_simps(2) unat_2p_sub_1 word_bits_def) + apply (rule whenE_throwError_corres) + apply (clarsimp simp:Kernel_Config.retypeFanOutLimit_def is_cap_simps bits_of_def)+ + apply (simp add: unat_eq_0 word_less_nat_alt) + apply (rule whenE_throwError_corres) + apply (clarsimp simp:Kernel_Config.retypeFanOutLimit_def is_cap_simps bits_of_def)+ + apply (clarsimp simp:toInteger_word unat_arith_simps(2) cap_aligned_def) + apply (subst unat_sub) + apply (simp add: linorder_not_less word_le_nat_alt) + apply (fold neq0_conv) + apply (simp add: unat_eq_0 cap_aligned_def) + apply (clarsimp simp:fromAPIType_def) + apply (clarsimp simp:liftE_bindE mapM_locate_eq) + apply (subgoal_tac "unat (arg4 + arg5) = unat arg4 + unat arg5") + prefer 2 + apply (clarsimp simp:not_less) + apply (subst unat_word_ariths(1)) + apply (rule mod_less) + apply (unfold word_bits_len_of)[1] + apply (subgoal_tac "2 ^ bits_of rva < (2 :: nat) ^ word_bits") + apply arith + apply (rule power_strict_increasing, simp add: word_bits_conv) + apply simp + apply (rule_tac P'="valid_cap rva" in corres_stateAssert_implied) + apply (frule_tac bits2 = "bits_of rva" in YUCK) + apply (simp) + apply (simp add: word_bits_conv) + apply (simp add: word_le_nat_alt) + apply (simp add: word_le_nat_alt) + apply (simp add:liftE_bindE[symmetric] free_index_of_def) + apply (rule corres_split_norE) + apply (clarsimp simp:is_cap_simps simp del:ser_def) + apply (simp add: mapME_x_map_simp del: ser_def) + apply (rule_tac P = "valid_cap (cap.CNodeCap r bits g) and invs" in corres_guard_imp [where P' = invs']) + apply (rule mapME_x_corres_inv [OF _ _ _ refl]) + apply (simp del: ser_def) + apply (rule ensureEmptySlot_corres) + apply (clarsimp simp: is_cap_simps) + apply (simp, wp) + apply 
(simp, wp) + apply clarsimp + apply (clarsimp simp add: xs is_cap_simps bits_of_def valid_cap_def) + apply (erule cap_table_at_cte_at) + apply (simp add: nat_to_cref_def word_bits_conv) + apply simp + apply (subst liftE_bindE)+ + apply (rule corres_split_eqr[OF corres_check_no_children]) + apply (simp only: free_index_of_def cap.simps if_res_def[symmetric]) + apply (rule_tac F="if_res reset \ 2 ^ n" in corres_gen_asm) + apply (rule whenE_throwError_corres) + apply (clarsimp simp:shiftL_nat word_less_nat_alt shiftr_div_2n' + split del: if_split)+ + apply (simp add: word_of_nat_le another) + apply (drule_tac x = "if_res reset" in unat_of_nat64[OF le_less_trans]) + apply (simp add:ty_size shiftR_nat)+ + apply (simp add:unat_of_nat64 le_less_trans[OF div_le_dividend] + le_less_trans[OF diff_le_self]) + apply (rule whenE_throwError_corres) + apply (clarsimp) + apply (clarsimp simp: fromAPIType_def) + apply (rule corres_returnOkTT) + apply (clarsimp simp:ty_size getFreeRef_def get_free_ref_def is_cap_simps) + apply simp + apply (strengthen if_res_2n, wp) + apply simp + apply wp + apply (wp mapME_x_inv_wp + validE_R_validE[OF valid_validE_R[OF ensure_empty_inv]] + validE_R_validE[OF valid_validE_R[OF ensureEmpty_inv]])+ + apply (clarsimp simp: is_cap_simps valid_cap_simps + cap_table_at_gsCNodes bits_of_def + linorder_not_less) + apply (erule order_le_less_trans) + apply (rule word_leq_le_minus_one) + apply (simp add: word_le_nat_alt) + apply (simp add: unat_arith_simps) + apply wpsimp+ + apply (rule hoare_strengthen_post [where Q = "\r. invs and valid_cap r and cte_at slot"]) + apply wp+ + apply (clarsimp simp: is_cap_simps bits_of_def cap_aligned_def + valid_cap_def word_bits_def) + apply (frule caps_of_state_valid_cap, clarsimp+) + apply (strengthen refl exI[mk_strg I E] exI[where x=d])+ + apply simp + apply wp+ + apply (rule hoare_strengthen_post [where Q = "\r. invs' and cte_at' (cte_map slot)"]) + apply wp+ + apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct') + apply (wp whenE_throwError_wp | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct' + cte_wp_at_caps_of_state cte_wp_at_ctes_of ) + apply (clarsimp simp: invs_valid_objs invs_psp_aligned) + apply (frule caps_of_state_valid_cap, clarsimp+) + apply (strengthen refl[where t=True] refl exI[mk_strg I E] exI[where x=d])+ + apply (clarsimp simp: is_cap_simps valid_cap_def bits_of_def cap_aligned_def + cte_level_bits_def word_bits_conv) + apply (clarsimp simp: invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct' + cte_wp_at_caps_of_state cte_wp_at_ctes_of ) + done +qed + +lemma decodeUntyped_inv[wp]: + "\P\ decodeUntypedInvocation label args slot (UntypedCap d w n idx) cs \\rv. P\" + apply (simp add: decodeUntypedInvocation_def whenE_def + split_def unlessE_def Let_def + split del: if_split cong: if_cong list.case_cong) + apply (rule hoare_pre) + apply (wp mapME_x_inv_wp hoare_drop_imps constOnFailure_wp + mapM_wp' + | wpcw + | simp add: lookupTargetSlot_def locateSlot_conv)+ + done + +declare inj_Pair[simp] + +declare upt_Suc[simp del] + +lemma descendants_of_cte_at': + "\p \ descendants_of' x (ctes_of s); valid_mdb' s\ \ cte_wp_at' (\_. True) p s" + by (clarsimp simp: descendants_of'_def cte_wp_at_ctes_of dest!: subtree_target_Some) + +lemma ctes_of_ko: + "valid_cap' cap s \ + isUntypedCap cap \ + (\ptr\capRange cap. \optr ko. 
ksPSpace s optr = Some ko \ ptr \ obj_range' optr ko)" + apply (case_tac cap; simp add: isCap_simps capRange_def) + \ \TCB case\ + apply (clarsimp simp: valid_cap'_def obj_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: objBits_def obj_range'_def mask_def add_diff_eq + dest!: projectKO_opt_tcbD simp: objBitsKO_def) + \ \NTFN case\ + apply (clarsimp simp: valid_cap'_def obj_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: objBits_def mask_def add_diff_eq obj_range'_def objBitsKO_def) + \ \EP case\ + apply (clarsimp simp: valid_cap'_def obj_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: objBits_def mask_def add_diff_eq obj_range'_def objBitsKO_def) + \ \Zombie case\ + apply (rename_tac word zombie_type nat) + apply (case_tac zombie_type) + apply (clarsimp simp: valid_cap'_def obj_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: mask_def add_ac objBits_simps' obj_range'_def dest!: projectKO_opt_tcbD) + apply (clarsimp simp: valid_cap'_def obj_at'_def capAligned_def objBits_simps') + apply (frule_tac ptr=ptr and sz=cte_level_bits + in nasty_range [where 'a=machine_word_len, folded word_bits_def]) + apply (simp add: cte_level_bits_def)+ + apply clarsimp + apply (drule_tac x=idx in spec) + apply (clarsimp simp: less_mask_eq) + apply (fastforce simp: obj_range'_def objBits_simps' mask_def field_simps) + \ \Arch cases\ + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + \ \ASID control\ + apply clarsimp + \ \ASIDPool\ + apply (clarsimp simp: valid_cap'_def valid_acap'_def valid_arch_cap_ref'_def typ_at'_def ko_wp_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: obj_range'_def archObjSize_def objBitsKO_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; + simp add: archObjSize_def asid_low_bits_def bit_simps mask_def add_ac) + \ \Frame case\ + apply (rename_tac word vmrights vmpage_size option) + apply (clarsimp simp: valid_cap'_def valid_acap'_def valid_arch_cap_ref'_def typ_at'_def + ko_wp_at'_def capAligned_def) + apply (frule_tac ptr = ptr and sz = "pageBits" in + nasty_range[where 'a=machine_word_len, folded word_bits_def, rotated]) + apply simp + apply (simp add: pbfs_atleast_pageBits)+ + apply (clarsimp simp: frame_at'_def) + apply (drule_tac x = idx in spec, clarsimp simp: typ_at'_def ko_wp_at'_def) + apply (intro exI conjI,assumption) + apply (clarsimp simp: obj_range'_def shiftl_t2n mask_def add_diff_eq) + apply (case_tac ko, simp_all split: if_splits, + (simp add: objBitsKO_def archObjSize_def field_simps shiftl_t2n)+)[1] + \ \PT case\ + apply (rename_tac word pt_t option) + apply (clarsimp simp: valid_cap'_def valid_acap'_def valid_arch_cap_ref'_def obj_at'_def + page_table_at'_def typ_at'_def ko_wp_at'_def) + apply (cut_tac ptr=ptr and bz="ptBits pt_t" and word=word and sz=pte_bits in + nasty_range[where 'a=machine_word_len]; simp?) 
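+  (* PT case continued: instantiate the page_table_at' fact with the index idx obtained from nasty_range, which locates the pte entry containing ptr *)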
+ apply (simp add: pt_bits_def) + apply clarsimp + apply (drule_tac x="ucast idx" in spec) + apply (clarsimp simp: pt_bits_def table_size_def le_mask_iff_lt_2n[THEN iffD1]) + apply (intro exI conjI,assumption) + apply (clarsimp simp: obj_range'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp) + apply (simp add: objBitsKO_def archObjSize_def bit_simps mask_def ucast_ucast_len field_simps + shiftl_t2n) + \ \VCPU case\ + apply (clarsimp simp: valid_cap'_def typ_at'_def ko_wp_at'_def objBits_simps) + apply (intro exI conjI, assumption) + apply (clarsimp simp: obj_range'_def archObjSize_def objBitsKO_def) + apply (case_tac ko, simp+)[1] + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp add: archObjSize_def bit_simps mask_def add_ac) + \ \CNode case\ + apply (clarsimp simp: valid_cap'_def obj_at'_def capAligned_def objBits_simps) + apply (frule_tac ptr=ptr and sz=cte_level_bits + in nasty_range [where 'a=machine_word_len, folded word_bits_def]) + apply (simp add: cte_level_bits_def objBits_defs)+ + apply clarsimp + apply (drule_tac x=idx in spec) + apply (clarsimp simp: less_mask_eq) + apply (fastforce simp: obj_range'_def mask_def objBits_simps' field_simps)[1] + done + +lemma untypedCap_descendants_range': + "\valid_pspace' s; ctes_of s p = Some cte; + isUntypedCap (cteCap cte); valid_mdb' s; + q \ descendants_of' p (ctes_of s) \ + \ cte_wp_at' (\c. (capRange (cteCap c) \ + usableUntypedRange (cteCap cte) = {})) q s" + apply (clarsimp simp: valid_pspace'_def) + apply (frule(1) descendants_of_cte_at') + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (clarsimp simp:valid_mdb'_def) + apply (frule valid_mdb_no_loops) + apply (case_tac "isUntypedCap (cteCap ctea)") + apply (case_tac ctea) + apply (rename_tac cap node) + apply (case_tac cte) + apply (rename_tac cap' node') + apply clarsimp + apply (frule(1) valid_capAligned[OF ctes_of_valid_cap']) + apply (frule_tac c = cap in valid_capAligned[OF ctes_of_valid_cap']) + apply (simp add:untypedCapRange)+ + apply (frule_tac c = cap' in aligned_untypedRange_non_empty) + apply simp + apply (frule_tac c = cap in aligned_untypedRange_non_empty) + apply simp + apply (clarsimp simp:valid_mdb'_def valid_mdb_ctes_def) + apply (drule untyped_incD', simp+) + apply clarify + apply (erule subset_splitE) + apply simp + apply (thin_tac "P \ Q" for P Q)+ + apply (elim conjE) + apply (simp add:descendants_of'_def) + apply (drule(1) subtree_trans) + apply (simp add:no_loops_no_subtree) + apply simp + apply (clarsimp simp:descendants_of'_def | erule disjE)+ + apply (drule(1) subtree_trans) + apply (simp add:no_loops_no_subtree)+ + apply (thin_tac "P \ Q" for P Q)+ + apply (erule(1) disjoint_subset2[OF usableRange_subseteq]) + apply (simp add:Int_ac) + apply (case_tac ctea) + apply (rename_tac cap node) + apply (case_tac cte) + apply clarsimp + apply (drule(1) ctes_of_valid_cap')+ + apply (frule_tac cap = cap in ctes_of_ko; assumption?) + apply (elim disjE) + apply clarsimp+ + apply (thin_tac "s \' cap") + apply (clarsimp simp: valid_cap'_def isCap_simps valid_untyped'_def + simp del: usableUntypedRange.simps untypedRange.simps) + apply (thin_tac "\x y z. 
P x y z" for P) + apply (rule ccontr) + apply (clarsimp dest!: int_not_emptyD + simp del: usableUntypedRange.simps untypedRange.simps) + apply (drule(1) bspec) + apply (clarsimp simp: ko_wp_at'_def simp del: usableUntypedRange.simps untypedRange.simps) + apply (drule_tac x = optr in spec) + apply (clarsimp simp: ko_wp_at'_def simp del: usableUntypedRange.simps untypedRange.simps) + apply (frule(1) pspace_alignedD') + apply (frule(1) pspace_distinctD') + apply (erule(1) impE) + apply (clarsimp simp del: usableUntypedRange.simps untypedRange.simps) + apply blast + done + +lemma cte_wp_at_caps_descendants_range_inI': + "\invs' s; cte_wp_at' (\c. cteCap c = UntypedCap d (ptr && ~~ mask sz) sz idx) cref s; + idx \ unat (ptr && mask sz); sz < word_bits\ + \ descendants_range_in' {ptr .. (ptr && ~~ mask sz) + mask sz} + cref (ctes_of s)" + apply (frule invs_mdb') + apply (frule(1) le_mask_le_2p) + apply (clarsimp simp: descendants_range_in'_def cte_wp_at_ctes_of) + apply (drule untypedCap_descendants_range'[rotated]) + apply (simp add: isCap_simps)+ + apply (simp add: invs_valid_pspace') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule disjoint_subset2[rotated]) + apply clarsimp + apply (rule le_plus'[OF word_and_le2]) + apply simp + apply (erule word_of_nat_le) + done + +lemma checkFreeIndex_wp: + "\\s. if descendants_of' slot (ctes_of s) = {} then Q y s else Q x s\ + constOnFailure x (doE z \ ensureNoChildren slot; returnOk y odE) + \Q\" + apply (clarsimp simp:constOnFailure_def const_def) + apply (wp ensureNoChildren_wp) + apply simp + done + +declare upt_Suc[simp] + +lemma ensureNoChildren_sp: + "\P\ ensureNoChildren sl \\rv s. P s \ descendants_of' sl (ctes_of s) = {}\,-" + by (wp ensureNoChildren_wp, simp) + +lemma dui_sp_helper': + "\P\ if Q then returnOk root_cap + else doE slot \ + lookupTargetSlot root_cap cref dpth; + liftE (getSlotCap slot) + odE \\rv s. (rv = root_cap \ (\slot. cte_wp_at' ((=) rv o cteCap) slot s)) \ P s\, -" + apply (cases Q, simp_all add: lookupTargetSlot_def) + apply (wp, simp) + apply (simp add: getSlotCap_def split_def) + apply wp + apply (rule hoare_strengthen_post [OF getCTE_sp[where P=P]]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (elim allE, drule(1) mp) + apply simp + apply wpsimp + apply simp + done + +lemma map_ensure_empty': + "\\s. (\slot \ set slots. cte_wp_at' (\cte. cteCap cte = NullCap) slot s) \ P s\ + mapME_x ensureEmptySlot slots + \\rv s. P s \,-" + apply (induct slots arbitrary: P) + apply (simp add: mapME_x_def sequenceE_x_def) + apply wp + apply (simp add: mapME_x_def sequenceE_x_def) + apply (rule_tac Q="\rv s. (\slot\set slots. cte_wp_at' (\cte. cteCap cte = NullCap) slot s) \ P s" + in validE_R_sp) + apply (simp add: ensureEmptySlot_def unlessE_def) + apply (wp getCTE_wp') + apply (clarsimp elim!: cte_wp_at_weakenE') + apply (erule meta_allE) + apply (erule hoare_strengthen_postE_R) + apply clarsimp + done + +lemma irq_nodes_global: + "irq_node' s + (ucast (irq :: irq) << cteSizeBits) \ global_refs' s" + by (simp add: global_refs'_def) + +lemma valid_global_refsD2': + "\ctes_of s p = Some cte; valid_global_refs' s\ \ global_refs' s \ capRange (cteCap cte) = {}" + by (blast dest: valid_global_refsD') + +lemma cte_cap_in_untyped_range: + "\ ptr \ x; x \ ptr + mask bits; cte_wp_at' (\cte. 
cteCap cte = UntypedCap d ptr bits idx) cref s; + descendants_of' cref (ctes_of s) = {}; invs' s; + ex_cte_cap_to' x s; valid_global_refs' s \ \ False" + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (case_tac ctea, simp) + apply (rename_tac cap node) + apply (frule ctes_of_valid_cap', clarsimp) + apply (case_tac "\irq. cap = IRQHandlerCap irq") + apply (drule (1) equals0D[where a=x, OF valid_global_refsD2'[where p=cref]]) + apply (clarsimp simp: irq_nodes_global add_mask_fold) + apply (frule_tac p=crefa and p'=cref in caps_containedD', assumption) + apply (clarsimp dest!: isCapDs) + apply (rule_tac x=x in notemptyI) + apply (simp add: subsetD[OF cte_refs_capRange] add_mask_fold) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + apply (frule_tac p=cref and p'=crefa in untyped_mdbD', assumption) + apply (simp_all add: isUntypedCap_def add_mask_fold) + apply (frule valid_capAligned) + apply (frule capAligned_capUntypedPtr) + apply (case_tac cap; simp) + apply blast + apply (case_tac cap; simp) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + done + +lemma cap_case_CNodeCap_True_throw: + "(case cap of CNodeCap a b c d \ returnOk () + | _ \ throw $ e) + = (whenE (\isCNodeCap cap) (throwError e))" + by (simp split: capability.split bool.split + add: whenE_def isCNodeCap_def) + +lemma empty_descendants_range_in': + "\descendants_of' slot m = {}\ \ descendants_range_in' S slot m " + by (clarsimp simp:descendants_range_in'_def) + +lemma liftE_validE_R: + "\P\ f \Q\ \ \P\ liftE f \Q\,-" + by wpsimp + +lemma decodeUntyped_wf[wp]: + "\invs' and cte_wp_at' (\cte. cteCap cte = UntypedCap d w sz idx) slot + and sch_act_simple + and (\s. \x \ set cs. s \' x) + and (\s. \x \ set cs. \r \ cte_refs' x (irq_node' s). 
ex_cte_cap_to' r s)\ + decodeUntypedInvocation label args slot + (UntypedCap d w sz idx) cs + \valid_untyped_inv'\,-" + unfolding decodeUntypedInvocation_def + apply (simp add: unlessE_def[symmetric] unlessE_whenE rangeCheck_def whenE_def[symmetric] + returnOk_liftE[symmetric] Let_def cap_case_CNodeCap_True_throw + split del: if_split cong: if_cong list.case_cong) + apply (rule list_case_throw_validE_R) + apply (clarsimp split del: if_split split: list.splits) + apply (intro conjI impI allI) + apply (wp+)[6] + apply (clarsimp split del: if_split) + apply (rename_tac ty us nodeIndexW nodeDepthW nodeOffset nodeWindow rootCap cs' xs') + apply (rule validE_R_sp[OF map_ensure_empty'] validE_R_sp[OF whenE_throwError_sp] + validE_R_sp[OF dui_sp_helper'])+ + apply (case_tac "\ isCNodeCap nodeCap") + apply (simp add: validE_R_def) + apply (simp add: mapM_locate_eq bind_liftE_distrib bindE_assoc returnOk_liftE[symmetric]) + apply (rule validE_R_sp, rule liftE_validE_R, rule stateAssert_sp) + apply (rule hoare_pre, wp whenE_throwError_wp checkFreeIndex_wp map_ensure_empty') + apply (clarsimp simp:cte_wp_at_ctes_of not_less shiftL_nat) + apply (case_tac cte) + apply clarsimp + apply (frule(1) valid_capAligned[OF ctes_of_valid_cap'[OF _ invs_valid_objs']]) + apply (clarsimp simp:capAligned_def) + apply (subgoal_tac "idx \ 2^ sz") + prefer 2 + apply (frule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) + apply (clarsimp simp:valid_cap'_def valid_untyped_def) + apply (subgoal_tac "(2 ^ sz - idx) < 2^ word_bits") + prefer 2 + apply (rule le_less_trans[where y = "2^sz"]) + apply simp+ + apply (subgoal_tac "of_nat (2 ^ sz - idx) = (2::machine_word)^sz - of_nat idx") + prefer 2 + apply (simp add:word_of_nat_minus) + apply (subgoal_tac "valid_cap' nodeCap s") + prefer 2 + apply (erule disjE) + apply (fastforce dest: cte_wp_at_valid_objs_valid_cap') + apply clarsimp + apply (case_tac cte) + apply clarsimp + apply (drule(1) ctes_of_valid_cap'[OF _ invs_valid_objs'])+ + apply simp + apply (clarsimp simp: toEnum_of_nat [OF less_Suc_unat_less_bound]) + apply (subgoal_tac "args ! 4 \ 2 ^ capCNodeBits nodeCap") + prefer 2 + apply (clarsimp simp: isCap_simps) + apply (subst (asm) le_m1_iff_lt[THEN iffD1]) + apply (clarsimp simp:valid_cap'_def isCap_simps p2_gt_0 capAligned_def word_bits_def) + apply (rule less_imp_le) + apply simp + apply (subgoal_tac + "distinct (map (\y. capCNodePtr nodeCap + y * 2^cte_level_bits) [args ! 4 .e. args ! 4 + args ! 
5 - 1])") + prefer 2 + apply (simp add: distinct_map upto_enum_def del: upt_Suc) + apply (rule comp_inj_on) + apply (rule inj_onI) + apply (clarsimp dest!: less_Suc_unat_less_bound) + apply (erule word_unat.Abs_eqD) + apply (simp add: unats_def) + apply (simp add: unats_def) + apply (rule inj_onI) + apply (clarsimp simp: toEnum_of_nat[OF less_Suc_unat_less_bound] isCap_simps) + apply (erule(2) inj_bits, simp add: cte_level_bits_def word_bits_def) + apply (subst Suc_unat_diff_1) + apply (rule word_le_plus_either,simp) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply (drule(1) le_plus) + apply (rule unat_le_helper) + apply (erule order_trans) + apply (subst unat_power_lower64[symmetric], simp add: word_bits_def cte_level_bits_def) + apply (simp add: word_less_nat_alt[symmetric]) + apply (rule two_power_increasing) + apply (clarsimp dest!: valid_capAligned + simp: capAligned_def objBits_def objBitsKO_def) + apply (simp_all add: word_bits_def cte_level_bits_def objBits_defs)[2] + apply (clarsimp simp: AARCH64_H.fromAPIType_def) + apply (subgoal_tac "Suc (unat (args ! 4 + args ! 5 - 1)) = unat (args ! 4) + unat (args ! 5)") + prefer 2 + apply simp + apply (subst Suc_unat_diff_1) + apply (rule word_le_plus_either,simp) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply (rule unat_plus_simple[THEN iffD1]) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply clarsimp + apply (subgoal_tac "(\x. (args ! 4) \ x \ x \ (args ! 4) + (args ! 5) - 1 \ + ex_cte_cap_wp_to' (\_. True) (capCNodePtr nodeCap + x * 2^cteSizeBits) s)") + prefer 2 + apply clarsimp + apply (erule disjE) + apply (erule bspec) + apply (clarsimp simp:isCap_simps image_def shiftl_t2n mult_ac) + apply (rule_tac x = x in bexI,simp) + apply (simp add: mask_def) + apply (erule order_trans) + apply (frule(1) le_plus) + apply (rule word_l_diffs,simp+) + apply (rule word_le_plus_either,simp) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply (clarsimp simp:ex_cte_cap_wp_to'_def) + apply (rule_tac x = nodeSlot in exI) + apply (case_tac cte) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps image_def + shiftl_t2n) + apply (rule_tac x = x in bexI,simp) + apply (simp add: mask_def) + apply (erule order_trans) + apply (frule(1) le_plus) + apply (rule word_l_diffs,simp+) + apply (rule word_le_plus_either,simp) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply (simp add: fromIntegral_def toInteger_nat fromInteger_nat) + apply (rule conjI) + apply (simp add: objBits_defs cte_level_bits_def) + apply (clarsimp simp:of_nat_shiftR word_le_nat_alt) + apply (frule_tac n = "unat (args ! 5)" + and bits = "(APIType_capBits (toEnum (unat (args ! 0))) (unat (args ! 
1)))" + in range_cover_stuff[where rv = 0,rotated -1]) + apply (simp add:unat_1_0) + apply simp + apply (simp add:word_sub_le_iff word_of_nat_le) + apply simp+ + apply (clarsimp simp:getFreeRef_def) + apply (frule alignUp_idem[OF is_aligned_weaken,where a = w]) + apply (erule range_cover.sz) + apply (simp add:range_cover_def) + apply (simp add:empty_descendants_range_in' untypedBits_defs) + apply (clarsimp simp: image_def isCap_simps nullPointer_def word_size field_simps) + apply (intro conjI) + apply (clarsimp simp: image_def isCap_simps nullPointer_def word_size field_simps) + apply (drule_tac x=x in spec)+ + apply simp + apply (clarsimp simp: APIType_capBits_def) + apply clarsimp + apply (clarsimp simp: image_def getFreeRef_def cte_level_bits_def objBits_simps' field_simps) + apply (clarsimp simp: of_nat_shiftR word_le_nat_alt) + apply (frule_tac n = "unat (args ! 5)" + and bits = "(APIType_capBits (toEnum (unat (args ! 0))) (unat (args ! 1)))" + in range_cover_stuff[where w=w and sz=sz and rv = idx,rotated -1]; simp?) + apply (intro conjI; clarsimp simp add: image_def word_size) + apply (clarsimp simp: image_def isCap_simps nullPointer_def word_size field_simps) + apply (drule_tac x=x in spec)+ + apply simp + apply (clarsimp simp: APIType_capBits_def) + done + +lemma corres_list_all2_mapM_': + assumes w: "suffix xs oxs" "suffix ys oys" + assumes y: "\x xs y ys. \ F x y; suffix (x # xs) oxs; suffix (y # ys) oys \ + \ corres dc (P (x # xs)) (P' (y # ys)) (f x) (g y)" + assumes z: "\x y xs. \ F x y; suffix (x # xs) oxs \ \ \P (x # xs)\ f x \\rv. P xs\" + "\x y ys. \ F x y; suffix (y # ys) oys \ \ \P' (y # ys)\ g y \\rv. P' ys\" + assumes x: "list_all2 F xs ys" + shows "corres dc (P xs) (P' ys) (mapM_x f xs) (mapM_x g ys)" + apply (insert x w) + apply (induct xs arbitrary: ys) + apply (simp add: mapM_x_def sequence_x_def) + apply (case_tac ys) + apply simp + apply (clarsimp simp add: mapM_x_def sequence_x_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF y]; assumption?) 
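+  (* the remaining corres goal is for the tails: suffix_ConsD shows xs and ys are still suffixes of oxs and oys, so the induction hypothesis applies; the Hoare triples z discharge the split's side conditions *)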
+ apply (clarsimp dest!: suffix_ConsD) + apply (erule meta_allE, (drule(1) meta_mp)+) + apply assumption + apply (erule(1) z)+ + apply simp+ + done + +lemmas suffix_refl = suffix_order.refl + +lemmas corres_list_all2_mapM_ + = corres_list_all2_mapM_' [OF suffix_refl suffix_refl] + +declare modify_map_id[simp] + +lemma valid_mdbD3': + "\ ctes_of s p = Some cte; valid_mdb' s \ \ p \ 0" + by (clarsimp simp add: valid_mdb'_def valid_mdb_ctes_def no_0_def) + +lemma capRange_sameRegionAs: + "\ sameRegionAs x y; s \' y; capClass x = PhysicalClass \ capClass y = PhysicalClass \ + \ capRange x \ capRange y \ {}" + apply (erule sameRegionAsE) + apply (subgoal_tac "capClass x = capClass y \ capRange x = capRange y") + apply simp + apply (drule valid_capAligned) + apply (drule(1) capAligned_capUntypedPtr) + apply clarsimp + apply (rule conjI) + apply (rule master_eqI, rule capClass_Master, simp) + apply (rule master_eqI, rule capRange_Master, simp) + apply blast + apply blast + apply (clarsimp simp: isCap_simps)+ + done +end + +locale mdb_insert_again = + mdb_ptr_parent?: mdb_ptr m _ _ parent parent_cap parent_node + + mdb_ptr_site?: mdb_ptr m _ _ site site_cap site_node + for m parent parent_cap parent_node site site_cap site_node + + + fixes c' + + assumes site_cap: "site_cap = NullCap" + assumes site_prev: "mdbPrev site_node = 0" + assumes site_next: "mdbNext site_node = 0" + + assumes is_untyped: "isUntypedCap parent_cap" + assumes same_region: "sameRegionAs parent_cap c'" + + assumes range: "descendants_range' c' parent m" + assumes phys: "capClass c' = PhysicalClass" + + fixes s + assumes valid_capI': "m p = Some (CTE cap node) \ s \' cap" + + assumes ut_rev: "ut_revocable' m" + + fixes n + defines "n \ + (modify_map + (\x. if x = site + then Some (CTE c' (MDB (mdbNext parent_node) parent True True)) + else m x) + parent (cteMDBNode_update (mdbNext_update (\x. site))))" + + assumes neq: "parent \ site" + +context mdb_insert_again +begin +interpretation Arch . (*FIXME: arch_split*) +lemmas parent = mdb_ptr_parent.m_p +lemmas site = mdb_ptr_site.m_p + +lemma next_wont_bite: + "\ mdbNext parent_node \ 0; m (mdbNext parent_node) = Some cte \ + \ \ sameRegionAs c' (cteCap cte)" + using range ut_rev + apply (cases cte) + apply clarsimp + apply (cases "m \ parent \ mdbNext parent_node") + apply (drule (2) descendants_rangeD') + apply (drule capRange_sameRegionAs) + apply (erule valid_capI') + apply (simp add: phys) + apply blast + apply (erule notE, rule direct_parent) + apply (clarsimp simp: mdb_next_unfold parent) + apply assumption + apply (simp add: parentOf_def parent) + apply (insert is_untyped same_region) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (rule conjI) + apply (erule (1) sameRegionAs_trans) + apply (simp add: ut_revocable'_def) + apply (insert parent) + apply simp + apply (clarsimp simp: isCap_simps) + done + +lemma no_0_helper: "no_0 m \ no_0 n" + by (simp add: n_def, simp add: no_0_def) + +lemma no_0_n [intro!]: "no_0 n" by (auto intro: no_0_helper) + +lemmas n_0_simps [iff] = no_0_simps [OF no_0_n] + +lemmas neqs [simp] = neq neq [symmetric] + +definition + "new_site \ CTE c' (MDB (mdbNext parent_node) parent True True)" + +definition + "new_parent \ CTE parent_cap (mdbNext_update (\a. 
site) parent_node)" + +lemma n: "n = m (site \ new_site, parent \ new_parent)" + using parent site + by (simp add: n_def modify_map_apply new_site_def new_parent_def + fun_upd_def[symmetric]) + +lemma site_no_parent [iff]: + "m \ site \ x = False" using site site_next + by (auto dest: subtree_next_0) + +lemma site_no_child [iff]: + "m \ x \ site = False" using site site_prev + by (auto dest: subtree_prev_0) + +lemma parent_next: "m \ parent \ mdbNext parent_node" + by (simp add: parent mdb_next_unfold) + +lemma parent_next_rtrancl_conv [simp]: + "m \ mdbNext parent_node \\<^sup>* site = m \ parent \\<^sup>+ site" + apply (rule iffI) + apply (insert parent_next) + apply (fastforce dest: rtranclD) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma site_no_next [iff]: + "m \ x \ site = False" using site site_prev dlist + apply clarsimp + apply (simp add: mdb_next_unfold) + apply (elim exE conjE) + apply (case_tac z) + apply simp + apply (rule dlistEn [where p=x], assumption) + apply clarsimp + apply clarsimp + done + +lemma site_no_next_trans [iff]: + "m \ x \\<^sup>+ site = False" + by (clarsimp dest!: tranclD2) + +lemma site_no_prev [iff]: + "m \ site \ p = (p = 0)" using site site_next + by (simp add: mdb_next_unfold) + +lemma site_no_prev_trancl [iff]: + "m \ site \\<^sup>+ p = (p = 0)" + apply (rule iffI) + apply (drule tranclD) + apply clarsimp + apply simp + apply (insert chain site) + apply (simp add: mdb_chain_0_def) + apply auto + done + +lemma chain_n: + "mdb_chain_0 n" +proof - + from chain + have "m \ mdbNext parent_node \\<^sup>* 0" using dlist parent + apply (cases "mdbNext parent_node = 0") + apply simp + apply (erule dlistEn, simp) + apply (auto simp: mdb_chain_0_def) + done + moreover + have "\m \ mdbNext parent_node \\<^sup>* parent" + using parent_next + apply clarsimp + apply (drule (1) rtrancl_into_trancl2) + apply simp + done + moreover + have "\ m \ 0 \\<^sup>* site" using no_0 site + by (auto elim!: next_rtrancl_tranclE dest!: no_0_lhs_trancl) + moreover + have "\ m \ 0 \\<^sup>* parent" using no_0 parent + by (auto elim!: next_rtrancl_tranclE dest!: no_0_lhs_trancl) + moreover + note chain + ultimately show "mdb_chain_0 n" using no_0 parent site + apply (simp add: n new_parent_def new_site_def) + apply (auto intro!: mdb_chain_0_update no_0_update simp: next_update_lhs_rtrancl) + done +qed + +lemma no_loops_n: "no_loops n" using chain_n no_0_n + by (rule mdb_chain_0_no_loops) + +lemma n_direct_eq: + "n \ p \ p' = (if p = parent then p' = site else + if p = site then m \ parent \ p' + else m \ p \ p')" + using parent site site_prev + by (auto simp: mdb_next_update n new_parent_def new_site_def + parent_next mdb_next_unfold) + +lemma next_not_parent: + "\ mdbNext parent_node \ 0; m (mdbNext parent_node) = Some cte \ + \ \ isMDBParentOf new_site cte" + apply (drule(1) next_wont_bite) + apply (cases cte) + apply (simp add: isMDBParentOf_def new_site_def) + done + +(* The newly inserted cap should never have children. 
*) +lemma site_no_parent_n: + "n \ site \ p = False" using parent valid_badges + apply clarsimp + apply (erule subtree.induct) + prefer 2 + apply simp + apply (clarsimp simp: parentOf_def mdb_next_unfold new_site_def n) + apply (cases "mdbNext parent_node = site") + apply (subgoal_tac "m \ parent \ site") + apply simp + apply (subst mdb_next_unfold) + apply (simp add: parent) + apply clarsimp + apply (erule notE[rotated], erule(1) next_not_parent[unfolded new_site_def]) + done + +end + +locale mdb_insert_again_child = mdb_insert_again + + assumes child: + "isMDBParentOf + (CTE parent_cap parent_node) + (CTE c' (MDB (mdbNext parent_node) parent True True))" + +context mdb_insert_again_child +begin + +lemma new_child [simp]: + "isMDBParentOf new_parent new_site" + by (simp add: new_parent_def new_site_def) (rule child) + +lemma n_site_child: + "n \ parent \ site" + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def parent site n) + done + +lemma parent_m_n: + assumes "m \ p \ p'" + shows "if p' = parent then n \ p \ site \ n \ p \ p' else n \ p \ p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply (cases "p = parent") + apply simp + apply (rule conjI, clarsimp) + apply clarsimp + apply (rule subtree.trans_parent [where c'=site]) + apply (rule n_site_child) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (clarsimp simp: new_parent_def parent) + apply simp + apply (subgoal_tac "n \ p \ c") + prefer 2 + apply (rule subtree.direct_parent) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: new_parent_def parent) + apply clarsimp + apply (erule subtree_trans) + apply (rule n_site_child) + done +next + case (trans_parent c d) + thus ?case + apply - + apply (cases "c = site", simp) + apply (cases "d = site", simp) + apply (cases "c = parent") + apply clarsimp + apply (erule subtree.trans_parent [where c'=site]) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (rule conjI, clarsimp) + apply (clarsimp simp: new_parent_def parent) + apply clarsimp + apply (subgoal_tac "n \ p \ d") + apply clarsimp + apply (erule subtree_trans, rule n_site_child) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: parent new_parent_def) + done +qed + +lemma n_to_site [simp]: + "n \ p \ site = (p = parent)" + by (simp add: n_direct_eq) + +lemma parent_n_m: + assumes "n \ p \ p'" + shows "if p' = site then p \ parent \ m \ p \ parent else m \ p \ p'" +proof - + from assms have [simp]: "p \ site" by (clarsimp simp: site_no_parent_n) + from assms + show ?thesis + proof induct + case (direct_parent c) + thus ?case + apply simp + apply (rule conjI) + apply clarsimp + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply simp + apply (clarsimp simp: parentOf_def n parent new_parent_def split: if_split_asm) + done + next + case (trans_parent c d) + thus ?case + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp split: if_split_asm) + apply (simp add: n_direct_eq) + apply (cases "p=parent") + apply simp + apply (rule subtree.direct_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n parent new_parent_def split: if_split_asm) + apply clarsimp + apply (erule subtree.trans_parent, assumption, 
assumption) + apply (clarsimp simp: parentOf_def n parent new_parent_def split: if_split_asm) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def n parent new_parent_def split: if_split_asm) + done + qed +qed + +lemma descendants: + "descendants_of' p n = + (if parent \ descendants_of' p m \ p = parent + then descendants_of' p m \ {site} else descendants_of' p m)" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (fastforce dest!: parent_n_m dest: parent_m_n simp: n_site_child split: if_split_asm) + done + +end + +lemma blarg_descendants_of': + "descendants_of' x (modify_map m p (if P then id else cteMDBNode_update (mdbPrev_update f))) + = descendants_of' x m" + by (simp add: descendants_of'_def) + +lemma bluhr_descendants_of': + "mdb_insert_again_child (ctes_of s') parent parent_cap pmdb site site_cap site_node cap s + \ + descendants_of' x + (modify_map + (modify_map + (\c. if c = site + then Some (CTE cap (MDB (mdbNext pmdb) parent True True)) + else ctes_of s' c) + (mdbNext pmdb) + (if mdbNext pmdb = 0 then id + else cteMDBNode_update (mdbPrev_update (\x. site)))) + parent (cteMDBNode_update (mdbNext_update (\x. site)))) + = (if parent \ descendants_of' x (ctes_of s') \ x = parent + then descendants_of' x (ctes_of s') \ {site} + else descendants_of' x (ctes_of s'))" + apply (subst modify_map_com) + apply (case_tac x, rename_tac node, case_tac node, clarsimp) + apply (subst blarg_descendants_of') + apply (erule mdb_insert_again_child.descendants) + done + +lemma mdb_relation_simp: + "\ (s, s') \ state_relation; cte_at p s \ + \ descendants_of' (cte_map p) (ctes_of s') = cte_map ` descendants_of p (cdt s)" + by (cases p, clarsimp simp: state_relation_def cdt_relation_def) + +lemma in_getCTE2: + "((cte, s') \ fst (getCTE p s)) = (s' = s \ cte_wp_at' ((=) cte) p s)" + apply (safe dest!: in_getCTE) + apply (clarsimp simp: cte_wp_at'_def getCTE_def) + done + +declare wrap_ext_op_det_ext_ext_def[simp] + +lemma do_ext_op_update_cdt_list_symb_exec_l': + "corres_underlying {(s::det_state, s'). f (kheap s) (ekheap s) s'} nf nf' dc P P' (create_cap_ext p z a) (return x)" + apply (simp add: corres_underlying_def create_cap_ext_def + update_cdt_list_def set_cdt_list_def bind_def put_def get_def gets_def return_def) + done + +crunches updateMDB, updateNewFreeIndex + for it'[wp]: "\s. P (ksIdleThread s)" + and ups'[wp]: "\s. P (gsUserPages s)" + and cns'[wp]: "\s. P (gsCNodes s)" + and ksDomainTime[wp]: "\s. P (ksDomainTime s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and ksWorkUnitsCompleted[wp]: "\s. P (ksWorkUnitsCompleted s)" + and ksMachineState[wp]: "\s. P (ksMachineState s)" + and ksArchState[wp]: "\s. P (ksArchState s)" + +crunches insertNewCap + for ksInterrupt[wp]: "\s. P (ksInterruptState s)" + and norq[wp]: "\s. P (ksReadyQueues s)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and pspace_canonical'[wp]: pspace_canonical' + (wp: crunch_wps) + +crunches insertNewCaps + for nosch[wp]: "\s. P (ksSchedulerAction s)" + (simp: crunch_simps zipWithM_x_mapM wp: crunch_wps) + + +crunch exst[wp]: set_cdt "\s. P (exst s)" + +lemma set_original_symb_exec_l: + "corres_underlying {(s, s'). 
f (kheap s) (exst s) s'} nf nf' dc P P' (set_original p b) (return x)" + by (simp add: corres_underlying_def return_def set_original_def in_monad Bex_def) + +lemma set_cdt_symb_exec_l: + "corres_underlying {(s, s'). f (kheap s) (exst s) s'} nf nf' dc P P' (set_cdt g) (return x)" + by (simp add: corres_underlying_def return_def set_cdt_def in_monad Bex_def) + +crunch domain_index[wp]: create_cap_ext "\s. P (domain_index s)" +crunch work_units_completed[wp]: create_cap_ext "\s. P (work_units_completed s)" + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma updateNewFreeIndex_noop_psp_corres: + "corres_underlying {(s, s'). pspace_relations (ekheap s) (kheap s) (ksPSpace s')} False True + dc \ (cte_at' slot) + (return ()) (updateNewFreeIndex slot)" + apply (simp add: updateNewFreeIndex_def) + apply (rule corres_guard_imp) + apply (rule corres_bind_return2) + apply (rule corres_symb_exec_r_conj[where P'="cte_at' slot"]) + apply (rule corres_trivial, simp) + apply (wp getCTE_wp' | wpc + | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ + done + +crunches updateMDB, updateNewFreeIndex, setCTE + for rdyq_projs[wp]: + "\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) (\d p. inQ d p |< tcbs_of' s)" + +lemma insertNewCap_corres: +notes if_cong[cong del] if_weak_cong[cong] +shows + "\ cref' = cte_map (fst tup) + \ cap_relation (default_cap tp (snd tup) sz d) cap \ \ + corres dc + (cte_wp_at ((=) cap.NullCap) (fst tup) and pspace_aligned + and pspace_distinct and valid_objs and valid_mdb and valid_list + and cte_wp_at ((\) cap.NullCap) p) + (cte_wp_at' (\c. cteCap c = NullCap) cref' and + cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ sameRegionAs (cteCap cte) cap) (cte_map p) + and valid_mdb' and pspace_aligned' and pspace_distinct' and valid_objs' + and (\s. 
descendants_range' cap (cte_map p) (ctes_of s))) + (create_cap tp sz p d tup) + (insertNewCap (cte_map p) cref' cap)" + apply (cases tup, + clarsimp simp add: create_cap_def insertNewCap_def + liftM_def) + apply (rule corres_symb_exec_r [OF _ getCTE_sp])+ + prefer 3 + apply (rule no_fail_pre, wp) + apply (clarsimp elim!: cte_wp_at_weakenE') + prefer 4 + apply (rule no_fail_pre, wp) + apply (clarsimp elim!: cte_wp_at_weakenE') + apply (rule corres_assert_assume) + prefer 2 + apply (case_tac oldCTE) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def + valid_nullcaps_def) + apply (erule allE)+ + apply (erule (1) impE) + apply (simp add: initMDBNode_def) + apply clarsimp + apply (rule_tac F="capClass cap = PhysicalClass" in corres_req) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply (drule sameRegionAs_classes, simp) + apply (rule corres_caps_decomposition) + prefer 3 + apply wp+ + apply (rule hoare_post_imp, simp) + apply (wp | assumption)+ + defer + apply ((wp | simp)+)[1] + apply (simp add: create_cap_ext_def set_cdt_list_def update_cdt_list_def bind_assoc) + apply ((wp | simp)+)[1] + apply (wp updateMDB_ctes_of_cases + | simp add: o_def split del: if_split)+ + apply (clarsimp simp: cdt_relation_def cte_wp_at_ctes_of + split del: if_split cong: if_cong simp del: id_apply) + apply (subst if_not_P, erule(1) valid_mdbD3') + apply (case_tac x, case_tac oldCTE) + apply (subst bluhr_descendants_of') + apply (rule mdb_insert_again_child.intro) + apply (rule mdb_insert_again.intro) + apply (rule mdb_ptr.intro) + apply (simp add: valid_mdb'_def vmdb_def) + apply (rule mdb_ptr_axioms.intro) + apply simp + apply (rule mdb_ptr.intro) + apply (simp add: valid_mdb'_def vmdb_def) + apply (rule mdb_ptr_axioms.intro) + apply fastforce + apply (rule mdb_insert_again_axioms.intro) + apply (clarsimp simp: nullPointer_def)+ + apply (erule (1) ctes_of_valid_cap') + apply (simp add: valid_mdb'_def valid_mdb_ctes_def) + apply clarsimp + apply (rule mdb_insert_again_child_axioms.intro) + apply (clarsimp simp: isMDBParentOf_def) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def + ut_revocable'_def) + apply (fold fun_upd_def) + apply (subst descendants_of_insert_child') + apply (erule(1) mdb_Null_descendants) + apply (clarsimp simp: cte_wp_at_def) + apply (erule(1) mdb_Null_None) + apply (subgoal_tac "cte_at (aa, bb) s") + prefer 2 + apply (drule not_sym, clarsimp simp: cte_wp_at_caps_of_state split: if_split_asm) + apply (subst descendants_of_eq' [OF _ cte_wp_at_cte_at], assumption+) + apply (clarsimp simp: state_relation_def) + apply assumption+ + apply (subst cte_map_eq_subst [OF _ cte_wp_at_cte_at], assumption+) + apply (simp add: mdb_relation_simp) + defer + apply (clarsimp split del: if_split)+ + apply (clarsimp simp add: revokable_relation_def cte_wp_at_ctes_of + split del: if_split) + apply simp + apply (rule conjI) + apply clarsimp + apply (elim modify_map_casesE) + apply ((clarsimp split: if_split_asm cong: conj_cong + simp: cte_map_eq_subst cte_wp_at_cte_at + revokable_relation_simp)+)[4] + apply clarsimp + apply (subgoal_tac "null_filter (caps_of_state s) (aa, bb) \ None") + prefer 2 + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state split: if_split_asm) + apply (subgoal_tac "cte_at (aa,bb) s") + prefer 2 + apply clarsimp + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_cte_at) + apply (elim modify_map_casesE) + apply (clarsimp split: if_split_asm cong: conj_cong + simp: cte_map_eq_subst 
cte_wp_at_cte_at revokable_relation_simp)+ + apply (clarsimp simp: state_relation_def ghost_relation_of_heap pt_types_of_heap_eq o_def)+ + apply wp+ + apply (rule corres_guard_imp) + apply (rule corres_underlying_symb_exec_l [OF gets_symb_exec_l]) + apply (rule corres_underlying_symb_exec_l [OF gets_symb_exec_l]) + apply (rule corres_underlying_symb_exec_l [OF set_cdt_symb_exec_l]) + apply (rule corres_underlying_symb_exec_l [OF do_ext_op_update_cdt_list_symb_exec_l']) + apply (rule corres_underlying_symb_exec_l [OF set_original_symb_exec_l]) + apply (rule corres_cong[OF refl refl _ refl refl, THEN iffD1]) + apply (rule bind_return[THEN fun_cong]) + apply (rule corres_split) + apply (rule setCTE_corres; simp) + apply (subst bind_return[symmetric], + rule corres_split) + apply (simp add: dc_def[symmetric]) + apply (rule updateMDB_symb_exec_r) + apply (simp add: dc_def[symmetric]) + apply (rule corres_split_noop_rhs[OF updateMDB_symb_exec_r]) + apply (rule updateNewFreeIndex_noop_psp_corres) + apply (wp getCTE_wp set_cdt_valid_objs set_cdt_cte_at + hoare_weak_lift_imp | simp add: o_def)+ + apply (clarsimp simp: cte_wp_at_cte_at) + apply (clarsimp simp: cte_wp_at_ctes_of no_0_def valid_mdb'_def + valid_mdb_ctes_def) + apply (rule conjI, clarsimp) + apply clarsimp + apply (erule (2) valid_dlistEn) + apply simp + apply(simp only: cdt_list_relation_def valid_mdb_def2) + apply(subgoal_tac "finite_depth (cdt s)") + prefer 2 + apply(simp add: finite_depth valid_mdb_def2[symmetric]) + apply(intro impI allI) + apply(subgoal_tac "mdb_insert_abs (cdt s) p (a, b)") + prefer 2 + apply(clarsimp simp: cte_wp_at_caps_of_state) + apply(rule mdb_insert_abs.intro) + apply(clarsimp) + apply(erule (1) mdb_cte_at_Null_None) + apply (erule (1) mdb_cte_at_Null_descendants) + apply(subgoal_tac "no_0 (ctes_of s')") + prefer 2 + apply(simp add: valid_mdb_ctes_def valid_mdb'_def) + apply simp + apply (elim conjE) + apply (case_tac "cdt s (a,b)") + prefer 2 + apply (simp add: mdb_insert_abs_def) + apply simp + apply(case_tac x) + apply(simp add: cte_wp_at_ctes_of) + apply(simp add: mdb_insert_abs.next_slot split del: if_split) + apply(case_tac "c=p") + apply(simp) + apply(clarsimp simp: modify_map_def) + apply(case_tac z) + apply(fastforce split: if_split_asm) + apply(case_tac "c = (a, b)") + apply(simp) + apply(case_tac "next_slot p (cdt_list s) (cdt s)") + apply(simp) + apply(simp) + apply(clarsimp simp: modify_map_def const_def) + apply(clarsimp split: if_split_asm) + apply(drule_tac p="cte_map p" in valid_mdbD1') + apply(simp) + apply(simp add: valid_mdb'_def valid_mdb_ctes_def) + apply(clarsimp simp: nullPointer_def no_0_def) + apply(clarsimp simp: state_relation_def) + apply(clarsimp simp: cte_wp_at_caps_of_state) + apply(drule_tac slot=p in pspace_relation_ctes_ofI) + apply(simp add: cte_wp_at_caps_of_state) + apply(simp) + apply(simp) + apply(simp) + apply(clarsimp simp: state_relation_def cdt_list_relation_def) + apply(erule_tac x="fst p" in allE, erule_tac x="snd p" in allE) + apply(fastforce) + apply(simp) + apply(case_tac "next_slot c (cdt_list s) (cdt s)") + apply(simp) + apply(simp) + apply(subgoal_tac "cte_at c s") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all add: valid_mdb_def2)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(simp add: valid_mdb'_def) + apply(drule_tac ptr="cte_map p" in no_self_loop_next) + apply(simp) + apply(simp) + apply(drule_tac p="(aa, bb)" in cte_map_inj) + apply(simp_all add: cte_wp_at_caps_of_state)[5] + 
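+  (* cte_map is injective on cte_at slots (cte_map_inj, cte_map_inj_eq), so equalities between mapped slots reduce to equalities on the abstract slots *)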
apply(clarsimp)
+  apply(simp)
+  apply(clarsimp)
+  apply(drule cte_map_inj_eq; simp add: cte_wp_at_caps_of_state)
+  apply(clarsimp)
+  apply(case_tac z)
+  apply(clarsimp simp: state_relation_def cdt_list_relation_def)
+  apply(erule_tac x=aa in allE, erule_tac x=bb in allE)
+  apply(fastforce)
+  apply(clarsimp)
+  apply(drule cte_map_inj_eq)
+  apply(simp_all add: cte_wp_at_caps_of_state)[6]
+  apply(clarsimp simp: state_relation_def cdt_list_relation_def)
+  apply(erule_tac x=aa in allE, erule_tac x=bb in allE, fastforce)
+  done
+
+definition apitype_of :: "cap \<Rightarrow> apiobject_type option" where
+  "apitype_of c \<equiv> case c of
+      Structures_A.UntypedCap d p b idx \<Rightarrow> Some ArchTypes_H.Untyped
+    | Structures_A.EndpointCap r badge rights \<Rightarrow> Some EndpointObject
+    | Structures_A.NotificationCap r badge rights \<Rightarrow> Some NotificationObject
+    | Structures_A.CNodeCap r bits guard \<Rightarrow> Some ArchTypes_H.CapTableObject
+    | Structures_A.ThreadCap r \<Rightarrow> Some TCBObject
+    | _ \<Rightarrow> None"
+
+lemma cte_wp_at_cteCaps_of:
+  "cte_wp_at' (\<lambda>cte. P (cteCap cte)) p s
+     = (\<exists>cap. cteCaps_of s p = Some cap \<and> P cap)"
+  apply (subst tree_cte_cteCap_eq[unfolded o_def])
+  apply (clarsimp split: option.splits)
+  done
+
+lemma caps_contained_modify_mdb_helper[simp]:
+  "(\<exists>n. modify_map m p (cteMDBNode_update f) x = Some (CTE c n))
+     = (\<exists>n. m x = Some (CTE c n))"
+  apply (cases "m p", simp_all add: modify_map_def)
+  apply (case_tac a, simp_all)
+  done
+
+lemma sameRegionAs_capRange_subset:
+  "\<lbrakk> sameRegionAs c c'; capClass c = PhysicalClass \<rbrakk> \<Longrightarrow> capRange c' \<subseteq> capRange c"
+  apply (erule sameRegionAsE)
+  apply (rule equalityD1)
+  apply (rule master_eqI, rule capRange_Master)
+  apply simp
+  apply assumption+
+  apply (clarsimp simp: isCap_simps)+
+  done
+
+
+definition
+  is_end_chunk :: "cte_heap \<Rightarrow> capability \<Rightarrow> machine_word \<Rightarrow> bool"
+where
+  "is_end_chunk ctes cap p \<equiv> \<exists>p'. ctes \<turnstile> p \<leadsto> p'
+       \<and> (\<exists>cte. ctes p = Some cte \<and> sameRegionAs cap (cteCap cte))
+       \<and> (\<exists>cte'. ctes p' = Some cte' \<and> \<not> sameRegionAs cap (cteCap cte'))"
+
+definition
+  mdb_chunked2 :: "cte_heap \<Rightarrow> bool"
+where
+  "mdb_chunked2 ctes \<equiv> (\<forall>x p p' cte. ctes x = Some cte
+         \<and> is_end_chunk ctes (cteCap cte) p \<and> is_end_chunk ctes (cteCap cte) p'
+            \<longrightarrow> p = p')
+       \<and> (\<forall>p p' cte cte'. ctes p = Some cte \<and> ctes p' = Some cte'
+              \<and> ctes \<turnstile> p \<leadsto> p' \<and> sameRegionAs (cteCap cte') (cteCap cte)
+                 \<longrightarrow> sameRegionAs (cteCap cte) (cteCap cte'))"
+
+lemma mdb_chunked2_revD:
+  "\<lbrakk> ctes p = Some cte; ctes p' = Some cte'; ctes \<turnstile> p \<leadsto> p';
+     mdb_chunked2 ctes; sameRegionAs (cteCap cte') (cteCap cte) \<rbrakk>
+     \<Longrightarrow> sameRegionAs (cteCap cte) (cteCap cte')"
+  by (fastforce simp add: mdb_chunked2_def)
+
+lemma valid_dlist_step_back:
+  "\<lbrakk> ctes \<turnstile> p \<leadsto> p''; ctes \<turnstile> p' \<leadsto> p''; valid_dlist ctes; p'' \<noteq> 0 \<rbrakk>
+     \<Longrightarrow> p = p'"
+  apply (simp add: mdb_next_unfold valid_dlist_def)
+  apply (frule_tac x=p in spec)
+  apply (drule_tac x=p' in spec)
+  apply (clarsimp simp: Let_def)
+  done
+
+lemma chunk_sameRegionAs_step1:
+  "\<lbrakk> ctes \<turnstile> p' \<leadsto>\<^sup>* p''; ctes p'' = Some cte;
+     is_chunk ctes (cteCap cte) p p'';
+     mdb_chunked2 ctes; valid_dlist ctes \<rbrakk> \<Longrightarrow>
+   \<exists>cte'. 
ctes p' = Some cte' + \ ctes \ p \\<^sup>+ p' + \ sameRegionAs (cteCap cte') (cteCap cte)" + apply (erule converse_rtrancl_induct) + apply (clarsimp simp: is_chunk_def) + apply (drule_tac x=p'' in spec, clarsimp) + apply (clarsimp simp: is_chunk_def) + apply (frule_tac x=y in spec) + apply (drule_tac x=z in spec) + apply ((drule mp, erule(1) transitive_closure_trans) + | clarsimp)+ + apply (rule sameRegionAs_trans[rotated], assumption) + apply (drule(3) mdb_chunked2_revD) + apply simp + apply (erule(1) sameRegionAs_trans) + apply simp + done + +end +locale mdb_insert_again_all = mdb_insert_again_child + + assumes valid_c': "s \' c'" + + fixes n' + defines "n' \ modify_map n (mdbNext parent_node) (cteMDBNode_update (mdbPrev_update (\a. site)))" +begin +interpretation Arch . (*FIXME: arch_split*) +lemma no_0_n' [simp]: "no_0 n'" + using no_0_n by (simp add: n'_def) + +lemma dom_n' [simp]: "dom n' = dom n" + apply (simp add: n'_def) + apply (simp add: modify_map_if dom_def) + apply (rule set_eqI) + apply simp + apply (rule iffI) + apply auto[1] + apply clarsimp + apply (case_tac y) + apply (case_tac "mdbNext parent_node = x") + apply auto + done + +lemma mdb_chain_0_n' [simp]: "mdb_chain_0 n'" + using chain_n + apply (simp add: mdb_chain_0_def) + apply (simp add: n'_def trancl_prev_update) + done + +lemma parency_n': + "n' \ p \ p' = (if m \ p \ parent \ p = parent + then m \ p \ p' \ p' = site + else m \ p \ p')" + using descendants [of p] + unfolding descendants_of'_def + by (auto simp add: set_eq_iff n'_def) + +lemma n'_direct_eq: + "n' \ p \ p' = (if p = parent then p' = site else + if p = site then m \ parent \ p' + else m \ p \ p')" + by (simp add: n'_def n_direct_eq) + +lemma n'_tranclD: + "n' \ p \\<^sup>+ p' \ + (if p = site then m \ parent \\<^sup>+ p' + else if m \ p \\<^sup>+ parent \ p = parent then m \ p \\<^sup>+ p' \ p' = site + else m \ p \\<^sup>+ p')" + apply (erule trancl_induct) + apply (fastforce simp: n'_direct_eq split: if_split_asm) + apply (fastforce simp: n'_direct_eq split: if_split_asm elim: trancl_trans) + done + +lemma site_in_dom: "site \ dom n" + by (simp add: n) + +lemma m_tranclD: + assumes m: "m \ p \\<^sup>+ p'" + shows "p' \ site \ n' \ p \\<^sup>+ p'" +proof - + from m have "p = site \ p' = 0" by clarsimp + with mdb_chain_0_n' m + show ?thesis + apply - + apply (erule trancl_induct) + apply (rule context_conjI) + apply clarsimp + apply (cases "p = site") + apply (simp add: mdb_chain_0_def site_in_dom) + apply (cases "p = parent") + apply simp + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule context_conjI) + apply clarsimp + apply clarsimp + apply (erule trancl_trans) + apply (case_tac "y = parent") + apply simp + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + done +qed + +lemma n'_trancl_eq: + "n' \ p \\<^sup>+ p' = + (if p = site then m \ parent \\<^sup>+ p' + else if m \ p \\<^sup>+ parent \ p = parent then m \ p \\<^sup>+ p' \ p' = site + else m \ p \\<^sup>+ p')" + apply simp + apply (intro conjI impI iffI) + apply (drule n'_tranclD) + apply simp + apply simp + apply (drule n'_tranclD) + apply simp + apply (erule disjE) + apply (drule m_tranclD)+ + apply simp + apply (drule m_tranclD) + apply simp + apply 
(erule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (drule n'_tranclD, simp) + apply (erule disjE) + apply (drule m_tranclD) + apply simp + apply simp + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (drule n'_tranclD, simp) + apply simp + apply (cases "p' = site", simp) + apply (drule m_tranclD) + apply clarsimp + apply (drule tranclD) + apply (clarsimp simp: n'_direct_eq) + apply (simp add: rtrancl_eq_or_trancl) + apply (drule n'_tranclD, simp) + apply clarsimp + apply (drule m_tranclD, simp) + done + +lemma n'_rtrancl_eq: + "n' \ p \\<^sup>* p' = + (if p = site then p' \ site \ m \ parent \\<^sup>+ p' \ p' = site + else if m \ p \\<^sup>* parent then m \ p \\<^sup>* p' \ p' = site + else m \ p \\<^sup>* p')" + by (auto simp: rtrancl_eq_or_trancl n'_trancl_eq) + +lemma mdbNext_parent_site [simp]: + "mdbNext parent_node \ site" +proof + assume "mdbNext parent_node = site" + hence "m \ parent \ site" + using parent + by (unfold mdb_next_unfold) simp + thus False by simp +qed + +lemma mdbPrev_parent_site [simp]: + "site \ mdbPrev parent_node" +proof + assume "site = mdbPrev parent_node" + with parent site + have "m \ site \ parent" + apply (unfold mdb_next_unfold) + apply simp + apply (erule dlistEp) + apply clarsimp + apply clarsimp + done + with p_0 show False by simp +qed + +lemma parent_prev: + "(m \ parent \ p) = (p = mdbNext parent_node \ p \ 0)" + apply (rule iffI) + apply (frule dlist_prevD, rule parent) + apply (simp add: mdb_next_unfold parent) + apply (clarsimp simp: mdb_prev_def) + apply clarsimp + apply (rule dlist_nextD0) + apply (rule parent_next) + apply assumption + done + +lemma parent_next_prev: + "(m \ p \ mdbNext parent_node) = (p = parent \ mdbNext parent_node \ 0)" + using parent + apply - + apply (rule iffI) + apply (clarsimp simp add: mdb_prev_def) + apply (rule conjI) + apply (erule dlistEn) + apply clarsimp + apply simp + apply clarsimp + apply clarsimp + apply (rule dlist_nextD0) + apply (rule parent_next) + apply assumption + done + + +lemma n'_prev_eq: + notes if_cong[cong del] if_weak_cong[cong] + shows "n' \ p \ p' = (if p' = site then p = parent + else if p = site then m \ parent \ p' + else if p = parent then p' = site + else m \ p \ p')" + using parent site site_prev + apply (simp add: n'_def n mdb_prev_def new_parent_def new_site_def split del: if_split) + apply (clarsimp simp add: modify_map_if cong: if_cong split del: if_split) + apply (cases "p' = site", simp) + apply (simp cong: if_cong split del: if_split) + apply (cases "p' = parent") + apply clarsimp + apply (rule conjI, clarsimp simp: mdb_prev_def) + apply (clarsimp simp: mdb_prev_def) + apply (simp cong: if_cong split del: if_split) + apply (cases "p = site") + apply (simp add: parent_prev) + apply (cases "mdbNext parent_node = p'") + apply simp + apply (rule iffI) + prefer 2 + apply clarsimp + apply (erule dlistEn) + apply simp + apply clarsimp + apply (case_tac cte') + apply clarsimp + apply clarsimp + apply clarsimp + apply (insert site_next)[1] + apply (rule valid_dlistEp [OF dlist, where p=p'], assumption) + apply clarsimp + apply clarsimp + apply (simp cong: if_cong split del: if_split) + apply (cases "p = parent") + apply clarsimp + apply (insert site_next) + apply (cases "mdbNext parent_node = p'", clarsimp) + apply clarsimp + apply (rule valid_dlistEp [OF dlist, where p=p'], assumption) + apply clarsimp + apply clarsimp + apply simp + apply (cases "mdbNext parent_node = p'") + prefer 2 + apply (clarsimp simp: mdb_prev_def) + 
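+  (* the remaining goals unfold m's prev relation via mdb_prev_def and are closed using parent_next_prev *)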
apply (rule iffI, clarsimp) + apply clarsimp + apply (case_tac z) + apply simp + apply (rule iffI) + apply (clarsimp simp: mdb_prev_def) + apply (drule sym [where t=p']) + apply (simp add: parent_next_prev) + done + +lemma dlist_n' [simp]: + notes if_cong[cong del] if_weak_cong[cong] + shows "valid_dlist n'" + using no_0_n' + by (clarsimp simp: valid_dlist_def2 n'_direct_eq + n'_prev_eq Invariants_H.valid_dlist_prevD [OF dlist]) + +lemma n'_cap: + "n' p = Some (CTE c node) \ + if p = site then c = c' \ m p = Some (CTE NullCap site_node) + else \node'. m p = Some (CTE c node')" + by (auto simp: n'_def n modify_map_if new_parent_def parent + new_site_def site site_cap split: if_split_asm) + +lemma m_cap: + "m p = Some (CTE c node) \ + if p = site + then \node'. n' site = Some (CTE c' node') + else \node'. n' p = Some (CTE c node')" + by (clarsimp simp: n n'_def new_parent_def new_site_def parent) + +lemma n'_badged: + "n' p = Some (CTE c node) \ + if p = site then c = c' \ mdbFirstBadged node + else \node'. m p = Some (CTE c node') \ mdbFirstBadged node = mdbFirstBadged node'" + by (auto simp: n'_def n modify_map_if new_parent_def parent + new_site_def site site_cap split: if_split_asm) + +lemma no_next_region: + "\ m \ parent \ p'; m p' = Some (CTE cap' node) \ \ \sameRegionAs c' cap'" + apply (clarsimp simp: mdb_next_unfold parent) + apply (frule next_wont_bite [rotated], clarsimp) + apply simp + done + +lemma valid_badges_n' [simp]: "valid_badges n'" + using valid_badges + apply (clarsimp simp: valid_badges_def) + apply (simp add: n'_direct_eq) + apply (drule n'_badged)+ + apply (clarsimp split: if_split_asm) + apply (drule (1) no_next_region) + apply simp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply simp + done + +lemma c'_not_Null: "c' \ NullCap" + using same_region by clarsimp + +lemma valid_nullcaps_n' [simp]: + "valid_nullcaps n'" + using nullcaps is_untyped c'_not_Null + apply (clarsimp simp: valid_nullcaps_def n'_def n modify_map_if new_site_def + new_parent_def isCap_simps) + apply (erule allE)+ + apply (erule (1) impE) + apply (simp add: nullMDBNode_def) + apply (insert parent) + apply (rule dlistEn, rule parent) + apply clarsimp + apply (clarsimp simp: nullPointer_def) + done + +lemma phys': "capClass parent_cap = PhysicalClass" + using sameRegionAs_classes [OF same_region] phys + by simp + +lemma capRange_c': "capRange c' \ capRange parent_cap" + apply (rule sameRegionAs_capRange_subset) + apply (rule same_region) + apply (rule phys') + done + +lemma untypedRange_c': + assumes ut: "isUntypedCap c'" + shows "untypedRange c' \ untypedRange parent_cap" + using ut is_untyped capRange_c' + by (auto simp: isCap_simps) + +lemma sameRegion_parentI: + "sameRegionAs c' cap \ sameRegionAs parent_cap cap" + using same_region + apply - + apply (erule (1) sameRegionAs_trans) + done + +lemma no_loops_n': "no_loops n'" + using mdb_chain_0_n' no_0_n' + by (rule mdb_chain_0_no_loops) + +lemmas no_loops_simps' [simp]= + no_loops_trancl_simp [OF no_loops_n'] + no_loops_direct_simp [OF no_loops_n'] + +lemma rangeD: + "\ m \ parent \ p; m p = Some (CTE cap node) \ \ + capRange cap \ capRange c' = {}" + using range by (rule descendants_rangeD') + +lemma capAligned_c': "capAligned c'" + using valid_c' by (rule valid_capAligned) + +lemma capRange_ut: + "capRange c' \ untypedRange parent_cap" + using capRange_c' is_untyped + by (clarsimp simp: isCap_simps del: subsetI) + +lemma untyped_mdb_n' [simp]: "untyped_mdb' n'" + using untyped_mdb capRange_ut untyped_inc + apply 
(clarsimp simp: untyped_mdb'_def descendants_of'_def) + apply (drule n'_cap)+ + apply (simp add: parency_n') + apply (simp split: if_split_asm) + apply clarsimp + apply (erule_tac x=parent in allE) + apply (simp add: parent is_untyped) + apply (erule_tac x=p' in allE) + apply simp + apply (frule untypedCapRange) + apply (drule untypedRange_c') + apply (erule impE, blast) + apply (drule (1) rangeD) + apply simp + apply clarsimp + apply (thin_tac "All P" for P) + apply (simp add: untyped_inc'_def) + apply (erule_tac x=parent in allE) + apply (erule_tac x=p in allE) + apply (simp add: parent is_untyped) + apply (clarsimp simp: descendants_of'_def) + apply (case_tac "untypedRange parent_cap = untypedRange c") + apply simp + apply (elim disjE conjE) + apply (drule (1) rangeD) + apply (drule untypedCapRange) + apply simp + apply blast + apply simp + apply (erule disjE) + apply clarsimp + apply (erule disjE) + apply (simp add: psubsetI) + apply (elim conjE) + apply (drule (1) rangeD) + apply (drule untypedCapRange) + apply simp + apply blast + apply blast + apply clarsimp + done + +lemma site': + "n' site = Some new_site" + by (simp add: n n'_def modify_map_if new_site_def) + +lemma loopE: "m \ x \\<^sup>+ x \ P" + by simp + +lemma m_loop_trancl_rtrancl: + "m \ y \\<^sup>* x \ \ m \ x \\<^sup>+ y" + apply clarsimp + apply (drule(1) transitive_closure_trans) + apply (erule loopE) + done + +lemma m_rtrancl_to_site: + "m \ p \\<^sup>* site = (p = site)" + apply (rule iffI) + apply (erule rtranclE) + apply assumption + apply simp + apply simp + done + +lemma descendants_of'_D: "p' \ descendants_of' p ctes \ ctes \ p \ p' " + by (clarsimp simp:descendants_of'_def) + +lemma untyped_inc_mdbD: + "\ sameRegionAs cap cap'; isUntypedCap cap; + ctes p = Some (CTE cap node); ctes p' = Some (CTE cap' node'); + untyped_inc' ctes; untyped_mdb' ctes; no_loops ctes \ + \ ctes \ p \ p' \ p = p' \ + (isUntypedCap cap' \ untypedRange cap \ untypedRange cap' + \ sameRegionAs cap' cap + \ ctes \ p' \ p)" + apply (subgoal_tac "untypedRange cap \ untypedRange cap' \ sameRegionAs cap' cap") + apply (cases "isUntypedCap cap'") + apply (drule(4) untyped_incD'[where p=p and p'=p']) + apply (erule sameRegionAsE, simp_all add: untypedCapRange)[1] + apply (cases "untypedRange cap = untypedRange cap'") + apply simp + apply (elim disjE conjE) + apply (simp only: simp_thms descendants_of'_D)+ + apply (elim disjE conjE) + apply (simp add: subset_iff_psubset_eq) + apply (elim disjE) + apply (simp add:descendants_of'_D)+ + apply (clarsimp simp:descendants_of'_def) + apply (clarsimp simp: isCap_simps) + apply clarsimp + apply (erule sameRegionAsE) + apply simp + apply (drule(1) untyped_mdbD',simp) + apply (simp add:untypedCapRange) + apply blast + apply simp + apply assumption + apply (simp add:descendants_of'_def) + apply (clarsimp simp:isCap_simps) + apply (clarsimp simp:isCap_simps) + apply (clarsimp simp add: sameRegionAs_def3 del: disjCI) + apply (rule disjI1) + apply (erule disjE) + apply (intro conjI) + apply blast + apply (simp add:untypedCapRange) + apply (erule subset_trans[OF _ untypedRange_in_capRange]) + apply clarsimp + apply (rule untypedRange_not_emptyD) + apply (simp add:untypedCapRange) + apply blast + apply (clarsimp simp:isCap_simps) + done + +lemma parent_chunk: + "is_chunk n' parent_cap parent site" + by (clarsimp simp: is_chunk_def + n'_trancl_eq n'_rtrancl_eq site' new_site_def same_region + m_loop_trancl_rtrancl m_rtrancl_to_site) + +lemma mdb_chunked_n' [simp]: + notes if_cong[cong del] if_weak_cong[cong] + 
shows "mdb_chunked n'" + using chunked untyped_mdb untyped_inc + apply (clarsimp simp: mdb_chunked_def) + apply (drule n'_cap)+ + apply (simp add: n'_trancl_eq split del: if_split) + apply (simp split: if_split_asm) + apply clarsimp + apply (frule sameRegion_parentI) + apply (frule(1) untyped_inc_mdbD [OF _ is_untyped _ _ untyped_inc untyped_mdb no_loops, OF _ parent]) + apply (elim disjE) + apply (frule sameRegionAs_capRange_Int) + apply (simp add: phys) + apply (rule valid_capAligned [OF valid_c']) + apply (rule valid_capAligned) + apply (erule valid_capI') + apply (erule notE, erule(1) descendants_rangeD' [OF range, rotated]) + apply (clarsimp simp: parent parent_chunk) + apply clarsimp + apply (frule subtree_mdb_next) + apply (simp add: m_loop_trancl_rtrancl [OF trancl_into_rtrancl, where x=parent]) + apply (case_tac "p' = parent") + apply (clarsimp simp: parent) + apply (drule_tac x=p' in spec) + apply (drule_tac x=parent in spec) + apply (frule sameRegionAs_trans [OF _ same_region]) + apply (clarsimp simp: parent is_chunk_def n'_trancl_eq n'_rtrancl_eq + m_rtrancl_to_site site' new_site_def) + apply (drule_tac x=p'' in spec) + apply clarsimp + apply (drule_tac p=p'' in m_cap, clarsimp) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=parent in allE) + apply (insert parent is_untyped)[1] + apply simp + apply (case_tac "p = parent") + apply (simp add: parent) + apply (clarsimp simp add: is_chunk_def) + apply (simp add: rtrancl_eq_or_trancl) + apply (erule disjE) + apply (clarsimp simp: site' new_site_def) + apply clarsimp + apply (drule tranclD) + apply (clarsimp simp: n'_direct_eq) + apply (drule (1) transitive_closure_trans) + apply simp + apply simp + apply (case_tac "isUntypedCap cap") + prefer 2 + apply (simp add: untyped_mdb'_def) + apply (erule_tac x=parent in allE) + apply simp + apply (erule_tac x=p in allE) + apply (simp add: descendants_of'_def) + apply (drule mp[where P="S \ T \ {}" for S T]) + apply (frule sameRegionAs_capRange_Int, simp add: phys) + apply (rule valid_capAligned, erule valid_capI') + apply (rule valid_capAligned, rule valid_c') + apply (insert capRange_ut)[1] + apply blast + apply (drule (1) rangeD) + apply (drule capRange_sameRegionAs, rule valid_c') + apply (simp add: phys) + apply simp + apply (case_tac "untypedRange parent_cap \ untypedRange cap") + apply (erule impE) + apply (clarsimp simp only: isCap_simps untypedRange.simps) + apply (subst (asm) range_subset_eq) + apply (drule valid_capI')+ + apply (drule valid_capAligned)+ + apply (clarsimp simp: capAligned_def) + apply (erule is_aligned_no_overflow) + apply (simp(no_asm) add: sameRegionAs_def3 isCap_simps) + apply (drule valid_capI')+ + apply (drule valid_capAligned)+ + apply (clarsimp simp: capAligned_def is_aligned_no_overflow) + apply clarsimp + apply (erule disjE) + apply simp + apply (rule conjI) + prefer 2 + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (thin_tac "P \ Q" for P Q) + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n'_rtrancl_eq split: if_split_asm) + apply (simp add: site' new_site_def) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (simp add: rtrancl_eq_or_trancl) + apply simp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n'_rtrancl_eq split: if_split_asm) + apply (drule (1) transitive_closure_trans, erule loopE) + apply 
(subgoal_tac "m \ p \ parent") + apply (drule subtree_mdb_next) + apply (drule (1) trancl_trans, erule loopE) + apply (thin_tac "All P" for P) + apply (drule_tac p=parent and p'=p in untyped_incD'[rotated], assumption+) + apply simp + apply (subgoal_tac "\ m \ parent \ p") + prefer 2 + apply clarsimp + apply (drule (1) rangeD) + apply (drule capRange_sameRegionAs, rule valid_c') + apply (simp add: phys) + apply simp + apply (clarsimp simp: descendants_of'_def subset_iff_psubset_eq) + apply (erule disjE,simp,simp) + apply (drule_tac p=parent and p'=p in untyped_incD'[rotated], assumption+) + apply (simp add:subset_iff_psubset_eq descendants_of'_def) + apply (elim disjE conjE| simp )+ + apply (drule(1) rangeD) + apply (drule capRange_sameRegionAs[OF _ valid_c']) + apply (simp add:phys)+ + apply (insert capRange_c' is_untyped)[1] + apply (simp add: untypedCapRange [symmetric]) + apply (drule(1) disjoint_subset) + apply (drule capRange_sameRegionAs[OF _ valid_c']) + apply (simp add:phys) + apply (simp add:Int_ac) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (erule disjE) + apply simp + apply (thin_tac "P \ Q" for P Q) + apply (subgoal_tac "is_chunk n' cap p p'") + prefer 2 + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n'_rtrancl_eq split: if_split_asm) + apply (erule disjE) + apply (erule_tac x=parent in allE) + apply clarsimp + apply (erule impE, fastforce) + apply (clarsimp simp: parent) + apply (simp add: site' new_site_def) + apply (erule sameRegionAs_trans, rule same_region) + apply (clarsimp simp add: parent) + apply (simp add: site' new_site_def) + apply (rule same_region) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply simp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI, clarsimp) + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI, clarsimp) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply simp + apply (thin_tac "P \ Q" for P Q) + apply (subgoal_tac "is_chunk n' cap' p' p") + prefer 2 + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n'_rtrancl_eq split: if_split_asm) + apply (erule disjE) + apply (erule_tac x=parent in allE) + apply clarsimp + apply (erule impE, fastforce) + apply (clarsimp simp: parent) + apply (simp add: site' new_site_def) + apply (erule sameRegionAs_trans, rule same_region) + apply (clarsimp simp add: parent) + apply (simp add: site' new_site_def) + apply (rule same_region) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply 
clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply simp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI, clarsimp) + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI, clarsimp) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + done + +lemma caps_contained_n' [simp]: "caps_contained' n'" + using caps_contained untyped_mdb untyped_inc + apply (clarsimp simp: caps_contained'_def) + apply (drule n'_cap)+ + apply (clarsimp split: if_split_asm) + apply (drule capRange_untyped) + apply simp + apply (frule capRange_untyped) + apply (frule untypedRange_c') + apply (erule_tac x=parent in allE) + apply (erule_tac x=p' in allE) + apply (simp add: parent) + apply (erule impE, blast) + apply (simp add: untyped_mdb'_def) + apply (erule_tac x=parent in allE) + apply (erule_tac x=p' in allE) + apply (simp add: parent is_untyped descendants_of'_def) + apply (erule impE) + apply (thin_tac "m site = t" for t) + apply (drule valid_capI') + apply (frule valid_capAligned) + apply blast + apply (drule (1) rangeD) + apply (frule capRange_untyped) + apply (drule untypedCapRange) + apply simp + apply (thin_tac "All P" for P) + apply (insert capRange_c')[1] + apply (simp add: untypedCapRange is_untyped) + apply (subgoal_tac "untypedRange parent_cap \ untypedRange c \ {}") + prefer 2 + apply blast + apply (frule untyped_incD'[OF _ capRange_untyped _ is_untyped]) + apply (case_tac c) + apply simp_all + apply (simp add:isCap_simps) + apply (rule parent) + apply clarsimp + apply (case_tac "untypedRange c = untypedRange parent_cap") + apply blast + apply simp + apply (elim disjE) + apply (drule_tac A = "untypedRange c" in psubsetI) + apply simp+ + apply (thin_tac "P\Q" for P Q) + apply (elim conjE) + apply (simp add:descendants_of'_def) + apply (drule(1) rangeD) + apply (frule capRange_untyped) + apply (simp add:untypedCapRange Int_ac) + apply blast + apply (simp add:descendants_of'_def) + apply blast + apply blast + done + +lemma untyped_inc_n' [simp]: "untypedRange c' \ usableUntypedRange parent_cap = {} \ untyped_inc' n'" + using untyped_inc + apply (clarsimp simp: untyped_inc'_def) + apply (drule n'_cap)+ + apply (clarsimp simp: descendants_of'_def parency_n' split: if_split_asm) + apply (frule untypedRange_c') + apply (insert parent is_untyped)[1] + apply (erule_tac x=parent in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (case_tac "untypedRange parent_cap = untypedRange c'a") + apply simp + apply (intro conjI) + apply (intro impI) + apply (elim disjE conjE) + apply (drule(1) subtree_trans,simp) + apply (simp add:subset_not_psubset) + apply simp + apply (clarsimp simp:subset_not_psubset) + apply (drule valid_capI')+ + apply (drule(2) disjoint_subset[OF usableRange_subseteq[OF valid_capAligned],rotated -1]) + apply simp + apply (clarsimp) + apply (rule int_not_empty_subsetD) + apply (drule(1) rangeD) + apply (simp add:untypedCapRange Int_ac) + apply (erule aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_c']]) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply simp + apply (erule subset_splitE) + apply (simp|elim conjE)+ + apply (thin_tac "P \ Q" for P Q)+ + 
apply blast + apply (simp|elim conjE)+ + apply (thin_tac "P \ Q" for P Q)+ + apply (intro conjI,intro impI,drule(1) subtree_trans,simp) + apply clarsimp + apply (intro impI) + apply (drule(1) rangeD) + apply (simp add:untypedCapRange Int_ac) + apply (rule int_not_empty_subsetD) + apply (simp add:Int_ac) + apply (erule aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_c']]) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply simp + apply (thin_tac "P \ Q" for P Q)+ + apply (drule(1) disjoint_subset[rotated]) + apply simp + apply (drule_tac B = "untypedRange c'a" in int_not_empty_subsetD) + apply (erule aligned_untypedRange_non_empty[OF capAligned_c']) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply simp + apply (frule untypedRange_c') + apply (insert parent is_untyped)[1] + apply (erule_tac x=p in allE) + apply (erule_tac x=parent in allE) + apply clarsimp + apply (case_tac "untypedRange parent_cap = untypedRange c") + apply simp + apply (intro conjI) + apply (intro impI) + apply (elim disjE conjE) + apply (clarsimp simp:subset_not_psubset )+ + apply (drule(1) subtree_trans,simp) + apply simp + apply (clarsimp simp:subset_not_psubset) + apply (drule disjoint_subset[OF usableRange_subseteq[OF valid_capAligned[OF valid_capI']],rotated]) + apply simp + apply assumption + apply simp + apply clarsimp + apply (rule int_not_empty_subsetD) + apply (drule(1) rangeD) + apply (simp add:untypedCapRange Int_ac) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply (erule aligned_untypedRange_non_empty[OF capAligned_c']) + apply simp + apply (erule subset_splitE) + apply (simp|elim conjE)+ + apply (thin_tac "P \ Q" for P Q)+ + apply (intro conjI,intro impI,drule(1) subtree_trans,simp) + apply clarsimp + apply (intro impI) + apply (drule(1) rangeD) + apply (simp add:untypedCapRange Int_ac) + apply (rule int_not_empty_subsetD) + apply (simp add:Int_ac) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply (erule aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_c']]) + apply simp + apply (thin_tac "P\Q" for P Q)+ + apply blast + apply (thin_tac "P\Q" for P Q)+ + apply simp + apply (drule(1) disjoint_subset2[rotated]) + apply simp + apply (drule_tac B = "untypedRange c'" in int_not_empty_subsetD) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply (erule aligned_untypedRange_non_empty[OF capAligned_c']) + apply simp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply simp + apply blast + done + +lemma ut_rev_n' [simp]: "ut_revocable' n'" + using ut_rev + apply (clarsimp simp: ut_revocable'_def n'_def n_def) + apply (clarsimp simp: modify_map_if split: if_split_asm) + done + +lemma class_links_m: "class_links m" + using valid + by (simp add: valid_mdb_ctes_def) + +lemma parent_phys: "capClass parent_cap = PhysicalClass" + using is_untyped + by (clarsimp simp: isCap_simps) + +lemma class_links [simp]: "class_links n'" + using class_links_m + apply (clarsimp simp add: class_links_def) + apply (simp add: n'_direct_eq + split: if_split_asm) + apply (case_tac cte, + clarsimp dest!: n'_cap simp: site' parent new_site_def phys parent_phys) + apply (drule_tac x=parent in spec) + apply (drule_tac x=p' in spec) + apply (case_tac cte') + apply (clarsimp simp: site' new_site_def parent parent_phys phys dest!: n'_cap + split: if_split_asm) + apply (case_tac cte, 
case_tac cte') + apply (clarsimp dest!: n'_cap split: if_split_asm) + apply fastforce + done + +lemma irq_control_n' [simp]: + "irq_control n'" + using irq_control phys + apply (clarsimp simp: irq_control_def) + apply (clarsimp simp: n'_def n_def) + apply (clarsimp simp: modify_map_if split: if_split_asm) + done + +lemma dist_z_m: + "distinct_zombies m" + using valid by auto + +lemma dist_z [simp]: + "distinct_zombies n'" + using dist_z_m + apply (simp add: n'_def distinct_zombies_nonCTE_modify_map) + apply (simp add: n_def distinct_zombies_nonCTE_modify_map + fun_upd_def[symmetric]) + apply (erule distinct_zombies_seperateE, simp) + apply (case_tac cte, clarsimp) + apply (rename_tac cap node) + apply (subgoal_tac "capRange cap \ capRange c' \ {}") + apply (frule untyped_mdbD' [OF _ _ _ _ _ untyped_mdb, OF parent]) + apply (simp add: is_untyped) + apply (clarsimp simp add: untypedCapRange[OF is_untyped, symmetric]) + apply (drule disjoint_subset2 [OF capRange_c']) + apply simp + apply simp + apply (simp add: descendants_of'_def) + apply (drule(1) rangeD) + apply simp + apply (drule capAligned_capUntypedPtr [OF capAligned_c']) + apply (frule valid_capAligned [OF valid_capI']) + apply (drule(1) capAligned_capUntypedPtr) + apply auto + done + +lemma reply_masters_rvk_fb_m: + "reply_masters_rvk_fb m" + using valid by auto + +lemma reply_masters_rvk_fb_n[simp]: + "reply_masters_rvk_fb n'" + using reply_masters_rvk_fb_m + apply (simp add: reply_masters_rvk_fb_def n'_def ball_ran_modify_map_eq + n_def fun_upd_def[symmetric]) + apply (rule ball_ran_fun_updI, assumption) + apply clarsimp + done + +lemma valid_n': + "untypedRange c' \ usableUntypedRange parent_cap = {} \ valid_mdb_ctes n'" + by auto + +end + +lemma caps_overlap_reserved'_D: + "\caps_overlap_reserved' S s; ctes_of s p = Some cte;isUntypedCap (cteCap cte)\ \ usableUntypedRange (cteCap cte) \ S = {}" + apply (simp add:caps_overlap_reserved'_def) + apply (erule ballE) + apply (erule(2) impE) + apply fastforce + done + +context begin interpretation Arch . (*FIXME: arch_split*) +lemma insertNewCap_valid_mdb: + "\valid_mdb' and valid_objs' and K (slot \ p) and + caps_overlap_reserved' (untypedRange cap) and + cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) p and + K (\isZombie cap) and valid_cap' cap and + (\s. descendants_range' cap p (ctes_of s))\ + insertNewCap p slot cap + \\rv. 
valid_mdb'\" + apply (clarsimp simp: insertNewCap_def valid_mdb'_def) + apply (wp getCTE_ctes_of | simp add: o_def)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp simp: no_0_def valid_mdb_ctes_def) + apply (case_tac cte) + apply (rename_tac p_cap p_node) + apply (clarsimp cong: if_cong) + apply (case_tac ya) + apply (rename_tac node) + apply (clarsimp simp: nullPointer_def) + apply (rule mdb_insert_again_all.valid_n') + apply unfold_locales[1] + apply (assumption|rule refl)+ + apply (frule sameRegionAs_classes, clarsimp simp: isCap_simps) + apply (erule (1) ctes_of_valid_cap') + apply (simp add: valid_mdb_ctes_def) + apply simp + apply (clarsimp simp: isMDBParentOf_CTE) + apply (clarsimp simp: isCap_simps valid_mdb_ctes_def ut_revocable'_def) + apply assumption + apply (drule(1) caps_overlap_reserved'_D) + apply simp + apply (simp add:Int_ac) + done + +(* FIXME: move *) +lemma no_default_zombie: + "cap_relation (default_cap tp p sz d) cap \ \isZombie cap" + by (cases tp, auto simp: isCap_simps) + +lemmas updateNewFreeIndex_typ_ats[wp] = typ_at_lifts[OF updateNewFreeIndex_typ_at'] + +lemma updateNewFreeIndex_valid_objs[wp]: + "\valid_objs'\ updateNewFreeIndex slot \\_. valid_objs'\" + apply (simp add: updateNewFreeIndex_def getSlotCap_def) + apply (wp getCTE_wp' | wpc | simp add: updateTrackedFreeIndex_def)+ + done + +lemma insertNewCap_valid_objs [wp]: + "\ valid_objs' and valid_cap' cap and pspace_aligned' and pspace_distinct'\ + insertNewCap parent slot cap + \\_. valid_objs'\" + apply (simp add: insertNewCap_def) + apply (wp setCTE_valid_objs getCTE_wp') + apply clarsimp + done + +lemma insertNewCap_valid_cap [wp]: + "\ valid_cap' c \ + insertNewCap parent slot cap + \\_. valid_cap' c\" + apply (simp add: insertNewCap_def) + apply (wp getCTE_wp') + apply clarsimp + done + +lemmas descendants_of'_mdbPrev = descendants_of_prev_update + +lemma insertNewCap_ranges: + "\\s. descendants_range' c p (ctes_of s) \ + descendants_range' cap p (ctes_of s) \ + capRange c \ capRange cap = {} \ + cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) p s \ + valid_mdb' s \ valid_objs' s\ + insertNewCap p slot cap + \\_ s. descendants_range' c p (ctes_of s)\" + apply (simp add: insertNewCap_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def no_0_def) + apply (case_tac ctea) + apply (case_tac cteb) + apply (clarsimp simp: nullPointer_def cong: if_cong) + apply (simp (no_asm) add: descendants_range'_def descendants_of'_mdbPrev) + apply (subst mdb_insert_again_child.descendants) + apply unfold_locales[1] + apply (simp add: valid_mdb'_def) + apply (assumption|rule refl)+ + apply (frule sameRegionAs_classes, clarsimp simp: isCap_simps) + apply (erule (1) ctes_of_valid_cap') + apply (simp add: valid_mdb'_def valid_mdb_ctes_def) + apply clarsimp + apply (clarsimp simp: isMDBParentOf_def) + apply (clarsimp simp: isCap_simps valid_mdb'_def + valid_mdb_ctes_def ut_revocable'_def) + apply clarsimp + apply (rule context_conjI, blast) + apply (clarsimp simp: descendants_range'_def) + done + +lemma insertNewCap_overlap_reserved'[wp]: + "\\s. caps_overlap_reserved' (capRange c) s\ + capRange c \ capRange cap = {} \ capAligned cap \ + cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) p s \ + valid_mdb' s \ valid_objs' s\ + insertNewCap p slot cap + \\_ s. 
caps_overlap_reserved' (capRange c) s\" + apply (simp add: insertNewCap_def caps_overlap_reserved'_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def no_0_def) + apply (case_tac ctea) + apply (case_tac cteb) + apply (clarsimp simp: nullPointer_def ball_ran_modify_map_eq + caps_overlap_reserved'_def[symmetric]) + apply (clarsimp simp: ran_def split: if_splits) + apply (case_tac "slot = a") + apply clarsimp + apply (rule disjoint_subset) + apply (erule(1) usableRange_subseteq) + apply (simp add:untypedCapRange Int_ac)+ + apply (subst Int_commute) + apply (erule(2) caps_overlap_reserved'_D) + done + +crunch ksArch[wp]: insertNewCap "\s. P (ksArchState s)" + (wp: crunch_wps) + +lemma inv_untyped_corres_helper1: + "list_all2 cap_relation (map (\ref. default_cap tp ref sz d) orefs) cps + \ + corres dc + (\s. pspace_aligned s \ pspace_distinct s + \ valid_objs s \ valid_mdb s \ valid_list s + \ cte_wp_at is_untyped_cap p s + \ (\tup \ set (zip crefs orefs). + cte_wp_at (\c. cap_range (default_cap tp (snd tup) sz d) \ untyped_range c) p s) + \ (\tup \ set (zip crefs orefs). + descendants_range (default_cap tp (snd tup) sz d) p s) + \ (\tup \ set (zip crefs orefs). + caps_overlap_reserved (untyped_range (default_cap tp (snd tup) sz d)) s) + \ (\tup \ set (zip crefs orefs). real_cte_at (fst tup) s) + \ (\tup \ set (zip crefs orefs). + cte_wp_at ((=) cap.NullCap) (fst tup) s) + \ distinct (p # (map fst (zip crefs orefs))) + \ distinct_sets (map (\tup. cap_range (default_cap tp (snd tup) sz d)) (zip crefs orefs)) + \ (\tup \ set (zip crefs orefs). + valid_cap (default_cap tp (snd tup) sz d) s)) + (\s. (\tup \ set (zip (map cte_map crefs) cps). valid_cap' (snd tup) s) + \ (\tup \ set (zip (map cte_map crefs) cps). cte_wp_at' (\c. cteCap c = NullCap) (fst tup) s) + \ cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + (\tup \ set (zip (map cte_map crefs) cps). + sameRegionAs (cteCap cte) (snd tup))) + (cte_map p) s + \ distinct ((cte_map p) # (map fst (zip (map cte_map crefs) cps))) + \ valid_mdb' s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ (\tup \ set (zip (map cte_map crefs) cps). descendants_range' (snd tup) (cte_map p) (ctes_of s)) + \ (\tup \ set (zip (map cte_map crefs) cps). 
+ caps_overlap_reserved' (capRange (snd tup)) s) + \ distinct_sets (map capRange (map snd (zip (map cte_map crefs) cps)))) + (sequence_x (map (create_cap tp sz p d) (zip crefs orefs))) + (zipWithM_x (insertNewCap (cte_map p)) + ((map cte_map crefs)) cps)" + apply (simp add: zipWithM_x_def zipWith_def split_def) + apply (fold mapM_x_def) + apply (rule corres_list_all2_mapM_) + apply (rule corres_guard_imp) + apply (erule insertNewCap_corres) + apply (clarsimp simp: cte_wp_at_def is_cap_simps) + apply (clarsimp simp: fun_upd_def cte_wp_at_ctes_of) + apply clarsimp + apply (rule hoare_pre, wp hoare_vcg_const_Ball_lift) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_caps_of_state + cap_range_def[where c="default_cap a b c d" for a b c d]) + apply (drule(2) caps_overlap_reservedD[rotated]) + apply (simp add:Int_ac) + apply (rule conjI) + apply (clarsimp simp: valid_cap_def) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (rule conjI) + apply (clarsimp simp:Int_ac) + apply (erule disjoint_subset2[rotated]) + apply fastforce + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + subgoal by fastforce + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps valid_cap_def) + apply (fastforce simp: image_def) + apply (rule hoare_pre) + apply (wp + hoare_vcg_const_Ball_lift + insertNewCap_valid_mdb hoare_vcg_all_lift insertNewCap_ranges + | subst cte_wp_at_cteCaps_of)+ + apply (subst(asm) cte_wp_at_cteCaps_of)+ + apply (clarsimp simp only:) + apply simp + apply (rule conjI) + apply clarsimp + apply (thin_tac "cte_map p \ S" for S) + apply (erule notE, erule rev_image_eqI) + apply simp + apply (rule conjI,clarsimp+) + apply (rule conjI,erule caps_overlap_reserved'_subseteq) + apply (rule untypedRange_in_capRange) + apply (rule conjI,erule no_default_zombie) + apply (rule conjI, clarsimp simp:Int_ac) + apply fastforce + apply (clarsimp simp:Int_ac valid_capAligned ) + apply fastforce + apply (rule list_all2_zip_split) + apply (simp add: list_all2_map2 list_all2_refl) + apply (simp add: list_all2_map1) + done + +lemma createNewCaps_valid_pspace_extras: + "\(\s. n \ 0 \ ptr \ 0 \ range_cover ptr sz (APIType_capBits ty us) n + \ sz \ maxUntypedSizeBits \ canonical_address (ptr && ~~ mask sz) + \ pspace_no_overlap' ptr sz s + \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s + \ ksCurDomain s \ maxDomain + )\ + createNewCaps ty ptr n us d + \\rv. pspace_aligned'\" + "\(\s. n \ 0 \ ptr \ 0 \ range_cover ptr sz (APIType_capBits ty us) n + \ sz \ maxUntypedSizeBits \ canonical_address (ptr && ~~ mask sz) + \ pspace_no_overlap' ptr sz s + \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s + \ ksCurDomain s \ maxDomain + )\ + createNewCaps ty ptr n us d + \\rv. pspace_canonical'\" + "\(\s. n \ 0 \ ptr \ 0 \ range_cover ptr sz (APIType_capBits ty us) n + \ sz \ maxUntypedSizeBits \ canonical_address (ptr && ~~ mask sz) + \ pspace_no_overlap' ptr sz s + \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s + \ ksCurDomain s \ maxDomain + )\ + createNewCaps ty ptr n us d + \\rv. pspace_distinct'\" + "\(\s. 
n \ 0 \ ptr \ 0 \ range_cover ptr sz (APIType_capBits ty us) n + \ sz \ maxUntypedSizeBits \ canonical_address (ptr && ~~ mask sz) + \ pspace_no_overlap' ptr sz s + \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s + \ ksCurDomain s \ maxDomain + )\ + createNewCaps ty ptr n us d + \\rv. valid_mdb'\" + "\(\s. n \ 0 \ ptr \ 0 \ range_cover ptr sz (APIType_capBits ty us) n + \ sz \ maxUntypedSizeBits \ canonical_address (ptr && ~~ mask sz) + \ pspace_no_overlap' ptr sz s + \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s + \ ksCurDomain s \ maxDomain + )\ + createNewCaps ty ptr n us d + \\rv. valid_objs'\" + apply (rule hoare_grab_asm)+ + apply (rule hoare_pre,rule hoare_strengthen_post[OF createNewCaps_valid_pspace]) + apply (simp add:valid_pspace'_def)+ + apply (rule hoare_grab_asm)+ + apply (rule hoare_pre,rule hoare_strengthen_post[OF createNewCaps_valid_pspace]) + apply (simp add:valid_pspace'_def)+ + apply (rule hoare_grab_asm)+ + apply (rule hoare_pre,rule hoare_strengthen_post[OF createNewCaps_valid_pspace]) + apply (simp add:valid_pspace'_def)+ + apply (rule hoare_grab_asm)+ + apply (rule hoare_pre,rule hoare_strengthen_post[OF createNewCaps_valid_pspace]) + apply (simp add:valid_pspace'_def)+ + apply (rule hoare_grab_asm)+ + apply (rule hoare_pre,rule hoare_strengthen_post[OF createNewCaps_valid_pspace]) + apply (simp add:valid_pspace'_def)+ + done + +declare map_fst_zip_prefix[simp] + +declare map_snd_zip_prefix[simp] + +declare word_unat_power [symmetric, simp del] + +lemma createNewCaps_range_helper: + "\\s. range_cover ptr sz (APIType_capBits tp us) n \ 0 < n\ + createNewCaps tp ptr n us d + \\rv s. \capfn. + rv = map capfn (map (\p. ptr_add ptr (p * 2 ^ (APIType_capBits tp us))) + [0 ..< n]) + \ (\p. 
capClass (capfn p) = PhysicalClass + \ capUntypedPtr (capfn p) = p + \ capBits (capfn p) = (APIType_capBits tp us))\" + apply (simp add: createNewCaps_def toAPIType_def Arch_createNewCaps_def + split del: if_split cong: option.case_cong) + apply (rule hoare_grab_asm)+ + apply (frule range_cover.range_cover_n_less) + apply (frule range_cover.unat_of_nat_n) + apply (cases tp, simp_all split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split) + apply (rule hoare_pre, wp) + apply (frule range_cover_not_zero[rotated -1],simp) + apply (clarsimp simp: APIType_capBits_def objBits_simps ptr_add_def o_def) + apply (subst upto_enum_red') + apply unat_arith + apply (clarsimp simp: o_def fromIntegral_def toInteger_nat fromInteger_nat) + apply fastforce + apply (rule hoare_pre,wp createObjects_ret2) + apply (clarsimp simp: APIType_capBits_def word_bits_def bit_simps + objBits_simps ptr_add_def o_def) + apply (fastforce simp: objBitsKO_def objBits_def) + apply (rule hoare_pre,wp createObjects_ret2) + apply (clarsimp simp: APIType_capBits_def word_bits_def + objBits_simps ptr_add_def o_def) + apply (fastforce simp: objBitsKO_def objBits_def) + apply (rule hoare_pre,wp createObjects_ret2) + apply (clarsimp simp: APIType_capBits_def word_bits_def objBits_simps ptr_add_def o_def) + apply (fastforce simp: objBitsKO_def objBits_def) + apply (rule hoare_pre,wp createObjects_ret2) + apply (clarsimp simp: APIType_capBits_def word_bits_def objBits_simps ptr_add_def o_def) + apply (fastforce simp: objBitsKO_def objBits_def) + apply (wp createObjects_ret2 + | clarsimp simp: APIType_capBits_def objBits_if_dev archObjSize_def word_bits_def + split del: if_split + | simp add: objBits_simps + | (rule exI, (fastforce simp: bit_simps)))+ + done + +lemma createNewCaps_range_helper2: + "\\s. range_cover ptr sz (APIType_capBits tp us) n \ 0 < n\ + createNewCaps tp ptr n us d + \\rv s. \cp \ set rv. capRange cp \ {} \ capRange cp \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1}\" + apply (rule hoare_assume_pre) + apply (rule hoare_strengthen_post) + apply (rule createNewCaps_range_helper) + apply (clarsimp simp: capRange_def ptr_add_def word_unat_power[symmetric] + simp del: atLeastatMost_subset_iff + dest!: less_two_pow_divD) + apply (rule conjI) + apply (rule is_aligned_no_overflow) + apply (rule is_aligned_add_multI [OF _ _ refl]) + apply (fastforce simp:range_cover_def) + apply simp + apply (rule range_subsetI) + apply (rule machine_word_plus_mono_right_split[OF range_cover.range_cover_compare]) + apply (assumption)+ + apply (simp add:range_cover_def word_bits_def) + apply (frule range_cover_cell_subset) + apply (erule of_nat_mono_maybe[rotated]) + apply (drule (1) range_cover.range_cover_n_less ) + apply (clarsimp) + apply (erule impE) + apply (simp add:range_cover_def) + apply (rule is_aligned_no_overflow) + apply (rule is_aligned_add_multI[OF _ le_refl refl]) + apply (fastforce simp:range_cover_def) + apply simp + done + +lemma createNewCaps_children: + "\\s. cap = UntypedCap d (ptr && ~~ mask sz) sz idx + \ range_cover ptr sz (APIType_capBits tp us) n \ 0 < n\ + createNewCaps tp ptr n us d + \\rv s. \y \ set rv. 
(sameRegionAs cap y)\" + apply (rule hoare_assume_pre) + apply (rule hoare_chain [OF createNewCaps_range_helper2]) + apply fastforce + apply clarsimp + apply (drule(1) bspec) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply (drule(1) subsetD) + apply clarsimp + apply (erule order_trans[rotated]) + apply (rule word_and_le2) + done + +fun isDeviceCap :: "capability \ bool" +where + "isDeviceCap (UntypedCap d _ _ _) = d" +| "isDeviceCap (ArchObjectCap (FrameCap _ _ _ d _)) = d" +| "isDeviceCap _ = False" + +lemmas makeObjectKO_simp = makeObjectKO_def[split_simps AARCH64_H.object_type.split + Structures_H.kernel_object.split ArchTypes_H.apiobject_type.split + sum.split arch_kernel_object.split] + +lemma createNewCaps_descendants_range': + "\\s. descendants_range' p q (ctes_of s) \ + range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 \ + pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n us d + \ \rv s. descendants_range' p q (ctes_of s)\" + apply (clarsimp simp:descendants_range'_def2 descendants_range_in'_def2) + apply (wp createNewCaps_null_filter') + apply fastforce + done + +lemma caps_overlap_reserved'_def2: + "caps_overlap_reserved' S = + (\s. (\cte \ ran (null_filter' (ctes_of s)). + isUntypedCap (cteCap cte) \ + usableUntypedRange (cteCap cte) \ S = {}))" + apply (rule ext) + apply (clarsimp simp:caps_overlap_reserved'_def) + apply (intro iffI ballI impI) + apply (elim ballE impE) + apply simp + apply simp + apply (simp add:ran_def null_filter'_def split:if_split_asm option.splits) + apply (elim ballE impE) + apply simp + apply simp + apply (clarsimp simp: ran_def null_filter'_def is_cap_simps + simp del: split_paired_All split_paired_Ex split: if_splits) + apply (drule_tac x = a in spec) + apply simp + done + +lemma createNewCaps_caps_overlap_reserved': + "\\s. caps_overlap_reserved' S s \ pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ 0 < n \ + range_cover ptr sz (APIType_capBits ty us) n\ + createNewCaps ty ptr n us d + \\rv s. caps_overlap_reserved' S s\" + apply (clarsimp simp: caps_overlap_reserved'_def2) + apply (wp createNewCaps_null_filter') + apply fastforce + done + +lemma createNewCaps_caps_overlap_reserved_ret': + "\\s. caps_overlap_reserved' + {ptr..ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s \ + pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s \ + 0 < n \ range_cover ptr sz (APIType_capBits ty us) n\ + createNewCaps ty ptr n us d + \\rv s. \y\set rv. caps_overlap_reserved' (capRange y) s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp:valid_def) + apply (frule use_valid[OF _ createNewCaps_range_helper]) + apply fastforce + apply clarsimp + apply (erule use_valid[OF _ createNewCaps_caps_overlap_reserved']) + apply (intro conjI,simp_all) + apply (erule caps_overlap_reserved'_subseteq) + apply (drule(1) range_cover_subset) + apply simp + apply (clarsimp simp: ptr_add_def capRange_def + simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + done + +lemma createNewCaps_descendants_range_ret': + "\\s. (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n) + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s + \ descendants_range_in' {ptr..ptr + of_nat n * 2^(APIType_capBits ty us) - 1} cref (ctes_of s)\ + createNewCaps ty ptr n us d + \ \rv s. \y\set rv. 
descendants_range' y cref (ctes_of s)\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: valid_def) + apply (frule use_valid[OF _ createNewCaps_range_helper]) + apply simp + apply (erule use_valid[OF _ createNewCaps_descendants_range']) + apply (intro conjI,simp_all) + apply (clarsimp simp:descendants_range'_def descendants_range_in'_def) + apply (drule(1) bspec)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (erule disjoint_subset2[rotated]) + apply (drule(1) range_cover_subset) + apply simp + apply (simp add:capRange_def ptr_add_def) + done + +lemma createNewCaps_parent_helper: + "\\s. cte_wp_at' (\cte. cteCap cte = UntypedCap d (ptr && ~~ mask sz) sz idx) p s + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s + \ (ty = APIObjectType ArchTypes_H.CapTableObject \ 0 < us) + \ range_cover ptr sz (APIType_capBits ty us) n \ 0 < n \ + createNewCaps ty ptr n us d + \\rv. cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + (\tup\set (zip (xs rv) rv). + sameRegionAs (cteCap cte) (snd tup))) + p\" + apply (rule hoare_post_imp [where Q="\rv s. \cte. cte_wp_at' ((=) cte) p s + \ isUntypedCap (cteCap cte) + \ (\tup\set (zip (xs rv) rv). + sameRegionAs (cteCap cte) (snd tup))"]) + apply (clarsimp elim!: cte_wp_at_weakenE') + apply (rule hoare_pre) + apply (wp hoare_vcg_ex_lift createNewCaps_cte_wp_at' + set_tuple_pick createNewCaps_children) + apply (auto simp:cte_wp_at'_def isCap_simps) + done + +lemma createNewCaps_valid_cap': + "\\s. pspace_no_overlap' ptr sz s \ + valid_pspace' s \ n \ 0 \ + range_cover ptr sz (APIType_capBits ty us) n \ + (ty = APIObjectType ArchTypes_H.CapTableObject \ 0 < us) \ + (ty = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits) \ + ptr \ 0 \ sz \ maxUntypedSizeBits \ canonical_address (ptr && ~~ mask sz)\ + createNewCaps ty ptr n us d + \\r s. \cap\set r. s \' cap\" + apply (rule hoare_assume_pre) + apply clarsimp + apply (erule createNewCaps_valid_cap; simp) + done + +lemma createNewCaps_ranges: + "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 + createNewCaps ty ptr n us d + \\rv s. distinct_sets (map capRange rv)\" + apply (rule hoare_assume_pre) + apply (rule hoare_chain) + apply (rule createNewCaps_range_helper) + apply fastforce + apply (clarsimp simp: distinct_sets_prop distinct_prop_map) + apply (rule distinct_prop_distinct) + apply simp + apply (clarsimp simp: capRange_def simp del: Int_atLeastAtMost + dest!: less_two_pow_divD) + apply (rule aligned_neq_into_no_overlap[simplified field_simps]) + apply (rule notI) + apply (erule(3) ptr_add_distinct_helper) + apply (simp add:range_cover_def word_bits_def) + apply (erule range_cover.range_cover_n_le(1)[where 'a=machine_word_len]) + apply (clarsimp simp: ptr_add_def word_unat_power[symmetric]) + apply (rule is_aligned_add_multI[OF _ le_refl refl]) + apply (simp add:range_cover_def)+ + apply (clarsimp simp: ptr_add_def word_unat_power[symmetric]) + apply (rule is_aligned_add_multI[OF _ le_refl refl]) + apply (simp add:range_cover_def)+ + done + +lemma createNewCaps_ranges': + "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 < n\ + createNewCaps ty ptr n us d + \\rv s. 
distinct_sets (map capRange (map snd (zip xs rv)))\" + apply (rule hoare_strengthen_post) + apply (rule createNewCaps_ranges) + apply (simp add: distinct_sets_prop del: map_map) + apply (erule distinct_prop_prefixE) + apply (rule Sublist.map_mono_prefix) + apply (rule map_snd_zip_prefix [unfolded less_eq_list_def]) + done + +declare split_paired_Ex[simp del] +lemmas corres_split_retype_createNewCaps + = corres_split[OF corres_retype_region_createNewCaps, + simplified bind_assoc, simplified ] +declare split_paired_Ex[simp add] + +lemma retype_region_caps_overlap_reserved: + "\valid_pspace and valid_mdb and + pspace_no_overlap_range_cover ptr sz and caps_no_overlap ptr sz and + caps_overlap_reserved + {ptr..ptr + of_nat n * 2^obj_bits_api (APIType_map2 (Inr ao')) us - 1} and + (\s. \slot. cte_wp_at (\c. up_aligned_area ptr sz \ cap_range c \ cap_is_device c = dev) slot s) and + K (APIType_map2 (Inr ao') = Structures_A.apiobject_type.CapTableObject \ 0 < us) and + K (range_cover ptr sz (obj_bits_api (APIType_map2 (Inr ao')) us) n) and + K (S \ {ptr..ptr + of_nat n * + 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us - 1})\ + retype_region ptr n us (APIType_map2 (Inr ao')) dev + \\rv s. caps_overlap_reserved S s\" + apply (rule hoare_gen_asm)+ + apply (simp (no_asm) add:caps_overlap_reserved_def2) + apply (rule hoare_pre) + apply (wp retype_region_caps_of) + apply simp+ + apply (simp add:caps_overlap_reserved_def2) + apply (intro conjI,simp+) + apply clarsimp + apply (drule bspec) + apply simp+ + apply (erule(1) disjoint_subset2) + done + +lemma retype_region_caps_overlap_reserved_ret: + "\valid_pspace and valid_mdb and caps_no_overlap ptr sz and + pspace_no_overlap_range_cover ptr sz and + caps_overlap_reserved + {ptr..ptr + of_nat n * 2^obj_bits_api (APIType_map2 (Inr ao')) us - 1} and + (\s. \slot. cte_wp_at (\c. up_aligned_area ptr sz \ cap_range c \ cap_is_device c = dev) slot s) and + K (APIType_map2 (Inr ao') = Structures_A.apiobject_type.CapTableObject \ 0 < us) and + K (range_cover ptr sz (obj_bits_api (APIType_map2 (Inr ao')) us) n)\ + retype_region ptr n us (APIType_map2 (Inr ao')) dev + \\rv s. \y\set rv. caps_overlap_reserved (untyped_range (default_cap + (APIType_map2 (Inr ao')) y us d)) s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp:valid_def) + apply (frule retype_region_ret[unfolded valid_def,simplified,THEN spec,THEN bspec]) + apply clarsimp + apply (erule use_valid[OF _ retype_region_caps_overlap_reserved]) + apply clarsimp + apply (intro conjI,simp_all) + apply fastforce + apply (case_tac ao') + apply (simp_all add:APIType_map2_def) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (simp_all add:obj_bits_api_def ptr_add_def) + apply (drule(1) range_cover_subset) + apply (clarsimp)+ + done + +lemma updateFreeIndex_pspace_no_overlap': + "\\s. pspace_no_overlap' ptr sz s \ + valid_pspace' s \ cte_wp_at' (isUntypedCap o cteCap) src s\ + updateFreeIndex src index + \\r s. pspace_no_overlap' ptr sz s\" + apply (simp add: updateFreeIndex_def getSlotCap_def updateTrackedFreeIndex_def) + apply (rule hoare_pre) + apply (wp getCTE_wp' | wp (once) pspace_no_overlap'_lift + | simp)+ + apply (clarsimp simp:valid_pspace'_def pspace_no_overlap'_def) + done + +lemma updateFreeIndex_updateCap_caps_overlap_reserved: + "\\s. valid_mdb' s \ valid_objs' s \ S \ untypedRange cap \ + usableUntypedRange (capFreeIndex_update (\_. index) cap) \ S = {} \ + isUntypedCap cap \ descendants_range_in' S src (ctes_of s) \ + cte_wp_at' (\c. 
cteCap c = cap) src s\ + updateCap src (capFreeIndex_update (\_. index) cap) + \\r s. caps_overlap_reserved' S s\" + apply (clarsimp simp:caps_overlap_reserved'_def) + apply (wp updateCap_ctes_of_wp) + apply (clarsimp simp:modify_map_def cte_wp_at_ctes_of) + apply (erule ranE) + apply (clarsimp split:if_split_asm simp:valid_mdb'_def valid_mdb_ctes_def) + apply (case_tac cte) + apply (case_tac ctea) + apply simp + apply (drule untyped_incD') + apply (simp+)[4] + apply clarify + apply (erule subset_splitE) + apply (simp del:usable_untyped_range.simps) + apply (thin_tac "P \ Q" for P Q)+ + apply (elim conjE) + apply blast + apply (simp) + apply (thin_tac "P\Q" for P Q)+ + apply (elim conjE) + apply (drule(2) descendants_range_inD') + apply simp + apply (rule disjoint_subset[OF usableRange_subseteq]) + apply (rule valid_capAligned) + apply (erule(1) ctes_of_valid_cap') + apply (simp add:untypedCapRange)+ + apply (elim disjE) + apply clarsimp + apply (drule(2) descendants_range_inD') + apply simp + apply (rule disjoint_subset[OF usableRange_subseteq]) + apply (rule valid_capAligned) + apply (erule(1) ctes_of_valid_cap') + apply (simp add:untypedCapRange)+ + apply (thin_tac "P\Q" for P Q)+ + apply (rule disjoint_subset[OF usableRange_subseteq]) + apply (rule valid_capAligned) + apply (erule(1) ctes_of_valid_cap') + apply simp+ + apply blast + done + +lemma updateFreeIndex_caps_overlap_reserved: + "\\s. valid_pspace' s \ descendants_range_in' S src (ctes_of s) + \ cte_wp_at' ((\cap. S \ untypedRange cap \ + usableUntypedRange (capFreeIndex_update (\_. index) cap) \ S = {} \ + isUntypedCap cap) o cteCap) src s\ + updateFreeIndex src index + \\r s. caps_overlap_reserved' S s\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateFreeIndex_updateCap_caps_overlap_reserved getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) + apply (clarsimp simp: valid_mdb'_def split: option.split) + done + +lemma updateFreeIndex_updateCap_caps_no_overlap'': + "\\s. isUntypedCap cap \ caps_no_overlap'' ptr sz s \ + cte_wp_at' (\c. cteCap c = cap) src s\ + updateCap src (capFreeIndex_update (\_. index) cap) + \\r s. caps_no_overlap'' ptr sz s\" + apply (clarsimp simp:caps_no_overlap''_def) + apply (wp updateCap_ctes_of_wp) + apply (clarsimp simp: modify_map_def ran_def cte_wp_at_ctes_of + simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex) + apply (case_tac "a = src") + apply (clarsimp simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex) + apply (erule subsetD[rotated]) + apply (elim allE impE) + apply fastforce + apply (clarsimp simp:isCap_simps) + apply (erule subset_trans) + apply (clarsimp simp:isCap_simps) + apply (clarsimp simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex) + apply (erule subsetD[rotated]) + apply (elim allE impE) + prefer 2 + apply assumption + apply fastforce+ + done + +lemma updateFreeIndex_caps_no_overlap'': + "\\s. caps_no_overlap'' ptr sz s \ + cte_wp_at' (isUntypedCap o cteCap) src s\ + updateFreeIndex src index + \\r s. 
caps_no_overlap'' ptr sz s\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateFreeIndex_updateCap_caps_no_overlap'' getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: caps_no_overlap''_def split: option.split) + done + +lemma updateFreeIndex_descendants_of': + "\\s. cte_wp_at' (\c. \idx'. cteCap c = capFreeIndex_update (K idx') cap) ptr s \ isUntypedCap cap \ + P ((swp descendants_of') (null_filter' (ctes_of s)))\ + updateCap ptr (capFreeIndex_update (\_. index) cap) + \\r s. P ((swp descendants_of') (null_filter' (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply clarsimp + apply (erule subst[rotated,where P = P]) + apply (rule ext) + apply (clarsimp simp:null_filter_descendants_of'[OF null_filter_simp']) + apply (rule mdb_inv_preserve.descendants_of) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (frule_tac m="ctes_of s" and index=index in mdb_inv_preserve_updateCap) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + done + +lemma updateFreeIndex_updateCap_descendants_range_in': + "\\s. cte_wp_at' (\c. cteCap c = cap) slot s \ isUntypedCap cap \ + descendants_range_in' S slot (ctes_of s)\ + updateCap slot (capFreeIndex_update (\_. index) cap) + \\r s. descendants_range_in' S slot (ctes_of s)\" + apply (rule hoare_pre) + apply (wp descendants_range_in_lift' + [where Q'="\s. cte_wp_at' (\c. cteCap c = cap) slot s \ isUntypedCap cap" and + Q = "\s. cte_wp_at' (\c. cteCap c = cap) slot s \ isUntypedCap cap "] ) + apply (wp updateFreeIndex_descendants_of') + apply (clarsimp simp: cte_wp_at_ctes_of swp_def isCap_simps) + apply (simp add:updateCap_def) + apply (wp setCTE_weak_cte_wp_at getCTE_wp) + apply (fastforce simp:cte_wp_at_ctes_of isCap_simps) + apply (clarsimp) + done + +lemma updateFreeIndex_descendants_range_in': + "\\s. cte_wp_at' (isUntypedCap o cteCap) slot s + \ descendants_range_in' S slot (ctes_of s)\ + updateFreeIndex slot index + \\r s. descendants_range_in' S slot (ctes_of s)\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateFreeIndex_updateCap_descendants_range_in' getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma caps_no_overlap''_def2: + "caps_no_overlap'' ptr sz = + (\s. \cte\ran (null_filter' (ctes_of s)). + untypedRange (cteCap cte) \ + {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1} \ {} \ + {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1} \ + untypedRange (cteCap cte))" + apply (intro ext iffI) + apply (clarsimp simp:caps_no_overlap''_def null_filter'_def ran_def) + apply (drule_tac x = cte in spec) + apply fastforce + apply (clarsimp simp:caps_no_overlap''_def null_filter'_def) + apply (case_tac "cte = CTE capability.NullCap nullMDBNode") + apply clarsimp + apply (drule_tac x = cte in bspec) + apply (clarsimp simp:ran_def) + apply (rule_tac x= a in exI) + apply clarsimp + apply clarsimp + apply (erule subsetD) + apply simp + done + +lemma deleteObjects_caps_no_overlap'': + "\\s. invs' s \ ct_active' s \ sch_act_simple s \ + cte_wp_at' (\c. cteCap c = capability.UntypedCap d ptr sz idx) slot s \ + caps_no_overlap'' ptr sz s \ + descendants_range' (capability.UntypedCap d ptr sz idx) slot (ctes_of s)\ + deleteObjects ptr sz + \\rv s. 
caps_no_overlap'' ptr sz s\" + apply (rule hoare_name_pre_state) + apply (clarsimp split:if_splits) + apply (clarsimp simp:caps_no_overlap''_def2 deleteObjects_def2 capAligned_def valid_cap'_def + dest!:ctes_of_valid_cap') + apply (wp deleteObjects_null_filter[where idx = idx and p = slot]) + apply (clarsimp simp:cte_wp_at_ctes_of invs_def) + apply (case_tac cte) + apply clarsimp + apply (frule ctes_of_valid_cap') + apply (simp add:invs_valid_objs') + apply (simp add:valid_cap'_def capAligned_def) + done + +lemma descendants_range_in_subseteq': + "\descendants_range_in' A p ms ;B\ A\ \ descendants_range_in' B p ms" + by (auto simp:descendants_range_in'_def cte_wp_at_ctes_of dest!:bspec) + +lemma updateFreeIndex_mdb_simple': + "\\s. descendants_of' src (ctes_of s) = {} \ + pspace_no_overlap' (capPtr cap) (capBlockSize cap) s \ + valid_pspace' s \ cte_wp_at' (\c. \idx'. cteCap c = capFreeIndex_update (\_. idx') cap) src s \ + isUntypedCap cap\ + updateCap src (capFreeIndex_update (\_. idx) cap) + \\rv. valid_mdb'\" + apply (clarsimp simp:valid_mdb'_def updateCap_def valid_pspace'_def) + apply (wp getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of isCap_simps simp del:fun_upd_apply) + + apply (frule mdb_inv_preserve_updateCap[where index=idx and m="ctes_of s" and slot=src for s]) + apply (simp add: isCap_simps) + apply (simp add: modify_map_def) + apply (clarsimp simp add: mdb_inv_preserve.preserve_stuff mdb_inv_preserve.by_products valid_mdb_ctes_def) + + proof - + fix s cte ptr sz idx' d + assume descendants: "descendants_of' src (ctes_of s) = {}" + and cte_wp_at' :"ctes_of s src = Some cte" "cteCap cte = capability.UntypedCap d ptr sz idx'" + and unt_inc' :"untyped_inc' (ctes_of s)" + and valid_objs' :"valid_objs' s" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. UntypedCap d ptr sz idx) cte))" + (is "mdb_inv_preserve (ctes_of s) ?ctes") + + show "untyped_inc' ?ctes" + using cte_wp_at' + apply (clarsimp simp:untyped_inc'_def mdb_inv_preserve.descendants_of[OF invp, symmetric] + descendants + split del: if_split) + apply (case_tac "ctes_of s p") + apply (simp split: if_split_asm) + apply (case_tac "ctes_of s p'") + apply (simp split: if_split_asm) + apply (case_tac "the (ctes_of s p)", case_tac "the (ctes_of s p')") + apply clarsimp + apply (cut_tac p=p and p'=p' in untyped_incD'[OF _ _ _ _ unt_inc']) + apply assumption + apply (clarsimp simp: isCap_simps split: if_split_asm) + apply assumption + apply (clarsimp simp: isCap_simps split: if_split_asm) + apply (clarsimp simp: descendants split: if_split_asm) + done +qed + +lemma pspace_no_overlap_valid_untyped': + "\ pspace_no_overlap' ptr bits s; is_aligned ptr bits; bits < word_bits; + pspace_aligned' s \ + \ valid_untyped' d ptr bits idx s" + apply (clarsimp simp: valid_untyped'_def ko_wp_at'_def split del: if_split) + apply (frule(1) pspace_no_overlapD') + apply (simp add: obj_range'_def[symmetric] Int_commute add_mask_fold) + apply (erule disjE) + apply (drule base_member_set[simplified field_simps add_mask_fold]) + apply (simp add: word_bits_def) + apply blast + apply (simp split: if_split_asm) + apply (erule notE, erule disjoint_subset2[rotated]) + apply (clarsimp simp: is_aligned_no_wrap'[OF _ word_of_nat_less]) + done + +lemma updateFreeIndex_valid_pspace_no_overlap': + "\\s. valid_pspace' s \ + (\ptr sz. pspace_no_overlap' ptr sz s \ idx \ 2 ^ sz \ + cte_wp_at' ((\c. 
isUntypedCap c \ capPtr c = ptr \ capBlockSize c = sz) o cteCap) src s) + \ is_aligned (of_nat idx :: machine_word) minUntypedSizeBits \ + descendants_of' src (ctes_of s) = {}\ + updateFreeIndex src idx + \\r s. valid_pspace' s\" + apply (clarsimp simp:valid_pspace'_def updateFreeIndex_def updateTrackedFreeIndex_def) + apply (rule hoare_pre) + apply (rule hoare_vcg_conj_lift) + apply (clarsimp simp:updateCap_def getSlotCap_def) + apply (wp getCTE_wp | simp)+ + apply (wp updateFreeIndex_mdb_simple' getCTE_wp' | simp add: getSlotCap_def)+ + apply (clarsimp simp:cte_wp_at_ctes_of valid_pspace'_def) + apply (case_tac cte,simp add:isCap_simps) + apply (frule(1) ctes_of_valid_cap') + apply (clarsimp simp: valid_cap_simps' capAligned_def pspace_no_overlap_valid_untyped') + done + +crunch vms'[wp]: updateFreeIndex "valid_machine_state'" + +(* FIXME: move *) +lemma setCTE_tcbDomain_inv[wp]: + "\obj_at' (\tcb. P (tcbState tcb)) t\ setCTE ptr v \\_. obj_at' (\tcb. P (tcbState tcb)) t\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + +(* FIXME: move *) +crunch tcbState_inv[wp]: cteInsert "obj_at' (\tcb. P (tcbState tcb)) t" + (wp: crunch_simps hoare_drop_imps) + +lemma updateFreeIndex_clear_invs': + "\\s. invs' s \ + (\ptr sz. pspace_no_overlap' ptr sz s \ idx \ 2 ^ sz \ + cte_wp_at' ((\c. isUntypedCap c \ capPtr c = ptr \ capBlockSize c = sz) o cteCap) src s) + \ is_aligned (of_nat idx :: machine_word) minUntypedSizeBits + \ descendants_of' src (ctes_of s) = {}\ + updateFreeIndex src idx + \\r s. invs' s\" + apply (clarsimp simp:invs'_def valid_state'_def) + apply (wp updateFreeIndex_valid_pspace_no_overlap') + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def) + apply (wp updateFreeIndex_valid_pspace_no_overlap' sch_act_wf_lift valid_queues_lift + updateCap_iflive' tcb_in_cur_domain'_lift + | simp add: pred_tcb_at'_def)+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: ifunsafe'_def3 cteInsert_def setUntypedCapAsFull_def + split del: if_split) + apply wp+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: updateCap_def) + apply (wp valid_irq_node_lift setCTE_typ_at')+ + apply (rule hoare_vcg_conj_lift) + apply (simp add:updateCap_def) + apply (wp setCTE_irq_handlers' getCTE_wp) + apply (simp add:updateCap_def) + apply (wp irqs_masked_lift cur_tcb_lift ct_idle_or_in_cur_domain'_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp valid_bitmaps_lift + | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] + | simp add: getSlotCap_def + | simp add: cte_wp_at_ctes_of)+ + apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) + apply (clarsimp simp: isCap_simps) + apply (frule(1) valid_global_refsD_with_objSize) + apply clarsimp + apply (intro conjI allI impI) + apply (clarsimp simp: modify_map_def cteCaps_of_def ifunsafe'_def3 split:if_splits) + apply (drule_tac x=src in spec) + apply (clarsimp simp:isCap_simps) + apply (rule_tac x = cref' in exI) + apply clarsimp + apply (drule_tac x = cref in spec) + apply clarsimp + apply (rule_tac x = cref' in exI) + apply clarsimp + apply (clarsimp simp: valid_pspace'_def) + apply (erule untyped_ranges_zero_fun_upd, simp_all) + apply (clarsimp simp: untypedZeroRange_def cteCaps_of_def isCap_simps) + done + +lemma cte_wp_at_pspace_no_overlapI': + "\invs' s; cte_wp_at' (\c. 
cteCap c = capability.UntypedCap + d (ptr && ~~ mask sz) sz idx) cref s; + idx \ unat (ptr && mask sz); sz < word_bits\ + \ pspace_no_overlap' ptr sz s" + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (case_tac cte,clarsimp) + apply (frule ctes_of_valid_cap') + apply (simp add:invs_valid_objs') + apply (clarsimp simp:valid_cap'_def invs'_def valid_state'_def valid_pspace'_def + valid_untyped'_def simp del:usableUntypedRange.simps) + apply (unfold pspace_no_overlap'_def) + apply (intro allI impI) + apply (unfold ko_wp_at'_def) + apply (clarsimp simp del: atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff usableUntypedRange.simps) + apply (drule spec)+ + apply (frule(1) pspace_distinctD') + apply (frule(1) pspace_alignedD') + apply (erule(1) impE)+ + apply (clarsimp simp: obj_range'_def simp del: atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff usableUntypedRange.simps) + apply (erule disjoint_subset2[rotated]) + apply (frule(1) le_mask_le_2p) + apply (clarsimp simp:p_assoc_help) + apply (rule le_plus'[OF word_and_le2]) + apply simp + apply (erule word_of_nat_le) + done + +lemma descendants_range_caps_no_overlapI': + "\invs' s; cte_wp_at' (\c. cteCap c = capability.UntypedCap + d (ptr && ~~ mask sz) sz idx) cref s; + descendants_range_in' {ptr .. (ptr && ~~ mask sz) + mask sz} cref (ctes_of s)\ + \ caps_no_overlap'' ptr sz s" + apply (frule invs_mdb') + apply (clarsimp simp:valid_mdb'_def valid_mdb_ctes_def cte_wp_at_ctes_of + simp del:usableUntypedRange.simps untypedRange.simps) + apply (unfold caps_no_overlap''_def add_mask_fold) + apply (intro ballI impI) + apply (erule ranE) + apply (subgoal_tac "isUntypedCap (cteCap ctea)") + prefer 2 + apply (rule untypedRange_not_emptyD) + apply blast + apply (case_tac ctea,case_tac cte) + apply simp + apply (drule untyped_incD') + apply ((simp add:isCap_simps del:usableUntypedRange.simps untypedRange.simps)+)[4] + apply (elim conjE subset_splitE) + apply (erule subset_trans[OF _ psubset_imp_subset,rotated]) + apply (clarsimp simp:word_and_le2 add_mask_fold) + apply simp + apply (elim conjE) + apply (thin_tac "P\Q" for P Q)+ + apply (drule(2) descendants_range_inD') + apply (simp add:untypedCapRange)+ + apply (erule subset_trans[OF _ equalityD1,rotated]) + apply (clarsimp simp:word_and_le2 add_mask_fold) + apply (thin_tac "P\Q" for P Q)+ + apply (drule disjoint_subset[rotated, where A' = "{ptr..(ptr && ~~ mask sz) + mask sz}"]) + apply (clarsimp simp:word_and_le2 Int_ac add_mask_fold)+ + done + +lemma cte_wp_at_caps_no_overlapI': + "\invs' s; cte_wp_at' (\c. 
(cteCap c) = UntypedCap d (ptr && ~~ mask sz) sz idx) cref s; + idx \ unat (ptr && mask sz); sz < word_bits\ + \ caps_no_overlap'' ptr sz s" + apply (frule invs_mdb') + apply (frule(1) le_mask_le_2p) + apply (clarsimp simp:valid_mdb'_def valid_mdb_ctes_def cte_wp_at_ctes_of) + apply (case_tac cte) + apply simp + apply (frule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) + apply (unfold caps_no_overlap''_def) + apply (intro ballI impI) + apply (erule ranE) + apply (subgoal_tac "isUntypedCap (cteCap ctea)") + prefer 2 + apply (rule untypedRange_not_emptyD) + apply blast + apply (case_tac ctea) + apply simp + apply (drule untyped_incD') + apply (simp add:isCap_simps)+ + apply (elim conjE) + apply (erule subset_splitE) + apply (erule subset_trans[OF _ psubset_imp_subset,rotated]) + apply (clarsimp simp: word_and_le2) + apply simp + apply (thin_tac "P\Q" for P Q)+ + apply (elim conjE) + apply (drule disjoint_subset2[rotated, where B' = "{ptr..(ptr && ~~ mask sz) + mask sz}"]) + apply clarsimp + apply (rule le_plus'[OF word_and_le2]) + apply simp + apply (erule word_of_nat_le) + apply (simp add: add_mask_fold) + apply (erule subset_trans[OF _ equalityD1,rotated]) + apply (clarsimp simp:word_and_le2) + apply (thin_tac "P\Q" for P Q)+ + apply (drule disjoint_subset[rotated, where A' = "{ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1}"]) + apply (clarsimp simp:word_and_le2 Int_ac)+ + done + + +lemma descendants_range_ex_cte': + "\descendants_range_in' S p (ctes_of s'); ex_cte_cap_wp_to' P q s'; S \ capRange (cteCap cte); + invs' s'; ctes_of s' p = Some cte; isUntypedCap (cteCap cte)\ \ q \ S" + apply (frule invs_valid_objs') + apply (frule invs_mdb') + apply (clarsimp simp:invs'_def valid_state'_def) + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (frule_tac cte = "cte" in valid_global_refsD') + apply simp + apply (case_tac "\irq. cteCap ctea = IRQHandlerCap irq") + apply clarsimp + apply (erule(1) in_empty_interE[OF _ _ subsetD,rotated -1]) + apply (clarsimp simp:global_refs'_def) + apply (erule_tac A = "range P" for P in subsetD) + apply (simp add:range_eqI field_simps) + apply (case_tac ctea) + apply clarsimp + apply (case_tac ctea) + apply (drule_tac cte = "cte" and cte' = ctea in untyped_mdbD') + apply assumption + apply (clarsimp simp:isCap_simps) + apply (drule_tac B = "untypedRange (cteCap cte)" in subsetD[rotated]) + apply (clarsimp simp:untypedCapRange) + apply clarsimp + apply (drule_tac x = " (irq_node' s')" in cte_refs_capRange[rotated]) + apply (erule(1) ctes_of_valid_cap') + apply blast + apply (clarsimp simp:isCap_simps) + apply (simp add:valid_mdb'_def valid_mdb_ctes_def) + apply (drule(2) descendants_range_inD') + apply clarsimp + apply (drule_tac x = " (irq_node' s')" in cte_refs_capRange[rotated]) + apply (erule(1) ctes_of_valid_cap') + apply blast + done + +lemma updateCap_isUntypedCap_corres: + "\is_untyped_cap cap; isUntypedCap cap'; cap_relation cap cap'\ + \ corres dc + (cte_wp_at (\c. 
is_untyped_cap c \ obj_ref_of c = obj_ref_of cap \ + cap_bits c = cap_bits cap \ cap_is_device c = cap_is_device cap) src and valid_objs and + pspace_aligned and pspace_distinct) + (cte_at' (cte_map src) and pspace_distinct' and pspace_aligned') + (set_cap cap src) (updateCap (cte_map src) cap')" + apply (rule corres_name_pre) + apply (simp add: updateCap_def) + apply (frule state_relation_pspace_relation) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule pspace_relation_cte_wp_atI) + apply (fastforce simp: cte_wp_at_ctes_of) + apply simp + apply clarify + apply (frule cte_map_inj_eq) + apply (fastforce simp: cte_wp_at_ctes_of cte_wp_at_caps_of_state)+ + apply (clarsimp simp: is_cap_simps isCap_simps) + apply (rule corres_guard_imp) + apply (rule corres_symb_exec_r) + apply (rule_tac F = "cteCap_update (\_. capability.UntypedCap dev r bits f) ctea + = cteCap_update (\cap. capFreeIndex_update (\_. f) (cteCap cte)) cte" + in corres_gen_asm2) + apply (rule_tac F = " (cap.UntypedCap dev r bits f) = free_index_update (\_. f) c" + in corres_gen_asm) + apply simp + apply (rule setCTE_UntypedCap_corres) + apply ((clarsimp simp: cte_wp_at_caps_of_state cte_wp_at_ctes_of)+)[3] + apply (subst identity_eq) + apply (wp getCTE_sp getCTE_get no_fail_getCTE)+ + apply (clarsimp simp: cte_wp_at_ctes_of cte_wp_at_caps_of_state)+ + done + +end + +lemma updateFreeIndex_corres: + "\is_untyped_cap cap; free_index_of cap = idx \ + \ corres dc + (cte_wp_at (\c. is_untyped_cap c \ obj_ref_of c = obj_ref_of cap \ + cap_bits c = cap_bits cap \ cap_is_device c = cap_is_device cap) src and valid_objs + and pspace_aligned and pspace_distinct) + (cte_at' (cte_map src) + and pspace_distinct' and pspace_aligned') + (set_cap cap src) (updateFreeIndex (cte_map src) idx)" + apply (rule corres_name_pre) + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def) + apply (rule corres_guard_imp) + apply (rule corres_symb_exec_r_conj[where P'="cte_at' (cte_map src)"])+ + apply (rule_tac F="isUntypedCap capa + \ cap_relation cap (capFreeIndex_update (\_. idx) capa)" + in corres_gen_asm2) + apply (rule updateCap_isUntypedCap_corres, simp+) + apply (clarsimp simp: isCap_simps) + apply simp + apply (wp getSlotCap_wp)+ + apply (clarsimp simp: state_relation_def cte_wp_at_ctes_of) + apply (rule no_fail_pre, wp no_fail_getSlotCap) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp getSlotCap_wp)+ + apply (clarsimp simp: state_relation_def cte_wp_at_ctes_of) + apply (rule no_fail_pre, wp no_fail_getSlotCap) + apply simp + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of cte_wp_at_caps_of_state) + apply (frule state_relation_pspace_relation) + apply (frule(1) pspace_relation_ctes_ofI[OF _ caps_of_state_cteD], simp+) + apply (clarsimp simp: isCap_simps is_cap_simps + cte_wp_at_caps_of_state free_index_of_def) + done + + +locale invokeUntyped_proofs = + fixes s cref reset ptr_base ptr tp us slots sz idx dev + assumes vui: "valid_untyped_inv_wcap' + (Invocations_H.Retype cref reset ptr_base ptr tp us slots dev) + (Some (UntypedCap dev (ptr && ~~ mask sz) sz idx)) s" + and misc: "ct_active' s" "invs' s" + +begin + +lemma cte_wp_at': "cte_wp_at' (\cte. cteCap cte = capability.UntypedCap + dev (ptr && ~~ mask sz) sz idx) cref s" + and cover: "range_cover ptr sz (APIType_capBits tp us) (length (slots::machine_word list))" + and misc2: "distinct slots" + "slots \ []" + "\slot\set slots. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s" + "\x\set slots. ex_cte_cap_wp_to' (\_. 
True) x s" + using vui + by (auto simp: cte_wp_at_ctes_of) + +interpretation Arch . (*FIXME: arch_split*) + +lemma idx_cases: + "((\ reset \ idx \ unat (ptr - (ptr && ~~ mask sz))) \ reset \ ptr = ptr && ~~ mask sz)" + using vui + by (clarsimp simp: cte_wp_at_ctes_of) + +lemma desc_range: + "reset \ descendants_range_in' (mask_range ptr sz) (cref) (ctes_of s)" + using vui by (clarsimp simp: empty_descendants_range_in') + +abbreviation(input) + "retype_range == {ptr..ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us - 1}" + +abbreviation(input) + "usable_range == {ptr..(ptr && ~~ mask sz) + mask sz}" + +lemma not_0_ptr[simp]: "ptr\ 0" + using misc cte_wp_at' + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte) + apply clarsimp + apply (drule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) + apply (simp add: valid_cap'_def) + done + +lemmas range_cover_subset'' = range_cover_subset'[simplified add_mask_fold] + +lemma subset_stuff[simp]: + "retype_range \ usable_range" + apply (rule range_cover_subset''[OF cover]) + apply (simp add:misc2) + done + +lemma descendants_range[simp]: + "descendants_range_in' usable_range cref (ctes_of s)" + "descendants_range_in' retype_range cref (ctes_of s)" +proof - + have "descendants_range_in' usable_range cref (ctes_of s)" + using misc idx_cases cte_wp_at' cover + apply - + apply (erule disjE) + apply (erule cte_wp_at_caps_descendants_range_inI' + [OF _ _ _ range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]]) + apply simp+ + using desc_range + apply simp + done + thus "descendants_range_in' usable_range cref (ctes_of s)" + by simp + thus "descendants_range_in' retype_range cref (ctes_of s)" + by (rule descendants_range_in_subseteq'[OF _ subset_stuff]) +qed + +lemma vc'[simp] : "s \' capability.UntypedCap dev (ptr && ~~ mask sz) sz idx" + using misc cte_wp_at' + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte) + apply clarsimp + apply (erule ctes_of_valid_cap') + apply (simp add: invs_valid_objs') + done + +lemma ptr_cn[simp]: + "canonical_address (ptr && ~~ mask sz)" + using vc' unfolding valid_cap'_def by simp + +lemma sz_limit[simp]: + "sz \ maxUntypedSizeBits" + using vc' unfolding valid_cap'_def by clarsimp + +lemma ps_no_overlap'[simp]: "\ reset \ pspace_no_overlap' ptr sz s" + using misc cte_wp_at' cover idx_cases + apply clarsimp + apply (erule cte_wp_at_pspace_no_overlapI' + [OF _ _ _ range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]]) + apply (simp add: cte_wp_at_ctes_of) + apply simp+ + done + +lemma caps_no_overlap'[simp]: "caps_no_overlap'' ptr sz s" + using cte_wp_at' misc cover desc_range idx_cases + apply - + apply (erule disjE) + apply (erule cte_wp_at_caps_no_overlapI' + [OF _ _ _ range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]]) + apply simp+ + apply (erule descendants_range_caps_no_overlapI') + apply simp+ + done + +lemma idx_compare'[simp]: + "unat ((ptr && mask sz) + (of_nat (length slots)<< (APIType_capBits tp us))) \ 2 ^ sz" + apply (rule le_trans[OF unat_plus_gt]) + apply (simp add: range_cover.unat_of_nat_n_shift[OF cover] range_cover_unat) + apply (insert range_cover.range_cover_compare_bound[OF cover]) + apply simp + done + +lemma ex_cte_no_overlap': + "\P p. 
ex_cte_cap_wp_to' P p s \ p \ usable_range" + using cte_wp_at' misc + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule_tac cte = cte in descendants_range_ex_cte'[OF descendants_range(1)]) + apply (clarsimp simp: word_and_le2 isCap_simps add_mask_fold)+ + done + +lemma cref_inv: "cref \ usable_range" + apply (insert misc cte_wp_at') + apply (drule if_unsafe_then_capD') + apply (simp add: invs'_def valid_state'_def) + apply simp + apply (erule ex_cte_no_overlap') + done + +lemma slots_invD: + "\x. x \ set slots \ x \ cref \ x \ usable_range \ ex_cte_cap_wp_to' (\_. True) x s" + using misc cte_wp_at' vui + apply clarsimp + apply (drule(1) bspec)+ + apply (drule ex_cte_no_overlap') + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma usableRange_disjoint: + "usableUntypedRange (capability.UntypedCap d (ptr && ~~ mask sz) sz + (unat ((ptr && mask sz) + of_nat (length slots) * 2 ^ APIType_capBits tp us))) \ + {ptr..ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us - 1} = {}" +proof - + have idx_compare''[simp]: + "unat ((ptr && mask sz) + (of_nat (length slots) * (2::machine_word) ^ APIType_capBits tp us)) < 2 ^ sz + \ ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us - 1 + < ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us" + apply (rule word_leq_le_minus_one,simp) + apply (rule neq_0_no_wrap) + apply (rule machine_word_plus_mono_right_split) + apply (simp add: shiftl_t2n range_cover_unat[OF cover] field_simps) + apply (simp add: range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def, OF cover])+ + done + show ?thesis + apply (clarsimp simp: mask_out_sub_mask) + apply (drule idx_compare'') + apply simp + done +qed + +lemma szw: "sz < word_bits" + using cte_wp_at_valid_objs_valid_cap'[OF cte_wp_at'] misc + by (clarsimp simp: valid_cap_simps' capAligned_def invs_valid_objs') + +lemma idx_le_new_offs: + "\ reset + \ idx \ unat ((ptr && mask sz) + (of_nat (length slots) * 2 ^ (APIType_capBits tp us)))" + using misc idx_cases range_cover.range_cover_base_le[OF cover] + apply (clarsimp simp only: simp_thms) + apply (erule order_trans) + apply (simp add: word_le_nat_alt[symmetric] + shiftl_t2n mult.commute) + done + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma valid_sched_etcbs[elim!]: "valid_sched_2 queues ekh sa cdom kh ct it \ valid_etcbs_2 ekh kh" + by (simp add: valid_sched_def) + +crunch ksIdleThread[wp]: deleteObjects "\s. P (ksIdleThread s)" + (simp: crunch_simps wp: hoare_drop_imps unless_wp) +crunch ksCurDomain[wp]: deleteObjects "\s. P (ksCurDomain s)" + (simp: crunch_simps wp: hoare_drop_imps unless_wp) +crunch irq_node[wp]: deleteObjects "\s. P (irq_node' s)" + (simp: crunch_simps wp: hoare_drop_imps unless_wp) + +lemma deleteObjects_ksCurThread[wp]: + "\\s. P (ksCurThread s)\ deleteObjects ptr sz \\_ s. P (ksCurThread s)\" + apply (simp add: deleteObjects_def3) + apply (wp | simp add: doMachineOp_def split_def)+ + done + +lemma deleteObjects_ct_active': + "\invs' and sch_act_simple and ct_active' + and cte_wp_at' (\c. cteCap c = UntypedCap d ptr sz idx) cref + and (\s. descendants_range' (UntypedCap d ptr sz idx) cref (ctes_of s)) + and K (sz < word_bits \ is_aligned ptr sz)\ + deleteObjects ptr sz + \\_. ct_active'\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_pre) + apply wps + apply (wp deleteObjects_st_tcb_at') + apply (auto simp: ct_in_state'_def elim: pred_tcb'_weakenE) + done + +defs cNodeOverlap_def: + "cNodeOverlap \ \cns inRange. \p n. 
cns p = Some n \ (\ is_aligned p (cte_level_bits + n) + \ cte_level_bits + n \ word_bits + \ ({p .. p + 2 ^ (cte_level_bits + n) - 1} \ {p. inRange p} \ {}))" + +defs archOverlap_def: + "archOverlap \ \s inRange. + \p pt_t. gsPTTypes (ksArchState s) p = Some pt_t \ + (\is_aligned p (pt_bits pt_t) \ + ({p .. p + 2 ^ pt_bits pt_t - 1} \ {p. inRange p} \ {}))" + +lemma cNodeNoOverlap: + notes Int_atLeastAtMost[simp del] + shows + "corres dc (\s. \cref. cte_wp_at (\cap. is_untyped_cap cap + \ Collect R \ usable_untyped_range cap) cref s + \ valid_objs s \ pspace_aligned s) + \ + (return x) (stateAssert (\s. \ cNodeOverlap (gsCNodes s) R) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: cNodeOverlap_def cte_wp_at_caps_of_state) + apply (frule(1) caps_of_state_valid_cap) + apply (frule usable_range_subseteq[rotated], simp add: valid_cap_def) + apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq + obj_at_def is_cap_table is_cap_simps) + apply (frule(1) pspace_alignedD) + apply simp + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow simp del: ) + apply blast + apply (simp add: is_aligned_no_overflow power_overflow word_bits_def + Int_atLeastAtMost) + apply wp+ + done + +lemma archNoOverlap: + notes Int_atLeastAtMost[simp del] + shows + "corres dc (\s. \cref. cte_wp_at (\cap. is_untyped_cap cap + \ Collect R \ usable_untyped_range cap) cref s + \ valid_objs s \ pspace_aligned s) + \ + (return x) (stateAssert (\s. \ archOverlap s R) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: archOverlap_def cte_wp_at_caps_of_state) + apply (frule state_rel_ghost) + apply (drule (1) ghost_PTTypes) + apply (frule(1) caps_of_state_valid_cap) + apply (frule usable_range_subseteq[rotated], simp add: valid_cap_def) + apply (clarsimp simp: valid_cap_def valid_untyped_def + obj_at_def is_cap_table is_cap_simps) + apply (frule(1) pspace_alignedD) + apply (simp add: pt_bits_def) + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow simp del: ) + apply blast + apply (simp add: is_aligned_no_overflow power_overflow word_bits_def + Int_atLeastAtMost) + apply wp+ + done + +lemma reset_ineq_eq_idx_0: + "\ idx \ 2 ^ sz; b \ sz; (ptr :: obj_ref) \ 0; is_aligned ptr sz; sz < word_bits \ + \ (ptr + of_nat idx - 1 < ptr) = (idx = 0)" + apply (cases "idx = 0") + apply (simp add: gt0_iff_gem1[symmetric] word_neq_0_conv) + apply simp + apply (subgoal_tac "ptr \ ptr + of_nat idx - 1", simp_all)[1] + apply (subst field_simps[symmetric], erule is_aligned_no_wrap') + apply (subst word_less_nat_alt) + apply simp + apply (subst unat_of_nat_minus_1) + apply (erule order_le_less_trans, rule power_strict_increasing) + apply (simp add: word_bits_def) + apply simp + apply (rule notI, simp) + apply (erule order_less_le_trans[rotated]) + apply simp + done + +lemma reset_addrs_same: + "\ idx \ 2 ^ sz; resetChunkBits \ sz; ptr \ 0; is_aligned ptr sz; sz < word_bits \ + \ [ptr , ptr + 2 ^ 
resetChunkBits .e. getFreeRef ptr idx - 1] = + map (\i. getFreeRef ptr (i * 2 ^ resetChunkBits)) + [i\[0..<2 ^ (sz - resetChunkBits)]. i * 2 ^ resetChunkBits < idx]" + apply (simp add: upto_enum_step_def getFreeRef_def reset_ineq_eq_idx_0) + apply (clarsimp simp: upto_enum_word o_def unat_div simp del: upt.simps) + apply (subst unat_of_nat_minus_1) + apply (rule_tac y="2 ^ sz" in order_le_less_trans, simp) + apply (rule power_strict_increasing, simp_all add: word_bits_def)[1] + apply simp + apply (rule_tac f="map f" for f in arg_cong) + apply (rule filter_upt_eq[symmetric]) + apply clarsimp + apply (erule order_le_less_trans[rotated]) + apply simp + apply (rule notI) + apply (drule order_less_le_trans[where x="a * b" for a b], + rule_tac m="2 ^ resetChunkBits" and n=idx in alignUp_ge_nat) + apply simp+ + apply (simp add: field_simps) + apply (simp only: mult_Suc_right[symmetric]) + apply (subst(asm) div_add_self1[where 'a=nat, simplified, symmetric]) + apply simp + apply (simp only: field_simps) + apply simp + apply clarsimp + apply (rule order_le_less_trans, rule div_mult_le, simp) + apply (simp add: Suc_le_eq td_gal_lt[symmetric] power_add[symmetric]) + done + +lemmas descendants_of_null_filter' = null_filter_descendants_of'[OF null_filter_simp'] + +lemmas deleteObjects_descendants + = deleteObjects_null_filter[where P="\c. Q (descendants_of' p c)" for p Q, + simplified descendants_of_null_filter'] + +lemma updateFreeIndex_descendants_of2: + " \\s. cte_wp_at' (isUntypedCap o cteCap) ptr s \ + P (\y. descendants_of' y (ctes_of s))\ + updateFreeIndex ptr index + \\r s. P (\y. descendants_of' y (ctes_of s))\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateFreeIndex_descendants_of'[simplified swp_def descendants_of_null_filter'] + getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + done + +crunch typ_at'[wp]: updateFreeIndex "\s. P (typ_at' T p s)" + +lemma updateFreeIndex_cte_wp_at: + "\\s. cte_wp_at' (\c. P (cteCap_update (if p = slot + then capFreeIndex_update (\_. idx) else id) c)) p s\ + updateFreeIndex slot idx + \\rv. cte_wp_at' P p\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def split del: if_split) + apply (rule hoare_pre, wp updateCap_cte_wp_at' getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac "the (ctes_of s p)") + apply (auto split: if_split_asm) + done + +lemma ex_tupI: + "P (fst x) (snd x) \ \a b. P a b" + by blast + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma resetUntypedCap_corres: + "untypinv_relation ui ui' + \ corres (dc \ dc) + (einvs and schact_is_rct and ct_active + and valid_untyped_inv_wcap ui (Some (cap.UntypedCap dev ptr sz idx)) + and (\_. \ptr_base ptr' ty us slots dev'. + ui = Invocations_A.Retype slot True ptr_base ptr' ty us slots dev)) + (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') + (reset_untyped_cap slot) (resetUntypedCap (cte_map slot))" + apply (rule corres_gen_asm, clarsimp) + apply (simp add: reset_untyped_cap_def resetUntypedCap_def liftE_bindE cong: if_cong) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getSlotCap_corres]) + apply simp + apply (rule_tac F="cap = cap.UntypedCap dev ptr sz idx \ (\s. 
s \ cap)" in corres_gen_asm) + apply (clarsimp simp: bits_of_def free_index_of_def unlessE_def + split del: if_split cong: if_cong) + apply (rule corres_if[OF refl]) + apply (rule corres_returnOk[where P=\ and P'=\], simp) + apply (rule corres_split[OF deleteObjects_corres]) + apply (clarsimp simp add: valid_cap_def cap_aligned_def) + apply (clarsimp simp add: valid_cap_def cap_aligned_def untyped_min_bits_def) + apply (rule corres_if) + apply simp + apply (simp add: bits_of_def shiftL_nat) + apply (rule corres_split_nor) + apply (simp add: unless_def) + apply (rule corres_when, simp) + apply (rule corres_machine_op) + apply (rule corres_Id, simp, simp, wp) + apply (rule updateFreeIndex_corres, simp) + apply (simp add: free_index_of_def) + apply (wp | simp only: unless_def)+ + apply (rule_tac F="sz < word_bits \ idx \ 2 ^ sz + \ ptr \ 0 \ is_aligned ptr sz + \ resetChunkBits \ sz" in corres_gen_asm) + apply (simp add: bits_of_def free_index_of_def mapME_x_map_simp liftE_bindE + reset_addrs_same[where ptr=ptr and idx=idx and sz=sz] + o_def rev_map + del: capFreeIndex_update.simps) + apply (rule_tac P="\x. valid_objs and pspace_aligned and pspace_distinct + and pspace_no_overlap {ptr .. ptr + 2 ^ sz - 1} + and cte_wp_at (\a. is_untyped_cap a \ obj_ref_of a = ptr \ cap_bits a = sz + \ cap_is_device a = dev) slot" + and P'="\_. valid_pspace' and (\s. descendants_of' (cte_map slot) (ctes_of s) = {}) + and pspace_no_overlap' ptr sz + and cte_wp_at' (\cte. \idx. cteCap cte = UntypedCap dev ptr sz idx) (cte_map slot)" + in mapME_x_corres_same_xs) + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (rule corres_machine_op) + apply (rule corres_Id) + apply (simp add: shiftL_nat getFreeRef_def shiftl_t2n mult.commute) + apply simp + apply wp + apply (rule corres_split_nor[OF updateFreeIndex_corres]) + apply simp + apply (simp add: getFreeRef_def getFreeIndex_def free_index_of_def) + apply clarify + apply (subst unat_mult_simple) + apply (subst unat_of_nat_eq) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (erule order_less_le_trans; simp) + apply (subst unat_p2) + apply (simp add: Kernel_Config.resetChunkBits_def) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (subst unat_of_nat_eq) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (erule order_less_le_trans; simp) + apply simp + apply (rule preemptionPoint_corres) + apply wp+ + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (clarsimp simp: getFreeRef_def valid_pspace'_def cte_wp_at_ctes_of + valid_cap_def cap_aligned_def) + apply (erule aligned_add_aligned) + apply (rule is_aligned_weaken) + apply (rule is_aligned_mult_triv2) + apply (simp add: Kernel_Config.resetChunkBits_def) + apply (simp add: untyped_min_bits_def) + apply (rule hoare_pre) + apply simp + apply (strengthen imp_consequent) + apply (wp preemption_point_inv set_cap_cte_wp_at update_untyped_cap_valid_objs + set_cap_no_overlap | simp)+ + apply (clarsimp simp: exI cte_wp_at_caps_of_state) + apply (drule caps_of_state_valid_cap, simp+) + apply (clarsimp simp: is_cap_simps valid_cap_simps + cap_aligned_def + valid_untyped_pspace_no_overlap) + apply (rule hoare_pre) + apply (simp del: capFreeIndex_update.simps) + apply (strengthen imp_consequent) + apply (wp updateFreeIndex_valid_pspace_no_overlap' + updateFreeIndex_descendants_of2 + 
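+               (* Concrete-side precondition of the per-chunk loop body: the wp rules in
+                  this block re-establish the invariant chosen for mapME_x_corres_same_xs
+                  above, i.e. valid_pspace', no descendants of (cte_map slot),
+                  pspace_no_overlap' ptr sz, and an UntypedCap dev ptr sz idx still at
+                  (cte_map slot), across the chunk clear (doMachineOp), the
+                  updateFreeIndex, and the preemption point. *)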
doMachineOp_psp_no_overlap + updateFreeIndex_cte_wp_at + pspace_no_overlap'_lift + preemptionPoint_inv + hoare_vcg_ex_lift + | simp)+ + apply (clarsimp simp add: cte_wp_at_ctes_of exI isCap_simps valid_pspace'_def) + apply (clarsimp simp: getFreeIndex_def getFreeRef_def) + apply (subst is_aligned_weaken[OF is_aligned_mult_triv2]) + apply (simp add: Kernel_Config.resetChunkBits_def minUntypedSizeBits_def) + apply (subst unat_mult_simple) + apply (subst unat_of_nat_eq) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (erule order_less_le_trans; simp) + apply (subst unat_p2) + apply (simp add: Kernel_Config.resetChunkBits_def) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (subst unat_of_nat_eq) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (erule order_less_le_trans; simp) + apply simp + apply simp + apply (simp add: if_apply_def2) + apply (strengthen invs_valid_objs invs_psp_aligned invs_distinct) + apply (wp hoare_vcg_const_imp_lift) + apply (simp add: if_apply_def2) + apply (strengthen invs_pspace_aligned' invs_pspace_distinct' + invs_valid_pspace') + apply (wp hoare_vcg_const_imp_lift deleteObjects_cte_wp_at'[where p="cte_map slot"] + deleteObjects_invs'[where p="cte_map slot"] + deleteObjects_descendants[where p="cte_map slot"] + | simp)+ + apply (wp get_cap_wp getCTE_wp' | simp add: getSlotCap_def)+ + apply (clarsimp simp: cte_wp_at_caps_of_state descendants_range_def2) + apply (cases slot) + apply (strengthen empty_descendants_range_in + ex_tupI[where x=slot])+ + apply (frule(1) caps_of_state_valid) + apply (clarsimp simp: valid_cap_simps cap_aligned_def) + apply (frule(1) caps_of_state_valid) + apply (frule if_unsafe_then_capD[OF caps_of_state_cteD], clarsimp+) + apply (drule(1) ex_cte_cap_protects[OF _ caps_of_state_cteD + empty_descendants_range_in _ order_refl]; clarsimp) + apply (intro conjI impI; auto)[1] + apply (clarsimp simp: cte_wp_at_ctes_of descendants_range'_def2 + empty_descendants_range_in') + apply (frule cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def is_aligned_weaken untypedBits_defs) + apply (frule if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) + apply (frule(1) descendants_range_ex_cte'[OF empty_descendants_range_in' _ order_refl], + (simp add: isCap_simps add_mask_fold)+) + apply (auto simp: descendants_range_in'_def valid_untyped'_def) + done + +end + +lemma deleteObjects_ex_cte_cap_wp_to': + "\invs' and ex_cte_cap_wp_to' P slot and (\s. descendants_of' p (ctes_of s) = {}) + and cte_wp_at' (\cte. \idx d. cteCap cte = UntypedCap d ptr sz idx) p\ + deleteObjects ptr sz + \\rv. 
ex_cte_cap_wp_to' P slot\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule hoare_pre) + apply (simp add: ex_cte_cap_wp_to'_def) + apply wps + apply (wp hoare_vcg_ex_lift) + apply (rule_tac idx=idx in deleteObjects_cte_wp_at') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: ex_cte_cap_wp_to'_def + cte_wp_at_ctes_of) + apply (rule_tac x=cref in exI, simp) + apply (frule_tac p=cref in if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) + apply (frule descendants_range_ex_cte'[rotated, OF _ order_refl, where p=p], + (simp add: isCap_simps empty_descendants_range_in')+) + apply (auto simp: add_mask_fold) + done + +lemma updateCap_cte_cap_wp_to': + "\\s. cte_wp_at' (\cte. p' \ cte_refs' (cteCap cte) (irq_node' s) \ P (cteCap cte) + \ p' \ cte_refs' cap (irq_node' s) \ P cap) p s + \ ex_cte_cap_wp_to' P p' s\ + updateCap p cap + \\rv. ex_cte_cap_wp_to' P p'\" + apply (simp add: ex_cte_cap_wp_to'_def cte_wp_at_ctes_of updateCap_def) + apply (rule hoare_pre, (wp getCTE_wp | wps)+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule_tac x=cref in exI) + apply auto + done + +crunch ct_in_state'[wp]: doMachineOp "ct_in_state' P" + (simp: crunch_simps ct_in_state'_def) + +crunch st_tcb_at'[wp]: doMachineOp "st_tcb_at' P p" + (simp: crunch_simps ct_in_state'_def) + +lemma ex_cte_cap_wp_to_irq_state_independent_H[simp]: + "irq_state_independent_H (ex_cte_cap_wp_to' P slot)" + by (simp add: ex_cte_cap_wp_to'_def) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma updateFreeIndex_ctes_of: + "\\s. P (modify_map (ctes_of s) ptr (cteCap_update (capFreeIndex_update (\_. idx))))\ + updateFreeIndex ptr idx + \\r s. P (ctes_of s)\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateCap_ctes_of_wp getCTE_wp' | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule rsubst[where P=P]) + apply (case_tac cte) + apply (clarsimp simp: modify_map_def fun_eq_iff) + done + +lemma updateFreeIndex_cte_cap_wp_to'[wp]: + "\\s. cte_wp_at' (isUntypedCap o cteCap) p s + \ ex_cte_cap_wp_to' P p' s\ + updateFreeIndex p idx + \\rv. ex_cte_cap_wp_to' P p'\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateCap_cte_cap_wp_to' getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: isCap_simps ex_cte_cap_wp_to'_def split: option.split) + done + +lemma setCTE_ct_in_state: + "\ct_in_state' P\ setCTE p cte \\rv. ct_in_state' P\" + apply (rule hoare_name_pre_state) + apply (rule hoare_pre, wp ct_in_state'_decomp setCTE_pred_tcb_at') + apply (auto simp: ct_in_state'_def) + done + +crunch ct_in_state[wp]: updateFreeIndex "ct_in_state' P" +crunch nosch[wp]: updateFreeIndex "\s. P (ksSchedulerAction s)" + +lemma resetUntypedCap_invs_etc: + "\invs' and valid_untyped_inv_wcap' ui + (Some (UntypedCap dev ptr sz idx)) + and ct_active' + and K (\ptr_base ptr' ty us slots. ui = Retype slot True ptr_base ptr' ty us slots dev)\ + resetUntypedCap slot + \\_. invs' and valid_untyped_inv_wcap' ui (Some (UntypedCap dev ptr sz 0)) + and ct_active' + and pspace_no_overlap' ptr sz\, \\_. invs'\" + (is "\invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) and ct_active' and ?asm\ + ?f \\_. invs' and ?vu2 and ct_active' and ?psp\, \\_. 
invs'\") + apply (simp add: resetUntypedCap_def getSlotCap_def + liftE_bind_return_bindE_returnOk bindE_assoc) + apply (rule bindE_wp_fwd) + apply simp + apply (rule getCTE_sp) + apply (rule hoare_name_pre_stateE) + apply (clarsimp split del: if_split) + apply (subgoal_tac "capAligned ?cap") + prefer 2 + apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp+) + apply (clarsimp simp: cte_wp_at_ctes_of capAligned_def valid_cap_simps') + apply (cases "idx = 0") + apply (clarsimp simp: cte_wp_at_ctes_of unlessE_def split del: if_split) + apply wp + apply (clarsimp simp: valid_cap_simps' capAligned_def) + apply (rule cte_wp_at_pspace_no_overlapI'[where cref=slot], + (simp_all add: cte_wp_at_ctes_of)+)[1] + apply (clarsimp simp: unlessE_def cte_wp_at_ctes_of + split del: if_split) + apply (rule_tac Q'="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) and ct_active' and ?psp" + in bindE_wp_fwd) + apply clarsimp + apply (rule hoare_pre) + apply (simp add: sch_act_simple_def) + apply (wps ) + apply (wp deleteObject_no_overlap[where idx=idx] + deleteObjects_invs'[where idx=idx and p=slot] + hoare_vcg_ex_lift hoare_vcg_const_Ball_lift + deleteObjects_cte_wp_at'[where idx=idx] + deleteObjects_descendants[where p=slot] + deleteObjects_nosch + deleteObjects_ct_active'[where idx=idx and cref=slot] + deleteObjects_ex_cte_cap_wp_to'[where p=slot]) + apply (clarsimp simp: cte_wp_at_ctes_of descendants_range'_def2 + empty_descendants_range_in' + capAligned_def sch_act_simple_def) + apply (strengthen refl) + apply (frule ctes_of_valid[OF ctes_of_cte_wpD], clarsimp+) + apply (frule if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) + apply (erule rev_mp[where P="Ball S f" for S f] + rev_mp[where P="ex_cte_cap_wp_to' P p s" for P p s])+ + apply (strengthen descendants_range_ex_cte'[rotated, OF _ order_refl, mk_strg D _ E]) + apply (clarsimp simp: isCap_simps empty_descendants_range_in' add_mask_fold) + apply auto[1] + apply (cases "dev \ sz < resetChunkBits") + apply (simp add: pred_conj_def unless_def) + apply (rule hoare_pre) + apply (strengthen exI[where x=sz]) + apply (wp updateFreeIndex_clear_invs' + hoare_vcg_ex_lift + hoare_vcg_const_Ball_lift + updateFreeIndex_descendants_of2 + sch_act_simple_lift + pspace_no_overlap'_lift + doMachineOp_psp_no_overlap + updateFreeIndex_ctes_of + updateFreeIndex_cte_wp_at + | simp | wps | wp (once) ex_cte_cap_to'_pres)+ + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps + modify_map_def) + apply auto[1] + apply simp + apply (rule hoare_pre, rule hoare_strengthen_postE, + rule_tac P="\i. invs' and ?psp and ct_active' and valid_untyped_inv_wcap' ?ui + (Some (UntypedCap dev ptr sz (if i = 0 then idx + else (length [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] - i) * 2 ^ resetChunkBits)))" + and E="\_. 
invs'" + in mapME_x_validE_nth) + apply (rule hoare_pre) + apply simp + apply (wp preemptionPoint_invs + updateFreeIndex_clear_invs' + hoare_vcg_ex_lift + updateFreeIndex_descendants_of2 + updateFreeIndex_ctes_of + updateFreeIndex_cte_wp_at + doMachineOp_psp_no_overlap + hoare_vcg_ex_lift hoare_vcg_const_Ball_lift + pspace_no_overlap'_lift[OF preemptionPoint_inv] + pspace_no_overlap'_lift + updateFreeIndex_ct_in_state[unfolded ct_in_state'_def] + | strengthen invs_pspace_aligned' invs_pspace_distinct' + | simp add: ct_in_state'_def + sch_act_simple_def + | rule hoare_vcg_conj_lift_R + | wp (once) preemptionPoint_inv + | wps + | wp (once) ex_cte_cap_to'_pres)+ + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps + conj_comms) + apply (subgoal_tac "getFreeIndex ptr + (rev [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] ! i) + = (length [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] - Suc i) * + 2 ^ resetChunkBits") + apply clarsimp + apply (frule ctes_of_valid[OF ctes_of_cte_wpD], clarsimp+) + apply (subgoal_tac "resetChunkBits < word_bits \ sz < word_bits") + apply (strengthen is_aligned_weaken[OF is_aligned_mult_triv2]) + apply (subst nat_less_power_trans2[THEN order_less_imp_le]) + apply (clarsimp simp add: upto_enum_step_def getFreeRef_def) + apply (rule less_imp_diff_less) + apply (simp add: unat_div td_gal_lt[symmetric] power_add[symmetric]) + apply (cases "idx = 0") + apply (simp add: gt0_iff_gem1[symmetric, folded word_neq_0_conv]) + apply (simp add: valid_cap_simps') + apply (subst unat_minus_one) + apply (clarsimp simp: valid_cap_simps') + apply (drule of_nat64_0) + apply (erule order_le_less_trans, simp) + apply simp + apply (clarsimp simp: unat_of_nat valid_cap_simps') + apply (erule order_less_le_trans[rotated], simp) + apply simp + apply (auto simp: Kernel_Config.resetChunkBits_def minUntypedSizeBits_def)[1] + apply (simp add: valid_cap_simps' Kernel_Config.resetChunkBits_def capAligned_def) + apply (simp add: nth_rev) + apply (simp add: upto_enum_step_def upto_enum_word getFreeIndex_def + getFreeRef_def + del: upt.simps) + apply (intro conjI impI, simp_all)[1] + apply (subgoal_tac "resetChunkBits < word_bits") + apply (rule word_unat.Abs_eqD[OF _ word_unat.Rep]) + apply (simp add: word_of_nat_plus Abs_fnat_hom_mult[symmetric]) + apply (simp only: unats_def word_bits_def[symmetric]) + apply (clarsimp simp: unat_div nat_mult_power_less_eq) + apply (rule less_imp_diff_less) + apply (simp add: td_gal_lt[symmetric] power_add[symmetric]) + apply (simp only: unat_lt2p word_bits_def) + apply (simp add: Kernel_Config.resetChunkBits_def word_bits_def) + apply (clarsimp simp: cte_wp_at_ctes_of getFreeRef_def + upto_enum_step_def upto_enum_word) + apply (frule cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def) + apply (simp add: reset_ineq_eq_idx_0) + apply simp + apply clarsimp + done + +end + +lemma whenE_reset_resetUntypedCap_invs_etc: + "\invs' and valid_untyped_inv_wcap' ui + (Some (UntypedCap dev ptr sz idx)) + and ct_active' + and K (\ptr_base ty us slots. ui = Retype slot reset ptr_base ptr' ty us slots dev)\ + whenE reset (resetUntypedCap slot) + \\_. invs' and valid_untyped_inv_wcap' ui (Some (UntypedCap dev ptr sz (if reset then 0 else idx))) + and ct_active' + and pspace_no_overlap' (if reset then ptr else ptr') sz\, \\_. 
invs'\" + apply (rule hoare_pre) + apply (wp whenE_wp resetUntypedCap_invs_etc[where idx=idx, + simplified pred_conj_def conj_assoc] + | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def) + apply (drule_tac cref=slot in cte_wp_at_pspace_no_overlapI', + simp add: cte_wp_at_ctes_of, simp+) + done + +crunch ksCurDomain[wp]: updateFreeIndex "\s. P (ksCurDomain s)" + +end + +lemma (in range_cover) funky_aligned: + "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" + apply (rule aligned_add_aligned) + apply (rule is_aligned_andI1) + apply (rule aligned) + apply (rule is_aligned_mult_triv2) + apply simp + done + +defs canonicalAddressAssert_def: + "canonicalAddressAssert \ AARCH64.canonical_address" + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma inv_untyped_corres': + "\ untypinv_relation ui ui' \ \ + corres (dc \ (=)) + (einvs and valid_untyped_inv ui and ct_active and schact_is_rct) + (invs' and valid_untyped_inv' ui' and ct_active') + (invoke_untyped ui) (invokeUntyped ui')" + apply (cases ui) + apply (rule corres_name_pre) + apply (clarsimp simp only: valid_untyped_inv_wcap + valid_untyped_inv_wcap' + Invocations_A.untyped_invocation.simps + Invocations_H.untyped_invocation.simps + untypinv_relation.simps) + apply (rename_tac cref oref reset ptr ptr' dc us slots dev s s' ao' sz sz' idx idx') + proof - + fix cref reset ptr ptr_base us slots dev ao' sz sz' idx idx' s s' + + let ?ui = "Invocations_A.Retype cref reset ptr_base ptr (APIType_map2 (Inr ao')) us slots dev" + let ?ui' = "Invocations_H.untyped_invocation.Retype + (cte_map cref) reset ptr_base ptr ao' us (map cte_map slots) dev" + + assume invs: "invs (s :: det_state)" "ct_active s" "valid_list s" "valid_sched s" + "schact_is_rct s" + and invs': "invs' s'" "ct_active' s'" + and sr: "(s, s') \ state_relation" + and vui: "valid_untyped_inv_wcap ?ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) s" + (is "valid_untyped_inv_wcap _ (Some ?cap) s") + and vui': "valid_untyped_inv_wcap' ?ui' (Some (UntypedCap dev (ptr && ~~ mask sz') sz' idx')) s'" + assume ui: "ui = ?ui" and ui': "ui' = ?ui'" + + have cte_at: "cte_wp_at ((=) ?cap) cref s" (is "?cte_cond s") + using vui by (simp add:cte_wp_at_caps_of_state) + + have ptr_sz_simp[simp]: "ptr_base = ptr && ~~ mask sz + \ sz' = sz \ idx' = idx \ 2 \ sz" + using cte_at vui vui' sr invs + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule pspace_relation_cte_wp_atI'[OF state_relation_pspace_relation]) + apply (simp add:cte_wp_at_ctes_of) + apply (simp add:invs_valid_objs) + apply (clarsimp simp:is_cap_simps isCap_simps) + apply (frule cte_map_inj_eq) + apply ((erule cte_wp_at_weakenE | simp + | clarsimp simp: cte_wp_at_caps_of_state)+)[5] + apply (clarsimp simp:cte_wp_at_caps_of_state cte_wp_at_ctes_of) + apply (drule caps_of_state_valid_cap,fastforce) + apply (clarsimp simp:valid_cap_def untyped_min_bits_def) + done + + have obj_bits_low_bound[simp]: + "minUntypedSizeBits \ obj_bits_api (APIType_map2 (Inr ao')) us" + using vui + apply clarsimp + apply (cases ao') + apply (simp_all add: obj_bits_api_def slot_bits_def arch_kobj_size_def default_arch_object_def + APIType_map2_def bit_simps untyped_min_bits_def minUntypedSizeBits_def + split: apiobject_type.splits) + done + + have cover: "range_cover ptr sz + (obj_bits_api (APIType_map2 (Inr ao')) us) (length slots)" + and vslot: "slots\ []" + using vui + by (auto simp: 
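+      (* cover and vslot are read off the abstract-side validity assumption vui;
+         range_cover ptr sz (obj_bits_api (APIType_map2 (Inr ao')) us) (length slots)
+         is the arithmetic fact driving the index and overlap reasoning below. *)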
cte_wp_at_caps_of_state) + + have misc'[simp]: + "distinct (map cte_map slots)" + using vui' + by (auto simp: cte_wp_at_ctes_of) + + have intvl_eq[simp]: + "ptr && ~~ mask sz = ptr \ {ptr + of_nat k |k. k < 2 ^ sz} = {ptr..ptr + 2 ^ sz - 1}" + using cover + apply (subgoal_tac "is_aligned (ptr &&~~ mask sz) sz") + apply (rule intvl_range_conv) + apply (simp) + apply (drule range_cover.sz) + apply simp + apply (rule is_aligned_neg_mask,simp) + done + + have delete_objects_rewrite: + "ptr && ~~ mask sz = ptr \ delete_objects ptr sz = + do y \ modify (clear_um {ptr + of_nat k |k. k < 2 ^ sz}); + modify (detype {ptr && ~~ mask sz..ptr + 2 ^ sz - 1}) + od" + using cover + apply (clarsimp simp:delete_objects_def freeMemory_def word_size_def) + apply (subgoal_tac "is_aligned (ptr &&~~ mask sz) sz") + apply (subst mapM_storeWord_clear_um[simplified word_size_def word_size_bits_def]; + clarsimp simp: range_cover_def word_bits_def) + apply (drule_tac z=sz in order_trans[OF obj_bits_low_bound]; + simp add: minUntypedSizeBits_def) + apply (rule is_aligned_neg_mask) + apply simp + done + + have of_nat_length: "(of_nat (length slots)::machine_word) - (1::machine_word) < (of_nat (length slots)::machine_word)" + using vslot + using range_cover.range_cover_le_n_less(1)[OF cover,where p = "length slots"] + apply - + apply (case_tac slots) + apply clarsimp+ + apply (subst add.commute) + apply (subst word_le_make_less[symmetric]) + apply (rule less_imp_neq) + apply (simp add:word_bits_def minus_one_norm) + apply (rule word_of_nat_less) + apply auto + done + + have not_0_ptr[simp]: "ptr\ 0" + using cte_at invs + apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (drule(1) caps_of_state_valid)+ + apply (simp add:valid_cap_def) + done + + have size_eq[simp]: "APIType_capBits ao' us = obj_bits_api (APIType_map2 (Inr ao')) us" + apply (case_tac ao') + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (clarsimp simp: APIType_capBits_def objBits_simps' arch_kobj_size_def default_arch_object_def + obj_bits_api_def APIType_map2_def slot_bits_def pageBitsForSize_def bit_simps)+ + done + + have non_reset_idx_le[simp]: "\ reset \ idx < 2^sz" + using vui + apply (clarsimp simp: cte_wp_at_caps_of_state ) + apply (erule le_less_trans) + apply (rule unat_less_helper) + apply simp + apply (rule and_mask_less') + using cover + apply (clarsimp simp:range_cover_def) + done + + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex usableUntypedRange.simps + + have vc'[simp] : "s' \' capability.UntypedCap dev (ptr && ~~ mask sz) sz idx" + using vui' invs' + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (case_tac cte) + apply clarsimp + apply (erule ctes_of_valid_cap') + apply (simp add:invs_valid_objs') + done + + have nidx[simp]: "ptr + (of_nat (length slots) * 2^obj_bits_api (APIType_map2 (Inr ao')) us) - (ptr && ~~ mask sz) + = (ptr && mask sz) + (of_nat (length slots) * 2^obj_bits_api (APIType_map2 (Inr ao')) us)" + apply (subst word_plus_and_or_coroll2[symmetric,where w = "mask sz" and t = ptr]) + apply simp + done + + have idx_compare'[simp]:"unat ((ptr && mask sz) + (of_nat (length slots)<< obj_bits_api (APIType_map2 (Inr ao')) us)) \ 2 ^ sz" + apply (rule le_trans[OF unat_plus_gt]) + apply (simp add:range_cover.unat_of_nat_n_shift[OF cover] range_cover_unat) + apply (insert range_cover.range_cover_compare_bound[OF cover]) + apply simp + done + + have 
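+    (* Arithmetic no-wrap step: if the updated free offset
+       (ptr && mask sz) + of_nat (length slots) * 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us
+       stays strictly below 2 ^ sz, then ptr + of_nat (length slots) * 2 ^ obj_bits_api ... us
+       cannot wrap to 0, so subtracting 1 yields a strictly smaller word; this is the
+       bound used when comparing the retype region with the usable untyped range. *)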
idx_compare''[simp]: + "unat ((ptr && mask sz) + (of_nat (length slots) * (2::machine_word) ^ obj_bits_api (APIType_map2 (Inr ao')) us)) < 2 ^ sz + \ ptr + of_nat (length slots) * 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us - 1 + < ptr + of_nat (length slots) * 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us" + apply (rule word_leq_le_minus_one,simp) + apply (rule neq_0_no_wrap) + apply (rule machine_word_plus_mono_right_split) + apply (simp add:shiftl_t2n range_cover_unat[OF cover] field_simps) + apply (simp add:range_cover.sz[where 'a=machine_word_len, folded word_bits_def, OF cover])+ + done + + note neg_mask_add_mask = word_plus_and_or_coroll2[symmetric,where w = "mask sz" and t = ptr,symmetric] + + have idx_compare'''[simp]: + "\unat (of_nat (length slots) * (2::machine_word) ^ obj_bits_api (APIType_map2 (Inr ao')) us) < 2 ^ sz; + ptr && ~~ mask sz = ptr\ + \ ptr + of_nat (length slots) * 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us - 1 + < ptr + of_nat (length slots) * 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us " + apply (rule word_leq_le_minus_one,simp) + apply (simp add:is_aligned_neg_mask_eq'[symmetric]) + apply (rule neq_0_no_wrap) + apply (rule machine_word_plus_mono_right_split[where sz = sz]) + apply (simp add:is_aligned_mask)+ + apply (simp add:range_cover.sz[where 'a=machine_word_len, folded word_bits_def, OF cover])+ + done + + have maxDomain:"ksCurDomain s' \ maxDomain" + using invs' + by (simp add:invs'_def valid_state'_def) + + have sz_mask_less: + "unat (ptr && mask sz) < 2 ^ sz" + using range_cover.sz[OF cover] + by (simp add: unat_less_helper and_mask_less_size word_size) + + have ptr_cn[simp]: "canonical_address (ptr && ~~ mask sz)" + using vc' unfolding valid_cap'_def by simp + + have overlap_ranges1: + "{x. ptr \ x \ x \ ptr + 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us + * of_nat (length slots) - 1} \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1}" + apply (rule order_trans[rotated]) + apply (rule range_cover_subset'[OF cover], simp add: vslot) + apply (clarsimp simp: atLeastAtMost_iff field_simps) + done + + have overlap_ranges2: + "idx \ unat (ptr && mask sz) + \ {x. ptr \ x \ x \ ptr + 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us + * of_nat (length slots) - 1} \ {(ptr && ~~ mask sz) + of_nat idx..(ptr && ~~ mask sz) + 2 ^ sz - 1}" + apply (rule order_trans[OF overlap_ranges1]) + apply (clarsimp simp add: atLeastatMost_subset_iff) + apply (rule order_trans, rule word_plus_mono_right) + apply (erule word_of_nat_le) + apply (simp add: add.commute word_plus_and_or_coroll2 word_and_le2) + apply (simp add: add.commute word_plus_and_or_coroll2) + done + + have overlap_ranges: + "{x. 
ptr \ x \ x \ ptr + 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us * of_nat (length slots) - 1} + \ usable_untyped_range (cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))" + apply (cases reset, simp_all add: usable_untyped_range.simps) + apply (rule order_trans, rule overlap_ranges1) + apply (simp add: blah word_and_le2) + apply (rule overlap_ranges2) + apply (cut_tac vui) + apply (clarsimp simp: cte_wp_at_caps_of_state) + done + + have sz_limit[simp]: "sz \ maxUntypedSizeBits" + using vc' unfolding valid_cap'_def by clarsimp + + from ptr_cn sz_limit + have canonical_ptr[simp]: "canonical_address ptr" + unfolding canonical_address_range maxUntypedSizeBits_def canonical_bit_def + by word_bitwise (simp add: word_size) + + note set_cap_free_index_invs_spec = set_free_index_invs[where cap = "cap.UntypedCap + dev (ptr && ~~ mask sz) sz (if reset then 0 else idx)" + ,unfolded free_index_update_def free_index_of_def,simplified] + + note msimp[simp add] = neg_mask_add_mask + note if_split[split del] + show " corres (dc \ (=)) ((=) s) ((=) s') + (invoke_untyped ?ui) + (invokeUntyped ?ui')" + apply (clarsimp simp:invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc) + apply (insert cover) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule corres_whenE, simp) + apply (rule resetUntypedCap_corres[where ui=ui and ui'=ui']) + apply (simp add: ui ui') + apply simp + apply simp + apply (rule corres_symb_exec_l_Ex) + apply (rule_tac F = "cap = cap.UntypedCap dev (ptr && ~~ mask sz) + sz (if reset then 0 else idx)" in corres_gen_asm) + apply (rule corres_add_noop_lhs) + apply (rule corres_split_nor[OF cNodeNoOverlap _ return_wp stateAssert_wp]) + apply (rule corres_add_noop_lhs) + apply (rule corres_split_nor[OF archNoOverlap _ return_wp stateAssert_wp]) + apply (clarsimp simp: canonicalAddressAssert_def) + apply (rule corres_split[OF updateFreeIndex_corres]) + apply (simp add:isCap_simps)+ + apply (clarsimp simp:getFreeIndex_def bits_of_def shiftL_nat shiftl_t2n + free_index_of_def) + apply (insert range_cover.range_cover_n_less[OF cover] vslot) + apply (rule createNewObjects_corres_helper) + apply simp+ + apply (simp add: insertNewCaps_def) + apply (rule corres_split_retype_createNewCaps[where sz = sz,OF corres_rel_imp]) + apply (rule inv_untyped_corres_helper1) + apply simp + apply simp + apply ((wp retype_region_invs_extras[where sz = sz] + retype_region_plain_invs [where sz = sz] + retype_region_descendants_range_ret[where sz = sz] + retype_region_caps_overlap_reserved_ret[where sz = sz] + retype_region_cte_at_other[where sz = sz] + retype_region_distinct_sets[where sz = sz] + retype_region_ranges[where p=cref and sz = sz] + retype_ret_valid_caps [where sz = sz] + retype_region_arch_objs [where sza = "\_. 
sz"] + hoare_vcg_const_Ball_lift + set_tuple_pick distinct_tuple_helper + retype_region_obj_at_other3[where sz = sz] + | assumption)+)[1] + apply (wp set_tuple_pick createNewCaps_cte_wp_at'[where sz= sz] + hoare_vcg_ex_lift distinct_tuple_helper + createNewCaps_parent_helper [where p="cte_map cref" and sz = sz] + createNewCaps_valid_pspace_extras [where ptr=ptr and sz = sz] + createNewCaps_ranges'[where sz = sz] + hoare_vcg_const_Ball_lift createNewCaps_valid_cap'[where sz = sz] + createNewCaps_descendants_range_ret'[where sz = sz] + createNewCaps_caps_overlap_reserved_ret'[where sz = sz]) + apply clarsimp + apply (erule cte_wp_at_weakenE') + apply (case_tac c, simp) + apply hypsubst + apply (case_tac c,clarsimp simp:isCap_simps) + apply (clarsimp simp: getFreeIndex_def is_cap_simps bits_of_def shiftL_nat) + apply (clarsimp simp:conj_comms) + apply (strengthen invs_mdb invs_valid_objs + invs_valid_pspace invs_arch_state invs_psp_aligned + caps_region_kernel_window_imp[where p=cref] + invs_cap_refs_in_kernel_window)+ + apply (clarsimp simp:conj_comms bits_of_def) + apply (wp set_cap_free_index_invs_spec set_cap_caps_no_overlap set_cap_no_overlap) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post[OF set_cap_sets]) + apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (wp set_cap_no_overlap hoare_vcg_ball_lift + set_cap_free_index_invs_spec + set_cap_descendants_range_in + set_untyped_cap_caps_overlap_reserved[where + idx="if reset then 0 else idx"] + set_cap_cte_wp_at + | strengthen exI[where x=cref])+ + apply (clarsimp simp:conj_comms ball_conj_distrib simp del:capFreeIndex_update.simps) + apply (strengthen invs_pspace_aligned' invs_pspace_distinct' + invs_valid_pspace' invs_arch_state' + imp_consequent[where Q = "(\x. x \ cte_map ` set slots)"] + | clarsimp simp: conj_comms simp del: capFreeIndex_update.simps)+ + apply ((wp updateFreeIndex_forward_invs' updateFreeIndex_caps_overlap_reserved + updateFreeIndex_caps_no_overlap'' updateFreeIndex_pspace_no_overlap' + hoare_vcg_const_Ball_lift updateFreeIndex_cte_wp_at + updateFreeIndex_descendants_range_in')+)[1] + apply clarsimp + apply (clarsimp simp:conj_comms) + apply (strengthen invs_mdb invs_valid_objs + invs_valid_pspace invs_arch_state invs_psp_aligned + invs_distinct) + apply (clarsimp simp:conj_comms ball_conj_distrib ex_in_conv) + apply ((rule validE_R_validE)?, + rule_tac Q'="\_ s. valid_etcbs s \ valid_list s \ invs s \ ct_active s + \ valid_untyped_inv_wcap ui + (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s + \ (reset \ pspace_no_overlap {ptr && ~~ mask sz..(ptr && ~~ mask sz) + 2 ^ sz - 1} s) + " in hoare_strengthen_postE_R) + apply (simp add: whenE_def, wp) + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule reset_untyped_cap_invs_etc, auto)[1] + apply wp + apply (clarsimp simp: ui cte_wp_at_caps_of_state + bits_of_def untyped_range.simps) + apply (frule(1) valid_global_refsD2[OF _ invs_valid_global_refs]) + apply (cut_tac cref="cref" and reset=reset + in invoke_untyped_proofs.intro, + simp_all add: cte_wp_at_caps_of_state)[1] + apply (rule conjI, (assumption | rule refl))+ + apply (simp split: if_split) + + apply (simp add: invoke_untyped_proofs.simps) + apply (strengthen if_split[where P="\v. v \ unat x" for x, THEN iffD2] + exI[where x=cref]) + apply (simp add: arg_cong[OF mask_out_sub_mask, where f="\y. x - y" for x] + field_simps invoke_untyped_proofs.idx_le_new_offs + if_split[where P="\v. 
v \ unat x" for x]) + apply (frule range_cover.sz(1), fold word_bits_def) + apply (frule cte_wp_at_pspace_no_overlapI, + simp add: cte_wp_at_caps_of_state, simp split: if_split, + simp add: invoke_untyped_proofs.szw) + apply (simp add: field_simps conj_comms ex_in_conv + cte_wp_at_caps_of_state + in_get_cap_cte_wp_at + atLeastatMost_subset_iff[where b=x and d=x for x] + word_and_le2) + apply (intro conjI impI) + + (* offs *) + apply (drule(1) invoke_untyped_proofs.idx_le_new_offs) + apply simp + + (* usable untyped range *) + apply (simp add: shiftL_nat shiftl_t2n overlap_ranges) + + apply (rule order_trans, erule invoke_untyped_proofs.subset_stuff) + apply (simp add: blah word_and_le2) + + apply (drule invoke_untyped_proofs.usable_range_disjoint) + apply (clarsimp simp: field_simps mask_out_sub_mask shiftl_t2n) + + apply ((rule validE_validE_R)?, rule hoare_strengthen_postE, + rule whenE_reset_resetUntypedCap_invs_etc[where ptr="ptr && ~~ mask sz" + and ptr'=ptr and sz=sz and idx=idx and ui=ui' and dev=dev]) + + prefer 2 + apply simp + apply clarsimp + apply (simp only: ui') + apply (frule(2) invokeUntyped_proofs.intro) + apply (clarsimp simp: cte_wp_at_ctes_of + invokeUntyped_proofs.caps_no_overlap' + invokeUntyped_proofs.ps_no_overlap' + invokeUntyped_proofs.descendants_range + if_split[where P="\v. v \ getFreeIndex x y" for x y] + empty_descendants_range_in' + invs_pspace_aligned' invs_pspace_distinct' + invs_ksCurDomain_maxDomain' + cong: if_cong) + apply (strengthen refl) + apply (frule invokeUntyped_proofs.idx_le_new_offs) + apply (frule invokeUntyped_proofs.szw) + apply (frule invokeUntyped_proofs.descendants_range(2), simp) + apply (clarsimp simp: getFreeIndex_def conj_comms shiftL_nat + is_aligned_weaken[OF range_cover.funky_aligned] + invs_valid_pspace' isCap_simps + arg_cong[OF mask_out_sub_mask, where f="\y. x - y" for x] + field_simps) + + apply (intro conjI) + (* pspace_no_overlap' *) + apply (cases reset, simp_all)[1] + apply (rule order_trans[rotated], + erule invokeUntyped_proofs.idx_compare') + apply (simp add: shiftl_t2n mult.commute) + apply (drule invokeUntyped_proofs.subset_stuff, simp, + erule order_trans, simp add: blah word_and_le2 add_mask_fold) + apply (auto simp: add_mask_fold split: if_split)[1] + apply (drule invokeUntyped_proofs.usableRange_disjoint, simp) + apply (clarsimp simp only: pred_conj_def invs ui) + apply (strengthen vui) + apply (cut_tac vui invs invs') + apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs schact_is_rct_def) + apply (cut_tac vui' invs') + apply (clarsimp simp: ui cte_wp_at_ctes_of if_apply_def2 ui') + done +qed + +lemmas inv_untyped_corres = inv_untyped_corres' + +crunches insertNewCap, doMachineOp + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + (wp: crunch_wps) + +lemma sts_valid_untyped_inv': + "\valid_untyped_inv' ui\ setThreadState st t \\rv. valid_untyped_inv' ui\" + apply (cases ui, simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF setThreadState_ksInterruptState]) + apply (wp hoare_vcg_const_Ball_lift hoare_vcg_ex_lift | simp)+ + done + +crunch nosch[wp]: invokeUntyped "\s. P (ksSchedulerAction s)" + (simp: crunch_simps zipWithM_x_mapM + wp: crunch_wps unless_wp mapME_x_inv_wp preemptionPoint_inv) + +crunch no_0_obj'[wp]: insertNewCap no_0_obj' + (wp: crunch_wps) + +lemma insertNewCap_valid_pspace': + "\\s. valid_pspace' s \ s \' cap + \ slot \ parent \ caps_overlap_reserved' (untypedRange cap) s + \ cte_wp_at' (\cte. 
isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) parent s + \ \ isZombie cap \ descendants_range' cap parent (ctes_of s)\ + insertNewCap parent slot cap + \\rv. valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (wp insertNewCap_valid_mdb) + apply simp_all + done + +crunches insertNewCap + for tcb'[wp]: "tcb_at' t" + and inQ[wp]: "obj_at' (inQ d p) t" + and norqL1[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and norqL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" + and idle'[wp]: "valid_idle'" + and global_refs': "\s. P (global_refs' s)" + and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + and irq_states' [wp]: valid_irq_states' + and irqs_masked' [wp]: irqs_masked' + and valid_machine_state'[wp]: valid_machine_state' + and pspace_domain_valid[wp]: pspace_domain_valid + and ct_not_inQ[wp]: "ct_not_inQ" + and tcbState_inv[wp]: "obj_at' (\tcb. P (tcbState tcb)) t" + and tcbDomain_inv[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t" + and tcbPriority_inv[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t" + and sched_queues_projs[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + and tcbQueueds_of[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps) + +crunch if_unsafe_then_cap'[wp]: updateNewFreeIndex "if_unsafe_then_cap'" + +lemma insertNewCap_ifunsafe'[wp]: + "\if_unsafe_then_cap' and ex_cte_cap_to' slot\ + insertNewCap parent slot cap + \\rv s. if_unsafe_then_cap' s\" + apply (simp add: insertNewCap_def) + apply (rule hoare_pre) + apply (wp getCTE_wp' | clarsimp simp: ifunsafe'_def3)+ + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of cteCaps_of_def) + apply (drule_tac x=cref in spec) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=crefa in exI, fastforce) + apply clarsimp + apply (rule_tac x=cref' in exI, fastforce) + done + +crunch if_live_then_nonz_cap'[wp]: updateNewFreeIndex "if_live_then_nonz_cap'" + +lemma insertNewCap_iflive'[wp]: + "\if_live_then_nonz_cap'\ insertNewCap parent slot cap \\rv. if_live_then_nonz_cap'\" + apply (simp add: insertNewCap_def) + apply (wp setCTE_iflive' getCTE_wp') + apply (clarsimp elim!: cte_wp_at_weakenE') + done + +lemma insertNewCap_cte_wp_at'': + "\cte_wp_at' (\cte. P (cteCap cte)) p and K (\ P NullCap)\ + insertNewCap parent slot cap + \\rv s. cte_wp_at' (P \ cteCap) p s\" + apply (simp add: insertNewCap_def tree_cte_cteCap_eq) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + done + +lemmas insertNewCap_cte_wp_at' = insertNewCap_cte_wp_at''[unfolded o_def] + +lemma insertNewCap_cap_to'[wp]: + "\ex_cte_cap_to' p\ insertNewCap parent slot cap \\rv. ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node'[OF insertNewCap_ksInterrupt]) + apply (wp hoare_vcg_ex_lift insertNewCap_cte_wp_at') + apply clarsimp + done + +lemma insertNewCap_nullcap: + "\P and cte_wp_at' (\cte. cteCap cte = NullCap) slot\ insertNewCap parent slot cap \Q\ + \ \P\ insertNewCap parent slot cap \Q\" + apply (clarsimp simp: valid_def) + apply (subgoal_tac "cte_wp_at' (\cte. cteCap cte = NullCap) slot s") + apply fastforce + apply (clarsimp simp: insertNewCap_def in_monad cte_wp_at_ctes_of liftM_def + dest!: use_valid [OF _ getCTE_sp[where P="(=) s" for s], OF _ refl]) + done + +lemma insertNewCap_valid_global_refs': + "\valid_global_refs' and + cte_wp_at' (\cte. 
capRange cap \ capRange (cteCap cte) + \ capBits cap \ capBits (cteCap cte)) parent\ + insertNewCap parent slot cap + \\rv. valid_global_refs'\" + apply (simp add: valid_global_refs'_def valid_refs'_cteCaps valid_cap_sizes_cteCaps) + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=global_refs', OF insertNewCap_global_refs']) + apply (rule hoare_use_eq [where f=gsMaxObjectSize]) + apply wp+ + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def ball_ran_eq) + apply (frule power_increasing[where a=2], simp) + apply (blast intro: order_trans) + done + +lemma insertNewCap_valid_irq_handlers: + "\valid_irq_handlers' and (\s. \irq. cap = IRQHandlerCap irq \ irq_issued' irq s)\ + insertNewCap parent slot cap + \\rv. valid_irq_handlers'\" + apply (simp add: insertNewCap_def valid_irq_handlers'_def irq_issued'_def) + apply (wp | wp (once) hoare_use_eq[where f=ksInterruptState, OF updateNewFreeIndex_ksInterrupt])+ + apply (simp add: cteCaps_of_def) + apply (wp | wp (once) hoare_use_eq[where f=ksInterruptState, OF setCTE_ksInterruptState] + getCTE_wp)+ + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of ran_def) + apply auto + done + +lemma insertNewCap_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain' and ct_active'\ insertNewCap parent slot cap \\_. ct_idle_or_in_cur_domain'\" +apply (wp ct_idle_or_in_cur_domain'_lift_futz[where Q=\]) +apply (rule_tac Q="\_. obj_at' (\tcb. tcbState tcb \ Structures_H.thread_state.Inactive) t and obj_at' (\tcb. d = tcbDomain tcb) t" + in hoare_strengthen_post) +apply (wp | clarsimp elim: obj_at'_weakenE)+ +apply (auto simp: obj_at'_def) +done + +crunch ksDomScheduleIdx[wp]: insertNewCap "\s. P (ksDomScheduleIdx s)" + (wp: crunch_simps hoare_drop_imps) + +lemma capRange_subset_capBits: + "capAligned cap \ capAligned cap' + \ capRange cap \ capRange cap' + \ capRange cap \ {} + \ capBits cap \ capBits cap'" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (simp add: capRange_def capAligned_def is_aligned_no_overflow + split: if_split_asm del: atLeastatMost_subset_iff) + apply (frule_tac c="capUntypedPtr cap" in subsetD) + apply (simp only: mask_in_range[symmetric]) + apply (simp add: is_aligned_neg_mask_eq) + apply (drule_tac c="(capUntypedPtr cap && ~~ mask (capBits cap)) + || (~~ capUntypedPtr cap' && mask (capBits cap))" in subsetD) + apply (simp_all only: mask_in_range[symmetric]) + apply (simp add: word_ao_dist is_aligned_neg_mask_eq) + apply (simp add: word_ao_dist) + apply (cases "capBits cap = 0") + apply simp + apply (drule_tac f="\x. x !! (capBits cap - 1)" + and x="a || b" for a b in arg_cong) + apply (simp add: word_ops_nth_size word_bits_def word_size) + apply auto + done + +lemma insertNewCap_urz[wp]: + "\untyped_ranges_zero' and valid_objs' and valid_mdb'\ + insertNewCap parent slot cap \\rv. 
untyped_ranges_zero'\" + apply (simp add: insertNewCap_def updateNewFreeIndex_def) + apply (wp getCTE_cteCap_wp + | simp add: updateTrackedFreeIndex_def getSlotCap_def case_eq_if_isUntypedCap + split: option.split split del: if_split + | wps | wp (once) getCTE_wp')+ + apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) + apply (strengthen untyped_ranges_zero_fun_upd[mk_strg I E]) + apply (intro conjI impI; clarsimp simp: isCap_simps) + apply (auto simp add: cteCaps_of_def untypedZeroRange_def isCap_simps) + done + +crunches insertNewCap + for valid_arch'[wp]: valid_arch_state' + (wp: crunch_wps) + +lemma insertNewCap_invs': + "\invs' and ct_active' + and valid_cap' cap + and cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) parent + and K (\ isZombie cap) and (\s. descendants_range' cap parent (ctes_of s)) + and caps_overlap_reserved' (untypedRange cap) + and ex_cte_cap_to' slot + and (\s. ksIdleThread s \ capRange cap) + and (\s. \irq. cap = IRQHandlerCap irq \ irq_issued' irq s)\ + insertNewCap parent slot cap + \\rv. invs'\" + apply (rule insertNewCap_nullcap) + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp insertNewCap_valid_pspace' sch_act_wf_lift + cur_tcb_lift tcb_in_cur_domain'_lift valid_bitmaps_lift + insertNewCap_valid_global_refs' sym_heap_sched_pointers_lift + valid_irq_node_lift insertNewCap_valid_irq_handlers) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid[rotated, where p=parent, OF valid_pspace_valid_objs']) + apply (fastforce simp: cte_wp_at_ctes_of) + apply (auto simp: isCap_simps sameRegionAs_def3 + intro!: capRange_subset_capBits + elim: valid_capAligned) + done + +lemma insertNewCap_irq_issued'[wp]: + "\\s. P (irq_issued' irq s)\ insertNewCap parent slot cap \\rv s. P (irq_issued' irq s)\" + by (simp add: irq_issued'_def, wp) + +lemma insertNewCap_ct_in_state'[wp]: + "\ct_in_state' p\insertNewCap parent slot cap \\rv. ct_in_state' p\" + unfolding ct_in_state'_def + apply (rule hoare_pre) + apply wps + apply wp + apply simp + done + +lemma zipWithM_x_insertNewCap_invs'': + "\\s. invs' s \ ct_active' s \ (\tup \ set ls. s \' snd tup) + \ cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + (\tup \ set ls. sameRegionAs (cteCap cte) (snd tup))) parent s + \ (\tup \ set ls. \ isZombie (snd tup)) + \ (\tup \ set ls. ex_cte_cap_to' (fst tup) s) + \ (\tup \ set ls. descendants_range' (snd tup) parent (ctes_of s)) + \ (\tup \ set ls. ksIdleThread s \ capRange (snd tup)) + \ (\tup \ set ls. caps_overlap_reserved' (capRange (snd tup)) s) + \ distinct_sets (map capRange (map snd ls)) + \ (\irq. IRQHandlerCap irq \ set (map snd ls) \ irq_issued' irq s) + \ distinct (map fst ls)\ + mapM (\(x, y). insertNewCap parent x y) ls + \\rv. invs'\" + apply (induct ls) + apply (simp add: mapM_def sequence_def) + apply (wp, simp) + apply (simp add: mapM_Cons) + including no_pre apply wp + apply (thin_tac "valid P f Q" for P f Q) + apply clarsimp + apply (rule hoare_pre) + apply (wp insertNewCap_invs' + hoare_vcg_const_Ball_lift + insertNewCap_cte_wp_at' insertNewCap_ranges + hoare_vcg_all_lift insertNewCap_pred_tcb_at')+ + apply (clarsimp simp: cte_wp_at_ctes_of invs_mdb' invs_valid_objs' dest!:valid_capAligned) + apply (drule caps_overlap_reserved'_subseteq[OF _ untypedRange_in_capRange]) + apply (auto simp:comp_def) + done + +lemma createNewCaps_not_isZombie[wp]: + "\\\ createNewCaps ty ptr bits sz d \\rv s. (\cap \ set rv. 
\ isZombie cap)\" + apply (simp add: createNewCaps_def toAPIType_def + cong: option.case_cong if_cong apiobject_type.case_cong) + apply (wpsimp wp: undefined_valid simp: isCap_simps) + done + +lemma createNewCaps_cap_to': + "\\s. ex_cte_cap_to' p s \ 0 < n + \ range_cover ptr sz (APIType_capBits ty us) n + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n us d + \\rv. ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_def) + apply (wp hoare_vcg_ex_lift + hoare_use_eq_irq_node' [OF createNewCaps_ksInterrupt + createNewCaps_cte_wp_at']) + apply fastforce + done + +lemma createNewCaps_idlethread_ranges[wp]: + "\\s. 0 < n \ range_cover ptr sz (APIType_capBits tp us) n + \ ksIdleThread s \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1}\ + createNewCaps tp ptr n us d + \\rv s. \cap\set rv. ksIdleThread s \ capRange cap\" + apply (rule hoare_as_subst [OF createNewCaps_it]) + apply (rule hoare_assume_pre) + apply (rule hoare_chain, rule createNewCaps_range_helper2) + apply fastforce + apply blast + done + +lemma createNewCaps_IRQHandler[wp]: + "\\\ + createNewCaps tp ptr sz us d + \\rv s. IRQHandlerCap irq \ set rv \ P rv s\" + apply (simp add: createNewCaps_def split del: if_split) + apply (rule hoare_pre) + apply (wp | wpc | simp add: image_def | rule hoare_pre_cont)+ + done + +lemma createNewCaps_ct_active': + "\ct_active' and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and K (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n)\ + createNewCaps ty ptr n us d + \\_. ct_active'\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_pre) + apply wps + apply (wp createNewCaps_pred_tcb_at'[where sz=sz]) + apply simp + done + +crunch gsMaxObjectSize[wp]: deleteObjects "\s. P (gsMaxObjectSize s)" + (simp: unless_def wp: crunch_wps) + +crunch gsMaxObjectSize[wp]: updateFreeIndex "\s. P (gsMaxObjectSize s)" + +crunch ksIdleThread[wp]: updateFreeIndex "\s. P (ksIdleThread s)" + +lemma invokeUntyped_invs'': + assumes insertNew_Q[wp]: "\p cref cap. + \Q\ insertNewCap p cref cap \\_. Q\" + assumes createNew_Q: "\tp ptr n us sz dev. \\s. Q s + \ range_cover ptr sz (APIType_capBits tp us) n + \ (tp = APIObjectType ArchTypes_H.apiobject_type.CapTableObject \ 0 < us) + \ 0 < n \ valid_pspace' s \ pspace_no_overlap' ptr sz s\ + createNewCaps tp ptr n us dev \\_. Q\" + assumes set_free_Q[wp]: "\slot idx. \invs' and Q\ updateFreeIndex slot idx \\_.Q\" + assumes reset_Q: "\Q'\ resetUntypedCap (case ui of Invocations_H.Retype src_slot _ _ _ _ _ _ _ \ src_slot) \\_. Q\" + shows "\invs' and valid_untyped_inv' ui + and (\s. (case ui of Invocations_H.Retype _ reset _ _ _ _ _ _ \ reset) \ Q' s) + and Q and ct_active'\ + invokeUntyped ui + \\rv. 
invs' and Q\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp only: pred_conj_def valid_untyped_inv_wcap') + proof - + fix s sz idx + assume vui1: "valid_untyped_inv_wcap' ui + (Some (case ui of + Invocations_H.untyped_invocation.Retype slot reset ptr_base ptr ty us slots d \ + capability.UntypedCap d (ptr && ~~ mask sz) sz idx)) s" + assume misc: "invs' s" "Q s" "ct_active' s" + "(case ui of + Invocations_H.untyped_invocation.Retype x reset _ _ _ _ _ _ \ reset) \ + Q' s" + + obtain cref reset ptr tp us slots dev + where pf: "invokeUntyped_proofs s cref reset (ptr && ~~ mask sz) ptr tp us slots sz idx dev" + and ui: "ui = Invocations_H.Retype cref reset (ptr && ~~ mask sz) ptr tp us slots dev" + using vui1 misc + apply (cases ui, simp only: Invocations_H.untyped_invocation.simps) + apply (frule(2) invokeUntyped_proofs.intro) + apply clarsimp + apply (unfold cte_wp_at_ctes_of) + apply (drule meta_mp; clarsimp) + done + + note vui = vui1[simplified ui Invocations_H.untyped_invocation.simps] + + have cover: "range_cover ptr sz (APIType_capBits tp us) (length slots)" + and slots: "cref \ set slots" "distinct slots" "slots \ []" + and tps: "tp = APIObjectType ArchTypes_H.apiobject_type.CapTableObject \ 0 < us" + "tp = APIObjectType ArchTypes_H.apiobject_type.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits" + using vui + by (clarsimp simp: ui cte_wp_at_ctes_of)+ + + note not_0_ptr[simp] = invokeUntyped_proofs.not_0_ptr [OF pf] + note subset_stuff[simp] = invokeUntyped_proofs.subset_stuff[OF pf] + + have non_detype_idx_le[simp]: "~ reset \ idx < 2^sz" + using vui ui + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule le_less_trans) + apply (rule unat_less_helper) + apply simp + apply (rule le_less_trans) + apply (rule word_and_le1) + apply (simp add:mask_def) + apply (rule word_leq_le_minus_one) + apply simp + apply (clarsimp simp:range_cover_def) + done + + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff Int_atLeastAtMost + atLeastatMost_empty_iff split_paired_Ex usableUntypedRange.simps + note descendants_range[simp] = invokeUntyped_proofs.descendants_range[OF pf] + note vc'[simp] = invokeUntyped_proofs.vc'[OF pf] + note ps_no_overlap'[simp] = invokeUntyped_proofs.ps_no_overlap'[OF pf] + note caps_no_overlap'[simp] = invokeUntyped_proofs.caps_no_overlap'[OF pf] + note ex_cte_no_overlap' = invokeUntyped_proofs.ex_cte_no_overlap'[OF pf] + note cref_inv = invokeUntyped_proofs.cref_inv[OF pf] + note slots_invD = invokeUntyped_proofs.slots_invD[OF pf] + note nidx[simp] = add_minus_neg_mask[where ptr = ptr] + note idx_compare' = invokeUntyped_proofs.idx_compare'[OF pf] + note ptr_cn[simp] = invokeUntyped_proofs.ptr_cn[OF pf] + note sz_limit[simp] = invokeUntyped_proofs.sz_limit[OF pf] + + have valid_global_refs': "valid_global_refs' s" + using misc by auto + + have mapM_insertNewCap_Q: + "\caps. \Q\ mapM (\(x, y). insertNewCap cref x y) (zip slots caps) \\rv. Q\" + by (wp mapM_wp' | clarsimp)+ + + note reset_Q' = reset_Q[simplified ui, simplified] + + note neg_mask_add_mask = word_plus_and_or_coroll2[symmetric,where w = "mask sz" and t = ptr,symmetric] + note msimp[simp add] = misc neg_mask_add_mask + show "\(=) s\ invokeUntyped ui \\rv s. invs' s \ Q s\" + including no_pre + apply (clarsimp simp:invokeUntyped_def getSlotCap_def ui) + apply (rule validE_valid) + apply (rule hoare_pre) + apply (rule_tac Q'="\_ s. 
invs' s \ Q s \ ct_active' s + \ valid_untyped_inv_wcap' ui + (Some (UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s + \ (reset \ pspace_no_overlap' (ptr && ~~ mask sz) sz s)" + in bindE_wp_fwd) + apply (simp only: whenE_def) + apply wp + apply (rule hoare_strengthen_postE, rule combine_validE, + rule resetUntypedCap_invs_etc, rule valid_validE, rule reset_Q') + apply (clarsimp simp only: if_True) + apply auto[1] + apply simp + apply wp[1] + prefer 2 + apply (cut_tac vui1 misc) + apply (clarsimp simp: ui cte_wp_at_ctes_of simp del: misc) + apply auto[1] + apply (rule hoare_pre) + apply (wp createNewObjects_wp_helper[where sz = sz]) + apply (simp add: slots)+ + apply (rule cover) + apply (simp add: slots)+ + apply (clarsimp simp:insertNewCaps_def) + apply (wp zipWithM_x_insertNewCap_invs'' + set_tuple_pick distinct_tuple_helper + hoare_vcg_const_Ball_lift + createNewCaps_invs'[where sz = sz] + createNewCaps_valid_cap[where sz = sz,OF cover] + createNewCaps_parent_helper[where sz = sz] + createNewCaps_cap_to'[where sz = sz] + createNewCaps_descendants_range_ret'[where sz = sz] + createNewCaps_caps_overlap_reserved_ret'[where sz = sz] + createNewCaps_ranges[where sz = sz] + createNewCaps_ranges'[where sz = sz] + createNewCaps_IRQHandler + createNewCaps_ct_active'[where sz=sz] + mapM_insertNewCap_Q + | simp add: zipWithM_x_mapM slots tps)+ + apply (wp hoare_vcg_all_lift) + apply (wp hoare_strengthen_post[OF createNewCaps_IRQHandler]) + apply (intro impI) + apply (erule impE) + apply (erule(1) snd_set_zip_in_set) + apply (simp add: conj_comms, wp createNew_Q[where sz=sz]) + apply (wp hoare_strengthen_post[OF createNewCaps_range_helper[where sz = sz]]) + apply (clarsimp simp: slots) + apply (clarsimp simp:conj_comms ball_conj_distrib pred_conj_def + simp del:capFreeIndex_update.simps) + apply (strengthen invs_pspace_aligned' invs_pspace_distinct' + invs_valid_pspace' invs_arch_state' + imp_consequent[where Q = "(\x. x \ set slots)"] + | clarsimp simp: conj_comms simp del: capFreeIndex_update.simps)+ + apply (wp updateFreeIndex_forward_invs' updateFreeIndex_caps_overlap_reserved + updateFreeIndex_caps_no_overlap'' updateFreeIndex_pspace_no_overlap' + hoare_vcg_const_Ball_lift + updateFreeIndex_cte_wp_at + updateCap_cte_cap_wp_to') + apply (wp updateFreeIndex_caps_overlap_reserved + updateFreeIndex_descendants_range_in' getCTE_wp | simp)+ + apply (clarsimp simp only: ui) + apply (frule(2) invokeUntyped_proofs.intro) + apply (frule invokeUntyped_proofs.idx_le_new_offs) + apply (frule invokeUntyped_proofs.szw) + apply (frule invokeUntyped_proofs.descendants_range(2), simp) + apply (frule invokeUntyped_proofs.idx_compare') + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps getFreeIndex_def + shiftL_nat shiftl_t2n mult.commute + if_split[where P="\x. x \ unat v" for v] + invs_valid_pspace' invs_ksCurDomain_maxDomain' + invokeUntyped_proofs.caps_no_overlap' + invokeUntyped_proofs.usableRange_disjoint + split del: if_split) + apply (strengthen refl) + apply simp + apply (intro conjI; assumption?) 
+ apply (erule is_aligned_weaken[OF range_cover.funky_aligned]) + apply (simp add: APIType_capBits_def objBits_simps' bit_simps untypedBits_defs + split: object_type.split apiobject_type.split)[1] + apply (cases reset) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: invokeUntyped_proofs.ps_no_overlap') + apply (drule invs_valid_global') + apply (clarsimp simp: valid_global_refs'_def cte_at_valid_cap_sizes_0) + apply (auto)[1] + apply (frule valid_global_refsD', clarsimp) + apply (clarsimp simp: Int_commute) + apply (erule disjoint_subset2[rotated]) + apply (simp add: blah word_and_le2) + apply (rule order_trans, erule invokeUntyped_proofs.subset_stuff) + apply (simp add: blah word_and_le2 add_mask_fold) + apply (frule valid_global_refsD2', clarsimp) + apply (clarsimp simp: global_refs'_def) + apply (erule notE, erule subsetD[rotated], simp add: blah word_and_le2) + done +qed + +lemma invokeUntyped_invs'[wp]: + "\invs' and valid_untyped_inv' ui and ct_active'\ + invokeUntyped ui + \\rv. invs'\" + apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_TrueI, simplified]) + apply auto + done + +crunch pred_tcb_at'[wp]: updateFreeIndex "pred_tcb_at' pr P p" + +lemma resetUntypedCap_st_tcb_at': + "\invs' and st_tcb_at' (P and ((\) Inactive) and ((\) IdleThreadState)) t + and cte_wp_at' (\cp. isUntypedCap (cteCap cp)) slot + and ct_active' and sch_act_simple and (\s. descendants_of' slot (ctes_of s) = {})\ + resetUntypedCap slot + \\_. st_tcb_at' P t\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply (simp add: resetUntypedCap_def) + apply (rule hoare_pre) + apply (wp mapME_x_inv_wp preemptionPoint_inv + deleteObjects_st_tcb_at'[where p=slot] getSlotCap_wp + | simp add: unless_def + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (strengthen refl) + apply (rule exI, strengthen refl) + apply (frule cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def empty_descendants_range_in' + descendants_range'_def2 + elim!: pred_tcb'_weakenE) + done + +lemma inv_untyp_st_tcb_at'[wp]: + "\invs' and st_tcb_at' (P and ((\) Inactive) and ((\) IdleThreadState)) tptr + and valid_untyped_inv' ui and ct_active'\ + invokeUntyped ui + \\rv. st_tcb_at' P tptr\" + apply (rule hoare_pre) + apply (rule hoare_strengthen_post) + apply (rule invokeUntyped_invs''[where Q="st_tcb_at' P tptr"]; + wp createNewCaps_pred_tcb_at') + apply (auto simp: valid_pspace'_def)[1] + apply (wp resetUntypedCap_st_tcb_at' | simp)+ + apply (cases ui, clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply (clarsimp elim!: pred_tcb'_weakenE) + done + +lemma inv_untyp_tcb'[wp]: + "\invs' and st_tcb_at' active' tptr + and valid_untyped_inv' ui and ct_active'\ + invokeUntyped ui + \\rv. tcb_at' tptr\" + apply (rule hoare_chain [OF inv_untyp_st_tcb_at'[where tptr=tptr and P="\"]]) + apply (clarsimp elim!: pred_tcb'_weakenE) + apply fastforce + apply (clarsimp simp: pred_tcb_at'_def) + done + +crunch ksInterruptState_eq[wp]: invokeUntyped "\s. P (ksInterruptState s)" + (wp: crunch_wps mapME_x_inv_wp preemptionPoint_inv + simp: crunch_simps unless_def) + +crunches deleteObjects, updateFreeIndex + for valid_irq_states'[wp]: "valid_irq_states'" + (wp: doMachineOp_irq_states' crunch_wps + simp: freeMemory_def no_irq_storeWord unless_def) + +lemma resetUntypedCap_IRQInactive: + "\valid_irq_states'\ + resetUntypedCap slot + \\_ _. True\, \\rv s. 
intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + (is "\?P\ resetUntypedCap slot \?Q\,\?E\") + apply (simp add: resetUntypedCap_def) + apply (rule hoare_pre) + apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_strengthen_postE] + doMachineOp_irq_states' preemptionPoint_inv hoare_drop_imps + | simp add: no_irq_clearMemory if_apply_def2)+ + done + +lemma inv_untyped_IRQInactive: + "\valid_irq_states'\ + invokeUntyped ui + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + unfolding invokeUntyped_def + by (wpsimp wp: whenE_wp resetUntypedCap_IRQInactive) + +end +end diff --git a/proof/refine/AARCH64/VSpace_R.thy b/proof/refine/AARCH64/VSpace_R.thy new file mode 100644 index 0000000000..731f4f6de6 --- /dev/null +++ b/proof/refine/AARCH64/VSpace_R.thy @@ -0,0 +1,3049 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + AARCH64 VSpace refinement +*) + +theory VSpace_R +imports TcbAcc_R +begin + +lemma cteCaps_of_ctes_of_lift: + "(\P. \\s. P (ctes_of s)\ f \\_ s. P (ctes_of s)\) \ \\s. P (cteCaps_of s) \ f \\_ s. P (cteCaps_of s)\" + unfolding cteCaps_of_def . + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition + "vspace_at_asid' vs asid \ \s. \ap pool entry. + armKSASIDTable (ksArchState s) (ucast (asid_high_bits_of (ucast asid))) = Some ap \ + ko_at' (ASIDPool pool) ap s \ + pool (ucast (asid_low_bits_of (ucast asid))) = Some entry \ + apVSpace entry = vs \ + page_table_at' VSRootPT_T vs s \ + gsPTTypes (ksArchState s) vs = Some VSRootPT_T" + +lemma findVSpaceForASID_vs_at_wp: + "\\s. \pm. asid \ 0 \ asid_wf asid \ vspace_at_asid' pm asid s \ P pm s\ + findVSpaceForASID asid + \P\,-" + unfolding findVSpaceForASID_def + apply (wpsimp wp: getASID_wp simp: checkPTAt_def getASIDPoolEntry_def getPoolPtr_def) + apply (fastforce simp: asid_low_bits_of_def ucast_ucast_a is_down ucast_ucast_mask + asid_low_bits_def asidRange_def mask_2pm1[symmetric] + le_mask_asidBits_asid_wf vspace_at_asid'_def page_table_at'_def) + done + +crunches findVSpaceForASID, haskell_fail + for inv[wp]: "P" + (simp: const_def crunch_simps wp: loadObject_default_inv crunch_wps ignore_del: getObject) + +lemma asidBits_asid_bits[simp]: + "asidBits = asid_bits" + by (simp add: bit_simps' asid_bits_def asidBits_def) + +(* FIXME AARCH64: Added to crunch_param_rules in Crunch_Instances_NonDet as + trans[OF liftE_bindE return_bind]; move to monad equations instead and give it the name below *) +lemma liftE_return_bindE: + "liftE (return x) >>=E f = f x" + by (rule Crunch.crunch_param_rules(8)) + +crunches getIRQState + for inv[wp]: P + +lemma dmo_invs_no_cicd_lift': (* FIXME AARCH64: move up *) + assumes "\P. f \\s. P (irq_masks s)\" + assumes "\P p. f \\s. P (underlying_memory s p)\" + shows "doMachineOp f \all_invs_but_ct_idle_or_in_cur_domain'\" + apply (wp dmo_invs_no_cicd' assms) + apply clarsimp + apply (drule_tac Q="\_ m'. underlying_memory m' p = underlying_memory m p" in use_valid, + rule assms, rule refl) + apply simp + done + +lemma dmo_invs_lift': (* FIXME AARCH64: move up *) + assumes "\P. f \\s. P (irq_masks s)\" + assumes "\P p. f \\s. P (underlying_memory s p)\" + shows "doMachineOp f \invs'\" + apply (wp dmo_invs' assms) + apply clarsimp + apply (drule_tac Q="\_ m'. 
underlying_memory m' p = underlying_memory m p" in use_valid, + rule assms, rule refl) + apply simp + done + +lemma dmos_invs_no_cicd'[wp]: + "doMachineOp isb \invs_no_cicd'\" + "doMachineOp dsb \invs_no_cicd'\" + "\w. doMachineOp (setSCTLR w) \invs_no_cicd'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_hcr w) \invs_no_cicd'\" + "\w x. doMachineOp (set_gic_vcpu_ctrl_lr w x) \invs_no_cicd'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_apr w) \invs_no_cicd'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_vmcr w) \invs_no_cicd'\" + "\w. doMachineOp (setHCR w) \invs_no_cicd'\" + "doMachineOp get_gic_vcpu_ctrl_hcr \invs_no_cicd'\" + "\w. doMachineOp (get_gic_vcpu_ctrl_lr w) \invs_no_cicd'\" + "doMachineOp get_gic_vcpu_ctrl_apr \invs_no_cicd'\" + "doMachineOp get_gic_vcpu_ctrl_vmcr \invs_no_cicd'\" + "doMachineOp enableFpuEL01 \invs_no_cicd'\" + "\r. doMachineOp (readVCPUHardwareReg r) \invs_no_cicd'\" + "\r v. doMachineOp (writeVCPUHardwareReg r v) \invs_no_cicd'\" + "doMachineOp check_export_arch_timer \invs_no_cicd'\" + by (wp dmo_invs_no_cicd_lift')+ + +lemma dmos_invs'[wp]: + "doMachineOp isb \invs'\" + "doMachineOp dsb \invs'\" + "\w. doMachineOp (setSCTLR w) \invs'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_hcr w) \invs'\" + "\w x. doMachineOp (set_gic_vcpu_ctrl_lr w x) \invs'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_apr w) \invs'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_vmcr w) \invs'\" + "\w. doMachineOp (setHCR w) \invs'\" + "doMachineOp get_gic_vcpu_ctrl_hcr \invs'\" + "\w. doMachineOp (get_gic_vcpu_ctrl_lr w) \invs'\" + "doMachineOp get_gic_vcpu_ctrl_apr \invs'\" + "doMachineOp get_gic_vcpu_ctrl_vmcr \invs'\" + "doMachineOp enableFpuEL01 \invs'\" + "\r. doMachineOp (readVCPUHardwareReg r) \invs'\" + "\r v. doMachineOp (writeVCPUHardwareReg r v) \invs'\" + "doMachineOp check_export_arch_timer \invs'\" + by (wp dmo_invs_lift')+ + +lemma valid_irq_node_lift_asm: + assumes x: "\P. \\s. P (irq_node' s)\ f \\rv s. P (irq_node' s)\" + assumes y: "\p. \real_cte_at' p and Q\ f \\rv. real_cte_at' p\" + shows "\\s. valid_irq_node' (irq_node' s) s \ Q s\ f \\rv s. valid_irq_node' (irq_node' s) s\" + apply (simp add: valid_irq_node'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF x]) + apply (wp hoare_vcg_all_lift y) + apply simp + done + +lemma isIRQActive_corres: + "corres (=) \ \ (is_irq_active irqVTimerEvent) (isIRQActive irqVTimerEvent)" + apply (clarsimp simp: isIRQActive_def getIRQState_def is_irq_active_def get_irq_state_def) + apply (clarsimp simp: is_irq_active_def isIRQActive_def + get_irq_state_def irq_state_relation_def + getIRQState_def getInterruptState_def + state_relation_def interrupt_state_relation_def) + apply (fastforce split: irq_state.splits irqstate.splits) + done + +lemma getIRQState_wp: + "\\s. P (intStateIRQTable (ksInterruptState s) irq) s \ getIRQState irq \\rv s. P rv s\" + unfolding getIRQState_def getInterruptState_def + by (wpsimp simp: comp_def) + +lemma maskInterrupt_irq_states': + "\valid_irq_states' + and (\s. \b \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive)\ + doMachineOp (maskInterrupt b irq) + \\rv. valid_irq_states'\" + by (wpsimp wp: dmo_maskInterrupt) + (auto simp add: valid_irq_states_def valid_irq_masks'_def) + +crunch ksIdleThread[wp]: storeWordUser "\s. P (ksIdleThread s)" +crunch ksIdleThread[wp]: asUser "\s. P (ksIdleThread s)" + (wp: crunch_wps simp: crunch_simps) +crunch ksQ[wp]: asUser "\s. P (ksReadyQueues s)" + (wp: crunch_wps simp: crunch_simps) + +lemma maskInterrupt_invs': + "\invs' + and (\s. 
\b \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive)\ + doMachineOp (maskInterrupt b irq) + \\rv. invs'\" + by (wpsimp wp: maskInterrupt_irq_states' dmo_maskInterrupt simp: invs'_def valid_state'_def) + (auto simp: valid_irq_states_def valid_irq_masks'_def valid_machine_state'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + +lemma dmo_machine_op_lift_invs'[wp]: + "doMachineOp (machine_op_lift f) \invs'\" + by (wpsimp wp: dmo_invs' simp: machine_op_lift_def in_monad machine_rest_lift_def select_f_def) + +lemma dmo'_gets_wp: + "\\s. Q (f (ksMachineState s)) s\ doMachineOp (gets f) \Q\" + unfolding doMachineOp_def by (wpsimp simp: in_monad) + +lemma maskInterrupt_invs_no_cicd': + "\invs_no_cicd' + and (\s. \b \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive)\ + doMachineOp (maskInterrupt b irq) + \\rv. invs_no_cicd'\" + by (wpsimp wp: maskInterrupt_irq_states' dmo_maskInterrupt simp: invs_no_cicd'_def) + (auto simp: valid_irq_states_def valid_irq_masks'_def valid_machine_state'_def + ct_not_inQ_def) + +(* FIXME AARCH64: this is a big block of VCPU-related lemmas in an attempt to consolidate them; + there may be an opportunity to factor most of these out into a separate theory *) +(* setObject for VCPU invariant preservation *) + +lemma setObject_vcpu_cur_domain[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksCurDomain s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ct[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksCurThread s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_it[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksIdleThread s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_sched[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksSchedulerAction s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_L1[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksReadyQueuesL1Bitmap s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_L2[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksReadyQueuesL2Bitmap s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ksInt[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksInterruptState s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ksArch[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksArchState s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_gs[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (gsMaxObjectSize s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_maschine_state[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksMachineState s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ksDomSchedule[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksDomSchedule s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ksDomScheduleIdx[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksDomScheduleIdx s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_gsUntypedZeroRanges[wp]: + "setObject ptr (vcpu::vcpu) \\s. 
P (gsUntypedZeroRanges s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + + +crunches vcpuEnable, vcpuSave, vcpuDisable, vcpuRestore + for pspace_aligned'[wp]: pspace_aligned' + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +lemma vcpuSwitch_aligned'[wp]: "\pspace_aligned'\ vcpuSwitch param_a \\_. pspace_aligned'\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +crunches vcpuEnable, vcpuSave, vcpuDisable, vcpuRestore + for pspace_distinct'[wp]: pspace_distinct' + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +lemma vcpuSwitch_distinct'[wp]: "\pspace_distinct'\ vcpuSwitch param_a \\_. pspace_distinct'\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma setObject_vcpu_ctes_of[wp]: + "\ \s. P (ctes_of s)\ setObject p (t :: vcpu) \\_ s. P (ctes_of s)\" + apply (rule ctes_of_from_cte_wp_at[where Q="\", simplified]) + apply (wp setObject_cte_wp_at2'[where Q="\"]) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply (rule equals0I) + apply (clarsimp simp: updateObject_default_def in_monad) + apply simp + done + +lemma setObject_vcpu_untyped_ranges_zero'[wp]: + "setObject ptr (vcpu::vcpu) \untyped_ranges_zero'\" + by (rule hoare_lift_Pf[where f=cteCaps_of]; wp cteCaps_of_ctes_of_lift) + +lemma setVCPU_if_live[wp]: + "\\s. if_live_then_nonz_cap' s \ (live' (injectKO vcpu) \ ex_nonz_cap_to' v s)\ + setObject v (vcpu::vcpu) \\_. if_live_then_nonz_cap'\" + apply (wpsimp wp: setObject_iflive' [where P=\] + | simp add: objBits_simps vcpuBits_def pageBits_def)+ + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def in_monad bind_def) + apply simp + done + +lemma setVCPU_if_unsafe[wp]: + "setObject v (vcpu::vcpu) \if_unsafe_then_cap'\" + apply (wp setObject_ifunsafe') + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def in_monad bind_def) + apply wp + apply simp + done + +lemma projectKO_opt_no_vcpu[simp]: + "projectKO_opt (KOArch (KOVCPU v)) = (None::'a::no_vcpu option)" + by (rule ccontr) (simp add: project_koType not_vcpu[symmetric]) + +lemma setObject_vcpu_obj_at'_no_vcpu[wp]: + "setObject ptr (v::vcpu) \\s. P (obj_at' (P'::'a::no_vcpu \ bool) t s)\" + apply (wp setObject_ko_wp_at[where + P'="\ko. \obj. projectKO_opt ko = Some obj \ P' (obj::'a::no_vcpu)" for P', + folded obj_at'_real_def]) + apply (clarsimp simp: updateObject_default_def in_monad not_vcpu[symmetric]) + apply (simp add: objBits_simps) + apply (simp add: vcpuBits_def pageBits_def) + apply (clarsimp split del: if_split) + apply (erule rsubst[where P=P]) + apply normalise_obj_at' + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def) + done + +lemmas setVCPU_pred_tcb'[wp] = + setObject_vcpu_obj_at'_no_vcpu + [where P'="\ko. P (proj (tcb_to_itcb' ko))" for P proj, folded pred_tcb_at'_def] + +lemma setVCPU_valid_idle'[wp]: + "setObject v (vcpu::vcpu) \valid_idle'\" + unfolding valid_idle'_def by (rule hoare_lift_Pf[where f=ksIdleThread]; wp) + +lemma setVCPU_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ setObject p (v::vcpu) \\rv s. 
P (ksReadyQueues s)\" + by (wp setObject_qs updateObject_default_inv | simp)+ + +lemma setVCPU_ct_not_inQ[wp]: + "setObject v (vcpu::vcpu) \ct_not_inQ\" + apply (wp ct_not_inQ_lift) + apply (rule hoare_lift_Pf[where f=ksCurThread]; wp) + apply assumption + done + +(* TODO: move *) +lemma getObject_ko_at_vcpu [wp]: + "\\\ getObject p \\rv::vcpu. ko_at' rv p\" + by (rule getObject_ko_at | simp add: objBits_simps vcpuBits_def pageBits_def)+ + +lemma corres_gets_gicvcpu_numlistregs: + "corres (=) \ \ (gets (arm_gicvcpu_numlistregs \ arch_state)) + (gets (armKSGICVCPUNumListRegs \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma corres_gets_current_vcpu[corres]: + "corres (=) \ \ (gets (arm_current_vcpu \ arch_state)) + (gets (armHSCurVCPU \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma setObject_VCPU_corres: + "vcpu_relation vcpuObj vcpuObj' + \ corres dc (vcpu_at vcpu) + (vcpu_at' vcpu) + (set_vcpu vcpu vcpuObj) + (setObject vcpu vcpuObj')" + apply (simp add: set_vcpu_def) + apply (rule corres_guard_imp) + apply (rule setObject_other_corres [where P="\ko::vcpu. True"], simp) + apply (clarsimp simp: obj_at'_def) + apply (erule map_to_ctes_upd_other, simp, simp) + apply (simp add: a_type_def is_other_obj_relation_type_def) + apply (simp add: objBits_simps) + apply simp + apply (simp add: objBits_simps vcpuBits_def pageBits_def) + apply (simp add: other_obj_relation_def asid_pool_relation_def) + apply (clarsimp simp: typ_at_to_obj_at'[symmetric] obj_at_def exs_valid_def + assert_def a_type_def return_def fail_def) + apply (fastforce split: Structures_A.kernel_object.split_asm if_split_asm) + apply (simp add: typ_at_to_obj_at_arches) + done + +lemma setObject_vcpu_cte_wp_at'[wp]: + "\\s. P (cte_wp_at' P' p s)\ + setObject ptr (vcpu::vcpu) + \\rv s. P (cte_wp_at' P' p s)\" + apply (wp setObject_cte_wp_at2'[where Q="\"]) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply (rule equals0I) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply simp + done + +crunches vcpuSave, vcpuRestore, vcpuDisable, vcpuEnable + for ctes[wp]: "\s. P (ctes_of s)" + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +lemma vcpuSwitch_ctes[wp]: "\\s. P (ctes_of s)\ vcpuSwitch vcpu \\_ s. P (ctes_of s)\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +crunches + vgicUpdate, vgicUpdateLR, vcpuWriteReg, vcpuReadReg, vcpuRestoreRegRange, vcpuSaveRegRange, + vcpuSave + for typ_at'[wp]: "\s. P (typ_at' T p s)" + and no_0_obj'[wp]: no_0_obj' + (wp: crunch_wps ignore_del: setObject) + +crunches vcpuSave, vcpuRestore, vcpuDisable, vcpuEnable + for cte_wp_at'[wp]: "\s. P (cte_wp_at' P' p s)" + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +crunches vcpuDisable, vcpuEnable, vcpuSave, vcpuRestore + for no_0_obj'[wp]: no_0_obj' + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +lemma vcpuSwitch_no_0_obj'[wp]: "\no_0_obj'\ vcpuSwitch v \\_. no_0_obj'\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma vcpuSwitch_cte_wp_at'[wp]: + "\\s. P (cte_wp_at' P' p s)\ vcpuSwitch param_a \\_ s. P (cte_wp_at' P' p s)\ " + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma vcpuUpdate_corres[corres]: + "\v1 v2. 
vcpu_relation v1 v2 \ vcpu_relation (f v1) (f' v2) \ + corres dc (vcpu_at v) (vcpu_at' v) + (vcpu_update v f) (vcpuUpdate v f')" + by (corresKsimp corres: getObject_vcpu_corres setObject_VCPU_corres + simp: vcpu_update_def vcpuUpdate_def vcpu_relation_def) + +lemma vgicUpdate_corres[corres]: + "\vgic vgic'. vgic_map vgic = vgic' \ vgic_map (f vgic) = (f' vgic') + \ corres dc (vcpu_at v) (vcpu_at' v) (vgic_update v f) (vgicUpdate v f')" + by (corresKsimp simp: vgic_update_def vgicUpdate_def vcpu_relation_def) + +lemma vgicUpdateLR_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v) + (vgic_update_lr v idx val) (vgicUpdateLR v idx val)" + by (corresKsimp simp: vgic_update_lr_def vgicUpdateLR_def vgic_map_def) + +lemma vcpuReadReg_corres[corres]: + "corres (=) (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_read_reg v r) (vcpuReadReg v r)" + apply (simp add: vcpu_read_reg_def vcpuReadReg_def) + apply (rule corres_guard_imp) + apply (rule corres_assert_gen_asm2) + apply (rule corres_underlying_split[OF getObject_vcpu_corres]) + apply (wpsimp simp: vcpu_relation_def)+ + done + +lemma vcpuWriteReg_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_write_reg v r val) (vcpuWriteReg v r val)" + apply (simp add: vcpu_write_reg_def vcpuWriteReg_def) + apply (rule corres_guard_imp) + apply (rule corres_assert_gen_asm2) + apply (rule vcpuUpdate_corres) + apply (fastforce simp: vcpu_relation_def)+ + done + +lemma vcpuSaveReg_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_save_reg v r) (vcpuSaveReg v r)" + apply (clarsimp simp: vcpu_save_reg_def vcpuSaveReg_def) + apply (rule corres_guard_imp) + apply (rule corres_assert_gen_asm2) + apply (rule corres_split[OF corres_machine_op[where r="(=)"]]) + apply (rule corres_Id; simp) + apply (rule vcpuUpdate_corres, fastforce simp: vcpu_relation_def vgic_map_def) + apply wpsimp+ + done + +lemma vcpuSaveRegRange_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_save_reg_range v rf rt) (vcpuSaveRegRange v rf rt)" + apply (clarsimp simp: vcpu_save_reg_range_def vcpuSaveRegRange_def) + apply (rule corres_mapM_x[OF _ _ _ _ subset_refl]) + apply (wpsimp wp: vcpuSaveReg_corres)+ + done + +lemma vcpuRestoreReg_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_restore_reg v r) (vcpuRestoreReg v r)" + apply (clarsimp simp: vcpu_restore_reg_def vcpuRestoreReg_def) + apply (rule corres_guard_imp) + apply (rule corres_assert_gen_asm2) + apply (rule corres_split[OF getObject_vcpu_corres]) + apply (rule corres_machine_op) + apply (rule corres_Id) + apply (fastforce simp: vcpu_relation_def) + apply (wpsimp wp: corres_Id simp: vcpu_relation_def vgic_map_def)+ + done + +lemma vcpuRestoreRegRange_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_restore_reg_range v rf rt) (vcpuRestoreRegRange v rf rt)" + apply (clarsimp simp: vcpu_restore_reg_range_def vcpuRestoreRegRange_def) + apply (rule corres_mapM_x[OF _ _ _ _ subset_refl]) + apply (wpsimp wp: vcpuRestoreReg_corres)+ + done + +lemma saveVirtTimer_corres[corres]: + "corres dc (vcpu_at vcpu_ptr) (vcpu_at' vcpu_ptr and no_0_obj') + (save_virt_timer vcpu_ptr) (saveVirtTimer vcpu_ptr)" + unfolding save_virt_timer_def saveVirtTimer_def + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF vcpuSaveReg_corres], simp) + apply (rule corres_split_dc[OF corres_machine_op], (rule corres_Id; simp)) + apply (rule corres_split_dc[OF vcpuSaveReg_corres], simp)+ + apply (rule corres_split_eqr[OF 
corres_machine_op], (rule corres_Id; simp))+ + apply (fold dc_def) + apply (rule vcpuUpdate_corres) + apply (simp add: vcpu_relation_def) + apply wpsimp+ + done + +lemma restoreVirtTimer_corres[corres]: + "corres dc (vcpu_at vcpu_ptr) (vcpu_at' vcpu_ptr and no_0_obj') + (restore_virt_timer vcpu_ptr) (restoreVirtTimer vcpu_ptr)" + unfolding restore_virt_timer_def restoreVirtTimer_def IRQ_def + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF vcpuRestoreReg_corres], simp)+ + apply (rule corres_split_eqr[OF corres_machine_op], (rule corres_Id; simp))+ + apply (rule corres_split[OF getObject_vcpu_corres]) + apply (rule corres_split_eqr[OF vcpuReadReg_corres]) + apply (clarsimp simp: vcpu_relation_def) + apply (rule corres_split_dc[OF vcpuWriteReg_corres])+ + apply (rule corres_split_dc[OF vcpuRestoreReg_corres], simp)+ + apply (rule corres_split[OF getObject_vcpu_corres]) + apply (clarsimp simp: vcpu_relation_def) + apply (rule corres_split_eqr[OF isIRQActive_corres]) + apply (rule corres_split_dc[OF corres_when], simp) + apply (simp add: irq_vppi_event_index_def irqVPPIEventIndex_def IRQ_def) + apply (rule corres_machine_op, simp) + apply (rule corres_Id; wpsimp) + apply (fold dc_def) + apply (rule vcpuRestoreReg_corres) + apply (wpsimp simp: if_apply_def2 isIRQActive_def)+ + done + +lemma vcpuSave_corres: + "corres dc (vcpu_at (fst cvcpu)) (vcpu_at' (fst cvcpu) and no_0_obj') + (vcpu_save (Some cvcpu)) (vcpuSave (Some cvcpu))" + apply (clarsimp simp add: vcpu_save_def vcpuSave_def armvVCPUSave_def) + apply (cases cvcpu, clarsimp, rename_tac v active) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split[where r'=dc]) + apply (rule corres_when, simp) + apply (rule corres_split[OF vcpuSaveReg_corres]) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split[OF vgicUpdate_corres]) + apply (clarsimp simp: vgic_map_def) + apply (rule saveVirtTimer_corres) + apply wpsimp+ + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split[OF vgicUpdate_corres]) + apply (clarsimp simp: vgic_map_def) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split[OF vgicUpdate_corres]) + apply (clarsimp simp: vgic_map_def) + apply (rule corres_split_eqr) + apply (rule corres_trivial) + apply (fastforce simp add: state_relation_def arch_state_relation_def) + apply (simp add: mapM_discarded) + apply (rule corres_split[OF corres_mapM_x[OF _ _ _ _ subset_refl]]) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (clarsimp, fold dc_def) + apply (rule vgicUpdateLR_corres) + apply wpsimp+ + apply (rule vcpuSaveRegRange_corres) + apply (wpsimp wp: mapM_x_wp_inv hoare_vcg_imp_lift' + simp: if_apply_def2)+ + done + +lemma vcpuDisable_corres: + "corres dc (\s. (\v. vcpuopt = Some v) \ vcpu_at (the vcpuopt) s) + (\s. ((\v. 
vcpuopt = Some v) \ vcpu_at' (the vcpuopt) s) \ no_0_obj' s) + (vcpu_disable vcpuopt) + (vcpuDisable vcpuopt)" + apply (cases vcpuopt; clarsimp simp: vcpu_disable_def vcpuDisable_def) + (* no current VCPU *) + subgoal + apply (clarsimp simp: doMachineOp_bind do_machine_op_bind empty_fail_cond) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF corres_machine_op] + | rule corres_machine_op corres_Id + | wpsimp)+ + done + (* have current VCPU *) + apply (rename_tac vcpu) + apply (clarsimp simp: doMachineOp_bind do_machine_op_bind bind_assoc IRQ_def) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_dc[OF vgicUpdate_corres]) + apply (clarsimp simp: vgic_map_def) + apply (rule corres_split_dc[OF vcpuSaveReg_corres]) + apply (rule corres_split_dc[OF vcpuSaveReg_corres]) + apply (rule corres_split_dc[OF corres_machine_op] + corres_split_dc[OF saveVirtTimer_corres] + | rule corres_machine_op corres_Id + | wpsimp)+ + done + +lemma vcpuEnable_corres: + "corres dc (vcpu_at vcpu) (vcpu_at' vcpu and no_0_obj') + (vcpu_enable vcpu) (vcpuEnable vcpu)" + apply (simp add: vcpu_enable_def vcpuEnable_def doMachineOp_bind do_machine_op_bind bind_assoc) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF vcpuRestoreReg_corres])+ + apply (rule corres_split[OF getObject_vcpu_corres], rename_tac vcpu') + apply (case_tac vcpu') + apply (rule corres_split_dc[OF corres_machine_op] + | rule corres_split_dc[OF vcpuRestoreReg_corres] + | rule corres_machine_op corres_Id restoreVirtTimer_corres + | wpsimp simp: vcpu_relation_def vgic_map_def)+ + done + +lemma vcpuRestore_corres: + "corres dc (vcpu_at vcpu) + (vcpu_at' vcpu and no_0_obj') + (vcpu_restore vcpu) + (vcpuRestore vcpu)" + apply (simp add: vcpu_restore_def vcpuRestore_def gicVCPUMaxNumLR_def) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF corres_machine_op] + | (rule corres_machine_op corres_Id; wpsimp))+ + apply (rule corres_split[OF getObject_vcpu_corres], rename_tac vcpu') + apply (rule corres_split[OF corres_gets_gicvcpu_numlistregs]) + apply (case_tac vcpu' + , clarsimp simp: comp_def vcpu_relation_def vgic_map_def mapM_x_mapM + uncurry_def split_def mapM_map_simp) + apply (simp add: doMachineOp_bind do_machine_op_bind bind_assoc empty_fail_cond) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split) + apply (rule corres_machine_op, rule corres_Id; wpsimp wp: no_fail_mapM) + apply (rule corres_split_dc[OF vcpuRestoreRegRange_corres]) + apply (rule vcpuEnable_corres) + apply wpsimp+ + done + +lemma vcpuSwitch_corres: + assumes "vcpu' = vcpu" + shows + "corres dc (\s. (vcpu \ None \ vcpu_at (the vcpu) s) \ + ((arm_current_vcpu \ arch_state) s \ None + \ vcpu_at ((fst \ the \ arm_current_vcpu \ arch_state) s) s)) + (\s. (vcpu' \ None \ vcpu_at' (the vcpu') s) \ + ((armHSCurVCPU \ ksArchState) s \ None + \ vcpu_at' ((fst \ the \ armHSCurVCPU \ ksArchState) s) s) \ + no_0_obj' s) + (vcpu_switch vcpu) + (vcpuSwitch vcpu')" + proof - + have modify_current_vcpu: + "\a b. corres dc \ \ (modify (\s. s\arch_state := arch_state s\arm_current_vcpu := Some (a, b)\\)) + (modifyArchState (armHSCurVCPU_update (\_. 
Some (a, b))))" + by (clarsimp simp add: modifyArchState_def state_relation_def arch_state_relation_def + intro!: corres_modify) + have get_current_vcpu: "corres (=) \ \ (gets (arm_current_vcpu \ arch_state)) + (gets (armHSCurVCPU \ ksArchState))" + apply clarsimp + apply (rule_tac P = "(arm_current_vcpu (arch_state s)) = (armHSCurVCPU (ksArchState s'))" + in TrueE; + simp add: state_relation_def arch_state_relation_def) + done + show ?thesis + apply (simp add: vcpu_switch_def vcpuSwitch_def assms) + apply (cases vcpu) + apply (all \simp, rule corres_underlying_split[OF _ _ gets_sp gets_sp], + rule corres_guard_imp[OF get_current_vcpu TrueI TrueI], + rename_tac rv rv', case_tac rv ; + clarsimp simp add: when_def\) + apply (rule corres_machine_op[OF corres_underlying_trivial[OF no_fail_isb]] TrueI TrueI + vcpuDisable_corres modify_current_vcpu + vcpuEnable_corres + vcpuRestore_corres + vcpuSave_corres + hoare_TrueI conjI + corres_underlying_split corres_guard_imp + | clarsimp simp add: when_def | wpsimp | assumption)+ + done + qed + +lemma aligned_distinct_relation_vcpu_atI'[elim]: + "\ vcpu_at p s; pspace_relation (kheap s) (ksPSpace s'); + pspace_aligned' s'; pspace_distinct' s' \ + \ vcpu_at' p s'" + apply (clarsimp simp add: obj_at_def a_type_def) + apply (simp split: Structures_A.kernel_object.split_asm + if_split_asm arch_kernel_obj.split_asm) + apply (drule(1) pspace_relation_absD) + apply (clarsimp simp: other_obj_relation_def) + apply (case_tac z ; simp) + apply (rename_tac vcpu) + apply (case_tac vcpu; simp) + apply (clarsimp simp: vcpu_relation_def obj_at'_def typ_at'_def ko_wp_at'_def) + apply (fastforce simp add: pspace_aligned'_def pspace_distinct'_def dom_def) + done + +lemma vcpuSwitch_corres': + assumes "vcpu' = vcpu" + shows + "corres dc (\s. (vcpu \ None \ vcpu_at (the vcpu) s) \ + ((arm_current_vcpu \ arch_state) s \ None + \ vcpu_at ((fst \ the \ arm_current_vcpu \ arch_state) s) s)) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (vcpu_switch vcpu) + (vcpuSwitch vcpu')" + apply (rule stronger_corres_guard_imp, + rule vcpuSwitch_corres[OF assms]) + apply simp + apply (simp add: assms) + apply (rule conjI) + apply clarsimp + apply (rule aligned_distinct_relation_vcpu_atI' ; clarsimp simp add: state_relation_def, assumption?) + apply (clarsimp simp add: state_relation_def arch_state_relation_def) + apply (rule aligned_distinct_relation_vcpu_atI'; assumption) + done + +crunches + vgicUpdateLR, vcpuWriteReg, vcpuReadReg, vcpuRestoreRegRange, vcpuSaveRegRange, vcpuSave, + vcpuSwitch + for nosch[wp]: "\s. P (ksSchedulerAction s)" + and it'[wp]: "\s. P (ksIdleThread s)" + (ignore: doMachineOp wp: crunch_wps) + +lemma modifyArchState_hyp[wp]: + "modifyArchState x \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: modifyArchState_def wp: | subst doMachineOp_bind)+ + +abbreviation + "live_vcpu_at_tcb p s \ \x. ko_at' x p s \ + (case atcbVCPUPtr (tcbArch x) of None \ \_. True + | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x) s" + +lemma valid_case_option_post_wp': + "(\x. \P x\ f \\rv. Q x\) \ + \case ep of Some x \ P x | _ \ \_. True\ + f \\rv. case ep of Some x \ Q x | _ \ \_. True\" + by (cases ep, simp_all add: hoare_vcg_prop) + +crunches + vcpuDisable, vcpuRestore, vcpuEnable, vgicUpdateLR, vcpuWriteReg, vcpuReadReg, + vcpuRestoreRegRange, vcpuSaveRegRange + for ksQ[wp]: "\s. P (ksReadyQueues s)" + (wp: crunch_wps) + +lemma vcpuSave_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ vcpuSave param_a \\_ s. 
P (ksReadyQueues s)\" + supply option.case_cong_weak[cong] + apply (wpsimp simp: vcpuSave_def modifyArchState_def armvVCPUSave_def | simp)+ + apply (rule_tac S="set gicIndices" in mapM_x_wp) + apply wpsimp+ + done + +lemma vcpuSwitch_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ vcpuSwitch param_a \\_ s. P (ksReadyQueues s)\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | simp)+ + +lemma hyp_live'_vcpu_regs[simp]: + "hyp_live' (KOArch (KOVCPU (vcpuRegs_update f vcpu))) = hyp_live' (KOArch (KOVCPU vcpu))" + by (simp add: hyp_live'_def arch_live'_def) + +lemma hyp_live'_vcpu_vgic[simp]: + "hyp_live' (KOArch (KOVCPU (vcpuVGIC_update f' vcpu))) = hyp_live' (KOArch (KOVCPU vcpu))" + by (simp add: hyp_live'_def arch_live'_def) + +lemma hyp_live'_vcpu_VPPIMasked[simp]: + "hyp_live' (KOArch (KOVCPU (vcpuVPPIMasked_update f' vcpu))) = hyp_live' (KOArch (KOVCPU vcpu))" + by (simp add: hyp_live'_def arch_live'_def) + +lemma hyp_live'_vcpu_VTimer[simp]: + "hyp_live' (KOArch (KOVCPU (vcpuVTimer_update f' vcpu))) = hyp_live' (KOArch (KOVCPU vcpu))" + by (simp add: hyp_live'_def arch_live'_def) + +lemma live'_vcpu_regs[simp]: + "live' (KOArch (KOVCPU (vcpuRegs_update f vcpu))) = live' (KOArch (KOVCPU vcpu))" + by (simp add: live'_def) + +lemma live'_vcpu_vgic[simp]: + "live' (KOArch (KOVCPU (vcpuVGIC_update f' vcpu))) = live' (KOArch (KOVCPU vcpu))" + by (simp add: live'_def) + +lemma live'_vcpu_VPPIMasked[simp]: + "live' (KOArch (KOVCPU (vcpuVPPIMasked_update f' vcpu))) = live' (KOArch (KOVCPU vcpu))" + by (simp add: live'_def) + +lemma live'_vcpu_VTimer[simp]: + "live' (KOArch (KOVCPU (vcpuVTimer_update f' vcpu))) = live' (KOArch (KOVCPU vcpu))" + by (simp add: live'_def) + +lemma setVCPU_regs_vcpu_live: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuRegs_update f vcpu) \\_. ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +lemma setVCPU_vgic_vcpu_live[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuVGIC_update f vcpu) \\_. ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +lemma setVCPU_VPPIMasked_vcpu_live[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuVPPIMasked_update f vcpu) \\_. ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +lemma setVCPU_VTimer_vcpu_live[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuVTimer_update f vcpu) \\_. 
ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +lemma vgicUpdate_vcpu_live[wp]: + "vgicUpdate v f \ ko_wp_at' (is_vcpu' and hyp_live') p \" + by (wpsimp simp: vgicUpdate_def vcpuUpdate_def wp: setVCPU_vgic_vcpu_live) + +lemma setVCPU_regs_vgic_vcpu_live: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuRegs_update f (vcpuVGIC_update f' vcpu)) \\_. ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +(* FIXME: move *) +lemma setVCPU_regs_vgic_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuRegs_update f (vcpuVGIC_update f' vcpu)) \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_regs_vgic_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState])+ + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma setVCPU_regs_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuRegs_update f vcpu) \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_regs_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState]) + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma setVCPU_vgic_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuVGIC_update f vcpu) \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_vgic_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState]) + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma setVCPU_VPPIMasked_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuVPPIMasked_update f vcpu) \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_vgic_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState]) + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma setVCPU_VTimer_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuVTimer_update f vcpu) \\_. 
valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_vgic_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState]) + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma state_refs_of'_vcpu_empty: + "ko_at' (vcpu::vcpu) v s \ (state_refs_of' s)(v := {}) = state_refs_of' s" + by (rule ext) (clarsimp simp: state_refs_of'_def obj_at'_def) + +lemma state_hyp_refs_of'_vcpu_absorb: + "ko_at' vcpu v s \ + (state_hyp_refs_of' s)(v := vcpu_tcb_refs' (vcpuTCBPtr vcpu)) = state_hyp_refs_of' s" + by (rule ext) (clarsimp simp: state_hyp_refs_of'_def obj_at'_def) + +(* FIXME AARCH64: move *) +lemmas valid_arch_obj'_simps[simp] = valid_arch_obj'_def[split_simps arch_kernel_object.split] +lemmas ppn_bounded_simps[simp] = ppn_bounded_def[split_simps pte.split] + +lemma setObject_vcpu_valid_objs': + "\valid_objs' and K (valid_vcpu' vcpu)\ setObject v (vcpu::vcpu) \\_. valid_objs'\" + apply (wp setObject_valid_objs') + apply (clarsimp simp: in_monad updateObject_default_def valid_obj'_def) + apply assumption + apply simp + done + +lemma setVCPU_valid_arch': + "\valid_arch_state' and (\s. \a. armHSCurVCPU (ksArchState s) = Some (v,a) \ hyp_live' (KOArch (KOVCPU vcpu))) \ + setObject v (vcpu::vcpu) + \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv pred_conj_def) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift' setObject_ko_wp_at + | simp add: objBits_simps vcpuBits_def pageBits_def)+ + apply (clarsimp simp: is_vcpu'_def ko_wp_at'_def) + done + +lemma setObject_vcpu_no_tcb_update: + "\ vcpuTCBPtr (f vcpu) = vcpuTCBPtr vcpu \ + \ \ valid_objs' and ko_at' (vcpu :: vcpu) p\ setObject p (f vcpu) \ \_. valid_objs' \" + apply (rule_tac Q="valid_objs' and (ko_at' vcpu p and valid_obj' (KOArch (KOVCPU vcpu)))" in hoare_pre_imp) + apply (clarsimp) + apply (simp add: valid_obj'_def) + apply (drule (1) ko_at_valid_objs', simp) + apply (simp add: valid_obj'_def) + apply (rule setObject_valid_objs') + apply (clarsimp simp add: obj_at'_def) + apply (frule updateObject_default_result) + apply (clarsimp simp add: valid_obj'_def valid_vcpu'_def) + done + +lemma vcpuUpdate_valid_objs'[wp]: + "\vcpu. vcpuTCBPtr (f vcpu) = vcpuTCBPtr vcpu \ + \valid_objs'\ vcpuUpdate vr f \\_. valid_objs'\" + apply (wpsimp simp: vcpuUpdate_def) + apply (rule_tac vcpu=vcpu in setObject_vcpu_no_tcb_update) + apply wpsimp+ + done + +crunches + vgicUpdate, vcpuSaveReg, vgicUpdateLR, vcpuSaveRegRange, vcpuSave, + vcpuDisable, vcpuEnable, vcpuRestore, vcpuSwitch + for valid_objs'[wp]: valid_objs' + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + (wp: mapM_wp_inv simp: mapM_x_mapM) + +lemma setVCPU_tcbs_of'[wp]: + "setObject v (vcpu :: vcpu) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setVCPU_regs_r_invs_cicd': + "\invs_no_cicd' and ko_at' vcpu v\ + setObject v (vcpuRegs_update (\_. (vcpuRegs vcpu)(r:=rval)) vcpu) \\_. invs_no_cicd'\" + unfolding valid_state'_def valid_pspace'_def valid_mdb'_def invs_no_cicd'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. vcpuRegs_update (\_. 
(vcpuRegs vcpu)(r:=rval)) vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift + setVCPU_regs_valid_arch' setVCPU_regs_vcpu_live + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_vgic_invs_cicd': + "\invs_no_cicd' and ko_at' vcpu v\ + setObject v (vcpuVGIC_update f vcpu) + \\_. invs_no_cicd'\" + unfolding valid_state'_def valid_pspace'_def valid_mdb'_def invs_no_cicd'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. (vcpuVGIC_update f vcpu)"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift + setVCPU_vgic_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_VPPIMasked_invs_cicd': + "\invs_no_cicd' and ko_at' vcpu v\ + setObject v (vcpuVPPIMasked_update f vcpu) + \\_. invs_no_cicd'\" + unfolding valid_state'_def valid_pspace'_def valid_mdb'_def invs_no_cicd'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. (vcpuVPPIMasked_update f vcpu)"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift + setVCPU_VPPIMasked_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_VTimer_invs_cicd': + "\invs_no_cicd' and ko_at' vcpu v\ + setObject v (vcpuVTimer_update f vcpu) + \\_. invs_no_cicd'\" + unfolding valid_state'_def valid_pspace'_def valid_mdb'_def invs_no_cicd'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. 
(vcpuVTimer_update f vcpu)"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift + setVCPU_VTimer_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma vgicUpdate_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vgicUpdate f v \\_. invs_no_cicd'\" + by (wpsimp simp: vgicUpdate_def vcpuUpdate_def wp: setVCPU_vgic_invs_cicd') + +lemma vcpuRestoreReg_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuRestoreReg v r \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuRestoreReg_def | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuReadReg_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuReadReg v r \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuReadReg_def | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuSaveReg_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuSaveReg v r \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuSaveReg_def vcpuUpdate_def wp: setVCPU_regs_r_invs_cicd' + | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuWriteReg_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuWriteReg vcpu_ptr r v \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuWriteReg_def vcpuUpdate_def wp: setVCPU_regs_r_invs_cicd' + | subst doMachineOp_bind | rule empty_fail_bind)+ + +crunches vcpuRestoreRegRange, vcpuSaveRegRange, vgicUpdateLR + for invs_no_cicd'[wp]: invs_no_cicd' + (wp: mapM_x_wp ignore: loadObject) + +lemma saveVirtTimer_invs_no_cicd'[wp]: + "\invs_no_cicd'\ saveVirtTimer vcpu_ptr \\_. invs_no_cicd'\" + by (wpsimp simp: saveVirtTimer_def vcpuUpdate_def read_cntpct_def + wp: setVCPU_VTimer_invs_cicd' dmo'_gets_wp) + +lemma restoreVirtTimer_invs_no_cicd'[wp]: + "\invs_no_cicd'\ restoreVirtTimer vcpu_ptr \\_. invs_no_cicd'\" + by (wpsimp simp: restoreVirtTimer_def vcpuUpdate_def read_cntpct_def if_apply_def2 + isIRQActive_def + wp: setVCPU_VTimer_invs_cicd' maskInterrupt_invs_no_cicd' getIRQState_wp dmo'_gets_wp) + +lemma vcpuEnable_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuEnable v \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuEnable_def | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma dmo_maskInterrupt_True_invs_no_cicd'[wp]: + "doMachineOp (maskInterrupt True irq) \invs_no_cicd'\" + apply (wp dmo_maskInterrupt) + apply (clarsimp simp: invs_no_cicd'_def valid_state'_def) + apply (simp add: valid_irq_masks'_def valid_machine_state'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + done + +lemma vcpuDisable_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuDisable v \\_. invs_no_cicd'\" + unfolding vcpuDisable_def + by (wpsimp wp: doMachineOp_typ_ats + simp: vcpuDisable_def doMachineOp_typ_at' split: option.splits + | subst doMachineOp_bind | rule empty_fail_bind conjI)+ + +lemma vcpuRestore_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuRestore v \\_. invs_no_cicd'\" + including no_pre + apply (wpsimp simp: vcpuRestore_def uncurry_def split_def doMachineOp_mapM_x gets_wp + | subst doMachineOp_bind | rule empty_fail_bind)+ + apply (rule_tac S="(\i. (of_nat i, vgicLR (vcpuVGIC vcpu) i)) ` {0..invs_no_cicd'\ vcpuSave v \\_. 
invs_no_cicd'\" + by (wpsimp simp: vcpuSave_def armvVCPUSave_def wp: mapM_x_wp cong: option.case_cong_weak + | assumption)+ +lemma valid_arch_state'_armHSCurVCPU_update[simp]: + "\ ko_wp_at' (is_vcpu' and hyp_live') v s; valid_arch_state' s \ \ + valid_arch_state' (s\ksArchState := armHSCurVCPU_update (\_. Some (v, b)) (ksArchState s)\)" + by (clarsimp simp: invs'_def valid_state'_def + bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def + valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def cur_tcb'_def) + +lemma dmo_vcpu_hyp: + "\ko_wp_at' (is_vcpu' and hyp_live') v\ doMachineOp f \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: doMachineOp_def) + +lemma vcpuSaveReg_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v \ vcpuSaveReg v' r \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuSaveReg_def vcpuUpdate_def wp: setVCPU_regs_vcpu_live dmo_vcpu_hyp) + +lemma vcpuWriteReg_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v \ vcpuWriteReg v' r val \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuWriteReg_def vcpuUpdate_def wp: setVCPU_regs_vcpu_live dmo_vcpu_hyp) + +crunches + vcpuRestoreRegRange, vcpuSaveRegRange, vgicUpdateLR, vcpuReadReg + for hyp[wp]: "ko_wp_at' (is_vcpu' and hyp_live') v" + (wp: crunch_wps setVCPU_regs_vcpu_live dmo_vcpu_hyp) + +lemma saveVirtTimer_hyp[wp]: + "saveVirtTimer vcpu_ptr \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: saveVirtTimer_def vcpuUpdate_def wp: dmo_vcpu_hyp vgicUpdate_vcpu_live) + +lemma restoreVirtTimer_hyp[wp]: + "restoreVirtTimer vcpu_ptr \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: restoreVirtTimer_def vcpuUpdate_def isIRQActive_def + wp: dmo_vcpu_hyp vgicUpdate_vcpu_live) + +lemma vcpuDisable_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v\ vcpuDisable (Some x) \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuDisable_def wp: dmo_vcpu_hyp vgicUpdate_vcpu_live | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuEnable_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v\ vcpuEnable x \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuEnable_def wp: dmo_vcpu_hyp | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuRestore_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v\ vcpuRestore x \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuRestore_def wp: dmo_vcpu_hyp | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma armvVCPUSave_hyp[wp]: + "armvVCPUSave x y \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: armvVCPUSave_def wp: dmo_vcpu_hyp) + +lemma vcpuSave_hyp[wp]: + "vcpuSave x \ko_wp_at' (is_vcpu' and hyp_live') v\" + apply (wpsimp simp: vcpuSave_def wp: dmo_vcpu_hyp mapM_x_wp' + | subst doMachineOp_bind + | rule empty_fail_bind)+ + apply (simp add: pred_conj_def) + done + +lemma vcpuSwitch_hyp[wp]: + "vcpuSwitch x \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuSwitch_def wp: dmo_vcpu_hyp) + +lemma getObject_vcpu_ko_at': + "(vcpu::vcpu, s') \ fst (getObject p s) \ s' = s \ ko_at' vcpu p s" + apply (rule context_conjI) + apply (drule use_valid, rule getObject_inv[where P="(=) s"]; simp add: loadObject_default_inv) + apply (drule use_valid, rule getObject_ko_at; clarsimp simp: obj_at_simps vcpuBits_def) + done + +lemma vcpuUpdate_valid_arch_state'[wp]: + "\vcpu. vcpuTCBPtr (f vcpu) = vcpuTCBPtr vcpu \ + \valid_arch_state'\ vcpuUpdate vr f \\_. 
valid_arch_state'\" + including no_pre + apply (wpsimp simp: vcpuUpdate_def + wp: setVCPU_valid_arch') + by (clarsimp simp: valid_def in_monad hyp_live'_def arch_live'_def valid_arch_state'_def + obj_at'_real_def ko_wp_at'_def is_vcpu'_def + dest!: getObject_vcpu_ko_at')+ + +crunches vcpuRestoreReg + for valid_arch_state'[wp]: valid_arch_state' + +crunches vgicUpdateLR, vcpuSave, vcpuDisable, vcpuEnable, vcpuRestore + for valid_arch_state'[wp]: valid_arch_state' + (wp: crunch_wps ignore: doMachineOp) + +lemma vcpuSwitch_valid_arch_state'[wp]: + "\valid_arch_state' and (case v of None \ \ | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x)\ + vcpuSwitch v \\_. valid_arch_state'\" + apply (wpsimp simp: vcpuSwitch_def modifyArchState_def + wp: vcpuDisable_hyp[simplified pred_conj_def] vcpuSave_hyp[unfolded pred_conj_def] + dmo_vcpu_hyp vcpuSave_valid_arch_state' + | strengthen valid_arch_state'_armHSCurVCPU_update | simp)+ + apply (auto simp: valid_arch_state'_def pred_conj_def) + done + +lemma invs_no_cicd'_armHSCurVCPU_update[simp]: + "ko_wp_at' (is_vcpu' and hyp_live') v s \ invs_no_cicd' s \ + invs_no_cicd' (s\ksArchState := armHSCurVCPU_update (\_. Some (v, b)) (ksArchState s)\)" + by (clarsimp simp: invs_no_cicd'_def valid_state'_def + bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def + valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def cur_tcb'_def) + +lemma invs'_armHSCurVCPU_update[simp]: + "ko_wp_at' (is_vcpu' and hyp_live') v s \ + invs' s \ invs' (s\ksArchState := armHSCurVCPU_update (\_. Some (v, b)) (ksArchState s)\)" + apply (clarsimp simp: invs'_def valid_state'_def + bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def + valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def cur_tcb'_def) + done + +lemma armHSCurVCPU_None_invs'[wp]: + "modifyArchState (armHSCurVCPU_update Map.empty) \invs'\" + apply (wpsimp simp: modifyArchState_def) + by (clarsimp simp: invs'_def valid_state'_def valid_machine_state'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_arch_state'_def valid_global_refs'_def global_refs'_def) + +lemma setVCPU_vgic_invs': + "\invs' and ko_at' vcpu v\ + setObject v (vcpuVGIC_update f vcpu) \\_. invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. vcpuVGIC_update f vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift + setVCPU_vgic_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_regs_invs': + "\invs' and ko_at' vcpu v\ setObject v (vcpuRegs_update f vcpu) \\_. 
invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. vcpuRegs_update f vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift + setVCPU_regs_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_VPPIMasked_invs': + "\invs' and ko_at' vcpu v\ setObject v (vcpuVPPIMasked_update f vcpu) \\_. invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. vcpuVPPIMasked_update f vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift + setVCPU_VPPIMasked_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_VTimer_invs': + "\invs' and ko_at' vcpu v\ setObject v (vcpuVTimer_update f vcpu) \\_. invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. 
vcpuVTimer_update f vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift valid_bitmaps_lift + setVCPU_VTimer_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma vcpuWriteReg_invs'[wp]: + "vcpuWriteReg vcpu_ptr r v \invs'\" + by (wpsimp simp: vcpuWriteReg_def vcpuUpdate_def wp: setVCPU_regs_invs') + +lemma vcpuSaveReg_invs'[wp]: + "vcpuSaveReg v r \invs'\" + by (wpsimp simp: vcpuSaveReg_def vcpuUpdate_def wp: setVCPU_regs_invs') + +lemma saveVirtTimer_invs'[wp]: + "saveVirtTimer vcpu_ptr \invs'\" + unfolding saveVirtTimer_def + by (wpsimp wp: dmo'_gets_wp setVCPU_vgic_invs' setVCPU_regs_invs' dmo_maskInterrupt_True + setVCPU_VTimer_invs' + simp: doMachineOp_bind vcpuUpdate_def read_cntpct_def check_export_arch_timer_def) + +lemma vcpuDisable_invs'[wp]: + "vcpuDisable v \invs'\" + unfolding vcpuDisable_def isb_def setHCR_def setSCTLR_def set_gic_vcpu_ctrl_hcr_def + getSCTLR_def get_gic_vcpu_ctrl_hcr_def dsb_def vgicUpdate_def vcpuUpdate_def + vcpuSaveReg_def enableFpuEL01_def + by (wpsimp wp: dmo'_gets_wp setVCPU_vgic_invs' setVCPU_regs_invs' dmo_maskInterrupt_True + hoare_drop_imps + simp: doMachineOp_bind empty_fail_cond) + +lemma vcpuInvalidateActive_invs'[wp]: + "vcpuInvalidateActive \invs'\" + unfolding vcpuInvalidateActive_def by wpsimp + +crunches + vcpuRestoreReg, vcpuRestoreRegRange, vcpuSaveReg, vcpuSaveRegRange, vgicUpdateLR, vcpuReadReg + for invs'[wp]: invs' + (wp: crunch_wps setVCPU_regs_invs' setVCPU_vgic_invs' simp: vcpuUpdate_def + ignore: doMachineOp vcpuUpdate) + +lemma restoreVirtTimer_invs'[wp]: + "restoreVirtTimer vcpu_ptr \ invs'\" + unfolding restoreVirtTimer_def + by (wpsimp wp: maskInterrupt_invs' getIRQState_wp dmo'_gets_wp dmo_machine_op_lift_invs' + simp: IRQ_def if_apply_def2 read_cntpct_def isIRQActive_def) + +lemma vcpuEnable_invs'[wp]: + "vcpuEnable v \ invs'\" + unfolding vcpuEnable_def + by (wpsimp | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuRestore_invs'[wp]: + "\invs'\ vcpuRestore v \\_. invs'\" + unfolding vcpuRestore_def + by (wpsimp simp: vcpuRestore_def uncurry_def split_def doMachineOp_mapM_x + wp: mapM_x_wp[OF _ subset_refl] + | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuSave_invs': + "\invs'\ vcpuSave v \\_. invs'\" + by (wpsimp simp: vcpuSave_def doMachineOp_mapM armvVCPUSave_def + get_gic_vcpu_ctrl_apr_def get_gic_vcpu_ctrl_vmcr_def + get_gic_vcpu_ctrl_hcr_def getSCTLR_def + wp: dmo'_gets_wp vgicUpdate_invs' mapM_x_wp[OF _ subset_refl]) + +lemma vcpuSwitch_invs'[wp]: + "\invs' and (case v of None \ \ | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x)\ + vcpuSwitch v \\_. 
invs'\" + apply (wpsimp simp: vcpuSwitch_def modifyArchState_def + wp: vcpuDisable_hyp[simplified pred_conj_def] vcpuSave_hyp[unfolded pred_conj_def] + dmo_vcpu_hyp vcpuSave_invs' + | strengthen invs'_armHSCurVCPU_update | simp)+ + apply (auto simp: invs'_def valid_state'_def valid_arch_state'_def pred_conj_def) + done + +lemma vcpuSwitch_invs_no_cicd'[wp]: + "\invs_no_cicd' and (case v of None \ \ | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x)\ + vcpuSwitch v \\_. invs_no_cicd'\" + apply (wpsimp simp: vcpuSwitch_def modifyArchState_def + wp: vcpuDisable_hyp[simplified pred_conj_def] vcpuSave_hyp[unfolded pred_conj_def] + gets_wp vcpuSave_invs_no_cicd' dmo_vcpu_hyp + | strengthen invs_no_cicd'_armHSCurVCPU_update | simp)+ + apply (auto simp: invs_no_cicd'_def valid_state'_def valid_arch_state'_def pred_conj_def) + done + +crunches loadVMID + for inv: P + +lemma updateASIDPoolEntry_valid_arch_state'[wp]: + "updateASIDPoolEntry f asid \valid_arch_state'\" + unfolding updateASIDPoolEntry_def + by (wpsimp wp: getObject_inv hoare_drop_imps simp: loadObject_default_def) + +lemma invalidateVMIDEntry_valid_arch_state'[wp]: + "invalidateVMIDEntry vmid \valid_arch_state'\" + unfolding invalidateVMIDEntry_def + by (wpsimp simp: valid_arch_state'_def ran_def cong: option.case_cong) + +lemma valid_arch_state'_vmid_Some_upd: + "\ valid_arch_state' s; 0 < asid \ \ + valid_arch_state' (s\ksArchState := armKSVMIDTable_update + (\_. (armKSVMIDTable (ksArchState s))(vmid \ asid)) + (ksArchState s)\)" + by (simp add: valid_arch_state'_def ran_def cong: option.case_cong) + +lemma storeVMID_valid_arch_state'[wp]: + "\valid_arch_state' and K (0 < asid)\ storeVMID asid vmid \\_. valid_arch_state'\" + unfolding storeVMID_def + by (wpsimp simp_del: fun_upd_apply | strengthen valid_arch_state'_vmid_Some_upd)+ + +crunches armContextSwitch, setGlobalUserVSpace + for valid_arch_state'[wp]: valid_arch_state' + +(* FIXME AARCH64 consolidated VCPU block ends here *) + +(* FIXME AARCH64: replacing getSlotCap_wp is probably going to be too much breakage, but + rename would be good *) +lemma getSlotCap_actual_wp: + "\\s. \cap. cteCaps_of s p = Some cap \ Q cap s\ + getSlotCap p \Q\" + unfolding getSlotCap_def + by (wpsimp wp: getCTE_cteCap_wp split: option.splits) + +lemma setVMRoot_valid_arch_state'[wp]: + "\valid_arch_state' and valid_objs' and live_vcpu_at_tcb p\ + setVMRoot p + \\rv. valid_arch_state'\" + apply (simp add: setVMRoot_def getThreadVSpaceRoot_def locateSlotTCB_def locateSlotBasic_def) + apply ((wpsimp wp: hoare_vcg_ex_lift hoare_vcg_all_lift + getObject_tcb_wp valid_case_option_post_wp' getSlotCap_actual_wp + simp: if_apply_def2 + | wp hoare_drop_imps)+) + apply (fastforce simp: cteCaps_of_def valid_cap'_def wellformed_mapdata'_def + dest!: ctes_of_valid_cap'') + done + +crunches setVMRoot + for ksQ[wp]: "\s. 
P (ksReadyQueues s)" + (simp: updateObject_default_def o_def loadObject_default_def if_apply_def2 + wp: crunch_wps getObject_inv) + +lemma handleVMFault_corres: + "corres (fr \ dc) (tcb_at thread and pspace_aligned and pspace_distinct) \ + (handle_vm_fault thread fault) (handleVMFault thread fault)" + supply if_split[split del] + apply (rule corres_cross_over_guard[where Q="tcb_at' thread"]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (simp add: AARCH64_H.handleVMFault_def handle_vm_fault_def) + apply (cases fault) + (* ARMDataAbort *) + apply (simp add: curVCPUActive_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE, simp, + rule corres_machine_op[where r="(=)"], + rule corres_Id refl, rule refl, simp, simp)+ + (* only do S1 translation if current VCPU active *) + apply (simp add: bind_liftE_distrib bindE_assoc) + apply (rule corres_splitEE[OF corres_liftE_lift[OF corres_gets_current_vcpu]]) + apply (clarsimp simp: liftE_return_bindE bindE_assoc) + apply (rule corres_split_eqrE[OF corres_if]) + apply fastforce + apply (rule corres_split_eqrE, simp) + apply (rule corres_returnOkTT, simp) + apply simp + apply (rule corres_splitEE, simp, + rule corres_machine_op[where r="(=)"], + rule corres_Id refl, rule refl, simp, simp)+ + apply (rule corres_returnOkTT, simp) + apply wpsimp+ + apply (rule corres_returnOkTT, simp) + apply (rule corres_trivial) + apply simp + apply (wpsimp simp: if_apply_def2)+ + (* ARMPrefetchAbort *) + apply (simp add: curVCPUActive_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE,simp) + apply (rule asUser_corres') + apply (rule corres_no_failI [where R="(=)"]) + apply (rule no_fail_getRestartPC) + apply fastforce + apply (rule corres_splitEE,simp, + rule corres_machine_op [where r="(=)"], + rule corres_Id refl, rule refl, simp, simp)+ + (* only do S1 translation if current VCPU active *) + apply (simp add: bind_liftE_distrib bindE_assoc) + apply (rule corres_splitEE[OF corres_liftE_lift[OF corres_gets_current_vcpu]]) + apply (clarsimp simp: liftE_return_bindE bindE_assoc) + apply (rule corres_split_eqrE[OF corres_if]) + apply fastforce + apply (rule corres_split_eqrE, simp) + apply (rule corres_returnOkTT, simp) + apply simp + apply (rule corres_splitEE, simp, + rule corres_machine_op[where r="(=)"], + rule corres_Id refl, rule refl, simp, simp)+ + apply (rule corres_returnOkTT, simp) + apply wpsimp+ + apply (rule corres_returnOkTT, simp) + apply (rule corres_trivial, simp) + apply (wpsimp simp: if_apply_def2)+ + done + +crunches findFreeVMID, loadVMID + for no_0_obj'[wp]: no_0_obj' + (wp: crunch_wps getObject_inv simp: o_def loadObject_default_def) + +lemma mask_is_asid_low_bits_of[simp]: + "(ucast asid :: machine_word) && mask asid_low_bits = ucast (asid_low_bits_of asid)" + by (word_eqI_solve simp: asid_low_bits_of_def asid_low_bits_def) + +declare corres_gets_asid[corres] (* FIXME AARCH64: declare at origin *) +declare getPoolPtr_corres[corres] (* FIXME AARCH64: declare at origin *) +declare getObject_ASIDPool_corres[corres] (* FIXME AARCH64: declare at origin *) + +lemma getASIDPoolEntry_corres'[corres]: + "asid' = ucast asid \ + corres (\r r'. r = map_option abs_asid_entry r') + (\s. pspace_aligned s \ pspace_distinct s \ + (\p. 
pool_for_asid asid s = Some p \ asid_pool_at p s) \ 0 < asid) + \ + (gets (entry_for_asid asid)) + (getASIDPoolEntry asid')" + unfolding entry_for_asid_def getASIDPoolEntry_def + apply (clarsimp simp: gets_obind_bind_eq entry_for_pool_def obind_comp_dist + cong: option.case_cong) + apply (corres corres: getPoolPtr_corres | corres_cases_both)+ + apply (rule monadic_rewrite_corres_l) + apply (monadic_rewrite_l gets_oapply_liftM_rewrite) + apply (rule monadic_rewrite_refl) + apply (corres simp: liftM_def asid_pool_relation_def asid_pools_at_eq corres: corres_returnTT + | corres_cases)+ + done + +lemma getASIDPoolEntry_get_the_corres[corres]: + "asid' = ucast asid \ + corres (\r r'. map_option abs_asid_entry r' = Some r) + (\s. pspace_aligned s \ pspace_distinct s \ entry_for_asid asid s \ None \ 0 < asid) + \ + (gets_the (entry_for_asid asid)) + (getASIDPoolEntry asid')" + apply (simp add: gets_the_def cong: corres_weak_cong) + apply (rule corres_bind_return2) + apply (corres simp: entry_for_asid_def entry_for_pool_def in_omonad obj_at_def) + done + +lemma loadVMID_corres[corres]: + "asid' = ucast asid \ + corres (=) + (pspace_aligned and pspace_distinct and (\s. vspace_for_asid asid s \ None)) + \ + (load_vmid asid) (loadVMID asid')" + unfolding load_vmid_def loadVMID_def + apply corres + apply (corres_cases, rule corres_inst[where P=\ and P'=\], clarsimp) + apply (corres_cases, rule corres_returnTT, clarsimp simp: abs_asid_entry_def) + apply wpsimp+ + apply (clarsimp simp: vspace_for_asid_def) + apply clarsimp + done + +lemma updateASIDPoolEntry_corres[corres]: + assumes eq: "asid' = ucast asid" + assumes abs: "\e. map_option abs_asid_entry (f' e) = f (abs_asid_entry e)" + shows "corres dc + ((\s. entry_for_asid asid s \ None \ 0 < asid) + and pspace_aligned and pspace_distinct) + \ + (update_asid_pool_entry f asid) + (updateASIDPoolEntry f' asid')" + unfolding update_asid_pool_entry_def updateASIDPoolEntry_def + apply (simp add: gets_the_def bind_assoc eq) + apply (corres simp: liftM_def + term_simp: asid_pool_relation_def asid_low_bits_of_def + mask_asid_low_bits_ucast_ucast ucast_ucast_mask2 + is_down ucast_and_mask) + apply (rule ext) + apply (clarsimp simp: asid_pool_relation_def asid_low_bits_of_def + mask_asid_low_bits_ucast_ucast ucast_ucast_mask2 + is_down ucast_and_mask abs) + apply (erule notE) + apply word_eqI_solve + apply wpsimp+ + apply (clarsimp simp: entry_for_asid_def entry_for_pool_def asid_pools_at_eq) + apply simp + done + +lemma gets_armKSVMIDTable_corres[corres]: + "corres (\t t'. t' = map_option UCAST(16 \ 64) \ t) + \ \ + (gets (arm_vmid_table \ arch_state)) (gets (armKSVMIDTable \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma storeVMID_corres[corres]: + "\ asid' = ucast asid; vmid' = vmid \ \ + corres dc + (pspace_aligned and pspace_distinct and (\s. vspace_for_asid asid s \ None)) + \ + (store_vmid asid vmid) (storeVMID asid' vmid')" + unfolding store_vmid_def storeVMID_def + apply (corres simp: abs_asid_entry_def corres: corres_modify_tivial) + apply (fastforce simp: state_relation_def arch_state_relation_def) + apply wpsimp+ + apply (clarsimp simp: vspace_for_asid_def) + apply simp + done + +lemma invalidateASID_corres[corres]: + "asid' = ucast asid \ + corres dc + ((\s. 
entry_for_asid asid s \ None \ 0 < asid) and pspace_aligned and pspace_distinct) + \ + (invalidate_asid asid) (invalidateASID asid')" + unfolding invalidate_asid_def invalidateASID_def + by (corres simp: abs_asid_entry_def entry_for_asid_def) + +lemma gets_armKSNextVMID_corres[corres]: + "corres (=) \ \ + (gets (arm_next_vmid \ arch_state)) (gets (armKSNextVMID \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma take_vmid_minBound_maxBound: + "take (length [minBound .e. maxBound :: vmid]) + ([next_vmid .e. maxBound] @ [minBound .e. next_vmid]) + = [next_vmid .e. maxBound] @ init [minBound .e. next_vmid]" + for next_vmid :: vmid + using leq_maxBound[where x=next_vmid] + by (simp add: word_le_nat_alt init_def upto_enum_word minBound_word) + +(* FIXME AARCH64: move to SubMonad *) +lemmas corres_machine_op_Id = corres_machine_op[OF corres_Id] +lemmas corres_machine_op_Id_eq[corres_term] = corres_machine_op_Id[where r="(=)"] +lemmas corres_machine_op_Id_dc[corres_term] = corres_machine_op_Id[where r="dc::unit \ unit \ bool"] + +lemma invalidateVMIDEntry_corres[corres]: + "vmid' = vmid \ + corres dc \ \ (invalidate_vmid_entry vmid) (invalidateVMIDEntry vmid')" + unfolding invalidate_vmid_entry_def invalidateVMIDEntry_def + by (corres' \fastforce simp: state_relation_def arch_state_relation_def\ + corres: corres_modify_tivial) + +lemma valid_vmid_tableD: + "\ valid_vmid_table s; vmid_table s vmid = Some asid \ \ 0 < asid" + apply (subgoal_tac "asid \ 0") + apply (simp add: word_neq_0_conv) + apply (fastforce simp: valid_vmid_table_def) + done + +lemma findFreeVMID_corres[corres]: + "corres (=) + (vmid_inv and valid_vmid_table and pspace_aligned and pspace_distinct) + \ + find_free_vmid findFreeVMID" + unfolding find_free_vmid_def findFreeVMID_def + apply (simp only: take_vmid_minBound_maxBound) + apply corres + apply corres_cases_both (* case find .. of *) + (* Only None case left over *) + apply corres + apply (clarsimp dest!: findNoneD) + apply (drule bspec, rule UnI1, simp, rule order_refl) + apply clarsimp + apply (corres corres: corres_modify_tivial (* FIXME AARCH64: fix typo *) + simp: state_relation_def arch_state_relation_def maxBound_word minBound_word) + apply wpsimp+ + apply (clarsimp dest!: findNoneD) + apply (drule bspec, rule UnI1, simp, rule order_refl) + apply (clarsimp simp: vmid_inv_def) + apply (frule (1) valid_vmid_tableD) + apply (drule (1) is_inv_SomeD) + apply (clarsimp simp: entry_for_asid_def) + apply (clarsimp simp: vmid_for_asid_2_def in_omonad entry_for_pool_def pool_for_asid_def + if_option_eq) + apply simp + done + +lemma getVMID_corres[corres]: + "asid' = ucast asid \ + corres (=) + (vmid_inv and valid_vmid_table and pspace_aligned and pspace_distinct + and (\s. vspace_for_asid asid s \ None)) + \ + (get_vmid asid) (getVMID asid')" + unfolding get_vmid_def getVMID_def + by (corres wp: hoare_drop_imps simp: vspace_for_asid_def entry_for_asid_def | corres_cases_both)+ + +lemma armContextSwitch_corres[corres]: + "asid' = ucast asid \ + corres dc + (vmid_inv and valid_vmid_table and pspace_aligned and pspace_distinct + and (\s. 
vspace_for_asid asid s \ None)) + \ + (arm_context_switch pt asid) (armContextSwitch pt asid')" + unfolding arm_context_switch_def armContextSwitch_def + by corres + +lemma setVMRoot_corres [corres]: + assumes "t' = t" + shows "corres dc (tcb_at t and valid_vspace_objs and valid_asid_table and + vmid_inv and valid_vmid_table and pspace_aligned and pspace_distinct and + valid_objs and valid_global_arch_objs and pspace_in_kernel_window and valid_uses) + (no_0_obj') + (set_vm_root t) (setVMRoot t')" +proof - + have global: + "(\s. P s \ valid_global_arch_objs s) \ + corres dc P Q set_global_user_vspace setGlobalUserVSpace" for P Q + unfolding set_global_user_vspace_def setGlobalUserVSpace_def o_def[where g=arch_state] + by (corresKsimp corres: corres_gets_global_pt corres_machine_op) + + show ?thesis + unfolding set_vm_root_def setVMRoot_def catchFailure_def withoutFailure_def throw_def + apply (rule corres_cross_over_guard[where Q="no_0_obj' and pspace_distinct' and pspace_aligned'"]) + apply (clarsimp simp add: pspace_distinct_cross pspace_aligned_cross state_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="(=) \ cte_map" and P=\ and P'=\]) + apply (simp add: getThreadVSpaceRoot_def locateSlotTCB_def locateSlotBasic_def + tcbVTableSlot_def cte_map_def objBits_def cte_level_bits_def + objBitsKO_def tcb_cnode_index_def to_bl_1 assms cteSizeBits_def) + apply (rule_tac R="\thread_root. valid_vspace_objs and valid_asid_table and vmid_inv and + valid_vmid_table and pspace_aligned and pspace_distinct and + valid_objs and valid_global_arch_objs and + pspace_in_kernel_window and valid_uses and + cte_wp_at ((=) thread_root) thread_root_slot and + tcb_at (fst thread_root_slot) and + K (snd thread_root_slot = tcb_cnode_index 1)" + and R'="\thread_root. no_0_obj'" + in corres_split[OF getSlotCap_corres]) + apply simp + apply simp + apply (rename_tac cap cap') + apply (rule_tac Q="no_0_obj' and (\_. isValidVTableRoot cap' \ cap' = NullCap)" + in corres_cross_over_guard) + apply clarsimp + apply (drule (1) tcb_cap_wp_at[where ref="tcb_cnode_index 1" and + Q="\cap. is_valid_vtable_root cap \ cap=Structures_A.NullCap"]) + apply (simp add: tcb_cap_cases_def) + apply clarsimp + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule disjE; simp?) + apply (clarsimp simp: is_valid_vtable_root_def + split: cap.splits arch_cap.splits option.splits pt_type.splits) + apply (simp add: isValidVTableRoot_def isVTableRoot_def) + apply (rule corres_guard_imp) + apply (rule_tac P="valid_vspace_objs and valid_asid_table and pspace_aligned and + valid_vmid_table and vmid_inv and pspace_distinct and valid_objs and + pspace_in_kernel_window and valid_uses and + valid_global_arch_objs and cte_wp_at ((=) cap) thread_root_slot" + in corres_assert_gen_asm2) + prefer 3 + apply assumption + apply (case_tac cap; clarsimp simp: isCap_simps catch_throwError intro!: global) + apply (rename_tac acap acap') + apply (case_tac acap; clarsimp simp: isCap_simps catch_throwError intro!: global) + apply (rename_tac pt_t m) + apply (case_tac pt_t; clarsimp simp: isCap_simps catch_throwError intro!: global) + apply (case_tac m; clarsimp simp: isCap_simps catch_throwError intro!: global) + apply (rule corres_guard_imp) + apply (rule corres_split_catch [where f=lfr and E'="\_. 
\"]) + apply (rule corres_split_eqrE[OF findVSpaceForASID_corres[OF refl]]) + apply (rule whenE_throwError_corres; simp add: lookup_failure_map_def) + apply (simp add: assertE_liftE liftE_bindE) + apply (rule corres_assert_gen_asm) + apply simp + apply (rule armContextSwitch_corres) + apply (wpsimp wp: find_vspace_for_asid_wp findVSpaceForASID_inv hoare_drop_imps)+ + apply (rule global, assumption) + apply wpsimp+ + apply (frule (1) cte_wp_at_valid_objs_valid_cap) + apply (clarsimp simp: valid_cap_def mask_def wellformed_mapdata_def obj_at_def) + apply (drule (3) pspace_in_kw_bounded) + apply (clarsimp simp: kernel_window_range_def pptr_base_def AARCH64.pptrTop_def + AARCH64_H.pptrTop_def) + apply (wpsimp wp: get_cap_wp simp: getThreadVSpaceRoot_def)+ + apply (auto dest!: tcb_at_cte_at_1) + done +qed + +lemma dMo_no_0_obj'[wp]: + "doMachineOp f \no_0_obj'\" + apply (simp add: doMachineOp_def split_def) + apply wp + by (simp add: no_0_obj'_def) + +lemma dMo_riscvKSASIDTable_inv[wp]: + "doMachineOp f \\s. P (armKSASIDTable (ksArchState s))\" + apply (simp add: doMachineOp_def split_def) + apply wp + by (clarsimp) + +lemma dMo_valid_arch_state'[wp]: + "\\s. P (valid_arch_state' s)\ doMachineOp f \\_ s. P (valid_arch_state' s)\" + apply (simp add: doMachineOp_def split_def) + apply wp + by (clarsimp) + +crunches vcpuDisable, vcpuEnable, vcpuSave, vcpuRestore, deleteASID + for no_0_obj'[wp]: no_0_obj' + (simp: crunch_simps wp: crunch_wps getObject_inv getObject_inv_vcpu loadObject_default_inv) + +lemma asid_high_bits_of_ucast_ucast[simp]: + "asid_high_bits_of (ucast (ucast asid :: machine_word)) = asid_high_bits_of asid" + by (simp add: ucast_down_ucast_id is_down) + +lemma invalidateTLBByASID_corres[corres]: + "asid' = ucast asid \ + corres dc + (pspace_aligned and pspace_distinct and (\s. vspace_for_asid asid s \ None)) + \ + (invalidate_tlb_by_asid asid) (invalidateTLBByASID asid')" + unfolding invalidate_tlb_by_asid_def invalidateTLBByASID_def + apply corres + (* when vs case .. of *) + apply (corres_cases; (solves \rule corres_inst[where P=\ and P'=\], clarsimp\)?) + (* when-True case *) + apply (clarsimp, corres) + apply wpsimp+ + done + +lemma invalidate_vmid_entry_entry_for_asid[wp]: + "invalidate_vmid_entry vmid \\s. P (entry_for_asid asid s)\" + unfolding invalidate_vmid_entry_def + by wpsimp + +lemma invalidateASIDEntry_corres[corres]: + "asid' = ucast asid \ + corres dc + (pspace_aligned and pspace_distinct and (\s. 
vspace_for_asid asid s \ None)) + \ + (invalidate_asid_entry asid) (invalidateASIDEntry asid')" + unfolding invalidate_asid_entry_def invalidateASIDEntry_def + by (corres simp: vspace_for_asid_def) + +lemma deleteASID_corres [corres]: + assumes "asid' = ucast asid" "pm' = pm" + shows "corres dc (invs and K (asid \ 0)) no_0_obj' + (delete_asid asid pm) (deleteASID asid' pm')" + unfolding delete_asid_def deleteASID_def using assms + apply simp + apply (corres simp: liftM_def | corres_cases_both)+ + apply (simp add: mask_asid_low_bits_ucast_ucast asid_low_bits_of_def ucast_ucast_a is_down + asid_pool_relation_def abs_asid_entry_def split: option.splits) + apply corres + apply (rule ext) + apply (clarsimp simp: mask_asid_low_bits_ucast_ucast asid_low_bits_of_def + ucast_ucast_a is_down asid_pool_relation_def) + apply (erule notE) + apply word_eqI_solve + apply (corres corres: getCurThread_corres) + apply (wpsimp simp: cur_tcb_def[symmetric] + wp: set_asid_pool_None_vmid_inv set_asid_pool_vspace_objs_unmap_single) + apply (wp getASID_wp)+ + apply (rename_tac p pool pool' a b) + apply (rule_tac Q="\_ s. invs s \ + (\high. asid_table s high = Some p \ + vmid_for_asid s (asid_of high (asid_low_bits_of asid)) = + None)" in hoare_strengthen_post) + apply (wp hoare_vcg_ex_lift invalidate_asid_entry_vmid_for_asid) + apply (fastforce simp: asid_pools_at_eq ako_asid_pools_of) + apply (wp hoare_drop_imp hoare_vcg_all_lift) + apply (wp invalidate_tlb_by_asid_invs hoare_vcg_ex_lift) + apply wp + apply (clarsimp, wp) + apply (wp getASID_wp) + apply wp + apply (wp hoare_vcg_all_lift hoare_drop_imp) + apply (fastforce simp: pool_for_asid_def vspace_for_asid_def entry_for_asid_def word_neq_0_conv + entry_for_pool_def in_omonad + intro!: pool_for_asid_ap_at) + apply simp + done + +lemma valid_arch_state_unmap_strg': + "valid_arch_state' s \ + valid_arch_state' (s\ksArchState := + armKSASIDTable_update (\_. (armKSASIDTable (ksArchState s))(ptr := None)) + (ksArchState s)\)" + apply (simp add: valid_arch_state'_def valid_asid_table'_def) + apply (auto simp: ran_def split: if_split_asm option.splits) + done + +crunch armKSASIDTable_inv[wp]: invalidateASIDEntry + "\s. P (armKSASIDTable (ksArchState s))" + (wp: getObject_inv crunch_wps simp: loadObject_default_def) + +lemma is_aligned_asid_low_bits_of_zero: + "is_aligned asid asid_low_bits \ asid_low_bits_of asid = 0" + apply (simp add: is_aligned_mask word_eq_iff word_size asid_bits_defs asid_bits_of_defs nth_ucast) + apply (intro iffI allI; drule_tac x=n in spec; fastforce) + done + +lemma asid_high_bits_of_0[simp]: + "asid_high_bits_of 0 = 0" + by (simp add: asid_high_bits_of_def) + +lemma asid_low_bits_of_0[simp]: + "asid_low_bits_of 0 = 0" + by (simp add: asid_low_bits_of_def) + +lemma invalidate_asid_entry_asid_pool_doms[wp]: + "invalidate_asid_entry asid \\s. 
P (asid_pools_of s ||> dom)\" + unfolding invalidate_asid_entry_def invalidate_asid_def invalidate_vmid_entry_def + apply wpsimp + apply (fastforce simp: opt_map_def split: option.splits elim!: rsubst[where P=P]) + done + +declare getCurThread_corres[corres] (* FIXME AARCH64: declare at origin *) + +lemma valid_asid_table_None_upd: + "valid_asid_table_2 table pools \ valid_asid_table_2 (table(idx := None)) pools" + unfolding valid_asid_table_2_def + by (auto simp: ran_def inj_on_def) + +lemma asid_low_le_mask_asidBits[simp]: + "UCAST(asid_low_len \ asid_len) asid_low \ mask asid_low_bits" + by (rule ucast_leq_mask, simp add: asid_low_bits_def) + +lemma ucast_eq_from_zip_asid_low_bits: + "\(x, y) \ set (zip [0 .e. mask asid_low_bits] [0 .e. mask asid_low_bits]); + is_aligned asid asid_low_bits\ + \ ucast asid + y = ucast (asid + x)" for asid :: AARCH64_A.asid + apply (clarsimp simp: in_set_zip upto_enum_word_nth) + apply (subst add.commute[where a=asid]) + apply (drule nat_le_Suc_less_imp)+ + apply (simp add: ucast_add_mask_aligned[where n=asid_low_bits] mask_def word_le_nat_alt + asid_low_bits_def unat_of_nat_eq ucast_of_nat is_down ucast_of_nat_small) + done + +lemma deleteASIDPool_corres: + assumes "base' = ucast base" "ptr' = ptr" + shows "corres dc (invs and K (is_aligned base asid_low_bits) and asid_pool_at ptr) + (no_0_obj') + (delete_asid_pool base ptr) (deleteASIDPool base' ptr)" + using assms + apply (simp add: delete_asid_pool_def deleteASIDPool_def) + apply (corres simp: liftM_def mapM_discarded) + apply corres_split (* deal with mapM_x manually *) + apply (rule_tac P="\s. invs s \ pool_for_asid base s = Some ptr \ + (asid_pools_of s ||> dom) ptr = Some (dom pool) \ + is_aligned base asid_low_bits" + and P'="no_0_obj'" in corres_mapM_x') + (* mapM_x body *) + apply corres + (* "when" condition *) + apply (clarsimp simp: asid_pool_relation_def in_set_zip upto_enum_word_nth) + apply (simp add: ucast_of_nat is_down asid_low_bits_def ucast_of_nat_small) + apply (rule corres_gen_asm[where F="is_aligned base asid_low_bits"]) + apply (corres term_simp: ucast_eq_from_zip_asid_low_bits mask_def) + apply clarsimp + apply (rename_tac low low' s s' entry) + apply (clarsimp simp: vspace_for_asid_def entry_for_asid_def pool_for_asid_def + in_omonad asid_high_bits_of_add asid_low_bits_of_add + mask_def entry_for_pool_def + dest!: set_zip_leftD) + apply (rule conjI, fastforce) + apply (clarsimp simp flip: word_neq_0_conv mask_2pm1) + apply (prop_tac "valid_asid_map s", fastforce) + apply (prop_tac "base = 0 \ low = 0") + apply (simp add: asid_low_bits_def) + apply (subst (asm) word_plus_and_or_coroll, word_eqI, force) + apply (fastforce simp: word_or_zero) + apply (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq + pool_for_asid_def entry_for_pool_def in_omonad) + apply blast + apply fastforce + apply (wpsimp wp: invalidate_tlb_by_asid_invs)+ + apply (simp add: mask_def asid_low_bits_def) + apply (corres' \fastforce simp: asid_high_bits_of_def asid_low_bits_def up_ucast_inj_eq + state_relation_def arch_state_relation_def\ + corres: corres_modify_tivial) + (* mapM_x wp conditions *) + apply (rename_tac table table' pool pool') + apply (rule hoare_strengthen_post) + apply (rule_tac I="\s. invs s \ is_aligned base asid_low_bits \ table = asid_table s \ + pool_for_asid base s = Some ptr \ + (asid_pools_of s ||> dom) ptr = Some (dom pool)" and + V="\xs s. \asid_low \ set xs. 
+ vmid_for_asid s (asid_of (asid_high_bits_of base) + (ucast asid_low)) = None" + in mapM_x_inv_wp3) + apply (wpsimp wp: invalidate_asid_entry_vmid_for_asid_add hoare_vcg_op_lift + invalidate_tlb_by_asid_invs) + apply (rule conjI; clarsimp) + apply (drule arg_cong[where f=set], drule sym[where t="set xs" for xs]) + apply fastforce + apply (clarsimp simp: vmid_for_asid_def obind_None_eq) + apply (rule ccontr) + apply (clarsimp simp: entry_for_pool_def in_omonad pool_for_asid_def) + apply (fastforce dest: dom_eq_All) + (* mapM_x invariant implies post condition; + some manual massaging to avoid massive duplication *) + apply (simp (no_asm) del: fun_upd_apply) + apply (strengthen invs_vmid_inv invs_valid_global_arch_objs invs_implies invs_valid_uses + invs_valid_vmid_table valid_asid_table_None_upd) + (* can't move these into previous strengthen, otherwise will be applied too early *) + apply (strengthen invs_arm_asid_table_unmap invs_valid_asid_table) + apply (clarsimp simp: o_def) + apply (rename_tac asid_low) + apply (erule_tac x="ucast asid_low" in allE) + apply (fastforce simp: ucast_up_ucast_id is_up) + apply (wpsimp wp: mapM_x_wp' getASID_wp)+ + apply (fastforce simp: is_aligned_asid_low_bits_of_zero pool_for_asid_def in_omonad) + apply (clarsimp simp: is_aligned_asid_low_bits_of_zero) + done + +crunch typ_at' [wp]: setVMRoot "\s. P (typ_at' T p s)" + (simp: crunch_simps loadObject_default_def wp: crunch_wps getObject_inv) + +lemmas setVMRoot_typ_ats [wp] = typ_at_lifts [OF setVMRoot_typ_at'] + +lemma getObject_PTE_corres'': + assumes "p' = p" + shows "corres pte_relation' (pte_at pt_t p and pspace_aligned and pspace_distinct) \ + (get_pte pt_t p) (getObject p')" + using assms getObject_PTE_corres by simp + +crunches unmapPageTable, unmapPage + for aligned'[wp]: "pspace_aligned'" + and distinct'[wp]: "pspace_distinct'" + and ctes [wp]: "\s. P (ctes_of s)" + and typ_at'[wp]: "\s. P (typ_at' T p s)" + (simp: crunch_simps + wp: crunch_wps getObject_inv loadObject_default_inv) + +crunches storePTE + for no_0_obj'[wp]: no_0_obj' + and valid_arch'[wp]: valid_arch_state' + and cur_tcb'[wp]: cur_tcb' + and pspace_canonical'[wp]: pspace_canonical' + +lemma unmapPageTable_corres: + assumes "asid' = ucast asid" "vptr' = vptr" "pt' = pt" + shows "corres dc + (invs and (\s. vspace_for_asid asid s \ Some pt) and K (0 < asid \ vptr \ user_region)) + no_0_obj' + (unmap_page_table asid vptr pt) + (unmapPageTable asid' vptr' pt')" + apply (clarsimp simp: assms unmap_page_table_def unmapPageTable_def ignoreFailure_def const_def) + apply (corres corres: findVSpaceForASID_corres lookupPTFromLevel_corres storePTE_corres' + corres_returnTT + wp: pt_lookup_from_level_wp + | corres_cases_left)+ + apply (fastforce simp: pte_at_def dest: vspace_for_asid_vs_lookup) + apply simp + done + +(* FIXME AARCH64: move (all arches) *) +lemma corres_split_strengthen_ftE: + "\ corres (ftr \ r') P P' f j; + \rv rv'. r' rv rv' \ corres (ftr' \ r) (R rv) (R' rv') (g rv) (k rv'); + \Q\ f \R\,-; \Q'\ j \R'\,- \ + \ corres (dc \ r) (P and Q) (P' and Q') (f >>=E (\rv. g rv)) (j >>=E (\rv'. 
k rv'))" + apply (rule_tac r'=r' in corres_splitEE) + apply (erule corres_rel_imp) + apply (case_tac x, auto)[1] + apply (rule corres_rel_imp, assumption) + apply (case_tac x, auto)[1] + apply (simp add: validE_R_def)+ + done + +lemma checkMappingPPtr_corres: + "\ pte_relation' pte pte'; pptr' = pptr \ \ + corres (lfr \ dc) \ \ + (whenE (AARCH64_A.is_PagePTE pte \ pptr_from_pte pte \ pptr) + (throwError ExceptionTypes_A.InvalidRoot)) + (checkMappingPPtr pptr' pte')" + apply (simp add: liftE_bindE checkMappingPPtr_def) + apply (cases pte; simp add: pte_base_addr_def pptr_from_pte_def) + apply (auto simp: whenE_def unlessE_def corres_returnOk lookup_failure_map_def) + done + +crunch inv[wp]: checkMappingPPtr "P" + (wp: crunch_wps loadObject_default_inv simp: crunch_simps) + +lemmas liftE_get_pte_corres = getObject_PTE_corres[THEN corres_liftE_rel_sum[THEN iffD2]] + +lemma invalidateTLBByASIDVA_corres[corres]: + "\ asid' = ucast asid; vptr' = vptr \ \ + corres dc + (pspace_aligned and pspace_distinct and (\s. vspace_for_asid asid s \ None)) + \ + (invalidate_tlb_by_asid_va asid vptr) (invalidateTLBByASIDVA asid' vptr')" + unfolding invalidate_tlb_by_asid_va_def invalidateTLBByASIDVA_def + by (corres term_simp: wordBits_def word_bits_def word_size + | corres_cases_left + | rule corres_inst[where P=\ and P'=\], clarsimp)+ + +crunches lookupPTSlot + for inv: "P" + +lemma unmapPage_corres[corres]: + assumes "sz' = sz" "asid' = ucast asid" "vptr' = vptr" "pptr' = pptr" + shows "corres dc (invs and K (valid_unmap sz (asid,vptr) \ vptr \ user_region)) + (no_0_obj') + (unmap_page sz asid vptr pptr) + (unmapPage sz' asid' vptr' pptr')" + apply (clarsimp simp: assms unmap_page_def unmapPage_def ignoreFailure_def const_def) + apply (corres corres: findVSpaceForASID_corres lookupPTSlot_corres[@lift_corres_args] + getObject_PTE_corres' checkMappingPPtr_corres corres_returnTT + simp: lookup_failure_map_def + wp: hoare_drop_imp lookupPTSlot_inv + | corres_cases_both)+ + apply (clarsimp simp: valid_unmap_def cong: conj_cong) + apply (fastforce dest: vspace_for_asid_vs_lookup pt_lookup_slot_vs_lookup_slotI + intro: vs_lookup_slot_pte_at) + apply simp + done + +definition + "mapping_map \ \(pte, r, level) (pte', r'). pte_relation' pte pte' \ r' = r" + +definition + "page_invocation_map pgi pgi' \ case pgi of + AARCH64_A.PageMap c slot m \ + \c' m'. pgi' = PageMap c' (cte_map slot) m' \ + acap_relation c c' \ + mapping_map m m' + | AARCH64_A.PageUnmap c ptr \ + \c'. 
pgi' = PageUnmap c' (cte_map ptr) \ + acap_relation c c' + | AARCH64_A.PageGetAddr ptr \ + pgi' = PageGetAddr ptr + | AARCH64_A.PageFlush type vstart vend pstart vs asid \ + pgi' = PageFlush type vstart vend pstart vs (ucast asid)" + +definition + "valid_page_inv' pgi \ + case pgi of + PageMap cap ptr m \ + K (isPagePTE (fst m)) and + cte_wp_at' (is_arch_update' (ArchObjectCap cap)) ptr and valid_cap' (ArchObjectCap cap) + | PageUnmap cap ptr \ + K (isFrameCap cap) and + cte_wp_at' (is_arch_update' (ArchObjectCap cap)) ptr and valid_cap' (ArchObjectCap cap) + | PageGetAddr ptr \ \ + | PageFlush ty start end pstart space asid \ \" + +lemma message_info_to_data_eqv: + "wordFromMessageInfo (message_info_map mi) = message_info_to_data mi" + apply (cases mi) + apply (simp add: wordFromMessageInfo_def msgLengthBits_def msgExtraCapBits_def msgMaxExtraCaps_def shiftL_nat) + done + +lemma message_info_from_data_eqv: + "message_info_map (data_to_message_info rv) = messageInfoFromWord rv" + using shiftr_mask_eq[where 'a=64 and n=12] + by (auto simp: data_to_message_info_def messageInfoFromWord_def Let_def not_less + msgLengthBits_def msgExtraCapBits_def msgMaxExtraCaps_def mask_def + shiftL_nat msgMaxLength_def msgLabelBits_def) + +lemma setMessageInfo_corres: + "mi' = message_info_map mi \ + corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (set_message_info t mi) (setMessageInfo t mi')" + apply (simp add: setMessageInfo_def set_message_info_def) + apply (subgoal_tac "wordFromMessageInfo (message_info_map mi) = + message_info_to_data mi") + apply (simp add: asUser_setRegister_corres msg_info_register_def + msgInfoRegister_def) + apply (simp add: message_info_to_data_eqv) + done + + +lemma set_mi_invs'[wp]: "\invs' and tcb_at' t\ setMessageInfo t a \\x. invs'\" + by (simp add: setMessageInfo_def) wp + +lemma set_mi_tcb' [wp]: + "\ tcb_at' t \ setMessageInfo receiver msg \\rv. tcb_at' t\" + by (simp add: setMessageInfo_def) wp + + +lemma setMRs_typ_at': + "\\s. P (typ_at' T p s)\ setMRs receiver recv_buf mrs \\rv s. P (typ_at' T p s)\" + by (simp add: setMRs_def zipWithM_x_mapM split_def, wp crunch_wps) + +lemmas setMRs_typ_at_lifts[wp] = typ_at_lifts [OF setMRs_typ_at'] + +lemma set_mrs_invs'[wp]: + "\ invs' and tcb_at' receiver \ setMRs receiver recv_buf mrs \\rv. 
invs' \" + apply (simp add: setMRs_def) + apply (wp dmo_invs' no_irq_mapM no_irq_storeWord crunch_wps| + simp add: zipWithM_x_mapM split_def)+ + done + +crunches unmapPage + for cte_at'[wp]: "cte_at' p" + (wp: crunch_wps simp: crunch_simps) + +lemma vs_lookup_slot_vspace_for_asidD: + "\ vs_lookup_slot level asid vref s = Some (level, slot); level \ max_pt_level; valid_asid_map s \ + \ vspace_for_asid asid s \ None" + by (fastforce simp: vs_lookup_slot_def vs_lookup_table_def vspace_for_asid_def in_omonad + valid_asid_map_def entry_for_asid_def vspace_for_pool_def obind_None_eq + simp flip: word_neq_0_conv + split: if_split_asm) + +lemma performPageInvocation_corres: + assumes "page_invocation_map pgi pgi'" + shows "corres (=) (invs and valid_page_inv pgi) (no_0_obj' and valid_page_inv' pgi') + (perform_page_invocation pgi) (performPageInvocation pgi')" + apply (rule corres_cross_over_guard [where Q="no_0_obj' and valid_page_inv' pgi' and + pspace_aligned' and pspace_distinct'"]) + apply (fastforce intro!: pspace_aligned_cross pspace_distinct_cross) + using assms + unfolding perform_page_invocation_def performPageInvocation_def page_invocation_map_def + apply (cases pgi; clarsimp simp: valid_page_inv_def mapping_map_def) + apply (rename_tac cap ct_slot_ref ct_slot_idx pte slot level cap' pte') + apply (simp add: perform_pg_inv_map_def bind_assoc) + apply (corres corres: updateCap_same_master | fastforce | corres_cases)+ + apply (rule_tac F="arch_cap.is_FrameCap cap" in corres_gen_asm) + apply ((corres corres: corres_assert_opt_l simp: arch_cap.is_FrameCap_def + | corres_cases)+)[1] + apply (rule corres_return_eq_same, simp) + apply wp + apply wp + apply clarsimp + apply (wp get_pte_wp hoare_drop_imp hoare_vcg_op_lift)+ + apply (clarsimp simp: invs_valid_objs invs_distinct invs_psp_aligned) + apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def is_cap_simps same_ref_def) + apply (frule (3) vs_lookup_slot_pte_at) + apply (clarsimp simp: cap_master_cap_def split: arch_cap.splits) + apply (fastforce dest!: vs_lookup_slot_vspace_for_asidD) + apply (clarsimp simp: valid_page_inv'_def cte_wp_at_ctes_of) + apply (simp add: perform_pg_inv_unmap_def bind_assoc) + apply (corres corres: corres_assert_gen_asm_l simp: liftM_def) + apply (corres_cases_both; (solves \rule corres_trivial, clarsimp simp: arch_cap.is_FrameCap_def\)?) 
+ apply (corres corres: getSlotCap_corres) + apply (rename_tac old_cap old_cap') + apply (rule_tac F="is_frame_cap old_cap" in corres_gen_asm) + apply (corres corres: updateCap_same_master + simp: is_frame_cap_def arch_cap.is_FrameCap_def update_map_data_def) + apply (wp get_cap_wp)+ + apply corres_cases_both + apply (corres simp: arch_cap.is_FrameCap_def corres: getSlotCap_corres) + apply (rename_tac old_cap old_cap') + apply (rule_tac F="is_frame_cap old_cap" in corres_gen_asm) + apply (corres corres: updateCap_same_master + simp: is_frame_cap_def arch_cap.is_FrameCap_def update_map_data_def) + apply (wpsimp wp: get_cap_wp hoare_vcg_op_lift)+ + apply (clarsimp simp: invs_valid_objs invs_psp_aligned invs_distinct) + apply (clarsimp simp: cte_wp_at_caps_of_state wellformed_pte_def + cap_master_cap_simps is_cap_simps update_map_data_def mdata_map_def + wellformed_mapdata_def valid_arch_cap_def) + apply (clarsimp simp: valid_page_inv'_def cte_wp_at_ctes_of) + apply (clarsimp simp: perform_pg_inv_get_addr_def fromPAddr_def) + apply (clarsimp simp: perform_flush_def) + apply (rename_tac type vstart vend pstart vs asid) + apply (case_tac type; + simp add: do_flush_def doFlush_def; + corres simp: doMachineOp_bind do_machine_op_bind empty_fail_bind) + done + +definition + "page_table_invocation_map pti pti' \ + case pti of + AARCH64_A.PageTableMap cap ptr pte pt_slot level \ + \cap' pte'. pti' = PageTableMap cap' (cte_map ptr) pte' pt_slot \ + cap_relation (Structures_A.ArchObjectCap cap) cap' \ + pte_relation' pte pte' + | AARCH64_A.PageTableUnmap cap ptr \ + \cap'. pti' = PageTableUnmap cap' (cte_map ptr) \ acap_relation cap cap'" + +definition + "valid_pti' pti \ + case pti of + PageTableMap cap slot pte pteSlot \ + cte_wp_at' (is_arch_update' cap) slot and valid_cap' cap and K (ppn_bounded pte) + | PageTableUnmap cap slot \ + cte_wp_at' (is_arch_update' (ArchObjectCap cap)) slot and valid_cap' (ArchObjectCap cap) + and K (isPageTableCap cap)" + +(* extend with arch rules *) +lemmas store_pte_typ_ats[wp] = store_pte_typ_ats abs_atyp_at_lifts[OF store_pte_typ_at] + +lemma pte_bits_leq_pt_bits[simp, intro!]: + "pte_bits \ pt_bits pt_t" + by (simp add: bit_simps) + +lemma pt_bits_le_word_len[simplified, simp, intro!]: + "pt_bits pt_t < LENGTH(machine_word_len)" + by (simp add: bit_simps) + +lemma clear_page_table_corres: + "corres dc (pspace_aligned and pspace_distinct and pt_at pt_t p) + \ + (mapM_x (swp (store_pte pt_t) AARCH64_A.InvalidPTE) [p , p + 2^pte_bits .e. p + mask (pt_bits pt_t)]) + (mapM_x (swp storePTE AARCH64_H.InvalidPTE) [p , p + 2^pte_bits .e. p + mask (pt_bits pt_t)])" + apply (rule_tac F="is_aligned p (pt_bits pt_t)" in corres_req) + apply (clarsimp simp: obj_at_def a_type_def) + apply (clarsimp split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm) + apply (drule(1) pspace_alignedD) + apply (simp add: table_size_def pt_bits_def) + apply (simp add: mask_def flip: p_assoc_help) + apply (simp add: upto_enum_step_subtract[where x=p and y="p + 2^pte_bits"] + is_aligned_no_overflow + upto_enum_step_red[where us=pte_bits, simplified] + mapM_x_mapM liftM_def[symmetric]) + apply (rule corres_guard_imp, + rule_tac r'=dc and S="(=)" + and Q="\xs s. \x \ set xs. pte_at pt_t x s \ pspace_aligned s \ pspace_distinct s" + and Q'="\_. 
\" + in corres_mapM_list_all2, simp_all) + apply (rule corres_guard_imp, rule storePTE_corres) + apply (simp add:pte_relation_def)+ + apply (wp hoare_vcg_const_Ball_lift | simp)+ + apply (simp add: list_all2_refl) + apply (clarsimp simp: upto_enum_step_def pte_bits_def word_size_bits_def) + apply (erule page_table_pte_atI[simplified shiftl_t2n mult.commute bit_simps, simplified]) + apply (simp add: bit_simps word_less_nat_alt word_le_nat_alt unat_of_nat) + apply simp + done + +lemmas unmapPageTable_typ_ats[wp] = typ_at_lifts[OF unmapPageTable_typ_at'] + +lemma performPageTableInvocation_corres: + "page_table_invocation_map pti pti' \ + corres dc + (invs and valid_pti pti) (no_0_obj' and valid_pti' pti') + (perform_page_table_invocation pti) + (performPageTableInvocation pti')" + apply (rule corres_cross_over_guard [where Q="no_0_obj' and valid_pti' pti' and + pspace_aligned' and pspace_distinct'"]) + apply (fastforce intro!: pspace_aligned_cross pspace_distinct_cross) + apply (simp add: perform_page_table_invocation_def performPageTableInvocation_def + page_table_invocation_map_def) + apply (cases pti) + apply (rename_tac cap slot pte pte_slot) + apply (clarsimp simp: perform_pt_inv_map_def) + apply (rule corres_name_pre) + apply (clarsimp simp: valid_pti_def valid_pti'_def + split: arch_cap.splits capability.split_asm arch_capability.split_asm) + apply (rule corres_guard_imp) + apply (rule corres_split[OF updateCap_same_master]) + apply simp + apply (rule corres_split[OF storePTE_corres]) + apply assumption + apply (rule corres_machine_op, rule corres_Id; simp) + apply wpsimp+ + apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def + invs_valid_objs invs_psp_aligned invs_distinct) + apply (case_tac cap; simp add: is_cap_simps cap_master_cap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of valid_pti'_def) + apply (clarsimp simp: perform_pt_inv_unmap_def) + apply (rename_tac acap a b acap') + apply (rule_tac F="AARCH64_A.is_PageTableCap acap" in corres_req; clarsimp) + apply (clarsimp simp: valid_pti_def) + apply (clarsimp simp: AARCH64_A.is_PageTableCap_def split_def cong: option.case_cong) + apply (simp add: case_option_If2 split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (rule corres_if3) + apply (fastforce simp: acap_map_data_def mdata_map_def is_PageTableCap_def) + apply (rule corres_split[OF unmapPageTable_corres]) + apply (clarsimp simp: mdata_map_def) + apply (clarsimp simp: mdata_map_def) + apply (rule refl) + apply (simp (no_asm) add: p_assoc_help flip: mask_2pm1) + apply (corres corres: clear_page_table_corres) + apply wp+ + apply (rule corres_trivial, simp) + apply (simp add: liftM_def) + apply (rule corres_split[OF getSlotCap_corres[OF refl]]) + apply (rule_tac F="is_pt_cap x" in corres_gen_asm) + apply (rule updateCap_same_master) + apply (clarsimp simp: is_cap_simps update_map_data_def) + apply (wpsimp wp: get_cap_wp mapM_x_wp' hoare_vcg_all_lift hoare_vcg_imp_lift' + simp: wellformed_pte_def)+ + apply (clarsimp simp: valid_pti_def valid_arch_cap_def cte_wp_at_caps_of_state + invs_valid_objs invs_psp_aligned invs_distinct + cap_master_cap_simps is_cap_simps update_map_data_def + wellformed_mapdata_def) + apply (clarsimp simp: valid_pti'_def cte_wp_at_ctes_of) + done + +definition + "asid_pool_invocation_map ap \ case ap of + asid_pool_invocation.Assign asid p slot \ Assign (ucast asid) p (cte_map slot)" + +definition + "valid_apinv' ap \ + case ap of Assign asid p slot \ + cte_wp_at' (isArchCap isPageTableCap o cteCap) 
slot and K (0 < asid \ asid_wf asid)" + +definition + "valid_vcpuinv' vi \ case vi of + VCPUSetTCB v t \ vcpu_at' v and ex_nonz_cap_to' v and ex_nonz_cap_to' t + | VCPUInjectIRQ v n q \ \ + | VCPUReadRegister v rg \ \ + | VCPUWriteRegister v _ _ \ \ + | VCPUAckVPPI v _ \ \" + +lemma performASIDPoolInvocation_corres[corres]: + "\ ap' = asid_pool_invocation_map ap \ \ + corres dc + (valid_objs and pspace_aligned and pspace_distinct and valid_arch_state and valid_apinv ap) + (no_0_obj' and valid_apinv' ap') + (perform_asid_pool_invocation ap) + (performASIDPoolInvocation ap')" + apply (clarsimp simp: perform_asid_pool_invocation_def performASIDPoolInvocation_def) + apply (cases ap, simp add: asid_pool_invocation_map_def) + apply (corres corres: getSlotCap_corres corres_assert_gen_asm_l updateCap_same_master + simp: liftM_def store_asid_pool_entry_def + term_simp: cap.is_ArchObjectCap_def arch_cap.is_PageTableCap_def + update_map_data_def) + apply (fastforce simp: asid_pool_relation_def abs_asid_entry_def cap.is_ArchObjectCap_def + arch_cap.is_PageTableCap_def inv_def ucast_up_inj) + apply (wpsimp wp: set_cap_typ_at hoare_drop_imp get_cap_wp)+ + apply (clarsimp simp: valid_apinv_def cte_wp_at_caps_of_state cap_master_cap_simps is_cap_simps + arch_cap.is_PageTableCap_def is_vsroot_cap_def update_map_data_def in_omonad) + apply (drule (1) caps_of_state_valid_cap) + apply (simp add: valid_cap_def obj_at_def) + apply (clarsimp simp: valid_apinv'_def cte_wp_at_ctes_of) + apply (fastforce intro!: pspace_aligned_cross pspace_distinct_cross) + done + +crunches doMachineOp + for arch[wp]: "\s. P (ksArchState s)" + and irq_node'[wp]: "\s. P (irq_node' s)" + and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + and ksInterruptState[wp]: "\s. P (ksInterruptState s)" + and cur'[wp]: "\s. P (ksCurThread s)" + and cteCaps_of[wp]: "\s. P (cteCaps_of s)" + and dmo_global_refs'[wp]: "\s. P (global_refs' s)" + and ksPSpace[wp]: "\s. P (ksPSpace s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + +crunches vcpuSave, vcpuDisable, vcpuEnable, vcpuRestore + for obj_at'_no_vcpu[wp]: "\s. P (obj_at' (P' :: ('a :: no_vcpu) \ bool) t s)" + (simp: crunch_simps wp: crunch_wps) + +lemma vcpuSwitch_obj_at'_no_vcpu[wp]: + "vcpuSwitch param_a \\s. P (obj_at' (P' :: ('a :: no_vcpu) \ bool) t s)\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma dmo_setVSpaceRoot_invs'[wp]: + "doMachineOp (setVSpaceRoot r a) \invs'\" + by (wp dmo_invs_lift') + +lemma dmo_setVSpaceRoot_irq_masks[wp]: + "doMachineOp (setVSpaceRoot r a) \\s. P (irq_masks (ksMachineState s))\" + unfolding doMachineOp_def + apply wpsimp + apply (drule use_valid, rule setVSpaceRoot_irq_masks; assumption) + done + +lemma dmo_setVSpaceRoot_memory[wp]: + "doMachineOp (setVSpaceRoot r a) \\s. P (underlying_memory (ksMachineState s))\" + unfolding doMachineOp_def + apply wpsimp + apply (drule use_valid, rule setVSpaceRoot_underlying_memory_inv; assumption) + done + +lemma dmo_setVSpaceRoot_invs_no_cicd'[wp]: + "doMachineOp (setVSpaceRoot r a) \invs_no_cicd'\" + by (wp dmo_invs_no_cicd_lift') + +lemma getObject_tcb_hyp_sym_refs: + "\\s. sym_refs (state_hyp_refs_of' s)\ getObject p + \\rv. case atcbVCPUPtr (tcbArch rv) of None \ \_. 
True + | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x\" + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: typ_at_tcb'[symmetric] typ_at'_def ko_wp_at'_def[of _ p] + split: option.splits) + apply (case_tac ko; simp) + apply (rename_tac tcb) + apply (rule_tac x=tcb in exI; rule conjI, clarsimp simp: obj_at'_def) + apply (clarsimp, rule context_conjI, clarsimp simp: obj_at'_def) + apply (drule ko_at_state_hyp_refs_ofD') + apply (simp add: hyp_refs_of'_def sym_refs_def) + apply (erule_tac x=p in allE, simp) + apply (drule state_hyp_refs_of'_elemD) + apply (clarsimp simp: hyp_refs_of_rev') + apply (simp add: ko_wp_at'_def, erule exE, + clarsimp simp: is_vcpu'_def hyp_live'_def arch_live'_def) + done + +lemma setASIDPool_valid_objs[wp]: + "setObject p (ap::asidpool) \valid_objs'\" + apply (wp setObject_valid_objs'[where P=\]) + apply (clarsimp simp: updateObject_default_def in_monad valid_obj'_def) + apply simp + done + +lemma setASIDPool_valid_mdb[wp]: + "setObject p (ap::asidpool) \valid_mdb'\" + by (simp add: valid_mdb'_def) wp + +lemma setASIDPool_nosch[wp]: + "setObject p (ap::asidpool) \\s. P (ksSchedulerAction s)\" + by (wp setObject_nosch updateObject_default_inv|simp)+ + +lemma setASIDPool_ksQ[wp]: + "setObject p (ap::asidpool) \\s. P (ksReadyQueues s)\" + by (wp setObject_qs updateObject_default_inv|simp)+ + +lemma setASIDPool_inQ[wp]: + "setObject ptr (ap::asidpool) \\s. P (obj_at' (inQ d p) t s)\" + apply (simp add: obj_at'_real_def) + apply (wpsimp wp: setObject_ko_wp_at simp: objBits_simps) + apply (simp add: pageBits_def) + apply simp + apply (clarsimp simp: obj_at'_def ko_wp_at'_def) + done + +lemma setASIDPool_qsL1[wp]: + "setObject p (ap::asidpool) \\s. P (ksReadyQueuesL1Bitmap s)\" + by (wp setObject_qs updateObject_default_inv|simp)+ + +lemma setASIDPool_qsL2[wp]: + "setObject p (ap::asidpool) \\s. P (ksReadyQueuesL2Bitmap s)\" + by (wp setObject_qs updateObject_default_inv|simp)+ + +lemma setASIDPool_tcb_obj_at'[wp]: + "\obj_at' (P::tcb \ bool) t\ setObject p (ap::asidpool) \\_. obj_at' P t\" + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma setASIDPool_state_refs'[wp]: + "setObject p (ap::asidpool) \\s. P (state_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def objBits_simps + in_magnitude_check state_refs_of'_def ps_clear_upd + elim!: rsubst[where P=P] del: ext intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (simp split: option.split) + done + +lemma setASIDPool_state_hyp_refs'[wp]: + "setObject p (ap::asidpool) \\s. P (state_hyp_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def objBits_simps + in_magnitude_check state_hyp_refs_of'_def ps_clear_upd + elim!: rsubst[where P=P] del: ext intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (simp split: option.split) + done + +lemma setASIDPool_iflive[wp]: + "setObject p (ap::asidpool) \if_live_then_nonz_cap'\" + apply (rule hoare_pre) + apply (rule setObject_iflive' [where P=\], simp) + apply (simp add: objBits_simps) + apply (auto simp: updateObject_default_def in_monad bit_simps live'_def hyp_live'_def + arch_live'_def) + done + +lemma setASIDPool_ksInt[wp]: + "setObject p (ap::asidpool) \\s. 
P (ksInterruptState s)\" + by (wp setObject_ksInterrupt updateObject_default_inv|simp)+ + +lemma setASIDPool_ifunsafe[wp]: + "setObject p (ap::asidpool) \if_unsafe_then_cap'\" + apply (rule hoare_pre) + apply (rule setObject_ifunsafe' [where P=\], simp) + apply (auto simp: updateObject_default_def in_monad)[2] + apply wp + apply simp + done + +lemma setASIDPool_it'[wp]: + "setObject p (ap::asidpool) \\s. P (ksIdleThread s)\" + by (wp setObject_it updateObject_default_inv|simp)+ + +lemma setASIDPool_pred_tcb_at'[wp]: + "setObject p (ap::asidpool) \pred_tcb_at' proj P t\" + apply (simp add: pred_tcb_at'_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma setASIDPool_idle[wp]: + "setObject p (ap::asidpool) \valid_idle'\" + unfolding valid_idle'_def + by (rule hoare_lift_Pf [where f="ksIdleThread"]; wp) + +lemma setASIDPool_irq_states'[wp]: + "setObject p (ap::asidpool) \valid_irq_states'\" + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=ksInterruptState, OF setObject_ksInterrupt]) + apply (simp, rule updateObject_default_inv) + apply (rule hoare_use_eq [where f=ksMachineState, OF setObject_ksMachine]) + apply (simp, rule updateObject_default_inv) + apply wp + apply assumption + done + +lemma setASIDPool_vms'[wp]: + "setObject p (ap::asidpool) \valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp setObject_typ_at_inv setObject_ksMachine updateObject_default_inv + hoare_vcg_all_lift hoare_vcg_disj_lift | simp)+ + done + +lemma setASIDPool_ct_not_inQ[wp]: + "setObject p (ap::asidpool) \ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF setObject_nosch]) + apply (simp add: updateObject_default_def | wp)+ + apply (rule hoare_weaken_pre) + apply (wps setObject_ASID_ct) + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad)+ + done + +lemma setObject_asidpool_cur_domain[wp]: + "setObject p (ap::asidpool) \\s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_asidpool_ksDomSchedule[wp]: + "setObject p (ap::asidpool) \\s. P (ksDomSchedule s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_asidpool_tcb_in_cur_domain'[wp]: + "setObject p (ap::asidpool) \tcb_in_cur_domain' t\" + by (wp tcb_in_cur_domain'_lift) + +lemma setObject_asidpool_ct_idle_or_in_cur_domain'[wp]: + "setObject p (ap::asidpool) \ct_idle_or_in_cur_domain'\" + by (wp hoare_vcg_disj_lift ct_idle_or_in_cur_domain'_lift) + +lemma setObject_ap_ksDomScheduleIdx[wp]: + "setObject p (ap::asidpool) \\s. P (ksDomScheduleIdx s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_ap_tcbs_of'[wp]: + "setObject p (ap::asidpool) \\s. 
P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setASIDPool_invs[wp]: + "setObject p (ap::asidpool) \invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift + valid_irq_node_lift + cur_tcb_lift valid_irq_handlers_lift'' + untyped_ranges_zero_lift + updateObject_default_inv valid_bitmaps_lift + | simp add: cteCaps_of_def + | rule setObject_ksPSpace_only)+ + apply (clarsimp simp: o_def) + done + +lemma doMachineOp_invalidateTranslationASID_invs'[wp]: + "doMachineOp (invalidateTranslationASID vmid) \invs'\" + unfolding invalidateTranslationASID_def + by (wp dmo_machine_op_lift_invs') + +lemma invs'_vmid_strg: + "\ invs' s; 0 < asid \ \ + invs' (s\ksArchState := armKSVMIDTable_update + (\_. (armKSVMIDTable (ksArchState s))(vmid \ asid)) + (ksArchState s)\)" + by (auto simp: invs'_def valid_state'_def valid_global_refs'_def global_refs'_def + valid_machine_state'_def valid_arch_state'_vmid_Some_upd) + +crunches updateASIDPoolEntry + for invs'[wp]: invs' + (wp: getASID_wp crunch_wps) + +lemma storeVMID_invs'[wp]: + "\invs' and K (0 < asid)\ storeVMID asid vmid \\_. invs'\" + unfolding storeVMID_def + by (wpsimp simp_del: fun_upd_apply | strengthen invs'_vmid_strg)+ + +lemma invs'_vmid_None_upd: + "invs' s \ + invs' (s\ksArchState := armKSVMIDTable_update + (\_ a. if a = vmid then None else armKSVMIDTable (ksArchState s) a) + (ksArchState s)\)" + by (clarsimp simp: invs'_def valid_state'_def valid_global_refs'_def global_refs'_def + valid_machine_state'_def valid_arch_state'_def ran_def + cong: option.case_cong) + +crunches getVMID, armContextSwitch, setGlobalUserVSpace + for invs'[wp]: invs' + (ignore: doMachineOp wp: getASID_wp crunch_wps simp: invs'_vmid_None_upd) + +lemma setVMRoot_invs'[wp]: + "setVMRoot p \invs'\" + unfolding setVMRoot_def getThreadVSpaceRoot_def locateSlotTCB_def locateSlotBasic_def + by (wpsimp wp: whenE_wp findVSpaceForASID_vs_at_wp hoare_vcg_ex_lift + hoare_vcg_all_lift getSlotCap_actual_wp + simp: word_neq_0_conv) + +lemma setASIDPool_invs_no_cicd'[wp]: + "setObject p (ap::asidpool) \invs_no_cicd'\" + apply (simp add: invs_no_cicd'_def valid_state'_def valid_pspace'_def) + apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift + valid_irq_node_lift + cur_tcb_lift valid_irq_handlers_lift'' + untyped_ranges_zero_lift + updateObject_default_inv valid_bitmaps_lift + | simp add: cteCaps_of_def + | rule setObject_ksPSpace_only)+ + apply (clarsimp simp: o_def) + done + +lemma invalidateTranslationASID_invs_no_cicd'[wp]: + "doMachineOp (invalidateTranslationASID asid) \invs_no_cicd'\" + by (wp dmo_invs_no_cicd_lift') + +crunches updateASIDPoolEntry + for invs_no_cicd'[wp]: invs_no_cicd' + (wp: getASID_wp crunch_wps) + +lemma invs_no_cicd'_vmid_strg: + "\ invs_no_cicd' s; 0 < asid \ \ + invs_no_cicd' (s\ksArchState := armKSVMIDTable_update + (\_. (armKSVMIDTable (ksArchState s))(vmid \ asid)) + (ksArchState s)\)" + by (auto simp: invs_no_cicd'_def valid_state'_def valid_global_refs'_def global_refs'_def + valid_machine_state'_def valid_arch_state'_vmid_Some_upd) + +lemma storeVMID_invs_no_cicd'[wp]: + "\invs_no_cicd' and K (0 < asid)\ storeVMID asid vmid \\_. invs_no_cicd'\" + unfolding storeVMID_def + by (wpsimp simp_del: fun_upd_apply | strengthen invs_no_cicd'_vmid_strg)+ + +lemma invs_no_cicd'_vmid_None_upd: + "invs_no_cicd' s \ + invs_no_cicd' (s\ksArchState := armKSVMIDTable_update + (\_ a. 
if a = vmid then None else armKSVMIDTable (ksArchState s) a) + (ksArchState s)\)" + by (clarsimp simp: invs_no_cicd'_def valid_state'_def valid_global_refs'_def global_refs'_def + valid_machine_state'_def valid_arch_state'_def ran_def + cong: option.case_cong) + +crunches getVMID, armContextSwitch, setGlobalUserVSpace + for invs_no_cicd'[wp]: "invs_no_cicd'" + (ignore: doMachineOp wp: getASID_wp crunch_wps simp: invs_no_cicd'_vmid_None_upd) + +lemma setVMRoot_invs_no_cicd': + "setVMRoot p \invs_no_cicd'\" + unfolding setVMRoot_def getThreadVSpaceRoot_def locateSlotTCB_def locateSlotBasic_def + by (wpsimp wp: whenE_wp findVSpaceForASID_vs_at_wp hoare_vcg_ex_lift getSlotCap_actual_wp + hoare_vcg_all_lift + simp: word_neq_0_conv) + +crunch nosch [wp]: setVMRoot "\s. P (ksSchedulerAction s)" + (wp: crunch_wps getObject_inv setObject_nosch simp: crunch_simps loadObject_default_def updateObject_default_def) + +crunch it' [wp]: deleteASIDPool "\s. P (ksIdleThread s)" + (simp: crunch_simps loadObject_default_def updateObject_default_def wp: getObject_inv mapM_wp' crunch_wps) + +crunch it' [wp]: storePTE "\s. P (ksIdleThread s)" + (simp: crunch_simps updateObject_default_def wp: setObject_idle') + +crunch it' [wp]: deleteASID "\s. P (ksIdleThread s)" + (simp: crunch_simps loadObject_default_def updateObject_default_def + wp: getObject_inv) + +crunch typ_at' [wp]: performPageTableInvocation "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +crunch typ_at' [wp]: performPageInvocation "\s. P (typ_at' T p s)" + (wp: crunch_wps simp: crunch_simps) + +lemma performASIDPoolInvocation_typ_at' [wp]: + "\\s. P (typ_at' T p s)\ performASIDPoolInvocation api \\_ s. P (typ_at' T p s)\" + by (wpsimp simp: performASIDPoolInvocation_def + wp: getASID_wp hoare_vcg_imp_lift[where P'=\, simplified]) + +lemmas performPageTableInvocation_typ_ats' [wp] = + typ_at_lifts [OF performPageTableInvocation_typ_at'] + +lemmas performPageInvocation_typ_ats' [wp] = + typ_at_lifts [OF performPageInvocation_typ_at'] + +lemmas performASIDPoolInvocation_typ_ats' [wp] = + typ_at_lifts [OF performASIDPoolInvocation_typ_at'] + +lemma storePTE_pred_tcb_at' [wp]: + "storePTE p pte \pred_tcb_at' proj P t\" + apply (simp add: storePTE_def pred_tcb_at'_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma storePTE_valid_mdb [wp]: + "\valid_mdb'\ storePTE p pte \\rv. valid_mdb'\" + by (simp add: valid_mdb'_def) wp + +crunch nosch [wp]: storePTE "\s. P (ksSchedulerAction s)" + (simp: updateObject_default_def ignore_del: setObject) + +crunch ksQ [wp]: storePTE "\s. P (ksReadyQueues s)" + (simp: updateObject_default_def) + +lemma storePTE_inQ[wp]: + "\\s. P (obj_at' (inQ d p) t s)\ storePTE ptr pte \\rv s. P (obj_at' (inQ d p) t s)\" + apply (simp add: obj_at'_real_def storePTE_def) + apply (wp setObject_ko_wp_at | simp add: objBits_simps)+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def) + done + +crunch norqL1[wp]: storePTE "\s. P (ksReadyQueuesL1Bitmap s)" + (simp: updateObject_default_def) + +crunch norqL2[wp]: storePTE "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: updateObject_default_def) + +lemma storePTE_iflive [wp]: + "\if_live_then_nonz_cap'\ storePTE p pte \\rv. if_live_then_nonz_cap'\" + apply (simp add: storePTE_def) + apply (rule hoare_pre) + apply (rule setObject_iflive' [where P=\], simp) + apply (simp add: objBits_simps) + apply (auto simp: updateObject_default_def in_monad live'_def hyp_live'_def arch_live'_def) + done + +lemma setObject_pte_ksInt [wp]: + "\\s. 
P (ksInterruptState s)\ setObject p (pte::pte) \\_. \s. P (ksInterruptState s)\" + by (wp setObject_ksInterrupt updateObject_default_inv|simp)+ + +crunch ksInterruptState [wp]: storePTE "\s. P (ksInterruptState s)" + +lemma storePTE_ifunsafe [wp]: + "\if_unsafe_then_cap'\ storePTE p pte \\rv. if_unsafe_then_cap'\" + apply (simp add: storePTE_def) + apply (rule hoare_pre) + apply (rule setObject_ifunsafe' [where P=\], simp) + apply (auto simp: updateObject_default_def in_monad)[2] + apply wp + apply simp + done + +method valid_idle'_setObject uses simp = + simp add: valid_idle'_def, rule hoare_lift_Pf [where f="ksIdleThread"]; wpsimp?; + (wpsimp wp: obj_at_setObject2[where P="idle_tcb'", simplified] hoare_drop_imp + simp: simp + | clarsimp dest!: updateObject_default_result)+ + + +lemma storePTE_idle [wp]: + "\valid_idle'\ storePTE p pte \\rv. valid_idle'\" by (valid_idle'_setObject simp: storePTE_def) + +crunch arch' [wp]: storePTE "\s. P (ksArchState s)" + +crunch cur' [wp]: storePTE "\s. P (ksCurThread s)" + +lemma storePTE_irq_states' [wp]: + "\valid_irq_states'\ storePTE pte p \\_. valid_irq_states'\" + apply (simp add: storePTE_def) + apply (wpsimp wp: valid_irq_states_lift' dmo_lift' no_irq_storeWord setObject_ksMachine + updateObject_default_inv) + done + +lemma storePTE_vms'[wp]: + "\valid_machine_state'\ storePTE p pte \\_. valid_machine_state'\" + apply (simp add: storePTE_def valid_machine_state'_def pointerInUserData_def + pointerInDeviceData_def) + apply (wp setObject_typ_at_inv setObject_ksMachine updateObject_default_inv + hoare_vcg_all_lift hoare_vcg_disj_lift | simp)+ + done + +crunch pspace_domain_valid[wp]: storePTE "pspace_domain_valid" + +lemma storePTE_ct_not_inQ[wp]: + "\ct_not_inQ\ storePTE p pte \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF storePTE_nosch]) + apply (simp add: storePTE_def) + apply (wp_pre, wps) + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad)+ + done + +lemma setObject_pte_cur_domain[wp]: + "\\s. P (ksCurDomain s)\ setObject t (v::pte) \\rv s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_pte_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ setObject t (v::pte) \\rv s. P (ksDomSchedule s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma storePTE_cur_domain[wp]: + "\\s. P (ksCurDomain s)\ storePTE p pte \\rv s. P (ksCurDomain s)\" + by (simp add: storePTE_def) wp + +lemma storePTE_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ storePTE p pte \\rv s. P (ksDomSchedule s)\" + by (simp add: storePTE_def) wp + +lemma storePTE_tcb_obj_at'[wp]: + "\obj_at' (P::tcb \ bool) t\ storePTE p pte \\_. obj_at' P t\" + apply (simp add: storePTE_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma storePTE_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ storePTE p pte \\_. tcb_in_cur_domain' t\" + by (wp tcb_in_cur_domain'_lift) + +lemma storePTE_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ storePTE p pte \\_. ct_idle_or_in_cur_domain'\" + by (wp ct_idle_or_in_cur_domain'_lift hoare_vcg_disj_lift) + +lemma setObject_pte_ksDomScheduleIdx [wp]: + "\\s. P (ksDomScheduleIdx s)\ setObject p (pte::pte) \\_. \s. P (ksDomScheduleIdx s)\" + by (wp updateObject_default_inv|simp add:setObject_def | wpc)+ + +crunches storePTE + for ksDomScheduleIdx[wp]: "\s. 
P (ksDomScheduleIdx s)" + and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) + +lemma storePTE_valid_objs[wp]: + "\valid_objs' and K (ppn_bounded pte)\ storePTE p pte \\_. valid_objs'\" + apply (simp add: storePTE_def doMachineOp_def split_def) + apply (rule hoare_pre, rule setObject_valid_objs'[where P="K (ppn_bounded pte)"]) + apply (clarsimp simp: updateObject_default_def in_monad valid_obj'_def) + apply simp + done + +lemma storePTE_ko_wp_vcpu_at'[wp]: + "storePTE p pde \\s. P (ko_wp_at' (is_vcpu' and hyp_live') p' s)\" + apply (clarsimp simp: storePTE_def) + apply (wpsimp wp: hoare_drop_imps setObject_ko_wp_at simp: objBits_simps archObjSize_def) + apply (auto simp: bit_simps ko_wp_at'_def obj_at'_def is_vcpu'_def)+ + done + +lemma setObject_pte_tcb_of'[wp]: + "setObject slote (pte::pte) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +crunches storePTE + for tcbs_of'[wp]: "\s. P (tcbs_of' s)" + +lemma storePTE_invs[wp]: + "\invs' and K (ppn_bounded pte)\ storePTE p pte \\_. invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def + by (wpsimp wp: sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift valid_arch_state_lift' + valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' untyped_ranges_zero_lift + valid_bitmaps_lift + simp: cteCaps_of_def o_def) + +crunch cte_wp_at'[wp]: unmapPageTable "\s. P (cte_wp_at' P' p s)" + (wp: crunch_wps simp: crunch_simps) + +lemmas storePTE_Invalid_invs = storePTE_invs[where pte=InvalidPTE, simplified] + +crunches unmapPageTable, invalidateTLBByASIDVA + for invs'[wp]: "invs'" + (ignore: doMachineOp + wp: storePTE_Invalid_invs mapM_wp' crunch_wps dmo_invs_lift' + simp: crunch_simps if_apply_def2) + +lemma perform_pti_invs [wp]: + "\invs' and valid_pti' pti\ performPageTableInvocation pti \\_. invs'\" + apply (clarsimp simp: performPageTableInvocation_def getSlotCap_def valid_pti'_def + split: page_table_invocation.splits) + apply (intro conjI allI impI; + wpsimp wp: arch_update_updateCap_invs getCTE_wp' mapM_x_wp' + hoare_vcg_all_lift hoare_vcg_imp_lift' dmo_invs_lift') + apply (auto simp: cte_wp_at_ctes_of is_arch_update'_def isCap_simps valid_cap'_def capAligned_def) + done + +crunches unmapPage + for cte_wp_at': "\s. P (cte_wp_at' P' p s)" + (wp: crunch_wps lookupPTSlotFromLevel_inv simp: crunch_simps) + +lemmas unmapPage_typ_ats [wp] = typ_at_lifts [OF unmapPage_typ_at'] + +lemma unmapPage_invs' [wp]: + "unmapPage sz asid vptr pptr \invs'\" + unfolding unmapPage_def + by (wpsimp wp: lookupPTSlot_inv hoare_drop_imp hoare_vcg_all_lift dmo_invs_lift') + +lemma dmo_doFlush_invs'[wp]: + "doMachineOp (doFlush flushOp vstart vend pstart) \invs'\" + unfolding doFlush_def cleanCacheRange_RAM_def invalidateCacheRange_RAM_def branchFlushRange_def + cleanInvalidateCacheRange_RAM_def cleanCacheRange_PoU_def invalidateCacheRange_I_def + by (cases flushOp; wpsimp wp: dmo_machine_op_lift_invs' simp: doMachineOp_bind empty_fail_bind) + +lemma isPagePTE_eq: (* FIXME AARCH64: move up *) + "isPagePTE pte = (\base sm g xn d R. pte = PagePTE base sm g xn d R)" + by (simp add: isPagePTE_def split: pte.splits) + +lemma perform_page_invs [wp]: + "\invs' and valid_page_inv' pt\ performPageInvocation pt \\_. 
invs'\" + supply if_split[split del] + apply (simp add: performPageInvocation_def) + apply (cases pt) + (* FIXME AARCH64: clean up this proof, not clear why all, fwd_all or solve_emerging don't work *) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_const_imp_lift + arch_update_updateCap_invs unmapPage_cte_wp_at' getSlotCap_wp dmo_invs_lift' + simp: valid_page_inv'_def is_arch_update'_def if_apply_def2) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_const_imp_lift + arch_update_updateCap_invs unmapPage_cte_wp_at' getSlotCap_wp dmo_invs_lift' + simp: valid_page_inv'_def is_arch_update'_def if_apply_def2 isPagePTE_eq) + prefer 2 + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_const_imp_lift + arch_update_updateCap_invs unmapPage_cte_wp_at' getSlotCap_wp + simp: valid_page_inv'_def is_arch_update'_def if_apply_def2) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_const_imp_lift + arch_update_updateCap_invs unmapPage_cte_wp_at' getSlotCap_wp dmo_invs_lift' + simp: valid_page_inv'_def is_arch_update'_def if_apply_def2) + apply (clarsimp simp: cte_wp_at_ctes_of valid_page_inv'_def is_arch_update'_def isCap_simps valid_cap'_def capAligned_def + split: option.splits)+ + done + +lemma setObject_cte_obj_at_ap': + shows + "\\s. P' (obj_at' (P :: asidpool \ bool) p s)\ + setObject c (cte::cte) + \\_ s. P' (obj_at' P p s)\" + apply (clarsimp simp: setObject_def in_monad split_def + valid_def lookupAround2_char1 + obj_at'_def ps_clear_upd + split del: if_split) + apply (clarsimp elim!: rsubst[where P=P']) + apply (clarsimp simp: updateObject_cte in_monad objBits_simps + tcbCTableSlot_def tcbVTableSlot_def + typeError_def + split: if_split_asm + Structures_H.kernel_object.split_asm) + done + +lemma updateCap_ko_at_ap_inv'[wp]: + "\\s. P (ko_at' (ko::asidpool) p s )\ updateCap a b \\rv s. P ( ko_at' ko p s)\" + by (wpsimp simp: updateCap_def setCTE_def wp: setObject_cte_obj_at_ap') + +lemma storePTE_asid_pool_obj_at'[wp]: + "storePTE p pte \\s. P (obj_at' (P'::asidpool \ bool) t s)\" + apply (simp add: storePTE_def) + apply (clarsimp simp: setObject_def in_monad split_def + valid_def lookupAround2_char1 + obj_at'_def ps_clear_upd + split del: if_split) + apply (clarsimp elim!: rsubst[where P=P]) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma perform_aci_invs [wp]: + "\invs' and valid_apinv' api\ performASIDPoolInvocation api \\_. invs'\" + apply (clarsimp simp: performASIDPoolInvocation_def split: asidpool_invocation.splits) + apply (wpsimp wp: arch_update_updateCap_invs getASID_wp getSlotCap_wp hoare_vcg_all_lift + hoare_vcg_imp_lift') + apply (clarsimp simp: valid_apinv'_def cte_wp_at_ctes_of) + apply (case_tac cte, clarsimp) + apply (drule ctes_of_valid_cap', fastforce) + apply (clarsimp simp: valid_cap'_def capAligned_def is_arch_update'_def isCap_simps + wellformed_mapdata'_def) + done + +end + +end diff --git a/proof/refine/AARCH64/orphanage/Orphanage.thy b/proof/refine/AARCH64/orphanage/Orphanage.thy new file mode 100644 index 0000000000..3d099b0357 --- /dev/null +++ b/proof/refine/AARCH64/orphanage/Orphanage.thy @@ -0,0 +1,1965 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Orphanage +imports Refine.Refine +begin + +text \ + Proof that calling the kernel never leaves threads orphaned. + More specifically, every active thread must be the current thread, + or about to be switched to, or be in a scheduling queue. 
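+
+  For orientation, this is the \<open>no_orphans\<close> predicate defined later in this
+  theory; the following is an informal paraphrase of that definition, not an
+  additional statement:
+
+    \<forall>t \<in> all_active_tcb_ptrs s.
+       t = ksCurThread s \<or> t \<in> all_queued_tcb_ptrs s \<or>
+       ksSchedulerAction s = SwitchToThread t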
+\ + +(*FIXME: arch_split: move up? *) +context Arch begin + +requalify_facts + switchToIdleThread_def + switchToThread_def + +lemmas [crunch_def] = switchToIdleThread_def switchToThread_def + +context begin global_naming global +requalify_facts + Thread_H.switchToIdleThread_def + Thread_H.switchToThread_def +end +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition + is_active_thread_state :: "thread_state \ bool" +where + "is_active_thread_state ts \ + isRunning ts \ isRestart ts" + +definition + is_active_tcb_ptr :: "machine_word \ kernel_state \ bool" +where + "is_active_tcb_ptr tcb_ptr s \ + st_tcb_at' is_active_thread_state tcb_ptr s" + +lemma is_active_tcb_ptr_runnable': + "is_active_tcb_ptr t s = st_tcb_at' runnable' t s" + by (auto simp: is_active_tcb_ptr_def pred_tcb_at'_def obj_at'_def + is_active_thread_state_def isRunning_def isRestart_def + split: Structures_H.thread_state.split_asm) + +definition + all_active_tcb_ptrs :: "kernel_state \ machine_word set" +where + "all_active_tcb_ptrs s \ + { tcb_ptr. is_active_tcb_ptr tcb_ptr s }" + +definition + all_queued_tcb_ptrs :: "kernel_state \ machine_word set" +where + "all_queued_tcb_ptrs s \ { tcb_ptr. obj_at' tcbQueued tcb_ptr s }" + +lemma st_tcb_at_neg': + "(st_tcb_at' (\ ts. \ P ts) t s) = (tcb_at' t s \ \ st_tcb_at' P t s)" + by (auto simp: pred_tcb_at'_def obj_at'_def) + +lemma st_tcb_at_neg2: + "(\ st_tcb_at' P t s) = (st_tcb_at' (\ ts. \ P ts) t s \ \ tcb_at' t s)" + by (auto simp: pred_tcb_at'_def obj_at'_def) + +lemma st_tcb_at_double_neg': + "(st_tcb_at' (\ ts. \ P ts \ \ Q ts) t s) = + ((st_tcb_at' (\ ts. \ P ts) t s) \ (st_tcb_at' (\ ts. \ Q ts) t s))" + apply (auto simp: pred_tcb_at'_def obj_at'_def) + done + +definition + no_orphans :: " kernel_state \ bool" +where + "no_orphans s \ + \ tcb_ptr. + (tcb_ptr : all_active_tcb_ptrs s + \ + tcb_ptr = ksCurThread s \ tcb_ptr : all_queued_tcb_ptrs s \ + ksSchedulerAction s = SwitchToThread tcb_ptr)" + +lemma no_orphans_disj: + "no_orphans = (\ s. + \ tcb_ptr. tcb_ptr = ksCurThread s \ + tcb_ptr : all_queued_tcb_ptrs s \ + \ typ_at' TCBT tcb_ptr s \ + st_tcb_at' (\ state. \ is_active_thread_state state) tcb_ptr s \ + ksSchedulerAction s = SwitchToThread tcb_ptr)" + apply clarsimp + apply (rule ext) + apply (unfold no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def st_tcb_at_neg' typ_at_tcb') + apply (auto del: pred_tcb_at' intro: pred_tcb_at') + done + +lemma no_orphans_lift: + assumes typ_at'_is_lifted: + "\ tcb_ptr. \ \s. \ typ_at' TCBT tcb_ptr s\ f \ \_ s. \ typ_at' TCBT tcb_ptr s \" + assumes ksCurThread_is_lifted: + "\ tcb_ptr. \ \s. tcb_ptr = ksCurThread s \ f \ \_ s. tcb_ptr = ksCurThread s \" + assumes st_tcb_at'_is_lifted: + "\P p. \ \s. st_tcb_at' P p s\ f \ \_ s. st_tcb_at' P p s \" + assumes tcbQueued_is_lifted: + "\P tcb_ptr. f \ \s. obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s \" + assumes ksSchedulerAction_is_lifted: + "\P. \ \s. P (ksSchedulerAction s)\ f \ \_ s. P (ksSchedulerAction s) \" + shows + "\ \s. no_orphans s \ f \ \_ s. 
no_orphans s \" + apply (unfold no_orphans_disj + all_active_tcb_ptrs_def + all_queued_tcb_ptrs_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (rule ksCurThread_is_lifted) + apply (wp hoare_vcg_disj_lift) + apply (wpsimp wp: tcbQueued_is_lifted) + apply (wp hoare_vcg_disj_lift) + apply (rule typ_at'_is_lifted) + apply (wp hoare_vcg_disj_lift) + apply (rule st_tcb_at'_is_lifted) + apply (rule ksSchedulerAction_is_lifted) + apply simp + done + +lemma st_tcb_at'_is_active_tcb_ptr_lift: + assumes "\P P' t. \\s. P (st_tcb_at' P' t s)\ f \\rv s. P (st_tcb_at' P' t s)\" + shows "\\s. P (is_active_tcb_ptr t s)\ f \\_ s. P (is_active_tcb_ptr t s)\" + by (clarsimp simp: is_active_tcb_ptr_def) (rule assms) + +lemma st_tcb_at'_all_active_tcb_ptrs_lift: + assumes "\P P' t. \\s. P (st_tcb_at' P' t s)\ f \\rv s. P (st_tcb_at' P' t s)\" + shows "\\s. P (t \ all_active_tcb_ptrs s)\ f \\_ s. P (t \ all_active_tcb_ptrs s)\" + by (clarsimp simp: all_active_tcb_ptrs_def) + (rule st_tcb_at'_is_active_tcb_ptr_lift [OF assms]) + +lemma tcbQueued_all_queued_tcb_ptrs_lift: + assumes "\Q P tcb_ptr. f \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)\" + shows "\\s. P (t \ all_queued_tcb_ptrs s)\ f \\_ s. P (t \ all_queued_tcb_ptrs s)\" + apply (clarsimp simp: all_queued_tcb_ptrs_def) + apply (rule_tac P=P in P_bool_lift) + apply (wp hoare_vcg_ex_lift assms) + apply (wp hoare_vcg_all_lift assms) + done + +definition + almost_no_orphans :: "obj_ref \ kernel_state \ bool" +where + "almost_no_orphans tcb_ptr s \ + \ptr. ptr = tcb_ptr \ + (ptr : all_active_tcb_ptrs s + \ + ptr = ksCurThread s \ ptr : all_queued_tcb_ptrs s \ + ksSchedulerAction s = SwitchToThread ptr)" + +lemma no_orphans_strg_almost: + "no_orphans s \ almost_no_orphans tcb_ptr s" + unfolding no_orphans_def almost_no_orphans_def + apply simp + done + +lemma almost_no_orphans_disj: + "almost_no_orphans tcb_ptr = (\ s. + \ ptr. ptr = ksCurThread s \ + ptr : all_queued_tcb_ptrs s \ + \ typ_at' TCBT ptr s \ + st_tcb_at' (\ thread_state. 
\ is_active_thread_state thread_state) ptr s \ + ptr = tcb_ptr \ + ksSchedulerAction s = SwitchToThread ptr)" + apply clarsimp + apply (rule ext) + apply (unfold almost_no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def st_tcb_at_neg' typ_at_tcb') + apply (auto del: pred_tcb_at' intro: pred_tcb_at') + done + +lemma all_queued_tcb_ptrs_ksReadyQueues_update[simp]: + "tcb_ptr \ all_queued_tcb_ptrs (ksReadyQueues_update f s) = (tcb_ptr \ all_queued_tcb_ptrs s)" + unfolding all_queued_tcb_ptrs_def + by (clarsimp simp: obj_at'_def) + +lemma no_orphans_update_simps[simp]: + "no_orphans (gsCNodes_update f s) = no_orphans s" + "no_orphans (gsUserPages_update g s) = no_orphans s" + "no_orphans (gsUntypedZeroRanges_update h s) = no_orphans s" + by (simp_all add: no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def all_queued_tcb_ptrs_def) + +lemma no_orphans_ksReadyQueuesL1Bitmap_update[simp]: + "no_orphans (ksReadyQueuesL1Bitmap_update f s) = no_orphans s" + unfolding no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + by auto + +lemma no_orphans_ksReadyQueuesL2Bitmap_update[simp]: + "no_orphans (ksReadyQueuesL2Bitmap_update f s) = no_orphans s" + unfolding no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + by auto + +lemma no_orphans_ksIdle[simp]: + "no_orphans (ksIdleThread_update f s) = no_orphans s" + unfolding no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + apply auto + done + +lemma no_orphans_ksWorkUnits [simp]: + "no_orphans (ksWorkUnitsCompleted_update f s) = no_orphans s" + unfolding no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + apply auto + done + +lemma no_orphans_irq_state_independent[intro!, simp]: + "no_orphans (s \ksMachineState := ksMachineState s \ irq_state := f (irq_state (ksMachineState s)) \ \) + = no_orphans s" + by (simp add: no_orphans_def all_active_tcb_ptrs_def + all_queued_tcb_ptrs_def is_active_tcb_ptr_def) + +add_upd_simps "no_orphans (gsUntypedZeroRanges_update f s)" +declare upd_simps[simp] + +lemma almost_no_orphans_ksReadyQueuesL1Bitmap_update[simp]: + "almost_no_orphans t (ksReadyQueuesL1Bitmap_update f s) = almost_no_orphans t s" + unfolding almost_no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + by auto + +lemma almost_no_orphans_ksReadyQueuesL2Bitmap_update[simp]: + "almost_no_orphans t (ksReadyQueuesL2Bitmap_update f s) = almost_no_orphans t s" + unfolding almost_no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + by auto + +lemma all_active_tcb_ptrs_queue [simp]: + "all_active_tcb_ptrs (ksReadyQueues_update f s) = all_active_tcb_ptrs s" + by (clarsimp simp: all_active_tcb_ptrs_def is_active_tcb_ptr_def) + +(****************************************************************************************************) + +crunch no_orphans [wp]: addToBitmap "no_orphans" +crunch no_orphans [wp]: removeFromBitmap "no_orphans" + +crunch almost_no_orphans [wp]: addToBitmap "almost_no_orphans x" +crunch almost_no_orphans [wp]: removeFromBitmap "almost_no_orphans x" + +lemma setCTE_tcbQueued[wp]: + "setCTE ptr v \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) t s)\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + +lemma setCTE_no_orphans [wp]: + "\ \s. no_orphans s \ + setCTE p cte + \ \rv s. 
no_orphans s \" + apply (rule no_orphans_lift) + apply (wp setCTE_typ_at' setCTE_pred_tcb_at')+ + done + +lemma setCTE_almost_no_orphans [wp]: + "\ \s. almost_no_orphans tcb_ptr s \ + setCTE p cte + \ \rv s. almost_no_orphans tcb_ptr s \" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift setCTE_typ_at' setCTE_pred_tcb_at') + done + +crunch no_orphans [wp]: activateIdleThread "no_orphans" + +lemma asUser_no_orphans [wp]: + "\ \s. no_orphans s \ + asUser thread f + \ \rv s. no_orphans s \" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +lemma threadSet_all_queued_tcb_ptrs: + "\tcb. tcbQueued (F tcb) = tcbQueued tcb \ threadSet F tptr \\s. P (t \ all_queued_tcb_ptrs s)\" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2 threadSet_wp) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: obj_at'_def ps_clear_upd objBits_simps) + done + +crunches removeFromBitmap, addToBitmap, setQueue + for all_queued_tcb_ptrs[wp]: "\s. P (t \ all_queued_tcb_ptrs s)" + (wp: tcbQueued_all_queued_tcb_ptrs_lift) + +crunches tcbQueuePrepend, tcbQueueAppend + for all_queued_tcb_ptrs[wp]: "\s. P (t \ all_queued_tcb_ptrs s)" + (wp: threadSet_all_queued_tcb_ptrs ignore: threadSet) + +lemma tcbQueued_update_True_all_queued_tcb_ptrs[wp]: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr' \ all_queued_tcb_ptrs s\ + threadSet (tcbQueued_update (\_. True)) tcb_ptr + \\_ s. tcb_ptr' \ all_queued_tcb_ptrs s\" + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: all_queued_tcb_ptrs_def obj_at'_def ps_clear_upd objBits_simps) + done + +lemma tcbSchedEnqueue_all_queued_tcb_ptrs[wp]: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr \ all_queued_tcb_ptrs s\ + tcbSchedEnqueue tcb_ptr' + \\_ s. tcb_ptr \ all_queued_tcb_ptrs s\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: hoare_vcg_imp_lift' threadGet_wp + | wpsimp wp: threadSet_all_queued_tcb_ptrs)+ + apply (clarsimp simp: all_queued_tcb_ptrs_def obj_at'_def) + done + +lemmas tcbSchedEnqueue_all_queued_tcb_ptrs'[wp] = + tcbSchedEnqueue_all_queued_tcb_ptrs[simplified all_queued_tcb_ptrs_def, simplified] + +lemma tcbSchedAppend_all_queued_tcb_ptrs[wp]: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr \ all_queued_tcb_ptrs s\ + tcbSchedAppend tcb_ptr' + \\_ s. tcb_ptr \ all_queued_tcb_ptrs s\" + unfolding tcbSchedAppend_def tcbQueueAppend_def + apply (wpsimp wp: hoare_vcg_imp_lift' threadGet_wp + | wpsimp wp: threadSet_all_queued_tcb_ptrs)+ + apply (clarsimp simp: all_queued_tcb_ptrs_def obj_at'_def) + done + +lemmas tcbSchedAppend_all_queued_tcb_ptrs'[wp] = + tcbSchedAppend_all_queued_tcb_ptrs[simplified all_queued_tcb_ptrs_def, simplified] + +lemma threadSet_no_orphans: + "\\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)); + \tcb. tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tptr \no_orphans\" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2) + +lemma tcbQueued_update_True_no_orphans: + "\almost_no_orphans tptr and tcb_at' tptr\ + threadSet (tcbQueued_update (\_. True)) tptr + \\_. 
no_orphans\" + unfolding no_orphans_disj + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2) + apply (fastforce simp: almost_no_orphans_def all_active_tcb_ptrs_def + tcb_at_typ_at' st_tcb_at_neg' is_active_tcb_ptr_def) + done + +lemma tcbQueued_update_True_almost_no_orphans: + "threadSet (tcbQueued_update (\_. True)) tptr' \almost_no_orphans tptr\" + unfolding almost_no_orphans_disj + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift threadSet_st_tcb_at2) + apply fastforce + done + +lemma threadSet_almost_no_orphans: + "\\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)); + \tcb. tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tptr \almost_no_orphans ptr\" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2) + +lemma setQueue_no_orphans[wp]: + "setQueue d prio qs \no_orphans\" + unfolding setQueue_def + apply wp + apply (clarsimp simp: no_orphans_def) + done + +lemma setQueue_almost_no_orphans[wp]: + "setQueue d prio qs \almost_no_orphans tptr\" + unfolding setQueue_def + apply wp + apply (clarsimp simp: almost_no_orphans_def) + done + +lemma tcbSchedEnqueue_no_orphans[wp]: + "tcbSchedEnqueue tcb_ptr \no_orphans\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadSet_almost_no_orphans threadGet_wp) + apply (fastforce simp: no_orphans_strg_almost) + done + +lemma tcbSchedAppend_no_orphans[wp]: + "tcbSchedAppend tcb_ptr \no_orphans\" + unfolding tcbSchedAppend_def tcbQueueAppend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadSet_almost_no_orphans threadGet_wp) + apply (fastforce simp: no_orphans_strg_almost) + done + +lemma tcbSchedEnqueue_almost_no_orphans: + "\almost_no_orphans tcb_ptr\ + tcbSchedEnqueue tcb_ptr + \\_. no_orphans\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadSet_almost_no_orphans threadGet_wp) + apply (fastforce simp: no_orphans_def almost_no_orphans_def all_queued_tcb_ptrs_def obj_at'_def) + done + +lemma tcbSchedEnqueue_almost_no_orphans_lift: + "tcbSchedEnqueue tcb_ptr \almost_no_orphans ptr\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + by (wpsimp wp: tcbQueued_update_True_almost_no_orphans threadSet_almost_no_orphans) + +lemma ssa_no_orphans: + "\ \s. no_orphans s \ + (\t. sch_act_not t s \ t : all_queued_tcb_ptrs s \ ksCurThread s = t) \ + setSchedulerAction sa + \ \rv s. no_orphans s \" + unfolding setSchedulerAction_def no_orphans_disj all_queued_tcb_ptrs_def + apply wp + apply auto + done + +lemma ssa_almost_no_orphans: + "\ \s. almost_no_orphans tcb_ptr s \ + (\t. sch_act_not t s \ t : all_queued_tcb_ptrs s \ ksCurThread s = t) \ + setSchedulerAction (SwitchToThread tcb_ptr) + \ \rv s. no_orphans s \" + unfolding setSchedulerAction_def no_orphans_disj almost_no_orphans_disj all_queued_tcb_ptrs_def + apply wp + apply auto + done + +lemma ssa_almost_no_orphans_lift [wp]: + "\ \s. almost_no_orphans tcb_ptr s \ + (\t. sch_act_not t s \ t : all_queued_tcb_ptrs s \ ksCurThread s = t) \ + setSchedulerAction sa + \ \rv s. 
almost_no_orphans tcb_ptr s \" + unfolding setSchedulerAction_def almost_no_orphans_disj all_queued_tcb_ptrs_def + apply wp + apply auto + done + +lemma rescheduleRequired_no_orphans [wp]: + "rescheduleRequired \no_orphans\" + unfolding rescheduleRequired_def + by (wpsimp wp: ssa_no_orphans hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift | wpc)+ + +lemma rescheduleRequired_almost_no_orphans [wp]: + "rescheduleRequired \almost_no_orphans tcb_ptr\" + unfolding rescheduleRequired_def + by (wpsimp wp: ssa_almost_no_orphans_lift hoare_vcg_all_lift tcbSchedEnqueue_almost_no_orphans_lift + hoare_vcg_imp_lift' hoare_vcg_disj_lift) + +lemma setThreadState_current_no_orphans: + "\\s. no_orphans s \ ksCurThread s = tcb_ptr\ + setThreadState state tcb_ptr + \\_. no_orphans\" + unfolding setThreadState_def + apply wpsimp + unfolding no_orphans_disj + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ + done + +lemma setThreadState_isRestart_no_orphans: + "\no_orphans and st_tcb_at' isRestart tcb_ptr\ + setThreadState state tcb_ptr + \\_ . no_orphans\" + unfolding setThreadState_def + apply wpsimp + unfolding no_orphans_disj + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ + apply (auto simp: is_active_thread_state_def st_tcb_at_double_neg' st_tcb_at_neg') + done + +lemma setThreadState_almost_no_orphans [wp]: + "\no_orphans\ setThreadState state tcb_ptr \\_. almost_no_orphans tcb_ptr\" + unfolding setThreadState_def + apply wpsimp + apply (unfold no_orphans_disj almost_no_orphans_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ + done + +lemma setThreadState_not_active_no_orphans: + "\ is_active_thread_state state \ setThreadState state tcb_ptr \no_orphans\" + unfolding setThreadState_def + apply wpsimp + apply (unfold no_orphans_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ + done + +lemma setThreadState_not_active_almost_no_orphans: + "\ is_active_thread_state state \ setThreadState state tcb_ptr \almost_no_orphans thread\" + unfolding setThreadState_def + apply wpsimp + apply (unfold almost_no_orphans_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ + done + +lemma activateThread_no_orphans [wp]: + "\ \s. no_orphans s \ ct_in_state' activatable' s \ invs' s \ + activateThread + \ \rv s. no_orphans s \" + unfolding activateThread_def + apply (wp gts_wp' setThreadState_isRestart_no_orphans | wpc | clarsimp)+ + apply (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def isRestart_def) + done + +crunches removeFromBitmap, tcbQueueRemove, setQueue + for almost_no_orphans[wp]: "almost_no_orphans thread" + and no_orphans[wp]: no_orphans + and all_queued_tcb_ptrs[wp]: "\s. tcb_ptr \ all_queued_tcb_ptrs s" + (wp: crunch_wps) + +lemma tcbQueued_update_False_all_queued_tcb_ptrs: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr' \ all_queued_tcb_ptrs s\ + threadSet (tcbQueued_update (\_. False)) tcb_ptr + \\_ s. tcb_ptr' \ all_queued_tcb_ptrs s\" + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: all_queued_tcb_ptrs_def obj_at'_def ps_clear_upd) + done + +lemma tcbSchedDequeue_all_queued_tcb_ptrs_other: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr' \ all_queued_tcb_ptrs s\ + tcbSchedDequeue tcb_ptr + \\_ s. 
tcb_ptr' \ all_queued_tcb_ptrs s\" + unfolding tcbSchedDequeue_def + by (wpsimp wp: tcbQueued_update_False_all_queued_tcb_ptrs threadGet_wp) + +lemma tcbQueued_update_False_almost_no_orphans: + "\no_orphans\ + threadSet (tcbQueued_update (\_. False)) tptr + \\_. almost_no_orphans tptr\" + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: no_orphans_def almost_no_orphans_def) + apply (rename_tac tcb_ptr) + apply (case_tac "tcb_ptr = tptr") + apply fastforce + apply (fastforce simp: all_queued_tcb_ptrs_def obj_at'_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def st_tcb_at'_def ps_clear_upd) + done + +lemma tcbSchedDequeue_almost_no_orphans [wp]: + "\no_orphans\ tcbSchedDequeue thread \\_. almost_no_orphans thread\" + unfolding tcbSchedDequeue_def + apply (wpsimp wp: tcbQueued_update_False_almost_no_orphans threadGet_wp) + apply (simp add: no_orphans_strg_almost) + done + +lemma tcbSchedDequeue_no_orphans[wp]: + "\\s. no_orphans s \ \ is_active_tcb_ptr tcbPtr s \ tcb_at' tcbPtr s\ + tcbSchedDequeue tcbPtr + \\_. no_orphans\" + supply disj_not1[simp del] + unfolding no_orphans_disj almost_no_orphans_disj + apply (rule hoare_allI) + apply (rename_tac tcb_ptr) + apply (case_tac "tcb_ptr = tcbPtr") + apply (rule_tac Q="\_ s. st_tcb_at' (\state. \ is_active_thread_state state) tcbPtr s" + in hoare_post_imp) + apply fastforce + apply wpsimp + apply (clarsimp simp: st_tcb_at'_def obj_at'_def is_active_tcb_ptr_def disj_not1) + apply (wpsimp wp: tcbQueued_update_False_all_queued_tcb_ptrs hoare_vcg_disj_lift + simp: tcbSchedDequeue_def) + done + +lemma switchToIdleThread_no_orphans' [wp]: + "\\s. no_orphans s + \ (is_active_tcb_ptr (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s)\ + switchToIdleThread + \\_. no_orphans\" + apply (clarsimp simp: switchToIdleThread_def setCurThread_def AARCH64_H.switchToIdleThread_def) + apply (simp add: no_orphans_disj all_queued_tcb_ptrs_def) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift + hoare_drop_imp[where R="\_. idleThreadNotQueued"] hoare_vcg_imp_lift') + apply (force simp: is_active_tcb_ptr_def st_tcb_at_neg' typ_at_tcb') + done + +crunches getVMID, Arch.switchToThread + for ksCurThread[wp]: "\ s. P (ksCurThread s)" + (wp: crunch_wps getObject_inv loadObject_default_inv findVSpaceForASID_vs_at_wp + simp: getThreadVSpaceRoot_def if_distribR) + +crunches updateASIDPoolEntry, Arch.switchToThread + for no_orphans[wp]: "no_orphans" + (wp: no_orphans_lift crunch_wps) + +lemma all_queued_tcb_ptrs_machine_state[simp]: + "all_queued_tcb_ptrs (s\ksMachineState := m\) = all_queued_tcb_ptrs s" + by (simp add: all_queued_tcb_ptrs_def) + +lemma all_queued_tcb_ptrs_arch_state[simp]: + "all_queued_tcb_ptrs (s\ksArchState := as\) = all_queued_tcb_ptrs s" + by (simp add: all_queued_tcb_ptrs_def) + +lemma setObject_vcpu_all_queued_tcb_ptrs[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (t \ all_queued_tcb_ptrs s)\" + apply (simp add: all_queued_tcb_ptrs_def) + apply (rule setObject_vcpu_obj_at'_no_vcpu) + done + +lemma setASID_all_queued_tcb_ptrs[wp]: + "setObject ptr (ap::asidpool) \\s. P (t \ all_queued_tcb_ptrs s)\" + apply (simp add: all_queued_tcb_ptrs_def obj_at'_real_def) + apply (wpsimp wp: setObject_ko_wp_at simp: objBits_simps) + apply (simp add: pageBits_def) + apply simp + apply (clarsimp simp: obj_at'_def ko_wp_at'_def) + done + +crunches Arch.switchToThread + for all_queued_tcb_ptrs[wp]: "\s. P (t \ all_queued_tcb_ptrs s)" + (wp: getASID_wp crunch_wps simp: crunch_simps) + +crunch ksSchedulerAction [wp]: "Arch.switchToThread" "\s. 
P (ksSchedulerAction s)" + +lemma setCurThread_no_orphans [wp]: + "\ \s. no_orphans s \ + (is_active_tcb_ptr (ksCurThread s) s \ ksCurThread s : all_queued_tcb_ptrs s) \ + setCurThread newThread + \ \rv s. no_orphans s \" + unfolding setCurThread_def + apply (wp | clarsimp)+ + apply (unfold no_orphans_def all_queued_tcb_ptrs_def + all_active_tcb_ptrs_def is_active_tcb_ptr_def) + apply auto + done + +lemma tcbSchedDequeue_all_active_tcb_ptrs[wp]: + "\\s. P (t' \ all_active_tcb_ptrs s)\ tcbSchedDequeue t \\_ s. P (t' \ all_active_tcb_ptrs s)\" + by (clarsimp simp: all_active_tcb_ptrs_def is_active_tcb_ptr_def) wp + +lemma setCurThread_almost_no_orphans: + "\\s. almost_no_orphans t s \ + (ksCurThread s \ t \ + ksCurThread s \ all_active_tcb_ptrs s \ + ksCurThread s \ all_queued_tcb_ptrs s)\ + setCurThread t \\_. no_orphans\" + unfolding setCurThread_def + apply wp + apply (fastforce simp: almost_no_orphans_def + no_orphans_def + all_queued_tcb_ptrs_def + all_active_tcb_ptrs_def + is_active_tcb_ptr_def) + done + +lemmas ArchThreadDecls_H_switchToThread_all_active_tcb_ptrs[wp] = + st_tcb_at'_all_active_tcb_ptrs_lift [OF Arch_switchToThread_pred_tcb'] + +lemma ThreadDecls_H_switchToThread_no_orphans: + "\ \s. no_orphans s \ + st_tcb_at' runnable' tcb_ptr s \ + (ksCurThread s \ all_active_tcb_ptrs s + \ ksCurThread s \ all_queued_tcb_ptrs s)\ + ThreadDecls_H.switchToThread tcb_ptr + \ \rv s. no_orphans s \" + unfolding Thread_H.switchToThread_def + by (wpsimp wp: setCurThread_almost_no_orphans hoare_vcg_imp_lift' + tcbSchedDequeue_all_queued_tcb_ptrs_other + | wps)+ + +lemma findM_failure': + "\ \x S. \ \s. P S s \ f x \ \rv s. \ rv \ P (insert x S) s \ \ \ + \ \s. P S s \ findM f xs \ \rv s. rv = None \ P (S \ set xs) s \" + apply (induct xs arbitrary: S) + apply (clarsimp, wp, clarsimp) + apply clarsimp + apply (rule bind_wp_fwd, assumption) + apply (case_tac r) + apply (clarsimp, wp, clarsimp) + apply clarsimp + apply (rule hoare_strengthen_post, assumption) + apply clarsimp + done + +lemmas findM_failure = findM_failure'[where S="{}", simplified] + +lemma findM_on_success: + "\ \x. \ P x \ f x \ \rv s. rv \; \x y. \ P x \ f y \ \rv. P x \ \ \ + \ \s. \x \ set xs. P x s \ findM f xs \ \rv s. \ y. rv = Some y \" + apply (induct xs; clarsimp) + apply wp+ + apply (clarsimp simp: imp_conv_disj Bex_def) + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift | clarsimp | assumption)+ + done + +crunch st_tcb' [wp]: switchToThread "\s. P' (st_tcb_at' P t s)" + +lemmas switchToThread_all_active_tcb_ptrs[wp] = + st_tcb_at'_all_active_tcb_ptrs_lift [OF switchToThread_st_tcb'] + +(* ksSchedulerAction s = ChooseNewThread *) +lemma chooseThread_no_orphans [wp]: + "\\s. no_orphans s \ all_invs_but_ct_idle_or_in_cur_domain' s + \ (is_active_tcb_ptr (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s)\ + chooseThread + \\_. no_orphans\" + (is "\?PRE\ _ \_\") + unfolding chooseThread_def Let_def + supply if_split[split del] + apply (simp only: return_bind, simp) + apply (intro bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp[where Q'="\rv s. ?PRE s \ ksReadyQueues_asrt s \ ready_qs_runnable s + \ rv = ksCurDomain s"]) + apply (rule_tac Q'="\rv s. 
?PRE s \ ksReadyQueues_asrt s \ ready_qs_runnable s + \ curdom = ksCurDomain s \ rv = ksReadyQueuesL1Bitmap s curdom" + in bind_wp) + apply (rename_tac l1) + apply (case_tac "l1 = 0") + (* switch to idle thread *) + apply (simp, wp, simp) + (* we have a thread to switch to *) + apply (wp assert_inv ThreadDecls_H_switchToThread_no_orphans) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def st_tcb_at'_def) + apply (fastforce dest!: lookupBitmapPriority_obj_at' elim: obj_at'_weaken + simp: all_active_tcb_ptrs_def) + apply (wpsimp simp: bitmap_fun_defs) + apply (wp curDomain_or_return_0[simplified]) + apply (wpsimp simp: curDomain_def simp: invs_no_cicd_ksCurDomain_maxDomain')+ + done + +lemma hoare_neg_imps: + "\P\ f \\ rv s. \ R rv s\ \ \P\ f \\r s. R r s \ Q r s\" + by (auto simp: valid_def) + +lemma setCurThread_ct [wp]: + "\ \ \ + setCurThread tcb_ptr + \ \rv s. ksCurThread s = tcb_ptr \" + unfolding setCurThread_def + apply (wp | clarsimp)+ + done + +lemma ThreadDecls_H_switchToThread_ct [wp]: + "\ \ \ + switchToThread tcb_ptr + \ \rv s. ksCurThread s = tcb_ptr \" + unfolding switchToThread_def + apply (wp | clarsimp)+ + done + +crunch no_orphans [wp]: nextDomain no_orphans +(wp: no_orphans_lift simp: Let_def) + +crunch tcbQueued[wp]: nextDomain "\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)" +(simp: Let_def) + +crunch st_tcb_at' [wp]: nextDomain "\s. P (st_tcb_at' P' p s)" +(simp: Let_def) + +crunch ct' [wp]: nextDomain "\s. P (ksCurThread s)" +(simp: Let_def) + +crunch sch_act_not [wp]: nextDomain "sch_act_not t" +(simp: Let_def) + +lemma all_invs_but_ct_idle_or_in_cur_domain'_strg: + "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" + by (clarsimp simp: invs'_to_invs_no_cicd'_def) + +lemma setSchedulerAction_cnt_sch_act_not[wp]: + "\ \ \ setSchedulerAction ChooseNewThread \\rv s. sch_act_not x s\" + by (rule hoare_pre, rule hoare_strengthen_post[OF setSchedulerAction_direct]) auto + +crunches setSchedulerAction + for pred_tcb_at': "\s. P (pred_tcb_at' proj Q t s)" + and ct': "\s. P (ksCurThread s)" + (wp_del: ssa_wp) + +lemmas ssa_st_tcb_at'_ksCurThread[wp] = + hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_pred_tcb_at' setSchedulerAction_ct'] + +lemma ct_active_st_tcb_at': + "ct_active' s = st_tcb_at' runnable' (ksCurThread s) s" + apply (rule iffI) + apply (drule ct_active_runnable') + apply (simp add: ct_in_state'_def) + apply (clarsimp simp: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply (case_tac st, auto) + done + +(* FIXME move *) +lemma invs_switchToThread_runnable': + "\ invs' s ; ksSchedulerAction s = SwitchToThread t \ \ st_tcb_at' runnable' t s" + by (simp add: invs'_def valid_state'_def) + +(* for shoving pred_tcb_at' through hoare_vcg_imp_lift for tcbs we know are there *) +lemma not_pred_tcb_at'I: + "\ pred_tcb_at' f (Not \ P) t s ; tcb_at' t s \ \ \ pred_tcb_at' f P t s" + by (subst (asm) pred_tcb_at'_Not, blast) + +lemma in_all_active_tcb_ptrsD: + "t \ all_active_tcb_ptrs s \ st_tcb_at' runnable' t s" + unfolding all_active_tcb_ptrs_def is_active_tcb_ptr_def + is_active_thread_state_def isRunning_def isRestart_def + apply clarsimp + apply (erule pred_tcb'_weakenE) + apply (case_tac st; clarsimp) + done + +lemma chooseThread_nosch: + "\\s. P (ksSchedulerAction s)\ + chooseThread + \\rv s. 
P (ksSchedulerAction s)\" + unfolding chooseThread_def Let_def curDomain_def + supply if_split[split del] + apply (simp only: return_bind, simp) + apply (wp findM_inv | simp)+ + apply (case_tac queue) + apply (wp stt_nosch | simp add: curDomain_def bitmap_fun_defs)+ + done + +lemma scheduleChooseNewThread_no_orphans: + "\invs' and no_orphans + and (\s. ksSchedulerAction s = ChooseNewThread + \ (st_tcb_at' runnable' (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s))\ + scheduleChooseNewThread + \\_. no_orphans\" + unfolding scheduleChooseNewThread_def + apply (wp add: ssa_no_orphans hoare_vcg_all_lift) + apply (wp hoare_disjI1 chooseThread_nosch)+ + apply (wp nextDomain_invs_no_cicd' hoare_vcg_imp_lift + hoare_lift_Pf2 [OF tcbQueued_all_queued_tcb_ptrs_lift[OF nextDomain_tcbQueued] + nextDomain_ct'] + hoare_lift_Pf2 [OF st_tcb_at'_is_active_tcb_ptr_lift[OF nextDomain_st_tcb_at'] + nextDomain_ct'] + hoare_vcg_all_lift getDomainTime_wp)[2] + apply (wpsimp simp: if_apply_def2 invs'_invs_no_cicd all_queued_tcb_ptrs_def + is_active_tcb_ptr_runnable')+ + done + +lemma setSchedulerAction_tcbQueued[wp]: + "setSchedulerAction sa \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)\" + by wpsimp + +lemma schedule_no_orphans[wp]: + notes ssa_wp[wp del] + shows + "\no_orphans and invs'\ schedule \\_. no_orphans\" +proof - + + have do_switch_to: + "\candidate. + \\s. no_orphans s \ ksSchedulerAction s = SwitchToThread candidate + \ st_tcb_at' runnable' candidate s + \ (st_tcb_at' runnable' (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s) \ + do ThreadDecls_H.switchToThread candidate; + setSchedulerAction ResumeCurrentThread + od + \\_. no_orphans\" + apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans + hoare_vcg_all_lift ThreadDecls_H_switchToThread_no_orphans)+ + apply (rule_tac Q="\_ s. (t = candidate \ ksCurThread s = candidate) \ + (t \ candidate \ sch_act_not t s)" + in hoare_post_imp) + apply (wpsimp wp: stt_nosch hoare_weak_lift_imp)+ + apply (fastforce dest!: in_all_active_tcb_ptrsD simp: all_queued_tcb_ptrs_def comp_def) + done + + have abort_switch_to_enq: + "\candidate. + \\s. no_orphans s \ invs' s + \ ksSchedulerAction s = SwitchToThread candidate + \ (st_tcb_at' runnable' (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s) \ + do tcbSchedEnqueue candidate; + setSchedulerAction ChooseNewThread; + scheduleChooseNewThread + od + \\_. no_orphans\" + apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) + apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift + simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def + | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_tcbQueued])+ + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift + | strengthen not_pred_tcb_at'_strengthen + | rule hoare_lift_Pf2[where f=ksCurThread])+ + apply (simp add: st_tcb_at_neg' tcb_at_invs' all_queued_tcb_ptrs_def) + done + + have abort_switch_to_app: + "\candidate. + \\s. no_orphans s \ invs' s + \ ksSchedulerAction s = SwitchToThread candidate + \ (st_tcb_at' runnable' (ksCurThread s) s + \ ksCurThread s \ all_queued_tcb_ptrs s ) \ + do tcbSchedAppend candidate; + setSchedulerAction ChooseNewThread; + scheduleChooseNewThread + od + \\_. 
no_orphans\" + apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) + apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift + simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def + | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_tcbQueued])+ + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift + | strengthen not_pred_tcb_at'_strengthen + | rule hoare_lift_Pf2[where f=ksCurThread])+ + apply (simp add: st_tcb_at_neg' tcb_at_invs' all_queued_tcb_ptrs_def) + done + + show ?thesis + supply K_bind_def[simp del] + unfolding schedule_def + apply (wp, wpc) + \ \action = ResumeCurrentThread\ + apply (wp)[1] + \ \action = ChooseNewThread\ + apply (clarsimp simp: when_def scheduleChooseNewThread_def) + apply (wp ssa_no_orphans hoare_vcg_all_lift) + apply (wp hoare_disjI1 chooseThread_nosch) + apply (wp nextDomain_invs_no_cicd' hoare_vcg_imp_lift + hoare_lift_Pf2 [OF tcbQueued_all_queued_tcb_ptrs_lift + [OF nextDomain_tcbQueued] + nextDomain_ct'] + hoare_lift_Pf2 [OF st_tcb_at'_is_active_tcb_ptr_lift + [OF nextDomain_st_tcb_at'] + nextDomain_ct'] + hoare_vcg_all_lift getDomainTime_wp)[2] + apply wpsimp + apply ((wp tcbSchedEnqueue_no_orphans tcbSchedEnqueue_all_queued_tcb_ptrs' + hoare_drop_imp + | clarsimp simp: all_queued_tcb_ptrs_def + | strengthen all_invs_but_ct_idle_or_in_cur_domain'_strg + | wps)+)[1] + apply wpsimp + \ \action = SwitchToThread candidate\ + apply (clarsimp) + apply (rename_tac candidate) + apply (wpsimp wp: do_switch_to abort_switch_to_enq abort_switch_to_app) + (* isHighestPrio *) + apply (wp hoare_drop_imps) + apply (wp add: tcbSchedEnqueue_no_orphans)+ + apply (clarsimp simp: conj_comms cong: conj_cong imp_cong split del: if_split) + apply (wp hoare_vcg_imp_lift' + | strengthen not_pred_tcb_at'_strengthen)+ + apply (wps | wpsimp wp: tcbSchedEnqueue_all_queued_tcb_ptrs')+ + apply (fastforce simp: is_active_tcb_ptr_runnable' all_invs_but_ct_idle_or_in_cur_domain'_strg + invs_switchToThread_runnable') + done +qed + +lemma setNotification_no_orphans[wp]: + "setNotification p ntfn \ no_orphans \" + by (rule no_orphans_lift; wpsimp simp: setNotification_def updateObject_default_def) + +crunch no_orphans [wp]: doMachineOp "no_orphans" + (wp: no_orphans_lift) + +crunch no_orphans [wp]: setMessageInfo "no_orphans" + +crunch no_orphans [wp]: completeSignal "no_orphans" + (simp: crunch_simps wp: crunch_wps) + +lemma possibleSwitchTo_almost_no_orphans [wp]: + "\\s. almost_no_orphans target s \ st_tcb_at' runnable' target s + \ weak_sch_act_wf (ksSchedulerAction s) s\ + possibleSwitchTo target + \\_. no_orphans\" + unfolding possibleSwitchTo_def + by (wpsimp wp: tcbSchedEnqueue_almost_no_orphans + ssa_almost_no_orphans hoare_weak_lift_imp + | wp (once) hoare_drop_imp)+ + +lemma possibleSwitchTo_almost_no_orphans': + "\\s. almost_no_orphans target s \ st_tcb_at' runnable' target s + \ sch_act_wf (ksSchedulerAction s) s \ + possibleSwitchTo target + \\_. no_orphans\" + by wp (strengthen sch_act_wf_weak, assumption) + +crunches tcbQueueAppend, tcbQueuePrepend + for almost_no_orphans[wp]: "almost_no_orphans tcbPtr" + +lemma tcbSchedAppend_almost_no_orphans: + "\almost_no_orphans thread\ + tcbSchedAppend thread + \\_. 
no_orphans\" + unfolding tcbSchedAppend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadGet_wp) + apply (fastforce simp: almost_no_orphans_def no_orphans_def all_queued_tcb_ptrs_def obj_at'_def) + done + +lemma no_orphans_is_almost[simp]: + "no_orphans s \ almost_no_orphans t s" + by (clarsimp simp: no_orphans_def almost_no_orphans_def) + +crunches decDomainTime + for no_orphans[wp]: no_orphans + (wp: no_orphans_lift) + +lemma timerTick_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + timerTick + \ \_ s. no_orphans s \" + unfolding timerTick_def getDomainTime_def + supply if_split[split del] + apply (subst threadState_case_if) + apply (wpsimp wp: threadSet_no_orphans tcbSchedAppend_almost_no_orphans + threadSet_almost_no_orphans threadSet_no_orphans tcbSchedAppend_sch_act_wf + hoare_drop_imp + simp: if_apply_def2 + | strengthen sch_act_wf_weak)+ + done + +lemma handleDoubleFault_no_orphans [wp]: + "\no_orphans\ handleDoubleFault tptr ex1 ex2 \\_. no_orphans \" + unfolding handleDoubleFault_def + by (wpsimp wp: setThreadState_not_active_no_orphans + simp: is_active_thread_state_def isRestart_def isRunning_def) + +crunch st_tcb' [wp]: getThreadCallerSlot "st_tcb_at' (\st. P st) t" + +crunches cteInsert, getThreadCallerSlot, getThreadReplySlot + for almost_no_orphans[wp]: "almost_no_orphans tcb_ptr" + and no_orphans[wp]: no_orphans + (wp: crunch_wps) + +lemma setupCallerCap_no_orphans [wp]: + "setupCallerCap sender receiver gr \no_orphans\" + unfolding setupCallerCap_def + by (wpsimp wp: setThreadState_not_active_no_orphans hoare_drop_imps + simp: is_active_thread_state_def isRestart_def isRunning_def) + +lemma setupCallerCap_almost_no_orphans [wp]: + "\almost_no_orphans tcb_ptr\ + setupCallerCap sender receiver gr + \\_. almost_no_orphans tcb_ptr\" + unfolding setupCallerCap_def + by (wpsimp wp: setThreadState_not_active_almost_no_orphans hoare_drop_imps + simp: is_active_thread_state_def isRestart_def isRunning_def) + +crunches cteInsert, setExtraBadge, setMessageInfo, transferCaps, copyMRs, + doNormalTransfer, doFaultTransfer, + invalidateVMIDEntry, invalidateASID, invalidateASIDEntry + for tcbQueued[wp]: "obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr" + (wp: crunch_wps simp: crunch_simps) + +crunches doIPCTransfer, setMRs + for no_orphans [wp]: "no_orphans" + (wp: no_orphans_lift) + +crunch ksQ'[wp]: setEndpoint "\s. P (ksReadyQueues s)" + (wp: setObject_queues_unchanged_tcb updateObject_default_inv) + +crunch no_orphans [wp]: setEndpoint "no_orphans" + (wp: no_orphans_lift) + +lemma sendIPC_no_orphans [wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ + sendIPC blocking call badge canGrant canGrantReply thread epptr + \\_. no_orphans\" + unfolding sendIPC_def + apply (wp hoare_drop_imps setThreadState_not_active_no_orphans sts_st_tcb' + possibleSwitchTo_almost_no_orphans' + | wpc + | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ + apply (rule_tac Q="\rv. no_orphans and valid_objs' and ko_at' rv epptr + and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) + apply (fastforce simp: valid_objs'_def valid_obj'_def valid_ep'_def obj_at'_def) + apply (wp get_ep_sp' | clarsimp)+ + done + +lemma sendFaultIPC_no_orphans [wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ + sendFaultIPC tptr fault + \\_. no_orphans\" + unfolding sendFaultIPC_def + apply (wpsimp wp: threadSet_no_orphans threadSet_valid_objs' threadSet_sch_act) + apply (rule_tac Q'="\_ s. 
no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s" + in hoare_strengthen_postE_R) + apply wpsimp+ + done + +lemma handleFault_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ + handleFault tptr ex1 + \\_. no_orphans\" + unfolding handleFault_def + by wpsimp + +lemma replyFromKernel_no_orphans [wp]: + "\ \s. no_orphans s \ + replyFromKernel thread r + \ \rv s. no_orphans s \" + by (wpsimp simp: replyFromKernel_def) + +crunch inv [wp]: alignError "P" + +lemma createObjects_no_orphans[wp]: + "\\s. no_orphans s \ pspace_aligned' s \ pspace_no_overlap' ptr sz s \ pspace_distinct' s + \ n \ 0 \ range_cover ptr sz (objBitsKO val + gbits) n + \ \ case_option False (is_active_thread_state \ tcbState) (projectKO_opt val) + \ \ case_option False tcbQueued (projectKO_opt val)\ + createObjects ptr n val gbits + \\_ s. no_orphans s\" + apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def all_queued_tcb_ptrs_def) + apply (simp only: imp_conv_disj pred_tcb_at'_def createObjects_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift createObjects_orig_obj_at2'[where sz=sz]) + apply (clarsimp split: option.splits) + done + +crunch no_orphans [wp]: insertNewCap "no_orphans" + (wp: hoare_drop_imps) + +lemma no_orphans_ksArchState_idem[simp]: + "no_orphans (s\ksArchState := f (ksArchState s)\) = no_orphans s" + unfolding no_orphans_def all_queued_tcb_ptrs_def all_active_tcb_ptrs_def is_active_tcb_ptr_def + by clarsimp + +lemma createNewCaps_no_orphans: + "\ (\s. no_orphans s + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s + \ (tp = APIObjectType CapTableObject \ us > 0)) + and K (range_cover ptr sz (APIType_capBits tp us) n \ 0 < n) \ + createNewCaps tp ptr n us d + \ \rv s. no_orphans s \" + supply if_split[split del] + apply (clarsimp simp: createNewCaps_def toAPIType_def cong: option.case_cong) + apply (cases tp; simp) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp) + apply (wpsimp wp: mapM_x_wp' threadSet_no_orphans + | clarsimp simp: is_active_thread_state_def makeObject_tcb + projectKO_opt_tcb isRunning_def isRestart_def + APIType_capBits_def Arch_createNewCaps_def + objBits_if_dev + | simp add: objBits_simps mult_2 nat_arith.add1 split: if_split)+ + done + +crunches updatePTType + for no_orphans[wp]: "no_orphans" + (wp: no_orphans_lift) + +lemma createObject_no_orphans: + "\pspace_no_overlap' ptr sz and pspace_aligned' and pspace_distinct' and + cte_wp_at' (\cte. cteCap cte = (capability.UntypedCap d ptr sz idx)) cref and + K (range_cover ptr sz (APIType_capBits tp us) (Suc 0)) and no_orphans\ + RetypeDecls_H.createObject tp ptr us d + \\xa. no_orphans\" + apply (simp only: createObject_def AARCH64_H.createObject_def placeNewObject_def2) + apply (wpsimp wp: createObjects'_wp_subst threadSet_no_orphans + createObjects_no_orphans[where sz = sz] + simp: placeNewObject_def2 placeNewDataObject_def + projectKO_opt_tcb cte_wp_at_ctes_of projectKO_opt_ep + is_active_thread_state_def makeObject_tcb pageBits_def unless_def + projectKO_opt_tcb isRunning_def isRestart_def + APIType_capBits_def objBits_simps + split_del: if_split) + apply (clarsimp simp: toAPIType_def APIType_capBits_def objBits_simps + bit_simps + split: object_type.split_asm apiobject_type.split_asm if_splits) + done + +lemma createNewObjects_no_orphans: + "\\s. no_orphans s \ invs' s \ pspace_no_overlap' ptr sz s + \ (\slot\set slots. cte_wp_at' (\c. 
cteCap c = capability.NullCap) slot s) + \ cte_wp_at' (\cte. cteCap cte = UntypedCap d (ptr && ~~ mask sz) sz idx) cref s + \ caps_no_overlap'' ptr sz s + \ range_cover ptr sz (APIType_capBits tp us) (length slots) + \ (tp = APIObjectType ArchTypes_H.CapTableObject \ us > 0) + \ caps_overlap_reserved' {ptr..ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us - 1} s + \ slots \ [] \ distinct slots \ ptr \ 0 + \ sz \ maxUntypedSizeBits \ canonical_address (ptr && ~~ mask sz)\ + createNewObjects tp cref slots ptr us d + \ \rv s. no_orphans s \" + apply (rule hoare_name_pre_state) + apply clarsimp + apply (rule hoare_pre) + apply (rule createNewObjects_wp_helper; simp?) + apply (simp add:insertNewCaps_def) + apply wp + apply (rule_tac P = "length caps = length slots" in hoare_gen_asm) + apply (wp zipWithM_x_inv) + apply simp + apply (wp createNewCaps_no_orphans[where sz = sz] | clarsimp)+ + apply (rule hoare_strengthen_post[OF createNewCaps_ret_len]) + apply simp + apply (clarsimp simp:invs_pspace_aligned' invs_valid_pspace' invs_pspace_distinct') + apply (intro conjI) + apply (erule range_cover.range_cover_n_less[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (simp add:invs'_def valid_state'_def) + apply (simp add: invs_ksCurDomain_maxDomain') + done + +lemma ksMachineState_ksPSpace_upd_comm: + "ksPSpace_update g (ksMachineState_update f s) = + ksMachineState_update f (ksPSpace_update g s)" + by simp + +lemma deleteObjects_no_orphans [wp]: + "\ (\s. no_orphans s \ pspace_distinct' s) and K (is_aligned ptr bits) \ + deleteObjects ptr bits + \ \rv s. no_orphans s \" + apply (rule hoare_gen_asm) + apply (unfold deleteObjects_def2 doMachineOp_def split_def) + apply wpsimp + apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def + all_queued_tcb_ptrs_def is_active_tcb_ptr_def + ksMachineState_ksPSpace_upd_comm) + apply (drule_tac x=tcb_ptr in spec) + apply (clarsimp simp: pred_tcb_at'_def obj_at_delete') + done + +crunch no_orphans[wp]: updateFreeIndex "no_orphans" + +lemma resetUntypedCap_no_orphans [wp]: + "\ (\s. no_orphans s \ pspace_distinct' s \ valid_objs' s) + and cte_wp_at' (isUntypedCap o cteCap) slot\ + resetUntypedCap slot + \ \rv s. no_orphans s \" + apply (simp add: resetUntypedCap_def) + apply (wpsimp wp: mapME_x_inv_wp preemptionPoint_inv getSlotCap_wp hoare_drop_imps + split_del: if_split) + apply (clarsimp simp: cte_wp_at_ctes_of split del: if_split) + apply (frule(1) cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD]) + apply (clarsimp simp: isCap_simps valid_cap_simps' capAligned_def) + done + +lemma invokeUntyped_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ valid_untyped_inv' ui s \ ct_active' s \ + invokeUntyped ui + \ \reply s. no_orphans s \" + apply (rule hoare_chain, rule invokeUntyped_invs''[where Q=no_orphans]) + apply (wp createNewCaps_no_orphans)+ + apply (fastforce simp: valid_pspace'_def) + apply wpsimp+ + apply (cases ui, auto simp: cte_wp_at_ctes_of)[2] + done + +lemma setInterruptState_no_orphans [wp]: + "setInterruptState a \no_orphans\" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) + +crunch no_orphans [wp]: emptySlot "no_orphans" + +lemma mapM_x_match: + "\I and V xs\ mapM_x m xs \\rv. Q\ \ \I and V xs\ mapM_x m xs \\rv. Q\" + by assumption + +lemma cancelAllIPC_no_orphans [wp]: + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s\ + cancelAllIPC epptr + \\_. 
no_orphans\" + unfolding cancelAllIPC_def + apply (wp sts_valid_objs' set_ep_valid_objs' sts_st_tcb' + hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans + | wpc + | rule mapM_x_match, + rename_tac list, + rule_tac V="\_. valid_objs' and pspace_aligned' and pspace_distinct'" + and I="no_orphans and (\s. \t\set list. tcb_at' t s)" + in mapM_x_inv_wp2 + | clarsimp simp: valid_tcb_state'_def)+ + apply (rule_tac Q="\rv. no_orphans and valid_objs' and pspace_aligned' and pspace_distinct' and + ko_at' rv epptr" + in hoare_post_imp) + apply (fastforce simp: valid_obj'_def valid_ep'_def obj_at'_def) + apply (wp get_ep_sp' | clarsimp)+ + done + +lemma cancelAllSignals_no_orphans [wp]: + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s\ + cancelAllSignals ntfn + \\_. no_orphans\" + unfolding cancelAllSignals_def + apply (wp sts_valid_objs' set_ntfn_valid_objs' sts_st_tcb' + hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans + | wpc + | clarsimp simp: valid_tcb_state'_def)+ + apply (rename_tac list) + apply (rule_tac V="\_. valid_objs' and pspace_aligned' and pspace_distinct'" + and I="no_orphans and (\s. \t\set list. tcb_at' t s)" + in mapM_x_inv_wp2) + apply simp + apply (wp sts_valid_objs' set_ntfn_valid_objs' sts_st_tcb' + hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans| + clarsimp simp: valid_tcb_state'_def)+ + apply (rule_tac Q="\rv. no_orphans and valid_objs' and pspace_aligned' and pspace_distinct' and + ko_at' rv ntfn" + in hoare_post_imp) + apply (fastforce simp: valid_obj'_def valid_ntfn'_def obj_at'_def) + apply (wp get_ntfn_sp' | clarsimp)+ + done + +crunches setBoundNotification, unbindNotification, unbindMaybeNotification + for no_orphans[wp]: no_orphans + +lemma finaliseCapTrue_standin_no_orphans [wp]: + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s\ + finaliseCapTrue_standin cap final + \\_. no_orphans\" + unfolding finaliseCapTrue_standin_def Let_def + by wpsimp + +lemma cteDeleteOne_no_orphans [wp]: + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s\ + cteDeleteOne slot + \\_. no_orphans\" + unfolding cteDeleteOne_def + by (wpsimp wp: assert_inv haskell_assert_inv isFinalCapability_inv weak_if_wp) + +crunch valid_objs' [wp]: getThreadReplySlot "valid_objs'" + +lemma cancelSignal_no_orphans [wp]: + "\\s. no_orphans s \ valid_objs' s\ + cancelSignal t ntfn + \\_. no_orphans\" + unfolding cancelSignal_def Let_def + by (wpsimp wp: hoare_drop_imps setThreadState_not_active_no_orphans + simp: is_active_thread_state_def isRestart_def isRunning_def) + +lemma cancelIPC_no_orphans [wp]: + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s\ + cancelIPC t + \\_. no_orphans\" + unfolding cancelIPC_def Let_def + by (wpsimp wp: setThreadState_not_active_no_orphans hoare_drop_imps weak_if_wp + threadSet_valid_objs' threadSet_no_orphans + simp: is_active_thread_state_def isRestart_def isRunning_def inQ_def) + +lemma asUser_almost_no_orphans: + "\almost_no_orphans t\ asUser a f \\_. almost_no_orphans t\" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) + +lemma sendSignal_no_orphans [wp]: + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s \ + sch_act_wf (ksSchedulerAction s) s\ + sendSignal ntfnptr badge + \\_. 
no_orphans\" + unfolding sendSignal_def + by (wpsimp wp: sts_st_tcb' gts_wp' getNotification_wp asUser_almost_no_orphans + cancelIPC_weak_sch_act_wf + simp: sch_act_wf_weak) + +crunches vgicUpdateLR + for no_orphans[wp]: "no_orphans" + (wp: no_orphans_lift crunch_wps) + +crunch not_pred_tcb_at'[wp]: vgicUpdateLR,doMachineOp "\s. \ (pred_tcb_at' proj P' t) s" + +crunches vcpuUpdate, vgicUpdateLR, doMachineOp + for no_orphans[wp]: no_orphans + and tcb_in_cur_domain'[wp]: "tcb_in_cur_domain' t" + (wp: no_orphans_lift tcb_in_cur_domain'_lift) + +lemma vgicMaintenance_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ + vgicMaintenance + \\_. no_orphans\" + unfolding vgicMaintenance_def Let_def + by (wpsimp wp: sch_act_wf_lift hoare_drop_imp[where f="vgicUpdateLR v idx virq" for v idx virq] + hoare_drop_imp[where f="return v" for v] + hoare_drop_imp[where f="doMachineOp f" for f]) + +lemma vppiEvent_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ + vppiEvent irq + \\_. no_orphans\" + unfolding vppiEvent_def Let_def + by (wpsimp wp: hoare_vcg_imp_lift' sch_act_wf_lift | wps)+ + +(* FIXME AARCH64: move *) +lemma irqVPPIEventIndex_irqVGICMaintenance_None[simp]: + "irqVPPIEventIndex irqVGICMaintenance = None" + unfolding irqVTimerEvent_def irqVGICMaintenance_def IRQ_def irqVPPIEventIndex_def + by simp + +lemma handleReservedIRQ_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ + handleReservedIRQ irq + \\_. no_orphans \" + unfolding handleReservedIRQ_def + by (case_tac "irq = irqVGICMaintenance"; wpsimp) + +lemma handleInterrupt_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + handleInterrupt irq + \ \rv s. no_orphans s \" + unfolding handleInterrupt_def + supply if_split[split del] + apply (wp hoare_drop_imps hoare_vcg_all_lift getIRQState_inv + | wpc | clarsimp simp: invs'_def valid_state'_def maskIrqSignal_def + if_apply_def2)+ + apply fastforce + done + +lemma updateRestartPC_no_orphans[wp]: + "\ \s. no_orphans s \ invs' s \ + updateRestartPC t + \ \rv s. no_orphans s \" + by (wpsimp simp: updateRestartPC_def asUser_no_orphans) + +lemma suspend_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' t s \ + suspend t + \ \rv s. no_orphans s \" + unfolding suspend_def + apply (wp | clarsimp simp: unless_def | rule conjI)+ + apply (clarsimp simp: is_active_tcb_ptr_def is_active_thread_state_def st_tcb_at_neg2) + apply (wp setThreadState_not_active_no_orphans hoare_disjI1 setThreadState_st_tcb + | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def)+ + apply (wp hoare_drop_imp)+ + apply auto + done + +crunches invalidateASIDEntry, invalidateTLBByASID + for no_orphans[wp]: no_orphans + (wp: no_orphans_lift) + +lemma deleteASIDPool_no_orphans [wp]: + "\ \s. no_orphans s \ + deleteASIDPool asid pool + \ \rv s. no_orphans s \" + unfolding deleteASIDPool_def + apply (wp | clarsimp)+ + apply (rule_tac Q="\rv s. no_orphans s" in hoare_post_imp) + apply (clarsimp simp: no_orphans_def all_queued_tcb_ptrs_def + all_active_tcb_ptrs_def is_active_tcb_ptr_def) + apply (wp mapM_wp_inv getObject_inv loadObject_default_inv | clarsimp)+ + done + +lemma storePTE_no_orphans [wp]: + "storePTE ptr val \ no_orphans \" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) + +lemma archThreadSet_tcbQueued_inv[wp]: + "archThreadSet f t \\s. obj_at' (\tcb. 
P (tcbQueued tcb)) tcb_ptr s\" + unfolding archThreadSet_def + by (wp setObject_tcb_strongest getObject_tcb_wp) (fastforce simp: obj_at'_def) + +crunches dissociateVCPUTCB + for tcbQueued_inv[wp]: "\s. obj_at' (\tcb. P (tcbQueued tcb)) t s" + (wp: threadGet_wp crunch_wps asUser_tcbQueued_inv simp: crunch_simps) + +crunches modifyArchState, vcpuUpdate, archThreadSet, dissociateVCPUTCB, vcpuFinalise + for no_orphans[wp]: "no_orphans" + (wp: no_orphans_lift crunch_wps) + +crunch no_orphans [wp]: unmapPage "no_orphans" + (wp: crunch_wps) + +crunches unmapPageTable, prepareThreadDelete + for no_orphans [wp]: "no_orphans" + (wp: lookupPTSlotFromLevel_inv) + +lemma setASIDPool_no_orphans [wp]: + "setObject p (ap :: asidpool) \ no_orphans \" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) + +crunches deleteASID, Arch.finaliseCap + for no_orphans [wp]: "no_orphans" + (wp: getObject_inv loadObject_default_inv) + +lemma deletingIRQHandler_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + deletingIRQHandler irq + \ \rv s. no_orphans s \" + unfolding deletingIRQHandler_def + by (wpsimp wp: hoare_drop_imps) auto + +lemma finaliseCap_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ valid_cap' cap s \ + finaliseCap cap final flag + \ \rv s. no_orphans s \" + apply (wpsimp simp: finaliseCap_def Let_def split_del: if_split) + apply (auto simp: valid_cap'_def dest!: isCapDs) + done + +crunches cteSwap, capSwapForDelete + for no_orphans[wp]: "no_orphans" + +declare withoutPreemption_lift [wp del] + +lemma no_orphans_finalise_prop_stuff: + "no_cte_prop no_orphans = no_orphans" + "finalise_prop_stuff no_orphans" + by (simp_all add: no_cte_prop_def finalise_prop_stuff_def + setCTE_no_orphans, + simp_all add: no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def all_queued_tcb_ptrs_def) + +lemma finaliseSlot_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s) \ + finaliseSlot slot e + \ \rv s. no_orphans s \" + unfolding finaliseSlot_def + apply (rule validE_valid, rule hoare_pre, rule hoare_strengthen_postE, rule use_spec) + apply (rule finaliseSlot_invs'[where p=slot and slot=slot and Pr=no_orphans]) + apply (simp_all add: no_orphans_finalise_prop_stuff) + apply wpsimp + apply (auto dest: cte_wp_at_valid_objs_valid_cap') + done + +lemma cteDelete_no_orphans [wp]: + "\ no_orphans and invs' and sch_act_simple and K ex \ + cteDelete ptr ex + \ \rv s. no_orphans s \" + apply (rule hoare_gen_asm) + apply (clarsimp simp: cteDelete_def whenE_def split_def) + apply (rule hoare_pre, wp) + apply clarsimp + done + +crunch no_orphans [wp]: cteMove "no_orphans" + (wp: crunch_wps) + +lemma cteRevoke_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ + cteRevoke ptr + \ \rv s. no_orphans s \" + apply (rule_tac Q="\rv s. no_orphans s \ invs' s \ sch_act_simple s" in hoare_strengthen_post) + apply (wpsimp wp: cteRevoke_preservation cteDelete_invs' cteDelete_sch_act_simple)+ + done + +lemma cancelBadgedSends_no_orphans [wp]: + "cancelBadgedSends epptr badge \no_orphans\" + unfolding cancelBadgedSends_def + by (wpsimp wp: filterM_preserved tcbSchedEnqueue_almost_no_orphans gts_wp' sts_st_tcb' + | wp (once) hoare_drop_imps)+ + +crunch no_orphans [wp]: handleFaultReply "no_orphans" + +lemma doReplyTransfer_no_orphans[wp]: + "\no_orphans and invs' and tcb_at' sender and tcb_at' receiver\ + doReplyTransfer sender receiver slot grant + \\rv. 
no_orphans\" + unfolding doReplyTransfer_def + apply (wp sts_st_tcb' setThreadState_not_active_no_orphans threadSet_no_orphans + threadSet_weak_sch_act_wf + | wpc | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def + | wp (once) hoare_drop_imps + | strengthen sch_act_wf_weak)+ + apply (rule_tac Q="\rv. invs' and no_orphans" in hoare_post_imp) + apply (fastforce simp: inQ_def) + apply (wp hoare_drop_imps | clarsimp)+ + apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def) + done + +crunch no_orphans [wp]: setupReplyMaster "no_orphans" + (wp: crunch_wps simp: crunch_simps) + +lemma restart_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' t s \ + restart t + \ \rv s. no_orphans s \" + unfolding restart_def isStopped_def2 + apply (wp tcbSchedEnqueue_almost_no_orphans sts_st_tcb' cancelIPC_weak_sch_act_wf + | clarsimp simp: o_def if_apply_def2 + | strengthen no_orphans_strg_almost + | wp (once) hoare_drop_imps)+ + apply auto + done + +lemma readreg_no_orphans[wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' src s \ + invokeTCB (tcbinvocation.ReadRegisters src susp n arch) + \ \rv s. no_orphans s \" + unfolding invokeTCB_def performTransfer_def + by wpsimp + +lemma writereg_no_orphans[wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s + \ tcb_at' dest s \ ex_nonz_cap_to' dest s\ + invokeTCB (tcbinvocation.WriteRegisters dest resume values arch) + \ \rv s. no_orphans s \" + unfolding invokeTCB_def performTransfer_def postModifyRegisters_def + by (wp hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' hoare_weak_lift_imp + | strengthen + | clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap )+ + +lemma copyreg_no_orphans[wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' src s + \ tcb_at' dest s \ ex_nonz_cap_to' src s \ ex_nonz_cap_to' dest s \ + invokeTCB (tcbinvocation.CopyRegisters dest src susp resume frames ints arch) + \ \rv s. no_orphans s \" + unfolding invokeTCB_def performTransfer_def postModifyRegisters_def + apply simp + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp) + apply (wp hoare_weak_lift_imp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs' + restart_no_orphans asUser_no_orphans suspend_nonz_cap_to_tcb + | wpc | simp add: if_apply_def2)+ + apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) + done + +lemma settlsbase_no_orphans[wp]: + "\ \s. no_orphans s \ invs' s \ + invokeTCB (tcbinvocation.SetTLSBase src dest) + \ \rv s. no_orphans s \" + unfolding invokeTCB_def performTransfer_def + by (wpsimp wp: hoare_vcg_imp_lift' mapM_x_wp' asUser_no_orphans) + +lemma almost_no_orphans_no_orphans: + "\ almost_no_orphans t s; \ is_active_tcb_ptr t s \ \ no_orphans s" + by (auto simp: almost_no_orphans_def no_orphans_def all_active_tcb_ptrs_def) + +lemma almost_no_orphans_no_orphans': + "\ almost_no_orphans t s; ksCurThread s = t\ \ no_orphans s" + by (auto simp: almost_no_orphans_def no_orphans_def all_active_tcb_ptrs_def) + +lemma setPriority_no_orphans[wp]: + "\no_orphans and invs' and tcb_at' tptr\ + setPriority tptr prio + \\_. no_orphans\" + unfolding setPriority_def + apply wpsimp + apply (rule_tac Q="\_ s. 
almost_no_orphans tptr s \ weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp) + apply clarsimp + apply (clarsimp simp: is_active_tcb_ptr_runnable' pred_tcb_at'_def obj_at'_def + almost_no_orphans_no_orphans elim!: almost_no_orphans_no_orphans') + apply (wp threadSet_almost_no_orphans | clarsimp simp: inQ_def)+ + apply (wpsimp wp: threadSet_weak_sch_act_wf) + apply (wp tcbSchedDequeue_almost_no_orphans| clarsimp)+ + done + +crunches bindNotification, setMCPriority + for no_orphans[wp]: no_orphans + +lemma threadSet_ipcbuffer_invs: + "is_aligned a msg_align_bits \ + \invs' and tcb_at' t\ threadSet (tcbIPCBuffer_update (\_. a)) t \\rv. invs'\" + by (wp threadSet_invs_trivial, simp_all add: inQ_def cong: conj_cong) + +lemma tc_no_orphans: + "\ no_orphans and invs' and sch_act_simple and tcb_at' a and ex_nonz_cap_to' a and + case_option \ (valid_cap' o fst) e' and + K (case_option True (isCNodeCap o fst) e') and + case_option \ (valid_cap' o fst) f' and + K (case_option True (isValidVTableRoot o fst) f') and + case_option \ (valid_cap') (case_option None (case_option None (Some o fst) o snd) g) and + K (case_option True isArchObjectCap (case_option None (case_option None (Some o fst) o snd) g)) and + K (case_option True (swp is_aligned 2 o fst) g) and + K (case_option True (swp is_aligned msg_align_bits o fst) g) and + K (case g of None \ True | Some x \ (case_option True (isArchObjectCap \ fst) \ snd) x) and + K (valid_option_prio d \ valid_option_prio mcp) \ + invokeTCB (tcbinvocation.ThreadControl a sl b' mcp d e' f' g) + \ \rv s. no_orphans s \" + apply (rule hoare_gen_asm) + apply (rule hoare_gen_asm) + apply (rule hoare_gen_asm) + apply (simp add: invokeTCB_def getThreadCSpaceRoot getThreadVSpaceRoot + getThreadBufferSlot_def split_def) + apply (simp only: eq_commute[where a="a"]) + apply (rule hoare_walk_assmsE) + apply (clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s]) + apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[2] + apply (rule hoare_walk_assmsE) + apply (cases mcp; clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s]) + apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial setMCPriority_invs' + typ_at_lifts[OF setMCPriority_typ_at'] + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[3] + apply ((simp only: simp_thms cong: conj_cong + | wp cteDelete_deletes cteDelete_invs' cteDelete_sch_act_simple + case_option_wp[where m'="return ()", OF setPriority_no_orphans return_inv,simplified] + checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] + checkCap_inv[where P=no_orphans] checkCap_inv[where P="tcb_at' a"] + threadSet_cte_wp_at' hoare_vcg_all_liftE_R hoare_vcg_all_lift threadSet_no_orphans + hoare_vcg_const_imp_lift_R hoare_weak_lift_imp hoare_drop_imp threadSet_ipcbuffer_invs + | (simp add: locateSlotTCB_def locateSlotBasic_def objBits_def + objBitsKO_def tcbIPCBufferSlot_def tcb_cte_cases_def, + wp hoare_return_sp) + | wpc | clarsimp)+) + apply (fastforce simp: objBits_defs isCap_simps dest!: isValidVTableRootD) + done + +lemma invokeTCB_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_inv_wf' tinv s \ + invokeTCB tinv + \ \rv s. no_orphans s \" + apply (case_tac tinv; simp; (solves wpsimp)?) 
+ apply (wpsimp simp: invokeTCB_def) + apply (wpsimp simp: invokeTCB_def) + apply (wp tc_no_orphans) + apply (clarsimp split: option.splits simp: msg_align_bits elim!:is_aligned_weaken) + apply (wpsimp simp: invokeTCB_def) + done + +lemma invokeCNode_no_orphans [wp]: + "\no_orphans and invs' and valid_cnode_inv' cinv and sch_act_simple\ + invokeCNode cinv + \\_. no_orphans\" + unfolding invokeCNode_def + apply (rule hoare_pre) + apply (wp hoare_drop_imps unless_wp | wpc | clarsimp split del: if_split)+ + done + +crunches performIRQControl, InterruptDecls_H.invokeIRQHandler, performPageTableInvocation, + performVSpaceInvocation, performPageInvocation, handleVMFault + for no_orphans[wp]: no_orphans + (wp: crunch_wps) + +lemma handleHypervisorFault_no_orphans[wp]: + "\\s. valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ no_orphans s\ + handleHypervisorFault w f + \\_. no_orphans\" + unfolding handleHypervisorFault_def isFpuEnable_def + by (wpsimp wp: undefined_valid) + +lemma associateVCPUTCB_no_orphans[wp]: + "associateVCPUTCB vcpuPtr tcbPtr \no_orphans\" + unfolding associateVCPUTCB_def + by (rule no_orphans_lift; wpsimp wp: setObject_typ_at_not) + +crunches invokeVCPUInjectIRQ, invokeVCPUWriteReg, invokeVCPUAckVPPI, performARMVCPUInvocation + for no_orphans [wp]: "no_orphans" + (wp: crunch_wps simp: crunch_simps) + +lemma performASIDControlInvocation_no_orphans [wp]: + notes [simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost usableUntypedRange.simps + shows "\ \s. no_orphans s \ invs' s \ valid_aci' aci s \ ct_active' s \ + performASIDControlInvocation aci + \ \reply s. no_orphans s \" + apply (rule hoare_name_pre_state) + apply (clarsimp simp:valid_aci'_def cte_wp_at_ctes_of + split:asidcontrol_invocation.splits) + apply (rename_tac s ptr_base p cref ptr null_cte ut_cte idx) + proof - + fix s ptr_base p cref ptr null_cte ut_cte idx + assume no_orphans: "no_orphans s" + and invs' : "invs' s" + and cte : "ctes_of s p = Some null_cte" "cteCap null_cte = capability.NullCap" + "ctes_of s cref = Some ut_cte" "cteCap ut_cte = capability.UntypedCap False ptr_base pageBits idx" + and desc : "descendants_of' cref (ctes_of s) = {}" + and misc : "p \ cref" "ex_cte_cap_wp_to' (\_. True) p s" "sch_act_simple s" "is_aligned ptr asid_low_bits" + "asid_wf ptr" "ct_active' s" + + have vc:"s \' UntypedCap False ptr_base pageBits idx" + using cte misc invs' + apply - + apply (case_tac ut_cte) + apply (rule ctes_of_valid_cap') + apply simp + apply fastforce + done + + hence cover: + "range_cover ptr_base pageBits pageBits (Suc 0)" + apply - + apply (rule range_cover_full) + apply (simp add:valid_cap'_def capAligned_def) + apply simp + done + + have exclude: "cref \ mask_range ptr_base pageBits" + apply (rule descendants_range_ex_cte'[where cte = "ut_cte"]) + apply (rule empty_descendants_range_in'[OF desc]) + apply (rule if_unsafe_then_capD'[where P = "\c. c = ut_cte"]) + apply (clarsimp simp: cte_wp_at_ctes_of cte) + apply (simp add:invs' invs_unsafe_then_cap') + apply (simp add:cte invs' add_mask_fold)+ + done + + show "\(=) s\performASIDControlInvocation (asidcontrol_invocation.MakePool ptr_base p cref ptr) + \\reply. no_orphans\" + apply (clarsimp simp: performASIDControlInvocation_def + split: asidcontrol_invocation.splits) + apply (wp hoare_weak_lift_imp | clarsimp)+ + apply (rule_tac Q="\rv s. 
no_orphans s" in hoare_post_imp) + apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def all_queued_tcb_ptrs_def) + apply (wp | clarsimp simp:placeNewObject_def2)+ + apply (wp createObjects'_wp_subst)+ + apply (wp hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ + apply (strengthen invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace') + apply (clarsimp simp:conj_comms) + apply (wp deleteObjects_invs'[where idx = idx and d=False] + hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where idx = idx and d=False] hoare_vcg_const_imp_lift ) + using invs' misc cte exclude no_orphans cover + apply (clarsimp simp: is_active_thread_state_def makeObject_tcb valid_aci'_def + cte_wp_at_ctes_of invs_pspace_aligned' invs_pspace_distinct' + projectKO_opt_tcb isRunning_def isRestart_def conj_comms + invs_valid_pspace' vc objBits_simps range_cover.aligned) + apply (intro conjI) + apply (rule vc) + apply (simp add:descendants_range'_def2) + apply (rule empty_descendants_range_in'[OF desc]) + apply clarsimp + done +qed + +crunches performASIDPoolInvocation + for no_orphans[wp]: no_orphans + (wp: getObject_inv loadObject_default_inv) + +lemma arch_performInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ valid_arch_inv' i s \ ct_active' s \ + Arch.performInvocation i + \ \reply s. no_orphans s \" + unfolding AARCH64_H.performInvocation_def performARMMMUInvocation_def + by (wpsimp simp: valid_arch_inv'_def) + +lemma setDomain_no_orphans [wp]: + "\no_orphans and cur_tcb' and tcb_at' tptr\ + setDomain tptr newdom + \\_. no_orphans\" + apply (simp add: setDomain_def when_def) + apply (wp tcbSchedEnqueue_almost_no_orphans hoare_vcg_imp_lift threadSet_almost_no_orphans + threadSet_st_tcb_at2 hoare_vcg_disj_lift + threadSet_no_orphans + | clarsimp simp: st_tcb_at_neg2 not_obj_at')+ + apply (fastforce simp: tcb_at_typ_at' is_active_tcb_ptr_runnable') + done + +lemma performInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ valid_invocation' i s \ ct_active' s \ sch_act_simple s \ + performInvocation block call i + \ \reply s. no_orphans s \" + by (wpsimp simp: performInvocation_def) auto + +lemma getThreadState_restart [wp]: + "\ \s. tcb_at' thread s \ + getThreadState thread + \ \rv s. rv = Structures_H.thread_state.Restart \ st_tcb_at' isRestart thread s \" + apply (rule hoare_strengthen_post) + apply (rule gts_st_tcb') + apply (clarsimp simp add: pred_tcb_at'_def obj_at'_def isRestart_def) + done + +lemma handleInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + ct_active' s \ ksSchedulerAction s = ResumeCurrentThread \ + handleInvocation isCall isBlocking + \ \rv s. no_orphans s \" + unfolding handleInvocation_def + apply (rule hoare_pre) + apply (wp syscall_valid' setThreadState_isRestart_no_orphans | wpc | clarsimp)+ + apply (rule_tac Q="\state s. 
no_orphans s \ invs' s \ + (state = Structures_H.thread_state.Restart \ + st_tcb_at' isRestart thread s)" + in hoare_post_imp) + apply (wp | clarsimp)+ + apply (wp setThreadState_current_no_orphans sts_invs_minor' + ct_in_state'_set setThreadState_st_tcb + hoare_vcg_all_lift + | simp add: split_def split del: if_split)+ + apply (clarsimp simp: if_apply_def2) + by (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def invs'_def + cur_tcb'_def valid_state'_def valid_idle'_def) + +lemma receiveSignal_no_orphans [wp]: + "receiveSignal thread cap isBlocking \no_orphans\" + unfolding receiveSignal_def + apply (wp hoare_drop_imps setThreadState_not_active_no_orphans | wpc + | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def + doNBRecvFailedTransfer_def)+ + done + + +lemma receiveIPC_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + receiveIPC thread cap is_blocking + \ \rv s. no_orphans s \" + unfolding receiveIPC_def + apply (rule hoare_pre) + apply (wp setThreadState_not_active_no_orphans hoare_drop_imps + hoare_vcg_all_lift sts_st_tcb' + | wpc + | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def + doNBRecvFailedTransfer_def + | strengthen sch_act_wf_weak)+ + done + +crunch valid_objs' [wp]: getThreadCallerSlot "valid_objs'" + +lemma deleteCallerCap_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + deleteCallerCap receiver + \ \rv s. no_orphans s \" + unfolding deleteCallerCap_def + by (wpsimp wp: hoare_drop_imps) auto + +lemma remove_neg_strg: + "(A \ B) \ ((x \ A) \ (\ x \ B))" + by blast + +lemma handleRecv_no_orphans [wp]: +notes if_cong[cong] shows + "\ \s. no_orphans s \ invs' s \ + handleRecv isBlocking + \ \rv . no_orphans \" + unfolding handleRecv_def + apply (clarsimp simp: whenE_def split del: if_split | wp hoare_drop_imps getNotification_wp | wpc )+ (*takes a while*) + apply (rule_tac Q'="\rv s. no_orphans s \ invs' s" in hoare_strengthen_postE_R) + apply (wp, fastforce) + apply (rule_tac Q="\rv s. no_orphans s \ invs' s" in hoare_post_imp) + apply (wp | clarsimp | fastforce)+ + done + +crunches getThreadCallerSlot, handleHypervisorFault + for invs' [wp]: "invs'" + +lemma handleReply_no_orphans [wp]: + "\no_orphans and invs'\ handleReply \\_. no_orphans\" + unfolding handleReply_def + apply (wpsimp wp: hoare_drop_imps) + apply (wp (once) hoare_vcg_all_lift) + apply (rule_tac Q="\rv s. no_orphans s \ invs' s \ tcb_at' thread s \ + valid_cap' rv s" in hoare_post_imp) + apply (wpsimp wp: hoare_drop_imps + simp: valid_cap'_def invs'_def cur_tcb'_def valid_state'_def)+ + done + +lemma handleYield_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + handleYield + \ \rv . no_orphans \" + unfolding handleYield_def + by (wp tcbSchedAppend_almost_no_orphans) auto + +lemma activatable_from_running': + "ct_running' s \ ct_in_state' activatable' s" + by (clarsimp simp: ct_in_state'_def elim!: pred_tcb'_weakenE) + +(* FIXME move *) +lemma sts_tcb_at'_preserve: + "\ st_tcb_at' P t and K (P st) \ setThreadState st t' \\_. st_tcb_at' P t \" + by (wpsimp wp: sts_st_tcb') + +(* FIXME move *) +(* e.g. if you set a non-runnable thread to Inactive, all runnable threads are still runnable *) +lemma sts_tcb_at'_preserve': + "\ st_tcb_at' P t and st_tcb_at' (\st. \ P st) t' and K (\ P st) \ + setThreadState st t' + \\_. st_tcb_at' P t \" + by (wpsimp wp: sts_st_tcb' simp: st_tcb_at_neg') + +lemma handleEvent_no_orphans [wp]: + "\ \s. 
invs' s \ + (e \ Interrupt \ ct_running' s) \ + ksSchedulerAction s = ResumeCurrentThread \ no_orphans s \ + handleEvent e + \ \rv s. no_orphans s \" + apply (simp add: handleEvent_def handleSend_def handleCall_def + cong: event.case_cong syscall.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps | wpc | clarsimp simp: handleHypervisorFault_def + | strengthen invs_valid_objs' invs_sch_act_wf')+ + apply (auto simp: activatable_from_running' active_from_running') + done + +theorem callKernel_no_orphans[wp]: + "\ \s. invs' s \ + (e \ Interrupt \ ct_running' s) \ + ksSchedulerAction s = ResumeCurrentThread \ no_orphans s \ + callKernel e + \ \rv s. no_orphans s \" + unfolding callKernel_def + apply (wpsimp wp: hoare_drop_imp[where f=activateThread] schedule_invs' + (* getActiveIRQ can't return a non-kernel IRQ *) + | wp (once) hoare_post_imp[ + where a="doMachineOp (getActiveIRQ True)" + and Q="\rv s. no_orphans s \ invs' s \ rv \ Some ` non_kernel_IRQs"])+ + done + +end + +end diff --git a/proof/refine/ARM/ADT_H.thy b/proof/refine/ARM/ADT_H.thy index 158ddb5aba..ad198a9a6e 100644 --- a/proof/refine/ARM/ADT_H.thy +++ b/proof/refine/ARM/ADT_H.thy @@ -622,7 +622,7 @@ proof - apply (intro conjI impI allI) apply (erule pspace_dom_relatedE[OF _ pspace_relation]) apply clarsimp - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (clarsimp simp add: ep_relation_def EndpointMap_def split: Structures_A.endpoint.splits) @@ -636,7 +636,7 @@ proof - apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (clarsimp simp add: ntfn_relation_def AEndpointMap_def split: Structures_A.ntfn.splits) @@ -649,7 +649,7 @@ proof - apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) @@ -658,7 +658,7 @@ proof - apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) @@ -684,7 +684,7 @@ proof - apply (case_tac vmpage_size; simp) apply ((frule_tac i=n and k="0x1000" in word_mult_less_mono1, simp+)+)[4] apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) @@ -710,7 +710,7 @@ proof - apply (case_tac vmpage_size; simp) apply ((frule_tac i=n and k="0x1000" in word_mult_less_mono1, simp+)+)[4] apply (erule 
pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) prefer 2 apply (rename_tac arch_kernel_obj) @@ -738,7 +738,7 @@ proof - arch_tcb_relation_imp_ArchTcnMap) apply (simp add: absCNode_def cte_map_def) apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def split: if_split_asm) prefer 2 apply (rename_tac arch_kernel_obj) @@ -805,7 +805,7 @@ proof - (* mapping architecture-specific objects *) apply clarsimp apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_object y ko P arch_kernel_obj) apply (case_tac arch_kernel_object, simp_all add: absHeapArch_def @@ -949,7 +949,7 @@ shows apply (case_tac "ksPSpace s' x", clarsimp) apply (erule_tac x=x in allE, clarsimp) apply clarsimp - apply (case_tac a, simp_all add: other_obj_relation_def) + apply (case_tac a, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (insert pspace_relation) apply (clarsimp simp: obj_at'_def projectKOs) apply (erule(1) pspace_dom_relatedE) @@ -1018,7 +1018,7 @@ lemma TCB_implies_KOTCB: apply (clarsimp simp add: pspace_relation_def pspace_dom_def dom_def UNION_eq Collect_eq) apply (erule_tac x=a in allE)+ - apply (clarsimp simp add: other_obj_relation_def + apply (clarsimp simp add: tcb_relation_cut_def split: Structures_H.kernel_object.splits) apply (drule iffD1) apply (fastforce simp add: dom_def image_def) @@ -1802,7 +1802,7 @@ definition domain_index_internal = ksDomScheduleIdx s, cur_domain_internal = ksCurDomain s, domain_time_internal = ksDomainTime s, - ready_queues_internal = curry (ksReadyQueues s), + ready_queues_internal = (\d p. 
heap_walk (tcbSchedNexts_of s) (tcbQueueHead (ksReadyQueues s (d, p))) []), cdt_list_internal = absCDTList (cteMap (gsCNodes s)) (ctes_of s)\" lemma absExst_correct: @@ -1810,12 +1810,15 @@ lemma absExst_correct: assumes rel: "(s, s') \ state_relation" shows "absExst s' = exst s" apply (rule det_ext.equality) - using rel invs invs' - apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct - absCDTList_correct[THEN fun_cong] state_relation_def invs_def valid_state_def - ready_queues_relation_def invs'_def valid_state'_def - valid_pspace_def valid_sched_def valid_pspace'_def curry_def fun_eq_iff) - apply (fastforce simp: absEkheap_correct) + using rel invs invs' + apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct + absCDTList_correct[THEN fun_cong] state_relation_def invs_def + valid_state_def ready_queues_relation_def ready_queue_relation_def + invs'_def valid_state'_def + valid_pspace_def valid_sched_def valid_pspace'_def curry_def + fun_eq_iff) + apply (fastforce simp: absEkheap_correct) + apply (fastforce simp: list_queue_relation_def Let_def dest: heap_ls_is_walk) done diff --git a/proof/refine/ARM/ArchAcc_R.thy b/proof/refine/ARM/ArchAcc_R.thy index 3352fb12d1..5ddf93aedb 100644 --- a/proof/refine/ARM/ArchAcc_R.thy +++ b/proof/refine/ARM/ArchAcc_R.thy @@ -125,16 +125,6 @@ lemma getObject_ASIDPool_corres [corres]: apply (clarsimp simp: other_obj_relation_def asid_pool_relation_def) done -lemma aligned_distinct_obj_atI': - "\ ksPSpace s x = Some ko; pspace_aligned' s; - pspace_distinct' s; ko = injectKO v \ - \ ko_at' v x s" - apply (simp add: obj_at'_def projectKOs project_inject - pspace_distinct'_def pspace_aligned'_def) - apply (drule bspec, erule domI)+ - apply simp - done - lemmas aligned_distinct_asid_pool_atI' = aligned_distinct_obj_atI'[where 'a=asidpool, simplified, OF _ _ _ refl] @@ -199,7 +189,7 @@ lemma setObject_ASIDPool_corres [corres]: corres dc (asid_pool_at p and valid_etcbs) (asid_pool_at' p') (set_asid_pool p a) (setObject p' a')" apply (simp add: set_asid_pool_def) - apply (corressimp search: setObject_other_corres[where P="\_. True"] + apply (corresKsimp search: setObject_other_corres[where P="\_. 
True"] wp: get_object_ret get_object_wp) apply (simp add: other_obj_relation_def asid_pool_relation_def) apply (clarsimp simp: obj_at_simps ) @@ -758,18 +748,21 @@ lemma setObject_PD_corres [@lift_corres_args, corres]: apply (drule(1) ekheap_kheap_dom) apply clarsimp apply (drule_tac x=p in bspec, erule domI) - apply (simp add: other_obj_relation_def - split: Structures_A.kernel_object.splits) - apply (rule conjI) + apply (simp add: tcb_relation_cut_def + split: Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x="p && ~~ mask pd_bits" in allE)+ apply fastforce + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pde')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + subgoal by (fastforce dest: tcbs_of'_non_tcb_update) apply (simp add: map_to_ctes_upd_other) apply (simp add: fun_upd_def) apply (simp add: caps_of_state_after_update obj_at_def swp_cte_at_caps_of) done - lemma setObject_PT_corres [@lift_corres_args, corres]: "pte_relation_aligned (p >> 2) pte pte' \ corres dc (ko_at (ArchObj (PageTable pt)) (p && ~~ mask pt_bits) @@ -835,12 +828,16 @@ lemma setObject_PT_corres [@lift_corres_args, corres]: apply (drule(1) ekheap_kheap_dom) apply clarsimp apply (drule_tac x=p in bspec, erule domI) - apply (simp add: other_obj_relation_def - split: Structures_A.kernel_object.splits) - apply (rule conjI) + apply (simp add: tcb_relation_cut_def + split: Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x="p && ~~ mask pt_bits" in allE)+ apply fastforce + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pte')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + subgoal by (fastforce dest: tcbs_of'_non_tcb_update) apply (simp add: map_to_ctes_upd_other) apply (simp add: fun_upd_def) apply (simp add: caps_of_state_after_update obj_at_def swp_cte_at_caps_of) @@ -1032,7 +1029,7 @@ lemma lookupPTSlot_corres [@lift_corres_args, corres]: (pspace_aligned' and pspace_distinct') (lookup_pt_slot pd vptr) (lookupPTSlot pd vptr)" unfolding lookup_pt_slot_def lookupPTSlot_def lookupPTSlotFromPT_def - apply (corressimp simp: pde_relation_aligned_def lookup_failure_map_def + apply (corresKsimp simp: pde_relation_aligned_def lookup_failure_map_def ptBits_def pdeBits_def pageBits_def pteBits_def mask_def wp: get_pde_wp_valid getPDE_wp) by (auto simp: lookup_failure_map_def obj_at_def) @@ -1131,7 +1128,7 @@ lemma createMappingEntries_corres [corres]: (create_mapping_entries base vptr pgsz vm_rights attrib pd) (createMappingEntries base' vptr' pgsz' vm_rights' attrib' pd')" unfolding createMappingEntries_def mapping_map_def - by (cases pgsz; corressimp simp: vmattributes_map_def less_kernel_base_mapping_slots + by (cases pgsz; corresKsimp simp: vmattributes_map_def less_kernel_base_mapping_slots largePagePTEOffsets_def largePagePTE_offsets_def superSectionPDEOffsets_def @@ -1168,7 +1165,7 @@ lemma ensureSafeMapping_corres [corres]: unfolding mapping_map_def ensureSafeMapping_def apply (cases m; cases m'; simp; match premises in "(_ \ (=)) p p'" for p p' \ \cases "fst p"; cases "fst p'"\; clarsimp) - by (corressimp corresK: mapME_x_corresK_inv + by (corresKsimp corresK: mapME_x_corresK_inv wp: 
get_master_pte_wp get_master_pde_wp getPTE_wp getPDE_wp; auto simp add: valid_mapping_entries_def)+ @@ -1201,7 +1198,7 @@ lemma find_pd_for_asid_corres [corres]: (pspace_aligned' and pspace_distinct' and no_0_obj') (find_pd_for_asid asid) (findPDForASID asid')" apply (simp add: find_pd_for_asid_def findPDForASID_def liftME_def bindE_assoc) - apply (corressimp simp: liftE_bindE assertE_assert mask_asid_low_bits_ucast_ucast + apply (corresKsimp simp: liftE_bindE assertE_assert mask_asid_low_bits_ucast_ucast lookup_failure_map_def wp: getPDE_wp getASID_wp search: checkPDAt_corres corres_gets_asid) @@ -1433,5 +1430,161 @@ lemma dmo_clearMemory_invs'[wp]: apply fastforce done +lemma pspace_aligned_cross: + "\ pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ pspace_aligned' s'" + apply (clarsimp simp: pspace_aligned'_def pspace_aligned_def pspace_relation_def) + apply (rename_tac p' ko') + apply (prop_tac "p' \ pspace_dom (kheap s)", fastforce) + apply (thin_tac "pspace_dom k = p" for k p) + apply (clarsimp simp: pspace_dom_def) + apply (drule bspec, fastforce)+ + apply clarsimp + apply (rename_tac ko' a a' P ko) + apply (erule (1) obj_relation_cutsE; clarsimp simp: objBits_simps) + + \\CNode\ + apply (clarsimp simp: cte_map_def) + apply (simp only: cteSizeBits_def cte_level_bits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp) + apply (rule is_aligned_weaken) + apply (rule is_aligned_mult_triv2, simp) + + \\TCB\ + apply (clarsimp simp: tcbBlockSizeBits_def elim!: is_aligned_weaken) + + \\PageTable\ + apply (clarsimp simp: archObjSize_def pteBits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply simp + apply (rule is_aligned_shift) + + \\PageDirectory\ + apply (clarsimp simp: archObjSize_def pdeBits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp) + apply (rule is_aligned_shift) + + \\DataPage\ + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (rule pbfs_atleast_pageBits) + apply (fastforce intro: is_aligned_shift is_aligned_mult_triv2) + + \\other_obj_relation\ + apply (simp add: other_obj_relation_def) + by (clarsimp simp: epSizeBits_def ntfnSizeBits_def + split: kernel_object.splits Structures_A.kernel_object.splits) + (fastforce simp: archObjSize_def split: arch_kernel_object.splits arch_kernel_obj.splits) + +lemmas is_aligned_add_step_le' = is_aligned_add_step_le[simplified mask_2pm1 add_diff_eq] + +lemma objBitsKO_Data: + "objBitsKO (if dev then KOUserDataDevice else KOUserData) = pageBits" + by (simp add: objBits_def objBitsKO_def word_size_def) + +lemma of_bl_shift_cte_level_bits: + "(of_bl z :: machine_word) << cte_level_bits \ mask (cte_level_bits + length z)" + by word_bitwise + (simp add: test_bit_of_bl bit_simps word_size cte_level_bits_def rev_bl_order_simps) + +lemma obj_relation_cuts_range_limit: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ + \ \x n. 
p' = p + x \ is_aligned x n \ n \ obj_bits ko \ x \ mask (obj_bits ko)" + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (drule (1) wf_cs_nD) + apply (clarsimp simp: cte_map_def2) + apply (rule_tac x=cte_level_bits in exI) + apply (simp add: is_aligned_shift of_bl_shift_cte_level_bits) + apply (rule_tac x=tcbBlockSizeBits in exI) + apply (simp add: tcbBlockSizeBits_def) + apply (rule_tac x=pteBits in exI) + apply (simp add: bit_simps is_aligned_shift mask_def pteBits_def) + apply word_bitwise + apply (rule_tac x=pdeBits in exI) + apply (simp add: bit_simps is_aligned_shift mask_def pdeBits_def) + apply word_bitwise + apply (rule_tac x=pageBits in exI) + apply (simp add: is_aligned_shift pbfs_atleast_pageBits is_aligned_mult_triv2) + apply (simp add: mask_def shiftl_t2n mult_ac) + apply (frule word_less_power_trans2, rule pbfs_atleast_pageBits) + apply (simp add: pbfs_less_wb'[unfolded word_bits_def, simplified]) + apply (simp add: pbfs_less_wb'[unfolded word_bits_def, simplified]) + apply fastforce + done + +lemma obj_relation_cuts_range_mask_range: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko'; is_aligned p (obj_bits ko) \ + \ p' \ mask_range p (obj_bits ko)" + apply (drule (1) obj_relation_cuts_range_limit, clarsimp) + apply (rule conjI) + apply (rule word_plus_mono_right2; assumption?) + apply (simp add: is_aligned_no_overflow_mask) + apply (erule word_plus_mono_right) + apply (simp add: is_aligned_no_overflow_mask) + done + +lemma obj_relation_cuts_obj_bits: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ \ objBitsKO ko' \ obj_bits ko" + apply (erule (1) obj_relation_cutsE; + clarsimp simp: objBits_simps objBits_defs cte_level_bits_def + pbfs_atleast_pageBits[simplified bit_simps] archObjSize_def pteBits_def + pdeBits_def) + apply (cases ko; simp add: other_obj_relation_def objBits_defs + split: kernel_object.splits) + apply (rename_tac ako, case_tac ako; clarsimp) + apply (rename_tac ako', case_tac ako'; clarsimp simp: archObjSize_def) + done + +lemma pspace_distinct_cross: + "\ pspace_distinct s; pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ + pspace_distinct' s'" + apply (frule (1) pspace_aligned_cross) + apply (clarsimp simp: pspace_distinct'_def) + apply (rename_tac p' ko') + apply (rule pspace_dom_relatedE; assumption?) + apply (rename_tac p ko P) + apply (frule (1) pspace_alignedD') + apply (frule (1) pspace_alignedD) + apply (rule ps_clearI, assumption) + apply (case_tac ko'; simp add: objBits_simps objBits_defs obj_at_simps) + apply (simp split: arch_kernel_object.splits add: obj_at_simps pteBits_def pdeBits_def) + apply (rule ccontr, clarsimp) + apply (rename_tac x' ko_x') + apply (frule_tac x=x' in pspace_alignedD', assumption) + apply (rule_tac x=x' in pspace_dom_relatedE; assumption?) + apply (rename_tac x ko_x P') + apply (frule_tac p=x in pspace_alignedD, assumption) + apply (case_tac "p = x") + apply clarsimp + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (clarsimp simp: cte_relation_def cte_map_def2 objBits_simps) + apply (rule_tac n=cte_level_bits in is_aligned_add_step_le'; assumption?) + apply (rule is_aligned_add; (rule is_aligned_shift)?) + apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (rule is_aligned_add; (rule is_aligned_shift)?) + apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (simp add: cte_level_bits_def cteSizeBits_def) + apply (clarsimp simp: pte_relation_def objBits_simps archObjSize_def) + apply (rule_tac n=pteBits in is_aligned_add_step_le'; assumption?) 
+ apply (clarsimp simp: pde_relation_def objBits_simps archObjSize_def) + apply (rule_tac n=pdeBits in is_aligned_add_step_le'; assumption?) + apply (simp add: objBitsKO_Data) + apply (rule_tac n=pageBits in is_aligned_add_step_le'; assumption?) + apply (case_tac ko; + simp split: if_split_asm + add: is_other_obj_relation_type_CapTable a_type_def) + apply (rename_tac ako, + case_tac ako; + simp add: is_other_obj_relation_type_def a_type_def split: if_split_asm) + apply (frule (1) obj_relation_cuts_obj_bits) + apply (drule (2) obj_relation_cuts_range_mask_range)+ + apply (prop_tac "x' \ mask_range p' (objBitsKO ko')", simp add: mask_def add_diff_eq) + apply (frule_tac x=p and y=x in pspace_distinctD; assumption?) + apply (drule (4) mask_range_subsetD) + apply (erule (2) in_empty_interE) + done + end end diff --git a/proof/refine/ARM/Arch_R.thy b/proof/refine/ARM/Arch_R.thy index 784e562b2b..7a3114b738 100644 --- a/proof/refine/ARM/Arch_R.thy +++ b/proof/refine/ARM/Arch_R.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -72,7 +73,7 @@ lemma createObject_typ_at': pspace_aligned' s \ pspace_no_overlap' ptr (objBitsKO ty) s\ createObjects' ptr (Suc 0) ty 0 \\rv s. typ_at' otype ptr s\" - apply (clarsimp simp:createObjects'_def alignError_def split_def | wp hoare_unless_wp | wpc )+ + apply (clarsimp simp:createObjects'_def alignError_def split_def | wp unless_wp | wpc )+ apply (clarsimp simp:obj_at'_def ko_wp_at'_def typ_at'_def pspace_distinct'_def)+ apply (subgoal_tac "ps_clear ptr (objBitsKO ty) (s\ksPSpace := \a. if a = ptr then Some ty else ksPSpace s a\)") @@ -127,7 +128,7 @@ lemma set_cap_device_and_range_aligned: lemma performASIDControlInvocation_corres: "asid_ci_map i = i' \ corres dc - (einvs and ct_active and valid_aci i) + (einvs and ct_active and valid_aci i and schact_is_rct) (invs' and ct_active' and valid_aci' i') (perform_asid_control_invocation i) (performASIDControlInvocation i')" @@ -262,11 +263,10 @@ lemma performASIDControlInvocation_corres: deleteObjects_cte_wp_at' deleteObjects_null_filter[where p="makePoolParent i'"]) apply (clarsimp simp:invs_mdb max_free_index_def invs_untyped_children) - apply (subgoal_tac "detype_locale x y sa" for x y) - prefer 2 - apply (simp add:detype_locale_def) - apply (fastforce simp:cte_wp_at_caps_of_state descendants_range_def2 - empty_descendants_range_in invs_untyped_children) + apply (prop_tac "detype_locale x y sa" for x y) + apply (simp add: detype_locale_def) + apply (fastforce simp: cte_wp_at_caps_of_state descendants_range_def2 + empty_descendants_range_in invs_untyped_children) apply (intro conjI) apply (clarsimp) apply (erule(1) caps_of_state_valid) @@ -326,29 +326,30 @@ lemma performASIDControlInvocation_corres: apply clarsimp apply (frule empty_descendants_range_in') apply (intro conjI, - simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2 - null_filter_descendants_of'[OF null_filter_simp'] - capAligned_def asid_low_bits_def) - apply (erule descendants_range_caps_no_overlapI') - apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq) - apply (simp add:empty_descendants_range_in') - apply (simp add:word_bits_def pageBits_def) - apply (rule is_aligned_weaken) - apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) - apply (simp add:pageBits_def) + simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2 + null_filter_descendants_of'[OF null_filter_simp'] + 
capAligned_def asid_low_bits_def) + apply (erule descendants_range_caps_no_overlapI') + apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq) + apply (simp add:empty_descendants_range_in') + apply (simp add:word_bits_def pageBits_def) + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) + apply (simp add:pageBits_def) + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range) + apply (fastforce simp: cte_wp_at_ctes_of) + apply assumption+ + apply fastforce + apply simp apply clarsimp - apply (drule(1) cte_cap_in_untyped_range) - apply (fastforce simp:cte_wp_at_ctes_of) + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) apply assumption+ + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) apply fastforce apply simp apply clarsimp - apply (drule (1) cte_cap_in_untyped_range) - apply (fastforce simp add: cte_wp_at_ctes_of) - apply assumption+ - apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) - apply fastforce - apply simp done definition @@ -405,7 +406,7 @@ lemma checkVP_wpR [wp]: checkVPAlignment sz w \P\, -" apply (simp add: checkVPAlignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (simp add: is_aligned_mask vmsz_aligned'_def) done @@ -678,7 +679,7 @@ lemma resolve_vaddr_valid_mapping_size: \ (case cap of cap.ArchObjectCap c \ is_page_cap c | _ \ False) \ cap_bits cap = pageBitsForSize a \ " apply (simp add: resolve_vaddr_def) - apply (rule hoare_seq_ext[OF _ get_master_pde_sp]) + apply (rule bind_wp[OF _ get_master_pde_sp]) apply (rule hoare_pre) apply (wp get_master_pte_wp | wpc | simp add: lookup_pt_slot_no_fail_def)+ @@ -838,11 +839,11 @@ shows apply (simp add: returnOk_liftE[symmetric]) apply (rule corres_returnOk) apply (simp add: archinv_relation_def asid_pool_invocation_map_def) - apply (rule hoare_pre, wp hoare_whenE_wp) + apply (rule hoare_pre, wp whenE_wp) apply (clarsimp simp: ucast_fst_hd_assocs) - apply (wp hoareE_TrueI hoare_whenE_wp getASID_wp | simp)+ + apply (wp hoareE_TrueI whenE_wp getASID_wp | simp)+ apply ((clarsimp simp: p2_low_bits_max | rule TrueI impI)+)[2] - apply (wp hoare_whenE_wp getASID_wp)+ + apply (wp whenE_wp getASID_wp)+ apply (clarsimp simp: valid_cap_def) apply auto[1] apply (simp add: isCap_simps split del: if_split) @@ -916,7 +917,7 @@ shows apply (simp add: ord_le_eq_trans [OF word_n1_ge]) apply (wp hoare_drop_imps)+ apply (simp add: o_def validE_R_def) - apply (wp hoare_whenE_wp)+ + apply (wp whenE_wp)+ apply fastforce apply clarsimp apply (simp add: null_def split_def asid_high_bits_def word_le_make_less) @@ -1033,7 +1034,7 @@ shows apply (clarsimp simp: archinv_relation_def page_table_invocation_map_def) apply (clarsimp simp: attribs_from_word_def attribsFromWord_def Let_def) apply (simp add: shiftr_shiftl1 pageBits_def ptBits_def pdeBits_def pteBits_def) - apply (wp hoare_whenE_wp get_master_pde_wp getPDE_wp find_pd_for_asid_inv + apply (wp whenE_wp get_master_pde_wp getPDE_wp find_pd_for_asid_inv | wp (once) hoare_drop_imps)+ apply (fastforce simp: valid_cap_def mask_def invs_vspace_objs[simplified]) @@ -1125,7 +1126,7 @@ shows lemma arch_performInvocation_corres: "archinv_relation ai ai' \ corres (dc \ (=)) - (einvs and ct_active and valid_arch_inv ai) + (einvs and ct_active and valid_arch_inv ai and schact_is_rct) (invs' and ct_active' and 
valid_arch_inv' ai' and (\s. vs_valid_duplicates' (ksPSpace s))) (arch_perform_invocation ai) (Arch.performInvocation ai')" apply (clarsimp simp: arch_perform_invocation_def @@ -1182,13 +1183,13 @@ lemma performASIDControlInvocation_tcb_at': apply (rule hoare_name_pre_state) apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) - apply (wp static_imp_wp |simp add:placeNewObject_def2)+ - apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: projectKO_opts_defs) apply (strengthen st_tcb_strg' [where P=\]) apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"] hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] - deleteObjects_st_tcb_at'[where p="makePoolParent aci"] static_imp_wp + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+ apply (case_tac ctea) apply (clarsimp) @@ -1288,8 +1289,8 @@ lemma tcbSchedEnqueue_vs_entry_align[wp]: "\\s. ko_wp_at' (\ko. P (vs_entry_align ko)) p s\ tcbSchedEnqueue pa \\rv. ko_wp_at' (\ko. P (vs_entry_align ko)) p\" - apply (clarsimp simp: tcbSchedEnqueue_def setQueue_def) - by (wp hoare_unless_wp | simp)+ + apply (clarsimp simp: tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def) + by (wp unless_wp | simp)+ crunch vs_entry_align[wp]: setThreadState "ko_wp_at' (\ko. P (vs_entry_align ko)) p" @@ -1387,7 +1388,7 @@ lemma findPDForASID_valid_offset'[wp]: "\valid_objs' and K (vptr < pptrBase)\ findPDForASID p \\rv s. valid_pde_mapping_offset' (rv + (vptr >> 20 << 2) && mask pdBits)\,-" apply (rule hoare_gen_asmE) - apply (rule hoare_post_imp_R, rule findPDForASID_aligned) + apply (rule hoare_strengthen_postE_R, rule findPDForASID_aligned) apply (simp add: mask_add_aligned) apply (erule less_pptrBase_valid_pde_offset'') done @@ -1492,16 +1493,6 @@ lemma ensureSafeMapping_valid_slots_duplicated': apply (fastforce simp:valid_slots_duplicated'_def) done -lemma is_aligned_ptrFromPAddr_aligned: - "m \ 28 \ is_aligned (ptrFromPAddr p) m = is_aligned p m" - apply (simp add:ptrFromPAddr_def is_aligned_mask pptrBaseOffset_def pptrBase_def physBase_def) - apply (subst add.commute) - apply (subst mask_add_aligned) - apply (erule is_aligned_weaken[rotated]) - apply (simp add:is_aligned_def) - apply simp - done - (* FIXME: this lemma is too specific *) lemma lookupPTSlot_aligned: "\\s. is_aligned vptr 16 \ valid_objs' s\ lookupPTSlot pd vptr \\p s. 
is_aligned p 6\,-" @@ -1513,15 +1504,13 @@ lemma lookupPTSlot_aligned: split:Structures_H.kernel_object.splits arch_kernel_object.splits) apply (simp add:valid_obj'_def lookup_pt_slot_no_fail_def) apply (rule aligned_add_aligned) - apply (rule is_aligned_ptrFromPAddr_aligned[where m = 6,THEN iffD2]) - apply simp - apply (erule is_aligned_weaken) - apply (simp add:ptBits_def pageBits_def) + apply (erule is_aligned_ptrFromPAddr_n) + apply (simp add: ptBits_def pteBits_def) apply (rule is_aligned_shiftl,simp) apply (rule is_aligned_andI1) apply (rule is_aligned_shiftr) apply simp - apply simp + apply (simp add: ptBits_def) done lemma createMappingEntires_valid_slots_duplicated'[wp]: @@ -1532,34 +1521,32 @@ lemma createMappingEntires_valid_slots_duplicated'[wp]: apply (clarsimp simp:createMappingEntries_def) apply (rule hoare_pre) apply (wpc | wp lookupPTSlot_page_table_at' - | simp add: slots_duplicated_ensured_def)+ - apply (rule_tac Q' = "\p s. is_aligned p 6 \ page_table_at' (p && ~~ mask ptBits) s" - in hoare_post_imp_R) + | simp add: slots_duplicated_ensured_def)+ + apply (rule_tac Q' = "\p s. is_aligned p 6 \ page_table_at' (p && ~~ mask ptBits) s" + in hoare_strengthen_postE_R) apply (wp lookupPTSlot_aligned lookupPTSlot_page_table_at') - apply (rule_tac x = r in exI) + apply (rename_tac rv s) + apply (rule_tac x = rv in exI) apply (clarsimp simp: largePagePTEOffsets_def pteBits_def) apply (frule is_aligned_no_wrap'[where off = "0x3c"]) apply simp apply (drule upto_enum_step_shift[where n = 6 and m = 2,simplified]) apply (clarsimp simp: mask_def add.commute upto_enum_step_def take_bit_Suc) - apply simp - apply wp+ - apply (intro conjI impI) - apply ((clarsimp simp: vmsz_aligned_def pageBitsForSize_def - slots_duplicated_ensured_def - split:vmpage_size.splits)+)[8] - apply clarsimp - apply (drule lookup_pd_slot_aligned_6) - apply (simp add:pdBits_def pageBits_def pdeBits_def) - apply (clarsimp simp:slots_duplicated_ensured_def) - apply (rule_tac x = "(lookup_pd_slot pd vptr)" in exI) - apply clarsimp - apply (frule is_aligned_no_wrap'[where off = "0x3c" and sz = 6]) apply simp - apply (drule upto_enum_step_shift[where n = 6 and m = 2,simplified]) - apply (clarsimp simp: mask_def add.commute upto_enum_step_def take_bit_Suc - superSectionPDEOffsets_def pdeBits_def) - done + apply wp+ + apply (intro conjI impI; clarsimp) + apply ((clarsimp simp: vmsz_aligned_def slots_duplicated_ensured_def)+)[2] + apply (drule lookup_pd_slot_aligned_6) + apply (simp add: pdBits_def pageBits_def pdeBits_def) + apply (clarsimp simp: slots_duplicated_ensured_def) + apply (rule_tac x = "(lookup_pd_slot pd vptr)" in exI) + apply clarsimp + apply (frule is_aligned_no_wrap'[where off = "0x3c" and sz = 6]) + apply simp + apply (drule upto_enum_step_shift[where n = 6 and m = 2,simplified]) + apply (clarsimp simp: mask_def add.commute upto_enum_step_def take_bit_Suc + superSectionPDEOffsets_def pdeBits_def) + done lemma arch_decodeARMPageFlush_wf: "ARM_H.isPageFlushLabel (invocation_type label) \ @@ -1578,7 +1565,7 @@ lemma arch_decodeARMPageFlush_wf: apply (simp add: decodeARMPageFlush_def) apply (rule hoare_pre) apply (wp throwE_R whenE_throwError_wp | wpc | clarsimp simp: if_apply_def2)+ - apply (simp add: valid_arch_inv'_def valid_page_inv'_def) + apply (wpsimp simp: valid_arch_inv'_def valid_page_inv'_def) apply fastforce done @@ -1630,7 +1617,7 @@ lemma arch_decodeInvocation_wf[wp]: (snd (excaps!0)) and sch_act_simple and (\s. 
descendants_of' (snd (excaps!0)) (ctes_of s) = {}) " - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: lookupTargetSlot_def) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) @@ -1733,7 +1720,7 @@ lemma arch_decodeInvocation_wf[wp]: (arch_capability.PageTableCap word (Some (snd p, hd args >> 20 << 20))) \ is_aligned (addrFromPPtr word) ptBits \ valid_pde_mapping_offset' (b + (hd args >> 20 << 2) && mask pdBits) - " in hoare_post_imp_R) + " in hoare_strengthen_postE_R) apply ((wp whenE_throwError_wp isFinalCapability_inv getPDE_wp | wpc | simp add: valid_arch_inv'_def valid_pti'_def unlessE_whenE | rule_tac x="fst p" in hoare_imp_eq_substR @@ -1788,7 +1775,7 @@ crunch st_tcb_at' [wp]: "Arch.finaliseCap" "st_tcb_at' P t" lemma invs_asid_table_strengthen': "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ invs' (s\ksArchState := - armKSASIDTable_update (\_. (armKSASIDTable \ ksArchState) s(asid \ ap)) (ksArchState s)\)" + armKSASIDTable_update (\_. ((armKSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" apply (clarsimp simp: invs'_def valid_state'_def) apply (rule conjI) apply (clarsimp simp: valid_global_refs'_def global_refs'_def) @@ -1863,7 +1850,7 @@ lemma performASIDControlInvocation_invs' [wp]: updateFreeIndex_caps_no_overlap'' updateFreeIndex_descendants_of2 updateFreeIndex_caps_overlap_reserved - updateCap_cte_wp_at_cases static_imp_wp + updateCap_cte_wp_at_cases hoare_weak_lift_imp getSlotCap_wp)+ apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask | strengthen invs_valid_pspace' invs_pspace_aligned' diff --git a/proof/refine/ARM/Bits_R.thy b/proof/refine/ARM/Bits_R.thy index 278b3c5b43..5023c5e46e 100644 --- a/proof/refine/ARM/Bits_R.thy +++ b/proof/refine/ARM/Bits_R.thy @@ -78,6 +78,10 @@ lemma projectKO_tcb: "(projectKO_opt ko = Some t) = (ko = KOTCB t)" by (cases ko) (auto simp: projectKO_opts_defs) +lemma tcb_of'_TCB[simp]: + "tcb_of' (KOTCB tcb) = Some tcb" + by (simp add: projectKO_tcb) + lemma projectKO_cte: "(projectKO_opt ko = Some t) = (ko = KOCTE t)" by (cases ko) (auto simp: projectKO_opts_defs) diff --git a/proof/refine/ARM/BuildRefineCache.thy b/proof/refine/ARM/BuildRefineCache.thy deleted file mode 100644 index 0e8eac45cf..0000000000 --- a/proof/refine/ARM/BuildRefineCache.thy +++ /dev/null @@ -1,40 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory BuildRefineCache -imports Main -begin - -ML \ - -(* needed to generate a proof cache *) -proofs := 1; -DupSkip.record_proofs := true; -quick_and_dirty := true; - -tracing "Building refinement image using ROOT.ML"; - -use "ROOT.ML"; - -\ - -ML \ - -tracing "Synching proof cache"; - -DupSkip.sync_cache @{theory Refine}; - -tracing "Dumping proof cache"; - -let - val xml = XML_Syntax.xml_forest_of_cache (! 
DupSkip.the_cache); -in - File.open_output (XML_Syntax.output_forest xml) (Path.basic "proof_cache.xml") -end; - -\ - -end diff --git a/proof/refine/ARM/CNodeInv_R.thy b/proof/refine/ARM/CNodeInv_R.thy index 52d58c1cb9..5af8cd707b 100644 --- a/proof/refine/ARM/CNodeInv_R.thy +++ b/proof/refine/ARM/CNodeInv_R.thy @@ -207,7 +207,7 @@ lemma decodeCNodeInvocation_corres: subgoal by (auto simp add: whenE_def, auto simp add: returnOk_def) apply (wp | wpc | simp(no_asm))+ apply (wp hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift - hoare_vcg_all_lift_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps + hoare_vcg_all_liftE_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps | clarsimp)+ subgoal by (auto elim!: valid_cnode_capI) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) @@ -264,7 +264,7 @@ lemma decodeCNodeInvocation_corres: apply (clarsimp simp add: returnOk_def) apply (wp get_cap_wp getCTE_wp | simp only: whenE_def | clarsimp)+ apply (rule hoare_trivE_R[where P="\"]) - apply (simp add: cte_wp_at_ctes_of pred_conj_def cong: conj_cong) + apply (wpsimp simp: cte_wp_at_ctes_of pred_conj_def) apply (fastforce elim!: valid_cnode_capI simp: invs_def valid_state_def valid_pspace_def) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) \ \Rotate\ @@ -385,7 +385,7 @@ lemma deriveCap_Null_helper: apply (cases "cap = NullCap") apply (simp add: deriveCap_def isCap_simps) apply (wp | simp)+ - apply (rule hoare_post_imp_R, rule assms) + apply (rule hoare_strengthen_postE_R, rule assms) apply simp done @@ -443,7 +443,7 @@ lemma decodeCNodeInv_wf[wp]: apply (wp whenE_throwError_wp getCTE_wp | wpc | simp(no_asm))+ apply (rule_tac Q'="\rv. invs' and cte_wp_at' (\cte. cteCap cte = NullCap) destSlot and ex_cte_cap_to' destSlot" - in hoare_post_imp_R, wp) + in hoare_strengthen_postE_R, wp) apply (clarsimp simp: cte_wp_at_ctes_of) apply (frule invs_valid_objs') apply (simp add: ctes_of_valid' valid_updateCapDataI @@ -478,7 +478,7 @@ lemma decodeCNodeInv_wf[wp]: unlessE_whenE) apply (rule hoare_pre) apply (wp whenE_throwError_wp getCTE_wp | simp)+ - apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. True) rv s" in hoare_post_imp_R) + apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. True) rv s" in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (simp add: cte_wp_at_ctes_of imp_ex hasCancelSendRights_not_Null) apply (clarsimp simp: ctes_of_valid' invs_valid_objs') @@ -492,7 +492,7 @@ lemma decodeCNodeInv_wf[wp]: apply (rule_tac Q'="\rv s. 
cte_at' rv s \ cte_at' destSlot s \ cte_at' srcSlot s \ ex_cte_cap_to' rv s \ ex_cte_cap_to' destSlot s - \ invs' s" in hoare_post_imp_R) + \ invs' s" in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (clarsimp simp: cte_wp_at_ctes_of) apply (frule invs_valid_objs') @@ -4862,7 +4862,7 @@ lemma cteSwap_iflive'[wp]: simp only: if_live_then_nonz_cap'_def imp_conv_disj ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - hoare_vcg_ex_lift updateCap_cte_wp_at_cases static_imp_wp)+ + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -4892,7 +4892,7 @@ lemma cteSwap_valid_pspace'[wp]: apply (strengthen imp_consequent, strengthen ctes_of_strng) apply ((wp sch_act_wf_lift valid_queues_lift cur_tcb_lift updateCap_no_0 updateCap_ctes_of_wp - hoare_ex_wp updateMDB_cte_wp_at_other getCTE_wp + hoare_vcg_ex_lift updateMDB_cte_wp_at_other getCTE_wp | rule hoare_drop_imps)+)[6] apply (clarsimp simp: valid_pspace_no_0[unfolded valid_pspace'_def valid_mdb'_def] cte_wp_at_ctes_of) @@ -5042,8 +5042,6 @@ crunch irq_states'[wp]: cteSwap "valid_irq_states'" crunch pde_mappings'[wp]: cteSwap "valid_pde_mappings'" -crunch vq'[wp]: cteSwap "valid_queues'" - crunch ksqsL1[wp]: cteSwap "\s. P (ksReadyQueuesL1Bitmap s)" crunch ksqsL2[wp]: cteSwap "\s. P (ksReadyQueuesL2Bitmap s)" @@ -5058,6 +5056,12 @@ crunch ct_not_inQ[wp]: cteSwap "ct_not_inQ" crunch ksDomScheduleIdx [wp]: cteSwap "\s. P (ksDomScheduleIdx s)" +crunches cteSwap + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + lemma cteSwap_invs'[wp]: "\invs' and valid_cap' c and valid_cap' c' and ex_cte_cap_to' c1 and ex_cte_cap_to' c2 and @@ -5237,6 +5241,11 @@ lemma invalid_Thread_CNode: apply (clarsimp simp: obj_at'_def projectKOs) done +(* FIXME MOVE *) +lemma all_Not_False[simp]: + "All Not = False" + by blast + lemma Final_notUntyped_capRange_disjoint: "\ isFinal cap sl (cteCaps_of s); cteCaps_of s sl' = Some cap'; sl \ sl'; capUntypedPtr cap = capUntypedPtr cap'; capBits cap = capBits cap'; @@ -5252,21 +5261,11 @@ lemma Final_notUntyped_capRange_disjoint: apply (clarsimp simp: valid_cap'_def obj_at'_def projectKOs objBits_simps' typ_at'_def ko_wp_at'_def + page_table_at'_def page_directory_at'_def + sameObjectAs_def3 isCap_simps split: capability.split_asm zombie_type.split_asm - arch_capability.split_asm - dest!: spec[where x=0]) - apply (clarsimp simp: sameObjectAs_def3 isCap_simps) - apply (simp add: isCap_simps) - apply (simp add: isCap_simps) - apply (clarsimp simp: valid_cap'_def - obj_at'_def projectKOs objBits_simps - typ_at'_def ko_wp_at'_def - page_table_at'_def page_directory_at'_def - split: capability.split_asm zombie_type.split_asm - arch_capability.split_asm - dest!: spec[where x=0]) - apply fastforce+ - apply (clarsimp simp: isCap_simps sameObjectAs_def3) + arch_capability.split_asm option.split_asm + dest!: spec[where x=0])+ done lemma capBits_capUntyped_capRange: @@ -5514,6 +5513,10 @@ lemma updateCap_untyped_ranges_zero_simple: crunch tcb_in_cur_domain'[wp]: updateCap "tcb_in_cur_domain' t" (wp: crunch_wps simp: crunch_simps rule: tcb_in_cur_domain'_lift) +crunches updateCap + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + lemma make_zombie_invs': "\\s. invs' s \ s \' cap \ cte_wp_at' (\cte. 
isFinal (cteCap cte) sl (cteCaps_of s)) sl s \ @@ -5530,7 +5533,8 @@ lemma make_zombie_invs': st_tcb_at' ((=) Inactive) p s \ bound_tcb_at' ((=) None) p s \ obj_at' (Not \ tcbQueued) p s - \ (\pr. p \ set (ksReadyQueues s pr)))) sl s\ + \ obj_at' (\tcb. tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p s)) sl s\ updateCap sl cap \\rv. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def @@ -5567,7 +5571,9 @@ lemma make_zombie_invs': apply (clarsimp simp: cte_wp_at_ctes_of) apply (subgoal_tac "st_tcb_at' ((=) Inactive) p' s \ obj_at' (Not \ tcbQueued) p' s - \ bound_tcb_at' ((=) None) p' s") + \ bound_tcb_at' ((=) None) p' s + \ obj_at' (\tcb. tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p' s") apply (clarsimp simp: pred_tcb_at'_def obj_at'_def ko_wp_at'_def projectKOs) apply (auto dest!: isCapDs)[1] apply (clarsimp simp: cte_wp_at_ctes_of disj_ac @@ -5741,7 +5747,7 @@ lemma cteSwap_cte_wp_cteCap: apply simp apply (wp hoare_drop_imps)[1] apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - getCTE_wp' hoare_vcg_all_lift static_imp_wp)+ + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ apply simp apply (clarsimp simp: o_def) done @@ -5755,7 +5761,7 @@ lemma capSwap_cte_wp_cteCap: apply(simp add: capSwapForDelete_def) apply(wp) apply(rule cteSwap_cte_wp_cteCap) - apply(wp getCTE_wp getCTE_cte_wp_at static_imp_wp)+ + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ apply(clarsimp) apply(rule conjI) apply(simp add: cte_at_cte_wp_atD) @@ -5873,7 +5879,7 @@ lemma cteDelete_delete_cases: apply (rule hoare_strengthen_post [OF emptySlot_deletes]) apply (clarsimp simp: cte_wp_at_ctes_of) apply wp+ - apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) apply (clarsimp simp: cte_wp_at_ctes_of) apply simp done @@ -6233,7 +6239,7 @@ proof (induct arbitrary: P p rule: finalise_spec_induct2) apply clarsimp apply (case_tac "cteCap rv", simp_all add: isCap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp | simp | wp (once) isFinal[where x=sl])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ apply (wp getCTE_wp') apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule conjI, clarsimp simp: removeable'_def) @@ -6251,7 +6257,7 @@ lemma finaliseSlot_invs'': \ (\sl'. snd rv \ NullCap \ sl' \ slot \ cteCaps_of s sl' \ Some (snd rv))\, \\rv s. invs' s \ sch_act_simple s\" unfolding finaliseSlot_def - apply (rule hoare_pre, rule hoare_post_impErr, rule use_spec) + apply (rule hoare_pre, rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where P="\" and Pr="\" and p=slot]) apply (simp_all add: no_cte_prop_top) apply wp @@ -6261,14 +6267,14 @@ lemma finaliseSlot_invs'': lemma finaliseSlot_invs: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. invs'\" - apply (rule validE_valid, rule hoare_post_impErr) + apply (rule validE_valid, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done lemma finaliseSlot_sch_act_simple: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. sch_act_simple\" - apply (rule validE_valid, rule hoare_post_impErr) + apply (rule validE_valid, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6277,7 +6283,7 @@ lemma finaliseSlot_removeable: "\\s. 
invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv s. fst rv \ cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s\,-" - apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule validE_validE_R, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6286,7 +6292,7 @@ lemma finaliseSlot_irqs: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv s. \sl'. snd rv \ NullCap \ sl' \ slot \ cteCaps_of s sl' \ Some (snd rv)\,-" - apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule validE_validE_R, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6301,7 +6307,7 @@ lemma finaliseSlot_cte_wp_at: P cp \ capZombiePtr cp \ p)) p s\,-" unfolding finaliseSlot_def apply (rule hoare_pre, unfold validE_R_def) - apply (rule hoare_post_impErr, rule use_spec) + apply (rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where P=P and Pr=\ and p=p]) apply (simp_all add: no_cte_prop_top finalise_prop_stuff_def) apply wp @@ -6324,7 +6330,7 @@ lemma reduceZombie_invs: reduceZombie cap slot exposed \\rv s. invs' s\" apply (rule validE_valid) - apply (rule hoare_post_impErr, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) + apply (rule hoare_strengthen_postE, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6334,7 +6340,7 @@ lemma reduceZombie_cap_to: reduceZombie cap slot exposed \\rv s. \ exposed \ ex_cte_cap_to' slot s\, -" apply (rule validE_validE_R, rule hoare_pre, - rule hoare_post_impErr) + rule hoare_strengthen_postE) apply (rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6345,7 +6351,7 @@ lemma reduceZombie_sch_act_simple: reduceZombie cap slot exposed \\rv. sch_act_simple\" apply (rule validE_valid, rule hoare_pre, - rule hoare_post_impErr) + rule hoare_strengthen_postE) apply (rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6355,7 +6361,7 @@ lemma cteDelete_invs': apply (rule hoare_gen_asm) apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (unfold validE_R_def) apply (rule use_spec) apply (rule spec_valid_conj_liftE1) @@ -6386,9 +6392,9 @@ lemma cteDelete_cte_at: apply (rule hoare_vcg_disj_lift) apply (rule typ_at_lifts, rule cteDelete_typ_at') apply (simp add: cteDelete_def finaliseSlot_def split_def) - apply (rule validE_valid, rule seqE) + apply (rule validE_valid, rule bindE_wp_fwd) apply (subst finaliseSlot'_simps_ext) - apply (rule seqE) + apply (rule bindE_wp_fwd) apply simp apply (rule getCTE_sp) apply (rule hoare_pre, rule hoare_FalseE) @@ -6431,10 +6437,10 @@ lemma cteDelete_cte_wp_at_invs: cteCap cte = NullCap \ (\zb n. cteCap cte = Zombie slot zb n)) slot s)" - and E="\rv. \" in hoare_post_impErr) + and E="\rv. \" in hoare_strengthen_postE) apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple hoare_drop_imps(2)[OF finaliseSlot_irqs]) - apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) apply (clarsimp simp: cte_wp_at_ctes_of dest!: isCapDs) apply simp apply simp @@ -6453,10 +6459,10 @@ lemma cteDelete_cte_wp_at_invs: (\zb n. cteCap cte = Zombie p zb n) \ (\cp. 
P cp \ capZombiePtr cp \ p)) p s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple hoare_drop_imps(2)[OF finaliseSlot_irqs]) - apply (rule hoare_post_imp_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) + apply (rule hoare_strengthen_postE_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) apply simp+ apply (clarsimp simp: cte_wp_at_ctes_of) apply simp @@ -6469,7 +6475,7 @@ lemma cteDelete_sch_act_simple: cteDelete slot exposed \\rv. sch_act_simple\" apply (simp add: cteDelete_def whenE_def split_def) apply (wp hoare_drop_imps | simp)+ - apply (rule_tac hoare_post_impErr [where Q="\rv. sch_act_simple" + apply (rule_tac hoare_strengthen_postE [where Q="\rv. sch_act_simple" and E="\rv. sch_act_simple"]) apply (rule valid_validE) apply (wp finaliseSlot_sch_act_simple) @@ -7016,18 +7022,18 @@ next apply simp apply ((wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at set_cap_cte_cap_wp_to - hoare_vcg_const_Ball_lift static_imp_wp + hoare_vcg_const_Ball_lift hoare_weak_lift_imp | simp add: conj_comms | erule finalise_cap_not_reply_master [simplified])+)[1] apply (simp(no_asm_use)) apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - static_imp_wp)+ + hoare_weak_lift_imp)+ apply (elim conjE, strengthen subst[where P="cap_relation cap" for cap, mk_strg I _ E]) apply simp apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - static_imp_wp)+ + hoare_weak_lift_imp)+ apply clarsimp apply (drule_tac cap=a in cap_relation_removables, clarsimp, assumption+) @@ -7069,7 +7075,7 @@ next apply (clarsimp dest!: isCapDs simp: cte_wp_at_ctes_of) apply (case_tac "cteCap rv'", auto simp add: isCap_simps is_cap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp add: is_final_cap_def conj_comms cte_wp_at_eq_simp)+ apply (rule isFinal[where x="cte_map slot"]) apply (wp get_cap_wp| simp add: conj_comms)+ @@ -7148,13 +7154,11 @@ next case (4 ptr bits n slot) let ?target = "(ptr, nat_to_cref (zombie_cte_bits bits) n)" note hyps = "4.hyps"[simplified rec_del_concrete_unfold spec_corres_liftME2] - have pred_conj_assoc: "\P Q R. (P and (Q and R)) = (P and Q and R)" - by (rule ext, simp) show ?case apply (simp only: rec_del_concrete_unfold cap_relation.simps) apply (simp add: reduceZombie_def Let_def liftE_bindE - del: pred_conj_app) + del: inf_apply) apply (subst rec_del_simps_ext) apply (rule_tac F="ptr + 2 ^ cte_level_bits * of_nat n = cte_map ?target" @@ -7212,7 +7216,7 @@ next apply (rule updateCap_corres) apply simp apply (simp add: is_cap_simps) - apply (rule_tac Q="\rv. cte_at' (cte_map ?target)" in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (cte_map ?target)" in hoare_post_add) apply (wp, (wp getCTE_wp)+) apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule no_fail_pre, wp, simp) @@ -7251,7 +7255,7 @@ next apply (clarsimp simp: zombie_alignment_oddity cte_map_replicate) apply (wp get_cap_cte_wp_at getCTE_wp' rec_del_cte_at rec_del_invs rec_del_delete_cases)+ - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule_tac P="\cp. 
cp = Zombie ptr (zbits_map bits) (Suc n)" in cteDelete_cte_wp_at_invs[where p="cte_map slot"]) apply clarsimp @@ -8347,7 +8351,7 @@ lemma cteMove_iflive'[wp]: ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_ex_lift updateCap_cte_wp_at_cases - getCTE_wp static_imp_wp)+ + getCTE_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -8500,6 +8504,15 @@ lemma cteMove_urz [wp]: apply auto done +crunches updateMDB + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +(* FIXME: arch_split *) +lemma haskell_assert_inv: + "haskell_assert Q L \P\" + by wpsimp + lemma cteMove_invs' [wp]: "\\x. invs' x \ ex_cte_cap_to' word2 x \ cte_wp_at' (\c. weak_derived' (cteCap c) capability) word1 x \ @@ -8525,7 +8538,7 @@ lemma cteMove_cte_wp_at: \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" unfolding cteMove_def apply (fold o_def) - apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp static_imp_wp|simp add: o_def)+ + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -8777,7 +8790,7 @@ crunch irq_states' [wp]: capSwapForDelete valid_irq_states' crunch irq_states' [wp]: finaliseCap valid_irq_states' - (wp: crunch_wps hoare_unless_wp getASID_wp no_irq + (wp: crunch_wps unless_wp getASID_wp no_irq no_irq_invalidateLocalTLB_ASID no_irq_setHardwareASID no_irq_set_current_pd no_irq_invalidateLocalTLB_VAASID no_irq_cleanByVA_PoU @@ -8810,7 +8823,7 @@ lemma finaliseSlot_IRQInactive: "\valid_irq_states'\ finaliseSlot a b -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (unfold validE_E_def) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule use_spec(2) [OF finaliseSlot_IRQInactive', folded finaliseSlot_def]) apply (rule TrueI) apply assumption @@ -8824,8 +8837,8 @@ lemma cteDelete_IRQInactive: "\valid_irq_states'\ cteDelete x y -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (simp add: cteDelete_def split_def) - apply (wp hoare_whenE_wp) - apply (rule hoare_post_impErr) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) apply (rule validE_E_validE) apply (rule finaliseSlot_IRQInactive) apply simp @@ -8837,8 +8850,8 @@ lemma cteDelete_irq_states': "\valid_irq_states'\ cteDelete x y \\rv. 
valid_irq_states'\" apply (simp add: cteDelete_def split_def) - apply (wp hoare_whenE_wp) - apply (rule hoare_post_impErr) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) apply (rule hoare_valid_validE) apply (rule finaliseSlot_irq_states') apply simp @@ -8861,7 +8874,7 @@ proof (induct rule: cteRevoke.induct) case (1 p s') show ?case apply (subst cteRevoke.simps) - apply (wp "1.hyps" unlessE_wp hoare_whenE_wp preemptionPoint_IRQInactive_spec + apply (wp "1.hyps" unlessE_wp whenE_wp preemptionPoint_IRQInactive_spec cteDelete_IRQInactive cteDelete_irq_states' getCTE_wp')+ apply clarsimp done @@ -8882,7 +8895,7 @@ lemma inv_cnode_IRQInactive: apply (rule hoare_pre) apply (wp cteRevoke_IRQInactive finaliseSlot_IRQInactive cteDelete_IRQInactive - hoare_whenE_wp + whenE_wp | wpc | simp add: split_def)+ done diff --git a/proof/refine/ARM/CSpace1_R.thy b/proof/refine/ARM/CSpace1_R.thy index 9afc11fd6a..24dd746660 100644 --- a/proof/refine/ARM/CSpace1_R.thy +++ b/proof/refine/ARM/CSpace1_R.thy @@ -236,7 +236,7 @@ lemma pspace_relation_cte_wp_at: apply (clarsimp elim!: cte_wp_at_weakenE') apply clarsimp apply (drule(1) pspace_relation_absD) - apply (clarsimp simp: other_obj_relation_def) + apply (clarsimp simp: tcb_relation_cut_def) apply (simp split: kernel_object.split_asm) apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb]) apply simp @@ -325,7 +325,7 @@ lemma getSlotCap_corres: (getSlotCap cte_ptr')" apply (simp add: getSlotCap_def) apply (subst bind_return [symmetric]) - apply (corressimp) + apply (corresKsimp) done lemma maskCapRights [simp]: @@ -398,7 +398,7 @@ lemma resolveAddressBits_cte_at': resolveAddressBits cap addr depth \\rv. cte_at' (fst rv)\, \\rv s. True\" apply (fold validE_R_def) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule resolveAddressBits_real_cte_at') apply (erule real_cte_at') done @@ -604,12 +604,12 @@ proof (induct a arbitrary: c' cref' bits rule: resolve_address_bits'.induct) apply (simp add: Let_def unlessE_whenE) apply (simp add: caps isCap_defs Let_def whenE_bindE_throwError_to_if) apply (subst cnode_cap_case_if) - apply (corressimp search: getSlotCap_corres IH + apply (corresKsimp search: getSlotCap_corres IH wp: get_cap_wp getSlotCap_valid no_fail_stateAssert simp: locateSlot_conv) apply (simp add: drop_postfix_eq) apply clarsimp - apply (prove "is_aligned ptr (cte_level_bits + cbits) \ cbits \ word_bits - cte_level_bits") + apply (prop_tac "is_aligned ptr (cte_level_bits + cbits) \ cbits \ word_bits - cte_level_bits") apply (erule valid_CNodeCapE; fastforce simp: cte_level_bits_def) subgoal premises prems for s s' x apply (insert prems) @@ -622,8 +622,8 @@ proof (induct a arbitrary: c' cref' bits rule: resolve_address_bits'.induct) apply (subst \to_bl _ = _\[symmetric]) apply (drule postfix_dropD) apply clarsimp - apply (prove "32 + (cbits + length guard) - length cref = - (cbits + length guard) + (32 - length cref)") + apply (prop_tac "32 + (cbits + length guard) - length cref = + (cbits + length guard) + (32 - length cref)") apply (drule len_drop_lemma, simp, arith) apply simp apply (subst drop_drop [symmetric]) @@ -720,11 +720,11 @@ lemma lookupSlotForThread_corres: apply clarsimp apply simp prefer 2 - apply (rule hoare_vcg_precond_impE) + apply (rule hoare_weaken_preE) apply (rule resolve_address_bits_cte_at [unfolded validE_R_def]) apply clarsimp prefer 2 - apply (rule hoare_vcg_precond_impE) + apply (rule hoare_weaken_preE) apply (rule resolveAddressBits_cte_at') apply (simp add: invs'_def 
valid_state'_def valid_pspace'_def) apply (simp add: returnOk_def split_def) @@ -810,7 +810,7 @@ lemma setCTE_tcb_in_cur_domain': done lemma setCTE_ctes_of_wp [wp]: - "\\s. P (ctes_of s (p \ cte))\ + "\\s. P ((ctes_of s) (p \ cte))\ setCTE p cte \\rv s. P (ctes_of s)\" by (simp add: setCTE_def ctes_of_setObject_cte) @@ -911,7 +911,7 @@ lemma cteInsert_weak_cte_wp_at: \\uu. cte_wp_at'(\c. P (cteCap c)) p\" unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def apply (simp add: bind_assoc split del: if_split) - apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at static_imp_wp | simp)+ + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ apply (wp getCTE_ctes_wp)+ apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ done @@ -1597,10 +1597,10 @@ lemma cte_map_pulls_tcb_to_abstract: \ \tcb'. kheap s x = Some (TCB tcb') \ tcb_relation tcb' tcb \ (z = (x, tcb_cnode_index (unat ((y - x) >> cte_level_bits))))" apply (rule pspace_dom_relatedE, assumption+) - apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) - apply (clarsimp simp: other_obj_relation_def + apply (erule(1) obj_relation_cutsE; + clarsimp simp: other_obj_relation_def split: Structures_A.kernel_object.split_asm - ARM_A.arch_kernel_obj.split_asm) + ARM_A.arch_kernel_obj.split_asm if_split_asm) apply (drule tcb_cases_related2) apply clarsimp apply (frule(1) cte_wp_at_tcbI [OF _ _ TrueI, where t="(a, b)" for a b, simplified]) @@ -1616,8 +1616,7 @@ lemma pspace_relation_update_tcbs: del: dom_fun_upd) apply (erule conjE) apply (rule ballI, drule(1) bspec) - apply (rule conjI, simp add: other_obj_relation_def) - apply (clarsimp split: Structures_A.kernel_object.split_asm) + apply (clarsimp simp: tcb_relation_cut_def split: Structures_A.kernel_object.split_asm) apply (drule bspec, fastforce) apply clarsimp apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) @@ -1838,6 +1837,27 @@ lemma descendants_of_eq': apply simp done +lemma setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedPrevs_of s)" + shows "P (ps |> tcb_of' |> tcbSchedPrev)" + using use_valid[OF step setObject_cte_tcbSchedPrevs_of(1)] pre + by auto + +lemma setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedNexts_of s)" + shows "P (ps |> tcb_of' |> tcbSchedNext)" + using use_valid[OF step setObject_cte_tcbSchedNexts_of(1)] pre + by auto + +lemma setObject_cte_inQ_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (inQ domain priority |< tcbs_of' s)" + shows "P (inQ domain priority |< (ps |> tcb_of'))" + using use_valid[OF step setObject_cte_inQ(1)] pre + by auto + lemma updateCap_stuff: assumes "(x, s'') \ fst (updateCap p cap s')" shows "(ctes_of s'' = modify_map (ctes_of s') p (cteCap_update (K cap))) \ @@ -1851,7 +1871,12 @@ lemma updateCap_stuff: ksSchedulerAction s'' = ksSchedulerAction s' \ (ksArchState s'' = ksArchState s') \ (pspace_aligned' s' \ pspace_aligned' s'') \ - (pspace_distinct' s' \ pspace_distinct' s'')" using assms + (pspace_distinct' s' \ pspace_distinct' s'') \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + (\domain priority. 
+ (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" + using assms apply (clarsimp simp: updateCap_def in_monad) apply (drule use_valid [where P="\s. s2 = s" for s2, OF _ getCTE_sp refl]) apply (rule conjI) @@ -1860,8 +1885,11 @@ lemma updateCap_stuff: apply (frule setCTE_pspace_only) apply (clarsimp simp: setCTE_def) apply (intro conjI impI) - apply (erule(1) use_valid [OF _ setObject_aligned]) - apply (erule(1) use_valid [OF _ setObject_distinct]) + apply (erule(1) use_valid [OF _ setObject_aligned]) + apply (erule(1) use_valid [OF _ setObject_distinct]) + apply (erule setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace; simp) + apply (erule setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace; simp) + apply (fastforce elim: setObject_cte_inQ_of_use_valid_ksPSpace) done (* FIXME: move *) @@ -1878,16 +1906,16 @@ lemma pspace_relation_cte_wp_atI': apply (simp split: if_split_asm) apply (erule(1) pspace_dom_relatedE) apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) + apply (subgoal_tac "n = x - y", clarsimp) + apply (drule tcb_cases_related2, clarsimp) + apply (intro exI, rule conjI) + apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) + apply fastforce + apply simp + apply clarsimp apply (simp add: other_obj_relation_def split: Structures_A.kernel_object.split_asm ARM_A.arch_kernel_obj.split_asm) - apply (subgoal_tac "n = x - y", clarsimp) - apply (drule tcb_cases_related2, clarsimp) - apply (intro exI, rule conjI) - apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) - apply fastforce - apply simp - apply clarsimp done lemma pspace_relation_cte_wp_atI: @@ -2287,7 +2315,7 @@ lemma updateCap_corres: apply (clarsimp simp: in_set_cap_cte_at_swp pspace_relations_def) apply (drule updateCap_stuff) apply simp - apply (rule conjI) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) apply (rule conjI) prefer 2 @@ -2375,9 +2403,9 @@ lemma updateMDB_pspace_relation: apply (clarsimp simp: tcb_ctes_clear cte_level_bits_def objBits_defs) apply clarsimp apply (rule pspace_dom_relatedE, assumption+) - apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] - apply (clarsimp split: Structures_A.kernel_object.split_asm - ARM_A.arch_kernel_obj.split_asm + apply (rule obj_relation_cutsE, assumption+; + clarsimp split: Structures_A.kernel_object.split_asm + ARM_A.arch_kernel_obj.split_asm if_split_asm simp: other_obj_relation_def) apply (frule(1) tcb_cte_cases_aligned_helpers(1)) apply (frule(1) tcb_cte_cases_aligned_helpers(2)) @@ -2439,6 +2467,25 @@ lemma updateMDB_ctes_of: crunch aligned[wp]: updateMDB "pspace_aligned'" crunch pdistinct[wp]: updateMDB "pspace_distinct'" +crunch tcbSchedPrevs_of[wp]: updateMDB "\s. P (tcbSchedPrevs_of s)" +crunch tcbSchedNexts_of[wp]: updateMDB "\s. P (tcbSchedNexts_of s)" +crunch inQ_opt_pred[wp]: updateMDB "\s. P (inQ d p |< tcbs_of' s)" +crunch inQ_opt_pred'[wp]: updateMDB "\s. P (\d p. inQ d p |< tcbs_of' s)" +crunch ksReadyQueues[wp]: updateMDB "\s. P (ksReadyQueues s)" + (wp: crunch_wps simp: crunch_simps setObject_def updateObject_cte) + +lemma setCTE_rdyq_projs[wp]: + "setCTE p f \\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. 
inQ d p |< tcbs_of' s)\" + apply (rule hoare_lift_Pf2[where f=ksReadyQueues]) + apply (rule hoare_lift_Pf2[where f=tcbSchedNexts_of]) + apply (rule hoare_lift_Pf2[where f=tcbSchedPrevs_of]) + apply wpsimp+ + done + +crunches updateMDB + for rdyq_projs[wp]:"\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< tcbs_of' s)" lemma updateMDB_the_lot: assumes "(x, s'') \ fst (updateMDB p f s')" @@ -2461,7 +2508,11 @@ lemma updateMDB_the_lot: ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ ksDomSchedule s'' = ksDomSchedule s' \ ksCurDomain s'' = ksCurDomain s' \ - ksDomainTime s'' = ksDomainTime s'" + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" using assms apply (simp add: updateMDB_eqs updateMDB_pspace_relations split del: if_split) apply (frule (1) updateMDB_ctes_of) @@ -2470,9 +2521,8 @@ using assms apply (erule use_valid) apply wp apply simp - apply (erule use_valid) - apply wp - apply simp + apply (erule use_valid, wpsimp wp: hoare_vcg_all_lift) + apply (simp add: comp_def) done lemma revokable_eq: @@ -3665,6 +3715,9 @@ lemma updateUntypedCap_descendants_of: apply (clarsimp simp:mdb_next_rel_def mdb_next_def split:if_splits) done +crunches setCTE + for tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + lemma setCTE_UntypedCap_corres: "\cap_relation cap (cteCap cte); is_untyped_cap cap; idx' = idx\ \ corres dc (cte_wp_at ((=) cap) src and valid_objs and @@ -3694,10 +3747,19 @@ lemma setCTE_UntypedCap_corres: apply assumption apply (clarsimp simp: pspace_relations_def) apply (subst conj_assoc[symmetric]) + apply clarsimp apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def split: if_split_asm Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ready_queues_relation _ _" \ -\) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (rule use_valid[OF _ setCTE_tcbSchedPrevs_of], assumption) + apply (rule use_valid[OF _ setCTE_tcbSchedNexts_of], assumption) + apply (rule use_valid[OF _ setCTE_ksReadyQueues], assumption) + apply (rule use_valid[OF _ setCTE_inQ_opt_pred], assumption) + apply (rule use_valid[OF _ set_cap_exst], assumption) + apply clarsimp apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) @@ -4978,11 +5040,15 @@ lemma updateMDB_the_lot': ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ ksDomSchedule s'' = ksDomSchedule s' \ ksCurDomain s'' = ksCurDomain s' \ - ksDomainTime s'' = ksDomainTime s'" + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. 
+ (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" apply (rule updateMDB_the_lot) using assms apply (fastforce simp: pspace_relations_def)+ - done + done lemma cte_map_inj_eq': "\(cte_map p = cte_map p'); @@ -5084,7 +5150,6 @@ lemma cteInsert_corres: apply (thin_tac "ksMachineState t = p" for p t)+ apply (thin_tac "ksCurThread t = p" for p t)+ apply (thin_tac "ksIdleThread t = p" for p t)+ - apply (thin_tac "ksReadyQueues t = p" for p t)+ apply (thin_tac "ksSchedulerAction t = p" for p t)+ apply (clarsimp simp: pspace_relations_def) diff --git a/proof/refine/ARM/CSpace_R.thy b/proof/refine/ARM/CSpace_R.thy index 1b914e57e1..1444165783 100644 --- a/proof/refine/ARM/CSpace_R.thy +++ b/proof/refine/ARM/CSpace_R.thy @@ -1099,43 +1099,6 @@ lemma bitmapQ_no_L2_orphans_lift: apply (rule hoare_vcg_prop, assumption) done -lemma valid_queues_lift_asm: - assumes tat1: "\d p tcb. \obj_at' (inQ d p) tcb and Q \ f \\_. obj_at' (inQ d p) tcb\" - and tat2: "\tcb. \st_tcb_at' runnable' tcb and Q \ f \\_. st_tcb_at' runnable' tcb\" - and prq: "\P. \\s. P (ksReadyQueues s) \ f \\_ s. P (ksReadyQueues s)\" - and prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" - and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" - shows "\Invariants_H.valid_queues and Q\ f \\_. Invariants_H.valid_queues\" - proof - - have tat: "\d p tcb. \obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb and Q\ f - \\_. obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb\" - apply (rule hoare_chain [OF hoare_vcg_conj_lift [OF tat1 tat2]]) - apply (fastforce)+ - done - have tat_combined: "\d p tcb. \obj_at' (inQ d p and runnable' \ tcbState) tcb and Q\ f - \\_. obj_at' (inQ d p and runnable' \ tcbState) tcb\" - apply (rule hoare_chain [OF tat]) - apply (fastforce simp add: obj_at'_and pred_tcb_at'_def o_def)+ - done - show ?thesis unfolding valid_queues_def valid_queues_no_bitmap_def - by (wp tat_combined prq prqL1 prqL2 valid_bitmapQ_lift bitmapQ_no_L2_orphans_lift - bitmapQ_no_L1_orphans_lift hoare_vcg_all_lift hoare_vcg_conj_lift hoare_Ball_helper) - simp_all - qed - -lemmas valid_queues_lift = valid_queues_lift_asm[where Q="\_. True", simplified] - -lemma valid_queues_lift': - assumes tat: "\d p tcb. \\s. \ obj_at' (inQ d p) tcb s\ f \\_ s. \ obj_at' (inQ d p) tcb s\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\valid_queues'\ f \\_. valid_queues'\" - unfolding valid_queues'_def imp_conv_disj - by (wp hoare_vcg_all_lift hoare_vcg_disj_lift tat prq) - -lemma setCTE_norq [wp]: - "\\s. P (ksReadyQueues s)\ setCTE ptr cte \\r s. P (ksReadyQueues s) \" - by (clarsimp simp: valid_def dest!: setCTE_pspace_only) - lemma setCTE_norqL1 [wp]: "\\s. P (ksReadyQueuesL1Bitmap s)\ setCTE ptr cte \\r s. P (ksReadyQueuesL1Bitmap s) \" by (clarsimp simp: valid_def dest!: setCTE_pspace_only) @@ -2227,7 +2190,7 @@ proof - let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" let ?C = "(modify_map (modify_map - (modify_map (ctes_of s(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest (cteMDBNode_update (\a. MDB word1 src (revokable' src_cap cap) (revokable' src_cap cap)))) src (cteMDBNode_update (mdbNext_update (\_. dest)))) word1 (cteMDBNode_update (mdbPrev_update (\_. dest))))" @@ -2577,7 +2540,7 @@ lemma updateMDB_iflive'[wp]: updateMDB p m \\rv s. 
if_live_then_nonz_cap' s\" apply (clarsimp simp: updateMDB_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp setCTE_iflive') apply (clarsimp elim!: cte_wp_at_weakenE') done @@ -2590,7 +2553,7 @@ lemma updateCap_iflive': updateCap p cap \\rv s. if_live_then_nonz_cap' s\" apply (simp add: updateCap_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp setCTE_iflive') apply (clarsimp elim!: cte_wp_at_weakenE') done @@ -2784,12 +2747,6 @@ lemma setCTE_inQ[wp]: apply (simp_all add: inQ_def) done -lemma setCTE_valid_queues'[wp]: - "\valid_queues'\ setCTE p cte \\rv. valid_queues'\" - apply (simp only: valid_queues'_def imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done - crunch inQ[wp]: cteInsert "\s. P (obj_at' (inQ d p) t s)" (wp: crunch_wps) @@ -2936,7 +2893,7 @@ lemma setCTE_irq_states' [wp]: apply (wp setObject_ksMachine) apply (simp add: updateObject_cte) apply (rule hoare_pre) - apply (wp hoare_unless_wp|wpc|simp)+ + apply (wp unless_wp|wpc|simp)+ apply fastforce apply assumption done @@ -3051,7 +3008,7 @@ lemma setCTE_ksMachine[wp]: apply (wp setObject_ksMachine) apply (clarsimp simp: updateObject_cte split: Structures_H.kernel_object.splits) - apply (safe, (wp hoare_unless_wp | simp)+) + apply (safe, (wp unless_wp | simp)+) done crunch ksMachine[wp]: cteInsert "\s. P (ksMachineState s)" @@ -3284,6 +3241,13 @@ lemma cteInsert_untyped_ranges_zero[wp]: apply blast done +crunches cteInsert + for tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: crunch_wps rule: valid_bitmaps_lift) + lemma cteInsert_invs: "\invs' and cte_wp_at' (\c. cteCap c=NullCap) dest and valid_cap' cap and (\s. src \ dest) and (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s) @@ -3292,9 +3256,9 @@ lemma cteInsert_invs: cteInsert cap src dest \\rv. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def) - apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift CSpace_R.valid_queues_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift cteInsert_norq - simp: st_tcb_at'_def) + apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift + valid_irq_node_lift irqs_masked_lift cteInsert_norq + sym_heap_sched_pointers_lift) apply (auto simp: invs'_def valid_state'_def valid_pspace'_def elim: valid_capAligned) done @@ -3594,10 +3558,13 @@ lemma corres_caps_decomposition: "\P. \\s. P (new_ups' s)\ g \\rv s. P (gsUserPages s)\" "\P. \\s. P (new_cns s)\ f \\rv s. P (cns_of_heap (kheap s))\" "\P. \\s. P (new_cns' s)\ g \\rv s. P (gsCNodes s)\" - "\P. \\s. P (new_queues s)\ f \\rv s. P (ready_queues s)\" + "\P. \\s. P (new_ready_queues s)\ f \\rv s. P (ready_queues s)\" "\P. \\s. P (new_action s)\ f \\rv s. P (scheduler_action s)\" "\P. \\s. P (new_sa' s)\ g \\rv s. P (ksSchedulerAction s)\" - "\P. \\s. P (new_rqs' s)\ g \\rv s. P (ksReadyQueues s)\" + "\P. \\s. P (new_ksReadyQueues s) (new_tcbSchedNexts_of s) (new_tcbSchedPrevs_of s) + (\d p. new_inQs d p s)\ + g \\rv s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< (tcbs_of' s))\" "\P. \\s. P (new_di s)\ f \\rv s. P (domain_index s)\" "\P. \\s. P (new_dl s)\ f \\rv s. P (domain_list s)\" "\P. \\s. P (new_cd s)\ f \\rv s. P (cur_domain s)\" @@ -3613,7 +3580,9 @@ lemma corres_caps_decomposition: "\s s'. 
\ P s; P' s'; (s, s') \ state_relation \ \ sched_act_relation (new_action s) (new_sa' s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ - \ ready_queues_relation (new_queues s) (new_rqs' s')" + \ ready_queues_relation_2 (new_ready_queues s) (new_ksReadyQueues s') + (new_tcbSchedNexts_of s') (new_tcbSchedPrevs_of s') + (\d p. new_inQs d p s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ \ revokable_relation (new_rvk s) (null_filter (new_caps s)) (new_ctes s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ @@ -3681,8 +3650,9 @@ proof - apply (rule corres_underlying_decomposition [OF x]) apply (simp add: ghost_relation_of_heap) apply (wp hoare_vcg_conj_lift mdb_wp rvk_wp list_wp u abs_irq_together)+ - apply (intro z[simplified o_def] conjI | simp add: state_relation_def pspace_relations_def swp_cte_at - | (clarsimp, drule (1) z(6), simp add: state_relation_def pspace_relations_def swp_cte_at))+ + apply (intro z[simplified o_def] conjI + | simp add: state_relation_def pspace_relations_def swp_cte_at + | (clarsimp, drule (1) z(6), simp add: state_relation_def))+ done qed @@ -3794,7 +3764,7 @@ lemma create_reply_master_corres: apply clarsimp apply (rule corres_caps_decomposition) defer - apply (wp|simp)+ + apply (wp|simp add: o_def split del: if_splits)+ apply (clarsimp simp: o_def cdt_relation_def cte_wp_at_ctes_of split del: if_split cong: if_cong simp del: id_apply) apply (case_tac cte, clarsimp) @@ -3968,8 +3938,9 @@ lemma setupReplyMaster_corres: cte_wp_at' ((=) rv) (cte_map (t, tcb_cnode_index 2))" in hoare_strengthen_post) apply (wp hoare_drop_imps getCTE_wp') + apply (rename_tac rv s) apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) - apply (case_tac r, fastforce elim: valid_nullcapsE) + apply (case_tac rv, fastforce elim: valid_nullcapsE) apply (fastforce elim: tcb_at_cte_at) apply (clarsimp simp: cte_at'_obj_at' tcb_cte_cases_def cte_map_def) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) @@ -4165,6 +4136,9 @@ crunches setupReplyMaster and ready_queuesL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. 
P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers (wp: crunch_wps simp: crunch_simps rule: irqs_masked_lift) lemma setupReplyMaster_vms'[wp]: @@ -4193,7 +4167,8 @@ lemma setupReplyMaster_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp setupReplyMaster_valid_pspace' sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift - valid_queues_lift cur_tcb_lift valid_queues_lift' hoare_vcg_disj_lift + valid_queues_lift cur_tcb_lift hoare_vcg_disj_lift sym_heap_sched_pointers_lift + valid_bitmaps_lift valid_irq_node_lift | simp)+ apply (clarsimp simp: ex_nonz_tcb_cte_caps' valid_pspace'_def objBits_simps' tcbReplySlot_def @@ -4454,8 +4429,8 @@ lemma arch_update_setCTE_invs: apply (wp arch_update_setCTE_mdb valid_queues_lift sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift arch_update_setCTE_iflive arch_update_setCTE_ifunsafe valid_irq_node_lift setCTE_typ_at' setCTE_irq_handlers' - valid_queues_lift' setCTE_pred_tcb_at' irqs_masked_lift - setCTE_norq hoare_vcg_disj_lift untyped_ranges_zero_lift + setCTE_pred_tcb_at' irqs_masked_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift valid_bitmaps_lift | simp add: pred_tcb_at'_def)+ apply (clarsimp simp: valid_global_refs'_def is_arch_update'_def fun_upd_def[symmetric] cte_wp_at_ctes_of isCap_simps untyped_ranges_zero_fun_upd) @@ -5810,7 +5785,7 @@ lemma cteInsert_simple_invs: apply (rule hoare_pre) apply (simp add: invs'_def valid_state'_def valid_pspace'_def) apply (wp cur_tcb_lift sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift + valid_irq_node_lift irqs_masked_lift sym_heap_sched_pointers_lift cteInsert_simple_mdb' cteInsert_valid_globals_simple cteInsert_norq | simp add: pred_tcb_at'_def)+ apply (auto simp: invs'_def valid_state'_def valid_pspace'_def @@ -5949,6 +5924,21 @@ lemma arch_update_updateCap_invs: apply clarsimp done +lemma setCTE_set_cap_ready_queues_relation_valid_corres: + assumes pre: "ready_queues_relation s s'" + assumes step_abs: "(x, t) \ fst (set_cap cap slot s)" + assumes step_conc: "(y, t') \ fst (setCTE slot' cap' s')" + shows "ready_queues_relation t t'" + apply (clarsimp simp: ready_queues_relation_def) + apply (insert pre) + apply (rule use_valid[OF step_abs set_cap_exst]) + apply (rule use_valid[OF step_conc setCTE_ksReadyQueues]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedNexts_of]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedPrevs_of]) + apply (clarsimp simp: ready_queues_relation_def Let_def) + using use_valid[OF step_conc setCTE_inQ_opt_pred] + by fast + lemma updateCap_same_master: "\ cap_relation cap cap' \ \ corres dc (valid_objs and pspace_aligned and pspace_distinct and @@ -5980,6 +5970,8 @@ lemma updateCap_same_master: apply assumption apply (clarsimp simp: pspace_relations_def) apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ready_queues_relation a b" for a b \ -\) + subgoal by (erule setCTE_set_cap_ready_queues_relation_valid_corres; assumption) apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def @@ -6206,8 +6198,9 @@ lemma updateFreeIndex_forward_invs': apply (simp add:updateCap_def) apply (wp setCTE_irq_handlers' getCTE_wp) apply (simp add:updateCap_def) - apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift + apply (wp irqs_masked_lift cur_tcb_lift 
ct_idle_or_in_cur_domain'_lift hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp + sym_heap_sched_pointers_lift valid_bitmaps_lift | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] | simp add: getSlotCap_def)+ apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) diff --git a/proof/refine/ARM/Cache.thy b/proof/refine/ARM/Cache.thy deleted file mode 100644 index ad26dd3961..0000000000 --- a/proof/refine/ARM/Cache.thy +++ /dev/null @@ -1,37 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory Cache -imports Main -begin - -text \Enable the proof cache, both skipping from it - and recording to it.\ -ML \DupSkip.record_proofs := true\ -ML \proofs := 1\ - -ML \DupSkip.skip_dup_proofs := true\ - -text \If executed in reverse order, save the cache\ -ML \val cache_thy_save_cache = ref false;\ -ML \ -if (! cache_thy_save_cache) -then File.open_output (XML_Syntax.output_forest - (XML_Syntax.xml_forest_of_cache (! DupSkip.the_cache))) - (Path.basic "proof_cache.xml") -else ()\ -ML \cache_thy_save_cache := true\ -ML \cache_thy_save_cache := false\ - -text \Load the proof cache - - can take up to a minute\ - -ML \ -DupSkip.the_cache := XML_Syntax.cache_of_xml_forest ( - File.open_input (XML_Syntax.input_forest) - (Path.basic "proof_cache.xml"))\ - -end diff --git a/proof/refine/ARM/Detype_R.thy b/proof/refine/ARM/Detype_R.thy index 6f0a7f2d6e..abd93e2cc6 100644 --- a/proof/refine/ARM/Detype_R.thy +++ b/proof/refine/ARM/Detype_R.thy @@ -100,6 +100,9 @@ defs deletionIsSafe_def: (\ko. ksPSpace s p = Some (KOArch ko) \ p \ {ptr .. ptr + 2 ^ bits - 1} \ 6 \ bits)" +defs deletionIsSafe_delete_locale_def: + "deletionIsSafe_delete_locale \ \ptr bits s. \p. ko_wp_at' live' p s \ p \ {ptr .. ptr + 2 ^ bits - 1}" + defs ksASIDMapSafe_def: "ksASIDMapSafe \ \s. \asid hw_asid pd. armKSASIDMap (ksArchState s) asid = Some (hw_asid,pd) \ page_directory_at' pd s" @@ -116,6 +119,7 @@ lemma deleteObjects_def2: "is_aligned ptr bits \ deleteObjects ptr bits = do stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; doMachineOp (freeMemory ptr bits); stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ {ptr .. ptr + 2 ^ bits - 1})) []; modify (\s. s \ ksPSpace := \x. if x \ {ptr .. ptr + 2 ^ bits - 1} @@ -126,7 +130,8 @@ lemma deleteObjects_def2: then None else gsCNodes s x \); stateAssert ksASIDMapSafe [] od" - apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def) + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def) + apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (simp add: bind_assoc[symmetric]) @@ -149,6 +154,7 @@ lemma deleteObjects_def3: do assert (is_aligned ptr bits); stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; doMachineOp (freeMemory ptr bits); stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ {ptr .. ptr + 2 ^ bits - 1})) []; modify (\s. s \ ksPSpace := \x. if x \ {ptr .. ptr + 2 ^ bits - 1} @@ -432,6 +438,7 @@ next qed end + locale detype_locale' = detype_locale + constrains s::"det_state" lemma (in detype_locale') deletionIsSafe: @@ -443,9 +450,8 @@ lemma (in detype_locale') deletionIsSafe: shows "deletionIsSafe base magnitude s'" proof - interpret Arch . 
(* FIXME: arch_split *) - note blah[simp del] = atLeastatMost_subset_iff atLeastLessThan_iff - Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex - atLeastAtMost_iff + note [simp del] = atLeastatMost_subset_iff atLeastLessThan_iff atLeastAtMost_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex have "\t m r. \ptr. cte_wp_at ((=) (cap.ReplyCap t m r)) ptr s \ t \ {base .. base + 2 ^ magnitude - 1}" by (fastforce dest!: valid_cap2 simp: cap obj_reply_refs_def) @@ -521,194 +527,8 @@ proof - done thus ?thesis using cte by (auto simp: deletionIsSafe_def) qed -context begin interpretation Arch . (*FIXME: arch_split*) -lemma ksASIDMapSafeI: - "\ (s,s') \ state_relation; invs s; pspace_aligned' s' \ pspace_distinct' s' \ - \ ksASIDMapSafe s'" - apply (clarsimp simp: ksASIDMapSafe_def) - apply (subgoal_tac "valid_asid_map s") - prefer 2 - apply fastforce - apply (clarsimp simp: valid_asid_map_def graph_of_def) - apply (subgoal_tac "arm_asid_map (arch_state s) asid = Some (hw_asid, pd)") - prefer 2 - apply (clarsimp simp: state_relation_def arch_state_relation_def) - apply (erule allE)+ - apply (erule (1) impE) - apply clarsimp - apply (drule find_pd_for_asid_eq_helper) - apply fastforce - apply assumption - apply fastforce - apply clarsimp - apply (rule pspace_relation_pd) - apply (fastforce simp: state_relation_def) - apply fastforce - apply assumption - apply assumption - apply simp - done - -(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) -(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) -(* FIXME: move *) -lemma corres_machine_op: - assumes P: "corres_underlying Id False True r P Q x x'" - shows "corres r (P \ machine_state) (Q \ ksMachineState) - (do_machine_op x) (doMachineOp x')" - apply (rule corres_submonad3 - [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) - apply (simp_all add: state_relation_def swp_def) - done - -lemma ekheap_relation_detype: - "ekheap_relation ekh kh \ - ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" - by (fastforce simp add: ekheap_relation_def split: if_split_asm) - -lemma cap_table_at_gsCNodes_eq: - "(s, s') \ state_relation - \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" - apply (clarsimp simp: state_relation_def ghost_relation_def - obj_at_def is_cap_table) - apply (drule_tac x = ptr in spec)+ - apply (drule_tac x = bits in spec)+ - apply fastforce - done - -lemma cNodeNoPartialOverlap: - "corres dc (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s - \ valid_objs s \ pspace_aligned s) - \ - (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) - (\x. 
base \ x \ x \ base + 2 ^ magnitude - 1)) [])" - apply (simp add: stateAssert_def assert_def) - apply (rule corres_symb_exec_r[OF _ get_sp]) - apply (rule corres_req[rotated], subst if_P, assumption) - apply simp - apply (clarsimp simp: cNodePartialOverlap_def) - apply (drule(1) cte_wp_valid_cap) - apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq - obj_at_def is_cap_table) - apply (frule(1) pspace_alignedD) - apply simp - apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) - apply (erule is_aligned_get_word_bits[where 'a=32, folded word_bits_def]) - apply (clarsimp simp: is_aligned_no_overflow) - apply (blast intro: order_trans) - apply (simp add: is_aligned_no_overflow power_overflow word_bits_def) - apply wp+ - done - - -declare wrap_ext_det_ext_ext_def[simp] - -(* Just for ARM *) -lemma sym_refs_hyp_refs_triv[simp]: "sym_refs (state_hyp_refs_of s)" - apply (auto simp: state_hyp_refs_of_def sym_refs_def) - apply (case_tac "kheap s x"; simp add: hyp_refs_of_def) - apply (rename_tac ko) - apply (case_tac ko; clarsimp) - done - -lemma deleteObjects_corres: - "is_aligned base magnitude \ magnitude \ 2 \ - corres dc - (\s. einvs s - \ s \ (cap.UntypedCap d base magnitude idx) - \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s - \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) - \ untyped_children_in_mdb s \ if_unsafe_then_cap s - \ valid_mdb s \ valid_global_refs s \ ct_active s) - (\s. s \' (UntypedCap d base magnitude idx) - \ valid_pspace' s) - (delete_objects base magnitude) (deleteObjects base magnitude)" - apply (simp add: deleteObjects_def2) - apply (rule corres_stateAssert_implied[where P'=\, simplified]) - prefer 2 - apply clarsimp - apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and - s=s in detype_locale'.deletionIsSafe, - simp_all add: detype_locale'_def - detype_locale_def p_assoc_help invs_valid_pspace)[1] - apply (simp add:valid_cap_simps) - apply (simp add: bind_assoc[symmetric]) - apply (rule corres_stateAssert_implied2) - defer - apply (erule ksASIDMapSafeI, assumption, assumption) - apply (rule hoare_pre) - apply (rule delete_objects_invs) - apply fastforce - apply (simp add: doMachineOp_def split_def) - apply wp - apply (clarsimp simp: valid_pspace'_def pspace_distinct'_def - pspace_aligned'_def) - apply (rule conjI) - subgoal by fastforce - apply (clarsimp simp add: pspace_distinct'_def ps_clear_def - dom_if_None Diff_Int_distrib) - apply (simp add: delete_objects_def) - apply (rule_tac Q="\_ s. valid_objs s \ valid_list s \ - (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ - descendants_range (cap.UntypedCap d base magnitude idx) cref s ) \ - s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ - valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ - zombies_final s \ sym_refs (state_refs_of s) \ - untyped_children_in_mdb s \ if_unsafe_then_cap s \ - valid_global_refs s" and - Q'="\_ s. 
s \' capability.UntypedCap d base magnitude idx \ - valid_pspace' s" in corres_underlying_split) - apply (rule corres_bind_return) - apply (rule corres_guard_imp[where r=dc]) - apply (rule corres_split[OF _ cNodeNoPartialOverlap]) - apply (rule corres_machine_op[OF corres_Id], simp+) - apply (rule no_fail_freeMemory, simp+) - apply (wp hoare_vcg_ex_lift)+ - apply auto[1] - apply (auto elim: is_aligned_weaken) - apply (rule corres_modify) - apply (simp add: valid_pspace'_def) - apply (rule state_relation_null_filterE, assumption, - simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] - apply (simp add: detype_def, rule state.equality; simp add: detype_ext_def) - apply (intro exI, fastforce) - apply (rule ext, clarsimp simp add: null_filter_def) - apply (rule sym, rule ccontr, clarsimp) - apply (drule(4) cte_map_not_null_outside') - apply (fastforce simp add: cte_wp_at_caps_of_state) - apply simp - apply (rule ext, clarsimp simp add: null_filter'_def - map_to_ctes_delete[simplified field_simps]) - apply (rule sym, rule ccontr, clarsimp) - apply (frule(2) pspace_relation_cte_wp_atI - [OF state_relation_pspace_relation]) - apply (elim exE) - apply (frule(4) cte_map_not_null_outside') - apply (rule cte_wp_at_weakenE, erule conjunct1) - apply (case_tac y, clarsimp) - apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def - valid_nullcaps_def) - apply clarsimp - apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, - erule cte_wp_at_weakenE[OF _ TrueI], assumption+) - apply simp - apply (rule detype_pspace_relation[simplified], - simp_all add: state_relation_pspace_relation valid_pspace_def)[1] - apply (simp add: valid_cap'_def capAligned_def) - apply (clarsimp simp: valid_cap_def, assumption) - apply (fastforce simp add: detype_def detype_ext_def intro!: ekheap_relation_detype) - apply (clarsimp simp: state_relation_def ghost_relation_of_heap - detype_def) - apply (drule_tac t="gsUserPages s'" in sym) - apply (drule_tac t="gsCNodes s'" in sym) - apply (auto simp add: ups_of_heap_def cns_of_heap_def ext - split: option.splits kernel_object.splits)[1] - apply (simp add: valid_mdb_def) - apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | - simp add: invs_def valid_state_def valid_pspace_def - descendants_range_def | wp (once) hoare_drop_imps)+ - done +context begin interpretation Arch . (*FIXME: arch_split*) text \Invariant preservation across concrete deletion\ @@ -748,84 +568,86 @@ lemma zobj_refs_capRange: "capAligned c \ zobj_refs' c \ capRange c" by (cases c, simp_all add: capRange_def capAligned_def is_aligned_no_overflow) end + locale delete_locale = - fixes s and base and bits and ptr and idx and d - assumes cap: "cte_wp_at' (\cte. cteCap cte = UntypedCap d base bits idx) ptr s" - and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s)" - and invs: "invs' s" - and ct_act: "ct_active' s" - and sa_simp: "sch_act_simple s" - and bwb: "bits < word_bits" + fixes s' and base and bits and ptr and idx and d + assumes cap: "cte_wp_at' (\cte. cteCap cte = UntypedCap d base bits idx) ptr s'" + and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s')" + and invs: "invs' s'" + and ct_act: "ct_active' s'" + and sa_simp: "sch_act_simple s'" and al: "is_aligned base bits" - and safe: "deletionIsSafe base bits s" - -context delete_locale -begin -interpretation Arch . 
(*FIXME: arch_split*) -lemma valid_objs: "valid_objs' s" - and pa: "pspace_aligned' s" - and pd: "pspace_distinct' s" - and vq: "valid_queues s" - and vq': "valid_queues' s" - and sym_refs: "sym_refs (state_refs_of' s)" - and iflive: "if_live_then_nonz_cap' s" - and ifunsafe: "if_unsafe_then_cap' s" - and dlist: "valid_dlist (ctes_of s)" - and no_0: "no_0 (ctes_of s)" - and chain_0: "mdb_chain_0 (ctes_of s)" - and badges: "valid_badges (ctes_of s)" - and contained: "caps_contained' (ctes_of s)" - and chunked: "mdb_chunked (ctes_of s)" - and umdb: "untyped_mdb' (ctes_of s)" - and uinc: "untyped_inc' (ctes_of s)" - and nullcaps: "valid_nullcaps (ctes_of s)" - and ut_rev: "ut_revocable' (ctes_of s)" - and dist_z: "distinct_zombies (ctes_of s)" - and irq_ctrl: "irq_control (ctes_of s)" - and clinks: "class_links (ctes_of s)" - and rep_r_fb: "reply_masters_rvk_fb (ctes_of s)" - and idle: "valid_idle' s" - and refs: "valid_global_refs' s" - and arch: "valid_arch_state' s" - and virq: "valid_irq_node' (irq_node' s) s" - and virqh: "valid_irq_handlers' s" - and virqs: "valid_irq_states' s" - and no_0_objs: "no_0_obj' s" - and ctnotinQ: "ct_not_inQ s" - and pde_maps: "valid_pde_mappings' s" - and irqs_masked: "irqs_masked' s" - and ctcd: "ct_idle_or_in_cur_domain' s" - and cdm: "ksCurDomain s \ maxDomain" - and vds: "valid_dom_schedule' s" + and safe: "deletionIsSafe base bits s'" + +context delete_locale begin interpretation Arch . (*FIXME: arch_split*) + +lemma valid_objs: "valid_objs' s'" + and pa: "pspace_aligned' s'" + and pd: "pspace_distinct' s'" + and vbm: "valid_bitmaps s'" + and sym_sched: "sym_heap_sched_pointers s'" + and vsp: "valid_sched_pointers s'" + and sym_refs: "sym_refs (state_refs_of' s')" + and iflive: "if_live_then_nonz_cap' s'" + and ifunsafe: "if_unsafe_then_cap' s'" + and dlist: "valid_dlist (ctes_of s')" + and no_0: "no_0 (ctes_of s')" + and chain_0: "mdb_chain_0 (ctes_of s')" + and badges: "valid_badges (ctes_of s')" + and contained: "caps_contained' (ctes_of s')" + and chunked: "mdb_chunked (ctes_of s')" + and umdb: "untyped_mdb' (ctes_of s')" + and uinc: "untyped_inc' (ctes_of s')" + and nullcaps: "valid_nullcaps (ctes_of s')" + and ut_rev: "ut_revocable' (ctes_of s')" + and dist_z: "distinct_zombies (ctes_of s')" + and irq_ctrl: "irq_control (ctes_of s')" + and clinks: "class_links (ctes_of s')" + and rep_r_fb: "reply_masters_rvk_fb (ctes_of s')" + and idle: "valid_idle' s'" + and refs: "valid_global_refs' s'" + and arch: "valid_arch_state' s'" + and virq: "valid_irq_node' (irq_node' s') s'" + and virqh: "valid_irq_handlers' s'" + and virqs: "valid_irq_states' s'" + and no_0_objs: "no_0_obj' s'" + and ctnotinQ: "ct_not_inQ s'" + and pde_maps: "valid_pde_mappings' s'" + and irqs_masked: "irqs_masked' s'" + and ctcd: "ct_idle_or_in_cur_domain' s'" + and cdm: "ksCurDomain s' \ maxDomain" + and vds: "valid_dom_schedule' s'" using invs - by (auto simp add: invs'_def valid_state'_def valid_pspace'_def - valid_mdb'_def valid_mdb_ctes_def) + by (auto simp: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) abbreviation "base_bits \ {base .. base + (2 ^ bits - 1)}" -abbreviation - "state' \ (s \ ksPSpace := \x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s x \)" +abbreviation pspace' :: pspace where + "pspace' \ \x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s' x" + +abbreviation state' :: kernel_state where + "state' \ (s' \ ksPSpace := pspace' \)" lemma ko_wp_at'[simp]: - "\P p. 
(ko_wp_at' P p state') = (ko_wp_at' P p s \ p \ base_bits)" + "\P p. (ko_wp_at' P p state') = (ko_wp_at' P p s' \ p \ base_bits)" by (fastforce simp add: ko_wp_at_delete'[OF pd]) lemma obj_at'[simp]: - "\P p. (obj_at' P p state') = (obj_at' P p s \ p \ base_bits)" + "\P p. (obj_at' P p state') = (obj_at' P p s' \ p \ base_bits)" by (fastforce simp add: obj_at'_real_def) lemma typ_at'[simp]: - "\T p. (typ_at' P p state') = (typ_at' P p s \ p \ base_bits)" + "typ_at' P p state' = (typ_at' P p s' \ p \ base_bits)" by (simp add: typ_at'_def) lemma valid_untyped[simp]: - "s \' UntypedCap d base bits idx" + "s' \' UntypedCap d base bits idx" using cte_wp_at_valid_objs_valid_cap' [OF cap valid_objs] by clarsimp lemma cte_wp_at'[simp]: - "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s \ p \ base_bits)" + "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s' \ p \ base_bits)" by (fastforce simp:cte_wp_at_delete'[where idx = idx,OF valid_untyped pd ]) (* the bits of caps they need for validity argument are within their capRanges *) @@ -854,13 +676,13 @@ lemma valid_cap_ctes_pre: done lemma replycap_argument: - "\p t m r. cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s + "\p t m r. cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s' \ t \ {base .. base + (2 ^ bits - 1)}" using safe by (fastforce simp add: deletionIsSafe_def cte_wp_at_ctes_of field_simps) lemma valid_cap': - "\p c. \ s \' c; cte_wp_at' (\cte. cteCap cte = c) p s; + "\p c. \ s' \' c; cte_wp_at' (\cte. cteCap cte = c) p s'; capRange c \ {base .. base + (2 ^ bits - 1)} = {} \ \ state' \' c" apply (subgoal_tac "capClass c = PhysicalClass \ capUntypedPtr c \ capRange c") apply (subgoal_tac "capClass c = PhysicalClass \ @@ -902,11 +724,11 @@ lemma valid_cap': done lemma objRefs_notrange: - assumes asms: "ctes_of s p = Some c" "\ isUntypedCap (cteCap c)" + assumes asms: "ctes_of s' p = Some c" "\ isUntypedCap (cteCap c)" shows "capRange (cteCap c) \ base_bits = {}" proof - from cap obtain node - where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" apply (clarsimp simp: cte_wp_at_ctes_of) apply (case_tac cte, simp) done @@ -927,11 +749,11 @@ proof - qed lemma ctes_of_valid [elim!]: - "ctes_of s p = Some cte \ s \' cteCap cte" + "ctes_of s' p = Some cte \ s' \' cteCap cte" by (case_tac cte, simp add: ctes_of_valid_cap' [OF _ valid_objs]) lemma valid_cap2: - "\ cte_wp_at' (\cte. cteCap cte = c) p s \ \ state' \' c" + "\ cte_wp_at' (\cte. cteCap cte = c) p s' \ \ state' \' c" apply (case_tac "isUntypedCap c") apply (drule cte_wp_at_valid_objs_valid_cap' [OF _ valid_objs]) apply (clarsimp simp: valid_cap'_def isCap_simps valid_untyped'_def) @@ -941,7 +763,7 @@ lemma valid_cap2: done lemma ex_nonz_cap_notRange: - "ex_nonz_cap_to' p s \ p \ base_bits" + "ex_nonz_cap_to' p s' \ p \ base_bits" apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) apply (case_tac "isUntypedCap (cteCap cte)") apply (clarsimp simp: isCap_simps) @@ -953,22 +775,318 @@ lemma ex_nonz_cap_notRange: done lemma live_notRange: - "\ ko_wp_at' P p s; \ko. P ko \ live' ko \ \ p \ base_bits" + "\ ko_wp_at' P p s'; \ko. 
P ko \ live' ko \ \ p \ base_bits" apply (drule if_live_then_nonz_capE' [OF iflive ko_wp_at'_weakenE]) apply simp apply (erule ex_nonz_cap_notRange) done +lemma deletionIsSafe_delete_locale_holds: + "deletionIsSafe_delete_locale base bits s'" + by (fastforce dest: live_notRange simp: deletionIsSafe_delete_locale_def field_simps) + lemma refs_notRange: - "(x, tp) \ state_refs_of' s y \ y \ base_bits" + "(x, tp) \ state_refs_of' s' y \ y \ base_bits" apply (drule state_refs_of'_elemD) apply (erule live_notRange) apply (rule refs_of_live') apply clarsimp done +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma ksASIDMapSafeI: + "\ (s,s') \ state_relation; invs s; pspace_aligned' s' \ pspace_distinct' s' \ + \ ksASIDMapSafe s'" + apply (clarsimp simp: ksASIDMapSafe_def) + apply (subgoal_tac "valid_asid_map s") + prefer 2 + apply fastforce + apply (clarsimp simp: valid_asid_map_def graph_of_def) + apply (subgoal_tac "arm_asid_map (arch_state s) asid = Some (hw_asid, pd)") + prefer 2 + apply (clarsimp simp: state_relation_def arch_state_relation_def) + apply (erule allE)+ + apply (erule (1) impE) + apply clarsimp + apply (drule find_pd_for_asid_eq_helper) + apply fastforce + apply assumption + apply fastforce + apply clarsimp + apply (rule pspace_relation_pd) + apply (fastforce simp: state_relation_def) + apply fastforce + apply assumption + apply assumption + apply simp + done + +(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) +(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) +(* FIXME: move *) +lemma corres_machine_op: + assumes P: "corres_underlying Id False True r P Q x x'" + shows "corres r (P \ machine_state) (Q \ ksMachineState) + (do_machine_op x) (doMachineOp x')" + apply (rule corres_submonad3 + [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) + apply (simp_all add: state_relation_def swp_def) + done + +lemma ekheap_relation_detype: + "ekheap_relation ekh kh \ + ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" + by (fastforce simp add: ekheap_relation_def split: if_split_asm) + +lemma cap_table_at_gsCNodes_eq: + "(s, s') \ state_relation + \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" + apply (clarsimp simp: state_relation_def ghost_relation_def + obj_at_def is_cap_table) + apply (drule_tac x = ptr in spec)+ + apply (drule_tac x = bits in spec)+ + apply fastforce + done + +lemma cNodeNoPartialOverlap: + "corres dc (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ valid_objs s \ pspace_aligned s) + \ + (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) + (\x. 
base \ x \ x \ base + 2 ^ magnitude - 1)) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: cNodePartialOverlap_def) + apply (drule(1) cte_wp_valid_cap) + apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq + obj_at_def is_cap_table) + apply (frule(1) pspace_alignedD) + apply simp + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (erule is_aligned_get_word_bits[where 'a=32, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow) + apply (blast intro: order_trans) + apply (simp add: is_aligned_no_overflow power_overflow word_bits_def) + apply wp+ + done + +declare wrap_ext_det_ext_ext_def[simp] + +lemma sym_refs_hyp_refs_triv[simp]: "sym_refs (state_hyp_refs_of s)" + apply (auto simp: state_hyp_refs_of_def sym_refs_def) + apply (case_tac "kheap s x"; simp add: hyp_refs_of_def) + apply (rename_tac ko) + apply (case_tac ko; clarsimp) + done + +crunches doMachineOp + for deletionIsSafe_delete_locale[wp]: "deletionIsSafe_delete_locale base magnitude" + (simp: deletionIsSafe_delete_locale_def) + +lemma detype_tcbSchedNexts_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedNext) + = tcbSchedNexts_of s'" + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def projectKOs split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_tcbSchedPrevs_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedPrev) + = tcbSchedPrevs_of s'" + using pspace_alignedD' pspace_distinctD' + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def projectKOs split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_inQ: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ \d p. (inQ d p |< ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of')) + = (inQ d p |< tcbs_of' s')" + using pspace_alignedD' pspace_distinctD' + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: inQ_def opt_pred_def ko_wp_at'_def projectKOs split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_ready_queues_relation: + "\pspace_aligned' s'; pspace_distinct' s'; + \p. p \ {lower..upper} \ \ ko_wp_at' live' p s'; + ready_queues_relation s s'; upper = upper'\ + \ ready_queues_relation_2 + (ready_queues (detype {lower..upper'} s)) + (ksReadyQueues s') + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedNext) + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedPrev) + (\d p. inQ d p |< ((\x. 
if lower \ x \ x \ upper then None else ksPSpace s' x) |> tcb_of'))" + apply (clarsimp simp: detype_ext_def ready_queues_relation_def Let_def) + apply (frule (1) detype_tcbSchedNexts_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_tcbSchedPrevs_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_inQ[where S="{lower..upper}"]; simp) + apply (fastforce simp add: detype_def detype_ext_def) + done + +lemma deleteObjects_corres: + "is_aligned base magnitude \ magnitude \ 2 \ + corres dc + (\s. einvs s + \ s \ (cap.UntypedCap d base magnitude idx) + \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) + \ untyped_children_in_mdb s \ if_unsafe_then_cap s + \ valid_mdb s \ valid_global_refs s \ ct_active s + \ schact_is_rct s) + (\s'. invs' s' + \ cte_wp_at' (\cte. cteCap cte = UntypedCap d base magnitude idx) ptr s' + \ descendants_range' (UntypedCap d base magnitude idx) ptr (ctes_of s') + \ ct_active' s' + \ s' \' (UntypedCap d base magnitude idx)) + (delete_objects base magnitude) (deleteObjects base magnitude)" + apply (simp add: deleteObjects_def2) + apply (rule corres_stateAssert_implied[where P'=\, simplified]) + prefer 2 + apply clarsimp + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add: valid_cap_simps) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (rule_tac ptr=ptr and idx=idx and d=d in delete_locale.deletionIsSafe_delete_locale_holds) + apply (clarsimp simp: delete_locale_def) + apply (intro conjI) + apply (fastforce simp: sch_act_simple_def state_relation_def schact_is_rct_def) + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add:valid_cap_simps) + apply (simp add: bind_assoc[symmetric]) + apply (rule corres_stateAssert_implied2) + defer + apply (erule ksASIDMapSafeI, assumption, assumption) + apply (rule hoare_pre) + apply (rule delete_objects_invs) + apply fastforce + apply (simp add: doMachineOp_def split_def) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def pspace_distinct'_def + pspace_aligned'_def) + apply (rule conjI) + subgoal by fastforce + apply (clarsimp simp add: pspace_distinct'_def ps_clear_def + dom_if_None Diff_Int_distrib) + apply (simp add: delete_objects_def) + apply (rule_tac Q="\_ s. valid_objs s \ valid_list s \ + (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ + descendants_range (cap.UntypedCap d base magnitude idx) cref s ) \ + s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ + valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ + zombies_final s \ sym_refs (state_refs_of s) \ + untyped_children_in_mdb s \ if_unsafe_then_cap s \ + valid_global_refs s" + and Q'="\_ s. 
s \' capability.UntypedCap d base magnitude idx \ + valid_pspace' s \ deletionIsSafe_delete_locale base magnitude s" + in corres_underlying_split) + apply (rule corres_bind_return) + apply (rule corres_guard_imp[where r=dc]) + apply (rule corres_split[OF _ cNodeNoPartialOverlap]) + apply (rule corres_machine_op[OF corres_Id], simp+) + apply (rule no_fail_freeMemory, simp+) + apply (wp hoare_vcg_ex_lift)+ + apply auto[1] + apply (auto elim: is_aligned_weaken) + apply (rule corres_modify) + apply (simp add: valid_pspace'_def) + apply (rule state_relation_null_filterE, assumption, + simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] + apply (simp add: detype_def, rule state.equality; simp add: detype_ext_def) + apply (intro exI, fastforce) + apply (rule ext, clarsimp simp add: null_filter_def) + apply (rule sym, rule ccontr, clarsimp) + apply (drule(4) cte_map_not_null_outside') + apply (fastforce simp add: cte_wp_at_caps_of_state) + apply simp + apply (rule ext, clarsimp simp: null_filter'_def map_to_ctes_delete[simplified field_simps]) + apply (rule sym, rule ccontr, clarsimp) + apply (frule(2) pspace_relation_cte_wp_atI[OF state_relation_pspace_relation]) + apply (elim exE) + apply (frule(4) cte_map_not_null_outside') + apply (rule cte_wp_at_weakenE, erule conjunct1) + apply (case_tac y, clarsimp) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def valid_nullcaps_def) + apply clarsimp + apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, + erule cte_wp_at_weakenE[OF _ TrueI], assumption+) + apply simp + apply (rule detype_pspace_relation[simplified], + simp_all add: state_relation_pspace_relation valid_pspace_def)[1] + apply (simp add: valid_cap'_def capAligned_def) + apply (clarsimp simp: valid_cap_def, assumption) + apply (fastforce simp add: detype_def detype_ext_def intro!: ekheap_relation_detype) + apply (rule detype_ready_queues_relation; blast?) + apply (clarsimp simp: deletionIsSafe_delete_locale_def) + apply (frule state_relation_ready_queues_relation) + apply (simp add: ready_queues_relation_def Let_def) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap detype_def) + apply (drule_tac t="gsUserPages s'" in sym) + apply (drule_tac t="gsCNodes s'" in sym) + apply (auto simp add: ups_of_heap_def cns_of_heap_def ext + split: option.splits kernel_object.splits)[1] + apply (simp add: valid_mdb_def) + apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | + simp add: invs_def valid_state_def valid_pspace_def + descendants_range_def | wp (once) hoare_drop_imps)+ + apply fastforce + done +end + +context delete_locale begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma live_idle_untyped_range': + "ko_wp_at' live' p s' \ p = idle_thread_ptr \ p \ base_bits" + apply (case_tac "ko_wp_at' live' p s'") + apply (drule if_live_then_nonz_capE'[OF iflive ko_wp_at'_weakenE]) + apply simp + apply (erule ex_nonz_cap_notRange) + apply clarsimp + apply (insert invs_valid_global'[OF invs] cap invs_valid_idle'[OF invs]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_global_refsD') + apply (clarsimp simp: valid_idle'_def) + using atLeastAtMost_iff apply (simp add: p_assoc_help mask_eq_exp_minus_1) + by fastforce + +lemma untyped_range_live_idle': + "p \ base_bits \ \ (ko_wp_at' live' p s' \ p = idle_thread_ptr)" + using live_idle_untyped_range' by blast lemma valid_obj': - "\ valid_obj' obj s; ko_wp_at' ((=) obj) p s \ \ valid_obj' obj state'" + "\ valid_obj' obj s'; ko_wp_at' ((=) obj) p s'; sym_heap_sched_pointers s' \ + \ valid_obj' obj state'" apply (case_tac obj, simp_all add: valid_obj'_def) apply (rename_tac endpoint) apply (case_tac endpoint, simp_all add: valid_ep'_def)[1] @@ -995,10 +1113,23 @@ lemma valid_obj': apply (erule(2) cte_wp_at_tcbI') apply fastforce apply simp - apply (rename_tac tcb) - apply (case_tac "tcbState tcb"; - clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def - dest!: refs_notRange split: option.splits) + apply (intro conjI) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; clarsimp simp: valid_tcb_state'_def dest!: refs_notRange) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; + clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def + dest!: refs_notRange split: option.splits) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac prev) + apply (cut_tac P=live' and p=prev in live_notRange; fastforce?) + apply (fastforce dest: sym_heapD2[where p'=p] + simp: opt_map_def ko_wp_at'_def obj_at'_def projectKOs) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac "next") + apply (cut_tac P=live' and p="next" in live_notRange; fastforce?) 
+ apply (fastforce dest!: sym_heapD1[where p=p] + simp: opt_map_def ko_wp_at'_def obj_at'_def projectKOs) apply (clarsimp simp: valid_cte'_def) apply (rule_tac p=p in valid_cap2) apply (clarsimp simp: ko_wp_at'_def objBits_simps' cte_level_bits_def[symmetric]) @@ -1014,18 +1145,50 @@ lemma valid_obj': apply (case_tac pde, simp_all add: valid_mapping'_def) done +lemma tcbSchedNexts_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedNext) = tcbSchedNexts_of s'" + supply projectKOs[simp] + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def) + apply (fastforce dest: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + +lemma tcbSchedPrevs_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedPrev) = tcbSchedPrevs_of s'" + supply projectKOs[simp] + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def) + apply (fastforce simp: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + lemma st_tcb: - "\P p. \ st_tcb_at' P p s; \ P Inactive; \ P IdleThreadState \ \ st_tcb_at' P p state'" - by (fastforce simp: pred_tcb_at'_def obj_at'_real_def - projectKOs - dest: live_notRange) + "\P p. \ st_tcb_at' P p s'; \ P Inactive; \ P IdleThreadState \ \ st_tcb_at' P p state'" + by (fastforce simp: pred_tcb_at'_def obj_at'_real_def projectKOs dest: live_notRange) lemma irq_nodes_global: - "\irq :: 10 word. irq_node' s + (ucast irq) * 16 \ global_refs' s" - by (simp add: global_refs'_def mult.commute mult.left_commute cteSizeBits_def shiftl_t2n) + "\irq :: 10 word. irq_node' s' + (ucast irq) * 16 \ global_refs' s'" + by (simp add: global_refs'_def mult.commute mult.left_commute cteSizeBits_def shiftl_t2n) lemma global_refs: - "global_refs' s \ base_bits = {}" + "global_refs' s' \ base_bits = {}" using cap apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule valid_global_refsD' [OF _ refs]) @@ -1033,20 +1196,20 @@ lemma global_refs: done lemma global_refs2: - "global_refs' s \ (- base_bits)" + "global_refs' s' \ (- base_bits)" using global_refs by blast lemma irq_nodes_range: - "\irq :: 10 word. irq_node' s + (ucast irq) * 16 \ base_bits" + "\irq :: 10 word. irq_node' s' + (ucast irq) * 16 \ base_bits" using irq_nodes_global global_refs by blast lemma cte_refs_notRange: - assumes asms: "ctes_of s p = Some c" - shows "cte_refs' (cteCap c) (irq_node' s) \ base_bits = {}" + assumes asms: "ctes_of s' p = Some c" + shows "cte_refs' (cteCap c) (irq_node' s') \ base_bits = {}" proof - from cap obtain node - where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" apply (clarsimp simp: cte_wp_at_ctes_of) apply (case_tac cte, simp) done @@ -1075,7 +1238,7 @@ proof - qed lemma non_null_present: - "cte_wp_at' (\c. cteCap c \ NullCap) p s \ p \ base_bits" + "cte_wp_at' (\c. 
cteCap c \ NullCap) p s' \ p \ base_bits" apply (drule (1) if_unsafe_then_capD' [OF _ ifunsafe]) apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of dest!: cte_refs_notRange simp del: atLeastAtMost_iff) @@ -1083,7 +1246,7 @@ lemma non_null_present: done lemma cte_cap: - "ex_cte_cap_to' p s \ ex_cte_cap_to' p state'" + "ex_cte_cap_to' p s' \ ex_cte_cap_to' p state'" apply (clarsimp simp: ex_cte_cap_to'_def) apply (frule non_null_present [OF cte_wp_at_weakenE']) apply clarsimp @@ -1091,37 +1254,37 @@ lemma cte_cap: done lemma idle_notRange: - "\cref. \ cte_wp_at' (\c. ksIdleThread s \ capRange (cteCap c)) cref s - \ ksIdleThread s \ base_bits" + "\cref. \ cte_wp_at' (\c. ksIdleThread s' \ capRange (cteCap c)) cref s' + \ ksIdleThread s' \ base_bits" apply (insert cap) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule_tac x=ptr in allE, clarsimp simp: field_simps) done abbreviation - "ctes' \ map_to_ctes (\x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s x)" + "ctes' \ map_to_ctes (\x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s' x)" lemmas tree_to_ctes = map_to_ctes_delete [OF valid_untyped pd] lemma map_to_ctesE[elim!]: - "\ ctes' x = Some cte; \ ctes_of s x = Some cte; x \ base_bits \ \ P \ \ P" + "\ ctes' x = Some cte; \ ctes_of s' x = Some cte; x \ base_bits \ \ P \ \ P" by (clarsimp simp: tree_to_ctes split: if_split_asm) lemma not_nullMDBNode: - "\ ctes_of s x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" + "\ ctes_of s' x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" using nullcaps apply (cases cte) apply (simp add: valid_nullcaps_def) done -lemma mdb_src: "\ ctes_of s \ x \ y; y \ 0 \ \ x \ base_bits" +lemma mdb_src: "\ ctes_of s' \ x \ y; y \ 0 \ \ x \ base_bits" apply (rule non_null_present) apply (clarsimp simp: next_unfold' cte_wp_at_ctes_of) apply (erule(1) not_nullMDBNode) apply (simp add: nullMDBNode_def nullPointer_def) done -lemma mdb_dest: "\ ctes_of s \ x \ y; y \ 0 \ \ y \ base_bits" +lemma mdb_dest: "\ ctes_of s' \ x \ y; y \ 0 \ \ y \ base_bits" apply (case_tac "x = 0") apply (insert no_0, simp add: next_unfold')[1] apply (drule(1) vdlist_nextD0 [OF _ _ dlist]) @@ -1132,7 +1295,7 @@ lemma mdb_dest: "\ ctes_of s \ x \ y; y \ 0 \ done lemma trancl_next[elim]: - "\ ctes_of s \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" + "\ ctes_of s' \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" apply (erule rev_mp, erule converse_trancl_induct) apply clarsimp apply (rule r_into_trancl) @@ -1150,14 +1313,14 @@ lemma trancl_next[elim]: done lemma mdb_parent_notrange: - "ctes_of s \ x \ y \ x \ base_bits \ y \ base_bits" + "ctes_of s' \ x \ y \ x \ base_bits \ y \ base_bits" apply (erule subtree.induct) apply (frule(1) mdb_src, drule(1) mdb_dest, simp) apply (drule(1) mdb_dest, simp) done lemma mdb_parent: - "ctes_of s \ x \ y \ ctes' \ x \ y" + "ctes_of s' \ x \ y \ ctes' \ x \ y" apply (erule subtree.induct) apply (frule(1) mdb_src, frule(1) mdb_dest) apply (rule subtree.direct_parent) @@ -1173,7 +1336,7 @@ lemma mdb_parent: done lemma trancl_next_rev: - "ctes' \ x \\<^sup>+ y \ ctes_of s \ x \\<^sup>+ y" + "ctes' \ x \\<^sup>+ y \ ctes_of s' \ x \\<^sup>+ y" apply (erule converse_trancl_induct) apply (rule r_into_trancl) apply (clarsimp simp: next_unfold') @@ -1183,7 +1346,7 @@ lemma trancl_next_rev: done lemma is_chunk[elim!]: - "is_chunk (ctes_of s) cap x y \ is_chunk ctes' cap x y" + "is_chunk (ctes_of s') cap x y \ is_chunk ctes' cap x y" apply (simp 
add: is_chunk_def) apply (erule allEI) apply (clarsimp dest!: trancl_next_rev) @@ -1222,17 +1385,18 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def show "pspace_aligned' ?s" using pa by (simp add: pspace_aligned'_def dom_def) - show "pspace_distinct' ?s" using pd + show pspace_distinct'_state': "pspace_distinct' ?s" using pd by (clarsimp simp add: pspace_distinct'_def ps_clear_def dom_if_None Diff_Int_distrib) - show "valid_objs' ?s" using valid_objs + show "valid_objs' ?s" using valid_objs sym_sched apply (clarsimp simp: valid_objs'_def ran_def) apply (rule_tac p=a in valid_obj') - apply fastforce - apply (frule pspace_alignedD'[OF _ pa]) - apply (frule pspace_distinctD'[OF _ pd]) - apply (clarsimp simp: ko_wp_at'_def) + apply fastforce + apply (frule pspace_alignedD'[OF _ pa]) + apply (frule pspace_distinctD'[OF _ pd]) + apply (clarsimp simp: ko_wp_at'_def) + apply fastforce done from sym_refs show "sym_refs (state_refs_of' ?s)" @@ -1244,19 +1408,6 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply (simp add: refs_notRange[simplified] state_refs_ko_wp_at_eq) done - from vq show "valid_queues ?s" - apply (clarsimp simp: valid_queues_def bitmapQ_defs) - apply (clarsimp simp: valid_queues_no_bitmap_def) - apply (drule spec, drule spec, drule conjunct1, drule(1) bspec) - apply (clarsimp simp: obj_at'_real_def) - apply (frule if_live_then_nonz_capE'[OF iflive, OF ko_wp_at'_weakenE]) - apply (clarsimp simp: projectKOs inQ_def) - apply (clarsimp dest!: ex_nonz_cap_notRange) - done - - from vq' show "valid_queues' ?s" - by (simp add: valid_queues'_def) - show "if_live_then_nonz_cap' ?s" using iflive apply (clarsimp simp: if_live_then_nonz_cap'_def) apply (drule spec, drule(1) mp) @@ -1272,7 +1423,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def intro!: cte_cap) from idle_notRange refs - have "ksIdleThread s \ ?ran" + have "ksIdleThread s' \ ?ran" apply (simp add: cte_wp_at_ctes_of valid_global_refs'_def valid_refs'_def) apply blast done @@ -1387,7 +1538,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def page_directory_at'_def) by fastforce - show "valid_irq_node' (irq_node' s) ?s" + show "valid_irq_node' (irq_node' s') ?s" using virq irq_nodes_range by (simp add: valid_irq_node'_def mult.commute mult.left_commute ucast_ucast_mask_8 cteSizeBits_def shiftl_t2n) @@ -1418,7 +1569,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def ball_ran_eq) from virqs - show "valid_irq_states' s" . + show "valid_irq_states' s'" . 
from no_0_objs show "no_0_obj' state'" @@ -1433,19 +1584,19 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def by (simp add: irqs_masked'_def) from sa_simp ct_act - show "sch_act_wf (ksSchedulerAction s) state'" + show "sch_act_wf (ksSchedulerAction s') state'" apply (simp add: sch_act_simple_def) - apply (case_tac "ksSchedulerAction s", simp_all add: ct_in_state'_def) + apply (case_tac "ksSchedulerAction s'", simp_all add: ct_in_state'_def) apply (fastforce dest!: st_tcb elim!: pred_tcb'_weakenE) done from invs - have "pspace_domain_valid s" by (simp add: invs'_def valid_state'_def) + have "pspace_domain_valid s'" by (simp add: invs'_def valid_state'_def) thus "pspace_domain_valid state'" by (simp add: pspace_domain_valid_def) from invs - have "valid_machine_state' s" by (simp add: invs'_def valid_state'_def) + have "valid_machine_state' s'" by (simp add: invs'_def valid_state'_def) thus "valid_machine_state' ?state''" apply (clarsimp simp: valid_machine_state'_def) apply (drule_tac x=p in spec) @@ -1500,12 +1651,12 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply (clarsimp dest!: ex_nonz_cap_notRange elim!: ko_wp_at'_weakenE) done - from cdm show "ksCurDomain s \ maxDomain" . + from cdm show "ksCurDomain s' \ maxDomain" . from invs - have urz: "untyped_ranges_zero' s" by (simp add: invs'_def valid_state'_def) + have urz: "untyped_ranges_zero' s'" by (simp add: invs'_def valid_state'_def) show "untyped_ranges_zero_inv (cteCaps_of state') - (gsUntypedZeroRanges s)" + (gsUntypedZeroRanges s')" apply (simp add: untyped_zero_ranges_cte_def urz[unfolded untyped_zero_ranges_cte_def, rule_format, symmetric]) apply (clarsimp simp: fun_eq_iff intro!: arg_cong[where f=Ex]) @@ -1515,17 +1666,31 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply simp done + from vbm + show "valid_bitmaps state'" + by (simp add: valid_bitmaps_def bitmapQ_defs) + + from sym_sched + show "sym_heap (pspace' |> tcb_of' |> tcbSchedNext) (pspace' |> tcb_of' |> tcbSchedPrev)" + using pa pd pspace_distinct'_state' + by (fastforce simp: tcbSchedNexts_of_pspace' tcbSchedPrevs_of_pspace') + + from vsp show "valid_sched_pointers_2 (pspace' |> tcb_of' |> tcbSchedPrev) + (pspace' |> tcb_of' |> tcbSchedNext) + (tcbQueued |< (pspace' |> tcb_of'))" + by (clarsimp simp: valid_sched_pointers_def opt_pred_def opt_map_def) + qed (clarsimp) lemma (in delete_locale) delete_ko_wp_at': - assumes objs: "ko_wp_at' P p s \ ex_nonz_cap_to' p s" + assumes objs: "ko_wp_at' P p s' \ ex_nonz_cap_to' p s'" shows "ko_wp_at' P p state'" using objs by (clarsimp simp: ko_wp_at'_def ps_clear_def dom_if_None Diff_Int_distrib dest!: ex_nonz_cap_notRange) lemma (in delete_locale) null_filter': - assumes descs: "Q (null_filter' (ctes_of s))" + assumes descs: "Q (null_filter' (ctes_of s'))" shows "Q (null_filter' (ctes_of state'))" using descs ifunsafe apply (clarsimp elim!: rsubst[where P=Q]) @@ -1543,7 +1708,7 @@ lemma (in delete_locale) null_filter': done lemma (in delete_locale) delete_ex_cte_cap_to': - assumes exc: "ex_cte_cap_to' p s" + assumes exc: "ex_cte_cap_to' p s'" shows "ex_cte_cap_to' p state'" using exc by (clarsimp elim!: cte_cap) @@ -1982,35 +2147,18 @@ lemma cte_wp_at_top: apply (simp add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply (clarsimp simp:cte_check_def) apply (simp 
add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply (simp add:typeError_def fail_def cte_check_def split:Structures_H.kernel_object.splits)+ done - -lemma neq_out_intv: - "\a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" - by simp - -lemma rule_out_intv: - "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a\b \ - \ b \ {a..a + 2 ^ objBitsKO obj - 1}" - apply (drule(1) pspace_distinctD') - apply (subst (asm) ps_clear_def) - apply (drule_tac x = b in orthD2) - apply fastforce - apply (drule neq_out_intv) - apply simp - apply simp - done - lemma locateCTE_monad: assumes ko_wp_at: "\Q dest. \\s. P1 s \ ko_wp_at' (\obj. Q (objBitsKO obj)) dest s \ f @@ -2085,8 +2233,8 @@ proof - apply (drule base_member_set[OF pspace_alignedD']) apply simp apply (simp add:objBitsKO_bounded2[unfolded word_bits_def,simplified]) - apply (clarsimp simp:field_simps) - apply blast + apply (clarsimp simp: field_simps) + apply (elim disjE; fastforce simp: mask_def p_assoc_help) done assume "{(ptr, s)} = fst (locateCTE src s)" @@ -2101,7 +2249,7 @@ qed lemma empty_fail_locateCTE: "empty_fail (locateCTE src)" - by (simp add:locateCTE_def bind_assoc split_def) + by (fastforce simp: locateCTE_def bind_assoc split_def) lemma fail_empty_locateCTE: "snd (locateCTE src s) \ fst (locateCTE src s) = {}" @@ -2719,7 +2867,7 @@ lemma storePDE_det: "ko_wp_at' ((=) (KOArch (KOPDE pde))) ptr s \ storePDE ptr (new_pde::ARM_H.pde) s = modify - (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPDE new_pde)))) s" + (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPDE new_pde)))) s" apply (clarsimp simp:ko_wp_at'_def storePDE_def split_def bind_def gets_def return_def get_def setObject_def @@ -2967,7 +3115,7 @@ lemma cte_wp_at_modify_pde: atLeastAtMost_iff shows "\ksPSpace s ptr' = Some (KOArch (KOPDE pde)); pspace_aligned' s;cte_wp_at' \ ptr s\ - \ cte_wp_at' \ ptr (s\ksPSpace := ksPSpace s(ptr' \ (KOArch (KOPDE pde')))\)" + \ cte_wp_at' \ ptr (s\ksPSpace := (ksPSpace s)(ptr' \ (KOArch (KOPDE pde')))\)" apply (simp add:cte_wp_at_obj_cases_mask obj_at'_real_def) apply (frule(1) pspace_alignedD') apply (elim disjE) @@ -3025,7 +3173,7 @@ lemma storePDE_setCTE_commute: apply (subst modify_specify) apply (rule modify_obj_commute') apply (rule commute_commute[OF locateCTE_commute]) - apply (wp locateCTE_cte_no_fail non_fail_modify + apply (wp locateCTE_cte_no_fail no_fail_modify modify_pde_pspace_distinct' modify_pde_pspace_aligned'| subst modify_specify)+ apply (clarsimp simp:simpler_modify_def valid_def typ_at'_def) @@ -3331,7 +3479,7 @@ lemma placeNewObject_tcb_at': placeNewObject ptr (makeObject::tcb) 0 \\_ s. 
tcb_at' ptr s \" apply (simp add: placeNewObject_def placeNewObject'_def split_def) - apply (wp hoare_unless_wp |wpc | simp add:alignError_def)+ + apply (wp unless_wp |wpc | simp add:alignError_def)+ by (auto simp: obj_at'_def is_aligned_mask lookupAround2_None1 lookupAround2_char1 field_simps objBits_simps projectKO_opt_tcb projectKO_def return_def ps_clear_def @@ -3787,7 +3935,7 @@ proof - apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) apply simp+ apply (simp add:word_le_sub1) - apply (wp haskell_assert_wp hoare_unless_wp | wpc |simp add:alignError_def del:fun_upd_apply)+ + apply (wp haskell_assert_wp unless_wp | wpc |simp add:alignError_def del:fun_upd_apply)+ apply (rule conjI) apply (rule impI) apply (subgoal_tac @@ -4335,7 +4483,7 @@ lemma createObjects_Cons: apply simp apply (wp haskell_assert_wp | wpc)+ apply simp - apply (wp hoare_unless_wp |clarsimp)+ + apply (wp unless_wp |clarsimp)+ apply (drule range_cover.aligned) apply (simp add:is_aligned_mask) done @@ -4577,7 +4725,7 @@ proof - apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) apply simp+ apply (simp add:word_le_sub1) - apply (wp haskell_assert_wp hoare_unless_wp |wpc + apply (wp haskell_assert_wp unless_wp |wpc |simp add:alignError_def del:fun_upd_apply)+ apply (rule conjI) apply (rule impI) @@ -4639,7 +4787,7 @@ lemma createTCBs_tcb_at': \\rv s. (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s)\" apply (simp add:createObjects'_def split_def alignError_def) - apply (wp hoare_unless_wp |wpc)+ + apply (wp unless_wp |wpc)+ apply (subst data_map_insert_def[symmetric])+ apply clarsimp apply (subgoal_tac "(\x\of_nat n. @@ -4652,7 +4800,6 @@ lemma createTCBs_tcb_at': apply simp apply simp apply (clarsimp simp: retype_obj_at_disj') - apply (clarsimp simp: projectKO_opt_tcb) apply (clarsimp simp: new_cap_addrs_def image_def) apply (drule_tac x = "unat x" in bspec) apply (simp add:objBits_simps' shiftl_t2n) @@ -5447,7 +5594,7 @@ lemma createObject_pspace_aligned_distinct': createObject ty ptr us d \\xa s. 
pspace_aligned' s \ pspace_distinct' s\" apply (rule hoare_pre) - apply (wp placeNewObject_pspace_aligned' hoare_unless_wp + apply (wp placeNewObject_pspace_aligned' unless_wp placeNewObject_pspace_distinct' | simp add:ARM_H.createObject_def Retype_H.createObject_def objBits_simps diff --git a/proof/refine/ARM/EmptyFail.thy b/proof/refine/ARM/EmptyFail.thy index d3b7d68de5..741a9ba837 100644 --- a/proof/refine/ARM/EmptyFail.thy +++ b/proof/refine/ARM/EmptyFail.thy @@ -19,12 +19,12 @@ lemma empty_fail_projectKO [simp, intro!]: lemma empty_fail_alignCheck [intro!, wp, simp]: "empty_fail (alignCheck a b)" unfolding alignCheck_def - by (simp add: alignError_def) + by (fastforce simp: alignError_def) lemma empty_fail_magnitudeCheck [intro!, wp, simp]: "empty_fail (magnitudeCheck a b c)" unfolding magnitudeCheck_def - by (simp split: option.splits) + by (fastforce split: option.splits) lemma empty_fail_loadObject_default [intro!, wp, simp]: shows "empty_fail (loadObject_default x b c d)" @@ -33,7 +33,7 @@ lemma empty_fail_loadObject_default [intro!, wp, simp]: lemma empty_fail_threadGet [intro!, wp, simp]: "empty_fail (threadGet f p)" - by (simp add: threadGet_def getObject_def split_def) + by (fastforce simp: threadGet_def getObject_def split_def) lemma empty_fail_getCTE [intro!, wp, simp]: "empty_fail (getCTE slot)" @@ -47,12 +47,12 @@ lemma empty_fail_getCTE [intro!, wp, simp]: lemma empty_fail_updateObject_cte [intro!, wp, simp]: "empty_fail (updateObject (v :: cte) ko a b c)" - by (simp add: updateObject_cte typeError_def unless_def split: kernel_object.splits ) + by (fastforce simp: updateObject_cte typeError_def unless_def split: kernel_object.splits ) lemma empty_fail_setCTE [intro!, wp, simp]: "empty_fail (setCTE p cte)" unfolding setCTE_def - by (simp add: setObject_def split_def) + by (fastforce simp: setObject_def split_def) lemma empty_fail_updateMDB [intro!, wp, simp]: "empty_fail (updateMDB a b)" @@ -60,16 +60,15 @@ lemma empty_fail_updateMDB [intro!, wp, simp]: lemma empty_fail_getSlotCap [intro!, wp, simp]: "empty_fail (getSlotCap a)" - unfolding getSlotCap_def by simp + unfolding getSlotCap_def by fastforce context begin interpretation Arch . (*FIXME: arch_split*) lemma empty_fail_getObject: - assumes x: "(\b c d. empty_fail (loadObject x b c d::'a :: pspace_storable kernel))" + assumes "\b c d. 
empty_fail (loadObject x b c d::'a :: pspace_storable kernel)" shows "empty_fail (getObject x :: 'a :: pspace_storable kernel)" apply (simp add: getObject_def split_def) - apply (safe intro!: empty_fail_bind empty_fail_gets empty_fail_assert_opt) - apply (rule x) + apply (safe intro!: assms) done lemma empty_fail_getObject_tcb [intro!, wp, simp]: @@ -78,22 +77,22 @@ lemma empty_fail_getObject_tcb [intro!, wp, simp]: lemma empty_fail_updateTrackedFreeIndex [intro!, wp, simp]: shows "empty_fail (updateTrackedFreeIndex p idx)" - by (simp add: updateTrackedFreeIndex_def) + by (fastforce simp add: updateTrackedFreeIndex_def) lemma empty_fail_updateNewFreeIndex [intro!, wp, simp]: shows "empty_fail (updateNewFreeIndex p)" apply (simp add: updateNewFreeIndex_def) - apply (safe intro!: empty_fail_bind) + apply safe apply (simp split: capability.split) done lemma empty_fail_insertNewCap [intro!, wp, simp]: "empty_fail (insertNewCap p p' cap)" - unfolding insertNewCap_def by simp + unfolding insertNewCap_def by fastforce lemma empty_fail_getIRQSlot [intro!, wp, simp]: "empty_fail (getIRQSlot irq)" - by (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv) + by (fastforce simp: getIRQSlot_def getInterruptState_def locateSlot_conv) lemma empty_fail_getObject_ntfn [intro!, wp, simp]: "empty_fail (getObject p :: Structures_H.notification kernel)" @@ -107,15 +106,15 @@ lemma empty_fail_lookupIPCBuffer [intro!, wp, simp]: "empty_fail (lookupIPCBuffer a b)" by (clarsimp simp: lookupIPCBuffer_def Let_def getThreadBufferSlot_def locateSlot_conv - split: capability.splits arch_capability.splits | wp | wpc)+ + split: capability.splits arch_capability.splits | wp | wpc | safe)+ lemma empty_fail_updateObject_default [intro!, wp, simp]: "empty_fail (updateObject_default v ko a b c)" - by (simp add: updateObject_default_def typeError_def unless_def split: kernel_object.splits ) + by (fastforce simp: updateObject_default_def typeError_def unless_def split: kernel_object.splits) lemma empty_fail_threadSet [intro!, wp, simp]: "empty_fail (threadSet f p)" - by (simp add: threadSet_def getObject_def setObject_def split_def) + by (fastforce simp: threadSet_def getObject_def setObject_def split_def) lemma empty_fail_getThreadState[iff]: "empty_fail (getThreadState t)" diff --git a/proof/refine/ARM/EmptyFail_H.thy b/proof/refine/ARM/EmptyFail_H.thy index 7ae323a3a9..8b00db6c5a 100644 --- a/proof/refine/ARM/EmptyFail_H.thy +++ b/proof/refine/ARM/EmptyFail_H.thy @@ -17,19 +17,19 @@ context begin interpretation Arch . (*FIXME: arch_split*) lemmas forM_empty_fail[intro!, wp, simp] = empty_fail_mapM[simplified forM_def[symmetric]] lemmas forM_x_empty_fail[intro!, wp, simp] = empty_fail_mapM_x[simplified forM_x_def[symmetric]] -lemmas forME_x_empty_fail[intro!, wp, simp] = mapME_x_empty_fail[simplified forME_x_def[symmetric]] +lemmas forME_x_empty_fail[intro!, wp, simp] = empty_fail_mapME_x[simplified forME_x_def[symmetric]] lemma withoutPreemption_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (withoutPreemption m)" - by (simp add: withoutPreemption_def) + by simp lemma withoutFailure_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (withoutFailure m)" - by (simp add: withoutFailure_def) + by simp lemma catchFailure_empty_fail[intro!, wp, simp]: "\ empty_fail f; \x. 
empty_fail (g x) \ \ empty_fail (catchFailure f g)" - by (simp add: catchFailure_def empty_fail_catch) + by (simp add: empty_fail_catch) lemma emptyOnFailure_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (emptyOnFailure m)" @@ -86,9 +86,6 @@ proof (induct arbitrary: s rule: resolveAddressBits.induct) lemmas resolveAddressBits_empty_fail[intro!, wp, simp] = resolveAddressBits_spec_empty_fail[THEN use_spec_empty_fail] -crunch (empty_fail) empty_fail[intro!, wp, simp]: lookupIPCBuffer -(simp:Let_def) - declare ef_dmo'[intro!, wp, simp] lemma empty_fail_getObject_ep [intro!, wp, simp]: @@ -175,7 +172,7 @@ crunch (empty_fail) "_H_empty_fail"[intro!, wp, simp]: "ThreadDecls_H.suspend" lemma ThreadDecls_H_restart_empty_fail[intro!, wp, simp]: "empty_fail (ThreadDecls_H.restart target)" - by (simp add:restart_def) + by (fastforce simp: restart_def) crunch (empty_fail) empty_fail[intro!, wp, simp]: finaliseCap, preemptionPoint, capSwapForDelete (wp: empty_fail_catch simp: Let_def) @@ -213,18 +210,14 @@ lemmas finaliseSlot_empty_fail[intro!, wp, simp] = lemma checkCapAt_empty_fail[intro!, wp, simp]: "empty_fail action \ empty_fail (checkCapAt cap ptr action)" - by (simp add: checkCapAt_def) + by (fastforce simp: checkCapAt_def) lemma assertDerived_empty_fail[intro!, wp, simp]: "empty_fail f \ empty_fail (assertDerived src cap f)" - by (simp add: assertDerived_def) + by (fastforce simp: assertDerived_def) crunch (empty_fail) empty_fail[intro!, wp, simp]: cteDelete -lemma liftE_empty_fail[intro!, wp, simp]: - "empty_fail f \ empty_fail (liftE f)" - by simp - lemma spec_empty_fail_unlessE': "\ \ P \ spec_empty_fail f s \ \ spec_empty_fail (unlessE P f) s" by (simp add:unlessE_def spec_empty_returnOk) @@ -254,7 +247,7 @@ lemma Syscall_H_syscall_empty_fail[intro!, wp, simp]: lemma catchError_empty_fail[intro!, wp, simp]: "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catchError f g)" - by (simp add: catchError_def handle_empty_fail) + by fastforce crunch (empty_fail) empty_fail[intro!, wp, simp]: chooseThread, getDomainTime, nextDomain, isHighestPrio @@ -271,7 +264,7 @@ crunch (empty_fail) empty_fail: callKernel theorem call_kernel_serial: "\ (einvs and (\s. event \ Interrupt \ ct_running s) and (ct_running or ct_idle) and - (\s. scheduler_action s = resume_cur_thread) and + schact_is_rct and (\s. 0 < domain_time s \ valid_domain_list s)) s; \s'. (s, s') \ state_relation \ (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and diff --git a/proof/refine/ARM/Finalise_R.thy b/proof/refine/ARM/Finalise_R.thy index 9b28b8da13..871d16b4ea 100644 --- a/proof/refine/ARM/Finalise_R.thy +++ b/proof/refine/ARM/Finalise_R.thy @@ -76,20 +76,10 @@ crunch ksRQL1[wp]: emptySlot "\s. P (ksReadyQueuesL1Bitmap s)" crunch ksRQL2[wp]: emptySlot "\s. P (ksReadyQueuesL2Bitmap s)" crunch obj_at'[wp]: postCapDeletion "obj_at' P p" -lemmas postCapDeletion_valid_queues[wp] = - valid_queues_lift [OF postCapDeletion_obj_at' - postCapDeletion_pred_tcb_at' - postCapDeletion_ksRQ] - crunch inQ[wp]: clearUntypedFreeIndex "\s. P (obj_at' (inQ d p) t s)" crunch tcbDomain[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbDomain tcb)) t" crunch tcbPriority[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbPriority tcb)) t" -lemma emptySlot_queues [wp]: - "\Invariants_H.valid_queues\ emptySlot sl opt \\rv. Invariants_H.valid_queues\" - unfolding emptySlot_def - by (wp | wpcw | wp valid_queues_lift | simp)+ - crunch nosch[wp]: emptySlot "\s. 
P (ksSchedulerAction s)" crunch ksCurDomain[wp]: emptySlot "\s. P (ksCurDomain s)" @@ -1162,8 +1152,7 @@ definition "removeable' sl \ \s cap. (\p. p \ sl \ cte_wp_at' (\cte. capMasterCap (cteCap cte) = capMasterCap cap) p s) \ ((\p \ cte_refs' cap (irq_node' s). p \ sl \ cte_wp_at' (\cte. cteCap cte = NullCap) p s) - \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s) - \ (\t \ threadCapRefs cap. \p. t \ set (ksReadyQueues s p)))" + \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s))" lemma not_Final_removeable: "\ isFinal cap sl (cteCaps_of s) @@ -1279,7 +1268,7 @@ crunch gsMaxObjectSize[wp]: emptySlot "\s. P (gsMaxObjectSize s)" end lemma emptySlot_cteCaps_of: - "\\s. P (cteCaps_of s(p \ NullCap))\ + "\\s. P ((cteCaps_of s)(p \ NullCap))\ emptySlot p opt \\rv s. P (cteCaps_of s)\" apply (simp add: emptySlot_def case_Null_If) @@ -1354,11 +1343,6 @@ crunch irq_states' [wp]: emptySlot valid_irq_states' crunch no_0_obj' [wp]: emptySlot no_0_obj' (wp: crunch_wps) -crunch valid_queues'[wp]: setInterruptState "valid_queues'" - (simp: valid_queues'_def) - -crunch valid_queues'[wp]: emptySlot "valid_queues'" - crunch pde_mappings'[wp]: emptySlot "valid_pde_mappings'" end @@ -1428,7 +1412,7 @@ lemma emptySlot_untyped_ranges[wp]: emptySlot sl opt \\rv. untyped_ranges_zero'\" apply (simp add: emptySlot_def case_Null_If) apply (rule hoare_pre) - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (rule untyped_ranges_zero_lift) apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of | wpc | simp add: clearUntypedFreeIndex_def updateTrackedFreeIndex_def @@ -1445,6 +1429,13 @@ lemma emptySlot_untyped_ranges[wp]: apply (simp add: untypedZeroRange_def isCap_simps) done +crunches emptySlot + for valid_bitmaps[wp]: valid_bitmaps + and tcbQueued_opt_pred[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and sched_projs[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + (wp: valid_bitmaps_lift) + lemma emptySlot_invs'[wp]: "\\s. invs' s \ cte_wp_at' (\cte. removeable' sl s (cteCap cte)) sl s \ (\sl'. 
info \ NullCap \ sl' \ sl \ cteCaps_of s sl' \ Some info)\ @@ -1467,13 +1458,13 @@ lemma deletedIRQHandler_corres: lemma arch_postCapDeletion_corres: "acap_relation cap cap' \ corres dc \ \ (arch_post_cap_deletion cap) (ARM_H.postCapDeletion cap')" - by (corressimp simp: arch_post_cap_deletion_def ARM_H.postCapDeletion_def) + by (corresKsimp simp: arch_post_cap_deletion_def ARM_H.postCapDeletion_def) lemma postCapDeletion_corres: "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) - apply (corressimp corres: deletedIRQHandler_corres) - by (corressimp corres: arch_postCapDeletion_corres) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) lemma set_cap_trans_state: "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" @@ -1533,7 +1524,7 @@ lemma emptySlot_corres: defer apply wpsimp+ apply (rule corres_no_failI) - apply (rule no_fail_pre, wp static_imp_wp) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (rule conjI, clarsimp) @@ -2240,16 +2231,24 @@ lemma tcb_st_not_Bound: "(p, TCBBound) \ tcb_st_refs_of' ts" by (auto simp: tcb_st_refs_of'_def split: Structures_H.thread_state.split) +crunches setBoundNotification + for valid_bitmaps[wp]: valid_bitmaps + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: valid_bitmaps_lift) + lemma unbindNotification_invs[wp]: "\invs'\ unbindNotification tcb \\rv. invs'\" apply (simp add: unbindNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ gbn_sp']) + apply (rule bind_wp[OF _ gbn_sp']) apply (case_tac ntfnPtr, clarsimp, wp, clarsimp) apply clarsimp - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift - irqs_masked_lift setBoundNotification_ct_not_inQ + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' valid_irq_node_lift + irqs_masked_lift setBoundNotification_ct_not_inQ sym_heap_sched_pointers_lift untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ apply (rule conjI) apply (clarsimp elim!: obj_atE' @@ -2289,9 +2288,9 @@ lemma ntfn_bound_tcb_at': lemma unbindMaybeNotification_invs[wp]: "\invs'\ unbindMaybeNotification ntfnptr \\rv. invs'\" apply (simp add: unbindMaybeNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sym_heap_sched_pointers_lift valid_irq_node_lift irqs_masked_lift setBoundNotification_ct_not_inQ untyped_ranges_zero_lift | wpc | clarsimp simp: cteCaps_of_def o_def)+ @@ -2396,11 +2395,12 @@ lemma deleteASID_invs'[wp]: apply (simp add: deleteASID_def cong: option.case_cong) apply (rule hoare_pre) apply (wp | wpc)+ - apply (rule_tac Q="\rv. valid_obj' (injectKO rv) and invs'" - in hoare_post_imp) + apply (rule_tac Q="\rv. 
valid_obj' (injectKO rv) and invs'" + in hoare_post_imp) + apply (rename_tac rv s) apply (clarsimp split: if_split_asm del: subsetI) apply (simp add: fun_upd_def[symmetric] valid_obj'_def) - apply (case_tac r, simp) + apply (case_tac rv, simp) apply (subst inv_f_f, rule inj_onI, simp)+ apply (rule conjI) apply clarsimp @@ -2447,10 +2447,7 @@ lemma prepares_delete_helper'': apply (clarsimp simp: removeable'_def) done -lemma ctes_of_cteCaps_of_lift: - "\ \P. \\s. P (ctes_of s)\ f \\rv s. P (ctes_of s)\ \ - \ \\s. P (cteCaps_of s)\ f \\rv s. P (cteCaps_of s)\" - by (wp | simp add: cteCaps_of_def)+ +lemmas ctes_of_cteCaps_of_lift = cteCaps_of_ctes_of_lift crunches finaliseCapTrue_standin, unbindNotification for ctes_of[wp]: "\s. P (ctes_of s)" @@ -2458,11 +2455,11 @@ crunches finaliseCapTrue_standin, unbindNotification lemma cteDeleteOne_cteCaps_of: "\\s. (cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap)))\ + P ((cteCaps_of s)(p \ NullCap)))\ cteDeleteOne p \\rv s. P (cteCaps_of s)\" apply (simp add: cteDeleteOne_def unless_def split_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (case_tac "\final. finaliseCap (cteCap cte) final True = fail") apply (simp add: finaliseCapTrue_standin_simple_def) apply wp @@ -2488,7 +2485,6 @@ lemma cteDeleteOne_isFinal: lemmas setEndpoint_cteCaps_of[wp] = ctes_of_cteCaps_of_lift [OF set_ep_ctes_of] lemmas setNotification_cteCaps_of[wp] = ctes_of_cteCaps_of_lift [OF set_ntfn_ctes_of] -lemmas setQueue_cteCaps_of[wp] = ctes_of_cteCaps_of_lift [OF setQueue_ctes_of] lemmas threadSet_cteCaps_of = ctes_of_cteCaps_of_lift [OF threadSet_ctes_of] crunch isFinal: suspend, prepareThreadDelete "\s. isFinal cap slot (cteCaps_of s)" @@ -2572,16 +2568,6 @@ lemma unbindNotification_valid_objs'_helper': by (clarsimp simp: valid_bound_tcb'_def valid_ntfn'_def split: option.splits ntfn.splits) -lemma typ_at'_valid_tcb'_lift: - assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" - shows "\\s. valid_tcb' tcb s\ f \\rv s. valid_tcb' tcb s\" - including no_pre - apply (simp add: valid_tcb'_def) - apply (case_tac "tcbState tcb", simp_all add: valid_tcb_state'_def split_def valid_bound_ntfn'_def) - apply (wp hoare_vcg_const_Ball_lift typ_at_lifts[OF P] - | case_tac "tcbBoundNotification tcb", simp_all)+ - done - lemmas setNotification_valid_tcb' = typ_at'_valid_tcb'_lift [OF setNotification_typ_at'] lemma unbindNotification_valid_objs'[wp]: @@ -2603,7 +2589,7 @@ lemma unbindMaybeNotification_valid_objs'[wp]: unbindMaybeNotification t \\rv. valid_objs'\" apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp threadSet_valid_objs' gbn_wp' set_ntfn_valid_objs' hoare_vcg_all_lift setNotification_valid_tcb' getNotification_wp @@ -2632,7 +2618,7 @@ lemma unbindMaybeNotification_obj_at'_bound: unbindMaybeNotification r \\_ s. obj_at' (\ntfn. ntfnBoundTCB ntfn = None) r s\" apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp obj_at_setObject2 | wpc @@ -2681,7 +2667,7 @@ lemma capDeleteOne_bound_tcb_at': lemma cancelIPC_bound_tcb_at'[wp]: "\bound_tcb_at' P tptr\ cancelIPC t \\rv. 
bound_tcb_at' P tptr\" apply (simp add: cancelIPC_def Let_def) - apply (rule hoare_seq_ext[OF _ gts_sp']) + apply (rule bind_wp[OF _ gts_sp']) apply (case_tac "state", simp_all) defer 2 apply (rule hoare_pre) @@ -2704,10 +2690,6 @@ lemma unbindNotification_bound_tcb_at': apply (wp setBoundNotification_bound_tcb gbn_wp' | wpc | simp)+ done -crunches unbindNotification, unbindMaybeNotification - for valid_queues[wp]: "Invariants_H.valid_queues" - (wp: sbn_valid_queues) - crunches unbindNotification, unbindMaybeNotification for weak_sch_act_wf[wp]: "\s. weak_sch_act_wf (ksSchedulerAction s) s" @@ -2727,8 +2709,42 @@ crunch cte_wp_at'[wp]: prepareThreadDelete "cte_wp_at' P p" crunch valid_cap'[wp]: prepareThreadDelete "valid_cap' cap" crunch invs[wp]: prepareThreadDelete "invs'" +crunches prepareThreadDelete + for sched_projs_obj_at'[wp]: + "\s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s" + end +lemma tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\\s. \ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + tcbQueueRemove q t + \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + by (fastforce dest!: heap_ls_last_None + simp: list_queue_relation_def prev_queue_head_def queue_end_valid_def + obj_at'_def projectKOs opt_map_def ps_clear_def objBits_simps + split: if_splits) + +lemma tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\valid_sched_pointers\ + tcbSchedDequeue t + \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + unfolding tcbSchedDequeue_def + by (wpsimp wp: tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at' threadGet_wp) + (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def + valid_sched_pointers_def opt_pred_def opt_map_def projectKOs + split: option.splits) + +crunches updateRestartPC, cancelIPC + for valid_sched_pointers[wp]: valid_sched_pointers + (simp: crunch_simps wp: crunch_wps) + +lemma suspend_tcbSchedNext_tcbSchedPrev_None: + "\invs'\ suspend t \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + unfolding suspend_def + by (wpsimp wp: hoare_drop_imps tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at') + lemma (in delete_one_conc_pre) finaliseCap_replaceable: "\\s. invs' s \ cte_wp_at' (\cte. cteCap cte = cap) slot s \ (final_matters' cap \ (final = isFinal cap slot (cteCaps_of s))) @@ -2748,21 +2764,22 @@ lemma (in delete_one_conc_pre) finaliseCap_replaceable: \ (\p \ threadCapRefs cap. st_tcb_at' ((=) Inactive) p s \ obj_at' (Not \ tcbQueued) p s \ bound_tcb_at' ((=) None) p s - \ (\pr. p \ set (ksReadyQueues s pr))))\" + \ obj_at' (\tcb. 
tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) p s))\" apply (simp add: finaliseCap_def Let_def getThreadCSpaceRoot cong: if_cong split del: if_split) apply (rule hoare_pre) apply (wp prepares_delete_helper'' [OF cancelAllIPC_unlive] prepares_delete_helper'' [OF cancelAllSignals_unlive] - suspend_isFinal prepareThreadDelete_unqueued prepareThreadDelete_nonq + suspend_isFinal prepareThreadDelete_unqueued prepareThreadDelete_inactive prepareThreadDelete_isFinal - suspend_makes_inactive suspend_nonq + suspend_makes_inactive deletingIRQHandler_removeable' deletingIRQHandler_final[where slot=slot ] unbindMaybeNotification_obj_at'_bound getNotification_wp suspend_bound_tcb_at' unbindNotification_bound_tcb_at' + suspend_tcbSchedNext_tcbSchedPrev_None | simp add: isZombie_Null isThreadCap_threadCapRefs_tcbptr isArchObjectCap_Cap_capCap | (rule hoare_strengthen_post [OF arch_finaliseCap_removeable[where slot=slot]], @@ -2799,7 +2816,7 @@ crunch ctes_of[wp]: cancelSignal "\s. P (ctes_of s)" lemma cancelIPC_cteCaps_of: "\\s. (\p. cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap))) \ + P ((cteCaps_of s)(p \ NullCap))) \ P (cteCaps_of s)\ cancelIPC t \\rv s. P (cteCaps_of s)\" @@ -2830,7 +2847,9 @@ lemma cancelIPC_cte_wp_at': apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of x) done -crunch cte_wp_at'[wp]: tcbSchedDequeue "cte_wp_at' P p" +crunches tcbSchedDequeue + for cte_wp_at'[wp]: "cte_wp_at' P p" + (wp: crunch_wps) lemma suspend_cte_wp_at': assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" @@ -2928,7 +2947,7 @@ lemma cteDeleteOne_reply_pred_tcb_at: cteDeleteOne slot \\rv. pred_tcb_at' proj P t\" apply (simp add: cteDeleteOne_def unless_def isFinalCapability_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (clarsimp simp: cte_wp_at_ctes_of when_def isCap_simps Let_def finaliseCapTrue_standin_def) @@ -2949,32 +2968,13 @@ end lemma rescheduleRequired_sch_act_not[wp]: "\\\ rescheduleRequired \\rv. sch_act_not t\" apply (simp add: rescheduleRequired_def setSchedulerAction_def) - apply (wp hoare_post_taut | simp)+ + apply (wp hoare_TrueI | simp)+ done crunch sch_act_not[wp]: cteDeleteOne "sch_act_not t" (simp: crunch_simps case_Null_If unless_def wp: crunch_wps getObject_inv loadObject_default_inv) -lemma cancelAllIPC_mapM_x_valid_queues: - "\Invariants_H.valid_queues and valid_objs' and (\s. \t\set q. tcb_at' t s)\ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv. Invariants_H.valid_queues\" - apply (rule_tac R="\_ s. (\t\set q. tcb_at' t s) \ valid_objs' s" - in hoare_post_add) - apply (rule hoare_pre) - apply (rule mapM_x_wp') - apply (rule hoare_name_pre_state) - apply (wp hoare_vcg_const_Ball_lift - tcbSchedEnqueue_valid_queues tcbSchedEnqueue_not_st - sts_valid_queues sts_st_tcb_at'_cases setThreadState_not_st - | simp - | ((elim conjE)?, drule (1) bspec, clarsimp elim!: obj_at'_weakenE simp: valid_tcb_state'_def))+ - done - lemma cancelAllIPC_mapM_x_weak_sch_act: "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ mapM_x (\t. do @@ -2988,13 +2988,15 @@ lemma cancelAllIPC_mapM_x_weak_sch_act: done lemma cancelAllIPC_mapM_x_valid_objs': - "\valid_objs'\ + "\valid_objs' and pspace_aligned' and pspace_distinct'\ mapM_x (\t. do y \ setThreadState Structures_H.thread_state.Restart t; tcbSchedEnqueue t od) q \\_. 
valid_objs'\" - apply (wp mapM_x_wp' sts_valid_objs') + apply (rule hoare_strengthen_post) + apply (rule mapM_x_wp') + apply (wpsimp wp: sts_valid_objs') apply (clarsimp simp: valid_tcb_state'_def)+ done @@ -3005,18 +3007,12 @@ lemma cancelAllIPC_mapM_x_tcbDomain_obj_at': tcbSchedEnqueue t od) q \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" -apply (wp mapM_x_wp' tcbSchedEnqueue_not_st setThreadState_oa_queued | simp)+ -done + by (wpsimp wp: mapM_x_wp') lemma rescheduleRequired_oa_queued': - "\obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\ - rescheduleRequired - \\_. obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\" -apply (simp add: rescheduleRequired_def) -apply (wp tcbSchedEnqueue_not_st - | wpc - | simp)+ -done + "rescheduleRequired \obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t\" + unfolding rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + by wpsimp lemma cancelAllIPC_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ @@ -3030,21 +3026,6 @@ apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift | simp)+ done -lemma cancelAllIPC_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelAllIPC ep_ptr - \\rv. Invariants_H.valid_queues\" - apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act - set_ep_valid_objs' getEndpoint_wp) - apply (clarsimp simp: valid_ep'_def) - apply (drule (1) ko_at_valid_objs') - apply (auto simp: valid_obj'_def valid_ep'_def valid_tcb'_def projectKOs - split: endpoint.splits - elim: valid_objs_valid_tcbE) - done - lemma cancelAllSignals_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelAllSignals epptr @@ -3061,41 +3042,8 @@ lemma unbindMaybeNotification_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ unbindMaybeNotification r \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" - apply (simp add: unbindMaybeNotification_def) - apply (wp setBoundNotification_oa_queued getNotification_wp gbn_wp' | wpc | simp)+ - done - -lemma cancelAllSignals_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelAllSignals ntfn - \\rv. Invariants_H.valid_queues\" - apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) - apply (case_tac "ntfnObj ntfna", simp_all) - apply (wp, simp)+ - apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act - set_ntfn_valid_objs' - | simp)+ - apply (clarsimp simp: valid_ep'_def) - apply (drule (1) ko_at_valid_objs') - apply (auto simp: valid_obj'_def valid_ntfn'_def valid_tcb'_def projectKOs - split: endpoint.splits - elim: valid_objs_valid_tcbE) - done - -lemma finaliseCapTrue_standin_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - finaliseCapTrue_standin cap final - \\_. Invariants_H.valid_queues\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp | clarsimp | wpc)+ - done - - -crunch valid_queues[wp]: isFinalCapability "Invariants_H.valid_queues" - (simp: crunch_simps) + unfolding unbindMaybeNotification_def + by (wpsimp wp: getNotification_wp gbn_wp' simp: setBoundNotification_def)+ crunch sch_act[wp]: isFinalCapability "\s. 
sch_act_wf (ksSchedulerAction s) s" (simp: crunch_simps) @@ -3104,96 +3052,6 @@ crunch weak_sch_act[wp]: isFinalCapability "\s. weak_sch_act_wf (ksSchedulerAction s) s" (simp: crunch_simps) -lemma cteDeleteOne_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cteDeleteOne sl - \\_. Invariants_H.valid_queues\" (is "\?PRE\ _ \_\") - apply (simp add: cteDeleteOne_def unless_def split_def) - apply (wp isFinalCapability_inv getCTE_wp | rule hoare_drop_imps | simp)+ - apply (clarsimp simp: cte_wp_at'_def) - done - -lemma valid_inQ_queues_lift: - assumes tat: "\d p tcb. \obj_at' (inQ d p) tcb\ f \\_. obj_at' (inQ d p) tcb\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\valid_inQ_queues\ f \\_. valid_inQ_queues\" - proof - - show ?thesis - apply (clarsimp simp: valid_def valid_inQ_queues_def) - apply safe - apply (rule use_valid [OF _ tat], assumption) - apply (drule spec, drule spec, erule conjE, erule bspec) - apply (rule ccontr) - apply (erule notE[rotated], erule(1) use_valid [OF _ prq]) - apply (erule use_valid [OF _ prq]) - apply simp - done - qed - -lemma emptySlot_valid_inQ_queues [wp]: - "\valid_inQ_queues\ emptySlot sl opt \\rv. valid_inQ_queues\" - unfolding emptySlot_def - by (wp opt_return_pres_lift | wpcw | wp valid_inQ_queues_lift | simp)+ - -crunch valid_inQ_queues[wp]: emptySlot valid_inQ_queues - (simp: crunch_simps) - -lemma cancelAllIPC_mapM_x_valid_inQ_queues: - "\valid_inQ_queues\ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv. valid_inQ_queues\" - apply (rule mapM_x_wp_inv) - apply (wp sts_valid_queues [where st="Structures_H.thread_state.Restart", simplified] - setThreadState_st_tcb) - done - -lemma cancelAllIPC_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cancelAllIPC ep_ptr - \\rv. valid_inQ_queues\" - apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (wp cancelAllIPC_mapM_x_valid_inQ_queues) - apply (wp hoare_conjI hoare_drop_imp | simp)+ - done - -lemma cancelAllSignals_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cancelAllSignals ntfn - \\rv. valid_inQ_queues\" - apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) - apply (case_tac "ntfnObj ntfna", simp_all) - apply (wp, simp)+ - apply (wp cancelAllIPC_mapM_x_valid_inQ_queues)+ - apply (simp) - done - -crunches unbindNotification, unbindMaybeNotification - for valid_inQ_queues[wp]: "valid_inQ_queues" - -lemma finaliseCapTrue_standin_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - finaliseCapTrue_standin cap final - \\_. valid_inQ_queues\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp | clarsimp | wpc)+ - done - -crunch valid_inQ_queues[wp]: isFinalCapability valid_inQ_queues - (simp: crunch_simps) - -lemma cteDeleteOne_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cteDeleteOne sl - \\_. valid_inQ_queues\" - apply (simp add: cteDeleteOne_def unless_def) - apply (wpsimp wp: hoare_drop_imp hoare_vcg_all_lift) - done - crunch ksCurDomain[wp]: cteDeleteOne "\s. 
P (ksCurDomain s)" (wp: crunch_wps simp: crunch_simps unless_def) @@ -3239,7 +3097,7 @@ lemma cteDeleteOne_invs[wp]: subgoal by auto subgoal by (auto dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def projectKOs ko_wp_at'_def) - apply (wp isFinalCapability_inv getCTE_wp' static_imp_wp + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp | wp (once) isFinal[where x=ptr])+ apply (fastforce simp: cte_wp_at_ctes_of) done @@ -3437,10 +3295,9 @@ lemma arch_finaliseCap_corres: lemma unbindNotification_corres: "corres dc (invs and tcb_at t) - (invs' and tcb_at' t) + invs' (unbind_notification t) (unbindNotification t)" - supply option.case_cong_weak[cong] apply (simp add: unbind_notification_def unbindNotification_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getBoundNotification_corres]) @@ -3455,12 +3312,12 @@ lemma unbindNotification_corres: apply (wp gbn_wp' gbn_wp)+ apply (clarsimp elim!: obj_at_valid_objsE dest!: bound_tcb_at_state_refs_ofD invs_valid_objs - simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def - valid_tcb_def valid_bound_ntfn_def + simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def obj_at_def + valid_tcb_def valid_bound_ntfn_def invs_psp_aligned invs_distinct split: option.splits) apply (clarsimp dest!: obj_at_valid_objs' bound_tcb_at_state_refs_ofD' invs_valid_objs' - simp: projectKOs valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def - tcb_ntfn_is_bound'_def + simp: valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def tcb_ntfn_is_bound'_def + projectKOs split: option.splits) done @@ -3481,11 +3338,11 @@ lemma unbindMaybeNotification_corres: apply (wp get_simple_ko_wp getNotification_wp)+ apply (clarsimp elim!: obj_at_valid_objsE dest!: bound_tcb_at_state_refs_ofD invs_valid_objs - simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def + simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def invs_psp_aligned invs_distinct valid_tcb_def valid_bound_ntfn_def valid_ntfn_def split: option.splits) apply (clarsimp dest!: obj_at_valid_objs' bound_tcb_at_state_refs_ofD' invs_valid_objs' - simp: projectKOs valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def + simp: valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def tcb_ntfn_is_bound'_def valid_ntfn'_def split: option.splits) done @@ -3620,12 +3477,6 @@ lemma arch_recycleCap_improve_cases: \ isASIDControlCap cap \ \ (if isASIDPoolCap cap then v else undefined) = v" by (cases cap, simp_all add: isCap_simps) -crunch queues[wp]: copyGlobalMappings "Invariants_H.valid_queues" - (wp: crunch_wps ignore: storePDE) - -crunch queues'[wp]: copyGlobalMappings "Invariants_H.valid_queues'" - (wp: crunch_wps ignore: storePDE) - crunch ifunsafe'[wp]: copyGlobalMappings "if_unsafe_then_cap'" (wp: crunch_wps ignore: storePDE) @@ -3674,178 +3525,6 @@ lemma cteCaps_of_ctes_of_lift: lemmas final_matters'_simps = final_matters'_def [split_simps capability.split arch_capability.split] -definition set_thread_all :: "obj_ref \ Structures_A.tcb \ etcb - \ unit det_ext_monad" where - "set_thread_all ptr tcb etcb \ - do s \ get; - kh \ return $ kheap s(ptr \ (TCB tcb)); - ekh \ return $ (ekheap s)(ptr \ etcb); - put (s\kheap := kh, ekheap := ekh\) - od" - -definition thread_gets_the_all :: "obj_ref \ (Structures_A.tcb \ etcb) det_ext_monad" where - "thread_gets_the_all tptr \ - do tcb \ gets_the $ get_tcb tptr; - etcb \ gets_the $ get_etcb tptr; - return $ (tcb, etcb) od" - -definition thread_set_all :: "(Structures_A.tcb \ Structures_A.tcb) \ (etcb \ etcb) - \ obj_ref \ unit det_ext_monad" where - "thread_set_all f g tptr \ - do (tcb, etcb) \ 
thread_gets_the_all tptr; - set_thread_all tptr (f tcb) (g etcb) - od" - -lemma set_thread_all_corres: - fixes ob' :: "'a :: pspace_storable" - assumes x: "updateObject ob' = updateObject_default ob'" - assumes z: "\s. obj_at' P ptr s - \ map_to_ctes ((ksPSpace s) (ptr \ injectKO ob')) = map_to_ctes (ksPSpace s)" - assumes b: "\ko. P ko \ objBits ko = objBits ob'" - assumes P: "\(v::'a::pspace_storable). (1 :: word32) < 2 ^ (objBits v)" - assumes e: "etcb_relation etcb tcb'" - assumes is_t: "injectKO (ob' :: 'a :: pspace_storable) = KOTCB tcb'" - shows "other_obj_relation (TCB tcb) (injectKO (ob' :: 'a :: pspace_storable)) \ - corres dc (obj_at (same_caps (TCB tcb)) ptr and is_etcb_at ptr) - (obj_at' (P :: 'a \ bool) ptr) - (set_thread_all ptr tcb etcb) (setObject ptr ob')" - apply (rule corres_no_failI) - apply (rule no_fail_pre) - apply wp - apply (rule x) - apply (clarsimp simp: b elim!: obj_at'_weakenE) - apply (unfold set_thread_all_def setObject_def) - apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def - put_def return_def modify_def get_object_def x - projectKOs - updateObject_default_def in_magnitude_check [OF _ P]) - apply (clarsimp simp add: state_relation_def z) - apply (simp add: trans_state_update'[symmetric] trans_state_update[symmetric] - del: trans_state_update) - apply (clarsimp simp add: swp_def fun_upd_def obj_at_def is_etcb_at_def) - apply (subst cte_wp_at_after_update,fastforce simp add: obj_at_def) - apply (subst caps_of_state_after_update,fastforce simp add: obj_at_def) - apply clarsimp - apply (subst conj_assoc[symmetric]) - apply (rule conjI[rotated]) - apply (clarsimp simp add: ghost_relation_def) - apply (erule_tac x=ptr in allE)+ - apply (clarsimp simp: obj_at_def - split: Structures_A.kernel_object.splits if_split_asm) - - apply (fold fun_upd_def) - apply (simp only: pspace_relation_def dom_fun_upd2 simp_thms) - apply (subst pspace_dom_update) - apply assumption - apply simp - apply (simp only: dom_fun_upd2 simp_thms) - apply (elim conjE) - apply (frule bspec, erule domI) - apply (rule conjI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: is_other_obj_relation_type) - apply (drule(1) bspec) - apply clarsimp - apply (frule_tac ko'="TCB tcb'" and x'=ptr in obj_relation_cut_same_type, - (fastforce simp add: is_other_obj_relation_type)+)[1] - apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) - apply (frule bspec, erule domI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: obj_at'_def) - apply (clarsimp simp: projectKOs) - apply (insert e is_t) - by (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits ARM_A.arch_kernel_obj.splits) - -lemma tcb_update_all_corres': - assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" - assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" - assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" - assumes r: "r () ()" - assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" - shows "corres r (ko_at (TCB tcb) add and (\s. 
ekheap s add = Some etcb)) - (ko_at' tcb' add) - (set_thread_all add tcbu etcbu) (setObject add tcbu')" - apply (rule_tac F="tcb_relation tcb tcb' \ etcb_relation etcbu tcbu'" in corres_req) - apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) - apply (frule(1) pspace_relation_absD) - apply (force simp: projectKOs other_obj_relation_def ekheap_relation_def e) - apply (erule conjE) - apply (rule corres_guard_imp) - apply (rule corres_rel_imp) - apply (rule set_thread_all_corres[where P="(=) tcb'"]) - apply (rule ext)+ - apply simp - defer - apply (simp add: is_other_obj_relation_type_def - projectKOs objBits_simps' - other_obj_relation_def tcbs r)+ - apply (fastforce simp: is_etcb_at_def elim!: obj_at_weakenE dest: bspec[OF tables]) - apply (subst(asm) eq_commute, assumption) - apply (clarsimp simp: projectKOs obj_at'_def objBits_simps) - apply (subst map_to_ctes_upd_tcb, assumption+) - apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) - apply (subst if_not_P) - apply (fastforce dest: bspec [OF tables', OF ranI]) - apply simp - done - -lemma thread_gets_the_all_corres: - shows "corres (\(tcb, etcb) tcb'. tcb_relation tcb tcb' \ etcb_relation etcb tcb') - (tcb_at t and is_etcb_at t) (tcb_at' t) - (thread_gets_the_all t) (getObject t)" - apply (rule corres_no_failI) - apply wp - apply (clarsimp simp add: gets_def get_def return_def bind_def get_tcb_def thread_gets_the_all_def threadGet_def ethread_get_def gets_the_def assert_opt_def get_etcb_def is_etcb_at_def tcb_at_def liftM_def split: option.splits Structures_A.kernel_object.splits) - apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) - apply (clarsimp simp add: obj_at_def is_tcb obj_at'_def projectKO_def - projectKO_opt_tcb split_def - getObject_def loadObject_default_def in_monad) - apply (case_tac ko) - apply (simp_all add: fail_def return_def) - apply (clarsimp simp add: state_relation_def pspace_relation_def ekheap_relation_def) - apply (drule bspec) - apply clarsimp - apply blast - apply (drule bspec, erule domI) - apply (clarsimp simp add: other_obj_relation_def - lookupAround2_known1) - done - -lemma thread_set_all_corresT: - assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ - tcb_relation (f tcb) (f' tcb')" - assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" - assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. - getF (f' tcb) = getF tcb" - assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ - etcb_relation (g etcb) (f' tcb')" - shows "corres dc (tcb_at t and valid_etcbs) - (tcb_at' t) - (thread_set_all f g t) (threadSet f' t)" - apply (simp add: thread_set_all_def threadSet_def bind_assoc) - apply (rule corres_guard_imp) - apply (rule corres_split[OF thread_gets_the_all_corres]) - apply (simp add: split_def) - apply (rule tcb_update_all_corres') - apply (erule x) - apply (rule y) - apply (clarsimp simp: bspec_split [OF spec [OF z]]) - apply fastforce - apply (erule e) - apply (simp add: thread_gets_the_all_def, wp+) - apply clarsimp - apply (frule(1) tcb_at_is_etcb_at) - apply (clarsimp simp add: tcb_at_def get_etcb_def obj_at_def) - apply (drule get_tcb_SomeD) - apply fastforce - apply simp - done - -lemmas thread_set_all_corres = - thread_set_all_corresT [OF _ _ all_tcbI, OF _ ball_tcb_cap_casesI ball_tcb_cte_casesI] - crunch idle_thread[wp]: deleteCallerCap "\s. P (ksIdleThread s)" (wp: crunch_wps) crunch sch_act_simple: deleteCallerCap sch_act_simple @@ -3863,89 +3542,6 @@ lemma setEndpoint_sch_act_not_ct[wp]: setEndpoint ptr val \\_ s. 
sch_act_not (ksCurThread s) s\" by (rule hoare_weaken_pre, wps setEndpoint_ct', wp, simp) -lemma cancelAll_ct_not_ksQ_helper: - "\(\s. ksCurThread s \ set (ksReadyQueues s p)) and (\s. ksCurThread s \ set q) \ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (rule mapM_x_inv_wp2, simp) - apply (wp) - apply (wps tcbSchedEnqueue_ct') - apply (wp tcbSchedEnqueue_ksQ) - apply (wps setThreadState_ct') - apply (wp sts_ksQ') - apply (clarsimp) - done - -lemma cancelAllIPC_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cancelAllIPC epptr - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - (is "\?PRE\ _ \\_. ?POST\") - apply (simp add: cancelAllIPC_def) - apply (wp, wpc, wp) - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply clarsimp - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply clarsimp - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ - prefer 2 - apply assumption - apply (rule_tac Q="\ep. ?PRE and ko_at' ep epptr" in hoare_post_imp) - apply (clarsimp) - apply (rule conjI) - apply ((clarsimp simp: invs'_def valid_state'_def - sch_act_sane_def - | drule(1) ct_not_in_epQueue)+)[2] - apply (wp get_ep_sp') - done - -lemma cancelAllSignals_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cancelAllSignals ntfnptr - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - (is "\?PRE\ _ \\_. ?POST\") - apply (simp add: cancelAllSignals_def) - apply (wp, wpc, wp+) - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply clarsimp - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setNotification_ksQ setNotification_ksCurThread]) - apply (wps setNotification_ksCurThread, wp) - prefer 2 - apply assumption - apply (rule_tac Q="\ep. ?PRE and ko_at' ep ntfnptr" in hoare_post_imp) - apply ((clarsimp simp: invs'_def valid_state'_def sch_act_sane_def - | drule(1) ct_not_in_ntfnQueue)+)[1] - apply (wp get_ntfn_sp') - done - -lemma unbindMaybeNotification_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - unbindMaybeNotification t - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) - apply (case_tac "ntfnBoundTCB ntfn", simp, wp, simp+) - apply (rule hoare_pre) - apply wp - apply (wps setBoundNotification_ct') - apply (wp sbn_ksQ) - apply (wps setNotification_ksCurThread, wp) - apply clarsimp - done - lemma sbn_ct_in_state'[wp]: "\ct_in_state' P\ setBoundNotification ntfn t \\_. ct_in_state' P\" apply (simp add: ct_in_state'_def) @@ -3978,37 +3574,6 @@ crunches unbindNotification, unbindMaybeNotification for sch_act_sane[wp]: "sch_act_sane" end -lemma finaliseCapTrue_standin_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - finaliseCapTrue_standin cap final - \\rv s. 
ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp cancelAllIPC_ct_not_ksQ cancelAllSignals_ct_not_ksQ - hoare_drop_imps unbindMaybeNotification_ct_not_ksQ - | wpc - | clarsimp simp: isNotificationCap_def isReplyCap_def split:capability.splits)+ - done - -lemma cteDeleteOne_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cteDeleteOne slot - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: cteDeleteOne_def unless_def split_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) - apply (case_tac "\final. finaliseCap (cteCap cte) final True = fail") - apply (simp add: finaliseCapTrue_standin_simple_def) - apply wp - apply (clarsimp) - apply (wp emptySlot_cteCaps_of hoare_lift_Pf2 [OF emptySlot_ksRQ emptySlot_ct]) - apply (simp add: cteCaps_of_def) - apply (wp (once) hoare_drop_imps) - apply (wp finaliseCapTrue_standin_ct_not_ksQ isFinalCapability_inv)+ - apply (clarsimp) - done - end end diff --git a/proof/refine/ARM/Init_R.thy b/proof/refine/ARM/Init_R.thy index 0a530b3998..6192b6c601 100644 --- a/proof/refine/ARM/Init_R.thy +++ b/proof/refine/ARM/Init_R.thy @@ -95,7 +95,7 @@ definition zeroed_intermediate_state :: ksDomSchedule = [], ksCurDomain = 0, ksDomainTime = 0, - ksReadyQueues = K [], + ksReadyQueues = K (TcbQueue None None), ksReadyQueuesL1Bitmap = K 0, ksReadyQueuesL2Bitmap = K 0, ksCurThread = 0, @@ -116,9 +116,11 @@ lemma non_empty_refine_state_relation: "(zeroed_abstract_state, zeroed_intermediate_state) \ state_relation" apply (clarsimp simp: state_relation_def zeroed_state_defs state.defs) apply (intro conjI) - apply (clarsimp simp: pspace_relation_def pspace_dom_def) - apply (clarsimp simp: ekheap_relation_def) - apply (clarsimp simp: ready_queues_relation_def) + apply (clarsimp simp: pspace_relation_def pspace_dom_def) + apply (clarsimp simp: ekheap_relation_def) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def queue_end_valid_def + opt_pred_def list_queue_relation_def tcbQueueEmpty_def + prev_queue_head_def) apply (clarsimp simp: ghost_relation_def) apply (fastforce simp: cdt_relation_def swp_def dest: cte_wp_at_domI) apply (clarsimp simp: cdt_list_relation_def map_to_ctes_def) diff --git a/proof/refine/ARM/InterruptAcc_R.thy b/proof/refine/ARM/InterruptAcc_R.thy index c096dbff23..a682adaa7b 100644 --- a/proof/refine/ARM/InterruptAcc_R.thy +++ b/proof/refine/ARM/InterruptAcc_R.thy @@ -52,14 +52,13 @@ lemma setIRQState_invs[wp]: apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def) apply (wp dmo_maskInterrupt) apply (clarsimp simp: invs'_def valid_state'_def cur_tcb'_def - Invariants_H.valid_queues_def valid_queues'_def valid_idle'_def valid_irq_node'_def valid_arch_state'_def valid_global_refs'_def global_refs'_def valid_machine_state'_def if_unsafe_then_cap'_def ex_cte_cap_to'_def valid_irq_handlers'_def irq_issued'_def cteCaps_of_def valid_irq_masks'_def - bitmapQ_defs valid_queues_no_bitmap_def) + bitmapQ_defs valid_bitmaps_def) apply (rule conjI, clarsimp) apply (clarsimp simp: irqs_masked'_def ct_not_inQ_def) apply (rule conjI) @@ -114,7 +113,7 @@ lemma preemptionPoint_inv: shows "\P\ preemptionPoint \\_. 
P\" using assms apply (simp add: preemptionPoint_def setWorkUnits_def getWorkUnits_def modifyWorkUnits_def) apply (wpc - | wp hoare_whenE_wp hoare_seq_ext [OF _ select_inv] alternative_valid hoare_drop_imps + | wp whenE_wp bind_wp [OF _ select_inv] hoare_drop_imps | simp)+ done @@ -149,8 +148,7 @@ lemma invs'_irq_state_independent [simp, intro!]: valid_idle'_def valid_global_refs'_def valid_arch_state'_def valid_irq_node'_def valid_irq_handlers'_def valid_irq_states'_def - irqs_masked'_def bitmapQ_defs valid_queues_no_bitmap_def - valid_queues'_def valid_pde_mappings'_def + irqs_masked'_def bitmapQ_defs valid_pde_mappings'_def pspace_domain_valid_def cur_tcb'_def valid_machine_state'_def tcb_in_cur_domain'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def diff --git a/proof/refine/ARM/Interrupt_R.thy b/proof/refine/ARM/Interrupt_R.thy index 0888951a75..cd04d7b08d 100644 --- a/proof/refine/ARM/Interrupt_R.thy +++ b/proof/refine/ARM/Interrupt_R.thy @@ -187,6 +187,7 @@ crunches arch_check_irq, checkIRQ lemma arch_check_irq_maxIRQ_valid: "\\\ arch_check_irq y \\_. (\s. unat y \ unat maxIRQ)\, -" unfolding arch_check_irq_def + supply hoare_vcg_prop[wp del] (* FIXME lib: check rule order *) apply (wpsimp simp: validE_R_def wp: whenE_throwError_wp) by (metis unat_ucast_10_32 word_le_nat_alt word_le_not_less) @@ -616,13 +617,6 @@ lemma decDomainTime_corres: apply (clarsimp simp:state_relation_def) done -lemma tcbSchedAppend_valid_objs': - "\valid_objs'\tcbSchedAppend t \\r. valid_objs'\" - apply (simp add:tcbSchedAppend_def) - apply (wpsimp wp: hoare_unless_wp threadSet_valid_objs' threadGet_wp) - apply (clarsimp simp add:obj_at'_def typ_at'_def) - done - lemma thread_state_case_if: "(case state of Structures_A.thread_state.Running \ f | _ \ g) = (if state = Structures_A.thread_state.Running then f else g)" @@ -633,35 +627,27 @@ lemma threadState_case_if: (if state = Structures_H.thread_state.Running then f else g)" by (case_tac state,auto) -lemma tcbSchedAppend_invs_but_ct_not_inQ': - "\invs' and st_tcb_at' runnable' t \ - tcbSchedAppend t \\_. 
all_invs_but_ct_not_inQ'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp sch_act_wf_lift valid_irq_node_lift irqs_masked_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | fastforce elim!: st_tcb_ex_cap'' split: thread_state.split_asm)+ - done +lemma ready_qs_distinct_domain_time_update[simp]: + "ready_qs_distinct (domain_time_update f s) = ready_qs_distinct s" + by (clarsimp simp: ready_qs_distinct_def) lemma timerTick_corres: - "corres dc (cur_tcb and valid_sched) - invs' - timer_tick timerTick" - supply if_weak_cong[cong] + "corres dc + (cur_tcb and valid_sched and pspace_aligned and pspace_distinct) invs' + timer_tick timerTick" apply (simp add: timerTick_def timer_tick_def) - apply (simp add:thread_state_case_if threadState_case_if) - apply (rule_tac Q="\ and (cur_tcb and valid_sched)" and Q'="\ and invs'" in corres_guard_imp) + apply (simp add: thread_state_case_if threadState_case_if) + apply (rule_tac Q="cur_tcb and valid_sched and pspace_aligned and pspace_distinct" + and Q'=invs' + in corres_guard_imp) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) apply simp apply (rule corres_split[OF getThreadState_corres]) apply (rename_tac state state') - apply (rule corres_split[where r' = dc ]) + apply (rule corres_split[where r' = dc]) apply (rule corres_if[where Q = \ and Q' = \]) apply (case_tac state,simp_all)[1] - apply (simp add: Let_def) apply (rule_tac r'="(=)" in corres_split[OF ethreadget_corres]) apply (simp add:etcb_relation_def) apply (rename_tac ts ts') @@ -671,55 +657,53 @@ lemma timerTick_corres: apply (rule ethread_set_corres, simp+) apply (clarsimp simp: etcb_relation_def) apply simp - apply (rule corres_split) - apply (rule ethread_set_corres; simp) - apply (simp add: etcb_relation_def) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF ethread_set_corres]) + apply (simp add: sch_act_wf_weak etcb_relation_def pred_conj_def)+ + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (rule rescheduleRequired_corres) - apply (wp)[1] - apply (rule hoare_strengthen_post) - apply (rule tcbSchedAppend_invs_but_ct_not_inQ', - clarsimp simp: sch_act_wf_weak) - apply (wp threadSet_timeslice_invs threadSet_valid_queues - threadSet_valid_queues' threadSet_pred_tcb_at_state)+ - apply simp - apply simp - apply (rule corres_when,simp) + apply wp + apply ((wpsimp wp: tcbSchedAppend_sym_heap_sched_pointers + tcbSchedAppend_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply ((wp thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply wpsimp+ + apply (rule corres_when, simp) apply (rule corres_split[OF decDomainTime_corres]) apply (rule corres_split[OF getDomainTime_corres]) apply (rule corres_when,simp) apply (rule rescheduleRequired_corres) apply (wp hoare_drop_imp)+ - apply (simp add:dec_domain_time_def) - apply wp+ - apply (simp add:decDomainTime_def) - apply wp - apply (wp|wpc|unfold Let_def|simp)+ - apply (wp static_imp_wp threadSet_timeslice_invs threadSet_valid_queues threadSet_valid_queues' - threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf tcbSchedAppend_valid_objs' - rescheduleRequired_weak_sch_act_wf tcbSchedAppend_valid_queues| simp)+ - apply 
(strengthen sch_act_wf_weak) - apply (clarsimp simp:conj_comms) - apply (wp tcbSchedAppend_valid_queues tcbSchedAppend_sch_act_wf) - apply simp - apply (wp threadSet_valid_queues threadSet_pred_tcb_at_state threadSet_sch_act - threadSet_tcbDomain_triv threadSet_valid_queues' threadSet_valid_objs'| simp)+ - apply (wp threadGet_wp gts_wp gts_wp')+ - apply (clarsimp simp: cur_tcb_def tcb_at_is_etcb_at valid_sched_def valid_sched_action_def) - prefer 2 - apply clarsimp - apply (clarsimp simp add:cur_tcb_def valid_sched_def - valid_sched_action_def valid_etcbs_def is_tcb_def - is_etcb_at_def st_tcb_at_def obj_at_def - dest!:get_tcb_SomeD) - apply (clarsimp simp: invs'_def valid_state'_def - sch_act_wf_weak - cur_tcb'_def inQ_def - ct_in_state'_def obj_at'_def) - apply (clarsimp simp:st_tcb_at'_def - valid_idle'_def ct_idle_or_in_cur_domain'_def - obj_at'_def projectKO_eq) - apply simp + apply (wpsimp simp: dec_domain_time_def) + apply (wpsimp simp: decDomainTime_def) + apply (wpsimp wp: hoare_weak_lift_imp threadSet_timeslice_invs + tcbSchedAppend_valid_objs' + threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf + rescheduleRequired_weak_sch_act_wf)+ + apply (strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_time_slice_valid_queues) + apply ((wpsimp wp: thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+)[1] + apply wpsimp + apply wpsimp + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs' + | wp (once) hoare_drop_imp)+)[1] + apply (wpsimp wp: gts_wp gts_wp')+ + apply (clarsimp simp: cur_tcb_def) + apply (frule valid_sched_valid_etcbs) + apply (frule (1) tcb_at_is_etcb_at) + apply (frule valid_sched_valid_queues) + apply (fastforce simp: pred_tcb_at_def obj_at_def valid_sched_weak_strg) + apply (clarsimp simp: etcb_at_def split: option.splits) + apply fastforce + apply (fastforce simp: valid_state'_def ct_not_inQ_def) + apply fastforce done lemmas corres_eq_trivial = corres_Id[where f = h and g = h for h, simplified] @@ -771,7 +755,7 @@ lemma handleInterrupt_corres: apply (rule corres_machine_op) apply (rule corres_eq_trivial, (simp add: no_fail_ackInterrupt)+) apply wp+ - apply clarsimp + apply fastforce apply clarsimp done @@ -782,10 +766,10 @@ lemma threadSet_ksDomainTime[wp]: done crunch ksDomainTime[wp]: rescheduleRequired "\s. P (ksDomainTime s)" -(simp:tcbSchedEnqueue_def wp:hoare_unless_wp) +(simp:tcbSchedEnqueue_def wp:unless_wp) crunch ksDomainTime[wp]: tcbSchedAppend "\s. P (ksDomainTime s)" -(simp:tcbSchedEnqueue_def wp:hoare_unless_wp) +(simp:tcbSchedEnqueue_def wp:unless_wp) lemma updateTimeSlice_valid_pspace[wp]: "\valid_pspace'\ threadSet (tcbTimeSlice_update (\_. ts')) thread @@ -800,16 +784,6 @@ lemma updateTimeSlice_sch_act_wf[wp]: \\r s. sch_act_wf (ksSchedulerAction s) s\" by (wp threadSet_sch_act,simp) - -lemma updateTimeSlice_valid_queues[wp]: - "\\s. Invariants_H.valid_queues s \ - threadSet (tcbTimeSlice_update (\_. ts')) thread - \\r s. 
Invariants_H.valid_queues s\" - apply (wp threadSet_valid_queues,simp) - apply (clarsimp simp:obj_at'_def inQ_def) - done - - (* catch up tcbSchedAppend to tcbSchedEnqueue, which has these from crunches on possibleSwitchTo *) crunch irq_handlers'[wp]: tcbSchedAppend valid_irq_handlers' (simp: unless_def tcb_cte_cases_def wp: crunch_wps) @@ -819,29 +793,29 @@ crunch ct[wp]: tcbSchedAppend cur_tcb' (wp: cur_tcb_lift crunch_wps) lemma timerTick_invs'[wp]: - "\invs'\ timerTick \\rv. invs'\" + "timerTick \invs'\" apply (simp add: timerTick_def) apply (wpsimp wp: threadSet_invs_trivial threadSet_pred_tcb_no_state rescheduleRequired_all_invs_but_ct_not_inQ - tcbSchedAppend_invs_but_ct_not_inQ' - simp: tcb_cte_cases_def) - apply (rule_tac Q="\rv. invs'" in hoare_post_imp) - apply (clarsimp simp add:invs'_def valid_state'_def) + simp: tcb_cte_cases_def) + apply (rule_tac Q="\rv. invs'" in hoare_post_imp) + apply (clarsimp simp: invs'_def valid_state'_def) apply (simp add: decDomainTime_def) apply wp apply simp apply wpc - apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs - rescheduleRequired_all_invs_but_ct_not_inQ - hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain' - del: tcbSchedAppend_sch_act_wf)+ - apply (rule hoare_strengthen_post[OF tcbSchedAppend_invs_but_ct_not_inQ']) - apply (wpsimp simp: valid_pspace'_def sch_act_wf_weak)+ - apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv - threadSet_valid_objs' threadSet_timeslice_invs)+ - apply (wp threadGet_wp) + apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs + rescheduleRequired_all_invs_but_ct_not_inQ + hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain')+ + apply (rule hoare_strengthen_post[OF tcbSchedAppend_all_invs_but_ct_not_inQ']) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ + apply (rule_tac Q="\_. invs'" in hoare_strengthen_post) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv + threadSet_valid_objs' threadSet_timeslice_invs)+ + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ apply (wp gts_wp')+ - apply (clarsimp simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def) + apply (auto simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def cong: conj_cong) done lemma resetTimer_invs'[wp]: diff --git a/proof/refine/ARM/InvariantUpdates_H.thy b/proof/refine/ARM/InvariantUpdates_H.thy index 377bda1525..1e6db0685c 100644 --- a/proof/refine/ARM/InvariantUpdates_H.thy +++ b/proof/refine/ARM/InvariantUpdates_H.thy @@ -16,7 +16,7 @@ lemma ps_clear_domE[elim?]: lemma ps_clear_upd: "ksPSpace s y = Some v \ - ps_clear x n (ksPSpace_update (\a. ksPSpace s(y \ v')) s') = ps_clear x n s" + ps_clear x n (ksPSpace_update (\a. 
(ksPSpace s)(y \ v')) s') = ps_clear x n s" by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+ lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated] @@ -38,8 +38,9 @@ lemma invs'_machine: proof - show ?thesis apply (cases "ksSchedulerAction s") - apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs + apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_bitmaps_def bitmapQ_defs vms ct_not_inQ_def state_refs_of'_def ps_clear_def valid_irq_node'_def mask @@ -56,12 +57,13 @@ lemma invs_no_cicd'_machine: proof - show ?thesis apply (cases "ksSchedulerAction s") - apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - vms ct_not_inQ_def - state_refs_of'_def ps_clear_def - valid_irq_node'_def mask - cong: option.case_cong) + apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_bitmaps_def bitmapQ_defs + vms ct_not_inQ_def + state_refs_of'_def ps_clear_def + valid_irq_node'_def mask + cong: option.case_cong) done qed @@ -98,14 +100,9 @@ lemma valid_tcb'_tcbTimeSlice_update[simp]: "valid_tcb' (tcbTimeSlice_update f tcb) s = valid_tcb' tcb s" by (simp add:valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) -lemma valid_queues_ksSchedulerAction_update[simp]: - "valid_queues (ksSchedulerAction_update f s) = valid_queues s" - unfolding valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - by simp - -lemma valid_queues'_ksSchedulerAction_update[simp]: - "valid_queues' (ksSchedulerAction_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksSchedulerAction_update[simp]: + "valid_bitmaps (ksSchedulerAction_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma ex_cte_cap_wp_to'_gsCNodes_update[simp]: "ex_cte_cap_wp_to' P p (gsCNodes_update f s') = ex_cte_cap_wp_to' P p s'" @@ -140,45 +137,25 @@ lemma tcb_in_cur_domain_ct[simp]: "tcb_in_cur_domain' t (ksCurThread_update f s) = tcb_in_cur_domain' t s" by (fastforce simp: tcb_in_cur_domain'_def) -lemma valid_queues'_ksCurDomain[simp]: - "valid_queues' (ksCurDomain_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - -lemma valid_queues'_ksDomScheduleIdx[simp]: - "valid_queues' (ksDomScheduleIdx_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksCurDomain[simp]: + "valid_bitmaps (ksCurDomain_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksDomSchedule[simp]: - "valid_queues' (ksDomSchedule_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomScheduleIdx[simp]: + "valid_bitmaps (ksDomScheduleIdx_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksDomainTime[simp]: - "valid_queues' (ksDomainTime_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomSchedule[simp]: + "valid_bitmaps (ksDomSchedule_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksWorkUnitsCompleted[simp]: - "valid_queues' 
(ksWorkUnitsCompleted_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomainTime[simp]: + "valid_bitmaps (ksDomainTime_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues_ksCurDomain[simp]: - "valid_queues (ksCurDomain_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomScheduleIdx[simp]: - "valid_queues (ksDomScheduleIdx_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomSchedule[simp]: - "valid_queues (ksDomSchedule_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomainTime[simp]: - "valid_queues (ksDomainTime_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksWorkUnitsCompleted[simp]: - "valid_queues (ksWorkUnitsCompleted_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) +lemma valid_bitmaps_ksWorkUnitsCompleted[simp]: + "valid_bitmaps (ksWorkUnitsCompleted_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma valid_irq_node'_ksCurDomain[simp]: "valid_irq_node' w (ksCurDomain_update f s) = valid_irq_node' w s" @@ -255,6 +232,10 @@ lemma valid_mdb_interrupts'[simp]: "valid_mdb' (ksInterruptState_update f s) = valid_mdb' s" by (simp add: valid_mdb'_def) +lemma valid_mdb'_ksReadyQueues_update[simp]: + "valid_mdb' (ksReadyQueues_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + lemma vms_ksReadyQueues_update[simp]: "valid_machine_state' (ksReadyQueues_update f s) = valid_machine_state' s" by (simp add: valid_machine_state'_def) @@ -279,10 +260,10 @@ lemma ct_in_state_ksSched[simp]: lemma invs'_wu [simp]: "invs' (ksWorkUnitsCompleted_update f s) = invs' s" - apply (simp add: invs'_def cur_tcb'_def valid_state'_def Invariants_H.valid_queues_def - valid_queues'_def valid_irq_node'_def valid_machine_state'_def + apply (simp add: invs'_def cur_tcb'_def valid_state'_def valid_bitmaps_def + valid_irq_node'_def valid_machine_state'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def) + bitmapQ_defs) done lemma valid_arch_state'_interrupt[simp]: @@ -334,9 +315,8 @@ lemma sch_act_simple_ksReadyQueuesL2Bitmap[simp]: lemma ksDomainTime_invs[simp]: "invs' (ksDomainTime_update f s) = invs' s" - by (simp add:invs'_def valid_state'_def - cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def - tcb_in_cur_domain'_def valid_machine_state'_def) + by (simp add: invs'_def valid_state'_def cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_machine_state'_def bitmapQ_defs) lemma valid_machine_state'_ksDomainTime[simp]: "valid_machine_state' (ksDomainTime_update f s) = valid_machine_state' s" @@ -364,9 +344,7 @@ lemma ct_not_inQ_update_stt[simp]: lemma invs'_update_cnt[elim!]: "invs' s \ invs' (s\ksSchedulerAction := ChooseNewThread\)" - by (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues'_def - valid_irq_node'_def cur_tcb'_def ct_idle_or_in_cur_domain'_def - tcb_in_cur_domain'_def valid_queues_no_bitmap_def - bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def) + by (clarsimp simp: invs'_def valid_state'_def valid_irq_node'_def cur_tcb'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def 
bitmapQ_defs) end \ No newline at end of file diff --git a/proof/refine/ARM/Invariants_H.thy b/proof/refine/ARM/Invariants_H.thy index 1c35897b96..cd883e784f 100644 --- a/proof/refine/ARM/Invariants_H.thy +++ b/proof/refine/ARM/Invariants_H.thy @@ -10,6 +10,7 @@ imports "AInvs.Deterministic_AI" "AInvs.AInvs" "Lib.AddUpdSimps" + Lib.Heap_List begin context Arch begin @@ -158,6 +159,21 @@ definition abbreviation "cte_at' \ cte_wp_at' \" +abbreviation tcb_of' :: "kernel_object \ tcb option" where + "tcb_of' \ projectKO_opt" + +abbreviation tcbs_of' :: "kernel_state \ obj_ref \ tcb option" where + "tcbs_of' s \ ksPSpace s |> tcb_of'" + +abbreviation tcbSchedPrevs_of :: "kernel_state \ obj_ref \ obj_ref option" where + "tcbSchedPrevs_of s \ tcbs_of' s |> tcbSchedPrev" + +abbreviation tcbSchedNexts_of :: "kernel_state \ obj_ref \ obj_ref option" where + "tcbSchedNexts_of s \ tcbs_of' s |> tcbSchedNext" + +abbreviation sym_heap_sched_pointers :: "global.kernel_state \ bool" where + "sym_heap_sched_pointers s \ sym_heap (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + definition tcb_cte_cases :: "word32 \ ((tcb \ cte) \ ((cte \ cte) \ tcb \ tcb))" where @@ -232,13 +248,14 @@ where then refs_of' ko else {}))" - primrec live' :: "Structures_H.kernel_object \ bool" where "live' (KOTCB tcb) = - (bound (tcbBoundNotification tcb) \ - (tcbState tcb \ Inactive \ tcbState tcb \ IdleThreadState) \ tcbQueued tcb)" + (bound (tcbBoundNotification tcb) + \ tcbSchedPrev tcb \ None \ tcbSchedNext tcb \ None + \ tcbQueued tcb + \ (tcbState tcb \ Inactive \ tcbState tcb \ IdleThreadState))" | "live' (KOCTE cte) = False" | "live' (KOEndpoint ep) = (ep \ IdleEP)" | "live' (KONotification ntfn) = (bound (ntfnBoundTCB ntfn) \ (\ts. ntfnObj ntfn = WaitingNtfn ts))" @@ -479,6 +496,11 @@ where capability.ArchObjectCap (arch_capability.PageCap dev _ _ _ _) \ dev | _ \ False" +abbreviation opt_tcb_at' :: "machine_word option \ kernel_state \ bool" where + "opt_tcb_at' \ none_top tcb_at'" + +lemmas opt_tcb_at'_def = none_top_def + definition valid_tcb' :: "Structures_H.tcb \ kernel_state \ bool" where @@ -488,7 +510,9 @@ where \ valid_bound_ntfn' (tcbBoundNotification t) s \ tcbDomain t \ maxDomain \ tcbPriority t \ maxPriority - \ tcbMCP t \ maxPriority" + \ tcbMCP t \ maxPriority + \ opt_tcb_at' (tcbSchedPrev t) s + \ opt_tcb_at' (tcbSchedNext t) s" definition valid_ep' :: "Structures_H.endpoint \ kernel_state \ bool" @@ -866,10 +890,15 @@ where | "runnable' (Structures_H.BlockedOnSend a b c d e) = False" | "runnable' (Structures_H.BlockedOnNotification x) = False" -definition - inQ :: "domain \ priority \ tcb \ bool" -where - "inQ d p tcb \ tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d" +definition inQ :: "domain \ priority \ tcb \ bool" where + "inQ d p tcb \ tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d" + +lemma inQ_implies_tcbQueueds_of: + "(inQ domain priority |< tcbs_of' s') tcbPtr \ (tcbQueued |< tcbs_of' s') tcbPtr" + by (clarsimp simp: opt_map_def opt_pred_def inQ_def split: option.splits) + +defs ready_qs_runnable_def: + "ready_qs_runnable s \ \t. obj_at' tcbQueued t s \ st_tcb_at' runnable' t s" definition (* for given domain and priority, the scheduler bitmap indicates a thread is in the queue *) @@ -879,15 +908,6 @@ where "bitmapQ d p s \ ksReadyQueuesL1Bitmap s d !! prioToL1Index p \ ksReadyQueuesL2Bitmap s (d, invertL1Index (prioToL1Index p)) !! unat (p && mask wordRadix)" - -definition - valid_queues_no_bitmap :: "kernel_state \ bool" -where - "valid_queues_no_bitmap \ \s. - (\d p. 
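(* Illustrative sketch, not part of the patch: sym_heap_sched_pointers above asks
   that the tcbSchedNext and tcbSchedPrev projections of the TCB heap are converse
   partial maps, i.e. the ready queues form well-formed doubly linked lists. The
   definition below is a stand-alone approximation of sym_heap from Lib.Heap_List
   (hypothetical toy_ names; the library definition may differ in detail). *)
definition toy_sym_heap :: "('a => 'a option) => ('a => 'a option) => bool" where
  "toy_sym_heap nexts prevs = (\<forall>p q. (nexts p = Some q) = (prevs q = Some p))"

lemma toy_sym_heap_empty:
  "toy_sym_heap (\<lambda>_. None) (\<lambda>_. None)"
  by (simp add: toy_sym_heap_def)

lemma toy_sym_heap_two_node_chain:
  "toy_sym_heap (\<lambda>p. if p = a then Some b else None)
                (\<lambda>q. if q = b then Some a else None)"
  by (auto simp: toy_sym_heap_def)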
(\t \ set (ksReadyQueues s (d, p)). obj_at' (inQ d p and runnable' \ tcbState) t s) - \ distinct (ksReadyQueues s (d, p)) - \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - definition (* A priority is used as a two-part key into the bitmap structure. If an L2 bitmap entry is set without an L1 entry, updating the L1 entry (shared by many priorities) may make @@ -911,31 +931,62 @@ where \d i. ksReadyQueuesL1Bitmap s d !! i \ ksReadyQueuesL2Bitmap s (d, invertL1Index i) \ 0 \ i < l2BitmapSize" -definition - valid_bitmapQ :: "kernel_state \ bool" -where - "valid_bitmapQ \ \s. (\d p. bitmapQ d p s \ ksReadyQueues s (d,p) \ [])" +definition valid_bitmapQ :: "kernel_state \ bool" where + "valid_bitmapQ \ \s. \d p. bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p))" -definition - valid_queues :: "kernel_state \ bool" -where - "valid_queues \ \s. valid_queues_no_bitmap s \ valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ bitmapQ_no_L1_orphans s" +definition valid_bitmaps :: "kernel_state \ bool" where + "valid_bitmaps \ \s. valid_bitmapQ s \ bitmapQ_no_L2_orphans s \ bitmapQ_no_L1_orphans s" -definition - (* when a thread gets added to / removed from a queue, but before bitmap updated *) - valid_bitmapQ_except :: "domain \ priority \ kernel_state \ bool" -where +lemma valid_bitmaps_valid_bitmapQ[elim!]: + "valid_bitmaps s \ valid_bitmapQ s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_bitmapQ_no_L2_orphans[elim!]: + "valid_bitmaps s \ bitmapQ_no_L2_orphans s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_bitmapQ_no_L1_orphans[elim!]: + "valid_bitmaps s \ bitmapQ_no_L1_orphans s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_lift: + assumes prq: "\P. f \\s. P (ksReadyQueues s)\" + assumes prqL1: "\P. f \\s. P (ksReadyQueuesL1Bitmap s)\" + assumes prqL2: "\P. f \\s. P (ksReadyQueuesL2Bitmap s)\" + shows "f \valid_bitmaps\" + unfolding valid_bitmaps_def valid_bitmapQ_def bitmapQ_def + bitmapQ_no_L1_orphans_def bitmapQ_no_L2_orphans_def + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +(* when a thread gets added to / removed from a queue, but before bitmap updated *) +definition valid_bitmapQ_except :: "domain \ priority \ kernel_state \ bool" where "valid_bitmapQ_except d' p' \ \s. - (\d p. (d \ d' \ p \ p') \ (bitmapQ d p s \ ksReadyQueues s (d,p) \ []))" + \d p. (d \ d' \ p \ p') \ (bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p)))" lemmas bitmapQ_defs = valid_bitmapQ_def valid_bitmapQ_except_def bitmapQ_def bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def -definition - valid_queues' :: "kernel_state \ bool" -where - "valid_queues' \ \s. \d p t. obj_at' (inQ d p) t s \ t \ set (ksReadyQueues s (d, p))" +\ \ + The tcbSchedPrev and tcbSchedNext fields of a TCB are used only to indicate membership in + one of the ready queues. \ +definition valid_sched_pointers_2 :: + "(obj_ref \ obj_ref) \ (obj_ref \ obj_ref) \ (obj_ref \ bool) \ bool " + where + "valid_sched_pointers_2 prevs nexts ready \ + \ptr. 
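(* Illustrative sketch, not part of the patch: valid_bitmapQ above ties each
   priority bitmap bit to non-emptiness of the corresponding ready queue (now
   phrased via tcbQueueEmpty rather than a list check). The single-level toy below
   (hypothetical names) captures the bit-set-iff-queue-nonempty invariant and its
   preservation by an enqueue that also sets the bit; the real structure splits
   the priority into L1/L2 indices via prioToL1Index and wordRadix. *)
definition toy_valid_bitmapQ :: "(nat => bool) => (nat => 'a list) => bool" where
  "toy_valid_bitmapQ bmp qs = (\<forall>p. bmp p = (qs p ~= []))"

lemma toy_enqueue_preserves_bitmapQ:
  "toy_valid_bitmapQ bmp qs
   ==> toy_valid_bitmapQ (bmp(p := True)) (qs(p := tcb # qs p))"
  by (auto simp: toy_valid_bitmapQ_def)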
prevs ptr \ None \ nexts ptr \ None \ ready ptr" + +abbreviation valid_sched_pointers :: "kernel_state \ bool" where + "valid_sched_pointers s \ + valid_sched_pointers_2 (tcbSchedPrevs_of s) (tcbSchedNexts_of s) (tcbQueued |< tcbs_of' s)" + +lemmas valid_sched_pointers_def = valid_sched_pointers_2_def + +lemma valid_sched_pointersD: + "\valid_sched_pointers s; \ (tcbQueued |< tcbs_of' s) t\ + \ tcbSchedPrevs_of s t = None \ tcbSchedNexts_of s t = None" + by (fastforce simp: valid_sched_pointers_def in_opt_pred opt_map_red) definition tcb_in_cur_domain' :: "32 word \ kernel_state \ bool" where "tcb_in_cur_domain' t \ \s. obj_at' (\tcb. ksCurDomain s = tcbDomain tcb) t s" @@ -1152,7 +1203,7 @@ definition valid_state' :: "kernel_state \ bool" where "valid_state' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) + \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s @@ -1161,7 +1212,9 @@ where \ valid_irq_states' s \ valid_machine_state' s \ irqs_masked' s - \ valid_queues' s + \ sym_heap_sched_pointers s + \ valid_sched_pointers s + \ valid_bitmaps s \ ct_not_inQ s \ ct_idle_or_in_cur_domain' s \ valid_pde_mappings' s @@ -1213,6 +1266,11 @@ definition abbreviation "active' st \ st = Structures_H.Running \ st = Structures_H.Restart" +lemma runnable_eq_active': "runnable' = active'" + apply (rule ext) + apply (case_tac st, simp_all) + done + abbreviation "simple' st \ st = Structures_H.Inactive \ st = Structures_H.Running \ @@ -1228,11 +1286,12 @@ abbreviation abbreviation(input) "all_invs_but_sym_refs_ct_not_inQ' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s \ valid_pde_mappings' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s \ valid_pde_mappings' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1240,12 +1299,13 @@ abbreviation(input) abbreviation(input) "all_invs_but_ct_not_inQ' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) + \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s \ valid_pde_mappings' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s \ valid_pde_mappings' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1261,12 +1321,13 @@ lemma all_invs_but_not_ct_inQ_check': definition "all_invs_but_ct_idle_or_in_cur_domain' \ \s. 
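(* Illustrative sketch, not part of the patch: valid_sched_pointers above says a
   TCB may carry a tcbSchedPrev or tcbSchedNext pointer only while it is flagged
   tcbQueued; valid_sched_pointersD is its contrapositive. The toy version below
   (hypothetical names) isolates that logical step. *)
definition toy_sched_ptrs_ok ::
  "('a => 'b option) => ('a => 'b option) => ('a => bool) => bool" where
  "toy_sched_ptrs_ok prevs nexts queued =
     (\<forall>p. prevs p ~= None | nexts p ~= None --> queued p)"

lemma toy_sched_ptrs_okD:
  "[| toy_sched_ptrs_ok prevs nexts queued; ~ queued p |]
   ==> prevs p = None & nexts p = None"
  by (fastforce simp: toy_sched_ptrs_ok_def)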
valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) + \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_not_inQ s \ valid_pde_mappings' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ cur_tcb' s \ ct_not_inQ s \ valid_pde_mappings' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -2990,9 +3051,9 @@ lemma sch_act_wf_arch [simp]: "sch_act_wf sa (ksArchState_update f s) = sch_act_wf sa s" by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) -lemma valid_queues_arch [simp]: - "valid_queues (ksArchState_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) +lemma valid_bitmaps_arch[simp]: + "valid_bitmaps (ksArchState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma if_unsafe_then_cap_arch' [simp]: "if_unsafe_then_cap' (ksArchState_update f s) = if_unsafe_then_cap' s" @@ -3010,22 +3071,14 @@ lemma sch_act_wf_machine_state [simp]: "sch_act_wf sa (ksMachineState_update f s) = sch_act_wf sa s" by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) -lemma valid_queues_machine_state [simp]: - "valid_queues (ksMachineState_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_arch' [simp]: - "valid_queues' (ksArchState_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - -lemma valid_queues_machine_state' [simp]: - "valid_queues' (ksMachineState_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - lemma valid_irq_node'_machine_state [simp]: "valid_irq_node' x (ksMachineState_update f s) = valid_irq_node' x s" by (simp add: valid_irq_node'_def) +lemma valid_bitmaps_machine_state[simp]: + "valid_bitmaps (ksMachineState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + (* these should be reasonable safe for automation because of the 0 pattern *) lemma no_0_ko_wp' [elim!]: "\ ko_wp_at' Q 0 s; no_0_obj' s \ \ P" @@ -3099,19 +3152,6 @@ lemma objBitsT_koTypeOf : pteBits_def pdeBits_def) done -lemma valid_queues_obj_at'D: - "\ t \ set (ksReadyQueues s (d, p)); valid_queues s \ - \ obj_at' (inQ d p) t s" - apply (unfold valid_queues_def valid_queues_no_bitmap_def) - apply (elim conjE) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp) - done - lemma obj_at'_and: "obj_at' (P and P') t s = (obj_at' P t s \ obj_at' P' t s)" by (rule iffI, (clarsimp simp: obj_at'_def)+) @@ -3149,16 +3189,6 @@ lemma not_pred_tcb_at'_strengthen: "pred_tcb_at' f (Not \ P) p s \ \ pred_tcb_at' f P p s" by (clarsimp simp: pred_tcb_at'_def obj_at'_def) -lemma valid_queues_no_bitmap_def': - "valid_queues_no_bitmap = - (\s. \d p. (\t\set (ksReadyQueues s (d, p)). 
- obj_at' (inQ d p) t s \ st_tcb_at' runnable' t s) \ - distinct (ksReadyQueues s (d, p)) \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - apply (rule ext, rule iffI) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_and pred_tcb_at'_def o_def - elim!: obj_at'_weakenE)+ - done - lemma valid_refs'_cteCaps: "valid_refs' S (ctes_of s) = (\c \ ran (cteCaps_of s). S \ capRange c = {})" by (fastforce simp: valid_refs'_def cteCaps_of_def elim!: ranE) @@ -3239,8 +3269,16 @@ lemma invs_sch_act_wf' [elim!]: "invs' s \ sch_act_wf (ksSchedulerAction s) s" by (simp add: invs'_def valid_state'_def) -lemma invs_queues [elim!]: - "invs' s \ valid_queues s" +lemma invs_valid_bitmaps[elim!]: + "invs' s \ valid_bitmaps s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sym_heap_sched_pointers[elim!]: + "invs' s \ sym_heap_sched_pointers s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_valid_sched_pointers[elim!]: + "invs' s \ valid_sched_pointers s" by (simp add: invs'_def valid_state'_def) lemma invs_valid_idle'[elim!]: @@ -3257,7 +3295,7 @@ lemma invs'_invs_no_cicd: lemma invs'_bitmapQ_no_L1_orphans: "invs' s \ bitmapQ_no_L1_orphans s" - by (drule invs_queues, simp add: valid_queues_def) + by (simp add: invs'_def valid_state'_def valid_bitmaps_def) lemma invs_ksCurDomain_maxDomain' [elim!]: "invs' s \ ksCurDomain s \ maxDomain" @@ -3282,24 +3320,22 @@ lemma invs_no_0_obj'[elim!]: lemma invs'_gsCNodes_update[simp]: "invs' (gsCNodes_update f s') = invs' s'" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) - apply (cases "ksSchedulerAction s'") - apply (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def ct_not_inQ_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) done lemma invs'_gsUserPages_update[simp]: "invs' (gsUserPages_update f s') = invs' s'" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) - apply (cases "ksSchedulerAction s'") - apply (simp_all add: ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def ct_not_inQ_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) done lemma pred_tcb'_neq_contra: @@ -3315,7 +3351,7 @@ lemma invs'_ksDomScheduleIdx: unfolding invs'_def valid_state'_def by clarsimp lemma valid_bitmap_valid_bitmapQ_exceptE: - "\ valid_bitmapQ_except d p s ; (bitmapQ d p s \ ksReadyQueues s (d,p) \ []) ; + "\ valid_bitmapQ_except d p s; bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p)); bitmapQ_no_L2_orphans s \ \ valid_bitmapQ s" unfolding valid_bitmapQ_def valid_bitmapQ_except_def @@ -3393,4 
+3429,50 @@ add_upd_simps "invs' (gsUntypedZeroRanges_update f s)" (obj_at'_real_def) declare upd_simps[simp] +lemma neq_out_intv: + "\ a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" + by simp + +lemma rule_out_intv: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a \ b \ + \ b \ mask_range a (objBitsKO obj)" + apply (drule(1) pspace_distinctD') + apply (subst (asm) ps_clear_def) + apply (drule_tac x = b in orthD2) + apply fastforce + apply (drule neq_out_intv) + apply (simp add: mask_def add_diff_eq) + apply (simp add: mask_def add_diff_eq) + done + +lemma ptr_range_mask_range: + "{ptr..ptr + 2 ^ bits - 1} = mask_range ptr bits" + unfolding mask_def + by simp + +lemma distinct_obj_range'_not_subset: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ \ obj_range' b obj' \ obj_range' a obj" + unfolding obj_range'_def + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule (3) rule_out_intv) + by (fastforce simp: is_aligned_no_overflow_mask ptr_range_mask_range word_add_increasing) + +lemma obj_range'_disjoint: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ obj_range' a obj \ obj_range' b obj' = {}" + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule_tac p=a and p'=b in aligned_mask_range_cases) + apply assumption + apply (metis add_mask_fold distinct_obj_range'_not_subset obj_range'_def) + done + end diff --git a/proof/refine/ARM/IpcCancel_R.thy b/proof/refine/ARM/IpcCancel_R.thy index 1c962b4619..3ba09d491b 100644 --- a/proof/refine/ARM/IpcCancel_R.thy +++ b/proof/refine/ARM/IpcCancel_R.thy @@ -48,25 +48,6 @@ lemma set_ep_pred_tcb_at' [wp]: apply (simp add: updateObject_default_def in_monad projectKOs) done -(* valid_queues is too strong *) -definition valid_inQ_queues :: "KernelStateData_H.kernel_state \ bool" where - "valid_inQ_queues \ - \s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) \ distinct (ksReadyQueues s (d, p))" - -lemma valid_inQ_queues_ksSchedulerAction_update[simp]: - "valid_inQ_queues (ksSchedulerAction_update f s) = valid_inQ_queues s" - by (simp add: valid_inQ_queues_def) - -lemma valid_inQ_queues_ksReadyQueuesL1Bitmap_upd[simp]: - "valid_inQ_queues (ksReadyQueuesL1Bitmap_update f s) = valid_inQ_queues s" - unfolding valid_inQ_queues_def - by simp - -lemma valid_inQ_queues_ksReadyQueuesL2Bitmap_upd[simp]: - "valid_inQ_queues (ksReadyQueuesL2Bitmap_update f s) = valid_inQ_queues s" - unfolding valid_inQ_queues_def - by simp - defs capHasProperty_def: "capHasProperty ptr P \ cte_wp_at' (\c. P (cteCap c)) ptr" end @@ -83,11 +64,6 @@ locale delete_one_conc_pre = "\pspace_distinct'\ cteDeleteOne slot \\rv. pspace_distinct'\" assumes delete_one_it: "\P. \\s. P (ksIdleThread s)\ cteDeleteOne cap \\rv s. P (ksIdleThread s)\" - assumes delete_one_queues: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cteDeleteOne sl \\rv. Invariants_H.valid_queues\" - assumes delete_one_inQ_queues: - "\valid_inQ_queues\ cteDeleteOne sl \\rv. valid_inQ_queues\" assumes delete_one_sch_act_simple: "\sch_act_simple\ cteDeleteOne sl \\rv. sch_act_simple\" assumes delete_one_sch_act_not: @@ -105,7 +81,7 @@ lemma (in delete_one_conc_pre) cancelIPC_simple[wp]: "\\\ cancelIPC t \\rv. 
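(* Illustrative sketch, not part of the patch: obj_range'_disjoint above states
   that two distinct objects in an aligned, distinct PSpace occupy disjoint
   address ranges. The toy lemma below shows the arithmetic core on nat intervals
   for two adjacent 8-byte-aligned blocks. *)
lemma toy_aligned_blocks_disjoint:
  "({0..7} :: nat set) \<inter> {8..15} = {}"
  by auto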
st_tcb_at' simple' t\" apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def cong: Structures_H.thread_state.case_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (rule hoare_pre) apply (wpc | wp sts_st_tcb_at'_cases hoare_vcg_conj_lift @@ -343,6 +319,7 @@ lemma cancelSignal_corres: apply fastforce apply (clarsimp simp: valid_obj_def valid_tcb_def valid_tcb_state_def) apply (drule sym, simp add: obj_at_def) + apply fastforce apply (clarsimp simp: conj_comms pred_tcb_at' cong: conj_cong) apply (rule conjI) apply (simp add: pred_tcb_at'_def) @@ -547,12 +524,12 @@ lemma (in delete_one) cancelIPC_ReplyCap_corres: and Q'="\_. invs' and st_tcb_at' awaiting_reply' t" in corres_underlying_split) apply (rule corres_guard_imp) - apply (rule threadset_corresT) + apply (rule threadset_corresT; simp?) apply (simp add: tcb_relation_def fault_rel_optionation_def) apply (simp add: tcb_cap_cases_def) apply (simp add: tcb_cte_cases_def) apply (simp add: exst_same_def) - apply (clarsimp simp: st_tcb_at_tcb_at) + apply (fastforce simp: st_tcb_at_tcb_at) apply clarsimp defer apply (wp thread_set_invs_trivial thread_set_no_change_tcb_state @@ -639,7 +616,7 @@ lemma (in delete_one) cancel_ipc_corres: apply (rule hoare_strengthen_post) apply (rule gts_sp'[where P="\"]) apply (clarsimp elim!: pred_tcb'_weakenE) - apply simp + apply fastforce apply simp done @@ -671,16 +648,15 @@ lemma sch_act_simple_not_t[simp]: "sch_act_simple s \ sch_act_no context begin interpretation Arch . (*FIXME: arch_split*) +crunches setNotification + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift) + lemma cancelSignal_invs': "\invs' and st_tcb_at' (\st. st = BlockedOnNotification ntfn) t and sch_act_not t\ cancelSignal t ntfn \\rv. invs'\" proof - - have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ - \ \x. t \ set (ksReadyQueues s x)" - apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def - valid_queues_no_bitmap_def) - apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ - done have NTFNSN: "\ntfn ntfn'. \\s. sch_act_not (ksCurThread s) s \ setNotification ntfn ntfn' \\_ s. 
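(* Illustrative sketch, not part of the patch: the proofs above now apply bind_wp
   where they previously applied hoare_seq_ext; both express the usual weakest-
   precondition rule for sequential composition: from {P} f {R} and, for every
   result rv, {R rv} g rv {Q}, conclude {P} f >>= g {Q}. The deterministic toy
   state monad below (hypothetical names, a simplification of the actual
   nondeterministic monad) shows the shape of that rule. *)
definition toy_valid ::
  "('s => bool) => ('s => 'a * 's) => ('a => 's => bool) => bool" where
  "toy_valid P f Q = (\<forall>s. P s --> Q (fst (f s)) (snd (f s)))"

definition toy_bind ::
  "('s => 'a * 's) => ('a => 's => 'b * 's) => 's => 'b * 's" where
  "toy_bind f g s = g (fst (f s)) (snd (f s))"

lemma toy_bind_wp:
  "[| \<forall>rv. toy_valid (R rv) (g rv) Q; toy_valid P f R |]
   ==> toy_valid P (toy_bind f g) Q"
  unfolding toy_valid_def toy_bind_def by blast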
sch_act_not (ksCurThread s) s\" @@ -691,20 +667,19 @@ lemma cancelSignal_invs': show ?thesis apply (simp add: cancelSignal_def invs'_def valid_state'_def Let_def) apply (wp valid_irq_node_lift sts_sch_act' irqs_masked_lift - hoare_vcg_all_lift [OF setNotification_ksQ] sts_valid_queues + hoare_vcg_all_lift setThreadState_ct_not_inQ NTFNSN - hoare_vcg_all_lift setNotification_ksQ + hoare_vcg_all_lift | simp add: valid_tcb_state'_def list_case_If split del: if_split)+ prefer 2 apply assumption apply (rule hoare_strengthen_post) apply (rule get_ntfn_sp') + apply (rename_tac rv s) apply (clarsimp simp: pred_tcb_at') - apply (frule NIQ) - apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) apply (rule conjI) apply (clarsimp simp: valid_ntfn'_def) - apply (case_tac "ntfnObj r", simp_all add: isWaitingNtfn_def) + apply (case_tac "ntfnObj rv", simp_all add: isWaitingNtfn_def) apply (frule ko_at_valid_objs') apply (simp add: valid_pspace_valid_objs') apply (clarsimp simp: projectKO_opt_ntfn split: kernel_object.splits) @@ -727,7 +702,7 @@ lemma cancelSignal_invs': split: ntfn.splits) apply (rule conjI, clarsimp elim!: if_live_state_refsE) apply (fastforce simp: sym_refs_def dest!: idle'_no_refs) - apply (case_tac "ntfnObj r", simp_all) + apply (case_tac "ntfnObj rv", simp_all) apply (frule obj_at_valid_objs', clarsimp) apply (clarsimp simp: projectKOs valid_obj'_def valid_ntfn'_def) apply (rule conjI, clarsimp split: option.splits) @@ -741,9 +716,10 @@ lemma cancelSignal_invs': set_eq_subset) apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def set_eq_subset) + apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp elim!: if_live_state_refsE) apply (rule conjI) - apply (case_tac "ntfnBoundTCB r") + apply (case_tac "ntfnBoundTCB rv") apply (clarsimp elim!: if_live_state_refsE)+ apply (rule conjI, clarsimp split: option.splits) apply (clarsimp dest!: idle'_no_refs) @@ -809,23 +785,25 @@ lemma setEndpoint_ct_not_inQ[wp]: done lemma setEndpoint_ksDomScheduleIdx[wp]: - "\\s. P (ksDomScheduleIdx s)\ setEndpoint ptr ep \\_ s. P (ksDomScheduleIdx s)\" + "setEndpoint ptr ep \\s. P (ksDomScheduleIdx s)\" apply (simp add: setEndpoint_def setObject_def split_def) apply (wp updateObject_default_inv | simp)+ done + end +crunches setEndpoint + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift simp: updateObject_default_def) + lemma (in delete_one_conc) cancelIPC_invs[wp]: shows "\tcb_at' t and invs'\ cancelIPC t \\rv. invs'\" proof - have P: "\xs v f. (case xs of [] \ return v | y # ys \ return (f (y # ys))) = return (case xs of [] \ v | y # ys \ f xs)" by (clarsimp split: list.split) - have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ - \ \x. t \ set (ksReadyQueues s x)" - apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ - done have EPSCHN: "\eeptr ep'. \\s. sch_act_not (ksCurThread s) s\ setEndpoint eeptr ep' \\_ s. 
sch_act_not (ksCurThread s) s\" @@ -850,8 +828,8 @@ proof - apply (wp valid_irq_node_lift valid_global_refs_lift' valid_arch_state_lift' irqs_masked_lift sts_sch_act' hoare_vcg_all_lift [OF setEndpoint_ksQ] - sts_valid_queues setThreadState_ct_not_inQ EPSCHN - hoare_vcg_all_lift setNotification_ksQ + setThreadState_ct_not_inQ EPSCHN + hoare_vcg_all_lift | simp add: valid_tcb_state'_def split del: if_split | wpc)+ prefer 2 @@ -859,14 +837,14 @@ proof - apply (rule hoare_strengthen_post [OF get_ep_sp']) apply (clarsimp simp: pred_tcb_at' fun_upd_def[symmetric] conj_comms split del: if_split cong: if_cong) + apply (rule conjI, clarsimp simp: valid_pspace'_def) + apply (rule conjI, clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) apply (frule obj_at_valid_objs', clarsimp) apply (clarsimp simp: projectKOs valid_obj'_def) apply (rule conjI) apply (clarsimp simp: obj_at'_def valid_ep'_def projectKOs dest!: pred_tcb_at') - apply (frule NIQ) - apply (erule pred_tcb'_weakenE, fastforce) apply (clarsimp, rule conjI) apply (auto simp: pred_tcb_at'_def obj_at'_def)[1] apply (rule conjI) @@ -913,7 +891,7 @@ proof - show ?thesis apply (simp add: cancelIPC_def crunch_simps cong: if_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (case_tac state, simp_all add: isTS_defs) apply (safe intro!: hoare_weaken_pre[OF Q] @@ -956,8 +934,8 @@ lemma (in delete_one_conc_pre) cancelIPC_st_tcb_at: \\rv. st_tcb_at' P t\" apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (case_tac x, simp_all add: isTS_defs list_case_If) + apply (rule bind_wp [OF _ gts_sp']) + apply (case_tac rv, simp_all add: isTS_defs list_case_If) apply (wp sts_st_tcb_at'_cases delete_one_st_tcb_at threadSet_pred_tcb_no_state cancelSignal_st_tcb_at hoare_drop_imps @@ -1029,9 +1007,9 @@ lemma (in delete_one_conc_pre) cancelIPC_tcb_at_runnable': (is "\?PRE\ _ \_\") apply (clarsimp simp: cancelIPC_def Let_def) apply (case_tac "t'=t") - apply (rule_tac B="\st. st_tcb_at' runnable' t and K (runnable' st)" - in hoare_seq_ext) - apply(case_tac x; simp) + apply (rule_tac Q'="\st. st_tcb_at' runnable' t and K (runnable' st)" + in bind_wp) + apply(case_tac rv; simp) apply (wpsimp wp: sts_pred_tcb_neq')+ apply (rule_tac Q="\rv. ?PRE" in hoare_post_imp, fastforce) apply (wp cteDeleteOne_tcb_at_runnable' @@ -1053,31 +1031,6 @@ apply (wp hoare_vcg_conj_lift delete_one_ksCurDomain | simp add: getThreadReplySlot_def o_def if_fun_split)+ done -(* FIXME move *) -lemma tcbSchedEnqueue_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ tcbSchedEnqueue t \\_. obj_at' P t'\" -apply (simp add: tcbSchedEnqueue_def unless_def) -apply (wp threadGet_wp | simp)+ -apply (clarsimp simp: obj_at'_def) -apply (case_tac obja) -apply fastforce -done - -(* FIXME move *) -lemma setThreadState_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ setThreadState st t \\_. obj_at' P t'\" -apply (simp add: setThreadState_def rescheduleRequired_def) -apply (wp hoare_vcg_conj_lift tcbSchedEnqueue_not_st - | wpc - | rule hoare_drop_imps - | simp)+ -apply (clarsimp simp: obj_at'_def) -apply (case_tac obj) -apply fastforce -done - (* FIXME move *) lemma setBoundNotification_not_ntfn: "(\tcb ntfn. 
P (tcb\tcbBoundNotification := ntfn\) \ P tcb) @@ -1089,15 +1042,6 @@ lemma setBoundNotification_not_ntfn: | simp)+ done -(* FIXME move *) -lemma setThreadState_tcb_in_cur_domain'[wp]: - "\tcb_in_cur_domain' t'\ setThreadState st t \\_. tcb_in_cur_domain' t'\" -apply (simp add: tcb_in_cur_domain'_def) -apply (rule hoare_pre) -apply wps -apply (wp setThreadState_not_st | simp)+ -done - lemma setBoundNotification_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ setBoundNotification st t \\_. tcb_in_cur_domain' t'\" apply (simp add: tcb_in_cur_domain'_def) @@ -1106,30 +1050,33 @@ lemma setBoundNotification_tcb_in_cur_domain'[wp]: apply (wp setBoundNotification_not_ntfn | simp)+ done -lemma cancelSignal_tcb_obj_at': - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ cancelSignal t word \\_. obj_at' P t'\" -apply (simp add: cancelSignal_def setNotification_def) -apply (wp setThreadState_not_st getNotification_wp | wpc | simp)+ -done + +lemma setThreadState_tcbDomain_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding setThreadState_def + by wpsimp + +crunches cancelSignal + for tcbDomain_obj_at'[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t'" + (wp: crunch_wps) lemma (in delete_one_conc_pre) cancelIPC_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelIPC t \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" -apply (simp add: cancelIPC_def Let_def) -apply (wp hoare_vcg_conj_lift - setThreadState_not_st delete_one_tcbDomain_obj_at' cancelSignal_tcb_obj_at' - | wpc - | rule hoare_drop_imps - | simp add: getThreadReplySlot_def o_def if_fun_split)+ -done + apply (simp add: cancelIPC_def Let_def) + apply (wp hoare_vcg_conj_lift + delete_one_tcbDomain_obj_at' + | wpc + | rule hoare_drop_imps + | simp add: getThreadReplySlot_def o_def if_fun_split)+ + done lemma (in delete_one_conc_pre) cancelIPC_tcb_in_cur_domain': "\tcb_in_cur_domain' t'\ cancelIPC t \\_. tcb_in_cur_domain' t'\" -apply (simp add: tcb_in_cur_domain'_def) -apply (rule hoare_pre) -apply wps -apply (wp cancelIPC_tcbDomain_obj_at' | simp)+ -done + apply (simp add: tcb_in_cur_domain'_def) + apply (rule hoare_pre) + apply wps + apply (wp cancelIPC_tcbDomain_obj_at' | simp)+ + done lemma (in delete_one_conc_pre) cancelIPC_sch_act_not: "\sch_act_not t'\ cancelIPC t \\_. sch_act_not t'\" @@ -1155,7 +1102,7 @@ text \The suspend operation, significant as called from delete\ lemma rescheduleRequired_weak_sch_act_wf: "\\\ rescheduleRequired \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" apply (simp add: rescheduleRequired_def setSchedulerAction_def) - apply (wp hoare_post_taut | simp add: weak_sch_act_wf_def)+ + apply (wp hoare_TrueI | simp add: weak_sch_act_wf_def)+ done lemma sts_weak_sch_act_wf[wp]: @@ -1163,7 +1110,7 @@ lemma sts_weak_sch_act_wf[wp]: \ (ksSchedulerAction s = SwitchToThread t \ runnable' st)\ setThreadState st t \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" - including no_pre + including classic_wp_pre apply (simp add: setThreadState_def) apply (wp rescheduleRequired_weak_sch_act_wf) apply (rule_tac Q="\_ s. 
weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp, simp) @@ -1224,190 +1171,54 @@ lemma setNotification_weak_sch_act_wf[wp]: lemmas ipccancel_weak_sch_act_wfs = weak_sch_act_wf_lift[OF _ setCTE_pred_tcb_at'] -lemma tcbSchedDequeue_corres': - "corres dc (is_etcb_at t) (tcb_at' t and valid_inQ_queues) (tcb_sched_action (tcb_sched_dequeue) t) (tcbSchedDequeue t)" - apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) - apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and valid_inQ_queues and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (wp, simp) - apply (case_tac queued) - defer - apply (simp add: unless_def when_def) - apply (rule corres_no_failI) - apply (wp) - apply (clarsimp simp: in_monad ethread_get_def get_etcb_def set_tcb_queue_def is_etcb_at_def state_relation_def gets_the_def gets_def get_def return_def bind_def assert_opt_def get_tcb_queue_def modify_def put_def) - apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") - prefer 2 - apply (force simp: tcb_sched_dequeue_def valid_inQ_queues_def - ready_queues_relation_def obj_at'_def inQ_def projectKO_eq project_inject) - apply (simp add: ready_queues_relation_def) - apply (simp add: unless_def when_def) - apply (rule corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (simp split del: if_split) - apply (rule corres_split_eqr) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split_eqr[OF getQueue_corres]) - apply (simp split del: if_split) - apply (subst bind_return_unit, rule corres_split[where r'=dc]) - apply (simp add: tcb_sched_dequeue_def) - apply (rule setQueue_corres) - apply (rule corres_split_noop_rhs) - apply (clarsimp, rule removeFromBitmap_corres_noop) - apply (simp add: dc_def[symmetric]) - apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] - apply (wp | simp)+ - done - -lemma setQueue_valid_inQ_queues: - "\valid_inQ_queues - and (\s. \t \ set ts. obj_at' (inQ d p) t s) - and K (distinct ts)\ - setQueue d p ts - \\_. valid_inQ_queues\" - apply (simp add: setQueue_def valid_inQ_queues_def) - apply wp - apply clarsimp - done - -lemma threadSet_valid_inQ_queues: - "\valid_inQ_queues and (\s. \d p. (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) - \ obj_at' (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) t s - \ t \ set (ksReadyQueues s (d, p)))\ - threadSet f t - \\rv. valid_inQ_queues\" - apply (simp add: threadSet_def) - apply wp - apply (simp add: valid_inQ_queues_def pred_tcb_at'_def) - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_inQ_queues_def pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def projectKOs) - apply (fastforce) - done - -(* reorder the threadSet before the setQueue, useful for lemmas that don't refer to bitmap *) -lemma setQueue_after_addToBitmap: - "(setQueue d p q >>= (\rv. (when P (addToBitmap d p)) >>= (\rv. threadSet f t))) = - (when P (addToBitmap d p) >>= (\rv. (threadSet f t) >>= (\rv. 
setQueue d p q)))" - apply (case_tac P, simp_all) - prefer 2 - apply (simp add: setQueue_after) - apply (simp add: setQueue_def when_def) - apply (subst oblivious_modify_swap) - apply (simp add: threadSet_def getObject_def setObject_def - loadObject_default_def bitmap_fun_defs - split_def projectKO_def2 alignCheck_assert - magnitudeCheck_assert updateObject_default_def) - apply (intro oblivious_bind, simp_all) - apply (clarsimp simp: bind_assoc) - done - -lemma tcbSchedEnqueue_valid_inQ_queues[wp]: - "\valid_inQ_queues\ tcbSchedEnqueue t \\_. valid_inQ_queues\" - apply (simp add: tcbSchedEnqueue_def setQueue_after_addToBitmap) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_inQ_queues and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued, simp_all add: unless_def)[1] - apply (wp setQueue_valid_inQ_queues threadSet_valid_inQ_queues threadGet_wp - hoare_vcg_const_Ball_lift - | simp add: inQ_def bitmap_fun_defs - | fastforce simp: valid_inQ_queues_def inQ_def obj_at'_def)+ - done - - (* prevents wp from splitting on the when; stronger technique than hoare_when_weak_wp - FIXME: possible to replace with hoare_when_weak_wp? - *) -definition - "removeFromBitmap_conceal d p q t \ when (null [x\q . x \ t]) (removeFromBitmap d p)" - -lemma rescheduleRequired_valid_inQ_queues[wp]: - "\valid_inQ_queues\ rescheduleRequired \\_. valid_inQ_queues\" - apply (simp add: rescheduleRequired_def) - apply wpsimp - done - -lemma sts_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setThreadState st t \\rv. valid_inQ_queues\" - apply (simp add: setThreadState_def) - apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - lemma updateObject_ep_inv: "\P\ updateObject (obj::endpoint) ko p q n \\rv. P\" by simp (rule updateObject_default_inv) -lemma sbn_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setBoundNotification ntfn t \\rv. valid_inQ_queues\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ +lemma asUser_tcbQueued_inv[wp]: + "\obj_at' (\tcb. P (tcbQueued tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbQueued tcb)) t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ done -lemma setEndpoint_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setEndpoint ptr ep \\rv. valid_inQ_queues\" - apply (unfold setEndpoint_def) - apply (rule setObject_ep_pre) - apply (simp add: valid_inQ_queues_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift setObject_queues_unchanged[OF updateObject_ep_inv]) - apply simp - done +context begin interpretation Arch . -lemma set_ntfn_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setNotification ptr ntfn \\rv. 
valid_inQ_queues\" - apply (unfold setNotification_def) - apply (rule setObject_ntfn_pre) - apply (simp add: valid_inQ_queues_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv | simp)+ - done +crunches cancel_ipc + for pspace_aligned[wp]: "pspace_aligned :: det_state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_state \ _" + (simp: crunch_simps wp: crunch_wps) -crunch valid_inQ_queues[wp]: cancelSignal valid_inQ_queues - (simp: updateObject_tcb_inv crunch_simps wp: crunch_wps) +end -lemma (in delete_one_conc_pre) cancelIPC_valid_inQ_queues[wp]: - "\valid_inQ_queues\ cancelIPC t \\_. valid_inQ_queues\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def) - apply (wp hoare_drop_imps delete_one_inQ_queues threadSet_valid_inQ_queues | wpc | simp add:if_apply_def2 Fun.comp_def)+ - apply (clarsimp simp: valid_inQ_queues_def inQ_def)+ - done +crunches asUser + for valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps) -lemma valid_queues_inQ_queues: - "Invariants_H.valid_queues s \ valid_inQ_queues s" - by (force simp: Invariants_H.valid_queues_def valid_inQ_queues_def obj_at'_def - valid_queues_no_bitmap_def) +crunches set_thread_state + for in_correct_ready_q[wp]: in_correct_ready_q + (ignore_del: set_thread_state_ext) -lemma asUser_tcbQueued_inv[wp]: - "\obj_at' (\tcb. P (tcbQueued tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbQueued tcb)) t'\" - apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) - apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ - done +crunches set_thread_state_ext + for ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps ignore_del: set_thread_state_ext) -lemma asUser_valid_inQ_queues[wp]: - "\ valid_inQ_queues \ asUser t f \\rv. valid_inQ_queues \" - unfolding valid_inQ_queues_def Ball_def - apply (wpsimp wp: hoare_vcg_all_lift) - defer - apply (wp asUser_ksQ) - apply assumption - apply (simp add: inQ_def[abs_def] obj_at'_conj) - apply (rule hoare_convert_imp) - apply (wp asUser_ksQ) - apply wp - done +lemma set_thread_state_ready_qs_distinct[wp]: + "set_thread_state ref ts \ready_qs_distinct\" + unfolding set_thread_state_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) + +lemma as_user_ready_qs_distinct[wp]: + "as_user tptr f \ready_qs_distinct\" + unfolding as_user_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) lemma (in delete_one) suspend_corres: "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) (IpcCancel_A.suspend t) (ThreadDecls_H.suspend t)" + apply (rule corres_cross_over_guard[where P'=P' and Q="tcb_at' t and P'" for P']) + apply (fastforce dest!: tcb_at_cross state_relation_pspace_relation) apply (simp add: IpcCancel_A.suspend_def Thread_H.suspend_def) apply (rule corres_guard_imp) apply (rule corres_split_nor[OF cancel_ipc_corres]) @@ -1425,17 +1236,19 @@ lemma (in delete_one) suspend_corres: apply (wpsimp simp: ARM.setRegister_def ARM.getRegister_def) apply (rule corres_return_trivial) apply (rule corres_split_nor[OF setThreadState_corres]) - apply simp - apply (rule tcbSchedDequeue_corres') - apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ - apply (rule hoare_post_imp[where Q = "\rv s. 
tcb_at t s \ is_etcb_at t s"]) - apply simp + apply wpsimp + apply (rule tcbSchedDequeue_corres, simp) + apply wp + apply (wpsimp wp: sts_valid_objs') + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def valid_tcb_state'_def)+ + apply (rule hoare_post_imp[where Q = "\rv s. einvs s \ tcb_at t s"]) + apply (simp add: invs_implies invs_strgs valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct valid_sched_def) apply wp - apply (rule hoare_post_imp[where Q = "\rv s. tcb_at' t s \ valid_inQ_queues s"]) - apply (wpsimp simp: valid_queues_inQ_queues) - apply wp+ - apply (force simp: valid_sched_def tcb_at_is_etcb_at) - apply (clarsimp simp add: invs'_def valid_state'_def valid_queues_inQ_queues) + apply (rule hoare_post_imp[where Q = "\_ s. invs' s \ tcb_at' t s"]) + apply (fastforce simp: invs'_def valid_tcb_state'_def) + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ + apply fastforce+ done lemma (in delete_one) prepareThreadDelete_corres: @@ -1460,248 +1273,8 @@ lemma (in delete_one_conc_pre) cancelIPC_it[wp]: crunch ksQ: threadGet "\s. P (ksReadyQueues s p)" -lemma tcbSchedDequeue_notksQ: - "\\s. t' \ set(ksReadyQueues s p)\ - tcbSchedDequeue t - \\_ s. t' \ set(ksReadyQueues s p)\" - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply wp+ - apply clarsimp - apply (rule_tac Q="\_ s. t' \ set(ksReadyQueues s p)" in hoare_post_imp) - apply (wp | clarsimp)+ - done - -lemma rescheduleRequired_oa_queued: - "\ (\s. P (obj_at' (\tcb. Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)) and sch_act_simple\ - rescheduleRequired - \\_ s. P (obj_at' (\tcb. Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)\" - (is "\?OAQ t' p and sch_act_simple\ _ \_\") - apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) - \ ?OAQ t' p s" in hoare_seq_ext) - including no_pre - apply (wp | clarsimp)+ - apply (case_tac x) - apply (wp | clarsimp)+ - done - -lemma setThreadState_oa_queued: - "\\s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \ - setThreadState st t - \\_ s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \" - (is "\\s. P' (?Q P s)\ _ \\_ s. P' (?Q P s)\") - proof (rule P_bool_lift [where P=P']) - show pos: - "\R. \ ?Q R \ setThreadState st t \\_. ?Q R \" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_oa_queued) - apply (simp add: sch_act_simple_def) - apply (rule_tac Q="\_. ?Q R" in hoare_post_imp, clarsimp) - apply (wp threadSet_obj_at'_strongish) - apply (clarsimp) - done - show "\\s. \ ?Q P s\ setThreadState st t \\_ s. \ ?Q P s\" - by (simp add: not_obj_at' comp_def, wp hoare_convert_imp pos) - qed - -lemma setBoundNotification_oa_queued: - "\\s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \ - setBoundNotification ntfn t - \\_ s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \" - (is "\\s. P' (?Q P s)\ _ \\_ s. P' (?Q P s)\") - proof (rule P_bool_lift [where P=P']) - show pos: - "\R. \ ?Q R \ setBoundNotification ntfn t \\_. ?Q R \" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_obj_at'_strongish) - apply (clarsimp) - done - show "\\s. \ ?Q P s\ setBoundNotification ntfn t \\_ s. 
\ ?Q P s\" - by (simp add: not_obj_at' comp_def, wp hoare_convert_imp pos) - qed - -lemma sts_valid_queues_partial: - "\Invariants_H.valid_queues and sch_act_simple\ - setThreadState st t - \\_ s. \t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p))\" - (is "\_\ _ \\_ s. \t' d p. ?OA t' d p s \ ?DISTINCT d p s \") - apply (rule_tac Q="\_ s. (\t' d p. ?OA t' d p s) \ (\d p. ?DISTINCT d p s)" - in hoare_post_imp) - apply (clarsimp) - apply (rule hoare_conjI) - apply (rule_tac Q="\s. \t' d p. - ((t'\set(ksReadyQueues s (d, p)) - \ \ (sch_act_simple s)) - \ (obj_at'(\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ st_tcb_at' runnable' t' s))" in hoare_pre_imp) - apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def - pred_tcb_at'_def obj_at'_def inQ_def) - apply (rule hoare_vcg_all_lift)+ - apply (rule hoare_convert_imp) - including no_pre - apply (wp sts_ksQ setThreadState_oa_queued hoare_impI sts_pred_tcb_neq' - | clarsimp)+ - apply (rule_tac Q="\s. \d p. ?DISTINCT d p s \ sch_act_simple s" in hoare_pre_imp) - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (wp hoare_vcg_all_lift sts_ksQ) - apply (clarsimp) - done - -lemma tcbSchedDequeue_t_notksQ: - "\\s. t \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\ - tcbSchedDequeue t - \\_ s. t \ set (ksReadyQueues s (d, p))\" - apply (rule_tac Q="(\s. t \ set (ksReadyQueues s (d, p))) - or obj_at'(\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t" - in hoare_pre_imp, clarsimp) - apply (rule hoare_pre_disj) - apply (wp tcbSchedDequeue_notksQ)[1] - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply (wp threadGet_wp)+ - apply (auto simp: obj_at'_real_def ko_wp_at'_def) - done - -lemma sts_invs_minor'_no_valid_queues: - "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st - \ (st \ Inactive \ \ idle' st \ - st' \ Inactive \ \ idle' st')) t - and (\s. t = ksIdleThread s \ idle' st) - and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) - and sch_act_simple - and invs'\ - setThreadState st t - \\_ s. (\t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. 
tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ - valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ - bitmapQ_no_L1_orphans s \ - valid_pspace' s \ - sch_act_wf (ksSchedulerAction s) s \ - sym_refs (state_refs_of' s) \ - if_live_then_nonz_cap' s \ - if_unsafe_then_cap' s \ - valid_idle' s \ - valid_global_refs' s \ - valid_arch_state' s \ - valid_irq_node' (irq_node' s) s \ - valid_irq_handlers' s \ - valid_irq_states' s \ - valid_machine_state' s \ - irqs_masked' s \ - valid_queues' s \ - ct_not_inQ s \ - ct_idle_or_in_cur_domain' s \ - valid_pde_mappings' s \ - pspace_domain_valid s \ - ksCurDomain s \ maxDomain \ - valid_dom_schedule' s \ - untyped_ranges_zero' s \ - cur_tcb' s \ - tcb_at' t s\" - apply (simp add: invs'_def valid_state'_def valid_queues_def) - apply (wp sts_valid_queues_partial sts_ksQ - setThreadState_oa_queued sts_st_tcb_at'_cases - irqs_masked_lift - valid_irq_node_lift - setThreadState_ct_not_inQ - sts_valid_bitmapQ_sch_act_simple - sts_valid_bitmapQ_no_L2_orphans_sch_act_simple - sts_valid_bitmapQ_no_L1_orphans_sch_act_simple - hoare_vcg_conj_lift hoare_vcg_imp_lift hoare_vcg_all_lift)+ - apply (clarsimp simp: disj_imp) - apply (intro conjI) - apply (clarsimp simp: valid_queues_def) - apply (rule conjI, clarsimp) - apply (drule valid_queues_no_bitmap_objD, assumption) - apply (clarsimp simp: inQ_def comp_def) - apply (rule conjI) - apply (erule obj_at'_weaken) - apply (simp add: inQ_def) - apply (clarsimp simp: st_tcb_at'_def) - apply (erule obj_at'_weaken) - apply (simp add: inQ_def) - apply (simp add: valid_queues_no_bitmap_def) - apply clarsimp - apply (clarsimp simp: st_tcb_at'_def) - apply (drule obj_at_valid_objs') - apply (clarsimp simp: valid_pspace'_def) - apply (clarsimp simp: valid_obj'_def valid_tcb'_def projectKOs) - subgoal - by (fastforce simp: valid_tcb_state'_def - split: Structures_H.thread_state.splits) - apply (clarsimp dest!: st_tcb_at_state_refs_ofD' - elim!: rsubst[where P=sym_refs] - intro!: ext) - apply (fastforce simp: valid_queues_def inQ_def pred_tcb_at' pred_tcb_at'_def - elim!: st_tcb_ex_cap'' obj_at'_weakenE)+ - done - crunch ct_idle_or_in_cur_domain'[wp]: tcbSchedDequeue ct_idle_or_in_cur_domain' - -lemma tcbSchedDequeue_invs'_no_valid_queues: - "\\s. (\t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ - valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ - bitmapQ_no_L1_orphans s \ - valid_pspace' s \ - sch_act_wf (ksSchedulerAction s) s \ - sym_refs (state_refs_of' s) \ - if_live_then_nonz_cap' s \ - if_unsafe_then_cap' s \ - valid_idle' s \ - valid_global_refs' s \ - valid_arch_state' s \ - valid_irq_node' (irq_node' s) s \ - valid_irq_handlers' s \ - valid_irq_states' s \ - valid_machine_state' s \ - irqs_masked' s \ - valid_queues' s \ - ct_not_inQ s \ - ct_idle_or_in_cur_domain' s \ - valid_pde_mappings' s \ - pspace_domain_valid s \ - ksCurDomain s \ maxDomain \ - valid_dom_schedule' s \ - untyped_ranges_zero' s \ - cur_tcb' s \ - tcb_at' t s\ - tcbSchedDequeue t - \\_. 
invs' \" - apply (simp add: invs'_def valid_state'_def) - apply (wp tcbSchedDequeue_valid_queues_weak valid_irq_handlers_lift - valid_irq_node_lift valid_irq_handlers_lift' - tcbSchedDequeue_irq_states irqs_masked_lift cur_tcb_lift - untyped_ranges_zero_lift - | clarsimp simp add: cteCaps_of_def valid_queues_def o_def)+ - apply (rule conjI) - apply (fastforce simp: obj_at'_def inQ_def st_tcb_at'_def valid_queues_no_bitmap_except_def) - apply (rule conjI, clarsimp simp: correct_queue_def) - apply (fastforce simp: valid_pspace'_def intro: obj_at'_conjI - elim: valid_objs'_maxDomain valid_objs'_maxPriority) - done - -lemmas sts_tcbSchedDequeue_invs' = - sts_invs_minor'_no_valid_queues - tcbSchedDequeue_invs'_no_valid_queues + (wp: crunch_wps) lemma asUser_sch_act_simple[wp]: "\sch_act_simple\ asUser s t \\_. sch_act_simple\" @@ -1713,11 +1286,14 @@ lemma (in delete_one_conc) suspend_invs'[wp]: "\invs' and sch_act_simple and tcb_at' t and (\s. t \ ksIdleThread s)\ ThreadDecls_H.suspend t \\rv. invs'\" apply (simp add: suspend_def) - apply (wp sts_tcbSchedDequeue_invs') - apply (simp add: updateRestartPC_def | strengthen no_refs_simple_strg')+ - prefer 2 - apply (wpsimp wp: hoare_drop_imps hoare_vcg_imp_lift' - | strengthen no_refs_simple_strg')+ + apply (wpsimp wp: sts_invs_minor' gts_wp' simp: updateRestartPC_def + | strengthen no_refs_simple_strg')+ + apply (rule_tac Q="\_. invs' and sch_act_simple and st_tcb_at' simple' t + and (\s. t \ ksIdleThread s)" + in hoare_post_imp) + apply clarsimp + apply wpsimp + apply (fastforce elim: pred_tcb'_weakenE) done lemma (in delete_one_conc_pre) suspend_tcb'[wp]: @@ -1762,109 +1338,6 @@ lemma (in delete_one_conc_pre) suspend_st_tcb_at': lemmas (in delete_one_conc_pre) suspend_makes_simple' = suspend_st_tcb_at' [where P=simple', simplified] -lemma valid_queues_not_runnable'_not_ksQ: - assumes "Invariants_H.valid_queues s" and "st_tcb_at' (Not \ runnable') t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" - using assms - apply - - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def pred_tcb_at'_def) - apply (erule_tac x=d in allE) - apply (erule_tac x=p in allE) - apply (clarsimp) - apply (drule(1) bspec) - apply (clarsimp simp: obj_at'_def) - done - -declare valid_queues_not_runnable'_not_ksQ[OF ByAssum, simp] - -lemma cancelSignal_queues[wp]: - "\Invariants_H.valid_queues and st_tcb_at' (Not \ runnable') t\ - cancelSignal t ae \\_. Invariants_H.valid_queues \" - apply (simp add: cancelSignal_def) - apply (wp sts_valid_queues) - apply (rule_tac Q="\_ s. \p. t \ set (ksReadyQueues s p)" in hoare_post_imp, simp) - apply (wp hoare_vcg_all_lift) - apply (wpc) - apply (wp)+ - apply (rule_tac Q="\_ s. Invariants_H.valid_queues s \ (\p. t \ set (ksReadyQueues s p))" in hoare_post_imp) - apply (clarsimp) - apply (wp) - apply (clarsimp) - done - -lemma (in delete_one_conc_pre) cancelIPC_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelIPC t \\rv. 
Invariants_H.valid_queues\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def - cong: Structures_H.thread_state.case_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (rule hoare_pre) - apply (wpc - | wp hoare_vcg_conj_lift delete_one_queues threadSet_valid_queues - threadSet_valid_objs' sts_valid_queues setEndpoint_ksQ - hoare_vcg_all_lift threadSet_sch_act threadSet_weak_sch_act_wf - | simp add: o_def if_apply_def2 inQ_def - | rule hoare_drop_imps - | clarsimp simp: valid_tcb'_def tcb_cte_cases_def - elim!: pred_tcb'_weakenE)+ - apply (fastforce dest: valid_queues_not_runnable'_not_ksQ elim: pred_tcb'_weakenE) - done - -(* FIXME: move to Schedule_R *) -lemma tcbSchedDequeue_nonq[wp]: - "\Invariants_H.valid_queues and tcb_at' t and K (t = t')\ - tcbSchedDequeue t \\_ s. \d p. t' \ set (ksReadyQueues s (d, p))\" - apply (rule hoare_gen_asm) - apply (simp add: tcbSchedDequeue_def) - apply (wp threadGet_wp|simp)+ - apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def obj_at'_def projectKOs inQ_def) - done - -lemma sts_ksQ_oaQ: - "\Invariants_H.valid_queues\ - setThreadState st t - \\_ s. t \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\" - (is "\_\ _ \\_. ?POST\") - proof - - have RR: "\sch_act_simple and ?POST\ rescheduleRequired \\_. ?POST\" - apply (simp add: rescheduleRequired_def) - apply (wp) - apply (clarsimp) - apply (rule_tac - Q="(\s. action = ResumeCurrentThread \ action = ChooseNewThread) and ?POST" - in hoare_pre_imp, assumption) - apply (case_tac action) - apply (clarsimp)+ - apply (wp) - apply (clarsimp simp: sch_act_simple_def) - done - show ?thesis - apply (simp add: setThreadState_def) - apply (wp RR) - apply (rule_tac Q="\_. ?POST" in hoare_post_imp) - apply (clarsimp simp add: sch_act_simple_def) - apply (wp hoare_convert_imp) - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (fastforce dest: bspec elim!: obj_at'_weakenE simp: inQ_def) - done - qed - -lemma (in delete_one_conc_pre) suspend_nonq: - "\Invariants_H.valid_queues and valid_objs' and tcb_at' t - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and (\s. t \ ksIdleThread s) and K (t = t')\ - suspend t - \\rv s. \d p. t' \ set (ksReadyQueues s (d, p))\" - apply (rule hoare_gen_asm) - apply (simp add: suspend_def unless_def) - unfolding updateRestartPC_def - apply (wp hoare_allI tcbSchedDequeue_t_notksQ sts_ksQ_oaQ) - apply wpsimp+ - done - lemma suspend_makes_inactive: "\K (t = t')\ suspend t \\rv. st_tcb_at' ((=) Inactive) t'\" apply (cases "t = t'", simp_all) @@ -1877,20 +1350,19 @@ declare setThreadState_sch_act_sane [wp] lemma tcbSchedEnqueue_sch_act_not_ct[wp]: "\\s. sch_act_not (ksCurThread s) s\ tcbSchedEnqueue t \\_ s. sch_act_not (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + by (rule hoare_weaken_pre, wps, wp, simp) lemma sts_sch_act_not_ct[wp]: "\\s. sch_act_not (ksCurThread s) s\ setThreadState st t \\_ s. sch_act_not (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + by (rule hoare_weaken_pre, wps, wp, simp) text \Cancelling all IPC in an endpoint or notification object\ lemma ep_cancel_corres_helper: - "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs) - ((\s. \t \ set list. tcb_at' t s) - and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s) - and Invariants_H.valid_queues and valid_queues' and valid_objs') + "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs and valid_queues + and pspace_aligned and pspace_distinct) + (valid_objs' and sym_heap_sched_pointers and valid_sched_pointers) (mapM_x (\t. do y \ set_thread_state t Structures_A.Restart; tcb_sched_action tcb_sched_enqueue t @@ -1899,28 +1371,34 @@ lemma ep_cancel_corres_helper: y \ setThreadState Structures_H.thread_state.Restart t; tcbSchedEnqueue t od) list)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) apply (rule_tac S="{t. (fst t = snd t) \ fst t \ set list}" in corres_mapM_x) apply clarsimp apply (rule corres_guard_imp) apply (subst bind_return_unit, rule corres_split[OF _ tcbSchedEnqueue_corres]) + apply simp + apply (rule corres_guard_imp [OF setThreadState_corres]) + apply simp + apply (simp add: valid_tcb_state_def) + apply simp apply simp - apply (rule corres_guard_imp [OF setThreadState_corres]) - apply simp - apply (simp add: valid_tcb_state_def) - apply simp - apply (wp sts_valid_queues)+ - apply (force simp: tcb_at_is_etcb_at) - apply (fastforce elim: obj_at'_weakenE) - apply ((wp hoare_vcg_const_Ball_lift | simp)+)[1] - apply (rule hoare_pre) - apply (wp hoare_vcg_const_Ball_lift - weak_sch_act_wf_lift_linear sts_st_tcb' setThreadState_not_st - sts_valid_queues tcbSchedEnqueue_not_st - | simp)+ - apply (auto elim: obj_at'_weakenE simp: valid_tcb_state'_def) + apply (wpsimp wp: sts_st_tcb_at') + apply (wpsimp wp: sts_valid_objs' | strengthen valid_objs'_valid_tcbs')+ + apply fastforce + apply (wpsimp wp: hoare_vcg_const_Ball_lift set_thread_state_runnable_valid_queues + sts_st_tcb_at' sts_valid_objs' + simp: valid_tcb_state'_def)+ done +crunches set_simple_ko + for ready_qs_distinct[wp]: ready_qs_distinct + and in_correct_ready_q[wp]: in_correct_ready_q + (rule: ready_qs_distinct_lift wp: crunch_wps) + lemma ep_cancel_corres: "corres dc (invs and valid_sched and ep_at ep) (invs' and ep_at' ep) (cancel_all_ipc ep) (cancelAllIPC ep)" @@ -1928,10 +1406,10 @@ proof - have P: "\list. corres dc (\s. (\t \ set list. tcb_at t s) \ valid_pspace s \ ep_at ep s - \ valid_etcbs s \ weak_valid_sched_action s) + \ valid_etcbs s \ weak_valid_sched_action s \ valid_queues s) (\s. (\t \ set list. tcb_at' t s) \ valid_pspace' s \ ep_at' ep s \ weak_sch_act_wf (ksSchedulerAction s) s - \ Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s) + \ valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s) (do x \ set_endpoint ep Structures_A.IdleEP; x \ mapM_x (\t. do y \ set_thread_state t Structures_A.Restart; @@ -1953,22 +1431,23 @@ proof - apply (rule ep_cancel_corres_helper) apply (rule mapM_x_wp') apply (wp weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (rule_tac R="\_ s. \x\set list. tcb_at' x s \ valid_objs' s" + apply (rule_tac R="\_ s. \x\set list. 
tcb_at' x s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s" in hoare_post_add) apply (rule mapM_x_wp') - apply (rule hoare_name_pre_state) - apply ((wp hoare_vcg_const_Ball_lift mapM_x_wp' - sts_valid_queues setThreadState_not_st sts_st_tcb' tcbSchedEnqueue_not_st - | clarsimp - | fastforce elim: obj_at'_weakenE simp: valid_tcb_state'_def)+)[2] - apply (rule hoare_name_pre_state) + apply ((wpsimp wp: hoare_vcg_const_Ball_lift mapM_x_wp' sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+)[3] + apply fastforce apply (wp hoare_vcg_const_Ball_lift set_ep_valid_objs' - | (clarsimp simp: valid_ep'_def) - | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def elim!: valid_objs_valid_tcbE))+ + | (clarsimp simp: valid_ep'_def) + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def + | strengthen valid_objs'_valid_tcbs'))+ done show ?thesis apply (simp add: cancel_all_ipc_def cancelAllIPC_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ep_sp']) apply (rule corres_guard_imp [OF getEndpoint_corres], simp+) apply (case_tac epa, simp_all add: ep_relation_def @@ -1996,6 +1475,8 @@ lemma cancelAllSignals_corres: "corres dc (invs and valid_sched and ntfn_at ntfn) (invs' and ntfn_at' ntfn) (cancel_all_signals ntfn) (cancelAllSignals ntfn)" apply (simp add: cancel_all_signals_def cancelAllSignals_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ntfn_sp']) apply (rule corres_guard_imp [OF getNotification_corres]) apply simp+ @@ -2006,22 +1487,26 @@ lemma cancelAllSignals_corres: apply (rule corres_split[OF _ rescheduleRequired_corres]) apply (rule ep_cancel_corres_helper) apply (wp mapM_x_wp'[where 'b="det_ext state"] - weak_sch_act_wf_lift_linear setThreadState_not_st + weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ apply (rename_tac list) - apply (rule_tac R="\_ s. (\x\set list. tcb_at' x s) \ valid_objs' s" + apply (rule_tac R="\_ s. (\x\set list. 
tcb_at' x s) \ valid_objs' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_objs' s + \ pspace_aligned' s \ pspace_distinct' s" in hoare_post_add) apply (rule mapM_x_wp') apply (rule hoare_name_pre_state) - apply (wpsimp wp: hoare_vcg_const_Ball_lift - sts_st_tcb' sts_valid_queues setThreadState_not_st - simp: valid_tcb_state'_def) + apply (wpsimp wp: hoare_vcg_const_Ball_lift sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+ apply (wp hoare_vcg_const_Ball_lift set_ntfn_aligned' set_ntfn_valid_objs' weak_sch_act_wf_lift_linear | simp)+ - apply (clarsimp simp: invs'_def valid_state'_def invs_valid_pspace valid_obj_def valid_ntfn_def invs_weak_sch_act_wf valid_ntfn'_def valid_pspace'_def - valid_sched_def valid_sched_action_def valid_obj'_def projectKOs | erule obj_at_valid_objsE | drule ko_at_valid_objs')+ + apply (clarsimp simp: invs'_def valid_state'_def invs_valid_pspace valid_obj_def valid_ntfn_def + invs_weak_sch_act_wf valid_ntfn'_def valid_pspace'_def + valid_sched_def valid_sched_action_def valid_obj'_def projectKOs + | erule obj_at_valid_objsE | drule ko_at_valid_objs' | fastforce)+ done lemma ep'_Idle_case_helper: @@ -2060,6 +1545,11 @@ proof - done qed +lemma tcbSchedEnqueue_valid_pspace'[wp]: + "tcbSchedEnqueue tcbPtr \valid_pspace'\" + unfolding valid_pspace'_def + by wpsimp + lemma cancel_all_invs'_helper: "\all_invs_but_sym_refs_ct_not_inQ' and (\s. \x \ set q. tcb_at' x s) and (\s. sym_refs (\x. if x \ set q then {r \ state_refs_of' s x. snd r = TCBBound} @@ -2074,8 +1564,7 @@ lemma cancel_all_invs'_helper: apply clarsimp apply (rule hoare_pre) apply (wp valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift - hoare_vcg_const_Ball_lift untyped_ranges_zero_lift - sts_valid_queues sts_st_tcb' setThreadState_not_st + hoare_vcg_const_Ball_lift untyped_ranges_zero_lift sts_st_tcb' sts_valid_objs' | simp add: cteCaps_of_def o_def)+ apply (unfold fun_upd_apply Invariants_H.tcb_st_refs_of'_simps) apply clarsimp @@ -2084,7 +1573,7 @@ lemma cancel_all_invs'_helper: elim!: rsubst[where P=sym_refs] dest!: set_mono_suffix intro!: ext - | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE))+ + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def))+ done lemma ep_q_refs_max: @@ -2115,7 +1604,6 @@ lemma rescheduleRequired_invs'[wp]: "\invs'\ rescheduleRequired \\rv. invs'\" apply (simp add: rescheduleRequired_def) apply (wp ssa_invs' | simp | wpc)+ - apply (clarsimp simp: invs'_def valid_state'_def) done lemma invs_rct_ct_activatable': @@ -2242,6 +1730,7 @@ lemma rescheduleRequired_all_invs_but_ct_not_inQ: lemma cancelAllIPC_invs'[wp]: "\invs'\ cancelAllIPC ep_ptr \\rv. 
invs'\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) + apply (rule bind_wp[OF _ stateAssert_sp]) apply (wp rescheduleRequired_all_invs_but_ct_not_inQ cancel_all_invs'_helper hoare_vcg_const_Ball_lift valid_global_refs_lift' valid_arch_state_lift' @@ -2251,14 +1740,15 @@ lemma cancelAllIPC_invs'[wp]: prefer 2 apply assumption apply (rule hoare_strengthen_post [OF get_ep_sp']) + apply (rename_tac rv s) apply (clarsimp simp: invs'_def valid_state'_def valid_ep'_def) apply (frule obj_at_valid_objs', fastforce) apply (clarsimp simp: projectKOs valid_obj'_def) apply (rule conjI) - apply (case_tac r, simp_all add: valid_ep'_def)[1] + apply (case_tac rv, simp_all add: valid_ep'_def)[1] apply (rule conjI[rotated]) apply (drule(1) sym_refs_ko_atD') - apply (case_tac r, simp_all add: st_tcb_at_refs_of_rev')[1] + apply (case_tac rv, simp_all add: st_tcb_at_refs_of_rev')[1] apply (clarsimp elim!: if_live_state_refsE | drule(1) bspec | drule st_tcb_at_state_refs_ofD')+ apply (drule(2) ep_q_refs_max) @@ -2269,7 +1759,8 @@ lemma cancelAllIPC_invs'[wp]: lemma cancelAllSignals_invs'[wp]: "\invs'\ cancelAllSignals ntfn \\rv. invs'\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfna", simp_all) apply (wp, simp) apply (wp, simp) @@ -2303,12 +1794,14 @@ crunch valid_objs'[wp]: tcbSchedEnqueue valid_objs' (simp: unless_def valid_tcb'_def tcb_cte_cases_def) lemma cancelAllIPC_valid_objs'[wp]: - "\valid_objs'\ cancelAllIPC ep \\rv. valid_objs'\" + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllIPC ep \\rv. valid_objs'\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) apply (rule hoare_pre) apply (wp set_ep_valid_objs' setSchedulerAction_valid_objs') - apply (rule_tac Q="\rv s. valid_objs' s \ (\x\set (epQueue ep). tcb_at' x s)" + apply (rule_tac Q="\_ s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ (\x\set (epQueue ep). tcb_at' x s)" in hoare_post_imp) apply simp apply (simp add: Ball_def) @@ -2325,9 +1818,10 @@ lemma cancelAllIPC_valid_objs'[wp]: done lemma cancelAllSignals_valid_objs'[wp]: - "\valid_objs'\ cancelAllSignals ntfn \\rv. valid_objs'\" + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllSignals ntfn \\rv. valid_objs'\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfna", simp_all) apply (wp, simp) apply (wp, simp) @@ -2379,19 +1873,17 @@ lemma setThreadState_not_tcb[wp]: "\ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\ setThreadState st t \\rv. ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\" - apply (simp add: setThreadState_def setQueue_def - rescheduleRequired_def tcbSchedEnqueue_def - unless_def bitmap_fun_defs - cong: scheduler_action.case_cong cong del: if_cong - | wp | wpcw)+ - done + by (wpsimp wp: isRunnable_inv threadGet_wp hoare_drop_imps + simp: setThreadState_def setQueue_def + rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + unless_def bitmap_fun_defs)+ lemma tcbSchedEnqueue_unlive: "\ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p and tcb_at' t\ tcbSchedEnqueue t \\_. 
ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p\" - apply (simp add: tcbSchedEnqueue_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) apply (wp | simp add: setQueue_def bitmap_fun_defs)+ done @@ -2425,19 +1917,41 @@ lemma setObject_ko_wp_at': objBits_def[symmetric] ps_clear_upd in_magnitude_check v projectKOs) -lemma rescheduleRequired_unlive: - "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ - rescheduleRequired +lemma threadSet_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + threadSet f t \\rv. ko_wp_at' (Not \ live') p\" - apply (simp add: rescheduleRequired_def) - apply (wp | simp | wpc)+ - apply (simp add: tcbSchedEnqueue_def unless_def - threadSet_def setQueue_def threadGet_def) - apply (wp setObject_ko_wp_at getObject_tcb_wp - | simp add: objBits_simps' bitmap_fun_defs split del: if_split)+ - apply (clarsimp simp: o_def) - apply (drule obj_at_ko_at') - apply clarsimp + by (clarsimp simp: threadSet_def valid_def getObject_def + setObject_def in_monad loadObject_default_def + ko_wp_at'_def split_def in_magnitude_check + objBits_simps' updateObject_default_def projectKOs + ps_clear_upd ARM_H.fromPPtr_def) + +lemma tcbSchedEnqueue_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + tcbSchedEnqueue t + \\_. ko_wp_at' (Not \ live') p\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def) + apply (wpsimp wp: threadGet_wp threadSet_unlive_other simp: bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (frule (1) tcbQueueHead_ksReadyQueues) + apply (drule_tac x=p in spec) + apply (fastforce dest!: inQ_implies_tcbQueueds_of + simp: tcbQueueEmpty_def ko_wp_at'_def opt_pred_def opt_map_def projectKOs + split: option.splits) + done + +lemma rescheduleRequired_unlive[wp]: + "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ + rescheduleRequired + \\_. ko_wp_at' (Not \ live') p\" + supply comp_apply[simp del] + unfolding rescheduleRequired_def + apply (wpsimp wp: tcbSchedEnqueue_unlive_other) done lemmas setEndpoint_ko_wp_at' @@ -2447,7 +1961,8 @@ lemma cancelAllIPC_unlive: "\valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s)\ cancelAllIPC ep \\rv. ko_wp_at' (Not \ live') ep\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) apply (rule hoare_pre) apply (wp cancelAll_unlive_helper setEndpoint_ko_wp_at' hoare_vcg_const_Ball_lift rescheduleRequired_unlive @@ -2466,7 +1981,8 @@ lemma cancelAllSignals_unlive: \ obj_at' (\ko. ntfnBoundTCB ko = None) ntfnptr s\ cancelAllSignals ntfnptr \\rv. 
ko_wp_at' (Not \ live') ntfnptr\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfn", simp_all add: setNotification_def) apply wp apply (fastforce simp: obj_at'_real_def projectKOs @@ -2526,30 +2042,25 @@ lemma cancelBadgedSends_filterM_helper': apply wp apply clarsimp apply (clarsimp simp: filterM_append bind_assoc simp del: set_append distinct_append) - apply (drule spec, erule hoare_seq_ext[rotated]) - apply (rule hoare_seq_ext [OF _ gts_inv']) + apply (drule spec, erule bind_wp_fwd) + apply (rule bind_wp [OF _ gts_inv']) apply (rule hoare_pre) apply (wp valid_irq_node_lift hoare_vcg_const_Ball_lift sts_sch_act' sch_act_wf_lift valid_irq_handlers_lift'' cur_tcb_lift irqs_masked_lift - sts_st_tcb' sts_valid_queues setThreadState_not_st - tcbSchedEnqueue_not_st - untyped_ranges_zero_lift + sts_st_tcb' untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ apply (frule insert_eqD, frule state_refs_of'_elemD) apply (clarsimp simp: valid_tcb_state'_def st_tcb_at_refs_of_rev') apply (frule pred_tcb_at') apply (rule conjI[rotated], blast) - apply clarsimp + apply (clarsimp simp: valid_pspace'_def cong: conj_cong) apply (intro conjI) - apply (clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE dest!: st_tcb_ex_cap'') - apply (fastforce dest!: st_tcb_ex_cap'') + apply (fastforce simp: valid_tcb'_def dest!: st_tcb_ex_cap'') apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) apply (erule delta_sym_refs) - apply (fastforce elim!: obj_atE' - simp: state_refs_of'_def projectKOs tcb_bound_refs'_def - subsetD symreftype_inverse' - split: if_split_asm)+ - done + by (fastforce elim!: obj_atE' + simp: state_refs_of'_def tcb_bound_refs'_def subsetD symreftype_inverse' projectKOs + split: if_split_asm)+ lemmas cancelBadgedSends_filterM_helper = spec [where x=Nil, OF cancelBadgedSends_filterM_helper', simplified] @@ -2559,12 +2070,13 @@ lemma cancelBadgedSends_invs[wp]: shows "\invs'\ cancelBadgedSends epptr badge \\rv. invs'\" apply (simp add: cancelBadgedSends_def) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp'], rename_tac ep) apply (case_tac ep, simp_all) apply ((wp | simp)+)[2] apply (subst bind_assoc [where g="\_. rescheduleRequired", symmetric])+ - apply (rule hoare_seq_ext + apply (rule bind_wp [OF rescheduleRequired_all_invs_but_ct_not_inQ]) apply (simp add: list_case_return cong: list.case_cong) apply (rule hoare_pre, wp valid_irq_node_lift irqs_masked_lift) @@ -2592,10 +2104,20 @@ lemma cancelBadgedSends_invs[wp]: crunch state_refs_of[wp]: tcb_sched_action "\s. 
P (state_refs_of s)" (ignore_del: tcb_sched_action) +lemma setEndpoint_valid_tcbs'[wp]: + "setEndpoint ePtr val \valid_tcbs'\" + unfolding setEndpoint_def + apply (wpsimp wp: setObject_valid_tcbs'[where P=\]) + apply (clarsimp simp: updateObject_default_def monad_simps projectKOs) + apply fastforce + done + lemma cancelBadgedSends_corres: "corres dc (invs and valid_sched and ep_at epptr) (invs' and ep_at' epptr) (cancel_badged_sends epptr bdg) (cancelBadgedSends epptr bdg)" apply (simp add: cancel_badged_sends_def cancelBadgedSends_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_guard_imp) apply (rule corres_split[OF getEndpoint_corres _ get_simple_ko_sp get_ep_sp', where Q="invs and valid_sched" and Q'=invs']) @@ -2605,10 +2127,16 @@ lemma cancelBadgedSends_corres: apply (rule corres_guard_imp) apply (rule corres_split_nor[OF setEndpoint_corres]) apply (simp add: ep_relation_def) - apply (rule corres_split_eqr[OF _ _ _ hoare_post_add[where R="\_. valid_objs'"]]) + apply (rule corres_split_eqr[OF _ _ _ hoare_post_add + [where R="\_. valid_objs' and pspace_aligned' + and pspace_distinct'"]]) apply (rule_tac S="(=)" - and Q="\xs s. (\x \ set xs. (epptr, TCBBlockedSend) \ state_refs_of s x) \ distinct xs \ valid_etcbs s" - and Q'="\xs s. (\x \ set xs. tcb_at' x s) \ weak_sch_act_wf (ksSchedulerAction s) s \ Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s" + and Q="\xs s. (\x \ set xs. (epptr, TCBBlockedSend) \ state_refs_of s x) \ + distinct xs \ valid_etcbs s \ + in_correct_ready_q s \ ready_qs_distinct s \ + pspace_aligned s \ pspace_distinct s" + and Q'="\_ s. valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" in corres_mapM_list_all2[where r'="(=)"], simp_all add: list_all2_refl)[1] apply (clarsimp simp: liftM_def[symmetric] o_def) @@ -2619,56 +2147,56 @@ lemma cancelBadgedSends_corres: apply (clarsimp simp: o_def dc_def[symmetric] liftM_def) apply (rule corres_split[OF setThreadState_corres]) apply simp - apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) apply (rule corres_trivial) apply simp - apply (wp sts_valid_queues gts_st_tcb_at)+ + apply wp+ + apply simp + apply (wp sts_st_tcb_at' gts_st_tcb_at sts_valid_objs' + | strengthen valid_objs'_valid_tcbs')+ apply (clarsimp simp: valid_tcb_state_def tcb_at_def st_tcb_def2 st_tcb_at_refs_of_rev dest!: state_refs_of_elemD elim!: tcb_at_is_etcb_at[rotated]) - apply (simp add: is_tcb_def) - apply (wp hoare_vcg_const_Ball_lift gts_wp | clarsimp)+ - apply (wp gts_st_tcb_at hoare_vcg_imp_lift - sts_st_tcb' sts_valid_queues + apply (simp add: valid_tcb_state'_def) + apply (wp hoare_vcg_const_Ball_lift gts_wp | clarsimp)+ + apply (wp hoare_vcg_imp_lift sts_st_tcb' sts_valid_objs' | clarsimp simp: valid_tcb_state'_def)+ apply (rule corres_split[OF _ rescheduleRequired_corres]) apply (rule setEndpoint_corres) apply (simp split: list.split add: ep_relation_def) apply (wp weak_sch_act_wf_lift_linear)+ - apply (wp gts_st_tcb_at hoare_vcg_imp_lift mapM_wp' - sts_st_tcb' sts_valid_queues - set_thread_state_runnable_weak_valid_sched_action - | clarsimp simp: valid_tcb_state'_def)+ - apply (wp hoare_vcg_const_Ball_lift set_ep_valid_objs')+ + apply (wpsimp wp: mapM_wp' set_thread_state_runnable_weak_valid_sched_action + simp: valid_tcb_state'_def) + apply ((wpsimp wp: hoare_vcg_imp_lift mapM_wp' sts_valid_objs' simp: valid_tcb_state'_def + | strengthen 
valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: set_ep_valid_objs')+ apply (clarsimp simp: conj_comms) apply (frule sym_refs_ko_atD, clarsimp+) apply (rule obj_at_valid_objsE, assumption+, clarsimp+) apply (clarsimp simp: valid_obj_def valid_ep_def valid_sched_def valid_sched_action_def) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) apply (rule conjI, erule obj_at_weakenE, clarsimp simp: is_ep) + apply (rule conjI, fastforce) apply (clarsimp simp: st_tcb_at_refs_of_rev) apply (drule(1) bspec, drule st_tcb_at_state_refs_ofD, clarsimp) apply (simp add: set_eq_subset) apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI]) - apply (drule ko_at_valid_objs', clarsimp) - apply (simp add: projectKOs) - apply (clarsimp simp: valid_obj'_def valid_ep'_def invs_weak_sch_act_wf - invs'_def valid_state'_def) + apply (fastforce simp: valid_ep'_def) done +crunches updateRestartPC + for tcb_at'[wp]: "tcb_at' t" + (simp: crunch_simps) + lemma suspend_unqueued: "\\\ suspend t \\rv. obj_at' (Not \ tcbQueued) t\" - apply (simp add: suspend_def unless_def tcbSchedDequeue_def) - apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift) - apply (simp add: threadGet_def| wp getObject_tcb_wp)+ - apply (rule hoare_strengthen_post, rule hoare_post_taut) - apply (fastforce simp: obj_at'_def projectKOs) - apply (rule hoare_post_taut) - apply wp+ - done + unfolding suspend_def + by (wpsimp simp: comp_def wp: tcbSchedDequeue_not_tcbQueued) crunch unqueued: prepareThreadDelete "obj_at' (Not \ tcbQueued) t" crunch inactive: prepareThreadDelete "st_tcb_at' ((=) Inactive) t'" -crunch nonq: prepareThreadDelete " \s. \d p. t' \ set (ksReadyQueues s (d, p))" end end diff --git a/proof/refine/ARM/Ipc_R.thy b/proof/refine/ARM/Ipc_R.thy index 49ff66feec..82d354d9d4 100644 --- a/proof/refine/ARM/Ipc_R.thy +++ b/proof/refine/ARM/Ipc_R.thy @@ -14,7 +14,7 @@ lemmas lookup_slot_wrapper_defs'[simp] = lookupSourceSlot_def lookupTargetSlot_def lookupPivotSlot_def lemma getMessageInfo_corres: "corres ((=) \ message_info_map) - (tcb_at t) (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) \ (get_message_info t) (getMessageInfo t)" apply (rule corres_guard_imp) apply (unfold get_message_info_def getMessageInfo_def fun_app_def) @@ -264,11 +264,7 @@ lemmas unifyFailure_discard2 lemma deriveCap_not_null: "\\\ deriveCap slot cap \\rv. K (rv \ NullCap \ cap \ NullCap)\,-" apply (simp add: deriveCap_def split del: if_split) - apply (case_tac cap) - apply (simp_all add: Let_def isCap_simps) - apply wp - apply simp - done + by (case_tac cap; wpsimp simp: isCap_simps) lemma deriveCap_derived_foo: "\\s. \cap'. (cte_wp_at' (\cte. badge_derived' cap (cteCap cte) @@ -306,7 +302,7 @@ lemma cteInsert_cte_wp_at: cteInsert cap src dest \\uu. cte_wp_at' (\c. P (cteCap c)) p\" apply (simp add: cteInsert_def) - apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp static_imp_wp + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp | clarsimp simp: comp_def | unfold setUntypedCapAsFull_def)+ apply (drule cte_at_cte_wp_atD) @@ -350,7 +346,7 @@ lemma cteInsert_weak_cte_wp_at3: else cte_wp_at' (\c. P (cteCap c)) p s\ cteInsert cap src dest \\uu. cte_wp_at' (\c. 
P (cteCap c)) p\" - by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp | clarsimp simp: comp_def cteInsert_def | unfold setUntypedCapAsFull_def | auto simp: cte_wp_at'_def dest!: imp)+ @@ -472,7 +468,7 @@ next apply (rule_tac Q' ="\cap' s. (cap'\ cap.NullCap \ cte_wp_at (is_derived (cdt s) (a, b) cap') (a, b) s \ QM s cap')" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption @@ -484,13 +480,13 @@ next apply (rule_tac Q' ="\cap' s. (cap'\ capability.NullCap \ cte_wp_at' (\c. is_derived' (ctes_of s) (cte_map (a, b)) cap' (cteCap c)) (cte_map (a, b)) s \ QM s cap')" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption apply (subst imp_conjR) apply (rule hoare_vcg_conj_liftE_R) - apply (rule hoare_post_imp_R[OF deriveCap_derived]) + apply (rule hoare_strengthen_postE_R[OF deriveCap_derived]) apply (clarsimp simp:cte_wp_at_ctes_of) apply (wp deriveCap_derived_foo) apply (clarsimp simp: cte_wp_at_caps_of_state remove_rights_def @@ -570,7 +566,7 @@ lemma cteInsert_cte_cap_to': apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of) apply (rule_tac x = "cref" in exI) apply (rule conjI) @@ -594,7 +590,7 @@ lemma cteInsert_assume_Null: apply (rule hoare_name_pre_state) apply (erule impCE) apply (simp add: cteInsert_def) - apply (rule hoare_seq_ext[OF _ getCTE_sp])+ + apply (rule bind_wp[OF _ getCTE_sp])+ apply (rule hoare_name_pre_state) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule hoare_pre(1)) @@ -613,7 +609,7 @@ lemma cteInsert_weak_cte_wp_at2: apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of weak) apply auto done @@ -646,11 +642,11 @@ lemma transferCapsToSlots_presM: apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift | assumption | wpc)+ apply (rule cteInsert_assume_Null) - apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' static_imp_wp) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift static_imp_wp)+ + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at static_imp_wp + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp deriveCap_derived_foo)+ apply (thin_tac "\slots. PROP P slots" for P) apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def @@ -706,8 +702,7 @@ lemma transferCapsToSlots_mdb[wp]: \ transferCaps_srcs caps s\ transferCapsToSlots ep buffer n caps slots mi \\rv. 
valid_mdb'\" - apply (wp transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) - apply clarsimp + apply (wpsimp wp: transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) apply (frule valid_capAligned) apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def badge_derived'_def) apply wp @@ -756,14 +751,6 @@ lemma tcts_sch_act[wp]: \\rv s. sch_act_wf (ksSchedulerAction s) s\" by (wp sch_act_wf_lift tcb_in_cur_domain'_lift transferCapsToSlots_pres1) -lemma tcts_vq[wp]: - "\Invariants_H.valid_queues\ transferCapsToSlots ep buffer n caps slots mi \\rv. Invariants_H.valid_queues\" - by (wp valid_queues_lift transferCapsToSlots_pres1) - -lemma tcts_vq'[wp]: - "\valid_queues'\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_queues'\" - by (wp valid_queues_lift' transferCapsToSlots_pres1) - crunch state_refs_of' [wp]: setExtraBadge "\s. P (state_refs_of' s)" lemma tcts_state_refs_of'[wp]: @@ -852,7 +839,7 @@ lemma transferCapsToSlots_irq_handlers[wp]: and transferCaps_srcs caps\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_irq_handlers'\" - apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) + apply (wpsimp wp: transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) apply (clarsimp simp: is_derived'_def cte_wp_at_ctes_of badge_derived'_def) apply (erule(2) valid_irq_handlers_ctes_ofD) apply wp @@ -955,8 +942,8 @@ lemma tcts_zero_ranges[wp]: \ transferCaps_srcs caps s\ transferCapsToSlots ep buffer n caps slots mi \\rv. untyped_ranges_zero'\" - apply (wp transferCapsToSlots_presM[where emx=True and vo=True - and drv=True and pad=True]) + apply (wpsimp wp: transferCapsToSlots_presM[where emx=True and vo=True + and drv=True and pad=True]) apply (clarsimp simp: cte_wp_at_ctes_of) apply (simp add: cteCaps_of_def) apply (rule hoare_pre, wp untyped_ranges_zero_lift) @@ -977,6 +964,11 @@ crunch ksDomScheduleIdx[wp]: setExtraBadge "\s. P (ksDomScheduleIdx s)" crunch ksDomSchedule[wp]: transferCapsToSlots "\s. P (ksDomSchedule s)" crunch ksDomScheduleIdx[wp]: transferCapsToSlots "\s. P (ksDomScheduleIdx s)" +crunches transferCapsToSlots + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift) lemma transferCapsToSlots_invs[wp]: "\\s. invs' s \ distinct slots @@ -1034,7 +1026,7 @@ lemma transferCaps_corres: apply (rule corres_rel_imp, rule transferCapsToSlots_corres, simp_all add: split_def)[1] apply (case_tac info, simp) - apply (wp hoare_vcg_all_lift get_rs_cte_at static_imp_wp + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp | simp only: ball_conj_distrib)+ apply (simp add: cte_map_def tcb_cnode_index_def split_def) apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 @@ -1138,7 +1130,7 @@ lemmas copyMRs_typ_at_lifts[wp] = typ_at_lifts [OF copyMRs_typ_at'] lemma copy_mrs_invs'[wp]: "\ invs' and tcb_at' s and tcb_at' r \ copyMRs s sb r rb n \\rv. 
invs' \" - including no_pre + including classic_wp_pre apply (simp add: copyMRs_def) apply (wp dmo_invs' no_irq_mapM no_irq_storeWord| simp add: split_def) @@ -1182,18 +1174,12 @@ lemma set_mrs_valid_objs' [wp]: crunch valid_objs'[wp]: copyMRs valid_objs' (wp: crunch_wps simp: crunch_simps) -crunch valid_queues'[wp]: asUser "Invariants_H.valid_queues'" - (simp: crunch_simps wp: hoare_drop_imps) - - lemma setMRs_invs_bits[wp]: "\valid_pspace'\ setMRs t buf mrs \\rv. valid_pspace'\" "\\s. sch_act_wf (ksSchedulerAction s) s\ setMRs t buf mrs \\rv s. sch_act_wf (ksSchedulerAction s) s\" "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ setMRs t buf mrs \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ setMRs t buf mrs \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ setMRs t buf mrs \\rv. valid_queues'\" "\\s. P (state_refs_of' s)\ setMRs t buf mrs \\rv s. P (state_refs_of' s)\" @@ -1210,8 +1196,6 @@ lemma copyMRs_invs_bits[wp]: "\valid_pspace'\ copyMRs s sb r rb n \\rv. valid_pspace'\" "\\s. sch_act_wf (ksSchedulerAction s) s\ copyMRs s sb r rb n \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ copyMRs s sb r rb n \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ copyMRs s sb r rb n \\rv. valid_queues'\" "\\s. P (state_refs_of' s)\ copyMRs s sb r rb n \\rv s. P (state_refs_of' s)\" @@ -1407,7 +1391,7 @@ lemma doNormalTransfer_corres: hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' hoare_vcg_const_Ball_lift lookupExtraCaps_length | simp add: if_apply_def2)+) - apply (wp static_imp_wp | strengthen valid_msg_length_strengthen)+ + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ apply clarsimp apply auto done @@ -1468,15 +1452,15 @@ lemma msgFromLookupFailure_map[simp]: by (cases lf, simp_all add: lookup_failure_map_def msgFromLookupFailure_def) lemma asUser_getRestartPC_corres: - "corres (=) (tcb_at t) (tcb_at' t) - (as_user t getRestartPC) (asUser t getRestartPC)" + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t getRestartPC) (asUser t getRestartPC)" apply (rule asUser_corres') apply (rule corres_Id, simp, simp) apply (rule no_fail_getRestartPC) done lemma asUser_mapM_getRegister_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (as_user t (mapM getRegister regs)) (asUser t (mapM getRegister regs))" apply (rule asUser_corres') @@ -1486,7 +1470,7 @@ lemma asUser_mapM_getRegister_corres: done lemma makeArchFaultMessage_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (make_arch_fault_msg f t) (makeArchFaultMessage (arch_fault_map f) t)" apply (cases f, clarsimp simp: makeArchFaultMessage_def split: arch_fault.split) @@ -1497,7 +1481,7 @@ lemma makeArchFaultMessage_corres: done lemma makeFaultMessage_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (make_fault_msg ft t) (makeFaultMessage (fault_map ft) t)" apply (cases ft, simp_all add: makeFaultMessage_def split del: if_split) @@ -1535,7 +1519,8 @@ lemmas threadget_fault_corres = lemma doFaultTransfer_corres: "corres dc (obj_at (\ko. \tcb ft. 
ko = TCB tcb \ tcb_fault tcb = Some ft) sender - and tcb_at receiver and case_option \ in_user_frame recv_buf) + and tcb_at receiver and case_option \ in_user_frame recv_buf + and pspace_aligned and pspace_distinct) (tcb_at' sender and tcb_at' receiver and case_option \ valid_ipc_buffer_ptr' recv_buf) (do_fault_transfer badge sender receiver recv_buf) @@ -1544,7 +1529,8 @@ lemma doFaultTransfer_corres: ARM_H.badgeRegister_def badge_register_def) apply (rule_tac Q="\fault. K (\f. fault = Some f) and tcb_at sender and tcb_at receiver and - case_option \ in_user_frame recv_buf" + case_option \ in_user_frame recv_buf and + pspace_aligned and pspace_distinct" and Q'="\fault'. tcb_at' sender and tcb_at' receiver and case_option \ valid_ipc_buffer_ptr' recv_buf" in corres_underlying_split) @@ -1682,10 +1668,6 @@ crunch vp[wp]: doIPCTransfer "valid_pspace'" (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' wp: transferCapsToSlots_vp simp:ball_conj_distrib ) crunch sch_act_wf[wp]: doIPCTransfer "\s. sch_act_wf (ksSchedulerAction s) s" (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) -crunch vq[wp]: doIPCTransfer "Invariants_H.valid_queues" - (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) -crunch vq'[wp]: doIPCTransfer "valid_queues'" - (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) crunch state_refs_of[wp]: doIPCTransfer "\s. P (state_refs_of' s)" (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) crunch ct[wp]: doIPCTransfer "cur_tcb'" @@ -1711,7 +1693,7 @@ declare asUser_global_refs' [wp] lemma lec_valid_cap' [wp]: "\valid_objs'\ lookupExtraCaps thread xa mi \\rv s. (\x\set rv. s \' fst x)\, -" - apply (rule hoare_pre, rule hoare_post_imp_R) + apply (rule hoare_pre, rule hoare_strengthen_postE_R) apply (rule hoare_vcg_conj_lift_R[where R=valid_objs' and S="\_. valid_objs'"]) apply (rule lookupExtraCaps_srcs) apply wp @@ -1760,7 +1742,7 @@ crunch nosch[wp]: doIPCTransfer "\s. P (ksSchedulerAction s)" simp: split_def zipWithM_x_mapM) lemma handle_fault_reply_registers_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (do t' \ arch_get_sanitise_register_info t; y \ as_user t (zipWithM_x @@ -1789,7 +1771,7 @@ lemma handle_fault_reply_registers_corres: lemma handleFaultReply_corres: "ft' = fault_map ft \ - corres (=) (tcb_at t) (tcb_at' t) + corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (handle_fault_reply ft t label msg) (handleFaultReply ft' t label msg)" apply (cases ft) @@ -1832,16 +1814,6 @@ lemma getThreadCallerSlot_inv: "\P\ getThreadCallerSlot t \\_. P\" by (simp add: getThreadCallerSlot_def, wp) -lemma deleteCallerCap_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - deleteCallerCap t - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: deleteCallerCap_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) - apply (wp getThreadCallerSlot_inv cteDeleteOne_ct_not_ksQ getCTE_wp) - apply (clarsimp simp: cte_wp_at_ctes_of) - done - crunch tcb_at'[wp]: unbindNotification "tcb_at' x" lemma finaliseCapTrue_standin_tcb_at' [wp]: @@ -1995,39 +1967,11 @@ lemma cteDeleteOne_weak_sch_act[wp]: crunch weak_sch_act_wf[wp]: emptySlot "\s. 
weak_sch_act_wf (ksSchedulerAction s) s" crunch pred_tcb_at'[wp]: handleFaultReply "pred_tcb_at' proj P t" -crunch valid_queues[wp]: handleFaultReply "Invariants_H.valid_queues" -crunch valid_queues'[wp]: handleFaultReply "valid_queues'" crunch tcb_in_cur_domain'[wp]: handleFaultReply "tcb_in_cur_domain' t" crunch sch_act_wf[wp]: unbindNotification "\s. sch_act_wf (ksSchedulerAction s) s" (wp: sbn_sch_act') -crunch valid_queues'[wp]: cteDeleteOne valid_queues' - (simp: crunch_simps inQ_def - wp: crunch_wps sts_st_tcb' getObject_inv loadObject_default_inv - threadSet_valid_queues' rescheduleRequired_valid_queues'_weak) - -lemma cancelSignal_valid_queues'[wp]: - "\valid_queues'\ cancelSignal t ntfn \\rv. valid_queues'\" - apply (simp add: cancelSignal_def) - apply (rule hoare_pre) - apply (wp getNotification_wp| wpc | simp)+ - done - -lemma cancelIPC_valid_queues'[wp]: - "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) \ cancelIPC t \\rv. valid_queues'\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def locateSlot_conv liftM_def) - apply (rule hoare_seq_ext[OF _ gts_sp']) - apply (case_tac state, simp_all) defer 2 - apply (rule hoare_pre) - apply ((wp getEndpoint_wp getCTE_wp | wpc | simp)+)[8] - apply (wp cteDeleteOne_valid_queues') - apply (rule_tac Q="\_. valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) - apply (clarsimp simp: capHasProperty_def cte_wp_at_ctes_of) - apply (wp threadSet_valid_queues' threadSet_sch_act| simp)+ - apply (clarsimp simp: inQ_def) - done - crunch valid_objs'[wp]: handleFaultReply valid_objs' lemma cte_wp_at_is_reply_cap_toI: @@ -2035,6 +1979,17 @@ lemma cte_wp_at_is_reply_cap_toI: \ cte_wp_at (is_reply_cap_to t) ptr s" by (fastforce simp: cte_wp_at_reply_cap_to_ex_rights) +crunches handle_fault_reply + for pspace_alignedp[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +crunches cteDeleteOne, doIPCTransfer, handleFaultReply + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + lemma doReplyTransfer_corres: "corres dc (einvs and tcb_at receiver and tcb_at sender @@ -2046,7 +2001,7 @@ lemma doReplyTransfer_corres: apply (simp add: do_reply_transfer_def doReplyTransfer_def cong: option.case_cong) apply (rule corres_underlying_split [OF _ _ gts_sp gts_sp']) apply (rule corres_guard_imp) - apply (rule getThreadState_corres, (clarsimp simp add: st_tcb_at_tcb_at)+) + apply (rule getThreadState_corres, (clarsimp simp add: st_tcb_at_tcb_at invs_distinct invs_psp_aligned)+) apply (rule_tac F = "awaiting_reply state" in corres_req) apply (clarsimp simp add: st_tcb_at_def obj_at_def is_tcb) apply (fastforce simp: invs_def valid_state_def intro: has_reply_cap_cte_wpD @@ -2080,8 +2035,12 @@ lemma doReplyTransfer_corres: apply (rule corres_split[OF setThreadState_corres]) apply simp apply (rule possibleSwitchTo_corres) - apply (wp set_thread_state_runnable_valid_sched set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' sts_valid_queues sts_valid_objs' delete_one_tcbDomain_obj_at' - | simp add: valid_tcb_state'_def)+ + apply (wp set_thread_state_runnable_valid_sched + set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' + sts_valid_objs' delete_one_tcbDomain_obj_at' + | simp add: valid_tcb_state'_def + | strengthen 
valid_queues_in_correct_ready_q valid_sched_valid_queues + valid_queues_ready_qs_distinct)+ apply (strengthen cte_wp_at_reply_cap_can_fast_finalise) apply (wp hoare_vcg_conj_lift) apply (rule hoare_strengthen_post [OF do_ipc_transfer_non_null_cte_wp_at]) @@ -2090,12 +2049,16 @@ lemma doReplyTransfer_corres: apply (fastforce) apply (clarsimp simp:is_cap_simps) apply (wp weak_valid_sched_action_lift)+ - apply (rule_tac Q="\_. valid_queues' and valid_objs' and cur_tcb' and tcb_at' receiver and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp, simp add: sch_act_wf_weak) + apply (rule_tac Q="\_ s. valid_objs' s \ cur_tcb' s \ tcb_at' receiver s + \ sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp, simp add: sch_act_wf_weak) apply (wp tcb_in_cur_domain'_lift) defer apply (simp) apply (wp)+ - apply (clarsimp) + apply (clarsimp simp: invs_psp_aligned invs_distinct) apply (rule conjI, erule invs_valid_objs) apply (rule conjI, clarsimp)+ apply (rule conjI) @@ -2119,36 +2082,38 @@ lemma doReplyTransfer_corres: apply (rule threadset_corresT; clarsimp simp add: tcb_relation_def fault_rel_optionation_def tcb_cap_cases_def tcb_cte_cases_def exst_same_def) - apply (rule_tac P="valid_sched and cur_tcb and tcb_at receiver" - and P'="tcb_at' receiver and cur_tcb' + apply (rule_tac Q="valid_sched and cur_tcb and tcb_at receiver and pspace_aligned and pspace_distinct" + and Q'="tcb_at' receiver and cur_tcb' and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and Invariants_H.valid_queues and valid_queues' and valid_objs'" - in corres_inst) + and valid_objs' + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" + in corres_guard_imp) apply (case_tac rvb, simp_all)[1] apply (rule corres_guard_imp) apply (rule corres_split[OF setThreadState_corres]) apply simp apply (fold dc_def, rule possibleSwitchTo_corres) apply simp - apply (wp static_imp_wp static_imp_conj_wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' - sts_st_tcb' sts_valid_queues | simp | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ + apply (wp hoare_weak_lift_imp hoare_weak_lift_imp_conj set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' + | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ apply (rule corres_guard_imp) apply (rule setThreadState_corres) apply clarsimp+ apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state - thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' + thread_set_not_state_valid_sched threadSet_tcbDomain_triv threadSet_valid_objs' + threadSet_sched_pointers threadSet_valid_sched_pointers | simp add: valid_tcb_state'_def)+ - apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state - thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' - | simp add: runnable_def inQ_def valid_tcb'_def)+ - apply (rule_tac Q="\_. valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and valid_objs and pspace_aligned" + apply (rule_tac Q="\_. 
valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and + valid_objs and pspace_aligned and pspace_distinct" in hoare_strengthen_post [rotated], clarsimp) apply (wp) apply (rule hoare_chain [OF cap_delete_one_invs]) apply (assumption) apply (rule conjI, clarsimp) - apply (clarsimp simp add: invs_def valid_state_def) + apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def) apply (rule_tac Q="\_. tcb_at' sender and tcb_at' receiver and invs'" in hoare_strengthen_post [rotated]) apply (solves\auto simp: invs'_def valid_state'_def\) @@ -2231,15 +2196,15 @@ lemma setupCallerCap_corres: tcb_cnode_index_def cte_level_bits_def) apply (simp add: cte_map_def tcbCallerSlot_def tcb_cnode_index_def cte_level_bits_def) - apply (rule_tac Q="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" + in hoare_post_add) apply (wp, (wp getSlotCap_wp)+) apply blast apply (rule no_fail_pre, wp) apply (clarsimp simp: cte_wp_at'_def cte_at'_def) - apply (rule_tac Q="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" + in hoare_post_add) apply (wp, (wp getCTE_wp')+) apply blast apply (rule no_fail_pre, wp) @@ -2296,7 +2261,7 @@ lemma possibleSwitchTo_weak_sch_act_wf[wp]: bitmap_fun_defs) apply (wp rescheduleRequired_weak_sch_act_wf weak_sch_act_wf_lift_linear[where f="tcbSchedEnqueue t"] - getObject_tcb_wp static_imp_wp + getObject_tcb_wp hoare_weak_lift_imp | wpc)+ apply (clarsimp simp: obj_at'_def projectKOs weak_sch_act_wf_def ps_clear_def tcb_in_cur_domain'_def) done @@ -2359,7 +2324,7 @@ proof - apply (rule setEndpoint_corres) apply (simp add: ep_relation_def) apply wp+ - apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def) + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_distinct) apply clarsimp \ \concludes IdleEP if bl branch\ apply (simp add: ep_relation_def) @@ -2369,7 +2334,7 @@ proof - apply (rule setEndpoint_corres) apply (simp add: ep_relation_def) apply wp+ - apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def) + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_distinct) apply clarsimp \ \concludes SendEP if bl branch\ apply (simp add: ep_relation_def) @@ -2408,10 +2373,12 @@ proof - apply (wp hoare_drop_imps)[1] apply (wp | simp)+ apply (wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases) - apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + apply (wp sts_weak_sch_act_wf sts_valid_objs' sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases)[1] apply (simp add: valid_tcb_state_def pred_conj_def) - apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg) + apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues)+ apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift | clarsimp simp: is_cap_simps)+)[1] apply (simp add: pred_conj_def) @@ -2420,7 +2387,7 @@ proof - apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift hoare_drop_imps)[1] apply (wp gts_st_tcb_at)+ apply (simp add: pred_conj_def cong: conj_cong) - apply (wp hoare_post_taut) + apply (wp hoare_TrueI) apply (simp) apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb')+ apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def 
ep_redux_simps @@ -2476,17 +2443,19 @@ proof - apply (simp add: if_apply_def2) apply ((wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases | simp add: if_apply_def2 split del: if_split)+)[1] - apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + apply (wp sts_weak_sch_act_wf sts_valid_objs' sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases) apply (simp add: valid_tcb_state_def pred_conj_def) apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift - | clarsimp simp:is_cap_simps)+)[1] + | clarsimp simp: is_cap_simps + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues )+)[1] apply (simp add: valid_tcb_state'_def pred_conj_def) apply (strengthen sch_act_wf_weak) apply (wp weak_sch_act_wf_lift_linear hoare_drop_imps) apply (wp gts_st_tcb_at)+ apply (simp add: pred_conj_def cong: conj_cong) - apply (wp hoare_post_taut) + apply (wp hoare_TrueI) apply simp apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb') apply (clarsimp simp add: invs_def valid_state_def @@ -2514,7 +2483,7 @@ lemmas setMessageInfo_typ_ats[wp] = typ_at_lifts [OF setMessageInfo_typ_at'] declare tl_drop_1[simp] crunch cur[wp]: cancel_ipc "cur_tcb" - (wp: select_wp crunch_wps simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch valid_objs'[wp]: asUser "valid_objs'" @@ -2561,14 +2530,15 @@ lemma sendSignal_corres: apply (rule possibleSwitchTo_corres) apply wp apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' - sts_valid_queues sts_st_tcb' hoare_disjI2 + sts_st_tcb' sts_valid_objs' hoare_disjI2 cancel_ipc_cte_wp_at_not_reply_state | strengthen invs_vobjs_strgs invs_psp_aligned_strg valid_sched_weak_strg + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues | simp add: valid_tcb_state_def)+ apply (rule_tac Q="\rv. invs' and tcb_at' a" in hoare_strengthen_post) apply wp - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak - valid_tcb_state'_def) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak valid_tcb_state'_def) apply (rule setNotification_corres) apply (clarsimp simp add: ntfn_relation_def) apply (wp gts_wp gts_wp' | clarsimp)+ @@ -2594,23 +2564,23 @@ lemma sendSignal_corres: apply (rule corres_split[OF asUser_setRegister_corres]) apply (rule possibleSwitchTo_corres) apply ((wp | simp)+)[1] - apply (rule_tac Q="\_. Invariants_H.valid_queues and valid_queues' and - (\s. sch_act_wf (ksSchedulerAction s) s) and + apply (rule_tac Q="\_. (\s. 
sch_act_wf (ksSchedulerAction s) s) and cur_tcb' and - st_tcb_at' runnable' (hd list) and valid_objs'" + st_tcb_at' runnable' (hd list) and valid_objs' and + sym_heap_sched_pointers and valid_sched_pointers and + pspace_aligned' and pspace_distinct'" in hoare_post_imp, clarsimp simp: pred_tcb_at' elim!: sch_act_wf_weak) apply (wp | simp)+ apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb | simp)+ apply (wp set_simple_ko_valid_objs set_ntfn_aligned' set_ntfn_valid_objs' hoare_vcg_disj_lift weak_sch_act_wf_lift_linear | simp add: valid_tcb_state_def valid_tcb_state'_def)+ - apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def - valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def - valid_sched_action_def) + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def + valid_sched_action_def) apply (auto simp: valid_ntfn'_def )[1] apply (clarsimp simp: invs'_def valid_state'_def) @@ -2628,16 +2598,14 @@ lemma sendSignal_corres: apply (wp cur_tcb_lift | simp)+ apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb - | simp)+ + apply (wpsimp wp: sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb) apply (wp set_ntfn_aligned' set_simple_ko_valid_objs set_ntfn_valid_objs' hoare_vcg_disj_lift weak_sch_act_wf_lift_linear | simp add: valid_tcb_state_def valid_tcb_state'_def)+ - apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def - valid_pspace_def neq_Nil_conv - ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def - split: option.splits) + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def neq_Nil_conv + ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def + split: option.splits) apply (auto simp: valid_ntfn'_def neq_Nil_conv invs'_def valid_state'_def weak_sch_act_wf_def split: option.splits)[1] @@ -2663,43 +2631,11 @@ lemma possibleSwitchTo_sch_act[wp]: possibleSwitchTo t \\rv s. sch_act_wf (ksSchedulerAction s) s\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadSet_sch_act setQueue_sch_act threadGet_wp + apply (wp hoare_weak_lift_imp threadSet_sch_act setQueue_sch_act threadGet_wp | simp add: unless_def | wpc)+ apply (auto simp: obj_at'_def projectKOs tcb_in_cur_domain'_def) done -lemma possibleSwitchTo_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t\ - possibleSwitchTo t - \\rv. Invariants_H.valid_queues\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp hoare_drop_imps | wpc | simp)+ - apply (auto simp: valid_tcb'_def weak_sch_act_wf_def - dest: pred_tcb_at' - elim!: valid_objs_valid_tcbE) - done - -lemma possibleSwitchTo_ksQ': - "\(\s. t' \ set (ksReadyQueues s p) \ sch_act_not t' s) and K(t' \ t)\ - possibleSwitchTo t - \\_ s. 
t' \ set (ksReadyQueues s p)\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp - | wpc - | simp split del: if_split)+ - apply (auto simp: obj_at'_def) - done - -lemma possibleSwitchTo_valid_queues'[wp]: - "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) - and st_tcb_at' runnable' t\ - possibleSwitchTo t - \\rv. valid_queues'\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadGet_wp | wpc | simp)+ - apply (auto simp: obj_at'_def) - done - crunch st_refs_of'[wp]: possibleSwitchTo "\s. P (state_refs_of' s)" (wp: crunch_wps) @@ -2711,15 +2647,15 @@ crunch ct[wp]: possibleSwitchTo cur_tcb' (wp: cur_tcb_lift crunch_wps) lemma possibleSwitchTo_iflive[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' t - and (\s. sch_act_wf (ksSchedulerAction s) s)\ - possibleSwitchTo t - \\rv. if_live_then_nonz_cap'\" + "\if_live_then_nonz_cap' and ex_nonz_cap_to' t and (\s. sch_act_wf (ksSchedulerAction s) s) + and pspace_aligned' and pspace_distinct'\ + possibleSwitchTo t + \\_. if_live_then_nonz_cap'\" apply (simp add: possibleSwitchTo_def curDomain_def) apply (wp | wpc | simp)+ apply (simp only: imp_conv_disj, wp hoare_vcg_all_lift hoare_vcg_disj_lift) apply (wp threadGet_wp)+ - apply (auto simp: obj_at'_def projectKOs) + apply (auto simp: obj_at'_def) done crunches possibleSwitchTo @@ -2749,10 +2685,6 @@ crunches sendSignal, setBoundNotification rule: irqs_masked_lift) end -lemma sts_running_valid_queues: - "runnable' st \ \ Invariants_H.valid_queues \ setThreadState st t \\_. Invariants_H.valid_queues \" - by (wp sts_valid_queues, clarsimp) - lemma ct_in_state_activatable_imp_simple'[simp]: "ct_in_state' activatable' s \ ct_in_state' simple' s" apply (simp add: ct_in_state'_def) @@ -2765,24 +2697,21 @@ lemma setThreadState_nonqueued_state_update: \ st \ {Inactive, Running, Restart, IdleThreadState} \ (st \ Inactive \ ex_nonz_cap_to' t s) \ (t = ksIdleThread s \ idle' st) - - \ (\ runnable' st \ sch_act_simple s) - \ (\ runnable' st \ (\p. t \ set (ksReadyQueues s p)))\ - setThreadState st t \\rv. invs'\" + \ (\ runnable' st \ sch_act_simple s)\ + setThreadState st t + \\_. invs'\" apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre, wp valid_irq_node_lift - sts_valid_queues - setThreadState_ct_not_inQ) + apply (rule hoare_pre, wp valid_irq_node_lift setThreadState_ct_not_inQ) apply (clarsimp simp: pred_tcb_at') apply (rule conjI, fastforce simp: valid_tcb_state'_def) apply (drule simple_st_tcb_at_state_refs_ofD') apply (drule bound_tcb_at_state_refs_ofD') - apply (rule conjI, fastforce) - apply clarsimp - apply (erule delta_sym_refs) - apply (fastforce split: if_split_asm) - apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def - split: if_split_asm) + apply (rule conjI) + apply clarsimp + apply (erule delta_sym_refs) + apply (fastforce split: if_split_asm) + apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def split: if_split_asm) + apply fastforce done lemma cteDeleteOne_reply_cap_to'[wp]: @@ -2791,7 +2720,7 @@ lemma cteDeleteOne_reply_cap_to'[wp]: cteDeleteOne slot \\rv. 
ex_nonz_cap_to' p\" apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (subgoal_tac "isReplyCap (cteCap cte)") apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv @@ -2850,16 +2779,14 @@ lemma cancelAllIPC_not_rct[wp]: \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" apply (simp add: cancelAllIPC_def) apply (wp | wpc)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wp)+ apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) apply simp apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) - apply simp - apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (wp hoare_vcg_all_lift hoare_drop_imp) - apply (simp_all) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ done lemma cancelAllSignals_not_rct[wp]: @@ -2868,12 +2795,10 @@ lemma cancelAllSignals_not_rct[wp]: \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" apply (simp add: cancelAllSignals_def) apply (wp | wpc)+ - apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) - apply simp - apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (wp hoare_vcg_all_lift hoare_drop_imp) - apply (simp_all) + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ done crunch not_rct[wp]: finaliseCapTrue_standin "\s. ksSchedulerAction s \ ResumeCurrentThread" @@ -2945,8 +2870,8 @@ lemma sai_invs'[wp]: "\invs' and ex_nonz_cap_to' ntfnptr\ sendSignal ntfnptr badge \\y. invs'\" unfolding sendSignal_def - including no_pre - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + including classic_wp_pre + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (case_tac "ntfnObj nTFN", simp_all) prefer 3 apply (rename_tac list) @@ -2959,7 +2884,6 @@ lemma sai_invs'[wp]: apply (clarsimp simp:conj_comms) apply (simp add: invs'_def valid_state'_def) apply ((wp valid_irq_node_lift sts_valid_objs' setThreadState_ct_not_inQ - sts_valid_queues [where st="Structures_H.thread_state.Running", simplified] set_ntfn_valid_objs' cur_tcb_lift sts_st_tcb' hoare_convert_imp [OF setNotification_nosch] | simp split del: if_split)+)[3] @@ -3034,8 +2958,10 @@ lemma sai_invs'[wp]: dest!: global'_no_ex_cap st_tcb_ex_cap'' ko_at_valid_objs')+ lemma replyFromKernel_corres: - "corres dc (tcb_at t and invs) (tcb_at' t and invs') + "corres dc (tcb_at t and invs) invs' (reply_from_kernel t r) (replyFromKernel t r)" + apply (rule corres_cross_add_guard[where Q'="tcb_at' t"]) + apply (fastforce intro!: tcb_at_cross) apply (case_tac r) apply (clarsimp simp: replyFromKernel_def reply_from_kernel_def badge_register_def badgeRegister_def) @@ -3046,7 +2972,7 @@ lemma replyFromKernel_corres: apply simp apply (rule setMessageInfo_corres) apply (wp hoare_case_option_wp hoare_valid_ipc_buffer_ptr_typ_at' - | clarsimp)+ + | fastforce)+ done lemma rfk_invs': @@ -3059,7 +2985,7 @@ lemma rfk_invs': crunch nosch[wp]: replyFromKernel "\s. P (ksSchedulerAction s)" lemma completeSignal_corres: - "corres dc (ntfn_at ntfnptr and tcb_at tcb and pspace_aligned and valid_objs + "corres dc (ntfn_at ntfnptr and tcb_at tcb and pspace_aligned and pspace_distinct and valid_objs \ \and obj_at (\ko. 
ko = Notification ntfn \ Ipc_A.isActive ntfn) ntfnptr*\ ) (ntfn_at' ntfnptr and tcb_at' tcb and valid_pspace' and obj_at' isActive ntfnptr) (complete_signal ntfnptr tcb) (completeSignal ntfnptr tcb)" @@ -3084,12 +3010,12 @@ lemma completeSignal_corres: lemma doNBRecvFailedTransfer_corres: - "corres dc (tcb_at thread) - (tcb_at' thread) - (do_nbrecv_failed_transfer thread) - (doNBRecvFailedTransfer thread)" + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) \ + (do_nbrecv_failed_transfer thread) + (doNBRecvFailedTransfer thread)" unfolding do_nbrecv_failed_transfer_def doNBRecvFailedTransfer_def - by (simp add: badgeRegister_def badge_register_def, rule asUser_setRegister_corres) + by (corres corres: asUser_setRegister_corres + simp: badgeRegister_def badge_register_def)+ lemma receiveIPC_corres: assumes "is_ep_cap cap" and "cap_relation cap cap'" @@ -3174,11 +3100,11 @@ lemma receiveIPC_corres: and cte_wp_at (\c. c = cap.NullCap) (thread, tcb_cnode_index 3)" and P'="tcb_at' a and tcb_at' thread and cur_tcb' - and Invariants_H.valid_queues - and valid_queues' and valid_pspace' and valid_objs' - and (\s. weak_sch_act_wf (ksSchedulerAction s) s)" + and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" in corres_guard_imp [OF corres_if]) apply (simp add: fault_rel_optionation_def) apply (rule corres_if2 [OF _ setupCallerCap_corres setThreadState_corres]) @@ -3187,17 +3113,18 @@ lemma receiveIPC_corres: apply (rule corres_split[OF setThreadState_corres]) apply simp apply (rule possibleSwitchTo_corres) - apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action - | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb + apply (wpsimp wp: sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action)+ + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb | simp)+ - apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def - valid_sched_action_def) + apply (fastforce simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def + valid_sched_action_def) apply (clarsimp split: if_split_asm) apply (clarsimp | wp do_ipc_transfer_tcb_caps)+ - apply (rule_tac Q="\_ s. sch_act_wf (ksSchedulerAction s) s" - in hoare_post_imp, erule sch_act_wf_weak) + apply (rule_tac Q="\_ s. 
sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp) + apply (fastforce elim: sch_act_wf_weak) apply (wp sts_st_tcb' gts_st_tcb_at | simp)+ apply (simp cong: list.case_cong) apply wp @@ -3220,16 +3147,15 @@ lemma receiveIPC_corres: apply wp+ apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp) apply simp - apply (clarsimp simp: valid_tcb_state_def) + apply (fastforce simp: valid_tcb_state_def) apply (clarsimp simp add: valid_tcb_state'_def) apply (wp get_simple_ko_wp[where f=Notification] getNotification_wp gbn_wp gbn_wp' hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_if_lift | wpc | simp add: ep_at_def2[symmetric, simplified] | clarsimp)+ - apply (clarsimp simp: valid_cap_def invs_psp_aligned invs_valid_objs pred_tcb_at_def - valid_obj_def valid_tcb_def valid_bound_ntfn_def - dest!: invs_valid_objs - elim!: obj_at_valid_objsE - split: option.splits) + apply (fastforce simp: valid_cap_def invs_psp_aligned invs_valid_objs pred_tcb_at_def + valid_obj_def valid_tcb_def valid_bound_ntfn_def + elim!: obj_at_valid_objsE + split: option.splits) apply (auto simp: valid_cap'_def invs_valid_pspace' valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def obj_at'_def projectKOs pred_tcb_at'_def dest!: invs_valid_objs' obj_at_valid_objs' @@ -3263,7 +3189,7 @@ lemma receiveSignal_corres: apply (rule setNotification_corres) apply (simp add: ntfn_relation_def) apply wp+ - apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp+) + apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, fastforce+) \ \WaitingNtfn\ apply (simp add: ntfn_relation_def) apply (rule corres_guard_imp) @@ -3274,7 +3200,7 @@ lemma receiveSignal_corres: apply (simp add: ntfn_relation_def) apply wp+ apply (rule corres_guard_imp) - apply (rule doNBRecvFailedTransfer_corres, simp+) + apply (rule doNBRecvFailedTransfer_corres, fastforce+) \ \ActiveNtfn\ apply (simp add: ntfn_relation_def) apply (rule corres_guard_imp) @@ -3344,7 +3270,7 @@ lemma sendFaultIPC_corres: | wp (once) sch_act_sane_lift)+)[1] apply (rule corres_trivial, simp add: lookup_failure_map_def) apply (clarsimp simp: st_tcb_at_tcb_at split: if_split) - apply (simp add: valid_cap_def) + apply (fastforce simp: valid_cap_def) apply (clarsimp simp: valid_cap'_def inQ_def) apply auto[1] apply (clarsimp simp: lookup_failure_map_def) @@ -3362,14 +3288,16 @@ lemma gets_the_noop_corres: done lemma handleDoubleFault_corres: - "corres dc (tcb_at thread) - (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s)) + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) + \ (handle_double_fault thread f ft) (handleDoubleFault thread f' ft')" + apply (rule corres_cross_over_guard[where Q="tcb_at' thread"]) + apply (fastforce intro!: tcb_at_cross) apply (simp add: handle_double_fault_def handleDoubleFault_def) apply (rule corres_guard_imp) apply (subst bind_return [symmetric], - rule corres_underlying_split [OF setThreadState_corres]) + rule corres_split[OF setThreadState_corres]) apply simp apply (rule corres_noop2) apply (simp add: exs_valid_def return_def) @@ -3378,7 +3306,7 @@ lemma handleDoubleFault_corres: apply (rule asUser_inv) apply (rule getRestartPC_inv) apply (wp no_fail_getRestartPC)+ - apply (wp|simp)+ + apply (wp|simp)+ done crunch tcb' [wp]: sendFaultIPC "tcb_at' t" (wp: crunch_wps) @@ -3418,30 +3346,6 @@ crunch sch_act_wf: setupCallerCap "\s. 
sch_act_wf (ksSchedulerAction s) s" (wp: crunch_wps ssa_sch_act sts_sch_act rule: sch_act_wf_lift) -lemma setCTE_valid_queues[wp]: - "\Invariants_H.valid_queues\ setCTE ptr val \\rv. Invariants_H.valid_queues\" - by (wp valid_queues_lift setCTE_pred_tcb_at') - -crunch vq[wp]: cteInsert "Invariants_H.valid_queues" - (wp: crunch_wps) - -crunch vq[wp]: getThreadCallerSlot "Invariants_H.valid_queues" - (wp: crunch_wps) - -crunch vq[wp]: getThreadReplySlot "Invariants_H.valid_queues" - (wp: crunch_wps) - -lemma setupCallerCap_vq[wp]: - "\Invariants_H.valid_queues and (\s. \p. send \ set (ksReadyQueues s p))\ - setupCallerCap send recv grant \\_. Invariants_H.valid_queues\" - apply (simp add: setupCallerCap_def) - apply (wp crunch_wps sts_valid_queues) - apply (fastforce simp: valid_queues_def obj_at'_def inQ_def) - done - -crunch vq'[wp]: setupCallerCap "valid_queues'" - (wp: crunch_wps) - lemma is_derived_ReplyCap' [simp]: "\m p g. is_derived' m p (capability.ReplyCap t False g) = (\c. \ g. c = capability.ReplyCap t True g)" @@ -3485,7 +3389,7 @@ lemma setupCallerCap_vp[wp]: declare haskell_assert_inv[wp del] lemma setupCallerCap_iflive[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender\ + "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender and pspace_aligned' and pspace_distinct'\ setupCallerCap sender rcvr grant \\rv. if_live_then_nonz_cap'\" unfolding setupCallerCap_def getThreadCallerSlot_def @@ -3497,7 +3401,7 @@ lemma setupCallerCap_iflive[wp]: lemma setupCallerCap_ifunsafe[wp]: "\if_unsafe_then_cap' and valid_objs' and - ex_nonz_cap_to' rcvr and tcb_at' rcvr\ + ex_nonz_cap_to' rcvr and tcb_at' rcvr and pspace_aligned' and pspace_distinct'\ setupCallerCap sender rcvr grant \\rv. if_unsafe_then_cap'\" unfolding setupCallerCap_def getThreadCallerSlot_def @@ -3519,13 +3423,11 @@ lemma setupCallerCap_global_refs'[wp]: \\rv. valid_global_refs'\" unfolding setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv - apply (wp getSlotCap_cte_wp_at - | simp add: o_def unique_master_reply_cap' - | strengthen eq_imp_strg - | wp (once) getCTE_wp | clarsimp simp: cte_wp_at_ctes_of)+ - (* at setThreadState *) - apply (rule_tac Q="\_. valid_global_refs'" in hoare_post_imp, wpsimp+) - done + by (wp + | simp add: o_def unique_master_reply_cap' + | strengthen eq_imp_strg + | wp (once) getCTE_wp + | wp (once) hoare_vcg_imp_lift' hoare_vcg_ex_lift | clarsimp simp: cte_wp_at_ctes_of)+ crunch valid_arch'[wp]: setupCallerCap "valid_arch_state'" (wp: hoare_drop_imps) @@ -3651,7 +3553,7 @@ lemma completeSignal_invs: completeSignal ntfnptr tcb \\_. invs'\" apply (simp add: completeSignal_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp set_ntfn_minor_invs' | wpc | simp)+ apply (rule_tac Q="\_ s. (state_refs_of' s ntfnptr = ntfn_bound_refs' (ntfnBoundTCB ntfn)) @@ -3660,7 +3562,7 @@ lemma completeSignal_invs: \ ((\y. ntfnBoundTCB ntfn = Some y) \ ex_nonz_cap_to' ntfnptr s) \ ntfnptr \ ksIdleThread s" in hoare_strengthen_post) - apply ((wp hoare_vcg_ex_lift static_imp_wp | wpc | simp add: valid_ntfn'_def)+)[1] + apply ((wp hoare_vcg_ex_lift hoare_weak_lift_imp | wpc | simp add: valid_ntfn'_def)+)[1] apply (clarsimp simp: obj_at'_def state_refs_of'_def typ_at'_def ko_wp_at'_def projectKOs split: option.splits) apply (blast dest: ntfn_q_refs_no_bound_refs') apply wp @@ -3704,20 +3606,29 @@ crunch ctes_of[wp]: possibleSwitchTo "\s. 
P (ctes_of s)" lemmas possibleSwitchToTo_cteCaps_of[wp] = cteCaps_of_ctes_of_lift[OF possibleSwitchTo_ctes_of] +crunches asUser + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift wp: crunch_wps) + +crunches setupCallerCap, possibleSwitchTo, doIPCTransfer + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + (* t = ksCurThread s *) lemma ri_invs' [wp]: "\invs' and sch_act_not t and ct_in_state' simple' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s)\ receiveIPC t cap isBlocking \\_. invs'\" (is "\?pre\ _ \_\") apply (clarsimp simp: receiveIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) - apply (rule hoare_seq_ext [OF _ gbn_sp']) - apply (rule hoare_seq_ext) + apply (rule bind_wp [OF _ get_ep_sp']) + apply (rule bind_wp [OF _ gbn_sp']) + apply (rule bind_wp) (* set up precondition for old proof *) apply (rule_tac R="ko_at' ep (capEPPtr cap) and ?pre" in hoare_vcg_if_split) apply (wp completeSignal_invs) @@ -3727,7 +3638,7 @@ lemma ri_invs' [wp]: apply (rule hoare_pre, wpc, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) apply (wp sts_sch_act' hoare_vcg_const_Ball_lift valid_irq_node_lift - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at' o_def) @@ -3748,7 +3659,6 @@ lemma ri_invs' [wp]: apply (clarsimp split: if_split_asm) apply (rename_tac list one two three fur five six seven eight nine ten eleven) apply (subgoal_tac "set list \ {EPRecv} \ {}") - apply (thin_tac "\a b. t \ set (ksReadyQueues one (a, b))") \ \causes slowdown\ apply (safe ; solves \auto\) apply fastforce apply fastforce @@ -3759,7 +3669,7 @@ lemma ri_invs' [wp]: apply (rule hoare_pre, wpc, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) apply (wp sts_sch_act' valid_irq_node_lift - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def o_def) @@ -3783,9 +3693,8 @@ lemma ri_invs' [wp]: apply (rename_tac sender queue) apply (rule hoare_pre) apply (wp valid_irq_node_lift hoare_drop_imps setEndpoint_valid_mdb' - set_ep_valid_objs' sts_st_tcb' sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ possibleSwitchTo_valid_queues - possibleSwitchTo_valid_queues' + set_ep_valid_objs' sts_st_tcb' sts_sch_act' + setThreadState_ct_not_inQ possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift setEndpoint_ksQ setEndpoint_ct' | simp add: valid_tcb_state'_def case_bool_If @@ -3803,8 +3712,6 @@ lemma ri_invs' [wp]: st_tcb_at_refs_of_rev' conj_ac split del: if_split cong: if_cong) - apply (frule_tac t=sender in valid_queues_not_runnable'_not_ksQ) - apply (erule pred_tcb'_weakenE, clarsimp) apply (subgoal_tac "sch_act_not sender s") prefer 2 apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) @@ -3838,7 +3745,6 @@ lemma ri_invs' [wp]: lemma rai_invs'[wp]: "\invs' and sch_act_not t and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s) and (\s. \ntfnptr. isNotificationCap cap @@ -3848,14 +3754,14 @@ lemma rai_invs'[wp]: receiveSignal t cap isBlocking \\_. 
invs'\" apply (simp add: receiveSignal_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (rename_tac ep) apply (case_tac "ntfnObj ep") \ \ep = IdleNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp valid_irq_node_lift sts_sch_act' typ_at_lifts - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def | wpc)+ apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def) @@ -3873,12 +3779,12 @@ lemma rai_invs'[wp]: apply (clarsimp split: if_split_asm) apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse' split: if_split_asm) - apply (clarsimp dest!: global'_no_ex_cap) + apply (fastforce dest!: global'_no_ex_cap) apply (clarsimp simp: pred_tcb_at'_def obj_at'_def projectKOs) \ \ep = ActiveNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts static_imp_wp + apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts hoare_weak_lift_imp asUser_urz | simp add: valid_ntfn'_def)+ apply (clarsimp simp: pred_tcb_at' valid_pspace'_def) @@ -3893,7 +3799,7 @@ lemma rai_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' - sts_valid_queues setThreadState_ct_not_inQ typ_at_lifts + setThreadState_ct_not_inQ typ_at_lifts asUser_urz | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def | wpc)+ apply (clarsimp simp: valid_tcb_state'_def) @@ -3921,7 +3827,7 @@ lemma rai_invs'[wp]: apply (auto simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def)[5] apply (fastforce simp: tcb_bound_refs'_def split: if_split_asm) - apply (clarsimp dest!: global'_no_ex_cap) + apply (fastforce dest!: global'_no_ex_cap) done lemma getCTE_cap_to_refs[wp]: @@ -3950,7 +3856,6 @@ lemma cteInsert_invs_bits[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ cteInsert a b c \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ cteInsert a b c \\rv. Invariants_H.valid_queues\" "\cur_tcb'\ cteInsert a b c \\rv. cur_tcb'\" "\\s. P (state_refs_of' s)\ cteInsert a b c @@ -3975,16 +3880,19 @@ crunch irqs_masked'[wp]: possibleSwitchTo "irqs_masked'" crunch urz[wp]: possibleSwitchTo "untyped_ranges_zero'" (simp: crunch_simps unless_def wp: crunch_wps) +crunches possibleSwitchTo + for pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + lemma si_invs'[wp]: "\invs' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and sch_act_not t and ex_nonz_cap_to' ep and ex_nonz_cap_to' t\ sendIPC bl call ba cg cgr t ep \\rv. 
invs'\" supply if_split[split del] apply (simp add: sendIPC_def split del: if_split) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp [OF _ get_ep_sp']) apply (case_tac epa) \ \epa = RecvEP\ apply simp @@ -3996,8 +3904,8 @@ lemma si_invs'[wp]: apply (rule_tac P="a\t" in hoare_gen_asm) apply (wp valid_irq_node_lift sts_valid_objs' set_ep_valid_objs' setEndpoint_valid_mdb' sts_st_tcb' sts_sch_act' - possibleSwitchTo_sch_act_not sts_valid_queues setThreadState_ct_not_inQ - possibleSwitchTo_ksQ' possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift sts_ksQ' + possibleSwitchTo_sch_act_not setThreadState_ct_not_inQ + possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift hoare_convert_imp [OF doIPCTransfer_sch_act doIPCTransfer_ct'] hoare_convert_imp [OF setEndpoint_nosch setEndpoint_ct'] hoare_drop_imp [where f="threadGet tcbFault t"] @@ -4051,8 +3959,7 @@ lemma si_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) - apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') apply (rule conjI, clarsimp elim!: obj_at'_weakenE) apply (subgoal_tac "ep \ t") @@ -4071,8 +3978,7 @@ lemma si_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) - apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' - sts_valid_queues setThreadState_ct_not_inQ) + apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') apply (rule conjI, clarsimp elim!: obj_at'_weakenE) apply (frule obj_at_valid_objs', clarsimp) @@ -4099,23 +4005,19 @@ lemma si_invs'[wp]: lemma sfi_invs_plus': "\invs' and st_tcb_at' simple' t and sch_act_not t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t\ - sendFaultIPC t f - \\rv. invs'\, \\rv. invs' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) - and sch_act_not t and (\s. ksIdleThread s \ t)\" + sendFaultIPC t f + \\_. invs'\, \\_. invs' and st_tcb_at' simple' t and sch_act_not t and (\s. ksIdleThread s \ t)\" apply (simp add: sendFaultIPC_def) apply (wp threadSet_invs_trivial threadSet_pred_tcb_no_state threadSet_cap_to' | wpc | simp)+ apply (rule_tac Q'="\rv s. invs' s \ sch_act_not t s \ st_tcb_at' simple' t s - \ (\p. t \ set (ksReadyQueues s p)) \ ex_nonz_cap_to' t s \ t \ ksIdleThread s \ (\r\zobj_refs' rv. ex_nonz_cap_to' r s)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp: inQ_def pred_tcb_at') apply (wp | simp)+ @@ -4123,12 +4025,16 @@ lemma sfi_invs_plus': apply (subst(asm) global'_no_ex_cap, auto) done +crunches send_fault_ipc + for pspace_aligned[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" + (simp: crunch_simps wp: crunch_wps) + lemma handleFault_corres: "fr f f' \ corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread and (%_. valid_fault f)) (invs' and sch_act_not thread - and (\s. \p. 
thread \ set(ksReadyQueues s p)) and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) (handle_fault thread f) (handleFault thread f')" apply (simp add: handle_fault_def handleFault_def) @@ -4143,9 +4049,6 @@ lemma handleFault_corres: apply simp apply (rule handleDoubleFault_corres) apply wp+ - apply (rule hoare_post_impErr, rule sfi_invs_plus', simp_all)[1] - apply clarsimp - apply wp+ apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 invs_def valid_state_def valid_idle_def) apply auto @@ -4156,17 +4059,13 @@ lemma sts_invs_minor'': \ (st \ Inactive \ \ idle' st \ st' \ Inactive \ \ idle' st')) t and (\s. t = ksIdleThread s \ idle' st) - and (\s. (\p. t \ set (ksReadyQueues s p)) \ runnable' st) - and (\s. runnable' st \ obj_at' tcbQueued t s - \ st_tcb_at' runnable' t s) and (\s. \ runnable' st \ sch_act_not t s) and invs'\ setThreadState st t \\rv. invs'\" apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply clarsimp apply (rule conjI) apply fastforce @@ -4181,12 +4080,11 @@ lemma sts_invs_minor'': apply (clarsimp dest!: st_tcb_at_state_refs_ofD' elim!: rsubst[where P=sym_refs] intro!: ext) - apply (clarsimp elim!: st_tcb_ex_cap'') + apply (fastforce elim!: st_tcb_ex_cap'') done lemma hf_invs' [wp]: "\invs' and sch_act_not t - and (\s. \p. t \ set(ksReadyQueues s p)) and st_tcb_at' simple' t and ex_nonz_cap_to' t and (\s. t \ ksIdleThread s)\ handleFault t f \\r. invs'\" @@ -4194,7 +4092,7 @@ lemma hf_invs' [wp]: apply wp apply (simp add: handleDoubleFault_def) apply (wp sts_invs_minor'' dmo_invs')+ - apply (rule hoare_post_impErr, rule sfi_invs_plus', + apply (rule hoare_strengthen_postE, rule sfi_invs_plus', simp_all) apply (strengthen no_refs_simple_strg') apply clarsimp @@ -4226,8 +4124,8 @@ lemma si_blk_makes_simple': sendIPC True call bdg x x' t' ep \\rv. st_tcb_at' simple' t\" apply (simp add: sendIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (case_tac xa, simp_all) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) apply (rename_tac list) apply (case_tac list, simp_all add: case_bool_If case_option_If split del: if_split cong: if_cong) @@ -4246,8 +4144,8 @@ lemma si_blk_makes_runnable': sendIPC True call bdg x x' t' ep \\rv. st_tcb_at' runnable' t\" apply (simp add: sendIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (case_tac xa, simp_all) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) apply (rename_tac list) apply (case_tac list, simp_all add: case_bool_If case_option_If split del: if_split cong: if_cong) @@ -4270,7 +4168,7 @@ lemma sendSignal_st_tcb'_Running: sendSignal ntfnptr bdg \\_. st_tcb_at' (\st. 
st = Running \ P st) t\" apply (simp add: sendSignal_def) - apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp static_imp_wp + apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp hoare_weak_lift_imp | wpc | clarsimp simp: pred_tcb_at')+ done diff --git a/proof/refine/ARM/KHeap_R.thy b/proof/refine/ARM/KHeap_R.thy index a946d12c34..23123dccb1 100644 --- a/proof/refine/ARM/KHeap_R.thy +++ b/proof/refine/ARM/KHeap_R.thy @@ -14,8 +14,45 @@ lemma lookupAround2_known1: "m x = Some y \ fst (lookupAround2 x m) = Some (x, y)" by (fastforce simp: lookupAround2_char1) +lemma koTypeOf_injectKO: + fixes v :: "'a :: pspace_storable" + shows "koTypeOf (injectKO v) = koType TYPE('a)" + apply (cut_tac v1=v in iffD2 [OF project_inject, OF refl]) + apply (simp add: project_koType[symmetric]) + done + context begin interpretation Arch . (*FIXME: arch_split*) +lemma setObject_modify_variable_size: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; obj_at' (\obj. objBits v = objBits obj) p s\ + \ setObject p v s = modify (ksPSpace_update (\ps. ps (p \ injectKO v))) s" + apply (clarsimp simp: setObject_def split_def exec_gets obj_at'_def projectKOs + lookupAround2_known1 assert_opt_def updateObject_default_def bind_assoc) + apply (simp add: projectKO_def alignCheck_assert) + apply (simp add: project_inject objBits_def) + apply (clarsimp simp only: koTypeOf_injectKO) + apply (frule in_magnitude_check[where s'=s]) + apply blast + apply fastforce + apply (simp add: magnitudeCheck_assert in_monad bind_def gets_def oassert_opt_def + get_def return_def) + apply (simp add: simpler_modify_def) + done + +lemma setObject_modify: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; \ko. P ko \ objBits ko = objBits v \ + \ setObject p v s = modify (ksPSpace_update (\ps. ps (p \ injectKO v))) s" + apply (rule setObject_modify_variable_size) + apply fastforce + apply fastforce + apply fastforce + unfolding obj_at'_def + by fastforce + lemma obj_at_getObject: assumes R: "\a b n ko s obj::'a::pspace_storable. @@ -116,8 +153,7 @@ lemma corres_get_tcb [corres]: apply (drule bspec) apply clarsimp apply blast - apply (clarsimp simp add: other_obj_relation_def - lookupAround2_known1) + apply (clarsimp simp: tcb_relation_cut_def lookupAround2_known1) done lemma lookupAround2_same1[simp]: @@ -186,7 +222,7 @@ lemma obj_at_setObject1: setObject p (v::'a::pspace_storable) \ \rv. obj_at' (\x::'a::pspace_storable. True) t \" apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad obj_at'_def projectKOs lookupAround2_char1 project_inject @@ -208,7 +244,7 @@ lemma obj_at_setObject2: setObject p (v::'a) \ \rv. 
obj_at' P t \" apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad) apply (frule updateObject_type) apply (drule R) @@ -383,6 +419,40 @@ lemma setObject_tcb_strongest: ps_clear_upd) done +method setObject_easy_cases = + clarsimp simp: setObject_def in_monad split_def valid_def lookupAround2_char1, + erule rsubst[where P=P'], rule ext, + clarsimp simp: updateObject_cte updateObject_default_def in_monad + typeError_def opt_map_def opt_pred_def projectKO_opts_defs projectKOs projectKO_eq + split: if_split_asm + Structures_H.kernel_object.split_asm + +lemma setObject_endpoint_tcbs_of'[wp]: + "setObject c (endpoint :: endpoint) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_notification_tcbs_of'[wp]: + "setObject c (notification :: notification) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedNexts_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedNexts_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedPrevs_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedPrevs_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbQueued[wp]: + "setObject c (cte :: cte) \\s. P' (tcbQueued |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + +lemma setObject_cte_inQ[wp]: + "setObject c (cte :: cte) \\s. P' (inQ d p |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + lemma getObject_obj_at': assumes x: "\q n ko. loadObject p q n ko = (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" @@ -608,8 +678,8 @@ lemma cte_wp_at_ctes_of: apply (simp add: field_simps) apply (clarsimp split: if_split_asm del: disjCI) apply (simp add: ps_clear_def3 field_simps) - apply (rule disjI2, rule exI[where x="p - (p && ~~ mask 9)"]) - apply (clarsimp simp: ps_clear_def3[where na=9] is_aligned_mask + apply (rule disjI2, rule exI[where x="p - (p && ~~ mask tcb_bits)"]) + apply (clarsimp simp: ps_clear_def3[where na=tcb_bits] is_aligned_mask word_bw_assocs field_simps) done @@ -870,7 +940,7 @@ lemma obj_relation_cut_same_type: \ (\sz sz'. 
a_type ko = AArch (ADeviceData sz) \ a_type ko' = AArch (ADeviceData sz'))" apply (rule ccontr) apply (simp add: obj_relation_cuts_def2 a_type_def) - apply (auto simp: other_obj_relation_def cte_relation_def + apply (auto simp: other_obj_relation_def tcb_relation_cut_def cte_relation_def pte_relation_def pde_relation_def split: Structures_A.kernel_object.split_asm if_split_asm Structures_H.kernel_object.split_asm @@ -888,6 +958,16 @@ where "exst_same' (KOTCB tcb) (KOTCB tcb') = exst_same tcb tcb'" | "exst_same' _ _ = True" +lemma tcbs_of'_non_tcb_update: + "\typ_at' (koTypeOf ko) ptr s'; koTypeOf ko \ TCBT\ + \ tcbs_of' (s'\ksPSpace := (ksPSpace s')(ptr \ ko)\) = tcbs_of' s'" + by (fastforce simp: typ_at'_def ko_wp_at'_def opt_map_def projectKO_opts_defs + split: kernel_object.splits) + +lemma typ_at'_koTypeOf: + "ko_at' ob' ptr b \ typ_at' (koTypeOf (injectKO ob')) ptr b" + by (auto simp: typ_at'_def ko_wp_at'_def obj_at'_def project_inject projectKOs) + lemma setObject_other_corres: fixes ob' :: "'a :: pspace_storable" assumes x: "updateObject ob' = updateObject_default ob'" @@ -917,7 +997,7 @@ lemma setObject_other_corres: apply (clarsimp simp add: caps_of_state_after_update cte_wp_at_after_update swp_def fun_upd_def obj_at_def) apply (subst conj_assoc[symmetric]) - apply (rule conjI[rotated]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x=ptr in allE)+ apply (clarsimp simp: obj_at_def a_type_def @@ -927,6 +1007,14 @@ lemma setObject_other_corres: apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) apply (elim conjE) apply (frule bspec, erule domI) + apply (prop_tac "typ_at' (koTypeOf (injectKO ob')) ptr b") + subgoal + by (clarsimp simp: typ_at'_def ko_wp_at'_def obj_at'_def projectKO_opts_defs + is_other_obj_relation_type_def a_type_def other_obj_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm kernel_object.split_asm + arch_kernel_object.split_asm) + apply clarsimp apply (rule conjI) apply (rule ballI, drule(1) bspec) apply (drule domD) @@ -935,30 +1023,30 @@ lemma setObject_other_corres: apply clarsimp apply (frule_tac ko'=ko and x'=ptr in obj_relation_cut_same_type, (fastforce simp add: is_other_obj_relation_type t)+) - apply (erule disjE) - apply (simp add: is_other_obj_relation_type t) - apply (erule disjE) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_CapTable a_type_def) - apply (erule disjE) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_UserData a_type_def) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_DeviceData a_type_def) - apply (simp only: ekheap_relation_def) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (insert e) - apply atomize - apply (clarsimp simp: obj_at'_def) - apply (erule_tac x=obj in allE) - apply (clarsimp simp: projectKO_eq project_inject) - apply (case_tac ob; - simp_all add: a_type_def other_obj_relation_def etcb_relation_def - is_other_obj_relation_type t exst_same_def) - by (clarsimp simp: is_other_obj_relation_type t exst_same_def - split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits - ARM_A.arch_kernel_obj.splits)+ + apply (insert t) + apply ((erule disjE + | clarsimp simp: is_other_obj_relation_type is_other_obj_relation_type_def a_type_def)+)[1] + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule(1) 
bspec) + apply (drule domD) + apply (insert e) + apply atomize + apply (clarsimp simp: obj_at'_def) + apply (erule_tac x=obj in allE) + apply (clarsimp simp: projectKO_eq project_inject) + apply (case_tac ob; + simp_all add: a_type_def other_obj_relation_def etcb_relation_def + is_other_obj_relation_type t exst_same_def) + apply (clarsimp simp: is_other_obj_relation_type t exst_same_def + split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits + arch_kernel_obj.splits)+ + \ \ready_queues_relation\ + apply (prop_tac "koTypeOf (injectKO ob') \ TCBT") + subgoal + by (clarsimp simp: other_obj_relation_def; cases ob; cases "injectKO ob'"; + simp split: arch_kernel_obj.split_asm) + by (fastforce dest: tcbs_of'_non_tcb_update) lemmas obj_at_simps = obj_at_def obj_at'_def projectKOs map_to_ctes_upd_other is_other_obj_relation_type_def @@ -970,8 +1058,8 @@ lemma setEndpoint_corres [corres]: corres dc (ep_at ptr) (ep_at' ptr) (set_endpoint ptr e) (setEndpoint ptr e')" apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) lemma setNotification_corres [corres]: @@ -979,8 +1067,8 @@ lemma setNotification_corres [corres]: corres dc (ntfn_at ptr) (ntfn_at' ptr) (set_notification ptr ae) (setNotification ptr ae')" apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) lemma no_fail_getNotification [wp]: @@ -1056,9 +1144,10 @@ lemma typ_at'_valid_obj'_lift: (wpsimp|rule conjI)+) apply (rename_tac tcb) apply (case_tac "tcbState tcb"; - simp add: valid_tcb'_def valid_tcb_state'_def split_def valid_bound_ntfn'_def - split: option.splits, - wpsimp) + simp add: valid_tcb'_def valid_tcb_state'_def split_def opt_tcb_at'_def + valid_bound_ntfn'_def; + wpsimp wp: hoare_case_option_wp hoare_case_option_wp2; + (clarsimp split: option.splits)?) apply (wpsimp simp: valid_cte'_def) apply (rename_tac arch_kernel_object) apply (case_tac arch_kernel_object; wpsimp) @@ -1340,32 +1429,6 @@ lemma set_ep_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ done -lemma set_ep_valid_queues[wp]: - "\Invariants_H.valid_queues\ setEndpoint epptr ep \\rv. Invariants_H.valid_queues\" - apply (simp add: Invariants_H.valid_queues_def) - apply (wp hoare_vcg_conj_lift) - apply (simp add: setEndpoint_def valid_queues_no_bitmap_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (rule obj_at_setObject2) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] - | simp add: valid_queues_no_bitmap_def)+ - done - -lemma set_ep_valid_queues'[wp]: - "\valid_queues'\ setEndpoint epptr ep \\rv. 
valid_queues'\" - apply (unfold setEndpoint_def) - apply (simp only: valid_queues'_def imp_conv_disj - obj_at'_real_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (rule setObject_ko_wp_at) - apply simp - apply (simp add: objBits_simps') - apply simp - apply (wp updateObject_default_inv | simp)+ - apply (clarsimp simp: projectKOs ko_wp_at'_def) - done - lemma ct_in_state_thread_state_lift': assumes ct: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" assumes st: "\t. \st_tcb_at' P t\ f \\_. st_tcb_at' P t\" @@ -1565,34 +1628,6 @@ lemma set_ntfn_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ done -lemma set_ntfn_valid_queues[wp]: - "\Invariants_H.valid_queues\ setNotification p ntfn \\rv. Invariants_H.valid_queues\" - apply (simp add: Invariants_H.valid_queues_def) - apply (rule hoare_pre) - apply (wp hoare_vcg_conj_lift) - apply (simp add: setNotification_def valid_queues_no_bitmap_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (rule obj_at_setObject2) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] - | simp add: valid_queues_no_bitmap_def)+ - done - -lemma set_ntfn_valid_queues'[wp]: - "\valid_queues'\ setNotification p ntfn \\rv. valid_queues'\" - apply (unfold setNotification_def) - apply (rule setObject_ntfn_pre) - apply (simp only: valid_queues'_def imp_conv_disj - obj_at'_real_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (rule setObject_ko_wp_at) - apply simp - apply (simp add: objBits_simps') - apply simp - apply (wp updateObject_default_inv | simp)+ - apply (clarsimp simp: projectKOs ko_wp_at'_def) - done - lemma set_ntfn_state_refs_of'[wp]: "\\s. P ((state_refs_of' s) (epptr := ntfn_q_refs_of' (ntfnObj ntfn) \ ntfn_bound_refs' (ntfnBoundTCB ntfn)))\ @@ -2014,6 +2049,21 @@ lemma setNotification_ct_idle_or_in_cur_domain'[wp]: crunch gsUntypedZeroRanges[wp]: setNotification "\s. P (gsUntypedZeroRanges s)" (wp: setObject_ksPSpace_only updateObject_default_inv) +lemma sym_heap_sched_pointers_lift: + assumes prevs: "\P. f \\s. P (tcbSchedPrevs_of s)\" + assumes nexts: "\P. f \\s. P (tcbSchedNexts_of s)\" + shows "f \sym_heap_sched_pointers\" + by (rule_tac f=tcbSchedPrevs_of in hoare_lift_Pf2; wpsimp wp: assms) + +crunches setNotification + for tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: updateObject_default_def) + lemma set_ntfn_minor_invs': "\invs' and obj_at' (\ntfn. ntfn_q_refs_of' (ntfnObj ntfn) = ntfn_q_refs_of' (ntfnObj val) \ ntfn_bound_refs' (ntfnBoundTCB ntfn) = ntfn_bound_refs' (ntfnBoundTCB val)) @@ -2023,9 +2073,10 @@ lemma set_ntfn_minor_invs': and (\s. ptr \ ksIdleThread s) \ setNotification ptr val \\rv. 
invs'\" - apply (clarsimp simp add: invs'_def valid_state'_def cteCaps_of_def) - apply (wp irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift, - simp_all add: o_def) + apply (clarsimp simp: invs'_def valid_state'_def cteCaps_of_def) + apply (wpsimp wp: irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift + sym_heap_sched_pointers_lift valid_bitmaps_lift + simp: o_def) apply (clarsimp elim!: rsubst[where P=sym_refs] intro!: ext dest!: obj_at_state_refs_ofD')+ @@ -2076,21 +2127,21 @@ lemma valid_globals_cte_wpD': lemma dmo_aligned'[wp]: "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_distinct'[wp]: "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_valid_objs'[wp]: "\valid_objs'\ doMachineOp f \\_. valid_objs'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done @@ -2098,7 +2149,7 @@ lemma dmo_inv': assumes R: "\P. \P\ f \\_. P\" shows "\P\ doMachineOp f \\_. P\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp apply (drule in_inv_by_hoareD [OF R]) apply simp @@ -2111,21 +2162,17 @@ crunch typ_at'[wp]: doMachineOp "\s. P (typ_at' T p s)" lemmas doMachineOp_typ_ats[wp] = typ_at_lifts [OF doMachineOp_typ_at'] lemma doMachineOp_invs_bits[wp]: - "\valid_pspace'\ doMachineOp m \\rv. valid_pspace'\" - "\\s. sch_act_wf (ksSchedulerAction s) s\ - doMachineOp m \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ doMachineOp m \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ doMachineOp m \\rv. valid_queues'\" - "\\s. P (state_refs_of' s)\ - doMachineOp m - \\rv s. P (state_refs_of' s)\" - "\if_live_then_nonz_cap'\ doMachineOp m \\rv. if_live_then_nonz_cap'\" - "\cur_tcb'\ doMachineOp m \\rv. cur_tcb'\" - "\if_unsafe_then_cap'\ doMachineOp m \\rv. if_unsafe_then_cap'\" + "doMachineOp m \valid_pspace'\" + "doMachineOp m \\s. sch_act_wf (ksSchedulerAction s) s\" + "doMachineOp m \valid_bitmaps\" + "doMachineOp m \valid_sched_pointers\" + "doMachineOp m \\s. P (state_refs_of' s)\" + "doMachineOp m \if_live_then_nonz_cap'\" + "doMachineOp m \cur_tcb'\" + "doMachineOp m \if_unsafe_then_cap'\" by (simp add: doMachineOp_def split_def - valid_pspace'_def valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - | wp cur_tcb_lift sch_act_wf_lift tcb_in_cur_domain'_lift - | fastforce elim: state_refs_of'_pspaceI)+ + | wp + | fastforce elim: state_refs_of'_pspaceI)+ crunch cte_wp_at'[wp]: doMachineOp "\s. P (cte_wp_at' P' p s)" crunch obj_at'[wp]: doMachineOp "\s. 
P (obj_at' P' p s)" @@ -2148,6 +2195,29 @@ lemma setEndpoint_ct': apply (wp updateObject_default_inv | simp)+ done +lemma aligned_distinct_obj_atI': + "\ ksPSpace s x = Some ko; pspace_aligned' s; pspace_distinct' s; ko = injectKO v \ + \ ko_at' v x s" + apply (simp add: obj_at'_def projectKOs project_inject pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply (clarsimp simp: bit_simps objBits_simps' word_bits_def + split: kernel_object.splits arch_kernel_object.splits) + done + +lemma aligned'_distinct'_ko_wp_at'I: + "\ksPSpace s' x = Some ko; P ko; pspace_aligned' s'; pspace_distinct' s'\ + \ ko_wp_at' P x s'" + apply (simp add: ko_wp_at'_def pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply (cases ko; force) + done + +lemma aligned'_distinct'_ko_at'I: + "\ksPSpace s' x = Some ko; pspace_aligned' s'; pspace_distinct' s'; + ko = injectKO (v:: 'a :: pspace_storable)\ + \ ko_at' v x s'" + by (fastforce elim: aligned'_distinct'_ko_wp_at'I simp: obj_at'_real_def project_inject) + lemmas setEndpoint_valid_globals[wp] = valid_global_refs_lift' [OF set_ep_ctes_of set_ep_arch' setEndpoint_it setEndpoint_ksInterruptState] diff --git a/proof/refine/ARM/LevityCatch.thy b/proof/refine/ARM/LevityCatch.thy index 8a8c12a736..ef348c7f85 100644 --- a/proof/refine/ARM/LevityCatch.thy +++ b/proof/refine/ARM/LevityCatch.thy @@ -8,6 +8,7 @@ theory LevityCatch imports "BaseRefine.Include" "Lib.LemmaBucket" + "Lib.Corres_Method" begin (* Try again, clagged from Include *) @@ -39,14 +40,14 @@ lemma alignCheck_assert: lemma magnitudeCheck_inv: "\P\ magnitudeCheck x y n \\rv. P\" apply (clarsimp simp add: magnitudeCheck_def split: option.splits) - apply (wp hoare_when_wp) + apply (wp when_wp) apply simp done lemma alignCheck_inv: "\P\ alignCheck x n \\rv. 
P\" apply (simp add: alignCheck_def unless_def alignError_def) - apply (wp hoare_when_wp) + apply (wp when_wp) apply simp done diff --git a/proof/refine/ARM/PageTableDuplicates.thy b/proof/refine/ARM/PageTableDuplicates.thy index 3625daf8b5..2986c0ff84 100644 --- a/proof/refine/ARM/PageTableDuplicates.thy +++ b/proof/refine/ARM/PageTableDuplicates.thy @@ -103,7 +103,7 @@ lemma mapM_x_storePTE_updates: apply (induct xs) apply (simp add: mapM_x_Nil) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: storePTE_def setObject_def) apply (wp | simp add:split_def updateObject_default_def)+ @@ -399,7 +399,7 @@ lemma mapM_x_storePDE_updates: apply (induct xs) apply (simp add: mapM_x_Nil) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: storePDE_def setObject_def) apply (wp | simp add:split_def updateObject_default_def)+ @@ -1045,10 +1045,10 @@ lemma createObject_valid_duplicates'[wp]: apply (wpc | wp| simp add: ARM_H.createObject_def split del: if_split)+ apply (simp add: placeNewObject_def placeNewDataObject_def placeNewObject'_def split_def split del: if_split - | wp hoare_unless_wp[where P="d"] hoare_unless_wp[where Q=\] + | wp unless_wp[where P="d"] unless_wp[where Q=\] | wpc | simp add: alignError_def split del: if_split)+ apply (rule copyGlobalMappings_valid_duplicates') - apply ((wp hoare_unless_wp[where P="d"] hoare_unless_wp[where Q=\] | wpc + apply ((wp unless_wp[where P="d"] unless_wp[where Q=\] | wpc | simp add: alignError_def placeNewObject_def placeNewObject'_def split_def split del: if_split)+)[2] apply (intro conjI impI) @@ -1167,7 +1167,7 @@ lemma createObject_valid_duplicates'[wp]: crunch arch_inv[wp]: createNewObjects "\s. P (armKSGlobalPD (ksArchState s))" - (simp: crunch_simps zipWithM_x_mapM wp: crunch_wps hoare_unless_wp) + (simp: crunch_simps zipWithM_x_mapM wp: crunch_wps unless_wp) lemma createNewObjects_valid_duplicates'[wp]: @@ -1320,7 +1320,7 @@ lemma deleteObjects_valid_duplicates'[wp]: crunch arch_inv[wp]: resetUntypedCap "\s. P (ksArchState s)" (simp: crunch_simps - wp: hoare_drop_imps hoare_unless_wp mapME_x_inv_wp + wp: hoare_drop_imps unless_wp mapME_x_inv_wp preemptionPoint_inv ignore: freeMemory) @@ -1356,7 +1356,7 @@ lemma new_CapTable_bound: done lemma invokeUntyped_valid_duplicates[wp]: - notes hoare_whenE_wps[wp_split del] shows + notes whenE_wps[wp_split del] shows "\invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and valid_untyped_inv' ui and ct_active'\ invokeUntyped ui @@ -1370,10 +1370,10 @@ lemma invokeUntyped_valid_duplicates[wp]: apply (rule hoare_pre) apply simp apply (wp add: updateFreeIndex_pspace_no_overlap') - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule combine_validE) apply (rule_tac ui=ui in whenE_reset_resetUntypedCap_invs_etc) - apply (rule hoare_whenE_wp) + apply (rule whenE_wp) apply (rule valid_validE) apply (rule resetUntypedCap_valid_duplicates') defer @@ -1535,7 +1535,7 @@ lemma checkMappingPPtr_Section: lemma mapM_x_mapM_valid: "\ P \ mapM_x f xs \\r. Q\ \ \P\mapM f xs \\r. 
Q\" - apply (simp add:NonDetMonadLemmaBucket.mapM_x_mapM) + apply (simp add: mapM_x_mapM) apply (clarsimp simp:valid_def return_def bind_def) apply (drule spec) apply (erule impE) @@ -1599,7 +1599,7 @@ lemma unmapPage_valid_duplicates'[wp]: apply simp apply (wp mapM_x_mapM_valid)+ apply (wp checkMappingPPtr_inv lookupPTSlot_page_table_at')+ - apply (rule hoare_post_imp_R[OF lookupPTSlot_aligned[where sz= vmpage_size]]) + apply (rule hoare_strengthen_postE_R[OF lookupPTSlot_aligned[where sz= vmpage_size]]) apply (simp add:pageBitsForSize_def) apply (drule upto_enum_step_shift[where n = 6 and m = 2,simplified]) apply (clarsimp simp: mask_def add.commute upto_enum_step_def largePagePTEOffsets_def @@ -1613,9 +1613,9 @@ lemma unmapPage_valid_duplicates'[wp]: in mapM_x_storePDE_update_helper[where sz = 6]) apply wp+ apply (clarsimp simp:conj_comms) - apply (wp checkMappingPPtr_inv static_imp_wp)+ + apply (wp checkMappingPPtr_inv hoare_weak_lift_imp)+ apply (clarsimp simp:conj_comms) - apply (rule hoare_post_imp_R[where Q'= "\r. pspace_aligned' and + apply (rule hoare_strengthen_postE_R[where Q'= "\r. pspace_aligned' and (\s. vs_valid_duplicates' (ksPSpace s)) and K(vmsz_aligned' vptr vmpage_size \ is_aligned r pdBits) and page_directory_at' (lookup_pd_slot r vptr && ~~ mask pdBits)"]) @@ -1648,7 +1648,7 @@ lemma unmapPageTable_valid_duplicates'[wp]: \\_ s. vs_valid_duplicates' (ksPSpace s)\" apply (simp add:unmapPageTable_def pageTableMapped_def) apply (wpsimp wp: storePDE_no_duplicates' getPDE_wp) - apply (rule hoare_post_imp_R[where Q' = "\r s. vs_valid_duplicates' (ksPSpace s)"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r s. vs_valid_duplicates' (ksPSpace s)"]) apply wp apply (clarsimp simp: ko_wp_at'_def obj_at'_real_def projectKO_opt_pde) apply (clarsimp simp: vs_entry_align_def @@ -1713,7 +1713,7 @@ lemma finaliseSlot_valid_duplicates'[wp]: \\_ s. invs' s \ vs_valid_duplicates' (ksPSpace s) \ sch_act_simple s \" unfolding finaliseSlot_def apply (rule validE_valid, rule hoare_pre, - rule hoare_post_impErr, rule use_spec) + rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where p=slot and slot=slot and Pr="vs_valid_duplicates' o ksPSpace"]) apply (simp_all add: valid_duplicates_finalise_prop_stuff) apply (wp | simp add: o_def)+ @@ -1727,7 +1727,6 @@ lemma cteDelete_valid_duplicates': apply (rule hoare_gen_asm) apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply simp apply (rule valid_validE) apply (rule hoare_post_imp[OF _ finaliseSlot_valid_duplicates']) apply simp @@ -1800,7 +1799,7 @@ lemma invokeCNode_valid_duplicates'[wp]: apply (simp add:invs_valid_objs' invs_pspace_aligned') apply (clarsimp simp add:invokeCNode_def | wp | intro conjI)+ apply (rule hoare_pre) - apply (wp hoare_unless_wp | wpc | simp)+ + apply (wp unless_wp | wpc | simp)+ apply (simp add:invokeCNode_def) apply (wp getSlotCap_inv hoare_drop_imp |simp add:locateSlot_conv getThreadCallerSlot_def @@ -1904,7 +1903,7 @@ lemma placeASIDPool_valid_duplicates'[wp]: placeNewObject' ptr (KOArch (KOASIDPool makeObject)) 0 \\rv s. vs_valid_duplicates' (ksPSpace s)\" apply (simp add:placeNewObject'_def) - apply (wp hoare_unless_wp | wpc | + apply (wp unless_wp | wpc | simp add:alignError_def split_def)+ apply (subgoal_tac "vs_valid_duplicates' (\a. 
if a = ptr then Some (KOArch (KOASIDPool makeObject)) else ksPSpace s a)") apply fastforce @@ -1982,7 +1981,7 @@ lemma performArchInvocation_valid_duplicates': apply (clarsimp simp:cte_wp_at_ctes_of) apply (case_tac ctea,clarsimp) apply (frule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) - apply (wp static_imp_wp|simp)+ + apply (wp hoare_weak_lift_imp|simp)+ apply (simp add:placeNewObject_def) apply (wp |simp add:alignError_def unless_def|wpc)+ apply (wp updateFreeIndex_pspace_no_overlap' hoare_drop_imp @@ -2034,11 +2033,11 @@ lemma tc_valid_duplicates': apply (simp only: eq_commute[where a="a"]) apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp setMCPriority_invs' + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] apply ((simp only: simp_thms cases_simp cong: conj_cong @@ -2052,7 +2051,7 @@ lemma tc_valid_duplicates': checkCap_inv[where P="\s. vs_valid_duplicates' (ksPSpace s)"] checkCap_inv[where P=sch_act_simple] cteDelete_valid_duplicates' hoare_vcg_const_imp_lift_R typ_at_lifts[OF setPriority_typ_at'] assertDerived_wp threadSet_cte_wp_at' - hoare_vcg_all_lift_R hoare_vcg_all_lift static_imp_wp)[1] + hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_weak_lift_imp)[1] | wpc | simp add: inQ_def | wp hoare_vcg_conj_liftE1 cteDelete_invs' cteDelete_deletes hoare_vcg_const_imp_lift)+) @@ -2127,7 +2126,7 @@ crunch valid_duplicates' [wp]: crunch valid_duplicates' [wp]: tcbSchedAppend "(\s. vs_valid_duplicates' (ksPSpace s))" - (simp:crunch_simps wp:hoare_unless_wp) + (simp:crunch_simps wp:unless_wp) lemma timerTick_valid_duplicates'[wp]: "\\s. vs_valid_duplicates' (ksPSpace s)\ @@ -2156,9 +2155,8 @@ lemma activate_sch_valid_duplicates'[wp]: activateThread \\rv s. vs_valid_duplicates' (ksPSpace s)\" apply (simp add: activateThread_def getCurThread_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gets_sp]) - apply (rule hoare_seq_ext[where B="\st s. (runnable' or idle') st - \ vs_valid_duplicates' (ksPSpace s)"]) + apply (rule bind_wp [OF _ gets_sp]) + apply (rule bind_wp[where Q'="\st s. (runnable' or idle') st \ vs_valid_duplicates' (ksPSpace s)"]) apply (rule hoare_pre) apply (wp | wpc | simp add: setThreadState_runnable_simp)+ apply (clarsimp simp: ct_in_state'_def cur_tcb'_def pred_tcb_at' @@ -2170,7 +2168,7 @@ crunch valid_duplicates'[wp]: crunch valid_duplicates'[wp]: receiveIPC "\s. vs_valid_duplicates' (ksPSpace s)" -(wp: getNotification_wp gbn_wp') + (wp: getNotification_wp gbn_wp' crunch_wps) crunch valid_duplicates'[wp]: deleteCallerCap "\s. vs_valid_duplicates' (ksPSpace s)" @@ -2178,10 +2176,11 @@ crunch valid_duplicates'[wp]: crunch valid_duplicates'[wp]: handleReply "\s. vs_valid_duplicates' (ksPSpace s)" + (wp: crunch_wps) crunch valid_duplicates'[wp]: handleYield "\s. 
vs_valid_duplicates' (ksPSpace s)" - (ignore: threadGet simp:crunch_simps wp:hoare_unless_wp) + (ignore: threadGet simp:crunch_simps wp:unless_wp) crunch valid_duplicates'[wp]: "VSpace_H.handleVMFault", handleHypervisorFault "\s. vs_valid_duplicates' (ksPSpace s)" @@ -2211,7 +2210,7 @@ lemma handleRecv_valid_duplicates'[wp]: apply (rule_tac Q="\rv s. vs_valid_duplicates' (ksPSpace s)" - in hoare_post_impErr[rotated]) + in hoare_strengthen_postE[rotated]) apply (clarsimp simp: isCap_simps sch_act_sane_not) apply assumption @@ -2235,19 +2234,23 @@ lemma handleEvent_valid_duplicates': | wpc)+ done +(* nothing extra needed on this architecture *) +defs fastpathKernelAssertions_def: + "fastpathKernelAssertions \ \s. True" + lemma callKernel_valid_duplicates': "\invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread) and (\s. e \ Interrupt \ ct_running' s)\ callKernel e \\rv s. vs_valid_duplicates' (ksPSpace s)\" - apply (simp add: callKernel_def) + apply (simp add: callKernel_def fastpathKernelAssertions_def) apply (rule hoare_pre) apply (wp activate_invs' activate_sch_act schedule_sch schedule_sch_act_simple he_invs' | simp add: no_irq_getActiveIRQ | wp (once) hoare_drop_imps )+ - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule valid_validE) prefer 2 apply assumption diff --git a/proof/refine/ARM/RAB_FN.thy b/proof/refine/ARM/RAB_FN.thy index 06f88d110a..969cb775de 100644 --- a/proof/refine/ARM/RAB_FN.thy +++ b/proof/refine/ARM/RAB_FN.thy @@ -94,35 +94,35 @@ proof (induct cap capptr bits rule: resolveAddressBits.induct) apply (subst resolveAddressBits.simps, subst resolveAddressBitsFn.simps) apply (simp only: Let_def haskell_assertE_def K_bind_def) apply (rule monadic_rewrite_name_pre) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule_tac P="(=) s" in monadic_rewrite_trans) (* step 1, apply the induction hypothesis on the lhs *) apply (rule monadic_rewrite_named_if monadic_rewrite_named_bindE - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="returnOk y" for y] - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="x $ y" for x y] - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="assertE P" for P s] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="returnOk y" for y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="x $ y" for x y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="assertE P" for P s] TrueI)+ apply (rule_tac g="case nextCap of CNodeCap a b c d \ ?g nextCap cref bitsLeft - | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_imp) + | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_guard_imp) apply (wpc | rule monadic_rewrite_refl "1.hyps" | simp only: capability.case haskell_assertE_def simp_thms)+ apply (clarsimp simp: in_monad locateSlot_conv getSlotCap_def dest!: in_getCTE fst_stateAssertD) apply (fastforce elim: cte_wp_at_weakenE') - apply (rule monadic_rewrite_refl[THEN monadic_rewrite_imp], simp) + apply (rule monadic_rewrite_refl[THEN monadic_rewrite_guard_imp], simp) (* step 2, split and match based on the lhs structure *) apply (simp add: locateSlot_conv liftE_bindE unlessE_def whenE_def if_to_top_of_bindE assertE_def stateAssert_def bind_assoc assert_def if_to_top_of_bind getSlotCap_def split del: if_split cong: if_cong) - apply (rule monadic_rewrite_if_lhs monadic_rewrite_symb_exec_l'[OF get_wp] + apply (rule monadic_rewrite_if_l monadic_rewrite_symb_exec_l'[OF _ get_wp, rotated] 
empty_fail_get no_fail_get impI monadic_rewrite_refl get_wp | simp add: throwError_def returnOk_def locateSlotFun_def if_not_P isCNodeCap_capUntypedPtr_capCNodePtr cong: if_cong split del: if_split)+ - apply (rule monadic_rewrite_symb_exec_l'[OF getCTE_inv _ _ _ getCTE_cte_wp_at]) + apply (rule monadic_rewrite_symb_exec_l'[OF _ getCTE_inv _ _ getCTE_cte_wp_at, rotated]) apply simp apply (rule impI, rule no_fail_getCTE) apply (simp add: monadic_rewrite_def simpler_gets_def return_def returnOk_def diff --git a/proof/refine/ARM/Refine.thy b/proof/refine/ARM/Refine.thy index 6fadade1cf..cd35abff8b 100644 --- a/proof/refine/ARM/Refine.thy +++ b/proof/refine/ARM/Refine.thy @@ -81,7 +81,7 @@ lemma typ_at_UserDataI: apply clarsimp apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def - cte_relation_def other_obj_relation_def + cte_relation_def other_obj_relation_def tcb_relation_cut_def pde_relation_def split: Structures_A.kernel_object.split_asm Structures_H.kernel_object.split_asm @@ -112,7 +112,7 @@ lemma typ_at_DeviceDataI: apply clarsimp apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def - cte_relation_def other_obj_relation_def + cte_relation_def other_obj_relation_def tcb_relation_cut_def pde_relation_def split: Structures_A.kernel_object.split_asm Structures_H.kernel_object.split_asm @@ -280,7 +280,7 @@ lemma kernel_entry_invs: thread_set_ct_running thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext - static_imp_wp + hoare_weak_lift_imp | clarsimp simp add: tcb_cap_cases_def active_from_running)+ done @@ -296,18 +296,18 @@ definition lemma do_user_op_valid_list:"\valid_list\ do_user_op f tc \\_. valid_list\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_valid_sched:"\valid_sched\ do_user_op f tc \\_. valid_sched\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_sched_act: "\\s. P (scheduler_action s)\ do_user_op f tc \\_ s. P (scheduler_action s)\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_invs2: @@ -401,6 +401,19 @@ abbreviation valid_domain_list' :: "'a kernel_state_scheme \ bool" w lemmas valid_domain_list'_def = valid_domain_list_2_def +lemma fastpathKernelAssertions_cross: + "\ (s,s') \ state_relation; invs s; valid_arch_state' s'\ \ fastpathKernelAssertions s'" + unfolding fastpathKernelAssertions_def + by simp + +(* this is only needed for callKernel, where we have invs' on concrete side *) +lemma corres_cross_over_fastpathKernelAssertions: + "\ \s. P s \ invs s; \s'. Q s' \ invs' s'; + corres r P (Q and fastpathKernelAssertions) f g \ \ + corres r P Q f g" + by (rule corres_cross_over_guard[where Q="Q and fastpathKernelAssertions"]) + (fastforce elim: fastpathKernelAssertions_cross)+ + defs kernelExitAssertions_def: "kernelExitAssertions s \ 0 < ksDomainTime s \ valid_domain_list' s" @@ -421,8 +434,8 @@ lemma kernelEntry_invs': (\s. 
0 < ksDomainTime s) and valid_domain_list' \" apply (simp add: kernelEntry_def) apply (wp ckernel_invs callKernel_valid_duplicates' callKernel_domain_time_left - threadSet_invs_trivial threadSet_ct_running' select_wp - TcbAcc_R.dmo_invs' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' + TcbAcc_R.dmo_invs' hoare_weak_lift_imp callKernel_domain_time_left | clarsimp simp: user_memory_update_def no_irq_def tcb_at_invs' valid_domain_list'_def)+ @@ -499,7 +512,7 @@ lemma doUserOp_invs': (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and (\s. 0 < ksDomainTime s) and valid_domain_list'\" apply (simp add: doUserOp_def split_def ex_abs_def) - apply (wp device_update_invs' select_wp + apply (wp device_update_invs' | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def user_memory_update_def))+ apply (clarsimp simp: user_memory_update_def simpler_modify_def @@ -513,7 +526,7 @@ lemma doUserOp_valid_duplicates': doUserOp f tc \\_ s. vs_valid_duplicates' (ksPSpace s)\" apply (simp add: doUserOp_def split_def) - apply (wp dmo_invs' select_wp) + apply (wp dmo_invs') apply clarsimp done @@ -552,7 +565,7 @@ lemma kernel_corres': apply simp apply (rule handleInterrupt_corres[simplified dc_def]) apply simp - apply (wp hoare_drop_imps hoare_vcg_all_lift)[1] + apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift simp: schact_is_rct_def)[1] apply simp apply (rule_tac Q="\irq s. invs' s \ (\irq'. irq = Some irq' \ @@ -561,7 +574,7 @@ lemma kernel_corres': in hoare_post_imp) apply simp apply (wp doMachineOp_getActiveIRQ_IRQ_active handle_event_valid_sched | simp)+ - apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_post_impErr) + apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_strengthen_postE) apply wpsimp+ apply (simp add: invs'_def valid_state'_def) apply (rule corres_split[OF schedule_corres]) @@ -570,9 +583,9 @@ lemma kernel_corres': schedule_invs' hoare_vcg_if_lift2 hoare_drop_imps |simp)+ apply (rule_tac Q="\_. valid_sched and invs and valid_list" and E="\_. valid_sched and invs and valid_list" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (wp handle_event_valid_sched hoare_vcg_imp_lift' |simp)+ - apply (clarsimp simp: active_from_running) + apply (clarsimp simp: active_from_running schact_is_rct_def) apply (clarsimp simp: active_from_running') done @@ -585,6 +598,8 @@ lemma kernel_corres: (\s. vs_valid_duplicates' (ksPSpace s))) (call_kernel event) (callKernel event)" unfolding callKernel_def K_bind_def + apply (rule corres_cross_over_fastpathKernelAssertions, blast+) + apply (rule corres_stateAssert_r) apply (rule corres_guard_imp) apply (rule corres_add_noop_lhs2) apply (simp only: bind_assoc[symmetric]) @@ -628,7 +643,7 @@ lemma entry_corres: apply (rule corres_split[OF getCurThread_corres]) apply (rule corres_split) apply simp - apply (rule threadset_corresT) + apply (rule threadset_corresT; simp?) 
apply (simp add: tcb_relation_def arch_tcb_relation_def arch_tcb_context_set_def atcbContextSet_def) apply (clarsimp simp: tcb_cap_cases_def) @@ -640,15 +655,16 @@ lemma entry_corres: apply (simp add: tcb_relation_def arch_tcb_relation_def arch_tcb_context_get_def atcbContextGet_def) apply wp+ - apply (rule hoare_strengthen_post, rule akernel_invs_det_ext, simp add: invs_def cur_tcb_def) + apply (rule hoare_strengthen_post, rule akernel_invs_det_ext, fastforce simp: invs_def cur_tcb_def) apply (rule hoare_strengthen_post, rule ckernel_invs, simp add: invs'_def cur_tcb'_def) apply (wp thread_set_invs_trivial thread_set_ct_running threadSet_invs_trivial threadSet_ct_running' - select_wp thread_set_not_state_valid_sched static_imp_wp + thread_set_not_state_valid_sched hoare_weak_lift_imp hoare_vcg_disj_lift ct_in_state_thread_state_lift | simp add: tcb_cap_cases_def ct_in_state'_def thread_set_no_change_tcb_state + schact_is_rct_def | (wps, wp threadSet_st_tcb_at2) )+ - apply (clarsimp simp: invs_def cur_tcb_def) + apply (fastforce simp: invs_def cur_tcb_def) apply (clarsimp simp: ct_in_state'_def) done diff --git a/proof/refine/ARM/Retype_R.thy b/proof/refine/ARM/Retype_R.thy index 9b92a27644..6a5ed86224 100644 --- a/proof/refine/ARM/Retype_R.thy +++ b/proof/refine/ARM/Retype_R.thy @@ -59,8 +59,6 @@ lemma objBitsKO_bounded2[simp]: by (simp add: objBits_simps' word_bits_def pageBits_def archObjSize_def pdeBits_def pteBits_def split: Structures_H.kernel_object.split arch_kernel_object.split) -declare select_singleton_is_return[simp] - definition APIType_capBits :: "ARM_H.object_type \ nat \ nat" where @@ -305,7 +303,7 @@ lemma state_relation_null_filterE: null_filter (caps_of_state t) = null_filter (caps_of_state s); null_filter' (ctes_of t') = null_filter' (ctes_of s'); pspace_relation (kheap t) (ksPSpace t'); - ekheap_relation (ekheap t) (ksPSpace t'); + ekheap_relation (ekheap t) (ksPSpace t'); ready_queues_relation t t'; ghost_relation (kheap t) (gsUserPages t') (gsCNodes t'); valid_list s; pspace_aligned' s'; pspace_distinct' s'; valid_objs s; valid_mdb s; pspace_aligned' t'; pspace_distinct' t'; @@ -991,7 +989,7 @@ lemma retype_ekheap_relation: apply (intro impI conjI) apply clarsimp apply (drule_tac x=a in bspec,force) - apply (clarsimp simp add: other_obj_relation_def split: if_split_asm) + apply (clarsimp simp add: tcb_relation_cut_def split: if_split_asm) apply (case_tac ko,simp_all) apply (clarsimp simp add: makeObjectKO_def cong: if_cong split: sum.splits Structures_H.kernel_object.splits arch_kernel_object.splits ARM_H.object_type.splits @@ -1167,6 +1165,11 @@ global_interpretation update_gs: PSpace_update_eq "update_gs ty us ptrs" context begin interpretation Arch . 
(*FIXME: arch_split*) +lemma ksReadyQueues_update_gs[simp]: + "ksReadyQueues (update_gs tp us addrs s) = ksReadyQueues s" + by (simp add: update_gs_def + split: aobject_type.splits Structures_A.apiobject_type.splits) + lemma update_gs_id: "tp \ no_gs_types \ update_gs tp us addrs = id" by (simp add: no_gs_types_def update_gs_def @@ -1186,6 +1189,144 @@ lemma update_gs_simps[simp]: else ups x)" by (simp_all add: update_gs_def) +lemma retype_ksPSpace_dom_same: + fixes x v + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ksPSpace s' x = Some v \ + foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s') x + = Some v" +proof - + have cover':"range_cover ptr sz (objBitsKO ko) m" + by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) + assume "ksPSpace s' x = Some v" + thus ?thesis + apply (clarsimp simp:foldr_upd_app_if[folded data_map_insert_def]) + apply (drule domI[where m = "ksPSpace s'"]) + apply (drule(1) IntI) + apply (erule_tac A = "A \ B" for A B in in_emptyE[rotated]) + apply (rule disjoint_subset[OF new_cap_addrs_subset[OF cover']]) + apply (clarsimp simp:ptr_add_def field_simps) + apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn']) + done +qed + +lemma retype_ksPSpace_None: + assumes ad: "pspace_aligned' s" "pspace_distinct' s" "pspace_bounded' s" + assumes pn: "pspace_no_overlap' ptr sz s" + assumes cover: "range_cover ptr sz (objBitsKO val + gbits) n" + shows "\x. x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" +proof - + note cover' = range_cover_rel[where sbit' = "objBitsKO val",OF cover _ refl,simplified] + show "\x. x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" + apply (drule subsetD[OF new_cap_addrs_subset [OF cover' ]]) + apply (insert pspace_no_overlap_disjoint' [OF ad(1) pn]) + apply (fastforce simp: ptr_add_def p_assoc_help) + done +qed + +lemma retype_tcbSchedPrevs_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedPrevs_of + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedPrevs_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) 
+ apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb projectKOs + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_tcbSchedNexts_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedNexts_of + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedNexts_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb projectKOs + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_inQ: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "\d p. + inQ d p |< tcbs_of' + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = inQ d p |< tcbs_of' s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (intro allI) + apply (rule ext) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + fastforce simp add: makeObjectKO_def makeObject_tcb projectKOs + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm + | fastforce)+ +qed + +lemma retype_ready_queues_relation: + assumes rlqr: "ready_queues_relation s s'" + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ready_queues_relation + (s \kheap := foldr (\p. data_map_insert p (default_object (APIType_map2 ty) dev us)) + (retype_addrs ptr (APIType_map2 ty) n us) (kheap s)\) + (s'\ksPSpace := foldr (\addr. 
data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\)" + using rlqr + unfolding ready_queues_relation_def Let_def + by (clarsimp simp: retype_tcbSchedNexts_of[OF vs' pn' ko cover num_r, simplified] + retype_tcbSchedPrevs_of[OF vs' pn' ko cover num_r, simplified] + retype_inQ[OF vs' pn' ko cover num_r, simplified]) + lemma retype_state_relation: notes data_map_insert_def[simp del] assumes sr: "(s, s') \ state_relation" @@ -1214,7 +1355,7 @@ lemma retype_state_relation: \ state_relation" (is "(ekheap_update (\_. ?eps) s\kheap := ?ps\, update_gs _ _ _ (s'\ksPSpace := ?ps'\)) \ state_relation") - proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) + proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) have cover':"range_cover ptr sz (objBitsKO ko) m" by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) @@ -1405,6 +1546,16 @@ lemma retype_state_relation: else cns x" in exI, simp) apply (rule_tac x=id in exI, simp)+ done + + have rdyqrel: "ready_queues_relation s s'" + using sr by (simp add: state_relation_def) + + thus "ready_queues_relation_2 (ready_queues s) (ksReadyQueues s') + (?ps' |> tcb_of' |> tcbSchedNext) (?ps' |> tcb_of' |> tcbSchedPrev) + (\d p. inQ d p |< (?ps' |> tcb_of'))" + using retype_ready_queues_relation[OF _ vs' pn' ko cover num_r] + by (clarsimp simp: ready_queues_relation_def Let_def) + qed lemma new_cap_addrs_fold': @@ -1516,7 +1667,7 @@ lemma retype_region_ext_modify_kheap_futz: done lemmas retype_region_ext_modify_kheap_futz' = - fun_cong[OF arg_cong[where f=NonDetMonad.bind, + fun_cong[OF arg_cong[where f=Nondet_Monad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] lemma foldr_upd_app_if_eta_futz: @@ -2410,7 +2561,6 @@ qed lemma other_objs_default_relation: "\ case ty of Structures_A.EndpointObject \ ko = injectKO (makeObject :: endpoint) | Structures_A.NotificationObject \ ko = injectKO (makeObject :: Structures_H.notification) - | Structures_A.TCBObject \ ko = injectKO (makeObject :: tcb) | _ \ False \ \ obj_relation_retype (default_object ty dev n) ko" apply (rule obj_relation_retype_other_obj) @@ -2431,6 +2581,13 @@ lemma other_objs_default_relation: split: Structures_A.apiobject_type.split_asm) done +lemma tcb_relation_retype: + "obj_relation_retype (default_object Structures_A.TCBObject dev n) (KOTCB makeObject)" + by (clarsimp simp: default_object_def obj_relation_retype_def tcb_relation_def default_tcb_def + makeObject_tcb makeObject_cte new_context_def newContext_def + fault_rel_optionation_def initContext_def default_arch_tcb_def newArchTCB_def + arch_tcb_relation_def objBits_simps' tcb_relation_cut_def) + lemma captable_relation_retype: "n < word_bits \ obj_relation_retype (default_object Structures_A.CapTableObject dev n) (KOCTE makeObject)" @@ -2589,7 +2746,6 @@ lemmas object_splits = declare hoare_in_monad_post[wp del] declare univ_get_wp[wp del] -declare result_in_set_wp[wp del] crunch valid_arch_state'[wp]: copyGlobalMappings "valid_arch_state'" (wp: crunch_wps) @@ -3158,10 +3314,10 @@ proof (intro conjI impI) apply (rule_tac ptr="x + xa" in cte_wp_at_tcbI', assumption+) apply fastforce apply simp - apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound user_context) - apply (case_tac thread_state, simp_all add: valid_tcb_state'_def - valid_bound_ntfn'_def obj_at_disj' - split: option.splits)[2] 
+ apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound tcbprev tcbnext user_context) + apply (case_tac thread_state, simp_all add: valid_tcb_state'_def valid_bound_tcb'_def + valid_bound_ntfn'_def obj_at_disj' opt_tcb_at'_def + split: option.splits)[4] apply (simp add: valid_cte'_def) apply (frule pspace_alignedD' [OF _ ad(1)]) apply (frule pspace_distinctD' [OF _ ad(2)]) @@ -3441,7 +3597,7 @@ lemma createObjects_orig_cte_wp_at2': apply (rule handy_prop_divs) apply (wp createObjects_orig_obj_at2'[where sz = sz], simp) apply (simp add: tcb_cte_cases_def) - including no_pre + including classic_wp_pre apply (wp handy_prop_divs createObjects_orig_obj_at2'[where sz = sz] | simp add: o_def cong: option.case_cong)+ done @@ -3462,7 +3618,7 @@ lemma createNewCaps_cte_wp_at2: \ pspace_no_overlap' ptr sz s\ createNewCaps ty ptr n objsz dev \\rv s. P (cte_wp_at' P' p s)\" - including no_pre + including classic_wp_pre apply (simp add: createNewCaps_def createObjects_def ARM_H.toAPIType_def split del: if_split) apply (case_tac ty; simp add: createNewCaps_def createObjects_def Arch_createNewCaps_def @@ -3856,16 +4012,6 @@ lemma sch_act_wf_lift_asm: apply auto done -lemma valid_queues_lift_asm': - assumes tat: "\d p t. \\s. \ obj_at' (inQ d p) t s \ Q d p s\ f \\_ s. \ obj_at' (inQ d p) t s\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\\s. valid_queues' s \ (\d p. Q d p s)\ f \\_. valid_queues'\" - apply (simp only: valid_queues'_def imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - tat prq) - apply simp - done - lemma createObjects'_ct[wp]: "\\s. P (ksCurThread s)\ createObjects' p n v us \\rv s. P (ksCurThread s)\" by (rule createObjects_pspace_only, simp) @@ -4114,7 +4260,7 @@ lemma createNewCaps_idle'[wp]: apply (rename_tac apiobject_type) apply (case_tac apiobject_type, simp_all split del: if_split)[1] apply (wp, simp) - including no_pre + including classic_wp_pre apply (wp mapM_x_wp' createObjects_idle' threadSet_idle' @@ -4322,7 +4468,7 @@ lemma createNewCaps_pde_mappings'[wp]: lemma createObjects'_irq_states' [wp]: "\valid_irq_states'\ createObjects' a b c d \\_. valid_irq_states'\" apply (simp add: createObjects'_def split_def) - apply (wp hoare_unless_wp|wpc|simp add: alignError_def)+ + apply (wp unless_wp|wpc|simp add: alignError_def)+ apply fastforce done @@ -4334,34 +4480,152 @@ crunch ksMachine[wp]: createObjects "\s. P (ksMachineState s)" crunch cur_domain[wp]: createObjects "\s. P (ksCurDomain s)" (simp: unless_def) -lemma createNewCaps_valid_queues': - "\valid_queues' and pspace_no_overlap' ptr sz - and pspace_aligned' and pspace_distinct' - and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ - createNewCaps ty ptr n us d - \\rv. valid_queues'\" - apply (wp valid_queues_lift_asm' [OF createNewCaps_obj_at2]) - apply (clarsimp simp: projectKOs) - apply (simp add: makeObjectKO_def - split: object_type.split_asm - apiobject_type.split_asm) - apply (clarsimp simp: inQ_def) - apply (auto simp: makeObject_tcb - split: object_type.splits apiobject_type.splits) +lemma createObjects_valid_bitmaps: + "createObjects' ptr n val gbits \valid_bitmaps\" + apply (clarsimp simp: createObjects'_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. 
P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_bitmaps_def valid_bitmapQ_def bitmapQ_def bitmapQ_no_L2_orphans_def + bitmapQ_no_L1_orphans_def) done -lemma createNewCaps_valid_queues: - "\valid_queues and pspace_no_overlap' ptr sz - and pspace_aligned' and pspace_distinct' - and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ - createNewCaps ty ptr n us d - \\rv. valid_queues\" -apply (rule hoare_gen_asm) -apply (wp valid_queues_lift_asm createNewCaps_obj_at2[where sz=sz]) -apply (clarsimp simp: projectKO_opts_defs) -apply (simp add: inQ_def) -apply (wp createNewCaps_pred_tcb_at'[where sz=sz] | simp)+ -done +lemma valid_bitmaps_gsCNodes_update[simp]: + "valid_bitmaps (gsCNodes_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_bitmaps_gsUserPages_update[simp]: + "valid_bitmaps (gsUserPages_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +crunches curDomain, copyGlobalMappings + for valid_bitmaps[wp]: valid_bitmaps + and sched_pointers[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps valid_bitmaps_lift) + +lemma createNewCaps_valid_bitmaps: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_bitmaps s\ + createNewCaps ty ptr n us dev + \\_. valid_bitmaps\" + unfolding createNewCaps_def + apply (clarsimp simp: ARM_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_bitmaps) + by (wpsimp wp: createObjects_valid_bitmaps[simplified o_def] mapM_x_wp + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ + +lemma createObjects_sched_queues: + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True) + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + (is "\ \s. _ \ _ \ ?Pre s \ _ \\_. _\") +proof (rule hoare_grab_asm)+ + assume not_0: "\ n = 0" + and cover: "range_cover ptr sz ((objBitsKO val) + gbits) n" + then show + "\\s. ?Pre s\ createObjects' ptr n val gbits \\_ s. 
P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + proof - + have shiftr_not_zero:" 1 \ ((of_nat n)::machine_word) << gbits" + using range_cover_not_zero_shift[OF not_0 cover,where gbits = gbits] + by (simp add:word_le_sub1) + show ?thesis + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: shiftL_nat data_map_insert_def[symmetric] + new_cap_addrs_fold'[OF shiftr_not_zero] + simp del: data_map_insert_def) + using range_cover.unat_of_nat_n_shift[OF cover, where gbits=gbits, simplified] + apply (clarsimp simp: foldr_upd_app_if) + apply (rule_tac a="tcbSchedNexts_of s" and b="tcbSchedPrevs_of s" + in rsubst2[rotated, OF sym sym, where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp simp: projectKOs split: kernel_object.splits option.splits) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp simp: projectKOs split: kernel_object.splits option.splits) + apply simp + done + qed +qed + +crunches doMachineOp + for sched_projs[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + +lemma createNewCaps_sched_queues: + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + assumes not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + createNewCaps ty ptr n us dev + \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + unfolding createNewCaps_def + apply (clarsimp simp: ARM_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (insert cover not_0) + apply (wpsimp wp: mapM_x_wp' createObjects_sched_queues + simp: curDomain_def) + by (wpsimp wp: mapM_x_wp' createObjects_sched_queues[simplified o_def] + threadSet_sched_pointers + | simp add: objBitsKO_def APIType_capBits_def valid_pspace'_def makeObject_tcb + objBits_def pageBits_def archObjSize_def createObjects_def + pt_bits_def ptBits_def pteBits_def pdBits_def pdeBits_def word_size_bits_def + | intro conjI impI)+ + +lemma createObjects_valid_sched_pointers: + "\\s. valid_sched_pointers s + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True)\ + createObjects' ptr n val gbits + \\_. valid_sched_pointers\" + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_sched_pointers_def foldr_upd_app_if opt_pred_def opt_map_def comp_def) + apply (cases "tcb_of' val"; clarsimp simp: projectKOs) + done + +lemma createNewCaps_valid_sched_pointers: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_sched_pointers s\ + createNewCaps ty ptr n us dev + \\_. 
valid_sched_pointers\" + unfolding createNewCaps_def + apply (clarsimp simp: ARM_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_sched_pointers) + by (wpsimp wp: createObjects_valid_sched_pointers[simplified o_def] mapM_x_wp + threadSet_valid_sched_pointers + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ lemma mapM_x_threadSet_valid_pspace: "\valid_pspace' and K (curdom \ maxDomain)\ @@ -4532,7 +4796,7 @@ proof - apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) apply (rule hoare_pre) apply (wps a b c d) - apply (wp static_imp_wp e' hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) apply (auto simp: obj_at'_def ct_in_state'_def projectKOs st_tcb_at'_def) done qed @@ -4617,7 +4881,7 @@ lemma createObjects_null_filter': createObjects' ptr n val gbits \\addrs a. P (null_filter' (ctes_of a))\" apply (clarsimp simp: createObjects'_def split_def) - apply (wp hoare_unless_wp|wpc + apply (wp unless_wp|wpc | clarsimp simp:haskell_assert_def alignError_def split del: if_splits simp del:fun_upd_apply)+ apply (subst new_cap_addrs_fold') @@ -4761,12 +5025,13 @@ proof (rule hoare_gen_asm, erule conjE) createNewCaps_valid_arch_state valid_irq_node_lift_asm [unfolded pred_conj_def, OF _ createNewCaps_obj_at'] createNewCaps_irq_handlers' createNewCaps_vms - createNewCaps_valid_queues - createNewCaps_valid_queues' createNewCaps_pred_tcb_at' cnc_ct_not_inQ createNewCaps_ct_idle_or_in_cur_domain' createNewCaps_sch_act_wf createNewCaps_urz[where sz=sz] + createNewCaps_sched_queues[OF cover not_0] + createNewCaps_valid_sched_pointers + createNewCaps_valid_bitmaps | simp)+ using not_0 apply (clarsimp simp: valid_pspace'_def) @@ -4812,35 +5077,6 @@ lemma createObjects_sch: apply (wp sch_act_wf_lift_asm createObjects_pred_tcb_at' createObjects_orig_obj_at3 | force)+ done -lemma createObjects_queues: - "\\s. valid_queues s \ pspace_aligned' s \ pspace_distinct' s \ - pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ - createObjects ptr n val gbits - \\rv. valid_queues\" - apply (wp valid_queues_lift_asm [unfolded pred_conj_def, OF createObjects_orig_obj_at3] - createObjects_pred_tcb_at' [unfolded pred_conj_def]) - apply fastforce - apply wp+ - apply fastforce - done - -lemma createObjects_queues': - assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" - shows - "\\s. valid_queues' s \ pspace_aligned' s \ pspace_distinct' s \ - pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ - createObjects ptr n val gbits - \\rv. valid_queues'\" - apply (simp add: createObjects_def) - apply (wp valid_queues_lift_asm') - apply (wp createObjects_orig_obj_at2') - apply clarsimp - apply assumption - apply wp - apply (clarsimp simp: no_tcb split: option.splits) - apply fastforce - done - lemma createObjects_no_cte_ifunsafe': assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" assumes no_tcb: "\t. 
projectKO_opt val \ Some (t::tcb)" @@ -5090,7 +5326,7 @@ proof - apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def,wp createObjects_valid_pspace_untyped') apply (wp assms | simp add: objBits_def)+ - apply (wp createObjects_sch createObjects_queues) + apply (wp createObjects_sch) apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def) apply (wp createObjects_state_refs_of'') @@ -5101,8 +5337,7 @@ proof - createObjects_idle' createObjects_no_cte_valid_global createObjects_valid_arch createObjects_irq_state createObjects_no_cte_irq_handlers createObjects_cur' - createObjects_queues' [OF no_tcb] - assms | simp add: objBits_def )+ + assms | simp add: objBits_def)+ apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def) apply (wp createObjects_idle') @@ -5110,7 +5345,25 @@ proof - createObjects_idle' createObjects_no_cte_valid_global createObjects_valid_arch createObjects_irq_state createObjects_no_cte_irq_handlers createObjects_cur' - createObjects_queues' [OF no_tcb] assms + assms + createObjects_pspace_domain_valid co_ct_not_inQ + createObjects_ct_idle_or_in_cur_domain' + createObjects_untyped_ranges_zero'[OF moKO] + | simp)+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_sched_queues) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_valid_sched_pointers) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_valid_bitmaps) + apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift + createObjects_idle' createObjects_no_cte_valid_global + createObjects_valid_arch createObjects_irq_state + createObjects_no_cte_irq_handlers createObjects_cur' + assms createObjects_pspace_domain_valid co_ct_not_inQ createObjects_ct_idle_or_in_cur_domain' createObjects_untyped_ranges_zero'[OF moKO] @@ -5118,7 +5371,8 @@ proof - apply clarsimp apply ((intro conjI; assumption?); simp add: valid_pspace'_def objBits_def) apply (fastforce simp add: no_cte no_tcb split_def split: option.splits) - apply (clarsimp simp: invs'_def no_tcb valid_state'_def no_cte split: option.splits) + apply (auto simp: invs'_def no_tcb valid_state'_def no_cte + split: option.splits kernel_object.splits) done qed @@ -5155,7 +5409,7 @@ lemma gcd_corres: "corres (=) \ \ (gets cur_domain) curDomain" lemma retype_region2_extra_ext_mapM_x_corres: shows "corres dc (valid_etcbs and (\s. \addr\set addrs. tcb_at addr s)) - (\s. \addr\set addrs. tcb_at' addr s) + (\s. \addr\set addrs. obj_at' (Not \ tcbQueued) addr s) (retype_region2_extra_ext addrs Structures_A.apiobject_type.TCBObject) (mapM_x (\addr. do cdom \ curDomain; threadSet (tcbDomain_update (\_. cdom)) addr @@ -5166,7 +5420,7 @@ lemma retype_region2_extra_ext_mapM_x_corres: apply (rule corres_split_eqr[OF gcd_corres]) apply (rule_tac S="Id \ {(x, y). x \ set addrs}" and P="\s. (\t \ set addrs. tcb_at t s) \ valid_etcbs s" - and P'="\s. \t \ set addrs. tcb_at' t s" + and P'="\s. \t \ set addrs. 
obj_at' (Not \ tcbQueued) t s" in corres_mapM_x) apply simp apply (rule corres_guard_imp) @@ -5174,8 +5428,10 @@ lemma retype_region2_extra_ext_mapM_x_corres: apply (case_tac tcb') apply simp apply fastforce - apply fastforce + apply (fastforce simp: obj_at'_def) apply (wp hoare_vcg_ball_lift | simp)+ + apply (clarsimp simp: obj_at'_def) + apply fastforce apply auto[1] apply (wp | simp add: curDomain_def)+ done @@ -5207,10 +5463,11 @@ lemma retype_region2_obj_at: apply (auto simp: obj_at_def default_object_def is_tcb_def) done -lemma createObjects_tcb_at': +lemma createObjects_Not_tcbQueued: "\range_cover ptr sz (objBitsKO (injectKOS (makeObject::tcb))) n; n \ 0\ \ \\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ - createObjects ptr n (KOTCB makeObject) 0 \\ptrs s. \addr\set ptrs. tcb_at' addr s\" + createObjects ptr n (KOTCB makeObject) 0 + \\ptrs s. \addr\set ptrs. obj_at' (Not \ tcbQueued) addr s\" apply (rule hoare_strengthen_post[OF createObjects_ko_at_strg[where val = "(makeObject :: tcb)"]]) apply (auto simp: obj_at'_def projectKOs project_inject objBitsKO_def objBits_def makeObject_tcb) done @@ -5286,7 +5543,7 @@ lemma corres_retype_region_createNewCaps: apply (rule corres_retype[where 'a = tcb], simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def makeObjectKO_def - other_objs_default_relation)[1] + tcb_relation_retype)[1] apply (fastforce simp: range_cover_def) apply (rule corres_split_nor) apply (simp add: APIType_map2_def) @@ -5297,7 +5554,7 @@ lemma corres_retype_region_createNewCaps: apply wp apply wp apply ((wp retype_region2_obj_at | simp add: APIType_map2_def)+)[1] - apply ((wp createObjects_tcb_at'[where sz=sz] | simp add: APIType_map2_def objBits_simps' obj_bits_api_def)+)[1] + apply ((wp createObjects_Not_tcbQueued[where sz=sz] | simp add: APIType_map2_def objBits_simps' obj_bits_api_def)+)[1] apply simp apply simp apply (subst retype_region2_extra_ext_trivial) @@ -5331,7 +5588,7 @@ lemma corres_retype_region_createNewCaps: \ \CapTable\ apply (subst retype_region2_extra_ext_trivial) apply (simp add: APIType_map2_def) - apply (subst bind_assoc_reverse[of "createObjects y n (KOCTE makeObject) us"]) + apply (subst bind_assoc_return_reverse[of "createObjects y n (KOCTE makeObject) us"]) apply (subst liftM_def [of "map (\addr. capability.CNodeCap addr us 0 0)", symmetric]) apply simp diff --git a/proof/refine/ARM/Schedule_R.thy b/proof/refine/ARM/Schedule_R.thy index 89845ba426..c3b5710272 100644 --- a/proof/refine/ARM/Schedule_R.thy +++ b/proof/refine/ARM/Schedule_R.thy @@ -10,16 +10,11 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) -declare static_imp_wp[wp_split del] +declare hoare_weak_lift_imp[wp_split del] (* Levity: added (20090713 10:04:12) *) declare sts_rel_idle [simp] -lemma invs_no_cicd'_queues: - "invs_no_cicd' s \ valid_queues s" - unfolding invs_no_cicd'_def - by simp - lemma corres_if2: "\ G = G'; G \ corres r P P' a c; \ G' \ corres r Q Q' b d \ \ corres r (if G then P else Q) (if G' then P' else Q') (if G then a else b) (if G' then c else d)" @@ -41,7 +36,7 @@ proof - apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) done have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. 
if rv \ None then \ else P\" - by (wp alternative_wp | simp)+ + by (wp | simp)+ show ?thesis using p apply (induct xs) apply (simp add: y del: dc_simp) @@ -72,17 +67,21 @@ lemma arch_switchToThread_corres: and valid_vs_lookup and valid_global_objs and unique_table_refs o caps_of_state and st_tcb_at runnable t) - (valid_arch_state' and valid_pspace' and st_tcb_at' runnable' t) + (valid_arch_state' and st_tcb_at' runnable' t and no_0_obj') (arch_switch_to_thread t) (Arch.switchToThread t)" + apply (rule_tac Q="tcb_at t" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) apply (simp add: arch_switch_to_thread_def ARM_H.switchToThread_def) apply (rule corres_guard_imp) apply (rule corres_underlying_split [OF setVMRoot_corres]) apply (rule corres_machine_op[OF corres_rel_imp]) apply (rule corres_underlying_trivial) apply (simp add: ARM.clearExMonitor_def | wp)+ - apply clarsimp - apply (erule st_tcb_at_tcb_at) - apply (clarsimp simp: valid_pspace'_def) + apply clarsimp done lemma schedule_choose_new_thread_sched_act_rct[wp]: @@ -90,355 +89,279 @@ lemma schedule_choose_new_thread_sched_act_rct[wp]: unfolding schedule_choose_new_thread_def by wp +\ \This proof shares many similarities with the proof of @{thm tcbSchedEnqueue_corres}\ lemma tcbSchedAppend_corres: - notes trans_state_update'[symmetric, simp del] - shows - "corres dc (is_etcb_at t) (tcb_at' t and Invariants_H.valid_queues and valid_queues') (tcb_sched_action (tcb_sched_append) t) (tcbSchedAppend t)" - apply (simp only: tcbSchedAppend_def tcb_sched_action_def) - apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (rule no_fail_pre, wp, simp) - apply (case_tac queued) - apply (simp add: unless_def when_def) - apply (rule corres_no_failI) - apply wp+ - apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc - assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def - set_tcb_queue_def simpler_modify_def) - - apply (subgoal_tac "tcb_sched_append t (ready_queues a (tcb_domain y) (tcb_priority y)) - = (ready_queues a (tcb_domain y) (tcb_priority y))") - apply (simp add: state_relation_def ready_queues_relation_def) - apply (clarsimp simp: tcb_sched_append_def state_relation_def - valid_queues'_def ready_queues_relation_def - ekheap_relation_def etcb_relation_def - obj_at'_def inQ_def projectKO_eq project_inject) - apply (drule_tac x=t in bspec,clarsimp) + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_append tcb_ptr) (tcbSchedAppend tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (clarsimp simp: tcb_sched_action_def tcb_sched_append_def get_tcb_queue_def + tcbSchedAppend_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def projectKOs opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. 
if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce apply clarsimp - apply (clarsimp simp: unless_def when_def cong: if_cong) - apply (rule stronger_corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply simp - apply (rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_append_def) - apply (intro conjI impI) - apply (rule corres_guard_imp) - apply (rule setQueue_corres) - prefer 3 - apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) - apply simp - apply simp - apply simp - apply (rule corres_split_noop_rhs2) - apply (rule addToBitmap_if_null_noop_corres) - apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] - apply wp+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def - projectKO_eq project_inject) - done - - -crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue - for valid_pspace'[wp]: valid_pspace' - and valid_arch_state'[wp]: valid_arch_state' - and pred_tcb_at'[wp]: "pred_tcb_at' proj P t" - (wp: threadSet_pred_tcb_no_state simp: unless_def tcb_to_itcb'_def) - -lemma removeFromBitmap_valid_queues_no_bitmap_except[wp]: -" \ valid_queues_no_bitmap_except t \ - removeFromBitmap d p - \\_. valid_queues_no_bitmap_except t \" - unfolding bitmapQ_defs valid_queues_no_bitmap_except_def - by (wp | clarsimp simp: bitmap_fun_defs)+ - -lemma removeFromBitmap_bitmapQ: - "\ \s. True \ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" - unfolding bitmapQ_defs bitmap_fun_defs - by (wpsimp simp: bitmap_fun_defs wordRadix_def) - -lemma removeFromBitmap_valid_bitmapQ[wp]: -" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. ksReadyQueues s (d,p) = []) \ - removeFromBitmap d p - \\_. valid_bitmapQ \" -proof - - have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. ksReadyQueues s (d,p) = []) \ - removeFromBitmap d p - \\_. valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. \ bitmapQ d p s \ ksReadyQueues s (d,p) = []) \" - by (rule hoare_pre) - (wp removeFromBitmap_valid_queues_no_bitmap_except removeFromBitmap_valid_bitmapQ_except - removeFromBitmap_bitmapQ, simp) - thus ?thesis - by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) -qed - -(* this should be the actual weakest precondition to establish valid_queues - under tagging a thread as not queued *) -lemma threadSet_valid_queues_dequeue_wp: - "\ valid_queues_no_bitmap_except t and - valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. \d p. t \ set (ksReadyQueues s (d,p))) \ - threadSet (tcbQueued_update (\_. False)) t - \\rv. 
valid_queues \" - unfolding threadSet_def - apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) - apply (rule hoare_pre) - apply (simp add: valid_queues_def valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def) - apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift - setObject_tcb_strongest) - apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def valid_queues_no_bitmap_def) - done + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) -(* FIXME move *) -lemmas obj_at'_conjI = obj_at_conj' - -lemma setQueue_valid_queues_no_bitmap_except_dequeue_wp: - "\d p ts t. - \ \s. valid_queues_no_bitmap_except t s \ - (\t' \ set ts. obj_at' (inQ d p and runnable' \ tcbState) t' s) \ - t \ set ts \ distinct ts \ p \ maxPriority \ d \ maxDomain \ - setQueue d p ts - \\rv. valid_queues_no_bitmap_except t \" - unfolding setQueue_def valid_queues_no_bitmap_except_def null_def - by wp force - -definition (* if t is in a queue, it should be tagged with right priority and domain *) - "correct_queue t s \ \d p. t \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s)" - -lemma valid_queues_no_bitmap_correct_queueI[intro]: - "valid_queues_no_bitmap s \ correct_queue t s" - unfolding correct_queue_def valid_queues_no_bitmap_def - by (fastforce simp: obj_at'_def inQ_def) - - -lemma tcbSchedDequeue_valid_queues_weak: - "\ valid_queues_no_bitmap_except t and valid_bitmapQ and - bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - correct_queue t and - obj_at' (\tcb. tcbDomain tcb \ maxDomain \ tcbPriority tcb \ maxPriority) t \ - tcbSchedDequeue t - \\_. Invariants_H.valid_queues\" -proof - - show ?thesis - unfolding tcbSchedDequeue_def null_def valid_queues_def - apply wp (* stops on threadSet *) - apply (rule hoare_post_eq[OF _ threadSet_valid_queues_dequeue_wp], - simp add: valid_queues_def) - apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift)+ - apply (wp hoare_vcg_imp_lift setQueue_valid_queues_no_bitmap_except_dequeue_wp - setQueue_valid_bitmapQ threadGet_const_tcb_at)+ - (* wp done *) - apply (normalise_obj_at') - apply (clarsimp simp: correct_queue_def) - apply (normalise_obj_at') - apply (fastforce simp add: valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def elim: obj_at'_weaken)+ - done -qed + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. P'" and P'=P' for P P']) -lemma tcbSchedDequeue_valid_queues: - "\Invariants_H.valid_queues - and obj_at' (\tcb. tcbDomain tcb \ maxDomain) t - and obj_at' (\tcb. tcbPriority tcb \ maxPriority) t\ - tcbSchedDequeue t - \\_. Invariants_H.valid_queues\" - apply (rule hoare_pre, rule tcbSchedDequeue_valid_queues_weak) - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def) - done - -lemma tcbSchedAppend_valid_queues'[wp]: - (* most of this is identical to tcbSchedEnqueue_valid_queues' in TcbAcc_R *) - "\valid_queues' and tcb_at' t\ tcbSchedAppend t \\_. valid_queues'\" - apply (simp add: tcbSchedAppend_def) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued; simp_all add: unless_def when_def) - apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ - apply (subst conj_commute, wp) - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) - apply wp + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueueAppend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: obj_at'_def projectKOs) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) + + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp simp: setQueue_def tcbQueueAppend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def projectKOs) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply fast + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def projectKOs opt_pred_def opt_map_def) + apply (metis inQ_def option.simps(5) tcb_of'_TCB) + apply (intro conjI impI; clarsimp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) apply fastforce - apply wp - apply (subst conj_commute) - apply clarsimp - apply (rule_tac Q="\rv. valid_queues' - and obj_at' (\obj. \ tcbQueued obj) t - and obj_at' (\obj. tcbPriority obj = prio) t - and obj_at' (\obj. tcbDomain obj = tdom) t - and (\s. 
t \ set (ksReadyQueues s (tdom, prio)))" - in hoare_post_imp) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKOs inQ_def) - apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def inQ_def projectKOs valid_queues'_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def) - done - -crunch norq[wp]: threadSet "\s. P (ksReadyQueues s)" - (simp: updateObject_default_def) - -lemma threadSet_valid_queues'_dequeue: (* threadSet_valid_queues' is too weak for dequeue *) - "\\s. (\d p t'. obj_at' (inQ d p) t' s \ t' \ t \ t' \ set (ksReadyQueues s (d, p))) \ - obj_at' (inQ d p) t s \ - threadSet (tcbQueued_update (\_. False)) t - \\rv. valid_queues' \" - unfolding valid_queues'_def - apply (rule hoare_pre) - apply (wp hoare_vcg_all_lift) - apply (simp only: imp_conv_disj not_obj_at') - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (simp add: not_obj_at') - apply (clarsimp simp: typ_at_tcb') - apply normalise_obj_at' - apply (fastforce elim: obj_at'_weaken simp: inQ_def) - done - -lemma setQueue_ksReadyQueues_lift: - "\ \s. P (s\ksReadyQueues := (ksReadyQueues s)((d, p) := ts)\) ts \ - setQueue d p ts - \ \_ s. P s (ksReadyQueues s (d,p))\" - unfolding setQueue_def - by (wp, clarsimp simp: fun_upd_def snd_def) - -lemma tcbSchedDequeue_valid_queues'[wp]: - "\valid_queues' and tcb_at' t\ - tcbSchedDequeue t \\_. valid_queues'\" - unfolding tcbSchedDequeue_def - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - prefer 2 - apply (wp threadGet_const_tcb_at) - apply (fastforce simp: obj_at'_def) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply clarsimp + apply (drule_tac x="the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))" + in spec) + subgoal by (auto simp: in_opt_pred opt_map_red projectKOs) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: tcbQueueEmpty_def) + apply (case_tac "t = tcbPtr") + apply (clarsimp simp: projectKOs inQ_def fun_upd_apply split: if_splits) + apply (case_tac "t = the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: projectKOs 
inQ_def opt_pred_def fun_upd_apply) + apply (clarsimp simp: inQ_def in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ apply clarsimp - apply (rename_tac queued) - apply (case_tac queued, simp_all) - apply wp - apply (rule_tac d=tdom and p=prio in threadSet_valid_queues'_dequeue) - apply (rule hoare_pre_post, assumption) - apply (wp | clarsimp simp: bitmap_fun_defs)+ - apply (wp hoare_vcg_all_lift setQueue_ksReadyQueues_lift) - apply clarsimp - apply (wp threadGet_obj_at' threadGet_const_tcb_at)+ - apply clarsimp - apply (rule context_conjI, clarsimp simp: obj_at'_def) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKOs inQ_def|wp)+ + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def projectKOs) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def projectKOs) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def projectKOs) + apply (intro conjI; clarsimp) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply opt_map_def obj_at'_def projectKOs + queue_end_valid_def prev_queue_head_def + split: if_splits option.splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_append[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def opt_map_def obj_at'_def projectKOs + split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def fun_upd_apply queue_end_valid_def obj_at'_def projectKOs + split: if_splits) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply split: if_splits) + by (clarsimp simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def + obj_at'_def projectKOs + split: if_splits) + +lemma tcbQueueAppend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_objs'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + apply (clarsimp simp: tcbQueueEmpty_def valid_bound_tcb'_def split: option.splits) + done + +lemma tcbSchedAppend_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. valid_objs'\" + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: threadSet_valid_objs' threadGet_wp hoare_vcg_all_lift) + apply (normalise_obj_at', rename_tac tcb "end") + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: tcbQueueEmpty_def obj_at'_def) done -crunch tcb_at'[wp]: tcbSchedEnqueue "tcb_at' t" - (simp: unless_def) -crunch tcb_at'[wp]: tcbSchedAppend "tcb_at' t" - (simp: unless_def) -crunch tcb_at'[wp]: tcbSchedDequeue "tcb_at' t" - -crunch state_refs_of'[wp]: tcbSchedEnqueue "\s. 
P (state_refs_of' s)"
-  (wp: refl simp: crunch_simps unless_def)
-crunch state_refs_of'[wp]: tcbSchedAppend "\<lambda>s. P (state_refs_of' s)"
-  (wp: refl simp: crunch_simps unless_def)
-crunch state_refs_of'[wp]: tcbSchedDequeue "\<lambda>s. P (state_refs_of' s)"
-  (wp: refl simp: crunch_simps)
+crunches tcbSchedAppend, tcbSchedDequeue
+  for pred_tcb_at'[wp]: "pred_tcb_at' proj P t"
+  (wp: threadSet_pred_tcb_no_state simp: unless_def tcb_to_itcb'_def)

-crunch cap_to'[wp]: tcbSchedEnqueue "ex_nonz_cap_to' p"
-  (simp: unless_def)
-crunch cap_to'[wp]: tcbSchedAppend "ex_nonz_cap_to' p"
-  (simp: unless_def)
-crunch cap_to'[wp]: tcbSchedDequeue "ex_nonz_cap_to' p"
+(* FIXME move *)
+lemmas obj_at'_conjI = obj_at_conj'

-crunch iflive'[wp]: setQueue if_live_then_nonz_cap'
+crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue
+  for tcb_at'[wp]: "tcb_at' t"
+  and cap_to'[wp]: "ex_nonz_cap_to' p"
+  and ifunsafe'[wp]: if_unsafe_then_cap'
+  (wp: crunch_wps simp: crunch_simps)

 lemma tcbSchedAppend_iflive'[wp]:
-  "\<lbrace>if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\<rbrace>
-   tcbSchedAppend tcb \<lbrace>\<lambda>_. if_live_then_nonz_cap'\<rbrace>"
-  apply (simp add: tcbSchedAppend_def unless_def)
-  apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+
+  "\<lbrace>if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\<rbrace>
+   tcbSchedAppend tcbPtr
+   \<lbrace>\<lambda>_. if_live_then_nonz_cap'\<rbrace>"
+  unfolding tcbSchedAppend_def
+  apply (wpsimp wp: tcbQueueAppend_if_live_then_nonz_cap' threadGet_wp simp: bitmap_fun_defs)
+  apply (frule_tac p=tcbPtr in if_live_then_nonz_capE')
+   apply (fastforce simp: ko_wp_at'_def st_tcb_at'_def obj_at'_def projectKOs runnable_eq_active')
+  apply (clarsimp simp: tcbQueueEmpty_def)
+  apply (erule if_live_then_nonz_capE')
+  apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def)
+  apply (drule_tac x="tcbDomain tcb" in spec)
+  apply (drule_tac x="tcbPriority tcb" in spec)
+  apply (fastforce dest!: obj_at'_tcbQueueEnd_ksReadyQueues
+                    simp: ko_wp_at'_def inQ_def obj_at'_def projectKOs tcbQueueEmpty_def)
   done

 lemma tcbSchedDequeue_iflive'[wp]:
-  "\<lbrace>if_live_then_nonz_cap'\<rbrace> tcbSchedDequeue tcb \<lbrace>\<lambda>_. if_live_then_nonz_cap'\<rbrace>"
+  "\<lbrace>if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers\<rbrace>
+   tcbSchedDequeue tcbPtr
+   \<lbrace>\<lambda>_. if_live_then_nonz_cap'\<rbrace>"
   apply (simp add: tcbSchedDequeue_def)
-  apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+
-  apply ((wp | clarsimp simp: bitmap_fun_defs)+)[1] (* deal with removeFromBitmap *)
-  apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+
-  apply (rule_tac Q="\<lambda>rv. \<top>" in hoare_post_imp, fastforce)
-  apply (wp | simp add: crunch_simps)+
-  done
-
-crunch ifunsafe'[wp]: tcbSchedEnqueue if_unsafe_then_cap'
-  (simp: unless_def)
-crunch ifunsafe'[wp]: tcbSchedAppend if_unsafe_then_cap'
-  (simp: unless_def)
-crunch ifunsafe'[wp]: tcbSchedDequeue if_unsafe_then_cap'
-
-crunch idle'[wp]: tcbSchedEnqueue valid_idle'
-  (simp: crunch_simps unless_def)
-crunch idle'[wp]: tcbSchedAppend valid_idle'
-  (simp: crunch_simps unless_def)
-crunch idle'[wp]: tcbSchedDequeue valid_idle'
-  (simp: crunch_simps)
-
-crunch global_refs'[wp]: tcbSchedEnqueue valid_global_refs'
-  (wp: threadSet_global_refs simp: unless_def)
-crunch global_refs'[wp]: tcbSchedAppend valid_global_refs'
-  (wp: threadSet_global_refs simp: unless_def)
-crunch global_refs'[wp]: tcbSchedDequeue valid_global_refs'
-  (wp: threadSet_global_refs)
-
-crunch irq_node'[wp]: tcbSchedEnqueue "\<lambda>s. P (irq_node' s)"
-  (simp: unless_def)
-crunch irq_node'[wp]: tcbSchedAppend "\<lambda>s. P (irq_node' s)"
-  (simp: unless_def)
-crunch irq_node'[wp]: tcbSchedDequeue "\<lambda>s. P (irq_node' s)"
-
-crunch typ_at'[wp]: tcbSchedEnqueue "\<lambda>s. P (typ_at' T p s)"
-  (simp: unless_def)
-crunch typ_at'[wp]: tcbSchedAppend "\<lambda>s. P (typ_at' T p s)"
-  (simp: unless_def)
-crunch typ_at'[wp]: tcbSchedDequeue "\<lambda>s. P (typ_at' T p s)"
-
-crunch ctes_of[wp]: tcbSchedEnqueue "\<lambda>s. P (ctes_of s)"
-  (simp: unless_def)
-crunch ctes_of[wp]: tcbSchedAppend "\<lambda>s. P (ctes_of s)"
-  (simp: unless_def)
-crunch ctes_of[wp]: tcbSchedDequeue "\<lambda>s. P (ctes_of s)"
-
-crunch ksInterrupt[wp]: tcbSchedEnqueue "\<lambda>s. P (ksInterruptState s)"
-  (simp: unless_def)
-crunch ksInterrupt[wp]: tcbSchedAppend "\<lambda>s. P (ksInterruptState s)"
-  (simp: unless_def)
-crunch ksInterrupt[wp]: tcbSchedDequeue "\<lambda>s. P (ksInterruptState s)"
-
-crunch irq_states[wp]: tcbSchedEnqueue valid_irq_states'
-  (simp: unless_def)
-crunch irq_states[wp]: tcbSchedAppend valid_irq_states'
-  (simp: unless_def)
-crunch irq_states[wp]: tcbSchedDequeue valid_irq_states'
-
-crunch ct'[wp]: tcbSchedEnqueue "\<lambda>s. P (ksCurThread s)"
-  (simp: unless_def)
-crunch ct'[wp]: tcbSchedAppend "\<lambda>s. P (ksCurThread s)"
-  (simp: unless_def)
-crunch ct'[wp]: tcbSchedDequeue "\<lambda>s. P (ksCurThread s)"
-
-crunch pde_mappings'[wp]: tcbSchedEnqueue "valid_pde_mappings'"
-  (simp: unless_def)
-crunch pde_mappings'[wp]: tcbSchedAppend "valid_pde_mappings'"
-  (simp: unless_def)
-crunch pde_mappings'[wp]: tcbSchedDequeue "valid_pde_mappings'"
+  apply (wpsimp wp: tcbQueueRemove_if_live_then_nonz_cap' threadGet_wp)
+  apply (fastforce elim: if_live_then_nonz_capE' simp: obj_at'_def projectKOs ko_wp_at'_def)
+  done
+
+crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue
+  for typ_at'[wp]: "\<lambda>s. P (typ_at' T p s)"
+  and tcb_at'[wp]: "tcb_at' t"
+  and ctes_of[wp]: "\<lambda>s. P (ctes_of s)"
+  and ksInterrupt[wp]: "\<lambda>s. P (ksInterruptState s)"
+  and irq_states[wp]: valid_irq_states'
+  and irq_node'[wp]: "\<lambda>s. P (irq_node' s)"
+  and ct'[wp]: "\<lambda>s. P (ksCurThread s)"
+  and global_refs'[wp]: valid_global_refs'
+  and ifunsafe'[wp]: if_unsafe_then_cap'
+  and cap_to'[wp]: "ex_nonz_cap_to' p"
+  and state_refs_of'[wp]: "\<lambda>s. P (state_refs_of' s)"
+  and idle'[wp]: valid_idle'
+  and valid_pde_mappings'[wp]: valid_pde_mappings'
+  (simp: unless_def crunch_simps wp: crunch_wps)

 lemma tcbSchedEnqueue_vms'[wp]:
   "\<lbrace>valid_machine_state'\<rbrace> tcbSchedEnqueue t \<lbrace>\<lambda>_. valid_machine_state'\<rbrace>"
@@ -446,9 +369,6 @@ lemma tcbSchedEnqueue_vms'[wp]:
   apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedEnqueue_ksMachine)
   done

-crunch ksCurDomain[wp]: tcbSchedEnqueue "\<lambda>s. P (ksCurDomain s)"
-(simp: unless_def)
-
 lemma tcbSchedEnqueue_tcb_in_cur_domain'[wp]:
   "\<lbrace>tcb_in_cur_domain' t'\<rbrace> tcbSchedEnqueue t \<lbrace>\<lambda>_. tcb_in_cur_domain' t' \<rbrace>"
   apply (rule tcb_in_cur_domain'_lift)
    apply wp
@@ -466,22 +386,88 @@ lemma ct_idle_or_in_cur_domain'_lift2:
   apply (unfold ct_idle_or_in_cur_domain'_def)
   apply (rule hoare_lift_Pf2[where f=ksCurThread])
   apply (rule hoare_lift_Pf2[where f=ksSchedulerAction])
-  apply (wp static_imp_wp hoare_vcg_disj_lift | assumption)+
+  apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift | assumption)+
+  done
+
+lemma threadSet_mdb':
+  "\<lbrace>valid_mdb' and obj_at' (\<lambda>t. \<forall>(getF, setF) \<in> ran tcb_cte_cases. getF t = getF (f t)) t\<rbrace>
+   threadSet f t
+   \<lbrace>\<lambda>rv. valid_mdb'\<rbrace>"
+  by (wpsimp wp: setObject_tcb_mdb' getTCB_wp simp: threadSet_def obj_at'_def)
+
+lemma tcbSchedNext_update_valid_mdb'[wp]:
+  "\<lbrace>valid_mdb' and tcb_at' tcbPtr\<rbrace> threadSet (tcbSchedNext_update f) tcbPtr \<lbrace>\<lambda>_.
valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbSchedPrev_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueueRemove_valid_mdb': + "\\s. valid_mdb' s \ valid_objs' s\ tcbQueueRemove q tcbPtr \\_. valid_mdb'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def obj_at'_def) + done + +lemma tcbQueuePrepend_valid_mdb': + "\valid_mdb' and tcb_at' tcbPtr + and (\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_valid_mdb': + "\\s. valid_mdb' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueEnd queue)) s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueued_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbQueued_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma valid_mdb'_ksReadyQueuesL1Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL1Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma valid_mdb'_ksReadyQueuesL2Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL2Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma tcbSchedEnqueue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. valid_mdb'\" + apply (clarsimp simp: tcbSchedEnqueue_def setQueue_def) + apply (wpsimp wp: tcbQueuePrepend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + apply normalise_obj_at' + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) done +crunches tcbSchedEnqueue + for cur_tcb'[wp]: cur_tcb' + (wp: threadSet_cur) + lemma tcbSchedEnqueue_invs'[wp]: - "\invs' - and st_tcb_at' runnable' t - and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ - tcbSchedEnqueue t + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedEnqueue t \\_. invs'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp tcbSchedEnqueue_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority split: thread_state.split_asm simp: valid_pspace'_def)+ + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedEnqueue_ct_not_inQ + simp: cteCaps_of_def o_def) done crunch ksMachine[wp]: tcbSchedAppend "\s. P (ksMachineState s)" @@ -490,7 +476,7 @@ crunch ksMachine[wp]: tcbSchedAppend "\s. 
P (ksMachineState s)" lemma tcbSchedAppend_vms'[wp]: "\valid_machine_state'\ tcbSchedAppend t \\_. valid_machine_state'\" apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedAppend_ksMachine) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) done crunch pspace_domain_valid[wp]: tcbSchedAppend "pspace_domain_valid" @@ -505,21 +491,27 @@ crunch ksIdleThread[wp]: tcbSchedAppend "\s. P (ksIdleThread s)" crunch ksDomSchedule[wp]: tcbSchedAppend "\s. P (ksDomSchedule s)" (simp: unless_def) +lemma tcbQueueAppend_tcbPriority_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def projectKOs objBits_simps ps_clear_def split: if_splits) + +lemma tcbQueueAppend_tcbDomain_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def projectKOs objBits_simps ps_clear_def split: if_splits) + lemma tcbSchedAppend_tcbDomain[wp]: - "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ - tcbSchedAppend t - \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" + "tcbSchedAppend t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" apply (clarsimp simp: tcbSchedAppend_def) - apply (wpsimp simp: unless_def)+ - done + by wpsimp lemma tcbSchedAppend_tcbPriority[wp]: - "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ - tcbSchedAppend t - \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" + "tcbSchedAppend t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" apply (clarsimp simp: tcbSchedAppend_def) - apply (wpsimp simp: unless_def)+ - done + by wpsimp lemma tcbSchedAppend_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ tcbSchedAppend t \\_. tcb_in_cur_domain' t' \" @@ -533,26 +525,58 @@ crunches tcbSchedAppend, tcbSchedDequeue (simp: unless_def) lemma tcbSchedAppend_sch_act_wf[wp]: - "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedAppend thread - \\rv s. sch_act_wf (ksSchedulerAction s) s\" - apply (simp add:tcbSchedAppend_def bitmap_fun_defs) - apply (wp hoare_unless_wp setQueue_sch_act threadGet_wp|simp)+ - apply (fastforce simp:typ_at'_def obj_at'_def) + "tcbSchedAppend thread \\s. sch_act_wf (ksSchedulerAction s) s\" + by (wpsimp wp: sch_act_wf_lift) + +lemma tcbSchedAppend_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedAppend tcbPtr \\_. valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedAppend_def + apply (wpsimp simp: tcbQueueAppend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp hoare_vcg_if_lift2) + apply (clarsimp simp: ksReadyQueues_asrt_def split: if_splits) + apply normalise_obj_at' + apply (force dest: tcbQueueHead_iff_tcbQueueEnd + simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def) + done + +lemma tcbSchedAppend_valid_mdb'[wp]: + "\valid_mdb' and valid_tcbs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. 
valid_mdb'\" + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: tcbQueueAppend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + by (fastforce dest: obj_at'_tcbQueueEnd_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def projectKOs) + +lemma tcbSchedAppend_valid_bitmaps[wp]: + "tcbSchedAppend tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) done lemma tcbSchedAppend_invs'[wp]: - "\invs' - and st_tcb_at' runnable' t - and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ - tcbSchedAppend t + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedAppend t \\_. invs'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp tcbSchedAppend_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority split: thread_state.split_asm simp: valid_pspace'_def)+ + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma tcbSchedAppend_all_invs_but_ct_not_inQ': + "\invs'\ + tcbSchedAppend t + \\_. all_invs_but_ct_not_inQ'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) done lemma tcbSchedEnqueue_invs'_not_ResumeCurrentThread: @@ -575,49 +599,13 @@ lemma tcb_at'_has_tcbDomain: "tcb_at' t s \ \p. obj_at' (\tcb. tcbDomain tcb = p) t s" by (clarsimp simp add: obj_at'_def) -lemma valid_queues'_ko_atD: - "valid_queues' s \ ko_at' tcb t s \ tcbQueued tcb - \ t \ set (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb))" - apply (simp add: valid_queues'_def) - apply (elim allE, erule mp) - apply normalise_obj_at' - apply (simp add: inQ_def) - done - -lemma tcbSchedEnqueue_in_ksQ: - "\valid_queues' and tcb_at' t\ tcbSchedEnqueue t - \\r s. \domain priority. t \ set (ksReadyQueues s (domain, priority))\" - apply (rule_tac Q="\s. \d p. valid_queues' s \ - obj_at' (\tcb. tcbPriority tcb = p) t s \ - obj_at' (\tcb. tcbDomain tcb = d) t s" - in hoare_pre_imp) - apply (clarsimp simp: tcb_at'_has_tcbPriority tcb_at'_has_tcbDomain) - apply (rule hoare_vcg_ex_lift)+ - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wpsimp simp: if_apply_def2) - apply (rule_tac Q="\rv s. tdom = d \ rv = p \ obj_at' (\tcb. tcbPriority tcb = p) t s - \ obj_at' (\tcb. tcbDomain tcb = d) t s" - in hoare_post_imp, clarsimp) - apply (wp, (wp threadGet_const)+) - apply (rule_tac Q="\rv s. - obj_at' (\tcb. tcbPriority tcb = p) t s \ - obj_at' (\tcb. tcbDomain tcb = d) t s \ - obj_at' (\tcb. tcbQueued tcb = rv) t s \ - (rv \ t \ set (ksReadyQueues s (d, p)))" in hoare_post_imp) - apply (clarsimp simp: o_def elim!: obj_at'_weakenE) - apply (wp threadGet_obj_at' hoare_vcg_imp_lift threadGet_const) - apply clarsimp - apply normalise_obj_at' - apply (frule(1) valid_queues'_ko_atD, simp+) - done - crunch ksMachine[wp]: tcbSchedDequeue "\s. 
P (ksMachineState s)" (simp: unless_def) lemma tcbSchedDequeue_vms'[wp]: "\valid_machine_state'\ tcbSchedDequeue t \\_. valid_machine_state'\" apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedDequeue_ksMachine) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) done crunch pspace_domain_valid[wp]: tcbSchedDequeue "pspace_domain_valid" @@ -631,50 +619,93 @@ crunch ksIdleThread[wp]: tcbSchedDequeue "\s. P (ksIdleThread s)" crunch ksDomSchedule[wp]: tcbSchedDequeue "\s. P (ksDomSchedule s)" (simp: unless_def) -crunch ksDomScheduleIdx[wp]: tcbSchedDequeue "\s. P (ksDomScheduleIdx s)" -(simp: unless_def) - lemma tcbSchedDequeue_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ tcbSchedDequeue t \\_. tcb_in_cur_domain' t' \" apply (rule tcb_in_cur_domain'_lift) apply wp - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ + apply (clarsimp simp: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: hoare_when_weak_wp getObject_tcb_wp threadGet_wp) done -lemma tcbSchedDequeue_tcbDomain[wp]: - "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ - tcbSchedDequeue t - \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ - done +crunch ksDomScheduleIdx[wp]: tcbSchedDequeue "\s. P (ksDomScheduleIdx s)" + (simp: unless_def) -lemma tcbSchedDequeue_tcbPriority[wp]: - "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ - tcbSchedDequeue t - \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ +lemma tcbSchedDequeue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs'\ tcbSchedDequeue tcbPtr \\_. valid_mdb'\" + unfolding tcbSchedDequeue_def + apply (wpsimp simp: bitmap_fun_defs setQueue_def wp: threadSet_mdb' tcbQueueRemove_valid_mdb') + apply (rule_tac Q="\_. tcb_at' tcbPtr" in hoare_post_imp) + apply (fastforce simp: tcb_cte_cases_def cteSizeBits_def) + apply (wpsimp wp: threadGet_wp)+ + apply (fastforce simp: obj_at'_def) done lemma tcbSchedDequeue_invs'[wp]: - "\invs' and tcb_at' t\ - tcbSchedDequeue t - \\_. 
invs'\" - unfolding invs'_def valid_state'_def - apply (rule hoare_pre) - apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - tcbSchedDequeue_valid_queues - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def)+ - apply (fastforce elim: valid_objs'_maxDomain valid_objs'_maxPriority simp: valid_pspace'_def)+ + "tcbSchedDequeue t \invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma ready_qs_runnable_cross: + "\(s, s') \ state_relation; pspace_aligned s; pspace_distinct s; valid_queues s\ + \ ready_qs_runnable s'" + apply (clarsimp simp: ready_qs_runnable_def) + apply normalise_obj_at' + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply clarsimp + apply (drule_tac x=t in bspec) + apply (fastforce simp: inQ_def in_opt_pred obj_at'_def projectKOs opt_map_red) + apply (fastforce dest: st_tcb_at_runnable_cross simp: obj_at'_def projectKOs st_tcb_at'_def) + done + +method add_ready_qs_runnable = + rule_tac Q'=ready_qs_runnable in corres_cross_add_guard, + (clarsimp simp: pred_conj_def)?, + (frule valid_sched_valid_queues)?, (frule invs_psp_aligned)?, (frule invs_distinct)?, + fastforce dest: ready_qs_runnable_cross + +defs idleThreadNotQueued_def: + "idleThreadNotQueued s \ obj_at' (Not \ tcbQueued) (ksIdleThread s) s" + +lemma idle_thread_not_queued: + "\valid_idle s; valid_queues s; valid_etcbs s\ + \ \ (\d p. idle_thread s \ set (ready_queues s d p))" + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (drule_tac x="idle_thread s" in bspec) + apply fastforce + apply (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def valid_etcbs_def) done +lemma valid_idle_tcb_at: + "valid_idle s \ tcb_at (idle_thread s) s" + by (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def is_tcb_def) + lemma setCurThread_corres: - "corres dc \ \ (modify (cur_thread_update (\_. t))) (setCurThread t)" - apply (unfold setCurThread_def) + "corres dc (valid_idle and valid_queues and valid_etcbs and pspace_aligned and pspace_distinct) \ + (modify (cur_thread_update (\_. t))) (setCurThread t)" + apply (clarsimp simp: setCurThread_def) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (clarsimp simp: idleThreadNotQueued_def) + apply (frule (2) idle_thread_not_queued) + apply (frule state_relation_pspace_relation) + apply (frule state_relation_ready_queues_relation) + apply (frule state_relation_idle_thread) + apply (frule valid_idle_tcb_at) + apply (frule (3) tcb_at_cross) + apply (fastforce dest!: in_ready_q_tcbQueued_eq[THEN arg_cong_Not, THEN iffD1] + simp: obj_at'_def projectKOs opt_pred_def opt_map_def) apply (rule corres_modify) apply (simp add: state_relation_def swp_def) done @@ -691,7 +722,7 @@ lemma Arch_switchToThread_pred_tcb'[wp]: proof - have pos: "\P t t'. \pred_tcb_at' proj P t'\ Arch.switchToThread t \\rv. 
pred_tcb_at' proj P t'\" apply (simp add: pred_tcb_at'_def ARM_H.switchToThread_def) - apply (rule hoare_seq_ext)+ + apply (rule bind_wp)+ apply (rule doMachineOp_obj_at) apply (rule setVMRoot_obj_at) done @@ -716,47 +747,62 @@ lemma arch_switch_thread_ksQ[wp]: apply (wp) done -crunch valid_queues[wp]: "Arch.switchToThread" "Invariants_H.valid_queues" -(wp: crunch_wps simp: crunch_simps ignore: clearExMonitor) +crunches storeWordUser, setVMRoot, asUser, storeWordUser, Arch.switchToThread + for ksQ[wp]: "\s. P (ksReadyQueues s p)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_objs'[wp]: valid_objs' + (wp: crunch_wps threadSet_sched_pointers simp: crunch_simps) + +crunches arch_switch_to_thread, arch_switch_to_idle_thread + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + and ready_qs_distinct[wp]: ready_qs_distinct + and valid_idle[wp]: valid_idle + (wp: ready_qs_distinct_lift) + +lemma valid_queues_in_correct_ready_q[elim!]: + "valid_queues s \ in_correct_ready_q s" + by (clarsimp simp: valid_queues_def in_correct_ready_q_def) + +lemma valid_queues_ready_qs_distinct[elim!]: + "valid_queues s \ ready_qs_distinct s" + by (clarsimp simp: valid_queues_def ready_qs_distinct_def) lemma switchToThread_corres: "corres dc (valid_arch_state and valid_objs and valid_asid_map and valid_vspace_objs and pspace_aligned and pspace_distinct and valid_vs_lookup and valid_global_objs and unique_table_refs o caps_of_state - and st_tcb_at runnable t and valid_etcbs) - (valid_arch_state' and valid_pspace' and Invariants_H.valid_queues - and st_tcb_at' runnable' t and cur_tcb') + and st_tcb_at runnable t and valid_etcbs + and valid_queues and valid_idle) + (no_0_obj' and sym_heap_sched_pointers and valid_objs' and valid_arch_state') (switch_to_thread t) (switchToThread t)" - (is "corres _ ?PA ?PH _ _") - -proof - - have mainpart: "corres dc (?PA) (?PH) - (do y \ arch_switch_to_thread t; - y \ (tcb_sched_action tcb_sched_dequeue t); - modify (cur_thread_update (\_. t)) - od) - (do y \ Arch.switchToThread t; - y \ tcbSchedDequeue t; - setCurThread t - od)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply add_ready_qs_runnable + apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) + apply (rule corres_symb_exec_l[OF _ _ get_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_l[OF _ _ assert_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce dest!: state_relation_ready_queues_relation intro: ksReadyQueues_asrt_cross) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply fastforce apply (rule corres_guard_imp) apply (rule corres_split[OF arch_switchToThread_corres]) apply (rule corres_split[OF tcbSchedDequeue_corres setCurThread_corres]) - apply (wp|clarsimp simp: tcb_at_is_etcb_at st_tcb_at_tcb_at)+ - done - - show ?thesis - apply - - apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) - apply (rule corres_symb_exec_l [where Q = "\ s rv. 
(?PA and (=) rv) s", - OF corres_symb_exec_l [OF mainpart]]) - apply (auto intro: no_fail_pre [OF no_fail_assert] - no_fail_pre [OF no_fail_get] - dest: st_tcb_at_tcb_at [THEN get_tcb_at] | - simp add: assert_def | wp)+ - done -qed + apply (wpsimp simp: is_tcb_def)+ + apply (fastforce intro!: st_tcb_at_tcb_at) + apply wpsimp + apply wpsimp + apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def) + apply wpsimp + apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def) + done lemma arch_switchToIdleThread_corres: "corres dc (valid_arch_state and valid_objs and valid_asid_map and unique_table_refs \ caps_of_state and @@ -766,20 +812,26 @@ lemma arch_switchToIdleThread_corres: Arch.switchToIdleThread" apply (simp add: arch_switch_to_idle_thread_def ARM_H.switchToIdleThread_def) - apply (corressimp corres: getIdleThread_corres setVMRoot_corres[@lift_corres_args]) + apply (corresKsimp corres: getIdleThread_corres setVMRoot_corres[@lift_corres_args]) apply (clarsimp simp: valid_idle_def valid_idle'_def pred_tcb_at_def obj_at_def is_tcb obj_at'_def) done lemma switchToIdleThread_corres: - "corres dc invs invs_no_cicd' switch_to_idle_thread switchToIdleThread" + "corres dc + (invs and valid_queues and valid_etcbs) + invs_no_cicd' + switch_to_idle_thread switchToIdleThread" apply (simp add: switch_to_idle_thread_def Thread_H.switchToIdleThread_def) + apply add_ready_qs_runnable + apply (rule corres_stateAssert_ignore, fastforce) apply (rule corres_guard_imp) apply (rule corres_split[OF getIdleThread_corres]) apply (rule corres_split[OF arch_switchToIdleThread_corres]) - apply (unfold setCurThread_def) - apply (rule corres_trivial, rule corres_modify) - apply (simp add: state_relation_def cdt_relation_def) - apply (wp+, simp+) + apply clarsimp + apply (rule setCurThread_corres) + apply wpsimp + apply (simp add: state_relation_def cdt_relation_def) + apply wpsimp+ apply (simp add: invs_unique_refs invs_valid_vs_lookup invs_valid_objs invs_valid_asid_map invs_arch_state invs_valid_global_objs invs_psp_aligned invs_distinct invs_valid_idle invs_vspace_objs) @@ -814,11 +866,9 @@ proof - apply (simp add: setCurThread_def) apply wp apply (clarsimp simp add: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def - valid_state'_def Invariants_H.valid_queues_def - sch_act_wf ct_in_state'_def state_refs_of'_def - ps_clear_def valid_irq_node'_def valid_queues'_def ct_not_inQ_ct - ct_idle_or_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def + valid_state'_def sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def ct_not_inQ_ct + ct_idle_or_in_cur_domain'_def bitmapQ_defs valid_bitmaps_def cong: option.case_cong) done qed @@ -832,101 +882,20 @@ lemma setCurThread_invs: by (rule hoare_pre, rule setCurThread_invs_no_cicd') (simp add: invs'_to_invs_no_cicd'_def) -lemma valid_queues_not_runnable_not_queued: - fixes s - assumes vq: "Invariants_H.valid_queues s" - and vq': "valid_queues' s" - and st: "st_tcb_at' (Not \ runnable') t s" - shows "obj_at' (Not \ tcbQueued) t s" -proof (rule ccontr) - assume "\ obj_at' (Not \ tcbQueued) t s" - moreover from st have "typ_at' TCBT t s" - by (rule pred_tcb_at' [THEN tcb_at_typ_at' [THEN iffD1]]) - ultimately have "obj_at' tcbQueued t s" - by (clarsimp simp: not_obj_at' comp_def) - - moreover - from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbPriority] - obtain p where tp: "obj_at' (\tcb. 
tcbPriority tcb = p) t s" - by clarsimp - - moreover - from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbDomain] - obtain d where td: "obj_at' (\tcb. tcbDomain tcb = d) t s" - by clarsimp - - ultimately - have "t \ set (ksReadyQueues s (d, p))" using vq' - unfolding valid_queues'_def - apply - - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (drule_tac x=t in spec) - apply (erule impE) - apply (fastforce simp add: inQ_def obj_at'_def) - apply (assumption) - done - - with vq have "st_tcb_at' runnable' t s" - unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def - apply - - apply clarsimp - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp simp add: st_tcb_at'_def) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp) - done - - with st show False - apply - - apply (drule(1) pred_tcb_at_conj') - apply (clarsimp) - done -qed - -(* - * The idle thread is not part of any ready queues. - *) -lemma idle'_not_tcbQueued': - assumes vq: "Invariants_H.valid_queues s" - and vq': "valid_queues' s" - and idle: "valid_idle' s" - shows "obj_at' (Not \ tcbQueued) (ksIdleThread s) s" - proof - - from idle have stidle: "st_tcb_at' (Not \ runnable') (ksIdleThread s) s" - by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def projectKOs idle_tcb'_def) - - with vq vq' show ?thesis - by (rule valid_queues_not_runnable_not_queued) - qed - lemma setCurThread_invs_no_cicd'_idle_thread: - "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" -proof - - have ct_not_inQ_ct: "\s t . \ ct_not_inQ s; obj_at' (\x. \ tcbQueued x) t s\ \ ct_not_inQ (s\ ksCurThread := t \)" - apply (simp add: ct_not_inQ_def o_def) - done - have idle'_activatable': "\ s t. st_tcb_at' idle' t s \ st_tcb_at' activatable' t s" - apply (clarsimp simp: st_tcb_at'_def o_def obj_at'_def) + "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\_. invs'\" + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def + valid_state'_def valid_idle'_def + sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_queues_def bitmapQ_defs valid_bitmaps_def pred_tcb_at'_def + cong: option.case_cong) + apply (clarsimp simp: idle_tcb'_def ct_not_inQ_def ps_clear_def obj_at'_def projectKOs + st_tcb_at'_def idleThreadNotQueued_def) done - show ?thesis - apply (simp add: setCurThread_def) - apply wp - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def) - apply (frule (2) idle'_not_tcbQueued'[simplified o_def]) - apply (clarsimp simp add: ct_not_inQ_ct idle'_activatable' - invs'_def cur_tcb'_def valid_state'_def valid_idle'_def - sch_act_wf ct_in_state'_def state_refs_of'_def - ps_clear_def valid_irq_node'_def - ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def bitmapQ_defs valid_queues_no_bitmap_def valid_queues'_def - pred_tcb_at'_def - cong: option.case_cong) - apply (clarsimp simp: obj_at'_def projectKOs idle_tcb'_def) - done -qed lemma clearExMonitor_invs'[wp]: "\invs'\ doMachineOp ARM.clearExMonitor \\rv. invs'\" @@ -964,13 +933,13 @@ lemma Arch_switchToThread_tcb_in_cur_domain'[wp]: done lemma tcbSchedDequeue_not_tcbQueued: - "\ tcb_at' t \ tcbSchedDequeue t \ \_. obj_at' (\x. \ tcbQueued x) t \" + "\\\ tcbSchedDequeue t \\_. obj_at' (\x. 
\ tcbQueued x) t\" apply (simp add: tcbSchedDequeue_def) apply (wp|clarsimp)+ apply (rule_tac Q="\queued. obj_at' (\x. tcbQueued x = queued) t" in hoare_post_imp) - apply (clarsimp simp: obj_at'_def) - apply (wp threadGet_obj_at') - apply (simp) + apply (clarsimp simp: obj_at'_def) + apply (wpsimp wp: threadGet_wp)+ + apply (clarsimp simp: obj_at'_def) done lemma Arch_switchToThread_obj_at[wp]: @@ -978,7 +947,7 @@ lemma Arch_switchToThread_obj_at[wp]: Arch.switchToThread t \\rv. obj_at' (P \ tcbState) t\" apply (simp add: ARM_H.switchToThread_def ) - apply (rule hoare_seq_ext)+ + apply (rule bind_wp)+ apply (rule doMachineOp_obj_at) apply (rule setVMRoot_obj_at) done @@ -1002,10 +971,6 @@ crunch valid_irq_states'[wp]: asUser "valid_irq_states'" crunch valid_machine_state'[wp]: asUser "valid_machine_state'" (wp: crunch_wps simp: crunch_simps) -crunch valid_queues'[wp]: asUser "valid_queues'" -(wp: crunch_wps simp: crunch_simps) - - crunch irq_masked'_helper: asUser "\s. P (intStateIRQTable (ksInterruptState s))" (wp: crunch_wps simp: crunch_simps) @@ -1033,22 +998,17 @@ lemma Arch_switchToThread_invs_no_cicd': lemma tcbSchedDequeue_invs_no_cicd'[wp]: - "\invs_no_cicd' and tcb_at' t\ - tcbSchedDequeue t - \\_. invs_no_cicd'\" - unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + "tcbSchedDequeue t \invs_no_cicd'\" + unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_pspace'_def apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - tcbSchedDequeue_valid_queues_weak untyped_ranges_zero_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp - apply (fastforce simp: valid_pspace'_def valid_queues_def - elim: valid_objs'_maxDomain valid_objs'_maxPriority intro: obj_at'_conjI) done lemma switchToThread_invs_no_cicd': - "\invs_no_cicd' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" + "\invs_no_cicd' and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" apply (simp add: Thread_H.switchToThread_def) apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued Arch_switchToThread_invs_no_cicd' Arch_switchToThread_pred_tcb') @@ -1056,7 +1016,7 @@ lemma switchToThread_invs_no_cicd': done lemma switchToThread_invs[wp]: - "\invs' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" + "\invs' and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" apply (simp add: Thread_H.switchToThread_def ) apply (wp threadSet_timeslice_invs setCurThread_invs Arch_switchToThread_invs dmo_invs' @@ -1101,8 +1061,7 @@ lemma dmo_cap_to'[wp]: lemma sct_cap_to'[wp]: "\ex_nonz_cap_to' p\ setCurThread t \\rv. ex_nonz_cap_to' p\" apply (simp add: setCurThread_def) - apply (wp ex_nonz_cap_to_pres') - apply (clarsimp elim!: cte_wp_at'_pspaceI)+ + apply (wpsimp wp: ex_nonz_cap_to_pres') done @@ -1142,62 +1101,6 @@ lemma tcb_at_typ_at': apply (case_tac ko, simp_all) done - -lemma invs'_not_runnable_not_queued: - fixes s - assumes inv: "invs' s" - and st: "st_tcb_at' (Not \ runnable') t s" - shows "obj_at' (Not \ tcbQueued) t s" - apply (insert assms) - apply (rule valid_queues_not_runnable_not_queued) - apply (clarsimp simp add: invs'_def valid_state'_def)+ - done - -lemma valid_queues_not_tcbQueued_not_ksQ: - fixes s - assumes vq: "Invariants_H.valid_queues s" - and notq: "obj_at' (Not \ tcbQueued) t s" - shows "\d p. 
t \ set (ksReadyQueues s (d, p))" -proof (rule ccontr, simp , erule exE, erule exE) - fix d p - assume "t \ set (ksReadyQueues s (d, p))" - with vq have "obj_at' (inQ d p) t s" - unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def - apply clarify - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (simp) - done - hence "obj_at' tcbQueued t s" - apply (rule obj_at'_weakenE) - apply (simp only: inQ_def) - done - with notq show "False" - by (clarsimp simp: obj_at'_def) -qed - -lemma not_tcbQueued_not_ksQ: - fixes s - assumes "invs' s" - and "obj_at' (Not \ tcbQueued) t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" - apply (insert assms) - apply (clarsimp simp add: invs'_def valid_state'_def) - apply (drule(1) valid_queues_not_tcbQueued_not_ksQ) - apply (clarsimp) - done - -lemma ct_not_ksQ: - "\ invs' s; ksSchedulerAction s = ResumeCurrentThread \ - \ \p. ksCurThread s \ set (ksReadyQueues s p)" - apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def) - apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) - apply (fastforce) - done - crunch nosch[wp]: getCurThread "\s. P (ksSchedulerAction s)" lemma setThreadState_rct: @@ -1207,21 +1110,21 @@ lemma setThreadState_rct: \\_ s. ksSchedulerAction s = ResumeCurrentThread\" apply (simp add: setThreadState_def) apply (rule hoare_pre_disj') - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] threadSet_nosch]]) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) apply (clarsimp simp: when_def) - apply (case_tac x) + apply (case_tac rv) apply (clarsimp, wp)[1] apply (clarsimp) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF threadSet_ct threadSet_nosch]]) - apply (rule hoare_seq_ext [OF _ isRunnable_inv]) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ isRunnable_inv]) + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF gct_wp gct_wp]]) apply (rename_tac ct) @@ -1272,21 +1175,24 @@ lemma bitmapQ_from_bitmap_lookup: done lemma lookupBitmapPriority_obj_at': - "\ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0; valid_queues_no_bitmap s; valid_bitmapQ s; - bitmapQ_no_L1_orphans s\ - \ obj_at' (inQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) and runnable' \ tcbState) - (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" + "\ksReadyQueuesL1Bitmap s d \ 0; valid_bitmapQ s; bitmapQ_no_L1_orphans s; + ksReadyQueues_asrt s; ready_qs_runnable s; pspace_aligned' s; pspace_distinct' s\ + \ obj_at' (inQ d (lookupBitmapPriority d s) and runnable' \ tcbState) + (the (tcbQueueHead (ksReadyQueues s (d, lookupBitmapPriority d s)))) s" apply (drule (2) bitmapQ_from_bitmap_lookup) apply (simp add: valid_bitmapQ_bitmapQ_simp) - apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)", simp) - apply (clarsimp, rename_tac t ts) - apply (drule cons_set_intro) - apply (drule (2) valid_queues_no_bitmap_objD) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def tcbQueueEmpty_def) + apply (drule_tac x=d in spec) + apply (drule_tac x="lookupBitmapPriority d s" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (fastforce simp: obj_at'_and ready_qs_runnable_def obj_at'_def st_tcb_at'_def inQ_def + tcbQueueEmpty_def) done lemma 
bitmapL1_zero_ksReadyQueues: "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ - \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. ksReadyQueues s (d,p) = [])" + \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. tcbQueueEmpty (ksReadyQueues s (d, p)))" apply (cases "ksReadyQueuesL1Bitmap s d = 0") apply (force simp add: bitmapQ_def valid_bitmapQ_def) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) @@ -1357,7 +1263,7 @@ lemma bitmapL1_highest_lookup: done lemma bitmapQ_ksReadyQueuesI: - "\ bitmapQ d p s ; valid_bitmapQ s \ \ ksReadyQueues s (d, p) \ []" + "\ bitmapQ d p s ; valid_bitmapQ s \ \ \ tcbQueueEmpty (ksReadyQueues s (d, p))" unfolding valid_bitmapQ_def by simp lemma getReadyQueuesL2Bitmap_inv[wp]: @@ -1366,24 +1272,22 @@ lemma getReadyQueuesL2Bitmap_inv[wp]: lemma switchToThread_lookupBitmapPriority_wp: "\\s. invs_no_cicd' s \ bitmapQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) s \ - t = hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)) \ + t = the (tcbQueueHead (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)))\ ThreadDecls_H.switchToThread t \\rv. invs'\" -proof - - have switchToThread_pre: - "\s p t.\ valid_queues s ; bitmapQ (ksCurDomain s) p s ; t = hd (ksReadyQueues s (ksCurDomain s,p)) \ - \ st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s" - unfolding valid_queues_def - apply (clarsimp dest!: bitmapQ_ksReadyQueuesI) - apply (case_tac "ksReadyQueues s (ksCurDomain s, p)", simp) - apply (rename_tac t ts) - apply (drule_tac t=t and p=p and d="ksCurDomain s" in valid_queues_no_bitmap_objD) - apply simp - apply (fastforce elim: obj_at'_weaken simp: inQ_def tcb_in_cur_domain'_def st_tcb_at'_def) - done - thus ?thesis - by (wp switchToThread_invs_no_cicd') (fastforce dest: invs_no_cicd'_queues) -qed + apply (simp add: Thread_H.switchToThread_def) + apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued + Arch_switchToThread_invs_no_cicd') + apply (auto elim!: pred_tcb'_weakenE) + apply (prop_tac "valid_bitmapQ s") + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_bitmaps_def) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def valid_bitmapQ_bitmapQ_simp) + apply (drule_tac x="ksCurDomain s" in spec) + apply (drule_tac x="lookupBitmapPriority (ksCurDomain s) s" in spec) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) + done lemma switchToIdleThread_invs_no_cicd': "\invs_no_cicd'\ switchToIdleThread \\rv. invs'\" @@ -1395,7 +1299,7 @@ lemma switchToIdleThread_invs_no_cicd': crunch obj_at'[wp]: "Arch.switchToIdleThread" "\s. obj_at' P t s" -declare static_imp_conj_wp[wp_split del] +declare hoare_weak_lift_imp_conj[wp_split del] lemma setCurThread_const: "\\_. P t \ setCurThread t \\_ s. P (ksCurThread s) \" @@ -1454,11 +1358,6 @@ lemma corres_assert_ret: apply (simp add: assert_def return_def fail_def) done -lemma corres_assert_assume_l: - "corres dc P Q (f ()) g - \ corres dc (P and (\s. P')) Q (assert P' >>= f) g" - by (force simp: corres_underlying_def assert_def return_def bind_def fail_def) - lemma corres_assert_assume_r: "corres dc P Q f (g ()) \ corres dc P (Q and (\s. 
Q')) f (assert Q' >>= g)" @@ -1488,9 +1387,8 @@ lemma guarded_switch_to_corres: and valid_vspace_objs and pspace_aligned and pspace_distinct and valid_vs_lookup and valid_global_objs and unique_table_refs o caps_of_state - and st_tcb_at runnable t and valid_etcbs) - (valid_arch_state' and valid_pspace' and Invariants_H.valid_queues - and st_tcb_at' runnable' t and cur_tcb') + and st_tcb_at runnable t and valid_etcbs and valid_queues and valid_idle) + (valid_arch_state' and no_0_obj' and sym_heap_sched_pointers and valid_objs') (guarded_switch_to t) (switchToThread t)" apply (simp add: guarded_switch_to_def) apply (rule corres_guard_imp) @@ -1499,8 +1397,8 @@ lemma guarded_switch_to_corres: apply (rule switchToThread_corres) apply (force simp: st_tcb_at_tcb_at) apply (wp gts_st_tcb_at) - apply (force simp: st_tcb_at_tcb_at)+ - done + apply (force simp: st_tcb_at_tcb_at projectKOs)+ + done abbreviation "enumPrio \ [0.e.maxPriority]" @@ -1509,7 +1407,7 @@ lemma curDomain_corres: "corres (=) \ \ (gets cur_domain) (curDomain)" lemma curDomain_corres': "corres (=) \ (\s. ksCurDomain s \ maxDomain) - (gets cur_domain) (if 1 < numDomains then curDomain else return 0)" + (gets cur_domain) (if Suc 0 < numDomains then curDomain else return 0)" apply (case_tac "1 < numDomains"; simp) apply (rule corres_guard_imp[OF curDomain_corres]; solves simp) (* if we have only one domain, then we are in it *) @@ -1519,27 +1417,32 @@ lemma curDomain_corres': lemma lookupBitmapPriority_Max_eqI: "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; ksReadyQueuesL1Bitmap s d \ 0 \ - \ lookupBitmapPriority d s = (Max {prio. ksReadyQueues s (d, prio) \ []})" + \ lookupBitmapPriority d s = (Max {prio. \ tcbQueueEmpty (ksReadyQueues s (d, prio))})" apply (rule Max_eqI[simplified eq_commute]; simp) apply (fastforce simp: bitmapL1_highest_lookup valid_bitmapQ_bitmapQ_simp) apply (metis valid_bitmapQ_bitmapQ_simp bitmapQ_from_bitmap_lookup) done lemma corres_gets_queues_getReadyQueuesL1Bitmap: - "corres (\qs l1. ((l1 = 0) = (\p. qs p = []))) \ valid_queues + "corres (\qs l1. (l1 = 0) = (\p. qs p = [])) \ valid_bitmaps (gets (\s. ready_queues s d)) (getReadyQueuesL1Bitmap d)" - unfolding state_relation_def valid_queues_def getReadyQueuesL1Bitmap_def - by (clarsimp simp: bitmapL1_zero_ksReadyQueues ready_queues_relation_def) + unfolding state_relation_def valid_bitmaps_def getReadyQueuesL1Bitmap_def + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x=d in spec) + apply (fastforce simp: bitmapL1_zero_ksReadyQueues list_queue_relation_def tcbQueueEmpty_def) + done lemma guarded_switch_to_chooseThread_fragment_corres: "corres dc (P and st_tcb_at runnable t and invs and valid_sched) - (P' and st_tcb_at' runnable' t and invs_no_cicd') - (guarded_switch_to t) - (do runnable \ isRunnable t; - y \ assert runnable; - ThreadDecls_H.switchToThread t - od)" + (P' and invs_no_cicd') + (guarded_switch_to t) + (do runnable \ isRunnable t; + y \ assert runnable; + ThreadDecls_H.switchToThread t + od)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) unfolding guarded_switch_to_def isRunnable_def apply simp apply (rule corres_guard_imp) @@ -1554,35 +1457,50 @@ lemma guarded_switch_to_chooseThread_fragment_corres: simp: pred_tcb_at' runnable'_def all_invs_but_ct_idle_or_in_cur_domain'_def) done +lemma Max_prio_helper: + "ready_queues_relation s s' + \ Max {prio. 
ready_queues s d prio \ []} + = Max {prio. \ tcbQueueEmpty (ksReadyQueues s' (d, prio))}" + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def tcbQueueEmpty_def) + apply (rule Max_eq_if) + apply fastforce + apply fastforce + apply (fastforce dest: heap_path_head) + apply clarsimp + apply (drule_tac x=d in spec) + apply (drule_tac x=b in spec) + apply force + done + lemma bitmap_lookup_queue_is_max_non_empty: - "\ valid_queues s'; (s, s') \ state_relation; invs s; + "\ valid_bitmaps s'; (s, s') \ state_relation; invs s; ksReadyQueuesL1Bitmap s' (ksCurDomain s') \ 0 \ - \ ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s') = - max_non_empty_queue (ready_queues s (cur_domain s))" - unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_queues_def - by (clarsimp simp add: max_non_empty_queue_def lookupBitmapPriority_Max_eqI - state_relation_def ready_queues_relation_def) + \ the (tcbQueueHead (ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s'))) + = hd (max_non_empty_queue (ready_queues s (cur_domain s)))" + apply (clarsimp simp: max_non_empty_queue_def valid_bitmaps_def lookupBitmapPriority_Max_eqI) + apply (frule curdomain_relation) + apply (drule state_relation_ready_queues_relation) + apply (simp add: Max_prio_helper) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (frule (2) bitmapL1_zero_ksReadyQueues[THEN arg_cong_Not, THEN iffD1]) + apply clarsimp + apply (cut_tac P="\x. \ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', x))" + in setcomp_Max_has_prop) + apply fastforce + apply (clarsimp simp: ready_queues_relation_def Let_def list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x="ksCurDomain s'" in spec) + apply (drule_tac x="Max {prio. \ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', prio))}" + in spec) + using heap_path_head tcbQueueEmpty_def + by fastforce lemma ksReadyQueuesL1Bitmap_return_wp: "\\s. P (ksReadyQueuesL1Bitmap s d) s \ getReadyQueuesL1Bitmap d \\rv s. P rv s\" unfolding getReadyQueuesL1Bitmap_def by wp -lemma ksReadyQueuesL1Bitmap_st_tcb_at': - "\ ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0 ; valid_queues s \ - \ st_tcb_at' runnable' (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" - apply (drule bitmapQ_from_bitmap_lookup; clarsimp simp: valid_queues_def) - apply (clarsimp simp add: valid_bitmapQ_bitmapQ_simp) - apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)") - apply simp - apply (simp add: valid_queues_no_bitmap_def) - apply (erule_tac x="ksCurDomain s" in allE) - apply (erule_tac x="lookupBitmapPriority (ksCurDomain s) s" in allE) - apply (clarsimp simp: st_tcb_at'_def) - apply (erule obj_at'_weaken) - apply simp - done - lemma curDomain_or_return_0: "\ \P\ curDomain \\rv s. Q rv s \; \s. P s \ ksCurDomain s \ maxDomain \ \ \P\ if 1 < numDomains then curDomain else return 0 \\rv s. Q rv s \" @@ -1595,51 +1513,68 @@ lemma invs_no_cicd_ksCurDomain_maxDomain': unfolding invs_no_cicd'_def by simp lemma chooseThread_corres: - "corres dc (invs and valid_sched) (invs_no_cicd') - choose_thread chooseThread" (is "corres _ ?PREI ?PREH _ _") + "corres dc (invs and valid_sched) invs_no_cicd' choose_thread chooseThread" + (is "corres _ ?PREI ?PREH _ _") proof - + + (* if we only have one domain, we are in it *) + have one_domain_case: + "\s. 
\ invs_no_cicd' s; numDomains \ 1 \ \ ksCurDomain s = 0" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) + show ?thesis - unfolding choose_thread_def chooseThread_def - apply (simp only: return_bind Let_def) - apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) - apply (rule corres_guard_imp) - apply (rule corres_split[OF curDomain_corres']) - apply clarsimp - apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) - apply (erule corres_if2[OF sym]) - apply (rule switchToIdleThread_corres) - apply (rule corres_symb_exec_r) - apply (rule corres_symb_exec_r) - apply (rule_tac - P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) \ - st_tcb_at runnable (hd (max_non_empty_queue queues)) s" and - P'="\s. (?PREH s \ st_tcb_at' runnable' (hd queue) s) \ - l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) \ - l1 \ 0 \ - queue = ksReadyQueues s (ksCurDomain s, - lookupBitmapPriority (ksCurDomain s) s)" and - F="hd queue = hd (max_non_empty_queue queues)" in corres_req) - apply (fastforce dest!: invs_no_cicd'_queues simp: bitmap_lookup_queue_is_max_non_empty) - apply clarsimp - apply (rule corres_guard_imp) - apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) - apply (wp | clarsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ - apply (clarsimp simp: if_apply_def2) - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) - apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ - apply (fastforce simp: invs_no_cicd'_def) - apply (clarsimp simp: valid_sched_def DetSchedInvs_AI.valid_queues_def max_non_empty_queue_def) - apply (erule_tac x="cur_domain s" in allE) - apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) - apply (case_tac "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \ []})") - apply (clarsimp) - apply (subgoal_tac - "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \ []}) \ []") - apply (fastforce elim!: setcomp_Max_has_prop)+ - apply (simp add: invs_no_cicd_ksCurDomain_maxDomain') - apply (clarsimp dest!: invs_no_cicd'_queues) - apply (fastforce intro: ksReadyQueuesL1Bitmap_st_tcb_at') - done + supply if_split[split del] + apply (clarsimp simp: choose_thread_def chooseThread_def) + apply add_ready_qs_runnable + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply fastforce + apply (simp only: return_bind Let_def) + apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) + apply (rule corres_guard_imp) + apply (rule corres_split[OF curDomain_corres']) + apply clarsimp + apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) + apply (erule corres_if2[OF sym]) + apply (rule switchToIdleThread_corres) + apply (rule corres_symb_exec_r) + apply (rule corres_symb_exec_r) + apply (rule_tac P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) + \ st_tcb_at runnable (hd (max_non_empty_queue queues)) s" + and P'="\s. 
?PREH s \ l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) + \ l1 \ 0 + \ queue = ksReadyQueues s (ksCurDomain s, + lookupBitmapPriority (ksCurDomain s) s)" + and F="the (tcbQueueHead queue) = hd (max_non_empty_queue queues)" + in corres_req) + apply (fastforce simp: bitmap_lookup_queue_is_max_non_empty + all_invs_but_ct_idle_or_in_cur_domain'_def) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) + apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ + apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) + apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ + apply (clarsimp simp: valid_sched_def max_non_empty_queue_def valid_queues_def split: if_splits) + apply (erule_tac x="cur_domain s" in allE) + apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) + apply (case_tac "ready_queues s (cur_domain s) + (Max {prio. ready_queues s (cur_domain s) prio + \ []})") + apply (clarsimp) + apply (subgoal_tac "ready_queues s (cur_domain s) + (Max {prio. ready_queues s (cur_domain s) prio \ []}) + \ []") + apply fastforce + apply (fastforce elim!: setcomp_Max_has_prop) + apply fastforce + apply clarsimp + apply (frule invs_no_cicd_ksCurDomain_maxDomain') + apply (prop_tac "valid_bitmaps s") + apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def) + apply (fastforce dest: one_domain_case split: if_splits) + done qed lemma thread_get_comm: "do x \ thread_get f p; y \ gets g; k x y od = @@ -1690,12 +1625,6 @@ lemma nextDomain_invs_no_cicd': all_invs_but_ct_idle_or_in_cur_domain'_def) done -lemma bind_dummy_ret_val: - "do y \ a; - b - od = do a; b od" - by simp - lemma scheduleChooseNewThread_fragment_corres: "corres dc (invs and valid_sched and (\s. scheduler_action s = choose_new_thread)) (invs' and (\s. ksSchedulerAction s = ChooseNewThread)) (do _ \ when (domainTime = 0) next_domain; @@ -1734,7 +1663,7 @@ lemma isHighestPrio_corres: assumes "d' = d" assumes "p' = p" shows - "corres ((=)) \ valid_queues + "corres ((=)) \ valid_bitmaps (gets (is_highest_prio d p)) (isHighestPrio d' p')" using assms @@ -1744,18 +1673,16 @@ lemma isHighestPrio_corres: apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) apply (rule corres_if_r'[where P'="\_. True",rotated]) apply (rule_tac corres_symb_exec_r) - apply (rule_tac - P="\s. q = ready_queues s d - " and - P'="\s. valid_queues s \ - l1 = ksReadyQueuesL1Bitmap s d \ - l1 \ 0 \ hprio = lookupBitmapPriority d s" and - F="hprio = Max {prio. q prio \ []}" in corres_req) - apply (elim conjE) - apply (clarsimp simp: valid_queues_def) - apply (subst lookupBitmapPriority_Max_eqI; blast?) - apply (fastforce simp: ready_queues_relation_def dest!: state_relationD) - apply fastforce + apply (rule_tac P="\s. q = ready_queues s d" + and P'="\s. valid_bitmaps s \ l1 = ksReadyQueuesL1Bitmap s d \ + l1 \ 0 \ hprio = lookupBitmapPriority d s" + and F="hprio = Max {prio. q prio \ []}" in corres_req) + apply (elim conjE) + apply (clarsimp simp: valid_bitmaps_def) + apply (subst lookupBitmapPriority_Max_eqI; blast?) 
+ apply (fastforce dest: state_relation_ready_queues_relation Max_prio_helper[where d=d] + simp: tcbQueueEmpty_def) + apply fastforce apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imps ksReadyQueuesL1Bitmap_return_wp)+ done @@ -1767,9 +1694,8 @@ crunch inv[wp]: schedule_switch_thread_fastfail P crunch inv[wp]: scheduleSwitchThreadFastfail P lemma setSchedulerAction_invs': (* not in wp set, clobbered by ssa_wp *) - "\\s. invs' s \ setSchedulerAction ChooseNewThread \\_. invs' \" + "setSchedulerAction ChooseNewThread \invs' \" by (wpsimp simp: invs'_def cur_tcb'_def valid_state'_def valid_irq_node'_def ct_not_inQ_def - valid_queues_def valid_queues_no_bitmap_def valid_queues'_def ct_idle_or_in_cur_domain'_def) lemma scheduleChooseNewThread_corres: @@ -1801,6 +1727,46 @@ lemma ethread_get_when_corres: apply wpsimp+ done +lemma tcb_sched_enqueue_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_enqueue t \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_enqueue_def set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_append_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_append tcb_ptr \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_append_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_enqueue_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_enqueue t \ready_qs_distinct\ " + unfolding tcb_sched_action_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +lemma tcb_sched_append_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_append t \ready_qs_distinct\ " + unfolding tcb_sched_action_def tcb_sched_append_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +crunches set_scheduler_action + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps simp: in_correct_ready_q_def ready_qs_distinct_def) + +crunches reschedule_required + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (ignore: tcb_sched_action wp: crunch_wps ignore_del: reschedule_required) + lemma schedule_corres: "corres dc (invs and valid_sched and valid_list) invs' (Schedule_A.schedule) ThreadDecls_H.schedule" supply ethread_get_wp[wp del] @@ -1828,7 +1794,7 @@ lemma schedule_corres: apply (rule corres_split[OF thread_get_isRunnable_corres]) apply (rule corres_split) apply (rule corres_when, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule scheduleChooseNewThread_corres, simp) apply (wp thread_get_wp' tcbSchedEnqueue_invs' hoare_vcg_conj_lift hoare_drop_imps | clarsimp)+ @@ -1837,7 +1803,7 @@ lemma schedule_corres: rename_tac was_running wasRunning) apply (rule corres_split) apply (rule corres_when, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_split[OF getIdleThread_corres], rename_tac it it') apply (rule_tac F="was_running \ ct \ it" in corres_gen_asm) apply (rule corres_split) @@ -1853,7 +1819,7 @@ lemma schedule_corres: apply (rule corres_split[OF curDomain_corres]) apply (rule corres_split[OF isHighestPrio_corres]; simp only:) 
apply (rule corres_if, simp) - apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) apply (simp, fold dc_def) apply (rule corres_split) apply (rule setSchedulerAction_corres; simp) @@ -1867,7 +1833,7 @@ lemma schedule_corres: apply (wp tcb_sched_action_enqueue_valid_blocked hoare_vcg_all_lift enqueue_thread_queued) apply (wp tcbSchedEnqueue_invs'_not_ResumeCurrentThread) apply (rule corres_if, fastforce) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (simp, fold dc_def) apply (rule corres_split) apply (rule setSchedulerAction_corres; simp) @@ -1899,7 +1865,8 @@ lemma schedule_corres: in hoare_post_imp, fastforce) apply (wp add: tcb_sched_action_enqueue_valid_blocked_except tcbSchedEnqueue_invs'_not_ResumeCurrentThread thread_get_wp - del: gets_wp)+ + del: gets_wp + | strengthen valid_objs'_valid_tcbs')+ apply (clarsimp simp: conj_ac if_apply_def2 cong: imp_cong conj_cong del: hoare_gets) apply (wp gets_wp)+ @@ -1922,18 +1889,17 @@ lemma schedule_corres: weak_valid_sched_action_def tcb_at_is_etcb_at tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]] valid_blocked_except_def valid_blocked_def) - apply (clarsimp simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) + apply (fastforce simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) done (* choose new thread case *) apply (intro impI conjI allI tcb_at_invs | fastforce simp: invs_def cur_tcb_def valid_etcbs_def valid_sched_def st_tcb_at_def obj_at_def valid_state_def weak_valid_sched_action_def not_cur_thread_def)+ - apply (simp add: valid_sched_def valid_blocked_def valid_blocked_except_def) done (* haskell final subgoal *) - apply (clarsimp simp: if_apply_def2 invs'_def valid_state'_def + apply (clarsimp simp: if_apply_def2 invs'_def valid_state'_def valid_sched_def cong: imp_cong split: scheduler_action.splits) apply (fastforce simp: cur_tcb'_def valid_pspace'_def) done @@ -1947,11 +1913,8 @@ proof - apply (simp add: setSchedulerAction_def) apply wp apply (clarsimp simp add: invs'_def valid_state'_def cur_tcb'_def - Invariants_H.valid_queues_def - state_refs_of'_def ps_clear_def - valid_irq_node'_def valid_queues'_def - tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def + state_refs_of'_def ps_clear_def valid_irq_node'_def + tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def bitmapQ_defs cong: option.case_cong) done qed @@ -1991,7 +1954,7 @@ lemma switchToThread_ct_not_queued_2: apply (simp add: Thread_H.switchToThread_def) apply (wp) apply (simp add: ARM_H.switchToThread_def setCurThread_def) - apply (wp tcbSchedDequeue_not_tcbQueued | simp )+ + apply (wp tcbSchedDequeue_not_tcbQueued hoare_drop_imp | simp )+ done lemma setCurThread_obj_at': @@ -2005,11 +1968,12 @@ proof - qed lemma switchToIdleThread_ct_not_queued_no_cicd': - "\ invs_no_cicd' \ switchToIdleThread \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s \" + "\invs_no_cicd'\ switchToIdleThread \\_ s. 
obj_at' (Not \ tcbQueued) (ksCurThread s) s \" apply (simp add: Thread_H.switchToIdleThread_def) apply (wp setCurThread_obj_at') - apply (rule idle'_not_tcbQueued') - apply (simp add: invs_no_cicd'_def)+ + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x="ksIdleThread s" in spec) + apply (clarsimp simp: invs_no_cicd'_def valid_idle'_def st_tcb_at'_def idle_tcb'_def obj_at'_def) done lemma switchToIdleThread_activatable_2[wp]: @@ -2026,7 +1990,7 @@ lemma switchToThread_tcb_in_cur_domain': ThreadDecls_H.switchToThread thread \\y s. tcb_in_cur_domain' (ksCurThread s) s\" apply (simp add: Thread_H.switchToThread_def setCurThread_def) - apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued) + apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued hoare_drop_imps) done lemma chooseThread_invs_no_cicd'_posts: (* generic version *) @@ -2048,11 +2012,14 @@ proof - by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) show ?thesis - unfolding chooseThread_def Let_def curDomain_def + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) @@ -2066,12 +2033,10 @@ proof - apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv switchToThread_ct_not_queued_2 assert_inv hoare_disjI2 switchToThread_tcb_in_cur_domain') - apply clarsimp - apply (clarsimp dest!: invs_no_cicd'_queues - simp: valid_queues_def lookupBitmapPriority_def[symmetric]) - apply (drule (3) lookupBitmapPriority_obj_at') - apply normalise_obj_at' - apply (fastforce simp: tcb_in_cur_domain'_def inQ_def elim: obj_at'_weaken) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ done qed @@ -2110,19 +2075,25 @@ proof - (* FIXME this is almost identical to the chooseThread_invs_no_cicd'_posts proof, can generalise? *) show ?thesis - unfolding chooseThread_def Let_def curDomain_def + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. 
invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) - apply (simp, wp (once) switchToIdleThread_invs_no_cicd', simp) + apply (simp, wp switchToIdleThread_invs_no_cicd', simp) (* we have a thread to switch to *) apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv) - apply (clarsimp dest!: invs_no_cicd'_queues simp: valid_queues_def) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) apply (fastforce elim: bitmapQ_from_bitmap_lookup simp: lookupBitmapPriority_def) apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ done @@ -2149,7 +2120,7 @@ lemma schedule_invs': "\invs'\ ThreadDecls_H.schedule \\rv. invs'\" supply ssa_wp[wp del] apply (simp add: schedule_def) - apply (rule_tac hoare_seq_ext, rename_tac t) + apply (rule_tac bind_wp, rename_tac t) apply (wp, wpc) \ \action = ResumeCurrentThread\ apply (wp)[1] @@ -2159,7 +2130,7 @@ lemma schedule_invs': apply (wpsimp wp: scheduleChooseNewThread_invs' ssa_invs' chooseThread_invs_no_cicd' setSchedulerAction_invs' setSchedulerAction_direct switchToThread_tcb_in_cur_domain' switchToThread_ct_not_queued_2 - | wp hoare_disjI2[where Q="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] + | wp hoare_disjI2[where R="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] | strengthen invs'_invs_no_cicd @@ -2242,7 +2213,7 @@ lemma schedule_ct_activatable'[wp]: "\invs'\ ThreadDecls_H.schedule \\_. ct_in_state' activatable'\" supply ssa_wp[wp del] apply (simp add: schedule_def) - apply (rule_tac hoare_seq_ext, rename_tac t) + apply (rule_tac bind_wp, rename_tac t) apply (wp, wpc) \ \action = ResumeCurrentThread\ apply (wp)[1] @@ -2272,12 +2243,20 @@ crunch sch_act_sane: setThreadState, setBoundNotification "sch_act_sane" (simp: crunch_simps wp: crunch_wps) lemma possibleSwitchTo_corres: - "corres dc (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t) - (Invariants_H.valid_queues and valid_queues' and - (\s. weak_sch_act_wf (ksSchedulerAction s) s) and cur_tcb' and tcb_at' t and st_tcb_at' runnable' t and valid_objs') - (possible_switch_to t) - (possibleSwitchTo t)" + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t + and in_correct_ready_q and ready_qs_distinct and pspace_aligned and pspace_distinct) + ((\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers and valid_objs') + (possible_switch_to t) (possibleSwitchTo t)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) supply ethread_get_wp[wp del] + apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) + apply (clarsimp simp: state_relation_def) + apply (rule tcb_at_cross, erule st_tcb_at_tcb_at; assumption) apply (simp add: possible_switch_to_def possibleSwitchTo_def cong: if_cong) apply (rule corres_guard_imp) apply (rule corres_split[OF curDomain_corres], simp) @@ -2286,21 +2265,21 @@ lemma possibleSwitchTo_corres: apply (clarsimp simp: etcb_relation_def) apply (rule corres_split[OF getSchedulerAction_corres]) apply (rule corres_if, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_if, simp) apply (case_tac action; simp) apply (rule corres_split[OF rescheduleRequired_corres]) - apply (rule tcbSchedEnqueue_corres) - apply (wp rescheduleRequired_valid_queues'_weak)+ + apply (rule tcbSchedEnqueue_corres, simp) + apply (wp reschedule_required_valid_queues | strengthen valid_objs'_valid_tcbs')+ apply (rule setSchedulerAction_corres, simp) apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imp[where f="ethread_get a b" for a b])+ apply (wp hoare_drop_imps)[1] apply wp+ - apply (fastforce simp: valid_sched_def invs_def valid_state_def cur_tcb_def + apply (clarsimp simp: valid_sched_def invs_def valid_state_def cur_tcb_def st_tcb_at_tcb_at valid_sched_action_def weak_valid_sched_action_def tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]]) - apply (simp add: tcb_at_is_etcb_at) + apply (fastforce simp: tcb_at_is_etcb_at) done end diff --git a/proof/refine/ARM/StateRelation.thy b/proof/refine/ARM/StateRelation.thy index 31c0ffb48f..2734b5ba88 100644 --- a/proof/refine/ARM/StateRelation.thy +++ b/proof/refine/ARM/StateRelation.thy @@ -20,6 +20,10 @@ where lemmas cte_map_def' = cte_map_def[simplified cte_level_bits_def, simplified] +lemma cte_map_def2: + "cte_map \ \(oref, cref). oref + (of_bl cref << cte_level_bits)" + by (simp add: cte_map_def word_shift_by_n) + definition lookup_failure_map :: "ExceptionTypes_A.lookup_failure \ Fault_H.lookup_failure" where @@ -192,13 +196,20 @@ where \ tcb_bound_notification tcb = tcbBoundNotification tcb' \ tcb_mcpriority tcb = tcbMCP tcb'" +\ \ + A pair of objects @{term "(obj, obj')"} should satisfy the following relation when, under further + mild assumptions, a @{term corres_underlying} lemma for @{term "set_object obj"} + and @{term "setObject obj'"} can be stated: see setObject_other_corres in KHeap_R. 
+ + TCBs do not satisfy this relation because the tcbSchedPrev and tcbSchedNext fields of a TCB are + used to model the ready queues, and so an update to such a field would correspond to an update + to a ready queue (see ready_queues_relation below).\ definition other_obj_relation :: "Structures_A.kernel_object \ Structures_H.kernel_object \ bool" where "other_obj_relation obj obj' \ (case (obj, obj') of - (TCB tcb, KOTCB tcb') \ tcb_relation tcb tcb' - | (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' + (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' | (Notification ntfn, KONotification ntfn') \ ntfn_relation ntfn ntfn' | (ArchObj (ARM_A.ASIDPool pool), KOArch (KOASIDPool pool')) \ asid_pool_relation pool pool' @@ -277,6 +288,12 @@ where | "aobj_relation_cuts (PageDirectory pd) x = (\y. (x + (ucast y << 2), pde_relation y)) ` UNIV" +definition tcb_relation_cut :: "Structures_A.kernel_object \ kernel_object \ bool" where + "tcb_relation_cut obj obj' \ + case (obj, obj') of + (TCB t, KOTCB t') \ tcb_relation t t' + | _ \ False" + primrec obj_relation_cuts :: "Structures_A.kernel_object \ word32 \ obj_relation_cuts" where @@ -284,7 +301,7 @@ where (if well_formed_cnode_n sz cs then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} else {(x, \\)})" -| "obj_relation_cuts (TCB tcb) x = {(x, other_obj_relation)}" +| "obj_relation_cuts (TCB tcb) x = {(x, tcb_relation_cut)}" | "obj_relation_cuts (Endpoint ep) x = {(x, other_obj_relation)}" | "obj_relation_cuts (Notification ntfn) x = {(x, other_obj_relation)}" | "obj_relation_cuts (ArchObj ao) x = aobj_relation_cuts ao x" @@ -295,6 +312,7 @@ lemma obj_relation_cuts_def2: (case ko of CNode sz cs \ if well_formed_cnode_n sz cs then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} else {(x, \\)} + | TCB tcb \ {(x, tcb_relation_cut)} | ArchObj (PageTable pt) \ (\y. (x + (ucast y << 2), pte_relation y)) ` (UNIV :: word8 set) | ArchObj (PageDirectory pd) \ (\y. (x + (ucast y << 2), pde_relation y)) @@ -309,6 +327,7 @@ lemma obj_relation_cuts_def3: "obj_relation_cuts ko x = (case (a_type ko) of ACapTable n \ {(cte_map (x, y), cte_relation y) | y. length y = n} + | ATCB \ {(x, tcb_relation_cut)} | AArch APageTable \ (\y. (x + (ucast y << 2), pte_relation y)) ` (UNIV :: word8 set) | AArch APageDirectory \ (\y. (x + (ucast y << 2), pde_relation y)) @@ -327,6 +346,7 @@ definition "is_other_obj_relation_type tp \ case tp of ACapTable n \ False + | ATCB \ False | AArch APageTable \ False | AArch APageDirectory \ False | AArch (AUserData _) \ False @@ -338,6 +358,10 @@ lemma is_other_obj_relation_type_CapTable: "\ is_other_obj_relation_type (ACapTable n)" by (simp add: is_other_obj_relation_type_def) +lemma is_other_obj_relation_type_TCB: + "\ is_other_obj_relation_type ATCB" + by (simp add: is_other_obj_relation_type_def) + lemma is_other_obj_relation_type_UserData: "\ is_other_obj_relation_type (AArch (AUserData sz))" unfolding is_other_obj_relation_type_def by simp @@ -385,11 +409,55 @@ where "sched_act_relation choose_new_thread a' = (a' = ChooseNewThread)" | "sched_act_relation (switch_thread x) a' = (a' = SwitchToThread x)" -definition - ready_queues_relation :: "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) - \ (domain \ priority \ KernelStateData_H.ready_queue) \ bool" -where - "ready_queues_relation qs qs' \ \d p. 
(qs d p = qs' (d, p))" +definition queue_end_valid :: "obj_ref list \ tcb_queue \ bool" where + "queue_end_valid ts q \ + (ts = [] \ tcbQueueEnd q = None) \ (ts \ [] \ tcbQueueEnd q = Some (last ts))" + +definition prev_queue_head :: "tcb_queue \ (obj_ref \ 'a) \ bool" where + "prev_queue_head q prevs \ \head. tcbQueueHead q = Some head \ prevs head = None" + +lemma prev_queue_head_heap_upd: + "\prev_queue_head q prevs; Some r \ tcbQueueHead q\ \ prev_queue_head q (prevs(r := x))" + by (clarsimp simp: prev_queue_head_def) + +definition list_queue_relation :: + "obj_ref list \ tcb_queue \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) \ bool" + where + "list_queue_relation ts q nexts prevs \ + heap_ls nexts (tcbQueueHead q) ts \ queue_end_valid ts q \ prev_queue_head q prevs" + +lemma list_queue_relation_nil: + "list_queue_relation ts q nexts prevs \ ts = [] \ tcbQueueEmpty q" + by (fastforce dest: heap_path_head simp: tcbQueueEmpty_def list_queue_relation_def) + +definition ready_queue_relation :: + "Deterministic_A.domain \ Structures_A.priority + \ Deterministic_A.ready_queue \ ready_queue + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (obj_ref \ bool) \ bool" + where + "ready_queue_relation d p q q' nexts prevs flag \ + list_queue_relation q q' nexts prevs + \ (\t. flag t \ t \ set q) + \ (d > maxDomain \ p > maxPriority \ tcbQueueEmpty q')" + +definition ready_queues_relation_2 :: + "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) + \ (domain \ priority \ ready_queue) + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (domain \ priority \ obj_ref \ bool) \ bool" + where + "ready_queues_relation_2 qs qs' nexts prevs inQs \ + \d p. let q = qs d p; q' = qs' (d, p); flag = inQs d p in + ready_queue_relation d p q q' nexts prevs flag" + +abbreviation ready_queues_relation :: "det_state \ kernel_state \ bool" where + "ready_queues_relation s s' \ + ready_queues_relation_2 + (ready_queues s) (ksReadyQueues s') (tcbSchedNexts_of s') (tcbSchedPrevs_of s') + (\d p. inQ d p |< tcbs_of' s')" + +lemmas ready_queues_relation_def = ready_queues_relation_2_def definition ghost_relation :: "Structures_A.kheap \ (word32 \ vmpage_size) \ (word32 \ nat) \ bool" @@ -463,6 +531,8 @@ lemma obj_relation_cutsE: \sz cs z cap cte. \ ko = CNode sz cs; well_formed_cnode_n sz cs; y = cte_map (x, z); ko' = KOCTE cte; cs z = Some cap; cap_relation cap (cteCap cte) \ \ R; + \tcb tcb'. \ y = x; ko = TCB tcb; ko' = KOTCB tcb'; tcb_relation tcb tcb' \ + \ R; \pt (z :: word8) pte'. 
\ ko = ArchObj (PageTable pt); y = x + (ucast z << 2); ko' = KOArch (KOPTE pte'); pte_relation_aligned z (pt z) pte' \ \ R; @@ -473,12 +543,12 @@ lemma obj_relation_cutsE: y = x + n * 2 ^ pageBits; n < 2 ^ (pageBitsForSize sz - pageBits) \ \ R; \ y = x; other_obj_relation ko ko'; is_other_obj_relation_type (a_type ko) \ \ R \ \ R" - apply (simp add: obj_relation_cuts_def2 is_other_obj_relation_type_def + apply (simp add: obj_relation_cuts_def2 is_other_obj_relation_type_def tcb_relation_cut_def a_type_def split: Structures_A.kernel_object.split_asm if_split_asm - ARM_A.arch_kernel_obj.split_asm) - apply ((clarsimp split: if_splits, - force simp: cte_relation_def pte_relation_def pde_relation_def)+)[5] + ARM_A.arch_kernel_obj.split_asm kernel_object.splits) + apply ((clarsimp split: if_splits, + force simp: cte_relation_def pte_relation_def pde_relation_def)+)[5] done lemma eq_trans_helper: @@ -554,7 +624,7 @@ where pspace_relation (kheap s) (ksPSpace s') \ ekheap_relation (ekheap s) (ksPSpace s') \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') - \ ready_queues_relation (ready_queues s) (ksReadyQueues s') + \ ready_queues_relation s s' \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') @@ -576,6 +646,10 @@ lemma curthread_relation: "(a, b) \ state_relation \ ksCurThread b = cur_thread a" by (simp add: state_relation_def) +lemma curdomain_relation[elim!]: + "(s, s') \ state_relation \ cur_domain s = ksCurDomain s'" + by (clarsimp simp: state_relation_def) + lemma state_relation_pspace_relation[elim!]: "(s,s') \ state_relation \ pspace_relation (kheap s) (ksPSpace s')" by (simp add: state_relation_def) @@ -584,12 +658,24 @@ lemma state_relation_ekheap_relation[elim!]: "(s,s') \ state_relation \ ekheap_relation (ekheap s) (ksPSpace s')" by (simp add: state_relation_def) +lemma state_relation_sched_act_relation[elim!]: + "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" + by (clarsimp simp: state_relation_def) + +lemma state_relation_ready_queues_relation[elim!]: + "(s, s') \ state_relation \ ready_queues_relation s s'" + by (simp add: state_relation_def) + +lemma state_relation_idle_thread[elim!]: + "(s, s') \ state_relation \ idle_thread s = ksIdleThread s'" + by (clarsimp simp: state_relation_def) + lemma state_relationD: assumes sr: "(s, s') \ state_relation" shows "pspace_relation (kheap s) (ksPSpace s') \ ekheap_relation (ekheap s) (ksPSpace s') \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') \ - ready_queues_relation (ready_queues s) (ksReadyQueues s') \ + ready_queues_relation s s' \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') \ @@ -611,7 +697,7 @@ lemma state_relationE [elim?]: and rl: "\pspace_relation (kheap s) (ksPSpace s'); ekheap_relation (ekheap s) (ksPSpace s'); sched_act_relation (scheduler_action s) (ksSchedulerAction s'); - ready_queues_relation (ready_queues s) (ksReadyQueues s'); + ready_queues_relation s s'; ghost_relation (kheap s) (gsUserPages s') (gsCNodes s'); cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) (ctes_of s'); diff --git a/proof/refine/ARM/Syscall_R.thy b/proof/refine/ARM/Syscall_R.thy index 6f011ce0da..555faf86c9 100644 --- a/proof/refine/ARM/Syscall_R.thy +++ 
b/proof/refine/ARM/Syscall_R.thy @@ -328,7 +328,7 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: apply (simp add: threadSet_def) apply wp apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp hoare_vcg_all_lift)+ apply (rename_tac word) apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" @@ -341,16 +341,14 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: lemma setDomain_corres: "corres dc - (valid_etcbs and valid_sched and tcb_at tptr) - (invs' and sch_act_simple - and tcb_at' tptr and (\s. new_dom \ maxDomain)) - (set_domain tptr new_dom) - (setDomain tptr new_dom)" + (valid_etcbs and valid_sched and tcb_at tptr and pspace_aligned and pspace_distinct) + (invs' and sch_act_simple and tcb_at' tptr and (\s. new_dom \ maxDomain)) + (set_domain tptr new_dom) (setDomain tptr new_dom)" apply (rule corres_gen_asm2) apply (simp add: set_domain_def setDomain_def thread_set_domain_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) - apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) apply (rule corres_split) apply (rule ethread_set_corres; simp) apply (clarsimp simp: etcb_relation_def) @@ -359,26 +357,38 @@ lemma setDomain_corres: apply (rule corres_split) apply clarsimp apply (rule corres_when[OF refl]) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_when[OF refl]) apply (rule rescheduleRequired_corres) - apply ((wp hoare_drop_imps hoare_vcg_conj_lift | clarsimp| assumption)+)[5] - apply clarsimp - apply (rule_tac Q="\_. valid_objs' and valid_queues' and valid_queues and - (\s. sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" - in hoare_strengthen_post[rotated]) - apply (auto simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def)[1] - apply (wp threadSet_valid_objs' threadSet_valid_queues'_no_state - threadSet_valid_queues_no_state - threadSet_pred_tcb_no_state | simp)+ - apply (rule_tac Q = "\r s. invs' s \ (\p. tptr \ set (ksReadyQueues s p)) \ sch_act_simple s - \ tcb_at' tptr s" in hoare_strengthen_post[rotated]) - apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) - apply (clarsimp simp:valid_tcb'_def) - apply (drule(1) bspec) - apply (clarsimp simp:tcb_cte_cases_def) + apply (wpsimp wp: hoare_drop_imps) + apply ((wpsimp wp: hoare_drop_imps | strengthen valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: gts_wp) + apply wpsimp + apply ((wpsimp wp: hoare_vcg_imp_lift' ethread_set_not_queued_valid_queues hoare_vcg_all_lift + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply (rule_tac Q="\_. valid_objs' and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct' + and (\s. sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" + in hoare_strengthen_post[rotated]) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def) + apply (wpsimp wp: threadSet_valid_objs' threadSet_sched_pointers + threadSet_valid_sched_pointers)+ + apply (rule_tac Q="\_ s. 
valid_queues s \ not_queued tptr s + \ pspace_aligned s \ pspace_distinct s \ valid_etcbs s + \ weak_valid_sched_action s" + in hoare_post_imp) + apply (fastforce simp: pred_tcb_at_def obj_at_def) + apply (wpsimp wp: tcb_dequeue_not_queued) + apply (rule_tac Q = "\_ s. invs' s \ obj_at' (Not \ tcbQueued) tptr s \ sch_act_simple s + \ tcb_at' tptr s" + in hoare_strengthen_post[rotated]) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) + apply (clarsimp simp: valid_tcb'_def obj_at'_def) + apply (drule (1) bspec) + apply (clarsimp simp: tcb_cte_cases_def cteSizeBits_def) apply fastforce - apply (wp hoare_vcg_all_lift Tcb_R.tcbSchedDequeue_not_in_queue)+ + apply (wp hoare_vcg_all_lift tcbSchedDequeue_not_queued)+ apply clarsimp apply (frule tcb_at_is_etcb_at) apply simp+ @@ -390,7 +400,7 @@ lemma performInvocation_corres: "\ inv_relation i i'; call \ block \ \ corres (dc \ (=)) (einvs and valid_invocation i - and simple_sched_action + and schact_is_rct and ct_active and (\s. (\w w2 b c. i = Invocations_A.InvokeEndpoint w w2 b c) \ st_tcb_at simple (cur_thread s) s)) (invs' and sch_act_simple and valid_invocation' i' and ct_active' and (\s. vs_valid_duplicates' (ksPSpace s))) @@ -440,14 +450,14 @@ lemma performInvocation_corres: apply (clarsimp simp: liftME_def) apply (rule corres_guard_imp) apply (erule invokeTCB_corres) - apply (simp)+ + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] \ \domain cap\ apply (clarsimp simp: invoke_domain_def) apply (rule corres_guard_imp) apply (rule corres_split[OF setDomain_corres]) apply (rule corres_trivial, simp) apply (wp)+ - apply (clarsimp+)[2] + apply (fastforce+)[2] \ \CNodes\ apply clarsimp apply (rule corres_guard_imp) @@ -455,7 +465,7 @@ lemma performInvocation_corres: apply assumption apply (rule corres_trivial, simp add: returnOk_def) apply wp+ - apply (clarsimp+)[2] + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) apply (rule corres_guard_imp, rule performIRQControl_corres, simp+) apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) @@ -679,7 +689,7 @@ proof - apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) apply (rule PUC,simp) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (wp hoare_vcg_all_lift static_imp_wp | simp add:ball_conj_distrib)+ + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ done qed @@ -757,90 +767,71 @@ lemma doReply_invs[wp]: "\tcb_at' t and tcb_at' t' and cte_wp_at' (\cte. \grant. cteCap cte = ReplyCap t False grant) slot and invs' and sch_act_simple\ - doReplyTransfer t' t slot grant - \\rv. invs'\" + doReplyTransfer t' t slot grant + \\_. invs'\" apply (simp add: doReplyTransfer_def liftM_def) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (rule hoare_seq_ext [OF _ assert_sp]) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ gts_sp']) + apply (rule bind_wp [OF _ assert_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp, wpc) - apply (wp) + apply wp apply (wp (once) sts_invs_minor'') - apply (simp) + apply simp apply (wp (once) sts_st_tcb') - apply (wp)[1] - apply (rule_tac Q="\rv s. invs' s - \ t \ ksIdleThread s - \ st_tcb_at' awaiting_reply' t s" + apply wp + apply (rule_tac Q="\_ s. 
invs' s \ t \ ksIdleThread s \ st_tcb_at' awaiting_reply' t s" in hoare_post_imp) - apply (clarsimp) - apply (frule_tac t=t in invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply clarsimp apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) - apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) - apply (clarsimp | drule(1) obj_at_conj')+ apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) apply (drule(1) pred_tcb_at_conj') apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") - apply (clarsimp) + apply clarsimp apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) apply (wp cteDeleteOne_reply_pred_tcb_at)+ - apply (clarsimp) + apply clarsimp apply (rule_tac Q="\_. (\s. t \ ksIdleThread s) - and cte_wp_at' (\cte. \grant. cteCap cte = capability.ReplyCap t False grant) slot" - in hoare_strengthen_post [rotated]) + and cte_wp_at' (\cte. \grant. cteCap cte + = capability.ReplyCap t False grant) slot" + in hoare_strengthen_post [rotated]) apply (fastforce simp: cte_wp_at'_def) - apply (wp) + apply wp apply (rule hoare_strengthen_post [OF doIPCTransfer_non_null_cte_wp_at']) apply (erule conjE) apply assumption apply (erule cte_wp_at_weakenE') apply (fastforce) - apply (wp sts_invs_minor'' sts_st_tcb' static_imp_wp) - apply (rule_tac Q="\rv s. invs' s \ sch_act_simple s - \ st_tcb_at' awaiting_reply' t s - \ t \ ksIdleThread s" - in hoare_post_imp) - apply (clarsimp) - apply (frule_tac t=t in invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (wp sts_invs_minor'' sts_st_tcb' hoare_weak_lift_imp) + apply (rule_tac Q="\_ s. invs' s \ sch_act_simple s + \ st_tcb_at' awaiting_reply' t s + \ t \ ksIdleThread s" + in hoare_post_imp) + apply clarsimp apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) - apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) - apply (clarsimp | drule(1) obj_at_conj')+ apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) apply (drule(1) pred_tcb_at_conj') apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") - apply (clarsimp) + apply clarsimp apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" - in pred_tcb'_weakenE) + in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) - apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 static_imp_wp + apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 hoare_weak_lift_imp | clarsimp simp add: inQ_def)+ apply (rule_tac Q="\_. 
invs' and tcb_at' t and sch_act_simple and st_tcb_at' awaiting_reply' t" in hoare_strengthen_post [rotated]) - apply (clarsimp) + apply clarsimp apply (rule conjI) - apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) - apply (rule conjI) - apply clarsimp - apply (clarsimp simp: obj_at'_def idle_tcb'_def pred_tcb_at'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def + idle_tcb'_def pred_tcb_at'_def) apply clarsimp apply (rule conjI) apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) apply (erule pred_tcb'_weakenE, clarsimp) - apply (rule conjI) apply (clarsimp simp : invs'_def valid_state'_def valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) - apply (rule conjI) - apply clarsimp - apply (frule invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, clarsimp) - apply (frule (1) not_tcbQueued_not_ksQ) - apply simp - apply clarsimp apply (wp cteDeleteOne_reply_pred_tcb_at hoare_drop_imp hoare_allI)+ apply (clarsimp simp add: isReply_awaiting_reply' cte_wp_at_ctes_of) apply (auto dest!: st_tcb_idle'[rotated] simp:isCap_simps) @@ -850,35 +841,9 @@ lemma ct_active_runnable' [simp]: "ct_active' s \ ct_in_state' runnable' s" by (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) -lemma valid_irq_node_tcbSchedEnqueue[wp]: - "\\s. valid_irq_node' (irq_node' s) s \ tcbSchedEnqueue ptr - \\rv s'. valid_irq_node' (irq_node' s') s'\" - apply (rule hoare_pre) - apply (simp add:valid_irq_node'_def ) - apply (wp hoare_unless_wp hoare_vcg_all_lift | wps)+ - apply (simp add:tcbSchedEnqueue_def) - apply (wp hoare_unless_wp| simp)+ - apply (simp add:valid_irq_node'_def) - done - -lemma rescheduleRequired_valid_queues_but_ct_domain: - "\\s. Invariants_H.valid_queues s \ valid_objs' s - \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) \ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - done - -lemma rescheduleRequired_valid_queues'_but_ct_domain: - "\\s. valid_queues' s - \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) - \ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: valid_queues'_def)+ - done +crunches tcbSchedEnqueue + for valid_irq_node[wp]: "\s. valid_irq_node' (irq_node' s) s" + (rule: valid_irq_node_lift) lemma tcbSchedEnqueue_valid_action: "\\s. \x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s\ @@ -889,9 +854,10 @@ lemma tcbSchedEnqueue_valid_action: done abbreviation (input) "all_invs_but_sch_extra \ - \s. valid_pspace' s \ Invariants_H.valid_queues s \ + \s. valid_pspace' s \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ + sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ @@ -903,7 +869,6 @@ abbreviation (input) "all_invs_but_sch_extra \ valid_machine_state' s \ cur_tcb' s \ untyped_ranges_zero' s \ - valid_queues' s \ valid_pde_mappings' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s)" @@ -913,18 +878,13 @@ lemma rescheduleRequired_all_invs_but_extra: "\\s. all_invs_but_sch_extra s\ rescheduleRequired \\_. 
invs'\" apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp add:rescheduleRequired_ct_not_inQ - rescheduleRequired_sch_act' - rescheduleRequired_valid_queues_but_ct_domain - rescheduleRequired_valid_queues'_but_ct_domain - valid_irq_node_lift valid_irq_handlers_lift'' - irqs_masked_lift cur_tcb_lift) + apply (wpsimp wp: rescheduleRequired_ct_not_inQ rescheduleRequired_sch_act' + valid_irq_node_lift valid_irq_handlers_lift'') apply auto done lemma threadSet_all_invs_but_sch_extra: - shows "\ tcb_at' t and (\s. (\p. t \ set (ksReadyQueues s p))) and + shows "\ tcb_at' t and all_invs_but_sch_extra and sch_act_simple and K (ds \ maxDomain) \ threadSet (tcbDomain_update (\_. ds)) t @@ -932,7 +892,7 @@ lemma threadSet_all_invs_but_sch_extra: apply (rule hoare_gen_asm) apply (rule hoare_pre) apply (wp threadSet_valid_pspace'T_P[where P = False and Q = \ and Q' = \]) - apply (simp add:tcb_cte_cases_def)+ + apply (simp add:tcb_cte_cases_def cteSizeBits_def)+ apply (wp threadSet_valid_pspace'T_P threadSet_state_refs_of'T_P[where f'=id and P'=False and Q=\ and g'=id and Q'=\] @@ -944,13 +904,11 @@ lemma threadSet_all_invs_but_sch_extra: valid_irq_handlers_lift'' threadSet_ctes_ofT threadSet_not_inQ - threadSet_valid_queues'_no_state - threadSet_valid_queues threadSet_valid_dom_schedule' threadSet_iflive'T threadSet_ifunsafe'T - untyped_ranges_zero_lift - | simp add:tcb_cte_cases_def cteCaps_of_def o_def)+ + untyped_ranges_zero_lift threadSet_sched_pointers threadSet_valid_sched_pointers + | simp add:tcb_cte_cases_def cteSizeBits_def cteCaps_of_def o_def)+ apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift threadSet_pred_tcb_no_state | simp)+ apply (clarsimp simp:sch_act_simple_def o_def cteCaps_of_def) apply (intro conjI) @@ -971,21 +929,19 @@ lemma setDomain_invs': (\y. domain \ maxDomain))\ setDomain ptr domain \\y. invs'\" apply (simp add:setDomain_def ) - apply (wp add: hoare_when_wp static_imp_wp static_imp_conj_wp rescheduleRequired_all_invs_but_extra + apply (wp add: when_wp hoare_weak_lift_imp hoare_weak_lift_imp_conj rescheduleRequired_all_invs_but_extra tcbSchedEnqueue_valid_action hoare_vcg_if_lift2) apply (rule_tac Q = "\r s. all_invs_but_sch_extra s \ curThread = ksCurThread s \ (ptr \ curThread \ ct_not_inQ s \ sch_act_wf (ksSchedulerAction s) s \ ct_idle_or_in_cur_domain' s)" in hoare_strengthen_post[rotated]) apply (clarsimp simp:invs'_def valid_state'_def st_tcb_at'_def[symmetric] valid_pspace'_def) - apply (erule st_tcb_ex_cap'') apply simp - apply (case_tac st,simp_all)[1] apply (rule hoare_strengthen_post[OF hoare_vcg_conj_lift]) apply (rule threadSet_all_invs_but_sch_extra) prefer 2 apply clarsimp apply assumption - apply (wp static_imp_wp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain + apply (wp hoare_weak_lift_imp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain threadSet_tcbDomain_update_ct_not_inQ | simp)+ apply (rule_tac Q = "\r s. invs' s \ curThread = ksCurThread s \ sch_act_simple s \ domain \ maxDomain @@ -997,17 +953,14 @@ lemma setDomain_invs': done lemma performInv_invs'[wp]: - "\invs' and sch_act_simple - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) - and ct_active' and valid_invocation' i\ - RetypeDecls_H.performInvocation block call i \\rv. invs'\" + "\invs' and sch_act_simple and ct_active' and valid_invocation' i\ + RetypeDecls_H.performInvocation block call i + \\_. 
invs'\" unfolding performInvocation_def apply (cases i) - apply ((clarsimp simp: simple_sane_strg sch_act_simple_def - ct_not_ksQ sch_act_sane_def - | wp tcbinv_invs' arch_performInvocation_invs' - setDomain_invs' - | rule conjI | erule active_ex_cap')+) + apply (clarsimp simp: simple_sane_strg sch_act_simple_def sch_act_sane_def + | wp tcbinv_invs' arch_performInvocation_invs' setDomain_invs' + | rule conjI | erule active_ex_cap')+ done lemma getSlotCap_to_refs[wp]: @@ -1088,7 +1041,7 @@ lemma lookupExtras_real_ctes[wp]: lemma lookupExtras_ctes[wp]: "\valid_objs'\ lookupExtraCaps t xs info \\rv s. \x \ set rv. cte_at' (snd x) s\,-" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookupExtras_real_ctes) apply (simp add: real_cte_at') done @@ -1192,20 +1145,22 @@ crunch valid_duplicates'[wp]: addToBitmap "\s. vs_valid_duplicates' (ksP (wp: setObject_ksInterrupt updateObject_default_inv) lemma tcbSchedEnqueue_valid_duplicates'[wp]: - "\\s. vs_valid_duplicates' (ksPSpace s)\ - tcbSchedEnqueue a \\rv s. vs_valid_duplicates' (ksPSpace s)\" - by (simp add:tcbSchedEnqueue_def unless_def setQueue_def | wp | wpc)+ + "tcbSchedEnqueue tcbPtr \\s. vs_valid_duplicates' (ksPSpace s)\" + by (wpsimp simp: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def setQueue_def) crunch valid_duplicates'[wp]: rescheduleRequired "\s. vs_valid_duplicates' (ksPSpace s)" (wp: setObject_ksInterrupt updateObject_default_inv) crunch valid_duplicates'[wp]: setThreadState "\s. vs_valid_duplicates' (ksPSpace s)" -(*FIXME: move to NonDetMonadVCG.valid_validE_R *) +crunches reply_from_kernel + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + lemma handleInvocation_corres: "c \ b \ corres (dc \ dc) - (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (einvs and schact_is_rct and ct_active) (invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') (handle_invocation c b) @@ -1233,11 +1188,9 @@ lemma handleInvocation_corres: apply wp[1] apply (clarsimp simp: when_def) apply (rule replyFromKernel_corres) - apply (rule corres_split[OF setThreadState_corres]) - apply simp - apply (rule corres_splitEE) - apply (rule performInvocation_corres; simp) - apply simp + apply (rule corres_split[OF setThreadState_corres], simp) + apply (rule corres_splitEE[OF performInvocation_corres]) + apply simp+ apply (rule corres_split[OF getThreadState_corres]) apply (rename_tac state state') apply (case_tac state, simp_all)[1] @@ -1248,21 +1201,17 @@ lemma handleInvocation_corres: apply simp apply (simp add: when_def) apply (rule conjI, rule impI) - apply (rule reply_from_kernel_tcb_at) + apply (wp reply_from_kernel_tcb_at) apply (rule impI, wp+) - apply simp+ - apply (wp hoare_drop_imps)+ - apply simp - apply wp - apply simp - apply (rule_tac Q="\rv. einvs and simple_sched_action and valid_invocation rve + apply (wpsimp wp: hoare_drop_imps|strengthen invs_distinct invs_psp_aligned)+ + apply (rule_tac Q="\rv. einvs and schact_is_rct and valid_invocation rve and (\s. thread = cur_thread s) and st_tcb_at active thread" in hoare_post_imp) apply (clarsimp simp: simple_from_active ct_in_state_def elim!: st_tcb_weakenE) - apply (wp sts_st_tcb_at' set_thread_state_simple_sched_action - set_thread_state_active_valid_sched) + apply (wp sts_st_tcb_at' set_thread_state_schact_is_rct + set_thread_state_active_valid_sched) apply (rule_tac Q="\rv. invs' and valid_invocation' rve' and (\s. 
thread = ksCurThread s) and st_tcb_at' active' thread @@ -1270,7 +1219,6 @@ lemma handleInvocation_corres: and (\s. vs_valid_duplicates' (ksPSpace s))" in hoare_post_imp) apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) apply (clarsimp) apply (wp setThreadState_nonqueued_state_update setThreadState_st_tcb setThreadState_rct)[1] @@ -1280,19 +1228,19 @@ lemma handleInvocation_corres: | rule hoare_vcg_E_elim)+ apply (clarsimp simp: tcb_at_invs invs_valid_objs valid_tcb_state_def ct_in_state_def - simple_from_active invs_mdb) - apply (clarsimp simp: msg_max_length_def word_bits_def) + simple_from_active invs_mdb + invs_distinct invs_psp_aligned) + apply (clarsimp simp: msg_max_length_def word_bits_def schact_is_rct_def) apply (erule st_tcb_ex_cap, clarsimp+) apply fastforce apply (clarsimp) apply (frule tcb_at_invs') apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def ct_not_inQ_def) - apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) apply (frule pred_tcb'_weakenE [where P=active' and P'=simple'], clarsimp) apply (frule(1) st_tcb_ex_cap'', fastforce) apply (clarsimp simp: valid_pspace'_def) - apply (frule(1) st_tcb_at_idle_thread') + apply (frule (1) st_tcb_at_idle_thread') apply (simp) done @@ -1332,11 +1280,11 @@ lemma hinv_invs'[wp]: apply (simp add: handleInvocation_def split_def ts_Restart_case_helper') apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' - hoare_vcg_all_lift static_imp_wp) + hoare_vcg_all_lift hoare_weak_lift_imp) apply simp apply (intro conjI impI) apply (wp gts_imp' | simp)+ - apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R[rotated]) + apply (rule_tac Q'="\rv. invs'" in hoare_strengthen_postE_R[rotated]) apply clarsimp apply (subgoal_tac "thread \ ksIdleThread s", simp_all)[1] apply (fastforce elim!: pred_tcb'_weakenE st_tcb_ex_cap'') @@ -1349,11 +1297,8 @@ lemma hinv_invs'[wp]: and st_tcb_at' active' thread" in hoare_post_imp) apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) - apply (clarsimp) apply (wp sts_invs_minor' setThreadState_st_tcb setThreadState_rct | simp)+ apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (fastforce simp add: tcb_at_invs' ct_in_state'_def simple_sane_strg sch_act_simple_def @@ -1362,12 +1307,13 @@ lemma hinv_invs'[wp]: done crunch typ_at'[wp]: handleFault "\s. P (typ_at' T p s)" + (wp: crunch_wps) lemmas handleFault_typ_ats[wp] = typ_at_lifts [OF handleFault_typ_at'] lemma handleSend_corres: "corres (dc \ dc) - (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (einvs and schact_is_rct and ct_active) (invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') (handle_send blocking) (handleSend blocking)" @@ -1453,7 +1399,7 @@ lemma cteDeleteOne_reply_cap_to''[wp]: cteDeleteOne slot \\rv. ex_nonz_cap_to' p\" apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (subgoal_tac "isReplyCap (cteCap cte) \ isNullCap (cteCap cte)") apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv @@ -1497,7 +1443,6 @@ lemma handleRecv_isBlocking_corres': and (\s. ex_nonz_cap_to (cur_thread s) s)) (invs' and ct_in_state' simple' and sch_act_sane - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) and (\s. 
ex_nonz_cap_to' (ksCurThread s) s)) (handle_recv isBlocking) (handleRecv isBlocking)" (is "corres dc (?pre1) (?pre2) (handle_recv _) (handleRecv _)") @@ -1560,8 +1505,7 @@ lemma handleRecv_isBlocking_corres': lemma handleRecv_isBlocking_corres: "corres dc (einvs and ct_active) - (invs' and ct_active' and sch_act_sane and - (\s. \p. ksCurThread s \ set (ksReadyQueues s p))) + (invs' and ct_active' and sch_act_sane) (handle_recv isBlocking) (handleRecv isBlocking)" apply (rule corres_guard_imp) apply (rule handleRecv_isBlocking_corres') @@ -1576,49 +1520,34 @@ lemma lookupCap_refs[wp]: "\invs'\ lookupCap t ref \\rv s. \r\zobj_refs' rv. ex_nonz_cap_to' r s\,-" by (simp add: lookupCap_def split_def | wp | simp add: o_def)+ -lemma deleteCallerCap_ksQ_ct': - "\invs' and ct_in_state' simple' and sch_act_sane and - (\s. ksCurThread s \ set (ksReadyQueues s p) \ thread = ksCurThread s)\ - deleteCallerCap thread - \\rv s. thread \ set (ksReadyQueues s p)\" - apply (rule_tac Q="\rv s. thread = ksCurThread s \ ksCurThread s \ set (ksReadyQueues s p)" - in hoare_strengthen_post) - apply (wp deleteCallerCap_ct_not_ksQ) - apply auto - done - lemma hw_invs'[wp]: "\invs' and ct_in_state' simple' and sch_act_sane and (\s. ex_nonz_cap_to' (ksCurThread s) s) - and (\s. ksCurThread s \ ksIdleThread s) - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ + and (\s. ksCurThread s \ ksIdleThread s)\ handleRecv isBlocking \\r. invs'\" apply (simp add: handleRecv_def cong: if_cong) apply (rule hoare_pre) apply ((wp getNotification_wp | wpc | simp)+)[1] apply (clarsimp simp: ct_in_state'_def) apply ((wp deleteCallerCap_nonz_cap hoare_vcg_all_lift - deleteCallerCap_ksQ_ct' hoare_lift_Pf2[OF deleteCallerCap_simple deleteCallerCap_ct'] | wpc | simp)+)[1] apply simp apply (wp deleteCallerCap_nonz_cap hoare_vcg_all_lift - deleteCallerCap_ksQ_ct' hoare_lift_Pf2[OF deleteCallerCap_simple deleteCallerCap_ct'] | wpc | simp add: ct_in_state'_def whenE_def split del: if_split)+ apply (rule validE_validE_R) apply (rule_tac Q="\rv s. invs' s \ sch_act_sane s - \ (\p. ksCurThread s \ set (ksReadyQueues s p)) \ thread = ksCurThread s \ ct_in_state' simple' s \ ex_nonz_cap_to' thread s \ thread \ ksIdleThread s \ (\x \ zobj_refs' rv. ex_nonz_cap_to' x s)" and E="\_ _. True" - in hoare_post_impErr[rotated]) + in hoare_strengthen_postE[rotated]) apply (clarsimp simp: isCap_simps ct_in_state'_def pred_tcb_at' invs_valid_objs' sch_act_sane_not obj_at'_def projectKOs pred_tcb_at'_def) apply (assumption) @@ -1635,34 +1564,45 @@ lemma setSchedulerAction_obj_at'[wp]: by (wp, clarsimp elim!: obj_at'_pspaceI) lemma handleYield_corres: - "corres dc einvs (invs' and ct_active' and (\s. ksSchedulerAction s = ResumeCurrentThread)) handle_yield handleYield" + "corres dc + (einvs and ct_active) + (invs' and (\s. 
ksSchedulerAction s = ResumeCurrentThread)) + handle_yield handleYield" apply (clarsimp simp: handle_yield_def handleYield_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) apply simp - apply (rule corres_split[OF tcbSchedDequeue_corres]) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (rule rescheduleRequired_corres) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues | simp add: )+ - apply (simp add: invs_def valid_sched_def valid_sched_action_def - cur_tcb_def tcb_at_is_etcb_at) - apply clarsimp - apply (frule ct_active_runnable') - apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def + apply (wpsimp wp: weak_sch_act_wf_lift_linear + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+ + apply (simp add: invs_def valid_sched_def valid_sched_action_def cur_tcb_def + tcb_at_is_etcb_at valid_state_def valid_pspace_def ct_in_state_def + runnable_eq_active) + apply (fastforce simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def) - apply (erule(1) valid_objs_valid_tcbE[OF valid_pspace_valid_objs']) - apply (simp add:valid_tcb'_def) + done + +lemma tcbSchedAppend_ct_in_state'[wp]: + "tcbSchedAppend t \ct_in_state' test\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]; wp) done lemma hy_invs': "\invs' and ct_active'\ handleYield \\r. invs' and ct_active'\" apply (simp add: handleYield_def) - apply (wp ct_in_state_thread_state_lift' - rescheduleRequired_all_invs_but_ct_not_inQ - tcbSchedAppend_invs_but_ct_not_inQ' | simp)+ - apply (clarsimp simp add: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def - valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def - ) + apply (wpsimp wp: ct_in_state_thread_state_lift' rescheduleRequired_all_invs_but_ct_not_inQ) + apply (rule_tac Q="\_. all_invs_but_ct_not_inQ' and ct_active'" in hoare_post_imp) + apply clarsimp + apply (subst pred_conj_def) + apply (rule hoare_vcg_conj_lift) + apply (rule tcbSchedAppend_all_invs_but_ct_not_inQ') + apply wpsimp + apply wpsimp + apply wpsimp apply (simp add:ct_active_runnable'[unfolded ct_in_state'_def]) done @@ -1767,7 +1707,7 @@ lemmas cteDeleteOne_st_tcb_at_simple'[wp] = cteDeleteOne_st_tcb_at[where P=simple', simplified] crunch st_tcb_at_simple'[wp]: handleReply "st_tcb_at' simple' t'" - (wp: hoare_post_taut crunch_wps sts_st_tcb_at'_cases + (wp: hoare_TrueI crunch_wps sts_st_tcb_at'_cases threadSet_pred_tcb_no_state ignore: setThreadState) @@ -1793,18 +1733,17 @@ lemma hr_ct_active'[wp]: "\invs' and ct_active'\ handleReply \\rv. 
ct_active'\" apply (simp add: handleReply_def getSlotCap_def getCurThread_def getThreadCallerSlot_def locateSlot_conv) - apply (rule hoare_seq_ext) - apply (rule_tac t=thread in ct_in_state'_decomp) - apply ((wp hoare_drop_imps | wpc | simp)+)[1] - apply (subst haskell_assert_def) - apply (wp hoare_vcg_all_lift getCTE_wp doReplyTransfer_st_tcb_at_active - | wpc | simp)+ + apply (rule bind_wp, rename_tac cur_thread) + apply (rule_tac t=cur_thread in ct_in_state'_decomp) + apply (wpsimp wp: getCTE_wp) + apply (fastforce simp: cte_wp_at_ctes_of) + apply (wpsimp wp: getCTE_wp doReplyTransfer_st_tcb_at_active)+ apply (fastforce simp: ct_in_state'_def cte_wp_at_ctes_of valid_cap'_def - dest: ctes_of_valid') + dest: ctes_of_valid') done lemma handleCall_corres: - "corres (dc \ dc) (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + "corres (dc \ dc) (einvs and schact_is_rct and ct_active) (invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') @@ -1866,7 +1805,7 @@ lemma handleReply_sane: "\sch_act_sane\ handleReply \\rv. sch_act_sane\" apply (simp add: handleReply_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) apply (rule hoare_pre) - apply (wp haskell_assert_wp doReplyTransfer_sane getCTE_wp'| wpc)+ + apply (wp doReplyTransfer_sane getCTE_wp'| wpc)+ apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -1882,74 +1821,6 @@ lemma handleReply_nonz_cap_to_ct: crunch ksQ[wp]: handleFaultReply "\s. P (ksReadyQueues s p)" -lemma doReplyTransfer_ct_not_ksQ: - "\ invs' and sch_act_simple - and tcb_at' thread and tcb_at' word - and ct_in_state' simple' - and (\s. ksCurThread s \ word) - and (\s. \p. ksCurThread s \ set(ksReadyQueues s p))\ - doReplyTransfer thread word callerSlot g - \\rv s. \p. ksCurThread s \ set(ksReadyQueues s p)\" -proof - - have astct: "\t p. - \(\s. ksCurThread s \ set(ksReadyQueues s p) \ sch_act_sane s) - and (\s. ksCurThread s \ t)\ - possibleSwitchTo t \\rv s. ksCurThread s \ set(ksReadyQueues s p)\" - apply (rule hoare_weaken_pre) - apply (wps possibleSwitchTo_ct') - apply (wp possibleSwitchTo_ksQ') - apply (clarsimp simp: sch_act_sane_def) - done - have stsct: "\t st p. - \(\s. ksCurThread s \ set(ksReadyQueues s p)) and sch_act_simple\ - setThreadState st t - \\rv s. ksCurThread s \ set(ksReadyQueues s p)\" - apply (rule hoare_weaken_pre) - apply (wps setThreadState_ct') - apply (wp hoare_vcg_all_lift sts_ksQ) - apply (clarsimp) - done - show ?thesis - apply (simp add: doReplyTransfer_def) - apply (wp, wpc) - apply (wp astct stsct hoare_vcg_all_lift - cteDeleteOne_ct_not_ksQ hoare_drop_imp - hoare_lift_Pf2 [OF cteDeleteOne_sch_act_not cteDeleteOne_ct'] - hoare_lift_Pf2 [OF doIPCTransfer_pred_tcb_at' doIPCTransfer_ct'] - hoare_lift_Pf2 [OF doIPCTransfer_ksQ doIPCTransfer_ct'] - hoare_lift_Pf2 [OF threadSet_ksQ threadSet_ct] - hoare_lift_Pf2 [OF handleFaultReply_ksQ handleFaultReply_ct'] - | simp add: ct_in_state'_def)+ - apply (fastforce simp: sch_act_simple_def sch_act_sane_def ct_in_state'_def)+ - done -qed - -lemma handleReply_ct_not_ksQ: - "\invs' and sch_act_simple - and ct_in_state' simple' - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ - handleReply - \\rv s. \p. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: handleReply_def del: split_paired_All) - apply (subst haskell_assert_def) - apply (wp | wpc)+ - apply (wp doReplyTransfer_ct_not_ksQ getThreadCallerSlot_inv)+ - apply (rule_tac Q="\cap. - (\s. \p. 
ksCurThread s \ set(ksReadyQueues s p)) - and invs' - and sch_act_simple - and (\s. thread = ksCurThread s) - and tcb_at' thread - and ct_in_state' simple' - and cte_wp_at' (\c. cteCap c = cap) callerSlot" - in hoare_post_imp) - apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def - cte_wp_at_ctes_of valid_cap'_def - dest!: ctes_of_valid') - apply (wp getSlotCap_cte_wp_at getThreadCallerSlot_inv)+ - apply (clarsimp) - done - crunches possible_switch_to, handle_recv for valid_etcbs[wp]: "valid_etcbs" (wp: crunch_wps simp: crunch_simps) @@ -1963,11 +1834,10 @@ lemma handleReply_handleRecv_corres: apply (rule corres_split_nor[OF handleReply_corres]) apply (rule handleRecv_isBlocking_corres') apply (wp handle_reply_nonz_cap_to_ct handleReply_sane - handleReply_nonz_cap_to_ct handleReply_ct_not_ksQ handle_reply_valid_sched)+ + handleReply_nonz_cap_to_ct handle_reply_valid_sched)+ apply (fastforce simp: ct_in_state_def ct_in_state'_def simple_sane_strg elim!: st_tcb_weakenE st_tcb_ex_cap') apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) apply (fastforce elim: pred_tcb'_weakenE) done @@ -1975,7 +1845,6 @@ lemma handleHypervisorFault_corres: "corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread and (%_. valid_fault f)) (invs' and sch_act_not thread - and (\s. \p. thread \ set(ksReadyQueues s p)) and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) (handle_hypervisor_fault w fault) (handleHypervisorFault w fault)" @@ -1985,7 +1854,7 @@ lemma handleHypervisorFault_corres: (* FIXME: move *) lemma handleEvent_corres: "corres (dc \ dc) (einvs and (\s. event \ Interrupt \ ct_running s) and - (\s. scheduler_action s = resume_cur_thread)) + schact_is_rct) (invs' and (\s. event \ Interrupt \ ct_running' s) and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread)) @@ -1993,14 +1862,13 @@ lemma handleEvent_corres: (is "?handleEvent_corres") proof - have hw: - "\isBlocking. corres dc (einvs and ct_running and (\s. scheduler_action s = resume_cur_thread)) + "\isBlocking. corres dc (einvs and ct_running and schact_is_rct) (invs' and ct_running' and (\s. ksSchedulerAction s = ResumeCurrentThread)) (handle_recv isBlocking) (handleRecv isBlocking)" apply (rule corres_guard_imp [OF handleRecv_isBlocking_corres]) apply (clarsimp simp: ct_in_state_def ct_in_state'_def - elim!: st_tcb_weakenE pred_tcb'_weakenE - dest!: ct_not_ksQ)+ + elim!: st_tcb_weakenE pred_tcb'_weakenE)+ done show ?thesis apply (case_tac event) @@ -2015,7 +1883,7 @@ proof - corres_guard_imp[OF handleCall_corres] corres_guard_imp[OF handleYield_corres] active_from_running active_from_running' - simp: simple_sane_strg)[8] + simp: simple_sane_strg schact_is_rct_def)[8] apply (rule corres_underlying_split) apply (rule corres_guard_imp[OF getCurThread_corres], simp+) apply (rule handleFault_corres) @@ -2026,7 +1894,6 @@ proof - simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] @@ -2039,12 +1906,11 @@ proof - simp: ct_in_state_def valid_fault_def) apply wp apply clarsimp - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] apply (rule corres_guard_imp) - apply (rule corres_split_eqr[where R="\rv. einvs" + apply (rule corres_split_eqr[where R="\_. einvs" and R'="\rv s. \x. 
rv = Some x \ R'' x s" for R'']) apply (rule corres_machine_op) @@ -2054,10 +1920,7 @@ proof - apply (rule handleInterrupt_corres) apply (wp hoare_vcg_all_lift doMachineOp_getActiveIRQ_IRQ_active' - | simp | simp add: imp_conjR | wp (once) hoare_drop_imps)+ - apply force - apply simp apply (simp add: invs'_def valid_state'_def) apply (rule_tac corres_underlying_split) apply (rule corres_guard_imp, rule getCurThread_corres, simp+) @@ -2073,7 +1936,6 @@ proof - apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (fastforce simp: simple_sane_strg sch_act_simple_def ct_in_state'_def elim: st_tcb_ex_cap'' pred_tcb'_weakenE) apply (rule corres_underlying_split) @@ -2085,7 +1947,6 @@ proof - simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] @@ -2162,10 +2023,8 @@ proof - apply (rename_tac syscall) apply (case_tac syscall, (wp handleReply_sane handleReply_nonz_cap_to_ct handleReply_ksCurThread - handleReply_ct_not_ksQ | clarsimp simp: active_from_running' simple_from_running' simple_sane_strg simp del: split_paired_All | rule conjI active_ex_cap' - | drule ct_not_ksQ[rotated] | strengthen nidle)+) apply (rule hoare_strengthen_post, rule hoare_weaken_pre, @@ -2177,7 +2036,6 @@ proof - | erule pred_tcb'_weakenE st_tcb_ex_cap'' | clarsimp simp: tcb_at_invs ct_in_state'_def simple_sane_strg sch_act_simple_def | drule st_tcb_at_idle_thread' - | drule ct_not_ksQ[rotated] | wpc | wp (once) hoare_drop_imps)+ done qed diff --git a/proof/refine/ARM/TcbAcc_R.thy b/proof/refine/ARM/TcbAcc_R.thy index b8fbd52f9a..3d51e5b535 100644 --- a/proof/refine/ARM/TcbAcc_R.thy +++ b/proof/refine/ARM/TcbAcc_R.thy @@ -11,10 +11,8 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) declare if_weak_cong [cong] -declare result_in_set_wp[wp] declare hoare_in_monad_post[wp] declare trans_state_update'[symmetric,simp] -declare empty_fail_sequence_x[simp] declare storeWordUser_typ_at' [wp] (* Auxiliaries and basic properties of priority bitmap functions *) @@ -51,7 +49,7 @@ lemma isHighestPrio_def': "isHighestPrio d p = gets (\s. ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' apply (rule ext) - apply (clarsimp simp: gets_def bind_assoc return_def NonDetMonad.bind_def get_def + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def split: if_splits) done @@ -60,10 +58,8 @@ lemma getHighestPrio_inv[wp]: unfolding bitmap_fun_defs by simp lemma valid_bitmapQ_bitmapQ_simp: - "\ valid_bitmapQ s \ \ - bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" - unfolding valid_bitmapQ_def - by simp + "valid_bitmapQ s \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (simp add: valid_bitmapQ_def) lemma prioToL1Index_l1IndexToPrio_or_id: "\ unat (w'::priority) < 2 ^ wordRadix ; w < size w' \ @@ -86,54 +82,156 @@ lemma l1IndexToPrio_wordRadix_mask[simp]: unfolding l1IndexToPrio_def by (simp add: wordRadix_def') -definition - (* when in the middle of updates, a particular queue might not be entirely valid *) - valid_queues_no_bitmap_except :: "word32 \ kernel_state \ bool" -where - "valid_queues_no_bitmap_except t' \ \s. - (\d p. (\t \ set (ksReadyQueues s (d, p)). 
t \ t' \ obj_at' (inQ d p and runnable' \ tcbState) t s) - \ distinct (ksReadyQueues s (d, p)) - \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - -lemma valid_queues_no_bitmap_exceptI[intro]: - "valid_queues_no_bitmap s \ valid_queues_no_bitmap_except t s" - unfolding valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def - by simp - lemma st_tcb_at_coerce_abstract: assumes t: "st_tcb_at' P t c" assumes sr: "(a, c) \ state_relation" shows "st_tcb_at (\st. \st'. thread_state_relation st st' \ P st') t a" using assms apply (clarsimp simp: state_relation_def pred_tcb_at'_def obj_at'_def - projectKOs objBits_simps) - apply (erule(1) pspace_dom_relatedE) - apply (erule(1) obj_relation_cutsE, simp_all) - apply (clarsimp simp: st_tcb_at_def obj_at_def other_obj_relation_def - tcb_relation_def - split: Structures_A.kernel_object.split_asm if_split_asm - ARM_A.arch_kernel_obj.split_asm)+ - apply fastforce + projectKOs) + apply (erule (1) pspace_dom_relatedE) + apply (erule (1) obj_relation_cutsE, simp_all) + by (fastforce simp: st_tcb_at_def obj_at_def other_obj_relation_def tcb_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm)+ + +lemma st_tcb_at_runnable_coerce_concrete: + assumes t: "st_tcb_at runnable t a" + assumes sr: "(a, c) \ state_relation" + assumes tcb: "tcb_at' t c" + shows "st_tcb_at' runnable' t c" + using t + apply - + apply (rule ccontr) + apply (drule pred_tcb_at'_Not[THEN iffD2, OF conjI, OF tcb]) + apply (drule st_tcb_at_coerce_abstract[OF _ sr]) + apply (clarsimp simp: st_tcb_def2) + apply (case_tac "tcb_state tcb"; simp) done -lemma valid_objs_valid_tcbE: "\s t.\ valid_objs' s; tcb_at' t s; \tcb. valid_tcb' tcb s \ R s tcb \ \ obj_at' (R s) t s" +lemma pspace_relation_tcb_at': + assumes p: "pspace_relation (kheap a) (ksPSpace c)" + assumes t: "tcb_at t a" + assumes aligned: "pspace_aligned' c" + assumes distinct: "pspace_distinct' c" + shows "tcb_at' t c" + using assms + apply (clarsimp simp: obj_at_def) + apply (drule(1) pspace_relation_absD) + apply (clarsimp simp: is_tcb tcb_relation_cut_def) + apply (simp split: kernel_object.split_asm) + apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb], simp) + apply (erule obj_at'_weakenE) + apply simp + done + +lemma tcb_at_cross: + "\tcb_at t s; pspace_aligned s; pspace_distinct s; pspace_relation (kheap s) (ksPSpace s')\ + \ tcb_at' t s'" + apply (drule (2) pspace_distinct_cross) + apply (drule (1) pspace_aligned_cross) + apply (erule (3) pspace_relation_tcb_at') + done + +lemma tcb_at'_cross: + assumes p: "pspace_relation (kheap s) (ksPSpace s')" + assumes t: "tcb_at' ptr s'" + shows "tcb_at ptr s" + using assms + apply (clarsimp simp: obj_at'_def) + apply (erule (1) pspace_dom_relatedE) + by (clarsimp simp: obj_relation_cuts_def2 obj_at_def cte_relation_def + other_obj_relation_def pte_relation_def pde_relation_def is_tcb_def projectKOs + split: Structures_A.kernel_object.split_asm if_split_asm arch_kernel_obj.split_asm) + +lemma st_tcb_at_runnable_cross: + "\ st_tcb_at runnable t s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ st_tcb_at' runnable' t s'" + apply (frule (1) pspace_distinct_cross, fastforce simp: state_relation_def) + apply (frule pspace_aligned_cross, fastforce simp: state_relation_def) + apply (prop_tac "tcb_at t s", clarsimp simp: st_tcb_at_def obj_at_def is_tcb) + apply (drule (2) tcb_at_cross, fastforce simp: state_relation_def) + apply (erule (2) st_tcb_at_runnable_coerce_concrete) + done + +lemma 
cur_tcb_cross: + "\ cur_tcb s; pspace_aligned s; pspace_distinct s; (s,s') \ state_relation \ \ cur_tcb' s'" + apply (clarsimp simp: cur_tcb'_def cur_tcb_def state_relation_def) + apply (erule (3) tcb_at_cross) + done + +lemma valid_objs_valid_tcbE: + "\s t.\ valid_objs' s; tcb_at' t s; \tcb. valid_tcb' tcb s \ R s tcb \ \ obj_at' (R s) t s" apply (clarsimp simp add: projectKOs valid_objs'_def ran_def typ_at'_def ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) apply (fastforce simp: projectKO_def projectKO_opt_tcb return_def valid_tcb'_def) done -lemma valid_objs'_maxDomain: - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbDomain tcb \ maxDomain) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) +lemma valid_tcb'_tcbDomain_update: + "new_dom \ maxDomain \ + \tcb. valid_tcb' tcb s \ valid_tcb' (tcbDomain_update (\_. new_dom) tcb) s" + unfolding valid_tcb'_def + apply (clarsimp simp: tcb_cte_cases_def objBits_simps') + done + +lemma valid_tcb'_tcbState_update: + "\valid_tcb_state' st s; valid_tcb' tcb s\ \ + valid_tcb' (tcbState_update (\_. st) tcb) s" + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def valid_tcb_state'_def objBits_simps') + done + +definition valid_tcbs' :: "kernel_state \ bool" where + "valid_tcbs' s' \ \ptr tcb. ksPSpace s' ptr = Some (KOTCB tcb) \ valid_tcb' tcb s'" + +lemma valid_objs'_valid_tcbs'[elim!]: + "valid_objs' s \ valid_tcbs' s" + by (auto simp: valid_objs'_def valid_tcbs'_def valid_obj'_def split: kernel_object.splits) + +lemma invs'_valid_tcbs'[elim!]: + "invs' s \ valid_tcbs' s" + by (fastforce intro: valid_objs'_valid_tcbs') + +lemma valid_tcbs'_maxDomain: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbDomain tcb \ maxDomain) t s" + apply (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def projectKOs) done -lemma valid_objs'_maxPriority: - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbPriority tcb \ maxPriority) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) +lemmas valid_objs'_maxDomain = valid_tcbs'_maxDomain[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_maxPriority: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbPriority tcb \ maxPriority) t s" + apply (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def projectKOs) done +lemmas valid_objs'_maxPriority = valid_tcbs'_maxPriority[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_obj_at': + assumes "valid_tcbs' s" + "tcb_at' t s" + "\tcb. ko_at' tcb t s \ valid_tcb' tcb s \ R s tcb" + shows "obj_at' (R s) t s" + using assms + apply (clarsimp simp add: valid_tcbs'_def ran_def typ_at'_def + ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def projectKOs) + done + +lemma update_valid_tcb'[simp]: + "\f. valid_tcb' tcb (ksReadyQueuesL1Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueuesL2Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueues_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksSchedulerAction_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksDomainTime_update f s) = valid_tcb' tcb s" + by (auto simp: valid_tcb'_def valid_tcb_state'_def valid_bound_tcb'_def valid_bound_ntfn'_def + split: option.splits thread_state.splits) + +lemma update_valid_tcbs'[simp]: + "\f. valid_tcbs' (ksReadyQueuesL1Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueuesL2Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueues_update f s) = valid_tcbs' s" + "\f. 
valid_tcbs' (ksSchedulerAction_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksDomainTime_update f s) = valid_tcbs' s" + by (simp_all add: valid_tcbs'_def) + lemma doMachineOp_irq_states': assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" shows "\valid_irq_states'\ doMachineOp f \\rv. valid_irq_states'\" @@ -215,7 +313,7 @@ lemma preemptionPoint_irq [wp]: "\valid_irq_states'\ preemptionPoint -, \\irq s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" apply (simp add: preemptionPoint_def setWorkUnits_def modifyWorkUnits_def getWorkUnits_def) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (rule hoare_post_imp) prefer 2 apply (rule doMachineOp_getActiveIRQ_IRQ_active) @@ -231,56 +329,117 @@ lemma updateObject_tcb_inv: by simp (rule updateObject_default_inv) lemma setObject_update_TCB_corres': - assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" - assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" - assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" + assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'" + assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb" + assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF new_tcb' = getF tcb'" + assumes sched_pointers: "tcbSchedPrev new_tcb' = tcbSchedPrev tcb'" + "tcbSchedNext new_tcb' = tcbSchedNext tcb'" + assumes flag: "tcbQueued new_tcb' = tcbQueued tcb'" assumes r: "r () ()" - assumes exst: "exst_same tcb' tcbu'" - shows "corres r (ko_at (TCB tcb) add) - (ko_at' tcb' add) - (set_object add (TCB tcbu)) (setObject add tcbu')" - apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' tcbu'" in corres_req) + assumes exst: "exst_same tcb' new_tcb'" + shows + "corres r + (ko_at (TCB tcb) ptr) (ko_at' tcb' ptr) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" + apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' new_tcb'" in corres_req) apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) apply (frule(1) pspace_relation_absD) - apply (clarsimp simp: projectKOs other_obj_relation_def exst) - apply (rule corres_guard_imp) - apply (rule corres_rel_imp) - apply (rule setObject_other_corres[where P="(=) tcb'"]) - apply (rule ext)+ - apply simp - defer - apply (simp add: is_other_obj_relation_type_def - projectKOs objBits_simps' - other_obj_relation_def tcbs r)+ - apply (fastforce elim!: obj_at_weakenE dest: bspec[OF tables]) - apply (subst(asm) eq_commute, assumption) - apply (clarsimp simp: projectKOs obj_at'_def objBits_simps) - apply (subst map_to_ctes_upd_tcb, assumption+) - apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) - apply (subst if_not_P) - apply (fastforce dest: bspec [OF tables', OF ranI]) - apply simp + apply (clarsimp simp: tcb_relation_cut_def exst projectKOs) + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp: obj_at'_def) + apply (unfold set_object_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def projectKOs obj_at_def + updateObject_default_def in_magnitude_check obj_at'_def) + apply (rename_tac s s' t') + apply (prop_tac "t' = s'") + apply (clarsimp simp: magnitudeCheck_def in_monad split: option.splits) + apply (drule singleton_in_magnitude_check) + apply (prop_tac "map_to_ctes ((ksPSpace s') (ptr \ injectKO new_tcb')) + = map_to_ctes (ksPSpace s')") + apply (frule_tac tcb=new_tcb' and tcb=tcb' in 
map_to_ctes_upd_tcb) + apply (clarsimp simp: objBits_simps) + apply (clarsimp simp: objBits_simps ps_clear_def3 field_simps objBits_defs mask_def) + apply (insert tables')[1] + apply (rule ext) + apply (clarsimp split: if_splits) + apply blast + apply (prop_tac "obj_at (same_caps (TCB new_tcb)) ptr s") + using tables + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def assms) + apply (clarsimp simp add: state_relation_def) + apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x=ptr in allE)+ + apply clarsimp + apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply clarsimp + apply (rule conjI) + apply (simp only: pspace_relation_def simp_thms + pspace_dom_update[where x="kernel_object.TCB _" + and v="kernel_object.TCB _", + simplified a_type_def, simplified]) + apply (rule conjI) + using assms + apply (simp only: dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: tcb_relation_cut_def split: if_split_asm kernel_object.split_asm) + apply (rename_tac aa ba) + apply (drule_tac x="(aa, ba)" in bspec, simp) + apply clarsimp + apply (frule_tac ko'="kernel_object.TCB tcb" and x'=ptr in obj_relation_cut_same_type) + apply (simp add: tcb_relation_cut_def)+ + apply clarsimp + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule (1) bspec) + apply (insert exst) + apply (clarsimp simp: etcb_relation_def exst_same_def) + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (insert sched_pointers flag exst) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext new_tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev new_tcb') = tcbSchedPrevs_of s'") + apply (fastforce simp: opt_map_def) + apply (clarsimp simp: ready_queue_relation_def opt_pred_def opt_map_def exst_same_def + inQ_def projectKOs + split: option.splits) + apply (metis (mono_tags, opaque_lifting)) + apply (clarsimp simp: fun_upd_def caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def) done lemma setObject_update_TCB_corres: - "\ tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'; - \(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb; - \(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'; - r () (); exst_same tcb' tcbu'\ - \ corres r (\s. get_tcb add s = Some tcb) - (\s'. (tcb', s') \ fst (getObject add s')) - (set_object add (TCB tcbu)) (setObject add tcbu')" + "\tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'; + \(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb; + \(getF, v) \ ran tcb_cte_cases. getF new_tcb' = getF tcb'; + tcbSchedPrev new_tcb' = tcbSchedPrev tcb'; tcbSchedNext new_tcb' = tcbSchedNext tcb'; + tcbQueued new_tcb' = tcbQueued tcb'; exst_same tcb' new_tcb'; + r () ()\ \ + corres r + (\s. get_tcb ptr s = Some tcb) (\s'. 
(tcb', s') \ fst (getObject ptr s')) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" apply (rule corres_guard_imp) - apply (erule (3) setObject_update_TCB_corres', force) - apply fastforce - apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def - loadObject_default_def projectKOs objBits_simps' - in_magnitude_check) + apply (erule (7) setObject_update_TCB_corres') + apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def projectKOs + loadObject_default_def objBits_simps' in_magnitude_check)+ done lemma getObject_TCB_corres: - "corres tcb_relation (tcb_at t) (tcb_at' t) + "corres tcb_relation (tcb_at t and pspace_aligned and pspace_distinct) \ (gets_the (get_tcb t)) (getObject t)" + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) apply (rule corres_guard_imp) apply (rule corres_gets_the) apply (rule corres_get_tcb) @@ -290,7 +449,8 @@ lemma getObject_TCB_corres: lemma threadGet_corres: assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ r (f tcb) (f' tcb')" - shows "corres r (tcb_at t) (tcb_at' t) (thread_get f t) (threadGet f' t)" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get f t) (threadGet f' t)" apply (simp add: thread_get_def threadGet_def) apply (fold liftM_def) apply simp @@ -312,7 +472,8 @@ lemma ball_tcb_cte_casesI: by (simp add: tcb_cte_cases_def) lemma all_tcbI: - "\ \a b c d e f g h i j k l m n p q. P (Thread a b c d e f g h i j k l m n p q) \ \ \tcb. P tcb" + "\ \a b c d e f g h i j k l m n p q r s. P (Thread a b c d e f g h i j k l m n p q r s) \ + \ \tcb. P tcb" by (rule allI, case_tac tcb, simp) lemma threadset_corresT: @@ -321,18 +482,24 @@ lemma threadset_corresT: assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (f' tcb) = getF tcb" + assumes sched_pointers: "\tcb. tcbSchedPrev (f' tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (f' tcb) = tcbSchedNext tcb" + assumes flag: "\tcb. tcbQueued (f' tcb) = tcbQueued tcb" assumes e: "\tcb'. exst_same tcb' (f' tcb')" - shows "corres dc (tcb_at t) - (tcb_at' t) - (thread_set f t) (threadSet f' t)" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) + \ + (thread_set f t) (threadSet f' t)" apply (simp add: thread_set_def threadSet_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getObject_TCB_corres]) apply (rule setObject_update_TCB_corres') - apply (erule x) - apply (rule y) - apply (clarsimp simp: bspec_split [OF spec [OF z]]) - apply fastforce + apply (erule x) + apply (rule y) + apply (clarsimp simp: bspec_split [OF spec [OF z]]) + apply fastforce + apply (rule sched_pointers) + apply (rule sched_pointers) + apply (rule flag) apply simp apply (rule e) apply wp+ @@ -362,18 +529,20 @@ lemma threadSet_corres_noopT: tcb_relation tcb (fn tcb')" assumes y: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (fn tcb) = getF tcb" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" assumes e: "\tcb'. exst_same tcb' (fn tcb')" - shows "corres dc \ (tcb_at' t) - (return v) (threadSet fn t)" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (return v) (threadSet fn t)" proof - have S: "\t s. tcb_at t s \ return v s = (thread_set id t >>= (\x. 
return v)) s" apply (clarsimp simp: tcb_at_def) - apply (simp add: return_def thread_set_def gets_the_def + apply (simp add: return_def thread_set_def gets_the_def assert_def assert_opt_def simpler_gets_def set_object_def get_object_def - put_def get_def bind_def assert_def a_type_def[split_simps kernel_object.split arch_kernel_obj.split]) - apply (subgoal_tac "kheap s(t \ TCB tcb) = kheap s", simp) - apply (simp add: map_upd_triv get_tcb_SomeD) - apply (simp add: get_tcb_SomeD map_upd_triv) + put_def get_def bind_def) + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s", simp) + apply (simp add: map_upd_triv get_tcb_SomeD)+ done show ?thesis apply (rule stronger_corres_guard_imp) @@ -381,16 +550,17 @@ proof - defer apply (subst bind_return [symmetric], rule corres_underlying_split [OF threadset_corresT]) - apply (simp add: x) - apply simp - apply (rule y) + apply (simp add: x) + apply simp + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) apply (rule e) apply (rule corres_noop [where P=\ and P'=\]) - apply wpsimp+ - apply (erule pspace_relation_tcb_at[rotated]) - apply clarsimp - apply simp - apply simp + apply simp + apply (rule no_fail_pre, wpsimp+)[1] + apply wpsimp+ done qed @@ -404,14 +574,20 @@ lemma threadSet_corres_noop_splitT: getF (fn tcb) = getF tcb" assumes z: "corres r P Q' m m'" assumes w: "\P'\ threadSet fn t \\x. Q'\" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" assumes e: "\tcb'. exst_same tcb' (fn tcb')" - shows "corres r P (tcb_at' t and P') + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct and P) P' m (threadSet fn t >>= (\rv. m'))" apply (rule corres_guard_imp) apply (subst return_bind[symmetric]) apply (rule corres_split_nor[OF threadSet_corres_noopT]) - apply (simp add: x) - apply (rule y) + apply (simp add: x) + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) apply (rule e) apply (rule z) apply (wp w)+ @@ -645,16 +821,23 @@ lemma threadSet_valid_pspace'T_P: assumes v: "\tcb. (P \ Q' (tcbBoundNotification tcb)) \ (\s. valid_bound_ntfn' (tcbBoundNotification tcb) s \ valid_bound_ntfn' (tcbBoundNotification (F tcb)) s)" - + assumes p: "\tcb. (P \ Q'' (tcbSchedPrev tcb)) \ + (\s. none_top tcb_at' (tcbSchedPrev tcb) s + \ none_top tcb_at' (tcbSchedPrev (F tcb)) s)" + assumes n: "\tcb. (P \ Q''' (tcbSchedNext tcb)) \ + (\s. none_top tcb_at' (tcbSchedNext tcb) s + \ none_top tcb_at' (tcbSchedNext (F tcb)) s)" assumes y: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" assumes u: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" assumes w: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" assumes w': "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" shows - "\valid_pspace' and (\s. P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s)\ - threadSet F t - \\rv. valid_pspace'\" + "\valid_pspace' and (\s. P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s + \ obj_at' (\tcb. Q'' (tcbSchedPrev tcb)) t s + \ obj_at' (\tcb. Q''' (tcbSchedNext tcb)) t s)\ + threadSet F t + \\_. 
valid_pspace'\" apply (simp add: valid_pspace'_def threadSet_def) apply (rule hoare_pre, wp setObject_tcb_valid_objs getObject_tcb_wp) @@ -662,7 +845,7 @@ lemma threadSet_valid_pspace'T_P: apply (erule(1) valid_objsE') apply (clarsimp simp add: valid_obj'_def valid_tcb'_def bspec_split [OF spec [OF x]] z - split_paired_Ball y u w v w') + split_paired_Ball y u w v w' p n) done lemmas threadSet_valid_pspace'T = @@ -736,6 +919,10 @@ lemma threadSet_iflive'T: \ tcbState (F tcb) \ Inactive \ tcbState (F tcb) \ IdleThreadState \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. tcbSchedNext tcb = None \ tcbSchedNext (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. tcbSchedPrev tcb = None \ tcbSchedPrev (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb) \ ko_at' tcb t s) \ ex_nonz_cap_to' t s)\ threadSet F t @@ -743,8 +930,7 @@ lemma threadSet_iflive'T: apply (simp add: threadSet_def) apply (wp setObject_tcb_iflive' getObject_tcb_wp) apply (clarsimp simp: obj_at'_def projectKOs) - apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric]) - apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric]) + apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric])+ apply (rule conjI) apply (rule impI, clarsimp) apply (erule if_live_then_nonz_capE') @@ -760,7 +946,7 @@ lemma threadSet_cte_wp_at'T: getF (F tcb) = getF tcb" shows "\\s. P' (cte_wp_at' P p s)\ threadSet F t \\rv s. P' (cte_wp_at' P p s)\" apply (simp add: threadSet_def) - apply (rule hoare_seq_ext [where B="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) + apply (rule bind_wp [where Q'="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) apply (rule setObject_cte_wp_at2') apply (clarsimp simp: updateObject_default_def projectKOs in_monad obj_at'_def objBits_simps' in_magnitude_check prod_eq_iff) @@ -790,6 +976,12 @@ lemmas threadSet_ctes_of = lemmas threadSet_cap_to' = ex_nonz_cap_to_pres' [OF threadSet_cte_wp_at'] +lemma threadSet_cap_to: + "(\tcb. \(getF, v)\ran tcb_cte_cases. getF (f tcb) = getF tcb) + \ threadSet f tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: hoare_vcg_ex_lift threadSet_cte_wp_at' + simp: ex_nonz_cap_to'_def tcb_cte_cases_def objBits_simps') + lemma threadSet_idle'T: assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" shows @@ -828,30 +1020,6 @@ lemma set_tcb_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ done -lemma threadSet_valid_queues_no_bitmap: - "\ valid_queues_no_bitmap and - (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) - \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s - \ t \ set (ksReadyQueues s (d, p)) - )\ - threadSet f t - \\rv. valid_queues_no_bitmap \" - apply (simp add: threadSet_def) - apply wp - apply (simp add: Invariants_H.valid_queues_no_bitmap_def' pred_tcb_at'_def) - - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def projectKOs) - apply (fastforce) - done - lemma threadSet_valid_bitmapQ[wp]: "\ valid_bitmapQ \ threadSet f t \ \rv. 
valid_bitmapQ \" unfolding bitmapQ_defs threadSet_def @@ -870,73 +1038,6 @@ lemma threadSet_valid_bitmapQ_no_L2_orphans[wp]: by (clarsimp simp: setObject_def split_def) (wp | simp add: updateObject_default_def)+ -lemma threadSet_valid_queues: - "\Invariants_H.valid_queues and - (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) - \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s - \ t \ set (ksReadyQueues s (d, p)) - )\ - threadSet f t - \\rv. Invariants_H.valid_queues\" - unfolding valid_queues_def - by (wp threadSet_valid_queues_no_bitmap;simp) - -definition - addToQs :: "(Structures_H.tcb \ Structures_H.tcb) - \ word32 \ (domain \ priority \ word32 list) - \ (domain \ priority \ word32 list)" -where - "addToQs F t \ \qs (qdom, prio). if (\ko. \ inQ qdom prio (F ko)) - then t # qs (qdom, prio) - else qs (qdom, prio)" - -lemma addToQs_set_def: - "(t' \ set (addToQs F t qs (qdom, prio))) = (t' \ set (qs (qdom, prio)) - \ (t' = t \ (\ko. \ inQ qdom prio (F ko))))" - by (auto simp add: addToQs_def) - -lemma threadSet_valid_queues_addToQs: - "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko - \ t \ set (ksReadyQueues s (qdom, prio))) - \ valid_queues' (ksReadyQueues_update (addToQs F t) s)\ - threadSet F t - \\rv. valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs - objBits_simps addToQs_set_def - split del: if_split cong: if_cong) - apply (fastforce simp: projectKOs split: if_split_asm) - done - -lemma threadSet_valid_queues_Qf: - "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko - \ t \ set (ksReadyQueues s (qdom, prio))) - \ valid_queues' (ksReadyQueues_update Qf s) - \ (\prio. set (Qf (ksReadyQueues s) prio) - \ set (addToQs F t (ksReadyQueues s) prio))\ - threadSet F t - \\rv. valid_queues'\" - apply (wp threadSet_valid_queues_addToQs) - apply (clarsimp simp: valid_queues'_def subset_iff) - done - -lemma addToQs_subset: - "set (qs p) \ set (addToQs F t qs p)" -by (clarsimp simp: addToQs_def split_def) - -lemmas threadSet_valid_queues' - = threadSet_valid_queues_Qf - [where Qf=id, simplified ksReadyQueues_update_id - id_apply addToQs_subset simp_thms] - lemma threadSet_cur: "\\s. cur_tcb' s\ threadSet f t \\rv s. cur_tcb' s\" apply (simp add: threadSet_def cur_tcb'_def) @@ -952,7 +1053,7 @@ lemma modifyReadyQueuesL1Bitmap_obj_at[wp]: crunches setThreadState, setBoundNotification for valid_arch' [wp]: valid_arch_state' - (simp: unless_def crunch_simps) + (simp: unless_def crunch_simps wp: crunch_wps) crunch ksInterrupt'[wp]: threadSet "\s. P (ksInterruptState s)" (wp: setObject_ksInterrupt updateObject_default_inv) @@ -979,20 +1080,18 @@ lemma threadSet_obj_at'_really_strongest: "\\s. tcb_at' t s \ obj_at' (\obj. if t = t' then P (f obj) else P obj) t' s\ threadSet f t \\rv. obj_at' P t'\" apply (simp add: threadSet_def) - apply (rule hoare_wp_splits) - apply (rule setObject_tcb_strongest) - apply (simp only: imp_conv_disj) - apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) - apply (rule hoare_post_imp [where Q="\rv s. 
\ tcb_at' t s \ tcb_at' t s"]) - apply simp - apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) - apply (rule getObject_inv_tcb) - apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply (wp setObject_tcb_strongest) + apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) + apply (rule hoare_post_imp [where Q="\rv s. \ tcb_at' t s \ tcb_at' t s"]) apply simp - apply (simp add: objBits_simps') - apply (erule obj_at'_weakenE) - apply simp - apply (cases "t = t'", simp_all) + apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) + apply (rule getObject_inv_tcb) + apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply simp + apply (simp add: objBits_simps') + apply (erule obj_at'_weakenE) + apply simp + apply (cases "t = t'", simp_all) apply (rule OMG_getObject_tcb) apply wp done @@ -1216,57 +1315,103 @@ lemma threadSet_valid_dom_schedule': unfolding threadSet_def by (wp setObject_ksDomSchedule_inv hoare_Ball_helper) +lemma threadSet_wp: + "\\s. \tcb. ko_at' tcb t s \ P (s\ksPSpace := (ksPSpace s)(t \ injectKO (f tcb))\)\ + threadSet f t + \\_. P\" + unfolding threadSet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (auto simp: obj_at'_def split: if_splits) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: fun_upd_def) + apply (prop_tac "\ptr. psMap (ksPSpace s) ptr = ksPSpace s ptr") + apply fastforce + apply metis + done + +lemma threadSet_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb\ + \ threadSet F tcbPtr \\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst2[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + done + +lemma threadSet_valid_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb; + \tcb. tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tcbPtr \valid_sched_pointers\" + unfolding valid_sched_pointers_def + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + by (fastforce simp: opt_pred_def opt_map_def obj_at'_def projectKOs split: option.splits if_splits) + +lemma threadSet_tcbSchedNexts_of: + "(\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb) \ + threadSet F t \\s. P (tcbSchedNexts_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + done + +lemma threadSet_tcbSchedPrevs_of: + "(\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb) \ + threadSet F t \\s. P (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + done + +lemma threadSet_tcbQueued: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + threadSet F t \\s. P (tcbQueued |< tcbs_of' s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_pred_def opt_map_def obj_at'_def projectKOs) + done + +crunches threadSet + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + lemma threadSet_invs_trivialT: - assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" - assumes z: "\tcb. 
tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" - assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" - assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" - shows - "\\s. invs' s \ - tcb_at' t s \ - (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) \ - (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) \ - ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) \ - (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ - threadSet F t - \\rv. invs'\" -proof - - from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast - note threadSet_sch_actT_P[where P=False, simplified] - have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ - valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" - by (auto simp: z) - show ?thesis - apply (simp add: invs'_def valid_state'_def split del: if_split) - apply (rule hoare_pre) - apply (wp x w v u b - threadSet_valid_pspace'T - threadSet_sch_actT_P[where P=False, simplified] - threadSet_valid_queues - threadSet_state_refs_of'T[where f'=id] - threadSet_iflive'T - threadSet_ifunsafe'T - threadSet_idle'T - threadSet_global_refsT - irqs_masked_lift - valid_irq_node_lift - valid_irq_handlers_lift'' - threadSet_ctes_ofT - threadSet_not_inQ - threadSet_ct_idle_or_in_cur_domain' - threadSet_valid_dom_schedule' - threadSet_valid_queues' - threadSet_cur - untyped_ranges_zero_lift - |clarsimp simp: y z a domains cteCaps_of_def |rule refl)+ - apply (clarsimp simp: obj_at'_def projectKOs pred_tcb_at'_def) - apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) - by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) -qed + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb" + "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits + \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbPriority (F tcb) = tcbPriority tcb" + "\tcb. 
tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + shows "threadSet F t \invs'\" + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbQueued + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of valid_bitmaps_lift + | clarsimp simp: assms cteCaps_of_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: assms obj_at'_def) lemmas threadSet_invs_trivial = threadSet_invs_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] @@ -1306,19 +1451,84 @@ lemma threadSet_valid_objs': apply (clarsimp elim!: obj_at'_weakenE) done +lemmas typ_at'_valid_tcb'_lift = + typ_at'_valid_obj'_lift[where obj="KOTCB tcb" for tcb, unfolded valid_obj'_def, simplified] + +lemmas setObject_valid_tcb' = typ_at'_valid_tcb'_lift[OF setObject_typ_at'] + +lemma setObject_valid_tcbs': + assumes preserve_valid_tcb': "\s s' ko ko' x n tcb tcb'. + \ (ko', s') \ fst (updateObject val ko ptr x n s); P s; + lookupAround2 ptr (ksPSpace s) = (Some (x, ko), n); + projectKO_opt ko = Some tcb; projectKO_opt ko' = Some tcb'; + valid_tcb' tcb s \ \ valid_tcb' tcb' s" + shows "\valid_tcbs' and P\ setObject ptr val \\rv. valid_tcbs'\" + unfolding valid_tcbs'_def + apply (clarsimp simp: valid_def) + apply (rename_tac s s' ptr' tcb) + apply (prop_tac "\tcb'. valid_tcb' tcb s \ valid_tcb' tcb s'") + apply clarsimp + apply (erule (1) use_valid[OF _ setObject_valid_tcb']) + apply (drule spec, erule mp) + apply (clarsimp simp: setObject_def in_monad split_def lookupAround2_char1) + apply (rename_tac s ptr' new_tcb' ptr'' old_tcb_ko' s' f) + apply (case_tac "ptr'' = ptr'"; clarsimp) + apply (prop_tac "\old_tcb' :: tcb. projectKO_opt old_tcb_ko' = Some old_tcb'") + apply (frule updateObject_type) + apply (case_tac old_tcb_ko'; clarsimp simp: project_inject) + apply (erule exE) + apply (rule preserve_valid_tcb', assumption+) + apply (simp add: prod_eqI lookupAround2_char1) + apply force + apply (clarsimp simp: project_inject) + apply (clarsimp simp: project_inject) + done + +lemma setObject_tcb_valid_tcbs': + "\valid_tcbs' and (tcb_at' t and valid_tcb' v)\ setObject t (v :: tcb) \\rv. valid_tcbs'\" + apply (rule setObject_valid_tcbs') + apply (clarsimp simp: updateObject_default_def in_monad project_inject) + done + +lemma threadSet_valid_tcb': + "\valid_tcb' tcb and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. valid_tcb' tcb\" + apply (simp add: threadSet_def) + apply (wpsimp wp: setObject_valid_tcb') + done + +lemma threadSet_valid_tcbs': + "\valid_tcbs' and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. 
valid_tcbs'\" + apply (simp add: threadSet_def) + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (wpsimp wp: setObject_tcb_valid_tcbs') + apply (clarsimp simp: obj_at'_def valid_tcbs'_def projectKOs) + done + +lemma asUser_valid_tcbs'[wp]: + "asUser t f \valid_tcbs'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_valid_tcbs' hoare_drop_imps + simp: valid_tcb'_def tcb_cte_cases_def objBits_simps') + done + lemma asUser_corres': assumes y: "corres_underlying Id False True r \ \ f g" - shows "corres r (tcb_at t) - (tcb_at' t) + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ (as_user t f) (asUser t g)" - proof - +proof - note arch_tcb_context_get_def[simp] note atcbContextGet_def[simp] note arch_tcb_context_set_def[simp] note atcbContextSet_def[simp] have L1: "corres (\tcb con. (arch_tcb_context_get o tcb_arch) tcb = con) - (tcb_at t) (tcb_at' t) - (gets_the (get_tcb t)) (threadGet (atcbContextGet o tcbArch) t)" + (tcb_at t and pspace_aligned and pspace_distinct) \ + (gets_the (get_tcb t)) (threadGet (atcbContextGet o tcbArch) t)" + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) apply (rule corres_guard_imp) apply (rule corres_gets_the) apply (simp add: threadGet_def) @@ -1336,32 +1546,32 @@ lemma asUser_corres': (set_object add (TCB (tcb \ tcb_arch := arch_tcb_context_set con (tcb_arch tcb) \))) (setObject add (tcb' \ tcbArch := atcbContextSet con' (tcbArch tcb') \))" by (rule setObject_update_TCB_corres [OF L2], - (simp add: tcb_cte_cases_def tcb_cap_cases_def exst_same_def)+) + (simp add: tcb_cte_cases_def tcb_cap_cases_def cteSizeBits_def exst_same_def)+) have L4: "\con con'. con = con' \ corres (\(irv, nc) (irv', nc'). r irv irv' \ nc = nc') \ \ (select_f (f con)) (select_f (g con'))" using y by (fastforce simp: corres_underlying_def select_f_def split_def Id_def) show ?thesis - apply (simp add: as_user_def asUser_def) - apply (rule corres_guard_imp) - apply (rule_tac r'="\tcb con. (arch_tcb_context_get o tcb_arch) tcb = con" - in corres_split) - apply simp - apply (rule L1[simplified]) - apply (rule corres_split) - apply (rule L4; simp) - apply clarsimp - apply (rule corres_split_nor) - apply (simp add: threadSet_def) - apply (rule corres_symb_exec_r) - prefer 4 - apply (rule no_fail_pre_and, wp) - apply (rule L3[simplified]) - apply simp - apply simp - apply (wp select_f_inv | simp)+ - done + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (simp add: as_user_def asUser_def) + apply (rule corres_guard_imp) + apply (rule_tac r'="\tcb con. 
(arch_tcb_context_get o tcb_arch) tcb = con" + in corres_split) + apply simp + apply (rule L1[simplified]) + apply (rule corres_split[OF L4]) + apply simp + apply clarsimp + apply (rule corres_split_nor) + apply (simp add: threadSet_def) + apply (rule corres_symb_exec_r) + apply (rule L3[simplified]) + prefer 5 + apply (rule no_fail_pre_and, wp) + apply (wp select_f_inv | simp)+ + done qed lemma asUser_corres: @@ -1394,7 +1604,7 @@ proof - qed lemma asUser_getRegister_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (as_user t (getRegister r)) (asUser t (getRegister r))" apply (rule asUser_corres') apply (clarsimp simp: getRegister_def) @@ -1442,14 +1652,6 @@ lemma asUser_valid_pspace'[wp]: apply (wp threadSet_valid_pspace' hoare_drop_imps | simp)+ done -lemma asUser_valid_queues[wp]: - "\Invariants_H.valid_queues\ asUser t m \\rv. Invariants_H.valid_queues\" - apply (simp add: asUser_def split_def) - apply (wp hoare_drop_imps | simp)+ - - apply (wp threadSet_valid_queues hoare_drop_imps | simp)+ - done - lemma asUser_ifunsafe'[wp]: "\if_unsafe_then_cap'\ asUser t m \\rv. if_unsafe_then_cap'\" apply (simp add: asUser_def split_def) @@ -1536,14 +1738,12 @@ lemma no_fail_asUser [wp]: "no_fail \ f \ no_fail (tcb_at' t) (asUser t f)" apply (simp add: asUser_def split_def) apply wp - apply (simp add: no_fail_def) - apply (wp hoare_drop_imps) - apply simp + apply (simp add: no_fail_def) + apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+ done lemma asUser_setRegister_corres: - "corres dc (tcb_at t) - (tcb_at' t) + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ (as_user t (setRegister r v)) (asUser t (setRegister r v))" apply (simp add: setRegister_def) @@ -1552,7 +1752,7 @@ lemma asUser_setRegister_corres: done lemma getThreadState_corres: - "corres thread_state_relation (tcb_at t) (tcb_at' t) + "corres thread_state_relation (tcb_at t and pspace_aligned and pspace_distinct) \ (get_thread_state t) (getThreadState t)" apply (simp add: get_thread_state_def getThreadState_def) apply (rule threadGet_corres) @@ -1583,7 +1783,7 @@ lemma gts_inv'[wp]: "\P\ getThreadState t \\rv. by (simp add: getThreadState_def) wp lemma getBoundNotification_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (get_bound_notification t) (getBoundNotification t)" apply (simp add: get_bound_notification_def getBoundNotification_def) apply (rule threadGet_corres) @@ -1724,19 +1924,22 @@ lemma ethreadget_corres: apply (simp add: x) done -lemma setQueue_corres: - "corres dc \ \ (set_tcb_queue d p q) (setQueue d p q)" - apply (rule corres_no_failI) - apply wp - apply (clarsimp simp: setQueue_def in_monad set_tcb_queue_def return_def simpler_modify_def) - apply (fastforce simp: state_relation_def ready_queues_relation_def) - done - - -lemma getQueue_corres: "corres (=) \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" - apply (clarsimp simp add: getQueue_def state_relation_def ready_queues_relation_def get_tcb_queue_def gets_def) - apply (fold gets_def) - apply simp +lemma getQueue_corres: + "corres (\ls q. 
(ls = [] \ tcbQueueEmpty q) \ (ls \ [] \ tcbQueueHead q = Some (hd ls)) + \ queue_end_valid ls q) + \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" + apply (clarsimp simp: get_tcb_queue_def getQueue_def tcbQueueEmpty_def) + apply (rule corres_bind_return2) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]) + apply (rule corres_symb_exec_r[OF _ gets_sp]) + apply clarsimp + apply (drule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x=qdom in spec) + apply (drule_tac x=prio in spec) + apply (fastforce dest: heap_path_head) + apply wpsimp+ done lemma no_fail_return: @@ -1751,8 +1954,8 @@ lemma addToBitmap_noop_corres: (wp | simp add: state_relation_def | rule no_fail_pre)+ lemma addToBitmap_if_null_noop_corres: (* used this way in Haskell code *) - "corres dc \ \ (return ()) (if null queue then addToBitmap d p else return ())" - by (cases "null queue", simp_all add: addToBitmap_noop_corres) + "corres dc \ \ (return ()) (if tcbQueueEmpty queue then addToBitmap d p else return ())" + by (cases "tcbQueueHead queue", simp_all add: addToBitmap_noop_corres) lemma removeFromBitmap_corres_noop: "corres dc \ \ (return ()) (removeFromBitmap tdom prioa)" @@ -1769,54 +1972,704 @@ crunch typ_at'[wp]: removeFromBitmap "\s. P (typ_at' T p s)" lemmas addToBitmap_typ_ats [wp] = typ_at_lifts [OF addToBitmap_typ_at'] lemmas removeFromBitmap_typ_ats [wp] = typ_at_lifts [OF removeFromBitmap_typ_at'] +lemma ekheap_relation_tcb_domain_priority: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s t = Some (tcb); + ksPSpace s' t = Some (KOTCB tcb')\ + \ tcbDomain tcb' = tcb_domain tcb \ tcbPriority tcb' = tcb_priority tcb" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=t in bspec, blast) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def) + done + +lemma no_fail_thread_get[wp]: + "no_fail (tcb_at tcb_ptr) (thread_get f tcb_ptr)" + unfolding thread_get_def + apply wpsimp + apply (clarsimp simp: tcb_at_def) + done + +lemma pspace_relation_tcb_relation: + "\pspace_relation (kheap s) (ksPSpace s'); kheap s ptr = Some (TCB tcb); + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ tcb_relation tcb tcb'" + apply (clarsimp simp: pspace_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: tcb_relation_cut_def obj_at_def obj_at'_def) + done + +lemma pspace_relation_update_concrete_tcb: + "\pspace_relation s s'; s ptr = Some (TCB tcb); s' ptr = Some (KOTCB otcb'); + tcb_relation tcb tcb'\ + \ pspace_relation s (s'(ptr \ KOTCB tcb'))" + by (fastforce dest: pspace_relation_update_tcbs simp: map_upd_triv) + +lemma threadSet_pspace_relation: + fixes s :: det_state + assumes tcb_rel: "(\tcb tcb'. tcb_relation tcb tcb' \ tcb_relation tcb (F tcb'))" + shows "threadSet F tcbPtr \\s'. 
pspace_relation (kheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply normalise_obj_at' + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply (clarsimp simp: obj_at_def is_tcb_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule pspace_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def projectKOs) + apply (frule (1) pspace_relation_tcb_relation) + apply (fastforce simp: obj_at'_def projectKOs) + apply (fastforce dest!: tcb_rel) + done + +lemma ekheap_relation_update_tcbs: + "\ ekheap_relation (ekheap s) (ksPSpace s'); ekheap s x = Some oetcb; + ksPSpace s' x = Some (KOTCB otcb'); etcb_relation etcb tcb' \ + \ ekheap_relation ((ekheap s)(x \ etcb)) ((ksPSpace s')(x \ KOTCB tcb'))" + by (simp add: ekheap_relation_def) + +lemma ekheap_relation_update_concrete_tcb: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB otcb'); + etcb_relation etcb tcb'\ + \ ekheap_relation (ekheap s) ((ksPSpace s')(ptr \ KOTCB tcb'))" + by (fastforce dest: ekheap_relation_update_tcbs simp: map_upd_triv) + +lemma ekheap_relation_etcb_relation: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ etcb_relation etcb tcb'" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: obj_at_def obj_at'_def) + done + +lemma threadSet_ekheap_relation: + fixes s :: det_state + assumes etcb_rel: "(\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation etcb (F tcb'))" + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet F tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_tcb_def is_etcb_at_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule ekheap_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def projectKOs) + apply (frule (1) ekheap_relation_etcb_relation) + apply (fastforce simp: obj_at'_def projectKOs) + apply (fastforce dest!: etcb_rel) + done + +lemma tcbQueued_update_pspace_relation[wp]: + fixes s :: det_state + shows "threadSet (tcbQueued_update f) tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueued_update_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet (tcbQueued_update f) tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_ekheap_relation simp: etcb_relation_def) + +lemma tcbQueueRemove_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueRemove queue tcbPtr \\s'. 
pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueRemove_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueRemove queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_ekheap_relation threadSet_pspace_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma threadSet_ghost_relation[wp]: + "threadSet f tcbPtr \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s')\" + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma removeFromBitmap_ghost_relation[wp]: + "removeFromBitmap tdom prio \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s')\" + by (rule_tac f=gsUserPages in hoare_lift_Pf2; wpsimp simp: bitmap_fun_defs) + +lemma tcbQueued_update_ctes_of[wp]: + "threadSet (tcbQueued_update f) t \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_of) + +lemma removeFromBitmap_ctes_of[wp]: + "removeFromBitmap tdom prio \\s. P (ctes_of s)\" + by (wpsimp simp: bitmap_fun_defs) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for ghost_relation_projs[wp]: "\s. P (gsUserPages s) (gsCNodes s)" + and ksArchState[wp]: "\s. P (ksArchState s)" + and ksWorkUnitsCompleted[wp]: "\s. P (ksWorkUnitsCompleted s)" + and ksDomainTime[wp]: "\s. P (ksDomainTime s)" + (wp: crunch_wps getObject_tcb_wp simp: setObject_def updateObject_default_def obj_at'_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for tcb_at'[wp]: "\s. tcb_at' tcbPtr s" + (wp: crunch_wps ignore: threadSet) + +lemma set_tcb_queue_projs: + "set_tcb_queue d p queue + \\s. P (kheap s) (cdt s) (is_original_cap s) (cur_thread s) (idle_thread s) (scheduler_action s) + (domain_list s) (domain_index s) (cur_domain s) (domain_time s) (machine_state s) + (interrupt_irq_node s) (interrupt_states s) (arch_state s) (caps_of_state s) + (work_units_completed s) (cdt_list s) (ekheap s)\" + by (wpsimp simp: set_tcb_queue_def) + +lemma set_tcb_queue_cte_at: + "set_tcb_queue d p queue \\s. 
P (swp cte_at s)\" + unfolding set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: swp_def cte_wp_at_def) + done + +lemma set_tcb_queue_projs_inv: + "fst (set_tcb_queue d p queue s) = {(r, s')} \ + kheap s = kheap s' + \ ekheap s = ekheap s' + \ cdt s = cdt s' + \ is_original_cap s = is_original_cap s' + \ cur_thread s = cur_thread s' + \ idle_thread s = idle_thread s' + \ scheduler_action s = scheduler_action s' + \ domain_list s = domain_list s' + \ domain_index s = domain_index s' + \ cur_domain s = cur_domain s' + \ domain_time s = domain_time s' + \ machine_state s = machine_state s' + \ interrupt_irq_node s = interrupt_irq_node s' + \ interrupt_states s = interrupt_states s' + \ arch_state s = arch_state s' + \ caps_of_state s = caps_of_state s' + \ work_units_completed s = work_units_completed s' + \ cdt_list s = cdt_list s' + \ swp cte_at s = swp cte_at s'" + apply (drule singleton_eqD) + by (auto elim!: use_valid_inv[where E=\, simplified] + intro: set_tcb_queue_projs set_tcb_queue_cte_at) + +lemma set_tcb_queue_new_state: + "(rv, t) \ fst (set_tcb_queue d p queue s) \ + t = s\ready_queues := \dom prio. if dom = d \ prio = p then queue else ready_queues s dom prio\" + by (clarsimp simp: set_tcb_queue_def in_monad) + +lemma tcbQueuePrepend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueuePrepend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueuePrepend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueuePrepend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueAppend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueAppend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueueAppend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueAppend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueInsert_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueInsert tcbPtr afterPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueInsert_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueInsert tcbPtr afterPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma removeFromBitmap_pspace_relation[wp]: + fixes s :: det_state + shows "removeFromBitmap tdom prio \\s'. 
pspace_relation (kheap s) (ksPSpace s')\" + unfolding bitmap_fun_defs + by wpsimp + +crunches setQueue, removeFromBitmap + for valid_pspace'[wp]: valid_pspace' + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node'[wp]: "\s. P (irq_node' s)" + and typ_at'[wp]: "\s. P (typ_at' T p s)" + and valid_irq_states'[wp]: valid_irq_states' + and ksInterruptState[wp]: "\s. P (ksInterruptState s)" + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and valid_machine_state'[wp]: valid_machine_state' + and cur_tcb'[wp]: cur_tcb' + and ksPSpace[wp]: "\s. P (ksPSpace s)" + (wp: crunch_wps + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def cur_tcb'_def threadSet_cur + bitmap_fun_defs valid_machine_state'_def) + +crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue, setQueue + for pspace_aligned'[wp]: pspace_aligned' + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and pspace_distinct'[wp]: pspace_distinct' + and no_0_obj'[wp]: no_0_obj' + and ksSchedulerAction[wp]: "\s. P (ksSchedulerAction s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node[wp]: "\s. P (irq_node' s)" + and typ_at[wp]: "\s. P (typ_at' T p s)" + and interrupt_state[wp]: "\s. P (ksInterruptState s)" + and valid_irq_state'[wp]: valid_irq_states' + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and ctes_of[wp]: "\s. P (ctes_of s)" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and ksMachineState[wp]: "\s. P (ksMachineState s)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + (wp: crunch_wps threadSet_state_refs_of'[where f'=id and g'=id] + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def bitmap_fun_defs) + +lemma threadSet_ready_queues_relation: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + \\s'. ready_queues_relation s s' \ \ (tcbQueued |< tcbs_of' s') tcbPtr\ + threadSet F tcbPtr + \\_ s'. ready_queues_relation s s'\" + supply fun_upd_apply[simp del] + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: list_queue_relation_def obj_at'_def projectKOs) + apply (rename_tac tcb' d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: ready_queue_relation_def list_queue_relation_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce intro: heap_path_heap_upd_not_in + simp: inQ_def opt_map_def opt_pred_def obj_at'_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (clarsimp simp: prev_queue_head_def) + apply (prop_tac "ready_queues s d p \ []", fastforce) + apply (fastforce dest: heap_path_head simp: inQ_def opt_pred_def opt_map_def fun_upd_apply) + apply (auto simp: inQ_def opt_pred_def opt_map_def fun_upd_apply projectKOs split: option.splits) + done + +definition in_correct_ready_q_2 where + "in_correct_ready_q_2 queues ekh \ + \d p. \t \ set (queues d p). is_etcb_at' t ekh + \ etcb_at' (\t. 
tcb_priority t = p \<and> tcb_domain t = d) t ekh"
+
+abbreviation in_correct_ready_q :: "det_ext state \<Rightarrow> bool" where
+  "in_correct_ready_q s \<equiv> in_correct_ready_q_2 (ready_queues s) (ekheap s)"
+
+lemmas in_correct_ready_q_def = in_correct_ready_q_2_def
+
+lemma in_correct_ready_q_lift:
+  assumes c: "\<And>P. \<lbrace>\<lambda>s. P (ekheap s)\<rbrace> f \<lbrace>\<lambda>rv s. P (ekheap s)\<rbrace>"
+  assumes r: "\<And>P. f \<lbrace>\<lambda>s. P (ready_queues s)\<rbrace>"
+  shows "f \<lbrace>in_correct_ready_q\<rbrace>"
+  apply (rule hoare_pre)
+   apply (wps assms | wpsimp)+
+  done
+
+definition ready_qs_distinct :: "det_ext state \<Rightarrow> bool" where
+  "ready_qs_distinct s \<equiv> \<forall>d p. distinct (ready_queues s d p)"
+
+lemma ready_qs_distinct_lift:
+  assumes r: "\<And>P. f \<lbrace>\<lambda>s. P (ready_queues s)\<rbrace>"
+  shows "f \<lbrace>ready_qs_distinct\<rbrace>"
+  unfolding ready_qs_distinct_def
+  apply (rule hoare_pre)
+   apply (wps assms | wpsimp)+
+  done
+
+lemma ready_queues_disjoint:
+  "\<lbrakk>in_correct_ready_q s; ready_qs_distinct s; d \<noteq> d' \<or> p \<noteq> p'\<rbrakk>
+   \<Longrightarrow> set (ready_queues s d p) \<inter> set (ready_queues s d' p') = {}"
+  apply (clarsimp simp: ready_qs_distinct_def in_correct_ready_q_def)
+  apply (rule disjointI)
+  apply (frule_tac x=d in spec)
+  apply (drule_tac x=d' in spec)
+  apply (fastforce simp: etcb_at_def is_etcb_at_def split: option.splits)
+  done
+
+lemma isRunnable_sp:
+  "\<lbrace>P\<rbrace>
+   isRunnable tcb_ptr
+   \<lbrace>\<lambda>rv s. \<exists>tcb'. ko_at' tcb' tcb_ptr s
+                  \<and> (rv = (tcbState tcb' = Running \<or> tcbState tcb' = Restart))
+                  \<and> P s\<rbrace>"
+  unfolding isRunnable_def getThreadState_def
+  apply (wpsimp wp: hoare_case_option_wp getObject_tcb_wp simp: threadGet_def)
+  apply (fastforce simp: obj_at'_def split: Structures_H.thread_state.splits)
+  done
+
+crunch (no_fail) no_fail[wp]: isRunnable
+
+defs ksReadyQueues_asrt_def:
+  "ksReadyQueues_asrt
+     \<equiv> \<lambda>s'. \<forall>d p. \<exists>ts. ready_queue_relation d p ts (ksReadyQueues s' (d, p))
+                        (tcbSchedNexts_of s') (tcbSchedPrevs_of s')
+                        (inQ d p |< tcbs_of' s')"
+
+lemma ksReadyQueues_asrt_cross:
+  "ready_queues_relation s s' \<Longrightarrow> ksReadyQueues_asrt s'"
+  by (fastforce simp: ready_queues_relation_def Let_def ksReadyQueues_asrt_def)
+
+lemma ex_abs_ksReadyQueues_asrt:
+  "ex_abs P s \<Longrightarrow> ksReadyQueues_asrt s"
+  by (fastforce simp: ex_abs_underlying_def intro: ksReadyQueues_asrt_cross)
+
+crunches addToBitmap
+  for ko_at'[wp]: "\<lambda>s. P (ko_at' ko ptr s)"
+  and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers
+  and valid_sched_pointers[wp]: valid_sched_pointers
+  and ksReadyQueues[wp]: "\<lambda>s. P (ksReadyQueues s)"
+  and ksReadyQueues_asrt[wp]: ksReadyQueues_asrt
+  and st_tcb_at'[wp]: "\<lambda>s. P (st_tcb_at' Q tcbPtr s)"
+  and valid_tcbs'[wp]: valid_tcbs'
+  (simp: bitmap_fun_defs ksReadyQueues_asrt_def)
+
+lemma tcbQueueHead_ksReadyQueues:
+  "\<lbrakk>list_queue_relation ts queue nexts prevs;
+    \<forall>t. (inQ d p |< tcbs_of' s') t \<longleftrightarrow> t \<in> set ts\<rbrakk>
+   \<Longrightarrow> \<not> tcbQueueEmpty queue \<longrightarrow> (inQ d p |< tcbs_of' s') (the (tcbQueueHead queue))"
+  by (fastforce dest: heap_path_head
+                simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def)
+
+lemma obj_at'_tcbQueueHead_ksReadyQueues:
+  "\<lbrakk>list_queue_relation ts queue nexts prevs;
+    \<forall>t. 
(inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueHead queue)) s'" + by (fastforce dest!: tcbQueueHead_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma tcbQueueHead_iff_tcbQueueEnd: + "list_queue_relation ts q nexts prevs \ tcbQueueHead q \ None \ tcbQueueEnd q \ None" + apply (clarsimp simp: list_queue_relation_def queue_end_valid_def) + using heap_path_None + apply fastforce + done + +lemma tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts\ + \ \ tcbQueueEmpty queue \ (inQ d p |< tcbs_of' s') (the (tcbQueueEnd queue))" + apply (frule tcbQueueHead_iff_tcbQueueEnd) + by (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def) + +lemma obj_at'_tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueEnd queue)) s'" + by (fastforce dest!: tcbQueueEnd_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma thread_get_exs_valid[wp]: + "tcb_at tcb_ptr s \ \(=) s\ thread_get f tcb_ptr \\\_. (=) s\" + by (clarsimp simp: thread_get_def get_tcb_def gets_the_def gets_def return_def get_def + exs_valid_def tcb_at_def bind_def) + +lemma ethread_get_sp: + "\P\ ethread_get f ptr + \\rv. etcb_at (\tcb. f tcb = rv) ptr and P\" + apply wpsimp + apply (clarsimp simp: etcb_at_def split: option.splits) + done + +lemma ethread_get_exs_valid[wp]: + "\tcb_at tcb_ptr s; valid_etcbs s\ \ \(=) s\ ethread_get f tcb_ptr \\\_. (=) s\" + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: ethread_get_def get_etcb_def gets_the_def gets_def return_def get_def + is_etcb_at_def exs_valid_def bind_def) + done + +lemma no_fail_ethread_get[wp]: + "no_fail (tcb_at tcb_ptr and valid_etcbs) (ethread_get f tcb_ptr)" + unfolding ethread_get_def + apply wpsimp + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: is_etcb_at_def get_etcb_def) + done + +lemma threadGet_sp: + "\P\ threadGet f ptr \\rv s. \tcb :: tcb. ko_at' tcb ptr s \ f tcb = rv \ P s\" + unfolding threadGet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma in_set_ready_queues_inQ_eq: + "ready_queues_relation s s' \ t \ set (ready_queues s d p) \ (inQ d p |< tcbs_of' s') t" + by (clarsimp simp: ready_queue_relation_def ready_queues_relation_def Let_def) + +lemma in_ready_q_tcbQueued_eq: + "ready_queues_relation s s' + \ (\d p. t \ set (ready_queues s d p)) \ (tcbQueued |< tcbs_of' s') t" + apply (intro iffI) + apply clarsimp + apply (frule in_set_ready_queues_inQ_eq) + apply (fastforce simp: inQ_def opt_map_def opt_pred_def split: option.splits) + apply (fastforce simp: ready_queue_relation_def ready_queues_relation_def Let_def + inQ_def opt_pred_def + split: option.splits) + done + lemma tcbSchedEnqueue_corres: - "corres dc (is_etcb_at t) (tcb_at' t and Invariants_H.valid_queues and valid_queues') - (tcb_sched_action (tcb_sched_enqueue) t) (tcbSchedEnqueue t)" -proof - - have ready_queues_helper: - "\t tcb a b. 
\ ekheap a t = Some tcb; obj_at' tcbQueued t b ; valid_queues' b ; - ekheap_relation (ekheap a) (ksPSpace b) \ - \ t \ set (ksReadyQueues b (tcb_domain tcb, tcb_priority tcb))" - unfolding valid_queues'_def - by (fastforce dest: ekheap_relation_absD simp: obj_at'_def inQ_def etcb_relation_def projectKO_eq projectKO_tcb) - - show ?thesis unfolding tcbSchedEnqueue_def tcb_sched_action_def - apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, - where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at'; simp_all) - apply (rule no_fail_pre, wp, blast) - apply (case_tac queued; simp_all) - apply (rule corres_no_failI; simp add: no_fail_return) - apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc - assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def - set_tcb_queue_def simpler_modify_def ready_queues_relation_def - state_relation_def tcb_sched_enqueue_def) - apply (rule ready_queues_helper; auto) - apply (clarsimp simp: when_def) - apply (rule stronger_corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply simp - apply (rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_enqueue_def split del: if_split) - apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) - apply simp - apply (rule setQueue_corres[unfolded dc_def]) - apply (rule corres_split_noop_rhs2) - apply (fastforce intro: addToBitmap_noop_corres) - apply (fastforce intro: threadSet_corres_noop simp: tcb_relation_def exst_same_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def - projectKO_eq project_inject) - done -qed + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_enqueue tcb_ptr) (tcbSchedEnqueue tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (clarsimp simp: tcb_sched_action_def tcb_sched_enqueue_def get_tcb_queue_def + tcbSchedEnqueue_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) 
+ apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def projectKOs) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce + apply clarsimp + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) + + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. P'" and P'=P' for P P']) + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueuePrepend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueHead_ksReadyQueues simp: obj_at'_def projectKOs) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) 
+ + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp simp: setQueue_def tcbQueuePrepend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def projectKOs) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" and s'=s' + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply auto[1] + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" and st="tcbQueueHead (ksReadyQueues s' (d, p))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (cut_tac xs="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + and st="tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "\ (d = tcb_domain etcb \ p = tcb_priority etcb)") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def opt_pred_def opt_map_def projectKOs) + apply (metis inQ_def option.simps(5) tcb_of'_TCB) + apply (intro conjI impI; simp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + force simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: 
tcbQueueEmpty_def) + apply (case_tac "t = tcbPtr") + apply (clarsimp simp: inQ_def fun_upd_apply obj_at'_def projectKOs split: if_splits) + apply (case_tac "t = the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def obj_at'_def projectKOs fun_upd_apply + split: option.splits) + apply metis + apply (clarsimp simp: inQ_def in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain etcb \ p = tcb_priority etcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def projectKOs) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def projectKOs) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def projectKOs) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def prev_queue_head_def + opt_map_red obj_at'_def projectKOs + split: if_splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_prepend[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def obj_at'_def projectKOs fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply obj_at'_def projectKOs split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def split: if_splits) + by (auto dest!: hd_in_set + simp: inQ_def in_opt_pred opt_map_def fun_upd_apply obj_at'_def projectKOs + split: if_splits option.splits) definition weak_sch_act_wf :: "scheduler_action \ kernel_state \ bool" @@ -1843,7 +2696,10 @@ lemma getSchedulerAction_corres: done lemma rescheduleRequired_corres: - "corres dc (weak_valid_sched_action and valid_etcbs) (Invariants_H.valid_queues and valid_queues' and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s)) + "corres dc + (weak_valid_sched_action and in_correct_ready_q and ready_qs_distinct and valid_etcbs + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') (reschedule_required) rescheduleRequired" apply (simp add: rescheduleRequired_def reschedule_required_def) apply (rule corres_guard_imp) @@ -1854,15 +2710,14 @@ lemma rescheduleRequired_corres: apply (case_tac action) apply simp apply simp - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply simp apply (rule setSchedulerAction_corres) apply simp apply (wp | wpc | simp)+ - apply (force dest: st_tcb_weakenE simp: in_monad weak_valid_sched_action_def valid_etcbs_def + apply (force dest: st_tcb_weakenE simp: in_monad weak_valid_sched_action_def valid_etcbs_def st_tcb_at_def obj_at_def is_tcb split: Deterministic_A.scheduler_action.split) - apply simp - apply (clarsimp simp: weak_sch_act_wf_def pred_tcb_at' split: scheduler_action.splits) + apply (clarsimp split: scheduler_action.splits) done lemma rescheduleRequired_corres_simple: @@ -1930,20 +2785,18 @@ lemmas addToBitmap_weak_sch_act_wf[wp] = weak_sch_act_wf_lift[OF addToBitmap_nosch] crunch st_tcb_at'[wp]: removeFromBitmap "st_tcb_at' P t" -crunch pred_tcb_at'[wp]: removeFromBitmap "pred_tcb_at' proj P t" +crunch pred_tcb_at'[wp]: removeFromBitmap "\s. Q (pred_tcb_at' proj P t s)" crunch not_st_tcb_at'[wp]: removeFromBitmap "\s. \ (st_tcb_at' P' t) s" -crunch not_pred_tcb_at'[wp]: removeFromBitmap "\s. \ (pred_tcb_at' proj P' t) s" crunch st_tcb_at'[wp]: addToBitmap "st_tcb_at' P' t" -crunch pred_tcb_at'[wp]: addToBitmap "pred_tcb_at' proj P' t" +crunch pred_tcb_at'[wp]: addToBitmap "\s. Q (pred_tcb_at' proj P t s)" crunch not_st_tcb_at'[wp]: addToBitmap "\s. \ (st_tcb_at' P' t) s" -crunch not_pred_tcb_at'[wp]: addToBitmap "\s. \ (pred_tcb_at' proj P' t) s" -crunch obj_at'[wp]: removeFromBitmap "obj_at' P t" +crunch obj_at'[wp]: removeFromBitmap "\s. Q (obj_at' P t s)" -crunch obj_at'[wp]: addToBitmap "obj_at' P t" +crunch obj_at'[wp]: addToBitmap "\s. Q (obj_at' P t s)" lemma removeFromBitmap_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t\ removeFromBitmap tdom prio \\ya. tcb_in_cur_domain' t\" @@ -1960,9 +2813,11 @@ lemma addToBitmap_tcb_in_cur_domain'[wp]: done lemma tcbSchedDequeue_weak_sch_act_wf[wp]: - "\ \s. weak_sch_act_wf (ksSchedulerAction s) s \ tcbSchedDequeue a \ \_ s. weak_sch_act_wf (ksSchedulerAction s) s \" - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_weak_sch_act_wf removeFromBitmap_weak_sch_act_wf | simp add: crunch_simps)+ + "tcbSchedDequeue tcbPtr \\s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wp threadSet_weak_sch_act_wf getObject_tcb_wp removeFromBitmap_weak_sch_act_wf + | simp add: crunch_simps threadGet_def)+ + apply (clarsimp simp: obj_at'_def) done lemma dequeue_nothing_eq[simp]: @@ -1978,44 +2833,345 @@ lemma gets_the_exec: "f s \ None \ (do x \ ge return_def assert_opt_def) done +lemma tcbQueueRemove_no_fail: + "no_fail (\s. tcb_at' tcbPtr s + \ (\ts. 
list_queue_relation ts queue (tcbSchedNexts_of s) (tcbSchedPrevs_of s)
+                      \<and> tcbPtr \<in> set ts)
+                 \<and> sym_heap_sched_pointers s \<and> valid_objs' s)
+     (tcbQueueRemove queue tcbPtr)"
+  unfolding tcbQueueRemove_def
+  apply (wpsimp wp: getObject_tcb_wp)
+  apply normalise_obj_at'
+  apply (frule (1) ko_at_valid_objs')
+   apply (fastforce simp: projectKOs)
+  apply (clarsimp simp: list_queue_relation_def)
+  apply (prop_tac "tcbQueueHead queue \<noteq> Some tcbPtr \<longrightarrow> tcbSchedPrevs_of s tcbPtr \<noteq> None")
+   apply (rule impI)
+   apply (frule not_head_prev_not_None[where p=tcbPtr])
+      apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def)
+     apply (fastforce dest: heap_path_head)
+    apply fastforce
+   apply (fastforce simp: opt_map_def obj_at'_def valid_tcb'_def valid_bound_tcb'_def)
+  by (fastforce dest!: not_last_next_not_None[where p=tcbPtr]
+                simp: queue_end_valid_def opt_map_def obj_at'_def projectKOs valid_obj'_def
+                      valid_tcb'_def)
+
+crunch (no_fail) no_fail[wp]: removeFromBitmap
+
+crunches removeFromBitmap
+  for ready_queues_relation[wp]: "ready_queues_relation s"
+  and list_queue_relation[wp]:
+    "\<lambda>s'. list_queue_relation ts (P (ksReadyQueues s'))
+            (tcbSchedNexts_of s') (tcbSchedPrevs_of s')"
+  (simp: bitmap_fun_defs ready_queues_relation_def)
+
+\<comment> \<open>
+  A direct analogue of tcbQueueRemove, used in tcb_sched_dequeue' below, so that within the proof of
+  tcbSchedDequeue_corres, we may reason in terms of the list operations used within this function
+  rather than @{term filter}.\<close>
+definition tcb_queue_remove :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
+  "tcb_queue_remove a ls \<equiv>
+     if ls = [a]
+     then []
+     else if a = hd ls
+          then tl ls
+          else if a = last ls
+               then butlast ls
+               else list_remove ls a"
+
+definition tcb_sched_dequeue' :: "obj_ref \<Rightarrow> unit det_ext_monad" where
+  "tcb_sched_dequeue' tcb_ptr \<equiv> do
+     d \<leftarrow> ethread_get tcb_domain tcb_ptr;
+     prio \<leftarrow> ethread_get tcb_priority tcb_ptr;
+     queue \<leftarrow> get_tcb_queue d prio;
+     when (tcb_ptr \<in> set queue) $ set_tcb_queue d prio (tcb_queue_remove tcb_ptr queue)
+   od"
+
+lemma filter_tcb_queue_remove:
+  "\<lbrakk>a \<in> set ls; distinct ls\<rbrakk> \<Longrightarrow> filter ((\<noteq>) a) ls = tcb_queue_remove a ls"
+  apply (clarsimp simp: tcb_queue_remove_def)
+  apply (intro conjI impI)
+     apply (fastforce elim: filter_hd_equals_tl)
+    apply (fastforce elim: filter_last_equals_butlast)
+   apply (fastforce elim: filter_hd_equals_tl)
+  apply (frule split_list)
+  apply (clarsimp simp: list_remove_middle_distinct)
+  apply (subst filter_True | clarsimp simp: list_remove_none)+
+  done
+
+lemma tcb_sched_dequeue_monadic_rewrite:
+  "monadic_rewrite False True (is_etcb_at t and (\<lambda>s. \<forall>d p. distinct (ready_queues s d p)))
+     (tcb_sched_action tcb_sched_dequeue t) (tcb_sched_dequeue' t)"
+  supply if_split[split del]
+  apply (clarsimp simp: tcb_sched_dequeue'_def tcb_sched_dequeue_def tcb_sched_action_def
+                        set_tcb_queue_def)
+  apply (rule monadic_rewrite_bind_tail)+
+    apply (clarsimp simp: when_def)
+    apply (rule monadic_rewrite_if_r)
+     apply (rule_tac P="\<lambda>_. distinct queue" in monadic_rewrite_guard_arg_cong)
+     apply (frule (1) filter_tcb_queue_remove)
+     apply (metis (mono_tags, lifting) filter_cong)
+    apply (rule monadic_rewrite_modify_noop)
+   apply (wpsimp wp: thread_get_wp)+
+  apply (clarsimp simp: etcb_at_def split: option.splits)
+  apply (prop_tac "(\<lambda>d' p. if d' = tcb_domain x2 \<and> p = tcb_priority x2
+                           then filter (\<lambda>x. 
x \ t) (ready_queues s (tcb_domain x2) (tcb_priority x2)) + else ready_queues s d' p) + = ready_queues s") + apply (subst filter_True) + apply fastforce + apply (clarsimp intro!: ext split: if_splits) + apply fastforce + done + +crunches removeFromBitmap + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + +lemma list_queue_relation_neighbour_in_set: + "\list_queue_relation ls q hp hp'; sym_heap hp hp'; p \ set ls\ + \ \nbr. (hp p = Some nbr \ nbr \ set ls) \ (hp' p = Some nbr \ nbr \ set ls)" + apply (rule heap_ls_neighbour_in_set) + apply (fastforce simp: list_queue_relation_def) + apply fastforce + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def) + apply fastforce + done + +lemma in_queue_not_head_or_not_tail_length_gt_1: + "\tcbPtr \ set ls; tcbQueueHead q \ Some tcbPtr \ tcbQueueEnd q \ Some tcbPtr; + list_queue_relation ls q nexts prevs\ + \ Suc 0 < length ls" + apply (clarsimp simp: list_queue_relation_def) + apply (cases ls; fastforce simp: queue_end_valid_def) + done + lemma tcbSchedDequeue_corres: - "corres dc (is_etcb_at t) (tcb_at' t and Invariants_H.valid_queues) - (tcb_sched_action tcb_sched_dequeue t) (tcbSchedDequeue t)" - apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) - apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (rule no_fail_pre, wp, simp) - apply (case_tac queued) - defer - apply (simp add: when_def) - apply (rule corres_no_failI) - apply (wp) - apply (clarsimp simp: in_monad ethread_get_def set_tcb_queue_def is_etcb_at_def state_relation_def) - apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") - prefer 2 - subgoal by (force simp: tcb_sched_dequeue_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def - ready_queues_relation_def obj_at'_def inQ_def projectKO_eq project_inject) - apply (subst gets_the_exec) - apply (simp add: get_etcb_def) - apply (subst gets_the_exec) - apply (simp add: get_etcb_def) - apply (simp add: exec_gets simpler_modify_def get_etcb_def ready_queues_relation_def cong: if_cong get_tcb_queue_def) - apply (simp add: when_def) - apply (rule corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (simp, rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_dequeue_def) - apply (rule setQueue_corres) - apply (rule corres_split_noop_rhs) - apply (clarsimp, rule removeFromBitmap_corres_noop) - apply (rule threadSet_corres_noop; simp_all add: tcb_relation_def exst_same_def) - apply (wp | simp)+ + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and tcb_at tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_objs') + (tcb_sched_action tcb_sched_dequeue tcb_ptr) (tcbSchedDequeue tcbPtr)" + supply heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + list_remove_append[simp del] + apply (rule_tac Q'="tcb_at' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: tcb_at_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in 
corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (rule monadic_rewrite_guard_imp[OF tcb_sched_dequeue_monadic_rewrite]) + apply (fastforce dest: tcb_at_is_etcb_at simp: in_correct_ready_q_def ready_qs_distinct_def) + apply (clarsimp simp: tcb_sched_dequeue'_def get_tcb_queue_def tcbSchedDequeue_def getQueue_def + unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac dom) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac prio) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_if_strong'; fastforce?) + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + apply (fastforce simp: obj_at'_def projectKOs opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; wpsimp?) + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp wp: tcbQueueRemove_no_fail) + apply (fastforce dest: state_relation_ready_queues_relation + simp: ex_abs_underlying_def ready_queues_relation_def ready_queue_relation_def + Let_def inQ_def opt_pred_def opt_map_def obj_at'_def projectKOs) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) 
+ + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp + simp: setQueue_def tcbQueueRemove_def + split_del: if_split) + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply normalise_obj_at' + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def projectKOs) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply clarsimp + apply (cut_tac p=tcbPtr and ls="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_neighbour_in_set) + apply (fastforce dest!: spec) + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: ready_queues_relation_def Let_def list_queue_relation_def) + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply fast + apply (clarsimp simp: tcbQueueEmpty_def) + apply (prop_tac "Some tcbPtr \ tcbQueueHead (ksReadyQueues s' (d, p))") + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply blast + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI; clarsimp) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (force intro!: heap_path_heap_upd_not_in simp: fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (force simp: prev_queue_head_heap_upd fun_upd_apply) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + apply (clarsimp simp: etcb_at_def obj_at'_def projectKOs) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (force simp: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply assumption + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply split: if_splits) + apply (force simp: not_emptyI opt_map_red) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (force simp: prev_queue_head_def fun_upd_apply opt_map_red opt_map_upd_triv) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply (force simp: not_emptyI opt_map_red) + apply fastforce 
+ apply (clarsimp simp: opt_map_red opt_map_upd_triv) + apply (intro prev_queue_head_heap_upd) + apply (force dest!: spec) + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply fastforce + subgoal + by (clarsimp simp: inQ_def opt_map_def opt_pred_def fun_upd_apply + split: if_splits option.splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (frule heap_path_head') + apply (frule heap_ls_distinct) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply prev_queue_head_def) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: tcb_queue_remove_def inQ_def opt_pred_def fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fastforce + apply (fastforce simp: list_queue_relation_def) + apply (frule list_not_head) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_tail_nonempty) + apply (frule (2) heap_ls_next_of_hd) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI allI) + apply (drule (1) heap_ls_remove_head_not_singleton) + apply (clarsimp simp: opt_map_red opt_map_upd_triv fun_upd_apply projectKOs) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply last_tl) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply projectKOs) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply projectKOs + split: option.splits) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fast + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: queue_end_valid_def) + apply (frule list_not_last) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_gt_1_imp_butlast_nonempty) + apply (frule (3) heap_ls_prev_of_last) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI; clarsimp?) 
+ apply (drule (1) heap_ls_remove_last_not_singleton) + apply (force elim!: rsubst3[where P=heap_ls] simp: opt_map_def fun_upd_apply obj_at'_def projectKOs) + apply (clarsimp simp: opt_map_def fun_upd_apply projectKOs) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def projectKOs) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply projectKOs + split: option.splits) + apply (meson distinct_in_butlast_not_last in_set_butlastD last_in_set not_last_in_set_butlast) + + \ \tcbPtr is in the middle of the ready queue\ + apply (clarsimp simp: obj_at'_def) + apply (frule set_list_mem_nonempty) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []", fastforce simp: queue_end_valid_def) + apply clarsimp + apply (frule (2) ptr_in_middle_prev_next) + apply fastforce + apply (clarsimp simp: tcb_queue_remove_def) + apply (prop_tac "tcbPtr \ last xs") + apply (clarsimp simp: distinct_append) + apply (prop_tac "tcbPtr \ hd ys") + apply (fastforce dest: hd_in_set simp: distinct_append) + apply (prop_tac "last xs \ hd ys") + apply (metis distinct_decompose2 hd_Cons_tl last_in_set) + apply (prop_tac "list_remove (xs @ tcbPtr # ys) tcbPtr = xs @ ys") + apply (simp add: list_remove_middle_distinct del: list_remove_append) + apply (intro conjI impI allI; (solves \clarsimp simp: distinct_append\)?) + apply (fastforce elim!: rsubst3[where P=heap_ls] + dest!: heap_ls_remove_middle hd_in_set last_in_set + simp: distinct_append not_emptyI opt_map_def fun_upd_apply projectKOs) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (case_tac xs; + fastforce simp: prev_queue_head_def opt_map_def fun_upd_apply distinct_append projectKOs) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply distinct_append projectKOs + split: option.splits) done lemma thread_get_test: "do cur_ts \ get_thread_state cur; g (test cur_ts) od = @@ -2023,7 +3179,9 @@ lemma thread_get_test: "do cur_ts \ get_thread_state cur; g (test cur apply (simp add: get_thread_state_def thread_get_def) done -lemma thread_get_isRunnable_corres: "corres (=) (tcb_at t) (tcb_at' t) (thread_get (\tcb. runnable (tcb_state tcb)) t) (isRunnable t)" +lemma thread_get_isRunnable_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (\tcb. 
runnable (tcb_state tcb)) t) (isRunnable t)" apply (simp add: isRunnable_def getThreadState_def threadGet_def thread_get_def) apply (fold liftM_def) @@ -2037,8 +3195,8 @@ lemma thread_get_isRunnable_corres: "corres (=) (tcb_at t) (tcb_at' t) (thread_g lemma setThreadState_corres: "thread_state_relation ts ts' \ corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (set_thread_state t ts) (setThreadState ts' t)" (is "?tsr \ corres dc ?Pre ?Pre' ?sts ?sts'") apply (simp add: set_thread_state_def setThreadState_def) @@ -2062,40 +3220,95 @@ lemma setThreadState_corres: lemma setBoundNotification_corres: "corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (set_bound_notification t ntfn) (setBoundNotification ntfn t)" apply (simp add: set_bound_notification_def setBoundNotification_def) apply (subst thread_set_def[simplified, symmetric]) apply (rule threadset_corres, simp_all add:tcb_relation_def exst_same_def) done -crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification - for tcb'[wp]: "tcb_at' addr" +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for tcb'[wp]: "tcb_at' addr" + +lemma tcbSchedNext_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedNext_update (\_. ptrOpt)) tcbPtr + \\_. valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedPrev_update (\_. ptrOpt)) tcbPtr + \\_. valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueuePrepend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. valid_objs'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift' simp: tcbQueueEmpty_def) + +crunches addToBitmap + for valid_objs'[wp]: valid_objs' + (simp: unless_def crunch_simps wp: crunch_wps) + +lemma tcbSchedEnqueue_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. valid_objs'\" + unfolding tcbSchedEnqueue_def setQueue_def + apply (wpsimp wp: threadSet_valid_objs' getObject_tcb_wp simp: threadGet_def) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done crunches rescheduleRequired, removeFromBitmap for valid_objs'[wp]: valid_objs' (simp: crunch_simps) -lemma tcbSchedDequeue_valid_objs' [wp]: "\ valid_objs' \ tcbSchedDequeue t \\_. 
valid_objs' \" - unfolding tcbSchedDequeue_def - apply (wp threadSet_valid_objs') - apply (clarsimp simp add: valid_tcb'_def tcb_cte_cases_def) - apply wp - apply (simp add: if_apply_def2) - apply (wp hoare_drop_imps) - apply (wp | simp cong: if_cong add: valid_tcb'_def tcb_cte_cases_def if_apply_def2)+ +lemmas ko_at_valid_objs'_pre = + ko_at_valid_objs'[simplified project_inject, atomized, simplified, rule_format] + +lemmas ep_ko_at_valid_objs_valid_ep' = + ko_at_valid_objs'_pre[where 'a=endpoint, simplified injectKO_defs valid_obj'_def, simplified] + +lemmas ntfn_ko_at_valid_objs_valid_ntfn' = + ko_at_valid_objs'_pre[where 'a=notification, simplified injectKO_defs valid_obj'_def, + simplified] + +lemmas tcb_ko_at_valid_objs_valid_tcb' = + ko_at_valid_objs'_pre[where 'a=tcb, simplified injectKO_defs valid_obj'_def, simplified] + +lemma tcbQueueRemove_valid_objs'[wp]: + "tcbQueueRemove queue tcbPtr \valid_objs'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (fastforce dest!: tcb_ko_at_valid_objs_valid_tcb' + simp: valid_tcb'_def valid_bound_tcb'_def obj_at'_def) done +lemma tcbSchedDequeue_valid_objs'[wp]: + "tcbSchedDequeue t \valid_objs'\" + unfolding tcbSchedDequeue_def setQueue_def + by (wpsimp wp: threadSet_valid_objs') + lemma sts_valid_objs': - "\valid_objs' and valid_tcb_state' st\ - setThreadState st t - \\rv. valid_objs'\" - apply (simp add: setThreadState_def setQueue_def isRunnable_def isStopped_def) - apply (wp threadSet_valid_objs') - apply (simp add: valid_tcb'_def tcb_cte_cases_def) - apply (wp threadSet_valid_objs' | simp)+ - apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def) + "\valid_objs' and valid_tcb_state' st and pspace_aligned' and pspace_distinct'\ + setThreadState st t + \\_. valid_objs'\" + apply (wpsimp simp: setThreadState_def wp: threadSet_valid_objs') + apply (rule_tac Q="\_. valid_objs' and pspace_aligned' and pspace_distinct'" in hoare_post_imp) + apply fastforce + apply (wpsimp wp: threadSet_valid_objs') + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) done lemma sbn_valid_objs': @@ -2181,18 +3394,6 @@ lemma setQueue_valid_bitmapQ_except[wp]: unfolding setQueue_def bitmapQ_defs by (wp, clarsimp simp: bitmapQ_def) -lemma setQueue_valid_bitmapQ: (* enqueue only *) - "\ valid_bitmapQ and (\s. (ksReadyQueues s (d, p) = []) = (ts = [])) \ - setQueue d p ts - \\_. valid_bitmapQ \" - unfolding setQueue_def bitmapQ_defs - by (wp, clarsimp simp: bitmapQ_def) - -lemma setQueue_valid_queues': - "\valid_queues' and (\s. \t. obj_at' (inQ d p) t s \ t \ set ts)\ - setQueue d p ts \\_. valid_queues'\" - by (wp | simp add: valid_queues'_def setQueue_def)+ - lemma setQueue_cur: "\\s. cur_tcb' s\ setQueue d p ts \\rv s. cur_tcb' s\" unfolding setQueue_def cur_tcb'_def @@ -2314,14 +3515,14 @@ lemma threadSet_queued_sch_act_wf[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ threadSet (tcbQueued_update f) t \\_ s. 
sch_act_wf (ksSchedulerAction s) s\" - including no_pre + including classic_wp_pre apply (simp add: sch_act_wf_cases split: scheduler_action.split) apply (wp hoare_vcg_conj_lift) apply (simp add: threadSet_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ apply (clarsimp simp: obj_at'_def) apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ apply (simp add: threadSet_def) @@ -2330,9 +3531,17 @@ lemma threadSet_queued_sch_act_wf[wp]: apply (wp tcb_in_cur_domain'_lift | simp add: obj_at'_def)+ done +lemma tcbSchedNext_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedNext_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + +lemma tcbSchedPrev_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedPrev_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + lemma tcbSchedEnqueue_pred_tcb_at'[wp]: "\\s. pred_tcb_at' proj P' t' s \ tcbSchedEnqueue t \\_ s. pred_tcb_at' proj P' t' s\" - apply (simp add: tcbSchedEnqueue_def when_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def when_def unless_def) apply (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ done @@ -2340,8 +3549,9 @@ lemma tcbSchedDequeue_sch_act_wf[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedDequeue t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - unfolding tcbSchedDequeue_def - by (wp setQueue_sch_act | wp sch_act_wf_lift | simp add: if_apply_def2)+ + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wp setQueue_sch_act threadSet_tcbDomain_triv hoare_drop_imps + | wp sch_act_wf_lift | simp add: if_apply_def2)+ crunch nosch: tcbSchedDequeue "\s. P (ksSchedulerAction s)" @@ -2437,21 +3647,22 @@ lemma tcbSchedEnqueue_sch_act[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedEnqueue t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - by (simp add: tcbSchedEnqueue_def unless_def) - (wp setQueue_sch_act | wp sch_act_wf_lift | clarsimp)+ + by (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) + (wp setQueue_sch_act threadSet_tcbDomain_triv | wp sch_act_wf_lift | clarsimp)+ lemma tcbSchedEnqueue_weak_sch_act[wp]: "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ tcbSchedEnqueue t \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" - apply (simp add: tcbSchedEnqueue_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) apply (wp setQueue_sch_act threadSet_weak_sch_act_wf | clarsimp)+ done -lemma threadGet_wp: "\\s. tcb_at' t s \ (\tcb. ko_at' tcb t s \ P (f tcb) s)\ threadGet f t \P\" +lemma threadGet_wp: + "\\s. \tcb. ko_at' tcb t s \ P (f tcb) s\ threadGet f t \P\" apply (simp add: threadGet_def) apply (wp getObject_tcb_wp) - apply clarsimp + apply (clarsimp simp: obj_at'_def) done lemma threadGet_const: @@ -2493,14 +3704,6 @@ lemma addToBitmap_bitmapQ: by (wpsimp simp: bitmap_fun_defs bitmapQ_def prioToL1Index_bit_set prioL2Index_bit_set simp_del: bit_exp_iff) -lemma addToBitmap_valid_queues_no_bitmap_except: -" \ valid_queues_no_bitmap_except t \ - addToBitmap d p - \\_. 
valid_queues_no_bitmap_except t \" - unfolding addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def valid_queues_no_bitmap_except_def - by (wp, clarsimp) - crunch norq[wp]: addToBitmap "\s. P (ksReadyQueues s)" (wp: updateObject_cte_inv hoare_drop_imps) crunch norq[wp]: removeFromBitmap "\s. P (ksReadyQueues s)" @@ -2532,9 +3735,8 @@ lemma prioToL1Index_complement_nth_w2p: lemma valid_bitmapQ_exceptE: "\ valid_bitmapQ_except d' p' s ; d \ d' \ p \ p' \ - \ bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" - unfolding valid_bitmapQ_except_def - by blast + \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (fastforce simp: valid_bitmapQ_except_def) lemma invertL1Index_eq_cancelD: "\ invertL1Index i = invertL1Index j ; i < l2BitmapSize ; j < l2BitmapSize \ @@ -2561,7 +3763,6 @@ lemma removeFromBitmap_bitmapQ_no_L2_orphans[wp]: unfolding bitmap_fun_defs apply (wp, clarsimp simp: bitmap_fun_defs bitmapQ_no_L2_orphans_def)+ apply (rule conjI, clarsimp) - apply (rule conjI, clarsimp) apply (clarsimp simp: complement_nth_w2p l2BitmapSize_def') apply clarsimp apply metis @@ -2649,22 +3850,15 @@ lemma addToBitmap_valid_bitmapQ_except: done lemma addToBitmap_valid_bitmapQ: -" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and - (\s. ksReadyQueues s (d,p) \ []) \ - addToBitmap d p - \\_. valid_bitmapQ \" -proof - - have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and - (\s. ksReadyQueues s (d,p) \ []) \ - addToBitmap d p - \\_. valid_bitmapQ_except d p and - bitmapQ_no_L2_orphans and (\s. bitmapQ d p s \ ksReadyQueues s (d,p) \ []) \" - by (wp addToBitmap_valid_queues_no_bitmap_except addToBitmap_valid_bitmapQ_except - addToBitmap_bitmapQ_no_L2_orphans addToBitmap_bitmapQ; simp) - - thus ?thesis - by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) -qed + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans + and (\s. \ tcbQueueEmpty (ksReadyQueues s (d,p)))\ + addToBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. ?pre s \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done lemma threadGet_const_tcb_at: "\\s. tcb_at' t s \ obj_at' (P s \ f) t s\ threadGet f t \\rv s. P s rv \" @@ -2682,12 +3876,6 @@ lemma threadGet_const_tcb_at_imp_lift: apply (clarsimp simp: obj_at'_def) done -lemma valid_queues_no_bitmap_objD: - "\ valid_queues_no_bitmap s; t \ set (ksReadyQueues s (d, p))\ - \ obj_at' (inQ d p and runnable' \ tcbState) t s" - unfolding valid_queues_no_bitmap_def - by blast - lemma setQueue_bitmapQ_no_L1_orphans[wp]: "\ bitmapQ_no_L1_orphans \ setQueue d p ts @@ -2707,136 +3895,16 @@ lemma setQueue_sets_queue[wp]: unfolding setQueue_def by (wp, simp) -lemma tcbSchedEnqueueOrAppend_valid_queues: - (* f is either (t#ts) or (ts @ [t]), so we define its properties generally *) - assumes f_set[simp]: "\ts. t \ set (f ts)" - assumes f_set_insert[simp]: "\ts. set (f ts) = insert t (set ts)" - assumes f_not_empty[simp]: "\ts. f ts \ []" - assumes f_distinct: "\ts. 
\ distinct ts ; t \ set ts \ \ distinct (f ts)" - shows "\Invariants_H.valid_queues and st_tcb_at' runnable' t and valid_objs' \ - do queued \ threadGet tcbQueued t; - unless queued $ - do tdom \ threadGet tcbDomain t; - prio \ threadGet tcbPriority t; - queue \ getQueue tdom prio; - setQueue tdom prio $ f queue; - when (null queue) $ addToBitmap tdom prio; - threadSet (tcbQueued_update (\_. True)) t - od - od - \\_. Invariants_H.valid_queues\" -proof - - - define could_run where "could_run == - \d p t. obj_at' (\tcb. inQ d p (tcbQueued_update (\_. True) tcb) \ runnable' (tcbState tcb)) t" - - have addToBitmap_could_run: - "\d p. \\s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\ - addToBitmap d p - \\_ s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\" - unfolding bitmap_fun_defs - by (wp, clarsimp simp: could_run_def) - - have setQueue_valid_queues_no_bitmap_except: - "\d p ts. - \ valid_queues_no_bitmap_except t and - (\s. ksReadyQueues s (d, p) = ts \ p \ maxPriority \ d \ maxDomain \ t \ set ts) \ - setQueue d p (f ts) - \\rv. valid_queues_no_bitmap_except t\" - unfolding setQueue_def valid_queues_no_bitmap_except_def null_def - by (wp, auto intro: f_distinct) - - have threadSet_valid_queues_could_run: - "\f. \ valid_queues_no_bitmap_except t and - (\s. \d p. t \ set (ksReadyQueues s (d,p)) \ could_run d p t s) and - valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans \ - threadSet (tcbQueued_update (\_. True)) t - \\rv. Invariants_H.valid_queues \" - unfolding threadSet_def could_run_def - apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) - apply (rule hoare_pre) - apply (simp add: valid_queues_def valid_queues_no_bitmap_def) - apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift - setObject_tcb_strongest) - apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def) - done - - have setQueue_could_run: "\d p ts. - \ valid_queues and (\_. t \ set ts) and - (\s. could_run d p t s) \ - setQueue d p ts - \\rv s. (\d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s)\" - unfolding setQueue_def valid_queues_def could_run_def - by wp (fastforce dest: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def) - - note hoare_vcg_if_lift[wp] hoare_vcg_conj_lift[wp] hoare_vcg_const_imp_lift[wp] - - show ?thesis - unfolding tcbSchedEnqueue_def null_def - apply (rule hoare_pre) - apply (rule hoare_seq_ext) - apply (simp add: unless_def) - apply (wp threadSet_valid_queues_could_run) - apply (wp addToBitmap_could_run addToBitmap_valid_bitmapQ - addToBitmap_valid_queues_no_bitmap_except addToBitmap_bitmapQ_no_L2_orphans)+ - apply (wp setQueue_valid_queues_no_bitmap_except setQueue_could_run - setQueue_valid_bitmapQ_except setQueue_sets_queue setQueue_valid_bitmapQ)+ - apply (wp threadGet_const_tcb_at_imp_lift | simp add: if_apply_def2)+ - apply clarsimp - apply (frule pred_tcb_at') - apply (frule (1) valid_objs'_maxDomain) - apply (frule (1) valid_objs'_maxPriority) - apply (clarsimp simp: valid_queues_def st_tcb_at'_def obj_at'_def valid_queues_no_bitmap_exceptI) - apply (fastforce dest!: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def could_run_def) - done -qed - -lemma tcbSchedEnqueue_valid_queues[wp]: - "\Invariants_H.valid_queues - and st_tcb_at' runnable' t - and valid_objs' \ - tcbSchedEnqueue t - \\_. 
Invariants_H.valid_queues\" - unfolding tcbSchedEnqueue_def - by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) - -lemma tcbSchedAppend_valid_queues[wp]: - "\Invariants_H.valid_queues - and st_tcb_at' runnable' t - and valid_objs' \ - tcbSchedAppend t - \\_. Invariants_H.valid_queues\" - unfolding tcbSchedAppend_def - by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) - -lemma rescheduleRequired_valid_queues[wp]: - "\\s. Invariants_H.valid_queues s \ valid_objs' s \ - weak_sch_act_wf (ksSchedulerAction s) s\ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - apply (fastforce simp: weak_sch_act_wf_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) - done - -lemma rescheduleRequired_valid_queues_sch_act_simple: - "\Invariants_H.valid_queues and sch_act_simple\ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: Invariants_H.valid_queues_def sch_act_simple_def)+ - done - lemma rescheduleRequired_valid_bitmapQ_sch_act_simple: "\ valid_bitmapQ and sch_act_simple\ rescheduleRequired \\_. valid_bitmapQ \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. valid_bitmapQ s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. valid_bitmapQ s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done @@ -2844,12 +3912,12 @@ lemma rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple: "\ bitmapQ_no_L1_orphans and sch_act_simple\ rescheduleRequired \\_. bitmapQ_no_L1_orphans \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. bitmapQ_no_L1_orphans s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. bitmapQ_no_L1_orphans s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done @@ -2857,162 +3925,43 @@ lemma rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple: "\ bitmapQ_no_L2_orphans and sch_act_simple\ rescheduleRequired \\_. bitmapQ_no_L2_orphans \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. bitmapQ_no_L2_orphans s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. bitmapQ_no_L2_orphans s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done lemma sts_valid_bitmapQ_sch_act_simple: "\valid_bitmapQ and sch_act_simple\ - setThreadState st t + setThreadState st t \\_. valid_bitmapQ \" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_valid_bitmapQ_sch_act_simple threadSet_valid_bitmapQ [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma sts_valid_bitmapQ_no_L2_orphans_sch_act_simple: - "\ bitmapQ_no_L2_orphans and sch_act_simple\ - setThreadState st t - \\_. bitmapQ_no_L2_orphans \" + "\bitmapQ_no_L2_orphans and sch_act_simple\ + setThreadState st t + \\_. 
bitmapQ_no_L2_orphans\" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple threadSet_valid_bitmapQ_no_L2_orphans [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma sts_valid_bitmapQ_no_L1_orphans_sch_act_simple: - "\ bitmapQ_no_L1_orphans and sch_act_simple\ - setThreadState st t + "\bitmapQ_no_L1_orphans and sch_act_simple\ + setThreadState st t \\_. bitmapQ_no_L1_orphans \" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple threadSet_valid_bitmapQ_no_L1_orphans [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - -lemma sts_valid_queues: - "\\s. Invariants_H.valid_queues s \ - ((\p. t \ set(ksReadyQueues s p)) \ runnable' st)\ - setThreadState st t \\rv. Invariants_H.valid_queues\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_valid_queues_sch_act_simple - threadSet_valid_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - -lemma sbn_valid_queues: - "\\s. Invariants_H.valid_queues s\ - setBoundNotification ntfn t \\rv. Invariants_H.valid_queues\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - - - -lemma addToBitmap_valid_queues'[wp]: - "\ valid_queues' \ addToBitmap d p \\_. valid_queues' \" - unfolding valid_queues'_def addToBitmap_def - modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def - by (wp, simp) - -lemma tcbSchedEnqueue_valid_queues'[wp]: - "\valid_queues' and st_tcb_at' runnable' t \ - tcbSchedEnqueue t - \\_. valid_queues'\" - apply (simp add: tcbSchedEnqueue_def) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued; simp_all add: unless_def when_def) - apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ - apply (subst conj_commute, wp) - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) - apply wp - apply fastforce - apply wp - apply (subst conj_commute) - apply clarsimp - apply (rule_tac Q="\rv. valid_queues' - and obj_at' (\obj. \ tcbQueued obj) t - and obj_at' (\obj. tcbPriority obj = prio) t - and obj_at' (\obj. tcbDomain obj = tdom) t - and (\s. t \ set (ksReadyQueues s (tdom, prio)))" - in hoare_post_imp) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKOs inQ_def) - apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def inQ_def projectKOs valid_queues'_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def) - done - -lemma rescheduleRequired_valid_queues'_weak[wp]: - "\\s. valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s\ - rescheduleRequired - \\_. 
valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply wpsimp - apply (clarsimp simp: weak_sch_act_wf_def) - done - -lemma rescheduleRequired_valid_queues'_sch_act_simple: - "\valid_queues' and sch_act_simple\ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: valid_queues'_def sch_act_simple_def)+ - done - -lemma setThreadState_valid_queues'[wp]: - "\\s. valid_queues' s\ setThreadState st t \\rv. valid_queues'\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_valid_queues'_sch_act_simple) - apply (rule_tac Q="\_. valid_queues'" in hoare_post_imp) - apply (clarsimp simp: sch_act_simple_def) - apply (wp threadSet_valid_queues') - apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) - done - -lemma setBoundNotification_valid_queues'[wp]: - "\\s. valid_queues' s\ setBoundNotification ntfn t \\rv. valid_queues'\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_queues') - apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) - done - -lemma valid_tcb'_tcbState_update: - "\ valid_tcb_state' st s; valid_tcb' tcb s \ \ valid_tcb' (tcbState_update (\_. st) tcb) s" - apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def valid_tcb_state'_def) - done - -lemma setThreadState_valid_objs'[wp]: - "\ valid_tcb_state' st and valid_objs' \ setThreadState st t \ \_. valid_objs' \" - apply (simp add: setThreadState_def) - apply (wp threadSet_valid_objs' | clarsimp simp: valid_tcb'_tcbState_update)+ - done - -lemma rescheduleRequired_ksQ: - "\\s. sch_act_simple s \ P (ksReadyQueues s p)\ - rescheduleRequired - \\_ s. P (ksReadyQueues s p)\" - including no_pre - apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) - \ P (ksReadyQueues s p)" in hoare_seq_ext) - apply wpsimp - apply (case_tac x; simp) - apply wp + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma setSchedulerAction_ksQ[wp]: @@ -3027,17 +3976,6 @@ lemma sbn_ksQ: "\\s. P (ksReadyQueues s p)\ setBoundNotification ntfn t \\rv s. P (ksReadyQueues s p)\" by (simp add: setBoundNotification_def, wp) -lemma sts_ksQ: - "\\s. sch_act_simple s \ P (ksReadyQueues s p)\ - setThreadState st t - \\_ s. P (ksReadyQueues s p)\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_ksQ) - apply (rule_tac Q="\_ s. P (ksReadyQueues s p)" in hoare_post_imp) - apply (clarsimp simp: sch_act_simple_def)+ - apply (wp, simp) - done - lemma setQueue_ksQ[wp]: "\\s. P ((ksReadyQueues s)((d, p) := q))\ setQueue d p q @@ -3045,22 +3983,6 @@ lemma setQueue_ksQ[wp]: by (simp add: setQueue_def fun_upd_def[symmetric] | wp)+ -lemma tcbSchedEnqueue_ksQ: - "\\s. t' \ set (ksReadyQueues s p) \ t' \ t \ - tcbSchedEnqueue t \\_ s. t' \ set (ksReadyQueues s p)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wpsimp wp: hoare_vcg_imp_lift threadGet_wp) - apply (drule obj_at_ko_at') - apply fastforce - done - -lemma rescheduleRequired_ksQ': - "\\s. t \ set (ksReadyQueues s p) \ sch_act_not t s \ - rescheduleRequired \\_ s. t \ set (ksReadyQueues s p)\" - apply (simp add: rescheduleRequired_def) - apply (wpsimp wp: tcbSchedEnqueue_ksQ) - done - lemma threadSet_tcbState_st_tcb_at': "\\s. P st \ threadSet (tcbState_update (\_. st)) t \\_. st_tcb_at' P t\" apply (simp add: threadSet_def pred_tcb_at'_def) @@ -3071,36 +3993,6 @@ lemma isRunnable_const: "\st_tcb_at' runnable' t\ isRunnable t \\runnable _. 
runnable \" by (rule isRunnable_wp) -lemma sts_ksQ': - "\\s. (runnable' st \ ksCurThread s \ t) \ P (ksReadyQueues s p)\ - setThreadState st t - \\_ s. P (ksReadyQueues s p)\" - apply (simp add: setThreadState_def) - apply (rule hoare_pre_disj') - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] - threadSet_ksQ]]) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) - apply (clarsimp simp: when_def) - apply (case_tac x) - apply (clarsimp, wp)[1] - apply (clarsimp) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF threadSet_ct threadSet_ksQ]]) - apply (rule hoare_seq_ext [OF _ isRunnable_inv]) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF gct_wp gct_wp]]) - apply (rename_tac ct) - apply (case_tac "ct\t") - apply (clarsimp simp: when_def) - apply (wp)[1] - apply (clarsimp) - done - lemma valid_ipc_buffer_ptr'D: assumes yv: "y < unat max_ipc_words" and buf: "valid_ipc_buffer_ptr' a s" @@ -3217,17 +4109,30 @@ lemmas msgRegisters_unfold unfolded fromEnum_def enum_register, simplified, unfolded toEnum_def enum_register, simplified] +lemma thread_get_registers: + "thread_get (arch_tcb_get_registers \ tcb_arch) t = as_user t (gets user_regs)" + apply (simp add: thread_get_def as_user_def arch_tcb_get_registers_def + arch_tcb_context_get_def arch_tcb_context_set_def) + apply (rule bind_cong [OF refl]) + apply (clarsimp simp: gets_the_member) + apply (simp add: get_def the_run_state_def set_object_def get_object_def + put_def bind_def return_def gets_def) + apply (drule get_tcb_SomeD) + apply (clarsimp simp: map_upd_triv select_f_def image_def return_def) + done + lemma getMRs_corres: - "corres (=) (tcb_at t) - (tcb_at' t and case_option \ valid_ipc_buffer_ptr' buf) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) + (case_option \ valid_ipc_buffer_ptr' buf) (get_mrs t buf mi) (getMRs t buf (message_info_map mi))" proof - have S: "get = gets id" by (simp add: gets_def) - have T: "corres (\con regs. regs = map con msg_registers) (tcb_at t) (tcb_at' t) - (thread_get (arch_tcb_get_registers o tcb_arch) t) (asUser t (mapM getRegister ARM_H.msgRegisters))" - unfolding arch_tcb_get_registers_def - apply (subst thread_get_as_user) + have T: "corres (\con regs. regs = map con msg_registers) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (arch_tcb_get_registers o tcb_arch) t) + (asUser t (mapM getRegister ARM_H.msgRegisters))" + apply (subst thread_get_registers) apply (rule asUser_corres') apply (subst mapM_gets) apply (simp add: getRegister_def) @@ -3291,7 +4196,7 @@ lemma zipWithM_x_corres: apply (rule b) apply (rule a) apply (rule corres_trivial, simp) - apply (rule hoare_post_taut)+ + apply (rule hoare_TrueI)+ done @@ -3306,14 +4211,37 @@ lemma storeWordUser_valid_ipc_buffer_ptr' [wp]: unfolding valid_ipc_buffer_ptr'_def2 by (wp hoare_vcg_all_lift storeWordUser_typ_at') +lemma thread_set_as_user_registers: + "thread_set (\tcb. tcb \ tcb_arch := arch_tcb_set_registers (f (arch_tcb_get_registers (tcb_arch tcb))) + (tcb_arch tcb) \) t + = as_user t (modify (modify_registers f))" +proof - + have P: "\f. 
det (modify f)" + by (simp add: modify_def) + thus ?thesis + apply (simp add: as_user_def P thread_set_def) + apply (clarsimp simp: select_f_def simpler_modify_def bind_def image_def modify_registers_def + arch_tcb_set_registers_def arch_tcb_get_registers_def + arch_tcb_context_set_def arch_tcb_context_get_def) + done +qed + +lemma UserContext_fold: + "UserContext (foldl (\s (x, y). s(x := y)) (user_regs s) xs) = + foldl (\s (r, v). UserContext ((user_regs s)(r := v))) s xs" + apply (induct xs arbitrary: s; simp) + apply (clarsimp split: prod.splits) + by (metis user_context.sel(1)) + lemma setMRs_corres: assumes m: "mrs' = mrs" shows - "corres (=) (tcb_at t and case_option \ in_user_frame buf) - (tcb_at' t and case_option \ valid_ipc_buffer_ptr' buf) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct and case_option \ in_user_frame buf) + (case_option \ valid_ipc_buffer_ptr' buf) (set_mrs t buf mrs) (setMRs t buf mrs')" proof - - have setRegister_def2: "setRegister = (\r v. modify (\s. s ( r := v )))" + have setRegister_def2: + "setRegister = (\r v. modify (\s. UserContext ((user_regs s)(r := v))))" by ((rule ext)+, simp add: setRegister_def) have S: "\xs ys n m. m - n \ length xs \ (zip xs (drop n (take m ys))) = zip xs (drop n ys)" @@ -3327,24 +4255,23 @@ proof - show ?thesis using m unfolding setMRs_def set_mrs_def - apply (clarsimp simp: arch_tcb_set_registers_def arch_tcb_get_registers_def cong: option.case_cong split del: if_split) + apply (clarsimp cong: option.case_cong split del: if_split) apply (subst bind_assoc[symmetric]) apply (fold thread_set_def[simplified]) - apply (subst thread_set_as_user[where f="\context. \reg. - if reg \ set (take (length mrs) msg_registers) - then mrs ! (the_index msg_registers reg) else context reg",simplified]) + apply (subst thread_set_as_user_registers) apply (cases buf) - apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_Nil zipWithM_x_modify + apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_modify take_min_len zip_take_triv2 min.commute) apply (rule corres_guard_imp) apply (rule corres_split_nor[OF asUser_corres']) apply (rule corres_modify') - apply (fastforce simp: fold_fun_upd[symmetric] msgRegisters_unfold + apply (fastforce simp: fold_fun_upd[symmetric] msgRegisters_unfold UserContext_fold + modify_registers_def cong: if_cong simp del: the_index.simps) apply ((wp |simp)+)[6] \ \buf = Some a\ using if_split[split del] - apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_Nil zipWithM_x_modify + apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_modify take_min_len zip_take_triv2 min.commute msgMaxLength_def msgLengthBits_def) apply (simp add: msg_max_length_def) @@ -3352,9 +4279,10 @@ proof - apply (rule corres_split_nor[OF asUser_corres']) apply (rule corres_modify') apply (simp only: msgRegisters_unfold cong: if_cong) - apply (fastforce simp: fold_fun_upd[symmetric]) + apply (fastforce simp: fold_fun_upd[symmetric] msgRegisters_unfold UserContext_fold + modify_registers_def) apply clarsimp - apply (rule corres_split_nor) + apply (rule corres_split_nor) apply (rule_tac S="{((x, y), (x', y')). 
y = y' \ x' = (a + (of_nat x * 4)) \ x < unat max_ipc_words}" in zipWithM_x_corres) apply (fastforce intro: storeWordUser_corres) @@ -3370,13 +4298,12 @@ proof - qed lemma copyMRs_corres: - "corres (=) (tcb_at s and tcb_at r + "corres (=) (tcb_at s and tcb_at r and pspace_aligned and pspace_distinct and case_option \ in_user_frame sb and case_option \ in_user_frame rb and K (unat n \ msg_max_length)) - (tcb_at' s and tcb_at' r - and case_option \ valid_ipc_buffer_ptr' sb - and case_option \ valid_ipc_buffer_ptr' rb) + (case_option \ valid_ipc_buffer_ptr' sb + and case_option \ valid_ipc_buffer_ptr' rb) (copy_mrs s sb r rb n) (copyMRs s sb r rb n)" proof - have U: "unat n \ msg_max_length \ @@ -3386,7 +4313,7 @@ proof - note R=R'[simplified] have as_user_bit: - "\v :: word32. corres dc (tcb_at s and tcb_at r) (tcb_at' s and tcb_at' r) + "\v :: word32. corres dc (tcb_at s and tcb_at r and pspace_aligned and pspace_distinct) \ (mapM (\ra. do v \ as_user s (getRegister ra); as_user r (setRegister ra v) @@ -3529,7 +4456,7 @@ qed lemmas valid_ipc_buffer_cap_simps = valid_ipc_buffer_cap_def [split_simps cap.split arch_cap.split] lemma lookupIPCBuffer_corres': - "corres (=) (tcb_at t and valid_objs and pspace_aligned) + "corres (=) (tcb_at t and valid_objs and pspace_aligned and pspace_distinct) (tcb_at' t and valid_objs' and pspace_aligned' and pspace_distinct' and no_0_obj') (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" @@ -3625,14 +4552,14 @@ lemma ct_in_state'_decomp: shows "\\s. Pre s \ t = (ksCurThread s)\ f \\rv. ct_in_state' Prop\" apply (rule hoare_post_imp [where Q="\rv s. t = ksCurThread s \ st_tcb_at' Prop t s"]) apply (clarsimp simp add: ct_in_state'_def) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (wp x y) apply simp done lemma ct_in_state'_set: "\\s. tcb_at' t s \ P st \ t = ksCurThread s\ setThreadState st t \\rv. ct_in_state' P\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule ct_in_state'_decomp[where t=t]) apply (wp setThreadState_ct') apply (wp setThreadState_st_tcb) @@ -3641,7 +4568,7 @@ lemma ct_in_state'_set: crunches setQueue, rescheduleRequired, tcbSchedDequeue for idle'[wp]: "valid_idle'" - (simp: crunch_simps ) + (simp: crunch_simps wp: crunch_wps) lemma sts_valid_idle'[wp]: "\valid_idle' and valid_pspace' and @@ -3681,8 +4608,9 @@ lemma gbn_sp': lemma tcbSchedDequeue_tcbState_obj_at'[wp]: "\obj_at' (P \ tcbState) t'\ tcbSchedDequeue t \\rv. obj_at' (P \ tcbState) t'\" - apply (simp add: tcbSchedDequeue_def) - apply (wp | simp add: o_def split del: if_split cong: if_cong)+ + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: getObject_tcb_wp simp: o_def threadGet_def) + apply (clarsimp simp: obj_at'_def) done crunch typ_at'[wp]: setQueue "\s. P' (typ_at' P t s)" @@ -3701,10 +4629,14 @@ lemma setQueue_pred_tcb_at[wp]: lemma tcbSchedDequeue_pred_tcb_at'[wp]: "\\s. P' (pred_tcb_at' proj P t' s)\ tcbSchedDequeue t \\_ s. 
P' (pred_tcb_at' proj P t' s)\" apply (rule_tac P=P' in P_bool_lift) - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) done lemma sts_st_tcb': @@ -3789,39 +4721,155 @@ crunch nonz_cap[wp]: addToBitmap "ex_nonz_cap_to' t" crunch iflive'[wp]: removeFromBitmap if_live_then_nonz_cap' crunch nonz_cap[wp]: removeFromBitmap "ex_nonz_cap_to' t" -lemma tcbSchedEnqueue_iflive'[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ - tcbSchedEnqueue tcb \\_. if_live_then_nonz_cap'\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ +crunches rescheduleRequired + for cap_to'[wp]: "ex_nonz_cap_to' p" + +lemma tcbQueued_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbQueued_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedNext_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedPrev_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedPrev_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_ctes_of[wp]: + "threadSet (tcbSchedNext_update f) tptr \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_ctes_of[wp]: + "threadSet (tcbSchedPrev_update f) tptr \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedNext_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedPrev_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedNext_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedPrev_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbQueued_update_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbQueued_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbQueued_update_tcb_cte_cases) + +lemma getTCB_wp: + "\\s. \ko :: tcb. 
ko_at' ko p s \ Q ko s\ getObject p \Q\" + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) done -lemma rescheduleRequired_iflive'[wp]: - "\if_live_then_nonz_cap' - and (\s. \t. ksSchedulerAction s = SwitchToThread t - \ st_tcb_at' runnable' t s)\ - rescheduleRequired - \\rv. if_live_then_nonz_cap'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - apply (clarsimp simp: pred_tcb_at'_def obj_at'_real_def) - apply (erule(1) if_live_then_nonz_capD') - apply (fastforce simp: projectKOs) +lemma tcbQueueRemove_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers and ex_nonz_cap_to' tcbPtr\ + tcbQueueRemove q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_imp_lift' getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (force dest: sym_heapD2[where p'=tcbPtr] sym_heapD1[where p=tcbPtr] + elim: if_live_then_nonz_capE' + simp: valid_tcb'_def opt_map_def obj_at'_def projectKOs ko_wp_at'_def) + done + +lemma tcbQueueRemove_ex_nonz_cap_to'[wp]: + "tcbQueueRemove q tcbPtr \ex_nonz_cap_to' tcbPtr'\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_cap_to' hoare_drop_imps getTCB_wp) + +(* We could write this one as "\t. tcbQueueHead t \ ..." instead, but we can't do the same in + tcbQueueAppend_if_live_then_nonz_cap', and it's nicer if the two lemmas are symmetric *) +lemma tcbQueuePrepend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueHead q)) s)\ + tcbQueuePrepend q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueEnd q)) s)\ + tcbQueueAppend q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive') + +lemma tcbQueueInsert_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcbPtr and valid_objs' and sym_heap_sched_pointers\ + tcbQueueInsert tcbPtr afterPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueInsert_def + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' getTCB_wp) + apply (intro conjI) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ko_wp_at'_def obj_at'_def projectKOs) + apply (erule if_live_then_nonz_capE') + apply (frule_tac p'=afterPtr in sym_heapD2) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def ko_wp_at'_def obj_at'_def projectKOs opt_map_def) + done + +lemma tcbSchedEnqueue_iflive'[wp]: + "\if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. 
if_live_then_nonz_cap'\" + unfolding tcbSchedEnqueue_def + apply (wpsimp wp: tcbQueuePrepend_if_live_then_nonz_cap' threadGet_wp) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule_tac p=tcbPtr in if_live_then_nonz_capE') + apply (fastforce simp: ko_wp_at'_def obj_at'_def projectKOs) + apply clarsimp + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ko_wp_at'_def inQ_def opt_pred_def opt_map_def + obj_at'_def projectKOs + split: option.splits) done +crunches rescheduleRequired + for iflive'[wp]: if_live_then_nonz_cap' + lemma sts_iflive'[wp]: "\\s. if_live_then_nonz_cap' s - \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s)\ + \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s) + \ pspace_aligned' s \ pspace_distinct' s\ setThreadState st t \\rv. if_live_then_nonz_cap'\" apply (simp add: setThreadState_def setQueue_def) - apply (rule hoare_pre) - apply (wp | simp)+ - apply (rule_tac Q="\rv. if_live_then_nonz_cap'" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_iflive' | simp)+ - apply auto - done + apply wpsimp + apply (rule_tac Q="\rv. if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) + apply clarsimp + apply (wpsimp wp: threadSet_iflive') + apply fastforce + done lemma sbn_iflive'[wp]: "\\s. if_live_then_nonz_cap' s @@ -3934,6 +4982,19 @@ lemma setBoundNotification_vms'[wp]: apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) done +lemma threadSet_ct_not_inQ: + "(\tcb. tcbQueued tcb = tcbQueued (F tcb)) + \ threadSet F tcbPtr \\s. P (ct_not_inQ s)\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (erule rsubst[where P=P]) + by (fastforce simp: ct_not_inQ_def obj_at'_def projectKOs objBits_simps ps_clear_def + split: if_splits) + +crunches tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, tcbQueueRemove, addToBitmap + for ct_not_inQ[wp]: ct_not_inQ + (wp: threadSet_ct_not_inQ crunch_wps) + lemma tcbSchedEnqueue_ct_not_inQ: "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ tcbSchedEnqueue t \\_. ct_not_inQ\" @@ -3957,12 +5018,7 @@ lemma tcbSchedEnqueue_ct_not_inQ: done show ?thesis apply (simp add: tcbSchedEnqueue_def unless_def null_def) - apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply wp - apply assumption + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ done qed @@ -3989,12 +5045,7 @@ lemma tcbSchedAppend_ct_not_inQ: done show ?thesis apply (simp add: tcbSchedAppend_def unless_def null_def) - apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply wp - apply assumption + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ done qed @@ -4023,12 +5074,10 @@ lemma rescheduleRequired_sa_cnt[wp]: lemma possibleSwitchTo_ct_not_inQ: "\ct_not_inQ and (\s. 
ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ possibleSwitchTo t \\_. ct_not_inQ\" - (is "\?PRE\ _ \_\") apply (simp add: possibleSwitchTo_def curDomain_def) - apply (wpsimp wp: static_imp_wp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ + apply (wpsimp wp: hoare_weak_lift_imp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ threadGet_wp - | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ - apply (fastforce simp: obj_at'_def) + | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ done lemma threadSet_tcbState_update_ct_not_inQ[wp]: @@ -4044,7 +5093,7 @@ lemma threadSet_tcbState_update_ct_not_inQ[wp]: apply (clarsimp) apply (rule hoare_conjI) apply (rule hoare_weaken_pre) - apply (wps, wp static_imp_wp) + apply (wps, wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb)+ apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4064,7 +5113,7 @@ lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: apply (rule hoare_conjI) apply (rule hoare_weaken_pre) apply wps - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb) apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4108,29 +5157,6 @@ lemma tcbSchedDequeue_ct_not_inQ[wp]: done qed -lemma tcbSchedEnqueue_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ tcbSchedEnqueue t \\_. obj_at' P t'\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp threadGet_wp | simp)+ - apply (clarsimp simp: obj_at'_def) - apply (case_tac obja) - apply fastforce - done - -lemma setThreadState_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ setThreadState st t \\_. obj_at' P t'\" - apply (simp add: setThreadState_def rescheduleRequired_def) - apply (wp hoare_vcg_conj_lift tcbSchedEnqueue_not_st - | wpc - | rule hoare_drop_imps - | simp)+ - apply (clarsimp simp: obj_at'_def) - apply (case_tac obj) - apply fastforce - done - crunch ct_idle_or_in_cur_domain'[wp]: setQueue ct_idle_or_in_cur_domain' (simp: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) @@ -4159,17 +5185,8 @@ lemma removeFromBitmap_ct_idle_or_in_cur_domain'[wp]: | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ done -lemma tcbSchedEnqueue_ksCurDomain[wp]: - "\ \s. P (ksCurDomain s)\ tcbSchedEnqueue tptr \\_ s. P (ksCurDomain s)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply wpsimp - done - -lemma tcbSchedEnqueue_ksDomSchedule[wp]: - "\ \s. P (ksDomSchedule s)\ tcbSchedEnqueue tptr \\_ s. P (ksDomSchedule s)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply wpsimp - done +crunches tcbQueuePrepend + for ct_idle_or_in_cur_domain'[wp]: ct_idle_or_in_cur_domain' lemma tcbSchedEnqueue_ct_idle_or_in_cur_domain'[wp]: "\ct_idle_or_in_cur_domain'\ tcbSchedEnqueue tptr \\_. ct_idle_or_in_cur_domain'\" @@ -4247,12 +5264,383 @@ lemma sts_utr[wp]: apply (wp untyped_ranges_zero_lift) done +lemma removeFromBitmap_bitmapQ: + "\\\ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" + unfolding bitmapQ_defs bitmap_fun_defs + by (wpsimp simp: bitmap_fun_defs) + +lemma removeFromBitmap_valid_bitmapQ[wp]: + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans + and (\s. tcbQueueEmpty (ksReadyQueues s (d,p)))\ + removeFromBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. 
?pre s \ \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: removeFromBitmap_valid_bitmapQ_except removeFromBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done + +crunches tcbSchedDequeue + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + (wp: crunch_wps simp: crunch_simps) + +lemma setQueue_nonempty_valid_bitmapQ': + "\\s. valid_bitmapQ s \ \ tcbQueueEmpty (ksReadyQueues s (d, p))\ + setQueue d p queue + \\_ s. \ tcbQueueEmpty queue \ valid_bitmapQ s\" + apply (wpsimp simp: setQueue_def) + apply (fastforce simp: valid_bitmapQ_def bitmapQ_def) + done + +lemma threadSet_valid_bitmapQ_except[wp]: + "threadSet f tcbPtr \valid_bitmapQ_except d p\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (clarsimp simp: valid_bitmapQ_except_def bitmapQ_def) + done + +lemma threadSet_bitmapQ: + "threadSet F t \bitmapQ domain priority\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + by (clarsimp simp: bitmapQ_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend + for valid_bitmapQ_except[wp]: "valid_bitmapQ_except d p" + and valid_bitmapQ[wp]: valid_bitmapQ + and bitmapQ[wp]: "bitmapQ tdom prio" + (wp: crunch_wps) + +lemma tcbQueued_imp_queue_nonempty: + "\list_queue_relation ts (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)) nexts prevs; + \t. t \ set ts \ (inQ (tcbDomain tcb) (tcbPriority tcb) |< tcbs_of' s) t; + ko_at' tcb tcbPtr s; tcbQueued tcb\ + \ \ tcbQueueEmpty (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb))" + apply (clarsimp simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce dest: heap_path_head + simp: inQ_def opt_map_def opt_pred_def obj_at'_def projectKOs) + done + +lemma tcbSchedDequeue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedDequeue tcbPtr \\_. valid_bitmapQ\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + apply (wpsimp wp: setQueue_nonempty_valid_bitmapQ' hoare_vcg_conj_lift + hoare_vcg_if_lift2 hoare_vcg_const_imp_lift threadGet_wp + | wp (once) hoare_drop_imps)+ + by (fastforce dest!: tcbQueued_imp_queue_nonempty + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + +lemma tcbSchedDequeue_valid_bitmaps[wp]: + "tcbSchedDequeue tcbPtr \valid_bitmaps\" + by (wpsimp simp: valid_bitmaps_def) + +lemma setQueue_valid_bitmapQ': (* enqueue only *) + "\valid_bitmapQ_except d p and bitmapQ d p and K (\ tcbQueueEmpty q)\ + setQueue d p q + \\_. valid_bitmapQ\" + unfolding setQueue_def bitmapQ_defs + by (wpsimp simp: bitmapQ_def) + +lemma tcbSchedEnqueue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedEnqueue tcbPtr \\_. 
valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedEnqueue_def + apply (wpsimp simp: tcbQueuePrepend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp) + apply (fastforce simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def split: if_splits) + done + +crunches tcbSchedEnqueue, tcbSchedAppend + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + +lemma tcbSchedEnqueue_valid_bitmaps[wp]: + "tcbSchedEnqueue tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) + done + +crunches rescheduleRequired, threadSet, setThreadState + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +lemma tcbSchedEnqueue_valid_sched_pointers[wp]: + "tcbSchedEnqueue tcbPtr \valid_sched_pointers\" + apply (clarsimp simp: tcbSchedEnqueue_def getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueuePrepend_def + setQueue_def) + apply (clarsimp simp: valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: valid_sched_pointers_def list_queue_relation_def) + apply (case_tac "ts = []", fastforce simp: tcbQueueEmpty_def) + by (intro conjI impI; + force dest!: hd_in_set heap_path_head + simp: inQ_def opt_pred_def opt_map_def obj_at'_def projectKOs split: if_splits) + +lemma tcbSchedAppend_valid_sched_pointers[wp]: + "tcbSchedAppend tcbPtr \valid_sched_pointers\" + apply (clarsimp simp: tcbSchedAppend_def getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueueAppend_def + setQueue_def) + apply (clarsimp simp: valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + by (intro conjI impI; + clarsimp dest: last_in_set + simp: valid_sched_pointers_def opt_map_def list_queue_relation_def tcbQueueEmpty_def + queue_end_valid_def inQ_def opt_pred_def obj_at'_def projectKOs + split: if_splits option.splits; + fastforce) + +lemma tcbSchedDequeue_valid_sched_pointers[wp]: + "\valid_sched_pointers and sym_heap_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. 
valid_sched_pointers\" + supply if_split[split del] fun_upd_apply[simp del] + apply (clarsimp simp: tcbSchedDequeue_def getQueue_def setQueue_def) + apply (wpsimp wp: threadSet_wp getTCB_wp threadGet_wp simp: tcbQueueRemove_def) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp split: if_splits) + apply (frule (1) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def projectKOs) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI) + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (clarsimp simp: valid_sched_pointers_def) + apply (case_tac "ptr = tcbPtr") + apply (force dest!: heap_ls_last_None + simp: prev_queue_head_def queue_end_valid_def inQ_def opt_map_def + obj_at'_def projectKOs) + apply (simp add: fun_upd_def opt_pred_def) + \ \tcbPtr is the head of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def fun_upd_apply prev_queue_head_def + inQ_def opt_pred_def opt_map_def obj_at'_def projectKOs + split: if_splits option.splits) + \ \tcbPtr is the end of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def queue_end_valid_def inQ_def opt_pred_def + opt_map_def fun_upd_apply obj_at'_def projectKOs + split: if_splits option.splits) + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI impI allI) + by (clarsimp simp: valid_sched_pointers_def inQ_def opt_pred_def opt_map_def fun_upd_apply + obj_at'_def projectKOs + split: if_splits option.splits; + auto) + +lemma tcbQueueRemove_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts)\ + tcbQueueRemove q tcbPtr + \\_. 
sym_heap_sched_pointers\" + supply heap_path_append[simp del] + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + apply (rename_tac tcb ts) + + \ \tcbPtr is the head of q, which is not a singleton\ + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: list_queue_relation_def Let_def) + apply (prop_tac "tcbSchedNext tcb \ Some tcbPtr") + apply (fastforce dest: heap_ls_no_loops[where p=tcbPtr] simp: opt_map_def obj_at'_def projectKOs) + apply (fastforce intro: sym_heap_remove_only' + simp: prev_queue_head_def opt_map_red opt_map_upd_triv obj_at'_def projectKOs) + + \ \tcbPtr is the end of q, which is not a singleton\ + apply (intro impI) + apply (rule conjI) + apply clarsimp + apply (prop_tac "tcbSchedPrev tcb \ Some tcbPtr") + apply (fastforce dest!: heap_ls_prev_no_loops[where p=tcbPtr] + simp: list_queue_relation_def opt_map_def obj_at'_def projectKOs) + apply (subst fun_upd_swap, fastforce) + apply (fastforce intro: sym_heap_remove_only + simp: opt_map_red opt_map_upd_triv obj_at'_def projectKOs) + + \ \tcbPtr is in the middle of q\ + apply (intro conjI impI allI) + apply (frule (2) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []") + apply (fastforce simp: list_queue_relation_def queue_end_valid_def) + apply (clarsimp simp: list_queue_relation_def) + apply (frule (3) ptr_in_middle_prev_next) + apply (frule heap_ls_distinct) + apply (rename_tac afterPtr beforePtr xs ys) + apply (frule_tac before=beforePtr and middle=tcbPtr and after=afterPtr + in sym_heap_remove_middle_from_chain) + apply (fastforce dest: last_in_set simp: opt_map_def obj_at'_def projectKOs) + apply (fastforce dest: hd_in_set simp: opt_map_def obj_at'_def projectKOs) + apply (rule_tac hp="tcbSchedNexts_of s" in sym_heapD2) + apply fastforce + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def projectKOs + split: if_splits) + done + +lemma tcbQueuePrepend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueuePrepend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply if_split[split del] + apply (clarsimp simp: tcbQueuePrepend_def) + apply (wpsimp wp: threadSet_wp) + apply (prop_tac "tcbPtr \ the (tcbQueueHead q)") + apply (case_tac "ts = []"; + fastforce dest: heap_path_head simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac a=tcbPtr and b="the (tcbQueueHead q)" in sym_heap_connect) + apply assumption + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def tcbQueueEmpty_def) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def projectKOs + tcbQueueEmpty_def) + done + +lemma tcbQueueInsert_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueInsert tcbPtr afterPtr + \\_. 
sym_heap_sched_pointers\" + apply (clarsimp simp: tcbQueueInsert_def) + \ \forwards step in order to name beforePtr below\ + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (rule bind_wp[OF _ assert_sp]) + apply (rule hoare_ex_pre_conj[simplified conj_commute], rename_tac beforePtr) + apply (rule bind_wp[OF _ assert_sp]) + apply (wpsimp wp: threadSet_wp) + apply normalise_obj_at' + apply (prop_tac "tcbPtr \ afterPtr") + apply (clarsimp simp: list_queue_relation_def opt_map_red obj_at'_def projectKOs) + apply (prop_tac "tcbPtr \ beforePtr") + apply (fastforce dest: sym_heap_None simp: opt_map_def obj_at'_def projectKOs + split: option.splits) + apply (prop_tac "tcbSchedNexts_of s beforePtr = Some afterPtr") + apply (fastforce intro: sym_heapD2 simp: opt_map_def obj_at'_def projectKOs) + apply (fastforce dest: sym_heap_insert_into_middle_of_chain + simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def projectKOs) + done + +lemma tcbQueueAppend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueAppend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply if_split[split del] + apply (clarsimp simp: tcbQueueAppend_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def + obj_at'_def projectKOs + split: if_splits) + apply fastforce + apply (drule_tac a="last ts" and b=tcbPtr in sym_heap_connect) + apply (fastforce dest: heap_ls_last_None) + apply assumption + apply (simp add: opt_map_red tcbQueueEmpty_def) + apply (subst fun_upd_swap, simp) + apply (fastforce simp: opt_map_red opt_map_upd_triv) + done + +lemma tcbQueued_update_sym_heap_sched_pointers[wp]: + "threadSet (tcbQueued_update f) tcbPtr \sym_heap_sched_pointers\" + by (rule sym_heap_sched_pointers_lift; + wpsimp wp: threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of) + +lemma tcbSchedEnqueue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedEnqueue tcbPtr + \\_. sym_heap_sched_pointers\" + unfolding tcbSchedEnqueue_def + apply (wpsimp wp: tcbQueuePrepend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def projectKOs) + done + +lemma tcbSchedAppend_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedAppend tcbPtr + \\_. sym_heap_sched_pointers\" + unfolding tcbSchedAppend_def + apply (wpsimp wp: tcbQueueAppend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def projectKOs) + done + +lemma tcbSchedDequeue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. 
sym_heap_sched_pointers\" + unfolding tcbSchedDequeue_def + apply (wpsimp wp: tcbQueueRemove_sym_heap_sched_pointers hoare_vcg_if_lift2 threadGet_wp + simp: bitmap_fun_defs) + apply (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def inQ_def opt_pred_def + opt_map_def obj_at'_def projectKOs) + done + +crunches setThreadState + for valid_sched_pointers[wp]: valid_sched_pointers + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (simp: crunch_simps wp: crunch_wps threadSet_valid_sched_pointers threadSet_sched_pointers) + lemma sts_invs_minor': "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st \ (st \ Inactive \ \ idle' st \ st' \ Inactive \ \ idle' st')) t and (\s. t = ksIdleThread s \ idle' st) - and (\s. (\p. t \ set(ksReadyQueues s p)) \ runnable' st) and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) and sch_act_simple and invs'\ @@ -4261,21 +5649,21 @@ lemma sts_invs_minor': including no_pre apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp sts_valid_queues valid_irq_node_lift irqs_masked_lift - setThreadState_ct_not_inQ + apply (wp valid_irq_node_lift irqs_masked_lift + setThreadState_ct_not_inQ | simp add: cteCaps_of_def o_def)+ apply (clarsimp simp: sch_act_simple_def) apply (intro conjI) - apply clarsimp - defer - apply (clarsimp dest!: st_tcb_at_state_refs_ofD' - elim!: rsubst[where P=sym_refs] - intro!: ext) - apply (clarsimp elim!: st_tcb_ex_cap'') + apply clarsimp + defer + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' + elim!: rsubst[where P=sym_refs] + intro!: ext) + apply (clarsimp elim!: st_tcb_ex_cap'') + apply fastforce + apply fastforce apply (frule tcb_in_valid_state', clarsimp+) - apply (cases st, simp_all add: valid_tcb_state'_def - split: Structures_H.thread_state.split_asm) - done + by (cases st; simp add: valid_tcb_state'_def split: Structures_H.thread_state.split_asm) lemma sts_cap_to'[wp]: "\ex_nonz_cap_to' p\ setThreadState st t \\rv. ex_nonz_cap_to' p\" @@ -4312,12 +5700,56 @@ lemma threadSet_ct_running': apply wp done +lemma tcbQueuePrepend_tcbPriority_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def projectKOs objBits_simps ps_clear_def split: if_splits) + +lemma tcbQueuePrepend_tcbDomain_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def projectKOs objBits_simps ps_clear_def split: if_splits) + +lemma tcbSchedDequeue_tcbPriority[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedDequeue_tcbDomain[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedEnqueue_tcbPriority_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +lemma tcbSchedEnqueue_tcbDomain_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +crunches rescheduleRequired + for tcbPriority_obj_at'[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t'" + and tcbDomain_obj_at'[wp]: "obj_at' (\tcb. 
P (tcbDomain tcb)) t'" + +lemma setThreadState_tcbPriority_obj_at'[wp]: + "setThreadState ts tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding setThreadState_def + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: obj_at'_def projectKOs objBits_simps ps_clear_def) + done + lemma setThreadState_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ setThreadState st t \\_. tcb_in_cur_domain' t'\" apply (simp add: tcb_in_cur_domain'_def) apply (rule hoare_pre) apply wps - apply (wp setThreadState_not_st | simp)+ + apply (simp add: setThreadState_def) + apply (wpsimp wp: threadSet_ct_idle_or_in_cur_domain' hoare_drop_imps)+ done lemma asUser_global_refs': "\valid_global_refs'\ asUser t f \\rv. valid_global_refs'\" @@ -4438,10 +5870,13 @@ lemma set_eobject_corres': assumes e: "etcb_relation etcb tcb'" assumes z: "\s. obj_at' P ptr s \ map_to_ctes ((ksPSpace s) (ptr \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" - shows "corres dc (tcb_at ptr and is_etcb_at ptr) - (obj_at' (\ko. non_exst_same ko tcb') ptr - and obj_at' P ptr) - (set_eobject ptr etcb) (setObject ptr tcb')" + shows + "corres dc + (tcb_at ptr and is_etcb_at ptr) + (obj_at' (\ko. non_exst_same ko tcb') ptr and obj_at' P ptr + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain tcb' \ tcbPriority tcb \ tcbPriority tcb') + \ \ tcbQueued tcb) ptr) + (set_eobject ptr etcb) (setObject ptr tcb')" apply (rule corres_no_failI) apply (rule no_fail_pre) apply wp @@ -4462,20 +5897,34 @@ lemma set_eobject_corres': apply (drule(1) bspec) apply (clarsimp simp: non_exst_same_def) apply (case_tac bb; simp) - apply (clarsimp simp: obj_at'_def other_obj_relation_def cte_relation_def tcb_relation_def projectKOs split: if_split_asm)+ + apply (clarsimp simp: obj_at'_def other_obj_relation_def tcb_relation_cut_def cte_relation_def + tcb_relation_def projectKOs + split: if_split_asm)+ apply (clarsimp simp: aobj_relation_cuts_def split: ARM_A.arch_kernel_obj.splits) apply (rename_tac arch_kernel_obj obj d p ts) apply (case_tac arch_kernel_obj; simp) apply (clarsimp simp: pte_relation_def pde_relation_def is_tcb_def split: if_split_asm)+ - apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) - apply (frule bspec, erule domI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: obj_at'_def) - apply (clarsimp simp: projectKOs) - apply (insert e) - apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits ARM_A.arch_kernel_obj.splits) + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: obj_at'_def) + apply (insert e) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type + split: Structures_A.kernel_object.splits kernel_object.splits arch_kernel_obj.splits) + apply (frule in_ready_q_tcbQueued_eq[where t=ptr]) + apply (rename_tac s' conctcb' abstcb exttcb) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def obj_at'_def projectKOs non_exst_same_def split: option.splits) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev tcb') = tcbSchedPrevs_of s'") + apply (fastforce simp: opt_map_def obj_at'_def projectKOs non_exst_same_def split: 
option.splits) + apply (clarsimp simp: ready_queue_relation_def opt_map_def opt_pred_def obj_at'_def projectKOs + inQ_def non_exst_same_def + split: option.splits) + apply metis done lemma set_eobject_corres: @@ -4483,9 +5932,13 @@ lemma set_eobject_corres: assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" assumes r: "r () ()" - shows "corres r (tcb_at add and (\s. ekheap s add = Some etcb)) - (ko_at' tcb' add) - (set_eobject add etcbu) (setObject add tcbu')" + shows + "corres r + (tcb_at add and (\s. ekheap s add = Some etcb)) + (ko_at' tcb' add + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain tcbu' \ tcbPriority tcb \ tcbPriority tcbu') + \ \ tcbQueued tcb) add) + (set_eobject add etcbu) (setObject add tcbu')" apply (rule_tac F="non_exst_same tcb' tcbu' \ etcb_relation etcbu tcbu'" in corres_req) apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) apply (frule(1) pspace_relation_absD) @@ -4512,24 +5965,27 @@ lemma set_eobject_corres: lemma ethread_set_corresT: assumes x: "\tcb'. non_exst_same tcb' (f' tcb')" - assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. - getF (f' tcb) = getF tcb" - assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ - etcb_relation (f etcb) (f' tcb')" - shows "corres dc (tcb_at t and valid_etcbs) - (tcb_at' t) - (ethread_set f t) (threadSet f' t)" + assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (f' tcb) = getF tcb" + assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation (f etcb) (f' tcb')" + shows + "corres dc + (tcb_at t and valid_etcbs) + (tcb_at' t + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain (f' tcb) + \ tcbPriority tcb \ tcbPriority (f' tcb)) + \ \ tcbQueued tcb) t) + (ethread_set f t) (threadSet f' t)" apply (simp add: ethread_set_def threadSet_def bind_assoc) apply (rule corres_guard_imp) apply (rule corres_split[OF corres_get_etcb set_eobject_corres]) apply (rule x) apply (erule e) apply (simp add: z)+ - apply wp+ + apply (wp getObject_tcb_wp)+ apply clarsimp apply (simp add: valid_etcbs_def tcb_at_st_tcb_at[symmetric]) apply (force simp: tcb_at_def get_etcb_def obj_at_def) - apply simp + apply (clarsimp simp: obj_at'_def) done lemmas ethread_set_corres = diff --git a/proof/refine/ARM/Tcb_R.thy b/proof/refine/ARM/Tcb_R.thy index 5a464cafb5..c19190d9b7 100644 --- a/proof/refine/ARM/Tcb_R.thy +++ b/proof/refine/ARM/Tcb_R.thy @@ -46,14 +46,14 @@ lemma activateThread_corres: apply (rule corres_split_nor[OF asUser_setNextPC_corres]) apply (rule setThreadState_corres) apply (simp | wp weak_sch_act_wf_lift_linear)+ - apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_tcb_at invs_distinct) apply fastforce apply (rule corres_guard_imp) apply (rule activateIdleThread_corres) apply (clarsimp elim!: st_tcb_weakenE) apply (clarsimp elim!: pred_tcb'_weakenE) apply (wp gts_st_tcb gts_st_tcb' gts_st_tcb_at)+ - apply (clarsimp simp: ct_in_state_def tcb_at_invs + apply (clarsimp simp: ct_in_state_def tcb_at_invs invs_distinct invs_psp_aligned elim!: st_tcb_weakenE) apply (clarsimp simp: tcb_at_invs' ct_in_state'_def elim!: pred_tcb'_weakenE) @@ -80,10 +80,10 @@ abbreviation lemma gts_st_tcb': "\tcb_at' t\ getThreadState t \\rv. st_tcb_at' (\st. st = rv) t\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule hoare_post_imp[where Q="\rv s. \rv'. rv = rv' \ st_tcb_at' (\st. 
st = rv') t s"]) apply simp - apply (wp hoare_ex_wp) + apply (wp hoare_vcg_ex_lift) apply (clarsimp simp add: pred_tcb_at'_def obj_at'_def) done @@ -98,12 +98,12 @@ lemma activate_invs': activateThread \\rv. invs' and (ct_running' or ct_idle')\" apply (simp add: activateThread_def) - apply (rule hoare_seq_ext) - apply (rule_tac B="\state s. invs' s \ sch_act_simple s + apply (rule bind_wp) + apply (rule_tac Q'="\state s. invs' s \ sch_act_simple s \ st_tcb_at' (\st. st = state) thread s \ thread = ksCurThread s - \ (runnable' state \ idle' state)" in hoare_seq_ext) - apply (case_tac x, simp_all add: isTS_defs hoare_pre_cont + \ (runnable' state \ idle' state)" in bind_wp) + apply (case_tac rv, simp_all add: isTS_defs hoare_pre_cont split del: if_splits cong: if_cong) apply (wp) apply (clarsimp simp: ct_in_state'_def) @@ -154,8 +154,8 @@ lemma activate_sch_act: activateThread \\rv s. P (ksSchedulerAction s)\" apply (simp add: activateThread_def getCurThread_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gets_sp]) - apply (rule hoare_seq_ext[where B="\st s. (runnable' or idle') st + apply (rule bind_wp [OF _ gets_sp]) + apply (rule bind_wp[where Q'="\st s. (runnable' or idle') st \ P (ksSchedulerAction s)"]) apply (rule hoare_pre) apply (wp | wpc | simp add: setThreadState_runnable_simp)+ @@ -197,13 +197,13 @@ lemma setupReplyMaster_weak_sch_act_wf[wp]: apply assumption done -crunches setupReplyMaster - for valid_queues[wp]: "Invariants_H.valid_queues" - and valid_queues'[wp]: "valid_queues'" +crunches setup_reply_master, Tcb_A.restart, arch_post_modify_registers + for pspace_aligned[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" (wp: crunch_wps simp: crunch_simps) lemma restart_corres: - "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and ex_nonz_cap_to' t) (Tcb_A.restart t) (ThreadDecls_H.restart t)" apply (simp add: Tcb_A.restart_def Thread_H.restart_def) apply (simp add: isStopped_def2 liftM_def) @@ -212,20 +212,22 @@ lemma restart_corres: apply (clarsimp simp add: runnable_tsr idle_tsr when_def) apply (rule corres_split_nor[OF cancel_ipc_corres]) apply (rule corres_split_nor[OF setupReplyMaster_corres]) - apply (rule corres_split_nor[OF setThreadState_corres]) - apply clarsimp + apply (rule corres_split_nor[OF setThreadState_corres], simp) apply (rule corres_split[OF tcbSchedEnqueue_corres possibleSwitchTo_corres]) - apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_valid_queues sts_st_tcb' - | clarsimp simp: valid_tcb_state'_def)+ - apply (rule_tac Q="\rv. valid_sched and cur_tcb" in hoare_strengthen_post) - apply wp - apply (simp add: valid_sched_def valid_sched_action_def) - apply (rule_tac Q="\rv. invs' and tcb_at' t" in hoare_strengthen_post) - apply wp - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def) - apply wp+ - apply (simp add: valid_sched_def invs_def tcb_at_is_etcb_at) - apply (clarsimp simp add: invs'_def valid_state'_def sch_act_wf_weak) + apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' + | clarsimp simp: valid_tcb_state'_def | strengthen valid_objs'_valid_tcbs')+ + apply (rule_tac Q="\rv. valid_sched and cur_tcb and pspace_aligned and pspace_distinct" + in hoare_strengthen_post) + apply wp + apply (fastforce simp: valid_sched_def valid_sched_action_def) + apply (rule_tac Q="\rv. 
invs' and ex_nonz_cap_to' t" in hoare_strengthen_post) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def + valid_tcb_state'_def) + apply wp+ + apply (simp add: valid_sched_def invs_def tcb_at_is_etcb_at invs_psp_aligned invs_distinct) + apply clarsimp done lemma restart_invs': @@ -309,12 +311,6 @@ lemma invokeTCB_ReadRegisters_corres: crunch sch_act_simple [wp]: asUser "sch_act_simple" (rule: sch_act_simple_lift) -lemma invs_valid_queues': - "invs' s \ valid_queues' s" - by (clarsimp simp:invs'_def valid_state'_def) - -declare invs_valid_queues'[rule_format, elim!] - lemma einvs_valid_etcbs: "einvs s \ valid_etcbs s" by (clarsimp simp: valid_sched_def) @@ -327,6 +323,11 @@ lemma asUser_postModifyRegisters_corres: apply (rule corres_stateAssert_assume) by simp+ +crunches restart + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + (simp: crunch_simps wp: crunch_wps threadSet_sched_pointers threadSet_valid_sched_pointers) + lemma invokeTCB_WriteRegisters_corres: "corres (dc \ (=)) (einvs and tcb_at dest and ex_nonz_cap_to dest) (invs' and sch_act_simple and tcb_at' dest and ex_nonz_cap_to' dest) @@ -341,20 +342,20 @@ lemma invokeTCB_WriteRegisters_corres: apply (rule asUser_corres) apply (simp add: zipWithM_mapM getRestartPC_def setNextPC_def) apply (rule corres_Id, simp+) - apply (rule no_fail_pre, wp no_fail_mapM) - apply clarsimp - apply (wp no_fail_setRegister | simp)+ + apply (wpsimp wp: no_fail_mapM no_fail_setRegister)+ apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]]) apply (rule corres_split_nor[OF corres_when[OF refl restart_corres]]) apply (rule corres_split_nor[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp apply (wp+)[2] - apply ((wp static_imp_wp restart_invs' - | strengthen valid_sched_weak_strg einvs_valid_etcbs - invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def - dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] + apply ((wp hoare_weak_lift_imp restart_invs' + | strengthen valid_sched_weak_strg einvs_valid_etcbs + invs_weak_sch_act_wf + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues valid_objs'_valid_tcbs' invs_valid_objs' + | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def + dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] apply (rule_tac Q="\_. einvs and tcb_at dest and ex_nonz_cap_to dest" in hoare_strengthen_post[rotated]) apply (fastforce simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def dest!: idle_no_ex_cap) prefer 2 @@ -385,6 +386,10 @@ lemma suspend_ResumeCurrentThread_imp_notct[wp]: \\rv s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" by (wpsimp simp: suspend_def) +crunches restart, suspend + for cur_tcb'[wp]: cur_tcb' + (wp: crunch_wps threadSet_cur ignore: threadSet) + lemma invokeTCB_CopyRegisters_corres: "corres (dc \ (=)) (einvs and simple_sched_action and tcb_at dest and tcb_at src and ex_nonz_cap_to src and @@ -413,6 +418,7 @@ proof - apply simp apply simp apply (simp | wp)+ + apply fastforce+ done have R: "\src src' des des' xs ys. 
\ src = src'; des = des'; xs = ys \ \ corres dc (tcb_at src and tcb_at des and invs) @@ -435,7 +441,7 @@ proof - apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) apply (rule asUser_setNextPC_corres) apply wp+ - apply simp+ + apply fastforce+ done show ?thesis apply (simp add: invokeTCB_def performTransfer_def) @@ -449,7 +455,7 @@ proof - apply (simp add: frame_registers_def frameRegisters_def) apply (simp add: getRestartPC_def setNextPC_def dc_def[symmetric]) apply (rule Q[OF refl refl]) - apply ((wp mapM_x_wp' static_imp_wp | simp)+)[2] + apply ((wp mapM_x_wp' hoare_weak_lift_imp | simp)+)[2] apply (rule corres_split_nor) apply (rule corres_when[OF refl]) apply (rule R[OF refl refl]) @@ -459,21 +465,18 @@ proof - apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp - apply ((wp static_imp_wp)+)[6] - apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_strengthen_post[rotated]) - apply (clarsimp simp: invs_def valid_sched_weak_strg valid_sched_def) + apply (solves \wp hoare_weak_lift_imp\)+ + apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_post_imp) + apply (fastforce simp: invs_def valid_state_def valid_pspace_def valid_sched_weak_strg valid_sched_def) prefer 2 - apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_strengthen_post[rotated]) - apply (clarsimp simp: invs'_def valid_state'_def invs_weak_sch_act_wf) - apply ((wp mapM_x_wp' static_imp_wp | simp)+)[2] - apply ((wp mapM_x_wp' static_imp_wp | simp)+)[1] - apply (wp mapM_x_wp' static_imp_wp | simp)+ - apply ((wp mapM_x_wp' static_imp_wp restart_invs' | wpc | clarsimp simp add: if_apply_def2)+)[2] - apply (wp suspend_nonz_cap_to_tcb static_imp_wp | simp add: if_apply_def2)+ + apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_post_imp) + apply (fastforce simp: invs'_def valid_state'_def invs_weak_sch_act_wf cur_tcb'_def) + apply ((wp mapM_x_wp' hoare_weak_lift_imp | (simp add: cur_tcb'_def[symmetric])+)+)[8] + apply ((wp hoare_weak_lift_imp restart_invs' | wpc | clarsimp simp: if_apply_def2)+)[2] + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp | simp add: if_apply_def2)+ apply (fastforce simp: invs_def valid_state_def valid_pspace_def - dest!: idle_no_ex_cap) - apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) - done + dest!: idle_no_ex_cap) + by (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) qed lemma readreg_invs': @@ -519,41 +522,10 @@ lemma copyreg_invs': \\rv. invs'\" by (rule hoare_strengthen_post, rule copyreg_invs'', simp) -lemma threadSet_valid_queues_no_state: - "\Invariants_H.valid_queues and (\s. \p. t \ set (ksReadyQueues s p))\ - threadSet f t \\_. Invariants_H.valid_queues\" - apply (simp add: threadSet_def) - apply wp - apply (simp add: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def) - done - -lemma threadSet_valid_queues'_no_state: - "(\tcb. tcbQueued tcb = tcbQueued (f tcb)) - \ \valid_queues' and (\s. \p. t \ set (ksReadyQueues s p))\ - threadSet f t \\_. 
valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs - objBits_simps addToQs_def - split del: if_split cong: if_cong) - apply (fastforce simp: projectKOs inQ_def split: if_split_asm) - done - lemma isRunnable_corres: - "corres (\ts runn. runnable ts = runn) (tcb_at t) (tcb_at' t) - (get_thread_state t) (isRunnable t)" + "corres (\ts runn. runnable ts = runn) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_thread_state t) (isRunnable t)" apply (simp add: isRunnable_def) apply (subst bind_return[symmetric]) apply (rule corres_guard_imp) @@ -574,16 +546,6 @@ lemma tcbSchedDequeue_not_queued: apply (wp tg_sp' [where P=\, simplified] | simp)+ done -lemma tcbSchedDequeue_not_in_queue: - "\p. \Invariants_H.valid_queues and tcb_at' t and valid_objs'\ tcbSchedDequeue t - \\rv s. t \ set (ksReadyQueues s p)\" - apply (rule_tac Q="\rv. Invariants_H.valid_queues and obj_at' (Not \ tcbQueued) t" - in hoare_post_imp) - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def projectKOs inQ_def ) - apply (wp tcbSchedDequeue_not_queued tcbSchedDequeue_valid_queues | - simp add: valid_objs'_maxDomain valid_objs'_maxPriority)+ - done - lemma threadSet_ct_in_state': "(\tcb. tcbState (f tcb) = tcbState tcb) \ \ct_in_state' test\ threadSet f t \\rv. ct_in_state' test\" @@ -627,14 +589,19 @@ lemma threadSet_valid_objs_tcbPriority_update: crunch cur[wp]: tcbSchedDequeue cur_tcb' +crunches tcbSchedDequeue + for st_tcb_at'[wp]: "\s. P (st_tcb_at' st tcbPtr s)" + lemma sp_corres2: - "corres dc (valid_etcbs and weak_valid_sched_action and cur_tcb) - (Invariants_H.valid_queues and valid_queues' and cur_tcb' and tcb_at' t - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' and (\_. x \ maxPriority)) - (set_priority t x) (setPriority t x)" + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and tcb_at t + and valid_queues and pspace_aligned and pspace_distinct) + (tcb_at' t and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and (\_. 
x \ maxPriority) and sym_heap_sched_pointers and valid_sched_pointers) + (set_priority t x) (setPriority t x)" apply (simp add: setPriority_def set_priority_def thread_set_priority_def) apply (rule stronger_corres_guard_imp) - apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) apply (rule corres_split[OF ethread_set_corres], simp_all)[1] apply (simp add: etcb_relation_def) apply (rule corres_split[OF isRunnable_corres]) @@ -644,39 +611,44 @@ lemma sp_corres2: apply (rule rescheduleRequired_corres) apply (rule possibleSwitchTo_corres) apply ((clarsimp - | wp static_imp_wp hoare_vcg_if_lift hoare_wp_combs gts_wp + | wp hoare_weak_lift_imp hoare_vcg_if_lift hoare_wp_combs gts_wp isRunnable_wp)+)[4] - apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift) - apply clarsimp - apply ((wp hoare_drop_imps hoare_vcg_if_lift hoare_vcg_all_lift - isRunnable_wp threadSet_pred_tcb_no_state threadSet_valid_queues_no_state - threadSet_valid_queues'_no_state threadSet_cur threadSet_valid_objs_tcbPriority_update - threadSet_weak_sch_act_wf threadSet_ct_in_state'[simplified ct_in_state'_def] - | simp add: etcb_relation_def)+)[1] - apply ((wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift hoare_vcg_disj_lift - tcbSchedDequeue_not_in_queue tcbSchedDequeue_valid_queues - tcbSchedDequeue_ct_in_state'[simplified ct_in_state'_def] - | simp add: etcb_relation_def)+)[2] + apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift + ethread_set_not_queued_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+ + apply ((wp hoare_vcg_imp_lift' hoare_vcg_all_lift + isRunnable_wp threadSet_pred_tcb_no_state + threadSet_valid_objs_tcbPriority_update threadSet_sched_pointers + threadSet_valid_sched_pointers tcb_dequeue_not_queued tcbSchedDequeue_not_queued + threadSet_weak_sch_act_wf + | simp add: etcb_relation_def + | strengthen valid_objs'_valid_tcbs' + obj_at'_weakenE[where P="Not \ tcbQueued"] + | wps)+) apply (force simp: valid_etcbs_def tcb_at_st_tcb_at[symmetric] state_relation_def dest: pspace_relation_tcb_at intro: st_tcb_at_opeqI) - apply (force simp: state_relation_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) + apply clarsimp done -lemma setPriority_corres: "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and valid_objs' and (\_. x \ maxPriority)) - (set_priority t x) (setPriority t x)" +lemma setPriority_corres: + "corres dc + (einvs and tcb_at t) + (invs' and tcb_at' t and valid_objs' and (\_. 
x \ maxPriority)) + (set_priority t x) (setPriority t x)" apply (rule corres_guard_imp) apply (rule sp_corres2) - apply (clarsimp simp: valid_sched_def valid_sched_action_def) + apply (simp add: valid_sched_def valid_sched_action_def invs_psp_aligned invs_distinct invs_def) apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak) done -lemma setMCPriority_corres: "corres dc (tcb_at t) (tcb_at' t) - (set_mcpriority t x) (setMCPriority t x)" +lemma setMCPriority_corres: + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (set_mcpriority t x) (setMCPriority t x)" apply (rule corres_guard_imp) apply (clarsimp simp: setMCPriority_def set_mcpriority_def) apply (rule threadset_corresT) - by (clarsimp simp: tcb_relation_def tcb_cap_cases_tcb_mcpriority - tcb_cte_cases_def exst_same_def)+ + by (clarsimp simp: tcb_relation_def tcb_cap_cases_tcb_mcpriority + tcb_cte_cases_def cteSizeBits_def exst_same_def)+ definition "out_rel fn fn' v v' \ @@ -688,17 +660,18 @@ definition lemma out_corresT: assumes x: "\tcb v. \(getF, setF)\ran tcb_cap_cases. getF (fn v tcb) = getF tcb" assumes y: "\v. \tcb. \(getF, setF)\ran tcb_cte_cases. getF (fn' v tcb) = getF tcb" + assumes sched_pointers: "\tcb v. tcbSchedPrev (fn' v tcb) = tcbSchedPrev tcb" + "\tcb v. tcbSchedNext (fn' v tcb) = tcbSchedNext tcb" + assumes flag: "\tcb v. tcbQueued (fn' v tcb) = tcbQueued tcb" assumes e: "\tcb v. exst_same tcb (fn' v tcb)" shows "out_rel fn fn' v v' \ - corres dc (tcb_at t) - (tcb_at' t) + corres dc (tcb_at t and pspace_aligned and pspace_distinct) + \ (option_update_thread t fn v) (case_option (return ()) (\x. threadSet (fn' x) t) v')" - apply (case_tac v, simp_all add: out_rel_def - option_update_thread_def) - apply clarsimp - apply (clarsimp simp add: threadset_corresT [OF _ x y e]) + apply (case_tac v, simp_all add: out_rel_def option_update_thread_def) + apply (clarsimp simp: threadset_corresT [OF _ x y sched_pointers flag e]) done lemmas out_corres = out_corresT [OF _ all_tcbI, OF ball_tcb_cap_casesI ball_tcb_cte_casesI] @@ -707,109 +680,40 @@ lemma tcbSchedDequeue_sch_act_simple[wp]: "tcbSchedDequeue t \sch_act_simple\" by (wpsimp simp: sch_act_simple_def) -lemma setP_vq[wp]: - "\\s. Invariants_H.valid_queues s \ tcb_at' t s \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s \ p \ maxPriority\ - setPriority t p - \\rv. Invariants_H.valid_queues\" - apply (simp add: setPriority_def) - apply (wpsimp ) - apply (wp hoare_vcg_imp_lift') - unfolding st_tcb_at'_def - apply (strengthen not_obj_at'_strengthen) - apply (wp hoare_wp_combs) - apply (wp hoare_vcg_imp_lift') - apply (wp threadSet_valid_queues threadSet_valid_objs_tcbPriority_update) - apply(wp threadSet_weak_sch_act_wf) - apply clarsimp - apply clarsimp - apply (wp hoare_vcg_imp_lift') - apply (wp threadSet_valid_queues threadSet_valid_objs_tcbPriority_update threadSet_sch_act, clarsimp) - apply (wp add: threadSet_valid_queues comb:hoare_drop_imps ) - apply (clarsimp simp: eq_commute[where a=t]) - apply (wp add: threadSet_valid_queues threadSet_valid_objs_tcbPriority_update threadSet_weak_sch_act_wf - hoare_vcg_imp_lift'[where P="\_ s. ksCurThread s \ _"] hoare_drop_imps hoare_vcg_all_lift - tcbSchedDequeue_not_in_queue tcbSchedEnqueue_valid_objs' tcbSchedDequeue_valid_queues - | clarsimp simp: valid_objs'_maxDomain valid_objs'_maxPriority)+ - done - -lemma valid_queues_subsetE': - "\ valid_queues' s; ksPSpace s = ksPSpace s'; - \x. 
set (ksReadyQueues s x) \ set (ksReadyQueues s' x) \ - \ valid_queues' s'" - by (simp add: valid_queues'_def obj_at'_def - ps_clear_def subset_iff projectKOs) - -crunch vq'[wp]: getCurThread valid_queues' - -lemma setP_vq'[wp]: - "\\s. valid_queues' s \ tcb_at' t s \ sch_act_wf (ksSchedulerAction s) s \ p \ maxPriority\ - setPriority t p - \\rv. valid_queues'\" - apply (simp add: setPriority_def) - apply (wpsimp wp: threadSet_valid_queues' hoare_drop_imps - threadSet_weak_sch_act_wf threadSet_sch_act) - apply (rule_tac Q="\_ s. valid_queues' s \ obj_at' (Not \ tcbQueued) t s \ sch_act_wf (ksSchedulerAction s) s - \ weak_sch_act_wf (ksSchedulerAction s) s" in hoare_strengthen_post, - wp tcbSchedDequeue_valid_queues' tcbSchedDequeue_not_queued) - apply (clarsimp simp: inQ_def) - apply normalise_obj_at' - apply clarsimp - done - -lemma setQueue_invs_bits[wp]: - "\valid_pspace'\ setQueue d p q \\rv. valid_pspace'\" - "\\s. sch_act_wf (ksSchedulerAction s) s\ setQueue d p q \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\\s. sym_refs (state_refs_of' s)\ setQueue d p q \\rv s. sym_refs (state_refs_of' s)\" - "\if_live_then_nonz_cap'\ setQueue d p q \\rv. if_live_then_nonz_cap'\" - "\if_unsafe_then_cap'\ setQueue d p q \\rv. if_unsafe_then_cap'\" - "\cur_tcb'\ setQueue d p q \\rv. cur_tcb'\" - "\valid_global_refs'\ setQueue d p q \\rv. valid_global_refs'\" - "\valid_irq_handlers'\ setQueue d p q \\rv. valid_irq_handlers'\" - by (simp add: setQueue_def tcb_in_cur_domain'_def - | wp sch_act_wf_lift cur_tcb_lift - | fastforce)+ - -lemma setQueue_ex_idle_cap[wp]: - "\\s. ex_nonz_cap_to' (ksIdleThread s) s\ - setQueue d p q - \\rv s. ex_nonz_cap_to' (ksIdleThread s) s\" - by (simp add: setQueue_def, wp, - simp add: ex_nonz_cap_to'_def cte_wp_at_pspaceI) - -lemma tcbPriority_caps_safe: - "\tcb. \x\ran tcb_cte_cases. (\(getF, setF). getF (tcbPriority_update f tcb) = getF tcb) x" - by (rule all_tcbI, rule ball_tcb_cte_casesI, simp+) +lemma tcbSchedNext_update_tcb_cte_cases: + "(a, b) \ ran tcb_cte_cases \ a (tcbPriority_update f tcb) = a tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') -lemma tcbPriority_Queued_caps_safe: - "\tcb. \x\ran tcb_cte_cases. (\(getF, setF). getF (tcbPriority_update f (tcbQueued_update g tcb)) = getF tcb) x" - by (rule all_tcbI, rule ball_tcb_cte_casesI, simp+) +lemma threadSet_priority_invs': + "\invs' and tcb_at' t and K (p \ maxPriority)\ + threadSet (tcbPriority_update (\_. p)) t + \\_. invs'\" + apply (rule hoare_gen_asm) + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace' + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + | clarsimp simp: cteCaps_of_def tcbSchedNext_update_tcb_cte_cases | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: obj_at'_def) lemma setP_invs': "\invs' and tcb_at' t and K (p \ maxPriority)\ setPriority t p \\rv. 
invs'\" - apply (rule hoare_gen_asm) - apply (simp add: setPriority_def) - apply (wp rescheduleRequired_all_invs_but_ct_not_inQ) - apply simp - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift') - unfolding st_tcb_at'_def - apply (strengthen not_obj_at'_strengthen, wp) - apply (wp hoare_vcg_imp_lift') - apply (rule_tac Q="\rv s. invs' s" in hoare_post_imp) - apply (clarsimp simp: invs_sch_act_wf' invs'_def invs_queues) - apply (clarsimp simp: valid_state'_def) - apply (wp hoare_drop_imps threadSet_invs_trivial, - simp_all add: inQ_def cong: conj_cong)[1] - apply (wp hoare_drop_imps threadSet_invs_trivial, - simp_all add: inQ_def cong: conj_cong)[1] - apply (wp hoare_drop_imps threadSet_invs_trivial, - simp_all add: inQ_def cong: conj_cong)[1] - apply (rule_tac Q="\_. invs' and obj_at' (Not \ tcbQueued) t - and (\s. \d p. t \ set (ksReadyQueues s (d,p)))" - in hoare_post_imp) - apply (clarsimp simp: obj_at'_def inQ_def) - apply (wp tcbSchedDequeue_not_queued)+ - apply clarsimp - done + unfolding setPriority_def + by (wpsimp wp: rescheduleRequired_invs' threadSet_priority_invs') crunches setPriority, setMCPriority for typ_at'[wp]: "\s. P (typ_at' T p s)" @@ -1111,11 +1015,6 @@ lemma setMCPriority_valid_objs'[wp]: crunch sch_act_simple[wp]: setMCPriority sch_act_simple (wp: ssa_sch_act_simple crunch_wps rule: sch_act_simple_lift simp: crunch_simps) -(* For some reason, when this was embedded in a larger expression clarsimp wouldn't remove it. Adding it as a simp rule does *) -lemma inQ_tc_corres_helper: - "(\d p. (\tcb. tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d \ (tcbQueued tcb \ tcbDomain tcb \ d)) \ a \ set (ksReadyQueues s (d, p))) = True" - by clarsimp - abbreviation "valid_option_prio \ case_option True (\(p, auth). p \ maxPriority)" definition valid_tcb_invocation :: "tcbinvocation \ bool" where @@ -1139,108 +1038,87 @@ lemma threadcontrol_corres_helper1: apply (clarsimp simp: is_tcb_def) done -lemma threadcontrol_corres_helper2: - "is_aligned a msg_align_bits \ \invs' and tcb_at' t\ - threadSet (tcbIPCBuffer_update (\_. a)) t - \\x s. Invariants_H.valid_queues s \ valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s\" - by (wp threadSet_invs_trivial - | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: inQ_def )+ - -lemma threadcontrol_corres_helper3: +lemma thread_set_ipc_weak_valid_sched_action: "\ einvs and simple_sched_action\ - check_cap_at aaa (ab, ba) (check_cap_at (cap.ThreadCap a) slot (cap_insert aaa (ab, ba) (a, tcb_cnode_index 4))) - \\x. weak_valid_sched_action and valid_etcbs \" + thread_set (tcb_ipc_buffer_update f) a + \\x. weak_valid_sched_action\" apply (rule hoare_pre) - apply (wp check_cap_inv | simp add:)+ - by (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def - get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) + apply (simp add: thread_set_def) + apply (wp set_object_wp) + apply (simp | intro impI | elim exE conjE)+ + apply (frule get_tcb_SomeD) + apply (erule ssubst) + apply (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def + get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) + done + +lemma threadcontrol_corres_helper3: + "\einvs and simple_sched_action\ + check_cap_at cap p (check_cap_at (cap.ThreadCap cap') slot (cap_insert cap p (t, tcb_cnode_index 4))) + \\_ s. 
weak_valid_sched_action s \ in_correct_ready_q s \ ready_qs_distinct s \ valid_etcbs s + \ pspace_aligned s \ pspace_distinct s\" + apply (wpsimp + | strengthen valid_sched_valid_queues valid_queues_in_correct_ready_q + valid_sched_weak_strg[rule_format] valid_queues_ready_qs_distinct)+ + apply (wpsimp wp: check_cap_inv) + apply (fastforce simp: valid_sched_def) + done lemma threadcontrol_corres_helper4: "isArchObjectCap ac \ - \invs' and cte_wp_at' (\cte. cteCap cte = capability.NullCap) (cte_map (a, tcb_cnode_index 4)) and valid_cap' ac \ - checkCapAt ac (cte_map (ab, ba)) - (checkCapAt (capability.ThreadCap a) (cte_map slot) - (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) - \\x. Invariants_H.valid_queues and valid_queues' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\" - apply (wp - | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: )+ + \invs' and cte_wp_at' (\cte. cteCap cte = capability.NullCap) (cte_map (a, tcb_cnode_index 4)) + and valid_cap' ac\ + checkCapAt ac (cte_map (ab, ba)) + (checkCapAt (capability.ThreadCap a) (cte_map slot) + (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) + \\_ s. sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_tcbs' s\" + apply (wpsimp wp: + | strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + invs_valid_objs' valid_objs'_valid_tcbs')+ by (case_tac ac; - clarsimp simp: capBadge_def isArchObjectCap_def isNotificationCap_def isEndpointCap_def - isReplyCap_def isIRQControlCap_def tcb_cnode_index_def cte_map_def cte_wp_at'_def + clarsimp simp: capBadge_def isCap_simps tcb_cnode_index_def cte_map_def cte_wp_at'_def cte_level_bits_def) lemma threadSet_invs_trivialT2: - assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" - assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" - assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" - assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" - assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" - assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbPriority (F tcb) = tcbPriority tcb" + "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" shows - "\\s. invs' s - \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits) - \ tcb_at' t s - \ (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) - \ (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) - \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) - \ (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ - threadSet F t - \\rv. invs'\" -proof - - from z have domains: "\tcb. 
tcbDomain (F tcb) = tcbDomain tcb" by blast - note threadSet_sch_actT_P[where P=False, simplified] - have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ - valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" - by (auto simp: z) - show ?thesis - apply (simp add: invs'_def valid_state'_def split del: if_split) - apply (rule hoare_pre) - apply (rule hoare_gen_asm [where P="(\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)"]) - apply (wp x v u b - threadSet_valid_pspace'T - threadSet_sch_actT_P[where P=False, simplified] - threadSet_valid_queues - threadSet_state_refs_of'T[where f'=id] - threadSet_iflive'T - threadSet_ifunsafe'T - threadSet_idle'T - threadSet_global_refsT - irqs_masked_lift - valid_irq_node_lift - valid_irq_handlers_lift'' - threadSet_ctes_ofT - threadSet_not_inQ - threadSet_ct_idle_or_in_cur_domain' - threadSet_valid_dom_schedule' - threadSet_valid_queues' - threadSet_cur - untyped_ranges_zero_lift - |clarsimp simp: y z a domains cteCaps_of_def |rule refl)+ - apply (clarsimp simp: obj_at'_def projectKOs pred_tcb_at'_def) - apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) - by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) -qed - -lemma threadSet_valid_queues'_no_state2: - "\ \tcb. tcbQueued tcb = tcbQueued (f tcb); - \tcb. tcbState tcb = tcbState (f tcb); - \tcb. tcbPriority tcb = tcbPriority (f tcb); - \tcb. tcbDomain tcb = tcbDomain (f tcb) \ - \ \valid_queues'\ threadSet f t \\_. valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs - objBits_simps addToQs_def - split del: if_split cong: if_cong) - apply (fastforce simp: projectKOs inQ_def split: if_split_asm) - done + "\\s. invs' s \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)\ + threadSet F t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (rule hoare_gen_asm [where P="\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits"]) + apply (wp threadSet_valid_pspace'T + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_global_refsT + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_valid_dom_schedule' + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_idle'T + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + | clarsimp simp: assms cteCaps_of_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: obj_at'_def) lemma getThreadBufferSlot_dom_tcb_cte_cases: "\\\ getThreadBufferSlot a \\rv s. rv \ (+) a ` dom tcb_cte_cases\" @@ -1271,6 +1149,12 @@ lemma valid_tcb_ipc_buffer_update: \ (\tcb. valid_tcb' tcb s \ valid_tcb' (tcbIPCBuffer_update (\_. buf) tcb) s)" by (simp add: valid_tcb'_def tcb_cte_cases_def) +lemma threadSet_invs_tcbIPCBuffer_update: + "\\s. invs' s \ (\tcb. is_aligned (tcbIPCBuffer (tcbIPCBuffer_update f tcb)) msg_align_bits)\ + threadSet (tcbIPCBuffer_update f) t + \\_. 
invs'\" + by (wp threadSet_invs_trivialT2; simp add: tcb_cte_cases_def cteSizeBits_def) + lemma transferCaps_corres: assumes x: "newroot_rel e e'" assumes y: "newroot_rel f f'" @@ -1313,8 +1197,8 @@ lemma transferCaps_corres: (invokeTCB (tcbinvocation.ThreadControl a sl' b' mcp_auth p_auth e' f' g'))" proof - have P: "\t v. corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (option_update_thread t (tcb_fault_handler_update o (%x _. x)) (option_map to_bl v)) (case v of None \ return () @@ -1324,8 +1208,8 @@ proof - apply (safe, case_tac tcb', simp add: tcb_relation_def split: option.split) done have R: "\t v. corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (option_update_thread t (tcb_ipc_buffer_update o (%x _. x)) v) (case v of None \ return () | Some x \ threadSet (tcbIPCBuffer_update (%_. x)) t)" @@ -1338,7 +1222,9 @@ proof - (case_option (return ()) (\p'. setPriority t (fst p')) p_auth)" apply (case_tac p_auth; clarsimp simp: setPriority_corres) done - have S': "\t x. corres dc (tcb_at t) (tcb_at' t) + have S': "\t x. corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ (case_option (return ()) (\(mcp, auth). set_mcpriority t mcp) mcp_auth) (case_option (return ()) (\mcp'. setMCPriority t (fst mcp')) mcp_auth)" apply(case_tac mcp_auth; clarsimp simp: setMCPriority_corres) @@ -1462,23 +1348,33 @@ proof - apply (rule corres_split[OF getCurThread_corres], clarsimp) apply (rule corres_when[OF refl rescheduleRequired_corres]) apply (wpsimp wp: gct_wp)+ - apply (wp hoare_drop_imp) - apply (rule threadcontrol_corres_helper1[unfolded pred_conj_def]) - apply simp - apply (wp hoare_drop_imp) - apply (wp threadcontrol_corres_helper2 | wpc | simp)+ + apply (strengthen valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_ipc_weak_valid_sched_action thread_set_valid_queues + hoare_drop_imp) + apply clarsimp + apply (strengthen valid_objs'_valid_tcbs' invs_valid_objs')+ + apply (wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers hoare_drop_imp + threadSet_invs_tcbIPCBuffer_update) + apply (clarsimp simp: pred_conj_def) + apply (strengthen einvs_valid_etcbs valid_queues_in_correct_ready_q + valid_sched_valid_queues invs_psp_aligned invs_distinct)+ + apply wp + apply (clarsimp simp: pred_conj_def) + apply (strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + valid_objs'_valid_tcbs' invs_valid_objs') apply (wpsimp wp: cteDelete_invs' hoare_vcg_conj_lift) apply (fastforce simp: emptyable_def) apply fastforce apply clarsimp apply (rule corres_guard_imp) apply (rule corres_split_norE[OF cteDelete_corres]) - apply (rule_tac F="is_aligned aa msg_align_bits" in corres_gen_asm) + apply (rule_tac F="is_aligned aa msg_align_bits" + in corres_gen_asm) apply (rule_tac F="isArchObjectCap ac" in corres_gen_asm2) apply (rule corres_split_nor) apply (rule threadset_corres, simp add: tcb_relation_def, (simp add: exst_same_def)+) - apply (rule corres_split_nor) + apply (rule corres_split) apply (erule checkCapAt_cteInsert_corres) apply (rule corres_split[OF getCurThread_corres], clarsimp) apply (rule corres_when[OF refl rescheduleRequired_corres]) @@ -1487,22 +1383,21 @@ proof - apply (wp hoare_drop_imp threadcontrol_corres_helper4)[1] apply (wp thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial thread_set_not_state_valid_sched - | simp add: ran_tcb_cap_cases)+ + | simp add: ran_tcb_cap_cases)+ apply (wp threadSet_invs_trivial threadSet_cte_wp_at' | simp)+ apply 
(wp cap_delete_deletes cap_delete_cte_at cap_delete_valid_cap cteDelete_deletes cteDelete_invs' - | strengthen use_no_cap_to_obj_asid_strg - | clarsimp simp: inQ_def inQ_tc_corres_helper)+ + | strengthen use_no_cap_to_obj_asid_strg invs_psp_aligned invs_distinct + | clarsimp simp: inQ_def)+ apply (clarsimp simp: cte_wp_at_caps_of_state dest!: is_cnode_or_valid_arch_cap_asid) - apply (clarsimp simp: emptyable_def) + apply (fastforce simp: emptyable_def) apply (clarsimp simp: inQ_def) apply (clarsimp simp: obj_at_def is_tcb) apply (rule cte_wp_at_tcbI, simp, fastforce, simp) - apply (clarsimp simp: cte_map_def tcb_cnode_index_def obj_at'_def - projectKOs objBits_simps) + apply (clarsimp simp: cte_map_def tcb_cnode_index_def obj_at'_def projectKOs objBits_simps) apply (erule(2) cte_wp_at_tcbI', fastforce simp: objBits_defs cte_level_bits_def, simp) done have U: "getThreadCSpaceRoot a = return (cte_map (a, tcb_cnode_index 0))" @@ -1550,6 +1445,10 @@ proof - out_no_cap_to_trivial [OF ball_tcb_cap_casesI] checked_insert_no_cap_to note if_cong [cong] option.case_cong [cong] + \ \This proof is quite fragile and was written when bind_wp was added to the wp set later + in the theory dependencies, and so was matched with before alternatives. We re-add it here to + create a similar environment and avoid needing to rework the proof.\ + note bind_wp[wp] show ?thesis apply (simp add: invokeTCB_def liftE_bindE) apply (simp only: eq_commute[where a= "a"]) @@ -1565,41 +1464,26 @@ proof - apply wp apply wp apply (wpsimp wp: hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift - hoare_vcg_all_lift_R hoare_vcg_all_lift as_user_invs cap_delete_deletes - thread_set_ipc_tcb_cap_valid thread_set_tcb_ipc_buffer_cap_cleared_invs - thread_set_cte_wp_at_trivial thread_set_valid_cap cap_delete_valid_cap - reschedule_preserves_valid_sched thread_set_not_state_valid_sched + hoare_vcg_all_liftE_R hoare_vcg_all_lift + as_user_invs thread_set_ipc_tcb_cap_valid + thread_set_tcb_ipc_buffer_cap_cleared_invs + thread_set_cte_wp_at_trivial + thread_set_valid_cap + reschedule_preserves_valid_sched check_cap_inv[where P=valid_sched] (* from stuff *) check_cap_inv[where P="tcb_at p0" for p0] - simp: ran_tcb_cap_cases) + thread_set_not_state_valid_sched + check_cap_inv[where P=simple_sched_action] + cap_delete_deletes hoare_drop_imps + cap_delete_valid_cap + simp: ran_tcb_cap_cases + | strengthen simple_sched_action_sched_act_not)+ apply (strengthen use_no_cap_to_obj_asid_strg) apply (wpsimp wp: cap_delete_cte_at cap_delete_valid_cap) - apply (wpsimp wp: hoare_drop_imps) - apply ((wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_imp_lift' hoare_vcg_all_lift - threadSet_cte_wp_at' threadSet_invs_trivialT2 cteDelete_invs' - simp: tcb_cte_cases_def), (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_cte_wp_at' - simp: tcb_cte_cases_def) - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def, (fastforce+)[6]) - apply wpsimp - 
apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_cap_to' threadSet_invs_trivialT2 - threadSet_cte_wp_at' hoare_drop_imps - simp: tcb_cte_cases_def) - apply (clarsimp) - apply ((wpsimp wp: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + threadSet_invs_tcbIPCBuffer_update threadSet_cte_wp_at' + | strengthen simple_sched_action_sched_act_not)+ + apply ((wpsimp wp: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift threadSet_valid_objs' thread_set_not_state_valid_sched thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial @@ -1611,14 +1495,14 @@ proof - | strengthen tcb_cap_always_valid_strg tcb_at_invs use_no_cap_to_obj_asid_strg - | (erule exE, clarsimp simp: word_bits_def))+) + | (erule exE, clarsimp simp: word_bits_def) | wp (once) hoare_drop_imps)+) apply (strengthen valid_tcb_ipc_buffer_update) - apply (strengthen invs_valid_objs')+ + apply (strengthen invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct') apply (wpsimp wp: cteDelete_invs' hoare_vcg_imp_lift' hoare_vcg_all_lift) apply wpsimp apply wpsimp apply (clarsimp cong: imp_cong conj_cong simp: emptyable_def) - apply (rule_tac Q'="\_. ?T2_pre" in hoare_post_imp_R[simplified validE_R_def, rotated]) + apply (rule_tac Q'="\_. ?T2_pre" in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) (* beginning to deal with is_nondevice_page_cap *) apply (clarsimp simp: emptyable_def is_nondevice_page_cap_simps is_cap_simps is_cnode_or_valid_arch_def obj_ref_none_no_asid cap_asid_def @@ -1633,9 +1517,9 @@ proof - | wpc | strengthen tcb_cap_always_valid_strg use_no_cap_to_obj_asid_strg)+)[1] apply (clarsimp cong: imp_cong conj_cong) apply (rule_tac Q'="\_. ?T2_pre' and (\s. valid_option_prio p_auth)" - in hoare_post_imp_R[simplified validE_R_def, rotated]) + in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) apply (case_tac g'; clarsimp simp: isCap_simps ; clarsimp elim: invs_valid_objs' cong:imp_cong) - apply (wp add: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + apply (wp add: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift setMCPriority_invs' threadSet_valid_objs' thread_set_not_state_valid_sched setP_invs' typ_at_lifts [OF setPriority_typ_at'] @@ -1645,8 +1529,8 @@ proof - | simp add: ran_tcb_cap_cases split_def U V emptyable_def | wpc | strengthen tcb_cap_always_valid_strg - use_no_cap_to_obj_asid_strg - | wp (once) add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs + use_no_cap_to_obj_asid_strg invs_psp_aligned invs_distinct + | wp add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs | (erule exE, clarsimp simp: word_bits_def))+ (* the last two subgoals *) apply (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] tcb_at_st_tcb_at[symmetric] @@ -1657,7 +1541,7 @@ proof - split: option.split_asm) by (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def objBits_defs cte_map_tcb_0 cte_map_tcb_1[simplified] tcb_at_cte_at' cte_at_tcb_at_16' - isCap_simps domIff valid_tcb'_def tcb_cte_cases_def arch_cap_fun_lift_def + isCap_simps domIff valid_tcb'_def tcb_cte_cases_def split: option.split_asm dest!: isValidVTableRootD) qed @@ -1697,31 +1581,31 @@ lemma tc_invs': apply (simp only: eq_commute[where a="a"]) apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. 
x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp setMCPriority_invs' + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] - apply (wp add: setP_invs' static_imp_wp hoare_vcg_all_lift)+ + apply (wp add: setP_invs' hoare_weak_lift_imp hoare_vcg_all_lift)+ apply (rule case_option_wp_None_return[OF setP_invs'[simplified pred_conj_assoc]]) apply clarsimp apply wpfix apply assumption apply (rule case_option_wp_None_returnOk) - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak threadSet_invs_trivial2 threadSet_tcb' hoare_vcg_all_lift threadSet_cte_wp_at')+ - apply (wpsimp wp: static_imp_wpE cteDelete_deletes - hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + apply (wpsimp wp: hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_invs' cteDelete_typ_at'_lifts)+ apply (assumption | clarsimp cong: conj_cong imp_cong | (rule case_option_wp_None_returnOk) - | wpsimp wp: static_imp_wp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + | wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak hoare_vcg_imp_lift' hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] - hoare_vcg_const_imp_lift_R assertDerived_wp_weak static_imp_wpE cteDelete_deletes - hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + hoare_vcg_const_imp_lift_R assertDerived_wp_weak hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_typ_at'_lifts cteDelete_sch_act_simple)+ apply (clarsimp simp: tcb_cte_cases_def cte_level_bits_def objBits_defs tcbIPCBufferSlot_def) by (auto dest!: isCapDs isReplyCapD isValidVTableRootD simp: isCap_simps) @@ -1737,7 +1621,7 @@ lemma setSchedulerAction_invs'[wp]: apply (simp add: setSchedulerAction_def) apply wp apply (clarsimp simp add: invs'_def valid_state'_def valid_irq_node'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs cur_tcb'_def + valid_queues_def bitmapQ_defs cur_tcb'_def ct_not_inQ_def) apply (simp add: ct_idle_or_in_cur_domain'_def) done @@ -1867,8 +1751,8 @@ lemma invokeTCB_corres: apply (rule TcbAcc_R.rescheduleRequired_corres) apply (rule corres_trivial, simp) apply (wpsimp wp: hoare_drop_imp)+ - apply (clarsimp simp: valid_sched_weak_strg einvs_valid_etcbs) - apply (clarsimp simp: Tcb_R.invs_valid_queues' Invariants_H.invs_queues) + apply (fastforce dest: valid_sched_valid_queues simp: valid_sched_weak_strg einvs_valid_etcbs) + apply fastforce done lemma tcbBoundNotification_caps_safe[simp]: @@ -1883,6 +1767,10 @@ lemma valid_bound_ntfn_lift: apply (wp typ_at_lifts[OF P])+ done +crunches setBoundNotification + for 
sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (ignore: threadSet wp: threadSet_sched_pointers) + lemma bindNotification_invs': "\bound_tcb_at' ((=) None) tcbptr and ex_nonz_cap_to' ntfnptr @@ -1893,9 +1781,9 @@ lemma bindNotification_invs': \\_. invs'\" including no_pre apply (simp add: bindNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp set_ntfn_valid_pspace' sbn_sch_act' sbn_valid_queues valid_irq_node_lift + apply (wp set_ntfn_valid_pspace' sbn_sch_act' valid_irq_node_lift setBoundNotification_ct_not_inQ valid_bound_ntfn_lift untyped_ranges_zero_lift | clarsimp dest!: global'_no_ex_cap simp: cteCaps_of_def)+ @@ -2066,7 +1954,7 @@ lemma eq_ucast_word8[simp]: done lemma checkPrio_corres: - "corres (ser \ dc) (tcb_at auth) (tcb_at' auth) + "corres (ser \ dc) (tcb_at auth and pspace_aligned and pspace_distinct) \ (check_prio p auth) (checkPrio p auth)" apply (simp add: check_prio_def checkPrio_def) apply (rule corres_guard_imp) @@ -2089,7 +1977,7 @@ lemma decodeSetPriority_corres: "\ cap_relation cap cap'; is_thread_cap cap; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ corres (ser \ tcbinv_relation) - (cur_tcb and valid_etcbs and (\s. \x \ set extras. s \ (fst x))) + (cur_tcb and valid_etcbs and (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) (invs' and (\s. \x \ set extras'. s \' (fst x))) (decode_set_priority args cap slot extras) (decodeSetPriority args cap' extras')" @@ -2097,18 +1985,17 @@ lemma decodeSetPriority_corres: clarsimp simp: decode_set_priority_def decodeSetPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) - apply (wpsimp simp: valid_cap_def valid_cap'_def)+ - done + by (wpsimp simp: valid_cap_def valid_cap'_def)+ lemma decodeSetMCPriority_corres: "\ cap_relation cap cap'; is_thread_cap cap; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ corres (ser \ tcbinv_relation) - (cur_tcb and valid_etcbs and (\s. \x \ set extras. s \ (fst x))) + (cur_tcb and valid_etcbs and (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) (invs' and (\s. \x \ set extras'. s \' (fst x))) (decode_set_mcpriority args cap slot extras) (decodeSetMCPriority args cap' extras')" @@ -2116,12 +2003,11 @@ lemma decodeSetMCPriority_corres: clarsimp simp: decode_set_mcpriority_def decodeSetMCPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) - apply (wpsimp simp: valid_cap_def valid_cap'_def)+ - done + by (wpsimp simp: valid_cap_def valid_cap'_def)+ lemma getMCP_sp: "\P\ threadGet tcbMCP t \\rv. mcpriority_tcb_at' (\st. st = rv) t and P\" @@ -2147,7 +2033,7 @@ lemma checkPrio_wp: checkPrio prio auth \ \rv. 
P \,-" apply (simp add: checkPrio_def) - apply (wp NonDetMonadVCG.whenE_throwError_wp getMCP_wp) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) by (auto simp add: pred_tcb_at'_def obj_at'_def) lemma checkPrio_lt_ct: @@ -2156,7 +2042,7 @@ lemma checkPrio_lt_ct: lemma checkPrio_lt_ct_weak: "\\\ checkPrio prio auth \\rv s. mcpriority_tcb_at' (\mcp. ucast prio \ mcp) auth s\, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule checkPrio_lt_ct) apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) by (rule le_ucast_ucast_le) simp @@ -2216,7 +2102,8 @@ lemma decodeSetSchedParams_corres: "\ cap_relation cap cap'; is_thread_cap cap; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ corres (ser \ tcbinv_relation) - (cur_tcb and valid_etcbs and (\s. \x \ set extras. s \ (fst x))) + (cur_tcb and valid_etcbs and + (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) (invs' and (\s. \x \ set extras'. s \' (fst x))) (decode_set_sched_params args cap slot extras) (decodeSetSchedParams args cap' extras')" @@ -2227,7 +2114,7 @@ lemma decodeSetSchedParams_corres: apply (clarsimp split: list.split simp: list_all2_Cons2) apply (clarsimp simp: list_all2_Cons1 neq_Nil_conv val_le_length_Cons linorder_not_less) apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_split_norE[OF checkPrio_corres]) apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) @@ -2349,7 +2236,7 @@ lemma slotCapLongRunningDelete_corres: lemma slot_long_running_inv'[wp]: "\P\ slotCapLongRunningDelete ptr \\rv. P\" apply (simp add: slotCapLongRunningDelete_def) - apply (rule hoare_seq_ext [OF _ getCTE_inv]) + apply (rule bind_wp [OF _ getCTE_inv]) apply (rule hoare_pre, wpcw, (wp isFinalCapability_inv)+) apply simp done @@ -2519,11 +2406,11 @@ lemma decodeTCBConfigure_corres: apply (rule decodeSetIPCBuffer_corres; simp) apply (rule corres_splitEE) apply (rule decodeSetSpace_corres; simp) - apply (rule_tac F="is_thread_control set_params" in corres_gen_asm) - apply (rule_tac F="is_thread_control set_space" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_params" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_space" in corres_gen_asm) apply (rule_tac F="tcThreadCapSlot setSpace = cte_map slot" in corres_gen_asm2) apply (rule corres_trivial) - apply (clarsimp simp: returnOk_def is_thread_control_def2 is_cap_simps) + apply (clarsimp simp: tcb_invocation.is_ThreadControl_def returnOk_def is_cap_simps) apply (wp | simp add: invs_def valid_sched_def)+ done @@ -2554,15 +2441,13 @@ lemma decodeTCBConf_wf[wp]: apply (rule_tac Q'="\setSpace s. tcb_inv_wf' setSpace s \ tcb_inv_wf' setIPCParams s \ isThreadControl setSpace \ isThreadControl setIPCParams \ tcThread setSpace = t \ tcNewCRoot setSpace \ None" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp: isThreadControl_def2 cong: option.case_cong) apply wpsimp apply (fastforce simp: isThreadControl_def2 objBits_defs) done -declare hoare_True_E_R [simp del] - lemma lsft_real_cte: "\valid_objs'\ lookupSlotForThread t x \\rv. 
real_cte_at' rv\, -" apply (simp add: lookupSlotForThread_def) @@ -2630,8 +2515,8 @@ notes if_cong[cong] shows lemma decodeUnbindNotification_corres: "corres (ser \ tcbinv_relation) - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (decode_unbind_notification (cap.ThreadCap t)) (decodeUnbindNotification (capability.ThreadCap t))" apply (simp add: decode_unbind_notification_def decodeUnbindNotification_def) @@ -2681,7 +2566,8 @@ lemma decodeTCBInvocation_corres: corres_guard_imp[OF decodeBindNotification_corres] corres_guard_imp[OF decodeUnbindNotification_corres] corres_guard_imp[OF decodeSetTLSBase_corres], - simp_all add: valid_cap_simps valid_cap_simps' invs_def valid_sched_def) + simp_all add: valid_cap_simps valid_cap_simps' invs_def valid_state_def + valid_pspace_def valid_sched_def) apply (auto simp: list_all2_map1 list_all2_map2 elim!: list_all2_mono) done @@ -2752,6 +2638,7 @@ crunches getThreadBufferSlot, setPriority, setMCPriority lemma inv_tcb_IRQInactive: "\valid_irq_states'\ invokeTCB tcb_inv -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + including classic_wp_pre apply (simp add: invokeTCB_def) apply (rule hoare_pre) apply (wpc | diff --git a/proof/refine/ARM/Untyped_R.thy b/proof/refine/ARM/Untyped_R.thy index 5a7d2f5b2b..87c16faba5 100644 --- a/proof/refine/ARM/Untyped_R.thy +++ b/proof/refine/ARM/Untyped_R.thy @@ -307,7 +307,7 @@ next apply simp apply (rule getSlotCap_corres,simp) apply (wp lookup_slot_for_cnode_op_inv - hoare_drop_impE_R hoare_vcg_all_lift_R)+ + hoare_drop_impE_R hoare_vcg_all_liftE_R)+ apply (rule_tac corres_split_norE) apply (rule corres_if) apply simp @@ -684,7 +684,7 @@ lemma map_ensure_empty': apply (wp getCTE_wp') apply (clarsimp elim!: cte_wp_at_weakenE') apply (erule meta_allE) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply clarsimp done @@ -1358,16 +1358,6 @@ crunches insertNewCap crunch exst[wp]: set_cdt "\s. P (exst s)" -(*FIXME: Move to StateRelation*) -lemma state_relation_schact[elim!]: - "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" - apply (simp add: state_relation_def) - done - -lemma state_relation_queues[elim!]: "(s,s') \ state_relation \ ready_queues_relation (ready_queues s) (ksReadyQueues s')" - apply (simp add: state_relation_def) - done - lemma set_original_symb_exec_l: "corres_underlying {(s, s'). f (kheap s) (exst s) s'} nf nf' dc P P' (set_original p b) (return x)" by (simp add: corres_underlying_def return_def set_original_def in_monad Bex_def) @@ -1398,6 +1388,10 @@ lemma updateNewFreeIndex_noop_psp_corres: | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ done +crunches updateMDB, updateNewFreeIndex, setCTE + for rdyq_projs[wp]: + "\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) (\d p. inQ d p |< tcbs_of' s)" + lemma insertNewCap_corres: notes if_cong[cong del] if_weak_cong[cong] shows @@ -3181,7 +3175,7 @@ lemma createNewCaps_valid_cap': lemma dmo_ctes_of[wp]: "\\s. P (ctes_of s)\ doMachineOp mop \\rv s. P (ctes_of s)\" - by (simp add: doMachineOp_def split_def | wp select_wp)+ + by (simp add: doMachineOp_def split_def | wp)+ lemma createNewCaps_ranges: "\\s. 
range_cover ptr sz (APIType_capBits ty us) n \ 0 @@ -3505,7 +3499,7 @@ lemma updateFreeIndex_mdb_simple': and cte_wp_at' :"ctes_of s src = Some cte" "cteCap cte = capability.UntypedCap d ptr sz idx'" and unt_inc' :"untyped_inc' (ctes_of s)" and valid_objs' :"valid_objs' s" - and invp: "mdb_inv_preserve (ctes_of s) (ctes_of s(src \ cteCap_update (\_. capability.UntypedCap d ptr sz idx) cte))" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. UntypedCap d ptr sz idx) cte))" (is "mdb_inv_preserve (ctes_of s) ?ctes") show "untyped_inc' ?ctes" @@ -3607,8 +3601,8 @@ lemma updateFreeIndex_clear_invs': apply (simp add:updateCap_def) apply (wp setCTE_irq_handlers' getCTE_wp) apply (simp add:updateCap_def) - apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift - hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp + apply (wp irqs_masked_lift cur_tcb_lift ct_idle_or_in_cur_domain'_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp valid_bitmaps_lift | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] | simp add: getSlotCap_def | simp add: cte_wp_at_ctes_of)+ @@ -4013,15 +4007,17 @@ lemma idx_le_new_offs: end +context begin interpretation Arch . (*FIXME: arch_split*) + lemma valid_sched_etcbs[elim!]: "valid_sched_2 queues ekh sa cdom kh ct it \ valid_etcbs_2 ekh kh" by (simp add: valid_sched_def) crunch ksIdleThread[wp]: deleteObjects "\s. P (ksIdleThread s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp ignore: freeMemory) + (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) crunch ksCurDomain[wp]: deleteObjects "\s. P (ksCurDomain s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp ignore: freeMemory) + (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) crunch irq_node[wp]: deleteObjects "\s. P (irq_node' s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp ignore: freeMemory) + (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) lemma deleteObjects_ksCurThread[wp]: "\\s. P (ksCurThread s)\ deleteObjects ptr sz \\_ s. P (ksCurThread s)\" @@ -4171,14 +4167,12 @@ context begin interpretation Arch . (*FIXME: arch_split*) lemma resetUntypedCap_corres: "untypinv_relation ui ui' \ corres (dc \ dc) - (invs and valid_untyped_inv_wcap ui - (Some (cap.UntypedCap dev ptr sz idx)) - and ct_active and einvs - and (\_. \ptr_base ptr' ty us slots dev'. ui = Invocations_A.Retype slot True - ptr_base ptr' ty us slots dev)) - (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') - (reset_untyped_cap slot) - (resetUntypedCap (cte_map slot))" + (einvs and schact_is_rct and ct_active + and valid_untyped_inv_wcap ui (Some (cap.UntypedCap dev ptr sz idx)) + and (\_. \ptr_base ptr' ty us slots dev'. 
+ ui = Invocations_A.Retype slot True ptr_base ptr' ty us slots dev)) + (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') + (reset_untyped_cap slot) (resetUntypedCap (cte_map slot))" apply (rule corres_gen_asm, clarsimp) apply (simp add: reset_untyped_cap_def resetUntypedCap_def liftE_bindE) @@ -4329,7 +4323,7 @@ lemma resetUntypedCap_corres: apply (frule if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) apply (frule(1) descendants_range_ex_cte'[OF empty_descendants_range_in' _ order_refl], (simp add: isCap_simps)+) - apply (intro conjI impI; clarsimp) + apply (auto simp: descendants_range_in'_def valid_untyped'_def) done end @@ -4428,7 +4422,7 @@ lemma resetUntypedCap_invs_etc: ?f \\_. invs' and ?vu2 and ct_active' and ?psp\, \\_. invs'\") apply (simp add: resetUntypedCap_def getSlotCap_def liftE_bind_return_bindE_returnOk bindE_assoc) - apply (rule hoare_vcg_seqE[rotated]) + apply (rule bindE_wp_fwd) apply simp apply (rule getCTE_sp) apply (rule hoare_name_pre_stateE) @@ -4445,8 +4439,8 @@ lemma resetUntypedCap_invs_etc: (simp_all add: cte_wp_at_ctes_of)+)[1] apply (clarsimp simp: unlessE_def cte_wp_at_ctes_of split del: if_split) - apply (rule_tac B="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) - and ct_active' and ?psp" in hoare_vcg_seqE[rotated]) + apply (rule_tac Q'="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) + and ct_active' and ?psp" in bindE_wp_fwd) apply clarsimp apply (rule hoare_pre) apply (simp add: sch_act_simple_def) @@ -4488,7 +4482,7 @@ lemma resetUntypedCap_invs_etc: modify_map_def) apply auto[1] apply simp - apply (rule hoare_pre, rule hoare_post_impErr, + apply (rule hoare_pre, rule hoare_strengthen_postE, rule_tac P="\i. invs' and ?psp and ct_active' and valid_untyped_inv_wcap' ?ui (Some (UntypedCap dev ptr sz (if i = 0 then idx else (length [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] - i) * 2 ^ resetChunkBits)))" @@ -4576,7 +4570,7 @@ lemma whenE_reset_resetUntypedCap_invs_etc: and ct_active' and pspace_no_overlap' (if reset then ptr else ptr') sz\, \\_. invs'\" apply (rule hoare_pre) - apply (wp hoare_whenE_wp resetUntypedCap_invs_etc[where idx=idx, + apply (wp whenE_wp resetUntypedCap_invs_etc[where idx=idx, simplified pred_conj_def conj_assoc] | simp)+ apply (clarsimp simp: cte_wp_at_ctes_of) @@ -4588,6 +4582,8 @@ lemma whenE_reset_resetUntypedCap_invs_etc: crunch ksCurDomain[wp]: updateFreeIndex "\s. P (ksCurDomain s)" +end + lemma (in range_cover) funky_aligned: "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" apply (rule aligned_add_aligned) @@ -4599,10 +4595,13 @@ lemma (in range_cover) funky_aligned: context begin interpretation Arch . (*FIXME: arch_split*) +defs archOverlap_def: + "archOverlap \ \_ _. 
False" + lemma inv_untyped_corres': "\ untypinv_relation ui ui' \ \ corres (dc \ (=)) - (einvs and valid_untyped_inv ui and ct_active) + (einvs and valid_untyped_inv ui and ct_active and schact_is_rct) (invs' and valid_untyped_inv' ui' and ct_active') (invoke_untyped ui) (invokeUntyped ui')" apply (cases ui) @@ -4621,6 +4620,7 @@ lemma inv_untyped_corres': (cte_map cref) reset ptr_base ptr ao' us (map cte_map slots) dev" assume invs: "invs (s :: det_state)" "ct_active s" "valid_list s" "valid_sched s" + "schact_is_rct s" and invs': "invs' s'" "ct_active' s'" and sr: "(s, s') \ state_relation" and vui: "valid_untyped_inv_wcap ?ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) s" @@ -4830,7 +4830,8 @@ lemma inv_untyped_corres': show " corres (dc \ (=)) ((=) s) ((=) s') (invoke_untyped ?ui) (invokeUntyped ?ui')" - apply (clarsimp simp:invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc) + apply (clarsimp simp: invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc + archOverlap_def) apply (insert cover) apply (rule corres_guard_imp) apply (rule corres_split_norE) @@ -4919,9 +4920,9 @@ lemma inv_untyped_corres': \ valid_untyped_inv_wcap ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s \ (reset \ pspace_no_overlap {ptr && ~~ mask sz..(ptr && ~~ mask sz) + 2 ^ sz - 1} s) - " in hoare_post_imp_R) + " in hoare_strengthen_postE_R) apply (simp add: whenE_def, wp) - apply (rule validE_validE_R, rule hoare_post_impErr, rule reset_untyped_cap_invs_etc, auto)[1] + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule reset_untyped_cap_invs_etc, auto)[1] apply wp apply (clarsimp simp: ui cte_wp_at_caps_of_state bits_of_def untyped_range.simps) @@ -4962,7 +4963,7 @@ lemma inv_untyped_corres': apply (drule invoke_untyped_proofs.usable_range_disjoint) apply (clarsimp simp: field_simps mask_out_sub_mask shiftl_t2n) - apply (rule hoare_post_impErr, + apply (rule hoare_strengthen_postE, rule whenE_reset_resetUntypedCap_invs_etc[where ptr="ptr && ~~ mask sz" and ptr'=ptr and sz=sz and idx=idx and ui=ui' and dev=dev]) @@ -5003,7 +5004,7 @@ lemma inv_untyped_corres': apply (clarsimp simp only: pred_conj_def invs ui if_apply_def2) apply (strengthen vui) apply (cut_tac vui invs invs') - apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs) + apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs schact_is_rct_def) apply (cut_tac vui' invs') apply (clarsimp simp: ui cte_wp_at_ctes_of if_apply_def2 ui') done @@ -5033,7 +5034,7 @@ lemma sts_valid_untyped_inv': crunch nosch[wp]: invokeUntyped "\s. P (ksSchedulerAction s)" (simp: crunch_simps zipWithM_x_mapM - wp: crunch_wps hoare_unless_wp mapME_x_inv_wp preemptionPoint_inv) + wp: crunch_wps unless_wp mapME_x_inv_wp preemptionPoint_inv) crunch no_0_obj'[wp]: insertNewCap no_0_obj' (wp: crunch_wps) @@ -5164,9 +5165,6 @@ crunch irq_states' [wp]: insertNewCap valid_irq_states' crunch pde_mappings' [wp]: insertNewCap valid_pde_mappings' (wp: getCTE_wp') -crunch vq'[wp]: insertNewCap valid_queues' - (wp: crunch_wps) - crunch irqs_masked' [wp]: insertNewCap irqs_masked' (wp: crunch_wps rule: irqs_masked_lift) @@ -5238,6 +5236,12 @@ lemma insertNewCap_urz[wp]: apply (auto simp add: cteCaps_of_def untypedZeroRange_def isCap_simps) done +crunches insertNewCap + for tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. 
P (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps) + lemma insertNewCap_invs': "\invs' and ct_active' and valid_cap' cap @@ -5254,8 +5258,8 @@ lemma insertNewCap_invs': apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp insertNewCap_valid_pspace' sch_act_wf_lift - valid_queues_lift cur_tcb_lift tcb_in_cur_domain'_lift - insertNewCap_valid_global_refs' + cur_tcb_lift tcb_in_cur_domain'_lift valid_bitmaps_lift + insertNewCap_valid_global_refs' sym_heap_sched_pointers_lift valid_arch_state_lift' valid_irq_node_lift insertNewCap_valid_irq_handlers) apply (clarsimp simp: cte_wp_at_ctes_of) @@ -5482,14 +5486,14 @@ lemma invokeUntyped_invs'': apply (clarsimp simp:invokeUntyped_def getSlotCap_def ui) apply (rule validE_valid) apply (rule hoare_pre) - apply (rule_tac B="\_ s. invs' s \ Q s \ ct_active' s + apply (rule_tac Q'="\_ s. invs' s \ Q s \ ct_active' s \ valid_untyped_inv_wcap' ui (Some (UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s \ (reset \ pspace_no_overlap' (ptr && ~~ mask sz) sz s) - " in hoare_vcg_seqE[rotated]) + " in bindE_wp_fwd) apply (simp only: whenE_def) apply wp - apply (rule hoare_post_impErr, rule combine_validE, + apply (rule hoare_strengthen_postE, rule combine_validE, rule resetUntypedCap_invs_etc, rule valid_validE, rule reset_Q') apply (clarsimp simp only: if_True) apply auto[1] @@ -5582,7 +5586,7 @@ lemma invokeUntyped_invs'[wp]: "\invs' and valid_untyped_inv' ui and ct_active'\ invokeUntyped ui \\rv. invs'\" - apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_post_taut, simplified]) + apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_TrueI, simplified]) apply auto done @@ -5653,7 +5657,7 @@ lemma resetUntypedCap_IRQInactive: (is "\?P\ resetUntypedCap slot \?Q\,\?E\") apply (simp add: resetUntypedCap_def) apply (rule hoare_pre) - apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_post_impErr] + apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_strengthen_postE] doMachineOp_irq_states' preemptionPoint_inv hoare_drop_imps | simp add: no_irq_clearMemory if_apply_def2)+ done @@ -5662,8 +5666,7 @@ lemma inv_untyped_IRQInactive: "\valid_irq_states'\ invokeUntyped ui -, \\rv s. 
intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\"
  apply (simp add: invokeUntyped_def)
-  apply (rule hoare_pre)
-  apply (wp hoare_whenE_wp resetUntypedCap_IRQInactive | wpc | simp)+
+  apply (wpsimp wp: resetUntypedCap_IRQInactive)
  done

end
diff --git a/proof/refine/ARM/VSpace_R.thy b/proof/refine/ARM/VSpace_R.thy
index aca8f5f015..adc62eb452 100644
--- a/proof/refine/ARM/VSpace_R.thy
+++ b/proof/refine/ARM/VSpace_R.thy
@@ -458,13 +458,11 @@ lemma getHWASID_corres:
  apply (simp add: get_hw_asid_def getHWASID_def)
  apply (rule corres_guard_imp)
  apply (rule corres_split_eqr[OF loadHWASID_corres[where pd=pd]])
-  apply (case_tac maybe_hw_asid, simp_all)[1]
+  apply (corres_cases; simp)
  apply (rule corres_split_eqr[OF findFreeHWASID_corres])
  apply (rule corres_split[OF storeHWASID_corres[where pd=pd]])
-  apply (rule corres_trivial, simp )
-  apply (wpsimp wp: load_hw_asid_wp)+
-  apply (simp add: pd_at_asid_uniq)
-  apply simp
+  apply (rule corres_trivial, simp)
+  apply (wpsimp wp: load_hw_asid_wp hoare_drop_imp simp: pd_at_asid_uniq)+
  done

lemma setCurrentPD_to_abs:
@@ -495,12 +493,11 @@ lemma armv_contextSwitch_corres:
  done

lemma handleVMFault_corres:
-  "corres (fr \ dc) (tcb_at thread) (tcb_at' thread)
+  "corres (fr \ dc) (tcb_at thread and pspace_aligned and pspace_distinct) \
     (handle_vm_fault thread fault) (handleVMFault thread fault)"
  apply (simp add: ARM_H.handleVMFault_def)
-  apply (cases fault)
-  apply simp
-  apply (rule corres_guard_imp)
+  apply corres_cases
+  apply simp
  apply (rule corres_splitEE)
  apply simp
  apply (rule corres_machine_op [where r="(=)"])
@@ -512,9 +509,7 @@ lemma handleVMFault_corres:
  apply (rule corres_Id, rule refl, simp)
  apply (rule no_fail_getDFSR)
  apply (rule corres_trivial, simp add: arch_fault_map_def)
-  apply wp+
-  apply simp+
-  apply (rule corres_guard_imp)
+  apply wpsimp+
  apply (rule corres_splitEE)
  apply simp
  apply (rule asUser_corres')
@@ -527,8 +522,7 @@ lemma handleVMFault_corres:
  apply (rule corres_Id, rule refl, simp)
  apply (rule no_fail_getIFSR)
  apply (rule corres_trivial, simp add: arch_fault_map_def)
-  apply wp+
-  apply simp+
+  apply wpsimp+
  done

lemma flushSpace_corres:
@@ -545,18 +539,15 @@ lemma flushSpace_corres:
  apply (rule corres_guard_imp)
  apply (rule corres_split)
  apply (rule loadHWASID_corres[where pd=pd])
-  apply (rule corres_split[where R="\_. \" and R'="\_. \"])
+  apply (rule corres_split)
  apply (rule corres_machine_op [where r=dc])
  apply (rule corres_Id, rule refl, simp)
  apply (rule no_fail_cleanCaches_PoU)
-  apply (case_tac maybe_hw_asid)
-  apply simp
-  apply clarsimp
+  apply (corres_cases; clarsimp)
  apply (rule corres_machine_op)
  apply (rule corres_Id, rule refl, simp)
  apply (rule no_fail_invalidateLocalTLB_ASID)
-  apply wp+
-  apply clarsimp
+  apply wpsimp+
  apply (simp add: pd_at_asid_uniq)
  apply simp
  done
@@ -573,16 +564,13 @@ lemma invalidateTLBByASID_corres:
     (invalidate_tlb_by_asid asid) (invalidateTLBByASID asid)"
  apply (simp add: invalidate_tlb_by_asid_def invalidateTLBByASID_def)
  apply (rule corres_guard_imp)
-  apply (rule corres_split[where R="\_. \" and R'="\_.
\"]) + apply (rule corres_split) apply (rule loadHWASID_corres[where pd=pd]) - apply (case_tac maybe_hw_asid) - apply simp - apply clarsimp + apply (corres_cases; clarsimp) apply (rule corres_machine_op) apply (rule corres_Id, rule refl, simp) apply (rule no_fail_invalidateLocalTLB_ASID) - apply wp+ - apply clarsimp + apply wpsimp+ apply (simp add: pd_at_asid_uniq) apply simp done @@ -617,12 +605,12 @@ lemma find_pd_for_asid_pd_at_asid_again: apply (unfold validE_def, rule hoare_name_pre_state, fold validE_def) apply (case_tac "\pd. vspace_at_asid asid pd s") apply clarsimp - apply (rule_tac Q="\rv s'. s' = s \ rv = pd" and E="\\" in hoare_post_impErr) + apply (rule_tac Q="\rv s'. s' = s \ rv = pd" and E="\\" in hoare_strengthen_postE) apply (rule hoare_pre, wp find_pd_for_asid_valids) apply fastforce apply simp+ apply (rule_tac Q="\rv s'. s' = s \ vspace_at_asid asid rv s'" - and E="\rv s'. s' = s" in hoare_post_impErr) + and E="\rv s'. s' = s" in hoare_strengthen_postE) apply (rule hoare_pre, wp) apply clarsimp+ done @@ -842,8 +830,7 @@ lemma deleteASID_corres: apply (simp add: delete_asid_def deleteASID_def) apply (rule corres_guard_imp) apply (rule corres_split[OF corres_gets_asid]) - apply (case_tac "asid_table (asid_high_bits_of asid)", simp) - apply clarsimp + apply (corres_cases; clarsimp) apply (rule_tac P="\s. asid_high_bits_of asid \ dom (asidTable o ucast) \ asid_pool_at (the ((asidTable o ucast) (asid_high_bits_of asid))) s" and P'="pspace_aligned' and pspace_distinct'" and @@ -894,13 +881,9 @@ lemma deleteASID_corres: apply (simp add: vs_refs_def) apply (rule image_eqI[rotated], erule graph_ofI) apply (simp add: mask_asid_low_bits_ucast_ucast) - apply wp - apply (simp add: o_def) - apply (wp getASID_wp) - apply clarsimp - apply assumption - apply wp+ - apply clarsimp + \ \rewrite assumption so that the goal can refer to the C variable instead of the abstract's.\ + apply (drule Some_to_the) + apply (wpsimp wp: getASID_wp)+ apply (clarsimp simp: valid_arch_state_def valid_asid_table_def dest!: invs_arch_state) apply blast @@ -1078,10 +1061,10 @@ proof - and valid_arch_state and pspace_aligned and pspace_distinct) (pspace_aligned' and pspace_distinct' and no_0_obj') - (do arm_context_switch pd asid; + (do _ \ arm_context_switch pd asid; return True od) - (do armv_contextSwitch pd asid; + (do _ \ armv_contextSwitch pd asid; return True od)" apply (rule corres_guard_imp) @@ -1090,34 +1073,20 @@ proof - apply (wp | simp)+ done show ?thesis - apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) - apply (rule corres_guard_imp) - apply (rule corres_split[OF getCurThread_corres]) - apply (rule corres_split[where R="\_. vspace_at_asid asid pd and K (asid \ 0 \ asid \ mask asid_bits) - and valid_asid_map and valid_vs_lookup - and valid_vspace_objs and valid_global_objs - and unique_table_refs o caps_of_state - and valid_arch_state - and pspace_aligned and pspace_distinct" - and R'="\_. 
pspace_aligned' and pspace_distinct' and no_0_obj'"]) - apply (rule getSlotCap_corres) - apply (simp add: cte_map_def tcb_cnode_index_def tcbVTableSlot_def to_bl_1) - apply (case_tac "isArchObjectCap rv' \ - isPageDirectoryCap (capCap rv') \ - capPDMappedASID (capCap rv') \ None \ - capPDBasePtr (capCap rv') = pd") - apply (case_tac rv, simp_all add: isCap_simps)[1] - apply (rename_tac arch_cap) - apply (case_tac arch_cap, auto)[1] - apply (case_tac rv, simp_all add: isCap_simps[simplified] X[simplified])[1] - apply (rename_tac arch_cap) - apply (case_tac arch_cap, auto simp: X[simplified] split: option.splits)[1] - apply wp+ - apply (clarsimp simp: cur_tcb_def) - apply (erule tcb_at_cte_at) - apply (simp add: tcb_cap_cases_def) - apply clarsimp - done + apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split) + apply (rule getSlotCap_corres) + apply (simp add: cte_map_def tcb_cnode_index_def tcbVTableSlot_def to_bl_1) + apply (corres_cases; (simp add: isCap_simps, rule X)?)+ + apply (clarsimp simp: isCap_simps, rule X) + apply (wpsimp wp: hoare_drop_imp hoare_vcg_all_lift)+ + apply (clarsimp simp: cur_tcb_def) + apply (erule tcb_at_cte_at) + apply (simp add: tcb_cap_cases_def) + apply clarsimp + done qed crunch typ_at' [wp]: armv_contextSwitch "\s. P (typ_at' T p s)" @@ -1169,7 +1138,7 @@ lemma storeHWASID_valid_arch' [wp]: apply wp apply (rule_tac Q'="\rv s. valid_asid_map' (armKSASIDMap (ksArchState s)) \ asid \ 0 \ asid \ mask asid_bits" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp findPDForASID_inv2)+ apply (clarsimp simp: valid_asid_map'_def) apply (subst conj_commute, rule context_conjI) @@ -1370,13 +1339,11 @@ lemma pageTableMapped_corres: apply simp apply (simp add: liftE_bindE) apply (rule corres_split[OF getObject_PDE_corres']) - apply (rule corres_trivial) - apply (case_tac rv, - simp_all add: returnOk_def pde_relation_aligned_def - split:if_splits ARM_H.pde.splits)[1] - apply (wp | simp add: lookup_pd_slot_def Let_def)+ - apply (simp add: word_neq_0_conv) - apply simp + apply (corres_cases_both simp: pde_relation_aligned_def; + fastforce intro!: corres_returnOkTT + simp: pde_relation_aligned_def + split: if_split_asm) + apply (wpsimp simp: lookup_pd_slot_def word_neq_0_conv)+ done crunch inv[wp]: pageTableMapped "P" @@ -1624,7 +1591,7 @@ lemma unmapPage_corres: pd_aligned vmsz_aligned_def) apply simp apply wp - apply (rule_tac Q'="\_. invs and vspace_at_asid asid pda" in hoare_post_imp_R) + apply (rule_tac Q'="\_. 
invs and vspace_at_asid asid pda" in hoare_strengthen_postE_R) apply (wp lookup_pt_slot_inv lookup_pt_slot_cap_to2' lookup_pt_slot_cap_to_multiple2 store_pde_invs_unmap store_pde_pd_at_asid mapM_swp_store_pde_invs_unmap | wpc | simp | wp hoare_drop_imps @@ -1655,17 +1622,11 @@ lemma doFlush_corres: "corres_underlying Id nf nf' dc \ \ (do_flush typ start end pstart) (doFlush (flush_type_map typ) start end pstart)" apply (simp add: do_flush_def doFlush_def) - apply (cases "typ", simp_all add: flush_type_map_def) - apply (rule corres_Id [where r=dc], rule refl, simp) - apply (wp no_fail_cleanCacheRange_RAM) - apply (rule corres_Id [where r=dc], rule refl, simp) - apply (wp no_fail_invalidateCacheRange_RAM) - apply (rule corres_Id [where r=dc], rule refl, simp) - apply (wp no_fail_cleanInvalidateCacheRange_RAM) - apply (rule corres_Id [where r=dc], rule refl, simp) - apply (rule no_fail_pre, wp add: no_fail_cleanCacheRange_PoU no_fail_invalidateCacheRange_I - no_fail_dsb no_fail_isb del: no_irq) - apply clarsimp + apply corres_pre + apply (corres_cases_both simp: flush_type_map_def; + (rule corres_Id[OF refl], simp, wpsimp wp: no_fail_dsb no_fail_isb)) + apply simp + apply simp done definition @@ -1681,25 +1642,22 @@ lemma performPageDirectoryInvocation_corres: and cur_tcb' and valid_arch_state') (perform_page_directory_invocation pdi) (performPageDirectoryInvocation pdi')" apply (simp add: perform_page_directory_invocation_def performPageDirectoryInvocation_def) - apply (cases pdi) - apply (clarsimp simp: page_directory_invocation_map_def) - apply (rule corres_guard_imp) - apply (rule corres_when, simp) - apply (rule corres_split[OF setVMRootForFlush_corres]) - apply (rule corres_split[OF corres_machine_op]) - apply (rule doFlush_corres) - apply (rule corres_when, simp) - apply (rule corres_split[OF getCurThread_corres]) - apply clarsimp - apply (rule setVMRoot_corres) - apply wp+ - apply (simp add: cur_tcb_def[symmetric]) - apply (wp hoare_drop_imps) - apply (simp add: cur_tcb'_def[symmetric]) - apply (wp hoare_drop_imps)+ - apply clarsimp - apply (auto simp: valid_pdi_def invs_vspace_objs[simplified])[2] - apply (clarsimp simp: page_directory_invocation_map_def) + apply (corres_cases_both simp: page_directory_invocation_map_def) + apply (clarsimp simp: page_directory_invocation_map_def) + apply (rule corres_when, simp) + apply (rule corres_split[OF setVMRootForFlush_corres]) + apply (rule corres_split[OF corres_machine_op]) + apply (rule doFlush_corres) + apply (rule corres_when, simp) + apply (rule corres_split[OF getCurThread_corres]) + apply clarsimp + apply (rule setVMRoot_corres) + apply wp+ + apply (simp flip: cur_tcb_def) + apply (wp hoare_drop_imps) + apply (simp flip: cur_tcb'_def) + apply (wp hoare_drop_imps)+ + apply (auto simp: valid_pdi_def invs_vspace_objs[simplified] page_directory_invocation_map_def) done definition @@ -2014,7 +1972,7 @@ lemma duplicate_address_set_simp: lemma valid_duplicates'_non_pd_pt_I: "\koTypeOf ko \ ArchT PDET; koTypeOf ko \ ArchT PTET; vs_valid_duplicates' (ksPSpace s) ; ksPSpace s p = Some ko; koTypeOf ko = koTypeOf m\ - \ vs_valid_duplicates' (ksPSpace s(p \ m))" + \ vs_valid_duplicates' ((ksPSpace s)(p \ m))" apply (subst vs_valid_duplicates'_def) apply (intro allI impI) apply (clarsimp split:if_splits simp:duplicate_address_set_simp option.splits) @@ -2063,8 +2021,8 @@ lemma message_info_from_data_eqv: lemma setMessageInfo_corres: "mi' = message_info_map mi \ - corres dc (tcb_at t) (tcb_at' t) - (set_message_info t mi) (setMessageInfo t mi')" + 
corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (set_message_info t mi) (setMessageInfo t mi')" apply (simp add: setMessageInfo_def set_message_info_def) apply (subgoal_tac "wordFromMessageInfo (message_info_map mi) = message_info_to_data mi") @@ -2777,14 +2735,6 @@ crunch norqL1[wp]: storePDE "\s. P (ksReadyQueuesL1Bitmap s)" crunch norqL2[wp]: storePDE "\s. P (ksReadyQueuesL2Bitmap s)" (simp: updateObject_default_def) -lemma storePDE_valid_queues [wp]: - "\Invariants_H.valid_queues\ storePDE p pde \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma storePDE_valid_queues' [wp]: - "\valid_queues'\ storePDE p pde \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma storePDE_state_refs' [wp]: "\\s. P (state_refs_of' s)\ storePDE p pde \\rv s. P (state_refs_of' s)\" apply (clarsimp simp: storePDE_def) @@ -2927,7 +2877,17 @@ crunches storePTE, storePDE for ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" - (wp: setObject_ksPSpace_only updateObject_default_inv) + (wp: setObject_ksPSpace_only updateObject_default_inv simp: o_def) + +lemma storePTE_tcbs_of'[wp]: + "storePTE c (pte::pte) \\s. P' (tcbs_of' s)\" + unfolding storePTE_def + by setObject_easy_cases + +lemma storePDE_tcbs_of'[wp]: + "storePDE c (pde::pde) \\s. P' (tcbs_of' s)\" + unfolding storePDE_def + by setObject_easy_cases lemma storePDE_invs[wp]: "\invs' and valid_pde' pde @@ -2939,7 +2899,7 @@ lemma storePDE_invs[wp]: apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift valid_arch_state_lift' valid_irq_node_lift - cur_tcb_lift valid_irq_handlers_lift'' + cur_tcb_lift valid_irq_handlers_lift'' valid_bitmaps_lift sym_heap_sched_pointers_lift untyped_ranges_zero_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp @@ -2968,14 +2928,6 @@ crunch norqL1[wp]: storePTE "\s. P (ksReadyQueuesL1Bitmap s)" crunch norqL2[wp]: storePTE "\s. P (ksReadyQueuesL2Bitmap s)" (simp: updateObject_default_def) -lemma storePTE_valid_queues [wp]: - "\Invariants_H.valid_queues\ storePTE p pde \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma storePTE_valid_queues' [wp]: - "\valid_queues'\ storePTE p pde \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma storePTE_state_refs' [wp]: "\\s. P (state_refs_of' s)\ storePTE p pte \\rv s. P (state_refs_of' s)\" apply (clarsimp simp: storePTE_def) @@ -3112,7 +3064,7 @@ lemma storePTE_invs [wp]: apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' - untyped_ranges_zero_lift + untyped_ranges_zero_lift valid_bitmaps_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp done @@ -3158,14 +3110,6 @@ lemma setASIDPool_qsL2 [wp]: "\\s. P (ksReadyQueuesL2Bitmap s)\ setObject p (ap::asidpool) \\rv s. P (ksReadyQueuesL2Bitmap s)\" by (wp setObject_qs updateObject_default_inv|simp)+ -lemma setASIDPool_valid_queues [wp]: - "\Invariants_H.valid_queues\ setObject p (ap::asidpool) \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma setASIDPool_valid_queues' [wp]: - "\valid_queues'\ setObject p (ap::asidpool) \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma setASIDPool_state_refs' [wp]: "\\s. P (state_refs_of' s)\ setObject p (ap::asidpool) \\rv s. 
P (state_refs_of' s)\" apply (clarsimp simp: setObject_def valid_def in_monad split_def @@ -3278,6 +3222,10 @@ lemma setObject_ap_ksDomScheduleIdx [wp]: "\\s. P (ksDomScheduleIdx s)\ setObject p (ap::asidpool) \\_. \s. P (ksDomScheduleIdx s)\" by (wp updateObject_default_inv|simp add:setObject_def | wpc)+ +lemma setObject_asidpool_tcbs_of'[wp]: + "setObject c (asidpool::asidpool) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + lemma setASIDPool_invs [wp]: "\invs' and valid_asid_pool' ap\ setObject p (ap::asidpool) \\_. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def) @@ -3286,7 +3234,7 @@ lemma setASIDPool_invs [wp]: valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' untyped_ranges_zero_lift - updateObject_default_inv + updateObject_default_inv valid_bitmaps_lift | simp add: cteCaps_of_def | rule setObject_ksPSpace_only)+ apply (clarsimp simp add: setObject_def o_def) diff --git a/proof/refine/ARM/orphanage/Orphanage.thy b/proof/refine/ARM/orphanage/Orphanage.thy index 43b95866ed..3a51a36e03 100644 --- a/proof/refine/ARM/orphanage/Orphanage.thy +++ b/proof/refine/ARM/orphanage/Orphanage.thy @@ -59,8 +59,7 @@ where definition all_queued_tcb_ptrs :: "kernel_state \ machine_word set" where - "all_queued_tcb_ptrs s \ - { tcb_ptr. \ priority. tcb_ptr : set ((ksReadyQueues s) priority) }" + "all_queued_tcb_ptrs s \ { tcb_ptr. obj_at' tcbQueued tcb_ptr s }" lemma st_tcb_at_neg': "(st_tcb_at' (\ ts. \ P ts) t s) = (tcb_at' t s \ \ st_tcb_at' P t s)" @@ -107,8 +106,8 @@ lemma no_orphans_lift: "\ tcb_ptr. \ \s. tcb_ptr = ksCurThread s \ f \ \_ s. tcb_ptr = ksCurThread s \" assumes st_tcb_at'_is_lifted: "\P p. \ \s. st_tcb_at' P p s\ f \ \_ s. st_tcb_at' P p s \" - assumes ksReadyQueues_is_lifted: - "\P. \ \s. P (ksReadyQueues s)\ f \ \_ s. P (ksReadyQueues s) \" + assumes tcbQueued_is_lifted: + "\P tcb_ptr. f \ \s. obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s \" assumes ksSchedulerAction_is_lifted: "\P. \ \s. P (ksSchedulerAction s)\ f \ \_ s. P (ksSchedulerAction s) \" shows @@ -119,7 +118,7 @@ lemma no_orphans_lift: apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) apply (rule ksCurThread_is_lifted) apply (wp hoare_vcg_disj_lift) - apply (rule ksReadyQueues_is_lifted) + apply (wpsimp wp: tcbQueued_is_lifted) apply (wp hoare_vcg_disj_lift) apply (rule typ_at'_is_lifted) apply (wp hoare_vcg_disj_lift) @@ -139,13 +138,12 @@ lemma st_tcb_at'_all_active_tcb_ptrs_lift: by (clarsimp simp: all_active_tcb_ptrs_def) (rule st_tcb_at'_is_active_tcb_ptr_lift [OF assms]) -lemma ksQ_all_queued_tcb_ptrs_lift: - assumes "\P p. \\s. P (ksReadyQueues s p)\ f \\rv s. P (ksReadyQueues s p)\" +lemma tcbQueued_all_queued_tcb_ptrs_lift: + assumes "\Q P tcb_ptr. f \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)\" shows "\\s. P (t \ all_queued_tcb_ptrs s)\ f \\_ s. 
P (t \ all_queued_tcb_ptrs s)\" apply (clarsimp simp: all_queued_tcb_ptrs_def) apply (rule_tac P=P in P_bool_lift) - apply (wp hoare_ex_wp assms) - apply (clarsimp) + apply (wp hoare_vcg_ex_lift assms) apply (wp hoare_vcg_all_lift assms) done @@ -180,6 +178,11 @@ lemma almost_no_orphans_disj: apply (auto intro: pred_tcb_at') done +lemma all_queued_tcb_ptrs_ksReadyQueues_update[simp]: + "tcb_ptr \ all_queued_tcb_ptrs (ksReadyQueues_update f s) = (tcb_ptr \ all_queued_tcb_ptrs s)" + unfolding all_queued_tcb_ptrs_def + by (clarsimp simp: obj_at'_def projectKOs) + lemma no_orphans_update_simps[simp]: "no_orphans (gsCNodes_update f s) = no_orphans s" "no_orphans (gsUserPages_update g s) = no_orphans s" @@ -252,6 +255,12 @@ crunch no_orphans [wp]: removeFromBitmap "no_orphans" crunch almost_no_orphans [wp]: addToBitmap "almost_no_orphans x" crunch almost_no_orphans [wp]: removeFromBitmap "almost_no_orphans x" +lemma setCTE_tcbQueued[wp]: + "setCTE ptr v \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) t s)\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + lemma setCTE_no_orphans [wp]: "\ \s. no_orphans s \ setCTE p cte @@ -265,7 +274,7 @@ lemma setCTE_almost_no_orphans [wp]: setCTE p cte \ \rv s. almost_no_orphans tcb_ptr s \" unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift setCTE_typ_at' setCTE_pred_tcb_at') + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift setCTE_typ_at' setCTE_pred_tcb_at') done crunch no_orphans [wp]: activateIdleThread "no_orphans" @@ -275,128 +284,131 @@ lemma asUser_no_orphans [wp]: asUser thread f \ \rv s. no_orphans s \" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +lemma threadSet_all_queued_tcb_ptrs: + "\tcb. tcbQueued (F tcb) = tcbQueued tcb \ threadSet F tptr \\s. P (t \ all_queued_tcb_ptrs s)\" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2 threadSet_wp) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: obj_at'_def projectKOs ps_clear_upd objBits_simps) done +crunches removeFromBitmap, addToBitmap, setQueue + for all_queued_tcb_ptrs[wp]: "\s. P (t \ all_queued_tcb_ptrs s)" + (wp: tcbQueued_all_queued_tcb_ptrs_lift) + +crunches tcbQueuePrepend, tcbQueueAppend + for all_queued_tcb_ptrs[wp]: "\s. P (t \ all_queued_tcb_ptrs s)" + (wp: threadSet_all_queued_tcb_ptrs ignore: threadSet) + +lemma tcbQueued_update_True_all_queued_tcb_ptrs[wp]: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr' \ all_queued_tcb_ptrs s\ + threadSet (tcbQueued_update (\_. True)) tcb_ptr + \\_ s. tcb_ptr' \ all_queued_tcb_ptrs s\" + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: all_queued_tcb_ptrs_def obj_at'_def projectKOs ps_clear_upd objBits_simps) + done + +lemma tcbSchedEnqueue_all_queued_tcb_ptrs[wp]: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr \ all_queued_tcb_ptrs s\ + tcbSchedEnqueue tcb_ptr' + \\_ s. tcb_ptr \ all_queued_tcb_ptrs s\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: hoare_vcg_imp_lift' threadGet_wp + | wpsimp wp: threadSet_all_queued_tcb_ptrs)+ + apply (clarsimp simp: all_queued_tcb_ptrs_def obj_at'_def projectKOs) + done + +lemmas tcbSchedEnqueue_all_queued_tcb_ptrs'[wp] = + tcbSchedEnqueue_all_queued_tcb_ptrs[simplified all_queued_tcb_ptrs_def, simplified] + +lemma tcbSchedAppend_all_queued_tcb_ptrs[wp]: + "\\s. 
tcb_ptr \ tcb_ptr' \ tcb_ptr \ all_queued_tcb_ptrs s\ + tcbSchedAppend tcb_ptr' + \\_ s. tcb_ptr \ all_queued_tcb_ptrs s\" + unfolding tcbSchedAppend_def tcbQueueAppend_def + apply (wpsimp wp: hoare_vcg_imp_lift' threadGet_wp + | wpsimp wp: threadSet_all_queued_tcb_ptrs)+ + apply (clarsimp simp: all_queued_tcb_ptrs_def obj_at'_def projectKOs) + done + +lemmas tcbSchedAppend_all_queued_tcb_ptrs'[wp] = + tcbSchedAppend_all_queued_tcb_ptrs[simplified all_queued_tcb_ptrs_def, simplified] + lemma threadSet_no_orphans: - "\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)) \ - \ \s. no_orphans s \ - threadSet F tptr - \ \rv s. no_orphans s \" + "\\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)); + \tcb. tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tptr \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2 | clarsimp)+ - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2) -lemma threadSet_almost_no_orphans: - "\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)) \ - \ \s. almost_no_orphans ptr s \ - threadSet F tptr - \ \rv s. almost_no_orphans ptr s \" - unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2 | clarsimp)+ +lemma tcbQueued_update_True_no_orphans: + "\almost_no_orphans tptr and tcb_at' tptr\ + threadSet (tcbQueued_update (\_. True)) tptr + \\_. no_orphans\" + unfolding no_orphans_disj + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2) + apply (fastforce simp: almost_no_orphans_def all_active_tcb_ptrs_def + tcb_at_typ_at' st_tcb_at_neg' is_active_tcb_ptr_def) done -lemma setQueue_no_orphans_enq: - "\ \s. no_orphans s \ set (ksReadyQueues s (d, prio)) \ set qs \ - setQueue d prio qs - \ \_ s. no_orphans s \" - unfolding setQueue_def - apply wp - apply (clarsimp simp: no_orphans_def all_queued_tcb_ptrs_def - split: if_split_asm) +lemma tcbQueued_update_True_almost_no_orphans: + "threadSet (tcbQueued_update (\_. True)) tptr' \almost_no_orphans tptr\" + unfolding almost_no_orphans_disj + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift threadSet_st_tcb_at2) apply fastforce done -lemma setQueue_almost_no_orphans_enq: - "\ \s. almost_no_orphans tcb_ptr s \ set (ksReadyQueues s (d, prio)) \ set qs \ tcb_ptr \ set qs \ - setQueue d prio qs - \ \_ s. no_orphans s \" +lemma threadSet_almost_no_orphans: + "\\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)); + \tcb. tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tptr \almost_no_orphans ptr\" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2) + +lemma setQueue_no_orphans[wp]: + "setQueue d prio qs \no_orphans\" unfolding setQueue_def apply wp - apply (clarsimp simp: no_orphans_def almost_no_orphans_def all_queued_tcb_ptrs_def - split: if_split_asm) - apply fastforce + apply (clarsimp simp: no_orphans_def) done -lemma setQueue_almost_no_orphans_enq_lift: - "\ \s. almost_no_orphans tcb_ptr s \ set (ksReadyQueues s (d, prio)) \ set qs \ - setQueue d prio qs - \ \_ s. 
almost_no_orphans tcb_ptr s \" +lemma setQueue_almost_no_orphans[wp]: + "setQueue d prio qs \almost_no_orphans tptr\" unfolding setQueue_def apply wp - apply (clarsimp simp: almost_no_orphans_def all_queued_tcb_ptrs_def - split: if_split_asm) - apply fastforce + apply (clarsimp simp: almost_no_orphans_def) done lemma tcbSchedEnqueue_no_orphans[wp]: - "\ \s. no_orphans s \ - tcbSchedEnqueue tcb_ptr - \ \rv s. no_orphans s \" - unfolding tcbSchedEnqueue_def - apply (wp setQueue_no_orphans_enq threadSet_no_orphans | clarsimp simp: unless_def)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (drule obj_at_ko_at') - apply auto + "tcbSchedEnqueue tcb_ptr \no_orphans\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadSet_almost_no_orphans threadGet_wp) + apply (fastforce simp: no_orphans_strg_almost) done lemma tcbSchedAppend_no_orphans[wp]: - "\ \s. no_orphans s \ - tcbSchedAppend tcb_ptr - \ \rv s. no_orphans s \" - unfolding tcbSchedAppend_def - apply (wp setQueue_no_orphans_enq threadSet_no_orphans | clarsimp simp: unless_def)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (drule obj_at_ko_at') - apply auto - done - -lemma ko_at_obj_at': - "ko_at' ko p s \ P ko \ obj_at' P p s" - unfolding obj_at'_def - apply clarsimp - done - -lemma queued_in_queue: - "\valid_queues' s; ko_at' tcb tcb_ptr s; tcbQueued tcb\ \ - \ p. tcb_ptr \ set (ksReadyQueues s p)" - unfolding valid_queues'_def - apply (drule_tac x="tcbDomain tcb" in spec) - apply (drule_tac x="tcbPriority tcb" in spec) - apply (drule_tac x="tcb_ptr" in spec) - apply (drule mp) - apply (rule ko_at_obj_at') - apply (auto simp: inQ_def) + "tcbSchedAppend tcb_ptr \no_orphans\" + unfolding tcbSchedAppend_def tcbQueueAppend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadSet_almost_no_orphans threadGet_wp) + apply (fastforce simp: no_orphans_strg_almost) done lemma tcbSchedEnqueue_almost_no_orphans: - "\ \s. almost_no_orphans tcb_ptr s \ valid_queues' s \ + "\almost_no_orphans tcb_ptr\ tcbSchedEnqueue tcb_ptr - \ \rv s. no_orphans s \" - unfolding tcbSchedEnqueue_def - apply simp - apply (wp setQueue_almost_no_orphans_enq[where tcb_ptr=tcb_ptr] threadSet_no_orphans - | clarsimp)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply normalise_obj_at' - apply (rule_tac x=ko in exI) - apply (clarsimp simp: subset_insertI) - apply (unfold no_orphans_def almost_no_orphans_def) - apply clarsimp - apply (drule(2) queued_in_queue) - apply (fastforce simp: all_queued_tcb_ptrs_def) + \\_. no_orphans\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadSet_almost_no_orphans threadGet_wp) + apply (fastforce simp: no_orphans_def almost_no_orphans_def all_queued_tcb_ptrs_def obj_at'_def) done lemma tcbSchedEnqueue_almost_no_orphans_lift: - "\ \s. almost_no_orphans ptr s \ - tcbSchedEnqueue tcb_ptr - \ \rv s. almost_no_orphans ptr s \" - unfolding tcbSchedEnqueue_def - apply (wp setQueue_almost_no_orphans_enq_lift threadSet_almost_no_orphans | clarsimp simp: unless_def)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (drule obj_at_ko_at') - apply auto - done + "tcbSchedEnqueue tcb_ptr \almost_no_orphans ptr\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + by (wpsimp wp: tcbQueued_update_True_almost_no_orphans threadSet_almost_no_orphans) lemma ssa_no_orphans: "\ \s. 
no_orphans s \ @@ -428,124 +440,70 @@ lemma ssa_almost_no_orphans_lift [wp]: apply auto done -lemma tcbSchedEnqueue_inQueue [wp]: - "\ \s. valid_queues' s \ - tcbSchedEnqueue tcb_ptr - \ \rv s. tcb_ptr \ all_queued_tcb_ptrs s \" - unfolding tcbSchedEnqueue_def all_queued_tcb_ptrs_def - apply (wp | clarsimp simp: unless_def)+ - apply (rule_tac Q="\rv. \" in hoare_post_imp) - apply fastforce - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (fastforce simp: obj_at'_def valid_queues'_def inQ_def) - done - -lemma tcbSchedAppend_inQueue [wp]: - "\ \s. valid_queues' s \ - tcbSchedAppend tcb_ptr - \ \rv s. tcb_ptr \ all_queued_tcb_ptrs s \" - unfolding tcbSchedAppend_def all_queued_tcb_ptrs_def - apply (wp | clarsimp simp: unless_def)+ - apply (rule_tac Q="\rv. \" in hoare_post_imp) - apply fastforce - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (fastforce simp: obj_at'_def valid_queues'_def inQ_def) - done - lemma rescheduleRequired_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ - rescheduleRequired - \ \rv s. no_orphans s \" + "rescheduleRequired \no_orphans\" unfolding rescheduleRequired_def - apply (wp tcbSchedEnqueue_no_orphans hoare_vcg_all_lift ssa_no_orphans | wpc | clarsimp)+ - apply (wps tcbSchedEnqueue_nosch, wp static_imp_wp) - apply (rename_tac word t p) - apply (rule_tac P="word = t" in hoare_gen_asm) - apply (wp hoare_disjI1 | clarsimp)+ - done + by (wpsimp wp: ssa_no_orphans hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift | wpc)+ lemma rescheduleRequired_almost_no_orphans [wp]: - "\ \s. almost_no_orphans tcb_ptr s \ valid_queues' s \ - rescheduleRequired - \ \rv s. almost_no_orphans tcb_ptr s \" + "rescheduleRequired \almost_no_orphans tcb_ptr\" unfolding rescheduleRequired_def - apply (wp tcbSchedEnqueue_almost_no_orphans_lift hoare_vcg_all_lift | wpc | clarsimp)+ - apply (wps tcbSchedEnqueue_nosch, wp static_imp_wp) - apply (rename_tac word t p) - apply (rule_tac P="word = t" in hoare_gen_asm) - apply (wp hoare_disjI1 | clarsimp)+ - done + by (wpsimp wp: ssa_almost_no_orphans_lift hoare_vcg_all_lift tcbSchedEnqueue_almost_no_orphans_lift + hoare_vcg_imp_lift' hoare_vcg_disj_lift) lemma setThreadState_current_no_orphans: - "\ \s. no_orphans s \ ksCurThread s = tcb_ptr \ valid_queues' s \ + "\\s. no_orphans s \ ksCurThread s = tcb_ptr\ setThreadState state tcb_ptr - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. valid_queues' s \ no_orphans s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold no_orphans_disj all_queued_tcb_ptrs_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: inQ_def) + apply wpsimp + unfolding no_orphans_disj + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ done lemma setThreadState_isRestart_no_orphans: - "\ \s. no_orphans s \ st_tcb_at' isRestart tcb_ptr s \ valid_queues' s\ + "\no_orphans and st_tcb_at' isRestart tcb_ptr\ setThreadState state tcb_ptr - \ \rv s. no_orphans s \" + \\_ . no_orphans\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. 
valid_queues' s \ no_orphans s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold no_orphans_disj all_queued_tcb_ptrs_def is_active_thread_state_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: st_tcb_at_double_neg' st_tcb_at_neg' inQ_def) + apply wpsimp + unfolding no_orphans_disj + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ + apply (auto simp: is_active_thread_state_def st_tcb_at_double_neg' st_tcb_at_neg') done lemma setThreadState_almost_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s\ - setThreadState state tcb_ptr - \ \rv s. almost_no_orphans tcb_ptr s \" + "\no_orphans\ setThreadState state tcb_ptr \\_. almost_no_orphans tcb_ptr\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. valid_queues' s \ almost_no_orphans tcb_ptr s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold no_orphans_disj almost_no_orphans_disj all_queued_tcb_ptrs_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: inQ_def) + apply wpsimp + apply (unfold no_orphans_disj almost_no_orphans_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ done lemma setThreadState_not_active_no_orphans: - "\ is_active_thread_state state \ - \ \s. no_orphans s \ valid_queues' s \ - setThreadState state tcb_ptr - \ \rv s. no_orphans s \" + "\ is_active_thread_state state \ setThreadState state tcb_ptr \no_orphans\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. valid_queues' s \ no_orphans s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold no_orphans_disj all_queued_tcb_ptrs_def is_active_thread_state_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: isRunning_def isRestart_def inQ_def) + apply wpsimp + apply (unfold no_orphans_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ done lemma setThreadState_not_active_almost_no_orphans: - "\ is_active_thread_state state \ - \ \s. almost_no_orphans thread s \ valid_queues' s \ - setThreadState state tcb_ptr - \ \rv s. almost_no_orphans thread s \" + "\ is_active_thread_state state \ setThreadState state tcb_ptr \almost_no_orphans thread\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. valid_queues' s \ almost_no_orphans thread s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold almost_no_orphans_disj all_queued_tcb_ptrs_def is_active_thread_state_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: isRunning_def isRestart_def inQ_def) + apply wpsimp + apply (unfold almost_no_orphans_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ done lemma activateThread_no_orphans [wp]: @@ -557,60 +515,75 @@ lemma activateThread_no_orphans [wp]: apply (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def isRestart_def) done -lemma setQueue_no_orphans_deq: - "\ \s. \ tcb_ptr. no_orphans s \ \ is_active_tcb_ptr tcb_ptr s \ - queue = [x\((ksReadyQueues s) (d, priority)). 
x \ tcb_ptr] \ - setQueue d priority queue - \ \rv s. no_orphans s \" - unfolding setQueue_def - apply (wp | clarsimp)+ - apply (fastforce simp: no_orphans_def all_queued_tcb_ptrs_def - all_active_tcb_ptrs_def is_active_tcb_ptr_def) +crunches removeFromBitmap, tcbQueueRemove, setQueue + for almost_no_orphans[wp]: "almost_no_orphans thread" + and no_orphans[wp]: no_orphans + and all_queued_tcb_ptrs[wp]: "\s. tcb_ptr \ all_queued_tcb_ptrs s" + (wp: crunch_wps) + +lemma tcbQueued_update_False_all_queued_tcb_ptrs: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr' \ all_queued_tcb_ptrs s\ + threadSet (tcbQueued_update (\_. False)) tcb_ptr + \\_ s. tcb_ptr' \ all_queued_tcb_ptrs s\" + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: all_queued_tcb_ptrs_def obj_at'_def projectKOs ps_clear_upd) done -lemma setQueue_almost_no_orphans_deq [wp]: - "\ \s. almost_no_orphans tcb_ptr s \ - queue = [x\((ksReadyQueues s) (d, priority)). x \ tcb_ptr] \ - setQueue d priority queue - \ \rv s. almost_no_orphans tcb_ptr s \" - unfolding setQueue_def - apply (wp | clarsimp)+ - apply (fastforce simp: almost_no_orphans_def all_queued_tcb_ptrs_def - all_active_tcb_ptrs_def is_active_tcb_ptr_def) +lemma tcbSchedDequeue_all_queued_tcb_ptrs_other: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr' \ all_queued_tcb_ptrs s\ + tcbSchedDequeue tcb_ptr + \\_ s. tcb_ptr' \ all_queued_tcb_ptrs s\" + unfolding tcbSchedDequeue_def + by (wpsimp wp: tcbQueued_update_False_all_queued_tcb_ptrs threadGet_wp) + +lemma tcbQueued_update_False_almost_no_orphans: + "\no_orphans\ + threadSet (tcbQueued_update (\_. False)) tptr + \\_. almost_no_orphans tptr\" + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: no_orphans_def almost_no_orphans_def) + apply (rename_tac tcb_ptr) + apply (case_tac "tcb_ptr = tptr") + apply fastforce + apply (fastforce simp: all_queued_tcb_ptrs_def obj_at'_def projectKOs all_active_tcb_ptrs_def + is_active_tcb_ptr_def st_tcb_at'_def ps_clear_upd) done lemma tcbSchedDequeue_almost_no_orphans [wp]: - "\ \s. no_orphans s \ - tcbSchedDequeue thread - \ \rv s. almost_no_orphans thread s \" + "\no_orphans\ tcbSchedDequeue thread \\_. almost_no_orphans thread\" unfolding tcbSchedDequeue_def - apply (wp threadSet_almost_no_orphans | simp cong: if_cong)+ - apply (simp add:no_orphans_strg_almost cong: if_cong) + apply (wpsimp wp: tcbQueued_update_False_almost_no_orphans threadGet_wp) + apply (simp add: no_orphans_strg_almost) done -lemma tcbSchedDequeue_no_orphans [wp]: - "\ \s. no_orphans s \ \ is_active_tcb_ptr tcb_ptr s \ - tcbSchedDequeue tcb_ptr - \ \rv s. no_orphans s \" - unfolding tcbSchedDequeue_def - apply (wp setQueue_no_orphans_deq threadSet_no_orphans | clarsimp)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (drule obj_at_ko_at') - apply auto +lemma tcbSchedDequeue_no_orphans[wp]: + "\\s. no_orphans s \ \ is_active_tcb_ptr tcbPtr s \ tcb_at' tcbPtr s\ + tcbSchedDequeue tcbPtr + \\_. no_orphans\" + supply disj_not1[simp del] + unfolding no_orphans_disj almost_no_orphans_disj + apply (rule hoare_allI) + apply (rename_tac tcb_ptr) + apply (case_tac "tcb_ptr = tcbPtr") + apply (rule_tac Q="\_ s. st_tcb_at' (\state. \ is_active_thread_state state) tcbPtr s" + in hoare_post_imp) + apply fastforce + apply wpsimp + apply (clarsimp simp: st_tcb_at'_def obj_at'_def projectKOs is_active_tcb_ptr_def disj_not1) + apply (wpsimp wp: tcbQueued_update_False_all_queued_tcb_ptrs hoare_vcg_disj_lift + simp: tcbSchedDequeue_def) done lemma switchToIdleThread_no_orphans' [wp]: - "\ \s. 
no_orphans s \ - (is_active_tcb_ptr (ksCurThread s) s - \ ksCurThread s \ all_queued_tcb_ptrs s) \ + "\\s. no_orphans s + \ (is_active_tcb_ptr (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s)\ switchToIdleThread - \ \rv s. no_orphans s \" - unfolding switchToIdleThread_def setCurThread_def ARM_H.switchToIdleThread_def + \\_. no_orphans\" + supply disj_not1[simp del] + apply (clarsimp simp: switchToIdleThread_def setCurThread_def ARM_H.switchToIdleThread_def) apply (simp add: no_orphans_disj all_queued_tcb_ptrs_def) - apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_disj_lift - | clarsimp)+ - apply (auto simp: no_orphans_disj all_queued_tcb_ptrs_def is_active_tcb_ptr_def - st_tcb_at_neg' tcb_at_typ_at') + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift hoare_drop_imps) + apply (force simp: is_active_tcb_ptr_def st_tcb_at_neg' typ_at_tcb') done crunch no_orphans [wp]: "Arch.switchToThread" "no_orphans" @@ -622,13 +595,9 @@ crunch ksCurThread [wp]: "Arch.switchToThread" "\ s. P (ksCurThread s)" crunch ksIdleThread [wp]: "Arch.switchToThread" "\ s. P (ksIdleThread s)" (ignore: ARM.clearExMonitor) -lemma ArchThreadDecls_H_switchToThread_all_queued_tcb_ptrs [wp]: - "\ \s. P (all_queued_tcb_ptrs s) \ - Arch.switchToThread tcb_ptr - \ \rv s. P (all_queued_tcb_ptrs s) \" - unfolding ARM_H.switchToThread_def all_queued_tcb_ptrs_def - apply (wp | clarsimp)+ - done +crunches Arch.switchToThread + for all_queued_tcb_ptrs[wp]: "\s. P (t \ all_queued_tcb_ptrs s)" + (wp: tcbQueued_all_queued_tcb_ptrs_lift) crunch ksSchedulerAction [wp]: "Arch.switchToThread" "\s. P (ksSchedulerAction s)" (ignore: ARM.clearExMonitor) @@ -646,22 +615,6 @@ lemma setCurThread_no_orphans [wp]: apply auto done -lemma tcbSchedDequeue_all_queued_tcb_ptrs: - "\\s. x \ all_queued_tcb_ptrs s \ x \ t \ - tcbSchedDequeue t \\_ s. x \ all_queued_tcb_ptrs s\" - apply (rule_tac Q="(\s. x \ all_queued_tcb_ptrs s) and K (x \ t)" - in hoare_pre_imp, clarsimp) - apply (rule hoare_gen_asm) - apply (clarsimp simp: tcbSchedDequeue_def all_queued_tcb_ptrs_def) - apply (rule hoare_pre) - apply (wp, clarsimp) - apply (wp hoare_ex_wp)+ - apply (rename_tac d p) - apply (rule_tac Q="\_ s. x \ set (ksReadyQueues s (d, p))" - in hoare_post_imp, clarsimp) - apply (wp hoare_vcg_all_lift | simp)+ - done - lemma tcbSchedDequeue_all_active_tcb_ptrs[wp]: "\\s. P (t' \ all_active_tcb_ptrs s)\ tcbSchedDequeue t \\_ s. P (t' \ all_active_tcb_ptrs s)\" by (clarsimp simp: all_active_tcb_ptrs_def is_active_tcb_ptr_def) wp @@ -684,8 +637,14 @@ lemma setCurThread_almost_no_orphans: lemmas ArchThreadDecls_H_switchToThread_all_active_tcb_ptrs[wp] = st_tcb_at'_all_active_tcb_ptrs_lift [OF Arch_switchToThread_pred_tcb'] +lemma arch_switch_thread_tcbQueued[wp]: + "Arch.switchToThread t \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)\" + apply (simp add: ARM_H.switchToThread_def) + apply (wp) + done + lemmas ArchThreadDecls_H_switchToThread_all_queued_tcb_ptrs_lift[wp] = - ksQ_all_queued_tcb_ptrs_lift [OF arch_switch_thread_ksQ] + tcbQueued_all_queued_tcb_ptrs_lift [OF arch_switch_thread_tcbQueued] lemma ThreadDecls_H_switchToThread_no_orphans: "\ \s. no_orphans s \ @@ -695,16 +654,9 @@ lemma ThreadDecls_H_switchToThread_no_orphans: ThreadDecls_H.switchToThread tcb_ptr \ \rv s. 
no_orphans s \" unfolding Thread_H.switchToThread_def - apply (wp setCurThread_almost_no_orphans - tcbSchedDequeue_almost_no_orphans) - apply (wps tcbSchedDequeue_ct') - apply (wp tcbSchedDequeue_all_queued_tcb_ptrs hoare_convert_imp)+ - apply (wps) - apply (wp)+ - apply (wps) - apply (wp) - apply (clarsimp) - done + by (wpsimp wp: setCurThread_almost_no_orphans hoare_vcg_imp_lift' + tcbSchedDequeue_all_queued_tcb_ptrs_other + | wps)+ lemma findM_failure': "\ \x S. \ \s. P S s \ f x \ \rv s. \ rv \ P (insert x S) s \ \ \ @@ -712,7 +664,7 @@ lemma findM_failure': apply (induct xs arbitrary: S) apply (clarsimp, wp, clarsimp) apply clarsimp - apply (rule hoare_seq_ext[rotated], assumption) + apply (rule bind_wp_fwd, assumption) apply (case_tac r) apply (clarsimp, wp, clarsimp) apply clarsimp @@ -722,94 +674,44 @@ lemma findM_failure': lemmas findM_failure = findM_failure'[where S="{}", simplified] -lemma tcbSchedEnqueue_inQueue_eq: - "\ valid_queues' and K (tcb_ptr = tcb_ptr') \ - tcbSchedEnqueue tcb_ptr - \ \rv s. tcb_ptr' \ all_queued_tcb_ptrs s \" - apply (rule hoare_gen_asm, simp) - apply wp - done - -lemma tcbSchedAppend_inQueue_eq: - "\ valid_queues' and K (tcb_ptr = tcb_ptr') \ - tcbSchedAppend tcb_ptr - \ \rv s. tcb_ptr' \ all_queued_tcb_ptrs s \" - apply (rule hoare_gen_asm, simp) - apply wp - done - lemma findM_on_success: "\ \x. \ P x \ f x \ \rv s. rv \; \x y. \ P x \ f y \ \rv. P x \ \ \ \ \s. \x \ set xs. P x s \ findM f xs \ \rv s. \ y. rv = Some y \" apply (induct xs; clarsimp) apply wp+ apply (clarsimp simp: imp_conv_disj Bex_def) - apply (wp hoare_vcg_disj_lift hoare_ex_wp | clarsimp | assumption)+ + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift | clarsimp | assumption)+ done crunch st_tcb' [wp]: switchToThread "\s. P' (st_tcb_at' P t s)" (ignore: ARM.clearExMonitor) -lemma setQueue_deq_not_empty: - "\ \s. (\tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s) \ - (\tcb_ptr. \ st_tcb_at' P tcb_ptr s \ - queue = [x\((ksReadyQueues s) (d, priority)). x \ tcb_ptr]) \ - setQueue d priority queue - \ \rv s. \tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s \" - unfolding setQueue_def - apply wp - apply auto - done - -lemma tcbSchedDequeue_not_empty: - "\ \s. (\tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s) \ \ st_tcb_at' P thread s \ - tcbSchedDequeue thread - \ \rv s. \tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s \" - unfolding tcbSchedDequeue_def - apply wp - apply (wp hoare_ex_wp threadSet_pred_tcb_no_state) - apply clarsimp - apply (wp setQueue_deq_not_empty) - apply clarsimp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs) - apply wp - apply clarsimp - apply clarsimp - apply (wp setQueue_deq_not_empty)+ - apply (rule_tac Q="\rv s. \ st_tcb_at' P thread s" in hoare_post_imp) - apply fastforce - apply (wp weak_if_wp | clarsimp)+ - done - lemmas switchToThread_all_active_tcb_ptrs[wp] = st_tcb_at'_all_active_tcb_ptrs_lift [OF switchToThread_st_tcb'] (* ksSchedulerAction s = ChooseNewThread *) lemma chooseThread_no_orphans [wp]: - notes hoare_TrueI[simp] - shows - "\\s. no_orphans s \ all_invs_but_ct_idle_or_in_cur_domain' s \ - (is_active_tcb_ptr (ksCurThread s) s - \ ksCurThread s \ all_queued_tcb_ptrs s)\ + "\\s. no_orphans s \ all_invs_but_ct_idle_or_in_cur_domain' s + \ (is_active_tcb_ptr (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s)\ chooseThread - \ \rv s. no_orphans s \" + \\_. 
no_orphans\" (is "\?PRE\ _ \_\") unfolding chooseThread_def Let_def supply if_split[split del] apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. ?PRE s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. ?PRE s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (intro bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp[where Q'="\rv s. ?PRE s \ ksReadyQueues_asrt s \ ready_qs_runnable s + \ rv = ksCurDomain s"]) + apply (rule_tac Q'="\rv s. ?PRE s \ ksReadyQueues_asrt s \ ready_qs_runnable s + \ curdom = ksCurDomain s \ rv = ksReadyQueuesL1Bitmap s curdom" + in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) - apply (simp, wp (once), simp) + apply (simp, wp, simp) (* we have a thread to switch to *) - apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv ThreadDecls_H_switchToThread_no_orphans) - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def - valid_queues_def st_tcb_at'_def) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def st_tcb_at'_def) apply (fastforce dest!: lookupBitmapPriority_obj_at' elim: obj_at'_weaken simp: all_active_tcb_ptrs_def) apply (wpsimp simp: bitmap_fun_defs) @@ -817,33 +719,6 @@ lemma chooseThread_no_orphans [wp]: apply (wpsimp simp: curDomain_def simp: invs_no_cicd_ksCurDomain_maxDomain')+ done -lemma tcbSchedAppend_in_ksQ: - "\valid_queues' and tcb_at' t\ tcbSchedAppend t - \\r s. \domain priority. t \ set (ksReadyQueues s (domain, priority))\" - apply (rule_tac Q="\s. \d p. valid_queues' s \ - obj_at' (\tcb. tcbPriority tcb = p) t s \ - obj_at' (\tcb. tcbDomain tcb = d) t s" - in hoare_pre_imp) - apply (clarsimp simp: tcb_at'_has_tcbPriority tcb_at'_has_tcbDomain) - apply (rule hoare_vcg_ex_lift)+ - apply (simp add: tcbSchedAppend_def unless_def) - apply wpsimp - apply (rule_tac Q="\rv s. tdom = d \ rv = p \ obj_at' (\tcb. tcbPriority tcb = p) t s - \ obj_at' (\tcb. tcbDomain tcb = d) t s" - in hoare_post_imp, clarsimp) - apply (wp, (wp threadGet_const)+) - apply (rule_tac Q="\rv s. - obj_at' (\tcb. tcbPriority tcb = p) t s \ - obj_at' (\tcb. tcbDomain tcb = d) t s \ - obj_at' (\tcb. tcbQueued tcb = rv) t s \ - (rv \ t \ set (ksReadyQueues s (d, p)))" in hoare_post_imp) - apply (clarsimp simp: o_def elim!: obj_at'_weakenE) - apply (wp threadGet_obj_at' hoare_vcg_imp_lift threadGet_const) - apply clarsimp - apply normalise_obj_at' - apply (drule(1) valid_queues'_ko_atD, simp+) - done - lemma hoare_neg_imps: "\P\ f \\ rv s. \ R rv s\ \ \P\ f \\r s. R r s \ Q r s\" by (auto simp: valid_def) @@ -867,7 +742,7 @@ lemma ThreadDecls_H_switchToThread_ct [wp]: crunch no_orphans [wp]: nextDomain no_orphans (wp: no_orphans_lift simp: Let_def) -crunch ksQ [wp]: nextDomain "\s. P (ksReadyQueues s p)" +crunch tcbQueued[wp]: nextDomain "\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)" (simp: Let_def) crunch st_tcb_at' [wp]: nextDomain "\s. P (st_tcb_at' P' p s)" @@ -879,14 +754,6 @@ crunch ct' [wp]: nextDomain "\s. P (ksCurThread s)" crunch sch_act_not [wp]: nextDomain "sch_act_not t" (simp: Let_def) -lemma tcbSchedEnqueue_in_ksQ': - "\valid_queues' and tcb_at' t and K (t = t')\ - tcbSchedEnqueue t' - \\r s. \domain priority. 
t \ set (ksReadyQueues s (domain, priority))\" - apply (rule hoare_gen_asm) - apply (wp tcbSchedEnqueue_in_ksQ | clarsimp)+ - done - lemma all_invs_but_ct_idle_or_in_cur_domain'_strg: "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" by (clarsimp simp: invs'_to_invs_no_cicd'_def) @@ -899,69 +766,6 @@ lemma obj_at'_static_fix: "\ obj_at' (\(ko::'a::pspace_storable). True) p s ; P \ \ obj_at' (\(ko::'a::pspace_storable). P) p s" by (erule obj_at'_weakenE, simp) -lemma tcbSchedEnqueue_in_ksQ_aqtp[wp]: - "\valid_queues' and tcb_at' t\ tcbSchedEnqueue t - \\r s. t \ all_queued_tcb_ptrs s\" - apply (clarsimp simp: all_queued_tcb_ptrs_def) - apply (rule tcbSchedEnqueue_in_ksQ) - done - -crunch ksReadyQueues[wp]: threadGet "\s. P (ksReadyQueues s)" - -lemma tcbSchedEnqueue_in_ksQ_already_queued: - "\\s. valid_queues' s \ tcb_at' t s \ - (\domain priority. t' \ set (ksReadyQueues s (domain, priority))) \ - tcbSchedEnqueue t - \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" - apply (case_tac "t'=t", wpsimp wp: tcbSchedEnqueue_in_ksQ) - apply (wpsimp simp: tcbSchedEnqueue_def unless_def) - apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" - in hoare_post_imp) - apply metis - apply wpsimp+ - done - -lemma tcbSchedAppend_in_ksQ_already_queued: - "\\s. valid_queues' s \ tcb_at' t s \ - (\domain priority. t' \ set (ksReadyQueues s (domain, priority))) \ - tcbSchedAppend t - \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" - apply (case_tac "t'=t", wpsimp wp: tcbSchedAppend_in_ksQ) - apply (wpsimp simp: tcbSchedAppend_def unless_def) - apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" - in hoare_post_imp) - apply metis - apply wpsimp+ - done - -lemma tcbSchedEnqueue_in_ksQ'': - "\\s. valid_queues' s \ tcb_at' t s \ - (t' \ t \ (\domain priority. t' \ set (ksReadyQueues s (domain, priority)))) \ - tcbSchedEnqueue t - \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" - apply (case_tac "t'=t", wpsimp wp: tcbSchedEnqueue_in_ksQ) - apply clarsimp - apply (wpsimp simp: tcbSchedEnqueue_def unless_def) - apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" - in hoare_post_imp) - apply metis - apply wpsimp+ - done - -lemma tcbSchedAppend_in_ksQ'': - "\\s. valid_queues' s \ tcb_at' t s \ - (t' \ t \ (\domain priority. t' \ set (ksReadyQueues s (domain, priority)))) \ - tcbSchedAppend t - \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" - apply (case_tac "t'=t", wpsimp wp: tcbSchedAppend_in_ksQ) - apply clarsimp - apply (wpsimp simp: tcbSchedAppend_def unless_def) - apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" - in hoare_post_imp) - apply metis - apply wpsimp+ - done - crunches setSchedulerAction for pred_tcb_at': "\s. P (pred_tcb_at' proj Q t s)" and ct': "\s. P (ksCurThread s)" @@ -980,12 +784,6 @@ lemma ct_active_st_tcb_at': apply (case_tac st, auto) done -lemma tcbSchedEnqueue_in_ksQ_already_queued_aqtp: - "\\s. valid_queues' s \ tcb_at' t s \ - t' \ all_queued_tcb_ptrs s \ tcbSchedEnqueue t - \\r s. 
t' \ all_queued_tcb_ptrs s \" - by (clarsimp simp: all_queued_tcb_ptrs_def tcbSchedEnqueue_in_ksQ_already_queued) - (* FIXME move *) lemma invs_switchToThread_runnable': "\ invs' s ; ksSchedulerAction s = SwitchToThread t \ \ st_tcb_at' runnable' t s" @@ -1006,17 +804,16 @@ lemma in_all_active_tcb_ptrsD: done lemma scheduleChooseNewThread_no_orphans: - "\ invs' and no_orphans - and (\s. ksSchedulerAction s = ChooseNewThread - \ (st_tcb_at' runnable' (ksCurThread s) s - \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p))))) \ + "\invs' and no_orphans + and (\s. ksSchedulerAction s = ChooseNewThread + \ (st_tcb_at' runnable' (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s))\ scheduleChooseNewThread - \\_. no_orphans \" + \\_. no_orphans\" unfolding scheduleChooseNewThread_def apply (wp add: ssa_no_orphans hoare_vcg_all_lift) apply (wp hoare_disjI1 chooseThread_nosch)+ apply (wp nextDomain_invs_no_cicd' hoare_vcg_imp_lift - hoare_lift_Pf2 [OF ksQ_all_queued_tcb_ptrs_lift[OF nextDomain_ksQ] + hoare_lift_Pf2 [OF tcbQueued_all_queued_tcb_ptrs_lift[OF nextDomain_tcbQueued] nextDomain_ct'] hoare_lift_Pf2 [OF st_tcb_at'_is_active_tcb_ptr_lift[OF nextDomain_st_tcb_at'] nextDomain_ct'] @@ -1025,85 +822,73 @@ lemma scheduleChooseNewThread_no_orphans: is_active_tcb_ptr_runnable')+ done +lemma setSchedulerAction_tcbQueued[wp]: + "setSchedulerAction sa \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)\" + by wpsimp + lemma schedule_no_orphans[wp]: notes ssa_wp[wp del] shows - "\ \s. no_orphans s \ invs' s \ - schedule - \ \rv s. no_orphans s \" + "\no_orphans and invs'\ schedule \\_. no_orphans\" proof - have do_switch_to: "\candidate. \\s. no_orphans s \ ksSchedulerAction s = SwitchToThread candidate \ st_tcb_at' runnable' candidate s - \ (st_tcb_at' runnable' (ksCurThread s) s - \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p)))) \ - do ThreadDecls_H.switchToThread candidate; - setSchedulerAction ResumeCurrentThread - od - \\rv. no_orphans\" + \ (st_tcb_at' runnable' (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s) \ + do ThreadDecls_H.switchToThread candidate; + setSchedulerAction ResumeCurrentThread + od + \\_. no_orphans\" apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans hoare_vcg_all_lift ThreadDecls_H_switchToThread_no_orphans)+ apply (rule_tac Q="\_ s. (t = candidate \ ksCurThread s = candidate) \ (t \ candidate \ sch_act_not t s)" in hoare_post_imp) - apply (wpsimp wp: stt_nosch static_imp_wp)+ + apply (wpsimp wp: stt_nosch hoare_weak_lift_imp)+ apply (fastforce dest!: in_all_active_tcb_ptrsD simp: all_queued_tcb_ptrs_def comp_def) done have abort_switch_to_enq: "\candidate. - \\s. no_orphans s \ invs' s \ valid_queues' s + \\s. no_orphans s \ invs' s \ ksSchedulerAction s = SwitchToThread candidate - \ (st_tcb_at' runnable' (ksCurThread s) s - \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p)))) \ - do tcbSchedEnqueue candidate; - setSchedulerAction ChooseNewThread; - scheduleChooseNewThread - od - \\rv. no_orphans\" - apply (rule hoare_pre) - apply (wp scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) + \ (st_tcb_at' runnable' (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s) \ + do tcbSchedEnqueue candidate; + setSchedulerAction ChooseNewThread; + scheduleChooseNewThread + od + \\_. 
no_orphans\" + apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift - simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def - | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_ksQ])+ - apply (wp tcbSchedEnqueue_in_ksQ' tcbSchedEnqueue_no_orphans hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift) - apply (wp hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_pred_tcb_at'] - hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_in_ksQ_already_queued] - tcbSchedEnqueue_no_orphans - | strengthen not_pred_tcb_at'_strengthen - | wp (once) hoare_vcg_imp_lift')+ - apply (clarsimp) - apply (frule invs_sch_act_wf', clarsimp simp: pred_tcb_at') - apply (simp add: st_tcb_at_neg' tcb_at_invs') + simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def + | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_tcbQueued])+ + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift + | strengthen not_pred_tcb_at'_strengthen + | rule hoare_lift_Pf2[where f=ksCurThread])+ + apply (simp add: st_tcb_at_neg' tcb_at_invs' all_queued_tcb_ptrs_def) done have abort_switch_to_app: "\candidate. - \\s. no_orphans s \ invs' s \ valid_queues' s + \\s. no_orphans s \ invs' s \ ksSchedulerAction s = SwitchToThread candidate \ (st_tcb_at' runnable' (ksCurThread s) s - \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p))) ) \ - do tcbSchedAppend candidate; - setSchedulerAction ChooseNewThread; - scheduleChooseNewThread - od - \\rv. no_orphans\" - apply (rule hoare_pre) - apply (wp scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) + \ ksCurThread s \ all_queued_tcb_ptrs s ) \ + do tcbSchedAppend candidate; + setSchedulerAction ChooseNewThread; + scheduleChooseNewThread + od + \\_. 
no_orphans\" + apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift - simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def - | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_ksQ])+ - apply (wp tcbSchedAppend_in_ksQ'' tcbSchedAppend_no_orphans hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift) - apply (wp hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedAppend_pred_tcb_at'] - hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedAppend_in_ksQ_already_queued] - tcbSchedAppend_no_orphans - | strengthen not_pred_tcb_at'_strengthen - | wp (once) hoare_vcg_imp_lift')+ - apply (clarsimp) - apply (frule invs_sch_act_wf', clarsimp simp: pred_tcb_at') - apply (simp add: st_tcb_at_neg' tcb_at_invs') + simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def + | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_tcbQueued])+ + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift + | strengthen not_pred_tcb_at'_strengthen + | rule hoare_lift_Pf2[where f=ksCurThread])+ + apply (simp add: st_tcb_at_neg' tcb_at_invs' all_queued_tcb_ptrs_def) done show ?thesis @@ -1111,47 +896,39 @@ proof - supply if_weak_cong[cong] apply (wp, wpc) \ \action = ResumeCurrentThread\ - apply (wp)[1] - \ \action = ChooseNewThread\ - apply (clarsimp simp: when_def scheduleChooseNewThread_def) - apply (wp ssa_no_orphans hoare_vcg_all_lift) - apply (wp hoare_disjI1 chooseThread_nosch) + apply (wp)[1] + \ \action = ChooseNewThread\ + apply (clarsimp simp: when_def scheduleChooseNewThread_def) + apply (wp ssa_no_orphans hoare_vcg_all_lift) + apply (wp hoare_disjI1 chooseThread_nosch) apply (wp nextDomain_invs_no_cicd' hoare_vcg_imp_lift - hoare_lift_Pf2 [OF ksQ_all_queued_tcb_ptrs_lift - [OF nextDomain_ksQ] + hoare_lift_Pf2 [OF tcbQueued_all_queued_tcb_ptrs_lift + [OF nextDomain_tcbQueued] nextDomain_ct'] hoare_lift_Pf2 [OF st_tcb_at'_is_active_tcb_ptr_lift [OF nextDomain_st_tcb_at'] nextDomain_ct'] hoare_vcg_all_lift getDomainTime_wp)[2] - apply ((wp tcbSchedEnqueue_no_orphans tcbSchedEnqueue_in_ksQ' - hoare_drop_imp - | clarsimp simp: all_queued_tcb_ptrs_def - | strengthen all_invs_but_ct_idle_or_in_cur_domain'_strg - | wps tcbSchedEnqueue_ct')+)[1] - apply ((wp tcbSchedEnqueue_no_orphans tcbSchedEnqueue_in_ksQ' + apply wpsimp + apply ((wp tcbSchedEnqueue_no_orphans tcbSchedEnqueue_all_queued_tcb_ptrs' hoare_drop_imp - | clarsimp simp: all_queued_tcb_ptrs_def - | strengthen all_invs_but_ct_idle_or_in_cur_domain'_strg - | wps tcbSchedEnqueue_ct')+)[1] - apply wp[1] + | clarsimp simp: all_queued_tcb_ptrs_def + | strengthen all_invs_but_ct_idle_or_in_cur_domain'_strg + | rule hoare_lift_Pf2[where f=ksCurThread])+)[1] + apply wpsimp \ \action = SwitchToThread candidate\ - apply (clarsimp) + apply clarsimp apply (rename_tac candidate) apply (wpsimp wp: do_switch_to abort_switch_to_enq abort_switch_to_app) (* isHighestPrio *) apply (wp hoare_drop_imps) apply (wp add: tcbSchedEnqueue_no_orphans)+ apply (clarsimp simp: conj_comms cong: conj_cong imp_cong split del: if_split) - apply (wp hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_pred_tcb_at'] - hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_in_ksQ'] - hoare_vcg_imp_lift' + apply (wp hoare_vcg_imp_lift' | strengthen not_pred_tcb_at'_strengthen)+ - - apply (clarsimp simp: comp_def) - apply (frule invs_queues) - apply (clarsimp simp: invs_valid_queues' tcb_at_invs' st_tcb_at_neg' is_active_tcb_ptr_runnable') - apply 
(fastforce simp: all_invs_but_ct_idle_or_in_cur_domain'_strg invs_switchToThread_runnable') + apply (wps | wpsimp wp: tcbSchedEnqueue_all_queued_tcb_ptrs')+ + apply (fastforce simp: is_active_tcb_ptr_runnable' all_invs_but_ct_idle_or_in_cur_domain'_strg + invs_switchToThread_runnable') done qed @@ -1172,37 +949,33 @@ crunches completeSignal (simp: crunch_simps wp: crunch_wps) lemma possibleSwitchTo_almost_no_orphans [wp]: - "\ \s. almost_no_orphans target s \ valid_queues' s \ st_tcb_at' runnable' target s - \ weak_sch_act_wf (ksSchedulerAction s) s \ + "\\s. almost_no_orphans target s \ st_tcb_at' runnable' target s + \ weak_sch_act_wf (ksSchedulerAction s) s\ possibleSwitchTo target - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding possibleSwitchTo_def - by (wp rescheduleRequired_valid_queues'_weak tcbSchedEnqueue_almost_no_orphans - ssa_almost_no_orphans static_imp_wp + by (wp tcbSchedEnqueue_almost_no_orphans + ssa_almost_no_orphans hoare_weak_lift_imp | wpc | clarsimp | wp (once) hoare_drop_imp)+ lemma possibleSwitchTo_almost_no_orphans': - "\ \s. almost_no_orphans target s \ valid_queues' s \ st_tcb_at' runnable' target s - \ sch_act_wf (ksSchedulerAction s) s \ + "\\s. almost_no_orphans target s \ st_tcb_at' runnable' target s + \ sch_act_wf (ksSchedulerAction s) s \ possibleSwitchTo target - \ \rv s. no_orphans s \" + \\_. no_orphans\" by wp (strengthen sch_act_wf_weak, assumption) +crunches tcbQueueAppend, tcbQueuePrepend + for almost_no_orphans[wp]: "almost_no_orphans tcbPtr" + lemma tcbSchedAppend_almost_no_orphans: - "\ \s. almost_no_orphans thread s \ valid_queues' s \ + "\almost_no_orphans thread\ tcbSchedAppend thread - \ \_ s. no_orphans s \" + \\_. no_orphans\" unfolding tcbSchedAppend_def - apply (wp setQueue_almost_no_orphans_enq[where tcb_ptr=thread] threadSet_no_orphans - | clarsimp simp: unless_def | simp only: subset_insertI)+ - apply (unfold threadGet_def) - apply (wp getObject_tcb_wp | clarsimp)+ - apply (drule obj_at_ko_at', clarsimp) - apply (rule_tac x=ko in exI) - apply (clarsimp simp: almost_no_orphans_def no_orphans_def) - apply (drule queued_in_queue | simp)+ - apply (auto simp: all_queued_tcb_ptrs_def) + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadGet_wp) + apply (fastforce simp: almost_no_orphans_def no_orphans_def all_queued_tcb_ptrs_def obj_at'_def) done lemma no_orphans_is_almost[simp]: @@ -1211,7 +984,6 @@ lemma no_orphans_is_almost[simp]: crunches decDomainTime for no_orphans [wp]: no_orphans - and valid_queues' [wp]: valid_queues' (wp: no_orphans_lift) lemma timerTick_no_orphans [wp]: @@ -1221,28 +993,19 @@ lemma timerTick_no_orphans [wp]: unfolding timerTick_def getDomainTime_def supply if_split[split del] apply (subst threadState_case_if) - apply (wpsimp wp: threadSet_no_orphans threadSet_valid_queues' - threadSet_valid_queues' tcbSchedAppend_almost_no_orphans + apply (wpsimp wp: threadSet_no_orphans tcbSchedAppend_almost_no_orphans threadSet_almost_no_orphans threadSet_no_orphans tcbSchedAppend_sch_act_wf hoare_drop_imp simp: if_apply_def2 | strengthen sch_act_wf_weak)+ - apply (rule_tac Q="\rv s. no_orphans s \ valid_queues' s \ tcb_at' thread s - \ sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp) - apply (clarsimp simp: inQ_def) - apply (wp hoare_drop_imps | clarsimp)+ - apply (auto split: if_split) - done + done lemma handleDoubleFault_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ - handleDoubleFault tptr ex1 ex2 - \ \rv s. no_orphans s \" + "\no_orphans\ handleDoubleFault tptr ex1 ex2 \\_. 
no_orphans \" unfolding handleDoubleFault_def - apply (wp setThreadState_not_active_no_orphans - | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ - done + by (wpsimp wp: setThreadState_not_active_no_orphans + simp: is_active_thread_state_def isRestart_def isRunning_def)+ crunches cteInsert, getThreadCallerSlot, getThreadReplySlot for st_tcb' [wp]: "st_tcb_at' (\st. P st) t" @@ -1251,11 +1014,9 @@ crunches cteInsert, getThreadCallerSlot, getThreadReplySlot (wp: crunch_wps) lemma setupCallerCap_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ - setupCallerCap sender receiver gr - \ \rv s. no_orphans s \" + "setupCallerCap sender receiver gr \no_orphans\" unfolding setupCallerCap_def - apply (wp setThreadState_not_active_no_orphans + apply (wp setThreadState_not_active_no_orphans hoare_drop_imps | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ done @@ -1264,82 +1025,55 @@ lemma setupCallerCap_almost_no_orphans [wp]: setupCallerCap sender receiver gr \ \rv s. almost_no_orphans tcb_ptr s \" unfolding setupCallerCap_def - apply (wp setThreadState_not_active_almost_no_orphans + apply (wp setThreadState_not_active_almost_no_orphans hoare_drop_imps | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ done +crunches cteInsert, setExtraBadge, setMessageInfo, transferCaps, copyMRs, + doNormalTransfer, doFaultTransfer, copyGlobalMappings, + invalidateHWASIDEntry, invalidateASID, invalidateASIDEntry + for tcbQueued[wp]: "obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr" + (wp: crunch_wps simp: crunch_simps) + crunches doIPCTransfer, setMRs, setEndpoint for ksReadyQueues [wp]: "\s. P (ksReadyQueues s)" and no_orphans [wp]: "no_orphans" - (wp: transferCapsToSlots_pres1 crunch_wps no_orphans_lift setObject_queues_unchanged_tcb - updateObject_default_inv) + (wp: no_orphans_lift updateObject_default_inv) lemma sendIPC_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ sendIPC blocking call badge canGrant canGrantReply thread epptr - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding sendIPC_def apply (wp hoare_drop_imps setThreadState_not_active_no_orphans sts_st_tcb' possibleSwitchTo_almost_no_orphans' | wpc | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ - - apply (rule_tac Q="\rv. no_orphans and valid_queues' and valid_objs' and ko_at' rv epptr + apply (rule_tac Q="\rv. no_orphans and valid_objs' and ko_at' rv epptr and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) apply (fastforce simp: valid_objs'_def valid_obj'_def valid_ep'_def obj_at'_def projectKOs) apply (wp get_ep_sp' | clarsimp)+ done lemma sendFaultIPC_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ sendFaultIPC tptr fault - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding sendFaultIPC_def apply (rule hoare_pre) - apply (wp threadSet_valid_queues' threadSet_no_orphans threadSet_valid_objs' + apply (wp threadSet_no_orphans threadSet_valid_objs' threadSet_sch_act | wpc | clarsimp)+ - apply (rule_tac Q'="\handlerCap s. no_orphans s \ valid_queues' s - \ valid_objs' s - \ sch_act_wf (ksSchedulerAction s) s" - in hoare_post_imp_R) - apply (wp | clarsimp simp: inQ_def valid_tcb'_def tcb_cte_cases_def)+ - done - -lemma sendIPC_valid_queues' [wp]: - "\ \s. 
valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ - sendIPC blocking call badge canGrant canGrantReply thread epptr - \ \rv s. valid_queues' s \" - unfolding sendIPC_def - apply (wpsimp wp: hoare_drop_imps) - apply (wpsimp | wp (once) sts_st_tcb')+ - apply (rule_tac Q="\rv. valid_queues' and valid_objs' and ko_at' rv epptr - and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) - apply (clarsimp) - apply (wp get_ep_sp' | clarsimp)+ - done - -lemma sendFaultIPC_valid_queues' [wp]: - "\ \s. valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ - sendFaultIPC tptr fault - \ \rv s. valid_queues' s \" - unfolding sendFaultIPC_def - apply (rule hoare_pre) - apply (wp threadSet_valid_queues' threadSet_valid_objs' threadSet_sch_act - | wpc | clarsimp)+ - apply (rule_tac Q'="\handlerCap s. valid_queues' s \ valid_objs' s - \ sch_act_wf (ksSchedulerAction s) s" - in hoare_post_imp_R) + apply (rule_tac Q'="\_ s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s" + in hoare_strengthen_postE_R) apply (wp | clarsimp simp: inQ_def valid_tcb'_def tcb_cte_cases_def)+ done -lemma handleFault_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ +lemma handleFault_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ handleFault tptr ex1 - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding handleFault_def - apply (rule hoare_pre) - apply (wp | clarsimp)+ - done + by wpsimp lemma replyFromKernel_no_orphans [wp]: "\ \s. no_orphans s \ @@ -1357,32 +1091,24 @@ crunch ksReadyQueues [wp]: createNewCaps "\ s. P (ksReadyQueues s)" crunch inv [wp]: alignError "P" -lemma createObjects_no_orphans [wp]: - "\ \s. no_orphans s \ pspace_aligned' s \ pspace_no_overlap' ptr sz s \ pspace_distinct' s - \ n \ 0 \ range_cover ptr sz (objBitsKO val + gbits) n - \ \ case_option False (is_active_thread_state \ tcbState) (projectKO_opt val) \ +lemma createObjects_no_orphans[wp]: + "\\s. no_orphans s \ pspace_aligned' s \ pspace_no_overlap' ptr sz s \ pspace_distinct' s + \ n \ 0 \ range_cover ptr sz (objBitsKO val + gbits) n + \ \ case_option False (is_active_thread_state \ tcbState) (projectKO_opt val) + \ \ case_option False tcbQueued (projectKO_opt val)\ createObjects ptr n val gbits - \ \rv s. no_orphans s \" + \\_ s. no_orphans s\" apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def is_active_tcb_ptr_def all_queued_tcb_ptrs_def) apply (simp only: imp_conv_disj pred_tcb_at'_def createObjects_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift createObjects_orig_obj_at2') - apply clarsimp - apply (erule(1) impE) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift createObjects_orig_obj_at2'[where sz=sz]) apply clarsimp - apply (drule_tac x = x in spec) - apply (erule impE) - apply (clarsimp simp:obj_at'_def projectKOs split: option.splits) - apply simp done -lemma copyGlobalMappings_no_orphans [wp]: - "\ \s. no_orphans s \ - copyGlobalMappings newPD - \ \rv s. no_orphans s \" +lemma copyGlobalMappings_no_orphans[wp]: + "copyGlobalMappings newPD \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) crunch no_orphans [wp]: insertNewCap "no_orphans" (wp: hoare_drop_imps) @@ -1527,45 +1253,47 @@ lemma mapM_x_match: "\I and V xs\ mapM_x m xs \\rv. Q\ \ \I and V xs\ mapM_x m xs \\rv. 
Q\" by assumption -lemma cancelAllIPC_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ +lemma cancelAllIPC_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s\ cancelAllIPC epptr - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding cancelAllIPC_def apply (wp sts_valid_objs' set_ep_valid_objs' sts_st_tcb' hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans | wpc | rule mapM_x_match, rename_tac list, - rule_tac V="\_. valid_queues' and valid_objs'" + rule_tac V="\_. valid_objs' and pspace_aligned' and pspace_distinct'" and I="no_orphans and (\s. \t\set list. tcb_at' t s)" in mapM_x_inv_wp2 | clarsimp simp: valid_tcb_state'_def)+ - apply (rule_tac Q="\rv. no_orphans and valid_objs' and valid_queues' and ko_at' rv epptr" - in hoare_post_imp) + apply (rule_tac Q="\rv. no_orphans and valid_objs' and pspace_aligned' and pspace_distinct' + and ko_at' rv epptr" + in hoare_post_imp) apply (fastforce simp: valid_obj'_def valid_ep'_def obj_at'_def projectKOs) apply (wp get_ep_sp' | clarsimp)+ done -lemma cancelAllSignals_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ +lemma cancelAllSignals_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s\ cancelAllSignals ntfn - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding cancelAllSignals_def apply (wp sts_valid_objs' set_ntfn_valid_objs' sts_st_tcb' hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans | wpc | clarsimp simp: valid_tcb_state'_def)+ apply (rename_tac list) - apply (rule_tac V="\_. valid_queues' and valid_objs'" + apply (rule_tac V="\_. valid_objs' and pspace_aligned' and pspace_distinct'" and I="no_orphans and (\s. \t\set list. tcb_at' t s)" in mapM_x_inv_wp2) apply simp apply (wp sts_valid_objs' set_ntfn_valid_objs' sts_st_tcb' hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans| clarsimp simp: valid_tcb_state'_def)+ - apply (rule_tac Q="\rv. no_orphans and valid_objs' and valid_queues' and ko_at' rv ntfn" - in hoare_post_imp) + apply (rule_tac Q="\rv. no_orphans and valid_objs' and pspace_aligned' and pspace_distinct' + and ko_at' rv ntfn" + in hoare_post_imp) apply (fastforce simp: valid_obj'_def valid_ntfn'_def obj_at'_def projectKOs) apply (wp get_ntfn_sp' | clarsimp)+ done @@ -1577,9 +1305,9 @@ lemma unbindNotification_no_orphans[wp]: unbindNotification t \ \rv s. no_orphans s\" unfolding unbindNotification_def - apply (rule hoare_seq_ext[OF _ gbn_sp']) + apply (rule bind_wp[OF _ gbn_sp']) apply (case_tac ntfnPtr, simp_all, wp, simp) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (wp | simp)+ done @@ -1590,78 +1318,63 @@ lemma unbindMaybeNotification_no_orphans[wp]: unfolding unbindMaybeNotification_def by (wp getNotification_wp | simp | wpc)+ -lemma finaliseCapTrue_standin_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ - finaliseCapTrue_standin cap final - \ \rv s. no_orphans s \" +lemma finaliseCapTrue_standin_no_orphans[wp]: + "\no_orphans and valid_objs' and pspace_aligned' and pspace_distinct'\ + finaliseCapTrue_standin cap final + \\_. no_orphans\" unfolding finaliseCapTrue_standin_def - apply (rule hoare_pre) - apply (wp | clarsimp simp: Let_def | wpc)+ - done + by (wpsimp | clarsimp simp: Let_def | wpc)+ -lemma cteDeleteOne_no_orphans [wp]: - "\ \s. 
no_orphans s \ valid_queues' s \ valid_objs' s \ +lemma cteDeleteOne_no_orphans[wp]: + "\no_orphans and valid_objs' and pspace_aligned' and pspace_distinct'\ cteDeleteOne slot - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding cteDeleteOne_def - apply (wp assert_inv isFinalCapability_inv weak_if_wp | clarsimp simp: unless_def)+ - done + by (wp assert_inv isFinalCapability_inv weak_if_wp | clarsimp simp: unless_def)+ crunch valid_objs' [wp]: getThreadReplySlot "valid_objs'" -lemma cancelSignal_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ - cancelSignal t ntfn - \ \rv s. no_orphans s \" +lemma cancelSignal_no_orphans[wp]: + "cancelSignal t ntfn \no_orphans\" unfolding cancelSignal_def Let_def - apply (rule hoare_pre) - apply (wp hoare_drop_imps setThreadState_not_active_no_orphans | wpc - | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ - done + by (wpsimp wp: hoare_drop_imps setThreadState_not_active_no_orphans + simp: is_active_thread_state_def isRestart_def isRunning_def) lemma cancelIPC_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ + "\no_orphans and valid_objs' and pspace_aligned' and pspace_distinct'\ cancelIPC t - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding cancelIPC_def Let_def apply (rule hoare_pre) apply (wp setThreadState_not_active_no_orphans hoare_drop_imps weak_if_wp - threadSet_valid_queues' threadSet_valid_objs' threadSet_no_orphans | wpc + threadSet_valid_objs' threadSet_no_orphans | wpc | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def inQ_def valid_tcb'_def tcb_cte_cases_def)+ done - lemma asUser_almost_no_orphans: "\almost_no_orphans t\ asUser a f \\_. almost_no_orphans t\" unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) -crunch almost_no_orphans[wp]: asUser "almost_no_orphans t" - (simp: almost_no_orphans_disj all_queued_tcb_ptrs_def wp: hoare_vcg_all_lift hoare_vcg_disj_lift crunch_wps) - -lemma sendSignal_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ +lemma sendSignal_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s + \ pspace_aligned' s \ pspace_distinct' s\ sendSignal ntfnptr badge - \ \_ s. no_orphans s \" + \\_. no_orphans\" unfolding sendSignal_def - apply (rule hoare_pre) - apply (wp sts_st_tcb' gts_wp' getNotification_wp asUser_almost_no_orphans - cancelIPC_weak_sch_act_wf - | wpc | clarsimp simp: sch_act_wf_weak)+ + apply (wp sts_st_tcb' gts_wp' getNotification_wp asUser_almost_no_orphans + cancelIPC_weak_sch_act_wf + | wpc | clarsimp simp: sch_act_wf_weak)+ done -lemma handleInterrupt_no_orphans [wp]: - "\ \s. no_orphans s \ invs' s \ +lemma handleInterrupt_no_orphans[wp]: + "\no_orphans and invs' and pspace_aligned' and pspace_distinct'\ handleInterrupt irq - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding handleInterrupt_def - apply (rule hoare_pre) - apply (wp hoare_drop_imps hoare_vcg_all_lift getIRQState_inv - | wpc | clarsimp simp: invs'_def valid_state'_def maskIrqSignal_def - handleReservedIRQ_def)+ - done + by (wp hoare_drop_imps hoare_vcg_all_lift getIRQState_inv + | wpc | clarsimp simp: invs'_def valid_state'_def maskIrqSignal_def handleReservedIRQ_def)+ lemma updateRestartPC_no_orphans[wp]: "\ \s. no_orphans s \ invs' s \ @@ -1669,20 +1382,6 @@ lemma updateRestartPC_no_orphans[wp]: \ \rv s. 
no_orphans s \" by (wpsimp simp: updateRestartPC_def asUser_no_orphans) -lemma updateRestartPC_valid_queues'[wp]: - "\ \s. valid_queues' s \ - updateRestartPC t - \ \rv s. valid_queues' s \" - unfolding updateRestartPC_def - apply (rule asUser_valid_queues') - done - -lemma updateRestartPC_no_orphans_invs'_valid_queues'[wp]: - "\\s. no_orphans s \ invs' s \ valid_queues' s \ - updateRestartPC t - \\rv s. no_orphans s \ valid_queues' s \" - by (wpsimp simp: updateRestartPC_def asUser_no_orphans) - lemma suspend_no_orphans [wp]: "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' t s \ suspend t @@ -1696,49 +1395,34 @@ lemma suspend_no_orphans [wp]: apply auto done -lemma storeHWASID_no_orphans [wp]: - "\ \s. no_orphans s \ - storeHWASID asid hw_asid - \ \reply s. no_orphans s \" +lemma storeHWASID_no_orphans[wp]: + "storeHWASID asid hw_asid \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) -lemma invalidateHWASIDEntry_no_orphans [wp]: - "\ \s. no_orphans s \ - invalidateHWASIDEntry hwASID - \ \reply s. no_orphans s \" +lemma invalidateHWASIDEntry_no_orphans[wp]: + "invalidateHWASIDEntry hwASID \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) -lemma invalidateASID_no_orphans [wp]: - "\ \s. no_orphans s \ - invalidateASID asid - \ \reply s. no_orphans s \" +lemma invalidateASID_no_orphans[wp]: + "invalidateASID asid \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) -lemma findFreeHWASID_no_orphans [wp]: - "\ \s. no_orphans s \ - findFreeHWASID - \ \reply s. no_orphans s \" +lemma findFreeHWASID_no_orphans[wp]: + "findFreeHWASID \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) crunch ksCurThread [wp]: invalidateASIDEntry "\ s. P (ksCurThread s)" crunch ksReadyQueues[wp]: invalidateASIDEntry "\s. P (ksReadyQueues s)" -lemma invalidateASIDEntry_no_orphans [wp]: - "\ \s. no_orphans s \ - invalidateASIDEntry asid - \ \rv s. no_orphans s \" +lemma invalidateASIDEntry_no_orphans[wp]: + "invalidateASIDEntry asid \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) crunch no_orphans [wp]: flushSpace "no_orphans" @@ -1754,21 +1438,15 @@ lemma deleteASIDPool_no_orphans [wp]: apply (wp mapM_wp_inv getObject_inv loadObject_default_inv | clarsimp)+ done -lemma storePTE_no_orphans [wp]: - "\ \s. no_orphans s \ - storePTE ptr val - \ \rv s. no_orphans s \" +lemma storePTE_no_orphans[wp]: + "storePTE ptr val \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) lemma storePDE_no_orphans [wp]: - "\ \s. no_orphans s \ - storePDE ptr val - \ \rv s. 
no_orphans s \" + "storePDE ptr val \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) crunches unmapPage for no_orphans [wp]: "no_orphans" @@ -1785,13 +1463,10 @@ lemma flushTable_no_orphans [wp]: crunches unmapPageTable, prepareThreadDelete for no_orphans [wp]: "no_orphans" -lemma setASIDPool_no_orphans [wp]: - "\ \s. no_orphans s \ - setObject p (ap :: asidpool) - \ \rv s. no_orphans s \" +lemma setASIDPool_no_orphans[wp]: + "setObject p (ap :: asidpool) \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) lemma deleteASID_no_orphans [wp]: "\ \s. no_orphans s \ @@ -1815,7 +1490,7 @@ lemma deletingIRQHandler_no_orphans [wp]: deletingIRQHandler irq \ \rv s. no_orphans s \" unfolding deletingIRQHandler_def - apply (wp, auto) + apply (wp hoare_drop_imps, auto) done lemma finaliseCap_no_orphans [wp]: @@ -1848,7 +1523,7 @@ lemma finaliseSlot_no_orphans [wp]: \ \rv s. no_orphans s \" unfolding finaliseSlot_def apply (rule validE_valid, rule hoare_pre, - rule hoare_post_impErr, rule use_spec) + rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where p=slot and slot=slot and Pr=no_orphans]) apply (simp_all add: no_orphans_finalise_prop_stuff) apply (wp | simp)+ @@ -1879,52 +1554,35 @@ lemma cteRevoke_no_orphans [wp]: done lemma cancelBadgedSends_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ - cancelBadgedSends epptr badge - \ \rv s. no_orphans s \" + "cancelBadgedSends epptr badge \no_orphans\" unfolding cancelBadgedSends_def - apply (rule hoare_pre) - apply (wp hoare_drop_imps | wpc | clarsimp)+ - apply (wp filterM_preserved tcbSchedEnqueue_almost_no_orphans gts_wp' - sts_st_tcb' hoare_drop_imps | clarsimp)+ + apply (wpsimp wp: filterM_preserved tcbSchedEnqueue_almost_no_orphans gts_wp' sts_st_tcb' + | wp (once) hoare_drop_imps)+ done crunch no_orphans [wp]: invalidateTLBByASID "no_orphans" crunch no_orphans [wp]: handleFaultReply "no_orphans" -crunch valid_queues' [wp]: handleFaultReply "valid_queues'" - lemma doReplyTransfer_no_orphans[wp]: "\no_orphans and invs' and tcb_at' sender and tcb_at' receiver\ doReplyTransfer sender receiver slot grant \\rv. no_orphans\" unfolding doReplyTransfer_def apply (wp sts_st_tcb' setThreadState_not_active_no_orphans threadSet_no_orphans - threadSet_valid_queues' threadSet_weak_sch_act_wf + threadSet_weak_sch_act_wf | wpc | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def | wp (once) hoare_drop_imps - | strengthen sch_act_wf_weak invs_valid_queues')+ + | strengthen sch_act_wf_weak)+ apply (rule_tac Q="\rv. invs' and no_orphans" in hoare_post_imp) apply (fastforce simp: inQ_def) apply (wp hoare_drop_imps | clarsimp)+ apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def) done -lemma cancelSignal_valid_queues' [wp]: - "\ \s. valid_queues' s \ valid_objs' s \ - cancelSignal t ntfn - \ \rv s. valid_queues' s \" - unfolding cancelSignal_def Let_def - apply (rule hoare_pre) - apply (wp hoare_drop_imps | wpc | clarsimp)+ - done - crunch no_orphans [wp]: setupReplyMaster "no_orphans" (wp: crunch_wps simp: crunch_simps) -crunch valid_queues' [wp]: setupReplyMaster "valid_queues'" - lemma restart_no_orphans [wp]: "\ \s. 
no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' t s \ restart t @@ -1933,7 +1591,6 @@ lemma restart_no_orphans [wp]: apply (wp tcbSchedEnqueue_almost_no_orphans sts_st_tcb' cancelIPC_weak_sch_act_wf | clarsimp simp: o_def if_apply_def2 | strengthen no_orphans_strg_almost - | strengthen invs_valid_queues' | wp (once) hoare_drop_imps)+ apply auto done @@ -1954,9 +1611,8 @@ lemma writereg_no_orphans: unfolding invokeTCB_def performTransfer_def postModifyRegisters_def apply simp apply (rule hoare_pre) - by (wp hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' static_imp_wp - | strengthen invs_valid_queues' | clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap )+ - + by (wp hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' hoare_weak_lift_imp + | strengthen | clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap)+ lemma copyreg_no_orphans: "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' src s @@ -1966,12 +1622,12 @@ lemma copyreg_no_orphans: unfolding invokeTCB_def performTransfer_def postModifyRegisters_def supply if_weak_cong[cong] apply simp - apply (wp hoare_vcg_if_lift static_imp_wp) + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp) apply (wp hoare_vcg_imp_lift' mapM_x_wp' asUser_no_orphans | wpc | clarsimp split del: if_splits)+ - apply (wp static_imp_wp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs' + apply (wp hoare_weak_lift_imp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs' restart_no_orphans asUser_no_orphans suspend_nonz_cap_to_tcb - | strengthen invs_valid_queues' | wpc | simp add: if_apply_def2)+ + | wpc | simp add: if_apply_def2)+ apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) done @@ -1981,7 +1637,7 @@ lemma settlsbase_no_orphans: \ \rv s. no_orphans s \" unfolding invokeTCB_def performTransfer_def apply simp - apply (wp hoare_vcg_if_lift static_imp_wp) + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp) apply (wpsimp wp: hoare_vcg_imp_lift' mapM_x_wp' asUser_no_orphans)+ done @@ -1993,22 +1649,19 @@ lemma almost_no_orphans_no_orphans': "\ almost_no_orphans t s; ksCurThread s = t\ \ no_orphans s" by (auto simp: almost_no_orphans_def no_orphans_def all_active_tcb_ptrs_def) -lemma setPriority_no_orphans [wp]: - "\ \s. no_orphans s \ invs' s \ tcb_at' tptr s \ +lemma setPriority_no_orphans[wp]: + "\no_orphans and invs' and tcb_at' tptr\ setPriority tptr prio - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding setPriority_def apply wpsimp - apply (rule_tac Q="\rv s. almost_no_orphans tptr s \ valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp) + apply (rule_tac Q="\_ s. almost_no_orphans tptr s \ weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp) apply clarsimp apply (clarsimp simp: is_active_tcb_ptr_runnable' pred_tcb_at'_def obj_at'_def almost_no_orphans_no_orphans elim!: almost_no_orphans_no_orphans') - apply (wp threadSet_almost_no_orphans threadSet_valid_queues' | clarsimp simp: inQ_def)+ + apply (wp threadSet_almost_no_orphans | clarsimp simp: inQ_def)+ apply (wpsimp wp: threadSet_weak_sch_act_wf) apply (wp tcbSchedDequeue_almost_no_orphans| clarsimp)+ - apply (rule_tac Q="\rv. obj_at' (Not \ tcbQueued) tptr and invs' and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) - apply (clarsimp simp: obj_at'_def inQ_def) - apply (wp tcbSchedDequeue_not_queued | clarsimp)+ done lemma setMCPriority_no_orphans[wp]: @@ -2047,20 +1700,19 @@ lemma tc_no_orphans: apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s]) apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial - threadSet_cap_to' hoare_vcg_all_lift static_imp_wp | clarsimp simp: inQ_def)+)[2] + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (cases mcp; clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s]) apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' hoare_vcg_all_lift static_imp_wp | clarsimp simp: inQ_def)+)[3] + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[3] apply ((simp only: simp_thms cong: conj_cong | wp cteDelete_deletes cteDelete_invs' cteDelete_sch_act_simple case_option_wp[where m'="return ()", OF setPriority_no_orphans return_inv,simplified] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] checkCap_inv[where P=no_orphans] checkCap_inv[where P="tcb_at' a"] - threadSet_cte_wp_at' hoare_vcg_all_lift_R hoare_vcg_all_lift threadSet_no_orphans - hoare_vcg_const_imp_lift_R static_imp_wp hoare_drop_imp threadSet_ipcbuffer_invs - | strengthen invs_valid_queues' + threadSet_cte_wp_at' hoare_vcg_all_liftE_R hoare_vcg_all_lift threadSet_no_orphans + hoare_vcg_const_imp_lift_R hoare_weak_lift_imp hoare_drop_imp threadSet_ipcbuffer_invs | (simp add: locateSlotTCB_def locateSlotBasic_def objBits_def objBitsKO_def tcbIPCBufferSlot_def tcb_cte_cases_def, wp hoare_return_sp) @@ -2091,13 +1743,12 @@ lemma invokeTCB_no_orphans [wp]: done lemma invokeCNode_no_orphans [wp]: - "\ \s. no_orphans s \ invs' s \ valid_cnode_inv' cinv s \ sch_act_simple s \ + "\no_orphans and invs' and valid_cnode_inv' cinv and sch_act_simple\ invokeCNode cinv - \ \rv. no_orphans \" + \\_. no_orphans\" unfolding invokeCNode_def apply (rule hoare_pre) - apply (wp hoare_drop_imps hoare_unless_wp | wpc | clarsimp split del: if_split)+ - apply (simp add: invs_valid_queues') + apply (wp hoare_drop_imps unless_wp | wpc | clarsimp split del: if_split)+ done lemma invokeIRQControl_no_orphans [wp]: @@ -2137,7 +1788,7 @@ lemma performPageInvocation_no_orphans [wp]: apply (simp add: performPageInvocation_def cong: page_invocation.case_cong) apply (rule hoare_pre) - apply (wp mapM_x_wp' mapM_wp' static_imp_wp | wpc | clarsimp simp: pdeCheckIfMapped_def pteCheckIfMapped_def)+ + apply (wp mapM_x_wp' mapM_wp' hoare_weak_lift_imp | wpc | clarsimp simp: pdeCheckIfMapped_def pteCheckIfMapped_def)+ done lemma performASIDControlInvocation_no_orphans [wp]: @@ -2190,17 +1841,17 @@ lemma performASIDControlInvocation_no_orphans [wp]: \\reply. no_orphans\" apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) - apply (wp static_imp_wp | clarsimp)+ + apply (wp hoare_weak_lift_imp | clarsimp)+ apply (rule_tac Q="\rv s. 
no_orphans s" in hoare_post_imp) apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def is_active_tcb_ptr_def all_queued_tcb_ptrs_def) apply (wp | clarsimp simp:placeNewObject_def2)+ apply (wp createObjects'_wp_subst)+ - apply (wp static_imp_wp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ + apply (wp hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ apply (strengthen invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace') apply (clarsimp simp:conj_comms) apply (wp deleteObjects_invs'[where idx = idx and d=False] - hoare_ex_wp deleteObjects_cte_wp_at'[where idx = idx and d=False] hoare_vcg_const_imp_lift ) + hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where idx = idx and d=False] hoare_vcg_const_imp_lift ) using invs' misc cte exclude no_orphans cover apply (clarsimp simp: is_active_thread_state_def makeObject_tcb valid_aci'_def cte_wp_at_ctes_of invs_pspace_aligned' invs_pspace_distinct' @@ -2240,17 +1891,15 @@ lemma arch_performInvocation_no_orphans [wp]: done lemma setDomain_no_orphans [wp]: - "\no_orphans and valid_queues and valid_queues' and cur_tcb'\ - setDomain tptr newdom + "\no_orphans and cur_tcb' and tcb_at' tptr\ + setDomain tptr newdom \\_. no_orphans\" apply (simp add: setDomain_def when_def) apply (wp tcbSchedEnqueue_almost_no_orphans hoare_vcg_imp_lift threadSet_almost_no_orphans - threadSet_valid_queues'_no_state threadSet_st_tcb_at2 hoare_vcg_disj_lift + threadSet_st_tcb_at2 hoare_vcg_disj_lift threadSet_no_orphans - | clarsimp simp: st_tcb_at_neg2 not_obj_at')+ - apply (auto simp: tcb_at_typ_at' st_tcb_at_neg' is_active_tcb_ptr_runnable' - cur_tcb'_def obj_at'_def - dest: pred_tcb_at') + | clarsimp simp: st_tcb_at_neg2 not_obj_at')+ + apply (fastforce simp: tcb_at_typ_at' is_active_tcb_ptr_runnable') done crunch no_orphans[wp]: InterruptDecls_H.invokeIRQHandler no_orphans @@ -2279,8 +1928,6 @@ lemma K_bind_hoareE [wp]: "\P\ f \Q\,\E\ \ \P\ K_bind f x \Q\,\E\" by simp -crunch valid_queues' [wp]: replyFromKernel "valid_queues'" - lemma handleInvocation_no_orphans [wp]: "\ \s. no_orphans s \ invs' s \ vs_valid_duplicates' (ksPSpace s) \ ct_active' s \ ksSchedulerAction s = ResumeCurrentThread \ @@ -2298,20 +1945,12 @@ lemma handleInvocation_no_orphans [wp]: ct_in_state'_set setThreadState_st_tcb hoare_vcg_all_lift | simp add: split_def split del: if_split)+ - apply (wps setThreadState_ct') - apply (wp sts_ksQ - setThreadState_current_no_orphans sts_invs_minor' - ct_in_state'_set setThreadState_st_tcb - | simp add: split_def split del: if_split)+ apply (clarsimp simp: if_apply_def2) - apply (frule(1) ct_not_ksQ) by (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def invs'_def cur_tcb'_def valid_state'_def valid_idle'_def) lemma receiveSignal_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ - receiveSignal thread cap isBlocking - \ \rv s. no_orphans s \" + "receiveSignal thread cap isBlocking \no_orphans\" unfolding receiveSignal_def apply (wp hoare_drop_imps setThreadState_not_active_no_orphans | wpc | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def @@ -2329,7 +1968,7 @@ lemma receiveIPC_no_orphans [wp]: hoare_vcg_all_lift sts_st_tcb' | wpc | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def - doNBRecvFailedTransfer_def invs_valid_queues' + doNBRecvFailedTransfer_def | strengthen sch_act_wf_weak)+ done @@ -2340,7 +1979,7 @@ lemma deleteCallerCap_no_orphans [wp]: deleteCallerCap receiver \ \rv s. 
no_orphans s \" unfolding deleteCallerCap_def - by wpsimp auto + by (wpsimp wp: hoare_drop_imps) auto lemma remove_neg_strg: "(A \ B) \ ((x \ A) \ (\ x \ B))" @@ -2353,7 +1992,7 @@ notes if_cong[cong] shows \ \rv . no_orphans \" unfolding handleRecv_def apply (clarsimp simp: whenE_def split del: if_split | wp hoare_drop_imps getNotification_wp | wpc )+ (*takes a while*) - apply (rule_tac Q'="\rv s. no_orphans s \ invs' s" in hoare_post_imp_R) + apply (rule_tac Q'="\rv s. no_orphans s \ invs' s" in hoare_strengthen_postE_R) apply (wp, fastforce) apply (rule_tac Q="\rv s. no_orphans s \ invs' s" in hoare_post_imp) apply (wp | clarsimp | fastforce)+ @@ -2420,7 +2059,8 @@ theorem callKernel_no_orphans [wp]: callKernel e \ \rv s. no_orphans s \" unfolding callKernel_def - by (wpsimp wp: weak_if_wp schedule_invs' hoare_drop_imps) + by (wpsimp wp: weak_if_wp schedule_invs' hoare_drop_imps + | strengthen invs_pspace_aligned' invs_pspace_distinct')+ end diff --git a/proof/refine/ARM_HYP/ADT_H.thy b/proof/refine/ARM_HYP/ADT_H.thy index f4ef312901..5deb5ff44c 100644 --- a/proof/refine/ARM_HYP/ADT_H.thy +++ b/proof/refine/ARM_HYP/ADT_H.thy @@ -482,7 +482,7 @@ proof - apply (intro conjI impI allI) apply (erule pspace_dom_relatedE[OF _ pspace_relation]) apply clarsimp - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (clarsimp simp add: ep_relation_def EndpointMap_def split: Structures_A.endpoint.splits) @@ -495,7 +495,7 @@ proof - apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (clarsimp simp add: ntfn_relation_def AEndpointMap_def split: Structures_A.ntfn.splits) @@ -508,7 +508,7 @@ proof - apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) @@ -517,7 +517,7 @@ proof - apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) @@ -543,7 +543,7 @@ proof - apply (case_tac vmpage_size; simp) apply ((frule_tac i=n and k="0x1000" in word_mult_less_mono1, simp+)+)[4] apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) @@ -569,7 +569,7 @@ proof - apply (case_tac vmpage_size; simp) apply ((frule_tac i=n and k="0x1000" in word_mult_less_mono1, simp+)+)[4] apply (erule pspace_dom_relatedE[OF _ 
pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) prefer 2 apply (rename_tac arch_kernel_obj) @@ -596,7 +596,7 @@ proof - arch_tcb_relation_imp_ArchTcnMap) apply (simp add: absCNode_def cte_map_def) apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def split: if_split_asm) prefer 2 apply (rename_tac arch_kernel_obj) @@ -660,7 +660,7 @@ proof - (* mapping architecture-specific objects *) apply clarsimp apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_object y ko P arch_kernel_obj) apply (case_tac arch_kernel_object, simp_all add: absHeapArch_def split: asidpool.splits) @@ -804,7 +804,7 @@ shows apply (case_tac "ksPSpace s' x", clarsimp) apply (erule_tac x=x in allE, clarsimp) apply clarsimp - apply (case_tac a, simp_all add: other_obj_relation_def) + apply (case_tac a, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (insert pspace_relation) apply (clarsimp simp: obj_at'_def projectKOs) apply (erule(1) pspace_dom_relatedE) @@ -848,7 +848,7 @@ lemma TCB_implies_KOTCB: apply (clarsimp simp add: pspace_relation_def pspace_dom_def dom_def UNION_eq Collect_eq) apply (erule_tac x=a in allE)+ - apply (clarsimp simp add: other_obj_relation_def + apply (clarsimp simp add: tcb_relation_cut_def split: Structures_H.kernel_object.splits) apply (drule iffD1) apply (fastforce simp add: dom_def image_def) @@ -1638,7 +1638,7 @@ definition domain_index_internal = ksDomScheduleIdx s, cur_domain_internal = ksCurDomain s, domain_time_internal = ksDomainTime s, - ready_queues_internal = curry (ksReadyQueues s), + ready_queues_internal = (\d p. 
heap_walk (tcbSchedNexts_of s) (tcbQueueHead (ksReadyQueues s (d, p))) []), cdt_list_internal = absCDTList (cteMap (gsCNodes s)) (ctes_of s)\" lemma absExst_correct: @@ -1646,12 +1646,15 @@ lemma absExst_correct: assumes rel: "(s, s') \ state_relation" shows "absExst s' = exst s" apply (rule det_ext.equality) - using rel invs invs' - apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct - absCDTList_correct[THEN fun_cong] state_relation_def invs_def valid_state_def - ready_queues_relation_def invs'_def valid_state'_def - valid_pspace_def valid_sched_def valid_pspace'_def curry_def fun_eq_iff) - apply (fastforce simp: absEkheap_correct) + using rel invs invs' + apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct + absCDTList_correct[THEN fun_cong] state_relation_def invs_def + valid_state_def ready_queues_relation_def ready_queue_relation_def + invs'_def valid_state'_def + valid_pspace_def valid_sched_def valid_pspace'_def curry_def + fun_eq_iff) + apply (fastforce simp: absEkheap_correct) + apply (fastforce simp: list_queue_relation_def Let_def dest: heap_ls_is_walk) done diff --git a/proof/refine/ARM_HYP/ArchAcc_R.thy b/proof/refine/ARM_HYP/ArchAcc_R.thy index d2d3cb8ffc..66b7ea8871 100644 --- a/proof/refine/ARM_HYP/ArchAcc_R.thy +++ b/proof/refine/ARM_HYP/ArchAcc_R.thy @@ -230,7 +230,7 @@ lemma setObject_ASIDPool_corres [corres]: corres dc (asid_pool_at p and valid_etcbs) (asid_pool_at' p) (set_asid_pool p a) (setObject p a')" apply (simp add: set_asid_pool_def) - apply (corressimp search: setObject_other_corres[where P="\_. True"] + apply (corresKsimp search: setObject_other_corres[where P="\_. True"] wp: get_object_ret get_object_wp) apply (simp add: other_obj_relation_def asid_pool_relation_def) apply (clarsimp simp: obj_at_simps ) @@ -342,6 +342,10 @@ lemma magnitudeCheck_assert2: using in_magnitude_check[where x=x and n=n and s=s and s'=s and v="()"] by (simp add: magnitudeCheck_assert in_monad) +lemma fst_fail: (* FIXME lib: move up *) + "fst (fail s) = {}" + by (simp add: fail_def) + lemma getObject_get_assert: assumes deflt: "\a b c d. 
(loadObject a b c d :: ('a :: pspace_storable) kernel) = loadObject_default a b c d" @@ -361,7 +365,7 @@ lemma getObject_get_assert: apply (simp add: lookupAround2_known1 assert_opt_def obj_at'_def projectKO_def2 split: option.split) - apply (clarsimp simp: fail_def fst_return conj_comms project_inject + apply (clarsimp simp: fst_fail fst_return conj_comms project_inject objBits_def) apply (simp only: assert2[symmetric], rule bind_apply_cong[OF refl]) @@ -930,12 +934,16 @@ lemma setObject_PD_corres [corres]: apply (drule(1) ekheap_kheap_dom) apply clarsimp apply (drule_tac x=p in bspec, erule domI) - apply (simp add: other_obj_relation_def + apply (simp add: tcb_relation_cut_def split: Structures_A.kernel_object.splits) - apply (rule conjI) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x="p && ~~ mask pd_bits" in allE)+ apply fastforce + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pde')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + subgoal by (fastforce dest: tcbs_of'_non_tcb_update) apply (simp add: map_to_ctes_upd_other) apply (simp add: fun_upd_def) apply (simp add: caps_of_state_after_update obj_at_def swp_cte_at_caps_of) @@ -1006,12 +1014,15 @@ lemma setObject_PT_corres [corres]: apply (drule(1) ekheap_kheap_dom) apply clarsimp apply (drule_tac x=p in bspec, erule domI) - apply (simp add: other_obj_relation_def - split: Structures_A.kernel_object.splits) - apply (rule conjI) + apply (simp add: tcb_relation_cut_def split: Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x="p && ~~ mask pt_bits" in allE)+ apply fastforce + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pte')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + subgoal by (fastforce dest: tcbs_of'_non_tcb_update) apply (simp add: map_to_ctes_upd_other) apply (simp add: fun_upd_def) apply (simp add: caps_of_state_after_update obj_at_def swp_cte_at_caps_of) @@ -1212,7 +1223,7 @@ lemma lookupPTSlot_corres [corres]: (pspace_aligned' and pspace_distinct') (lookup_pt_slot pd vptr) (lookupPTSlot pd vptr)" unfolding lookup_pt_slot_def lookupPTSlot_def lookupPTSlotFromPT_def - apply (corressimp simp: pde_relation_aligned_def lookup_failure_map_def + apply (corresKsimp simp: pde_relation_aligned_def lookup_failure_map_def wp: get_pde_wp_valid getPDE_wp) by (auto simp: lookup_failure_map_def obj_at_def) @@ -1317,7 +1328,7 @@ lemma createMappingEntries_corres [corres]: (create_mapping_entries base vptr pgsz vm_rights attrib pd) (createMappingEntries base vptr pgsz vm_rights' attrib' pd)" unfolding createMappingEntries_def mapping_map_def - by (cases pgsz; corressimp simp: vmattributes_map_def) + by (cases pgsz; corresKsimp simp: vmattributes_map_def) lemma pte_relation'_Invalid_inv [simp]: "pte_relation' x ARM_HYP_H.pte.InvalidPTE = (x = ARM_A.pte.InvalidPTE)" @@ -1350,7 +1361,7 @@ lemma createMappingEntries_valid_slots' [wp]: apply (auto elim: is_aligned_weaken) done -lemmas [corresc_simp] = master_pte_relation_def master_pde_relation_def +lemmas [corresKc_simp] = master_pte_relation_def master_pde_relation_def lemma ensureSafeMapping_corres [corres]: "mapping_map m m' \ @@ -1361,7 +1372,7 @@ lemma ensureSafeMapping_corres [corres]: 
unfolding mapping_map_def ensureSafeMapping_def apply (cases m; cases m'; simp; match premises in "(_ \ (=)) p p'" for p p' \ \cases "fst p"; cases "fst p'"\; clarsimp) - by (corressimp corresK: mapME_x_corresK_inv + by (corresKsimp corresK: mapME_x_corresK_inv wp: get_master_pte_wp get_master_pde_wp getPTE_wp getPDE_wp; auto simp add: valid_mapping_entries_def)+ @@ -1393,7 +1404,7 @@ lemma find_pd_for_asid_corres [@lift_corres_args, corres]: (pspace_aligned' and pspace_distinct' and no_0_obj') (find_pd_for_asid asid) (findPDForASID asid)" apply (simp add: find_pd_for_asid_def findPDForASID_def liftME_def bindE_assoc) - apply (corressimp simp: liftE_bindE assertE_assert mask_asid_low_bits_ucast_ucast lookup_failure_map_def + apply (corresKsimp simp: liftE_bindE assertE_assert mask_asid_low_bits_ucast_ucast lookup_failure_map_def wp: getPDE_wp getASID_wp search: checkPDAt_corres corres_gets_asid) subgoal premises prems for s s' @@ -1679,5 +1690,166 @@ lemma dmo_clearMemory_invs'[wp]: apply fastforce done +lemma pspace_aligned_cross: + "\ pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ pspace_aligned' s'" + apply (clarsimp simp: pspace_aligned'_def pspace_aligned_def pspace_relation_def) + apply (rename_tac p' ko') + apply (prop_tac "p' \ pspace_dom (kheap s)", fastforce) + apply (thin_tac "pspace_dom k = p" for k p) + apply (clarsimp simp: pspace_dom_def) + apply (drule bspec, fastforce)+ + apply clarsimp + apply (rename_tac ko' a a' P ko) + apply (erule (1) obj_relation_cutsE; clarsimp simp: objBits_simps) + + \\CNode\ + apply (clarsimp simp: cte_map_def) + apply (simp only: cteSizeBits_def cte_level_bits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp) + apply (rule is_aligned_weaken) + apply (rule is_aligned_mult_triv2, simp) + + \\TCB\ + apply (clarsimp simp: tcbBlockSizeBits_def elim!: is_aligned_weaken) + + \\PageTable\ + apply (clarsimp simp: archObjSize_def pteBits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (simp add: vspace_bits_defs) + apply (rule is_aligned_shift) + + \\PageDirectory\ + apply (clarsimp simp: archObjSize_def vspace_bits_defs) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp) + apply (rule is_aligned_shift) + + \\DataPage\ + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (rule pbfs_atleast_pageBits) + apply (fastforce intro: is_aligned_shift is_aligned_mult_triv2) + + \\other_obj_relation\ + apply (simp add: other_obj_relation_def) + by (clarsimp simp: epSizeBits_def ntfnSizeBits_def + split: kernel_object.splits Structures_A.kernel_object.splits) + (fastforce simp: archObjSize_def split: arch_kernel_object.splits arch_kernel_obj.splits) + +lemmas is_aligned_add_step_le' = is_aligned_add_step_le[simplified mask_2pm1 add_diff_eq] + +lemma objBitsKO_Data: + "objBitsKO (if dev then KOUserDataDevice else KOUserData) = pageBits" + by (simp add: objBits_def objBitsKO_def word_size_def) + +lemma of_bl_shift_cte_level_bits: + "(of_bl z :: machine_word) << cte_level_bits \ mask (cte_level_bits + length z)" + by word_bitwise + (simp add: test_bit_of_bl bit_simps word_size cte_level_bits_def rev_bl_order_simps) + +lemma obj_relation_cuts_range_limit: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ + \ \x n. 
p' = p + x \ is_aligned x n \ n \ obj_bits ko \ x \ mask (obj_bits ko)" + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (drule (1) wf_cs_nD) + apply (clarsimp simp: cte_map_def2) + apply (rule_tac x=cte_level_bits in exI) + apply (simp add: is_aligned_shift of_bl_shift_cte_level_bits) + apply (rule_tac x=tcbBlockSizeBits in exI) + apply (simp add: tcbBlockSizeBits_def) + apply (rule_tac x=pteBits in exI) + apply (simp add: bit_simps is_aligned_shift mask_def vspace_bits_defs) + apply word_bitwise + apply (rule_tac x=pdeBits in exI) + apply (simp add: bit_simps is_aligned_shift mask_def vspace_bits_defs) + apply word_bitwise + apply (rule_tac x=pageBits in exI) + apply (simp add: is_aligned_shift pbfs_atleast_pageBits is_aligned_mult_triv2) + apply (simp add: mask_def shiftl_t2n mult_ac) + apply (frule word_less_power_trans2, rule pbfs_atleast_pageBits) + apply (simp add: pbfs_less_wb'[unfolded word_bits_def, simplified]) + apply (simp add: pbfs_less_wb'[unfolded word_bits_def, simplified]) + apply fastforce + done + +lemma obj_relation_cuts_range_mask_range: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko'; is_aligned p (obj_bits ko) \ + \ p' \ mask_range p (obj_bits ko)" + apply (drule (1) obj_relation_cuts_range_limit, clarsimp) + apply (rule conjI) + apply (rule word_plus_mono_right2; assumption?) + apply (simp add: is_aligned_no_overflow_mask) + apply (erule word_plus_mono_right) + apply (simp add: is_aligned_no_overflow_mask) + done + +lemma obj_relation_cuts_obj_bits: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ \ objBitsKO ko' \ obj_bits ko" + apply (erule (1) obj_relation_cutsE; + clarsimp simp: objBits_simps objBits_defs cte_level_bits_def + pbfs_atleast_pageBits[simplified bit_simps] archObjSize_def pteBits_def + pdeBits_def) + apply (simp add: vspace_bits_defs) + apply (simp add: vspace_bits_defs) + apply (cases ko; simp add: other_obj_relation_def objBits_defs + split: kernel_object.splits) + apply (rename_tac ako', case_tac ako'; + clarsimp simp: archObjSize_def vspace_bits_defs vcpu_bits_def + split: arch_kernel_object.splits) + done + +lemma pspace_distinct_cross: + "\ pspace_distinct s; pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ + pspace_distinct' s'" + apply (frule (1) pspace_aligned_cross) + apply (clarsimp simp: pspace_distinct'_def) + apply (rename_tac p' ko') + apply (rule pspace_dom_relatedE; assumption?) + apply (rename_tac p ko P) + apply (frule (1) pspace_alignedD') + apply (frule (1) pspace_alignedD) + apply (rule ps_clearI, assumption) + apply (case_tac ko'; simp add: objBits_simps objBits_defs obj_at_simps) + apply (simp split: arch_kernel_object.splits + add: obj_at_simps pteBits_def pdeBits_def vspace_bits_defs vcpu_bits_def) + apply (rule ccontr, clarsimp) + apply (rename_tac x' ko_x') + apply (frule_tac x=x' in pspace_alignedD', assumption) + apply (rule_tac x=x' in pspace_dom_relatedE; assumption?) + apply (rename_tac x ko_x P') + apply (frule_tac p=x in pspace_alignedD, assumption) + apply (case_tac "p = x") + apply clarsimp + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (clarsimp simp: cte_relation_def cte_map_def2 objBits_simps) + apply (rule_tac n=cte_level_bits in is_aligned_add_step_le'; assumption?) + apply (rule is_aligned_add; (rule is_aligned_shift)?) + apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (rule is_aligned_add; (rule is_aligned_shift)?) 
+ apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (simp add: cte_level_bits_def cteSizeBits_def) + apply (clarsimp simp: pte_relation_def objBits_simps archObjSize_def) + apply (rule_tac n=pteBits in is_aligned_add_step_le'; + simp add: vspace_bits_defs vcpu_bits_def) + apply (clarsimp simp: pde_relation_def objBits_simps archObjSize_def) + apply (rule_tac n=pdeBits in is_aligned_add_step_le'; simp add: vspace_bits_defs vcpu_bits_def) + apply (simp add: objBitsKO_Data) + apply (rule_tac n=pageBits in is_aligned_add_step_le'; assumption?) + apply (case_tac ko; + simp split: if_split_asm + add: is_other_obj_relation_type_CapTable a_type_def) + apply (rename_tac ako, + case_tac ako; + simp add: is_other_obj_relation_type_def a_type_def split: if_split_asm) + apply (frule (1) obj_relation_cuts_obj_bits) + apply (drule (2) obj_relation_cuts_range_mask_range)+ + apply (prop_tac "x' \ mask_range p' (objBitsKO ko')", simp add: mask_def add_diff_eq) + apply (frule_tac x=p and y=x in pspace_distinctD; assumption?) + apply (drule (4) mask_range_subsetD) + apply (erule (2) in_empty_interE) + done + end end diff --git a/proof/refine/ARM_HYP/Arch_R.thy b/proof/refine/ARM_HYP/Arch_R.thy index bf3ea02837..54a56405e8 100644 --- a/proof/refine/ARM_HYP/Arch_R.thy +++ b/proof/refine/ARM_HYP/Arch_R.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only @@ -72,7 +73,7 @@ lemma createObject_typ_at': pspace_aligned' s \ pspace_no_overlap' ptr (objBitsKO ty) s\ createObjects' ptr (Suc 0) ty 0 \\rv s. typ_at' otype ptr s\" - apply (clarsimp simp:createObjects'_def alignError_def split_def | wp hoare_unless_wp | wpc )+ + apply (clarsimp simp:createObjects'_def alignError_def split_def | wp unless_wp | wpc )+ apply (clarsimp simp: obj_at'_def ko_wp_at'_def typ_at'_def pspace_distinct'_def)+ apply (subgoal_tac "ps_clear ptr (objBitsKO ty) (s\ksPSpace := \a. 
if a = ptr then Some ty else ksPSpace s a\)") @@ -126,7 +127,7 @@ lemma set_cap_device_and_range_aligned: lemma performASIDControlInvocation_corres: "asid_ci_map i = i' \ corres dc - (einvs and ct_active and valid_aci i) + (einvs and ct_active and valid_aci i and schact_is_rct) (invs' and ct_active' and valid_aci' i') (perform_asid_control_invocation i) (performASIDControlInvocation i')" @@ -325,29 +326,30 @@ lemma performASIDControlInvocation_corres: apply clarsimp apply (frule empty_descendants_range_in') apply (intro conjI, - simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2 - null_filter_descendants_of'[OF null_filter_simp'] - capAligned_def asid_low_bits_def) - apply (erule descendants_range_caps_no_overlapI') - apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq) - apply (simp add:empty_descendants_range_in') - apply (simp add:word_bits_def pageBits_def) - apply (rule is_aligned_weaken) - apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) - apply (simp add:pageBits_def) + simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2 + null_filter_descendants_of'[OF null_filter_simp'] + capAligned_def asid_low_bits_def) + apply (erule descendants_range_caps_no_overlapI') + apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq) + apply (simp add:empty_descendants_range_in') + apply (simp add:word_bits_def pageBits_def) + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) + apply (simp add:pageBits_def) + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range) + apply (fastforce simp: cte_wp_at_ctes_of) + apply assumption+ + apply fastforce + apply simp apply clarsimp - apply (drule(1) cte_cap_in_untyped_range) - apply (fastforce simp:cte_wp_at_ctes_of) + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) apply assumption+ + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) apply fastforce apply simp apply clarsimp - apply (drule (1) cte_cap_in_untyped_range) - apply (fastforce simp add: cte_wp_at_ctes_of) - apply assumption+ - apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) - apply fastforce - apply simp done definition @@ -422,7 +424,7 @@ lemma checkVP_wpR [wp]: checkVPAlignment sz w \P\, -" apply (simp add: checkVPAlignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (simp add: is_aligned_mask vmsz_aligned'_def) done @@ -450,7 +452,7 @@ lemma ARMMMU_improve_cases: by (cases cap, simp_all add: isCap_simps) (* not sure if this is useful as is *) lemma decodeVCPUInjectIRQ_inv[wp]: "\P\ decodeVCPUInjectIRQ a b \\_. 
P\" - by (wpsimp simp: decodeVCPUInjectIRQ_def Let_def wp: hoare_whenE_wp getVCPU_wp | rule conjI)+ + by (wpsimp simp: decodeVCPUInjectIRQ_def Let_def wp: whenE_wp getVCPU_wp | rule conjI)+ crunch inv [wp]: "ARM_HYP_H.decodeInvocation" "P" (wp: crunch_wps mapME_x_inv_wp getASID_wp @@ -575,7 +577,7 @@ lemma decodeARMPageFlush_corres: page_size \ returnOk $ 1 << pageBitsForSize vmpage_size; whenE (page_size \ start \ page_size < end) $ throwError $ ExceptionTypes_A.syscall_error.InvalidArgument 0; - whenE (pstart < ARM_HYP.physBase \ ARM_HYP.paddrTop < end - start + pstart) $ + whenE (pstart < physBase \ ARM_HYP.paddrTop < end - start + pstart) $ throwError ExceptionTypes_A.syscall_error.IllegalOperation; returnOk $ arch_invocation.InvokePage $ @@ -604,7 +606,7 @@ lemma decodeARMPageFlush_corres: apply (rule whenE_throwError_corres, simp) apply simp apply (rule whenE_throwError_corres, simp) - apply (simp add: fromPAddr_def physBase_def paddrTop_def add.commute) + apply (clarsimp simp: add.commute fromPAddr_def) apply (rule corres_trivial) apply (rule corres_returnOk) apply (clarsimp simp: archinv_relation_def page_invocation_map_def flush_type_map_def) @@ -699,7 +701,7 @@ lemma resolve_vaddr_valid_mapping_size: \ (case cap of cap.ArchObjectCap c \ is_page_cap c | _ \ False) \ cap_bits cap = pageBitsForSize a \ " apply (simp add: resolve_vaddr_def) - apply (rule hoare_seq_ext[OF _ get_master_pde_sp]) + apply (rule bind_wp[OF _ get_master_pde_sp]) apply (rule hoare_pre) apply (wp get_master_pte_wp | wpc | simp add: lookup_pt_slot_no_fail_def)+ @@ -842,14 +844,14 @@ lemma decodeARMVCPUInvocation_corres: apply (frule list_all2_Cons) apply clarsimp apply (case_tac a; clarsimp simp add: cap_relation_def) - apply (corres corres: corres_returnOkTT) + apply (corresK corres: corres_returnOkTT) apply (clarsimp simp: archinv_relation_def vcpu_invocation_map_def) (* inject_irq *) apply (simp add: decode_vcpu_inject_irq_def decodeVCPUInjectIRQ_def isVCPUCap_def) apply (cases args; clarsimp) apply (case_tac list; clarsimp simp add: rangeCheck_def range_check_def unlessE_whenE) apply (clarsimp simp: shiftL_nat whenE_bindE_throwError_to_if) - apply (corressimp wp: get_vcpu_wp) + apply (corresKsimp wp: get_vcpu_wp) apply (clarsimp simp: archinv_relation_def vcpu_invocation_map_def ucast_id valid_cap'_def valid_cap_def make_virq_def makeVIRQ_def split:if_split) @@ -956,11 +958,11 @@ shows apply (simp add: returnOk_liftE[symmetric]) apply (rule corres_returnOk) apply (simp add: archinv_relation_def asid_pool_invocation_map_def) - apply (rule hoare_pre, wp hoare_whenE_wp) + apply (rule hoare_pre, wp whenE_wp) apply (clarsimp simp: ucast_fst_hd_assocs) - apply (wp hoareE_TrueI hoare_whenE_wp getASID_wp | simp)+ + apply (wp hoareE_TrueI whenE_wp getASID_wp | simp)+ apply ((clarsimp simp: p2_low_bits_max | rule TrueI impI)+)[2] - apply (wp hoare_whenE_wp getASID_wp)+ + apply (wp whenE_wp getASID_wp)+ apply (clarsimp simp: valid_cap_def) apply auto[1] apply (simp add: isCap_simps split del: if_split) @@ -1034,7 +1036,7 @@ shows apply (simp add: ord_le_eq_trans [OF word_n1_ge]) apply (wp hoare_drop_imps)+ apply (simp add: o_def validE_R_def) - apply (wp hoare_whenE_wp)+ + apply (wp whenE_wp)+ apply fastforce apply clarsimp apply (simp add: null_def split_def asid_high_bits_def @@ -1155,7 +1157,7 @@ shows apply (rule corres_returnOk) apply (clarsimp simp: archinv_relation_def page_table_invocation_map_def) apply (simp add: shiftr_shiftl1) - apply (wp hoare_whenE_wp get_master_pde_wp getPDE_wp find_pd_for_asid_inv + 
apply (wp whenE_wp get_master_pde_wp getPDE_wp find_pd_for_asid_inv | wp (once) hoare_drop_imps)+ apply (fastforce simp: valid_cap_def mask_def) apply (clarsimp simp: valid_cap'_def) @@ -1250,7 +1252,7 @@ lemma invokeVCPUInjectIRQ_corres: (invokeVCPUInjectIRQ v index virq)" unfolding invokeVCPUInjectIRQ_def invoke_vcpu_inject_irq_def apply (clarsimp simp: bind_assoc) - apply (corressimp corres: getObject_vcpu_corres setObject_VCPU_corres wp: get_vcpu_wp) + apply (corresKsimp corres: getObject_vcpu_corres setObject_VCPU_corres wp: get_vcpu_wp) apply clarsimp done @@ -1263,7 +1265,7 @@ lemma invokeVCPUReadReg_corres: (invokeVCPUReadReg v r)" unfolding invoke_vcpu_read_register_def invokeVCPUReadReg_def read_vcpu_register_def readVCPUReg_def apply (rule corres_discard_r) - apply (corressimp corres: getObject_vcpu_corres wp: get_vcpu_wp) + apply (corresKsimp corres: getObject_vcpu_corres wp: get_vcpu_wp) apply (clarsimp simp: vcpu_relation_def split: option.splits) apply (wpsimp simp: getCurThread_def)+ done @@ -1278,14 +1280,15 @@ lemma invokeVCPUWriteReg_corres: unfolding invokeVCPUWriteReg_def invoke_vcpu_write_register_def write_vcpu_register_def writeVCPUReg_def apply (rule corres_discard_r) - apply (corressimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) + apply (corresKsimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) subgoal by (auto simp: vcpu_relation_def split: option.splits) apply (wpsimp simp: getCurThread_def)+ done lemma archThreadSet_VCPU_Some_corres[corres]: - "corres dc (tcb_at t) (tcb_at' t) - (arch_thread_set (tcb_vcpu_update (\_. Some v)) t) (archThreadSet (atcbVCPUPtr_update (\_. Some v)) t)" + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_set (tcb_vcpu_update (\_. Some v)) t) + (archThreadSet (atcbVCPUPtr_update (\_. Some v)) t)" apply (rule archThreadSet_corres) apply (simp add: arch_tcb_relation_def) done @@ -1313,7 +1316,7 @@ lemma associateVCPUTCB_corres: (associateVCPUTCB v t)" unfolding associate_vcpu_tcb_def associateVCPUTCB_def apply (clarsimp simp: bind_assoc) - apply (corressimp search: getObject_vcpu_corres setObject_VCPU_corres vcpuSwitch_corres'' + apply (corresKsimp search: getObject_vcpu_corres setObject_VCPU_corres vcpuSwitch_corres'' wp: get_vcpu_wp getVCPU_wp hoare_vcg_imp_lift' simp: vcpu_relation_def) apply (rule_tac Q="\_. invs and tcb_at t" in hoare_strengthen_post) @@ -1323,7 +1326,7 @@ lemma associateVCPUTCB_corres: apply (clarsimp simp: vcpu_relation_def) apply (rule conjI) apply (frule (1) sym_refs_vcpu_tcb, fastforce) - apply (clarsimp simp: obj_at_def)+ + apply (fastforce simp: obj_at_def)+ apply (wpsimp)+ apply (rule_tac Q="\_. 
invs' and tcb_at' t" in hoare_strengthen_post) apply wpsimp @@ -1334,7 +1337,7 @@ lemma associateVCPUTCB_corres: apply (simp add: valid_vcpu'_def typ_at_tcb') apply (clarsimp simp: typ_at_to_obj_at_arches obj_at'_def) apply (fastforce simp: typ_at_to_obj_at_arches obj_at'_def) - apply (corressimp wp: arch_thread_get_wp getObject_tcb_wp + apply (corresKsimp wp: arch_thread_get_wp getObject_tcb_wp simp: archThreadGet_def)+ apply (simp add: vcpu_relation_def) apply (intro allI conjI impI; @@ -1360,7 +1363,7 @@ lemma invokeVCPUAckVPPI_corres: (invokeVCPUAckVPPI vcpu vppi)" unfolding invokeVCPUAckVPPI_def invoke_vcpu_ack_vppi_def write_vcpu_register_def writeVCPUReg_def - by (corressimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) + by (corresKsimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) (auto simp: vcpu_relation_def split: option.splits) lemma performARMVCPUInvocation_corres: @@ -1378,7 +1381,7 @@ lemma performARMVCPUInvocation_corres: lemma arch_performInvocation_corres: assumes "archinv_relation ai ai'" shows "corres (dc \ (=)) - (einvs and ct_active and valid_arch_inv ai) + (einvs and ct_active and valid_arch_inv ai and schact_is_rct) (invs' and ct_active' and valid_arch_inv' ai' and (\s. vs_valid_duplicates' (ksPSpace s))) (arch_perform_invocation ai) (Arch.performInvocation ai')" proof - @@ -1412,13 +1415,13 @@ lemma performASIDControlInvocation_tcb_at': apply (rule hoare_name_pre_state) apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) - apply (wp static_imp_wp |simp add:placeNewObject_def2)+ - apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: projectKO_opts_defs) apply (strengthen st_tcb_strg' [where P=\]) apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"] hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] - deleteObjects_st_tcb_at'[where p="makePoolParent aci"] static_imp_wp + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+ apply (case_tac ctea) apply (clarsimp) @@ -1522,8 +1525,8 @@ lemma tcbSchedEnqueue_vs_entry_align[wp]: "\\s. ko_wp_at' (\ko. P (vs_entry_align ko)) p s\ tcbSchedEnqueue pa \\rv. ko_wp_at' (\ko. P (vs_entry_align ko)) p\" - apply (clarsimp simp: tcbSchedEnqueue_def setQueue_def) - by (wp hoare_unless_wp | simp)+ + apply (clarsimp simp: tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def) + by (wp unless_wp | simp)+ crunch vs_entry_align[wp]: setThreadState "ko_wp_at' (\ko. P (vs_entry_align ko)) p" @@ -1619,7 +1622,7 @@ lemma findPDForASID_valid_offset'[wp]: "\valid_objs' and K (vptr < pptrBase)\ findPDForASID p \\rv s. 
valid_pde_mapping_offset' (rv + (vptr >> 21 << 3) && mask pd_bits)\,-" apply (rule hoare_gen_asmE) - apply (rule hoare_post_imp_R, rule findPDForASID_aligned) + apply (rule hoare_strengthen_postE_R, rule findPDForASID_aligned) apply (simp add: mask_add_aligned vspace_bits_defs) apply (erule less_pptrBase_valid_pde_offset'') done @@ -1722,17 +1725,6 @@ lemma ensureSafeMapping_valid_slots_duplicated': apply (fastforce simp:valid_slots_duplicated'_def) done -lemma is_aligned_ptrFromPAddr_aligned: - "m \ 28 \ is_aligned (ptrFromPAddr p) m = is_aligned p m" - apply (simp add:ptrFromPAddr_def is_aligned_mask - pptrBaseOffset_def pptrBase_def ARM_HYP.physBase_def physBase_def) - apply (subst add.commute) - apply (subst mask_add_aligned) - apply (erule is_aligned_weaken[rotated]) - apply (simp add:is_aligned_def) - apply simp - done - (* FIXME: this lemma is too specific *) lemma lookupPTSlot_aligned: "\\s. is_aligned vptr 16 \ valid_objs' s\ lookupPTSlot pd vptr \\p s. is_aligned p 7\,-" @@ -1744,15 +1736,13 @@ lemma lookupPTSlot_aligned: split:Structures_H.kernel_object.splits arch_kernel_object.splits) apply (simp add:valid_obj'_def lookup_pt_slot_no_fail_def) apply (rule aligned_add_aligned) - apply (rule is_aligned_ptrFromPAddr_aligned[where m = 7,THEN iffD2]) - apply simp - apply (erule is_aligned_weaken) - apply (simp add: vspace_bits_defs) + apply (erule is_aligned_ptrFromPAddr_n) + apply (simp add: pt_bits_def pte_bits_def) apply (rule is_aligned_shiftl) apply (rule is_aligned_andI1) apply (rule is_aligned_shiftr) apply (simp add: pte_bits_def pageBits_def pt_bits_def) - apply simp + apply (simp add: pt_bits_def) done lemma createMappingEntires_valid_slots_duplicated'[wp]: @@ -1763,33 +1753,31 @@ lemma createMappingEntires_valid_slots_duplicated'[wp]: apply (clarsimp simp:createMappingEntries_def) apply (rule hoare_pre) apply (wpc | wp lookupPTSlot_page_table_at' - | simp add: slots_duplicated_ensured_def)+ - apply (rule_tac Q' = "\p s. is_aligned p 7 \ page_table_at' (p && ~~ mask pt_bits) s" - in hoare_post_imp_R) + | simp add: slots_duplicated_ensured_def)+ + apply (rule_tac Q' = "\p s. 
is_aligned p 7 \ page_table_at' (p && ~~ mask pt_bits) s" + in hoare_strengthen_postE_R) apply (wp lookupPTSlot_aligned lookupPTSlot_page_table_at' - | simp add: vspace_bits_defs largePagePTEOffsets_def superSectionPDEOffsets_def)+ - apply (rule_tac x = r in exI) + | simp add: vspace_bits_defs largePagePTEOffsets_def superSectionPDEOffsets_def)+ + apply (rename_tac rv s) + apply (rule_tac x = rv in exI) apply clarsimp apply (frule is_aligned_no_wrap'[where off = "0x78"]) apply simp apply (drule upto_enum_step_shift[where n = 7 and m = 3,simplified]) apply (clarsimp simp:mask_def add.commute upto_enum_step_def) apply wp+ - apply (intro conjI impI) - apply ((clarsimp simp: vmsz_aligned_def pageBitsForSize_def - slots_duplicated_ensured_def - split:vmpage_size.splits)+)[9] - apply clarsimp - apply (drule lookup_pd_slot_aligned_6) - apply (simp add:pdBits_def pageBits_def pd_bits_def pde_bits_def) - apply (clarsimp simp:slots_duplicated_ensured_def) - apply (rule_tac x = "(lookup_pd_slot pd vptr)" in exI) - apply (clarsimp simp: superSectionPDEOffsets_def Let_def pde_bits_def) - apply (frule is_aligned_no_wrap'[where off = "0x78" and sz = 7]) - apply simp - apply (drule upto_enum_step_shift[where n = 7 and m = 3,simplified]) - apply (clarsimp simp:mask_def add.commute upto_enum_step_def) - done + apply (intro conjI impI; clarsimp) + apply ((clarsimp simp: vmsz_aligned_def slots_duplicated_ensured_def)+)[2] + apply (drule lookup_pd_slot_aligned_6) + apply (simp add:pdBits_def pageBits_def pd_bits_def pde_bits_def) + apply (clarsimp simp:slots_duplicated_ensured_def) + apply (rule_tac x = "(lookup_pd_slot pd vptr)" in exI) + apply (clarsimp simp: superSectionPDEOffsets_def Let_def pde_bits_def) + apply (frule is_aligned_no_wrap'[where off = "0x78" and sz = 7]) + apply simp + apply (drule upto_enum_step_shift[where n = 7 and m = 3,simplified]) + apply (clarsimp simp:mask_def add.commute upto_enum_step_def) + done lemma arch_decodeARMPageFlush_wf: "ARM_HYP_H.isPageFlushLabel (invocation_type label) \ @@ -1805,7 +1793,6 @@ lemma arch_decodeARMPageFlush_wf: (\s. vs_valid_duplicates' (ksPSpace s))\ decodeARMPageFlush label args (arch_capability.PageCap d word vmrights vmpage_size option) \valid_arch_inv'\, -" - supply hoare_True_E_R [simp del] apply (simp add: decodeARMPageFlush_def) apply (wpsimp wp: whenE_throwError_wp simp: valid_arch_inv'_def valid_page_inv'_def if_apply_def2) done @@ -1862,7 +1849,7 @@ lemma arch_decodeInvocation_wf[wp]: (snd (excaps!0)) and sch_act_simple and (\s. 
descendants_of' (snd (excaps!0)) (ctes_of s) = {}) " - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: lookupTargetSlot_def) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) @@ -1963,7 +1950,7 @@ lemma arch_decodeInvocation_wf[wp]: c \' capability.ArchObjectCap (arch_capability.PageTableCap word (Some (snd p, hd args >> 21 << 21))) \ is_aligned (addrFromPPtr word) ptBits \ valid_pde_mapping_offset' (b + (hd args >> 21 << 3) && mask pd_bits) - " in hoare_post_imp_R) + " in hoare_strengthen_postE_R) apply (wp whenE_throwError_wp isFinalCapability_inv getPDE_wp | wpc | simp add: valid_arch_inv'_def valid_pti'_def unlessE_whenE | rule_tac x="fst p" in hoare_imp_eq_substR @@ -1989,7 +1976,6 @@ lemma arch_decodeInvocation_wf[wp]: \ \PageDirectoryCap\ apply (simp add: decodeARMMMUInvocation_def ARM_HYP_H.decodeInvocation_def isCap_simps Let_def) - supply hoare_True_E_R [simp del] apply (cases "ARM_HYP_H.isPDFlushLabel (invocation_type label)", simp_all) apply (cases args; simp) apply wp @@ -2049,7 +2035,7 @@ lemma performASIDControlInvocation_st_tcb_at': hoare_vcg_ex_lift deleteObjects_cte_wp_at' deleteObjects_invs_derivatives deleteObjects_st_tcb_at' - static_imp_wp + hoare_weak_lift_imp | simp add: placeNewObject_def2)+ apply (case_tac ctea) apply (clarsimp) @@ -2104,7 +2090,7 @@ crunch cte_wp_at': "Arch.finaliseCap" "cte_wp_at' P p" lemma invs_asid_table_strengthen': "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ invs' (s\ksArchState := - armKSASIDTable_update (\_. (armKSASIDTable \ ksArchState) s(asid \ ap)) (ksArchState s)\)" + armKSASIDTable_update (\_. ((armKSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" apply (clarsimp simp: invs'_def valid_state'_def) apply (rule conjI) apply (clarsimp simp: valid_global_refs'_def global_refs'_def) @@ -2179,7 +2165,7 @@ lemma performASIDControlInvocation_invs' [wp]: updateFreeIndex_caps_no_overlap'' updateFreeIndex_descendants_of2 updateFreeIndex_caps_overlap_reserved - updateCap_cte_wp_at_cases static_imp_wp + updateCap_cte_wp_at_cases hoare_weak_lift_imp getSlotCap_wp)+ apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask | strengthen invs_valid_pspace' invs_pspace_aligned' @@ -2265,7 +2251,7 @@ lemma assoc_invs': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_valid_arch' + setVCPU_valid_arch' valid_bitmaps_lift sym_heap_sched_pointers_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb valid_arch_tcb'_def | wp (once) hoare_vcg_imp_lift)+ diff --git a/proof/refine/ARM_HYP/Bits_R.thy b/proof/refine/ARM_HYP/Bits_R.thy index 6ffa1f1908..67b738cbce 100644 --- a/proof/refine/ARM_HYP/Bits_R.thy +++ b/proof/refine/ARM_HYP/Bits_R.thy @@ -76,6 +76,10 @@ lemma projectKO_tcb: "(projectKO_opt ko = Some t) = (ko = KOTCB t)" by (cases ko) (auto simp: projectKO_opts_defs) +lemma tcb_of'_TCB[simp]: + "tcb_of' (KOTCB tcb) = Some tcb" + by (simp add: projectKO_tcb) + lemma projectKO_cte: "(projectKO_opt ko = Some t) = (ko = KOCTE t)" by (cases ko) (auto simp: projectKO_opts_defs) @@ -482,7 +486,7 @@ lemma constOnFailure_wp : apply (wp|simp)+ done -lemma corres_throwError_str [corres_concrete_rER]: +lemma corres_throwError_str [corresK_concrete_rER]: "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throw b)" "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ 
(throwError a) (throwError b)" by (simp add: corres_underlyingK_def)+ diff --git a/proof/refine/ARM_HYP/BuildRefineCache.thy b/proof/refine/ARM_HYP/BuildRefineCache.thy deleted file mode 100644 index 0e8eac45cf..0000000000 --- a/proof/refine/ARM_HYP/BuildRefineCache.thy +++ /dev/null @@ -1,40 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory BuildRefineCache -imports Main -begin - -ML \ - -(* needed to generate a proof cache *) -proofs := 1; -DupSkip.record_proofs := true; -quick_and_dirty := true; - -tracing "Building refinement image using ROOT.ML"; - -use "ROOT.ML"; - -\ - -ML \ - -tracing "Synching proof cache"; - -DupSkip.sync_cache @{theory Refine}; - -tracing "Dumping proof cache"; - -let - val xml = XML_Syntax.xml_forest_of_cache (! DupSkip.the_cache); -in - File.open_output (XML_Syntax.output_forest xml) (Path.basic "proof_cache.xml") -end; - -\ - -end diff --git a/proof/refine/ARM_HYP/CNodeInv_R.thy b/proof/refine/ARM_HYP/CNodeInv_R.thy index 6e259112c2..14fc0fc0db 100644 --- a/proof/refine/ARM_HYP/CNodeInv_R.thy +++ b/proof/refine/ARM_HYP/CNodeInv_R.thy @@ -207,7 +207,7 @@ lemma decodeCNodeInvocation_corres: subgoal by (auto simp add: whenE_def, auto simp add: returnOk_def) apply (wp | wpc | simp(no_asm))+ apply (wp hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift - hoare_vcg_all_lift_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps + hoare_vcg_all_liftE_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps | clarsimp)+ subgoal by (auto elim!: valid_cnode_capI) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) @@ -264,7 +264,7 @@ lemma decodeCNodeInvocation_corres: apply (clarsimp simp add: returnOk_def) apply (wp get_cap_wp getCTE_wp | simp only: whenE_def | clarsimp)+ apply (rule hoare_trivE_R[where P="\"]) - apply (simp add: cte_wp_at_ctes_of pred_conj_def cong: conj_cong) + apply (wpsimp simp: cte_wp_at_ctes_of pred_conj_def) apply (fastforce elim!: valid_cnode_capI simp: invs_def valid_state_def valid_pspace_def) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) \ \Rotate\ @@ -385,7 +385,7 @@ lemma deriveCap_Null_helper: apply (cases "cap = NullCap") apply (simp add: deriveCap_def isCap_simps) apply (wp | simp)+ - apply (rule hoare_post_imp_R, rule assms) + apply (rule hoare_strengthen_postE_R, rule assms) apply simp done @@ -443,7 +443,7 @@ lemma decodeCNodeInv_wf[wp]: apply (wp whenE_throwError_wp getCTE_wp | wpc | simp(no_asm))+ apply (rule_tac Q'="\rv. invs' and cte_wp_at' (\cte. cteCap cte = NullCap) destSlot and ex_cte_cap_to' destSlot" - in hoare_post_imp_R, wp) + in hoare_strengthen_postE_R, wp) apply (clarsimp simp: cte_wp_at_ctes_of) apply (frule invs_valid_objs') apply (simp add: ctes_of_valid' valid_updateCapDataI @@ -478,7 +478,7 @@ lemma decodeCNodeInv_wf[wp]: unlessE_whenE) apply (rule hoare_pre) apply (wp whenE_throwError_wp getCTE_wp | simp)+ - apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. True) rv s" in hoare_post_imp_R) + apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. True) rv s" in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (simp add: cte_wp_at_ctes_of imp_ex hasCancelSendRights_not_Null) apply (clarsimp simp: ctes_of_valid' invs_valid_objs') @@ -492,7 +492,7 @@ lemma decodeCNodeInv_wf[wp]: apply (rule_tac Q'="\rv s. 
cte_at' rv s \ cte_at' destSlot s \ cte_at' srcSlot s \ ex_cte_cap_to' rv s \ ex_cte_cap_to' destSlot s - \ invs' s" in hoare_post_imp_R) + \ invs' s" in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (clarsimp simp: cte_wp_at_ctes_of) apply (frule invs_valid_objs') @@ -4880,7 +4880,7 @@ lemma cteSwap_iflive'[wp]: simp only: if_live_then_nonz_cap'_def imp_conv_disj ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - hoare_vcg_ex_lift updateCap_cte_wp_at_cases static_imp_wp)+ + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -4910,7 +4910,7 @@ lemma cteSwap_valid_pspace'[wp]: apply (strengthen imp_consequent, strengthen ctes_of_strng) apply ((wp sch_act_wf_lift valid_queues_lift cur_tcb_lift updateCap_no_0 updateCap_ctes_of_wp - hoare_ex_wp updateMDB_cte_wp_at_other getCTE_wp + hoare_vcg_ex_lift updateMDB_cte_wp_at_other getCTE_wp | simp add: cte_wp_at_ctes_ofI o_def | rule hoare_drop_imps)+)[6] apply (clarsimp simp: valid_pspace_no_0[unfolded valid_pspace'_def valid_mdb'_def] @@ -5062,8 +5062,6 @@ crunch irq_states'[wp]: cteSwap "valid_irq_states'" crunch pde_mappings'[wp]: cteSwap "valid_pde_mappings'" -crunch vq'[wp]: cteSwap "valid_queues'" - crunch ksqsL1[wp]: cteSwap "\s. P (ksReadyQueuesL1Bitmap s)" crunch ksqsL2[wp]: cteSwap "\s. P (ksReadyQueuesL2Bitmap s)" @@ -5078,6 +5076,12 @@ crunch ct_not_inQ[wp]: cteSwap "ct_not_inQ" crunch ksDomScheduleIdx [wp]: cteSwap "\s. P (ksDomScheduleIdx s)" +crunches cteSwap + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + lemma cteSwap_invs'[wp]: "\invs' and valid_cap' c and valid_cap' c' and ex_cte_cap_to' c1 and ex_cte_cap_to' c2 and @@ -5261,6 +5265,11 @@ lemma invalid_Thread_CNode: apply (clarsimp simp: obj_at'_def projectKOs) done +(* FIXME MOVE *) +lemma all_Not_False[simp]: + "All Not = False" + by blast + lemma Final_notUntyped_capRange_disjoint: "\ isFinal cap sl (cteCaps_of s); cteCaps_of s sl' = Some cap'; sl \ sl'; capUntypedPtr cap = capUntypedPtr cap'; capBits cap = capBits cap'; @@ -5276,21 +5285,11 @@ lemma Final_notUntyped_capRange_disjoint: apply (clarsimp simp: valid_cap'_def obj_at'_def projectKOs objBits_simps' typ_at'_def ko_wp_at'_def + page_table_at'_def page_directory_at'_def + sameObjectAs_def3 isCap_simps split: capability.split_asm zombie_type.split_asm - arch_capability.split_asm - dest!: spec[where x=0]) - apply (clarsimp simp: sameObjectAs_def3 isCap_simps) - apply (simp add: isCap_simps) - apply (simp add: isCap_simps) - apply (clarsimp simp: valid_cap'_def - obj_at'_def projectKOs objBits_simps - typ_at'_def ko_wp_at'_def - page_table_at'_def page_directory_at'_def - split: capability.split_asm zombie_type.split_asm - arch_capability.split_asm - dest!: spec[where x=0]) - apply fastforce+ - apply (clarsimp simp: isCap_simps sameObjectAs_def3) + arch_capability.split_asm option.split_asm + dest!: spec[where x=0])+ done lemma capBits_capUntyped_capRange: @@ -5538,6 +5537,10 @@ lemma updateCap_untyped_ranges_zero_simple: crunch tcb_in_cur_domain'[wp]: updateCap "tcb_in_cur_domain' t" (wp: crunch_wps simp: crunch_simps rule: tcb_in_cur_domain'_lift) +crunches updateCap + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + lemma make_zombie_invs': "\\s. 
invs' s \ s \' cap \ cte_wp_at' (\cte. isFinal (cteCap cte) sl (cteCaps_of s)) sl s \ @@ -5555,7 +5558,8 @@ lemma make_zombie_invs': \ bound_tcb_at' ((=) None) p s \ obj_at' (Not \ tcbQueued) p s \ ko_wp_at' (Not \ hyp_live') p s - \ (\pr. p \ set (ksReadyQueues s pr)))) sl s\ + \ obj_at' (\tcb. tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p s)) sl s\ updateCap sl cap \\rv. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def @@ -5593,7 +5597,9 @@ lemma make_zombie_invs': apply (subgoal_tac "st_tcb_at' ((=) Inactive) p' s \ obj_at' (Not \ tcbQueued) p' s \ bound_tcb_at' ((=) None) p' s - \ ko_wp_at' (Not \ hyp_live') p' s") + \ ko_wp_at' (Not \ hyp_live') p' s + \ obj_at' (\tcb. tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p' s") apply (clarsimp simp: pred_tcb_at'_def obj_at'_def ko_wp_at'_def projectKOs live'_def hyp_live'_def) subgoal by (auto dest!: isCapDs)[1] apply (clarsimp simp: cte_wp_at_ctes_of disj_ac @@ -5780,7 +5786,7 @@ lemma cteSwap_cte_wp_cteCap: apply simp apply (wp hoare_drop_imps)[1] apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - getCTE_wp' hoare_vcg_all_lift static_imp_wp)+ + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ apply simp apply (clarsimp simp: o_def) done @@ -5794,7 +5800,7 @@ lemma capSwap_cte_wp_cteCap: apply(simp add: capSwapForDelete_def) apply(wp) apply(rule cteSwap_cte_wp_cteCap) - apply(wp getCTE_wp getCTE_cte_wp_at static_imp_wp)+ + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ apply(clarsimp) apply(rule conjI) apply(simp add: cte_at_cte_wp_atD) @@ -5875,7 +5881,7 @@ crunch cap_to'[wp]: cancelSignal "ex_cte_cap_wp_to' P p" lemma cancelIPC_cap_to'[wp]: "\ex_cte_cap_wp_to' P p\ cancelIPC t \\rv. ex_cte_cap_wp_to' P p\" apply (simp add: cancelIPC_def Let_def) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (case_tac state, simp_all add: getThreadReplySlot_def locateSlot_conv) apply (wp ex_cte_cap_to'_pres [OF threadSet_cte_wp_at'] | simp add: o_def if_apply_def2 @@ -5937,7 +5943,7 @@ lemma cteDelete_delete_cases: apply (rule hoare_strengthen_post [OF emptySlot_deletes]) apply (clarsimp simp: cte_wp_at_ctes_of) apply wp+ - apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) apply (clarsimp simp: cte_wp_at_ctes_of) apply simp done @@ -6297,7 +6303,7 @@ proof (induct arbitrary: P p rule: finalise_spec_induct2) apply clarsimp apply (case_tac "cteCap rv", simp_all add: isCap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp | simp | wp (once) isFinal[where x=sl])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ apply (wp getCTE_wp') apply (clarsimp simp: cte_wp_at_ctes_of disj_ac) apply (rule conjI, clarsimp simp: removeable'_def) @@ -6315,7 +6321,7 @@ lemma finaliseSlot_invs'': \ (\sl'. snd rv \ NullCap \ sl' \ slot \ cteCaps_of s sl' \ Some (snd rv))\, \\rv s. invs' s \ sch_act_simple s\" unfolding finaliseSlot_def - apply (rule hoare_pre, rule hoare_post_impErr, rule use_spec) + apply (rule hoare_pre, rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where P="\" and Pr="\" and p=slot]) apply (simp_all add: no_cte_prop_top) apply wp @@ -6325,14 +6331,14 @@ lemma finaliseSlot_invs'': lemma finaliseSlot_invs: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. 
invs'\" - apply (rule validE_valid, rule hoare_post_impErr) + apply (rule validE_valid, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done lemma finaliseSlot_sch_act_simple: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. sch_act_simple\" - apply (rule validE_valid, rule hoare_post_impErr) + apply (rule validE_valid, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6341,7 +6347,7 @@ lemma finaliseSlot_removeable: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv s. fst rv \ cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s\,-" - apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule validE_validE_R, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6350,7 +6356,7 @@ lemma finaliseSlot_irqs: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv s. \sl'. snd rv \ NullCap \ sl' \ slot \ cteCaps_of s sl' \ Some (snd rv)\,-" - apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule validE_validE_R, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6365,7 +6371,7 @@ lemma finaliseSlot_cte_wp_at: P cp \ capZombiePtr cp \ p)) p s\,-" unfolding finaliseSlot_def apply (rule hoare_pre, unfold validE_R_def) - apply (rule hoare_post_impErr, rule use_spec) + apply (rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where P=P and Pr=\ and p=p]) apply (simp_all add: no_cte_prop_top finalise_prop_stuff_def) apply wp @@ -6388,7 +6394,7 @@ lemma reduceZombie_invs: reduceZombie cap slot exposed \\rv s. invs' s\" apply (rule validE_valid) - apply (rule hoare_post_impErr, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) + apply (rule hoare_strengthen_postE, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6398,7 +6404,7 @@ lemma reduceZombie_cap_to: reduceZombie cap slot exposed \\rv s. \ exposed \ ex_cte_cap_to' slot s\, -" apply (rule validE_validE_R, rule hoare_pre, - rule hoare_post_impErr) + rule hoare_strengthen_postE) apply (rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6409,7 +6415,7 @@ lemma reduceZombie_sch_act_simple: reduceZombie cap slot exposed \\rv. sch_act_simple\" apply (rule validE_valid, rule hoare_pre, - rule hoare_post_impErr) + rule hoare_strengthen_postE) apply (rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6419,7 +6425,7 @@ lemma cteDelete_invs': apply (rule hoare_gen_asm) apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (unfold validE_R_def) apply (rule use_spec) apply (rule spec_valid_conj_liftE1) @@ -6450,9 +6456,9 @@ lemma cteDelete_cte_at: apply (rule hoare_vcg_disj_lift) apply (rule typ_at_lifts, rule cteDelete_typ_at') apply (simp add: cteDelete_def finaliseSlot_def split_def) - apply (rule validE_valid, rule seqE) + apply (rule validE_valid, rule bindE_wp_fwd) apply (subst finaliseSlot'_simps_ext) - apply (rule seqE) + apply (rule bindE_wp_fwd) apply simp apply (rule getCTE_sp) apply (rule hoare_pre, rule hoare_FalseE) @@ -6495,10 +6501,10 @@ lemma cteDelete_cte_wp_at_invs: cteCap cte = NullCap \ (\zb n. cteCap cte = Zombie slot zb n)) slot s)" - and E="\rv. \" in hoare_post_impErr) + and E="\rv. 
\" in hoare_strengthen_postE) apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple hoare_drop_imps(2)[OF finaliseSlot_irqs]) - apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) apply (clarsimp simp: cte_wp_at_ctes_of dest!: isCapDs) apply simp apply simp @@ -6517,10 +6523,10 @@ lemma cteDelete_cte_wp_at_invs: (\zb n. cteCap cte = Zombie p zb n) \ (\cp. P cp \ capZombiePtr cp \ p)) p s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple hoare_drop_imps(2)[OF finaliseSlot_irqs]) - apply (rule hoare_post_imp_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) + apply (rule hoare_strengthen_postE_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) apply simp+ apply (clarsimp simp: cte_wp_at_ctes_of) apply simp @@ -6533,7 +6539,7 @@ lemma cteDelete_sch_act_simple: cteDelete slot exposed \\rv. sch_act_simple\" apply (simp add: cteDelete_def whenE_def split_def) apply (wp hoare_drop_imps | simp)+ - apply (rule_tac hoare_post_impErr [where Q="\rv. sch_act_simple" + apply (rule_tac hoare_strengthen_postE [where Q="\rv. sch_act_simple" and E="\rv. sch_act_simple"]) apply (rule valid_validE) apply (wp finaliseSlot_sch_act_simple) @@ -7094,14 +7100,14 @@ next apply simp apply ((wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at set_cap_cte_cap_wp_to - hoare_vcg_const_Ball_lift static_imp_wp + hoare_vcg_const_Ball_lift hoare_weak_lift_imp | simp add: conj_comms | erule finalise_cap_not_reply_master [simplified])+) apply (elim conjE, strengthen exI[mk_strg I], strengthen asm_rl[where psi="(cap_relation cap cap')" for cap cap', mk_strg I E]) apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - hoare_vcg_ex_lift static_imp_wp) + hoare_vcg_ex_lift hoare_weak_lift_imp) apply clarsimp apply (drule_tac cap=a in cap_relation_removables, clarsimp, assumption+) @@ -7143,7 +7149,7 @@ next apply (clarsimp dest!: isCapDs simp: cte_wp_at_ctes_of) apply (case_tac "cteCap rv'", auto simp add: isCap_simps is_cap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp add: is_final_cap_def conj_comms cte_wp_at_eq_simp)+ apply (rule isFinal[where x="cte_map slot"]) apply (wp get_cap_wp| simp add: conj_comms)+ @@ -7222,13 +7228,11 @@ next case (4 ptr bits n slot) let ?target = "(ptr, nat_to_cref (zombie_cte_bits bits) n)" note hyps = "4.hyps"[simplified rec_del_concrete_unfold spec_corres_liftME2] - have pred_conj_assoc: "\P Q R. (P and (Q and R)) = (P and Q and R)" - by (rule ext, simp) show ?case apply (simp only: rec_del_concrete_unfold cap_relation.simps) apply (simp add: reduceZombie_def Let_def liftE_bindE - del: pred_conj_app) + del: inf_apply) apply (subst rec_del_simps_ext) apply (rule_tac F="ptr + 2 ^ cte_level_bits * of_nat n = cte_map ?target" @@ -7286,7 +7290,7 @@ next apply (rule updateCap_corres) apply simp apply (simp add: is_cap_simps) - apply (rule_tac Q="\rv. cte_at' (cte_map ?target)" in valid_prove_more) + apply (rule_tac R="\rv. 
cte_at' (cte_map ?target)" in hoare_post_add) apply (wp, (wp getCTE_wp)+) apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule no_fail_pre, wp, simp) @@ -7325,7 +7329,7 @@ next apply (clarsimp simp: zombie_alignment_oddity cte_map_replicate) apply (wp get_cap_cte_wp_at getCTE_wp' rec_del_cte_at rec_del_invs rec_del_delete_cases)+ - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule_tac P="\cp. cp = Zombie ptr (zbits_map bits) (Suc n)" in cteDelete_cte_wp_at_invs[where p="cte_map slot"]) apply clarsimp @@ -8439,7 +8443,7 @@ lemma cteMove_iflive'[wp]: ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_ex_lift updateCap_cte_wp_at_cases - getCTE_wp static_imp_wp)+ + getCTE_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -8592,6 +8596,15 @@ lemma cteMove_urz [wp]: apply auto done +crunches updateMDB + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +(* FIXME: arch_split *) +lemma haskell_assert_inv: + "haskell_assert Q L \P\" + by wpsimp + lemma cteMove_invs' [wp]: "\\x. invs' x \ ex_cte_cap_to' word2 x \ cte_wp_at' (\c. weak_derived' (cteCap c) capability) word1 x \ @@ -8617,7 +8630,7 @@ lemma cteMove_cte_wp_at: \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" unfolding cteMove_def apply (fold o_def) - apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp static_imp_wp|simp add: o_def)+ + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -8669,6 +8682,10 @@ crunch ksDomSchedule[wp]: updateCap "\s. P (ksDomSchedule s)" crunch ksDomScheduleIdx[wp]: updateCap "\s. P (ksDomScheduleIdx s)" crunch ksDomainTime[wp]: updateCap "\s. P (ksDomainTime s)" +crunches updateCap + for rdyq_projs[wp]: + "\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) (\d p. inQ d p |< tcbs_of' s)" + lemma corres_null_cap_update: "cap_relation cap cap' \ corres dc (invs and cte_wp_at ((=) cap) slot) @@ -8968,7 +8985,7 @@ crunches ignore: saveVirtTimer) crunch irq_states' [wp]: finaliseCap valid_irq_states' - (wp: crunch_wps hoare_unless_wp getASID_wp no_irq + (wp: crunch_wps unless_wp getASID_wp no_irq no_irq_invalidateLocalTLB_ASID no_irq_setHardwareASID no_irq_setCurrentPD no_irq_invalidateLocalTLB_VAASID no_irq_cleanByVA_PoU FalseI @@ -9001,7 +9018,7 @@ lemma finaliseSlot_IRQInactive: "\valid_irq_states'\ finaliseSlot a b -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (unfold validE_E_def) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule use_spec(2) [OF finaliseSlot_IRQInactive', folded finaliseSlot_def]) apply (rule TrueI) apply assumption @@ -9015,8 +9032,8 @@ lemma cteDelete_IRQInactive: "\valid_irq_states'\ cteDelete x y -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (simp add: cteDelete_def split_def) - apply (wp hoare_whenE_wp) - apply (rule hoare_post_impErr) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) apply (rule validE_E_validE) apply (rule finaliseSlot_IRQInactive) apply simp @@ -9028,8 +9045,8 @@ lemma cteDelete_irq_states': "\valid_irq_states'\ cteDelete x y \\rv. 
valid_irq_states'\" apply (simp add: cteDelete_def split_def) - apply (wp hoare_whenE_wp) - apply (rule hoare_post_impErr) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) apply (rule hoare_valid_validE) apply (rule finaliseSlot_irq_states') apply simp @@ -9052,7 +9069,7 @@ proof (induct rule: cteRevoke.induct) case (1 p s') show ?case apply (subst cteRevoke.simps) - apply (wp "1.hyps" unlessE_wp hoare_whenE_wp preemptionPoint_IRQInactive_spec + apply (wp "1.hyps" unlessE_wp whenE_wp preemptionPoint_IRQInactive_spec cteDelete_IRQInactive cteDelete_irq_states' getCTE_wp')+ apply clarsimp done @@ -9072,7 +9089,7 @@ lemma inv_cnode_IRQInactive: apply (simp add: invokeCNode_def) apply (wp hoare_TrueI [where P=\] cteRevoke_IRQInactive finaliseSlot_IRQInactive cteDelete_IRQInactive - hoare_whenE_wp + whenE_wp | wpc | simp add: split_def)+ done diff --git a/proof/refine/ARM_HYP/CSpace1_R.thy b/proof/refine/ARM_HYP/CSpace1_R.thy index 6ded3048fe..89ea95896b 100644 --- a/proof/refine/ARM_HYP/CSpace1_R.thy +++ b/proof/refine/ARM_HYP/CSpace1_R.thy @@ -236,7 +236,7 @@ lemma pspace_relation_cte_wp_at: apply (clarsimp elim!: cte_wp_at_weakenE') apply clarsimp apply (drule(1) pspace_relation_absD) - apply (clarsimp simp: other_obj_relation_def) + apply (clarsimp simp: tcb_relation_cut_def) apply (simp split: kernel_object.split_asm) apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb]) apply simp @@ -325,7 +325,7 @@ lemma getSlotCap_corres: (getSlotCap cte_ptr')" apply (simp add: getSlotCap_def) apply (subst bind_return [symmetric]) - apply (corressimp) + apply (corresKsimp) done lemma maskCapRights [simp]: @@ -402,7 +402,7 @@ lemma resolveAddressBits_cte_at': resolveAddressBits cap addr depth \\rv. cte_at' (fst rv)\, \\rv s. True\" apply (fold validE_R_def) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule resolveAddressBits_real_cte_at') apply (erule real_cte_at') done @@ -607,12 +607,12 @@ proof (induct a arbitrary: c' cref' bits rule: resolve_address_bits'.induct) apply (simp add: Let_def unlessE_whenE) apply (simp add: caps isCap_defs Let_def whenE_bindE_throwError_to_if) apply (subst cnode_cap_case_if) - apply (corressimp search: getSlotCap_corres IH + apply (corresKsimp search: getSlotCap_corres IH wp: get_cap_wp getSlotCap_valid no_fail_stateAssert simp: locateSlot_conv) apply (simp add: drop_postfix_eq) apply clarsimp - apply (prove "is_aligned ptr (4 + cbits) \ cbits \ word_bits - cte_level_bits") + apply (prop_tac "is_aligned ptr (4 + cbits) \ cbits \ word_bits - cte_level_bits") apply (erule valid_CNodeCapE; fastforce simp: cte_level_bits_def) subgoal premises prems for s s' x apply (insert prems) @@ -625,8 +625,8 @@ proof (induct a arbitrary: c' cref' bits rule: resolve_address_bits'.induct) apply (subst \to_bl _ = _\[symmetric]) apply (drule postfix_dropD) apply clarsimp - apply (prove "32 + (cbits + length guard) - length cref = - (cbits + length guard) + (32 - length cref)") + apply (prop_tac "32 + (cbits + length guard) - length cref = + (cbits + length guard) + (32 - length cref)") apply (drule len_drop_lemma, simp, arith) apply simp apply (subst drop_drop [symmetric]) @@ -725,11 +725,11 @@ lemma lookupSlotForThread_corres: apply clarsimp apply simp prefer 2 - apply (rule hoare_vcg_precond_impE) + apply (rule hoare_weaken_preE) apply (rule resolve_address_bits_cte_at [unfolded validE_R_def]) apply clarsimp prefer 2 - apply (rule hoare_vcg_precond_impE) + apply (rule hoare_weaken_preE) apply (rule resolveAddressBits_cte_at') apply (simp 
add: invs'_def valid_state'_def valid_pspace'_def) apply (simp add: returnOk_def split_def) @@ -823,7 +823,7 @@ lemma tcbVTable_upd_simp [simp]: by (cases tcb) simp lemma setCTE_ctes_of_wp [wp]: - "\\s. P (ctes_of s (p \ cte))\ + "\\s. P ((ctes_of s) (p \ cte))\ setCTE p cte \\rv s. P (ctes_of s)\" by (simp add: setCTE_def ctes_of_setObject_cte) @@ -928,7 +928,7 @@ lemma cteInsert_weak_cte_wp_at: \\uu. cte_wp_at'(\c. P (cteCap c)) p\" unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def apply (simp add: bind_assoc split del: if_split) - apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at static_imp_wp | simp)+ + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ apply (wp getCTE_ctes_wp)+ apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ done @@ -1636,10 +1636,10 @@ lemma cte_map_pulls_tcb_to_abstract: \ \tcb'. kheap s x = Some (TCB tcb') \ tcb_relation tcb' tcb \ (z = (x, tcb_cnode_index (unat ((y - x) >> cte_level_bits))))" apply (rule pspace_dom_relatedE, assumption+) - apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) - apply (clarsimp simp: other_obj_relation_def - split: Structures_A.kernel_object.split_asm - ARM_A.arch_kernel_obj.split_asm) + apply (erule(1) obj_relation_cutsE; + clarsimp simp: other_obj_relation_def + split: Structures_A.kernel_object.split_asm + ARM_A.arch_kernel_obj.split_asm if_split_asm) apply (drule tcb_cases_related2) apply clarsimp apply (frule(1) cte_wp_at_tcbI [OF _ _ TrueI, where t="(a, b)" for a b, simplified]) @@ -1655,8 +1655,7 @@ lemma pspace_relation_update_tcbs: del: dom_fun_upd) apply (erule conjE) apply (rule ballI, drule(1) bspec) - apply (rule conjI, simp add: other_obj_relation_def) - apply (clarsimp split: Structures_A.kernel_object.split_asm) + apply (clarsimp simp: tcb_relation_cut_def split: Structures_A.kernel_object.split_asm) apply (drule bspec, fastforce) apply clarsimp apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) @@ -1878,6 +1877,27 @@ lemma descendants_of_eq': apply simp done +lemma setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedPrevs_of s)" + shows "P (ps |> tcb_of' |> tcbSchedPrev)" + using use_valid[OF step setObject_cte_tcbSchedPrevs_of(1)] pre + by auto + +lemma setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedNexts_of s)" + shows "P (ps |> tcb_of' |> tcbSchedNext)" + using use_valid[OF step setObject_cte_tcbSchedNexts_of(1)] pre + by auto + +lemma setObject_cte_inQ_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (inQ domain priority |< tcbs_of' s)" + shows "P (inQ domain priority |< (ps |> tcb_of'))" + using use_valid[OF step setObject_cte_inQ(1)] pre + by auto + lemma updateCap_stuff: assumes "(x, s'') \ fst (updateCap p cap s')" shows "(ctes_of s'' = modify_map (ctes_of s') p (cteCap_update (K cap))) \ @@ -1891,7 +1911,12 @@ lemma updateCap_stuff: ksSchedulerAction s'' = ksSchedulerAction s' \ (ksArchState s'' = ksArchState s') \ (pspace_aligned' s' \ pspace_aligned' s'') \ - (pspace_distinct' s' \ pspace_distinct' s'')" using assms + (pspace_distinct' s' \ pspace_distinct' s'') \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + (\domain priority. 
+ (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" + using assms apply (clarsimp simp: updateCap_def in_monad) apply (drule use_valid [where P="\s. s2 = s" for s2, OF _ getCTE_sp refl]) apply (rule conjI) @@ -1900,8 +1925,11 @@ lemma updateCap_stuff: apply (frule setCTE_pspace_only) apply (clarsimp simp: setCTE_def) apply (intro conjI impI) - apply (erule(1) use_valid [OF _ setObject_aligned]) - apply (erule(1) use_valid [OF _ setObject_distinct]) + apply (erule(1) use_valid [OF _ setObject_aligned]) + apply (erule(1) use_valid [OF _ setObject_distinct]) + apply (erule setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace; simp) + apply (erule setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace; simp) + apply (fastforce elim: setObject_cte_inQ_of_use_valid_ksPSpace) done (* FIXME: move *) @@ -1918,16 +1946,16 @@ lemma pspace_relation_cte_wp_atI': apply (simp split: if_split_asm) apply (erule(1) pspace_dom_relatedE) apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) + apply (subgoal_tac "n = x - y", clarsimp) + apply (drule tcb_cases_related2, clarsimp) + apply (intro exI, rule conjI) + apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) + apply fastforce + apply simp + apply clarsimp apply (simp add: other_obj_relation_def split: Structures_A.kernel_object.split_asm ARM_A.arch_kernel_obj.split_asm) - apply (subgoal_tac "n = x - y", clarsimp) - apply (drule tcb_cases_related2, clarsimp) - apply (intro exI, rule conjI) - apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) - apply fastforce - apply simp - apply clarsimp done lemma pspace_relation_cte_wp_atI: @@ -2452,7 +2480,7 @@ lemma updateCap_corres: apply (clarsimp simp: in_set_cap_cte_at_swp pspace_relations_def) apply (drule updateCap_stuff) apply simp - apply (rule conjI) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) apply (rule conjI) prefer 2 @@ -2540,9 +2568,9 @@ lemma updateMDB_pspace_relation: apply (clarsimp simp: tcb_ctes_clear cte_level_bits_def objBits_defs) apply clarsimp apply (rule pspace_dom_relatedE, assumption+) - apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] - apply (clarsimp split: Structures_A.kernel_object.split_asm - ARM_A.arch_kernel_obj.split_asm + apply (rule obj_relation_cutsE, assumption+; + clarsimp split: Structures_A.kernel_object.split_asm + ARM_A.arch_kernel_obj.split_asm if_split_asm simp: other_obj_relation_def) apply (frule(1) tcb_cte_cases_aligned_helpers(1)) apply (frule(1) tcb_cte_cases_aligned_helpers(2)) @@ -2604,6 +2632,25 @@ lemma updateMDB_ctes_of: crunch aligned[wp]: updateMDB "pspace_aligned'" crunch pdistinct[wp]: updateMDB "pspace_distinct'" +crunch tcbSchedPrevs_of[wp]: updateMDB "\s. P (tcbSchedPrevs_of s)" +crunch tcbSchedNexts_of[wp]: updateMDB "\s. P (tcbSchedNexts_of s)" +crunch inQ_opt_pred[wp]: updateMDB "\s. P (inQ d p |< tcbs_of' s)" +crunch inQ_opt_pred'[wp]: updateMDB "\s. P (\d p. inQ d p |< tcbs_of' s)" +crunch ksReadyQueues[wp]: updateMDB "\s. P (ksReadyQueues s)" + (wp: crunch_wps simp: crunch_simps setObject_def updateObject_cte) + +lemma setCTE_rdyq_projs[wp]: + "setCTE p f \\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. 
inQ d p |< tcbs_of' s)\" + apply (rule hoare_lift_Pf2[where f=ksReadyQueues]) + apply (rule hoare_lift_Pf2[where f=tcbSchedNexts_of]) + apply (rule hoare_lift_Pf2[where f=tcbSchedPrevs_of]) + apply wpsimp+ + done + +crunches updateMDB + for rdyq_projs[wp]:"\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< tcbs_of' s)" lemma updateMDB_the_lot: assumes "(x, s'') \ fst (updateMDB p f s')" @@ -2626,7 +2673,11 @@ lemma updateMDB_the_lot: ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ ksDomSchedule s'' = ksDomSchedule s' \ ksCurDomain s'' = ksCurDomain s' \ - ksDomainTime s'' = ksDomainTime s'" + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" using assms apply (simp add: updateMDB_eqs updateMDB_pspace_relations split del: if_split) apply (frule (1) updateMDB_ctes_of) @@ -2635,9 +2686,8 @@ using assms apply (erule use_valid) apply wp apply simp - apply (erule use_valid) - apply wp - apply simp + apply (erule use_valid, wpsimp wp: hoare_vcg_all_lift) + apply (simp add: comp_def) done lemma revokable_eq: @@ -3833,6 +3883,9 @@ lemma updateUntypedCap_descendants_of: apply (clarsimp simp:mdb_next_rel_def mdb_next_def split:if_splits) done +crunches setCTE + for tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + lemma setCTE_UntypedCap_corres: "\cap_relation cap (cteCap cte); is_untyped_cap cap; idx' = idx\ \ corres dc (cte_wp_at ((=) cap) src and valid_objs and @@ -3862,10 +3915,19 @@ lemma setCTE_UntypedCap_corres: apply assumption apply (clarsimp simp: pspace_relations_def) apply (subst conj_assoc[symmetric]) + apply clarsimp apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def split: if_split_asm Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ready_queues_relation _ _" \ -\) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (rule use_valid[OF _ setCTE_tcbSchedPrevs_of], assumption) + apply (rule use_valid[OF _ setCTE_tcbSchedNexts_of], assumption) + apply (rule use_valid[OF _ setCTE_ksReadyQueues], assumption) + apply (rule use_valid[OF _ setCTE_inQ_opt_pred], assumption) + apply (rule use_valid[OF _ set_cap_exst], assumption) + apply clarsimp apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) @@ -5143,11 +5205,15 @@ lemma updateMDB_the_lot': ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ ksDomSchedule s'' = ksDomSchedule s' \ ksCurDomain s'' = ksCurDomain s' \ - ksDomainTime s'' = ksDomainTime s'" + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. 
+ (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" apply (rule updateMDB_the_lot) using assms apply (fastforce simp: pspace_relations_def)+ - done + done lemma cte_map_inj_eq': "\(cte_map p = cte_map p'); @@ -5249,7 +5315,6 @@ lemma cteInsert_corres: apply (thin_tac "ksMachineState t = p" for p t)+ apply (thin_tac "ksCurThread t = p" for p t)+ apply (thin_tac "ksIdleThread t = p" for p t)+ - apply (thin_tac "ksReadyQueues t = p" for p t)+ apply (thin_tac "ksSchedulerAction t = p" for p t)+ apply (clarsimp simp: pspace_relations_def) apply (rule conjI) diff --git a/proof/refine/ARM_HYP/CSpace_R.thy b/proof/refine/ARM_HYP/CSpace_R.thy index 6f1d13280b..48e062cbfc 100644 --- a/proof/refine/ARM_HYP/CSpace_R.thy +++ b/proof/refine/ARM_HYP/CSpace_R.thy @@ -1099,43 +1099,6 @@ lemma bitmapQ_no_L2_orphans_lift: apply (rule hoare_vcg_prop, assumption) done -lemma valid_queues_lift_asm: - assumes tat1: "\d p tcb. \obj_at' (inQ d p) tcb and Q \ f \\_. obj_at' (inQ d p) tcb\" - and tat2: "\tcb. \st_tcb_at' runnable' tcb and Q \ f \\_. st_tcb_at' runnable' tcb\" - and prq: "\P. \\s. P (ksReadyQueues s) \ f \\_ s. P (ksReadyQueues s)\" - and prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" - and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" - shows "\Invariants_H.valid_queues and Q\ f \\_. Invariants_H.valid_queues\" - proof - - have tat: "\d p tcb. \obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb and Q\ f - \\_. obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb\" - apply (rule hoare_chain [OF hoare_vcg_conj_lift [OF tat1 tat2]]) - apply (fastforce)+ - done - have tat_combined: "\d p tcb. \obj_at' (inQ d p and runnable' \ tcbState) tcb and Q\ f - \\_. obj_at' (inQ d p and runnable' \ tcbState) tcb\" - apply (rule hoare_chain [OF tat]) - apply (fastforce simp add: obj_at'_and pred_tcb_at'_def o_def)+ - done - show ?thesis unfolding valid_queues_def valid_queues_no_bitmap_def - by (wp tat_combined prq prqL1 prqL2 valid_bitmapQ_lift bitmapQ_no_L2_orphans_lift - bitmapQ_no_L1_orphans_lift hoare_vcg_all_lift hoare_vcg_conj_lift hoare_Ball_helper) - simp_all - qed - -lemmas valid_queues_lift = valid_queues_lift_asm[where Q="\_. True", simplified] - -lemma valid_queues_lift': - assumes tat: "\d p tcb. \\s. \ obj_at' (inQ d p) tcb s\ f \\_ s. \ obj_at' (inQ d p) tcb s\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\valid_queues'\ f \\_. valid_queues'\" - unfolding valid_queues'_def imp_conv_disj - by (wp hoare_vcg_all_lift hoare_vcg_disj_lift tat prq) - -lemma setCTE_norq [wp]: - "\\s. P (ksReadyQueues s)\ setCTE ptr cte \\r s. P (ksReadyQueues s) \" - by (clarsimp simp: valid_def dest!: setCTE_pspace_only) - lemma setCTE_norqL1 [wp]: "\\s. P (ksReadyQueuesL1Bitmap s)\ setCTE ptr cte \\r s. P (ksReadyQueuesL1Bitmap s) \" by (clarsimp simp: valid_def dest!: setCTE_pspace_only) @@ -2227,7 +2190,7 @@ proof - let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" let ?C = "(modify_map (modify_map - (modify_map (ctes_of s(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest (cteMDBNode_update (\a. MDB word1 src (revokable' src_cap cap) (revokable' src_cap cap)))) src (cteMDBNode_update (mdbNext_update (\_. dest)))) word1 (cteMDBNode_update (mdbPrev_update (\_. dest))))" @@ -2589,7 +2552,7 @@ lemma updateMDB_iflive'[wp]: updateMDB p m \\rv s. 
if_live_then_nonz_cap' s\" apply (clarsimp simp: updateMDB_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp setCTE_iflive') apply (clarsimp elim!: cte_wp_at_weakenE') done @@ -2602,7 +2565,7 @@ lemma updateCap_iflive': updateCap p cap \\rv s. if_live_then_nonz_cap' s\" apply (simp add: updateCap_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp setCTE_iflive') apply (clarsimp elim!: cte_wp_at_weakenE') done @@ -2795,12 +2758,6 @@ lemma setCTE_inQ[wp]: apply (simp_all add: inQ_def) done -lemma setCTE_valid_queues'[wp]: - "\valid_queues'\ setCTE p cte \\rv. valid_queues'\" - apply (simp only: valid_queues'_def imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done - crunch inQ[wp]: cteInsert "\s. P (obj_at' (inQ d p) t s)" (wp: crunch_wps) @@ -2964,7 +2921,7 @@ lemma setCTE_irq_states' [wp]: apply (wp setObject_ksMachine) apply (simp add: updateObject_cte) apply (rule hoare_pre) - apply (wp hoare_unless_wp|wpc|simp)+ + apply (wp unless_wp|wpc|simp)+ apply fastforce apply assumption done @@ -3084,7 +3041,7 @@ lemma setCTE_ksMachine[wp]: apply (wp setObject_ksMachine) apply (clarsimp simp: updateObject_cte split: Structures_H.kernel_object.splits) - apply (safe, (wp hoare_unless_wp | simp)+) + apply (safe, (wp unless_wp | simp)+) done crunch ksMachine[wp]: cteInsert "\s. P (ksMachineState s)" @@ -3317,6 +3274,13 @@ lemma cteInsert_untyped_ranges_zero[wp]: apply blast done +crunches cteInsert + for tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: crunch_wps rule: valid_bitmaps_lift) + lemma cteInsert_invs: "\invs' and cte_wp_at' (\c. cteCap c=NullCap) dest and valid_cap' cap and (\s. src \ dest) and (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s) @@ -3325,20 +3289,9 @@ lemma cteInsert_invs: cteInsert cap src dest \\rv. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def) - (* FIXME: wp_cleanup - apply (wp cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift - cteInsert_norq | simp add: st_tcb_at'_def)+ - apply (wp cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift CSpace_R.valid_queues_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift - cteInsert_norq | simp add: pred_tcb_at'_def)+ - apply (auto simp: invs'_def valid_state'_def valid_pspace'_def - cte_wp_at_ctes_of - elim: valid_capAligned is_derived_badge_derived') - *) - apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift CSpace_R.valid_queues_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift cteInsert_norq - simp: st_tcb_at'_def) + apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift + valid_irq_node_lift irqs_masked_lift cteInsert_norq + sym_heap_sched_pointers_lift) apply (auto simp: invs'_def valid_state'_def valid_pspace'_def elim: valid_capAligned) done @@ -3646,10 +3599,13 @@ lemma corres_caps_decomposition: "\P. \\s. P (new_ups' s)\ g \\rv s. P (gsUserPages s)\" "\P. \\s. P (new_cns s)\ f \\rv s. P (cns_of_heap (kheap s))\" "\P. \\s. P (new_cns' s)\ g \\rv s. P (gsCNodes s)\" - "\P. \\s. P (new_queues s)\ f \\rv s. P (ready_queues s)\" + "\P. \\s. P (new_ready_queues s)\ f \\rv s. P (ready_queues s)\" "\P. \\s. P (new_action s)\ f \\rv s. P (scheduler_action s)\" "\P. \\s. P (new_sa' s)\ g \\rv s. 
P (ksSchedulerAction s)\" - "\P. \\s. P (new_rqs' s)\ g \\rv s. P (ksReadyQueues s)\" + "\P. \\s. P (new_ksReadyQueues s) (new_tcbSchedNexts_of s) (new_tcbSchedPrevs_of s) + (\d p. new_inQs d p s)\ + g \\rv s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< tcbs_of' s)\" "\P. \\s. P (new_di s)\ f \\rv s. P (domain_index s)\" "\P. \\s. P (new_dl s)\ f \\rv s. P (domain_list s)\" "\P. \\s. P (new_cd s)\ f \\rv s. P (cur_domain s)\" @@ -3665,7 +3621,9 @@ lemma corres_caps_decomposition: "\s s'. \ P s; P' s'; (s, s') \ state_relation \ \ sched_act_relation (new_action s) (new_sa' s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ - \ ready_queues_relation (new_queues s) (new_rqs' s')" + \ ready_queues_relation_2 (new_ready_queues s) (new_ksReadyQueues s') + (new_tcbSchedNexts_of s') (new_tcbSchedPrevs_of s') + (\d p. new_inQs d p s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ \ revokable_relation (new_rvk s) (null_filter (new_caps s)) (new_ctes s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ @@ -3732,7 +3690,7 @@ proof - apply (subst pspace_relations_def[symmetric]) apply (rule corres_underlying_decomposition [OF x]) apply (simp add: ghost_relation_of_heap) - apply (wp hoare_vcg_conj_lift mdb_wp rvk_wp list_wp u abs_irq_together)+ + apply (wpsimp wp: hoare_vcg_conj_lift mdb_wp rvk_wp list_wp u abs_irq_together)+ apply (intro z[simplified o_def] conjI | simp add: state_relation_def pspace_relations_def swp_cte_at | (clarsimp, drule (1) z(6), simp add: state_relation_def pspace_relations_def swp_cte_at))+ done @@ -4020,8 +3978,9 @@ lemma setupReplyMaster_corres: cte_wp_at' ((=) rv) (cte_map (t, tcb_cnode_index 2))" in hoare_strengthen_post) apply (wp hoare_drop_imps getCTE_wp') + apply (rename_tac rv s) apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) - apply (case_tac r, fastforce elim: valid_nullcapsE) + apply (case_tac rv, fastforce elim: valid_nullcapsE) apply (fastforce elim: tcb_at_cte_at) apply (clarsimp simp: cte_at'_obj_at' tcb_cte_cases_def cte_map_def) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) @@ -4222,6 +4181,9 @@ crunches setupReplyMaster and ready_queuesL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. 
P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers (wp: crunch_wps simp: crunch_simps rule: irqs_masked_lift) lemma setupReplyMaster_vms'[wp]: @@ -4250,7 +4212,8 @@ lemma setupReplyMaster_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp setupReplyMaster_valid_pspace' sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift - valid_queues_lift cur_tcb_lift valid_queues_lift' hoare_vcg_disj_lift + valid_queues_lift cur_tcb_lift hoare_vcg_disj_lift sym_heap_sched_pointers_lift + valid_bitmaps_lift valid_irq_node_lift | simp)+ apply (clarsimp simp: ex_nonz_tcb_cte_caps' valid_pspace'_def objBits_simps' tcbReplySlot_def @@ -4511,8 +4474,8 @@ lemma arch_update_setCTE_invs: apply (wp arch_update_setCTE_mdb valid_queues_lift sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift arch_update_setCTE_iflive arch_update_setCTE_ifunsafe valid_irq_node_lift setCTE_typ_at' setCTE_irq_handlers' - valid_queues_lift' setCTE_pred_tcb_at' irqs_masked_lift - setCTE_norq hoare_vcg_disj_lift untyped_ranges_zero_lift + setCTE_pred_tcb_at' irqs_masked_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift valid_bitmaps_lift | simp add: pred_tcb_at'_def)+ apply (clarsimp simp: valid_global_refs'_def is_arch_update'_def fun_upd_def[symmetric] cte_wp_at_ctes_of isCap_simps untyped_ranges_zero_fun_upd) @@ -5882,7 +5845,7 @@ lemma cteInsert_simple_invs: apply (rule hoare_pre) apply (simp add: invs'_def valid_state'_def valid_pspace'_def) apply (wp cur_tcb_lift sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift + valid_irq_node_lift irqs_masked_lift sym_heap_sched_pointers_lift cteInsert_simple_mdb' cteInsert_valid_globals_simple cteInsert_norq | simp add: pred_tcb_at'_def)+ apply (auto simp: invs'_def valid_state'_def valid_pspace'_def @@ -6021,6 +5984,21 @@ lemma arch_update_updateCap_invs: apply clarsimp done +lemma setCTE_set_cap_ready_queues_relation_valid_corres: + assumes pre: "ready_queues_relation s s'" + assumes step_abs: "(x, t) \ fst (set_cap cap slot s)" + assumes step_conc: "(y, t') \ fst (setCTE slot' cap' s')" + shows "ready_queues_relation t t'" + apply (clarsimp simp: ready_queues_relation_def) + apply (insert pre) + apply (rule use_valid[OF step_abs set_cap_exst]) + apply (rule use_valid[OF step_conc setCTE_ksReadyQueues]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedNexts_of]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedPrevs_of]) + apply (clarsimp simp: ready_queues_relation_def Let_def) + using use_valid[OF step_conc setCTE_inQ_opt_pred] + by fast + lemma updateCap_same_master: "\ cap_relation cap cap' \ \ corres dc (valid_objs and pspace_aligned and pspace_distinct and @@ -6052,6 +6030,8 @@ lemma updateCap_same_master: apply assumption apply (clarsimp simp: pspace_relations_def) apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ready_queues_relation a b" for a b \ -\) + subgoal by (erule setCTE_set_cap_ready_queues_relation_valid_corres; assumption) apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def @@ -6278,8 +6258,9 @@ lemma updateFreeIndex_forward_invs': apply (simp add:updateCap_def) apply (wp setCTE_irq_handlers' getCTE_wp) apply (simp add:updateCap_def) - apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift + apply (wp irqs_masked_lift cur_tcb_lift 
ct_idle_or_in_cur_domain'_lift hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp + sym_heap_sched_pointers_lift valid_bitmaps_lift | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] | simp add: getSlotCap_def)+ apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) diff --git a/proof/refine/ARM_HYP/Cache.thy b/proof/refine/ARM_HYP/Cache.thy deleted file mode 100644 index ad26dd3961..0000000000 --- a/proof/refine/ARM_HYP/Cache.thy +++ /dev/null @@ -1,37 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory Cache -imports Main -begin - -text \Enable the proof cache, both skipping from it - and recording to it.\ -ML \DupSkip.record_proofs := true\ -ML \proofs := 1\ - -ML \DupSkip.skip_dup_proofs := true\ - -text \If executed in reverse order, save the cache\ -ML \val cache_thy_save_cache = ref false;\ -ML \ -if (! cache_thy_save_cache) -then File.open_output (XML_Syntax.output_forest - (XML_Syntax.xml_forest_of_cache (! DupSkip.the_cache))) - (Path.basic "proof_cache.xml") -else ()\ -ML \cache_thy_save_cache := true\ -ML \cache_thy_save_cache := false\ - -text \Load the proof cache - - can take up to a minute\ - -ML \ -DupSkip.the_cache := XML_Syntax.cache_of_xml_forest ( - File.open_input (XML_Syntax.input_forest) - (Path.basic "proof_cache.xml"))\ - -end diff --git a/proof/refine/ARM_HYP/Detype_R.thy b/proof/refine/ARM_HYP/Detype_R.thy index 644082b538..ae8efc27c8 100644 --- a/proof/refine/ARM_HYP/Detype_R.thy +++ b/proof/refine/ARM_HYP/Detype_R.thy @@ -100,6 +100,9 @@ defs deletionIsSafe_def: (\ko. ksPSpace s p = Some (KOArch ko) \ p \ {ptr .. ptr + 2 ^ bits - 1} \ 7 \ bits)" +defs deletionIsSafe_delete_locale_def: + "deletionIsSafe_delete_locale \ \ptr bits s. \p. ko_wp_at' live' p s \ p \ {ptr .. ptr + 2 ^ bits - 1}" + defs ksASIDMapSafe_def: "ksASIDMapSafe \ \s. \asid hw_asid pd. armKSASIDMap (ksArchState s) asid = Some (hw_asid,pd) \ page_directory_at' pd s" @@ -116,6 +119,7 @@ lemma deleteObjects_def2: "is_aligned ptr bits \ deleteObjects ptr bits = do stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; doMachineOp (freeMemory ptr bits); stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ {ptr .. ptr + 2 ^ bits - 1})) []; modify (\s. s \ ksPSpace := \x. if x \ {ptr .. ptr + 2 ^ bits - 1} @@ -126,7 +130,8 @@ lemma deleteObjects_def2: then None else gsCNodes s x \); stateAssert ksASIDMapSafe [] od" - apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def) + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def) + apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (simp add: bind_assoc[symmetric]) @@ -149,6 +154,7 @@ lemma deleteObjects_def3: do assert (is_aligned ptr bits); stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; doMachineOp (freeMemory ptr bits); stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ {ptr .. ptr + 2 ^ bits - 1})) []; modify (\s. s \ ksPSpace := \x. if x \ {ptr .. ptr + 2 ^ bits - 1} @@ -432,6 +438,7 @@ next qed end + locale detype_locale' = detype_locale + constrains s::"det_state" lemma (in detype_locale') deletionIsSafe: @@ -522,187 +529,8 @@ proof - done thus ?thesis using cte by (auto simp: deletionIsSafe_def) qed -context begin interpretation Arch . 
(*FIXME: arch_split*) -lemma ksASIDMapSafeI: - "\ (s,s') \ state_relation; invs s; pspace_aligned' s' \ pspace_distinct' s' \ - \ ksASIDMapSafe s'" - apply (clarsimp simp: ksASIDMapSafe_def) - apply (subgoal_tac "valid_asid_map s") - prefer 2 - apply fastforce - apply (clarsimp simp: valid_asid_map_def graph_of_def) - apply (subgoal_tac "arm_asid_map (arch_state s) asid = Some (hw_asid, pd)") - prefer 2 - apply (clarsimp simp: state_relation_def arch_state_relation_def) - apply (erule allE)+ - apply (erule (1) impE) - apply clarsimp - apply (drule find_pd_for_asid_eq_helper) - apply fastforce - apply assumption - apply fastforce - apply clarsimp - apply (rule pspace_relation_pd) - apply (fastforce simp: state_relation_def) - apply fastforce - apply assumption - apply assumption - apply simp - done - -(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) -(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) -(* FIXME: move *) -lemma corres_machine_op: - assumes P: "corres_underlying Id False True r P Q x x'" - shows "corres r (P \ machine_state) (Q \ ksMachineState) - (do_machine_op x) (doMachineOp x')" - apply (rule corres_submonad3 - [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) - apply (simp_all add: state_relation_def swp_def) - done - -lemma ekheap_relation_detype: - "ekheap_relation ekh kh \ - ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" - by (fastforce simp add: ekheap_relation_def split: if_split_asm) - -lemma cap_table_at_gsCNodes_eq: - "(s, s') \ state_relation - \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" - apply (clarsimp simp: state_relation_def ghost_relation_def - obj_at_def is_cap_table) - apply (drule_tac x = ptr in spec)+ - apply (drule_tac x = bits in spec)+ - apply fastforce - done - -lemma cNodeNoPartialOverlap: - "corres dc (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s - \ valid_objs s \ pspace_aligned s) - \ - (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) - (\x. base \ x \ x \ base + 2 ^ magnitude - 1)) [])" - apply (simp add: stateAssert_def assert_def) - apply (rule corres_symb_exec_r[OF _ get_sp]) - apply (rule corres_req[rotated], subst if_P, assumption) - apply simp - apply (clarsimp simp: cNodePartialOverlap_def) - apply (drule(1) cte_wp_valid_cap) - apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq - obj_at_def is_cap_table) - apply (frule(1) pspace_alignedD) - apply simp - apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) - apply (erule is_aligned_get_word_bits[where 'a=32, folded word_bits_def]) - apply (clarsimp simp: is_aligned_no_overflow) - apply (blast intro: order_trans) - apply (simp add: is_aligned_no_overflow power_overflow word_bits_def) - apply wp+ - done - - -declare wrap_ext_det_ext_ext_def[simp] - - -lemma deleteObjects_corres: - "is_aligned base magnitude \ magnitude \ 2 \ - corres dc - (\s. einvs s - \ s \ (cap.UntypedCap d base magnitude idx) - \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s - \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) - \ untyped_children_in_mdb s \ if_unsafe_then_cap s - \ valid_mdb s \ valid_global_refs s \ ct_active s) - (\s. 
s \' (UntypedCap d base magnitude idx) - \ valid_pspace' s) - (delete_objects base magnitude) (deleteObjects base magnitude)" - apply (simp add: deleteObjects_def2) - apply (rule corres_stateAssert_implied[where P'=\, simplified]) - prefer 2 - apply clarsimp - apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and - s=s in detype_locale'.deletionIsSafe, - simp_all add: detype_locale'_def - detype_locale_def p_assoc_help invs_valid_pspace)[1] - apply (simp add:valid_cap_simps) - apply (simp add: bind_assoc[symmetric]) - apply (rule corres_stateAssert_implied2) - defer - apply (erule ksASIDMapSafeI, assumption, assumption) - apply (rule hoare_pre) - apply (rule delete_objects_invs) - apply fastforce - apply (simp add: doMachineOp_def split_def) - apply wp - apply (clarsimp simp: valid_pspace'_def pspace_distinct'_def - pspace_aligned'_def) - apply (rule conjI) - subgoal by fastforce - apply (clarsimp simp add: pspace_distinct'_def ps_clear_def - dom_if_None Diff_Int_distrib) - apply (simp add: delete_objects_def) - apply (rule_tac Q="\_ s. valid_objs s \ valid_list s \ - (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ - descendants_range (cap.UntypedCap d base magnitude idx) cref s ) \ - s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ - valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ - zombies_final s \ sym_refs (state_refs_of s) \ sym_refs (state_hyp_refs_of s) \ - untyped_children_in_mdb s \ if_unsafe_then_cap s \ - valid_global_refs s" and - Q'="\_ s. s \' capability.UntypedCap d base magnitude idx \ - valid_pspace' s" in corres_underlying_split) - apply (rule corres_bind_return) - apply (rule corres_guard_imp[where r=dc]) - apply (rule corres_split[OF _ cNodeNoPartialOverlap]) - apply (rule corres_machine_op[OF corres_Id], simp+) - apply (rule no_fail_freeMemory, simp+) - apply (wp hoare_vcg_ex_lift)+ - apply auto[1] - apply (auto elim: is_aligned_weaken) - apply (rule corres_modify) - apply (simp add: valid_pspace'_def) - apply (rule state_relation_null_filterE, assumption, - simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] - apply (simp add: detype_def, rule state.equality; simp add: detype_ext_def) - apply (intro exI, fastforce) - apply (rule ext, clarsimp simp add: null_filter_def) - apply (rule sym, rule ccontr, clarsimp) - apply (drule(4) cte_map_not_null_outside') - apply (fastforce simp add: cte_wp_at_caps_of_state) - apply simp - apply (rule ext, clarsimp simp add: null_filter'_def - map_to_ctes_delete[simplified field_simps]) - apply (rule sym, rule ccontr, clarsimp) - apply (frule(2) pspace_relation_cte_wp_atI - [OF state_relation_pspace_relation]) - apply (elim exE) - apply (frule(4) cte_map_not_null_outside') - apply (rule cte_wp_at_weakenE, erule conjunct1) - apply (case_tac y, clarsimp) - apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def - valid_nullcaps_def) - apply clarsimp - apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, - erule cte_wp_at_weakenE[OF _ TrueI], assumption+) - apply simp - apply (rule detype_pspace_relation[simplified], - simp_all add: state_relation_pspace_relation valid_pspace_def)[1] - apply (simp add: valid_cap'_def capAligned_def) - apply (clarsimp simp: valid_cap_def, assumption) - apply (fastforce simp add: detype_def detype_ext_def intro!: ekheap_relation_detype) - apply (clarsimp simp: state_relation_def ghost_relation_of_heap - detype_def) - apply (drule_tac t="gsUserPages s'" in sym) - apply (drule_tac t="gsCNodes s'" in sym) - apply (auto simp 
add: ups_of_heap_def cns_of_heap_def ext - split: option.splits kernel_object.splits)[1] - apply (simp add: valid_mdb_def) - apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | - simp add: invs_def valid_state_def valid_pspace_def - descendants_range_def | wp (once) hoare_drop_imps)+ - done +context begin interpretation Arch . (*FIXME: arch_split*) text \Invariant preservation across concrete deletion\ @@ -762,92 +590,94 @@ lemma zobj_refs_capRange: apply (drule is_aligned_no_overflow) apply simp done + end locale delete_locale = - fixes s and base and bits and ptr and idx and d - assumes cap: "cte_wp_at' (\cte. cteCap cte = UntypedCap d base bits idx) ptr s" - and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s)" - and invs: "invs' s" - and ct_act: "ct_active' s" - and sa_simp: "sch_act_simple s" - and bwb: "bits < word_bits" + fixes s' and base and bits and ptr and idx and d + assumes cap: "cte_wp_at' (\cte. cteCap cte = UntypedCap d base bits idx) ptr s'" + and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s')" + and invs: "invs' s'" + and ct_act: "ct_active' s'" + and sa_simp: "sch_act_simple s'" and al: "is_aligned base bits" - and safe: "deletionIsSafe base bits s" - -context delete_locale -begin -interpretation Arch . (*FIXME: arch_split*) -lemma valid_objs: "valid_objs' s" - and pa: "pspace_aligned' s" - and pd: "pspace_distinct' s" - and vq: "valid_queues s" - and vq': "valid_queues' s" - and sym_refs: "sym_refs (state_refs_of' s)" - and sym_hyp_refs: "sym_refs (state_hyp_refs_of' s)" - and iflive: "if_live_then_nonz_cap' s" - and ifunsafe: "if_unsafe_then_cap' s" - and dlist: "valid_dlist (ctes_of s)" - and no_0: "no_0 (ctes_of s)" - and chain_0: "mdb_chain_0 (ctes_of s)" - and badges: "valid_badges (ctes_of s)" - and contained: "caps_contained' (ctes_of s)" - and chunked: "mdb_chunked (ctes_of s)" - and umdb: "untyped_mdb' (ctes_of s)" - and uinc: "untyped_inc' (ctes_of s)" - and nullcaps: "valid_nullcaps (ctes_of s)" - and ut_rev: "ut_revocable' (ctes_of s)" - and dist_z: "distinct_zombies (ctes_of s)" - and irq_ctrl: "irq_control (ctes_of s)" - and clinks: "class_links (ctes_of s)" - and rep_r_fb: "reply_masters_rvk_fb (ctes_of s)" - and idle: "valid_idle' s" - and refs: "valid_global_refs' s" - and arch: "valid_arch_state' s" - and virq: "valid_irq_node' (irq_node' s) s" - and virqh: "valid_irq_handlers' s" - and virqs: "valid_irq_states' s" - and no_0_objs: "no_0_obj' s" - and ctnotinQ: "ct_not_inQ s" - and pde_maps: "valid_pde_mappings' s" - and irqs_masked: "irqs_masked' s" - and ctcd: "ct_idle_or_in_cur_domain' s" - and cdm: "ksCurDomain s \ maxDomain" - and vds: "valid_dom_schedule' s" + and safe: "deletionIsSafe base bits s'" + +context delete_locale begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma valid_objs: "valid_objs' s'" + and pa: "pspace_aligned' s'" + and pd: "pspace_distinct' s'" + and vbm: "valid_bitmaps s'" + and sym_sched: "sym_heap_sched_pointers s'" + and vsp: "valid_sched_pointers s'" + and sym_refs: "sym_refs (state_refs_of' s')" + and sym_hyp_refs: "sym_refs (state_hyp_refs_of' s')" + and iflive: "if_live_then_nonz_cap' s'" + and ifunsafe: "if_unsafe_then_cap' s'" + and dlist: "valid_dlist (ctes_of s')" + and no_0: "no_0 (ctes_of s')" + and chain_0: "mdb_chain_0 (ctes_of s')" + and badges: "valid_badges (ctes_of s')" + and contained: "caps_contained' (ctes_of s')" + and chunked: "mdb_chunked (ctes_of s')" + and umdb: "untyped_mdb' (ctes_of s')" + and uinc: "untyped_inc' (ctes_of s')" + and nullcaps: "valid_nullcaps (ctes_of s')" + and ut_rev: "ut_revocable' (ctes_of s')" + and dist_z: "distinct_zombies (ctes_of s')" + and irq_ctrl: "irq_control (ctes_of s')" + and clinks: "class_links (ctes_of s')" + and rep_r_fb: "reply_masters_rvk_fb (ctes_of s')" + and idle: "valid_idle' s'" + and refs: "valid_global_refs' s'" + and arch: "valid_arch_state' s'" + and virq: "valid_irq_node' (irq_node' s') s'" + and virqh: "valid_irq_handlers' s'" + and virqs: "valid_irq_states' s'" + and no_0_objs: "no_0_obj' s'" + and ctnotinQ: "ct_not_inQ s'" + and pde_maps: "valid_pde_mappings' s'" + and irqs_masked: "irqs_masked' s'" + and ctcd: "ct_idle_or_in_cur_domain' s'" + and cdm: "ksCurDomain s' \ maxDomain" + and vds: "valid_dom_schedule' s'" using invs - by (auto simp add: invs'_def valid_state'_def valid_pspace'_def - valid_mdb'_def valid_mdb_ctes_def) + by (auto simp: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) abbreviation "base_bits \ {base .. base + (2 ^ bits - 1)}" -abbreviation - "state' \ (s \ ksPSpace := \x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s x \)" +abbreviation pspace' :: pspace where + "pspace' \ \x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s' x" + +abbreviation state' :: kernel_state where + "state' \ (s' \ ksPSpace := pspace' \)" lemma ko_wp_at'[simp]: - "\P p. (ko_wp_at' P p state') = (ko_wp_at' P p s \ p \ base_bits)" + "\P p. (ko_wp_at' P p state') = (ko_wp_at' P p s' \ p \ base_bits)" by (fastforce simp add: ko_wp_at_delete'[OF pd]) lemma obj_at'[simp]: - "\P p. (obj_at' P p state') = (obj_at' P p s \ p \ base_bits)" + "\P p. (obj_at' P p state') = (obj_at' P p s' \ p \ base_bits)" by (fastforce simp add: obj_at'_real_def) lemma typ_at'[simp]: - "\T p. (typ_at' P p state') = (typ_at' P p s \ p \ base_bits)" + "typ_at' P p state' = (typ_at' P p s' \ p \ base_bits)" by (simp add: typ_at'_def) lemma valid_untyped[simp]: - "s \' UntypedCap d base bits idx" + "s' \' UntypedCap d base bits idx" using cte_wp_at_valid_objs_valid_cap' [OF cap valid_objs] by clarsimp lemma cte_wp_at'[simp]: - "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s \ p \ base_bits)" + "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s' \ p \ base_bits)" by (fastforce simp:cte_wp_at_delete'[where idx = idx,OF valid_untyped pd ]) (* the bits of caps they need for validity argument are within their capRanges *) lemma valid_cap_ctes_pre: - "\c. s \' c \ case c of CNodeCap ref bits g gs + "\c. s' \' c \ case c of CNodeCap ref bits g gs \ \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c | Zombie ref (ZombieCNode bits) n \ \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c @@ -871,13 +701,13 @@ lemma valid_cap_ctes_pre: done lemma replycap_argument: - "\p t m r. 
cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s + "\p t m r. cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s' \ t \ {base .. base + (2 ^ bits - 1)}" using safe by (fastforce simp add: deletionIsSafe_def cte_wp_at_ctes_of field_simps) lemma valid_cap': - "\p c. \ s \' c; cte_wp_at' (\cte. cteCap cte = c) p s; + "\p c. \ s' \' c; cte_wp_at' (\cte. cteCap cte = c) p s'; capRange c \ {base .. base + (2 ^ bits - 1)} = {} \ \ state' \' c" apply (subgoal_tac "capClass c = PhysicalClass \ capUntypedPtr c \ capRange c") apply (subgoal_tac "capClass c = PhysicalClass \ @@ -919,11 +749,11 @@ lemma valid_cap': done lemma objRefs_notrange: - assumes asms: "ctes_of s p = Some c" "\ isUntypedCap (cteCap c)" + assumes asms: "ctes_of s' p = Some c" "\ isUntypedCap (cteCap c)" shows "capRange (cteCap c) \ base_bits = {}" proof - from cap obtain node - where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" apply (clarsimp simp: cte_wp_at_ctes_of) apply (case_tac cte, simp) done @@ -944,11 +774,11 @@ proof - qed lemma ctes_of_valid [elim!]: - "ctes_of s p = Some cte \ s \' cteCap cte" + "ctes_of s' p = Some cte \ s' \' cteCap cte" by (case_tac cte, simp add: ctes_of_valid_cap' [OF _ valid_objs]) lemma valid_cap2: - "\ cte_wp_at' (\cte. cteCap cte = c) p s \ \ state' \' c" + "\ cte_wp_at' (\cte. cteCap cte = c) p s' \ \ state' \' c" apply (case_tac "isUntypedCap c") apply (drule cte_wp_at_valid_objs_valid_cap' [OF _ valid_objs]) apply (clarsimp simp: valid_cap'_def isCap_simps valid_untyped'_def) @@ -958,7 +788,7 @@ lemma valid_cap2: done lemma ex_nonz_cap_notRange: - "ex_nonz_cap_to' p s \ p \ base_bits" + "ex_nonz_cap_to' p s' \ p \ base_bits" apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) apply (case_tac "isUntypedCap (cteCap cte)") apply (clarsimp simp: isCap_simps) @@ -970,14 +800,18 @@ lemma ex_nonz_cap_notRange: done lemma live_notRange: - "\ ko_wp_at' P p s; \ko. P ko \ live' ko \ \ p \ base_bits" + "\ ko_wp_at' P p s'; \ko. P ko \ live' ko \ \ p \ base_bits" apply (drule if_live_then_nonz_capE' [OF iflive ko_wp_at'_weakenE]) apply simp apply (erule ex_nonz_cap_notRange) done +lemma deletionIsSafe_delete_locale_holds: + "deletionIsSafe_delete_locale base bits s'" + by (fastforce dest: live_notRange simp: deletionIsSafe_delete_locale_def field_simps) + lemma refs_notRange: - "(x, tp) \ state_refs_of' s y \ y \ base_bits" + "(x, tp) \ state_refs_of' s' y \ y \ base_bits" apply (drule state_refs_of'_elemD) apply (erule live_notRange) apply (rule refs_of_live') @@ -985,7 +819,7 @@ lemma refs_notRange: done lemma hyp_refs_notRange: - "(x, tp) \ state_hyp_refs_of' s y \ y \ base_bits" + "(x, tp) \ state_hyp_refs_of' s' y \ y \ base_bits" apply (drule state_hyp_refs_of'_elemD) apply (erule live_notRange) apply (rule hyp_refs_of_live') @@ -993,8 +827,8 @@ lemma hyp_refs_notRange: done lemma sym_refs_VCPU_hyp_live': - "\ko_wp_at' ((=) (KOArch (KOVCPU v))) p s; sym_refs (state_hyp_refs_of' s); vcpuTCBPtr v = Some t\ - \ ko_wp_at' (\ko. koTypeOf ko = TCBT \ hyp_live' ko) t s" + "\ko_wp_at' ((=) (KOArch (KOVCPU v))) p s'; sym_refs (state_hyp_refs_of' s'); vcpuTCBPtr v = Some t\ + \ ko_wp_at' (\ko. 
koTypeOf ko = TCBT \ hyp_live' ko) t s'" apply (drule (1) sym_hyp_refs_ko_wp_atD) apply (clarsimp) apply (drule state_hyp_refs_of'_elemD) @@ -1003,17 +837,308 @@ lemma sym_refs_VCPU_hyp_live': done lemma sym_refs_TCB_hyp_live': - "\ko_wp_at' ((=) (KOTCB t)) p s; sym_refs (state_hyp_refs_of' s); atcbVCPUPtr (tcbArch t) = Some v\ - \ ko_wp_at' (\ko. koTypeOf ko = ArchT VCPUT \ hyp_live' ko) v s" + "\ko_wp_at' ((=) (KOTCB t)) p s'; sym_refs (state_hyp_refs_of' s'); atcbVCPUPtr (tcbArch t) = Some v\ + \ ko_wp_at' (\ko. koTypeOf ko = ArchT VCPUT \ hyp_live' ko) v s'" apply (drule (1) sym_hyp_refs_ko_wp_atD) apply (clarsimp) apply (drule state_hyp_refs_of'_elemD) apply (simp add: ko_wp_at'_def) apply (clarsimp simp: hyp_refs_of_rev' hyp_live'_def arch_live'_def) done +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma ksASIDMapSafeI: + "\ (s,s') \ state_relation; invs s; pspace_aligned' s' \ pspace_distinct' s' \ + \ ksASIDMapSafe s'" + apply (clarsimp simp: ksASIDMapSafe_def) + apply (subgoal_tac "valid_asid_map s") + prefer 2 + apply fastforce + apply (clarsimp simp: valid_asid_map_def graph_of_def) + apply (subgoal_tac "arm_asid_map (arch_state s) asid = Some (hw_asid, pd)") + prefer 2 + apply (clarsimp simp: state_relation_def arch_state_relation_def) + apply (erule allE)+ + apply (erule (1) impE) + apply clarsimp + apply (drule find_pd_for_asid_eq_helper) + apply fastforce + apply assumption + apply fastforce + apply clarsimp + apply (rule pspace_relation_pd) + apply (fastforce simp: state_relation_def) + apply fastforce + apply assumption + apply assumption + apply simp + done + +(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) +(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) +(* FIXME: move *) +lemma corres_machine_op: + assumes P: "corres_underlying Id False True r P Q x x'" + shows "corres r (P \ machine_state) (Q \ ksMachineState) + (do_machine_op x) (doMachineOp x')" + apply (rule corres_submonad3 + [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) + apply (simp_all add: state_relation_def swp_def) + done + +lemma ekheap_relation_detype: + "ekheap_relation ekh kh \ + ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" + by (fastforce simp add: ekheap_relation_def split: if_split_asm) + +lemma cap_table_at_gsCNodes_eq: + "(s, s') \ state_relation + \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" + apply (clarsimp simp: state_relation_def ghost_relation_def + obj_at_def is_cap_table) + apply (drule_tac x = ptr in spec)+ + apply (drule_tac x = bits in spec)+ + apply fastforce + done + +lemma cNodeNoPartialOverlap: + "corres dc (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ valid_objs s \ pspace_aligned s) + \ + (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) + (\x. 
base \ x \ x \ base + 2 ^ magnitude - 1)) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: cNodePartialOverlap_def) + apply (drule(1) cte_wp_valid_cap) + apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq + obj_at_def is_cap_table) + apply (frule(1) pspace_alignedD) + apply simp + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (erule is_aligned_get_word_bits[where 'a=32, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow) + apply (blast intro: order_trans) + apply (simp add: is_aligned_no_overflow power_overflow word_bits_def) + apply wp+ + done + +declare wrap_ext_det_ext_ext_def[simp] + +crunches doMachineOp + for deletionIsSafe_delete_locale[wp]: "deletionIsSafe_delete_locale base magnitude" + (simp: deletionIsSafe_delete_locale_def) + +lemma detype_tcbSchedNexts_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedNext) + = tcbSchedNexts_of s'" + supply projectKOs[simp] + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def live'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_tcbSchedPrevs_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedPrev) + = tcbSchedPrevs_of s'" + supply projectKOs[simp] + using pspace_alignedD' pspace_distinctD' + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def live'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_inQ: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ \d p. (inQ d p |< ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of')) + = (inQ d p |< tcbs_of' s')" + supply projectKOs[simp] + using pspace_alignedD' pspace_distinctD' + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: inQ_def opt_pred_def ko_wp_at'_def live'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_ready_queues_relation: + "\pspace_aligned' s'; pspace_distinct' s'; + \p. p \ {lower..upper} \ \ ko_wp_at' live' p s'; + ready_queues_relation s s'; upper = upper'\ + \ ready_queues_relation_2 + (ready_queues (detype {lower..upper'} s)) + (ksReadyQueues s') + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedNext) + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedPrev) + (\d p. inQ d p |< ((\x. 
if lower \ x \ x \ upper then None else ksPSpace s' x) |> tcb_of'))" + apply (clarsimp simp: detype_ext_def ready_queues_relation_def Let_def) + apply (frule (1) detype_tcbSchedNexts_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_tcbSchedPrevs_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_inQ[where S="{lower..upper}"]; simp) + apply (fastforce simp add: detype_def detype_ext_def wrap_ext_det_ext_ext_def) + done + +lemma deleteObjects_corres: + "is_aligned base magnitude \ magnitude \ 2 \ + corres dc + (\s. einvs s + \ s \ (cap.UntypedCap d base magnitude idx) + \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) + \ untyped_children_in_mdb s \ if_unsafe_then_cap s + \ valid_mdb s \ valid_global_refs s \ ct_active s + \ schact_is_rct s) + (\s'. invs' s' + \ cte_wp_at' (\cte. cteCap cte = UntypedCap d base magnitude idx) ptr s' + \ descendants_range' (UntypedCap d base magnitude idx) ptr (ctes_of s') + \ ct_active' s' + \ s' \' (UntypedCap d base magnitude idx)) + (delete_objects base magnitude) (deleteObjects base magnitude)" + apply (simp add: deleteObjects_def2) + apply (rule corres_stateAssert_implied[where P'=\, simplified]) + prefer 2 + apply clarsimp + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add: valid_cap_simps) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (rule_tac ptr=ptr and idx=idx and d=d in delete_locale.deletionIsSafe_delete_locale_holds) + apply (clarsimp simp: delete_locale_def) + apply (intro conjI) + apply (fastforce simp: sch_act_simple_def schact_is_rct_def state_relation_def) + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add: valid_cap_simps) + apply (simp add: bind_assoc[symmetric]) + apply (rule corres_stateAssert_implied2) + defer + apply (erule ksASIDMapSafeI, assumption, assumption) + apply (rule hoare_pre) + apply (rule delete_objects_invs) + apply fastforce + apply (simp add: doMachineOp_def split_def) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def pspace_distinct'_def + pspace_aligned'_def) + apply (rule conjI) + subgoal by fastforce + apply (clarsimp simp add: pspace_distinct'_def ps_clear_def + dom_if_None Diff_Int_distrib) + apply (simp add: delete_objects_def) + apply (rule_tac Q="\_ s. valid_objs s \ valid_list s \ + (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ + descendants_range (cap.UntypedCap d base magnitude idx) cref s ) \ + s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ + valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ + zombies_final s \ sym_refs (state_refs_of s) \ sym_refs (state_hyp_refs_of s) \ + untyped_children_in_mdb s \ if_unsafe_then_cap s \ + valid_global_refs s" and + Q'="\_ s. 
s \' capability.UntypedCap d base magnitude idx \ + valid_pspace' s \ + deletionIsSafe_delete_locale base magnitude s" in corres_underlying_split) + apply (rule corres_bind_return) + apply (rule corres_guard_imp[where r=dc]) + apply (rule corres_split[OF _ cNodeNoPartialOverlap]) + apply (rule corres_machine_op[OF corres_Id], simp+) + apply (rule no_fail_freeMemory, simp+) + apply (wp hoare_vcg_ex_lift)+ + apply auto[1] + apply (auto elim: is_aligned_weaken) + apply (rule corres_modify) + apply (simp add: valid_pspace'_def) + apply (rule state_relation_null_filterE, assumption, + simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] + apply (simp add: detype_def, rule state.equality; simp add: detype_ext_def) + apply (intro exI, fastforce) + apply (rule ext, clarsimp simp add: null_filter_def) + apply (rule sym, rule ccontr, clarsimp) + apply (drule(4) cte_map_not_null_outside') + apply (fastforce simp add: cte_wp_at_caps_of_state) + apply simp + apply (rule ext, clarsimp simp add: null_filter'_def + map_to_ctes_delete[simplified field_simps]) + apply (rule sym, rule ccontr, clarsimp) + apply (frule(2) pspace_relation_cte_wp_atI + [OF state_relation_pspace_relation]) + apply (elim exE) + apply (frule(4) cte_map_not_null_outside') + apply (rule cte_wp_at_weakenE, erule conjunct1) + apply (case_tac y, clarsimp) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def + valid_nullcaps_def) + apply clarsimp + apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, + erule cte_wp_at_weakenE[OF _ TrueI], assumption+) + apply simp + apply (rule detype_pspace_relation[simplified], + simp_all add: state_relation_pspace_relation valid_pspace_def)[1] + apply (simp add: valid_cap'_def capAligned_def) + apply (clarsimp simp: valid_cap_def, assumption) + apply (fastforce simp add: detype_def detype_ext_def intro!: ekheap_relation_detype) + apply (rule detype_ready_queues_relation; blast?) + apply (clarsimp simp: deletionIsSafe_delete_locale_def) + apply (erule state_relation_ready_queues_relation) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap + detype_def) + apply (drule_tac t="gsUserPages s'" in sym) + apply (drule_tac t="gsCNodes s'" in sym) + apply (auto simp add: ups_of_heap_def cns_of_heap_def ext + split: option.splits kernel_object.splits)[1] + apply (simp add: valid_mdb_def) + apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | + simp add: invs_def valid_state_def valid_pspace_def + descendants_range_def | wp (once) hoare_drop_imps)+ + apply fastforce + done +end + +context delete_locale begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma live_idle_untyped_range': + "ko_wp_at' live' p s' \ p = idle_thread_ptr \ p \ base_bits" + apply (case_tac "ko_wp_at' live' p s'") + apply (drule if_live_then_nonz_capE'[OF iflive ko_wp_at'_weakenE]) + apply simp + apply (erule ex_nonz_cap_notRange) + apply clarsimp + apply (insert invs_valid_global'[OF invs] cap invs_valid_idle'[OF invs]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_global_refsD') + apply (clarsimp simp: valid_idle'_def) + using atLeastAtMost_iff apply (simp add: p_assoc_help mask_eq_exp_minus_1) + by fastforce + +lemma untyped_range_live_idle': + "p \ base_bits \ \ (ko_wp_at' live' p s' \ p = idle_thread_ptr)" + using live_idle_untyped_range' by blast lemma valid_obj': - "\ valid_obj' obj s; ko_wp_at' ((=) obj) p s \ \ valid_obj' obj state'" + "\ valid_obj' obj s'; ko_wp_at' ((=) obj) p s'; sym_heap_sched_pointers s' \ + \ valid_obj' obj state'" apply (case_tac obj, simp_all add: valid_obj'_def) apply (rename_tac endpoint) apply (case_tac endpoint, simp_all add: valid_ep'_def)[1] @@ -1040,11 +1165,23 @@ lemma valid_obj': apply (erule(2) cte_wp_at_tcbI') apply fastforce apply simp - apply (rename_tac tcb) - apply (simp only: conj_assoc[symmetric], rule conjI) - apply (case_tac "tcbState tcb"; - clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def - dest!: refs_notRange split: option.splits) + apply (intro conjI) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; clarsimp simp: valid_tcb_state'_def dest!: refs_notRange) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; + clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def + dest!: refs_notRange split: option.splits) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac prev) + apply (cut_tac P=live' and p=prev in live_notRange; fastforce?) + apply (fastforce dest: sym_heapD2[where p'=p] + simp: opt_map_def ko_wp_at'_def obj_at'_def projectKOs live'_def) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac "next") + apply (cut_tac P=live' and p="next" in live_notRange; fastforce?) 
+ apply (fastforce dest!: sym_heapD1[where p=p] + simp: opt_map_def ko_wp_at'_def obj_at'_def projectKOs live'_def) using sym_hyp_refs apply (clarsimp simp add: valid_arch_tcb'_def split: option.split_asm) apply (drule (1) sym_refs_TCB_hyp_live'[rotated]) @@ -1074,18 +1211,51 @@ lemma valid_obj': apply clarsimp done +lemma tcbSchedNexts_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedNext) = tcbSchedNexts_of s'" + supply projectKOs[simp] + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def live'_def) + apply (fastforce simp: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + +lemma tcbSchedPrevs_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedPrev) = tcbSchedPrevs_of s'" + supply projectKOs[simp] + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def live'_def) + apply (fastforce simp: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + lemma st_tcb: - "\P p. \ st_tcb_at' P p s; \ P Inactive; \ P IdleThreadState \ \ - st_tcb_at' P p state'" + "\P p. \ st_tcb_at' P p s'; \ P Inactive; \ P IdleThreadState \ \ st_tcb_at' P p state'" by (fastforce simp: pred_tcb_at'_def obj_at'_real_def projectKOs live'_def hyp_live'_def - dest: live_notRange) + dest: live_notRange) lemma irq_nodes_global: - "\irq :: 10 word. irq_node' s + (ucast irq) * 16 \ global_refs' s" - by (simp add: global_refs'_def mult.commute mult.left_commute) + "\irq :: 10 word. irq_node' s' + (ucast irq) * 16 \ global_refs' s'" + by (simp add: global_refs'_def mult.commute mult.left_commute) lemma global_refs: - "global_refs' s \ base_bits = {}" + "global_refs' s' \ base_bits = {}" using cap apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule valid_global_refsD' [OF _ refs]) @@ -1093,20 +1263,20 @@ lemma global_refs: done lemma global_refs2: - "global_refs' s \ (- base_bits)" + "global_refs' s' \ (- base_bits)" using global_refs by blast lemma irq_nodes_range: - "\irq :: 10 word. irq_node' s + (ucast irq) * 16 \ base_bits" + "\irq :: 10 word. irq_node' s' + (ucast irq) * 16 \ base_bits" using irq_nodes_global global_refs by blast lemma cte_refs_notRange: - assumes asms: "ctes_of s p = Some c" - shows "cte_refs' (cteCap c) (irq_node' s) \ base_bits = {}" + assumes asms: "ctes_of s' p = Some c" + shows "cte_refs' (cteCap c) (irq_node' s') \ base_bits = {}" proof - from cap obtain node - where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" apply (clarsimp simp: cte_wp_at_ctes_of) apply (case_tac cte, simp) done @@ -1135,7 +1305,7 @@ proof - qed lemma non_null_present: - "cte_wp_at' (\c. cteCap c \ NullCap) p s \ p \ base_bits" + "cte_wp_at' (\c. 
cteCap c \ NullCap) p s' \ p \ base_bits" apply (drule (1) if_unsafe_then_capD' [OF _ ifunsafe]) apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of dest!: cte_refs_notRange simp del: atLeastAtMost_iff) @@ -1143,7 +1313,7 @@ lemma non_null_present: done lemma cte_cap: - "ex_cte_cap_to' p s \ ex_cte_cap_to' p state'" + "ex_cte_cap_to' p s' \ ex_cte_cap_to' p state'" apply (clarsimp simp: ex_cte_cap_to'_def) apply (frule non_null_present [OF cte_wp_at_weakenE']) apply clarsimp @@ -1151,37 +1321,37 @@ lemma cte_cap: done lemma idle_notRange: - "\cref. \ cte_wp_at' (\c. ksIdleThread s \ capRange (cteCap c)) cref s - \ ksIdleThread s \ base_bits" + "\cref. \ cte_wp_at' (\c. ksIdleThread s' \ capRange (cteCap c)) cref s' + \ ksIdleThread s' \ base_bits" apply (insert cap) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule_tac x=ptr in allE, clarsimp simp: field_simps) done abbreviation - "ctes' \ map_to_ctes (\x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s x)" + "ctes' \ map_to_ctes (\x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s' x)" lemmas tree_to_ctes = map_to_ctes_delete [OF valid_untyped pd] lemma map_to_ctesE[elim!]: - "\ ctes' x = Some cte; \ ctes_of s x = Some cte; x \ base_bits \ \ P \ \ P" + "\ ctes' x = Some cte; \ ctes_of s' x = Some cte; x \ base_bits \ \ P \ \ P" by (clarsimp simp: tree_to_ctes split: if_split_asm) lemma not_nullMDBNode: - "\ ctes_of s x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" + "\ ctes_of s' x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" using nullcaps apply (cases cte) apply (simp add: valid_nullcaps_def) done -lemma mdb_src: "\ ctes_of s \ x \ y; y \ 0 \ \ x \ base_bits" +lemma mdb_src: "\ ctes_of s' \ x \ y; y \ 0 \ \ x \ base_bits" apply (rule non_null_present) apply (clarsimp simp: next_unfold' cte_wp_at_ctes_of) apply (erule(1) not_nullMDBNode) apply (simp add: nullMDBNode_def nullPointer_def) done -lemma mdb_dest: "\ ctes_of s \ x \ y; y \ 0 \ \ y \ base_bits" +lemma mdb_dest: "\ ctes_of s' \ x \ y; y \ 0 \ \ y \ base_bits" apply (case_tac "x = 0") apply (insert no_0, simp add: next_unfold')[1] apply (drule(1) vdlist_nextD0 [OF _ _ dlist]) @@ -1192,7 +1362,7 @@ lemma mdb_dest: "\ ctes_of s \ x \ y; y \ 0 \ done lemma trancl_next[elim]: - "\ ctes_of s \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" + "\ ctes_of s' \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" apply (erule rev_mp, erule converse_trancl_induct) apply clarsimp apply (rule r_into_trancl) @@ -1210,14 +1380,14 @@ lemma trancl_next[elim]: done lemma mdb_parent_notrange: - "ctes_of s \ x \ y \ x \ base_bits \ y \ base_bits" + "ctes_of s' \ x \ y \ x \ base_bits \ y \ base_bits" apply (erule subtree.induct) apply (frule(1) mdb_src, drule(1) mdb_dest, simp) apply (drule(1) mdb_dest, simp) done lemma mdb_parent: - "ctes_of s \ x \ y \ ctes' \ x \ y" + "ctes_of s' \ x \ y \ ctes' \ x \ y" apply (erule subtree.induct) apply (frule(1) mdb_src, frule(1) mdb_dest) apply (rule subtree.direct_parent) @@ -1233,7 +1403,7 @@ lemma mdb_parent: done lemma trancl_next_rev: - "ctes' \ x \\<^sup>+ y \ ctes_of s \ x \\<^sup>+ y" + "ctes' \ x \\<^sup>+ y \ ctes_of s' \ x \\<^sup>+ y" apply (erule converse_trancl_induct) apply (rule r_into_trancl) apply (clarsimp simp: next_unfold') @@ -1243,7 +1413,7 @@ lemma trancl_next_rev: done lemma is_chunk[elim!]: - "is_chunk (ctes_of s) cap x y \ is_chunk ctes' cap x y" + "is_chunk (ctes_of s') cap x y \ is_chunk ctes' cap x y" apply (simp 
add: is_chunk_def) apply (erule allEI) apply (clarsimp dest!: trancl_next_rev) @@ -1282,17 +1452,18 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def show "pspace_aligned' ?s" using pa by (simp add: pspace_aligned'_def dom_def) - show "pspace_distinct' ?s" using pd + show pspace_distinct'_state': "pspace_distinct' ?s" using pd by (clarsimp simp add: pspace_distinct'_def ps_clear_def dom_if_None Diff_Int_distrib) - show "valid_objs' ?s" using valid_objs + show "valid_objs' ?s" using valid_objs sym_sched apply (clarsimp simp: valid_objs'_def ran_def) apply (rule_tac p=a in valid_obj') - apply fastforce - apply (frule pspace_alignedD'[OF _ pa]) - apply (frule pspace_distinctD'[OF _ pd]) - apply (clarsimp simp: ko_wp_at'_def) + apply fastforce + apply (frule pspace_alignedD'[OF _ pa]) + apply (frule pspace_distinctD'[OF _ pd]) + apply (clarsimp simp: ko_wp_at'_def) + apply fastforce done from sym_refs show "sym_refs (state_refs_of' ?s)" @@ -1313,19 +1484,6 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply (simp add: hyp_refs_notRange[simplified] state_hyp_refs_ko_wp_at_eq) done - from vq show "valid_queues ?s" - apply (clarsimp simp: valid_queues_def bitmapQ_defs) - apply (clarsimp simp: valid_queues_no_bitmap_def) - apply (drule spec, drule spec, drule conjunct1, drule(1) bspec) - apply (clarsimp simp: obj_at'_real_def) - apply (frule if_live_then_nonz_capE'[OF iflive, OF ko_wp_at'_weakenE]) - apply (clarsimp simp: projectKOs inQ_def live'_def) - apply (clarsimp dest!: ex_nonz_cap_notRange) - done - - from vq' show "valid_queues' ?s" - by (simp add: valid_queues'_def) - show "if_live_then_nonz_cap' ?s" using iflive apply (clarsimp simp: if_live_then_nonz_cap'_def) apply (drule spec, drule(1) mp) @@ -1341,7 +1499,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def intro!: cte_cap) from idle_notRange refs - have "ksIdleThread s \ ?ran" + have "ksIdleThread s' \ ?ran" apply (simp add: cte_wp_at_ctes_of valid_global_refs'_def valid_refs'_def) apply blast done @@ -1446,11 +1604,11 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def global_refs'_def) apply (intro conjI) apply (simp add: valid_asid_table'_def) - apply (case_tac "armHSCurVCPU (ksArchState s)"; clarsimp simp add: split_def) + apply (case_tac "armHSCurVCPU (ksArchState s')"; clarsimp simp add: split_def) apply (drule live_notRange, clarsimp, case_tac ko; simp add: is_vcpu'_def live'_def) done - show "valid_irq_node' (irq_node' s) ?s" + show "valid_irq_node' (irq_node' s') ?s" using virq irq_nodes_range by (simp add: valid_irq_node'_def mult.commute mult.left_commute ucast_ucast_mask_8) @@ -1480,7 +1638,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def ball_ran_eq) from virqs - show "valid_irq_states' s" . + show "valid_irq_states' s'" . 
from no_0_objs show "no_0_obj' state'" @@ -1495,19 +1653,19 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def by (simp add: irqs_masked'_def) from sa_simp ct_act - show "sch_act_wf (ksSchedulerAction s) state'" + show "sch_act_wf (ksSchedulerAction s') state'" apply (simp add: sch_act_simple_def) - apply (case_tac "ksSchedulerAction s", simp_all add: ct_in_state'_def) + apply (case_tac "ksSchedulerAction s'", simp_all add: ct_in_state'_def) apply (fastforce dest!: st_tcb elim!: pred_tcb'_weakenE) done from invs - have "pspace_domain_valid s" by (simp add: invs'_def valid_state'_def) + have "pspace_domain_valid s'" by (simp add: invs'_def valid_state'_def) thus "pspace_domain_valid state'" by (simp add: pspace_domain_valid_def) from invs - have "valid_machine_state' s" by (simp add: invs'_def valid_state'_def) + have "valid_machine_state' s'" by (simp add: invs'_def valid_state'_def) thus "valid_machine_state' ?state''" apply (clarsimp simp: valid_machine_state'_def) apply (drule_tac x=p in spec) @@ -1562,12 +1720,11 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply (clarsimp dest!: ex_nonz_cap_notRange elim!: ko_wp_at'_weakenE) done - from cdm show "ksCurDomain s \ maxDomain" . + from cdm show "ksCurDomain s' \ maxDomain" . from invs - have urz: "untyped_ranges_zero' s" by (simp add: invs'_def valid_state'_def) - show "untyped_ranges_zero_inv (cteCaps_of state') - (gsUntypedZeroRanges s)" + have urz: "untyped_ranges_zero' s'" by (simp add: invs'_def valid_state'_def) + show "untyped_ranges_zero_inv (cteCaps_of state') (gsUntypedZeroRanges s')" apply (simp add: untyped_zero_ranges_cte_def urz[unfolded untyped_zero_ranges_cte_def, rule_format, symmetric]) apply (clarsimp simp: fun_eq_iff intro!: arg_cong[where f=Ex]) @@ -1577,17 +1734,31 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply simp done + from vbm + show "valid_bitmaps state'" + by (simp add: valid_bitmaps_def bitmapQ_defs) + + from sym_sched + show "sym_heap (pspace' |> tcb_of' |> tcbSchedNext) (pspace' |> tcb_of' |> tcbSchedPrev)" + using pa pd pspace_distinct'_state' + by (fastforce simp: tcbSchedNexts_of_pspace' tcbSchedPrevs_of_pspace') + + from vsp show "valid_sched_pointers_2 (pspace' |> tcb_of' |> tcbSchedPrev) + (pspace' |> tcb_of' |> tcbSchedNext) + (tcbQueued |< (pspace' |> tcb_of'))" + by (clarsimp simp: valid_sched_pointers_def opt_pred_def opt_map_def) + qed (clarsimp) lemma (in delete_locale) delete_ko_wp_at': - assumes objs: "ko_wp_at' P p s \ ex_nonz_cap_to' p s" + assumes objs: "ko_wp_at' P p s' \ ex_nonz_cap_to' p s'" shows "ko_wp_at' P p state'" using objs by (clarsimp simp: ko_wp_at'_def ps_clear_def dom_if_None Diff_Int_distrib dest!: ex_nonz_cap_notRange) lemma (in delete_locale) null_filter': - assumes descs: "Q (null_filter' (ctes_of s))" + assumes descs: "Q (null_filter' (ctes_of s'))" shows "Q (null_filter' (ctes_of state'))" using descs ifunsafe apply (clarsimp elim!: rsubst[where P=Q]) @@ -1605,7 +1776,7 @@ lemma (in delete_locale) null_filter': done lemma (in delete_locale) delete_ex_cte_cap_to': - assumes exc: "ex_cte_cap_to' p s" + assumes exc: "ex_cte_cap_to' p s'" shows "ex_cte_cap_to' p state'" using exc by (clarsimp elim!: cte_cap) @@ -2044,35 +2215,18 @@ lemma cte_wp_at_top: apply (simp add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply 
(clarsimp simp:cte_check_def) apply (simp add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply (simp add:typeError_def fail_def cte_check_def split:Structures_H.kernel_object.splits)+ done - -lemma neq_out_intv: - "\a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" - by simp - -lemma rule_out_intv: - "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a\b \ - \ b \ {a..a + 2 ^ objBitsKO obj - 1}" - apply (drule(1) pspace_distinctD') - apply (subst (asm) ps_clear_def) - apply (drule_tac x = b in orthD2) - apply fastforce - apply (drule neq_out_intv) - apply simp - apply simp - done - lemma locateCTE_monad: assumes ko_wp_at: "\Q dest. \\s. P1 s \ ko_wp_at' (\obj. Q (objBitsKO obj)) dest s \ f @@ -2148,8 +2302,8 @@ proof - apply (drule base_member_set[OF pspace_alignedD']) apply simp apply (simp add:objBitsKO_bounded2[unfolded word_bits_def,simplified]) - apply (clarsimp simp:field_simps) - apply blast + apply (clarsimp simp: field_simps) + apply (elim disjE; fastforce simp: mask_def p_assoc_help) done assume "{(ptr, s)} = fst (locateCTE src s)" @@ -2164,7 +2318,7 @@ qed lemma empty_fail_locateCTE: "empty_fail (locateCTE src)" - by (simp add:locateCTE_def bind_assoc split_def) + by (fastforce simp: locateCTE_def bind_assoc split_def) lemma fail_empty_locateCTE: "snd (locateCTE src s) \ fst (locateCTE src s) = {}" @@ -2757,7 +2911,7 @@ lemma storePDE_det: "ko_wp_at' ((=) (KOArch (KOPDE pde))) ptr s \ storePDE ptr (new_pde::ARM_HYP_H.pde) s = modify - (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPDE new_pde)))) s" + (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPDE new_pde)))) s" apply (clarsimp simp:ko_wp_at'_def storePDE_def split_def bind_def gets_def return_def wordsFromPDE_def get_def setObject_def headM_def tailM_def @@ -3005,7 +3159,7 @@ lemma cte_wp_at_modify_pde: atLeastAtMost_iff shows "\ksPSpace s ptr' = Some (KOArch (KOPDE pde)); pspace_aligned' s;cte_wp_at' \ ptr s\ - \ cte_wp_at' \ ptr (s\ksPSpace := ksPSpace s(ptr' \ (KOArch (KOPDE pde')))\)" + \ cte_wp_at' \ ptr (s\ksPSpace := (ksPSpace s)(ptr' \ (KOArch (KOPDE pde')))\)" apply (simp add:cte_wp_at_obj_cases_mask obj_at'_real_def) apply (frule(1) pspace_alignedD') apply (elim disjE) @@ -3289,7 +3443,7 @@ lemma placeNewObject_tcb_at': placeNewObject ptr (makeObject::tcb) 0 \\_ s. 
tcb_at' ptr s \" apply (simp add: placeNewObject_def placeNewObject'_def split_def) - apply (wp hoare_unless_wp |wpc | simp add:alignError_def)+ + apply (wp unless_wp |wpc | simp add:alignError_def)+ by (auto simp: obj_at'_def is_aligned_mask lookupAround2_None1 lookupAround2_char1 field_simps objBits_simps projectKO_opt_tcb projectKO_def return_def ps_clear_def @@ -3728,7 +3882,7 @@ proof - apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) apply simp+ apply (simp add:word_le_sub1) - apply (wp haskell_assert_wp hoare_unless_wp | wpc + apply (wp haskell_assert_wp unless_wp | wpc | simp add:alignError_def if_apply_def2 del: fun_upd_apply hoare_fail_any)+ apply (rule impI) apply (subgoal_tac @@ -4270,7 +4424,7 @@ lemma createObjects_Cons: apply simp apply (wp haskell_assert_wp | wpc)+ apply simp - apply (wp hoare_unless_wp |clarsimp)+ + apply (wp unless_wp |clarsimp)+ apply (drule range_cover.aligned) apply (simp add:is_aligned_mask) done @@ -4599,7 +4753,7 @@ proof - apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) apply simp+ apply (simp add:word_le_sub1) - apply (wp haskell_assert_wp hoare_unless_wp |wpc + apply (wp haskell_assert_wp unless_wp |wpc |simp add:alignError_def del:fun_upd_apply)+ apply (rule conjI) apply (rule impI) @@ -4661,7 +4815,7 @@ lemma createTCBs_tcb_at': \\rv s. (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s)\" apply (simp add:createObjects'_def split_def alignError_def) - apply (wp hoare_unless_wp |wpc)+ + apply (wp unless_wp |wpc)+ apply (subst data_map_insert_def[symmetric])+ apply clarsimp apply (subgoal_tac "(\x\of_nat n. @@ -4674,7 +4828,6 @@ lemma createTCBs_tcb_at': apply simp apply simp apply (clarsimp simp: retype_obj_at_disj') - apply (clarsimp simp: projectKO_opt_tcb) apply (clarsimp simp: new_cap_addrs_def image_def) apply (drule_tac x = "unat x" in bspec) apply (simp add:objBits_simps' shiftl_t2n) @@ -5488,7 +5641,7 @@ lemma createObject_pspace_aligned_distinct': createObject ty ptr us d \\xa s. 
pspace_aligned' s \ pspace_distinct' s\" apply (rule hoare_pre) - apply (wp placeNewObject_pspace_aligned' hoare_unless_wp + apply (wp placeNewObject_pspace_aligned' unless_wp placeNewObject_pspace_distinct' | simp add:ARM_HYP_H.createObject_def Retype_H.createObject_def objBits_simps diff --git a/proof/refine/ARM_HYP/EmptyFail.thy b/proof/refine/ARM_HYP/EmptyFail.thy index d3b7d68de5..741a9ba837 100644 --- a/proof/refine/ARM_HYP/EmptyFail.thy +++ b/proof/refine/ARM_HYP/EmptyFail.thy @@ -19,12 +19,12 @@ lemma empty_fail_projectKO [simp, intro!]: lemma empty_fail_alignCheck [intro!, wp, simp]: "empty_fail (alignCheck a b)" unfolding alignCheck_def - by (simp add: alignError_def) + by (fastforce simp: alignError_def) lemma empty_fail_magnitudeCheck [intro!, wp, simp]: "empty_fail (magnitudeCheck a b c)" unfolding magnitudeCheck_def - by (simp split: option.splits) + by (fastforce split: option.splits) lemma empty_fail_loadObject_default [intro!, wp, simp]: shows "empty_fail (loadObject_default x b c d)" @@ -33,7 +33,7 @@ lemma empty_fail_loadObject_default [intro!, wp, simp]: lemma empty_fail_threadGet [intro!, wp, simp]: "empty_fail (threadGet f p)" - by (simp add: threadGet_def getObject_def split_def) + by (fastforce simp: threadGet_def getObject_def split_def) lemma empty_fail_getCTE [intro!, wp, simp]: "empty_fail (getCTE slot)" @@ -47,12 +47,12 @@ lemma empty_fail_getCTE [intro!, wp, simp]: lemma empty_fail_updateObject_cte [intro!, wp, simp]: "empty_fail (updateObject (v :: cte) ko a b c)" - by (simp add: updateObject_cte typeError_def unless_def split: kernel_object.splits ) + by (fastforce simp: updateObject_cte typeError_def unless_def split: kernel_object.splits ) lemma empty_fail_setCTE [intro!, wp, simp]: "empty_fail (setCTE p cte)" unfolding setCTE_def - by (simp add: setObject_def split_def) + by (fastforce simp: setObject_def split_def) lemma empty_fail_updateMDB [intro!, wp, simp]: "empty_fail (updateMDB a b)" @@ -60,16 +60,15 @@ lemma empty_fail_updateMDB [intro!, wp, simp]: lemma empty_fail_getSlotCap [intro!, wp, simp]: "empty_fail (getSlotCap a)" - unfolding getSlotCap_def by simp + unfolding getSlotCap_def by fastforce context begin interpretation Arch . (*FIXME: arch_split*) lemma empty_fail_getObject: - assumes x: "(\b c d. empty_fail (loadObject x b c d::'a :: pspace_storable kernel))" + assumes "\b c d. 
empty_fail (loadObject x b c d::'a :: pspace_storable kernel)" shows "empty_fail (getObject x :: 'a :: pspace_storable kernel)" apply (simp add: getObject_def split_def) - apply (safe intro!: empty_fail_bind empty_fail_gets empty_fail_assert_opt) - apply (rule x) + apply (safe intro!: assms) done lemma empty_fail_getObject_tcb [intro!, wp, simp]: @@ -78,22 +77,22 @@ lemma empty_fail_getObject_tcb [intro!, wp, simp]: lemma empty_fail_updateTrackedFreeIndex [intro!, wp, simp]: shows "empty_fail (updateTrackedFreeIndex p idx)" - by (simp add: updateTrackedFreeIndex_def) + by (fastforce simp add: updateTrackedFreeIndex_def) lemma empty_fail_updateNewFreeIndex [intro!, wp, simp]: shows "empty_fail (updateNewFreeIndex p)" apply (simp add: updateNewFreeIndex_def) - apply (safe intro!: empty_fail_bind) + apply safe apply (simp split: capability.split) done lemma empty_fail_insertNewCap [intro!, wp, simp]: "empty_fail (insertNewCap p p' cap)" - unfolding insertNewCap_def by simp + unfolding insertNewCap_def by fastforce lemma empty_fail_getIRQSlot [intro!, wp, simp]: "empty_fail (getIRQSlot irq)" - by (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv) + by (fastforce simp: getIRQSlot_def getInterruptState_def locateSlot_conv) lemma empty_fail_getObject_ntfn [intro!, wp, simp]: "empty_fail (getObject p :: Structures_H.notification kernel)" @@ -107,15 +106,15 @@ lemma empty_fail_lookupIPCBuffer [intro!, wp, simp]: "empty_fail (lookupIPCBuffer a b)" by (clarsimp simp: lookupIPCBuffer_def Let_def getThreadBufferSlot_def locateSlot_conv - split: capability.splits arch_capability.splits | wp | wpc)+ + split: capability.splits arch_capability.splits | wp | wpc | safe)+ lemma empty_fail_updateObject_default [intro!, wp, simp]: "empty_fail (updateObject_default v ko a b c)" - by (simp add: updateObject_default_def typeError_def unless_def split: kernel_object.splits ) + by (fastforce simp: updateObject_default_def typeError_def unless_def split: kernel_object.splits) lemma empty_fail_threadSet [intro!, wp, simp]: "empty_fail (threadSet f p)" - by (simp add: threadSet_def getObject_def setObject_def split_def) + by (fastforce simp: threadSet_def getObject_def setObject_def split_def) lemma empty_fail_getThreadState[iff]: "empty_fail (getThreadState t)" diff --git a/proof/refine/ARM_HYP/EmptyFail_H.thy b/proof/refine/ARM_HYP/EmptyFail_H.thy index b20a5662fc..937d0c6891 100644 --- a/proof/refine/ARM_HYP/EmptyFail_H.thy +++ b/proof/refine/ARM_HYP/EmptyFail_H.thy @@ -17,19 +17,19 @@ context begin interpretation Arch . (*FIXME: arch_split*) lemmas forM_empty_fail[intro!, wp, simp] = empty_fail_mapM[simplified forM_def[symmetric]] lemmas forM_x_empty_fail[intro!, wp, simp] = empty_fail_mapM_x[simplified forM_x_def[symmetric]] -lemmas forME_x_empty_fail[intro!, wp, simp] = mapME_x_empty_fail[simplified forME_x_def[symmetric]] +lemmas forME_x_empty_fail[intro!, wp, simp] = empty_fail_mapME_x[simplified forME_x_def[symmetric]] lemma withoutPreemption_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (withoutPreemption m)" - by (simp add: withoutPreemption_def) + by simp lemma withoutFailure_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (withoutFailure m)" - by (simp add: withoutFailure_def) + by simp lemma catchFailure_empty_fail[intro!, wp, simp]: "\ empty_fail f; \x. 
empty_fail (g x) \ \ empty_fail (catchFailure f g)" - by (simp add: catchFailure_def empty_fail_catch) + by (simp add: empty_fail_catch) lemma emptyOnFailure_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (emptyOnFailure m)" @@ -86,9 +86,6 @@ proof (induct arbitrary: s rule: resolveAddressBits.induct) lemmas resolveAddressBits_empty_fail[intro!, wp, simp] = resolveAddressBits_spec_empty_fail[THEN use_spec_empty_fail] -crunch (empty_fail) empty_fail[intro!, wp, simp]: lookupIPCBuffer -(simp:Let_def) - declare ef_dmo'[intro!, wp, simp] lemma empty_fail_getObject_ep [intro!, wp, simp]: @@ -178,11 +175,11 @@ crunch (empty_fail) "_H_empty_fail"[intro!, wp, simp]: "ThreadDecls_H.suspend" lemma ThreadDecls_H_restart_empty_fail[intro!, wp, simp]: "empty_fail (ThreadDecls_H.restart target)" - by (simp add:restart_def) + by (fastforce simp: restart_def) lemma vcpuUpdate_empty_fail[intro!, wp, simp]: "empty_fail (vcpuUpdate p f)" - by (simp add: vcpuUpdate_def) + by (fastforce simp: vcpuUpdate_def) crunch (empty_fail) empty_fail[intro!, wp, simp]: vcpuEnable, vcpuRestore (simp: uncurry_def) @@ -223,18 +220,14 @@ lemmas finaliseSlot_empty_fail[intro!, wp, simp] = lemma checkCapAt_empty_fail[intro!, wp, simp]: "empty_fail action \ empty_fail (checkCapAt cap ptr action)" - by (simp add: checkCapAt_def) + by (fastforce simp: checkCapAt_def) lemma assertDerived_empty_fail[intro!, wp, simp]: "empty_fail f \ empty_fail (assertDerived src cap f)" - by (simp add: assertDerived_def) + by (fastforce simp: assertDerived_def) crunch (empty_fail) empty_fail[intro!, wp, simp]: cteDelete -lemma liftE_empty_fail[intro!, wp, simp]: - "empty_fail f \ empty_fail (liftE f)" - by simp - lemma spec_empty_fail_unlessE': "\ \ P \ spec_empty_fail f s \ \ spec_empty_fail (unlessE P f) s" by (simp add:unlessE_def spec_empty_returnOk) @@ -264,7 +257,7 @@ lemma Syscall_H_syscall_empty_fail[intro!, wp, simp]: lemma catchError_empty_fail[intro!, wp, simp]: "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catchError f g)" - by (simp add: catchError_def handle_empty_fail) + by fastforce crunch (empty_fail) empty_fail[intro!, wp, simp]: chooseThread, getDomainTime, nextDomain, isHighestPrio @@ -284,7 +277,7 @@ crunch (empty_fail) empty_fail: callKernel theorem call_kernel_serial: "\ (einvs and (\s. event \ Interrupt \ ct_running s) and (ct_running or ct_idle) and - (\s. scheduler_action s = resume_cur_thread) and + schact_is_rct and (\s. 0 < domain_time s \ valid_domain_list s)) s; \s'. (s, s') \ state_relation \ (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and diff --git a/proof/refine/ARM_HYP/Finalise_R.thy b/proof/refine/ARM_HYP/Finalise_R.thy index 8229aa5713..a3a990bf7f 100644 --- a/proof/refine/ARM_HYP/Finalise_R.thy +++ b/proof/refine/ARM_HYP/Finalise_R.thy @@ -76,20 +76,10 @@ crunch ksRQL1[wp]: emptySlot "\s. P (ksReadyQueuesL1Bitmap s)" crunch ksRQL2[wp]: emptySlot "\s. P (ksReadyQueuesL2Bitmap s)" crunch obj_at'[wp]: postCapDeletion "obj_at' P p" -lemmas postCapDeletion_valid_queues[wp] = - valid_queues_lift [OF postCapDeletion_obj_at' - postCapDeletion_pred_tcb_at' - postCapDeletion_ksRQ] - crunch inQ[wp]: clearUntypedFreeIndex "\s. P (obj_at' (inQ d p) t s)" crunch tcbDomain[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbDomain tcb)) t" crunch tcbPriority[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbPriority tcb)) t" -lemma emptySlot_queues [wp]: - "\Invariants_H.valid_queues\ emptySlot sl opt \\rv. 
Invariants_H.valid_queues\" - unfolding emptySlot_def - by (wp | wpcw | wp valid_queues_lift | simp)+ - crunch nosch[wp]: emptySlot "\s. P (ksSchedulerAction s)" crunch ksCurDomain[wp]: emptySlot "\s. P (ksCurDomain s)" @@ -1166,8 +1156,7 @@ definition "removeable' sl \ \s cap. (\p. p \ sl \ cte_wp_at' (\cte. capMasterCap (cteCap cte) = capMasterCap cap) p s) \ ((\p \ cte_refs' cap (irq_node' s). p \ sl \ cte_wp_at' (\cte. cteCap cte = NullCap) p s) - \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s) - \ (\t \ threadCapRefs cap. \p. t \ set (ksReadyQueues s p)))" + \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s))" lemma not_Final_removeable: "\ isFinal cap sl (cteCaps_of s) @@ -1283,7 +1272,7 @@ crunch gsMaxObjectSize[wp]: emptySlot "\s. P (gsMaxObjectSize s)" end lemma emptySlot_cteCaps_of: - "\\s. P (cteCaps_of s(p \ NullCap))\ + "\\s. P ((cteCaps_of s)(p \ NullCap))\ emptySlot p opt \\rv s. P (cteCaps_of s)\" apply (simp add: emptySlot_def case_Null_If) @@ -1358,11 +1347,6 @@ crunch irq_states' [wp]: emptySlot valid_irq_states' crunch no_0_obj' [wp]: emptySlot no_0_obj' (wp: crunch_wps) -crunch valid_queues'[wp]: setInterruptState "valid_queues'" - (simp: valid_queues'_def) - -crunch valid_queues'[wp]: emptySlot "valid_queues'" - crunch pde_mappings'[wp]: emptySlot "valid_pde_mappings'" end @@ -1432,7 +1416,7 @@ lemma emptySlot_untyped_ranges[wp]: emptySlot sl opt \\rv. untyped_ranges_zero'\" apply (simp add: emptySlot_def case_Null_If) apply (rule hoare_pre) - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (rule untyped_ranges_zero_lift) apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of | wpc | simp add: clearUntypedFreeIndex_def updateTrackedFreeIndex_def @@ -1452,6 +1436,13 @@ lemma emptySlot_untyped_ranges[wp]: crunch valid_arch'[wp]: emptySlot valid_arch_state' (wp: crunch_wps) +crunches emptySlot + for valid_bitmaps[wp]: valid_bitmaps + and tcbQueued_opt_pred[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and sched_projs[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + (wp: valid_bitmaps_lift) + lemma emptySlot_invs'[wp]: "\\s. invs' s \ cte_wp_at' (\cte. removeable' sl s (cteCap cte)) sl s \ (\sl'. 
info \ NullCap \ sl' \ sl \ cteCaps_of s sl' \ Some info)\ @@ -1473,13 +1464,13 @@ lemma deletedIRQHandler_corres: lemma arch_postCapDeletion_corres: "acap_relation cap cap' \ corres dc \ \ (arch_post_cap_deletion cap) (ARM_HYP_H.postCapDeletion cap')" - by (corressimp simp: arch_post_cap_deletion_def ARM_HYP_H.postCapDeletion_def) + by (corresKsimp simp: arch_post_cap_deletion_def ARM_HYP_H.postCapDeletion_def) lemma postCapDeletion_corres: "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) - apply (corressimp corres: deletedIRQHandler_corres) - by (corressimp corres: arch_postCapDeletion_corres) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) lemma set_cap_trans_state: "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" @@ -1497,7 +1488,7 @@ lemma clearUntypedFreeIndex_noop_corres: apply (rule corres_bind_return2) apply (rule corres_symb_exec_r_conj[where P'="cte_at' (cte_map slot)"]) apply (rule corres_trivial, simp) - apply (wp hoare_TrueI[where P=\] getCTE_wp' | wpc + apply (wp wp_post_taut getCTE_wp' | wpc | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ apply (clarsimp simp: state_relation_def) apply (rule no_fail_pre) @@ -1539,7 +1530,7 @@ lemma emptySlot_corres: defer apply wpsimp+ apply (rule corres_no_failI) - apply (rule no_fail_pre, wp static_imp_wp) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (rule conjI, clarsimp) @@ -2242,16 +2233,24 @@ lemma ntfn_q_refs_of'_mult: "ntfn_q_refs_of' ntfn = (case ntfn of Structures_H.WaitingNtfn q \ set q | _ \ {}) \ {NTFNSignal}" by (cases ntfn, simp_all) +crunches setBoundNotification + for valid_bitmaps[wp]: valid_bitmaps + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: valid_bitmaps_lift) + lemma unbindNotification_invs[wp]: "\invs'\ unbindNotification tcb \\rv. invs'\" apply (simp add: unbindNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ gbn_sp']) + apply (rule bind_wp[OF _ gbn_sp']) apply (case_tac ntfnPtr, clarsimp, wp, clarsimp) apply clarsimp - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift - irqs_masked_lift setBoundNotification_ct_not_inQ + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' valid_irq_node_lift + irqs_masked_lift setBoundNotification_ct_not_inQ sym_heap_sched_pointers_lift untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ apply (rule conjI) apply (clarsimp elim!: obj_atE' @@ -2291,9 +2290,9 @@ lemma ntfn_bound_tcb_at': lemma unbindMaybeNotification_invs[wp]: "\invs'\ unbindMaybeNotification ntfnptr \\rv. 
invs'\" apply (simp add: unbindMaybeNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sym_heap_sched_pointers_lift valid_irq_node_lift irqs_masked_lift setBoundNotification_ct_not_inQ untyped_ranges_zero_lift | wpc | clarsimp simp: cteCaps_of_def o_def)+ @@ -2400,11 +2399,12 @@ lemma deleteASID_invs'[wp]: apply (simp add: deleteASID_def cong: option.case_cong) apply (rule hoare_pre) apply (wp | wpc)+ - apply (rule_tac Q="\rv. valid_obj' (injectKO rv) and invs'" - in hoare_post_imp) + apply (rule_tac Q="\rv. valid_obj' (injectKO rv) and invs'" + in hoare_post_imp) + apply (rename_tac rv s) apply (clarsimp split: if_split_asm del: subsetI) apply (simp add: fun_upd_def[symmetric] valid_obj'_def) - apply (case_tac r, simp) + apply (case_tac rv, simp) apply (subst inv_f_f, rule inj_onI, simp)+ apply (rule conjI) apply clarsimp @@ -2562,14 +2562,6 @@ lemma archThreadSet_valid_arch_state'[wp]: apply (clarsimp simp: pred_conj_def) done -lemma archThreadSet_valid_queues'[wp]: - "archThreadSet f t \valid_queues'\" - unfolding valid_queues'_def - apply (rule hoare_lift_Pf[where f=ksReadyQueues]; wp?) - apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift) - apply auto - done - lemma archThreadSet_ct_not_inQ[wp]: "archThreadSet f t \ct_not_inQ\" unfolding ct_not_inQ_def @@ -2599,6 +2591,54 @@ lemma archThreadSet_tcb_at'[wp]: unfolding archThreadSet_def by (wpsimp wp: getObject_tcb_wp simp: obj_at'_def) +lemma setObject_tcb_tcbs_of'[wp]: + "\\s. P ((tcbs_of' s) (t \ tcb))\ + setObject t tcb + \\_ s. P (tcbs_of' s)\" + unfolding setObject_def + apply (wpsimp simp: updateObject_default_def) + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + done + +lemma archThreadSet_tcbSchedPrevs_of[wp]: + "archThreadSet f t \\s. P (tcbSchedPrevs_of s)\" + supply projectKOs[simp] + unfolding archThreadSet_def + apply (wp getObject_tcb_wp) + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def obj_at'_def split: option.splits) + done + +lemma archThreadSet_tcbSchedNexts_of[wp]: + "archThreadSet f t \\s. P (tcbSchedNexts_of s)\" + supply projectKOs[simp] + unfolding archThreadSet_def + apply (wp getObject_tcb_wp) + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def obj_at'_def split: option.splits) + done + +lemma archThreadSet_tcbQueued[wp]: + "archThreadSet f t \\s. P (tcbQueued |< tcbs_of' s)\" + supply projectKOs[simp] + unfolding archThreadSet_def + apply (wp getObject_tcb_wp) + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_pred_def opt_map_def obj_at'_def split: option.splits) + done + +lemma archThreadSet_valid_sched_pointers[wp]: + "archThreadSet f t \valid_sched_pointers\" + by (wp_pre, wps, wp, assumption) + lemma dissoc_invs': "\invs' and (\s. \p. (\a. 
armHSCurVCPU (ksArchState s) = Some (p, a)) \ p \ v) and ko_at' vcpu v and K (vcpuTCBPtr vcpu = Some t) and @@ -2617,7 +2657,8 @@ lemma dissoc_invs': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_valid_arch' archThreadSet_if_live' + setVCPU_valid_arch' archThreadSet_if_live' valid_bitmaps_lift + sym_heap_sched_pointers_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb valid_arch_tcb'_def | clarsimp simp: live'_def hyp_live'_def arch_live'_def)+ @@ -2664,7 +2705,7 @@ lemma when_assert_eq: lemma dissociateVCPUTCB_invs'[wp]: "dissociateVCPUTCB vcpu tcb \invs'\" unfolding dissociateVCPUTCB_def setVCPU_archThreadSet_None_eq when_assert_eq - apply ( wpsimp wp: dissoc_invs' getVCPU_wp | wpsimp wp: getObject_tcb_wp simp: archThreadGet_def)+ + apply (wpsimp wp: dissoc_invs' getVCPU_wp | wpsimp wp: getObject_tcb_wp simp: archThreadGet_def)+ apply (drule tcb_ko_at') apply clarsimp apply (rule exI, rule conjI, assumption) @@ -2713,7 +2754,6 @@ lemma asUser_unlive[wp]: apply (rename_tac tcb) apply (rule_tac x=tcb in exI) apply (clarsimp simp: obj_at'_def projectKOs) - apply (rule_tac x=tcb in exI, rule conjI; clarsimp simp: o_def) apply (clarsimp simp: ko_wp_at'_def live'_def hyp_live'_def) done @@ -2744,7 +2784,7 @@ lemma arch_finaliseCap_removeable[wp]: Arch.finaliseCap cap final \\rv s. isNullCap (fst rv) \ removeable' slot s (ArchObjectCap cap) \ isNullCap (snd rv)\" unfolding ARM_HYP_H.finaliseCap_def - including no_pre + including classic_wp_pre apply (case_tac cap; clarsimp) apply ((wpsimp simp: removeable'_def isCap_simps | rule conjI)+)[5] @@ -2768,10 +2808,7 @@ lemma prepares_delete_helper'': apply (clarsimp simp: removeable'_def) done -lemma ctes_of_cteCaps_of_lift: - "\ \P. \\s. P (ctes_of s)\ f \\rv s. P (ctes_of s)\ \ - \ \\s. P (cteCaps_of s)\ f \\rv s. P (cteCaps_of s)\" - by (wp | simp add: cteCaps_of_def)+ +lemmas ctes_of_cteCaps_of_lift = cteCaps_of_ctes_of_lift crunches finaliseCapTrue_standin, unbindNotification for ctes_of[wp]: "\s. P (ctes_of s)" @@ -2779,11 +2816,11 @@ crunches finaliseCapTrue_standin, unbindNotification lemma cteDeleteOne_cteCaps_of: "\\s. (cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap)))\ + P ((cteCaps_of s)(p \ NullCap)))\ cteDeleteOne p \\rv s. P (cteCaps_of s)\" apply (simp add: cteDeleteOne_def unless_def split_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (case_tac "\final. finaliseCap (cteCap cte) final True = fail") apply (simp add: finaliseCapTrue_standin_simple_def) apply wp @@ -2809,7 +2846,6 @@ lemma cteDeleteOne_isFinal: lemmas setEndpoint_cteCaps_of[wp] = ctes_of_cteCaps_of_lift [OF set_ep_ctes_of] lemmas setNotification_cteCaps_of[wp] = ctes_of_cteCaps_of_lift [OF set_ntfn_ctes_of] -lemmas setQueue_cteCaps_of[wp] = ctes_of_cteCaps_of_lift [OF setQueue_ctes_of] lemmas threadSet_cteCaps_of = ctes_of_cteCaps_of_lift [OF threadSet_ctes_of] crunch isFinal: setSchedulerAction "\s. isFinal cap slot (cteCaps_of s)" @@ -2906,18 +2942,6 @@ lemma unbindNotification_valid_objs'_helper': by (clarsimp simp: valid_bound_tcb'_def valid_ntfn'_def split: option.splits ntfn.splits) -lemma typ_at'_valid_tcb'_lift: - assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" - shows "\\s. valid_tcb' tcb s\ f \\rv s. 
valid_tcb' tcb s\" - including no_pre - apply (simp add: valid_tcb'_def valid_arch_tcb'_def) - apply (case_tac "atcbVCPUPtr (tcbArch tcb)"; - case_tac "tcbState tcb"; - case_tac "tcbBoundNotification tcb") - apply (simp add: valid_tcb_state'_def split_def valid_bound_ntfn'_def - | wp hoare_vcg_const_Ball_lift typ_at_lifts[OF P] P)+ - done - lemmas setNotification_valid_tcb' = typ_at'_valid_tcb'_lift [OF setNotification_typ_at'] lemma unbindNotification_valid_objs'[wp]: @@ -2939,7 +2963,7 @@ lemma unbindMaybeNotification_valid_objs'[wp]: unbindMaybeNotification t \\rv. valid_objs'\" apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp threadSet_valid_objs' gbn_wp' set_ntfn_valid_objs' hoare_vcg_all_lift setNotification_valid_tcb' getNotification_wp @@ -2979,7 +3003,7 @@ lemma unbindMaybeNotification_obj_at'_bound: unbindMaybeNotification r \\_ s. obj_at' (\ntfn. ntfnBoundTCB ntfn = None) r s\" apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp obj_at_setObject2 | wpc @@ -3030,7 +3054,7 @@ lemma capDeleteOne_bound_tcb_at': lemma cancelIPC_bound_tcb_at'[wp]: "\bound_tcb_at' P tptr\ cancelIPC t \\rv. bound_tcb_at' P tptr\" apply (simp add: cancelIPC_def Let_def) - apply (rule hoare_seq_ext[OF _ gts_sp']) + apply (rule bind_wp[OF _ gts_sp']) apply (case_tac "state", simp_all) defer 2 apply (rule hoare_pre) @@ -3070,10 +3094,6 @@ lemma unbindNotification_bound_tcb_at': apply (wp setBoundNotification_bound_tcb gbn_wp' | wpc | simp)+ done -crunches unbindNotification, unbindMaybeNotification - for valid_queues[wp]: "Invariants_H.valid_queues" - (wp: sbn_valid_queues) - crunches unbindNotification, unbindMaybeNotification for weak_sch_act_wf[wp]: "\s. weak_sch_act_wf (ksSchedulerAction s) s" @@ -3147,6 +3167,54 @@ lemma prepareThreadDelete_hyp_unlive[wp]: end +lemma tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\\s. \ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + tcbQueueRemove q t + \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + supply projectKOs[simp] + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + by (fastforce dest!: heap_ls_last_None + simp: list_queue_relation_def prev_queue_head_def queue_end_valid_def + obj_at'_def opt_map_def ps_clear_def objBits_simps + split: if_splits) + +lemma tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\valid_sched_pointers\ + tcbSchedDequeue t + \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + supply projectKOs[simp] + unfolding tcbSchedDequeue_def + by (wpsimp wp: tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at' threadGet_wp) + (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def + valid_sched_pointers_def opt_pred_def opt_map_def + split: option.splits) + +crunches updateRestartPC, cancelIPC + for valid_sched_pointers[wp]: valid_sched_pointers + (simp: crunch_simps wp: crunch_wps) + +lemma suspend_tcbSchedNext_tcbSchedPrev_None: + "\invs'\ suspend t \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + unfolding suspend_def + by (wpsimp wp: hoare_drop_imps tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at') + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma archThreadSet_tcbSchedPrevNext[wp]: + "archThreadSet f t' \obj_at' (\tcb. P (tcbSchedNext tcb) (tcbSchedPrev tcb)) t\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_strongest getObject_tcb_wp) + apply normalise_obj_at' + apply auto + done + +crunches prepareThreadDelete + for tcbSchedPrevNext[wp]: "obj_at' (\tcb. P (tcbSchedNext tcb) (tcbSchedPrev tcb)) t" + (wp: threadGet_wp getVCPU_wp archThreadGet_wp crunch_wps simp: crunch_simps) + +end + lemma (in delete_one_conc_pre) finaliseCap_replaceable: "\\s. invs' s \ cte_wp_at' (\cte. cteCap cte = cap) slot s \ (final_matters' cap \ (final = isFinal cap slot (cteCaps_of s))) @@ -3167,21 +3235,22 @@ lemma (in delete_one_conc_pre) finaliseCap_replaceable: \ obj_at' (Not \ tcbQueued) p s \ bound_tcb_at' ((=) None) p s \ ko_wp_at' (Not \ hyp_live') p s - \ (\pr. p \ set (ksReadyQueues s pr))))\" + \ obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) p s))\" apply (simp add: finaliseCap_def Let_def getThreadCSpaceRoot cong: if_cong split del: if_split) apply (rule hoare_pre) apply (wp prepares_delete_helper'' [OF cancelAllIPC_unlive] prepares_delete_helper'' [OF cancelAllSignals_unlive] - suspend_isFinal prepareThreadDelete_unqueued prepareThreadDelete_nonq + suspend_isFinal prepareThreadDelete_unqueued prepareThreadDelete_inactive prepareThreadDelete_isFinal - suspend_makes_inactive suspend_nonq + suspend_makes_inactive deletingIRQHandler_removeable' deletingIRQHandler_final[where slot=slot ] unbindMaybeNotification_obj_at'_bound getNotification_wp suspend_bound_tcb_at' unbindNotification_bound_tcb_at' + suspend_tcbSchedNext_tcbSchedPrev_None | simp add: isZombie_Null isThreadCap_threadCapRefs_tcbptr isArchObjectCap_Cap_capCap | (rule hoare_strengthen_post [OF arch_finaliseCap_removeable[where slot=slot]], @@ -3189,24 +3258,12 @@ lemma (in delete_one_conc_pre) finaliseCap_replaceable: | wpc)+ apply clarsimp apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp+) - apply (rule conjI) - apply (case_tac "cteCap cte", + apply (case_tac "cteCap cte", simp_all add: isCap_simps capRange_def final_matters'_def objBits_simps not_Final_removeable finaliseCap_def, simp_all add: removeable'_def)[1] - (* thread *) - apply (frule capAligned_capUntypedPtr [OF valid_capAligned], simp) - apply (clarsimp simp: valid_cap'_def) - apply (drule valid_globals_cte_wpD'[rotated], clarsimp) - apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) - apply ((clarsimp simp: obj_at'_def | rule conjI)+)[2] - apply (rule impI) - apply (case_tac "cteCap cte", - simp_all add: isCap_simps capRange_def cap_has_cleanup'_def - final_matters'_def objBits_simps - not_Final_removeable finaliseCap_def, - simp_all add: removeable'_def) + apply fastforce+ done lemma cteDeleteOne_cte_wp_at_preserved: @@ -3224,7 +3281,7 @@ crunch ctes_of[wp]: cancelSignal "\s. P (ctes_of s)" lemma cancelIPC_cteCaps_of: "\\s. (\p. cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap))) \ + P ((cteCaps_of s)(p \ NullCap))) \ P (cteCaps_of s)\ cancelIPC t \\rv s. P (cteCaps_of s)\" @@ -3255,7 +3312,9 @@ lemma cancelIPC_cte_wp_at': apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of x) done -crunch cte_wp_at'[wp]: tcbSchedDequeue "cte_wp_at' P p" +crunches tcbSchedDequeue + for cte_wp_at'[wp]: "cte_wp_at' P p" + (wp: crunch_wps) lemma suspend_cte_wp_at': assumes x: "\cap final. 
P cap \ finaliseCap cap final True = fail" @@ -3355,7 +3414,7 @@ lemma cteDeleteOne_reply_pred_tcb_at: cteDeleteOne slot \\rv. pred_tcb_at' proj P t\" apply (simp add: cteDeleteOne_def unless_def isFinalCapability_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (clarsimp simp: cte_wp_at_ctes_of when_def isCap_simps Let_def finaliseCapTrue_standin_def) @@ -3376,32 +3435,13 @@ end lemma rescheduleRequired_sch_act_not[wp]: "\\\ rescheduleRequired \\rv. sch_act_not t\" apply (simp add: rescheduleRequired_def setSchedulerAction_def) - apply (wp hoare_post_taut | simp)+ + apply (wp hoare_TrueI | simp)+ done crunch sch_act_not[wp]: cteDeleteOne "sch_act_not t" (simp: crunch_simps case_Null_If unless_def wp: crunch_wps getObject_inv loadObject_default_inv) -lemma cancelAllIPC_mapM_x_valid_queues: - "\Invariants_H.valid_queues and valid_objs' and (\s. \t\set q. tcb_at' t s)\ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv. Invariants_H.valid_queues\" - apply (rule_tac R="\_ s. (\t\set q. tcb_at' t s) \ valid_objs' s" - in hoare_post_add) - apply (rule hoare_pre) - apply (rule mapM_x_wp') - apply (rule hoare_name_pre_state) - apply (wp hoare_vcg_const_Ball_lift - tcbSchedEnqueue_valid_queues tcbSchedEnqueue_not_st - sts_valid_queues sts_st_tcb_at'_cases setThreadState_not_st - | simp - | ((elim conjE)?, drule (1) bspec, clarsimp elim!: obj_at'_weakenE simp: valid_tcb_state'_def))+ - done - lemma cancelAllIPC_mapM_x_weak_sch_act: "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ mapM_x (\t. do @@ -3415,13 +3455,15 @@ lemma cancelAllIPC_mapM_x_weak_sch_act: done lemma cancelAllIPC_mapM_x_valid_objs': - "\valid_objs'\ + "\valid_objs' and pspace_aligned' and pspace_distinct'\ mapM_x (\t. do y \ setThreadState Structures_H.thread_state.Restart t; tcbSchedEnqueue t od) q \\_. valid_objs'\" - apply (wp mapM_x_wp' sts_valid_objs') + apply (rule hoare_strengthen_post) + apply (rule mapM_x_wp') + apply (wpsimp wp: sts_valid_objs') apply (clarsimp simp: valid_tcb_state'_def)+ done @@ -3432,18 +3474,12 @@ lemma cancelAllIPC_mapM_x_tcbDomain_obj_at': tcbSchedEnqueue t od) q \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" -apply (wp mapM_x_wp' tcbSchedEnqueue_not_st setThreadState_oa_queued | simp)+ -done + by (wpsimp wp: mapM_x_wp') lemma rescheduleRequired_oa_queued': - "\obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\ - rescheduleRequired - \\_. obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\" -apply (simp add: rescheduleRequired_def) -apply (wp tcbSchedEnqueue_not_st - | wpc - | simp)+ -done + "rescheduleRequired \obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t\" + unfolding rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + by wpsimp lemma cancelAllIPC_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ @@ -3457,21 +3493,6 @@ apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift | simp)+ done -lemma cancelAllIPC_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelAllIPC ep_ptr - \\rv. 
Invariants_H.valid_queues\" - apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act - set_ep_valid_objs' getEndpoint_wp) - apply (clarsimp simp: valid_ep'_def) - apply (drule (1) ko_at_valid_objs') - apply (auto simp: valid_obj'_def valid_ep'_def valid_tcb'_def projectKOs - split: endpoint.splits - elim: valid_objs_valid_tcbE) - done - lemma cancelAllSignals_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelAllSignals epptr @@ -3485,44 +3506,9 @@ apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift done lemma unbindMaybeNotification_tcbDomain_obj_at': - "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ - unbindMaybeNotification r - \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" - apply (simp add: unbindMaybeNotification_def) - apply (wp setBoundNotification_oa_queued getNotification_wp gbn_wp' | wpc | simp)+ - done - -lemma cancelAllSignals_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelAllSignals ntfn - \\rv. Invariants_H.valid_queues\" - apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) - apply (case_tac "ntfnObj ntfna", simp_all) - apply (wp, simp)+ - apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act - set_ntfn_valid_objs' - | simp)+ - apply (clarsimp simp: valid_ep'_def) - apply (drule (1) ko_at_valid_objs') - apply (auto simp: valid_obj'_def valid_ntfn'_def valid_tcb'_def projectKOs - split: endpoint.splits - elim: valid_objs_valid_tcbE) - done - -lemma finaliseCapTrue_standin_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - finaliseCapTrue_standin cap final - \\_. Invariants_H.valid_queues\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp | clarsimp | wpc)+ - done - - -crunch valid_queues[wp]: isFinalCapability "Invariants_H.valid_queues" - (simp: crunch_simps) + "unbindMaybeNotification r \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding unbindMaybeNotification_def + by (wpsimp wp: getNotification_wp gbn_wp' simp: setBoundNotification_def)+ crunch sch_act[wp]: isFinalCapability "\s. sch_act_wf (ksSchedulerAction s) s" (simp: crunch_simps) @@ -3531,96 +3517,6 @@ crunch weak_sch_act[wp]: isFinalCapability "\s. weak_sch_act_wf (ksSchedulerAction s) s" (simp: crunch_simps) -lemma cteDeleteOne_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cteDeleteOne sl - \\_. Invariants_H.valid_queues\" (is "\?PRE\ _ \_\") - apply (simp add: cteDeleteOne_def unless_def split_def) - apply (wp isFinalCapability_inv getCTE_wp | rule hoare_drop_imps | simp)+ - apply (clarsimp simp: cte_wp_at'_def) - done - -lemma valid_inQ_queues_lift: - assumes tat: "\d p tcb. \obj_at' (inQ d p) tcb\ f \\_. obj_at' (inQ d p) tcb\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\valid_inQ_queues\ f \\_. 
valid_inQ_queues\" - proof - - show ?thesis - apply (clarsimp simp: valid_def valid_inQ_queues_def) - apply safe - apply (rule use_valid [OF _ tat], assumption) - apply (drule spec, drule spec, erule conjE, erule bspec) - apply (rule ccontr) - apply (erule notE[rotated], erule(1) use_valid [OF _ prq]) - apply (erule use_valid [OF _ prq]) - apply simp - done - qed - -lemma emptySlot_valid_inQ_queues [wp]: - "\valid_inQ_queues\ emptySlot sl opt \\rv. valid_inQ_queues\" - unfolding emptySlot_def - by (wp opt_return_pres_lift | wpcw | wp valid_inQ_queues_lift | simp)+ - -crunch valid_inQ_queues[wp]: emptySlot valid_inQ_queues - (simp: crunch_simps) - -lemma cancelAllIPC_mapM_x_valid_inQ_queues: - "\valid_inQ_queues\ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv. valid_inQ_queues\" - apply (rule mapM_x_wp_inv) - apply (wp sts_valid_queues [where st="Structures_H.thread_state.Restart", simplified] - setThreadState_st_tcb) - done - -lemma cancelAllIPC_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cancelAllIPC ep_ptr - \\rv. valid_inQ_queues\" - apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (wp cancelAllIPC_mapM_x_valid_inQ_queues) - apply (wp hoare_conjI hoare_drop_imp | simp)+ - done - -lemma cancelAllSignals_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cancelAllSignals ntfn - \\rv. valid_inQ_queues\" - apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) - apply (case_tac "ntfnObj ntfna", simp_all) - apply (wp, simp)+ - apply (wp cancelAllIPC_mapM_x_valid_inQ_queues)+ - apply (simp) - done - -crunches unbindNotification, unbindMaybeNotification - for valid_inQ_queues[wp]: "valid_inQ_queues" - -lemma finaliseCapTrue_standin_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - finaliseCapTrue_standin cap final - \\_. valid_inQ_queues\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp | clarsimp | wpc)+ - done - -crunch valid_inQ_queues[wp]: isFinalCapability valid_inQ_queues - (simp: crunch_simps) - -lemma cteDeleteOne_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cteDeleteOne sl - \\_. valid_inQ_queues\" - apply (simp add: cteDeleteOne_def unless_def) - apply (wpsimp wp: hoare_drop_imp hoare_vcg_all_lift) - done - crunch ksCurDomain[wp]: cteDeleteOne "\s. 
P (ksCurDomain s)" (wp: crunch_wps simp: crunch_simps unless_def) @@ -3666,7 +3562,7 @@ lemma cteDeleteOne_invs[wp]: subgoal by auto subgoal by (auto dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def projectKOs live'_def hyp_live'_def ko_wp_at'_def) - apply (wp isFinalCapability_inv getCTE_wp' static_imp_wp + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp | wp (once) isFinal[where x=ptr])+ apply (fastforce simp: cte_wp_at_ctes_of) done @@ -3843,17 +3739,15 @@ lemma sym_refs_vcpu_tcb: lemma vcpuFinalise_corres [corres]: "corres dc (invs and vcpu_at vcpu) (invs' and vcpu_at' vcpu) (vcpu_finalise vcpu) (vcpuFinalise vcpu)" unfolding vcpuFinalise_def vcpu_finalise_def - apply (corressimp corres: getObject_vcpu_corres simp: vcpu_relation_def) + apply (corresKsimp corres: getObject_vcpu_corres simp: vcpu_relation_def) apply (wpsimp wp: get_vcpu_wp getVCPU_wp)+ apply (rule conjI) apply clarsimp apply (frule sym_refs_vcpu_tcb) apply (simp add: vcpu_relation_def) apply fastforce - apply (clarsimp simp: obj_at_def vcpu_relation_def) + apply (fastforce simp: obj_at_def vcpu_relation_def) apply clarsimp - apply (drule ko_at_valid_objs', fastforce, simp add: projectKOs) - apply (clarsimp simp: valid_obj'_def valid_vcpu'_def typ_at_tcb') done lemma arch_finaliseCap_corres: @@ -3887,7 +3781,7 @@ lemma arch_finaliseCap_corres: elim!: is_aligned_weaken invs_valid_asid_map)[2] apply (rule corres_guard_imp, rule deleteASID_corres) apply (auto elim!: invs_valid_asid_map simp: mask_def valid_cap_def)[2] - apply corres + apply corresK apply (clarsimp simp: valid_cap_def valid_cap'_def) done @@ -3910,6 +3804,9 @@ lemma unbindNotification_corres: apply (clarsimp simp: ntfn_relation_def split:Structures_A.ntfn.splits) apply (rule setBoundNotification_corres) apply (wp gbn_wp' gbn_wp)+ + apply clarsimp + apply (frule invs_psp_aligned) + apply (frule invs_distinct) apply (clarsimp elim!: obj_at_valid_objsE dest!: bound_tcb_at_state_refs_ofD invs_valid_objs simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def @@ -3936,6 +3833,9 @@ lemma unbindMaybeNotification_corres: apply (clarsimp simp: ntfn_relation_def split: Structures_A.ntfn.splits) apply (rule setBoundNotification_corres) apply (wp get_simple_ko_wp getNotification_wp)+ + apply clarsimp + apply (frule invs_psp_aligned) + apply (frule invs_distinct) apply (clarsimp elim!: obj_at_valid_objsE dest!: bound_tcb_at_state_refs_ofD invs_valid_objs simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def @@ -4107,12 +4007,6 @@ lemma arch_recycleCap_improve_cases: \ isASIDControlCap cap \ \ (if isASIDPoolCap cap then v else undefined) = v" by (cases cap, simp_all add: isCap_simps) -crunch queues[wp]: copyGlobalMappings "Invariants_H.valid_queues" - (wp: crunch_wps ignore: storePDE) - -crunch queues'[wp]: copyGlobalMappings "Invariants_H.valid_queues'" - (wp: crunch_wps ignore: storePDE) - crunch ifunsafe'[wp]: copyGlobalMappings "if_unsafe_then_cap'" (wp: crunch_wps ignore: storePDE) @@ -4212,178 +4106,6 @@ lemma cteCaps_of_ctes_of_lift: lemmas final_matters'_simps = final_matters'_def [split_simps capability.split arch_capability.split] -definition set_thread_all :: "obj_ref \ Structures_A.tcb \ etcb - \ unit det_ext_monad" where - "set_thread_all ptr tcb etcb \ - do s \ get; - kh \ return $ kheap s(ptr \ (TCB tcb)); - ekh \ return $ (ekheap s)(ptr \ etcb); - put (s\kheap := kh, ekheap := ekh\) - od" - -definition thread_gets_the_all :: "obj_ref \ (Structures_A.tcb \ etcb) det_ext_monad" where - "thread_gets_the_all tptr \ - do tcb \ gets_the $ get_tcb tptr; - etcb 
\ gets_the $ get_etcb tptr; - return $ (tcb, etcb) od" - -definition thread_set_all :: "(Structures_A.tcb \ Structures_A.tcb) \ (etcb \ etcb) - \ obj_ref \ unit det_ext_monad" where - "thread_set_all f g tptr \ - do (tcb, etcb) \ thread_gets_the_all tptr; - set_thread_all tptr (f tcb) (g etcb) - od" - -lemma set_thread_all_corres: - fixes ob' :: "'a :: pspace_storable" - assumes x: "updateObject ob' = updateObject_default ob'" - assumes z: "\s. obj_at' P ptr s - \ map_to_ctes ((ksPSpace s) (ptr \ injectKO ob')) = map_to_ctes (ksPSpace s)" - assumes b: "\ko. P ko \ objBits ko = objBits ob'" - assumes P: "\(v::'a::pspace_storable). (1 :: word32) < 2 ^ (objBits v)" - assumes e: "etcb_relation etcb tcb'" - assumes is_t: "injectKO (ob' :: 'a :: pspace_storable) = KOTCB tcb'" - shows "other_obj_relation (TCB tcb) (injectKO (ob' :: 'a :: pspace_storable)) \ - corres dc (obj_at (same_caps (TCB tcb)) ptr and is_etcb_at ptr) - (obj_at' (P :: 'a \ bool) ptr) - (set_thread_all ptr tcb etcb) (setObject ptr ob')" - apply (rule corres_no_failI) - apply (rule no_fail_pre) - apply wp - apply (rule x) - apply (clarsimp simp: b elim!: obj_at'_weakenE) - apply (unfold set_thread_all_def setObject_def) - apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def - put_def return_def modify_def get_object_def x - projectKOs - updateObject_default_def in_magnitude_check [OF _ P]) - apply (clarsimp simp add: state_relation_def z) - apply (simp add: trans_state_update'[symmetric] trans_state_update[symmetric] - del: trans_state_update) - apply (clarsimp simp add: swp_def fun_upd_def obj_at_def is_etcb_at_def) - apply (subst cte_wp_at_after_update,fastforce simp add: obj_at_def) - apply (subst caps_of_state_after_update,fastforce simp add: obj_at_def) - apply clarsimp - apply (subst conj_assoc[symmetric]) - apply (rule conjI[rotated]) - apply (clarsimp simp add: ghost_relation_def) - apply (erule_tac x=ptr in allE)+ - apply (clarsimp simp: obj_at_def - split: Structures_A.kernel_object.splits if_split_asm) - - apply (fold fun_upd_def) - apply (simp only: pspace_relation_def dom_fun_upd2 simp_thms) - apply (subst pspace_dom_update) - apply assumption - apply simp - apply (simp only: dom_fun_upd2 simp_thms) - apply (elim conjE) - apply (frule bspec, erule domI) - apply (rule conjI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: is_other_obj_relation_type) - apply (drule(1) bspec) - apply clarsimp - apply (frule_tac ko'="TCB tcb'" and x'=ptr in obj_relation_cut_same_type, - (fastforce simp add: is_other_obj_relation_type)+)[1] - apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) - apply (frule bspec, erule domI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: obj_at'_def) - apply (clarsimp simp: projectKOs) - apply (insert e is_t) - by (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits ARM_A.arch_kernel_obj.splits) - -lemma tcb_update_all_corres': - assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" - assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" - assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" - assumes r: "r () ()" - assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" - shows "corres r (ko_at (TCB tcb) add and (\s. 
ekheap s add = Some etcb)) - (ko_at' tcb' add) - (set_thread_all add tcbu etcbu) (setObject add tcbu')" - apply (rule_tac F="tcb_relation tcb tcb' \ etcb_relation etcbu tcbu'" in corres_req) - apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) - apply (frule(1) pspace_relation_absD) - apply (force simp: projectKOs other_obj_relation_def ekheap_relation_def e) - apply (erule conjE) - apply (rule corres_guard_imp) - apply (rule corres_rel_imp) - apply (rule set_thread_all_corres[where P="(=) tcb'"]) - apply (rule ext)+ - apply simp - defer - apply (simp add: is_other_obj_relation_type_def - projectKOs objBits_simps' - other_obj_relation_def tcbs r)+ - apply (fastforce simp: is_etcb_at_def elim!: obj_at_weakenE dest: bspec[OF tables]) - apply (subst(asm) eq_commute, assumption) - apply (clarsimp simp: projectKOs obj_at'_def objBits_simps) - apply (subst map_to_ctes_upd_tcb, assumption+) - apply (simp add: ps_clear_def3 field_simps mask_def objBits_defs) - apply (subst if_not_P) - apply (fastforce dest: bspec [OF tables', OF ranI]) - apply simp - done - -lemma thread_gets_the_all_corres: - shows "corres (\(tcb, etcb) tcb'. tcb_relation tcb tcb' \ etcb_relation etcb tcb') - (tcb_at t and is_etcb_at t) (tcb_at' t) - (thread_gets_the_all t) (getObject t)" - apply (rule corres_no_failI) - apply wp - apply (clarsimp simp add: gets_def get_def return_def bind_def get_tcb_def thread_gets_the_all_def threadGet_def ethread_get_def gets_the_def assert_opt_def get_etcb_def is_etcb_at_def tcb_at_def liftM_def split: option.splits Structures_A.kernel_object.splits) - apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) - apply (clarsimp simp add: obj_at_def is_tcb obj_at'_def projectKO_def - projectKO_opt_tcb split_def - getObject_def loadObject_default_def in_monad) - apply (case_tac ko) - apply (simp_all add: fail_def return_def) - apply (clarsimp simp add: state_relation_def pspace_relation_def ekheap_relation_def) - apply (drule bspec) - apply clarsimp - apply blast - apply (drule bspec, erule domI) - apply (clarsimp simp add: other_obj_relation_def - lookupAround2_known1) - done - -lemma thread_set_all_corresT: - assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ - tcb_relation (f tcb) (f' tcb')" - assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" - assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. - getF (f' tcb) = getF tcb" - assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ - etcb_relation (g etcb) (f' tcb')" - shows "corres dc (tcb_at t and valid_etcbs) - (tcb_at' t) - (thread_set_all f g t) (threadSet f' t)" - apply (simp add: thread_set_all_def threadSet_def bind_assoc) - apply (rule corres_guard_imp) - apply (rule corres_split[OF thread_gets_the_all_corres]) - apply (simp add: split_def) - apply (rule tcb_update_all_corres') - apply (erule x) - apply (rule y) - apply (clarsimp simp: bspec_split [OF spec [OF z]]) - apply fastforce - apply (erule e) - apply (simp add: thread_gets_the_all_def, wp+) - apply clarsimp - apply (frule(1) tcb_at_is_etcb_at) - apply (clarsimp simp add: tcb_at_def get_etcb_def obj_at_def) - apply (drule get_tcb_SomeD) - apply fastforce - apply simp - done - -lemmas thread_set_all_corres = - thread_set_all_corresT [OF _ _ all_tcbI, OF _ ball_tcb_cap_casesI ball_tcb_cte_casesI] - crunch idle_thread[wp]: deleteCallerCap "\s. P (ksIdleThread s)" (wp: crunch_wps) crunch sch_act_simple: deleteCallerCap sch_act_simple @@ -4401,89 +4123,6 @@ lemma setEndpoint_sch_act_not_ct[wp]: setEndpoint ptr val \\_ s. 
sch_act_not (ksCurThread s) s\" by (rule hoare_weaken_pre, wps setEndpoint_ct', wp, simp) -lemma cancelAll_ct_not_ksQ_helper: - "\(\s. ksCurThread s \ set (ksReadyQueues s p)) and (\s. ksCurThread s \ set q) \ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (rule mapM_x_inv_wp2, simp) - apply (wp) - apply (wps tcbSchedEnqueue_ct') - apply (wp tcbSchedEnqueue_ksQ) - apply (wps setThreadState_ct') - apply (wp sts_ksQ') - apply (clarsimp) - done - -lemma cancelAllIPC_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cancelAllIPC epptr - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - (is "\?PRE\ _ \\_. ?POST\") - apply (simp add: cancelAllIPC_def) - apply (wp, wpc, wp) - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply (clarsimp simp: forM_x_def) - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply (clarsimp simp: forM_x_def) - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ - prefer 2 - apply assumption - apply (rule_tac Q="\ep. ?PRE and ko_at' ep epptr" in hoare_post_imp) - apply (clarsimp) - apply (rule conjI) - apply ((clarsimp simp: invs'_def valid_state'_def - sch_act_sane_def - | drule(1) ct_not_in_epQueue)+)[2] - apply (wp get_ep_sp') - done - -lemma cancelAllSignals_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cancelAllSignals ntfnptr - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - (is "\?PRE\ _ \\_. ?POST\") - apply (simp add: cancelAllSignals_def) - apply (wp, wpc, wp+) - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply clarsimp - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setNotification_ksQ setNotification_ksCurThread]) - apply (wps setNotification_ksCurThread, wp) - prefer 2 - apply assumption - apply (rule_tac Q="\ep. ?PRE and ko_at' ep ntfnptr" in hoare_post_imp) - apply ((clarsimp simp: invs'_def valid_state'_def sch_act_sane_def - | drule(1) ct_not_in_ntfnQueue)+)[1] - apply (wp get_ntfn_sp') - done - -lemma unbindMaybeNotification_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - unbindMaybeNotification t - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) - apply (case_tac "ntfnBoundTCB ntfn", simp, wp, simp+) - apply (rule hoare_pre) - apply wp - apply (wps setBoundNotification_ct') - apply (wp sbn_ksQ) - apply (wps setNotification_ksCurThread, wp) - apply clarsimp - done - lemma sbn_ct_in_state'[wp]: "\ct_in_state' P\ setBoundNotification ntfn t \\_. ct_in_state' P\" apply (simp add: ct_in_state'_def) @@ -4516,37 +4155,6 @@ lemma unbindMaybeNotification_sch_act_sane[wp]: apply (wp setNotification_sch_act_sane sbn_sch_act_sane | wpc | clarsimp)+ done -lemma finaliseCapTrue_standin_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - finaliseCapTrue_standin cap final - \\rv s. 
ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp cancelAllIPC_ct_not_ksQ cancelAllSignals_ct_not_ksQ - hoare_drop_imps unbindMaybeNotification_ct_not_ksQ - | wpc - | clarsimp simp: isNotificationCap_def isReplyCap_def split:capability.splits)+ - done - -lemma cteDeleteOne_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cteDeleteOne slot - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: cteDeleteOne_def unless_def split_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) - apply (case_tac "\final. finaliseCap (cteCap cte) final True = fail") - apply (simp add: finaliseCapTrue_standin_simple_def) - apply wp - apply (clarsimp) - apply (wp emptySlot_cteCaps_of hoare_lift_Pf2 [OF emptySlot_ksRQ emptySlot_ct]) - apply (simp add: cteCaps_of_def) - apply (wp (once) hoare_drop_imps) - apply (wp finaliseCapTrue_standin_ct_not_ksQ isFinalCapability_inv)+ - apply (clarsimp) - done - end end diff --git a/proof/refine/ARM_HYP/Init_R.thy b/proof/refine/ARM_HYP/Init_R.thy index b12f9a916d..589a07a9a6 100644 --- a/proof/refine/ARM_HYP/Init_R.thy +++ b/proof/refine/ARM_HYP/Init_R.thy @@ -96,7 +96,7 @@ definition zeroed_intermediate_state :: ksDomSchedule = [], ksCurDomain = 0, ksDomainTime = 0, - ksReadyQueues = K [], + ksReadyQueues = K (TcbQueue None None), ksReadyQueuesL1Bitmap = K 0, ksReadyQueuesL2Bitmap = K 0, ksCurThread = 0, @@ -117,9 +117,11 @@ lemma non_empty_refine_state_relation: "(zeroed_abstract_state, zeroed_intermediate_state) \ state_relation" apply (clarsimp simp: state_relation_def zeroed_state_defs state.defs) apply (intro conjI) - apply (clarsimp simp: pspace_relation_def pspace_dom_def) - apply (clarsimp simp: ekheap_relation_def) - apply (clarsimp simp: ready_queues_relation_def) + apply (clarsimp simp: pspace_relation_def pspace_dom_def) + apply (clarsimp simp: ekheap_relation_def) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def queue_end_valid_def + opt_pred_def list_queue_relation_def tcbQueueEmpty_def + prev_queue_head_def) apply (clarsimp simp: ghost_relation_def) apply (fastforce simp: cdt_relation_def swp_def dest: cte_wp_at_domI) apply (clarsimp simp: cdt_list_relation_def map_to_ctes_def) diff --git a/proof/refine/ARM_HYP/InterruptAcc_R.thy b/proof/refine/ARM_HYP/InterruptAcc_R.thy index 3c2a245104..e99175a549 100644 --- a/proof/refine/ARM_HYP/InterruptAcc_R.thy +++ b/proof/refine/ARM_HYP/InterruptAcc_R.thy @@ -52,14 +52,14 @@ lemma setIRQState_invs[wp]: apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def) apply (wp dmo_maskInterrupt) apply (clarsimp simp: invs'_def valid_state'_def cur_tcb'_def - Invariants_H.valid_queues_def valid_queues'_def valid_idle'_def valid_irq_node'_def valid_arch_state'_def valid_global_refs'_def global_refs'_def valid_machine_state'_def if_unsafe_then_cap'_def ex_cte_cap_to'_def valid_irq_handlers'_def irq_issued'_def cteCaps_of_def valid_irq_masks'_def - bitmapQ_defs valid_queues_no_bitmap_def split: option.splits) + bitmapQ_defs valid_bitmaps_def + split: option.splits) apply (rule conjI, clarsimp) apply (clarsimp simp: irqs_masked'_def ct_not_inQ_def) apply (rule conjI) @@ -119,7 +119,7 @@ lemma preemptionPoint_inv: shows "\P\ preemptionPoint \\_. 
P\" using assms apply (simp add: preemptionPoint_def setWorkUnits_def getWorkUnits_def modifyWorkUnits_def) apply (wpc - | wp hoare_whenE_wp hoare_seq_ext [OF _ select_inv] alternative_valid hoare_drop_imps + | wp whenE_wp bind_wp [OF _ select_inv] hoare_drop_imps | simp)+ done @@ -154,8 +154,8 @@ lemma invs'_irq_state_independent [simp, intro!]: valid_idle'_def valid_global_refs'_def valid_arch_state'_def valid_irq_node'_def valid_irq_handlers'_def valid_irq_states'_def - irqs_masked'_def bitmapQ_defs valid_queues_no_bitmap_def - valid_queues'_def valid_pde_mappings'_def + irqs_masked'_def bitmapQ_defs valid_bitmaps_def + valid_pde_mappings'_def pspace_domain_valid_def cur_tcb'_def valid_machine_state'_def tcb_in_cur_domain'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def diff --git a/proof/refine/ARM_HYP/Interrupt_R.thy b/proof/refine/ARM_HYP/Interrupt_R.thy index 25952092ea..dc1c9600a0 100644 --- a/proof/refine/ARM_HYP/Interrupt_R.thy +++ b/proof/refine/ARM_HYP/Interrupt_R.thy @@ -186,6 +186,7 @@ crunches arch_check_irq, checkIRQ lemma arch_check_irq_maxIRQ_valid: "\\\ arch_check_irq y \\_. (\s. unat y \ unat maxIRQ)\, -" unfolding arch_check_irq_def + supply hoare_vcg_prop[wp del] (* FIXME lib: check rule order *) apply (wpsimp simp: validE_R_def wp: whenE_throwError_wp) by (metis unat_ucast_10_32 word_le_nat_alt word_le_not_less) @@ -603,13 +604,6 @@ lemma decDomainTime_corres: apply (clarsimp simp:state_relation_def) done -lemma tcbSchedAppend_valid_objs': - "\valid_objs'\tcbSchedAppend t \\r. valid_objs'\" - apply (simp add:tcbSchedAppend_def) - apply (wpsimp wp: hoare_unless_wp threadSet_valid_objs' threadGet_wp) - apply (clarsimp simp add:obj_at'_def typ_at'_def) - done - lemma thread_state_case_if: "(case state of Structures_A.thread_state.Running \ f | _ \ g) = (if state = Structures_A.thread_state.Running then f else g)" @@ -620,35 +614,27 @@ lemma threadState_case_if: (if state = Structures_H.thread_state.Running then f else g)" by (case_tac state,auto) -lemma tcbSchedAppend_invs_but_ct_not_inQ': - "\invs' and st_tcb_at' runnable' t \ - tcbSchedAppend t \\_. 
all_invs_but_ct_not_inQ'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp sch_act_wf_lift valid_irq_node_lift irqs_masked_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | fastforce elim!: st_tcb_ex_cap'' split: thread_state.split_asm)+ - done +lemma ready_qs_distinct_domain_time_update[simp]: + "ready_qs_distinct (domain_time_update f s) = ready_qs_distinct s" + by (clarsimp simp: ready_qs_distinct_def) lemma timerTick_corres: - "corres dc (cur_tcb and valid_sched) - invs' - timer_tick timerTick" - supply if_weak_cong[cong] + "corres dc + (cur_tcb and valid_sched and pspace_aligned and pspace_distinct) invs' + timer_tick timerTick" apply (simp add: timerTick_def timer_tick_def) - apply (simp add:thread_state_case_if threadState_case_if) - apply (rule_tac Q="\ and (cur_tcb and valid_sched)" and Q'="\ and invs'" in corres_guard_imp) + apply (simp add: thread_state_case_if threadState_case_if) + apply (rule_tac Q="cur_tcb and valid_sched and pspace_aligned and pspace_distinct" + and Q'=invs' + in corres_guard_imp) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) apply simp apply (rule corres_split[OF getThreadState_corres]) apply (rename_tac state state') - apply (rule corres_split[where r' = dc ]) + apply (rule corres_split[where r' = dc]) apply (rule corres_if[where Q = \ and Q' = \]) apply (case_tac state,simp_all)[1] - apply (simp add: Let_def) apply (rule_tac r'="(=)" in corres_split[OF ethreadget_corres]) apply (simp add:etcb_relation_def) apply (rename_tac ts ts') @@ -658,55 +644,53 @@ lemma timerTick_corres: apply (rule ethread_set_corres, simp+) apply (clarsimp simp: etcb_relation_def) apply simp - apply (rule corres_split) - apply (rule ethread_set_corres; simp) - apply (simp add: etcb_relation_def) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF ethread_set_corres]) + apply (simp add: sch_act_wf_weak etcb_relation_def pred_conj_def)+ + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (rule rescheduleRequired_corres) - apply (wp)[1] - apply (rule hoare_strengthen_post) - apply (rule tcbSchedAppend_invs_but_ct_not_inQ', - clarsimp simp: sch_act_wf_weak) - apply (wp threadSet_timeslice_invs threadSet_valid_queues - threadSet_valid_queues' threadSet_pred_tcb_at_state)+ - apply simp - apply simp - apply (rule corres_when,simp) + apply wp + apply ((wpsimp wp: tcbSchedAppend_sym_heap_sched_pointers + tcbSchedAppend_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply ((wp thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply wpsimp+ + apply (rule corres_when, simp) apply (rule corres_split[OF decDomainTime_corres]) apply (rule corres_split[OF getDomainTime_corres]) apply (rule corres_when,simp) apply (rule rescheduleRequired_corres) apply (wp hoare_drop_imp)+ - apply (simp add:dec_domain_time_def) - apply wp+ - apply (simp add:decDomainTime_def) - apply wp - apply (wp|wpc|unfold Let_def|simp)+ - apply (wp static_imp_wp threadSet_timeslice_invs threadSet_valid_queues threadSet_valid_queues' - threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf tcbSchedAppend_valid_objs' - rescheduleRequired_weak_sch_act_wf tcbSchedAppend_valid_queues| simp)+ - apply 
(strengthen sch_act_wf_weak) - apply (clarsimp simp:conj_comms) - apply (wp tcbSchedAppend_valid_queues tcbSchedAppend_sch_act_wf) - apply simp - apply (wp threadSet_valid_queues threadSet_pred_tcb_at_state threadSet_sch_act - threadSet_tcbDomain_triv threadSet_valid_queues' threadSet_valid_objs'| simp)+ - apply (wp threadGet_wp gts_wp gts_wp')+ - apply (clarsimp simp: cur_tcb_def tcb_at_is_etcb_at valid_sched_def valid_sched_action_def) - prefer 2 - apply clarsimp - apply (clarsimp simp add:cur_tcb_def valid_sched_def - valid_sched_action_def valid_etcbs_def is_tcb_def - is_etcb_at_def st_tcb_at_def obj_at_def - dest!:get_tcb_SomeD) - apply (clarsimp simp: invs'_def valid_state'_def - sch_act_wf_weak - cur_tcb'_def inQ_def - ct_in_state'_def obj_at'_def) - apply (clarsimp simp:st_tcb_at'_def - valid_idle'_def ct_idle_or_in_cur_domain'_def - obj_at'_def projectKO_eq) - apply simp + apply (wpsimp simp: dec_domain_time_def) + apply (wpsimp simp: decDomainTime_def) + apply (wpsimp wp: hoare_weak_lift_imp threadSet_timeslice_invs + tcbSchedAppend_valid_objs' + threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf + rescheduleRequired_weak_sch_act_wf)+ + apply (strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_time_slice_valid_queues) + apply ((wpsimp wp: thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+)[1] + apply wpsimp + apply wpsimp + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs' + | wp (once) hoare_drop_imp)+)[1] + apply (wpsimp wp: gts_wp gts_wp')+ + apply (clarsimp simp: cur_tcb_def) + apply (frule valid_sched_valid_etcbs) + apply (frule (1) tcb_at_is_etcb_at) + apply (frule valid_sched_valid_queues) + apply (fastforce simp: pred_tcb_at_def obj_at_def valid_sched_weak_strg) + apply (clarsimp simp: etcb_at_def split: option.splits) + apply fastforce + apply (fastforce simp: valid_state'_def ct_not_inQ_def) + apply fastforce done lemma corres_return_VGICMaintenance [corres]: @@ -801,7 +785,7 @@ lemma virqSetEOIIRQEN_eq[simp]: lemma vgic_maintenance_corres [corres]: "corres dc einvs - (\s. invs' s \ sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p))) + (\s. invs' s \ sch_act_not (ksCurThread s) s) vgic_maintenance vgicMaintenance" proof - (* hoare_lift_Pf-style rules match too often, slowing down proof unless specialised *) @@ -812,7 +796,6 @@ proof - note wplr' = vilr'[where P="sch_act_not"] vilr'[where P="ex_nonz_cap_to'"] vilr'[where P="st_tcb_at' simple'"] - vilr'[where P="\t s. t \ set (ksReadyQueues s x)" for x] show ?thesis unfolding vgic_maintenance_def vgicMaintenance_def isRunnable_def Let_def apply (rule corres_guard_imp) @@ -866,22 +849,22 @@ proof - in hoare_post_imp) apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb runnable_eq pred_conj_def) apply (strengthen st_tcb_ex_cap'[where P=active], clarsimp) - apply (clarsimp simp: pred_tcb_at_def obj_at_def) + apply (clarsimp simp: pred_tcb_at_def obj_at_def invs_psp_aligned invs_distinct) apply wp apply clarsimp apply (rule_tac Q="\rv x. tcb_at' rv x \ invs' x - \ sch_act_not rv x - \ (\d p. 
rv \ set (ksReadyQueues x (d, p)))" + \ sch_act_not rv x" in hoare_post_imp) + apply (rename_tac rv s) apply clarsimp apply (strengthen st_tcb_ex_cap''[where P=active']) apply (strengthen invs_iflive') apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') apply (clarsimp simp: pred_tcb_at'_def) - apply (rule conjI, erule_tac p=r in obj_at'_weakenE - , fastforce split: thread_state.splits) - apply (erule_tac p=r in obj_at'_weakenE, fastforce split: thread_state.splits) + apply (rule conjI, erule_tac p=rv in obj_at'_weakenE, + fastforce split: thread_state.splits) + apply (erule_tac p=rv in obj_at'_weakenE, fastforce split: thread_state.splits) apply wp apply (wpsimp wp: wplr wplr' hoare_vcg_all_lift hoare_vcg_imp_lift' dmo_gets_wp dmo'_gets_wp @@ -901,7 +884,7 @@ qed lemma vppiEvent_corres: "corres dc einvs - (\s. invs' s \ sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p))) + (\s. invs' s \ sch_act_not (ksCurThread s) s) (vppi_event irq) (vppiEvent irq)" unfolding vppi_event_def vppiEvent_def isRunnable_def supply [[simproc del: defined_all]] @@ -939,19 +922,20 @@ lemma vppiEvent_corres: (ARM_A.VPPIEvent irq)))" in hoare_post_imp) apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb runnable_eq pred_conj_def) - apply (strengthen st_tcb_ex_cap'[where P=active], clarsimp) + apply (strengthen st_tcb_ex_cap'[where P=active], + clarsimp simp: invs_psp_aligned invs_distinct) apply wp apply (clarsimp cong: imp_cong conj_cong simp: pred_conj_def) apply (rule_tac Q="\rv x. tcb_at' rv x \ invs' x - \ sch_act_not rv x - \ (\d p. rv \ set (ksReadyQueues x (d, p)))" in hoare_post_imp) + \ sch_act_not rv x" in hoare_post_imp) + apply (rename_tac rv s) apply (strengthen st_tcb_ex_cap''[where P=active']) apply (strengthen invs_iflive') apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') apply (clarsimp simp: pred_tcb_at'_def) - apply (rule conjI, erule_tac p=r in obj_at'_weakenE, fastforce split: thread_state.splits) - apply (erule_tac p=r in obj_at'_weakenE, fastforce split: thread_state.splits) + apply (rule conjI, erule_tac p=rv in obj_at'_weakenE, fastforce split: thread_state.splits) + apply (erule_tac p=rv in obj_at'_weakenE, fastforce split: thread_state.splits) apply wp apply (wpsimp wp: vcpu_update_tcb_at hoare_vcg_all_lift hoare_vcg_imp_lift' cong: vcpu.fold_congs)+ @@ -972,8 +956,7 @@ lemma vppiEvent_corres: lemma handle_reserved_irq_corres[corres]: "corres dc einvs - (\s. invs' s \ (irq \ non_kernel_IRQs \ - sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p)))) + (\s. invs' s \ (irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) (handle_reserved_irq irq) (handleReservedIRQ irq)" apply (clarsimp simp: handle_reserved_irq_def handleReservedIRQ_def irqVPPIEventIndex_def irq_vppi_event_index_def non_kernel_IRQs_def IRQ_def irqVGICMaintenance_def @@ -981,17 +964,13 @@ lemma handle_reserved_irq_corres[corres]: apply (rule conjI; clarsimp) apply (rule corres_guard_imp, rule vppiEvent_corres) apply (fastforce intro: vgic_maintenance_corres simp: unat_arith_simps)+ - apply (rule conjI; clarsimp) - apply (rule corres_guard_imp) - apply (fastforce intro: vgic_maintenance_corres simp: unat_arith_simps)+ done lemma handleInterrupt_corres: "corres dc - (einvs) + einvs (invs' and (\s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive) and - (\s. irq \ non_kernel_IRQs \ - sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p)))) + (\s. 
irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)) (handle_interrupt irq) (handleInterrupt irq)" (is "corres dc _ (invs' and _ and ?P') _ _") apply (simp add: handle_interrupt_def handleInterrupt_def) @@ -1026,10 +1005,6 @@ lemma handleInterrupt_corres: apply (rule corres_machine_op, rule corres_eq_trivial ; (simp add: no_fail_maskInterrupt no_fail_bind no_fail_ackInterrupt)+)+ apply wp+ - apply clarsimp - apply clarsimp - apply (rule hoare_post_taut) (* FIXME: wp (once) does not terminate? *) - apply wp+ apply clarsimp apply fastforce apply (rule corres_guard_imp) @@ -1039,9 +1014,9 @@ lemma handleInterrupt_corres: apply (rule corres_machine_op) apply (rule corres_eq_trivial, (simp add: no_fail_ackInterrupt)+) apply wp+ - apply clarsimp + apply fastforce apply clarsimp - apply corressimp + apply corresKsimp done lemma threadSet_ksDomainTime[wp]: @@ -1051,10 +1026,10 @@ lemma threadSet_ksDomainTime[wp]: done crunch ksDomainTime[wp]: rescheduleRequired "\s. P (ksDomainTime s)" -(simp:tcbSchedEnqueue_def wp:hoare_unless_wp) +(simp:tcbSchedEnqueue_def wp:unless_wp) crunch ksDomainTime[wp]: tcbSchedAppend "\s. P (ksDomainTime s)" -(simp:tcbSchedEnqueue_def wp:hoare_unless_wp) +(simp:tcbSchedEnqueue_def wp:unless_wp) lemma updateTimeSlice_valid_pspace[wp]: "\valid_pspace'\ threadSet (tcbTimeSlice_update (\_. ts')) thread @@ -1063,15 +1038,6 @@ lemma updateTimeSlice_valid_pspace[wp]: apply (auto simp:tcb_cte_cases_def) done -lemma updateTimeSlice_valid_queues[wp]: - "\\s. Invariants_H.valid_queues s \ - threadSet (tcbTimeSlice_update (\_. ts')) thread - \\r s. Invariants_H.valid_queues s\" - apply (wp threadSet_valid_queues,simp) - apply (clarsimp simp:obj_at'_def inQ_def) - done - - lemma dom_upd_eq: "f t = Some y \ dom (\a. if a = t then Some x else f a) = dom f" by (auto split: if_split_asm) @@ -1098,29 +1064,29 @@ crunch ct[wp]: tcbSchedAppend cur_tcb' (wp: cur_tcb_lift crunch_wps) lemma timerTick_invs'[wp]: - "\invs'\ timerTick \\rv. invs'\" + "timerTick \invs'\" apply (simp add: timerTick_def) apply (wpsimp wp: threadSet_invs_trivial threadSet_pred_tcb_no_state rescheduleRequired_all_invs_but_ct_not_inQ - tcbSchedAppend_invs_but_ct_not_inQ' - simp: tcb_cte_cases_def) - apply (rule_tac Q="\rv. invs'" in hoare_post_imp) - apply (clarsimp simp add:invs'_def valid_state'_def) + simp: tcb_cte_cases_def) + apply (rule_tac Q="\rv. invs'" in hoare_post_imp) + apply (clarsimp simp: invs'_def valid_state'_def) apply (simp add: decDomainTime_def) apply wp apply simp apply wpc - apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs - rescheduleRequired_all_invs_but_ct_not_inQ - hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain' - del: tcbSchedAppend_sch_act_wf)+ - apply (rule hoare_strengthen_post[OF tcbSchedAppend_invs_but_ct_not_inQ']) - apply (wpsimp simp: valid_pspace'_def sch_act_wf_weak)+ - apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv - threadSet_valid_objs' threadSet_timeslice_invs)+ - apply (wp threadGet_wp) + apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs + rescheduleRequired_all_invs_but_ct_not_inQ + hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain')+ + apply (rule hoare_strengthen_post[OF tcbSchedAppend_all_invs_but_ct_not_inQ']) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ + apply (rule_tac Q="\_. 
invs'" in hoare_strengthen_post) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv + threadSet_valid_objs' threadSet_timeslice_invs)+ + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ apply (wp gts_wp')+ - apply (clarsimp simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def) + apply (auto simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def cong: conj_cong) done lemma resetTimer_invs'[wp]: @@ -1150,8 +1116,9 @@ lemma runnable'_eq: by (cases st; simp) lemma vgicMaintenance_invs'[wp]: - "\invs' and (\s. sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p)))\ - vgicMaintenance \\y. invs'\" + "\invs' and (\s. sch_act_not (ksCurThread s) s)\ + vgicMaintenance + \\_. invs'\" supply if_split[split del] apply (clarsimp simp: vgicMaintenance_def get_gic_vcpu_ctrl_lr_def set_gic_vcpu_ctrl_lr_def get_gic_vcpu_ctrl_misr_def get_gic_vcpu_ctrl_eisr1_def get_gic_vcpu_ctrl_eisr0_def @@ -1162,8 +1129,7 @@ lemma vgicMaintenance_invs'[wp]: apply (clarsimp cong: imp_cong conj_cong simp: pred_conj_def) apply (rule_tac Q="\_ s. tcb_at' (ksCurThread s) s \ invs' s - \ sch_act_not (ksCurThread s) s - \ (\d p. (ksCurThread s) \ set (ksReadyQueues s (d, p)))" + \ sch_act_not (ksCurThread s) s" in hoare_post_imp) apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') apply (clarsimp simp: st_tcb_at'_def obj_at'_def runnable'_eq) @@ -1186,7 +1152,7 @@ lemma vgicMaintenance_invs'[wp]: done lemma vppiEvent_invs'[wp]: - "\invs' and (\s. sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p)))\ + "\invs' and (\s. sch_act_not (ksCurThread s) s)\ vppiEvent irq \\y. invs'\" supply if_split[split del] apply (clarsimp simp: vppiEvent_def doMachineOp_bind) @@ -1196,8 +1162,7 @@ lemma vppiEvent_invs'[wp]: apply (clarsimp cong: imp_cong conj_cong simp: pred_conj_def) apply (rule_tac Q="\_ s. tcb_at' (ksCurThread s) s \ invs' s - \ sch_act_not (ksCurThread s) s - \ (\d p. (ksCurThread s) \ set (ksReadyQueues s (d, p)))" + \ sch_act_not (ksCurThread s) s" in hoare_post_imp) apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') apply (clarsimp simp: st_tcb_at'_def obj_at'_def runnable'_eq) @@ -1212,8 +1177,7 @@ lemma vppiEvent_invs'[wp]: done lemma hint_invs[wp]: - "\invs' and (\s. irq \ non_kernel_IRQs \ - sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p)))\ + "\invs' and (\s. irq \ non_kernel_IRQs \ sch_act_not (ksCurThread s) s)\ handleInterrupt irq \\rv. invs'\" apply (simp add: handleInterrupt_def getSlotCap_def cong: irqstate.case_cong) apply (rule conjI; rule impI) diff --git a/proof/refine/ARM_HYP/InvariantUpdates_H.thy b/proof/refine/ARM_HYP/InvariantUpdates_H.thy index 29ef82a6b5..46a234c70a 100644 --- a/proof/refine/ARM_HYP/InvariantUpdates_H.thy +++ b/proof/refine/ARM_HYP/InvariantUpdates_H.thy @@ -16,7 +16,7 @@ lemma ps_clear_domE[elim?]: lemma ps_clear_upd: "ksPSpace s y = Some v \ - ps_clear x n (ksPSpace_update (\a. ksPSpace s(y \ v')) s') = ps_clear x n s" + ps_clear x n (ksPSpace_update (\a. 
(ksPSpace s)(y \ v')) s') = ps_clear x n s" by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+ lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated] @@ -38,8 +38,9 @@ lemma invs'_machine: proof - show ?thesis apply (cases "ksSchedulerAction s") - apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs + apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_bitmaps_def bitmapQ_defs vms ct_not_inQ_def state_refs_of'_def ps_clear_def valid_irq_node'_def mask @@ -56,12 +57,13 @@ lemma invs_no_cicd'_machine: proof - show ?thesis apply (cases "ksSchedulerAction s") - apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - vms ct_not_inQ_def - state_refs_of'_def ps_clear_def - valid_irq_node'_def mask - cong: option.case_cong) + apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_bitmaps_def bitmapQ_defs + vms ct_not_inQ_def + state_refs_of'_def ps_clear_def + valid_irq_node'_def mask + cong: option.case_cong) done qed @@ -98,14 +100,9 @@ lemma valid_tcb'_tcbTimeSlice_update[simp]: "valid_tcb' (tcbTimeSlice_update f tcb) s = valid_tcb' tcb s" by (simp add:valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) -lemma valid_queues_ksSchedulerAction_update[simp]: - "valid_queues (ksSchedulerAction_update f s) = valid_queues s" - unfolding valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - by simp - -lemma valid_queues'_ksSchedulerAction_update[simp]: - "valid_queues' (ksSchedulerAction_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksSchedulerAction_update[simp]: + "valid_bitmaps (ksSchedulerAction_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma ex_cte_cap_wp_to'_gsCNodes_update[simp]: "ex_cte_cap_wp_to' P p (gsCNodes_update f s') = ex_cte_cap_wp_to' P p s'" @@ -140,45 +137,25 @@ lemma tcb_in_cur_domain_ct[simp]: "tcb_in_cur_domain' t (ksCurThread_update f s) = tcb_in_cur_domain' t s" by (fastforce simp: tcb_in_cur_domain'_def) -lemma valid_queues'_ksCurDomain[simp]: - "valid_queues' (ksCurDomain_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - -lemma valid_queues'_ksDomScheduleIdx[simp]: - "valid_queues' (ksDomScheduleIdx_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksCurDomain[simp]: + "valid_bitmaps (ksCurDomain_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksDomSchedule[simp]: - "valid_queues' (ksDomSchedule_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomScheduleIdx[simp]: + "valid_bitmaps (ksDomScheduleIdx_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksDomainTime[simp]: - "valid_queues' (ksDomainTime_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomSchedule[simp]: + "valid_bitmaps (ksDomSchedule_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksWorkUnitsCompleted[simp]: - "valid_queues' 
(ksWorkUnitsCompleted_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomainTime[simp]: + "valid_bitmaps (ksDomainTime_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues_ksCurDomain[simp]: - "valid_queues (ksCurDomain_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomScheduleIdx[simp]: - "valid_queues (ksDomScheduleIdx_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomSchedule[simp]: - "valid_queues (ksDomSchedule_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomainTime[simp]: - "valid_queues (ksDomainTime_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksWorkUnitsCompleted[simp]: - "valid_queues (ksWorkUnitsCompleted_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) +lemma valid_bitmaps_ksWorkUnitsCompleted[simp]: + "valid_bitmaps (ksWorkUnitsCompleted_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma valid_irq_node'_ksCurDomain[simp]: "valid_irq_node' w (ksCurDomain_update f s) = valid_irq_node' w s" @@ -255,6 +232,10 @@ lemma valid_mdb_interrupts'[simp]: "valid_mdb' (ksInterruptState_update f s) = valid_mdb' s" by (simp add: valid_mdb'_def) +lemma valid_mdb'_ksReadyQueues_update[simp]: + "valid_mdb' (ksReadyQueues_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + lemma vms_ksReadyQueues_update[simp]: "valid_machine_state' (ksReadyQueues_update f s) = valid_machine_state' s" by (simp add: valid_machine_state'_def) @@ -279,10 +260,10 @@ lemma ct_in_state_ksSched[simp]: lemma invs'_wu[simp]: "invs' (ksWorkUnitsCompleted_update f s) = invs' s" - apply (simp add: invs'_def cur_tcb'_def valid_state'_def Invariants_H.valid_queues_def - valid_queues'_def valid_irq_node'_def valid_machine_state'_def + apply (simp add: invs'_def cur_tcb'_def valid_state'_def valid_bitmaps_def + valid_irq_node'_def valid_machine_state'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def) + bitmapQ_defs) done lemma valid_arch_state'_interrupt[simp]: @@ -334,9 +315,8 @@ lemma sch_act_simple_ksReadyQueuesL2Bitmap[simp]: lemma ksDomainTime_invs[simp]: "invs' (ksDomainTime_update f s) = invs' s" - by (simp add:invs'_def valid_state'_def - cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def - tcb_in_cur_domain'_def valid_machine_state'_def) + by (simp add: invs'_def valid_state'_def cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_machine_state'_def bitmapQ_defs) lemma valid_machine_state'_ksDomainTime[simp]: "valid_machine_state' (ksDomainTime_update f s) = valid_machine_state' s" @@ -364,9 +344,7 @@ lemma ct_not_inQ_update_stt[simp]: lemma invs'_update_cnt[elim!]: "invs' s \ invs' (s\ksSchedulerAction := ChooseNewThread\)" - by (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues'_def - valid_irq_node'_def cur_tcb'_def ct_idle_or_in_cur_domain'_def - tcb_in_cur_domain'_def valid_queues_no_bitmap_def - bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def) + by (clarsimp simp: invs'_def valid_state'_def valid_irq_node'_def cur_tcb'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def 
bitmapQ_defs)
 end
\ No newline at end of file
diff --git a/proof/refine/ARM_HYP/Invariants_H.thy b/proof/refine/ARM_HYP/Invariants_H.thy
index 93994caf2a..60be16f4ba 100644
--- a/proof/refine/ARM_HYP/Invariants_H.thy
+++ b/proof/refine/ARM_HYP/Invariants_H.thy
@@ -1,4 +1,5 @@
 (*
+ * Copyright 2022, Proofcraft Pty Ltd
  * Copyright 2014, General Dynamics C4 Systems
  *
  * SPDX-License-Identifier: GPL-2.0-only
@@ -10,6 +11,7 @@ imports
   "AInvs.Deterministic_AI"
   "AInvs.AInvs"
   "Lib.AddUpdSimps"
+  "Lib.Heap_List"
 begin
 
 context Arch begin
@@ -161,6 +163,21 @@ definition
 abbreviation
   "cte_at' \<equiv> cte_wp_at' \<top>"
 
+abbreviation tcb_of' :: "kernel_object \<Rightarrow> tcb option" where
+  "tcb_of' \<equiv> projectKO_opt"
+
+abbreviation tcbs_of' :: "kernel_state \<Rightarrow> obj_ref \<Rightarrow> tcb option" where
+  "tcbs_of' s \<equiv> ksPSpace s |> tcb_of'"
+
+abbreviation tcbSchedPrevs_of :: "kernel_state \<Rightarrow> obj_ref \<Rightarrow> obj_ref option" where
+  "tcbSchedPrevs_of s \<equiv> tcbs_of' s |> tcbSchedPrev"
+
+abbreviation tcbSchedNexts_of :: "kernel_state \<Rightarrow> obj_ref \<Rightarrow> obj_ref option" where
+  "tcbSchedNexts_of s \<equiv> tcbs_of' s |> tcbSchedNext"
+
+abbreviation sym_heap_sched_pointers :: "global.kernel_state \<Rightarrow> bool" where
+  "sym_heap_sched_pointers s \<equiv> sym_heap (tcbSchedNexts_of s) (tcbSchedPrevs_of s)"
+
 definition
   tcb_cte_cases :: "word32 \<Rightarrow> ((tcb \<Rightarrow> cte) \<times> ((cte \<Rightarrow> cte) \<Rightarrow> tcb \<Rightarrow> tcb))"
 where
@@ -235,13 +252,14 @@ where
             then refs_of' ko
             else {}))"
 
-
 primrec
   live0' :: "Structures_H.kernel_object \<Rightarrow> bool"
 where
   "live0' (KOTCB tcb) =
-     (bound (tcbBoundNotification tcb) \<or>
-      (tcbState tcb \<noteq> Inactive \<and> tcbState tcb \<noteq> IdleThreadState) \<or> tcbQueued tcb)"
+     (bound (tcbBoundNotification tcb)
+      \<or> tcbSchedPrev tcb \<noteq> None \<or> tcbSchedNext tcb \<noteq> None
+      \<or> tcbQueued tcb
+      \<or> (tcbState tcb \<noteq> Inactive \<and> tcbState tcb \<noteq> IdleThreadState))"
 | "live0' (KOCTE cte) = False"
 | "live0' (KOEndpoint ep) = (ep \<noteq> IdleEP)"
 | "live0' (KONotification ntfn) = (bound (ntfnBoundTCB ntfn) \<or> (\<exists>ts. ntfnObj ntfn = WaitingNtfn ts))"
@@ -314,14 +332,14 @@ where
 definition live' :: "kernel_object \<Rightarrow> bool"
 where
   "live' ko \<equiv> case ko of
-    (KOTCB tcb) => live0' ko \<or> hyp_live' ko
-  | (KOCTE cte) => False
-  | (KOEndpoint ep) => live0' ko
-  | (KONotification ntfn) => live0' ko
-  | (KOUserData) => False
-  | (KOUserDataDevice) => False
-  | (KOKernelData) => False
-  | (KOArch ako) => hyp_live' ko"
+    KOTCB tcb => live0' ko \<or> hyp_live' ko
+  | KOCTE cte => False
+  | KOEndpoint ep => live0' ko
+  | KONotification ntfn => live0' ko
+  | KOUserData => False
+  | KOUserDataDevice => False
+  | KOKernelData => False
+  | KOArch ako => hyp_live' ko"
 
 context begin interpretation Arch . (*FIXME: arch_split*)
 primrec
@@ -578,6 +596,11 @@ definition
 where
   "valid_arch_tcb' \<equiv> \<lambda>t s. \<forall>v.
    atcbVCPUPtr t = Some v \<longrightarrow> vcpu_at' v s "
 
+abbreviation opt_tcb_at' :: "machine_word option \<Rightarrow> kernel_state \<Rightarrow> bool" where
+  "opt_tcb_at' \<equiv> none_top tcb_at'"
+
+lemmas opt_tcb_at'_def = none_top_def
+
 definition
   valid_tcb' :: "Structures_H.tcb \<Rightarrow> kernel_state \<Rightarrow> bool"
 where
@@ -588,6 +611,8 @@ where
           \<and> tcbDomain t \<le> maxDomain
           \<and> tcbPriority t \<le> maxPriority
          \<and> tcbMCP t \<le> maxPriority
+          \<and> opt_tcb_at' (tcbSchedPrev t) s
+          \<and> opt_tcb_at' (tcbSchedNext t) s
          \<and> valid_arch_tcb' (tcbArch t) s"
 
 definition
@@ -1011,10 +1036,15 @@ where
 | "runnable' (Structures_H.BlockedOnSend a b c d e) = False"
 | "runnable' (Structures_H.BlockedOnNotification x) = False"
 
-definition
-  inQ :: "domain \<Rightarrow> priority \<Rightarrow> tcb \<Rightarrow> bool"
-where
-  "inQ d p tcb \<equiv> tcbQueued tcb \<and> tcbPriority tcb = p \<and> tcbDomain tcb = d"
+definition inQ :: "domain \<Rightarrow> priority \<Rightarrow> tcb \<Rightarrow> bool" where
+  "inQ d p tcb \<equiv> tcbQueued tcb \<and> tcbPriority tcb = p \<and> tcbDomain tcb = d"
+
+lemma inQ_implies_tcbQueueds_of:
+  "(inQ domain priority |< tcbs_of' s') tcbPtr \<Longrightarrow> (tcbQueued |< tcbs_of' s') tcbPtr"
+  by (clarsimp simp: opt_map_def opt_pred_def inQ_def split: option.splits)
+
+defs ready_qs_runnable_def:
+  "ready_qs_runnable s \<equiv> \<forall>t. obj_at' tcbQueued t s \<longrightarrow> st_tcb_at' runnable' t s"
 
 definition
   (* for given domain and priority, the scheduler bitmap indicates a thread is in the queue *)
   bitmapQ :: "domain \<Rightarrow> priority \<Rightarrow> kernel_state \<Rightarrow> bool"
 where
   "bitmapQ d p s \<equiv> ksReadyQueuesL1Bitmap s d !! prioToL1Index p
      \<and> ksReadyQueuesL2Bitmap s (d, invertL1Index (prioToL1Index p)) !! unat (p && mask wordRadix)"
-
-definition
-  valid_queues_no_bitmap :: "kernel_state \<Rightarrow> bool"
-where
-  "valid_queues_no_bitmap \<equiv> \<lambda>s.
-   (\<forall>d p. (\<forall>t \<in> set (ksReadyQueues s (d, p)). obj_at' (inQ d p and runnable' \<circ> tcbState) t s)
-    \<and> distinct (ksReadyQueues s (d, p))
-    \<and> (d > maxDomain \<or> p > maxPriority \<longrightarrow> ksReadyQueues s (d,p) = []))"
-
 definition
   (* A priority is used as a two-part key into the bitmap structure. If an L2 bitmap entry is set
      without an L1 entry, updating the L1 entry (shared by many priorities) may make
@@ -1056,31 +1077,62 @@ where
    \<forall>d i. ksReadyQueuesL1Bitmap s d !! i \<longrightarrow> ksReadyQueuesL2Bitmap s (d, invertL1Index i) \<noteq> 0
                                            \<and> i < l2BitmapSize"
 
-definition
-  valid_bitmapQ :: "kernel_state \<Rightarrow> bool"
-where
-  "valid_bitmapQ \<equiv> \<lambda>s. (\<forall>d p. bitmapQ d p s \<longleftrightarrow> ksReadyQueues s (d,p) \<noteq> [])"
+definition valid_bitmapQ :: "kernel_state \<Rightarrow> bool" where
+  "valid_bitmapQ \<equiv> \<lambda>s. \<forall>d p. bitmapQ d p s \<longleftrightarrow> \<not> tcbQueueEmpty (ksReadyQueues s (d,p))"
 
-definition
-  valid_queues :: "kernel_state \<Rightarrow> bool"
-where
-  "valid_queues \<equiv> \<lambda>s. valid_queues_no_bitmap s \<and> valid_bitmapQ s \<and>
-                      bitmapQ_no_L2_orphans s \<and> bitmapQ_no_L1_orphans s"
+definition valid_bitmaps :: "kernel_state \<Rightarrow> bool" where
+  "valid_bitmaps \<equiv> \<lambda>s. valid_bitmapQ s \<and> bitmapQ_no_L2_orphans s \<and> bitmapQ_no_L1_orphans s"
 
-definition
-  (* when a thread gets added to / removed from a queue, but before bitmap updated *)
-  valid_bitmapQ_except :: "domain \<Rightarrow> priority \<Rightarrow> kernel_state \<Rightarrow> bool"
-where
+lemma valid_bitmaps_valid_bitmapQ[elim!]:
+  "valid_bitmaps s \<Longrightarrow> valid_bitmapQ s"
+  by (simp add: valid_bitmaps_def)
+
+lemma valid_bitmaps_bitmapQ_no_L2_orphans[elim!]:
+  "valid_bitmaps s \<Longrightarrow> bitmapQ_no_L2_orphans s"
+  by (simp add: valid_bitmaps_def)
+
+lemma valid_bitmaps_bitmapQ_no_L1_orphans[elim!]:
+  "valid_bitmaps s \<Longrightarrow> bitmapQ_no_L1_orphans s"
+  by (simp add: valid_bitmaps_def)
+
+lemma valid_bitmaps_lift:
+  assumes prq: "\<And>P. f \<lbrace>\<lambda>s. P (ksReadyQueues s)\<rbrace>"
+  assumes prqL1: "\<And>P. f \<lbrace>\<lambda>s. P (ksReadyQueuesL1Bitmap s)\<rbrace>"
+  assumes prqL2: "\<And>P. f \<lbrace>\<lambda>s.
    P (ksReadyQueuesL2Bitmap s)\<rbrace>"
+  shows "f \<lbrace>valid_bitmaps\<rbrace>"
+  unfolding valid_bitmaps_def valid_bitmapQ_def bitmapQ_def
+            bitmapQ_no_L1_orphans_def bitmapQ_no_L2_orphans_def
+  apply (rule hoare_pre)
+   apply (wps assms | wpsimp)+
+  done
+
+(* when a thread gets added to / removed from a queue, but before bitmap updated *)
+definition valid_bitmapQ_except :: "domain \<Rightarrow> priority \<Rightarrow> kernel_state \<Rightarrow> bool" where
   "valid_bitmapQ_except d' p' \<equiv> \<lambda>s.
-    (\<forall>d p. (d \<noteq> d' \<or> p \<noteq> p') \<longrightarrow> (bitmapQ d p s \<longleftrightarrow> ksReadyQueues s (d,p) \<noteq> []))"
+    \<forall>d p. (d \<noteq> d' \<or> p \<noteq> p') \<longrightarrow> (bitmapQ d p s \<longleftrightarrow> \<not> tcbQueueEmpty (ksReadyQueues s (d,p)))"
 
 lemmas bitmapQ_defs = valid_bitmapQ_def valid_bitmapQ_except_def bitmapQ_def
                       bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def
 
-definition
-  valid_queues' :: "kernel_state \<Rightarrow> bool"
-where
-  "valid_queues' \<equiv> \<lambda>s. \<forall>d p t. obj_at' (inQ d p) t s \<longrightarrow> t \<in> set (ksReadyQueues s (d, p))"
+\<comment> \<open>
+  The tcbSchedPrev and tcbSchedNext fields of a TCB are used only to indicate membership in
+  one of the ready queues. \<close>
+definition valid_sched_pointers_2 ::
+  "(obj_ref \<rightharpoonup> obj_ref) \<Rightarrow> (obj_ref \<rightharpoonup> obj_ref) \<Rightarrow> (obj_ref \<Rightarrow> bool) \<Rightarrow> bool "
+  where
+  "valid_sched_pointers_2 prevs nexts ready \<equiv>
+     \<forall>ptr. prevs ptr \<noteq> None \<or> nexts ptr \<noteq> None \<longrightarrow> ready ptr"
+
+abbreviation valid_sched_pointers :: "kernel_state \<Rightarrow> bool" where
+  "valid_sched_pointers s \<equiv>
+     valid_sched_pointers_2 (tcbSchedPrevs_of s) (tcbSchedNexts_of s) (tcbQueued |< tcbs_of' s)"
+
+lemmas valid_sched_pointers_def = valid_sched_pointers_2_def
+
+lemma valid_sched_pointersD:
+  "\<lbrakk>valid_sched_pointers s; \<not> (tcbQueued |< tcbs_of' s) t\<rbrakk>
+   \<Longrightarrow> tcbSchedPrevs_of s t = None \<and> tcbSchedNexts_of s t = None"
+  by (fastforce simp: valid_sched_pointers_def in_opt_pred opt_map_red)
 
 definition tcb_in_cur_domain' :: "32 word \<Rightarrow> kernel_state \<Rightarrow> bool" where
   "tcb_in_cur_domain' t \<equiv> \<lambda>s. obj_at' (\<lambda>tcb. ksCurDomain s = tcbDomain tcb) t s"
@@ -1315,7 +1367,7 @@ definition
   valid_state' :: "kernel_state \<Rightarrow> bool"
 where
   "valid_state' \<equiv> \<lambda>s. valid_pspace' s \<and> sch_act_wf (ksSchedulerAction s) s
-      \<and> valid_queues s \<and> sym_refs (state_refs_of' s) \<and>sym_refs (state_hyp_refs_of' s)
+      \<and> sym_refs (state_refs_of' s) \<and>sym_refs (state_hyp_refs_of' s)
      \<and> if_live_then_nonz_cap' s \<and> if_unsafe_then_cap' s
      \<and> valid_idle' s \<and> valid_global_refs' s
      \<and> valid_arch_state' s
@@ -1324,7 +1376,9 @@ where
      \<and> valid_irq_states' s
      \<and> valid_machine_state' s
      \<and> irqs_masked' s
-      \<and> valid_queues' s
+      \<and> sym_heap_sched_pointers s
+      \<and> valid_sched_pointers s
+      \<and> valid_bitmaps s
      \<and> ct_not_inQ s
      \<and> ct_idle_or_in_cur_domain' s
      \<and> valid_pde_mappings' s
@@ -1377,6 +1431,11 @@ definition
 
 abbreviation
   "active' st \<equiv> st = Structures_H.Running \<or> st = Structures_H.Restart"
 
+lemma runnable_eq_active': "runnable' = active'"
+  apply (rule ext)
+  apply (case_tac st, simp_all)
+  done
+
 abbreviation
   "simple' st \<equiv> st = Structures_H.Inactive \<or> st = Structures_H.Running \<or>
@@ -1392,11 +1451,12 @@ abbreviation
 
 abbreviation(input)
   "all_invs_but_sym_refs_ct_not_inQ' \<equiv> \<lambda>s.
valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s \ valid_pde_mappings' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s \ valid_pde_mappings' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1404,12 +1464,13 @@ abbreviation(input) abbreviation(input) "all_invs_but_ct_not_inQ' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) + \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s \ valid_pde_mappings' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s \ valid_pde_mappings' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1425,12 +1486,13 @@ lemma all_invs_but_not_ct_inQ_check': definition "all_invs_but_ct_idle_or_in_cur_domain' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) + \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_not_inQ s \ valid_pde_mappings' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ cur_tcb' s \ ct_not_inQ s \ valid_pde_mappings' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1528,7 +1590,7 @@ lemmas valid_duplicates'_D = valid_duplicates'_pdeD valid_duplicates'_pteD lemma valid_duplicates'_non_pd_pt_I: "\koTypeOf ko \ ArchT PDET; koTypeOf ko \ ArchT PTET; vs_valid_duplicates' (ksPSpace s) ; ksPSpace s p = Some ko; koTypeOf ko = koTypeOf m\ - \ vs_valid_duplicates' (ksPSpace s(p \ m))" + \ vs_valid_duplicates' ((ksPSpace s)(p \ m))" apply (subst vs_valid_duplicates'_def) apply (rule allI) apply (clarsimp simp: option.splits kernel_object.splits arch_kernel_object.splits) @@ -3291,9 +3353,9 @@ lemma sch_act_wf_arch [simp]: "sch_act_wf sa (ksArchState_update f s) = sch_act_wf sa s" by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) -lemma valid_queues_arch [simp]: - "valid_queues (ksArchState_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) +lemma valid_bitmaps_arch[simp]: + "valid_bitmaps (ksArchState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma if_unsafe_then_cap_arch' [simp]: "if_unsafe_then_cap' (ksArchState_update f s) = if_unsafe_then_cap' s" @@ -3311,22 +3373,14 @@ lemma 
sch_act_wf_machine_state [simp]: "sch_act_wf sa (ksMachineState_update f s) = sch_act_wf sa s" by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) -lemma valid_queues_machine_state [simp]: - "valid_queues (ksMachineState_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_arch' [simp]: - "valid_queues' (ksArchState_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - -lemma valid_queues_machine_state' [simp]: - "valid_queues' (ksMachineState_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - lemma valid_irq_node'_machine_state [simp]: "valid_irq_node' x (ksMachineState_update f s) = valid_irq_node' x s" by (simp add: valid_irq_node'_def) +lemma valid_bitmaps_machine_state[simp]: + "valid_bitmaps (ksMachineState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + (* these should be reasonable safe for automation because of the 0 pattern *) lemma no_0_ko_wp' [elim!]: "\ ko_wp_at' Q 0 s; no_0_obj' s \ \ P" @@ -3404,19 +3458,6 @@ lemma typ_at_aligned': "\ typ_at' tp p s \ \ is_aligned p (objBitsT tp)" by (clarsimp simp add: typ_at'_def ko_wp_at'_def objBitsT_koTypeOf) -lemma valid_queues_obj_at'D: - "\ t \ set (ksReadyQueues s (d, p)); valid_queues s \ - \ obj_at' (inQ d p) t s" - apply (unfold valid_queues_def valid_queues_no_bitmap_def) - apply (elim conjE) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp) - done - lemma obj_at'_and: "obj_at' (P and P') t s = (obj_at' P t s \ obj_at' P' t s)" by (rule iffI, (clarsimp simp: obj_at'_def)+) @@ -3458,21 +3499,6 @@ lemma not_pred_tcb_at'_strengthen: "pred_tcb_at' f (Not \ P) p s \ \ pred_tcb_at' f P p s" by (clarsimp simp: pred_tcb_at'_def obj_at'_def) -lemma valid_queues_no_bitmap_def': - "valid_queues_no_bitmap = - (\s. \d p. (\t\set (ksReadyQueues s (d, p)). - obj_at' (inQ d p) t s \ st_tcb_at' runnable' t s) \ - distinct (ksReadyQueues s (d, p)) \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - apply (rule ext, rule iffI) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_and pred_tcb_at'_def o_def - elim!: obj_at'_weakenE)+ - done - -lemma valid_queues_running: - assumes Q: "t \ set(ksReadyQueues s (d, p))" "valid_queues s" - shows "st_tcb_at' runnable' t s" - using assms by (clarsimp simp add: valid_queues_def valid_queues_no_bitmap_def') - lemma valid_refs'_cteCaps: "valid_refs' S (ctes_of s) = (\c \ ran (cteCaps_of s). 
S \ capRange c = {})" by (fastforce simp: valid_refs'_def cteCaps_of_def elim!: ranE) @@ -3557,8 +3583,16 @@ lemma invs_sch_act_wf' [elim!]: "invs' s \ sch_act_wf (ksSchedulerAction s) s" by (simp add: invs'_def valid_state'_def) -lemma invs_queues [elim!]: - "invs' s \ valid_queues s" +lemma invs_valid_bitmaps[elim!]: + "invs' s \ valid_bitmaps s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sym_heap_sched_pointers[elim!]: + "invs' s \ sym_heap_sched_pointers s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_valid_sched_pointers[elim!]: + "invs' s \ valid_sched_pointers s" by (simp add: invs'_def valid_state'_def) lemma invs_valid_idle'[elim!]: @@ -3573,21 +3607,9 @@ lemma invs'_invs_no_cicd: "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" by (simp add: invs'_to_invs_no_cicd'_def) -lemma valid_queues_valid_bitmapQ: - "valid_queues s \ valid_bitmapQ s" - by (simp add: valid_queues_def) - -lemma valid_queues_valid_queues_no_bitmap: - "valid_queues s \ valid_queues_no_bitmap s" - by (simp add: valid_queues_def) - -lemma valid_queues_bitmapQ_no_L1_orphans: - "valid_queues s \ bitmapQ_no_L1_orphans s" - by (simp add: valid_queues_def) - lemma invs'_bitmapQ_no_L1_orphans: "invs' s \ bitmapQ_no_L1_orphans s" - by (drule invs_queues, simp add: valid_queues_def) + by (simp add: invs'_def valid_state'_def valid_bitmaps_def) lemma invs_ksCurDomain_maxDomain' [elim!]: "invs' s \ ksCurDomain s \ maxDomain" @@ -3612,34 +3634,24 @@ lemma invs_no_0_obj'[elim!]: lemma invs'_gsCNodes_update[simp]: "invs' (gsCNodes_update f s') = invs' s'" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) - apply (cases "ksSchedulerAction s'") - apply (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def ct_not_inQ_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) done lemma invs'_gsUserPages_update[simp]: "invs' (gsUserPages_update f s') = invs' s'" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) - apply (cases "ksSchedulerAction s'") - apply (simp_all add: ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def ct_not_inQ_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) done -lemma invs_queues_tcb_in_cur_domain': - "\ ksReadyQueues s (d, p) = x # xs; invs' s; d = ksCurDomain s\ - \ tcb_in_cur_domain' x s" -apply (subgoal_tac "x \ set (ksReadyQueues s (d, p))") - apply (drule (1) valid_queues_obj_at'D[OF _ invs_queues]) - apply (auto simp: inQ_def tcb_in_cur_domain'_def elim: obj_at'_weakenE) -done - lemma pred_tcb'_neq_contra: "\ pred_tcb_at' proj P p s; pred_tcb_at' proj Q 
p s; \st. P st \ Q st \ \ False" by (clarsimp simp: pred_tcb_at'_def obj_at'_def) @@ -3653,7 +3665,7 @@ lemma invs'_ksDomScheduleIdx: unfolding invs'_def valid_state'_def by clarsimp lemma valid_bitmap_valid_bitmapQ_exceptE: - "\ valid_bitmapQ_except d p s ; (bitmapQ d p s \ ksReadyQueues s (d,p) \ []) ; + "\ valid_bitmapQ_except d p s; bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p)); bitmapQ_no_L2_orphans s \ \ valid_bitmapQ s" unfolding valid_bitmapQ_def valid_bitmapQ_except_def @@ -3740,6 +3752,52 @@ add_upd_simps "invs' (gsUntypedZeroRanges_update f s)" (obj_at'_real_def) declare upd_simps[simp] +lemma neq_out_intv: + "\ a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" + by simp + +lemma rule_out_intv: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a \ b \ + \ b \ mask_range a (objBitsKO obj)" + apply (drule(1) pspace_distinctD') + apply (subst (asm) ps_clear_def) + apply (drule_tac x = b in orthD2) + apply fastforce + apply (drule neq_out_intv) + apply (simp add: mask_def add_diff_eq) + apply (simp add: mask_def add_diff_eq) + done + +lemma ptr_range_mask_range: + "{ptr..ptr + 2 ^ bits - 1} = mask_range ptr bits" + unfolding mask_def + by simp + +lemma distinct_obj_range'_not_subset: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ \ obj_range' b obj' \ obj_range' a obj" + unfolding obj_range'_def + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule (3) rule_out_intv) + by (fastforce simp: is_aligned_no_overflow_mask ptr_range_mask_range word_add_increasing) + +lemma obj_range'_disjoint: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ obj_range' a obj \ obj_range' b obj' = {}" + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule_tac p=a and p'=b in aligned_mask_range_cases) + apply assumption + apply (metis add_mask_fold distinct_obj_range'_not_subset obj_range'_def) + done + qualify ARM_HYP_H (in Arch) (* diff --git a/proof/refine/ARM_HYP/IpcCancel_R.thy b/proof/refine/ARM_HYP/IpcCancel_R.thy index 7ef00de130..77ca9c476c 100644 --- a/proof/refine/ARM_HYP/IpcCancel_R.thy +++ b/proof/refine/ARM_HYP/IpcCancel_R.thy @@ -48,25 +48,6 @@ lemma set_ep_pred_tcb_at' [wp]: apply (simp add: updateObject_default_def in_monad projectKOs) done -(* valid_queues is too strong *) -definition valid_inQ_queues :: "KernelStateData_H.kernel_state \ bool" where - "valid_inQ_queues \ - \s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) \ distinct (ksReadyQueues s (d, p))" - -lemma valid_inQ_queues_ksSchedulerAction_update[simp]: - "valid_inQ_queues (ksSchedulerAction_update f s) = valid_inQ_queues s" - by (simp add: valid_inQ_queues_def) - -lemma valid_inQ_queues_ksReadyQueuesL1Bitmap_upd[simp]: - "valid_inQ_queues (ksReadyQueuesL1Bitmap_update f s) = valid_inQ_queues s" - unfolding valid_inQ_queues_def - by simp - -lemma valid_inQ_queues_ksReadyQueuesL2Bitmap_upd[simp]: - "valid_inQ_queues (ksReadyQueuesL2Bitmap_update f s) = valid_inQ_queues s" - unfolding valid_inQ_queues_def - by simp - defs capHasProperty_def: "capHasProperty ptr P \ cte_wp_at' (\c. P (cteCap c)) ptr" end @@ -83,11 +64,6 @@ locale delete_one_conc_pre = "\pspace_distinct'\ cteDeleteOne slot \\rv. pspace_distinct'\" assumes delete_one_it: "\P. \\s. 
P (ksIdleThread s)\ cteDeleteOne cap \\rv s. P (ksIdleThread s)\" - assumes delete_one_queues: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cteDeleteOne sl \\rv. Invariants_H.valid_queues\" - assumes delete_one_inQ_queues: - "\valid_inQ_queues\ cteDeleteOne sl \\rv. valid_inQ_queues\" assumes delete_one_sch_act_simple: "\sch_act_simple\ cteDeleteOne sl \\rv. sch_act_simple\" assumes delete_one_sch_act_not: @@ -105,7 +81,7 @@ lemma (in delete_one_conc_pre) cancelIPC_simple[wp]: "\\\ cancelIPC t \\rv. st_tcb_at' simple' t\" apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def cong: Structures_H.thread_state.case_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (rule hoare_pre) apply (wpc | wp sts_st_tcb_at'_cases hoare_vcg_conj_lift @@ -339,6 +315,8 @@ lemma cancelSignal_corres: apply (wp getNotification_wp)+ apply (clarsimp simp: conj_comms st_tcb_at_tcb_at) apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) apply (erule pspace_valid_objsE) apply fastforce apply (clarsimp simp: valid_obj_def valid_tcb_def valid_tcb_state_def) @@ -547,12 +525,12 @@ lemma (in delete_one) cancelIPC_ReplyCap_corres: and Q'="\_. invs' and st_tcb_at' awaiting_reply' t" in corres_underlying_split) apply (rule corres_guard_imp) - apply (rule threadset_corresT) + apply (rule threadset_corresT; simp?) apply (simp add: tcb_relation_def fault_rel_optionation_def) apply (simp add: tcb_cap_cases_def) - apply (simp add: tcb_cte_cases_def) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) apply (simp add: exst_same_def) - apply (clarsimp simp: st_tcb_at_tcb_at) + apply (fastforce simp: st_tcb_at_tcb_at) apply clarsimp defer apply (wp thread_set_invs_trivial thread_set_no_change_tcb_state @@ -587,7 +565,7 @@ lemma (in delete_one) cancelIPC_ReplyCap_corres: apply (rule_tac F="mdbNext (cteMDBNode cte) = cte_map sl" in corres_req) apply (clarsimp dest!: st_tcb_at_tcb_at) apply (fastforce simp: cte_wp_at_ctes_of cte_level_bits_def - elim!: reply_mdbNext_is_descendantD) + elim!: reply_mdbNext_is_descendantD) apply (simp add: when_def getSlotCap_def capHasProperty_def del: split_paired_Ex) apply (rule corres_guard_imp) @@ -639,7 +617,7 @@ lemma (in delete_one) cancel_ipc_corres: apply (rule hoare_strengthen_post) apply (rule gts_sp'[where P="\"]) apply (clarsimp elim!: pred_tcb'_weakenE) - apply simp + apply fastforce apply simp done @@ -671,16 +649,15 @@ lemma sch_act_simple_not_t[simp]: "sch_act_simple s \ sch_act_no context begin interpretation Arch . (*FIXME: arch_split*) +crunches setNotification + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift) + lemma cancelSignal_invs': "\invs' and st_tcb_at' (\st. st = BlockedOnNotification ntfn) t and sch_act_not t\ cancelSignal t ntfn \\rv. invs'\" proof - - have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ - \ \x. t \ set (ksReadyQueues s x)" - apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def - valid_queues_no_bitmap_def) - apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ - done have NTFNSN: "\ntfn ntfn'. \\s. sch_act_not (ksCurThread s) s \ setNotification ntfn ntfn' \\_ s. 
sch_act_not (ksCurThread s) s\" @@ -691,20 +668,19 @@ lemma cancelSignal_invs': show ?thesis apply (simp add: cancelSignal_def invs'_def valid_state'_def Let_def) apply (wp valid_irq_node_lift sts_sch_act' irqs_masked_lift - hoare_vcg_all_lift [OF setNotification_ksQ] sts_valid_queues + hoare_vcg_all_lift setThreadState_ct_not_inQ NTFNSN - hoare_vcg_all_lift setNotification_ksQ + hoare_vcg_all_lift | simp add: valid_tcb_state'_def list_case_If split del: if_split)+ prefer 2 apply assumption apply (rule hoare_strengthen_post) apply (rule get_ntfn_sp') + apply (rename_tac rv s) apply (clarsimp simp: pred_tcb_at') - apply (frule NIQ) - apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) apply (rule conjI) apply (clarsimp simp: valid_ntfn'_def) - apply (case_tac "ntfnObj r", simp_all add: isWaitingNtfn_def) + apply (case_tac "ntfnObj rv", simp_all add: isWaitingNtfn_def) apply (frule ko_at_valid_objs') apply (simp add: valid_pspace_valid_objs') apply (clarsimp simp: projectKO_opt_ntfn split: kernel_object.splits) @@ -729,7 +705,7 @@ lemma cancelSignal_invs': split: ntfn.splits) apply (rule conjI, clarsimp elim!: if_live_state_refsE) apply (fastforce simp: sym_refs_def dest!: idle'_no_refs) - apply (case_tac "ntfnObj r", simp_all) + apply (case_tac "ntfnObj rv", simp_all) apply (frule obj_at_valid_objs', clarsimp) apply (clarsimp simp: projectKOs valid_obj'_def valid_ntfn'_def) apply (rule conjI, clarsimp split: option.splits) @@ -743,9 +719,10 @@ lemma cancelSignal_invs': set_eq_subset) apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def set_eq_subset) + apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp elim!: if_live_state_refsE) apply (rule conjI) - apply (case_tac "ntfnBoundTCB r") + apply (case_tac "ntfnBoundTCB rv") apply (clarsimp elim!: if_live_state_refsE)+ apply (clarsimp dest!: idle'_no_refs) done @@ -826,17 +803,18 @@ lemma tcb_bound_refs'_not_NTFNBound[simp]: "(t, NTFNBound) \ tcb_bound_refs' n" by (simp add: tcb_bound_refs'_def) +crunches setEndpoint + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift simp: updateObject_default_def) + lemma (in delete_one_conc) cancelIPC_invs[wp]: shows "\tcb_at' t and invs'\ cancelIPC t \\rv. invs'\" proof - have P: "\xs v f. (case xs of [] \ return v | y # ys \ return (f (y # ys))) = return (case xs of [] \ v | y # ys \ f xs)" by (clarsimp split: list.split) - have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ - \ \x. t \ set (ksReadyQueues s x)" - apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ - done have EPSCHN: "\eeptr ep'. \\s. sch_act_not (ksCurThread s) s\ setEndpoint eeptr ep' \\_ s. 
sch_act_not (ksCurThread s) s\" @@ -860,21 +838,20 @@ proof - apply (subst P) apply (wp valid_irq_node_lift valid_global_refs_lift' irqs_masked_lift sts_sch_act' - hoare_vcg_all_lift [OF setEndpoint_ksQ] - sts_valid_queues setThreadState_ct_not_inQ EPSCHN - hoare_vcg_all_lift setNotification_ksQ getEndpoint_wp + setThreadState_ct_not_inQ EPSCHN + hoare_vcg_all_lift getEndpoint_wp | simp add: valid_tcb_state'_def split del: if_split | wpc)+ apply (clarsimp simp: pred_tcb_at' fun_upd_def[symmetric] conj_comms split del: if_split cong: if_cong) + apply (rule conjI, clarsimp simp: valid_pspace'_def) + apply (rule conjI, clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) apply (frule obj_at_valid_objs', clarsimp) apply (clarsimp simp: projectKOs valid_obj'_def) apply (rule conjI) apply (clarsimp simp: obj_at'_def valid_ep'_def projectKOs dest!: pred_tcb_at') - apply (frule NIQ) - apply (erule pred_tcb'_weakenE, fastforce) apply (clarsimp, rule conjI) apply (auto simp: pred_tcb_at'_def obj_at'_def)[1] apply (rule conjI) @@ -922,7 +899,7 @@ proof - show ?thesis apply (simp add: cancelIPC_def crunch_simps cong: if_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (case_tac state, simp_all add: isTS_defs) apply (safe intro!: hoare_weaken_pre[OF Q] @@ -965,8 +942,8 @@ lemma (in delete_one_conc_pre) cancelIPC_st_tcb_at: \\rv. st_tcb_at' P t\" apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (case_tac x, simp_all add: isTS_defs list_case_If) + apply (rule bind_wp [OF _ gts_sp']) + apply (case_tac rv, simp_all add: isTS_defs list_case_If) apply (wp sts_st_tcb_at'_cases delete_one_st_tcb_at threadSet_pred_tcb_no_state cancelSignal_st_tcb_at hoare_drop_imps @@ -1038,9 +1015,8 @@ lemma (in delete_one_conc_pre) cancelIPC_tcb_at_runnable': (is "\?PRE\ _ \_\") apply (clarsimp simp: cancelIPC_def Let_def) apply (case_tac "t'=t") - apply (rule_tac B="\st. st_tcb_at' runnable' t and K (runnable' st)" - in hoare_seq_ext) - apply(case_tac x; simp) + apply (rule_tac Q'="\st. st_tcb_at' runnable' t and K (runnable' st)" in bind_wp) + apply(case_tac rv; simp) apply (wpsimp wp: sts_pred_tcb_neq')+ apply (rule_tac Q="\rv. ?PRE" in hoare_post_imp, fastforce) apply (wp cteDeleteOne_tcb_at_runnable' @@ -1062,31 +1038,6 @@ apply (wp hoare_vcg_conj_lift delete_one_ksCurDomain | simp add: getThreadReplySlot_def o_def if_fun_split)+ done -(* FIXME move *) -lemma tcbSchedEnqueue_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ tcbSchedEnqueue t \\_. obj_at' P t'\" -apply (simp add: tcbSchedEnqueue_def unless_def) -apply (wp threadGet_wp | simp)+ -apply (clarsimp simp: obj_at'_def) -apply (case_tac obja) -apply fastforce -done - -(* FIXME move *) -lemma setThreadState_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ setThreadState st t \\_. obj_at' P t'\" -apply (simp add: setThreadState_def rescheduleRequired_def) -apply (wp hoare_vcg_conj_lift tcbSchedEnqueue_not_st - | wpc - | rule hoare_drop_imps - | simp)+ -apply (clarsimp simp: obj_at'_def) -apply (case_tac obj) -apply fastforce -done - (* FIXME move *) lemma setBoundNotification_not_ntfn: "(\tcb ntfn. 
P (tcb\tcbBoundNotification := ntfn\) \ P tcb) @@ -1098,15 +1049,6 @@ lemma setBoundNotification_not_ntfn: | simp)+ done -(* FIXME move *) -lemma setThreadState_tcb_in_cur_domain'[wp]: - "\tcb_in_cur_domain' t'\ setThreadState st t \\_. tcb_in_cur_domain' t'\" -apply (simp add: tcb_in_cur_domain'_def) -apply (rule hoare_pre) -apply wps -apply (wp setThreadState_not_st | simp)+ -done - lemma setBoundNotification_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ setBoundNotification st t \\_. tcb_in_cur_domain' t'\" apply (simp add: tcb_in_cur_domain'_def) @@ -1115,22 +1057,22 @@ lemma setBoundNotification_tcb_in_cur_domain'[wp]: apply (wp setBoundNotification_not_ntfn | simp)+ done -lemma cancelSignal_tcb_obj_at': - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ cancelSignal t word \\_. obj_at' P t'\" -apply (simp add: cancelSignal_def setNotification_def) -apply (wp setThreadState_not_st getNotification_wp | wpc | simp)+ -done +lemma setThreadState_tcbDomain_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding setThreadState_def + by wpsimp + +crunches cancelSignal + for tcbDomain_obj_at'[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t'" + (wp: crunch_wps) lemma (in delete_one_conc_pre) cancelIPC_tcbDomain_obj_at': - "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelIPC t \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" -apply (simp add: cancelIPC_def Let_def) -apply (wp hoare_vcg_conj_lift - setThreadState_not_st delete_one_tcbDomain_obj_at' cancelSignal_tcb_obj_at' - | wpc - | rule hoare_drop_imps - | simp add: getThreadReplySlot_def o_def if_fun_split)+ -done + "cancelIPC t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding cancelIPC_def Let_def + by (wp hoare_vcg_conj_lift delete_one_tcbDomain_obj_at' + | wpc + | rule hoare_drop_imps + | simp add: getThreadReplySlot_def o_def if_fun_split)+ lemma (in delete_one_conc_pre) cancelIPC_tcb_in_cur_domain': "\tcb_in_cur_domain' t'\ cancelIPC t \\_. tcb_in_cur_domain' t'\" @@ -1164,7 +1106,7 @@ text \The suspend operation, significant as called from delete\ lemma rescheduleRequired_weak_sch_act_wf: "\\\ rescheduleRequired \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" apply (simp add: rescheduleRequired_def setSchedulerAction_def) - apply (wp hoare_post_taut | simp add: weak_sch_act_wf_def)+ + apply (wp hoare_TrueI | simp add: weak_sch_act_wf_def)+ done lemma sts_weak_sch_act_wf[wp]: @@ -1172,7 +1114,7 @@ lemma sts_weak_sch_act_wf[wp]: \ (ksSchedulerAction s = SwitchToThread t \ runnable' st)\ setThreadState st t \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" - including no_pre + including classic_wp_pre apply (simp add: setThreadState_def) apply (wp rescheduleRequired_weak_sch_act_wf) apply (rule_tac Q="\_ s. weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp, simp) @@ -1233,189 +1175,48 @@ lemma setNotification_weak_sch_act_wf[wp]: lemmas ipccancel_weak_sch_act_wfs = weak_sch_act_wf_lift[OF _ setCTE_pred_tcb_at'] -lemma tcbSchedDequeue_corres': - "corres dc (is_etcb_at t) (tcb_at' t and valid_inQ_queues) (tcb_sched_action (tcb_sched_dequeue) t) (tcbSchedDequeue t)" - apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) - apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and valid_inQ_queues and obj_at' (\obj. 
tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (wp, simp) - apply (case_tac queued) - defer - apply (simp add: unless_def when_def) - apply (rule corres_no_failI) - apply (wp) - apply (clarsimp simp: in_monad ethread_get_def get_etcb_def set_tcb_queue_def is_etcb_at_def state_relation_def gets_the_def gets_def get_def return_def bind_def assert_opt_def get_tcb_queue_def modify_def put_def) - apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") - prefer 2 - apply (force simp: tcb_sched_dequeue_def valid_inQ_queues_def - ready_queues_relation_def obj_at'_def inQ_def projectKO_eq project_inject) - apply (simp add: ready_queues_relation_def) - apply (simp add: unless_def when_def) - apply (rule corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (simp split del: if_split) - apply (rule corres_split_eqr) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split_eqr[OF getQueue_corres]) - apply (simp split del: if_split) - apply (subst bind_return_unit, rule corres_split[where r'=dc]) - apply (simp add: tcb_sched_dequeue_def) - apply (rule setQueue_corres) - apply (rule corres_split_noop_rhs) - apply (clarsimp, rule removeFromBitmap_corres_noop) - apply (simp add: dc_def[symmetric]) - apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] - apply (wp | simp)+ - done - -lemma setQueue_valid_inQ_queues: - "\valid_inQ_queues - and (\s. \t \ set ts. obj_at' (inQ d p) t s) - and K (distinct ts)\ - setQueue d p ts - \\_. valid_inQ_queues\" - apply (simp add: setQueue_def valid_inQ_queues_def) - apply wp - apply clarsimp - done - -lemma threadSet_valid_inQ_queues: - "\valid_inQ_queues and (\s. \d p. (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) - \ obj_at' (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) t s - \ t \ set (ksReadyQueues s (d, p)))\ - threadSet f t - \\rv. valid_inQ_queues\" - apply (simp add: threadSet_def) - apply wp - apply (simp add: valid_inQ_queues_def pred_tcb_at'_def) - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_inQ_queues_def pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def projectKOs) - apply (fastforce) - done - -(* reorder the threadSet before the setQueue, useful for lemmas that don't refer to bitmap *) -lemma setQueue_after_addToBitmap: - "(setQueue d p q >>= (\rv. (when P (addToBitmap d p)) >>= (\rv. threadSet f t))) = - (when P (addToBitmap d p) >>= (\rv. (threadSet f t) >>= (\rv. setQueue d p q)))" - apply (case_tac P, simp_all) - prefer 2 - apply (simp add: setQueue_after) - apply (simp add: setQueue_def when_def) - apply (subst oblivious_modify_swap) - apply (simp add: threadSet_def getObject_def setObject_def - loadObject_default_def bitmap_fun_defs - split_def projectKO_def2 alignCheck_assert - magnitudeCheck_assert updateObject_default_def) - apply (intro oblivious_bind, simp_all) - apply (clarsimp simp: bind_assoc) - done - -lemma tcbSchedEnqueue_valid_inQ_queues[wp]: - "\valid_inQ_queues\ tcbSchedEnqueue t \\_. valid_inQ_queues\" - apply (simp add: tcbSchedEnqueue_def setQueue_after_addToBitmap) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_inQ_queues and obj_at' (\obj. 
tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued, simp_all add: unless_def)[1] - apply (wp setQueue_valid_inQ_queues threadSet_valid_inQ_queues threadGet_wp - hoare_vcg_const_Ball_lift - | simp add: inQ_def bitmap_fun_defs - | fastforce simp: valid_inQ_queues_def inQ_def obj_at'_def)+ - done - - (* prevents wp from splitting on the when; stronger technique than hoare_when_weak_wp - FIXME: possible to replace with hoare_when_weak_wp? - *) -definition - "removeFromBitmap_conceal d p q t \ when (null [x\q . x \ t]) (removeFromBitmap d p)" - -lemma removeFromBitmap_conceal_valid_inQ_queues[wp]: - "\ valid_inQ_queues \ removeFromBitmap_conceal d p q t \ \_. valid_inQ_queues \" - unfolding valid_inQ_queues_def removeFromBitmap_conceal_def - by (wp|clarsimp simp: bitmap_fun_defs)+ - -lemma rescheduleRequired_valid_inQ_queues[wp]: - "\valid_inQ_queues\ rescheduleRequired \\_. valid_inQ_queues\" - apply (simp add: rescheduleRequired_def) - apply wpsimp - done - -lemma sts_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setThreadState st t \\rv. valid_inQ_queues\" - apply (simp add: setThreadState_def) - apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - lemma updateObject_ep_inv: "\P\ updateObject (obj::endpoint) ko p q n \\rv. P\" by simp (rule updateObject_default_inv) -lemma sbn_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setBoundNotification ntfn t \\rv. valid_inQ_queues\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ +lemma asUser_tcbQueued_inv[wp]: + "\obj_at' (\tcb. P (tcbQueued tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbQueued tcb)) t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ done -lemma setEndpoint_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setEndpoint ptr ep \\rv. valid_inQ_queues\" - apply (unfold setEndpoint_def) - apply (rule setObject_ep_pre) - apply (simp add: valid_inQ_queues_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift setObject_queues_unchanged[OF updateObject_ep_inv]) - apply simp - done +context begin interpretation Arch . -lemma set_ntfn_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setNotification ptr ntfn \\rv. valid_inQ_queues\" - apply (unfold setNotification_def) - apply (rule setObject_ntfn_pre) - apply (simp add: valid_inQ_queues_def) - apply (wpsimp wp: hoare_Ball_helper hoare_vcg_all_lift simp: updateObject_default_def in_monad) - done +crunches cancel_ipc + for pspace_aligned[wp]: "pspace_aligned :: det_state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_state \ _" + (simp: crunch_simps wp: crunch_wps) -crunch valid_inQ_queues[wp]: cancelSignal valid_inQ_queues - (simp: updateObject_tcb_inv crunch_simps wp: crunch_wps) +end -lemma (in delete_one_conc_pre) cancelIPC_valid_inQ_queues[wp]: - "\valid_inQ_queues\ cancelIPC t \\_. 
valid_inQ_queues\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def) - apply (wp hoare_drop_imps delete_one_inQ_queues threadSet_valid_inQ_queues | wpc | simp add:if_apply_def2 Fun.comp_def)+ - apply (clarsimp simp: valid_inQ_queues_def inQ_def)+ - done +crunches asUser + for valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps) -lemma valid_queues_inQ_queues: - "Invariants_H.valid_queues s \ valid_inQ_queues s" - by (force simp: Invariants_H.valid_queues_def valid_inQ_queues_def obj_at'_def - valid_queues_no_bitmap_def) +crunches set_thread_state + for in_correct_ready_q[wp]: in_correct_ready_q + (wp: crunch_wps ignore_del: set_thread_state_ext) -lemma asUser_tcbQueued_inv[wp]: - "\obj_at' (\tcb. P (tcbQueued tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbQueued tcb)) t'\" - apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) - apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ - done +crunches set_thread_state_ext + for ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps ignore_del: set_thread_state_ext) -lemma asUser_valid_inQ_queues[wp]: - "\ valid_inQ_queues \ asUser t f \\rv. valid_inQ_queues \" - unfolding valid_inQ_queues_def Ball_def - apply (wpsimp wp: hoare_vcg_all_lift) - defer - apply (wp asUser_ksQ) - apply assumption - apply (simp add: inQ_def[abs_def] obj_at'_conj) - apply (rule hoare_convert_imp) - apply (wp asUser_ksQ) - apply wp - done +lemma set_thread_state_ready_qs_distinct[wp]: + "set_thread_state ref ts \ready_qs_distinct\" + unfolding set_thread_state_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) + +lemma as_user_ready_qs_distinct[wp]: + "as_user tptr f \ready_qs_distinct\" + unfolding as_user_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) lemma (in delete_one) suspend_corres: "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) @@ -1438,25 +1239,28 @@ lemma (in delete_one) suspend_corres: apply (rule corres_return_trivial) apply (rule corres_split_nor[OF setThreadState_corres]) apply simp - apply (rule tcbSchedDequeue_corres') + apply (rule tcbSchedDequeue_corres) apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ - apply (rule hoare_post_imp[where Q = "\rv s. tcb_at t s \ is_etcb_at t s"]) - apply simp + apply (wpsimp wp: sts_valid_objs') + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def valid_tcb_state'_def)+ + apply (rule hoare_post_imp[where Q = "\_ s. einvs s \ tcb_at t s"]) + apply (simp add: invs_implies invs_strgs valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct valid_sched_def) apply wp - apply (rule hoare_post_imp[where Q = "\rv s. tcb_at' t s \ valid_inQ_queues s"]) - apply (wpsimp simp: valid_queues_inQ_queues) - apply wp+ - apply (force simp: valid_sched_def tcb_at_is_etcb_at) - apply (clarsimp simp add: invs'_def valid_state'_def valid_queues_inQ_queues) + apply (rule hoare_post_imp[where Q = "\_ s. invs' s \ tcb_at' t s"]) + apply (fastforce simp: invs'_def valid_tcb_state'_def) + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ + apply fastforce+ done context begin interpretation Arch . lemma archThreadGet_corres: "(\a a'. 
arch_tcb_relation a a' \ f a = f' a') \ - corres (=) (tcb_at t) (tcb_at' t) (arch_thread_get f t) (archThreadGet f' t)" + corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_get f t) (archThreadGet f' t)" unfolding arch_thread_get_def archThreadGet_def - apply (corressimp corres: get_tcb_corres) + apply (corresKsimp corres: getObject_TCB_corres) apply (clarsimp simp: tcb_relation_def) done @@ -1465,7 +1269,8 @@ lemma tcb_vcpu_relation: unfolding arch_tcb_relation_def by auto lemma archThreadGet_VCPU_corres[corres]: - "corres (=) (tcb_at t) (tcb_at' t) (arch_thread_get tcb_vcpu t) (archThreadGet atcbVCPUPtr t)" + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_get tcb_vcpu t) (archThreadGet atcbVCPUPtr t)" by (rule archThreadGet_corres) (erule tcb_vcpu_relation) lemma when_fail_assert: @@ -1485,7 +1290,7 @@ lemma corres_gets_current_vcpu[corres]: lemma vcpuInvalidateActive_corres[corres]: "corres dc \ no_0_obj' vcpu_invalidate_active vcpuInvalidateActive" unfolding vcpuInvalidateActive_def vcpu_invalidate_active_def - apply (corressimp corres: vcpuDisable_corres + apply (corresKsimp corres: vcpuDisable_corres corresK: corresK_modifyT simp: modifyArchState_def) apply (clarsimp simp: state_relation_def arch_state_relation_def) @@ -1497,16 +1302,19 @@ lemma tcb_ko_at': lemma archThreadSet_corres: "(\a a'. arch_tcb_relation a a' \ arch_tcb_relation (f a) (f' a')) \ - corres dc (tcb_at t) (tcb_at' t) (arch_thread_set f t) (archThreadSet f' t)" + corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_set f t) (archThreadSet f' t)" apply (simp add: arch_thread_set_def archThreadSet_def) - apply (corres corres: get_tcb_corres setObject_update_TCB_corres') + apply (corresK corres: getObject_TCB_corres setObject_update_TCB_corres') apply wpsimp+ apply (auto simp add: tcb_relation_def tcb_cap_cases_def tcb_cte_cases_def exst_same_def)+ done lemma archThreadSet_VCPU_None_corres[corres]: - "t = t' \ corres dc (tcb_at t) (tcb_at' t') - (arch_thread_set (tcb_vcpu_update Map.empty) t) (archThreadSet (atcbVCPUPtr_update Map.empty) t')" + "t = t' \ + corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_set (tcb_vcpu_update Map.empty) t) + (archThreadSet (atcbVCPUPtr_update Map.empty) t')" apply simp apply (rule archThreadSet_corres) apply (simp add: arch_tcb_relation_def) @@ -1522,15 +1330,17 @@ lemmas corresK_as_user' = asUser_corres'[atomized, THEN corresK_lift_rule, THEN mp] lemma asUser_sanitiseRegister_corres[corres]: - "b=b' \ t = t' \ corres dc (tcb_at t) (tcb_at' t') - (as_user t (do cpsr \ getRegister CPSR; - setRegister CPSR (sanitise_register b CPSR cpsr) - od)) - (asUser t' (do cpsr \ getRegister CPSR; - setRegister CPSR (sanitiseRegister b' CPSR cpsr) - od))" + "b=b' \ t = t' \ + corres dc + (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (do cpsr \ getRegister CPSR; + setRegister CPSR (sanitise_register b CPSR cpsr) + od)) + (asUser t' (do cpsr \ getRegister CPSR; + setRegister CPSR (sanitiseRegister b' CPSR cpsr) + od))" unfolding sanitiseRegister_def sanitise_register_def - apply (corressimp corresK: corresK_as_user') + apply (corresKsimp corresK: corresK_as_user') done crunch typ_at'[wp]: vcpuInvalidateActive "\s. P (typ_at' T p s)" @@ -1547,23 +1357,22 @@ lemma imp_drop_strg: lemma dissociateVCPUTCB_corres [@lift_corres_args, corres]: "corres dc (obj_at (\ko. \tcb. ko = TCB tcb \ tcb_vcpu (tcb_arch tcb) = Some v) t and - obj_at (\ko. \vcpu. 
ko = ArchObj (VCPU vcpu) \ vcpu_tcb vcpu = Some t) v) - (tcb_at' t and vcpu_at' v and no_0_obj') + obj_at (\ko. \vcpu. ko = ArchObj (VCPU vcpu) \ vcpu_tcb vcpu = Some t) v and + pspace_aligned and pspace_distinct) + (vcpu_at' v and no_0_obj') (dissociate_vcpu_tcb v t) (dissociateVCPUTCB v t)" unfolding dissociate_vcpu_tcb_def dissociateVCPUTCB_def - apply (clarsimp simp: bind_assoc when_fail_assert opt_case_when) - apply (corressimp corres: getObject_vcpu_corres setObject_VCPU_corres get_tcb_corres) - apply (wpsimp wp: arch_thread_get_wp - simp: archThreadSet_def tcb_ko_at' tcb_at_typ_at' - | strengthen imp_drop_strg[where Q="tcb_at t s" for s] - imp_drop_strg[where Q="vcpu_at' v s \ typ_at' TCBT t s" for s] - | corres_rv)+ - apply (corressimp wp: get_vcpu_wp getVCPU_wp getObject_tcb_wp arch_thread_get_wp corres_rv_wp_left - simp: archThreadGet_def tcb_ko_at')+ + apply (clarsimp simp: when_fail_assert opt_case_when) + apply (corresKsimp corres: getObject_vcpu_corres setObject_VCPU_corres getObject_TCB_corres) + apply (wpsimp wp: arch_thread_get_wp + simp: archThreadSet_def tcb_ko_at' tcb_at_typ_at' + | corresK_rv)+ + apply (corresKsimp wp: get_vcpu_wp getVCPU_wp getObject_tcb_wp arch_thread_get_wp + simp: archThreadGet_def tcb_ko_at')+ apply (clarsimp simp: typ_at_tcb' typ_at_to_obj_at_arches) apply normalise_obj_at' apply (clarsimp simp: obj_at_def is_tcb vcpu_relation_def tcb_relation_def - arch_tcb_relation_def vgic_map_def ) + arch_tcb_relation_def vgic_map_def obj_at'_def) done lemma sym_refs_tcb_vcpu: @@ -1581,10 +1390,13 @@ lemma prepareThreadDelete_corres: "corres dc (invs and tcb_at t) (valid_objs' and tcb_at' t and no_0_obj') (prepare_thread_delete t) (prepareThreadDelete t)" apply (simp add: prepare_thread_delete_def prepareThreadDelete_def) - apply (corressimp simp: tcb_vcpu_relation) + apply (corresKsimp simp: tcb_vcpu_relation) apply (wp arch_thread_get_wp) apply (wpsimp wp: getObject_tcb_wp simp: archThreadGet_def) apply clarsimp + apply (frule invs_psp_aligned) + apply (frule invs_distinct) + apply clarsimp apply (rule conjI) apply clarsimp apply (frule (1) sym_refs_tcb_vcpu, fastforce) @@ -1612,265 +1424,8 @@ lemma (in delete_one_conc_pre) cancelIPC_it[wp]: apply (wp hoare_drop_imps delete_one_it | wpc | simp add:if_apply_def2 Fun.comp_def)+ done -crunch ksQ: threadGet "\s. P (ksReadyQueues s p)" - -lemma tcbSchedDequeue_notksQ: - "\\s. t' \ set(ksReadyQueues s p)\ - tcbSchedDequeue t - \\_ s. t' \ set(ksReadyQueues s p)\" - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply wp+ - apply clarsimp - apply (rule_tac Q="\_ s. t' \ set(ksReadyQueues s p)" in hoare_post_imp) - apply (wp | clarsimp)+ - done - -lemma rescheduleRequired_oa_queued: - "\ (\s. P (obj_at' (\tcb. Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)) and sch_act_simple\ - rescheduleRequired - \\_ s. P (obj_at' (\tcb. Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)\" - (is "\?OAQ t' p and sch_act_simple\ _ \_\") - apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) - \ ?OAQ t' p s" in hoare_seq_ext) - including no_pre - apply (wp | clarsimp)+ - apply (case_tac x) - apply (wp | clarsimp)+ - done - -lemma setThreadState_oa_queued: - "\\s. P' (obj_at' (\tcb. 
P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \ - setThreadState st t - \\_ s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \" - (is "\\s. P' (?Q P s)\ _ \\_ s. P' (?Q P s)\") - proof (rule P_bool_lift [where P=P']) - show pos: - "\R. \ ?Q R \ setThreadState st t \\_. ?Q R \" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_oa_queued) - apply (simp add: sch_act_simple_def) - apply (rule_tac Q="\_. ?Q R" in hoare_post_imp, clarsimp) - apply (wp threadSet_obj_at'_strongish) - apply (clarsimp) - done - show "\\s. \ ?Q P s\ setThreadState st t \\_ s. \ ?Q P s\" - by (simp add: not_obj_at' comp_def, wp hoare_convert_imp pos) - qed - -lemma setBoundNotification_oa_queued: - "\\s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \ - setBoundNotification ntfn t - \\_ s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \" - (is "\\s. P' (?Q P s)\ _ \\_ s. P' (?Q P s)\") - proof (rule P_bool_lift [where P=P']) - show pos: - "\R. \ ?Q R \ setBoundNotification ntfn t \\_. ?Q R \" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_obj_at'_strongish) - apply (clarsimp) - done - show "\\s. \ ?Q P s\ setBoundNotification ntfn t \\_ s. \ ?Q P s\" - by (simp add: not_obj_at' comp_def, wp hoare_convert_imp pos) - qed - -lemma tcbSchedDequeue_ksQ_distinct[wp]: - "\\s. distinct (ksReadyQueues s p)\ - tcbSchedDequeue t - \\_ s. distinct (ksReadyQueues s p)\" - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply wp+ - apply (rule_tac Q="\_ s. distinct (ksReadyQueues s p)" in hoare_post_imp) - apply (clarsimp | wp)+ - done - -lemma sts_valid_queues_partial: - "\Invariants_H.valid_queues and sch_act_simple\ - setThreadState st t - \\_ s. \t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p))\" - (is "\_\ _ \\_ s. \t' d p. ?OA t' d p s \ ?DISTINCT d p s \") - apply (rule_tac Q="\_ s. (\t' d p. ?OA t' d p s) \ (\d p. ?DISTINCT d p s)" - in hoare_post_imp) - apply (clarsimp) - apply (rule hoare_conjI) - apply (rule_tac Q="\s. \t' d p. - ((t'\set(ksReadyQueues s (d, p)) - \ \ (sch_act_simple s)) - \ (obj_at'(\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ st_tcb_at' runnable' t' s))" in hoare_pre_imp) - apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def - pred_tcb_at'_def obj_at'_def inQ_def) - apply (rule hoare_vcg_all_lift)+ - apply (rule hoare_convert_imp) - including no_pre - apply (wp sts_ksQ setThreadState_oa_queued hoare_impI sts_pred_tcb_neq' - | clarsimp)+ - apply (rule_tac Q="\s. \d p. ?DISTINCT d p s \ sch_act_simple s" in hoare_pre_imp) - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (wp hoare_vcg_all_lift sts_ksQ) - apply (clarsimp) - done - -lemma tcbSchedDequeue_t_notksQ: - "\\s. t \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\ - tcbSchedDequeue t - \\_ s. t \ set (ksReadyQueues s (d, p))\" - apply (rule_tac Q="(\s. t \ set (ksReadyQueues s (d, p))) - or obj_at'(\tcb. 
tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t" - in hoare_pre_imp, clarsimp) - apply (rule hoare_pre_disj) - apply (wp tcbSchedDequeue_notksQ)[1] - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply (wp threadGet_wp)+ - apply (auto simp: obj_at'_real_def ko_wp_at'_def) - done - -lemma sts_invs_minor'_no_valid_queues: - "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st - \ (st \ Inactive \ \ idle' st \ - st' \ Inactive \ \ idle' st')) t - and (\s. t = ksIdleThread s \ idle' st) - and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) - and sch_act_simple - and invs'\ - setThreadState st t - \\_ s. (\t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ - valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ - bitmapQ_no_L1_orphans s \ - valid_pspace' s \ - sch_act_wf (ksSchedulerAction s) s \ - sym_refs (state_refs_of' s) \ - sym_refs (state_hyp_refs_of' s) \ - if_live_then_nonz_cap' s \ - if_unsafe_then_cap' s \ - valid_idle' s \ - valid_global_refs' s \ - valid_arch_state' s \ - valid_irq_node' (irq_node' s) s \ - valid_irq_handlers' s \ - valid_irq_states' s \ - valid_machine_state' s \ - irqs_masked' s \ - valid_queues' s \ - ct_not_inQ s \ - ct_idle_or_in_cur_domain' s \ - valid_pde_mappings' s \ - pspace_domain_valid s \ - ksCurDomain s \ maxDomain \ - valid_dom_schedule' s \ - untyped_ranges_zero' s \ - cur_tcb' s \ - tcb_at' t s\" - apply (simp add: invs'_def valid_state'_def valid_queues_def) - apply (wp sts_valid_queues_partial sts_ksQ - setThreadState_oa_queued sts_st_tcb_at'_cases - irqs_masked_lift - valid_irq_node_lift - setThreadState_ct_not_inQ - sts_valid_bitmapQ_sch_act_simple - sts_valid_bitmapQ_no_L2_orphans_sch_act_simple - sts_valid_bitmapQ_no_L1_orphans_sch_act_simple - hoare_vcg_conj_lift hoare_vcg_imp_lift hoare_vcg_all_lift)+ - apply (clarsimp simp: disj_imp) - apply (intro conjI) - apply (clarsimp simp: valid_queues_def) - apply (rule conjI, clarsimp) - apply (drule valid_queues_no_bitmap_objD, assumption) - apply (clarsimp simp: inQ_def comp_def) - apply (rule conjI) - apply (erule obj_at'_weaken) - apply (simp add: inQ_def) - apply (clarsimp simp: st_tcb_at'_def) - apply (erule obj_at'_weaken) - apply (simp add: inQ_def) - apply (simp add: valid_queues_no_bitmap_def) - apply clarsimp - apply (clarsimp simp: st_tcb_at'_def) - apply (drule obj_at_valid_objs') - apply (clarsimp simp: valid_pspace'_def) - apply (clarsimp simp: valid_obj'_def valid_tcb'_def projectKOs) - subgoal - by (fastforce simp: valid_tcb_state'_def - split: Structures_H.thread_state.splits) - apply (clarsimp dest!: st_tcb_at_state_refs_ofD' - elim!: rsubst[where P=sym_refs] - intro!: ext) - apply (fastforce simp: valid_queues_def inQ_def pred_tcb_at' pred_tcb_at'_def - elim!: st_tcb_ex_cap'' obj_at'_weakenE)+ - done - crunch ct_idle_or_in_cur_domain'[wp]: tcbSchedDequeue ct_idle_or_in_cur_domain' - -lemma tcbSchedDequeue_invs'_no_valid_queues: - "\\s. (\t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. 
tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ - valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ - bitmapQ_no_L1_orphans s \ - valid_pspace' s \ - sch_act_wf (ksSchedulerAction s) s \ - sym_refs (state_refs_of' s) \ - sym_refs (state_hyp_refs_of' s) \ - if_live_then_nonz_cap' s \ - if_unsafe_then_cap' s \ - valid_idle' s \ - valid_global_refs' s \ - valid_arch_state' s \ - valid_irq_node' (irq_node' s) s \ - valid_irq_handlers' s \ - valid_irq_states' s \ - valid_machine_state' s \ - irqs_masked' s \ - valid_queues' s \ - ct_not_inQ s \ - ct_idle_or_in_cur_domain' s \ - valid_pde_mappings' s \ - pspace_domain_valid s \ - ksCurDomain s \ maxDomain \ - valid_dom_schedule' s \ - untyped_ranges_zero' s \ - cur_tcb' s \ - tcb_at' t s\ - tcbSchedDequeue t - \\_. invs' \" - apply (simp add: invs'_def valid_state'_def) - apply (wp tcbSchedDequeue_valid_queues_weak valid_irq_handlers_lift - valid_irq_node_lift valid_irq_handlers_lift' - tcbSchedDequeue_irq_states irqs_masked_lift cur_tcb_lift - untyped_ranges_zero_lift - | clarsimp simp add: cteCaps_of_def valid_queues_def o_def)+ - apply (rule conjI) - apply (fastforce simp: obj_at'_def inQ_def st_tcb_at'_def valid_queues_no_bitmap_except_def) - apply (rule conjI, clarsimp simp: correct_queue_def) - apply (fastforce simp: valid_pspace'_def intro: obj_at'_conjI - elim: valid_objs'_maxDomain valid_objs'_maxPriority) - done - -lemmas sts_tcbSchedDequeue_invs' = - sts_invs_minor'_no_valid_queues - tcbSchedDequeue_invs'_no_valid_queues + (wp: crunch_wps) lemma asUser_sch_act_simple[wp]: "\sch_act_simple\ asUser s t \\_. sch_act_simple\" @@ -1882,11 +1437,14 @@ lemma (in delete_one_conc) suspend_invs'[wp]: "\invs' and sch_act_simple and tcb_at' t and (\s. t \ ksIdleThread s)\ ThreadDecls_H.suspend t \\rv. invs'\" apply (simp add: suspend_def) - apply (wp sts_tcbSchedDequeue_invs') - apply (simp add: updateRestartPC_def | strengthen no_refs_simple_strg')+ - prefer 2 - apply (wpsimp wp: hoare_drop_imps hoare_vcg_imp_lift' - | strengthen no_refs_simple_strg')+ + apply (wpsimp wp: sts_invs_minor' gts_wp' simp: updateRestartPC_def + | strengthen no_refs_simple_strg')+ + apply (rule_tac Q="\_. invs' and sch_act_simple and st_tcb_at' simple' t + and (\s. t \ ksIdleThread s)" + in hoare_post_imp) + apply clarsimp + apply wpsimp + apply (fastforce elim: pred_tcb'_weakenE) done lemma (in delete_one_conc_pre) suspend_tcb'[wp]: @@ -1931,109 +1489,6 @@ lemma (in delete_one_conc_pre) suspend_st_tcb_at': lemmas (in delete_one_conc_pre) suspend_makes_simple' = suspend_st_tcb_at' [where P=simple', simplified] -lemma valid_queues_not_runnable'_not_ksQ: - assumes "Invariants_H.valid_queues s" and "st_tcb_at' (Not \ runnable') t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" - using assms - apply - - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def pred_tcb_at'_def) - apply (erule_tac x=d in allE) - apply (erule_tac x=p in allE) - apply (clarsimp) - apply (drule(1) bspec) - apply (clarsimp simp: obj_at'_def) - done - -declare valid_queues_not_runnable'_not_ksQ[OF ByAssum, simp] - -lemma cancelSignal_queues[wp]: - "\Invariants_H.valid_queues and st_tcb_at' (Not \ runnable') t\ - cancelSignal t ae \\_. Invariants_H.valid_queues \" - apply (simp add: cancelSignal_def) - apply (wp sts_valid_queues) - apply (rule_tac Q="\_ s. \p. 
t \ set (ksReadyQueues s p)" in hoare_post_imp, simp) - apply (wp hoare_vcg_all_lift) - apply (wpc) - apply (wp)+ - apply (rule_tac Q="\_ s. Invariants_H.valid_queues s \ (\p. t \ set (ksReadyQueues s p))" in hoare_post_imp) - apply (clarsimp) - apply (wp) - apply (clarsimp) - done - -lemma (in delete_one_conc_pre) cancelIPC_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelIPC t \\rv. Invariants_H.valid_queues\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def - cong: Structures_H.thread_state.case_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (rule hoare_pre) - apply (wpc - | wp hoare_vcg_conj_lift delete_one_queues threadSet_valid_queues - threadSet_valid_objs' sts_valid_queues setEndpoint_ksQ - hoare_vcg_all_lift threadSet_sch_act threadSet_weak_sch_act_wf - | simp add: o_def if_apply_def2 inQ_def - | rule hoare_drop_imps - | clarsimp simp: valid_tcb'_def tcb_cte_cases_def - elim!: pred_tcb'_weakenE)+ - apply (fastforce dest: valid_queues_not_runnable'_not_ksQ elim: pred_tcb'_weakenE) - done - -(* FIXME: move to Schedule_R *) -lemma tcbSchedDequeue_nonq[wp]: - "\Invariants_H.valid_queues and tcb_at' t and K (t = t')\ - tcbSchedDequeue t \\_ s. \d p. t' \ set (ksReadyQueues s (d, p))\" - apply (rule hoare_gen_asm) - apply (simp add: tcbSchedDequeue_def) - apply (wp threadGet_wp|simp)+ - apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def obj_at'_def projectKOs inQ_def) - done - -lemma sts_ksQ_oaQ: - "\Invariants_H.valid_queues\ - setThreadState st t - \\_ s. t \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\" - (is "\_\ _ \\_. ?POST\") - proof - - have RR: "\sch_act_simple and ?POST\ rescheduleRequired \\_. ?POST\" - apply (simp add: rescheduleRequired_def) - apply (wp) - apply (clarsimp) - apply (rule_tac - Q="(\s. action = ResumeCurrentThread \ action = ChooseNewThread) and ?POST" - in hoare_pre_imp, assumption) - apply (case_tac action) - apply (clarsimp)+ - apply (wp) - apply (clarsimp simp: sch_act_simple_def) - done - show ?thesis - apply (simp add: setThreadState_def) - apply (wp RR) - apply (rule_tac Q="\_. ?POST" in hoare_post_imp) - apply (clarsimp simp add: sch_act_simple_def) - apply (wp hoare_convert_imp) - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (fastforce dest: bspec elim!: obj_at'_weakenE simp: inQ_def) - done - qed - -lemma (in delete_one_conc_pre) suspend_nonq: - "\Invariants_H.valid_queues and valid_objs' and tcb_at' t - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and (\s. t \ ksIdleThread s) and K (t = t')\ - suspend t - \\rv s. \d p. t' \ set (ksReadyQueues s (d, p))\" - apply (rule hoare_gen_asm) - apply (simp add: suspend_def unless_def) - unfolding updateRestartPC_def - apply (wp hoare_allI tcbSchedDequeue_t_notksQ sts_ksQ_oaQ) - apply wpsimp+ - done - lemma suspend_makes_inactive: "\K (t = t')\ suspend t \\rv. st_tcb_at' ((=) Inactive) t'\" apply (cases "t = t'", simp_all) @@ -2044,31 +1499,21 @@ lemma suspend_makes_inactive: declare threadSet_sch_act_sane [wp] declare sts_sch_act_sane [wp] -lemma tcbSchedEnqueue_ksQset_weak: - "\\s. t' \ set (ksReadyQueues s p)\ - tcbSchedEnqueue t - \\_ s. 
t' \ set (ksReadyQueues s p)\" (is "\?PRE\ _ \_\") - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift hoare_vcg_if_lift) - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, ((wp | clarsimp)+))+ - done - lemma tcbSchedEnqueue_sch_act_not_ct[wp]: "\\s. sch_act_not (ksCurThread s) s\ tcbSchedEnqueue t \\_ s. sch_act_not (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + by (rule hoare_weaken_pre, wps, wp, simp) lemma sts_sch_act_not_ct[wp]: "\\s. sch_act_not (ksCurThread s) s\ setThreadState st t \\_ s. sch_act_not (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + by (rule hoare_weaken_pre, wps, wp, simp) text \Cancelling all IPC in an endpoint or notification object\ lemma ep_cancel_corres_helper: - "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs) - ((\s. \t \ set list. tcb_at' t s) - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and Invariants_H.valid_queues and valid_queues' and valid_objs') + "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs and valid_queues + and pspace_aligned and pspace_distinct) + (valid_objs' and sym_heap_sched_pointers and valid_sched_pointers) (mapM_x (\t. do y \ set_thread_state t Structures_A.Restart; tcb_sched_action tcb_sched_enqueue t @@ -2077,28 +1522,34 @@ lemma ep_cancel_corres_helper: y \ setThreadState Structures_H.thread_state.Restart t; tcbSchedEnqueue t od) list)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) apply (rule_tac S="{t. (fst t = snd t) \ fst t \ set list}" in corres_mapM_x) apply clarsimp apply (rule corres_guard_imp) apply (subst bind_return_unit, rule corres_split[OF _ tcbSchedEnqueue_corres]) + apply simp + apply (rule corres_guard_imp [OF setThreadState_corres]) + apply simp + apply (simp add: valid_tcb_state_def) + apply simp apply simp - apply (rule corres_guard_imp [OF setThreadState_corres]) - apply simp - apply (simp add: valid_tcb_state_def) - apply simp - apply (wp sts_valid_queues)+ - apply (force simp: tcb_at_is_etcb_at) - apply (fastforce elim: obj_at'_weakenE) - apply ((wp hoare_vcg_const_Ball_lift | simp)+)[1] - apply (rule hoare_pre) - apply (wp hoare_vcg_const_Ball_lift - weak_sch_act_wf_lift_linear sts_st_tcb' setThreadState_not_st - sts_valid_queues tcbSchedEnqueue_not_st - | simp)+ - apply (auto elim: obj_at'_weakenE simp: valid_tcb_state'_def) + apply (wpsimp wp: sts_st_tcb_at') + apply (wpsimp wp: sts_valid_objs' | strengthen valid_objs'_valid_tcbs')+ + apply fastforce + apply (wpsimp wp: hoare_vcg_const_Ball_lift set_thread_state_runnable_valid_queues + sts_st_tcb_at' sts_valid_objs' + simp: valid_tcb_state'_def)+ done +crunches set_simple_ko + for ready_qs_distinct[wp]: ready_qs_distinct + and in_correct_ready_q[wp]: in_correct_ready_q + (rule: ready_qs_distinct_lift wp: crunch_wps) + lemma ep_cancel_corres: "corres dc (invs and valid_sched and ep_at ep) (invs' and ep_at' ep) (cancel_all_ipc ep) (cancelAllIPC ep)" @@ -2106,10 +1557,10 @@ proof - have P: "\list. corres dc (\s. (\t \ set list. tcb_at t s) \ valid_pspace s \ ep_at ep s - \ valid_etcbs s \ weak_valid_sched_action s) + \ valid_etcbs s \ weak_valid_sched_action s \ valid_queues s) (\s. (\t \ set list. 
tcb_at' t s) \ valid_pspace' s \ ep_at' ep s \ weak_sch_act_wf (ksSchedulerAction s) s - \ Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s) + \ valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s) (do x \ set_endpoint ep Structures_A.IdleEP; x \ mapM_x (\t. do y \ set_thread_state t Structures_A.Restart; @@ -2131,22 +1582,23 @@ proof - apply (rule ep_cancel_corres_helper) apply (rule mapM_x_wp') apply (wp weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (rule_tac R="\_ s. \x\set list. tcb_at' x s \ valid_objs' s" + apply (rule_tac R="\_ s. \x\set list. tcb_at' x s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s" in hoare_post_add) apply (rule mapM_x_wp') - apply (rule hoare_name_pre_state) - apply ((wp hoare_vcg_const_Ball_lift mapM_x_wp' - sts_valid_queues setThreadState_not_st sts_st_tcb' tcbSchedEnqueue_not_st - | clarsimp - | fastforce elim: obj_at'_weakenE simp: valid_tcb_state'_def)+)[2] - apply (rule hoare_name_pre_state) + apply ((wpsimp wp: hoare_vcg_const_Ball_lift mapM_x_wp' sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+)[3] + apply fastforce apply (wp hoare_vcg_const_Ball_lift set_ep_valid_objs' - | (clarsimp simp: valid_ep'_def) - | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def elim!: valid_objs_valid_tcbE))+ + | (clarsimp simp: valid_ep'_def) + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def + | strengthen valid_objs'_valid_tcbs'))+ done show ?thesis apply (simp add: cancel_all_ipc_def cancelAllIPC_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ep_sp']) apply (rule corres_guard_imp [OF getEndpoint_corres], simp+) apply (case_tac epa, simp_all add: ep_relation_def @@ -2173,7 +1625,10 @@ lemma set_ntfn_tcb_obj_at' [wp]: lemma cancelAllSignals_corres: "corres dc (invs and valid_sched and ntfn_at ntfn) (invs' and ntfn_at' ntfn) (cancel_all_signals ntfn) (cancelAllSignals ntfn)" + supply projectKOs[simp] apply (simp add: cancel_all_signals_def cancelAllSignals_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ntfn_sp']) apply (rule corres_guard_imp [OF getNotification_corres]) apply simp+ @@ -2184,22 +1639,27 @@ lemma cancelAllSignals_corres: apply (rule corres_split[OF _ rescheduleRequired_corres]) apply (rule ep_cancel_corres_helper) apply (wp mapM_x_wp'[where 'b="det_ext state"] - weak_sch_act_wf_lift_linear setThreadState_not_st + weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ apply (rename_tac list) - apply (rule_tac R="\_ s. (\x\set list. tcb_at' x s) \ valid_objs' s" + apply (rule_tac R="\_ s. (\x\set list. 
tcb_at' x s) \ valid_objs' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_objs' s + \ pspace_aligned' s \ pspace_distinct' s" in hoare_post_add) apply (rule mapM_x_wp') apply (rule hoare_name_pre_state) - apply (wpsimp wp: hoare_vcg_const_Ball_lift - sts_st_tcb' sts_valid_queues setThreadState_not_st - simp: valid_tcb_state'_def) + apply (wpsimp wp: hoare_vcg_const_Ball_lift sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+ apply (wp hoare_vcg_const_Ball_lift set_ntfn_aligned' set_ntfn_valid_objs' weak_sch_act_wf_lift_linear | simp)+ - apply (clarsimp simp: invs'_def valid_state'_def invs_valid_pspace valid_obj_def valid_ntfn_def invs_weak_sch_act_wf valid_ntfn'_def valid_pspace'_def - valid_sched_def valid_sched_action_def valid_obj'_def projectKOs | erule obj_at_valid_objsE | drule ko_at_valid_objs')+ + apply (clarsimp simp: invs'_def valid_state'_def invs_valid_pspace valid_obj_def valid_ntfn_def + invs_weak_sch_act_wf valid_ntfn'_def valid_pspace'_def valid_sched_def + valid_sched_action_def valid_obj'_def + | erule obj_at_valid_objsE | drule ko_at_valid_objs' + | fastforce)+ done lemma ep'_Idle_case_helper: @@ -2238,6 +1698,11 @@ proof - done qed +lemma tcbSchedEnqueue_valid_pspace'[wp]: + "tcbSchedEnqueue tcbPtr \valid_pspace'\" + unfolding valid_pspace'_def + by wpsimp + lemma cancel_all_invs'_helper: "\all_invs_but_sym_refs_ct_not_inQ' and (\s. \x \ set q. tcb_at' x s) and (\s. sym_refs (\x. if x \ set q then {r \ state_refs_of' s x. snd r = TCBBound} @@ -2253,8 +1718,7 @@ lemma cancel_all_invs'_helper: apply clarsimp apply (rule hoare_pre) apply (wp valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift - hoare_vcg_const_Ball_lift untyped_ranges_zero_lift - sts_valid_queues sts_st_tcb' setThreadState_not_st + hoare_vcg_const_Ball_lift untyped_ranges_zero_lift sts_st_tcb' sts_valid_objs' | simp add: cteCaps_of_def o_def)+ apply (unfold fun_upd_apply Invariants_H.tcb_st_refs_of'_simps) apply clarsimp @@ -2263,7 +1727,7 @@ lemma cancel_all_invs'_helper: elim!: rsubst[where P=sym_refs] dest!: set_mono_suffix intro!: ext - | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE))+ + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def))+ done lemma ep_q_refs_max: @@ -2279,22 +1743,10 @@ lemma ep_q_refs_max: | case_tac ntfnptr)+ done -crunch ct' [wp]: setEndpoint "\s. P (ksCurThread s)" - (wp: setObject_ep_ct) - -crunch ct' [wp]: setNotification "\s. P (ksCurThread s)" - (wp: setObject_ntfn_ct) - -lemma tcbSchedEnqueue_cur_tcb'[wp]: - "\cur_tcb'\ tcbSchedEnqueue t \\_. cur_tcb'\" - by (simp add: tcbSchedEnqueue_def unless_def) - (wp threadSet_cur setQueue_cur | simp)+ - lemma rescheduleRequired_invs'[wp]: - "\invs'\ rescheduleRequired \\rv. invs'\" + "rescheduleRequired \invs'\" apply (simp add: rescheduleRequired_def) - apply (wp ssa_invs' | simp add: invs'_update_cnt | wpc)+ - apply (clarsimp simp: invs'_def valid_state'_def) + apply (wpsimp wp: ssa_invs') done lemma invs_rct_ct_activatable': @@ -2421,6 +1873,7 @@ lemma rescheduleRequired_all_invs_but_ct_not_inQ: lemma cancelAllIPC_invs'[wp]: "\invs'\ cancelAllIPC ep_ptr \\rv. 
invs'\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) + apply (rule bind_wp[OF _ stateAssert_sp]) apply (wp rescheduleRequired_all_invs_but_ct_not_inQ cancel_all_invs'_helper hoare_vcg_const_Ball_lift valid_global_refs_lift' getEndpoint_wp @@ -2444,7 +1897,8 @@ lemma cancelAllIPC_invs'[wp]: lemma cancelAllSignals_invs'[wp]: "\invs'\ cancelAllSignals ntfn \\rv. invs'\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfna", simp_all) apply (wp, simp) apply (wp, simp) @@ -2478,12 +1932,14 @@ crunch valid_objs'[wp]: tcbSchedEnqueue valid_objs' (simp: unless_def valid_tcb'_def tcb_cte_cases_def) lemma cancelAllIPC_valid_objs'[wp]: - "\valid_objs'\ cancelAllIPC ep \\rv. valid_objs'\" + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllIPC ep \\rv. valid_objs'\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) apply (rule hoare_pre) apply (wp set_ep_valid_objs' setSchedulerAction_valid_objs') - apply (rule_tac Q="\rv s. valid_objs' s \ (\x\set (epQueue ep). tcb_at' x s)" + apply (rule_tac Q="\_ s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ (\x\set (epQueue ep). tcb_at' x s)" in hoare_post_imp) apply simp apply (simp add: Ball_def) @@ -2500,9 +1956,10 @@ lemma cancelAllIPC_valid_objs'[wp]: done lemma cancelAllSignals_valid_objs'[wp]: - "\valid_objs'\ cancelAllSignals ntfn \\rv. valid_objs'\" + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllSignals ntfn \\rv. valid_objs'\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfna", simp_all) apply (wp, simp) apply (wp, simp) @@ -2554,19 +2011,17 @@ lemma setThreadState_not_tcb[wp]: "\ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\ setThreadState st t \\rv. ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\" - apply (simp add: setThreadState_def setQueue_def - rescheduleRequired_def tcbSchedEnqueue_def - unless_def bitmap_fun_defs - cong: scheduler_action.case_cong cong del: if_cong - | wp | wpcw)+ - done + by (wpsimp wp: isRunnable_inv threadGet_wp hoare_drop_imps + simp: setThreadState_def setQueue_def + rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + unless_def bitmap_fun_defs)+ lemma tcbSchedEnqueue_unlive: "\ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p and tcb_at' t\ tcbSchedEnqueue t \\_. ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p\" - apply (simp add: tcbSchedEnqueue_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) apply (wp | simp add: setQueue_def bitmap_fun_defs)+ done @@ -2600,19 +2055,42 @@ lemma setObject_ko_wp_at': objBits_def[symmetric] ps_clear_upd in_magnitude_check v projectKOs) -lemma rescheduleRequired_unlive: - "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ - rescheduleRequired +lemma threadSet_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + threadSet f t \\rv. 
ko_wp_at' (Not \ live') p\" - apply (simp add: rescheduleRequired_def) - apply (wp | simp | wpc)+ - apply (simp add: tcbSchedEnqueue_def unless_def - threadSet_def setQueue_def threadGet_def) - apply (wp setObject_ko_wp_at getObject_tcb_wp - | simp add: objBits_simps' bitmap_fun_defs split del: if_split)+ - apply (clarsimp simp: o_def) - apply (drule obj_at_ko_at') - apply clarsimp + by (clarsimp simp: threadSet_def valid_def getObject_def + setObject_def in_monad loadObject_default_def + ko_wp_at'_def split_def in_magnitude_check + objBits_simps' updateObject_default_def + ps_clear_upd live'_def projectKOs) + +lemma tcbSchedEnqueue_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + tcbSchedEnqueue t + \\_. ko_wp_at' (Not \ live') p\" + supply projectKOs[simp] + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def) + apply (wpsimp wp: threadGet_wp threadSet_unlive_other simp: bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (frule (1) tcbQueueHead_ksReadyQueues) + apply (drule_tac x=p in spec) + apply (fastforce dest!: inQ_implies_tcbQueueds_of + simp: tcbQueueEmpty_def ko_wp_at'_def opt_pred_def opt_map_def live'_def + split: option.splits) + done + +lemma rescheduleRequired_unlive[wp]: + "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ + rescheduleRequired + \\_. ko_wp_at' (Not \ live') p\" + supply comp_apply[simp del] + unfolding rescheduleRequired_def + apply (wpsimp wp: tcbSchedEnqueue_unlive_other) done lemmas setEndpoint_ko_wp_at' @@ -2622,7 +2100,8 @@ lemma cancelAllIPC_unlive: "\valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s)\ cancelAllIPC ep \\rv. ko_wp_at' (Not \ live') ep\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) apply (rule hoare_pre) apply (wp cancelAll_unlive_helper setEndpoint_ko_wp_at' hoare_vcg_const_Ball_lift rescheduleRequired_unlive @@ -2641,7 +2120,8 @@ lemma cancelAllSignals_unlive: \ obj_at' (\ko. ntfnBoundTCB ko = None) ntfnptr s\ cancelAllSignals ntfnptr \\rv. 
ko_wp_at' (Not \ live') ntfnptr\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfn", simp_all add: setNotification_def) apply wp apply (fastforce simp: obj_at'_real_def projectKOs live'_def @@ -2703,30 +2183,26 @@ lemma cancelBadgedSends_filterM_helper': apply wp apply clarsimp apply (clarsimp simp: filterM_append bind_assoc simp del: set_append distinct_append) - apply (drule spec, erule hoare_seq_ext[rotated]) - apply (rule hoare_seq_ext [OF _ gts_inv']) + apply (drule spec, erule bind_wp_fwd) + apply (rule bind_wp [OF _ gts_inv']) apply (rule hoare_pre) apply (wp valid_irq_node_lift hoare_vcg_const_Ball_lift sts_sch_act' sch_act_wf_lift valid_irq_handlers_lift'' cur_tcb_lift irqs_masked_lift - sts_st_tcb' sts_valid_queues setThreadState_not_st - tcbSchedEnqueue_not_st - untyped_ranges_zero_lift + sts_st_tcb' untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ apply (frule insert_eqD, frule state_refs_of'_elemD) apply (clarsimp simp: valid_tcb_state'_def st_tcb_at_refs_of_rev') apply (frule pred_tcb_at') apply (rule conjI[rotated], blast) - apply clarsimp + apply (clarsimp simp: valid_pspace'_def cong: conj_cong) apply (intro conjI) - apply (clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE dest!: st_tcb_ex_cap'') - apply (fastforce dest!: st_tcb_ex_cap'') + apply (fastforce simp: valid_tcb'_def dest!: st_tcb_ex_cap'') apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) apply (erule delta_sym_refs) - apply (fastforce elim!: obj_atE' - simp: state_refs_of'_def projectKOs tcb_bound_refs'_def - subsetD symreftype_inverse' - split: if_split_asm)+ - done + by (fastforce elim!: obj_atE' + simp: state_refs_of'_def projectKOs tcb_bound_refs'_def + subsetD symreftype_inverse' + split: if_split_asm)+ lemmas cancelBadgedSends_filterM_helper = spec [where x=Nil, OF cancelBadgedSends_filterM_helper', simplified] @@ -2736,12 +2212,13 @@ lemma cancelBadgedSends_invs[wp]: shows "\invs'\ cancelBadgedSends epptr badge \\rv. invs'\" apply (simp add: cancelBadgedSends_def) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp'], rename_tac ep) apply (case_tac ep, simp_all) apply ((wp | simp)+)[2] apply (subst bind_assoc [where g="\_. rescheduleRequired", symmetric])+ - apply (rule hoare_seq_ext + apply (rule bind_wp [OF rescheduleRequired_all_invs_but_ct_not_inQ]) apply (simp add: list_case_return cong: list.case_cong) apply (rule hoare_pre, wp valid_irq_node_lift irqs_masked_lift) @@ -2770,10 +2247,21 @@ crunches tcb_sched_action and state_hyp_refs_of[wp]: "\s. 
P (state_hyp_refs_of s)" (ignore_del: tcb_sched_action) +lemma setEndpoint_valid_tcbs'[wp]: + "setEndpoint ePtr val \valid_tcbs'\" + supply projectKOs[simp] + unfolding setEndpoint_def + apply (wpsimp wp: setObject_valid_tcbs'[where P=\]) + apply (clarsimp simp: updateObject_default_def monad_simps) + apply fastforce + done + lemma cancelBadgedSends_corres: "corres dc (invs and valid_sched and ep_at epptr) (invs' and ep_at' epptr) (cancel_badged_sends epptr bdg) (cancelBadgedSends epptr bdg)" apply (simp add: cancel_badged_sends_def cancelBadgedSends_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_guard_imp) apply (rule corres_split[OF getEndpoint_corres _ get_simple_ko_sp get_ep_sp', where Q="invs and valid_sched" and Q'=invs']) @@ -2783,10 +2271,16 @@ lemma cancelBadgedSends_corres: apply (rule corres_guard_imp) apply (rule corres_split_nor[OF setEndpoint_corres]) apply (simp add: ep_relation_def) - apply (rule corres_split_eqr[OF _ _ _ hoare_post_add[where R="\_. valid_objs'"]]) + apply (rule corres_split_eqr[OF _ _ _ hoare_post_add + [where R="\_. valid_objs' and pspace_aligned' + and pspace_distinct'"]]) apply (rule_tac S="(=)" - and Q="\xs s. (\x \ set xs. (epptr, TCBBlockedSend) \ state_refs_of s x) \ distinct xs \ valid_etcbs s" - and Q'="\xs s. (\x \ set xs. tcb_at' x s) \ weak_sch_act_wf (ksSchedulerAction s) s \ Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s" + and Q="\xs s. (\x \ set xs. (epptr, TCBBlockedSend) \ state_refs_of s x) \ + distinct xs \ valid_etcbs s \ + in_correct_ready_q s \ ready_qs_distinct s \ + pspace_aligned s \ pspace_distinct s" + and Q'="\_ s. valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" in corres_mapM_list_all2[where r'="(=)"], simp_all add: list_all2_refl)[1] apply (clarsimp simp: liftM_def[symmetric] o_def) @@ -2797,55 +2291,53 @@ lemma cancelBadgedSends_corres: apply (clarsimp simp: o_def dc_def[symmetric] liftM_def) apply (rule corres_split[OF setThreadState_corres]) apply simp - apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) apply (rule corres_trivial) apply simp apply wp+ apply simp - apply (wp sts_valid_queues gts_st_tcb_at)+ + apply (wp sts_st_tcb_at' gts_st_tcb_at sts_valid_objs' + | strengthen valid_objs'_valid_tcbs')+ apply (clarsimp simp: valid_tcb_state_def tcb_at_def st_tcb_def2 st_tcb_at_refs_of_rev dest!: state_refs_of_elemD elim!: tcb_at_is_etcb_at[rotated]) - apply (simp add: is_tcb_def) - apply simp + apply (simp add: valid_tcb_state'_def) apply (wp hoare_vcg_const_Ball_lift gts_wp | clarsimp)+ - apply (wp hoare_vcg_imp_lift sts_st_tcb' sts_valid_queues + apply (wp hoare_vcg_imp_lift sts_st_tcb' sts_valid_objs' | clarsimp simp: valid_tcb_state'_def)+ apply (rule corres_split[OF _ rescheduleRequired_corres]) apply (rule setEndpoint_corres) apply (simp split: list.split add: ep_relation_def) apply (wp weak_sch_act_wf_lift_linear)+ - apply (wp gts_st_tcb_at hoare_vcg_imp_lift mapM_wp' - sts_st_tcb' sts_valid_queues - set_thread_state_runnable_weak_valid_sched_action - | clarsimp simp: valid_tcb_state'_def)+ - apply (wp hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear set_ep_valid_objs' - | simp)+ + apply (wpsimp wp: mapM_wp' set_thread_state_runnable_weak_valid_sched_action + simp: valid_tcb_state'_def) + apply ((wpsimp wp: hoare_vcg_imp_lift mapM_wp' sts_valid_objs' simp: valid_tcb_state'_def + | strengthen 
valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: set_ep_valid_objs')+ apply (clarsimp simp: conj_comms) apply (frule sym_refs_ko_atD, clarsimp+) apply (rule obj_at_valid_objsE, assumption+, clarsimp+) apply (clarsimp simp: valid_obj_def valid_ep_def valid_sched_def valid_sched_action_def) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) apply (rule conjI, erule obj_at_weakenE, clarsimp simp: is_ep) + apply (rule conjI, fastforce) apply (clarsimp simp: st_tcb_at_refs_of_rev) apply (drule(1) bspec, drule st_tcb_at_state_refs_ofD, clarsimp) apply (simp add: set_eq_subset) apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI]) - apply (drule ko_at_valid_objs', clarsimp) - apply (simp add: projectKOs) - apply (clarsimp simp: valid_obj'_def valid_ep'_def invs_weak_sch_act_wf - invs'_def valid_state'_def) + apply (fastforce simp: valid_ep'_def) done +crunches updateRestartPC + for tcb_at'[wp]: "tcb_at' t" + (simp: crunch_simps) + lemma suspend_unqueued: "\\\ suspend t \\rv. obj_at' (Not \ tcbQueued) t\" - apply (simp add: suspend_def unless_def tcbSchedDequeue_def) - apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift) - apply (simp add: threadGet_def| wp getObject_tcb_wp)+ - apply (rule hoare_strengthen_post, rule hoare_post_taut) - apply (fastforce simp: obj_at'_def projectKOs) - apply (rule hoare_post_taut) - apply wp+ - done + unfolding suspend_def + by (wpsimp simp: comp_def wp: tcbSchedDequeue_not_tcbQueued) crunch no_vcpu[wp]: vcpuInvalidateActive "obj_at' (P::'a:: no_vcpu \ bool) t" @@ -2887,7 +2379,6 @@ crunch ksQ[wp]: dissociateVCPUTCB "\s. P (ksReadyQueues s)" crunch unqueued: prepareThreadDelete "obj_at' (Not \ tcbQueued) t" crunch inactive: prepareThreadDelete "st_tcb_at' ((=) Inactive) t'" -crunch nonq: prepareThreadDelete " \s. \d p. t' \ set (ksReadyQueues s (d, p))" end end diff --git a/proof/refine/ARM_HYP/Ipc_R.thy b/proof/refine/ARM_HYP/Ipc_R.thy index 585b19c53d..7eaabf88b5 100644 --- a/proof/refine/ARM_HYP/Ipc_R.thy +++ b/proof/refine/ARM_HYP/Ipc_R.thy @@ -13,9 +13,9 @@ context begin interpretation Arch . (*FIXME: arch_split*) lemmas lookup_slot_wrapper_defs'[simp] = lookupSourceSlot_def lookupTargetSlot_def lookupPivotSlot_def -lemma getMessageInfo_corres: "corres ((=) \ message_info_map) - (tcb_at t) (tcb_at' t) - (get_message_info t) (getMessageInfo t)" +lemma getMessageInfo_corres: + "corres ((=) \ message_info_map) (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_message_info t) (getMessageInfo t)" apply (rule corres_guard_imp) apply (unfold get_message_info_def getMessageInfo_def fun_app_def) apply (simp add: ARM_HYP_H.msgInfoRegister_def @@ -275,11 +275,7 @@ lemmas unifyFailure_discard2 lemma deriveCap_not_null: "\\\ deriveCap slot cap \\rv. K (rv \ NullCap \ cap \ NullCap)\,-" apply (simp add: deriveCap_def split del: if_split) - apply (case_tac cap) - apply (simp_all add: Let_def isCap_simps) - apply wp - apply simp - done + by (case_tac cap; wpsimp simp: isCap_simps) lemma deriveCap_derived_foo: "\\s. \cap'. (cte_wp_at' (\cte. badge_derived' cap (cteCap cte) @@ -317,7 +313,7 @@ lemma cteInsert_cte_wp_at: cteInsert cap src dest \\uu. cte_wp_at' (\c. 
P (cteCap c)) p\" apply (simp add: cteInsert_def) - apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp static_imp_wp + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp | clarsimp simp: comp_def | unfold setUntypedCapAsFull_def)+ apply (drule cte_at_cte_wp_atD) @@ -361,7 +357,7 @@ lemma cteInsert_weak_cte_wp_at3: else cte_wp_at' (\c. P (cteCap c)) p s\ cteInsert cap src dest \\uu. cte_wp_at' (\c. P (cteCap c)) p\" - by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp | clarsimp simp: comp_def cteInsert_def | unfold setUntypedCapAsFull_def | auto simp: cte_wp_at'_def dest!: imp)+ @@ -483,7 +479,7 @@ next apply (rule_tac Q' ="\cap' s. (cap'\ cap.NullCap \ cte_wp_at (is_derived (cdt s) (a, b) cap') (a, b) s \ QM s cap')" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption @@ -495,13 +491,13 @@ next apply (rule_tac Q' ="\cap' s. (cap'\ capability.NullCap \ cte_wp_at' (\c. is_derived' (ctes_of s) (cte_map (a, b)) cap' (cteCap c)) (cte_map (a, b)) s \ QM s cap')" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption apply (subst imp_conjR) apply (rule hoare_vcg_conj_liftE_R) - apply (rule hoare_post_imp_R[OF deriveCap_derived]) + apply (rule hoare_strengthen_postE_R[OF deriveCap_derived]) apply (clarsimp simp:cte_wp_at_ctes_of) apply (wp deriveCap_derived_foo) apply (clarsimp simp: cte_wp_at_caps_of_state remove_rights_def @@ -581,7 +577,7 @@ lemma cteInsert_cte_cap_to': apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of) apply (rule_tac x = "cref" in exI) apply (rule conjI) @@ -605,7 +601,7 @@ lemma cteInsert_assume_Null: apply (rule hoare_name_pre_state) apply (erule impCE) apply (simp add: cteInsert_def) - apply (rule hoare_seq_ext[OF _ getCTE_sp])+ + apply (rule bind_wp[OF _ getCTE_sp])+ apply (rule hoare_name_pre_state) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule hoare_pre(1)) @@ -624,7 +620,7 @@ lemma cteInsert_weak_cte_wp_at2: apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of weak) apply auto done @@ -657,11 +653,11 @@ lemma transferCapsToSlots_presM: apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift | assumption | wpc)+ apply (rule cteInsert_assume_Null) - apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' static_imp_wp) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift static_imp_wp)+ + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at static_imp_wp + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp deriveCap_derived_foo)+ apply (thin_tac "\slots. 
PROP P slots" for P) apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def @@ -717,8 +713,7 @@ lemma transferCapsToSlots_mdb[wp]: \ transferCaps_srcs caps s\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_mdb'\" - apply (wp transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) - apply clarsimp + apply (wpsimp wp: transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) apply (frule valid_capAligned) apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def badge_derived'_def) apply wp @@ -767,14 +762,6 @@ lemma tcts_sch_act[wp]: \\rv s. sch_act_wf (ksSchedulerAction s) s\" by (wp sch_act_wf_lift tcb_in_cur_domain'_lift transferCapsToSlots_pres1) -lemma tcts_vq[wp]: - "\Invariants_H.valid_queues\ transferCapsToSlots ep buffer n caps slots mi \\rv. Invariants_H.valid_queues\" - by (wp valid_queues_lift transferCapsToSlots_pres1) - -lemma tcts_vq'[wp]: - "\valid_queues'\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_queues'\" - by (wp valid_queues_lift' transferCapsToSlots_pres1) - crunches setExtraBadge for state_refs_of'[wp]: "\s. P (state_refs_of' s)" and state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" @@ -871,7 +858,7 @@ lemma transferCapsToSlots_irq_handlers[wp]: and transferCaps_srcs caps\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_irq_handlers'\" - apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) + apply (wpsimp wp: transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) apply (clarsimp simp: is_derived'_def cte_wp_at_ctes_of badge_derived'_def) apply (erule(2) valid_irq_handlers_ctes_ofD) apply wp @@ -974,8 +961,8 @@ lemma tcts_zero_ranges[wp]: \ transferCaps_srcs caps s\ transferCapsToSlots ep buffer n caps slots mi \\rv. untyped_ranges_zero'\" - apply (wp transferCapsToSlots_presM[where emx=True and vo=True - and drv=True and pad=True]) + apply (wpsimp wp: transferCapsToSlots_presM[where emx=True and vo=True + and drv=True and pad=True]) apply (clarsimp simp: cte_wp_at_ctes_of) apply (simp add: cteCaps_of_def) apply (rule hoare_pre, wp untyped_ranges_zero_lift) @@ -996,6 +983,11 @@ crunch ksDomScheduleIdx[wp]: setExtraBadge "\s. P (ksDomScheduleIdx s)" crunch ksDomSchedule[wp]: transferCapsToSlots "\s. P (ksDomSchedule s)" crunch ksDomScheduleIdx[wp]: transferCapsToSlots "\s. P (ksDomScheduleIdx s)" +crunches transferCapsToSlots + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift) lemma transferCapsToSlots_invs[wp]: "\\s. invs' s \ distinct slots @@ -1053,7 +1045,7 @@ lemma transferCaps_corres: apply (rule corres_rel_imp, rule transferCapsToSlots_corres, simp_all add: split_def)[1] apply (case_tac info, simp) - apply (wp hoare_vcg_all_lift get_rs_cte_at static_imp_wp + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp | simp only: ball_conj_distrib)+ apply (simp add: cte_map_def tcb_cnode_index_def split_def) apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 @@ -1194,7 +1186,7 @@ lemmas copyMRs_typ_at_lifts[wp] = typ_at_lifts [OF copyMRs_typ_at'] lemma copy_mrs_invs'[wp]: "\ invs' and tcb_at' s and tcb_at' r \ copyMRs s sb r rb n \\rv. 
invs' \" - including no_pre + including classic_wp_pre apply (simp add: copyMRs_def) apply (wp dmo_invs' no_irq_mapM no_irq_storeWord| simp add: split_def) @@ -1238,18 +1230,12 @@ lemma set_mrs_valid_objs' [wp]: crunch valid_objs'[wp]: copyMRs valid_objs' (wp: crunch_wps simp: crunch_simps) -crunch valid_queues'[wp]: asUser "Invariants_H.valid_queues'" - (simp: crunch_simps wp: hoare_drop_imps) - - lemma setMRs_invs_bits[wp]: "\valid_pspace'\ setMRs t buf mrs \\rv. valid_pspace'\" "\\s. sch_act_wf (ksSchedulerAction s) s\ setMRs t buf mrs \\rv s. sch_act_wf (ksSchedulerAction s) s\" "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ setMRs t buf mrs \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ setMRs t buf mrs \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ setMRs t buf mrs \\rv. valid_queues'\" "\\s. P (state_refs_of' s)\ setMRs t buf mrs \\rv s. P (state_refs_of' s)\" @@ -1269,8 +1255,6 @@ lemma copyMRs_invs_bits[wp]: "\valid_pspace'\ copyMRs s sb r rb n \\rv. valid_pspace'\" "\\s. sch_act_wf (ksSchedulerAction s) s\ copyMRs s sb r rb n \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ copyMRs s sb r rb n \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ copyMRs s sb r rb n \\rv. valid_queues'\" "\\s. P (state_refs_of' s)\ copyMRs s sb r rb n \\rv s. P (state_refs_of' s)\" @@ -1471,7 +1455,7 @@ lemma doNormalTransfer_corres: hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' hoare_vcg_const_Ball_lift lookupExtraCaps_length | simp add: if_apply_def2)+) - apply (wp static_imp_wp | strengthen valid_msg_length_strengthen)+ + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ apply clarsimp apply auto done @@ -1532,15 +1516,15 @@ lemma msgFromLookupFailure_map[simp]: by (cases lf, simp_all add: lookup_failure_map_def msgFromLookupFailure_def) lemma asUser_getRestartPC_corres: - "corres (=) (tcb_at t) (tcb_at' t) - (as_user t getRestartPC) (asUser t getRestartPC)" + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t getRestartPC) (asUser t getRestartPC)" apply (rule asUser_corres') apply (rule corres_Id, simp, simp) apply (rule no_fail_getRestartPC) done lemma asUser_mapM_getRegister_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (as_user t (mapM getRegister regs)) (asUser t (mapM getRegister regs))" apply (rule asUser_corres') @@ -1550,9 +1534,8 @@ lemma asUser_mapM_getRegister_corres: done lemma makeArchFaultMessage_corres: - "corres (=) (tcb_at t) (tcb_at' t) - (make_arch_fault_msg f t) - (makeArchFaultMessage (arch_fault_map f) t)" + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (make_arch_fault_msg f t) (makeArchFaultMessage (arch_fault_map f) t)" apply (cases f; clarsimp simp: makeArchFaultMessage_def ucast_nat_def split: arch_fault.split) apply (rule corres_guard_imp) apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) @@ -1561,7 +1544,7 @@ lemma makeArchFaultMessage_corres: done lemma makeFaultMessage_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (make_fault_msg ft t) (makeFaultMessage (fault_map ft) t)" apply (cases ft, simp_all add: makeFaultMessage_def split del: if_split) @@ -1633,7 +1616,8 @@ lemmas threadget_fault_corres = lemma doFaultTransfer_corres: "corres dc (obj_at (\ko. \tcb ft. 
ko = TCB tcb \ tcb_fault tcb = Some ft) sender - and tcb_at receiver and case_option \ in_user_frame recv_buf) + and tcb_at receiver and case_option \ in_user_frame recv_buf + and pspace_aligned and pspace_distinct) (tcb_at' sender and tcb_at' receiver and case_option \ valid_ipc_buffer_ptr' recv_buf) (do_fault_transfer badge sender receiver recv_buf) @@ -1642,7 +1626,8 @@ lemma doFaultTransfer_corres: ARM_HYP_H.badgeRegister_def badge_register_def) apply (rule_tac Q="\fault. K (\f. fault = Some f) and tcb_at sender and tcb_at receiver and - case_option \ in_user_frame recv_buf" + case_option \ in_user_frame recv_buf and + pspace_aligned and pspace_distinct" and Q'="\fault'. tcb_at' sender and tcb_at' receiver and case_option \ valid_ipc_buffer_ptr' recv_buf" in corres_underlying_split) @@ -1780,10 +1765,6 @@ crunch vp[wp]: doIPCTransfer "valid_pspace'" (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' wp: transferCapsToSlots_vp simp:ball_conj_distrib ) crunch sch_act_wf[wp]: doIPCTransfer "\s. sch_act_wf (ksSchedulerAction s) s" (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) -crunch vq[wp]: doIPCTransfer "Invariants_H.valid_queues" - (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) -crunch vq'[wp]: doIPCTransfer "valid_queues'" - (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) crunch state_refs_of[wp]: doIPCTransfer "\s. P (state_refs_of' s)" (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) crunch state_hyp_refs_of[wp]: doIPCTransfer "\s. P (state_hyp_refs_of' s)" @@ -1811,7 +1792,7 @@ declare asUser_global_refs' [wp] lemma lec_valid_cap' [wp]: "\valid_objs'\ lookupExtraCaps thread xa mi \\rv s. (\x\set rv. s \' fst x)\, -" - apply (rule hoare_pre, rule hoare_post_imp_R) + apply (rule hoare_pre, rule hoare_strengthen_postE_R) apply (rule hoare_vcg_conj_lift_R[where R=valid_objs' and S="\_. valid_objs'"]) apply (rule lookupExtraCaps_srcs) apply wp @@ -1861,17 +1842,21 @@ crunch nosch[wp]: doIPCTransfer "\s. 
P (ksSchedulerAction s)" simp: split_def zipWithM_x_mapM) lemma arch_getSanitiseRegisterInfo_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (arch_get_sanitise_register_info t) (getSanitiseRegisterInfo t)" unfolding arch_get_sanitise_register_info_def getSanitiseRegisterInfo_def apply (fold archThreadGet_def) - by (corressimp corres: archThreadGet_VCPU_corres) + by (corresKsimp corres: archThreadGet_VCPU_corres) + +crunches arch_get_sanitise_register_info + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct crunch tcb_at'[wp]: getSanitiseRegisterInfo "tcb_at' t" lemma handle_fault_reply_registers_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (do t' \ arch_get_sanitise_register_info t; y \ as_user t (zipWithM_x @@ -1887,7 +1872,6 @@ lemma handle_fault_reply_registers_corres: msg_template msg); return (label = 0) od)" - apply (rule corres_guard_imp) apply (rule corres_split[OF arch_getSanitiseRegisterInfo_corres]) apply (rule corres_split) @@ -1901,7 +1885,7 @@ lemma handle_fault_reply_registers_corres: lemma handleFaultReply_corres: "ft' = fault_map ft \ - corres (=) (tcb_at t) (tcb_at' t) + corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (handle_fault_reply ft t label msg) (handleFaultReply ft' t label msg)" apply (cases ft) @@ -1951,16 +1935,6 @@ lemma getThreadCallerSlot_inv: "\P\ getThreadCallerSlot t \\_. P\" by (simp add: getThreadCallerSlot_def, wp) -lemma deleteCallerCap_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - deleteCallerCap t - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: deleteCallerCap_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) - apply (wp getThreadCallerSlot_inv cteDeleteOne_ct_not_ksQ getCTE_wp) - apply (clarsimp simp: cte_wp_at_ctes_of) - done - crunch tcb_at'[wp]: unbindNotification "tcb_at' x" lemma finaliseCapTrue_standin_tcb_at' [wp]: @@ -2120,39 +2094,11 @@ crunch weak_sch_act_wf[wp]: emptySlot "\s. weak_sch_act_wf (ksSchedulerA crunches archThreadGet, handleFaultReply for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" - and valid_queues[wp]: "Invariants_H.valid_queues" - and valid_queues'[wp]: "valid_queues'" and tcb_in_cur_domain'[wp]: "tcb_in_cur_domain' t" crunch sch_act_wf[wp]: unbindNotification "\s. sch_act_wf (ksSchedulerAction s) s" (wp: sbn_sch_act') -crunch valid_queues'[wp]: cteDeleteOne valid_queues' - (simp: crunch_simps unless_def inQ_def - wp: crunch_wps sts_st_tcb' getObject_inv loadObject_default_inv - threadSet_valid_queues' rescheduleRequired_valid_queues'_weak) - -lemma cancelSignal_valid_queues'[wp]: - "\valid_queues'\ cancelSignal t ntfn \\rv. valid_queues'\" - apply (simp add: cancelSignal_def) - apply (rule hoare_pre) - apply (wp getNotification_wp| wpc | simp)+ - done - -lemma cancelIPC_valid_queues'[wp]: - "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) \ cancelIPC t \\rv. valid_queues'\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def locateSlot_conv liftM_def) - apply (rule hoare_seq_ext[OF _ gts_sp']) - apply (case_tac state, simp_all) defer 2 - apply (rule hoare_pre) - apply ((wp getEndpoint_wp getCTE_wp | wpc | simp)+)[8] - apply (wp cteDeleteOne_valid_queues') - apply (rule_tac Q="\_. valid_queues' and (\s. 
sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) - apply (clarsimp simp: capHasProperty_def cte_wp_at_ctes_of) - apply (wp threadSet_valid_queues' threadSet_sch_act| simp)+ - apply (clarsimp simp: inQ_def) - done - crunches archThreadGet, handleFaultReply for valid_objs'[wp]: valid_objs' @@ -2161,6 +2107,17 @@ lemma cte_wp_at_is_reply_cap_toI: \ cte_wp_at (is_reply_cap_to t) ptr s" by (fastforce simp: cte_wp_at_reply_cap_to_ex_rights) +crunches handle_fault_reply + for pspace_alignedp[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +crunches cteDeleteOne, doIPCTransfer, handleFaultReply + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + lemma doReplyTransfer_corres: "corres dc (einvs and tcb_at receiver and tcb_at sender @@ -2172,7 +2129,8 @@ lemma doReplyTransfer_corres: apply (simp add: do_reply_transfer_def doReplyTransfer_def cong: option.case_cong) apply (rule corres_underlying_split [OF _ _ gts_sp gts_sp']) apply (rule corres_guard_imp) - apply (rule getThreadState_corres, (clarsimp simp add: st_tcb_at_tcb_at)+) + apply (rule getThreadState_corres, + (clarsimp simp add: st_tcb_at_tcb_at invs_distinct invs_psp_aligned)+) apply (rule_tac F = "awaiting_reply state" in corres_req) apply (clarsimp simp add: st_tcb_at_def obj_at_def is_tcb) apply (fastforce simp: invs_def valid_state_def intro: has_reply_cap_cte_wpD @@ -2206,8 +2164,12 @@ lemma doReplyTransfer_corres: apply (rule corres_split[OF setThreadState_corres]) apply simp apply (rule possibleSwitchTo_corres) - apply (wp set_thread_state_runnable_valid_sched set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' sts_valid_queues sts_valid_objs' delete_one_tcbDomain_obj_at' - | simp add: valid_tcb_state'_def)+ + apply (wp set_thread_state_runnable_valid_sched + set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' + sts_valid_objs' delete_one_tcbDomain_obj_at' + | simp add: valid_tcb_state'_def + | strengthen valid_queues_in_correct_ready_q valid_sched_valid_queues + valid_queues_ready_qs_distinct)+ apply (strengthen cte_wp_at_reply_cap_can_fast_finalise) apply (wp hoare_vcg_conj_lift) apply (rule hoare_strengthen_post [OF do_ipc_transfer_non_null_cte_wp_at]) @@ -2216,12 +2178,16 @@ lemma doReplyTransfer_corres: apply (fastforce) apply (clarsimp simp:is_cap_simps) apply (wp weak_valid_sched_action_lift)+ - apply (rule_tac Q="\_. valid_queues' and valid_objs' and cur_tcb' and tcb_at' receiver and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp, simp add: sch_act_wf_weak) + apply (rule_tac Q="\_ s. 
valid_objs' s \ cur_tcb' s \ tcb_at' receiver s + \ sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp, simp add: sch_act_wf_weak) apply (wp tcb_in_cur_domain'_lift) defer apply (simp) apply (wp)+ - apply (clarsimp) + apply (clarsimp simp: invs_psp_aligned invs_distinct) apply (rule conjI, erule invs_valid_objs) apply (rule conjI, clarsimp)+ apply (rule conjI) @@ -2245,36 +2211,38 @@ lemma doReplyTransfer_corres: apply (rule threadset_corresT; clarsimp simp add: tcb_relation_def fault_rel_optionation_def tcb_cap_cases_def tcb_cte_cases_def exst_same_def) - apply (rule_tac P="valid_sched and cur_tcb and tcb_at receiver" - and P'="tcb_at' receiver and cur_tcb' + apply (rule_tac Q="valid_sched and cur_tcb and tcb_at receiver and pspace_aligned and pspace_distinct" + and Q'="tcb_at' receiver and cur_tcb' and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and Invariants_H.valid_queues and valid_queues' and valid_objs'" - in corres_inst) + and valid_objs' + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" + in corres_guard_imp) apply (case_tac rvb, simp_all)[1] apply (rule corres_guard_imp) apply (rule corres_split[OF setThreadState_corres]) apply simp apply (fold dc_def, rule possibleSwitchTo_corres) apply simp - apply (wp static_imp_wp static_imp_conj_wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' - sts_st_tcb' sts_valid_queues | simp | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ + apply (wp hoare_weak_lift_imp hoare_weak_lift_imp_conj set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' + | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ apply (rule corres_guard_imp) apply (rule setThreadState_corres) apply clarsimp+ - apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state - thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' - threadSet_tcbDomain_triv threadSet_valid_objs' - | simp add: valid_tcb_state'_def)+ - apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state - thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' - | simp add: runnable_def inQ_def valid_tcb'_def)+ - apply (rule_tac Q="\_. valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and valid_objs and pspace_aligned" + apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state + thread_set_not_state_valid_sched + threadSet_tcbDomain_triv threadSet_valid_objs' + threadSet_sched_pointers threadSet_valid_sched_pointers + | simp add: valid_tcb_state'_def)+ + apply (rule_tac Q="\_. valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and + valid_objs and pspace_aligned and pspace_distinct" in hoare_strengthen_post [rotated], clarsimp) apply (wp) apply (rule hoare_chain [OF cap_delete_one_invs]) apply (assumption) apply (rule conjI, clarsimp) - apply (clarsimp simp add: invs_def valid_state_def) + apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def) apply (rule_tac Q="\_. tcb_at' sender and tcb_at' receiver and invs'" in hoare_strengthen_post [rotated]) apply (solves\auto simp: invs'_def valid_state'_def\) @@ -2357,15 +2325,15 @@ lemma setupCallerCap_corres: tcb_cnode_index_def cte_level_bits_def) apply (simp add: cte_map_def tcbCallerSlot_def tcb_cnode_index_def cte_level_bits_def) - apply (rule_tac Q="\rv. 
cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" + in hoare_post_add) apply (wp, (wp getSlotCap_wp)+) apply blast apply (rule no_fail_pre, wp) apply (clarsimp simp: cte_wp_at'_def cte_at'_def) - apply (rule_tac Q="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" + in hoare_post_add) apply (wp, (wp getCTE_wp')+) apply blast apply (rule no_fail_pre, wp) @@ -2422,7 +2390,7 @@ lemma possibleSwitchTo_weak_sch_act_wf[wp]: bitmap_fun_defs) apply (wp rescheduleRequired_weak_sch_act_wf weak_sch_act_wf_lift_linear[where f="tcbSchedEnqueue t"] - getObject_tcb_wp static_imp_wp + getObject_tcb_wp hoare_weak_lift_imp | wpc)+ apply (clarsimp simp: obj_at'_def projectKOs weak_sch_act_wf_def ps_clear_def tcb_in_cur_domain'_def) done @@ -2486,7 +2454,7 @@ proof - apply (rule setEndpoint_corres) apply (simp add: ep_relation_def) apply wp+ - apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def) + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_distinct) apply clarsimp \ \concludes IdleEP if bl branch\ apply (simp add: ep_relation_def) @@ -2496,7 +2464,7 @@ proof - apply (rule setEndpoint_corres) apply (simp add: ep_relation_def) apply wp+ - apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def) + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_distinct) apply clarsimp \ \concludes SendEP if bl branch\ apply (simp add: ep_relation_def) @@ -2535,10 +2503,12 @@ proof - apply (wp hoare_drop_imps)[1] apply (wp | simp)+ apply (wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases) - apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + apply (wp sts_weak_sch_act_wf sts_valid_objs' sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases)[1] apply (simp add: valid_tcb_state_def pred_conj_def) - apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg) + apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues)+ apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift | clarsimp simp: is_cap_simps)+)[1] apply (simp add: pred_conj_def) @@ -2547,7 +2517,7 @@ proof - apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift hoare_drop_imps)[1] apply (wp gts_st_tcb_at)+ apply (simp add: pred_conj_def cong: conj_cong) - apply (wp hoare_post_taut) + apply (wp hoare_TrueI) apply (simp) apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb')+ apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def ep_redux_simps @@ -2603,17 +2573,19 @@ proof - apply (simp add: if_apply_def2) apply ((wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases | simp add: if_apply_def2 split del: if_split)+)[1] - apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + apply (wp sts_weak_sch_act_wf sts_valid_objs' sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases) apply (simp add: valid_tcb_state_def pred_conj_def) apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift - | clarsimp simp:is_cap_simps)+)[1] + | clarsimp simp: is_cap_simps + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues )+)[1] apply (simp add: valid_tcb_state'_def 
pred_conj_def) apply (strengthen sch_act_wf_weak) apply (wp weak_sch_act_wf_lift_linear hoare_drop_imps) apply (wp gts_st_tcb_at)+ apply (simp add: pred_conj_def cong: conj_cong) - apply (wp hoare_post_taut) + apply (wp hoare_TrueI) apply simp apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb') apply (clarsimp simp add: invs_def valid_state_def @@ -2641,7 +2613,7 @@ lemmas setMessageInfo_typ_ats[wp] = typ_at_lifts [OF setMessageInfo_typ_at'] declare tl_drop_1[simp] crunch cur[wp]: cancel_ipc "cur_tcb" - (wp: select_wp crunch_wps simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch valid_objs'[wp]: asUser "valid_objs'" @@ -2688,14 +2660,15 @@ lemma sendSignal_corres: apply (rule possibleSwitchTo_corres) apply wp apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' - sts_valid_queues sts_st_tcb' hoare_disjI2 + sts_st_tcb' sts_valid_objs' hoare_disjI2 cancel_ipc_cte_wp_at_not_reply_state | strengthen invs_vobjs_strgs invs_psp_aligned_strg valid_sched_weak_strg + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues | simp add: valid_tcb_state_def)+ apply (rule_tac Q="\rv. invs' and tcb_at' a" in hoare_strengthen_post) apply wp - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak - valid_tcb_state'_def) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak valid_tcb_state'_def) apply (rule setNotification_corres) apply (clarsimp simp add: ntfn_relation_def) apply (wp gts_wp gts_wp' | clarsimp)+ @@ -2721,23 +2694,23 @@ lemma sendSignal_corres: apply (rule corres_split[OF asUser_setRegister_corres]) apply (rule possibleSwitchTo_corres) apply ((wp | simp)+)[1] - apply (rule_tac Q="\_. Invariants_H.valid_queues and valid_queues' and - (\s. sch_act_wf (ksSchedulerAction s) s) and + apply (rule_tac Q="\_. (\s. 
sch_act_wf (ksSchedulerAction s) s) and cur_tcb' and - st_tcb_at' runnable' (hd list) and valid_objs'" - in hoare_post_imp, clarsimp simp: pred_tcb_at' elim!: sch_act_wf_weak) + st_tcb_at' runnable' (hd list) and valid_objs' and + sym_heap_sched_pointers and valid_sched_pointers and + pspace_aligned' and pspace_distinct'" + in hoare_post_imp, clarsimp simp: pred_tcb_at') apply (wp | simp)+ apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb | simp)+ apply (wp set_simple_ko_valid_objs set_ntfn_aligned' set_ntfn_valid_objs' hoare_vcg_disj_lift weak_sch_act_wf_lift_linear | simp add: valid_tcb_state_def valid_tcb_state'_def)+ - apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def - valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def - valid_sched_action_def) + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def + valid_sched_action_def) apply (auto simp: valid_ntfn'_def )[1] apply (clarsimp simp: invs'_def valid_state'_def) @@ -2755,16 +2728,14 @@ lemma sendSignal_corres: apply (wp cur_tcb_lift | simp)+ apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb - | simp)+ + apply (wpsimp wp: sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb) apply (wp set_ntfn_aligned' set_simple_ko_valid_objs set_ntfn_valid_objs' hoare_vcg_disj_lift weak_sch_act_wf_lift_linear | simp add: valid_tcb_state_def valid_tcb_state'_def)+ - apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def - valid_pspace_def neq_Nil_conv - ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def - split: option.splits) + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def neq_Nil_conv + ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def + split: option.splits) apply (auto simp: valid_ntfn'_def neq_Nil_conv invs'_def valid_state'_def weak_sch_act_wf_def split: option.splits)[1] @@ -2790,43 +2761,11 @@ lemma possibleSwitchTo_sch_act[wp]: possibleSwitchTo t \\rv s. sch_act_wf (ksSchedulerAction s) s\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadSet_sch_act setQueue_sch_act threadGet_wp + apply (wp hoare_weak_lift_imp threadSet_sch_act setQueue_sch_act threadGet_wp | simp add: unless_def | wpc)+ apply (auto simp: obj_at'_def projectKOs tcb_in_cur_domain'_def) done -lemma possibleSwitchTo_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t\ - possibleSwitchTo t - \\rv. Invariants_H.valid_queues\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp hoare_drop_imps | wpc | simp)+ - apply (auto simp: valid_tcb'_def weak_sch_act_wf_def - dest: pred_tcb_at' - elim!: valid_objs_valid_tcbE) - done - -lemma possibleSwitchTo_ksQ': - "\(\s. t' \ set (ksReadyQueues s p) \ sch_act_not t' s) and K(t' \ t)\ - possibleSwitchTo t - \\_ s. 
t' \ set (ksReadyQueues s p)\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp - | wpc - | simp split del: if_split)+ - apply (auto simp: obj_at'_def) - done - -lemma possibleSwitchTo_valid_queues'[wp]: - "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) - and st_tcb_at' runnable' t\ - possibleSwitchTo t - \\rv. valid_queues'\" - apply (simp add: possibleSwitchTo_def curDomain_def) - apply (wp static_imp_wp threadGet_wp | wpc | simp)+ - apply (auto simp: obj_at'_def) - done - crunch st_refs_of'[wp]: possibleSwitchTo "\s. P (state_refs_of' s)" (wp: crunch_wps) @@ -2841,16 +2780,12 @@ crunch st_hyp_refs_of'[wp]: possibleSwitchTo "\s. P (state_hyp_refs_of' (wp: crunch_wps) lemma possibleSwitchTo_iflive[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' t - and (\s. sch_act_wf (ksSchedulerAction s) s)\ - possibleSwitchTo t + "\if_live_then_nonz_cap' and ex_nonz_cap_to' t and (\s. sch_act_wf (ksSchedulerAction s) s) + and pspace_aligned' and pspace_distinct'\ + possibleSwitchTo t \\rv. if_live_then_nonz_cap'\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp | wpc | simp)+ - apply (simp only: imp_conv_disj, wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp threadGet_wp)+ - apply (auto simp: obj_at'_def projectKOs) - done + unfolding possibleSwitchTo_def curDomain_def + by (wpsimp wp: threadGet_wp) crunches possibleSwitchTo for ifunsafe[wp]: if_unsafe_then_cap' @@ -2878,10 +2813,6 @@ crunches sendSignal, setBoundNotification rule: irqs_masked_lift) end -lemma sts_running_valid_queues: - "runnable' st \ \ Invariants_H.valid_queues \ setThreadState st t \\_. Invariants_H.valid_queues \" - by (wp sts_valid_queues, clarsimp) - lemma ct_in_state_activatable_imp_simple'[simp]: "ct_in_state' activatable' s \ ct_in_state' simple' s" apply (simp add: ct_in_state'_def) @@ -2894,24 +2825,21 @@ lemma setThreadState_nonqueued_state_update: \ st \ {Inactive, Running, Restart, IdleThreadState} \ (st \ Inactive \ ex_nonz_cap_to' t s) \ (t = ksIdleThread s \ idle' st) - - \ (\ runnable' st \ sch_act_simple s) - \ (\ runnable' st \ (\p. t \ set (ksReadyQueues s p)))\ - setThreadState st t \\rv. invs'\" + \ (\ runnable' st \ sch_act_simple s)\ + setThreadState st t + \\_. invs'\" apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre, wp valid_irq_node_lift - sts_valid_queues - setThreadState_ct_not_inQ) + apply (rule hoare_pre, wp valid_irq_node_lift setThreadState_ct_not_inQ) apply (clarsimp simp: pred_tcb_at') apply (rule conjI, fastforce simp: valid_tcb_state'_def) apply (drule simple_st_tcb_at_state_refs_ofD') apply (drule bound_tcb_at_state_refs_ofD') - apply (rule conjI, fastforce) - apply clarsimp - apply (erule delta_sym_refs) - apply (fastforce split: if_split_asm) - apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def - split: if_split_asm) + apply (rule conjI) + apply clarsimp + apply (erule delta_sym_refs) + apply (fastforce split: if_split_asm) + apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def split: if_split_asm) + apply fastforce done lemma cteDeleteOne_reply_cap_to'[wp]: @@ -2920,7 +2848,7 @@ lemma cteDeleteOne_reply_cap_to'[wp]: cteDeleteOne slot \\rv. 
ex_nonz_cap_to' p\" apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (subgoal_tac "isReplyCap (cteCap cte)") apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv @@ -2988,16 +2916,14 @@ lemma cancelAllIPC_not_rct[wp]: \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" apply (simp add: cancelAllIPC_def) apply (wp | wpc)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wp)+ apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) apply simp apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) - apply simp - apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (wp hoare_vcg_all_lift hoare_drop_imp) - apply (simp_all) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ done lemma cancelAllSignals_not_rct[wp]: @@ -3006,12 +2932,10 @@ lemma cancelAllSignals_not_rct[wp]: \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" apply (simp add: cancelAllSignals_def) apply (wp | wpc)+ - apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) - apply simp - apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (wp hoare_vcg_all_lift hoare_drop_imp) - apply (simp_all) + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ done crunch not_rct[wp]: finaliseCapTrue_standin "\s. ksSchedulerAction s \ ResumeCurrentThread" @@ -3083,7 +3007,7 @@ lemma sai_invs'[wp]: "\invs' and ex_nonz_cap_to' ntfnptr\ sendSignal ntfnptr badge \\y. invs'\" unfolding sendSignal_def - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (case_tac "ntfnObj nTFN", simp_all) prefer 3 apply (rename_tac list) @@ -3096,7 +3020,6 @@ lemma sai_invs'[wp]: apply (clarsimp simp:conj_comms) apply (simp add: invs'_def valid_state'_def) apply (wp valid_irq_node_lift sts_valid_objs' setThreadState_ct_not_inQ - sts_valid_queues [where st="Structures_H.thread_state.Running", simplified] set_ntfn_valid_objs' cur_tcb_lift sts_st_tcb' hoare_convert_imp [OF setNotification_nosch] | simp split del: if_split)+ @@ -3183,7 +3106,7 @@ lemma replyFromKernel_corres: apply simp apply (rule setMessageInfo_corres) apply (wp hoare_case_option_wp hoare_valid_ipc_buffer_ptr_typ_at' - | clarsimp)+ + | fastforce)+ done lemma rfk_invs': @@ -3196,7 +3119,7 @@ lemma rfk_invs': crunch nosch[wp]: replyFromKernel "\s. P (ksSchedulerAction s)" lemma completeSignal_corres: - "corres dc (ntfn_at ntfnptr and tcb_at tcb and pspace_aligned and valid_objs + "corres dc (ntfn_at ntfnptr and tcb_at tcb and pspace_aligned and pspace_distinct and valid_objs \ \and obj_at (\ko. 
ko = Notification ntfn \ Ipc_A.isActive ntfn) ntfnptr\) (ntfn_at' ntfnptr and tcb_at' tcb and valid_pspace' and obj_at' isActive ntfnptr) (complete_signal ntfnptr tcb) (completeSignal ntfnptr tcb)" @@ -3221,10 +3144,8 @@ lemma completeSignal_corres: lemma doNBRecvFailedTransfer_corres: - "corres dc (tcb_at thread) - (tcb_at' thread) - (do_nbrecv_failed_transfer thread) - (doNBRecvFailedTransfer thread)" + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) \ + (do_nbrecv_failed_transfer thread) (doNBRecvFailedTransfer thread)" unfolding do_nbrecv_failed_transfer_def doNBRecvFailedTransfer_def by (simp add: badgeRegister_def badge_register_def, rule asUser_setRegister_corres) @@ -3311,11 +3232,11 @@ lemma receiveIPC_corres: and cte_wp_at (\c. c = cap.NullCap) (thread, tcb_cnode_index 3)" and P'="tcb_at' a and tcb_at' thread and cur_tcb' - and Invariants_H.valid_queues - and valid_queues' and valid_pspace' and valid_objs' - and (\s. weak_sch_act_wf (ksSchedulerAction s) s)" + and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" in corres_guard_imp [OF corres_if]) apply (simp add: fault_rel_optionation_def) apply (rule corres_if2 [OF _ setupCallerCap_corres setThreadState_corres]) @@ -3324,17 +3245,18 @@ lemma receiveIPC_corres: apply (rule corres_split[OF setThreadState_corres]) apply simp apply (rule possibleSwitchTo_corres) - apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action - | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb + apply (wpsimp wp: sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action)+ + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb | simp)+ - apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def - valid_sched_action_def) + apply (fastforce simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def + valid_sched_action_def) apply (clarsimp split: if_split_asm) apply (clarsimp | wp do_ipc_transfer_tcb_caps)+ - apply (rule_tac Q="\_ s. sch_act_wf (ksSchedulerAction s) s" - in hoare_post_imp, erule sch_act_wf_weak) + apply (rule_tac Q="\_ s. 
sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp) + apply (fastforce elim: sch_act_wf_weak) apply (wp sts_st_tcb' gts_st_tcb_at | simp)+ apply (simp cong: list.case_cong) apply wp @@ -3357,13 +3279,13 @@ lemma receiveIPC_corres: apply wp+ apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp) apply simp - apply (clarsimp simp: valid_tcb_state_def) + apply (clarsimp simp: valid_tcb_state_def invs_distinct) apply (clarsimp simp add: valid_tcb_state'_def) apply (wp get_simple_ko_wp[where f=Notification] getNotification_wp gbn_wp gbn_wp' hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_if_lift | wpc | simp add: ep_at_def2[symmetric, simplified] | clarsimp)+ apply (clarsimp simp: valid_cap_def invs_psp_aligned invs_valid_objs pred_tcb_at_def - valid_obj_def valid_tcb_def valid_bound_ntfn_def + valid_obj_def valid_tcb_def valid_bound_ntfn_def invs_distinct dest!: invs_valid_objs elim!: obj_at_valid_objsE split: option.splits) @@ -3374,7 +3296,7 @@ lemma receiveIPC_corres: done lemma receiveSignal_corres: - "\ is_ntfn_cap cap; cap_relation cap cap' \ \ + "\ is_ntfn_cap cap; cap_relation cap cap' \ \ corres dc (invs and st_tcb_at active thread and valid_cap cap and ex_nonz_cap_to thread) (invs' and tcb_at' thread and valid_cap' cap') (receive_signal thread cap isBlocking) (receiveSignal thread cap' isBlocking)" @@ -3401,6 +3323,8 @@ lemma receiveSignal_corres: apply (simp add: ntfn_relation_def) apply wp+ apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp+) + apply (clarsimp simp: invs_distinct) + apply simp \ \WaitingNtfn\ apply (simp add: ntfn_relation_def) apply (rule corres_guard_imp) @@ -3411,7 +3335,8 @@ lemma receiveSignal_corres: apply (simp add: ntfn_relation_def) apply wp+ apply (rule corres_guard_imp) - apply (rule doNBRecvFailedTransfer_corres, simp+) + apply (rule doNBRecvFailedTransfer_corres; simp) + apply (clarsimp simp: invs_distinct)+ \ \ActiveNtfn\ apply (simp add: ntfn_relation_def) apply (rule corres_guard_imp) @@ -3481,7 +3406,7 @@ lemma sendFaultIPC_corres: | wp (once) sch_act_sane_lift)+)[1] apply (rule corres_trivial, simp add: lookup_failure_map_def) apply (clarsimp simp: st_tcb_at_tcb_at split: if_split) - apply (simp add: valid_cap_def) + apply (clarsimp simp: valid_cap_def invs_distinct) apply (clarsimp simp: valid_cap'_def inQ_def) apply auto[1] apply (clarsimp simp: lookup_failure_map_def) @@ -3499,14 +3424,16 @@ lemma gets_the_noop_corres: done lemma handleDoubleFault_corres: - "corres dc (tcb_at thread) - (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s)) + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) + \ (handle_double_fault thread f ft) (handleDoubleFault thread f' ft')" + apply (rule corres_cross_over_guard[where Q="tcb_at' thread"]) + apply (fastforce intro!: tcb_at_cross) apply (simp add: handle_double_fault_def handleDoubleFault_def) apply (rule corres_guard_imp) apply (subst bind_return [symmetric], - rule corres_underlying_split [OF setThreadState_corres]) + rule corres_split[OF setThreadState_corres]) apply simp apply (rule corres_noop2) apply (simp add: exs_valid_def return_def) @@ -3515,7 +3442,7 @@ lemma handleDoubleFault_corres: apply (rule asUser_inv) apply (rule getRestartPC_inv) apply (wp no_fail_getRestartPC)+ - apply (wp|simp)+ + apply (wp|simp)+ done crunch tcb' [wp]: sendFaultIPC "tcb_at' t" (wp: crunch_wps) @@ -3564,30 +3491,6 @@ crunch sch_act_wf: setupCallerCap "\s. 
sch_act_wf (ksSchedulerAction s) s" (wp: crunch_wps ssa_sch_act sts_sch_act rule: sch_act_wf_lift) -lemma setCTE_valid_queues[wp]: - "\Invariants_H.valid_queues\ setCTE ptr val \\rv. Invariants_H.valid_queues\" - by (wp valid_queues_lift setCTE_pred_tcb_at') - -crunch vq[wp]: cteInsert "Invariants_H.valid_queues" - (wp: crunch_wps) - -crunch vq[wp]: getThreadCallerSlot "Invariants_H.valid_queues" - (wp: crunch_wps) - -crunch vq[wp]: getThreadReplySlot "Invariants_H.valid_queues" - (wp: crunch_wps) - -lemma setupCallerCap_vq[wp]: - "\Invariants_H.valid_queues and (\s. \p. send \ set (ksReadyQueues s p))\ - setupCallerCap send recv grant \\_. Invariants_H.valid_queues\" - apply (simp add: setupCallerCap_def) - apply (wp crunch_wps sts_valid_queues) - apply (fastforce simp: valid_queues_def obj_at'_def inQ_def) - done - -crunch vq'[wp]: setupCallerCap "valid_queues'" - (wp: crunch_wps) - lemma is_derived_ReplyCap' [simp]: "\m p g. is_derived' m p (capability.ReplyCap t False g) = (\c. \ g. c = capability.ReplyCap t True g)" @@ -3631,7 +3534,7 @@ lemma setupCallerCap_vp[wp]: declare haskell_assert_inv[wp del] lemma setupCallerCap_iflive[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender\ + "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender and pspace_aligned' and pspace_distinct'\ setupCallerCap sender rcvr grant \\rv. if_live_then_nonz_cap'\" unfolding setupCallerCap_def getThreadCallerSlot_def @@ -3643,7 +3546,7 @@ lemma setupCallerCap_iflive[wp]: lemma setupCallerCap_ifunsafe[wp]: "\if_unsafe_then_cap' and valid_objs' and - ex_nonz_cap_to' rcvr and tcb_at' rcvr\ + ex_nonz_cap_to' rcvr and tcb_at' rcvr and pspace_aligned' and pspace_distinct'\ setupCallerCap sender rcvr grant \\rv. if_unsafe_then_cap'\" unfolding setupCallerCap_def getThreadCallerSlot_def @@ -3665,13 +3568,11 @@ lemma setupCallerCap_global_refs'[wp]: \\rv. valid_global_refs'\" unfolding setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv - apply (wp getSlotCap_cte_wp_at - | simp add: o_def unique_master_reply_cap' - | strengthen eq_imp_strg - | wp (once) getCTE_wp | clarsimp simp: cte_wp_at_ctes_of)+ - (* at setThreadState *) - apply (rule_tac Q="\_. valid_global_refs'" in hoare_post_imp, wpsimp+) - done + by (wp + | simp add: o_def unique_master_reply_cap' + | strengthen eq_imp_strg + | wp (once) getCTE_wp + | wp (once) hoare_vcg_imp_lift' hoare_vcg_ex_lift | clarsimp simp: cte_wp_at_ctes_of)+ crunch valid_arch'[wp]: setupCallerCap "valid_arch_state'" (wp: hoare_drop_imps) @@ -3797,7 +3698,7 @@ lemma completeSignal_invs: completeSignal ntfnptr tcb \\_. invs'\" apply (simp add: completeSignal_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp set_ntfn_minor_invs' | wpc | simp)+ apply (rule_tac Q="\_ s. (state_refs_of' s ntfnptr = ntfn_bound_refs' (ntfnBoundTCB ntfn)) @@ -3806,7 +3707,7 @@ lemma completeSignal_invs: \ ((\y. ntfnBoundTCB ntfn = Some y) \ ex_nonz_cap_to' ntfnptr s) \ ntfnptr \ ksIdleThread s" in hoare_strengthen_post) - apply ((wp hoare_vcg_ex_lift static_imp_wp | wpc | simp add: valid_ntfn'_def)+)[1] + apply ((wp hoare_vcg_ex_lift hoare_weak_lift_imp | wpc | simp add: valid_ntfn'_def)+)[1] apply (clarsimp simp: obj_at'_def state_refs_of'_def typ_at'_def ko_wp_at'_def live'_def projectKOs split: option.splits) apply (blast dest: ntfn_q_refs_no_bound_refs') apply wp @@ -3852,20 +3753,29 @@ lemmas possibleSwitchToTo_cteCaps_of[wp] crunch hyp_refs'[wp]: possibleSwitchTo "\s. 
P (state_hyp_refs_of' s)" +crunches asUser + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift wp: crunch_wps) + +crunches setupCallerCap, possibleSwitchTo, doIPCTransfer + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + (* t = ksCurThread s *) lemma ri_invs' [wp]: "\invs' and sch_act_not t and ct_in_state' simple' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s)\ receiveIPC t cap isBlocking \\_. invs'\" (is "\?pre\ _ \_\") apply (clarsimp simp: receiveIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) - apply (rule hoare_seq_ext [OF _ gbn_sp']) - apply (rule hoare_seq_ext) + apply (rule bind_wp [OF _ get_ep_sp']) + apply (rule bind_wp [OF _ gbn_sp']) + apply (rule bind_wp) (* set up precondition for old proof *) apply (rule_tac R="ko_at' ep (capEPPtr cap) and ?pre" in hoare_vcg_if_split) apply (wp completeSignal_invs) @@ -3875,7 +3785,7 @@ lemma ri_invs' [wp]: apply (rule hoare_pre, wpc, wp valid_irq_node_lift) apply (simp add: valid_ep'_def del: fun_upd_apply) apply (wp sts_sch_act' hoare_vcg_const_Ball_lift valid_irq_node_lift - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: doNBRecvFailedTransfer_def cteCaps_of_def del: fun_upd_apply)+ apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at' o_def) @@ -3896,7 +3806,6 @@ lemma ri_invs' [wp]: apply (clarsimp split: if_split_asm) apply (rename_tac list one two three fur five six seven eight nine ten eleven) apply (subgoal_tac "set list \ {EPRecv} \ {}") - apply (thin_tac "\a b. t \ set (ksReadyQueues one (a, b))") \ \causes slowdown\ apply (safe ; solves \auto\) apply fastforce apply fastforce @@ -3907,7 +3816,7 @@ lemma ri_invs' [wp]: apply (rule hoare_pre, wpc, wp valid_irq_node_lift) apply (simp add: valid_ep'_def del: fun_upd_apply) apply (wp sts_sch_act' valid_irq_node_lift - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: doNBRecvFailedTransfer_def cteCaps_of_def del: fun_upd_apply)+ apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def o_def state_hyp_refs_of'_ep) @@ -3931,9 +3840,8 @@ lemma ri_invs' [wp]: apply (rename_tac sender queue) apply (rule hoare_pre) apply (wp valid_irq_node_lift hoare_drop_imps setEndpoint_valid_mdb' - set_ep_valid_objs' sts_st_tcb' sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ possibleSwitchTo_valid_queues - possibleSwitchTo_valid_queues' + set_ep_valid_objs' sts_st_tcb' sts_sch_act' + setThreadState_ct_not_inQ possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift setEndpoint_ksQ setEndpoint_ct' | simp add: valid_tcb_state'_def case_bool_If @@ -3951,8 +3859,6 @@ lemma ri_invs' [wp]: st_tcb_at_refs_of_rev' conj_ac split del: if_split cong: if_cong) - apply (frule_tac t=sender in valid_queues_not_runnable'_not_ksQ) - apply (erule pred_tcb'_weakenE, clarsimp) apply (subgoal_tac "sch_act_not sender s") prefer 2 apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) @@ -3987,7 +3893,6 @@ lemma ri_invs' [wp]: lemma rai_invs'[wp]: "\invs' and sch_act_not t and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s) and (\s. \ntfnptr. isNotificationCap cap @@ -3997,14 +3902,14 @@ lemma rai_invs'[wp]: receiveSignal t cap isBlocking \\_. 
invs'\" apply (simp add: receiveSignal_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (rename_tac ep) apply (case_tac "ntfnObj ep") \ \ep = IdleNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp valid_irq_node_lift sts_sch_act' typ_at_lifts - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def live'_def | wpc)+ apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def) @@ -4022,12 +3927,12 @@ lemma rai_invs'[wp]: apply (clarsimp split: if_split_asm) apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse' split: if_split_asm) - apply (clarsimp dest!: global'_no_ex_cap) + apply (fastforce dest!: global'_no_ex_cap) apply (clarsimp simp: pred_tcb_at'_def obj_at'_def projectKOs) \ \ep = ActiveNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts static_imp_wp + apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts hoare_weak_lift_imp asUser_urz | simp add: valid_ntfn'_def)+ apply (clarsimp simp: pred_tcb_at' valid_pspace'_def) @@ -4042,7 +3947,7 @@ lemma rai_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' - sts_valid_queues setThreadState_ct_not_inQ typ_at_lifts + setThreadState_ct_not_inQ typ_at_lifts asUser_urz | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def live'_def | wpc)+ apply (clarsimp simp: valid_tcb_state'_def) @@ -4070,7 +3975,7 @@ lemma rai_invs'[wp]: apply (auto simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def)[5] apply (fastforce simp: tcb_bound_refs'_def split: if_split_asm) - apply (clarsimp dest!: global'_no_ex_cap) + apply (fastforce dest!: global'_no_ex_cap) done lemma getCTE_cap_to_refs[wp]: @@ -4099,7 +4004,6 @@ lemma cteInsert_invs_bits[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ cteInsert a b c \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ cteInsert a b c \\rv. Invariants_H.valid_queues\" "\cur_tcb'\ cteInsert a b c \\rv. cur_tcb'\" "\\s. P (state_refs_of' s)\ cteInsert a b c @@ -4139,16 +4043,19 @@ crunch irqs_masked'[wp]: possibleSwitchTo "irqs_masked'" crunch urz[wp]: possibleSwitchTo "untyped_ranges_zero'" (simp: crunch_simps unless_def wp: crunch_wps) +crunches possibleSwitchTo + for pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + lemma si_invs'[wp]: "\invs' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and sch_act_not t and ex_nonz_cap_to' ep and ex_nonz_cap_to' t\ sendIPC bl call ba cg cgr t ep \\rv. 
invs'\" supply if_split[split del] apply (simp add: sendIPC_def split del: if_split) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp [OF _ get_ep_sp']) apply (case_tac epa) \ \epa = RecvEP\ apply simp @@ -4160,8 +4067,8 @@ lemma si_invs'[wp]: apply (rule_tac P="a\t" in hoare_gen_asm) apply (wp valid_irq_node_lift sts_valid_objs' set_ep_valid_objs' setEndpoint_valid_mdb' sts_st_tcb' sts_sch_act' - possibleSwitchTo_sch_act_not sts_valid_queues setThreadState_ct_not_inQ - possibleSwitchTo_ksQ' possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift sts_ksQ' + possibleSwitchTo_sch_act_not setThreadState_ct_not_inQ + possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift hoare_convert_imp [OF doIPCTransfer_sch_act doIPCTransfer_ct'] hoare_convert_imp [OF setEndpoint_nosch setEndpoint_ct'] hoare_drop_imp [where f="threadGet tcbFault t"] @@ -4216,8 +4123,7 @@ lemma si_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre, wp valid_irq_node_lift) apply (simp add: valid_ep'_def del: fun_upd_apply) - apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at' state_hyp_refs_of'_ep simp del: fun_upd_apply) apply (rule conjI, clarsimp elim!: obj_at'_weakenE) @@ -4237,8 +4143,7 @@ lemma si_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre, wp valid_irq_node_lift) apply (simp add: valid_ep'_def del: fun_upd_apply) - apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' - sts_valid_queues setThreadState_ct_not_inQ) + apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at' state_hyp_refs_of'_ep simp del: fun_upd_apply) apply (rule conjI, clarsimp elim!: obj_at'_weakenE) @@ -4266,23 +4171,19 @@ lemma si_invs'[wp]: lemma sfi_invs_plus': "\invs' and st_tcb_at' simple' t and sch_act_not t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t\ - sendFaultIPC t f - \\rv. invs'\, \\rv. invs' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) - and sch_act_not t and (\s. ksIdleThread s \ t)\" + sendFaultIPC t f + \\_. invs'\, \\_. invs' and st_tcb_at' simple' t and sch_act_not t and (\s. ksIdleThread s \ t)\" apply (simp add: sendFaultIPC_def) apply (wp threadSet_invs_trivial threadSet_pred_tcb_no_state threadSet_cap_to' | wpc | simp)+ apply (rule_tac Q'="\rv s. invs' s \ sch_act_not t s \ st_tcb_at' simple' t s - \ (\p. t \ set (ksReadyQueues s p)) \ ex_nonz_cap_to' t s \ t \ ksIdleThread s \ (\r\zobj_refs' rv. ex_nonz_cap_to' r s)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp: inQ_def pred_tcb_at') apply (wp | simp)+ @@ -4290,32 +4191,32 @@ lemma sfi_invs_plus': apply (subst(asm) global'_no_ex_cap, auto) done +crunches send_fault_ipc + for pspace_aligned[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" + (simp: crunch_simps wp: crunch_wps) + lemma handleFault_corres: "fr f f' \ - corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread - and (%_. valid_fault f)) + corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread + and (\_. valid_fault f)) (invs' and sch_act_not thread - and (\s. \p. 
thread \ set(ksReadyQueues s p)) and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) (handle_fault thread f) (handleFault thread f')" apply (simp add: handle_fault_def handleFault_def) apply (rule corres_guard_imp) apply (subst return_bind [symmetric], rule corres_split[where P="tcb_at thread", - OF gets_the_noop_corres [where x="()"]]) + OF gets_the_noop_corres [where x="()"]]) apply (simp add: tcb_at_def) apply (rule corres_split_catch) apply (rule_tac F="valid_fault f" in corres_gen_asm) apply (rule sendFaultIPC_corres, assumption) apply simp apply (rule handleDoubleFault_corres) - apply wp+ - apply (rule hoare_post_impErr, rule sfi_invs_plus', simp_all)[1] - apply clarsimp - apply wp+ - apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 invs_def - valid_state_def valid_idle_def) - apply auto + apply wpsimp+ + apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 invs_def valid_state_def valid_idle_def) + apply auto done lemma sts_invs_minor'': @@ -4323,17 +4224,13 @@ lemma sts_invs_minor'': \ (st \ Inactive \ \ idle' st \ st' \ Inactive \ \ idle' st')) t and (\s. t = ksIdleThread s \ idle' st) - and (\s. (\p. t \ set (ksReadyQueues s p)) \ runnable' st) - and (\s. runnable' st \ obj_at' tcbQueued t s - \ st_tcb_at' runnable' t s) and (\s. \ runnable' st \ sch_act_not t s) and invs'\ setThreadState st t \\rv. invs'\" apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply clarsimp apply (rule conjI) apply fastforce @@ -4348,12 +4245,11 @@ lemma sts_invs_minor'': apply (clarsimp dest!: st_tcb_at_state_refs_ofD' elim!: rsubst[where P=sym_refs] intro!: ext) - apply (clarsimp elim!: st_tcb_ex_cap'') + apply (fastforce elim!: st_tcb_ex_cap'') done lemma hf_invs' [wp]: "\invs' and sch_act_not t - and (\s. \p. t \ set(ksReadyQueues s p)) and st_tcb_at' simple' t and ex_nonz_cap_to' t and (\s. t \ ksIdleThread s)\ handleFault t f \\r. invs'\" @@ -4361,7 +4257,7 @@ lemma hf_invs' [wp]: apply wp apply (simp add: handleDoubleFault_def) apply (wp sts_invs_minor'' dmo_invs')+ - apply (rule hoare_post_impErr, rule sfi_invs_plus', + apply (rule hoare_strengthen_postE, rule sfi_invs_plus', simp_all) apply (strengthen no_refs_simple_strg') apply clarsimp @@ -4393,8 +4289,8 @@ lemma si_blk_makes_simple': sendIPC True call bdg x x' t' ep \\rv. st_tcb_at' simple' t\" apply (simp add: sendIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (case_tac xa, simp_all) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) apply (rename_tac list) apply (case_tac list, simp_all add: case_bool_If case_option_If split del: if_split cong: if_cong) @@ -4413,8 +4309,8 @@ lemma si_blk_makes_runnable': sendIPC True call bdg x x' t' ep \\rv. 
st_tcb_at' runnable' t\" apply (simp add: sendIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (case_tac xa, simp_all) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) apply (rename_tac list) apply (case_tac list, simp_all add: case_bool_If case_option_If split del: if_split cong: if_cong) @@ -4437,7 +4333,7 @@ lemma sfi_makes_simple': apply (simp add: sendFaultIPC_def cong: if_cong capability.case_cong bool.case_cong) apply (wpsimp wp: si_blk_makes_simple' threadSet_pred_tcb_no_state hoare_drop_imps - hoare_vcg_all_lift_R) + hoare_vcg_all_liftE_R) done lemma sfi_makes_runnable': @@ -4448,7 +4344,7 @@ lemma sfi_makes_runnable': apply (simp add: sendFaultIPC_def cong: if_cong capability.case_cong bool.case_cong) apply (wpsimp wp: si_blk_makes_runnable' threadSet_pred_tcb_no_state hoare_drop_imps - hoare_vcg_all_lift_R) + hoare_vcg_all_liftE_R) done lemma hf_makes_runnable_simple': @@ -4472,8 +4368,8 @@ lemma ri_makes_runnable_simple': apply (rule hoare_gen_asm)+ apply (simp add: receiveIPC_def) apply (case_tac cap, simp_all add: isEndpointCap_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (rule hoare_seq_ext [OF _ gbn_sp']) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (rule bind_wp [OF _ gbn_sp']) apply wp apply (rename_tac ep q r) apply (case_tac ep, simp_all) @@ -4507,7 +4403,7 @@ lemma sendSignal_st_tcb'_Running: sendSignal ntfnptr bdg \\_. st_tcb_at' (\st. st = Running \ P st) t\" apply (simp add: sendSignal_def) - apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp static_imp_wp + apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp hoare_weak_lift_imp | wpc | clarsimp simp: pred_tcb_at')+ done diff --git a/proof/refine/ARM_HYP/KHeap_R.thy b/proof/refine/ARM_HYP/KHeap_R.thy index dd9b8dd5de..3f67cf7deb 100644 --- a/proof/refine/ARM_HYP/KHeap_R.thy +++ b/proof/refine/ARM_HYP/KHeap_R.thy @@ -14,8 +14,46 @@ lemma lookupAround2_known1: "m x = Some y \ fst (lookupAround2 x m) = Some (x, y)" by (fastforce simp: lookupAround2_char1) +lemma koTypeOf_injectKO: + fixes v :: "'a :: pspace_storable" + shows "koTypeOf (injectKO v) = koType TYPE('a)" + apply (cut_tac v1=v in iffD2 [OF project_inject, OF refl]) + apply (simp add: project_koType[symmetric]) + done + context begin interpretation Arch . (*FIXME: arch_split*) +lemma setObject_modify_variable_size: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; obj_at' (\obj. objBits v = objBits obj) p s\ + \ setObject p v s = modify (ksPSpace_update (\ps. ps (p \ injectKO v))) s" + supply projectKOs[simp] + apply (clarsimp simp: setObject_def split_def exec_gets obj_at'_def lookupAround2_known1 + assert_opt_def updateObject_default_def bind_assoc) + apply (simp add: projectKO_def alignCheck_assert) + apply (simp add: project_inject objBits_def) + apply (clarsimp simp only: koTypeOf_injectKO) + apply (frule in_magnitude_check[where s'=s]) + apply blast + apply fastforce + apply (simp add: magnitudeCheck_assert in_monad bind_def gets_def oassert_opt_def + get_def return_def) + apply (simp add: simpler_modify_def) + done + +lemma setObject_modify: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; \ko. P ko \ objBits ko = objBits v \ + \ setObject p v s = modify (ksPSpace_update (\ps. 
ps (p \ injectKO v))) s" + apply (rule setObject_modify_variable_size) + apply fastforce + apply fastforce + apply fastforce + unfolding obj_at'_def + by fastforce + lemma obj_at_getObject: assumes R: "\a b n ko s obj::'a::pspace_storable. @@ -151,8 +189,7 @@ lemma corres_get_tcb [corres]: apply (drule bspec) apply clarsimp apply blast - apply (clarsimp simp add: other_obj_relation_def - lookupAround2_known1) + apply (clarsimp simp: tcb_relation_cut_def lookupAround2_known1) done lemma lookupAround2_same1[simp]: @@ -254,7 +291,7 @@ lemma obj_at_setObject1: setObject p (v::'a::pspace_storable) \ \rv. obj_at' (\x::'a::pspace_storable. True) t \" apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad obj_at'_def projectKOs lookupAround2_char1 project_inject @@ -276,7 +313,7 @@ lemma obj_at_setObject2: setObject p (v::'a) \ \rv. obj_at' P t \" apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad) apply (frule updateObject_type) apply (drule R) @@ -460,6 +497,40 @@ lemma setObject_tcb_strongest: ps_clear_upd) done +method setObject_easy_cases = + clarsimp simp: setObject_def in_monad split_def valid_def lookupAround2_char1, + erule rsubst[where P=P'], rule ext, + clarsimp simp: updateObject_cte updateObject_default_def in_monad + typeError_def opt_map_def opt_pred_def projectKO_opts_defs projectKOs + split: if_split_asm + Structures_H.kernel_object.split_asm + +lemma setObject_endpoint_tcbs_of'[wp]: + "setObject c (endpoint :: endpoint) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_notification_tcbs_of'[wp]: + "setObject c (notification :: notification) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedNexts_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedNexts_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedPrevs_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedPrevs_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbQueued[wp]: + "setObject c (cte :: cte) \\s. P' (tcbQueued |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + +lemma setObject_cte_inQ[wp]: + "setObject c (cte :: cte) \\s. P' (inQ d p |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + lemma getObject_obj_at': assumes x: "\q n ko. 
loadObject p q n ko = (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" @@ -691,8 +762,8 @@ lemma cte_wp_at_ctes_of: apply (simp add: field_simps) apply (clarsimp split: if_split_asm del: disjCI) apply (simp add: ps_clear_def3 field_simps) - apply (rule disjI2, rule exI[where x="p - (p && ~~ mask 9)"]) - apply (clarsimp simp: ps_clear_def3[where na=9] is_aligned_mask + apply (rule disjI2, rule exI[where x="p - (p && ~~ mask tcb_bits)"]) + apply (clarsimp simp: ps_clear_def3[where na=tcb_bits] is_aligned_mask word_bw_assocs field_simps) done @@ -960,7 +1031,7 @@ lemma obj_relation_cut_same_type: \ (a_type ko = AArch AVCPU \ a_type ko' = AArch AVCPU)" apply (rule ccontr) apply (simp add: obj_relation_cuts_def2 a_type_def) - apply (auto simp: other_obj_relation_def cte_relation_def + apply (auto simp: other_obj_relation_def tcb_relation_cut_def cte_relation_def pte_relation_def pde_relation_def split: Structures_A.kernel_object.split_asm if_split_asm Structures_H.kernel_object.split_asm @@ -978,6 +1049,16 @@ where "exst_same' (KOTCB tcb) (KOTCB tcb') = exst_same tcb tcb'" | "exst_same' _ _ = True" +lemma tcbs_of'_non_tcb_update: + "\typ_at' (koTypeOf ko) ptr s'; koTypeOf ko \ TCBT\ + \ tcbs_of' (s'\ksPSpace := (ksPSpace s')(ptr \ ko)\) = tcbs_of' s'" + by (fastforce simp: typ_at'_def ko_wp_at'_def opt_map_def projectKO_opts_defs + split: kernel_object.splits) + +lemma typ_at'_koTypeOf: + "ko_at' ob' ptr b \ typ_at' (koTypeOf (injectKO ob')) ptr b" + by (auto simp: typ_at'_def ko_wp_at'_def obj_at'_def project_inject projectKOs) + lemma setObject_other_corres: fixes ob' :: "'a :: pspace_storable" assumes x: "updateObject ob' = updateObject_default ob'" @@ -986,12 +1067,12 @@ lemma setObject_other_corres: assumes t: "is_other_obj_relation_type (a_type ob)" assumes b: "\ko. P ko \ objBits ko = objBits ob'" assumes e: "\ko. P ko \ exst_same' (injectKO ko) (injectKO ob')" - assumes P: "\(v::'a::pspace_storable). (1 :: word32) < 2 ^ (objBits v)" + assumes P: "\v::'a::pspace_storable. (1 :: machine_word) < 2 ^ objBits v" shows "other_obj_relation ob (injectKO (ob' :: 'a :: pspace_storable)) \ corres dc (obj_at (\ko. 
a_type ko = a_type ob) ptr and obj_at (same_caps ob) ptr) (obj_at' (P :: 'a \ bool) ptr) (set_object ptr ob) (setObject ptr ob')" - supply image_cong_simp [cong del] + supply image_cong_simp [cong del] projectKOs[simp del] apply (rule corres_no_failI) apply (rule no_fail_pre) apply wp @@ -1002,11 +1083,12 @@ lemma setObject_other_corres: put_def return_def modify_def get_object_def x projectKOs obj_at_def updateObject_default_def in_magnitude_check [OF _ P]) + apply (rename_tac ko) apply (clarsimp simp add: state_relation_def z) apply (clarsimp simp add: caps_of_state_after_update cte_wp_at_after_update swp_def fun_upd_def obj_at_def) apply (subst conj_assoc[symmetric]) - apply (rule conjI[rotated]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x=ptr in allE)+ apply (clarsimp simp: obj_at_def a_type_def @@ -1016,40 +1098,46 @@ lemma setObject_other_corres: apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) apply (elim conjE) apply (frule bspec, erule domI) + apply (prop_tac "typ_at' (koTypeOf (injectKO ob')) ptr b") + subgoal + by (clarsimp simp: typ_at'_def ko_wp_at'_def obj_at'_def projectKO_opts_defs + is_other_obj_relation_type_def a_type_def other_obj_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm kernel_object.split_asm + arch_kernel_object.split_asm) + apply clarsimp apply (rule conjI) apply (rule ballI, drule(1) bspec) apply (drule domD) apply (clarsimp simp: is_other_obj_relation_type t) apply (drule(1) bspec) apply clarsimp - apply (frule_tac ko'=koa and x'=ptr in obj_relation_cut_same_type, + apply (frule_tac ko'=ko and x'=ptr in obj_relation_cut_same_type, (fastforce simp add: is_other_obj_relation_type t)+) - apply (erule disjE) - apply (simp add: is_other_obj_relation_type t) - apply (erule disjE) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_CapTable a_type_def) - apply (erule disjE) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_UserData a_type_def) - apply (erule disjE) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_DeviceData a_type_def) - apply (simp add: is_other_obj_relation_type t) - apply (simp only: ekheap_relation_def) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (insert e) - apply atomize - apply (clarsimp simp: obj_at'_def) - apply (erule_tac x=obj in allE) - apply (clarsimp simp: projectKO_eq project_inject) - apply (case_tac ob; - simp_all add: a_type_def other_obj_relation_def etcb_relation_def - is_other_obj_relation_type t exst_same_def) - by (clarsimp simp: is_other_obj_relation_type t exst_same_def - split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits - ARM_A.arch_kernel_obj.splits)+ + apply (insert t) + apply ((erule disjE + | clarsimp simp: is_other_obj_relation_type is_other_obj_relation_type_def a_type_def)+)[1] + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (insert e) + apply atomize + apply (clarsimp simp: obj_at'_def) + apply (erule_tac x=obj in allE) + apply (clarsimp simp: projectKO_eq project_inject) + apply (case_tac ob; + simp_all add: a_type_def other_obj_relation_def etcb_relation_def + is_other_obj_relation_type t exst_same_def)[1] + apply (clarsimp simp: is_other_obj_relation_type t exst_same_def + split: Structures_A.kernel_object.splits 
Structures_H.kernel_object.splits + arch_kernel_obj.splits)+ + \ \ready_queues_relation\ + apply (prop_tac "koTypeOf (injectKO ob') \ TCBT") + subgoal + by (clarsimp simp: other_obj_relation_def; cases ob; cases "injectKO ob'"; + simp split: arch_kernel_obj.split_asm) + by (fastforce dest: tcbs_of'_non_tcb_update) lemmas obj_at_simps = obj_at_def obj_at'_def projectKOs map_to_ctes_upd_other is_other_obj_relation_type_def @@ -1061,8 +1149,8 @@ lemma setEndpoint_corres [corres]: corres dc (ep_at ptr) (ep_at' ptr) (set_endpoint ptr e) (setEndpoint ptr e')" apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) lemma setNotification_corres [corres]: @@ -1070,8 +1158,8 @@ lemma setNotification_corres [corres]: corres dc (ntfn_at ptr) (ntfn_at' ptr) (set_notification ptr ae) (setNotification ptr ae')" apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) lemma no_fail_getNotification [wp]: @@ -1143,14 +1231,14 @@ lemma typ_at'_valid_obj'_lift: apply (case_tac endpoint; simp add: valid_ep'_def, wp) apply (rename_tac notification) apply (case_tac "ntfnObj notification"; - simp add: valid_ntfn'_def valid_bound_tcb'_def split: option.splits, + simp add: valid_ntfn'_def split: option.splits, (wpsimp|rule conjI)+) apply (rename_tac tcb) apply (case_tac "tcbState tcb"; - simp add: valid_tcb'_def valid_tcb_state'_def split_def valid_bound_ntfn'_def - valid_arch_tcb'_def - split: option.splits, - wpsimp wp: P) + simp add: valid_tcb'_def valid_tcb_state'_def split_def opt_tcb_at'_def + valid_bound_ntfn'_def; + wpsimp wp: hoare_case_option_wp hoare_case_option_wp2; + (clarsimp split: option.splits)?) apply (wpsimp simp: valid_cte'_def) apply (rename_tac arch_kernel_object) apply (case_tac arch_kernel_object; wpsimp) @@ -1432,32 +1520,6 @@ lemma set_ep_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ done -lemma set_ep_valid_queues[wp]: - "\Invariants_H.valid_queues\ setEndpoint epptr ep \\rv. Invariants_H.valid_queues\" - apply (simp add: Invariants_H.valid_queues_def) - apply (wp hoare_vcg_conj_lift) - apply (simp add: setEndpoint_def valid_queues_no_bitmap_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (rule obj_at_setObject2) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] - | simp add: valid_queues_no_bitmap_def)+ - done - -lemma set_ep_valid_queues'[wp]: - "\valid_queues'\ setEndpoint epptr ep \\rv. 
valid_queues'\" - apply (unfold setEndpoint_def) - apply (simp only: valid_queues'_def imp_conv_disj - obj_at'_real_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (rule setObject_ko_wp_at) - apply simp - apply (simp add: objBits_simps') - apply simp - apply (wp updateObject_default_inv | simp)+ - apply (clarsimp simp: projectKOs ko_wp_at'_def) - done - lemma ct_in_state_thread_state_lift': assumes ct: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" assumes st: "\t. \st_tcb_at' P t\ f \\_. st_tcb_at' P t\" @@ -1723,34 +1785,6 @@ lemma set_ntfn_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ done -lemma set_ntfn_valid_queues[wp]: - "\Invariants_H.valid_queues\ setNotification p ntfn \\rv. Invariants_H.valid_queues\" - apply (simp add: Invariants_H.valid_queues_def) - apply (rule hoare_pre) - apply (wp hoare_vcg_conj_lift) - apply (simp add: setNotification_def valid_queues_no_bitmap_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (rule obj_at_setObject2) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] - | simp add: valid_queues_no_bitmap_def)+ - done - -lemma set_ntfn_valid_queues'[wp]: - "\valid_queues'\ setNotification p ntfn \\rv. valid_queues'\" - apply (unfold setNotification_def) - apply (rule setObject_ntfn_pre) - apply (simp only: valid_queues'_def imp_conv_disj - obj_at'_real_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (rule setObject_ko_wp_at) - apply simp - apply (simp add: objBits_simps') - apply simp - apply (wp updateObject_default_inv | simp)+ - apply (clarsimp simp: projectKOs ko_wp_at'_def) - done - lemma set_ntfn_state_refs_of'[wp]: "\\s. P ((state_refs_of' s) (epptr := ntfn_q_refs_of' (ntfnObj ntfn) \ ntfn_bound_refs' (ntfnBoundTCB ntfn)))\ @@ -2203,6 +2237,21 @@ lemma setNotification_ct_idle_or_in_cur_domain'[wp]: crunch gsUntypedZeroRanges[wp]: setNotification "\s. P (gsUntypedZeroRanges s)" (wp: setObject_ksPSpace_only updateObject_default_inv) +lemma sym_heap_sched_pointers_lift: + assumes prevs: "\P. f \\s. P (tcbSchedPrevs_of s)\" + assumes nexts: "\P. f \\s. P (tcbSchedNexts_of s)\" + shows "f \sym_heap_sched_pointers\" + by (rule_tac f=tcbSchedPrevs_of in hoare_lift_Pf2; wpsimp wp: assms) + +crunches setNotification + for tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: updateObject_default_def) + lemma set_ntfn_minor_invs': "\invs' and obj_at' (\ntfn. ntfn_q_refs_of' (ntfnObj ntfn) = ntfn_q_refs_of' (ntfnObj val) \ ntfn_bound_refs' (ntfnBoundTCB ntfn) = ntfn_bound_refs' (ntfnBoundTCB val)) @@ -2212,11 +2261,14 @@ lemma set_ntfn_minor_invs': and (\s. ptr \ ksIdleThread s) \ setNotification ptr val \\rv. 
invs'\" - apply (clarsimp simp add: invs'_def valid_state'_def cteCaps_of_def) - apply (wp irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift; simp add: o_def) - by (clarsimp elim!: rsubst[where P=sym_refs] - intro!: ext - dest!: obj_at_state_refs_ofD') + apply (clarsimp simp: invs'_def valid_state'_def cteCaps_of_def) + apply (wpsimp wp: irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift + sym_heap_sched_pointers_lift valid_bitmaps_lift + simp: o_def) + apply (clarsimp elim!: rsubst[where P=sym_refs] + intro!: ext + dest!: obj_at_state_refs_ofD')+ + done lemma getEndpoint_wp: "\\s. \ep. ko_at' ep e s \ P ep s\ getEndpoint e \P\" @@ -2254,6 +2306,30 @@ lemma idle_is_global [intro!]: "ksIdleThread s \ global_refs' s" by (simp add: global_refs'_def) +lemma aligned_distinct_obj_atI': + "\ ksPSpace s x = Some ko; pspace_aligned' s; pspace_distinct' s; ko = injectKO v \ + \ ko_at' v x s" + supply projectKOs[simp] + apply (simp add: obj_at'_def project_inject pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply (clarsimp simp: objBits_simps' word_bits_def + split: kernel_object.splits arch_kernel_object.splits) + done + +lemma aligned'_distinct'_ko_wp_at'I: + "\ksPSpace s' x = Some ko; P ko; pspace_aligned' s'; pspace_distinct' s'\ + \ ko_wp_at' P x s'" + apply (simp add: ko_wp_at'_def pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply (cases ko; force) + done + +lemma aligned'_distinct'_ko_at'I: + "\ksPSpace s' x = Some ko; pspace_aligned' s'; pspace_distinct' s'; + ko = injectKO (v:: 'a :: pspace_storable)\ + \ ko_at' v x s'" + by (fastforce elim: aligned'_distinct'_ko_wp_at'I simp: obj_at'_real_def project_inject) + lemma valid_globals_cte_wpD': "\ valid_global_refs' s; cte_wp_at' P p s \ \ \cte. P cte \ ksIdleThread s \ capRange (cteCap cte)" @@ -2262,21 +2338,21 @@ lemma valid_globals_cte_wpD': lemma dmo_aligned'[wp]: "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_distinct'[wp]: "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_valid_objs'[wp]: "\valid_objs'\ doMachineOp f \\_. valid_objs'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done @@ -2284,7 +2360,7 @@ lemma dmo_inv': assumes R: "\P. \P\ f \\_. P\" shows "\P\ doMachineOp f \\_. P\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp apply (drule in_inv_by_hoareD [OF R]) apply simp @@ -2297,20 +2373,17 @@ crunch typ_at'[wp]: doMachineOp "\s. P (typ_at' T p s)" lemmas doMachineOp_typ_ats[wp] = typ_at_lifts [OF doMachineOp_typ_at'] lemma doMachineOp_invs_bits[wp]: - "\valid_pspace'\ doMachineOp m \\rv. valid_pspace'\" - "\\s. sch_act_wf (ksSchedulerAction s) s\ - doMachineOp m \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ doMachineOp m \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ doMachineOp m \\rv. valid_queues'\" - "\\s. P (state_refs_of' s)\ - doMachineOp m - \\rv s. P (state_refs_of' s)\" - "\if_live_then_nonz_cap'\ doMachineOp m \\rv. if_live_then_nonz_cap'\" - "\cur_tcb'\ doMachineOp m \\rv. cur_tcb'\" - "\if_unsafe_then_cap'\ doMachineOp m \\rv. 
if_unsafe_then_cap'\" - by (simp add: doMachineOp_def split_def - valid_pspace'_def valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - | wp cur_tcb_lift sch_act_wf_lift tcb_in_cur_domain'_lift + "doMachineOp m \valid_pspace'\" + "doMachineOp m \\s. sch_act_wf (ksSchedulerAction s) s\" + "doMachineOp m \valid_bitmaps\" + "doMachineOp m \valid_sched_pointers\" + "doMachineOp m \\s. P (state_refs_of' s)\" + "doMachineOp m \\s. P (state_hyp_refs_of' s)\" + "doMachineOp m \if_live_then_nonz_cap'\" + "doMachineOp m \cur_tcb'\" + "doMachineOp m \if_unsafe_then_cap'\" + by (simp add: doMachineOp_def split_def + | wp | fastforce elim: state_refs_of'_pspaceI)+ crunch obj_at'[wp]: doMachineOp "\s. P (obj_at' P' p s)" diff --git a/proof/refine/ARM_HYP/LevityCatch.thy b/proof/refine/ARM_HYP/LevityCatch.thy index e7f778cc43..064702ca9d 100644 --- a/proof/refine/ARM_HYP/LevityCatch.thy +++ b/proof/refine/ARM_HYP/LevityCatch.thy @@ -8,6 +8,7 @@ theory LevityCatch imports "BaseRefine.Include" "Lib.LemmaBucket" + "Lib.Corres_Method" begin (* Try again, clagged from Include *) @@ -39,14 +40,14 @@ lemma alignCheck_assert: lemma magnitudeCheck_inv: "\P\ magnitudeCheck x y n \\rv. P\" apply (clarsimp simp add: magnitudeCheck_def split: option.splits) - apply (wp hoare_when_wp) + apply (wp when_wp) apply simp done lemma alignCheck_inv: "\P\ alignCheck x n \\rv. P\" apply (simp add: alignCheck_def unless_def alignError_def) - apply (wp hoare_when_wp) + apply (wp when_wp) apply simp done diff --git a/proof/refine/ARM_HYP/PageTableDuplicates.thy b/proof/refine/ARM_HYP/PageTableDuplicates.thy index 903c5d3e66..f5ec26f93c 100644 --- a/proof/refine/ARM_HYP/PageTableDuplicates.thy +++ b/proof/refine/ARM_HYP/PageTableDuplicates.thy @@ -77,11 +77,6 @@ crunches threadSet, setBoundNotification for valid_duplicates'[wp]: "\s. vs_valid_duplicates' (ksPSpace s)" (wp: setObject_ksInterrupt updateObject_default_inv) -lemma tcbSchedEnqueue_valid_duplicates'[wp]: - "\\s. vs_valid_duplicates' (ksPSpace s)\ - tcbSchedEnqueue a \\rv s. vs_valid_duplicates' (ksPSpace s)\" - by (simp add:tcbSchedEnqueue_def unless_def setQueue_def | wp | wpc)+ - crunch valid_duplicates'[wp]: rescheduleRequired "\s. vs_valid_duplicates' (ksPSpace s)" (wp: setObject_ksInterrupt updateObject_default_inv) @@ -152,7 +147,7 @@ lemma mapM_x_storePTE_updates: apply (induct xs) apply (simp add: mapM_x_Nil) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: storePTE_def setObject_def) apply (wp hoare_drop_imps | simp add:split_def updateObject_default_def)+ @@ -469,7 +464,7 @@ lemma mapM_x_storePDE_updates: apply (induct xs) apply (simp add: mapM_x_Nil) apply (simp add: mapM_x_Cons) - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: storePDE_def setObject_def) apply (wp hoare_drop_imps | simp add:split_def updateObject_default_def)+ @@ -642,7 +637,7 @@ lemma createObject_valid_duplicates'[wp]: apply (simp add: placeNewObject_def placeNewDataObject_def placeNewObject'_def split_def copyGlobalMappings_def split del: if_split - | wp hoare_unless_wp[where P="d"] hoare_unless_wp[where Q=\] + | wp unless_wp[where P="d"] unless_wp[where Q=\] | wpc | simp add: alignError_def split del: if_split)+ apply (intro conjI impI) apply clarsimp+ @@ -874,7 +869,7 @@ lemma deleteObjects_valid_duplicates'[wp]: crunch arch_inv[wp]: resetUntypedCap "\s. 
P (ksArchState s)" (simp: crunch_simps - wp: hoare_drop_imps hoare_unless_wp mapME_x_inv_wp + wp: hoare_drop_imps unless_wp mapME_x_inv_wp preemptionPoint_inv ignore: freeMemory) @@ -909,7 +904,7 @@ lemma invokeUntyped_valid_duplicates[wp]: and valid_untyped_inv' ui and ct_active'\ invokeUntyped ui \\rv s. vs_valid_duplicates' (ksPSpace s) \" - supply hoare_whenE_wps[wp_split del] + supply whenE_wps[wp_split del] apply (simp only: invokeUntyped_def updateCap_def) apply (rule hoare_name_pre_state) apply (cases ui) @@ -919,10 +914,10 @@ lemma invokeUntyped_valid_duplicates[wp]: apply (rule hoare_pre) apply simp apply (wp updateFreeIndex_pspace_no_overlap') - apply ((rule validE_validE_R)?, rule hoare_post_impErr) + apply ((rule validE_validE_R)?, rule hoare_strengthen_postE) apply (rule combine_validE) apply (rule_tac ui=ui in whenE_reset_resetUntypedCap_invs_etc) - apply (rule hoare_whenE_wp) + apply (rule whenE_wp) apply (rule valid_validE) apply (rule resetUntypedCap_valid_duplicates') defer @@ -1102,7 +1097,7 @@ lemma checkMappingPPtr_Section: lemma mapM_x_mapM_valid: "\ P \ mapM_x f xs \\r. Q\ \ \P\mapM f xs \\r. Q\" - apply (simp add:NonDetMonadLemmaBucket.mapM_x_mapM) + apply (simp add: mapM_x_mapM) apply (clarsimp simp:valid_def return_def bind_def) apply (drule spec) apply (erule impE) @@ -1185,7 +1180,7 @@ lemma unmapPage_valid_duplicates'[wp]: in mapM_x_storePTE_update_helper[where sz = 7]) apply (wp checkMappingPPtr_inv lookupPTSlot_page_table_at' Arch_R.lookupPTSlot_aligned | simp)+ - apply (rule hoare_post_imp_R[OF lookupPTSlot_aligned[where sz= vmpage_size]]) + apply (rule hoare_strengthen_postE_R[OF lookupPTSlot_aligned[where sz= vmpage_size]]) apply (simp add:pageBitsForSize_def pt_bits_def pte_bits_def) apply (drule upto_enum_step_shift[where n = 7 and m = 3,simplified]) apply (clarsimp simp: mask_def add.commute upto_enum_step_def largePagePTEOffsets_def @@ -1199,7 +1194,7 @@ lemma unmapPage_valid_duplicates'[wp]: in mapM_x_storePDE_update_helper[where sz = 7]) apply (wp mapM_x_mapM_valid checkMappingPPtr_inv)+ apply (clarsimp simp:conj_comms) - apply (rule hoare_post_imp_R[where Q'= "\r. pspace_aligned' and + apply (rule hoare_strengthen_postE_R[where Q'= "\r. pspace_aligned' and (\s. vs_valid_duplicates' (ksPSpace s)) and K(vmsz_aligned' vptr vmpage_size \ is_aligned r pdBits) and page_directory_at' (lookup_pd_slot r vptr && ~~ mask pdBits)"]) @@ -1257,7 +1252,7 @@ lemma unmapPageTable_valid_duplicates'[wp]: apply (wp storePDE_no_duplicates')+ apply (simp add:pageTableMapped_def) apply (wp getPDE_wp |wpc|simp)+ - apply (rule hoare_post_imp_R[where Q' = "\r s. vs_valid_duplicates' (ksPSpace s)"]) + apply (rule hoare_strengthen_postE_R[where Q' = "\r s. vs_valid_duplicates' (ksPSpace s)"]) apply wp apply (clarsimp simp:ko_wp_at'_def obj_at'_real_def projectKO_opt_pde) apply (clarsimp simp: nondup_obj_def @@ -1321,7 +1316,7 @@ lemma finaliseSlot_valid_duplicates'[wp]: finaliseSlot slot exposed \\_ s. 
invs' s \ vs_valid_duplicates' (ksPSpace s) \ sch_act_simple s \" unfolding finaliseSlot_def - apply (rule validE_valid, rule hoare_pre, rule hoare_post_impErr, rule use_spec) + apply (rule validE_valid, rule hoare_pre, rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where p=slot and slot=slot and Pr="vs_valid_duplicates' o ksPSpace"]) apply (simp_all add: valid_duplicates_finalise_prop_stuff) apply (wp | simp add: o_def)+ @@ -1335,7 +1330,6 @@ lemma cteDelete_valid_duplicates': apply (rule hoare_gen_asm) apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply simp apply (rule valid_validE) apply (rule hoare_post_imp[OF _ finaliseSlot_valid_duplicates']) apply simp @@ -1415,7 +1409,7 @@ lemma invokeCNode_valid_duplicates'[wp]: apply (simp add:invs_valid_objs' invs_pspace_aligned') apply (clarsimp simp add:invokeCNode_def | wp | intro conjI)+ apply (rule hoare_pre) - apply (wp hoare_unless_wp | wpc | simp)+ + apply (wp unless_wp | wpc | simp)+ apply (simp add:invokeCNode_def) apply (wp getSlotCap_inv hoare_drop_imp |simp add:locateSlot_conv getThreadCallerSlot_def @@ -1522,7 +1516,7 @@ lemma mapM_x_storePTE_slot_updates: apply (subst the_ith_mapM_x_reduce, simp) apply (drule_tac x="Suc n" in meta_spec) apply simp - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: storePTE_def setObject_def) apply (wp hoare_drop_imps | simp add:split_def updateObject_default_def)+ @@ -1556,7 +1550,7 @@ lemma mapM_x_storePDE_slot_updates: apply (subst the_ith_mapM_x_reduce, simp) apply (drule_tac x="Suc n" in meta_spec) apply simp - apply (rule hoare_seq_ext, assumption) + apply (rule bind_wp, assumption) apply (thin_tac "valid P f Q" for P f Q) apply (simp add: storePDE_def setObject_def) apply (wp hoare_drop_imps | simp add:split_def updateObject_default_def)+ @@ -1847,7 +1841,7 @@ lemma placeASIDPool_valid_duplicates'[wp]: placeNewObject' ptr (KOArch (KOASIDPool makeObject)) 0 \\rv s. vs_valid_duplicates' (ksPSpace s)\" apply (simp add: placeNewObject'_def) - apply (wp hoare_unless_wp | wpc | simp add:alignError_def split_def)+ + apply (wp unless_wp | wpc | simp add:alignError_def split_def)+ apply (subgoal_tac "vs_valid_duplicates' (\a. if a = ptr then Some (KOArch (KOASIDPool makeObject)) else ksPSpace s a)") apply fastforce apply clarsimp @@ -1899,7 +1893,7 @@ lemma performArchInvocation_valid_duplicates': apply (clarsimp simp:cte_wp_at_ctes_of) apply (case_tac ctea,clarsimp) apply (frule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) - apply (wp static_imp_wp|simp)+ + apply (wp hoare_weak_lift_imp|simp)+ apply (simp add:placeNewObject_def) apply (wp |simp add:alignError_def unless_def|wpc)+ apply (wp updateFreeIndex_pspace_no_overlap' hoare_drop_imp @@ -1949,10 +1943,10 @@ lemma tc_valid_duplicates': apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) apply ((wp case_option_wp threadSet_invs_trivial - hoare_vcg_all_lift threadSet_cap_to' static_imp_wp | simp add: inQ_def | fastforce)+)[2] + hoare_vcg_all_lift threadSet_cap_to' hoare_weak_lift_imp | simp add: inQ_def | fastforce)+)[2] apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. 
x s" for s]) - apply ((wp case_option_wp setMCPriority_invs' static_imp_wp + apply ((wp case_option_wp setMCPriority_invs' hoare_weak_lift_imp typ_at_lifts[OF setMCPriority_typ_at'] hoare_vcg_all_lift threadSet_cap_to' | simp add: inQ_def | fastforce)+)[2] apply ((simp only: simp_thms cases_simp cong: conj_cong @@ -1969,9 +1963,9 @@ lemma tc_valid_duplicates': typ_at_lifts [OF setPriority_typ_at'] assertDerived_wp threadSet_cte_wp_at' - hoare_vcg_all_lift_R + hoare_vcg_all_liftE_R hoare_vcg_all_lift - static_imp_wp + hoare_weak_lift_imp )[1] | wpc | simp add: inQ_def @@ -2051,7 +2045,7 @@ crunch valid_duplicates' [wp]: crunch valid_duplicates' [wp]: tcbSchedAppend "(\s. vs_valid_duplicates' (ksPSpace s))" - (simp:crunch_simps wp:hoare_unless_wp) + (simp:crunch_simps wp:unless_wp) lemma timerTick_valid_duplicates'[wp]: "\\s. vs_valid_duplicates' (ksPSpace s)\ @@ -2089,9 +2083,8 @@ lemma activate_sch_valid_duplicates'[wp]: activateThread \\rv s. vs_valid_duplicates' (ksPSpace s)\" apply (simp add: activateThread_def getCurThread_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gets_sp]) - apply (rule hoare_seq_ext[where B="\st s. (runnable' or idle') st - \ vs_valid_duplicates' (ksPSpace s)"]) + apply (rule bind_wp [OF _ gets_sp]) + apply (rule bind_wp[where Q'="\st s. (runnable' or idle') st \ vs_valid_duplicates' (ksPSpace s)"]) apply (rule hoare_pre) apply (wp | wpc | simp add: setThreadState_runnable_simp)+ apply (clarsimp simp: ct_in_state'_def cur_tcb'_def pred_tcb_at' @@ -2103,7 +2096,7 @@ crunch valid_duplicates'[wp]: crunch valid_duplicates'[wp]: receiveIPC "\s. vs_valid_duplicates' (ksPSpace s)" -(wp: getNotification_wp gbn_wp') + (wp: getNotification_wp gbn_wp' crunch_wps) crunch valid_duplicates'[wp]: deleteCallerCap "\s. vs_valid_duplicates' (ksPSpace s)" @@ -2111,10 +2104,11 @@ crunch valid_duplicates'[wp]: crunch valid_duplicates'[wp]: handleReply "\s. vs_valid_duplicates' (ksPSpace s)" + (wp: crunch_wps) crunch valid_duplicates'[wp]: handleYield "\s. vs_valid_duplicates' (ksPSpace s)" - (ignore: threadGet simp:crunch_simps wp:hoare_unless_wp) + (ignore: threadGet simp:crunch_simps wp:unless_wp) crunch valid_duplicates'[wp]: "VSpace_H.handleVMFault", handleHypervisorFault "\s. vs_valid_duplicates' (ksPSpace s)" @@ -2144,7 +2138,7 @@ lemma handleRecv_valid_duplicates'[wp]: apply (rule_tac Q="\rv s. vs_valid_duplicates' (ksPSpace s)" - in hoare_post_impErr[rotated]) + in hoare_strengthen_postE[rotated]) apply (clarsimp simp: isCap_simps sch_act_sane_not) apply assumption @@ -2178,20 +2172,24 @@ lemma non_kernel_IRQs_strg: (\y. irq = Some y) \ invs' s \ (the irq \ non_kernel_IRQs \ P) \ Q" by auto +(* nothing extra needed on this architecture *) +defs fastpathKernelAssertions_def: + "fastpathKernelAssertions \ \s. True" + lemma callKernel_valid_duplicates': "\invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread) and (\s. e \ Interrupt \ ct_running' s)\ callKernel e \\rv s. vs_valid_duplicates' (ksPSpace s)\" - apply (simp add: callKernel_def) + apply (simp add: callKernel_def fastpathKernelAssertions_def) apply (rule hoare_pre) apply (wp activate_invs' activate_sch_act schedule_sch hoare_drop_imp[where R="\_. 
kernelExitAssertions"] schedule_sch_act_simple he_invs' hoare_vcg_if_lift3 | simp add: no_irq_getActiveIRQ | strengthen non_kernel_IRQs_strg, simp cong: conj_cong)+ - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule valid_validE) prefer 2 apply assumption diff --git a/proof/refine/ARM_HYP/RAB_FN.thy b/proof/refine/ARM_HYP/RAB_FN.thy index 06f88d110a..969cb775de 100644 --- a/proof/refine/ARM_HYP/RAB_FN.thy +++ b/proof/refine/ARM_HYP/RAB_FN.thy @@ -94,35 +94,35 @@ proof (induct cap capptr bits rule: resolveAddressBits.induct) apply (subst resolveAddressBits.simps, subst resolveAddressBitsFn.simps) apply (simp only: Let_def haskell_assertE_def K_bind_def) apply (rule monadic_rewrite_name_pre) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule_tac P="(=) s" in monadic_rewrite_trans) (* step 1, apply the induction hypothesis on the lhs *) apply (rule monadic_rewrite_named_if monadic_rewrite_named_bindE - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="returnOk y" for y] - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="x $ y" for x y] - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="assertE P" for P s] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="returnOk y" for y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="x $ y" for x y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="assertE P" for P s] TrueI)+ apply (rule_tac g="case nextCap of CNodeCap a b c d \ ?g nextCap cref bitsLeft - | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_imp) + | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_guard_imp) apply (wpc | rule monadic_rewrite_refl "1.hyps" | simp only: capability.case haskell_assertE_def simp_thms)+ apply (clarsimp simp: in_monad locateSlot_conv getSlotCap_def dest!: in_getCTE fst_stateAssertD) apply (fastforce elim: cte_wp_at_weakenE') - apply (rule monadic_rewrite_refl[THEN monadic_rewrite_imp], simp) + apply (rule monadic_rewrite_refl[THEN monadic_rewrite_guard_imp], simp) (* step 2, split and match based on the lhs structure *) apply (simp add: locateSlot_conv liftE_bindE unlessE_def whenE_def if_to_top_of_bindE assertE_def stateAssert_def bind_assoc assert_def if_to_top_of_bind getSlotCap_def split del: if_split cong: if_cong) - apply (rule monadic_rewrite_if_lhs monadic_rewrite_symb_exec_l'[OF get_wp] + apply (rule monadic_rewrite_if_l monadic_rewrite_symb_exec_l'[OF _ get_wp, rotated] empty_fail_get no_fail_get impI monadic_rewrite_refl get_wp | simp add: throwError_def returnOk_def locateSlotFun_def if_not_P isCNodeCap_capUntypedPtr_capCNodePtr cong: if_cong split del: if_split)+ - apply (rule monadic_rewrite_symb_exec_l'[OF getCTE_inv _ _ _ getCTE_cte_wp_at]) + apply (rule monadic_rewrite_symb_exec_l'[OF _ getCTE_inv _ _ getCTE_cte_wp_at, rotated]) apply simp apply (rule impI, rule no_fail_getCTE) apply (simp add: monadic_rewrite_def simpler_gets_def return_def returnOk_def diff --git a/proof/refine/ARM_HYP/Refine.thy b/proof/refine/ARM_HYP/Refine.thy index 2ec62aecd3..dcd667b9d0 100644 --- a/proof/refine/ARM_HYP/Refine.thy +++ b/proof/refine/ARM_HYP/Refine.thy @@ -82,7 +82,7 @@ lemma typ_at_UserDataI: apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def cte_relation_def other_obj_relation_def - pde_relation_def + pde_relation_def tcb_relation_cut_def split: Structures_A.kernel_object.split_asm Structures_H.kernel_object.split_asm 
if_split_asm arch_kernel_obj.split_asm) @@ -113,7 +113,7 @@ lemma typ_at_DeviceDataI: apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def cte_relation_def other_obj_relation_def - pde_relation_def + pde_relation_def tcb_relation_cut_def split: Structures_A.kernel_object.split_asm Structures_H.kernel_object.split_asm if_split_asm arch_kernel_obj.split_asm) @@ -280,7 +280,7 @@ lemma kernel_entry_invs: thread_set_ct_running thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext - static_imp_wp + hoare_weak_lift_imp | clarsimp simp add: tcb_cap_cases_def active_from_running)+ done @@ -296,18 +296,18 @@ definition lemma do_user_op_valid_list:"\valid_list\ do_user_op f tc \\_. valid_list\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_valid_sched:"\valid_sched\ do_user_op f tc \\_. valid_sched\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_sched_act: "\\s. P (scheduler_action s)\ do_user_op f tc \\_ s. P (scheduler_action s)\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_invs2: @@ -402,6 +402,19 @@ abbreviation valid_domain_list' :: "'a kernel_state_scheme \ bool" w lemmas valid_domain_list'_def = valid_domain_list_2_def +lemma fastpathKernelAssertions_cross: + "\ (s,s') \ state_relation; invs s; valid_arch_state' s'\ \ fastpathKernelAssertions s'" + unfolding fastpathKernelAssertions_def + by simp + +(* this is only needed for callKernel, where we have invs' on concrete side *) +lemma corres_cross_over_fastpathKernelAssertions: + "\ \s. P s \ invs s; \s'. Q s' \ invs' s'; + corres r P (Q and fastpathKernelAssertions) f g \ \ + corres r P Q f g" + by (rule corres_cross_over_guard[where Q="Q and fastpathKernelAssertions"]) + (fastforce elim: fastpathKernelAssertions_cross)+ + defs kernelExitAssertions_def: "kernelExitAssertions s \ 0 < ksDomainTime s \ valid_domain_list' s" @@ -422,9 +435,9 @@ lemma kernelEntry_invs': (\s. 0 < ksDomainTime s) and valid_domain_list' \" apply (simp add: kernelEntry_def) apply (wp ckernel_invs callKernel_valid_duplicates' callKernel_domain_time_left - threadSet_invs_trivial threadSet_ct_running' select_wp + threadSet_invs_trivial threadSet_ct_running' TcbAcc_R.dmo_invs' callKernel_domain_time_left - static_imp_wp + hoare_weak_lift_imp | clarsimp simp: user_memory_update_def no_irq_def tcb_at_invs' atcbContextSet_def valid_domain_list'_def)+ done @@ -504,7 +517,7 @@ lemma doUserOp_invs': (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and (\s. 0 < ksDomainTime s) and valid_domain_list'\" apply (simp add: doUserOp_def split_def ex_abs_def) - apply (wp device_update_invs' select_wp + apply (wp device_update_invs' | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def user_memory_update_def))+ apply (clarsimp simp: user_memory_update_def simpler_modify_def @@ -518,7 +531,7 @@ lemma doUserOp_valid_duplicates': doUserOp f tc \\_ s. 
vs_valid_duplicates' (ksPSpace s)\" apply (simp add: doUserOp_def split_def) - apply (wp dmo_invs' select_wp) + apply (wp dmo_invs') apply clarsimp done @@ -565,7 +578,7 @@ lemma kernel_corres': apply simp apply (rule handleInterrupt_corres[simplified dc_def]) apply simp - apply (wp hoare_drop_imps hoare_vcg_all_lift)[1] + apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift simp: schact_is_rct_def)[1] apply simp apply (rule_tac Q="\irq s. irq \ Some ` non_kernel_IRQs \ invs' s \ (\irq'. irq = Some irq' \ @@ -573,7 +586,7 @@ lemma kernel_corres': in hoare_post_imp) apply clarsimp apply (wp doMachineOp_getActiveIRQ_IRQ_active handle_event_valid_sched | simp)+ - apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_post_impErr) + apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_strengthen_postE) apply wpsimp+ apply (simp add: invs'_def valid_state'_def) apply (rule corres_split[OF schedule_corres]) @@ -581,11 +594,11 @@ lemma kernel_corres': apply (wp schedule_invs' hoare_vcg_if_lift2 dmo_getActiveIRQ_non_kernel | simp cong: rev_conj_cong | strengthen None_drop | subst Ex_Some_conv)+ apply (rule_tac Q="\_. valid_sched and invs and valid_list" and E="\_. valid_sched and invs and valid_list" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (wp handle_event_valid_sched hoare_vcg_if_lift3 | simp | strengthen non_kernel_IRQs_strg[where Q=True, simplified], simp cong: conj_cong)+ - apply (clarsimp simp: active_from_running) + apply (clarsimp simp: active_from_running schact_is_rct_def) apply (clarsimp simp: active_from_running') done @@ -598,6 +611,8 @@ lemma kernel_corres: (\s. vs_valid_duplicates' (ksPSpace s))) (call_kernel event) (callKernel event)" unfolding callKernel_def K_bind_def + apply (rule corres_cross_over_fastpathKernelAssertions, blast+) + apply (rule corres_stateAssert_r) apply (rule corres_guard_imp) apply (rule corres_add_noop_lhs2) apply (simp only: bind_assoc[symmetric]) @@ -641,7 +656,7 @@ lemma entry_corres: apply (rule corres_split[OF getCurThread_corres]) apply (rule corres_split) apply simp - apply (rule threadset_corresT) + apply (rule threadset_corresT; simp?) 
apply (simp add: tcb_relation_def arch_tcb_relation_def arch_tcb_context_set_def atcbContextSet_def) apply (clarsimp simp: tcb_cap_cases_def) @@ -653,18 +668,19 @@ lemma entry_corres: apply (simp add: tcb_relation_def arch_tcb_relation_def arch_tcb_context_get_def atcbContextGet_def) apply wp+ - apply (rule hoare_strengthen_post, rule akernel_invs_det_ext, simp add: invs_def cur_tcb_def) + apply (rule hoare_strengthen_post, rule akernel_invs_det_ext, + simp add: invs_def valid_state_def valid_pspace_def cur_tcb_def) apply (rule hoare_strengthen_post, rule ckernel_invs, simp add: invs'_def cur_tcb'_def) apply ((wp thread_set_invs_trivial thread_set_ct_running - thread_set_not_state_valid_sched static_imp_wp + thread_set_not_state_valid_sched hoare_weak_lift_imp hoare_vcg_disj_lift ct_in_state_thread_state_lift - | simp add: tcb_cap_cases_def thread_set_no_change_tcb_state)+)[1] + | simp add: tcb_cap_cases_def thread_set_no_change_tcb_state schact_is_rct_def)+)[1] apply (simp add: pred_conj_def cong: conj_cong) apply (wp threadSet_invs_trivial threadSet_ct_running' - static_imp_wp hoare_vcg_disj_lift + hoare_weak_lift_imp hoare_vcg_disj_lift | simp add: ct_in_state'_def atcbContextSet_def | (wps, wp threadSet_st_tcb_at2))+ - apply (clarsimp simp: invs_def cur_tcb_def) + apply (fastforce simp: invs_def cur_tcb_def) apply (clarsimp simp: ct_in_state'_def) done diff --git a/proof/refine/ARM_HYP/Retype_R.thy b/proof/refine/ARM_HYP/Retype_R.thy index 36c2bb8d27..616184c2f0 100644 --- a/proof/refine/ARM_HYP/Retype_R.thy +++ b/proof/refine/ARM_HYP/Retype_R.thy @@ -61,8 +61,6 @@ lemma objBitsKO_bounded2[simp]: by (simp add: objBits_simps' word_bits_def vspace_bits_defs vcpu_bits_def archObjSize_def split: Structures_H.kernel_object.split arch_kernel_object.split) -declare select_singleton_is_return[simp] - definition APIType_capBits :: "ARM_HYP_H.object_type \ nat \ nat" where @@ -315,7 +313,7 @@ lemma state_relation_null_filterE: null_filter (caps_of_state t) = null_filter (caps_of_state s); null_filter' (ctes_of t') = null_filter' (ctes_of s'); pspace_relation (kheap t) (ksPSpace t'); - ekheap_relation (ekheap t) (ksPSpace t'); + ekheap_relation (ekheap t) (ksPSpace t'); ready_queues_relation t t'; ghost_relation (kheap t) (gsUserPages t') (gsCNodes t'); valid_list s; pspace_aligned' s'; pspace_distinct' s'; valid_objs s; valid_mdb s; pspace_aligned' t'; pspace_distinct' t'; @@ -1004,7 +1002,7 @@ lemma retype_ekheap_relation: apply (intro impI conjI) apply clarsimp apply (drule_tac x=a in bspec,force) - apply (clarsimp simp add: other_obj_relation_def split: if_split_asm) + apply (clarsimp simp: tcb_relation_cut_def split: if_split_asm) apply (case_tac ko,simp_all) apply (clarsimp simp add: makeObjectKO_def cong: if_cong split: sum.splits Structures_H.kernel_object.splits arch_kernel_object.splits ARM_HYP_H.object_type.splits @@ -1185,6 +1183,11 @@ lemma update_gs_id: by (simp add: no_gs_types_def update_gs_def split: Structures_A.apiobject_type.splits aobject_type.splits) +lemma ksReadyQueues_update_gs[simp]: + "ksReadyQueues (update_gs tp us addrs s) = ksReadyQueues s" + by (simp add: update_gs_def + split: aobject_type.splits Structures_A.apiobject_type.splits) + lemma update_gs_simps[simp]: "update_gs Structures_A.apiobject_type.CapTableObject us ptrs = gsCNodes_update (\cns x. 
if x \ ptrs then Some us else cns x)" @@ -1199,6 +1202,144 @@ lemma update_gs_simps[simp]: else ups x)" by (simp_all add: update_gs_def) +lemma retype_ksPSpace_dom_same: + fixes x v + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ksPSpace s' x = Some v \ + foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s') x + = Some v" +proof - + have cover':"range_cover ptr sz (objBitsKO ko) m" + by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) + assume "ksPSpace s' x = Some v" + thus ?thesis + apply (clarsimp simp:foldr_upd_app_if[folded data_map_insert_def]) + apply (drule domI[where m = "ksPSpace s'"]) + apply (drule(1) IntI) + apply (erule_tac A = "A \ B" for A B in in_emptyE[rotated]) + apply (rule disjoint_subset[OF new_cap_addrs_subset[OF cover']]) + apply (clarsimp simp:ptr_add_def field_simps) + apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn']) + done +qed + +lemma retype_ksPSpace_None: + assumes ad: "pspace_aligned' s" "pspace_distinct' s" "pspace_bounded' s" + assumes pn: "pspace_no_overlap' ptr sz s" + assumes cover: "range_cover ptr sz (objBitsKO val + gbits) n" + shows "\x. x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" +proof - + note cover' = range_cover_rel[where sbit' = "objBitsKO val",OF cover _ refl,simplified] + show "\x. x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" + apply (drule subsetD[OF new_cap_addrs_subset [OF cover' ]]) + apply (insert pspace_no_overlap_disjoint' [OF ad(1) pn]) + apply (fastforce simp: ptr_add_def p_assoc_help) + done +qed + +lemma retype_tcbSchedPrevs_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedPrevs_of + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedPrevs_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb projectKOs + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_tcbSchedNexts_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedNexts_of + (s'\ksPSpace := foldr (\addr. 
data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedNexts_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb projectKOs + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_inQ: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "\d p. + inQ d p |< tcbs_of' + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = inQ d p |< tcbs_of' s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (intro allI) + apply (rule ext) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + fastforce simp: makeObjectKO_def makeObject_tcb projectKOs + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm + | fastforce)+ +qed + +lemma retype_ready_queues_relation: + assumes rlqr: "ready_queues_relation s s'" + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ready_queues_relation + (s \kheap := foldr (\p. data_map_insert p (default_object (APIType_map2 ty) dev us)) + (retype_addrs ptr (APIType_map2 ty) n us) (kheap s)\) + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\)" + using rlqr + unfolding ready_queues_relation_def Let_def + by (clarsimp simp: retype_tcbSchedNexts_of[OF vs' pn' ko cover num_r, simplified] + retype_tcbSchedPrevs_of[OF vs' pn' ko cover num_r, simplified] + retype_inQ[OF vs' pn' ko cover num_r, simplified]) + lemma retype_state_relation: notes data_map_insert_def[simp del] assumes sr: "(s, s') \ state_relation" @@ -1227,7 +1368,7 @@ lemma retype_state_relation: \ state_relation" (is "(ekheap_update (\_. 
?eps) s\kheap := ?ps\, update_gs _ _ _ (s'\ksPSpace := ?ps'\)) \ state_relation") - proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) + proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) have cover':"range_cover ptr sz (objBitsKO ko) m" by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) @@ -1418,6 +1559,16 @@ lemma retype_state_relation: else cns x" in exI, simp) apply (rule_tac x=id in exI, simp)+ done + + have rdyqrel: "ready_queues_relation s s'" + using sr by (simp add: state_relation_def) + + thus "ready_queues_relation_2 (ready_queues s) (ksReadyQueues s') + (?ps' |> tcb_of' |> tcbSchedNext) (?ps' |> tcb_of' |> tcbSchedPrev) + (\d p. inQ d p |< (?ps' |> tcb_of'))" + using retype_ready_queues_relation[OF _ vs' pn' ko cover num_r] + by (clarsimp simp: ready_queues_relation_def Let_def) + qed lemma new_cap_addrs_fold': @@ -1529,7 +1680,7 @@ lemma retype_region_ext_modify_kheap_futz: done lemmas retype_region_ext_modify_kheap_futz' = - fun_cong[OF arg_cong[where f=NonDetMonad.bind, + fun_cong[OF arg_cong[where f=Nondet_Monad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] lemma foldr_upd_app_if_eta_futz: @@ -2433,7 +2584,6 @@ qed lemma other_objs_default_relation: "\ case ty of Structures_A.EndpointObject \ ko = injectKO (makeObject :: endpoint) | Structures_A.NotificationObject \ ko = injectKO (makeObject :: Structures_H.notification) - | Structures_A.TCBObject \ ko = injectKO (makeObject :: tcb) | _ \ False \ \ obj_relation_retype (default_object ty dev n) ko" apply (rule obj_relation_retype_other_obj) @@ -2454,6 +2604,13 @@ lemma other_objs_default_relation: split: Structures_A.apiobject_type.split_asm) done +lemma tcb_relation_retype: + "obj_relation_retype (default_object Structures_A.TCBObject dev n) (KOTCB makeObject)" + by (clarsimp simp: default_object_def obj_relation_retype_def tcb_relation_def default_tcb_def + makeObject_tcb makeObject_cte new_context_def newContext_def tcb_relation_cut_def + fault_rel_optionation_def initContext_def default_priority_def + default_arch_tcb_def newArchTCB_def arch_tcb_relation_def objBits_simps') + lemma captable_relation_retype: "n < word_bits \ obj_relation_retype (default_object Structures_A.CapTableObject dev n) (KOCTE makeObject)" @@ -2586,7 +2743,6 @@ lemma update_gs_ksMachineState_update_swap: declare hoare_in_monad_post[wp del] declare univ_get_wp[wp del] -declare result_in_set_wp[wp del] crunch valid_arch_state'[wp]: copyGlobalMappings "valid_arch_state'" (wp: crunch_wps) @@ -3155,10 +3311,10 @@ proof (intro conjI impI) apply (rule_tac ptr="x + xa" in cte_wp_at_tcbI', assumption+) apply fastforce apply simp - apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound user_context) - apply (case_tac thread_state, simp_all add: valid_tcb_state'_def - valid_bound_ntfn'_def obj_at_disj' - split: option.splits)[2] + apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound tcbprev tcbnext user_context) + apply (case_tac thread_state, simp_all add: valid_tcb_state'_def valid_bound_tcb'_def + valid_bound_ntfn'_def obj_at_disj' opt_tcb_at'_def + split: option.splits)[4] apply (clarsimp simp add: valid_arch_tcb'_def typ_at_to_obj_at_arches obj_at_disj') apply (simp add: valid_cte'_def) apply (frule pspace_alignedD' [OF _ ad(1)]) @@ -3428,7 +3584,7 @@ lemma 
createObjects_orig_cte_wp_at2': apply (rule handy_prop_divs) apply (wp createObjects_orig_obj_at2'[where sz = sz], simp) apply (simp add: tcb_cte_cases_def) - including no_pre + including classic_wp_pre apply (wp handy_prop_divs createObjects_orig_obj_at2'[where sz = sz] | simp add: o_def cong: option.case_cong)+ done @@ -3449,7 +3605,7 @@ lemma createNewCaps_cte_wp_at2: \ pspace_no_overlap' ptr sz s\ createNewCaps ty ptr n objsz dev \\rv s. P (cte_wp_at' P' p s)\" - including no_pre + including classic_wp_pre apply (simp add: createNewCaps_def createObjects_def ARM_HYP_H.toAPIType_def split del: if_split) apply (case_tac ty; simp add: createNewCaps_def createObjects_def Arch_createNewCaps_def @@ -3933,16 +4089,6 @@ lemma sch_act_wf_lift_asm: apply auto done -lemma valid_queues_lift_asm': - assumes tat: "\d p t. \\s. \ obj_at' (inQ d p) t s \ Q d p s\ f \\_ s. \ obj_at' (inQ d p) t s\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\\s. valid_queues' s \ (\d p. Q d p s)\ f \\_. valid_queues'\" - apply (simp only: valid_queues'_def imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - tat prq) - apply simp - done - lemma createObjects'_ct[wp]: "\\s. P (ksCurThread s)\ createObjects' p n v us \\rv s. P (ksCurThread s)\" by (rule createObjects_pspace_only, simp) @@ -4185,7 +4331,7 @@ lemma createNewCaps_idle'[wp]: apply (rename_tac apiobject_type) apply (case_tac apiobject_type, simp_all split del: if_split)[1] apply (wp, simp) - including no_pre + including classic_wp_pre apply (wp mapM_x_wp' createObjects_idle' threadSet_idle' @@ -4347,7 +4493,7 @@ lemma createNewCaps_pde_mappings'[wp]: lemma createObjects'_irq_states' [wp]: "\valid_irq_states'\ createObjects' a b c d \\_. valid_irq_states'\" apply (simp add: createObjects'_def split_def) - apply (wp hoare_unless_wp|wpc|simp add: alignError_def)+ + apply (wp unless_wp|wpc|simp add: alignError_def)+ apply fastforce done @@ -4359,34 +4505,149 @@ crunch ksMachine[wp]: createObjects "\s. P (ksMachineState s)" crunch cur_domain[wp]: createObjects "\s. P (ksCurDomain s)" (simp: unless_def) -lemma createNewCaps_valid_queues': - "\valid_queues' and pspace_no_overlap' ptr sz - and pspace_aligned' and pspace_distinct' - and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ - createNewCaps ty ptr n us d - \\rv. valid_queues'\" - apply (wp valid_queues_lift_asm' [OF createNewCaps_obj_at2]) - apply (clarsimp simp: projectKOs) - apply (simp add: makeObjectKO_def - split: object_type.split_asm - apiobject_type.split_asm) - apply (clarsimp simp: inQ_def) - apply (auto simp: makeObject_tcb - split: object_type.splits apiobject_type.splits) +lemma createObjects_valid_bitmaps: + "createObjects' ptr n val gbits \valid_bitmaps\" + apply (clarsimp simp: createObjects'_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_bitmaps_def valid_bitmapQ_def bitmapQ_def bitmapQ_no_L2_orphans_def + bitmapQ_no_L1_orphans_def) done -lemma createNewCaps_valid_queues: - "\valid_queues and pspace_no_overlap' ptr sz - and pspace_aligned' and pspace_distinct' - and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ - createNewCaps ty ptr n us d - \\rv. 
valid_queues\" -apply (rule hoare_gen_asm) -apply (wp valid_queues_lift_asm createNewCaps_obj_at2[where sz=sz]) -apply (clarsimp simp: projectKO_opts_defs) -apply (simp add: inQ_def) -apply (wp createNewCaps_pred_tcb_at'[where sz=sz] | simp)+ -done +lemma valid_bitmaps_gsCNodes_update[simp]: + "valid_bitmaps (gsCNodes_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_bitmaps_gsUserPages_update[simp]: + "valid_bitmaps (gsUserPages_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +crunches curDomain, copyGlobalMappings + for valid_bitmaps[wp]: valid_bitmaps + and sched_pointers[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + +lemma createNewCaps_valid_bitmaps: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_bitmaps s\ + createNewCaps ty ptr n us dev + \\_. valid_bitmaps\" + unfolding createNewCaps_def + apply (clarsimp simp: toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_bitmaps) + by (wpsimp wp: createObjects_valid_bitmaps[simplified o_def] mapM_x_wp + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ + +lemma createObjects_sched_queues: + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True) + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + (is "\ \s. _ \ _ \ ?Pre s \ _ \\_. _\") +proof (rule hoare_grab_asm)+ + assume not_0: "\ n = 0" + and cover: "range_cover ptr sz ((objBitsKO val) + gbits) n" + then show + "\\s. ?Pre s\ createObjects' ptr n val gbits \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + proof - + have shiftr_not_zero:" 1 \ ((of_nat n)::machine_word) << gbits" + using range_cover_not_zero_shift[OF not_0 cover,where gbits = gbits] + by (simp add:word_le_sub1) + show ?thesis + supply projectKOs[simp] + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: shiftL_nat data_map_insert_def[symmetric] + new_cap_addrs_fold'[OF shiftr_not_zero] + simp del: data_map_insert_def) + using range_cover.unat_of_nat_n_shift[OF cover, where gbits=gbits, simplified] + apply (clarsimp simp: foldr_upd_app_if) + apply (rule_tac a="tcbSchedNexts_of s" and b="tcbSchedPrevs_of s" + in rsubst2[rotated, OF sym sym, where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp split: kernel_object.splits option.splits) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp split: kernel_object.splits option.splits) + apply simp + done + qed +qed + +lemma createNewCaps_sched_queues: + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + assumes not_0: "n \ 0" + shows + "\\s. 
valid_pspace' s \ pspace_no_overlap' ptr sz s + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + createNewCaps ty ptr n us dev + \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + unfolding createNewCaps_def + apply (clarsimp simp: ARM_HYP_H.toAPIType_def split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (wp, simp) + apply (insert cover not_0) + apply (wpsimp wp: mapM_x_wp' createObjects_sched_queues threadSet_sched_pointers + simp: curDomain_def createObjects_def) + apply (simp add: valid_pspace'_def makeObject_tcb) + by (wp createObjects_sched_queues mapM_x_wp' + | clarsimp simp: objBits_simps APIType_capBits_def createObjects_def + vspace_bits_defs archObjSize_def + | intro conjI impI + | force)+ + + +lemma createObjects_valid_sched_pointers: + "\\s. valid_sched_pointers s + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True)\ + createObjects' ptr n val gbits + \\_. valid_sched_pointers\" + supply projectKOs[simp] + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_sched_pointers_def foldr_upd_app_if opt_pred_def opt_map_def comp_def) + apply (cases "tcb_of' val"; clarsimp) + done + +lemma createNewCaps_valid_sched_pointers: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_sched_pointers s\ + createNewCaps ty ptr n us dev + \\_. valid_sched_pointers\" + unfolding createNewCaps_def + apply (clarsimp simp: toAPIType_def split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_sched_pointers) + by (wpsimp wp: createObjects_valid_sched_pointers[simplified o_def] mapM_x_wp + threadSet_valid_sched_pointers + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ lemma mapM_x_threadSet_valid_pspace: "\valid_pspace' and K (curdom \ maxDomain)\ @@ -4555,7 +4816,7 @@ proof - apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) apply (rule hoare_pre) apply (wps a b c d) - apply (wp static_imp_wp e' hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) apply (auto simp: obj_at'_def ct_in_state'_def projectKOs st_tcb_at'_def) done qed @@ -4640,7 +4901,7 @@ lemma createObjects_null_filter': createObjects' ptr n val gbits \\addrs a. 
P (null_filter' (ctes_of a))\" apply (clarsimp simp: createObjects'_def split_def) - apply (wp hoare_unless_wp|wpc + apply (wp unless_wp|wpc | clarsimp simp:haskell_assert_def alignError_def split del: if_splits simp del:fun_upd_apply)+ apply (subst new_cap_addrs_fold') @@ -4785,12 +5046,13 @@ proof (rule hoare_gen_asm, erule conjE) createNewCaps_valid_arch_state valid_irq_node_lift_asm [unfolded pred_conj_def, OF _ createNewCaps_obj_at'] createNewCaps_irq_handlers' createNewCaps_vms - createNewCaps_valid_queues - createNewCaps_valid_queues' createNewCaps_pred_tcb_at' cnc_ct_not_inQ createNewCaps_ct_idle_or_in_cur_domain' createNewCaps_sch_act_wf createNewCaps_urz[where sz=sz] + createNewCaps_sched_queues[OF cover not_0] + createNewCaps_valid_sched_pointers + createNewCaps_valid_bitmaps | simp)+ using not_0 apply (clarsimp simp: valid_pspace'_def) @@ -4863,35 +5125,6 @@ lemma createObjects_sch: apply (wp sch_act_wf_lift_asm createObjects_pred_tcb_at' createObjects_orig_obj_at3 | force)+ done -lemma createObjects_queues: - "\\s. valid_queues s \ pspace_aligned' s \ pspace_distinct' s \ - pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ - createObjects ptr n val gbits - \\rv. valid_queues\" - apply (wp valid_queues_lift_asm [unfolded pred_conj_def, OF createObjects_orig_obj_at3] - createObjects_pred_tcb_at' [unfolded pred_conj_def]) - apply fastforce - apply wp+ - apply fastforce - done - -lemma createObjects_queues': - assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" - shows - "\\s. valid_queues' s \ pspace_aligned' s \ pspace_distinct' s \ - pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ - createObjects ptr n val gbits - \\rv. valid_queues'\" - apply (simp add: createObjects_def) - apply (wp valid_queues_lift_asm') - apply (wp createObjects_orig_obj_at2') - apply clarsimp - apply assumption - apply wp - apply (clarsimp simp: no_tcb split: option.splits) - apply fastforce - done - lemma createObjects_no_cte_ifunsafe': assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" assumes no_tcb: "\t. 
projectKO_opt val \ Some (t::tcb)" @@ -5137,7 +5370,7 @@ proof - apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def,wp createObjects_valid_pspace_untyped') apply (wp assms | simp add: objBits_def)+ - apply (wp createObjects_sch createObjects_queues) + apply (wp createObjects_sch) apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def) apply (wp createObjects_state_refs_of'') @@ -5147,30 +5380,37 @@ proof - apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def) apply (wp createObjects_iflive') - apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift - createObjects_idle' createObjects_no_cte_valid_global - createObjects_valid_arch createObjects_irq_state - createObjects_no_cte_irq_handlers createObjects_cur' - createObjects_queues' [OF no_tcb] - assms | simp add: objBits_def )+ + apply (wp createObjects_no_cte_ifunsafe' + assms | simp add: objBits_def)+ apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def) apply (wp createObjects_idle') - apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift - createObjects_idle' createObjects_no_cte_valid_global + apply (wp irqs_masked_lift createObjects_no_cte_valid_global createObjects_valid_arch createObjects_irq_state - createObjects_no_cte_irq_handlers createObjects_cur' - createObjects_queues' [OF no_tcb] assms - createObjects_pspace_domain_valid co_ct_not_inQ - createObjects_ct_idle_or_in_cur_domain' - createObjects_untyped_ranges_zero'[OF moKO] - | simp)+ + createObjects_no_cte_irq_handlers assms + | simp)+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_sched_queues) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_valid_sched_pointers) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_idle') + apply (wpsimp wp: createObjects_valid_bitmaps) + apply (wp createObjects_cur' + createObjects_pspace_domain_valid co_ct_not_inQ + createObjects_ct_idle_or_in_cur_domain' + createObjects_untyped_ranges_zero'[OF moKO] + | simp)+ apply clarsimp apply (simp add: conj_comms) apply ((intro conjI; assumption?); simp add: valid_pspace'_def objBits_def) - apply (fastforce simp add: no_cte no_tcb split_def split: option.splits) - apply (clarsimp simp: invs'_def no_tcb valid_state'_def no_cte split: option.splits) - done + apply (fastforce simp: no_cte no_tcb split_def split: option.splits) + apply (clarsimp simp: invs'_def no_tcb valid_state'_def no_cte split: option.splits) + by (auto simp: invs'_def no_tcb valid_state'_def no_cte live'_def + split: option.splits kernel_object.splits) qed lemma corres_retype_update_gsI: @@ -5206,7 +5446,7 @@ lemma gcd_corres: "corres (=) \ \ (gets cur_domain) curDomain" lemma retype_region2_extra_ext_mapM_x_corres: shows "corres dc (valid_etcbs and (\s. \addr\set addrs. tcb_at addr s)) - (\s. \addr\set addrs. tcb_at' addr s) + (\s. \addr\set addrs. obj_at' (Not \ tcbQueued) addr s) (retype_region2_extra_ext addrs Structures_A.apiobject_type.TCBObject) (mapM_x (\addr. do cdom \ curDomain; threadSet (tcbDomain_update (\_. cdom)) addr @@ -5217,7 +5457,7 @@ lemma retype_region2_extra_ext_mapM_x_corres: apply (rule corres_split_eqr[OF gcd_corres]) apply (rule_tac S="Id \ {(x, y). x \ set addrs}" and P="\s. (\t \ set addrs. tcb_at t s) \ valid_etcbs s" - and P'="\s. \t \ set addrs. tcb_at' t s" + and P'="\s. \t \ set addrs. 
obj_at' (Not \ tcbQueued) t s" in corres_mapM_x) apply simp apply (rule corres_guard_imp) @@ -5225,8 +5465,10 @@ lemma retype_region2_extra_ext_mapM_x_corres: apply (case_tac tcb') apply simp apply fastforce - apply fastforce + apply (fastforce simp: obj_at'_def) apply (wp hoare_vcg_ball_lift | simp)+ + apply (clarsimp simp: obj_at'_def) + apply fastforce apply auto[1] apply (wp | simp add: curDomain_def)+ done @@ -5258,10 +5500,11 @@ lemma retype_region2_obj_at: apply (auto simp: obj_at_def default_object_def is_tcb_def) done -lemma createObjects_tcb_at': +lemma createObjects_Not_tcbQueued: "\range_cover ptr sz (objBitsKO (injectKOS (makeObject::tcb))) n; n \ 0\ \ \\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ - createObjects ptr n (KOTCB makeObject) 0 \\ptrs s. \addr\set ptrs. tcb_at' addr s\" + createObjects ptr n (KOTCB makeObject) 0 + \\ptrs s. \addr\set ptrs. obj_at' (Not \ tcbQueued) addr s\" apply (rule hoare_strengthen_post[OF createObjects_ko_at_strg[where val = "(makeObject :: tcb)"]]) apply (auto simp: obj_at'_def projectKOs project_inject objBitsKO_def objBits_def makeObject_tcb) done @@ -5338,7 +5581,7 @@ lemma corres_retype_region_createNewCaps: apply (rule corres_retype[where 'a = tcb], simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def makeObjectKO_def - other_objs_default_relation)[1] + tcb_relation_retype)[1] apply (fastforce simp: range_cover_def) apply (rule corres_split_nor) apply (simp add: APIType_map2_def) @@ -5349,7 +5592,7 @@ lemma corres_retype_region_createNewCaps: apply wp apply wp apply ((wp retype_region2_obj_at | simp add: APIType_map2_def)+)[1] - apply ((wp createObjects_tcb_at'[where sz=sz] | simp add: APIType_map2_def objBits_simps' obj_bits_api_def)+)[1] + apply ((wp createObjects_Not_tcbQueued[where sz=sz] | simp add: APIType_map2_def objBits_simps' obj_bits_api_def)+)[1] apply simp apply simp apply (subst retype_region2_extra_ext_trivial) @@ -5383,7 +5626,7 @@ lemma corres_retype_region_createNewCaps: \ \CapTable\ apply (subst retype_region2_extra_ext_trivial) apply (simp add: APIType_map2_def) - apply (subst bind_assoc_reverse[of "createObjects y n (KOCTE makeObject) us"]) + apply (subst bind_assoc_return_reverse[of "createObjects y n (KOCTE makeObject) us"]) apply (subst liftM_def [of "map (\addr. capability.CNodeCap addr us 0 0)", symmetric]) apply simp diff --git a/proof/refine/ARM_HYP/Schedule_R.thy b/proof/refine/ARM_HYP/Schedule_R.thy index 0ff23927ad..20d63003d9 100644 --- a/proof/refine/ARM_HYP/Schedule_R.thy +++ b/proof/refine/ARM_HYP/Schedule_R.thy @@ -10,16 +10,11 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) -declare static_imp_wp[wp_split del] +declare hoare_weak_lift_imp[wp_split del] (* Levity: added (20090713 10:04:12) *) declare sts_rel_idle [simp] -lemma invs_no_cicd'_queues: - "invs_no_cicd' s \ valid_queues s" - unfolding invs_no_cicd'_def - by simp - lemma corres_if2: "\ G = G'; G \ corres r P P' a c; \ G' \ corres r Q Q' b d \ \ corres r (if G then P else Q) (if G' then P' else Q') (if G then a else b) (if G' then c else d)" @@ -41,7 +36,7 @@ proof - apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) done have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. 
if rv \ None then \ else P\" - by (wp alternative_wp | simp)+ + by (wp | simp)+ show ?thesis using p apply (induct xs) apply (simp add: y del: dc_simp) @@ -76,17 +71,17 @@ lemma vs_refs_pages_vcpu: by (simp add: vs_refs_pages_def) lemma vs_lookup_pages1_vcpu_update: - "typ_at (AArch AVCPU) vcpuPtr s \ vs_lookup_pages1 (s\kheap := kheap s(vcpuPtr \ ArchObj (VCPU vcpu))\) + "typ_at (AArch AVCPU) vcpuPtr s \ vs_lookup_pages1 (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = vs_lookup_pages1 s" by (clarsimp intro!: set_eqI simp: vs_lookup_pages1_def vs_refs_pages_vcpu obj_at_def) lemma vs_lookup_pages_vcpu_update: - "typ_at (AArch AVCPU) vcpuPtr s \ vs_lookup_pages (s\kheap := kheap s(vcpuPtr \ ArchObj (VCPU vcpu))\) + "typ_at (AArch AVCPU) vcpuPtr s \ vs_lookup_pages (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = vs_lookup_pages s" by (clarsimp simp: vs_lookup_pages_def vs_lookup_pages1_vcpu_update) lemma valid_vs_lookup_vcpu_update: - "typ_at (AArch AVCPU) vcpuPtr s \ valid_vs_lookup (s\kheap := kheap s(vcpuPtr \ ArchObj (VCPU vcpu))\) + "typ_at (AArch AVCPU) vcpuPtr s \ valid_vs_lookup (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = valid_vs_lookup s" apply (clarsimp simp: valid_vs_lookup_def caps_of_state_VCPU_update) apply (rule all_cong1) @@ -133,12 +128,17 @@ lemma arch_switchToThread_corres: and valid_vs_lookup and valid_global_objs and unique_table_refs o caps_of_state and st_tcb_at runnable t and (\s. sym_refs (state_hyp_refs_of s))) - (valid_arch_state' and valid_pspace' and st_tcb_at' runnable' t - and (\s. sym_refs (state_hyp_refs_of' s))) + (valid_arch_state' and no_0_obj' and (\s. sym_refs (state_hyp_refs_of' s))) (arch_switch_to_thread t) (Arch.switchToThread t)" + apply (rule_tac Q'="tcb_at' t" in corres_cross_add_guard) + apply (fastforce intro!: tcb_at_cross st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) apply (simp add: arch_switch_to_thread_def ARM_HYP_H.switchToThread_def) apply (rule corres_guard_imp) - apply (rule corres_split[OF get_tcb_corres]) + apply (rule corres_split[OF getObject_TCB_corres]) apply (rule corres_split[OF vcpuSwitch_corres']) apply (clarsimp simp: tcb_relation_def arch_tcb_relation_def) apply (rule corres_split[OF setVMRoot_corres]) @@ -170,327 +170,280 @@ lemma schedule_choose_new_thread_sched_act_rct[wp]: unfolding schedule_choose_new_thread_def by wp +\ \This proof shares many similarities with the proof of @{thm tcbSchedEnqueue_corres}\ lemma tcbSchedAppend_corres: - notes trans_state_update'[symmetric, simp del] - shows - "corres dc (is_etcb_at t) (tcb_at' t and Invariants_H.valid_queues and valid_queues') (tcb_sched_action (tcb_sched_append) t) (tcbSchedAppend t)" - apply (simp only: tcbSchedAppend_def tcb_sched_action_def) - apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (rule no_fail_pre, wp, simp) - apply (case_tac queued) - apply (simp add: unless_def when_def) - apply (rule corres_no_failI) - apply wp+ - apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc - assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def - set_tcb_queue_def simpler_modify_def) - - apply (subgoal_tac "tcb_sched_append t (ready_queues a (tcb_domain y) (tcb_priority y)) - = (ready_queues a (tcb_domain y) (tcb_priority y))") - apply (simp add: state_relation_def ready_queues_relation_def) - apply (clarsimp simp: tcb_sched_append_def state_relation_def - valid_queues'_def ready_queues_relation_def - ekheap_relation_def etcb_relation_def - obj_at'_def inQ_def projectKO_eq project_inject) - apply (drule_tac x=t in bspec,clarsimp) + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_append tcb_ptr) (tcbSchedAppend tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + projectKOs[simp] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (clarsimp simp: tcb_sched_action_def tcb_sched_append_def get_tcb_queue_def + tcbSchedAppend_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. 
if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce apply clarsimp - apply (clarsimp simp: unless_def when_def cong: if_cong) - apply (rule stronger_corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply simp - apply (rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_append_def) - apply (intro conjI impI) - apply (rule corres_guard_imp) - apply (rule setQueue_corres) - prefer 3 - apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) - apply simp - apply simp - apply simp - apply (rule corres_split_noop_rhs2) - apply (rule addToBitmap_if_null_noop_corres) - apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] - apply wp+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def - projectKO_eq project_inject) - done + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. P'" and P'=P' for P P']) -crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue - for valid_pspace'[wp]: valid_pspace' - and valid_arch_state'[wp]: valid_arch_state' - and pred_tcb_at'[wp]: "pred_tcb_at' proj P t" - (wp: threadSet_pred_tcb_no_state simp: unless_def tcb_to_itcb'_def) - -lemma removeFromBitmap_valid_queues_no_bitmap_except[wp]: -" \ valid_queues_no_bitmap_except t \ - removeFromBitmap d p - \\_. valid_queues_no_bitmap_except t \" - unfolding bitmapQ_defs valid_queues_no_bitmap_except_def - by (wp| clarsimp simp: bitmap_fun_defs)+ - -lemma removeFromBitmap_bitmapQ: - "\ \s. True \ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" - unfolding bitmapQ_defs bitmap_fun_defs - by (wp| clarsimp simp: bitmap_fun_defs)+ - -lemma removeFromBitmap_valid_bitmapQ[wp]: -" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. ksReadyQueues s (d,p) = []) \ - removeFromBitmap d p - \\_. valid_bitmapQ \" -proof - - have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. ksReadyQueues s (d,p) = []) \ - removeFromBitmap d p - \\_. valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. \ bitmapQ d p s \ ksReadyQueues s (d,p) = []) \" - by (rule hoare_pre) - (wp removeFromBitmap_valid_queues_no_bitmap_except removeFromBitmap_valid_bitmapQ_except - removeFromBitmap_bitmapQ, simp) - thus ?thesis - by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) -qed - -(* this should be the actual weakest precondition to establish valid_queues - under tagging a thread as not queued *) -lemma threadSet_valid_queues_dequeue_wp: - "\ valid_queues_no_bitmap_except t and - valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. \d p. t \ set (ksReadyQueues s (d,p))) \ - threadSet (tcbQueued_update (\_. False)) t - \\rv. 
valid_queues \" - unfolding threadSet_def - apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) - apply (rule hoare_pre) - apply (simp add: valid_queues_def valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def) - apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift - setObject_tcb_strongest) - apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def valid_queues_no_bitmap_def) - done - -(* FIXME move *) -lemmas obj_at'_conjI = obj_at_conj' - -lemma setQueue_valid_queues_no_bitmap_except_dequeue_wp: - "\d p ts t. - \ \s. valid_queues_no_bitmap_except t s \ - (\t' \ set ts. obj_at' (inQ d p and runnable' \ tcbState) t' s) \ - t \ set ts \ distinct ts \ p \ maxPriority \ d \ maxDomain \ - setQueue d p ts - \\rv. valid_queues_no_bitmap_except t \" - unfolding setQueue_def valid_queues_no_bitmap_except_def null_def - by wp force - -definition (* if t is in a queue, it should be tagged with right priority and domain *) - "correct_queue t s \ \d p. t \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s)" - -lemma valid_queues_no_bitmap_correct_queueI[intro]: - "valid_queues_no_bitmap s \ correct_queue t s" - unfolding correct_queue_def valid_queues_no_bitmap_def - by (fastforce simp: obj_at'_def inQ_def) - - -lemma tcbSchedDequeue_valid_queues_weak: - "\ valid_queues_no_bitmap_except t and valid_bitmapQ and - bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - correct_queue t and - obj_at' (\tcb. tcbDomain tcb \ maxDomain \ tcbPriority tcb \ maxPriority) t \ - tcbSchedDequeue t - \\_. Invariants_H.valid_queues\" -proof - - show ?thesis - unfolding tcbSchedDequeue_def null_def valid_queues_def - apply wp (* stops on threadSet *) - apply (rule hoare_post_eq[OF _ threadSet_valid_queues_dequeue_wp], - simp add: valid_queues_def) - apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift)+ - apply (wp hoare_vcg_imp_lift setQueue_valid_queues_no_bitmap_except_dequeue_wp - setQueue_valid_bitmapQ threadGet_const_tcb_at)+ - (* wp done *) - apply (normalise_obj_at') - apply (clarsimp simp: correct_queue_def) - apply (normalise_obj_at') - apply (fastforce simp add: valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def elim: obj_at'_weaken)+ - done -qed - -lemma tcbSchedDequeue_valid_queues: - "\Invariants_H.valid_queues - and obj_at' (\tcb. tcbDomain tcb \ maxDomain) t - and obj_at' (\tcb. tcbPriority tcb \ maxPriority) t\ - tcbSchedDequeue t - \\_. Invariants_H.valid_queues\" - apply (rule hoare_pre, rule tcbSchedDequeue_valid_queues_weak) - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def) - done - -lemma tcbSchedAppend_valid_queues'[wp]: - (* most of this is identical to tcbSchedEnqueue_valid_queues' in TcbAcc_R *) - "\valid_queues' and tcb_at' t\ tcbSchedAppend t \\_. valid_queues'\" - apply (simp add: tcbSchedAppend_def) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued; simp_all add: unless_def when_def) - apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ - apply (subst conj_commute, wp) - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) - apply wp + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueueAppend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: obj_at'_def) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) + + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp simp: setQueue_def tcbQueueAppend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply fast + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def opt_pred_def opt_map_def) + apply (metis inQ_def option.simps(5) tcb_of'_TCB) + apply (intro conjI impI; clarsimp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) apply fastforce - apply wp - apply (subst conj_commute) - apply clarsimp - apply (rule_tac Q="\rv. valid_queues' - and obj_at' (\obj. \ tcbQueued obj) t - and obj_at' (\obj. tcbPriority obj = prio) t - and obj_at' (\obj. tcbDomain obj = tdom) t - and (\s. 
t \ set (ksReadyQueues s (tdom, prio)))" - in hoare_post_imp) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKOs inQ_def) - apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def inQ_def projectKOs valid_queues'_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def) - done - -lemma threadSet_valid_queues'_dequeue: (* threadSet_valid_queues' is too weak for dequeue *) - "\\s. (\d p t'. obj_at' (inQ d p) t' s \ t' \ t \ t' \ set (ksReadyQueues s (d, p))) \ - obj_at' (inQ d p) t s \ - threadSet (tcbQueued_update (\_. False)) t - \\rv. valid_queues' \" - unfolding valid_queues'_def - apply (rule hoare_pre) - apply (wp hoare_vcg_all_lift) - apply (simp only: imp_conv_disj not_obj_at') - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (simp add: not_obj_at') - apply (clarsimp simp: typ_at_tcb') - apply normalise_obj_at' - apply (fastforce elim: obj_at'_weaken simp: inQ_def) - done - -lemma setQueue_ksReadyQueues_lift: - "\ \s. P (s\ksReadyQueues := (ksReadyQueues s)((d, p) := ts)\) ts \ - setQueue d p ts - \ \_ s. P s (ksReadyQueues s (d,p))\" - unfolding setQueue_def - by (wp, clarsimp simp: fun_upd_def snd_def) - -lemma tcbSchedDequeue_valid_queues'[wp]: - "\valid_queues' and tcb_at' t\ - tcbSchedDequeue t \\_. valid_queues'\" - unfolding tcbSchedDequeue_def - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - prefer 2 - apply (wp threadGet_const_tcb_at) - apply (fastforce simp: obj_at'_def) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply clarsimp + apply (drule_tac x="the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))" + in spec) + subgoal by (auto simp: in_opt_pred opt_map_red) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: tcbQueueEmpty_def) + apply (case_tac "t = tcbPtr") + apply (clarsimp simp: inQ_def fun_upd_apply split: if_splits) + apply (case_tac "t = the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: inQ_def opt_pred_def fun_upd_apply) + apply (clarsimp simp: inQ_def in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: 
fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ apply clarsimp - apply (rename_tac queued) - apply (case_tac queued, simp_all) - apply wp - apply (rule_tac d=tdom and p=prio in threadSet_valid_queues'_dequeue) - apply (rule hoare_pre_post, assumption) - apply (wp | clarsimp simp: bitmap_fun_defs)+ - apply (wp hoare_vcg_all_lift setQueue_ksReadyQueues_lift) - apply clarsimp - apply (wp threadGet_obj_at' threadGet_const_tcb_at)+ - apply clarsimp - apply (rule context_conjI, clarsimp simp: obj_at'_def) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKOs inQ_def|wp)+ + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (intro conjI; clarsimp) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply opt_map_def obj_at'_def + queue_end_valid_def prev_queue_head_def + split: if_splits option.splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_append[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def opt_map_def split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def fun_upd_apply queue_end_valid_def split: if_splits) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply split: if_splits) + by (clarsimp simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def split: if_splits) + +lemma tcbQueueAppend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_objs'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + apply (clarsimp simp: tcbQueueEmpty_def valid_bound_tcb'_def split: option.splits) + done + +lemma tcbSchedAppend_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. valid_objs'\" + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: threadSet_valid_objs' threadGet_wp hoare_vcg_all_lift) + apply (normalise_obj_at', rename_tac tcb "end") + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: tcbQueueEmpty_def obj_at'_def) done -crunch tcb_at'[wp]: tcbSchedAppend "tcb_at' t" - (simp: unless_def) - -crunch state_refs_of'[wp]: tcbSchedAppend "\s. P (state_refs_of' s)" - (wp: refl simp: crunch_simps unless_def) -crunch state_refs_of'[wp]: tcbSchedDequeue "\s. 
P (state_refs_of' s)" - (wp: refl simp: crunch_simps) +crunches tcbSchedAppend, tcbSchedDequeue + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + (wp: threadSet_pred_tcb_no_state simp: unless_def tcb_to_itcb'_def) -crunch state_hyp_refs_of'[wp]: tcbSchedAppend "\s. P (state_hyp_refs_of' s)" - (wp: refl simp: crunch_simps unless_def) -crunch state_hyp_refs_of'[wp]: tcbSchedDequeue "\s. P (state_hyp_refs_of' s)" - (wp: refl simp: crunch_simps) +(* FIXME move *) +lemmas obj_at'_conjI = obj_at_conj' -crunch cap_to'[wp]: tcbSchedEnqueue "ex_nonz_cap_to' p" - (simp: unless_def) -crunch cap_to'[wp]: tcbSchedAppend "ex_nonz_cap_to' p" - (simp: unless_def) -crunch cap_to'[wp]: tcbSchedDequeue "ex_nonz_cap_to' p" +crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue + for tcb_at'[wp]: "tcb_at' t" + and cap_to'[wp]: "ex_nonz_cap_to' p" + and ifunsafe'[wp]: if_unsafe_then_cap' + (wp: crunch_wps simp: crunch_simps) lemma tcbSchedAppend_iflive'[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ - tcbSchedAppend tcb \\_. if_live_then_nonz_cap'\" - apply (simp add: tcbSchedAppend_def unless_def) - apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ + "\if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. if_live_then_nonz_cap'\" + supply projectKOs[simp] + unfolding tcbSchedAppend_def + apply (wpsimp wp: tcbQueueAppend_if_live_then_nonz_cap' threadGet_wp simp: bitmap_fun_defs) + apply (frule_tac p=tcbPtr in if_live_then_nonz_capE') + apply (fastforce simp: ko_wp_at'_def st_tcb_at'_def obj_at'_def runnable_eq_active' live'_def) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: obj_at'_tcbQueueEnd_ksReadyQueues + simp: ko_wp_at'_def inQ_def obj_at'_def tcbQueueEmpty_def live'_def) done lemma tcbSchedDequeue_iflive'[wp]: - "\if_live_then_nonz_cap'\ tcbSchedDequeue tcb \\_. if_live_then_nonz_cap'\" + "\if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. if_live_then_nonz_cap'\" + supply projectKOs[simp] apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ - apply ((wp | clarsimp simp: bitmap_fun_defs)+)[1] (* deal with removeFromBitmap *) - apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ - apply (rule_tac Q="\rv. \" in hoare_post_imp, fastforce) - apply (wp | simp add: crunch_simps)+ + apply (wpsimp wp: tcbQueueRemove_if_live_then_nonz_cap' threadGet_wp) + apply (fastforce elim: if_live_then_nonz_capE' simp: obj_at'_def ko_wp_at'_def live'_def) done -crunch ifunsafe'[wp]: tcbSchedAppend if_unsafe_then_cap' - (simp: unless_def) -crunch ifunsafe'[wp]: tcbSchedDequeue if_unsafe_then_cap' - -crunch idle'[wp]: tcbSchedAppend valid_idle' - (simp: crunch_simps unless_def) - -crunch global_refs'[wp]: tcbSchedEnqueue valid_global_refs' - (wp: threadSet_global_refs simp: unless_def) -crunch global_refs'[wp]: tcbSchedAppend valid_global_refs' - (wp: threadSet_global_refs simp: unless_def) -crunch global_refs'[wp]: tcbSchedDequeue valid_global_refs' - (wp: threadSet_global_refs) - -crunch irq_node'[wp]: tcbSchedAppend "\s. P (irq_node' s)" - (simp: unless_def) -crunch irq_node'[wp]: tcbSchedDequeue "\s. P (irq_node' s)" - -crunch typ_at'[wp]: tcbSchedAppend "\s. 
P (typ_at' T p s)" - (simp: unless_def) - -crunch ctes_of[wp]: tcbSchedAppend "\s. P (ctes_of s)" - (simp: unless_def) - -crunch ksInterrupt[wp]: tcbSchedAppend "\s. P (ksInterruptState s)" - (simp: unless_def) -crunch ksInterrupt[wp]: tcbSchedDequeue "\s. P (ksInterruptState s)" - -crunch irq_states[wp]: tcbSchedAppend valid_irq_states' - (simp: unless_def) -crunch irq_states[wp]: tcbSchedDequeue valid_irq_states' - -crunch ct'[wp]: tcbSchedAppend "\s. P (ksCurThread s)" - (simp: unless_def) - -crunch pde_mappings'[wp]: tcbSchedAppend "valid_pde_mappings'" - (simp: unless_def) -crunch pde_mappings'[wp]: tcbSchedDequeue "valid_pde_mappings'" +crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue + for typ_at'[wp]: "\s. P (typ_at' T p s)" + and tcb_at'[wp]: "tcb_at' t" + and ctes_of[wp]: "\s. P (ctes_of s)" + and ksInterrupt[wp]: "\s. P (ksInterruptState s)" + and irq_states[wp]: valid_irq_states' + and irq_node'[wp]: "\s. P (irq_node' s)" + and ct'[wp]: "\s. P (ksCurThread s)" + and global_refs'[wp]: valid_global_refs' + and ifunsafe'[wp]: if_unsafe_then_cap' + and cap_to'[wp]: "ex_nonz_cap_to' p" + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" + and idle'[wp]: valid_idle' + and valid_pde_mappings'[wp]: valid_pde_mappings' + (simp: unless_def crunch_simps obj_at'_def wp: getObject_tcb_wp) lemma tcbSchedEnqueue_vms'[wp]: "\valid_machine_state'\ tcbSchedEnqueue t \\_. valid_machine_state'\" @@ -516,23 +469,89 @@ lemma ct_idle_or_in_cur_domain'_lift2: apply (rule hoare_lift_Pf2[where f=ksCurThread]) apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) including no_pre - apply (wp static_imp_wp hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift) apply simp+ done +lemma threadSet_mdb': + "\valid_mdb' and obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF (f t)) t\ + threadSet f t + \\rv. valid_mdb'\" + by (wpsimp wp: setObject_tcb_mdb' getTCB_wp simp: threadSet_def obj_at'_def) + +lemma tcbSchedNext_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbSchedNext_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbSchedPrev_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueueRemove_valid_mdb': + "\\s. valid_mdb' s \ valid_objs' s\ tcbQueueRemove q tcbPtr \\_. valid_mdb'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def obj_at'_def) + done + +lemma tcbQueuePrepend_valid_mdb': + "\valid_mdb' and tcb_at' tcbPtr + and (\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_valid_mdb': + "\\s. valid_mdb' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueEnd queue)) s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueued_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbQueued_update f) tcbPtr \\_. 
valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma valid_mdb'_ksReadyQueuesL1Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL1Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma valid_mdb'_ksReadyQueuesL2Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL2Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma tcbSchedEnqueue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. valid_mdb'\" + apply (clarsimp simp: tcbSchedEnqueue_def setQueue_def) + apply (wpsimp wp: tcbQueuePrepend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + apply normalise_obj_at' + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + +crunches tcbSchedEnqueue + for cur_tcb'[wp]: cur_tcb' + (wp: threadSet_cur) + lemma tcbSchedEnqueue_invs'[wp]: - "\invs' - and st_tcb_at' runnable' t - and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ - tcbSchedEnqueue t + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedEnqueue t \\_. invs'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp tcbSchedEnqueue_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority split: thread_state.split_asm simp: valid_pspace'_def)+ + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedEnqueue_ct_not_inQ + simp: cteCaps_of_def o_def) done crunch ksMachine[wp]: tcbSchedAppend "\s. P (ksMachineState s)" @@ -541,7 +560,7 @@ crunch ksMachine[wp]: tcbSchedAppend "\s. P (ksMachineState s)" lemma tcbSchedAppend_vms'[wp]: "\valid_machine_state'\ tcbSchedAppend t \\_. valid_machine_state'\" apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedAppend_ksMachine) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) done crunch pspace_domain_valid[wp]: tcbSchedAppend "pspace_domain_valid" @@ -556,21 +575,29 @@ crunch ksIdleThread[wp]: tcbSchedAppend "\s. P (ksIdleThread s)" crunch ksDomSchedule[wp]: tcbSchedAppend "\s. P (ksDomSchedule s)" (simp: unless_def) +lemma tcbQueueAppend_tcbPriority_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + supply projectKOs[simp] + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbQueueAppend_tcbDomain_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + supply projectKOs[simp] + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + lemma tcbSchedAppend_tcbDomain[wp]: - "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ - tcbSchedAppend t - \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" + "tcbSchedAppend t \obj_at' (\tcb. 
P (tcbDomain tcb)) t'\" apply (clarsimp simp: tcbSchedAppend_def) - apply (wpsimp simp: unless_def)+ - done + by wpsimp lemma tcbSchedAppend_tcbPriority[wp]: - "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ - tcbSchedAppend t - \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" + "tcbSchedAppend t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" apply (clarsimp simp: tcbSchedAppend_def) - apply (wpsimp simp: unless_def)+ - done + by wpsimp lemma tcbSchedAppend_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ tcbSchedAppend t \\_. tcb_in_cur_domain' t' \" @@ -584,26 +611,60 @@ crunches tcbSchedAppend, tcbSchedDequeue (simp: unless_def) lemma tcbSchedAppend_sch_act_wf[wp]: - "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedAppend thread - \\rv s. sch_act_wf (ksSchedulerAction s) s\" - apply (simp add:tcbSchedAppend_def bitmap_fun_defs) - apply (wp hoare_unless_wp setQueue_sch_act threadGet_wp|simp)+ - apply (fastforce simp:typ_at'_def obj_at'_def) + "tcbSchedAppend thread \\s. sch_act_wf (ksSchedulerAction s) s\" + by (wpsimp wp: sch_act_wf_lift) + +lemma tcbSchedAppend_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedAppend tcbPtr \\_. valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedAppend_def + apply (wpsimp simp: tcbQueueAppend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp hoare_vcg_if_lift2) + apply (clarsimp simp: ksReadyQueues_asrt_def split: if_splits) + apply normalise_obj_at' + apply (force dest: tcbQueueHead_iff_tcbQueueEnd + simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def) + done + +lemma tcbSchedAppend_valid_mdb'[wp]: + "\valid_mdb' and valid_tcbs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. valid_mdb'\" + supply projectKOs[simp] + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: tcbQueueAppend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + apply (fastforce dest: obj_at'_tcbQueueEnd_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + +lemma tcbSchedAppend_valid_bitmaps[wp]: + "tcbSchedAppend tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) done lemma tcbSchedAppend_invs'[wp]: - "\invs' - and st_tcb_at' runnable' t - and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ - tcbSchedAppend t + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedAppend t \\_. invs'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp tcbSchedAppend_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority split: thread_state.split_asm simp: valid_pspace'_def)+ + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma tcbSchedAppend_all_invs_but_ct_not_inQ': + "\invs'\ + tcbSchedAppend t + \\_. 
all_invs_but_ct_not_inQ'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) done lemma tcbSchedEnqueue_invs'_not_ResumeCurrentThread: @@ -632,7 +693,7 @@ crunch ksMachine[wp]: tcbSchedDequeue "\s. P (ksMachineState s)" lemma tcbSchedDequeue_vms'[wp]: "\valid_machine_state'\ tcbSchedDequeue t \\_. valid_machine_state'\" apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedDequeue_ksMachine) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) done crunch pspace_domain_valid[wp]: tcbSchedDequeue "pspace_domain_valid" @@ -650,43 +711,88 @@ lemma tcbSchedDequeue_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ tcbSchedDequeue t \\_. tcb_in_cur_domain' t' \" apply (rule tcb_in_cur_domain'_lift) apply wp - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ - done - -lemma tcbSchedDequeue_tcbDomain[wp]: - "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ - tcbSchedDequeue t - \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ + apply (clarsimp simp: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: hoare_when_weak_wp getObject_tcb_wp threadGet_wp) done -lemma tcbSchedDequeue_tcbPriority[wp]: - "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ - tcbSchedDequeue t - \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ +lemma tcbSchedDequeue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs'\ tcbSchedDequeue tcbPtr \\_. valid_mdb'\" + unfolding tcbSchedDequeue_def + apply (wpsimp simp: bitmap_fun_defs setQueue_def wp: threadSet_mdb' tcbQueueRemove_valid_mdb') + apply (rule_tac Q="\_. tcb_at' tcbPtr" in hoare_post_imp) + apply (fastforce simp: tcb_cte_cases_def cteSizeBits_def) + apply (wpsimp wp: threadGet_wp)+ + apply (fastforce simp: obj_at'_def) done lemma tcbSchedDequeue_invs'[wp]: - "\invs' and tcb_at' t\ - tcbSchedDequeue t - \\_. 
invs'\" - unfolding invs'_def valid_state'_def - apply (rule hoare_pre) - apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - tcbSchedDequeue_valid_queues - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def)+ - apply (fastforce elim: valid_objs'_maxDomain valid_objs'_maxPriority simp: valid_pspace'_def)+ + "tcbSchedDequeue t \invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma ready_qs_runnable_cross: + "\(s, s') \ state_relation; pspace_aligned s; pspace_distinct s; valid_queues s\ + \ ready_qs_runnable s'" + supply projectKOs[simp] + apply (clarsimp simp: ready_qs_runnable_def) + apply normalise_obj_at' + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply clarsimp + apply (drule_tac x=t in bspec) + apply (fastforce simp: inQ_def in_opt_pred obj_at'_def opt_map_red) + apply (fastforce dest: st_tcb_at_runnable_cross simp: obj_at'_def st_tcb_at'_def) + done + +method add_ready_qs_runnable = + rule_tac Q'=ready_qs_runnable in corres_cross_add_guard, + (clarsimp simp: pred_conj_def)?, + (frule valid_sched_valid_queues)?, (frule invs_psp_aligned)?, (frule invs_distinct)?, + fastforce dest: ready_qs_runnable_cross + +defs idleThreadNotQueued_def: + "idleThreadNotQueued s \ obj_at' (Not \ tcbQueued) (ksIdleThread s) s" + +lemma idle_thread_not_queued: + "\valid_idle s; valid_queues s; valid_etcbs s\ + \ \ (\d p. idle_thread s \ set (ready_queues s d p))" + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (drule_tac x="idle_thread s" in bspec) + apply fastforce + apply (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def valid_etcbs_def) done +lemma valid_idle_tcb_at: + "valid_idle s \ tcb_at (idle_thread s) s" + by (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def is_tcb_def) + lemma setCurThread_corres: - "corres dc \ \ (modify (cur_thread_update (\_. t))) (setCurThread t)" - apply (unfold setCurThread_def) + "corres dc (valid_idle and valid_queues and valid_etcbs and pspace_aligned and pspace_distinct) \ + (modify (cur_thread_update (\_. 
t))) (setCurThread t)" + supply projectKOs[simp] + apply (clarsimp simp: setCurThread_def) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (clarsimp simp: idleThreadNotQueued_def) + apply (frule (2) idle_thread_not_queued) + apply (frule state_relation_pspace_relation) + apply (frule state_relation_ready_queues_relation) + apply (frule state_relation_idle_thread) + apply (frule valid_idle_tcb_at) + apply (frule (3) tcb_at_cross) + apply (fastforce dest!: in_ready_q_tcbQueued_eq[THEN arg_cong_Not, THEN iffD1] + simp: obj_at'_def opt_pred_def opt_map_def) apply (rule corres_modify) apply (simp add: state_relation_def swp_def) done @@ -709,48 +815,64 @@ proof - by (rule lift_neg_pred_tcb_at' [OF ArchThreadDecls_H_ARM_HYP_H_switchToThread_typ_at' pos]) qed -crunches Arch.switchToThread - for valid_queues[wp]: Invariants_H.valid_queues - (wp: crunch_wps simp: crunch_simps ignore: clearExMonitor) +crunches storeWordUser, setVMRoot, asUser, storeWordUser, Arch.switchToThread + for ksQ[wp]: "\s. P (ksReadyQueues s p)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_objs'[wp]: valid_objs' + (wp: crunch_wps threadSet_sched_pointers getObject_tcb_wp getASID_wp + simp: crunch_simps obj_at'_def) + +crunches arch_switch_to_thread, arch_switch_to_idle_thread + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + and ready_qs_distinct[wp]: ready_qs_distinct + and valid_idle_new[wp]: valid_idle + (wp: ready_qs_distinct_lift simp: crunch_simps) + +lemma valid_queues_in_correct_ready_q[elim!]: + "valid_queues s \ in_correct_ready_q s" + by (clarsimp simp: valid_queues_def in_correct_ready_q_def) + +lemma valid_queues_ready_qs_distinct[elim!]: + "valid_queues s \ ready_qs_distinct s" + by (clarsimp simp: valid_queues_def ready_qs_distinct_def) lemma switchToThread_corres: "corres dc (valid_arch_state and valid_objs and valid_asid_map and valid_vspace_objs and pspace_aligned and pspace_distinct and valid_vs_lookup and valid_global_objs and unique_table_refs o caps_of_state - and st_tcb_at runnable t and valid_etcbs and (\s. sym_refs (state_hyp_refs_of s))) - (valid_arch_state' and valid_pspace' and Invariants_H.valid_queues - and st_tcb_at' runnable' t and cur_tcb' and (\s. sym_refs (state_hyp_refs_of' s))) + and st_tcb_at runnable t and valid_etcbs and (\s. sym_refs (state_hyp_refs_of s)) + and valid_queues and valid_idle) + (valid_arch_state' and no_0_obj' and sym_heap_sched_pointers and valid_objs' + and (\s. sym_refs (state_hyp_refs_of' s))) (switch_to_thread t) (switchToThread t)" - (is "corres _ ?PA ?PH _ _") - -proof - - have mainpart: "corres dc (?PA) (?PH) - (do y \ arch_switch_to_thread t; - y \ (tcb_sched_action tcb_sched_dequeue t); - modify (cur_thread_update (\_. t)) - od) - (do y \ Arch.switchToThread t; - y \ tcbSchedDequeue t; - setCurThread t - od)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply add_ready_qs_runnable + apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) + apply (rule corres_symb_exec_l[OF _ _ get_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_l[OF _ _ assert_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) 
+ apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce dest!: state_relation_ready_queues_relation intro: ksReadyQueues_asrt_cross) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply fastforce apply (rule corres_guard_imp) apply (rule corres_split[OF arch_switchToThread_corres]) apply (rule corres_split[OF tcbSchedDequeue_corres setCurThread_corres]) - apply (wpsimp simp: tcb_at_is_etcb_at st_tcb_at_tcb_at)+ - done - - show ?thesis - apply - - apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) - apply (rule corres_symb_exec_l [where Q = "\ s rv. (?PA and (=) rv) s", - OF corres_symb_exec_l [OF mainpart]]) - apply (auto intro: no_fail_pre [OF no_fail_assert] - no_fail_pre [OF no_fail_get] - dest: st_tcb_at_tcb_at [THEN get_tcb_at] - | simp add: assert_def | wp)+ - done -qed + apply (wpsimp simp: is_tcb_def)+ + apply (fastforce intro!: st_tcb_at_tcb_at) + apply wpsimp + apply wpsimp + apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def) + apply wpsimp + apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def) + done lemma tcb_at_idle_thread_lift: assumes T: "\T' t. \typ_at T' t\ f \\rv. typ_at T' t\" @@ -769,12 +891,12 @@ lemma valid_vs_lookup_arm_current_vcpu_inv[simp]: "valid_vs_lookup (s\arc lemma vs_lookup_pages1_vcpu_update': "kheap s p = Some (ArchObj (VCPU x)) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ ArchObj (VCPU x'))\) = vs_lookup_pages1 s" + vs_lookup_pages1 (s\kheap := (kheap s)(p \ ArchObj (VCPU x'))\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) lemma vs_lookup_pages_vcpu_update': "kheap s y = Some (ArchObj (VCPU x)) \ - (ref \ p) s = (ref \ p) (s\kheap := kheap s(y \ ArchObj (VCPU x'))\)" + (ref \ p) s = (ref \ p) (s\kheap := (kheap s)(y \ ArchObj (VCPU x'))\)" by (clarsimp simp: vs_lookup_pages_def vs_lookup_pages1_vcpu_update') lemma tcb_at'_ksIdleThread_lift: @@ -792,7 +914,7 @@ lemma arch_switchToIdleThread_corres: arch_switch_to_idle_thread Arch.switchToIdleThread" unfolding arch_switch_to_idle_thread_def ARM_HYP_H.switchToIdleThread_def - apply (corressimp corres: getIdleThread_corres setVMRoot_corres[@lift_corres_args] vcpuSwitch_corres[where vcpu=None, simplified] + apply (corresKsimp corres: getIdleThread_corres setVMRoot_corres[@lift_corres_args] vcpuSwitch_corres[where vcpu=None, simplified] wp: tcb_at_idle_thread_lift tcb_at'_ksIdleThread_lift vcpuSwitch_it') apply (clarsimp simp: invs_valid_objs invs_arch_state invs_valid_asid_map invs_valid_vs_lookup invs_psp_aligned invs_distinct invs_unique_refs invs_vspace_objs) @@ -803,16 +925,24 @@ lemma arch_switchToIdleThread_corres: done lemma switchToIdleThread_corres: - "corres dc invs invs_no_cicd' switch_to_idle_thread switchToIdleThread" + "corres dc + (invs and valid_queues and valid_etcbs) + invs_no_cicd' + switch_to_idle_thread switchToIdleThread" apply (simp add: switch_to_idle_thread_def Thread_H.switchToIdleThread_def) + apply add_ready_qs_runnable + apply (rule corres_stateAssert_ignore, fastforce) apply (rule corres_guard_imp) apply (rule corres_split[OF getIdleThread_corres]) apply (rule corres_split[OF arch_switchToIdleThread_corres]) - apply (unfold setCurThread_def) - apply (rule corres_trivial, rule corres_modify) - apply (simp add: state_relation_def cdt_relation_def) - apply (wp+, simp+) - apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_pspace'_def) + apply clarsimp + apply 
(rule setCurThread_corres) + apply wpsimp + apply (simp add: state_relation_def cdt_relation_def) + apply wpsimp+ + apply (simp add: invs_unique_refs invs_valid_vs_lookup invs_psp_aligned invs_distinct + invs_valid_idle) + apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) done lemma gq_sp: "\P\ getQueue d p \\rv. P and (\s. ksReadyQueues s (d, p) = rv)\" @@ -843,11 +973,9 @@ proof - apply (simp add: setCurThread_def) apply wp apply (clarsimp simp add: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def - valid_state'_def Invariants_H.valid_queues_def - sch_act_wf ct_in_state'_def state_refs_of'_def - ps_clear_def valid_irq_node'_def valid_queues'_def ct_not_inQ_ct - ct_idle_or_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def + valid_state'_def sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def ct_not_inQ_ct + ct_idle_or_in_cur_domain'_def bitmapQ_defs valid_bitmaps_def cong: option.case_cong) done qed @@ -861,101 +989,20 @@ lemma setCurThread_invs: by (rule hoare_pre, rule setCurThread_invs_no_cicd') (simp add: invs'_to_invs_no_cicd'_def) -lemma valid_queues_not_runnable_not_queued: - fixes s - assumes vq: "Invariants_H.valid_queues s" - and vq': "valid_queues' s" - and st: "st_tcb_at' (Not \ runnable') t s" - shows "obj_at' (Not \ tcbQueued) t s" -proof (rule ccontr) - assume "\ obj_at' (Not \ tcbQueued) t s" - moreover from st have "typ_at' TCBT t s" - by (rule pred_tcb_at' [THEN tcb_at_typ_at' [THEN iffD1]]) - ultimately have "obj_at' tcbQueued t s" - by (clarsimp simp: not_obj_at' comp_def) - - moreover - from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbPriority] - obtain p where tp: "obj_at' (\tcb. tcbPriority tcb = p) t s" - by clarsimp - - moreover - from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbDomain] - obtain d where td: "obj_at' (\tcb. tcbDomain tcb = d) t s" - by clarsimp - - ultimately - have "t \ set (ksReadyQueues s (d, p))" using vq' - unfolding valid_queues'_def - apply - - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (drule_tac x=t in spec) - apply (erule impE) - apply (fastforce simp add: inQ_def obj_at'_def) - apply (assumption) - done - - with vq have "st_tcb_at' runnable' t s" - unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def - apply - - apply clarsimp - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp simp add: st_tcb_at'_def) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp) - done - - with st show False - apply - - apply (drule(1) pred_tcb_at_conj') - apply (clarsimp) - done -qed - -(* - * The idle thread is not part of any ready queues. - *) -lemma idle'_not_tcbQueued': - assumes vq: "Invariants_H.valid_queues s" - and vq': "valid_queues' s" - and idle: "valid_idle' s" - shows "obj_at' (Not \ tcbQueued) (ksIdleThread s) s" - proof - - from idle have stidle: "st_tcb_at' (Not \ runnable') (ksIdleThread s) s" - by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def projectKOs idle_tcb'_def) - - with vq vq' show ?thesis - by (rule valid_queues_not_runnable_not_queued) - qed - lemma setCurThread_invs_no_cicd'_idle_thread: - "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" -proof - - have ct_not_inQ_ct: "\s t . \ ct_not_inQ s; obj_at' (\x. \ tcbQueued x) t s\ \ ct_not_inQ (s\ ksCurThread := t \)" - apply (simp add: ct_not_inQ_def o_def) - done - have idle'_activatable': "\ s t. 
st_tcb_at' idle' t s \ st_tcb_at' activatable' t s" - apply (clarsimp simp: st_tcb_at'_def o_def obj_at'_def) + "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\_. invs'\" + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def + valid_state'_def valid_idle'_def + sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def ct_not_inQ_def + valid_queues_def bitmapQ_defs valid_bitmaps_def pred_tcb_at'_def + cong: option.case_cong) + apply (clarsimp simp: idle_tcb'_def ct_not_inQ_def ps_clear_def obj_at'_def st_tcb_at'_def + idleThreadNotQueued_def) done - show ?thesis - apply (simp add: setCurThread_def) - apply wp - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def) - apply (frule (2) idle'_not_tcbQueued'[simplified o_def]) - apply (clarsimp simp add: ct_not_inQ_ct idle'_activatable' - invs'_def cur_tcb'_def valid_state'_def valid_idle'_def - sch_act_wf ct_in_state'_def state_refs_of'_def - ps_clear_def valid_irq_node'_def - ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def bitmapQ_defs valid_queues_no_bitmap_def valid_queues'_def - pred_tcb_at'_def - cong: option.case_cong) - apply (clarsimp simp: obj_at'_def projectKOs idle_tcb'_def) - done -qed lemma setCurThread_invs_idle_thread: "\invs' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" @@ -996,13 +1043,13 @@ lemma Arch_switchToThread_tcb_in_cur_domain'[wp]: done lemma tcbSchedDequeue_not_tcbQueued: - "\ tcb_at' t \ tcbSchedDequeue t \ \_. obj_at' (\x. \ tcbQueued x) t \" + "\\\ tcbSchedDequeue t \\_. obj_at' (\x. \ tcbQueued x) t\" apply (simp add: tcbSchedDequeue_def) apply (wp|clarsimp)+ apply (rule_tac Q="\queued. obj_at' (\x. tcbQueued x = queued) t" in hoare_post_imp) - apply (clarsimp simp: obj_at'_def) - apply (wp threadGet_obj_at') - apply (simp) + apply (clarsimp simp: obj_at'_def) + apply (wpsimp wp: threadGet_wp)+ + apply (clarsimp simp: obj_at'_def) done lemma asUser_obj_at[wp]: "\obj_at' (P \ tcbState) t\ @@ -1018,7 +1065,7 @@ lemma Arch_switchToThread_obj_at[wp]: Arch.switchToThread t \\rv. obj_at' (P \ tcbState) t\" apply (simp add: ARM_HYP_H.switchToThread_def) - apply (rule hoare_seq_ext)+ + apply (rule bind_wp)+ apply (rule doMachineOp_obj_at) apply (rule setVMRoot_obj_at'_no_vcpu) apply wpsimp+ @@ -1043,10 +1090,6 @@ crunch valid_irq_states'[wp]: asUser "valid_irq_states'" crunch valid_machine_state'[wp]: asUser "valid_machine_state'" (wp: crunch_wps simp: crunch_simps) -crunch valid_queues'[wp]: asUser "valid_queues'" -(wp: crunch_wps simp: crunch_simps) - - lemma asUser_valid_irq_node'[wp]: "\\s. valid_irq_node' (irq_node' s) s\ asUser t (setRegister f r) \\_ s. valid_irq_node' (irq_node' s) s\" @@ -1123,22 +1166,17 @@ lemma Arch_switchToThread_invs_no_cicd': lemma tcbSchedDequeue_invs_no_cicd'[wp]: - "\invs_no_cicd' and tcb_at' t\ - tcbSchedDequeue t - \\_. 
invs_no_cicd'\" - unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + "tcbSchedDequeue t \invs_no_cicd'\" + unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_pspace'_def apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - tcbSchedDequeue_valid_queues_weak untyped_ranges_zero_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp - apply (fastforce simp: valid_pspace'_def valid_queues_def - elim: valid_objs'_maxDomain valid_objs'_maxPriority intro: obj_at'_conjI) done lemma switchToThread_invs_no_cicd': - "\invs_no_cicd' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" + "\invs_no_cicd' and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" apply (simp add: Thread_H.switchToThread_def) apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued Arch_switchToThread_invs_no_cicd' Arch_switchToThread_pred_tcb') @@ -1146,7 +1184,7 @@ lemma switchToThread_invs_no_cicd': done lemma switchToThread_invs[wp]: - "\invs' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" + "\invs' and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" apply (simp add: Thread_H.switchToThread_def ) apply (wp threadSet_timeslice_invs setCurThread_invs Arch_switchToThread_invs dmo_invs' @@ -1189,8 +1227,7 @@ lemma dmo_cap_to'[wp]: lemma sct_cap_to'[wp]: "\ex_nonz_cap_to' p\ setCurThread t \\rv. ex_nonz_cap_to' p\" apply (simp add: setCurThread_def) - apply (wp ex_nonz_cap_to_pres') - apply (clarsimp elim!: cte_wp_at'_pspaceI)+ + apply (wpsimp wp: ex_nonz_cap_to_pres') done lemma setVCPU_cap_to'[wp]: @@ -1240,61 +1277,6 @@ lemma tcb_at_typ_at': done -lemma invs'_not_runnable_not_queued: - fixes s - assumes inv: "invs' s" - and st: "st_tcb_at' (Not \ runnable') t s" - shows "obj_at' (Not \ tcbQueued) t s" - apply (insert assms) - apply (rule valid_queues_not_runnable_not_queued) - apply (clarsimp simp add: invs'_def valid_state'_def)+ - done - -lemma valid_queues_not_tcbQueued_not_ksQ: - fixes s - assumes vq: "Invariants_H.valid_queues s" - and notq: "obj_at' (Not \ tcbQueued) t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" -proof (rule ccontr, simp , erule exE, erule exE) - fix d p - assume "t \ set (ksReadyQueues s (d, p))" - with vq have "obj_at' (inQ d p) t s" - unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def - apply clarify - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (simp) - done - hence "obj_at' tcbQueued t s" - apply (rule obj_at'_weakenE) - apply (simp only: inQ_def) - done - with notq show "False" - by (clarsimp simp: obj_at'_def) -qed - -lemma not_tcbQueued_not_ksQ: - fixes s - assumes "invs' s" - and "obj_at' (Not \ tcbQueued) t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" - apply (insert assms) - apply (clarsimp simp add: invs'_def valid_state'_def) - apply (drule(1) valid_queues_not_tcbQueued_not_ksQ) - apply (clarsimp) - done - -lemma ct_not_ksQ: - "\ invs' s; ksSchedulerAction s = ResumeCurrentThread \ - \ \p. ksCurThread s \ set (ksReadyQueues s p)" - apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def) - apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) - apply (fastforce) - done - lemma setThreadState_rct: "\\s. 
(runnable' st \ ksCurThread s \ t) \ ksSchedulerAction s = ResumeCurrentThread\ @@ -1302,21 +1284,21 @@ lemma setThreadState_rct: \\_ s. ksSchedulerAction s = ResumeCurrentThread\" apply (simp add: setThreadState_def) apply (rule hoare_pre_disj') - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] threadSet_nosch]]) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) apply (clarsimp simp: when_def) - apply (case_tac x) + apply (case_tac rv) apply (clarsimp, wp)[1] apply (clarsimp) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF threadSet_ct threadSet_nosch]]) - apply (rule hoare_seq_ext [OF _ isRunnable_inv]) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ isRunnable_inv]) + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF gct_wp gct_wp]]) apply (rename_tac ct) @@ -1367,21 +1349,24 @@ lemma bitmapQ_from_bitmap_lookup: done lemma lookupBitmapPriority_obj_at': - "\ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0; valid_queues_no_bitmap s; valid_bitmapQ s; - bitmapQ_no_L1_orphans s\ - \ obj_at' (inQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) and runnable' \ tcbState) - (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" + "\ksReadyQueuesL1Bitmap s d \ 0; valid_bitmapQ s; bitmapQ_no_L1_orphans s; + ksReadyQueues_asrt s; ready_qs_runnable s; pspace_aligned' s; pspace_distinct' s\ + \ obj_at' (inQ d (lookupBitmapPriority d s) and runnable' \ tcbState) + (the (tcbQueueHead (ksReadyQueues s (d, lookupBitmapPriority d s)))) s" apply (drule (2) bitmapQ_from_bitmap_lookup) apply (simp add: valid_bitmapQ_bitmapQ_simp) - apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)", simp) - apply (clarsimp, rename_tac t ts) - apply (drule cons_set_intro) - apply (drule (2) valid_queues_no_bitmap_objD) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def tcbQueueEmpty_def) + apply (drule_tac x=d in spec) + apply (drule_tac x="lookupBitmapPriority d s" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (fastforce simp: obj_at'_and ready_qs_runnable_def obj_at'_def st_tcb_at'_def inQ_def + tcbQueueEmpty_def) done lemma bitmapL1_zero_ksReadyQueues: "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ - \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. ksReadyQueues s (d,p) = [])" + \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. tcbQueueEmpty (ksReadyQueues s (d, p)))" apply (cases "ksReadyQueuesL1Bitmap s d = 0") apply (force simp add: bitmapQ_def valid_bitmapQ_def) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) @@ -1452,7 +1437,7 @@ lemma bitmapL1_highest_lookup: done lemma bitmapQ_ksReadyQueuesI: - "\ bitmapQ d p s ; valid_bitmapQ s \ \ ksReadyQueues s (d, p) \ []" + "\ bitmapQ d p s ; valid_bitmapQ s \ \ \ tcbQueueEmpty (ksReadyQueues s (d, p))" unfolding valid_bitmapQ_def by simp lemma getReadyQueuesL2Bitmap_inv[wp]: @@ -1461,24 +1446,22 @@ lemma getReadyQueuesL2Bitmap_inv[wp]: lemma switchToThread_lookupBitmapPriority_wp: "\\s. invs_no_cicd' s \ bitmapQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) s \ - t = hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)) \ + t = the (tcbQueueHead (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)))\ ThreadDecls_H.switchToThread t \\rv. 
invs'\" -proof - - have switchToThread_pre: - "\s p t.\ valid_queues s ; bitmapQ (ksCurDomain s) p s ; t = hd (ksReadyQueues s (ksCurDomain s,p)) \ - \ st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s" - unfolding valid_queues_def - apply (clarsimp dest!: bitmapQ_ksReadyQueuesI) - apply (case_tac "ksReadyQueues s (ksCurDomain s, p)", simp) - apply (rename_tac t ts) - apply (drule_tac t=t and p=p and d="ksCurDomain s" in valid_queues_no_bitmap_objD) - apply simp - apply (fastforce elim: obj_at'_weaken simp: inQ_def tcb_in_cur_domain'_def st_tcb_at'_def) - done - thus ?thesis - by (wp switchToThread_invs_no_cicd') (fastforce dest: invs_no_cicd'_queues) -qed + apply (simp add: Thread_H.switchToThread_def) + apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued + Arch_switchToThread_invs_no_cicd') + apply (auto elim!: pred_tcb'_weakenE) + apply (prop_tac "valid_bitmapQ s") + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_bitmaps_def) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def valid_bitmapQ_bitmapQ_simp) + apply (drule_tac x="ksCurDomain s" in spec) + apply (drule_tac x="lookupBitmapPriority (ksCurDomain s) s" in spec) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) + done lemma switchToIdleThread_invs_no_cicd': "\invs_no_cicd'\ switchToIdleThread \\rv. invs'\" @@ -1490,7 +1473,7 @@ lemma switchToIdleThread_invs_no_cicd': crunch obj_at'[wp]: "Arch.switchToIdleThread" "obj_at' (P :: ('a :: no_vcpu) \ bool) t" -declare static_imp_conj_wp[wp_split del] +declare hoare_weak_lift_imp_conj[wp_split del] lemma setCurThread_const: "\\_. P t \ setCurThread t \\_ s. P (ksCurThread s) \" @@ -1550,11 +1533,6 @@ lemma corres_assert_ret: apply (simp add: assert_def return_def fail_def) done -lemma corres_assert_assume_l: - "corres dc P Q (f ()) g - \ corres dc (P and (\s. P')) Q (assert P' >>= f) g" - by (force simp: corres_underlying_def assert_def return_def bind_def fail_def) - lemma corres_assert_assume_r: "corres dc P Q f (g ()) \ corres dc P (Q and (\s. Q')) f (assert Q' >>= g)" @@ -1584,9 +1562,9 @@ lemma guarded_switch_to_corres: and valid_vspace_objs and pspace_aligned and pspace_distinct and valid_vs_lookup and unique_table_refs o caps_of_state - and st_tcb_at runnable t and valid_etcbs and (\s. sym_refs (state_hyp_refs_of s))) - (valid_arch_state' and valid_pspace' and Invariants_H.valid_queues - and st_tcb_at' runnable' t and cur_tcb' and (\s. sym_refs (state_hyp_refs_of' s))) + and st_tcb_at runnable t and valid_etcbs and valid_queues and valid_idle and (\s. sym_refs (state_hyp_refs_of s))) + (valid_arch_state' and valid_pspace' and sym_heap_sched_pointers + and (\s. sym_refs (state_hyp_refs_of' s))) (guarded_switch_to t) (switchToThread t)" apply (simp add: guarded_switch_to_def) apply (rule corres_guard_imp) @@ -1631,7 +1609,7 @@ lemma curDomain_corres: "corres (=) \ \ (gets cur_domain) (curDomain)" lemma curDomain_corres': "corres (=) \ (\s. 
ksCurDomain s \ maxDomain) - (gets cur_domain) (if 1 < numDomains then curDomain else return 0)" + (gets cur_domain) (if Suc 0 < numDomains then curDomain else return 0)" apply (case_tac "1 < numDomains"; simp) apply (rule corres_guard_imp[OF curDomain_corres]; solves simp) (* if we have only one domain, then we are in it *) @@ -1641,27 +1619,32 @@ lemma curDomain_corres': lemma lookupBitmapPriority_Max_eqI: "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; ksReadyQueuesL1Bitmap s d \ 0 \ - \ lookupBitmapPriority d s = (Max {prio. ksReadyQueues s (d, prio) \ []})" + \ lookupBitmapPriority d s = (Max {prio. \ tcbQueueEmpty (ksReadyQueues s (d, prio))})" apply (rule Max_eqI[simplified eq_commute]; simp) apply (fastforce simp: bitmapL1_highest_lookup valid_bitmapQ_bitmapQ_simp) apply (metis valid_bitmapQ_bitmapQ_simp bitmapQ_from_bitmap_lookup) done lemma corres_gets_queues_getReadyQueuesL1Bitmap: - "corres (\qs l1. ((l1 = 0) = (\p. qs p = []))) \ valid_queues + "corres (\qs l1. (l1 = 0) = (\p. qs p = [])) \ valid_bitmaps (gets (\s. ready_queues s d)) (getReadyQueuesL1Bitmap d)" - unfolding state_relation_def valid_queues_def getReadyQueuesL1Bitmap_def - by (clarsimp simp: bitmapL1_zero_ksReadyQueues ready_queues_relation_def) + unfolding state_relation_def valid_bitmaps_def getReadyQueuesL1Bitmap_def + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x=d in spec) + apply (fastforce simp: bitmapL1_zero_ksReadyQueues list_queue_relation_def tcbQueueEmpty_def) + done lemma guarded_switch_to_chooseThread_fragment_corres: "corres dc (P and st_tcb_at runnable t and invs and valid_sched) - (P' and st_tcb_at' runnable' t and invs_no_cicd') - (guarded_switch_to t) - (do runnable \ isRunnable t; - y \ assert runnable; - ThreadDecls_H.switchToThread t - od)" + (P' and invs_no_cicd') + (guarded_switch_to t) + (do runnable \ isRunnable t; + y \ assert runnable; + ThreadDecls_H.switchToThread t + od)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) unfolding guarded_switch_to_def isRunnable_def apply simp apply (rule corres_guard_imp) @@ -1676,35 +1659,50 @@ lemma guarded_switch_to_chooseThread_fragment_corres: simp: pred_tcb_at' runnable'_def all_invs_but_ct_idle_or_in_cur_domain'_def) done +lemma Max_prio_helper: + "ready_queues_relation s s' + \ Max {prio. ready_queues s d prio \ []} + = Max {prio. 
\ tcbQueueEmpty (ksReadyQueues s' (d, prio))}" + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def tcbQueueEmpty_def) + apply (rule Max_eq_if) + apply fastforce + apply fastforce + apply (fastforce dest: heap_path_head) + apply clarsimp + apply (drule_tac x=d in spec) + apply (drule_tac x=b in spec) + apply force + done + lemma bitmap_lookup_queue_is_max_non_empty: - "\ valid_queues s'; (s, s') \ state_relation; invs s; + "\ valid_bitmaps s'; (s, s') \ state_relation; invs s; ksReadyQueuesL1Bitmap s' (ksCurDomain s') \ 0 \ - \ ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s') = - max_non_empty_queue (ready_queues s (cur_domain s))" - unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_queues_def - by (clarsimp simp add: max_non_empty_queue_def lookupBitmapPriority_Max_eqI - state_relation_def ready_queues_relation_def) + \ the (tcbQueueHead (ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s'))) + = hd (max_non_empty_queue (ready_queues s (cur_domain s)))" + apply (clarsimp simp: max_non_empty_queue_def valid_bitmaps_def lookupBitmapPriority_Max_eqI) + apply (frule curdomain_relation) + apply (drule state_relation_ready_queues_relation) + apply (simp add: Max_prio_helper) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (frule (2) bitmapL1_zero_ksReadyQueues[THEN arg_cong_Not, THEN iffD1]) + apply clarsimp + apply (cut_tac P="\x. \ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', x))" + in setcomp_Max_has_prop) + apply fastforce + apply (clarsimp simp: ready_queues_relation_def Let_def list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x="ksCurDomain s'" in spec) + apply (drule_tac x="Max {prio. \ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', prio))}" + in spec) + using heap_path_head tcbQueueEmpty_def + by fastforce lemma ksReadyQueuesL1Bitmap_return_wp: "\\s. P (ksReadyQueuesL1Bitmap s d) s \ getReadyQueuesL1Bitmap d \\rv s. P rv s\" unfolding getReadyQueuesL1Bitmap_def by wp -lemma ksReadyQueuesL1Bitmap_st_tcb_at': - "\ ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0 ; valid_queues s \ - \ st_tcb_at' runnable' (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" - apply (drule bitmapQ_from_bitmap_lookup; clarsimp simp: valid_queues_def) - apply (clarsimp simp add: valid_bitmapQ_bitmapQ_simp) - apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)") - apply simp - apply (simp add: valid_queues_no_bitmap_def) - apply (erule_tac x="ksCurDomain s" in allE) - apply (erule_tac x="lookupBitmapPriority (ksCurDomain s) s" in allE) - apply (clarsimp simp: st_tcb_at'_def) - apply (erule obj_at'_weaken) - apply simp - done - lemma curDomain_or_return_0: "\ \P\ curDomain \\rv s. Q rv s \; \s. P s \ ksCurDomain s \ maxDomain \ \ \P\ if 1 < numDomains then curDomain else return 0 \\rv s. Q rv s \" @@ -1716,52 +1714,72 @@ lemma invs_no_cicd_ksCurDomain_maxDomain': "invs_no_cicd' s \ ksCurDomain s \ maxDomain" unfolding invs_no_cicd'_def by simp +crunches curDomain + for valid_bitmaps[wp]: valid_bitmaps + lemma chooseThread_corres: - "corres dc (invs and valid_sched) (invs_no_cicd') - choose_thread chooseThread" (is "corres _ ?PREI ?PREH _ _") + "corres dc (invs and valid_sched) invs_no_cicd' choose_thread chooseThread" + (is "corres _ ?PREI ?PREH _ _") proof - + + (* if we only have one domain, we are in it *) + have one_domain_case: + "\s. 
\ invs_no_cicd' s; numDomains \ 1 \ \ ksCurDomain s = 0" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) + show ?thesis - unfolding choose_thread_def chooseThread_def - apply (simp only: return_bind Let_def) - apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) - apply (rule corres_guard_imp) - apply (rule corres_split[OF curDomain_corres']) - apply clarsimp - apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) - apply (erule corres_if2[OF sym]) - apply (rule switchToIdleThread_corres) - apply (rule corres_symb_exec_r) - apply (rule corres_symb_exec_r) - apply (rule_tac - P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) \ - st_tcb_at runnable (hd (max_non_empty_queue queues)) s" and - P'="\s. (?PREH s \ st_tcb_at' runnable' (hd queue) s) \ - l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) \ - l1 \ 0 \ - queue = ksReadyQueues s (ksCurDomain s, - lookupBitmapPriority (ksCurDomain s) s)" and - F="hd queue = hd (max_non_empty_queue queues)" in corres_req) - apply (fastforce dest!: invs_no_cicd'_queues simp: bitmap_lookup_queue_is_max_non_empty) - apply clarsimp - apply (rule corres_guard_imp) - apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) - apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ - apply (clarsimp simp: if_apply_def2) - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) - apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ - apply (fastforce simp: invs_no_cicd'_def) - apply (clarsimp simp: valid_sched_def DetSchedInvs_AI.valid_queues_def max_non_empty_queue_def) - apply (erule_tac x="cur_domain s" in allE) - apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) - apply (case_tac "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \ []})") - apply (clarsimp) - apply (subgoal_tac - "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \ []}) \ []") - apply (fastforce elim!: setcomp_Max_has_prop)+ - apply (simp add: invs_no_cicd_ksCurDomain_maxDomain') - apply (clarsimp dest!: invs_no_cicd'_queues) - apply (fastforce intro: ksReadyQueuesL1Bitmap_st_tcb_at') - done + supply if_split[split del] + apply (clarsimp simp: choose_thread_def chooseThread_def) + apply add_ready_qs_runnable + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply fastforce + apply (simp only: return_bind Let_def) + apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) + apply (rule corres_guard_imp) + apply (rule corres_split[OF curDomain_corres']) + apply clarsimp + apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) + apply (erule corres_if2[OF sym]) + apply (rule switchToIdleThread_corres) + apply (rule corres_symb_exec_r) + apply (rule corres_symb_exec_r) + apply (rule_tac P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) + \ st_tcb_at runnable (hd (max_non_empty_queue queues)) s" + and P'="\s. 
?PREH s \ l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) + \ l1 \ 0 + \ queue = ksReadyQueues s (ksCurDomain s, + lookupBitmapPriority (ksCurDomain s) s)" + and F="the (tcbQueueHead queue) = hd (max_non_empty_queue queues)" + in corres_req) + apply (fastforce simp: bitmap_lookup_queue_is_max_non_empty + all_invs_but_ct_idle_or_in_cur_domain'_def) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) + apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ + apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) + apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ + apply (clarsimp simp: valid_sched_def max_non_empty_queue_def valid_queues_def split: if_splits) + apply (erule_tac x="cur_domain s" in allE) + apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) + apply (case_tac "ready_queues s (cur_domain s) + (Max {prio. ready_queues s (cur_domain s) prio + \ []})") + apply (clarsimp) + apply (subgoal_tac "ready_queues s (cur_domain s) + (Max {prio. ready_queues s (cur_domain s) prio \ []}) + \ []") + apply fastforce + apply (fastforce elim!: setcomp_Max_has_prop) + apply fastforce + apply clarsimp + apply (frule invs_no_cicd_ksCurDomain_maxDomain') + apply (prop_tac "valid_bitmaps s") + apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def) + apply (fastforce dest: one_domain_case split: if_splits) + done qed lemma thread_get_comm: "do x \ thread_get f p; y \ gets g; k x y od = @@ -1812,12 +1830,6 @@ lemma nextDomain_invs_no_cicd': all_invs_but_ct_idle_or_in_cur_domain'_def) done -lemma bind_dummy_ret_val: - "do y \ a; - b - od = do a; b od" - by simp - lemma scheduleChooseNewThread_fragment_corres: "corres dc (invs and valid_sched and (\s. scheduler_action s = choose_new_thread)) (invs' and (\s. ksSchedulerAction s = ChooseNewThread)) (do _ \ when (domainTime = 0) next_domain; @@ -1856,7 +1868,7 @@ lemma isHighestPrio_corres: assumes "d' = d" assumes "p' = p" shows - "corres ((=)) \ valid_queues + "corres ((=)) \ valid_bitmaps (gets (is_highest_prio d p)) (isHighestPrio d' p')" using assms @@ -1866,18 +1878,16 @@ lemma isHighestPrio_corres: apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) apply (rule corres_if_r'[where P'="\_. True",rotated]) apply (rule_tac corres_symb_exec_r) - apply (rule_tac - P="\s. q = ready_queues s d - " and - P'="\s. valid_queues s \ - l1 = ksReadyQueuesL1Bitmap s d \ - l1 \ 0 \ hprio = lookupBitmapPriority d s" and - F="hprio = Max {prio. q prio \ []}" in corres_req) - apply (elim conjE) - apply (clarsimp simp: valid_queues_def) - apply (subst lookupBitmapPriority_Max_eqI; blast?) - apply (fastforce simp: ready_queues_relation_def dest!: state_relationD) - apply fastforce + apply (rule_tac P="\s. q = ready_queues s d" + and P'="\s. valid_bitmaps s \ l1 = ksReadyQueuesL1Bitmap s d \ + l1 \ 0 \ hprio = lookupBitmapPriority d s" + and F="hprio = Max {prio. q prio \ []}" in corres_req) + apply (elim conjE) + apply (clarsimp simp: valid_bitmaps_def) + apply (subst lookupBitmapPriority_Max_eqI; blast?) 
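The bitmap lemmas reworked in this region (bitmapQ_from_bitmap_lookup, lookupBitmapPriority_obj_at', bitmapL1_zero_ksReadyQueues, lookupBitmapPriority_Max_eqI, isHighestPrio_corres) all express one fact: looking up the highest set bit of the two-level ready-queue bitmap yields the maximum priority whose queue is non-empty, and the L1 word is zero exactly when every queue of that domain is empty. A small illustrative Haskell sketch of such a two-level lookup, using hypothetical word sizes and helper names rather than the kernel's actual bitmap layout:

    import Data.Bits (setBit, testBit)
    import Data.Word (Word64)
    import Data.List (foldl')

    wordBits :: Int
    wordBits = 64

    -- Highest set bit of a non-zero word (the role played by a CLZ instruction).
    highestBit :: Word64 -> Int
    highestBit w = last [i | i <- [0 .. wordBits - 1], testBit w i]

    -- Build the L1/L2 bitmaps from the priorities that have runnable threads:
    -- L1 records which L2 words are non-zero; each L2 word records priorities within it.
    buildBitmaps :: [Int] -> (Word64, Int -> Word64)
    buildBitmaps prios = (l1, l2)
      where
        l2 i = foldl' setBit 0 [p `mod` wordBits | p <- prios, p `div` wordBits == i]
        l1   = foldl' setBit 0 [p `div` wordBits | p <- prios]

    -- The lookup that lookupBitmapPriority models: highest L1 bit, then the
    -- highest bit of the selected L2 word.
    lookupPrio :: Word64 -> (Int -> Word64) -> Int
    lookupPrio l1 l2 = let i = highestBit l1 in i * wordBits + highestBit (l2 i)

    main :: IO ()
    main = do
      let prios    = [0, 3, 67, 120]
          (l1, l2) = buildBitmaps prios
      print (lookupPrio l1 l2 == maximum prios)   -- True

The printed check corresponds to lookupBitmapPriority_Max_eqI: whenever the L1 word is non-zero, the bitmap lookup agrees with Max over the set of non-empty priorities.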
+ apply (fastforce dest: state_relation_ready_queues_relation Max_prio_helper[where d=d] + simp: tcbQueueEmpty_def) + apply fastforce apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imps ksReadyQueuesL1Bitmap_return_wp)+ done @@ -1889,9 +1899,8 @@ crunch inv[wp]: schedule_switch_thread_fastfail P crunch inv[wp]: scheduleSwitchThreadFastfail P lemma setSchedulerAction_invs': (* not in wp set, clobbered by ssa_wp *) - "\\s. invs' s \ setSchedulerAction ChooseNewThread \\_. invs' \" + "setSchedulerAction ChooseNewThread \invs' \" by (wpsimp simp: invs'_def cur_tcb'_def valid_state'_def valid_irq_node'_def ct_not_inQ_def - valid_queues_def valid_queues_no_bitmap_def valid_queues'_def ct_idle_or_in_cur_domain'_def) lemma scheduleChooseNewThread_corres: @@ -1927,6 +1936,46 @@ lemma tcb_sched_action_sym_refs_state_hyp_refs_of[wp]: "tcb_sched_action a b \\s. sym_refs (state_hyp_refs_of s)\" by (wpsimp simp: tcb_sched_action_def) +lemma tcb_sched_enqueue_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_enqueue t \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_enqueue_def set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_append_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_append tcb_ptr \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_append_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_enqueue_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_enqueue t \ready_qs_distinct\ " + unfolding tcb_sched_action_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +lemma tcb_sched_append_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_append t \ready_qs_distinct\ " + unfolding tcb_sched_action_def tcb_sched_append_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +crunches set_scheduler_action + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps simp: in_correct_ready_q_def ready_qs_distinct_def) + +crunches reschedule_required + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (ignore: tcb_sched_action wp: crunch_wps ignore_del: reschedule_required) + lemma schedule_corres: "corres dc (invs and valid_sched and valid_list) invs' (Schedule_A.schedule) ThreadDecls_H.schedule" supply ethread_get_wp[wp del] @@ -1954,7 +2003,7 @@ lemma schedule_corres: apply (rule corres_split[OF thread_get_isRunnable_corres]) apply (rule corres_split[OF corres_when]) apply simp - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule scheduleChooseNewThread_corres, simp) apply (wp thread_get_wp' tcbSchedEnqueue_invs' hoare_vcg_conj_lift hoare_drop_imps | clarsimp)+ @@ -1963,7 +2012,7 @@ lemma schedule_corres: rename_tac was_running wasRunning) apply (rule corres_split[OF corres_when]) apply simp - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_split[OF getIdleThread_corres], rename_tac it it') apply (rule_tac F="was_running \ ct \ it" in corres_gen_asm) apply (rule corres_split[OF ethreadget_corres[where r="(=)"]]) @@ -1977,7 +2026,7 @@ 
lemma schedule_corres: apply (rule corres_split[OF curDomain_corres]) apply (rule corres_split[OF isHighestPrio_corres]; simp only:) apply (rule corres_if, simp) - apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) apply (simp, fold dc_def) apply (rule corres_split[OF setSchedulerAction_corres]) apply simp @@ -1993,7 +2042,7 @@ lemma schedule_corres: apply (rule corres_if, fastforce) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (simp, fold dc_def) apply (rule corres_split[OF setSchedulerAction_corres]) apply simp @@ -2025,7 +2074,8 @@ lemma schedule_corres: in hoare_post_imp, fastforce) apply (wp add: tcb_sched_action_enqueue_valid_blocked_except tcbSchedEnqueue_invs'_not_ResumeCurrentThread thread_get_wp - del: gets_wp)+ + del: gets_wp + | strengthen valid_objs'_valid_tcbs' invs_valid_pspace')+ apply (clarsimp simp: conj_ac if_apply_def2 cong: imp_cong conj_cong del: hoare_gets) apply (wp gets_wp)+ @@ -2048,14 +2098,13 @@ lemma schedule_corres: weak_valid_sched_action_def tcb_at_is_etcb_at tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]] valid_blocked_except_def valid_blocked_def invs_hyp_sym_refs) - apply (clarsimp simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) + apply (fastforce simp: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) done (* choose new thread case *) apply (intro impI conjI allI tcb_at_invs | fastforce simp: invs_def cur_tcb_def valid_etcbs_def valid_sched_def st_tcb_at_def obj_at_def valid_state_def weak_valid_sched_action_def not_cur_thread_def)+ - apply (simp add: valid_sched_def valid_blocked_def valid_blocked_except_def) done (* haskell final subgoal *) @@ -2073,11 +2122,8 @@ proof - apply (simp add: setSchedulerAction_def) apply wp apply (clarsimp simp add: invs'_def valid_state'_def cur_tcb'_def - Invariants_H.valid_queues_def - state_refs_of'_def ps_clear_def - valid_irq_node'_def valid_queues'_def - tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def + state_refs_of'_def ps_clear_def valid_irq_node'_def + tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def bitmapQ_defs cong: option.case_cong) done qed @@ -2112,12 +2158,13 @@ lemma getDomainTime_wp[wp]: "\\s. P (ksDomainTime s) s \ by wp lemma switchToThread_ct_not_queued_2: - "\invs_no_cicd' and tcb_at' t\ switchToThread t \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" - (is "\_\ _ \\_. ?POST\") + "\invs_no_cicd' and tcb_at' t\ + switchToThread t + \\_ s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" apply (simp add: Thread_H.switchToThread_def) apply (wp) apply (simp add: ARM_HYP_H.switchToThread_def setCurThread_def) - apply (wp tcbSchedDequeue_not_tcbQueued | simp )+ + apply (wp tcbSchedDequeue_not_tcbQueued hoare_drop_imp | simp)+ done lemma setCurThread_obj_at': @@ -2131,11 +2178,12 @@ proof - qed lemma switchToIdleThread_ct_not_queued_no_cicd': - "\ invs_no_cicd' \ switchToIdleThread \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s \" + "\invs_no_cicd'\ switchToIdleThread \\_ s. 
obj_at' (Not \ tcbQueued) (ksCurThread s) s \" apply (simp add: Thread_H.switchToIdleThread_def) apply (wp setCurThread_obj_at') - apply (rule idle'_not_tcbQueued') - apply (simp add: invs_no_cicd'_def)+ + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x="ksIdleThread s" in spec) + apply (clarsimp simp: invs_no_cicd'_def valid_idle'_def st_tcb_at'_def idle_tcb'_def obj_at'_def) done lemma switchToIdleThread_activatable_2[wp]: @@ -2152,7 +2200,7 @@ lemma switchToThread_tcb_in_cur_domain': ThreadDecls_H.switchToThread thread \\y s. tcb_in_cur_domain' (ksCurThread s) s\" apply (simp add: Thread_H.switchToThread_def setCurThread_def) - apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued) + apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued hoare_drop_imps) done lemma chooseThread_invs_no_cicd'_posts: (* generic version *) @@ -2174,11 +2222,15 @@ proof - by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) show ?thesis - unfolding chooseThread_def Let_def curDomain_def + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" + in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) @@ -2192,12 +2244,10 @@ proof - apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv switchToThread_ct_not_queued_2 assert_inv hoare_disjI2 switchToThread_tcb_in_cur_domain') - apply clarsimp - apply (clarsimp dest!: invs_no_cicd'_queues - simp: valid_queues_def lookupBitmapPriority_def[symmetric]) - apply (drule (3) lookupBitmapPriority_obj_at') - apply normalise_obj_at' - apply (fastforce simp: tcb_in_cur_domain'_def inQ_def elim: obj_at'_weaken) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ done qed @@ -2236,19 +2286,26 @@ proof - (* FIXME this is almost identical to the chooseThread_invs_no_cicd'_posts proof, can generalise? *) show ?thesis - unfolding chooseThread_def Let_def curDomain_def + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. 
invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" + in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) - apply (simp, wp (once) switchToIdleThread_invs_no_cicd', simp) + apply (simp, wp switchToIdleThread_invs_no_cicd', simp) (* we have a thread to switch to *) apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv) - apply (clarsimp dest!: invs_no_cicd'_queues simp: valid_queues_def) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) apply (fastforce elim: bitmapQ_from_bitmap_lookup simp: lookupBitmapPriority_def) apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ done @@ -2275,7 +2332,7 @@ lemma schedule_invs': "\invs'\ ThreadDecls_H.schedule \\rv. invs'\" supply ssa_wp[wp del] apply (simp add: schedule_def) - apply (rule_tac hoare_seq_ext, rename_tac t) + apply (rule_tac bind_wp, rename_tac t) apply (wp, wpc) \ \action = ResumeCurrentThread\ apply (wp)[1] @@ -2285,7 +2342,7 @@ lemma schedule_invs': apply (wpsimp wp: scheduleChooseNewThread_invs' ssa_invs' chooseThread_invs_no_cicd' setSchedulerAction_invs' setSchedulerAction_direct switchToThread_tcb_in_cur_domain' switchToThread_ct_not_queued_2 - | wp hoare_disjI2[where Q="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] + | wp hoare_disjI2[where R="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] | strengthen invs'_invs_no_cicd @@ -2356,7 +2413,7 @@ lemma schedule_ct_activatable'[wp]: "\invs'\ ThreadDecls_H.schedule \\_. ct_in_state' activatable'\" supply ssa_wp[wp del] apply (simp add: schedule_def) - apply (rule_tac hoare_seq_ext, rename_tac t) + apply (rule_tac bind_wp, rename_tac t) apply (wp, wpc) \ \action = ResumeCurrentThread\ apply (wp)[1] @@ -2397,12 +2454,20 @@ lemma sbn_sch_act_sane: done lemma possibleSwitchTo_corres: - "corres dc (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t) - (Invariants_H.valid_queues and valid_queues' and - (\s. weak_sch_act_wf (ksSchedulerAction s) s) and cur_tcb' and tcb_at' t and st_tcb_at' runnable' t and valid_objs') - (possible_switch_to t) - (possibleSwitchTo t)" + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t + and in_correct_ready_q and ready_qs_distinct and pspace_aligned and pspace_distinct) + ((\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers and valid_objs') + (possible_switch_to t) (possibleSwitchTo t)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) supply ethread_get_wp[wp del] + apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) + apply (clarsimp simp: state_relation_def) + apply (rule tcb_at_cross, erule st_tcb_at_tcb_at; assumption) apply (simp add: possible_switch_to_def possibleSwitchTo_def cong: if_cong) apply (rule corres_guard_imp) apply (rule corres_split[OF curDomain_corres], simp) @@ -2411,21 +2476,21 @@ lemma possibleSwitchTo_corres: apply (clarsimp simp: etcb_relation_def) apply (rule corres_split[OF getSchedulerAction_corres]) apply (rule corres_if, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_if, simp) apply (case_tac action; simp) apply (rule corres_split[OF rescheduleRequired_corres]) - apply (rule tcbSchedEnqueue_corres) - apply (wp rescheduleRequired_valid_queues'_weak)+ + apply (rule tcbSchedEnqueue_corres, simp) + apply (wp reschedule_required_valid_queues | strengthen valid_objs'_valid_tcbs')+ apply (rule setSchedulerAction_corres, simp) apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imp[where f="ethread_get a b" for a b])+ apply (wp hoare_drop_imps)[1] apply wp+ - apply (fastforce simp: valid_sched_def invs_def valid_state_def cur_tcb_def + apply (clarsimp simp: valid_sched_def invs_def valid_state_def cur_tcb_def st_tcb_at_tcb_at valid_sched_action_def weak_valid_sched_action_def tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]]) - apply (simp add: tcb_at_is_etcb_at) + apply (fastforce simp: tcb_at_is_etcb_at) done end diff --git a/proof/refine/ARM_HYP/StateRelation.thy b/proof/refine/ARM_HYP/StateRelation.thy index 0587b987ed..458995c779 100644 --- a/proof/refine/ARM_HYP/StateRelation.thy +++ b/proof/refine/ARM_HYP/StateRelation.thy @@ -20,6 +20,10 @@ where lemmas cte_map_def' = cte_map_def[simplified cte_level_bits_def, simplified] +lemma cte_map_def2: + "cte_map \ \(oref, cref). oref + (of_bl cref << cte_level_bits)" + by (simp add: cte_map_def word_shift_by_n) + definition lookup_failure_map :: "ExceptionTypes_A.lookup_failure \ Fault_H.lookup_failure" where @@ -212,13 +216,20 @@ where \ tcb_bound_notification tcb = tcbBoundNotification tcb' \ tcb_mcpriority tcb = tcbMCP tcb'" +\ \ + A pair of objects @{term "(obj, obj')"} should satisfy the following relation when, under further + mild assumptions, a @{term corres_underlying} lemma for @{term "set_object obj"} + and @{term "setObject obj'"} can be stated: see setObject_other_corres in KHeap_R. 
+ + TCBs do not satisfy this relation because the tcbSchedPrev and tcbSchedNext fields of a TCB are + used to model the ready queues, and so an update to such a field would correspond to an update + to a ready queue (see ready_queues_relation below).\ definition other_obj_relation :: "Structures_A.kernel_object \ Structures_H.kernel_object \ bool" where "other_obj_relation obj obj' \ (case (obj, obj') of - (TCB tcb, KOTCB tcb') \ tcb_relation tcb tcb' - | (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' + (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' | (Notification ntfn, KONotification ntfn') \ ntfn_relation ntfn ntfn' | (ArchObj (ARM_A.ASIDPool pool), KOArch (KOASIDPool pool')) \ asid_pool_relation pool pool' @@ -301,6 +312,12 @@ where | "aobj_relation_cuts (ARM_A.VCPU v) x = {(x, other_obj_relation)}" +definition tcb_relation_cut :: "Structures_A.kernel_object \ kernel_object \ bool" where + "tcb_relation_cut obj obj' \ + case (obj, obj') of + (TCB t, KOTCB t') \ tcb_relation t t' + | _ \ False" + primrec obj_relation_cuts :: "Structures_A.kernel_object \ word32 \ obj_relation_cuts" where @@ -308,7 +325,7 @@ where (if well_formed_cnode_n sz cs then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} else {(x, \\)})" -| "obj_relation_cuts (TCB tcb) x = {(x, other_obj_relation)}" +| "obj_relation_cuts (TCB tcb) x = {(x, tcb_relation_cut)}" | "obj_relation_cuts (Endpoint ep) x = {(x, other_obj_relation)}" | "obj_relation_cuts (Notification ntfn) x = {(x, other_obj_relation)}" | "obj_relation_cuts (ArchObj ao) x = aobj_relation_cuts ao x" @@ -319,6 +336,7 @@ lemma obj_relation_cuts_def2: (case ko of CNode sz cs \ if well_formed_cnode_n sz cs then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} else {(x, \\)} + | TCB tcb \ {(x, tcb_relation_cut)} | ArchObj (PageTable pt) \ (\y. (x + (ucast y << pte_bits), pte_relation y)) ` (UNIV :: (9 word) set) | ArchObj (PageDirectory pd) \ (\y. (x + (ucast y << pde_bits), pde_relation y)) @@ -333,6 +351,7 @@ lemma obj_relation_cuts_def3: "obj_relation_cuts ko x = (case (a_type ko) of ACapTable n \ {(cte_map (x, y), cte_relation y) | y. length y = n} + | ATCB \ {(x, tcb_relation_cut)} | AArch APageTable \ (\y. (x + (ucast y << pte_bits), pte_relation y)) ` (UNIV :: (9 word) set) | AArch APageDirectory \ (\y. (x + (ucast y << pde_bits), pde_relation y)) @@ -351,6 +370,7 @@ definition "is_other_obj_relation_type tp \ case tp of ACapTable n \ False + | ATCB \ False | AArch APageTable \ False | AArch APageDirectory \ False | AArch (AUserData _) \ False @@ -362,6 +382,10 @@ lemma is_other_obj_relation_type_CapTable: "\ is_other_obj_relation_type (ACapTable n)" by (simp add: is_other_obj_relation_type_def) +lemma is_other_obj_relation_type_TCB: + "\ is_other_obj_relation_type ATCB" + by (simp add: is_other_obj_relation_type_def) + lemma is_other_obj_relation_type_UserData: "\ is_other_obj_relation_type (AArch (AUserData sz))" unfolding is_other_obj_relation_type_def by simp @@ -409,11 +433,55 @@ where "sched_act_relation choose_new_thread a' = (a' = ChooseNewThread)" | "sched_act_relation (switch_thread x) a' = (a' = SwitchToThread x)" -definition - ready_queues_relation :: "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) - \ (domain \ priority \ KernelStateData_H.ready_queue) \ bool" -where - "ready_queues_relation qs qs' \ \d p. 
(qs d p = qs' (d, p))" +definition queue_end_valid :: "obj_ref list \ tcb_queue \ bool" where + "queue_end_valid ts q \ + (ts = [] \ tcbQueueEnd q = None) \ (ts \ [] \ tcbQueueEnd q = Some (last ts))" + +definition prev_queue_head :: "tcb_queue \ (obj_ref \ 'a) \ bool" where + "prev_queue_head q prevs \ \head. tcbQueueHead q = Some head \ prevs head = None" + +lemma prev_queue_head_heap_upd: + "\prev_queue_head q prevs; Some r \ tcbQueueHead q\ \ prev_queue_head q (prevs(r := x))" + by (clarsimp simp: prev_queue_head_def) + +definition list_queue_relation :: + "obj_ref list \ tcb_queue \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) \ bool" + where + "list_queue_relation ts q nexts prevs \ + heap_ls nexts (tcbQueueHead q) ts \ queue_end_valid ts q \ prev_queue_head q prevs" + +lemma list_queue_relation_nil: + "list_queue_relation ts q nexts prevs \ ts = [] \ tcbQueueEmpty q" + by (fastforce dest: heap_path_head simp: tcbQueueEmpty_def list_queue_relation_def) + +definition ready_queue_relation :: + "Deterministic_A.domain \ Structures_A.priority + \ Deterministic_A.ready_queue \ ready_queue + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (obj_ref \ bool) \ bool" + where + "ready_queue_relation d p q q' nexts prevs flag \ + list_queue_relation q q' nexts prevs + \ (\t. flag t \ t \ set q) + \ (d > maxDomain \ p > maxPriority \ tcbQueueEmpty q')" + +definition ready_queues_relation_2 :: + "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) + \ (domain \ priority \ ready_queue) + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (domain \ priority \ obj_ref \ bool) \ bool" + where + "ready_queues_relation_2 qs qs' nexts prevs inQs \ + \d p. let q = qs d p; q' = qs' (d, p); flag = inQs d p in + ready_queue_relation d p q q' nexts prevs flag" + +abbreviation ready_queues_relation :: "det_state \ kernel_state \ bool" where + "ready_queues_relation s s' \ + ready_queues_relation_2 + (ready_queues s) (ksReadyQueues s') (tcbSchedNexts_of s') (tcbSchedPrevs_of s') + (\d p. inQ d p |< tcbs_of' s')" + +lemmas ready_queues_relation_def = ready_queues_relation_2_def definition ghost_relation :: "Structures_A.kheap \ (word32 \ vmpage_size) \ (word32 \ nat) \ bool" @@ -488,6 +556,8 @@ lemma obj_relation_cutsE: \sz cs z cap cte. \ ko = CNode sz cs; well_formed_cnode_n sz cs; y = cte_map (x, z); ko' = KOCTE cte; cs z = Some cap; cap_relation cap (cteCap cte) \ \ R; + \tcb tcb'. \ y = x; ko = TCB tcb; ko' = KOTCB tcb'; tcb_relation tcb tcb' \ + \ R; \pt (z :: 9 word) pte'. 
\ ko = ArchObj (PageTable pt); y = x + (ucast z << pte_bits); ko' = KOArch (KOPTE pte'); pte_relation_aligned z (pt z) pte' \ \ R; @@ -498,13 +568,10 @@ lemma obj_relation_cutsE: y = x + n * 2 ^ pageBits; n < 2 ^ (pageBitsForSize sz - pageBits) \ \ R; \ y = x; other_obj_relation ko ko'; is_other_obj_relation_type (a_type ko) \ \ R \ \ R" - apply (simp add: obj_relation_cuts_def2 is_other_obj_relation_type_def - a_type_def - split: Structures_A.kernel_object.split_asm if_split_asm - ARM_A.arch_kernel_obj.split_asm) - apply ((clarsimp split: if_splits, - force simp: cte_relation_def pte_relation_def pde_relation_def)+)[5] - done + by (force simp: obj_relation_cuts_def2 is_other_obj_relation_type_def a_type_def + cte_relation_def pte_relation_def pde_relation_def tcb_relation_cut_def + split: Structures_A.kernel_object.splits kernel_object.splits if_splits + ARM_A.arch_kernel_obj.splits) lemma eq_trans_helper: "\ x = y; P y = Q \ \ P x = Q" @@ -580,7 +647,7 @@ where pspace_relation (kheap s) (ksPSpace s') \ ekheap_relation (ekheap s) (ksPSpace s') \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') - \ ready_queues_relation (ready_queues s) (ksReadyQueues s') + \ ready_queues_relation s s' \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') @@ -602,6 +669,10 @@ lemma curthread_relation: "(a, b) \ state_relation \ ksCurThread b = cur_thread a" by (simp add: state_relation_def) +lemma curdomain_relation[elim!]: + "(s, s') \ state_relation \ cur_domain s = ksCurDomain s'" + by (clarsimp simp: state_relation_def) + lemma state_relation_pspace_relation[elim!]: "(s,s') \ state_relation \ pspace_relation (kheap s) (ksPSpace s')" by (simp add: state_relation_def) @@ -610,12 +681,24 @@ lemma state_relation_ekheap_relation[elim!]: "(s,s') \ state_relation \ ekheap_relation (ekheap s) (ksPSpace s')" by (simp add: state_relation_def) +lemma state_relation_sched_act_relation[elim!]: + "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" + by (clarsimp simp: state_relation_def) + +lemma state_relation_ready_queues_relation[elim!]: + "(s, s') \ state_relation \ ready_queues_relation s s'" + by (simp add: state_relation_def) + +lemma state_relation_idle_thread[elim!]: + "(s, s') \ state_relation \ idle_thread s = ksIdleThread s'" + by (clarsimp simp: state_relation_def) + lemma state_relationD: assumes sr: "(s, s') \ state_relation" shows "pspace_relation (kheap s) (ksPSpace s') \ ekheap_relation (ekheap s) (ksPSpace s') \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') \ - ready_queues_relation (ready_queues s) (ksReadyQueues s') \ + ready_queues_relation s s' \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') \ @@ -637,7 +720,7 @@ lemma state_relationE [elim?]: and rl: "\pspace_relation (kheap s) (ksPSpace s'); ekheap_relation (ekheap s) (ksPSpace s'); sched_act_relation (scheduler_action s) (ksSchedulerAction s'); - ready_queues_relation (ready_queues s) (ksReadyQueues s'); + ready_queues_relation s s'; ghost_relation (kheap s) (gsUserPages s') (gsCNodes s'); cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) (ctes_of s'); diff --git a/proof/refine/ARM_HYP/Syscall_R.thy b/proof/refine/ARM_HYP/Syscall_R.thy index 9139b3ae93..3a19f8427d 100644 --- 
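
The StateRelation.thy changes above replace the old per-(domain, priority) list equality with a pointer-based queue relation: each concrete ready queue is a tcbQueueHead/tcbQueueEnd pair, the queue itself is threaded through the TCBs via their tcbSchedNext/tcbSchedPrev fields, and list_queue_relation recovers the abstract list by walking the next pointers from the head (with queue_end_valid pinning the end pointer and prev_queue_head requiring the head to have no predecessor). The Haskell below is only a minimal executable sketch of that shape for orientation; it is not the seL4 Haskell specification, and the names Ref, walkNexts, listQueueRelation and example are hypothetical, while the record fields deliberately echo tcbSchedNext, tcbSchedPrev, tcbQueueHead and tcbQueueEnd from the definitions above.

-- Illustrative sketch only, not the seL4 Haskell spec: a heap of TCBs whose
-- tcbSchedPrev/tcbSchedNext fields thread a doubly-linked ready queue, plus a
-- check mirroring list_queue_relation: walking the next pointers from the head
-- must yield the abstract list, the end pointer must be its last element, and
-- the head must have no predecessor.
import qualified Data.Map as M
import Data.Maybe (isNothing)

type Ref = Int  -- hypothetical stand-in for an object reference

data TCB = TCB { tcbSchedNext :: Maybe Ref, tcbSchedPrev :: Maybe Ref }

data TcbQueue = TcbQueue { tcbQueueHead :: Maybe Ref, tcbQueueEnd :: Maybe Ref }

-- Follow the next pointers from the head; the bound by the heap size is a
-- crude guard so a cyclic heap cannot loop forever (heap_ls rules cycles out).
walkNexts :: M.Map Ref TCB -> Maybe Ref -> [Ref]
walkNexts heap = go (M.size heap)
  where
    go _ Nothing  = []
    go 0 _        = []                      -- cycle guard
    go n (Just r) = r : go (n - 1) (tcbSchedNext =<< M.lookup r heap)

-- The concrete queue q represents the abstract list ts.
listQueueRelation :: M.Map Ref TCB -> [Ref] -> TcbQueue -> Bool
listQueueRelation heap ts q =
     walkNexts heap (tcbQueueHead q) == ts   -- heap_ls nexts (tcbQueueHead q) ts
  && queueEndValid                           -- queue_end_valid
  && prevQueueHead                           -- prev_queue_head
  where
    queueEndValid
      | null ts   = isNothing (tcbQueueEnd q)
      | otherwise = tcbQueueEnd q == Just (last ts)
    prevQueueHead = case tcbQueueHead q of
      Just h  -> maybe True (isNothing . tcbSchedPrev) (M.lookup h heap)
      Nothing -> True

-- A two-element queue t1 -> t2, of the shape ready_queue_relation expects.
example :: Bool
example = listQueueRelation heap [1, 2] (TcbQueue (Just 1) (Just 2))
  where
    heap = M.fromList
      [ (1, TCB { tcbSchedNext = Just 2, tcbSchedPrev = Nothing })
      , (2, TCB { tcbSchedNext = Nothing, tcbSchedPrev = Just 1 }) ]

On top of this, ready_queues_relation_2 as defined above only adds that the relation holds per (domain, priority), that the inQ flag of a TCB is set exactly when it occurs in the corresponding abstract queue, and that queues above maxDomain or maxPriority are empty; keeping both an end pointer and prev pointers is what allows constant-time enqueue, append and dequeue while the abstract list is still recoverable from the next pointers alone.
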
a/proof/refine/ARM_HYP/Syscall_R.thy +++ b/proof/refine/ARM_HYP/Syscall_R.thy @@ -338,7 +338,7 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: apply (simp add: threadSet_def) apply wp apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp hoare_vcg_all_lift)+ apply (rename_tac word) apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" @@ -351,16 +351,14 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: lemma setDomain_corres: "corres dc - (valid_etcbs and valid_sched and tcb_at tptr) - (invs' and sch_act_simple - and tcb_at' tptr and (\s. new_dom \ maxDomain)) - (set_domain tptr new_dom) - (setDomain tptr new_dom)" + (valid_etcbs and valid_sched and tcb_at tptr and pspace_aligned and pspace_distinct) + (invs' and sch_act_simple and tcb_at' tptr and (\s. new_dom \ maxDomain)) + (set_domain tptr new_dom) (setDomain tptr new_dom)" apply (rule corres_gen_asm2) apply (simp add: set_domain_def setDomain_def thread_set_domain_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) - apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) apply (rule corres_split) apply (rule ethread_set_corres; simp) apply (clarsimp simp: etcb_relation_def) @@ -369,26 +367,38 @@ lemma setDomain_corres: apply (rule corres_split) apply clarsimp apply (rule corres_when[OF refl]) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_when[OF refl]) apply (rule rescheduleRequired_corres) - apply ((wp hoare_drop_imps hoare_vcg_conj_lift | clarsimp| assumption)+)[5] - apply clarsimp - apply (rule_tac Q="\_. valid_objs' and valid_queues' and valid_queues and - (\s. sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" - in hoare_strengthen_post[rotated]) - apply (auto simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def)[1] - apply (wp threadSet_valid_objs' threadSet_valid_queues'_no_state - threadSet_valid_queues_no_state - threadSet_pred_tcb_no_state | simp)+ - apply (rule_tac Q = "\r s. invs' s \ (\p. tptr \ set (ksReadyQueues s p)) \ sch_act_simple s - \ tcb_at' tptr s" in hoare_strengthen_post[rotated]) - apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) - apply (clarsimp simp:valid_tcb'_def) - apply (drule(1) bspec) - apply (clarsimp simp:tcb_cte_cases_def) + apply (wpsimp wp: hoare_drop_imps) + apply ((wpsimp wp: hoare_drop_imps | strengthen valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: gts_wp) + apply wpsimp + apply ((wpsimp wp: hoare_vcg_imp_lift' ethread_set_not_queued_valid_queues hoare_vcg_all_lift + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply (rule_tac Q="\_. valid_objs' and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct' + and (\s. sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" + in hoare_strengthen_post[rotated]) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def) + apply (wpsimp wp: threadSet_valid_objs' threadSet_sched_pointers + threadSet_valid_sched_pointers)+ + apply (rule_tac Q="\_ s. 
valid_queues s \ not_queued tptr s + \ pspace_aligned s \ pspace_distinct s \ valid_etcbs s + \ weak_valid_sched_action s" + in hoare_post_imp) + apply (fastforce simp: pred_tcb_at_def obj_at_def) + apply (wpsimp wp: tcb_dequeue_not_queued) + apply (rule_tac Q = "\_ s. invs' s \ obj_at' (Not \ tcbQueued) tptr s \ sch_act_simple s + \ tcb_at' tptr s" + in hoare_strengthen_post[rotated]) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) + apply (clarsimp simp: valid_tcb'_def obj_at'_def) + apply (drule (1) bspec) + apply (clarsimp simp: tcb_cte_cases_def cteSizeBits_def) apply fastforce - apply (wp hoare_vcg_all_lift Tcb_R.tcbSchedDequeue_not_in_queue)+ + apply (wp hoare_vcg_all_lift tcbSchedDequeue_not_queued)+ apply clarsimp apply (frule tcb_at_is_etcb_at) apply simp+ @@ -400,7 +410,7 @@ lemma performInvocation_corres: "\ inv_relation i i'; call \ block \ \ corres (dc \ (=)) (einvs and valid_invocation i - and simple_sched_action + and schact_is_rct and ct_active and (\s. (\w w2 b c. i = Invocations_A.InvokeEndpoint w w2 b c) \ st_tcb_at simple (cur_thread s) s)) (invs' and sch_act_simple and valid_invocation' i' and ct_active' and (\s. vs_valid_duplicates' (ksPSpace s))) @@ -450,14 +460,14 @@ lemma performInvocation_corres: apply (clarsimp simp: liftME_def) apply (rule corres_guard_imp) apply (erule invokeTCB_corres) - apply (simp)+ + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] \ \domain cap\ apply (clarsimp simp: invoke_domain_def) apply (rule corres_guard_imp) apply (rule corres_split[OF setDomain_corres]) apply (rule corres_trivial, simp) apply (wp)+ - apply (clarsimp+)[2] + apply ((clarsimp simp: invs_psp_aligned invs_distinct)+)[2] \ \CNodes\ apply clarsimp apply (rule corres_guard_imp) @@ -465,7 +475,7 @@ lemma performInvocation_corres: apply assumption apply (rule corres_trivial, simp add: returnOk_def) apply wp+ - apply (clarsimp+)[2] + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) apply (rule corres_guard_imp, rule performIRQControl_corres, simp+) apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) @@ -689,7 +699,7 @@ proof - apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) apply (rule PUC,simp) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (wp hoare_vcg_all_lift static_imp_wp | simp add:ball_conj_distrib)+ + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ done qed @@ -770,90 +780,71 @@ lemma doReply_invs[wp]: "\tcb_at' t and tcb_at' t' and cte_wp_at' (\cte. \grant. cteCap cte = ReplyCap t False grant) slot and invs' and sch_act_simple\ - doReplyTransfer t' t slot grant - \\rv. invs'\" + doReplyTransfer t' t slot grant + \\_. invs'\" apply (simp add: doReplyTransfer_def liftM_def) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (rule hoare_seq_ext [OF _ assert_sp]) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ gts_sp']) + apply (rule bind_wp [OF _ assert_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp, wpc) - apply (wp) + apply wp apply (wp (once) sts_invs_minor'') - apply (simp) + apply simp apply (wp (once) sts_st_tcb') - apply (wp)[1] - apply (rule_tac Q="\rv s. invs' s - \ t \ ksIdleThread s - \ st_tcb_at' awaiting_reply' t s" + apply wp + apply (rule_tac Q="\_ s. 
invs' s \ t \ ksIdleThread s \ st_tcb_at' awaiting_reply' t s" in hoare_post_imp) - apply (clarsimp) - apply (frule_tac t=t in invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply clarsimp apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) - apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) - apply (clarsimp | drule(1) obj_at_conj')+ apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) apply (drule(1) pred_tcb_at_conj') apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") - apply (clarsimp) + apply clarsimp apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) apply (wp cteDeleteOne_reply_pred_tcb_at)+ - apply (clarsimp) + apply clarsimp apply (rule_tac Q="\_. (\s. t \ ksIdleThread s) - and cte_wp_at' (\cte. \grant. cteCap cte = capability.ReplyCap t False grant) slot" - in hoare_strengthen_post [rotated]) + and cte_wp_at' (\cte. \grant. cteCap cte + = capability.ReplyCap t False grant) slot" + in hoare_strengthen_post [rotated]) apply (fastforce simp: cte_wp_at'_def) - apply (wp) + apply wp apply (rule hoare_strengthen_post [OF doIPCTransfer_non_null_cte_wp_at']) apply (erule conjE) apply assumption apply (erule cte_wp_at_weakenE') apply (fastforce) - apply (wp sts_invs_minor'' sts_st_tcb' static_imp_wp) - apply (rule_tac Q="\rv s. invs' s \ sch_act_simple s - \ st_tcb_at' awaiting_reply' t s - \ t \ ksIdleThread s" - in hoare_post_imp) - apply (clarsimp) - apply (frule_tac t=t in invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (wp sts_invs_minor'' sts_st_tcb' hoare_weak_lift_imp) + apply (rule_tac Q="\_ s. invs' s \ sch_act_simple s + \ st_tcb_at' awaiting_reply' t s + \ t \ ksIdleThread s" + in hoare_post_imp) + apply clarsimp apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) - apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) - apply (clarsimp | drule(1) obj_at_conj')+ apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) apply (drule(1) pred_tcb_at_conj') apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") - apply (clarsimp) + apply clarsimp apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" - in pred_tcb'_weakenE) + in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) - apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 static_imp_wp + apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 hoare_weak_lift_imp | clarsimp simp add: inQ_def)+ apply (rule_tac Q="\_. 
invs' and tcb_at' t and sch_act_simple and st_tcb_at' awaiting_reply' t" in hoare_strengthen_post [rotated]) - apply (clarsimp) + apply clarsimp apply (rule conjI) - apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) - apply (rule conjI) - apply clarsimp - apply (clarsimp simp: obj_at'_def idle_tcb'_def pred_tcb_at'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def + idle_tcb'_def pred_tcb_at'_def) apply clarsimp apply (rule conjI) apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) apply (erule pred_tcb'_weakenE, clarsimp) - apply (rule conjI) apply (clarsimp simp : invs'_def valid_state'_def valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) - apply (rule conjI) - apply clarsimp - apply (frule invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, clarsimp) - apply (frule (1) not_tcbQueued_not_ksQ) - apply simp - apply clarsimp apply (wp cteDeleteOne_reply_pred_tcb_at hoare_drop_imp hoare_allI)+ apply (clarsimp simp add: isReply_awaiting_reply' cte_wp_at_ctes_of) apply (auto dest!: st_tcb_idle'[rotated] simp:isCap_simps) @@ -863,35 +854,9 @@ lemma ct_active_runnable' [simp]: "ct_active' s \ ct_in_state' runnable' s" by (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) -lemma valid_irq_node_tcbSchedEnqueue[wp]: - "\\s. valid_irq_node' (irq_node' s) s \ tcbSchedEnqueue ptr - \\rv s'. valid_irq_node' (irq_node' s') s'\" - apply (rule hoare_pre) - apply (simp add:valid_irq_node'_def ) - apply (wp hoare_unless_wp hoare_vcg_all_lift | wps)+ - apply (simp add:tcbSchedEnqueue_def) - apply (wp hoare_unless_wp| simp)+ - apply (simp add:valid_irq_node'_def) - done - -lemma rescheduleRequired_valid_queues_but_ct_domain: - "\\s. Invariants_H.valid_queues s \ valid_objs' s - \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) \ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - done - -lemma rescheduleRequired_valid_queues'_but_ct_domain: - "\\s. valid_queues' s - \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) - \ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: valid_queues'_def)+ - done +crunches tcbSchedEnqueue + for valid_irq_node[wp]: "\s. valid_irq_node' (irq_node' s) s" + (rule: valid_irq_node_lift) lemma tcbSchedEnqueue_valid_action: "\\s. \x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s\ @@ -902,10 +867,11 @@ lemma tcbSchedEnqueue_valid_action: done abbreviation (input) "all_invs_but_sch_extra \ - \s. valid_pspace' s \ Invariants_H.valid_queues s \ + \s. valid_pspace' s \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) \ if_live_then_nonz_cap' s \ + sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ @@ -917,7 +883,6 @@ abbreviation (input) "all_invs_but_sch_extra \ valid_machine_state' s \ cur_tcb' s \ untyped_ranges_zero' s \ - valid_queues' s \ valid_pde_mappings' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ (\x. 
ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s)" @@ -930,15 +895,13 @@ lemma rescheduleRequired_all_invs_but_extra: apply (rule hoare_pre) apply (wp add:rescheduleRequired_ct_not_inQ rescheduleRequired_sch_act' - rescheduleRequired_valid_queues_but_ct_domain - rescheduleRequired_valid_queues'_but_ct_domain valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift cur_tcb_lift) apply auto done lemma threadSet_all_invs_but_sch_extra: - shows "\ tcb_at' t and (\s. (\p. t \ set (ksReadyQueues s p))) and + shows "\ tcb_at' t and all_invs_but_sch_extra and sch_act_simple and K (ds \ maxDomain) \ threadSet (tcbDomain_update (\_. ds)) t @@ -959,13 +922,11 @@ lemma threadSet_all_invs_but_sch_extra: valid_irq_handlers_lift'' threadSet_ctes_ofT threadSet_not_inQ - threadSet_valid_queues'_no_state threadSet_tcbDomain_update_ct_idle_or_in_cur_domain' - threadSet_valid_queues threadSet_valid_dom_schedule' threadSet_iflive'T threadSet_ifunsafe'T - untyped_ranges_zero_lift + untyped_ranges_zero_lift threadSet_sched_pointers threadSet_valid_sched_pointers | simp add:tcb_cte_cases_def cteCaps_of_def o_def)+ apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift threadSet_pred_tcb_no_state | simp)+ apply (clarsimp simp:sch_act_simple_def o_def cteCaps_of_def) @@ -987,21 +948,19 @@ lemma setDomain_invs': (\y. domain \ maxDomain))\ setDomain ptr domain \\y. invs'\" apply (simp add:setDomain_def ) - apply (wp add: hoare_when_wp static_imp_wp static_imp_conj_wp rescheduleRequired_all_invs_but_extra + apply (wp add: when_wp hoare_weak_lift_imp hoare_weak_lift_imp_conj rescheduleRequired_all_invs_but_extra tcbSchedEnqueue_valid_action hoare_vcg_if_lift2) apply (rule_tac Q = "\r s. all_invs_but_sch_extra s \ curThread = ksCurThread s \ (ptr \ curThread \ ct_not_inQ s \ sch_act_wf (ksSchedulerAction s) s \ ct_idle_or_in_cur_domain' s)" in hoare_strengthen_post[rotated]) apply (clarsimp simp:invs'_def valid_state'_def st_tcb_at'_def[symmetric] valid_pspace'_def) - apply (erule st_tcb_ex_cap'') apply simp - apply (case_tac st,simp_all)[1] apply (rule hoare_strengthen_post[OF hoare_vcg_conj_lift]) apply (rule threadSet_all_invs_but_sch_extra) prefer 2 apply clarsimp apply assumption - apply (wp static_imp_wp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain + apply (wp hoare_weak_lift_imp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain threadSet_tcbDomain_update_ct_not_inQ | simp)+ apply (rule_tac Q = "\r s. invs' s \ curThread = ksCurThread s \ sch_act_simple s \ domain \ maxDomain @@ -1013,17 +972,14 @@ lemma setDomain_invs': done lemma performInv_invs'[wp]: - "\invs' and sch_act_simple - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) - and ct_active' and valid_invocation' i\ - RetypeDecls_H.performInvocation block call i \\rv. invs'\" + "\invs' and sch_act_simple and ct_active' and valid_invocation' i\ + RetypeDecls_H.performInvocation block call i + \\_. invs'\" unfolding performInvocation_def apply (cases i) - apply ((clarsimp simp: simple_sane_strg sch_act_simple_def - ct_not_ksQ sch_act_sane_def - | wp tcbinv_invs' arch_performInvocation_invs' - setDomain_invs' - | rule conjI | erule active_ex_cap')+) + apply (clarsimp simp: simple_sane_strg sch_act_simple_def sch_act_sane_def + | wp tcbinv_invs' arch_performInvocation_invs' setDomain_invs' + | rule conjI | erule active_ex_cap')+ done lemma getSlotCap_to_refs[wp]: @@ -1105,7 +1061,7 @@ lemma lookupExtras_real_ctes[wp]: lemma lookupExtras_ctes[wp]: "\valid_objs'\ lookupExtraCaps t xs info \\rv s. \x \ set rv. 
cte_at' (snd x) s\,-" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookupExtras_real_ctes) apply (simp add: real_cte_at') done @@ -1211,18 +1167,21 @@ crunch valid_duplicates'[wp]: addToBitmap "\s. vs_valid_duplicates' (ksP lemma tcbSchedEnqueue_valid_duplicates'[wp]: "\\s. vs_valid_duplicates' (ksPSpace s)\ tcbSchedEnqueue a \\rv s. vs_valid_duplicates' (ksPSpace s)\" - by (simp add: tcbSchedEnqueue_def unless_def setQueue_def | wp | wpc)+ + by (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def setQueue_def | wp | wpc)+ crunch valid_duplicates'[wp]: rescheduleRequired "\s. vs_valid_duplicates' (ksPSpace s)" (wp: setObject_ksInterrupt updateObject_default_inv) crunch valid_duplicates'[wp]: setThreadState "\s. vs_valid_duplicates' (ksPSpace s)" -(*FIXME: move to NonDetMonadVCG.valid_validE_R *) +crunches reply_from_kernel + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + lemma handleInvocation_corres: "c \ b \ corres (dc \ dc) - (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (einvs and schact_is_rct and ct_active) (invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') (handle_invocation c b) @@ -1265,21 +1224,17 @@ lemma handleInvocation_corres: apply simp apply (simp add: when_def) apply (rule conjI, rule impI) - apply (rule reply_from_kernel_tcb_at) + apply (wp reply_from_kernel_tcb_at) apply (rule impI, wp+) - apply simp+ - apply (wp hoare_drop_imps)+ - apply simp - apply wp - apply simp - apply (rule_tac Q="\rv. einvs and simple_sched_action and valid_invocation rve + apply (wpsimp wp: hoare_drop_imps|strengthen invs_distinct invs_psp_aligned)+ + apply (rule_tac Q="\rv. einvs and schact_is_rct and valid_invocation rve and (\s. thread = cur_thread s) and st_tcb_at active thread" in hoare_post_imp) apply (clarsimp simp: simple_from_active ct_in_state_def elim!: st_tcb_weakenE) - apply (wp sts_st_tcb_at' set_thread_state_simple_sched_action - set_thread_state_active_valid_sched) + apply (wp sts_st_tcb_at' set_thread_state_schact_is_rct + set_thread_state_active_valid_sched) apply (rule_tac Q="\rv. invs' and valid_invocation' rve' and (\s. thread = ksCurThread s) and st_tcb_at' active' thread @@ -1287,7 +1242,6 @@ lemma handleInvocation_corres: and (\s. 
vs_valid_duplicates' (ksPSpace s))" in hoare_post_imp) apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) apply (clarsimp) apply (wp setThreadState_nonqueued_state_update setThreadState_st_tcb setThreadState_rct)[1] @@ -1297,7 +1251,8 @@ lemma handleInvocation_corres: | rule hoare_vcg_E_elim)+ apply (clarsimp simp: tcb_at_invs invs_valid_objs valid_tcb_state_def ct_in_state_def - simple_from_active invs_mdb) + simple_from_active invs_mdb + invs_distinct invs_psp_aligned) apply (clarsimp simp: msg_max_length_def word_bits_def) apply (erule st_tcb_ex_cap, clarsimp+) apply fastforce @@ -1305,7 +1260,6 @@ lemma handleInvocation_corres: apply (frule tcb_at_invs') apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def ct_not_inQ_def) - apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) apply (frule pred_tcb'_weakenE [where P=active' and P'=simple'], clarsimp) apply (frule(1) st_tcb_ex_cap'', fastforce) apply (clarsimp simp: valid_pspace'_def) @@ -1349,10 +1303,10 @@ lemma hinv_invs'[wp]: apply (simp add: handleInvocation_def split_def ts_Restart_case_helper') apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' - hoare_vcg_all_lift static_imp_wp) + hoare_vcg_all_lift hoare_weak_lift_imp) apply (simp add: if_apply_def2) apply (wp gts_imp' | simp)+ - apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R[rotated]) + apply (rule_tac Q'="\rv. invs'" in hoare_strengthen_postE_R[rotated]) apply clarsimp apply (subgoal_tac "thread \ ksIdleThread s", simp_all)[1] apply (fastforce elim!: pred_tcb'_weakenE st_tcb_ex_cap'') @@ -1361,10 +1315,7 @@ lemma hinv_invs'[wp]: apply wp+ apply (wp sts_invs_minor' setThreadState_st_tcb setThreadState_rct ct_in_state_thread_state_lift' sts_st_tcb_at'_cases - | clarsimp - | strengthen ct_not_ksQ[rule_format] - )+ - apply (frule(1) ct_not_ksQ) + | clarsimp)+ apply (simp add: conj_comms) apply (fastforce simp add: tcb_at_invs' ct_in_state'_def simple_sane_strg @@ -1374,12 +1325,13 @@ lemma hinv_invs'[wp]: done crunch typ_at'[wp]: handleFault "\s. P (typ_at' T p s)" + (wp: crunch_wps) lemmas handleFault_typ_ats[wp] = typ_at_lifts [OF handleFault_typ_at'] lemma handleSend_corres: "corres (dc \ dc) - (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (einvs and schact_is_rct and ct_active) (invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') (handle_send blocking) (handleSend blocking)" @@ -1468,7 +1420,7 @@ lemma cteDeleteOne_reply_cap_to''[wp]: cteDeleteOne slot \\rv. ex_nonz_cap_to' p\" apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (subgoal_tac "isReplyCap (cteCap cte) \ isNullCap (cteCap cte)") apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv @@ -1512,7 +1464,6 @@ lemma handleRecv_isBlocking_corres': and (\s. ex_nonz_cap_to (cur_thread s) s)) (invs' and ct_in_state' simple' and sch_act_sane - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) and (\s. ex_nonz_cap_to' (ksCurThread s) s)) (handle_recv isBlocking) (handleRecv isBlocking)" (is "corres dc (?pre1) (?pre2) (handle_recv _) (handleRecv _)") @@ -1575,8 +1526,7 @@ lemma handleRecv_isBlocking_corres': lemma handleRecv_isBlocking_corres: "corres dc (einvs and ct_active) - (invs' and ct_active' and sch_act_sane and - (\s. \p. 
ksCurThread s \ set (ksReadyQueues s p))) + (invs' and ct_active' and sch_act_sane) (handle_recv isBlocking) (handleRecv isBlocking)" apply (rule corres_guard_imp) apply (rule handleRecv_isBlocking_corres') @@ -1591,49 +1541,34 @@ lemma lookupCap_refs[wp]: "\invs'\ lookupCap t ref \\rv s. \r\zobj_refs' rv. ex_nonz_cap_to' r s\,-" by (simp add: lookupCap_def split_def | wp | simp add: o_def)+ -lemma deleteCallerCap_ksQ_ct': - "\invs' and ct_in_state' simple' and sch_act_sane and - (\s. ksCurThread s \ set (ksReadyQueues s p) \ thread = ksCurThread s)\ - deleteCallerCap thread - \\rv s. thread \ set (ksReadyQueues s p)\" - apply (rule_tac Q="\rv s. thread = ksCurThread s \ ksCurThread s \ set (ksReadyQueues s p)" - in hoare_strengthen_post) - apply (wp deleteCallerCap_ct_not_ksQ) - apply auto - done - lemma hw_invs'[wp]: "\invs' and ct_in_state' simple' and sch_act_sane and (\s. ex_nonz_cap_to' (ksCurThread s) s) - and (\s. ksCurThread s \ ksIdleThread s) - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ + and (\s. ksCurThread s \ ksIdleThread s)\ handleRecv isBlocking \\r. invs'\" apply (simp add: handleRecv_def cong: if_cong) apply (rule hoare_pre) apply ((wp getNotification_wp | wpc | simp)+)[1] apply (clarsimp simp: ct_in_state'_def) apply ((wp deleteCallerCap_nonz_cap hoare_vcg_all_lift - deleteCallerCap_ksQ_ct' hoare_lift_Pf2[OF deleteCallerCap_simple deleteCallerCap_ct'] | wpc | simp)+)[1] apply simp apply (wp deleteCallerCap_nonz_cap hoare_vcg_all_lift - deleteCallerCap_ksQ_ct' hoare_lift_Pf2[OF deleteCallerCap_simple deleteCallerCap_ct'] | wpc | simp add: ct_in_state'_def whenE_def split del: if_split)+ apply (rule validE_validE_R) apply (rule_tac Q="\rv s. invs' s \ sch_act_sane s - \ (\p. ksCurThread s \ set (ksReadyQueues s p)) \ thread = ksCurThread s \ ct_in_state' simple' s \ ex_nonz_cap_to' thread s \ thread \ ksIdleThread s \ (\x \ zobj_refs' rv. ex_nonz_cap_to' x s)" and E="\_ _. True" - in hoare_post_impErr[rotated]) + in hoare_strengthen_postE[rotated]) apply (clarsimp simp: isCap_simps ct_in_state'_def pred_tcb_at' invs_valid_objs' sch_act_sane_not obj_at'_def projectKOs pred_tcb_at'_def) apply (assumption) @@ -1650,34 +1585,45 @@ lemma setSchedulerAction_obj_at'[wp]: by (wp, clarsimp elim!: obj_at'_pspaceI) lemma handleYield_corres: - "corres dc einvs (invs' and ct_active' and (\s. ksSchedulerAction s = ResumeCurrentThread)) handle_yield handleYield" + "corres dc + (einvs and ct_active) + (invs' and (\s. 
ksSchedulerAction s = ResumeCurrentThread)) + handle_yield handleYield" apply (clarsimp simp: handle_yield_def handleYield_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) apply simp - apply (rule corres_split[OF tcbSchedDequeue_corres]) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (rule rescheduleRequired_corres) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues | simp add: )+ - apply (simp add: invs_def valid_sched_def valid_sched_action_def - cur_tcb_def tcb_at_is_etcb_at) - apply clarsimp - apply (frule ct_active_runnable') - apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def + apply (wpsimp wp: weak_sch_act_wf_lift_linear + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+ + apply (simp add: invs_def valid_sched_def valid_sched_action_def cur_tcb_def + tcb_at_is_etcb_at valid_state_def valid_pspace_def ct_in_state_def + runnable_eq_active) + apply (fastforce simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def) - apply (erule(1) valid_objs_valid_tcbE[OF valid_pspace_valid_objs']) - apply (simp add:valid_tcb'_def) + done + +lemma tcbSchedAppend_ct_in_state'[wp]: + "tcbSchedAppend t \ct_in_state' test\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]; wp) done lemma hy_invs': "\invs' and ct_active'\ handleYield \\r. invs' and ct_active'\" apply (simp add: handleYield_def) - apply (wp ct_in_state_thread_state_lift' - rescheduleRequired_all_invs_but_ct_not_inQ - tcbSchedAppend_invs_but_ct_not_inQ' | simp)+ - apply (clarsimp simp add: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def - valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def - ) + apply (wpsimp wp: ct_in_state_thread_state_lift' rescheduleRequired_all_invs_but_ct_not_inQ) + apply (rule_tac Q="\_. all_invs_but_ct_not_inQ' and ct_active'" in hoare_post_imp) + apply clarsimp + apply (subst pred_conj_def) + apply (rule hoare_vcg_conj_lift) + apply (rule tcbSchedAppend_all_invs_but_ct_not_inQ') + apply wpsimp + apply wpsimp + apply wpsimp apply (simp add:ct_active_runnable'[unfolded ct_in_state'_def]) done @@ -1767,7 +1713,7 @@ lemmas cteDeleteOne_st_tcb_at_simple'[wp] = cteDeleteOne_st_tcb_at[where P=simple', simplified] crunch st_tcb_at_simple'[wp]: handleReply "st_tcb_at' simple' t'" - (wp: hoare_post_taut crunch_wps sts_st_tcb_at'_cases + (wp: hoare_TrueI crunch_wps sts_st_tcb_at'_cases threadSet_pred_tcb_no_state ignore: setThreadState) @@ -1793,18 +1739,17 @@ lemma hr_ct_active'[wp]: "\invs' and ct_active'\ handleReply \\rv. 
ct_active'\" apply (simp add: handleReply_def getSlotCap_def getCurThread_def getThreadCallerSlot_def locateSlot_conv) - apply (rule hoare_seq_ext) - apply (rule ct_in_state'_decomp) - apply ((wp hoare_drop_imps | wpc | simp)+)[1] - apply (subst haskell_assert_def) - apply (wp hoare_vcg_all_lift getCTE_wp doReplyTransfer_st_tcb_at_active - | wpc | simp)+ + apply (rule bind_wp, rename_tac cur_thread) + apply (rule_tac t=cur_thread in ct_in_state'_decomp) + apply (wpsimp wp: getCTE_wp) + apply (fastforce simp: cte_wp_at_ctes_of) + apply (wpsimp wp: getCTE_wp doReplyTransfer_st_tcb_at_active)+ apply (fastforce simp: ct_in_state'_def cte_wp_at_ctes_of valid_cap'_def - dest: ctes_of_valid') + dest: ctes_of_valid') done lemma handleCall_corres: - "corres (dc \ dc) (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + "corres (dc \ dc) (einvs and schact_is_rct and ct_active) (invs' and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') @@ -1866,7 +1811,7 @@ lemma handleReply_sane: "\sch_act_sane\ handleReply \\rv. sch_act_sane\" apply (simp add: handleReply_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) apply (rule hoare_pre) - apply (wp haskell_assert_wp doReplyTransfer_sane getCTE_wp'| wpc)+ + apply (wp doReplyTransfer_sane getCTE_wp'| wpc)+ apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -1882,74 +1827,6 @@ lemma handleReply_nonz_cap_to_ct: crunch ksQ[wp]: handleFaultReply "\s. P (ksReadyQueues s p)" -lemma doReplyTransfer_ct_not_ksQ: - "\ invs' and sch_act_simple - and tcb_at' thread and tcb_at' word - and ct_in_state' simple' - and (\s. ksCurThread s \ word) - and (\s. \p. ksCurThread s \ set(ksReadyQueues s p))\ - doReplyTransfer thread word callerSlot g - \\rv s. \p. ksCurThread s \ set(ksReadyQueues s p)\" -proof - - have astct: "\t p. - \(\s. ksCurThread s \ set(ksReadyQueues s p) \ sch_act_sane s) - and (\s. ksCurThread s \ t)\ - possibleSwitchTo t \\rv s. ksCurThread s \ set(ksReadyQueues s p)\" - apply (rule hoare_weaken_pre) - apply (wps possibleSwitchTo_ct') - apply (wp possibleSwitchTo_ksQ') - apply (clarsimp simp: sch_act_sane_def) - done - have stsct: "\t st p. - \(\s. ksCurThread s \ set(ksReadyQueues s p)) and sch_act_simple\ - setThreadState st t - \\rv s. ksCurThread s \ set(ksReadyQueues s p)\" - apply (rule hoare_weaken_pre) - apply (wps setThreadState_ct') - apply (wp hoare_vcg_all_lift sts_ksQ) - apply (clarsimp) - done - show ?thesis - apply (simp add: doReplyTransfer_def) - apply (wp, wpc) - apply (wp astct stsct hoare_vcg_all_lift - cteDeleteOne_ct_not_ksQ hoare_drop_imp - hoare_lift_Pf2 [OF cteDeleteOne_sch_act_not cteDeleteOne_ct'] - hoare_lift_Pf2 [OF doIPCTransfer_pred_tcb_at' doIPCTransfer_ct'] - hoare_lift_Pf2 [OF doIPCTransfer_ksQ doIPCTransfer_ct'] - hoare_lift_Pf2 [OF threadSet_ksQ threadSet_ct] - hoare_lift_Pf2 [OF handleFaultReply_ksQ handleFaultReply_ct'] - | simp add: ct_in_state'_def)+ - apply (fastforce simp: sch_act_simple_def sch_act_sane_def ct_in_state'_def)+ - done -qed - -lemma handleReply_ct_not_ksQ: - "\invs' and sch_act_simple - and ct_in_state' simple' - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ - handleReply - \\rv s. \p. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: handleReply_def del: split_paired_All) - apply (subst haskell_assert_def) - apply (wp | wpc)+ - apply (wp doReplyTransfer_ct_not_ksQ getThreadCallerSlot_inv)+ - apply (rule_tac Q="\cap. - (\s. \p. 
ksCurThread s \ set(ksReadyQueues s p)) - and invs' - and sch_act_simple - and (\s. thread = ksCurThread s) - and tcb_at' thread - and ct_in_state' simple' - and cte_wp_at' (\c. cteCap c = cap) callerSlot" - in hoare_post_imp) - apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def - cte_wp_at_ctes_of valid_cap'_def - dest!: ctes_of_valid') - apply (wp getSlotCap_cte_wp_at getThreadCallerSlot_inv)+ - apply (clarsimp) - done - crunch valid_etcbs[wp]: possible_switch_to "valid_etcbs" crunch valid_etcbs[wp]: handle_recv "valid_etcbs" (wp: crunch_wps simp: crunch_simps) @@ -1963,23 +1840,21 @@ lemma handleReply_handleRecv_corres: apply (rule corres_split_nor[OF handleReply_corres]) apply (rule handleRecv_isBlocking_corres') apply (wp handle_reply_nonz_cap_to_ct handleReply_sane - handleReply_nonz_cap_to_ct handleReply_ct_not_ksQ handle_reply_valid_sched)+ + handleReply_nonz_cap_to_ct handle_reply_valid_sched)+ apply (fastforce simp: ct_in_state_def ct_in_state'_def simple_sane_strg elim!: st_tcb_weakenE st_tcb_ex_cap') apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) apply (fastforce elim: pred_tcb'_weakenE) done lemma handleHypervisorFault_corres: "corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread) (invs' and sch_act_not thread - and (\s. \p. thread \ set(ksReadyQueues s p)) and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) (handle_hypervisor_fault thread fault) (handleHypervisorFault thread fault)" apply (cases fault; clarsimp simp add: handleHypervisorFault_def returnOk_def2) - apply (corres corres: handleFault_corres) + apply (corresK corres: handleFault_corres) apply (simp add: ucast_id) apply (clarsimp simp: valid_fault_def) done @@ -2000,19 +1875,18 @@ lemma hvmf_invs_lift: doMachineOp_bind getRestartPC_def getRegister_def) lemma hvmf_invs_etc: - "\invs' and sch_act_not t and (\s. \p. t \ set (ksReadyQueues s p)) and st_tcb_at' simple' t and + "\invs' and sch_act_not t and st_tcb_at' simple' t and ex_nonz_cap_to' t\ handleVMFault t f \\_ _. True\, - \\_. invs' and sch_act_not t and (\s. \p. t \ set (ksReadyQueues s p)) and - st_tcb_at' simple' t and ex_nonz_cap_to' t\" + \\_. invs' and sch_act_not t and st_tcb_at' simple' t and ex_nonz_cap_to' t\" apply (rule hvmf_invs_lift) apply (clarsimp simp: invs'_def valid_state'_def valid_machine_state'_def) done lemma handleEvent_corres: "corres (dc \ dc) (einvs and (\s. event \ Interrupt \ ct_running s) and - (\s. scheduler_action s = resume_cur_thread)) + schact_is_rct) (invs' and (\s. event \ Interrupt \ ct_running' s) and (\s. vs_valid_duplicates' (ksPSpace s)) and (\s. ksSchedulerAction s = ResumeCurrentThread)) @@ -2020,14 +1894,13 @@ lemma handleEvent_corres: (is "?handleEvent_corres") proof - have hw: - "\isBlocking. corres dc (einvs and ct_running and (\s. scheduler_action s = resume_cur_thread)) + "\isBlocking. corres dc (einvs and ct_running and schact_is_rct) (invs' and ct_running' and (\s. 
ksSchedulerAction s = ResumeCurrentThread)) (handle_recv isBlocking) (handleRecv isBlocking)" apply (rule corres_guard_imp [OF handleRecv_isBlocking_corres]) apply (clarsimp simp: ct_in_state_def ct_in_state'_def - elim!: st_tcb_weakenE pred_tcb'_weakenE - dest!: ct_not_ksQ)+ + elim!: st_tcb_weakenE pred_tcb'_weakenE)+ done show ?thesis apply (case_tac event) @@ -2053,7 +1926,6 @@ proof - simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] @@ -2066,7 +1938,6 @@ proof - simp: ct_in_state_def valid_fault_def) apply wp apply clarsimp - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] @@ -2082,13 +1953,7 @@ proof - doMachineOp_getActiveIRQ_IRQ_active' | simp | simp add: imp_conjR | wp (once) hoare_drop_imps)+ - apply force - apply simp - apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def valid_queues_def - valid_queues_no_bitmap_def) - apply (erule allE)+ - apply (erule conjE, drule (1) bspec) - apply (clarsimp simp: obj_at'_def inQ_def) + apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def valid_queues_def) apply (rule_tac corres_underlying_split) apply (rule corres_guard_imp, rule getCurThread_corres, simp+) apply (rule corres_split_catch) @@ -2101,7 +1966,6 @@ proof - apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (fastforce simp: simple_sane_strg sch_act_simple_def ct_in_state'_def elim: st_tcb_ex_cap'' pred_tcb'_weakenE) apply (rule corres_underlying_split) @@ -2112,7 +1976,6 @@ proof - simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] @@ -2123,10 +1986,11 @@ crunches handleVMFault for st_tcb_at'[wp]: "st_tcb_at' P t" and norq[wp]: "\s. P (ksReadyQueues s)" (ignore: getFAR getDFSR getIFSR) + crunches handleVMFault, handleHypervisorFault for cap_to'[wp]: "ex_nonz_cap_to' t" and ksit[wp]: "\s. P (ksIdleThread s)" - (ignore: getFAR getDFSR getIFSR) + (ignore: getFAR getDFSR getIFSR wp: crunch_wps) (* FIXME *) lemma hv_stuff'[wp]: @@ -2137,9 +2001,10 @@ lemma hv_stuff'[wp]: by (wpsimp wp: getDFSR_inv getHSR_inv getHDFAR_inv getRestartPC_inv det_getRestartPC asUser_inv) lemma hh_invs'[wp]: - "\invs' and sch_act_not p and (\s. \a b. p \ set (ksReadyQueues s (a, b))) and - st_tcb_at' simple' p and ex_nonz_cap_to' p and (\s. p \ ksIdleThread s)\ - handleHypervisorFault p t \\_. invs'\" + "\invs' and sch_act_not p and st_tcb_at' simple' p and ex_nonz_cap_to' p + and (\s. p \ ksIdleThread s)\ + handleHypervisorFault p t + \\_. 
invs'\" apply (simp add: ARM_HYP_H.handleHypervisorFault_def) apply (cases t; wpsimp) done @@ -2205,10 +2070,8 @@ proof - apply (rename_tac syscall) apply (case_tac syscall, (wp handleReply_sane handleReply_nonz_cap_to_ct handleReply_ksCurThread - handleReply_ct_not_ksQ | clarsimp simp: active_from_running' simple_from_running' simple_sane_strg simp del: split_paired_All | rule conjI active_ex_cap' - | drule ct_not_ksQ[rotated] | strengthen nidle)+) apply (rule hoare_strengthen_post, rule hoare_weaken_pre, @@ -2221,7 +2084,6 @@ proof - | erule pred_tcb'_weakenE st_tcb_ex_cap'' | clarsimp simp: tcb_at_invs ct_in_state'_def simple_sane_strg sch_act_simple_def | drule st_tcb_at_idle_thread' - | drule ct_not_ksQ[rotated] | wpc | wp (once) hoare_drop_imps hoare_vcg_all_lift)+ done qed @@ -2255,7 +2117,6 @@ lemma hi_IRQInactive: -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (simp add: handleInvocation_def split_def) apply (wp syscall_valid' retype_pi_IRQInactive) - apply simp_all done lemma handleSend_IRQInactive: diff --git a/proof/refine/ARM_HYP/TcbAcc_R.thy b/proof/refine/ARM_HYP/TcbAcc_R.thy index 0ff5098fed..fabd396868 100644 --- a/proof/refine/ARM_HYP/TcbAcc_R.thy +++ b/proof/refine/ARM_HYP/TcbAcc_R.thy @@ -11,10 +11,8 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) declare if_weak_cong [cong] -declare result_in_set_wp[wp] declare hoare_in_monad_post[wp] declare trans_state_update'[symmetric,simp] -declare empty_fail_sequence_x[simp] declare storeWordUser_typ_at' [wp] (* Auxiliaries and basic properties of priority bitmap functions *) @@ -51,7 +49,7 @@ lemma isHighestPrio_def': "isHighestPrio d p = gets (\s. ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' apply (rule ext) - apply (clarsimp simp: gets_def bind_assoc return_def NonDetMonad.bind_def get_def + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def split: if_splits) done @@ -60,10 +58,8 @@ lemma getHighestPrio_inv[wp]: unfolding bitmap_fun_defs by simp lemma valid_bitmapQ_bitmapQ_simp: - "\ valid_bitmapQ s \ \ - bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" - unfolding valid_bitmapQ_def - by simp + "valid_bitmapQ s \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (simp add: valid_bitmapQ_def) lemma prioToL1Index_l1IndexToPrio_or_id: "\ unat (w'::priority) < 2 ^ wordRadix ; w < size w' \ @@ -86,20 +82,6 @@ lemma l1IndexToPrio_wordRadix_mask[simp]: unfolding l1IndexToPrio_def by (simp add: wordRadix_def') -definition - (* when in the middle of updates, a particular queue might not be entirely valid *) - valid_queues_no_bitmap_except :: "word32 \ kernel_state \ bool" -where - "valid_queues_no_bitmap_except t' \ \s. - (\d p. (\t \ set (ksReadyQueues s (d, p)). 
t \ t' \ obj_at' (inQ d p and runnable' \ tcbState) t s) - \ distinct (ksReadyQueues s (d, p)) - \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - -lemma valid_queues_no_bitmap_exceptI[intro]: - "valid_queues_no_bitmap s \ valid_queues_no_bitmap_except t s" - unfolding valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def - by simp - lemma st_tcb_at_coerce_abstract: assumes t: "st_tcb_at' P t c" assumes sr: "(a, c) \ state_relation" @@ -109,11 +91,10 @@ lemma st_tcb_at_coerce_abstract: projectKOs objBits_simps) apply (erule(1) pspace_dom_relatedE) apply (erule(1) obj_relation_cutsE, simp_all) - apply (clarsimp simp: st_tcb_at_def obj_at_def other_obj_relation_def - tcb_relation_def - split: Structures_A.kernel_object.split_asm if_split_asm - ARM_A.arch_kernel_obj.split_asm)+ - apply fastforce + apply (fastforce simp: st_tcb_at_def obj_at_def other_obj_relation_def + tcb_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + ARM_A.arch_kernel_obj.split_asm)+ done lemma st_tcb_at_runnable_coerce_concrete: @@ -130,24 +111,130 @@ lemma st_tcb_at_runnable_coerce_concrete: apply (case_tac "tcb_state tcb"; simp) done -lemma valid_objs_valid_tcbE: "\s t.\ valid_objs' s; tcb_at' t s; \tcb. valid_tcb' tcb s \ R s tcb \ \ obj_at' (R s) t s" +lemma pspace_relation_tcb_at': + assumes p: "pspace_relation (kheap a) (ksPSpace c)" + assumes t: "tcb_at t a" + assumes aligned: "pspace_aligned' c" + assumes distinct: "pspace_distinct' c" + shows "tcb_at' t c" + using assms + apply (clarsimp simp: obj_at_def) + apply (drule(1) pspace_relation_absD) + apply (clarsimp simp: is_tcb tcb_relation_cut_def) + apply (simp split: kernel_object.split_asm) + apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb], simp) + apply (erule obj_at'_weakenE) + apply simp + done + +lemma tcb_at_cross: + "\tcb_at t s; pspace_aligned s; pspace_distinct s; pspace_relation (kheap s) (ksPSpace s')\ + \ tcb_at' t s'" + apply (drule (2) pspace_distinct_cross) + apply (drule (1) pspace_aligned_cross) + apply (erule (3) pspace_relation_tcb_at') + done + +lemma tcb_at'_cross: + assumes p: "pspace_relation (kheap s) (ksPSpace s')" + assumes t: "tcb_at' ptr s'" + shows "tcb_at ptr s" + using assms + apply (clarsimp simp: obj_at'_def) + apply (erule (1) pspace_dom_relatedE) + by (clarsimp simp: obj_relation_cuts_def2 obj_at_def cte_relation_def + other_obj_relation_def pte_relation_def pde_relation_def is_tcb_def projectKOs + split: Structures_A.kernel_object.split_asm if_split_asm arch_kernel_obj.split_asm) + +lemma st_tcb_at_runnable_cross: + "\ st_tcb_at runnable t s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ st_tcb_at' runnable' t s'" + apply (frule (1) pspace_distinct_cross, fastforce simp: state_relation_def) + apply (frule pspace_aligned_cross, fastforce simp: state_relation_def) + apply (prop_tac "tcb_at t s", clarsimp simp: st_tcb_at_def obj_at_def is_tcb) + apply (drule (2) tcb_at_cross, fastforce simp: state_relation_def) + apply (erule (2) st_tcb_at_runnable_coerce_concrete) + done + +lemma cur_tcb_cross: + "\ cur_tcb s; pspace_aligned s; pspace_distinct s; (s,s') \ state_relation \ \ cur_tcb' s'" + apply (clarsimp simp: cur_tcb'_def cur_tcb_def state_relation_def) + apply (erule (3) tcb_at_cross) + done + +lemma valid_objs_valid_tcbE: + "\s t.\ valid_objs' s; tcb_at' t s; \tcb. 
valid_tcb' tcb s \ R s tcb \ \ obj_at' (R s) t s" apply (clarsimp simp add: projectKOs valid_objs'_def ran_def typ_at'_def ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) apply (fastforce simp: projectKO_def projectKO_opt_tcb return_def valid_tcb'_def) done -lemma valid_objs'_maxDomain: - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbDomain tcb \ maxDomain) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) +lemma valid_tcb'_tcbDomain_update: + "new_dom \ maxDomain \ + \tcb. valid_tcb' tcb s \ valid_tcb' (tcbDomain_update (\_. new_dom) tcb) s" + unfolding valid_tcb'_def + apply (clarsimp simp: tcb_cte_cases_def objBits_simps') done -lemma valid_objs'_maxPriority: - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbPriority tcb \ maxPriority) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) +lemma valid_tcb'_tcbState_update: + "\valid_tcb_state' st s; valid_tcb' tcb s\ \ + valid_tcb' (tcbState_update (\_. st) tcb) s" + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def valid_tcb_state'_def objBits_simps') done +definition valid_tcbs' :: "kernel_state \ bool" where + "valid_tcbs' s' \ \ptr tcb. ksPSpace s' ptr = Some (KOTCB tcb) \ valid_tcb' tcb s'" + +lemma valid_objs'_valid_tcbs'[elim!]: + "valid_objs' s \ valid_tcbs' s" + by (auto simp: valid_objs'_def valid_tcbs'_def valid_obj'_def split: kernel_object.splits) + +lemma invs'_valid_tcbs'[elim!]: + "invs' s \ valid_tcbs' s" + by (fastforce intro: valid_objs'_valid_tcbs') + +lemma valid_tcbs'_maxDomain: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbDomain tcb \ maxDomain) t s" + apply (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def projectKOs) + done + +lemmas valid_objs'_maxDomain = valid_tcbs'_maxDomain[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_maxPriority: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbPriority tcb \ maxPriority) t s" + apply (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def projectKOs) + done + +lemmas valid_objs'_maxPriority = valid_tcbs'_maxPriority[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_obj_at': + assumes "valid_tcbs' s" + "tcb_at' t s" + "\tcb. ko_at' tcb t s \ valid_tcb' tcb s \ R s tcb" + shows "obj_at' (R s) t s" + using assms + apply (clarsimp simp add: valid_tcbs'_def ran_def typ_at'_def + ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def projectKOs) + done + +lemma update_valid_tcb'[simp]: + "\f. valid_tcb' tcb (ksReadyQueuesL1Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueuesL2Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueues_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksSchedulerAction_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksDomainTime_update f s) = valid_tcb' tcb s" + by (auto simp: valid_tcb'_def valid_tcb_state'_def valid_bound_tcb'_def valid_bound_ntfn'_def + opt_tcb_at'_def valid_arch_tcb'_def + split: option.splits thread_state.splits) + +lemma update_valid_tcbs'[simp]: + "\f. valid_tcbs' (ksReadyQueuesL1Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueuesL2Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueues_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksSchedulerAction_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksDomainTime_update f s) = valid_tcbs' s" + by (simp_all add: valid_tcbs'_def) + lemma doMachineOp_irq_states': assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. 
P (irq_masks s)\" shows "\valid_irq_states'\ doMachineOp f \\rv. valid_irq_states'\" @@ -229,7 +316,7 @@ lemma preemptionPoint_irq [wp]: "\valid_irq_states'\ preemptionPoint -, \\irq s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" apply (simp add: preemptionPoint_def setWorkUnits_def modifyWorkUnits_def getWorkUnits_def) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (rule hoare_post_imp) prefer 2 apply (rule doMachineOp_getActiveIRQ_IRQ_active) @@ -245,56 +332,117 @@ lemma updateObject_tcb_inv: by simp (rule updateObject_default_inv) lemma setObject_update_TCB_corres': - assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" - assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" - assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" + assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'" + assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb" + assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF new_tcb' = getF tcb'" + assumes sched_pointers: "tcbSchedPrev new_tcb' = tcbSchedPrev tcb'" + "tcbSchedNext new_tcb' = tcbSchedNext tcb'" + assumes flag: "tcbQueued new_tcb' = tcbQueued tcb'" assumes r: "r () ()" - assumes exst: "exst_same tcb' tcbu'" - shows "corres r (ko_at (TCB tcb) add) - (ko_at' tcb' add) - (set_object add (TCB tcbu)) (setObject add tcbu')" - apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' tcbu'" in corres_req) + assumes exst: "exst_same tcb' new_tcb'" + shows + "corres r + (ko_at (TCB tcb) ptr) (ko_at' tcb' ptr) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" + apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' new_tcb'" in corres_req) apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) apply (frule(1) pspace_relation_absD) - apply (clarsimp simp: projectKOs other_obj_relation_def exst) - apply (rule corres_guard_imp) - apply (rule corres_rel_imp) - apply (rule setObject_other_corres[where P="(=) tcb'"]) - apply (rule ext)+ - apply simp - defer - apply (simp add: is_other_obj_relation_type_def - projectKOs objBits_simps' - other_obj_relation_def tcbs r)+ - apply (fastforce elim!: obj_at_weakenE dest: bspec[OF tables]) - apply (subst(asm) eq_commute, assumption) - apply (clarsimp simp: projectKOs obj_at'_def objBits_simps) - apply (subst map_to_ctes_upd_tcb, assumption+) - apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) - apply (subst if_not_P) - apply (fastforce dest: bspec [OF tables', OF ranI]) - apply simp + apply (clarsimp simp: projectKOs tcb_relation_cut_def exst) + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp: obj_at'_def) + apply (unfold set_object_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def projectKOs obj_at_def + updateObject_default_def in_magnitude_check obj_at'_def) + apply (rename_tac s s' t') + apply (prop_tac "t' = s'") + apply (clarsimp simp: magnitudeCheck_def in_monad split: option.splits) + apply (drule singleton_in_magnitude_check) + apply (prop_tac "map_to_ctes ((ksPSpace s') (ptr \ injectKO new_tcb')) + = map_to_ctes (ksPSpace s')") + apply (frule_tac tcb=new_tcb' and tcb=tcb' in map_to_ctes_upd_tcb) + apply (clarsimp simp: objBits_simps) + apply (clarsimp simp: objBits_simps ps_clear_def3 field_simps objBits_defs mask_def) + apply (insert tables')[1] + apply (rule ext) + apply (clarsimp split: if_splits) + apply blast + apply (prop_tac 
"obj_at (same_caps (TCB new_tcb)) ptr s") + using tables + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def assms) + apply (clarsimp simp add: state_relation_def) + apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x=ptr in allE)+ + apply clarsimp + apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply clarsimp + apply (rule conjI) + apply (simp only: pspace_relation_def simp_thms + pspace_dom_update[where x="kernel_object.TCB _" + and v="kernel_object.TCB _", + simplified a_type_def, simplified]) + apply (rule conjI) + using assms + apply (simp only: dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: tcb_relation_cut_def project_inject split: if_split_asm kernel_object.split_asm) + apply (rename_tac aa ba) + apply (drule_tac x="(aa, ba)" in bspec, simp) + apply clarsimp + apply (frule_tac ko'="kernel_object.TCB tcb" and x'=ptr in obj_relation_cut_same_type) + apply (simp add: tcb_relation_cut_def)+ + apply clarsimp + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule (1) bspec) + apply (insert exst) + apply (clarsimp simp: etcb_relation_def exst_same_def) + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (insert sched_pointers flag exst) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext new_tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev new_tcb') = tcbSchedPrevs_of s'") + apply (fastforce simp: opt_map_def) + apply (clarsimp simp: ready_queue_relation_def opt_pred_def opt_map_def exst_same_def + inQ_def projectKOs + split: option.splits) + apply (metis (mono_tags, opaque_lifting)) + apply (clarsimp simp: fun_upd_def caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def) done lemma setObject_update_TCB_corres: - "\ tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'; - \(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb; - \(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'; - r () (); exst_same tcb' tcbu'\ - \ corres r (\s. get_tcb add s = Some tcb) - (\s'. (tcb', s') \ fst (getObject add s')) - (set_object add (TCB tcbu)) (setObject add tcbu')" + "\tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'; + \(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb; + \(getF, v) \ ran tcb_cte_cases. getF new_tcb' = getF tcb'; + tcbSchedPrev new_tcb' = tcbSchedPrev tcb'; tcbSchedNext new_tcb' = tcbSchedNext tcb'; + tcbQueued new_tcb' = tcbQueued tcb'; exst_same tcb' new_tcb'; + r () ()\ \ + corres r + (\s. get_tcb ptr s = Some tcb) (\s'. 
(tcb', s') \ fst (getObject ptr s')) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" apply (rule corres_guard_imp) - apply (erule (3) setObject_update_TCB_corres', force) - apply fastforce - apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def - loadObject_default_def projectKOs objBits_simps' - in_magnitude_check) + apply (erule (7) setObject_update_TCB_corres') + apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def + loadObject_default_def objBits_simps' in_magnitude_check projectKOs)+ done lemma getObject_TCB_corres: - "corres tcb_relation (tcb_at t) (tcb_at' t) + "corres tcb_relation (tcb_at t and pspace_aligned and pspace_distinct) \ (gets_the (get_tcb t)) (getObject t)" + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) apply (rule corres_guard_imp) apply (rule corres_gets_the) apply (rule corres_get_tcb) @@ -304,7 +452,8 @@ lemma getObject_TCB_corres: lemma threadGet_corres: assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ r (f tcb) (f' tcb')" - shows "corres r (tcb_at t) (tcb_at' t) (thread_get f t) (threadGet f' t)" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get f t) (threadGet f' t)" apply (simp add: thread_get_def threadGet_def) apply (fold liftM_def) apply simp @@ -326,7 +475,8 @@ lemma ball_tcb_cte_casesI: by (simp add: tcb_cte_cases_def) lemma all_tcbI: - "\ \a b c d e f g h i j k l m n p q. P (Thread a b c d e f g h i j k l m n p q) \ \ \tcb. P tcb" + "\ \a b c d e f g h i j k l m n p q r s. P (Thread a b c d e f g h i j k l m n p q r s) \ + \ \tcb. P tcb" by (rule allI, case_tac tcb, simp) lemma threadset_corresT: @@ -335,18 +485,24 @@ lemma threadset_corresT: assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (f' tcb) = getF tcb" + assumes sched_pointers: "\tcb. tcbSchedPrev (f' tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (f' tcb) = tcbSchedNext tcb" + assumes flag: "\tcb. tcbQueued (f' tcb) = tcbQueued tcb" assumes e: "\tcb'. exst_same tcb' (f' tcb')" - shows "corres dc (tcb_at t) - (tcb_at' t) - (thread_set f t) (threadSet f' t)" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) + \ + (thread_set f t) (threadSet f' t)" apply (simp add: thread_set_def threadSet_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getObject_TCB_corres]) apply (rule setObject_update_TCB_corres') - apply (erule x) - apply (rule y) - apply (clarsimp simp: bspec_split [OF spec [OF z]]) - apply fastforce + apply (erule x) + apply (rule y) + apply (clarsimp simp: bspec_split [OF spec [OF z]]) + apply fastforce + apply (rule sched_pointers) + apply (rule sched_pointers) + apply (rule flag) apply simp apply (rule e) apply wp+ @@ -376,16 +532,19 @@ lemma threadSet_corres_noopT: tcb_relation tcb (fn tcb')" assumes y: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (fn tcb) = getF tcb" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" assumes e: "\tcb'. exst_same tcb' (fn tcb')" - shows "corres dc \ (tcb_at' t) - (return v) (threadSet fn t)" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (return v) (threadSet fn t)" proof - have S: "\t s. tcb_at t s \ return v s = (thread_set id t >>= (\x. 
return v)) s" apply (clarsimp simp: tcb_at_def) apply (simp add: return_def thread_set_def gets_the_def assert_def assert_opt_def simpler_gets_def set_object_def get_object_def put_def get_def bind_def) - apply (subgoal_tac "kheap s(t \ TCB tcb) = kheap s") + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s") apply (simp add: map_upd_triv get_tcb_SomeD)+ done show ?thesis @@ -394,16 +553,15 @@ proof - defer apply (subst bind_return [symmetric], rule corres_underlying_split [OF threadset_corresT]) - apply (simp add: x) - apply simp - apply (rule y) + apply (simp add: x) + apply simp + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) apply (rule e) apply (rule corres_noop [where P=\ and P'=\]) apply wpsimp+ - apply (erule pspace_relation_tcb_at[rotated]) - apply clarsimp - apply simp - apply simp done qed @@ -417,14 +575,20 @@ lemma threadSet_corres_noop_splitT: getF (fn tcb) = getF tcb" assumes z: "corres r P Q' m m'" assumes w: "\P'\ threadSet fn t \\x. Q'\" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" assumes e: "\tcb'. exst_same tcb' (fn tcb')" - shows "corres r P (tcb_at' t and P') + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct and P) P' m (threadSet fn t >>= (\rv. m'))" apply (rule corres_guard_imp) apply (subst return_bind[symmetric]) apply (rule corres_split_nor[OF threadSet_corres_noopT]) - apply (simp add: x) - apply (rule y) + apply (simp add: x) + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) apply (rule e) apply (rule z) apply (wp w)+ @@ -661,7 +825,12 @@ lemma threadSet_valid_pspace'T_P: assumes v: "\tcb. (P \ Q' (tcbBoundNotification tcb)) \ (\s. valid_bound_ntfn' (tcbBoundNotification tcb) s \ valid_bound_ntfn' (tcbBoundNotification (F tcb)) s)" - + assumes p: "\tcb. (P \ Q'' (tcbSchedPrev tcb)) \ + (\s. opt_tcb_at' (tcbSchedPrev tcb) s + \ opt_tcb_at' (tcbSchedPrev (F tcb)) s)" + assumes n: "\tcb. (P \ Q''' (tcbSchedNext tcb)) \ + (\s. opt_tcb_at' (tcbSchedNext tcb) s + \ opt_tcb_at' (tcbSchedNext (F tcb)) s)" assumes y: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" assumes u: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" @@ -669,9 +838,11 @@ lemma threadSet_valid_pspace'T_P: assumes w': "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" assumes v': "\tcb s. valid_arch_tcb' (tcbArch tcb) s \ valid_arch_tcb' (tcbArch (F tcb)) s" shows - "\valid_pspace' and (\s. P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s)\ - threadSet F t - \\rv. valid_pspace'\" + "\valid_pspace' and (\s. P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s + \ obj_at' (\tcb. Q'' (tcbSchedPrev tcb)) t s + \ obj_at' (\tcb. Q''' (tcbSchedNext tcb)) t s)\ + threadSet F t + \\_. valid_pspace'\" apply (simp add: valid_pspace'_def threadSet_def) apply (rule hoare_pre, wp setObject_tcb_valid_objs getObject_tcb_wp) @@ -679,7 +850,7 @@ lemma threadSet_valid_pspace'T_P: apply (erule(1) valid_objsE') apply (clarsimp simp add: valid_obj'_def valid_tcb'_def bspec_split [OF spec [OF x]] z - split_paired_Ball y u w v w' v') + split_paired_Ball y u w v w' v' p n) done lemmas threadSet_valid_pspace'T = @@ -763,6 +934,10 @@ lemma threadSet_iflive'T: \ tcbState (F tcb) \ Inactive \ tcbState (F tcb) \ IdleThreadState \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. 
tcbSchedNext tcb = None \ tcbSchedNext (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. tcbSchedPrev tcb = None \ tcbSchedPrev (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb) \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) \((\tcb. \ bound (atcbVCPUPtr (tcbArch tcb)) \ bound (atcbVCPUPtr (tcbArch (F tcb))) @@ -788,7 +963,7 @@ lemma threadSet_cte_wp_at'T: getF (F tcb) = getF tcb" shows "\\s. P' (cte_wp_at' P p s)\ threadSet F t \\rv s. P' (cte_wp_at' P p s)\" apply (simp add: threadSet_def) - apply (rule hoare_seq_ext [where B="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) + apply (rule bind_wp [where Q'="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) apply (rule setObject_cte_wp_at2') apply (clarsimp simp: updateObject_default_def projectKOs in_monad obj_at'_def objBits_simps' in_magnitude_check prod_eq_iff) @@ -818,6 +993,12 @@ lemmas threadSet_ctes_of = lemmas threadSet_cap_to' = ex_nonz_cap_to_pres' [OF threadSet_cte_wp_at'] +lemma threadSet_cap_to: + "(\tcb. \(getF, v)\ran tcb_cte_cases. getF (f tcb) = getF tcb) + \ threadSet f tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: hoare_vcg_ex_lift threadSet_cte_wp_at' + simp: ex_nonz_cap_to'_def tcb_cte_cases_def objBits_simps') + lemma threadSet_idle'T: assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" shows @@ -856,30 +1037,6 @@ lemma set_tcb_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ done -lemma threadSet_valid_queues_no_bitmap: - "\ valid_queues_no_bitmap and - (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) - \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s - \ t \ set (ksReadyQueues s (d, p)) - )\ - threadSet f t - \\rv. valid_queues_no_bitmap \" - apply (simp add: threadSet_def) - apply wp - apply (simp add: Invariants_H.valid_queues_no_bitmap_def' pred_tcb_at'_def) - - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def projectKOs) - apply (fastforce) - done - lemma threadSet_valid_bitmapQ[wp]: "\ valid_bitmapQ \ threadSet f t \ \rv. valid_bitmapQ \" unfolding bitmapQ_defs threadSet_def @@ -898,73 +1055,6 @@ lemma threadSet_valid_bitmapQ_no_L2_orphans[wp]: by (clarsimp simp: setObject_def split_def) (wp | simp add: updateObject_default_def)+ -lemma threadSet_valid_queues: - "\Invariants_H.valid_queues and - (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) - \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s - \ t \ set (ksReadyQueues s (d, p)) - )\ - threadSet f t - \\rv. Invariants_H.valid_queues\" - unfolding valid_queues_def - by (wp threadSet_valid_queues_no_bitmap;simp) - -definition - addToQs :: "(Structures_H.tcb \ Structures_H.tcb) - \ word32 \ (domain \ priority \ word32 list) - \ (domain \ priority \ word32 list)" -where - "addToQs F t \ \qs (qdom, prio). if (\ko. \ inQ qdom prio (F ko)) - then t # qs (qdom, prio) - else qs (qdom, prio)" - -lemma addToQs_set_def: - "(t' \ set (addToQs F t qs (qdom, prio))) = (t' \ set (qs (qdom, prio)) - \ (t' = t \ (\ko. 
\ inQ qdom prio (F ko))))" - by (auto simp add: addToQs_def) - -lemma threadSet_valid_queues_addToQs: - "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko - \ t \ set (ksReadyQueues s (qdom, prio))) - \ valid_queues' (ksReadyQueues_update (addToQs F t) s)\ - threadSet F t - \\rv. valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs - objBits_simps addToQs_set_def - split del: if_split cong: if_cong) - apply (fastforce simp: projectKOs split: if_split_asm) - done - -lemma threadSet_valid_queues_Qf: - "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko - \ t \ set (ksReadyQueues s (qdom, prio))) - \ valid_queues' (ksReadyQueues_update Qf s) - \ (\prio. set (Qf (ksReadyQueues s) prio) - \ set (addToQs F t (ksReadyQueues s) prio))\ - threadSet F t - \\rv. valid_queues'\" - apply (wp threadSet_valid_queues_addToQs) - apply (clarsimp simp: valid_queues'_def subset_iff) - done - -lemma addToQs_subset: - "set (qs p) \ set (addToQs F t qs p)" -by (clarsimp simp: addToQs_def split_def) - -lemmas threadSet_valid_queues' - = threadSet_valid_queues_Qf - [where Qf=id, simplified ksReadyQueues_update_id - id_apply addToQs_subset simp_thms] - lemma threadSet_cur: "\\s. cur_tcb' s\ threadSet f t \\rv s. cur_tcb' s\" apply (simp add: threadSet_def cur_tcb'_def) @@ -980,7 +1070,7 @@ lemma modifyReadyQueuesL1Bitmap_obj_at[wp]: crunches setThreadState, setBoundNotification for valid_arch' [wp]: valid_arch_state' - (simp: unless_def crunch_simps) + (simp: unless_def crunch_simps wp: crunch_wps) crunch ksInterrupt'[wp]: threadSet "\s. P (ksInterruptState s)" (wp: setObject_ksInterrupt updateObject_default_inv) @@ -1007,20 +1097,18 @@ lemma threadSet_obj_at'_really_strongest: "\\s. tcb_at' t s \ obj_at' (\obj. if t = t' then P (f obj) else P obj) t' s\ threadSet f t \\rv. obj_at' P t'\" apply (simp add: threadSet_def) - apply (rule hoare_wp_splits) - apply (rule setObject_tcb_strongest) - apply (simp only: imp_conv_disj) - apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) - apply (rule hoare_post_imp [where Q="\rv s. \ tcb_at' t s \ tcb_at' t s"]) - apply simp - apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) - apply (rule getObject_inv_tcb) - apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply (wp setObject_tcb_strongest) + apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) + apply (rule hoare_post_imp [where Q="\rv s. \ tcb_at' t s \ tcb_at' t s"]) apply simp - apply (simp add: objBits_simps') - apply (erule obj_at'_weakenE) - apply simp - apply (cases "t = t'", simp_all) + apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) + apply (rule getObject_inv_tcb) + apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply simp + apply (simp add: objBits_simps') + apply (erule obj_at'_weakenE) + apply simp + apply (cases "t = t'", simp_all) apply (rule OMG_getObject_tcb) apply wp done @@ -1244,60 +1332,103 @@ lemma threadSet_valid_dom_schedule': unfolding threadSet_def by (wp setObject_ksDomSchedule_inv hoare_Ball_helper) -lemma threadSet_invs_trivialT: - assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. 
getF (F tcb) = getF tcb" - assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" - assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" - assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" - assumes r: "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" - shows - "\\s. invs' s \ - tcb_at' t s \ - (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) \ - (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) \ - ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) \ - (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ - threadSet F t - \\rv. invs'\" -proof - - from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast - note threadSet_sch_actT_P[where P=False, simplified] - have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ - valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" - by (auto simp: z) - show ?thesis - apply (simp add: invs'_def valid_state'_def split del: if_split) - apply (rule hoare_pre) - apply (wp x w v u b - threadSet_valid_pspace'T - threadSet_sch_actT_P[where P=False, simplified] - threadSet_valid_queues - threadSet_state_refs_of'T[where f'=id] - threadSet_state_hyp_refs_of' - threadSet_iflive'T - threadSet_ifunsafe'T - threadSet_idle'T - threadSet_global_refsT - irqs_masked_lift - valid_irq_node_lift - valid_irq_handlers_lift'' - threadSet_ctes_ofT - threadSet_not_inQ - threadSet_ct_idle_or_in_cur_domain' - threadSet_valid_dom_schedule' - threadSet_valid_queues' - threadSet_cur - untyped_ranges_zero_lift - |clarsimp simp: y z a r domains cteCaps_of_def valid_arch_tcb'_def|rule refl)+ - apply (clarsimp simp: obj_at'_def projectKOs pred_tcb_at'_def) - apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) - apply (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) +lemma threadSet_wp: + "\\s. \tcb. ko_at' tcb t s \ P (s\ksPSpace := (ksPSpace s)(t \ injectKO (f tcb))\)\ + threadSet f t + \\_. P\" + unfolding threadSet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (auto simp: obj_at'_def split: if_splits) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: fun_upd_def) + apply (prop_tac "\ptr. psMap (ksPSpace s) ptr = ksPSpace s ptr") + apply fastforce + apply metis done -qed + +lemma threadSet_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb\ + \ threadSet F tcbPtr \\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs elim: rsubst2[where P=P]) + done + +lemma threadSet_valid_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb; + \tcb. 
tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tcbPtr \valid_sched_pointers\" + unfolding valid_sched_pointers_def + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + by (fastforce simp: opt_pred_def opt_map_def obj_at'_def projectKOs split: option.splits if_splits) + +lemma threadSet_tcbSchedNexts_of: + "(\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb) \ + threadSet F t \\s. P (tcbSchedNexts_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + done + +lemma threadSet_tcbSchedPrevs_of: + "(\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb) \ + threadSet F t \\s. P (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + done + +lemma threadSet_tcbQueued: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + threadSet F t \\s. P (tcbQueued |< tcbs_of' s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_pred_def opt_map_def obj_at'_def projectKOs) + done + +crunches threadSet + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + +lemma threadSet_invs_trivialT: + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb" + "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits + \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbPriority (F tcb) = tcbPriority tcb" + "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows "threadSet F t \invs'\" + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbQueued + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of valid_bitmaps_lift + | clarsimp simp: assms cteCaps_of_def valid_arch_tcb'_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: assms obj_at'_def) lemmas threadSet_invs_trivial = threadSet_invs_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] @@ -1342,11 +1473,74 @@ lemma threadSet_valid_objs': apply (clarsimp elim!: obj_at'_weakenE) done +lemmas typ_at'_valid_tcb'_lift = + typ_at'_valid_obj'_lift[where obj="KOTCB tcb" for tcb, unfolded valid_obj'_def, simplified] + +lemmas setObject_valid_tcb' = typ_at'_valid_tcb'_lift[OF setObject_typ_at'] + +lemma setObject_valid_tcbs': + assumes preserve_valid_tcb': "\s s' ko ko' x n tcb tcb'. 
+ \ (ko', s') \ fst (updateObject val ko ptr x n s); P s; + lookupAround2 ptr (ksPSpace s) = (Some (x, ko), n); + projectKO_opt ko = Some tcb; projectKO_opt ko' = Some tcb'; + valid_tcb' tcb s \ \ valid_tcb' tcb' s" + shows "\valid_tcbs' and P\ setObject ptr val \\rv. valid_tcbs'\" + unfolding valid_tcbs'_def + apply (clarsimp simp: valid_def) + apply (rename_tac s s' ptr' tcb) + apply (prop_tac "\tcb'. valid_tcb' tcb s \ valid_tcb' tcb s'") + apply clarsimp + apply (erule (1) use_valid[OF _ setObject_valid_tcb']) + apply (drule spec, erule mp) + apply (clarsimp simp: setObject_def in_monad split_def lookupAround2_char1) + apply (rename_tac s ptr' new_tcb' ptr'' old_tcb_ko' s' f) + apply (case_tac "ptr'' = ptr'"; clarsimp) + apply (prop_tac "\old_tcb' :: tcb. projectKO_opt old_tcb_ko' = Some old_tcb'") + apply (frule updateObject_type) + apply (case_tac old_tcb_ko'; clarsimp simp: project_inject) + apply (erule exE) + apply (rule preserve_valid_tcb', assumption+) + apply (simp add: prod_eqI lookupAround2_char1) + apply force + apply (clarsimp simp: project_inject) + apply (clarsimp simp: project_inject) + done + +lemma setObject_tcb_valid_tcbs': + "\valid_tcbs' and (tcb_at' t and valid_tcb' v)\ setObject t (v :: tcb) \\rv. valid_tcbs'\" + apply (rule setObject_valid_tcbs') + apply (clarsimp simp: updateObject_default_def in_monad project_inject) + done + +lemma threadSet_valid_tcb': + "\valid_tcb' tcb and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. valid_tcb' tcb\" + apply (simp add: threadSet_def) + apply (wpsimp wp: setObject_valid_tcb') + done + +lemma threadSet_valid_tcbs': + "\valid_tcbs' and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. valid_tcbs'\" + apply (simp add: threadSet_def) + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (wpsimp wp: setObject_tcb_valid_tcbs') + apply (clarsimp simp: obj_at'_def projectKOs valid_tcbs'_def) + done + +lemma asUser_valid_tcbs'[wp]: + "asUser t f \valid_tcbs'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_valid_tcbs' hoare_drop_imps + simp: valid_tcb'_def valid_arch_tcb'_def tcb_cte_cases_def objBits_simps' + atcbContextSet_def) + done + lemma asUser_corres': assumes y: "corres_underlying Id False True r \ \ f g" - shows "corres r (tcb_at t) - (tcb_at' t) - (as_user t f) (asUser t g)" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ (as_user t f) (asUser t g)" proof - note arch_tcb_context_get_def[simp] note atcbContextGet_def[simp] @@ -1379,6 +1573,8 @@ lemma asUser_corres': using y by (fastforce simp: corres_underlying_def select_f_def split_def Id_def) show ?thesis + apply (rule_tac Q'="tcb_at' t" in corres_cross_add_guard) + apply (fastforce intro!: tcb_at_cross) apply (simp add: as_user_def asUser_def) apply (rule corres_guard_imp) apply (rule_tac r'="\tcb con. (arch_tcb_context_get o tcb_arch) tcb = con" @@ -1430,7 +1626,7 @@ proof - qed lemma asUser_getRegister_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (as_user t (getRegister r)) (asUser t (getRegister r))" apply (rule asUser_corres') apply (clarsimp simp: getRegister_def) @@ -1479,14 +1675,6 @@ lemma asUser_valid_pspace'[wp]: | simp add: atcbContextSet_def valid_arch_tcb'_def)+ done -lemma asUser_valid_queues[wp]: - "\Invariants_H.valid_queues\ asUser t m \\rv. 
Invariants_H.valid_queues\" - apply (simp add: asUser_def split_def) - apply (wp hoare_drop_imps | simp)+ - - apply (wp threadSet_valid_queues hoare_drop_imps | simp)+ - done - lemma asUser_ifunsafe'[wp]: "\if_unsafe_then_cap'\ asUser t m \\rv. if_unsafe_then_cap'\" apply (simp add: asUser_def split_def) @@ -1583,23 +1771,20 @@ lemma no_fail_asUser [wp]: "no_fail \ f \ no_fail (tcb_at' t) (asUser t f)" apply (simp add: asUser_def split_def) apply wp - apply (simp add: no_fail_def) - apply (wp hoare_drop_imps) - apply simp + apply (simp add: no_fail_def) + apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+ done lemma asUser_setRegister_corres: - "corres dc (tcb_at t) - (tcb_at' t) - (as_user t (setRegister r v)) - (asUser t (setRegister r v))" + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (setRegister r v)) (asUser t (setRegister r v))" apply (simp add: setRegister_def) apply (rule asUser_corres') apply (rule corres_modify'; simp) done lemma getThreadState_corres: - "corres thread_state_relation (tcb_at t) (tcb_at' t) + "corres thread_state_relation (tcb_at t and pspace_aligned and pspace_distinct) \ (get_thread_state t) (getThreadState t)" apply (simp add: get_thread_state_def getThreadState_def) apply (rule threadGet_corres) @@ -1630,7 +1815,7 @@ lemma gts_inv'[wp]: "\P\ getThreadState t \\rv. by (simp add: getThreadState_def) wp lemma getBoundNotification_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (get_bound_notification t) (getBoundNotification t)" apply (simp add: get_bound_notification_def getBoundNotification_def) apply (rule threadGet_corres) @@ -1703,25 +1888,6 @@ lemma setQueue_nosch[wp]: lemma gq_wp[wp]: "\\s. Q (ksReadyQueues s (d, p)) s\ getQueue d p \Q\" by (simp add: getQueue_def, wp) -lemma get_tcb_corres: - "corres tcb_relation (tcb_at t) (tcb_at' t) (gets_the (get_tcb t)) (getObject t)" - apply (rule corres_no_failI) - apply wp - apply (clarsimp simp add: gets_def - get_def return_def bind_def get_tcb_def - gets_the_def assert_opt_def) - apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) - apply (clarsimp simp add: obj_at_def is_tcb obj_at'_def projectKO_def - projectKO_opt_tcb split_def - getObject_def loadObject_default_def in_monad - split: option.split_asm) - apply (clarsimp simp add: return_def in_magnitude_check objBits_simps - state_relation_def - split: kernel_object.split_asm) - apply (frule(1) pspace_relation_absD) - apply (clarsimp simp add: other_obj_relation_def) - done - lemma no_fail_getQueue [wp]: "no_fail \ (getQueue d p)" by (simp add: getQueue_def) @@ -1803,19 +1969,22 @@ lemma ethreadget_corres: apply (simp add: x) done -lemma setQueue_corres: - "corres dc \ \ (set_tcb_queue d p q) (setQueue d p q)" - apply (rule corres_no_failI) - apply wp - apply (clarsimp simp: setQueue_def in_monad set_tcb_queue_def return_def simpler_modify_def) - apply (fastforce simp: state_relation_def ready_queues_relation_def) - done - - -lemma getQueue_corres: "corres (=) \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" - apply (clarsimp simp add: getQueue_def state_relation_def ready_queues_relation_def get_tcb_queue_def gets_def) - apply (fold gets_def) - apply simp +lemma getQueue_corres: + "corres (\ls q. 
(ls = [] \ tcbQueueEmpty q) \ (ls \ [] \ tcbQueueHead q = Some (hd ls)) + \ queue_end_valid ls q) + \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" + apply (clarsimp simp: get_tcb_queue_def getQueue_def tcbQueueEmpty_def) + apply (rule corres_bind_return2) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]) + apply (rule corres_symb_exec_r[OF _ gets_sp]) + apply clarsimp + apply (drule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x=qdom in spec) + apply (drule_tac x=prio in spec) + apply (fastforce dest: heap_path_head) + apply wpsimp+ done lemma no_fail_return: @@ -1830,8 +1999,8 @@ lemma addToBitmap_noop_corres: (wp | simp add: state_relation_def | rule no_fail_pre)+ lemma addToBitmap_if_null_noop_corres: (* used this way in Haskell code *) - "corres dc \ \ (return ()) (if null queue then addToBitmap d p else return ())" - by (cases "null queue", simp_all add: addToBitmap_noop_corres) + "corres dc \ \ (return ()) (if tcbQueueEmpty queue then addToBitmap d p else return ())" + by (cases "tcbQueueHead queue", simp_all add: addToBitmap_noop_corres) lemma removeFromBitmap_corres_noop: "corres dc \ \ (return ()) (removeFromBitmap tdom prioa)" @@ -1848,54 +2017,701 @@ crunch typ_at'[wp]: removeFromBitmap "\s. P (typ_at' T p s)" lemmas addToBitmap_typ_ats [wp] = typ_at_lifts [OF addToBitmap_typ_at'] lemmas removeFromBitmap_typ_ats [wp] = typ_at_lifts [OF removeFromBitmap_typ_at'] +lemma ekheap_relation_tcb_domain_priority: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s t = Some (tcb); + ksPSpace s' t = Some (KOTCB tcb')\ + \ tcbDomain tcb' = tcb_domain tcb \ tcbPriority tcb' = tcb_priority tcb" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=t in bspec, blast) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def) + done + +lemma no_fail_thread_get[wp]: + "no_fail (tcb_at tcb_ptr) (thread_get f tcb_ptr)" + unfolding thread_get_def + apply wpsimp + apply (clarsimp simp: tcb_at_def) + done + +lemma pspace_relation_tcb_relation: + "\pspace_relation (kheap s) (ksPSpace s'); kheap s ptr = Some (TCB tcb); + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ tcb_relation tcb tcb'" + apply (clarsimp simp: pspace_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: obj_at_def obj_at'_def tcb_relation_cut_def) + done + +lemma pspace_relation_update_concrete_tcb: + "\pspace_relation s s'; s ptr = Some (TCB tcb); s' ptr = Some (KOTCB otcb'); + tcb_relation tcb tcb'\ + \ pspace_relation s (s'(ptr \ KOTCB tcb'))" + by (fastforce dest: pspace_relation_update_tcbs simp: map_upd_triv) + +lemma threadSet_pspace_relation: + fixes s :: det_state + assumes tcb_rel: "(\tcb tcb'. tcb_relation tcb tcb' \ tcb_relation tcb (F tcb'))" + shows "threadSet F tcbPtr \\s'. 
pspace_relation (kheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply normalise_obj_at' + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply (clarsimp simp: obj_at_def is_tcb_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule pspace_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def projectKOs) + apply (frule (1) pspace_relation_tcb_relation) + apply (fastforce simp: obj_at'_def projectKOs) + apply (fastforce dest!: tcb_rel) + done + +lemma ekheap_relation_update_tcbs: + "\ ekheap_relation (ekheap s) (ksPSpace s'); ekheap s x = Some oetcb; + ksPSpace s' x = Some (KOTCB otcb'); etcb_relation etcb tcb' \ + \ ekheap_relation ((ekheap s)(x \ etcb)) ((ksPSpace s')(x \ KOTCB tcb'))" + by (simp add: ekheap_relation_def) + +lemma ekheap_relation_update_concrete_tcb: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB otcb'); + etcb_relation etcb tcb'\ + \ ekheap_relation (ekheap s) ((ksPSpace s')(ptr \ KOTCB tcb'))" + by (fastforce dest: ekheap_relation_update_tcbs simp: map_upd_triv) + +lemma ekheap_relation_etcb_relation: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ etcb_relation etcb tcb'" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: obj_at_def obj_at'_def) + done + +lemma threadSet_ekheap_relation: + fixes s :: det_state + assumes etcb_rel: "(\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation etcb (F tcb'))" + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet F tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_tcb_def is_etcb_at_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule ekheap_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def projectKOs) + apply (frule (1) ekheap_relation_etcb_relation) + apply (fastforce simp: obj_at'_def projectKOs) + apply (fastforce dest!: etcb_rel) + done + +lemma tcbQueued_update_pspace_relation[wp]: + fixes s :: det_state + shows "threadSet (tcbQueued_update f) tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueued_update_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet (tcbQueued_update f) tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_ekheap_relation simp: etcb_relation_def) + +lemma tcbQueueRemove_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueRemove queue tcbPtr \\s'. 
pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueRemove_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueRemove queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_ekheap_relation threadSet_pspace_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma threadSet_ghost_relation[wp]: + "threadSet f tcbPtr \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s')\" + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (clarsimp simp: obj_at'_def projectKOs) + done + +lemma removeFromBitmap_ghost_relation[wp]: + "removeFromBitmap tdom prio \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s')\" + by (rule_tac f=gsUserPages in hoare_lift_Pf2; wpsimp simp: bitmap_fun_defs) + +lemma tcbQueued_update_ctes_of[wp]: + "threadSet (tcbQueued_update f) t \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_of) + +lemma removeFromBitmap_ctes_of[wp]: + "removeFromBitmap tdom prio \\s. P (ctes_of s)\" + by (wpsimp simp: bitmap_fun_defs) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for ghost_relation_projs[wp]: "\s. P (gsUserPages s) (gsCNodes s)" + and ksArchState[wp]: "\s. P (ksArchState s)" + and ksWorkUnitsCompleted[wp]: "\s. P (ksWorkUnitsCompleted s)" + and ksDomainTime[wp]: "\s. P (ksDomainTime s)" + (wp: crunch_wps getObject_tcb_wp simp: setObject_def updateObject_default_def obj_at'_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for tcb_at'[wp]: "\s. tcb_at' tcbPtr s" + (wp: crunch_wps ignore: threadSet) + +lemma set_tcb_queue_projs: + "set_tcb_queue d p queue + \\s. P (kheap s) (cdt s) (is_original_cap s) (cur_thread s) (idle_thread s) (scheduler_action s) + (domain_list s) (domain_index s) (cur_domain s) (domain_time s) (machine_state s) + (interrupt_irq_node s) (interrupt_states s) (arch_state s) (caps_of_state s) + (work_units_completed s) (cdt_list s) (ekheap s)\" + by (wpsimp simp: set_tcb_queue_def) + +lemma set_tcb_queue_cte_at: + "set_tcb_queue d p queue \\s. 
P (swp cte_at s)\" + unfolding set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: swp_def cte_wp_at_def) + done + +lemma set_tcb_queue_projs_inv: + "fst (set_tcb_queue d p queue s) = {(r, s')} \ + kheap s = kheap s' + \ ekheap s = ekheap s' + \ cdt s = cdt s' + \ is_original_cap s = is_original_cap s' + \ cur_thread s = cur_thread s' + \ idle_thread s = idle_thread s' + \ scheduler_action s = scheduler_action s' + \ domain_list s = domain_list s' + \ domain_index s = domain_index s' + \ cur_domain s = cur_domain s' + \ domain_time s = domain_time s' + \ machine_state s = machine_state s' + \ interrupt_irq_node s = interrupt_irq_node s' + \ interrupt_states s = interrupt_states s' + \ arch_state s = arch_state s' + \ caps_of_state s = caps_of_state s' + \ work_units_completed s = work_units_completed s' + \ cdt_list s = cdt_list s' + \ swp cte_at s = swp cte_at s'" + apply (drule singleton_eqD) + by (auto elim!: use_valid_inv[where E=\, simplified] + intro: set_tcb_queue_projs set_tcb_queue_cte_at) + +lemma set_tcb_queue_new_state: + "(rv, t) \ fst (set_tcb_queue d p queue s) \ + t = s\ready_queues := \dom prio. if dom = d \ prio = p then queue else ready_queues s dom prio\" + by (clarsimp simp: set_tcb_queue_def in_monad) + +lemma tcbQueuePrepend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueuePrepend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueuePrepend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueuePrepend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueAppend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueAppend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueueAppend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueAppend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueInsert_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueInsert tcbPtr afterPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueInsert_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueInsert tcbPtr afterPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma removeFromBitmap_pspace_relation[wp]: + fixes s :: det_state + shows "removeFromBitmap tdom prio \\s'. 
pspace_relation (kheap s) (ksPSpace s')\" + unfolding bitmap_fun_defs + by wpsimp + +crunches setQueue, removeFromBitmap + for valid_pspace'[wp]: valid_pspace' + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node'[wp]: "\s. P (irq_node' s)" + and typ_at'[wp]: "\s. P (typ_at' T p s)" + and valid_irq_states'[wp]: valid_irq_states' + and ksInterruptState[wp]: "\s. P (ksInterruptState s)" + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and valid_machine_state'[wp]: valid_machine_state' + and cur_tcb'[wp]: cur_tcb' + and ksPSpace[wp]: "\s. P (ksPSpace s)" + (wp: crunch_wps + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def cur_tcb'_def threadSet_cur + bitmap_fun_defs valid_machine_state'_def) + +crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue, setQueue + for pspace_aligned'[wp]: pspace_aligned' + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and pspace_distinct'[wp]: pspace_distinct' + and no_0_obj'[wp]: no_0_obj' + and ksSchedulerAction[wp]: "\s. P (ksSchedulerAction s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node[wp]: "\s. P (irq_node' s)" + and typ_at[wp]: "\s. P (typ_at' T p s)" + and interrupt_state[wp]: "\s. P (ksInterruptState s)" + and valid_irq_state'[wp]: valid_irq_states' + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and ctes_of[wp]: "\s. P (ctes_of s)" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and ksMachineState[wp]: "\s. P (ksMachineState s)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + (wp: crunch_wps threadSet_state_refs_of'[where f'=id and g'=id] + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def bitmap_fun_defs) + +lemma threadSet_ready_queues_relation: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + \\s'. ready_queues_relation s s' \ \ (tcbQueued |< tcbs_of' s') tcbPtr\ + threadSet F tcbPtr + \\_ s'. ready_queues_relation s s'\" + supply projectKOs[simp] + supply fun_upd_apply[simp del] + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: list_queue_relation_def obj_at'_def) + apply (rename_tac tcb' d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce intro: heap_path_heap_upd_not_in + simp: inQ_def opt_map_def opt_pred_def obj_at'_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (clarsimp simp: prev_queue_head_def) + apply (prop_tac "ready_queues s d p \ []", fastforce) + apply (fastforce dest: heap_path_head simp: inQ_def opt_pred_def opt_map_def fun_upd_apply) + apply (auto simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + done + +definition in_correct_ready_q_2 where + "in_correct_ready_q_2 queues ekh \ + \d p. \t \ set (queues d p). is_etcb_at' t ekh + \ etcb_at' (\t. 
tcb_priority t = p \ tcb_domain t = d) t ekh" + +abbreviation in_correct_ready_q :: "det_ext state \ bool" where + "in_correct_ready_q s \ in_correct_ready_q_2 (ready_queues s) (ekheap s)" + +lemmas in_correct_ready_q_def = in_correct_ready_q_2_def + +lemma in_correct_ready_q_lift: + assumes c: "\P. \\s. P (ekheap s)\ f \\rv s. P (ekheap s)\" + assumes r: "\P. f \\s. P (ready_queues s)\" + shows "f \in_correct_ready_q\" + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +definition ready_qs_distinct :: "det_ext state \ bool" where + "ready_qs_distinct s \ \d p. distinct (ready_queues s d p)" + +lemma ready_qs_distinct_lift: + assumes r: "\P. f \\s. P (ready_queues s)\" + shows "f \ready_qs_distinct\" + unfolding ready_qs_distinct_def + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +lemma ready_queues_disjoint: + "\in_correct_ready_q s; ready_qs_distinct s; d \ d' \ p \ p'\ + \ set (ready_queues s d p) \ set (ready_queues s d' p') = {}" + apply (clarsimp simp: ready_qs_distinct_def in_correct_ready_q_def) + apply (rule disjointI) + apply (frule_tac x=d in spec) + apply (drule_tac x=d' in spec) + apply (fastforce simp: etcb_at_def is_etcb_at_def split: option.splits) + done + +lemma isRunnable_sp: + "\P\ + isRunnable tcb_ptr + \\rv s. \tcb'. ko_at' tcb' tcb_ptr s + \ (rv = (tcbState tcb' = Running \ tcbState tcb' = Restart)) + \ P s\" + unfolding isRunnable_def getThreadState_def + apply (wpsimp wp: hoare_case_option_wp getObject_tcb_wp simp: threadGet_def) + apply (fastforce simp: obj_at'_def split: Structures_H.thread_state.splits) + done + +crunch (no_fail) no_fail[wp]: isRunnable + +defs ksReadyQueues_asrt_def: + "ksReadyQueues_asrt + \ \s'. \d p. \ts. ready_queue_relation d p ts (ksReadyQueues s' (d, p)) + (tcbSchedNexts_of s') (tcbSchedPrevs_of s') + (inQ d p |< tcbs_of' s')" + +lemma ksReadyQueues_asrt_cross: + "ready_queues_relation s s' \ ksReadyQueues_asrt s'" + by (fastforce simp: ready_queues_relation_def Let_def ksReadyQueues_asrt_def) + +crunches addToBitmap + for ko_at'[wp]: "\s. P (ko_at' ko ptr s)" + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueues_asrt[wp]: ksReadyQueues_asrt + and st_tcb_at'[wp]: "\s. P (st_tcb_at' Q tcbPtr s)" + and valid_tcbs'[wp]: valid_tcbs' + (simp: bitmap_fun_defs ksReadyQueues_asrt_def) + +lemma tcbQueueHead_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts\ + \ \ tcbQueueEmpty queue \ (inQ d p |< tcbs_of' s') (the (tcbQueueHead queue))" + by (fastforce dest: heap_path_head + simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def) + +lemma obj_at'_tcbQueueHead_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueHead queue)) s'" + by (fastforce dest!: tcbQueueHead_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma tcbQueueHead_iff_tcbQueueEnd: + "list_queue_relation ts q nexts prevs \ tcbQueueHead q \ None \ tcbQueueEnd q \ None" + apply (clarsimp simp: list_queue_relation_def queue_end_valid_def) + using heap_path_None + apply fastforce + done + +lemma tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. 
(inQ d p |< tcbs_of' s') t \ t \ set ts\ + \ \ tcbQueueEmpty queue \ (inQ d p |< tcbs_of' s') (the (tcbQueueEnd queue))" + apply (frule tcbQueueHead_iff_tcbQueueEnd) + by (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def) + +lemma obj_at'_tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueEnd queue)) s'" + by (fastforce dest!: tcbQueueEnd_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma thread_get_exs_valid[wp]: + "tcb_at tcb_ptr s \ \(=) s\ thread_get f tcb_ptr \\\_. (=) s\" + by (clarsimp simp: thread_get_def get_tcb_def gets_the_def gets_def return_def get_def + exs_valid_def tcb_at_def bind_def) + +lemma ethread_get_sp: + "\P\ ethread_get f ptr + \\rv. etcb_at (\tcb. f tcb = rv) ptr and P\" + apply wpsimp + apply (clarsimp simp: etcb_at_def split: option.splits) + done + +lemma ethread_get_exs_valid[wp]: + "\tcb_at tcb_ptr s; valid_etcbs s\ \ \(=) s\ ethread_get f tcb_ptr \\\_. (=) s\" + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: ethread_get_def get_etcb_def gets_the_def gets_def return_def get_def + is_etcb_at_def exs_valid_def bind_def) + done + +lemma no_fail_ethread_get[wp]: + "no_fail (tcb_at tcb_ptr and valid_etcbs) (ethread_get f tcb_ptr)" + unfolding ethread_get_def + apply wpsimp + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: is_etcb_at_def get_etcb_def) + done + +lemma threadGet_sp: + "\P\ threadGet f ptr \\rv s. \tcb :: tcb. ko_at' tcb ptr s \ f tcb = rv \ P s\" + unfolding threadGet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma in_set_ready_queues_inQ_eq: + "ready_queues_relation s s' \ t \ set (ready_queues s d p) \ (inQ d p |< tcbs_of' s') t" + by (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + +lemma in_ready_q_tcbQueued_eq: + "ready_queues_relation s s' + \ (\d p. t \ set (ready_queues s d p)) \ (tcbQueued |< tcbs_of' s') t" + apply (intro iffI) + apply clarsimp + apply (frule in_set_ready_queues_inQ_eq) + apply (fastforce simp: inQ_def opt_map_def opt_pred_def split: option.splits) + apply (fastforce simp: ready_queues_relation_def ready_queue_relation_def Let_def + inQ_def opt_pred_def + split: option.splits) + done + lemma tcbSchedEnqueue_corres: - "corres dc (is_etcb_at t) (tcb_at' t and Invariants_H.valid_queues and valid_queues') - (tcb_sched_action (tcb_sched_enqueue) t) (tcbSchedEnqueue t)" -proof - - have ready_queues_helper: - "\t tcb a b. \ ekheap a t = Some tcb; obj_at' tcbQueued t b ; valid_queues' b ; - ekheap_relation (ekheap a) (ksPSpace b) \ - \ t \ set (ksReadyQueues b (tcb_domain tcb, tcb_priority tcb))" - unfolding valid_queues'_def - by (fastforce dest: ekheap_relation_absD simp: obj_at'_def inQ_def etcb_relation_def projectKO_eq projectKO_tcb) - - show ?thesis unfolding tcbSchedEnqueue_def tcb_sched_action_def - apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, - where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at'; simp_all) - apply (rule no_fail_pre, wp, blast) - apply (case_tac queued; simp_all) - apply (rule corres_no_failI; simp add: no_fail_return) - apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc - assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def - set_tcb_queue_def simpler_modify_def ready_queues_relation_def - state_relation_def tcb_sched_enqueue_def) - apply (rule ready_queues_helper; auto) - apply (clarsimp simp: when_def) - apply (rule stronger_corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply simp - apply (rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_enqueue_def split del: if_split) - apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) - apply simp - apply (rule setQueue_corres[unfolded dc_def]) - apply (rule corres_split_noop_rhs2) - apply (fastforce intro: addToBitmap_noop_corres) - apply (fastforce intro: threadSet_corres_noop simp: tcb_relation_def exst_same_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def - projectKO_eq project_inject) - done -qed + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_enqueue tcb_ptr) (tcbSchedEnqueue tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + projectKOs[simp] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (clarsimp simp: tcb_sched_action_def tcb_sched_enqueue_def get_tcb_queue_def + tcbSchedEnqueue_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) 
+ apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce + apply clarsimp + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) + + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. P'" and P'=P' for P P']) + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueuePrepend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueHead_ksReadyQueues simp: obj_at'_def) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) 
+ + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp simp: setQueue_def tcbQueuePrepend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" and s'=s' + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply auto[1] + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" and st="tcbQueueHead (ksReadyQueues s' (d, p))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (cut_tac xs="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + and st="tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "\ (d = tcb_domain etcb \ p = tcb_priority etcb)") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def opt_pred_def opt_map_def) + apply (metis inQ_def option.simps(5) tcb_of'_TCB) + apply (intro conjI impI; simp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + force simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: tcbQueueEmpty_def) + apply 
(case_tac "t = tcbPtr") + apply (clarsimp simp: inQ_def fun_upd_apply obj_at'_def split: if_splits) + apply (case_tac "t = the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def obj_at'_def fun_upd_apply + split: option.splits) + apply metis + apply (clarsimp simp: inQ_def in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain etcb \ p = tcb_priority etcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def prev_queue_head_def + opt_map_red obj_at'_def + split: if_splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_prepend[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def obj_at'_def fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def split: if_splits) + by (auto dest!: hd_in_set simp: inQ_def in_opt_pred opt_map_def fun_upd_apply + split: if_splits option.splits) definition weak_sch_act_wf :: "scheduler_action \ kernel_state \ bool" @@ -1922,7 +2738,10 @@ lemma getSchedulerAction_corres: done lemma rescheduleRequired_corres: - "corres dc (weak_valid_sched_action and valid_etcbs) (Invariants_H.valid_queues and valid_queues' and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s)) + "corres dc + (weak_valid_sched_action and in_correct_ready_q and ready_qs_distinct and valid_etcbs + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') (reschedule_required) rescheduleRequired" apply (simp add: rescheduleRequired_def reschedule_required_def) apply (rule corres_guard_imp) @@ -1933,15 +2752,14 @@ lemma rescheduleRequired_corres: apply (case_tac action) apply simp apply simp - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply simp apply (rule setSchedulerAction_corres) apply simp apply (wp | wpc | simp)+ - apply (force dest: st_tcb_weakenE simp: in_monad weak_valid_sched_action_def valid_etcbs_def + apply (force dest: st_tcb_weakenE simp: in_monad weak_valid_sched_action_def valid_etcbs_def st_tcb_at_def obj_at_def is_tcb split: Deterministic_A.scheduler_action.split) - apply simp - apply (clarsimp simp: weak_sch_act_wf_def pred_tcb_at' split: scheduler_action.splits) + apply (clarsimp split: scheduler_action.splits) done lemma rescheduleRequired_corres_simple: @@ -2009,20 +2827,18 @@ lemmas addToBitmap_weak_sch_act_wf[wp] = weak_sch_act_wf_lift[OF addToBitmap_nosch] crunch st_tcb_at'[wp]: removeFromBitmap "st_tcb_at' P t" -crunch pred_tcb_at'[wp]: removeFromBitmap "pred_tcb_at' proj P t" +crunch pred_tcb_at'[wp]: removeFromBitmap "\s. Q (pred_tcb_at' proj P t s)" crunch not_st_tcb_at'[wp]: removeFromBitmap "\s. \ (st_tcb_at' P' t) s" -crunch not_pred_tcb_at'[wp]: removeFromBitmap "\s. \ (pred_tcb_at' proj P' t) s" crunch st_tcb_at'[wp]: addToBitmap "st_tcb_at' P' t" -crunch pred_tcb_at'[wp]: addToBitmap "pred_tcb_at' proj P' t" +crunch pred_tcb_at'[wp]: addToBitmap "\s. Q (pred_tcb_at' proj P t s)" crunch not_st_tcb_at'[wp]: addToBitmap "\s. \ (st_tcb_at' P' t) s" -crunch not_pred_tcb_at'[wp]: addToBitmap "\s. \ (pred_tcb_at' proj P' t) s" -crunch obj_at'[wp]: removeFromBitmap "obj_at' P t" +crunch obj_at'[wp]: removeFromBitmap "\s. Q (obj_at' P t s)" -crunch obj_at'[wp]: addToBitmap "obj_at' P t" +crunch obj_at'[wp]: addToBitmap "\s. Q (obj_at' P t s)" lemma removeFromBitmap_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t\ removeFromBitmap tdom prio \\ya. tcb_in_cur_domain' t\" @@ -2039,9 +2855,11 @@ lemma addToBitmap_tcb_in_cur_domain'[wp]: done lemma tcbSchedDequeue_weak_sch_act_wf[wp]: - "\ \s. weak_sch_act_wf (ksSchedulerAction s) s \ tcbSchedDequeue a \ \_ s. weak_sch_act_wf (ksSchedulerAction s) s \" - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_weak_sch_act_wf removeFromBitmap_weak_sch_act_wf | simp add: crunch_simps)+ + "tcbSchedDequeue tcbPtr \\s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wp threadSet_weak_sch_act_wf getObject_tcb_wp removeFromBitmap_weak_sch_act_wf + | simp add: crunch_simps threadGet_def)+ + apply (clarsimp simp: obj_at'_def) done lemma dequeue_nothing_eq[simp]: @@ -2057,44 +2875,344 @@ lemma gets_the_exec: "f s \ None \ (do x \ ge return_def assert_opt_def) done +lemma tcbQueueRemove_no_fail: + "no_fail (\s. tcb_at' tcbPtr s + \ (\ts. 
list_queue_relation ts queue (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ sym_heap_sched_pointers s \ valid_objs' s) + (tcbQueueRemove queue tcbPtr)" + supply projectKOs[simp] + unfolding tcbQueueRemove_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (frule (1) ko_at_valid_objs') + apply fastforce + apply (clarsimp simp: list_queue_relation_def) + apply (prop_tac "tcbQueueHead queue \ Some tcbPtr \ tcbSchedPrevs_of s tcbPtr \ None") + apply (rule impI) + apply (frule not_head_prev_not_None[where p=tcbPtr]) + apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (fastforce dest: heap_path_head) + apply fastforce + apply (fastforce simp: opt_map_def obj_at'_def valid_tcb'_def valid_bound_tcb'_def) + by (fastforce dest!: not_last_next_not_None[where p=tcbPtr] + simp: queue_end_valid_def opt_map_def obj_at'_def valid_obj'_def valid_tcb'_def) + +crunch (no_fail) no_fail[wp]: removeFromBitmap + +crunches removeFromBitmap + for ready_queues_relation[wp]: "ready_queues_relation s" + and list_queue_relation[wp]: + "\s'. list_queue_relation ts (P (ksReadyQueues s')) + (tcbSchedNexts_of s') (tcbSchedPrevs_of s')" + (simp: bitmap_fun_defs ready_queues_relation_def) + +\ \ + A direct analogue of tcbQueueRemove, used in tcb_sched_dequeue' below, so that within the proof of + tcbQueueRemove_corres, we may reason in terms of the list operations used within this function + rather than @{term filter}.\ +definition tcb_queue_remove :: "'a \ 'a list \ 'a list" where + "tcb_queue_remove a ls \ + if ls = [a] + then [] + else if a = hd ls + then tl ls + else if a = last ls + then butlast ls + else list_remove ls a" + +definition tcb_sched_dequeue' :: "obj_ref \ unit det_ext_monad" where + "tcb_sched_dequeue' tcb_ptr \ do + d \ ethread_get tcb_domain tcb_ptr; + prio \ ethread_get tcb_priority tcb_ptr; + queue \ get_tcb_queue d prio; + when (tcb_ptr \ set queue) $ set_tcb_queue d prio (tcb_queue_remove tcb_ptr queue) + od" + +lemma filter_tcb_queue_remove: + "\a \ set ls; distinct ls \ \ filter ((\) a) ls = tcb_queue_remove a ls" + apply (clarsimp simp: tcb_queue_remove_def) + apply (intro conjI impI) + apply (fastforce elim: filter_hd_equals_tl) + apply (fastforce elim: filter_last_equals_butlast) + apply (fastforce elim: filter_hd_equals_tl) + apply (frule split_list) + apply (clarsimp simp: list_remove_middle_distinct) + apply (subst filter_True | clarsimp simp: list_remove_none)+ + done + +lemma tcb_sched_dequeue_monadic_rewrite: + "monadic_rewrite False True (is_etcb_at t and (\s. \d p. distinct (ready_queues s d p))) + (tcb_sched_action tcb_sched_dequeue t) (tcb_sched_dequeue' t)" + supply if_split[split del] + apply (clarsimp simp: tcb_sched_dequeue'_def tcb_sched_dequeue_def tcb_sched_action_def + set_tcb_queue_def) + apply (rule monadic_rewrite_bind_tail)+ + apply (clarsimp simp: when_def) + apply (rule monadic_rewrite_if_r) + apply (rule_tac P="\_. distinct queue" in monadic_rewrite_guard_arg_cong) + apply (frule (1) filter_tcb_queue_remove) + apply (metis (mono_tags, lifting) filter_cong) + apply (rule monadic_rewrite_modify_noop) + apply (wpsimp wp: thread_get_wp)+ + apply (clarsimp simp: etcb_at_def split: option.splits) + apply (prop_tac "(\d' p. if d' = tcb_domain x2 \ p = tcb_priority x2 + then filter (\x. 
x \ t) (ready_queues s (tcb_domain x2) (tcb_priority x2)) + else ready_queues s d' p) + = ready_queues s") + apply (subst filter_True) + apply fastforce + apply (clarsimp intro!: ext split: if_splits) + apply fastforce + done + +crunches removeFromBitmap + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + +lemma list_queue_relation_neighbour_in_set: + "\list_queue_relation ls q hp hp'; sym_heap hp hp'; p \ set ls\ + \ \nbr. (hp p = Some nbr \ nbr \ set ls) \ (hp' p = Some nbr \ nbr \ set ls)" + apply (rule heap_ls_neighbour_in_set) + apply (fastforce simp: list_queue_relation_def) + apply fastforce + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def) + apply fastforce + done + +lemma in_queue_not_head_or_not_tail_length_gt_1: + "\tcbPtr \ set ls; tcbQueueHead q \ Some tcbPtr \ tcbQueueEnd q \ Some tcbPtr; + list_queue_relation ls q nexts prevs\ + \ Suc 0 < length ls" + apply (clarsimp simp: list_queue_relation_def) + apply (cases ls; fastforce simp: queue_end_valid_def) + done + lemma tcbSchedDequeue_corres: - "corres dc (is_etcb_at t) (tcb_at' t and Invariants_H.valid_queues) - (tcb_sched_action tcb_sched_dequeue t) (tcbSchedDequeue t)" - apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) - apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (rule no_fail_pre, wp, simp) - apply (case_tac queued) - defer - apply (simp add: when_def) - apply (rule corres_no_failI) - apply (wp) - apply (clarsimp simp: in_monad ethread_get_def set_tcb_queue_def is_etcb_at_def state_relation_def) - apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") - prefer 2 - subgoal by (force simp: tcb_sched_dequeue_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def - ready_queues_relation_def obj_at'_def inQ_def projectKO_eq project_inject) - apply (subst gets_the_exec) - apply (simp add: get_etcb_def) - apply (subst gets_the_exec) - apply (simp add: get_etcb_def) - apply (simp add: exec_gets simpler_modify_def get_etcb_def ready_queues_relation_def cong: if_cong get_tcb_queue_def) - apply (simp add: when_def) - apply (rule corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (simp, rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_dequeue_def) - apply (rule setQueue_corres) - apply (rule corres_split_noop_rhs) - apply (clarsimp, rule removeFromBitmap_corres_noop) - apply (rule threadSet_corres_noop; simp_all add: tcb_relation_def exst_same_def) - apply (wp | simp)+ + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and tcb_at tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_objs') + (tcb_sched_action tcb_sched_dequeue tcb_ptr) (tcbSchedDequeue tcbPtr)" + supply heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + list_remove_append[simp del] + projectKOs[simp] + apply (rule_tac Q'="tcb_at' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: tcb_at_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac 
Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (rule monadic_rewrite_guard_imp[OF tcb_sched_dequeue_monadic_rewrite]) + apply (fastforce dest: tcb_at_is_etcb_at simp: in_correct_ready_q_def ready_qs_distinct_def) + apply (clarsimp simp: tcb_sched_dequeue'_def get_tcb_queue_def tcbSchedDequeue_def getQueue_def + unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac dom) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac prio) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_if_strong'; fastforce?) + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + apply (fastforce simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; wpsimp?) + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp wp: tcbQueueRemove_no_fail) + apply (fastforce dest: state_relation_ready_queues_relation + simp: ex_abs_underlying_def ready_queues_relation_def ready_queue_relation_def + Let_def inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) 
+ + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp + simp: setQueue_def tcbQueueRemove_def + split_del: if_split) + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply normalise_obj_at' + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply clarsimp + apply (cut_tac p=tcbPtr and ls="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_neighbour_in_set) + apply (fastforce dest!: spec) + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: ready_queues_relation_def Let_def list_queue_relation_def) + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply fast + apply (clarsimp simp: tcbQueueEmpty_def) + apply (prop_tac "Some tcbPtr \ tcbQueueHead (ksReadyQueues s' (d, p))") + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply blast + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI; clarsimp) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (force intro!: heap_path_heap_upd_not_in simp: fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (force simp: prev_queue_head_heap_upd fun_upd_apply) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (force simp: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply assumption + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply split: if_splits) + apply (force simp: not_emptyI opt_map_red) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (force simp: prev_queue_head_def fun_upd_apply opt_map_red opt_map_upd_triv) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply (force simp: not_emptyI opt_map_red) + apply fastforce + apply (clarsimp 
simp: opt_map_red opt_map_upd_triv) + apply (intro prev_queue_head_heap_upd) + apply (force dest!: spec) + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply fastforce + subgoal + by (clarsimp simp: inQ_def opt_map_def opt_pred_def fun_upd_apply + split: if_splits option.splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (frule heap_path_head') + apply (frule heap_ls_distinct) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply prev_queue_head_def) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: tcb_queue_remove_def inQ_def opt_pred_def fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fastforce + apply (fastforce simp: list_queue_relation_def) + apply (frule list_not_head) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_tail_nonempty) + apply (frule (2) heap_ls_next_of_hd) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI allI) + apply (drule (1) heap_ls_remove_head_not_singleton) + apply (clarsimp simp: opt_map_red opt_map_upd_triv fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply last_tl) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fast + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: queue_end_valid_def) + apply (frule list_not_last) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_gt_1_imp_butlast_nonempty) + apply (frule (3) heap_ls_prev_of_last) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI; clarsimp?) 
+ apply (drule (1) heap_ls_remove_last_not_singleton) + apply (force elim!: rsubst3[where P=heap_ls] simp: opt_map_def fun_upd_apply) + apply (clarsimp simp: opt_map_def fun_upd_apply) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (meson distinct_in_butlast_not_last in_set_butlastD last_in_set not_last_in_set_butlast) + + \ \tcbPtr is in the middle of the ready queue\ + apply (clarsimp simp: obj_at'_def) + apply (frule set_list_mem_nonempty) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []", fastforce simp: queue_end_valid_def) + apply clarsimp + apply (frule (2) ptr_in_middle_prev_next) + apply fastforce + apply (clarsimp simp: tcb_queue_remove_def) + apply (prop_tac "tcbPtr \ last xs") + apply (clarsimp simp: distinct_append) + apply (prop_tac "tcbPtr \ hd ys") + apply (fastforce dest: hd_in_set simp: distinct_append) + apply (prop_tac "last xs \ hd ys") + apply (metis distinct_decompose2 hd_Cons_tl last_in_set) + apply (prop_tac "list_remove (xs @ tcbPtr # ys) tcbPtr = xs @ ys") + apply (simp add: list_remove_middle_distinct del: list_remove_append) + apply (intro conjI impI allI; (solves \clarsimp simp: distinct_append\)?) + apply (fastforce elim!: rsubst3[where P=heap_ls] + dest!: heap_ls_remove_middle hd_in_set last_in_set + simp: distinct_append not_emptyI opt_map_def fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (case_tac xs; + fastforce simp: prev_queue_head_def opt_map_def fun_upd_apply distinct_append) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply distinct_append + split: option.splits) done lemma thread_get_test: "do cur_ts \ get_thread_state cur; g (test cur_ts) od = @@ -2102,7 +3220,9 @@ lemma thread_get_test: "do cur_ts \ get_thread_state cur; g (test cur apply (simp add: get_thread_state_def thread_get_def) done -lemma thread_get_isRunnable_corres: "corres (=) (tcb_at t) (tcb_at' t) (thread_get (\tcb. runnable (tcb_state tcb)) t) (isRunnable t)" +lemma thread_get_isRunnable_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (\tcb. runnable (tcb_state tcb)) t) (isRunnable t)" apply (simp add: isRunnable_def getThreadState_def threadGet_def thread_get_def) apply (fold liftM_def) @@ -2116,8 +3236,8 @@ lemma thread_get_isRunnable_corres: "corres (=) (tcb_at t) (tcb_at' t) (thread_g lemma setThreadState_corres: "thread_state_relation ts ts' \ corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (set_thread_state t ts) (setThreadState ts' t)" (is "?tsr \ corres dc ?Pre ?Pre' ?sts ?sts'") apply (simp add: set_thread_state_def setThreadState_def) @@ -2141,8 +3261,8 @@ lemma setThreadState_corres: lemma setBoundNotification_corres: "corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (set_bound_notification t ntfn) (setBoundNotification ntfn t)" apply (simp add: set_bound_notification_def setBoundNotification_def) apply (subst thread_set_def[simplified, symmetric]) @@ -2152,29 +3272,84 @@ lemma setBoundNotification_corres: crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification for tcb'[wp]: "tcb_at' addr" +lemma tcbSchedNext_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedNext_update (\_. ptrOpt)) tcbPtr + \\_. 
valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedPrev_update (\_. ptrOpt)) tcbPtr + \\_. valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueuePrepend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. valid_objs'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift' simp: tcbQueueEmpty_def) + +crunches addToBitmap + for valid_objs'[wp]: valid_objs' + (simp: unless_def crunch_simps wp: crunch_wps) + +lemma tcbSchedEnqueue_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. valid_objs'\" + unfolding tcbSchedEnqueue_def setQueue_def + apply (wpsimp wp: threadSet_valid_objs' getObject_tcb_wp simp: threadGet_def) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + crunches rescheduleRequired, removeFromBitmap for valid_objs'[wp]: valid_objs' (simp: crunch_simps) -lemma tcbSchedDequeue_valid_objs' [wp]: "\ valid_objs' \ tcbSchedDequeue t \\_. valid_objs' \" - unfolding tcbSchedDequeue_def - apply (wp threadSet_valid_objs') - apply (clarsimp simp add: valid_tcb'_def tcb_cte_cases_def) - apply wp - apply (simp add: if_apply_def2) - apply (wp hoare_drop_imps) - apply (wp | simp cong: if_cong add: valid_tcb'_def tcb_cte_cases_def if_apply_def2)+ +lemmas ko_at_valid_objs'_pre = + ko_at_valid_objs'[simplified project_inject, atomized, simplified, rule_format] + +lemmas ep_ko_at_valid_objs_valid_ep' = + ko_at_valid_objs'_pre[where 'a=endpoint, simplified injectKO_defs valid_obj'_def, simplified] + +lemmas ntfn_ko_at_valid_objs_valid_ntfn' = + ko_at_valid_objs'_pre[where 'a=notification, simplified injectKO_defs valid_obj'_def, + simplified] + +lemmas tcb_ko_at_valid_objs_valid_tcb' = + ko_at_valid_objs'_pre[where 'a=tcb, simplified injectKO_defs valid_obj'_def, simplified] + +lemma tcbQueueRemove_valid_objs'[wp]: + "tcbQueueRemove queue tcbPtr \valid_objs'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (fastforce dest!: tcb_ko_at_valid_objs_valid_tcb' + simp: valid_tcb'_def valid_bound_tcb'_def obj_at'_def) done +lemma tcbSchedDequeue_valid_objs'[wp]: + "tcbSchedDequeue t \valid_objs'\" + unfolding tcbSchedDequeue_def setQueue_def + by (wpsimp wp: threadSet_valid_objs') + lemma sts_valid_objs': - "\valid_objs' and valid_tcb_state' st\ - setThreadState st t - \\rv. valid_objs'\" - apply (simp add: setThreadState_def setQueue_def isRunnable_def isStopped_def) - apply (wp threadSet_valid_objs') - apply (simp add: valid_tcb'_def tcb_cte_cases_def) - apply (wp threadSet_valid_objs' | simp)+ - apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def) + "\valid_objs' and valid_tcb_state' st and pspace_aligned' and pspace_distinct'\ + setThreadState st t + \\_. valid_objs'\" + apply (wpsimp simp: setThreadState_def wp: threadSet_valid_objs') + apply (rule_tac Q="\_. 
valid_objs' and pspace_aligned' and pspace_distinct'" in hoare_post_imp) + apply fastforce + apply (wpsimp wp: threadSet_valid_objs') + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) done lemma sbn_valid_objs': @@ -2260,18 +3435,6 @@ lemma setQueue_valid_bitmapQ_except[wp]: unfolding setQueue_def bitmapQ_defs by (wp, clarsimp simp: bitmapQ_def) -lemma setQueue_valid_bitmapQ: (* enqueue only *) - "\ valid_bitmapQ and (\s. (ksReadyQueues s (d, p) = []) = (ts = [])) \ - setQueue d p ts - \\_. valid_bitmapQ \" - unfolding setQueue_def bitmapQ_defs - by (wp, clarsimp simp: bitmapQ_def) - -lemma setQueue_valid_queues': - "\valid_queues' and (\s. \t. obj_at' (inQ d p) t s \ t \ set ts)\ - setQueue d p ts \\_. valid_queues'\" - by (wp | simp add: valid_queues'_def setQueue_def)+ - lemma setQueue_cur: "\\s. cur_tcb' s\ setQueue d p ts \\rv s. cur_tcb' s\" unfolding setQueue_def cur_tcb'_def @@ -2393,14 +3556,14 @@ lemma threadSet_queued_sch_act_wf[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ threadSet (tcbQueued_update f) t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - including no_pre + including classic_wp_pre apply (simp add: sch_act_wf_cases split: scheduler_action.split) apply (wp hoare_vcg_conj_lift) apply (simp add: threadSet_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ apply (clarsimp simp: obj_at'_def) apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ apply (simp add: threadSet_def) @@ -2409,9 +3572,17 @@ lemma threadSet_queued_sch_act_wf[wp]: apply (wp tcb_in_cur_domain'_lift | simp add: obj_at'_def)+ done +lemma tcbSchedNext_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedNext_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + +lemma tcbSchedPrev_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedPrev_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + lemma tcbSchedEnqueue_pred_tcb_at'[wp]: "\\s. pred_tcb_at' proj P' t' s \ tcbSchedEnqueue t \\_ s. pred_tcb_at' proj P' t' s\" - apply (simp add: tcbSchedEnqueue_def when_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def when_def unless_def) apply (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ done @@ -2419,8 +3590,9 @@ lemma tcbSchedDequeue_sch_act_wf[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedDequeue t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - unfolding tcbSchedDequeue_def - by (wp setQueue_sch_act | wp sch_act_wf_lift | simp add: if_apply_def2)+ + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wp setQueue_sch_act threadSet_tcbDomain_triv hoare_drop_imps + | wp sch_act_wf_lift | simp add: if_apply_def2)+ crunch nosch: tcbSchedDequeue "\s. P (ksSchedulerAction s)" @@ -2516,21 +3688,22 @@ lemma tcbSchedEnqueue_sch_act[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedEnqueue t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - by (simp add: tcbSchedEnqueue_def unless_def) - (wp setQueue_sch_act | wp sch_act_wf_lift | clarsimp)+ + by (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) + (wp setQueue_sch_act threadSet_tcbDomain_triv | wp sch_act_wf_lift | clarsimp)+ lemma tcbSchedEnqueue_weak_sch_act[wp]: "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ tcbSchedEnqueue t \\_ s. 
weak_sch_act_wf (ksSchedulerAction s) s\" - apply (simp add: tcbSchedEnqueue_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) apply (wp setQueue_sch_act threadSet_weak_sch_act_wf | clarsimp)+ done -lemma threadGet_wp: "\\s. tcb_at' t s \ (\tcb. ko_at' tcb t s \ P (f tcb) s)\ threadGet f t \P\" +lemma threadGet_wp: + "\\s. \tcb. ko_at' tcb t s \ P (f tcb) s\ threadGet f t \P\" apply (simp add: threadGet_def) apply (wp getObject_tcb_wp) - apply clarsimp + apply (clarsimp simp: obj_at'_def) done lemma threadGet_const: @@ -2540,6 +3713,11 @@ lemma threadGet_const: apply (clarsimp simp: obj_at'_def) done +lemma archThreadGet_wp: + "\\s. \tcb. ko_at' tcb t s \ Q (f (tcbArch tcb)) s\ archThreadGet f t \Q\" + unfolding archThreadGet_def + by (wpsimp wp: getObject_tcb_wp simp: obj_at'_def) + schematic_goal l2BitmapSize_def': (* arch specific consequence *) "l2BitmapSize = numeral ?X" by (simp add: l2BitmapSize_def wordBits_def word_size numPriorities_def) @@ -2577,14 +3755,6 @@ lemma addToBitmap_bitmapQ: by (wpsimp simp: bitmap_fun_defs bitmapQ_def prioToL1Index_bit_set prioL2Index_bit_set simp_del: bit_exp_iff) -lemma addToBitmap_valid_queues_no_bitmap_except: -" \ valid_queues_no_bitmap_except t \ - addToBitmap d p - \\_. valid_queues_no_bitmap_except t \" - unfolding addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def valid_queues_no_bitmap_except_def - by (wp, clarsimp) - crunch norq[wp]: addToBitmap "\s. P (ksReadyQueues s)" (wp: updateObject_cte_inv hoare_drop_imps) crunch norq[wp]: removeFromBitmap "\s. P (ksReadyQueues s)" @@ -2616,9 +3786,8 @@ lemma prioToL1Index_complement_nth_w2p: lemma valid_bitmapQ_exceptE: "\ valid_bitmapQ_except d' p' s ; d \ d' \ p \ p' \ - \ bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" - unfolding valid_bitmapQ_except_def - by blast + \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (fastforce simp: valid_bitmapQ_except_def) lemma invertL1Index_eq_cancelD: "\ invertL1Index i = invertL1Index j ; i < l2BitmapSize ; j < l2BitmapSize \ @@ -2645,7 +3814,6 @@ lemma removeFromBitmap_bitmapQ_no_L2_orphans[wp]: unfolding bitmap_fun_defs apply (wp, clarsimp simp: bitmap_fun_defs bitmapQ_no_L2_orphans_def)+ apply (rule conjI, clarsimp) - apply (rule conjI, clarsimp) apply (clarsimp simp: complement_nth_w2p l2BitmapSize_def') apply clarsimp apply metis @@ -2733,22 +3901,15 @@ lemma addToBitmap_valid_bitmapQ_except: done lemma addToBitmap_valid_bitmapQ: -" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and - (\s. ksReadyQueues s (d,p) \ []) \ - addToBitmap d p - \\_. valid_bitmapQ \" -proof - - have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and - (\s. ksReadyQueues s (d,p) \ []) \ - addToBitmap d p - \\_. valid_bitmapQ_except d p and - bitmapQ_no_L2_orphans and (\s. bitmapQ d p s \ ksReadyQueues s (d,p) \ []) \" - by (wp addToBitmap_valid_queues_no_bitmap_except addToBitmap_valid_bitmapQ_except - addToBitmap_bitmapQ_no_L2_orphans addToBitmap_bitmapQ; simp) - - thus ?thesis - by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) -qed + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans + and (\s. \ tcbQueueEmpty (ksReadyQueues s (d,p)))\ + addToBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. 
?pre s \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done lemma threadGet_const_tcb_at: "\\s. tcb_at' t s \ obj_at' (P s \ f) t s\ threadGet f t \\rv s. P s rv \" @@ -2766,12 +3927,6 @@ lemma threadGet_const_tcb_at_imp_lift: apply (clarsimp simp: obj_at'_def) done -lemma valid_queues_no_bitmap_objD: - "\ valid_queues_no_bitmap s; t \ set (ksReadyQueues s (d, p))\ - \ obj_at' (inQ d p and runnable' \ tcbState) t s" - unfolding valid_queues_no_bitmap_def - by blast - lemma setQueue_bitmapQ_no_L1_orphans[wp]: "\ bitmapQ_no_L1_orphans \ setQueue d p ts @@ -2791,136 +3946,16 @@ lemma setQueue_sets_queue[wp]: unfolding setQueue_def by (wp, simp) -lemma tcbSchedEnqueueOrAppend_valid_queues: - (* f is either (t#ts) or (ts @ [t]), so we define its properties generally *) - assumes f_set[simp]: "\ts. t \ set (f ts)" - assumes f_set_insert[simp]: "\ts. set (f ts) = insert t (set ts)" - assumes f_not_empty[simp]: "\ts. f ts \ []" - assumes f_distinct: "\ts. \ distinct ts ; t \ set ts \ \ distinct (f ts)" - shows "\Invariants_H.valid_queues and st_tcb_at' runnable' t and valid_objs' \ - do queued \ threadGet tcbQueued t; - unless queued $ - do tdom \ threadGet tcbDomain t; - prio \ threadGet tcbPriority t; - queue \ getQueue tdom prio; - setQueue tdom prio $ f queue; - when (null queue) $ addToBitmap tdom prio; - threadSet (tcbQueued_update (\_. True)) t - od - od - \\_. Invariants_H.valid_queues\" -proof - - - define could_run where "could_run == - \d p t. obj_at' (\tcb. inQ d p (tcbQueued_update (\_. True) tcb) \ runnable' (tcbState tcb)) t" - - have addToBitmap_could_run: - "\d p. \\s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\ - addToBitmap d p - \\_ s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\" - unfolding bitmap_fun_defs - by (wp, clarsimp simp: could_run_def) - - have setQueue_valid_queues_no_bitmap_except: - "\d p ts. - \ valid_queues_no_bitmap_except t and - (\s. ksReadyQueues s (d, p) = ts \ p \ maxPriority \ d \ maxDomain \ t \ set ts) \ - setQueue d p (f ts) - \\rv. valid_queues_no_bitmap_except t\" - unfolding setQueue_def valid_queues_no_bitmap_except_def null_def - by (wp, auto intro: f_distinct) - - have threadSet_valid_queues_could_run: - "\f. \ valid_queues_no_bitmap_except t and - (\s. \d p. t \ set (ksReadyQueues s (d,p)) \ could_run d p t s) and - valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans \ - threadSet (tcbQueued_update (\_. True)) t - \\rv. Invariants_H.valid_queues \" - unfolding threadSet_def could_run_def - apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) - apply (rule hoare_pre) - apply (simp add: valid_queues_def valid_queues_no_bitmap_def) - apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift - setObject_tcb_strongest) - apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def) - done - - have setQueue_could_run: "\d p ts. - \ valid_queues and (\_. t \ set ts) and - (\s. could_run d p t s) \ - setQueue d p ts - \\rv s. (\d p. 
t \ set (ksReadyQueues s (d, p)) \ could_run d p t s)\" - unfolding setQueue_def valid_queues_def could_run_def - by wp (fastforce dest: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def) - - note hoare_vcg_if_lift[wp] hoare_vcg_conj_lift[wp] hoare_vcg_const_imp_lift[wp] - - show ?thesis - unfolding tcbSchedEnqueue_def null_def - apply (rule hoare_pre) - apply (rule hoare_seq_ext) - apply (simp add: unless_def) - apply (wp threadSet_valid_queues_could_run) - apply (wp addToBitmap_could_run addToBitmap_valid_bitmapQ - addToBitmap_valid_queues_no_bitmap_except addToBitmap_bitmapQ_no_L2_orphans)+ - apply (wp setQueue_valid_queues_no_bitmap_except setQueue_could_run - setQueue_valid_bitmapQ_except setQueue_sets_queue setQueue_valid_bitmapQ)+ - apply (wp threadGet_const_tcb_at_imp_lift | simp add: if_apply_def2)+ - apply clarsimp - apply (frule pred_tcb_at') - apply (frule (1) valid_objs'_maxDomain) - apply (frule (1) valid_objs'_maxPriority) - apply (clarsimp simp: valid_queues_def st_tcb_at'_def obj_at'_def valid_queues_no_bitmap_exceptI) - apply (fastforce dest!: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def could_run_def) - done -qed - -lemma tcbSchedEnqueue_valid_queues[wp]: - "\Invariants_H.valid_queues - and st_tcb_at' runnable' t - and valid_objs' \ - tcbSchedEnqueue t - \\_. Invariants_H.valid_queues\" - unfolding tcbSchedEnqueue_def - by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) - -lemma tcbSchedAppend_valid_queues[wp]: - "\Invariants_H.valid_queues - and st_tcb_at' runnable' t - and valid_objs' \ - tcbSchedAppend t - \\_. Invariants_H.valid_queues\" - unfolding tcbSchedAppend_def - by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) - -lemma rescheduleRequired_valid_queues[wp]: - "\\s. Invariants_H.valid_queues s \ valid_objs' s \ - weak_sch_act_wf (ksSchedulerAction s) s\ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - apply (fastforce simp: weak_sch_act_wf_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) - done - -lemma rescheduleRequired_valid_queues_sch_act_simple: - "\Invariants_H.valid_queues and sch_act_simple\ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: Invariants_H.valid_queues_def sch_act_simple_def)+ - done - lemma rescheduleRequired_valid_bitmapQ_sch_act_simple: "\ valid_bitmapQ and sch_act_simple\ rescheduleRequired \\_. valid_bitmapQ \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. valid_bitmapQ s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. valid_bitmapQ s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done @@ -2928,12 +3963,12 @@ lemma rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple: "\ bitmapQ_no_L1_orphans and sch_act_simple\ rescheduleRequired \\_. bitmapQ_no_L1_orphans \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. bitmapQ_no_L1_orphans s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. 
bitmapQ_no_L1_orphans s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done @@ -2941,162 +3976,43 @@ lemma rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple: "\ bitmapQ_no_L2_orphans and sch_act_simple\ rescheduleRequired \\_. bitmapQ_no_L2_orphans \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. bitmapQ_no_L2_orphans s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. bitmapQ_no_L2_orphans s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done lemma sts_valid_bitmapQ_sch_act_simple: "\valid_bitmapQ and sch_act_simple\ - setThreadState st t - \\_. valid_bitmapQ \" + setThreadState st t + \\_. valid_bitmapQ\" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_valid_bitmapQ_sch_act_simple threadSet_valid_bitmapQ [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma sts_valid_bitmapQ_no_L2_orphans_sch_act_simple: - "\ bitmapQ_no_L2_orphans and sch_act_simple\ - setThreadState st t - \\_. bitmapQ_no_L2_orphans \" + "\bitmapQ_no_L2_orphans and sch_act_simple\ + setThreadState st t + \\_. bitmapQ_no_L2_orphans\" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple threadSet_valid_bitmapQ_no_L2_orphans [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma sts_valid_bitmapQ_no_L1_orphans_sch_act_simple: - "\ bitmapQ_no_L1_orphans and sch_act_simple\ - setThreadState st t - \\_. bitmapQ_no_L1_orphans \" + "\bitmapQ_no_L1_orphans and sch_act_simple\ + setThreadState st t + \\_. bitmapQ_no_L1_orphans\" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple threadSet_valid_bitmapQ_no_L1_orphans [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - -lemma sts_valid_queues: - "\\s. Invariants_H.valid_queues s \ - ((\p. t \ set(ksReadyQueues s p)) \ runnable' st)\ - setThreadState st t \\rv. Invariants_H.valid_queues\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_valid_queues_sch_act_simple - threadSet_valid_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - -lemma sbn_valid_queues: - "\\s. Invariants_H.valid_queues s\ - setBoundNotification ntfn t \\rv. Invariants_H.valid_queues\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - - - -lemma addToBitmap_valid_queues'[wp]: - "\ valid_queues' \ addToBitmap d p \\_. valid_queues' \" - unfolding valid_queues'_def addToBitmap_def - modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def - by (wp, simp) - -lemma tcbSchedEnqueue_valid_queues'[wp]: - "\valid_queues' and st_tcb_at' runnable' t \ - tcbSchedEnqueue t - \\_. 
valid_queues'\" - apply (simp add: tcbSchedEnqueue_def) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued; simp_all add: unless_def when_def) - apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ - apply (subst conj_commute, wp) - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) - apply wp - apply fastforce - apply wp - apply (subst conj_commute) - apply clarsimp - apply (rule_tac Q="\rv. valid_queues' - and obj_at' (\obj. \ tcbQueued obj) t - and obj_at' (\obj. tcbPriority obj = prio) t - and obj_at' (\obj. tcbDomain obj = tdom) t - and (\s. t \ set (ksReadyQueues s (tdom, prio)))" - in hoare_post_imp) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKOs inQ_def) - apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def inQ_def projectKOs valid_queues'_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def) - done - -lemma rescheduleRequired_valid_queues'_weak[wp]: - "\\s. valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s\ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply wpsimp - apply (clarsimp simp: weak_sch_act_wf_def) - done - -lemma rescheduleRequired_valid_queues'_sch_act_simple: - "\valid_queues' and sch_act_simple\ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: valid_queues'_def sch_act_simple_def)+ - done - -lemma setThreadState_valid_queues'[wp]: - "\\s. valid_queues' s\ setThreadState st t \\rv. valid_queues'\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_valid_queues'_sch_act_simple) - apply (rule_tac Q="\_. valid_queues'" in hoare_post_imp) - apply (clarsimp simp: sch_act_simple_def) - apply (wp threadSet_valid_queues') - apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) - done - -lemma setBoundNotification_valid_queues'[wp]: - "\\s. valid_queues' s\ setBoundNotification ntfn t \\rv. valid_queues'\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_queues') - apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) - done - -lemma valid_tcb'_tcbState_update: - "\ valid_tcb_state' st s; valid_tcb' tcb s \ \ valid_tcb' (tcbState_update (\_. st) tcb) s" - apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def valid_tcb_state'_def) - done - -lemma setThreadState_valid_objs'[wp]: - "\ valid_tcb_state' st and valid_objs' \ setThreadState st t \ \_. valid_objs' \" - apply (simp add: setThreadState_def) - apply (wp threadSet_valid_objs' | clarsimp simp: valid_tcb'_tcbState_update)+ - done - -lemma rescheduleRequired_ksQ: - "\\s. sch_act_simple s \ P (ksReadyQueues s p)\ - rescheduleRequired - \\_ s. P (ksReadyQueues s p)\" - including no_pre - apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) - \ P (ksReadyQueues s p)" in hoare_seq_ext) - apply wpsimp - apply (case_tac x; simp) - apply wp + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma setSchedulerAction_ksQ[wp]: @@ -3111,17 +4027,6 @@ lemma sbn_ksQ: "\\s. 
P (ksReadyQueues s p)\ setBoundNotification ntfn t \\rv s. P (ksReadyQueues s p)\" by (simp add: setBoundNotification_def, wp) -lemma sts_ksQ: - "\\s. sch_act_simple s \ P (ksReadyQueues s p)\ - setThreadState st t - \\_ s. P (ksReadyQueues s p)\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_ksQ) - apply (rule_tac Q="\_ s. P (ksReadyQueues s p)" in hoare_post_imp) - apply (clarsimp simp: sch_act_simple_def)+ - apply (wp, simp) - done - lemma setQueue_ksQ[wp]: "\\s. P ((ksReadyQueues s)((d, p) := q))\ setQueue d p q @@ -3129,22 +4034,6 @@ lemma setQueue_ksQ[wp]: by (simp add: setQueue_def fun_upd_def[symmetric] | wp)+ -lemma tcbSchedEnqueue_ksQ: - "\\s. t' \ set (ksReadyQueues s p) \ t' \ t \ - tcbSchedEnqueue t \\_ s. t' \ set (ksReadyQueues s p)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wpsimp wp: hoare_vcg_imp_lift threadGet_wp) - apply (drule obj_at_ko_at') - apply fastforce - done - -lemma rescheduleRequired_ksQ': - "\\s. t \ set (ksReadyQueues s p) \ sch_act_not t s \ - rescheduleRequired \\_ s. t \ set (ksReadyQueues s p)\" - apply (simp add: rescheduleRequired_def) - apply (wpsimp wp: tcbSchedEnqueue_ksQ) - done - lemma threadSet_tcbState_st_tcb_at': "\\s. P st \ threadSet (tcbState_update (\_. st)) t \\_. st_tcb_at' P t\" apply (simp add: threadSet_def pred_tcb_at'_def) @@ -3155,36 +4044,6 @@ lemma isRunnable_const: "\st_tcb_at' runnable' t\ isRunnable t \\runnable _. runnable \" by (rule isRunnable_wp) -lemma sts_ksQ': - "\\s. (runnable' st \ ksCurThread s \ t) \ P (ksReadyQueues s p)\ - setThreadState st t - \\_ s. P (ksReadyQueues s p)\" - apply (simp add: setThreadState_def) - apply (rule hoare_pre_disj') - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] - threadSet_ksQ]]) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) - apply (clarsimp simp: when_def) - apply (case_tac x) - apply (clarsimp, wp)[1] - apply (clarsimp) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF threadSet_ct threadSet_ksQ]]) - apply (rule hoare_seq_ext [OF _ isRunnable_inv]) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF gct_wp gct_wp]]) - apply (rename_tac ct) - apply (case_tac "ct\t") - apply (clarsimp simp: when_def) - apply (wp)[1] - apply (clarsimp) - done - lemma valid_ipc_buffer_ptr'D: assumes yv: "y < unat max_ipc_words" and buf: "valid_ipc_buffer_ptr' a s" @@ -3301,17 +4160,30 @@ lemmas msgRegisters_unfold unfolded fromEnum_def enum_register, simplified, unfolded toEnum_def enum_register, simplified] +lemma thread_get_registers: + "thread_get (arch_tcb_get_registers \ tcb_arch) t = as_user t (gets user_regs)" + apply (simp add: thread_get_def as_user_def arch_tcb_get_registers_def + arch_tcb_context_get_def arch_tcb_context_set_def) + apply (rule bind_cong [OF refl]) + apply (clarsimp simp: gets_the_member) + apply (simp add: get_def the_run_state_def set_object_def get_object_def + put_def bind_def return_def gets_def) + apply (drule get_tcb_SomeD) + apply (clarsimp simp: map_upd_triv select_f_def image_def return_def) + done + lemma getMRs_corres: - "corres (=) (tcb_at t) - (tcb_at' t and case_option \ valid_ipc_buffer_ptr' buf) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) + (case_option \ valid_ipc_buffer_ptr' buf) (get_mrs t buf mi) (getMRs t buf (message_info_map mi))" proof - have S: "get = gets id" by (simp add: gets_def) - have T: "corres (\con regs. 
regs = map con msg_registers) (tcb_at t) (tcb_at' t) - (thread_get (arch_tcb_get_registers o tcb_arch) t) (asUser t (mapM getRegister ARM_HYP_H.msgRegisters))" - unfolding arch_tcb_get_registers_def - apply (subst thread_get_as_user) + have T: "corres (\con regs. regs = map con msg_registers) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (arch_tcb_get_registers o tcb_arch) t) + (asUser t (mapM getRegister ARM_HYP_H.msgRegisters))" + apply (subst thread_get_registers) apply (rule asUser_corres') apply (subst mapM_gets) apply (simp add: getRegister_def) @@ -3375,7 +4247,7 @@ lemma zipWithM_x_corres: apply (rule b) apply (rule a) apply (rule corres_trivial, simp) - apply (rule hoare_post_taut)+ + apply (rule hoare_TrueI)+ done @@ -3390,14 +4262,37 @@ lemma storeWordUser_valid_ipc_buffer_ptr' [wp]: unfolding valid_ipc_buffer_ptr'_def2 by (wp hoare_vcg_all_lift storeWordUser_typ_at') +lemma thread_set_as_user_registers: + "thread_set (\tcb. tcb \ tcb_arch := arch_tcb_set_registers (f (arch_tcb_get_registers (tcb_arch tcb))) + (tcb_arch tcb) \) t + = as_user t (modify (modify_registers f))" +proof - + have P: "\f. det (modify f)" + by (simp add: modify_def) + thus ?thesis + apply (simp add: as_user_def P thread_set_def) + apply (clarsimp simp: select_f_def simpler_modify_def bind_def image_def modify_registers_def + arch_tcb_set_registers_def arch_tcb_get_registers_def + arch_tcb_context_set_def arch_tcb_context_get_def) + done +qed + +lemma UserContext_fold: + "UserContext (foldl (\s (x, y). s(x := y)) (user_regs s) xs) = + foldl (\s (r, v). UserContext ((user_regs s)(r := v))) s xs" + apply (induct xs arbitrary: s; simp) + apply (clarsimp split: prod.splits) + by (metis user_context.sel(1)) + lemma setMRs_corres: assumes m: "mrs' = mrs" shows - "corres (=) (tcb_at t and case_option \ in_user_frame buf) - (tcb_at' t and case_option \ valid_ipc_buffer_ptr' buf) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct and case_option \ in_user_frame buf) + (case_option \ valid_ipc_buffer_ptr' buf) (set_mrs t buf mrs) (setMRs t buf mrs')" proof - - have setRegister_def2: "setRegister = (\r v. modify (\s. s ( r := v )))" + have setRegister_def2: + "setRegister = (\r v. modify (\s. UserContext ((user_regs s)(r := v))))" by ((rule ext)+, simp add: setRegister_def) have S: "\xs ys n m. m - n \ length xs \ (zip xs (drop n (take m ys))) = zip xs (drop n ys)" @@ -3411,24 +4306,23 @@ proof - show ?thesis using m unfolding setMRs_def set_mrs_def - apply (clarsimp simp: arch_tcb_set_registers_def arch_tcb_get_registers_def cong: option.case_cong split del: if_split) + apply (clarsimp cong: option.case_cong split del: if_split) apply (subst bind_assoc[symmetric]) apply (fold thread_set_def[simplified]) - apply (subst thread_set_as_user[where f="\context. \reg. - if reg \ set (take (length mrs) msg_registers) - then mrs ! 
(the_index msg_registers reg) else context reg",simplified]) + apply (subst thread_set_as_user_registers) apply (cases buf) - apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_Nil zipWithM_x_modify + apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_modify take_min_len zip_take_triv2 min.commute) apply (rule corres_guard_imp) apply (rule corres_split_nor[OF asUser_corres']) apply (rule corres_modify') - apply (fastforce simp: fold_fun_upd[symmetric] msgRegisters_unfold + apply (fastforce simp: fold_fun_upd[symmetric] msgRegisters_unfold UserContext_fold + modify_registers_def cong: if_cong simp del: the_index.simps) apply ((wp |simp)+)[6] \ \buf = Some a\ using if_split[split del] - apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_Nil zipWithM_x_modify + apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_modify take_min_len zip_take_triv2 min.commute msgMaxLength_def msgLengthBits_def) apply (simp add: msg_max_length_def) @@ -3436,9 +4330,10 @@ proof - apply (rule corres_split_nor[OF asUser_corres']) apply (rule corres_modify') apply (simp only: msgRegisters_unfold cong: if_cong) - apply (fastforce simp: fold_fun_upd[symmetric]) + apply (fastforce simp: fold_fun_upd[symmetric] msgRegisters_unfold UserContext_fold + modify_registers_def) apply clarsimp - apply (rule corres_split_nor) + apply (rule corres_split_nor) apply (rule_tac S="{((x, y), (x', y')). y = y' \ x' = (a + (of_nat x * 4)) \ x < unat max_ipc_words}" in zipWithM_x_corres) apply (fastforce intro: storeWordUser_corres) @@ -3454,14 +4349,12 @@ proof - qed lemma copyMRs_corres: - "corres (=) (tcb_at s and tcb_at r - and case_option \ in_user_frame sb - and case_option \ in_user_frame rb - and K (unat n \ msg_max_length)) - (tcb_at' s and tcb_at' r - and case_option \ valid_ipc_buffer_ptr' sb - and case_option \ valid_ipc_buffer_ptr' rb) - (copy_mrs s sb r rb n) (copyMRs s sb r rb n)" + "corres (=) + (tcb_at s and tcb_at r and pspace_aligned and pspace_distinct + and case_option \ in_user_frame sb and case_option \ in_user_frame rb + and K (unat n \ msg_max_length)) + (case_option \ valid_ipc_buffer_ptr' sb and case_option \ valid_ipc_buffer_ptr' rb) + (copy_mrs s sb r rb n) (copyMRs s sb r rb n)" proof - have U: "unat n \ msg_max_length \ map (toEnum :: nat \ word32) [7 ..< Suc (unat n)] = map of_nat [7 ..< Suc (unat n)]" @@ -3470,7 +4363,7 @@ proof - note R=R'[simplified] have as_user_bit: - "\v :: word32. corres dc (tcb_at s and tcb_at r) (tcb_at' s and tcb_at' r) + "\v :: word32. corres dc (tcb_at s and tcb_at r and pspace_aligned and pspace_distinct) \ (mapM (\ra. 
do v \ as_user s (getRegister ra); as_user r (setRegister ra v) @@ -3613,10 +4506,12 @@ qed lemmas valid_ipc_buffer_cap_simps = valid_ipc_buffer_cap_def [split_simps cap.split arch_cap.split] lemma lookupIPCBuffer_corres': - "corres (=) (tcb_at t and valid_objs and pspace_aligned) - (tcb_at' t and valid_objs' and pspace_aligned' - and pspace_distinct' and no_0_obj') - (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" + "corres (=) + (tcb_at t and valid_objs and pspace_aligned and pspace_distinct) + (valid_objs' and no_0_obj') + (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" + apply (rule corres_cross_add_guard[where Q'="pspace_aligned' and pspace_distinct'"]) + apply (fastforce simp: pspace_aligned_cross pspace_distinct_cross state_relation_def) apply (simp add: lookup_ipc_buffer_def ARM_HYP_H.lookupIPCBuffer_def) apply (rule corres_guard_imp) apply (rule corres_split_eqr[OF threadGet_corres]) @@ -3659,13 +4554,11 @@ lemma lookupIPCBuffer_corres': done lemma lookupIPCBuffer_corres: - "corres (=) (tcb_at t and invs) - (tcb_at' t and invs') - (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" + "corres (=) (tcb_at t and invs) (valid_objs' and no_0_obj') + (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" using lookupIPCBuffer_corres' by (rule corres_guard_imp, auto simp: invs'_def valid_state'_def) - crunch inv[wp]: lookupIPCBuffer P crunch pred_tcb_at'[wp]: rescheduleRequired "pred_tcb_at' proj P t" @@ -3709,14 +4602,14 @@ lemma ct_in_state'_decomp: shows "\\s. Pre s \ t = (ksCurThread s)\ f \\rv. ct_in_state' Prop\" apply (rule hoare_post_imp [where Q="\rv s. t = ksCurThread s \ st_tcb_at' Prop t s"]) apply (clarsimp simp add: ct_in_state'_def) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (wp x y) apply simp done lemma ct_in_state'_set: "\\s. tcb_at' t s \ P st \ t = ksCurThread s\ setThreadState st t \\rv. ct_in_state' P\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule ct_in_state'_decomp[where t=t]) apply (wp setThreadState_ct') apply (wp setThreadState_st_tcb) @@ -3725,7 +4618,7 @@ lemma ct_in_state'_set: crunches setQueue, rescheduleRequired, tcbSchedDequeue for idle'[wp]: "valid_idle'" - (simp: crunch_simps ) + (simp: crunch_simps wp: crunch_wps) lemma sts_valid_idle'[wp]: "\valid_idle' and valid_pspace' and @@ -3765,8 +4658,9 @@ lemma gbn_sp': lemma tcbSchedDequeue_tcbState_obj_at'[wp]: "\obj_at' (P \ tcbState) t'\ tcbSchedDequeue t \\rv. obj_at' (P \ tcbState) t'\" - apply (simp add: tcbSchedDequeue_def) - apply (wp | simp add: o_def split del: if_split cong: if_cong)+ + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: getObject_tcb_wp simp: o_def threadGet_def) + apply (clarsimp simp: obj_at'_def) done crunch typ_at'[wp]: setQueue "\s. P' (typ_at' P t s)" @@ -3785,10 +4679,14 @@ lemma setQueue_pred_tcb_at[wp]: lemma tcbSchedDequeue_pred_tcb_at'[wp]: "\\s. P' (pred_tcb_at' proj P t' s)\ tcbSchedDequeue t \\_ s. 
P' (pred_tcb_at' proj P t' s)\" apply (rule_tac P=P' in P_bool_lift) - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) done lemma sts_st_tcb': @@ -3902,38 +4800,155 @@ crunch nonz_cap[wp]: addToBitmap "ex_nonz_cap_to' t" crunch iflive'[wp]: removeFromBitmap if_live_then_nonz_cap' crunch nonz_cap[wp]: removeFromBitmap "ex_nonz_cap_to' t" -lemma tcbSchedEnqueue_iflive'[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ - tcbSchedEnqueue tcb \\_. if_live_then_nonz_cap'\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ +crunches rescheduleRequired + for cap_to'[wp]: "ex_nonz_cap_to' p" + +lemma tcbQueued_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbQueued_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedNext_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedPrev_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedPrev_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_ctes_of[wp]: + "threadSet (tcbSchedNext_update f) tptr \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_ctes_of[wp]: + "threadSet (tcbSchedPrev_update f) tptr \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedNext_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedPrev_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedNext_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedPrev_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbQueued_update_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbQueued_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbQueued_update_tcb_cte_cases) + +lemma getTCB_wp: + "\\s. \ko :: tcb. 
ko_at' ko p s \ Q ko s\ getObject p \Q\" + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) done -lemma rescheduleRequired_iflive'[wp]: - "\if_live_then_nonz_cap' - and (\s. \t. ksSchedulerAction s = SwitchToThread t - \ st_tcb_at' runnable' t s)\ - rescheduleRequired - \\rv. if_live_then_nonz_cap'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - apply (clarsimp simp: pred_tcb_at'_def obj_at'_real_def) - apply (erule(1) if_live_then_nonz_capD') - apply (fastforce simp: projectKOs live'_def) +lemma tcbQueueRemove_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers and ex_nonz_cap_to' tcbPtr\ + tcbQueueRemove q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_imp_lift' getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + by (force dest: sym_heapD2[where p'=tcbPtr] sym_heapD1[where p=tcbPtr] + elim: if_live_then_nonz_capE' + simp: valid_tcb'_def opt_map_def obj_at'_def projectKOs + ko_wp_at'_def opt_tcb_at'_def live'_def) + +lemma tcbQueueRemove_ex_nonz_cap_to'[wp]: + "tcbQueueRemove q tcbPtr \ex_nonz_cap_to' tcbPtr'\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_cap_to' hoare_drop_imps getTCB_wp) + +(* We could write this one as "\t. tcbQueueHead t \ ..." instead, but we can't do the same in + tcbQueueAppend_if_live_then_nonz_cap', and it's nicer if the two lemmas are symmetric *) +lemma tcbQueuePrepend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueHead q)) s)\ + tcbQueuePrepend q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueEnd q)) s)\ + tcbQueueAppend q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive') + +lemma tcbQueueInsert_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcbPtr and valid_objs' and sym_heap_sched_pointers\ + tcbQueueInsert tcbPtr afterPtr + \\_. if_live_then_nonz_cap'\" + supply projectKOs[simp] + unfolding tcbQueueInsert_def + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' getTCB_wp) + apply (intro conjI) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ko_wp_at'_def obj_at'_def live'_def) + apply (erule if_live_then_nonz_capE') + apply (frule_tac p'=afterPtr in sym_heapD2) + apply (fastforce simp: opt_map_def obj_at'_def) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def ko_wp_at'_def obj_at'_def opt_map_def live'_def) + done + +lemma tcbSchedEnqueue_iflive'[wp]: + "\if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. 
if_live_then_nonz_cap'\" + supply projectKOs[simp] + unfolding tcbSchedEnqueue_def + apply (wpsimp wp: tcbQueuePrepend_if_live_then_nonz_cap' threadGet_wp) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule_tac p=tcbPtr in if_live_then_nonz_capE') + apply (fastforce simp: ko_wp_at'_def obj_at'_def live'_def) + apply clarsimp + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ko_wp_at'_def inQ_def opt_pred_def opt_map_def obj_at'_def live'_def + split: option.splits) done +crunches rescheduleRequired + for iflive'[wp]: if_live_then_nonz_cap' + lemma sts_iflive'[wp]: "\\s. if_live_then_nonz_cap' s - \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s)\ + \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s) + \ pspace_aligned' s \ pspace_distinct' s\ setThreadState st t \\rv. if_live_then_nonz_cap'\" apply (simp add: setThreadState_def setQueue_def) - apply (rule hoare_pre) - apply (wp | simp)+ - apply (rule_tac Q="\rv. if_live_then_nonz_cap'" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_iflive' | simp)+ - apply auto + apply wpsimp + apply (rule_tac Q="\rv. if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) + apply clarsimp + apply (wpsimp wp: threadSet_iflive') + apply fastforce done lemma sbn_iflive'[wp]: @@ -4052,6 +5067,19 @@ lemma setBoundNotification_vms'[wp]: apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) done +lemma threadSet_ct_not_inQ: + "(\tcb. tcbQueued tcb = tcbQueued (F tcb)) + \ threadSet F tcbPtr \\s. P (ct_not_inQ s)\" + supply projectKOs[simp] + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (erule rsubst[where P=P]) + by (fastforce simp: ct_not_inQ_def obj_at'_def objBits_simps ps_clear_def split: if_splits) + +crunches tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, tcbQueueRemove, addToBitmap + for ct_not_inQ[wp]: ct_not_inQ + (wp: threadSet_ct_not_inQ crunch_wps) + lemma tcbSchedEnqueue_ct_not_inQ: "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ tcbSchedEnqueue t \\_. ct_not_inQ\" @@ -4075,12 +5103,7 @@ lemma tcbSchedEnqueue_ct_not_inQ: done show ?thesis apply (simp add: tcbSchedEnqueue_def unless_def null_def) - apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply wp - apply assumption + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ done qed @@ -4107,12 +5130,7 @@ lemma tcbSchedAppend_ct_not_inQ: done show ?thesis apply (simp add: tcbSchedAppend_def unless_def null_def) - apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply wp - apply assumption + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ done qed @@ -4141,12 +5159,10 @@ lemma rescheduleRequired_sa_cnt[wp]: lemma possibleSwitchTo_ct_not_inQ: "\ct_not_inQ and (\s. 
ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ possibleSwitchTo t \\_. ct_not_inQ\" - (is "\?PRE\ _ \_\") apply (simp add: possibleSwitchTo_def curDomain_def) - apply (wpsimp wp: static_imp_wp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ + apply (wpsimp wp: hoare_weak_lift_imp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ threadGet_wp - | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ - apply (fastforce simp: obj_at'_def) + | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ done lemma threadSet_tcbState_update_ct_not_inQ[wp]: @@ -4162,7 +5178,7 @@ lemma threadSet_tcbState_update_ct_not_inQ[wp]: apply (clarsimp) apply (rule hoare_conjI) apply (rule hoare_weaken_pre) - apply (wps, wp static_imp_wp) + apply (wps, wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb)+ apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4182,7 +5198,7 @@ lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: apply (rule hoare_conjI) apply (rule hoare_weaken_pre) apply wps - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb) apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4226,29 +5242,6 @@ lemma tcbSchedDequeue_ct_not_inQ[wp]: done qed -lemma tcbSchedEnqueue_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ tcbSchedEnqueue t \\_. obj_at' P t'\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp threadGet_wp | simp)+ - apply (clarsimp simp: obj_at'_def) - apply (case_tac obja) - apply fastforce - done - -lemma setThreadState_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ setThreadState st t \\_. obj_at' P t'\" - apply (simp add: setThreadState_def rescheduleRequired_def) - apply (wp hoare_vcg_conj_lift tcbSchedEnqueue_not_st - | wpc - | rule hoare_drop_imps - | simp)+ - apply (clarsimp simp: obj_at'_def) - apply (case_tac obj) - apply fastforce - done - crunch ct_idle_or_in_cur_domain'[wp]: setQueue ct_idle_or_in_cur_domain' (simp: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) @@ -4277,17 +5270,8 @@ lemma removeFromBitmap_ct_idle_or_in_cur_domain'[wp]: | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ done -lemma tcbSchedEnqueue_ksCurDomain[wp]: - "\ \s. P (ksCurDomain s)\ tcbSchedEnqueue tptr \\_ s. P (ksCurDomain s)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply wpsimp - done - -lemma tcbSchedEnqueue_ksDomSchedule[wp]: - "\ \s. P (ksDomSchedule s)\ tcbSchedEnqueue tptr \\_ s. P (ksDomSchedule s)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply wpsimp - done +crunches tcbQueuePrepend + for ct_idle_or_in_cur_domain'[wp]: ct_idle_or_in_cur_domain' lemma tcbSchedEnqueue_ct_idle_or_in_cur_domain'[wp]: "\ct_idle_or_in_cur_domain'\ tcbSchedEnqueue tptr \\_. ct_idle_or_in_cur_domain'\" @@ -4365,12 +5349,385 @@ lemma sts_utr[wp]: apply (wp untyped_ranges_zero_lift) done +lemma removeFromBitmap_bitmapQ: + "\\\ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" + unfolding bitmapQ_defs bitmap_fun_defs + by (wpsimp simp: bitmap_fun_defs) + +lemma removeFromBitmap_valid_bitmapQ[wp]: + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans + and (\s. tcbQueueEmpty (ksReadyQueues s (d,p)))\ + removeFromBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. 
?pre s \ \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: removeFromBitmap_valid_bitmapQ_except removeFromBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done + +crunches tcbSchedDequeue + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + (wp: crunch_wps simp: crunch_simps) + +lemma setQueue_nonempty_valid_bitmapQ': + "\\s. valid_bitmapQ s \ \ tcbQueueEmpty (ksReadyQueues s (d, p))\ + setQueue d p queue + \\_ s. \ tcbQueueEmpty queue \ valid_bitmapQ s\" + apply (wpsimp simp: setQueue_def) + apply (fastforce simp: valid_bitmapQ_def bitmapQ_def) + done + +lemma threadSet_valid_bitmapQ_except[wp]: + "threadSet f tcbPtr \valid_bitmapQ_except d p\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (clarsimp simp: valid_bitmapQ_except_def bitmapQ_def) + done + +lemma threadSet_bitmapQ: + "threadSet F t \bitmapQ domain priority\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + by (clarsimp simp: bitmapQ_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend + for valid_bitmapQ_except[wp]: "valid_bitmapQ_except d p" + and valid_bitmapQ[wp]: valid_bitmapQ + and bitmapQ[wp]: "bitmapQ tdom prio" + (wp: crunch_wps) + +lemma tcbQueued_imp_queue_nonempty: + "\list_queue_relation ts (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)) nexts prevs; + \t. t \ set ts \ (inQ (tcbDomain tcb) (tcbPriority tcb) |< tcbs_of' s) t; + ko_at' tcb tcbPtr s; tcbQueued tcb\ + \ \ tcbQueueEmpty (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb))" + supply projectKOs[simp] + apply (clarsimp simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce dest: heap_path_head simp: inQ_def opt_map_def opt_pred_def obj_at'_def) + done + +lemma tcbSchedDequeue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedDequeue tcbPtr \\_. valid_bitmapQ\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + apply (wpsimp wp: setQueue_nonempty_valid_bitmapQ' hoare_vcg_conj_lift + hoare_vcg_if_lift2 hoare_vcg_const_imp_lift threadGet_wp + | wp (once) hoare_drop_imps)+ + by (fastforce dest!: tcbQueued_imp_queue_nonempty + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + +lemma tcbSchedDequeue_valid_bitmaps[wp]: + "tcbSchedDequeue tcbPtr \valid_bitmaps\" + by (wpsimp simp: valid_bitmaps_def) + +lemma setQueue_valid_bitmapQ': (* enqueue only *) + "\valid_bitmapQ_except d p and bitmapQ d p and K (\ tcbQueueEmpty q)\ + setQueue d p q + \\_. valid_bitmapQ\" + unfolding setQueue_def bitmapQ_defs + by (wpsimp simp: bitmapQ_def) + +lemma tcbSchedEnqueue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedEnqueue tcbPtr \\_. 
valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedEnqueue_def + apply (wpsimp simp: tcbQueuePrepend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp) + apply (fastforce simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def split: if_splits) + done + +crunches tcbSchedEnqueue, tcbSchedAppend + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + +lemma tcbSchedEnqueue_valid_bitmaps[wp]: + "tcbSchedEnqueue tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) + done + +crunches rescheduleRequired, threadSet, setThreadState + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +lemma tcbSchedEnqueue_valid_sched_pointers[wp]: + "tcbSchedEnqueue tcbPtr \valid_sched_pointers\" + supply projectKOs[simp] + apply (clarsimp simp: tcbSchedEnqueue_def getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueuePrepend_def + setQueue_def) + apply (clarsimp simp: valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: valid_sched_pointers_def list_queue_relation_def) + apply (case_tac "ts = []", fastforce simp: tcbQueueEmpty_def) + by (intro conjI impI; + force dest!: hd_in_set heap_path_head + simp: inQ_def opt_pred_def opt_map_def obj_at'_def split: if_splits) + +lemma tcbSchedAppend_valid_sched_pointers[wp]: + "tcbSchedAppend tcbPtr \valid_sched_pointers\" + supply projectKOs[simp] + apply (clarsimp simp: tcbSchedAppend_def getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueueAppend_def + setQueue_def) + apply (clarsimp simp: valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + by (intro conjI impI; + clarsimp dest: last_in_set + simp: valid_sched_pointers_def opt_map_def list_queue_relation_def tcbQueueEmpty_def + queue_end_valid_def inQ_def opt_pred_def obj_at'_def + split: if_splits option.splits; + fastforce) + +lemma tcbSchedDequeue_valid_sched_pointers[wp]: + "\valid_sched_pointers and sym_heap_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. 
valid_sched_pointers\" + supply if_split[split del] fun_upd_apply[simp del] projectKOs[simp] + apply (clarsimp simp: tcbSchedDequeue_def getQueue_def setQueue_def) + apply (wpsimp wp: threadSet_wp getTCB_wp threadGet_wp simp: tcbQueueRemove_def) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp split: if_splits) + apply (frule (1) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI) + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (clarsimp simp: valid_sched_pointers_def) + apply (case_tac "ptr = tcbPtr") + apply (force dest!: heap_ls_last_None + simp: prev_queue_head_def queue_end_valid_def inQ_def opt_map_def obj_at'_def) + apply (simp add: fun_upd_def opt_pred_def) + \ \tcbPtr is the head of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def fun_upd_apply prev_queue_head_def + inQ_def opt_pred_def opt_map_def obj_at'_def + split: if_splits option.splits) + \ \tcbPtr is the end of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def queue_end_valid_def inQ_def opt_pred_def + opt_map_def fun_upd_apply obj_at'_def + split: if_splits option.splits) + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI impI allI) + by (clarsimp simp: valid_sched_pointers_def inQ_def opt_pred_def opt_map_def fun_upd_apply obj_at'_def + split: if_splits option.splits; + auto) + +lemma tcbQueueRemove_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts)\ + tcbQueueRemove q tcbPtr + \\_. 
sym_heap_sched_pointers\" + supply projectKOs[simp] + supply heap_path_append[simp del] + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + apply (rename_tac tcb ts) + + \ \tcbPtr is the head of q, which is not a singleton\ + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: list_queue_relation_def Let_def) + apply (prop_tac "tcbSchedNext tcb \ Some tcbPtr") + apply (fastforce dest: heap_ls_no_loops[where p=tcbPtr] simp: opt_map_def obj_at'_def) + apply (fastforce intro: sym_heap_remove_only' + simp: prev_queue_head_def opt_map_red opt_map_upd_triv obj_at'_def) + + \ \tcbPtr is the end of q, which is not a singleton\ + apply (intro impI) + apply (rule conjI) + apply clarsimp + apply (prop_tac "tcbSchedPrev tcb \ Some tcbPtr") + apply (fastforce dest!: heap_ls_prev_no_loops[where p=tcbPtr] + simp: list_queue_relation_def opt_map_def obj_at'_def) + apply (subst fun_upd_swap, fastforce) + apply (fastforce intro: sym_heap_remove_only simp: opt_map_red opt_map_upd_triv obj_at'_def) + + \ \tcbPtr is in the middle of q\ + apply (intro conjI impI allI) + apply (frule (2) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []") + apply (fastforce simp: list_queue_relation_def queue_end_valid_def) + apply (clarsimp simp: list_queue_relation_def) + apply (frule (3) ptr_in_middle_prev_next) + apply (frule heap_ls_distinct) + apply (rename_tac afterPtr beforePtr xs ys) + apply (frule_tac before=beforePtr and middle=tcbPtr and after=afterPtr + in sym_heap_remove_middle_from_chain) + apply (fastforce dest: last_in_set simp: opt_map_def obj_at'_def) + apply (fastforce dest: hd_in_set simp: opt_map_def obj_at'_def) + apply (rule_tac hp="tcbSchedNexts_of s" in sym_heapD2) + apply fastforce + apply (fastforce simp: opt_map_def obj_at'_def) + apply (fastforce simp: opt_map_def obj_at'_def) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def split: if_splits) + done + +lemma tcbQueuePrepend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueuePrepend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply projectKOs[simp] + supply if_split[split del] + apply (clarsimp simp: tcbQueuePrepend_def) + apply (wpsimp wp: threadSet_wp) + apply (prop_tac "tcbPtr \ the (tcbQueueHead q)") + apply (case_tac "ts = []"; + fastforce dest: heap_path_head simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac a=tcbPtr and b="the (tcbQueueHead q)" in sym_heap_connect) + apply assumption + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def tcbQueueEmpty_def) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def tcbQueueEmpty_def) + done + +lemma tcbQueueInsert_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueInsert tcbPtr afterPtr + \\_. 
sym_heap_sched_pointers\" + supply projectKOs[simp] + apply (clarsimp simp: tcbQueueInsert_def) + \ \forwards step in order to name beforePtr below\ + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (rule bind_wp[OF _ assert_sp]) + apply (rule hoare_ex_pre_conj[simplified conj_commute], rename_tac beforePtr) + apply (rule bind_wp[OF _ assert_sp]) + apply (wpsimp wp: threadSet_wp) + apply normalise_obj_at' + apply (prop_tac "tcbPtr \ afterPtr") + apply (clarsimp simp: list_queue_relation_def opt_map_red obj_at'_def) + apply (prop_tac "tcbPtr \ beforePtr") + apply (fastforce dest: sym_heap_None simp: opt_map_def obj_at'_def split: option.splits) + apply (prop_tac "tcbSchedNexts_of s beforePtr = Some afterPtr") + apply (fastforce intro: sym_heapD2 simp: opt_map_def obj_at'_def) + apply (fastforce dest: sym_heap_insert_into_middle_of_chain + simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def) + done + +lemma tcbQueueAppend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueAppend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply projectKOs[simp] + supply if_split[split del] + apply (clarsimp simp: tcbQueueAppend_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def obj_at'_def + split: if_splits) + apply fastforce + apply (drule_tac a="last ts" and b=tcbPtr in sym_heap_connect) + apply (fastforce dest: heap_ls_last_None) + apply assumption + apply (simp add: opt_map_red tcbQueueEmpty_def) + apply (subst fun_upd_swap, simp) + apply (fastforce simp: opt_map_red opt_map_upd_triv) + done + +lemma tcbQueued_update_sym_heap_sched_pointers[wp]: + "threadSet (tcbQueued_update f) tcbPtr \sym_heap_sched_pointers\" + by (rule sym_heap_sched_pointers_lift; + wpsimp wp: threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of) + +lemma tcbSchedEnqueue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedEnqueue tcbPtr + \\_. sym_heap_sched_pointers\" + supply projectKOs[simp] + unfolding tcbSchedEnqueue_def + apply (wpsimp wp: tcbQueuePrepend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def) + done + +lemma tcbSchedAppend_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedAppend tcbPtr + \\_. sym_heap_sched_pointers\" + supply projectKOs[simp] + unfolding tcbSchedAppend_def + apply (wpsimp wp: tcbQueueAppend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def) + done + +lemma tcbSchedDequeue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. 
sym_heap_sched_pointers\" + supply projectKOs[simp] + unfolding tcbSchedDequeue_def + apply (wpsimp wp: tcbQueueRemove_sym_heap_sched_pointers hoare_vcg_if_lift2 threadGet_wp + simp: bitmap_fun_defs) + apply (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def inQ_def opt_pred_def + opt_map_def obj_at'_def) + done + +crunches setThreadState + for valid_sched_pointers[wp]: valid_sched_pointers + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (simp: crunch_simps wp: crunch_wps threadSet_valid_sched_pointers threadSet_sched_pointers) + lemma sts_invs_minor': "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st \ (st \ Inactive \ \ idle' st \ st' \ Inactive \ \ idle' st')) t and (\s. t = ksIdleThread s \ idle' st) - and (\s. (\p. t \ set(ksReadyQueues s p)) \ runnable' st) and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) and sch_act_simple and invs'\ @@ -4379,21 +5736,21 @@ lemma sts_invs_minor': including no_pre apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp sts_valid_queues valid_irq_node_lift irqs_masked_lift - setThreadState_ct_not_inQ + apply (wp valid_irq_node_lift irqs_masked_lift + setThreadState_ct_not_inQ | simp add: cteCaps_of_def o_def)+ apply (clarsimp simp: sch_act_simple_def) apply (intro conjI) - apply clarsimp - defer - apply (clarsimp dest!: st_tcb_at_state_refs_ofD' - elim!: rsubst[where P=sym_refs] - intro!: ext) - apply (clarsimp elim!: st_tcb_ex_cap'') + apply clarsimp + defer + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' + elim!: rsubst[where P=sym_refs] + intro!: ext) + apply (clarsimp elim!: st_tcb_ex_cap'') + apply fastforce + apply fastforce apply (frule tcb_in_valid_state', clarsimp+) - apply (cases st, simp_all add: valid_tcb_state'_def - split: Structures_H.thread_state.split_asm) - done + by (cases st; simp add: valid_tcb_state'_def split: Structures_H.thread_state.split_asm) lemma sts_cap_to'[wp]: "\ex_nonz_cap_to' p\ setThreadState st t \\rv. ex_nonz_cap_to' p\" @@ -4438,12 +5795,59 @@ lemma threadSet_ct_running': apply wp done +lemma tcbQueuePrepend_tcbPriority_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + supply projectKOs[simp] + unfolding tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbQueuePrepend_tcbDomain_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + supply projectKOs[simp] + unfolding tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbSchedDequeue_tcbPriority[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedDequeue_tcbDomain[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedEnqueue_tcbPriority_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +lemma tcbSchedEnqueue_tcbDomain_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +crunches rescheduleRequired + for tcbPriority_obj_at'[wp]: "obj_at' (\tcb. 
P (tcbPriority tcb)) t'" + and tcbDomain_obj_at'[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t'" + +lemma setThreadState_tcbPriority_obj_at'[wp]: + "setThreadState ts tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + supply projectKOs[simp] + unfolding setThreadState_def + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: obj_at'_def objBits_simps ps_clear_def) + done + lemma setThreadState_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ setThreadState st t \\_. tcb_in_cur_domain' t'\" apply (simp add: tcb_in_cur_domain'_def) apply (rule hoare_pre) apply wps - apply (wp setThreadState_not_st | simp)+ + apply (simp add: setThreadState_def) + apply (wpsimp wp: threadSet_ct_idle_or_in_cur_domain' hoare_drop_imps)+ done lemma asUser_global_refs': "\valid_global_refs'\ asUser t f \\rv. valid_global_refs'\" @@ -4589,10 +5993,13 @@ lemma set_eobject_corres': assumes e: "etcb_relation etcb tcb'" assumes z: "\s. obj_at' P ptr s \ map_to_ctes ((ksPSpace s) (ptr \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" - shows "corres dc (tcb_at ptr and is_etcb_at ptr) - (obj_at' (\ko. non_exst_same ko tcb') ptr - and obj_at' P ptr) - (set_eobject ptr etcb) (setObject ptr tcb')" + shows + "corres dc + (tcb_at ptr and is_etcb_at ptr) + (obj_at' (\ko. non_exst_same ko tcb') ptr and obj_at' P ptr + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain tcb' \ tcbPriority tcb \ tcbPriority tcb') + \ \ tcbQueued tcb) ptr) + (set_eobject ptr etcb) (setObject ptr tcb')" apply (rule corres_no_failI) apply (rule no_fail_pre) apply wp @@ -4613,20 +6020,34 @@ lemma set_eobject_corres': apply (drule(1) bspec) apply (clarsimp simp: non_exst_same_def) apply (case_tac bb; simp) - apply (clarsimp simp: obj_at'_def other_obj_relation_def cte_relation_def tcb_relation_def projectKOs split: if_split_asm)+ + apply (clarsimp simp: obj_at'_def other_obj_relation_def tcb_relation_cut_def cte_relation_def + tcb_relation_def projectKOs + split: if_split_asm)+ apply (clarsimp simp: aobj_relation_cuts_def split: ARM_A.arch_kernel_obj.splits) apply (rename_tac arch_kernel_obj obj d p ts) apply (case_tac arch_kernel_obj; simp) apply (clarsimp simp: pte_relation_def pde_relation_def is_tcb_def split: if_split_asm)+ - apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) - apply (frule bspec, erule domI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: obj_at'_def) - apply (clarsimp simp: projectKOs) - apply (insert e) - apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits ARM_A.arch_kernel_obj.splits) + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: obj_at'_def) + apply (insert e) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type + split: Structures_A.kernel_object.splits kernel_object.splits arch_kernel_obj.splits) + apply (frule in_ready_q_tcbQueued_eq[where t=ptr]) + apply (rename_tac s' conctcb' abstcb exttcb) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def obj_at'_def projectKOs non_exst_same_def split: option.splits) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev tcb') = tcbSchedPrevs_of s'") + apply 
(fastforce simp: opt_map_def obj_at'_def projectKOs non_exst_same_def split: option.splits) + apply (clarsimp simp: ready_queue_relation_def opt_map_def opt_pred_def obj_at'_def projectKOs + inQ_def non_exst_same_def + split: option.splits) + apply metis done lemma set_eobject_corres: @@ -4634,9 +6055,13 @@ lemma set_eobject_corres: assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" assumes r: "r () ()" - shows "corres r (tcb_at add and (\s. ekheap s add = Some etcb)) - (ko_at' tcb' add) - (set_eobject add etcbu) (setObject add tcbu')" + shows + "corres r + (tcb_at add and (\s. ekheap s add = Some etcb)) + (ko_at' tcb' add + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain tcbu' \ tcbPriority tcb \ tcbPriority tcbu') + \ \ tcbQueued tcb) add) + (set_eobject add etcbu) (setObject add tcbu')" apply (rule_tac F="non_exst_same tcb' tcbu' \ etcb_relation etcbu tcbu'" in corres_req) apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) apply (frule(1) pspace_relation_absD) @@ -4663,24 +6088,27 @@ lemma set_eobject_corres: lemma ethread_set_corresT: assumes x: "\tcb'. non_exst_same tcb' (f' tcb')" - assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. - getF (f' tcb) = getF tcb" - assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ - etcb_relation (f etcb) (f' tcb')" - shows "corres dc (tcb_at t and valid_etcbs) - (tcb_at' t) - (ethread_set f t) (threadSet f' t)" + assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (f' tcb) = getF tcb" + assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation (f etcb) (f' tcb')" + shows + "corres dc + (tcb_at t and valid_etcbs) + (tcb_at' t + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain (f' tcb) + \ tcbPriority tcb \ tcbPriority (f' tcb)) + \ \ tcbQueued tcb) t) + (ethread_set f t) (threadSet f' t)" apply (simp add: ethread_set_def threadSet_def bind_assoc) apply (rule corres_guard_imp) apply (rule corres_split[OF corres_get_etcb set_eobject_corres]) apply (rule x) apply (erule e) apply (simp add: z)+ - apply wp+ + apply (wp getObject_tcb_wp)+ apply clarsimp apply (simp add: valid_etcbs_def tcb_at_st_tcb_at[symmetric]) apply (force simp: tcb_at_def get_etcb_def obj_at_def) - apply simp + apply (clarsimp simp: obj_at'_def) done lemmas ethread_set_corres = diff --git a/proof/refine/ARM_HYP/Tcb_R.thy b/proof/refine/ARM_HYP/Tcb_R.thy index 51f89b22be..422709d8c3 100644 --- a/proof/refine/ARM_HYP/Tcb_R.thy +++ b/proof/refine/ARM_HYP/Tcb_R.thy @@ -46,14 +46,14 @@ lemma activateThread_corres: apply (rule corres_split_nor[OF asUser_setNextPC_corres]) apply (rule setThreadState_corres) apply (simp | wp weak_sch_act_wf_lift_linear)+ - apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_tcb_at invs_distinct) apply fastforce apply (rule corres_guard_imp) apply (rule activateIdleThread_corres) apply (clarsimp elim!: st_tcb_weakenE) apply (clarsimp elim!: pred_tcb'_weakenE) apply (wp gts_st_tcb gts_st_tcb' gts_st_tcb_at)+ - apply (clarsimp simp: ct_in_state_def tcb_at_invs + apply (clarsimp simp: ct_in_state_def tcb_at_invs invs_distinct invs_psp_aligned elim!: st_tcb_weakenE) apply (clarsimp simp: tcb_at_invs' ct_in_state'_def elim!: pred_tcb'_weakenE) @@ -80,10 +80,10 @@ abbreviation lemma gts_st_tcb': "\tcb_at' t\ getThreadState t \\rv. st_tcb_at' (\st. st = rv) t\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule hoare_post_imp[where Q="\rv s. \rv'. rv = rv' \ st_tcb_at' (\st. 
st = rv') t s"]) apply simp - apply (wp hoare_ex_wp) + apply (wp hoare_vcg_ex_lift) apply (clarsimp simp add: pred_tcb_at'_def obj_at'_def) done @@ -98,12 +98,13 @@ lemma activate_invs': activateThread \\rv. invs' and (ct_running' or ct_idle')\" apply (simp add: activateThread_def) - apply (rule hoare_seq_ext) - apply (rule_tac B="\state s. invs' s \ sch_act_simple s - \ st_tcb_at' (\st. st = state) thread s - \ thread = ksCurThread s - \ (runnable' state \ idle' state)" in hoare_seq_ext) - apply (case_tac x, simp_all add: isTS_defs hoare_pre_cont + apply (rule bind_wp) + apply (rule_tac Q'="\state s. invs' s \ sch_act_simple s + \ st_tcb_at' (\st. st = state) thread s + \ thread = ksCurThread s + \ (runnable' state \ idle' state)" + in bind_wp) + apply (case_tac rv, simp_all add: isTS_defs hoare_pre_cont split del: if_splits cong: if_cong) apply (wp) apply (clarsimp simp: ct_in_state'_def) @@ -154,9 +155,8 @@ lemma activate_sch_act: activateThread \\rv s. P (ksSchedulerAction s)\" apply (simp add: activateThread_def getCurThread_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gets_sp]) - apply (rule hoare_seq_ext[where B="\st s. (runnable' or idle') st - \ P (ksSchedulerAction s)"]) + apply (rule bind_wp [OF _ gets_sp]) + apply (rule bind_wp[where Q'="\st s. (runnable' or idle') st \ P (ksSchedulerAction s)"]) apply (rule hoare_pre) apply (wp | wpc | simp add: setThreadState_runnable_simp)+ apply (clarsimp simp: ct_in_state'_def cur_tcb'_def pred_tcb_at' @@ -197,13 +197,13 @@ lemma setupReplyMaster_weak_sch_act_wf[wp]: apply assumption done -crunches setupReplyMaster - for valid_queues[wp]: "Invariants_H.valid_queues" - and valid_queues'[wp]: "valid_queues'" +crunches setup_reply_master, Tcb_A.restart, arch_post_modify_registers + for pspace_aligned[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" (wp: crunch_wps simp: crunch_simps) lemma restart_corres: - "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and ex_nonz_cap_to' t) (Tcb_A.restart t) (ThreadDecls_H.restart t)" apply (simp add: Tcb_A.restart_def Thread_H.restart_def) apply (simp add: isStopped_def2 liftM_def) @@ -212,20 +212,22 @@ lemma restart_corres: apply (clarsimp simp add: runnable_tsr idle_tsr when_def) apply (rule corres_split_nor[OF cancel_ipc_corres]) apply (rule corres_split_nor[OF setupReplyMaster_corres]) - apply (rule corres_split_nor[OF setThreadState_corres]) - apply clarsimp + apply (rule corres_split_nor[OF setThreadState_corres], simp) apply (rule corres_split[OF tcbSchedEnqueue_corres possibleSwitchTo_corres]) - apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_valid_queues sts_st_tcb' - | clarsimp simp: valid_tcb_state'_def)+ - apply (rule_tac Q="\rv. valid_sched and cur_tcb" in hoare_strengthen_post) - apply wp - apply (simp add: valid_sched_def valid_sched_action_def) - apply (rule_tac Q="\rv. invs' and tcb_at' t" in hoare_strengthen_post) - apply wp - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def) - apply wp+ - apply (simp add: valid_sched_def invs_def tcb_at_is_etcb_at) - apply (clarsimp simp add: invs'_def valid_state'_def sch_act_wf_weak) + apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' + | clarsimp simp: valid_tcb_state'_def | strengthen valid_objs'_valid_tcbs')+ + apply (rule_tac Q="\rv. 
valid_sched and cur_tcb and pspace_aligned and pspace_distinct" + in hoare_strengthen_post) + apply wp + apply (fastforce simp: valid_sched_def valid_sched_action_def) + apply (rule_tac Q="\rv. invs' and ex_nonz_cap_to' t" in hoare_strengthen_post) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def + valid_tcb_state'_def) + apply wp+ + apply (simp add: valid_sched_def invs_def tcb_at_is_etcb_at invs_psp_aligned invs_distinct) + apply clarsimp done lemma restart_invs': @@ -304,12 +306,6 @@ crunch sch_act_simple [wp]: asUser "sch_act_simple" crunch invs'[wp]: getSanitiseRegisterInfo invs' -lemma invs_valid_queues': - "invs' s \ valid_queues' s" - by (clarsimp simp:invs'_def valid_state'_def) - -declare invs_valid_queues'[rule_format, elim!] - lemma einvs_valid_etcbs: "einvs s \ valid_etcbs s" by (clarsimp simp: valid_sched_def) @@ -322,6 +318,11 @@ lemma asUser_postModifyRegisters_corres: apply (rule corres_stateAssert_assume) by simp+ +crunches restart + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + (simp: crunch_simps wp: crunch_wps threadSet_sched_pointers threadSet_valid_sched_pointers) + lemma invokeTCB_WriteRegisters_corres: "corres (dc \ (=)) (einvs and tcb_at dest and ex_nonz_cap_to dest) (invs' and sch_act_simple and tcb_at' dest and ex_nonz_cap_to' dest) @@ -347,17 +348,21 @@ lemma invokeTCB_WriteRegisters_corres: apply (rule_tac P=\ and P'=\ in corres_inst) apply simp apply (wp+)[2] - apply ((wp static_imp_wp restart_invs' - | strengthen valid_sched_weak_strg einvs_valid_etcbs - invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def - dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] + apply ((wp hoare_weak_lift_imp restart_invs' + | strengthen valid_sched_weak_strg einvs_valid_etcbs + invs_weak_sch_act_wf + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues valid_objs'_valid_tcbs' invs_valid_objs' + | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def + dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] apply (rule_tac Q="\_. einvs and tcb_at dest and ex_nonz_cap_to dest" in hoare_strengthen_post[rotated]) apply (fastforce simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def dest!: idle_no_ex_cap) prefer 2 apply (rule_tac Q="\_. invs' and tcb_at' dest and ex_nonz_cap_to' dest" in hoare_strengthen_post[rotated]) apply (fastforce simp: sch_act_wf_weak invs'_def valid_state'_def dest!: global'_no_ex_cap) apply (wpsimp simp: getSanitiseRegisterInfo_def)+ + apply fastforce + apply fastforce done crunch it[wp]: suspend "\s. P (ksIdleThread s)" @@ -382,6 +387,10 @@ lemma suspend_ResumeCurrentThread_imp_notct[wp]: \\rv s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" by (wpsimp simp: suspend_def) +crunches restart, suspend + for cur_tcb'[wp]: cur_tcb' + (wp: crunch_wps threadSet_cur ignore: threadSet) + lemma invokeTCB_CopyRegisters_corres: "corres (dc \ (=)) (einvs and simple_sched_action and tcb_at dest and tcb_at src and ex_nonz_cap_to src and @@ -409,7 +418,7 @@ proof - apply (rule corres_modify') apply simp apply simp - apply (simp | wp)+ + apply (simp add: invs_distinct invs_psp_aligned | wp)+ done have R: "\src src' des des' xs ys. 
\ src = src'; des = des'; xs = ys \ \ corres dc (tcb_at src and tcb_at des and invs) @@ -432,7 +441,7 @@ proof - apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) apply (rule asUser_setNextPC_corres) apply wp+ - apply simp+ + apply (simp add: invs_distinct invs_psp_aligned)+ done show ?thesis apply (simp add: invokeTCB_def performTransfer_def) @@ -446,7 +455,7 @@ proof - apply (simp add: frame_registers_def frameRegisters_def) apply (simp add: getRestartPC_def setNextPC_def dc_def[symmetric]) apply (rule Q[OF refl refl]) - apply (wp mapM_x_wp' static_imp_wp | simp)+ + apply (wp mapM_x_wp' hoare_weak_lift_imp | simp)+ apply (rule corres_split_nor) apply (rule corres_when[OF refl]) apply (rule R[OF refl refl]) @@ -456,15 +465,15 @@ proof - apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp - apply (solves \wp static_imp_wp\)+ + apply (solves \wp hoare_weak_lift_imp\)+ apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_strengthen_post[rotated]) - apply (clarsimp simp: invs_def valid_sched_weak_strg valid_sched_def) + apply (fastforce simp: invs_def valid_state_def valid_pspace_def valid_sched_weak_strg valid_sched_def) prefer 2 apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_strengthen_post[rotated]) - apply (clarsimp simp: invs'_def valid_state'_def invs_weak_sch_act_wf) - apply ((wp mapM_x_wp' static_imp_wp | simp)+)[4] - apply ((wp static_imp_wp restart_invs' | wpc | clarsimp simp add: if_apply_def2)+)[2] - apply (wp suspend_nonz_cap_to_tcb static_imp_wp | simp add: if_apply_def2)+ + apply (fastforce simp: invs'_def valid_state'_def invs_weak_sch_act_wf cur_tcb'_def) + apply ((wp mapM_x_wp' hoare_weak_lift_imp | (simp add: cur_tcb'_def[symmetric])+)+)[8] + apply ((wp hoare_weak_lift_imp restart_invs' | wpc | clarsimp simp add: if_apply_def2)+)[2] + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp | simp add: if_apply_def2)+ apply (fastforce simp: invs_def valid_state_def valid_pspace_def dest!: idle_no_ex_cap) apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) @@ -509,41 +518,10 @@ lemma copyreg_invs': \\rv. invs'\" by (rule hoare_strengthen_post, rule copyreg_invs'', simp) -lemma threadSet_valid_queues_no_state: - "\Invariants_H.valid_queues and (\s. \p. t \ set (ksReadyQueues s p))\ - threadSet f t \\_. Invariants_H.valid_queues\" - apply (simp add: threadSet_def) - apply wp - apply (simp add: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def) - done - -lemma threadSet_valid_queues'_no_state: - "(\tcb. tcbQueued tcb = tcbQueued (f tcb)) - \ \valid_queues' and (\s. \p. t \ set (ksReadyQueues s p))\ - threadSet f t \\_. 
valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs - objBits_simps addToQs_def - split del: if_split cong: if_cong) - apply (fastforce simp: projectKOs inQ_def split: if_split_asm) - done - lemma isRunnable_corres: - "corres (\ts runn. runnable ts = runn) (tcb_at t) (tcb_at' t) - (get_thread_state t) (isRunnable t)" + "corres (\ts runn. runnable ts = runn) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_thread_state t) (isRunnable t)" apply (simp add: isRunnable_def) apply (subst bind_return[symmetric]) apply (rule corres_guard_imp) @@ -564,16 +542,6 @@ lemma tcbSchedDequeue_not_queued: apply (wp tg_sp' [where P=\, simplified] | simp)+ done -lemma tcbSchedDequeue_not_in_queue: - "\p. \Invariants_H.valid_queues and tcb_at' t and valid_objs'\ tcbSchedDequeue t - \\rv s. t \ set (ksReadyQueues s p)\" - apply (rule_tac Q="\rv. Invariants_H.valid_queues and obj_at' (Not \ tcbQueued) t" - in hoare_post_imp) - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def projectKOs inQ_def) - apply (wp tcbSchedDequeue_not_queued tcbSchedDequeue_valid_queues | - simp add: valid_objs'_maxDomain valid_objs'_maxPriority)+ - done - lemma threadSet_ct_in_state': "(\tcb. tcbState (f tcb) = tcbState tcb) \ \ct_in_state' test\ threadSet f t \\rv. ct_in_state' test\" @@ -609,22 +577,27 @@ lemma threadSet_valid_objs_tcbPriority_update: apply (fastforce simp: obj_at'_def)+ done -lemma tcbSchedDequeue_ct_in_state': - "\ct_in_state' test\ tcbSchedDequeue t \\rv. ct_in_state' test\" +lemma tcbSchedDequeue_ct_in_state'[wp]: + "tcbSchedDequeue t \ct_in_state' test\" apply (simp add: ct_in_state'_def) apply (rule hoare_lift_Pf[where f=ksCurThread]; wpsimp) done crunch cur[wp]: tcbSchedDequeue cur_tcb' +crunches tcbSchedDequeue + for st_tcb_at'[wp]: "\s. P (st_tcb_at' st tcbPtr s)" + lemma sp_corres2: - "corres dc (valid_etcbs and weak_valid_sched_action and cur_tcb) - (Invariants_H.valid_queues and valid_queues' and cur_tcb' and tcb_at' t - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' and (\_. x \ maxPriority)) - (set_priority t x) (setPriority t x)" + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and tcb_at t + and valid_queues and pspace_aligned and pspace_distinct) + (tcb_at' t and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and (\_. 
x \ maxPriority) and sym_heap_sched_pointers and valid_sched_pointers) + (set_priority t x) (setPriority t x)" apply (simp add: setPriority_def set_priority_def thread_set_priority_def) apply (rule stronger_corres_guard_imp) - apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) apply (rule corres_split[OF ethread_set_corres], simp_all)[1] apply (simp add: etcb_relation_def) apply (rule corres_split[OF isRunnable_corres]) @@ -633,56 +606,45 @@ lemma sp_corres2: apply (wp corres_if; clarsimp) apply (rule rescheduleRequired_corres) apply (rule possibleSwitchTo_corres) - apply wp - apply wp - apply clarsimp - apply (wp static_imp_wp hoare_vcg_if_lift hoare_wp_combs gts_wp) - apply clarsimp - apply (wp hoare_vcg_if_lift static_imp_wp hoare_wp_combs isRunnable_wp) - apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift) - apply clarsimp - apply (wp hoare_drop_imps) - apply ((wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift - isRunnable_wp threadSet_pred_tcb_no_state threadSet_valid_queues_no_state - threadSet_valid_queues'_no_state threadSet_valid_objs_tcbPriority_update - threadSet_weak_sch_act_wf threadSet_ct_in_state'[simplified ct_in_state'_def] - | simp add: etcb_relation_def)+)[1] - apply ((wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift - isRunnable_wp threadSet_pred_tcb_no_state threadSet_valid_queues_no_state - threadSet_valid_queues'_no_state threadSet_cur threadSet_valid_objs_tcbPriority_update - threadSet_weak_sch_act_wf threadSet_ct_in_state'[simplified ct_in_state'_def] - | simp add: etcb_relation_def)+)[1] - apply ((wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift - isRunnable_wp threadSet_pred_tcb_no_state threadSet_valid_queues_no_state - threadSet_cur threadSet_valid_queues'_no_state threadSet_valid_objs_tcbPriority_update - threadSet_weak_sch_act_wf threadSet_ct_in_state'[simplified ct_in_state'_def] - | simp add: etcb_relation_def)+)[1] - apply clarsimp - apply ((wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift hoare_vcg_disj_lift - tcbSchedDequeue_not_in_queue - tcbSchedDequeue_valid_queues - tcbSchedDequeue_ct_in_state'[simplified ct_in_state'_def] | simp add: etcb_relation_def)+)[1] - apply clarsimp + apply ((clarsimp + | wp hoare_weak_lift_imp hoare_vcg_if_lift hoare_wp_combs gts_wp + isRunnable_wp)+)[4] + apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift + ethread_set_not_queued_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+ + apply ((wp hoare_vcg_imp_lift' hoare_vcg_all_lift + isRunnable_wp threadSet_pred_tcb_no_state + threadSet_valid_objs_tcbPriority_update threadSet_sched_pointers + threadSet_valid_sched_pointers tcb_dequeue_not_queued tcbSchedDequeue_not_queued + threadSet_weak_sch_act_wf + | simp add: etcb_relation_def + | strengthen valid_objs'_valid_tcbs' + obj_at'_weakenE[where P="Not \ tcbQueued"] + | wps)+) apply (force simp: valid_etcbs_def tcb_at_st_tcb_at[symmetric] state_relation_def dest: pspace_relation_tcb_at intro: st_tcb_at_opeqI) - apply (force simp: state_relation_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) + apply clarsimp done -lemma setPriority_corres: "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and valid_objs' and (\_. x \ maxPriority)) - (set_priority t x) (setPriority t x)" - apply (rule corres_guard_imp) +lemma setPriority_corres: + "corres dc + (einvs and tcb_at t) + (invs' and tcb_at' t and valid_objs' and (\_. 
x \ maxPriority)) + (set_priority t x) (setPriority t x)" + apply (rule corres_guard_imp) apply (rule sp_corres2) - apply (clarsimp simp: valid_sched_def valid_sched_action_def) + apply (simp add: valid_sched_def valid_sched_action_def invs_psp_aligned invs_distinct invs_def) apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak) done -lemma setMCPriority_corres: "corres dc (tcb_at t) (tcb_at' t) - (set_mcpriority t x) (setMCPriority t x)" +lemma setMCPriority_corres: + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (set_mcpriority t x) (setMCPriority t x)" apply (rule corres_guard_imp) apply (clarsimp simp: setMCPriority_def set_mcpriority_def) apply (rule threadset_corresT) - by (clarsimp simp: tcb_relation_def tcb_cap_cases_tcb_mcpriority - tcb_cte_cases_def exst_same_def)+ + by (clarsimp simp: tcb_relation_def tcb_cap_cases_tcb_mcpriority + tcb_cte_cases_def cteSizeBits_def exst_same_def)+ definition "out_rel fn fn' v v' \ @@ -694,100 +656,26 @@ definition lemma out_corresT: assumes x: "\tcb v. \(getF, setF)\ran tcb_cap_cases. getF (fn v tcb) = getF tcb" assumes y: "\v. \tcb. \(getF, setF)\ran tcb_cte_cases. getF (fn' v tcb) = getF tcb" + assumes sched_pointers: "\tcb v. tcbSchedPrev (fn' v tcb) = tcbSchedPrev tcb" + "\tcb v. tcbSchedNext (fn' v tcb) = tcbSchedNext tcb" + assumes flag: "\tcb v. tcbQueued (fn' v tcb) = tcbQueued tcb" assumes e: "\tcb v. exst_same tcb (fn' v tcb)" shows "out_rel fn fn' v v' \ - corres dc (tcb_at t) - (tcb_at' t) + corres dc (tcb_at t and pspace_aligned and pspace_distinct) + \ (option_update_thread t fn v) (case_option (return ()) (\x. threadSet (fn' x) t) v')" - apply (case_tac v, simp_all add: out_rel_def - option_update_thread_def) - apply clarsimp - apply (clarsimp simp add: threadset_corresT [OF _ x y e]) + apply (case_tac v, simp_all add: out_rel_def option_update_thread_def) + apply (clarsimp simp: threadset_corresT [OF _ x y sched_pointers flag e]) done lemmas out_corres = out_corresT [OF _ all_tcbI, OF ball_tcb_cap_casesI ball_tcb_cte_casesI] -crunch sch_act[wp]: tcbSchedEnqueue "\s. sch_act_wf (ksSchedulerAction s) s" - (simp: unless_def) - -crunch vq'[wp]: getCurThread valid_queues' - lemma tcbSchedDequeue_sch_act_simple[wp]: "tcbSchedDequeue t \sch_act_simple\" by (wpsimp simp: sch_act_simple_def) -lemma setP_vq[wp]: - "\\s. Invariants_H.valid_queues s \ tcb_at' t s \ sch_act_wf (ksSchedulerAction s) s \ valid_objs' s \ p \ maxPriority\ - setPriority t p - \\rv. Invariants_H.valid_queues\" - apply (simp add: setPriority_def) - apply (wpsimp ) - apply (wp hoare_vcg_imp_lift') - unfolding st_tcb_at'_def - apply (strengthen not_obj_at'_strengthen) - apply (wp hoare_wp_combs) - apply (wp hoare_vcg_imp_lift') - apply (wp threadSet_valid_queues threadSet_valid_objs_tcbPriority_update) - apply(wp threadSet_weak_sch_act_wf) - apply clarsimp - apply clarsimp - apply (wp hoare_vcg_imp_lift') - apply (wp threadSet_valid_queues threadSet_valid_objs_tcbPriority_update threadSet_sch_act, clarsimp) - apply (wp add:threadSet_valid_queues comb:hoare_drop_imps ) - apply (clarsimp simp: eq_commute[where a=t]) - apply (wp add: threadSet_valid_queues threadSet_valid_objs_tcbPriority_update threadSet_weak_sch_act_wf - hoare_vcg_imp_lift'[where P="\_ s. 
ksCurThread s \ _"] hoare_drop_imps hoare_vcg_all_lift - tcbSchedDequeue_not_in_queue tcbSchedEnqueue_valid_objs' tcbSchedDequeue_valid_queues - | clarsimp simp: valid_objs'_maxDomain valid_objs'_maxPriority)+ - done - -lemma valid_queues_subsetE': - "\ valid_queues' s; ksPSpace s = ksPSpace s'; - \x. set (ksReadyQueues s x) \ set (ksReadyQueues s' x) \ - \ valid_queues' s'" - by (simp add: valid_queues'_def obj_at'_def - ps_clear_def subset_iff projectKOs) - -crunch vq'[wp]: getCurThread valid_queues' - -lemma setP_vq'[wp]: - "\\s. valid_queues' s \ tcb_at' t s \ sch_act_wf (ksSchedulerAction s) s \ p \ maxPriority\ - setPriority t p - \\rv. valid_queues'\" - apply (simp add: setPriority_def) - apply (wpsimp wp: threadSet_valid_queues' hoare_drop_imps - threadSet_weak_sch_act_wf threadSet_sch_act) - apply (rule_tac Q="\_ s. valid_queues' s \ obj_at' (Not \ tcbQueued) t s \ sch_act_wf (ksSchedulerAction s) s - \ weak_sch_act_wf (ksSchedulerAction s) s" in hoare_strengthen_post, - wp tcbSchedDequeue_valid_queues' tcbSchedDequeue_not_queued) - apply (clarsimp simp: inQ_def) - apply normalise_obj_at' - apply clarsimp - done - -lemma setQueue_invs_bits[wp]: - "\valid_pspace'\ setQueue d p q \\rv. valid_pspace'\" - "\\s. sch_act_wf (ksSchedulerAction s) s\ setQueue d p q \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\\s. sym_refs (state_refs_of' s)\ setQueue d p q \\rv s. sym_refs (state_refs_of' s)\" - "\\s. sym_hyp_refs (state_refs_of' s)\ setQueue d p q \\rv s. sym_hyp_refs (state_refs_of' s)\" - "\if_live_then_nonz_cap'\ setQueue d p q \\rv. if_live_then_nonz_cap'\" - "\if_unsafe_then_cap'\ setQueue d p q \\rv. if_unsafe_then_cap'\" - "\cur_tcb'\ setQueue d p q \\rv. cur_tcb'\" - "\valid_global_refs'\ setQueue d p q \\rv. valid_global_refs'\" - "\valid_irq_handlers'\ setQueue d p q \\rv. valid_irq_handlers'\" - by (simp add: setQueue_def tcb_in_cur_domain'_def - | wp sch_act_wf_lift cur_tcb_lift - | fastforce)+ - -lemma setQueue_ex_idle_cap[wp]: - "\\s. ex_nonz_cap_to' (ksIdleThread s) s\ - setQueue d p q - \\rv s. ex_nonz_cap_to' (ksIdleThread s) s\" - by (simp add: setQueue_def, wp, - simp add: ex_nonz_cap_to'_def cte_wp_at_pspaceI) - lemma tcbPriority_caps_safe: "\tcb. \x\ran tcb_cte_cases. (\(getF, setF). getF (tcbPriority_update f tcb) = getF tcb) x" by (rule all_tcbI, rule ball_tcb_cte_casesI, simp+) @@ -796,22 +684,41 @@ lemma tcbPriority_Queued_caps_safe: "\tcb. \x\ran tcb_cte_cases. (\(getF, setF). getF (tcbPriority_update f (tcbQueued_update g tcb)) = getF tcb) x" by (rule all_tcbI, rule ball_tcb_cte_casesI, simp+) +lemma tcbSchedNext_update_tcb_cte_cases: + "(a, b) \ ran tcb_cte_cases \ a (tcbPriority_update f tcb) = a tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma threadSet_priority_invs': + "\invs' and tcb_at' t and K (p \ maxPriority)\ + threadSet (tcbPriority_update (\_. p)) t + \\_. 
invs'\" + apply (rule hoare_gen_asm) + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace' + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + | clarsimp simp: cteCaps_of_def tcbSchedNext_update_tcb_cte_cases | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: obj_at'_def) + lemma setP_invs': "\invs' and tcb_at' t and K (p \ maxPriority)\ setPriority t p \\rv. invs'\" - apply (rule hoare_gen_asm) - apply (simp add: setPriority_def) - apply (wp rescheduleRequired_all_invs_but_ct_not_inQ valid_irq_node_lift - | simp add: if_apply_def2)+ - apply (rule hoare_strengthen_post, rule threadSet_invs_trivial, simp+) - apply (clarsimp simp: invs'_def valid_state'_def invs_valid_objs' elim!: st_tcb_ex_cap'') - apply auto[1] - apply (rule_tac Q="\_. invs' and obj_at' (Not \ tcbQueued) t - and (\s. \d p. t \ set (ksReadyQueues s (d,p)))" - in hoare_post_imp) - apply (clarsimp dest: obj_at_ko_at' simp: obj_at'_def inQ_def) - apply (wp tcbSchedDequeue_not_queued)+ - apply (clarsimp)+ - done + unfolding setPriority_def + by (wpsimp wp: rescheduleRequired_invs' threadSet_priority_invs') crunches setPriority, setMCPriority for typ_at'[wp]: "\s. P (typ_at' T p s)" @@ -1105,11 +1012,6 @@ lemma setMCPriority_valid_objs'[wp]: crunch sch_act_simple[wp]: setMCPriority sch_act_simple (wp: ssa_sch_act_simple crunch_wps rule: sch_act_simple_lift simp: crunch_simps) -(* For some reason, when this was embedded in a larger expression clarsimp wouldn't remove it. Adding it as a simp rule does *) -lemma inQ_tc_corres_helper: - "(\d p. (\tcb. tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d \ (tcbQueued tcb \ tcbDomain tcb \ d)) \ a \ set (ksReadyQueues s (d, p))) = True" - by clarsimp - abbreviation "valid_option_prio \ case_option True (\(p, auth). p \ maxPriority)" definition valid_tcb_invocation :: "tcbinvocation \ bool" where @@ -1117,107 +1019,95 @@ definition valid_tcb_invocation :: "tcbinvocation \ bool" where ThreadControl _ _ _ mcp p _ _ _ \ valid_option_prio p \ valid_option_prio mcp | _ \ True" -lemma threadcontrol_corres_helper1: +lemma thread_set_ipc_weak_valid_sched_action: "\ einvs and simple_sched_action\ - thread_set (tcb_ipc_buffer_update f) a - \\x. weak_valid_sched_action and valid_etcbs\" + thread_set (tcb_ipc_buffer_update f) a + \\x. weak_valid_sched_action\" apply (rule hoare_pre) apply (simp add: thread_set_def) - apply (wpsimp wp: set_object_wp) + apply (wp set_object_wp) apply (simp | intro impI | elim exE conjE)+ apply (frule get_tcb_SomeD) apply (erule ssubst) apply (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) - apply (erule_tac x=a in allE)+ - apply (clarsimp simp: is_tcb_def) done -lemma threadcontrol_corres_helper2: - "is_aligned a msg_align_bits \ \invs' and tcb_at' t\ - threadSet (tcbIPCBuffer_update (\_. a)) t - \\x s. 
Invariants_H.valid_queues s \ valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s\" - by (wp threadSet_invs_trivial - | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: inQ_def )+ - lemma threadcontrol_corres_helper3: - "\ einvs and simple_sched_action\ - check_cap_at aaa (ab, ba) (check_cap_at (cap.ThreadCap a) slot (cap_insert aaa (ab, ba) (a, tcb_cnode_index 4))) - \\x. weak_valid_sched_action and valid_etcbs \" - apply (rule hoare_pre) - apply (wp check_cap_inv | simp add:)+ - by (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def - get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) + "\einvs and simple_sched_action\ + check_cap_at cap p (check_cap_at (cap.ThreadCap cap') slot (cap_insert cap p (t, tcb_cnode_index 4))) + \\_ s. weak_valid_sched_action s \ in_correct_ready_q s \ ready_qs_distinct s \ valid_etcbs s + \ pspace_aligned s \ pspace_distinct s\" + apply (wpsimp + | strengthen valid_sched_valid_queues valid_queues_in_correct_ready_q + valid_sched_weak_strg[rule_format] valid_queues_ready_qs_distinct)+ + apply (wpsimp wp: check_cap_inv) + apply (fastforce simp: valid_sched_def) + done lemma threadcontrol_corres_helper4: "isArchObjectCap ac \ - \invs' and cte_wp_at' (\cte. cteCap cte = capability.NullCap) (cte_map (a, tcb_cnode_index 4)) and valid_cap' ac \ - checkCapAt ac (cte_map (ab, ba)) - (checkCapAt (capability.ThreadCap a) (cte_map slot) - (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) - \\x. Invariants_H.valid_queues and valid_queues' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\" - apply (wp - | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: )+ + \invs' and cte_wp_at' (\cte. cteCap cte = capability.NullCap) (cte_map (a, tcb_cnode_index 4)) + and valid_cap' ac\ + checkCapAt ac (cte_map (ab, ba)) + (checkCapAt (capability.ThreadCap a) (cte_map slot) + (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) + \\_ s. sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_tcbs' s\" + apply (wpsimp wp: + | strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + invs_valid_objs' valid_objs'_valid_tcbs')+ by (case_tac ac; - clarsimp simp: capBadge_def isArchObjectCap_def isNotificationCap_def isEndpointCap_def - isReplyCap_def isIRQControlCap_def tcb_cnode_index_def cte_map_def cte_wp_at'_def + clarsimp simp: capBadge_def isCap_simps tcb_cnode_index_def cte_map_def cte_wp_at'_def cte_level_bits_def) +crunches cap_delete + for pspace_alinged[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" + (simp: crunch_simps preemption_point_def wp: crunch_wps OR_choiceE_weak_wp) + +lemmas check_cap_pspace_aligned[wp] = check_cap_inv[of pspace_aligned] +lemmas check_cap_pspace_distinct[wp] = check_cap_inv[of pspace_distinct] + lemma threadSet_invs_trivialT2: - assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" - assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" - assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" - assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" - assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" - assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" - assumes r: "\tcb. 
atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" shows - "\\s. invs' s - \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits) - \ tcb_at' t s - \ (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) - \ (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) - \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) - \ (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ - threadSet F t - \\rv. invs'\" -proof - - from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast - note threadSet_sch_actT_P[where P=False, simplified] - have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ - valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" - by (auto simp: z) - show ?thesis - apply (simp add: invs'_def valid_state'_def split del: if_split) - apply (rule hoare_pre) - apply (rule hoare_gen_asm [where P="(\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)"]) - apply (wp x v u b - threadSet_valid_pspace'T - threadSet_sch_actT_P[where P=False, simplified] - threadSet_valid_queues - threadSet_state_refs_of'T[where f'=id] - threadSet_state_hyp_refs_of' - threadSet_iflive'T - threadSet_ifunsafe'T - threadSet_idle'T - threadSet_global_refsT - irqs_masked_lift - valid_irq_node_lift - valid_irq_handlers_lift'' - threadSet_ctes_ofT - threadSet_not_inQ - threadSet_ct_idle_or_in_cur_domain' - threadSet_valid_dom_schedule' - threadSet_valid_queues' - threadSet_cur - untyped_ranges_zero_lift - |clarsimp simp: y z a r domains cteCaps_of_def valid_arch_tcb'_def|rule refl)+ - apply (clarsimp simp: obj_at'_def projectKOs pred_tcb_at'_def) - apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) - by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) -qed + "\\s. invs' s \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)\ + threadSet F t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (rule hoare_gen_asm [where P="\tcb. 
is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits"]) + apply (wp threadSet_valid_pspace'T + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_global_refsT + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_valid_dom_schedule' + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_idle'T + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + | clarsimp simp: assms cteCaps_of_def valid_arch_tcb'_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: obj_at'_def) lemma getThreadBufferSlot_dom_tcb_cte_cases: "\\\ getThreadBufferSlot a \\rv s. rv \ (+) a ` dom tcb_cte_cases\" @@ -1228,10 +1118,6 @@ lemma tcb_at'_cteInsert[wp]: "\\s. tcb_at' (ksCurThread s) s\ cteInsert t x y \\_ s. tcb_at' (ksCurThread s) s\" by (rule hoare_weaken_pre, wps cteInsert_ct, wp, simp) -lemma tcb_at'_asUser[wp]: - "\\s. tcb_at' (ksCurThread s) s\ asUser a (setTCBIPCBuffer b) \\_ s. tcb_at' (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps asUser_typ_ats(1), wp, simp) - lemma tcb_at'_threadSet[wp]: "\\s. tcb_at' (ksCurThread s) s\ threadSet (tcbIPCBuffer_update (\_. b)) a \\_ s. tcb_at' (ksCurThread s) s\" by (rule hoare_weaken_pre, wps threadSet_tcb', wp, simp) @@ -1245,6 +1131,16 @@ lemma valid_tcb_ipc_buffer_update: \ (\tcb. valid_tcb' tcb s \ valid_tcb' (tcbIPCBuffer_update (\_. buf) tcb) s)" by (simp add: valid_tcb'_def tcb_cte_cases_def) +crunches option_update_thread + for aligned[wp]: pspace_aligned + and distinct[wp]: pspace_distinct + +lemma threadSet_invs_tcbIPCBuffer_update: + "\\s. invs' s \ (\tcb. is_aligned (tcbIPCBuffer (tcbIPCBuffer_update f tcb)) msg_align_bits)\ + threadSet (tcbIPCBuffer_update f) t + \\_. invs'\" + by (wp threadSet_invs_trivialT2; simp add: tcb_cte_cases_def cteSizeBits_def) + lemma transferCaps_corres: assumes x: "newroot_rel e e'" assumes y: "newroot_rel f f'" @@ -1287,8 +1183,8 @@ lemma transferCaps_corres: (invokeTCB (tcbinvocation.ThreadControl a sl' b' mcp_auth p_auth e' f' g'))" proof - have P: "\t v. corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (option_update_thread t (tcb_fault_handler_update o (%x _. x)) (option_map to_bl v)) (case v of None \ return () @@ -1298,8 +1194,8 @@ proof - apply (safe, case_tac tcb', simp add: tcb_relation_def split: option.split) done have R: "\t v. corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (option_update_thread t (tcb_ipc_buffer_update o (%x _. x)) v) (case v of None \ return () | Some x \ threadSet (tcbIPCBuffer_update (%_. x)) t)" @@ -1312,7 +1208,7 @@ proof - (case_option (return ()) (\p'. setPriority t (fst p')) p_auth)" apply (case_tac p_auth; clarsimp simp: setPriority_corres) done - have S': "\t x. corres dc (tcb_at t) (tcb_at' t) + have S': "\t x. corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ (case_option (return ()) (\(mcp, auth). set_mcpriority t mcp) mcp_auth) (case_option (return ()) (\mcp'. 
setMCPriority t (fst mcp')) mcp_auth)" apply(case_tac mcp_auth; clarsimp simp: setMCPriority_corres) @@ -1436,10 +1332,20 @@ proof - apply (rule corres_split[OF getCurThread_corres], clarsimp) apply (rule corres_when[OF refl rescheduleRequired_corres]) apply (wpsimp wp: gct_wp)+ - apply (wp hoare_drop_imp) - apply (rule threadcontrol_corres_helper1[unfolded pred_conj_def]) - apply (wp hoare_drop_imp) - apply (wp threadcontrol_corres_helper2 | wpc | simp)+ + apply (strengthen valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_ipc_weak_valid_sched_action thread_set_valid_queues + hoare_drop_imp) + apply clarsimp + apply (strengthen valid_objs'_valid_tcbs' invs_valid_objs')+ + apply (wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers hoare_drop_imp + threadSet_invs_tcbIPCBuffer_update) + apply (clarsimp simp: pred_conj_def) + apply (strengthen einvs_valid_etcbs valid_queues_in_correct_ready_q + valid_sched_valid_queues invs_psp_aligned invs_distinct)+ + apply wp + apply (clarsimp simp: pred_conj_def) + apply (strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + valid_objs'_valid_tcbs' invs_valid_objs') apply (wpsimp wp: cteDelete_invs' hoare_vcg_conj_lift) apply (fastforce simp: emptyable_def) apply fastforce @@ -1467,10 +1373,10 @@ proof - cap_delete_valid_cap cteDelete_deletes cteDelete_invs' | strengthen use_no_cap_to_obj_asid_strg - | clarsimp simp: inQ_def inQ_tc_corres_helper)+ + | clarsimp simp: inQ_def)+ apply (clarsimp simp: cte_wp_at_caps_of_state dest!: is_cnode_or_valid_arch_cap_asid) - apply (clarsimp simp: emptyable_def) + apply (fastforce simp: emptyable_def) apply (clarsimp simp: inQ_def) apply (clarsimp simp: obj_at_def is_tcb) apply (rule cte_wp_at_tcbI, simp, fastforce, simp) @@ -1523,6 +1429,10 @@ proof - out_no_cap_to_trivial [OF ball_tcb_cap_casesI] checked_insert_no_cap_to note if_cong [cong] option.case_cong [cong] + \ \This proof is quite fragile and was written when bind_wp was added to the wp set later + in the theory dependencies, and so was matched with before alternatives. 
We re-add it here to + create a similar environment and avoid needing to rework the proof.\ + note bind_wp[wp] show ?thesis apply (simp add: invokeTCB_def liftE_bindE) apply (simp only: eq_commute[where a= "a"]) @@ -1538,41 +1448,26 @@ proof - apply wp apply wp apply (wpsimp wp: hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift - hoare_vcg_all_lift_R hoare_vcg_all_lift as_user_invs cap_delete_deletes - thread_set_ipc_tcb_cap_valid thread_set_tcb_ipc_buffer_cap_cleared_invs - thread_set_cte_wp_at_trivial thread_set_valid_cap cap_delete_valid_cap - reschedule_preserves_valid_sched thread_set_not_state_valid_sched + hoare_vcg_all_liftE_R hoare_vcg_all_lift + as_user_invs thread_set_ipc_tcb_cap_valid + thread_set_tcb_ipc_buffer_cap_cleared_invs + thread_set_cte_wp_at_trivial + thread_set_valid_cap + reschedule_preserves_valid_sched check_cap_inv[where P=valid_sched] (* from stuff *) check_cap_inv[where P="tcb_at p0" for p0] - simp: ran_tcb_cap_cases) + thread_set_not_state_valid_sched + check_cap_inv[where P=simple_sched_action] + cap_delete_deletes hoare_drop_imps + cap_delete_valid_cap + simp: ran_tcb_cap_cases + | strengthen simple_sched_action_sched_act_not)+ apply (strengthen use_no_cap_to_obj_asid_strg) apply (wpsimp wp: cap_delete_cte_at cap_delete_valid_cap) - apply (wpsimp wp: hoare_drop_imps) - apply ((wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_imp_lift' hoare_vcg_all_lift - threadSet_cte_wp_at' threadSet_invs_trivialT2 cteDelete_invs' - simp: tcb_cte_cases_def), (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_cte_wp_at' - simp: tcb_cte_cases_def) - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_cap_to' threadSet_invs_trivialT2 - threadSet_cte_wp_at' hoare_drop_imps - simp: tcb_cte_cases_def) - apply (clarsimp) - apply ((wpsimp wp: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + threadSet_invs_tcbIPCBuffer_update threadSet_cte_wp_at' + | strengthen simple_sched_action_sched_act_not)+ + apply ((wpsimp wp: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift threadSet_valid_objs' thread_set_not_state_valid_sched thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial @@ -1584,14 +1479,14 @@ proof - | strengthen tcb_cap_always_valid_strg tcb_at_invs use_no_cap_to_obj_asid_strg - | (erule exE, clarsimp simp: word_bits_def))+) + | (erule exE, clarsimp simp: word_bits_def) | wp (once) hoare_drop_imps)+) apply (strengthen valid_tcb_ipc_buffer_update) - apply (strengthen invs_valid_objs')+ + apply (strengthen invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct') apply (wpsimp wp: cteDelete_invs' hoare_vcg_imp_lift' hoare_vcg_all_lift) apply wpsimp apply wpsimp apply (clarsimp 
cong: imp_cong conj_cong simp: emptyable_def) - apply (rule_tac Q'="\_. ?T2_pre" in hoare_post_imp_R[simplified validE_R_def, rotated]) + apply (rule_tac Q'="\_. ?T2_pre" in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) (* beginning to deal with is_nondevice_page_cap *) apply (clarsimp simp: emptyable_def is_nondevice_page_cap_simps is_cap_simps is_cnode_or_valid_arch_def obj_ref_none_no_asid cap_asid_def arch_cap_fun_lift_def @@ -1606,9 +1501,9 @@ proof - | wpc | strengthen tcb_cap_always_valid_strg use_no_cap_to_obj_asid_strg)+)[1] apply (clarsimp cong: imp_cong conj_cong) apply (rule_tac Q'="\_. ?T2_pre' and (\s. valid_option_prio p_auth)" - in hoare_post_imp_R[simplified validE_R_def, rotated]) + in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) apply (case_tac g'; clarsimp simp: isCap_simps ; clarsimp elim: invs_valid_objs' cong:imp_cong) - apply (wp add: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + apply (wp add: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift setMCPriority_invs' threadSet_valid_objs' thread_set_not_state_valid_sched setP_invs' typ_at_lifts [OF setPriority_typ_at'] @@ -1619,13 +1514,14 @@ proof - emptyable_def | wpc | strengthen tcb_cap_always_valid_strg use_no_cap_to_obj_asid_strg - | wp (once) add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs + | wp add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs | (erule exE, clarsimp simp: word_bits_def))+ (* the last two subgoals *) apply (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] tcb_at_st_tcb_at[symmetric] tcb_cap_valid_def is_cnode_or_valid_arch_def invs_valid_objs emptyable_def obj_ref_none_no_asid no_cap_to_obj_with_diff_ref_Null is_valid_vtable_root_def is_cap_simps cap_asid_def vs_cap_ref_def arch_cap_fun_lift_def + invs_psp_aligned invs_distinct cong: conj_cong imp_cong split: option.split_asm) by (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def objBits_defs @@ -1673,31 +1569,31 @@ lemma tc_invs': apply (simp only: eq_commute[where a="a"]) apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. 
x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp setMCPriority_invs' + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] - apply (wp add: setP_invs' static_imp_wp hoare_vcg_all_lift)+ + apply (wp add: setP_invs' hoare_weak_lift_imp hoare_vcg_all_lift)+ apply (rule case_option_wp_None_return[OF setP_invs'[simplified pred_conj_assoc]]) apply clarsimp apply wpfix apply assumption apply (rule case_option_wp_None_returnOk) - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak threadSet_invs_trivial2 threadSet_tcb' hoare_vcg_all_lift threadSet_cte_wp_at')+ - apply (wpsimp wp: static_imp_wpE cteDelete_deletes - hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + apply (wpsimp wp: hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_invs' cteDelete_typ_at'_lifts)+ apply (assumption | clarsimp cong: conj_cong imp_cong | (rule case_option_wp_None_returnOk) - | wpsimp wp: static_imp_wp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + | wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak hoare_vcg_imp_lift' hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] - hoare_vcg_const_imp_lift_R assertDerived_wp_weak static_imp_wpE cteDelete_deletes - hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + hoare_vcg_const_imp_lift_R assertDerived_wp_weak hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_typ_at'_lifts cteDelete_sch_act_simple)+ apply (clarsimp simp: tcb_cte_cases_def cte_level_bits_def objBits_defs tcbIPCBufferSlot_def) @@ -1714,7 +1610,7 @@ lemma setSchedulerAction_invs'[wp]: apply (simp add: setSchedulerAction_def) apply wp apply (clarsimp simp add: invs'_def valid_state'_def valid_irq_node'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs cur_tcb'_def + valid_queues_def bitmapQ_defs cur_tcb'_def ct_not_inQ_def) apply (simp add: ct_idle_or_in_cur_domain'_def) done @@ -1845,8 +1741,8 @@ lemma invokeTCB_corres: apply (rule TcbAcc_R.rescheduleRequired_corres) apply (rule corres_trivial, simp) apply (wpsimp wp: hoare_drop_imp)+ - apply (clarsimp simp: valid_sched_weak_strg einvs_valid_etcbs) - apply (clarsimp simp: Tcb_R.invs_valid_queues' Invariants_H.invs_queues) + apply (fastforce dest: valid_sched_valid_queues simp: valid_sched_weak_strg einvs_valid_etcbs) + apply fastforce done lemma tcbBoundNotification_caps_safe[simp]: @@ -1861,6 +1757,10 @@ lemma valid_bound_ntfn_lift: apply (wp typ_at_lifts[OF P])+ done +crunches setBoundNotification + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (ignore: threadSet wp: threadSet_sched_pointers) + lemma bindNotification_invs': "\bound_tcb_at' ((=) None) tcbptr and ex_nonz_cap_to' ntfnptr @@ -1871,9 +1771,9 @@ lemma bindNotification_invs': \\_. 
invs'\" including no_pre apply (simp add: bindNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp set_ntfn_valid_pspace' sbn_sch_act' sbn_valid_queues valid_irq_node_lift + apply (wp set_ntfn_valid_pspace' sbn_sch_act' valid_irq_node_lift setBoundNotification_ct_not_inQ valid_bound_ntfn_lift untyped_ranges_zero_lift | clarsimp dest!: global'_no_ex_cap simp: cteCaps_of_def)+ @@ -2044,7 +1944,7 @@ lemma eq_ucast_word8[simp]: done lemma checkPrio_corres: - "corres (ser \ dc) (tcb_at auth) (tcb_at' auth) + "corres (ser \ dc) (tcb_at auth and pspace_aligned and pspace_distinct) \ (check_prio p auth) (checkPrio p auth)" apply (simp add: check_prio_def checkPrio_def) apply (rule corres_guard_imp) @@ -2067,7 +1967,7 @@ lemma decodeSetPriority_corres: "\ cap_relation cap cap'; is_thread_cap cap; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ corres (ser \ tcbinv_relation) - (cur_tcb and valid_etcbs and (\s. \x \ set extras. s \ (fst x))) + (cur_tcb and valid_etcbs and (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) (invs' and (\s. \x \ set extras'. s \' (fst x))) (decode_set_priority args cap slot extras) (decodeSetPriority args cap' extras')" @@ -2075,18 +1975,17 @@ lemma decodeSetPriority_corres: clarsimp simp: decode_set_priority_def decodeSetPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) - apply (wpsimp simp: valid_cap_def valid_cap'_def)+ - done + by (wpsimp simp: valid_cap_def valid_cap'_def)+ lemma decodeSetMCPriority_corres: "\ cap_relation cap cap'; is_thread_cap cap; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ corres (ser \ tcbinv_relation) - (cur_tcb and valid_etcbs and (\s. \x \ set extras. s \ (fst x))) + (cur_tcb and valid_etcbs and (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) (invs' and (\s. \x \ set extras'. s \' (fst x))) (decode_set_mcpriority args cap slot extras) (decodeSetMCPriority args cap' extras')" @@ -2094,18 +1993,11 @@ lemma decodeSetMCPriority_corres: clarsimp simp: decode_set_mcpriority_def decodeSetMCPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) - apply (wpsimp simp: valid_cap_def valid_cap'_def)+ - done - -lemma valid_objs'_maxPriority': - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbMCP tcb \ maxPriority) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) - done + by (wpsimp simp: valid_cap_def valid_cap'_def)+ lemma getMCP_sp: "\P\ threadGet tcbMCP t \\rv. mcpriority_tcb_at' (\st. st = rv) t and P\" @@ -2131,7 +2023,7 @@ lemma checkPrio_wp: checkPrio prio auth \ \rv. 
P \,-" apply (simp add: checkPrio_def) - apply (wp NonDetMonadVCG.whenE_throwError_wp getMCP_wp) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) by (auto simp add: pred_tcb_at'_def obj_at'_def) lemma checkPrio_lt_ct: @@ -2140,7 +2032,7 @@ lemma checkPrio_lt_ct: lemma checkPrio_lt_ct_weak: "\\\ checkPrio prio auth \\rv s. mcpriority_tcb_at' (\mcp. ucast prio \ mcp) auth s\, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule checkPrio_lt_ct) apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) by (rule le_ucast_ucast_le) simp @@ -2200,7 +2092,8 @@ lemma decodeSetSchedParams_corres: "\ cap_relation cap cap'; is_thread_cap cap; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ corres (ser \ tcbinv_relation) - (cur_tcb and valid_etcbs and (\s. \x \ set extras. s \ (fst x))) + (cur_tcb and valid_etcbs and + (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) (invs' and (\s. \x \ set extras'. s \' (fst x))) (decode_set_sched_params args cap slot extras) (decodeSetSchedParams args cap' extras')" @@ -2211,7 +2104,7 @@ lemma decodeSetSchedParams_corres: apply (clarsimp split: list.split simp: list_all2_Cons2) apply (clarsimp simp: list_all2_Cons1 neq_Nil_conv val_le_length_Cons linorder_not_less) apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_split_norE[OF checkPrio_corres]) apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) @@ -2347,7 +2240,7 @@ lemma slotCapLongRunningDelete_corres: lemma slot_long_running_inv'[wp]: "\P\ slotCapLongRunningDelete ptr \\rv. P\" apply (simp add: slotCapLongRunningDelete_def) - apply (rule hoare_seq_ext [OF _ getCTE_inv]) + apply (rule bind_wp [OF _ getCTE_inv]) apply (rule hoare_pre, wpcw, (wp isFinalCapability_inv)+) apply simp done @@ -2517,11 +2410,11 @@ lemma decodeTCBConfigure_corres: apply (rule decodeSetIPCBuffer_corres; simp) apply (rule corres_splitEE) apply (rule decodeSetSpace_corres; simp) - apply (rule_tac F="is_thread_control set_params" in corres_gen_asm) - apply (rule_tac F="is_thread_control set_space" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_params" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_space" in corres_gen_asm) apply (rule_tac F="tcThreadCapSlot setSpace = cte_map slot" in corres_gen_asm2) apply (rule corres_trivial) - apply (clarsimp simp: returnOk_def is_thread_control_def2 is_cap_simps) + apply (clarsimp simp: tcb_invocation.is_ThreadControl_def returnOk_def is_cap_simps) apply (wp | simp add: invs_def valid_sched_def)+ done @@ -2552,15 +2445,13 @@ lemma decodeTCBConf_wf[wp]: apply (rule_tac Q'="\setSpace s. tcb_inv_wf' setSpace s \ tcb_inv_wf' setIPCParams s \ isThreadControl setSpace \ isThreadControl setIPCParams \ tcThread setSpace = t \ tcNewCRoot setSpace \ None" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp: isThreadControl_def2 cong: option.case_cong) apply wpsimp apply (fastforce simp: isThreadControl_def2 objBits_defs) done -declare hoare_True_E_R [simp del] - lemma lsft_real_cte: "\valid_objs'\ lookupSlotForThread t x \\rv. 
real_cte_at' rv\, -" apply (simp add: lookupSlotForThread_def) @@ -2628,8 +2519,7 @@ notes if_cong[cong] shows lemma decodeUnbindNotification_corres: "corres (ser \ tcbinv_relation) - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) \ (decode_unbind_notification (cap.ThreadCap t)) (decodeUnbindNotification (capability.ThreadCap t))" apply (simp add: decode_unbind_notification_def decodeUnbindNotification_def) @@ -2679,7 +2569,7 @@ lemma decodeTCBInvocation_corres: corres_guard_imp[OF decodeBindNotification_corres] corres_guard_imp[OF decodeUnbindNotification_corres] corres_guard_imp[OF decodeSetTLSBase_corres], - simp_all add: valid_cap_simps valid_cap_simps' invs_def valid_sched_def) + simp_all add: valid_cap_simps valid_cap_simps' invs_def valid_state_def valid_sched_def) apply (auto simp: list_all2_map1 list_all2_map2 elim!: list_all2_mono) done @@ -2749,7 +2639,7 @@ lemma restart_makes_simple': \\rv. st_tcb_at' simple' t\" apply (simp add: restart_def) apply (wp sts_st_tcb_at'_cases cancelIPC_simple - cancelIPC_st_tcb_at static_imp_wp | simp)+ + cancelIPC_st_tcb_at hoare_weak_lift_imp | simp)+ apply (rule hoare_strengthen_post [OF isStopped_inv]) prefer 2 apply assumption @@ -2779,6 +2669,7 @@ crunches getThreadBufferSlot, setPriority, setMCPriority lemma inv_tcb_IRQInactive: "\valid_irq_states'\ invokeTCB tcb_inv -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + including classic_wp_pre apply (simp add: invokeTCB_def) apply (rule hoare_pre) apply (wpc | diff --git a/proof/refine/ARM_HYP/Untyped_R.thy b/proof/refine/ARM_HYP/Untyped_R.thy index 227aff2920..a7518b29a6 100644 --- a/proof/refine/ARM_HYP/Untyped_R.thy +++ b/proof/refine/ARM_HYP/Untyped_R.thy @@ -396,7 +396,7 @@ next apply (simp add: word_le_nat_alt) apply (simp add: unat_arith_simps) apply wp+ - apply (wp hoare_drop_impE_R hoare_vcg_all_lift_R + apply (wp hoare_drop_impE_R hoare_vcg_all_liftE_R | clarsimp)+ apply (rule hoare_strengthen_post [where Q = "\r. invs and valid_cap r and cte_at slot"]) apply wp+ @@ -695,7 +695,7 @@ lemma map_ensure_empty': apply (wp getCTE_wp') apply (clarsimp elim!: cte_wp_at_weakenE') apply (erule meta_allE) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply clarsimp done @@ -1360,6 +1360,7 @@ crunches updateMDB, updateNewFreeIndex and ksWorkUnitsCompleted[wp]: "\s. P (ksWorkUnitsCompleted s)" and ksMachineState[wp]: "\s. P (ksMachineState s)" and ksArchState[wp]: "\s. P (ksArchState s)" + crunches insertNewCap for ksInterrupt[wp]: "\s. P (ksInterruptState s)" and norq[wp]: "\s. P (ksReadyQueues s)" @@ -1367,22 +1368,16 @@ crunches insertNewCap and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" and ksCurDomain[wp]: "\s. P (ksCurDomain s)" and ksCurThread[wp]: "\s. P (ksCurThread s)" + and sched_queues_projs[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + and tcbQueueds_of[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers (wp: crunch_wps) + crunch nosch[wp]: insertNewCaps "\s. P (ksSchedulerAction s)" (simp: crunch_simps zipWithM_x_mapM wp: crunch_wps) crunch exst[wp]: set_cdt "\s. 
P (exst s)" -(*FIXME: Move to StateRelation*) -lemma state_relation_schact[elim!]: - "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" - apply (simp add: state_relation_def) - done - -lemma state_relation_queues[elim!]: "(s,s') \ state_relation \ ready_queues_relation (ready_queues s) (ksReadyQueues s')" - apply (simp add: state_relation_def) - done - lemma set_original_symb_exec_l: "corres_underlying {(s, s'). f (kheap s) (exst s) s'} nf nf' dc P P' (set_original p b) (return x)" by (simp add: corres_underlying_def return_def set_original_def in_monad Bex_def) @@ -1413,6 +1408,10 @@ lemma updateNewFreeIndex_noop_psp_corres: | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ done +crunches updateMDB, updateNewFreeIndex, setCTE + for rdyq_projs[wp]: + "\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) (\d p. inQ d p |< tcbs_of' s)" + lemma insertNewCap_corres: notes if_cong[cong del] if_weak_cong[cong] shows @@ -3232,7 +3231,7 @@ lemma createNewCaps_valid_cap': lemma dmo_ctes_of[wp]: "\\s. P (ctes_of s)\ doMachineOp mop \\rv s. P (ctes_of s)\" - by (simp add: doMachineOp_def split_def | wp select_wp)+ + by (simp add: doMachineOp_def split_def | wp)+ lemma createNewCaps_ranges: "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 @@ -3556,7 +3555,7 @@ lemma updateFreeIndex_mdb_simple': and cte_wp_at' :"ctes_of s src = Some cte" "cteCap cte = capability.UntypedCap d ptr sz idx'" and unt_inc' :"untyped_inc' (ctes_of s)" and valid_objs' :"valid_objs' s" - and invp: "mdb_inv_preserve (ctes_of s) (ctes_of s(src \ cteCap_update (\_. capability.UntypedCap d ptr sz idx) cte))" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. UntypedCap d ptr sz idx) cte))" (is "mdb_inv_preserve (ctes_of s) ?ctes") show "untyped_inc' ?ctes" @@ -3656,7 +3655,7 @@ lemma updateFreeIndex_clear_invs': apply (wp valid_irq_node_lift setCTE_typ_at') apply (wp getCTE_wp) apply (simp add:updateCap_def) - apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift + apply (wp irqs_masked_lift cur_tcb_lift ct_idle_or_in_cur_domain'_lift valid_bitmaps_lift hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp setCTE_irq_handlers' | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] | simp add: getSlotCap_def @@ -4062,15 +4061,17 @@ lemma idx_le_new_offs: end +context begin interpretation Arch . (*FIXME: arch_split*) + lemma valid_sched_etcbs[elim!]: "valid_sched_2 queues ekh sa cdom kh ct it \ valid_etcbs_2 ekh kh" by (simp add: valid_sched_def) crunch ksIdleThread[wp]: deleteObjects "\s. P (ksIdleThread s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp ignore: freeMemory) + (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) crunch ksCurDomain[wp]: deleteObjects "\s. P (ksCurDomain s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp ignore: freeMemory) + (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) crunch irq_node[wp]: deleteObjects "\s. P (irq_node' s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp ignore: freeMemory) + (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) lemma deleteObjects_ksCurThread[wp]: "\\s. P (ksCurThread s)\ deleteObjects ptr sz \\_ s. P (ksCurThread s)\" @@ -4220,14 +4221,12 @@ context begin interpretation Arch . 
(*FIXME: arch_split*) lemma resetUntypedCap_corres: "untypinv_relation ui ui' \ corres (dc \ dc) - (invs and valid_untyped_inv_wcap ui - (Some (cap.UntypedCap dev ptr sz idx)) - and ct_active and einvs - and (\_. \ptr_base ptr' ty us slots dev'. ui = Invocations_A.Retype slot True - ptr_base ptr' ty us slots dev)) - (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') - (reset_untyped_cap slot) - (resetUntypedCap (cte_map slot))" + (einvs and schact_is_rct and ct_active + and valid_untyped_inv_wcap ui (Some (cap.UntypedCap dev ptr sz idx)) + and (\_. \ptr_base ptr' ty us slots dev'. + ui = Invocations_A.Retype slot True ptr_base ptr' ty us slots dev)) + (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') + (reset_untyped_cap slot) (resetUntypedCap (cte_map slot))" apply (rule corres_gen_asm, clarsimp) apply (simp add: reset_untyped_cap_def resetUntypedCap_def liftE_bindE) @@ -4378,7 +4377,7 @@ lemma resetUntypedCap_corres: apply (frule if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) apply (frule(1) descendants_range_ex_cte'[OF empty_descendants_range_in' _ order_refl], (simp add: isCap_simps)+) - apply (intro conjI impI; clarsimp) + apply (auto simp: descendants_range_in'_def valid_untyped'_def) done end @@ -4477,7 +4476,7 @@ lemma resetUntypedCap_invs_etc: ?f \\_. invs' and ?vu2 and ct_active' and ?psp\, \\_. invs'\") apply (simp add: resetUntypedCap_def getSlotCap_def liftE_bind_return_bindE_returnOk bindE_assoc) - apply (rule hoare_vcg_seqE[rotated]) + apply (rule bindE_wp_fwd) apply simp apply (rule getCTE_sp) apply (rule hoare_name_pre_stateE) @@ -4494,8 +4493,8 @@ lemma resetUntypedCap_invs_etc: (simp_all add: cte_wp_at_ctes_of)+)[1] apply (clarsimp simp: unlessE_def cte_wp_at_ctes_of split del: if_split) - apply (rule_tac B="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) - and ct_active' and ?psp" in hoare_vcg_seqE[rotated]) + apply (rule_tac Q'="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) and ct_active' and ?psp" + in bindE_wp_fwd) apply clarsimp apply (rule hoare_pre) apply (simp add: sch_act_simple_def) @@ -4537,7 +4536,7 @@ lemma resetUntypedCap_invs_etc: modify_map_def) apply auto[1] apply simp - apply (rule hoare_pre, rule hoare_post_impErr, + apply (rule hoare_pre, rule hoare_strengthen_postE, rule_tac P="\i. invs' and ?psp and ct_active' and valid_untyped_inv_wcap' ?ui (Some (UntypedCap dev ptr sz (if i = 0 then idx else (length [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] - i) * 2 ^ resetChunkBits)))" @@ -4625,7 +4624,7 @@ lemma whenE_reset_resetUntypedCap_invs_etc: and ct_active' and pspace_no_overlap' (if reset then ptr else ptr') sz\, \\_. invs'\" apply (rule hoare_pre) - apply (wp hoare_whenE_wp resetUntypedCap_invs_etc[where idx=idx, + apply (wp whenE_wp resetUntypedCap_invs_etc[where idx=idx, simplified pred_conj_def conj_assoc] | simp)+ apply (clarsimp simp: cte_wp_at_ctes_of) @@ -4637,6 +4636,8 @@ lemma whenE_reset_resetUntypedCap_invs_etc: crunch ksCurDomain[wp]: updateFreeIndex "\s. P (ksCurDomain s)" +end + lemma (in range_cover) funky_aligned: "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" apply (rule aligned_add_aligned) @@ -4648,10 +4649,13 @@ lemma (in range_cover) funky_aligned: context begin interpretation Arch . (*FIXME: arch_split*) +defs archOverlap_def: + "archOverlap \ \_ _. 
False" + lemma inv_untyped_corres': "\ untypinv_relation ui ui' \ \ corres (dc \ (=)) - (einvs and valid_untyped_inv ui and ct_active) + (einvs and valid_untyped_inv ui and ct_active and schact_is_rct) (invs' and valid_untyped_inv' ui' and ct_active') (invoke_untyped ui) (invokeUntyped ui')" apply (cases ui) @@ -4670,6 +4674,7 @@ lemma inv_untyped_corres': (cte_map cref) reset ptr_base ptr ao' us (map cte_map slots) dev" assume invs: "invs (s :: det_state)" "ct_active s" "valid_list s" "valid_sched s" + "schact_is_rct s" and invs': "invs' s'" "ct_active' s'" and sr: "(s, s') \ state_relation" and vui: "valid_untyped_inv_wcap ?ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) s" @@ -4881,7 +4886,8 @@ lemma inv_untyped_corres': show " corres (dc \ (=)) ((=) s) ((=) s') (invoke_untyped ?ui) (invokeUntyped ?ui')" - apply (clarsimp simp:invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc) + apply (clarsimp simp: invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc + archOverlap_def) apply (insert cover) apply (rule corres_guard_imp) apply (rule corres_split_norE) @@ -4971,9 +4977,9 @@ lemma inv_untyped_corres': \ valid_untyped_inv_wcap ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s \ (reset \ pspace_no_overlap {ptr && ~~ mask sz..(ptr && ~~ mask sz) + 2 ^ sz - 1} s) - " in hoare_post_imp_R) + " in hoare_strengthen_postE_R) apply (simp add: whenE_def split del: if_split, wp) - apply (rule validE_validE_R, rule hoare_post_impErr, rule reset_untyped_cap_invs_etc, auto)[1] + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule reset_untyped_cap_invs_etc, auto)[1] apply wp apply (clarsimp simp: ui cte_wp_at_caps_of_state bits_of_def untyped_range.simps) @@ -5014,7 +5020,7 @@ lemma inv_untyped_corres': apply (drule invoke_untyped_proofs.usable_range_disjoint) apply (clarsimp simp: field_simps mask_out_sub_mask shiftl_t2n) - apply ((rule validE_validE_R)?, rule hoare_post_impErr, + apply ((rule validE_validE_R)?, rule hoare_strengthen_postE, rule whenE_reset_resetUntypedCap_invs_etc[where ptr="ptr && ~~ mask sz" and ptr'=ptr and sz=sz and idx=idx and ui=ui' and dev=dev]) @@ -5055,7 +5061,7 @@ lemma inv_untyped_corres': apply (clarsimp simp only: pred_conj_def invs ui) apply (strengthen vui) apply (cut_tac vui invs invs') - apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs) + apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs schact_is_rct_def) apply (cut_tac vui' invs') apply (clarsimp simp: ui cte_wp_at_ctes_of if_apply_def2 ui') done @@ -5085,7 +5091,7 @@ lemma sts_valid_untyped_inv': crunch nosch[wp]: invokeUntyped "\s. 
P (ksSchedulerAction s)" (simp: crunch_simps zipWithM_x_mapM - wp: crunch_wps hoare_unless_wp mapME_x_inv_wp preemptionPoint_inv) + wp: crunch_wps unless_wp mapME_x_inv_wp preemptionPoint_inv) crunch no_0_obj'[wp]: insertNewCap no_0_obj' (wp: crunch_wps) @@ -5218,9 +5224,6 @@ crunch irq_states' [wp]: insertNewCap valid_irq_states' crunch pde_mappings' [wp]: insertNewCap valid_pde_mappings' (wp: getCTE_wp') -crunch vq'[wp]: insertNewCap valid_queues' - (wp: crunch_wps) - crunch irqs_masked' [wp]: insertNewCap irqs_masked' (wp: crunch_wps rule: irqs_masked_lift) @@ -5311,8 +5314,8 @@ lemma insertNewCap_invs': apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp insertNewCap_valid_pspace' sch_act_wf_lift - valid_queues_lift cur_tcb_lift tcb_in_cur_domain'_lift - insertNewCap_valid_global_refs' + cur_tcb_lift tcb_in_cur_domain'_lift valid_bitmaps_lift + insertNewCap_valid_global_refs' sym_heap_sched_pointers_lift valid_irq_node_lift insertNewCap_valid_irq_handlers) apply (clarsimp simp: cte_wp_at_ctes_of) apply (frule ctes_of_valid[rotated, where p=parent, OF valid_pspace_valid_objs']) @@ -5538,14 +5541,14 @@ lemma invokeUntyped_invs'': apply (clarsimp simp:invokeUntyped_def getSlotCap_def ui) apply (rule validE_valid) apply (rule hoare_pre) - apply (rule_tac B="\_ s. invs' s \ Q s \ ct_active' s - \ valid_untyped_inv_wcap' ui - (Some (UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s - \ (reset \ pspace_no_overlap' (ptr && ~~ mask sz) sz s) - " in hoare_vcg_seqE[rotated]) + apply (rule_tac Q'="\_ s. invs' s \ Q s \ ct_active' s + \ valid_untyped_inv_wcap' ui + (Some (UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s + \ (reset \ pspace_no_overlap' (ptr && ~~ mask sz) sz s)" + in bindE_wp_fwd) apply (simp only: whenE_def) apply wp - apply (rule hoare_post_impErr, rule combine_validE, + apply (rule hoare_strengthen_postE, rule combine_validE, rule resetUntypedCap_invs_etc, rule valid_validE, rule reset_Q') apply (clarsimp simp only: if_True) apply auto[1] @@ -5639,7 +5642,7 @@ lemma invokeUntyped_invs'[wp]: "\invs' and valid_untyped_inv' ui and ct_active'\ invokeUntyped ui \\rv. invs'\" - apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_post_taut, simplified]) + apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_TrueI, simplified]) apply auto done @@ -5710,7 +5713,7 @@ lemma resetUntypedCap_IRQInactive: (is "\?P\ resetUntypedCap slot \?Q\,\?E\") apply (simp add: resetUntypedCap_def) apply (rule hoare_pre) - apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_post_impErr] + apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_strengthen_postE] doMachineOp_irq_states' preemptionPoint_inv hoare_drop_imps | simp add: no_irq_clearMemory if_apply_def2)+ done @@ -5719,8 +5722,7 @@ lemma inv_untyped_IRQInactive: "\valid_irq_states'\ invokeUntyped ui -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (simp add: invokeUntyped_def) - apply (rule hoare_pre) - apply (wp hoare_whenE_wp resetUntypedCap_IRQInactive | wpc | simp)+ + apply (wpsimp wp: resetUntypedCap_IRQInactive) done end diff --git a/proof/refine/ARM_HYP/VSpace_R.thy b/proof/refine/ARM_HYP/VSpace_R.thy index c677628bc7..1ca6ff824d 100644 --- a/proof/refine/ARM_HYP/VSpace_R.thy +++ b/proof/refine/ARM_HYP/VSpace_R.thy @@ -616,11 +616,6 @@ lemma setVCPU_ksQ[wp]: "\\s. P (ksReadyQueues s)\ setObject p (v::vcpu) \\rv s. 
P (ksReadyQueues s)\" by (wp setObject_qs updateObject_default_inv | simp)+ -lemma setVCPU_valid_queues'[wp]: - "setObject v (vcpu::vcpu) \valid_queues'\" - unfolding valid_queues'_def - by (rule hoare_lift_Pf[where f=ksReadyQueues]; wp hoare_vcg_all_lift updateObject_default_inv) - lemma setVCPU_ct_not_inQ[wp]: "setObject v (vcpu::vcpu) \ct_not_inQ\" apply (wp ct_not_inQ_lift) @@ -629,8 +624,8 @@ lemma setVCPU_ct_not_inQ[wp]: done lemma handleVMFault_corres: - "corres (fr \ dc) (tcb_at thread) (tcb_at' thread) - (handle_vm_fault thread fault) (handleVMFault thread fault)" + "corres (fr \ dc) (tcb_at thread and pspace_aligned and pspace_distinct) \ + (handle_vm_fault thread fault) (handleVMFault thread fault)" apply (simp add: ARM_HYP_H.handleVMFault_def) apply (cases fault) apply simp @@ -739,12 +734,12 @@ lemma find_pd_for_asid_pd_at_asid_again: apply (unfold validE_def, rule hoare_name_pre_state, fold validE_def) apply (case_tac "\pd. vspace_at_asid asid pd s") apply clarsimp - apply (rule_tac Q="\rv s'. s' = s \ rv = pd" and E="\\" in hoare_post_impErr) + apply (rule_tac Q="\rv s'. s' = s \ rv = pd" and E="\\" in hoare_strengthen_postE) apply (rule hoare_pre, wp find_pd_for_asid_valids) apply fastforce apply simp+ apply (rule_tac Q="\rv s'. s' = s \ vspace_at_asid asid rv s'" - and E="\rv s'. s' = s" in hoare_post_impErr) + and E="\rv s'. s' = s" in hoare_strengthen_postE) apply (rule hoare_pre, wp) apply clarsimp+ done @@ -804,18 +799,18 @@ lemma vcpuUpdate_corres[corres]: "\v1 v2. vcpu_relation v1 v2 \ vcpu_relation (f v1) (f' v2) \ corres dc (vcpu_at v) (vcpu_at' v) (vcpu_update v f) (vcpuUpdate v f')" - by (corressimp corres: getObject_vcpu_corres setObject_VCPU_corres + by (corresKsimp corres: getObject_vcpu_corres setObject_VCPU_corres simp: vcpu_update_def vcpuUpdate_def vcpu_relation_def) lemma vgicUpdate_corres[corres]: "\vgic vgic'. 
vgic_map vgic = vgic' \ vgic_map (f vgic) = (f' vgic') \ corres dc (vcpu_at v) (vcpu_at' v) (vgic_update v f) (vgicUpdate v f')" - by (corressimp simp: vgic_update_def vgicUpdate_def vcpu_relation_def) + by (corresKsimp simp: vgic_update_def vgicUpdate_def vcpu_relation_def) lemma vgicUpdateLR_corres[corres]: "corres dc (vcpu_at v) (vcpu_at' v) (vgic_update_lr v idx val) (vgicUpdateLR v idx val)" - by (corressimp simp: vgic_update_lr_def vgicUpdateLR_def vgic_map_def) + by (corresKsimp simp: vgic_update_lr_def vgicUpdateLR_def vgic_map_def) lemma vcpuReadReg_corres[corres]: "corres (=) (vcpu_at v) (vcpu_at' v and no_0_obj') @@ -982,7 +977,7 @@ lemma vcpuDisable_corres: apply (cases vcpuopt; clarsimp simp: vcpu_disable_def vcpuDisable_def) (* no current VCPU *) subgoal - apply (clarsimp simp: doMachineOp_bind do_machine_op_bind) + apply (clarsimp simp: doMachineOp_bind do_machine_op_bind empty_fail_cond) apply (rule corres_guard_imp) apply (rule corres_split_dc[OF corres_machine_op] | rule corres_machine_op corres_Id @@ -1036,7 +1031,7 @@ lemma vcpuRestore_corres: apply (case_tac vcpu' , clarsimp simp: comp_def vcpu_relation_def vgic_map_def mapM_x_mapM uncurry_def split_def mapM_map_simp) - apply (simp add: doMachineOp_bind do_machine_op_bind bind_assoc) + apply (simp add: doMachineOp_bind do_machine_op_bind bind_assoc empty_fail_cond) apply (rule corres_split_dc[OF corres_machine_op]) apply (rule corres_Id; wpsimp) apply (rule corres_split_dc[OF corres_machine_op]) @@ -1088,7 +1083,7 @@ lemma vcpuSwitch_corres: vcpuEnable_corres vcpuRestore_corres vcpuSave_corres - hoare_post_taut conjI + hoare_TrueI conjI corres_underlying_split corres_guard_imp | clarsimp simp add: when_def | wpsimp | assumption)+ done @@ -1424,13 +1419,9 @@ lemma deleteASID_corres: apply (simp add: vs_refs_def) apply (rule image_eqI[rotated], erule graph_ofI) apply (simp add: mask_asid_low_bits_ucast_ucast) - apply wp - apply (simp add: o_def) - apply (wp getASID_wp) - apply clarsimp - apply assumption - apply wp+ - apply clarsimp + \ \rewrite assumption so that the goal can refer to the C variable instead of the abstract's.\ + apply (drule Some_to_the) + apply (wpsimp wp: getASID_wp)+ apply (clarsimp simp: valid_arch_state_def valid_asid_table_def dest!: invs_arch_state) apply blast @@ -1712,7 +1703,7 @@ lemma storeHWASID_valid_arch' [wp]: apply (rule_tac Q'="\rv s. 
valid_asid_map' (armKSASIDMap (ksArchState s)) \ asid \ 0 \ asid \ mask asid_bits \ armKSGICVCPUNumListRegs (ksArchState s) \ max_armKSGICVCPUNumListRegs" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp findPDForASID_inv2)+ apply clarsimp apply (clarsimp simp: valid_asid_map'_def) @@ -1847,7 +1838,7 @@ lemma flushTable_corres: apply ((wp mapM_wp' hoare_vcg_const_imp_lift get_pte_wp getPTE_wp| wpc|simp|fold cur_tcb_def cur_tcb'_def)+)[4] apply (wpsimp wp: hoare_drop_imps | fold cur_tcb_def cur_tcb'_def)+ - apply (wpsimp wp: hoare_post_taut load_hw_asid_wp simp: valid_global_objs_def + apply (wpsimp wp: hoare_TrueI load_hw_asid_wp simp: valid_global_objs_def | rule hoare_drop_imps)+ done @@ -1882,7 +1873,7 @@ lemma flushPage_corres: apply (rule setVMRoot_corres) apply wp+ apply (simp add: cur_tcb_def [symmetric] cur_tcb'_def [symmetric]) - apply (wpsimp wp: hoare_post_taut load_hw_asid_wp simp: valid_global_objs_def + apply (wpsimp wp: hoare_TrueI load_hw_asid_wp simp: valid_global_objs_def | rule hoare_drop_imps | fold cur_tcb_def cur_tcb'_def)+ done @@ -2262,7 +2253,7 @@ lemma unmapPage_corres: apply clarsimp apply (rule flushPage_corres) apply wp - apply (rule_tac Q'="\_. invs and vspace_at_asid asid pda" in hoare_post_imp_R) + apply (rule_tac Q'="\_. invs and vspace_at_asid asid pda" in hoare_strengthen_postE_R) apply (wpsimp wp: lookup_pt_slot_inv lookup_pt_slot_cap_to2' lookup_pt_slot_cap_to_multiple2 store_pde_invs_unmap store_pde_pd_at_asid mapM_swp_store_pde_invs_unmap simp: largePagePTEOffsets_def pte_bits_def @@ -2709,8 +2700,8 @@ lemma message_info_from_data_eqv: lemma setMessageInfo_corres: "mi' = message_info_map mi \ - corres dc (tcb_at t) (tcb_at' t) - (set_message_info t mi) (setMessageInfo t mi')" + corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (set_message_info t mi) (setMessageInfo t mi')" apply (simp add: setMessageInfo_def set_message_info_def) apply (subgoal_tac "wordFromMessageInfo (message_info_map mi) = message_info_to_data mi") @@ -3580,23 +3571,6 @@ lemma setVCPU_valid_arch': apply (wp hoare_vcg_all_lift hoare_drop_imp)+ done -lemma setVCPU_valid_queues [wp]: - "\valid_queues\ setObject p (v::vcpu) \\_. valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -crunches - vcpuDisable, vcpuRestore, vcpuEnable, vcpuUpdate, vcpuSaveRegRange, vgicUpdateLR - for valid_queues[wp]: valid_queues - (ignore: doMachineOp wp: mapM_x_wp) - -lemma vcpuSave_valid_queues[wp]: - "\Invariants_H.valid_queues\ vcpuSave param_a \\_. Invariants_H.valid_queues\" - by (wpsimp simp: vcpuSave_def armvVCPUSave_def wp: mapM_x_wp cong: option.case_cong_weak | simp)+ - -lemma vcpuSwitch_valid_queues[wp]: - "\Invariants_H.valid_queues\ vcpuSwitch param_a \\_. Invariants_H.valid_queues\" - by (wpsimp simp: vcpuSwitch_def modifyArchState_def | simp)+ - lemma isb_invs_no_cicd'[wp]: "\invs_no_cicd'\ doMachineOp isb \\rv. invs_no_cicd'\" apply (wpsimp wp: dmo_invs_no_cicd' no_irq no_irq_isb) @@ -3691,6 +3665,10 @@ lemma get_gic_vcpu_ctrl_vmcr_invs_no_cicd'[wp]: by (wpsimp wp: dmo_invs_no_cicd' no_irq_get_gic_vcpu_ctrl_vmcr no_irq simp: get_gic_vcpu_ctrl_vmcr_def gets_def in_monad) +lemma setVCPU_tcbs_of'[wp]: + "setObject v (vcpu :: vcpu) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + lemma setVCPU_regs_r_invs_cicd': "\invs_no_cicd' and ko_at' vcpu v\ setObject v (vcpuRegs_update (\_. (vcpuRegs vcpu)(r:=rval)) vcpu) \\_. 
invs_no_cicd'\" @@ -3705,7 +3683,7 @@ lemma setVCPU_regs_r_invs_cicd': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_regs_valid_arch' setVCPU_regs_vcpu_live + setVCPU_regs_valid_arch' setVCPU_regs_vcpu_live valid_bitmaps_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) @@ -3727,7 +3705,7 @@ lemma setVCPU_vgic_invs_cicd': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_vgic_valid_arch' + setVCPU_vgic_valid_arch' valid_bitmaps_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) @@ -3749,7 +3727,7 @@ lemma setVCPU_VPPIMasked_invs_cicd': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_VPPIMasked_valid_arch' + setVCPU_VPPIMasked_valid_arch' valid_bitmaps_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) @@ -3771,7 +3749,7 @@ lemma setVCPU_VTimer_invs_cicd': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_VTimer_valid_arch' + setVCPU_VTimer_valid_arch' valid_bitmaps_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) @@ -3905,13 +3883,12 @@ lemma vcpuSave_invs_no_cicd'[wp]: | assumption)+ lemma valid_arch_state'_armHSCurVCPU_update[simp]: - "ko_wp_at' (is_vcpu' and hyp_live') v s \ - valid_arch_state' s \ valid_arch_state' (s\ksArchState := armHSCurVCPU_update (\_. Some (v, b)) (ksArchState s)\)" - by (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) + "\ ko_wp_at' (is_vcpu' and hyp_live') v s; valid_arch_state' s \ \ + valid_arch_state' (s\ksArchState := armHSCurVCPU_update (\_. Some (v, b)) (ksArchState s)\)" + by (clarsimp simp: invs'_def valid_state'_def + bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def + valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def cur_tcb'_def) lemma dmo_vcpu_hyp: "\ko_wp_at' (is_vcpu' and hyp_live') v\ doMachineOp f \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" @@ -3992,20 +3969,18 @@ lemma vcpuSwitch_valid_arch_state'[wp]: lemma invs_no_cicd'_armHSCurVCPU_update[simp]: "ko_wp_at' (is_vcpu' and hyp_live') v s \ invs_no_cicd' s \ invs_no_cicd' (s\ksArchState := armHSCurVCPU_update (\_. 
Some (v, b)) (ksArchState s)\)" - by (clarsimp simp: invs_no_cicd'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) + by (clarsimp simp: invs_no_cicd'_def valid_state'_def + bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def + valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def cur_tcb'_def) lemma invs'_armHSCurVCPU_update[simp]: "ko_wp_at' (is_vcpu' and hyp_live') v s \ invs' s \ invs' (s\ksArchState := armHSCurVCPU_update (\_. Some (v, b)) (ksArchState s)\)" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) + apply (clarsimp simp: invs'_def valid_state'_def + bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def + valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def cur_tcb'_def) done lemma armHSCurVCPU_None_invs'[wp]: @@ -4029,7 +4004,7 @@ lemma setVCPU_vgic_invs': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_vgic_valid_arch' + setVCPU_vgic_valid_arch' valid_bitmaps_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) @@ -4049,7 +4024,7 @@ lemma setVCPU_regs_invs': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_regs_valid_arch' + setVCPU_regs_valid_arch' valid_bitmaps_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) @@ -4069,7 +4044,7 @@ lemma setVCPU_VPPIMasked_invs': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_VPPIMasked_valid_arch' + setVCPU_VPPIMasked_valid_arch' valid_bitmaps_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) @@ -4089,7 +4064,7 @@ lemma setVCPU_VTimer_invs': cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift valid_pde_mappings_lift' setObject_typ_at' cur_tcb_lift - setVCPU_VTimer_valid_arch' + setVCPU_VTimer_valid_arch' valid_bitmaps_lift simp: objBits_simps archObjSize_def vcpu_bits_def pageBits_def state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) @@ -4125,7 +4100,8 @@ lemma vcpuDisable_invs'[wp]: getSCTLR_def get_gic_vcpu_ctrl_hcr_def dsb_def vgicUpdate_def vcpuUpdate_def vcpuSaveReg_def by (wpsimp wp: 
dmo'_gets_wp setVCPU_vgic_invs' setVCPU_regs_invs' dmo_maskInterrupt_True - simp: doMachineOp_bind) + hoare_drop_imps + simp: doMachineOp_bind empty_fail_cond) lemma vcpuInvalidateActive_invs'[wp]: "vcpuInvalidateActive \invs'\" @@ -4484,14 +4460,6 @@ lemma storePDE_nordL2[wp]: "\\s. P (ksReadyQueuesL2Bitmap s)\ storePDE param_a param_b \\_ s. P (ksReadyQueuesL2Bitmap s)\" by (wpsimp wp: headM_inv hoare_drop_imp simp: storePDE_def updateObject_default_def) -lemma storePDE_valid_queues [wp]: - "\Invariants_H.valid_queues\ storePDE p pde \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma storePDE_valid_queues' [wp]: - "\valid_queues'\ storePDE p pde \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma storePDE_iflive [wp]: "\if_live_then_nonz_cap'\ storePDE p pde \\rv. if_live_then_nonz_cap'\" apply (wpsimp simp: storePDE_def objBits_simps archObjSize_def vspace_bits_defs @@ -4647,6 +4615,22 @@ lemma storePTE_gsUntypedZeroRanges[wp]: "\\s. P (gsUntypedZeroRanges s)\ storePTE p pde \\rv s. P (gsUntypedZeroRanges s)\" by (wpsimp wp: headM_inv hoare_drop_imp simp: storePTE_def updateObject_default_def setObject_def) +lemma setObject_pte_tcb_of'[wp]: + "setObject slote (pte::pte) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +crunches storePTE + for tcbs_of'[wp]: "\s. P (tcbs_of' s)" + (wp: crunch_wps) + +lemma setObject_pde_tcb_of'[wp]: + "setObject slote (pde::pde) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +crunches storePDE + for tcbs_of'[wp]: "\s. P (tcbs_of' s)" + (wp: crunch_wps) + lemma storePDE_invs[wp]: "\invs' and valid_pde' pde and (\s. valid_pde_mapping' (p && mask pdBits) pde)\ @@ -4658,7 +4642,7 @@ lemma storePDE_invs[wp]: irqs_masked_lift valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' - untyped_ranges_zero_lift + untyped_ranges_zero_lift sym_heap_sched_pointers_lift valid_bitmaps_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp done @@ -4690,14 +4674,6 @@ lemma storePTE_nordL2[wp]: "\\s. P (ksReadyQueuesL2Bitmap s)\ storePTE param_a param_b \\_ s. P (ksReadyQueuesL2Bitmap s)\" by (wpsimp wp: headM_inv hoare_drop_imp simp: storePTE_def updateObject_default_def) -lemma storePTE_valid_queues [wp]: - "\Invariants_H.valid_queues\ storePTE p pde \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma storePTE_valid_queues' [wp]: - "\valid_queues'\ storePTE p pde \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma storePTE_iflive [wp]: "\if_live_then_nonz_cap'\ storePTE p pte \\rv. if_live_then_nonz_cap'\" apply (wpsimp simp: storePTE_def objBits_simps archObjSize_def vspace_bits_defs @@ -4825,7 +4801,7 @@ lemma storePTE_invs [wp]: apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' - untyped_ranges_zero_lift + untyped_ranges_zero_lift valid_bitmaps_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp done @@ -4871,14 +4847,6 @@ lemma setASIDPool_qsL2 [wp]: "\\s. P (ksReadyQueuesL2Bitmap s)\ setObject p (ap::asidpool) \\rv s. P (ksReadyQueuesL2Bitmap s)\" by (wp setObject_qs updateObject_default_inv|simp)+ -lemma setASIDPool_valid_queues [wp]: - "\Invariants_H.valid_queues\ setObject p (ap::asidpool) \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma setASIDPool_valid_queues' [wp]: - "\valid_queues'\ setObject p (ap::asidpool) \\_. 
valid_queues'\" - by (wp valid_queues_lift') - lemma setASIDPool_state_refs' [wp]: "\\s. P (state_refs_of' s)\ setObject p (ap::asidpool) \\rv s. P (state_refs_of' s)\" apply (clarsimp simp: setObject_def valid_def in_monad split_def @@ -5001,17 +4969,22 @@ lemma setObject_ap_ksDomScheduleIdx [wp]: "\\s. P (ksDomScheduleIdx s)\ setObject p (ap::asidpool) \\_. \s. P (ksDomScheduleIdx s)\" by (wp updateObject_default_inv|simp add:setObject_def | wpc)+ +lemma setObject_asidpool_tcbs_of'[wp]: + "setObject c (asidpool::asidpool) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + lemma setASIDPool_invs [wp]: "\invs' and valid_asid_pool' ap\ setObject p (ap::asidpool) \\_. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def) - apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift - valid_irq_node_lift - cur_tcb_lift valid_irq_handlers_lift'' - untyped_ranges_zero_lift - updateObject_default_inv - | simp add: cteCaps_of_def - | rule setObject_ksPSpace_only)+ - apply (clarsimp simp: o_def) + apply (rule hoare_pre) + apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift + valid_irq_node_lift + cur_tcb_lift valid_irq_handlers_lift'' + untyped_ranges_zero_lift + updateObject_default_inv valid_bitmaps_lift + | simp add: cteCaps_of_def + | rule setObject_ksPSpace_only)+ + apply (clarsimp simp add: setObject_def o_def) done crunches vcpuSave, vcpuRestore, vcpuDisable, vcpuEnable diff --git a/proof/refine/Move_R.thy b/proof/refine/Move_R.thy index b2b3a40c34..513b51bdfc 100644 --- a/proof/refine/Move_R.thy +++ b/proof/refine/Move_R.thy @@ -72,7 +72,7 @@ lemma hoare_vcg_if_lift3: lemmas hoare_pre_post = hoare_pre_imp[where R="\_. Q" and Q=Q for Q] lemmas corres_underlying_gets_pre_rhs = - corres_symb_exec_r[OF _ _ gets_inv no_fail_pre[OF non_fail_gets TrueI]] + corres_symb_exec_r[OF _ _ gets_inv no_fail_pre[OF no_fail_gets TrueI]] lemma corres_if_r': "\ G' \ corres_underlying sr nf nf' r P P' a c; \G' \ corres_underlying sr nf nf' r P Q' a d \ @@ -122,7 +122,7 @@ lemma corres_symb_exec_l': apply (rule corres_noop3) apply (erule x) apply (rule gets_wp) - apply (rule non_fail_gets) + apply (rule no_fail_gets) apply (rule z) apply (rule y) apply (rule gets_wp) @@ -207,7 +207,6 @@ lemma get_mapM_x_lower: (* Move to DetSchedDomainTime_AI *) crunch domain_list_inv[wp]: do_user_op "\s. P (domain_list s)" - (wp: select_wp) lemma next_child_child_set: "\next_child slot (cdt_list s) = Some child; valid_list s\ diff --git a/proof/refine/README.md b/proof/refine/README.md index 13b8a8fe13..f7c40866da 100644 --- a/proof/refine/README.md +++ b/proof/refine/README.md @@ -20,12 +20,9 @@ Proof](../invariant-abstract/). It is described in the TPHOLS '08 Building -------- -Make sure that the `L4V_ARCH` environment variable is set to the desired -target architecture. If in doubt, use `L4V_ARCH=ARM`. +To build for the ARM architecture from the `l4v/` directory, run: -To build from the `l4v/` directory, run: - - ./isabelle/bin/isabelle build -d . 
-v -b Refine + L4V_ARCH=ARM ./run_tests Refine Important Theories ------------------ diff --git a/proof/refine/RISCV64/ADT_H.thy b/proof/refine/RISCV64/ADT_H.thy index 12353508da..15bf6278fd 100644 --- a/proof/refine/RISCV64/ADT_H.thy +++ b/proof/refine/RISCV64/ADT_H.thy @@ -454,7 +454,7 @@ proof - apply (intro conjI impI allI) apply (erule pspace_dom_relatedE[OF _ pspace_relation]) apply clarsimp - apply (case_tac ko; simp add: other_obj_relation_def) + apply (case_tac ko; simp add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp: cte_relation_def split: if_split_asm) apply (clarsimp simp: ep_relation_def EndpointMap_def split: Structures_A.endpoint.splits) @@ -465,7 +465,7 @@ proof - apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko; simp add: other_obj_relation_def) + apply (case_tac ko; simp add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp: cte_relation_def split: if_split_asm) apply (clarsimp simp: ntfn_relation_def AEndpointMap_def split: Structures_A.ntfn.splits) @@ -476,7 +476,7 @@ proof - apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko; simp add: other_obj_relation_def) + apply (case_tac ko; simp add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj; simp add: other_obj_relation_def) @@ -484,7 +484,7 @@ proof - apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) @@ -507,7 +507,7 @@ proof - apply (erule n_less_2p_pageBitsForSize) apply (clarsimp simp: shiftl_t2n mult_ac) apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) @@ -530,7 +530,7 @@ proof - apply (erule n_less_2p_pageBitsForSize) apply (clarsimp simp: shiftl_t2n mult_ac) apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) prefer 2 apply (rename_tac arch_kernel_obj) @@ -556,7 +556,7 @@ proof - arch_tcb_relation_imp_ArchTcnMap) apply (simp add: absCNode_def cte_map_def) apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def split: if_split_asm) prefer 2 apply (rename_tac arch_kernel_obj) @@ -623,7 +623,7 @@ proof - (* mapping architecture-specific objects *) apply clarsimp apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (case_tac ko, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply 
(rename_tac arch_kernel_object y ko P arch_kernel_obj) apply (case_tac arch_kernel_object, simp_all add: absHeapArch_def @@ -660,7 +660,7 @@ proof - apply (clarsimp dest!: koTypeOf_pte simp: objBits_simps bit_simps) apply (rename_tac pte') apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko; simp add: other_obj_relation_def) + apply (case_tac ko; simp add: tcb_relation_cut_def other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) apply (rename_tac ako' y ko P ako) apply (case_tac ako; clarsimp simp: other_obj_relation_def bit_simps) @@ -749,7 +749,7 @@ lemma absEkheap_correct: apply (case_tac "ksPSpace s' x", clarsimp) apply (erule_tac x=x in allE, clarsimp) apply clarsimp - apply (case_tac a, simp_all add: other_obj_relation_def) + apply (case_tac a, simp_all add: tcb_relation_cut_def other_obj_relation_def) apply (insert pspace_relation) apply (clarsimp simp: obj_at'_def) apply (erule(1) pspace_dom_relatedE) @@ -777,7 +777,7 @@ lemma TCB_implies_KOTCB: apply (clarsimp simp add: pspace_relation_def pspace_dom_def dom_def UNION_eq Collect_eq) apply (erule_tac x=a in allE)+ - apply (clarsimp simp add: other_obj_relation_def + apply (clarsimp simp add: tcb_relation_cut_def split: Structures_H.kernel_object.splits) apply (drule iffD1) apply (fastforce simp add: dom_def image_def) @@ -1524,7 +1524,7 @@ definition domain_index_internal = ksDomScheduleIdx s, cur_domain_internal = ksCurDomain s, domain_time_internal = ksDomainTime s, - ready_queues_internal = curry (ksReadyQueues s), + ready_queues_internal = (\d p. heap_walk (tcbSchedNexts_of s) (tcbQueueHead (ksReadyQueues s (d, p))) []), cdt_list_internal = absCDTList (cteMap (gsCNodes s)) (ctes_of s)\" lemma absExst_correct: @@ -1532,12 +1532,15 @@ lemma absExst_correct: assumes rel: "(s, s') \ state_relation" shows "absExst s' = exst s" apply (rule det_ext.equality) - using rel invs invs' - apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct - absCDTList_correct[THEN fun_cong] state_relation_def invs_def valid_state_def - ready_queues_relation_def invs'_def valid_state'_def - valid_pspace_def valid_sched_def valid_pspace'_def curry_def fun_eq_iff) - apply (fastforce simp: absEkheap_correct) + using rel invs invs' + apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct + absCDTList_correct[THEN fun_cong] state_relation_def invs_def + valid_state_def ready_queues_relation_def ready_queue_relation_def + invs'_def valid_state'_def + valid_pspace_def valid_sched_def valid_pspace'_def curry_def + fun_eq_iff) + apply (fastforce simp: absEkheap_correct) + apply (fastforce simp: list_queue_relation_def Let_def dest: heap_ls_is_walk) done diff --git a/proof/refine/RISCV64/ArchAcc_R.thy b/proof/refine/RISCV64/ArchAcc_R.thy index 97afd63bee..1465eda97a 100644 --- a/proof/refine/RISCV64/ArchAcc_R.thy +++ b/proof/refine/RISCV64/ArchAcc_R.thy @@ -20,9 +20,9 @@ declare if_cong[cong] (* FIXME: if_cong *) lemma asid_pool_at_ko: "asid_pool_at p s \ \pool. ko_at (ArchObj (RISCV64_A.ASIDPool pool)) p s" - by (clarsimp simp: asid_pools_at_eq obj_at_def) + by (clarsimp simp: asid_pools_at_eq obj_at_def elim!: opt_mapE) -lemma corres_gets_asid: +lemma corres_gets_asid[corres]: "corres (\a c. 
a = c o ucast) \ \ (gets (riscv_asid_table \ arch_state)) (gets (riscvKSASIDTable \ ksArchState))" by (simp add: state_relation_def arch_state_relation_def) @@ -52,27 +52,38 @@ lemma pspace_aligned_cross: apply (clarsimp simp: pspace_dom_def) apply (drule bspec, fastforce)+ apply clarsimp + apply (rename_tac ko' a a' P ko) apply (erule (1) obj_relation_cutsE; clarsimp simp: objBits_simps) - apply (clarsimp simp: cte_map_def) - apply (simp add: cteSizeBits_def cte_level_bits_def) - apply (rule is_aligned_add) - apply (erule is_aligned_weaken) - apply simp - apply (rule is_aligned_shift) + + \\CNode\ + apply (clarsimp simp: cte_map_def) + apply (simp only: cteSizeBits_def cte_level_bits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp) + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self, simp) + + \\TCB\ + apply (clarsimp simp: tcbBlockSizeBits_def elim!: is_aligned_weaken) + + \\PageTable\ + apply (clarsimp simp: archObjSize_def pteBits_def table_size_def ptTranslationBits_def pte_bits_def) apply (rule is_aligned_add) apply (erule is_aligned_weaken) - apply (simp add: bit_simps) + apply simp apply (rule is_aligned_shift) + + \\DataPage\ apply (rule is_aligned_add) apply (erule is_aligned_weaken) apply (rule pbfs_atleast_pageBits) apply (rule is_aligned_shift) + + \\other_obj_relation\ apply (simp add: other_obj_relation_def) apply (clarsimp simp: bit_simps' tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def - split: kernel_object.splits Structures_A.kernel_object.splits) - apply (clarsimp simp: archObjSize_def split: arch_kernel_object.splits arch_kernel_obj.splits) - apply (erule is_aligned_weaken) - apply (simp add: bit_simps) + split: kernel_object.splits Structures_A.kernel_object.splits) + apply (fastforce simp: archObjSize_def split: arch_kernel_object.splits arch_kernel_obj.splits) done lemma of_bl_shift_cte_level_bits: @@ -84,10 +95,12 @@ lemma obj_relation_cuts_range_limit: "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ \ \x n. p' = p + x \ is_aligned x n \ n \ obj_bits ko \ x \ mask (obj_bits ko)" apply (erule (1) obj_relation_cutsE; clarsimp) - apply (drule (1) wf_cs_nD) - apply (clarsimp simp: cte_map_def) - apply (rule_tac x=cte_level_bits in exI) - apply (simp add: is_aligned_shift of_bl_shift_cte_level_bits) + apply (drule (1) wf_cs_nD) + apply (clarsimp simp: cte_map_def) + apply (rule_tac x=cte_level_bits in exI) + apply (simp add: is_aligned_shift of_bl_shift_cte_level_bits) + apply (rule_tac x=tcbBlockSizeBits in exI) + apply (simp add: tcbBlockSizeBits_def) apply (rule_tac x=pte_bits in exI) apply (simp add: bit_simps is_aligned_shift mask_def) apply word_bitwise @@ -230,14 +243,6 @@ lemma getObject_ASIDPool_corres: apply (clarsimp simp: other_obj_relation_def asid_pool_relation_def) done -lemma aligned_distinct_obj_atI': - "\ ksPSpace s x = Some ko; pspace_aligned' s; pspace_distinct' s; ko = injectKO v \ - \ ko_at' v x s" - apply (simp add: obj_at'_def project_inject pspace_distinct'_def pspace_aligned'_def) - apply (drule bspec, erule domI)+ - apply simp - done - lemma storePTE_cte_wp_at'[wp]: "storePTE ptr val \\s. P (cte_wp_at' P' p s)\" apply (simp add: storePTE_def) @@ -258,10 +263,10 @@ lemma storePTE_state_refs_of[wp]: crunch cte_wp_at'[wp]: setIRQState "\s. 
P (cte_wp_at' P' p s)" crunch inv[wp]: getIRQSlot "P" -lemma setObject_ASIDPool_corres: - "a = inv ASIDPool a' o ucast \ +lemma setObject_ASIDPool_corres[corres]: + "\ a = inv ASIDPool a' o ucast; p' = p \ \ corres dc (asid_pool_at p and pspace_aligned and pspace_distinct) \ - (set_asid_pool p a) (setObject p a')" + (set_asid_pool p a) (setObject p' a')" apply (simp add: set_asid_pool_def) apply (rule corres_underlying_symb_exec_l[where P=P and Q="\_. P" for P]) apply (rule corres_no_failI; clarsimp) @@ -333,9 +338,10 @@ lemma corres_cross_over_pte_at: apply assumption done -lemma getObject_PTE_corres: - "corres pte_relation' (pte_at p and pspace_aligned and pspace_distinct) \ - (get_pte p) (getObject p)" +lemma getObject_PTE_corres[corres]: + "p = p' \ + corres pte_relation' (pte_at p and pspace_aligned and pspace_distinct) \ + (get_pte p) (getObject p')" apply (rule corres_cross_over_pte_at, fastforce) apply (simp add: getObject_def gets_map_def split_def bind_assoc) apply (rule corres_no_failI) @@ -355,7 +361,7 @@ lemma getObject_PTE_corres: apply (clarsimp simp: pte_at_eq) apply (clarsimp simp: ptes_of_def) apply (clarsimp simp: typ_at'_def ko_wp_at'_def in_magnitude_check objBits_simps bit_simps) - apply (clarsimp simp: state_relation_def pspace_relation_def) + apply (clarsimp simp: state_relation_def pspace_relation_def elim!: opt_mapE) apply (drule bspec, blast) apply (clarsimp simp: other_obj_relation_def pte_relation_def) apply (erule_tac x="table_index p" in allE) @@ -382,6 +388,7 @@ lemma setObject_PT_corres: pspace_aligned and pspace_distinct) \ (set_pt (table_base p) (pt(table_index p := pte))) (setObject p pte')" + supply opt_mapE[elim!] apply (rule corres_cross_over_pte_at[where p=p]) apply (simp add: pte_at_eq ptes_of_def in_omonad) apply (simp add: set_pt_def get_object_def bind_assoc set_object_def gets_map_def) @@ -440,18 +447,22 @@ lemma setObject_PT_corres: apply (drule_tac x=p in bspec, erule domI) apply (simp add: other_obj_relation_def split: Structures_A.kernel_object.splits) - apply (rule conjI) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x="p && ~~ mask pt_bits" in allE)+ apply fastforce + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pte')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + subgoal by (fastforce dest: tcbs_of'_non_tcb_update) apply (simp add: map_to_ctes_upd_other) apply (simp add: fun_upd_def) apply (simp add: caps_of_state_after_update obj_at_def swp_cte_at_caps_of) done -lemma storePTE_corres: - "pte_relation' pte pte' \ - corres dc (pte_at p and pspace_aligned and pspace_distinct) \ (store_pte p pte) (storePTE p pte')" +lemma storePTE_corres[corres]: + "\ p = p'; pte_relation' pte pte' \ \ + corres dc (pte_at p and pspace_aligned and pspace_distinct) \ (store_pte p pte) (storePTE p' pte')" apply (simp add: store_pte_def storePTE_def) apply (rule corres_assume_pre, simp add: pte_at_def) apply (rule corres_symb_exec_l) @@ -497,14 +508,32 @@ lemma getPTE_wp: by (clarsimp simp: getObject_def split_def loadObject_default_def in_magnitude_check in_monad valid_def obj_at'_def objBits_simps) -lemma pt_at_lift: - "corres_inst_eq ptr ptr' \ \s s'. 
(s, s') \ state_relation \ True \ - (pspace_aligned s \ pspace_distinct s \ pt_at ptr s \ ptr = ptr') \ - \ s' \ page_table_at' ptr' s'" - by ( fastforce intro!: page_table_at_cross) +(* FIXME: use a more recent guard crossing framework that doesn't need a specific goal form. + This was mostly left here to test compatibility between corres and corresK methods *) +(* only applies when ptr is available in abstract and concrete guard at the same time *) +lemma pt_at_lift_eq: + "\s s'. (s, s') \ state_relation \ + (pspace_aligned s \ pspace_distinct s \ pt_at ptr s) \ + \ s' \ + page_table_at' ptr s'" + by (fastforce intro!: page_table_at_cross) + +(* only applies for the "getPPtrFromHWPTE pte" pattern *) +lemma pt_at_lift_relation: + "\ pte_relation' pte pte'; RISCV64_A.is_PageTablePTE pte \ \ + \s s'. (s, s') \ state_relation \ + (pspace_aligned s \ pspace_distinct s \ pt_at (pptr_from_pte pte) s) \ + \ s' \ + page_table_at' (getPPtrFromHWPTE pte') s'" + apply (cases pte; simp) + apply (simp add: getPPtrFromHWPTE_def pptr_from_pte_def addr_from_ppn_def pt_at_lift_eq) + done + +lemmas checkPTAt_corres_pte[corres] = + corres_stateAssert_r_cross[OF pt_at_lift_relation, folded checkPTAt_def] -lemmas checkPTAt_corres[corresK] = - corres_stateAssert_implied_frame[OF pt_at_lift, folded checkPTAt_def] +lemmas checkPTAt_corres_eq[corres] = + corres_stateAssert_r_cross[OF pt_at_lift_eq, folded checkPTAt_def] lemma lookupPTSlotFromLevel_inv: "lookupPTSlotFromLevel level pt_ptr vptr \P\" @@ -558,14 +587,14 @@ lemma pteAtIndex_corres: \ (get_pte (pt_slot_offset level pt vptr)) (pteAtIndex level' pt vptr)" - by (simp add: pteAtIndex_def) (rule getObject_PTE_corres) + by (simp add: pteAtIndex_def getObject_PTE_corres) lemma user_region_or: "\ vref \ user_region; vref' \ user_region \ \ vref || vref' \ user_region" by (simp add: user_region_def canonical_user_def le_mask_high_bits word_size) -lemma lookupPTSlotFromLevel_corres: +lemma lookupPTSlotFromLevel_corres[corres]: "\ level' = size level; pt' = pt \ \ corres (\(level, p) (bits, p').
bits = pt_bits_left level \ p' = p) (pspace_aligned and pspace_distinct and valid_vspace_objs and valid_asid_table and @@ -651,7 +680,7 @@ next apply (frule (5) vs_lookup_table_is_aligned) apply (rule conjI) apply (drule (5) valid_vspace_objs_strongD) - apply (clarsimp simp: pte_at_def obj_at_def) + apply (clarsimp simp: pte_at_def obj_at_def elim!: opt_mapE) apply (simp add: pt_slot_offset_def) apply (rule is_aligned_add) apply (erule is_aligned_weaken) @@ -680,7 +709,9 @@ lemma lookupPTSlot_corres: \ (gets_the (pt_lookup_slot pt vptr \ ptes_of)) (lookupPTSlot pt vptr)" unfolding lookupPTSlot_def pt_lookup_slot_def - by (corressimp corres: lookupPTSlotFromLevel_corres) + by corres + +declare RISCV64_A.pte.sel[datatype_schematic] lemma lookupPTFromLevel_corres: "\ level' = size level; pt' = pt \ \ @@ -702,6 +733,8 @@ proof (induct level arbitrary: level' pt pt') next case (minus level) + note minus.hyps(1)[corres] + (* FIXME: unfortunate duplication from lookupPTSlotFromLevel_corres *) from `0 < level` obtain nlevel where nlevel: "level = nlevel + 1" by (auto intro: that[of "level-1"]) @@ -754,52 +787,37 @@ next apply (subst lookupPTFromLevel.simps, subst pt_lookup_from_level_simps) apply (simp add: unlessE_whenE not_less) apply (rule corres_gen_asm, simp) - apply (rule corres_initial_splitE[where r'=dc]) - apply (corressimp simp: lookup_failure_map_def) - apply (rule corres_splitEE[where r'=pte_relation']) - apply (simp, rule getObject_PTE_corres) - apply (rule whenE_throwError_corres) - apply (simp add: lookup_failure_map_def) + apply (corres simp: lookup_failure_map_def) apply (rename_tac pte pte', case_tac pte; simp add: isPageTablePTE_def) - apply (rule corres_if) - apply (clarsimp simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromHWPTE_def - addr_from_ppn_def) - apply (rule corres_returnOk[where P=\ and P'=\], rule refl) - apply (clarsimp simp: checkPTAt_def) - apply (subst liftE_bindE, rule corres_stateAssert_implied) - apply (rule minus.hyps) - apply (simp add: minus.hyps(2)) - apply (clarsimp simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromHWPTE_def - addr_from_ppn_def) - apply clarsimp - apply (rule page_table_at_cross; assumption?) - apply (drule vs_lookup_table_pt_at; simp?) - apply (clarsimp simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromHWPTE_def - addr_from_ppn_def) - apply (simp add: state_relation_def) + apply (corres term_simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def + getPPtrFromHWPTE_def addr_from_ppn_def minus.hyps(2)) apply wpsimp+ apply (simp add: bit0.neq_0_conv) apply (frule (5) vs_lookup_table_is_aligned) apply (rule conjI) apply (drule (5) valid_vspace_objs_strongD) - apply (clarsimp simp: pte_at_def obj_at_def) + apply (clarsimp simp: pte_at_def obj_at_def elim!: opt_mapE) apply (simp add: pt_slot_offset_def) apply (rule is_aligned_add) apply (erule is_aligned_weaken) apply (simp add: bit_simps) apply (rule is_aligned_shiftl, simp) apply clarsimp - apply (rule_tac x=asid in exI) - apply (rule_tac x="vref_step vref" in exI) - apply (clarsimp simp: vs_lookup_table_def in_omonad split: if_split_asm) apply (rule conjI) - apply (clarsimp simp: level_defs) - apply (subst pt_walk_split_Some[where level'=level]; simp?) 
- apply (drule bit0.pred) - apply simp - apply (subst pt_walk.simps) - apply (simp add: in_omonad) - apply wpsimp + apply (rule_tac x=asid in exI) + apply (rule_tac x="vref_step vref" in exI) + apply (clarsimp simp: vs_lookup_table_def in_omonad split: if_split_asm) + apply (rule conjI) + apply (clarsimp simp: level_defs) + apply (subst pt_walk_split_Some[where level'=level]; simp?) + apply (drule bit0.pred) + apply simp + apply (subst pt_walk.simps) + apply (simp add: in_omonad) + apply (drule (1) valid_vspace_objs_pte, fastforce) + apply (clarsimp simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def + table_index_max_level_slots) + apply simp done qed @@ -833,23 +851,19 @@ lemma corres_gets_global_pt [corres]: apply (case_tac "riscvKSGlobalPTs (ksArchState s') maxPTLevel"; simp) done -lemmas getObject_PTE_corres'[corres] = getObject_PTE_corres[@lift_corres_args] -lemmas storePTE_corres'[corres] = storePTE_corres[@lift_corres_args] - lemma copy_global_mappings_corres [@lift_corres_args, corres]: "corres dc (valid_global_arch_objs and pspace_aligned and pspace_distinct and pt_at pt) \ (copy_global_mappings pt) (copyGlobalMappings pt)" (is "corres _ ?apre _ _ _") unfolding copy_global_mappings_def copyGlobalMappings_def objBits_simps archObjSize_def pptr_base_def - apply corressimp - apply (rule_tac P="pt_at global_pt and ?apre" and P'="\" - in corresK_mapM_x[OF order_refl]) - apply (corressimp simp: objBits_def mask_def wp: get_pte_wp getPTE_wp)+ - apply (drule valid_global_arch_objs_pt_at) - apply (clarsimp simp: ptIndex_def ptBitsLeft_def maxPTLevel_def ptTranslationBits_def pageBits_def - pt_index_def pt_bits_left_def level_defs) - apply (fastforce intro!: page_table_pte_atI simp add: bit_simps word_le_nat_alt word_less_nat_alt) + apply corres + apply (rule_tac P="pt_at global_pt and ?apre" and P'="\" in corres_mapM_x[OF _ _ _ _ order_refl]) + apply (corres simp: ptIndex_def ptBitsLeft_def maxPTLevel_def ptTranslationBits_def pageBits_def + pt_index_def pt_bits_left_def level_defs + | fastforce dest!: valid_global_arch_objs_pt_at + intro!: page_table_pte_atI + simp: bit_simps word_le_nat_alt word_less_nat_alt)+ done lemma arch_cap_rights_update: @@ -958,7 +972,7 @@ lemma find_vspace_for_asid_rewite: apply (simp add: liftE_bindE bind_assoc exec_gets opt_map_def asid_low_bits_of_def) done -lemma findVSpaceForASID_corres: +lemma findVSpaceForASID_corres[corres]: assumes "asid' = ucast asid" shows "corres (lfr \ (=)) (valid_vspace_objs and valid_asid_table @@ -969,7 +983,7 @@ lemma findVSpaceForASID_corres: using assms apply (simp add: findVSpaceForASID_def) apply (rule corres_gen_asm, simp add: ucast_down_ucast_id is_down_def target_size source_size) - apply (rule corres_guard_imp[where Q'="?Q"], rule monadic_rewrite_corres[where P="?P", rotated], + apply (rule corres_guard_imp[where Q'="?Q"], rule monadic_rewrite_corres_l[where P="?P"], rule find_vspace_for_asid_rewite; simp) apply (simp add: liftE_bindE asidRange_def flip: mask_2pm1) apply (rule_tac r'="\x y. x = y o ucast" @@ -999,14 +1013,8 @@ lemma findVSpaceForASID_corres: apply (simp add: mask_asid_low_bits_ucast_ucast asid_low_bits_of_def returnOk_def lookup_failure_map_def ucast_ucast_a is_down split: option.split) - apply clarsimp - apply (simp add: returnOk_liftE checkPTAt_def liftE_bindE) - apply (rule corres_stateAssert_implied[where P=\, simplified]) - apply simp - apply clarsimp - apply (rule page_table_at_cross; assumption?) 
- apply fastforce - apply (wp getObject_inv loadObject_default_inv | simp)+ + apply (corres corres: corres_returnTT) + apply (wpsimp wp: getObject_inv loadObject_default_inv)+ apply (clarsimp simp: o_def) apply (rule conjI) apply (rule valid_asid_tableD; simp) diff --git a/proof/refine/RISCV64/Arch_R.thy b/proof/refine/RISCV64/Arch_R.thy index bf1e5b6173..edddbe5434 100644 --- a/proof/refine/RISCV64/Arch_R.thy +++ b/proof/refine/RISCV64/Arch_R.thy @@ -78,7 +78,7 @@ lemma createObject_typ_at': supply is_aligned_neg_mask_eq[simp del] is_aligned_neg_mask_weaken[simp del] - apply (clarsimp simp:createObjects'_def alignError_def split_def | wp hoare_unless_wp | wpc )+ + apply (clarsimp simp:createObjects'_def alignError_def split_def | wp unless_wp | wpc )+ apply (clarsimp simp:obj_at'_def ko_wp_at'_def typ_at'_def pspace_distinct'_def)+ apply (subgoal_tac "ps_clear ptr (objBitsKO ty) (s\ksPSpace := \a. if a = ptr then Some ty else ksPSpace s a\)") @@ -132,7 +132,7 @@ lemma set_cap_device_and_range_aligned: lemma performASIDControlInvocation_corres: "asid_ci_map i = i' \ corres dc - (einvs and ct_active and valid_aci i) + (einvs and ct_active and valid_aci i and schact_is_rct) (invs' and ct_active' and valid_aci' i') (perform_asid_control_invocation i) (performASIDControlInvocation i')" @@ -262,20 +262,20 @@ lemma performASIDControlInvocation_corres: deleteObjects_cte_wp_at' deleteObjects_null_filter[where p="makePoolParent i'"]) apply (clarsimp simp:invs_mdb max_free_index_def invs_untyped_children) - apply (subgoal_tac "detype_locale x y sa" for x y) - prefer 2 - apply (simp add:detype_locale_def) - apply (fastforce simp:cte_wp_at_caps_of_state descendants_range_def2 - empty_descendants_range_in invs_untyped_children) + apply (prop_tac "detype_locale x y sa" for x y) + apply (simp add: detype_locale_def) + apply (fastforce simp: cte_wp_at_caps_of_state descendants_range_def2 + empty_descendants_range_in invs_untyped_children) apply (intro conjI) apply (clarsimp) apply (erule(1) caps_of_state_valid) subgoal by (fastforce simp:cte_wp_at_caps_of_state descendants_range_def2 empty_descendants_range_in) apply (fold_subgoals (prefix))[2] subgoal premises prems using prems by (clarsimp simp:invs_def valid_state_def)+ - apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (clarsimp simp: schact_is_rct_def) + apply (clarsimp simp: cte_wp_at_caps_of_state) apply (drule detype_locale.non_null_present) - apply (fastforce simp:cte_wp_at_caps_of_state) + apply (fastforce simp: cte_wp_at_caps_of_state) apply simp apply (frule_tac ptr = "(aa,ba)" in detype_invariants [rotated 3]) apply fastforce @@ -327,29 +327,30 @@ lemma performASIDControlInvocation_corres: apply clarsimp apply (frule empty_descendants_range_in') apply (intro conjI, - simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2 - null_filter_descendants_of'[OF null_filter_simp'] - capAligned_def asid_low_bits_def) - apply (erule descendants_range_caps_no_overlapI') - apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq) - apply (simp add:empty_descendants_range_in') - apply (simp add:word_bits_def bit_simps) - apply (rule is_aligned_weaken) - apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) - apply (simp add:pageBits_def) + simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2 + null_filter_descendants_of'[OF null_filter_simp'] + capAligned_def asid_low_bits_def) + apply (erule descendants_range_caps_no_overlapI') + apply (fastforce simp:cte_wp_at_ctes_of 
is_aligned_neg_mask_eq) + apply (simp add:empty_descendants_range_in') + apply (simp add:word_bits_def bit_simps) + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) + apply (simp add:pageBits_def) + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range) + apply (fastforce simp: cte_wp_at_ctes_of) + apply assumption+ + apply fastforce + apply simp apply clarsimp - apply (drule(1) cte_cap_in_untyped_range) - apply (fastforce simp:cte_wp_at_ctes_of) + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) apply assumption+ + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) apply fastforce apply simp apply clarsimp - apply (drule (1) cte_cap_in_untyped_range) - apply (fastforce simp add: cte_wp_at_ctes_of) - apply assumption+ - apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) - apply fastforce - apply simp done definition @@ -403,7 +404,7 @@ lemma checkVP_wpR [wp]: checkVPAlignment sz w \P\, -" apply (simp add: checkVPAlignment_def unlessE_whenE cong: vmpage_size.case_cong) apply (rule hoare_pre) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (simp add: is_aligned_mask vmsz_aligned_def) done @@ -454,12 +455,7 @@ lemma checkSlot_corres: (check_slot p test) (checkSlot p test')" apply (simp add: check_slot_def checkSlot_def unlessE_whenE liftE_bindE) - apply (rule corres_guard_imp) - apply (rule corres_split[OF getObject_PTE_corres]) - apply (rule corres_whenE, simp) - apply (rule corres_trivial, simp) - apply simp - apply wpsimp+ + apply (corres corres: corres_throwErrorTT[of ser]) done lemma vmrights_map_vm_kernel_only[simp]: @@ -687,7 +683,7 @@ lemma decodeX64PageTableInvocation_corres: apply (rule leq_mask_shift) apply (simp add: bit_simps le_mask_high_bits word_size) apply ((clarsimp cong: if_cong - | wp hoare_whenE_wp hoare_vcg_all_lift_R getPTE_wp get_pte_wp + | wp whenE_wp hoare_vcg_all_liftE_R getPTE_wp get_pte_wp | wp (once) hoare_drop_imps)+) apply (clarsimp simp: invs_vspace_objs invs_valid_asid_table invs_psp_aligned invs_distinct) apply (clarsimp simp: valid_cap_def wellformed_mapdata_def not_le below_user_vtop_in_user_region) @@ -813,11 +809,11 @@ shows apply (simp add: returnOk_liftE[symmetric]) apply (rule corres_returnOk) apply (simp add: archinv_relation_def asid_pool_invocation_map_def) - apply (rule hoare_pre, wp hoare_whenE_wp) + apply (rule hoare_pre, wp whenE_wp) apply (clarsimp simp: ucast_fst_hd_assocs) - apply (wp hoareE_TrueI hoare_whenE_wp getASID_wp | simp)+ + apply (wp hoareE_TrueI whenE_wp getASID_wp | simp)+ apply ((clarsimp simp: p2_low_bits_max | rule TrueI impI)+)[2] - apply (wp hoare_whenE_wp getASID_wp)+ + apply (wp whenE_wp getASID_wp)+ apply (auto simp: valid_cap_def)[1] apply auto[1] \ \ASIDControlCap\ @@ -921,7 +917,7 @@ shows lemma arch_performInvocation_corres: "archinv_relation ai ai' \ corres (dc \ (=)) - (einvs and ct_active and valid_arch_inv ai) + (einvs and ct_active and valid_arch_inv ai and schact_is_rct) (invs' and ct_active' and valid_arch_inv' ai') (arch_perform_invocation ai) (Arch.performInvocation ai')" apply (clarsimp simp: arch_perform_invocation_def @@ -975,13 +971,13 @@ lemma performASIDControlInvocation_tcb_at': apply (rule hoare_name_pre_state) apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) - apply (wp 
static_imp_wp |simp add:placeNewObject_def2)+ - apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: projectKO_opts_defs) apply (strengthen st_tcb_strg' [where P=\]) apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"] hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] - deleteObjects_st_tcb_at'[where p="makePoolParent aci"] static_imp_wp + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+ apply (case_tac ctea) apply (clarsimp) @@ -1141,7 +1137,7 @@ lemma arch_decodeInvocation_wf[wp]: cte_wp_at' (\cte. \idx. cteCap cte = (UntypedCap False frame pageBits idx)) (snd (excaps!0)) and sch_act_simple and (\s. descendants_of' (snd (excaps!0)) (ctes_of s) = {}) " - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: lookupTargetSlot_def) apply wp apply (clarsimp simp: cte_wp_at_ctes_of asid_wf_def mask_def) @@ -1162,15 +1158,15 @@ lemma arch_decodeInvocation_wf[wp]: \ \ASIDPool cap\ apply (simp add: decodeRISCVMMUInvocation_def RISCV64_H.decodeInvocation_def - Let_def split_def isCap_simps decodeRISCVASIDPoolInvocation_def - cong: if_cong split del: if_split) - apply (wpsimp simp: valid_arch_inv'_def valid_apinv'_def wp: getASID_wp cong: if_cong) + Let_def split_def isCap_simps decodeRISCVASIDPoolInvocation_def) + apply (wpsimp simp: valid_arch_inv'_def valid_apinv'_def wp: getASID_wp + split_del: if_split) apply (clarsimp simp: word_neq_0_conv valid_cap'_def valid_arch_inv'_def valid_apinv'_def) apply (rule conjI) apply (erule cte_wp_at_weakenE') apply (simp, drule_tac t="cteCap c" in sym, simp add: isCap_simps) - apply (subst (asm) conj_assoc [symmetric]) - apply (subst (asm) assocs_empty_dom_comp [symmetric]) + apply (subst (asm) conj_assoc [symmetric], + subst (asm) assocs_empty_dom_comp [symmetric]) apply (drule dom_hd_assocsD) apply (simp add: capAligned_def asid_wf_def mask_def) apply (elim conjE) @@ -1221,7 +1217,7 @@ lemma performASIDControlInvocation_st_tcb_at': hoare_vcg_ex_lift deleteObjects_cte_wp_at' deleteObjects_invs_derivatives deleteObjects_st_tcb_at' - static_imp_wp + hoare_weak_lift_imp | simp add: placeNewObject_def2)+ apply (case_tac ctea) apply (clarsimp) @@ -1261,7 +1257,7 @@ crunch st_tcb_at' [wp]: "Arch.finaliseCap" "st_tcb_at' P t" lemma invs_asid_table_strengthen': "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ invs' (s\ksArchState := - riscvKSASIDTable_update (\_. (riscvKSASIDTable \ ksArchState) s(asid \ ap)) (ksArchState s)\)" + riscvKSASIDTable_update (\_. 
((riscvKSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" apply (clarsimp simp: invs'_def valid_state'_def) apply (rule conjI) apply (clarsimp simp: valid_global_refs'_def global_refs'_def) @@ -1347,7 +1343,7 @@ lemma performASIDControlInvocation_invs' [wp]: updateFreeIndex_caps_no_overlap'' updateFreeIndex_descendants_of2 updateFreeIndex_caps_overlap_reserved - updateCap_cte_wp_at_cases static_imp_wp + updateCap_cte_wp_at_cases hoare_weak_lift_imp getSlotCap_wp)+ apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask | strengthen invs_valid_pspace' invs_pspace_aligned' diff --git a/proof/refine/RISCV64/Bits_R.thy b/proof/refine/RISCV64/Bits_R.thy index d739cf95b8..bd6bafa35f 100644 --- a/proof/refine/RISCV64/Bits_R.thy +++ b/proof/refine/RISCV64/Bits_R.thy @@ -73,6 +73,10 @@ lemma projectKO_tcb: "(projectKO_opt ko = Some t) = (ko = KOTCB t)" by (cases ko) (auto simp: projectKO_opts_defs) +lemma tcb_of'_TCB[simp]: + "tcb_of' (KOTCB tcb) = Some tcb" + by (simp add: projectKO_tcb) + lemma projectKO_cte: "(projectKO_opt ko = Some t) = (ko = KOCTE t)" by (cases ko) (auto simp: projectKO_opts_defs) @@ -208,6 +212,8 @@ where lfr_def[simp]: "lfr x y \ (y = lookup_failure_map x)" +lemmas corres_throwError_lfr[corres_term] = corres_throwErrorTT[of lfr] + text \Correspondence and weakest precondition rules for the "on failure" transformers\ diff --git a/proof/refine/RISCV64/CNodeInv_R.thy b/proof/refine/RISCV64/CNodeInv_R.thy index 3ad30a2345..947fd5de32 100644 --- a/proof/refine/RISCV64/CNodeInv_R.thy +++ b/proof/refine/RISCV64/CNodeInv_R.thy @@ -208,7 +208,7 @@ lemma decodeCNodeInvocation_corres: subgoal by (auto simp add: whenE_def, auto simp add: returnOk_def) apply (wp | wpc | simp(no_asm))+ apply (wp hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift - hoare_vcg_all_lift_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps + hoare_vcg_all_liftE_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps | clarsimp)+ subgoal by (auto elim!: valid_cnode_capI) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) @@ -265,7 +265,7 @@ lemma decodeCNodeInvocation_corres: apply (clarsimp simp add: returnOk_def) apply (wp get_cap_wp getCTE_wp | simp only: whenE_def | clarsimp)+ apply (rule hoare_trivE_R[where P="\"]) - apply (simp add: cte_wp_at_ctes_of pred_conj_def cong: conj_cong) + apply (wpsimp simp: cte_wp_at_ctes_of pred_conj_def) apply (fastforce elim!: valid_cnode_capI simp: invs_def valid_state_def valid_pspace_def) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) \ \Rotate\ @@ -386,7 +386,7 @@ lemma deriveCap_Null_helper: apply (cases "cap = NullCap") apply (simp add: deriveCap_def isCap_simps) apply (wp | simp)+ - apply (rule hoare_post_imp_R, rule assms) + apply (rule hoare_strengthen_postE_R, rule assms) apply simp done @@ -444,7 +444,7 @@ lemma decodeCNodeInv_wf[wp]: apply (wp whenE_throwError_wp getCTE_wp | wpc | simp(no_asm))+ apply (rule_tac Q'="\rv. invs' and cte_wp_at' (\cte. cteCap cte = NullCap) destSlot and ex_cte_cap_to' destSlot" - in hoare_post_imp_R, wp) + in hoare_strengthen_postE_R, wp) apply (clarsimp simp: cte_wp_at_ctes_of) apply (frule invs_valid_objs') apply (simp add: ctes_of_valid' valid_updateCapDataI @@ -479,7 +479,7 @@ lemma decodeCNodeInv_wf[wp]: unlessE_whenE) apply (rule hoare_pre) apply (wp whenE_throwError_wp getCTE_wp | simp)+ - apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. True) rv s" in hoare_post_imp_R) + apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. 
True) rv s" in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (simp add: cte_wp_at_ctes_of imp_ex hasCancelSendRights_not_Null) apply (clarsimp simp: ctes_of_valid' invs_valid_objs') @@ -493,7 +493,7 @@ lemma decodeCNodeInv_wf[wp]: apply (rule_tac Q'="\rv s. cte_at' rv s \ cte_at' destSlot s \ cte_at' srcSlot s \ ex_cte_cap_to' rv s \ ex_cte_cap_to' destSlot s - \ invs' s" in hoare_post_imp_R) + \ invs' s" in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (clarsimp simp: cte_wp_at_ctes_of) apply (frule invs_valid_objs') @@ -4861,7 +4861,7 @@ lemma cteSwap_iflive'[wp]: simp only: if_live_then_nonz_cap'_def imp_conv_disj ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - hoare_vcg_ex_lift updateCap_cte_wp_at_cases static_imp_wp)+ + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -4891,7 +4891,7 @@ lemma cteSwap_valid_pspace'[wp]: apply (strengthen imp_consequent, strengthen ctes_of_strng) apply ((wp sch_act_wf_lift valid_queues_lift cur_tcb_lift updateCap_no_0 updateCap_ctes_of_wp - hoare_ex_wp getCTE_wp + hoare_vcg_ex_lift getCTE_wp | simp add: cte_wp_at_ctes_ofI o_def | rule hoare_drop_imps)+)[6] apply (clarsimp simp: valid_pspace_no_0[unfolded valid_pspace'_def valid_mdb'_def] @@ -5042,8 +5042,6 @@ crunch valid_arch_state'[wp]: cteSwap "valid_arch_state'" crunch irq_states'[wp]: cteSwap "valid_irq_states'" -crunch vq'[wp]: cteSwap "valid_queues'" - crunch ksqsL1[wp]: cteSwap "\s. P (ksReadyQueuesL1Bitmap s)" crunch ksqsL2[wp]: cteSwap "\s. P (ksReadyQueuesL2Bitmap s)" @@ -5058,6 +5056,12 @@ crunch ct_not_inQ[wp]: cteSwap "ct_not_inQ" crunch ksDomScheduleIdx [wp]: cteSwap "\s. P (ksDomScheduleIdx s)" +crunches cteSwap + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + lemma cteSwap_invs'[wp]: "\invs' and valid_cap' c and valid_cap' c' and ex_cte_cap_to' c1 and ex_cte_cap_to' c2 and @@ -5513,6 +5517,10 @@ lemma updateCap_untyped_ranges_zero_simple: crunch tcb_in_cur_domain'[wp]: updateCap "tcb_in_cur_domain' t" (wp: crunch_wps simp: crunch_simps rule: tcb_in_cur_domain'_lift) +crunches updateCap + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + lemma make_zombie_invs': "\\s. invs' s \ s \' cap \ cte_wp_at' (\cte. isFinal (cteCap cte) sl (cteCaps_of s)) sl s \ @@ -5529,7 +5537,8 @@ lemma make_zombie_invs': st_tcb_at' ((=) Inactive) p s \ bound_tcb_at' ((=) None) p s \ obj_at' (Not \ tcbQueued) p s - \ (\pr. p \ set (ksReadyQueues s pr)))) sl s\ + \ obj_at' (\tcb. tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p s)) sl s\ updateCap sl cap \\rv. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def @@ -5566,7 +5575,9 @@ lemma make_zombie_invs': apply (clarsimp simp: cte_wp_at_ctes_of) apply (subgoal_tac "st_tcb_at' ((=) Inactive) p' s \ obj_at' (Not \ tcbQueued) p' s - \ bound_tcb_at' ((=) None) p' s") + \ bound_tcb_at' ((=) None) p' s + \ obj_at' (\tcb. 
tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p' s") apply (clarsimp simp: pred_tcb_at'_def obj_at'_def ko_wp_at'_def) apply (auto dest!: isCapDs)[1] apply (clarsimp simp: cte_wp_at_ctes_of disj_ac @@ -5727,7 +5738,7 @@ lemma cteSwap_cte_wp_cteCap: apply simp apply (wp hoare_drop_imps)[1] apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - getCTE_wp' hoare_vcg_all_lift static_imp_wp)+ + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ apply simp apply (clarsimp simp: o_def) done @@ -5741,7 +5752,7 @@ lemma capSwap_cte_wp_cteCap: apply(simp add: capSwapForDelete_def) apply(wp) apply(rule cteSwap_cte_wp_cteCap) - apply(wp getCTE_wp getCTE_cte_wp_at static_imp_wp)+ + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ apply(clarsimp) apply(rule conjI) apply(simp add: cte_at_cte_wp_atD) @@ -5822,7 +5833,7 @@ crunch cap_to'[wp]: cancelSignal "ex_cte_cap_wp_to' P p" lemma cancelIPC_cap_to'[wp]: "\ex_cte_cap_wp_to' P p\ cancelIPC t \\rv. ex_cte_cap_wp_to' P p\" apply (simp add: cancelIPC_def Let_def) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (case_tac state, simp_all add: getThreadReplySlot_def locateSlot_conv) apply (wp ex_cte_cap_to'_pres [OF threadSet_cte_wp_at'] | simp add: o_def if_apply_def2 @@ -5887,7 +5898,7 @@ lemma cteDelete_delete_cases: apply (rule hoare_strengthen_post [OF emptySlot_deletes]) apply (clarsimp simp: cte_wp_at_ctes_of) apply wp+ - apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) apply (clarsimp simp: cte_wp_at_ctes_of) apply simp done @@ -6250,7 +6261,7 @@ proof (induct arbitrary: P p rule: finalise_spec_induct2) apply clarsimp apply (case_tac "cteCap rv", simp_all add: isCap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp | simp | wp (once) isFinal[where x=sl])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ apply (wp getCTE_wp') apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule conjI, clarsimp simp: removeable'_def) @@ -6268,7 +6279,7 @@ lemma finaliseSlot_invs'': \ (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\, \\rv s. invs' s \ sch_act_simple s\" unfolding finaliseSlot_def - apply (rule hoare_pre, rule hoare_post_impErr, rule use_spec) + apply (rule hoare_pre, rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where P="\" and Pr="\" and p=slot]) apply (simp_all add: no_cte_prop_top) apply wp @@ -6278,14 +6289,14 @@ lemma finaliseSlot_invs'': lemma finaliseSlot_invs: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. invs'\" - apply (rule validE_valid, rule hoare_post_impErr) + apply (rule validE_valid, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done lemma finaliseSlot_sch_act_simple: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. sch_act_simple\" - apply (rule validE_valid, rule hoare_post_impErr) + apply (rule validE_valid, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6294,7 +6305,7 @@ lemma finaliseSlot_removeable: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv s. fst rv \ cte_wp_at' (\cte. 
removeable' slot s (cteCap cte)) slot s\,-" - apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule validE_validE_R, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6303,7 +6314,7 @@ lemma finaliseSlot_irqs: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv s. (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\,-" - apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule validE_validE_R, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6318,7 +6329,7 @@ lemma finaliseSlot_cte_wp_at: P cp \ capZombiePtr cp \ p)) p s\,-" unfolding finaliseSlot_def apply (rule hoare_pre, unfold validE_R_def) - apply (rule hoare_post_impErr, rule use_spec) + apply (rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where P=P and Pr=\ and p=p]) apply (simp_all add: no_cte_prop_top finalise_prop_stuff_def) apply wp @@ -6341,7 +6352,7 @@ lemma reduceZombie_invs: reduceZombie cap slot exposed \\rv s. invs' s\" apply (rule validE_valid) - apply (rule hoare_post_impErr, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) + apply (rule hoare_strengthen_postE, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6351,7 +6362,7 @@ lemma reduceZombie_cap_to: reduceZombie cap slot exposed \\rv s. \ exposed \ ex_cte_cap_to' slot s\, -" apply (rule validE_validE_R, rule hoare_pre, - rule hoare_post_impErr) + rule hoare_strengthen_postE) apply (rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6362,7 +6373,7 @@ lemma reduceZombie_sch_act_simple: reduceZombie cap slot exposed \\rv. sch_act_simple\" apply (rule validE_valid, rule hoare_pre, - rule hoare_post_impErr) + rule hoare_strengthen_postE) apply (rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6372,7 +6383,7 @@ lemma cteDelete_invs': apply (rule hoare_gen_asm) apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (unfold validE_R_def) apply (rule use_spec) apply (rule spec_valid_conj_liftE1) @@ -6403,9 +6414,9 @@ lemma cteDelete_cte_at: apply (rule hoare_vcg_disj_lift) apply (rule typ_at_lifts, rule cteDelete_typ_at') apply (simp add: cteDelete_def finaliseSlot_def split_def) - apply (rule validE_valid, rule seqE) + apply (rule validE_valid, rule bindE_wp_fwd) apply (subst finaliseSlot'_simps_ext) - apply (rule seqE) + apply (rule bindE_wp_fwd) apply simp apply (rule getCTE_sp) apply (rule hoare_pre, rule hoare_FalseE) @@ -6447,10 +6458,10 @@ lemma cteDelete_cte_wp_at_invs: cteCap cte = NullCap \ (\zb n. cteCap cte = Zombie slot zb n)) slot s)" - and E="\rv. \" in hoare_post_impErr) + and E="\rv. \" in hoare_strengthen_postE) apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple hoare_drop_imps(2)[OF finaliseSlot_irqs]) - apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) apply (clarsimp simp: cte_wp_at_ctes_of dest!: isCapDs) apply simp apply simp @@ -6468,10 +6479,10 @@ lemma cteDelete_cte_wp_at_invs: (\zb n. cteCap cte = Zombie p zb n) \ (\cp. 
P cp \ capZombiePtr cp \ p)) p s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple hoare_drop_imps(2)[OF finaliseSlot_irqs]) - apply (rule hoare_post_imp_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) + apply (rule hoare_strengthen_postE_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) apply simp+ apply (clarsimp simp: cte_wp_at_ctes_of) apply simp @@ -6484,7 +6495,7 @@ lemma cteDelete_sch_act_simple: cteDelete slot exposed \\rv. sch_act_simple\" apply (simp add: cteDelete_def whenE_def split_def) apply (wp hoare_drop_imps | simp)+ - apply (rule_tac hoare_post_impErr [where Q="\rv. sch_act_simple" + apply (rule_tac hoare_strengthen_postE [where Q="\rv. sch_act_simple" and E="\rv. sch_act_simple"]) apply (rule valid_validE) apply (wp finaliseSlot_sch_act_simple) @@ -7025,14 +7036,14 @@ next apply simp apply (wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at set_cap_cte_cap_wp_to - hoare_vcg_const_Ball_lift static_imp_wp + hoare_vcg_const_Ball_lift hoare_weak_lift_imp | simp add: conj_comms | erule finalise_cap_not_reply_master [simplified])+ apply (elim conjE, strengthen exI[mk_strg I], strengthen asm_rl[where psi="(cap_relation cap cap')" for cap cap', mk_strg I E]) apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - hoare_vcg_ex_lift static_imp_wp) + hoare_vcg_ex_lift hoare_weak_lift_imp) apply clarsimp apply (drule_tac cap=a in cap_relation_removables, clarsimp, assumption+) @@ -7074,7 +7085,7 @@ next apply (clarsimp dest!: isCapDs simp: cte_wp_at_ctes_of) apply (case_tac "cteCap rv'", auto simp add: isCap_simps is_cap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp add: is_final_cap_def conj_comms cte_wp_at_eq_simp)+ apply (rule isFinal[where x="cte_map slot"]) apply (wp get_cap_wp| simp add: conj_comms)+ @@ -7153,13 +7164,11 @@ next case (4 ptr bits n slot) let ?target = "(ptr, nat_to_cref (zombie_cte_bits bits) n)" note hyps = "4.hyps"[simplified rec_del_concrete_unfold spec_corres_liftME2] - have pred_conj_assoc: "\P Q R. (P and (Q and R)) = (P and Q and R)" - by (rule ext, simp) show ?case apply (simp only: rec_del_concrete_unfold cap_relation.simps) apply (simp add: reduceZombie_def Let_def liftE_bindE - del: pred_conj_app) + del: inf_apply) apply (subst rec_del_simps_ext) apply (rule_tac F="ptr + 2 ^ cte_level_bits * of_nat n = cte_map ?target" @@ -7217,7 +7226,7 @@ next apply (rule updateCap_corres) apply simp apply (simp add: is_cap_simps) - apply (rule_tac Q="\rv. cte_at' (cte_map ?target)" in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (cte_map ?target)" in hoare_post_add) apply (wp, (wp getCTE_wp)+) apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule no_fail_pre, wp, simp) @@ -7256,7 +7265,7 @@ next apply (clarsimp simp: zombie_alignment_oddity cte_map_replicate) apply (wp get_cap_cte_wp_at getCTE_wp' rec_del_cte_at rec_del_invs rec_del_delete_cases)+ - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule_tac P="\cp. 
cp = Zombie ptr (zbits_map bits) (Suc n)" in cteDelete_cte_wp_at_invs[where p="cte_map slot"]) apply clarsimp @@ -8374,7 +8383,7 @@ lemma cteMove_iflive'[wp]: ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_ex_lift updateCap_cte_wp_at_cases - getCTE_wp static_imp_wp)+ + getCTE_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -8530,6 +8539,15 @@ lemma cteMove_urz [wp]: apply auto done +crunches updateMDB + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +(* FIXME: arch_split *) +lemma haskell_assert_inv: + "haskell_assert Q L \P\" + by wpsimp + lemma cteMove_invs' [wp]: "\\x. invs' x \ ex_cte_cap_to' word2 x \ cte_wp_at' (\c. weak_derived' (cteCap c) capability) word1 x \ @@ -8555,7 +8573,7 @@ lemma cteMove_cte_wp_at: \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" unfolding cteMove_def apply (fold o_def) - apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp static_imp_wp|simp add: o_def)+ + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -8607,6 +8625,10 @@ crunch ksDomSchedule[wp]: updateCap "\s. P (ksDomSchedule s)" crunch ksDomScheduleIdx[wp]: updateCap "\s. P (ksDomScheduleIdx s)" crunch ksDomainTime[wp]: updateCap "\s. P (ksDomainTime s)" +crunches updateCap + for rdyq_projs[wp]: + "\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) (\d p. inQ d p |< tcbs_of' s)" + lemma corres_null_cap_update: "cap_relation cap cap' \ corres dc (invs and cte_wp_at ((=) cap) slot) @@ -8867,7 +8889,7 @@ declare withoutPreemption_lift [wp] crunch irq_states' [wp]: capSwapForDelete valid_irq_states' crunch irq_states' [wp]: finaliseCap valid_irq_states' - (wp: crunch_wps hoare_unless_wp getASID_wp no_irq_setVSpaceRoot no_irq_hwASIDFlush + (wp: crunch_wps unless_wp getASID_wp no_irq_setVSpaceRoot no_irq_hwASIDFlush simp: crunch_simps o_def pteAtIndex_def) lemma finaliseSlot_IRQInactive': @@ -8897,7 +8919,7 @@ lemma finaliseSlot_IRQInactive: "\valid_irq_states'\ finaliseSlot a b -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (unfold validE_E_def) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule use_spec(2) [OF finaliseSlot_IRQInactive', folded finaliseSlot_def]) apply (rule TrueI) apply assumption @@ -8911,8 +8933,8 @@ lemma cteDelete_IRQInactive: "\valid_irq_states'\ cteDelete x y -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (simp add: cteDelete_def split_def) - apply (wp hoare_whenE_wp) - apply (rule hoare_post_impErr) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) apply (rule validE_E_validE) apply (rule finaliseSlot_IRQInactive) apply simp @@ -8924,8 +8946,8 @@ lemma cteDelete_irq_states': "\valid_irq_states'\ cteDelete x y \\rv. 
valid_irq_states'\" apply (simp add: cteDelete_def split_def) - apply (wp hoare_whenE_wp) - apply (rule hoare_post_impErr) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) apply (rule hoare_valid_validE) apply (rule finaliseSlot_irq_states') apply simp @@ -8948,7 +8970,7 @@ proof (induct rule: cteRevoke.induct) case (1 p s') show ?case apply (subst cteRevoke.simps) - apply (wp "1.hyps" unlessE_wp hoare_whenE_wp preemptionPoint_IRQInactive_spec + apply (wp "1.hyps" unlessE_wp whenE_wp preemptionPoint_IRQInactive_spec cteDelete_IRQInactive cteDelete_irq_states' getCTE_wp')+ apply clarsimp done @@ -8969,7 +8991,7 @@ lemma inv_cnode_IRQInactive: apply (rule hoare_pre) apply (wp cteRevoke_IRQInactive finaliseSlot_IRQInactive cteDelete_IRQInactive - hoare_whenE_wp + whenE_wp | wpc | simp add: split_def)+ done diff --git a/proof/refine/RISCV64/CSpace1_R.thy b/proof/refine/RISCV64/CSpace1_R.thy index 2c73fde839..49850b8bd7 100644 --- a/proof/refine/RISCV64/CSpace1_R.thy +++ b/proof/refine/RISCV64/CSpace1_R.thy @@ -233,7 +233,7 @@ lemma pspace_relation_cte_wp_at: apply (clarsimp elim!: cte_wp_at_weakenE') apply clarsimp apply (drule(1) pspace_relation_absD) - apply (clarsimp simp: other_obj_relation_def) + apply (clarsimp simp: tcb_relation_cut_def) apply (simp split: kernel_object.split_asm) apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb]) apply simp @@ -397,7 +397,7 @@ lemma resolveAddressBits_cte_at': resolveAddressBits cap addr depth \\rv. cte_at' (fst rv)\, \\rv s. True\" apply (fold validE_R_def) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule resolveAddressBits_real_cte_at') apply (erule real_cte_at') done @@ -740,11 +740,11 @@ lemma lookupSlotForThread_corres: apply clarsimp apply simp prefer 2 - apply (rule hoare_vcg_precond_impE) + apply (rule hoare_weaken_preE) apply (rule resolve_address_bits_cte_at [unfolded validE_R_def]) apply clarsimp prefer 2 - apply (rule hoare_vcg_precond_impE) + apply (rule hoare_weaken_preE) apply (rule resolveAddressBits_cte_at') apply (simp add: invs'_def valid_state'_def valid_pspace'_def) apply (simp add: returnOk_def split_def) @@ -829,7 +829,7 @@ lemma setCTE_tcb_in_cur_domain': done lemma setCTE_ctes_of_wp [wp]: - "\\s. P (ctes_of s (p \ cte))\ + "\\s. P ((ctes_of s) (p \ cte))\ setCTE p cte \\rv s. P (ctes_of s)\" by (simp add: setCTE_def ctes_of_setObject_cte) @@ -934,7 +934,7 @@ lemma cteInsert_weak_cte_wp_at: \\uu. cte_wp_at'(\c. P (cteCap c)) p\" unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def apply (simp add: bind_assoc split del: if_split) - apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at static_imp_wp | simp)+ + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ apply (wp getCTE_ctes_wp)+ apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ done @@ -1631,10 +1631,10 @@ lemma cte_map_pulls_tcb_to_abstract: \ \tcb'. 
kheap s x = Some (TCB tcb') \ tcb_relation tcb' tcb \ (z = (x, tcb_cnode_index (unat ((y - x) >> cte_level_bits))))" apply (rule pspace_dom_relatedE, assumption+) - apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) - apply (clarsimp simp: other_obj_relation_def + apply (erule(1) obj_relation_cutsE; + clarsimp simp: other_obj_relation_def split: Structures_A.kernel_object.split_asm - RISCV64_A.arch_kernel_obj.split_asm) + RISCV64_A.arch_kernel_obj.split_asm if_split_asm) apply (drule tcb_cases_related2) apply clarsimp apply (frule(1) cte_wp_at_tcbI [OF _ _ TrueI, where t="(a, b)" for a b, simplified]) @@ -1650,8 +1650,7 @@ lemma pspace_relation_update_tcbs: del: dom_fun_upd) apply (erule conjE) apply (rule ballI, drule(1) bspec) - apply (rule conjI, simp add: other_obj_relation_def) - apply (clarsimp split: Structures_A.kernel_object.split_asm) + apply (clarsimp simp: tcb_relation_cut_def split: Structures_A.kernel_object.split_asm) apply (drule bspec, fastforce) apply clarsimp apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) @@ -1873,6 +1872,27 @@ lemma descendants_of_eq': apply simp done +lemma setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedPrevs_of s)" + shows "P (ps |> tcb_of' |> tcbSchedPrev)" + using use_valid[OF step setObject_cte_tcbSchedPrevs_of(1)] pre + by auto + +lemma setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedNexts_of s)" + shows "P (ps |> tcb_of' |> tcbSchedNext)" + using use_valid[OF step setObject_cte_tcbSchedNexts_of(1)] pre + by auto + +lemma setObject_cte_inQ_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (inQ domain priority |< tcbs_of' s)" + shows "P (inQ domain priority |< (ps |> tcb_of'))" + using use_valid[OF step setObject_cte_inQ(1)] pre + by auto + lemma updateCap_stuff: assumes "(x, s'') \ fst (updateCap p cap s')" shows "(ctes_of s'' = modify_map (ctes_of s') p (cteCap_update (K cap))) \ @@ -1886,7 +1906,12 @@ lemma updateCap_stuff: ksSchedulerAction s'' = ksSchedulerAction s' \ (ksArchState s'' = ksArchState s') \ (pspace_aligned' s' \ pspace_aligned' s'') \ - (pspace_distinct' s' \ pspace_distinct' s'')" using assms + (pspace_distinct' s' \ pspace_distinct' s'') \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" + using assms apply (clarsimp simp: updateCap_def in_monad) apply (drule use_valid [where P="\s. 
s2 = s" for s2, OF _ getCTE_sp refl]) apply (rule conjI) @@ -1895,8 +1920,11 @@ lemma updateCap_stuff: apply (frule setCTE_pspace_only) apply (clarsimp simp: setCTE_def) apply (intro conjI impI) - apply (erule(1) use_valid [OF _ setObject_aligned]) - apply (erule(1) use_valid [OF _ setObject_distinct]) + apply (erule(1) use_valid [OF _ setObject_aligned]) + apply (erule(1) use_valid [OF _ setObject_distinct]) + apply (erule setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace; simp) + apply (erule setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace; simp) + apply (fastforce elim: setObject_cte_inQ_of_use_valid_ksPSpace) done (* FIXME: move *) @@ -1913,16 +1941,16 @@ lemma pspace_relation_cte_wp_atI': apply (simp split: if_split_asm) apply (erule(1) pspace_dom_relatedE) apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) + apply (subgoal_tac "n = x - y", clarsimp) + apply (drule tcb_cases_related2, clarsimp) + apply (intro exI, rule conjI) + apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) + apply fastforce + apply simp + apply clarsimp apply (simp add: other_obj_relation_def split: Structures_A.kernel_object.split_asm RISCV64_A.arch_kernel_obj.split_asm) - apply (subgoal_tac "n = x - y", clarsimp) - apply (drule tcb_cases_related2, clarsimp) - apply (intro exI, rule conjI) - apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) - apply fastforce - apply simp - apply clarsimp done lemma pspace_relation_cte_wp_atI: @@ -2444,7 +2472,7 @@ lemma updateCap_corres: apply (clarsimp simp: in_set_cap_cte_at_swp pspace_relations_def) apply (drule updateCap_stuff) apply simp - apply (rule conjI) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) apply (rule conjI) prefer 2 @@ -2532,9 +2560,9 @@ lemma updateMDB_pspace_relation: apply (clarsimp simp: tcb_ctes_clear cte_level_bits_def objBits_defs) apply clarsimp apply (rule pspace_dom_relatedE, assumption+) - apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] - apply (clarsimp split: Structures_A.kernel_object.split_asm - RISCV64_A.arch_kernel_obj.split_asm + apply (rule obj_relation_cutsE, assumption+; + clarsimp split: Structures_A.kernel_object.split_asm + RISCV64_A.arch_kernel_obj.split_asm if_split_asm simp: other_obj_relation_def) apply (frule(1) tcb_cte_cases_aligned_helpers(1)) apply (frule(1) tcb_cte_cases_aligned_helpers(2)) @@ -2596,6 +2624,25 @@ lemma updateMDB_ctes_of: crunch aligned[wp]: updateMDB "pspace_aligned'" crunch pdistinct[wp]: updateMDB "pspace_distinct'" +crunch tcbSchedPrevs_of[wp]: updateMDB "\s. P (tcbSchedPrevs_of s)" +crunch tcbSchedNexts_of[wp]: updateMDB "\s. P (tcbSchedNexts_of s)" +crunch inQ_opt_pred[wp]: updateMDB "\s. P (inQ d p |< tcbs_of' s)" +crunch inQ_opt_pred'[wp]: updateMDB "\s. P (\d p. inQ d p |< tcbs_of' s)" +crunch ksReadyQueues[wp]: updateMDB "\s. P (ksReadyQueues s)" + (wp: crunch_wps simp: crunch_simps setObject_def updateObject_cte) + +lemma setCTE_rdyq_projs[wp]: + "setCTE p f \\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< tcbs_of' s)\" + apply (rule hoare_lift_Pf2[where f=ksReadyQueues]) + apply (rule hoare_lift_Pf2[where f=tcbSchedNexts_of]) + apply (rule hoare_lift_Pf2[where f=tcbSchedPrevs_of]) + apply wpsimp+ + done + +crunches updateMDB + for rdyq_projs[wp]:"\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. 
inQ d p |< tcbs_of' s)" lemma updateMDB_the_lot: assumes "(x, s'') \ fst (updateMDB p f s')" @@ -2618,7 +2665,11 @@ lemma updateMDB_the_lot: ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ ksDomSchedule s'' = ksDomSchedule s' \ ksCurDomain s'' = ksCurDomain s' \ - ksDomainTime s'' = ksDomainTime s'" + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" using assms apply (simp add: updateMDB_eqs updateMDB_pspace_relations split del: if_split) apply (frule (1) updateMDB_ctes_of) @@ -2627,9 +2678,8 @@ using assms apply (erule use_valid) apply wp apply simp - apply (erule use_valid) - apply wp - apply simp + apply (erule use_valid, wpsimp wp: hoare_vcg_all_lift) + apply (simp add: comp_def) done lemma is_cap_revocable_eq: @@ -3792,6 +3842,9 @@ lemma updateUntypedCap_descendants_of: apply (clarsimp simp:mdb_next_rel_def mdb_next_def split:if_splits) done +crunches setCTE + for tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + lemma setCTE_UntypedCap_corres: "\cap_relation cap (cteCap cte); is_untyped_cap cap; idx' = idx\ \ corres dc (cte_wp_at ((=) cap) src and valid_objs and @@ -3821,10 +3874,19 @@ lemma setCTE_UntypedCap_corres: apply assumption apply (clarsimp simp: pspace_relations_def) apply (subst conj_assoc[symmetric]) + apply clarsimp apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def split: if_split_asm Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ready_queues_relation _ _" \ -\) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (rule use_valid[OF _ setCTE_tcbSchedPrevs_of], assumption) + apply (rule use_valid[OF _ setCTE_tcbSchedNexts_of], assumption) + apply (rule use_valid[OF _ setCTE_ksReadyQueues], assumption) + apply (rule use_valid[OF _ setCTE_inQ_opt_pred], assumption) + apply (rule use_valid[OF _ set_cap_exst], assumption) + apply clarsimp apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) @@ -5104,11 +5166,15 @@ lemma updateMDB_the_lot': ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ ksDomSchedule s'' = ksDomSchedule s' \ ksCurDomain s'' = ksCurDomain s' \ - ksDomainTime s'' = ksDomainTime s'" + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" apply (rule updateMDB_the_lot) using assms apply (fastforce simp: pspace_relations_def)+ - done + done lemma cte_map_inj_eq': "\(cte_map p = cte_map p'); @@ -5210,7 +5276,6 @@ lemma cteInsert_corres: apply (thin_tac "ksMachineState t = p" for p t)+ apply (thin_tac "ksCurThread t = p" for p t)+ apply (thin_tac "ksIdleThread t = p" for p t)+ - apply (thin_tac "ksReadyQueues t = p" for p t)+ apply (thin_tac "ksSchedulerAction t = p" for p t)+ apply (clarsimp simp: pspace_relations_def) diff --git a/proof/refine/RISCV64/CSpace_R.thy b/proof/refine/RISCV64/CSpace_R.thy index e6fca94298..951001f24d 100644 --- a/proof/refine/RISCV64/CSpace_R.thy +++ b/proof/refine/RISCV64/CSpace_R.thy @@ -1091,43 +1091,6 @@ lemma bitmapQ_no_L2_orphans_lift: apply (rule hoare_vcg_prop, assumption) done -lemma valid_queues_lift_asm: - assumes tat1: "\d p tcb. \obj_at' (inQ d p) tcb and Q \ f \\_. 
obj_at' (inQ d p) tcb\" - and tat2: "\tcb. \st_tcb_at' runnable' tcb and Q \ f \\_. st_tcb_at' runnable' tcb\" - and prq: "\P. \\s. P (ksReadyQueues s) \ f \\_ s. P (ksReadyQueues s)\" - and prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" - and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" - shows "\Invariants_H.valid_queues and Q\ f \\_. Invariants_H.valid_queues\" - proof - - have tat: "\d p tcb. \obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb and Q\ f - \\_. obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb\" - apply (rule hoare_chain [OF hoare_vcg_conj_lift [OF tat1 tat2]]) - apply (fastforce)+ - done - have tat_combined: "\d p tcb. \obj_at' (inQ d p and runnable' \ tcbState) tcb and Q\ f - \\_. obj_at' (inQ d p and runnable' \ tcbState) tcb\" - apply (rule hoare_chain [OF tat]) - apply (fastforce simp add: obj_at'_and pred_tcb_at'_def o_def)+ - done - show ?thesis unfolding valid_queues_def valid_queues_no_bitmap_def - by (wp tat_combined prq prqL1 prqL2 valid_bitmapQ_lift bitmapQ_no_L2_orphans_lift - bitmapQ_no_L1_orphans_lift hoare_vcg_all_lift hoare_vcg_conj_lift hoare_Ball_helper) - simp_all - qed - -lemmas valid_queues_lift = valid_queues_lift_asm[where Q="\_. True", simplified] - -lemma valid_queues_lift': - assumes tat: "\d p tcb. \\s. \ obj_at' (inQ d p) tcb s\ f \\_ s. \ obj_at' (inQ d p) tcb s\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\valid_queues'\ f \\_. valid_queues'\" - unfolding valid_queues'_def imp_conv_disj - by (wp hoare_vcg_all_lift hoare_vcg_disj_lift tat prq) - -lemma setCTE_norq [wp]: - "\\s. P (ksReadyQueues s)\ setCTE ptr cte \\r s. P (ksReadyQueues s) \" - by (clarsimp simp: valid_def dest!: setCTE_pspace_only) - lemma setCTE_norqL1 [wp]: "\\s. P (ksReadyQueuesL1Bitmap s)\ setCTE ptr cte \\r s. P (ksReadyQueuesL1Bitmap s) \" by (clarsimp simp: valid_def dest!: setCTE_pspace_only) @@ -2226,7 +2189,7 @@ proof - let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" let ?C = "(modify_map (modify_map - (modify_map (ctes_of s(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest (cteMDBNode_update (\a. MDB word1 src (isCapRevocable cap src_cap) (isCapRevocable cap src_cap)))) src (cteMDBNode_update (mdbNext_update (\_. dest)))) word1 (cteMDBNode_update (mdbPrev_update (\_. dest))))" @@ -2582,7 +2545,7 @@ lemma updateMDB_iflive'[wp]: updateMDB p m \\rv s. if_live_then_nonz_cap' s\" apply (clarsimp simp: updateMDB_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp setCTE_iflive') apply (clarsimp elim!: cte_wp_at_weakenE') done @@ -2595,7 +2558,7 @@ lemma updateCap_iflive': updateCap p cap \\rv s. if_live_then_nonz_cap' s\" apply (simp add: updateCap_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp setCTE_iflive') apply (clarsimp elim!: cte_wp_at_weakenE') done @@ -2784,12 +2747,6 @@ lemma setCTE_inQ[wp]: apply (simp_all add: inQ_def) done -lemma setCTE_valid_queues'[wp]: - "\valid_queues'\ setCTE p cte \\rv. valid_queues'\" - apply (simp only: valid_queues'_def imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done - crunch inQ[wp]: cteInsert "\s. P (obj_at' (inQ d p) t s)" (wp: crunch_wps) @@ -2921,7 +2878,7 @@ lemma cteInsert_valid_irq_handlers'[wp]: done lemma setCTE_arch_ctes_of_wp [wp]: - "\\s. P (ksArchState s) (ctes_of s (p \ cte))\ + "\\s. 
P (ksArchState s) ((ctes_of s)(p \ cte))\ setCTE p cte \\rv s. P (ksArchState s) (ctes_of s)\" apply (simp add: setCTE_def ctes_of_setObject_cte) @@ -2950,7 +2907,7 @@ lemma setCTE_irq_states' [wp]: apply (wp setObject_ksMachine) apply (simp add: updateObject_cte) apply (rule hoare_pre) - apply (wp hoare_unless_wp|wpc|simp)+ + apply (wp unless_wp|wpc|simp)+ apply fastforce apply assumption done @@ -3056,7 +3013,7 @@ lemma setCTE_ksMachine[wp]: apply (wp setObject_ksMachine) apply (clarsimp simp: updateObject_cte split: Structures_H.kernel_object.splits) - apply (safe, (wp hoare_unless_wp | simp)+) + apply (safe, (wp unless_wp | simp)+) done crunch ksMachine[wp]: cteInsert "\s. P (ksMachineState s)" @@ -3289,6 +3246,13 @@ lemma cteInsert_untyped_ranges_zero[wp]: apply blast done +crunches cteInsert + for tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: crunch_wps rule: valid_bitmaps_lift) + lemma cteInsert_invs: "\invs' and cte_wp_at' (\c. cteCap c=NullCap) dest and valid_cap' cap and (\s. src \ dest) and (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s) @@ -3297,9 +3261,9 @@ lemma cteInsert_invs: cteInsert cap src dest \\rv. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def) - apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift CSpace_R.valid_queues_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift cteInsert_norq - simp: st_tcb_at'_def) + apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift + valid_irq_node_lift irqs_masked_lift cteInsert_norq + sym_heap_sched_pointers_lift) apply (auto simp: invs'_def valid_state'_def valid_pspace'_def elim: valid_capAligned) done @@ -3603,10 +3567,13 @@ lemma corres_caps_decomposition: "\P. \\s. P (new_ups' s)\ g \\rv s. P (gsUserPages s)\" "\P. \\s. P (new_cns s)\ f \\rv s. P (cns_of_heap (kheap s))\" "\P. \\s. P (new_cns' s)\ g \\rv s. P (gsCNodes s)\" - "\P. \\s. P (new_queues s)\ f \\rv s. P (ready_queues s)\" + "\P. \\s. P (new_ready_queues s)\ f \\rv s. P (ready_queues s)\" "\P. \\s. P (new_action s)\ f \\rv s. P (scheduler_action s)\" "\P. \\s. P (new_sa' s)\ g \\rv s. P (ksSchedulerAction s)\" - "\P. \\s. P (new_rqs' s)\ g \\rv s. P (ksReadyQueues s)\" + "\P. \\s. P (new_ksReadyQueues s) (new_tcbSchedNexts_of s) (new_tcbSchedPrevs_of s) + (\d p. new_inQs d p s)\ + g \\rv s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< (tcbs_of' s))\" "\P. \\s. P (new_di s)\ f \\rv s. P (domain_index s)\" "\P. \\s. P (new_dl s)\ f \\rv s. P (domain_list s)\" "\P. \\s. P (new_cd s)\ f \\rv s. P (cur_domain s)\" @@ -3622,7 +3589,9 @@ lemma corres_caps_decomposition: "\s s'. \ P s; P' s'; (s, s') \ state_relation \ \ sched_act_relation (new_action s) (new_sa' s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ - \ ready_queues_relation (new_queues s) (new_rqs' s')" + \ ready_queues_relation_2 (new_ready_queues s) (new_ksReadyQueues s') + (new_tcbSchedNexts_of s') (new_tcbSchedPrevs_of s') + (\d p. new_inQs d p s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ \ revokable_relation (new_rvk s) (null_filter (new_caps s)) (new_ctes s')" "\s s'. 
\ P s; P' s'; (s, s') \ state_relation \ @@ -3690,8 +3659,9 @@ proof - apply (rule corres_underlying_decomposition [OF x]) apply (simp add: ghost_relation_of_heap) apply (wp hoare_vcg_conj_lift mdb_wp rvk_wp list_wp u abs_irq_together)+ - apply (intro z[simplified o_def] conjI | simp add: state_relation_def pspace_relations_def swp_cte_at - | (clarsimp, drule (1) z(6), simp add: state_relation_def pspace_relations_def swp_cte_at))+ + apply (intro z[simplified o_def] conjI + | simp add: state_relation_def pspace_relations_def swp_cte_at + | (clarsimp, drule (1) z(6), simp add: state_relation_def))+ done qed @@ -3796,7 +3766,7 @@ lemma create_reply_master_corres: apply clarsimp apply (rule corres_caps_decomposition) defer - apply (wp|simp)+ + apply (wp|simp add: o_def split del: if_splits)+ apply (clarsimp simp: o_def cdt_relation_def cte_wp_at_ctes_of split del: if_split cong: if_cong simp del: id_apply) apply (case_tac cte, clarsimp) @@ -3969,8 +3939,9 @@ lemma setupReplyMaster_corres: cte_wp_at' ((=) rv) (cte_map (t, tcb_cnode_index 2))" in hoare_strengthen_post) apply (wp hoare_drop_imps getCTE_wp') + apply (rename_tac rv s) apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) - apply (case_tac r, fastforce elim: valid_nullcapsE) + apply (case_tac rv, fastforce elim: valid_nullcapsE) apply (fastforce elim: tcb_at_cte_at) apply (clarsimp simp: cte_at'_obj_at' tcb_cte_cases_def cte_map_def) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) @@ -4171,6 +4142,9 @@ crunches setupReplyMaster and ready_queuesL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. 
P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers (wp: crunch_wps simp: crunch_simps rule: irqs_masked_lift) lemma setupReplyMaster_vms'[wp]: @@ -4199,7 +4173,8 @@ lemma setupReplyMaster_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp setupReplyMaster_valid_pspace' sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift - valid_queues_lift cur_tcb_lift valid_queues_lift' hoare_vcg_disj_lift + valid_queues_lift cur_tcb_lift hoare_vcg_disj_lift sym_heap_sched_pointers_lift + valid_bitmaps_lift valid_irq_node_lift | simp)+ apply (clarsimp simp: ex_nonz_tcb_cte_caps' valid_pspace'_def objBits_simps' tcbReplySlot_def @@ -4463,8 +4438,8 @@ lemma arch_update_setCTE_invs: apply (wp arch_update_setCTE_mdb valid_queues_lift sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift arch_update_setCTE_iflive arch_update_setCTE_ifunsafe valid_irq_node_lift setCTE_typ_at' setCTE_irq_handlers' - valid_queues_lift' setCTE_pred_tcb_at' irqs_masked_lift - setCTE_norq hoare_vcg_disj_lift untyped_ranges_zero_lift + setCTE_pred_tcb_at' irqs_masked_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift valid_bitmaps_lift | simp add: pred_tcb_at'_def)+ apply (clarsimp simp: valid_global_refs'_def is_arch_update'_def fun_upd_def[symmetric] cte_wp_at_ctes_of isCap_simps untyped_ranges_zero_fun_upd) @@ -5839,7 +5814,7 @@ lemma cteInsert_simple_invs: apply (rule hoare_pre) apply (simp add: invs'_def valid_state'_def valid_pspace'_def) apply (wp cur_tcb_lift sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift + valid_irq_node_lift irqs_masked_lift sym_heap_sched_pointers_lift cteInsert_simple_mdb' cteInsert_valid_globals_simple cteInsert_norq | simp add: pred_tcb_at'_def)+ apply (auto simp: invs'_def valid_state'_def valid_pspace'_def @@ -5974,6 +5949,21 @@ lemma arch_update_updateCap_invs: apply clarsimp done +lemma setCTE_set_cap_ready_queues_relation_valid_corres: + assumes pre: "ready_queues_relation s s'" + assumes step_abs: "(x, t) \ fst (set_cap cap slot s)" + assumes step_conc: "(y, t') \ fst (setCTE slot' cap' s')" + shows "ready_queues_relation t t'" + apply (clarsimp simp: ready_queues_relation_def) + apply (insert pre) + apply (rule use_valid[OF step_abs set_cap_exst]) + apply (rule use_valid[OF step_conc setCTE_ksReadyQueues]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedNexts_of]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedPrevs_of]) + apply (clarsimp simp: ready_queues_relation_def Let_def) + using use_valid[OF step_conc setCTE_inQ_opt_pred] + by fast + lemma updateCap_same_master: "\ cap_relation cap cap' \ \ corres dc (valid_objs and pspace_aligned and pspace_distinct and @@ -6005,6 +5995,8 @@ lemma updateCap_same_master: apply assumption apply (clarsimp simp: pspace_relations_def) apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ready_queues_relation a b" for a b \ -\) + subgoal by (erule setCTE_set_cap_ready_queues_relation_valid_corres; assumption) apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def @@ -6235,8 +6227,9 @@ lemma updateFreeIndex_forward_invs': apply (simp add:updateCap_def) apply (wp setCTE_irq_handlers' getCTE_wp) apply (simp add:updateCap_def) - apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift + apply (wp irqs_masked_lift cur_tcb_lift 
ct_idle_or_in_cur_domain'_lift hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp + sym_heap_sched_pointers_lift valid_bitmaps_lift | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] | simp add: getSlotCap_def)+ apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) diff --git a/proof/refine/RISCV64/Detype_R.thy b/proof/refine/RISCV64/Detype_R.thy index fbe6f5623c..42d0703b7d 100644 --- a/proof/refine/RISCV64/Detype_R.thy +++ b/proof/refine/RISCV64/Detype_R.thy @@ -99,6 +99,9 @@ defs deletionIsSafe_def: t \ mask_range ptr bits) \ (\ko. ksPSpace s p = Some (KOArch ko) \ p \ mask_range ptr bits \ 6 \ bits)" +defs deletionIsSafe_delete_locale_def: + "deletionIsSafe_delete_locale \ \ptr bits s. \p. ko_wp_at' live' p s \ p \ mask_range ptr bits" + defs ksASIDMapSafe_def: "ksASIDMapSafe \ \s. True" @@ -115,6 +118,7 @@ lemma deleteObjects_def2: "is_aligned ptr bits \ deleteObjects ptr bits = do stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; doMachineOp (freeMemory ptr bits); stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ mask_range ptr bits)) []; modify (\s. s \ ksPSpace := \x. if x \ mask_range ptr bits @@ -125,7 +129,8 @@ lemma deleteObjects_def2: then None else gsCNodes s x \); stateAssert ksASIDMapSafe [] od" - apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def) + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def) + apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (simp add: bind_assoc[symmetric]) @@ -148,6 +153,7 @@ lemma deleteObjects_def3: do assert (is_aligned ptr bits); stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; doMachineOp (freeMemory ptr bits); stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ mask_range ptr bits)) []; modify (\s. s \ ksPSpace := \x. if x \ mask_range ptr bits @@ -418,6 +424,7 @@ next qed end + locale detype_locale' = detype_locale + constrains s::"det_state" lemma (in detype_locale') deletionIsSafe: @@ -514,150 +521,6 @@ qed context begin interpretation Arch . (*FIXME: arch_split*) -(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) -(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) -(* FIXME: move *) -lemma corres_machine_op: - assumes P: "corres_underlying Id False True r P Q x x'" - shows "corres r (P \ machine_state) (Q \ ksMachineState) - (do_machine_op x) (doMachineOp x')" - apply (rule corres_submonad3 - [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) - apply (simp_all add: state_relation_def swp_def) - done - -lemma ekheap_relation_detype: - "ekheap_relation ekh kh \ - ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" - by (fastforce simp add: ekheap_relation_def split: if_split_asm) - -lemma cap_table_at_gsCNodes_eq: - "(s, s') \ state_relation - \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" - apply (clarsimp simp: state_relation_def ghost_relation_def - obj_at_def is_cap_table) - apply (drule_tac x = ptr in spec)+ - apply (drule_tac x = bits in spec)+ - apply fastforce - done - -lemma cNodeNoPartialOverlap: - "corres dc (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s - \ valid_objs s \ pspace_aligned s) - \ - (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) - (\x. 
base \ x \ x \ base + mask magnitude)) [])" - apply (simp add: stateAssert_def assert_def) - apply (rule corres_symb_exec_r[OF _ get_sp]) - apply (rule corres_req[rotated], subst if_P, assumption) - apply simp - apply (clarsimp simp: cNodePartialOverlap_def) - apply (drule(1) cte_wp_valid_cap) - apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq - obj_at_def is_cap_table) - apply (frule(1) pspace_alignedD) - apply (simp add: add_mask_fold) - apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) - apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) - apply (clarsimp simp: is_aligned_no_overflow_mask add_mask_fold) - apply (blast intro: order_trans) - apply (simp add: is_aligned_no_overflow_mask power_overflow word_bits_def) - apply wp+ - done - -declare wrap_ext_det_ext_ext_def[simp] - - -lemma sym_refs_hyp_refs_triv[simp]: "sym_refs (state_hyp_refs_of s)" - apply (clarsimp simp: state_hyp_refs_of_def sym_refs_def) - by (case_tac "kheap s x"; simp) - -lemma deleteObjects_corres: - "is_aligned base magnitude \ magnitude \ 3 \ - corres dc - (\s. einvs s - \ s \ (cap.UntypedCap d base magnitude idx) - \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s - \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) - \ untyped_children_in_mdb s \ if_unsafe_then_cap s - \ valid_mdb s \ valid_global_refs s \ ct_active s) - (\s. s \' (UntypedCap d base magnitude idx) - \ valid_pspace' s) - (delete_objects base magnitude) (deleteObjects base magnitude)" - apply (simp add: deleteObjects_def2) - apply (rule corres_stateAssert_implied[where P'=\, simplified]) - prefer 2 - apply clarsimp - apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and - s=s in detype_locale'.deletionIsSafe, - simp_all add: detype_locale'_def - detype_locale_def p_assoc_help invs_valid_pspace)[1] - apply (simp add:valid_cap_simps) - apply (simp add: bind_assoc[symmetric] ksASIDMapSafe_def) - apply (simp add: delete_objects_def) - apply (rule_tac Q="\_ s. valid_objs s \ valid_list s \ - (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ - descendants_range (cap.UntypedCap d base magnitude idx) cref s ) \ - s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ - valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ - zombies_final s \ sym_refs (state_refs_of s) \ - untyped_children_in_mdb s \ if_unsafe_then_cap s \ - valid_global_refs s" and - Q'="\_ s. 
s \' capability.UntypedCap d base magnitude idx \ - valid_pspace' s" in corres_underlying_split) - apply (rule corres_bind_return) - apply (rule corres_guard_imp[where r=dc]) - apply (rule corres_split[OF _ cNodeNoPartialOverlap]) - apply (rule corres_machine_op[OF corres_Id], simp+) - apply (rule no_fail_freeMemory, simp+) - apply (wp hoare_vcg_ex_lift)+ - apply auto[1] - apply (auto elim: is_aligned_weaken)[1] - apply (rule corres_modify) - apply (simp add: valid_pspace'_def) - apply (rule state_relation_null_filterE, assumption, - simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] - apply (simp add: detype_def, rule state.equality; simp add: detype_ext_def) - apply (intro exI, fastforce) - apply (rule ext, clarsimp simp add: null_filter_def) - apply (rule sym, rule ccontr, clarsimp) - apply (drule(4) cte_map_not_null_outside') - apply (fastforce simp add: cte_wp_at_caps_of_state) - apply simp - apply (rule ext, clarsimp simp add: null_filter'_def - map_to_ctes_delete[simplified field_simps]) - apply (rule sym, rule ccontr, clarsimp) - apply (frule(2) pspace_relation_cte_wp_atI - [OF state_relation_pspace_relation]) - apply (elim exE) - apply (frule(4) cte_map_not_null_outside') - apply (rule cte_wp_at_weakenE, erule conjunct1) - apply (case_tac y, clarsimp) - apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def - valid_nullcaps_def) - apply clarsimp - apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, - erule cte_wp_at_weakenE[OF _ TrueI], assumption+) - apply (simp add: add_mask_fold) - apply (simp add: add_mask_fold) - apply (rule detype_pspace_relation[simplified], - simp_all add: state_relation_pspace_relation valid_pspace_def)[1] - apply (simp add: valid_cap'_def capAligned_def) - apply (clarsimp simp: valid_cap_def, assumption) - apply (fastforce simp add: detype_def detype_ext_def add_mask_fold intro!: ekheap_relation_detype) - apply (clarsimp simp: state_relation_def ghost_relation_of_heap - detype_def) - apply (drule_tac t="gsUserPages s'" in sym) - apply (drule_tac t="gsCNodes s'" in sym) - apply (auto simp add: ups_of_heap_def cns_of_heap_def ext add_mask_fold - split: option.splits kernel_object.splits)[1] - apply (simp add: valid_mdb_def) - apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | - simp add: invs_def valid_state_def valid_pspace_def - descendants_range_def | wp (once) hoare_drop_imps)+ - done - - text \Invariant preservation across concrete deletion\ lemma caps_containedD': @@ -696,89 +559,92 @@ lemma zobj_refs_capRange: "capAligned c \ zobj_refs' c \ capRange c" by (cases c, simp_all add: capRange_def capAligned_def is_aligned_no_overflow) end + locale delete_locale = - fixes s and base and bits and ptr and idx and d - assumes cap: "cte_wp_at' (\cte. cteCap cte = UntypedCap d base bits idx) ptr s" - and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s)" - and invs: "invs' s" - and ct_act: "ct_active' s" - and sa_simp: "sch_act_simple s" - and bwb: "bits < word_bits" + fixes s' and base and bits and ptr and idx and d + assumes cap: "cte_wp_at' (\cte. cteCap cte = UntypedCap d base bits idx) ptr s'" + and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s')" + and invs: "invs' s'" + and ct_act: "ct_active' s'" + and sa_simp: "sch_act_simple s'" and al: "is_aligned base bits" - and safe: "deletionIsSafe base bits s" + and safe: "deletionIsSafe base bits s'" context delete_locale begin interpretation Arch . 
(*FIXME: arch_split*) -lemma valid_objs: "valid_objs' s" - and pa: "pspace_aligned' s" - and pc: "pspace_canonical' s" - and pkm: "pspace_in_kernel_mappings' s" - and pd: "pspace_distinct' s" - and vq: "valid_queues s" - and vq': "valid_queues' s" - and sym_refs: "sym_refs (state_refs_of' s)" - and iflive: "if_live_then_nonz_cap' s" - and ifunsafe: "if_unsafe_then_cap' s" - and dlist: "valid_dlist (ctes_of s)" - and no_0: "no_0 (ctes_of s)" - and chain_0: "mdb_chain_0 (ctes_of s)" - and badges: "valid_badges (ctes_of s)" - and contained: "caps_contained' (ctes_of s)" - and chunked: "mdb_chunked (ctes_of s)" - and umdb: "untyped_mdb' (ctes_of s)" - and uinc: "untyped_inc' (ctes_of s)" - and nullcaps: "valid_nullcaps (ctes_of s)" - and ut_rev: "ut_revocable' (ctes_of s)" - and dist_z: "distinct_zombies (ctes_of s)" - and irq_ctrl: "irq_control (ctes_of s)" - and clinks: "class_links (ctes_of s)" - and rep_r_fb: "reply_masters_rvk_fb (ctes_of s)" - and idle: "valid_idle' s" - and refs: "valid_global_refs' s" - and arch: "valid_arch_state' s" - and virq: "valid_irq_node' (irq_node' s) s" - and virqh: "valid_irq_handlers' s" - and virqs: "valid_irq_states' s" - and no_0_objs: "no_0_obj' s" - and ctnotinQ: "ct_not_inQ s" - and irqs_masked: "irqs_masked' s" - and ctcd: "ct_idle_or_in_cur_domain' s" - and cdm: "ksCurDomain s \ maxDomain" - and vds: "valid_dom_schedule' s" +lemma valid_objs: "valid_objs' s'" + and pa: "pspace_aligned' s'" + and pc: "pspace_canonical' s'" + and pkm: "pspace_in_kernel_mappings' s'" + and pd: "pspace_distinct' s'" + and vbm: "valid_bitmaps s'" + and sym_sched: "sym_heap_sched_pointers s'" + and vsp: "valid_sched_pointers s'" + and sym_refs: "sym_refs (state_refs_of' s')" + and iflive: "if_live_then_nonz_cap' s'" + and ifunsafe: "if_unsafe_then_cap' s'" + and dlist: "valid_dlist (ctes_of s')" + and no_0: "no_0 (ctes_of s')" + and chain_0: "mdb_chain_0 (ctes_of s')" + and badges: "valid_badges (ctes_of s')" + and contained: "caps_contained' (ctes_of s')" + and chunked: "mdb_chunked (ctes_of s')" + and umdb: "untyped_mdb' (ctes_of s')" + and uinc: "untyped_inc' (ctes_of s')" + and nullcaps: "valid_nullcaps (ctes_of s')" + and ut_rev: "ut_revocable' (ctes_of s')" + and dist_z: "distinct_zombies (ctes_of s')" + and irq_ctrl: "irq_control (ctes_of s')" + and clinks: "class_links (ctes_of s')" + and rep_r_fb: "reply_masters_rvk_fb (ctes_of s')" + and idle: "valid_idle' s'" + and refs: "valid_global_refs' s'" + and arch: "valid_arch_state' s'" + and virq: "valid_irq_node' (irq_node' s') s'" + and virqh: "valid_irq_handlers' s'" + and virqs: "valid_irq_states' s'" + and no_0_objs: "no_0_obj' s'" + and ctnotinQ: "ct_not_inQ s'" + and irqs_masked: "irqs_masked' s'" + and ctcd: "ct_idle_or_in_cur_domain' s'" + and cdm: "ksCurDomain s' \ maxDomain" + and vds: "valid_dom_schedule' s'" using invs - by (auto simp add: invs'_def valid_state'_def valid_pspace'_def - valid_mdb'_def valid_mdb_ctes_def) + by (auto simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) abbreviation "base_bits \ mask_range base bits" -abbreviation - "state' \ (s \ ksPSpace := \x. if base \ x \ x \ base + mask bits then None else ksPSpace s x \)" +abbreviation pspace' :: pspace where + "pspace' \ \x. if base \ x \ x \ base + mask bits then None else ksPSpace s' x" + +abbreviation state' :: kernel_state where + "state' \ (s' \ ksPSpace := pspace' \)" lemma ko_wp_at'[simp]: - "\P p. (ko_wp_at' P p state') = (ko_wp_at' P p s \ p \ base_bits)" + "\P p. 
(ko_wp_at' P p state') = (ko_wp_at' P p s' \ p \ base_bits)" by (fastforce simp add: ko_wp_at_delete'[OF pd]) lemma obj_at'[simp]: - "\P p. (obj_at' P p state') = (obj_at' P p s \ p \ base_bits)" + "\P p. (obj_at' P p state') = (obj_at' P p s' \ p \ base_bits)" by (fastforce simp add: obj_at'_real_def) lemma typ_at'[simp]: - "typ_at' P p state' = (typ_at' P p s \ p \ base_bits)" + "typ_at' P p state' = (typ_at' P p s' \ p \ base_bits)" by (simp add: typ_at'_def) lemma valid_untyped[simp]: - "s \' UntypedCap d base bits idx" + "s' \' UntypedCap d base bits idx" using cte_wp_at_valid_objs_valid_cap' [OF cap valid_objs] by clarsimp lemma cte_wp_at'[simp]: - "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s \ p \ base_bits)" + "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s' \ p \ base_bits)" by (fastforce simp:cte_wp_at_delete'[where idx = idx,OF valid_untyped pd ]) (* the bits of caps they need for validity argument are within their capRanges *) lemma valid_cap_ctes_pre: - "\c. s \' c \ case c of CNodeCap ref bits g gs \ + "\c. s' \' c \ case c of CNodeCap ref bits g gs \ \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c | Zombie ref (ZombieCNode bits) n \ \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c @@ -806,13 +672,13 @@ lemma valid_cap_ctes_pre: done lemma replycap_argument: - "\p t m r. cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s + "\p t m r. cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s' \ t \ mask_range base bits" using safe by (force simp: deletionIsSafe_def cte_wp_at_ctes_of) lemma valid_cap': - "\p c. \ s \' c; cte_wp_at' (\cte. cteCap cte = c) p s; + "\p c. \ s' \' c; cte_wp_at' (\cte. cteCap cte = c) p s'; capRange c \ mask_range base bits = {} \ \ state' \' c" apply (subgoal_tac "capClass c = PhysicalClass \ capUntypedPtr c \ capRange c") apply (subgoal_tac "capClass c = PhysicalClass \ @@ -836,11 +702,11 @@ lemma valid_cap': done lemma objRefs_notrange: - assumes asms: "ctes_of s p = Some c" "\ isUntypedCap (cteCap c)" + assumes asms: "ctes_of s' p = Some c" "\ isUntypedCap (cteCap c)" shows "capRange (cteCap c) \ base_bits = {}" proof - from cap obtain node - where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" apply (clarsimp simp: cte_wp_at_ctes_of) apply (case_tac cte, simp) done @@ -861,11 +727,11 @@ proof - qed lemma ctes_of_valid [elim!]: - "ctes_of s p = Some cte \ s \' cteCap cte" + "ctes_of s' p = Some cte \ s' \' cteCap cte" by (case_tac cte, simp add: ctes_of_valid_cap' [OF _ valid_objs]) lemma valid_cap2: - "\ cte_wp_at' (\cte. cteCap cte = c) p s \ \ state' \' c" + "\ cte_wp_at' (\cte. cteCap cte = c) p s' \ \ state' \' c" apply (case_tac "isUntypedCap c") apply (drule cte_wp_at_valid_objs_valid_cap' [OF _ valid_objs]) apply (clarsimp simp: valid_cap'_def isCap_simps valid_untyped'_def) @@ -875,7 +741,7 @@ lemma valid_cap2: done lemma ex_nonz_cap_notRange: - "ex_nonz_cap_to' p s \ p \ base_bits" + "ex_nonz_cap_to' p s' \ p \ base_bits" apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) apply (case_tac "isUntypedCap (cteCap cte)") apply (clarsimp simp: isCap_simps) @@ -887,22 +753,276 @@ lemma ex_nonz_cap_notRange: done lemma live_notRange: - "\ ko_wp_at' P p s; \ko. P ko \ live' ko \ \ p \ base_bits" + "\ ko_wp_at' P p s'; \ko. 
P ko \ live' ko \ \ p \ base_bits" apply (drule if_live_then_nonz_capE' [OF iflive ko_wp_at'_weakenE]) apply simp apply (erule ex_nonz_cap_notRange) done +lemma deletionIsSafe_delete_locale_holds: + "deletionIsSafe_delete_locale base bits s'" + by (fastforce dest: live_notRange simp: deletionIsSafe_delete_locale_def) + lemma refs_notRange: - "(x, tp) \ state_refs_of' s y \ y \ base_bits" + "(x, tp) \ state_refs_of' s' y \ y \ base_bits" apply (drule state_refs_of'_elemD) apply (erule live_notRange) apply (rule refs_of_live') apply clarsimp done +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) +(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) +(* FIXME: move *) +lemma corres_machine_op: + assumes P: "corres_underlying Id False True r P Q x x'" + shows "corres r (P \ machine_state) (Q \ ksMachineState) + (do_machine_op x) (doMachineOp x')" + apply (rule corres_submonad3 + [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) + apply (simp_all add: state_relation_def swp_def) + done + +lemma ekheap_relation_detype: + "ekheap_relation ekh kh \ + ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" + by (fastforce simp add: ekheap_relation_def split: if_split_asm) + +lemma cap_table_at_gsCNodes_eq: + "(s, s') \ state_relation + \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" + apply (clarsimp simp: state_relation_def ghost_relation_def + obj_at_def is_cap_table) + apply (drule_tac x = ptr in spec)+ + apply (drule_tac x = bits in spec)+ + apply fastforce + done + +lemma cNodeNoPartialOverlap: + "corres dc (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ valid_objs s \ pspace_aligned s) + \ + (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) + (\x. base \ x \ x \ base + mask magnitude)) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: cNodePartialOverlap_def) + apply (drule(1) cte_wp_valid_cap) + apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq + obj_at_def is_cap_table) + apply (frule(1) pspace_alignedD) + apply (simp add: add_mask_fold) + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow_mask add_mask_fold) + apply (blast intro: order_trans) + apply (simp add: is_aligned_no_overflow_mask power_overflow word_bits_def) + apply wp+ + done + +declare wrap_ext_det_ext_ext_def[simp] + +lemma sym_refs_hyp_refs_triv[simp]: "sym_refs (state_hyp_refs_of s')" + apply (clarsimp simp: state_hyp_refs_of_def sym_refs_def) + by (case_tac "kheap s' x"; simp) + +crunches doMachineOp + for deletionIsSafe_delete_locale[wp]: "deletionIsSafe_delete_locale base magnitude" + (simp: deletionIsSafe_delete_locale_def) + +lemma detype_tcbSchedNexts_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. 
if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedNext) + = tcbSchedNexts_of s'" + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_tcbSchedPrevs_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedPrev) + = tcbSchedPrevs_of s'" + using pspace_alignedD' pspace_distinctD' + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_inQ: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ \d p. (inQ d p |< ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of')) + = (inQ d p |< tcbs_of' s')" + using pspace_alignedD' pspace_distinctD' + using pspace_alignedD' pspace_distinctD' + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: inQ_def opt_pred_def ko_wp_at'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_ready_queues_relation: + "\pspace_aligned' s'; pspace_distinct' s'; + \p. p \ {lower..upper} \ \ ko_wp_at' live' p s'; + ready_queues_relation s s'; upper = upper'\ + \ ready_queues_relation_2 + (ready_queues (detype {lower..upper'} s)) + (ksReadyQueues s') + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedNext) + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedPrev) + (\d p. inQ d p |< ((\x. if lower \ x \ x \ upper then None else ksPSpace s' x) |> tcb_of'))" + apply (clarsimp simp: detype_ext_def ready_queues_relation_def Let_def) + apply (frule (1) detype_tcbSchedNexts_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_tcbSchedPrevs_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_inQ[where S="{lower..upper}"]; simp) + apply (fastforce simp add: detype_def detype_ext_def) + done + +lemma deleteObjects_corres: + "is_aligned base magnitude \ magnitude \ 3 \ + corres dc + (\s. einvs s + \ s \ (cap.UntypedCap d base magnitude idx) + \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) + \ untyped_children_in_mdb s \ if_unsafe_then_cap s + \ valid_mdb s \ valid_global_refs s \ ct_active s + \ schact_is_rct s) + (\s'. invs' s' + \ cte_wp_at' (\cte. 
cteCap cte = UntypedCap d base magnitude idx) ptr s' + \ descendants_range' (UntypedCap d base magnitude idx) ptr (ctes_of s') + \ ct_active' s' + \ s' \' (UntypedCap d base magnitude idx)) + (delete_objects base magnitude) (deleteObjects base magnitude)" + apply (simp add: deleteObjects_def2) + apply (rule corres_stateAssert_implied[where P'=\, simplified]) + prefer 2 + apply clarsimp + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add: valid_cap_simps) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (rule_tac ptr=ptr and idx=idx and d=d in delete_locale.deletionIsSafe_delete_locale_holds) + apply (clarsimp simp: delete_locale_def) + apply (intro conjI) + apply (fastforce simp: sch_act_simple_def schact_is_rct_def state_relation_def) + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add: valid_cap_simps) + apply (simp add: bind_assoc[symmetric] ksASIDMapSafe_def) + apply (simp add: delete_objects_def) + apply (rule_tac Q="\_ s. valid_objs s \ valid_list s \ + (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ + descendants_range (cap.UntypedCap d base magnitude idx) cref s ) \ + s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ + valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ + zombies_final s \ sym_refs (state_refs_of s) \ + untyped_children_in_mdb s \ if_unsafe_then_cap s \ + valid_global_refs s" + and Q'="\_ s. s \' capability.UntypedCap d base magnitude idx \ + valid_pspace' s \ deletionIsSafe_delete_locale base magnitude s" + in corres_underlying_split) + apply (rule corres_bind_return) + apply (rule corres_guard_imp[where r=dc]) + apply (rule corres_split[OF _ cNodeNoPartialOverlap]) + apply (rule corres_machine_op[OF corres_Id], simp+) + apply (rule no_fail_freeMemory, simp+) + apply (wp hoare_vcg_ex_lift)+ + apply auto[1] + apply (auto elim: is_aligned_weaken)[1] + apply (rule corres_modify) + apply (simp add: valid_pspace'_def) + apply (rule state_relation_null_filterE, assumption, + simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] + apply (simp add: detype_def, rule state.equality; simp add: detype_ext_def) + apply (intro exI, fastforce) + apply (rule ext, clarsimp simp add: null_filter_def) + apply (rule sym, rule ccontr, clarsimp) + apply (drule(4) cte_map_not_null_outside') + apply (fastforce simp add: cte_wp_at_caps_of_state) + apply simp + apply (rule ext, clarsimp simp: null_filter'_def map_to_ctes_delete) + apply (rule sym, rule ccontr, clarsimp) + apply (frule(2) pspace_relation_cte_wp_atI[OF state_relation_pspace_relation]) + apply (elim exE) + apply (frule (4) cte_map_not_null_outside') + apply (rule cte_wp_at_weakenE, erule conjunct1) + apply (case_tac y, clarsimp) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def valid_nullcaps_def) + apply clarsimp + apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, + erule cte_wp_at_weakenE[OF _ TrueI], assumption+) + apply (simp add: add_mask_fold) + apply (simp add: add_mask_fold) + apply (rule detype_pspace_relation[simplified], + simp_all add: state_relation_pspace_relation valid_pspace_def)[1] + apply (simp add: valid_cap'_def capAligned_def) + apply (clarsimp simp: valid_cap_def, assumption) + apply (fastforce simp: 
detype_def detype_ext_def add_mask_fold intro!: ekheap_relation_detype) + apply (simp add: add_mask_fold) + apply (rule detype_ready_queues_relation; blast?) + apply (clarsimp simp: deletionIsSafe_delete_locale_def) + apply (frule state_relation_ready_queues_relation) + apply (simp add: ready_queues_relation_def Let_def) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap detype_def) + apply (drule_tac t="gsUserPages s'" in sym) + apply (drule_tac t="gsCNodes s'" in sym) + apply (auto simp: ups_of_heap_def cns_of_heap_def ext add_mask_fold + split: option.splits kernel_object.splits)[1] + apply (simp add: valid_mdb_def) + apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | + simp add: invs_def valid_state_def valid_pspace_def + descendants_range_def | wp (once) hoare_drop_imps)+ + apply fastforce + done +end + +context delete_locale begin interpretation Arch . (*FIXME: arch_split*) + +lemma live_idle_untyped_range': + "ko_wp_at' live' p s' \ p = idle_thread_ptr \ p \ base_bits" + apply (case_tac "ko_wp_at' live' p s'") + apply (drule if_live_then_nonz_capE'[OF iflive ko_wp_at'_weakenE]) + apply simp + apply (erule ex_nonz_cap_notRange) + apply clarsimp + apply (insert invs_valid_global'[OF invs] cap invs_valid_idle'[OF invs]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_global_refsD') + apply (clarsimp simp: valid_idle'_def) + using atLeastAtMost_iff apply (simp add: p_assoc_help mask_eq_exp_minus_1) + by fastforce + +lemma untyped_range_live_idle': + "p \ base_bits \ \ (ko_wp_at' live' p s' \ p = idle_thread_ptr)" + using live_idle_untyped_range' by blast lemma valid_obj': - "\ valid_obj' obj s; ko_wp_at' ((=) obj) p s \ \ valid_obj' obj state'" + "\ valid_obj' obj s'; ko_wp_at' ((=) obj) p s'; sym_heap_sched_pointers s' \ + \ valid_obj' obj state'" apply (case_tac obj, simp_all add: valid_obj'_def) apply (rename_tac endpoint) apply (case_tac endpoint, simp_all add: valid_ep'_def)[1] @@ -929,10 +1049,21 @@ lemma valid_obj': apply (erule(2) cte_wp_at_tcbI') apply fastforce apply simp - apply (rename_tac tcb) - apply (case_tac "tcbState tcb"; - clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def - dest!: refs_notRange split: option.splits) + apply (intro conjI) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; clarsimp simp: valid_tcb_state'_def dest!: refs_notRange) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; + clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def + dest!: refs_notRange split: option.splits) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac prev) + apply (cut_tac P=live' and p=prev in live_notRange; fastforce?) + apply (fastforce dest: sym_heapD2[where p'=p] simp: opt_map_def ko_wp_at'_def obj_at'_def) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac "next") + apply (cut_tac P=live' and p="next" in live_notRange; fastforce?) 
+ apply (fastforce dest!: sym_heapD1[where p=p] simp: opt_map_def ko_wp_at'_def obj_at'_def) apply (clarsimp simp: valid_cte'_def) apply (rule_tac p=p in valid_cap2) apply (clarsimp simp: ko_wp_at'_def objBits_simps' cte_level_bits_def[symmetric]) @@ -940,17 +1071,49 @@ lemma valid_obj': apply simp done +lemma tcbSchedNexts_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedNext) = tcbSchedNexts_of s'" + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def) + apply (fastforce simp: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + +lemma tcbSchedPrevs_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedPrev) = tcbSchedPrevs_of s'" + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def) + apply (fastforce simp: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + lemma st_tcb: - "\P p. \ st_tcb_at' P p s; \ P Inactive; \ P IdleThreadState \ \ st_tcb_at' P p state'" - by (fastforce simp: pred_tcb_at'_def obj_at'_real_def - dest: live_notRange) + "\P p. \ st_tcb_at' P p s'; \ P Inactive; \ P IdleThreadState \ \ st_tcb_at' P p state'" + by (fastforce simp: pred_tcb_at'_def obj_at'_real_def + dest: live_notRange) lemma irq_nodes_global: - "\irq :: irq. irq_node' s + (ucast irq << cteSizeBits) \ global_refs' s" + "\irq :: irq. irq_node' s' + (ucast irq << cteSizeBits) \ global_refs' s'" by (simp add: global_refs'_def) lemma global_refs: - "global_refs' s \ base_bits = {}" + "global_refs' s' \ base_bits = {}" using cap apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule valid_global_refsD' [OF _ refs]) @@ -958,20 +1121,20 @@ lemma global_refs: done lemma global_refs2: - "global_refs' s \ (- base_bits)" + "global_refs' s' \ (- base_bits)" using global_refs by blast lemma irq_nodes_range: - "\irq :: irq. irq_node' s + (ucast irq << cteSizeBits) \ base_bits" + "\irq :: irq. irq_node' s' + (ucast irq << cteSizeBits) \ base_bits" using irq_nodes_global global_refs by blast lemma cte_refs_notRange: - assumes asms: "ctes_of s p = Some c" - shows "cte_refs' (cteCap c) (irq_node' s) \ base_bits = {}" + assumes asms: "ctes_of s' p = Some c" + shows "cte_refs' (cteCap c) (irq_node' s') \ base_bits = {}" proof - from cap obtain node - where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" apply (clarsimp simp: cte_wp_at_ctes_of) apply (case_tac cte, simp) done @@ -1000,7 +1163,7 @@ proof - qed lemma non_null_present: - "cte_wp_at' (\c. cteCap c \ NullCap) p s \ p \ base_bits" + "cte_wp_at' (\c. 
cteCap c \ NullCap) p s' \ p \ base_bits" apply (drule (1) if_unsafe_then_capD' [OF _ ifunsafe]) apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of dest!: cte_refs_notRange simp del: atLeastAtMost_iff) @@ -1008,7 +1171,7 @@ lemma non_null_present: done lemma cte_cap: - "ex_cte_cap_to' p s \ ex_cte_cap_to' p state'" + "ex_cte_cap_to' p s' \ ex_cte_cap_to' p state'" apply (clarsimp simp: ex_cte_cap_to'_def) apply (frule non_null_present [OF cte_wp_at_weakenE']) apply clarsimp @@ -1016,37 +1179,37 @@ lemma cte_cap: done lemma idle_notRange: - "\cref. \ cte_wp_at' (\c. ksIdleThread s \ capRange (cteCap c)) cref s - \ ksIdleThread s \ base_bits" + "\cref. \ cte_wp_at' (\c. ksIdleThread s' \ capRange (cteCap c)) cref s' + \ ksIdleThread s' \ base_bits" apply (insert cap) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule_tac x=ptr in allE, clarsimp simp: field_simps mask_def) done abbreviation - "ctes' \ map_to_ctes (\x. if base \ x \ x \ base + mask bits then None else ksPSpace s x)" + "ctes' \ map_to_ctes (\x. if base \ x \ x \ base + mask bits then None else ksPSpace s' x)" lemmas tree_to_ctes = map_to_ctes_delete [OF valid_untyped pd] lemma map_to_ctesE[elim!]: - "\ ctes' x = Some cte; \ ctes_of s x = Some cte; x \ base_bits \ \ P \ \ P" + "\ ctes' x = Some cte; \ ctes_of s' x = Some cte; x \ base_bits \ \ P \ \ P" by (clarsimp simp: tree_to_ctes split: if_split_asm) lemma not_nullMDBNode: - "\ ctes_of s x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" + "\ ctes_of s' x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" using nullcaps apply (cases cte) apply (simp add: valid_nullcaps_def) done -lemma mdb_src: "\ ctes_of s \ x \ y; y \ 0 \ \ x \ base_bits" +lemma mdb_src: "\ ctes_of s' \ x \ y; y \ 0 \ \ x \ base_bits" apply (rule non_null_present) apply (clarsimp simp: next_unfold' cte_wp_at_ctes_of) apply (erule(1) not_nullMDBNode) apply (simp add: nullMDBNode_def nullPointer_def) done -lemma mdb_dest: "\ ctes_of s \ x \ y; y \ 0 \ \ y \ base_bits" +lemma mdb_dest: "\ ctes_of s' \ x \ y; y \ 0 \ \ y \ base_bits" apply (case_tac "x = 0") apply (insert no_0, simp add: next_unfold')[1] apply (drule(1) vdlist_nextD0 [OF _ _ dlist]) @@ -1057,7 +1220,7 @@ lemma mdb_dest: "\ ctes_of s \ x \ y; y \ 0 \ done lemma trancl_next[elim]: - "\ ctes_of s \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" + "\ ctes_of s' \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" apply (erule rev_mp, erule converse_trancl_induct) apply clarsimp apply (rule r_into_trancl) @@ -1075,14 +1238,14 @@ lemma trancl_next[elim]: done lemma mdb_parent_notrange: - "ctes_of s \ x \ y \ x \ base_bits \ y \ base_bits" + "ctes_of s' \ x \ y \ x \ base_bits \ y \ base_bits" apply (erule subtree.induct) apply (frule(1) mdb_src, drule(1) mdb_dest, simp) apply (drule(1) mdb_dest, simp) done lemma mdb_parent: - "ctes_of s \ x \ y \ ctes' \ x \ y" + "ctes_of s' \ x \ y \ ctes' \ x \ y" apply (erule subtree.induct) apply (frule(1) mdb_src, frule(1) mdb_dest) apply (rule subtree.direct_parent) @@ -1098,7 +1261,7 @@ lemma mdb_parent: done lemma trancl_next_rev: - "ctes' \ x \\<^sup>+ y \ ctes_of s \ x \\<^sup>+ y" + "ctes' \ x \\<^sup>+ y \ ctes_of s' \ x \\<^sup>+ y" apply (erule converse_trancl_induct) apply (rule r_into_trancl) apply (clarsimp simp: next_unfold') @@ -1108,7 +1271,7 @@ lemma trancl_next_rev: done lemma is_chunk[elim!]: - "is_chunk (ctes_of s) cap x y \ is_chunk ctes' cap x y" + "is_chunk (ctes_of s') cap x y \ is_chunk ctes' cap x y" apply (simp 
add: is_chunk_def) apply (erule allEI) apply (clarsimp dest!: trancl_next_rev) @@ -1153,17 +1316,18 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def show "pspace_in_kernel_mappings' ?s" using pkm by (simp add: pspace_in_kernel_mappings'_def dom_def) - show "pspace_distinct' ?s" using pd + show pspace_distinct'_state': "pspace_distinct' ?s" using pd by (clarsimp simp add: pspace_distinct'_def ps_clear_def dom_if_None Diff_Int_distrib) - show "valid_objs' ?s" using valid_objs + show "valid_objs' ?s" using valid_objs sym_sched apply (clarsimp simp: valid_objs'_def ran_def) apply (rule_tac p=a in valid_obj') - apply fastforce - apply (frule pspace_alignedD'[OF _ pa]) - apply (frule pspace_distinctD'[OF _ pd]) - apply (clarsimp simp: ko_wp_at'_def) + apply fastforce + apply (frule pspace_alignedD'[OF _ pa]) + apply (frule pspace_distinctD'[OF _ pd]) + apply (clarsimp simp: ko_wp_at'_def) + apply fastforce done from sym_refs show "sym_refs (state_refs_of' ?s)" @@ -1175,19 +1339,6 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply (simp add: refs_notRange[simplified] state_refs_ko_wp_at_eq) done - from vq show "valid_queues ?s" - apply (clarsimp simp: valid_queues_def bitmapQ_defs) - apply (clarsimp simp: valid_queues_no_bitmap_def) - apply (drule spec, drule spec, drule conjunct1, drule(1) bspec) - apply (clarsimp simp: obj_at'_real_def) - apply (frule if_live_then_nonz_capE'[OF iflive, OF ko_wp_at'_weakenE]) - apply (clarsimp simp: inQ_def) - apply (clarsimp dest!: ex_nonz_cap_notRange) - done - - from vq' show "valid_queues' ?s" - by (simp add: valid_queues'_def) - show "if_live_then_nonz_cap' ?s" using iflive apply (clarsimp simp: if_live_then_nonz_cap'_def) apply (drule spec, drule(1) mp) @@ -1203,7 +1354,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def intro!: cte_cap) from idle_notRange refs - have "ksIdleThread s \ ?ran" + have "ksIdleThread s' \ ?ran" apply (simp add: cte_wp_at_ctes_of valid_global_refs'_def valid_refs'_def) apply blast done @@ -1315,7 +1466,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply simp done - show "valid_irq_node' (irq_node' s) ?s" + show "valid_irq_node' (irq_node' s') ?s" using virq irq_nodes_range by (simp add: valid_irq_node'_def mult.commute mult.left_commute ucast_ucast_mask_8) @@ -1345,7 +1496,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def ball_ran_eq) from virqs - show "valid_irq_states' s" . + show "valid_irq_states' s'" . 
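(* The scheduler-queue goals discharged further below (valid_bitmaps, the sym_heap goal
   for the tcbSchedNext/tcbSchedPrev heaps, and valid_sched_pointers_2) take the place of
   the former valid_queues/valid_queues' obligations. As a hedged sketch only,
   valid_sched_pointers is roughly of the shape

     "\<forall>p. prevs p \<noteq> None \<or> nexts p \<noteq> None \<longrightarrow> queued p"

   i.e. a TCB that is linked into a ready queue via a prev or next pointer must be
   flagged tcbQueued; the precise statement and argument order are those of
   valid_sched_pointers_2 in the queue-model theories. It survives deletion here because
   nothing live, and hence nothing queued, lies inside the deleted region. *)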
from no_0_objs show "no_0_obj' state'" @@ -1356,19 +1507,19 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def by (simp add: irqs_masked'_def) from sa_simp ct_act - show "sch_act_wf (ksSchedulerAction s) state'" + show "sch_act_wf (ksSchedulerAction s') state'" apply (simp add: sch_act_simple_def) - apply (case_tac "ksSchedulerAction s", simp_all add: ct_in_state'_def) + apply (case_tac "ksSchedulerAction s'", simp_all add: ct_in_state'_def) apply (fastforce dest!: st_tcb elim!: pred_tcb'_weakenE) done from invs - have "pspace_domain_valid s" by (simp add: invs'_def valid_state'_def) + have "pspace_domain_valid s'" by (simp add: invs'_def valid_state'_def) thus "pspace_domain_valid state'" by (simp add: pspace_domain_valid_def) from invs - have "valid_machine_state' s" by (simp add: invs'_def valid_state'_def) + have "valid_machine_state' s'" by (simp add: invs'_def valid_state'_def) thus "valid_machine_state' ?state''" apply (clarsimp simp: valid_machine_state'_def) apply (drule_tac x=p in spec) @@ -1421,11 +1572,11 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply (clarsimp dest!: ex_nonz_cap_notRange elim!: ko_wp_at'_weakenE) done - from cdm show "ksCurDomain s \ maxDomain" . + from cdm show "ksCurDomain s' \ maxDomain" . from invs - have urz: "untyped_ranges_zero' s" by (simp add: invs'_def valid_state'_def) - show "untyped_ranges_zero_inv (cteCaps_of state') (gsUntypedZeroRanges s)" + have urz: "untyped_ranges_zero' s'" by (simp add: invs'_def valid_state'_def) + show "untyped_ranges_zero_inv (cteCaps_of state') (gsUntypedZeroRanges s')" apply (simp add: untyped_zero_ranges_cte_def urz[unfolded untyped_zero_ranges_cte_def, rule_format, symmetric]) apply (clarsimp simp: fun_eq_iff intro!: arg_cong[where f=Ex]) @@ -1435,17 +1586,31 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply simp done + from vbm + show "valid_bitmaps state'" + by (simp add: valid_bitmaps_def bitmapQ_defs) + + from sym_sched + show "sym_heap (pspace' |> tcb_of' |> tcbSchedNext) (pspace' |> tcb_of' |> tcbSchedPrev)" + using pa pd pspace_distinct'_state' + by (fastforce simp: tcbSchedNexts_of_pspace' tcbSchedPrevs_of_pspace') + + from vsp show "valid_sched_pointers_2 (pspace' |> tcb_of' |> tcbSchedPrev) + (pspace' |> tcb_of' |> tcbSchedNext) + (tcbQueued |< (pspace' |> tcb_of'))" + by (clarsimp simp: valid_sched_pointers_def opt_pred_def opt_map_def) + qed (clarsimp) lemma (in delete_locale) delete_ko_wp_at': - assumes objs: "ko_wp_at' P p s \ ex_nonz_cap_to' p s" + assumes objs: "ko_wp_at' P p s' \ ex_nonz_cap_to' p s'" shows "ko_wp_at' P p state'" using objs by (clarsimp simp: ko_wp_at'_def ps_clear_def dom_if_None Diff_Int_distrib dest!: ex_nonz_cap_notRange) lemma (in delete_locale) null_filter': - assumes descs: "Q (null_filter' (ctes_of s))" + assumes descs: "Q (null_filter' (ctes_of s'))" shows "Q (null_filter' (ctes_of state'))" using descs ifunsafe apply (clarsimp elim!: rsubst[where P=Q]) @@ -1463,7 +1628,7 @@ lemma (in delete_locale) null_filter': done lemma (in delete_locale) delete_ex_cte_cap_to': - assumes exc: "ex_cte_cap_to' p s" + assumes exc: "ex_cte_cap_to' p s'" shows "ex_cte_cap_to' p state'" using exc by (clarsimp elim!: cte_cap) @@ -1891,34 +2056,17 @@ lemma cte_wp_at_top: tcbReplySlot_def tcbCTableSlot_def tcbVTableSlot_def objBits_simps cteSizeBits_def) apply (simp add: alignCheck_def bind_def alignError_def fail_def return_def objBits_simps - magnitudeCheck_def in_monad is_aligned_mask when_def + magnitudeCheck_def in_monad 
is_aligned_mask when_def unless_def split: option.splits) apply (intro conjI impI allI; simp add: not_le) apply (clarsimp simp:cte_check_def) apply (simp add: alignCheck_def bind_def alignError_def fail_def return_def objBits_simps - magnitudeCheck_def in_monad is_aligned_mask when_def + magnitudeCheck_def in_monad is_aligned_mask when_def unless_def split: option.splits) apply (intro conjI impI allI; simp add:not_le) apply (simp add: typeError_def fail_def cte_check_def split: Structures_H.kernel_object.splits) done - -lemma neq_out_intv: - "\a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" - by simp - -lemma rule_out_intv: - "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a\b \ - \ b \ mask_range a (objBitsKO obj)" - apply (drule(1) pspace_distinctD') - apply (subst (asm) ps_clear_def) - apply (drule_tac x = b in orthD2) - apply fastforce - apply (drule neq_out_intv) - apply (simp add: mask_def add_diff_eq) - apply (simp add: mask_def add_diff_eq) - done - lemma locateCTE_monad: assumes ko_wp_at: "\Q dest. \\s. P1 s \ ko_wp_at' (\obj. Q (objBitsKO obj)) dest s \ f @@ -2010,7 +2158,7 @@ qed lemma empty_fail_locateCTE: "empty_fail (locateCTE src)" - by (simp add:locateCTE_def bind_assoc split_def) + by (fastforce simp: locateCTE_def bind_assoc split_def) lemma fail_empty_locateCTE: "snd (locateCTE src s) \ fst (locateCTE src s) = {}" @@ -2531,7 +2679,7 @@ lemma setCTE_pte_at': lemma storePTE_det: "ko_wp_at' ((=) (KOArch (KOPTE pte))) ptr s \ storePTE ptr (new_pte::pte) s = - modify (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPTE new_pte)))) s" + modify (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPTE new_pte)))) s" apply (clarsimp simp:ko_wp_at'_def storePTE_def split_def bind_def gets_def return_def get_def setObject_def @@ -2866,25 +3014,14 @@ lemma curDomain_commute: crunch inv[wp]: curDomain P lemma placeNewObject_tcb_at': - notes blah[simp del] = atLeastatMost_subset_iff atLeastLessThan_iff - Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex - atLeastAtMost_iff - shows - "\pspace_aligned' and pspace_distinct' - and pspace_no_overlap' ptr (objBits (makeObject::tcb)) - and K(is_aligned ptr (objBits (makeObject::tcb))) - \ placeNewObject ptr (makeObject::tcb) 0 - \\rv s. tcb_at' ptr s \" - apply (simp add:placeNewObject_def placeNewObject'_def split_def alignError_def) + "\pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr (objBits (makeObject::tcb)) + and K (is_aligned ptr (objBits (makeObject::tcb)))\ + placeNewObject ptr (makeObject::tcb) 0 + \\_ s. 
tcb_at' ptr s \" + apply (simp add: placeNewObject_def placeNewObject'_def split_def alignError_def) apply wpsimp - apply (clarsimp simp: obj_at'_def lookupAround2_None1 objBits_simps - lookupAround2_char1 field_simps projectKO_opt_tcb return_def ps_clear_def - simp flip: is_aligned_mask) - apply (drule (1) pspace_no_overlap_disjoint') - apply (clarsimp intro!: set_eqI; - drule_tac m = "ksPSpace s" in domI, - erule in_emptyE, - fastforce elim!: in_emptyE simp:objBits_simps mask_def add_diff_eq) + apply (clarsimp simp: obj_at'_def objBits_simps ps_clear_def) + apply (fastforce intro!: set_eqI dest: pspace_no_overlap_disjoint' simp: add_mask_fold) done lemma monad_commute_if_weak_r: @@ -3299,7 +3436,7 @@ proof - apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) apply simp+ apply (simp add:word_le_sub1) - apply (wp haskell_assert_wp hoare_unless_wp | wpc + apply (wp haskell_assert_wp unless_wp | wpc | simp add:alignError_def if_apply_def2 del: fun_upd_apply hoare_fail_any)+ apply (rule impI) apply (subgoal_tac @@ -3814,7 +3951,7 @@ lemma createObjects_Cons: apply simp apply (wp haskell_assert_wp | wpc)+ apply simp - apply (wp hoare_unless_wp |clarsimp)+ + apply (wp unless_wp |clarsimp)+ apply (drule range_cover.aligned) apply (simp add:is_aligned_mask) done @@ -4066,7 +4203,7 @@ proof - apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) apply simp+ apply (simp add:word_le_sub1) - apply (wp haskell_assert_wp hoare_unless_wp |wpc + apply (wp haskell_assert_wp unless_wp |wpc |simp add:alignError_def del:fun_upd_apply)+ apply (rule conjI) apply (rule impI) @@ -4126,7 +4263,7 @@ lemma createTCBs_tcb_at': \\rv s. (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s)\" apply (simp add:createObjects'_def split_def alignError_def) - apply (wp hoare_unless_wp |wpc)+ + apply (wp unless_wp |wpc)+ apply (subst data_map_insert_def[symmetric])+ apply clarsimp apply (subgoal_tac "(\x\of_nat n. @@ -4787,7 +4924,7 @@ lemma createObject_pspace_aligned_distinct': createObject ty ptr us d \\xa s. 
pspace_aligned' s \ pspace_distinct' s\" apply (rule hoare_pre) - apply (wp placeNewObject_pspace_aligned' hoare_unless_wp + apply (wp placeNewObject_pspace_aligned' unless_wp placeNewObject_pspace_distinct' | simp add: RISCV64_H.createObject_def Retype_H.createObject_def objBits_simps curDomain_def placeNewDataObject_def diff --git a/proof/refine/RISCV64/EmptyFail.thy b/proof/refine/RISCV64/EmptyFail.thy index 42bd0962e0..87316f621c 100644 --- a/proof/refine/RISCV64/EmptyFail.thy +++ b/proof/refine/RISCV64/EmptyFail.thy @@ -19,12 +19,12 @@ lemma empty_fail_projectKO [simp, intro!]: lemma empty_fail_alignCheck [intro!, wp, simp]: "empty_fail (alignCheck a b)" unfolding alignCheck_def - by (simp add: alignError_def) + by (fastforce simp: alignError_def) lemma empty_fail_magnitudeCheck [intro!, wp, simp]: "empty_fail (magnitudeCheck a b c)" unfolding magnitudeCheck_def - by (simp split: option.splits) + by (fastforce split: option.splits) lemma empty_fail_loadObject_default [intro!, wp, simp]: shows "empty_fail (loadObject_default x b c d)" @@ -33,7 +33,7 @@ lemma empty_fail_loadObject_default [intro!, wp, simp]: lemma empty_fail_threadGet [intro!, wp, simp]: "empty_fail (threadGet f p)" - by (simp add: threadGet_def getObject_def split_def) + by (fastforce simp: threadGet_def getObject_def split_def) lemma empty_fail_getCTE [intro!, wp, simp]: "empty_fail (getCTE slot)" @@ -47,12 +47,12 @@ lemma empty_fail_getCTE [intro!, wp, simp]: lemma empty_fail_updateObject_cte [intro!, wp, simp]: "empty_fail (updateObject (v :: cte) ko a b c)" - by (simp add: updateObject_cte typeError_def unless_def split: kernel_object.splits ) + by (fastforce simp: updateObject_cte typeError_def unless_def split: kernel_object.splits ) lemma empty_fail_setCTE [intro!, wp, simp]: "empty_fail (setCTE p cte)" unfolding setCTE_def - by (simp add: setObject_def split_def) + by (fastforce simp: setObject_def split_def) lemma empty_fail_updateCap [intro!, wp, simp]: "empty_fail (updateCap p f)" @@ -64,36 +64,35 @@ lemma empty_fail_updateMDB [intro!, wp, simp]: lemma empty_fail_getSlotCap [intro!, wp, simp]: "empty_fail (getSlotCap a)" - unfolding getSlotCap_def by simp + unfolding getSlotCap_def by fastforce context begin interpretation Arch . (*FIXME: arch_split*) lemma empty_fail_getObject: - assumes x: "(\b c d. empty_fail (loadObject x b c d::'a :: pspace_storable kernel))" + assumes "\b c d. 
empty_fail (loadObject x b c d::'a :: pspace_storable kernel)" shows "empty_fail (getObject x :: 'a :: pspace_storable kernel)" apply (simp add: getObject_def split_def) - apply (safe intro!: empty_fail_bind empty_fail_gets empty_fail_assert_opt) - apply (rule x) + apply (safe intro!: assms) done lemma empty_fail_updateTrackedFreeIndex [intro!, wp, simp]: shows "empty_fail (updateTrackedFreeIndex p idx)" - by (simp add: updateTrackedFreeIndex_def) + by (fastforce simp add: updateTrackedFreeIndex_def) lemma empty_fail_updateNewFreeIndex [intro!, wp, simp]: shows "empty_fail (updateNewFreeIndex p)" apply (simp add: updateNewFreeIndex_def) - apply (safe intro!: empty_fail_bind) + apply safe apply (simp split: capability.split) done lemma empty_fail_insertNewCap [intro!, wp, simp]: "empty_fail (insertNewCap p p' cap)" - unfolding insertNewCap_def by simp + unfolding insertNewCap_def by fastforce lemma empty_fail_getIRQSlot [intro!, wp, simp]: "empty_fail (getIRQSlot irq)" - by (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv) + by (fastforce simp: getIRQSlot_def getInterruptState_def locateSlot_conv) lemma empty_fail_getObject_ntfn [intro!, wp, simp]: "empty_fail (getObject p :: Structures_H.notification kernel)" @@ -106,15 +105,15 @@ lemma empty_fail_getNotification [intro!, wp, simp]: lemma empty_fail_lookupIPCBuffer [intro!, wp, simp]: "empty_fail (lookupIPCBuffer a b)" by (clarsimp simp: lookupIPCBuffer_def Let_def getThreadBufferSlot_def locateSlot_conv - split: capability.splits arch_capability.splits | wp | wpc)+ + split: capability.splits arch_capability.splits | wp | wpc | safe)+ lemma empty_fail_updateObject_default [intro!, wp, simp]: "empty_fail (updateObject_default v ko a b c)" - by (simp add: updateObject_default_def typeError_def unless_def split: kernel_object.splits ) + by (fastforce simp: updateObject_default_def typeError_def unless_def split: kernel_object.splits ) lemma empty_fail_threadSet [intro!, wp, simp]: "empty_fail (threadSet f p)" - by (simp add: threadSet_def getObject_def setObject_def split_def) + by (fastforce simp: threadSet_def getObject_def setObject_def split_def) lemma empty_fail_getThreadState[iff]: "empty_fail (getThreadState t)" diff --git a/proof/refine/RISCV64/EmptyFail_H.thy b/proof/refine/RISCV64/EmptyFail_H.thy index edd5622145..bde07a1ef8 100644 --- a/proof/refine/RISCV64/EmptyFail_H.thy +++ b/proof/refine/RISCV64/EmptyFail_H.thy @@ -17,7 +17,7 @@ context begin interpretation Arch . 
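(* The lemmas in these EmptyFail theories establish empty_fail for the kernel monads used
   by the refinement proofs. As a hedged reminder of the underlying notion (defined in the
   nondeterministic monad library; the exact formulation there may differ), a computation
   is empty_fail when an empty result set can only arise together with the failure flag:

     "empty_fail m \<equiv> \<forall>s. fst (m s) = {} \<longrightarrow> snd (m s)"

   so empty_fail rules out silent divergence: whenever a run produces no result at all,
   that run must be marked as failed. *)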
(*FIXME: arch_split*) lemmas forM_empty_fail[intro!, wp, simp] = empty_fail_mapM[simplified forM_def[symmetric]] lemmas forM_x_empty_fail[intro!, wp, simp] = empty_fail_mapM_x[simplified forM_x_def[symmetric]] -lemmas forME_x_empty_fail[intro!, wp, simp] = mapME_x_empty_fail[simplified forME_x_def[symmetric]] +lemmas forME_x_empty_fail[intro!, wp, simp] = empty_fail_mapME_x[simplified forME_x_def[symmetric]] lemma withoutPreemption_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (withoutPreemption m)" @@ -80,9 +80,6 @@ proof (induct arbitrary: s rule: resolveAddressBits.induct) lemmas resolveAddressBits_empty_fail[intro!, wp, simp] = resolveAddressBits_spec_empty_fail[THEN use_spec_empty_fail] -crunch (empty_fail) empty_fail[intro!, wp, simp]: lookupIPCBuffer -(simp:Let_def) - declare ef_dmo'[intro!, wp, simp] lemma empty_fail_getObject_ep [intro!, wp, simp]: @@ -166,14 +163,14 @@ lemma ignoreFailure_empty_fail[intro!, wp, simp]: by (simp add: ignoreFailure_def empty_fail_catch) crunch (empty_fail) empty_fail[intro!, wp, simp]: cancelIPC, setThreadState, tcbSchedDequeue, setupReplyMaster, isStopped, possibleSwitchTo, tcbSchedAppend -(simp: Let_def setNotification_def setBoundNotification_def) +(simp: Let_def setNotification_def setBoundNotification_def wp: empty_fail_getObject) crunch (empty_fail) "_H_empty_fail"[intro!, wp, simp]: "ThreadDecls_H.suspend" (ignore_del: ThreadDecls_H.suspend) lemma ThreadDecls_H_restart_empty_fail[intro!, wp, simp]: "empty_fail (ThreadDecls_H.restart target)" - by (simp add:restart_def) + by (fastforce simp: restart_def) lemma empty_fail_lookupPTFromLevel[intro!, wp, simp]: "empty_fail (lookupPTFromLevel level ptPtr vPtr target)" @@ -217,18 +214,14 @@ lemmas finaliseSlot_empty_fail[intro!, wp, simp] = lemma checkCapAt_empty_fail[intro!, wp, simp]: "empty_fail action \ empty_fail (checkCapAt cap ptr action)" - by (simp add: checkCapAt_def) + by (fastforce simp: checkCapAt_def) lemma assertDerived_empty_fail[intro!, wp, simp]: "empty_fail f \ empty_fail (assertDerived src cap f)" - by (simp add: assertDerived_def) + by (fastforce simp: assertDerived_def) crunch (empty_fail) empty_fail[intro!, wp, simp]: cteDelete -lemma liftE_empty_fail[intro!, wp, simp]: - "empty_fail f \ empty_fail (liftE f)" - by simp - lemma spec_empty_fail_unlessE': "\ \ P \ spec_empty_fail f s \ \ spec_empty_fail (unlessE P f) s" by (simp add:unlessE_def spec_empty_returnOk) @@ -258,7 +251,7 @@ lemma Syscall_H_syscall_empty_fail[intro!, wp, simp]: lemma catchError_empty_fail[intro!, wp, simp]: "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catchError f g)" - by (simp add: catchError_def handle_empty_fail) + by fastforce crunch (empty_fail) empty_fail[intro!, wp, simp]: chooseThread, getDomainTime, nextDomain, isHighestPrio @@ -278,7 +271,7 @@ crunch (empty_fail) empty_fail: callKernel theorem call_kernel_serial: "\ (einvs and (\s. event \ Interrupt \ ct_running s) and (ct_running or ct_idle) and - (\s. scheduler_action s = resume_cur_thread) and + schact_is_rct and (\s. 0 < domain_time s \ valid_domain_list s)) s; \s'. (s, s') \ state_relation \ (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and diff --git a/proof/refine/RISCV64/Finalise_R.thy b/proof/refine/RISCV64/Finalise_R.thy index 529f8fd3e7..d96b3f6e2b 100644 --- a/proof/refine/RISCV64/Finalise_R.thy +++ b/proof/refine/RISCV64/Finalise_R.thy @@ -16,9 +16,7 @@ context begin interpretation Arch . 
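(* The lemmas below work against the linked-list model of the ready queues: each queue is
   a doubly linked list threaded through the TCBs via tcbSchedNext/tcbSchedPrev, with
   ksReadyQueues recording only head and end pointers (an empty queue is TcbQueue None
   None, as seen later in Init_R). As a hedged sketch only -- the authoritative
   definitions sit in the queue-model theories and may differ in detail -- the relation
   used by the tcbQueueRemove lemmas below is roughly

     "list_queue_relation ts q nexts prevs \<equiv>
        heap_ls nexts (tcbQueueHead q) ts \<and> queue_end_valid ts q \<and> prev_queue_head q prevs"

   i.e. the abstract list ts is laid out along the next heap starting at the queue head,
   the recorded queue end is the last element of ts, and the prev heap is consistent with
   the head. *)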
(*FIXME: arch_split*) declare doUnbindNotification_def[simp] crunches copyGlobalMappings - for queues[wp]: "Invariants_H.valid_queues" - and queues'[wp]: "Invariants_H.valid_queues'" - and ifunsafe'[wp]: "if_unsafe_then_cap'" + for ifunsafe'[wp]: "if_unsafe_then_cap'" and pred_tcb_at'[wp]: "pred_tcb_at' proj P t" and vms'[wp]: "valid_machine_state'" and ct_not_inQ[wp]: "ct_not_inQ" @@ -95,20 +93,10 @@ crunch ksRQL1[wp]: emptySlot "\s. P (ksReadyQueuesL1Bitmap s)" crunch ksRQL2[wp]: emptySlot "\s. P (ksReadyQueuesL2Bitmap s)" crunch obj_at'[wp]: postCapDeletion "obj_at' P p" -lemmas postCapDeletion_valid_queues[wp] = - valid_queues_lift [OF postCapDeletion_obj_at' - postCapDeletion_pred_tcb_at' - postCapDeletion_ksRQ] - crunch inQ[wp]: clearUntypedFreeIndex "\s. P (obj_at' (inQ d p) t s)" crunch tcbDomain[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbDomain tcb)) t" crunch tcbPriority[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbPriority tcb)) t" -lemma emptySlot_queues [wp]: - "\Invariants_H.valid_queues\ emptySlot sl opt \\rv. Invariants_H.valid_queues\" - unfolding emptySlot_def - by (wp | wpcw | wp valid_queues_lift | simp)+ - crunch nosch[wp]: emptySlot "\s. P (ksSchedulerAction s)" crunch ksCurDomain[wp]: emptySlot "\s. P (ksCurDomain s)" @@ -1181,8 +1169,7 @@ definition "removeable' sl \ \s cap. (\p. p \ sl \ cte_wp_at' (\cte. capMasterCap (cteCap cte) = capMasterCap cap) p s) \ ((\p \ cte_refs' cap (irq_node' s). p \ sl \ cte_wp_at' (\cte. cteCap cte = NullCap) p s) - \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s) - \ (\t \ threadCapRefs cap. \p. t \ set (ksReadyQueues s p)))" + \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s))" lemma not_Final_removeable: "\ isFinal cap sl (cteCaps_of s) @@ -1291,7 +1278,7 @@ crunch gsMaxObjectSize[wp]: emptySlot "\s. P (gsMaxObjectSize s)" end lemma emptySlot_cteCaps_of: - "\\s. P (cteCaps_of s(p \ NullCap))\ + "\\s. P ((cteCaps_of s)(p \ NullCap))\ emptySlot p opt \\rv s. P (cteCaps_of s)\" apply (simp add: emptySlot_def case_Null_If) @@ -1400,11 +1387,6 @@ crunch irq_states' [wp]: emptySlot valid_irq_states' crunch no_0_obj' [wp]: emptySlot no_0_obj' (wp: crunch_wps) -crunch valid_queues'[wp]: setInterruptState "valid_queues'" - (simp: valid_queues'_def) - -crunch valid_queues'[wp]: emptySlot "valid_queues'" - end lemma deletedIRQHandler_irqs_masked'[wp]: @@ -1469,7 +1451,7 @@ lemma emptySlot_untyped_ranges[wp]: emptySlot sl opt \\rv. untyped_ranges_zero'\" apply (simp add: emptySlot_def case_Null_If) apply (rule hoare_pre) - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (rule untyped_ranges_zero_lift) apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of | wpc | simp add: clearUntypedFreeIndex_def updateTrackedFreeIndex_def @@ -1498,6 +1480,13 @@ lemma emptySlot_valid_arch'[wp]: by (wpsimp simp: emptySlot_def cte_wp_at_ctes_of wp: getCTE_wp hoare_drop_imps hoare_vcg_ex_lift) +crunches emptySlot + for valid_bitmaps[wp]: valid_bitmaps + and tcbQueued_opt_pred[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and sched_projs[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + (wp: valid_bitmaps_lift) + lemma emptySlot_invs'[wp]: "\\s. invs' s \ cte_wp_at' (\cte. 
removeable' sl s (cteCap cte)) sl s \ (info \ NullCap \ post_cap_delete_pre' info sl (cteCaps_of s) )\ @@ -1527,8 +1516,8 @@ lemma arch_postCapDeletion_corres: lemma postCapDeletion_corres: "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) - apply (corressimp corres: deletedIRQHandler_corres) - by (corressimp corres: arch_postCapDeletion_corres) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) lemma set_cap_trans_state: "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" @@ -1588,7 +1577,7 @@ lemma emptySlot_corres: defer apply wpsimp+ apply (rule corres_no_failI) - apply (rule no_fail_pre, wp static_imp_wp) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (rule conjI, clarsimp) @@ -2292,16 +2281,24 @@ lemma tcb_st_not_Bound: "(p, TCBBound) \ tcb_st_refs_of' ts" by (auto simp: tcb_st_refs_of'_def split: Structures_H.thread_state.split) +crunches setBoundNotification + for valid_bitmaps[wp]: valid_bitmaps + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: valid_bitmaps_lift) + lemma unbindNotification_invs[wp]: "\invs'\ unbindNotification tcb \\rv. invs'\" apply (simp add: unbindNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ gbn_sp']) + apply (rule bind_wp[OF _ gbn_sp']) apply (case_tac ntfnPtr, clarsimp, wp, clarsimp) apply clarsimp - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift - irqs_masked_lift setBoundNotification_ct_not_inQ + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' valid_irq_node_lift + irqs_masked_lift setBoundNotification_ct_not_inQ sym_heap_sched_pointers_lift untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ apply (rule conjI) apply (clarsimp elim!: obj_atE' @@ -2339,9 +2336,9 @@ lemma ntfn_bound_tcb_at': lemma unbindMaybeNotification_invs[wp]: "\invs'\ unbindMaybeNotification ntfnptr \\rv. invs'\" apply (simp add: unbindMaybeNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sym_heap_sched_pointers_lift valid_irq_node_lift irqs_masked_lift setBoundNotification_ct_not_inQ untyped_ranges_zero_lift | wpc | clarsimp simp: cteCaps_of_def o_def)+ @@ -2465,11 +2462,11 @@ crunches finaliseCapTrue_standin, unbindNotification lemma cteDeleteOne_cteCaps_of: "\\s. (cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap)))\ + P ((cteCaps_of s)(p \ NullCap)))\ cteDeleteOne p \\rv s. P (cteCaps_of s)\" apply (simp add: cteDeleteOne_def unless_def split_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (case_tac "\final. 
finaliseCap (cteCap cte) final True = fail") apply (simp add: finaliseCapTrue_standin_simple_def) apply wp @@ -2495,7 +2492,6 @@ lemma cteDeleteOne_isFinal: lemmas setEndpoint_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ep_ctes_of] lemmas setNotification_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ntfn_ctes_of] -lemmas setQueue_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF setQueue_ctes_of] lemmas threadSet_cteCaps_of = cteCaps_of_ctes_of_lift [OF threadSet_ctes_of] crunch isFinal: suspend, prepareThreadDelete "\s. isFinal cap slot (cteCaps_of s)" @@ -2584,16 +2580,6 @@ lemma unbindNotification_valid_objs'_helper': by (clarsimp simp: valid_bound_tcb'_def valid_ntfn'_def split: option.splits ntfn.splits) -lemma typ_at'_valid_tcb'_lift: - assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" - shows "\\s. valid_tcb' tcb s\ f \\rv s. valid_tcb' tcb s\" - including no_pre - apply (simp add: valid_tcb'_def) - apply (case_tac "tcbState tcb", simp_all add: valid_tcb_state'_def split_def valid_bound_ntfn'_def) - apply (wp hoare_vcg_const_Ball_lift typ_at_lifts[OF P] - | case_tac "tcbBoundNotification tcb", simp_all)+ - done - lemmas setNotification_valid_tcb' = typ_at'_valid_tcb'_lift [OF setNotification_typ_at'] lemma unbindNotification_valid_objs'[wp]: @@ -2615,7 +2601,7 @@ lemma unbindMaybeNotification_valid_objs'[wp]: unbindMaybeNotification t \\rv. valid_objs'\" apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp threadSet_valid_objs' gbn_wp' set_ntfn_valid_objs' hoare_vcg_all_lift setNotification_valid_tcb' getNotification_wp @@ -2654,7 +2640,7 @@ lemma unbindMaybeNotification_obj_at'_bound: unbindMaybeNotification r \\_ s. obj_at' (\ntfn. ntfnBoundTCB ntfn = None) r s\" apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp obj_at_setObject2 | wpc @@ -2702,7 +2688,7 @@ lemma capDeleteOne_bound_tcb_at': lemma cancelIPC_bound_tcb_at'[wp]: "\bound_tcb_at' P tptr\ cancelIPC t \\rv. bound_tcb_at' P tptr\" apply (simp add: cancelIPC_def Let_def) - apply (rule hoare_seq_ext[OF _ gts_sp']) + apply (rule bind_wp[OF _ gts_sp']) apply (case_tac "state", simp_all) defer 2 apply (rule hoare_pre) @@ -2726,10 +2712,6 @@ lemma unbindNotification_bound_tcb_at': apply (wp setBoundNotification_bound_tcb gbn_wp' | wpc | simp)+ done -crunches unbindNotification, unbindMaybeNotification - for valid_queues[wp]: "Invariants_H.valid_queues" - (wp: sbn_valid_queues) - crunches unbindNotification, unbindMaybeNotification for weak_sch_act_wf[wp]: "\s. weak_sch_act_wf (ksSchedulerAction s) s" @@ -2750,10 +2732,40 @@ crunch valid_cap'[wp]: prepareThreadDelete "valid_cap' cap" crunch invs[wp]: prepareThreadDelete "invs'" (ignore: doMachineOp) crunch obj_at'[wp]: prepareThreadDelete "\s. P' (obj_at' P p s)" - (wp: hoare_whenE_wp simp: crunch_simps) + (wp: whenE_wp simp: crunch_simps) end +lemma tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\\s. \ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + tcbQueueRemove q t + \\_ s. obj_at' (\tcb. 
tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + by (fastforce dest!: heap_ls_last_None + simp: list_queue_relation_def prev_queue_head_def queue_end_valid_def + obj_at'_def opt_map_def ps_clear_def objBits_simps + split: if_splits) + +lemma tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\valid_sched_pointers\ + tcbSchedDequeue t + \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + unfolding tcbSchedDequeue_def + by (wpsimp wp: tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at' threadGet_wp) + (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def + valid_sched_pointers_def opt_pred_def opt_map_def + split: option.splits) + +crunches updateRestartPC, cancelIPC + for valid_sched_pointers[wp]: valid_sched_pointers + (simp: crunch_simps wp: crunch_wps) + +lemma suspend_tcbSchedNext_tcbSchedPrev_None: + "\invs'\ suspend t \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + unfolding suspend_def + by (wpsimp wp: hoare_drop_imps tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at') + lemma (in delete_one_conc_pre) finaliseCap_replaceable: "\\s. invs' s \ cte_wp_at' (\cte. cteCap cte = cap) slot s \ (final_matters' cap \ (final = isFinal cap slot (cteCaps_of s))) @@ -2773,21 +2785,22 @@ lemma (in delete_one_conc_pre) finaliseCap_replaceable: \ (\p \ threadCapRefs cap. st_tcb_at' ((=) Inactive) p s \ obj_at' (Not \ tcbQueued) p s \ bound_tcb_at' ((=) None) p s - \ (\pr. p \ set (ksReadyQueues s pr))))\" + \ obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) p s))\" apply (simp add: finaliseCap_def Let_def getThreadCSpaceRoot cong: if_cong split del: if_split) apply (rule hoare_pre) apply (wp prepares_delete_helper'' [OF cancelAllIPC_unlive] prepares_delete_helper'' [OF cancelAllSignals_unlive] - suspend_isFinal prepareThreadDelete_unqueued prepareThreadDelete_nonq + suspend_isFinal prepareThreadDelete_unqueued prepareThreadDelete_inactive prepareThreadDelete_isFinal - suspend_makes_inactive suspend_nonq + suspend_makes_inactive deletingIRQHandler_removeable' deletingIRQHandler_final[where slot=slot ] unbindMaybeNotification_obj_at'_bound getNotification_wp suspend_bound_tcb_at' unbindNotification_bound_tcb_at' + suspend_tcbSchedNext_tcbSchedPrev_None | simp add: isZombie_Null isThreadCap_threadCapRefs_tcbptr isArchObjectCap_Cap_capCap | (rule hoare_strengthen_post [OF arch_finaliseCap_removeable[where slot=slot]], @@ -2823,7 +2836,7 @@ crunch ctes_of[wp]: cancelSignal "\s. P (ctes_of s)" lemma cancelIPC_cteCaps_of: "\\s. (\p. cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap))) \ + P ((cteCaps_of s)(p \ NullCap))) \ P (cteCaps_of s)\ cancelIPC t \\rv s. P (cteCaps_of s)\" @@ -2854,7 +2867,9 @@ lemma cancelIPC_cte_wp_at': apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of x) done -crunch cte_wp_at'[wp]: tcbSchedDequeue "cte_wp_at' P p" +crunches tcbSchedDequeue + for cte_wp_at'[wp]: "cte_wp_at' P p" + (wp: crunch_wps) lemma suspend_cte_wp_at': assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" @@ -2949,7 +2964,7 @@ lemma cteDeleteOne_reply_pred_tcb_at: cteDeleteOne slot \\rv. 
pred_tcb_at' proj P t\" apply (simp add: cteDeleteOne_def unless_def isFinalCapability_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (clarsimp simp: cte_wp_at_ctes_of when_def isCap_simps Let_def finaliseCapTrue_standin_def) @@ -2972,32 +2987,13 @@ crunches cteDeleteOne, unbindNotification lemma rescheduleRequired_sch_act_not[wp]: "\\\ rescheduleRequired \\rv. sch_act_not t\" apply (simp add: rescheduleRequired_def setSchedulerAction_def) - apply (wp hoare_post_taut | simp)+ + apply (wp hoare_TrueI | simp)+ done crunch sch_act_not[wp]: cteDeleteOne "sch_act_not t" (simp: crunch_simps case_Null_If unless_def wp: crunch_wps getObject_inv loadObject_default_inv) -lemma cancelAllIPC_mapM_x_valid_queues: - "\Invariants_H.valid_queues and valid_objs' and (\s. \t\set q. tcb_at' t s)\ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv. Invariants_H.valid_queues\" - apply (rule_tac R="\_ s. (\t\set q. tcb_at' t s) \ valid_objs' s" - in hoare_post_add) - apply (rule hoare_pre) - apply (rule mapM_x_wp') - apply (rule hoare_name_pre_state) - apply (wp hoare_vcg_const_Ball_lift - tcbSchedEnqueue_valid_queues tcbSchedEnqueue_not_st - sts_valid_queues sts_st_tcb_at'_cases setThreadState_not_st - | simp - | ((elim conjE)?, drule (1) bspec, clarsimp elim!: obj_at'_weakenE simp: valid_tcb_state'_def))+ - done - lemma cancelAllIPC_mapM_x_weak_sch_act: "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ mapM_x (\t. do @@ -3011,13 +3007,15 @@ lemma cancelAllIPC_mapM_x_weak_sch_act: done lemma cancelAllIPC_mapM_x_valid_objs': - "\valid_objs'\ + "\valid_objs' and pspace_aligned' and pspace_distinct'\ mapM_x (\t. do y \ setThreadState Structures_H.thread_state.Restart t; tcbSchedEnqueue t od) q \\_. valid_objs'\" - apply (wp mapM_x_wp' sts_valid_objs') + apply (rule hoare_strengthen_post) + apply (rule mapM_x_wp') + apply (wpsimp wp: sts_valid_objs') apply (clarsimp simp: valid_tcb_state'_def)+ done @@ -3028,17 +3026,12 @@ lemma cancelAllIPC_mapM_x_tcbDomain_obj_at': tcbSchedEnqueue t od) q \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" - by (wp mapM_x_wp' tcbSchedEnqueue_not_st setThreadState_oa_queued | simp)+ + by (wp mapM_x_wp' | simp)+ lemma rescheduleRequired_oa_queued': - "\obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\ - rescheduleRequired - \\_. obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\" - apply (simp add: rescheduleRequired_def) - apply (wp tcbSchedEnqueue_not_st - | wpc - | simp)+ - done + "rescheduleRequired \obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t\" + unfolding rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + by wpsimp lemma cancelAllIPC_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ @@ -3052,21 +3045,6 @@ lemma cancelAllIPC_tcbDomain_obj_at': | simp)+ done -lemma cancelAllIPC_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelAllIPC ep_ptr - \\rv. 
Invariants_H.valid_queues\" - apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act - set_ep_valid_objs' getEndpoint_wp) - apply (clarsimp simp: valid_ep'_def) - apply (drule (1) ko_at_valid_objs') - apply (auto simp: valid_obj'_def valid_ep'_def valid_tcb'_def - split: endpoint.splits - elim: valid_objs_valid_tcbE) - done - lemma cancelAllSignals_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelAllSignals epptr @@ -3083,41 +3061,8 @@ lemma unbindMaybeNotification_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ unbindMaybeNotification r \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" - apply (simp add: unbindMaybeNotification_def) - apply (wp setBoundNotification_oa_queued getNotification_wp gbn_wp' | wpc | simp)+ - done - -lemma cancelAllSignals_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelAllSignals ntfn - \\rv. Invariants_H.valid_queues\" - apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) - apply (case_tac "ntfnObj ntfna", simp_all) - apply (wp, simp)+ - apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act - set_ntfn_valid_objs' - | simp)+ - apply (clarsimp simp: valid_ep'_def) - apply (drule (1) ko_at_valid_objs') - apply (auto simp: valid_obj'_def valid_ntfn'_def valid_tcb'_def - split: endpoint.splits - elim: valid_objs_valid_tcbE) - done - -lemma finaliseCapTrue_standin_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - finaliseCapTrue_standin cap final - \\_. Invariants_H.valid_queues\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp | clarsimp | wpc)+ - done - - -crunch valid_queues[wp]: isFinalCapability "Invariants_H.valid_queues" - (simp: crunch_simps) + unfolding unbindMaybeNotification_def + by (wpsimp wp: getNotification_wp gbn_wp' simp: setBoundNotification_def)+ crunch sch_act[wp]: isFinalCapability "\s. sch_act_wf (ksSchedulerAction s) s" (simp: crunch_simps) @@ -3126,93 +3071,6 @@ crunch weak_sch_act[wp]: isFinalCapability "\s. weak_sch_act_wf (ksSchedulerAction s) s" (simp: crunch_simps) -lemma cteDeleteOne_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cteDeleteOne sl - \\_. Invariants_H.valid_queues\" (is "\?PRE\ _ \_\") - apply (simp add: cteDeleteOne_def unless_def split_def) - apply (wp isFinalCapability_inv getCTE_wp | rule hoare_drop_imps | simp)+ - apply (clarsimp simp: cte_wp_at'_def) - done - -lemma valid_inQ_queues_lift: - assumes tat: "\d p tcb. \obj_at' (inQ d p) tcb\ f \\_. obj_at' (inQ d p) tcb\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\valid_inQ_queues\ f \\_. valid_inQ_queues\" - proof - - show ?thesis - apply (clarsimp simp: valid_def valid_inQ_queues_def) - apply safe - apply (rule use_valid [OF _ tat], assumption) - apply (drule spec, drule spec, erule conjE, erule bspec) - apply (rule ccontr) - apply (erule notE[rotated], erule(1) use_valid [OF _ prq]) - apply (erule use_valid [OF _ prq]) - apply simp - done - qed - -lemma emptySlot_valid_inQ_queues [wp]: - "\valid_inQ_queues\ emptySlot sl opt \\rv. 
valid_inQ_queues\" - unfolding emptySlot_def - by (wp opt_return_pres_lift | wpcw | wp valid_inQ_queues_lift | simp)+ - -lemma cancelAllIPC_mapM_x_valid_inQ_queues: - "\valid_inQ_queues\ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv. valid_inQ_queues\" - apply (rule mapM_x_wp_inv) - apply (wp sts_valid_queues [where st="Structures_H.thread_state.Restart", simplified] - setThreadState_st_tcb) - done - -lemma cancelAllIPC_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cancelAllIPC ep_ptr - \\rv. valid_inQ_queues\" - apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (wp cancelAllIPC_mapM_x_valid_inQ_queues) - apply (wp hoare_conjI hoare_drop_imp | simp)+ - done - -lemma cancelAllSignals_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cancelAllSignals ntfn - \\rv. valid_inQ_queues\" - apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) - apply (case_tac "ntfnObj ntfna", simp_all) - apply (wp, simp)+ - apply (wp cancelAllIPC_mapM_x_valid_inQ_queues)+ - apply (simp) - done - -crunches unbindNotification, unbindMaybeNotification - for valid_inQ_queues[wp]: "valid_inQ_queues" - -lemma finaliseCapTrue_standin_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - finaliseCapTrue_standin cap final - \\_. valid_inQ_queues\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp | clarsimp | wpc)+ - done - -crunch valid_inQ_queues[wp]: isFinalCapability valid_inQ_queues - (simp: crunch_simps) - -lemma cteDeleteOne_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cteDeleteOne sl - \\_. valid_inQ_queues\" - apply (simp add: cteDeleteOne_def unless_def) - apply (wpsimp wp: hoare_drop_imp hoare_vcg_all_lift) - done - crunch ksCurDomain[wp]: cteDeleteOne "\s. P (ksCurDomain s)" (wp: crunch_wps simp: crunch_simps unless_def) @@ -3259,7 +3117,7 @@ lemma cteDeleteOne_invs[wp]: subgoal by auto subgoal by (auto dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def projectKOs ko_wp_at'_def) - apply (wp isFinalCapability_inv getCTE_wp' static_imp_wp + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp | wp (once) isFinal[where x=ptr])+ apply (fastforce simp: cte_wp_at_ctes_of) done @@ -3703,178 +3561,6 @@ lemma isFinal_lift: lemmas final_matters'_simps = final_matters'_def [split_simps capability.split arch_capability.split] -definition set_thread_all :: "obj_ref \ Structures_A.tcb \ etcb - \ unit det_ext_monad" where - "set_thread_all ptr tcb etcb \ - do s \ get; - kh \ return $ kheap s(ptr \ (TCB tcb)); - ekh \ return $ (ekheap s)(ptr \ etcb); - put (s\kheap := kh, ekheap := ekh\) - od" - -definition thread_gets_the_all :: "obj_ref \ (Structures_A.tcb \ etcb) det_ext_monad" where - "thread_gets_the_all tptr \ - do tcb \ gets_the $ get_tcb tptr; - etcb \ gets_the $ get_etcb tptr; - return $ (tcb, etcb) od" - -definition thread_set_all :: "(Structures_A.tcb \ Structures_A.tcb) \ (etcb \ etcb) - \ obj_ref \ unit det_ext_monad" where - "thread_set_all f g tptr \ - do (tcb, etcb) \ thread_gets_the_all tptr; - set_thread_all tptr (f tcb) (g etcb) - od" - -lemma set_thread_all_corres: - fixes ob' :: "'a :: pspace_storable" - assumes x: "updateObject ob' = updateObject_default ob'" - assumes z: "\s. obj_at' P ptr s - \ map_to_ctes ((ksPSpace s) (ptr \ injectKO ob')) = map_to_ctes (ksPSpace s)" - assumes b: "\ko. P ko \ objBits ko = objBits ob'" - assumes P: "\(v::'a::pspace_storable). 
(1 :: machine_word) < 2 ^ (objBits v)" - assumes e: "etcb_relation etcb tcb'" - assumes is_t: "injectKO (ob' :: 'a :: pspace_storable) = KOTCB tcb'" - shows "other_obj_relation (TCB tcb) (injectKO (ob' :: 'a :: pspace_storable)) \ - corres dc (obj_at (same_caps (TCB tcb)) ptr and is_etcb_at ptr) - (obj_at' (P :: 'a \ bool) ptr) - (set_thread_all ptr tcb etcb) (setObject ptr ob')" - apply (rule corres_no_failI) - apply (rule no_fail_pre) - apply wp - apply (rule x) - apply (clarsimp simp: b elim!: obj_at'_weakenE) - apply (unfold set_thread_all_def setObject_def) - apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def - put_def return_def modify_def get_object_def x - updateObject_default_def in_magnitude_check [OF _ P]) - apply (clarsimp simp add: state_relation_def z) - apply (simp flip: trans_state_update) - apply (clarsimp simp add: swp_def fun_upd_def obj_at_def is_etcb_at_def) - apply (subst cte_wp_at_after_update,fastforce simp add: obj_at_def) - apply (subst caps_of_state_after_update,fastforce simp add: obj_at_def) - apply clarsimp - apply (subst conj_assoc[symmetric]) - apply (rule conjI[rotated]) - apply (clarsimp simp add: ghost_relation_def) - apply (erule_tac x=ptr in allE)+ - apply (clarsimp simp: obj_at_def - split: Structures_A.kernel_object.splits if_split_asm) - - apply (fold fun_upd_def) - apply (simp only: pspace_relation_def dom_fun_upd2 simp_thms) - apply (subst pspace_dom_update) - apply assumption - apply simp - apply (simp only: dom_fun_upd2 simp_thms) - apply (elim conjE) - apply (frule bspec, erule domI) - apply (rule conjI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: is_other_obj_relation_type) - apply (drule(1) bspec) - apply clarsimp - apply (frule_tac ko'="TCB tcb'" and x'=ptr in obj_relation_cut_same_type, - (fastforce simp add: is_other_obj_relation_type)+)[1] - apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) - apply (frule bspec, erule domI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: obj_at'_def) - apply (insert e is_t) - by (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type - split: Structures_A.kernel_object.splits kernel_object.splits arch_kernel_obj.splits) - -lemma tcb_update_all_corres': - assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" - assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" - assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" - assumes r: "r () ()" - assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" - shows "corres r (ko_at (TCB tcb) add and (\s. 
ekheap s add = Some etcb)) - (ko_at' tcb' add) - (set_thread_all add tcbu etcbu) (setObject add tcbu')" - apply (rule_tac F="tcb_relation tcb tcb' \ etcb_relation etcbu tcbu'" in corres_req) - apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) - apply (frule(1) pspace_relation_absD) - apply (force simp: other_obj_relation_def ekheap_relation_def e) - apply (erule conjE) - apply (rule corres_guard_imp) - apply (rule corres_rel_imp) - apply (rule set_thread_all_corres[where P="(=) tcb'"]) - apply (rule ext)+ - apply simp - defer - apply (simp add: is_other_obj_relation_type_def - objBits_simps' other_obj_relation_def tcbs r)+ - apply (fastforce simp: is_etcb_at_def elim!: obj_at_weakenE dest: bspec[OF tables]) - apply (subst(asm) eq_commute, assumption) - apply (clarsimp simp: obj_at'_def objBits_simps) - apply (subst map_to_ctes_upd_tcb, assumption+) - apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) - apply (subst if_not_P) - apply (fastforce dest: bspec [OF tables', OF ranI]) - apply simp - done - -lemma thread_gets_the_all_corres: - shows "corres (\(tcb, etcb) tcb'. tcb_relation tcb tcb' \ etcb_relation etcb tcb') - (tcb_at t and is_etcb_at t) (tcb_at' t) - (thread_gets_the_all t) (getObject t)" - apply (rule corres_no_failI) - apply wp - apply (clarsimp simp: gets_def get_def return_def bind_def get_tcb_def thread_gets_the_all_def - threadGet_def ethread_get_def gets_the_def assert_opt_def get_etcb_def - is_etcb_at_def tcb_at_def liftM_def - split: option.splits Structures_A.kernel_object.splits) - apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) - apply (clarsimp simp add: obj_at_def is_tcb obj_at'_def projectKO_def - projectKO_opt_tcb split_def - getObject_def loadObject_default_def in_monad) - apply (case_tac ko) - apply (simp_all add: fail_def return_def) - apply (clarsimp simp add: state_relation_def pspace_relation_def ekheap_relation_def) - apply (drule bspec) - apply clarsimp - apply blast - apply (drule bspec, erule domI) - apply (clarsimp simp add: other_obj_relation_def - lookupAround2_known1) - done - -lemma thread_set_all_corresT: - assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ - tcb_relation (f tcb) (f' tcb')" - assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" - assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. - getF (f' tcb) = getF tcb" - assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ - etcb_relation (g etcb) (f' tcb')" - shows "corres dc (tcb_at t and valid_etcbs) - (tcb_at' t) - (thread_set_all f g t) (threadSet f' t)" - apply (simp add: thread_set_all_def threadSet_def bind_assoc) - apply (rule corres_guard_imp) - apply (rule corres_split[OF thread_gets_the_all_corres]) - apply (simp add: split_def) - apply (rule tcb_update_all_corres') - apply (erule x) - apply (rule y) - apply (clarsimp simp: bspec_split [OF spec [OF z]]) - apply fastforce - apply (erule e) - apply (simp add: thread_gets_the_all_def, wp+) - apply clarsimp - apply (frule(1) tcb_at_is_etcb_at) - apply (clarsimp simp add: tcb_at_def get_etcb_def obj_at_def) - apply (drule get_tcb_SomeD) - apply fastforce - apply simp - done - -lemmas thread_set_all_corres = - thread_set_all_corresT [OF _ _ all_tcbI, OF _ ball_tcb_cap_casesI ball_tcb_cte_casesI] - crunch idle_thread[wp]: deleteCallerCap "\s. P (ksIdleThread s)" (wp: crunch_wps) crunch sch_act_simple: deleteCallerCap sch_act_simple @@ -3890,89 +3576,6 @@ lemma setEndpoint_sch_act_not_ct[wp]: setEndpoint ptr val \\_ s. 
sch_act_not (ksCurThread s) s\" by (rule hoare_weaken_pre, wps setEndpoint_ct', wp, simp) -lemma cancelAll_ct_not_ksQ_helper: - "\(\s. ksCurThread s \ set (ksReadyQueues s p)) and (\s. ksCurThread s \ set q) \ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (rule mapM_x_inv_wp2, simp) - apply (wp) - apply (wps tcbSchedEnqueue_ct') - apply (wp tcbSchedEnqueue_ksQ) - apply (wps setThreadState_ct') - apply (wp sts_ksQ') - apply (clarsimp) - done - -lemma cancelAllIPC_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cancelAllIPC epptr - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - (is "\?PRE\ _ \\_. ?POST\") - apply (simp add: cancelAllIPC_def) - apply (wp, wpc, wp) - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply (clarsimp) - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply (clarsimp) - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ - prefer 2 - apply assumption - apply (rule_tac Q="\ep. ?PRE and ko_at' ep epptr" in hoare_post_imp) - apply (clarsimp) - apply (rule conjI) - apply ((clarsimp simp: invs'_def valid_state'_def - sch_act_sane_def - | drule(1) ct_not_in_epQueue)+)[2] - apply (wp get_ep_sp') - done - -lemma cancelAllSignals_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cancelAllSignals ntfnptr - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - (is "\?PRE\ _ \\_. ?POST\") - apply (simp add: cancelAllSignals_def) - apply (wp, wpc, wp+) - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply clarsimp - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setNotification_ksQ setNotification_ksCurThread]) - apply (wps setNotification_ksCurThread, wp) - prefer 2 - apply assumption - apply (rule_tac Q="\ep. ?PRE and ko_at' ep ntfnptr" in hoare_post_imp) - apply ((clarsimp simp: invs'_def valid_state'_def sch_act_sane_def - | drule(1) ct_not_in_ntfnQueue)+)[1] - apply (wp get_ntfn_sp') - done - -lemma unbindMaybeNotification_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - unbindMaybeNotification t - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) - apply (case_tac "ntfnBoundTCB ntfn", simp, wp, simp+) - apply (rule hoare_pre) - apply wp - apply (wps setBoundNotification_ct') - apply (wp sbn_ksQ) - apply (wps setNotification_ksCurThread, wp) - apply clarsimp - done - lemma sbn_ct_in_state'[wp]: "\ct_in_state' P\ setBoundNotification ntfn t \\_. ct_in_state' P\" apply (simp add: ct_in_state'_def) @@ -4005,37 +3608,6 @@ lemma unbindMaybeNotification_sch_act_sane[wp]: apply (wp setNotification_sch_act_sane sbn_sch_act_sane | wpc | clarsimp)+ done -lemma finaliseCapTrue_standin_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - finaliseCapTrue_standin cap final - \\rv s. 
ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp cancelAllIPC_ct_not_ksQ cancelAllSignals_ct_not_ksQ - hoare_drop_imps unbindMaybeNotification_ct_not_ksQ - | wpc - | clarsimp simp: isNotificationCap_def isReplyCap_def split:capability.splits)+ - done - -lemma cteDeleteOne_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cteDeleteOne slot - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: cteDeleteOne_def unless_def split_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) - apply (case_tac "\final. finaliseCap (cteCap cte) final True = fail") - apply (simp add: finaliseCapTrue_standin_simple_def) - apply wp - apply (clarsimp) - apply (wp emptySlot_cteCaps_of hoare_lift_Pf2 [OF emptySlot_ksRQ emptySlot_ct]) - apply (simp add: cteCaps_of_def) - apply (wp (once) hoare_drop_imps) - apply (wp finaliseCapTrue_standin_ct_not_ksQ isFinalCapability_inv)+ - apply (clarsimp) - done - end end diff --git a/proof/refine/RISCV64/Init_R.thy b/proof/refine/RISCV64/Init_R.thy index 9cac8880a1..7b0d851e7e 100644 --- a/proof/refine/RISCV64/Init_R.thy +++ b/proof/refine/RISCV64/Init_R.thy @@ -91,7 +91,7 @@ definition zeroed_intermediate_state :: ksDomSchedule = [], ksCurDomain = 0, ksDomainTime = 0, - ksReadyQueues = K [], + ksReadyQueues = K (TcbQueue None None), ksReadyQueuesL1Bitmap = K 0, ksReadyQueuesL2Bitmap = K 0, ksCurThread = 0, @@ -112,9 +112,11 @@ lemma non_empty_refine_state_relation: "(zeroed_abstract_state, zeroed_intermediate_state) \ state_relation" apply (clarsimp simp: state_relation_def zeroed_state_defs state.defs) apply (intro conjI) - apply (clarsimp simp: pspace_relation_def pspace_dom_def) - apply (clarsimp simp: ekheap_relation_def) - apply (clarsimp simp: ready_queues_relation_def) + apply (clarsimp simp: pspace_relation_def pspace_dom_def) + apply (clarsimp simp: ekheap_relation_def) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def queue_end_valid_def + opt_pred_def list_queue_relation_def tcbQueueEmpty_def + prev_queue_head_def) apply (clarsimp simp: ghost_relation_def) apply (fastforce simp: cdt_relation_def swp_def dest: cte_wp_at_domI) apply (clarsimp simp: cdt_list_relation_def map_to_ctes_def) diff --git a/proof/refine/RISCV64/InterruptAcc_R.thy b/proof/refine/RISCV64/InterruptAcc_R.thy index 7712c5957f..ba899fb824 100644 --- a/proof/refine/RISCV64/InterruptAcc_R.thy +++ b/proof/refine/RISCV64/InterruptAcc_R.thy @@ -50,14 +50,13 @@ lemma setIRQState_invs[wp]: apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def) apply (wp dmo_maskInterrupt) apply (clarsimp simp: invs'_def valid_state'_def cur_tcb'_def - Invariants_H.valid_queues_def valid_queues'_def valid_idle'_def valid_irq_node'_def valid_arch_state'_def valid_global_refs'_def global_refs'_def valid_machine_state'_def if_unsafe_then_cap'_def ex_cte_cap_to'_def valid_irq_handlers'_def irq_issued'_def cteCaps_of_def valid_irq_masks'_def - bitmapQ_defs valid_queues_no_bitmap_def) + bitmapQ_defs valid_bitmaps_def) apply (rule conjI, clarsimp) apply (clarsimp simp: irqs_masked'_def ct_not_inQ_def) apply (rule conjI; clarsimp) @@ -113,7 +112,7 @@ lemma preemptionPoint_inv: shows "\P\ preemptionPoint \\_. 
P\" using assms apply (simp add: preemptionPoint_def setWorkUnits_def getWorkUnits_def modifyWorkUnits_def) apply (wpc - | wp hoare_whenE_wp hoare_seq_ext [OF _ select_inv] alternative_valid hoare_drop_imps + | wp whenE_wp bind_wp [OF _ select_inv] hoare_drop_imps | simp)+ done @@ -148,8 +147,7 @@ lemma invs'_irq_state_independent [simp, intro!]: valid_idle'_def valid_global_refs'_def valid_arch_state'_def valid_irq_node'_def valid_irq_handlers'_def valid_irq_states'_def - irqs_masked'_def bitmapQ_defs valid_queues_no_bitmap_def - valid_queues'_def + irqs_masked'_def bitmapQ_defs valid_bitmaps_def pspace_domain_valid_def cur_tcb'_def valid_machine_state'_def tcb_in_cur_domain'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def diff --git a/proof/refine/RISCV64/Interrupt_R.thy b/proof/refine/RISCV64/Interrupt_R.thy index 7cf6f22944..dcc577606f 100644 --- a/proof/refine/RISCV64/Interrupt_R.thy +++ b/proof/refine/RISCV64/Interrupt_R.thy @@ -609,13 +609,6 @@ lemma decDomainTime_corres: apply (clarsimp simp:state_relation_def) done -lemma tcbSchedAppend_valid_objs': - "\valid_objs'\tcbSchedAppend t \\r. valid_objs'\" - apply (simp add:tcbSchedAppend_def) - apply (wpsimp wp: hoare_unless_wp threadSet_valid_objs' threadGet_wp) - apply (clarsimp simp add:obj_at'_def typ_at'_def) - done - lemma thread_state_case_if: "(case state of Structures_A.thread_state.Running \ f | _ \ g) = (if state = Structures_A.thread_state.Running then f else g)" @@ -626,26 +619,19 @@ lemma threadState_case_if: (if state = Structures_H.thread_state.Running then f else g)" by (case_tac state,auto) -lemma tcbSchedAppend_invs_but_ct_not_inQ': - "\invs' and st_tcb_at' runnable' t \ - tcbSchedAppend t \\_. all_invs_but_ct_not_inQ'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp sch_act_wf_lift valid_irq_node_lift irqs_masked_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | fastforce elim!: st_tcb_ex_cap'' split: thread_state.split_asm)+ - done +lemma ready_qs_distinct_domain_time_update[simp]: + "ready_qs_distinct (domain_time_update f s) = ready_qs_distinct s" + by (clarsimp simp: ready_qs_distinct_def) lemma timerTick_corres: - "corres dc (cur_tcb and valid_sched and pspace_aligned and pspace_distinct) - invs' - timer_tick timerTick" + "corres dc + (cur_tcb and valid_sched and pspace_aligned and pspace_distinct) invs' + timer_tick timerTick" apply (simp add: timerTick_def timer_tick_def) - apply (simp add:thread_state_case_if threadState_case_if) - apply (rule_tac Q="\ and (cur_tcb and valid_sched and pspace_aligned and pspace_distinct)" - and Q'="\ and invs'" in corres_guard_imp) + apply (simp add: thread_state_case_if threadState_case_if) + apply (rule_tac Q="cur_tcb and valid_sched and pspace_aligned and pspace_distinct" + and Q'=invs' + in corres_guard_imp) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) apply simp @@ -665,63 +651,71 @@ lemma timerTick_corres: apply simp apply (rule corres_split[OF ethread_set_corres]) apply (simp add: sch_act_wf_weak etcb_relation_def pred_conj_def)+ - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (rule rescheduleRequired_corres) - apply (wp)[1] - apply (rule hoare_strengthen_post) - apply (rule tcbSchedAppend_invs_but_ct_not_inQ', clarsimp simp: sch_act_wf_weak) - apply (wp threadSet_timeslice_invs threadSet_valid_queues - threadSet_valid_queues' 
threadSet_pred_tcb_at_state)+ - apply simp - apply (rule corres_when,simp) + apply wp + apply ((wpsimp wp: tcbSchedAppend_sym_heap_sched_pointers + tcbSchedAppend_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply ((wp thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply wpsimp+ + apply (rule corres_when, simp) apply (rule corres_split[OF decDomainTime_corres]) apply (rule corres_split[OF getDomainTime_corres]) apply (rule corres_when,simp) apply (rule rescheduleRequired_corres) apply (wp hoare_drop_imp)+ - apply (simp add:dec_domain_time_def) - apply wp+ - apply (simp add:decDomainTime_def) - apply wp - apply (wpsimp wp: static_imp_wp threadSet_timeslice_invs threadSet_valid_queues - threadSet_valid_queues' tcbSchedAppend_valid_objs' + apply (wpsimp simp: dec_domain_time_def) + apply (wpsimp simp: decDomainTime_def) + apply (wpsimp wp: hoare_weak_lift_imp threadSet_timeslice_invs + tcbSchedAppend_valid_objs' threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf - rescheduleRequired_weak_sch_act_wf tcbSchedAppend_valid_queues)+ - apply (strengthen sch_act_wf_weak) - apply (clarsimp simp:conj_comms) - apply (wp tcbSchedAppend_valid_queues tcbSchedAppend_sch_act_wf) - apply simp - apply (wpsimp wp: threadSet_valid_queues threadSet_pred_tcb_at_state threadSet_sch_act - threadSet_tcbDomain_triv threadSet_valid_queues' threadSet_valid_objs' - threadGet_wp gts_wp gts_wp')+ - apply (clarsimp simp: cur_tcb_def tcb_at_is_etcb_at valid_sched_def valid_sched_action_def) - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak cur_tcb'_def inQ_def - ct_in_state'_def obj_at'_def) - apply (clarsimp simp:st_tcb_at'_def valid_idle'_def ct_idle_or_in_cur_domain'_def obj_at'_def) - apply simp - apply simp + rescheduleRequired_weak_sch_act_wf)+ + apply (strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_time_slice_valid_queues) + apply ((wpsimp wp: thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+)[1] + apply wpsimp + apply wpsimp + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs' + | wp (once) hoare_drop_imp)+)[1] + apply (wpsimp wp: gts_wp gts_wp')+ + apply (clarsimp simp: cur_tcb_def) + apply (frule valid_sched_valid_etcbs) + apply (frule (1) tcb_at_is_etcb_at) + apply (frule valid_sched_valid_queues) + apply (fastforce simp: pred_tcb_at_def obj_at_def valid_sched_weak_strg) + apply (clarsimp simp: etcb_at_def split: option.splits) + apply fastforce + apply (fastforce simp: valid_state'_def ct_not_inQ_def) + apply fastforce done lemmas corres_eq_trivial = corres_Id[where f = h and g = h for h, simplified] lemma handleInterrupt_corres: "corres dc - (einvs) (invs' and (\s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive)) + einvs + (invs' and (\s. 
intStateIRQTable (ksInterruptState s) irq \ IRQInactive)) (handle_interrupt irq) (handleInterrupt irq)" - (is "corres dc (einvs) ?P' ?f ?g") - apply (simp add: handle_interrupt_def handleInterrupt_def ) + (is "corres dc ?P ?P' ?f ?g") + apply (simp add: handle_interrupt_def handleInterrupt_def) apply (rule conjI[rotated]; rule impI) - apply (rule corres_guard_imp) apply (rule corres_split[OF getIRQState_corres, - where R="\rv. einvs" + where R="\rv. ?P" and R'="\rv. invs' and (\s. rv \ IRQInactive)"]) defer - apply (wp getIRQState_prop getIRQState_inv do_machine_op_bind doMachineOp_bind | simp add: do_machine_op_bind doMachineOp_bind )+ - apply (rule corres_guard_imp) - apply (rule corres_split) - apply (rule corres_machine_op, rule corres_eq_trivial ; (simp add: dc_def no_fail_maskInterrupt no_fail_bind no_fail_ackInterrupt)+)+ - apply ((wp | simp)+)[4] + apply (wp getIRQState_prop getIRQState_inv do_machine_op_bind doMachineOp_bind + | simp add: do_machine_op_bind doMachineOp_bind valid_sched_def)+ + apply (corres corres: corres_machine_op) apply (rule corres_gen_asm2) apply (case_tac st, simp_all add: irq_state_relation_def split: irqstate.split_asm) @@ -751,7 +745,7 @@ lemma handleInterrupt_corres: apply (rule corres_machine_op) apply (rule corres_eq_trivial, (simp add: no_fail_ackInterrupt)+) apply wp+ - apply (clarsimp simp: invs_distinct invs_psp_aligned) + apply (clarsimp simp: invs_distinct invs_psp_aligned schact_is_rct_def) apply clarsimp done @@ -762,10 +756,10 @@ lemma threadSet_ksDomainTime[wp]: done crunch ksDomainTime[wp]: rescheduleRequired "\s. P (ksDomainTime s)" -(simp:tcbSchedEnqueue_def wp:hoare_unless_wp) +(simp:tcbSchedEnqueue_def wp:unless_wp) crunch ksDomainTime[wp]: tcbSchedAppend "\s. P (ksDomainTime s)" -(simp:tcbSchedEnqueue_def wp:hoare_unless_wp) +(simp:tcbSchedEnqueue_def wp:unless_wp) lemma updateTimeSlice_valid_pspace[wp]: "\valid_pspace'\ threadSet (tcbTimeSlice_update (\_. ts')) thread @@ -774,14 +768,6 @@ lemma updateTimeSlice_valid_pspace[wp]: apply (auto simp:tcb_cte_cases_def cteSizeBits_def) done -lemma updateTimeSlice_valid_queues[wp]: - "\\s. Invariants_H.valid_queues s \ - threadSet (tcbTimeSlice_update (\_. ts')) thread - \\r s. Invariants_H.valid_queues s\" - apply (wp threadSet_valid_queues,simp) - apply (clarsimp simp:obj_at'_def inQ_def) - done - crunches tcbSchedAppend for irq_handlers'[wp]: valid_irq_handlers' and irqs_masked'[wp]: irqs_masked' @@ -789,29 +775,29 @@ crunches tcbSchedAppend (simp: unless_def tcb_cte_cases_def cteSizeBits_def wp: crunch_wps cur_tcb_lift) lemma timerTick_invs'[wp]: - "\invs'\ timerTick \\rv. invs'\" + "timerTick \invs'\" apply (simp add: timerTick_def) apply (wpsimp wp: threadSet_invs_trivial threadSet_pred_tcb_no_state rescheduleRequired_all_invs_but_ct_not_inQ - tcbSchedAppend_invs_but_ct_not_inQ' - simp: tcb_cte_cases_def) - apply (rule_tac Q="\rv. invs'" in hoare_post_imp) - apply (clarsimp simp add:invs'_def valid_state'_def) + simp: tcb_cte_cases_def) + apply (rule_tac Q="\rv. 
invs'" in hoare_post_imp) + apply (clarsimp simp: invs'_def valid_state'_def) apply (simp add: decDomainTime_def) apply wp apply simp apply wpc - apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs - rescheduleRequired_all_invs_but_ct_not_inQ - hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain' - del: tcbSchedAppend_sch_act_wf)+ - apply (rule hoare_strengthen_post[OF tcbSchedAppend_invs_but_ct_not_inQ']) - apply (wpsimp simp: valid_pspace'_def sch_act_wf_weak)+ - apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv - threadSet_valid_objs' threadSet_timeslice_invs)+ - apply (wp threadGet_wp) + apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs + rescheduleRequired_all_invs_but_ct_not_inQ + hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain')+ + apply (rule hoare_strengthen_post[OF tcbSchedAppend_all_invs_but_ct_not_inQ']) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ + apply (rule_tac Q="\_. invs'" in hoare_strengthen_post) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv + threadSet_valid_objs' threadSet_timeslice_invs)+ + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ apply (wp gts_wp')+ - apply (clarsimp simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def) + apply (auto simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def cong: conj_cong) done lemma resetTimer_invs'[wp]: diff --git a/proof/refine/RISCV64/InvariantUpdates_H.thy b/proof/refine/RISCV64/InvariantUpdates_H.thy index 377bda1525..41354e5a19 100644 --- a/proof/refine/RISCV64/InvariantUpdates_H.thy +++ b/proof/refine/RISCV64/InvariantUpdates_H.thy @@ -16,7 +16,7 @@ lemma ps_clear_domE[elim?]: lemma ps_clear_upd: "ksPSpace s y = Some v \ - ps_clear x n (ksPSpace_update (\a. ksPSpace s(y \ v')) s') = ps_clear x n s" + ps_clear x n (ksPSpace_update (\a. 
(ksPSpace s)(y \ v')) s') = ps_clear x n s" by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+ lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated] @@ -38,8 +38,9 @@ lemma invs'_machine: proof - show ?thesis apply (cases "ksSchedulerAction s") - apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs + apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_bitmaps_def bitmapQ_defs vms ct_not_inQ_def state_refs_of'_def ps_clear_def valid_irq_node'_def mask @@ -56,12 +57,13 @@ lemma invs_no_cicd'_machine: proof - show ?thesis apply (cases "ksSchedulerAction s") - apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - vms ct_not_inQ_def - state_refs_of'_def ps_clear_def - valid_irq_node'_def mask - cong: option.case_cong) + apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_bitmaps_def bitmapQ_defs + vms ct_not_inQ_def + state_refs_of'_def ps_clear_def + valid_irq_node'_def mask + cong: option.case_cong) done qed @@ -98,14 +100,9 @@ lemma valid_tcb'_tcbTimeSlice_update[simp]: "valid_tcb' (tcbTimeSlice_update f tcb) s = valid_tcb' tcb s" by (simp add:valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) -lemma valid_queues_ksSchedulerAction_update[simp]: - "valid_queues (ksSchedulerAction_update f s) = valid_queues s" - unfolding valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - by simp - -lemma valid_queues'_ksSchedulerAction_update[simp]: - "valid_queues' (ksSchedulerAction_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksSchedulerAction_update[simp]: + "valid_bitmaps (ksSchedulerAction_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma ex_cte_cap_wp_to'_gsCNodes_update[simp]: "ex_cte_cap_wp_to' P p (gsCNodes_update f s') = ex_cte_cap_wp_to' P p s'" @@ -140,45 +137,25 @@ lemma tcb_in_cur_domain_ct[simp]: "tcb_in_cur_domain' t (ksCurThread_update f s) = tcb_in_cur_domain' t s" by (fastforce simp: tcb_in_cur_domain'_def) -lemma valid_queues'_ksCurDomain[simp]: - "valid_queues' (ksCurDomain_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - -lemma valid_queues'_ksDomScheduleIdx[simp]: - "valid_queues' (ksDomScheduleIdx_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksCurDomain[simp]: + "valid_bitmaps (ksCurDomain_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksDomSchedule[simp]: - "valid_queues' (ksDomSchedule_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomScheduleIdx[simp]: + "valid_bitmaps (ksDomScheduleIdx_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksDomainTime[simp]: - "valid_queues' (ksDomainTime_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomSchedule[simp]: + "valid_bitmaps (ksDomSchedule_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksWorkUnitsCompleted[simp]: - "valid_queues' 
(ksWorkUnitsCompleted_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomainTime[simp]: + "valid_bitmaps (ksDomainTime_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues_ksCurDomain[simp]: - "valid_queues (ksCurDomain_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomScheduleIdx[simp]: - "valid_queues (ksDomScheduleIdx_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomSchedule[simp]: - "valid_queues (ksDomSchedule_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomainTime[simp]: - "valid_queues (ksDomainTime_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksWorkUnitsCompleted[simp]: - "valid_queues (ksWorkUnitsCompleted_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) +lemma valid_bitmaps_ksWorkUnitsCompleted[simp]: + "valid_bitmaps (ksWorkUnitsCompleted_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma valid_irq_node'_ksCurDomain[simp]: "valid_irq_node' w (ksCurDomain_update f s) = valid_irq_node' w s" @@ -255,6 +232,10 @@ lemma valid_mdb_interrupts'[simp]: "valid_mdb' (ksInterruptState_update f s) = valid_mdb' s" by (simp add: valid_mdb'_def) +lemma valid_mdb'_ksReadyQueues_update[simp]: + "valid_mdb' (ksReadyQueues_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + lemma vms_ksReadyQueues_update[simp]: "valid_machine_state' (ksReadyQueues_update f s) = valid_machine_state' s" by (simp add: valid_machine_state'_def) @@ -279,10 +260,10 @@ lemma ct_in_state_ksSched[simp]: lemma invs'_wu [simp]: "invs' (ksWorkUnitsCompleted_update f s) = invs' s" - apply (simp add: invs'_def cur_tcb'_def valid_state'_def Invariants_H.valid_queues_def - valid_queues'_def valid_irq_node'_def valid_machine_state'_def + apply (simp add: invs'_def cur_tcb'_def valid_state'_def valid_bitmaps_def + valid_irq_node'_def valid_machine_state'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def) + bitmapQ_defs) done lemma valid_arch_state'_interrupt[simp]: @@ -334,9 +315,8 @@ lemma sch_act_simple_ksReadyQueuesL2Bitmap[simp]: lemma ksDomainTime_invs[simp]: "invs' (ksDomainTime_update f s) = invs' s" - by (simp add:invs'_def valid_state'_def - cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def - tcb_in_cur_domain'_def valid_machine_state'_def) + by (simp add: invs'_def valid_state'_def cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_machine_state'_def bitmapQ_defs) lemma valid_machine_state'_ksDomainTime[simp]: "valid_machine_state' (ksDomainTime_update f s) = valid_machine_state' s" @@ -364,9 +344,7 @@ lemma ct_not_inQ_update_stt[simp]: lemma invs'_update_cnt[elim!]: "invs' s \ invs' (s\ksSchedulerAction := ChooseNewThread\)" - by (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues'_def - valid_irq_node'_def cur_tcb'_def ct_idle_or_in_cur_domain'_def - tcb_in_cur_domain'_def valid_queues_no_bitmap_def - bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def) + by (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_irq_node'_def cur_tcb'_def + ct_idle_or_in_cur_domain'_def 
tcb_in_cur_domain'_def bitmapQ_defs) end \ No newline at end of file diff --git a/proof/refine/RISCV64/Invariants_H.thy b/proof/refine/RISCV64/Invariants_H.thy index 51df0bb7e4..5cc523ed1e 100644 --- a/proof/refine/RISCV64/Invariants_H.thy +++ b/proof/refine/RISCV64/Invariants_H.thy @@ -8,6 +8,7 @@ theory Invariants_H imports LevityCatch "AInvs.ArchDetSchedSchedule_AI" + "Lib.Heap_List" begin (* global data and code of the kernel, not covered by any cap *) @@ -137,6 +138,21 @@ definition cte_wp_at' :: "(cte \ bool) \ obj_ref \ kernel_state \ bool" where "cte_at' \ cte_wp_at' \" +abbreviation tcb_of' :: "kernel_object \ tcb option" where + "tcb_of' \ projectKO_opt" + +abbreviation tcbs_of' :: "kernel_state \ obj_ref \ tcb option" where + "tcbs_of' s \ ksPSpace s |> tcb_of'" + +abbreviation tcbSchedPrevs_of :: "kernel_state \ obj_ref \ obj_ref option" where + "tcbSchedPrevs_of s \ tcbs_of' s |> tcbSchedPrev" + +abbreviation tcbSchedNexts_of :: "kernel_state \ obj_ref \ obj_ref option" where + "tcbSchedNexts_of s \ tcbs_of' s |> tcbSchedNext" + +abbreviation sym_heap_sched_pointers :: "global.kernel_state \ bool" where + "sym_heap_sched_pointers s \ sym_heap (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + definition tcb_cte_cases :: "machine_word \ ((tcb \ cte) \ ((cte \ cte) \ tcb \ tcb))" where "tcb_cte_cases \ [ 0 << cteSizeBits \ (tcbCTable, tcbCTable_update), 1 << cteSizeBits \ (tcbVTable, tcbVTable_update), @@ -188,8 +204,10 @@ definition state_refs_of' :: "kernel_state \ obj_ref \ ( fun live' :: "kernel_object \ bool" where "live' (KOTCB tcb) = - (bound (tcbBoundNotification tcb) \ - (tcbState tcb \ Inactive \ tcbState tcb \ IdleThreadState) \ tcbQueued tcb)" + (bound (tcbBoundNotification tcb) + \ tcbSchedPrev tcb \ None \ tcbSchedNext tcb \ None + \ tcbQueued tcb + \ (tcbState tcb \ Inactive \ tcbState tcb \ IdleThreadState))" | "live' (KOEndpoint ep) = (ep \ IdleEP)" | "live' (KONotification ntfn) = (bound (ntfnBoundTCB ntfn) \ (\ts. ntfnObj ntfn = WaitingNtfn ts))" | "live' _ = False" @@ -387,6 +405,11 @@ definition valid_bound_ntfn' :: "machine_word option \ kernel_state definition is_device_frame_cap' :: "capability \ bool" where "is_device_frame_cap' cap \ case cap of ArchObjectCap (FrameCap _ _ _ dev _) \ dev | _ \ False" +abbreviation opt_tcb_at' :: "machine_word option \ kernel_state \ bool" where + "opt_tcb_at' \ none_top tcb_at'" + +lemmas opt_tcb_at'_def = none_top_def + definition valid_tcb' :: "tcb \ kernel_state \ bool" where "valid_tcb' t s \ (\(getF, setF) \ ran tcb_cte_cases. s \' cteCap (getF t)) \ valid_tcb_state' (tcbState t) s @@ -394,7 +417,9 @@ definition valid_tcb' :: "tcb \ kernel_state \ bool" whe \ valid_bound_ntfn' (tcbBoundNotification t) s \ tcbDomain t \ maxDomain \ tcbPriority t \ maxPriority - \ tcbMCP t \ maxPriority" + \ tcbMCP t \ maxPriority + \ opt_tcb_at' (tcbSchedPrev t) s + \ opt_tcb_at' (tcbSchedNext t) s" definition valid_ep' :: "Structures_H.endpoint \ kernel_state \ bool" where "valid_ep' ep s \ case ep of @@ -402,7 +427,6 @@ definition valid_ep' :: "Structures_H.endpoint \ kernel_state \ (ts \ [] \ (\t \ set ts. tcb_at' t s) \ distinct ts) | RecvEP ts \ (ts \ [] \ (\t \ set ts. 
tcb_at' t s) \ distinct ts)" - definition valid_bound_tcb' :: "machine_word option \ kernel_state \ bool" where "valid_bound_tcb' tcb_opt s \ case tcb_opt of None \ True | Some t \ tcb_at' t s" @@ -733,10 +757,15 @@ where | "runnable' (Structures_H.BlockedOnSend a b c d e) = False" | "runnable' (Structures_H.BlockedOnNotification x) = False" -definition - inQ :: "domain \ priority \ tcb \ bool" -where - "inQ d p tcb \ tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d" +definition inQ :: "domain \ priority \ tcb \ bool" where + "inQ d p tcb \ tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d" + +lemma inQ_implies_tcbQueueds_of: + "(inQ domain priority |< tcbs_of' s') tcbPtr \ (tcbQueued |< tcbs_of' s') tcbPtr" + by (clarsimp simp: opt_map_def opt_pred_def inQ_def split: option.splits) + +defs ready_qs_runnable_def: + "ready_qs_runnable s \ \t. obj_at' tcbQueued t s \ st_tcb_at' runnable' t s" definition (* for given domain and priority, the scheduler bitmap indicates a thread is in the queue *) @@ -746,15 +775,6 @@ where "bitmapQ d p s \ ksReadyQueuesL1Bitmap s d !! prioToL1Index p \ ksReadyQueuesL2Bitmap s (d, invertL1Index (prioToL1Index p)) !! unat (p && mask wordRadix)" - -definition - valid_queues_no_bitmap :: "kernel_state \ bool" -where - "valid_queues_no_bitmap \ \s. - (\d p. (\t \ set (ksReadyQueues s (d, p)). obj_at' (inQ d p and runnable' \ tcbState) t s) - \ distinct (ksReadyQueues s (d, p)) - \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - definition (* A priority is used as a two-part key into the bitmap structure. If an L2 bitmap entry is set without an L1 entry, updating the L1 entry (shared by many priorities) may make @@ -778,31 +798,62 @@ where \d i. ksReadyQueuesL1Bitmap s d !! i \ ksReadyQueuesL2Bitmap s (d, invertL1Index i) \ 0 \ i < l2BitmapSize" -definition - valid_bitmapQ :: "kernel_state \ bool" -where - "valid_bitmapQ \ \s. (\d p. bitmapQ d p s \ ksReadyQueues s (d,p) \ [])" +definition valid_bitmapQ :: "kernel_state \ bool" where + "valid_bitmapQ \ \s. \d p. bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p))" -definition - valid_queues :: "kernel_state \ bool" -where - "valid_queues \ \s. valid_queues_no_bitmap s \ valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ bitmapQ_no_L1_orphans s" +definition valid_bitmaps :: "kernel_state \ bool" where + "valid_bitmaps \ \s. valid_bitmapQ s \ bitmapQ_no_L2_orphans s \ bitmapQ_no_L1_orphans s" -definition - (* when a thread gets added to / removed from a queue, but before bitmap updated *) - valid_bitmapQ_except :: "domain \ priority \ kernel_state \ bool" -where +lemma valid_bitmaps_valid_bitmapQ[elim!]: + "valid_bitmaps s \ valid_bitmapQ s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_bitmapQ_no_L2_orphans[elim!]: + "valid_bitmaps s \ bitmapQ_no_L2_orphans s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_bitmapQ_no_L1_orphans[elim!]: + "valid_bitmaps s \ bitmapQ_no_L1_orphans s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_lift: + assumes prq: "\P. f \\s. P (ksReadyQueues s)\" + assumes prqL1: "\P. f \\s. P (ksReadyQueuesL1Bitmap s)\" + assumes prqL2: "\P. f \\s. 
P (ksReadyQueuesL2Bitmap s)\" + shows "f \valid_bitmaps\" + unfolding valid_bitmaps_def valid_bitmapQ_def bitmapQ_def + bitmapQ_no_L1_orphans_def bitmapQ_no_L2_orphans_def + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +(* when a thread gets added to / removed from a queue, but before bitmap updated *) +definition valid_bitmapQ_except :: "domain \ priority \ kernel_state \ bool" where "valid_bitmapQ_except d' p' \ \s. - (\d p. (d \ d' \ p \ p') \ (bitmapQ d p s \ ksReadyQueues s (d,p) \ []))" + \d p. (d \ d' \ p \ p') \ (bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p)))" lemmas bitmapQ_defs = valid_bitmapQ_def valid_bitmapQ_except_def bitmapQ_def bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def -definition - valid_queues' :: "kernel_state \ bool" -where - "valid_queues' \ \s. \d p t. obj_at' (inQ d p) t s \ t \ set (ksReadyQueues s (d, p))" +\ \ + The tcbSchedPrev and tcbSchedNext fields of a TCB are used only to indicate membership in + one of the ready queues. \ +definition valid_sched_pointers_2 :: + "(obj_ref \ obj_ref) \ (obj_ref \ obj_ref) \ (obj_ref \ bool) \ bool " + where + "valid_sched_pointers_2 prevs nexts ready \ + \ptr. prevs ptr \ None \ nexts ptr \ None \ ready ptr" + +abbreviation valid_sched_pointers :: "kernel_state \ bool" where + "valid_sched_pointers s \ + valid_sched_pointers_2 (tcbSchedPrevs_of s) (tcbSchedNexts_of s) (tcbQueued |< tcbs_of' s)" + +lemmas valid_sched_pointers_def = valid_sched_pointers_2_def + +lemma valid_sched_pointersD: + "\valid_sched_pointers s; \ (tcbQueued |< tcbs_of' s) t\ + \ tcbSchedPrevs_of s t = None \ tcbSchedNexts_of s t = None" + by (fastforce simp: valid_sched_pointers_def in_opt_pred opt_map_red) definition tcb_in_cur_domain' :: "machine_word \ kernel_state \ bool" where "tcb_in_cur_domain' t \ \s. obj_at' (\tcb. ksCurDomain s = tcbDomain tcb) t s" @@ -949,7 +1000,7 @@ abbreviation definition valid_state' :: "kernel_state \ bool" where "valid_state' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) + \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s @@ -958,7 +1009,9 @@ definition valid_state' :: "kernel_state \ bool" where \ valid_irq_states' s \ valid_machine_state' s \ irqs_masked' s - \ valid_queues' s + \ sym_heap_sched_pointers s + \ valid_sched_pointers s + \ valid_bitmaps s \ ct_not_inQ s \ ct_idle_or_in_cur_domain' s \ pspace_domain_valid s @@ -1009,6 +1062,11 @@ definition abbreviation "active' st \ st = Structures_H.Running \ st = Structures_H.Restart" +lemma runnable_eq_active': "runnable' = active'" + apply (rule ext) + apply (case_tac st, simp_all) + done + abbreviation "simple' st \ st = Structures_H.Inactive \ st = Structures_H.Running \ @@ -1024,11 +1082,13 @@ abbreviation abbreviation(input) "all_invs_but_sym_refs_ct_not_inQ' \ \s. 
valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s - \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s + \ valid_irq_states' s \ irqs_masked' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ valid_machine_state' s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1036,12 +1096,14 @@ abbreviation(input) abbreviation(input) "all_invs_but_ct_not_inQ' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) + \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s - \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s + \ valid_irq_states' s \ irqs_masked' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ valid_machine_state' s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1057,12 +1119,14 @@ lemma all_invs_but_not_ct_inQ_check': definition "all_invs_but_ct_idle_or_in_cur_domain' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) + \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s - \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_not_inQ s + \ valid_irq_states' s \ irqs_masked' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ valid_machine_state' s + \ cur_tcb' s \ ct_not_inQ s \ pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -2767,9 +2831,9 @@ lemma sch_act_wf_arch [simp]: "sch_act_wf sa (ksArchState_update f s) = sch_act_wf sa s" by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) -lemma valid_queues_arch [simp]: - "valid_queues (ksArchState_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) +lemma valid_bitmaps_arch[simp]: + "valid_bitmaps (ksArchState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma if_unsafe_then_cap_arch' [simp]: "if_unsafe_then_cap' (ksArchState_update f s) = if_unsafe_then_cap' s" @@ -2787,22 +2851,14 @@ lemma sch_act_wf_machine_state [simp]: "sch_act_wf sa (ksMachineState_update f s) = sch_act_wf sa s" by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) -lemma valid_queues_machine_state [simp]: - "valid_queues (ksMachineState_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_arch' [simp]: - "valid_queues' (ksArchState_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - -lemma valid_queues_machine_state' [simp]: - "valid_queues' (ksMachineState_update f s) = valid_queues' s" - 
by (simp add: valid_queues'_def) - lemma valid_irq_node'_machine_state [simp]: "valid_irq_node' x (ksMachineState_update f s) = valid_irq_node' x s" by (simp add: valid_irq_node'_def) +lemma valid_bitmaps_machine_state[simp]: + "valid_bitmaps (ksMachineState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + (* these should be reasonable safe for automation because of the 0 pattern *) lemma no_0_ko_wp' [elim!]: "\ ko_wp_at' Q 0 s; no_0_obj' s \ \ P" @@ -2884,19 +2940,6 @@ lemma typ_at_aligned': "\ typ_at' tp p s \ \ is_aligned p (objBitsT tp)" by (clarsimp simp add: typ_at'_def ko_wp_at'_def objBitsT_koTypeOf) -lemma valid_queues_obj_at'D: - "\ t \ set (ksReadyQueues s (d, p)); valid_queues s \ - \ obj_at' (inQ d p) t s" - apply (unfold valid_queues_def valid_queues_no_bitmap_def) - apply (elim conjE) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp) - done - lemma obj_at'_and: "obj_at' (P and P') t s = (obj_at' P t s \ obj_at' P' t s)" by (rule iffI, (clarsimp simp: obj_at'_def)+) @@ -2938,21 +2981,6 @@ lemma obj_at'_ko_at'_prop: "ko_at' ko t s \ obj_at' P t s = P ko" by (drule obj_at_ko_at', clarsimp simp: obj_at'_def) -lemma valid_queues_no_bitmap_def': - "valid_queues_no_bitmap = - (\s. \d p. (\t\set (ksReadyQueues s (d, p)). - obj_at' (inQ d p) t s \ st_tcb_at' runnable' t s) \ - distinct (ksReadyQueues s (d, p)) \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - apply (rule ext, rule iffI) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_and pred_tcb_at'_def o_def - elim!: obj_at'_weakenE)+ - done - -lemma valid_queues_running: - assumes Q: "t \ set(ksReadyQueues s (d, p))" "valid_queues s" - shows "st_tcb_at' runnable' t s" - using assms by (clarsimp simp add: valid_queues_def valid_queues_no_bitmap_def') - lemma valid_refs'_cteCaps: "valid_refs' S (ctes_of s) = (\c \ ran (cteCaps_of s). S \ capRange c = {})" by (fastforce simp: valid_refs'_def cteCaps_of_def elim!: ranE) @@ -3033,8 +3061,16 @@ lemma invs_sch_act_wf' [elim!]: "invs' s \ sch_act_wf (ksSchedulerAction s) s" by (simp add: invs'_def valid_state'_def) -lemma invs_queues [elim!]: - "invs' s \ valid_queues s" +lemma invs_valid_bitmaps[elim!]: + "invs' s \ valid_bitmaps s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sym_heap_sched_pointers[elim!]: + "invs' s \ sym_heap_sched_pointers s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_valid_sched_pointers[elim!]: + "invs' s \ valid_sched_pointers s" by (simp add: invs'_def valid_state'_def) lemma invs_valid_idle'[elim!]: @@ -3049,18 +3085,12 @@ lemma invs'_invs_no_cicd: "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" by (simp add: invs'_to_invs_no_cicd'_def) -lemma invs_valid_queues'_strg: - "invs' s \ valid_queues' s" - by (clarsimp simp: invs'_def valid_state'_def) - -lemmas invs_valid_queues'[elim!] 
= invs_valid_queues'_strg[rule_format] - lemma einvs_valid_etcbs: "einvs s \ valid_etcbs s" by (clarsimp simp: valid_sched_def) lemma invs'_bitmapQ_no_L1_orphans: "invs' s \ bitmapQ_no_L1_orphans s" - by (drule invs_queues, simp add: valid_queues_def) + by (simp add: invs'_def valid_state'_def valid_bitmaps_def) lemma invs_ksCurDomain_maxDomain' [elim!]: "invs' s \ ksCurDomain s \ maxDomain" @@ -3085,32 +3115,22 @@ lemma invs_no_0_obj'[elim!]: lemma invs'_gsCNodes_update[simp]: "invs' (gsCNodes_update f s') = invs' s'" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) - apply (cases "ksSchedulerAction s'") - apply (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def ct_not_inQ_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) done lemma invs'_gsUserPages_update[simp]: "invs' (gsUserPages_update f s') = invs' s'" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) - apply (cases "ksSchedulerAction s'") - apply (simp_all add: ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def ct_not_inQ_def) - done - -lemma invs_queues_tcb_in_cur_domain': - "\ ksReadyQueues s (d, p) = x # xs; invs' s; d = ksCurDomain s\ - \ tcb_in_cur_domain' x s" - apply (subgoal_tac "x \ set (ksReadyQueues s (d, p))") - apply (drule (1) valid_queues_obj_at'D[OF _ invs_queues]) - apply (auto simp: inQ_def tcb_in_cur_domain'_def elim: obj_at'_weakenE) + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) done lemma pred_tcb'_neq_contra: @@ -3126,7 +3146,7 @@ lemma invs'_ksDomScheduleIdx: unfolding invs'_def valid_state'_def by clarsimp lemma valid_bitmap_valid_bitmapQ_exceptE: - "\ valid_bitmapQ_except d p s ; (bitmapQ d p s \ ksReadyQueues s (d,p) \ []) ; + "\ valid_bitmapQ_except d p s; bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p)); bitmapQ_no_L2_orphans s \ \ valid_bitmapQ s" unfolding valid_bitmapQ_def valid_bitmapQ_except_def @@ -3258,4 +3278,52 @@ add_upd_simps "invs' (gsUntypedZeroRanges_update f s)" (obj_at'_real_def) declare upd_simps[simp] +lemma neq_out_intv: + "\ a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" + by simp + +lemma rule_out_intv: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a \ b \ + \ b \ mask_range a (objBitsKO obj)" + apply (drule(1) pspace_distinctD') + apply (subst (asm) ps_clear_def) + apply (drule_tac x = b in orthD2) + apply fastforce + apply (drule neq_out_intv) + apply (simp add: mask_def add_diff_eq) + apply (simp add: mask_def add_diff_eq) + done + +lemma ptr_range_mask_range: + "{ptr..ptr + 2 ^ bits - 1} = mask_range ptr bits" 
+ unfolding mask_def + by simp + +lemma distinct_obj_range'_not_subset: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ \ obj_range' b obj' \ obj_range' a obj" + unfolding obj_range'_def + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule (3) rule_out_intv) + using is_aligned_no_overflow_mask + by fastforce + +lemma obj_range'_disjoint: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ obj_range' a obj \ obj_range' b obj' = {}" + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule_tac p=a and p'=b in aligned_mask_range_cases) + apply assumption + apply (fastforce dest: distinct_obj_range'_not_subset + simp: obj_range'_def) + done + end diff --git a/proof/refine/RISCV64/IpcCancel_R.thy b/proof/refine/RISCV64/IpcCancel_R.thy index d351a96e80..921983ceb5 100644 --- a/proof/refine/RISCV64/IpcCancel_R.thy +++ b/proof/refine/RISCV64/IpcCancel_R.thy @@ -38,25 +38,6 @@ lemma cancelSignal_pred_tcb_at': crunch pred_tcb_at'[wp]: emptySlot "pred_tcb_at' proj P t" (wp: setCTE_pred_tcb_at') -(* valid_queues is too strong *) -definition valid_inQ_queues :: "KernelStateData_H.kernel_state \ bool" where - "valid_inQ_queues \ - \s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) \ distinct (ksReadyQueues s (d, p))" - -lemma valid_inQ_queues_ksSchedulerAction_update[simp]: - "valid_inQ_queues (ksSchedulerAction_update f s) = valid_inQ_queues s" - by (simp add: valid_inQ_queues_def) - -lemma valid_inQ_queues_ksReadyQueuesL1Bitmap_upd[simp]: - "valid_inQ_queues (ksReadyQueuesL1Bitmap_update f s) = valid_inQ_queues s" - unfolding valid_inQ_queues_def - by simp - -lemma valid_inQ_queues_ksReadyQueuesL2Bitmap_upd[simp]: - "valid_inQ_queues (ksReadyQueuesL2Bitmap_update f s) = valid_inQ_queues s" - unfolding valid_inQ_queues_def - by simp - defs capHasProperty_def: "capHasProperty ptr P \ cte_wp_at' (\c. P (cteCap c)) ptr" @@ -75,11 +56,6 @@ locale delete_one_conc_pre = "\pspace_distinct'\ cteDeleteOne slot \\rv. pspace_distinct'\" assumes delete_one_it: "\P. \\s. P (ksIdleThread s)\ cteDeleteOne cap \\rv s. P (ksIdleThread s)\" - assumes delete_one_queues: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cteDeleteOne sl \\rv. Invariants_H.valid_queues\" - assumes delete_one_inQ_queues: - "\valid_inQ_queues\ cteDeleteOne sl \\rv. valid_inQ_queues\" assumes delete_one_sch_act_simple: "\sch_act_simple\ cteDeleteOne sl \\rv. sch_act_simple\" assumes delete_one_sch_act_not: @@ -97,7 +73,7 @@ lemma (in delete_one_conc_pre) cancelIPC_simple[wp]: "\\\ cancelIPC t \\rv. st_tcb_at' simple' t\" apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def cong: Structures_H.thread_state.case_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (rule hoare_pre) apply (wpc | wp sts_st_tcb_at'_cases hoare_vcg_conj_lift @@ -537,7 +513,7 @@ lemma (in delete_one) cancelIPC_ReplyCap_corres: and Q'="\_. invs' and st_tcb_at' awaiting_reply' t" in corres_underlying_split) apply (rule corres_guard_imp) - apply (rule threadset_corresT) + apply (rule threadset_corresT; simp?) 
apply (simp add: tcb_relation_def fault_rel_optionation_def) apply (simp add: tcb_cap_cases_def) apply (simp add: tcb_cte_cases_def cteSizeBits_def) @@ -661,16 +637,15 @@ lemma sch_act_simple_not_t[simp]: "sch_act_simple s \ sch_act_no context begin interpretation Arch . (*FIXME: arch_split*) +crunches setNotification + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift) + lemma cancelSignal_invs': "\invs' and st_tcb_at' (\st. st = BlockedOnNotification ntfn) t and sch_act_not t\ cancelSignal t ntfn \\rv. invs'\" proof - - have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ - \ \x. t \ set (ksReadyQueues s x)" - apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def - valid_queues_no_bitmap_def) - apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ - done have NTFNSN: "\ntfn ntfn'. \\s. sch_act_not (ksCurThread s) s \ setNotification ntfn ntfn' \\_ s. sch_act_not (ksCurThread s) s\" @@ -681,20 +656,19 @@ lemma cancelSignal_invs': show ?thesis apply (simp add: cancelSignal_def invs'_def valid_state'_def Let_def) apply (wp valid_irq_node_lift sts_sch_act' irqs_masked_lift - hoare_vcg_all_lift [OF setNotification_ksQ] sts_valid_queues + hoare_vcg_all_lift setThreadState_ct_not_inQ NTFNSN - hoare_vcg_all_lift setNotification_ksQ + hoare_vcg_all_lift | simp add: valid_tcb_state'_def list_case_If split del: if_split)+ prefer 2 apply assumption apply (rule hoare_strengthen_post) apply (rule get_ntfn_sp') + apply (rename_tac rv s) apply (clarsimp simp: pred_tcb_at') - apply (frule NIQ) - apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) apply (rule conjI) apply (clarsimp simp: valid_ntfn'_def) - apply (case_tac "ntfnObj r", simp_all add: isWaitingNtfn_def) + apply (case_tac "ntfnObj rv", simp_all add: isWaitingNtfn_def) apply (frule ko_at_valid_objs') apply (simp add: valid_pspace_valid_objs') apply (clarsimp simp: projectKO_opt_ntfn split: kernel_object.splits) @@ -717,7 +691,7 @@ lemma cancelSignal_invs': split: ntfn.splits) apply (rule conjI, clarsimp elim!: if_live_state_refsE) apply (fastforce simp: sym_refs_def dest!: idle'_no_refs) - apply (case_tac "ntfnObj r", simp_all) + apply (case_tac "ntfnObj rv", simp_all) apply (frule obj_at_valid_objs', clarsimp) apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) apply (rule conjI, clarsimp split: option.splits) @@ -731,9 +705,10 @@ lemma cancelSignal_invs': set_eq_subset) apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def set_eq_subset) + apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp elim!: if_live_state_refsE) apply (rule conjI) - apply (case_tac "ntfnBoundTCB r") + apply (case_tac "ntfnBoundTCB rv") apply (clarsimp elim!: if_live_state_refsE)+ apply (rule conjI, clarsimp split: option.splits) apply (clarsimp dest!: idle'_no_refs) @@ -791,23 +766,25 @@ lemma setEndpoint_ct_not_inQ[wp]: done lemma setEndpoint_ksDomScheduleIdx[wp]: - "\\s. P (ksDomScheduleIdx s)\ setEndpoint ptr ep \\_ s. P (ksDomScheduleIdx s)\" + "setEndpoint ptr ep \\s. 
P (ksDomScheduleIdx s)\" apply (simp add: setEndpoint_def setObject_def split_def) apply (wp updateObject_default_inv | simp)+ done + end +crunches setEndpoint + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift simp: updateObject_default_def) + lemma (in delete_one_conc) cancelIPC_invs[wp]: shows "\tcb_at' t and invs'\ cancelIPC t \\rv. invs'\" proof - have P: "\xs v f. (case xs of [] \ return v | y # ys \ return (f (y # ys))) = return (case xs of [] \ v | y # ys \ f xs)" by (clarsimp split: list.split) - have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ - \ \x. t \ set (ksReadyQueues s x)" - apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ - done have EPSCHN: "\eeptr ep'. \\s. sch_act_not (ksCurThread s) s\ setEndpoint eeptr ep' \\_ s. sch_act_not (ksCurThread s) s\" @@ -832,8 +809,8 @@ proof - apply (wp valid_irq_node_lift valid_global_refs_lift' valid_arch_state_lift' irqs_masked_lift sts_sch_act' hoare_vcg_all_lift [OF setEndpoint_ksQ] - sts_valid_queues setThreadState_ct_not_inQ EPSCHN - hoare_vcg_all_lift setNotification_ksQ + setThreadState_ct_not_inQ EPSCHN + hoare_vcg_all_lift | simp add: valid_tcb_state'_def split del: if_split | wpc)+ prefer 2 @@ -841,14 +818,14 @@ proof - apply (rule hoare_strengthen_post [OF get_ep_sp']) apply (clarsimp simp: pred_tcb_at' fun_upd_def[symmetric] conj_comms split del: if_split cong: if_cong) + apply (rule conjI, clarsimp simp: valid_pspace'_def) + apply (rule conjI, clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) apply (frule obj_at_valid_objs', clarsimp) apply (clarsimp simp: projectKOs valid_obj'_def) apply (rule conjI) apply (clarsimp simp: obj_at'_def valid_ep'_def projectKOs dest!: pred_tcb_at') - apply (frule NIQ) - apply (erule pred_tcb'_weakenE, fastforce) apply (clarsimp, rule conjI) apply (auto simp: pred_tcb_at'_def obj_at'_def)[1] apply (rule conjI) @@ -895,7 +872,7 @@ proof - show ?thesis apply (simp add: cancelIPC_def crunch_simps cong: if_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (case_tac state, simp_all add: isTS_defs) apply (safe intro!: hoare_weaken_pre[OF Q] @@ -938,8 +915,8 @@ lemma (in delete_one_conc_pre) cancelIPC_st_tcb_at: \\rv. st_tcb_at' P t\" apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (case_tac x, simp_all add: isTS_defs list_case_If) + apply (rule bind_wp [OF _ gts_sp']) + apply (case_tac rv, simp_all add: isTS_defs list_case_If) apply (wp sts_st_tcb_at'_cases delete_one_st_tcb_at threadSet_pred_tcb_no_state cancelSignal_st_tcb_at hoare_drop_imps @@ -1011,9 +988,8 @@ lemma (in delete_one_conc_pre) cancelIPC_tcb_at_runnable': (is "\?PRE\ _ \_\") apply (clarsimp simp: cancelIPC_def Let_def) apply (case_tac "t'=t") - apply (rule_tac B="\st. st_tcb_at' runnable' t and K (runnable' st)" - in hoare_seq_ext) - apply (case_tac x; simp) + apply (rule_tac Q'="\st. st_tcb_at' runnable' t and K (runnable' st)" in bind_wp) + apply (case_tac rv; simp) apply (wp sts_pred_tcb_neq' | simp | wpc)+ apply (clarsimp) apply (rule_tac Q="\rv. 
?PRE" in hoare_post_imp, fastforce) @@ -1052,18 +1028,20 @@ lemma setBoundNotification_tcb_in_cur_domain'[wp]: apply (wp setBoundNotification_not_ntfn | simp)+ done -lemma cancelSignal_tcb_obj_at': - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ cancelSignal t word \\_. obj_at' P t'\" - apply (simp add: cancelSignal_def setNotification_def) - apply (wp setThreadState_not_st getNotification_wp | wpc | simp)+ - done +lemma setThreadState_tcbDomain_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding setThreadState_def + by wpsimp + +crunches cancelSignal + for tcbDomain_obj_at'[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t'" + (wp: crunch_wps) lemma (in delete_one_conc_pre) cancelIPC_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelIPC t \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" apply (simp add: cancelIPC_def Let_def) apply (wp hoare_vcg_conj_lift - setThreadState_not_st delete_one_tcbDomain_obj_at' cancelSignal_tcb_obj_at' + delete_one_tcbDomain_obj_at' | wpc | rule hoare_drop_imps | simp add: getThreadReplySlot_def o_def if_fun_split)+ @@ -1101,7 +1079,7 @@ text \The suspend operation, significant as called from delete\ lemma rescheduleRequired_weak_sch_act_wf: "\\\ rescheduleRequired \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" apply (simp add: rescheduleRequired_def setSchedulerAction_def) - apply (wp hoare_post_taut | simp add: weak_sch_act_wf_def)+ + apply (wp hoare_TrueI | simp add: weak_sch_act_wf_def)+ done lemma sts_weak_sch_act_wf[wp]: @@ -1109,7 +1087,7 @@ lemma sts_weak_sch_act_wf[wp]: \ (ksSchedulerAction s = SwitchToThread t \ runnable' st)\ setThreadState st t \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" - including no_pre + including classic_wp_pre apply (simp add: setThreadState_def) apply (wp rescheduleRequired_weak_sch_act_wf) apply (rule_tac Q="\_ s. weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp, simp) @@ -1170,206 +1148,49 @@ lemma setNotification_weak_sch_act_wf[wp]: lemmas ipccancel_weak_sch_act_wfs = weak_sch_act_wf_lift[OF _ setCTE_pred_tcb_at'] -lemma tcbSchedDequeue_corres': - "corres dc (is_etcb_at t and tcb_at t and pspace_aligned and pspace_distinct) - (valid_inQ_queues) - (tcb_sched_action (tcb_sched_dequeue) t) (tcbSchedDequeue t)" - apply (rule corres_cross_over_guard[where P'=P' and Q="tcb_at' t and P'" for P']) - apply (fastforce simp: tcb_at_cross dest: state_relation_pspace_relation) - apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) - apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and valid_inQ_queues and obj_at' (\obj. 
tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (wp, simp) - apply (case_tac queued) - defer - apply (simp add: unless_def when_def) - apply (rule corres_no_failI) - apply (wp) - apply (clarsimp simp: in_monad ethread_get_def get_etcb_def set_tcb_queue_def is_etcb_at_def state_relation_def gets_the_def gets_def get_def return_def bind_def assert_opt_def get_tcb_queue_def modify_def put_def) - apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") - prefer 2 - apply (force simp: tcb_sched_dequeue_def valid_inQ_queues_def - ready_queues_relation_def obj_at'_def inQ_def project_inject) - apply (simp add: ready_queues_relation_def) - apply (simp add: unless_def when_def) - apply (rule corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (simp split del: if_split) - apply (rule corres_split_eqr) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split_eqr[OF getQueue_corres]) - apply (simp split del: if_split) - apply (subst bind_return_unit, rule corres_split[where r'=dc]) - apply (simp add: tcb_sched_dequeue_def) - apply (rule setQueue_corres) - apply (rule corres_split_noop_rhs) - apply (clarsimp, rule removeFromBitmap_corres_noop) - apply (simp add: dc_def[symmetric]) - apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] - apply (wp | simp)+ - done - -lemma setQueue_valid_inQ_queues: - "\valid_inQ_queues - and (\s. \t \ set ts. obj_at' (inQ d p) t s) - and K (distinct ts)\ - setQueue d p ts - \\_. valid_inQ_queues\" - apply (simp add: setQueue_def valid_inQ_queues_def) - apply wp - apply clarsimp - done - -lemma threadSet_valid_inQ_queues: - "\valid_inQ_queues and (\s. \d p. (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) - \ obj_at' (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) t s - \ t \ set (ksReadyQueues s (d, p)))\ - threadSet f t - \\rv. valid_inQ_queues\" - apply (simp add: threadSet_def) - apply wp - apply (simp add: valid_inQ_queues_def pred_tcb_at'_def) - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_inQ_queues_def pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def) - apply (fastforce) - done - -(* reorder the threadSet before the setQueue, useful for lemmas that don't refer to bitmap *) -lemma setQueue_after_addToBitmap: - "(setQueue d p q >>= (\rv. (when P (addToBitmap d p)) >>= (\rv. threadSet f t))) = - (when P (addToBitmap d p) >>= (\rv. (threadSet f t) >>= (\rv. setQueue d p q)))" - apply (case_tac P, simp_all) - prefer 2 - apply (simp add: setQueue_after) - apply (simp add: setQueue_def when_def) - apply (subst oblivious_modify_swap) - apply (simp add: threadSet_def getObject_def setObject_def - loadObject_default_def bitmap_fun_defs - split_def projectKO_def2 alignCheck_assert - magnitudeCheck_assert updateObject_default_def) - apply (intro oblivious_bind, simp_all) - apply (clarsimp simp: bind_assoc) - done - -lemma tcbSchedEnqueue_valid_inQ_queues[wp]: - "\valid_inQ_queues\ tcbSchedEnqueue t \\_. valid_inQ_queues\" - apply (simp add: tcbSchedEnqueue_def setQueue_after_addToBitmap) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_inQ_queues and obj_at' (\obj. 
tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued, simp_all add: unless_def)[1] - apply (wp setQueue_valid_inQ_queues threadSet_valid_inQ_queues threadGet_wp - hoare_vcg_const_Ball_lift - | simp add: inQ_def bitmap_fun_defs - | fastforce simp: valid_inQ_queues_def inQ_def obj_at'_def)+ - done - - (* prevents wp from splitting on the when; stronger technique than hoare_when_weak_wp - FIXME: possible to replace with hoare_when_weak_wp? - *) -definition - "removeFromBitmap_conceal d p q t \ when (null [x\q . x \ t]) (removeFromBitmap d p)" - -lemma removeFromBitmap_conceal_valid_inQ_queues[wp]: - "\ valid_inQ_queues \ removeFromBitmap_conceal d p q t \ \_. valid_inQ_queues \" - unfolding valid_inQ_queues_def removeFromBitmap_conceal_def - by (wp|clarsimp simp: bitmap_fun_defs)+ - -lemma rescheduleRequired_valid_inQ_queues[wp]: - "\valid_inQ_queues\ rescheduleRequired \\_. valid_inQ_queues\" - apply (simp add: rescheduleRequired_def) - apply wpsimp - done - -lemma sts_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setThreadState st t \\rv. valid_inQ_queues\" - apply (simp add: setThreadState_def) - apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - lemma updateObject_ep_inv: "\P\ updateObject (obj::endpoint) ko p q n \\rv. P\" by simp (rule updateObject_default_inv) -lemma sbn_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setBoundNotification ntfn t \\rv. valid_inQ_queues\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - -lemma setEndpoint_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setEndpoint ptr ep \\rv. valid_inQ_queues\" - apply (unfold setEndpoint_def) - apply (rule setObject_ep_pre) - apply (simp add: valid_inQ_queues_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift setObject_queues_unchanged[OF updateObject_ep_inv]) - apply simp - done - -lemma set_ntfn_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setNotification ptr ntfn \\rv. valid_inQ_queues\" - apply (unfold setNotification_def) - apply (rule setObject_ntfn_pre) - apply (simp add: valid_inQ_queues_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv | simp)+ - done - -crunch valid_inQ_queues[wp]: cancelSignal valid_inQ_queues - (simp: updateObject_tcb_inv crunch_simps wp: crunch_wps) - -lemma (in delete_one_conc_pre) cancelIPC_valid_inQ_queues[wp]: - "\valid_inQ_queues\ cancelIPC t \\_. valid_inQ_queues\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def) - apply (wp hoare_drop_imps delete_one_inQ_queues threadSet_valid_inQ_queues | wpc | simp add:if_apply_def2 Fun.comp_def)+ - apply (clarsimp simp: valid_inQ_queues_def inQ_def)+ - done - -lemma valid_queues_inQ_queues: - "Invariants_H.valid_queues s \ valid_inQ_queues s" - by (force simp: Invariants_H.valid_queues_def valid_inQ_queues_def obj_at'_def - valid_queues_no_bitmap_def) - lemma asUser_tcbQueued_inv[wp]: "\obj_at' (\tcb. P (tcbQueued tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbQueued tcb)) t'\" apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ done -lemma asUser_valid_inQ_queues[wp]: - "\valid_inQ_queues\ asUser t f \\rv. 
valid_inQ_queues\" - unfolding valid_inQ_queues_def Ball_def - apply (wpsimp wp: hoare_vcg_all_lift) - defer - apply (wp asUser_ksQ) - apply assumption - apply (simp add: inQ_def[abs_def] obj_at'_conj) - apply (rule hoare_convert_imp) - apply (wp asUser_ksQ) - apply wp - done - -context begin -interpretation Arch . +context begin interpretation Arch . crunches cancel_ipc for pspace_aligned[wp]: "pspace_aligned :: det_state \ _" and pspace_distinct[wp]: "pspace_distinct :: det_state \ _" - (simp: crunch_simps wp: crunch_wps select_wp) + (simp: crunch_simps wp: crunch_wps) end +crunches asUser + for valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps) + +crunches set_thread_state + for in_correct_ready_q[wp]: in_correct_ready_q + (wp: crunch_wps) + +crunches set_thread_state_ext + for ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps) + +lemma set_thread_state_ready_qs_distinct[wp]: + "set_thread_state ref ts \ready_qs_distinct\" + unfolding set_thread_state_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) + +lemma as_user_ready_qs_distinct[wp]: + "as_user tptr f \ready_qs_distinct\" + unfolding as_user_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) + lemma (in delete_one) suspend_corres: "corres dc (einvs and tcb_at t) invs' (IpcCancel_A.suspend t) (ThreadDecls_H.suspend t)" @@ -1393,15 +1214,18 @@ lemma (in delete_one) suspend_corres: apply (rule corres_return_trivial) apply (rule corres_split_nor[OF setThreadState_corres]) apply wpsimp - apply (rule tcbSchedDequeue_corres') + apply (rule tcbSchedDequeue_corres, simp) apply wp - apply wpsimp - apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ - apply (rule hoare_post_imp[where Q = "\rv s. tcb_at t s \ is_etcb_at t s \ pspace_aligned s \ pspace_distinct s"]) - apply simp - apply (wp | simp)+ - apply (fastforce simp: valid_sched_def tcb_at_is_etcb_at) - apply (clarsimp simp add: invs'_def valid_state'_def valid_queues_inQ_queues) + apply (wpsimp wp: sts_valid_objs') + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def valid_tcb_state'_def)+ + apply (rule hoare_post_imp[where Q = "\rv s. einvs s \ tcb_at t s"]) + apply (simp add: invs_implies invs_strgs valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct valid_sched_def) + apply wp + apply (rule hoare_post_imp[where Q = "\_ s. invs' s \ tcb_at' t s"]) + apply (fastforce simp: invs'_def valid_tcb_state'_def) + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ + apply fastforce+ done lemma (in delete_one) prepareThreadDelete_corres: @@ -1424,259 +1248,8 @@ lemma (in delete_one_conc_pre) cancelIPC_it[wp]: apply (wp hoare_drop_imps delete_one_it | wpc | simp add:if_apply_def2 Fun.comp_def)+ done -lemma tcbSchedDequeue_notksQ: - "\\s. t' \ set(ksReadyQueues s p)\ - tcbSchedDequeue t - \\_ s. t' \ set(ksReadyQueues s p)\" - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply wp+ - apply clarsimp - apply (rule_tac Q="\_ s. t' \ set(ksReadyQueues s p)" in hoare_post_imp) - apply (wp | clarsimp)+ - done - -lemma rescheduleRequired_oa_queued: - "\ (\s. P (obj_at' (\tcb. Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)) and sch_act_simple\ - rescheduleRequired - \\_ s. P (obj_at' (\tcb. 
Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)\" - (is "\?OAQ t' p and sch_act_simple\ _ \_\") - apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) - \ ?OAQ t' p s" in hoare_seq_ext) - including no_pre - apply (wp | clarsimp)+ - apply (case_tac x) - apply (wp | clarsimp)+ - done - -lemma setThreadState_oa_queued: - "\\s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \ - setThreadState st t - \\_ s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \" - (is "\\s. P' (?Q P s)\ _ \\_ s. P' (?Q P s)\") - proof (rule P_bool_lift [where P=P']) - show pos: - "\R. \ ?Q R \ setThreadState st t \\_. ?Q R \" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_oa_queued) - apply (simp add: sch_act_simple_def) - apply (rule_tac Q="\_. ?Q R" in hoare_post_imp, clarsimp) - apply (wp threadSet_obj_at'_strongish) - apply (clarsimp) - done - show "\\s. \ ?Q P s\ setThreadState st t \\_ s. \ ?Q P s\" - by (simp add: not_obj_at' comp_def, wp hoare_convert_imp pos) - qed - -lemma setBoundNotification_oa_queued: - "\\s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \ - setBoundNotification ntfn t - \\_ s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \" - (is "\\s. P' (?Q P s)\ _ \\_ s. P' (?Q P s)\") - proof (rule P_bool_lift [where P=P']) - show pos: - "\R. \ ?Q R \ setBoundNotification ntfn t \\_. ?Q R \" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_obj_at'_strongish) - apply (clarsimp) - done - show "\\s. \ ?Q P s\ setBoundNotification ntfn t \\_ s. \ ?Q P s\" - by (simp add: not_obj_at' comp_def, wp hoare_convert_imp pos) - qed - -lemma tcbSchedDequeue_ksQ_distinct[wp]: - "\\s. distinct (ksReadyQueues s p)\ - tcbSchedDequeue t - \\_ s. distinct (ksReadyQueues s p)\" - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply wp+ - apply (rule_tac Q="\_ s. distinct (ksReadyQueues s p)" in hoare_post_imp) - apply (clarsimp | wp)+ - done - -lemma sts_valid_queues_partial: - "\Invariants_H.valid_queues and sch_act_simple\ - setThreadState st t - \\_ s. \t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p))\" - (is "\_\ _ \\_ s. \t' d p. ?OA t' d p s \ ?DISTINCT d p s \") - apply (rule_tac Q="\_ s. (\t' d p. ?OA t' d p s) \ (\d p. ?DISTINCT d p s)" - in hoare_post_imp) - apply (clarsimp) - apply (rule hoare_conjI) - apply (rule_tac Q="\s. \t' d p. - ((t'\set(ksReadyQueues s (d, p)) - \ \ (sch_act_simple s)) - \ (obj_at'(\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ st_tcb_at' runnable' t' s))" in hoare_pre_imp) - apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def - pred_tcb_at'_def obj_at'_def inQ_def) - apply (rule hoare_vcg_all_lift)+ - apply (rule hoare_convert_imp) - including no_pre - apply (wp sts_ksQ setThreadState_oa_queued hoare_impI sts_pred_tcb_neq' - | clarsimp)+ - apply (rule_tac Q="\s. \d p. 
?DISTINCT d p s \ sch_act_simple s" in hoare_pre_imp) - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (wp hoare_vcg_all_lift sts_ksQ) - apply (clarsimp) - done - -lemma tcbSchedDequeue_t_notksQ: - "\\s. t \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\ - tcbSchedDequeue t - \\_ s. t \ set (ksReadyQueues s (d, p))\" - apply (rule_tac Q="(\s. t \ set (ksReadyQueues s (d, p))) - or obj_at'(\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t" - in hoare_pre_imp, clarsimp) - apply (rule hoare_pre_disj) - apply (wp tcbSchedDequeue_notksQ)[1] - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply (wp threadGet_wp)+ - apply (auto simp: obj_at'_real_def ko_wp_at'_def) - done - -lemma sts_invs_minor'_no_valid_queues: - "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st - \ (st \ Inactive \ \ idle' st \ - st' \ Inactive \ \ idle' st')) t - and (\s. t = ksIdleThread s \ idle' st) - and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) - and sch_act_simple - and invs'\ - setThreadState st t - \\_ s. (\t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ - valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ - bitmapQ_no_L1_orphans s \ - valid_pspace' s \ - sch_act_wf (ksSchedulerAction s) s \ - sym_refs (state_refs_of' s) \ - if_live_then_nonz_cap' s \ - if_unsafe_then_cap' s \ - valid_idle' s \ - valid_global_refs' s \ - valid_arch_state' s \ - valid_irq_node' (irq_node' s) s \ - valid_irq_handlers' s \ - valid_irq_states' s \ - valid_machine_state' s \ - irqs_masked' s \ - valid_queues' s \ - ct_not_inQ s \ - ct_idle_or_in_cur_domain' s \ - pspace_domain_valid s \ - ksCurDomain s \ maxDomain \ - valid_dom_schedule' s \ - untyped_ranges_zero' s \ - cur_tcb' s \ - tcb_at' t s\" - apply (simp add: invs'_def valid_state'_def valid_queues_def) - apply (wp sts_valid_queues_partial sts_ksQ - setThreadState_oa_queued sts_st_tcb_at'_cases - irqs_masked_lift - valid_irq_node_lift - setThreadState_ct_not_inQ - sts_valid_bitmapQ_sch_act_simple - sts_valid_bitmapQ_no_L2_orphans_sch_act_simple - sts_valid_bitmapQ_no_L1_orphans_sch_act_simple - hoare_vcg_conj_lift hoare_vcg_imp_lift hoare_vcg_all_lift)+ - apply (clarsimp simp: disj_imp) - apply (intro conjI) - apply (clarsimp simp: valid_queues_def) - apply (rule conjI, clarsimp) - apply (drule valid_queues_no_bitmap_objD, assumption) - apply (clarsimp simp: inQ_def comp_def) - apply (rule conjI) - apply (erule obj_at'_weaken) - apply (simp add: inQ_def) - apply (clarsimp simp: st_tcb_at'_def) - apply (erule obj_at'_weaken) - apply (simp add: inQ_def) - apply (simp add: valid_queues_no_bitmap_def) - apply clarsimp - apply (clarsimp simp: st_tcb_at'_def) - apply (drule obj_at_valid_objs') - apply (clarsimp simp: valid_pspace'_def) - apply (clarsimp simp: valid_obj'_def valid_tcb'_def) - subgoal - by (fastforce simp: valid_tcb_state'_def - split: Structures_H.thread_state.splits) - apply (clarsimp dest!: st_tcb_at_state_refs_ofD' - elim!: rsubst[where P=sym_refs] - intro!: ext) - apply (fastforce simp: valid_queues_def inQ_def pred_tcb_at' 
pred_tcb_at'_def - elim!: st_tcb_ex_cap'' obj_at'_weakenE)+ - done - crunch ct_idle_or_in_cur_domain'[wp]: tcbSchedDequeue ct_idle_or_in_cur_domain' - -lemma tcbSchedDequeue_invs'_no_valid_queues: - "\\s. (\t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ - valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ - bitmapQ_no_L1_orphans s \ - valid_pspace' s \ - sch_act_wf (ksSchedulerAction s) s \ - sym_refs (state_refs_of' s) \ - if_live_then_nonz_cap' s \ - if_unsafe_then_cap' s \ - valid_idle' s \ - valid_global_refs' s \ - valid_arch_state' s \ - valid_irq_node' (irq_node' s) s \ - valid_irq_handlers' s \ - valid_irq_states' s \ - valid_machine_state' s \ - irqs_masked' s \ - valid_queues' s \ - ct_not_inQ s \ - ct_idle_or_in_cur_domain' s \ - pspace_domain_valid s \ - ksCurDomain s \ maxDomain \ - valid_dom_schedule' s \ - untyped_ranges_zero' s \ - cur_tcb' s \ - tcb_at' t s\ - tcbSchedDequeue t - \\_. invs' \" - apply (simp add: invs'_def valid_state'_def) - apply (wp tcbSchedDequeue_valid_queues_weak valid_irq_handlers_lift - valid_irq_node_lift valid_irq_handlers_lift' - tcbSchedDequeue_irq_states irqs_masked_lift cur_tcb_lift - untyped_ranges_zero_lift - | clarsimp simp add: cteCaps_of_def valid_queues_def o_def)+ - apply (rule conjI) - apply (fastforce simp: obj_at'_def inQ_def st_tcb_at'_def valid_queues_no_bitmap_except_def) - apply (rule conjI, clarsimp simp: correct_queue_def) - apply (fastforce simp: valid_pspace'_def intro: obj_at'_conjI - elim: valid_objs'_maxDomain valid_objs'_maxPriority) - done - -lemmas sts_tcbSchedDequeue_invs' = - sts_invs_minor'_no_valid_queues - tcbSchedDequeue_invs'_no_valid_queues + (wp: crunch_wps) lemma asUser_sch_act_simple[wp]: "\sch_act_simple\ asUser s t \\_. sch_act_simple\" @@ -1688,11 +1261,14 @@ lemma (in delete_one_conc) suspend_invs'[wp]: "\invs' and sch_act_simple and tcb_at' t and (\s. t \ ksIdleThread s)\ ThreadDecls_H.suspend t \\rv. invs'\" apply (simp add: suspend_def) - apply (wp sts_tcbSchedDequeue_invs') - apply (simp add: updateRestartPC_def | strengthen no_refs_simple_strg')+ - prefer 2 - apply (wpsimp wp: hoare_drop_imps hoare_vcg_imp_lift' - | strengthen no_refs_simple_strg')+ + apply (wpsimp wp: sts_invs_minor' gts_wp' simp: updateRestartPC_def + | strengthen no_refs_simple_strg')+ + apply (rule_tac Q="\_. invs' and sch_act_simple and st_tcb_at' simple' t + and (\s. t \ ksIdleThread s)" + in hoare_post_imp) + apply clarsimp + apply wpsimp + apply (fastforce elim: pred_tcb'_weakenE) done lemma (in delete_one_conc_pre) suspend_tcb'[wp]: @@ -1736,109 +1312,6 @@ lemma (in delete_one_conc_pre) suspend_st_tcb_at': lemmas (in delete_one_conc_pre) suspend_makes_simple' = suspend_st_tcb_at' [where P=simple', simplified] -lemma valid_queues_not_runnable'_not_ksQ: - assumes "Invariants_H.valid_queues s" and "st_tcb_at' (Not \ runnable') t s" - shows "\d p. 
t \ set (ksReadyQueues s (d, p))" - using assms - apply - - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def pred_tcb_at'_def) - apply (erule_tac x=d in allE) - apply (erule_tac x=p in allE) - apply (clarsimp) - apply (drule(1) bspec) - apply (clarsimp simp: obj_at'_def) - done - -declare valid_queues_not_runnable'_not_ksQ[OF ByAssum, simp] - -lemma cancelSignal_queues[wp]: - "\Invariants_H.valid_queues and st_tcb_at' (Not \ runnable') t\ - cancelSignal t ae \\_. Invariants_H.valid_queues \" - apply (simp add: cancelSignal_def) - apply (wp sts_valid_queues) - apply (rule_tac Q="\_ s. \p. t \ set (ksReadyQueues s p)" in hoare_post_imp, simp) - apply (wp hoare_vcg_all_lift) - apply (wpc) - apply (wp)+ - apply (rule_tac Q="\_ s. Invariants_H.valid_queues s \ (\p. t \ set (ksReadyQueues s p))" in hoare_post_imp) - apply (clarsimp) - apply (wp) - apply (clarsimp) - done - -lemma (in delete_one_conc_pre) cancelIPC_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelIPC t \\rv. Invariants_H.valid_queues\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def - cong: Structures_H.thread_state.case_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (rule hoare_pre) - apply (wpc - | wp hoare_vcg_conj_lift delete_one_queues threadSet_valid_queues - threadSet_valid_objs' sts_valid_queues setEndpoint_ksQ - hoare_vcg_all_lift threadSet_sch_act threadSet_weak_sch_act_wf - | simp add: o_def if_apply_def2 inQ_def - | rule hoare_drop_imps - | clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def - elim!: pred_tcb'_weakenE)+ - apply (fastforce dest: valid_queues_not_runnable'_not_ksQ elim: pred_tcb'_weakenE) - done - -(* FIXME: move to Schedule_R *) -lemma tcbSchedDequeue_nonq[wp]: - "\Invariants_H.valid_queues and tcb_at' t and K (t = t')\ - tcbSchedDequeue t \\_ s. \d p. t' \ set (ksReadyQueues s (d, p))\" - apply (rule hoare_gen_asm) - apply (simp add: tcbSchedDequeue_def) - apply (wp threadGet_wp|simp)+ - apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def obj_at'_def projectKOs inQ_def) - done - -lemma sts_ksQ_oaQ: - "\Invariants_H.valid_queues\ - setThreadState st t - \\_ s. t \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\" - (is "\_\ _ \\_. ?POST\") - proof - - have RR: "\sch_act_simple and ?POST\ rescheduleRequired \\_. ?POST\" - apply (simp add: rescheduleRequired_def) - apply (wp) - apply (clarsimp) - apply (rule_tac - Q="(\s. action = ResumeCurrentThread \ action = ChooseNewThread) and ?POST" - in hoare_pre_imp, assumption) - apply (case_tac action) - apply (clarsimp)+ - apply (wp) - apply (clarsimp simp: sch_act_simple_def) - done - show ?thesis - apply (simp add: setThreadState_def) - apply (wp RR) - apply (rule_tac Q="\_. ?POST" in hoare_post_imp) - apply (clarsimp simp add: sch_act_simple_def) - apply (wp hoare_convert_imp) - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (fastforce dest: bspec elim!: obj_at'_weakenE simp: inQ_def) - done - qed - -lemma (in delete_one_conc_pre) suspend_nonq: - "\Invariants_H.valid_queues and valid_objs' and tcb_at' t - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and (\s. t \ ksIdleThread s) and K (t = t')\ - suspend t - \\rv s. \d p. 
t' \ set (ksReadyQueues s (d, p))\" - apply (rule hoare_gen_asm) - apply (simp add: suspend_def) - unfolding updateRestartPC_def - apply (wp hoare_allI tcbSchedDequeue_t_notksQ sts_ksQ_oaQ) - apply wpsimp+ - done - lemma suspend_makes_inactive: "\K (t = t')\ suspend t \\rv. st_tcb_at' ((=) Inactive) t'\" apply (cases "t = t'", simp_all) @@ -1849,29 +1322,21 @@ lemma suspend_makes_inactive: declare threadSet_sch_act_sane [wp] declare sts_sch_act_sane [wp] -lemma tcbSchedEnqueue_ksQset_weak: - "\\s. t' \ set (ksReadyQueues s p)\ - tcbSchedEnqueue t - \\_ s. t' \ set (ksReadyQueues s p)\" (is "\?PRE\ _ \_\") - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift hoare_vcg_if_lift) - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, ((wp | clarsimp)+))+ - done - lemma tcbSchedEnqueue_sch_act_not_ct[wp]: "\\s. sch_act_not (ksCurThread s) s\ tcbSchedEnqueue t \\_ s. sch_act_not (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + by (rule hoare_weaken_pre, wps, wp, simp) lemma sts_sch_act_not_ct[wp]: "\\s. sch_act_not (ksCurThread s) s\ setThreadState st t \\_ s. sch_act_not (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + by (rule hoare_weaken_pre, wps, wp, simp) text \Cancelling all IPC in an endpoint or notification object\ lemma ep_cancel_corres_helper: - "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs and pspace_aligned and pspace_distinct) - (Invariants_H.valid_queues and valid_queues' and valid_objs') + "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs and valid_queues + and pspace_aligned and pspace_distinct) + (valid_objs' and sym_heap_sched_pointers and valid_sched_pointers) (mapM_x (\t. do y \ set_thread_state t Structures_A.Restart; tcb_sched_action tcb_sched_enqueue t @@ -1880,28 +1345,34 @@ lemma ep_cancel_corres_helper: y \ setThreadState Structures_H.thread_state.Restart t; tcbSchedEnqueue t od) list)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) apply (rule_tac S="{t. 
(fst t = snd t) \ fst t \ set list}" in corres_mapM_x) apply clarsimp apply (rule corres_guard_imp) apply (subst bind_return_unit, rule corres_split[OF _ tcbSchedEnqueue_corres]) + apply simp + apply (rule corres_guard_imp [OF setThreadState_corres]) + apply simp + apply (simp add: valid_tcb_state_def) + apply simp apply simp - apply (rule corres_guard_imp [OF setThreadState_corres]) - apply simp - apply (simp add: valid_tcb_state_def) - apply simp - apply (wp sts_valid_queues)+ - apply (force simp: tcb_at_is_etcb_at) - apply (fastforce elim: obj_at'_weakenE) - apply ((wp hoare_vcg_const_Ball_lift | simp)+)[1] - apply (rule hoare_pre) - apply (wp hoare_vcg_const_Ball_lift - weak_sch_act_wf_lift_linear sts_st_tcb' setThreadState_not_st - sts_valid_queues tcbSchedEnqueue_not_st - | simp)+ - apply (auto elim: obj_at'_weakenE simp: valid_tcb_state'_def) + apply (wpsimp wp: sts_st_tcb_at') + apply (wpsimp wp: sts_valid_objs' | strengthen valid_objs'_valid_tcbs')+ + apply fastforce + apply (wpsimp wp: hoare_vcg_const_Ball_lift set_thread_state_runnable_valid_queues + sts_st_tcb_at' sts_valid_objs' + simp: valid_tcb_state'_def)+ done +crunches set_simple_ko + for ready_qs_distinct[wp]: ready_qs_distinct + and in_correct_ready_q[wp]: in_correct_ready_q + (rule: ready_qs_distinct_lift wp: crunch_wps) + lemma ep_cancel_corres: "corres dc (invs and valid_sched and ep_at ep) (invs' and ep_at' ep) (cancel_all_ipc ep) (cancelAllIPC ep)" @@ -1909,10 +1380,10 @@ proof - have P: "\list. corres dc (\s. (\t \ set list. tcb_at t s) \ valid_pspace s \ ep_at ep s - \ valid_etcbs s \ weak_valid_sched_action s) + \ valid_etcbs s \ weak_valid_sched_action s \ valid_queues s) (\s. (\t \ set list. tcb_at' t s) \ valid_pspace' s \ ep_at' ep s \ weak_sch_act_wf (ksSchedulerAction s) s - \ Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s) + \ valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s) (do x \ set_endpoint ep Structures_A.IdleEP; x \ mapM_x (\t. do y \ set_thread_state t Structures_A.Restart; @@ -1934,22 +1405,23 @@ proof - apply (rule ep_cancel_corres_helper) apply (rule mapM_x_wp') apply (wp weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (rule_tac R="\_ s. \x\set list. tcb_at' x s \ valid_objs' s" + apply (rule_tac R="\_ s. \x\set list. 
tcb_at' x s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s" in hoare_post_add) apply (rule mapM_x_wp') - apply (rule hoare_name_pre_state) - apply ((wp hoare_vcg_const_Ball_lift mapM_x_wp' - sts_valid_queues setThreadState_not_st sts_st_tcb' tcbSchedEnqueue_not_st - | clarsimp - | fastforce elim: obj_at'_weakenE simp: valid_tcb_state'_def)+)[2] - apply (rule hoare_name_pre_state) + apply ((wpsimp wp: hoare_vcg_const_Ball_lift mapM_x_wp' sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+)[3] + apply fastforce apply (wp hoare_vcg_const_Ball_lift set_ep_valid_objs' - | (clarsimp simp: valid_ep'_def) - | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def elim!: valid_objs_valid_tcbE))+ + | (clarsimp simp: valid_ep'_def) + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def + | strengthen valid_objs'_valid_tcbs'))+ done show ?thesis apply (simp add: cancel_all_ipc_def cancelAllIPC_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ep_sp']) apply (rule corres_guard_imp [OF getEndpoint_corres], simp+) apply (case_tac epa, simp_all add: ep_relation_def @@ -1977,6 +1449,8 @@ lemma cancelAllSignals_corres: "corres dc (invs and valid_sched and ntfn_at ntfn) (invs' and ntfn_at' ntfn) (cancel_all_signals ntfn) (cancelAllSignals ntfn)" apply (simp add: cancel_all_signals_def cancelAllSignals_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ntfn_sp']) apply (rule corres_guard_imp [OF getNotification_corres]) apply simp+ @@ -1987,17 +1461,19 @@ lemma cancelAllSignals_corres: apply (rule corres_split[OF _ rescheduleRequired_corres]) apply (rule ep_cancel_corres_helper) apply (wp mapM_x_wp'[where 'b="det_ext state"] - weak_sch_act_wf_lift_linear setThreadState_not_st + weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ apply (rename_tac list) - apply (rule_tac R="\_ s. (\x\set list. tcb_at' x s) \ valid_objs' s" + apply (rule_tac R="\_ s. (\x\set list. tcb_at' x s) \ valid_objs' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_objs' s + \ pspace_aligned' s \ pspace_distinct' s" in hoare_post_add) apply (rule mapM_x_wp') apply (rule hoare_name_pre_state) - apply (wpsimp wp: hoare_vcg_const_Ball_lift - sts_st_tcb' sts_valid_queues setThreadState_not_st - simp: valid_tcb_state'_def) + apply (wpsimp wp: hoare_vcg_const_Ball_lift sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+ apply (wp hoare_vcg_const_Ball_lift set_ntfn_aligned' set_ntfn_valid_objs' weak_sch_act_wf_lift_linear | simp)+ @@ -2044,6 +1520,11 @@ proof - done qed +lemma tcbSchedEnqueue_valid_pspace'[wp]: + "tcbSchedEnqueue tcbPtr \valid_pspace'\" + unfolding valid_pspace'_def + by wpsimp + lemma cancel_all_invs'_helper: "\all_invs_but_sym_refs_ct_not_inQ' and (\s. \x \ set q. tcb_at' x s) and (\s. sym_refs (\x. if x \ set q then {r \ state_refs_of' s x. 
snd r = TCBBound} @@ -2058,8 +1539,7 @@ lemma cancel_all_invs'_helper: apply clarsimp apply (rule hoare_pre) apply (wp valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift - hoare_vcg_const_Ball_lift untyped_ranges_zero_lift - sts_valid_queues sts_st_tcb' setThreadState_not_st + hoare_vcg_const_Ball_lift untyped_ranges_zero_lift sts_st_tcb' sts_valid_objs' | simp add: cteCaps_of_def o_def)+ apply (unfold fun_upd_apply Invariants_H.tcb_st_refs_of'_simps) apply clarsimp @@ -2068,7 +1548,7 @@ lemma cancel_all_invs'_helper: elim!: rsubst[where P=sym_refs] dest!: set_mono_suffix intro!: ext - | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE))+ + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def))+ done lemma ep_q_refs_max: @@ -2085,10 +1565,9 @@ lemma ep_q_refs_max: done lemma rescheduleRequired_invs'[wp]: - "\invs'\ rescheduleRequired \\rv. invs'\" + "rescheduleRequired \invs'\" apply (simp add: rescheduleRequired_def) apply (wpsimp wp: ssa_invs') - apply (clarsimp simp: invs'_def valid_state'_def) done lemma invs_rct_ct_activatable': @@ -2215,6 +1694,7 @@ lemma rescheduleRequired_all_invs_but_ct_not_inQ: lemma cancelAllIPC_invs'[wp]: "\invs'\ cancelAllIPC ep_ptr \\rv. invs'\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) + apply (rule bind_wp[OF _ stateAssert_sp]) apply (wp rescheduleRequired_all_invs_but_ct_not_inQ cancel_all_invs'_helper hoare_vcg_const_Ball_lift valid_global_refs_lift' valid_arch_state_lift' @@ -2224,14 +1704,15 @@ lemma cancelAllIPC_invs'[wp]: prefer 2 apply assumption apply (rule hoare_strengthen_post [OF get_ep_sp']) + apply (rename_tac rv s) apply (clarsimp simp: invs'_def valid_state'_def valid_ep'_def) apply (frule obj_at_valid_objs', fastforce) apply (clarsimp simp: valid_obj'_def) apply (rule conjI) - apply (case_tac r, simp_all add: valid_ep'_def)[1] + apply (case_tac rv, simp_all add: valid_ep'_def)[1] apply (rule conjI[rotated]) apply (drule(1) sym_refs_ko_atD') - apply (case_tac r, simp_all add: st_tcb_at_refs_of_rev')[1] + apply (case_tac rv, simp_all add: st_tcb_at_refs_of_rev')[1] apply (clarsimp elim!: if_live_state_refsE | drule(1) bspec | drule st_tcb_at_state_refs_ofD')+ apply (drule(2) ep_q_refs_max) @@ -2242,7 +1723,8 @@ lemma cancelAllIPC_invs'[wp]: lemma cancelAllSignals_invs'[wp]: "\invs'\ cancelAllSignals ntfn \\rv. invs'\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfna", simp_all) apply (wp, simp) apply (wp, simp) @@ -2273,12 +1755,14 @@ lemma cancelAllSignals_invs'[wp]: done lemma cancelAllIPC_valid_objs'[wp]: - "\valid_objs'\ cancelAllIPC ep \\rv. valid_objs'\" + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllIPC ep \\rv. valid_objs'\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) apply (rule hoare_pre) apply (wp set_ep_valid_objs' setSchedulerAction_valid_objs') - apply (rule_tac Q="\rv s. valid_objs' s \ (\x\set (epQueue ep). tcb_at' x s)" + apply (rule_tac Q="\_ s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ (\x\set (epQueue ep). 
tcb_at' x s)" in hoare_post_imp) apply simp apply (simp add: Ball_def) @@ -2295,9 +1779,10 @@ lemma cancelAllIPC_valid_objs'[wp]: done lemma cancelAllSignals_valid_objs'[wp]: - "\valid_objs'\ cancelAllSignals ntfn \\rv. valid_objs'\" + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllSignals ntfn \\rv. valid_objs'\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfna", simp_all) apply (wp, simp) apply (wp, simp) @@ -2349,19 +1834,17 @@ lemma setThreadState_not_tcb[wp]: "\ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\ setThreadState st t \\rv. ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\" - apply (simp add: setThreadState_def setQueue_def - rescheduleRequired_def tcbSchedEnqueue_def - unless_def bitmap_fun_defs - cong: scheduler_action.case_cong cong del: if_cong - | wp | wpcw)+ - done + by (wpsimp wp: isRunnable_inv threadGet_wp hoare_drop_imps + simp: setThreadState_def setQueue_def + rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + unless_def bitmap_fun_defs)+ lemma tcbSchedEnqueue_unlive: "\ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p and tcb_at' t\ tcbSchedEnqueue t \\_. ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p\" - apply (simp add: tcbSchedEnqueue_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) apply (wp | simp add: setQueue_def bitmap_fun_defs)+ done @@ -2395,19 +1878,41 @@ lemma setObject_ko_wp_at': objBits_def[symmetric] ps_clear_upd in_magnitude_check v) -lemma rescheduleRequired_unlive: - "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ - rescheduleRequired +lemma threadSet_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + threadSet f t \\rv. ko_wp_at' (Not \ live') p\" - apply (simp add: rescheduleRequired_def) - apply (wp | simp | wpc)+ - apply (simp add: tcbSchedEnqueue_def unless_def - threadSet_def setQueue_def threadGet_def) - apply (wp setObject_ko_wp_at getObject_tcb_wp - | simp add: objBits_simps' bitmap_fun_defs split del: if_split)+ - apply (clarsimp simp: o_def) - apply (drule obj_at_ko_at') - apply clarsimp + by (clarsimp simp: threadSet_def valid_def getObject_def + setObject_def in_monad loadObject_default_def + ko_wp_at'_def split_def in_magnitude_check + objBits_simps' updateObject_default_def + ps_clear_upd RISCV64_H.fromPPtr_def) + +lemma tcbSchedEnqueue_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + tcbSchedEnqueue t + \\_. ko_wp_at' (Not \ live') p\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def) + apply (wpsimp wp: threadGet_wp threadSet_unlive_other simp: bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (frule (1) tcbQueueHead_ksReadyQueues) + apply (drule_tac x=p in spec) + apply (fastforce dest!: inQ_implies_tcbQueueds_of + simp: tcbQueueEmpty_def ko_wp_at'_def opt_pred_def opt_map_def + split: option.splits) + done + +lemma rescheduleRequired_unlive[wp]: + "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ + rescheduleRequired + \\_. 
ko_wp_at' (Not \ live') p\" + supply comp_apply[simp del] + unfolding rescheduleRequired_def + apply (wpsimp wp: tcbSchedEnqueue_unlive_other) done lemmas setEndpoint_ko_wp_at' @@ -2417,7 +1922,8 @@ lemma cancelAllIPC_unlive: "\valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s)\ cancelAllIPC ep \\rv. ko_wp_at' (Not \ live') ep\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) apply (rule hoare_pre) apply (wp cancelAll_unlive_helper setEndpoint_ko_wp_at' hoare_vcg_const_Ball_lift rescheduleRequired_unlive @@ -2435,7 +1941,8 @@ lemma cancelAllSignals_unlive: \ obj_at' (\ko. ntfnBoundTCB ko = None) ntfnptr s\ cancelAllSignals ntfnptr \\rv. ko_wp_at' (Not \ live') ntfnptr\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfn", simp_all add: setNotification_def) apply wp apply (fastforce simp: obj_at'_real_def @@ -2494,30 +2001,25 @@ lemma cancelBadgedSends_filterM_helper': apply wp apply clarsimp apply (clarsimp simp: filterM_append bind_assoc simp del: set_append distinct_append) - apply (drule spec, erule hoare_seq_ext[rotated]) - apply (rule hoare_seq_ext [OF _ gts_inv']) + apply (drule spec, erule bind_wp_fwd) + apply (rule bind_wp [OF _ gts_inv']) apply (rule hoare_pre) apply (wp valid_irq_node_lift hoare_vcg_const_Ball_lift sts_sch_act' sch_act_wf_lift valid_irq_handlers_lift'' cur_tcb_lift irqs_masked_lift - sts_st_tcb' sts_valid_queues setThreadState_not_st - tcbSchedEnqueue_not_st - untyped_ranges_zero_lift + sts_st_tcb' untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ apply (frule insert_eqD, frule state_refs_of'_elemD) apply (clarsimp simp: valid_tcb_state'_def st_tcb_at_refs_of_rev') apply (frule pred_tcb_at') apply (rule conjI[rotated], blast) - apply clarsimp + apply (clarsimp simp: valid_pspace'_def cong: conj_cong) apply (intro conjI) - apply (clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE dest!: st_tcb_ex_cap'') - apply (fastforce dest!: st_tcb_ex_cap'') + apply (fastforce simp: valid_tcb'_def dest!: st_tcb_ex_cap'') apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) apply (erule delta_sym_refs) - apply (fastforce elim!: obj_atE' - simp: state_refs_of'_def tcb_bound_refs'_def - subsetD symreftype_inverse' - split: if_split_asm)+ - done + by (fastforce elim!: obj_atE' + simp: state_refs_of'_def tcb_bound_refs'_def subsetD symreftype_inverse' + split: if_split_asm)+ lemmas cancelBadgedSends_filterM_helper = spec [where x=Nil, OF cancelBadgedSends_filterM_helper', simplified] @@ -2527,12 +2029,13 @@ lemma cancelBadgedSends_invs[wp]: shows "\invs'\ cancelBadgedSends epptr badge \\rv. invs'\" apply (simp add: cancelBadgedSends_def) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp'], rename_tac ep) apply (case_tac ep, simp_all) apply ((wp | simp)+)[2] apply (subst bind_assoc [where g="\_. 
rescheduleRequired", symmetric])+ - apply (rule hoare_seq_ext + apply (rule bind_wp [OF rescheduleRequired_all_invs_but_ct_not_inQ]) apply (simp add: list_case_return cong: list.case_cong) apply (rule hoare_pre, wp valid_irq_node_lift irqs_masked_lift) @@ -2559,11 +2062,20 @@ lemma cancelBadgedSends_invs[wp]: crunch state_refs_of[wp]: tcb_sched_action "\s. P (state_refs_of s)" +lemma setEndpoint_valid_tcbs'[wp]: + "setEndpoint ePtr val \valid_tcbs'\" + unfolding setEndpoint_def + apply (wpsimp wp: setObject_valid_tcbs'[where P=\]) + apply (clarsimp simp: updateObject_default_def monad_simps) + apply fastforce + done lemma cancelBadgedSends_corres: "corres dc (invs and valid_sched and ep_at epptr) (invs' and ep_at' epptr) (cancel_badged_sends epptr bdg) (cancelBadgedSends epptr bdg)" apply (simp add: cancel_badged_sends_def cancelBadgedSends_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_guard_imp) apply (rule corres_split[OF getEndpoint_corres _ get_simple_ko_sp get_ep_sp', where Q="invs and valid_sched" and Q'=invs']) @@ -2573,11 +2085,16 @@ lemma cancelBadgedSends_corres: apply (rule corres_guard_imp) apply (rule corres_split_nor[OF setEndpoint_corres]) apply (simp add: ep_relation_def) - apply (rule corres_split_eqr[OF _ _ _ hoare_post_add[where R="\_. valid_objs'"]]) + apply (rule corres_split_eqr[OF _ _ _ hoare_post_add + [where R="\_. valid_objs' and pspace_aligned' + and pspace_distinct'"]]) apply (rule_tac S="(=)" and Q="\xs s. (\x \ set xs. (epptr, TCBBlockedSend) \ state_refs_of s x) \ - distinct xs \ valid_etcbs s \ pspace_aligned s \ pspace_distinct s" - and Q'="\xs s. Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s" + distinct xs \ valid_etcbs s \ + in_correct_ready_q s \ ready_qs_distinct s \ + pspace_aligned s \ pspace_distinct s" + and Q'="\_ s. 
valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" in corres_mapM_list_all2[where r'="(=)"], simp_all add: list_all2_refl)[1] apply (clarsimp simp: liftM_def[symmetric] o_def) @@ -2588,61 +2105,56 @@ lemma cancelBadgedSends_corres: apply (clarsimp simp: o_def dc_def[symmetric] liftM_def) apply (rule corres_split[OF setThreadState_corres]) apply simp - apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) apply (rule corres_trivial) apply simp apply wp+ apply simp - apply (wp sts_valid_queues gts_st_tcb_at)+ + apply (wp sts_st_tcb_at' gts_st_tcb_at sts_valid_objs' + | strengthen valid_objs'_valid_tcbs')+ apply (clarsimp simp: valid_tcb_state_def tcb_at_def st_tcb_def2 st_tcb_at_refs_of_rev dest!: state_refs_of_elemD elim!: tcb_at_is_etcb_at[rotated]) - apply (simp add: is_tcb_def) - apply simp + apply (simp add: valid_tcb_state'_def) apply (wp hoare_vcg_const_Ball_lift gts_wp | clarsimp)+ - apply (wp hoare_vcg_imp_lift sts_st_tcb' sts_valid_queues + apply (wp hoare_vcg_imp_lift sts_st_tcb' sts_valid_objs' | clarsimp simp: valid_tcb_state'_def)+ apply (rule corres_split[OF _ rescheduleRequired_corres]) apply (rule setEndpoint_corres) apply (simp split: list.split add: ep_relation_def) apply (wp weak_sch_act_wf_lift_linear)+ - apply (wp gts_st_tcb_at hoare_vcg_imp_lift mapM_wp' - sts_st_tcb' sts_valid_queues - set_thread_state_runnable_weak_valid_sched_action - | clarsimp simp: valid_tcb_state'_def)+ - apply (wp hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear set_ep_valid_objs' - | simp)+ + apply (wpsimp wp: mapM_wp' set_thread_state_runnable_weak_valid_sched_action + simp: valid_tcb_state'_def) + apply ((wpsimp wp: hoare_vcg_imp_lift mapM_wp' sts_valid_objs' simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: set_ep_valid_objs')+ apply (clarsimp simp: conj_comms) apply (frule sym_refs_ko_atD, clarsimp+) apply (rule obj_at_valid_objsE, assumption+, clarsimp+) apply (clarsimp simp: valid_obj_def valid_ep_def valid_sched_def valid_sched_action_def) apply (rule conjI, fastforce) apply (rule conjI, fastforce) + apply (rule conjI, fastforce) apply (rule conjI, erule obj_at_weakenE, clarsimp simp: is_ep) + apply (rule conjI, fastforce) apply (clarsimp simp: st_tcb_at_refs_of_rev) apply (drule(1) bspec, drule st_tcb_at_state_refs_ofD, clarsimp) apply (simp add: set_eq_subset) apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI]) - apply (drule ko_at_valid_objs', clarsimp) - apply simp - apply (clarsimp simp: valid_obj'_def valid_ep'_def invs_weak_sch_act_wf - invs'_def valid_state'_def) + apply (fastforce simp: valid_ep'_def) done +crunches updateRestartPC + for tcb_at'[wp]: "tcb_at' t" + (simp: crunch_simps) + lemma suspend_unqueued: "\\\ suspend t \\rv. obj_at' (Not \ tcbQueued) t\" - apply (simp add: suspend_def unless_def tcbSchedDequeue_def) - apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift) - apply (simp add: threadGet_def| wp getObject_tcb_wp)+ - apply (rule hoare_strengthen_post, rule hoare_post_taut) - apply (fastforce simp: obj_at'_def) - apply (rule hoare_post_taut) - apply wp+ - done + unfolding suspend_def + by (wpsimp simp: comp_def wp: tcbSchedDequeue_not_tcbQueued) crunch unqueued: prepareThreadDelete "obj_at' (\a. \ tcbQueued a) t" crunch inactive: prepareThreadDelete "st_tcb_at' ((=) Inactive) t'" -crunch nonq: prepareThreadDelete " \s. \d p. 
t' \ set (ksReadyQueues s (d, p))" end end diff --git a/proof/refine/RISCV64/Ipc_R.thy b/proof/refine/RISCV64/Ipc_R.thy index 16bc96cafc..bb6b1c5f7f 100644 --- a/proof/refine/RISCV64/Ipc_R.thy +++ b/proof/refine/RISCV64/Ipc_R.thy @@ -278,11 +278,7 @@ lemmas unifyFailure_discard2 lemma deriveCap_not_null: "\\\ deriveCap slot cap \\rv. K (rv \ NullCap \ cap \ NullCap)\,-" apply (simp add: deriveCap_def split del: if_split) - apply (case_tac cap) - apply (simp_all add: Let_def isCap_simps) - apply wp - apply simp - done + by (case_tac cap; wpsimp simp: isCap_simps) lemma deriveCap_derived_foo: "\\s. \cap'. (cte_wp_at' (\cte. badge_derived' cap (cteCap cte) @@ -320,7 +316,7 @@ lemma cteInsert_cte_wp_at: cteInsert cap src dest \\uu. cte_wp_at' (\c. P (cteCap c)) p\" apply (simp add: cteInsert_def) - apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp static_imp_wp + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp | clarsimp simp: comp_def | unfold setUntypedCapAsFull_def)+ apply (drule cte_at_cte_wp_atD) @@ -364,7 +360,7 @@ lemma cteInsert_weak_cte_wp_at3: else cte_wp_at' (\c. P (cteCap c)) p s\ cteInsert cap src dest \\uu. cte_wp_at' (\c. P (cteCap c)) p\" - by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp | clarsimp simp: comp_def cteInsert_def | unfold setUntypedCapAsFull_def | auto simp: cte_wp_at'_def dest!: imp)+ @@ -486,7 +482,7 @@ next apply (rule_tac Q' ="\cap' s. (cap'\ cap.NullCap \ cte_wp_at (is_derived (cdt s) (a, b) cap') (a, b) s \ QM s cap')" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption @@ -498,13 +494,13 @@ next apply (rule_tac Q' ="\cap' s. (cap'\ capability.NullCap \ cte_wp_at' (\c. 
is_derived' (ctes_of s) (cte_map (a, b)) cap' (cteCap c)) (cte_map (a, b)) s \ QM s cap')" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption apply (subst imp_conjR) apply (rule hoare_vcg_conj_liftE_R) - apply (rule hoare_post_imp_R[OF deriveCap_derived]) + apply (rule hoare_strengthen_postE_R[OF deriveCap_derived]) apply (clarsimp simp:cte_wp_at_ctes_of) apply (wp deriveCap_derived_foo) apply (clarsimp simp: cte_wp_at_caps_of_state remove_rights_def @@ -584,7 +580,7 @@ lemma cteInsert_cte_cap_to': apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of) apply (rule_tac x = "cref" in exI) apply (rule conjI) @@ -608,7 +604,7 @@ lemma cteInsert_assume_Null: apply (rule hoare_name_pre_state) apply (erule impCE) apply (simp add: cteInsert_def) - apply (rule hoare_seq_ext[OF _ getCTE_sp])+ + apply (rule bind_wp[OF _ getCTE_sp])+ apply (rule hoare_name_pre_state) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule hoare_pre(1)) @@ -627,7 +623,7 @@ lemma cteInsert_weak_cte_wp_at2: apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of weak) apply auto done @@ -660,11 +656,11 @@ lemma transferCapsToSlots_presM: apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift | assumption | wpc)+ apply (rule cteInsert_assume_Null) - apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' static_imp_wp) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift static_imp_wp)+ + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at static_imp_wp + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp deriveCap_derived_foo)+ apply (thin_tac "\slots. PROP P slots" for P) apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def @@ -713,8 +709,7 @@ lemma transferCapsToSlots_mdb[wp]: \ transferCaps_srcs caps s\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_mdb'\" - apply (wp transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) - apply clarsimp + apply (wpsimp wp: transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) apply (frule valid_capAligned) apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def badge_derived'_def) apply wp @@ -765,14 +760,6 @@ lemma tcts_sch_act[wp]: \\rv s. sch_act_wf (ksSchedulerAction s) s\" by (wp sch_act_wf_lift tcb_in_cur_domain'_lift transferCapsToSlots_pres1) -lemma tcts_vq[wp]: - "\Invariants_H.valid_queues\ transferCapsToSlots ep buffer n caps slots mi \\rv. Invariants_H.valid_queues\" - by (wp valid_queues_lift transferCapsToSlots_pres1) - -lemma tcts_vq'[wp]: - "\valid_queues'\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_queues'\" - by (wp valid_queues_lift' transferCapsToSlots_pres1) - crunch state_refs_of' [wp]: setExtraBadge "\s. 
P (state_refs_of' s)" lemma tcts_state_refs_of'[wp]: @@ -861,7 +848,7 @@ lemma transferCapsToSlots_irq_handlers[wp]: and transferCaps_srcs caps\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_irq_handlers'\" - apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) + apply (wpsimp wp: transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) apply (clarsimp simp: is_derived'_def cte_wp_at_ctes_of badge_derived'_def) apply (erule(2) valid_irq_handlers_ctes_ofD) apply wp @@ -958,8 +945,8 @@ lemma tcts_zero_ranges[wp]: \ transferCaps_srcs caps s\ transferCapsToSlots ep buffer n caps slots mi \\rv. untyped_ranges_zero'\" - apply (wp transferCapsToSlots_presM[where emx=True and vo=True - and drv=True and pad=True]) + apply (wpsimp wp: transferCapsToSlots_presM[where emx=True and vo=True + and drv=True and pad=True]) apply (clarsimp simp: cte_wp_at_ctes_of) apply (simp add: cteCaps_of_def) apply (rule hoare_pre, wp untyped_ranges_zero_lift) @@ -980,6 +967,11 @@ crunch ksDomScheduleIdx[wp]: setExtraBadge "\s. P (ksDomScheduleIdx s)" crunch ksDomSchedule[wp]: transferCapsToSlots "\s. P (ksDomSchedule s)" crunch ksDomScheduleIdx[wp]: transferCapsToSlots "\s. P (ksDomScheduleIdx s)" +crunches transferCapsToSlots + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift) lemma transferCapsToSlots_invs[wp]: "\\s. invs' s \ distinct slots @@ -1038,7 +1030,7 @@ lemma transferCaps_corres: apply (rule corres_rel_imp, rule transferCapsToSlots_corres, simp_all add: split_def)[1] apply (case_tac info, simp) - apply (wp hoare_vcg_all_lift get_rs_cte_at static_imp_wp + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp | simp only: ball_conj_distrib)+ apply (simp add: cte_map_def tcb_cnode_index_def split_def) apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 @@ -1179,7 +1171,7 @@ lemmas copyMRs_typ_at_lifts[wp] = typ_at_lifts [OF copyMRs_typ_at'] lemma copy_mrs_invs'[wp]: "\ invs' and tcb_at' s and tcb_at' r \ copyMRs s sb r rb n \\rv. invs' \" - including no_pre + including classic_wp_pre apply (simp add: copyMRs_def) apply (wp dmo_invs' no_irq_mapM no_irq_storeWord| simp add: split_def) @@ -1227,18 +1219,12 @@ lemma set_mrs_valid_objs' [wp]: crunch valid_objs'[wp]: copyMRs valid_objs' (wp: crunch_wps simp: crunch_simps) -crunch valid_queues'[wp]: asUser "Invariants_H.valid_queues'" - (simp: crunch_simps wp: hoare_drop_imps) - - lemma setMRs_invs_bits[wp]: "\valid_pspace'\ setMRs t buf mrs \\rv. valid_pspace'\" "\\s. sch_act_wf (ksSchedulerAction s) s\ setMRs t buf mrs \\rv s. sch_act_wf (ksSchedulerAction s) s\" "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ setMRs t buf mrs \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ setMRs t buf mrs \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ setMRs t buf mrs \\rv. valid_queues'\" "\\s. P (state_refs_of' s)\ setMRs t buf mrs \\rv s. P (state_refs_of' s)\" @@ -1255,8 +1241,6 @@ lemma copyMRs_invs_bits[wp]: "\valid_pspace'\ copyMRs s sb r rb n \\rv. valid_pspace'\" "\\s. sch_act_wf (ksSchedulerAction s) s\ copyMRs s sb r rb n \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ copyMRs s sb r rb n \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ copyMRs s sb r rb n \\rv. valid_queues'\" "\\s. P (state_refs_of' s)\ copyMRs s sb r rb n \\rv s. 
P (state_refs_of' s)\" @@ -1453,7 +1437,7 @@ lemma doNormalTransfer_corres: hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' hoare_vcg_const_Ball_lift lookupExtraCaps_length | simp add: if_apply_def2)+) - apply (wp static_imp_wp | strengthen valid_msg_length_strengthen)+ + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ apply clarsimp apply auto done @@ -1736,10 +1720,6 @@ crunch vp[wp]: doIPCTransfer "valid_pspace'" (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' wp: transferCapsToSlots_vp simp:ball_conj_distrib ) crunch sch_act_wf[wp]: doIPCTransfer "\s. sch_act_wf (ksSchedulerAction s) s" (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) -crunch vq[wp]: doIPCTransfer "Invariants_H.valid_queues" - (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) -crunch vq'[wp]: doIPCTransfer "valid_queues'" - (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) crunch state_refs_of[wp]: doIPCTransfer "\s. P (state_refs_of' s)" (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) crunch ct[wp]: doIPCTransfer "cur_tcb'" @@ -1765,7 +1745,7 @@ declare asUser_global_refs' [wp] lemma lec_valid_cap' [wp]: "\valid_objs'\ lookupExtraCaps thread xa mi \\rv s. (\x\set rv. s \' fst x)\, -" - apply (rule hoare_pre, rule hoare_post_imp_R) + apply (rule hoare_pre, rule hoare_strengthen_postE_R) apply (rule hoare_vcg_conj_lift_R[where R=valid_objs' and S="\_. valid_objs'"]) apply (rule lookupExtraCaps_srcs) apply wp @@ -1891,16 +1871,6 @@ lemma getThreadCallerSlot_inv: "\P\ getThreadCallerSlot t \\_. P\" by (simp add: getThreadCallerSlot_def, wp) -lemma deleteCallerCap_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - deleteCallerCap t - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: deleteCallerCap_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) - apply (wp getThreadCallerSlot_inv cteDeleteOne_ct_not_ksQ getCTE_wp) - apply (clarsimp simp: cte_wp_at_ctes_of) - done - lemma finaliseCapTrue_standin_tcb_at' [wp]: "\tcb_at' x\ finaliseCapTrue_standin cap v2 \\_. tcb_at' x\" apply (simp add: finaliseCapTrue_standin_def Let_def) @@ -2056,39 +2026,11 @@ lemma cteDeleteOne_weak_sch_act[wp]: crunch weak_sch_act_wf[wp]: emptySlot "\s. weak_sch_act_wf (ksSchedulerAction s) s" crunch pred_tcb_at'[wp]: handleFaultReply "pred_tcb_at' proj P t" -crunch valid_queues[wp]: handleFaultReply "Invariants_H.valid_queues" -crunch valid_queues'[wp]: handleFaultReply "valid_queues'" crunch tcb_in_cur_domain'[wp]: handleFaultReply "tcb_in_cur_domain' t" crunch sch_act_wf[wp]: unbindNotification "\s. sch_act_wf (ksSchedulerAction s) s" (wp: sbn_sch_act') -crunch valid_queues'[wp]: cteDeleteOne valid_queues' - (simp: crunch_simps unless_def inQ_def - wp: crunch_wps sts_st_tcb' getObject_inv loadObject_default_inv - threadSet_valid_queues' rescheduleRequired_valid_queues'_weak) - -lemma cancelSignal_valid_queues'[wp]: - "\valid_queues'\ cancelSignal t ntfn \\rv. valid_queues'\" - apply (simp add: cancelSignal_def) - apply (rule hoare_pre) - apply (wp getNotification_wp| wpc | simp)+ - done - -lemma cancelIPC_valid_queues'[wp]: - "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) \ cancelIPC t \\rv. 
valid_queues'\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def locateSlot_conv liftM_def) - apply (rule hoare_seq_ext[OF _ gts_sp']) - apply (case_tac state, simp_all) defer 2 - apply (rule hoare_pre) - apply ((wp getEndpoint_wp getCTE_wp | wpc | simp)+)[8] - apply (wp cteDeleteOne_valid_queues') - apply (rule_tac Q="\_. valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) - apply (clarsimp simp: capHasProperty_def cte_wp_at_ctes_of) - apply (wp threadSet_valid_queues' threadSet_sch_act| simp)+ - apply (clarsimp simp: inQ_def) - done - crunch valid_objs'[wp]: handleFaultReply valid_objs' lemma cte_wp_at_is_reply_cap_toI: @@ -2100,6 +2042,13 @@ crunches handle_fault_reply for pspace_alignedp[wp]: pspace_aligned and pspace_distinct[wp]: pspace_distinct +crunches cteDeleteOne, doIPCTransfer, handleFaultReply + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + lemma doReplyTransfer_corres: "corres dc (einvs and tcb_at receiver and tcb_at sender @@ -2145,8 +2094,12 @@ lemma doReplyTransfer_corres: apply (rule corres_split[OF setThreadState_corres]) apply simp apply (rule possibleSwitchTo_corres) - apply (wp set_thread_state_runnable_valid_sched set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' sts_valid_queues sts_valid_objs' delete_one_tcbDomain_obj_at' - | simp add: valid_tcb_state'_def)+ + apply (wp set_thread_state_runnable_valid_sched + set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' + sts_valid_objs' delete_one_tcbDomain_obj_at' + | simp add: valid_tcb_state'_def + | strengthen valid_queues_in_correct_ready_q valid_sched_valid_queues + valid_queues_ready_qs_distinct)+ apply (strengthen cte_wp_at_reply_cap_can_fast_finalise) apply (wp hoare_vcg_conj_lift) apply (rule hoare_strengthen_post [OF do_ipc_transfer_non_null_cte_wp_at]) @@ -2155,7 +2108,11 @@ lemma doReplyTransfer_corres: apply (fastforce) apply (clarsimp simp:is_cap_simps) apply (wp weak_valid_sched_action_lift)+ - apply (rule_tac Q="\_. valid_queues' and valid_objs' and cur_tcb' and tcb_at' receiver and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp, simp add: sch_act_wf_weak) + apply (rule_tac Q="\_ s. valid_objs' s \ cur_tcb' s \ tcb_at' receiver s + \ sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp, simp add: sch_act_wf_weak) apply (wp tcb_in_cur_domain'_lift) defer apply (simp) @@ -2187,7 +2144,9 @@ lemma doReplyTransfer_corres: apply (rule_tac Q="valid_sched and cur_tcb and tcb_at receiver and pspace_aligned and pspace_distinct" and Q'="tcb_at' receiver and cur_tcb' and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s) - and Invariants_H.valid_queues and valid_queues' and valid_objs'" + and valid_objs' + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" in corres_guard_imp) apply (case_tac rvb, simp_all)[1] apply (rule corres_guard_imp) @@ -2195,19 +2154,17 @@ lemma doReplyTransfer_corres: apply (clarsimp simp: tcb_relation_def) apply (fold dc_def, rule possibleSwitchTo_corres) apply simp - apply (wp static_imp_wp static_imp_conj_wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' - sts_st_tcb' sts_valid_queues + apply (wp hoare_weak_lift_imp hoare_weak_lift_imp_conj set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ apply (rule corres_guard_imp) apply (rule setThreadState_corres) apply (clarsimp simp: tcb_relation_def) apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state - thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' + thread_set_not_state_valid_sched threadSet_tcbDomain_triv threadSet_valid_objs' + threadSet_sched_pointers threadSet_valid_sched_pointers | simp add: valid_tcb_state'_def)+ - apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state - thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' - | simp add: runnable_def inQ_def valid_tcb'_def)+ apply (rule_tac Q="\_. valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and valid_objs and pspace_aligned and pspace_distinct" in hoare_strengthen_post [rotated], clarsimp) @@ -2297,15 +2254,15 @@ lemma setupCallerCap_corres: tcb_cnode_index_def cte_level_bits_def) apply (simp add: cte_map_def tcbCallerSlot_def tcb_cnode_index_def cte_level_bits_def) - apply (rule_tac Q="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" + in hoare_post_add) apply (wp, (wp getSlotCap_wp)+) apply blast apply (rule no_fail_pre, wp) apply (clarsimp simp: cte_wp_at'_def cte_at'_def) - apply (rule_tac Q="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" - in valid_prove_more) + apply (rule_tac R="\rv. 
cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" + in hoare_post_add) apply (wp, (wp getCTE_wp')+) apply blast apply (rule no_fail_pre, wp) @@ -2362,7 +2319,7 @@ lemma possibleSwitchTo_weak_sch_act_wf[wp]: bitmap_fun_defs) apply (wp rescheduleRequired_weak_sch_act_wf weak_sch_act_wf_lift_linear[where f="tcbSchedEnqueue t"] - getObject_tcb_wp static_imp_wp + getObject_tcb_wp hoare_weak_lift_imp | wpc)+ apply (clarsimp simp: obj_at'_def projectKOs weak_sch_act_wf_def ps_clear_def tcb_in_cur_domain'_def) done @@ -2477,10 +2434,12 @@ proof - apply (wp hoare_drop_imps)[1] apply (wp | simp)+ apply (wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases) - apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + apply (wp sts_weak_sch_act_wf sts_valid_objs' sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases)[1] apply (simp add: valid_tcb_state_def pred_conj_def) - apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg) + apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues)+ apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift | clarsimp simp: is_cap_simps)+)[1] apply (simp add: pred_conj_def) @@ -2489,7 +2448,7 @@ proof - apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift hoare_drop_imps)[1] apply (wp gts_st_tcb_at)+ apply (simp add: pred_conj_def cong: conj_cong) - apply (wp hoare_post_taut) + apply (wp hoare_TrueI) apply (simp) apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb')+ apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def ep_redux_simps @@ -2545,17 +2504,19 @@ proof - apply (simp add: if_apply_def2) apply ((wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases | simp add: if_apply_def2 split del: if_split)+)[1] - apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + apply (wp sts_weak_sch_act_wf sts_valid_objs' sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases) apply (simp add: valid_tcb_state_def pred_conj_def) apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift - | clarsimp simp:is_cap_simps)+)[1] + | clarsimp simp: is_cap_simps + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues )+)[1] apply (simp add: valid_tcb_state'_def pred_conj_def) apply (strengthen sch_act_wf_weak) apply (wp weak_sch_act_wf_lift_linear hoare_drop_imps) apply (wp gts_st_tcb_at)+ apply (simp add: pred_conj_def cong: conj_cong) - apply (wp hoare_post_taut) + apply (wp hoare_TrueI) apply simp apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb') apply (clarsimp simp add: invs_def valid_state_def @@ -2581,7 +2542,7 @@ lemmas setMessageInfo_typ_ats[wp] = typ_at_lifts [OF setMessageInfo_typ_at'] declare tl_drop_1[simp] crunch cur[wp]: cancel_ipc "cur_tcb" - (wp: select_wp crunch_wps simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) lemma valid_sched_weak_strg: "valid_sched s \ weak_valid_sched_action s" @@ -2623,14 +2584,15 @@ lemma sendSignal_corres: apply (rule possibleSwitchTo_corres) apply wp apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' - sts_valid_queues sts_st_tcb' hoare_disjI2 + sts_st_tcb' sts_valid_objs' hoare_disjI2 cancel_ipc_cte_wp_at_not_reply_state | strengthen invs_vobjs_strgs invs_psp_aligned_strg valid_sched_weak_strg + valid_queues_in_correct_ready_q 
valid_queues_ready_qs_distinct + valid_sched_valid_queues | simp add: valid_tcb_state_def)+ apply (rule_tac Q="\rv. invs' and tcb_at' a" in hoare_strengthen_post) apply wp - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak - valid_tcb_state'_def) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak valid_tcb_state'_def) apply (rule setNotification_corres) apply (clarsimp simp add: ntfn_relation_def) apply (wp gts_wp gts_wp' | clarsimp)+ @@ -2656,23 +2618,23 @@ lemma sendSignal_corres: apply (rule corres_split[OF asUser_setRegister_corres]) apply (rule possibleSwitchTo_corres) apply ((wp | simp)+)[1] - apply (rule_tac Q="\_. Invariants_H.valid_queues and valid_queues' and - (\s. sch_act_wf (ksSchedulerAction s) s) and + apply (rule_tac Q="\_. (\s. sch_act_wf (ksSchedulerAction s) s) and cur_tcb' and - st_tcb_at' runnable' (hd list) and valid_objs'" + st_tcb_at' runnable' (hd list) and valid_objs' and + sym_heap_sched_pointers and valid_sched_pointers and + pspace_aligned' and pspace_distinct'" in hoare_post_imp, clarsimp simp: pred_tcb_at' elim!: sch_act_wf_weak) apply (wp | simp)+ apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb | simp)+ apply (wp set_simple_ko_valid_objs set_ntfn_aligned' set_ntfn_valid_objs' hoare_vcg_disj_lift weak_sch_act_wf_lift_linear | simp add: valid_tcb_state_def valid_tcb_state'_def)+ - apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def - valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def - valid_sched_action_def) + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def + valid_sched_action_def) apply (auto simp: valid_ntfn'_def )[1] apply (clarsimp simp: invs'_def valid_state'_def) @@ -2690,16 +2652,14 @@ lemma sendSignal_corres: apply (wp cur_tcb_lift | simp)+ apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb - | simp)+ + apply (wpsimp wp: sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb) apply (wp set_ntfn_aligned' set_simple_ko_valid_objs set_ntfn_valid_objs' hoare_vcg_disj_lift weak_sch_act_wf_lift_linear | simp add: valid_tcb_state_def valid_tcb_state'_def)+ - apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def - valid_pspace_def neq_Nil_conv - ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def - split: option.splits) + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def neq_Nil_conv + ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def + split: option.splits) apply (auto simp: valid_ntfn'_def neq_Nil_conv invs'_def valid_state'_def weak_sch_act_wf_def split: option.splits)[1] @@ -2725,43 +2685,11 @@ lemma possibleSwitchTo_sch_act[wp]: possibleSwitchTo t \\rv s. sch_act_wf (ksSchedulerAction s) s\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadSet_sch_act setQueue_sch_act threadGet_wp + apply (wp hoare_weak_lift_imp threadSet_sch_act setQueue_sch_act threadGet_wp | simp add: unless_def | wpc)+ apply (auto simp: obj_at'_def projectKOs tcb_in_cur_domain'_def) done -lemma possibleSwitchTo_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. 
sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t\ - possibleSwitchTo t - \\rv. Invariants_H.valid_queues\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp hoare_drop_imps | wpc | simp)+ - apply (auto simp: valid_tcb'_def weak_sch_act_wf_def - dest: pred_tcb_at' - elim!: valid_objs_valid_tcbE) - done - -lemma possibleSwitchTo_ksQ': - "\(\s. t' \ set (ksReadyQueues s p) \ sch_act_not t' s) and K(t' \ t)\ - possibleSwitchTo t - \\_ s. t' \ set (ksReadyQueues s p)\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp - | wpc - | simp split del: if_split)+ - apply (auto simp: obj_at'_def) - done - -lemma possibleSwitchTo_valid_queues'[wp]: - "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) - and st_tcb_at' runnable' t\ - possibleSwitchTo t - \\rv. valid_queues'\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadGet_wp | wpc | simp)+ - apply (auto simp: obj_at'_def) - done - crunch st_refs_of'[wp]: possibleSwitchTo "\s. P (state_refs_of' s)" (wp: crunch_wps) @@ -2773,15 +2701,15 @@ crunch ct[wp]: possibleSwitchTo cur_tcb' (wp: cur_tcb_lift crunch_wps) lemma possibleSwitchTo_iflive[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' t - and (\s. sch_act_wf (ksSchedulerAction s) s)\ - possibleSwitchTo t - \\rv. if_live_then_nonz_cap'\" + "\if_live_then_nonz_cap' and ex_nonz_cap_to' t and (\s. sch_act_wf (ksSchedulerAction s) s) + and pspace_aligned' and pspace_distinct'\ + possibleSwitchTo t + \\_. if_live_then_nonz_cap'\" apply (simp add: possibleSwitchTo_def curDomain_def) apply (wp | wpc | simp)+ apply (simp only: imp_conv_disj, wp hoare_vcg_all_lift hoare_vcg_disj_lift) apply (wp threadGet_wp)+ - apply (auto simp: obj_at'_def projectKOs) + apply (auto simp: obj_at'_def) done crunch ifunsafe[wp]: possibleSwitchTo if_unsafe_then_cap' @@ -2813,10 +2741,6 @@ crunch irqs_masked'[wp]: sendSignal "irqs_masked'" simp: crunch_simps unless_def o_def rule: irqs_masked_lift) -lemma sts_running_valid_queues: - "runnable' st \ \ Invariants_H.valid_queues \ setThreadState st t \\_. Invariants_H.valid_queues \" - by (wp sts_valid_queues, clarsimp) - lemma ct_in_state_activatable_imp_simple'[simp]: "ct_in_state' activatable' s \ ct_in_state' simple' s" apply (simp add: ct_in_state'_def) @@ -2829,24 +2753,21 @@ lemma setThreadState_nonqueued_state_update: \ st \ {Inactive, Running, Restart, IdleThreadState} \ (st \ Inactive \ ex_nonz_cap_to' t s) \ (t = ksIdleThread s \ idle' st) - - \ (\ runnable' st \ sch_act_simple s) - \ (\ runnable' st \ (\p. t \ set (ksReadyQueues s p)))\ - setThreadState st t \\rv. invs'\" + \ (\ runnable' st \ sch_act_simple s)\ + setThreadState st t + \\_. 
invs'\" apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre, wp valid_irq_node_lift - sts_valid_queues - setThreadState_ct_not_inQ) + apply (rule hoare_pre, wp valid_irq_node_lift setThreadState_ct_not_inQ) apply (clarsimp simp: pred_tcb_at') apply (rule conjI, fastforce simp: valid_tcb_state'_def) apply (drule simple_st_tcb_at_state_refs_ofD') apply (drule bound_tcb_at_state_refs_ofD') - apply (rule conjI, fastforce) - apply clarsimp - apply (erule delta_sym_refs) - apply (fastforce split: if_split_asm) - apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def - split: if_split_asm) + apply (rule conjI) + apply clarsimp + apply (erule delta_sym_refs) + apply (fastforce split: if_split_asm) + apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def split: if_split_asm) + apply fastforce done lemma cteDeleteOne_reply_cap_to'[wp]: @@ -2855,7 +2776,7 @@ lemma cteDeleteOne_reply_cap_to'[wp]: cteDeleteOne slot \\rv. ex_nonz_cap_to' p\" apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (subgoal_tac "isReplyCap (cteCap cte)") apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv @@ -2914,16 +2835,14 @@ lemma cancelAllIPC_not_rct[wp]: \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" apply (simp add: cancelAllIPC_def) apply (wp | wpc)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wp)+ apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) apply simp apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) - apply simp - apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (wp hoare_vcg_all_lift hoare_drop_imp) - apply (simp_all) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ done lemma cancelAllSignals_not_rct[wp]: @@ -2932,12 +2851,10 @@ lemma cancelAllSignals_not_rct[wp]: \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" apply (simp add: cancelAllSignals_def) apply (wp | wpc)+ - apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) - apply simp - apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (wp hoare_vcg_all_lift hoare_drop_imp) - apply (simp_all) + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ done crunch not_rct[wp]: finaliseCapTrue_standin "\s. ksSchedulerAction s \ ResumeCurrentThread" @@ -3009,7 +2926,7 @@ lemma sai_invs'[wp]: "\invs' and ex_nonz_cap_to' ntfnptr\ sendSignal ntfnptr badge \\y. invs'\" unfolding sendSignal_def - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (case_tac "ntfnObj nTFN", simp_all) prefer 3 apply (rename_tac list) @@ -3021,7 +2938,6 @@ lemma sai_invs'[wp]: apply (clarsimp simp:conj_comms) apply (simp add: invs'_def valid_state'_def) apply (wp valid_irq_node_lift sts_valid_objs' setThreadState_ct_not_inQ - sts_valid_queues [where st="Structures_H.thread_state.Running", simplified] set_ntfn_valid_objs' cur_tcb_lift sts_st_tcb' hoare_convert_imp [OF setNotification_nosch] | simp split del: if_split)+ @@ -3234,11 +3150,11 @@ lemma receiveIPC_corres: and cte_wp_at (\c. 
c = cap.NullCap) (thread, tcb_cnode_index 3)" and P'="tcb_at' a and tcb_at' thread and cur_tcb' - and Invariants_H.valid_queues - and valid_queues' and valid_pspace' and valid_objs' - and (\s. weak_sch_act_wf (ksSchedulerAction s) s)" + and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" in corres_guard_imp [OF corres_if]) apply (simp add: fault_rel_optionation_def) apply (rule corres_if2 [OF _ setupCallerCap_corres setThreadState_corres]) @@ -3247,17 +3163,18 @@ lemma receiveIPC_corres: apply (rule corres_split[OF setThreadState_corres]) apply simp apply (rule possibleSwitchTo_corres) - apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action - | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb + apply (wpsimp wp: sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action)+ + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb | simp)+ - apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def - valid_sched_action_def) + apply (fastforce simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def + valid_sched_action_def) apply (clarsimp split: if_split_asm) apply (clarsimp | wp do_ipc_transfer_tcb_caps)+ - apply (rule_tac Q="\_ s. sch_act_wf (ksSchedulerAction s) s" - in hoare_post_imp, erule sch_act_wf_weak) + apply (rule_tac Q="\_ s. sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp) + apply (fastforce elim: sch_act_wf_weak) apply (wp sts_st_tcb' gts_st_tcb_at | simp)+ apply (simp cong: list.case_cong) apply wp @@ -3476,30 +3393,6 @@ lemma setupCallerCap_state_refs_of[wp]: apply (simp add: fun_upd_def cong: if_cong) done -lemma setCTE_valid_queues[wp]: - "\Invariants_H.valid_queues\ setCTE ptr val \\rv. Invariants_H.valid_queues\" - by (wp valid_queues_lift setCTE_pred_tcb_at') - -crunch vq[wp]: cteInsert "Invariants_H.valid_queues" - (wp: crunch_wps) - -crunch vq[wp]: getThreadCallerSlot "Invariants_H.valid_queues" - (wp: crunch_wps) - -crunch vq[wp]: getThreadReplySlot "Invariants_H.valid_queues" - (wp: crunch_wps) - -lemma setupCallerCap_vq[wp]: - "\Invariants_H.valid_queues and (\s. \p. send \ set (ksReadyQueues s p))\ - setupCallerCap send recv grant \\_. Invariants_H.valid_queues\" - apply (simp add: setupCallerCap_def) - apply (wp crunch_wps sts_valid_queues) - apply (fastforce simp: valid_queues_def obj_at'_def inQ_def) - done - -crunch vq'[wp]: setupCallerCap "valid_queues'" - (wp: crunch_wps) - lemma is_derived_ReplyCap' [simp]: "\m p g. is_derived' m p (capability.ReplyCap t False g) = (\c. \ g. c = capability.ReplyCap t True g)" @@ -3540,10 +3433,8 @@ lemma setupCallerCap_vp[wp]: apply (wp | simp add: valid_pspace'_def valid_tcb_state'_def)+ done -declare haskell_assert_inv[wp del] - lemma setupCallerCap_iflive[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender\ + "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender and pspace_aligned' and pspace_distinct'\ setupCallerCap sender rcvr grant \\rv. if_live_then_nonz_cap'\" unfolding setupCallerCap_def getThreadCallerSlot_def @@ -3555,7 +3446,7 @@ lemma setupCallerCap_iflive[wp]: lemma setupCallerCap_ifunsafe[wp]: "\if_unsafe_then_cap' and valid_objs' and - ex_nonz_cap_to' rcvr and tcb_at' rcvr\ + ex_nonz_cap_to' rcvr and tcb_at' rcvr and pspace_aligned' and pspace_distinct'\ setupCallerCap sender rcvr grant \\rv. 
if_unsafe_then_cap'\" unfolding setupCallerCap_def getThreadCallerSlot_def @@ -3577,13 +3468,11 @@ lemma setupCallerCap_global_refs'[wp]: \\rv. valid_global_refs'\" unfolding setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv - apply (wp getSlotCap_cte_wp_at - | simp add: o_def unique_master_reply_cap' - | strengthen eq_imp_strg - | wp (once) getCTE_wp | clarsimp simp: cte_wp_at_ctes_of)+ - (* at setThreadState *) - apply (rule_tac Q="\_. valid_global_refs'" in hoare_post_imp, wpsimp+) - done + by (wp + | simp add: o_def unique_master_reply_cap' + | strengthen eq_imp_strg + | wp (once) getCTE_wp + | wp (once) hoare_vcg_imp_lift' hoare_vcg_ex_lift | clarsimp simp: cte_wp_at_ctes_of)+ crunch valid_arch'[wp]: setupCallerCap "valid_arch_state'" (wp: hoare_drop_imps) @@ -3701,7 +3590,7 @@ lemma completeSignal_invs: completeSignal ntfnptr tcb \\_. invs'\" apply (simp add: completeSignal_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp set_ntfn_minor_invs' | wpc | simp)+ apply (rule_tac Q="\_ s. (state_refs_of' s ntfnptr = ntfn_bound_refs' (ntfnBoundTCB ntfn)) @@ -3710,7 +3599,7 @@ lemma completeSignal_invs: \ ((\y. ntfnBoundTCB ntfn = Some y) \ ex_nonz_cap_to' ntfnptr s) \ ntfnptr \ ksIdleThread s" in hoare_strengthen_post) - apply ((wp hoare_vcg_ex_lift static_imp_wp | wpc | simp add: valid_ntfn'_def)+)[1] + apply ((wp hoare_vcg_ex_lift hoare_weak_lift_imp | wpc | simp add: valid_ntfn'_def)+)[1] apply (clarsimp simp: obj_at'_def state_refs_of'_def typ_at'_def ko_wp_at'_def split: option.splits) apply (blast dest: ntfn_q_refs_no_bound_refs') apply wp @@ -3759,20 +3648,29 @@ crunches possibleSwitchTo for ksArch[wp]: "\s. P (ksArchState s)" (wp: possibleSwitchTo_ctes_of crunch_wps ignore: constOnFailure) +crunches asUser + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift wp: crunch_wps) + +crunches setupCallerCap, possibleSwitchTo, doIPCTransfer + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + (* t = ksCurThread s *) lemma ri_invs' [wp]: "\invs' and sch_act_not t and ct_in_state' simple' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s)\ receiveIPC t cap isBlocking \\_. 
invs'\" (is "\?pre\ _ \_\") apply (clarsimp simp: receiveIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) - apply (rule hoare_seq_ext [OF _ gbn_sp']) - apply (rule hoare_seq_ext) + apply (rule bind_wp [OF _ get_ep_sp']) + apply (rule bind_wp [OF _ gbn_sp']) + apply (rule bind_wp) (* set up precondition for old proof *) apply (rule_tac R="ko_at' ep (capEPPtr cap) and ?pre" in hoare_vcg_if_split) apply (wp completeSignal_invs) @@ -3782,7 +3680,7 @@ lemma ri_invs' [wp]: apply (rule hoare_pre, wpc, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) apply (wp sts_sch_act' hoare_vcg_const_Ball_lift valid_irq_node_lift - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at' o_def) @@ -3809,7 +3707,7 @@ lemma ri_invs' [wp]: apply (rule hoare_pre, wpc, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) apply (wp sts_sch_act' valid_irq_node_lift - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def o_def) @@ -3833,9 +3731,8 @@ lemma ri_invs' [wp]: apply (rename_tac sender queue) apply (rule hoare_pre) apply (wp valid_irq_node_lift hoare_drop_imps setEndpoint_valid_mdb' - set_ep_valid_objs' sts_st_tcb' sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ possibleSwitchTo_valid_queues - possibleSwitchTo_valid_queues' + set_ep_valid_objs' sts_st_tcb' sts_sch_act' + setThreadState_ct_not_inQ possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift setEndpoint_ksQ setEndpoint_ct' | simp add: valid_tcb_state'_def case_bool_If @@ -3853,8 +3750,6 @@ lemma ri_invs' [wp]: st_tcb_at_refs_of_rev' conj_ac split del: if_split cong: if_cong) - apply (frule_tac t=sender in valid_queues_not_runnable'_not_ksQ) - apply (erule pred_tcb'_weakenE, clarsimp) apply (subgoal_tac "sch_act_not sender s") prefer 2 apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) @@ -3888,7 +3783,6 @@ lemma ri_invs' [wp]: lemma rai_invs'[wp]: "\invs' and sch_act_not t and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s) and (\s. \ntfnptr. isNotificationCap cap @@ -3898,14 +3792,14 @@ lemma rai_invs'[wp]: receiveSignal t cap isBlocking \\_. 
invs'\" apply (simp add: receiveSignal_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (rename_tac ep) apply (case_tac "ntfnObj ep") \ \ep = IdleNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp valid_irq_node_lift sts_sch_act' typ_at_lifts - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def | wpc)+ apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def) @@ -3923,12 +3817,12 @@ lemma rai_invs'[wp]: apply (clarsimp split: if_split_asm) apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse' split: if_split_asm) - apply (clarsimp dest!: global'_no_ex_cap) + apply (fastforce dest!: global'_no_ex_cap) apply (clarsimp simp: pred_tcb_at'_def obj_at'_def projectKOs) \ \ep = ActiveNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts static_imp_wp + apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts hoare_weak_lift_imp asUser_urz | simp add: valid_ntfn'_def)+ apply (clarsimp simp: pred_tcb_at' valid_pspace'_def) @@ -3943,7 +3837,7 @@ lemma rai_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' - sts_valid_queues setThreadState_ct_not_inQ typ_at_lifts + setThreadState_ct_not_inQ typ_at_lifts asUser_urz | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def | wpc)+ apply (clarsimp simp: valid_tcb_state'_def) @@ -3971,7 +3865,7 @@ lemma rai_invs'[wp]: apply (auto simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def)[5] apply (fastforce simp: tcb_bound_refs'_def split: if_split_asm) - apply (clarsimp dest!: global'_no_ex_cap) + apply (fastforce dest!: global'_no_ex_cap) done lemma getCTE_cap_to_refs[wp]: @@ -4003,7 +3897,6 @@ lemma cteInsert_invs_bits[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ cteInsert a b c \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ cteInsert a b c \\rv. Invariants_H.valid_queues\" "\cur_tcb'\ cteInsert a b c \\rv. cur_tcb'\" "\\s. P (state_refs_of' s)\ cteInsert a b c @@ -4021,16 +3914,19 @@ lemma possibleSwitchTo_sch_act_not: crunch urz[wp]: possibleSwitchTo "untyped_ranges_zero'" (simp: crunch_simps unless_def wp: crunch_wps) +crunches possibleSwitchTo + for pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + lemma si_invs'[wp]: "\invs' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and sch_act_not t and ex_nonz_cap_to' ep and ex_nonz_cap_to' t\ sendIPC bl call ba cg cgr t ep \\rv. 
invs'\" supply if_split[split del] apply (simp add: sendIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp [OF _ get_ep_sp']) apply (case_tac epa) \ \epa = RecvEP\ apply simp @@ -4042,8 +3938,8 @@ lemma si_invs'[wp]: apply (rule_tac P="a\t" in hoare_gen_asm) apply (wp valid_irq_node_lift sts_valid_objs' set_ep_valid_objs' setEndpoint_valid_mdb' sts_st_tcb' sts_sch_act' - possibleSwitchTo_sch_act_not sts_valid_queues setThreadState_ct_not_inQ - possibleSwitchTo_ksQ' possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift sts_ksQ' + possibleSwitchTo_sch_act_not setThreadState_ct_not_inQ + possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift hoare_convert_imp [OF doIPCTransfer_sch_act doIPCTransfer_ct'] hoare_convert_imp [OF setEndpoint_nosch setEndpoint_ct'] hoare_drop_imp [where f="threadGet tcbFault t"] @@ -4095,8 +3991,7 @@ lemma si_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) - apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') apply (rule conjI, clarsimp elim!: obj_at'_weakenE) apply (subgoal_tac "ep \ t") @@ -4115,8 +4010,7 @@ lemma si_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) - apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' - sts_valid_queues setThreadState_ct_not_inQ) + apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') apply (rule conjI, clarsimp elim!: obj_at'_weakenE) apply (frule obj_at_valid_objs', clarsimp) @@ -4142,23 +4036,19 @@ lemma si_invs'[wp]: lemma sfi_invs_plus': "\invs' and st_tcb_at' simple' t and sch_act_not t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t\ - sendFaultIPC t f - \\rv. invs'\, \\rv. invs' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) - and sch_act_not t and (\s. ksIdleThread s \ t)\" + sendFaultIPC t f + \\_. invs'\, \\_. invs' and st_tcb_at' simple' t and sch_act_not t and (\s. ksIdleThread s \ t)\" apply (simp add: sendFaultIPC_def) apply (wp threadSet_invs_trivial threadSet_pred_tcb_no_state threadSet_cap_to' | wpc | simp)+ apply (rule_tac Q'="\rv s. invs' s \ sch_act_not t s \ st_tcb_at' simple' t s - \ (\p. t \ set (ksReadyQueues s p)) \ ex_nonz_cap_to' t s \ t \ ksIdleThread s \ (\r\zobj_refs' rv. ex_nonz_cap_to' r s)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp: inQ_def pred_tcb_at') apply (wp | simp)+ @@ -4176,7 +4066,6 @@ lemma handleFault_corres: corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread and (\_. valid_fault f)) (invs' and sch_act_not thread - and (\s. \p. thread \ set(ksReadyQueues s p)) and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) (handle_fault thread f) (handleFault thread f')" apply (simp add: handle_fault_def handleFault_def) @@ -4200,17 +4089,13 @@ lemma sts_invs_minor'': \ (st \ Inactive \ \ idle' st \ st' \ Inactive \ \ idle' st')) t and (\s. t = ksIdleThread s \ idle' st) - and (\s. (\p. t \ set (ksReadyQueues s p)) \ runnable' st) - and (\s. runnable' st \ obj_at' tcbQueued t s - \ st_tcb_at' runnable' t s) and (\s. \ runnable' st \ sch_act_not t s) and invs'\ setThreadState st t \\rv. 
invs'\" apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply clarsimp apply (rule conjI) apply fastforce @@ -4225,12 +4110,11 @@ lemma sts_invs_minor'': apply (clarsimp dest!: st_tcb_at_state_refs_ofD' elim!: rsubst[where P=sym_refs] intro!: ext) - apply (clarsimp elim!: st_tcb_ex_cap'') + apply (fastforce elim!: st_tcb_ex_cap'') done lemma hf_invs' [wp]: "\invs' and sch_act_not t - and (\s. \p. t \ set(ksReadyQueues s p)) and st_tcb_at' simple' t and ex_nonz_cap_to' t and (\s. t \ ksIdleThread s)\ handleFault t f \\r. invs'\" @@ -4238,7 +4122,7 @@ lemma hf_invs' [wp]: apply wp apply (simp add: handleDoubleFault_def) apply (wp sts_invs_minor'' dmo_invs')+ - apply (rule hoare_post_impErr, rule sfi_invs_plus', + apply (rule hoare_strengthen_postE, rule sfi_invs_plus', simp_all) apply (strengthen no_refs_simple_strg') apply clarsimp @@ -4270,8 +4154,8 @@ lemma si_blk_makes_simple': sendIPC True call bdg x x' t' ep \\rv. st_tcb_at' simple' t\" apply (simp add: sendIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (case_tac xa, simp_all) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) apply (rename_tac list) apply (case_tac list, simp_all add: case_bool_If case_option_If split del: if_split cong: if_cong) @@ -4290,8 +4174,8 @@ lemma si_blk_makes_runnable': sendIPC True call bdg x x' t' ep \\rv. st_tcb_at' runnable' t\" apply (simp add: sendIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (case_tac xa, simp_all) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) apply (rename_tac list) apply (case_tac list, simp_all add: case_bool_If case_option_If split del: if_split cong: if_cong) @@ -4314,7 +4198,7 @@ lemma sfi_makes_simple': apply (simp add: sendFaultIPC_def cong: if_cong capability.case_cong bool.case_cong) apply (wpsimp wp: si_blk_makes_simple' threadSet_pred_tcb_no_state hoare_drop_imps - hoare_vcg_all_lift_R) + hoare_vcg_all_liftE_R) done lemma sfi_makes_runnable': @@ -4325,7 +4209,7 @@ lemma sfi_makes_runnable': apply (simp add: sendFaultIPC_def cong: if_cong capability.case_cong bool.case_cong) apply (wpsimp wp: si_blk_makes_runnable' threadSet_pred_tcb_no_state hoare_drop_imps - hoare_vcg_all_lift_R) + hoare_vcg_all_liftE_R) done lemma hf_makes_runnable_simple': @@ -4349,8 +4233,8 @@ lemma ri_makes_runnable_simple': apply (rule hoare_gen_asm)+ apply (simp add: receiveIPC_def) apply (case_tac cap, simp_all add: isEndpointCap_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (rule hoare_seq_ext [OF _ gbn_sp']) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (rule bind_wp [OF _ gbn_sp']) apply wp apply (rename_tac ep q r) apply (case_tac ep, simp_all) @@ -4384,7 +4268,7 @@ lemma sendSignal_st_tcb'_Running: sendSignal ntfnptr bdg \\_. st_tcb_at' (\st. 
st = Running \ P st) t\" apply (simp add: sendSignal_def) - apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp static_imp_wp + apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp hoare_weak_lift_imp | wpc | clarsimp simp: pred_tcb_at')+ done diff --git a/proof/refine/RISCV64/KHeap_R.thy b/proof/refine/RISCV64/KHeap_R.thy index 4574417ea8..6636cf2d20 100644 --- a/proof/refine/RISCV64/KHeap_R.thy +++ b/proof/refine/RISCV64/KHeap_R.thy @@ -13,8 +13,45 @@ lemma lookupAround2_known1: "m x = Some y \ fst (lookupAround2 x m) = Some (x, y)" by (fastforce simp: lookupAround2_char1) +lemma koTypeOf_injectKO: + fixes v :: "'a :: pspace_storable" + shows "koTypeOf (injectKO v) = koType TYPE('a)" + apply (cut_tac v1=v in iffD2 [OF project_inject, OF refl]) + apply (simp add: project_koType[symmetric]) + done + context begin interpretation Arch . (*FIXME: arch_split*) +lemma setObject_modify_variable_size: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; obj_at' (\obj. objBits v = objBits obj) p s\ + \ setObject p v s = modify (ksPSpace_update (\ps. ps (p \ injectKO v))) s" + apply (clarsimp simp: setObject_def split_def exec_gets obj_at'_def lookupAround2_known1 + assert_opt_def updateObject_default_def bind_assoc) + apply (simp add: projectKO_def alignCheck_assert) + apply (simp add: project_inject objBits_def) + apply (clarsimp simp only: koTypeOf_injectKO) + apply (frule in_magnitude_check[where s'=s]) + apply blast + apply fastforce + apply (simp add: magnitudeCheck_assert in_monad bind_def gets_def oassert_opt_def + get_def return_def) + apply (simp add: simpler_modify_def) + done + +lemma setObject_modify: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; \ko. P ko \ objBits ko = objBits v \ + \ setObject p v s = modify (ksPSpace_update (\ps. ps (p \ injectKO v))) s" + apply (rule setObject_modify_variable_size) + apply fastforce + apply fastforce + apply fastforce + unfolding obj_at'_def + by fastforce + lemma obj_at_getObject: assumes R: "\a b n ko s obj::'a::pspace_storable. @@ -114,8 +151,7 @@ lemma corres_get_tcb: apply (drule bspec) apply clarsimp apply blast - apply (clarsimp simp add: other_obj_relation_def - lookupAround2_known1) + apply (clarsimp simp: tcb_relation_cut_def lookupAround2_known1) done lemma lookupAround2_same1[simp]: @@ -183,7 +219,7 @@ lemma obj_at_setObject1: setObject p (v::'a::pspace_storable) \ \rv. obj_at' (\x::'a::pspace_storable. True) t \" apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad obj_at'_def lookupAround2_char1 project_inject dest!: R) apply (subgoal_tac "objBitsKO (injectKO v) = objBitsKO (injectKO obj)") @@ -203,7 +239,7 @@ lemma obj_at_setObject2: setObject p (v::'a) \ \rv. 
obj_at' P t \" apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad) apply (frule updateObject_type) apply (drule R) @@ -381,6 +417,40 @@ lemma setObject_tcb_strongest: updateObject_default_def ps_clear_upd) done +method setObject_easy_cases = + clarsimp simp: setObject_def in_monad split_def valid_def lookupAround2_char1, + erule rsubst[where P=P'], rule ext, + clarsimp simp: updateObject_cte updateObject_default_def in_monad + typeError_def opt_map_def opt_pred_def projectKO_opts_defs projectKO_eq + split: if_split_asm + Structures_H.kernel_object.split_asm + +lemma setObject_endpoint_tcbs_of'[wp]: + "setObject c (endpoint :: endpoint) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_notification_tcbs_of'[wp]: + "setObject c (notification :: notification) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedNexts_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedNexts_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedPrevs_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedPrevs_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbQueued[wp]: + "setObject c (cte :: cte) \\s. P' (tcbQueued |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + +lemma setObject_cte_inQ[wp]: + "setObject c (cte :: cte) \\s. P' (inQ d p |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + lemma getObject_obj_at': assumes x: "\q n ko. loadObject p q n ko = (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" @@ -896,7 +966,7 @@ lemma obj_relation_cut_same_type: \ (\sz sz'. a_type ko = AArch (ADeviceData sz) \ a_type ko' = AArch (ADeviceData sz'))" apply (rule ccontr) apply (simp add: obj_relation_cuts_def2 a_type_def) - apply (auto simp: other_obj_relation_def cte_relation_def pte_relation_def + apply (auto simp: other_obj_relation_def tcb_relation_cut_def cte_relation_def pte_relation_def split: Structures_A.kernel_object.split_asm if_split_asm Structures_H.kernel_object.split_asm arch_kernel_obj.split_asm) @@ -913,6 +983,16 @@ where "exst_same' (KOTCB tcb) (KOTCB tcb') = exst_same tcb tcb'" | "exst_same' _ _ = True" +lemma tcbs_of'_non_tcb_update: + "\typ_at' (koTypeOf ko) ptr s'; koTypeOf ko \ TCBT\ + \ tcbs_of' (s'\ksPSpace := (ksPSpace s')(ptr \ ko)\) = tcbs_of' s'" + by (fastforce simp: typ_at'_def ko_wp_at'_def opt_map_def projectKO_opts_defs + split: kernel_object.splits) + +lemma typ_at'_koTypeOf: + "ko_at' ob' ptr b \ typ_at' (koTypeOf (injectKO ob')) ptr b" + by (auto simp: typ_at'_def ko_wp_at'_def obj_at'_def project_inject) + lemma setObject_other_corres: fixes ob' :: "'a :: pspace_storable" assumes x: "updateObject ob' = updateObject_default ob'" @@ -942,7 +1022,7 @@ lemma setObject_other_corres: apply (clarsimp simp add: caps_of_state_after_update cte_wp_at_after_update swp_def fun_upd_def obj_at_def) apply (subst conj_assoc[symmetric]) - apply (rule conjI[rotated]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x=ptr in allE)+ apply (clarsimp simp: obj_at_def a_type_def @@ -952,6 +1032,14 @@ lemma setObject_other_corres: apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) apply (elim conjE) apply (frule bspec, erule domI) + apply (prop_tac "typ_at' (koTypeOf (injectKO ob')) ptr b") + subgoal + by (clarsimp simp: typ_at'_def ko_wp_at'_def 
obj_at'_def projectKO_opts_defs + is_other_obj_relation_type_def a_type_def other_obj_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm kernel_object.split_asm + arch_kernel_object.split_asm) + apply clarsimp apply (rule conjI) apply (rule ballI, drule(1) bspec) apply (drule domD) @@ -960,31 +1048,30 @@ lemma setObject_other_corres: apply clarsimp apply (frule_tac ko'=ko and x'=ptr in obj_relation_cut_same_type, (fastforce simp add: is_other_obj_relation_type t)+) - apply (erule disjE) - apply (simp add: is_other_obj_relation_type t) - apply (erule disjE) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_CapTable a_type_def) - apply (erule disjE) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_UserData a_type_def) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_DeviceData a_type_def) - apply (simp only: ekheap_relation_def) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (insert e) - apply atomize - apply (clarsimp simp: obj_at'_def) - apply (erule_tac x=obj in allE) - apply (clarsimp simp: projectKO_eq project_inject) - apply (case_tac ob; - simp_all add: a_type_def other_obj_relation_def etcb_relation_def - is_other_obj_relation_type t exst_same_def) - apply (clarsimp simp: is_other_obj_relation_type t exst_same_def - split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits - arch_kernel_obj.splits)+ - done + apply (insert t) + apply ((erule disjE + | clarsimp simp: is_other_obj_relation_type is_other_obj_relation_type_def a_type_def)+)[1] + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (insert e) + apply atomize + apply (clarsimp simp: obj_at'_def) + apply (erule_tac x=obj in allE) + apply (clarsimp simp: projectKO_eq project_inject) + apply (case_tac ob; + simp_all add: a_type_def other_obj_relation_def etcb_relation_def + is_other_obj_relation_type t exst_same_def) + apply (clarsimp simp: is_other_obj_relation_type t exst_same_def + split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits + arch_kernel_obj.splits)+ + \ \ready_queues_relation\ + apply (prop_tac "koTypeOf (injectKO ob') \ TCBT") + subgoal + by (clarsimp simp: other_obj_relation_def; cases ob; cases "injectKO ob'"; + simp split: arch_kernel_obj.split_asm) + by (fastforce dest: tcbs_of'_non_tcb_update) lemmas obj_at_simps = obj_at_def obj_at'_def map_to_ctes_upd_other is_other_obj_relation_type_def @@ -995,8 +1082,8 @@ lemma setEndpoint_corres: corres dc (ep_at ptr) (ep_at' ptr) (set_endpoint ptr e) (setEndpoint ptr e')" apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) lemma setNotification_corres: @@ -1004,8 +1091,8 @@ lemma setNotification_corres: corres dc (ntfn_at ptr) (ntfn_at' ptr) (set_notification ptr ae) (setNotification ptr ae')" apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. 
True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) lemma no_fail_getNotification [wp]: @@ -1074,13 +1161,14 @@ lemma typ_at'_valid_obj'_lift: apply (case_tac endpoint; simp add: valid_ep'_def, wp) apply (rename_tac notification) apply (case_tac "ntfnObj notification"; - simp add: valid_ntfn'_def valid_bound_tcb'_def split: option.splits, + simp add: valid_ntfn'_def split: option.splits, (wpsimp|rule conjI)+) apply (rename_tac tcb) apply (case_tac "tcbState tcb"; - simp add: valid_tcb'_def valid_tcb_state'_def split_def valid_bound_ntfn'_def - split: option.splits, - wpsimp) + simp add: valid_tcb'_def valid_tcb_state'_def split_def opt_tcb_at'_def + valid_bound_ntfn'_def; + wpsimp wp: hoare_case_option_wp hoare_case_option_wp2; + (clarsimp split: option.splits)?) apply (wpsimp simp: valid_cte'_def) done @@ -1357,32 +1445,6 @@ lemma set_ep_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ done -lemma set_ep_valid_queues[wp]: - "\Invariants_H.valid_queues\ setEndpoint epptr ep \\rv. Invariants_H.valid_queues\" - apply (simp add: Invariants_H.valid_queues_def) - apply (wp hoare_vcg_conj_lift) - apply (simp add: setEndpoint_def valid_queues_no_bitmap_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (rule obj_at_setObject2) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] - | simp add: valid_queues_no_bitmap_def)+ - done - -lemma set_ep_valid_queues'[wp]: - "\valid_queues'\ setEndpoint epptr ep \\rv. valid_queues'\" - apply (unfold setEndpoint_def) - apply (simp only: valid_queues'_def imp_conv_disj - obj_at'_real_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (rule setObject_ko_wp_at) - apply simp - apply (simp add: objBits_simps') - apply simp - apply (wp updateObject_default_inv | simp)+ - apply (clarsimp simp: ko_wp_at'_def) - done - lemma ct_in_state_thread_state_lift': assumes ct: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" assumes st: "\t. \st_tcb_at' P t\ f \\_. st_tcb_at' P t\" @@ -1579,34 +1641,6 @@ lemma set_ntfn_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ done -lemma set_ntfn_valid_queues[wp]: - "\Invariants_H.valid_queues\ setNotification p ntfn \\rv. Invariants_H.valid_queues\" - apply (simp add: Invariants_H.valid_queues_def) - apply (rule hoare_pre) - apply (wp hoare_vcg_conj_lift) - apply (simp add: setNotification_def valid_queues_no_bitmap_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (rule obj_at_setObject2) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] - | simp add: valid_queues_no_bitmap_def)+ - done - -lemma set_ntfn_valid_queues'[wp]: - "\valid_queues'\ setNotification p ntfn \\rv. 
valid_queues'\" - apply (unfold setNotification_def) - apply (rule setObject_ntfn_pre) - apply (simp only: valid_queues'_def imp_conv_disj - obj_at'_real_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (rule setObject_ko_wp_at) - apply simp - apply (simp add: objBits_simps') - apply simp - apply (wp updateObject_default_inv | simp)+ - apply (clarsimp simp: ko_wp_at'_def) - done - lemma set_ntfn_state_refs_of'[wp]: "\\s. P ((state_refs_of' s) (epptr := ntfn_q_refs_of' (ntfnObj ntfn) \ ntfn_bound_refs' (ntfnBoundTCB ntfn)))\ @@ -1993,6 +2027,21 @@ lemma setNotification_ct_idle_or_in_cur_domain'[wp]: crunch gsUntypedZeroRanges[wp]: setNotification "\s. P (gsUntypedZeroRanges s)" (wp: setObject_ksPSpace_only updateObject_default_inv) +lemma sym_heap_sched_pointers_lift: + assumes prevs: "\P. f \\s. P (tcbSchedPrevs_of s)\" + assumes nexts: "\P. f \\s. P (tcbSchedNexts_of s)\" + shows "f \sym_heap_sched_pointers\" + by (rule_tac f=tcbSchedPrevs_of in hoare_lift_Pf2; wpsimp wp: assms) + +crunches setNotification + for tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: updateObject_default_def) + lemma set_ntfn_minor_invs': "\invs' and obj_at' (\ntfn. ntfn_q_refs_of' (ntfnObj ntfn) = ntfn_q_refs_of' (ntfnObj val) \ ntfn_bound_refs' (ntfnBoundTCB ntfn) = ntfn_bound_refs' (ntfnBoundTCB val)) @@ -2002,9 +2051,10 @@ lemma set_ntfn_minor_invs': and (\s. ptr \ ksIdleThread s) \ setNotification ptr val \\rv. invs'\" - apply (clarsimp simp add: invs'_def valid_state'_def cteCaps_of_def) - apply (wp irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift, - simp_all add: o_def) + apply (clarsimp simp: invs'_def valid_state'_def cteCaps_of_def) + apply (wpsimp wp: irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift + sym_heap_sched_pointers_lift valid_bitmaps_lift + simp: o_def) apply (clarsimp elim!: rsubst[where P=sym_refs] intro!: ext dest!: obj_at_state_refs_ofD')+ @@ -2054,21 +2104,21 @@ lemma valid_globals_cte_wpD': lemma dmo_aligned'[wp]: "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_distinct'[wp]: "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_valid_objs'[wp]: "\valid_objs'\ doMachineOp f \\_. valid_objs'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done @@ -2076,7 +2126,7 @@ lemma dmo_inv': assumes R: "\P. \P\ f \\_. P\" shows "\P\ doMachineOp f \\_. P\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp apply (drule in_inv_by_hoareD [OF R]) apply simp @@ -2089,21 +2139,17 @@ crunch typ_at'[wp]: doMachineOp "\s. P (typ_at' T p s)" lemmas doMachineOp_typ_ats[wp] = typ_at_lifts [OF doMachineOp_typ_at'] lemma doMachineOp_invs_bits[wp]: - "\valid_pspace'\ doMachineOp m \\rv. valid_pspace'\" - "\\s. sch_act_wf (ksSchedulerAction s) s\ - doMachineOp m \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ doMachineOp m \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ doMachineOp m \\rv. valid_queues'\" - "\\s. 
P (state_refs_of' s)\ - doMachineOp m - \\rv s. P (state_refs_of' s)\" - "\if_live_then_nonz_cap'\ doMachineOp m \\rv. if_live_then_nonz_cap'\" - "\cur_tcb'\ doMachineOp m \\rv. cur_tcb'\" - "\if_unsafe_then_cap'\ doMachineOp m \\rv. if_unsafe_then_cap'\" + "doMachineOp m \valid_pspace'\" + "doMachineOp m \\s. sch_act_wf (ksSchedulerAction s) s\" + "doMachineOp m \valid_bitmaps\" + "doMachineOp m \valid_sched_pointers\" + "doMachineOp m \\s. P (state_refs_of' s)\" + "doMachineOp m \if_live_then_nonz_cap'\" + "doMachineOp m \cur_tcb'\" + "doMachineOp m \if_unsafe_then_cap'\" by (simp add: doMachineOp_def split_def - valid_pspace'_def valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - | wp cur_tcb_lift sch_act_wf_lift tcb_in_cur_domain'_lift - | fastforce elim: state_refs_of'_pspaceI)+ + | wp + | fastforce elim: state_refs_of'_pspaceI)+ crunch obj_at'[wp]: doMachineOp "\s. P (obj_at' P' p s)" @@ -2132,5 +2178,28 @@ lemma obj_at'_is_canonical: apply (clarsimp simp: obj_at'_def pspace_canonical'_def) by (drule_tac x=t in bspec) clarsimp+ +lemma aligned_distinct_obj_atI': + "\ ksPSpace s x = Some ko; pspace_aligned' s; pspace_distinct' s; ko = injectKO v \ + \ ko_at' v x s" + apply (simp add: obj_at'_def project_inject pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply (clarsimp simp: bit_simps objBits_simps' word_bits_def + split: kernel_object.splits arch_kernel_object.splits) + done + +lemma aligned'_distinct'_ko_wp_at'I: + "\ksPSpace s' x = Some ko; P ko; pspace_aligned' s'; pspace_distinct' s'\ + \ ko_wp_at' P x s'" + apply (simp add: ko_wp_at'_def pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply (cases ko; force) + done + +lemma aligned'_distinct'_ko_at'I: + "\ksPSpace s' x = Some ko; pspace_aligned' s'; pspace_distinct' s'; + ko = injectKO (v:: 'a :: pspace_storable)\ + \ ko_at' v x s'" + by (fastforce elim: aligned'_distinct'_ko_wp_at'I simp: obj_at'_real_def project_inject) + end end diff --git a/proof/refine/RISCV64/LevityCatch.thy b/proof/refine/RISCV64/LevityCatch.thy index 393ebb2223..29272dce95 100644 --- a/proof/refine/RISCV64/LevityCatch.thy +++ b/proof/refine/RISCV64/LevityCatch.thy @@ -10,6 +10,7 @@ imports "Lib.AddUpdSimps" "Lib.LemmaBucket" "Lib.SimpStrategy" + "Lib.Corres_Method" begin no_notation bind_drop (infixl ">>" 60) diff --git a/proof/refine/RISCV64/PageTableDuplicates.thy b/proof/refine/RISCV64/PageTableDuplicates.thy index a88a8f0b51..6ff7925a4d 100644 --- a/proof/refine/RISCV64/PageTableDuplicates.thy +++ b/proof/refine/RISCV64/PageTableDuplicates.thy @@ -22,12 +22,12 @@ lemma foldr_data_map_insert[simp]: crunch arch_inv[wp]: resetUntypedCap "\s. P (ksArchState s)" (simp: crunch_simps - wp: hoare_drop_imps hoare_unless_wp mapME_x_inv_wp + wp: hoare_drop_imps unless_wp mapME_x_inv_wp preemptionPoint_inv) lemma mapM_x_mapM_valid: "\ P \ mapM_x f xs \\r. Q\ \ \P\mapM f xs \\r. 
Q\" - apply (simp add:NonDetMonadLemmaBucket.mapM_x_mapM) + apply (simp add: mapM_x_mapM) apply (clarsimp simp:valid_def return_def bind_def) apply (drule spec) apply (erule impE) diff --git a/proof/refine/RISCV64/RAB_FN.thy b/proof/refine/RISCV64/RAB_FN.thy index 376f1658d4..cbe2a89c2f 100644 --- a/proof/refine/RISCV64/RAB_FN.thy +++ b/proof/refine/RISCV64/RAB_FN.thy @@ -92,35 +92,35 @@ proof (induct cap capptr bits rule: resolveAddressBits.induct) apply (subst resolveAddressBits.simps, subst resolveAddressBitsFn.simps) apply (simp only: Let_def haskell_assertE_def K_bind_def) apply (rule monadic_rewrite_name_pre) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule_tac P="(=) s" in monadic_rewrite_trans) (* step 1, apply the induction hypothesis on the lhs *) apply (rule monadic_rewrite_named_if monadic_rewrite_named_bindE - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="returnOk y" for y] - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="x $ y" for x y] - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="assertE P" for P s] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="returnOk y" for y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="x $ y" for x y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="assertE P" for P s] TrueI)+ apply (rule_tac g="case nextCap of CNodeCap a b c d \ ?g nextCap cref bitsLeft - | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_imp) + | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_guard_imp) apply (wpc | rule monadic_rewrite_refl "1.hyps" | simp only: capability.case haskell_assertE_def simp_thms)+ apply (clarsimp simp: in_monad locateSlot_conv getSlotCap_def dest!: in_getCTE fst_stateAssertD) apply (fastforce elim: cte_wp_at_weakenE') - apply (rule monadic_rewrite_refl[THEN monadic_rewrite_imp], simp) + apply (rule monadic_rewrite_refl[THEN monadic_rewrite_guard_imp], simp) (* step 2, split and match based on the lhs structure *) apply (simp add: locateSlot_conv liftE_bindE unlessE_def whenE_def if_to_top_of_bindE assertE_def stateAssert_def bind_assoc assert_def if_to_top_of_bind getSlotCap_def split del: if_split cong: if_cong) - apply (rule monadic_rewrite_if_lhs monadic_rewrite_symb_exec_l'[OF get_wp] + apply (rule monadic_rewrite_if_l monadic_rewrite_symb_exec_l'[OF _ get_wp, rotated] empty_fail_get no_fail_get impI monadic_rewrite_refl get_wp | simp add: throwError_def returnOk_def locateSlotFun_def if_not_P isCNodeCap_capUntypedPtr_capCNodePtr cong: if_cong split del: if_split)+ - apply (rule monadic_rewrite_symb_exec_l'[OF getCTE_inv _ _ _ getCTE_cte_wp_at]) + apply (rule monadic_rewrite_symb_exec_l'[OF _ getCTE_inv _ _ getCTE_cte_wp_at, rotated]) apply simp apply (rule impI, rule no_fail_getCTE) apply (simp add: monadic_rewrite_def simpler_gets_def return_def returnOk_def diff --git a/proof/refine/RISCV64/Refine.thy b/proof/refine/RISCV64/Refine.thy index b641b3331e..b0c70b2ce0 100644 --- a/proof/refine/RISCV64/Refine.thy +++ b/proof/refine/RISCV64/Refine.thy @@ -77,7 +77,7 @@ lemma typ_at_UserDataI: apply clarsimp apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def - cte_relation_def other_obj_relation_def + cte_relation_def other_obj_relation_def tcb_relation_cut_def split: Structures_A.kernel_object.split_asm Structures_H.kernel_object.split_asm if_split_asm arch_kernel_obj.split_asm) @@ -106,7 +106,7 @@ lemma typ_at_DeviceDataI: apply 
clarsimp apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def - cte_relation_def other_obj_relation_def + cte_relation_def other_obj_relation_def tcb_relation_cut_def split: Structures_A.kernel_object.split_asm Structures_H.kernel_object.split_asm if_split_asm arch_kernel_obj.split_asm) @@ -274,7 +274,7 @@ lemma kernel_entry_invs: thread_set_ct_running thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext - static_imp_wp + hoare_weak_lift_imp | clarsimp simp add: tcb_cap_cases_def active_from_running)+ done @@ -290,18 +290,18 @@ definition lemma do_user_op_valid_list:"\valid_list\ do_user_op f tc \\_. valid_list\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_valid_sched:"\valid_sched\ do_user_op f tc \\_. valid_sched\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_sched_act: "\\s. P (scheduler_action s)\ do_user_op f tc \\_ s. P (scheduler_action s)\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_invs2: @@ -397,6 +397,23 @@ abbreviation valid_domain_list' :: "'a kernel_state_scheme \ bool" w lemmas valid_domain_list'_def = valid_domain_list_2_def +(* nothing extra needed on this architecture *) +defs fastpathKernelAssertions_def: + "fastpathKernelAssertions \ \s. True" + +lemma fastpathKernelAssertions_cross: + "\ (s,s') \ state_relation; invs s; valid_arch_state' s'\ \ fastpathKernelAssertions s'" + unfolding fastpathKernelAssertions_def + by simp + +(* this is only needed for callKernel, where we have invs' on concrete side *) +lemma corres_cross_over_fastpathKernelAssertions: + "\ \s. P s \ invs s; \s'. Q s' \ invs' s'; + corres r P (Q and fastpathKernelAssertions) f g \ \ + corres r P Q f g" + by (rule corres_cross_over_guard[where Q="Q and fastpathKernelAssertions"]) + (fastforce elim: fastpathKernelAssertions_cross)+ + defs kernelExitAssertions_def: "kernelExitAssertions s \ 0 < ksDomainTime s \ valid_domain_list' s" @@ -419,8 +436,8 @@ lemma kernelEntry_invs': (\s. 0 < ksDomainTime s) and valid_domain_list' \" apply (simp add: kernelEntry_def) apply (wp ckernel_invs callKernel_domain_time_left - threadSet_invs_trivial threadSet_ct_running' select_wp - TcbAcc_R.dmo_invs' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' + TcbAcc_R.dmo_invs' hoare_weak_lift_imp doMachineOp_ct_in_state' doMachineOp_sch_act_simple callKernel_domain_time_left | clarsimp simp: user_memory_update_def no_irq_def tcb_at_invs' @@ -479,6 +496,10 @@ qed definition "ex_abs G \ \s'. \s. ((s :: (det_ext) state),s') \ state_relation \ G s" +lemma ex_abs_ksReadyQueues_asrt: + "ex_abs P s \ ksReadyQueues_asrt s" + by (fastforce simp: ex_abs_def intro: ksReadyQueues_asrt_cross) + lemma device_update_invs': "\invs'\doMachineOp (device_memory_update ds) \\_. invs'\" @@ -498,7 +519,7 @@ lemma doUserOp_invs': (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and (\s. 
0 < ksDomainTime s) and valid_domain_list'\" apply (simp add: doUserOp_def split_def ex_abs_def) - apply (wp device_update_invs' doMachineOp_ct_in_state' select_wp + apply (wp device_update_invs' doMachineOp_ct_in_state' | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def user_memory_update_def))+ apply (clarsimp simp: user_memory_update_def simpler_modify_def @@ -542,7 +563,7 @@ lemma kernel_corres': apply simp apply (rule handleInterrupt_corres[simplified dc_def]) apply simp - apply (wp hoare_drop_imps hoare_vcg_all_lift)[1] + apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift simp: schact_is_rct_def)[1] apply simp apply (rule_tac Q="\irq s. invs' s \ (\irq'. irq = Some irq' \ @@ -551,7 +572,7 @@ lemma kernel_corres': in hoare_post_imp) apply simp apply (wp doMachineOp_getActiveIRQ_IRQ_active handle_event_valid_sched | simp)+ - apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_post_impErr) + apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_strengthen_postE) apply wpsimp+ apply (simp add: invs'_def valid_state'_def) apply (rule corres_split[OF schedule_corres]) @@ -560,9 +581,9 @@ lemma kernel_corres': schedule_invs' hoare_vcg_if_lift2 hoare_drop_imps |simp)+ apply (rule_tac Q="\_. valid_sched and invs and valid_list" and E="\_. valid_sched and invs and valid_list" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (wp handle_event_valid_sched hoare_vcg_imp_lift' |simp)+ - apply (clarsimp simp: active_from_running) + apply (clarsimp simp: active_from_running schact_is_rct_def) apply (clarsimp simp: active_from_running') done @@ -574,6 +595,8 @@ lemma kernel_corres: (\s. ksSchedulerAction s = ResumeCurrentThread)) (call_kernel event) (callKernel event)" unfolding callKernel_def K_bind_def + apply (rule corres_cross_over_fastpathKernelAssertions, blast+) + apply (rule corres_stateAssert_r) apply (rule corres_guard_imp) apply (rule corres_add_noop_lhs2) apply (simp only: bind_assoc[symmetric]) @@ -616,7 +639,7 @@ lemma entry_corres: apply (rule corres_split[OF getCurThread_corres]) apply (rule corres_split) apply simp - apply (rule threadset_corresT) + apply (rule threadset_corresT; simp?) 
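
Editorial note on the kernel_corres changes above: the proof now strips a state assertion on the concrete side (corres_stateAssert_r) and brings fastpathKernelAssertions across the state relation with corres_cross_over_fastpathKernelAssertions; on this architecture the assertion is simply True, as the comment in the hunk says. The Haskell fragment below is a minimal, purely illustrative model of such a stateAssert-style guard in a failing state monad. It is not the l4v nondeterministic monad: the Kernel type synonym, guardedCall, and the use of Maybe for failure are assumptions made only to keep the sketch self-contained.

import Control.Monad.Trans.Class (lift)
import Control.Monad.Trans.State (StateT, get)

-- Toy "kernel" monad: state plus possible failure (illustrative only).
type Kernel s = StateT s Maybe

-- stateAssert-style guard: succeed without changing the state when the
-- predicate holds on the current state, otherwise fail the computation.
stateAssert :: (s -> Bool) -> Kernel s ()
stateAssert p = do
  s <- get
  if p s then return () else lift Nothing

-- Hypothetical guarded entry point, mirroring a kernel call that begins
-- with an assertion which the refinement proof discharges by crossing
-- facts (here, the abstract invariants) over the state relation.
guardedCall :: (s -> Bool) -> Kernel s a -> Kernel s a
guardedCall asrt body = stateAssert asrt >> body
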
apply (simp add: tcb_relation_def arch_tcb_relation_def arch_tcb_context_set_def atcbContextSet_def) apply (clarsimp simp: tcb_cap_cases_def cteSizeBits_def) @@ -633,9 +656,10 @@ lemma entry_corres: apply (rule hoare_strengthen_post, rule ckernel_invs, simp add: invs'_def cur_tcb'_def) apply (wp thread_set_invs_trivial thread_set_ct_running threadSet_invs_trivial threadSet_ct_running' - select_wp thread_set_not_state_valid_sched static_imp_wp + thread_set_not_state_valid_sched hoare_weak_lift_imp hoare_vcg_disj_lift ct_in_state_thread_state_lift | simp add: tcb_cap_cases_def ct_in_state'_def thread_set_no_change_tcb_state + schact_is_rct_def | (wps, wp threadSet_st_tcb_at2) )+ apply (clarsimp simp: invs_def cur_tcb_def valid_state_def valid_pspace_def) apply (clarsimp simp: ct_in_state'_def) @@ -801,7 +825,7 @@ lemma domain_list_rel_eq: by (clarsimp simp: state_relation_def) crunch valid_objs': doUserOp, checkActiveIRQ valid_objs' - (wp: crunch_wps select_wp) + (wp: crunch_wps) lemma ckernel_invariant: "ADT_H uop \ full_invs'" diff --git a/proof/refine/RISCV64/Retype_R.thy b/proof/refine/RISCV64/Retype_R.thy index 264725c77e..f6006974a5 100644 --- a/proof/refine/RISCV64/Retype_R.thy +++ b/proof/refine/RISCV64/Retype_R.thy @@ -57,8 +57,6 @@ lemma objBitsKO_bounded2[simp]: by (simp add: objBits_simps' word_bits_def bit_simps split: Structures_H.kernel_object.split arch_kernel_object.split) -declare select_singleton_is_return[simp] - definition APIType_capBits :: "RISCV64_H.object_type \ nat \ nat" where @@ -295,7 +293,7 @@ lemma state_relation_null_filterE: null_filter (caps_of_state t) = null_filter (caps_of_state s); null_filter' (ctes_of t') = null_filter' (ctes_of s'); pspace_relation (kheap t) (ksPSpace t'); - ekheap_relation (ekheap t) (ksPSpace t'); + ekheap_relation (ekheap t) (ksPSpace t'); ready_queues_relation t t'; ghost_relation (kheap t) (gsUserPages t') (gsCNodes t'); valid_list s; pspace_aligned' s'; pspace_distinct' s'; valid_objs s; valid_mdb s; pspace_aligned' t'; pspace_distinct' t'; @@ -981,7 +979,7 @@ lemma retype_ekheap_relation: apply (intro impI conjI) apply clarsimp apply (drule_tac x=a in bspec,force) - apply (clarsimp simp add: other_obj_relation_def split: if_split_asm) + apply (clarsimp simp add: tcb_relation_cut_def split: if_split_asm) apply (case_tac ko,simp_all) apply (clarsimp simp add: makeObjectKO_def cong: if_cong split: sum.splits Structures_H.kernel_object.splits arch_kernel_object.splits RISCV64_H.object_type.splits @@ -1159,6 +1157,11 @@ lemma ksMachineState_update_gs[simp]: by (simp add: update_gs_def split: aobject_type.splits Structures_A.apiobject_type.splits) +lemma ksReadyQueues_update_gs[simp]: + "ksReadyQueues (update_gs tp us addrs s) = ksReadyQueues s" + by (simp add: update_gs_def + split: aobject_type.splits Structures_A.apiobject_type.splits) + lemma update_gs_ksMachineState_update_swap: "update_gs tp us addrs (ksMachineState_update f s) = ksMachineState_update f (update_gs tp us addrs s)" @@ -1181,6 +1184,144 @@ lemma update_gs_simps[simp]: gsUserPages_update (\ups x. 
if x \ ptrs then Some RISCVHugePage else ups x)" by (simp_all add: update_gs_def) +lemma retype_ksPSpace_dom_same: + fixes x v + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ksPSpace s' x = Some v \ + foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s') x + = Some v" +proof - + have cover':"range_cover ptr sz (objBitsKO ko) m" + by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) + assume "ksPSpace s' x = Some v" + thus ?thesis + apply (clarsimp simp:foldr_upd_app_if[folded data_map_insert_def]) + apply (drule domI[where m = "ksPSpace s'"]) + apply (drule(1) IntI) + apply (erule_tac A = "A \ B" for A B in in_emptyE[rotated]) + apply (rule disjoint_subset[OF new_cap_addrs_subset[OF cover']]) + apply (clarsimp simp:ptr_add_def field_simps) + apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn']) + done +qed + +lemma retype_ksPSpace_None: + assumes ad: "pspace_aligned' s" "pspace_distinct' s" "pspace_bounded' s" + assumes pn: "pspace_no_overlap' ptr sz s" + assumes cover: "range_cover ptr sz (objBitsKO val + gbits) n" + shows "\x. x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" +proof - + note cover' = range_cover_rel[where sbit' = "objBitsKO val",OF cover _ refl,simplified] + show "\x. x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" + apply (drule subsetD[OF new_cap_addrs_subset [OF cover' ]]) + apply (insert pspace_no_overlap_disjoint' [OF ad(1) pn]) + apply (fastforce simp: ptr_add_def p_assoc_help) + done +qed + +lemma retype_tcbSchedPrevs_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedPrevs_of + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedPrevs_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_tcbSchedNexts_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedNexts_of + (s'\ksPSpace := foldr (\addr. 
data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedNexts_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_inQ: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "\d p. + inQ d p |< tcbs_of' + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = inQ d p |< tcbs_of' s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (intro allI) + apply (rule ext) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + fastforce simp add: makeObjectKO_def makeObject_tcb + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm + | fastforce)+ +qed + +lemma retype_ready_queues_relation: + assumes rlqr: "ready_queues_relation s s'" + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ready_queues_relation + (s \kheap := foldr (\p. data_map_insert p (default_object (APIType_map2 ty) dev us)) + (retype_addrs ptr (APIType_map2 ty) n us) (kheap s)\) + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\)" + using rlqr + unfolding ready_queues_relation_def Let_def + by (clarsimp simp: retype_tcbSchedNexts_of[OF vs' pn' ko cover num_r, simplified] + retype_tcbSchedPrevs_of[OF vs' pn' ko cover num_r, simplified] + retype_inQ[OF vs' pn' ko cover num_r, simplified]) + lemma retype_state_relation: notes data_map_insert_def[simp del] assumes sr: "(s, s') \ state_relation" @@ -1209,7 +1350,7 @@ lemma retype_state_relation: \ state_relation" (is "(ekheap_update (\_. 
?eps) s\kheap := ?ps\, update_gs _ _ _ (s'\ksPSpace := ?ps'\)) \ state_relation") - proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) + proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) have cover':"range_cover ptr sz (objBitsKO ko) m" by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) @@ -1398,6 +1539,16 @@ lemma retype_state_relation: else cns x" in exI, simp) apply (rule_tac x=id in exI, simp)+ done + + have rdyqrel: "ready_queues_relation s s'" + using sr by (simp add: state_relation_def) + + thus "ready_queues_relation_2 (ready_queues s) (ksReadyQueues s') + (?ps' |> tcb_of' |> tcbSchedNext) (?ps' |> tcb_of' |> tcbSchedPrev) + (\d p. inQ d p |< (?ps' |> tcb_of'))" + using retype_ready_queues_relation[OF _ vs' pn' ko cover num_r] + by (clarsimp simp: ready_queues_relation_def Let_def) + qed lemma new_cap_addrs_fold': @@ -1508,7 +1659,7 @@ lemma retype_region_ext_modify_kheap_futz: apply (simp add: modify_def[symmetric]) done -lemmas retype_region_ext_modify_kheap_futz' = fun_cong[OF arg_cong[where f=NonDetMonad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] +lemmas retype_region_ext_modify_kheap_futz' = fun_cong[OF arg_cong[where f=Nondet_Monad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] lemma foldr_upd_app_if_eta_futz: "foldr (\p ps. ps(p \ f p)) as = (\g x. if x \ set as then Some (f x) else g x)" @@ -2371,7 +2522,6 @@ qed lemma other_objs_default_relation: "\ case ty of Structures_A.EndpointObject \ ko = injectKO (makeObject :: endpoint) | Structures_A.NotificationObject \ ko = injectKO (makeObject :: Structures_H.notification) - | Structures_A.TCBObject \ ko = injectKO (makeObject :: tcb) | _ \ False \ \ obj_relation_retype (default_object ty dev n) ko" apply (rule obj_relation_retype_other_obj) @@ -2392,6 +2542,13 @@ lemma other_objs_default_relation: split: Structures_A.apiobject_type.split_asm) done +lemma tcb_relation_retype: + "obj_relation_retype (default_object Structures_A.TCBObject dev n) (KOTCB makeObject)" + by (clarsimp simp: default_object_def obj_relation_retype_def tcb_relation_def default_tcb_def + makeObject_tcb makeObject_cte new_context_def newContext_def + fault_rel_optionation_def initContext_def default_arch_tcb_def newArchTCB_def + arch_tcb_relation_def objBits_simps' tcb_relation_cut_def) + lemma captable_relation_retype: "n < word_bits \ obj_relation_retype (default_object Structures_A.CapTableObject dev n) (KOCTE makeObject)" @@ -2489,7 +2646,6 @@ lemmas object_splits = declare hoare_in_monad_post[wp del] declare univ_get_wp[wp del] -declare result_in_set_wp[wp del] crunch valid_arch_state'[wp]: copyGlobalMappings "valid_arch_state'" (wp: crunch_wps) @@ -3109,10 +3265,10 @@ proof (intro conjI impI) apply (rule_tac ptr="x + xa" in cte_wp_at_tcbI', assumption+) apply fastforce apply simp - apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound user_context) - apply (case_tac thread_state, simp_all add: valid_tcb_state'_def - valid_bound_ntfn'_def obj_at_disj' - split: option.splits)[2] + apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound tcbprev tcbnext user_context) + apply (case_tac thread_state, simp_all add: valid_tcb_state'_def valid_bound_tcb'_def + valid_bound_ntfn'_def obj_at_disj' opt_tcb_at'_def + split: option.splits)[4] apply (simp 
add: valid_cte'_def) apply (frule pspace_alignedD' [OF _ ad(1)]) apply (frule pspace_distinctD' [OF _ ad(2)]) @@ -3376,11 +3532,11 @@ lemma createObjects_orig_cte_wp_at2': \ (case_option False (P' \ getF) (projectKO_opt val))) \ pspace_no_overlap' ptr sz s\ createObjects' ptr n val gbits \\r s. P (cte_wp_at' P' p s)\" + including classic_wp_pre apply (simp add: cte_wp_at'_obj_at') apply (rule handy_prop_divs) apply (wp createObjects_orig_obj_at2'[where sz = sz], simp) apply (simp add: tcb_cte_cases_def cteSizeBits_def) - including no_pre apply (wp handy_prop_divs createObjects_orig_obj_at2'[where sz = sz] | simp add: o_def cong: option.case_cong)+ done @@ -3401,7 +3557,7 @@ lemma createNewCaps_cte_wp_at2: \ pspace_no_overlap' ptr sz s\ createNewCaps ty ptr n objsz dev \\rv s. P (cte_wp_at' P' p s)\" - including no_pre + including classic_wp_pre apply (simp add: createNewCaps_def createObjects_def RISCV64_H.toAPIType_def split del: if_split) apply (case_tac ty; simp add: createNewCaps_def createObjects_def Arch_createNewCaps_def @@ -3820,16 +3976,6 @@ lemma sch_act_wf_lift_asm: apply auto done -lemma valid_queues_lift_asm': - assumes tat: "\d p t. \\s. \ obj_at' (inQ d p) t s \ Q d p s\ f \\_ s. \ obj_at' (inQ d p) t s\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\\s. valid_queues' s \ (\d p. Q d p s)\ f \\_. valid_queues'\" - apply (simp only: valid_queues'_def imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - tat prq) - apply simp - done - lemma createObjects'_ct[wp]: "\\s. P (ksCurThread s)\ createObjects' p n v us \\rv s. P (ksCurThread s)\" by (rule createObjects_pspace_only, simp) @@ -4058,7 +4204,7 @@ lemma createNewCaps_idle'[wp]: apply (rename_tac apiobject_type) apply (case_tac apiobject_type, simp_all split del: if_split)[1] apply (wp, simp) - including no_pre + including classic_wp_pre apply (wp mapM_x_wp' createObjects_idle' threadSet_idle' @@ -4161,7 +4307,7 @@ lemma createNewCaps_irq_handlers': lemma createObjects'_irq_states' [wp]: "\valid_irq_states'\ createObjects' a b c d \\_. valid_irq_states'\" apply (simp add: createObjects'_def split_def) - apply (wp hoare_unless_wp|wpc|simp add: alignError_def)+ + apply (wp unless_wp|wpc|simp add: alignError_def)+ apply fastforce done @@ -4171,35 +4317,150 @@ crunch irq_states' [wp]: createNewCaps valid_irq_states' crunch ksMachine[wp]: createObjects "\s. P (ksMachineState s)" (simp: crunch_simps unless_def) -lemma createNewCaps_valid_queues': - "\valid_queues' and pspace_no_overlap' ptr sz - and pspace_aligned' and pspace_distinct' - and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ - createNewCaps ty ptr n us d - \\rv. valid_queues'\" - apply (wp valid_queues_lift_asm' [OF createNewCaps_obj_at2]) - apply (clarsimp) - apply (simp add: makeObjectKO_def - split: object_type.split_asm - apiobject_type.split_asm) - apply (clarsimp simp: inQ_def) - apply (auto simp: makeObject_tcb - split: object_type.splits apiobject_type.splits) - done - -lemma createNewCaps_valid_queues: - "\valid_queues and pspace_no_overlap' ptr sz - and pspace_aligned' and pspace_distinct' - and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ - createNewCaps ty ptr n us d - \\rv. 
valid_queues\" - apply (rule hoare_gen_asm) - apply (wp valid_queues_lift_asm createNewCaps_obj_at2[where sz=sz]) - apply (clarsimp simp: projectKO_opts_defs) - apply (simp add: inQ_def) - apply (wp createNewCaps_pred_tcb_at'[where sz=sz] | simp)+ +lemma createObjects_valid_bitmaps: + "createObjects' ptr n val gbits \valid_bitmaps\" + apply (clarsimp simp: createObjects'_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_bitmaps_def valid_bitmapQ_def bitmapQ_def bitmapQ_no_L2_orphans_def + bitmapQ_no_L1_orphans_def) done +lemma valid_bitmaps_gsCNodes_update[simp]: + "valid_bitmaps (gsCNodes_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_bitmaps_gsUserPages_update[simp]: + "valid_bitmaps (gsUserPages_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +crunches curDomain + for valid_bitmaps[wp]: valid_bitmaps + and sched_pointers[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + +lemma createNewCaps_valid_bitmaps: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_bitmaps s\ + createNewCaps ty ptr n us dev + \\_. valid_bitmaps\" + unfolding createNewCaps_def + apply (clarsimp simp: RISCV64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_bitmaps) + by (wpsimp wp: createObjects_valid_bitmaps[simplified o_def] mapM_x_wp + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ + +lemma createObjects_sched_queues: + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True) + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + (is "\ \s. _ \ _ \ ?Pre s \ _ \\_. _\") +proof (rule hoare_grab_asm)+ + assume not_0: "\ n = 0" + and cover: "range_cover ptr sz ((objBitsKO val) + gbits) n" + then show + "\\s. ?Pre s\ createObjects' ptr n val gbits \\_ s. 
P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + proof - + have shiftr_not_zero:" 1 \ ((of_nat n)::machine_word) << gbits" + using range_cover_not_zero_shift[OF not_0 cover,where gbits = gbits] + by (simp add:word_le_sub1) + show ?thesis + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: shiftL_nat data_map_insert_def[symmetric] + new_cap_addrs_fold'[OF shiftr_not_zero] + simp del: data_map_insert_def) + using range_cover.unat_of_nat_n_shift[OF cover, where gbits=gbits, simplified] + apply (clarsimp simp: foldr_upd_app_if) + apply (rule_tac a="tcbSchedNexts_of s" and b="tcbSchedPrevs_of s" + in rsubst2[rotated, OF sym sym, where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp split: kernel_object.splits option.splits) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp split: kernel_object.splits option.splits) + apply simp + done + qed +qed + +lemma createNewCaps_sched_queues: + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + assumes not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + createNewCaps ty ptr n us dev + \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + unfolding createNewCaps_def + apply (clarsimp simp: RISCV64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (insert cover not_0) + apply (wpsimp wp: mapM_x_wp' createObjects_sched_queues + simp: curDomain_def) + by (wpsimp wp: mapM_x_wp' createObjects_sched_queues[simplified o_def] + threadSet_sched_pointers + | simp add: objBitsKO_def APIType_capBits_def valid_pspace'_def makeObject_tcb + objBits_def pageBits_def archObjSize_def createObjects_def + pt_bits_def pte_bits_def word_size_bits_def table_size_def + ptTranslationBits_def + | intro conjI impI)+ + +lemma createObjects_valid_sched_pointers: + "\\s. valid_sched_pointers s + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True)\ + createObjects' ptr n val gbits + \\_. valid_sched_pointers\" + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_sched_pointers_def foldr_upd_app_if opt_pred_def opt_map_def comp_def) + apply (cases "tcb_of' val"; clarsimp) + done + +lemma createNewCaps_valid_sched_pointers: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_sched_pointers s\ + createNewCaps ty ptr n us dev + \\_. 
valid_sched_pointers\" + unfolding createNewCaps_def + apply (clarsimp simp: RISCV64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_sched_pointers) + by (wpsimp wp: createObjects_valid_sched_pointers[simplified o_def] mapM_x_wp + threadSet_valid_sched_pointers + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ + lemma mapM_x_threadSet_valid_pspace: "\valid_pspace' and K (curdom \ maxDomain)\ mapM_x (threadSet (tcbDomain_update (\_. curdom))) addrs \\y. valid_pspace'\" @@ -4353,7 +4614,7 @@ proof - apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) apply (rule hoare_pre) apply (wps a b c d) - apply (wp static_imp_wp e' hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) apply (auto simp: obj_at'_def ct_in_state'_def st_tcb_at'_def) done qed @@ -4434,7 +4695,7 @@ lemma createObjects_null_filter': createObjects' ptr n val gbits \\addrs a. P (null_filter' (ctes_of a))\" apply (clarsimp simp: createObjects'_def split_def) - apply (wp hoare_unless_wp|wpc + apply (wp unless_wp|wpc | clarsimp simp: alignError_def split del: if_split simp del:fun_upd_apply)+ apply (subst new_cap_addrs_fold') apply (simp add:unat_1_0 unat_gt_0) @@ -4581,12 +4842,13 @@ proof (rule hoare_gen_asm, elim conjE) createNewCaps_valid_arch_state valid_irq_node_lift_asm [unfolded pred_conj_def, OF _ createNewCaps_obj_at'] createNewCaps_irq_handlers' createNewCaps_vms - createNewCaps_valid_queues - createNewCaps_valid_queues' createNewCaps_pred_tcb_at' cnc_ct_not_inQ createNewCaps_ct_idle_or_in_cur_domain' createNewCaps_sch_act_wf createNewCaps_urz[where sz=sz] + createNewCaps_sched_queues[OF cover not_0] + createNewCaps_valid_sched_pointers + createNewCaps_valid_bitmaps | simp)+ using not_0 apply (clarsimp simp: valid_pspace'_def) @@ -4659,35 +4921,6 @@ lemma createObjects_sch: apply (wp sch_act_wf_lift_asm createObjects_pred_tcb_at' createObjects_orig_obj_at3 | force)+ done -lemma createObjects_queues: - "\\s. valid_queues s \ pspace_aligned' s \ pspace_distinct' s \ - pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ - createObjects ptr n val gbits - \\rv. valid_queues\" - apply (wp valid_queues_lift_asm [unfolded pred_conj_def, OF createObjects_orig_obj_at3] - createObjects_pred_tcb_at' [unfolded pred_conj_def]) - apply fastforce - apply wp+ - apply fastforce - done - -lemma createObjects_queues': - assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" - shows - "\\s. valid_queues' s \ pspace_aligned' s \ pspace_distinct' s \ - pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ - createObjects ptr n val gbits - \\rv. valid_queues'\" - apply (simp add: createObjects_def) - apply (wp valid_queues_lift_asm') - apply (wp createObjects_orig_obj_at2') - apply clarsimp - apply assumption - apply wp - using no_tcb - apply fastforce - done - lemma createObjects_no_cte_ifunsafe': assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" assumes no_tcb: "\t. 
projectKO_opt val \ Some (t::tcb)" @@ -4933,36 +5166,46 @@ proof - apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def,wp createObjects_valid_pspace_untyped') apply (wp assms | simp add: objBits_def)+ - apply (wp createObjects_sch createObjects_queues) + apply (wp createObjects_sch) apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def) apply (wp createObjects_state_refs_of'') apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def) apply (wp createObjects_iflive') - apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift - createObjects_idle' createObjects_no_cte_valid_global - createObjects_valid_arch createObjects_irq_state - createObjects_no_cte_irq_handlers createObjects_cur' - createObjects_queues' [OF no_tcb] - assms | simp add: objBits_def )+ + apply (wp createObjects_no_cte_ifunsafe' assms) apply (rule hoare_vcg_conj_lift) apply (simp add: createObjects_def) apply (wp createObjects_idle') + apply (wp irqs_masked_lift createObjects_no_cte_valid_global + createObjects_valid_arch createObjects_irq_state + createObjects_no_cte_irq_handlers assms + | simp)+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_sched_queues) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_valid_sched_pointers) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_valid_bitmaps) apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift createObjects_idle' createObjects_no_cte_valid_global createObjects_valid_arch createObjects_irq_state createObjects_no_cte_irq_handlers createObjects_cur' - createObjects_queues' [OF no_tcb] assms + assms createObjects_pspace_domain_valid co_ct_not_inQ createObjects_ct_idle_or_in_cur_domain' createObjects_untyped_ranges_zero'[OF moKO] + createObjects_sched_queues | simp)+ apply clarsimp using no_cte no_tcb apply ((intro conjI; assumption?); simp add: valid_pspace'_def objBits_def) apply (fastforce simp add: split_def split: option.splits) - apply (clarsimp simp: invs'_def no_tcb valid_state'_def no_cte split: option.splits) + apply (auto simp: invs'_def no_tcb valid_state'_def no_cte + split: option.splits kernel_object.splits) done qed @@ -4999,7 +5242,7 @@ lemma gcd_corres: "corres (=) \ \ (gets cur_domain) curDomain" lemma retype_region2_extra_ext_mapM_x_corres: shows "corres dc (valid_etcbs and (\s. \addr\set addrs. tcb_at addr s)) - (\s. \addr\set addrs. tcb_at' addr s) + (\s. \addr\set addrs. obj_at' (Not \ tcbQueued) addr s) (retype_region2_extra_ext addrs Structures_A.apiobject_type.TCBObject) (mapM_x (\addr. do cdom \ curDomain; threadSet (tcbDomain_update (\_. cdom)) addr @@ -5010,7 +5253,7 @@ lemma retype_region2_extra_ext_mapM_x_corres: apply (rule corres_split_eqr[OF gcd_corres]) apply (rule_tac S="Id \ {(x, y). x \ set addrs}" and P="\s. (\t \ set addrs. tcb_at t s) \ valid_etcbs s" - and P'="\s. \t \ set addrs. tcb_at' t s" + and P'="\s. \t \ set addrs. 
obj_at' (Not \ tcbQueued) t s" in corres_mapM_x) apply simp apply (rule corres_guard_imp) @@ -5018,8 +5261,10 @@ lemma retype_region2_extra_ext_mapM_x_corres: apply (case_tac tcb') apply simp apply fastforce - apply fastforce + apply (fastforce simp: obj_at'_def) apply (wp hoare_vcg_ball_lift | simp)+ + apply (clarsimp simp: obj_at'_def) + apply fastforce apply auto[1] apply (wp | simp add: curDomain_def)+ done @@ -5052,10 +5297,11 @@ lemma retype_region2_obj_at: apply (auto simp: obj_at_def default_object_def is_tcb_def) done -lemma createObjects_tcb_at': +lemma createObjects_Not_tcbQueued: "\range_cover ptr sz (objBitsKO (injectKOS (makeObject::tcb))) n; n \ 0\ \ \\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ - createObjects ptr n (KOTCB makeObject) 0 \\ptrs s. \addr\set ptrs. tcb_at' addr s\" + createObjects ptr n (KOTCB makeObject) 0 + \\ptrs s. \addr\set ptrs. obj_at' (Not \ tcbQueued) addr s\" apply (rule hoare_strengthen_post[OF createObjects_ko_at_strg[where val = "(makeObject :: tcb)"]]) apply (auto simp: obj_at'_def project_inject objBitsKO_def objBits_def makeObject_tcb) done @@ -5120,8 +5366,9 @@ lemma corres_retype_region_createNewCaps: apply (rule corres_retype[where 'a = tcb], simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def makeObjectKO_def - other_objs_default_relation)[1] + tcb_relation_retype)[1] apply (fastforce simp: range_cover_def) + apply (simp add: tcb_relation_retype) apply (rule corres_split_nor) apply (simp add: APIType_map2_def) apply (rule retype_region2_extra_ext_mapM_x_corres) @@ -5131,7 +5378,7 @@ lemma corres_retype_region_createNewCaps: apply wp apply wp apply ((wp retype_region2_obj_at | simp add: APIType_map2_def)+)[1] - apply ((wp createObjects_tcb_at'[where sz=sz] + apply ((wp createObjects_Not_tcbQueued[where sz=sz] | simp add: APIType_map2_def objBits_simps' obj_bits_api_def)+)[1] apply simp apply simp @@ -5164,7 +5411,7 @@ lemma corres_retype_region_createNewCaps: \ \CapTable\ apply (subst retype_region2_extra_ext_trivial) apply (simp add: APIType_map2_def) - apply (subst bind_assoc_reverse[of "createObjects y n (KOCTE makeObject) us"]) + apply (subst bind_assoc_return_reverse[of "createObjects y n (KOCTE makeObject) us"]) apply (subst liftM_def[of "map (\addr. capability.CNodeCap addr us 0 0)", symmetric]) apply simp apply (rule corres_rel_imp) diff --git a/proof/refine/RISCV64/Schedule_R.thy b/proof/refine/RISCV64/Schedule_R.thy index 15e0ce5486..fa8296faea 100644 --- a/proof/refine/RISCV64/Schedule_R.thy +++ b/proof/refine/RISCV64/Schedule_R.thy @@ -10,16 +10,11 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) -declare static_imp_wp[wp_split del] +declare hoare_weak_lift_imp[wp_split del] (* Levity: added (20090713 10:04:12) *) declare sts_rel_idle [simp] -lemma invs_no_cicd'_queues: - "invs_no_cicd' s \ valid_queues s" - unfolding invs_no_cicd'_def - by simp - lemma corres_if2: "\ G = G'; G \ corres r P P' a c; \ G' \ corres r Q Q' b d \ \ corres r (if G then P else Q) (if G' then P' else Q') (if G then a else b) (if G' then c else d)" @@ -41,7 +36,7 @@ proof - apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) done have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. 
if rv \ None then \ else P\" - by (wp alternative_wp | simp)+ + by (wp | simp)+ show ?thesis using p apply (induct xs) apply (simp add: y del: dc_simp) @@ -89,275 +84,259 @@ lemma schedule_choose_new_thread_sched_act_rct[wp]: unfolding schedule_choose_new_thread_def by wp +\ \This proof shares many similarities with the proof of @{thm tcbSchedEnqueue_corres}\ lemma tcbSchedAppend_corres: - notes trans_state_update'[symmetric, simp del] - shows - "corres dc (is_etcb_at t and tcb_at t and pspace_aligned and pspace_distinct) - (Invariants_H.valid_queues and valid_queues') - (tcb_sched_action (tcb_sched_append) t) (tcbSchedAppend t)" - apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) - apply (fastforce simp: tcb_at_cross state_relation_def) - apply (simp only: tcbSchedAppend_def tcb_sched_action_def) - apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (rule no_fail_pre, wp, simp) - apply (case_tac queued) - apply (simp add: unless_def when_def) - apply (rule corres_no_failI) - apply wp+ - apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc - assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def - set_tcb_queue_def simpler_modify_def) - - apply (subgoal_tac "tcb_sched_append t (ready_queues a (tcb_domain y) (tcb_priority y)) - = (ready_queues a (tcb_domain y) (tcb_priority y))") - apply (simp add: state_relation_def ready_queues_relation_def) - apply (clarsimp simp: tcb_sched_append_def state_relation_def - valid_queues'_def ready_queues_relation_def - ekheap_relation_def etcb_relation_def - obj_at'_def inQ_def project_inject) - apply (drule_tac x=t in bspec,clarsimp) + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_append tcb_ptr) (tcbSchedAppend tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (clarsimp simp: tcb_sched_action_def tcb_sched_append_def get_tcb_queue_def + tcbSchedAppend_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) 
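
For orientation in the tcbSchedAppend_corres proof above and below: in the refined model a ready queue is no longer a plain list; it is threaded through the TCBs via their tcbSchedNext/tcbSchedPrev fields, and the queue value itself records only the head and the end pointer. The stand-alone Haskell sketch below renders an append on such a structure. Field and function names follow the proof text (tcbQueueAppend, tcbQueueEmpty, tcbQueueHead, tcbQueueEnd), but the Ptr type and the Map-based heap are assumptions for illustration only, not the generated Haskell spec. It makes explicit the empty/non-empty case split that the corres proof walks through.

import qualified Data.Map as M
import Data.Maybe (fromMaybe)

type Ptr = Int

data TCB      = TCB      { tcbSchedNext :: Maybe Ptr, tcbSchedPrev :: Maybe Ptr }
data TcbQueue = TcbQueue { tcbQueueHead :: Maybe Ptr, tcbQueueEnd  :: Maybe Ptr }

-- A queue is empty iff it has no head; tcbQueueHead_iff_tcbQueueEnd in the
-- proofs states that the same then holds for the end pointer.
tcbQueueEmpty :: TcbQueue -> Bool
tcbQueueEmpty q = tcbQueueHead q == Nothing

-- Append a TCB (assumed to have no scheduling pointers yet) at the end.
tcbQueueAppend :: M.Map Ptr TCB -> TcbQueue -> Ptr -> (M.Map Ptr TCB, TcbQueue)
tcbQueueAppend heap q tcbPtr
  | tcbQueueEmpty q =
      -- empty case: the new TCB becomes both head and end; no links change
      (heap, TcbQueue (Just tcbPtr) (Just tcbPtr))
  | otherwise =
      -- non-empty case: link the old end and the new TCB, then move the end
      let oldEnd = fromMaybe (error "non-empty queue has an end") (tcbQueueEnd q)
          heap'  = M.adjust (\t -> t { tcbSchedNext = Just tcbPtr }) oldEnd
                 $ M.adjust (\t -> t { tcbSchedPrev = Just oldEnd }) tcbPtr heap
      in (heap', q { tcbQueueEnd = Just tcbPtr })

The ready_queues_relation used in the corres statement then relates the abstract list for each (domain, priority) to exactly the heap path from tcbQueueHead to tcbQueueEnd through these pointers, which is why the proof reasons about heap_path, queue_end_valid and prev_queue_head separately for the two cases.
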
+ apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce apply clarsimp - apply (clarsimp simp: unless_def when_def cong: if_cong) - apply (rule stronger_corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply simp - apply (rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_append_def) - apply (intro conjI impI) - apply (rule corres_guard_imp) - apply (rule setQueue_corres) - prefer 3 - apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) - apply simp - apply simp - apply simp - apply (rule corres_split_noop_rhs2) - apply (rule addToBitmap_if_null_noop_corres) - apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] - apply wp+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def - project_inject) - done + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. P'" and P'=P' for P P']) -crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue - for valid_pspace'[wp]: valid_pspace' - and valid_arch_state'[wp]: valid_arch_state' - (simp: unless_def) + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueueAppend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: obj_at'_def) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) 
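
The addToBitmap step that is broken off above is a no-op from the abstract point of view (addToBitmap_if_null_noop_corres): the abstract spec has no bitmaps, while the concrete scheduler keeps a two-level bitmap recording which ready queues are non-empty. The rough Haskell sketch below is only meant to make the bitmap invariants used in this file (valid_bitmapQ: a priority bit is set iff its queue is non-empty; the no-orphans conditions: the two levels stay consistent with each other) easier to read. The word size, the flat priority indexing, the Bitmaps record and the missing domain dimension are simplifications, not the kernel's actual layout.

import Data.Bits (setBit, clearBit)
import Data.Word (Word64)
import qualified Data.Map as M

wordBits :: Int
wordBits = 64

-- L1 word: one bit per L2 word; L2 words: one bit per priority.
data Bitmaps = Bitmaps { l1Bitmap :: Word64, l2Bitmap :: M.Map Int Word64 }

l2Word :: Bitmaps -> Int -> Word64
l2Word bm i = M.findWithDefault 0 i (l2Bitmap bm)

-- Record that the queue for priority prio is now non-empty.
addToBitmap :: Int -> Bitmaps -> Bitmaps
addToBitmap prio bm =
  let (i, j) = prio `divMod` wordBits
  in bm { l1Bitmap = l1Bitmap bm `setBit` i
        , l2Bitmap = M.insert i (l2Word bm i `setBit` j) (l2Bitmap bm) }

-- Record that the queue for priority prio is now empty; clear the L1 bit
-- when its whole L2 word becomes zero, so neither level claims a non-empty
-- queue that does not exist.
removeFromBitmap :: Int -> Bitmaps -> Bitmaps
removeFromBitmap prio bm =
  let (i, j) = prio `divMod` wordBits
      l2'    = l2Word bm i `clearBit` j
      l1'    = if l2' == 0 then l1Bitmap bm `clearBit` i else l1Bitmap bm
  in bm { l1Bitmap = l1', l2Bitmap = M.insert i l2' (l2Bitmap bm) }

In the concrete enqueue/append code the bitmap update is guarded by the queue-emptiness test (hence the "_if_null_" in the corres rule's name), which is why the proof can split it off with corres_add_noop_lhs and discharge it independently of the pointer updates.
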
+ + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp simp: setQueue_def tcbQueueAppend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply fast + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def opt_pred_def opt_map_def) + apply (metis inQ_def option.simps(5) tcb_of'_TCB) + apply (intro conjI impI; clarsimp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply clarsimp + apply (drule_tac x="the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))" + in spec) + subgoal by (auto simp: in_opt_pred opt_map_red) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: tcbQueueEmpty_def) + apply (case_tac "t = tcbPtr") + apply (clarsimp simp: inQ_def fun_upd_apply split: if_splits) + apply (case_tac "t = the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: inQ_def opt_pred_def fun_upd_apply) + apply (clarsimp simp: inQ_def 
in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (intro conjI; clarsimp) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply opt_map_def obj_at'_def + queue_end_valid_def prev_queue_head_def + split: if_splits option.splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_append[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def opt_map_def split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def fun_upd_apply queue_end_valid_def split: if_splits) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply split: if_splits) + by (clarsimp simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def split: if_splits) + +lemma tcbQueueAppend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_objs'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + apply (clarsimp simp: tcbQueueEmpty_def valid_bound_tcb'_def split: option.splits) + done + +lemma tcbSchedAppend_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. valid_objs'\" + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: threadSet_valid_objs' threadGet_wp hoare_vcg_all_lift) + apply (normalise_obj_at', rename_tac tcb "end") + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: tcbQueueEmpty_def obj_at'_def) + done crunches tcbSchedAppend, tcbSchedDequeue for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" (wp: threadSet_pred_tcb_no_state simp: unless_def tcb_to_itcb'_def) -lemma removeFromBitmap_valid_queues_no_bitmap_except[wp]: - "\ valid_queues_no_bitmap_except t \ - removeFromBitmap d p - \\_. valid_queues_no_bitmap_except t \" - unfolding bitmapQ_defs valid_queues_no_bitmap_except_def - by (wp| clarsimp simp: bitmap_fun_defs)+ - -lemma removeFromBitmap_bitmapQ: - "\ \s. True \ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" - unfolding bitmapQ_defs bitmap_fun_defs - by (wp| clarsimp simp: bitmap_fun_defs)+ - -lemma removeFromBitmap_valid_bitmapQ[wp]: -" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. ksReadyQueues s (d,p) = []) \ - removeFromBitmap d p - \\_. 
valid_bitmapQ \" -proof - - have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. ksReadyQueues s (d,p) = []) \ - removeFromBitmap d p - \\_. valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. \ bitmapQ d p s \ ksReadyQueues s (d,p) = []) \" - by (rule hoare_pre) - (wp removeFromBitmap_valid_queues_no_bitmap_except removeFromBitmap_valid_bitmapQ_except - removeFromBitmap_bitmapQ, simp) - thus ?thesis - by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) -qed - -(* this should be the actual weakest precondition to establish valid_queues - under tagging a thread as not queued *) -lemma threadSet_valid_queues_dequeue_wp: - "\ valid_queues_no_bitmap_except t and - valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. \d p. t \ set (ksReadyQueues s (d,p))) \ - threadSet (tcbQueued_update (\_. False)) t - \\rv. valid_queues \" - unfolding threadSet_def - apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) - apply (rule hoare_pre) - apply (simp add: valid_queues_def valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def) - apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift - setObject_tcb_strongest) - apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def valid_queues_no_bitmap_def) - done - (* FIXME move *) lemmas obj_at'_conjI = obj_at_conj' -lemma setQueue_valid_queues_no_bitmap_except_dequeue_wp: - "\d p ts t. - \ \s. valid_queues_no_bitmap_except t s \ - (\t' \ set ts. obj_at' (inQ d p and runnable' \ tcbState) t' s) \ - t \ set ts \ distinct ts \ p \ maxPriority \ d \ maxDomain \ - setQueue d p ts - \\rv. valid_queues_no_bitmap_except t \" - unfolding setQueue_def valid_queues_no_bitmap_except_def null_def - by wp force - -definition (* if t is in a queue, it should be tagged with right priority and domain *) - "correct_queue t s \ \d p. t \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s)" - -lemma valid_queues_no_bitmap_correct_queueI[intro]: - "valid_queues_no_bitmap s \ correct_queue t s" - unfolding correct_queue_def valid_queues_no_bitmap_def - by (fastforce simp: obj_at'_def inQ_def) - - -lemma tcbSchedDequeue_valid_queues_weak: - "\ valid_queues_no_bitmap_except t and valid_bitmapQ and - bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - correct_queue t and - obj_at' (\tcb. tcbDomain tcb \ maxDomain \ tcbPriority tcb \ maxPriority) t \ - tcbSchedDequeue t - \\_. Invariants_H.valid_queues\" -proof - - show ?thesis - unfolding tcbSchedDequeue_def null_def valid_queues_def - apply wp (* stops on threadSet *) - apply (rule hoare_post_eq[OF _ threadSet_valid_queues_dequeue_wp], - simp add: valid_queues_def) - apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift)+ - apply (wp hoare_vcg_imp_lift setQueue_valid_queues_no_bitmap_except_dequeue_wp - setQueue_valid_bitmapQ threadGet_const_tcb_at)+ - (* wp done *) - apply (normalise_obj_at') - apply (clarsimp simp: correct_queue_def) - apply (normalise_obj_at') - apply (fastforce simp add: valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def elim: obj_at'_weaken)+ - done -qed - -lemma tcbSchedDequeue_valid_queues: - "\Invariants_H.valid_queues - and obj_at' (\tcb. tcbDomain tcb \ maxDomain) t - and obj_at' (\tcb. tcbPriority tcb \ maxPriority) t\ - tcbSchedDequeue t - \\_. 
Invariants_H.valid_queues\" - apply (rule hoare_pre, rule tcbSchedDequeue_valid_queues_weak) - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def) - done - -lemma tcbSchedAppend_valid_queues'[wp]: - (* most of this is identical to tcbSchedEnqueue_valid_queues' in TcbAcc_R *) - "\valid_queues' and tcb_at' t\ tcbSchedAppend t \\_. valid_queues'\" - apply (simp add: tcbSchedAppend_def) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued; simp_all add: unless_def when_def) - apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ - apply (subst conj_commute, wp) - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) - apply wp - apply fastforce - apply wp - apply (subst conj_commute) - apply clarsimp - apply (rule_tac Q="\rv. valid_queues' - and obj_at' (\obj. \ tcbQueued obj) t - and obj_at' (\obj. tcbPriority obj = prio) t - and obj_at' (\obj. tcbDomain obj = tdom) t - and (\s. t \ set (ksReadyQueues s (tdom, prio)))" - in hoare_post_imp) - apply (clarsimp simp: valid_queues'_def obj_at'_def inQ_def) - apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def inQ_def valid_queues'_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def) - done - -lemma threadSet_valid_queues'_dequeue: (* threadSet_valid_queues' is too weak for dequeue *) - "\\s. (\d p t'. obj_at' (inQ d p) t' s \ t' \ t \ t' \ set (ksReadyQueues s (d, p))) \ - obj_at' (inQ d p) t s \ - threadSet (tcbQueued_update (\_. False)) t - \\rv. valid_queues' \" - unfolding valid_queues'_def - apply (rule hoare_pre) - apply (wp hoare_vcg_all_lift) - apply (simp only: imp_conv_disj not_obj_at') - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (simp add: not_obj_at') - apply (clarsimp simp: typ_at_tcb') - apply normalise_obj_at' - apply (fastforce elim: obj_at'_weaken simp: inQ_def) - done - -lemma setQueue_ksReadyQueues_lift: - "\ \s. P (s\ksReadyQueues := (ksReadyQueues s)((d, p) := ts)\) ts \ - setQueue d p ts - \ \_ s. P s (ksReadyQueues s (d,p))\" - unfolding setQueue_def - by (wp, clarsimp simp: fun_upd_def cong: if_cong) - -lemma tcbSchedDequeue_valid_queues'[wp]: - "\valid_queues' and tcb_at' t\ - tcbSchedDequeue t \\_. valid_queues'\" - unfolding tcbSchedDequeue_def - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t" - in hoare_seq_ext) - prefer 2 - apply (wp threadGet_const_tcb_at) - apply (fastforce simp: obj_at'_def) - apply clarsimp - apply (rename_tac queued) - apply (case_tac queued, simp_all) - apply wp - apply (rule_tac d=tdom and p=prio in threadSet_valid_queues'_dequeue) - apply (rule hoare_pre_post, assumption) - apply (wp | clarsimp simp: bitmap_fun_defs)+ - apply (wp hoare_vcg_all_lift setQueue_ksReadyQueues_lift) - apply clarsimp - apply (wp threadGet_obj_at' threadGet_const_tcb_at)+ - apply clarsimp - apply (rule context_conjI, clarsimp simp: obj_at'_def) - apply (clarsimp simp: valid_queues'_def obj_at'_def inQ_def|wp)+ - done +crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue + for tcb_at'[wp]: "tcb_at' t" + and cap_to'[wp]: "ex_nonz_cap_to' p" + and ifunsafe'[wp]: if_unsafe_then_cap' + (wp: crunch_wps simp: crunch_simps) lemma tcbSchedAppend_iflive'[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ - tcbSchedAppend tcb \\_. if_live_then_nonz_cap'\" - apply (simp add: tcbSchedAppend_def unless_def) - apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ + "\if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbSchedAppend_def + apply (wpsimp wp: tcbQueueAppend_if_live_then_nonz_cap' threadGet_wp simp: bitmap_fun_defs) + apply (frule_tac p=tcbPtr in if_live_then_nonz_capE') + apply (fastforce simp: ko_wp_at'_def st_tcb_at'_def obj_at'_def runnable_eq_active') + apply (clarsimp simp: tcbQueueEmpty_def) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: obj_at'_tcbQueueEnd_ksReadyQueues + simp: ko_wp_at'_def inQ_def obj_at'_def tcbQueueEmpty_def) done lemma tcbSchedDequeue_iflive'[wp]: - "\if_live_then_nonz_cap'\ tcbSchedDequeue tcb \\_. if_live_then_nonz_cap'\" + "\if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. if_live_then_nonz_cap'\" apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ - apply ((wp | clarsimp simp: bitmap_fun_defs)+)[1] (* deal with removeFromBitmap *) - apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ - apply (rule_tac Q="\rv. \" in hoare_post_imp, fastforce) - apply (wp | simp add: crunch_simps)+ + apply (wpsimp wp: tcbQueueRemove_if_live_then_nonz_cap' threadGet_wp) + apply (fastforce elim: if_live_then_nonz_capE' simp: obj_at'_def ko_wp_at'_def) done crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue @@ -399,23 +378,91 @@ lemma ct_idle_or_in_cur_domain'_lift2: apply (rule hoare_lift_Pf2[where f=ksCurThread]) apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) including no_pre - apply (wp static_imp_wp hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift) apply simp+ done +lemma threadSet_mdb': + "\valid_mdb' and obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF (f t)) t\ + threadSet f t + \\rv. valid_mdb'\" + apply (wpsimp wp: setObject_tcb_mdb' getTCB_wp simp: threadSet_def obj_at'_def) + apply fastforce + done + +lemma tcbSchedNext_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbSchedNext_update f) tcbPtr \\_. 
valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbSchedPrev_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueueRemove_valid_mdb': + "\\s. valid_mdb' s \ valid_objs' s\ tcbQueueRemove q tcbPtr \\_. valid_mdb'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def obj_at'_def) + done + +lemma tcbQueuePrepend_valid_mdb': + "\valid_mdb' and tcb_at' tcbPtr + and (\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_valid_mdb': + "\\s. valid_mdb' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueEnd queue)) s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueued_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbQueued_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma valid_mdb'_ksReadyQueuesL1Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL1Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma valid_mdb'_ksReadyQueuesL2Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL2Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma tcbSchedEnqueue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. valid_mdb'\" + apply (clarsimp simp: tcbSchedEnqueue_def setQueue_def) + apply (wpsimp wp: tcbQueuePrepend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + apply normalise_obj_at' + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + +crunches tcbSchedEnqueue + for cur_tcb'[wp]: cur_tcb' + (wp: threadSet_cur) + lemma tcbSchedEnqueue_invs'[wp]: - "\invs' - and st_tcb_at' runnable' t - and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ - tcbSchedEnqueue t + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedEnqueue t \\_. invs'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp tcbSchedEnqueue_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority split: thread_state.split_asm simp: valid_pspace'_def)+ + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedEnqueue_ct_not_inQ + simp: cteCaps_of_def o_def) done crunch ksMachine[wp]: tcbSchedAppend "\s. P (ksMachineState s)" @@ -424,7 +471,7 @@ crunch ksMachine[wp]: tcbSchedAppend "\s. 
P (ksMachineState s)" lemma tcbSchedAppend_vms'[wp]: "\valid_machine_state'\ tcbSchedAppend t \\_. valid_machine_state'\" apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedAppend_ksMachine) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) done crunch pspace_domain_valid[wp]: tcbSchedAppend "pspace_domain_valid" @@ -439,21 +486,27 @@ crunch ksIdleThread[wp]: tcbSchedAppend "\s. P (ksIdleThread s)" crunch ksDomSchedule[wp]: tcbSchedAppend "\s. P (ksDomSchedule s)" (simp: unless_def) +lemma tcbQueueAppend_tcbPriority_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbQueueAppend_tcbDomain_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + lemma tcbSchedAppend_tcbDomain[wp]: - "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ - tcbSchedAppend t - \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" + "tcbSchedAppend t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" apply (clarsimp simp: tcbSchedAppend_def) - apply (wpsimp simp: unless_def)+ - done + by wpsimp lemma tcbSchedAppend_tcbPriority[wp]: - "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ - tcbSchedAppend t - \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" + "tcbSchedAppend t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" apply (clarsimp simp: tcbSchedAppend_def) - apply (wpsimp simp: unless_def)+ - done + by wpsimp lemma tcbSchedAppend_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ tcbSchedAppend t \\_. tcb_in_cur_domain' t' \" @@ -472,28 +525,59 @@ crunches tcbSchedDequeue, tcbSchedAppend for arch'[wp]: "\s. P (ksArchState s)" lemma tcbSchedAppend_sch_act_wf[wp]: - "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedAppend thread - \\rv s. sch_act_wf (ksSchedulerAction s) s\" - apply (simp add:tcbSchedAppend_def bitmap_fun_defs) - apply (wp hoare_unless_wp setQueue_sch_act threadGet_wp|simp)+ - apply (fastforce simp:typ_at'_def obj_at'_def) + "tcbSchedAppend thread \\s. sch_act_wf (ksSchedulerAction s) s\" + by (wpsimp wp: sch_act_wf_lift) + +lemma tcbSchedAppend_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedAppend tcbPtr \\_. valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedAppend_def + apply (wpsimp simp: tcbQueueAppend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp hoare_vcg_if_lift2) + apply (clarsimp simp: ksReadyQueues_asrt_def split: if_splits) + apply normalise_obj_at' + apply (force dest: tcbQueueHead_iff_tcbQueueEnd + simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def) + done + +lemma tcbSchedAppend_valid_mdb'[wp]: + "\valid_mdb' and valid_tcbs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. 
valid_mdb'\" + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: tcbQueueAppend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + apply (fastforce dest: obj_at'_tcbQueueEnd_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + +lemma tcbSchedAppend_valid_bitmaps[wp]: + "tcbSchedAppend tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) done lemma tcbSchedAppend_invs'[wp]: - "\invs' - and st_tcb_at' runnable' t - and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ - tcbSchedAppend t + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedAppend t \\_. invs'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp tcbSchedAppend_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority - split: thread_state.split_asm - simp: valid_pspace'_def)+ + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma tcbSchedAppend_all_invs_but_ct_not_inQ': + "\invs'\ + tcbSchedAppend t + \\_. all_invs_but_ct_not_inQ'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) done lemma tcbSchedEnqueue_invs'_not_ResumeCurrentThread: @@ -522,7 +606,7 @@ crunch ksMachine[wp]: tcbSchedDequeue "\s. P (ksMachineState s)" lemma tcbSchedDequeue_vms'[wp]: "\valid_machine_state'\ tcbSchedDequeue t \\_. valid_machine_state'\" apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedDequeue_ksMachine) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) done crunch pspace_domain_valid[wp]: tcbSchedDequeue "pspace_domain_valid" @@ -540,46 +624,89 @@ lemma tcbSchedDequeue_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ tcbSchedDequeue t \\_. tcb_in_cur_domain' t' \" apply (rule tcb_in_cur_domain'_lift) apply wp - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ - done - -lemma tcbSchedDequeue_tcbDomain[wp]: - "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ - tcbSchedDequeue t - \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ - done - -lemma tcbSchedDequeue_tcbPriority[wp]: - "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ - tcbSchedDequeue t - \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ + apply (clarsimp simp: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: hoare_when_weak_wp getObject_tcb_wp threadGet_wp) done crunch ksDomScheduleIdx[wp]: tcbSchedDequeue "\s. P (ksDomScheduleIdx s)" (simp: unless_def) +lemma tcbSchedDequeue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs'\ tcbSchedDequeue tcbPtr \\_. 
valid_mdb'\" + unfolding tcbSchedDequeue_def + apply (wpsimp simp: bitmap_fun_defs setQueue_def wp: threadSet_mdb' tcbQueueRemove_valid_mdb') + apply (rule_tac Q="\_. tcb_at' tcbPtr" in hoare_post_imp) + apply (fastforce simp: tcb_cte_cases_def cteSizeBits_def) + apply (wpsimp wp: threadGet_wp)+ + apply (fastforce simp: obj_at'_def) + done + lemma tcbSchedDequeue_invs'[wp]: - "\invs' and tcb_at' t\ - tcbSchedDequeue t - \\_. invs'\" - unfolding invs'_def valid_state'_def - apply (rule hoare_pre) - apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - tcbSchedDequeue_valid_queues - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def)+ - apply (fastforce elim: valid_objs'_maxDomain valid_objs'_maxPriority simp: valid_pspace'_def)+ + "tcbSchedDequeue t \invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma ready_qs_runnable_cross: + "\(s, s') \ state_relation; pspace_aligned s; pspace_distinct s; valid_queues s\ + \ ready_qs_runnable s'" + apply (clarsimp simp: ready_qs_runnable_def) + apply normalise_obj_at' + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply clarsimp + apply (drule_tac x=t in bspec) + apply (fastforce simp: inQ_def in_opt_pred obj_at'_def opt_map_red) + apply (fastforce dest: st_tcb_at_runnable_cross simp: obj_at'_def st_tcb_at'_def) + done + +method add_ready_qs_runnable = + rule_tac Q'=ready_qs_runnable in corres_cross_add_guard, + (clarsimp simp: pred_conj_def)?, + (frule valid_sched_valid_queues)?, (frule invs_psp_aligned)?, (frule invs_distinct)?, + fastforce dest: ready_qs_runnable_cross + +defs idleThreadNotQueued_def: + "idleThreadNotQueued s \ obj_at' (Not \ tcbQueued) (ksIdleThread s) s" + +lemma idle_thread_not_queued: + "\valid_idle s; valid_queues s; valid_etcbs s\ + \ \ (\d p. idle_thread s \ set (ready_queues s d p))" + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (drule_tac x="idle_thread s" in bspec) + apply fastforce + apply (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def valid_etcbs_def) done +lemma valid_idle_tcb_at: + "valid_idle s \ tcb_at (idle_thread s) s" + by (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def is_tcb_def) + lemma setCurThread_corres: - "corres dc \ \ (modify (cur_thread_update (\_. t))) (setCurThread t)" - apply (unfold setCurThread_def) + "corres dc (valid_idle and valid_queues and valid_etcbs and pspace_aligned and pspace_distinct) \ + (modify (cur_thread_update (\_. 
t))) (setCurThread t)" + apply (clarsimp simp: setCurThread_def) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (clarsimp simp: idleThreadNotQueued_def) + apply (frule (2) idle_thread_not_queued) + apply (frule state_relation_pspace_relation) + apply (frule state_relation_ready_queues_relation) + apply (frule state_relation_idle_thread) + apply (frule valid_idle_tcb_at) + apply (frule (3) tcb_at_cross) + apply (fastforce dest!: in_ready_q_tcbQueued_eq[THEN arg_cong_Not, THEN iffD1] + simp: obj_at'_def opt_pred_def opt_map_def) apply (rule corres_modify) apply (simp add: state_relation_def swp_def) done @@ -606,49 +733,58 @@ qed crunches storeWordUser, setVMRoot, asUser, storeWordUser, Arch.switchToThread for ksQ[wp]: "\s. P (ksReadyQueues s p)" and ksIdleThread[wp]: "\s. P (ksIdleThread s)" - and valid_queues[wp]: "Invariants_H.valid_queues" - (wp: crunch_wps simp: crunch_simps) + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_objs'[wp]: valid_objs' + (wp: crunch_wps threadSet_sched_pointers simp: crunch_simps) -crunches arch_switch_to_thread +crunches arch_switch_to_thread, arch_switch_to_idle_thread for pspace_aligned[wp]: pspace_aligned and pspace_distinct[wp]: pspace_distinct + and ready_qs_distinct[wp]: ready_qs_distinct + and valid_idle[wp]: valid_idle + (wp: ready_qs_distinct_lift) + +lemma valid_queues_in_correct_ready_q[elim!]: + "valid_queues s \ in_correct_ready_q s" + by (clarsimp simp: valid_queues_def in_correct_ready_q_def) + +lemma valid_queues_ready_qs_distinct[elim!]: + "valid_queues s \ ready_qs_distinct s" + by (clarsimp simp: valid_queues_def ready_qs_distinct_def) lemma switchToThread_corres: "corres dc (valid_arch_state and valid_objs and valid_vspace_objs and pspace_aligned and pspace_distinct and valid_vs_lookup and valid_global_objs and unique_table_refs - and st_tcb_at runnable t and valid_etcbs) - (no_0_obj' and Invariants_H.valid_queues) + and st_tcb_at runnable t and valid_etcbs and valid_queues and valid_idle) + (no_0_obj' and sym_heap_sched_pointers and valid_objs') (switch_to_thread t) (switchToThread t)" - (is "corres _ ?PA ?PH _ _") -proof - - have mainpart: "corres dc (?PA) (?PH) - (do y \ arch_switch_to_thread t; - y \ (tcb_sched_action tcb_sched_dequeue t); - modify (cur_thread_update (\_. t)) - od) - (do y \ Arch.switchToThread t; - y \ tcbSchedDequeue t; - setCurThread t - od)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply add_ready_qs_runnable + apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) + apply (rule corres_symb_exec_l[OF _ _ get_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_l[OF _ _ assert_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) 
+ apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce dest!: state_relation_ready_queues_relation intro: ksReadyQueues_asrt_cross) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply fastforce apply (rule corres_guard_imp) apply (rule corres_split[OF arch_switchToThread_corres]) apply (rule corres_split[OF tcbSchedDequeue_corres setCurThread_corres]) - apply (wp|clarsimp simp: tcb_at_is_etcb_at st_tcb_at_tcb_at)+ - done - - show ?thesis - apply - - apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) - apply (rule corres_symb_exec_l [where Q = "\ s rv. (?PA and (=) rv) s", - OF corres_symb_exec_l [OF mainpart]]) - apply (auto intro: no_fail_pre [OF no_fail_assert] - no_fail_pre [OF no_fail_get] - dest: st_tcb_at_tcb_at [THEN get_tcb_at] | - simp add: assert_def | wp)+ - done -qed + apply (wpsimp simp: is_tcb_def)+ + apply (fastforce intro!: st_tcb_at_tcb_at) + apply wpsimp + apply wpsimp + apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def) + apply wpsimp + apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def) + done lemma arch_switchToIdleThread_corres: "corres dc @@ -658,21 +794,27 @@ lemma arch_switchToIdleThread_corres: arch_switch_to_idle_thread Arch.switchToIdleThread" apply (simp add: arch_switch_to_idle_thread_def RISCV64_H.switchToIdleThread_def) - apply (corressimp corres: getIdleThread_corres setVMRoot_corres) + apply (corresKsimp corres: getIdleThread_corres setVMRoot_corres) apply (clarsimp simp: valid_idle_def valid_idle'_def pred_tcb_at_def obj_at_def is_tcb valid_arch_state_asid_table valid_arch_state_global_arch_objs) done lemma switchToIdleThread_corres: - "corres dc invs invs_no_cicd' switch_to_idle_thread switchToIdleThread" + "corres dc + (invs and valid_queues and valid_etcbs) + invs_no_cicd' + switch_to_idle_thread switchToIdleThread" apply (simp add: switch_to_idle_thread_def Thread_H.switchToIdleThread_def) + apply add_ready_qs_runnable + apply (rule corres_stateAssert_ignore, fastforce) apply (rule corres_guard_imp) apply (rule corres_split[OF getIdleThread_corres]) apply (rule corres_split[OF arch_switchToIdleThread_corres]) - apply (unfold setCurThread_def) - apply (rule corres_trivial, rule corres_modify) - apply (simp add: state_relation_def cdt_relation_def) - apply (wp+, simp+) + apply clarsimp + apply (rule setCurThread_corres) + apply wpsimp + apply (simp add: state_relation_def cdt_relation_def) + apply wpsimp+ apply (simp add: invs_unique_refs invs_valid_vs_lookup invs_valid_objs invs_valid_asid_map invs_arch_state invs_valid_global_objs invs_psp_aligned invs_distinct invs_valid_idle invs_vspace_objs) @@ -707,11 +849,9 @@ proof - apply (simp add: setCurThread_def) apply wp apply (clarsimp simp add: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def - valid_state'_def Invariants_H.valid_queues_def - sch_act_wf ct_in_state'_def state_refs_of'_def - ps_clear_def valid_irq_node'_def valid_queues'_def ct_not_inQ_ct - ct_idle_or_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def + valid_state'_def sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def ct_not_inQ_ct + ct_idle_or_in_cur_domain'_def bitmapQ_defs valid_bitmaps_def cong: option.case_cong) done qed @@ -725,100 +865,20 @@ lemma setCurThread_invs: by (rule hoare_pre, rule setCurThread_invs_no_cicd') (simp add: invs'_to_invs_no_cicd'_def) -lemma valid_queues_not_runnable_not_queued: - fixes s - assumes vq: 
"Invariants_H.valid_queues s" - and vq': "valid_queues' s" - and st: "st_tcb_at' (Not \ runnable') t s" - shows "obj_at' (Not \ tcbQueued) t s" -proof (rule ccontr) - assume "\ obj_at' (Not \ tcbQueued) t s" - moreover from st have "typ_at' TCBT t s" - by (rule pred_tcb_at' [THEN tcb_at_typ_at' [THEN iffD1]]) - ultimately have "obj_at' tcbQueued t s" - by (clarsimp simp: not_obj_at' comp_def) - - moreover - from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbPriority] - obtain p where tp: "obj_at' (\tcb. tcbPriority tcb = p) t s" - by clarsimp - - moreover - from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbDomain] - obtain d where td: "obj_at' (\tcb. tcbDomain tcb = d) t s" - by clarsimp - - ultimately - have "t \ set (ksReadyQueues s (d, p))" using vq' - unfolding valid_queues'_def - apply - - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (drule_tac x=t in spec) - apply (erule impE) - apply (fastforce simp add: inQ_def obj_at'_def) - apply (assumption) - done - - with vq have "st_tcb_at' runnable' t s" - unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def - apply - - apply clarsimp - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp simp add: st_tcb_at'_def) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp) - done - - with st show False - apply - - apply (drule(1) pred_tcb_at_conj') - apply (clarsimp) - done -qed - -(* - * The idle thread is not part of any ready queues. - *) -lemma idle'_not_tcbQueued': - assumes vq: "Invariants_H.valid_queues s" - and vq': "valid_queues' s" - and idle: "valid_idle' s" - shows "obj_at' (Not \ tcbQueued) (ksIdleThread s) s" -proof - - from idle have stidle: "st_tcb_at' (Not \ runnable') (ksIdleThread s) s" - by (clarsimp simp add: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) - with vq vq' show ?thesis - by (rule valid_queues_not_runnable_not_queued) -qed - lemma setCurThread_invs_no_cicd'_idle_thread: - "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" -proof - - have ct_not_inQ_ct: "\s t . \ ct_not_inQ s; obj_at' (\x. \ tcbQueued x) t s\ \ ct_not_inQ (s\ ksCurThread := t \)" - apply (simp add: ct_not_inQ_def o_def) - done - have idle'_activatable': "\ s t. st_tcb_at' idle' t s \ st_tcb_at' activatable' t s" - apply (clarsimp simp: st_tcb_at'_def o_def obj_at'_def) + "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\_. 
invs'\" + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def + valid_state'_def valid_idle'_def + sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_queues_def bitmapQ_defs valid_bitmaps_def pred_tcb_at'_def + cong: option.case_cong) + apply (clarsimp simp: idle_tcb'_def ct_not_inQ_def ps_clear_def obj_at'_def st_tcb_at'_def + idleThreadNotQueued_def) done - show ?thesis - apply (simp add: setCurThread_def) - apply wp - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def) - apply (frule (2) idle'_not_tcbQueued'[simplified o_def]) - apply (clarsimp simp add: ct_not_inQ_ct idle'_activatable' - invs'_def cur_tcb'_def valid_state'_def valid_idle'_def - sch_act_wf ct_in_state'_def state_refs_of'_def - ps_clear_def valid_irq_node'_def - ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def bitmapQ_defs valid_queues_no_bitmap_def valid_queues'_def - pred_tcb_at'_def - cong: option.case_cong) - apply (clarsimp simp: obj_at'_def idle_tcb'_def ) - done -qed lemma setCurThread_invs_idle_thread: "\invs' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" @@ -853,13 +913,13 @@ lemma Arch_switchToThread_tcb_in_cur_domain'[wp]: done lemma tcbSchedDequeue_not_tcbQueued: - "\ tcb_at' t \ tcbSchedDequeue t \ \_. obj_at' (\x. \ tcbQueued x) t \" + "\\\ tcbSchedDequeue t \\_. obj_at' (\x. \ tcbQueued x) t\" apply (simp add: tcbSchedDequeue_def) apply (wp|clarsimp)+ apply (rule_tac Q="\queued. obj_at' (\x. tcbQueued x = queued) t" in hoare_post_imp) - apply (clarsimp simp: obj_at'_def) - apply (wp threadGet_obj_at') - apply (simp) + apply (clarsimp simp: obj_at'_def) + apply (wpsimp wp: threadGet_wp)+ + apply (clarsimp simp: obj_at'_def) done lemma asUser_tcbState_inv[wp]: @@ -883,10 +943,6 @@ crunch valid_irq_states'[wp]: asUser "valid_irq_states'" crunch valid_machine_state'[wp]: asUser "valid_machine_state'" (wp: crunch_wps simp: crunch_simps) -crunch valid_queues'[wp]: asUser "valid_queues'" -(wp: crunch_wps simp: crunch_simps) - - lemma asUser_valid_irq_node'[wp]: "\\s. valid_irq_node' (irq_node' s) s\ asUser t (setRegister f r) \\_ s. valid_irq_node' (irq_node' s) s\" @@ -909,14 +965,14 @@ lemma asUser_ct_not_inQ[wp]: "\ct_not_inQ\ asUser t (setRegister f r) \\_ . ct_not_inQ\" apply (clarsimp simp: submonad_asUser.fn_is_sm submonad_fn_def) - apply (rule hoare_seq_ext)+ + apply (rule bind_wp)+ prefer 4 apply (rule stateAssert_sp) prefer 3 apply (rule gets_inv) defer apply (rule select_f_inv) - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (clarsimp simp: asUser_replace_def obj_at'_def fun_upd_def split: option.split kernel_object.split) apply wp @@ -946,55 +1002,38 @@ lemma asUser_utr[wp]: done lemma threadSet_invs_no_cicd'_trivialT: - assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" - assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" - assumes u: "\tcb. 
tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" - assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" shows - "\\s. invs_no_cicd' s \ - (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) \ - (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) \ - ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) \ - (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ - threadSet F t - \\rv. invs_no_cicd'\" -proof - - from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast - note threadSet_sch_actT_P[where P=False, simplified] - have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ - valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" - by (auto simp: z) - show ?thesis - apply (simp add: invs_no_cicd'_def valid_state'_def split del: if_split) - apply (rule hoare_pre) - apply (wp x w v u b - threadSet_valid_pspace'T - threadSet_sch_actT_P[where P=False, simplified] - threadSet_valid_queues - threadSet_state_refs_of'T[where f'=id] - threadSet_iflive'T - threadSet_ifunsafe'T - threadSet_idle'T - threadSet_global_refsT - irqs_masked_lift - valid_irq_node_lift - valid_irq_handlers_lift'' - threadSet_ctes_ofT - threadSet_not_inQ - threadSet_ct_idle_or_in_cur_domain' - threadSet_valid_dom_schedule' - threadSet_valid_queues' - threadSet_cur - untyped_ranges_zero_lift - |clarsimp simp: y z a domains cteCaps_of_def |rule refl)+ - apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) - apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) - by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) -qed + "threadSet F t \invs_no_cicd'\" + apply (simp add: invs_no_cicd'_def valid_state'_def) + apply (wp threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_cur + untyped_ranges_zero_lift + | clarsimp simp: assms cteCaps_of_def | rule refl)+ + by (auto simp: o_def) lemmas threadSet_invs_no_cicd'_trivial = threadSet_invs_no_cicd'_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] @@ -1013,22 +1052,17 @@ lemma Arch_switchToThread_invs_no_cicd': done lemma tcbSchedDequeue_invs_no_cicd'[wp]: - "\invs_no_cicd' and tcb_at' t\ - tcbSchedDequeue t - \\_. 
invs_no_cicd'\" - unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + "tcbSchedDequeue t \invs_no_cicd'\" + unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_pspace'_def apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - tcbSchedDequeue_valid_queues_weak untyped_ranges_zero_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp - apply (fastforce simp: valid_pspace'_def valid_queues_def - elim: valid_objs'_maxDomain valid_objs'_maxPriority intro: obj_at'_conjI) done lemma switchToThread_invs_no_cicd': - "\invs_no_cicd' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" + "\invs_no_cicd' and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" apply (simp add: Thread_H.switchToThread_def) apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued Arch_switchToThread_invs_no_cicd' Arch_switchToThread_pred_tcb') @@ -1036,7 +1070,7 @@ lemma switchToThread_invs_no_cicd': done lemma switchToThread_invs[wp]: - "\invs' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" + "\invs' and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" apply (simp add: Thread_H.switchToThread_def ) apply (wp threadSet_timeslice_invs setCurThread_invs Arch_switchToThread_invs dmo_invs' @@ -1079,8 +1113,7 @@ lemma dmo_cap_to'[wp]: lemma sct_cap_to'[wp]: "\ex_nonz_cap_to' p\ setCurThread t \\rv. ex_nonz_cap_to' p\" apply (simp add: setCurThread_def) - apply (wp ex_nonz_cap_to_pres') - apply (clarsimp elim!: cte_wp_at'_pspaceI)+ + apply (wpsimp wp: ex_nonz_cap_to_pres') done @@ -1112,61 +1145,6 @@ lemma obj_tcb_at': "obj_at' (\tcb::tcb. P tcb) t s \ tcb_at' t s" by (clarsimp simp: obj_at'_def) -lemma invs'_not_runnable_not_queued: - fixes s - assumes inv: "invs' s" - and st: "st_tcb_at' (Not \ runnable') t s" - shows "obj_at' (Not \ tcbQueued) t s" - apply (insert assms) - apply (rule valid_queues_not_runnable_not_queued) - apply (clarsimp simp add: invs'_def valid_state'_def)+ - done - -lemma valid_queues_not_tcbQueued_not_ksQ: - fixes s - assumes vq: "Invariants_H.valid_queues s" - and notq: "obj_at' (Not \ tcbQueued) t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" -proof (rule ccontr, simp , erule exE, erule exE) - fix d p - assume "t \ set (ksReadyQueues s (d, p))" - with vq have "obj_at' (inQ d p) t s" - unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def - apply clarify - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (simp) - done - hence "obj_at' tcbQueued t s" - apply (rule obj_at'_weakenE) - apply (simp only: inQ_def) - done - with notq show "False" - by (clarsimp simp: obj_at'_def) -qed - -lemma not_tcbQueued_not_ksQ: - fixes s - assumes "invs' s" - and "obj_at' (Not \ tcbQueued) t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" - apply (insert assms) - apply (clarsimp simp add: invs'_def valid_state'_def) - apply (drule(1) valid_queues_not_tcbQueued_not_ksQ) - apply (clarsimp) - done - -lemma ct_not_ksQ: - "\ invs' s; ksSchedulerAction s = ResumeCurrentThread \ - \ \p. ksCurThread s \ set (ksReadyQueues s p)" - apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def) - apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) - apply (fastforce) - done - lemma setThreadState_rct: "\\s. 
(runnable' st \ ksCurThread s \ t) \ ksSchedulerAction s = ResumeCurrentThread\ @@ -1174,21 +1152,21 @@ lemma setThreadState_rct: \\_ s. ksSchedulerAction s = ResumeCurrentThread\" apply (simp add: setThreadState_def) apply (rule hoare_pre_disj') - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] threadSet_nosch]]) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) apply (clarsimp simp: when_def) - apply (case_tac x) + apply (case_tac rv) apply (clarsimp, wp)[1] apply (clarsimp) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF threadSet_ct threadSet_nosch]]) - apply (rule hoare_seq_ext [OF _ isRunnable_inv]) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ isRunnable_inv]) + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF gct_wp gct_wp]]) apply (rename_tac ct) @@ -1238,21 +1216,24 @@ lemma bitmapQ_from_bitmap_lookup: done lemma lookupBitmapPriority_obj_at': - "\ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0; valid_queues_no_bitmap s; valid_bitmapQ s; - bitmapQ_no_L1_orphans s\ - \ obj_at' (inQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) and runnable' \ tcbState) - (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" + "\ksReadyQueuesL1Bitmap s d \ 0; valid_bitmapQ s; bitmapQ_no_L1_orphans s; + ksReadyQueues_asrt s; ready_qs_runnable s; pspace_aligned' s; pspace_distinct' s\ + \ obj_at' (inQ d (lookupBitmapPriority d s) and runnable' \ tcbState) + (the (tcbQueueHead (ksReadyQueues s (d, lookupBitmapPriority d s)))) s" apply (drule (2) bitmapQ_from_bitmap_lookup) apply (simp add: valid_bitmapQ_bitmapQ_simp) - apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)", simp) - apply (clarsimp, rename_tac t ts) - apply (drule cons_set_intro) - apply (drule (2) valid_queues_no_bitmap_objD) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def tcbQueueEmpty_def) + apply (drule_tac x=d in spec) + apply (drule_tac x="lookupBitmapPriority d s" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (fastforce simp: obj_at'_and ready_qs_runnable_def obj_at'_def st_tcb_at'_def inQ_def + tcbQueueEmpty_def) done lemma bitmapL1_zero_ksReadyQueues: "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ - \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. ksReadyQueues s (d,p) = [])" + \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. tcbQueueEmpty (ksReadyQueues s (d, p)))" apply (cases "ksReadyQueuesL1Bitmap s d = 0") apply (force simp add: bitmapQ_def valid_bitmapQ_def) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) @@ -1323,7 +1304,7 @@ lemma bitmapL1_highest_lookup: done lemma bitmapQ_ksReadyQueuesI: - "\ bitmapQ d p s ; valid_bitmapQ s \ \ ksReadyQueues s (d, p) \ []" + "\ bitmapQ d p s ; valid_bitmapQ s \ \ \ tcbQueueEmpty (ksReadyQueues s (d, p))" unfolding valid_bitmapQ_def by simp lemma getReadyQueuesL2Bitmap_inv[wp]: @@ -1332,24 +1313,22 @@ lemma getReadyQueuesL2Bitmap_inv[wp]: lemma switchToThread_lookupBitmapPriority_wp: "\\s. invs_no_cicd' s \ bitmapQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) s \ - t = hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)) \ + t = the (tcbQueueHead (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)))\ ThreadDecls_H.switchToThread t \\rv. 
invs'\" -proof - - have switchToThread_pre: - "\s p t.\ valid_queues s ; bitmapQ (ksCurDomain s) p s ; t = hd (ksReadyQueues s (ksCurDomain s,p)) \ - \ st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s" - unfolding valid_queues_def - apply (clarsimp dest!: bitmapQ_ksReadyQueuesI) - apply (case_tac "ksReadyQueues s (ksCurDomain s, p)", simp) - apply (rename_tac t ts) - apply (drule_tac t=t and p=p and d="ksCurDomain s" in valid_queues_no_bitmap_objD) - apply simp - apply (fastforce elim: obj_at'_weaken simp: inQ_def tcb_in_cur_domain'_def st_tcb_at'_def) - done - thus ?thesis - by (wp switchToThread_invs_no_cicd') (fastforce dest: invs_no_cicd'_queues) -qed + apply (simp add: Thread_H.switchToThread_def) + apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued + Arch_switchToThread_invs_no_cicd') + apply (auto elim!: pred_tcb'_weakenE) + apply (prop_tac "valid_bitmapQ s") + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_bitmaps_def) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def valid_bitmapQ_bitmapQ_simp) + apply (drule_tac x="ksCurDomain s" in spec) + apply (drule_tac x="lookupBitmapPriority (ksCurDomain s) s" in spec) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) + done lemma switchToIdleThread_invs_no_cicd': "\invs_no_cicd'\ switchToIdleThread \\rv. invs'\" @@ -1361,7 +1340,7 @@ lemma switchToIdleThread_invs_no_cicd': crunch obj_at'[wp]: "Arch.switchToIdleThread" "\s. obj_at' P t s" -declare static_imp_conj_wp[wp_split del] +declare hoare_weak_lift_imp_conj[wp_split del] lemma setCurThread_const: "\\_. P t \ setCurThread t \\_ s. P (ksCurThread s) \" @@ -1419,11 +1398,6 @@ lemma corres_assert_ret: apply (simp add: assert_def return_def fail_def) done -lemma corres_assert_assume_l: - "corres dc P Q (f ()) g - \ corres dc (P and (\s. P')) Q (assert P' >>= f) g" - by (force simp: corres_underlying_def assert_def return_def bind_def fail_def) - lemma corres_assert_assume_r: "corres dc P Q f (g ()) \ corres dc P (Q and (\s. Q')) f (assert Q' >>= g)" @@ -1453,8 +1427,8 @@ lemma guarded_switch_to_corres: and valid_vspace_objs and pspace_aligned and pspace_distinct and valid_vs_lookup and valid_global_objs and unique_table_refs - and st_tcb_at runnable t and valid_etcbs) - (no_0_obj' and Invariants_H.valid_queues) + and st_tcb_at runnable t and valid_etcbs and valid_queues and valid_idle) + (no_0_obj' and sym_heap_sched_pointers and valid_objs') (guarded_switch_to t) (switchToThread t)" apply (simp add: guarded_switch_to_def) apply (rule corres_guard_imp) @@ -1499,7 +1473,7 @@ lemma curDomain_corres: "corres (=) \ \ (gets cur_domain) (curDomain)" lemma curDomain_corres': "corres (=) \ (\s. ksCurDomain s \ maxDomain) - (gets cur_domain) (if 1 < numDomains then curDomain else return 0)" + (gets cur_domain) (if Suc 0 < numDomains then curDomain else return 0)" apply (case_tac "1 < numDomains"; simp) apply (rule corres_guard_imp[OF curDomain_corres]; solves simp) (* if we have only one domain, then we are in it *) @@ -1509,27 +1483,32 @@ lemma curDomain_corres': lemma lookupBitmapPriority_Max_eqI: "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; ksReadyQueuesL1Bitmap s d \ 0 \ - \ lookupBitmapPriority d s = (Max {prio. ksReadyQueues s (d, prio) \ []})" + \ lookupBitmapPriority d s = (Max {prio. 
\ tcbQueueEmpty (ksReadyQueues s (d, prio))})" apply (rule Max_eqI[simplified eq_commute]; simp) apply (fastforce simp: bitmapL1_highest_lookup valid_bitmapQ_bitmapQ_simp) apply (metis valid_bitmapQ_bitmapQ_simp bitmapQ_from_bitmap_lookup) done lemma corres_gets_queues_getReadyQueuesL1Bitmap: - "corres (\qs l1. ((l1 = 0) = (\p. qs p = []))) \ valid_queues + "corres (\qs l1. (l1 = 0) = (\p. qs p = [])) \ valid_bitmaps (gets (\s. ready_queues s d)) (getReadyQueuesL1Bitmap d)" - unfolding state_relation_def valid_queues_def getReadyQueuesL1Bitmap_def - by (clarsimp simp: bitmapL1_zero_ksReadyQueues ready_queues_relation_def) + unfolding state_relation_def valid_bitmaps_def getReadyQueuesL1Bitmap_def + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x=d in spec) + apply (fastforce simp: bitmapL1_zero_ksReadyQueues list_queue_relation_def tcbQueueEmpty_def) + done lemma guarded_switch_to_chooseThread_fragment_corres: "corres dc (P and st_tcb_at runnable t and invs and valid_sched) - (P' and st_tcb_at' runnable' t and invs_no_cicd') - (guarded_switch_to t) - (do runnable \ isRunnable t; - y \ assert runnable; - ThreadDecls_H.switchToThread t - od)" + (P' and invs_no_cicd') + (guarded_switch_to t) + (do runnable \ isRunnable t; + y \ assert runnable; + ThreadDecls_H.switchToThread t + od)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) unfolding guarded_switch_to_def isRunnable_def apply simp apply (rule corres_guard_imp) @@ -1544,35 +1523,50 @@ lemma guarded_switch_to_chooseThread_fragment_corres: simp: pred_tcb_at' runnable'_def all_invs_but_ct_idle_or_in_cur_domain'_def) done +lemma Max_prio_helper: + "ready_queues_relation s s' + \ Max {prio. ready_queues s d prio \ []} + = Max {prio. \ tcbQueueEmpty (ksReadyQueues s' (d, prio))}" + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def tcbQueueEmpty_def) + apply (rule Max_eq_if) + apply fastforce + apply fastforce + apply (fastforce dest: heap_path_head) + apply clarsimp + apply (drule_tac x=d in spec) + apply (drule_tac x=b in spec) + apply force + done + lemma bitmap_lookup_queue_is_max_non_empty: - "\ valid_queues s'; (s, s') \ state_relation; invs s; + "\ valid_bitmaps s'; (s, s') \ state_relation; invs s; ksReadyQueuesL1Bitmap s' (ksCurDomain s') \ 0 \ - \ ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s') = - max_non_empty_queue (ready_queues s (cur_domain s))" - unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_queues_def - by (clarsimp simp add: max_non_empty_queue_def lookupBitmapPriority_Max_eqI - state_relation_def ready_queues_relation_def) + \ the (tcbQueueHead (ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s'))) + = hd (max_non_empty_queue (ready_queues s (cur_domain s)))" + apply (clarsimp simp: max_non_empty_queue_def valid_bitmaps_def lookupBitmapPriority_Max_eqI) + apply (frule curdomain_relation) + apply (drule state_relation_ready_queues_relation) + apply (simp add: Max_prio_helper) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (frule (2) bitmapL1_zero_ksReadyQueues[THEN arg_cong_Not, THEN iffD1]) + apply clarsimp + apply (cut_tac P="\x. 
\ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', x))" + in setcomp_Max_has_prop) + apply fastforce + apply (clarsimp simp: ready_queues_relation_def Let_def list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x="ksCurDomain s'" in spec) + apply (drule_tac x="Max {prio. \ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', prio))}" + in spec) + using heap_path_head tcbQueueEmpty_def + by fastforce lemma ksReadyQueuesL1Bitmap_return_wp: "\\s. P (ksReadyQueuesL1Bitmap s d) s \ getReadyQueuesL1Bitmap d \\rv s. P rv s\" unfolding getReadyQueuesL1Bitmap_def by wp -lemma ksReadyQueuesL1Bitmap_st_tcb_at': - "\ ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0 ; valid_queues s \ - \ st_tcb_at' runnable' (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" - apply (drule bitmapQ_from_bitmap_lookup; clarsimp simp: valid_queues_def) - apply (clarsimp simp add: valid_bitmapQ_bitmapQ_simp) - apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)") - apply simp - apply (simp add: valid_queues_no_bitmap_def) - apply (erule_tac x="ksCurDomain s" in allE) - apply (erule_tac x="lookupBitmapPriority (ksCurDomain s) s" in allE) - apply (clarsimp simp: st_tcb_at'_def) - apply (erule obj_at'_weaken) - apply simp - done - lemma curDomain_or_return_0: "\ \P\ curDomain \\rv s. Q rv s \; \s. P s \ ksCurDomain s \ maxDomain \ \ \P\ if 1 < numDomains then curDomain else return 0 \\rv s. Q rv s \" @@ -1584,52 +1578,72 @@ lemma invs_no_cicd_ksCurDomain_maxDomain': "invs_no_cicd' s \ ksCurDomain s \ maxDomain" unfolding invs_no_cicd'_def by simp +crunches curDomain + for valid_bitmaps[wp]: valid_bitmaps + lemma chooseThread_corres: - "corres dc (invs and valid_sched) (invs_no_cicd') - choose_thread chooseThread" (is "corres _ ?PREI ?PREH _ _") + "corres dc (invs and valid_sched) invs_no_cicd' choose_thread chooseThread" + (is "corres _ ?PREI ?PREH _ _") proof - + + (* if we only have one domain, we are in it *) + have one_domain_case: + "\s. \ invs_no_cicd' s; numDomains \ 1 \ \ ksCurDomain s = 0" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) + show ?thesis - unfolding choose_thread_def chooseThread_def - apply (simp only: return_bind Let_def) - apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) - apply (rule corres_guard_imp) - apply (rule corres_split[OF curDomain_corres']) - apply clarsimp - apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) - apply (erule corres_if2[OF sym]) - apply (rule switchToIdleThread_corres) - apply (rule corres_symb_exec_r) - apply (rule corres_symb_exec_r) - apply (rule_tac - P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) \ - st_tcb_at runnable (hd (max_non_empty_queue queues)) s" and - P'="\s. 
(?PREH s \ st_tcb_at' runnable' (hd queue) s) \ - l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) \ - l1 \ 0 \ - queue = ksReadyQueues s (ksCurDomain s, - lookupBitmapPriority (ksCurDomain s) s)" and - F="hd queue = hd (max_non_empty_queue queues)" in corres_req) - apply (fastforce dest!: invs_no_cicd'_queues simp: bitmap_lookup_queue_is_max_non_empty) - apply clarsimp - apply (rule corres_guard_imp) - apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) - apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ - apply (clarsimp simp: if_apply_def2) - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) - apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ - apply (fastforce simp: invs_no_cicd'_def) - apply (clarsimp simp: valid_sched_def DetSchedInvs_AI.valid_queues_def max_non_empty_queue_def) - apply (erule_tac x="cur_domain s" in allE) - apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) - apply (case_tac "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \ []})") - apply (clarsimp) - apply (subgoal_tac - "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \ []}) \ []") - apply (fastforce elim!: setcomp_Max_has_prop)+ - apply (simp add: invs_no_cicd_ksCurDomain_maxDomain') - apply (clarsimp dest!: invs_no_cicd'_queues) - apply (fastforce intro: ksReadyQueuesL1Bitmap_st_tcb_at') - done + supply if_split[split del] + apply (clarsimp simp: choose_thread_def chooseThread_def) + apply add_ready_qs_runnable + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply fastforce + apply (simp only: return_bind Let_def) + apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) + apply (rule corres_guard_imp) + apply (rule corres_split[OF curDomain_corres']) + apply clarsimp + apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) + apply (erule corres_if2[OF sym]) + apply (rule switchToIdleThread_corres) + apply (rule corres_symb_exec_r) + apply (rule corres_symb_exec_r) + apply (rule_tac P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) + \ st_tcb_at runnable (hd (max_non_empty_queue queues)) s" + and P'="\s. ?PREH s \ l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) + \ l1 \ 0 + \ queue = ksReadyQueues s (ksCurDomain s, + lookupBitmapPriority (ksCurDomain s) s)" + and F="the (tcbQueueHead queue) = hd (max_non_empty_queue queues)" + in corres_req) + apply (fastforce simp: bitmap_lookup_queue_is_max_non_empty + all_invs_but_ct_idle_or_in_cur_domain'_def) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) + apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ + apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) + apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ + apply (clarsimp simp: valid_sched_def max_non_empty_queue_def valid_queues_def split: if_splits) + apply (erule_tac x="cur_domain s" in allE) + apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) + apply (case_tac "ready_queues s (cur_domain s) + (Max {prio. ready_queues s (cur_domain s) prio + \ []})") + apply (clarsimp) + apply (subgoal_tac "ready_queues s (cur_domain s) + (Max {prio. 
ready_queues s (cur_domain s) prio \ []}) + \ []") + apply fastforce + apply (fastforce elim!: setcomp_Max_has_prop) + apply fastforce + apply clarsimp + apply (frule invs_no_cicd_ksCurDomain_maxDomain') + apply (prop_tac "valid_bitmaps s") + apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def) + apply (fastforce dest: one_domain_case split: if_splits) + done qed lemma thread_get_comm: "do x \ thread_get f p; y \ gets g; k x y od = @@ -1680,12 +1694,6 @@ lemma nextDomain_invs_no_cicd': all_invs_but_ct_idle_or_in_cur_domain'_def) done -lemma bind_dummy_ret_val: - "do y \ a; - b - od = do a; b od" - by simp - lemma scheduleChooseNewThread_fragment_corres: "corres dc (invs and valid_sched and (\s. scheduler_action s = choose_new_thread)) (invs' and (\s. ksSchedulerAction s = ChooseNewThread)) (do _ \ when (domainTime = 0) next_domain; @@ -1724,7 +1732,7 @@ lemma isHighestPrio_corres: assumes "d' = d" assumes "p' = p" shows - "corres ((=)) \ valid_queues + "corres ((=)) \ valid_bitmaps (gets (is_highest_prio d p)) (isHighestPrio d' p')" using assms @@ -1734,18 +1742,16 @@ lemma isHighestPrio_corres: apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) apply (rule corres_if_r'[where P'="\_. True",rotated]) apply (rule_tac corres_symb_exec_r) - apply (rule_tac - P="\s. q = ready_queues s d - " and - P'="\s. valid_queues s \ - l1 = ksReadyQueuesL1Bitmap s d \ - l1 \ 0 \ hprio = lookupBitmapPriority d s" and - F="hprio = Max {prio. q prio \ []}" in corres_req) - apply (elim conjE) - apply (clarsimp simp: valid_queues_def) - apply (subst lookupBitmapPriority_Max_eqI; blast?) - apply (fastforce simp: ready_queues_relation_def dest!: state_relationD) - apply fastforce + apply (rule_tac P="\s. q = ready_queues s d" + and P'="\s. valid_bitmaps s \ l1 = ksReadyQueuesL1Bitmap s d \ + l1 \ 0 \ hprio = lookupBitmapPriority d s" + and F="hprio = Max {prio. q prio \ []}" in corres_req) + apply (elim conjE) + apply (clarsimp simp: valid_bitmaps_def) + apply (subst lookupBitmapPriority_Max_eqI; blast?) + apply (fastforce dest: state_relation_ready_queues_relation Max_prio_helper[where d=d] + simp: tcbQueueEmpty_def) + apply fastforce apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imps ksReadyQueuesL1Bitmap_return_wp)+ done @@ -1756,9 +1762,8 @@ crunch inv[wp]: curDomain P crunch inv[wp]: scheduleSwitchThreadFastfail P lemma setSchedulerAction_invs': (* not in wp set, clobbered by ssa_wp *) - "\\s. invs' s \ setSchedulerAction ChooseNewThread \\_. 
invs' \" + "setSchedulerAction ChooseNewThread \invs' \" by (wpsimp simp: invs'_def cur_tcb'_def valid_state'_def valid_irq_node'_def ct_not_inQ_def - valid_queues_def valid_queues_no_bitmap_def valid_queues'_def ct_idle_or_in_cur_domain'_def) lemma scheduleChooseNewThread_corres: @@ -1788,6 +1793,46 @@ lemma ethread_get_when_corres: apply wpsimp+ done +lemma tcb_sched_enqueue_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_enqueue t \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_enqueue_def set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_append_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_append tcb_ptr \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_append_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_enqueue_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_enqueue t \ready_qs_distinct\ " + unfolding tcb_sched_action_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +lemma tcb_sched_append_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_append t \ready_qs_distinct\ " + unfolding tcb_sched_action_def tcb_sched_append_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +crunches set_scheduler_action + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps simp: in_correct_ready_q_def ready_qs_distinct_def) + +crunches reschedule_required + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (ignore: tcb_sched_action wp: crunch_wps) + lemma schedule_corres: "corres dc (invs and valid_sched and valid_list) invs' (Schedule_A.schedule) ThreadDecls_H.schedule" supply ethread_get_wp[wp del] @@ -1816,7 +1861,7 @@ lemma schedule_corres: apply (rule corres_split[OF thread_get_isRunnable_corres]) apply (rule corres_split) apply (rule corres_when, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule scheduleChooseNewThread_corres, simp) apply (wp thread_get_wp' tcbSchedEnqueue_invs' hoare_vcg_conj_lift hoare_drop_imps | clarsimp)+ @@ -1825,7 +1870,7 @@ lemma schedule_corres: rename_tac was_running wasRunning) apply (rule corres_split) apply (rule corres_when, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_split[OF getIdleThread_corres], rename_tac it it') apply (rule_tac F="was_running \ ct \ it" in corres_gen_asm) apply (rule corres_split) @@ -1841,7 +1886,7 @@ lemma schedule_corres: apply (rule corres_split[OF curDomain_corres]) apply (rule corres_split[OF isHighestPrio_corres]; simp only:) apply (rule corres_if, simp) - apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) apply (simp, fold dc_def) apply (rule corres_split) apply (rule setSchedulerAction_corres; simp) @@ -1855,7 +1900,7 @@ lemma schedule_corres: apply (wp tcb_sched_action_enqueue_valid_blocked hoare_vcg_all_lift enqueue_thread_queued) apply (wp tcbSchedEnqueue_invs'_not_ResumeCurrentThread) apply (rule corres_if, fastforce) - apply (rule corres_split[OF 
tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (simp, fold dc_def) apply (rule corres_split) apply (rule setSchedulerAction_corres; simp) @@ -1887,7 +1932,8 @@ lemma schedule_corres: in hoare_post_imp, fastforce) apply (wp add: tcb_sched_action_enqueue_valid_blocked_except tcbSchedEnqueue_invs'_not_ResumeCurrentThread thread_get_wp - del: gets_wp)+ + del: gets_wp + | strengthen valid_objs'_valid_tcbs')+ apply (clarsimp simp: conj_ac if_apply_def2 cong: imp_cong conj_cong del: hoare_gets) apply (wp gets_wp)+ @@ -1910,18 +1956,17 @@ lemma schedule_corres: weak_valid_sched_action_def tcb_at_is_etcb_at tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]] valid_blocked_except_def valid_blocked_def) - apply (clarsimp simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) + apply (fastforce simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) done (* choose new thread case *) apply (intro impI conjI allI tcb_at_invs | fastforce simp: invs_def cur_tcb_def valid_etcbs_def valid_sched_def st_tcb_at_def obj_at_def valid_state_def weak_valid_sched_action_def not_cur_thread_def)+ - apply (simp add: valid_sched_def valid_blocked_def valid_blocked_except_def) done (* haskell final subgoal *) - apply (clarsimp simp: if_apply_def2 invs'_def valid_state'_def + apply (clarsimp simp: if_apply_def2 invs'_def valid_state'_def valid_sched_def cong: imp_cong split: scheduler_action.splits) apply (fastforce simp: cur_tcb'_def valid_pspace'_def) done @@ -1935,11 +1980,8 @@ proof - apply (simp add: setSchedulerAction_def) apply wp apply (clarsimp simp add: invs'_def valid_state'_def cur_tcb'_def - Invariants_H.valid_queues_def - state_refs_of'_def ps_clear_def - valid_irq_node'_def valid_queues'_def - tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def + state_refs_of'_def ps_clear_def valid_irq_node'_def + tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def bitmapQ_defs cong: option.case_cong) done qed @@ -1979,7 +2021,7 @@ lemma switchToThread_ct_not_queued_2: apply (simp add: Thread_H.switchToThread_def) apply (wp) apply (simp add: RISCV64_H.switchToThread_def setCurThread_def) - apply (wp tcbSchedDequeue_not_tcbQueued | simp )+ + apply (wp tcbSchedDequeue_not_tcbQueued hoare_drop_imp | simp )+ done lemma setCurThread_obj_at': @@ -1993,11 +2035,12 @@ proof - qed lemma switchToIdleThread_ct_not_queued_no_cicd': - "\ invs_no_cicd' \ switchToIdleThread \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s \" + "\invs_no_cicd'\ switchToIdleThread \\_ s. obj_at' (Not \ tcbQueued) (ksCurThread s) s \" apply (simp add: Thread_H.switchToIdleThread_def) apply (wp setCurThread_obj_at') - apply (rule idle'_not_tcbQueued') - apply (simp add: invs_no_cicd'_def)+ + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x="ksIdleThread s" in spec) + apply (clarsimp simp: invs_no_cicd'_def valid_idle'_def st_tcb_at'_def idle_tcb'_def obj_at'_def) done lemma switchToIdleThread_activatable_2[wp]: @@ -2014,7 +2057,7 @@ lemma switchToThread_tcb_in_cur_domain': ThreadDecls_H.switchToThread thread \\y s. 
tcb_in_cur_domain' (ksCurThread s) s\" apply (simp add: Thread_H.switchToThread_def setCurThread_def) - apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued) + apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued hoare_drop_imps) done lemma chooseThread_invs_no_cicd'_posts: (* generic version *) @@ -2036,11 +2079,15 @@ proof - by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) show ?thesis - unfolding chooseThread_def Let_def curDomain_def + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" + in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) @@ -2054,12 +2101,10 @@ proof - apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv switchToThread_ct_not_queued_2 assert_inv hoare_disjI2 switchToThread_tcb_in_cur_domain') - apply clarsimp - apply (clarsimp dest!: invs_no_cicd'_queues - simp: valid_queues_def lookupBitmapPriority_def[symmetric]) - apply (drule (3) lookupBitmapPriority_obj_at') - apply normalise_obj_at' - apply (fastforce simp: tcb_in_cur_domain'_def inQ_def elim: obj_at'_weaken) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ done qed @@ -2098,19 +2143,26 @@ proof - (* FIXME this is almost identical to the chooseThread_invs_no_cicd'_posts proof, can generalise? *) show ?thesis - unfolding chooseThread_def Let_def curDomain_def + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. 
invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" + in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) - apply (simp, wp (once) switchToIdleThread_invs_no_cicd', simp) + apply (simp, wp switchToIdleThread_invs_no_cicd', simp) (* we have a thread to switch to *) apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv) - apply (clarsimp dest!: invs_no_cicd'_queues simp: valid_queues_def) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) apply (fastforce elim: bitmapQ_from_bitmap_lookup simp: lookupBitmapPriority_def) apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ done @@ -2137,7 +2189,7 @@ lemma schedule_invs': "\invs'\ ThreadDecls_H.schedule \\rv. invs'\" supply ssa_wp[wp del] apply (simp add: schedule_def) - apply (rule_tac hoare_seq_ext, rename_tac t) + apply (rule_tac bind_wp, rename_tac t) apply (wp, wpc) \ \action = ResumeCurrentThread\ apply (wp)[1] @@ -2147,7 +2199,7 @@ lemma schedule_invs': apply (wpsimp wp: scheduleChooseNewThread_invs' ssa_invs' chooseThread_invs_no_cicd' setSchedulerAction_invs' setSchedulerAction_direct switchToThread_tcb_in_cur_domain' switchToThread_ct_not_queued_2 - | wp hoare_disjI2[where Q="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] + | wp hoare_disjI2[where R="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] | strengthen invs'_invs_no_cicd @@ -2218,7 +2270,7 @@ lemma schedule_ct_activatable'[wp]: "\invs'\ ThreadDecls_H.schedule \\_. ct_in_state' activatable'\" supply ssa_wp[wp del] apply (simp add: schedule_def) - apply (rule_tac hoare_seq_ext, rename_tac t) + apply (rule_tac bind_wp, rename_tac t) apply (wp, wpc) \ \action = ResumeCurrentThread\ apply (wp)[1] @@ -2260,12 +2312,15 @@ lemma sbn_sch_act_sane: lemma possibleSwitchTo_corres: "corres dc - (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t - and pspace_aligned and pspace_distinct) - (valid_queues and valid_queues' and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and valid_objs') - (possible_switch_to t) - (possibleSwitchTo t)" + (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t + and in_correct_ready_q and ready_qs_distinct and pspace_aligned and pspace_distinct) + ((\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers and valid_objs') + (possible_switch_to t) (possibleSwitchTo t)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) supply ethread_get_wp[wp del] apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) apply (clarsimp simp: state_relation_def) @@ -2278,12 +2333,12 @@ lemma possibleSwitchTo_corres: apply (clarsimp simp: etcb_relation_def) apply (rule corres_split[OF getSchedulerAction_corres]) apply (rule corres_if, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_if, simp) apply (case_tac action; simp) apply (rule corres_split[OF rescheduleRequired_corres]) - apply (rule tcbSchedEnqueue_corres) - apply (wp rescheduleRequired_valid_queues'_weak)+ + apply (rule tcbSchedEnqueue_corres, simp) + apply (wp reschedule_required_valid_queues | strengthen valid_objs'_valid_tcbs')+ apply (rule setSchedulerAction_corres, simp) apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imp[where f="ethread_get a b" for a b])+ @@ -2292,7 +2347,7 @@ lemma possibleSwitchTo_corres: apply (clarsimp simp: valid_sched_def invs_def valid_state_def cur_tcb_def st_tcb_at_tcb_at valid_sched_action_def weak_valid_sched_action_def tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]]) - apply (simp add: tcb_at_is_etcb_at) + apply (fastforce simp: tcb_at_is_etcb_at) done end diff --git a/proof/refine/RISCV64/StateRelation.thy b/proof/refine/RISCV64/StateRelation.thy index 18b70fc2b3..c02b8cbbb0 100644 --- a/proof/refine/RISCV64/StateRelation.thy +++ b/proof/refine/RISCV64/StateRelation.thy @@ -155,13 +155,20 @@ definition tcb_relation :: "Structures_A.tcb \ Structures_H.tcb \ tcb_bound_notification tcb = tcbBoundNotification tcb' \ tcb_mcpriority tcb = tcbMCP tcb'" +\ \ + A pair of objects @{term "(obj, obj')"} should satisfy the following relation when, under further + mild assumptions, a @{term corres_underlying} lemma for @{term "set_object obj"} + and @{term "setObject obj'"} can be stated: see setObject_other_corres in KHeap_R. + + TCBs do not satisfy this relation because the tcbSchedPrev and tcbSchedNext fields of a TCB are + used to model the ready queues, and so an update to such a field would correspond to an update + to a ready queue (see ready_queues_relation below).\ definition other_obj_relation :: "Structures_A.kernel_object \ Structures_H.kernel_object \ bool" where "other_obj_relation obj obj' \ (case (obj, obj') of - (TCB tcb, KOTCB tcb') \ tcb_relation tcb tcb' - | (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' + (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' | (Notification ntfn, KONotification ntfn') \ ntfn_relation ntfn ntfn' | (ArchObj (RISCV64_A.ASIDPool ap), KOArch (KOASIDPool ap')) \ asid_pool_relation ap ap' | _ \ False)" @@ -188,22 +195,28 @@ primrec aobj_relation_cuts :: "RISCV64_A.arch_kernel_obj \ machine_w | "aobj_relation_cuts (PageTable pt) x = (\y. 
(x + (ucast y << pteBits), pte_relation y)) ` UNIV" +definition tcb_relation_cut :: "Structures_A.kernel_object \ kernel_object \ bool" where + "tcb_relation_cut obj obj' \ + case (obj, obj') of + (TCB t, KOTCB t') \ tcb_relation t t' + | _ \ False" + primrec obj_relation_cuts :: "Structures_A.kernel_object \ machine_word \ obj_relation_cuts" where "obj_relation_cuts (CNode sz cs) x = (if well_formed_cnode_n sz cs then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} else {(x, \\)})" -| "obj_relation_cuts (TCB tcb) x = {(x, other_obj_relation)}" +| "obj_relation_cuts (TCB tcb) x = {(x, tcb_relation_cut)}" | "obj_relation_cuts (Endpoint ep) x = {(x, other_obj_relation)}" | "obj_relation_cuts (Notification ntfn) x = {(x, other_obj_relation)}" | "obj_relation_cuts (ArchObj ao) x = aobj_relation_cuts ao x" - lemma obj_relation_cuts_def2: "obj_relation_cuts ko x = (case ko of CNode sz cs \ if well_formed_cnode_n sz cs then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} else {(x, \\)} + | TCB tcb \ {(x, tcb_relation_cut)} | ArchObj (PageTable pt) \ (\y. (x + (ucast y << pteBits), pte_relation y)) ` UNIV | ArchObj (DataPage dev sz) \ {(x + (n << pageBits), \_ obj. obj =(if dev then KOUserDataDevice else KOUserData)) @@ -216,6 +229,7 @@ lemma obj_relation_cuts_def3: "obj_relation_cuts ko x = (case a_type ko of ACapTable n \ {(cte_map (x, y), cte_relation y) | y. length y = n} + | ATCB \ {(x, tcb_relation_cut)} | AArch APageTable \ (\y. (x + (ucast y << pteBits), pte_relation y)) ` UNIV | AArch (AUserData sz) \ {(x + (n << pageBits), \_ obj. obj = KOUserData) | n . n < 2 ^ (pageBitsForSize sz - pageBits) } @@ -230,6 +244,7 @@ definition is_other_obj_relation_type :: "a_type \ bool" where "is_other_obj_relation_type tp \ case tp of ACapTable n \ False + | ATCB \ False | AArch APageTable \ False | AArch (AUserData _) \ False | AArch (ADeviceData _) \ False @@ -240,6 +255,10 @@ lemma is_other_obj_relation_type_CapTable: "\ is_other_obj_relation_type (ACapTable n)" by (simp add: is_other_obj_relation_type_def) +lemma is_other_obj_relation_type_TCB: + "\ is_other_obj_relation_type ATCB" + by (simp add: is_other_obj_relation_type_def) + lemma is_other_obj_relation_type_UserData: "\ is_other_obj_relation_type (AArch (AUserData sz))" unfolding is_other_obj_relation_type_def by simp @@ -279,10 +298,55 @@ primrec sched_act_relation :: "Deterministic_A.scheduler_action \ sc "sched_act_relation choose_new_thread a' = (a' = ChooseNewThread)" | "sched_act_relation (switch_thread x) a' = (a' = SwitchToThread x)" -definition ready_queues_relation :: - "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) \ - (domain \ priority \ KernelStateData_H.ready_queue) \ bool" where - "ready_queues_relation qs qs' \ \d p. (qs d p = qs' (d, p))" +definition queue_end_valid :: "obj_ref list \ tcb_queue \ bool" where + "queue_end_valid ts q \ + (ts = [] \ tcbQueueEnd q = None) \ (ts \ [] \ tcbQueueEnd q = Some (last ts))" + +definition prev_queue_head :: "tcb_queue \ (obj_ref \ 'a) \ bool" where + "prev_queue_head q prevs \ \head. 
tcbQueueHead q = Some head \ prevs head = None" + +lemma prev_queue_head_heap_upd: + "\prev_queue_head q prevs; Some r \ tcbQueueHead q\ \ prev_queue_head q (prevs(r := x))" + by (clarsimp simp: prev_queue_head_def) + +definition list_queue_relation :: + "obj_ref list \ tcb_queue \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) \ bool" + where + "list_queue_relation ts q nexts prevs \ + heap_ls nexts (tcbQueueHead q) ts \ queue_end_valid ts q \ prev_queue_head q prevs" + +lemma list_queue_relation_nil: + "list_queue_relation ts q nexts prevs \ ts = [] \ tcbQueueEmpty q" + by (fastforce dest: heap_path_head simp: tcbQueueEmpty_def list_queue_relation_def) + +definition ready_queue_relation :: + "Deterministic_A.domain \ Structures_A.priority + \ Deterministic_A.ready_queue \ ready_queue + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (obj_ref \ bool) \ bool" + where + "ready_queue_relation d p q q' nexts prevs flag \ + list_queue_relation q q' nexts prevs + \ (\t. flag t \ t \ set q) + \ (d > maxDomain \ p > maxPriority \ tcbQueueEmpty q')" + +definition ready_queues_relation_2 :: + "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) + \ (domain \ priority \ ready_queue) + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (domain \ priority \ obj_ref \ bool) \ bool" + where + "ready_queues_relation_2 qs qs' nexts prevs inQs \ + \d p. let q = qs d p; q' = qs' (d, p); flag = inQs d p in + ready_queue_relation d p q q' nexts prevs flag" + +abbreviation ready_queues_relation :: "det_state \ kernel_state \ bool" where + "ready_queues_relation s s' \ + ready_queues_relation_2 + (ready_queues s) (ksReadyQueues s') (tcbSchedNexts_of s') (tcbSchedPrevs_of s') + (\d p. inQ d p |< tcbs_of' s')" + +lemmas ready_queues_relation_def = ready_queues_relation_2_def definition ghost_relation :: "Structures_A.kheap \ (machine_word \ vmpage_size) \ (machine_word \ nat) \ bool" where @@ -337,6 +401,8 @@ lemma obj_relation_cutsE: \sz cs z cap cte. \ ko = CNode sz cs; well_formed_cnode_n sz cs; y = cte_map (x, z); ko' = KOCTE cte; cs z = Some cap; cap_relation cap (cteCap cte) \ \ R; + \tcb tcb'. \ y = x; ko = TCB tcb; ko' = KOTCB tcb'; tcb_relation tcb tcb' \ + \ R; \pt (z :: pt_index) pte'. 
\ ko = ArchObj (PageTable pt); y = x + (ucast z << pteBits); ko' = KOArch (KOPTE pte'); pte_relation' (pt z) pte' \ \ R; @@ -346,8 +412,9 @@ lemma obj_relation_cutsE: \ y = x; other_obj_relation ko ko'; is_other_obj_relation_type (a_type ko) \ \ R \ \ R" by (force simp: obj_relation_cuts_def2 is_other_obj_relation_type_def a_type_def - cte_relation_def pte_relation_def - split: Structures_A.kernel_object.splits if_splits RISCV64_A.arch_kernel_obj.splits) + tcb_relation_cut_def cte_relation_def pte_relation_def + split: Structures_A.kernel_object.splits kernel_object.splits if_splits + RISCV64_A.arch_kernel_obj.splits) lemma eq_trans_helper: "\ x = y; P y = Q \ \ P x = Q" @@ -414,7 +481,7 @@ definition state_relation :: "(det_state \ kernel_state) set" where pspace_relation (kheap s) (ksPSpace s') \ ekheap_relation (ekheap s) (ksPSpace s') \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') - \ ready_queues_relation (ready_queues s) (ksReadyQueues s') + \ ready_queues_relation s s' \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') @@ -436,6 +503,10 @@ lemma curthread_relation: "(a, b) \ state_relation \ ksCurThread b = cur_thread a" by (simp add: state_relation_def) +lemma curdomain_relation[elim!]: + "(s, s') \ state_relation \ cur_domain s = ksCurDomain s'" + by (clarsimp simp: state_relation_def) + lemma state_relation_pspace_relation[elim!]: "(s,s') \ state_relation \ pspace_relation (kheap s) (ksPSpace s')" by (simp add: state_relation_def) @@ -444,12 +515,24 @@ lemma state_relation_ekheap_relation[elim!]: "(s,s') \ state_relation \ ekheap_relation (ekheap s) (ksPSpace s')" by (simp add: state_relation_def) +lemma state_relation_sched_act_relation[elim!]: + "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" + by (clarsimp simp: state_relation_def) + +lemma state_relation_ready_queues_relation[elim!]: + "(s, s') \ state_relation \ ready_queues_relation s s'" + by (simp add: state_relation_def) + +lemma state_relation_idle_thread[elim!]: + "(s, s') \ state_relation \ idle_thread s = ksIdleThread s'" + by (clarsimp simp: state_relation_def) + lemma state_relationD: "(s, s') \ state_relation \ pspace_relation (kheap s) (ksPSpace s') \ ekheap_relation (ekheap s) (ksPSpace s') \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') \ - ready_queues_relation (ready_queues s) (ksReadyQueues s') \ + ready_queues_relation s s' \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') \ @@ -471,7 +554,7 @@ lemma state_relationE [elim?]: and rl: "\ pspace_relation (kheap s) (ksPSpace s'); ekheap_relation (ekheap s) (ksPSpace s'); sched_act_relation (scheduler_action s) (ksSchedulerAction s'); - ready_queues_relation (ready_queues s) (ksReadyQueues s'); + ready_queues_relation s s'; ghost_relation (kheap s) (gsUserPages s') (gsCNodes s'); cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) (ctes_of s'); diff --git a/proof/refine/RISCV64/SubMonad_R.thy b/proof/refine/RISCV64/SubMonad_R.thy index de45a90d91..70af5a5e61 100644 --- a/proof/refine/RISCV64/SubMonad_R.thy +++ b/proof/refine/RISCV64/SubMonad_R.thy @@ -27,6 +27,10 @@ lemma corres_machine_op: apply (simp_all add: state_relation_def swp_def) done +lemmas corres_machine_op_Id = corres_machine_op[OF 
corres_Id] +lemmas corres_machine_op_Id_eq[corres_term] = corres_machine_op_Id[where r="(=)"] +lemmas corres_machine_op_Id_dc[corres_term] = corres_machine_op_Id[where r="dc::unit \ unit \ bool"] + lemma doMachineOp_mapM: assumes "\x. empty_fail (m x)" shows "doMachineOp (mapM m l) = mapM (doMachineOp \ m) l" diff --git a/proof/refine/RISCV64/Syscall_R.thy b/proof/refine/RISCV64/Syscall_R.thy index c542f82fe2..b645fb63af 100644 --- a/proof/refine/RISCV64/Syscall_R.thy +++ b/proof/refine/RISCV64/Syscall_R.thy @@ -337,7 +337,7 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: apply (simp add: threadSet_def) apply wp apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp hoare_vcg_all_lift)+ apply (rename_tac word) apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" @@ -351,15 +351,13 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: lemma setDomain_corres: "corres dc (valid_etcbs and valid_sched and tcb_at tptr and pspace_aligned and pspace_distinct) - (invs' and sch_act_simple - and tcb_at' tptr and (\s. new_dom \ maxDomain)) - (set_domain tptr new_dom) - (setDomain tptr new_dom)" + (invs' and sch_act_simple and tcb_at' tptr and (\s. new_dom \ maxDomain)) + (set_domain tptr new_dom) (setDomain tptr new_dom)" apply (rule corres_gen_asm2) apply (simp add: set_domain_def setDomain_def thread_set_domain_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) - apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) apply (rule corres_split) apply (rule ethread_set_corres; simp) apply (clarsimp simp: etcb_relation_def) @@ -368,26 +366,38 @@ lemma setDomain_corres: apply (rule corres_split) apply clarsimp apply (rule corres_when[OF refl]) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_when[OF refl]) apply (rule rescheduleRequired_corres) - apply ((wp hoare_drop_imps hoare_vcg_conj_lift | clarsimp| assumption)+)[5] - apply clarsimp - apply (rule_tac Q="\_. valid_objs' and valid_queues' and valid_queues and - (\s. sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" - in hoare_strengthen_post[rotated]) - apply (auto simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def)[1] - apply (wp threadSet_valid_objs' threadSet_valid_queues'_no_state - threadSet_valid_queues_no_state - threadSet_pred_tcb_no_state | simp)+ - apply (rule_tac Q = "\r s. invs' s \ (\p. tptr \ set (ksReadyQueues s p)) \ sch_act_simple s - \ tcb_at' tptr s" in hoare_strengthen_post[rotated]) - apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) - apply (clarsimp simp:valid_tcb'_def) - apply (drule(1) bspec) - apply (clarsimp simp:tcb_cte_cases_def cteSizeBits_def) + apply (wpsimp wp: hoare_drop_imps) + apply ((wpsimp wp: hoare_drop_imps | strengthen valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: gts_wp) + apply wpsimp + apply ((wpsimp wp: hoare_vcg_imp_lift' ethread_set_not_queued_valid_queues hoare_vcg_all_lift + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply (rule_tac Q="\_. valid_objs' and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct' + and (\s. 
sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" + in hoare_strengthen_post[rotated]) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def) + apply (wpsimp wp: threadSet_valid_objs' threadSet_sched_pointers + threadSet_valid_sched_pointers)+ + apply (rule_tac Q="\_ s. valid_queues s \ not_queued tptr s + \ pspace_aligned s \ pspace_distinct s \ valid_etcbs s + \ weak_valid_sched_action s" + in hoare_post_imp) + apply (fastforce simp: pred_tcb_at_def obj_at_def) + apply (wpsimp wp: tcb_dequeue_not_queued) + apply (rule_tac Q = "\_ s. invs' s \ obj_at' (Not \ tcbQueued) tptr s \ sch_act_simple s + \ tcb_at' tptr s" + in hoare_strengthen_post[rotated]) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) + apply (clarsimp simp: valid_tcb'_def obj_at'_def) + apply (drule (1) bspec) + apply (clarsimp simp: tcb_cte_cases_def cteSizeBits_def) apply fastforce - apply (wp hoare_vcg_all_lift Tcb_R.tcbSchedDequeue_not_in_queue)+ + apply (wp hoare_vcg_all_lift tcbSchedDequeue_not_queued)+ apply clarsimp apply (frule tcb_at_is_etcb_at) apply simp+ @@ -395,12 +405,11 @@ lemma setDomain_corres: simp: valid_sched_def valid_sched_action_def) done - lemma performInvocation_corres: "\ inv_relation i i'; call \ block \ \ corres (dc \ (=)) (einvs and valid_invocation i - and simple_sched_action + and schact_is_rct and ct_active and (\s. (\w w2 b c. i = Invocations_A.InvokeEndpoint w w2 b c) \ st_tcb_at simple (cur_thread s) s)) (invs' and sch_act_simple and valid_invocation' i' and ct_active') @@ -449,7 +458,7 @@ lemma performInvocation_corres: apply (clarsimp simp: liftME_def) apply (rule corres_guard_imp) apply (erule invokeTCB_corres) - apply (simp)+ + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] \ \domain cap\ apply (clarsimp simp: invoke_domain_def) apply (rule corres_guard_imp) @@ -464,7 +473,7 @@ lemma performInvocation_corres: apply assumption apply (rule corres_trivial, simp add: returnOk_def) apply wp+ - apply (clarsimp+)[2] + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) apply (rule corres_guard_imp, rule performIRQControl_corres, simp+) apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) @@ -684,7 +693,7 @@ proof - apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) apply (rule PUC,simp) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (wp hoare_vcg_all_lift static_imp_wp | simp add:ball_conj_distrib)+ + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ done qed @@ -762,90 +771,71 @@ lemma doReply_invs[wp]: "\tcb_at' t and tcb_at' t' and cte_wp_at' (\cte. \grant. cteCap cte = ReplyCap t False grant) slot and invs' and sch_act_simple\ - doReplyTransfer t' t slot grant - \\rv. invs'\" + doReplyTransfer t' t slot grant + \\_. invs'\" apply (simp add: doReplyTransfer_def liftM_def) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (rule hoare_seq_ext [OF _ assert_sp]) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ gts_sp']) + apply (rule bind_wp [OF _ assert_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp, wpc) - apply (wp) + apply wp apply (wp (once) sts_invs_minor'') - apply (simp) + apply simp apply (wp (once) sts_st_tcb') - apply (wp)[1] - apply (rule_tac Q="\rv s. invs' s - \ t \ ksIdleThread s - \ st_tcb_at' awaiting_reply' t s" + apply wp + apply (rule_tac Q="\_ s. 
invs' s \ t \ ksIdleThread s \ st_tcb_at' awaiting_reply' t s" in hoare_post_imp) - apply (clarsimp) - apply (frule_tac t=t in invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply clarsimp apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) - apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) - apply (clarsimp | drule(1) obj_at_conj')+ apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) apply (drule(1) pred_tcb_at_conj') apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") - apply (clarsimp) + apply clarsimp apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) apply (wp cteDeleteOne_reply_pred_tcb_at)+ - apply (clarsimp) + apply clarsimp apply (rule_tac Q="\_. (\s. t \ ksIdleThread s) - and cte_wp_at' (\cte. \grant. cteCap cte = capability.ReplyCap t False grant) slot" - in hoare_strengthen_post [rotated]) + and cte_wp_at' (\cte. \grant. cteCap cte + = capability.ReplyCap t False grant) slot" + in hoare_strengthen_post [rotated]) apply (fastforce simp: cte_wp_at'_def) - apply (wp) + apply wp apply (rule hoare_strengthen_post [OF doIPCTransfer_non_null_cte_wp_at']) apply (erule conjE) apply assumption apply (erule cte_wp_at_weakenE') apply (fastforce) - apply (wp sts_invs_minor'' sts_st_tcb' static_imp_wp) - apply (rule_tac Q="\rv s. invs' s \ sch_act_simple s - \ st_tcb_at' awaiting_reply' t s - \ t \ ksIdleThread s" - in hoare_post_imp) - apply (clarsimp) - apply (frule_tac t=t in invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (wp sts_invs_minor'' sts_st_tcb' hoare_weak_lift_imp) + apply (rule_tac Q="\_ s. invs' s \ sch_act_simple s + \ st_tcb_at' awaiting_reply' t s + \ t \ ksIdleThread s" + in hoare_post_imp) + apply clarsimp apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) - apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) - apply (clarsimp | drule(1) obj_at_conj')+ apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) apply (drule(1) pred_tcb_at_conj') apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") - apply (clarsimp) + apply clarsimp apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" - in pred_tcb'_weakenE) + in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) - apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 static_imp_wp + apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 hoare_weak_lift_imp | clarsimp simp add: inQ_def)+ apply (rule_tac Q="\_. 
invs' and tcb_at' t and sch_act_simple and st_tcb_at' awaiting_reply' t" in hoare_strengthen_post [rotated]) - apply (clarsimp) + apply clarsimp apply (rule conjI) - apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) - apply (rule conjI) - apply clarsimp - apply (clarsimp simp: obj_at'_def idle_tcb'_def pred_tcb_at'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def + idle_tcb'_def pred_tcb_at'_def) apply clarsimp apply (rule conjI) apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) apply (erule pred_tcb'_weakenE, clarsimp) - apply (rule conjI) apply (clarsimp simp : invs'_def valid_state'_def valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) - apply (rule conjI) - apply clarsimp - apply (frule invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, clarsimp) - apply (frule (1) not_tcbQueued_not_ksQ) - apply simp - apply clarsimp apply (wp cteDeleteOne_reply_pred_tcb_at hoare_drop_imp hoare_allI)+ apply (clarsimp simp add: isReply_awaiting_reply' cte_wp_at_ctes_of) apply (auto dest!: st_tcb_idle'[rotated] simp:isCap_simps) @@ -855,35 +845,9 @@ lemma ct_active_runnable' [simp]: "ct_active' s \ ct_in_state' runnable' s" by (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) -lemma valid_irq_node_tcbSchedEnqueue[wp]: - "\\s. valid_irq_node' (irq_node' s) s \ tcbSchedEnqueue ptr - \\rv s'. valid_irq_node' (irq_node' s') s'\" - apply (rule hoare_pre) - apply (simp add:valid_irq_node'_def ) - apply (wp hoare_unless_wp hoare_vcg_all_lift | wps)+ - apply (simp add:tcbSchedEnqueue_def) - apply (wp hoare_unless_wp| simp)+ - apply (simp add:valid_irq_node'_def) - done - -lemma rescheduleRequired_valid_queues_but_ct_domain: - "\\s. Invariants_H.valid_queues s \ valid_objs' s - \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) \ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - done - -lemma rescheduleRequired_valid_queues'_but_ct_domain: - "\\s. valid_queues' s - \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) - \ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: valid_queues'_def)+ - done +crunches tcbSchedEnqueue + for valid_irq_node[wp]: "\s. valid_irq_node' (irq_node' s) s" + (rule: valid_irq_node_lift) lemma tcbSchedEnqueue_valid_action: "\\s. \x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s\ @@ -894,9 +858,10 @@ lemma tcbSchedEnqueue_valid_action: done abbreviation (input) "all_invs_but_sch_extra \ - \s. valid_pspace' s \ Invariants_H.valid_queues s \ + \s. valid_pspace' s \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ + sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ @@ -908,7 +873,7 @@ abbreviation (input) "all_invs_but_sch_extra \ valid_machine_state' s \ cur_tcb' s \ untyped_ranges_zero' s \ - valid_queues' s \ pspace_domain_valid s \ + pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s)" @@ -917,18 +882,13 @@ lemma rescheduleRequired_all_invs_but_extra: "\\s. all_invs_but_sch_extra s\ rescheduleRequired \\_. 
invs'\" apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp add:rescheduleRequired_ct_not_inQ - rescheduleRequired_sch_act' - rescheduleRequired_valid_queues_but_ct_domain - rescheduleRequired_valid_queues'_but_ct_domain - valid_irq_node_lift valid_irq_handlers_lift'' - irqs_masked_lift cur_tcb_lift) + apply (wpsimp wp: rescheduleRequired_ct_not_inQ rescheduleRequired_sch_act' + valid_irq_node_lift valid_irq_handlers_lift'') apply auto done lemma threadSet_all_invs_but_sch_extra: - shows "\ tcb_at' t and (\s. (\p. t \ set (ksReadyQueues s p))) and + shows "\ tcb_at' t and all_invs_but_sch_extra and sch_act_simple and K (ds \ maxDomain) \ threadSet (tcbDomain_update (\_. ds)) t @@ -948,13 +908,11 @@ lemma threadSet_all_invs_but_sch_extra: valid_irq_handlers_lift'' threadSet_ctes_ofT threadSet_not_inQ - threadSet_valid_queues'_no_state threadSet_tcbDomain_update_ct_idle_or_in_cur_domain' - threadSet_valid_queues threadSet_valid_dom_schedule' threadSet_iflive'T threadSet_ifunsafe'T - untyped_ranges_zero_lift + untyped_ranges_zero_lift threadSet_sched_pointers threadSet_valid_sched_pointers | simp add:tcb_cte_cases_def cteSizeBits_def cteCaps_of_def o_def)+ apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift threadSet_pred_tcb_no_state | simp)+ apply (clarsimp simp:sch_act_simple_def o_def cteCaps_of_def) @@ -976,21 +934,19 @@ lemma setDomain_invs': (\y. domain \ maxDomain))\ setDomain ptr domain \\y. invs'\" apply (simp add:setDomain_def ) - apply (wp add: hoare_when_wp static_imp_wp static_imp_conj_wp rescheduleRequired_all_invs_but_extra + apply (wp add: when_wp hoare_weak_lift_imp hoare_weak_lift_imp_conj rescheduleRequired_all_invs_but_extra tcbSchedEnqueue_valid_action hoare_vcg_if_lift2) apply (rule_tac Q = "\r s. all_invs_but_sch_extra s \ curThread = ksCurThread s \ (ptr \ curThread \ ct_not_inQ s \ sch_act_wf (ksSchedulerAction s) s \ ct_idle_or_in_cur_domain' s)" in hoare_strengthen_post[rotated]) apply (clarsimp simp:invs'_def valid_state'_def st_tcb_at'_def[symmetric] valid_pspace'_def) - apply (erule st_tcb_ex_cap'') apply simp - apply (case_tac st,simp_all)[1] apply (rule hoare_strengthen_post[OF hoare_vcg_conj_lift]) apply (rule threadSet_all_invs_but_sch_extra) prefer 2 apply clarsimp apply assumption - apply (wp static_imp_wp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain + apply (wp hoare_weak_lift_imp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain threadSet_tcbDomain_update_ct_not_inQ | simp)+ apply (rule_tac Q = "\r s. invs' s \ curThread = ksCurThread s \ sch_act_simple s \ domain \ maxDomain @@ -1002,17 +958,14 @@ lemma setDomain_invs': done lemma performInv_invs'[wp]: - "\invs' and sch_act_simple - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) - and ct_active' and valid_invocation' i\ - RetypeDecls_H.performInvocation block call i \\rv. invs'\" + "\invs' and sch_act_simple and ct_active' and valid_invocation' i\ + RetypeDecls_H.performInvocation block call i + \\_. 
invs'\" unfolding performInvocation_def apply (cases i) - apply ((clarsimp simp: simple_sane_strg sch_act_simple_def - ct_not_ksQ sch_act_sane_def - | wp tcbinv_invs' arch_performInvocation_invs' - setDomain_invs' - | rule conjI | erule active_ex_cap')+) + apply (clarsimp simp: simple_sane_strg sch_act_simple_def sch_act_sane_def + | wp tcbinv_invs' arch_performInvocation_invs' setDomain_invs' + | rule conjI | erule active_ex_cap')+ done lemma getSlotCap_to_refs[wp]: @@ -1094,7 +1047,7 @@ lemma lookupExtras_real_ctes[wp]: lemma lookupExtras_ctes[wp]: "\valid_objs'\ lookupExtraCaps t xs info \\rv s. \x \ set rv. cte_at' (snd x) s\,-" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookupExtras_real_ctes) apply (simp add: real_cte_at') done @@ -1182,7 +1135,7 @@ crunches reply_from_kernel lemma handleInvocation_corres: "c \ b \ corres (dc \ dc) - (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (einvs and schact_is_rct and ct_active) (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') (handle_invocation c b) @@ -1226,21 +1179,20 @@ lemma handleInvocation_corres: apply (wp reply_from_kernel_tcb_at) apply (rule impI, wp+) apply (wpsimp wp: hoare_drop_imps|strengthen invs_distinct invs_psp_aligned)+ - apply (rule_tac Q="\rv. einvs and simple_sched_action and valid_invocation rve + apply (rule_tac Q="\rv. einvs and schact_is_rct and valid_invocation rve and (\s. thread = cur_thread s) and st_tcb_at active thread" in hoare_post_imp) apply (clarsimp simp: simple_from_active ct_in_state_def elim!: st_tcb_weakenE) - apply (wp sts_st_tcb_at' set_thread_state_simple_sched_action - set_thread_state_active_valid_sched) + apply (wp sts_st_tcb_at' set_thread_state_schact_is_rct + set_thread_state_active_valid_sched) apply (rule_tac Q="\rv. invs' and valid_invocation' rve' and (\s. thread = ksCurThread s) and st_tcb_at' active' thread and (\s. ksSchedulerAction s = ResumeCurrentThread)" in hoare_post_imp) apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) apply (clarsimp) apply (wp setThreadState_nonqueued_state_update setThreadState_st_tcb setThreadState_rct)[1] @@ -1252,14 +1204,13 @@ lemma handleInvocation_corres: valid_tcb_state_def ct_in_state_def simple_from_active invs_mdb invs_distinct invs_psp_aligned) - apply (clarsimp simp: msg_max_length_def word_bits_def) + apply (clarsimp simp: msg_max_length_def word_bits_def schact_is_rct_def) apply (erule st_tcb_ex_cap, clarsimp+) apply fastforce apply (clarsimp) apply (frule tcb_at_invs') apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def ct_not_inQ_def) - apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) apply (frule pred_tcb'_weakenE [where P=active' and P'=simple'], clarsimp) apply (frule(1) st_tcb_ex_cap'', fastforce) apply (clarsimp simp: valid_pspace'_def) @@ -1302,11 +1253,11 @@ lemma hinv_invs'[wp]: apply (simp add: handleInvocation_def split_def ts_Restart_case_helper') apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' - hoare_vcg_all_lift static_imp_wp) + hoare_vcg_all_lift hoare_weak_lift_imp) apply simp apply (intro conjI impI) apply (wp gts_imp' | simp)+ - apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R[rotated]) + apply (rule_tac Q'="\rv. 
invs'" in hoare_strengthen_postE_R[rotated]) apply clarsimp apply (subgoal_tac "thread \ ksIdleThread s", simp_all)[1] apply (fastforce elim!: pred_tcb'_weakenE st_tcb_ex_cap'') @@ -1319,11 +1270,8 @@ lemma hinv_invs'[wp]: and st_tcb_at' active' thread" in hoare_post_imp) apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) - apply (clarsimp) apply (wp sts_invs_minor' setThreadState_st_tcb setThreadState_rct | simp)+ apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (fastforce simp add: tcb_at_invs' ct_in_state'_def simple_sane_strg sch_act_simple_def @@ -1332,12 +1280,13 @@ lemma hinv_invs'[wp]: done crunch typ_at'[wp]: handleFault "\s. P (typ_at' T p s)" + (wp: crunch_wps) lemmas handleFault_typ_ats[wp] = typ_at_lifts [OF handleFault_typ_at'] lemma handleSend_corres: "corres (dc \ dc) - (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (einvs and schact_is_rct and ct_active) (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') (handle_send blocking) (handleSend blocking)" @@ -1422,7 +1371,7 @@ lemma cteDeleteOne_reply_cap_to''[wp]: cteDeleteOne slot \\rv. ex_nonz_cap_to' p\" apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (subgoal_tac "isReplyCap (cteCap cte) \ isNullCap (cteCap cte)") apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv @@ -1466,7 +1415,6 @@ lemma handleRecv_isBlocking_corres': and (\s. ex_nonz_cap_to (cur_thread s) s)) (invs' and ct_in_state' simple' and sch_act_sane - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) and (\s. ex_nonz_cap_to' (ksCurThread s) s)) (handle_recv isBlocking) (handleRecv isBlocking)" (is "corres dc (?pre1) (?pre2) (handle_recv _) (handleRecv _)") @@ -1529,8 +1477,7 @@ lemma handleRecv_isBlocking_corres': lemma handleRecv_isBlocking_corres: "corres dc (einvs and ct_active) - (invs' and ct_active' and sch_act_sane and - (\s. \p. ksCurThread s \ set (ksReadyQueues s p))) + (invs' and ct_active' and sch_act_sane) (handle_recv isBlocking) (handleRecv isBlocking)" apply (rule corres_guard_imp) apply (rule handleRecv_isBlocking_corres') @@ -1545,49 +1492,34 @@ lemma lookupCap_refs[wp]: "\invs'\ lookupCap t ref \\rv s. \r\zobj_refs' rv. ex_nonz_cap_to' r s\,-" by (simp add: lookupCap_def split_def | wp | simp add: o_def)+ -lemma deleteCallerCap_ksQ_ct': - "\invs' and ct_in_state' simple' and sch_act_sane and - (\s. ksCurThread s \ set (ksReadyQueues s p) \ thread = ksCurThread s)\ - deleteCallerCap thread - \\rv s. thread \ set (ksReadyQueues s p)\" - apply (rule_tac Q="\rv s. thread = ksCurThread s \ ksCurThread s \ set (ksReadyQueues s p)" - in hoare_strengthen_post) - apply (wp deleteCallerCap_ct_not_ksQ) - apply auto - done - lemma hw_invs'[wp]: "\invs' and ct_in_state' simple' and sch_act_sane and (\s. ex_nonz_cap_to' (ksCurThread s) s) - and (\s. ksCurThread s \ ksIdleThread s) - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ + and (\s. ksCurThread s \ ksIdleThread s)\ handleRecv isBlocking \\r. 
invs'\" apply (simp add: handleRecv_def cong: if_cong) apply (rule hoare_pre) apply ((wp getNotification_wp | wpc | simp)+)[1] apply (clarsimp simp: ct_in_state'_def) apply ((wp deleteCallerCap_nonz_cap hoare_vcg_all_lift - deleteCallerCap_ksQ_ct' hoare_lift_Pf2[OF deleteCallerCap_simple deleteCallerCap_ct'] | wpc | simp)+)[1] apply simp apply (wp deleteCallerCap_nonz_cap hoare_vcg_all_lift - deleteCallerCap_ksQ_ct' hoare_lift_Pf2[OF deleteCallerCap_simple deleteCallerCap_ct'] | wpc | simp add: ct_in_state'_def whenE_def split del: if_split)+ apply (rule validE_validE_R) apply (rule_tac Q="\rv s. invs' s \ sch_act_sane s - \ (\p. ksCurThread s \ set (ksReadyQueues s p)) \ thread = ksCurThread s \ ct_in_state' simple' s \ ex_nonz_cap_to' thread s \ thread \ ksIdleThread s \ (\x \ zobj_refs' rv. ex_nonz_cap_to' x s)" and E="\_ _. True" - in hoare_post_impErr[rotated]) + in hoare_strengthen_postE[rotated]) apply (clarsimp simp: isCap_simps ct_in_state'_def pred_tcb_at' invs_valid_objs' sch_act_sane_not obj_at'_def pred_tcb_at'_def) apply (assumption) @@ -1604,34 +1536,45 @@ lemma setSchedulerAction_obj_at'[wp]: by (wp, clarsimp elim!: obj_at'_pspaceI) lemma handleYield_corres: - "corres dc einvs (invs' and ct_active' and (\s. ksSchedulerAction s = ResumeCurrentThread)) handle_yield handleYield" + "corres dc + (einvs and ct_active) + (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread)) + handle_yield handleYield" apply (clarsimp simp: handle_yield_def handleYield_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) apply simp - apply (rule corres_split[OF tcbSchedDequeue_corres]) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (rule rescheduleRequired_corres) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues | simp add: )+ + apply (wpsimp wp: weak_sch_act_wf_lift_linear + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+ apply (simp add: invs_def valid_sched_def valid_sched_action_def cur_tcb_def - tcb_at_is_etcb_at valid_state_def valid_pspace_def) - apply clarsimp - apply (frule ct_active_runnable') - apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def + tcb_at_is_etcb_at valid_state_def valid_pspace_def ct_in_state_def + runnable_eq_active) + apply (fastforce simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def) - apply (erule(1) valid_objs_valid_tcbE[OF valid_pspace_valid_objs']) - apply (simp add:valid_tcb'_def) + done + +lemma tcbSchedAppend_ct_in_state'[wp]: + "tcbSchedAppend t \ct_in_state' test\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]; wp) done lemma hy_invs': "\invs' and ct_active'\ handleYield \\r. invs' and ct_active'\" apply (simp add: handleYield_def) - apply (wp ct_in_state_thread_state_lift' - rescheduleRequired_all_invs_but_ct_not_inQ - tcbSchedAppend_invs_but_ct_not_inQ' | simp)+ - apply (clarsimp simp add: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def - valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def - ) + apply (wpsimp wp: ct_in_state_thread_state_lift' rescheduleRequired_all_invs_but_ct_not_inQ) + apply (rule_tac Q="\_. 
all_invs_but_ct_not_inQ' and ct_active'" in hoare_post_imp) + apply clarsimp + apply (subst pred_conj_def) + apply (rule hoare_vcg_conj_lift) + apply (rule tcbSchedAppend_all_invs_but_ct_not_inQ') + apply wpsimp + apply wpsimp + apply wpsimp apply (simp add:ct_active_runnable'[unfolded ct_in_state'_def]) done @@ -1729,7 +1672,7 @@ lemmas cteDeleteOne_st_tcb_at_simple'[wp] = cteDeleteOne_st_tcb_at[where P=simple', simplified] crunch st_tcb_at_simple'[wp]: handleReply "st_tcb_at' simple' t'" - (wp: hoare_post_taut crunch_wps sts_st_tcb_at'_cases + (wp: hoare_TrueI crunch_wps sts_st_tcb_at'_cases threadSet_pred_tcb_no_state ignore: setThreadState) @@ -1755,18 +1698,17 @@ lemma hr_ct_active'[wp]: "\invs' and ct_active'\ handleReply \\rv. ct_active'\" apply (simp add: handleReply_def getSlotCap_def getCurThread_def getThreadCallerSlot_def locateSlot_conv) - apply (rule hoare_seq_ext) - apply (rule ct_in_state'_decomp) - apply ((wp hoare_drop_imps | wpc | simp)+)[1] - apply (subst haskell_assert_def) - apply (wp hoare_vcg_all_lift getCTE_wp doReplyTransfer_st_tcb_at_active - | wpc | simp)+ + apply (rule bind_wp, rename_tac cur_thread) + apply (rule_tac t=cur_thread in ct_in_state'_decomp) + apply (wpsimp wp: getCTE_wp) + apply (fastforce simp: cte_wp_at_ctes_of) + apply (wpsimp wp: getCTE_wp doReplyTransfer_st_tcb_at_active)+ apply (fastforce simp: ct_in_state'_def cte_wp_at_ctes_of valid_cap'_def - dest: ctes_of_valid') + dest: ctes_of_valid') done lemma handleCall_corres: - "corres (dc \ dc) (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + "corres (dc \ dc) (einvs and schact_is_rct and ct_active) (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') @@ -1828,7 +1770,7 @@ lemma handleReply_sane: "\sch_act_sane\ handleReply \\rv. sch_act_sane\" apply (simp add: handleReply_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) apply (rule hoare_pre) - apply (wp haskell_assert_wp doReplyTransfer_sane getCTE_wp'| wpc)+ + apply (wp doReplyTransfer_sane getCTE_wp'| wpc)+ apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -1844,74 +1786,6 @@ lemma handleReply_nonz_cap_to_ct: crunch ksQ[wp]: handleFaultReply "\s. P (ksReadyQueues s p)" -lemma doReplyTransfer_ct_not_ksQ: - "\ invs' and sch_act_simple - and tcb_at' thread and tcb_at' word - and ct_in_state' simple' - and (\s. ksCurThread s \ word) - and (\s. \p. ksCurThread s \ set(ksReadyQueues s p))\ - doReplyTransfer thread word callerSlot g - \\rv s. \p. ksCurThread s \ set(ksReadyQueues s p)\" -proof - - have astct: "\t p. - \(\s. ksCurThread s \ set(ksReadyQueues s p) \ sch_act_sane s) - and (\s. ksCurThread s \ t)\ - possibleSwitchTo t \\rv s. ksCurThread s \ set(ksReadyQueues s p)\" - apply (rule hoare_weaken_pre) - apply (wps possibleSwitchTo_ct') - apply (wp possibleSwitchTo_ksQ') - apply (clarsimp simp: sch_act_sane_def) - done - have stsct: "\t st p. - \(\s. ksCurThread s \ set(ksReadyQueues s p)) and sch_act_simple\ - setThreadState st t - \\rv s. 
ksCurThread s \ set(ksReadyQueues s p)\" - apply (rule hoare_weaken_pre) - apply (wps setThreadState_ct') - apply (wp hoare_vcg_all_lift sts_ksQ) - apply (clarsimp) - done - show ?thesis - apply (simp add: doReplyTransfer_def) - apply (wp, wpc) - apply (wp astct stsct hoare_vcg_all_lift - cteDeleteOne_ct_not_ksQ hoare_drop_imp - hoare_lift_Pf2 [OF cteDeleteOne_sch_act_not cteDeleteOne_ct'] - hoare_lift_Pf2 [OF doIPCTransfer_pred_tcb_at' doIPCTransfer_ct'] - hoare_lift_Pf2 [OF doIPCTransfer_ksQ doIPCTransfer_ct'] - hoare_lift_Pf2 [OF threadSet_ksQ threadSet_ct] - hoare_lift_Pf2 [OF handleFaultReply_ksQ handleFaultReply_ct'] - | simp add: ct_in_state'_def)+ - apply (fastforce simp: sch_act_simple_def sch_act_sane_def ct_in_state'_def)+ - done -qed - -lemma handleReply_ct_not_ksQ: - "\invs' and sch_act_simple - and ct_in_state' simple' - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ - handleReply - \\rv s. \p. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: handleReply_def del: split_paired_All) - apply (subst haskell_assert_def) - apply (wp | wpc)+ - apply (wp doReplyTransfer_ct_not_ksQ getThreadCallerSlot_inv)+ - apply (rule_tac Q="\cap. - (\s. \p. ksCurThread s \ set(ksReadyQueues s p)) - and invs' - and sch_act_simple - and (\s. thread = ksCurThread s) - and tcb_at' thread - and ct_in_state' simple' - and cte_wp_at' (\c. cteCap c = cap) callerSlot" - in hoare_post_imp) - apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def - cte_wp_at_ctes_of valid_cap'_def - dest!: ctes_of_valid') - apply (wp getSlotCap_cte_wp_at getThreadCallerSlot_inv)+ - apply (clarsimp) - done - crunch valid_etcbs[wp]: handle_recv "valid_etcbs" (wp: crunch_wps simp: crunch_simps) @@ -1924,11 +1798,10 @@ lemma handleReply_handleRecv_corres: apply (rule corres_split_nor[OF handleReply_corres]) apply (rule handleRecv_isBlocking_corres') apply (wp handle_reply_nonz_cap_to_ct handleReply_sane - handleReply_nonz_cap_to_ct handleReply_ct_not_ksQ handle_reply_valid_sched)+ + handleReply_nonz_cap_to_ct handle_reply_valid_sched)+ apply (fastforce simp: ct_in_state_def ct_in_state'_def simple_sane_strg elim!: st_tcb_weakenE st_tcb_ex_cap') apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) apply (fastforce elim: pred_tcb'_weakenE) done @@ -1936,7 +1809,6 @@ lemma handleHypervisorFault_corres: "corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread and (%_. valid_fault f)) (invs' and sch_act_not thread - and (\s. \p. thread \ set(ksReadyQueues s p)) and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) (handle_hypervisor_fault w fault) (handleHypervisorFault w fault)" apply (cases fault; clarsimp simp add: handleHypervisorFault_def returnOk_def2) @@ -1945,21 +1817,20 @@ lemma handleHypervisorFault_corres: (* FIXME: move *) lemma handleEvent_corres: "corres (dc \ dc) (einvs and (\s. event \ Interrupt \ ct_running s) and - (\s. scheduler_action s = resume_cur_thread)) + schact_is_rct) (invs' and (\s. event \ Interrupt \ ct_running' s) and (\s. ksSchedulerAction s = ResumeCurrentThread)) (handle_event event) (handleEvent event)" (is "?handleEvent_corres") proof - have hw: - "\isBlocking. corres dc (einvs and ct_running and (\s. scheduler_action s = resume_cur_thread)) + "\isBlocking. corres dc (einvs and ct_running and schact_is_rct) (invs' and ct_running' and (\s. 
ksSchedulerAction s = ResumeCurrentThread)) (handle_recv isBlocking) (handleRecv isBlocking)" apply (rule corres_guard_imp [OF handleRecv_isBlocking_corres]) apply (clarsimp simp: ct_in_state_def ct_in_state'_def - elim!: st_tcb_weakenE pred_tcb'_weakenE - dest!: ct_not_ksQ)+ + elim!: st_tcb_weakenE pred_tcb'_weakenE)+ done show ?thesis apply (case_tac event) @@ -1974,7 +1845,7 @@ proof - corres_guard_imp[OF handleCall_corres] corres_guard_imp[OF handleYield_corres] active_from_running active_from_running' - simp: simple_sane_strg)[8] + simp: simple_sane_strg schact_is_rct_def)[8] apply (rule corres_underlying_split) apply (rule corres_guard_imp[OF getCurThread_corres], simp+) apply (rule handleFault_corres) @@ -1985,7 +1856,6 @@ proof - simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] @@ -1998,12 +1868,11 @@ proof - simp: ct_in_state_def valid_fault_def) apply wp apply clarsimp - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] apply (rule corres_guard_imp) - apply (rule corres_split_eqr[where R="\rv. einvs" + apply (rule corres_split_eqr[where R="\_. einvs" and R'="\rv s. \x. rv = Some x \ R'' x s" for R'']) apply (rule corres_machine_op) @@ -2013,10 +1882,7 @@ proof - apply (rule handleInterrupt_corres) apply (wp hoare_vcg_all_lift doMachineOp_getActiveIRQ_IRQ_active' - | simp | simp add: imp_conjR | wp (once) hoare_drop_imps)+ - apply force - apply simp apply (simp add: invs'_def valid_state'_def) apply (rule_tac corres_underlying_split) apply (rule corres_guard_imp, rule getCurThread_corres, simp+) @@ -2032,7 +1898,6 @@ proof - apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (fastforce simp: simple_sane_strg sch_act_simple_def ct_in_state'_def elim: st_tcb_ex_cap'' pred_tcb'_weakenE) apply (rule corres_underlying_split) @@ -2044,7 +1909,6 @@ proof - simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] @@ -2131,10 +1995,8 @@ proof - apply (rename_tac syscall) apply (case_tac syscall, (wp handleReply_sane handleReply_nonz_cap_to_ct handleReply_ksCurThread - handleReply_ct_not_ksQ | clarsimp simp: active_from_running' simple_from_running' simple_sane_strg simp del: split_paired_All | rule conjI active_ex_cap' - | drule ct_not_ksQ[rotated] | strengthen nidle)+) apply (rule hoare_strengthen_post, rule hoare_weaken_pre, @@ -2146,7 +2008,6 @@ proof - | erule pred_tcb'_weakenE st_tcb_ex_cap'' | clarsimp simp: tcb_at_invs ct_in_state'_def simple_sane_strg sch_act_simple_def | drule st_tcb_at_idle_thread' - | drule ct_not_ksQ[rotated] | wpc | wp (once) hoare_drop_imps)+ done qed @@ -2181,7 +2042,6 @@ lemma hi_IRQInactive: -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (simp add: handleInvocation_def split_def) apply (wp syscall_valid' retype_pi_IRQInactive) - apply simp_all done lemma handleSend_IRQInactive: diff --git a/proof/refine/RISCV64/TcbAcc_R.thy b/proof/refine/RISCV64/TcbAcc_R.thy index b053ff4469..5b9afdab59 100644 --- a/proof/refine/RISCV64/TcbAcc_R.thy +++ b/proof/refine/RISCV64/TcbAcc_R.thy @@ -11,10 +11,8 @@ begin context begin interpretation Arch . 
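(* Editorial sketch, not part of the original theory text: the priority bitmap lemmas below
   rest on a two-level encoding of priorities. Assuming wordRadix = 6 on this 64-bit
   platform, a priority p is split into an L1 index, prioToL1Index p = p >> wordRadix, and
   its low wordRadix bits, which select a bit in the corresponding L2 word. As a worked
   example with a hypothetical priority p = 0x45 (69): prioToL1Index p = 1, the L2 bit is 5,
   and l1IndexToPrio 1 || 5 = 0x40 || 5 = 0x45 recovers p, which is the shape captured by
   prioToL1Index_l1IndexToPrio_or_id. lookupBitmapPriority then recovers the highest
   priority with a non-empty queue by combining the highest set L1 index with the highest
   set bit of the matching L2 word. *)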
(*FIXME: arch_split*) declare if_weak_cong [cong] -declare result_in_set_wp[wp] declare hoare_in_monad_post[wp] declare trans_state_update'[symmetric,simp] -declare empty_fail_sequence_x[simp] declare storeWordUser_typ_at' [wp] (* Auxiliaries and basic properties of priority bitmap functions *) @@ -51,7 +49,7 @@ lemma isHighestPrio_def': "isHighestPrio d p = gets (\s. ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' apply (rule ext) - apply (clarsimp simp: gets_def bind_assoc return_def NonDetMonad.bind_def get_def + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def split: if_splits) done @@ -60,10 +58,8 @@ lemma getHighestPrio_inv[wp]: unfolding bitmap_fun_defs by simp lemma valid_bitmapQ_bitmapQ_simp: - "\ valid_bitmapQ s \ \ - bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" - unfolding valid_bitmapQ_def - by simp + "valid_bitmapQ s \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (simp add: valid_bitmapQ_def) lemma prioToL1Index_l1IndexToPrio_or_id: "\ unat (w'::priority) < 2 ^ wordRadix ; w < 2^(size w' - wordRadix) \ @@ -86,34 +82,18 @@ lemma l1IndexToPrio_wordRadix_mask[simp]: unfolding l1IndexToPrio_def by (simp add: wordRadix_def') -definition - (* when in the middle of updates, a particular queue might not be entirely valid *) - valid_queues_no_bitmap_except :: "machine_word \ kernel_state \ bool" -where - "valid_queues_no_bitmap_except t' \ \s. - (\d p. (\t \ set (ksReadyQueues s (d, p)). t \ t' \ obj_at' (inQ d p and runnable' \ tcbState) t s) - \ distinct (ksReadyQueues s (d, p)) - \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - -lemma valid_queues_no_bitmap_exceptI[intro]: - "valid_queues_no_bitmap s \ valid_queues_no_bitmap_except t s" - unfolding valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def - by simp - lemma st_tcb_at_coerce_abstract: assumes t: "st_tcb_at' P t c" assumes sr: "(a, c) \ state_relation" shows "st_tcb_at (\st. \st'. 
thread_state_relation st st' \ P st') t a" using assms - apply (clarsimp simp: state_relation_def pred_tcb_at'_def obj_at'_def objBits_simps) - apply (erule(1) pspace_dom_relatedE) - apply (erule(1) obj_relation_cutsE, simp_all) - apply (clarsimp simp: st_tcb_at_def obj_at_def other_obj_relation_def - tcb_relation_def - split: Structures_A.kernel_object.split_asm if_split_asm - RISCV64_A.arch_kernel_obj.split_asm)+ - apply fastforce - done + apply (clarsimp simp: state_relation_def pred_tcb_at'_def obj_at'_def + projectKOs) + apply (erule (1) pspace_dom_relatedE) + apply (erule (1) obj_relation_cutsE, simp_all) + by (fastforce simp: st_tcb_at_def obj_at_def other_obj_relation_def tcb_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm)+ lemma st_tcb_at_runnable_coerce_concrete: assumes t: "st_tcb_at runnable t a" @@ -134,10 +114,11 @@ lemma pspace_relation_tcb_at': assumes t: "tcb_at t a" assumes aligned: "pspace_aligned' c" assumes distinct: "pspace_distinct' c" - shows "tcb_at' t c" using assms + shows "tcb_at' t c" + using assms apply (clarsimp simp: obj_at_def) apply (drule(1) pspace_relation_absD) - apply (clarsimp simp: is_tcb other_obj_relation_def) + apply (clarsimp simp: is_tcb tcb_relation_cut_def) apply (simp split: kernel_object.split_asm) apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb], simp) apply (erule obj_at'_weakenE) @@ -145,13 +126,24 @@ lemma pspace_relation_tcb_at': done lemma tcb_at_cross: - "\ tcb_at t s; pspace_aligned s; pspace_distinct s; - pspace_relation (kheap s) (ksPSpace s') \ \ tcb_at' t s'" + "\tcb_at t s; pspace_aligned s; pspace_distinct s; pspace_relation (kheap s) (ksPSpace s')\ + \ tcb_at' t s'" apply (drule (2) pspace_distinct_cross) apply (drule (1) pspace_aligned_cross) apply (erule (3) pspace_relation_tcb_at') done +lemma tcb_at'_cross: + assumes p: "pspace_relation (kheap s) (ksPSpace s')" + assumes t: "tcb_at' ptr s'" + shows "tcb_at ptr s" + using assms + apply (clarsimp simp: obj_at'_def) + apply (erule (1) pspace_dom_relatedE) + by (clarsimp simp: obj_relation_cuts_def2 obj_at_def cte_relation_def + other_obj_relation_def pte_relation_def is_tcb_def + split: Structures_A.kernel_object.split_asm if_split_asm arch_kernel_obj.split_asm) + lemma st_tcb_at_runnable_cross: "\ st_tcb_at runnable t s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ \ st_tcb_at' runnable' t s'" @@ -168,24 +160,82 @@ lemma cur_tcb_cross: apply (erule (3) tcb_at_cross) done -lemma valid_objs_valid_tcbE: "\s t.\ valid_objs' s; tcb_at' t s; \tcb. valid_tcb' tcb s \ R s tcb \ \ obj_at' (R s) t s" +lemma valid_objs_valid_tcbE': + assumes "valid_objs' s" + "tcb_at' t s" + "\tcb. ko_at' tcb t s \ valid_tcb' tcb s \ R s tcb" + shows "obj_at' (R s) t s" + using assms apply (clarsimp simp add: valid_objs'_def ran_def typ_at'_def ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) apply (fastforce simp: projectKO_def projectKO_opt_tcb return_def valid_tcb'_def) done -lemma valid_objs'_maxDomain: - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbDomain tcb \ maxDomain) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) +lemma valid_tcb'_tcbDomain_update: + "new_dom \ maxDomain \ + \tcb. valid_tcb' tcb s \ valid_tcb' (tcbDomain_update (\_. new_dom) tcb) s" + unfolding valid_tcb'_def + apply (clarsimp simp: tcb_cte_cases_def objBits_simps') done -lemma valid_objs'_maxPriority: - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. 
tcbPriority tcb \ maxPriority) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) +lemma valid_tcb'_tcbState_update: + "\valid_tcb_state' st s; valid_tcb' tcb s\ \ + valid_tcb' (tcbState_update (\_. st) tcb) s" + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def valid_tcb_state'_def objBits_simps') done +definition valid_tcbs' :: "kernel_state \ bool" where + "valid_tcbs' s' \ \ptr tcb. ksPSpace s' ptr = Some (KOTCB tcb) \ valid_tcb' tcb s'" + +lemma valid_objs'_valid_tcbs'[elim!]: + "valid_objs' s \ valid_tcbs' s" + by (auto simp: valid_objs'_def valid_tcbs'_def valid_obj'_def split: kernel_object.splits) + +lemma invs'_valid_tcbs'[elim!]: + "invs' s \ valid_tcbs' s" + by (fastforce intro: valid_objs'_valid_tcbs') + +lemma valid_tcbs'_maxDomain: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbDomain tcb \ maxDomain) t s" + apply (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def) + done + +lemmas valid_objs'_maxDomain = valid_tcbs'_maxDomain[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_maxPriority: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbPriority tcb \ maxPriority) t s" + apply (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def) + done + +lemmas valid_objs'_maxPriority = valid_tcbs'_maxPriority[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_obj_at': + assumes "valid_tcbs' s" + "tcb_at' t s" + "\tcb. ko_at' tcb t s \ valid_tcb' tcb s \ R s tcb" + shows "obj_at' (R s) t s" + using assms + apply (clarsimp simp add: valid_tcbs'_def ran_def typ_at'_def + ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) + done + +lemma update_valid_tcb'[simp]: + "\f. valid_tcb' tcb (ksReadyQueuesL1Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueuesL2Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueues_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksSchedulerAction_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksDomainTime_update f s) = valid_tcb' tcb s" + by (auto simp: valid_tcb'_def valid_tcb_state'_def valid_bound_tcb'_def valid_bound_ntfn'_def + split: option.splits thread_state.splits) + +lemma update_valid_tcbs'[simp]: + "\f. valid_tcbs' (ksReadyQueuesL1Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueuesL2Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueues_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksSchedulerAction_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksDomainTime_update f s) = valid_tcbs' s" + by (simp_all add: valid_tcbs'_def) + lemma doMachineOp_irq_states': assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" shows "\valid_irq_states'\ doMachineOp f \\rv. valid_irq_states'\" @@ -267,7 +317,7 @@ lemma preemptionPoint_irq [wp]: "\valid_irq_states'\ preemptionPoint -, \\irq s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" apply (simp add: preemptionPoint_def setWorkUnits_def modifyWorkUnits_def getWorkUnits_def) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (rule hoare_post_imp) prefer 2 apply (rule doMachineOp_getActiveIRQ_IRQ_active) @@ -283,49 +333,109 @@ lemma updateObject_tcb_inv: by simp (rule updateObject_default_inv) lemma setObject_update_TCB_corres': - assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" - assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" - assumes tables': "\(getF, v) \ ran tcb_cte_cases. 
getF tcbu' = getF tcb'" + assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'" + assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb" + assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF new_tcb' = getF tcb'" + assumes sched_pointers: "tcbSchedPrev new_tcb' = tcbSchedPrev tcb'" + "tcbSchedNext new_tcb' = tcbSchedNext tcb'" + assumes flag: "tcbQueued new_tcb' = tcbQueued tcb'" assumes r: "r () ()" - assumes exst: "exst_same tcb' tcbu'" - shows "corres r (ko_at (TCB tcb) add) - (ko_at' tcb' add) - (set_object add (TCB tcbu)) (setObject add tcbu')" - apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' tcbu'" in corres_req) + assumes exst: "exst_same tcb' new_tcb'" + shows + "corres r + (ko_at (TCB tcb) ptr) (ko_at' tcb' ptr) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" + apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' new_tcb'" in corres_req) apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) apply (frule(1) pspace_relation_absD) - apply (clarsimp simp: other_obj_relation_def exst) - apply (rule corres_guard_imp) - apply (rule corres_rel_imp) - apply (rule setObject_other_corres[where P="(=) tcb'"]) - apply (rule ext)+ - apply simp - defer - apply (simp add: is_other_obj_relation_type_def - objBits_simps' other_obj_relation_def tcbs r)+ - apply (fastforce elim!: obj_at_weakenE dest: bspec[OF tables]) - apply (subst(asm) eq_commute, assumption) - apply (clarsimp simp: obj_at'_def objBits_simps) - apply (subst map_to_ctes_upd_tcb, assumption+) - apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) - apply (subst if_not_P) - apply (fastforce dest: bspec [OF tables', OF ranI]) - apply simp + apply (clarsimp simp: tcb_relation_cut_def exst) + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp: obj_at'_def) + apply (unfold set_object_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def projectKOs obj_at_def + updateObject_default_def in_magnitude_check obj_at'_def) + apply (rename_tac s s' t') + apply (prop_tac "t' = s'") + apply (clarsimp simp: magnitudeCheck_def in_monad split: option.splits) + apply (drule singleton_in_magnitude_check) + apply (prop_tac "map_to_ctes ((ksPSpace s') (ptr \ injectKO new_tcb')) + = map_to_ctes (ksPSpace s')") + apply (frule_tac tcb=new_tcb' and tcb=tcb' in map_to_ctes_upd_tcb) + apply (clarsimp simp: objBits_simps) + apply (clarsimp simp: objBits_simps ps_clear_def3 field_simps objBits_defs mask_def) + apply (insert tables')[1] + apply (rule ext) + apply (clarsimp split: if_splits) + apply blast + apply (prop_tac "obj_at (same_caps (TCB new_tcb)) ptr s") + using tables + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def assms) + apply (clarsimp simp add: state_relation_def) + apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x=ptr in allE)+ + apply clarsimp + apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply clarsimp + apply (rule conjI) + apply (simp only: pspace_relation_def simp_thms + pspace_dom_update[where x="kernel_object.TCB _" + and v="kernel_object.TCB _", + simplified a_type_def, simplified]) + apply (rule conjI) + using assms + apply 
(simp only: dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: tcb_relation_cut_def split: if_split_asm kernel_object.split_asm) + apply (rename_tac aa ba) + apply (drule_tac x="(aa, ba)" in bspec, simp) + apply clarsimp + apply (frule_tac ko'="kernel_object.TCB tcb" and x'=ptr in obj_relation_cut_same_type) + apply (simp add: tcb_relation_cut_def)+ + apply clarsimp + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule (1) bspec) + apply (insert exst) + apply (clarsimp simp: etcb_relation_def exst_same_def) + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (insert sched_pointers flag exst) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext new_tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev new_tcb') = tcbSchedPrevs_of s'") + apply (fastforce simp: opt_map_def) + apply (clarsimp simp: ready_queue_relation_def opt_pred_def opt_map_def exst_same_def inQ_def + split: option.splits) + apply (metis (mono_tags, opaque_lifting)) + apply (clarsimp simp: fun_upd_def caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def) done lemma setObject_update_TCB_corres: - "\ tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'; - \(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb; - \(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'; - r () (); exst_same tcb' tcbu'\ - \ corres r (\s. get_tcb add s = Some tcb) - (\s'. (tcb', s') \ fst (getObject add s')) - (set_object add (TCB tcbu)) (setObject add tcbu')" + "\tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'; + \(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb; + \(getF, v) \ ran tcb_cte_cases. getF new_tcb' = getF tcb'; + tcbSchedPrev new_tcb' = tcbSchedPrev tcb'; tcbSchedNext new_tcb' = tcbSchedNext tcb'; + tcbQueued new_tcb' = tcbQueued tcb'; exst_same tcb' new_tcb'; + r () ()\ \ + corres r + (\s. get_tcb ptr s = Some tcb) (\s'. (tcb', s') \ fst (getObject ptr s')) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" apply (rule corres_guard_imp) - apply (erule (3) setObject_update_TCB_corres', force) - apply fastforce - apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def - loadObject_default_def objBits_simps' in_magnitude_check) + apply (erule (7) setObject_update_TCB_corres') + apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def + loadObject_default_def objBits_simps' in_magnitude_check)+ done lemma getObject_TCB_corres: @@ -365,7 +475,8 @@ lemma ball_tcb_cte_casesI: by (simp add: tcb_cte_cases_def cteSizeBits_def) lemma all_tcbI: - "\ \a b c d e f g h i j k l m n p q. P (Thread a b c d e f g h i j k l m n p q) \ \ \tcb. P tcb" + "\ \a b c d e f g h i j k l m n p q r s. P (Thread a b c d e f g h i j k l m n p q r s) \ + \ \tcb. P tcb" by (rule allI, case_tac tcb, simp) lemma threadset_corresT: @@ -374,6 +485,9 @@ lemma threadset_corresT: assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (f' tcb) = getF tcb" + assumes sched_pointers: "\tcb. tcbSchedPrev (f' tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (f' tcb) = tcbSchedNext tcb" + assumes flag: "\tcb. tcbQueued (f' tcb) = tcbQueued tcb" assumes e: "\tcb'. 
exst_same tcb' (f' tcb')" shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ @@ -382,10 +496,13 @@ lemma threadset_corresT: apply (rule corres_guard_imp) apply (rule corres_split[OF getObject_TCB_corres]) apply (rule setObject_update_TCB_corres') - apply (erule x) - apply (rule y) - apply (clarsimp simp: bspec_split [OF spec [OF z]]) - apply fastforce + apply (erule x) + apply (rule y) + apply (clarsimp simp: bspec_split [OF spec [OF z]]) + apply fastforce + apply (rule sched_pointers) + apply (rule sched_pointers) + apply (rule flag) apply simp apply (rule e) apply wp+ @@ -415,6 +532,9 @@ lemma threadSet_corres_noopT: tcb_relation tcb (fn tcb')" assumes y: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (fn tcb) = getF tcb" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" assumes e: "\tcb'. exst_same tcb' (fn tcb')" shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ (return v) (threadSet fn t)" @@ -424,7 +544,7 @@ proof - apply (simp add: return_def thread_set_def gets_the_def assert_def assert_opt_def simpler_gets_def set_object_def get_object_def put_def get_def bind_def) - apply (subgoal_tac "kheap s(t \ TCB tcb) = kheap s", simp) + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s", simp) apply (simp add: map_upd_triv get_tcb_SomeD)+ done show ?thesis @@ -433,9 +553,12 @@ proof - defer apply (subst bind_return [symmetric], rule corres_underlying_split [OF threadset_corresT]) - apply (simp add: x) - apply simp - apply (rule y) + apply (simp add: x) + apply simp + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) apply (rule e) apply (rule corres_noop [where P=\ and P'=\]) apply simp @@ -454,14 +577,20 @@ lemma threadSet_corres_noop_splitT: getF (fn tcb) = getF tcb" assumes z: "corres r P Q' m m'" assumes w: "\P'\ threadSet fn t \\x. Q'\" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" assumes e: "\tcb'. exst_same tcb' (fn tcb')" shows "corres r (tcb_at t and pspace_aligned and pspace_distinct and P) P' m (threadSet fn t >>= (\rv. m'))" apply (rule corres_guard_imp) apply (subst return_bind[symmetric]) apply (rule corres_split_nor[OF threadSet_corres_noopT]) - apply (simp add: x) - apply (rule y) + apply (simp add: x) + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) apply (rule e) apply (rule z) apply (wp w)+ @@ -688,16 +817,23 @@ lemma threadSet_valid_pspace'T_P: assumes v: "\tcb. (P \ Q' (tcbBoundNotification tcb)) \ (\s. valid_bound_ntfn' (tcbBoundNotification tcb) s \ valid_bound_ntfn' (tcbBoundNotification (F tcb)) s)" - + assumes p: "\tcb. (P \ Q'' (tcbSchedPrev tcb)) \ + (\s. none_top tcb_at' (tcbSchedPrev tcb) s + \ none_top tcb_at' (tcbSchedPrev (F tcb)) s)" + assumes n: "\tcb. (P \ Q''' (tcbSchedNext tcb)) \ + (\s. none_top tcb_at' (tcbSchedNext tcb) s + \ none_top tcb_at' (tcbSchedNext (F tcb)) s)" assumes y: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" assumes u: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" assumes w: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" assumes w': "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" shows - "\valid_pspace' and (\s. 
P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s)\ - threadSet F t - \\rv. valid_pspace'\" + "\valid_pspace' and (\s. P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s + \ obj_at' (\tcb. Q'' (tcbSchedPrev tcb)) t s + \ obj_at' (\tcb. Q''' (tcbSchedNext tcb)) t s)\ + threadSet F t + \\_. valid_pspace'\" apply (simp add: valid_pspace'_def threadSet_def) apply (rule hoare_pre, wp setObject_tcb_valid_objs getObject_tcb_wp) @@ -705,7 +841,7 @@ lemma threadSet_valid_pspace'T_P: apply (erule(1) valid_objsE') apply (clarsimp simp add: valid_obj'_def valid_tcb'_def bspec_split [OF spec [OF x]] z - split_paired_Ball y u w v w') + split_paired_Ball y u w v w' p n) done lemmas threadSet_valid_pspace'T = @@ -779,6 +915,10 @@ lemma threadSet_iflive'T: \ tcbState (F tcb) \ Inactive \ tcbState (F tcb) \ IdleThreadState \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. tcbSchedNext tcb = None \ tcbSchedNext (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. tcbSchedPrev tcb = None \ tcbSchedPrev (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb) \ ko_at' tcb t s) \ ex_nonz_cap_to' t s)\ threadSet F t @@ -786,8 +926,7 @@ lemma threadSet_iflive'T: apply (simp add: threadSet_def) apply (wp setObject_tcb_iflive' getObject_tcb_wp) apply (clarsimp simp: obj_at'_def) - apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric]) - apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric]) + apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric])+ apply (rule conjI) apply (rule impI, clarsimp) apply (erule if_live_then_nonz_capE') @@ -803,7 +942,7 @@ lemma threadSet_cte_wp_at'T: getF (F tcb) = getF tcb" shows "\\s. P' (cte_wp_at' P p s)\ threadSet F t \\rv s. P' (cte_wp_at' P p s)\" apply (simp add: threadSet_def) - apply (rule hoare_seq_ext [where B="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) + apply (rule bind_wp [where Q'="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) apply (rename_tac tcb) apply (rule setObject_cte_wp_at2') apply (clarsimp simp: updateObject_default_def in_monad objBits_simps' @@ -833,6 +972,12 @@ lemmas threadSet_ctes_of = lemmas threadSet_cap_to' = ex_nonz_cap_to_pres' [OF threadSet_cte_wp_at'] +lemma threadSet_cap_to: + "(\tcb. \(getF, v)\ran tcb_cte_cases. getF (f tcb) = getF tcb) + \ threadSet f tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: hoare_vcg_ex_lift threadSet_cte_wp_at' + simp: ex_nonz_cap_to'_def tcb_cte_cases_def objBits_simps') + lemma threadSet_idle'T: assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" shows @@ -870,30 +1015,6 @@ lemma set_tcb_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ done -lemma threadSet_valid_queues_no_bitmap: - "\ valid_queues_no_bitmap and - (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) - \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s - \ t \ set (ksReadyQueues s (d, p)) - )\ - threadSet f t - \\rv. 
valid_queues_no_bitmap \" - apply (simp add: threadSet_def) - apply wp - apply (simp add: Invariants_H.valid_queues_no_bitmap_def' pred_tcb_at'_def) - - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def) - apply (fastforce) - done - lemma threadSet_valid_bitmapQ[wp]: "\ valid_bitmapQ \ threadSet f t \ \rv. valid_bitmapQ \" unfolding bitmapQ_defs threadSet_def @@ -912,72 +1033,6 @@ lemma threadSet_valid_bitmapQ_no_L2_orphans[wp]: by (clarsimp simp: setObject_def split_def) (wp | simp add: updateObject_default_def)+ -lemma threadSet_valid_queues: - "\Invariants_H.valid_queues and - (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) - \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s - \ t \ set (ksReadyQueues s (d, p)) - )\ - threadSet f t - \\rv. Invariants_H.valid_queues\" - unfolding valid_queues_def - by (wp threadSet_valid_queues_no_bitmap;simp) - -definition - addToQs :: "(Structures_H.tcb \ Structures_H.tcb) - \ machine_word \ (domain \ priority \ machine_word list) - \ (domain \ priority \ machine_word list)" -where - "addToQs F t \ \qs (qdom, prio). if (\ko. \ inQ qdom prio (F ko)) - then t # qs (qdom, prio) - else qs (qdom, prio)" - -lemma addToQs_set_def: - "(t' \ set (addToQs F t qs (qdom, prio))) = (t' \ set (qs (qdom, prio)) - \ (t' = t \ (\ko. \ inQ qdom prio (F ko))))" - by (auto simp add: addToQs_def) - -lemma threadSet_valid_queues_addToQs: - "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko - \ t \ set (ksReadyQueues s (qdom, prio))) - \ valid_queues' (ksReadyQueues_update (addToQs F t) s)\ - threadSet F t - \\rv. valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def objBits_simps addToQs_set_def - split del: if_split cong: if_cong) - apply (fastforce split: if_split_asm) - done - -lemma threadSet_valid_queues_Qf: - "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko - \ t \ set (ksReadyQueues s (qdom, prio))) - \ valid_queues' (ksReadyQueues_update Qf s) - \ (\prio. set (Qf (ksReadyQueues s) prio) - \ set (addToQs F t (ksReadyQueues s) prio))\ - threadSet F t - \\rv. valid_queues'\" - apply (wp threadSet_valid_queues_addToQs) - apply (clarsimp simp: valid_queues'_def subset_iff) - done - -lemma addToQs_subset: - "set (qs p) \ set (addToQs F t qs p)" -by (clarsimp simp: addToQs_def split_def) - -lemmas threadSet_valid_queues' - = threadSet_valid_queues_Qf - [where Qf=id, simplified ksReadyQueues_update_id - id_apply addToQs_subset simp_thms] - lemma threadSet_cur: "\\s. cur_tcb' s\ threadSet f t \\rv s. cur_tcb' s\" apply (simp add: threadSet_def cur_tcb'_def) @@ -993,7 +1048,7 @@ lemma modifyReadyQueuesL1Bitmap_obj_at[wp]: crunches setThreadState, setBoundNotification for valid_arch' [wp]: valid_arch_state' - (simp: unless_def crunch_simps) + (simp: unless_def crunch_simps wp: crunch_wps) crunch ksInterrupt'[wp]: threadSet "\s. 
P (ksInterruptState s)" (wp: setObject_ksInterrupt updateObject_default_inv) @@ -1014,20 +1069,18 @@ lemma threadSet_obj_at'_really_strongest: "\\s. tcb_at' t s \ obj_at' (\obj. if t = t' then P (f obj) else P obj) t' s\ threadSet f t \\rv. obj_at' P t'\" apply (simp add: threadSet_def) - apply (rule hoare_wp_splits) - apply (rule setObject_tcb_strongest) - apply (simp only: imp_conv_disj) - apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) - apply (rule hoare_post_imp [where Q="\rv s. \ tcb_at' t s \ tcb_at' t s"]) - apply simp - apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) - apply (rule getObject_inv_tcb) - apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply (wp setObject_tcb_strongest) + apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) + apply (rule hoare_post_imp [where Q="\rv s. \ tcb_at' t s \ tcb_at' t s"]) apply simp - apply (simp add: objBits_simps') - apply (erule obj_at'_weakenE) - apply simp - apply (cases "t = t'", simp_all) + apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) + apply (rule getObject_inv_tcb) + apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply simp + apply (simp add: objBits_simps') + apply (erule obj_at'_weakenE) + apply simp + apply (cases "t = t'", simp_all) apply (rule OMG_getObject_tcb) apply wp done @@ -1248,56 +1301,103 @@ lemma threadSet_valid_dom_schedule': unfolding threadSet_def by (wp setObject_ksDomSchedule_inv hoare_Ball_helper) +lemma threadSet_wp: + "\\s. \tcb. ko_at' tcb t s \ P (s\ksPSpace := (ksPSpace s)(t \ injectKO (f tcb))\)\ + threadSet f t + \\_. P\" + unfolding threadSet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (auto simp: obj_at'_def split: if_splits) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: fun_upd_def) + apply (prop_tac "\ptr. psMap (ksPSpace s) ptr = ksPSpace s ptr") + apply fastforce + apply metis + done + +lemma threadSet_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb\ + \ threadSet F tcbPtr \\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst2[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def) + apply (fastforce simp: opt_map_def obj_at'_def) + done + +lemma threadSet_valid_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb; + \tcb. tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tcbPtr \valid_sched_pointers\" + unfolding valid_sched_pointers_def + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + by (fastforce simp: opt_pred_def opt_map_def obj_at'_def split: option.splits if_splits) + +lemma threadSet_tcbSchedNexts_of: + "(\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb) \ + threadSet F t \\s. P (tcbSchedNexts_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def) + done + +lemma threadSet_tcbSchedPrevs_of: + "(\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb) \ + threadSet F t \\s. P (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def) + done + +lemma threadSet_tcbQueued: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + threadSet F t \\s. 
P (tcbQueued |< tcbs_of' s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_pred_def opt_map_def obj_at'_def) + done + +crunches threadSet + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + lemma threadSet_invs_trivialT: - assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" - assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" - assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" - assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" - shows - "\\s. invs' s \ - (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) \ - (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) \ - ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) \ - (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ - threadSet F t - \\rv. invs'\" -proof - - from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast - note threadSet_sch_actT_P[where P=False, simplified] - have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ - valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" - by (auto simp: z) - show ?thesis - apply (simp add: invs'_def valid_state'_def split del: if_split) - apply (rule hoare_pre) - apply (wp x w v u b - threadSet_valid_pspace'T - threadSet_sch_actT_P[where P=False, simplified] - threadSet_valid_queues - threadSet_state_refs_of'T[where f'=id] - threadSet_iflive'T - threadSet_ifunsafe'T - threadSet_idle'T - threadSet_global_refsT - irqs_masked_lift - valid_irq_node_lift - valid_irq_handlers_lift'' - threadSet_ctes_ofT - threadSet_not_inQ - threadSet_ct_idle_or_in_cur_domain' - threadSet_valid_dom_schedule' - threadSet_valid_queues' - threadSet_cur - untyped_ranges_zero_lift - |clarsimp simp: y z a domains cteCaps_of_def |rule refl)+ - apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) - apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) - by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) -qed + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb" + "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits + \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbPriority (F tcb) = tcbPriority tcb" + "\tcb. 
tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + shows "threadSet F t \invs'\" + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbQueued + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of valid_bitmaps_lift + | clarsimp simp: assms cteCaps_of_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: assms obj_at'_def) lemmas threadSet_invs_trivial = threadSet_invs_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] @@ -1337,6 +1437,70 @@ lemma threadSet_valid_objs': apply (clarsimp elim!: obj_at'_weakenE) done +lemmas typ_at'_valid_tcb'_lift = + typ_at'_valid_obj'_lift[where obj="KOTCB tcb" for tcb, unfolded valid_obj'_def, simplified] + +lemmas setObject_valid_tcb' = typ_at'_valid_tcb'_lift[OF setObject_typ_at'] + +lemma setObject_valid_tcbs': + assumes preserve_valid_tcb': "\s s' ko ko' x n tcb tcb'. + \ (ko', s') \ fst (updateObject val ko ptr x n s); P s; + lookupAround2 ptr (ksPSpace s) = (Some (x, ko), n); + projectKO_opt ko = Some tcb; projectKO_opt ko' = Some tcb'; + valid_tcb' tcb s \ \ valid_tcb' tcb' s" + shows "\valid_tcbs' and P\ setObject ptr val \\rv. valid_tcbs'\" + unfolding valid_tcbs'_def + apply (clarsimp simp: valid_def) + apply (rename_tac s s' ptr' tcb) + apply (prop_tac "\tcb'. valid_tcb' tcb s \ valid_tcb' tcb s'") + apply clarsimp + apply (erule (1) use_valid[OF _ setObject_valid_tcb']) + apply (drule spec, erule mp) + apply (clarsimp simp: setObject_def in_monad split_def lookupAround2_char1) + apply (rename_tac s ptr' new_tcb' ptr'' old_tcb_ko' s' f) + apply (case_tac "ptr'' = ptr'"; clarsimp) + apply (prop_tac "\old_tcb' :: tcb. projectKO_opt old_tcb_ko' = Some old_tcb'") + apply (frule updateObject_type) + apply (case_tac old_tcb_ko'; clarsimp simp: project_inject) + apply (erule exE) + apply (rule preserve_valid_tcb', assumption+) + apply (simp add: prod_eqI lookupAround2_char1) + apply force + apply (clarsimp simp: project_inject) + apply (clarsimp simp: project_inject) + done + +lemma setObject_tcb_valid_tcbs': + "\valid_tcbs' and (tcb_at' t and valid_tcb' v)\ setObject t (v :: tcb) \\rv. valid_tcbs'\" + apply (rule setObject_valid_tcbs') + apply (clarsimp simp: updateObject_default_def in_monad project_inject) + done + +lemma threadSet_valid_tcb': + "\valid_tcb' tcb and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. valid_tcb' tcb\" + apply (simp add: threadSet_def) + apply (wpsimp wp: setObject_valid_tcb') + done + +lemma threadSet_valid_tcbs': + "\valid_tcbs' and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. 
valid_tcbs'\" + apply (simp add: threadSet_def) + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (wpsimp wp: setObject_tcb_valid_tcbs') + apply (clarsimp simp: obj_at'_def valid_tcbs'_def) + done + +lemma asUser_valid_tcbs'[wp]: + "asUser t f \valid_tcbs'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_valid_tcbs' hoare_drop_imps + simp: valid_tcb'_def tcb_cte_cases_def objBits_simps') + done + lemma asUser_corres': assumes y: "corres_underlying Id False True r \ \ f g" shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ @@ -1476,14 +1640,6 @@ lemma asUser_valid_pspace'[wp]: apply (wp threadSet_valid_pspace' hoare_drop_imps | simp)+ done -lemma asUser_valid_queues[wp]: - "\Invariants_H.valid_queues\ asUser t m \\rv. Invariants_H.valid_queues\" - apply (simp add: asUser_def split_def) - apply (wp hoare_drop_imps | simp)+ - - apply (wp threadSet_valid_queues hoare_drop_imps | simp)+ - done - lemma asUser_ifunsafe'[wp]: "\if_unsafe_then_cap'\ asUser t m \\rv. if_unsafe_then_cap'\" apply (simp add: asUser_def split_def) @@ -1570,9 +1726,8 @@ lemma no_fail_asUser [wp]: "no_fail \ f \ no_fail (tcb_at' t) (asUser t f)" apply (simp add: asUser_def split_def) apply wp - apply (simp add: no_fail_def) - apply (wp hoare_drop_imps) - apply simp + apply (simp add: no_fail_def) + apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+ done lemma asUser_setRegister_corres: @@ -1761,19 +1916,22 @@ lemma ethreadget_corres: apply (simp add: x) done -lemma setQueue_corres: - "corres dc \ \ (set_tcb_queue d p q) (setQueue d p q)" - apply (rule corres_no_failI) - apply wp - apply (clarsimp simp: setQueue_def in_monad set_tcb_queue_def return_def simpler_modify_def) - apply (fastforce simp: state_relation_def ready_queues_relation_def) - done - - -lemma getQueue_corres: "corres (=) \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" - apply (clarsimp simp add: getQueue_def state_relation_def ready_queues_relation_def get_tcb_queue_def gets_def) - apply (fold gets_def) - apply simp +lemma getQueue_corres: + "corres (\ls q. (ls = [] \ tcbQueueEmpty q) \ (ls \ [] \ tcbQueueHead q = Some (hd ls)) + \ queue_end_valid ls q) + \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" + apply (clarsimp simp: get_tcb_queue_def getQueue_def tcbQueueEmpty_def) + apply (rule corres_bind_return2) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]) + apply (rule corres_symb_exec_r[OF _ gets_sp]) + apply clarsimp + apply (drule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x=qdom in spec) + apply (drule_tac x=prio in spec) + apply (fastforce dest: heap_path_head) + apply wpsimp+ done lemma no_fail_return: @@ -1788,8 +1946,8 @@ lemma addToBitmap_noop_corres: (wp | simp add: state_relation_def | rule no_fail_pre)+ lemma addToBitmap_if_null_noop_corres: (* used this way in Haskell code *) - "corres dc \ \ (return ()) (if null queue then addToBitmap d p else return ())" - by (cases "null queue", simp_all add: addToBitmap_noop_corres) + "corres dc \ \ (return ()) (if tcbQueueEmpty queue then addToBitmap d p else return ())" + by (cases "tcbQueueHead queue", simp_all add: addToBitmap_noop_corres) lemma removeFromBitmap_corres_noop: "corres dc \ \ (return ()) (removeFromBitmap tdom prioa)" @@ -1806,56 +1964,701 @@ crunch typ_at'[wp]: removeFromBitmap "\s. 
P (typ_at' T p s)" lemmas addToBitmap_typ_ats [wp] = typ_at_lifts [OF addToBitmap_typ_at'] lemmas removeFromBitmap_typ_ats [wp] = typ_at_lifts [OF removeFromBitmap_typ_at'] +lemma ekheap_relation_tcb_domain_priority: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s t = Some (tcb); + ksPSpace s' t = Some (KOTCB tcb')\ + \ tcbDomain tcb' = tcb_domain tcb \ tcbPriority tcb' = tcb_priority tcb" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=t in bspec, blast) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def) + done + +lemma no_fail_thread_get[wp]: + "no_fail (tcb_at tcb_ptr) (thread_get f tcb_ptr)" + unfolding thread_get_def + apply wpsimp + apply (clarsimp simp: tcb_at_def) + done + +lemma pspace_relation_tcb_relation: + "\pspace_relation (kheap s) (ksPSpace s'); kheap s ptr = Some (TCB tcb); + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ tcb_relation tcb tcb'" + apply (clarsimp simp: pspace_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: tcb_relation_cut_def obj_at_def obj_at'_def) + done + +lemma pspace_relation_update_concrete_tcb: + "\pspace_relation s s'; s ptr = Some (TCB tcb); s' ptr = Some (KOTCB otcb'); + tcb_relation tcb tcb'\ + \ pspace_relation s (s'(ptr \ KOTCB tcb'))" + by (fastforce dest: pspace_relation_update_tcbs simp: map_upd_triv) + +lemma threadSet_pspace_relation: + fixes s :: det_state + assumes tcb_rel: "(\tcb tcb'. tcb_relation tcb tcb' \ tcb_relation tcb (F tcb'))" + shows "threadSet F tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply normalise_obj_at' + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply (clarsimp simp: obj_at_def is_tcb_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule pspace_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def) + apply (frule (1) pspace_relation_tcb_relation) + apply (fastforce simp: obj_at'_def) + apply (fastforce dest!: tcb_rel) + done + +lemma ekheap_relation_update_tcbs: + "\ ekheap_relation (ekheap s) (ksPSpace s'); ekheap s x = Some oetcb; + ksPSpace s' x = Some (KOTCB otcb'); etcb_relation etcb tcb' \ + \ ekheap_relation ((ekheap s)(x \ etcb)) ((ksPSpace s')(x \ KOTCB tcb'))" + by (simp add: ekheap_relation_def) + +lemma ekheap_relation_update_concrete_tcb: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB otcb'); + etcb_relation etcb tcb'\ + \ ekheap_relation (ekheap s) ((ksPSpace s')(ptr \ KOTCB tcb'))" + by (fastforce dest: ekheap_relation_update_tcbs simp: map_upd_triv) + +lemma ekheap_relation_etcb_relation: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ etcb_relation etcb tcb'" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: obj_at_def obj_at'_def) + done + +lemma threadSet_ekheap_relation: + fixes s :: det_state + assumes etcb_rel: "(\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation etcb (F tcb'))" + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet F tcbPtr + \\_ s'. 
ekheap_relation (ekheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_tcb_def is_etcb_at_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule ekheap_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def) + apply (frule (1) ekheap_relation_etcb_relation) + apply (fastforce simp: obj_at'_def) + apply (fastforce dest!: etcb_rel) + done + +lemma tcbQueued_update_pspace_relation[wp]: + fixes s :: det_state + shows "threadSet (tcbQueued_update f) tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueued_update_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet (tcbQueued_update f) tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_ekheap_relation simp: etcb_relation_def) + +lemma tcbQueueRemove_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueRemove queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueRemove_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueRemove queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_ekheap_relation threadSet_pspace_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma threadSet_ghost_relation[wp]: + "threadSet f tcbPtr \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s')\" + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma removeFromBitmap_ghost_relation[wp]: + "removeFromBitmap tdom prio \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s')\" + by (rule_tac f=gsUserPages in hoare_lift_Pf2; wpsimp simp: bitmap_fun_defs) + +lemma tcbQueued_update_ctes_of[wp]: + "threadSet (tcbQueued_update f) t \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_of) + +lemma removeFromBitmap_ctes_of[wp]: + "removeFromBitmap tdom prio \\s. P (ctes_of s)\" + by (wpsimp simp: bitmap_fun_defs) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for ghost_relation_projs[wp]: "\s. P (gsUserPages s) (gsCNodes s)" + and ksArchState[wp]: "\s. P (ksArchState s)" + and ksWorkUnitsCompleted[wp]: "\s. P (ksWorkUnitsCompleted s)" + and ksDomainTime[wp]: "\s. P (ksDomainTime s)" + (wp: crunch_wps getObject_tcb_wp simp: setObject_def updateObject_default_def obj_at'_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for tcb_at'[wp]: "\s. tcb_at' tcbPtr s" + (wp: crunch_wps ignore: threadSet) + +lemma set_tcb_queue_projs: + "set_tcb_queue d p queue + \\s. 
P (kheap s) (cdt s) (is_original_cap s) (cur_thread s) (idle_thread s) (scheduler_action s) + (domain_list s) (domain_index s) (cur_domain s) (domain_time s) (machine_state s) + (interrupt_irq_node s) (interrupt_states s) (arch_state s) (caps_of_state s) + (work_units_completed s) (cdt_list s) (ekheap s)\" + by (wpsimp simp: set_tcb_queue_def) + +lemma set_tcb_queue_cte_at: + "set_tcb_queue d p queue \\s. P (swp cte_at s)\" + unfolding set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: swp_def cte_wp_at_def) + done + +lemma set_tcb_queue_projs_inv: + "fst (set_tcb_queue d p queue s) = {(r, s')} \ + kheap s = kheap s' + \ ekheap s = ekheap s' + \ cdt s = cdt s' + \ is_original_cap s = is_original_cap s' + \ cur_thread s = cur_thread s' + \ idle_thread s = idle_thread s' + \ scheduler_action s = scheduler_action s' + \ domain_list s = domain_list s' + \ domain_index s = domain_index s' + \ cur_domain s = cur_domain s' + \ domain_time s = domain_time s' + \ machine_state s = machine_state s' + \ interrupt_irq_node s = interrupt_irq_node s' + \ interrupt_states s = interrupt_states s' + \ arch_state s = arch_state s' + \ caps_of_state s = caps_of_state s' + \ work_units_completed s = work_units_completed s' + \ cdt_list s = cdt_list s' + \ swp cte_at s = swp cte_at s'" + apply (drule singleton_eqD) + by (auto elim!: use_valid_inv[where E=\, simplified] + intro: set_tcb_queue_projs set_tcb_queue_cte_at) + +lemma set_tcb_queue_new_state: + "(rv, t) \ fst (set_tcb_queue d p queue s) \ + t = s\ready_queues := \dom prio. if dom = d \ prio = p then queue else ready_queues s dom prio\" + by (clarsimp simp: set_tcb_queue_def in_monad) + +lemma tcbQueuePrepend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueuePrepend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueuePrepend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueuePrepend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueAppend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueAppend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueueAppend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueAppend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueInsert_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueInsert tcbPtr afterPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueInsert_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueInsert tcbPtr afterPtr + \\_ s'. 
ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma removeFromBitmap_pspace_relation[wp]: + fixes s :: det_state + shows "removeFromBitmap tdom prio \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding bitmap_fun_defs + by wpsimp + +crunches setQueue, removeFromBitmap + for valid_pspace'[wp]: valid_pspace' + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node'[wp]: "\s. P (irq_node' s)" + and typ_at'[wp]: "\s. P (typ_at' T p s)" + and valid_irq_states'[wp]: valid_irq_states' + and ksInterruptState[wp]: "\s. P (ksInterruptState s)" + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and valid_machine_state'[wp]: valid_machine_state' + and cur_tcb'[wp]: cur_tcb' + and ksPSpace[wp]: "\s. P (ksPSpace s)" + (wp: crunch_wps + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def cur_tcb'_def threadSet_cur + bitmap_fun_defs valid_machine_state'_def) + +crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue, setQueue + for pspace_aligned'[wp]: pspace_aligned' + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and pspace_distinct'[wp]: pspace_distinct' + and pspace_canonical'[wp]: pspace_canonical' + and no_0_obj'[wp]: no_0_obj' + and ksSchedulerAction[wp]: "\s. P (ksSchedulerAction s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node[wp]: "\s. P (irq_node' s)" + and typ_at[wp]: "\s. P (typ_at' T p s)" + and interrupt_state[wp]: "\s. P (ksInterruptState s)" + and valid_irq_state'[wp]: valid_irq_states' + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and ctes_of[wp]: "\s. P (ctes_of s)" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and ksMachineState[wp]: "\s. P (ksMachineState s)" + and pspace_in_kernel_mappings'[wp]: pspace_in_kernel_mappings' + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + (wp: crunch_wps threadSet_state_refs_of'[where f'=id and g'=id] + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def bitmap_fun_defs) + +lemma threadSet_ready_queues_relation: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + \\s'. ready_queues_relation s s' \ \ (tcbQueued |< tcbs_of' s') tcbPtr\ + threadSet F tcbPtr + \\_ s'. 
ready_queues_relation s s'\" + supply fun_upd_apply[simp del] + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: list_queue_relation_def obj_at'_def) + apply (rename_tac tcb' d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce intro: heap_path_heap_upd_not_in + simp: inQ_def opt_map_def opt_pred_def obj_at'_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (clarsimp simp: prev_queue_head_def) + apply (prop_tac "ready_queues s d p \ []", fastforce) + apply (fastforce dest: heap_path_head simp: inQ_def opt_pred_def opt_map_def fun_upd_apply) + apply (auto simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + done + +definition in_correct_ready_q_2 where + "in_correct_ready_q_2 queues ekh \ + \d p. \t \ set (queues d p). is_etcb_at' t ekh + \ etcb_at' (\t. tcb_priority t = p \ tcb_domain t = d) t ekh" + +abbreviation in_correct_ready_q :: "det_ext state \ bool" where + "in_correct_ready_q s \ in_correct_ready_q_2 (ready_queues s) (ekheap s)" + +lemmas in_correct_ready_q_def = in_correct_ready_q_2_def + +lemma in_correct_ready_q_lift: + assumes c: "\P. \\s. P (ekheap s)\ f \\rv s. P (ekheap s)\" + assumes r: "\P. f \\s. P (ready_queues s)\" + shows "f \in_correct_ready_q\" + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +definition ready_qs_distinct :: "det_ext state \ bool" where + "ready_qs_distinct s \ \d p. distinct (ready_queues s d p)" + +lemma ready_qs_distinct_lift: + assumes r: "\P. f \\s. P (ready_queues s)\" + shows "f \ready_qs_distinct\" + unfolding ready_qs_distinct_def + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +lemma ready_queues_disjoint: + "\in_correct_ready_q s; ready_qs_distinct s; d \ d' \ p \ p'\ + \ set (ready_queues s d p) \ set (ready_queues s d' p') = {}" + apply (clarsimp simp: ready_qs_distinct_def in_correct_ready_q_def) + apply (rule disjointI) + apply (frule_tac x=d in spec) + apply (drule_tac x=d' in spec) + apply (fastforce simp: etcb_at_def is_etcb_at_def split: option.splits) + done + +lemma isRunnable_sp: + "\P\ + isRunnable tcb_ptr + \\rv s. \tcb'. ko_at' tcb' tcb_ptr s + \ (rv = (tcbState tcb' = Running \ tcbState tcb' = Restart)) + \ P s\" + unfolding isRunnable_def getThreadState_def + apply (wpsimp wp: hoare_case_option_wp getObject_tcb_wp simp: threadGet_def) + apply (fastforce simp: obj_at'_def split: Structures_H.thread_state.splits) + done + +crunch (no_fail) no_fail[wp]: isRunnable + +defs ksReadyQueues_asrt_def: + "ksReadyQueues_asrt + \ \s'. \d p. \ts. ready_queue_relation d p ts (ksReadyQueues s' (d, p)) + (tcbSchedNexts_of s') (tcbSchedPrevs_of s') + (inQ d p |< tcbs_of' s')" + +lemma ksReadyQueues_asrt_cross: + "ready_queues_relation s s' \ ksReadyQueues_asrt s'" + by (fastforce simp: ready_queues_relation_def Let_def ksReadyQueues_asrt_def) + +crunches addToBitmap + for ko_at'[wp]: "\s. P (ko_at' ko ptr s)" + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueues_asrt[wp]: ksReadyQueues_asrt + and st_tcb_at'[wp]: "\s. 
P (st_tcb_at' Q tcbPtr s)" + and valid_tcbs'[wp]: valid_tcbs' + (simp: bitmap_fun_defs ksReadyQueues_asrt_def) + +lemma tcbQueueHead_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts\ + \ \ tcbQueueEmpty queue \ (inQ d p |< tcbs_of' s') (the (tcbQueueHead queue))" + by (fastforce dest: heap_path_head + simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def) + +lemma obj_at'_tcbQueueHead_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueHead queue)) s'" + by (fastforce dest!: tcbQueueHead_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma tcbQueueHead_iff_tcbQueueEnd: + "list_queue_relation ts q nexts prevs \ tcbQueueHead q \ None \ tcbQueueEnd q \ None" + apply (clarsimp simp: list_queue_relation_def queue_end_valid_def) + using heap_path_None + apply fastforce + done + +lemma tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts\ + \ \ tcbQueueEmpty queue \ (inQ d p |< tcbs_of' s') (the (tcbQueueEnd queue))" + apply (frule tcbQueueHead_iff_tcbQueueEnd) + by (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def) + +lemma obj_at'_tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueEnd queue)) s'" + by (fastforce dest!: tcbQueueEnd_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma thread_get_exs_valid[wp]: + "tcb_at tcb_ptr s \ \(=) s\ thread_get f tcb_ptr \\\_. (=) s\" + by (clarsimp simp: thread_get_def get_tcb_def gets_the_def gets_def return_def get_def + exs_valid_def tcb_at_def bind_def) + +lemma ethread_get_sp: + "\P\ ethread_get f ptr + \\rv. etcb_at (\tcb. f tcb = rv) ptr and P\" + apply wpsimp + apply (clarsimp simp: etcb_at_def split: option.splits) + done + +lemma ethread_get_exs_valid[wp]: + "\tcb_at tcb_ptr s; valid_etcbs s\ \ \(=) s\ ethread_get f tcb_ptr \\\_. (=) s\" + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: ethread_get_def get_etcb_def gets_the_def gets_def return_def get_def + is_etcb_at_def exs_valid_def bind_def) + done + +lemma no_fail_ethread_get[wp]: + "no_fail (tcb_at tcb_ptr and valid_etcbs) (ethread_get f tcb_ptr)" + unfolding ethread_get_def + apply wpsimp + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: is_etcb_at_def get_etcb_def) + done + +lemma threadGet_sp: + "\P\ threadGet f ptr \\rv s. \tcb :: tcb. ko_at' tcb ptr s \ f tcb = rv \ P s\" + unfolding threadGet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma in_set_ready_queues_inQ_eq: + "ready_queues_relation s s' \ t \ set (ready_queues s d p) \ (inQ d p |< tcbs_of' s') t" + by (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + +lemma in_ready_q_tcbQueued_eq: + "ready_queues_relation s s' + \ (\d p. 
t \ set (ready_queues s d p)) \ (tcbQueued |< tcbs_of' s') t" + apply (intro iffI) + apply clarsimp + apply (frule in_set_ready_queues_inQ_eq) + apply (fastforce simp: inQ_def opt_map_def opt_pred_def split: option.splits) + apply (fastforce simp: ready_queues_relation_def ready_queue_relation_def Let_def inQ_def + opt_pred_def + split: option.splits) + done + lemma tcbSchedEnqueue_corres: - "corres dc (tcb_at t and is_etcb_at t and pspace_aligned and pspace_distinct) - (Invariants_H.valid_queues and valid_queues') - (tcb_sched_action (tcb_sched_enqueue) t) (tcbSchedEnqueue t)" -proof - - have ready_queues_helper: - "\t tcb a b. \ ekheap a t = Some tcb; obj_at' tcbQueued t b ; valid_queues' b ; - ekheap_relation (ekheap a) (ksPSpace b) \ - \ t \ set (ksReadyQueues b (tcb_domain tcb, tcb_priority tcb))" - unfolding valid_queues'_def - by (fastforce dest: ekheap_relation_absD simp: obj_at'_def inQ_def etcb_relation_def) - - show ?thesis unfolding tcbSchedEnqueue_def tcb_sched_action_def - apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) - apply (fastforce simp: tcb_at_cross state_relation_def) - apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, - where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at'; simp_all) - apply (rule no_fail_pre, wp, blast) - apply (case_tac queued; simp_all) - apply (rule corres_no_failI; simp add: no_fail_return) - apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc - assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def - set_tcb_queue_def simpler_modify_def ready_queues_relation_def - state_relation_def tcb_sched_enqueue_def) - apply (rule ready_queues_helper; auto) - apply (clarsimp simp: when_def) - apply (rule stronger_corres_guard_imp) - apply (rule corres_split[where r'="(=)", OF ethreadget_corres]) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)", OF ethreadget_corres]) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply simp - apply (rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply simp - apply (simp add: tcb_sched_enqueue_def split del: if_split) - apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) - apply simp - apply (rule setQueue_corres[unfolded dc_def]) - apply (rule corres_split_noop_rhs2) - apply (fastforce intro: addToBitmap_noop_corres) - apply (fastforce intro: threadSet_corres_noop simp: tcb_relation_def exst_same_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def - project_inject) - done -qed + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_enqueue tcb_ptr) (tcbSchedEnqueue tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) 
+ apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (clarsimp simp: tcb_sched_action_def tcb_sched_enqueue_def get_tcb_queue_def + tcbSchedEnqueue_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce + apply clarsimp + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) + + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. P'" and P'=P' for P P']) + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueuePrepend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueHead_ksReadyQueues simp: obj_at'_def) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) 
+ + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp simp: setQueue_def tcbQueuePrepend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" and s'=s' + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply auto[1] + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" and st="tcbQueueHead (ksReadyQueues s' (d, p))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (cut_tac xs="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + and st="tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "\ (d = tcb_domain etcb \ p = tcb_priority etcb)") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def opt_pred_def opt_map_def) + apply (metis inQ_def option.simps(5) tcb_of'_TCB) + apply (intro conjI impI; simp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + force simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: tcbQueueEmpty_def) + apply 
(case_tac "t = tcbPtr") + apply (clarsimp simp: inQ_def fun_upd_apply obj_at'_def split: if_splits) + apply (case_tac "t = the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def obj_at'_def fun_upd_apply + split: option.splits) + apply metis + apply (clarsimp simp: inQ_def in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain etcb \ p = tcb_priority etcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def prev_queue_head_def + opt_map_red obj_at'_def + split: if_splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_prepend[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def obj_at'_def fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def split: if_splits) + by (auto dest!: hd_in_set simp: inQ_def in_opt_pred opt_map_def fun_upd_apply + split: if_splits option.splits) definition weak_sch_act_wf :: "scheduler_action \ kernel_state \ bool" @@ -1882,8 +2685,10 @@ lemma getSchedulerAction_corres: done lemma rescheduleRequired_corres: - "corres dc (weak_valid_sched_action and valid_etcbs and pspace_aligned and pspace_distinct) - (Invariants_H.valid_queues and valid_queues') + "corres dc + (weak_valid_sched_action and in_correct_ready_q and ready_qs_distinct and valid_etcbs + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') (reschedule_required) rescheduleRequired" apply (simp add: rescheduleRequired_def reschedule_required_def) apply (rule corres_guard_imp) @@ -1894,7 +2699,7 @@ lemma rescheduleRequired_corres: apply (case_tac action) apply simp apply simp - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply simp apply (rule setSchedulerAction_corres) apply simp @@ -1969,20 +2774,18 @@ lemmas addToBitmap_weak_sch_act_wf[wp] = weak_sch_act_wf_lift[OF addToBitmap_nosch] crunch st_tcb_at'[wp]: removeFromBitmap "st_tcb_at' P t" -crunch pred_tcb_at'[wp]: removeFromBitmap "pred_tcb_at' proj P t" +crunch pred_tcb_at'[wp]: removeFromBitmap "\s. Q (pred_tcb_at' proj P t s)" crunch not_st_tcb_at'[wp]: removeFromBitmap "\s. \ (st_tcb_at' P' t) s" -crunch not_pred_tcb_at'[wp]: removeFromBitmap "\s. 
\ (pred_tcb_at' proj P' t) s" crunch st_tcb_at'[wp]: addToBitmap "st_tcb_at' P' t" -crunch pred_tcb_at'[wp]: addToBitmap "pred_tcb_at' proj P' t" +crunch pred_tcb_at'[wp]: addToBitmap "\s. Q (pred_tcb_at' proj P t s)" crunch not_st_tcb_at'[wp]: addToBitmap "\s. \ (st_tcb_at' P' t) s" -crunch not_pred_tcb_at'[wp]: addToBitmap "\s. \ (pred_tcb_at' proj P' t) s" -crunch obj_at'[wp]: removeFromBitmap "obj_at' P t" +crunch obj_at'[wp]: removeFromBitmap "\s. Q (obj_at' P t s)" -crunch obj_at'[wp]: addToBitmap "obj_at' P t" +crunch obj_at'[wp]: addToBitmap "\s. Q (obj_at' P t s)" lemma removeFromBitmap_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t\ removeFromBitmap tdom prio \\ya. tcb_in_cur_domain' t\" @@ -1999,9 +2802,11 @@ lemma addToBitmap_tcb_in_cur_domain'[wp]: done lemma tcbSchedDequeue_weak_sch_act_wf[wp]: - "\ \s. weak_sch_act_wf (ksSchedulerAction s) s \ tcbSchedDequeue a \ \_ s. weak_sch_act_wf (ksSchedulerAction s) s \" - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_weak_sch_act_wf removeFromBitmap_weak_sch_act_wf | simp add: crunch_simps)+ + "tcbSchedDequeue tcbPtr \\s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wp threadSet_weak_sch_act_wf getObject_tcb_wp removeFromBitmap_weak_sch_act_wf + | simp add: crunch_simps threadGet_def)+ + apply (clarsimp simp: obj_at'_def) done lemma dequeue_nothing_eq[simp]: @@ -2017,47 +2822,342 @@ lemma gets_the_exec: "f s \ None \ (do x \ ge return_def assert_opt_def) done +lemma tcbQueueRemove_no_fail: + "no_fail (\s. tcb_at' tcbPtr s + \ (\ts. list_queue_relation ts queue (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ sym_heap_sched_pointers s \ valid_objs' s) + (tcbQueueRemove queue tcbPtr)" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (frule (1) ko_at_valid_objs') + apply fastforce + apply (clarsimp simp: list_queue_relation_def) + apply (prop_tac "tcbQueueHead queue \ Some tcbPtr \ tcbSchedPrevs_of s tcbPtr \ None") + apply (rule impI) + apply (frule not_head_prev_not_None[where p=tcbPtr]) + apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (fastforce dest: heap_path_head) + apply fastforce + apply (fastforce simp: opt_map_def obj_at'_def valid_tcb'_def valid_bound_tcb'_def) + by (fastforce dest!: not_last_next_not_None[where p=tcbPtr] + simp: queue_end_valid_def opt_map_def obj_at'_def valid_obj'_def valid_tcb'_def) + +crunch (no_fail) no_fail[wp]: removeFromBitmap + +crunches removeFromBitmap + for ready_queues_relation[wp]: "ready_queues_relation s" + and list_queue_relation[wp]: + "\s'. 
list_queue_relation ts (P (ksReadyQueues s'))
+                          (tcbSchedNexts_of s') (tcbSchedPrevs_of s')"
+  (simp: bitmap_fun_defs ready_queues_relation_def)
+
+\ \
+  A direct analogue of tcbQueueRemove, used in tcb_sched_dequeue' below, so that within the proof of
+  tcbSchedDequeue_corres, we may reason in terms of the list operations used within this function
+  rather than @{term filter}.\
+definition tcb_queue_remove :: "'a \ 'a list \ 'a list" where
+  "tcb_queue_remove a ls \
+     if ls = [a]
+     then []
+     else if a = hd ls
+          then tl ls
+          else if a = last ls
+               then butlast ls
+               else list_remove ls a"
+
+definition tcb_sched_dequeue' :: "obj_ref \ unit det_ext_monad" where
+  "tcb_sched_dequeue' tcb_ptr \ do
+     d \ ethread_get tcb_domain tcb_ptr;
+     prio \ ethread_get tcb_priority tcb_ptr;
+     queue \ get_tcb_queue d prio;
+     when (tcb_ptr \ set queue) $ set_tcb_queue d prio (tcb_queue_remove tcb_ptr queue)
+   od"
+
+lemma filter_tcb_queue_remove:
+  "\a \ set ls; distinct ls \ \ filter ((\) a) ls = tcb_queue_remove a ls"
+  apply (clarsimp simp: tcb_queue_remove_def)
+  apply (intro conjI impI)
+    apply (fastforce elim: filter_hd_equals_tl)
+   apply (fastforce elim: filter_last_equals_butlast)
+  apply (fastforce elim: filter_hd_equals_tl)
+  apply (frule split_list)
+  apply (clarsimp simp: list_remove_middle_distinct)
+  apply (subst filter_True | clarsimp simp: list_remove_none)+
+  done
+
+lemma tcb_sched_dequeue_monadic_rewrite:
+  "monadic_rewrite False True (is_etcb_at t and (\s. \d p. distinct (ready_queues s d p)))
+     (tcb_sched_action tcb_sched_dequeue t) (tcb_sched_dequeue' t)"
+  supply if_split[split del]
+  apply (clarsimp simp: tcb_sched_dequeue'_def tcb_sched_dequeue_def tcb_sched_action_def
+                        set_tcb_queue_def)
+  apply (rule monadic_rewrite_bind_tail)+
+   apply (clarsimp simp: when_def)
+   apply (rule monadic_rewrite_if_r)
+    apply (rule_tac P="\_. distinct queue" in monadic_rewrite_guard_arg_cong)
+     apply (frule (1) filter_tcb_queue_remove)
+     apply (metis (mono_tags, lifting) filter_cong)
+    apply (rule monadic_rewrite_modify_noop)
+   apply (wpsimp wp: thread_get_wp)+
+  apply (clarsimp simp: etcb_at_def split: option.splits)
+  apply (prop_tac "(\d' p. if d' = tcb_domain x2 \ p = tcb_priority x2
+                           then filter (\x. x \ t) (ready_queues s (tcb_domain x2) (tcb_priority x2))
+                           else ready_queues s d' p)
+                   = ready_queues s")
+   apply (subst filter_True)
+    apply fastforce
+   apply (clarsimp intro!: ext split: if_splits)
+  apply fastforce
+  done
+
+crunches removeFromBitmap
+  for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)"
+
+lemma list_queue_relation_neighbour_in_set:
+  "\list_queue_relation ls q hp hp'; sym_heap hp hp'; p \ set ls\
+     \ \nbr.
(hp p = Some nbr \ nbr \ set ls) \ (hp' p = Some nbr \ nbr \ set ls)" + apply (rule heap_ls_neighbour_in_set) + apply (fastforce simp: list_queue_relation_def) + apply fastforce + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def) + apply fastforce + done + +lemma in_queue_not_head_or_not_tail_length_gt_1: + "\tcbPtr \ set ls; tcbQueueHead q \ Some tcbPtr \ tcbQueueEnd q \ Some tcbPtr; + list_queue_relation ls q nexts prevs\ + \ Suc 0 < length ls" + apply (clarsimp simp: list_queue_relation_def) + apply (cases ls; fastforce simp: queue_end_valid_def) + done + lemma tcbSchedDequeue_corres: - "corres dc (is_etcb_at t and tcb_at t and pspace_aligned and pspace_distinct) - (Invariants_H.valid_queues) - (tcb_sched_action tcb_sched_dequeue t) (tcbSchedDequeue t)" - apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) - apply (fastforce simp: tcb_at_cross state_relation_def) - apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) - apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (rule no_fail_pre, wp, simp) - apply (case_tac queued) - defer - apply (simp add: when_def) - apply (rule corres_no_failI) - apply (wp) - apply (clarsimp simp: in_monad ethread_get_def set_tcb_queue_def is_etcb_at_def state_relation_def) - apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") - prefer 2 - subgoal by (force simp: tcb_sched_dequeue_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def - ready_queues_relation_def obj_at'_def inQ_def project_inject) - apply (subst gets_the_exec) - apply (simp add: get_etcb_def) - apply (subst gets_the_exec) - apply (simp add: get_etcb_def) - apply (simp add: exec_gets simpler_modify_def get_etcb_def ready_queues_relation_def cong: if_cong get_tcb_queue_def) - apply (simp add: when_def) - apply (rule corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (simp, rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_dequeue_def) - apply (rule setQueue_corres) - apply (rule corres_split_noop_rhs) - apply (clarsimp, rule removeFromBitmap_corres_noop) - apply (rule threadSet_corres_noop; simp_all add: tcb_relation_def exst_same_def) - apply (wp | simp)+ + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and tcb_at tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_objs') + (tcb_sched_action tcb_sched_dequeue tcb_ptr) (tcbSchedDequeue tcbPtr)" + supply heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + list_remove_append[simp del] + apply (rule_tac Q'="tcb_at' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: tcb_at_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (rule monadic_rewrite_guard_imp[OF tcb_sched_dequeue_monadic_rewrite]) + apply (fastforce dest: tcb_at_is_etcb_at simp: 
in_correct_ready_q_def ready_qs_distinct_def) + apply (clarsimp simp: tcb_sched_dequeue'_def get_tcb_queue_def tcbSchedDequeue_def getQueue_def + unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac dom) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac prio) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_if_strong'; fastforce?) + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + apply (fastforce simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; wpsimp?) + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp wp: tcbQueueRemove_no_fail) + apply (fastforce dest: state_relation_ready_queues_relation + simp: ex_abs_underlying_def ready_queues_relation_def ready_queue_relation_def + Let_def inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) + + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp + simp: setQueue_def tcbQueueRemove_def + split_del: if_split) + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply normalise_obj_at' + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply clarsimp + apply (cut_tac p=tcbPtr and ls="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_neighbour_in_set) + apply (fastforce dest!: spec) + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: ready_queues_relation_def Let_def list_queue_relation_def) + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply fast + apply (clarsimp simp: tcbQueueEmpty_def) + apply (prop_tac "Some tcbPtr \ tcbQueueHead (ksReadyQueues s' (d, p))") + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply blast + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI; clarsimp) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (force intro!: heap_path_heap_upd_not_in simp: fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (force simp: prev_queue_head_heap_upd fun_upd_apply) + apply (clarsimp simp: inQ_def in_opt_pred 
fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (force simp: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply assumption + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply split: if_splits) + apply (force simp: not_emptyI opt_map_red) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (force simp: prev_queue_head_def fun_upd_apply opt_map_red opt_map_upd_triv) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply (force simp: not_emptyI opt_map_red) + apply fastforce + apply (clarsimp simp: opt_map_red opt_map_upd_triv) + apply (intro prev_queue_head_heap_upd) + apply (force dest!: spec) + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply fastforce + subgoal + by (clarsimp simp: inQ_def opt_map_def opt_pred_def fun_upd_apply + split: if_splits option.splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (frule heap_path_head') + apply (frule heap_ls_distinct) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply prev_queue_head_def) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: tcb_queue_remove_def inQ_def opt_pred_def fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fastforce + apply (fastforce simp: list_queue_relation_def) + apply (frule list_not_head) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_tail_nonempty) + apply (frule (2) heap_ls_next_of_hd) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI allI) + apply (drule (1) heap_ls_remove_head_not_singleton) + apply (clarsimp simp: opt_map_red opt_map_upd_triv fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply last_tl) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: inQ_def 
opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fast + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: queue_end_valid_def) + apply (frule list_not_last) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_gt_1_imp_butlast_nonempty) + apply (frule (3) heap_ls_prev_of_last) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI; clarsimp?) + apply (drule (1) heap_ls_remove_last_not_singleton) + apply (force elim!: rsubst3[where P=heap_ls] simp: opt_map_def fun_upd_apply) + apply (clarsimp simp: opt_map_def fun_upd_apply) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (meson distinct_in_butlast_not_last in_set_butlastD last_in_set not_last_in_set_butlast) + + \ \tcbPtr is in the middle of the ready queue\ + apply (clarsimp simp: obj_at'_def) + apply (frule set_list_mem_nonempty) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []", fastforce simp: queue_end_valid_def) + apply clarsimp + apply (frule (2) ptr_in_middle_prev_next) + apply fastforce + apply (clarsimp simp: tcb_queue_remove_def) + apply (prop_tac "tcbPtr \ last xs") + apply (clarsimp simp: distinct_append) + apply (prop_tac "tcbPtr \ hd ys") + apply (fastforce dest: hd_in_set simp: distinct_append) + apply (prop_tac "last xs \ hd ys") + apply (metis distinct_decompose2 hd_Cons_tl last_in_set) + apply (prop_tac "list_remove (xs @ tcbPtr # ys) tcbPtr = xs @ ys") + apply (simp add: list_remove_middle_distinct del: list_remove_append) + apply (intro conjI impI allI; (solves \clarsimp simp: distinct_append\)?) + apply (fastforce elim!: rsubst3[where P=heap_ls] + dest!: heap_ls_remove_middle hd_in_set last_in_set + simp: distinct_append not_emptyI opt_map_def fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (case_tac xs; + fastforce simp: prev_queue_head_def opt_map_def fun_upd_apply distinct_append) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply distinct_append + split: option.splits) done lemma thread_get_test: "do cur_ts \ get_thread_state cur; g (test cur_ts) od = @@ -2117,30 +3217,84 @@ lemma setBoundNotification_corres: crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification for tcb'[wp]: "tcb_at' addr" +lemma tcbSchedNext_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedNext_update (\_. ptrOpt)) tcbPtr + \\_. valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedPrev_update (\_. ptrOpt)) tcbPtr + \\_. valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueuePrepend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. 
valid_objs'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift' simp: tcbQueueEmpty_def) + +crunches addToBitmap + for valid_objs'[wp]: valid_objs' + (simp: unless_def crunch_simps wp: crunch_wps) + +lemma tcbSchedEnqueue_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. valid_objs'\" + unfolding tcbSchedEnqueue_def setQueue_def + apply (wpsimp wp: threadSet_valid_objs' getObject_tcb_wp simp: threadGet_def) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + crunches rescheduleRequired, removeFromBitmap for valid_objs'[wp]: valid_objs' - (simp: unless_def crunch_simps) + (simp: crunch_simps) +lemmas ko_at_valid_objs'_pre = + ko_at_valid_objs'[simplified project_inject, atomized, simplified, rule_format] -lemma tcbSchedDequeue_valid_objs' [wp]: "\ valid_objs' \ tcbSchedDequeue t \\_. valid_objs' \" - unfolding tcbSchedDequeue_def - apply (wp threadSet_valid_objs') - apply (clarsimp simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) - apply wp - apply (simp add: if_apply_def2) - apply (wp hoare_drop_imps) - apply (wp | simp cong: if_cong add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def if_apply_def2)+ +lemmas ep_ko_at_valid_objs_valid_ep' = + ko_at_valid_objs'_pre[where 'a=endpoint, simplified injectKO_defs valid_obj'_def, simplified] + +lemmas ntfn_ko_at_valid_objs_valid_ntfn' = + ko_at_valid_objs'_pre[where 'a=notification, simplified injectKO_defs valid_obj'_def, + simplified] + +lemmas tcb_ko_at_valid_objs_valid_tcb' = + ko_at_valid_objs'_pre[where 'a=tcb, simplified injectKO_defs valid_obj'_def, simplified] + +lemma tcbQueueRemove_valid_objs'[wp]: + "tcbQueueRemove queue tcbPtr \valid_objs'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (fastforce dest!: tcb_ko_at_valid_objs_valid_tcb' + simp: valid_tcb'_def valid_bound_tcb'_def obj_at'_def) done +lemma tcbSchedDequeue_valid_objs'[wp]: + "tcbSchedDequeue t \valid_objs'\" + unfolding tcbSchedDequeue_def setQueue_def + by (wpsimp wp: threadSet_valid_objs') + lemma sts_valid_objs': - "\valid_objs' and valid_tcb_state' st\ - setThreadState st t - \\rv. valid_objs'\" - apply (simp add: setThreadState_def setQueue_def isRunnable_def isStopped_def) - apply (wp threadSet_valid_objs') - apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) - apply (wp threadSet_valid_objs' | simp)+ - apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + "\valid_objs' and valid_tcb_state' st and pspace_aligned' and pspace_distinct'\ + setThreadState st t + \\_. valid_objs'\" + apply (wpsimp simp: setThreadState_def wp: threadSet_valid_objs') + apply (rule_tac Q="\_. valid_objs' and pspace_aligned' and pspace_distinct'" in hoare_post_imp) + apply fastforce + apply (wpsimp wp: threadSet_valid_objs') + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) done lemma sbn_valid_objs': @@ -2228,18 +3382,6 @@ lemma setQueue_valid_bitmapQ_except[wp]: unfolding setQueue_def bitmapQ_defs by (wp, clarsimp simp: bitmapQ_def) -lemma setQueue_valid_bitmapQ: (* enqueue only *) - "\ valid_bitmapQ and (\s. (ksReadyQueues s (d, p) = []) = (ts = [])) \ - setQueue d p ts - \\_. valid_bitmapQ \" - unfolding setQueue_def bitmapQ_defs - by (wp, clarsimp simp: bitmapQ_def) - -lemma setQueue_valid_queues': - "\valid_queues' and (\s. \t. 
obj_at' (inQ d p) t s \ t \ set ts)\ - setQueue d p ts \\_. valid_queues'\" - by (wp | simp add: valid_queues'_def setQueue_def)+ - lemma setQueue_cur: "\\s. cur_tcb' s\ setQueue d p ts \\rv s. cur_tcb' s\" unfolding setQueue_def cur_tcb'_def @@ -2361,14 +3503,14 @@ lemma threadSet_queued_sch_act_wf[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ threadSet (tcbQueued_update f) t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - including no_pre + including classic_wp_pre apply (simp add: sch_act_wf_cases split: scheduler_action.split) apply (wp hoare_vcg_conj_lift) apply (simp add: threadSet_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ apply (clarsimp simp: obj_at'_def) apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ apply (simp add: threadSet_def) @@ -2377,9 +3519,17 @@ lemma threadSet_queued_sch_act_wf[wp]: apply (wp tcb_in_cur_domain'_lift | simp add: obj_at'_def)+ done +lemma tcbSchedNext_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedNext_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + +lemma tcbSchedPrev_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedPrev_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + lemma tcbSchedEnqueue_pred_tcb_at'[wp]: "\\s. pred_tcb_at' proj P' t' s \ tcbSchedEnqueue t \\_ s. pred_tcb_at' proj P' t' s\" - apply (simp add: tcbSchedEnqueue_def when_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def when_def unless_def) apply (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ done @@ -2387,8 +3537,9 @@ lemma tcbSchedDequeue_sch_act_wf[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedDequeue t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - unfolding tcbSchedDequeue_def - by (wp setQueue_sch_act | wp sch_act_wf_lift | simp add: if_apply_def2)+ + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wp setQueue_sch_act threadSet_tcbDomain_triv hoare_drop_imps + | wp sch_act_wf_lift | simp add: if_apply_def2)+ crunch nosch: tcbSchedDequeue "\s. P (ksSchedulerAction s)" @@ -2484,21 +3635,22 @@ lemma tcbSchedEnqueue_sch_act[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedEnqueue t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - by (simp add: tcbSchedEnqueue_def unless_def) - (wp setQueue_sch_act | wp sch_act_wf_lift | clarsimp)+ + by (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) + (wp setQueue_sch_act threadSet_tcbDomain_triv | wp sch_act_wf_lift | clarsimp)+ lemma tcbSchedEnqueue_weak_sch_act[wp]: "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ tcbSchedEnqueue t \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" - apply (simp add: tcbSchedEnqueue_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) apply (wp setQueue_sch_act threadSet_weak_sch_act_wf | clarsimp)+ done -lemma threadGet_wp: "\\s. tcb_at' t s \ (\tcb. ko_at' tcb t s \ P (f tcb) s)\ threadGet f t \P\" +lemma threadGet_wp: + "\\s. \tcb. 
ko_at' tcb t s \ P (f tcb) s\ threadGet f t \P\" apply (simp add: threadGet_def) apply (wp getObject_tcb_wp) - apply clarsimp + apply (clarsimp simp: obj_at'_def) done lemma threadGet_const: @@ -2544,14 +3696,6 @@ lemma addToBitmap_bitmapQ: by (wpsimp simp: bitmap_fun_defs bitmapQ_def prioToL1Index_bit_set prioL2Index_bit_set simp_del: bit_exp_iff) -lemma addToBitmap_valid_queues_no_bitmap_except: -" \ valid_queues_no_bitmap_except t \ - addToBitmap d p - \\_. valid_queues_no_bitmap_except t \" - unfolding addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def valid_queues_no_bitmap_except_def - by (wp, clarsimp) - crunch norq[wp]: addToBitmap "\s. P (ksReadyQueues s)" (wp: updateObject_cte_inv hoare_drop_imps) crunch norq[wp]: removeFromBitmap "\s. P (ksReadyQueues s)" @@ -2583,9 +3727,8 @@ lemma prioToL1Index_complement_nth_w2p: lemma valid_bitmapQ_exceptE: "\ valid_bitmapQ_except d' p' s ; d \ d' \ p \ p' \ - \ bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" - unfolding valid_bitmapQ_except_def - by blast + \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (fastforce simp: valid_bitmapQ_except_def) lemma invertL1Index_eq_cancelD: "\ invertL1Index i = invertL1Index j ; i < l2BitmapSize ; j < l2BitmapSize \ @@ -2612,7 +3755,6 @@ lemma removeFromBitmap_bitmapQ_no_L2_orphans[wp]: unfolding bitmap_fun_defs apply (wp, clarsimp simp: bitmap_fun_defs bitmapQ_no_L2_orphans_def)+ apply (rule conjI, clarsimp) - apply (rule conjI, clarsimp) apply (clarsimp simp: complement_nth_w2p l2BitmapSize_def') apply clarsimp apply metis @@ -2701,22 +3843,15 @@ lemma addToBitmap_valid_bitmapQ_except: done lemma addToBitmap_valid_bitmapQ: -" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and - (\s. ksReadyQueues s (d,p) \ []) \ - addToBitmap d p - \\_. valid_bitmapQ \" -proof - - have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and - (\s. ksReadyQueues s (d,p) \ []) \ - addToBitmap d p - \\_. valid_bitmapQ_except d p and - bitmapQ_no_L2_orphans and (\s. bitmapQ d p s \ ksReadyQueues s (d,p) \ []) \" - by (wp addToBitmap_valid_queues_no_bitmap_except addToBitmap_valid_bitmapQ_except - addToBitmap_bitmapQ_no_L2_orphans addToBitmap_bitmapQ; simp) - - thus ?thesis - by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) -qed + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans + and (\s. \ tcbQueueEmpty (ksReadyQueues s (d,p)))\ + addToBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. ?pre s \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done lemma threadGet_const_tcb_at: "\\s. tcb_at' t s \ obj_at' (P s \ f) t s\ threadGet f t \\rv s. P s rv \" @@ -2734,12 +3869,6 @@ lemma threadGet_const_tcb_at_imp_lift: apply (clarsimp simp: obj_at'_def) done -lemma valid_queues_no_bitmap_objD: - "\ valid_queues_no_bitmap s; t \ set (ksReadyQueues s (d, p))\ - \ obj_at' (inQ d p and runnable' \ tcbState) t s" - unfolding valid_queues_no_bitmap_def - by metis - lemma setQueue_bitmapQ_no_L1_orphans[wp]: "\ bitmapQ_no_L1_orphans \ setQueue d p ts @@ -2759,136 +3888,16 @@ lemma setQueue_sets_queue[wp]: unfolding setQueue_def by (wp, simp) -lemma tcbSchedEnqueueOrAppend_valid_queues: - (* f is either (t#ts) or (ts @ [t]), so we define its properties generally *) - assumes f_set[simp]: "\ts. 
t \ set (f ts)" - assumes f_set_insert[simp]: "\ts. set (f ts) = insert t (set ts)" - assumes f_not_empty[simp]: "\ts. f ts \ []" - assumes f_distinct: "\ts. \ distinct ts ; t \ set ts \ \ distinct (f ts)" - shows "\Invariants_H.valid_queues and st_tcb_at' runnable' t and valid_objs' \ - do queued \ threadGet tcbQueued t; - unless queued $ - do tdom \ threadGet tcbDomain t; - prio \ threadGet tcbPriority t; - queue \ getQueue tdom prio; - setQueue tdom prio $ f queue; - when (null queue) $ addToBitmap tdom prio; - threadSet (tcbQueued_update (\_. True)) t - od - od - \\_. Invariants_H.valid_queues\" -proof - - - define could_run where "could_run == - \d p t. obj_at' (\tcb. inQ d p (tcbQueued_update (\_. True) tcb) \ runnable' (tcbState tcb)) t" - - have addToBitmap_could_run: - "\d p. \\s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\ - addToBitmap d p - \\_ s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\" - unfolding bitmap_fun_defs - by (wp, clarsimp simp: could_run_def) - - have setQueue_valid_queues_no_bitmap_except: - "\d p ts. - \ valid_queues_no_bitmap_except t and - (\s. ksReadyQueues s (d, p) = ts \ p \ maxPriority \ d \ maxDomain \ t \ set ts) \ - setQueue d p (f ts) - \\rv. valid_queues_no_bitmap_except t\" - unfolding setQueue_def valid_queues_no_bitmap_except_def null_def - by (wp, auto intro: f_distinct) - - have threadSet_valid_queues_could_run: - "\f. \ valid_queues_no_bitmap_except t and - (\s. \d p. t \ set (ksReadyQueues s (d,p)) \ could_run d p t s) and - valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans \ - threadSet (tcbQueued_update (\_. True)) t - \\rv. Invariants_H.valid_queues \" - unfolding threadSet_def could_run_def - apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) - apply (rule hoare_pre) - apply (simp add: valid_queues_def valid_queues_no_bitmap_def) - apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift - setObject_tcb_strongest) - apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def) - done - - have setQueue_could_run: "\d p ts. - \ valid_queues and (\_. t \ set ts) and - (\s. could_run d p t s) \ - setQueue d p ts - \\rv s. (\d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s)\" - unfolding setQueue_def valid_queues_def could_run_def - by wp (fastforce dest: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def) - - note hoare_vcg_if_lift[wp] hoare_vcg_conj_lift[wp] hoare_vcg_const_imp_lift[wp] - - show ?thesis - unfolding tcbSchedEnqueue_def null_def - apply (rule hoare_pre) - apply (rule hoare_seq_ext) - apply (simp add: unless_def) - apply (wp threadSet_valid_queues_could_run) - apply (wp addToBitmap_could_run addToBitmap_valid_bitmapQ - addToBitmap_valid_queues_no_bitmap_except addToBitmap_bitmapQ_no_L2_orphans)+ - apply (wp setQueue_valid_queues_no_bitmap_except setQueue_could_run - setQueue_valid_bitmapQ_except setQueue_sets_queue setQueue_valid_bitmapQ)+ - apply (wp threadGet_const_tcb_at_imp_lift | simp add: if_apply_def2)+ - apply clarsimp - apply (frule pred_tcb_at') - apply (frule (1) valid_objs'_maxDomain) - apply (frule (1) valid_objs'_maxPriority) - apply (clarsimp simp: valid_queues_def st_tcb_at'_def obj_at'_def valid_queues_no_bitmap_exceptI) - apply (fastforce dest!: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def could_run_def) - done -qed - -lemma tcbSchedEnqueue_valid_queues[wp]: - "\Invariants_H.valid_queues - and st_tcb_at' runnable' t - and valid_objs' \ - tcbSchedEnqueue t - \\_. 
Invariants_H.valid_queues\" - unfolding tcbSchedEnqueue_def - by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) - -lemma tcbSchedAppend_valid_queues[wp]: - "\Invariants_H.valid_queues - and st_tcb_at' runnable' t - and valid_objs' \ - tcbSchedAppend t - \\_. Invariants_H.valid_queues\" - unfolding tcbSchedAppend_def - by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) - -lemma rescheduleRequired_valid_queues[wp]: - "\\s. Invariants_H.valid_queues s \ valid_objs' s \ - weak_sch_act_wf (ksSchedulerAction s) s\ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - apply (fastforce simp: weak_sch_act_wf_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) - done - -lemma rescheduleRequired_valid_queues_sch_act_simple: - "\Invariants_H.valid_queues and sch_act_simple\ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: Invariants_H.valid_queues_def sch_act_simple_def)+ - done - lemma rescheduleRequired_valid_bitmapQ_sch_act_simple: "\ valid_bitmapQ and sch_act_simple\ rescheduleRequired \\_. valid_bitmapQ \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. valid_bitmapQ s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. valid_bitmapQ s \ (rv = ResumeCurrentThread \ rv = ChooseNewThread)" + in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done @@ -2896,12 +3905,12 @@ lemma rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple: "\ bitmapQ_no_L1_orphans and sch_act_simple\ rescheduleRequired \\_. bitmapQ_no_L1_orphans \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. bitmapQ_no_L1_orphans s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. bitmapQ_no_L1_orphans s \ (rv = ResumeCurrentThread \ rv = ChooseNewThread)" + in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done @@ -2909,162 +3918,43 @@ lemma rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple: "\ bitmapQ_no_L2_orphans and sch_act_simple\ rescheduleRequired \\_. bitmapQ_no_L2_orphans \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. bitmapQ_no_L2_orphans s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. bitmapQ_no_L2_orphans s \ (rv = ResumeCurrentThread \ rv = ChooseNewThread)" + in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done lemma sts_valid_bitmapQ_sch_act_simple: "\valid_bitmapQ and sch_act_simple\ - setThreadState st t + setThreadState st t \\_. valid_bitmapQ \" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_valid_bitmapQ_sch_act_simple threadSet_valid_bitmapQ [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma sts_valid_bitmapQ_no_L2_orphans_sch_act_simple: - "\ bitmapQ_no_L2_orphans and sch_act_simple\ - setThreadState st t - \\_. bitmapQ_no_L2_orphans \" + "\bitmapQ_no_L2_orphans and sch_act_simple\ + setThreadState st t + \\_. 
bitmapQ_no_L2_orphans\" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple threadSet_valid_bitmapQ_no_L2_orphans [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma sts_valid_bitmapQ_no_L1_orphans_sch_act_simple: - "\ bitmapQ_no_L1_orphans and sch_act_simple\ - setThreadState st t + "\bitmapQ_no_L1_orphans and sch_act_simple\ + setThreadState st t \\_. bitmapQ_no_L1_orphans \" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple threadSet_valid_bitmapQ_no_L1_orphans [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - -lemma sts_valid_queues: - "\\s. Invariants_H.valid_queues s \ - ((\p. t \ set(ksReadyQueues s p)) \ runnable' st)\ - setThreadState st t \\rv. Invariants_H.valid_queues\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_valid_queues_sch_act_simple - threadSet_valid_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - -lemma sbn_valid_queues: - "\\s. Invariants_H.valid_queues s\ - setBoundNotification ntfn t \\rv. Invariants_H.valid_queues\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - - - -lemma addToBitmap_valid_queues'[wp]: - "\ valid_queues' \ addToBitmap d p \\_. valid_queues' \" - unfolding valid_queues'_def addToBitmap_def - modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def - by (wp, simp) - -lemma tcbSchedEnqueue_valid_queues'[wp]: - "\valid_queues' and st_tcb_at' runnable' t \ - tcbSchedEnqueue t - \\_. valid_queues'\" - apply (simp add: tcbSchedEnqueue_def) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued; simp_all add: unless_def when_def) - apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ - apply (subst conj_commute, wp) - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) - apply wp - apply fastforce - apply wp - apply (subst conj_commute) - apply clarsimp - apply (rule_tac Q="\rv. valid_queues' - and obj_at' (\obj. \ tcbQueued obj) t - and obj_at' (\obj. tcbPriority obj = prio) t - and obj_at' (\obj. tcbDomain obj = tdom) t - and (\s. t \ set (ksReadyQueues s (tdom, prio)))" - in hoare_post_imp) - apply (clarsimp simp: valid_queues'_def obj_at'_def inQ_def) - apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def inQ_def valid_queues'_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def) - done - -lemma rescheduleRequired_valid_queues'_weak[wp]: - "\\s. valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s\ - rescheduleRequired - \\_. 
valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply wpsimp - apply (clarsimp simp: weak_sch_act_wf_def) - done - -lemma rescheduleRequired_valid_queues'_sch_act_simple: - "\valid_queues' and sch_act_simple\ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: valid_queues'_def sch_act_simple_def)+ - done - -lemma setThreadState_valid_queues'[wp]: - "\\s. valid_queues' s\ setThreadState st t \\rv. valid_queues'\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_valid_queues'_sch_act_simple) - apply (rule_tac Q="\_. valid_queues'" in hoare_post_imp) - apply (clarsimp simp: sch_act_simple_def) - apply (wp threadSet_valid_queues') - apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) - done - -lemma setBoundNotification_valid_queues'[wp]: - "\\s. valid_queues' s\ setBoundNotification ntfn t \\rv. valid_queues'\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_queues') - apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) - done - -lemma valid_tcb'_tcbState_update: - "\ valid_tcb_state' st s; valid_tcb' tcb s \ \ valid_tcb' (tcbState_update (\_. st) tcb) s" - apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def valid_tcb_state'_def) - done - -lemma setThreadState_valid_objs'[wp]: - "\ valid_tcb_state' st and valid_objs' \ setThreadState st t \ \_. valid_objs' \" - apply (simp add: setThreadState_def) - apply (wp threadSet_valid_objs' | clarsimp simp: valid_tcb'_tcbState_update)+ - done - -lemma rescheduleRequired_ksQ: - "\\s. sch_act_simple s \ P (ksReadyQueues s p)\ - rescheduleRequired - \\_ s. P (ksReadyQueues s p)\" - including no_pre - apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) - \ P (ksReadyQueues s p)" in hoare_seq_ext) - apply wpsimp - apply (case_tac x; simp) - apply wp + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma setSchedulerAction_ksQ[wp]: @@ -3079,17 +3969,6 @@ lemma sbn_ksQ: "\\s. P (ksReadyQueues s p)\ setBoundNotification ntfn t \\rv s. P (ksReadyQueues s p)\" by (simp add: setBoundNotification_def, wp) -lemma sts_ksQ: - "\\s. sch_act_simple s \ P (ksReadyQueues s p)\ - setThreadState st t - \\_ s. P (ksReadyQueues s p)\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_ksQ) - apply (rule_tac Q="\_ s. P (ksReadyQueues s p)" in hoare_post_imp) - apply (clarsimp simp: sch_act_simple_def)+ - apply (wp, simp) - done - lemma setQueue_ksQ[wp]: "\\s. P ((ksReadyQueues s)((d, p) := q))\ setQueue d p q @@ -3097,22 +3976,6 @@ lemma setQueue_ksQ[wp]: by (simp add: setQueue_def fun_upd_def[symmetric] | wp)+ -lemma tcbSchedEnqueue_ksQ: - "\\s. t' \ set (ksReadyQueues s p) \ t' \ t \ - tcbSchedEnqueue t \\_ s. t' \ set (ksReadyQueues s p)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wpsimp wp: hoare_vcg_imp_lift threadGet_wp) - apply (drule obj_at_ko_at') - apply fastforce - done - -lemma rescheduleRequired_ksQ': - "\\s. t \ set (ksReadyQueues s p) \ sch_act_not t s \ - rescheduleRequired \\_ s. t \ set (ksReadyQueues s p)\" - apply (simp add: rescheduleRequired_def) - apply (wpsimp wp: tcbSchedEnqueue_ksQ) - done - lemma threadSet_tcbState_st_tcb_at': "\\s. P st \ threadSet (tcbState_update (\_. st)) t \\_. st_tcb_at' P t\" apply (simp add: threadSet_def pred_tcb_at'_def) @@ -3123,36 +3986,6 @@ lemma isRunnable_const: "\st_tcb_at' runnable' t\ isRunnable t \\runnable _. 
runnable \" by (rule isRunnable_wp) -lemma sts_ksQ': - "\\s. (runnable' st \ ksCurThread s \ t) \ P (ksReadyQueues s p)\ - setThreadState st t - \\_ s. P (ksReadyQueues s p)\" - apply (simp add: setThreadState_def) - apply (rule hoare_pre_disj') - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] - threadSet_ksQ]]) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) - apply (clarsimp simp: when_def) - apply (case_tac x) - apply (clarsimp, wp)[1] - apply (clarsimp) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF threadSet_ct threadSet_ksQ]]) - apply (rule hoare_seq_ext [OF _ isRunnable_inv]) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF gct_wp gct_wp]]) - apply (rename_tac ct) - apply (case_tac "ct\t") - apply (clarsimp simp: when_def) - apply (wp)[1] - apply (clarsimp) - done - lemma valid_ipc_buffer_ptr'D: assumes yv: "y < unat max_ipc_words" and buf: "valid_ipc_buffer_ptr' a s" @@ -3353,7 +4186,7 @@ lemma zipWithM_x_corres: apply (rule b) apply (rule a) apply (rule corres_trivial, simp) - apply (rule hoare_post_taut)+ + apply (rule hoare_TrueI)+ done @@ -3710,14 +4543,14 @@ lemma ct_in_state'_decomp: shows "\\s. Pre s \ t = (ksCurThread s)\ f \\rv. ct_in_state' Prop\" apply (rule hoare_post_imp [where Q="\rv s. t = ksCurThread s \ st_tcb_at' Prop t s"]) apply (clarsimp simp add: ct_in_state'_def) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (wp x y) apply simp done lemma ct_in_state'_set: "\\s. tcb_at' t s \ P st \ t = ksCurThread s\ setThreadState st t \\rv. ct_in_state' P\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule ct_in_state'_decomp[where t=t]) apply (wp setThreadState_ct') apply (wp setThreadState_st_tcb) @@ -3726,7 +4559,7 @@ lemma ct_in_state'_set: crunches setQueue, rescheduleRequired, tcbSchedDequeue for idle'[wp]: "valid_idle'" - (simp: crunch_simps) + (simp: crunch_simps wp: crunch_wps) lemma sts_valid_idle'[wp]: "\valid_idle' and valid_pspace' and @@ -3766,8 +4599,9 @@ lemma gbn_sp': lemma tcbSchedDequeue_tcbState_obj_at'[wp]: "\obj_at' (P \ tcbState) t'\ tcbSchedDequeue t \\rv. obj_at' (P \ tcbState) t'\" - apply (simp add: tcbSchedDequeue_def) - apply (wp | simp add: o_def split del: if_split cong: if_cong)+ + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: getObject_tcb_wp simp: o_def threadGet_def) + apply (clarsimp simp: obj_at'_def) done crunch typ_at'[wp]: setQueue "\s. P' (typ_at' P t s)" @@ -3786,10 +4620,14 @@ lemma setQueue_pred_tcb_at[wp]: lemma tcbSchedDequeue_pred_tcb_at'[wp]: "\\s. P' (pred_tcb_at' proj P t' s)\ tcbSchedDequeue t \\_ s. 
P' (pred_tcb_at' proj P t' s)\" apply (rule_tac P=P' in P_bool_lift) - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) done lemma sts_st_tcb': @@ -3885,39 +4723,154 @@ crunch nonz_cap[wp]: addToBitmap "ex_nonz_cap_to' t" crunch iflive'[wp]: removeFromBitmap if_live_then_nonz_cap' crunch nonz_cap[wp]: removeFromBitmap "ex_nonz_cap_to' t" -lemma tcbSchedEnqueue_iflive'[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ - tcbSchedEnqueue tcb \\_. if_live_then_nonz_cap'\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ +crunches rescheduleRequired + for cap_to'[wp]: "ex_nonz_cap_to' p" + +lemma tcbQueued_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbQueued_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedNext_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedPrev_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedPrev_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_ctes_of[wp]: + "threadSet (tcbSchedNext_update f) tptr \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_ctes_of[wp]: + "threadSet (tcbSchedPrev_update f) tptr \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedNext_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedPrev_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedNext_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedPrev_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbQueued_update_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbQueued_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbQueued_update_tcb_cte_cases) + +lemma getTCB_wp: + "\\s. \ko :: tcb. 
ko_at' ko p s \ Q ko s\ getObject p \Q\" + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) done -lemma rescheduleRequired_iflive'[wp]: - "\if_live_then_nonz_cap' - and (\s. \t. ksSchedulerAction s = SwitchToThread t - \ st_tcb_at' runnable' t s)\ - rescheduleRequired - \\rv. if_live_then_nonz_cap'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - apply (clarsimp simp: pred_tcb_at'_def obj_at'_real_def) - apply (erule(1) if_live_then_nonz_capD') - apply fastforce +lemma tcbQueueRemove_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers and ex_nonz_cap_to' tcbPtr\ + tcbQueueRemove q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_imp_lift' getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (force dest: sym_heapD2[where p'=tcbPtr] sym_heapD1[where p=tcbPtr] + elim: if_live_then_nonz_capE' + simp: valid_tcb'_def opt_map_def obj_at'_def ko_wp_at'_def) + done + +lemma tcbQueueRemove_ex_nonz_cap_to'[wp]: + "tcbQueueRemove q tcbPtr \ex_nonz_cap_to' tcbPtr'\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_cap_to' hoare_drop_imps getTCB_wp) + +(* We could write this one as "\t. tcbQueueHead t \ ..." instead, but we can't do the same in + tcbQueueAppend_if_live_then_nonz_cap', and it's nicer if the two lemmas are symmetric *) +lemma tcbQueuePrepend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueHead q)) s)\ + tcbQueuePrepend q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueEnd q)) s)\ + tcbQueueAppend q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive') + +lemma tcbQueueInsert_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcbPtr and valid_objs' and sym_heap_sched_pointers\ + tcbQueueInsert tcbPtr afterPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueInsert_def + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' getTCB_wp) + apply (intro conjI) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ko_wp_at'_def obj_at'_def) + apply (erule if_live_then_nonz_capE') + apply (frule_tac p'=afterPtr in sym_heapD2) + apply (fastforce simp: opt_map_def obj_at'_def) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def ko_wp_at'_def obj_at'_def opt_map_def) + done + +lemma tcbSchedEnqueue_iflive'[wp]: + "\if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. 
if_live_then_nonz_cap'\" + unfolding tcbSchedEnqueue_def + apply (wpsimp wp: tcbQueuePrepend_if_live_then_nonz_cap' threadGet_wp) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule_tac p=tcbPtr in if_live_then_nonz_capE') + apply (fastforce simp: ko_wp_at'_def obj_at'_def) + apply clarsimp + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ko_wp_at'_def inQ_def opt_pred_def opt_map_def obj_at'_def + split: option.splits) done +crunches rescheduleRequired + for iflive'[wp]: if_live_then_nonz_cap' + lemma sts_iflive'[wp]: "\\s. if_live_then_nonz_cap' s - \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s)\ + \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s) + \ pspace_aligned' s \ pspace_distinct' s\ setThreadState st t \\rv. if_live_then_nonz_cap'\" apply (simp add: setThreadState_def setQueue_def) - apply (rule hoare_pre) - apply (wp | simp)+ - apply (rule_tac Q="\rv. if_live_then_nonz_cap'" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_iflive' | simp)+ - apply auto - done + apply wpsimp + apply (rule_tac Q="\rv. if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) + apply clarsimp + apply (wpsimp wp: threadSet_iflive') + apply fastforce + done lemma sbn_iflive'[wp]: "\\s. if_live_then_nonz_cap' s @@ -4036,6 +4989,18 @@ lemma setBoundNotification_vms'[wp]: apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) done +lemma threadSet_ct_not_inQ: + "(\tcb. tcbQueued tcb = tcbQueued (F tcb)) + \ threadSet F tcbPtr \\s. P (ct_not_inQ s)\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (erule rsubst[where P=P]) + by (fastforce simp: ct_not_inQ_def obj_at'_def objBits_simps ps_clear_def split: if_splits) + +crunches tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, tcbQueueRemove, addToBitmap + for ct_not_inQ[wp]: ct_not_inQ + (wp: threadSet_ct_not_inQ crunch_wps) + lemma tcbSchedEnqueue_ct_not_inQ: "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ tcbSchedEnqueue t \\_. ct_not_inQ\" @@ -4059,12 +5024,7 @@ lemma tcbSchedEnqueue_ct_not_inQ: done show ?thesis apply (simp add: tcbSchedEnqueue_def unless_def null_def) - apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply wp - apply assumption + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ done qed @@ -4091,12 +5051,7 @@ lemma tcbSchedAppend_ct_not_inQ: done show ?thesis apply (simp add: tcbSchedAppend_def unless_def null_def) - apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply wp - apply assumption + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ done qed @@ -4125,12 +5080,10 @@ lemma rescheduleRequired_sa_cnt[wp]: lemma possibleSwitchTo_ct_not_inQ: "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ possibleSwitchTo t \\_. 
ct_not_inQ\" - (is "\?PRE\ _ \_\") apply (simp add: possibleSwitchTo_def curDomain_def) - apply (wpsimp wp: static_imp_wp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ + apply (wpsimp wp: hoare_weak_lift_imp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ threadGet_wp - | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ - apply (fastforce simp: obj_at'_def) + | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ done lemma threadSet_tcbState_update_ct_not_inQ[wp]: @@ -4146,7 +5099,7 @@ lemma threadSet_tcbState_update_ct_not_inQ[wp]: apply (clarsimp) apply (rule hoare_conjI) apply (rule hoare_weaken_pre) - apply (wps, wp static_imp_wp) + apply (wps, wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb)+ apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4166,7 +5119,7 @@ lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: apply (rule hoare_conjI) apply (rule hoare_weaken_pre) apply wps - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb) apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4210,29 +5163,6 @@ lemma tcbSchedDequeue_ct_not_inQ[wp]: done qed -lemma tcbSchedEnqueue_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ tcbSchedEnqueue t \\_. obj_at' P t'\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp threadGet_wp | simp)+ - apply (clarsimp simp: obj_at'_def) - apply (case_tac obja) - apply fastforce - done - -lemma setThreadState_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ setThreadState st t \\_. obj_at' P t'\" - apply (simp add: setThreadState_def rescheduleRequired_def) - apply (wp hoare_vcg_conj_lift tcbSchedEnqueue_not_st - | wpc - | rule hoare_drop_imps - | simp)+ - apply (clarsimp simp: obj_at'_def) - apply (case_tac obj) - apply fastforce - done - crunch ct_idle_or_in_cur_domain'[wp]: setQueue ct_idle_or_in_cur_domain' (simp: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) @@ -4261,17 +5191,8 @@ lemma removeFromBitmap_ct_idle_or_in_cur_domain'[wp]: | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ done -lemma tcbSchedEnqueue_ksCurDomain[wp]: - "\ \s. P (ksCurDomain s)\ tcbSchedEnqueue tptr \\_ s. P (ksCurDomain s)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply wpsimp - done - -lemma tcbSchedEnqueue_ksDomSchedule[wp]: - "\ \s. P (ksDomSchedule s)\ tcbSchedEnqueue tptr \\_ s. P (ksDomSchedule s)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply wpsimp - done +crunches tcbQueuePrepend + for ct_idle_or_in_cur_domain'[wp]: ct_idle_or_in_cur_domain' lemma tcbSchedEnqueue_ct_idle_or_in_cur_domain'[wp]: "\ct_idle_or_in_cur_domain'\ tcbSchedEnqueue tptr \\_. ct_idle_or_in_cur_domain'\" @@ -4349,12 +5270,375 @@ lemma sts_utr[wp]: apply (wp untyped_ranges_zero_lift) done +lemma removeFromBitmap_bitmapQ: + "\\\ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" + unfolding bitmapQ_defs bitmap_fun_defs + by (wpsimp simp: bitmap_fun_defs) + +lemma removeFromBitmap_valid_bitmapQ[wp]: + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans + and (\s. tcbQueueEmpty (ksReadyQueues s (d,p)))\ + removeFromBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. 
?pre s \ \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: removeFromBitmap_valid_bitmapQ_except removeFromBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done + +crunches tcbSchedDequeue + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + (wp: crunch_wps simp: crunch_simps) + +lemma setQueue_nonempty_valid_bitmapQ': + "\\s. valid_bitmapQ s \ \ tcbQueueEmpty (ksReadyQueues s (d, p))\ + setQueue d p queue + \\_ s. \ tcbQueueEmpty queue \ valid_bitmapQ s\" + apply (wpsimp simp: setQueue_def) + apply (fastforce simp: valid_bitmapQ_def bitmapQ_def) + done + +lemma threadSet_valid_bitmapQ_except[wp]: + "threadSet f tcbPtr \valid_bitmapQ_except d p\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (clarsimp simp: valid_bitmapQ_except_def bitmapQ_def) + done + +lemma threadSet_bitmapQ: + "threadSet F t \bitmapQ domain priority\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + by (clarsimp simp: bitmapQ_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend + for valid_bitmapQ_except[wp]: "valid_bitmapQ_except d p" + and valid_bitmapQ[wp]: valid_bitmapQ + and bitmapQ[wp]: "bitmapQ tdom prio" + (wp: crunch_wps) + +lemma tcbQueued_imp_queue_nonempty: + "\list_queue_relation ts (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)) nexts prevs; + \t. t \ set ts \ (inQ (tcbDomain tcb) (tcbPriority tcb) |< tcbs_of' s) t; + ko_at' tcb tcbPtr s; tcbQueued tcb\ + \ \ tcbQueueEmpty (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb))" + apply (clarsimp simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce dest: heap_path_head simp: inQ_def opt_map_def opt_pred_def obj_at'_def) + done + +lemma tcbSchedDequeue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedDequeue tcbPtr \\_. valid_bitmapQ\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + apply (wpsimp wp: setQueue_nonempty_valid_bitmapQ' hoare_vcg_conj_lift + hoare_vcg_if_lift2 hoare_vcg_const_imp_lift threadGet_wp + | wp (once) hoare_drop_imps)+ + by (fastforce dest!: tcbQueued_imp_queue_nonempty + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + +lemma tcbSchedDequeue_valid_bitmaps[wp]: + "tcbSchedDequeue tcbPtr \valid_bitmaps\" + by (wpsimp simp: valid_bitmaps_def) + +lemma setQueue_valid_bitmapQ': (* enqueue only *) + "\valid_bitmapQ_except d p and bitmapQ d p and K (\ tcbQueueEmpty q)\ + setQueue d p q + \\_. valid_bitmapQ\" + unfolding setQueue_def bitmapQ_defs + by (wpsimp simp: bitmapQ_def) + +lemma tcbSchedEnqueue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedEnqueue tcbPtr \\_. 
valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedEnqueue_def + apply (wpsimp simp: tcbQueuePrepend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp) + apply (fastforce simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def split: if_splits) + done + +crunches tcbSchedEnqueue, tcbSchedAppend + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + +lemma tcbSchedEnqueue_valid_bitmaps[wp]: + "tcbSchedEnqueue tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) + done + +crunches rescheduleRequired, threadSet, setThreadState + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +lemma tcbSchedEnqueue_valid_sched_pointers[wp]: + "tcbSchedEnqueue tcbPtr \valid_sched_pointers\" + apply (clarsimp simp: tcbSchedEnqueue_def getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueuePrepend_def + setQueue_def) + apply (clarsimp simp: valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: valid_sched_pointers_def list_queue_relation_def) + apply (case_tac "ts = []", fastforce simp: tcbQueueEmpty_def) + by (intro conjI impI; + force dest!: hd_in_set heap_path_head + simp: inQ_def opt_pred_def opt_map_def obj_at'_def split: if_splits) + +lemma tcbSchedAppend_valid_sched_pointers[wp]: + "tcbSchedAppend tcbPtr \valid_sched_pointers\" + apply (clarsimp simp: tcbSchedAppend_def getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueueAppend_def + setQueue_def) + apply (clarsimp simp: valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + by (intro conjI impI; + clarsimp dest: last_in_set + simp: valid_sched_pointers_def opt_map_def list_queue_relation_def tcbQueueEmpty_def + queue_end_valid_def inQ_def opt_pred_def obj_at'_def + split: if_splits option.splits; + fastforce) + +lemma tcbSchedDequeue_valid_sched_pointers[wp]: + "\valid_sched_pointers and sym_heap_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. 
valid_sched_pointers\" + supply if_split[split del] fun_upd_apply[simp del] + apply (clarsimp simp: tcbSchedDequeue_def getQueue_def setQueue_def) + apply (wpsimp wp: threadSet_wp getTCB_wp threadGet_wp simp: tcbQueueRemove_def) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp split: if_splits) + apply (frule (1) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI) + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (clarsimp simp: valid_sched_pointers_def) + apply (case_tac "ptr = tcbPtr") + apply (force dest!: heap_ls_last_None + simp: prev_queue_head_def queue_end_valid_def inQ_def opt_map_def obj_at'_def) + apply (simp add: fun_upd_def opt_pred_def) + \ \tcbPtr is the head of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def fun_upd_apply prev_queue_head_def + inQ_def opt_pred_def opt_map_def obj_at'_def + split: if_splits option.splits) + \ \tcbPtr is the end of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def queue_end_valid_def inQ_def opt_pred_def + opt_map_def fun_upd_apply obj_at'_def + split: if_splits option.splits) + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI impI allI) + by (clarsimp simp: valid_sched_pointers_def inQ_def opt_pred_def opt_map_def fun_upd_apply obj_at'_def + split: if_splits option.splits; + auto) + +lemma tcbQueueRemove_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts)\ + tcbQueueRemove q tcbPtr + \\_. 
sym_heap_sched_pointers\" + supply heap_path_append[simp del] + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + apply (rename_tac tcb ts) + + \ \tcbPtr is the head of q, which is not a singleton\ + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: list_queue_relation_def Let_def) + apply (prop_tac "tcbSchedNext tcb \ Some tcbPtr") + apply (fastforce dest: heap_ls_no_loops[where p=tcbPtr] simp: opt_map_def obj_at'_def) + apply (fastforce intro: sym_heap_remove_only' + simp: prev_queue_head_def opt_map_red opt_map_upd_triv obj_at'_def) + + \ \tcbPtr is the end of q, which is not a singleton\ + apply (intro impI) + apply (rule conjI) + apply clarsimp + apply (prop_tac "tcbSchedPrev tcb \ Some tcbPtr") + apply (fastforce dest!: heap_ls_prev_no_loops[where p=tcbPtr] + simp: list_queue_relation_def opt_map_def obj_at'_def) + apply (subst fun_upd_swap, fastforce) + apply (fastforce intro: sym_heap_remove_only simp: opt_map_red opt_map_upd_triv obj_at'_def) + + \ \tcbPtr is in the middle of q\ + apply (intro conjI impI allI) + apply (frule (2) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []") + apply (fastforce simp: list_queue_relation_def queue_end_valid_def) + apply (clarsimp simp: list_queue_relation_def) + apply (frule (3) ptr_in_middle_prev_next) + apply (frule heap_ls_distinct) + apply (rename_tac afterPtr beforePtr xs ys) + apply (frule_tac before=beforePtr and middle=tcbPtr and after=afterPtr + in sym_heap_remove_middle_from_chain) + apply (fastforce dest: last_in_set simp: opt_map_def obj_at'_def) + apply (fastforce dest: hd_in_set simp: opt_map_def obj_at'_def) + apply (rule_tac hp="tcbSchedNexts_of s" in sym_heapD2) + apply fastforce + apply (fastforce simp: opt_map_def obj_at'_def) + apply (fastforce simp: opt_map_def obj_at'_def) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def split: if_splits) + done + +lemma tcbQueuePrepend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueuePrepend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply if_split[split del] + apply (clarsimp simp: tcbQueuePrepend_def) + apply (wpsimp wp: threadSet_wp) + apply (prop_tac "tcbPtr \ the (tcbQueueHead q)") + apply (case_tac "ts = []"; + fastforce dest: heap_path_head simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac a=tcbPtr and b="the (tcbQueueHead q)" in sym_heap_connect) + apply assumption + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def tcbQueueEmpty_def) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def tcbQueueEmpty_def) + done + +lemma tcbQueueInsert_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueInsert tcbPtr afterPtr + \\_. 
sym_heap_sched_pointers\" + apply (clarsimp simp: tcbQueueInsert_def) + \ \forwards step in order to name beforePtr below\ + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (rule bind_wp[OF _ assert_sp]) + apply (rule hoare_ex_pre_conj[simplified conj_commute], rename_tac beforePtr) + apply (rule bind_wp[OF _ assert_sp]) + apply (wpsimp wp: threadSet_wp) + apply normalise_obj_at' + apply (prop_tac "tcbPtr \ afterPtr") + apply (clarsimp simp: list_queue_relation_def opt_map_red obj_at'_def) + apply (prop_tac "tcbPtr \ beforePtr") + apply (fastforce dest: sym_heap_None simp: opt_map_def obj_at'_def split: option.splits) + apply (prop_tac "tcbSchedNexts_of s beforePtr = Some afterPtr") + apply (fastforce intro: sym_heapD2 simp: opt_map_def obj_at'_def) + apply (fastforce dest: sym_heap_insert_into_middle_of_chain + simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def) + done + +lemma tcbQueueAppend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueAppend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply if_split[split del] + apply (clarsimp simp: tcbQueueAppend_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def obj_at'_def + split: if_splits) + apply fastforce + apply (drule_tac a="last ts" and b=tcbPtr in sym_heap_connect) + apply (fastforce dest: heap_ls_last_None) + apply assumption + apply (simp add: opt_map_red tcbQueueEmpty_def) + apply (subst fun_upd_swap, simp) + apply (fastforce simp: opt_map_red opt_map_upd_triv) + done + +lemma tcbQueued_update_sym_heap_sched_pointers[wp]: + "threadSet (tcbQueued_update f) tcbPtr \sym_heap_sched_pointers\" + by (rule sym_heap_sched_pointers_lift; + wpsimp wp: threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of) + +lemma tcbSchedEnqueue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedEnqueue tcbPtr + \\_. sym_heap_sched_pointers\" + unfolding tcbSchedEnqueue_def + apply (wpsimp wp: tcbQueuePrepend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def) + done + +lemma tcbSchedAppend_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedAppend tcbPtr + \\_. sym_heap_sched_pointers\" + unfolding tcbSchedAppend_def + apply (wpsimp wp: tcbQueueAppend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def) + done + +lemma tcbSchedDequeue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. 
sym_heap_sched_pointers\" + unfolding tcbSchedDequeue_def + apply (wpsimp wp: tcbQueueRemove_sym_heap_sched_pointers hoare_vcg_if_lift2 threadGet_wp + simp: bitmap_fun_defs) + apply (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def inQ_def opt_pred_def + opt_map_def obj_at'_def) + done + +crunches setThreadState + for valid_sched_pointers[wp]: valid_sched_pointers + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (simp: crunch_simps wp: crunch_wps threadSet_valid_sched_pointers threadSet_sched_pointers) + lemma sts_invs_minor': "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st \ (st \ Inactive \ \ idle' st \ st' \ Inactive \ \ idle' st')) t and (\s. t = ksIdleThread s \ idle' st) - and (\s. (\p. t \ set(ksReadyQueues s p)) \ runnable' st) and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) and sch_act_simple and invs'\ @@ -4363,21 +5647,21 @@ lemma sts_invs_minor': including no_pre apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp sts_valid_queues valid_irq_node_lift irqs_masked_lift - setThreadState_ct_not_inQ + apply (wp valid_irq_node_lift irqs_masked_lift + setThreadState_ct_not_inQ | simp add: cteCaps_of_def o_def)+ apply (clarsimp simp: sch_act_simple_def) apply (intro conjI) - apply clarsimp - defer - apply (clarsimp dest!: st_tcb_at_state_refs_ofD' - elim!: rsubst[where P=sym_refs] - intro!: ext) - apply (clarsimp elim!: st_tcb_ex_cap'') + apply clarsimp + defer + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' + elim!: rsubst[where P=sym_refs] + intro!: ext) + apply (clarsimp elim!: st_tcb_ex_cap'') + apply fastforce + apply fastforce apply (frule tcb_in_valid_state', clarsimp+) - apply (cases st, simp_all add: valid_tcb_state'_def - split: Structures_H.thread_state.split_asm) - done + by (cases st; simp add: valid_tcb_state'_def split: Structures_H.thread_state.split_asm) lemma sts_cap_to'[wp]: "\ex_nonz_cap_to' p\ setThreadState st t \\rv. ex_nonz_cap_to' p\" @@ -4422,12 +5706,56 @@ lemma threadSet_ct_running': apply wp done +lemma tcbQueuePrepend_tcbPriority_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbQueuePrepend_tcbDomain_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbQueuePrepend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbSchedDequeue_tcbPriority[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedDequeue_tcbDomain[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedEnqueue_tcbPriority_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +lemma tcbSchedEnqueue_tcbDomain_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +crunches rescheduleRequired + for tcbPriority_obj_at'[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t'" + and tcbDomain_obj_at'[wp]: "obj_at' (\tcb. 
P (tcbDomain tcb)) t'" + +lemma setThreadState_tcbPriority_obj_at'[wp]: + "setThreadState ts tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding setThreadState_def + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: obj_at'_def objBits_simps ps_clear_def) + done + lemma setThreadState_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ setThreadState st t \\_. tcb_in_cur_domain' t'\" apply (simp add: tcb_in_cur_domain'_def) apply (rule hoare_pre) apply wps - apply (wp setThreadState_not_st | simp)+ + apply (simp add: setThreadState_def) + apply (wpsimp wp: threadSet_ct_idle_or_in_cur_domain' hoare_drop_imps)+ done lemma asUser_global_refs': "\valid_global_refs'\ asUser t f \\rv. valid_global_refs'\" @@ -4573,10 +5901,13 @@ lemma set_eobject_corres': assumes e: "etcb_relation etcb tcb'" assumes z: "\s. obj_at' P ptr s \ map_to_ctes ((ksPSpace s) (ptr \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" - shows "corres dc (tcb_at ptr and is_etcb_at ptr) - (obj_at' (\ko. non_exst_same ko tcb') ptr - and obj_at' P ptr) - (set_eobject ptr etcb) (setObject ptr tcb')" + shows + "corres dc + (tcb_at ptr and is_etcb_at ptr) + (obj_at' (\ko. non_exst_same ko tcb') ptr and obj_at' P ptr + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain tcb' \ tcbPriority tcb \ tcbPriority tcb') + \ \ tcbQueued tcb) ptr) + (set_eobject ptr etcb) (setObject ptr tcb')" apply (rule corres_no_failI) apply (rule no_fail_pre) apply wp @@ -4597,21 +5928,34 @@ lemma set_eobject_corres': apply (drule(1) bspec) apply (clarsimp simp: non_exst_same_def) apply (case_tac bb; simp) - apply (clarsimp simp: obj_at'_def other_obj_relation_def cte_relation_def tcb_relation_def + apply (clarsimp simp: obj_at'_def other_obj_relation_def tcb_relation_cut_def cte_relation_def + tcb_relation_def split: if_split_asm)+ apply (clarsimp simp: aobj_relation_cuts_def split: RISCV64_A.arch_kernel_obj.splits) apply (rename_tac arch_kernel_obj obj d p ts) apply (case_tac arch_kernel_obj; simp) apply (clarsimp simp: pte_relation_def is_tcb_def split: if_split_asm)+ - apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) - apply (frule bspec, erule domI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: obj_at'_def) - apply (insert e) - apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: obj_at'_def) + apply (insert e) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type split: Structures_A.kernel_object.splits kernel_object.splits arch_kernel_obj.splits) + apply (frule in_ready_q_tcbQueued_eq[where t=ptr]) + apply (rename_tac s' conctcb' abstcb exttcb) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def obj_at'_def non_exst_same_def split: option.splits) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev tcb') = tcbSchedPrevs_of s'") + apply (fastforce simp: opt_map_def obj_at'_def non_exst_same_def split: option.splits) + apply (clarsimp simp: ready_queue_relation_def opt_map_def opt_pred_def obj_at'_def inQ_def + non_exst_same_def + split: option.splits) + apply metis done lemma set_eobject_corres: @@ -4619,9 +5963,13 @@ lemma 
set_eobject_corres: assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" assumes r: "r () ()" - shows "corres r (tcb_at add and (\s. ekheap s add = Some etcb)) - (ko_at' tcb' add) - (set_eobject add etcbu) (setObject add tcbu')" + shows + "corres r + (tcb_at add and (\s. ekheap s add = Some etcb)) + (ko_at' tcb' add + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain tcbu' \ tcbPriority tcb \ tcbPriority tcbu') + \ \ tcbQueued tcb) add) + (set_eobject add etcbu) (setObject add tcbu')" apply (rule_tac F="non_exst_same tcb' tcbu' \ etcb_relation etcbu tcbu'" in corres_req) apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) apply (frule(1) pspace_relation_absD) @@ -4648,24 +5996,27 @@ lemma set_eobject_corres: lemma ethread_set_corresT: assumes x: "\tcb'. non_exst_same tcb' (f' tcb')" - assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. - getF (f' tcb) = getF tcb" - assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ - etcb_relation (f etcb) (f' tcb')" - shows "corres dc (tcb_at t and valid_etcbs) - (tcb_at' t) - (ethread_set f t) (threadSet f' t)" + assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (f' tcb) = getF tcb" + assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation (f etcb) (f' tcb')" + shows + "corres dc + (tcb_at t and valid_etcbs) + (tcb_at' t + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain (f' tcb) + \ tcbPriority tcb \ tcbPriority (f' tcb)) + \ \ tcbQueued tcb) t) + (ethread_set f t) (threadSet f' t)" apply (simp add: ethread_set_def threadSet_def bind_assoc) apply (rule corres_guard_imp) apply (rule corres_split[OF corres_get_etcb set_eobject_corres]) apply (rule x) apply (erule e) apply (simp add: z)+ - apply wp+ + apply (wp getObject_tcb_wp)+ apply clarsimp apply (simp add: valid_etcbs_def tcb_at_st_tcb_at[symmetric]) apply (force simp: tcb_at_def get_etcb_def obj_at_def) - apply simp + apply (clarsimp simp: obj_at'_def) done lemmas ethread_set_corres = diff --git a/proof/refine/RISCV64/Tcb_R.thy b/proof/refine/RISCV64/Tcb_R.thy index 02d6b3dc8f..7109c5bb65 100644 --- a/proof/refine/RISCV64/Tcb_R.thy +++ b/proof/refine/RISCV64/Tcb_R.thy @@ -80,10 +80,10 @@ abbreviation lemma gts_st_tcb': "\tcb_at' t\ getThreadState t \\rv. st_tcb_at' (\st. st = rv) t\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule hoare_post_imp[where Q="\rv s. \rv'. rv = rv' \ st_tcb_at' (\st. st = rv') t s"]) apply simp - apply (wp hoare_ex_wp) + apply (wp hoare_vcg_ex_lift) apply (clarsimp simp add: pred_tcb_at'_def obj_at'_def) done @@ -98,12 +98,12 @@ lemma activate_invs': activateThread \\rv. invs' and (ct_running' or ct_idle')\" apply (simp add: activateThread_def) - apply (rule hoare_seq_ext) - apply (rule_tac B="\state s. invs' s \ sch_act_simple s - \ st_tcb_at' (\st. st = state) thread s - \ thread = ksCurThread s - \ (runnable' state \ idle' state)" in hoare_seq_ext) - apply (case_tac x; simp add: isTS_defs split del: if_split cong: if_cong) + apply (rule bind_wp) + apply (rule_tac Q'="\state s. invs' s \ sch_act_simple s + \ st_tcb_at' (\st. st = state) thread s + \ thread = ksCurThread s + \ (runnable' state \ idle' state)" in bind_wp) + apply (case_tac rv; simp add: isTS_defs split del: if_split cong: if_cong) apply (wp) apply (clarsimp simp: ct_in_state'_def) apply (rule_tac Q="\rv. invs' and ct_idle'" in hoare_post_imp, simp) @@ -150,9 +150,8 @@ lemma activate_sch_act: activateThread \\rv s. 
P (ksSchedulerAction s)\" apply (simp add: activateThread_def getCurThread_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gets_sp]) - apply (rule hoare_seq_ext[where B="\st s. (runnable' or idle') st - \ P (ksSchedulerAction s)"]) + apply (rule bind_wp [OF _ gets_sp]) + apply (rule bind_wp[where Q'="\st s. (runnable' or idle') st \ P (ksSchedulerAction s)"]) apply (rule hoare_pre) apply (wp | wpc | simp add: setThreadState_runnable_simp)+ apply (clarsimp simp: ct_in_state'_def cur_tcb'_def pred_tcb_at' @@ -191,18 +190,13 @@ lemma setupReplyMaster_weak_sch_act_wf[wp]: apply assumption done -crunches setupReplyMaster - for valid_queues[wp]: "Invariants_H.valid_queues" - and valid_queues'[wp]: "valid_queues'" - (wp: crunch_wps simp: crunch_simps) - crunches setup_reply_master, Tcb_A.restart, arch_post_modify_registers for pspace_aligned[wp]: "pspace_aligned :: det_ext state \ _" and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" (wp: crunch_wps simp: crunch_simps) lemma restart_corres: - "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and ex_nonz_cap_to' t) (Tcb_A.restart t) (ThreadDecls_H.restart t)" apply (simp add: Tcb_A.restart_def Thread_H.restart_def) apply (simp add: isStopped_def2 liftM_def) @@ -213,19 +207,20 @@ lemma restart_corres: apply (rule corres_split_nor[OF setupReplyMaster_corres]) apply (rule corres_split_nor[OF setThreadState_corres], simp) apply (rule corres_split[OF tcbSchedEnqueue_corres possibleSwitchTo_corres]) - apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' - sts_valid_queues sts_st_tcb' - | clarsimp simp: valid_tcb_state'_def)+ - apply (rule_tac Q="\rv. valid_sched and cur_tcb and pspace_aligned and pspace_distinct" - in hoare_strengthen_post) - apply wp - apply (simp add: valid_sched_def valid_sched_action_def) - apply (rule_tac Q="\rv. invs' and tcb_at' t" in hoare_strengthen_post) - apply wp - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def) - apply wp+ + apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' + | clarsimp simp: valid_tcb_state'_def | strengthen valid_objs'_valid_tcbs')+ + apply (rule_tac Q="\rv. valid_sched and cur_tcb and pspace_aligned and pspace_distinct" + in hoare_strengthen_post) + apply wp + apply (fastforce simp: valid_sched_def valid_sched_action_def) + apply (rule_tac Q="\rv. 
invs' and ex_nonz_cap_to' t" in hoare_strengthen_post) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def + valid_tcb_state'_def) + apply wp+ apply (simp add: valid_sched_def invs_def tcb_at_is_etcb_at invs_psp_aligned invs_distinct) - apply (clarsimp simp add: invs'_def valid_state'_def sch_act_wf_weak) + apply clarsimp done lemma restart_invs': @@ -312,6 +307,11 @@ lemma asUser_postModifyRegisters_corres: apply (rule corres_stateAssert_assume) by simp+ +crunches restart + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + (simp: crunch_simps wp: crunch_wps threadSet_sched_pointers threadSet_valid_sched_pointers) + lemma invokeTCB_WriteRegisters_corres: "corres (dc \ (=)) (einvs and tcb_at dest and ex_nonz_cap_to dest) (invs' and sch_act_simple and tcb_at' dest and ex_nonz_cap_to' dest) @@ -329,19 +329,21 @@ lemma invokeTCB_WriteRegisters_corres: apply (clarsimp simp: mask_def user_vtop_def cong: if_cong) apply simp - apply (rule no_fail_pre, wp no_fail_mapM) - apply (clarsimp, (wp no_fail_setRegister | simp)+) + apply (wpsimp wp: no_fail_mapM no_fail_setRegister) + apply simp apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]]) apply (rule corres_split_nor[OF corres_when[OF refl restart_corres]]) apply (rule corres_split_nor[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp apply (wp+)[2] - apply ((wp static_imp_wp restart_invs' - | strengthen valid_sched_weak_strg einvs_valid_etcbs invs_valid_queues' invs_queues - invs_weak_sch_act_wf - | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def - dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] + apply ((wp hoare_weak_lift_imp restart_invs' + | strengthen valid_sched_weak_strg einvs_valid_etcbs + invs_weak_sch_act_wf + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues valid_objs'_valid_tcbs' invs_valid_objs' + | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def + dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] apply (rule_tac Q="\_. einvs and tcb_at dest and ex_nonz_cap_to dest" in hoare_post_imp) apply (fastforce simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def dest!: idle_no_ex_cap) prefer 2 @@ -370,6 +372,10 @@ lemma suspend_ResumeCurrentThread_imp_notct[wp]: \\rv s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" by (wpsimp simp: suspend_def) +crunches restart, suspend + for cur_tcb'[wp]: cur_tcb' + (wp: crunch_wps threadSet_cur ignore: threadSet) + lemma invokeTCB_CopyRegisters_corres: "corres (dc \ (=)) (einvs and simple_sched_action and tcb_at dest and tcb_at src and ex_nonz_cap_to src and @@ -434,7 +440,7 @@ proof - apply (simp add: frame_registers_def frameRegisters_def) apply (simp add: getRestartPC_def setNextPC_def dc_def[symmetric]) apply (rule Q[OF refl refl]) - apply (wp mapM_x_wp' | simp)+ + apply (wp mapM_x_wp' hoare_weak_lift_imp | simp)+ apply (rule corres_split_nor) apply (rule corres_when[OF refl]) apply (rule R[OF refl refl]) @@ -444,15 +450,15 @@ proof - apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp - apply (solves \wp static_imp_wp\)+ + apply (solves \wp hoare_weak_lift_imp\)+ apply (rule_tac Q="\_. 
einvs and tcb_at dest" in hoare_post_imp) - apply (clarsimp simp: invs_def valid_state_def valid_pspace_def valid_sched_weak_strg valid_sched_def) + apply (fastforce simp: invs_def valid_state_def valid_pspace_def valid_sched_weak_strg valid_sched_def) prefer 2 apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_post_imp) - apply (clarsimp simp: invs'_def valid_state'_def invs_weak_sch_act_wf cur_tcb'_def) - apply ((wp mapM_x_wp' static_imp_wp | simp+)+)[4] - apply ((wp static_imp_wp restart_invs' | wpc | clarsimp simp: if_apply_def2)+)[2] - apply (wp suspend_nonz_cap_to_tcb static_imp_wp | simp add: if_apply_def2)+ + apply (fastforce simp: invs'_def valid_state'_def invs_weak_sch_act_wf cur_tcb'_def) + apply ((wp mapM_x_wp' hoare_weak_lift_imp | (simp add: cur_tcb'_def[symmetric])+)+)[8] + apply ((wp hoare_weak_lift_imp restart_invs' | wpc | clarsimp simp: if_apply_def2)+)[2] + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp | simp add: if_apply_def2)+ apply (fastforce simp: invs_def valid_state_def valid_pspace_def dest!: idle_no_ex_cap) by (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) @@ -499,38 +505,6 @@ lemma copyreg_invs': \\rv. invs'\" by (rule hoare_strengthen_post, rule copyreg_invs'', simp) -lemma threadSet_valid_queues_no_state: - "\Invariants_H.valid_queues and (\s. \p. t \ set (ksReadyQueues s p))\ - threadSet f t \\_. Invariants_H.valid_queues\" - apply (simp add: threadSet_def) - apply wp - apply (simp add: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def) - done - -lemma threadSet_valid_queues'_no_state: - "(\tcb. tcbQueued tcb = tcbQueued (f tcb)) - \ \valid_queues' and (\s. \p. t \ set (ksReadyQueues s p))\ - threadSet f t \\_. valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def - objBits_simps addToQs_def - split del: if_split cong: if_cong) - apply (fastforce simp: inQ_def split: if_split_asm) - done - lemma isRunnable_corres: "corres (\ts runn. runnable ts = runn) (tcb_at t and pspace_aligned and pspace_distinct) \ @@ -555,16 +529,6 @@ lemma tcbSchedDequeue_not_queued: apply (wp tg_sp' [where P=\, simplified] | simp)+ done -lemma tcbSchedDequeue_not_in_queue: - "\p. \Invariants_H.valid_queues and tcb_at' t and valid_objs'\ tcbSchedDequeue t - \\rv s. t \ set (ksReadyQueues s p)\" - apply (rule_tac Q="\rv. Invariants_H.valid_queues and obj_at' (Not \ tcbQueued) t" - in hoare_post_imp) - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def ) - apply (wp tcbSchedDequeue_not_queued tcbSchedDequeue_valid_queues | - simp add: valid_objs'_maxDomain valid_objs'_maxPriority)+ - done - lemma threadSet_ct_in_state': "(\tcb. tcbState (f tcb) = tcbState tcb) \ \ct_in_state' test\ threadSet f t \\rv. ct_in_state' test\" @@ -610,14 +574,19 @@ lemma tcbSchedDequeue_ct_in_state'[wp]: crunch cur[wp]: tcbSchedDequeue cur_tcb' +crunches tcbSchedDequeue + for st_tcb_at'[wp]: "\s. 
P (st_tcb_at' st tcbPtr s)" + lemma sp_corres2: - "corres dc (valid_etcbs and weak_valid_sched_action and cur_tcb and pspace_aligned and pspace_distinct) - (Invariants_H.valid_queues and valid_queues' and tcb_at' t and - (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' and (\_. x \ maxPriority)) - (set_priority t x) (setPriority t x)" + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and tcb_at t + and valid_queues and pspace_aligned and pspace_distinct) + (tcb_at' t and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and (\_. x \ maxPriority) and sym_heap_sched_pointers and valid_sched_pointers) + (set_priority t x) (setPriority t x)" apply (simp add: setPriority_def set_priority_def thread_set_priority_def) apply (rule stronger_corres_guard_imp) - apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) apply (rule corres_split[OF ethread_set_corres], simp_all)[1] apply (simp add: etcb_relation_def) apply (rule corres_split[OF isRunnable_corres]) @@ -627,27 +596,30 @@ lemma sp_corres2: apply (rule rescheduleRequired_corres) apply (rule possibleSwitchTo_corres) apply ((clarsimp - | wp static_imp_wp hoare_vcg_if_lift hoare_wp_combs gts_wp + | wp hoare_weak_lift_imp hoare_vcg_if_lift hoare_wp_combs gts_wp isRunnable_wp)+)[4] - apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift) - apply clarsimp - apply ((wp hoare_drop_imps hoare_vcg_if_lift hoare_vcg_all_lift - isRunnable_wp threadSet_pred_tcb_no_state threadSet_valid_queues_no_state - threadSet_valid_queues'_no_state threadSet_cur threadSet_valid_objs_tcbPriority_update - threadSet_weak_sch_act_wf threadSet_ct_in_state'[simplified ct_in_state'_def] - | simp add: etcb_relation_def)+)[1] - apply ((wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift hoare_vcg_disj_lift - tcbSchedDequeue_not_in_queue tcbSchedDequeue_valid_queues - tcbSchedDequeue_ct_in_state'[simplified ct_in_state'_def] - | simp add: etcb_relation_def)+)[2] + apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift + ethread_set_not_queued_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+ + apply ((wp hoare_vcg_imp_lift' hoare_vcg_all_lift + isRunnable_wp threadSet_pred_tcb_no_state + threadSet_valid_objs_tcbPriority_update threadSet_sched_pointers + threadSet_valid_sched_pointers tcb_dequeue_not_queued tcbSchedDequeue_not_queued + threadSet_weak_sch_act_wf + | simp add: etcb_relation_def + | strengthen valid_objs'_valid_tcbs' + obj_at'_weakenE[where P="Not \ tcbQueued"] + | wps)+) apply (force simp: valid_etcbs_def tcb_at_st_tcb_at[symmetric] state_relation_def dest: pspace_relation_tcb_at intro: st_tcb_at_opeqI) - apply (force simp: state_relation_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) + apply clarsimp done lemma setPriority_corres: - "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and valid_objs' and (\_. x \ maxPriority)) - (set_priority t x) (setPriority t x)" + "corres dc + (einvs and tcb_at t) + (invs' and tcb_at' t and valid_objs' and (\_. x \ maxPriority)) + (set_priority t x) (setPriority t x)" apply (rule corres_guard_imp) apply (rule sp_corres2) apply (simp add: valid_sched_def valid_sched_action_def invs_psp_aligned invs_distinct invs_def) @@ -673,6 +645,9 @@ definition lemma out_corresT: assumes x: "\tcb v. \(getF, setF)\ran tcb_cap_cases. getF (fn v tcb) = getF tcb" assumes y: "\v. \tcb. \(getF, setF)\ran tcb_cte_cases. 
getF (fn' v tcb) = getF tcb" + assumes sched_pointers: "\tcb v. tcbSchedPrev (fn' v tcb) = tcbSchedPrev tcb" + "\tcb v. tcbSchedNext (fn' v tcb) = tcbSchedNext tcb" + assumes flag: "\tcb v. tcbQueued (fn' v tcb) = tcbQueued tcb" assumes e: "\tcb v. exst_same tcb (fn' v tcb)" shows "out_rel fn fn' v v' \ @@ -680,10 +655,8 @@ lemma out_corresT: \ (option_update_thread t fn v) (case_option (return ()) (\x. threadSet (fn' x) t) v')" - apply (case_tac v, simp_all add: out_rel_def - option_update_thread_def) - apply clarsimp - apply (clarsimp simp add: threadset_corresT [OF _ x y e]) + apply (case_tac v, simp_all add: out_rel_def option_update_thread_def) + apply (clarsimp simp: threadset_corresT [OF _ x y sched_pointers flag e]) done lemmas out_corres = out_corresT [OF _ all_tcbI, OF ball_tcb_cap_casesI ball_tcb_cte_casesI] @@ -692,32 +665,40 @@ lemma tcbSchedDequeue_sch_act_simple[wp]: "tcbSchedDequeue t \sch_act_simple\" by (wpsimp simp: sch_act_simple_def) +lemma tcbSchedNext_update_tcb_cte_cases: + "(a, b) \ ran tcb_cte_cases \ a (tcbPriority_update f tcb) = a tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma threadSet_priority_invs': + "\invs' and tcb_at' t and K (p \ maxPriority)\ + threadSet (tcbPriority_update (\_. p)) t + \\_. invs'\" + apply (rule hoare_gen_asm) + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace' + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + | clarsimp simp: cteCaps_of_def tcbSchedNext_update_tcb_cte_cases | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: obj_at'_def) + lemma setP_invs': "\invs' and tcb_at' t and K (p \ maxPriority)\ setPriority t p \\rv. invs'\" - apply (rule hoare_gen_asm) - apply (simp add: setPriority_def) - apply (wp rescheduleRequired_all_invs_but_ct_not_inQ) - apply simp - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift') - unfolding st_tcb_at'_def - apply (strengthen not_obj_at'_strengthen, wp) - apply (wp hoare_vcg_imp_lift') - apply (rule_tac Q="\rv s. invs' s" in hoare_post_imp) - apply (clarsimp simp: invs_sch_act_wf' invs'_def invs_queues) - apply (clarsimp simp: valid_state'_def) - apply (wp hoare_drop_imps threadSet_invs_trivial, - simp_all add: inQ_def cong: conj_cong)[1] - apply (wp hoare_drop_imps threadSet_invs_trivial, - simp_all add: inQ_def cong: conj_cong)[1] - apply (wp hoare_drop_imps threadSet_invs_trivial, - simp_all add: inQ_def cong: conj_cong)[1] - apply (rule_tac Q="\_. invs' and obj_at' (Not \ tcbQueued) t - and (\s. \d p. t \ set (ksReadyQueues s (d,p)))" - in hoare_post_imp) - apply (clarsimp simp: obj_at'_def inQ_def) - apply (wp tcbSchedDequeue_not_queued)+ - apply clarsimp - done + unfolding setPriority_def + by (wpsimp wp: rescheduleRequired_invs' threadSet_priority_invs') crunches setPriority, setMCPriority for typ_at'[wp]: "\s. 
P (typ_at' T p s)" @@ -984,13 +965,6 @@ lemma setMCPriority_valid_objs'[wp]: crunch sch_act_simple[wp]: setMCPriority sch_act_simple (wp: ssa_sch_act_simple crunch_wps rule: sch_act_simple_lift simp: crunch_simps) -(* For some reason, when this was embedded in a larger expression clarsimp wouldn't remove it. - Adding it as a simp rule does *) -lemma inQ_tc_corres_helper: - "(\d p. (\tcb. tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d \ - (tcbQueued tcb \ tcbDomain tcb \ d)) \ a \ set (ksReadyQueues s (d, p)))" - by clarsimp - abbreviation "valid_option_prio \ case_option True (\(p, auth). p \ maxPriority)" definition valid_tcb_invocation :: "tcbinvocation \ bool" where @@ -1012,35 +986,29 @@ lemma thread_set_ipc_weak_valid_sched_action: get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) done -lemma threadcontrol_corres_helper2: - "is_aligned a msg_align_bits \ - \invs' and tcb_at' t\ - threadSet (tcbIPCBuffer_update (\_. a)) t - \\x s. Invariants_H.valid_queues s \ valid_queues' s\" - by (wp threadSet_invs_trivial - | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: inQ_def )+ - lemma threadcontrol_corres_helper3: - "\ einvs and simple_sched_action\ + "\einvs and simple_sched_action\ check_cap_at cap p (check_cap_at (cap.ThreadCap cap') slot (cap_insert cap p (t, tcb_cnode_index 4))) - \\x. weak_valid_sched_action and valid_etcbs \" - apply (rule hoare_pre) - apply (wp check_cap_inv | simp add:)+ - by (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def - get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) + \\_ s. weak_valid_sched_action s \ in_correct_ready_q s \ ready_qs_distinct s \ valid_etcbs s + \ pspace_aligned s \ pspace_distinct s\" + apply (wpsimp + | strengthen valid_sched_valid_queues valid_queues_in_correct_ready_q + valid_sched_weak_strg[rule_format] valid_queues_ready_qs_distinct)+ + apply (wpsimp wp: check_cap_inv) + apply (fastforce simp: valid_sched_def) + done lemma threadcontrol_corres_helper4: "isArchObjectCap ac \ \invs' and cte_wp_at' (\cte. cteCap cte = capability.NullCap) (cte_map (a, tcb_cnode_index 4)) - and valid_cap' ac \ - checkCapAt ac (cte_map (ab, ba)) - (checkCapAt (capability.ThreadCap a) (cte_map slot) - (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) - \\x. Invariants_H.valid_queues and valid_queues'\" - apply (wp - | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: )+ + and valid_cap' ac\ + checkCapAt ac (cte_map (ab, ba)) + (checkCapAt (capability.ThreadCap a) (cte_map slot) + (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) + \\_ s. sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_tcbs' s\" + apply (wpsimp wp: + | strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + invs_valid_objs' valid_objs'_valid_tcbs')+ by (case_tac ac; clarsimp simp: capBadge_def isCap_simps tcb_cnode_index_def cte_map_def cte_wp_at'_def cte_level_bits_def) @@ -1059,73 +1027,45 @@ lemma is_valid_vtable_root_simp: by (simp add: is_valid_vtable_root_def split: cap.splits arch_cap.splits option.splits) lemma threadSet_invs_trivialT2: - assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" - assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" - assumes a: "\tcb. 
tcbBoundNotification (F tcb) = tcbBoundNotification tcb" - assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" - assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" - assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbPriority (F tcb) = tcbPriority tcb" + "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" shows - "\\s. invs' s - \ tcb_at' t s \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits) - \ (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) - \ (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) - \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) - \ (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ - threadSet F t - \\rv. invs'\" -proof - - from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast - note threadSet_sch_actT_P[where P=False, simplified] - have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ - valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" - by (auto simp: z) - show ?thesis - apply (simp add: invs'_def valid_state'_def split del: if_split) - apply (rule hoare_pre) - apply (rule hoare_gen_asm [where P="(\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)"]) - apply (wp x v u b - threadSet_valid_pspace'T - threadSet_sch_actT_P[where P=False, simplified] - threadSet_valid_queues - threadSet_state_refs_of'T[where f'=id] - threadSet_iflive'T - threadSet_ifunsafe'T - threadSet_idle'T - threadSet_global_refsT - irqs_masked_lift - valid_irq_node_lift - valid_irq_handlers_lift'' - threadSet_ctes_ofT - threadSet_not_inQ - threadSet_ct_idle_or_in_cur_domain' - threadSet_valid_dom_schedule' - threadSet_valid_queues' - threadSet_cur - untyped_ranges_zero_lift - |clarsimp simp: y z a domains cteCaps_of_def |rule refl)+ - apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) - apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) - by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) -qed - -lemma threadSet_valid_queues'_no_state2: - "\ \tcb. tcbQueued tcb = tcbQueued (f tcb); - \tcb. tcbState tcb = tcbState (f tcb); - \tcb. tcbPriority tcb = tcbPriority (f tcb); - \tcb. tcbDomain tcb = tcbDomain (f tcb) \ - \ \valid_queues'\ threadSet f t \\_. valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def objBits_simps addToQs_def - split del: if_split cong: if_cong) - apply (fastforce simp: inQ_def split: if_split_asm) - done + "\\s. 
invs' s \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)\ + threadSet F t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (rule hoare_gen_asm [where P="\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits"]) + apply (wp threadSet_valid_pspace'T + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_global_refsT + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_valid_dom_schedule' + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_idle'T + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + | clarsimp simp: assms cteCaps_of_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: obj_at'_def) lemma getThreadBufferSlot_dom_tcb_cte_cases: "\\\ getThreadBufferSlot a \\rv s. rv \ (+) a ` dom tcb_cte_cases\" @@ -1179,6 +1119,12 @@ crunches option_update_thread for aligned[wp]: "pspace_aligned" and distinct[wp]: "pspace_distinct" +lemma threadSet_invs_tcbIPCBuffer_update: + "\\s. invs' s \ (\tcb. is_aligned (tcbIPCBuffer (tcbIPCBuffer_update f tcb)) msg_align_bits)\ + threadSet (tcbIPCBuffer_update f) t + \\_. invs'\" + by (wp threadSet_invs_trivialT2; simp add: tcb_cte_cases_def cteSizeBits_def) + lemma transferCaps_corres: assumes x: "newroot_rel e e'" and y: "newroot_rel f f'" and z: "(case g of None \ g' = None @@ -1370,10 +1316,20 @@ proof - apply (rule corres_split[OF getCurThread_corres], clarsimp) apply (rule corres_when[OF refl rescheduleRequired_corres]) apply (wpsimp wp: gct_wp)+ - apply (wp thread_set_ipc_weak_valid_sched_action|wp (once) hoare_drop_imp)+ - apply simp - apply (wp threadcontrol_corres_helper2 | wpc | simp)+ - apply (wp|strengthen einvs_valid_etcbs)+ + apply (strengthen valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_ipc_weak_valid_sched_action thread_set_valid_queues + hoare_drop_imp) + apply clarsimp + apply (strengthen valid_objs'_valid_tcbs' invs_valid_objs')+ + apply (wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers hoare_drop_imp + threadSet_invs_tcbIPCBuffer_update) + apply (clarsimp simp: pred_conj_def) + apply (strengthen einvs_valid_etcbs valid_queues_in_correct_ready_q + valid_sched_valid_queues)+ + apply wp + apply (clarsimp simp: pred_conj_def) + apply (strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + valid_objs'_valid_tcbs' invs_valid_objs') apply (wpsimp wp: cteDelete_invs' hoare_vcg_conj_lift) apply (fastforce simp: emptyable_def) apply fastforce @@ -1402,7 +1358,7 @@ proof - cap_delete_valid_cap cteDelete_deletes cteDelete_invs' | strengthen use_no_cap_to_obj_asid_strg - | clarsimp simp: inQ_def inQ_tc_corres_helper)+ + | clarsimp simp: inQ_def)+ apply (clarsimp simp: cte_wp_at_caps_of_state dest!: is_cnode_or_valid_arch_cap_asid) apply (fastforce simp: emptyable_def) @@ -1457,6 +1413,10 @@ proof - out_no_cap_to_trivial [OF ball_tcb_cap_casesI] checked_insert_no_cap_to note if_cong [cong] option.case_cong [cong] + \ \This proof is quite fragile and was written when bind_wp was added to the wp set later + in the theory dependencies, and so was matched with before alternatives. 
We re-add it here to + create a similar environment and avoid needing to rework the proof.\ + note bind_wp[wp] show ?thesis apply (simp add: invokeTCB_def liftE_bindE) apply (simp only: eq_commute[where a= "a"]) @@ -1472,7 +1432,7 @@ proof - apply wp apply wp apply (wpsimp wp: hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift - hoare_vcg_all_lift_R hoare_vcg_all_lift + hoare_vcg_all_liftE_R hoare_vcg_all_lift as_user_invs thread_set_ipc_tcb_cap_valid thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial @@ -1481,37 +1441,17 @@ proof - check_cap_inv[where P=valid_sched] (* from stuff *) check_cap_inv[where P="tcb_at p0" for p0] thread_set_not_state_valid_sched - cap_delete_deletes + check_cap_inv[where P=simple_sched_action] + cap_delete_deletes hoare_drop_imps cap_delete_valid_cap - simp: ran_tcb_cap_cases) + simp: ran_tcb_cap_cases + | strengthen simple_sched_action_sched_act_not)+ apply (strengthen use_no_cap_to_obj_asid_strg) apply (wpsimp wp: cap_delete_cte_at cap_delete_valid_cap) - apply (wpsimp wp: hoare_drop_imps) - apply ((wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_imp_lift' hoare_vcg_all_lift - threadSet_cte_wp_at' threadSet_invs_trivialT2 cteDelete_invs' - simp: tcb_cte_cases_def cteSizeBits_def), (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def cteSizeBits_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_cte_wp_at' - simp: tcb_cte_cases_def) - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def cteSizeBits_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def cteSizeBits_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_cap_to' threadSet_invs_trivialT2 - threadSet_cte_wp_at' hoare_drop_imps - simp: tcb_cte_cases_def cteSizeBits_def) - apply (clarsimp) - apply ((wpsimp wp: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + threadSet_invs_tcbIPCBuffer_update threadSet_cte_wp_at' + | strengthen simple_sched_action_sched_act_not)+ + apply ((wpsimp wp: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift threadSet_valid_objs' thread_set_not_state_valid_sched thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial @@ -1523,14 +1463,14 @@ proof - | strengthen tcb_cap_always_valid_strg tcb_at_invs use_no_cap_to_obj_asid_strg - | (erule exE, clarsimp simp: word_bits_def))+) + | (erule exE, clarsimp simp: word_bits_def) | wp (once) hoare_drop_imps)+) apply (strengthen valid_tcb_ipc_buffer_update) - apply (strengthen invs_valid_objs') + apply (strengthen invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct') apply (wpsimp wp: cteDelete_invs' hoare_vcg_imp_lift' hoare_vcg_all_lift) apply wpsimp apply wpsimp apply (clarsimp cong: imp_cong conj_cong simp: emptyable_def) - apply (rule_tac Q'="\_. ?T2_pre" in hoare_post_imp_R[simplified validE_R_def, rotated]) + apply (rule_tac Q'="\_. 
?T2_pre" in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) (* beginning to deal with is_nondevice_page_cap *) apply (clarsimp simp: emptyable_def is_cap_simps is_cnode_or_valid_arch_def obj_ref_none_no_asid cap_asid_def @@ -1544,9 +1484,9 @@ proof - | wpc | strengthen tcb_cap_always_valid_strg use_no_cap_to_obj_asid_strg)+)[1] apply (clarsimp cong: imp_cong conj_cong) apply (rule_tac Q'="\_. ?T2_pre' and (\s. valid_option_prio p_auth)" - in hoare_post_imp_R[simplified validE_R_def, rotated]) + in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) apply (case_tac g'; clarsimp simp: isCap_simps ; clarsimp cong:imp_cong) - apply (wp add: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + apply (wp add: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift setMCPriority_invs' threadSet_valid_objs' thread_set_not_state_valid_sched setP_invs' typ_at_lifts [OF setPriority_typ_at'] @@ -1557,7 +1497,7 @@ proof - emptyable_def | wpc | strengthen tcb_cap_always_valid_strg use_no_cap_to_obj_asid_strg - | wp (once) add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs + | wp add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs | (erule exE, clarsimp simp: word_bits_def))+ apply (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] tcb_at_st_tcb_at[symmetric] tcb_cap_valid_def is_cnode_or_valid_arch_def invs_valid_objs emptyable_def @@ -1609,31 +1549,31 @@ lemma tc_invs': apply (simp only: eq_commute[where a="a"]) apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. 
x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp setMCPriority_invs' + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] - apply (wp add: setP_invs' static_imp_wp hoare_vcg_all_lift)+ + apply (wp add: setP_invs' hoare_weak_lift_imp hoare_vcg_all_lift)+ apply (rule case_option_wp_None_return[OF setP_invs'[simplified pred_conj_assoc]]) apply clarsimp apply wpfix apply assumption apply (rule case_option_wp_None_returnOk) - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak threadSet_invs_trivial2 threadSet_tcb' hoare_vcg_all_lift threadSet_cte_wp_at')+ - apply (wpsimp wp: static_imp_wpE cteDelete_deletes - hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + apply (wpsimp wp: hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_invs' cteDelete_typ_at'_lifts)+ apply (assumption | clarsimp cong: conj_cong imp_cong | (rule case_option_wp_None_returnOk) - | wpsimp wp: static_imp_wp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + | wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak hoare_vcg_imp_lift' hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] - hoare_vcg_const_imp_lift_R assertDerived_wp_weak static_imp_wpE cteDelete_deletes - hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + hoare_vcg_const_imp_lift_R assertDerived_wp_weak hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_typ_at'_lifts cteDelete_sch_act_simple)+ apply (clarsimp simp: tcb_cte_cases_def cte_level_bits_def objBits_defs tcbIPCBufferSlot_def) by (auto dest!: isCapDs isReplyCapD isValidVTableRootD simp: isCap_simps) @@ -1649,7 +1589,7 @@ lemma setSchedulerAction_invs'[wp]: apply (simp add: setSchedulerAction_def) apply wp apply (clarsimp simp add: invs'_def valid_state'_def valid_irq_node'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs cur_tcb'_def + valid_queues_def bitmapQ_defs cur_tcb'_def ct_not_inQ_def) apply (simp add: ct_idle_or_in_cur_domain'_def) done @@ -1779,8 +1719,8 @@ lemma invokeTCB_corres: apply (rule TcbAcc_R.rescheduleRequired_corres) apply (rule corres_trivial, simp) apply (wpsimp wp: hoare_drop_imp)+ - apply (clarsimp simp: valid_sched_weak_strg einvs_valid_etcbs invs_distinct) - apply (clarsimp simp: invs_valid_queues' invs_queues) + apply (fastforce dest: valid_sched_valid_queues simp: valid_sched_weak_strg einvs_valid_etcbs) + apply fastforce done lemma tcbBoundNotification_caps_safe[simp]: @@ -1795,6 +1735,10 @@ lemma valid_bound_ntfn_lift: apply (wp typ_at_lifts[OF P])+ done +crunches setBoundNotification + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (ignore: threadSet wp: threadSet_sched_pointers) + lemma bindNotification_invs': "\bound_tcb_at' ((=) None) tcbptr and ex_nonz_cap_to' ntfnptr @@ -1805,9 +1749,9 @@ lemma bindNotification_invs': \\_. 
invs'\" including no_pre apply (simp add: bindNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp set_ntfn_valid_pspace' sbn_sch_act' sbn_valid_queues valid_irq_node_lift + apply (wp set_ntfn_valid_pspace' sbn_sch_act' valid_irq_node_lift setBoundNotification_ct_not_inQ valid_bound_ntfn_lift untyped_ranges_zero_lift | clarsimp dest!: global'_no_ex_cap simp: cteCaps_of_def)+ @@ -2009,7 +1953,7 @@ lemma decodeSetPriority_corres: clarsimp simp: decode_set_priority_def decodeSetPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) @@ -2027,18 +1971,12 @@ lemma decodeSetMCPriority_corres: clarsimp simp: decode_set_mcpriority_def decodeSetMCPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) by (wpsimp simp: valid_cap_def valid_cap'_def)+ -lemma valid_objs'_maxPriority': - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbMCP tcb \ maxPriority) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) - done - lemma getMCP_sp: "\P\ threadGet tcbMCP t \\rv. mcpriority_tcb_at' (\st. st = rv) t and P\" apply (simp add: threadGet_def) @@ -2060,7 +1998,7 @@ lemma checkPrio_wp: checkPrio prio auth \ \rv. P \,-" apply (simp add: checkPrio_def) - apply (wp NonDetMonadVCG.whenE_throwError_wp getMCP_wp) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) by (auto simp add: pred_tcb_at'_def obj_at'_def) lemma checkPrio_lt_ct: @@ -2069,7 +2007,7 @@ lemma checkPrio_lt_ct: lemma checkPrio_lt_ct_weak: "\\\ checkPrio prio auth \\rv s. mcpriority_tcb_at' (\mcp. ucast prio \ mcp) auth s\, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule checkPrio_lt_ct) apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) by (rule le_ucast_ucast_le) simp @@ -2142,7 +2080,7 @@ lemma decodeSetSchedParams_corres: apply (clarsimp split: list.split simp: list_all2_Cons2) apply (clarsimp simp: list_all2_Cons1 neq_Nil_conv val_le_length_Cons linorder_not_less) apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_split_norE[OF checkPrio_corres]) apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) @@ -2276,7 +2214,7 @@ lemma slotCapLongRunningDelete_corres: lemma slot_long_running_inv'[wp]: "\P\ slotCapLongRunningDelete ptr \\rv. 
P\" apply (simp add: slotCapLongRunningDelete_def) - apply (rule hoare_seq_ext [OF _ getCTE_inv]) + apply (rule bind_wp [OF _ getCTE_inv]) apply (rule hoare_pre, wpcw, (wp isFinalCapability_inv)+) apply simp done @@ -2440,11 +2378,11 @@ lemma decodeTCBConfigure_corres: apply (rule decodeSetIPCBuffer_corres; simp) apply (rule corres_splitEE) apply (rule decodeSetSpace_corres; simp) - apply (rule_tac F="is_thread_control set_params" in corres_gen_asm) - apply (rule_tac F="is_thread_control set_space" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_params" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_space" in corres_gen_asm) apply (rule_tac F="tcThreadCapSlot setSpace = cte_map slot" in corres_gen_asm2) apply (rule corres_trivial) - apply (clarsimp simp: returnOk_def is_thread_control_def2 is_cap_simps) + apply (clarsimp simp: tcb_invocation.is_ThreadControl_def returnOk_def is_cap_simps) apply (wp | simp add: invs_def valid_sched_def)+ done @@ -2475,15 +2413,13 @@ lemma decodeTCBConf_wf[wp]: apply (rule_tac Q'="\setSpace s. tcb_inv_wf' setSpace s \ tcb_inv_wf' setIPCParams s \ isThreadControl setSpace \ isThreadControl setIPCParams \ tcThread setSpace = t \ tcNewCRoot setSpace \ None" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp: isThreadControl_def2 cong: option.case_cong) apply wpsimp apply (fastforce simp: isThreadControl_def2 objBits_defs) done -declare hoare_True_E_R [simp del] - lemma lsft_real_cte: "\valid_objs'\ lookupSlotForThread t x \\rv. real_cte_at' rv\, -" apply (simp add: lookupSlotForThread_def) @@ -2673,7 +2609,7 @@ lemma restart_makes_simple': \\rv. st_tcb_at' simple' t\" apply (simp add: restart_def) apply (wp sts_st_tcb_at'_cases cancelIPC_simple - cancelIPC_st_tcb_at static_imp_wp | simp)+ + cancelIPC_st_tcb_at hoare_weak_lift_imp | simp)+ apply (rule hoare_strengthen_post [OF isStopped_inv]) prefer 2 apply assumption @@ -2703,6 +2639,7 @@ crunches getThreadBufferSlot, setPriority, setMCPriority lemma inv_tcb_IRQInactive: "\valid_irq_states'\ invokeTCB tcb_inv -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + including classic_wp_pre apply (simp add: invokeTCB_def) apply (rule hoare_pre) apply (wpc | diff --git a/proof/refine/RISCV64/Untyped_R.thy b/proof/refine/RISCV64/Untyped_R.thy index cb00dc1ccd..ec4297e8ba 100644 --- a/proof/refine/RISCV64/Untyped_R.thy +++ b/proof/refine/RISCV64/Untyped_R.thy @@ -667,7 +667,7 @@ lemma map_ensure_empty': apply (wp getCTE_wp') apply (clarsimp elim!: cte_wp_at_weakenE') apply (erule meta_allE) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply clarsimp done @@ -1340,16 +1340,6 @@ crunch nosch[wp]: insertNewCaps "\s. P (ksSchedulerAction s)" crunch exst[wp]: set_cdt "\s. P (exst s)" -(*FIXME: Move to StateRelation*) -lemma state_relation_schact[elim!]: - "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" - apply (simp add: state_relation_def) - done - -lemma state_relation_queues[elim!]: "(s,s') \ state_relation \ ready_queues_relation (ready_queues s) (ksReadyQueues s')" - apply (simp add: state_relation_def) - done - lemma set_original_symb_exec_l: "corres_underlying {(s, s'). 
f (kheap s) (exst s) s'} nf nf' dc P P' (set_original p b) (return x)" by (simp add: corres_underlying_def return_def set_original_def in_monad Bex_def) @@ -1376,6 +1366,10 @@ lemma updateNewFreeIndex_noop_psp_corres: | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ done +crunches updateMDB, updateNewFreeIndex, setCTE + for rdyq_projs[wp]: + "\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) (\d p. inQ d p |< tcbs_of' s)" + lemma insertNewCap_corres: notes if_cong[cong del] if_weak_cong[cong] shows @@ -3200,7 +3194,7 @@ lemma createNewCaps_valid_cap': lemma dmo_ctes_of[wp]: "\\s. P (ctes_of s)\ doMachineOp mop \\rv s. P (ctes_of s)\" - by (simp add: doMachineOp_def split_def | wp select_wp)+ + by (simp add: doMachineOp_def split_def | wp)+ lemma createNewCaps_ranges: "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 @@ -3524,7 +3518,7 @@ lemma updateFreeIndex_mdb_simple': and cte_wp_at' :"ctes_of s src = Some cte" "cteCap cte = capability.UntypedCap d ptr sz idx'" and unt_inc' :"untyped_inc' (ctes_of s)" and valid_objs' :"valid_objs' s" - and invp: "mdb_inv_preserve (ctes_of s) (ctes_of s(src \ cteCap_update (\_. capability.UntypedCap d ptr sz idx) cte))" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. UntypedCap d ptr sz idx) cte))" (is "mdb_inv_preserve (ctes_of s) ?ctes") show "untyped_inc' ?ctes" @@ -3622,8 +3616,8 @@ lemma updateFreeIndex_clear_invs': apply (simp add:updateCap_def) apply (wp setCTE_irq_handlers' getCTE_wp) apply (simp add:updateCap_def) - apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift - hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp + apply (wp irqs_masked_lift cur_tcb_lift ct_idle_or_in_cur_domain'_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp valid_bitmaps_lift | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] | simp add: getSlotCap_def | simp add: cte_wp_at_ctes_of)+ @@ -4048,15 +4042,17 @@ lemma idx_le_new_offs: end +context begin interpretation Arch . (*FIXME: arch_split*) + lemma valid_sched_etcbs[elim!]: "valid_sched_2 queues ekh sa cdom kh ct it \ valid_etcbs_2 ekh kh" by (simp add: valid_sched_def) crunch ksIdleThread[wp]: deleteObjects "\s. P (ksIdleThread s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp) + (simp: crunch_simps wp: hoare_drop_imps unless_wp) crunch ksCurDomain[wp]: deleteObjects "\s. P (ksCurDomain s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp) + (simp: crunch_simps wp: hoare_drop_imps unless_wp) crunch irq_node[wp]: deleteObjects "\s. P (irq_node' s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp) + (simp: crunch_simps wp: hoare_drop_imps unless_wp) lemma deleteObjects_ksCurThread[wp]: "\\s. P (ksCurThread s)\ deleteObjects ptr sz \\_ s. P (ksCurThread s)\" @@ -4201,14 +4197,12 @@ context begin interpretation Arch . (*FIXME: arch_split*) lemma resetUntypedCap_corres: "untypinv_relation ui ui' \ corres (dc \ dc) - (invs and valid_untyped_inv_wcap ui - (Some (cap.UntypedCap dev ptr sz idx)) - and ct_active and einvs - and (\_. \ptr_base ptr' ty us slots dev'. ui = Invocations_A.Retype slot True - ptr_base ptr' ty us slots dev)) - (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') - (reset_untyped_cap slot) - (resetUntypedCap (cte_map slot))" + (einvs and schact_is_rct and ct_active + and valid_untyped_inv_wcap ui (Some (cap.UntypedCap dev ptr sz idx)) + and (\_. \ptr_base ptr' ty us slots dev'. 
+ ui = Invocations_A.Retype slot True ptr_base ptr' ty us slots dev)) + (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') + (reset_untyped_cap slot) (resetUntypedCap (cte_map slot))" apply (rule corres_gen_asm, clarsimp) apply (simp add: reset_untyped_cap_def resetUntypedCap_def liftE_bindE cong: if_cong) apply (rule corres_guard_imp) @@ -4352,7 +4346,8 @@ lemma resetUntypedCap_corres: apply (frule if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) apply (frule(1) descendants_range_ex_cte'[OF empty_descendants_range_in' _ order_refl], (simp add: isCap_simps add_mask_fold)+) - by (intro conjI impI; clarsimp) + apply (auto simp: descendants_range_in'_def valid_untyped'_def) + done end @@ -4450,7 +4445,7 @@ lemma resetUntypedCap_invs_etc: ?f \\_. invs' and ?vu2 and ct_active' and ?psp\, \\_. invs'\") apply (simp add: resetUntypedCap_def getSlotCap_def liftE_bind_return_bindE_returnOk bindE_assoc) - apply (rule hoare_vcg_seqE[rotated]) + apply (rule bindE_wp_fwd) apply simp apply (rule getCTE_sp) apply (rule hoare_name_pre_stateE) @@ -4467,8 +4462,8 @@ lemma resetUntypedCap_invs_etc: (simp_all add: cte_wp_at_ctes_of)+)[1] apply (clarsimp simp: unlessE_def cte_wp_at_ctes_of split del: if_split) - apply (rule_tac B="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) - and ct_active' and ?psp" in hoare_vcg_seqE[rotated]) + apply (rule_tac Q'="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) and ct_active' and ?psp" + in bindE_wp_fwd) apply clarsimp apply (rule hoare_pre) apply (simp add: sch_act_simple_def) @@ -4510,7 +4505,7 @@ lemma resetUntypedCap_invs_etc: modify_map_def) apply auto[1] apply simp - apply (rule hoare_pre, rule hoare_post_impErr, + apply (rule hoare_pre, rule hoare_strengthen_postE, rule_tac P="\i. invs' and ?psp and ct_active' and valid_untyped_inv_wcap' ?ui (Some (UntypedCap dev ptr sz (if i = 0 then idx else (length [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] - i) * 2 ^ resetChunkBits)))" @@ -4598,7 +4593,7 @@ lemma whenE_reset_resetUntypedCap_invs_etc: and ct_active' and pspace_no_overlap' (if reset then ptr else ptr') sz\, \\_. invs'\" apply (rule hoare_pre) - apply (wp hoare_whenE_wp resetUntypedCap_invs_etc[where idx=idx, + apply (wp whenE_wp resetUntypedCap_invs_etc[where idx=idx, simplified pred_conj_def conj_assoc] | simp)+ apply (clarsimp simp: cte_wp_at_ctes_of) @@ -4610,6 +4605,8 @@ lemma whenE_reset_resetUntypedCap_invs_etc: crunch ksCurDomain[wp]: updateFreeIndex "\s. P (ksCurDomain s)" +end + lemma (in range_cover) funky_aligned: "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" apply (rule aligned_add_aligned) @@ -4624,10 +4621,13 @@ defs canonicalAddressAssert_def: context begin interpretation Arch . (*FIXME: arch_split*) +defs archOverlap_def: + "archOverlap \ \_ _. 
False" + lemma inv_untyped_corres': "\ untypinv_relation ui ui' \ \ corres (dc \ (=)) - (einvs and valid_untyped_inv ui and ct_active) + (einvs and valid_untyped_inv ui and ct_active and schact_is_rct) (invs' and valid_untyped_inv' ui' and ct_active') (invoke_untyped ui) (invokeUntyped ui')" apply (cases ui) @@ -4646,6 +4646,7 @@ lemma inv_untyped_corres': (cte_map cref) reset ptr_base ptr ao' us (map cte_map slots) dev" assume invs: "invs (s :: det_state)" "ct_active s" "valid_list s" "valid_sched s" + "schact_is_rct s" and invs': "invs' s'" "ct_active' s'" and sr: "(s, s') \ state_relation" and vui: "valid_untyped_inv_wcap ?ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) s" @@ -4874,7 +4875,8 @@ lemma inv_untyped_corres': show " corres (dc \ (=)) ((=) s) ((=) s') (invoke_untyped ?ui) (invokeUntyped ?ui')" - apply (clarsimp simp:invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc) + apply (clarsimp simp: invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc + archOverlap_def) apply (insert cover) apply (rule corres_guard_imp) apply (rule corres_split_norE) @@ -4965,9 +4967,9 @@ lemma inv_untyped_corres': \ valid_untyped_inv_wcap ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s \ (reset \ pspace_no_overlap {ptr && ~~ mask sz..(ptr && ~~ mask sz) + 2 ^ sz - 1} s) - " in hoare_post_imp_R) + " in hoare_strengthen_postE_R) apply (simp add: whenE_def, wp) - apply (rule validE_validE_R, rule hoare_post_impErr, rule reset_untyped_cap_invs_etc, auto)[1] + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule reset_untyped_cap_invs_etc, auto)[1] apply wp apply (clarsimp simp: ui cte_wp_at_caps_of_state bits_of_def untyped_range.simps) @@ -5008,7 +5010,7 @@ lemma inv_untyped_corres': apply (drule invoke_untyped_proofs.usable_range_disjoint) apply (clarsimp simp: field_simps mask_out_sub_mask shiftl_t2n) - apply ((rule validE_validE_R)?, rule hoare_post_impErr, + apply ((rule validE_validE_R)?, rule hoare_strengthen_postE, rule whenE_reset_resetUntypedCap_invs_etc[where ptr="ptr && ~~ mask sz" and ptr'=ptr and sz=sz and idx=idx and ui=ui' and dev=dev]) @@ -5049,7 +5051,7 @@ lemma inv_untyped_corres': apply (clarsimp simp only: pred_conj_def invs ui) apply (strengthen vui) apply (cut_tac vui invs invs') - apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs) + apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs schact_is_rct_def) apply (cut_tac vui' invs') apply (clarsimp simp: ui cte_wp_at_ctes_of if_apply_def2 ui') done @@ -5071,7 +5073,7 @@ lemma sts_valid_untyped_inv': crunch nosch[wp]: invokeUntyped "\s. P (ksSchedulerAction s)" (simp: crunch_simps zipWithM_x_mapM - wp: crunch_wps hoare_unless_wp mapME_x_inv_wp preemptionPoint_inv) + wp: crunch_wps unless_wp mapME_x_inv_wp preemptionPoint_inv) crunch no_0_obj'[wp]: insertNewCap no_0_obj' (wp: crunch_wps) @@ -5099,7 +5101,6 @@ crunches insertNewCap and global_refs': "\s. P (global_refs' s)" and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" and irq_states' [wp]: valid_irq_states' - and vq'[wp]: valid_queues' and irqs_masked' [wp]: irqs_masked' and valid_machine_state'[wp]: valid_machine_state' and pspace_domain_valid[wp]: pspace_domain_valid @@ -5107,6 +5108,9 @@ crunches insertNewCap and tcbState_inv[wp]: "obj_at' (\tcb. P (tcbState tcb)) t" and tcbDomain_inv[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t" and tcbPriority_inv[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t" + and sched_queues_projs[wp]: "\s. 
P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + and tcbQueueds_of[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers (wp: crunch_wps) crunch if_unsafe_then_cap'[wp]: updateNewFreeIndex "if_unsafe_then_cap'" @@ -5263,8 +5267,8 @@ lemma insertNewCap_invs': apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp insertNewCap_valid_pspace' sch_act_wf_lift - valid_queues_lift cur_tcb_lift tcb_in_cur_domain'_lift - insertNewCap_valid_global_refs' + cur_tcb_lift tcb_in_cur_domain'_lift valid_bitmaps_lift + insertNewCap_valid_global_refs' sym_heap_sched_pointers_lift valid_arch_state_lift' valid_irq_node_lift insertNewCap_valid_irq_handlers) apply (clarsimp simp: cte_wp_at_ctes_of) @@ -5488,14 +5492,14 @@ lemma invokeUntyped_invs'': apply (clarsimp simp:invokeUntyped_def getSlotCap_def ui) apply (rule validE_valid) apply (rule hoare_pre) - apply (rule_tac B="\_ s. invs' s \ Q s \ ct_active' s - \ valid_untyped_inv_wcap' ui - (Some (UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s - \ (reset \ pspace_no_overlap' (ptr && ~~ mask sz) sz s) - " in hoare_vcg_seqE[rotated]) + apply (rule_tac Q'="\_ s. invs' s \ Q s \ ct_active' s + \ valid_untyped_inv_wcap' ui + (Some (UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s + \ (reset \ pspace_no_overlap' (ptr && ~~ mask sz) sz s)" + in bindE_wp_fwd) apply (simp only: whenE_def) apply wp - apply (rule hoare_post_impErr, rule combine_validE, + apply (rule hoare_strengthen_postE, rule combine_validE, rule resetUntypedCap_invs_etc, rule valid_validE, rule reset_Q') apply (clarsimp simp only: if_True) apply auto[1] @@ -5588,7 +5592,7 @@ lemma invokeUntyped_invs'[wp]: "\invs' and valid_untyped_inv' ui and ct_active'\ invokeUntyped ui \\rv. invs'\" - apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_post_taut, simplified]) + apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_TrueI, simplified]) apply auto done @@ -5659,7 +5663,7 @@ lemma resetUntypedCap_IRQInactive: (is "\?P\ resetUntypedCap slot \?Q\,\?E\") apply (simp add: resetUntypedCap_def) apply (rule hoare_pre) - apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_post_impErr] + apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_strengthen_postE] doMachineOp_irq_states' preemptionPoint_inv hoare_drop_imps | simp add: no_irq_clearMemory if_apply_def2)+ done @@ -5669,7 +5673,7 @@ lemma inv_untyped_IRQInactive: invokeUntyped ui -, \\rv s. 
intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" unfolding invokeUntyped_def - by (wpsimp wp: hoare_whenE_wp resetUntypedCap_IRQInactive) + by (wpsimp wp: whenE_wp resetUntypedCap_IRQInactive) end end diff --git a/proof/refine/RISCV64/VSpace_R.thy b/proof/refine/RISCV64/VSpace_R.thy index 96dd448d36..9b05422ef5 100644 --- a/proof/refine/RISCV64/VSpace_R.thy +++ b/proof/refine/RISCV64/VSpace_R.thy @@ -44,25 +44,17 @@ lemma asidBits_asid_bits[simp]: "asidBits = asid_bits" by (simp add: bit_simps' asid_bits_def asidBits_def) -lemma no_fail_read_stval[intro!,simp]: +lemma no_fail_read_stval[wp, intro!, simp]: "no_fail \ read_stval" by (simp add: read_stval_def) lemma handleVMFault_corres: "corres (fr \ dc) (tcb_at thread) (tcb_at' thread) (handle_vm_fault thread fault) (handleVMFault thread fault)" - apply (simp add: RISCV64_H.handleVMFault_def handle_vm_fault_def) - apply (rule corres_guard_imp) - apply (rule corres_split_eqrE) - apply simp - apply (rule corres_machine_op[where r="(=)"]) - apply (rule corres_Id, rule refl, simp) - apply (rule no_fail_read_stval) - apply (cases fault; simp) - apply wpsimp+ - done + unfolding handleVMFault_def handle_vm_fault_def + by (corres | corres_cases_both)+ -lemma no_fail_setVSpaceRoot[intro!, simp]: +lemma no_fail_setVSpaceRoot[wp, intro!, simp]: "no_fail \ (setVSpaceRoot v a)" by (simp add: setVSpaceRoot_def) @@ -85,10 +77,7 @@ proof - (do globalPT <- gets (riscvKSGlobalPT \ ksArchState); doMachineOp (setVSpaceRoot (addrFromKPPtr globalPT) 0) od)" for P Q - apply (corressimp corres: corres_gets_global_pt corres_machine_op) - apply fastforce - apply (simp add: addrFromKPPtr_def) - done + by corres show ?thesis unfolding set_vm_root_def setVMRoot_def catchFailure_def withoutFailure_def throw_def @@ -132,17 +121,7 @@ proof - apply (case_tac acap; clarsimp simp: isCap_simps catch_throwError intro!: global) apply (rename_tac m) apply (case_tac m; clarsimp simp: isCap_simps catch_throwError intro!: global) - apply (rule corres_guard_imp) - apply (rule corres_split_catch [where f=lfr and E'="\_. \"]) - apply (rule corres_split_eqrE[OF findVSpaceForASID_corres[OF refl]]) - apply (rule whenE_throwError_corres; simp add: lookup_failure_map_def) - apply (rule corres_machine_op) - apply corressimp - apply fastforce - apply simp - apply wpsimp+ - apply (rule global, assumption) - apply wpsimp+ + apply (corres simp: lookup_failure_map_def wp: hoare_vcg_if_lift_ER) apply (frule (1) cte_wp_at_valid_objs_valid_cap) apply (clarsimp simp: valid_cap_def mask_def wellformed_mapdata_def) apply (wpsimp wp: get_cap_wp simp: getThreadVSpaceRoot_def)+ @@ -151,7 +130,7 @@ proof - qed -lemma get_asid_pool_corres_inv': +lemma get_asid_pool_corres_inv'[corres]: assumes "p' = p" shows "corres (\p. (\p'. 
p = p' o ucast) \ inv ASIDPool) (asid_pool_at p and pspace_aligned and pspace_distinct) \ @@ -192,60 +171,29 @@ lemma no_fail_hwAIDFlush[intro!, wp, simp]: lemma hwASIDFlush_corres[corres]: "corres dc \ \ (do_machine_op (hwASIDFlush x)) (doMachineOp (hwASIDFlush x))" - by (corressimp corres: corres_machine_op) + by (corresKsimp corres: corres_machine_op) -lemma deleteASID_corres [corres]: +lemma deleteASID_corres[corres]: assumes "asid' = ucast asid" "pm' = pm" shows "corres dc invs no_0_obj' (delete_asid asid pm) (deleteASID asid' pm')" unfolding delete_asid_def deleteASID_def using assms apply simp - apply (rule corres_guard_imp) - apply (rule corres_split[OF corres_gets_asid]) - apply (case_tac "asid_table (asid_high_bits_of asid)", simp) - apply clarsimp - apply (rule_tac P="\s. asid_high_bits_of asid \ dom (asidTable o ucast) \ - asid_pool_at (the ((asidTable o ucast) (asid_high_bits_of asid))) s \ - pspace_aligned s \ pspace_distinct s" and - P'="\" and - Q="invs and - (\s. asid_table s = asidTable \ ucast)" in - corres_split) - apply (simp add: dom_def) - apply (rule get_asid_pool_corres_inv'[OF refl, unfolded pred_conj_def, simplified]) - apply (rule corres_when) - apply (simp add: mask_asid_low_bits_ucast_ucast asid_low_bits_of_def ucast_ucast_a is_down) - apply (rule corres_split[OF hwASIDFlush_corres]) - apply (rule_tac P="asid_pool_at (the (asidTable (ucast (asid_high_bits_of asid)))) - and pspace_aligned and pspace_distinct" - and P'="\" - in corres_split) - apply (simp del: fun_upd_apply) - apply (rule setObject_ASIDPool_corres) - apply (simp add: inv_def mask_asid_low_bits_ucast_ucast) + apply (corres simp: liftM_def mask_asid_low_bits_ucast_ucast asid_low_bits_of_def + ucast_ucast_a is_down + | corres_cases_both)+ + (* side condition of setObject_ASIDPool_corres needs manual work *) apply (rule ext) - apply (clarsimp simp: o_def ucast_ucast_a is_down asid_low_bits_of_def) + apply (clarsimp simp: ucast_ucast_a is_down asid_low_bits_of_def + mask_asid_low_bits_ucast_ucast inv_def) apply (word_bitwise, clarsimp) - apply (rule corres_split[OF getCurThread_corres]) - apply simp - apply (rule setVMRoot_corres[OF refl]) - apply wp+ - apply (thin_tac "x = f o g" for x f g) - apply (simp del: fun_upd_apply) - apply (fold cur_tcb_def) + (* continue rest of corres proof: *) + apply (corres corres: getCurThread_corres) apply (wp set_asid_pool_vs_lookup_unmap' - set_asid_pool_vspace_objs_unmap_single - | strengthen valid_arch_state_asid_table valid_arch_state_global_arch_objs)+ - apply (auto simp: obj_at_def a_type_def graph_of_def - split: if_split_asm dest: invs_valid_asid_table)[1] - apply (wp getASID_wp) - apply clarsimp - apply assumption - apply wp+ - apply clarsimp - apply (frule invs_valid_asid_table) - apply (drule (1) valid_asid_tableD) - apply (clarsimp simp: invs_distinct) + set_asid_pool_vspace_objs_unmap_single getASID_wp + | strengthen valid_arch_state_asid_table valid_arch_state_global_arch_objs + | simp flip: cur_tcb_def)+ + apply (fastforce dest: valid_asid_tableD invs_valid_asid_table) apply simp done @@ -347,18 +295,7 @@ lemma unmapPageTable_corres: (unmap_page_table asid vptr pt) (unmapPageTable asid' vptr' pt')" apply (clarsimp simp: assms unmap_page_table_def unmapPageTable_def ignoreFailure_def const_def) - apply (rule corres_guard_imp) - apply (rule corres_split_catch[where E="\\" and E'="\\", OF _ corres_return_trivial]) - apply (rule corres_split_eqrE[OF findVSpaceForASID_corres[OF refl]]) - apply (rule corres_split_eqrE[OF lookupPTFromLevel_corres[OF _ 
refl]]) - apply simp - apply (simp add: liftE_bindE) - apply (rule corres_split[OF storePTE_corres]) - apply simp - apply simp - apply (rule corres_machine_op) - apply (rule corres_Id; simp) - apply (wpsimp wp: pt_lookup_from_level_wp)+ + apply (corres corres: lookupPTFromLevel_corres wp: pt_lookup_from_level_wp) apply (clarsimp simp: invs_distinct invs_psp_aligned invs_vspace_objs invs_valid_asid_table pte_at_eq) apply (rule_tac x=asid in exI) @@ -413,11 +350,11 @@ lemma unmapPage_corres: apply (rule whenE_throwError_corres_initial, simp, simp) apply (rule corres_splitEE) apply (rule corres_rel_imp) - apply (rule liftE_get_pte_corres[@lift_corres_args], simp) + apply (rule liftE_get_pte_corres, simp) apply fastforce apply (rule corres_splitEE[OF checkMappingPPtr_corres]; assumption?) apply (simp add: liftE_bindE) - apply (rule corres_split[OF storePTE_corres]) + apply (rule corres_split[OF storePTE_corres], rule refl) apply simp apply simp apply (rule corres_machine_op, rule corres_Id, rule refl; simp) @@ -522,16 +459,11 @@ lemma performPageInvocation_corres: apply (cases pgi; clarsimp simp: valid_page_inv_def mapping_map_def) apply (simp add: bind_assoc[symmetric]) apply (rule corres_underlying_split[where r'=dc, OF _ corres_return_eq_same[OF refl] - hoare_post_taut hoare_post_taut]) + hoare_TrueI hoare_TrueI]) apply (simp add: bind_assoc) apply (rule corres_guard_imp) apply (simp add: perform_pg_inv_map_def) - apply (rule corres_split[OF updateCap_same_master]) - apply simp - apply (rule corres_split[OF storePTE_corres]) - apply assumption - apply (rule corres_machine_op, rule corres_Id; simp) - apply wpsimp+ + apply (corres corres: updateCap_same_master) apply (clarsimp simp: invs_valid_objs invs_distinct invs_psp_aligned) apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def is_cap_simps) apply (clarsimp simp: same_ref_def) @@ -539,7 +471,7 @@ lemma performPageInvocation_corres: apply (clarsimp simp: valid_page_inv'_def cte_wp_at_ctes_of) apply (simp add: bind_assoc[symmetric]) apply (rule corres_underlying_split[where r'=dc, OF _ corres_return_eq_same[OF refl] - hoare_post_taut hoare_post_taut]) + hoare_TrueI hoare_TrueI]) apply (simp add: bind_assoc) apply (clarsimp simp: perform_pg_inv_unmap_def liftM_def) apply (rename_tac cap a b cap') @@ -642,12 +574,7 @@ lemma performPageTableInvocation_corres: apply (clarsimp simp: valid_pti_def valid_pti'_def split: arch_cap.splits capability.split_asm arch_capability.split_asm) apply (rule corres_guard_imp) - apply (rule corres_split[OF updateCap_same_master]) - apply simp - apply (rule corres_split[OF storePTE_corres]) - apply assumption - apply (rule corres_machine_op, rule corres_Id; simp) - apply wpsimp+ + apply (corres corres: updateCap_same_master) apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def invs_valid_objs invs_psp_aligned invs_distinct) apply (case_tac cap; simp add: is_cap_simps cap_master_cap_simps) @@ -709,28 +636,17 @@ lemma performASIDPoolInvocation_corres: using assms apply (clarsimp simp: perform_asid_pool_invocation_def performASIDPoolInvocation_def) apply (cases ap, simp add: asid_pool_invocation_map_def) - apply (rule corres_guard_imp) - apply (rule corres_split[OF getSlotCap_corres[OF refl] _ get_cap_wp getSlotCap_wp]) - apply (rule corres_assert_gen_asm_l, rule corres_assert_gen_asm_l) - apply (rule_tac F="is_pt_cap pt_cap" in corres_gen_asm) - apply (rule corres_split[OF updateCap_same_master]) - apply (clarsimp simp: is_cap_simps update_map_data_def) - apply (rule corres_split[OF 
copy_global_mappings_corres]) - apply (clarsimp simp: is_cap_simps) - apply (unfold store_asid_pool_entry_def)[1] - apply (rule corres_split[where r'="\pool pool'. pool = pool' \ ucast"]) - apply (simp cong: corres_weak_cong) - apply (rule corres_rel_imp) - apply (rule getObject_ASIDPool_corres[OF refl]) - apply simp - apply (simp cong: corres_weak_cong) - apply (rule setObject_ASIDPool_corres) - apply (rule ext) - apply (clarsimp simp: inv_def is_cap_simps ucast_up_inj) - apply (wp getASID_wp)+ - apply (wpsimp wp: set_cap_typ_at hoare_drop_imps|strengthen valid_arch_state_global_arch_objs)+ + (* The fastforce is needed for the side conditions of setObject_ASIDPool_corres used at the end. + Guarded by "match" to not slow down the rest too much. *) + apply (corres' \match conclusion in "f' = inv f x \ g" for f' f x g \ + \fastforce simp: inv_def is_cap_simps ucast_up_inj\\ + corres: getSlotCap_corres corres_assert_gen_asm_l updateCap_same_master + simp: update_map_data_def cap.is_ArchObjectCap_def arch_cap.is_PageTableCap_def + liftM_def store_asid_pool_entry_def) + apply (wpsimp wp: set_cap_typ_at hoare_drop_imps get_cap_wp + | strengthen valid_arch_state_global_arch_objs)+ apply (clarsimp simp: valid_apinv_def cte_wp_at_caps_of_state is_cap_simps cap_master_cap_simps - update_map_data_def) + update_map_data_def in_omonad) apply (drule (1) caps_of_state_valid_cap) apply (simp add: valid_cap_def obj_at_def) apply (clarsimp simp: valid_apinv'_def cte_wp_at_ctes_of) @@ -794,13 +710,13 @@ lemma dmo_setVSpaceRoot_invs_no_cicd'[wp]: lemma setVMRoot_invs [wp]: "setVMRoot p \invs'\" unfolding setVMRoot_def getThreadVSpaceRoot_def - by (wpsimp wp: hoare_whenE_wp findVSpaceForASID_vs_at_wp hoare_drop_imps hoare_vcg_ex_lift + by (wpsimp wp: whenE_wp findVSpaceForASID_vs_at_wp hoare_drop_imps hoare_vcg_ex_lift hoare_vcg_all_lift) lemma setVMRoot_invs_no_cicd': "\invs_no_cicd'\ setVMRoot p \\rv. invs_no_cicd'\" unfolding setVMRoot_def getThreadVSpaceRoot_def - by (wpsimp wp: hoare_whenE_wp findVSpaceForASID_vs_at_wp hoare_drop_imps hoare_vcg_ex_lift + by (wpsimp wp: whenE_wp findVSpaceForASID_vs_at_wp hoare_drop_imps hoare_vcg_ex_lift hoare_vcg_all_lift) crunch nosch [wp]: setVMRoot "\s. P (ksSchedulerAction s)" @@ -878,10 +794,6 @@ crunch norqL1[wp]: storePTE "\s. P (ksReadyQueuesL1Bitmap s)" crunch norqL2[wp]: storePTE "\s. P (ksReadyQueuesL2Bitmap s)" (simp: updateObject_default_def) -lemma storePTE_valid_queues' [wp]: - "\valid_queues'\ storePTE p pte \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma storePTE_iflive [wp]: "\if_live_then_nonz_cap'\ storePTE p pte \\rv. if_live_then_nonz_cap'\" apply (simp add: storePTE_def) @@ -992,6 +904,11 @@ crunches storePTE and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" (wp: setObject_ksPSpace_only updateObject_default_inv) +lemma storePTE_tcbs_of'[wp]: + "storePTE c (pte::pte) \\s. P' (tcbs_of' s)\" + unfolding storePTE_def + by setObject_easy_cases + crunches storePTE for pspace_canonical'[wp]: "pspace_canonical'" and pspace_in_kernel_mappings'[wp]: "pspace_in_kernel_mappings'" @@ -1004,15 +921,12 @@ lemma storePTE_valid_objs[wp]: apply simp done -lemma storePTE_valid_queues [wp]: - "\Invariants_H.valid_queues\ storePTE p pde \\_. 
Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - lemma storePTE_invs[wp]: "storePTE p pte \invs'\" unfolding invs'_def valid_state'_def valid_pspace'_def by (wpsimp wp: sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' untyped_ranges_zero_lift + valid_bitmaps_lift simp: cteCaps_of_def o_def) lemma setASIDPool_valid_objs [wp]: @@ -1059,14 +973,6 @@ lemma setASIDPool_tcb_obj_at'[wp]: apply (clarsimp simp add: updateObject_default_def in_monad) done -lemma setASIDPool_valid_queues [wp]: - "\Invariants_H.valid_queues\ setObject p (ap::asidpool) \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma setASIDPool_valid_queues' [wp]: - "\valid_queues'\ setObject p (ap::asidpool) \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma setASIDPool_state_refs' [wp]: "\\s. P (state_refs_of' s)\ setObject p (ap::asidpool) \\rv s. P (state_refs_of' s)\" apply (clarsimp simp: setObject_def valid_def in_monad split_def @@ -1181,6 +1087,10 @@ lemma setObject_ap_ksDomScheduleIdx [wp]: "\\s. P (ksDomScheduleIdx s)\ setObject p (ap::asidpool) \\_. \s. P (ksDomScheduleIdx s)\" by (wp updateObject_default_inv|simp add:setObject_def | wpc)+ +lemma setObject_asidpool_tcbs_of'[wp]: + "setObject c (asidpool::asidpool) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + lemma setASIDPool_invs [wp]: "setObject p (ap::asidpool) \invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def) @@ -1188,7 +1098,7 @@ lemma setASIDPool_invs [wp]: valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' untyped_ranges_zero_lift - updateObject_default_inv + updateObject_default_inv valid_bitmaps_lift | simp add: cteCaps_of_def | rule setObject_ksPSpace_only)+ apply (clarsimp simp: o_def) diff --git a/proof/refine/RISCV64/orphanage/Orphanage.thy b/proof/refine/RISCV64/orphanage/Orphanage.thy index bb0d053175..08f35832ed 100644 --- a/proof/refine/RISCV64/orphanage/Orphanage.thy +++ b/proof/refine/RISCV64/orphanage/Orphanage.thy @@ -59,8 +59,7 @@ where definition all_queued_tcb_ptrs :: "kernel_state \ machine_word set" where - "all_queued_tcb_ptrs s \ - { tcb_ptr. \ priority. tcb_ptr : set ((ksReadyQueues s) priority) }" + "all_queued_tcb_ptrs s \ { tcb_ptr. obj_at' tcbQueued tcb_ptr s }" lemma st_tcb_at_neg': "(st_tcb_at' (\ ts. \ P ts) t s) = (tcb_at' t s \ \ st_tcb_at' P t s)" @@ -107,8 +106,8 @@ lemma no_orphans_lift: "\ tcb_ptr. \ \s. tcb_ptr = ksCurThread s \ f \ \_ s. tcb_ptr = ksCurThread s \" assumes st_tcb_at'_is_lifted: "\P p. \ \s. st_tcb_at' P p s\ f \ \_ s. st_tcb_at' P p s \" - assumes ksReadyQueues_is_lifted: - "\P. \ \s. P (ksReadyQueues s)\ f \ \_ s. P (ksReadyQueues s) \" + assumes tcbQueued_is_lifted: + "\P tcb_ptr. f \ \s. obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s \" assumes ksSchedulerAction_is_lifted: "\P. \ \s. P (ksSchedulerAction s)\ f \ \_ s. 
P (ksSchedulerAction s) \" shows @@ -119,7 +118,7 @@ lemma no_orphans_lift: apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) apply (rule ksCurThread_is_lifted) apply (wp hoare_vcg_disj_lift) - apply (rule ksReadyQueues_is_lifted) + apply (wpsimp wp: tcbQueued_is_lifted) apply (wp hoare_vcg_disj_lift) apply (rule typ_at'_is_lifted) apply (wp hoare_vcg_disj_lift) @@ -139,13 +138,12 @@ lemma st_tcb_at'_all_active_tcb_ptrs_lift: by (clarsimp simp: all_active_tcb_ptrs_def) (rule st_tcb_at'_is_active_tcb_ptr_lift [OF assms]) -lemma ksQ_all_queued_tcb_ptrs_lift: - assumes "\P p. \\s. P (ksReadyQueues s p)\ f \\rv s. P (ksReadyQueues s p)\" +lemma tcbQueued_all_queued_tcb_ptrs_lift: + assumes "\Q P tcb_ptr. f \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)\" shows "\\s. P (t \ all_queued_tcb_ptrs s)\ f \\_ s. P (t \ all_queued_tcb_ptrs s)\" apply (clarsimp simp: all_queued_tcb_ptrs_def) apply (rule_tac P=P in P_bool_lift) - apply (wp hoare_ex_wp assms) - apply (clarsimp) + apply (wp hoare_vcg_ex_lift assms) apply (wp hoare_vcg_all_lift assms) done @@ -180,6 +178,11 @@ lemma almost_no_orphans_disj: apply (auto intro: pred_tcb_at') done +lemma all_queued_tcb_ptrs_ksReadyQueues_update[simp]: + "tcb_ptr \ all_queued_tcb_ptrs (ksReadyQueues_update f s) = (tcb_ptr \ all_queued_tcb_ptrs s)" + unfolding all_queued_tcb_ptrs_def + by (clarsimp simp: obj_at'_def) + lemma no_orphans_update_simps[simp]: "no_orphans (gsCNodes_update f s) = no_orphans s" "no_orphans (gsUserPages_update g s) = no_orphans s" @@ -243,6 +246,12 @@ crunch no_orphans [wp]: removeFromBitmap "no_orphans" crunch almost_no_orphans [wp]: addToBitmap "almost_no_orphans x" crunch almost_no_orphans [wp]: removeFromBitmap "almost_no_orphans x" +lemma setCTE_tcbQueued[wp]: + "setCTE ptr v \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) t s)\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + lemma setCTE_no_orphans [wp]: "\ \s. no_orphans s \ setCTE p cte @@ -256,7 +265,7 @@ lemma setCTE_almost_no_orphans [wp]: setCTE p cte \ \rv s. almost_no_orphans tcb_ptr s \" unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift setCTE_typ_at' setCTE_pred_tcb_at') + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift setCTE_typ_at' setCTE_pred_tcb_at') done crunch no_orphans [wp]: activateIdleThread "no_orphans" @@ -266,128 +275,131 @@ lemma asUser_no_orphans [wp]: asUser thread f \ \rv s. no_orphans s \" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +lemma threadSet_all_queued_tcb_ptrs: + "\tcb. tcbQueued (F tcb) = tcbQueued tcb \ threadSet F tptr \\s. P (t \ all_queued_tcb_ptrs s)\" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2 threadSet_wp) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: obj_at'_def ps_clear_upd objBits_simps) + done + +crunches removeFromBitmap, addToBitmap, setQueue + for all_queued_tcb_ptrs[wp]: "\s. P (t \ all_queued_tcb_ptrs s)" + (wp: tcbQueued_all_queued_tcb_ptrs_lift) + +crunches tcbQueuePrepend, tcbQueueAppend + for all_queued_tcb_ptrs[wp]: "\s. P (t \ all_queued_tcb_ptrs s)" + (wp: threadSet_all_queued_tcb_ptrs ignore: threadSet) + +lemma tcbQueued_update_True_all_queued_tcb_ptrs[wp]: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr' \ all_queued_tcb_ptrs s\ + threadSet (tcbQueued_update (\_. True)) tcb_ptr + \\_ s. 
tcb_ptr' \ all_queued_tcb_ptrs s\" + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: all_queued_tcb_ptrs_def obj_at'_def ps_clear_upd objBits_simps) done +lemma tcbSchedEnqueue_all_queued_tcb_ptrs[wp]: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr \ all_queued_tcb_ptrs s\ + tcbSchedEnqueue tcb_ptr' + \\_ s. tcb_ptr \ all_queued_tcb_ptrs s\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: hoare_vcg_imp_lift' threadGet_wp + | wpsimp wp: threadSet_all_queued_tcb_ptrs)+ + apply (clarsimp simp: all_queued_tcb_ptrs_def obj_at'_def) + done + +lemmas tcbSchedEnqueue_all_queued_tcb_ptrs'[wp] = + tcbSchedEnqueue_all_queued_tcb_ptrs[simplified all_queued_tcb_ptrs_def, simplified] + +lemma tcbSchedAppend_all_queued_tcb_ptrs[wp]: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr \ all_queued_tcb_ptrs s\ + tcbSchedAppend tcb_ptr' + \\_ s. tcb_ptr \ all_queued_tcb_ptrs s\" + unfolding tcbSchedAppend_def tcbQueueAppend_def + apply (wpsimp wp: hoare_vcg_imp_lift' threadGet_wp + | wpsimp wp: threadSet_all_queued_tcb_ptrs)+ + apply (clarsimp simp: all_queued_tcb_ptrs_def obj_at'_def) + done + +lemmas tcbSchedAppend_all_queued_tcb_ptrs'[wp] = + tcbSchedAppend_all_queued_tcb_ptrs[simplified all_queued_tcb_ptrs_def, simplified] + lemma threadSet_no_orphans: - "\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)) \ - \ \s. no_orphans s \ - threadSet F tptr - \ \rv s. no_orphans s \" + "\\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)); + \tcb. tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tptr \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2 | clarsimp)+ - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2) -lemma threadSet_almost_no_orphans: - "\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)) \ - \ \s. almost_no_orphans ptr s \ - threadSet F tptr - \ \rv s. almost_no_orphans ptr s \" - unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2 | clarsimp)+ +lemma tcbQueued_update_True_no_orphans: + "\almost_no_orphans tptr and tcb_at' tptr\ + threadSet (tcbQueued_update (\_. True)) tptr + \\_. no_orphans\" + unfolding no_orphans_disj + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2) + apply (fastforce simp: almost_no_orphans_def all_active_tcb_ptrs_def + tcb_at_typ_at' st_tcb_at_neg' is_active_tcb_ptr_def) done -lemma setQueue_no_orphans_enq: - "\ \s. no_orphans s \ set (ksReadyQueues s (d, prio)) \ set qs \ - setQueue d prio qs - \ \_ s. no_orphans s \" - unfolding setQueue_def - apply wp - apply (clarsimp simp: no_orphans_def all_queued_tcb_ptrs_def - split: if_split_asm) +lemma tcbQueued_update_True_almost_no_orphans: + "threadSet (tcbQueued_update (\_. True)) tptr' \almost_no_orphans tptr\" + unfolding almost_no_orphans_disj + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift threadSet_st_tcb_at2) apply fastforce done -lemma setQueue_almost_no_orphans_enq: - "\ \s. almost_no_orphans tcb_ptr s \ set (ksReadyQueues s (d, prio)) \ set qs \ tcb_ptr \ set qs \ - setQueue d prio qs - \ \_ s. no_orphans s \" +lemma threadSet_almost_no_orphans: + "\\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)); + \tcb. 
tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tptr \almost_no_orphans ptr\" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2) + +lemma setQueue_no_orphans[wp]: + "setQueue d prio qs \no_orphans\" unfolding setQueue_def apply wp - apply (clarsimp simp: no_orphans_def almost_no_orphans_def all_queued_tcb_ptrs_def - split: if_split_asm) - apply fastforce + apply (clarsimp simp: no_orphans_def) done -lemma setQueue_almost_no_orphans_enq_lift: - "\ \s. almost_no_orphans tcb_ptr s \ set (ksReadyQueues s (d, prio)) \ set qs \ - setQueue d prio qs - \ \_ s. almost_no_orphans tcb_ptr s \" +lemma setQueue_almost_no_orphans[wp]: + "setQueue d prio qs \almost_no_orphans tptr\" unfolding setQueue_def apply wp - apply (clarsimp simp: almost_no_orphans_def all_queued_tcb_ptrs_def - split: if_split_asm) - apply fastforce + apply (clarsimp simp: almost_no_orphans_def) done lemma tcbSchedEnqueue_no_orphans[wp]: - "\ \s. no_orphans s \ - tcbSchedEnqueue tcb_ptr - \ \rv s. no_orphans s \" - unfolding tcbSchedEnqueue_def - apply (wp setQueue_no_orphans_enq threadSet_no_orphans | clarsimp simp: unless_def)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (drule obj_at_ko_at') - apply auto + "tcbSchedEnqueue tcb_ptr \no_orphans\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadSet_almost_no_orphans threadGet_wp) + apply (fastforce simp: no_orphans_strg_almost) done lemma tcbSchedAppend_no_orphans[wp]: - "\ \s. no_orphans s \ - tcbSchedAppend tcb_ptr - \ \rv s. no_orphans s \" - unfolding tcbSchedAppend_def - apply (wp setQueue_no_orphans_enq threadSet_no_orphans | clarsimp simp: unless_def)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (drule obj_at_ko_at') - apply auto - done - -lemma ko_at_obj_at': - "ko_at' ko p s \ P ko \ obj_at' P p s" - unfolding obj_at'_def - apply clarsimp - done - -lemma queued_in_queue: - "\valid_queues' s; ko_at' tcb tcb_ptr s; tcbQueued tcb\ \ - \ p. tcb_ptr \ set (ksReadyQueues s p)" - unfolding valid_queues'_def - apply (drule_tac x="tcbDomain tcb" in spec) - apply (drule_tac x="tcbPriority tcb" in spec) - apply (drule_tac x="tcb_ptr" in spec) - apply (drule mp) - apply (rule ko_at_obj_at') - apply (auto simp: inQ_def) + "tcbSchedAppend tcb_ptr \no_orphans\" + unfolding tcbSchedAppend_def tcbQueueAppend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadSet_almost_no_orphans threadGet_wp) + apply (fastforce simp: no_orphans_strg_almost) done lemma tcbSchedEnqueue_almost_no_orphans: - "\ \s. almost_no_orphans tcb_ptr s \ valid_queues' s \ + "\almost_no_orphans tcb_ptr\ tcbSchedEnqueue tcb_ptr - \ \rv s. no_orphans s \" - unfolding tcbSchedEnqueue_def - apply simp - apply (wp setQueue_almost_no_orphans_enq[where tcb_ptr=tcb_ptr] threadSet_no_orphans - | clarsimp)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply normalise_obj_at' - apply (rule_tac x=ko in exI) - apply (clarsimp simp: subset_insertI) - apply (unfold no_orphans_def almost_no_orphans_def) - apply clarsimp - apply (drule(2) queued_in_queue) - apply (fastforce simp: all_queued_tcb_ptrs_def) + \\_. 
no_orphans\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadSet_almost_no_orphans threadGet_wp) + apply (fastforce simp: no_orphans_def almost_no_orphans_def all_queued_tcb_ptrs_def obj_at'_def) done lemma tcbSchedEnqueue_almost_no_orphans_lift: - "\ \s. almost_no_orphans ptr s \ - tcbSchedEnqueue tcb_ptr - \ \rv s. almost_no_orphans ptr s \" - unfolding tcbSchedEnqueue_def - apply (wp setQueue_almost_no_orphans_enq_lift threadSet_almost_no_orphans | clarsimp simp: unless_def)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (drule obj_at_ko_at') - apply auto - done + "tcbSchedEnqueue tcb_ptr \almost_no_orphans ptr\" + unfolding tcbSchedEnqueue_def tcbQueuePrepend_def + by (wpsimp wp: tcbQueued_update_True_almost_no_orphans threadSet_almost_no_orphans) lemma ssa_no_orphans: "\ \s. no_orphans s \ @@ -419,124 +431,70 @@ lemma ssa_almost_no_orphans_lift [wp]: apply auto done -lemma tcbSchedEnqueue_inQueue [wp]: - "\ \s. valid_queues' s \ - tcbSchedEnqueue tcb_ptr - \ \rv s. tcb_ptr \ all_queued_tcb_ptrs s \" - unfolding tcbSchedEnqueue_def all_queued_tcb_ptrs_def - apply (wp | clarsimp simp: unless_def)+ - apply (rule_tac Q="\rv. \" in hoare_post_imp) - apply fastforce - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (fastforce simp: obj_at'_def valid_queues'_def inQ_def) - done - -lemma tcbSchedAppend_inQueue [wp]: - "\ \s. valid_queues' s \ - tcbSchedAppend tcb_ptr - \ \rv s. tcb_ptr \ all_queued_tcb_ptrs s \" - unfolding tcbSchedAppend_def all_queued_tcb_ptrs_def - apply (wp | clarsimp simp: unless_def)+ - apply (rule_tac Q="\rv. \" in hoare_post_imp) - apply fastforce - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (fastforce simp: obj_at'_def valid_queues'_def inQ_def) - done - lemma rescheduleRequired_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ - rescheduleRequired - \ \rv s. no_orphans s \" + "rescheduleRequired \no_orphans\" unfolding rescheduleRequired_def - apply (wp tcbSchedEnqueue_no_orphans hoare_vcg_all_lift ssa_no_orphans | wpc | clarsimp)+ - apply (wps tcbSchedEnqueue_nosch, wp static_imp_wp) - apply (rename_tac word t p) - apply (rule_tac P="word = t" in hoare_gen_asm) - apply (wp hoare_disjI1 | clarsimp)+ - done + by (wpsimp wp: ssa_no_orphans hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift | wpc)+ lemma rescheduleRequired_almost_no_orphans [wp]: - "\ \s. almost_no_orphans tcb_ptr s \ valid_queues' s \ - rescheduleRequired - \ \rv s. almost_no_orphans tcb_ptr s \" + "rescheduleRequired \almost_no_orphans tcb_ptr\" unfolding rescheduleRequired_def - apply (wp tcbSchedEnqueue_almost_no_orphans_lift hoare_vcg_all_lift | wpc | clarsimp)+ - apply (wps tcbSchedEnqueue_nosch, wp static_imp_wp) - apply (rename_tac word t p) - apply (rule_tac P="word = t" in hoare_gen_asm) - apply (wp hoare_disjI1 | clarsimp)+ - done + by (wpsimp wp: ssa_almost_no_orphans_lift hoare_vcg_all_lift tcbSchedEnqueue_almost_no_orphans_lift + hoare_vcg_imp_lift' hoare_vcg_disj_lift) lemma setThreadState_current_no_orphans: - "\ \s. no_orphans s \ ksCurThread s = tcb_ptr \ valid_queues' s \ + "\\s. no_orphans s \ ksCurThread s = tcb_ptr\ setThreadState state tcb_ptr - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. 
valid_queues' s \ no_orphans s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold no_orphans_disj all_queued_tcb_ptrs_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: inQ_def) + apply wpsimp + unfolding no_orphans_disj + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ done lemma setThreadState_isRestart_no_orphans: - "\ \s. no_orphans s \ st_tcb_at' isRestart tcb_ptr s \ valid_queues' s\ + "\no_orphans and st_tcb_at' isRestart tcb_ptr\ setThreadState state tcb_ptr - \ \rv s. no_orphans s \" + \\_ . no_orphans\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. valid_queues' s \ no_orphans s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold no_orphans_disj all_queued_tcb_ptrs_def is_active_thread_state_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: st_tcb_at_double_neg' st_tcb_at_neg' inQ_def) + apply wpsimp + unfolding no_orphans_disj + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ + apply (auto simp: is_active_thread_state_def st_tcb_at_double_neg' st_tcb_at_neg') done lemma setThreadState_almost_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s\ - setThreadState state tcb_ptr - \ \rv s. almost_no_orphans tcb_ptr s \" + "\no_orphans\ setThreadState state tcb_ptr \\_. almost_no_orphans tcb_ptr\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. valid_queues' s \ almost_no_orphans tcb_ptr s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold no_orphans_disj almost_no_orphans_disj all_queued_tcb_ptrs_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: inQ_def) + apply wpsimp + apply (unfold no_orphans_disj almost_no_orphans_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ done lemma setThreadState_not_active_no_orphans: - "\ is_active_thread_state state \ - \ \s. no_orphans s \ valid_queues' s \ - setThreadState state tcb_ptr - \ \rv s. no_orphans s \" + "\ is_active_thread_state state \ setThreadState state tcb_ptr \no_orphans\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. valid_queues' s \ no_orphans s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold no_orphans_disj all_queued_tcb_ptrs_def is_active_thread_state_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: isRunning_def isRestart_def inQ_def) + apply wpsimp + apply (unfold no_orphans_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ done lemma setThreadState_not_active_almost_no_orphans: - "\ is_active_thread_state state \ - \ \s. almost_no_orphans thread s \ valid_queues' s \ - setThreadState state tcb_ptr - \ \rv s. almost_no_orphans thread s \" + "\ is_active_thread_state state \ setThreadState state tcb_ptr \almost_no_orphans thread\" unfolding setThreadState_def - apply (wp | clarsimp)+ - apply (rule_tac Q="\rv s. 
valid_queues' s \ almost_no_orphans thread s" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_valid_queues') - apply (unfold almost_no_orphans_disj all_queued_tcb_ptrs_def is_active_thread_state_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) - apply (auto simp: isRunning_def isRestart_def inQ_def) + apply wpsimp + apply (unfold almost_no_orphans_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state + threadSet_all_queued_tcb_ptrs + | fastforce)+ done lemma activateThread_no_orphans [wp]: @@ -548,46 +506,63 @@ lemma activateThread_no_orphans [wp]: apply (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def isRestart_def) done -lemma setQueue_no_orphans_deq: - "\ \s. \ tcb_ptr. no_orphans s \ \ is_active_tcb_ptr tcb_ptr s \ - queue = [x\((ksReadyQueues s) (d, priority)). x \ tcb_ptr] \ - setQueue d priority queue - \ \rv s. no_orphans s \" - unfolding setQueue_def - apply (wp | clarsimp)+ - apply (fastforce simp: no_orphans_def all_queued_tcb_ptrs_def - all_active_tcb_ptrs_def is_active_tcb_ptr_def) +crunches removeFromBitmap, tcbQueueRemove, setQueue + for almost_no_orphans[wp]: "almost_no_orphans thread" + and no_orphans[wp]: no_orphans + and all_queued_tcb_ptrs[wp]: "\s. tcb_ptr \ all_queued_tcb_ptrs s" + (wp: crunch_wps) + +lemma tcbQueued_update_False_all_queued_tcb_ptrs: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr' \ all_queued_tcb_ptrs s\ + threadSet (tcbQueued_update (\_. False)) tcb_ptr + \\_ s. tcb_ptr' \ all_queued_tcb_ptrs s\" + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: all_queued_tcb_ptrs_def obj_at'_def projectKOs ps_clear_upd) done -lemma setQueue_almost_no_orphans_deq [wp]: - "\ \s. almost_no_orphans tcb_ptr s \ - queue = [x\((ksReadyQueues s) (d, priority)). x \ tcb_ptr] \ - setQueue d priority queue - \ \rv s. almost_no_orphans tcb_ptr s \" - unfolding setQueue_def - apply (wp | clarsimp)+ - apply (fastforce simp: almost_no_orphans_def all_queued_tcb_ptrs_def - all_active_tcb_ptrs_def is_active_tcb_ptr_def) +lemma tcbSchedDequeue_all_queued_tcb_ptrs_other: + "\\s. tcb_ptr \ tcb_ptr' \ tcb_ptr' \ all_queued_tcb_ptrs s\ + tcbSchedDequeue tcb_ptr + \\_ s. tcb_ptr' \ all_queued_tcb_ptrs s\" + unfolding tcbSchedDequeue_def + by (wpsimp wp: tcbQueued_update_False_all_queued_tcb_ptrs threadGet_wp) + +lemma tcbQueued_update_False_almost_no_orphans: + "\no_orphans\ + threadSet (tcbQueued_update (\_. False)) tptr + \\_. almost_no_orphans tptr\" + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: no_orphans_def almost_no_orphans_def) + apply (rename_tac tcb_ptr) + apply (case_tac "tcb_ptr = tptr") + apply fastforce + apply (fastforce simp: all_queued_tcb_ptrs_def obj_at'_def projectKOs all_active_tcb_ptrs_def + is_active_tcb_ptr_def st_tcb_at'_def ps_clear_upd) done lemma tcbSchedDequeue_almost_no_orphans [wp]: - "\ \s. no_orphans s \ - tcbSchedDequeue thread - \ \rv s. almost_no_orphans thread s \" + "\no_orphans\ tcbSchedDequeue thread \\_. almost_no_orphans thread\" unfolding tcbSchedDequeue_def - apply (wp threadSet_almost_no_orphans | simp cong: if_cong)+ - apply (simp add:no_orphans_strg_almost cong: if_cong) + apply (wpsimp wp: tcbQueued_update_False_almost_no_orphans threadGet_wp) + apply (simp add: no_orphans_strg_almost) done -lemma tcbSchedDequeue_no_orphans [wp]: - "\ \s. no_orphans s \ \ is_active_tcb_ptr tcb_ptr s \ - tcbSchedDequeue tcb_ptr - \ \rv s. 
no_orphans s \" - unfolding tcbSchedDequeue_def - apply (wp setQueue_no_orphans_deq threadSet_no_orphans | clarsimp)+ - apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ - apply (drule obj_at_ko_at') - apply auto +lemma tcbSchedDequeue_no_orphans[wp]: + "\\s. no_orphans s \ \ is_active_tcb_ptr tcbPtr s \ tcb_at' tcbPtr s\ + tcbSchedDequeue tcbPtr + \\_. no_orphans\" + supply disj_not1[simp del] + unfolding no_orphans_disj almost_no_orphans_disj + apply (rule hoare_allI) + apply (rename_tac tcb_ptr) + apply (case_tac "tcb_ptr = tcbPtr") + apply (rule_tac Q="\_ s. st_tcb_at' (\state. \ is_active_thread_state state) tcbPtr s" + in hoare_post_imp) + apply fastforce + apply wpsimp + apply (clarsimp simp: st_tcb_at'_def obj_at'_def projectKOs is_active_tcb_ptr_def disj_not1) + apply (wpsimp wp: tcbQueued_update_False_all_queued_tcb_ptrs hoare_vcg_disj_lift + simp: tcbSchedDequeue_def) done crunches setVMRoot @@ -595,17 +570,15 @@ crunches setVMRoot (wp: crunch_wps) lemma switchToIdleThread_no_orphans' [wp]: - "\ \s. no_orphans s \ - (is_active_tcb_ptr (ksCurThread s) s - \ ksCurThread s \ all_queued_tcb_ptrs s) \ + "\\s. no_orphans s + \ (is_active_tcb_ptr (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s)\ switchToIdleThread - \ \rv s. no_orphans s \" - unfolding switchToIdleThread_def setCurThread_def RISCV64_H.switchToIdleThread_def + \\_. no_orphans\" + supply disj_not1[simp del] + apply (clarsimp simp: switchToIdleThread_def setCurThread_def RISCV64_H.switchToIdleThread_def) apply (simp add: no_orphans_disj all_queued_tcb_ptrs_def) - apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_disj_lift - | clarsimp)+ - apply (auto simp: no_orphans_disj all_queued_tcb_ptrs_def is_active_tcb_ptr_def - st_tcb_at_neg' tcb_at_typ_at') + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift hoare_drop_imps) + apply (force simp: is_active_tcb_ptr_def st_tcb_at_neg' typ_at_tcb') done crunch no_orphans [wp]: "Arch.switchToThread" "no_orphans" @@ -613,13 +586,9 @@ crunch no_orphans [wp]: "Arch.switchToThread" "no_orphans" crunch ksCurThread [wp]: "Arch.switchToThread" "\ s. P (ksCurThread s)" -lemma ArchThreadDecls_H_switchToThread_all_queued_tcb_ptrs [wp]: - "\ \s. P (all_queued_tcb_ptrs s) \ - Arch.switchToThread tcb_ptr - \ \rv s. P (all_queued_tcb_ptrs s) \" - unfolding RISCV64_H.switchToThread_def all_queued_tcb_ptrs_def - apply (wp | clarsimp)+ - done +crunches Arch.switchToThread + for all_queued_tcb_ptrs[wp]: "\s. P (t \ all_queued_tcb_ptrs s)" + (wp: tcbQueued_all_queued_tcb_ptrs_lift) crunch ksSchedulerAction [wp]: "Arch.switchToThread" "\s. P (ksSchedulerAction s)" @@ -635,22 +604,6 @@ lemma setCurThread_no_orphans [wp]: apply auto done -lemma tcbSchedDequeue_all_queued_tcb_ptrs: - "\\s. x \ all_queued_tcb_ptrs s \ x \ t \ - tcbSchedDequeue t \\_ s. x \ all_queued_tcb_ptrs s\" - apply (rule_tac Q="(\s. x \ all_queued_tcb_ptrs s) and K (x \ t)" - in hoare_pre_imp, clarsimp) - apply (rule hoare_gen_asm) - apply (clarsimp simp: tcbSchedDequeue_def all_queued_tcb_ptrs_def) - apply (rule hoare_pre) - apply (wp, clarsimp) - apply (wp hoare_ex_wp)+ - apply (rename_tac d p) - apply (rule_tac Q="\_ s. x \ set (ksReadyQueues s (d, p))" - in hoare_post_imp, clarsimp) - apply (wp hoare_vcg_all_lift | simp)+ - done - lemma tcbSchedDequeue_all_active_tcb_ptrs[wp]: "\\s. P (t' \ all_active_tcb_ptrs s)\ tcbSchedDequeue t \\_ s. 
P (t' \ all_active_tcb_ptrs s)\" by (clarsimp simp: all_active_tcb_ptrs_def is_active_tcb_ptr_def) wp @@ -673,8 +626,11 @@ lemma setCurThread_almost_no_orphans: lemmas ArchThreadDecls_H_switchToThread_all_active_tcb_ptrs[wp] = st_tcb_at'_all_active_tcb_ptrs_lift [OF Arch_switchToThread_pred_tcb'] -lemmas ArchThreadDecls_H_switchToThread_all_queued_tcb_ptrs_lift[wp] = - ksQ_all_queued_tcb_ptrs_lift [OF ArchThreadDecls_H_RISCV64_H_switchToThread_ksQ] +lemma arch_switch_thread_tcbQueued[wp]: + "Arch.switchToThread t \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)\" + apply (simp add: RISCV64_H.switchToThread_def) + apply (wp) + done lemma ThreadDecls_H_switchToThread_no_orphans: "\ \s. no_orphans s \ @@ -684,16 +640,9 @@ lemma ThreadDecls_H_switchToThread_no_orphans: ThreadDecls_H.switchToThread tcb_ptr \ \rv s. no_orphans s \" unfolding Thread_H.switchToThread_def - apply (wp setCurThread_almost_no_orphans - tcbSchedDequeue_almost_no_orphans) - apply (wps tcbSchedDequeue_ct') - apply (wp tcbSchedDequeue_all_queued_tcb_ptrs hoare_convert_imp)+ - apply (wps) - apply (wp)+ - apply (wps) - apply (wp) - apply (clarsimp) - done + by (wpsimp wp: setCurThread_almost_no_orphans hoare_vcg_imp_lift' + tcbSchedDequeue_all_queued_tcb_ptrs_other + | wps)+ lemma findM_failure': "\ \x S. \ \s. P S s \ f x \ \rv s. \ rv \ P (insert x S) s \ \ \ @@ -701,7 +650,7 @@ lemma findM_failure': apply (induct xs arbitrary: S) apply (clarsimp, wp, clarsimp) apply clarsimp - apply (rule hoare_seq_ext[rotated], assumption) + apply (rule bind_wp_fwd, assumption) apply (case_tac r) apply (clarsimp, wp, clarsimp) apply clarsimp @@ -711,93 +660,43 @@ lemma findM_failure': lemmas findM_failure = findM_failure'[where S="{}", simplified] -lemma tcbSchedEnqueue_inQueue_eq: - "\ valid_queues' and K (tcb_ptr = tcb_ptr') \ - tcbSchedEnqueue tcb_ptr - \ \rv s. tcb_ptr' \ all_queued_tcb_ptrs s \" - apply (rule hoare_gen_asm, simp) - apply wp - done - -lemma tcbSchedAppend_inQueue_eq: - "\ valid_queues' and K (tcb_ptr = tcb_ptr') \ - tcbSchedAppend tcb_ptr - \ \rv s. tcb_ptr' \ all_queued_tcb_ptrs s \" - apply (rule hoare_gen_asm, simp) - apply wp - done - lemma findM_on_success: "\ \x. \ P x \ f x \ \rv s. rv \; \x y. \ P x \ f y \ \rv. P x \ \ \ \ \s. \x \ set xs. P x s \ findM f xs \ \rv s. \ y. rv = Some y \" apply (induct xs; clarsimp) apply wp+ apply (clarsimp simp: imp_conv_disj Bex_def) - apply (wp hoare_vcg_disj_lift hoare_ex_wp | clarsimp | assumption)+ + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift | clarsimp | assumption)+ done crunch st_tcb' [wp]: switchToThread "\s. P' (st_tcb_at' P t s)" -lemma setQueue_deq_not_empty: - "\ \s. (\tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s) \ - (\tcb_ptr. \ st_tcb_at' P tcb_ptr s \ - queue = [x\((ksReadyQueues s) (d, priority)). x \ tcb_ptr]) \ - setQueue d priority queue - \ \rv s. \tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s \" - unfolding setQueue_def - apply wp - apply auto - done - -lemma tcbSchedDequeue_not_empty: - "\ \s. (\tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s) \ \ st_tcb_at' P thread s \ - tcbSchedDequeue thread - \ \rv s. \tcb. 
tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s \" - unfolding tcbSchedDequeue_def - apply wp - apply (wp hoare_ex_wp threadSet_pred_tcb_no_state) - apply clarsimp - apply (wp setQueue_deq_not_empty) - apply clarsimp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs) - apply wp - apply clarsimp - apply clarsimp - apply (wp setQueue_deq_not_empty)+ - apply (rule_tac Q="\rv s. \ st_tcb_at' P thread s" in hoare_post_imp) - apply fastforce - apply (wp weak_if_wp | clarsimp)+ - done - lemmas switchToThread_all_active_tcb_ptrs[wp] = st_tcb_at'_all_active_tcb_ptrs_lift [OF switchToThread_st_tcb'] (* ksSchedulerAction s = ChooseNewThread *) lemma chooseThread_no_orphans [wp]: - notes hoare_TrueI[simp] - shows - "\\s. no_orphans s \ all_invs_but_ct_idle_or_in_cur_domain' s \ - (is_active_tcb_ptr (ksCurThread s) s - \ ksCurThread s \ all_queued_tcb_ptrs s)\ + "\\s. no_orphans s \ all_invs_but_ct_idle_or_in_cur_domain' s + \ (is_active_tcb_ptr (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s)\ chooseThread - \ \rv s. no_orphans s \" + \\_. no_orphans\" (is "\?PRE\ _ \_\") unfolding chooseThread_def Let_def supply if_split[split del] apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. ?PRE s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. ?PRE s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (intro bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp[where Q'="\rv s. ?PRE s \ ksReadyQueues_asrt s \ ready_qs_runnable s + \ rv = ksCurDomain s"]) + apply (rule_tac Q'="\rv s. ?PRE s \ ksReadyQueues_asrt s \ ready_qs_runnable s + \ curdom = ksCurDomain s \ rv = ksReadyQueuesL1Bitmap s curdom" + in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) - apply (simp, wp (once), simp) + apply (simp, wp, simp) (* we have a thread to switch to *) - apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv ThreadDecls_H_switchToThread_no_orphans) - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def - valid_queues_def st_tcb_at'_def) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def st_tcb_at'_def) apply (fastforce dest!: lookupBitmapPriority_obj_at' elim: obj_at'_weaken simp: all_active_tcb_ptrs_def) apply (wpsimp simp: bitmap_fun_defs) @@ -805,42 +704,6 @@ lemma chooseThread_no_orphans [wp]: apply (wpsimp simp: curDomain_def simp: invs_no_cicd_ksCurDomain_maxDomain')+ done -lemma valid_queues'_ko_atD: - "valid_queues' s \ ko_at' tcb t s \ tcbQueued tcb - \ t \ set (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb))" - apply (simp add: valid_queues'_def) - apply (elim allE, erule mp) - apply normalise_obj_at' - apply (simp add: inQ_def) - done - -lemma tcbSchedAppend_in_ksQ: - "\valid_queues' and tcb_at' t\ tcbSchedAppend t - \\r s. \domain priority. t \ set (ksReadyQueues s (domain, priority))\" - apply (rule_tac Q="\s. \d p. valid_queues' s \ - obj_at' (\tcb. tcbPriority tcb = p) t s \ - obj_at' (\tcb. tcbDomain tcb = d) t s" - in hoare_pre_imp) - apply (clarsimp simp: tcb_at'_has_tcbPriority tcb_at'_has_tcbDomain) - apply (rule hoare_vcg_ex_lift)+ - apply (simp add: tcbSchedAppend_def unless_def) - apply wpsimp - apply (rule_tac Q="\rv s. tdom = d \ rv = p \ obj_at' (\tcb. tcbPriority tcb = p) t s - \ obj_at' (\tcb. tcbDomain tcb = d) t s" - in hoare_post_imp, clarsimp) - apply (wp, (wp threadGet_const)+) - apply (rule_tac Q="\rv s. - obj_at' (\tcb. tcbPriority tcb = p) t s \ - obj_at' (\tcb. 
tcbDomain tcb = d) t s \ - obj_at' (\tcb. tcbQueued tcb = rv) t s \ - (rv \ t \ set (ksReadyQueues s (d, p)))" in hoare_post_imp) - apply (clarsimp simp: o_def elim!: obj_at'_weakenE) - apply (wp threadGet_obj_at' hoare_vcg_imp_lift threadGet_const) - apply clarsimp - apply normalise_obj_at' - apply (drule(1) valid_queues'_ko_atD, simp+) - done - lemma hoare_neg_imps: "\P\ f \\ rv s. \ R rv s\ \ \P\ f \\r s. R r s \ Q r s\" by (auto simp: valid_def) @@ -864,7 +727,7 @@ lemma ThreadDecls_H_switchToThread_ct [wp]: crunch no_orphans [wp]: nextDomain no_orphans (wp: no_orphans_lift simp: Let_def) -crunch ksQ [wp]: nextDomain "\s. P (ksReadyQueues s p)" +crunch tcbQueued[wp]: nextDomain "\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)" (simp: Let_def) crunch st_tcb_at' [wp]: nextDomain "\s. P (st_tcb_at' P' p s)" @@ -876,41 +739,6 @@ crunch ct' [wp]: nextDomain "\s. P (ksCurThread s)" crunch sch_act_not [wp]: nextDomain "sch_act_not t" (simp: Let_def) -lemma tcbSchedEnqueue_in_ksQ: - "\valid_queues' and tcb_at' t\ tcbSchedEnqueue t - \\r s. \domain priority. t \ set (ksReadyQueues s (domain, priority))\" - apply (rule_tac Q="\s. \d p. valid_queues' s \ - obj_at' (\tcb. tcbPriority tcb = p) t s \ - obj_at' (\tcb. tcbDomain tcb = d) t s" - in hoare_pre_imp) - apply (clarsimp simp: tcb_at'_has_tcbPriority tcb_at'_has_tcbDomain) - apply (rule hoare_vcg_ex_lift)+ - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wpsimp simp: if_apply_def2) - apply (rule_tac Q="\rv s. tdom = d \ rv = p \ obj_at' (\tcb. tcbPriority tcb = p) t s - \ obj_at' (\tcb. tcbDomain tcb = d) t s" - in hoare_post_imp, clarsimp) - apply (wp, (wp threadGet_const)+) - apply (rule_tac Q="\rv s. - obj_at' (\tcb. tcbPriority tcb = p) t s \ - obj_at' (\tcb. tcbDomain tcb = d) t s \ - obj_at' (\tcb. tcbQueued tcb = rv) t s \ - (rv \ t \ set (ksReadyQueues s (d, p)))" in hoare_post_imp) - apply (clarsimp simp: o_def elim!: obj_at'_weakenE) - apply (wp threadGet_obj_at' hoare_vcg_imp_lift threadGet_const) - apply clarsimp - apply normalise_obj_at' - apply (frule(1) valid_queues'_ko_atD, simp+) - done - -lemma tcbSchedEnqueue_in_ksQ': - "\valid_queues' and tcb_at' t and K (t = t')\ - tcbSchedEnqueue t' - \\r s. \domain priority. t \ set (ksReadyQueues s (domain, priority))\" - apply (rule hoare_gen_asm) - apply (wp tcbSchedEnqueue_in_ksQ | clarsimp)+ - done - lemma all_invs_but_ct_idle_or_in_cur_domain'_strg: "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" by (clarsimp simp: invs'_to_invs_no_cicd'_def) @@ -919,67 +747,6 @@ lemma setSchedulerAction_cnt_sch_act_not[wp]: "\ \ \ setSchedulerAction ChooseNewThread \\rv s. sch_act_not x s\" by (rule hoare_pre, rule hoare_strengthen_post[OF setSchedulerAction_direct]) auto -lemma tcbSchedEnqueue_in_ksQ_aqtp[wp]: - "\valid_queues' and tcb_at' t\ tcbSchedEnqueue t - \\r s. t \ all_queued_tcb_ptrs s\" - apply (clarsimp simp: all_queued_tcb_ptrs_def) - apply (rule tcbSchedEnqueue_in_ksQ) - done - -lemma tcbSchedEnqueue_in_ksQ_already_queued: - "\\s. valid_queues' s \ tcb_at' t s \ - (\domain priority. t' \ set (ksReadyQueues s (domain, priority))) \ - tcbSchedEnqueue t - \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" - apply (case_tac "t'=t", wpsimp wp: tcbSchedEnqueue_in_ksQ) - apply (wpsimp simp: tcbSchedEnqueue_def unless_def) - apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" - in hoare_post_imp) - apply metis - apply wpsimp+ - done - -lemma tcbSchedAppend_in_ksQ_already_queued: - "\\s. 
valid_queues' s \ tcb_at' t s \ - (\domain priority. t' \ set (ksReadyQueues s (domain, priority))) \ - tcbSchedAppend t - \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" - apply (case_tac "t'=t", wpsimp wp: tcbSchedAppend_in_ksQ) - apply (wpsimp simp: tcbSchedAppend_def unless_def) - apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" - in hoare_post_imp) - apply metis - apply wpsimp+ - done - -lemma tcbSchedEnqueue_in_ksQ'': - "\\s. valid_queues' s \ tcb_at' t s \ - (t' \ t \ (\domain priority. t' \ set (ksReadyQueues s (domain, priority)))) \ - tcbSchedEnqueue t - \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" - apply (case_tac "t'=t", wpsimp wp: tcbSchedEnqueue_in_ksQ) - apply clarsimp - apply (wpsimp simp: tcbSchedEnqueue_def unless_def) - apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" - in hoare_post_imp) - apply metis - apply wpsimp+ - done - -lemma tcbSchedAppend_in_ksQ'': - "\\s. valid_queues' s \ tcb_at' t s \ - (t' \ t \ (\domain priority. t' \ set (ksReadyQueues s (domain, priority)))) \ - tcbSchedAppend t - \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" - apply (case_tac "t'=t", wpsimp wp: tcbSchedAppend_in_ksQ) - apply clarsimp - apply (wpsimp simp: tcbSchedAppend_def unless_def) - apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" - in hoare_post_imp) - apply metis - apply wpsimp+ - done - crunches setSchedulerAction for pred_tcb_at': "\s. P (pred_tcb_at' proj Q t s)" and ct': "\s. P (ksCurThread s)" @@ -998,12 +765,6 @@ lemma ct_active_st_tcb_at': apply (case_tac st, auto) done -lemma tcbSchedEnqueue_in_ksQ_already_queued_aqtp: - "\\s. valid_queues' s \ tcb_at' t s \ - t' \ all_queued_tcb_ptrs s \ tcbSchedEnqueue t - \\r s. t' \ all_queued_tcb_ptrs s \" - by (clarsimp simp: all_queued_tcb_ptrs_def tcbSchedEnqueue_in_ksQ_already_queued) - (* FIXME move *) lemma invs_switchToThread_runnable': "\ invs' s ; ksSchedulerAction s = SwitchToThread t \ \ st_tcb_at' runnable' t s" @@ -1036,17 +797,16 @@ lemma chooseThread_nosch: done lemma scheduleChooseNewThread_no_orphans: - "\ invs' and no_orphans - and (\s. ksSchedulerAction s = ChooseNewThread - \ (st_tcb_at' runnable' (ksCurThread s) s - \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p))))) \ + "\invs' and no_orphans + and (\s. ksSchedulerAction s = ChooseNewThread + \ (st_tcb_at' runnable' (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s))\ scheduleChooseNewThread - \\_. no_orphans \" + \\_. no_orphans\" unfolding scheduleChooseNewThread_def apply (wp add: ssa_no_orphans hoare_vcg_all_lift) apply (wp hoare_disjI1 chooseThread_nosch)+ apply (wp nextDomain_invs_no_cicd' hoare_vcg_imp_lift - hoare_lift_Pf2 [OF ksQ_all_queued_tcb_ptrs_lift[OF nextDomain_ksQ] + hoare_lift_Pf2 [OF tcbQueued_all_queued_tcb_ptrs_lift[OF nextDomain_tcbQueued] nextDomain_ct'] hoare_lift_Pf2 [OF st_tcb_at'_is_active_tcb_ptr_lift[OF nextDomain_st_tcb_at'] nextDomain_ct'] @@ -1055,85 +815,73 @@ lemma scheduleChooseNewThread_no_orphans: is_active_tcb_ptr_runnable')+ done +lemma setSchedulerAction_tcbQueued[wp]: + "setSchedulerAction sa \\s. Q (obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr s)\" + by wpsimp + lemma schedule_no_orphans[wp]: notes ssa_wp[wp del] shows - "\ \s. no_orphans s \ invs' s \ - schedule - \ \rv s. no_orphans s \" + "\no_orphans and invs'\ schedule \\_. no_orphans\" proof - have do_switch_to: "\candidate. \\s. 
no_orphans s \ ksSchedulerAction s = SwitchToThread candidate \ st_tcb_at' runnable' candidate s - \ (st_tcb_at' runnable' (ksCurThread s) s - \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p)))) \ - do ThreadDecls_H.switchToThread candidate; - setSchedulerAction ResumeCurrentThread - od - \\rv. no_orphans\" + \ (st_tcb_at' runnable' (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s) \ + do ThreadDecls_H.switchToThread candidate; + setSchedulerAction ResumeCurrentThread + od + \\_. no_orphans\" apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans hoare_vcg_all_lift ThreadDecls_H_switchToThread_no_orphans)+ apply (rule_tac Q="\_ s. (t = candidate \ ksCurThread s = candidate) \ (t \ candidate \ sch_act_not t s)" in hoare_post_imp) - apply (wpsimp wp: stt_nosch static_imp_wp)+ + apply (wpsimp wp: stt_nosch hoare_weak_lift_imp)+ apply (fastforce dest!: in_all_active_tcb_ptrsD simp: all_queued_tcb_ptrs_def comp_def) done have abort_switch_to_enq: "\candidate. - \\s. no_orphans s \ invs' s \ valid_queues' s + \\s. no_orphans s \ invs' s \ ksSchedulerAction s = SwitchToThread candidate - \ (st_tcb_at' runnable' (ksCurThread s) s - \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p)))) \ - do tcbSchedEnqueue candidate; - setSchedulerAction ChooseNewThread; - scheduleChooseNewThread - od - \\rv. no_orphans\" - apply (rule hoare_pre) - apply (wp scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) + \ (st_tcb_at' runnable' (ksCurThread s) s \ ksCurThread s \ all_queued_tcb_ptrs s) \ + do tcbSchedEnqueue candidate; + setSchedulerAction ChooseNewThread; + scheduleChooseNewThread + od + \\_. no_orphans\" + apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift - simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def - | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_ksQ])+ - apply (wp tcbSchedEnqueue_in_ksQ' tcbSchedEnqueue_no_orphans hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift) - apply (wp hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_pred_tcb_at'] - hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_in_ksQ_already_queued] - tcbSchedEnqueue_no_orphans - | strengthen not_pred_tcb_at'_strengthen - | wp (once) hoare_vcg_imp_lift')+ - apply (clarsimp) - apply (frule invs_sch_act_wf', clarsimp simp: pred_tcb_at') - apply (simp add: st_tcb_at_neg' tcb_at_invs') + simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def + | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_tcbQueued])+ + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift + | strengthen not_pred_tcb_at'_strengthen + | rule hoare_lift_Pf2[where f=ksCurThread])+ + apply (simp add: st_tcb_at_neg' tcb_at_invs' all_queued_tcb_ptrs_def) done have abort_switch_to_app: "\candidate. - \\s. no_orphans s \ invs' s \ valid_queues' s + \\s. no_orphans s \ invs' s \ ksSchedulerAction s = SwitchToThread candidate \ (st_tcb_at' runnable' (ksCurThread s) s - \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p))) ) \ - do tcbSchedAppend candidate; - setSchedulerAction ChooseNewThread; - scheduleChooseNewThread - od - \\rv. no_orphans\" - apply (rule hoare_pre) - apply (wp scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) + \ ksCurThread s \ all_queued_tcb_ptrs s ) \ + do tcbSchedAppend candidate; + setSchedulerAction ChooseNewThread; + scheduleChooseNewThread + od + \\_. 
no_orphans\" + apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift - simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def - | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_ksQ])+ - apply (wp tcbSchedAppend_in_ksQ'' tcbSchedAppend_no_orphans hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift) - apply (wp hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedAppend_pred_tcb_at'] - hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedAppend_in_ksQ_already_queued] - tcbSchedAppend_no_orphans - | strengthen not_pred_tcb_at'_strengthen - | wp (once) hoare_vcg_imp_lift')+ - apply (clarsimp) - apply (frule invs_sch_act_wf', clarsimp simp: pred_tcb_at') - apply (simp add: st_tcb_at_neg' tcb_at_invs') + simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def + | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_tcbQueued])+ + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift + | strengthen not_pred_tcb_at'_strengthen + | rule hoare_lift_Pf2[where f=ksCurThread])+ + apply (simp add: st_tcb_at_neg' tcb_at_invs' all_queued_tcb_ptrs_def) done show ?thesis @@ -1147,24 +895,20 @@ proof - apply (wp ssa_no_orphans hoare_vcg_all_lift) apply (wp hoare_disjI1 chooseThread_nosch) apply (wp nextDomain_invs_no_cicd' hoare_vcg_imp_lift - hoare_lift_Pf2 [OF ksQ_all_queued_tcb_ptrs_lift - [OF nextDomain_ksQ] - nextDomain_ct'] + hoare_lift_Pf2 [OF tcbQueued_all_queued_tcb_ptrs_lift + [OF nextDomain_tcbQueued] + nextDomain_ct'] hoare_lift_Pf2 [OF st_tcb_at'_is_active_tcb_ptr_lift [OF nextDomain_st_tcb_at'] nextDomain_ct'] hoare_vcg_all_lift getDomainTime_wp)[2] - apply ((wp tcbSchedEnqueue_no_orphans tcbSchedEnqueue_in_ksQ' - hoare_drop_imp - | clarsimp simp: all_queued_tcb_ptrs_def - | strengthen all_invs_but_ct_idle_or_in_cur_domain'_strg - | wps tcbSchedEnqueue_ct')+)[1] - apply ((wp tcbSchedEnqueue_no_orphans tcbSchedEnqueue_in_ksQ' + apply wpsimp + apply ((wp tcbSchedEnqueue_no_orphans tcbSchedEnqueue_all_queued_tcb_ptrs' hoare_drop_imp - | clarsimp simp: all_queued_tcb_ptrs_def - | strengthen all_invs_but_ct_idle_or_in_cur_domain'_strg - | wps tcbSchedEnqueue_ct')+)[1] - apply wp[1] + | clarsimp simp: all_queued_tcb_ptrs_def + | strengthen all_invs_but_ct_idle_or_in_cur_domain'_strg + | wps)+)[1] + apply wpsimp \ \action = SwitchToThread candidate\ apply (clarsimp) apply (rename_tac candidate) @@ -1173,14 +917,11 @@ proof - apply (wp hoare_drop_imps) apply (wp add: tcbSchedEnqueue_no_orphans)+ apply (clarsimp simp: conj_comms cong: conj_cong imp_cong split del: if_split) - apply (wp hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_pred_tcb_at'] - hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_in_ksQ'] - hoare_vcg_imp_lift' + apply (wp hoare_vcg_imp_lift' | strengthen not_pred_tcb_at'_strengthen)+ - apply (clarsimp simp: comp_def) - apply (frule invs_queues) - apply (clarsimp simp: invs_valid_queues' tcb_at_invs' st_tcb_at_neg' is_active_tcb_ptr_runnable') - apply (fastforce simp: all_invs_but_ct_idle_or_in_cur_domain'_strg invs_switchToThread_runnable') + apply (wps | wpsimp wp: tcbSchedEnqueue_all_queued_tcb_ptrs')+ + apply (fastforce simp: is_active_tcb_ptr_runnable' all_invs_but_ct_idle_or_in_cur_domain'_strg + invs_switchToThread_runnable') done qed @@ -1201,47 +942,42 @@ crunch no_orphans [wp]: completeSignal "no_orphans" (simp: crunch_simps wp: crunch_wps) lemma possibleSwitchTo_almost_no_orphans [wp]: - "\ \s. 
almost_no_orphans target s \ valid_queues' s \ st_tcb_at' runnable' target s - \ weak_sch_act_wf (ksSchedulerAction s) s \ + "\\s. almost_no_orphans target s \ st_tcb_at' runnable' target s + \ weak_sch_act_wf (ksSchedulerAction s) s\ possibleSwitchTo target - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding possibleSwitchTo_def - by (wp rescheduleRequired_valid_queues'_weak tcbSchedEnqueue_almost_no_orphans - ssa_almost_no_orphans static_imp_wp + by (wp tcbSchedEnqueue_almost_no_orphans + ssa_almost_no_orphans hoare_weak_lift_imp | wpc | clarsimp | wp (once) hoare_drop_imp)+ lemma possibleSwitchTo_almost_no_orphans': - "\ \s. almost_no_orphans target s \ valid_queues' s \ st_tcb_at' runnable' target s - \ sch_act_wf (ksSchedulerAction s) s \ + "\\s. almost_no_orphans target s \ st_tcb_at' runnable' target s + \ sch_act_wf (ksSchedulerAction s) s \ possibleSwitchTo target - \ \rv s. no_orphans s \" + \\_. no_orphans\" by wp (strengthen sch_act_wf_weak, assumption) +crunches tcbQueueAppend, tcbQueuePrepend + for almost_no_orphans[wp]: "almost_no_orphans tcbPtr" + lemma tcbSchedAppend_almost_no_orphans: - "\ \s. almost_no_orphans thread s \ valid_queues' s \ + "\almost_no_orphans thread\ tcbSchedAppend thread - \ \_ s. no_orphans s \" + \\_. no_orphans\" unfolding tcbSchedAppend_def - apply (wp setQueue_almost_no_orphans_enq[where tcb_ptr=thread] threadSet_no_orphans - | clarsimp simp: unless_def | simp only: subset_insertI)+ - apply (unfold threadGet_def) - apply (wp getObject_tcb_wp | clarsimp)+ - apply (drule obj_at_ko_at', clarsimp) - apply (rule_tac x=ko in exI) - apply (clarsimp simp: almost_no_orphans_def no_orphans_def) - apply (drule queued_in_queue | simp)+ - apply (auto simp: all_queued_tcb_ptrs_def) + apply (wpsimp wp: tcbQueued_update_True_no_orphans threadGet_wp) + apply (fastforce simp: almost_no_orphans_def no_orphans_def all_queued_tcb_ptrs_def obj_at'_def) done lemma no_orphans_is_almost[simp]: "no_orphans s \ almost_no_orphans t s" by (clarsimp simp: no_orphans_def almost_no_orphans_def) -crunch no_orphans [wp]: decDomainTime no_orphans -(wp: no_orphans_lift) - -crunch valid_queues' [wp]: decDomainTime valid_queues' +crunches decDomainTime + for no_orphans [wp]: no_orphans + (wp: no_orphans_lift) lemma timerTick_no_orphans [wp]: "\ \s. no_orphans s \ invs' s \ @@ -1250,28 +986,18 @@ lemma timerTick_no_orphans [wp]: unfolding timerTick_def getDomainTime_def supply if_split[split del] apply (subst threadState_case_if) - apply (wpsimp wp: threadSet_no_orphans threadSet_valid_queues' - threadSet_valid_queues' tcbSchedAppend_almost_no_orphans threadSet_sch_act + apply (wpsimp wp: threadSet_no_orphans tcbSchedAppend_almost_no_orphans threadSet_almost_no_orphans threadSet_no_orphans tcbSchedAppend_sch_act_wf hoare_drop_imp simp: if_apply_def2 | strengthen sch_act_wf_weak)+ - apply (rule_tac Q="\rv s. no_orphans s \ valid_queues' s \ tcb_at' thread s - \ sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp) - apply (clarsimp simp: inQ_def) - apply (wp hoare_drop_imps | clarsimp)+ - apply (auto split: if_split) done - lemma handleDoubleFault_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ - handleDoubleFault tptr ex1 ex2 - \ \rv s. no_orphans s \" + "\no_orphans\ handleDoubleFault tptr ex1 ex2 \\_. 
no_orphans \" unfolding handleDoubleFault_def - apply (wp setThreadState_not_active_no_orphans - | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ - done + by (wpsimp wp: setThreadState_not_active_no_orphans + simp: is_active_thread_state_def isRestart_def isRunning_def)+ crunch st_tcb' [wp]: getThreadCallerSlot "st_tcb_at' (\st. P st) t" @@ -1287,7 +1013,7 @@ lemma setupCallerCap_no_orphans [wp]: setupCallerCap sender receiver gr \ \rv s. no_orphans s \" unfolding setupCallerCap_def - apply (wp setThreadState_not_active_no_orphans + apply (wp setThreadState_not_active_no_orphans hoare_drop_imps | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ done @@ -1303,86 +1029,54 @@ lemma setupCallerCap_almost_no_orphans [wp]: setupCallerCap sender receiver gr \ \rv s. almost_no_orphans tcb_ptr s \" unfolding setupCallerCap_def - apply (wp setThreadState_not_active_almost_no_orphans + apply (wp setThreadState_not_active_almost_no_orphans hoare_drop_imps | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ done -crunches doIPCTransfer, setMRs - for no_orphans [wp]: "no_orphans" - (wp: no_orphans_lift) - -crunch ksQ'[wp]: setEndpoint "\s. P (ksReadyQueues s)" - (wp: setObject_queues_unchanged_tcb updateObject_default_inv) +crunches cteInsert, setExtraBadge, setMessageInfo, transferCaps, copyMRs, + doNormalTransfer, doFaultTransfer, copyGlobalMappings + for tcbQueued[wp]: "obj_at' (\tcb. P (tcbQueued tcb)) tcb_ptr" + (wp: crunch_wps simp: crunch_simps) -crunch no_orphans [wp]: setEndpoint "no_orphans" - (wp: no_orphans_lift) +crunches doIPCTransfer, setMRs, setEndpoint + for ksReadyQueues [wp]: "\s. P (ksReadyQueues s)" + and no_orphans [wp]: "no_orphans" + (wp: no_orphans_lift updateObject_default_inv) lemma sendIPC_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ sendIPC blocking call badge canGrant canGrantReply thread epptr - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding sendIPC_def apply (wp hoare_drop_imps setThreadState_not_active_no_orphans sts_st_tcb' possibleSwitchTo_almost_no_orphans' | wpc | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ - - apply (rule_tac Q="\rv. no_orphans and valid_queues' and valid_objs' and ko_at' rv epptr + apply (rule_tac Q="\rv. no_orphans and valid_objs' and ko_at' rv epptr and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) apply (fastforce simp: valid_objs'_def valid_obj'_def valid_ep'_def obj_at'_def) apply (wp get_ep_sp' | clarsimp)+ done lemma sendFaultIPC_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ sendFaultIPC tptr fault - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding sendFaultIPC_def apply (rule hoare_pre) - apply (wp threadSet_valid_queues' threadSet_no_orphans threadSet_valid_objs' + apply (wp threadSet_no_orphans threadSet_valid_objs' threadSet_sch_act | wpc | clarsimp)+ - apply (rule_tac Q'="\handlerCap s. no_orphans s \ valid_queues' s - \ valid_objs' s - \ sch_act_wf (ksSchedulerAction s) s" - in hoare_post_imp_R) + apply (rule_tac Q'="\_ s. 
no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s" + in hoare_strengthen_postE_R) apply (wp | clarsimp simp: inQ_def valid_tcb'_def tcb_cte_cases_def)+ done -lemma sendIPC_valid_queues' [wp]: - "\ \s. valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ - sendIPC blocking call badge canGrant canGrantReply thread epptr - \ \rv s. valid_queues' s \" - unfolding sendIPC_def - apply (wpsimp wp: hoare_drop_imps) - apply (wpsimp | wp (once) sts_st_tcb')+ - apply (rule_tac Q="\rv. valid_queues' and valid_objs' and ko_at' rv epptr - and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) - apply (clarsimp) - apply (wp get_ep_sp' | clarsimp)+ - done - -lemma sendFaultIPC_valid_queues' [wp]: - "\ \s. valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ - sendFaultIPC tptr fault - \ \rv s. valid_queues' s \" - unfolding sendFaultIPC_def - apply (rule hoare_pre) - apply (wp threadSet_valid_queues' threadSet_valid_objs' threadSet_sch_act - | wpc | clarsimp)+ - apply (rule_tac Q'="\handlerCap s. valid_queues' s \ valid_objs' s - \ sch_act_wf (ksSchedulerAction s) s" - in hoare_post_imp_R) - apply (wp | clarsimp simp: inQ_def valid_tcb'_def tcb_cte_cases_def)+ - done - -lemma handleFault_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ +lemma handleFault_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ handleFault tptr ex1 - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding handleFault_def - apply (rule hoare_pre) - apply (wp | clarsimp)+ - done + by wpsimp lemma replyFromKernel_no_orphans [wp]: "\ \s. no_orphans s \ @@ -1394,32 +1088,24 @@ lemma replyFromKernel_no_orphans [wp]: crunch inv [wp]: alignError "P" -lemma createObjects_no_orphans [wp]: - "\ \s. no_orphans s \ pspace_aligned' s \ pspace_no_overlap' ptr sz s \ pspace_distinct' s - \ n \ 0 \ range_cover ptr sz (objBitsKO val + gbits) n - \ \ case_option False (is_active_thread_state \ tcbState) (projectKO_opt val) \ +lemma createObjects_no_orphans[wp]: + "\\s. no_orphans s \ pspace_aligned' s \ pspace_no_overlap' ptr sz s \ pspace_distinct' s + \ n \ 0 \ range_cover ptr sz (objBitsKO val + gbits) n + \ \ case_option False (is_active_thread_state \ tcbState) (projectKO_opt val) + \ \ case_option False tcbQueued (projectKO_opt val)\ createObjects ptr n val gbits - \ \rv s. no_orphans s \" + \\_ s. no_orphans s\" apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def is_active_tcb_ptr_def all_queued_tcb_ptrs_def) apply (simp only: imp_conv_disj pred_tcb_at'_def createObjects_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift createObjects_orig_obj_at2') - apply clarsimp - apply (erule(1) impE) - apply clarsimp - apply (drule_tac x = x in spec) - apply (erule impE) - apply (clarsimp simp: obj_at'_def split: option.splits) - apply simp + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift createObjects_orig_obj_at2'[where sz=sz]) + apply (clarsimp simp: comp_def split: option.splits) done -lemma copyGlobalMappings_no_orphans [wp]: - "\ \s. no_orphans s \ - copyGlobalMappings newPD - \ \rv s. 
no_orphans s \" +lemma copyGlobalMappings_no_orphans[wp]: + "copyGlobalMappings newPD \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) crunch no_orphans [wp]: insertNewCap "no_orphans" (wp: hoare_drop_imps) @@ -1565,43 +1251,45 @@ lemma mapM_x_match: by assumption lemma cancelAllIPC_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ - cancelAllIPC epptr - \ \rv s. no_orphans s \" + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s\ + cancelAllIPC epptr + \\_. no_orphans\" unfolding cancelAllIPC_def apply (wp sts_valid_objs' set_ep_valid_objs' sts_st_tcb' hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans | wpc | rule mapM_x_match, rename_tac list, - rule_tac V="\_. valid_queues' and valid_objs'" + rule_tac V="\_. valid_objs' and pspace_aligned' and pspace_distinct'" and I="no_orphans and (\s. \t\set list. tcb_at' t s)" in mapM_x_inv_wp2 | clarsimp simp: valid_tcb_state'_def)+ - apply (rule_tac Q="\rv. no_orphans and valid_objs' and valid_queues' and ko_at' rv epptr" + apply (rule_tac Q="\rv. no_orphans and valid_objs' and pspace_aligned' and pspace_distinct' and + ko_at' rv epptr" in hoare_post_imp) apply (fastforce simp: valid_obj'_def valid_ep'_def obj_at'_def) apply (wp get_ep_sp' | clarsimp)+ done lemma cancelAllSignals_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ + "\\s. no_orphans s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s\ cancelAllSignals ntfn - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding cancelAllSignals_def apply (wp sts_valid_objs' set_ntfn_valid_objs' sts_st_tcb' hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans | wpc | clarsimp simp: valid_tcb_state'_def)+ apply (rename_tac list) - apply (rule_tac V="\_. valid_queues' and valid_objs'" + apply (rule_tac V="\_. valid_objs' and pspace_aligned' and pspace_distinct'" and I="no_orphans and (\s. \t\set list. tcb_at' t s)" in mapM_x_inv_wp2) apply simp apply (wp sts_valid_objs' set_ntfn_valid_objs' sts_st_tcb' hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans| clarsimp simp: valid_tcb_state'_def)+ - apply (rule_tac Q="\rv. no_orphans and valid_objs' and valid_queues' and ko_at' rv ntfn" + apply (rule_tac Q="\rv. no_orphans and valid_objs' and pspace_aligned' and pspace_distinct' and + ko_at' rv ntfn" in hoare_post_imp) apply (fastforce simp: valid_obj'_def valid_ntfn'_def obj_at'_def) apply (wp get_ntfn_sp' | clarsimp)+ @@ -1614,9 +1302,9 @@ lemma unbindNotification_no_orphans[wp]: unbindNotification t \ \rv s. no_orphans s\" unfolding unbindNotification_def - apply (rule hoare_seq_ext[OF _ gbn_sp']) + apply (rule bind_wp[OF _ gbn_sp']) apply (case_tac ntfnPtr, simp_all, wp, simp) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (wp | simp)+ done @@ -1627,43 +1315,36 @@ lemma unbindMaybeNotification_no_orphans[wp]: unfolding unbindMaybeNotification_def by (wp getNotification_wp | simp | wpc)+ -lemma finaliseCapTrue_standin_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ - finaliseCapTrue_standin cap final - \ \rv s. no_orphans s \" +lemma finaliseCapTrue_standin_no_orphans[wp]: + "\no_orphans and valid_objs' and pspace_aligned' and pspace_distinct'\ + finaliseCapTrue_standin cap final + \\_. 
no_orphans\" unfolding finaliseCapTrue_standin_def - apply (rule hoare_pre) - apply (wp | clarsimp simp: Let_def | wpc)+ - done + by (wpsimp | clarsimp simp: Let_def | wpc)+ -lemma cteDeleteOne_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ +lemma cteDeleteOne_no_orphans[wp]: + "\no_orphans and valid_objs' and pspace_aligned' and pspace_distinct'\ cteDeleteOne slot - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding cteDeleteOne_def - apply (wp assert_inv isFinalCapability_inv weak_if_wp | clarsimp simp: unless_def)+ - done + by (wp assert_inv isFinalCapability_inv weak_if_wp | clarsimp simp: unless_def)+ crunch valid_objs' [wp]: getThreadReplySlot "valid_objs'" -lemma cancelSignal_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ - cancelSignal t ntfn - \ \rv s. no_orphans s \" +lemma cancelSignal_no_orphans[wp]: + "cancelSignal t ntfn \no_orphans\" unfolding cancelSignal_def Let_def - apply (rule hoare_pre) - apply (wp hoare_drop_imps setThreadState_not_active_no_orphans | wpc - | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ - done + by (wpsimp wp: hoare_drop_imps setThreadState_not_active_no_orphans + simp: is_active_thread_state_def isRestart_def isRunning_def) lemma cancelIPC_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ + "\no_orphans and valid_objs' and pspace_aligned' and pspace_distinct'\ cancelIPC t \ \rv s. no_orphans s \" unfolding cancelIPC_def Let_def apply (rule hoare_pre) apply (wp setThreadState_not_active_no_orphans hoare_drop_imps weak_if_wp - threadSet_valid_queues' threadSet_valid_objs' threadSet_no_orphans | wpc + threadSet_valid_objs' threadSet_no_orphans | wpc | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def inQ_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def)+ done @@ -1672,30 +1353,26 @@ lemma cancelIPC_no_orphans [wp]: lemma asUser_almost_no_orphans: "\almost_no_orphans t\ asUser a f \\_. almost_no_orphans t\" unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) -lemma sendSignal_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ +lemma sendSignal_no_orphans[wp]: + "\\s. no_orphans s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s + \ pspace_aligned' s \ pspace_distinct' s\ sendSignal ntfnptr badge - \ \_ s. no_orphans s \" + \\_. no_orphans\" unfolding sendSignal_def - apply (rule hoare_pre) - apply (wp sts_st_tcb' gts_wp' getNotification_wp asUser_almost_no_orphans - cancelIPC_weak_sch_act_wf - | wpc | clarsimp simp: sch_act_wf_weak)+ + apply (wp sts_st_tcb' gts_wp' getNotification_wp asUser_almost_no_orphans + cancelIPC_weak_sch_act_wf + | wpc | clarsimp simp: sch_act_wf_weak)+ done -lemma handleInterrupt_no_orphans [wp]: - "\ \s. no_orphans s \ invs' s \ +lemma handleInterrupt_no_orphans[wp]: + "\no_orphans and invs' and pspace_aligned' and pspace_distinct'\ handleInterrupt irq - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding handleInterrupt_def - apply (rule hoare_pre) - apply (wp hoare_drop_imps hoare_vcg_all_lift getIRQState_inv - | wpc | clarsimp simp: invs'_def valid_state'_def maskIrqSignal_def - handleReservedIRQ_def)+ - done + by (wp hoare_drop_imps hoare_vcg_all_lift getIRQState_inv + | wpc | clarsimp simp: invs'_def valid_state'_def maskIrqSignal_def handleReservedIRQ_def)+ lemma updateRestartPC_no_orphans[wp]: "\ \s. 
no_orphans s \ invs' s \ @@ -1703,20 +1380,6 @@ lemma updateRestartPC_no_orphans[wp]: \ \rv s. no_orphans s \" by (wpsimp simp: updateRestartPC_def asUser_no_orphans) -lemma updateRestartPC_valid_queues'[wp]: - "\ \s. valid_queues' s \ - updateRestartPC t - \ \rv s. valid_queues' s \" - unfolding updateRestartPC_def - apply (rule asUser_valid_queues') - done - -lemma updateRestartPC_no_orphans_invs'_valid_queues'[wp]: - "\\s. no_orphans s \ invs' s \ valid_queues' s \ - updateRestartPC t - \\rv s. no_orphans s \ valid_queues' s \" - by (wpsimp simp: updateRestartPC_def asUser_no_orphans) - lemma suspend_no_orphans [wp]: "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' t s \ suspend t @@ -1742,13 +1405,10 @@ lemma deleteASIDPool_no_orphans [wp]: apply (wp mapM_wp_inv getObject_inv loadObject_default_inv | clarsimp)+ done -lemma storePTE_no_orphans [wp]: - "\ \s. no_orphans s \ - storePTE ptr val - \ \rv s. no_orphans s \" +lemma storePTE_no_orphans[wp]: + "storePTE ptr val \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) crunch no_orphans [wp]: unmapPage "no_orphans" (wp: crunch_wps) @@ -1757,13 +1417,10 @@ crunches unmapPageTable, prepareThreadDelete for no_orphans [wp]: "no_orphans" (wp: lookupPTSlotFromLevel_inv) -lemma setASIDPool_no_orphans [wp]: - "\ \s. no_orphans s \ - setObject p (ap :: asidpool) - \ \rv s. no_orphans s \" +lemma setASIDPool_no_orphans[wp]: + "setObject p (ap :: asidpool) \no_orphans\" unfolding no_orphans_disj all_queued_tcb_ptrs_def - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_disj_lift) lemma deleteASID_no_orphans [wp]: "\ \s. no_orphans s \ @@ -1786,7 +1443,7 @@ lemma deletingIRQHandler_no_orphans [wp]: deletingIRQHandler irq \ \rv s. no_orphans s \" unfolding deletingIRQHandler_def - apply (wp, auto) + apply (wp hoare_drop_imps, auto) done lemma finaliseCap_no_orphans [wp]: @@ -1820,7 +1477,7 @@ lemma finaliseSlot_no_orphans [wp]: \ \rv s. no_orphans s \" unfolding finaliseSlot_def apply (rule validE_valid, rule hoare_pre, - rule hoare_post_impErr, rule use_spec) + rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where p=slot and slot=slot and Pr=no_orphans]) apply (simp_all add: no_orphans_finalise_prop_stuff) apply (wp | simp)+ @@ -1851,14 +1508,10 @@ lemma cteRevoke_no_orphans [wp]: done lemma cancelBadgedSends_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ - cancelBadgedSends epptr badge - \ \rv s. no_orphans s \" + "cancelBadgedSends epptr badge \no_orphans\" unfolding cancelBadgedSends_def - apply (rule hoare_pre) - apply (wp hoare_drop_imps | wpc | clarsimp)+ - apply (wp filterM_preserved tcbSchedEnqueue_almost_no_orphans gts_wp' - sts_st_tcb' hoare_drop_imps | clarsimp)+ + apply (wpsimp wp: filterM_preserved tcbSchedEnqueue_almost_no_orphans gts_wp' sts_st_tcb' + | wp (once) hoare_drop_imps)+ done crunch no_orphans [wp]: handleFaultReply "no_orphans" @@ -1869,25 +1522,16 @@ lemma doReplyTransfer_no_orphans[wp]: \\rv. 
no_orphans\" unfolding doReplyTransfer_def apply (wp sts_st_tcb' setThreadState_not_active_no_orphans threadSet_no_orphans - threadSet_valid_queues' threadSet_weak_sch_act_wf + threadSet_weak_sch_act_wf | wpc | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def | wp (once) hoare_drop_imps - | strengthen sch_act_wf_weak invs_valid_queues')+ + | strengthen sch_act_wf_weak)+ apply (rule_tac Q="\rv. invs' and no_orphans" in hoare_post_imp) apply (fastforce simp: inQ_def) apply (wp hoare_drop_imps | clarsimp)+ apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def) done -lemma cancelSignal_valid_queues' [wp]: - "\ \s. valid_queues' s \ valid_objs' s \ - cancelSignal t ntfn - \ \rv s. valid_queues' s \" - unfolding cancelSignal_def Let_def - apply (rule hoare_pre) - apply (wp hoare_drop_imps | wpc | clarsimp)+ - done - crunch no_orphans [wp]: setupReplyMaster "no_orphans" (wp: crunch_wps simp: crunch_simps) @@ -1899,7 +1543,6 @@ lemma restart_no_orphans [wp]: apply (wp tcbSchedEnqueue_almost_no_orphans sts_st_tcb' cancelIPC_weak_sch_act_wf | clarsimp simp: o_def if_apply_def2 | strengthen no_orphans_strg_almost - | strengthen invs_valid_queues' | wp (once) hoare_drop_imps)+ apply auto done @@ -1913,15 +1556,12 @@ lemma readreg_no_orphans: done lemma writereg_no_orphans: - "\ \s. no_orphans s \ invs' s \ sch_act_simple s - \ tcb_at' dest s \ ex_nonz_cap_to' dest s\ - invokeTCB (tcbinvocation.WriteRegisters dest resume values arch) - \ \rv s. no_orphans s \" + "\\s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' dest s \ ex_nonz_cap_to' dest s\ + invokeTCB (tcbinvocation.WriteRegisters dest resume values arch) + \\_. no_orphans\" unfolding invokeTCB_def performTransfer_def postModifyRegisters_def - apply simp - apply (rule hoare_pre) - by (wp hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' static_imp_wp - | strengthen invs_valid_queues' | clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap )+ + by (wpsimp wp: hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' hoare_weak_lift_imp + | clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap )+ lemma copyreg_no_orphans: "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' src s @@ -1930,10 +1570,10 @@ lemma copyreg_no_orphans: \ \rv s. no_orphans s \" unfolding invokeTCB_def performTransfer_def postModifyRegisters_def apply simp - apply (wp hoare_vcg_if_lift static_imp_wp) - apply (wp static_imp_wp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs' - restart_no_orphans asUser_no_orphans suspend_nonz_cap_to_tcb - | strengthen invs_valid_queues' | wpc | simp add: if_apply_def2)+ + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp) + apply (wp hoare_weak_lift_imp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs' + restart_no_orphans asUser_no_orphans suspend_nonz_cap_to_tcb + | wpc | simp add: if_apply_def2)+ apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) done @@ -1943,7 +1583,7 @@ lemma settlsbase_no_orphans: \ \rv s. no_orphans s \" unfolding invokeTCB_def performTransfer_def apply simp - apply (wp hoare_vcg_if_lift static_imp_wp) + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp) apply (wpsimp wp: hoare_vcg_imp_lift' mapM_x_wp' asUser_no_orphans)+ done @@ -1955,22 +1595,19 @@ lemma almost_no_orphans_no_orphans': "\ almost_no_orphans t s; ksCurThread s = t\ \ no_orphans s" by (auto simp: almost_no_orphans_def no_orphans_def all_active_tcb_ptrs_def) -lemma setPriority_no_orphans [wp]: - "\ \s. 
no_orphans s \ invs' s \ tcb_at' tptr s \ +lemma setPriority_no_orphans[wp]: + "\no_orphans and invs' and tcb_at' tptr\ setPriority tptr prio - \ \rv s. no_orphans s \" + \\_. no_orphans\" unfolding setPriority_def apply wpsimp - apply (rule_tac Q="\rv s. almost_no_orphans tptr s \ valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp) + apply (rule_tac Q="\_ s. almost_no_orphans tptr s \ weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp) apply clarsimp apply (clarsimp simp: is_active_tcb_ptr_runnable' pred_tcb_at'_def obj_at'_def almost_no_orphans_no_orphans elim!: almost_no_orphans_no_orphans') - apply (wp threadSet_almost_no_orphans threadSet_valid_queues' | clarsimp simp: inQ_def)+ + apply (wp threadSet_almost_no_orphans | clarsimp simp: inQ_def)+ apply (wpsimp wp: threadSet_weak_sch_act_wf) apply (wp tcbSchedDequeue_almost_no_orphans| clarsimp)+ - apply (rule_tac Q="\rv. obj_at' (Not \ tcbQueued) tptr and invs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) - apply (clarsimp simp: obj_at'_def inQ_def) - apply (wp tcbSchedDequeue_not_queued | clarsimp)+ done lemma setMCPriority_no_orphans[wp]: @@ -2009,20 +1646,19 @@ lemma tc_no_orphans: apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s]) apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial - threadSet_cap_to' hoare_vcg_all_lift static_imp_wp | clarsimp simp: inQ_def)+)[2] + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (cases mcp; clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s]) apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' hoare_vcg_all_lift static_imp_wp | clarsimp simp: inQ_def)+)[3] + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[3] apply ((simp only: simp_thms cong: conj_cong | wp cteDelete_deletes cteDelete_invs' cteDelete_sch_act_simple case_option_wp[where m'="return ()", OF setPriority_no_orphans return_inv,simplified] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] checkCap_inv[where P=no_orphans] checkCap_inv[where P="tcb_at' a"] - threadSet_cte_wp_at' hoare_vcg_all_lift_R hoare_vcg_all_lift threadSet_no_orphans - hoare_vcg_const_imp_lift_R static_imp_wp hoare_drop_imp threadSet_ipcbuffer_invs - | strengthen invs_valid_queues' + threadSet_cte_wp_at' hoare_vcg_all_liftE_R hoare_vcg_all_lift threadSet_no_orphans + hoare_vcg_const_imp_lift_R hoare_weak_lift_imp hoare_drop_imp threadSet_ipcbuffer_invs | (simp add: locateSlotTCB_def locateSlotBasic_def objBits_def objBitsKO_def tcbIPCBufferSlot_def tcb_cte_cases_def, wp hoare_return_sp) @@ -2053,13 +1689,12 @@ lemma invokeTCB_no_orphans [wp]: done lemma invokeCNode_no_orphans [wp]: - "\ \s. no_orphans s \ invs' s \ valid_cnode_inv' cinv s \ sch_act_simple s \ + "\no_orphans and invs' and valid_cnode_inv' cinv and sch_act_simple\ invokeCNode cinv - \ \rv. no_orphans \" + \\_. 
no_orphans\" unfolding invokeCNode_def apply (rule hoare_pre) - apply (wp hoare_drop_imps hoare_unless_wp | wpc | clarsimp split del: if_split)+ - apply (simp add: invs_valid_queues') + apply (wp hoare_drop_imps unless_wp | wpc | clarsimp split del: if_split)+ done lemma invokeIRQControl_no_orphans [wp]: @@ -2096,7 +1731,7 @@ lemma performPageInvocation_no_orphans [wp]: apply (simp add: performPageInvocation_def cong: page_invocation.case_cong) apply (rule hoare_pre) - apply (wp mapM_x_wp' mapM_wp' static_imp_wp | wpc | clarsimp)+ + apply (wp mapM_x_wp' mapM_wp' hoare_weak_lift_imp | wpc | clarsimp)+ done lemma performASIDControlInvocation_no_orphans [wp]: @@ -2150,17 +1785,17 @@ lemma performASIDControlInvocation_no_orphans [wp]: \\reply. no_orphans\" apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) - apply (wp static_imp_wp | clarsimp)+ + apply (wp hoare_weak_lift_imp | clarsimp)+ apply (rule_tac Q="\rv s. no_orphans s" in hoare_post_imp) apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def is_active_tcb_ptr_def all_queued_tcb_ptrs_def) apply (wp | clarsimp simp:placeNewObject_def2)+ apply (wp createObjects'_wp_subst)+ - apply (wp static_imp_wp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ + apply (wp hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ apply (strengthen invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace') apply (clarsimp simp:conj_comms) apply (wp deleteObjects_invs'[where idx = idx and d=False] - hoare_ex_wp deleteObjects_cte_wp_at'[where idx = idx and d=False] hoare_vcg_const_imp_lift ) + hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where idx = idx and d=False] hoare_vcg_const_imp_lift ) using invs' misc cte exclude no_orphans cover apply (clarsimp simp: is_active_thread_state_def makeObject_tcb valid_aci'_def cte_wp_at_ctes_of invs_pspace_aligned' invs_pspace_distinct' @@ -2192,17 +1827,15 @@ lemma arch_performInvocation_no_orphans [wp]: done lemma setDomain_no_orphans [wp]: - "\no_orphans and valid_queues and valid_queues' and cur_tcb'\ - setDomain tptr newdom + "\no_orphans and cur_tcb' and tcb_at' tptr\ + setDomain tptr newdom \\_. no_orphans\" apply (simp add: setDomain_def when_def) apply (wp tcbSchedEnqueue_almost_no_orphans hoare_vcg_imp_lift threadSet_almost_no_orphans - threadSet_valid_queues'_no_state threadSet_st_tcb_at2 hoare_vcg_disj_lift + threadSet_st_tcb_at2 hoare_vcg_disj_lift threadSet_no_orphans - | clarsimp simp: st_tcb_at_neg2 not_obj_at')+ - apply (auto simp: tcb_at_typ_at' st_tcb_at_neg' is_active_tcb_ptr_runnable' - cur_tcb'_def obj_at'_def - dest: pred_tcb_at') + | clarsimp simp: st_tcb_at_neg2 not_obj_at')+ + apply (fastforce simp: tcb_at_typ_at' is_active_tcb_ptr_runnable') done lemma performInvocation_no_orphans [wp]: @@ -2229,8 +1862,6 @@ lemma K_bind_hoareE [wp]: "\P\ f \Q\,\E\ \ \P\ K_bind f x \Q\,\E\" by simp -crunch valid_queues' [wp]: replyFromKernel "valid_queues'" - lemma handleInvocation_no_orphans [wp]: "\ \s. 
no_orphans s \ invs' s \ ct_active' s \ ksSchedulerAction s = ResumeCurrentThread \ @@ -2248,20 +1879,12 @@ lemma handleInvocation_no_orphans [wp]: ct_in_state'_set setThreadState_st_tcb hoare_vcg_all_lift | simp add: split_def split del: if_split)+ - apply (wps setThreadState_ct') - apply (wp sts_ksQ - setThreadState_current_no_orphans sts_invs_minor' - ct_in_state'_set setThreadState_st_tcb - | simp add: split_def split del: if_split)+ apply (clarsimp simp: if_apply_def2) - apply (frule(1) ct_not_ksQ) by (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def invs'_def cur_tcb'_def valid_state'_def valid_idle'_def) lemma receiveSignal_no_orphans [wp]: - "\ \s. no_orphans s \ valid_queues' s \ - receiveSignal thread cap isBlocking - \ \rv s. no_orphans s \" + "receiveSignal thread cap isBlocking \no_orphans\" unfolding receiveSignal_def apply (wp hoare_drop_imps setThreadState_not_active_no_orphans | wpc | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def @@ -2279,7 +1902,7 @@ lemma receiveIPC_no_orphans [wp]: hoare_vcg_all_lift sts_st_tcb' | wpc | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def - doNBRecvFailedTransfer_def invs_valid_queues' + doNBRecvFailedTransfer_def | strengthen sch_act_wf_weak)+ done @@ -2290,7 +1913,7 @@ lemma deleteCallerCap_no_orphans [wp]: deleteCallerCap receiver \ \rv s. no_orphans s \" unfolding deleteCallerCap_def - by wpsimp auto + by (wpsimp wp: hoare_drop_imps) auto lemma remove_neg_strg: "(A \ B) \ ((x \ A) \ (\ x \ B))" @@ -2303,7 +1926,7 @@ notes if_cong[cong] shows \ \rv . no_orphans \" unfolding handleRecv_def apply (clarsimp simp: whenE_def split del: if_split | wp hoare_drop_imps getNotification_wp | wpc )+ (*takes a while*) - apply (rule_tac Q'="\rv s. no_orphans s \ invs' s" in hoare_post_imp_R) + apply (rule_tac Q'="\rv s. no_orphans s \ invs' s" in hoare_strengthen_postE_R) apply (wp, fastforce) apply (rule_tac Q="\rv s. no_orphans s \ invs' s" in hoare_post_imp) apply (wp | clarsimp | fastforce)+ @@ -2370,7 +1993,8 @@ theorem callKernel_no_orphans [wp]: callKernel e \ \rv s. 
no_orphans s \" unfolding callKernel_def - by (wpsimp wp: weak_if_wp schedule_invs' hoare_drop_imps) + by (wpsimp wp: weak_if_wp schedule_invs' hoare_drop_imps + | strengthen invs_pspace_aligned' invs_pspace_distinct')+ end diff --git a/proof/refine/X64/ADT_H.thy b/proof/refine/X64/ADT_H.thy index d4c1ed75ff..d736b254d3 100644 --- a/proof/refine/X64/ADT_H.thy +++ b/proof/refine/X64/ADT_H.thy @@ -243,61 +243,61 @@ lemma apply (clarsimp simp add: absHeapArch_def) apply (rename_tac arch_kernel_object) apply (case_tac arch_kernel_object) - apply (rule_tac x=y in exI) - apply (clarsimp split: asidpool.splits) - using fst_pte - apply (erule_tac x=y in allE) - apply (clarsimp split: if_split_asm) - apply (rule_tac x="(y && ~~ mask pt_bits)" in exI, simp) - apply (simp add: range_composition[symmetric]) - apply (rule_tac x="ucast (y >> 3)" in range_eqI) - apply (simp add: pt_bits_def pageBits_def bit_simps) - apply (simp add: word_size ucast_ucast_mask and_mask_shiftl_comm) - using pspace_aligned' - apply (simp add: pspace_aligned'_def dom_def) - apply (erule_tac x=y in allE) - apply (simp add: objBitsKO_def archObjSize_def is_aligned_neg_mask_eq - and_not_mask[symmetric] AND_NOT_mask_plus_AND_mask_eq) - using fst_pde - apply (erule_tac x=y in allE) - apply (clarsimp split: if_split_asm) - apply (rule_tac x="(y && ~~ mask pd_bits)" in exI, simp) - apply (simp add: range_composition[symmetric]) - apply (rule_tac x="ucast (y >> 3)" in range_eqI) - apply (simp add: pt_bits_def pageBits_def bit_simps) - apply (simp add: word_size ucast_ucast_mask and_mask_shiftl_comm) - using pspace_aligned' - apply (simp add: pspace_aligned'_def dom_def) - apply (erule_tac x=y in allE) - apply (simp add: objBitsKO_def archObjSize_def is_aligned_neg_mask_eq - and_not_mask[symmetric] AND_NOT_mask_plus_AND_mask_eq) - using fst_pdpte + apply (rule_tac x=y in exI) + apply (clarsimp split: asidpool.splits) + using fst_pte apply (erule_tac x=y in allE) apply (clarsimp split: if_split_asm) - apply (rule_tac x="(y && ~~ mask pdpt_bits)" in exI, simp) + apply (rule_tac x="(y && ~~ mask pt_bits)" in exI, simp) apply (simp add: range_composition[symmetric]) apply (rule_tac x="ucast (y >> 3)" in range_eqI) - apply (simp add: pt_bits_def pageBits_def bit_simps) + apply (simp add: pt_bits_def bit_simps) apply (simp add: word_size ucast_ucast_mask and_mask_shiftl_comm) - using pspace_aligned' + using pspace_aligned' apply (simp add: pspace_aligned'_def dom_def) apply (erule_tac x=y in allE) - apply (simp add: objBitsKO_def archObjSize_def is_aligned_neg_mask_eq - and_not_mask[symmetric] AND_NOT_mask_plus_AND_mask_eq) - using fst_pml4e + apply (simp add: objBitsKO_def archObjSize_def + and_not_mask[symmetric] AND_NOT_mask_plus_AND_mask_eq) + using fst_pde apply (erule_tac x=y in allE) apply (clarsimp split: if_split_asm) - apply (rule_tac x="(y && ~~ mask pml4_bits)" in exI, simp) + apply (rule_tac x="(y && ~~ mask pd_bits)" in exI, simp) apply (simp add: range_composition[symmetric]) apply (rule_tac x="ucast (y >> 3)" in range_eqI) - apply (simp add: pt_bits_def pageBits_def bit_simps) + apply (simp add: pt_bits_def bit_simps) apply (simp add: word_size ucast_ucast_mask and_mask_shiftl_comm) - using pspace_aligned' + using pspace_aligned' apply (simp add: pspace_aligned'_def dom_def) apply (erule_tac x=y in allE) - apply (simp add: objBitsKO_def archObjSize_def is_aligned_neg_mask_eq - and_not_mask[symmetric] AND_NOT_mask_plus_AND_mask_eq) - apply (simp split: option.splits Structures_H.kernel_object.splits) + apply (simp add: 
objBitsKO_def archObjSize_def + and_not_mask[symmetric] AND_NOT_mask_plus_AND_mask_eq) + using fst_pdpte + apply (erule_tac x=y in allE) + apply (clarsimp split: if_split_asm) + apply (rule_tac x="(y && ~~ mask pdpt_bits)" in exI, simp) + apply (simp add: range_composition[symmetric]) + apply (rule_tac x="ucast (y >> 3)" in range_eqI) + apply (simp add: pt_bits_def bit_simps) + apply (simp add: word_size ucast_ucast_mask and_mask_shiftl_comm) + using pspace_aligned' + apply (simp add: pspace_aligned'_def dom_def) + apply (erule_tac x=y in allE) + apply (simp add: objBitsKO_def archObjSize_def + and_not_mask[symmetric] AND_NOT_mask_plus_AND_mask_eq) + using fst_pml4e + apply (erule_tac x=y in allE) + apply (clarsimp split: if_split_asm) + apply (rule_tac x="y && ~~ mask pml4_bits" in exI, simp) + apply (simp add: range_composition[symmetric]) + apply (rule_tac x="ucast (y >> 3)" in range_eqI) + apply (simp add: pt_bits_def bit_simps) + apply (simp add: word_size ucast_ucast_mask and_mask_shiftl_comm) + using pspace_aligned' + apply (simp add: pspace_aligned'_def dom_def) + apply (erule_tac x=y in allE) + apply (simp add: objBitsKO_def archObjSize_def + and_not_mask[symmetric] AND_NOT_mask_plus_AND_mask_eq) + apply (simp split: option.splits Structures_H.kernel_object.splits) apply (intro allI) apply (intro impI) apply (elim exE) @@ -305,56 +305,56 @@ lemma apply (simp add: absHeapArch_def) apply (rename_tac arch_kernel_object z a b) apply (case_tac arch_kernel_object) - apply (clarsimp split: asidpool.splits) - apply (simp add: other_obj_relation_def asid_pool_relation_def o_def inv_def) - apply simp - apply (clarsimp simp: pte_relation_def - split: if_split_asm) - using ptes - apply (erule_tac x=x in allE) - apply simp - apply (erule_tac x=y in allE) - apply (clarsimp simp: bit_simps) - apply (simp add: absPageTable_def split: option.splits X64_H.pte.splits) - apply (clarsimp simp add: vmrights_map_def vm_rights_of_def - vm_kernel_only_def vm_read_only_def vm_read_write_def - split: vmrights.splits) - apply simp - apply (clarsimp simp: pde_relation_def - split: if_split_asm) - using pdes - apply (erule_tac x=x in allE) - apply simp - apply (erule_tac x=y in allE) - apply (clarsimp simp: bit_simps) - apply (simp add: absPageDirectory_def split: option.splits X64_H.pde.splits) - apply (clarsimp simp add: vmrights_map_def vm_rights_of_def - vm_kernel_only_def vm_read_only_def vm_read_write_def - split: vmrights.splits) - apply simp - apply (clarsimp simp: pdpte_relation_def - split: if_split_asm) - using pdptes - apply (erule_tac x=x in allE) - apply simp - apply (erule_tac x=y in allE) - apply (clarsimp simp: bit_simps) - apply (simp add: absPDPT_def split: option.splits X64_H.pdpte.splits) - apply (clarsimp simp add: vmrights_map_def vm_rights_of_def - vm_kernel_only_def vm_read_only_def vm_read_write_def - split: vmrights.splits) - apply simp - apply (clarsimp simp: pml4e_relation_def - split: if_split_asm) - using pml4es - apply (erule_tac x=x in allE) - apply simp - apply (erule_tac x=y in allE) - apply (clarsimp simp: bit_simps) - apply (simp add: absPML4_def split: option.splits X64_H.pml4e.splits) - apply (clarsimp simp add: vmrights_map_def vm_rights_of_def - vm_kernel_only_def vm_read_only_def vm_read_write_def - split: vmrights.splits) + apply (clarsimp split: asidpool.splits) + apply (simp add: other_obj_relation_def asid_pool_relation_def o_def inv_def) + apply simp + apply (clarsimp simp: pte_relation_def + split: if_split_asm) + using ptes + apply (erule_tac x=x in allE) + 
apply simp + apply (erule_tac x=y in allE) + apply (clarsimp simp: bit_simps) + apply (simp add: absPageTable_def split: option.splits X64_H.pte.splits) + apply (clarsimp simp add: vmrights_map_def vm_rights_of_def + vm_kernel_only_def vm_read_only_def vm_read_write_def + split: vmrights.splits) + apply simp + apply (clarsimp simp: pde_relation_def + split: if_split_asm) + using pdes + apply (erule_tac x=x in allE) + apply simp + apply (erule_tac x=y in allE) + apply (clarsimp simp: bit_simps) + apply (simp add: absPageDirectory_def split: option.splits X64_H.pde.splits) + apply (clarsimp simp add: vmrights_map_def vm_rights_of_def + vm_kernel_only_def vm_read_only_def vm_read_write_def + split: vmrights.splits) + apply simp + apply (clarsimp simp: pdpte_relation_def + split: if_split_asm) + using pdptes + apply (erule_tac x=x in allE) + apply simp + apply (erule_tac x=y in allE) + apply (clarsimp simp: bit_simps) + apply (simp add: absPDPT_def split: option.splits X64_H.pdpte.splits) + apply (clarsimp simp add: vmrights_map_def vm_rights_of_def + vm_kernel_only_def vm_read_only_def vm_read_write_def + split: vmrights.splits) + apply simp + apply (clarsimp simp: pml4e_relation_def + split: if_split_asm) + using pml4es + apply (erule_tac x=x in allE) + apply simp + apply (erule_tac x=y in allE) + apply (clarsimp simp: bit_simps) + apply (simp add: absPML4_def split: option.splits X64_H.pml4e.splits) + apply (clarsimp simp add: vmrights_map_def vm_rights_of_def + vm_kernel_only_def vm_read_only_def vm_read_write_def + split: vmrights.splits) done definition @@ -673,13 +673,12 @@ proof - by (fastforce simp add: ghost_relation_def)+ show "?thesis" - supply image_cong_simp [cong del] + supply image_cong_simp [cong del] apply (rule ext) apply (simp add: absHeap_def split: option.splits) apply (rule conjI) using pspace_relation - apply (clarsimp simp add: pspace_relation_def pspace_dom_def UNION_eq - dom_def Collect_eq) + apply (clarsimp simp add: pspace_relation_def pspace_dom_def UNION_eq dom_def Collect_eq) apply (erule_tac x=x in allE) apply clarsimp apply (case_tac "kheap s x", simp) @@ -691,97 +690,72 @@ proof - apply (simp_all add: other_obj_relation_def split: if_split_asm Structures_H.kernel_object.splits) apply (rename_tac sz cs) - apply (clarsimp simp add: image_def cte_map_def - well_formed_cnode_n_def Collect_eq dom_def) + apply (clarsimp simp add: image_def cte_map_def well_formed_cnode_n_def dom_def) apply (erule_tac x="replicate sz False" in allE)+ apply simp apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: image_def) - apply (erule_tac x=0 in allE, simp) apply (erule_tac x=0 in allE, simp) apply (erule_tac x=0 in allE, simp) apply (erule_tac x=0 in allE, simp) - apply clarsimp - apply (erule_tac x=0 in allE, simp add: bit_simps) - apply (rename_tac vmpage_size) - apply (case_tac vmpage_size, simp_all add: bit_simps) + apply (erule_tac x=0 in allE, simp) + apply clarsimp + apply (erule_tac x=0 in allE, simp add: bit_simps) + apply (rename_tac vmpage_size) + apply (case_tac vmpage_size, simp_all add: bit_simps) apply clarsimp apply (intro conjI impI allI) apply (erule pspace_dom_relatedE[OF _ pspace_relation]) apply clarsimp apply (case_tac ko, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp: tcb_relation_cut_def) apply (clarsimp simp add: ep_relation_def EndpointMap_def - split: 
Structures_A.endpoint.splits) + split: Structures_A.endpoint.splits) apply (clarsimp simp add: EndpointMap_def - split: Structures_A.endpoint.splits) + split: Structures_A.endpoint.splits) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: pte_relation_def) - apply (clarsimp simp add: pde_relation_def) - apply (clarsimp simp add: pdpte_relation_def) - apply (clarsimp simp add: pml4e_relation_def) - apply (clarsimp split: if_split_asm)+ + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp simp add: pde_relation_def) + apply (clarsimp simp add: pdpte_relation_def) + apply (clarsimp simp add: pml4e_relation_def) + apply (clarsimp split: if_split_asm)+ apply (erule pspace_dom_relatedE[OF _ pspace_relation]) apply (case_tac ko, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp: tcb_relation_cut_def) apply (clarsimp simp add: ntfn_relation_def AEndpointMap_def - split: Structures_A.ntfn.splits) + split: Structures_A.ntfn.splits) apply (clarsimp simp add: AEndpointMap_def - split: Structures_A.ntfn.splits) + split: Structures_A.ntfn.splits) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: pte_relation_def) - apply (clarsimp simp add: pde_relation_def) - apply (clarsimp simp add: pdpte_relation_def) - apply (clarsimp simp add: pml4e_relation_def) - apply (clarsimp split: if_split_asm)+ - - apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: cte_relation_def split: if_split_asm) - apply (rename_tac arch_kernel_obj) - apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) apply (clarsimp simp add: pte_relation_def) apply (clarsimp simp add: pde_relation_def) apply (clarsimp simp add: pdpte_relation_def) apply (clarsimp simp add: pml4e_relation_def) apply (clarsimp split: if_split_asm)+ - apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: cte_relation_def split: if_split_asm) - apply (rename_tac arch_kernel_obj) - apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp: tcb_relation_cut_def) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) apply (clarsimp simp add: pte_relation_def) apply (clarsimp simp add: pde_relation_def) apply (clarsimp simp add: pdpte_relation_def) apply (clarsimp simp add: pml4e_relation_def) - apply (rename_tac vmpage_size) - apply (cut_tac a=y and sz=vmpage_size in gsUserPages, clarsimp split: if_split_asm) - apply (case_tac "n=0", simp) - apply (case_tac "kheap s (y + n * 2 ^ pageBits)") - apply (rule ccontr) - apply (clarsimp dest!: gsUserPages[symmetric, THEN iffD1] ) - using pspace_aligned - apply (simp add: pspace_aligned_def dom_def) - apply (erule_tac x=y in allE) - apply (case_tac "n=0",(simp split: if_split_asm)+) - apply (frule (2) unaligned_page_offsets_helper) - apply (frule_tac y="n*2^pageBits" in pspace_aligned_distinct_None' - [OF pspace_aligned pspace_distinct]) - 
apply simp - apply (rule conjI, clarsimp simp add: word_gt_0) - apply (simp add: is_aligned_mask) - apply (clarsimp simp add: pageBits_def mask_def ) - apply (case_tac vmpage_size; simp add: bit_simps) - apply ((frule_tac i=n and k="0x1000" in word_mult_less_mono1, simp+)+)[4] + apply (clarsimp split: if_split_asm)+ + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) apply (case_tac ko, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp: tcb_relation_cut_def) apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) apply (clarsimp simp add: pte_relation_def) @@ -793,241 +767,261 @@ proof - apply (case_tac "n=0", simp) apply (case_tac "kheap s (y + n * 2 ^ pageBits)") apply (rule ccontr) - apply (clarsimp dest!: gsUserPages[symmetric, THEN iffD1]) + apply (clarsimp dest!: gsUserPages[symmetric, THEN iffD1] ) using pspace_aligned apply (simp add: pspace_aligned_def dom_def) apply (erule_tac x=y in allE) - apply (case_tac "n=0",simp+) + apply (case_tac "n=0",(simp split: if_split_asm)+) apply (frule (2) unaligned_page_offsets_helper) - apply (frule_tac y="n*2^pageBits" in pspace_aligned_distinct_None' - [OF pspace_aligned pspace_distinct]) + apply (frule_tac y="n*2^pageBits" + in pspace_aligned_distinct_None'[OF pspace_aligned pspace_distinct]) apply simp apply (rule conjI, clarsimp simp add: word_gt_0) apply (simp add: is_aligned_mask) - apply (clarsimp simp add: pageBits_def mask_def bit_simps) - apply (case_tac vmpage_size; simp add: bit_simps) + apply (clarsimp simp add: pageBits_def mask_def ) + apply (case_tac vmpage_size; simp add: bit_simps) apply ((frule_tac i=n and k="0x1000" in word_mult_less_mono1, simp+)+)[4] apply (erule pspace_dom_relatedE[OF _ pspace_relation]) apply (case_tac ko, simp_all add: other_obj_relation_def) apply (clarsimp simp add: cte_relation_def split: if_split_asm) - prefer 2 - apply (rename_tac arch_kernel_obj) - apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: pte_relation_def) - apply (clarsimp simp add: pde_relation_def) - apply (clarsimp simp add: pdpte_relation_def) - apply (clarsimp simp add: pml4e_relation_def) - apply (clarsimp split: if_split_asm) - apply (clarsimp simp add: TcbMap_def tcb_relation_def valid_obj_def) - apply (rename_tac tcb y tcb') - apply (case_tac tcb) - apply (case_tac tcb') - apply (simp add: thread_state_relation_imp_ThStateMap) - apply (subgoal_tac "map_option FaultMap (tcbFault tcb) = tcb_fault") - prefer 2 - apply (simp add: fault_rel_optionation_def) - using valid_objs[simplified valid_objs_def dom_def fun_app_def, - simplified] - apply (erule_tac x=y in allE) - apply (clarsimp simp: valid_obj_def valid_tcb_def - split: option.splits) - using valid_objs[simplified valid_objs_def Ball_def dom_def fun_app_def] + apply (clarsimp simp: tcb_relation_cut_def) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp simp add: pde_relation_def) + apply (clarsimp simp add: pdpte_relation_def) + apply (clarsimp simp add: pml4e_relation_def) + apply (rename_tac vmpage_size) + apply (cut_tac a=y and sz=vmpage_size in gsUserPages, clarsimp split: if_split_asm) + apply (case_tac "n=0", simp) + apply (case_tac "kheap s (y + n * 2 ^ pageBits)") + apply (rule ccontr) + apply (clarsimp 
dest!: gsUserPages[symmetric, THEN iffD1]) + using pspace_aligned + apply (simp add: pspace_aligned_def dom_def pspace_aligned) apply (erule_tac x=y in allE) - apply (clarsimp simp add: cap_relation_imp_CapabilityMap valid_obj_def - valid_tcb_def ran_tcb_cap_cases valid_cap_def2 - arch_tcb_relation_imp_ArchTcnMap) - apply (simp add: absCNode_def cte_map_def) + apply (case_tac "n=0",simp+) + apply (frule (2) unaligned_page_offsets_helper) + apply (frule_tac y="n*2^pageBits" in pspace_aligned_distinct_None' + [OF pspace_aligned pspace_distinct]) + apply simp + apply (rule conjI, clarsimp simp add: word_gt_0) + apply (simp add: is_aligned_mask) + apply (clarsimp simp add: mask_def bit_simps) + apply (case_tac vmpage_size; simp add: bit_simps) + apply ((frule_tac k="0x1000" in word_mult_less_mono1, simp+)+)[4] apply (erule pspace_dom_relatedE[OF _ pspace_relation]) - apply (case_tac ko, simp_all add: other_obj_relation_def - split: if_split_asm) + apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp: tcb_relation_cut_def) prefer 2 apply (rename_tac arch_kernel_obj) apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: pte_relation_def) - apply (clarsimp simp add: pde_relation_def) - apply (clarsimp simp add: pdpte_relation_def) - apply (clarsimp simp add: pml4e_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp simp add: pde_relation_def) + apply (clarsimp simp add: pdpte_relation_def) + apply (clarsimp simp add: pml4e_relation_def) apply (clarsimp split: if_split_asm) - apply (simp add: cte_map_def) - apply (clarsimp simp add: cte_relation_def) - apply (cut_tac a=y and n=sz in gsCNodes, clarsimp) - using pspace_aligned[simplified pspace_aligned_def] - apply (drule_tac x=y in bspec, clarsimp) - apply clarsimp - apply (case_tac "(of_bl ya::machine_word) * 2^cte_level_bits = 0", simp) - apply (rule ext) - apply simp - apply (rule conjI) - prefer 2 - using valid_objs[simplified valid_objs_def Ball_def dom_def - fun_app_def] - apply (erule_tac x=y in allE) - apply (clarsimp simp add: valid_obj_def valid_cs_def valid_cs_size_def - well_formed_cnode_n_def dom_def Collect_eq) - apply (frule_tac x=ya in spec, simp) - apply (erule_tac x=bl in allE) - apply clarsimp+ - apply (frule pspace_relation_absD[OF _ pspace_relation]) - apply (simp add: cte_map_def) - apply (drule_tac x="y + of_bl bl * 2^cte_level_bits" in spec) - apply clarsimp - apply (erule_tac x="cte_relation bl" in allE) - apply (erule impE) - apply (fastforce simp add: well_formed_cnode_n_def) - apply clarsimp - apply (clarsimp simp add: cte_relation_def) - apply (rule cap_relation_imp_CapabilityMap) - using valid_objs[simplified valid_objs_def Ball_def dom_def - fun_app_def] - apply (erule_tac x=y in allE) - apply (clarsimp simp: valid_obj_def valid_cs_def valid_cap_def2 ran_def) - apply (fastforce simp: cte_level_bits_def objBits_defs)+ - apply (subgoal_tac "kheap s (y + of_bl ya * 2^cte_level_bits) = None") + apply (clarsimp simp add: TcbMap_def tcb_relation_def valid_obj_def) + apply (rename_tac tcb y tcb') + apply (case_tac tcb) + apply (case_tac tcb') + apply (simp add: thread_state_relation_imp_ThStateMap) + apply (subgoal_tac "map_option FaultMap (tcbFault tcb) = tcb_fault") + prefer 2 + apply (simp add: fault_rel_optionation_def) + using valid_objs[simplified valid_objs_def dom_def fun_app_def, simplified] + apply (erule_tac x=y in allE) + apply (clarsimp simp: 
valid_obj_def valid_tcb_def + split: option.splits) + using valid_objs[simplified valid_objs_def Ball_def dom_def fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: cap_relation_imp_CapabilityMap valid_obj_def + valid_tcb_def ran_tcb_cap_cases valid_cap_def2 + arch_tcb_relation_imp_ArchTcnMap) + apply (simp add: absCNode_def cte_map_def) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: other_obj_relation_def + split: if_split_asm) + prefer 2 + apply (clarsimp simp: tcb_relation_cut_def) + prefer 2 + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp simp add: pde_relation_def) + apply (clarsimp simp add: pdpte_relation_def) + apply (clarsimp simp add: pml4e_relation_def) + apply (clarsimp split: if_split_asm) + apply (simp add: cte_map_def) + apply (clarsimp simp add: cte_relation_def) + apply (cut_tac a=y and n=sz in gsCNodes, clarsimp) + using pspace_aligned[simplified pspace_aligned_def] + apply (drule_tac x=y in bspec, clarsimp) + apply clarsimp + apply (case_tac "(of_bl ya::machine_word) * 2^cte_level_bits = 0", simp) + apply (rule ext) + apply simp + apply (rule conjI) prefer 2 using valid_objs[simplified valid_objs_def Ball_def dom_def fun_app_def] apply (erule_tac x=y in allE) - apply (clarsimp simp add: valid_obj_def valid_cs_def valid_cs_size_def) - apply (rule pspace_aligned_distinct_None'[OF - pspace_aligned pspace_distinct], assumption) - apply (clarsimp simp: word_neq_0_conv power_add cte_index_repair) - apply (simp add: well_formed_cnode_n_def dom_def Collect_eq) - apply (erule_tac x=ya in allE)+ - apply (rule word_mult_less_mono1) - apply (subgoal_tac "sz = length ya") - apply simp - apply (rule of_bl_length, (simp add: word_bits_def)+)[1] - apply fastforce - apply (simp add: cte_level_bits_def) - apply (simp add: word_bits_conv cte_level_bits_def) - apply (drule_tac a="2::nat" in power_strict_increasing, simp+) - apply (rule ccontr, clarsimp) - apply (cut_tac a="y + of_bl ya * 2^cte_level_bits" and n=yc in gsCNodes) + apply (clarsimp simp add: valid_obj_def valid_cs_def valid_cs_size_def + well_formed_cnode_n_def dom_def Collect_eq) + apply (frule_tac x=ya in spec, simp) + apply (erule_tac x=bl in allE) + apply clarsimp+ + apply (frule pspace_relation_absD[OF _ pspace_relation]) + apply (simp add: cte_map_def) + apply (drule_tac x="y + of_bl bl * 2^cte_level_bits" in spec) + apply clarsimp + apply (erule_tac x="cte_relation bl" in allE) + apply (erule impE) + apply (fastforce simp add: well_formed_cnode_n_def) apply clarsimp + apply (clarsimp simp add: cte_relation_def) + apply (rule cap_relation_imp_CapabilityMap) + using valid_objs[simplified valid_objs_def Ball_def dom_def fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp: valid_obj_def valid_cs_def valid_cap_def2 ran_def) + apply (fastforce simp: cte_level_bits_def objBits_defs)+ + apply (subgoal_tac "kheap s (y + of_bl ya * 2^cte_level_bits) = None") + prefer 2 + using valid_objs[simplified valid_objs_def Ball_def dom_def fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: valid_obj_def valid_cs_def valid_cs_size_def) + apply (rule pspace_aligned_distinct_None'[OF pspace_aligned pspace_distinct], assumption) + apply (clarsimp simp: word_neq_0_conv power_add cte_index_repair) + apply (simp add: well_formed_cnode_n_def dom_def Collect_eq) + apply (erule_tac x=ya in allE)+ + apply (rule 
word_mult_less_mono1) + apply (subgoal_tac "sz = length ya") + apply simp + apply (rule of_bl_length, (simp add: word_bits_def)+)[1] + apply fastforce + apply (simp add: cte_level_bits_def) + apply (simp add: word_bits_conv cte_level_bits_def) + apply (drule_tac a="2::nat" in power_strict_increasing, simp+) + apply (rule ccontr, clarsimp) + apply (cut_tac a="y + of_bl ya * 2^cte_level_bits" and n=yc in gsCNodes) + apply clarsimp (* mapping architecture-specific objects *) apply clarsimp apply (erule pspace_dom_relatedE[OF _ pspace_relation]) apply (case_tac ko, simp_all add: other_obj_relation_def) - apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (clarsimp simp: tcb_relation_cut_def) apply (rename_tac arch_kernel_object y ko P arch_kernel_obj) apply (case_tac arch_kernel_object, simp_all add: absHeapArch_def - split: asidpool.splits) + split: asidpool.splits) + apply clarsimp + apply (case_tac arch_kernel_obj) + apply (simp add: other_obj_relation_def asid_pool_relation_def + inv_def o_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp simp add: pde_relation_def) + apply (clarsimp simp add: pdpte_relation_def) + apply (clarsimp simp add: pml4e_relation_def) + apply (clarsimp split: if_split_asm)+ + + apply (case_tac arch_kernel_obj) + apply (simp add: other_obj_relation_def asid_pool_relation_def inv_def o_def) + using pspace_aligned[simplified pspace_aligned_def Ball_def dom_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: pte_relation_def absPageTable_def bit_simps) + apply (rule conjI) + prefer 2 + apply clarsimp + apply (rule sym) + apply (rule pspace_aligned_distinct_None' [OF pspace_aligned pspace_distinct], + (simp add: bit_simps)+) + apply (cut_tac x=ya and n="2^12" in + ucast_less_shiftl_helper'[where 'a=machine_word_len and a=3,simplified word_bits_conv], simp+) + apply (clarsimp simp add: word_gt_0) + apply clarsimp + apply (subgoal_tac "ucast ya << 3 = 0") + prefer 2 + apply (rule ccontr) + apply (frule_tac x=y in unaligned_helper, assumption) + apply (rule ucast_less_shiftl_helper'[where a=3], simp_all) + apply (rule ext) + apply (frule pspace_relation_absD[OF _ pspace_relation]) + apply simp + apply (erule_tac x=offs in allE) + apply (clarsimp simp add: pte_relation_def word_size_bits_def) + using valid_objs[simplified valid_objs_def fun_app_def dom_def, simplified] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: valid_obj_def wellformed_pte_def) + apply (erule_tac x=offs in allE) + apply (rename_tac pte') + apply (case_tac pte', simp_all add:)[1] + apply (clarsimp split: X64_A.pte.splits) + apply (rule set_eqI, clarsimp) + apply (case_tac x, simp_all)[1] + apply (clarsimp split: X64_A.pte.splits) + apply (case_tac x1, simp_all)[1] + apply (clarsimp simp add: pde_relation_def) + apply (clarsimp simp add: pdpte_relation_def) + apply (clarsimp simp: pml4e_relation_def) + apply (clarsimp split: if_split_asm)+ - apply clarsimp apply (case_tac arch_kernel_obj) - apply (simp add: other_obj_relation_def asid_pool_relation_def - inv_def o_def) - apply (clarsimp simp add: pte_relation_def) - apply (clarsimp simp add: pde_relation_def) - apply (clarsimp simp add: pdpte_relation_def) - apply (clarsimp simp add: pml4e_relation_def) + apply (simp add: other_obj_relation_def asid_pool_relation_def inv_def o_def) + apply (clarsimp simp: pte_relation_def) + using pspace_aligned[simplified pspace_aligned_def Ball_def dom_def] + apply (erule_tac x=y in allE) 
+ apply (clarsimp simp add: pde_relation_def absPageDirectory_def bit_simps) + apply (rule conjI) + prefer 2 + apply clarsimp + apply (rule sym) + apply (rule pspace_aligned_distinct_None'[OF pspace_aligned pspace_distinct], + (simp add: bit_simps)+) + apply (cut_tac x=ya and n="2^12" in + ucast_less_shiftl_helper'[where 'a=machine_word_len and a=3,simplified word_bits_conv], simp+) + apply (clarsimp simp add: word_gt_0) + apply clarsimp + apply (subgoal_tac "ucast ya << 3 = 0") + prefer 2 + apply (rule ccontr) + apply (frule_tac x=y in unaligned_helper, assumption) + apply (rule ucast_less_shiftl_helper'[where a=3], simp_all) + apply (rule ext) + apply (frule pspace_relation_absD[OF _ pspace_relation]) + apply simp + apply (erule_tac x=offs in allE) + apply (clarsimp simp add: pde_relation_def word_size_bits_def) + using valid_objs[simplified valid_objs_def fun_app_def dom_def, simplified] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: valid_obj_def wellformed_pde_def) + apply (erule_tac x=offs in allE) + apply (rename_tac pde') + apply (case_tac pde', simp_all add:)[1] + apply (clarsimp split: X64_A.pde.splits) + apply (rule set_eqI, clarsimp) + apply (case_tac x, simp_all)[1] + apply (clarsimp split: X64_A.pde.splits) + apply (rule set_eqI, clarsimp) + apply (case_tac x, simp_all)[1] + apply (case_tac x1, simp_all)[1] + apply (clarsimp simp add: pdpte_relation_def) + apply (clarsimp simp: pml4e_relation_def) apply (clarsimp split: if_split_asm)+ apply (case_tac arch_kernel_obj) - apply (simp add: other_obj_relation_def asid_pool_relation_def inv_def - o_def) - using pspace_aligned[simplified pspace_aligned_def Ball_def dom_def] - apply (erule_tac x=y in allE) - apply (clarsimp simp add: pte_relation_def absPageTable_def - bit_simps) - apply (rule conjI) - prefer 2 - apply clarsimp - apply (rule sym) - apply (rule pspace_aligned_distinct_None' - [OF pspace_aligned pspace_distinct], (simp add: bit_simps)+) - apply (cut_tac x=ya and n="2^12" in - ucast_less_shiftl_helper'[where 'a=machine_word_len and a=3,simplified word_bits_conv], simp+) - apply (clarsimp simp add: word_gt_0) - apply clarsimp - apply (subgoal_tac "ucast ya << 3 = 0") - prefer 2 - apply (rule ccontr) - apply (frule_tac x=y in unaligned_helper, assumption) - apply (rule ucast_less_shiftl_helper'[where a=3], simp_all) - apply (rule ext) - apply (frule pspace_relation_absD[OF _ pspace_relation]) - apply simp - apply (erule_tac x=offs in allE) - apply (clarsimp simp add: pte_relation_def word_size_bits_def) - using valid_objs[simplified valid_objs_def fun_app_def dom_def, - simplified] - apply (erule_tac x=y in allE) - apply (clarsimp simp add: valid_obj_def wellformed_pte_def) - apply (erule_tac x=offs in allE) - apply (rename_tac pte') - apply (case_tac pte', simp_all add:)[1] - apply (clarsimp split: X64_A.pte.splits) - apply (rule set_eqI, clarsimp) - apply (case_tac x, simp_all)[1] - apply (clarsimp split: X64_A.pte.splits) - apply (case_tac x1, simp_all)[1] - apply (clarsimp simp add: pde_relation_def) -apply (clarsimp simp add: pdpte_relation_def) -apply (clarsimp simp: pml4e_relation_def) - apply (clarsimp split: if_split_asm)+ - - apply (case_tac arch_kernel_obj) - apply (simp add: other_obj_relation_def asid_pool_relation_def inv_def - o_def) -apply (clarsimp simp: pte_relation_def) - using pspace_aligned[simplified pspace_aligned_def Ball_def dom_def] - apply (erule_tac x=y in allE) - apply (clarsimp simp add: pde_relation_def absPageDirectory_def - bit_simps) - apply (rule conjI) - prefer 2 - apply 
clarsimp - apply (rule sym) - apply (rule pspace_aligned_distinct_None' - [OF pspace_aligned pspace_distinct], (simp add: bit_simps)+) - apply (cut_tac x=ya and n="2^12" in - ucast_less_shiftl_helper'[where 'a=machine_word_len and a=3,simplified word_bits_conv], simp+) - apply (clarsimp simp add: word_gt_0) - apply clarsimp - apply (subgoal_tac "ucast ya << 3 = 0") - prefer 2 - apply (rule ccontr) - apply (frule_tac x=y in unaligned_helper, assumption) - apply (rule ucast_less_shiftl_helper'[where a=3], simp_all) - apply (rule ext) - apply (frule pspace_relation_absD[OF _ pspace_relation]) - apply simp - apply (erule_tac x=offs in allE) - apply (clarsimp simp add: pde_relation_def word_size_bits_def) - using valid_objs[simplified valid_objs_def fun_app_def dom_def, - simplified] - apply (erule_tac x=y in allE) - apply (clarsimp simp add: valid_obj_def wellformed_pde_def) - apply (erule_tac x=offs in allE) - apply (rename_tac pde') - apply (case_tac pde', simp_all add:)[1] - apply (clarsimp split: X64_A.pde.splits) - apply (rule set_eqI, clarsimp) - apply (case_tac x, simp_all)[1] - apply (clarsimp split: X64_A.pde.splits) -apply (rule set_eqI, clarsimp) - apply (case_tac x, simp_all)[1] -apply (case_tac x1, simp_all)[1] -apply (clarsimp simp add: pdpte_relation_def) -apply (clarsimp simp: pml4e_relation_def) - apply (clarsimp split: if_split_asm)+ - - apply (case_tac arch_kernel_obj) - apply (simp add: other_obj_relation_def asid_pool_relation_def inv_def - o_def) -apply (clarsimp simp: pte_relation_def) -apply (clarsimp simp: pde_relation_def) + apply (simp add: other_obj_relation_def asid_pool_relation_def inv_def o_def) + apply (clarsimp simp: pte_relation_def) + apply (clarsimp simp: pde_relation_def) using pspace_aligned[simplified pspace_aligned_def Ball_def dom_def] apply (erule_tac x=y in allE) - apply (clarsimp simp add: pdpte_relation_def absPDPT_def - bit_simps) + apply (clarsimp simp add: pdpte_relation_def absPDPT_def bit_simps) apply (rule conjI) prefer 2 apply clarsimp apply (rule sym) - apply (rule pspace_aligned_distinct_None' - [OF pspace_aligned pspace_distinct], (simp add: bit_simps)+) + apply (rule pspace_aligned_distinct_None'[OF pspace_aligned pspace_distinct], + (simp add: bit_simps)+) apply (cut_tac x=ya and n="2^12" in ucast_less_shiftl_helper'[where 'a=machine_word_len and a=3,simplified word_bits_conv], simp+) apply (clarsimp simp add: word_gt_0) @@ -1042,8 +1036,7 @@ apply (clarsimp simp: pde_relation_def) apply simp apply (erule_tac x=offs in allE) apply (clarsimp simp add: pdpte_relation_def word_size_bits_def) - using valid_objs[simplified valid_objs_def fun_app_def dom_def, - simplified] + using valid_objs[simplified valid_objs_def fun_app_def dom_def, simplified] apply (erule_tac x=y in allE) apply (clarsimp simp add: valid_obj_def wellformed_pdpte_def) apply (erule_tac x=offs in allE) @@ -1053,55 +1046,52 @@ apply (clarsimp simp: pde_relation_def) apply (rule set_eqI, clarsimp) apply (case_tac x, simp_all)[1] apply (clarsimp split: X64_A.pdpte.splits) -apply (rule set_eqI, clarsimp) + apply (rule set_eqI, clarsimp) apply (case_tac x, simp_all)[1] -apply (case_tac x1, simp_all)[1] -apply (clarsimp simp: pml4e_relation_def) + apply (case_tac x1, simp_all)[1] + apply (clarsimp simp: pml4e_relation_def) apply (clarsimp split: if_split_asm)+ - apply (case_tac arch_kernel_obj) - apply (simp add: other_obj_relation_def asid_pool_relation_def inv_def - o_def) -apply (clarsimp simp: pte_relation_def) -apply (clarsimp simp: pde_relation_def) -apply (clarsimp simp: 
pdpte_relation_def) - using pspace_aligned[simplified pspace_aligned_def Ball_def dom_def] - apply (erule_tac x=y in allE) - apply (clarsimp simp add: pml4e_relation_def absPML4_def - bit_simps) - apply (rule conjI) - prefer 2 - apply clarsimp - apply (rule sym) - apply (rule pspace_aligned_distinct_None' - [OF pspace_aligned pspace_distinct], (simp add: bit_simps)+) - apply (cut_tac x=ya and n="2^12" in - ucast_less_shiftl_helper'[where 'a=machine_word_len and a=3,simplified word_bits_conv], simp+) - apply (clarsimp simp add: word_gt_0) - apply clarsimp - apply (subgoal_tac "ucast ya << 3 = 0") - prefer 2 - apply (rule ccontr) - apply (frule_tac x=y in unaligned_helper, assumption) - apply (rule ucast_less_shiftl_helper'[where a=3], simp_all) - apply (rule ext) - apply (frule pspace_relation_absD[OF _ pspace_relation]) - apply simp - apply (erule_tac x=offs in allE) - apply (clarsimp simp add: pml4e_relation_def word_size_bits_def) - using valid_objs[simplified valid_objs_def fun_app_def dom_def, - simplified] - apply (erule_tac x=y in allE) - apply (clarsimp simp add: valid_obj_def wellformed_pml4e_def) - apply (erule_tac x=offs in allE) - apply (rename_tac pml4e') - apply (case_tac pml4e', simp_all add:)[1] -apply (case_tac "pd offs", simp_all)[1] - apply (clarsimp split: X64_A.pml4e.splits) - apply (rule set_eqI, clarsimp) - apply (case_tac x, simp_all)[1] - apply (clarsimp split: if_split_asm)+ -done + apply (case_tac arch_kernel_obj) + apply (simp add: other_obj_relation_def asid_pool_relation_def inv_def o_def) + apply (clarsimp simp: pte_relation_def) + apply (clarsimp simp: pde_relation_def) + apply (clarsimp simp: pdpte_relation_def) + using pspace_aligned[simplified pspace_aligned_def Ball_def dom_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: pml4e_relation_def absPML4_def bit_simps) + apply (rule conjI) + prefer 2 + apply clarsimp + apply (rule sym) + apply (rule pspace_aligned_distinct_None'[OF pspace_aligned pspace_distinct], + (simp add: bit_simps)+) + apply (cut_tac x=ya and n="2^12" in + ucast_less_shiftl_helper'[where 'a=machine_word_len and a=3,simplified word_bits_conv], simp+) + apply (clarsimp simp add: word_gt_0) + apply clarsimp + apply (subgoal_tac "ucast ya << 3 = 0") + prefer 2 + apply (rule ccontr) + apply (frule_tac x=y in unaligned_helper, assumption) + apply (rule ucast_less_shiftl_helper'[where a=3], simp_all) + apply (rule ext) + apply (frule pspace_relation_absD[OF _ pspace_relation]) + apply simp + apply (erule_tac x=offs in allE) + apply (clarsimp simp add: pml4e_relation_def word_size_bits_def) + using valid_objs[simplified valid_objs_def fun_app_def dom_def, simplified] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: valid_obj_def wellformed_pml4e_def) + apply (erule_tac x=offs in allE) + apply (rename_tac pml4e') + apply (case_tac pml4e', simp_all add:)[1] + apply (case_tac "pd offs", simp_all)[1] + apply (clarsimp split: X64_A.pml4e.splits) + apply (rule set_eqI, clarsimp) + apply (case_tac x, simp_all)[1] + apply (clarsimp split: if_split_asm)+ + done qed definition @@ -1136,14 +1126,14 @@ shows apply (rule conjI, clarsimp simp: EtcbMap_def etcb_relation_def)+ apply clarsimp using pspace_relation - apply (clarsimp simp add: pspace_relation_def pspace_dom_def UNION_eq - dom_def Collect_eq) + apply (clarsimp simp add: pspace_relation_def pspace_dom_def UNION_eq dom_def Collect_eq) apply (rule iffI) apply (erule_tac x=x in allE)+ apply (case_tac "ksPSpace s' x", clarsimp) apply (erule_tac x=x in allE, clarsimp) + apply 
(rename_tac ko) apply clarsimp - apply (case_tac a, simp_all add: other_obj_relation_def) + apply (case_tac ko; simp add: other_obj_relation_def tcb_relation_cut_def) apply (insert pspace_relation) apply (clarsimp simp: obj_at'_def projectKOs) apply (erule(1) pspace_dom_relatedE) @@ -1209,10 +1199,9 @@ lemma bin_to_bl_of_bl_eq: lemma TCB_implies_KOTCB: "\pspace_relation (kheap s) (ksPSpace s'); kheap s a = Some (TCB tcb)\ \ \tcb'. ksPSpace s' a = Some (KOTCB tcb') \ tcb_relation tcb tcb'" - apply (clarsimp simp add: pspace_relation_def pspace_dom_def - dom_def UNION_eq Collect_eq) + apply (clarsimp simp add: pspace_relation_def pspace_dom_def dom_def UNION_eq Collect_eq) apply (erule_tac x=a in allE)+ - apply (clarsimp simp add: other_obj_relation_def + apply (clarsimp simp: other_obj_relation_def tcb_relation_cut_def split: Structures_H.kernel_object.splits) apply (drule iffD1) apply (fastforce simp add: dom_def image_def) @@ -1222,12 +1211,11 @@ lemma TCB_implies_KOTCB: lemma cte_at_CNodeI: "\kheap s a = Some (CNode (length b) cs); well_formed_cnode_n (length b) cs\ \ cte_at (a,b) s" -apply (subgoal_tac "\y. cs b = Some y") - apply clarsimp - apply (rule_tac cte=y in cte_wp_at_cteI[of s _ "length b" cs], - simp_all) -apply (simp add: well_formed_cnode_n_def dom_def Collect_eq) -done + apply (subgoal_tac "\y. cs b = Some y") + apply clarsimp + apply (rule_tac cte=y in cte_wp_at_cteI[of s _ "length b" cs]; simp) + apply (simp add: well_formed_cnode_n_def dom_def Collect_eq) + done lemma cteMap_correct: assumes rel: "(s,s') \ state_relation" @@ -1961,13 +1949,14 @@ where definition "absArchState s' \ - case s' of X64KernelState asid_tbl gpm gpdpts gpds gpts ccr3 kvspace kports num_ioapics irq_states \ + case s' of X64KernelState asid_tbl gpm gpdpts gpds gpts ccr3 kvspace kports num_ioapics ioapics_nirqs irq_states \ \x64_asid_table = asid_tbl \ ucast, x64_global_pml4 = gpm, x64_kernel_vspace = kvspace, x64_global_pts = gpts, x64_global_pdpts = gpdpts, x64_global_pds = gpds, x64_current_cr3 = absCR3 ccr3, x64_allocated_io_ports = kports, x64_num_ioapics = num_ioapics, + x64_ioapic_nirqs = ioapics_nirqs, x64_irq_state = x64irqstate_to_abstract \ irq_states\" lemma cr3_expand_unexpand[simp]: "cr3 (cr3_base_address a) (cr3_pcid a) = a" @@ -2007,7 +1996,7 @@ definition domain_index_internal = ksDomScheduleIdx s, cur_domain_internal = ksCurDomain s, domain_time_internal = ksDomainTime s, - ready_queues_internal = curry (ksReadyQueues s), + ready_queues_internal = (\d p. 
heap_walk (tcbSchedNexts_of s) (tcbQueueHead (ksReadyQueues s (d, p))) []), cdt_list_internal = absCDTList (cteMap (gsCNodes s)) (ctes_of s)\" lemma absExst_correct: @@ -2015,12 +2004,15 @@ lemma absExst_correct: assumes rel: "(s, s') \ state_relation" shows "absExst s' = exst s" apply (rule det_ext.equality) - using rel invs invs' - apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct - absCDTList_correct[THEN fun_cong] state_relation_def invs_def valid_state_def - ready_queues_relation_def invs'_def valid_state'_def - valid_pspace_def valid_sched_def valid_pspace'_def curry_def fun_eq_iff) - apply (fastforce simp: absEkheap_correct) + using rel invs invs' + apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct + absCDTList_correct[THEN fun_cong] state_relation_def invs_def + valid_state_def ready_queues_relation_def ready_queue_relation_def + invs'_def valid_state'_def + valid_pspace_def valid_sched_def valid_pspace'_def curry_def + fun_eq_iff) + apply (fastforce simp: absEkheap_correct) + apply (fastforce simp: list_queue_relation_def Let_def dest: heap_ls_is_walk) done diff --git a/proof/refine/X64/ArchAcc_R.thy b/proof/refine/X64/ArchAcc_R.thy index d4dfa37733..db8aa35f6e 100644 --- a/proof/refine/X64/ArchAcc_R.thy +++ b/proof/refine/X64/ArchAcc_R.thy @@ -70,16 +70,6 @@ lemma getObject_ASIDPool_corres: apply (clarsimp simp: other_obj_relation_def asid_pool_relation_def) done -lemma aligned_distinct_obj_atI': - "\ ksPSpace s x = Some ko; pspace_aligned' s; - pspace_distinct' s; ko = injectKO v \ - \ ko_at' v x s" - apply (simp add: obj_at'_def projectKOs project_inject - pspace_distinct'_def pspace_aligned'_def) - apply (drule bspec, erule domI)+ - apply simp - done - lemmas aligned_distinct_asid_pool_atI' = aligned_distinct_obj_atI'[where 'a=asidpool, simplified, OF _ _ _ refl] @@ -595,8 +585,12 @@ lemma setObject_PD_corres: apply (drule(1) ekheap_kheap_dom) apply clarsimp apply (drule_tac x=p in bspec, erule domI) - apply (simp add: other_obj_relation_def + apply (simp add: other_obj_relation_def tcb_relation_cut_def split: Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pde')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + apply (fastforce dest: tcbs_of'_non_tcb_update) apply (rule conjI) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x="p && ~~ mask pd_bits" in allE)+ @@ -680,12 +674,16 @@ lemma setObject_PT_corres: apply (drule(1) ekheap_kheap_dom) apply clarsimp apply (drule_tac x=p in bspec, erule domI) - apply (simp add: other_obj_relation_def - split: Structures_A.kernel_object.splits) - apply (rule conjI) + apply (simp add: other_obj_relation_def tcb_relation_cut_def + split: Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x="p && ~~ mask pt_bits" in allE)+ apply fastforce + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pte')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + subgoal by (fastforce dest: tcbs_of'_non_tcb_update) apply (simp add: map_to_ctes_upd_other) apply (simp add: fun_upd_def) apply (simp add: caps_of_state_after_update obj_at_def swp_cte_at_caps_of) @@ -744,20 +742,21 @@ lemma setObject_PDPT_corres: apply (drule bspec, assumption) apply clarsimp 
apply (erule (1) obj_relation_cutsE) + apply simp + apply simp + apply clarsimp apply simp + apply (frule (1) pspace_alignedD) + apply (drule_tac p=x in pspace_alignedD, assumption) apply simp - apply clarsimp - apply (frule (1) pspace_alignedD) - apply (drule_tac p=x in pspace_alignedD, assumption) - apply simp - apply (drule mask_alignment_ugliness) + apply (drule mask_alignment_ugliness) + apply (simp add: pdpt_bits_def pageBits_def) apply (simp add: pdpt_bits_def pageBits_def) - apply (simp add: pdpt_bits_def pageBits_def) - apply clarsimp - apply (drule test_bit_size) - apply (clarsimp simp: word_size bit_simps) - apply arith - apply ((simp split: if_split_asm)+)[5] + apply clarsimp + apply (drule test_bit_size) + apply (clarsimp simp: word_size bit_simps) + apply arith + apply ((simp split: if_split_asm)+)[5] apply (simp add: other_obj_relation_def split: Structures_A.kernel_object.splits arch_kernel_obj.splits) apply (rule conjI) @@ -765,8 +764,12 @@ lemma setObject_PDPT_corres: apply (drule(1) ekheap_kheap_dom) apply clarsimp apply (drule_tac x=p in bspec, erule domI) - apply (simp add: other_obj_relation_def - split: Structures_A.kernel_object.splits) + apply (simp add: other_obj_relation_def tcb_relation_cut_def + split: Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pdpte')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + apply (fastforce dest: tcbs_of'_non_tcb_update) apply (rule conjI) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x="p && ~~ mask pdpt_bits" in allE)+ @@ -818,20 +821,21 @@ lemma setObject_PML4_corres: apply clarsimp apply (drule_tac x = x in spec) apply (clarsimp simp: pml4e_relation_def mask_pml4_bits_inner_beauty - dest!: more_pml4_inner_beauty) + dest!: more_pml4_inner_beauty) apply (rule ballI) apply (drule (1) bspec) apply clarsimp apply (rule conjI) apply (clarsimp simp: pml4e_relation_def mask_pml4_bits_inner_beauty - dest!: more_pml4_inner_beauty) + dest!: more_pml4_inner_beauty) apply clarsimp apply (drule bspec, assumption) apply clarsimp apply (erule (1) obj_relation_cutsE) + apply simp apply simp apply simp - apply clarsimp + apply simp apply simp apply (frule (1) pspace_alignedD) apply (drule_tac p=x in pspace_alignedD, assumption) @@ -845,14 +849,18 @@ lemma setObject_PML4_corres: apply arith apply ((simp split: if_split_asm)+)[2] apply (simp add: other_obj_relation_def - split: Structures_A.kernel_object.splits arch_kernel_obj.splits) + split: Structures_A.kernel_object.splits arch_kernel_obj.splits) apply (rule conjI) apply (clarsimp simp: ekheap_relation_def pspace_relation_def) apply (drule(1) ekheap_kheap_dom) apply clarsimp apply (drule_tac x=p in bspec, erule domI) - apply (simp add: other_obj_relation_def - split: Structures_A.kernel_object.splits) + apply (simp add: other_obj_relation_def tcb_relation_cut_def + split: Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (prop_tac "typ_at' (koTypeOf (injectKO pml4e')) p b") + apply (simp add: typ_at'_def ko_wp_at'_def) + apply (fastforce dest: tcbs_of'_non_tcb_update) apply (rule conjI) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x="p && ~~ mask pml4_bits" in allE)+ @@ -1411,11 +1419,11 @@ lemma copy_global_mappings_corres [@lift_corres_args, corres]: (copyGlobalMappings pm)" (is "corres _ ?apre _ _ _") unfolding copy_global_mappings_def 
copyGlobalMappings_def objBits_simps archObjSize_def pptr_base_def apply (fold word_size_bits_def) - apply corressimp + apply corresKsimp apply (rule_tac P="page_map_l4_at global_pm and ?apre" and P'="page_map_l4_at' skimPM and page_map_l4_at' pm" in corresK_mapM_x[OF order_refl]) - apply (corressimp simp: objBits_def mask_def wp: get_pde_wp getPDE_wp)+ + apply (corresKsimp simp: objBits_def mask_def wp: get_pde_wp getPDE_wp)+ apply(rule conjI) subgoal by (auto intro!: page_map_l4_pml4e_atI page_map_l4_pml4e_atI' simp: page_bits_def le_less_trans ptTranslationBits_def) @@ -1581,10 +1589,7 @@ lemma mapping_map_pdpte: "\mapping_map (vm_page_entry.VMPDPTE p, x) m'; lemma createMappingEntries_wf: "\\\ createMappingEntries base vptr sz R attrs vspace \\rv s. page_entry_map_corres rv\, -" apply (simp add: createMappingEntries_def page_entry_map_corres_def) - apply (rule hoare_pre) - apply wpc - apply (wp | simp split: vmpage_entry.splits)+ - by auto + by (wpsimp split: vmpage_entry.splits) lemma ensureSafeMapping_corres: notes mapping_map_simps = mapping_map_def page_entry_map_def page_entry_ptr_map_def attr_mask_def diff --git a/proof/refine/X64/Arch_R.thy b/proof/refine/X64/Arch_R.thy index 156c655ce5..61ebad25b5 100644 --- a/proof/refine/X64/Arch_R.thy +++ b/proof/refine/X64/Arch_R.thy @@ -75,7 +75,7 @@ lemma createObject_typ_at': supply is_aligned_neg_mask_eq[simp del] is_aligned_neg_mask_weaken[simp del] - apply (clarsimp simp:createObjects'_def alignError_def split_def | wp hoare_unless_wp | wpc )+ + apply (clarsimp simp:createObjects'_def alignError_def split_def | wp unless_wp | wpc )+ apply (clarsimp simp:obj_at'_def ko_wp_at'_def typ_at'_def pspace_distinct'_def)+ apply (subgoal_tac "ps_clear ptr (objBitsKO ty) (s\ksPSpace := \a. if a = ptr then Some ty else ksPSpace s a\)") @@ -129,7 +129,7 @@ lemma set_cap_device_and_range_aligned: lemma performASIDControlInvocation_corres: "asid_ci_map i = i' \ corres dc - (einvs and ct_active and valid_aci i) + (einvs and ct_active and valid_aci i and schact_is_rct) (invs' and ct_active' and valid_aci' i') (perform_asid_control_invocation i) (performASIDControlInvocation i')" @@ -267,11 +267,10 @@ lemma performASIDControlInvocation_corres: deleteObjects_cte_wp_at' deleteObjects_null_filter[where p="makePoolParent i'"]) apply (clarsimp simp:invs_mdb max_free_index_def invs_untyped_children) - apply (subgoal_tac "detype_locale x y sa" for x y) - prefer 2 - apply (simp add:detype_locale_def) - apply (fastforce simp:cte_wp_at_caps_of_state descendants_range_def2 - empty_descendants_range_in invs_untyped_children) + apply (prop_tac "detype_locale x y sa" for x y) + apply (simp add: detype_locale_def) + apply (fastforce simp: cte_wp_at_caps_of_state descendants_range_def2 + empty_descendants_range_in invs_untyped_children) apply (intro conjI) apply (clarsimp) apply (erule(1) caps_of_state_valid) @@ -332,29 +331,30 @@ lemma performASIDControlInvocation_corres: apply clarsimp apply (frule empty_descendants_range_in') apply (intro conjI, - simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2 - null_filter_descendants_of'[OF null_filter_simp'] - capAligned_def asid_low_bits_def) - apply (erule descendants_range_caps_no_overlapI') - apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq) - apply (simp add:empty_descendants_range_in') - apply (simp add:word_bits_def bit_simps) - apply (rule is_aligned_weaken) - apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) - apply (simp add:pageBits_def) + simp_all add: 
is_simple_cap'_def isCap_simps descendants_range'_def2 + null_filter_descendants_of'[OF null_filter_simp'] + capAligned_def asid_low_bits_def) + apply (erule descendants_range_caps_no_overlapI') + apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq) + apply (simp add:empty_descendants_range_in') + apply (simp add:word_bits_def bit_simps) + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) + apply (simp add:pageBits_def) + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range) + apply (fastforce simp: cte_wp_at_ctes_of) + apply assumption+ + apply fastforce + apply simp apply clarsimp - apply (drule(1) cte_cap_in_untyped_range) - apply (fastforce simp:cte_wp_at_ctes_of) + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) apply assumption+ + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) apply fastforce apply simp apply clarsimp - apply (drule (1) cte_cap_in_untyped_range) - apply (fastforce simp add: cte_wp_at_ctes_of) - apply assumption+ - apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) - apply fastforce - apply simp done (* FIXME x64: move *) @@ -453,7 +453,7 @@ lemma checkVP_wpR [wp]: "\\s. vmsz_aligned w sz \ P () s\ checkVPAlignment sz w \P\, -" apply (simp add: checkVPAlignment_def) - by (wpsimp wp: hoare_whenE_wp simp: is_aligned_mask vmsz_aligned_def) + by (wpsimp wp: whenE_wp simp: is_aligned_mask vmsz_aligned_def) lemma asidHighBits [simp]: "asidHighBits = asid_high_bits" @@ -539,7 +539,7 @@ lemma find_vspace_for_asid_lookup_slot [wp]: find_vspace_for_asid asid \\rv. \\ (lookup_pml4_slot rv vptr && ~~ mask pml4_bits)\, -" apply (rule hoare_pre) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule hoare_vcg_R_conj) apply (rule hoare_vcg_R_conj) apply (rule find_vspace_for_asid_inv [where P="\", THEN valid_validE_R]) @@ -703,14 +703,14 @@ lemma decodeX64PageTableInvocation_corres: apply (clarsimp simp: attribs_from_word_def filter_frame_attrs_def attribsFromWord_def Let_def) apply ((clarsimp cong: if_cong - | wp hoare_whenE_wp hoare_vcg_all_lift_R getPDE_wp get_pde_wp + | wp whenE_wp hoare_vcg_all_liftE_R getPDE_wp get_pde_wp | wp (once) hoare_drop_imps)+)[6] apply (clarsimp intro!: validE_R_validE) apply (rule_tac Q'="\rv s. pspace_aligned s \ valid_vspace_objs s \ valid_arch_state s \ equal_kernel_mappings s \ valid_global_objs s \ (\ref. (ref \ rv) s) \ typ_at (AArch APageMapL4) rv s \ is_aligned rv pml4_bits" - in hoare_post_imp_R[rotated]) + in hoare_strengthen_postE_R[rotated]) apply fastforce apply (wpsimp | wp (once) hoare_drop_imps)+ apply (fastforce simp: valid_cap_def mask_def) @@ -794,14 +794,14 @@ lemma decodeX64PageDirectoryInvocation_corres: apply (clarsimp simp: attribs_from_word_def filter_frame_attrs_def attribsFromWord_def Let_def) apply ((clarsimp cong: if_cong - | wp hoare_whenE_wp hoare_vcg_all_lift_R getPDPTE_wp get_pdpte_wp + | wp whenE_wp hoare_vcg_all_liftE_R getPDPTE_wp get_pdpte_wp | wp (once) hoare_drop_imps)+)[6] apply (clarsimp intro!: validE_R_validE) apply (rule_tac Q'="\rv s. pspace_aligned s \ valid_vspace_objs s \ valid_arch_state s \ equal_kernel_mappings s \ valid_global_objs s \ (\ref. 
(ref \ rv) s) \ typ_at (AArch APageMapL4) rv s \ is_aligned rv pml4_bits" - in hoare_post_imp_R[rotated]) + in hoare_strengthen_postE_R[rotated]) apply fastforce apply (wpsimp | wp (once) hoare_drop_imps)+ apply (fastforce simp: valid_cap_def mask_def) @@ -882,7 +882,7 @@ lemma decodeX64PDPointerTableInvocation_corres: apply (clarsimp simp: attribs_from_word_def filter_frame_attrs_def attribsFromWord_def Let_def) apply ((clarsimp cong: if_cong - | wp hoare_whenE_wp hoare_vcg_all_lift_R getPML4E_wp get_pml4e_wp + | wp whenE_wp hoare_vcg_all_liftE_R getPML4E_wp get_pml4e_wp | wp (once) hoare_drop_imps)+) apply (fastforce simp: valid_cap_def mask_def intro!: page_map_l4_pml4e_at_lookupI) apply (clarsimp simp: valid_cap'_def) @@ -1161,11 +1161,11 @@ shows apply (simp add: returnOk_liftE[symmetric]) apply (rule corres_returnOk) apply (simp add: archinv_relation_def asid_pool_invocation_map_def) - apply (rule hoare_pre, wp hoare_whenE_wp) + apply (rule hoare_pre, wp whenE_wp) apply (clarsimp simp: ucast_fst_hd_assocs) - apply (wp hoareE_TrueI hoare_whenE_wp getASID_wp | simp)+ + apply (wp hoareE_TrueI whenE_wp getASID_wp | simp)+ apply ((clarsimp simp: p2_low_bits_max | rule TrueI impI)+)[2] - apply (wp hoare_whenE_wp getASID_wp)+ + apply (wp whenE_wp getASID_wp)+ apply (clarsimp simp: valid_cap_def) apply auto[1] \ \ASIDControlCap\ @@ -1321,7 +1321,7 @@ lemma perform_port_inv_corres: apply (clarsimp simp: perform_io_port_invocation_def performX64PortInvocation_def archinv_relation_def ioport_invocation_map_def) apply (case_tac x; clarsimp) - apply (corressimp corres: port_in_corres simp: ioport_data_relation_def) + apply (corresKsimp corres: port_in_corres simp: ioport_data_relation_def) by (auto simp: no_fail_in8 no_fail_in16 no_fail_in32 no_fail_out8 no_fail_out16 no_fail_out32) @@ -1373,8 +1373,7 @@ lemma performX64PortInvocation_corres: apply (strengthen invs_mdb'[mk_strg]) apply (wpsimp wp: setIOPortMask_invs') apply (clarsimp simp: invs_valid_objs valid_arch_inv_def valid_iocontrol_inv_def cte_wp_at_caps_of_state) - apply (rule conjI, clarsimp) - apply (clarsimp simp: safe_parent_for_def safe_parent_for_arch_def) + apply (fastforce simp: safe_parent_for_def safe_parent_for_arch_def) apply (clarsimp simp: invs_pspace_distinct' invs_pspace_aligned' valid_arch_inv'_def ioport_control_inv_valid'_def valid_cap'_def capAligned_def word_bits_def) apply (clarsimp simp: safe_parent_for'_def cte_wp_at_ctes_of) @@ -1400,7 +1399,7 @@ lemma arch_ioport_inv_case_simp: lemma arch_performInvocation_corres: "archinv_relation ai ai' \ corres (dc \ (=)) - (einvs and ct_active and valid_arch_inv ai) + (einvs and ct_active and valid_arch_inv ai and schact_is_rct) (invs' and ct_active' and valid_arch_inv' ai') (arch_perform_invocation ai) (Arch.performInvocation ai')" apply (clarsimp simp: arch_perform_invocation_def @@ -1480,13 +1479,13 @@ lemma performASIDControlInvocation_tcb_at': apply (rule hoare_name_pre_state) apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) - apply (wp static_imp_wp |simp add:placeNewObject_def2)+ - apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: projectKO_opts_defs) apply (strengthen st_tcb_strg' [where P=\]) apply (wp 
deleteObjects_invs_derivatives[where p="makePoolParent aci"] hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] - deleteObjects_st_tcb_at'[where p="makePoolParent aci"] static_imp_wp + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+ apply (case_tac ctea) apply (clarsimp) @@ -1806,7 +1805,7 @@ lemma arch_decodeInvocation_wf[wp]: cte_wp_at' (\cte. \idx. cteCap cte = (UntypedCap False frame pageBits idx)) (snd (excaps!0)) and sch_act_simple and (\s. descendants_of' (snd (excaps!0)) (ctes_of s) = {}) " - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (simp add: lookupTargetSlot_def) apply wp apply (clarsimp simp: cte_wp_at_ctes_of asid_wf_def) @@ -1894,7 +1893,7 @@ lemma performASIDControlInvocation_st_tcb_at': hoare_vcg_ex_lift deleteObjects_cte_wp_at' deleteObjects_invs_derivatives deleteObjects_st_tcb_at' - static_imp_wp + hoare_weak_lift_imp | simp add: placeNewObject_def2)+ apply (case_tac ctea) apply (clarsimp) @@ -1950,7 +1949,7 @@ crunch cte_wp_at': "Arch.finaliseCap" "cte_wp_at' P p" lemma invs_asid_table_strengthen': "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ invs' (s\ksArchState := - x64KSASIDTable_update (\_. (x64KSASIDTable \ ksArchState) s(asid \ ap)) (ksArchState s)\)" + x64KSASIDTable_update (\_. ((x64KSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" apply (clarsimp simp: invs'_def valid_state'_def) apply (rule conjI) apply (clarsimp simp: valid_global_refs'_def global_refs'_def) @@ -2041,7 +2040,7 @@ lemma performASIDControlInvocation_invs' [wp]: updateFreeIndex_caps_no_overlap'' updateFreeIndex_descendants_of2 updateFreeIndex_caps_overlap_reserved - updateCap_cte_wp_at_cases static_imp_wp + updateCap_cte_wp_at_cases hoare_weak_lift_imp getSlotCap_wp)+ apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask | strengthen invs_valid_pspace' invs_pspace_aligned' @@ -2179,7 +2178,7 @@ lemma arch_performInvocation_invs': apply (drule_tac src=p in valid_ioports_issuedD'[OF invs_valid_ioports']) apply (fastforce simp: cteCaps_of_def) apply force - by (force simp: cteCaps_of_def ran_def valid_ioports'_simps dest!: invs_valid_ioports') + using ranD valid_ioports_issuedD' by fastforce end diff --git a/proof/refine/X64/Bits_R.thy b/proof/refine/X64/Bits_R.thy index 09b416edb4..b211aeaedd 100644 --- a/proof/refine/X64/Bits_R.thy +++ b/proof/refine/X64/Bits_R.thy @@ -77,6 +77,10 @@ lemma projectKO_tcb: "(projectKO_opt ko = Some t) = (ko = KOTCB t)" by (cases ko) (auto simp: projectKO_opts_defs) +lemma tcb_of'_TCB[simp]: + "tcb_of' (KOTCB tcb) = Some tcb" + by (simp add: projectKO_tcb) + lemma projectKO_cte: "(projectKO_opt ko = Some t) = (ko = KOCTE t)" by (cases ko) (auto simp: projectKO_opts_defs) diff --git a/proof/refine/X64/BuildRefineCache.thy b/proof/refine/X64/BuildRefineCache.thy deleted file mode 100644 index 0e8eac45cf..0000000000 --- a/proof/refine/X64/BuildRefineCache.thy +++ /dev/null @@ -1,40 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory BuildRefineCache -imports Main -begin - -ML \ - -(* needed to generate a proof cache *) -proofs := 1; -DupSkip.record_proofs := true; -quick_and_dirty := true; - -tracing "Building refinement image using ROOT.ML"; - -use "ROOT.ML"; - -\ - -ML \ - -tracing "Synching proof cache"; - -DupSkip.sync_cache @{theory Refine}; - -tracing "Dumping proof cache"; - -let - val xml = XML_Syntax.xml_forest_of_cache (! 
DupSkip.the_cache); -in - File.open_output (XML_Syntax.output_forest xml) (Path.basic "proof_cache.xml") -end; - -\ - -end diff --git a/proof/refine/X64/CNodeInv_R.thy b/proof/refine/X64/CNodeInv_R.thy index 66bcb1a268..6300db2a82 100644 --- a/proof/refine/X64/CNodeInv_R.thy +++ b/proof/refine/X64/CNodeInv_R.thy @@ -208,7 +208,7 @@ lemma decodeCNodeInvocation_corres: subgoal by (auto simp add: whenE_def, auto simp add: returnOk_def) apply (wp | wpc | simp(no_asm))+ apply (wp hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift - hoare_vcg_all_lift_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps + hoare_vcg_all_liftE_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps | clarsimp)+ subgoal by (auto elim!: valid_cnode_capI) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) @@ -265,7 +265,7 @@ lemma decodeCNodeInvocation_corres: apply (clarsimp simp add: returnOk_def) apply (wp get_cap_wp getCTE_wp | simp only: whenE_def | clarsimp)+ apply (rule hoare_trivE_R[where P="\"]) - apply (simp add: cte_wp_at_ctes_of pred_conj_def cong: conj_cong) + apply (wpsimp simp: cte_wp_at_ctes_of pred_conj_def) apply (fastforce elim!: valid_cnode_capI simp: invs_def valid_state_def valid_pspace_def) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) \ \Rotate\ @@ -386,7 +386,7 @@ lemma deriveCap_Null_helper: apply (cases "cap = NullCap") apply (simp add: deriveCap_def isCap_simps) apply (wp | simp)+ - apply (rule hoare_post_imp_R, rule assms) + apply (rule hoare_strengthen_postE_R, rule assms) apply simp done @@ -444,7 +444,7 @@ lemma decodeCNodeInv_wf[wp]: apply (wp whenE_throwError_wp getCTE_wp | wpc | simp(no_asm))+ apply (rule_tac Q'="\rv. invs' and cte_wp_at' (\cte. cteCap cte = NullCap) destSlot and ex_cte_cap_to' destSlot" - in hoare_post_imp_R, wp) + in hoare_strengthen_postE_R, wp) apply (clarsimp simp: cte_wp_at_ctes_of) apply (frule invs_valid_objs') apply (simp add: ctes_of_valid' valid_updateCapDataI @@ -479,7 +479,7 @@ lemma decodeCNodeInv_wf[wp]: unlessE_whenE) apply (rule hoare_pre) apply (wp whenE_throwError_wp getCTE_wp | simp)+ - apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. True) rv s" in hoare_post_imp_R) + apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. True) rv s" in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (simp add: cte_wp_at_ctes_of imp_ex hasCancelSendRights_not_Null) apply (clarsimp simp: ctes_of_valid' invs_valid_objs') @@ -493,7 +493,7 @@ lemma decodeCNodeInv_wf[wp]: apply (rule_tac Q'="\rv s. 
cte_at' rv s \ cte_at' destSlot s \ cte_at' srcSlot s \ ex_cte_cap_to' rv s \ ex_cte_cap_to' destSlot s - \ invs' s" in hoare_post_imp_R) + \ invs' s" in hoare_strengthen_postE_R) apply (wp lsfco_cte_at') apply (clarsimp simp: cte_wp_at_ctes_of) apply (frule invs_valid_objs') @@ -4906,7 +4906,7 @@ lemma cteSwap_iflive'[wp]: simp only: if_live_then_nonz_cap'_def imp_conv_disj ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - hoare_vcg_ex_lift updateCap_cte_wp_at_cases static_imp_wp)+ + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -4936,7 +4936,7 @@ lemma cteSwap_valid_pspace'[wp]: apply (strengthen imp_consequent, strengthen ctes_of_strng) apply ((wp sch_act_wf_lift valid_queues_lift cur_tcb_lift updateCap_no_0 updateCap_ctes_of_wp - hoare_ex_wp getCTE_wp + hoare_vcg_ex_lift getCTE_wp | simp add: cte_wp_at_ctes_ofI o_def | rule hoare_drop_imps)+)[6] apply (clarsimp simp: valid_pspace_no_0[unfolded valid_pspace'_def valid_mdb'_def] @@ -5116,7 +5116,6 @@ lemma cteSwap_urz[wp]: crunches cteSwap for valid_arch_state'[wp]: "valid_arch_state'" and irq_states'[wp]: "valid_irq_states'" - and vq'[wp]: "valid_queues'" and ksqsL1[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" and ksqsL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" and st_tcb_at'[wp]: "st_tcb_at' P t" @@ -5125,6 +5124,12 @@ crunches cteSwap and ct_not_inQ[wp]: "ct_not_inQ" and ksDomScheduleIdx [wp]: "\s. P (ksDomScheduleIdx s)" +crunches cteSwap + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + lemma cteSwap_invs'[wp]: "\invs' and valid_cap' c and valid_cap' c' and ex_cte_cap_to' c1 and ex_cte_cap_to' c2 and @@ -5586,6 +5591,10 @@ lemma updateCap_untyped_ranges_zero_simple: crunch tcb_in_cur_domain'[wp]: updateCap "tcb_in_cur_domain' t" (wp: crunch_wps simp: crunch_simps rule: tcb_in_cur_domain'_lift) +crunches updateCap + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + lemma make_zombie_invs': "\\s. invs' s \ s \' cap \ cte_wp_at' (\cte. isFinal (cteCap cte) sl (cteCaps_of s)) sl s \ @@ -5602,7 +5611,8 @@ lemma make_zombie_invs': st_tcb_at' ((=) Inactive) p s \ bound_tcb_at' ((=) None) p s \ obj_at' (Not \ tcbQueued) p s - \ (\pr. p \ set (ksReadyQueues s pr)))) sl s\ + \ obj_at' (\tcb. tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p s)) sl s\ updateCap sl cap \\rv. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def @@ -5639,7 +5649,9 @@ lemma make_zombie_invs': apply (clarsimp simp: cte_wp_at_ctes_of) apply (subgoal_tac "st_tcb_at' ((=) Inactive) p' s \ obj_at' (Not \ tcbQueued) p' s - \ bound_tcb_at' ((=) None) p' s") + \ bound_tcb_at' ((=) None) p' s + \ obj_at' (\tcb. 
tcbSchedNext tcb = None + \ tcbSchedPrev tcb = None) p' s") apply (clarsimp simp: pred_tcb_at'_def obj_at'_def ko_wp_at'_def projectKOs) apply (auto dest!: isCapDs)[1] apply (clarsimp simp: cte_wp_at_ctes_of disj_ac @@ -5831,7 +5843,7 @@ lemma cteSwap_cte_wp_cteCap: apply simp apply (wp hoare_drop_imps)[1] apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - getCTE_wp' hoare_vcg_all_lift static_imp_wp)+ + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ apply simp apply (clarsimp simp: o_def) done @@ -5845,7 +5857,7 @@ lemma capSwap_cte_wp_cteCap: apply(simp add: capSwapForDelete_def) apply(wp) apply(rule cteSwap_cte_wp_cteCap) - apply(wp getCTE_wp getCTE_cte_wp_at static_imp_wp)+ + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ apply(clarsimp) apply(rule conjI) apply(simp add: cte_at_cte_wp_atD) @@ -5926,7 +5938,7 @@ crunch cap_to'[wp]: cancelSignal "ex_cte_cap_wp_to' P p" lemma cancelIPC_cap_to'[wp]: "\ex_cte_cap_wp_to' P p\ cancelIPC t \\rv. ex_cte_cap_wp_to' P p\" apply (simp add: cancelIPC_def Let_def) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (case_tac state, simp_all add: getThreadReplySlot_def locateSlot_conv) apply (wp ex_cte_cap_to'_pres [OF threadSet_cte_wp_at'] | simp add: o_def if_apply_def2 @@ -5988,7 +6000,7 @@ lemma cteDelete_delete_cases: apply (rule hoare_strengthen_post [OF emptySlot_deletes]) apply (clarsimp simp: cte_wp_at_ctes_of) apply wp+ - apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) apply (clarsimp simp: cte_wp_at_ctes_of) apply simp done @@ -6383,7 +6395,7 @@ proof (induct arbitrary: P p rule: finalise_spec_induct2) apply clarsimp apply (case_tac "cteCap rv", simp_all add: isCap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp | simp | wp (once) isFinal[where x=sl])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ apply (wp getCTE_wp') apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule conjI, clarsimp simp: removeable'_def) @@ -6401,7 +6413,7 @@ lemma finaliseSlot_invs'': \ (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\, \\rv s. invs' s \ sch_act_simple s\" unfolding finaliseSlot_def - apply (rule hoare_pre, rule hoare_post_impErr, rule use_spec) + apply (rule hoare_pre, rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where P="\" and Pr="\" and p=slot]) apply (simp_all add: no_cte_prop_top) apply wp @@ -6411,14 +6423,14 @@ lemma finaliseSlot_invs'': lemma finaliseSlot_invs: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. invs'\" - apply (rule validE_valid, rule hoare_post_impErr) + apply (rule validE_valid, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done lemma finaliseSlot_sch_act_simple: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. sch_act_simple\" - apply (rule validE_valid, rule hoare_post_impErr) + apply (rule validE_valid, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6427,7 +6439,7 @@ lemma finaliseSlot_removeable: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv s. fst rv \ cte_wp_at' (\cte. 
removeable' slot s (cteCap cte)) slot s\,-" - apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule validE_validE_R, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6436,7 +6448,7 @@ lemma finaliseSlot_irqs: "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv s. (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\,-" - apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule validE_validE_R, rule hoare_strengthen_postE) apply (rule finaliseSlot_invs'') apply simp+ done @@ -6451,7 +6463,7 @@ lemma finaliseSlot_cte_wp_at: P cp \ capZombiePtr cp \ p)) p s\,-" unfolding finaliseSlot_def apply (rule hoare_pre, unfold validE_R_def) - apply (rule hoare_post_impErr, rule use_spec) + apply (rule hoare_strengthen_postE, rule use_spec) apply (rule finaliseSlot_invs'[where P=P and Pr=\ and p=p]) apply (simp_all add: no_cte_prop_top finalise_prop_stuff_def) apply wp @@ -6474,7 +6486,7 @@ lemma reduceZombie_invs: reduceZombie cap slot exposed \\rv s. invs' s\" apply (rule validE_valid) - apply (rule hoare_post_impErr, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) + apply (rule hoare_strengthen_postE, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6484,7 +6496,7 @@ lemma reduceZombie_cap_to: reduceZombie cap slot exposed \\rv s. \ exposed \ ex_cte_cap_to' slot s\, -" apply (rule validE_validE_R, rule hoare_pre, - rule hoare_post_impErr) + rule hoare_strengthen_postE) apply (rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6495,7 +6507,7 @@ lemma reduceZombie_sch_act_simple: reduceZombie cap slot exposed \\rv. sch_act_simple\" apply (rule validE_valid, rule hoare_pre, - rule hoare_post_impErr) + rule hoare_strengthen_postE) apply (rule reduceZombie_invs'[where p=slot]) apply clarsimp+ done @@ -6505,7 +6517,7 @@ lemma cteDelete_invs': apply (rule hoare_gen_asm) apply (simp add: cteDelete_def whenE_def split_def) apply (rule hoare_pre, wp finaliseSlot_invs) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (unfold validE_R_def) apply (rule use_spec) apply (rule spec_valid_conj_liftE1) @@ -6536,9 +6548,9 @@ lemma cteDelete_cte_at: apply (rule hoare_vcg_disj_lift) apply (rule typ_at_lifts, rule cteDelete_typ_at') apply (simp add: cteDelete_def finaliseSlot_def split_def) - apply (rule validE_valid, rule seqE) + apply (rule validE_valid, rule bindE_wp_fwd) apply (subst finaliseSlot'_simps_ext) - apply (rule seqE) + apply (rule bindE_wp_fwd) apply simp apply (rule getCTE_sp) apply (rule hoare_pre, rule hoare_FalseE) @@ -6580,10 +6592,10 @@ lemma cteDelete_cte_wp_at_invs: cteCap cte = NullCap \ (\zb n. cteCap cte = Zombie slot zb n)) slot s)" - and E="\rv. \" in hoare_post_impErr) + and E="\rv. \" in hoare_strengthen_postE) apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple hoare_drop_imps(2)[OF finaliseSlot_irqs]) - apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (rule hoare_strengthen_postE_R, rule finaliseSlot_abort_cases) apply (clarsimp simp: cte_wp_at_ctes_of dest!: isCapDs) apply simp apply simp @@ -6601,10 +6613,10 @@ lemma cteDelete_cte_wp_at_invs: (\zb n. cteCap cte = Zombie p zb n) \ (\cp. 
P cp \ capZombiePtr cp \ p)) p s" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple hoare_drop_imps(2)[OF finaliseSlot_irqs]) - apply (rule hoare_post_imp_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) + apply (rule hoare_strengthen_postE_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) apply simp+ apply (clarsimp simp: cte_wp_at_ctes_of) apply simp @@ -6617,7 +6629,7 @@ lemma cteDelete_sch_act_simple: cteDelete slot exposed \\rv. sch_act_simple\" apply (simp add: cteDelete_def whenE_def split_def) apply (wp hoare_drop_imps | simp)+ - apply (rule_tac hoare_post_impErr [where Q="\rv. sch_act_simple" + apply (rule_tac hoare_strengthen_postE [where Q="\rv. sch_act_simple" and E="\rv. sch_act_simple"]) apply (rule valid_validE) apply (wp finaliseSlot_sch_act_simple) @@ -7167,14 +7179,14 @@ next apply simp apply (wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at set_cap_cte_cap_wp_to - hoare_vcg_const_Ball_lift static_imp_wp + hoare_vcg_const_Ball_lift hoare_weak_lift_imp | simp add: conj_comms | erule finalise_cap_not_reply_master [simplified])+ apply (elim conjE, strengthen exI[mk_strg I], strengthen asm_rl[where psi="(cap_relation cap cap')" for cap cap', mk_strg I E]) apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - hoare_vcg_ex_lift static_imp_wp) + hoare_vcg_ex_lift hoare_weak_lift_imp) apply clarsimp apply (drule_tac cap=a in cap_relation_removables, clarsimp, assumption+) @@ -7216,7 +7228,7 @@ next apply (clarsimp dest!: isCapDs simp: cte_wp_at_ctes_of) apply (case_tac "cteCap rv'", auto simp add: isCap_simps is_cap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp add: is_final_cap_def conj_comms cte_wp_at_eq_simp)+ apply (rule isFinal[where x="cte_map slot"]) apply (wp get_cap_wp| simp add: conj_comms)+ @@ -7295,13 +7307,11 @@ next case (4 ptr bits n slot) let ?target = "(ptr, nat_to_cref (zombie_cte_bits bits) n)" note hyps = "4.hyps"[simplified rec_del_concrete_unfold spec_corres_liftME2] - have pred_conj_assoc: "\P Q R. (P and (Q and R)) = (P and Q and R)" - by (rule ext, simp) show ?case apply (simp only: rec_del_concrete_unfold cap_relation.simps) apply (simp add: reduceZombie_def Let_def liftE_bindE - del: pred_conj_app) + del: inf_apply) apply (subst rec_del_simps_ext) apply (rule_tac F="ptr + 2 ^ cte_level_bits * of_nat n = cte_map ?target" @@ -7359,7 +7369,7 @@ next apply (rule updateCap_corres) apply simp apply (simp add: is_cap_simps) - apply (rule_tac Q="\rv. cte_at' (cte_map ?target)" in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (cte_map ?target)" in hoare_post_add) apply (wp, (wp getCTE_wp)+) apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule no_fail_pre, wp, simp) @@ -7398,7 +7408,7 @@ next apply (clarsimp simp: zombie_alignment_oddity cte_map_replicate) apply (wp get_cap_cte_wp_at getCTE_wp' rec_del_cte_at rec_del_invs rec_del_delete_cases)+ - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule_tac P="\cp. 
cp = Zombie ptr (zbits_map bits) (Suc n)" in cteDelete_cte_wp_at_invs[where p="cte_map slot"]) apply clarsimp @@ -8539,7 +8549,7 @@ lemma cteMove_iflive'[wp]: ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_ex_lift updateCap_cte_wp_at_cases - getCTE_wp static_imp_wp)+ + getCTE_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -8654,7 +8664,7 @@ lemma cteMove_ioports' [wp]: apply (clarsimp simp add: modify_map_def split: if_split_asm dest!: weak_derived_cap_ioports') apply (rule conjI, clarsimp) apply (rule conjI, clarsimp) - apply (force simp: isCap_simps) + apply blast subgoal by ((auto | blast)+) apply clarsimp apply (rule conjI, clarsimp) @@ -8719,6 +8729,15 @@ lemma cteMove_urz [wp]: apply auto done +crunches updateMDB + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +(* FIXME: arch_split *) +lemma haskell_assert_inv: + "haskell_assert Q L \P\" + by wpsimp + lemma cteMove_invs' [wp]: "\\x. invs' x \ ex_cte_cap_to' word2 x \ cte_wp_at' (\c. weak_derived' (cteCap c) capability) word1 x \ @@ -8744,7 +8763,7 @@ lemma cteMove_cte_wp_at: \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" unfolding cteMove_def apply (fold o_def) - apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp static_imp_wp|simp add: o_def)+ + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -8796,6 +8815,10 @@ crunch ksDomSchedule[wp]: updateCap "\s. P (ksDomSchedule s)" crunch ksDomScheduleIdx[wp]: updateCap "\s. P (ksDomScheduleIdx s)" crunch ksDomainTime[wp]: updateCap "\s. P (ksDomainTime s)" +crunches updateCap + for rdyq_projs[wp]: + "\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) (\d p. inQ d p |< tcbs_of' s)" + lemma corres_null_cap_update: "cap_relation cap cap' \ corres dc (invs and cte_wp_at ((=) cap) slot) @@ -9073,7 +9096,7 @@ declare withoutPreemption_lift [wp] crunch irq_states' [wp]: capSwapForDelete valid_irq_states' crunch irq_states' [wp]: finaliseCap valid_irq_states' - (wp: crunch_wps hoare_unless_wp getASID_wp no_irq + (wp: crunch_wps unless_wp getASID_wp no_irq no_irq_writeCR3 no_irq_invalidateASID no_irq_invalidateLocalPageStructureCacheASID no_irq_switchFpuOwner no_irq_nativeThreadUsingFPU @@ -9106,7 +9129,7 @@ lemma finaliseSlot_IRQInactive: "\valid_irq_states'\ finaliseSlot a b -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (unfold validE_E_def) - apply (rule hoare_post_impErr) + apply (rule hoare_strengthen_postE) apply (rule use_spec(2) [OF finaliseSlot_IRQInactive', folded finaliseSlot_def]) apply (rule TrueI) apply assumption @@ -9120,8 +9143,8 @@ lemma cteDelete_IRQInactive: "\valid_irq_states'\ cteDelete x y -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (simp add: cteDelete_def split_def) - apply (wp hoare_whenE_wp) - apply (rule hoare_post_impErr) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) apply (rule validE_E_validE) apply (rule finaliseSlot_IRQInactive) apply simp @@ -9133,8 +9156,8 @@ lemma cteDelete_irq_states': "\valid_irq_states'\ cteDelete x y \\rv. 
valid_irq_states'\" apply (simp add: cteDelete_def split_def) - apply (wp hoare_whenE_wp) - apply (rule hoare_post_impErr) + apply (wp whenE_wp) + apply (rule hoare_strengthen_postE) apply (rule hoare_valid_validE) apply (rule finaliseSlot_irq_states') apply simp @@ -9157,7 +9180,7 @@ proof (induct rule: cteRevoke.induct) case (1 p s') show ?case apply (subst cteRevoke.simps) - apply (wp "1.hyps" unlessE_wp hoare_whenE_wp preemptionPoint_IRQInactive_spec + apply (wp "1.hyps" unlessE_wp whenE_wp preemptionPoint_IRQInactive_spec cteDelete_IRQInactive cteDelete_irq_states' getCTE_wp')+ apply clarsimp done @@ -9178,7 +9201,7 @@ lemma inv_cnode_IRQInactive: apply (rule hoare_pre) apply (wp cteRevoke_IRQInactive finaliseSlot_IRQInactive cteDelete_IRQInactive - hoare_whenE_wp + whenE_wp | wpc | simp add: split_def)+ done diff --git a/proof/refine/X64/CSpace1_R.thy b/proof/refine/X64/CSpace1_R.thy index 6f751f138d..c0825ef812 100644 --- a/proof/refine/X64/CSpace1_R.thy +++ b/proof/refine/X64/CSpace1_R.thy @@ -234,7 +234,7 @@ lemma pspace_relation_cte_wp_at: apply (clarsimp elim!: cte_wp_at_weakenE') apply clarsimp apply (drule(1) pspace_relation_absD) - apply (clarsimp simp: other_obj_relation_def) + apply (clarsimp simp: tcb_relation_cut_def) apply (simp split: kernel_object.split_asm) apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb]) apply simp @@ -399,7 +399,7 @@ lemma resolveAddressBits_cte_at': resolveAddressBits cap addr depth \\rv. cte_at' (fst rv)\, \\rv s. True\" apply (fold validE_R_def) - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule resolveAddressBits_real_cte_at') apply (erule real_cte_at') done @@ -743,11 +743,11 @@ lemma lookupSlotForThread_corres: apply clarsimp apply simp prefer 2 - apply (rule hoare_vcg_precond_impE) + apply (rule hoare_weaken_preE) apply (rule resolve_address_bits_cte_at [unfolded validE_R_def]) apply clarsimp prefer 2 - apply (rule hoare_vcg_precond_impE) + apply (rule hoare_weaken_preE) apply (rule resolveAddressBits_cte_at') apply (simp add: invs'_def valid_state'_def valid_pspace'_def) apply (simp add: returnOk_def split_def) @@ -833,7 +833,7 @@ lemma setCTE_tcb_in_cur_domain': done lemma setCTE_ctes_of_wp [wp]: - "\\s. P (ctes_of s (p \ cte))\ + "\\s. P ((ctes_of s) (p \ cte))\ setCTE p cte \\rv s. P (ctes_of s)\" by (simp add: setCTE_def ctes_of_setObject_cte) @@ -938,7 +938,7 @@ lemma cteInsert_weak_cte_wp_at: \\uu. cte_wp_at'(\c. P (cteCap c)) p\" unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def apply (simp add: bind_assoc split del: if_split) - apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at static_imp_wp | simp)+ + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ apply (wp getCTE_ctes_wp)+ apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ done @@ -1639,10 +1639,10 @@ lemma cte_map_pulls_tcb_to_abstract: \ \tcb'. 
kheap s x = Some (TCB tcb') \ tcb_relation tcb' tcb \ (z = (x, tcb_cnode_index (unat ((y - x) >> cte_level_bits))))" apply (rule pspace_dom_relatedE, assumption+) - apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) - apply (clarsimp simp: other_obj_relation_def + apply (erule(1) obj_relation_cutsE; + clarsimp simp: other_obj_relation_def split: Structures_A.kernel_object.split_asm - X64_A.arch_kernel_obj.split_asm) + X64_A.arch_kernel_obj.split_asm if_split_asm) apply (drule tcb_cases_related2) apply clarsimp apply (frule(1) cte_wp_at_tcbI [OF _ _ TrueI, where t="(a, b)" for a b, simplified]) @@ -1658,8 +1658,7 @@ lemma pspace_relation_update_tcbs: del: dom_fun_upd) apply (erule conjE) apply (rule ballI, drule(1) bspec) - apply (rule conjI, simp add: other_obj_relation_def) - apply (clarsimp split: Structures_A.kernel_object.split_asm) + apply (clarsimp simp: tcb_relation_cut_def split: Structures_A.kernel_object.split_asm) apply (drule bspec, fastforce) apply clarsimp apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) @@ -1881,6 +1880,27 @@ lemma descendants_of_eq': apply simp done +lemma setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedPrevs_of s)" + shows "P (ps |> tcb_of' |> tcbSchedPrev)" + using use_valid[OF step setObject_cte_tcbSchedPrevs_of(1)] pre + by auto + +lemma setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (tcbSchedNexts_of s)" + shows "P (ps |> tcb_of' |> tcbSchedNext)" + using use_valid[OF step setObject_cte_tcbSchedNexts_of(1)] pre + by auto + +lemma setObject_cte_inQ_of_use_valid_ksPSpace: + assumes step: "(x, s\ksPSpace := ps\) \ fst (setObject p (cte :: cte) s)" + assumes pre: "P (inQ domain priority |< tcbs_of' s)" + shows "P (inQ domain priority |< (ps |> tcb_of'))" + using use_valid[OF step setObject_cte_inQ(1)] pre + by auto + lemma updateCap_stuff: assumes "(x, s'') \ fst (updateCap p cap s')" shows "(ctes_of s'' = modify_map (ctes_of s') p (cteCap_update (K cap))) \ @@ -1894,7 +1914,12 @@ lemma updateCap_stuff: ksSchedulerAction s'' = ksSchedulerAction s' \ (ksArchState s'' = ksArchState s') \ (pspace_aligned' s' \ pspace_aligned' s'') \ - (pspace_distinct' s' \ pspace_distinct' s'')" using assms + (pspace_distinct' s' \ pspace_distinct' s'') \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" + using assms apply (clarsimp simp: updateCap_def in_monad) apply (drule use_valid [where P="\s. 
s2 = s" for s2, OF _ getCTE_sp refl]) apply (rule conjI) @@ -1903,8 +1928,11 @@ lemma updateCap_stuff: apply (frule setCTE_pspace_only) apply (clarsimp simp: setCTE_def) apply (intro conjI impI) - apply (erule(1) use_valid [OF _ setObject_aligned]) - apply (erule(1) use_valid [OF _ setObject_distinct]) + apply (erule(1) use_valid [OF _ setObject_aligned]) + apply (erule(1) use_valid [OF _ setObject_distinct]) + apply (erule setObject_cte_tcbSchedPrevs_of_use_valid_ksPSpace; simp) + apply (erule setObject_cte_tcbSchedNexts_of_use_valid_ksPSpace; simp) + apply (fastforce elim: setObject_cte_inQ_of_use_valid_ksPSpace) done (* FIXME: move *) @@ -1921,16 +1949,15 @@ lemma pspace_relation_cte_wp_atI': apply (simp split: if_split_asm) apply (erule(1) pspace_dom_relatedE) apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) + apply (subgoal_tac "n = x - y", clarsimp) + apply (drule tcb_cases_related2, clarsimp) + apply (intro exI, rule conjI) + apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) + apply fastforce + apply simp + apply clarsimp apply (simp add: other_obj_relation_def - split: Structures_A.kernel_object.split_asm - X64_A.arch_kernel_obj.split_asm) - apply (subgoal_tac "n = x - y", clarsimp) - apply (drule tcb_cases_related2, clarsimp) - apply (intro exI, rule conjI) - apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) - apply fastforce - apply simp - apply clarsimp + split: Structures_A.kernel_object.split_asm X64_A.arch_kernel_obj.split_asm) done lemma pspace_relation_cte_wp_atI: @@ -2453,7 +2480,7 @@ lemma updateCap_corres: apply (clarsimp simp: in_set_cap_cte_at_swp pspace_relations_def) apply (drule updateCap_stuff) apply simp - apply (rule conjI) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) apply (rule conjI) prefer 2 @@ -2536,9 +2563,9 @@ lemma updateMDB_pspace_relation: apply (clarsimp simp: tcb_ctes_clear cte_level_bits_def objBits_defs) apply clarsimp apply (rule pspace_dom_relatedE, assumption+) - apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] - apply (clarsimp split: Structures_A.kernel_object.split_asm - X64_A.arch_kernel_obj.split_asm + apply (rule obj_relation_cutsE, assumption+; + clarsimp split: Structures_A.kernel_object.split_asm + X64_A.arch_kernel_obj.split_asm if_split_asm simp: other_obj_relation_def) apply (frule(1) tcb_cte_cases_aligned_helpers(1)) apply (frule(1) tcb_cte_cases_aligned_helpers(2)) @@ -2599,6 +2626,25 @@ lemma updateMDB_ctes_of: crunch aligned[wp]: updateMDB "pspace_aligned'" crunch pdistinct[wp]: updateMDB "pspace_distinct'" +crunch tcbSchedPrevs_of[wp]: updateMDB "\s. P (tcbSchedPrevs_of s)" +crunch tcbSchedNexts_of[wp]: updateMDB "\s. P (tcbSchedNexts_of s)" +crunch inQ_opt_pred[wp]: updateMDB "\s. P (inQ d p |< tcbs_of' s)" +crunch inQ_opt_pred'[wp]: updateMDB "\s. P (\d p. inQ d p |< tcbs_of' s)" +crunch ksReadyQueues[wp]: updateMDB "\s. P (ksReadyQueues s)" + (wp: crunch_wps simp: crunch_simps setObject_def updateObject_cte) + +lemma setCTE_rdyq_projs[wp]: + "setCTE p f \\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< tcbs_of' s)\" + apply (rule hoare_lift_Pf2[where f=ksReadyQueues]) + apply (rule hoare_lift_Pf2[where f=tcbSchedNexts_of]) + apply (rule hoare_lift_Pf2[where f=tcbSchedPrevs_of]) + apply wpsimp+ + done + +crunches updateMDB + for rdyq_projs[wp]:"\s. 
P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< tcbs_of' s)" lemma updateMDB_the_lot: assumes "(x, s'') \ fst (updateMDB p f s')" @@ -2621,7 +2667,11 @@ lemma updateMDB_the_lot: ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ ksDomSchedule s'' = ksDomSchedule s' \ ksCurDomain s'' = ksCurDomain s' \ - ksDomainTime s'' = ksDomainTime s'" + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. + (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" using assms apply (simp add: updateMDB_eqs updateMDB_pspace_relations split del: if_split) apply (frule (1) updateMDB_ctes_of) @@ -2630,9 +2680,8 @@ using assms apply (erule use_valid) apply wp apply simp - apply (erule use_valid) - apply wp - apply simp + apply (erule use_valid, wpsimp wp: hoare_vcg_all_lift) + apply (simp add: comp_def) done lemma is_cap_revocable_eq: @@ -3832,6 +3881,9 @@ lemma updateUntypedCap_descendants_of: apply (clarsimp simp:mdb_next_rel_def mdb_next_def split:if_splits) done +crunches setCTE + for tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + lemma setCTE_UntypedCap_corres: "\cap_relation cap (cteCap cte); is_untyped_cap cap; idx' = idx\ \ corres dc (cte_wp_at ((=) cap) src and valid_objs and @@ -3861,10 +3913,19 @@ lemma setCTE_UntypedCap_corres: apply assumption apply (clarsimp simp: pspace_relations_def) apply (subst conj_assoc[symmetric]) + apply clarsimp apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def split: if_split_asm Structures_A.kernel_object.splits) + apply (extract_conjunct \match conclusion in "ready_queues_relation _ _" \ -\) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (rule use_valid[OF _ setCTE_tcbSchedPrevs_of], assumption) + apply (rule use_valid[OF _ setCTE_tcbSchedNexts_of], assumption) + apply (rule use_valid[OF _ setCTE_ksReadyQueues], assumption) + apply (rule use_valid[OF _ setCTE_inQ_opt_pred], assumption) + apply (rule use_valid[OF _ set_cap_exst], assumption) + apply clarsimp apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) @@ -5216,11 +5277,15 @@ lemma updateMDB_the_lot': ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ ksDomSchedule s'' = ksDomSchedule s' \ ksCurDomain s'' = ksCurDomain s' \ - ksDomainTime s'' = ksDomainTime s'" + ksDomainTime s'' = ksDomainTime s' \ + tcbSchedNexts_of s'' = tcbSchedNexts_of s' \ + tcbSchedPrevs_of s'' = tcbSchedPrevs_of s' \ + (\domain priority. 
+ (inQ domain priority |< tcbs_of' s'') = (inQ domain priority |< tcbs_of' s'))" apply (rule updateMDB_the_lot) using assms apply (fastforce simp: pspace_relations_def)+ - done + done lemma cte_map_inj_eq': "\(cte_map p = cte_map p'); @@ -5321,7 +5386,6 @@ lemma cteInsert_corres: apply (thin_tac "ksMachineState t = p" for p t)+ apply (thin_tac "ksCurThread t = p" for p t)+ apply (thin_tac "ksIdleThread t = p" for p t)+ - apply (thin_tac "ksReadyQueues t = p" for p t)+ apply (thin_tac "ksSchedulerAction t = p" for p t)+ apply (clarsimp simp: pspace_relations_def) apply (rule conjI) diff --git a/proof/refine/X64/CSpace_R.thy b/proof/refine/X64/CSpace_R.thy index 7ec38be5ca..738928ee97 100644 --- a/proof/refine/X64/CSpace_R.thy +++ b/proof/refine/X64/CSpace_R.thy @@ -1100,43 +1100,6 @@ lemma bitmapQ_no_L2_orphans_lift: apply (rule hoare_vcg_prop, assumption) done -lemma valid_queues_lift_asm: - assumes tat1: "\d p tcb. \obj_at' (inQ d p) tcb and Q \ f \\_. obj_at' (inQ d p) tcb\" - and tat2: "\tcb. \st_tcb_at' runnable' tcb and Q \ f \\_. st_tcb_at' runnable' tcb\" - and prq: "\P. \\s. P (ksReadyQueues s) \ f \\_ s. P (ksReadyQueues s)\" - and prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" - and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" - shows "\Invariants_H.valid_queues and Q\ f \\_. Invariants_H.valid_queues\" - proof - - have tat: "\d p tcb. \obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb and Q\ f - \\_. obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb\" - apply (rule hoare_chain [OF hoare_vcg_conj_lift [OF tat1 tat2]]) - apply (fastforce)+ - done - have tat_combined: "\d p tcb. \obj_at' (inQ d p and runnable' \ tcbState) tcb and Q\ f - \\_. obj_at' (inQ d p and runnable' \ tcbState) tcb\" - apply (rule hoare_chain [OF tat]) - apply (fastforce simp add: obj_at'_and pred_tcb_at'_def o_def)+ - done - show ?thesis unfolding valid_queues_def valid_queues_no_bitmap_def - by (wp tat_combined prq prqL1 prqL2 valid_bitmapQ_lift bitmapQ_no_L2_orphans_lift - bitmapQ_no_L1_orphans_lift hoare_vcg_all_lift hoare_vcg_conj_lift hoare_Ball_helper) - simp_all - qed - -lemmas valid_queues_lift = valid_queues_lift_asm[where Q="\_. True", simplified] - -lemma valid_queues_lift': - assumes tat: "\d p tcb. \\s. \ obj_at' (inQ d p) tcb s\ f \\_ s. \ obj_at' (inQ d p) tcb s\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\valid_queues'\ f \\_. valid_queues'\" - unfolding valid_queues'_def imp_conv_disj - by (wp hoare_vcg_all_lift hoare_vcg_disj_lift tat prq) - -lemma setCTE_norq [wp]: - "\\s. P (ksReadyQueues s)\ setCTE ptr cte \\r s. P (ksReadyQueues s) \" - by (clarsimp simp: valid_def dest!: setCTE_pspace_only) - lemma setCTE_norqL1 [wp]: "\\s. P (ksReadyQueuesL1Bitmap s)\ setCTE ptr cte \\r s. P (ksReadyQueuesL1Bitmap s) \" by (clarsimp simp: valid_def dest!: setCTE_pspace_only) @@ -2319,7 +2282,7 @@ proof - let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" let ?C = "(modify_map (modify_map - (modify_map (ctes_of s(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest (cteMDBNode_update (\a. MDB word1 src (isCapRevocable cap src_cap) (isCapRevocable cap src_cap)))) src (cteMDBNode_update (mdbNext_update (\_. dest)))) word1 (cteMDBNode_update (mdbPrev_update (\_. dest))))" @@ -2676,7 +2639,7 @@ lemma updateMDB_iflive'[wp]: updateMDB p m \\rv s. 
if_live_then_nonz_cap' s\" apply (clarsimp simp: updateMDB_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp setCTE_iflive') apply (clarsimp elim!: cte_wp_at_weakenE') done @@ -2689,7 +2652,7 @@ lemma updateCap_iflive': updateCap p cap \\rv s. if_live_then_nonz_cap' s\" apply (simp add: updateCap_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp setCTE_iflive') apply (clarsimp elim!: cte_wp_at_weakenE') done @@ -2883,12 +2846,6 @@ lemma setCTE_inQ[wp]: apply (simp_all add: inQ_def) done -lemma setCTE_valid_queues'[wp]: - "\valid_queues'\ setCTE p cte \\rv. valid_queues'\" - apply (simp only: valid_queues'_def imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - done - crunch inQ[wp]: cteInsert "\s. P (obj_at' (inQ d p) t s)" (wp: crunch_wps) @@ -3044,7 +3001,7 @@ definition cap_ioports' newcap - cap_ioports' oldcap \ issued_ioports' (ksArchState s)" lemma setCTE_arch_ctes_of_wp [wp]: - "\\s. P (ksArchState s) (ctes_of s (p \ cte))\ + "\\s. P (ksArchState s) ((ctes_of s)(p \ cte))\ setCTE p cte \\rv s. P (ksArchState s) (ctes_of s)\" apply (simp add: setCTE_def ctes_of_setObject_cte) @@ -3125,7 +3082,7 @@ lemma setCTE_irq_states' [wp]: apply (wp setObject_ksMachine) apply (simp add: updateObject_cte) apply (rule hoare_pre) - apply (wp hoare_unless_wp|wpc|simp)+ + apply (wp unless_wp|wpc|simp)+ apply fastforce apply assumption done @@ -3237,7 +3194,7 @@ lemma setCTE_ksMachine[wp]: apply (wp setObject_ksMachine) apply (clarsimp simp: updateObject_cte split: Structures_H.kernel_object.splits) - apply (safe, (wp hoare_unless_wp | simp)+) + apply (safe, (wp unless_wp | simp)+) done crunch ksMachine[wp]: cteInsert "\s. P (ksMachineState s)" @@ -3476,6 +3433,13 @@ crunch pspace_canonical'[wp]: cteInsert "pspace_canonical'" crunch pspace_in_kernel_mappings'[wp]: cteInsert "pspace_in_kernel_mappings'" (wp: crunch_wps) +crunches cteInsert + for tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: crunch_wps rule: valid_bitmaps_lift) + lemma cteInsert_invs: "\invs' and cte_wp_at' (\c. cteCap c=NullCap) dest and valid_cap' cap and (\s. src \ dest) and (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s) @@ -3485,9 +3449,9 @@ lemma cteInsert_invs: cteInsert cap src dest \\rv. invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def) - apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift CSpace_R.valid_queues_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift cteInsert_norq - simp: st_tcb_at'_def) + apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift + valid_irq_node_lift irqs_masked_lift cteInsert_norq + sym_heap_sched_pointers_lift) apply (auto simp: invs'_def valid_state'_def valid_pspace'_def elim: valid_capAligned) done @@ -3791,10 +3755,13 @@ lemma corres_caps_decomposition: "\P. \\s. P (new_ups' s)\ g \\rv s. P (gsUserPages s)\" "\P. \\s. P (new_cns s)\ f \\rv s. P (cns_of_heap (kheap s))\" "\P. \\s. P (new_cns' s)\ g \\rv s. P (gsCNodes s)\" - "\P. \\s. P (new_queues s)\ f \\rv s. P (ready_queues s)\" + "\P. \\s. P (new_ready_queues s)\ f \\rv s. P (ready_queues s)\" "\P. \\s. P (new_action s)\ f \\rv s. P (scheduler_action s)\" "\P. \\s. P (new_sa' s)\ g \\rv s. P (ksSchedulerAction s)\" - "\P. \\s. P (new_rqs' s)\ g \\rv s. P (ksReadyQueues s)\" + "\P. 
\\s. P (new_ksReadyQueues s) (new_tcbSchedNexts_of s) (new_tcbSchedPrevs_of s) + (\d p. new_inQs d p s)\ + g \\rv s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + (\d p. inQ d p |< (tcbs_of' s))\" "\P. \\s. P (new_di s)\ f \\rv s. P (domain_index s)\" "\P. \\s. P (new_dl s)\ f \\rv s. P (domain_list s)\" "\P. \\s. P (new_cd s)\ f \\rv s. P (cur_domain s)\" @@ -3810,7 +3777,9 @@ lemma corres_caps_decomposition: "\s s'. \ P s; P' s'; (s, s') \ state_relation \ \ sched_act_relation (new_action s) (new_sa' s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ - \ ready_queues_relation (new_queues s) (new_rqs' s')" + \ ready_queues_relation_2 (new_ready_queues s) (new_ksReadyQueues s') + (new_tcbSchedNexts_of s') (new_tcbSchedPrevs_of s') + (\d p. new_inQs d p s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ \ revokable_relation (new_rvk s) (null_filter (new_caps s)) (new_ctes s')" "\s s'. \ P s; P' s'; (s, s') \ state_relation \ @@ -3878,8 +3847,9 @@ proof - apply (rule corres_underlying_decomposition [OF x]) apply (simp add: ghost_relation_of_heap) apply (wp hoare_vcg_conj_lift mdb_wp rvk_wp list_wp u abs_irq_together)+ - apply (intro z[simplified o_def] conjI | simp add: state_relation_def pspace_relations_def swp_cte_at - | (clarsimp, drule (1) z(6), simp add: state_relation_def pspace_relations_def swp_cte_at))+ + apply (intro z[simplified o_def] conjI + | simp add: state_relation_def pspace_relations_def swp_cte_at + | (clarsimp, drule (1) z(6), simp add: state_relation_def))+ done qed @@ -3991,7 +3961,7 @@ lemma create_reply_master_corres: apply clarsimp apply (rule corres_caps_decomposition) defer - apply (wp|simp)+ + apply (wp|simp add: o_def split del: if_splits)+ apply (clarsimp simp: o_def cdt_relation_def cte_wp_at_ctes_of split del: if_split cong: if_cong simp del: id_apply) apply (case_tac cte, clarsimp) @@ -4165,8 +4135,9 @@ lemma setupReplyMaster_corres: cte_wp_at' ((=) rv) (cte_map (t, tcb_cnode_index 2))" in hoare_strengthen_post) apply (wp hoare_drop_imps getCTE_wp') + apply (rename_tac rv s) apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) - apply (case_tac r, fastforce elim: valid_nullcapsE) + apply (case_tac rv, fastforce elim: valid_nullcapsE) apply (fastforce elim: tcb_at_cte_at) apply (clarsimp simp: cte_at'_obj_at' tcb_cte_cases_def cte_map_def) apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) @@ -4367,6 +4338,9 @@ crunches setupReplyMaster and ready_queuesL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbSchedNexts_of[wp]: "\s. 
P (tcbSchedNexts_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers (wp: crunch_wps simp: crunch_simps rule: irqs_masked_lift) lemma setupReplyMaster_vms'[wp]: @@ -4409,7 +4383,8 @@ lemma setupReplyMaster_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp setupReplyMaster_valid_pspace' sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift - valid_queues_lift cur_tcb_lift valid_queues_lift' hoare_vcg_disj_lift + valid_queues_lift cur_tcb_lift hoare_vcg_disj_lift sym_heap_sched_pointers_lift + valid_bitmaps_lift valid_irq_node_lift | simp)+ apply (clarsimp simp: ex_nonz_tcb_cte_caps' valid_pspace'_def objBits_simps' tcbReplySlot_def @@ -4686,8 +4661,8 @@ lemma arch_update_setCTE_invs: apply (wp arch_update_setCTE_mdb valid_queues_lift sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift arch_update_setCTE_iflive arch_update_setCTE_ifunsafe valid_irq_node_lift setCTE_typ_at' setCTE_irq_handlers' - valid_queues_lift' setCTE_pred_tcb_at' irqs_masked_lift setCTE_ioports' - setCTE_norq hoare_vcg_disj_lift untyped_ranges_zero_lift + setCTE_pred_tcb_at' irqs_masked_lift setCTE_ioports' + hoare_vcg_disj_lift untyped_ranges_zero_lift valid_bitmaps_lift | simp add: pred_tcb_at'_def)+ apply (clarsimp simp: valid_global_refs'_def is_arch_update'_def fun_upd_def[symmetric] cte_wp_at_ctes_of isCap_simps untyped_ranges_zero_fun_upd) @@ -6137,7 +6112,7 @@ lemma cteInsert_simple_invs: apply (rule hoare_pre) apply (simp add: invs'_def valid_state'_def valid_pspace'_def) apply (wp cur_tcb_lift sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift - valid_irq_node_lift valid_queues_lift' irqs_masked_lift + valid_irq_node_lift irqs_masked_lift sym_heap_sched_pointers_lift cteInsert_simple_mdb' cteInsert_valid_globals_simple cteInsert_norq | simp add: pred_tcb_at'_def)+ apply (auto simp: invs'_def valid_state'_def valid_pspace'_def @@ -6276,6 +6251,21 @@ lemma arch_update_updateCap_invs: apply clarsimp done +lemma setCTE_set_cap_ready_queues_relation_valid_corres: + assumes pre: "ready_queues_relation s s'" + assumes step_abs: "(x, t) \ fst (set_cap cap slot s)" + assumes step_conc: "(y, t') \ fst (setCTE slot' cap' s')" + shows "ready_queues_relation t t'" + apply (clarsimp simp: ready_queues_relation_def) + apply (insert pre) + apply (rule use_valid[OF step_abs set_cap_exst]) + apply (rule use_valid[OF step_conc setCTE_ksReadyQueues]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedNexts_of]) + apply (rule use_valid[OF step_conc setCTE_tcbSchedPrevs_of]) + apply (clarsimp simp: ready_queues_relation_def Let_def) + using use_valid[OF step_conc setCTE_inQ_opt_pred] + by fast + lemma updateCap_same_master: "\ cap_relation cap cap' \ \ corres dc (valid_objs and pspace_aligned and pspace_distinct and @@ -6307,6 +6297,8 @@ lemma updateCap_same_master: apply assumption apply (clarsimp simp: pspace_relations_def) apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ready_queues_relation a b" for a b \ -\) + subgoal by (erule setCTE_set_cap_ready_queues_relation_valid_corres; assumption) apply (rule conjI) apply (frule setCTE_pspace_only) apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def @@ -6537,8 +6529,9 @@ lemma updateFreeIndex_forward_invs': apply (simp add:updateCap_def) apply (wp setCTE_irq_handlers' getCTE_wp) apply (simp add:updateCap_def) - apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift + apply (wp irqs_masked_lift 
cur_tcb_lift ct_idle_or_in_cur_domain'_lift hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp setCTE_ioports' + sym_heap_sched_pointers_lift valid_bitmaps_lift | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] | simp add: getSlotCap_def)+ apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) diff --git a/proof/refine/X64/Cache.thy b/proof/refine/X64/Cache.thy deleted file mode 100644 index ad26dd3961..0000000000 --- a/proof/refine/X64/Cache.thy +++ /dev/null @@ -1,37 +0,0 @@ -(* - * Copyright 2014, General Dynamics C4 Systems - * - * SPDX-License-Identifier: GPL-2.0-only - *) - -theory Cache -imports Main -begin - -text \Enable the proof cache, both skipping from it - and recording to it.\ -ML \DupSkip.record_proofs := true\ -ML \proofs := 1\ - -ML \DupSkip.skip_dup_proofs := true\ - -text \If executed in reverse order, save the cache\ -ML \val cache_thy_save_cache = ref false;\ -ML \ -if (! cache_thy_save_cache) -then File.open_output (XML_Syntax.output_forest - (XML_Syntax.xml_forest_of_cache (! DupSkip.the_cache))) - (Path.basic "proof_cache.xml") -else ()\ -ML \cache_thy_save_cache := true\ -ML \cache_thy_save_cache := false\ - -text \Load the proof cache - - can take up to a minute\ - -ML \ -DupSkip.the_cache := XML_Syntax.cache_of_xml_forest ( - File.open_input (XML_Syntax.input_forest) - (Path.basic "proof_cache.xml"))\ - -end diff --git a/proof/refine/X64/Detype_R.thy b/proof/refine/X64/Detype_R.thy index a246cd8f8f..fc2fc085ed 100644 --- a/proof/refine/X64/Detype_R.thy +++ b/proof/refine/X64/Detype_R.thy @@ -100,6 +100,9 @@ defs deletionIsSafe_def: (\ko. ksPSpace s p = Some (KOArch ko) \ p \ {ptr .. ptr + 2 ^ bits - 1} \ 6 \ bits)" +defs deletionIsSafe_delete_locale_def: + "deletionIsSafe_delete_locale \ \ptr bits s. \p. ko_wp_at' live' p s \ p \ {ptr .. ptr + 2 ^ bits - 1}" + defs ksASIDMapSafe_def: "ksASIDMapSafe \ \s. True" @@ -115,6 +118,7 @@ lemma deleteObjects_def2: "is_aligned ptr bits \ deleteObjects ptr bits = do stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; doMachineOp (freeMemory ptr bits); stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ {ptr .. ptr + 2 ^ bits - 1})) []; modify (\s. s \ ksPSpace := \x. if x \ {ptr .. ptr + 2 ^ bits - 1} @@ -125,7 +129,8 @@ lemma deleteObjects_def2: then None else gsCNodes s x \); stateAssert ksASIDMapSafe [] od" - apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def) + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def) + apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (simp add: bind_assoc[symmetric]) @@ -148,6 +153,7 @@ lemma deleteObjects_def3: do assert (is_aligned ptr bits); stateAssert (deletionIsSafe ptr bits) []; + stateAssert (deletionIsSafe_delete_locale ptr bits) []; doMachineOp (freeMemory ptr bits); stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ {ptr .. ptr + 2 ^ bits - 1})) []; modify (\s. s \ ksPSpace := \x. if x \ {ptr .. ptr + 2 ^ bits - 1} @@ -454,6 +460,7 @@ next qed end + locale detype_locale' = detype_locale + constrains s::"det_state" lemma (in detype_locale') deletionIsSafe: @@ -547,149 +554,6 @@ qed context begin interpretation Arch . 
(*FIXME: arch_split*) -(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) -(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) -(* FIXME: move *) -lemma corres_machine_op: - assumes P: "corres_underlying Id False True r P Q x x'" - shows "corres r (P \ machine_state) (Q \ ksMachineState) - (do_machine_op x) (doMachineOp x')" - apply (rule corres_submonad3 - [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) - apply (simp_all add: state_relation_def swp_def) - done - -lemma ekheap_relation_detype: - "ekheap_relation ekh kh \ - ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" - by (fastforce simp add: ekheap_relation_def split: if_split_asm) - -lemma cap_table_at_gsCNodes_eq: - "(s, s') \ state_relation - \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" - apply (clarsimp simp: state_relation_def ghost_relation_def - obj_at_def is_cap_table) - apply (drule_tac x = ptr in spec)+ - apply (drule_tac x = bits in spec)+ - apply fastforce - done - -lemma cNodeNoPartialOverlap: - "corres dc (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s - \ valid_objs s \ pspace_aligned s) - \ - (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) - (\x. base \ x \ x \ base + 2 ^ magnitude - 1)) [])" - apply (simp add: stateAssert_def assert_def) - apply (rule corres_symb_exec_r[OF _ get_sp]) - apply (rule corres_req[rotated], subst if_P, assumption) - apply simp - apply (clarsimp simp: cNodePartialOverlap_def) - apply (drule(1) cte_wp_valid_cap) - apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq - obj_at_def is_cap_table) - apply (frule(1) pspace_alignedD) - apply simp - apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) - apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) - apply (clarsimp simp: is_aligned_no_overflow) - apply (blast intro: order_trans) - apply (simp add: is_aligned_no_overflow power_overflow word_bits_def) - apply wp+ - done - -declare wrap_ext_det_ext_ext_def[simp] - - -lemma sym_refs_hyp_refs_triv[simp]: "sym_refs (state_hyp_refs_of s)" - apply (clarsimp simp: state_hyp_refs_of_def sym_refs_def) - by (case_tac "kheap s x"; simp) - -lemma deleteObjects_corres: - "is_aligned base magnitude \ magnitude \ 3 \ - corres dc - (\s. einvs s - \ s \ (cap.UntypedCap d base magnitude idx) - \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s - \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) - \ untyped_children_in_mdb s \ if_unsafe_then_cap s - \ valid_mdb s \ valid_global_refs s \ ct_active s) - (\s. s \' (UntypedCap d base magnitude idx) - \ valid_pspace' s) - (delete_objects base magnitude) (deleteObjects base magnitude)" - apply (simp add: deleteObjects_def2) - apply (rule corres_stateAssert_implied[where P'=\, simplified]) - prefer 2 - apply clarsimp - apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and - s=s in detype_locale'.deletionIsSafe, - simp_all add: detype_locale'_def - detype_locale_def p_assoc_help invs_valid_pspace)[1] - apply (simp add:valid_cap_simps) - apply (simp add: bind_assoc[symmetric] ksASIDMapSafe_def) - apply (simp add: delete_objects_def) - apply (rule_tac Q="\_ s. valid_objs s \ valid_list s \ - (\cref. 
cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ - descendants_range (cap.UntypedCap d base magnitude idx) cref s ) \ - s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ - valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ - zombies_final s \ sym_refs (state_refs_of s) \ - untyped_children_in_mdb s \ if_unsafe_then_cap s \ - valid_global_refs s" and - Q'="\_ s. s \' capability.UntypedCap d base magnitude idx \ - valid_pspace' s" in corres_underlying_split) - apply (rule corres_bind_return) - apply (rule corres_guard_imp[where r=dc]) - apply (rule corres_split[OF _ cNodeNoPartialOverlap]) - apply (rule corres_machine_op[OF corres_Id], simp+) - apply (rule no_fail_freeMemory, simp+) - apply (wp hoare_vcg_ex_lift)+ - apply auto[1] - apply (auto elim: is_aligned_weaken)[1] - apply (rule corres_modify) - apply (simp add: valid_pspace'_def) - apply (rule state_relation_null_filterE, assumption, - simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] - apply (simp add: detype_def, rule state.equality; simp add: detype_ext_def) - apply (intro exI, fastforce) - apply (rule ext, clarsimp simp add: null_filter_def) - apply (rule sym, rule ccontr, clarsimp) - apply (drule(4) cte_map_not_null_outside') - apply (fastforce simp add: cte_wp_at_caps_of_state) - apply simp - apply (rule ext, clarsimp simp add: null_filter'_def - map_to_ctes_delete[simplified field_simps]) - apply (rule sym, rule ccontr, clarsimp) - apply (frule(2) pspace_relation_cte_wp_atI - [OF state_relation_pspace_relation]) - apply (elim exE) - apply (frule(4) cte_map_not_null_outside') - apply (rule cte_wp_at_weakenE, erule conjunct1) - apply (case_tac y, clarsimp) - apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def - valid_nullcaps_def) - apply clarsimp - apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, - erule cte_wp_at_weakenE[OF _ TrueI], assumption+) - apply simp - apply (rule detype_pspace_relation[simplified], - simp_all add: state_relation_pspace_relation valid_pspace_def)[1] - apply (simp add: valid_cap'_def capAligned_def) - apply (clarsimp simp: valid_cap_def, assumption) - apply (fastforce simp add: detype_def detype_ext_def intro!: ekheap_relation_detype) - apply (clarsimp simp: state_relation_def ghost_relation_of_heap - detype_def) - apply (drule_tac t="gsUserPages s'" in sym) - apply (drule_tac t="gsCNodes s'" in sym) - apply (auto simp add: ups_of_heap_def cns_of_heap_def ext - split: option.splits kernel_object.splits)[1] - apply (simp add: valid_mdb_def) - apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | - simp add: invs_def valid_state_def valid_pspace_def - descendants_range_def | wp (once) hoare_drop_imps)+ - done - - text \Invariant preservation across concrete deletion\ lemma caps_containedD': @@ -728,91 +592,94 @@ lemma zobj_refs_capRange: "capAligned c \ zobj_refs' c \ capRange c" by (cases c, simp_all add: capRange_def capAligned_def is_aligned_no_overflow) end + locale delete_locale = - fixes s and base and bits and ptr and idx and d - assumes cap: "cte_wp_at' (\cte. cteCap cte = UntypedCap d base bits idx) ptr s" - and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s)" - and invs: "invs' s" - and ct_act: "ct_active' s" - and sa_simp: "sch_act_simple s" - and bwb: "bits < word_bits" + fixes s' and base and bits and ptr and idx and d + assumes cap: "cte_wp_at' (\cte. 
cteCap cte = UntypedCap d base bits idx) ptr s'" + and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s')" + and invs: "invs' s'" + and ct_act: "ct_active' s'" + and sa_simp: "sch_act_simple s'" and al: "is_aligned base bits" - and safe: "deletionIsSafe base bits s" + and safe: "deletionIsSafe base bits s'" context delete_locale begin interpretation Arch . (*FIXME: arch_split*) -lemma valid_objs: "valid_objs' s" - and pa: "pspace_aligned' s" - and pc: "pspace_canonical' s" - and pkm: "pspace_in_kernel_mappings' s" - and pd: "pspace_distinct' s" - and vq: "valid_queues s" - and vq': "valid_queues' s" - and sym_refs: "sym_refs (state_refs_of' s)" - and iflive: "if_live_then_nonz_cap' s" - and ifunsafe: "if_unsafe_then_cap' s" - and dlist: "valid_dlist (ctes_of s)" - and no_0: "no_0 (ctes_of s)" - and chain_0: "mdb_chain_0 (ctes_of s)" - and badges: "valid_badges (ctes_of s)" - and contained: "caps_contained' (ctes_of s)" - and chunked: "mdb_chunked (ctes_of s)" - and umdb: "untyped_mdb' (ctes_of s)" - and uinc: "untyped_inc' (ctes_of s)" - and nullcaps: "valid_nullcaps (ctes_of s)" - and ut_rev: "ut_revocable' (ctes_of s)" - and dist_z: "distinct_zombies (ctes_of s)" - and irq_ctrl: "irq_control (ctes_of s)" - and ioport_ctrl: "ioport_control (ctes_of s)" - and clinks: "class_links (ctes_of s)" - and rep_r_fb: "reply_masters_rvk_fb (ctes_of s)" - and idle: "valid_idle' s" - and refs: "valid_global_refs' s" - and arch: "valid_arch_state' s" - and virq: "valid_irq_node' (irq_node' s) s" - and virqh: "valid_irq_handlers' s" - and vioports: "valid_ioports' s" - and virqs: "valid_irq_states' s" - and no_0_objs: "no_0_obj' s" - and ctnotinQ: "ct_not_inQ s" - and irqs_masked: "irqs_masked' s" - and ctcd: "ct_idle_or_in_cur_domain' s" - and cdm: "ksCurDomain s \ maxDomain" - and vds: "valid_dom_schedule' s" +lemma valid_objs: "valid_objs' s'" + and pa: "pspace_aligned' s'" + and pc: "pspace_canonical' s'" + and pkm: "pspace_in_kernel_mappings' s'" + and pd: "pspace_distinct' s'" + and vbm: "valid_bitmaps s'" + and sym_sched: "sym_heap_sched_pointers s'" + and vsp: "valid_sched_pointers s'" + and sym_refs: "sym_refs (state_refs_of' s')" + and iflive: "if_live_then_nonz_cap' s'" + and ifunsafe: "if_unsafe_then_cap' s'" + and dlist: "valid_dlist (ctes_of s')" + and no_0: "no_0 (ctes_of s')" + and chain_0: "mdb_chain_0 (ctes_of s')" + and badges: "valid_badges (ctes_of s')" + and contained: "caps_contained' (ctes_of s')" + and chunked: "mdb_chunked (ctes_of s')" + and umdb: "untyped_mdb' (ctes_of s')" + and uinc: "untyped_inc' (ctes_of s')" + and nullcaps: "valid_nullcaps (ctes_of s')" + and ut_rev: "ut_revocable' (ctes_of s')" + and dist_z: "distinct_zombies (ctes_of s')" + and irq_ctrl: "irq_control (ctes_of s')" + and ioport_ctrl: "ioport_control (ctes_of s')" + and clinks: "class_links (ctes_of s')" + and rep_r_fb: "reply_masters_rvk_fb (ctes_of s')" + and idle: "valid_idle' s'" + and refs: "valid_global_refs' s'" + and arch: "valid_arch_state' s'" + and virq: "valid_irq_node' (irq_node' s') s'" + and virqh: "valid_irq_handlers' s'" + and vioports: "valid_ioports' s'" + and virqs: "valid_irq_states' s'" + and no_0_objs: "no_0_obj' s'" + and ctnotinQ: "ct_not_inQ s'" + and irqs_masked: "irqs_masked' s'" + and ctcd: "ct_idle_or_in_cur_domain' s'" + and cdm: "ksCurDomain s' \ maxDomain" + and vds: "valid_dom_schedule' s'" using invs - by (auto simp add: invs'_def valid_state'_def valid_pspace'_def - valid_mdb'_def valid_mdb_ctes_def) + by (auto simp: invs'_def valid_state'_def 
valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) abbreviation "base_bits \ {base .. base + (2 ^ bits - 1)}" -abbreviation - "state' \ (s \ ksPSpace := \x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s x \)" +abbreviation pspace' :: pspace where + "pspace' \ \x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s' x" + +abbreviation state' :: kernel_state where + "state' \ (s' \ ksPSpace := pspace' \)" lemma ko_wp_at'[simp]: - "\P p. (ko_wp_at' P p state') = (ko_wp_at' P p s \ p \ base_bits)" + "\P p. (ko_wp_at' P p state') = (ko_wp_at' P p s' \ p \ base_bits)" by (fastforce simp add: ko_wp_at_delete'[OF pd]) lemma obj_at'[simp]: - "\P p. (obj_at' P p state') = (obj_at' P p s \ p \ base_bits)" + "\P p. (obj_at' P p state') = (obj_at' P p s' \ p \ base_bits)" by (fastforce simp add: obj_at'_real_def) lemma typ_at'[simp]: - "\T p. (typ_at' P p state') = (typ_at' P p s \ p \ base_bits)" + "typ_at' P p state' = (typ_at' P p s' \ p \ base_bits)" by (simp add: typ_at'_def) lemma valid_untyped[simp]: - "s \' UntypedCap d base bits idx" + "s' \' UntypedCap d base bits idx" using cte_wp_at_valid_objs_valid_cap' [OF cap valid_objs] by clarsimp lemma cte_wp_at'[simp]: - "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s \ p \ base_bits)" + "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s' \ p \ base_bits)" by (fastforce simp:cte_wp_at_delete'[where idx = idx,OF valid_untyped pd ]) (* the bits of caps they need for validity argument are within their capRanges *) lemma valid_cap_ctes_pre: - "\c. s \' c \ case c of CNodeCap ref bits g gs + "\c. s' \' c \ case c of CNodeCap ref bits g gs \ \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c | Zombie ref (ZombieCNode bits) n \ \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c @@ -850,13 +717,13 @@ lemma valid_cap_ctes_pre: done lemma replycap_argument: - "\p t m r. cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s + "\p t m r. cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s' \ t \ {base .. base + (2 ^ bits - 1)}" using safe by (fastforce simp add: deletionIsSafe_def cte_wp_at_ctes_of field_simps) lemma valid_cap': - "\p c. \ s \' c; cte_wp_at' (\cte. cteCap cte = c) p s; + "\p c. \ s' \' c; cte_wp_at' (\cte. cteCap cte = c) p s'; capRange c \ {base .. base + (2 ^ bits - 1)} = {} \ \ state' \' c" apply (subgoal_tac "capClass c = PhysicalClass \ capUntypedPtr c \ capRange c") apply (subgoal_tac "capClass c = PhysicalClass \ @@ -903,11 +770,11 @@ lemma valid_cap': done lemma objRefs_notrange: - assumes asms: "ctes_of s p = Some c" "\ isUntypedCap (cteCap c)" + assumes asms: "ctes_of s' p = Some c" "\ isUntypedCap (cteCap c)" shows "capRange (cteCap c) \ base_bits = {}" proof - from cap obtain node - where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" apply (clarsimp simp: cte_wp_at_ctes_of) apply (case_tac cte, simp) done @@ -928,11 +795,11 @@ proof - qed lemma ctes_of_valid [elim!]: - "ctes_of s p = Some cte \ s \' cteCap cte" + "ctes_of s' p = Some cte \ s' \' cteCap cte" by (case_tac cte, simp add: ctes_of_valid_cap' [OF _ valid_objs]) lemma valid_cap2: - "\ cte_wp_at' (\cte. cteCap cte = c) p s \ \ state' \' c" + "\ cte_wp_at' (\cte. 
cteCap cte = c) p s' \ \ state' \' c" apply (case_tac "isUntypedCap c") apply (drule cte_wp_at_valid_objs_valid_cap' [OF _ valid_objs]) apply (clarsimp simp: valid_cap'_def isCap_simps valid_untyped'_def) @@ -942,7 +809,7 @@ lemma valid_cap2: done lemma ex_nonz_cap_notRange: - "ex_nonz_cap_to' p s \ p \ base_bits" + "ex_nonz_cap_to' p s' \ p \ base_bits" apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) apply (case_tac "isUntypedCap (cteCap cte)") apply (clarsimp simp: isCap_simps) @@ -954,22 +821,280 @@ lemma ex_nonz_cap_notRange: done lemma live_notRange: - "\ ko_wp_at' P p s; \ko. P ko \ live' ko \ \ p \ base_bits" + "\ ko_wp_at' P p s'; \ko. P ko \ live' ko \ \ p \ base_bits" apply (drule if_live_then_nonz_capE' [OF iflive ko_wp_at'_weakenE]) apply simp apply (erule ex_nonz_cap_notRange) done +lemma deletionIsSafe_delete_locale_holds: + "deletionIsSafe_delete_locale base bits s'" + by (fastforce dest: live_notRange simp: deletionIsSafe_delete_locale_def field_simps) + lemma refs_notRange: - "(x, tp) \ state_refs_of' s y \ y \ base_bits" + "(x, tp) \ state_refs_of' s' y \ y \ base_bits" apply (drule state_refs_of'_elemD) apply (erule live_notRange) apply (rule refs_of_live') apply clarsimp done +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) +(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) +(* FIXME: move *) +lemma corres_machine_op: + assumes P: "corres_underlying Id False True r P Q x x'" + shows "corres r (P \ machine_state) (Q \ ksMachineState) + (do_machine_op x) (doMachineOp x')" + apply (rule corres_submonad3 + [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) + apply (simp_all add: state_relation_def swp_def) + done + +lemma ekheap_relation_detype: + "ekheap_relation ekh kh \ + ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" + by (fastforce simp add: ekheap_relation_def split: if_split_asm) + +lemma cap_table_at_gsCNodes_eq: + "(s, s') \ state_relation + \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" + apply (clarsimp simp: state_relation_def ghost_relation_def + obj_at_def is_cap_table) + apply (drule_tac x = ptr in spec)+ + apply (drule_tac x = bits in spec)+ + apply fastforce + done + +lemma cNodeNoPartialOverlap: + "corres dc (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ valid_objs s \ pspace_aligned s) + \ + (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) + (\x. 
base \ x \ x \ base + 2 ^ magnitude - 1)) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: cNodePartialOverlap_def) + apply (drule(1) cte_wp_valid_cap) + apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq + obj_at_def is_cap_table) + apply (frule(1) pspace_alignedD) + apply simp + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow) + apply (blast intro: order_trans) + apply (simp add: is_aligned_no_overflow power_overflow word_bits_def) + apply wp+ + done + +declare wrap_ext_det_ext_ext_def[simp] + +lemma sym_refs_hyp_refs_triv[simp]: "sym_refs (state_hyp_refs_of s)" + apply (clarsimp simp: state_hyp_refs_of_def sym_refs_def) + by (case_tac "kheap s x"; simp) + +crunches doMachineOp + for deletionIsSafe_delete_locale[wp]: "deletionIsSafe_delete_locale base magnitude" + (simp: deletionIsSafe_delete_locale_def) + +lemma detype_tcbSchedNexts_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedNext) + = tcbSchedNexts_of s'" + using pspace_alignedD' pspace_distinctD' + supply projectKOs[simp] + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_tcbSchedPrevs_of: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of' |> tcbSchedPrev) + = tcbSchedPrevs_of s'" + using pspace_alignedD' pspace_distinctD' + supply projectKOs[simp] + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: ko_wp_at'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_inQ: + "\pspace_aligned' s'; pspace_distinct' s'; \p. p \ S \ \ ko_wp_at' live' p s'\ + \ \d p. (inQ d p |< ((\x. if x \ S then None else ksPSpace s' x) |> tcb_of')) + = (inQ d p |< tcbs_of' s')" + using pspace_alignedD' pspace_distinctD' + supply projectKOs[simp] + apply (clarsimp simp: opt_map_def) + apply (rule ext) + apply (rename_tac s) + apply (clarsimp simp: inQ_def opt_pred_def ko_wp_at'_def split: option.splits) + apply (drule_tac x=s in spec) + apply force + done + +lemma detype_ready_queues_relation: + "\pspace_aligned' s'; pspace_distinct' s'; + \p. p \ {lower..upper} \ \ ko_wp_at' live' p s'; + ready_queues_relation s s'; upper = upper'\ + \ ready_queues_relation_2 + (ready_queues (detype {lower..upper'} s)) + (ksReadyQueues s') + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedNext) + ((\x. if lower \ x \ x \ upper then None + else ksPSpace s' x) |> + tcb_of' |> + tcbSchedPrev) + (\d p. inQ d p |< ((\x. 
if lower \ x \ x \ upper then None else ksPSpace s' x) |> tcb_of'))" + apply (clarsimp simp: detype_ext_def ready_queues_relation_def Let_def) + apply (frule (1) detype_tcbSchedNexts_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_tcbSchedPrevs_of[where S="{lower..upper}"]; simp) + apply (frule (1) detype_inQ[where S="{lower..upper}"]; simp) + apply (fastforce simp add: detype_def detype_ext_def) + done + +lemma deleteObjects_corres: + "is_aligned base magnitude \ magnitude \ 3 \ + corres dc + (\s. einvs s + \ s \ (cap.UntypedCap d base magnitude idx) + \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) + \ untyped_children_in_mdb s \ if_unsafe_then_cap s + \ valid_mdb s \ valid_global_refs s \ ct_active s + \ schact_is_rct s) + (\s'. invs' s' + \ cte_wp_at' (\cte. cteCap cte = UntypedCap d base magnitude idx) ptr s' + \ descendants_range' (UntypedCap d base magnitude idx) ptr (ctes_of s') + \ ct_active' s' + \ s' \' (UntypedCap d base magnitude idx)) + (delete_objects base magnitude) (deleteObjects base magnitude)" + apply (simp add: deleteObjects_def2) + apply (rule corres_stateAssert_implied[where P'=\, simplified]) + prefer 2 + apply clarsimp + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add: valid_cap_simps) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (rule_tac ptr=ptr and idx=idx and d=d in delete_locale.deletionIsSafe_delete_locale_holds) + apply (clarsimp simp: delete_locale_def) + apply (intro conjI) + apply (fastforce simp: sch_act_simple_def schact_is_rct_def state_relation_def) + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and s=s + in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def detype_locale_def invs_valid_pspace)[1] + apply (simp add: valid_cap_simps) + apply (simp add: bind_assoc[symmetric] ksASIDMapSafe_def) + apply (simp add: delete_objects_def) + apply (rule_tac Q="\_ s. valid_objs s \ valid_list s \ + (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ + descendants_range (cap.UntypedCap d base magnitude idx) cref s) \ + s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ + valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ + zombies_final s \ sym_refs (state_refs_of s) \ + untyped_children_in_mdb s \ if_unsafe_then_cap s \ + valid_global_refs s" and + Q'="\_ s. 
s \' capability.UntypedCap d base magnitude idx \ + valid_pspace' s \ + deletionIsSafe_delete_locale base magnitude s" + in corres_underlying_split) + apply (rule corres_bind_return) + apply (rule corres_guard_imp[where r=dc]) + apply (rule corres_split[OF _ cNodeNoPartialOverlap]) + apply (rule corres_machine_op[OF corres_Id], simp+) + apply (rule no_fail_freeMemory, simp+) + apply (wp hoare_vcg_ex_lift)+ + apply auto[1] + apply (auto elim: is_aligned_weaken)[1] + apply (rule corres_modify) + apply (simp add: valid_pspace'_def) + apply (rule state_relation_null_filterE, assumption, + simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] + apply (simp add: detype_def, rule state.equality; simp add: detype_ext_def) + apply (intro exI, fastforce) + apply (rule ext, clarsimp simp add: null_filter_def) + apply (rule sym, rule ccontr, clarsimp) + apply (drule(4) cte_map_not_null_outside') + apply (fastforce simp add: cte_wp_at_caps_of_state) + apply simp + apply (rule ext, clarsimp simp add: null_filter'_def + map_to_ctes_delete[simplified field_simps]) + apply (rule sym, rule ccontr, clarsimp) + apply (frule(2) pspace_relation_cte_wp_atI + [OF state_relation_pspace_relation]) + apply (elim exE) + apply (frule(4) cte_map_not_null_outside') + apply (rule cte_wp_at_weakenE, erule conjunct1) + apply (case_tac y, clarsimp) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def + valid_nullcaps_def) + apply clarsimp + apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, + erule cte_wp_at_weakenE[OF _ TrueI], assumption+) + apply simp + apply (rule detype_pspace_relation[simplified], + simp_all add: state_relation_pspace_relation valid_pspace_def)[1] + apply (simp add: valid_cap'_def capAligned_def) + apply (clarsimp simp: valid_cap_def, assumption) + apply (fastforce simp add: detype_def detype_ext_def intro!: ekheap_relation_detype) + apply (rule detype_ready_queues_relation; blast?) + apply (clarsimp simp: deletionIsSafe_delete_locale_def) + apply (frule state_relation_ready_queues_relation) + apply (simp add: ready_queues_relation_def Let_def) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap + detype_def) + apply (drule_tac t="gsUserPages s'" in sym) + apply (drule_tac t="gsCNodes s'" in sym) + apply (auto simp add: ups_of_heap_def cns_of_heap_def ext + split: option.splits kernel_object.splits)[1] + apply (simp add: valid_mdb_def) + apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | + simp add: invs_def valid_state_def valid_pspace_def + descendants_range_def | wp (once) hoare_drop_imps)+ + apply fastforce + done +end + +context delete_locale begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma live_idle_untyped_range': + "ko_wp_at' live' p s' \ p = idle_thread_ptr \ p \ base_bits" + apply (case_tac "ko_wp_at' live' p s'") + apply (drule if_live_then_nonz_capE'[OF iflive ko_wp_at'_weakenE]) + apply simp + apply (erule ex_nonz_cap_notRange) + apply clarsimp + apply (insert invs_valid_global'[OF invs] cap invs_valid_idle'[OF invs]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_global_refsD') + apply (clarsimp simp: valid_idle'_def) + using atLeastAtMost_iff apply (simp add: p_assoc_help mask_eq_exp_minus_1) + by fastforce + +lemma untyped_range_live_idle': + "p \ base_bits \ \ (ko_wp_at' live' p s' \ p = idle_thread_ptr)" + using live_idle_untyped_range' by blast lemma valid_obj': - "\ valid_obj' obj s; ko_wp_at' ((=) obj) p s \ \ valid_obj' obj state'" + "\ valid_obj' obj s'; ko_wp_at' ((=) obj) p s'; sym_heap_sched_pointers s' \ + \ valid_obj' obj state'" apply (case_tac obj, simp_all add: valid_obj'_def) apply (rename_tac endpoint) apply (case_tac endpoint, simp_all add: valid_ep'_def)[1] @@ -996,10 +1121,23 @@ lemma valid_obj': apply (erule(2) cte_wp_at_tcbI') apply fastforce apply simp - apply (rename_tac tcb) - apply (case_tac "tcbState tcb"; - clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def - dest!: refs_notRange split: option.splits) + apply (intro conjI) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; clarsimp simp: valid_tcb_state'_def dest!: refs_notRange) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; + clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def + dest!: refs_notRange split: option.splits) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac prev) + apply (cut_tac P=live' and p=prev in live_notRange; fastforce?) + apply (fastforce dest: sym_heapD2[where p'=p] + simp: opt_map_def ko_wp_at'_def obj_at'_def projectKOs) + apply (clarsimp simp: none_top_bool_cases) + apply (rename_tac "next") + apply (cut_tac P=live' and p="next" in live_notRange; fastforce?) 
+ apply (fastforce dest!: sym_heapD1[where p=p] + simp: opt_map_def ko_wp_at'_def obj_at'_def projectKOs) apply (clarsimp simp: valid_cte'_def) apply (rule_tac p=p in valid_cap2) apply (clarsimp simp: ko_wp_at'_def objBits_simps' cte_level_bits_def[symmetric]) @@ -1011,26 +1149,58 @@ lemma valid_obj': apply (case_tac asidpool, clarsimp simp: page_directory_at'_def) apply (rename_tac pte) apply (case_tac pte, simp_all add: valid_mapping'_def) - apply(rename_tac pde) + apply (rename_tac pde) apply (case_tac pde, simp_all add: valid_mapping'_def) - apply(rename_tac pdpte) + apply (rename_tac pdpte) apply (case_tac pdpte, simp_all add: valid_mapping'_def) - apply(rename_tac pml4e) + apply (rename_tac pml4e) apply (case_tac pml4e, simp_all add: valid_mapping'_def) done +lemma tcbSchedNexts_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedNext) = tcbSchedNexts_of s'" + supply projectKOs[simp] + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def) + apply (fastforce simp: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + +lemma tcbSchedPrevs_of_pspace': + "\pspace_aligned' s'; pspace_distinct' s'; pspace_distinct' state'\ + \ (pspace' |> tcb_of' |> tcbSchedPrev) = tcbSchedPrevs_of s'" + supply projectKOs[simp] + apply (rule ext) + apply (rename_tac p) + apply (case_tac "p \ base_bits") + apply (frule untyped_range_live_idle') + apply (clarsimp simp: opt_map_def) + apply (case_tac "ksPSpace s' p"; clarsimp) + apply (rename_tac obj) + apply (case_tac "tcb_of' obj"; clarsimp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def) + apply (fastforce simp: pspace_alignedD' pspace_distinctD') + apply (clarsimp simp: opt_map_def split: option.splits) + done + lemma st_tcb: - "\P p. \ st_tcb_at' P p s; \ P Inactive; \ P IdleThreadState \ \ st_tcb_at' P p state'" - by (fastforce simp: pred_tcb_at'_def obj_at'_real_def - projectKOs - dest: live_notRange) + "\P p. \ st_tcb_at' P p s'; \ P Inactive; \ P IdleThreadState \ \ st_tcb_at' P p state'" + by (fastforce simp: pred_tcb_at'_def obj_at'_real_def projectKOs dest: live_notRange) lemma irq_nodes_global: "\irq :: 8 word. irq_node' s + (ucast irq) * 32 \ global_refs' s" (*2^cte_level_bits *) by (simp add: global_refs'_def mult.commute mult.left_commute) lemma global_refs: - "global_refs' s \ base_bits = {}" + "global_refs' s' \ base_bits = {}" using cap apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule valid_global_refsD' [OF _ refs]) @@ -1038,20 +1208,20 @@ lemma global_refs: done lemma global_refs2: - "global_refs' s \ (- base_bits)" + "global_refs' s' \ (- base_bits)" using global_refs by blast lemma irq_nodes_range: - "\irq :: 8 word. irq_node' s + (ucast irq) * 32 \ base_bits" + "\irq :: 8 word. 
irq_node' s' + (ucast irq) * 32 \ base_bits" using irq_nodes_global global_refs by blast lemma cte_refs_notRange: - assumes asms: "ctes_of s p = Some c" - shows "cte_refs' (cteCap c) (irq_node' s) \ base_bits = {}" + assumes asms: "ctes_of s' p = Some c" + shows "cte_refs' (cteCap c) (irq_node' s') \ base_bits = {}" proof - from cap obtain node - where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + where ctes_of: "ctes_of s' ptr = Some (CTE (UntypedCap d base bits idx) node)" apply (clarsimp simp: cte_wp_at_ctes_of) apply (case_tac cte, simp) done @@ -1080,7 +1250,7 @@ proof - qed lemma non_null_present: - "cte_wp_at' (\c. cteCap c \ NullCap) p s \ p \ base_bits" + "cte_wp_at' (\c. cteCap c \ NullCap) p s' \ p \ base_bits" apply (drule (1) if_unsafe_then_capD' [OF _ ifunsafe]) apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of dest!: cte_refs_notRange simp del: atLeastAtMost_iff) @@ -1088,7 +1258,7 @@ lemma non_null_present: done lemma cte_cap: - "ex_cte_cap_to' p s \ ex_cte_cap_to' p state'" + "ex_cte_cap_to' p s' \ ex_cte_cap_to' p state'" apply (clarsimp simp: ex_cte_cap_to'_def) apply (frule non_null_present [OF cte_wp_at_weakenE']) apply clarsimp @@ -1096,37 +1266,37 @@ lemma cte_cap: done lemma idle_notRange: - "\cref. \ cte_wp_at' (\c. ksIdleThread s \ capRange (cteCap c)) cref s - \ ksIdleThread s \ base_bits" + "\cref. \ cte_wp_at' (\c. ksIdleThread s' \ capRange (cteCap c)) cref s' + \ ksIdleThread s' \ base_bits" apply (insert cap) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule_tac x=ptr in allE, clarsimp simp: field_simps) done abbreviation - "ctes' \ map_to_ctes (\x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s x)" + "ctes' \ map_to_ctes (\x. if base \ x \ x \ base + (2 ^ bits - 1) then None else ksPSpace s' x)" lemmas tree_to_ctes = map_to_ctes_delete [OF valid_untyped pd] lemma map_to_ctesE[elim!]: - "\ ctes' x = Some cte; \ ctes_of s x = Some cte; x \ base_bits \ \ P \ \ P" + "\ ctes' x = Some cte; \ ctes_of s' x = Some cte; x \ base_bits \ \ P \ \ P" by (clarsimp simp: tree_to_ctes split: if_split_asm) lemma not_nullMDBNode: - "\ ctes_of s x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" + "\ ctes_of s' x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" using nullcaps apply (cases cte) apply (simp add: valid_nullcaps_def) done -lemma mdb_src: "\ ctes_of s \ x \ y; y \ 0 \ \ x \ base_bits" +lemma mdb_src: "\ ctes_of s' \ x \ y; y \ 0 \ \ x \ base_bits" apply (rule non_null_present) apply (clarsimp simp: next_unfold' cte_wp_at_ctes_of) apply (erule(1) not_nullMDBNode) apply (simp add: nullMDBNode_def nullPointer_def) done -lemma mdb_dest: "\ ctes_of s \ x \ y; y \ 0 \ \ y \ base_bits" +lemma mdb_dest: "\ ctes_of s' \ x \ y; y \ 0 \ \ y \ base_bits" apply (case_tac "x = 0") apply (insert no_0, simp add: next_unfold')[1] apply (drule(1) vdlist_nextD0 [OF _ _ dlist]) @@ -1137,7 +1307,7 @@ lemma mdb_dest: "\ ctes_of s \ x \ y; y \ 0 \ done lemma trancl_next[elim]: - "\ ctes_of s \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" + "\ ctes_of s' \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" apply (erule rev_mp, erule converse_trancl_induct) apply clarsimp apply (rule r_into_trancl) @@ -1155,14 +1325,14 @@ lemma trancl_next[elim]: done lemma mdb_parent_notrange: - "ctes_of s \ x \ y \ x \ base_bits \ y \ base_bits" + "ctes_of s' \ x \ y \ x \ base_bits \ y \ base_bits" apply (erule subtree.induct) apply (frule(1) mdb_src, drule(1) mdb_dest, simp) 
apply (drule(1) mdb_dest, simp) done lemma mdb_parent: - "ctes_of s \ x \ y \ ctes' \ x \ y" + "ctes_of s' \ x \ y \ ctes' \ x \ y" apply (erule subtree.induct) apply (frule(1) mdb_src, frule(1) mdb_dest) apply (rule subtree.direct_parent) @@ -1178,7 +1348,7 @@ lemma mdb_parent: done lemma trancl_next_rev: - "ctes' \ x \\<^sup>+ y \ ctes_of s \ x \\<^sup>+ y" + "ctes' \ x \\<^sup>+ y \ ctes_of s' \ x \\<^sup>+ y" apply (erule converse_trancl_induct) apply (rule r_into_trancl) apply (clarsimp simp: next_unfold') @@ -1188,7 +1358,7 @@ lemma trancl_next_rev: done lemma is_chunk[elim!]: - "is_chunk (ctes_of s) cap x y \ is_chunk ctes' cap x y" + "is_chunk (ctes_of s') cap x y \ is_chunk ctes' cap x y" apply (simp add: is_chunk_def) apply (erule allEI) apply (clarsimp dest!: trancl_next_rev) @@ -1233,17 +1403,18 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def show "pspace_in_kernel_mappings' ?s" using pkm by (simp add: pspace_in_kernel_mappings'_def dom_def) - show "pspace_distinct' ?s" using pd + show pspace_distinct'_state': "pspace_distinct' ?s" using pd by (clarsimp simp add: pspace_distinct'_def ps_clear_def dom_if_None Diff_Int_distrib) - show "valid_objs' ?s" using valid_objs + show "valid_objs' ?s" using valid_objs sym_sched apply (clarsimp simp: valid_objs'_def ran_def) apply (rule_tac p=a in valid_obj') - apply fastforce - apply (frule pspace_alignedD'[OF _ pa]) - apply (frule pspace_distinctD'[OF _ pd]) - apply (clarsimp simp: ko_wp_at'_def) + apply fastforce + apply (frule pspace_alignedD'[OF _ pa]) + apply (frule pspace_distinctD'[OF _ pd]) + apply (clarsimp simp: ko_wp_at'_def) + apply fastforce done from sym_refs show "sym_refs (state_refs_of' ?s)" @@ -1255,19 +1426,6 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply (simp add: refs_notRange[simplified] state_refs_ko_wp_at_eq) done - from vq show "valid_queues ?s" - apply (clarsimp simp: valid_queues_def bitmapQ_defs) - apply (clarsimp simp: valid_queues_no_bitmap_def) - apply (drule spec, drule spec, drule conjunct1, drule(1) bspec) - apply (clarsimp simp: obj_at'_real_def) - apply (frule if_live_then_nonz_capE'[OF iflive, OF ko_wp_at'_weakenE]) - apply (clarsimp simp: projectKOs inQ_def) - apply (clarsimp dest!: ex_nonz_cap_notRange) - done - - from vq' show "valid_queues' ?s" - by (simp add: valid_queues'_def) - show "if_live_then_nonz_cap' ?s" using iflive apply (clarsimp simp: if_live_then_nonz_cap'_def) apply (drule spec, drule(1) mp) @@ -1283,7 +1441,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def intro!: cte_cap) from idle_notRange refs - have "ksIdleThread s \ ?ran" + have "ksIdleThread s' \ ?ran" apply (simp add: cte_wp_at_ctes_of valid_global_refs'_def valid_refs'_def) apply blast done @@ -1410,7 +1568,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def page_map_l4_at'_def) by fastforce - show "valid_irq_node' (irq_node' s) ?s" + show "valid_irq_node' (irq_node' s') ?s" using virq irq_nodes_range by (simp add: valid_irq_node'_def mult.commute mult.left_commute ucast_ucast_mask_8) @@ -1451,7 +1609,7 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def ball_ran_eq) from virqs - show "valid_irq_states' s" . + show "valid_irq_states' s'" . 
from no_0_objs show "no_0_obj' state'" @@ -1462,19 +1620,19 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def by (simp add: irqs_masked'_def) from sa_simp ct_act - show "sch_act_wf (ksSchedulerAction s) state'" + show "sch_act_wf (ksSchedulerAction s') state'" apply (simp add: sch_act_simple_def) - apply (case_tac "ksSchedulerAction s", simp_all add: ct_in_state'_def) + apply (case_tac "ksSchedulerAction s'", simp_all add: ct_in_state'_def) apply (fastforce dest!: st_tcb elim!: pred_tcb'_weakenE) done from invs - have "pspace_domain_valid s" by (simp add: invs'_def valid_state'_def) + have "pspace_domain_valid s'" by (simp add: invs'_def valid_state'_def) thus "pspace_domain_valid state'" by (simp add: pspace_domain_valid_def) from invs - have "valid_machine_state' s" by (simp add: invs'_def valid_state'_def) + have "valid_machine_state' s'" by (simp add: invs'_def valid_state'_def) thus "valid_machine_state' ?state''" apply (clarsimp simp: valid_machine_state'_def) apply (drule_tac x=p in spec) @@ -1529,12 +1687,11 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply (clarsimp dest!: ex_nonz_cap_notRange elim!: ko_wp_at'_weakenE) done - from cdm show "ksCurDomain s \ maxDomain" . + from cdm show "ksCurDomain s' \ maxDomain" . from invs - have urz: "untyped_ranges_zero' s" by (simp add: invs'_def valid_state'_def) - show "untyped_ranges_zero_inv (cteCaps_of state') - (gsUntypedZeroRanges s)" + have urz: "untyped_ranges_zero' s'" by (simp add: invs'_def valid_state'_def) + show "untyped_ranges_zero_inv (cteCaps_of state') (gsUntypedZeroRanges s')" apply (simp add: untyped_zero_ranges_cte_def urz[unfolded untyped_zero_ranges_cte_def, rule_format, symmetric]) apply (clarsimp simp: fun_eq_iff intro!: arg_cong[where f=Ex]) @@ -1544,17 +1701,31 @@ proof (simp add: invs'_def valid_state'_def valid_pspace'_def apply simp done + from vbm + show "valid_bitmaps state'" + by (simp add: valid_bitmaps_def bitmapQ_defs) + + from sym_sched + show "sym_heap (pspace' |> tcb_of' |> tcbSchedNext) (pspace' |> tcb_of' |> tcbSchedPrev)" + using pa pd pspace_distinct'_state' + by (fastforce simp: tcbSchedNexts_of_pspace' tcbSchedPrevs_of_pspace') + + from vsp show "valid_sched_pointers_2 (pspace' |> tcb_of' |> tcbSchedPrev) + (pspace' |> tcb_of' |> tcbSchedNext) + (tcbQueued |< (pspace' |> tcb_of'))" + by (clarsimp simp: valid_sched_pointers_def opt_pred_def opt_map_def) + qed (clarsimp) lemma (in delete_locale) delete_ko_wp_at': - assumes objs: "ko_wp_at' P p s \ ex_nonz_cap_to' p s" + assumes objs: "ko_wp_at' P p s' \ ex_nonz_cap_to' p s'" shows "ko_wp_at' P p state'" using objs by (clarsimp simp: ko_wp_at'_def ps_clear_def dom_if_None Diff_Int_distrib dest!: ex_nonz_cap_notRange) lemma (in delete_locale) null_filter': - assumes descs: "Q (null_filter' (ctes_of s))" + assumes descs: "Q (null_filter' (ctes_of s'))" shows "Q (null_filter' (ctes_of state'))" using descs ifunsafe apply (clarsimp elim!: rsubst[where P=Q]) @@ -1572,7 +1743,7 @@ lemma (in delete_locale) null_filter': done lemma (in delete_locale) delete_ex_cte_cap_to': - assumes exc: "ex_cte_cap_to' p s" + assumes exc: "ex_cte_cap_to' p s'" shows "ex_cte_cap_to' p state'" using exc by (clarsimp elim!: cte_cap) @@ -2002,35 +2173,19 @@ lemma cte_wp_at_top: apply (simp add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply 
(clarsimp simp:cte_check_def) apply (simp add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply (simp add:typeError_def fail_def cte_check_def split:Structures_H.kernel_object.splits)+ done -lemma neq_out_intv: - "\a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" - by simp - -lemma rule_out_intv: - "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a\b \ - \ b \ {a..a + 2 ^ objBitsKO obj - 1}" - apply (drule(1) pspace_distinctD') - apply (subst (asm) ps_clear_def) - apply (drule_tac x = b in orthD2) - apply fastforce - apply (drule neq_out_intv) - apply simp - apply simp - done - lemma locateCTE_monad: assumes ko_wp_at: "\Q dest. \\s. P1 s \ ko_wp_at' (\obj. Q (objBitsKO obj)) dest s \ f @@ -2106,8 +2261,8 @@ proof - apply (drule base_member_set[OF pspace_alignedD']) apply simp apply (simp add:objBitsKO_bounded2[unfolded word_bits_def,simplified]) - apply (clarsimp simp:field_simps) - apply blast + apply (clarsimp simp: field_simps) + apply (elim disjE; fastforce simp: mask_def p_assoc_help) done assume "{(ptr, s)} = fst (locateCTE src s)" @@ -2122,7 +2277,7 @@ qed lemma empty_fail_locateCTE: "empty_fail (locateCTE src)" - by (simp add:locateCTE_def bind_assoc split_def) + by (fastforce simp: locateCTE_def bind_assoc split_def) lemma fail_empty_locateCTE: "snd (locateCTE src s) \ fst (locateCTE src s) = {}" @@ -2659,7 +2814,7 @@ lemma storePDE_det: "ko_wp_at' ((=) (KOArch (KOPDE pde))) ptr s \ storePDE ptr (new_pde::X64_H.pde) s = modify - (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPDE new_pde)))) s" + (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPDE new_pde)))) s" apply (clarsimp simp:ko_wp_at'_def storePDE_def split_def bind_def gets_def return_def get_def setObject_def @@ -2812,7 +2967,7 @@ lemma cte_wp_at_modify_pde: atLeastAtMost_iff shows "\ksPSpace s ptr' = Some (KOArch (KOPDE pde)); pspace_aligned' s;cte_wp_at' \ ptr s\ - \ cte_wp_at' \ ptr (s\ksPSpace := ksPSpace s(ptr' \ (KOArch (KOPDE pde')))\)" + \ cte_wp_at' \ ptr (s\ksPSpace := (ksPSpace s)(ptr' \ (KOArch (KOPDE pde')))\)" apply (simp add:cte_wp_at_obj_cases_mask obj_at'_real_def) apply (frule(1) pspace_alignedD') apply (elim disjE) @@ -3022,7 +3177,7 @@ lemma storePML4E_det: "ko_wp_at' ((=) (KOArch (KOPML4E pml4e))) ptr s \ storePML4E ptr (new_pml4e::X64_H.pml4e) s = modify - (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPML4E new_pml4e)))) s" + (ksPSpace_update (\_. 
(ksPSpace s)(ptr \ KOArch (KOPML4E new_pml4e)))) s" apply (clarsimp simp:ko_wp_at'_def storePML4E_def split_def bind_def gets_def return_def get_def setObject_def @@ -3224,7 +3379,7 @@ lemma cte_wp_at_modify_pml4e: atLeastAtMost_iff shows "\ksPSpace s ptr' = Some (KOArch (KOPML4E pml4e)); pspace_aligned' s;cte_wp_at' \ ptr s\ - \ cte_wp_at' \ ptr (s\ksPSpace := ksPSpace s(ptr' \ (KOArch (KOPML4E pml4e')))\)" + \ cte_wp_at' \ ptr (s\ksPSpace := (ksPSpace s)(ptr' \ (KOArch (KOPML4E pml4e')))\)" apply (simp add:cte_wp_at_obj_cases_mask obj_at'_real_def) apply (frule(1) pspace_alignedD') apply (elim disjE) @@ -3282,7 +3437,7 @@ lemma storePML4E_setCTE_commute: apply (subst modify_specify) apply (rule modify_obj_commute') apply (rule commute_commute[OF locateCTE_commute]) - apply (wp locateCTE_cte_no_fail non_fail_modify + apply (wp locateCTE_cte_no_fail no_fail_modify modify_pml4e_pspace_distinct' modify_pml4e_pspace_aligned'| subst modify_specify)+ apply (clarsimp simp:simpler_modify_def valid_def typ_at'_def) @@ -3553,7 +3708,7 @@ lemma placeNewObject_tcb_at': placeNewObject ptr (makeObject::tcb) 0 \\_ s. tcb_at' ptr s \" apply (simp add: placeNewObject_def placeNewObject'_def split_def) - apply (wp hoare_unless_wp |wpc | simp add:alignError_def)+ + apply (wp unless_wp |wpc | simp add:alignError_def)+ by (auto simp: obj_at'_def is_aligned_mask lookupAround2_None1 lookupAround2_char1 field_simps objBits_simps projectKO_opt_tcb projectKO_def return_def ps_clear_def @@ -4001,7 +4156,7 @@ proof - apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) apply simp+ apply (simp add:word_le_sub1) - apply (wp haskell_assert_wp hoare_unless_wp | wpc + apply (wp haskell_assert_wp unless_wp | wpc | simp add:alignError_def if_apply_def2 del: fun_upd_apply hoare_fail_any)+ apply (rule impI) apply (subgoal_tac @@ -4547,7 +4702,7 @@ lemma createObjects_Cons: apply simp apply (wp haskell_assert_wp | wpc)+ apply simp - apply (wp hoare_unless_wp |clarsimp)+ + apply (wp unless_wp |clarsimp)+ apply (drule range_cover.aligned) apply (simp add:is_aligned_mask) done @@ -4831,7 +4986,7 @@ proof - apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) apply simp+ apply (simp add:word_le_sub1) - apply (wp haskell_assert_wp hoare_unless_wp |wpc + apply (wp haskell_assert_wp unless_wp |wpc |simp add:alignError_def del:fun_upd_apply)+ apply (rule conjI) apply (rule impI) @@ -4893,7 +5048,7 @@ lemma createTCBs_tcb_at': \\rv s. (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s)\" apply (simp add:createObjects'_def split_def alignError_def) - apply (wp hoare_unless_wp |wpc)+ + apply (wp unless_wp |wpc)+ apply (subst data_map_insert_def[symmetric])+ apply clarsimp apply (subgoal_tac "(\x\of_nat n. @@ -4906,7 +5061,6 @@ lemma createTCBs_tcb_at': apply simp apply simp apply (clarsimp simp: retype_obj_at_disj') - apply (clarsimp simp: projectKO_opt_tcb) apply (clarsimp simp: new_cap_addrs_def image_def) apply (drule_tac x = "unat x" in bspec) apply (simp add:objBits_simps' shiftl_t2n) @@ -5704,7 +5858,7 @@ lemma createObject_pspace_aligned_distinct': createObject ty ptr us d \\xa s. 
pspace_aligned' s \ pspace_distinct' s\" apply (rule hoare_pre) - apply (wp placeNewObject_pspace_aligned' hoare_unless_wp + apply (wp placeNewObject_pspace_aligned' unless_wp placeNewObject_pspace_distinct' | simp add:X64_H.createObject_def Retype_H.createObject_def objBits_simps diff --git a/proof/refine/X64/EmptyFail.thy b/proof/refine/X64/EmptyFail.thy index 6a0f0f136f..7c62a3f9ce 100644 --- a/proof/refine/X64/EmptyFail.thy +++ b/proof/refine/X64/EmptyFail.thy @@ -19,12 +19,12 @@ lemma empty_fail_projectKO [simp, intro!]: lemma empty_fail_alignCheck [intro!, wp, simp]: "empty_fail (alignCheck a b)" unfolding alignCheck_def - by (simp add: alignError_def) + by (fastforce simp: alignError_def) lemma empty_fail_magnitudeCheck [intro!, wp, simp]: "empty_fail (magnitudeCheck a b c)" unfolding magnitudeCheck_def - by (simp split: option.splits) + by (fastforce split: option.splits) lemma empty_fail_loadObject_default [intro!, wp, simp]: shows "empty_fail (loadObject_default x b c d)" @@ -33,7 +33,7 @@ lemma empty_fail_loadObject_default [intro!, wp, simp]: lemma empty_fail_threadGet [intro!, wp, simp]: "empty_fail (threadGet f p)" - by (simp add: threadGet_def getObject_def split_def) + by (fastforce simp: threadGet_def getObject_def split_def) lemma empty_fail_getCTE [intro!, wp, simp]: "empty_fail (getCTE slot)" @@ -47,12 +47,12 @@ lemma empty_fail_getCTE [intro!, wp, simp]: lemma empty_fail_updateObject_cte [intro!, wp, simp]: "empty_fail (updateObject (v :: cte) ko a b c)" - by (simp add: updateObject_cte typeError_def unless_def split: kernel_object.splits ) + by (fastforce simp: updateObject_cte typeError_def unless_def split: kernel_object.splits ) lemma empty_fail_setCTE [intro!, wp, simp]: "empty_fail (setCTE p cte)" unfolding setCTE_def - by (simp add: setObject_def split_def) + by (fastforce simp: setObject_def split_def) lemma empty_fail_updateCap [intro!, wp, simp]: "empty_fail (updateCap p f)" @@ -64,36 +64,35 @@ lemma empty_fail_updateMDB [intro!, wp, simp]: lemma empty_fail_getSlotCap [intro!, wp, simp]: "empty_fail (getSlotCap a)" - unfolding getSlotCap_def by simp + unfolding getSlotCap_def by fastforce context begin interpretation Arch . (*FIXME: arch_split*) lemma empty_fail_getObject: - assumes x: "(\b c d. empty_fail (loadObject x b c d::'a :: pspace_storable kernel))" + assumes "\b c d. 
empty_fail (loadObject x b c d::'a :: pspace_storable kernel)" shows "empty_fail (getObject x :: 'a :: pspace_storable kernel)" apply (simp add: getObject_def split_def) - apply (safe intro!: empty_fail_bind empty_fail_gets empty_fail_assert_opt) - apply (rule x) + apply (safe intro!: assms) done lemma empty_fail_updateTrackedFreeIndex [intro!, wp, simp]: shows "empty_fail (updateTrackedFreeIndex p idx)" - by (simp add: updateTrackedFreeIndex_def) + by (fastforce simp add: updateTrackedFreeIndex_def) lemma empty_fail_updateNewFreeIndex [intro!, wp, simp]: shows "empty_fail (updateNewFreeIndex p)" apply (simp add: updateNewFreeIndex_def) - apply (safe intro!: empty_fail_bind) + apply safe apply (simp split: capability.split) done lemma empty_fail_insertNewCap [intro!, wp, simp]: "empty_fail (insertNewCap p p' cap)" - unfolding insertNewCap_def by simp + unfolding insertNewCap_def by fastforce lemma empty_fail_getIRQSlot [intro!, wp, simp]: "empty_fail (getIRQSlot irq)" - by (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv) + by (fastforce simp: getIRQSlot_def getInterruptState_def locateSlot_conv) lemma empty_fail_getObject_ntfn [intro!, wp, simp]: "empty_fail (getObject p :: Structures_H.notification kernel)" @@ -107,15 +106,15 @@ lemma empty_fail_lookupIPCBuffer [intro!, wp, simp]: "empty_fail (lookupIPCBuffer a b)" by (clarsimp simp: lookupIPCBuffer_def Let_def getThreadBufferSlot_def locateSlot_conv - split: capability.splits arch_capability.splits | wp | wpc)+ + split: capability.splits arch_capability.splits | wp | wpc | safe)+ lemma empty_fail_updateObject_default [intro!, wp, simp]: "empty_fail (updateObject_default v ko a b c)" - by (simp add: updateObject_default_def typeError_def unless_def split: kernel_object.splits ) + by (fastforce simp: updateObject_default_def typeError_def unless_def split: kernel_object.splits) lemma empty_fail_threadSet [intro!, wp, simp]: "empty_fail (threadSet f p)" - by (simp add: threadSet_def getObject_def setObject_def split_def) + by (fastforce simp: threadSet_def getObject_def setObject_def split_def) lemma empty_fail_getThreadState[iff]: "empty_fail (getThreadState t)" diff --git a/proof/refine/X64/EmptyFail_H.thy b/proof/refine/X64/EmptyFail_H.thy index f8aa4a890f..5bf5b76656 100644 --- a/proof/refine/X64/EmptyFail_H.thy +++ b/proof/refine/X64/EmptyFail_H.thy @@ -17,19 +17,19 @@ context begin interpretation Arch . (*FIXME: arch_split*) lemmas forM_empty_fail[intro!, wp, simp] = empty_fail_mapM[simplified forM_def[symmetric]] lemmas forM_x_empty_fail[intro!, wp, simp] = empty_fail_mapM_x[simplified forM_x_def[symmetric]] -lemmas forME_x_empty_fail[intro!, wp, simp] = mapME_x_empty_fail[simplified forME_x_def[symmetric]] +lemmas forME_x_empty_fail[intro!, wp, simp] = empty_fail_mapME_x[simplified forME_x_def[symmetric]] lemma withoutPreemption_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (withoutPreemption m)" - by (simp add: withoutPreemption_def) + by simp lemma withoutFailure_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (withoutFailure m)" - by (simp add: withoutFailure_def) + by simp lemma catchFailure_empty_fail[intro!, wp, simp]: "\ empty_fail f; \x. 
empty_fail (g x) \ \ empty_fail (catchFailure f g)" - by (simp add: catchFailure_def empty_fail_catch) + by (simp add: empty_fail_catch) lemma emptyOnFailure_empty_fail[intro!, wp, simp]: "empty_fail m \ empty_fail (emptyOnFailure m)" @@ -86,9 +86,6 @@ proof (induct arbitrary: s rule: resolveAddressBits.induct) lemmas resolveAddressBits_empty_fail[intro!, wp, simp] = resolveAddressBits_spec_empty_fail[THEN use_spec_empty_fail] -crunch (empty_fail) empty_fail[intro!, wp, simp]: lookupIPCBuffer -(simp:Let_def) - declare ef_dmo'[intro!, wp, simp] lemma empty_fail_getObject_ep [intro!, wp, simp]: @@ -172,14 +169,14 @@ crunch (empty_fail) empty_fail[intro!, wp, simp]: setBoundNotification, setNotif crunch (empty_fail) empty_fail[intro!, wp, simp]: cancelIPC, setThreadState, tcbSchedDequeue, setupReplyMaster, isStopped, possibleSwitchTo, tcbSchedAppend -(simp: Let_def) + (simp: Let_def wp: empty_fail_getObject) crunch (empty_fail) "_H_empty_fail"[intro!, wp, simp]: "ThreadDecls_H.suspend" (ignore_del: ThreadDecls_H.suspend) lemma ThreadDecls_H_restart_empty_fail[intro!, wp, simp]: "empty_fail (ThreadDecls_H.restart target)" - by (simp add:restart_def) + by (fastforce simp: restart_def) crunch (empty_fail) empty_fail[intro!, wp, simp]: finaliseCap, preemptionPoint, capSwapForDelete (wp: empty_fail_catch simp: Let_def) @@ -217,18 +214,14 @@ lemmas finaliseSlot_empty_fail[intro!, wp, simp] = lemma checkCapAt_empty_fail[intro!, wp, simp]: "empty_fail action \ empty_fail (checkCapAt cap ptr action)" - by (simp add: checkCapAt_def) + by (fastforce simp: checkCapAt_def) lemma assertDerived_empty_fail[intro!, wp, simp]: "empty_fail f \ empty_fail (assertDerived src cap f)" - by (simp add: assertDerived_def) + by (fastforce simp: assertDerived_def) crunch (empty_fail) empty_fail[intro!, wp, simp]: cteDelete -lemma liftE_empty_fail[intro!, wp, simp]: - "empty_fail f \ empty_fail (liftE f)" - by simp - lemma spec_empty_fail_unlessE': "\ \ P \ spec_empty_fail f s \ \ spec_empty_fail (unlessE P f) s" by (simp add:unlessE_def spec_empty_returnOk) @@ -258,7 +251,7 @@ lemma Syscall_H_syscall_empty_fail[intro!, wp, simp]: lemma catchError_empty_fail[intro!, wp, simp]: "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catchError f g)" - by (simp add: catchError_def handle_empty_fail) + by fastforce crunch (empty_fail) empty_fail[intro!, wp, simp]: chooseThread, getDomainTime, nextDomain, isHighestPrio @@ -275,18 +268,18 @@ crunch (empty_fail) empty_fail[wp, simp]: setMRs, setMessageInfo lemma empty_fail_portIn[intro!, wp, simp]: "empty_fail a \ empty_fail (portIn a)" - by (simp add: portIn_def) + by (fastforce simp: portIn_def) lemma empty_fail_portOut[intro!, wp, simp]: "empty_fail (w a) \ empty_fail (portOut w a)" - by (simp add: portOut_def) + by (fastforce simp: portOut_def) crunch (empty_fail) empty_fail: callKernel (wp: empty_fail_catch) theorem call_kernel_serial: "\ (einvs and (\s. event \ Interrupt \ ct_running s) and (ct_running or ct_idle) and - (\s. scheduler_action s = resume_cur_thread) and + schact_is_rct and (\s. 0 < domain_time s \ valid_domain_list s)) s; \s'. (s, s') \ state_relation \ (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and diff --git a/proof/refine/X64/Finalise_R.thy b/proof/refine/X64/Finalise_R.thy index 03ca276fd5..79bccd1765 100644 --- a/proof/refine/X64/Finalise_R.thy +++ b/proof/refine/X64/Finalise_R.thy @@ -80,20 +80,10 @@ crunch ksRQL1[wp]: emptySlot "\s. P (ksReadyQueuesL1Bitmap s)" crunch ksRQL2[wp]: emptySlot "\s. 
P (ksReadyQueuesL2Bitmap s)" crunch obj_at'[wp]: postCapDeletion "obj_at' P p" -lemmas postCapDeletion_valid_queues[wp] = - valid_queues_lift [OF postCapDeletion_obj_at' - postCapDeletion_pred_tcb_at' - postCapDeletion_ksRQ] - crunch inQ[wp]: clearUntypedFreeIndex "\s. P (obj_at' (inQ d p) t s)" crunch tcbDomain[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbDomain tcb)) t" crunch tcbPriority[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbPriority tcb)) t" -lemma emptySlot_queues [wp]: - "\Invariants_H.valid_queues\ emptySlot sl opt \\rv. Invariants_H.valid_queues\" - unfolding emptySlot_def - by (wp | wpcw | wp valid_queues_lift | simp)+ - crunch nosch[wp]: emptySlot "\s. P (ksSchedulerAction s)" crunch ksCurDomain[wp]: emptySlot "\s. P (ksCurDomain s)" @@ -1179,8 +1169,7 @@ definition "removeable' sl \ \s cap. (\p. p \ sl \ cte_wp_at' (\cte. capMasterCap (cteCap cte) = capMasterCap cap) p s) \ ((\p \ cte_refs' cap (irq_node' s). p \ sl \ cte_wp_at' (\cte. cteCap cte = NullCap) p s) - \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s) - \ (\t \ threadCapRefs cap. \p. t \ set (ksReadyQueues s p)))" + \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s))" lemma not_Final_removeable: "\ isFinal cap sl (cteCaps_of s) @@ -1296,7 +1285,7 @@ crunches deletedIRQHandler, getSlotCap, clearUntypedFreeIndex, updateMDB, getCTE end lemma emptySlot_cteCaps_of: - "\\s. P (cteCaps_of s(p \ NullCap))\ + "\\s. P ((cteCaps_of s)(p \ NullCap))\ emptySlot p opt \\rv s. P (cteCaps_of s)\" apply (simp add: emptySlot_def case_Null_If) @@ -1484,11 +1473,6 @@ crunch irq_states' [wp]: emptySlot valid_irq_states' crunch no_0_obj' [wp]: emptySlot no_0_obj' (wp: crunch_wps) -crunch valid_queues'[wp]: setInterruptState "valid_queues'" - (simp: valid_queues'_def) - -crunch valid_queues'[wp]: emptySlot "valid_queues'" - end lemma deletedIRQHandler_irqs_masked'[wp]: @@ -1561,7 +1545,7 @@ lemma emptySlot_untyped_ranges[wp]: emptySlot sl opt \\rv. untyped_ranges_zero'\" apply (simp add: emptySlot_def case_Null_If) apply (rule hoare_pre) - apply (rule hoare_seq_ext) + apply (rule bind_wp) apply (rule untyped_ranges_zero_lift) apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of | wpc | simp add: clearUntypedFreeIndex_def updateTrackedFreeIndex_def @@ -1594,6 +1578,13 @@ lemma emptySlot_valid_arch'[wp]: by (wpsimp simp: emptySlot_def cte_wp_at_ctes_of wp: getCTE_wp hoare_drop_imps hoare_vcg_ex_lift) +crunches emptySlot + for valid_bitmaps[wp]: valid_bitmaps + and tcbQueued_opt_pred[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and sched_projs[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + (wp: valid_bitmaps_lift) + lemma emptySlot_invs'[wp]: "\\s. invs' s \ cte_wp_at' (\cte. 
removeable' sl s (cteCap cte)) sl s \ (info \ NullCap \ post_cap_delete_pre' info sl (cteCaps_of s) )\ @@ -1646,8 +1637,8 @@ lemma arch_postCapDeletion_corres: lemma postCapDeletion_corres: "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) - apply (corressimp corres: deletedIRQHandler_corres) - by (corressimp corres: arch_postCapDeletion_corres) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) lemma set_cap_trans_state: "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" @@ -1707,7 +1698,7 @@ lemma emptySlot_corres: defer apply wpsimp+ apply (rule corres_no_failI) - apply (rule no_fail_pre, wp static_imp_wp) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (rule conjI, clarsimp) @@ -2420,16 +2411,24 @@ lemma tcb_st_not_Bound: "(p, TCBBound) \ tcb_st_refs_of' ts" by (auto simp: tcb_st_refs_of'_def split: Structures_H.thread_state.split) +crunches setBoundNotification + for valid_bitmaps[wp]: valid_bitmaps + and tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and tcbQueued[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: valid_bitmaps_lift) + lemma unbindNotification_invs[wp]: "\invs'\ unbindNotification tcb \\rv. invs'\" apply (simp add: unbindNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ gbn_sp']) + apply (rule bind_wp[OF _ gbn_sp']) apply (case_tac ntfnPtr, clarsimp, wp, clarsimp) apply clarsimp - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift - irqs_masked_lift setBoundNotification_ct_not_inQ + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' valid_irq_node_lift + irqs_masked_lift setBoundNotification_ct_not_inQ sym_heap_sched_pointers_lift untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ apply (rule conjI) apply (clarsimp elim!: obj_atE' @@ -2469,9 +2468,9 @@ lemma ntfn_bound_tcb_at': lemma unbindMaybeNotification_invs[wp]: "\invs'\ unbindMaybeNotification ntfnptr \\rv. invs'\" apply (simp add: unbindMaybeNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sym_heap_sched_pointers_lift valid_irq_node_lift irqs_masked_lift setBoundNotification_ct_not_inQ untyped_ranges_zero_lift | wpc | clarsimp simp: cteCaps_of_def o_def)+ @@ -2545,11 +2544,12 @@ lemma deleteASID_invs'[wp]: apply (simp add: deleteASID_def cong: option.case_cong) apply (rule hoare_pre) apply (wp | wpc)+ - apply (rule_tac Q="\rv. valid_obj' (injectKO rv) and invs'" - in hoare_post_imp) + apply (rule_tac Q="\rv. 
valid_obj' (injectKO rv) and invs'" + in hoare_post_imp) + apply (rename_tac rv s) apply (clarsimp split: if_split_asm del: subsetI) apply (simp add: fun_upd_def[symmetric] valid_obj'_def) - apply (case_tac r, simp) + apply (case_tac rv, simp) apply (subst inv_f_f, rule inj_onI, simp)+ apply (rule conjI) apply clarsimp @@ -2621,11 +2621,11 @@ crunches finaliseCapTrue_standin, unbindNotification lemma cteDeleteOne_cteCaps_of: "\\s. (cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap)))\ + P ((cteCaps_of s)(p \ NullCap)))\ cteDeleteOne p \\rv s. P (cteCaps_of s)\" apply (simp add: cteDeleteOne_def unless_def split_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (case_tac "\final. finaliseCap (cteCap cte) final True = fail") apply (simp add: finaliseCapTrue_standin_simple_def) apply wp @@ -2651,7 +2651,6 @@ lemma cteDeleteOne_isFinal: lemmas setEndpoint_cteCaps_of[wp] = ctes_of_cteCaps_of_lift [OF set_ep_ctes_of] lemmas setNotification_cteCaps_of[wp] = ctes_of_cteCaps_of_lift [OF set_ntfn_ctes_of] -lemmas setQueue_cteCaps_of[wp] = ctes_of_cteCaps_of_lift [OF setQueue_ctes_of] lemmas threadSet_cteCaps_of = ctes_of_cteCaps_of_lift [OF threadSet_ctes_of] crunch isFinal: suspend, prepareThreadDelete "\s. isFinal cap slot (cteCaps_of s)" @@ -2735,16 +2734,6 @@ lemma unbindNotification_valid_objs'_helper': by (clarsimp simp: valid_bound_tcb'_def valid_ntfn'_def split: option.splits ntfn.splits) -lemma typ_at'_valid_tcb'_lift: - assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" - shows "\\s. valid_tcb' tcb s\ f \\rv s. valid_tcb' tcb s\" - including no_pre - apply (simp add: valid_tcb'_def) - apply (case_tac "tcbState tcb", simp_all add: valid_tcb_state'_def split_def valid_bound_ntfn'_def) - apply (wp hoare_vcg_const_Ball_lift typ_at_lifts[OF P] - | case_tac "tcbBoundNotification tcb", simp_all)+ - done - lemmas setNotification_valid_tcb' = typ_at'_valid_tcb'_lift [OF setNotification_typ_at'] lemma unbindNotification_valid_objs'[wp]: @@ -2766,7 +2755,7 @@ lemma unbindMaybeNotification_valid_objs'[wp]: unbindMaybeNotification t \\rv. valid_objs'\" apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp threadSet_valid_objs' gbn_wp' set_ntfn_valid_objs' hoare_vcg_all_lift setNotification_valid_tcb' getNotification_wp @@ -2806,7 +2795,7 @@ lemma unbindMaybeNotification_obj_at'_bound: unbindMaybeNotification r \\_ s. obj_at' (\ntfn. ntfnBoundTCB ntfn = None) r s\" apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp obj_at_setObject2 | wpc @@ -2854,7 +2843,7 @@ lemma capDeleteOne_bound_tcb_at': lemma cancelIPC_bound_tcb_at'[wp]: "\bound_tcb_at' P tptr\ cancelIPC t \\rv. bound_tcb_at' P tptr\" apply (simp add: cancelIPC_def Let_def) - apply (rule hoare_seq_ext[OF _ gts_sp']) + apply (rule bind_wp[OF _ gts_sp']) apply (case_tac "state", simp_all) defer 2 apply (rule hoare_pre) @@ -2879,9 +2868,7 @@ lemma unbindNotification_bound_tcb_at': done crunches unbindNotification, unbindMaybeNotification - for valid_queues[wp]: "Invariants_H.valid_queues" - and weak_sch_act_wf[wp]: "\s. weak_sch_act_wf (ksSchedulerAction s) s" - (wp: sbn_valid_queues) + for weak_sch_act_wf[wp]: "\s. 
weak_sch_act_wf (ksSchedulerAction s) s" lemma unbindNotification_tcb_at'[wp]: "\tcb_at' t'\ unbindNotification t \\rv. tcb_at' t'\" @@ -2920,10 +2907,42 @@ crunch valid_cap'[wp]: prepareThreadDelete "valid_cap' cap" crunch invs[wp]: prepareThreadDelete "invs'" (ignore: doMachineOp) crunch obj_at'[wp]: prepareThreadDelete "\s. P' (obj_at' P p s)" - (wp: hoare_whenE_wp simp: crunch_simps) + (wp: whenE_wp simp: crunch_simps) end +lemma tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\\s. \ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + tcbQueueRemove q t + \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + supply projectKOs[simp] + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + by (fastforce dest!: heap_ls_last_None + simp: list_queue_relation_def prev_queue_head_def queue_end_valid_def + obj_at'_def opt_map_def ps_clear_def objBits_simps + split: if_splits) + +lemma tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at': + "\valid_sched_pointers\ + tcbSchedDequeue t + \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + unfolding tcbSchedDequeue_def + supply projectKOs[simp] + by (wpsimp wp: tcbQueueRemove_tcbSchedNext_tcbSchedPrev_None_obj_at' threadGet_wp) + (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def + valid_sched_pointers_def opt_pred_def opt_map_def + split: option.splits) + +crunches updateRestartPC, cancelIPC + for valid_sched_pointers[wp]: valid_sched_pointers + (simp: crunch_simps wp: crunch_wps) + +lemma suspend_tcbSchedNext_tcbSchedPrev_None: + "\invs'\ suspend t \\_ s. obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) t s\" + unfolding suspend_def + by (wpsimp wp: hoare_drop_imps tcbSchedDequeue_tcbSchedNext_tcbSchedPrev_None_obj_at') + lemma (in delete_one_conc_pre) finaliseCap_replaceable: "\\s. invs' s \ cte_wp_at' (\cte. cteCap cte = cap) slot s \ (final_matters' cap \ (final = isFinal cap slot (cteCaps_of s))) @@ -2943,21 +2962,22 @@ lemma (in delete_one_conc_pre) finaliseCap_replaceable: \ (\p \ threadCapRefs cap. st_tcb_at' ((=) Inactive) p s \ obj_at' (Not \ tcbQueued) p s \ bound_tcb_at' ((=) None) p s - \ (\pr. p \ set (ksReadyQueues s pr))))\" + \ obj_at' (\tcb. tcbSchedNext tcb = None \ tcbSchedPrev tcb = None) p s))\" apply (simp add: finaliseCap_def Let_def getThreadCSpaceRoot cong: if_cong split del: if_split) apply (rule hoare_pre) apply (wp prepares_delete_helper'' [OF cancelAllIPC_unlive] prepares_delete_helper'' [OF cancelAllSignals_unlive] - suspend_isFinal prepareThreadDelete_unqueued prepareThreadDelete_nonq + suspend_isFinal prepareThreadDelete_unqueued prepareThreadDelete_inactive prepareThreadDelete_isFinal - suspend_makes_inactive suspend_nonq + suspend_makes_inactive deletingIRQHandler_removeable' deletingIRQHandler_final[where slot=slot ] unbindMaybeNotification_obj_at'_bound getNotification_wp suspend_bound_tcb_at' unbindNotification_bound_tcb_at' + suspend_tcbSchedNext_tcbSchedPrev_None | simp add: isZombie_Null isThreadCap_threadCapRefs_tcbptr isArchObjectCap_Cap_capCap | (rule hoare_strengthen_post [OF arch_finaliseCap_removeable[where slot=slot]], @@ -2993,7 +3013,7 @@ crunch ctes_of[wp]: cancelSignal "\s. P (ctes_of s)" lemma cancelIPC_cteCaps_of: "\\s. (\p. cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap))) \ + P ((cteCaps_of s)(p \ NullCap))) \ P (cteCaps_of s)\ cancelIPC t \\rv s. 
P (cteCaps_of s)\" @@ -3024,7 +3044,9 @@ lemma cancelIPC_cte_wp_at': apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of x) done -crunch cte_wp_at'[wp]: tcbSchedDequeue "cte_wp_at' P p" +crunches tcbSchedDequeue + for cte_wp_at'[wp]: "cte_wp_at' P p" + (wp: crunch_wps) lemma suspend_cte_wp_at': assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" @@ -3122,7 +3144,7 @@ lemma cteDeleteOne_reply_pred_tcb_at: cteDeleteOne slot \\rv. pred_tcb_at' proj P t\" apply (simp add: cteDeleteOne_def unless_def isFinalCapability_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (clarsimp simp: cte_wp_at_ctes_of when_def isCap_simps Let_def finaliseCapTrue_standin_def) @@ -3143,32 +3165,13 @@ end lemma rescheduleRequired_sch_act_not[wp]: "\\\ rescheduleRequired \\rv. sch_act_not t\" apply (simp add: rescheduleRequired_def setSchedulerAction_def) - apply (wp hoare_post_taut | simp)+ + apply (wp hoare_TrueI | simp)+ done crunch sch_act_not[wp]: cteDeleteOne "sch_act_not t" (simp: crunch_simps case_Null_If unless_def wp: crunch_wps getObject_inv loadObject_default_inv) -lemma cancelAllIPC_mapM_x_valid_queues: - "\Invariants_H.valid_queues and valid_objs' and (\s. \t\set q. tcb_at' t s)\ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv. Invariants_H.valid_queues\" - apply (rule_tac R="\_ s. (\t\set q. tcb_at' t s) \ valid_objs' s" - in hoare_post_add) - apply (rule hoare_pre) - apply (rule mapM_x_wp') - apply (rule hoare_name_pre_state) - apply (wp hoare_vcg_const_Ball_lift - tcbSchedEnqueue_valid_queues tcbSchedEnqueue_not_st - sts_valid_queues sts_st_tcb_at'_cases setThreadState_not_st - | simp - | ((elim conjE)?, drule (1) bspec, clarsimp elim!: obj_at'_weakenE simp: valid_tcb_state'_def))+ - done - lemma cancelAllIPC_mapM_x_weak_sch_act: "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ mapM_x (\t. do @@ -3182,13 +3185,15 @@ lemma cancelAllIPC_mapM_x_weak_sch_act: done lemma cancelAllIPC_mapM_x_valid_objs': - "\valid_objs'\ + "\valid_objs' and pspace_aligned' and pspace_distinct'\ mapM_x (\t. do y \ setThreadState Structures_H.thread_state.Restart t; tcbSchedEnqueue t od) q \\_. valid_objs'\" - apply (wp mapM_x_wp' sts_valid_objs') + apply (rule hoare_strengthen_post) + apply (rule mapM_x_wp') + apply (wpsimp wp: sts_valid_objs') apply (clarsimp simp: valid_tcb_state'_def)+ done @@ -3199,57 +3204,29 @@ lemma cancelAllIPC_mapM_x_tcbDomain_obj_at': tcbSchedEnqueue t od) q \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" -apply (wp mapM_x_wp' tcbSchedEnqueue_not_st setThreadState_oa_queued | simp)+ -done + by (wpsimp wp: mapM_x_wp' setThreadState_oa_queued) lemma rescheduleRequired_oa_queued': "\obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\ rescheduleRequired \\_. obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\" -apply (simp add: rescheduleRequired_def) -apply (wp tcbSchedEnqueue_not_st - | wpc - | simp)+ -done + by (wpsimp simp: rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def) lemma cancelAllIPC_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelAllIPC epptr \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" -apply (simp add: cancelAllIPC_def) -apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - rescheduleRequired_oa_queued' cancelAllIPC_mapM_x_tcbDomain_obj_at' - getEndpoint_wp - | wpc - | simp)+ -done - -lemma cancelAllIPC_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelAllIPC ep_ptr - \\rv. Invariants_H.valid_queues\" - apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act - set_ep_valid_objs' getEndpoint_wp) - apply (clarsimp simp: valid_ep'_def) - apply (drule (1) ko_at_valid_objs') - apply (auto simp: valid_obj'_def valid_ep'_def valid_tcb'_def projectKOs - split: endpoint.splits - elim: valid_objs_valid_tcbE) - done + unfolding cancelAllIPC_def + by (wpsimp wp: hoare_vcg_conj_lift hoare_vcg_const_Ball_lift getEndpoint_wp + rescheduleRequired_oa_queued' cancelAllIPC_mapM_x_tcbDomain_obj_at') lemma cancelAllSignals_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelAllSignals epptr \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" -apply (simp add: cancelAllSignals_def) -apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - rescheduleRequired_oa_queued' cancelAllIPC_mapM_x_tcbDomain_obj_at' - getNotification_wp - | wpc - | simp)+ -done + unfolding cancelAllSignals_def + by (wpsimp wp: hoare_vcg_conj_lift hoare_vcg_const_Ball_lift getNotification_wp + rescheduleRequired_oa_queued' cancelAllIPC_mapM_x_tcbDomain_obj_at') lemma unbindMaybeNotification_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ @@ -3259,38 +3236,6 @@ lemma unbindMaybeNotification_tcbDomain_obj_at': apply (wp setBoundNotification_oa_queued getNotification_wp gbn_wp' | wpc | simp)+ done -lemma cancelAllSignals_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelAllSignals ntfn - \\rv. Invariants_H.valid_queues\" - apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) - apply (case_tac "ntfnObj ntfna", simp_all) - apply (wp, simp)+ - apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift - cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act - set_ntfn_valid_objs' - | simp)+ - apply (clarsimp simp: valid_ep'_def) - apply (drule (1) ko_at_valid_objs') - apply (auto simp: valid_obj'_def valid_ntfn'_def valid_tcb'_def projectKOs - split: endpoint.splits - elim: valid_objs_valid_tcbE) - done - -lemma finaliseCapTrue_standin_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - finaliseCapTrue_standin cap final - \\_. Invariants_H.valid_queues\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp | clarsimp | wpc)+ - done - - -crunch valid_queues[wp]: isFinalCapability "Invariants_H.valid_queues" - (simp: crunch_simps) - crunch sch_act[wp]: isFinalCapability "\s. sch_act_wf (ksSchedulerAction s) s" (simp: crunch_simps) @@ -3298,96 +3243,6 @@ crunch weak_sch_act[wp]: isFinalCapability "\s. weak_sch_act_wf (ksSchedulerAction s) s" (simp: crunch_simps) -lemma cteDeleteOne_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cteDeleteOne sl - \\_. Invariants_H.valid_queues\" (is "\?PRE\ _ \_\") - apply (simp add: cteDeleteOne_def unless_def split_def) - apply (wp isFinalCapability_inv getCTE_wp | rule hoare_drop_imps | simp)+ - apply (clarsimp simp: cte_wp_at'_def) - done - -lemma valid_inQ_queues_lift: - assumes tat: "\d p tcb. \obj_at' (inQ d p) tcb\ f \\_. obj_at' (inQ d p) tcb\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. 
P (ksReadyQueues s)\" - shows "\valid_inQ_queues\ f \\_. valid_inQ_queues\" - proof - - show ?thesis - apply (clarsimp simp: valid_def valid_inQ_queues_def) - apply safe - apply (rule use_valid [OF _ tat], assumption) - apply (drule spec, drule spec, erule conjE, erule bspec) - apply (rule ccontr) - apply (erule notE[rotated], erule(1) use_valid [OF _ prq]) - apply (erule use_valid [OF _ prq]) - apply simp - done - qed - -lemma emptySlot_valid_inQ_queues [wp]: - "\valid_inQ_queues\ emptySlot sl opt \\rv. valid_inQ_queues\" - unfolding emptySlot_def - by (wp opt_return_pres_lift | wpcw | wp valid_inQ_queues_lift | simp)+ - -crunch valid_inQ_queues[wp]: emptySlot valid_inQ_queues - (simp: crunch_simps) - -lemma cancelAllIPC_mapM_x_valid_inQ_queues: - "\valid_inQ_queues\ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv. valid_inQ_queues\" - apply (rule mapM_x_wp_inv) - apply (wp sts_valid_queues [where st="Structures_H.thread_state.Restart", simplified] - setThreadState_st_tcb) - done - -lemma cancelAllIPC_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cancelAllIPC ep_ptr - \\rv. valid_inQ_queues\" - apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (wp cancelAllIPC_mapM_x_valid_inQ_queues) - apply (wp hoare_conjI hoare_drop_imp | simp)+ - done - -lemma cancelAllSignals_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cancelAllSignals ntfn - \\rv. valid_inQ_queues\" - apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) - apply (case_tac "ntfnObj ntfna", simp_all) - apply (wp, simp)+ - apply (wp cancelAllIPC_mapM_x_valid_inQ_queues)+ - apply (simp) - done - -crunches unbindNotification, unbindMaybeNotification - for valid_inQ_queues[wp]: "valid_inQ_queues" - -lemma finaliseCapTrue_standin_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - finaliseCapTrue_standin cap final - \\_. valid_inQ_queues\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp | clarsimp | wpc)+ - done - -crunch valid_inQ_queues[wp]: isFinalCapability valid_inQ_queues - (simp: crunch_simps) - -lemma cteDeleteOne_valid_inQ_queues[wp]: - "\valid_inQ_queues\ - cteDeleteOne sl - \\_. valid_inQ_queues\" - apply (simp add: cteDeleteOne_def unless_def) - apply (wpsimp wp: hoare_drop_imp hoare_vcg_all_lift) - done - crunch ksCurDomain[wp]: cteDeleteOne "\s. P (ksCurDomain s)" (wp: crunch_wps simp: crunch_simps unless_def) @@ -3433,7 +3288,7 @@ lemma cteDeleteOne_invs[wp]: subgoal by auto subgoal by (auto dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def projectKOs ko_wp_at'_def) - apply (wp isFinalCapability_inv getCTE_wp' static_imp_wp + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp | wp (once) isFinal[where x=ptr])+ apply (fastforce simp: cte_wp_at_ctes_of) done @@ -3451,9 +3306,6 @@ lemma deletingIRQHandler_invs' [wp]: apply simp done -crunches unbindNotification, unbindMaybeNotification - for tcb_at'[wp]: "tcb_at' t" - lemma finaliseCap_invs: "\invs' and sch_act_simple and valid_cap' cap and cte_wp_at' (\cte. 
cteCap cte = cap) sl\ @@ -3658,7 +3510,7 @@ lemma unbindNotification_corres: apply (clarsimp elim!: obj_at_valid_objsE dest!: bound_tcb_at_state_refs_ofD invs_valid_objs simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def - valid_tcb_def valid_bound_ntfn_def + valid_tcb_def valid_bound_ntfn_def invs_distinct invs_psp_aligned split: option.splits) apply (clarsimp dest!: obj_at_valid_objs' bound_tcb_at_state_refs_ofD' invs_valid_objs' simp: projectKOs valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def @@ -3683,8 +3535,8 @@ lemma unbindMaybeNotification_corres: apply (wp get_simple_ko_wp getNotification_wp)+ apply (clarsimp elim!: obj_at_valid_objsE dest!: bound_tcb_at_state_refs_ofD invs_valid_objs - simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def - valid_tcb_def valid_bound_ntfn_def valid_ntfn_def + simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def invs_psp_aligned + valid_tcb_def valid_bound_ntfn_def valid_ntfn_def invs_distinct split: option.splits) apply (clarsimp dest!: obj_at_valid_objs' bound_tcb_at_state_refs_ofD' invs_valid_objs' simp: projectKOs valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def @@ -3819,45 +3671,15 @@ lemma finaliseCap_corres: context begin interpretation Arch . (*FIXME: arch_split*) -crunch queues[wp]: copyGlobalMappings "Invariants_H.valid_queues" - (wp: crunch_wps ignore: storePDE) - -crunch queues'[wp]: copyGlobalMappings "Invariants_H.valid_queues'" - (wp: crunch_wps ignore: storePDE) - -crunch ifunsafe'[wp]: copyGlobalMappings "if_unsafe_then_cap'" - (wp: crunch_wps ignore: storePDE) - -crunch pred_tcb_at'[wp]: copyGlobalMappings "pred_tcb_at' proj P t" - (wp: crunch_wps ignore: storePDE) - -crunch vms'[wp]: copyGlobalMappings "valid_machine_state'" - (wp: crunch_wps ignore: storePDE) - -crunch ct_not_inQ[wp]: copyGlobalMappings "ct_not_inQ" +crunches copyGlobalMappings + for ifunsafe'[wp]: "if_unsafe_then_cap'" + and pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + and vms'[wp]: "valid_machine_state'" + and ct_not_inQ[wp]: "ct_not_inQ" + and tcb_in_cur_domain'[wp]: "tcb_in_cur_domain' t" + and ct__in_cur_domain'[wp]: ct_idle_or_in_cur_domain' (wp: crunch_wps ignore: storePDE) -crunch tcb_in_cur_domain'[wp]: copyGlobalMappings "tcb_in_cur_domain' t" - (wp: crunch_wps) - -crunch ct__in_cur_domain'[wp]: copyGlobalMappings ct_idle_or_in_cur_domain' - (wp: crunch_wps) - -crunch gsUntypedZeroRanges[wp]: copyGlobalMappings "\s. P (gsUntypedZeroRanges s)" - (wp: crunch_wps) - -crunch gsMaxObjectSize[wp]: copyGlobalMappings "\s. P (gsMaxObjectSize s)" - (wp: crunch_wps) - -crunch it'[wp]: copyGlobalMappings "\s. P (ksIdleThread s)" - (wp: crunch_wps) - -crunch valid_irq_states'[wp]: copyGlobalMappings "valid_irq_states'" - (wp: crunch_wps) - -crunch ksDomScheduleIdx[wp]: copyGlobalMappings "\s. P (ksDomScheduleIdx s)" - (wp: crunch_wps) - lemma threadSet_ct_idle_or_in_cur_domain': "\ct_idle_or_in_cur_domain' and (\s. \tcb. 
tcbDomain tcb = ksCurDomain s \ tcbDomain (F tcb) = ksCurDomain s)\ threadSet F t @@ -3918,178 +3740,6 @@ lemmas cteCaps_of_ctes_of_lift = ctes_of_cteCaps_of_lift lemmas final_matters'_simps = final_matters'_def [split_simps capability.split arch_capability.split] -definition set_thread_all :: "obj_ref \ Structures_A.tcb \ etcb - \ unit det_ext_monad" where - "set_thread_all ptr tcb etcb \ - do s \ get; - kh \ return $ kheap s(ptr \ (TCB tcb)); - ekh \ return $ (ekheap s)(ptr \ etcb); - put (s\kheap := kh, ekheap := ekh\) - od" - -definition thread_gets_the_all :: "obj_ref \ (Structures_A.tcb \ etcb) det_ext_monad" where - "thread_gets_the_all tptr \ - do tcb \ gets_the $ get_tcb tptr; - etcb \ gets_the $ get_etcb tptr; - return $ (tcb, etcb) od" - -definition thread_set_all :: "(Structures_A.tcb \ Structures_A.tcb) \ (etcb \ etcb) - \ obj_ref \ unit det_ext_monad" where - "thread_set_all f g tptr \ - do (tcb, etcb) \ thread_gets_the_all tptr; - set_thread_all tptr (f tcb) (g etcb) - od" - -lemma set_thread_all_corres: - fixes ob' :: "'a :: pspace_storable" - assumes x: "updateObject ob' = updateObject_default ob'" - assumes z: "\s. obj_at' P ptr s - \ map_to_ctes ((ksPSpace s) (ptr \ injectKO ob')) = map_to_ctes (ksPSpace s)" - assumes b: "\ko. P ko \ objBits ko = objBits ob'" - assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" - assumes e: "etcb_relation etcb tcb'" - assumes is_t: "injectKO (ob' :: 'a :: pspace_storable) = KOTCB tcb'" - shows "other_obj_relation (TCB tcb) (injectKO (ob' :: 'a :: pspace_storable)) \ - corres dc (obj_at (same_caps (TCB tcb)) ptr and is_etcb_at ptr) - (obj_at' (P :: 'a \ bool) ptr) - (set_thread_all ptr tcb etcb) (setObject ptr ob')" - apply (rule corres_no_failI) - apply (rule no_fail_pre) - apply wp - apply (rule x) - apply (clarsimp simp: b elim!: obj_at'_weakenE) - apply (unfold set_thread_all_def setObject_def) - apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def - put_def return_def modify_def get_object_def x - projectKOs - updateObject_default_def in_magnitude_check [OF _ P]) - apply (clarsimp simp add: state_relation_def z) - apply (simp add: trans_state_update'[symmetric] trans_state_update[symmetric] - del: trans_state_update) - apply (clarsimp simp add: swp_def fun_upd_def obj_at_def is_etcb_at_def) - apply (subst cte_wp_at_after_update,fastforce simp add: obj_at_def) - apply (subst caps_of_state_after_update,fastforce simp add: obj_at_def) - apply clarsimp - apply (subst conj_assoc[symmetric]) - apply (rule conjI[rotated]) - apply (clarsimp simp add: ghost_relation_def) - apply (erule_tac x=ptr in allE)+ - apply (clarsimp simp: obj_at_def - split: Structures_A.kernel_object.splits if_split_asm) - - apply (fold fun_upd_def) - apply (simp only: pspace_relation_def dom_fun_upd2 simp_thms) - apply (subst pspace_dom_update) - apply assumption - apply simp - apply (simp only: dom_fun_upd2 simp_thms) - apply (elim conjE) - apply (frule bspec, erule domI) - apply (rule conjI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: is_other_obj_relation_type) - apply (drule(1) bspec) - apply clarsimp - apply (frule_tac ko'="TCB tcb'" and x'=ptr in obj_relation_cut_same_type, - (fastforce simp add: is_other_obj_relation_type)+)[1] - apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) - apply (frule bspec, erule domI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: obj_at'_def) - apply (clarsimp simp: projectKOs) - apply (insert e 
is_t) - by (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits X64_A.arch_kernel_obj.splits) - -lemma tcb_update_all_corres': - assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" - assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" - assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" - assumes r: "r () ()" - assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" - shows "corres r (ko_at (TCB tcb) add and (\s. ekheap s add = Some etcb)) - (ko_at' tcb' add) - (set_thread_all add tcbu etcbu) (setObject add tcbu')" - apply (rule_tac F="tcb_relation tcb tcb' \ etcb_relation etcbu tcbu'" in corres_req) - apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) - apply (frule(1) pspace_relation_absD) - apply (force simp: projectKOs other_obj_relation_def ekheap_relation_def e) - apply (erule conjE) - apply (rule corres_guard_imp) - apply (rule corres_rel_imp) - apply (rule set_thread_all_corres[where P="(=) tcb'"]) - apply (rule ext)+ - apply simp - defer - apply (simp add: is_other_obj_relation_type_def - projectKOs objBits_simps' - other_obj_relation_def tcbs r)+ - apply (fastforce simp: is_etcb_at_def elim!: obj_at_weakenE dest: bspec[OF tables]) - apply (subst(asm) eq_commute, assumption) - apply (clarsimp simp: projectKOs obj_at'_def objBits_simps) - apply (subst map_to_ctes_upd_tcb, assumption+) - apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) - apply (subst if_not_P) - apply (fastforce dest: bspec [OF tables', OF ranI]) - apply simp - done - -lemma thread_gets_the_all_corres: - shows "corres (\(tcb, etcb) tcb'. tcb_relation tcb tcb' \ etcb_relation etcb tcb') - (tcb_at t and is_etcb_at t) (tcb_at' t) - (thread_gets_the_all t) (getObject t)" - apply (rule corres_no_failI) - apply wp - apply (clarsimp simp add: gets_def get_def return_def bind_def get_tcb_def thread_gets_the_all_def threadGet_def ethread_get_def gets_the_def assert_opt_def get_etcb_def is_etcb_at_def tcb_at_def liftM_def split: option.splits Structures_A.kernel_object.splits) - apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) - apply (clarsimp simp add: obj_at_def is_tcb obj_at'_def projectKO_def - projectKO_opt_tcb split_def - getObject_def loadObject_default_def in_monad) - apply (case_tac ko) - apply (simp_all add: fail_def return_def) - apply (clarsimp simp add: state_relation_def pspace_relation_def ekheap_relation_def) - apply (drule bspec) - apply clarsimp - apply blast - apply (drule bspec, erule domI) - apply (clarsimp simp add: other_obj_relation_def - lookupAround2_known1) - done - -lemma thread_set_all_corresT: - assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ - tcb_relation (f tcb) (f' tcb')" - assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" - assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. - getF (f' tcb) = getF tcb" - assumes e: "\etcb tcb'. 
etcb_relation etcb tcb' \ - etcb_relation (g etcb) (f' tcb')" - shows "corres dc (tcb_at t and valid_etcbs) - (tcb_at' t) - (thread_set_all f g t) (threadSet f' t)" - apply (simp add: thread_set_all_def threadSet_def bind_assoc) - apply (rule corres_guard_imp) - apply (rule corres_split[OF thread_gets_the_all_corres]) - apply (simp add: split_def) - apply (rule tcb_update_all_corres') - apply (erule x) - apply (rule y) - apply (clarsimp simp: bspec_split [OF spec [OF z]]) - apply fastforce - apply (erule e) - apply (simp add: thread_gets_the_all_def, wp+) - apply clarsimp - apply (frule(1) tcb_at_is_etcb_at) - apply (clarsimp simp add: tcb_at_def get_etcb_def obj_at_def) - apply (drule get_tcb_SomeD) - apply fastforce - apply simp - done - -lemmas thread_set_all_corres = - thread_set_all_corresT [OF _ _ all_tcbI, OF _ ball_tcb_cap_casesI ball_tcb_cte_casesI] - crunch idle_thread[wp]: deleteCallerCap "\s. P (ksIdleThread s)" (wp: crunch_wps) crunch sch_act_simple: deleteCallerCap sch_act_simple @@ -4100,96 +3750,11 @@ crunch typ_at'[wp]: deleteCallerCap "\s. P (typ_at' T p s)" (wp: crunch_wps) lemmas deleteCallerCap_typ_ats[wp] = typ_at_lifts [OF deleteCallerCap_typ_at'] -crunch ksQ[wp]: emptySlot "\s. P (ksReadyQueues s p)" - lemma setEndpoint_sch_act_not_ct[wp]: "\\s. sch_act_not (ksCurThread s) s\ setEndpoint ptr val \\_ s. sch_act_not (ksCurThread s) s\" by (rule hoare_weaken_pre, wps setEndpoint_ct', wp, simp) -lemma cancelAll_ct_not_ksQ_helper: - "\(\s. ksCurThread s \ set (ksReadyQueues s p)) and (\s. ksCurThread s \ set q) \ - mapM_x (\t. do - y \ setThreadState Structures_H.thread_state.Restart t; - tcbSchedEnqueue t - od) q - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (rule mapM_x_inv_wp2, simp) - apply (wp) - apply (wps tcbSchedEnqueue_ct') - apply (wp tcbSchedEnqueue_ksQ) - apply (wps setThreadState_ct') - apply (wp sts_ksQ') - apply (clarsimp) - done - -lemma cancelAllIPC_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cancelAllIPC epptr - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - (is "\?PRE\ _ \\_. ?POST\") - apply (simp add: cancelAllIPC_def) - apply (wp, wpc, wp) - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply (clarsimp simp: forM_x_def) - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply (clarsimp simp: forM_x_def) - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ - prefer 2 - apply assumption - apply (rule_tac Q="\ep. ?PRE and ko_at' ep epptr" in hoare_post_imp) - apply (clarsimp) - apply (rule conjI) - apply ((clarsimp simp: invs'_def valid_state'_def - sch_act_sane_def - | drule(1) ct_not_in_epQueue)+)[2] - apply (wp get_ep_sp') - done - -lemma cancelAllSignals_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cancelAllSignals ntfnptr - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - (is "\?PRE\ _ \\_. 
?POST\") - apply (simp add: cancelAllSignals_def) - apply (wp, wpc, wp+) - apply (wps rescheduleRequired_ct') - apply (wp rescheduleRequired_ksQ') - apply clarsimp - apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) - apply (wp hoare_lift_Pf2 [OF setNotification_ksQ setNotification_ksCurThread]) - apply (wps setNotification_ksCurThread, wp) - prefer 2 - apply assumption - apply (rule_tac Q="\ep. ?PRE and ko_at' ep ntfnptr" in hoare_post_imp) - apply ((clarsimp simp: invs'_def valid_state'_def sch_act_sane_def - | drule(1) ct_not_in_ntfnQueue)+)[1] - apply (wp get_ntfn_sp') - done - -lemma unbindMaybeNotification_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - unbindMaybeNotification t - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: unbindMaybeNotification_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) - apply (case_tac "ntfnBoundTCB ntfn", simp, wp, simp+) - apply (rule hoare_pre) - apply wp - apply (wps setBoundNotification_ct') - apply (wp sbn_ksQ) - apply (wps setNotification_ksCurThread, wp) - apply clarsimp - done - lemma sbn_ct_in_state'[wp]: "\ct_in_state' P\ setBoundNotification ntfn t \\_. ct_in_state' P\" apply (simp add: ct_in_state'_def) @@ -4222,37 +3787,6 @@ lemma unbindMaybeNotification_sch_act_sane[wp]: apply (wp setNotification_sch_act_sane sbn_sch_act_sane | wpc | clarsimp)+ done -lemma finaliseCapTrue_standin_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - finaliseCapTrue_standin cap final - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: finaliseCapTrue_standin_def Let_def) - apply (safe) - apply (wp cancelAllIPC_ct_not_ksQ cancelAllSignals_ct_not_ksQ - hoare_drop_imps unbindMaybeNotification_ct_not_ksQ - | wpc - | clarsimp simp: isNotificationCap_def isReplyCap_def split:capability.splits)+ - done - -lemma cteDeleteOne_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - cteDeleteOne slot - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: cteDeleteOne_def unless_def split_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) - apply (case_tac "\final. 
finaliseCap (cteCap cte) final True = fail") - apply (simp add: finaliseCapTrue_standin_simple_def) - apply wp - apply (clarsimp) - apply (wp emptySlot_cteCaps_of hoare_lift_Pf2 [OF emptySlot_ksRQ emptySlot_ct]) - apply (simp add: cteCaps_of_def) - apply (wp (once) hoare_drop_imps) - apply (wp finaliseCapTrue_standin_ct_not_ksQ isFinalCapability_inv)+ - apply (clarsimp) - done - end end diff --git a/proof/refine/X64/Init_R.thy b/proof/refine/X64/Init_R.thy index e0077afd48..844799594f 100644 --- a/proof/refine/X64/Init_R.thy +++ b/proof/refine/X64/Init_R.thy @@ -41,6 +41,7 @@ definition zeroed_arch_abstract_state :: x64_current_cr3 = cr3 0 0 , x64_allocated_io_ports = \, x64_num_ioapics = 0, + x64_ioapic_nirqs = K 0, x64_irq_state = K IRQFree\" definition zeroed_main_abstract_state :: @@ -83,7 +84,7 @@ definition zeroed_arch_intermediate_state :: Arch.kernel_state where "zeroed_arch_intermediate_state \ X64KernelState Map.empty 0 [] [] [] (CR3 0 0) - (K X64VSpaceUserRegion) \ 0 (K X64IRQFree)" + (K X64VSpaceUserRegion) \ 0 (K 0) (K X64IRQFree)" definition zeroed_intermediate_state :: global.kernel_state @@ -98,7 +99,7 @@ definition zeroed_intermediate_state :: ksDomSchedule = [], ksCurDomain = 0, ksDomainTime = 0, - ksReadyQueues = K [], + ksReadyQueues = K (TcbQueue None None), ksReadyQueuesL1Bitmap = K 0, ksReadyQueuesL2Bitmap = K 0, ksCurThread = 0, @@ -119,9 +120,11 @@ lemma non_empty_refine_state_relation: "(zeroed_abstract_state, zeroed_intermediate_state) \ state_relation" apply (clarsimp simp: state_relation_def zeroed_state_defs state.defs) apply (intro conjI) - apply (clarsimp simp: pspace_relation_def pspace_dom_def) - apply (clarsimp simp: ekheap_relation_def) - apply (clarsimp simp: ready_queues_relation_def) + apply (clarsimp simp: pspace_relation_def pspace_dom_def) + apply (clarsimp simp: ekheap_relation_def) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def queue_end_valid_def + opt_pred_def list_queue_relation_def tcbQueueEmpty_def + prev_queue_head_def) apply (clarsimp simp: ghost_relation_def) apply (fastforce simp: cdt_relation_def swp_def dest: cte_wp_at_domI) apply (clarsimp simp: cdt_list_relation_def map_to_ctes_def) diff --git a/proof/refine/X64/InterruptAcc_R.thy b/proof/refine/X64/InterruptAcc_R.thy index fe5412a17f..5cf160806a 100644 --- a/proof/refine/X64/InterruptAcc_R.thy +++ b/proof/refine/X64/InterruptAcc_R.thy @@ -49,7 +49,6 @@ lemma setIRQState_invs[wp]: apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def) apply (wp dmo_maskInterrupt) apply (clarsimp simp: invs'_def valid_state'_def cur_tcb'_def - Invariants_H.valid_queues_def valid_queues'_def valid_idle'_def valid_irq_node'_def valid_arch_state'_def valid_global_refs'_def global_refs'_def valid_machine_state'_def @@ -57,7 +56,7 @@ lemma setIRQState_invs[wp]: valid_irq_handlers'_def irq_issued'_def cteCaps_of_def valid_irq_masks'_def valid_ioports'_simps - bitmapQ_defs valid_queues_no_bitmap_def) + bitmapQ_defs valid_bitmaps_def) apply (rule conjI, clarsimp) apply (clarsimp simp: irqs_masked'_def ct_not_inQ_def) apply (rule conjI) @@ -112,7 +111,7 @@ lemma preemptionPoint_inv: shows "\P\ preemptionPoint \\_. 
P\" using assms apply (simp add: preemptionPoint_def setWorkUnits_def getWorkUnits_def modifyWorkUnits_def) apply (wpc - | wp hoare_whenE_wp hoare_seq_ext [OF _ select_inv] alternative_valid hoare_drop_imps + | wp whenE_wp bind_wp [OF _ select_inv] hoare_drop_imps | simp)+ done @@ -147,8 +146,7 @@ lemma invs'_irq_state_independent [simp, intro!]: valid_idle'_def valid_global_refs'_def valid_arch_state'_def valid_irq_node'_def valid_irq_handlers'_def valid_irq_states'_def - irqs_masked'_def bitmapQ_defs valid_queues_no_bitmap_def - valid_queues'_def + irqs_masked'_def bitmapQ_defs valid_bitmaps_def pspace_domain_valid_def cur_tcb'_def valid_machine_state'_def tcb_in_cur_domain'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def diff --git a/proof/refine/X64/Interrupt_R.thy b/proof/refine/X64/Interrupt_R.thy index 2297cd302c..f797f8fdd3 100644 --- a/proof/refine/X64/Interrupt_R.thy +++ b/proof/refine/X64/Interrupt_R.thy @@ -200,6 +200,10 @@ lemmas irq_const_defs = maxIRQ_def minIRQ_def X64.maxUserIRQ_def X64.minUserIRQ_def X64_H.maxUserIRQ_def X64_H.minUserIRQ_def +lemma corres_gets_ioapic_nirqs [corres]: + "corres (=) \ \ (gets (x64_ioapic_nirqs \ arch_state)) (gets (x64KSIOAPICnIRQs \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + lemma arch_decodeIRQControlInvocation_corres: "list_all2 cap_relation caps caps' \ corres (ser \ arch_irq_control_inv_relation) @@ -257,12 +261,13 @@ lemma arch_decodeIRQControlInvocation_corres: apply (rule corres_splitEE[OF ensureEmptySlot_corres]) apply simp apply (rule corres_split[OF corres_gets_num_ioapics]) - apply (rule whenE_throwError_corres, ((simp add: ioapicIRQLines_def)+)[2])+ - apply (rule corres_returnOkTT) - apply (clarsimp simp: arch_irq_control_inv_relation_def ) - apply (wpsimp wp: isIRQActive_inv - simp: invs_valid_objs invs_psp_aligned invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct' - | wp (once) hoare_drop_imps)+ + apply (rule corres_split[OF corres_gets_ioapic_nirqs]) + apply (rule whenE_throwError_corres, ((simp add: ioapicIRQLines_def)+)[2])+ + apply (rule corres_returnOkTT) + apply (clarsimp simp: arch_irq_control_inv_relation_def ) + apply (wpsimp wp: isIRQActive_inv + simp: invs_valid_objs invs_psp_aligned invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct' + | wp (once) hoare_drop_imps)+ by (auto split: arch_invocation_label.splits invocation_label.splits) lemma irqhandler_simp[simp]: @@ -439,9 +444,8 @@ lemma invoke_irq_handler_invs'[wp]: InterruptDecls_H.invokeIRQHandler i \\rv. 
invs'\" apply (cases i, simp_all add: Interrupt_H.invokeIRQHandler_def invokeIRQHandler_def) apply (wp dmo_maskInterrupt) - apply (clarsimp simp add: invs'_def valid_state'_def valid_irq_masks'_def - valid_machine_state'_def ct_not_inQ_def - ct_in_current_domain_ksMachineState) + apply (clarsimp simp: invs'_def valid_state'_def valid_irq_masks'_def + valid_machine_state'_def ct_not_inQ_def) apply (wp cteInsert_invs)+ apply (strengthen ntfn_badge_derived_enough_strg isnt_irq_handler_strg safe_ioport_insert'_ntfn_strg) apply (wp cteDeleteOne_other_cap cteDeleteOne_other_cap[unfolded o_def]) @@ -586,14 +590,13 @@ lemma updateIRQState_invs'[wp]: apply (clarsimp simp: X64_H.updateIRQState_def) apply wp apply (fastforce simp: invs'_def valid_state'_def cur_tcb'_def - Invariants_H.valid_queues_def valid_queues'_def valid_idle'_def valid_irq_node'_def valid_arch_state'_def valid_global_refs'_def global_refs'_def valid_machine_state'_def if_unsafe_then_cap'_def ex_cte_cap_to'_def valid_irq_handlers'_def irq_issued'_def cteCaps_of_def valid_irq_masks'_def - bitmapQ_defs valid_queues_no_bitmap_def valid_x64_irq_state'_def + bitmapQ_defs valid_x64_irq_state'_def valid_ioports'_def all_ioports_issued'_def issued_ioports'_def) done @@ -664,13 +667,6 @@ lemma decDomainTime_corres: apply (clarsimp simp:state_relation_def) done -lemma tcbSchedAppend_valid_objs': - "\valid_objs'\tcbSchedAppend t \\r. valid_objs'\" - apply (simp add:tcbSchedAppend_def) - apply (wpsimp wp: hoare_unless_wp threadSet_valid_objs' threadGet_wp) - apply (clarsimp simp add:obj_at'_def typ_at'_def) - done - lemma thread_state_case_if: "(case state of Structures_A.thread_state.Running \ f | _ \ g) = (if state = Structures_A.thread_state.Running then f else g)" @@ -681,26 +677,20 @@ lemma threadState_case_if: (if state = Structures_H.thread_state.Running then f else g)" by (case_tac state,auto) -lemma tcbSchedAppend_invs_but_ct_not_inQ': - "\invs' and st_tcb_at' runnable' t \ - tcbSchedAppend t \\_. 
all_invs_but_ct_not_inQ'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp sch_act_wf_lift valid_irq_node_lift irqs_masked_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | fastforce elim!: st_tcb_ex_cap'' split: thread_state.split_asm)+ - done +lemma ready_qs_distinct_domain_time_update[simp]: + "ready_qs_distinct (domain_time_update f s) = ready_qs_distinct s" + by (clarsimp simp: ready_qs_distinct_def) lemma timerTick_corres: - "corres dc (cur_tcb and valid_sched) + "corres dc (cur_tcb and valid_sched and pspace_aligned and pspace_distinct) invs' timer_tick timerTick" supply if_weak_cong[cong] apply (simp add: timerTick_def timer_tick_def) - apply (simp add:thread_state_case_if threadState_case_if) - apply (rule_tac Q="\ and (cur_tcb and valid_sched)" and Q'="\ and invs'" in corres_guard_imp) + apply (simp add: thread_state_case_if threadState_case_if) + apply (rule_tac Q="cur_tcb and valid_sched and pspace_aligned and pspace_distinct" + and Q'=invs' + in corres_guard_imp) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) apply simp @@ -722,67 +712,67 @@ lemma timerTick_corres: apply (rule corres_split) apply (rule ethread_set_corres; simp) apply (simp add: etcb_relation_def) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (rule rescheduleRequired_corres) - apply (wp)[1] - apply (rule hoare_strengthen_post) - apply (rule tcbSchedAppend_invs_but_ct_not_inQ', - clarsimp simp: sch_act_wf_weak) - apply (wp threadSet_timeslice_invs threadSet_valid_queues - threadSet_valid_queues' threadSet_pred_tcb_at_state)+ - apply simp - apply simp - apply (rule corres_when,simp) + apply wp + apply ((wpsimp wp: tcbSchedAppend_sym_heap_sched_pointers + tcbSchedAppend_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply ((wp thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs')+)[1] + apply wpsimp+ + apply (rule corres_when, simp) apply (rule corres_split[OF decDomainTime_corres]) apply (rule corres_split[OF getDomainTime_corres]) apply (rule corres_when,simp) apply (rule rescheduleRequired_corres) apply (wp hoare_drop_imp)+ - apply (simp add:dec_domain_time_def) - apply wp+ - apply (simp add:decDomainTime_def) - apply wp - apply (wp|wpc|unfold Let_def|simp)+ - apply (wp static_imp_wp threadSet_timeslice_invs threadSet_valid_queues threadSet_valid_queues' - threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf tcbSchedAppend_valid_objs' - rescheduleRequired_weak_sch_act_wf tcbSchedAppend_valid_queues| simp)+ - apply (strengthen sch_act_wf_weak) - apply (clarsimp simp:conj_comms) - apply (wp tcbSchedAppend_valid_queues tcbSchedAppend_sch_act_wf) - apply simp - apply (wp threadSet_valid_queues threadSet_pred_tcb_at_state threadSet_sch_act - threadSet_tcbDomain_triv threadSet_valid_queues' threadSet_valid_objs'| simp)+ - apply (wp threadGet_wp gts_wp gts_wp')+ - apply (clarsimp simp: cur_tcb_def tcb_at_is_etcb_at valid_sched_def valid_sched_action_def) - prefer 2 - apply clarsimp - apply (clarsimp simp add:cur_tcb_def valid_sched_def - valid_sched_action_def valid_etcbs_def is_tcb_def - is_etcb_at_def st_tcb_at_def obj_at_def - dest!:get_tcb_SomeD) - apply (clarsimp 
simp: invs'_def valid_state'_def - sch_act_wf_weak - cur_tcb'_def inQ_def - ct_in_state'_def obj_at'_def) - apply (clarsimp simp:st_tcb_at'_def - valid_idle'_def ct_idle_or_in_cur_domain'_def - obj_at'_def projectKO_eq) - apply simp + apply (wpsimp simp: dec_domain_time_def) + apply (wpsimp simp: decDomainTime_def) + apply (wpsimp wp: hoare_weak_lift_imp threadSet_timeslice_invs + tcbSchedAppend_valid_objs' + threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf + rescheduleRequired_weak_sch_act_wf + split_del: if_split)+ + apply (strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_time_slice_valid_queues) + apply ((wpsimp wp: thread_set_time_slice_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+)[1] + apply wpsimp + apply wpsimp + apply ((wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers + threadSet_valid_objs' + | strengthen valid_objs'_valid_tcbs' + | wp (once) hoare_drop_imp)+)[1] + apply (wpsimp wp: gts_wp gts_wp')+ + apply (clarsimp simp: cur_tcb_def) + apply (frule valid_sched_valid_etcbs) + apply (frule (1) tcb_at_is_etcb_at) + apply (frule valid_sched_valid_queues) + apply (fastforce simp: pred_tcb_at_def obj_at_def valid_sched_weak_strg) + apply (clarsimp simp: etcb_at_def split: option.splits) + apply fastforce + apply (fastforce simp: valid_state'_def ct_not_inQ_def) + apply fastforce done lemmas corres_eq_trivial = corres_Id[where f = h and g = h for h, simplified] lemma handleInterrupt_corres: "corres dc - (einvs) (invs' and (\s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive)) + einvs + (invs' and (\s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive)) (handle_interrupt irq) (handleInterrupt irq)" - (is "corres dc (einvs) ?P' ?f ?g") - apply (simp add: handle_interrupt_def handleInterrupt_def ) + (is "corres dc ?P ?P' ?f ?g") + apply (simp add: handle_interrupt_def handleInterrupt_def) apply (rule conjI[rotated]; rule impI) - apply (rule corres_guard_imp) apply (rule corres_split[OF getIRQState_corres, - where R="\rv. einvs" + where R="\rv. ?P" and R'="\rv. invs' and (\s. rv \ IRQInactive)"]) defer apply (wp getIRQState_prop getIRQState_inv do_machine_op_bind doMachineOp_bind | simp add: do_machine_op_bind doMachineOp_bind )+ @@ -818,7 +808,7 @@ lemma handleInterrupt_corres: apply (rule corres_machine_op) apply (rule corres_eq_trivial, (simp add: no_fail_ackInterrupt)+) apply wp+ - apply clarsimp + apply fastforce apply clarsimp done @@ -829,10 +819,10 @@ lemma threadSet_ksDomainTime[wp]: done crunch ksDomainTime[wp]: rescheduleRequired "\s. P (ksDomainTime s)" -(simp:tcbSchedEnqueue_def wp:hoare_unless_wp) +(simp:tcbSchedEnqueue_def wp:unless_wp) crunch ksDomainTime[wp]: tcbSchedAppend "\s. P (ksDomainTime s)" -(simp:tcbSchedEnqueue_def wp:hoare_unless_wp) +(simp:tcbSchedEnqueue_def wp:unless_wp) lemma updateTimeSlice_valid_pspace[wp]: "\valid_pspace'\ threadSet (tcbTimeSlice_update (\_. ts')) thread @@ -841,52 +831,38 @@ lemma updateTimeSlice_valid_pspace[wp]: apply (auto simp:tcb_cte_cases_def) done -lemma updateTimeSlice_valid_queues[wp]: - "\\s. Invariants_H.valid_queues s \ - threadSet (tcbTimeSlice_update (\_. ts')) thread - \\r s. 
Invariants_H.valid_queues s\" - apply (wp threadSet_valid_queues,simp) - apply (clarsimp simp:obj_at'_def inQ_def) - done - - (* catch up tcbSchedAppend to tcbSchedEnqueue, which has these from crunches on possibleSwitchTo *) -crunch ifunsafe[wp]: tcbSchedAppend if_unsafe_then_cap' crunch irq_handlers'[wp]: tcbSchedAppend valid_irq_handlers' (simp: unless_def tcb_cte_cases_def wp: crunch_wps) -crunch irq_states'[wp]: tcbSchedAppend valid_irq_states' crunch irqs_masked'[wp]: tcbSchedAppend irqs_masked' (simp: unless_def wp: crunch_wps) crunch ct[wp]: tcbSchedAppend cur_tcb' (wp: cur_tcb_lift crunch_wps) -crunch cur_tcb'[wp]: tcbSchedAppend cur_tcb' - (simp: unless_def wp: crunch_wps) - lemma timerTick_invs'[wp]: - "\invs'\ timerTick \\rv. invs'\" + "timerTick \invs'\" apply (simp add: timerTick_def) apply (wpsimp wp: threadSet_invs_trivial threadSet_pred_tcb_no_state rescheduleRequired_all_invs_but_ct_not_inQ - tcbSchedAppend_invs_but_ct_not_inQ' - simp: tcb_cte_cases_def) - apply (rule_tac Q="\rv. invs'" in hoare_post_imp) - apply (clarsimp simp add:invs'_def valid_state'_def) + simp: tcb_cte_cases_def) + apply (rule_tac Q="\rv. invs'" in hoare_post_imp) + apply (clarsimp simp: invs'_def valid_state'_def) apply (simp add: decDomainTime_def) apply wp apply simp apply wpc - apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs - rescheduleRequired_all_invs_but_ct_not_inQ - hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain' - del: tcbSchedAppend_sch_act_wf)+ - apply (rule hoare_strengthen_post[OF tcbSchedAppend_invs_but_ct_not_inQ']) - apply (wpsimp simp: valid_pspace'_def sch_act_wf_weak)+ - apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv - threadSet_valid_objs' threadSet_timeslice_invs)+ - apply (wp threadGet_wp) + apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs + rescheduleRequired_all_invs_but_ct_not_inQ + hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain')+ + apply (rule hoare_strengthen_post[OF tcbSchedAppend_all_invs_but_ct_not_inQ']) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ + apply (rule_tac Q="\_. invs'" in hoare_strengthen_post) + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv + threadSet_valid_objs' threadSet_timeslice_invs)+ + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak)+ apply (wp gts_wp')+ - apply (clarsimp simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def) + apply (auto simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def cong: conj_cong) done lemma resetTimer_invs'[wp]: diff --git a/proof/refine/X64/InvariantUpdates_H.thy b/proof/refine/X64/InvariantUpdates_H.thy index 523019315d..1406c2ebba 100644 --- a/proof/refine/X64/InvariantUpdates_H.thy +++ b/proof/refine/X64/InvariantUpdates_H.thy @@ -16,7 +16,7 @@ lemma ps_clear_domE[elim?]: lemma ps_clear_upd: "ksPSpace s y = Some v \ - ps_clear x n (ksPSpace_update (\a. ksPSpace s(y \ v')) s') = ps_clear x n s" + ps_clear x n (ksPSpace_update (\a. 
(ksPSpace s)(y \ v')) s') = ps_clear x n s" by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+ lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated] @@ -38,8 +38,9 @@ lemma invs'_machine: proof - show ?thesis apply (cases "ksSchedulerAction s") - apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs + apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_bitmaps_def bitmapQ_defs vms ct_not_inQ_def state_refs_of'_def ps_clear_def valid_irq_node'_def mask @@ -56,12 +57,13 @@ lemma invs_no_cicd'_machine: proof - show ?thesis apply (cases "ksSchedulerAction s") - apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - vms ct_not_inQ_def - state_refs_of'_def ps_clear_def - valid_irq_node'_def mask - cong: option.case_cong) + apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_bitmaps_def bitmapQ_defs + vms ct_not_inQ_def + state_refs_of'_def ps_clear_def + valid_irq_node'_def mask + cong: option.case_cong) done qed @@ -98,14 +100,9 @@ lemma valid_tcb'_tcbTimeSlice_update[simp]: "valid_tcb' (tcbTimeSlice_update f tcb) s = valid_tcb' tcb s" by (simp add:valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) -lemma valid_queues_ksSchedulerAction_update[simp]: - "valid_queues (ksSchedulerAction_update f s) = valid_queues s" - unfolding valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - by simp - -lemma valid_queues'_ksSchedulerAction_update[simp]: - "valid_queues' (ksSchedulerAction_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksSchedulerAction_update[simp]: + "valid_bitmaps (ksSchedulerAction_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma ex_cte_cap_wp_to'_gsCNodes_update[simp]: "ex_cte_cap_wp_to' P p (gsCNodes_update f s') = ex_cte_cap_wp_to' P p s'" @@ -140,45 +137,25 @@ lemma tcb_in_cur_domain_ct[simp]: "tcb_in_cur_domain' t (ksCurThread_update f s) = tcb_in_cur_domain' t s" by (fastforce simp: tcb_in_cur_domain'_def) -lemma valid_queues'_ksCurDomain[simp]: - "valid_queues' (ksCurDomain_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - -lemma valid_queues'_ksDomScheduleIdx[simp]: - "valid_queues' (ksDomScheduleIdx_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksCurDomain[simp]: + "valid_bitmaps (ksCurDomain_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksDomSchedule[simp]: - "valid_queues' (ksDomSchedule_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomScheduleIdx[simp]: + "valid_bitmaps (ksDomScheduleIdx_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksDomainTime[simp]: - "valid_queues' (ksDomainTime_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomSchedule[simp]: + "valid_bitmaps (ksDomSchedule_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues'_ksWorkUnitsCompleted[simp]: - "valid_queues' 
(ksWorkUnitsCompleted_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) +lemma valid_bitmaps_ksDomainTime[simp]: + "valid_bitmaps (ksDomainTime_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) -lemma valid_queues_ksCurDomain[simp]: - "valid_queues (ksCurDomain_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomScheduleIdx[simp]: - "valid_queues (ksDomScheduleIdx_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomSchedule[simp]: - "valid_queues (ksDomSchedule_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksDomainTime[simp]: - "valid_queues (ksDomainTime_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_ksWorkUnitsCompleted[simp]: - "valid_queues (ksWorkUnitsCompleted_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) +lemma valid_bitmaps_ksWorkUnitsCompleted[simp]: + "valid_bitmaps (ksWorkUnitsCompleted_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma valid_irq_node'_ksCurDomain[simp]: "valid_irq_node' w (ksCurDomain_update f s) = valid_irq_node' w s" @@ -255,6 +232,10 @@ lemma valid_mdb_interrupts'[simp]: "valid_mdb' (ksInterruptState_update f s) = valid_mdb' s" by (simp add: valid_mdb'_def) +lemma valid_mdb'_ksReadyQueues_update[simp]: + "valid_mdb' (ksReadyQueues_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + lemma vms_ksReadyQueues_update[simp]: "valid_machine_state' (ksReadyQueues_update f s) = valid_machine_state' s" by (simp add: valid_machine_state'_def) @@ -279,10 +260,10 @@ lemma ct_in_state_ksSched[simp]: lemma invs'_wu [simp]: "invs' (ksWorkUnitsCompleted_update f s) = invs' s" - apply (simp add: invs'_def cur_tcb'_def valid_state'_def Invariants_H.valid_queues_def - valid_queues'_def valid_irq_node'_def valid_machine_state'_def + apply (simp add: invs'_def cur_tcb'_def valid_state'_def valid_bitmaps_def + valid_irq_node'_def valid_machine_state'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def) + bitmapQ_defs) done lemma valid_arch_state'_interrupt[simp]: @@ -342,9 +323,8 @@ lemma sch_act_simple_ksReadyQueuesL2Bitmap[simp]: lemma ksDomainTime_invs[simp]: "invs' (ksDomainTime_update f s) = invs' s" - by (simp add:invs'_def valid_state'_def - cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def - tcb_in_cur_domain'_def valid_machine_state'_def) + by (simp add: invs'_def valid_state'_def cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_machine_state'_def bitmapQ_defs) lemma valid_machine_state'_ksDomainTime[simp]: "valid_machine_state' (ksDomainTime_update f s) = valid_machine_state' s" @@ -372,9 +352,7 @@ lemma ct_not_inQ_update_stt[simp]: lemma invs'_update_cnt[elim!]: "invs' s \ invs' (s\ksSchedulerAction := ChooseNewThread\)" - by (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues'_def - valid_irq_node'_def cur_tcb'_def ct_idle_or_in_cur_domain'_def - tcb_in_cur_domain'_def valid_queues_no_bitmap_def - bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def) + by (clarsimp simp: invs'_def valid_state'_def valid_irq_node'_def cur_tcb'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def 
bitmapQ_defs) end \ No newline at end of file diff --git a/proof/refine/X64/Invariants_H.thy b/proof/refine/X64/Invariants_H.thy index aef5c0a697..b237e71d45 100644 --- a/proof/refine/X64/Invariants_H.thy +++ b/proof/refine/X64/Invariants_H.thy @@ -10,6 +10,7 @@ imports "AInvs.Deterministic_AI" "AInvs.AInvs" "Lib.AddUpdSimps" + "Lib.Heap_List" begin context Arch begin @@ -88,6 +89,21 @@ abbreviation abbreviation "ko_at' v \ obj_at' (\k. k = v)" +abbreviation tcb_of' :: "kernel_object \ tcb option" where + "tcb_of' \ projectKO_opt" + +abbreviation tcbs_of' :: "kernel_state \ obj_ref \ tcb option" where + "tcbs_of' s \ ksPSpace s |> tcb_of'" + +abbreviation tcbSchedPrevs_of :: "kernel_state \ obj_ref \ obj_ref option" where + "tcbSchedPrevs_of s \ tcbs_of' s |> tcbSchedPrev" + +abbreviation tcbSchedNexts_of :: "kernel_state \ obj_ref \ obj_ref option" where + "tcbSchedNexts_of s \ tcbs_of' s |> tcbSchedNext" + +abbreviation sym_heap_sched_pointers :: "global.kernel_state \ bool" where + "sym_heap_sched_pointers s \ sym_heap (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + abbreviation "pde_at' \ typ_at' (ArchT PDET)" abbreviation @@ -236,16 +252,17 @@ where then refs_of' ko else {}))" - primrec live' :: "Structures_H.kernel_object \ bool" where "live' (KOTCB tcb) = - (bound (tcbBoundNotification tcb) \ - (tcbState tcb \ Inactive \ tcbState tcb \ IdleThreadState) \ tcbQueued tcb)" + (bound (tcbBoundNotification tcb) + \ tcbSchedPrev tcb \ None \ tcbSchedNext tcb \ None + \ tcbQueued tcb + \ (tcbState tcb \ Inactive \ tcbState tcb \ IdleThreadState))" | "live' (KOCTE cte) = False" | "live' (KOEndpoint ep) = (ep \ IdleEP)" -| "live' (KONotification ntfn) = (bound (ntfnBoundTCB ntfn) \ (\ts. ntfnObj ntfn = WaitingNtfn ts))" +| "live' (KONotification ntfn) = (bound (ntfnBoundTCB ntfn) \ (\ts. ntfnObj ntfn = WaitingNtfn ts))" | "live' (KOUserData) = False" | "live' (KOUserDataDevice) = False" | "live' (KOKernelData) = False" @@ -505,6 +522,11 @@ where capability.ArchObjectCap (arch_capability.PageCap _ _ _ _ dev _) \ dev | _ \ False" +abbreviation opt_tcb_at' :: "machine_word option \ kernel_state \ bool" where + "opt_tcb_at' \ none_top tcb_at'" + +lemmas opt_tcb_at'_def = none_top_def + definition valid_tcb' :: "Structures_H.tcb \ kernel_state \ bool" where @@ -514,7 +536,9 @@ where \ valid_bound_ntfn' (tcbBoundNotification t) s \ tcbDomain t \ maxDomain \ tcbPriority t \ maxPriority - \ tcbMCP t \ maxPriority" + \ tcbMCP t \ maxPriority + \ opt_tcb_at' (tcbSchedPrev t) s + \ opt_tcb_at' (tcbSchedNext t) s" definition valid_ep' :: "Structures_H.endpoint \ kernel_state \ bool" @@ -524,7 +548,6 @@ where | Structures_H.SendEP ts \ (ts \ [] \ (\t \ set ts. tcb_at' t s) \ distinct ts) | Structures_H.RecvEP ts \ (ts \ [] \ (\t \ set ts. tcb_at' t s) \ distinct ts)" - definition valid_bound_tcb' :: "machine_word option \ kernel_state \ bool" where @@ -921,10 +944,15 @@ where | "runnable' (Structures_H.BlockedOnSend a b c d e) = False" | "runnable' (Structures_H.BlockedOnNotification x) = False" -definition - inQ :: "domain \ priority \ tcb \ bool" -where - "inQ d p tcb \ tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d" +definition inQ :: "domain \ priority \ tcb \ bool" where + "inQ d p tcb \ tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d" + +lemma inQ_implies_tcbQueueds_of: + "(inQ domain priority |< tcbs_of' s') tcbPtr \ (tcbQueued |< tcbs_of' s') tcbPtr" + by (clarsimp simp: opt_map_def opt_pred_def inQ_def split: option.splits) + +defs ready_qs_runnable_def: + "ready_qs_runnable s \ \t. 
obj_at' tcbQueued t s \ st_tcb_at' runnable' t s" definition (* for given domain and priority, the scheduler bitmap indicates a thread is in the queue *) @@ -934,15 +962,6 @@ where "bitmapQ d p s \ ksReadyQueuesL1Bitmap s d !! prioToL1Index p \ ksReadyQueuesL2Bitmap s (d, invertL1Index (prioToL1Index p)) !! unat (p && mask wordRadix)" - -definition - valid_queues_no_bitmap :: "kernel_state \ bool" -where - "valid_queues_no_bitmap \ \s. - (\d p. (\t \ set (ksReadyQueues s (d, p)). obj_at' (inQ d p and runnable' \ tcbState) t s) - \ distinct (ksReadyQueues s (d, p)) - \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - definition (* A priority is used as a two-part key into the bitmap structure. If an L2 bitmap entry is set without an L1 entry, updating the L1 entry (shared by many priorities) may make @@ -966,31 +985,62 @@ where \d i. ksReadyQueuesL1Bitmap s d !! i \ ksReadyQueuesL2Bitmap s (d, invertL1Index i) \ 0 \ i < l2BitmapSize" -definition - valid_bitmapQ :: "kernel_state \ bool" -where - "valid_bitmapQ \ \s. (\d p. bitmapQ d p s \ ksReadyQueues s (d,p) \ [])" +definition valid_bitmapQ :: "kernel_state \ bool" where + "valid_bitmapQ \ \s. \d p. bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p))" -definition - valid_queues :: "kernel_state \ bool" -where - "valid_queues \ \s. valid_queues_no_bitmap s \ valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ bitmapQ_no_L1_orphans s" +definition valid_bitmaps :: "kernel_state \ bool" where + "valid_bitmaps \ \s. valid_bitmapQ s \ bitmapQ_no_L2_orphans s \ bitmapQ_no_L1_orphans s" -definition - (* when a thread gets added to / removed from a queue, but before bitmap updated *) - valid_bitmapQ_except :: "domain \ priority \ kernel_state \ bool" -where +lemma valid_bitmaps_valid_bitmapQ[elim!]: + "valid_bitmaps s \ valid_bitmapQ s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_bitmapQ_no_L2_orphans[elim!]: + "valid_bitmaps s \ bitmapQ_no_L2_orphans s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_bitmapQ_no_L1_orphans[elim!]: + "valid_bitmaps s \ bitmapQ_no_L1_orphans s" + by (simp add: valid_bitmaps_def) + +lemma valid_bitmaps_lift: + assumes prq: "\P. f \\s. P (ksReadyQueues s)\" + assumes prqL1: "\P. f \\s. P (ksReadyQueuesL1Bitmap s)\" + assumes prqL2: "\P. f \\s. P (ksReadyQueuesL2Bitmap s)\" + shows "f \valid_bitmaps\" + unfolding valid_bitmaps_def valid_bitmapQ_def bitmapQ_def + bitmapQ_no_L1_orphans_def bitmapQ_no_L2_orphans_def + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +(* when a thread gets added to / removed from a queue, but before bitmap updated *) +definition valid_bitmapQ_except :: "domain \ priority \ kernel_state \ bool" where "valid_bitmapQ_except d' p' \ \s. - (\d p. (d \ d' \ p \ p') \ (bitmapQ d p s \ ksReadyQueues s (d,p) \ []))" + \d p. (d \ d' \ p \ p') \ (bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p)))" lemmas bitmapQ_defs = valid_bitmapQ_def valid_bitmapQ_except_def bitmapQ_def bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def -definition - valid_queues' :: "kernel_state \ bool" -where - "valid_queues' \ \s. \d p t. obj_at' (inQ d p) t s \ t \ set (ksReadyQueues s (d, p))" +\ \ + The tcbSchedPrev and tcbSchedNext fields of a TCB are used only to indicate membership in + one of the ready queues. \ +definition valid_sched_pointers_2 :: + "(obj_ref \ obj_ref) \ (obj_ref \ obj_ref) \ (obj_ref \ bool) \ bool " + where + "valid_sched_pointers_2 prevs nexts ready \ + \ptr. 
prevs ptr \ None \ nexts ptr \ None \ ready ptr" + +abbreviation valid_sched_pointers :: "kernel_state \ bool" where + "valid_sched_pointers s \ + valid_sched_pointers_2 (tcbSchedPrevs_of s) (tcbSchedNexts_of s) (tcbQueued |< tcbs_of' s)" + +lemmas valid_sched_pointers_def = valid_sched_pointers_2_def + +lemma valid_sched_pointersD: + "\valid_sched_pointers s; \ (tcbQueued |< tcbs_of' s) t\ + \ tcbSchedPrevs_of s t = None \ tcbSchedNexts_of s t = None" + by (fastforce simp: valid_sched_pointers_def in_opt_pred opt_map_red) definition tcb_in_cur_domain' :: "machine_word \ kernel_state \ bool" where "tcb_in_cur_domain' t \ \s. obj_at' (\tcb. ksCurDomain s = tcbDomain tcb) t s" @@ -1128,9 +1178,20 @@ definition where "valid_x64_irq_state' irqState \ \irq > maxIRQ. irqState irq = X64IRQFree" -definition - valid_arch_state' :: "kernel_state \ bool" -where +definition valid_ioapic_2 :: "machine_word \ (machine_word \ 8 word) \ bool" where + "valid_ioapic_2 num_ioapics ioapic_nirqs \ + num_ioapics \ of_nat Kernel_Config.maxNumIOAPIC \ + (\ioapic < num_ioapics. 0 < ioapic_nirqs ioapic) \ + (\ioapic < num_ioapics. ioapic_nirqs ioapic \ ucast ioapicIRQLines) \ + (\ioapic > of_nat Kernel_Config.maxNumIOAPIC. ioapic_nirqs ioapic = 0)" + +abbreviation valid_ioapic where + "valid_ioapic s \ + valid_ioapic_2 (x64KSNumIOAPICs (ksArchState s)) (x64KSIOAPICnIRQs (ksArchState s))" + +lemmas valid_ioapic_def = valid_ioapic_2_def + +definition valid_arch_state' :: "kernel_state \ bool" where "valid_arch_state' \ \s. valid_asid_table' (x64KSASIDTable (ksArchState s)) s \ valid_cr3' (x64KSCurrentUserCR3 (ksArchState s)) \ @@ -1138,7 +1199,8 @@ where valid_global_pds' (x64KSSKIMPDs (ksArchState s)) s \ valid_global_pdpts' (x64KSSKIMPDPTs (ksArchState s)) s \ valid_global_pts' (x64KSSKIMPTs (ksArchState s)) s \ - valid_x64_irq_state' (x64KSIRQState (ksArchState s))" + valid_x64_irq_state' (x64KSIRQState (ksArchState s)) \ + valid_ioapic s" definition irq_issued' :: "irq \ kernel_state \ bool" @@ -1231,7 +1293,7 @@ definition valid_state' :: "kernel_state \ bool" where "valid_state' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) + \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s @@ -1241,7 +1303,9 @@ where \ valid_machine_state' s \ irqs_masked' s \ valid_ioports' s - \ valid_queues' s + \ sym_heap_sched_pointers s + \ valid_sched_pointers s + \ valid_bitmaps s \ ct_not_inQ s \ ct_idle_or_in_cur_domain' s \ pspace_domain_valid s @@ -1294,6 +1358,11 @@ definition abbreviation "active' st \ st = Structures_H.Running \ st = Structures_H.Restart" +lemma runnable_eq_active': "runnable' = active'" + apply (rule ext) + apply (case_tac st, simp_all) + done + abbreviation "simple' st \ st = Structures_H.Inactive \ st = Structures_H.Running \ @@ -1309,11 +1378,14 @@ abbreviation abbreviation(input) "all_invs_but_sym_refs_ct_not_inQ' \ \s. 
valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s - \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s + \ valid_irq_states' s \ irqs_masked' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ valid_machine_state' s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s \ pspace_domain_valid s \ valid_ioports' s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1321,12 +1393,14 @@ abbreviation(input) abbreviation(input) "all_invs_but_ct_not_inQ' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) + \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s - \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s + \ valid_irq_states' s \ irqs_masked' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ valid_machine_state' s + \ cur_tcb' s \ ct_idle_or_in_cur_domain' s \ pspace_domain_valid s \ valid_ioports' s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1342,12 +1416,14 @@ lemma all_invs_but_not_ct_inQ_check': definition "all_invs_but_ct_idle_or_in_cur_domain' \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s - \ valid_queues s \ sym_refs (state_refs_of' s) + \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s - \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s - \ cur_tcb' s \ valid_queues' s \ ct_not_inQ s + \ valid_irq_states' s \ irqs_masked' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s + \ valid_machine_state' s + \ cur_tcb' s \ ct_not_inQ s \ pspace_domain_valid s \ valid_ioports' s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ untyped_ranges_zero' s" @@ -1424,6 +1500,10 @@ lemma valid_bound_tcb'_Some[simp]: "valid_bound_tcb' (Some x) = tcb_at' x" by (auto simp: valid_bound_tcb'_def) +lemma objBitsKO_Data: + "objBitsKO (if dev then KOUserDataDevice else KOUserData) = pageBits" + by (simp add: objBits_def objBitsKO_def word_size_def) + lemmas objBits_defs = tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def cteSizeBits_def lemmas untypedBits_defs = minUntypedSizeBits_def maxUntypedSizeBits_def lemmas objBits_simps = objBits_def objBitsKO_def word_size_def @@ -2510,7 +2590,7 @@ lemma typ_at_lift_valid_cap': apply (case_tac arch_capability, simp_all add: P[where P=id, simplified] vspace_table_at'_defs hoare_vcg_prop All_less_Ball - split del: if_splits) + split del: if_split) apply (wp hoare_vcg_const_Ball_lift P typ_at_lift_valid_untyped' hoare_vcg_all_lift typ_at_lift_cte')+ done @@ -3141,9 +3221,9 @@ lemma sch_act_wf_arch [simp]: "sch_act_wf sa (ksArchState_update f s) = sch_act_wf sa s" by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) -lemma valid_queues_arch [simp]: - "valid_queues (ksArchState_update f s) = 
valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) +lemma valid_bitmaps_arch[simp]: + "valid_bitmaps (ksArchState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) lemma if_unsafe_then_cap_arch' [simp]: "if_unsafe_then_cap' (ksArchState_update f s) = if_unsafe_then_cap' s" @@ -3161,22 +3241,14 @@ lemma sch_act_wf_machine_state [simp]: "sch_act_wf sa (ksMachineState_update f s) = sch_act_wf sa s" by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) -lemma valid_queues_machine_state [simp]: - "valid_queues (ksMachineState_update f s) = valid_queues s" - by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) - -lemma valid_queues_arch' [simp]: - "valid_queues' (ksArchState_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - -lemma valid_queues_machine_state' [simp]: - "valid_queues' (ksMachineState_update f s) = valid_queues' s" - by (simp add: valid_queues'_def) - lemma valid_irq_node'_machine_state [simp]: "valid_irq_node' x (ksMachineState_update f s) = valid_irq_node' x s" by (simp add: valid_irq_node'_def) +lemma valid_bitmaps_machine_state[simp]: + "valid_bitmaps (ksMachineState_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + (* these should be reasonable safe for automation because of the 0 pattern *) lemma no_0_ko_wp' [elim!]: "\ ko_wp_at' Q 0 s; no_0_obj' s \ \ P" @@ -3256,19 +3328,6 @@ lemma typ_at_aligned': "\ typ_at' tp p s \ \ is_aligned p (objBitsT tp)" by (clarsimp simp add: typ_at'_def ko_wp_at'_def objBitsT_koTypeOf) -lemma valid_queues_obj_at'D: - "\ t \ set (ksReadyQueues s (d, p)); valid_queues s \ - \ obj_at' (inQ d p) t s" - apply (unfold valid_queues_def valid_queues_no_bitmap_def) - apply (elim conjE) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp) - done - lemma obj_at'_and: "obj_at' (P and P') t s = (obj_at' P t s \ obj_at' P' t s)" by (rule iffI, (clarsimp simp: obj_at'_def)+) @@ -3310,21 +3369,6 @@ lemma obj_at'_ko_at'_prop: "ko_at' ko t s \ obj_at' P t s = P ko" by (drule obj_at_ko_at', clarsimp simp: obj_at'_def) -lemma valid_queues_no_bitmap_def': - "valid_queues_no_bitmap = - (\s. \d p. (\t\set (ksReadyQueues s (d, p)). - obj_at' (inQ d p) t s \ st_tcb_at' runnable' t s) \ - distinct (ksReadyQueues s (d, p)) \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - apply (rule ext, rule iffI) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_and pred_tcb_at'_def o_def - elim!: obj_at'_weakenE)+ - done - -lemma valid_queues_running: - assumes Q: "t \ set(ksReadyQueues s (d, p))" "valid_queues s" - shows "st_tcb_at' runnable' t s" - using assms by (clarsimp simp add: valid_queues_def valid_queues_no_bitmap_def') - lemma valid_refs'_cteCaps: "valid_refs' S (ctes_of s) = (\c \ ran (cteCaps_of s). 
S \ capRange c = {})" by (fastforce simp: valid_refs'_def cteCaps_of_def elim!: ranE) @@ -3409,8 +3453,16 @@ lemma invs_sch_act_wf' [elim!]: "invs' s \ sch_act_wf (ksSchedulerAction s) s" by (simp add: invs'_def valid_state'_def) -lemma invs_queues [elim!]: - "invs' s \ valid_queues s" +lemma invs_valid_bitmaps[elim!]: + "invs' s \ valid_bitmaps s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sym_heap_sched_pointers[elim!]: + "invs' s \ sym_heap_sched_pointers s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_valid_sched_pointers[elim!]: + "invs' s \ valid_sched_pointers s" by (simp add: invs'_def valid_state'_def) lemma invs_valid_idle'[elim!]: @@ -3421,13 +3473,17 @@ lemma invs_valid_global'[elim!]: "invs' s \ valid_global_refs' s" by (fastforce simp: invs'_def valid_state'_def) +lemma invs_valid_ioapic[elim!]: + "invs' s \ valid_ioapic s" + by (simp add: invs'_def valid_state'_def valid_arch_state'_def) + lemma invs'_invs_no_cicd: "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" by (simp add: invs'_to_invs_no_cicd'_def) lemma invs'_bitmapQ_no_L1_orphans: "invs' s \ bitmapQ_no_L1_orphans s" - by (drule invs_queues, simp add: valid_queues_def) + by (simp add: invs'_def valid_state'_def valid_bitmaps_def) lemma invs_ksCurDomain_maxDomain' [elim!]: "invs' s \ ksCurDomain s \ maxDomain" @@ -3452,34 +3508,24 @@ lemma invs_no_0_obj'[elim!]: lemma invs'_gsCNodes_update[simp]: "invs' (gsCNodes_update f s') = invs' s'" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def valid_ioports'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) - apply (cases "ksSchedulerAction s'") - apply (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def ct_not_inQ_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs valid_ioports'_def + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) done lemma invs'_gsUserPages_update[simp]: "invs' (gsUserPages_update f s') = invs' s'" - apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def - bitmapQ_defs - valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def valid_ioports'_def - irq_issued'_def irqs_masked'_def valid_machine_state'_def - cur_tcb'_def) - apply (cases "ksSchedulerAction s'") - apply (simp_all add: ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def ct_not_inQ_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_bitmaps_def bitmapQ_defs valid_ioports'_def + valid_irq_node'_def valid_irq_handlers'_def irq_issued'_def irqs_masked'_def + valid_machine_state'_def cur_tcb'_def) + apply (cases "ksSchedulerAction s'"; + simp add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + ct_not_inQ_def) done -lemma invs_queues_tcb_in_cur_domain': - "\ ksReadyQueues s (d, p) = x # xs; invs' s; d = ksCurDomain s\ - \ tcb_in_cur_domain' x s" -apply (subgoal_tac "x \ set (ksReadyQueues s (d, p))") - apply (drule (1) valid_queues_obj_at'D[OF _ invs_queues]) - apply (auto simp: inQ_def tcb_in_cur_domain'_def elim: obj_at'_weakenE) -done - lemma pred_tcb'_neq_contra: "\ pred_tcb_at' proj P p s; pred_tcb_at' proj Q p s; \st. 
P st \ Q st \ \ False" by (clarsimp simp: pred_tcb_at'_def obj_at'_def) @@ -3493,7 +3539,7 @@ lemma invs'_ksDomScheduleIdx: unfolding invs'_def valid_state'_def by clarsimp lemma valid_bitmap_valid_bitmapQ_exceptE: - "\ valid_bitmapQ_except d p s ; (bitmapQ d p s \ ksReadyQueues s (d,p) \ []) ; + "\ valid_bitmapQ_except d p s; bitmapQ d p s \ \ tcbQueueEmpty (ksReadyQueues s (d,p)); bitmapQ_no_L2_orphans s \ \ valid_bitmapQ s" unfolding valid_bitmapQ_def valid_bitmapQ_except_def @@ -3623,4 +3669,50 @@ add_upd_simps "invs' (gsUntypedZeroRanges_update f s)" (obj_at'_real_def) declare upd_simps[simp] +lemma neq_out_intv: + "\ a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" + by simp + +lemma rule_out_intv: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a \ b \ + \ b \ mask_range a (objBitsKO obj)" + apply (drule(1) pspace_distinctD') + apply (subst (asm) ps_clear_def) + apply (drule_tac x = b in orthD2) + apply fastforce + apply (drule neq_out_intv) + apply (simp add: mask_def add_diff_eq) + apply (simp add: mask_def add_diff_eq) + done + +lemma ptr_range_mask_range: + "{ptr..ptr + 2 ^ bits - 1} = mask_range ptr bits" + unfolding mask_def + by simp + +lemma distinct_obj_range'_not_subset: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ \ obj_range' b obj' \ obj_range' a obj" + unfolding obj_range'_def + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule (3) rule_out_intv) + by (fastforce simp: is_aligned_no_overflow_mask ptr_range_mask_range word_add_increasing) + +lemma obj_range'_disjoint: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ obj_range' a obj \ obj_range' b obj' = {}" + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule_tac p=a and p'=b in aligned_mask_range_cases) + apply assumption + apply (metis add_mask_fold distinct_obj_range'_not_subset obj_range'_def) + done + end diff --git a/proof/refine/X64/IpcCancel_R.thy b/proof/refine/X64/IpcCancel_R.thy index 07211de1d5..ce753afc38 100644 --- a/proof/refine/X64/IpcCancel_R.thy +++ b/proof/refine/X64/IpcCancel_R.thy @@ -48,25 +48,6 @@ lemma set_ep_pred_tcb_at' [wp]: apply (simp add: updateObject_default_def in_monad projectKOs) done -(* valid_queues is too strong *) -definition valid_inQ_queues :: "KernelStateData_H.kernel_state \ bool" where - "valid_inQ_queues \ - \s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) \ distinct (ksReadyQueues s (d, p))" - -lemma valid_inQ_queues_ksSchedulerAction_update[simp]: - "valid_inQ_queues (ksSchedulerAction_update f s) = valid_inQ_queues s" - by (simp add: valid_inQ_queues_def) - -lemma valid_inQ_queues_ksReadyQueuesL1Bitmap_upd[simp]: - "valid_inQ_queues (ksReadyQueuesL1Bitmap_update f s) = valid_inQ_queues s" - unfolding valid_inQ_queues_def - by simp - -lemma valid_inQ_queues_ksReadyQueuesL2Bitmap_upd[simp]: - "valid_inQ_queues (ksReadyQueuesL2Bitmap_update f s) = valid_inQ_queues s" - unfolding valid_inQ_queues_def - by simp - defs capHasProperty_def: "capHasProperty ptr P \ cte_wp_at' (\c. P (cteCap c)) ptr" end @@ -83,11 +64,6 @@ locale delete_one_conc_pre = "\pspace_distinct'\ cteDeleteOne slot \\rv. pspace_distinct'\" assumes delete_one_it: "\P. \\s. P (ksIdleThread s)\ cteDeleteOne cap \\rv s. 
P (ksIdleThread s)\" - assumes delete_one_queues: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cteDeleteOne sl \\rv. Invariants_H.valid_queues\" - assumes delete_one_inQ_queues: - "\valid_inQ_queues\ cteDeleteOne sl \\rv. valid_inQ_queues\" assumes delete_one_sch_act_simple: "\sch_act_simple\ cteDeleteOne sl \\rv. sch_act_simple\" assumes delete_one_sch_act_not: @@ -105,7 +81,7 @@ lemma (in delete_one_conc_pre) cancelIPC_simple[wp]: "\\\ cancelIPC t \\rv. st_tcb_at' simple' t\" apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def cong: Structures_H.thread_state.case_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (rule hoare_pre) apply (wpc | wp sts_st_tcb_at'_cases hoare_vcg_conj_lift @@ -343,6 +319,7 @@ lemma cancelSignal_corres: apply fastforce apply (clarsimp simp: valid_obj_def valid_tcb_def valid_tcb_state_def) apply (drule sym, simp add: obj_at_def) + apply fastforce apply (clarsimp simp: conj_comms pred_tcb_at' cong: conj_cong) apply (rule conjI) apply (simp add: pred_tcb_at'_def) @@ -547,12 +524,12 @@ lemma (in delete_one) cancelIPC_ReplyCap_corres: and Q'="\_. invs' and st_tcb_at' awaiting_reply' t" in corres_underlying_split) apply (rule corres_guard_imp) - apply (rule threadset_corresT) + apply (rule threadset_corresT; simp?) apply (simp add: tcb_relation_def fault_rel_optionation_def) apply (simp add: tcb_cap_cases_def) - apply (simp add: tcb_cte_cases_def) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) apply (simp add: exst_same_def) - apply (clarsimp simp: st_tcb_at_tcb_at) + apply (fastforce simp: st_tcb_at_tcb_at) apply clarsimp defer apply (wp thread_set_invs_trivial thread_set_no_change_tcb_state @@ -609,7 +586,7 @@ lemma (in delete_one) cancelIPC_ReplyCap_corres: qed lemma (in delete_one) cancel_ipc_corres: - "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + "corres dc (einvs and tcb_at t) invs' (cancel_ipc t) (cancelIPC t)" apply (simp add: cancel_ipc_def cancelIPC_def Let_def) apply (rule corres_guard_imp) @@ -639,7 +616,7 @@ lemma (in delete_one) cancel_ipc_corres: apply (rule hoare_strengthen_post) apply (rule gts_sp'[where P="\"]) apply (clarsimp elim!: pred_tcb'_weakenE) - apply simp + apply fastforce apply simp done @@ -663,24 +640,20 @@ lemma setEndpoint_utr[wp]: declare cart_singleton_empty [simp] declare cart_singleton_empty2[simp] -crunch ksQ[wp]: setNotification "\s. P (ksReadyQueues s p)" - (wp: setObject_queues_unchanged_tcb updateObject_default_inv) - lemma sch_act_simple_not_t[simp]: "sch_act_simple s \ sch_act_not t s" by (clarsimp simp: sch_act_simple_def) context begin interpretation Arch . (*FIXME: arch_split*) +crunches setNotification + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift) + lemma cancelSignal_invs': "\invs' and st_tcb_at' (\st. st = BlockedOnNotification ntfn) t and sch_act_not t\ cancelSignal t ntfn \\rv. invs'\" proof - - have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ - \ \x. t \ set (ksReadyQueues s x)" - apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def - valid_queues_no_bitmap_def) - apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ - done have NTFNSN: "\ntfn ntfn'. \\s. sch_act_not (ksCurThread s) s \ setNotification ntfn ntfn' \\_ s. 
sch_act_not (ksCurThread s) s\" @@ -691,20 +664,19 @@ lemma cancelSignal_invs': show ?thesis apply (simp add: cancelSignal_def invs'_def valid_state'_def Let_def) apply (wp valid_irq_node_lift sts_sch_act' irqs_masked_lift - hoare_vcg_all_lift [OF setNotification_ksQ] sts_valid_queues + hoare_vcg_all_lift setThreadState_ct_not_inQ NTFNSN - hoare_vcg_all_lift setNotification_ksQ + hoare_vcg_all_lift | simp add: valid_tcb_state'_def list_case_If split del: if_split)+ prefer 2 apply assumption apply (rule hoare_strengthen_post) apply (rule get_ntfn_sp') + apply (rename_tac rv s) apply (clarsimp simp: pred_tcb_at') - apply (frule NIQ) - apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) apply (rule conjI) apply (clarsimp simp: valid_ntfn'_def) - apply (case_tac "ntfnObj r", simp_all add: isWaitingNtfn_def) + apply (case_tac "ntfnObj rv", simp_all add: isWaitingNtfn_def) apply (frule ko_at_valid_objs') apply (simp add: valid_pspace_valid_objs') apply (clarsimp simp: projectKO_opt_ntfn split: kernel_object.splits) @@ -727,7 +699,7 @@ lemma cancelSignal_invs': split: ntfn.splits) apply (rule conjI, clarsimp elim!: if_live_state_refsE) apply (fastforce simp: sym_refs_def dest!: idle'_no_refs) - apply (case_tac "ntfnObj r", simp_all) + apply (case_tac "ntfnObj rv", simp_all) apply (frule obj_at_valid_objs', clarsimp) apply (clarsimp simp: projectKOs valid_obj'_def valid_ntfn'_def) apply (rule conjI, clarsimp split: option.splits) @@ -741,9 +713,10 @@ lemma cancelSignal_invs': set_eq_subset) apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def set_eq_subset) + apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp elim!: if_live_state_refsE) apply (rule conjI) - apply (case_tac "ntfnBoundTCB r") + apply (case_tac "ntfnBoundTCB rv") apply (clarsimp elim!: if_live_state_refsE)+ apply (rule conjI, clarsimp split: option.splits) apply (clarsimp dest!: idle'_no_refs) @@ -801,23 +774,25 @@ lemma setEndpoint_ct_not_inQ[wp]: done lemma setEndpoint_ksDomScheduleIdx[wp]: - "\\s. P (ksDomScheduleIdx s)\ setEndpoint ptr ep \\_ s. P (ksDomScheduleIdx s)\" + "setEndpoint ptr ep \\s. P (ksDomScheduleIdx s)\" apply (simp add: setEndpoint_def setObject_def split_def) apply (wp updateObject_default_inv | simp)+ done + end +crunches setEndpoint + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (wp: valid_bitmaps_lift simp: updateObject_default_def) + lemma (in delete_one_conc) cancelIPC_invs[wp]: shows "\tcb_at' t and invs'\ cancelIPC t \\rv. invs'\" proof - have P: "\xs v f. (case xs of [] \ return v | y # ys \ return (f (y # ys))) = return (case xs of [] \ v | y # ys \ f xs)" by (clarsimp split: list.split) - have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ - \ \x. t \ set (ksReadyQueues s x)" - apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ - done have EPSCHN: "\eeptr ep'. \\s. sch_act_not (ksCurThread s) s\ setEndpoint eeptr ep' \\_ s. 
sch_act_not (ksCurThread s) s\" @@ -842,8 +817,8 @@ proof - apply (wp valid_irq_node_lift valid_global_refs_lift' valid_arch_state_lift' irqs_masked_lift sts_sch_act' hoare_vcg_all_lift [OF setEndpoint_ksQ] - sts_valid_queues setThreadState_ct_not_inQ EPSCHN - hoare_vcg_all_lift setNotification_ksQ + setThreadState_ct_not_inQ EPSCHN + hoare_vcg_all_lift | simp add: valid_tcb_state'_def split del: if_split | wpc)+ prefer 2 @@ -851,14 +826,14 @@ proof - apply (rule hoare_strengthen_post [OF get_ep_sp']) apply (clarsimp simp: pred_tcb_at' fun_upd_def[symmetric] conj_comms split del: if_split cong: if_cong) + apply (rule conjI, clarsimp simp: valid_pspace'_def) + apply (rule conjI, clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) apply (frule obj_at_valid_objs', clarsimp) apply (clarsimp simp: projectKOs valid_obj'_def) apply (rule conjI) apply (clarsimp simp: obj_at'_def valid_ep'_def projectKOs dest!: pred_tcb_at') - apply (frule NIQ) - apply (erule pred_tcb'_weakenE, fastforce) apply (clarsimp, rule conjI) apply (auto simp: pred_tcb_at'_def obj_at'_def)[1] apply (rule conjI) @@ -905,7 +880,7 @@ proof - show ?thesis apply (simp add: cancelIPC_def crunch_simps cong: if_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule bind_wp [OF _ gts_sp']) apply (case_tac state, simp_all add: isTS_defs) apply (safe intro!: hoare_weaken_pre[OF Q] @@ -948,8 +923,8 @@ lemma (in delete_one_conc_pre) cancelIPC_st_tcb_at: \\rv. st_tcb_at' P t\" apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (case_tac x, simp_all add: isTS_defs list_case_If) + apply (rule bind_wp [OF _ gts_sp']) + apply (case_tac rv, simp_all add: isTS_defs list_case_If) apply (wp sts_st_tcb_at'_cases delete_one_st_tcb_at threadSet_pred_tcb_no_state cancelSignal_st_tcb_at hoare_drop_imps @@ -1021,9 +996,9 @@ lemma (in delete_one_conc_pre) cancelIPC_tcb_at_runnable': (is "\?PRE\ _ \_\") apply (clarsimp simp: cancelIPC_def Let_def) apply (case_tac "t'=t") - apply (rule_tac B="\st. st_tcb_at' runnable' t and K (runnable' st)" - in hoare_seq_ext) - apply (case_tac x; simp) + apply (rule_tac Q'="\st. st_tcb_at' runnable' t and K (runnable' st)" + in bind_wp) + apply (case_tac rv; simp) apply (wp sts_pred_tcb_neq' | simp | wpc)+ apply (clarsimp) apply (rule_tac Q="\rv. ?PRE" in hoare_post_imp, fastforce) @@ -1039,37 +1014,12 @@ crunch ksCurDomain[wp]: cancelSignal "\s. P (ksCurDomain s)" lemma (in delete_one_conc_pre) cancelIPC_ksCurDomain[wp]: "\\s. P (ksCurDomain s)\ cancelIPC t \\_ s. P (ksCurDomain s)\" -apply (simp add: cancelIPC_def Let_def) -apply (wp hoare_vcg_conj_lift delete_one_ksCurDomain - | wpc - | rule hoare_drop_imps - | simp add: getThreadReplySlot_def o_def if_fun_split)+ -done - -(* FIXME move *) -lemma tcbSchedEnqueue_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ tcbSchedEnqueue t \\_. obj_at' P t'\" -apply (simp add: tcbSchedEnqueue_def unless_def) -apply (wp threadGet_wp | simp)+ -apply (clarsimp simp: obj_at'_def) -apply (case_tac obja) -apply fastforce -done - -(* FIXME move *) -lemma setThreadState_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ setThreadState st t \\_. 
obj_at' P t'\" -apply (simp add: setThreadState_def rescheduleRequired_def) -apply (wp hoare_vcg_conj_lift tcbSchedEnqueue_not_st - | wpc - | rule hoare_drop_imps - | simp)+ -apply (clarsimp simp: obj_at'_def) -apply (case_tac obj) -apply fastforce -done + apply (simp add: cancelIPC_def Let_def) + apply (wp hoare_vcg_conj_lift delete_one_ksCurDomain + | wpc + | rule hoare_drop_imps + | simp add: getThreadReplySlot_def o_def if_fun_split)+ + done (* FIXME move *) lemma setBoundNotification_not_ntfn: @@ -1082,15 +1032,6 @@ lemma setBoundNotification_not_ntfn: | simp)+ done -(* FIXME move *) -lemma setThreadState_tcb_in_cur_domain'[wp]: - "\tcb_in_cur_domain' t'\ setThreadState st t \\_. tcb_in_cur_domain' t'\" -apply (simp add: tcb_in_cur_domain'_def) -apply (rule hoare_pre) -apply wps -apply (wp setThreadState_not_st | simp)+ -done - lemma setBoundNotification_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ setBoundNotification st t \\_. tcb_in_cur_domain' t'\" apply (simp add: tcb_in_cur_domain'_def) @@ -1099,22 +1040,24 @@ lemma setBoundNotification_tcb_in_cur_domain'[wp]: apply (wp setBoundNotification_not_ntfn | simp)+ done -lemma cancelSignal_tcb_obj_at': - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ cancelSignal t word \\_. obj_at' P t'\" -apply (simp add: cancelSignal_def setNotification_def) -apply (wp setThreadState_not_st getNotification_wp | wpc | simp)+ -done +lemma setThreadState_tcbDomain_obj_at'[wp]: + "setThreadState ts t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding setThreadState_def + by wpsimp + +crunches cancelSignal + for tcbDomain_obj_at'[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t'" + (wp: crunch_wps) lemma (in delete_one_conc_pre) cancelIPC_tcbDomain_obj_at': "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelIPC t \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" -apply (simp add: cancelIPC_def Let_def) -apply (wp hoare_vcg_conj_lift - setThreadState_not_st delete_one_tcbDomain_obj_at' cancelSignal_tcb_obj_at' - | wpc - | rule hoare_drop_imps - | simp add: getThreadReplySlot_def o_def if_fun_split)+ -done + apply (simp add: cancelIPC_def Let_def) + apply (wp hoare_vcg_conj_lift + delete_one_tcbDomain_obj_at' + | wpc + | rule hoare_drop_imps + | simp add: getThreadReplySlot_def o_def if_fun_split)+ + done lemma (in delete_one_conc_pre) cancelIPC_tcb_in_cur_domain': "\tcb_in_cur_domain' t'\ cancelIPC t \\_. tcb_in_cur_domain' t'\" @@ -1148,7 +1091,7 @@ text \The suspend operation, significant as called from delete\ lemma rescheduleRequired_weak_sch_act_wf: "\\\ rescheduleRequired \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" apply (simp add: rescheduleRequired_def setSchedulerAction_def) - apply (wp hoare_post_taut | simp add: weak_sch_act_wf_def)+ + apply (wp hoare_TrueI | simp add: weak_sch_act_wf_def)+ done lemma sts_weak_sch_act_wf[wp]: @@ -1156,7 +1099,7 @@ lemma sts_weak_sch_act_wf[wp]: \ (ksSchedulerAction s = SwitchToThread t \ runnable' st)\ setThreadState st t \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" - including no_pre + including classic_wp_pre apply (simp add: setThreadState_def) apply (wp rescheduleRequired_weak_sch_act_wf) apply (rule_tac Q="\_ s. 
weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp, simp) @@ -1217,191 +1160,61 @@ lemma setNotification_weak_sch_act_wf[wp]: lemmas ipccancel_weak_sch_act_wfs = weak_sch_act_wf_lift[OF _ setCTE_pred_tcb_at'] -lemma tcbSchedDequeue_corres': - "corres dc (is_etcb_at t) (tcb_at' t and valid_inQ_queues) (tcb_sched_action (tcb_sched_dequeue) t) (tcbSchedDequeue t)" - apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) - apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and valid_inQ_queues and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (wp, simp) - apply (case_tac queued) - defer - apply (simp add: unless_def when_def) - apply (rule corres_no_failI) - apply (wp) - apply (clarsimp simp: in_monad ethread_get_def get_etcb_def set_tcb_queue_def is_etcb_at_def state_relation_def gets_the_def gets_def get_def return_def bind_def assert_opt_def get_tcb_queue_def modify_def put_def) - apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") - prefer 2 - apply (force simp: tcb_sched_dequeue_def valid_inQ_queues_def - ready_queues_relation_def obj_at'_def inQ_def projectKO_eq project_inject) - apply (simp add: ready_queues_relation_def) - apply (simp add: unless_def when_def) - apply (rule corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (simp split del: if_split) - apply (rule corres_split_eqr) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split_eqr[OF getQueue_corres]) - apply (simp split del: if_split) - apply (subst bind_return_unit, rule corres_split[where r'=dc]) - apply (simp add: tcb_sched_dequeue_def) - apply (rule setQueue_corres) - apply (rule corres_split_noop_rhs) - apply (clarsimp, rule removeFromBitmap_corres_noop) - apply (simp add: dc_def[symmetric]) - apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] - apply (wp | simp)+ - done - -lemma setQueue_valid_inQ_queues: - "\valid_inQ_queues - and (\s. \t \ set ts. obj_at' (inQ d p) t s) - and K (distinct ts)\ - setQueue d p ts - \\_. valid_inQ_queues\" - apply (simp add: setQueue_def valid_inQ_queues_def) - apply wp - apply clarsimp - done - -lemma threadSet_valid_inQ_queues: - "\valid_inQ_queues and (\s. \d p. (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) - \ obj_at' (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) t s - \ t \ set (ksReadyQueues s (d, p)))\ - threadSet f t - \\rv. valid_inQ_queues\" - apply (simp add: threadSet_def) - apply wp - apply (simp add: valid_inQ_queues_def pred_tcb_at'_def) - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_inQ_queues_def pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def projectKOs) - apply (fastforce) - done - -(* reorder the threadSet before the setQueue, useful for lemmas that don't refer to bitmap *) -lemma setQueue_after_addToBitmap: - "(setQueue d p q >>= (\rv. (when P (addToBitmap d p)) >>= (\rv. threadSet f t))) = - (when P (addToBitmap d p) >>= (\rv. (threadSet f t) >>= (\rv. 
setQueue d p q)))" - apply (case_tac P, simp_all) - prefer 2 - apply (simp add: setQueue_after) - apply (simp add: setQueue_def when_def) - apply (subst oblivious_modify_swap) - apply (simp add: threadSet_def getObject_def setObject_def - loadObject_default_def bitmap_fun_defs - split_def projectKO_def2 alignCheck_assert - magnitudeCheck_assert updateObject_default_def) - apply (intro oblivious_bind, simp_all) - apply (clarsimp simp: bind_assoc) - done - -lemma tcbSchedEnqueue_valid_inQ_queues[wp]: - "\valid_inQ_queues\ tcbSchedEnqueue t \\_. valid_inQ_queues\" - apply (simp add: tcbSchedEnqueue_def setQueue_after_addToBitmap) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_inQ_queues and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued, simp_all add: unless_def)[1] - apply (wp setQueue_valid_inQ_queues threadSet_valid_inQ_queues threadGet_wp - hoare_vcg_const_Ball_lift - | simp add: inQ_def bitmap_fun_defs - | fastforce simp: valid_inQ_queues_def inQ_def obj_at'_def)+ - done - - (* prevents wp from splitting on the when; stronger technique than hoare_when_weak_wp - FIXME: possible to replace with hoare_when_weak_wp? - *) -definition - "removeFromBitmap_conceal d p q t \ when (null [x\q . x \ t]) (removeFromBitmap d p)" - -lemma removeFromBitmap_conceal_valid_inQ_queues[wp]: - "\ valid_inQ_queues \ removeFromBitmap_conceal d p q t \ \_. valid_inQ_queues \" - unfolding valid_inQ_queues_def removeFromBitmap_conceal_def - by (wp|clarsimp simp: bitmap_fun_defs)+ - -lemma rescheduleRequired_valid_inQ_queues[wp]: - "\valid_inQ_queues\ rescheduleRequired \\_. valid_inQ_queues\" - apply (simp add: rescheduleRequired_def) - apply wpsimp - done - -lemma sts_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setThreadState st t \\rv. valid_inQ_queues\" - apply (simp add: setThreadState_def) - apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - lemma updateObject_ep_inv: "\P\ updateObject (obj::endpoint) ko p q n \\rv. P\" by simp (rule updateObject_default_inv) -lemma sbn_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setBoundNotification ntfn t \\rv. valid_inQ_queues\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ +lemma asUser_tcbQueued_inv[wp]: + "\obj_at' (\tcb. P (tcbQueued tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbQueued tcb)) t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ done -lemma setEndpoint_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setEndpoint ptr ep \\rv. valid_inQ_queues\" - apply (unfold setEndpoint_def) - apply (rule setObject_ep_pre) - apply (simp add: valid_inQ_queues_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift setObject_queues_unchanged[OF updateObject_ep_inv]) - apply simp - done +crunches asUser + for valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps) -lemma set_ntfn_valid_inQ_queues[wp]: - "\valid_inQ_queues\ setNotification ptr ntfn \\rv. 
valid_inQ_queues\" - apply (unfold setNotification_def) - apply (rule setObject_ntfn_pre) - apply (simp add: valid_inQ_queues_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv | simp)+ - done +crunches set_thread_state + for in_correct_ready_q[wp]: in_correct_ready_q + (wp: crunch_wps ignore_del: set_thread_state_ext) -crunch valid_inQ_queues[wp]: cancelSignal valid_inQ_queues - (simp: updateObject_tcb_inv crunch_simps wp: crunch_wps) +crunches set_thread_state_ext + for ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps ignore_del: set_thread_state_ext) -lemma (in delete_one_conc_pre) cancelIPC_valid_inQ_queues[wp]: - "\valid_inQ_queues\ cancelIPC t \\_. valid_inQ_queues\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def) - apply (wp hoare_drop_imps delete_one_inQ_queues threadSet_valid_inQ_queues | wpc | simp add:if_apply_def2 Fun.comp_def)+ - apply (clarsimp simp: valid_inQ_queues_def inQ_def)+ - done +lemma set_thread_state_ready_qs_distinct[wp]: + "set_thread_state ref ts \ready_qs_distinct\" + unfolding set_thread_state_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) -lemma valid_queues_inQ_queues: - "Invariants_H.valid_queues s \ valid_inQ_queues s" - by (force simp: Invariants_H.valid_queues_def valid_inQ_queues_def obj_at'_def - valid_queues_no_bitmap_def) +lemma as_user_ready_qs_distinct[wp]: + "as_user tptr f \ready_qs_distinct\" + unfolding as_user_def + apply (wpsimp wp: set_object_wp) + by (clarsimp simp: ready_qs_distinct_def) -lemma asUser_tcbQueued_inv[wp]: - "\obj_at' (\tcb. P (tcbQueued tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbQueued tcb)) t'\" - apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) - apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ - done +lemma do_extended_op_pspace_aligned[wp]: + "do_extended_op f \pspace_aligned\" + by (wpsimp simp: do_extended_op_def) -lemma asUser_valid_inQ_queues[wp]: - "\valid_inQ_queues\ asUser t f \\rv. valid_inQ_queues\" - unfolding valid_inQ_queues_def Ball_def - apply (wpsimp wp: hoare_vcg_all_lift) - defer - apply (wp asUser_ksQ) - apply assumption - apply (simp add: inQ_def[abs_def] obj_at'_conj) - apply (rule hoare_convert_imp) - apply (wp asUser_ksQ) - apply wp - done +lemma do_extended_op_pspace_distinct[wp]: + "do_extended_op f \pspace_distinct\" + by (wpsimp simp: do_extended_op_def) + +context begin interpretation Arch . (* FIXME: arch_split *) + +crunches arch_post_cap_deletion + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + (wp: crunch_wps simp: crunch_simps) + +end + +crunches cancel_ipc + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + (wp: crunch_wps simp: crunch_simps) lemma (in delete_one) suspend_corres: "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) @@ -1424,19 +1237,19 @@ lemma (in delete_one) suspend_corres: apply (rule corres_return_trivial) apply (rule corres_split_nor[OF setThreadState_corres]) apply simp - apply (rule tcbSchedDequeue_corres') + apply (rule tcbSchedDequeue_corres, simp) apply wpsimp - apply wp - apply wpsimp - apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ - apply (rule hoare_post_imp[where Q = "\rv s. tcb_at t s \ is_etcb_at t s"]) - apply simp - apply (wp | simp)+ - apply (rule hoare_post_imp[where Q = "\rv s. 
tcb_at' t s \ valid_inQ_queues s"]) - apply (wpsimp simp: valid_queues_inQ_queues) - apply wp+ - apply (force simp: valid_sched_def tcb_at_is_etcb_at) - apply (clarsimp simp add: invs'_def valid_state'_def valid_queues_inQ_queues) + apply (wpsimp wp: sts_valid_objs') + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def valid_tcb_state'_def)+ + apply (rule hoare_post_imp[where Q = "\rv s. einvs s \ tcb_at t s"]) + apply (simp add: invs_implies invs_strgs valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct valid_sched_def) + apply wp + apply (rule hoare_post_imp[where Q = "\_ s. invs' s \ tcb_at' t s"]) + apply (fastforce simp: invs'_def valid_tcb_state'_def) + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ + apply fastforce + apply simp done lemma no_fail_switchFpuOwner[wp]: @@ -1447,10 +1260,7 @@ lemma no_fail_nativeThreadUsingFPU[wp]: "no_fail (\ and \) (X64.nativeThreadUsingFPU thread)" supply Collect_const[simp del] apply (simp only: X64.nativeThreadUsingFPU_def) - apply (rule no_fail_bind) - apply (simp add: Arch.no_fail_machine_op_lift) - apply simp - apply wp + apply (wpsimp wp: Arch.no_fail_machine_op_lift) done lemma (in delete_one) prepareThreadDelete_corres: @@ -1491,33 +1301,17 @@ lemma (in delete_one_conc_pre) cancelIPC_it[wp]: apply (wp hoare_drop_imps delete_one_it | wpc | simp add:if_apply_def2 Fun.comp_def)+ done -crunch ksQ: threadGet "\s. P (ksReadyQueues s p)" - -lemma tcbSchedDequeue_notksQ: - "\\s. t' \ set(ksReadyQueues s p)\ - tcbSchedDequeue t - \\_ s. t' \ set(ksReadyQueues s p)\" - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply wp+ - apply clarsimp - apply (rule_tac Q="\_ s. t' \ set(ksReadyQueues s p)" in hoare_post_imp) - apply (wp | clarsimp)+ - done - lemma rescheduleRequired_oa_queued: "\ (\s. P (obj_at' (\tcb. Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)) and sch_act_simple\ rescheduleRequired \\_ s. P (obj_at' (\tcb. Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)\" (is "\?OAQ t' p and sch_act_simple\ _ \_\") apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) - \ ?OAQ t' p s" in hoare_seq_ext) - including no_pre + apply (rule_tac Q'="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) \ ?OAQ t' p s" + in bind_wp) + including classic_wp_pre apply (wp | clarsimp)+ - apply (case_tac x) + apply (case_tac rv) apply (wp | clarsimp)+ done @@ -1556,198 +1350,9 @@ lemma setBoundNotification_oa_queued: by (simp add: not_obj_at' comp_def, wp hoare_convert_imp pos) qed -lemma tcbSchedDequeue_ksQ_distinct[wp]: - "\\s. distinct (ksReadyQueues s p)\ - tcbSchedDequeue t - \\_ s. distinct (ksReadyQueues s p)\" - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply wp+ - apply (rule_tac Q="\_ s. distinct (ksReadyQueues s p)" in hoare_post_imp) - apply (clarsimp | wp)+ - done - -lemma sts_valid_queues_partial: - "\Invariants_H.valid_queues and sch_act_simple\ - setThreadState st t - \\_ s. \t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. 
tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p))\" - (is "\_\ _ \\_ s. \t' d p. ?OA t' d p s \ ?DISTINCT d p s \") - apply (rule_tac Q="\_ s. (\t' d p. ?OA t' d p s) \ (\d p. ?DISTINCT d p s)" - in hoare_post_imp) - apply (clarsimp) - apply (rule hoare_conjI) - apply (rule_tac Q="\s. \t' d p. - ((t'\set(ksReadyQueues s (d, p)) - \ \ (sch_act_simple s)) - \ (obj_at'(\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ st_tcb_at' runnable' t' s))" in hoare_pre_imp) - apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def - pred_tcb_at'_def obj_at'_def inQ_def) - apply (rule hoare_vcg_all_lift)+ - apply (rule hoare_convert_imp) - including no_pre - apply (wp sts_ksQ setThreadState_oa_queued hoare_impI sts_pred_tcb_neq' - | clarsimp)+ - apply (rule_tac Q="\s. \d p. ?DISTINCT d p s \ sch_act_simple s" in hoare_pre_imp) - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (wp hoare_vcg_all_lift sts_ksQ) - apply (clarsimp) - done - -lemma tcbSchedDequeue_t_notksQ: - "\\s. t \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\ - tcbSchedDequeue t - \\_ s. t \ set (ksReadyQueues s (d, p))\" - apply (rule_tac Q="(\s. t \ set (ksReadyQueues s (d, p))) - or obj_at'(\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t" - in hoare_pre_imp, clarsimp) - apply (rule hoare_pre_disj) - apply (wp tcbSchedDequeue_notksQ)[1] - apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) - apply wp - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) - apply (wp threadGet_wp)+ - apply (auto simp: obj_at'_real_def ko_wp_at'_def) - done - -lemma sts_invs_minor'_no_valid_queues: - "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st - \ (st \ Inactive \ \ idle' st \ - st' \ Inactive \ \ idle' st')) t - and (\s. t = ksIdleThread s \ idle' st) - and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) - and sch_act_simple - and invs'\ - setThreadState st t - \\_ s. (\t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. 
tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ - valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ - bitmapQ_no_L1_orphans s \ - valid_pspace' s \ - sch_act_wf (ksSchedulerAction s) s \ - sym_refs (state_refs_of' s) \ - if_live_then_nonz_cap' s \ - if_unsafe_then_cap' s \ - valid_idle' s \ - valid_global_refs' s \ - valid_arch_state' s \ - valid_irq_node' (irq_node' s) s \ - valid_irq_handlers' s \ - valid_irq_states' s \ - valid_ioports' s \ - valid_machine_state' s \ - irqs_masked' s \ - valid_queues' s \ - ct_not_inQ s \ - ct_idle_or_in_cur_domain' s \ - pspace_domain_valid s \ - ksCurDomain s \ maxDomain \ - valid_dom_schedule' s \ - untyped_ranges_zero' s \ - cur_tcb' s \ - tcb_at' t s\" - apply (simp add: invs'_def valid_state'_def valid_queues_def) - apply (wp sts_valid_queues_partial sts_ksQ - setThreadState_oa_queued sts_st_tcb_at'_cases - irqs_masked_lift - valid_irq_node_lift - setThreadState_ct_not_inQ - sts_valid_bitmapQ_sch_act_simple - sts_valid_bitmapQ_no_L2_orphans_sch_act_simple - sts_valid_bitmapQ_no_L1_orphans_sch_act_simple - hoare_vcg_conj_lift hoare_vcg_imp_lift hoare_vcg_all_lift)+ - apply (clarsimp simp: disj_imp) - apply (intro conjI) - apply (clarsimp simp: valid_queues_def) - apply (rule conjI, clarsimp) - apply (drule valid_queues_no_bitmap_objD, assumption) - apply (clarsimp simp: inQ_def comp_def) - apply (rule conjI) - apply (erule obj_at'_weaken) - apply (simp add: inQ_def) - apply (clarsimp simp: st_tcb_at'_def) - apply (erule obj_at'_weaken) - apply (simp add: inQ_def) - apply (simp add: valid_queues_no_bitmap_def) - apply clarsimp - apply (clarsimp simp: st_tcb_at'_def) - apply (drule obj_at_valid_objs') - apply (clarsimp simp: valid_pspace'_def) - apply (clarsimp simp: valid_obj'_def valid_tcb'_def projectKOs) - subgoal - by (fastforce simp: valid_tcb_state'_def - split: Structures_H.thread_state.splits) - apply (clarsimp dest!: st_tcb_at_state_refs_ofD' - elim!: rsubst[where P=sym_refs] - intro!: ext) - apply (fastforce simp: valid_queues_def inQ_def pred_tcb_at' pred_tcb_at'_def - elim!: st_tcb_ex_cap'' obj_at'_weakenE)+ - done - -crunch ct_idle_or_in_cur_domain'[wp]: tcbSchedDequeue ct_idle_or_in_cur_domain' - -lemma tcbSchedDequeue_invs'_no_valid_queues: - "\\s. (\t' d p. - (t' \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s - \ (t' \ t \ st_tcb_at' runnable' t' s))) - \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ - valid_bitmapQ s \ - bitmapQ_no_L2_orphans s \ - bitmapQ_no_L1_orphans s \ - valid_pspace' s \ - sch_act_wf (ksSchedulerAction s) s \ - sym_refs (state_refs_of' s) \ - if_live_then_nonz_cap' s \ - if_unsafe_then_cap' s \ - valid_idle' s \ - valid_global_refs' s \ - valid_arch_state' s \ - valid_irq_node' (irq_node' s) s \ - valid_irq_handlers' s \ - valid_irq_states' s \ - valid_ioports' s \ - valid_machine_state' s \ - irqs_masked' s \ - valid_queues' s \ - ct_not_inQ s \ - ct_idle_or_in_cur_domain' s \ - pspace_domain_valid s \ - ksCurDomain s \ maxDomain \ - valid_dom_schedule' s \ - untyped_ranges_zero' s \ - cur_tcb' s \ - tcb_at' t s\ - tcbSchedDequeue t - \\_. 
invs' \" - apply (simp add: invs'_def valid_state'_def) - apply (wp tcbSchedDequeue_valid_queues_weak valid_irq_handlers_lift - valid_irq_node_lift valid_irq_handlers_lift' - tcbSchedDequeue_irq_states irqs_masked_lift cur_tcb_lift - untyped_ranges_zero_lift - | clarsimp simp add: cteCaps_of_def valid_queues_def o_def)+ - apply (rule conjI) - apply (fastforce simp: obj_at'_def inQ_def st_tcb_at'_def valid_queues_no_bitmap_except_def) - apply (rule conjI, clarsimp simp: correct_queue_def) - apply (fastforce simp: valid_pspace'_def intro: obj_at'_conjI - elim: valid_objs'_maxDomain valid_objs'_maxPriority) - done - -lemmas sts_tcbSchedDequeue_invs' = - sts_invs_minor'_no_valid_queues - tcbSchedDequeue_invs'_no_valid_queues +crunches tcbSchedDequeue + for ct_idle_or_in_cur_domain'[wp]: ct_idle_or_in_cur_domain' + (wp: crunch_wps) lemma asUser_sch_act_simple[wp]: "\sch_act_simple\ asUser s t \\_. sch_act_simple\" @@ -1759,11 +1364,14 @@ lemma (in delete_one_conc) suspend_invs'[wp]: "\invs' and sch_act_simple and tcb_at' t and (\s. t \ ksIdleThread s)\ ThreadDecls_H.suspend t \\rv. invs'\" apply (simp add: suspend_def) - apply (wp sts_tcbSchedDequeue_invs') - apply (simp add: updateRestartPC_def | strengthen no_refs_simple_strg')+ - prefer 2 - apply (wpsimp wp: hoare_drop_imps hoare_vcg_imp_lift' - | strengthen no_refs_simple_strg')+ + apply (wpsimp wp: sts_invs_minor' gts_wp' simp: updateRestartPC_def + | strengthen no_refs_simple_strg')+ + apply (rule_tac Q="\_. invs' and sch_act_simple and st_tcb_at' simple' t + and (\s. t \ ksIdleThread s)" + in hoare_post_imp) + apply clarsimp + apply wpsimp + apply (fastforce elim: pred_tcb'_weakenE) done lemma (in delete_one_conc_pre) suspend_tcb'[wp]: @@ -1807,109 +1415,6 @@ lemma (in delete_one_conc_pre) suspend_st_tcb_at': lemmas (in delete_one_conc_pre) suspend_makes_simple' = suspend_st_tcb_at' [where P=simple', simplified] -lemma valid_queues_not_runnable'_not_ksQ: - assumes "Invariants_H.valid_queues s" and "st_tcb_at' (Not \ runnable') t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" - using assms - apply - - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def pred_tcb_at'_def) - apply (erule_tac x=d in allE) - apply (erule_tac x=p in allE) - apply (clarsimp) - apply (drule(1) bspec) - apply (clarsimp simp: obj_at'_def) - done - -declare valid_queues_not_runnable'_not_ksQ[OF ByAssum, simp] - -lemma cancelSignal_queues[wp]: - "\Invariants_H.valid_queues and st_tcb_at' (Not \ runnable') t\ - cancelSignal t ae \\_. Invariants_H.valid_queues \" - apply (simp add: cancelSignal_def) - apply (wp sts_valid_queues) - apply (rule_tac Q="\_ s. \p. t \ set (ksReadyQueues s p)" in hoare_post_imp, simp) - apply (wp hoare_vcg_all_lift) - apply (wpc) - apply (wp)+ - apply (rule_tac Q="\_ s. Invariants_H.valid_queues s \ (\p. t \ set (ksReadyQueues s p))" in hoare_post_imp) - apply (clarsimp) - apply (wp) - apply (clarsimp) - done - -lemma (in delete_one_conc_pre) cancelIPC_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ - cancelIPC t \\rv. 
Invariants_H.valid_queues\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def - cong: Structures_H.thread_state.case_cong list.case_cong) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (rule hoare_pre) - apply (wpc - | wp hoare_vcg_conj_lift delete_one_queues threadSet_valid_queues - threadSet_valid_objs' sts_valid_queues setEndpoint_ksQ - hoare_vcg_all_lift threadSet_sch_act threadSet_weak_sch_act_wf - | simp add: o_def if_apply_def2 inQ_def - | rule hoare_drop_imps - | clarsimp simp: valid_tcb'_def tcb_cte_cases_def - elim!: pred_tcb'_weakenE)+ - apply (fastforce dest: valid_queues_not_runnable'_not_ksQ elim: pred_tcb'_weakenE) - done - -(* FIXME: move to Schedule_R *) -lemma tcbSchedDequeue_nonq[wp]: - "\Invariants_H.valid_queues and tcb_at' t and K (t = t')\ - tcbSchedDequeue t \\_ s. \d p. t' \ set (ksReadyQueues s (d, p))\" - apply (rule hoare_gen_asm) - apply (simp add: tcbSchedDequeue_def) - apply (wp threadGet_wp|simp)+ - apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def obj_at'_def projectKOs inQ_def) - done - -lemma sts_ksQ_oaQ: - "\Invariants_H.valid_queues\ - setThreadState st t - \\_ s. t \ set (ksReadyQueues s (d, p)) \ - obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\" - (is "\_\ _ \\_. ?POST\") - proof - - have RR: "\sch_act_simple and ?POST\ rescheduleRequired \\_. ?POST\" - apply (simp add: rescheduleRequired_def) - apply (wp) - apply (clarsimp) - apply (rule_tac - Q="(\s. action = ResumeCurrentThread \ action = ChooseNewThread) and ?POST" - in hoare_pre_imp, assumption) - apply (case_tac action) - apply (clarsimp)+ - apply (wp) - apply (clarsimp simp: sch_act_simple_def) - done - show ?thesis - apply (simp add: setThreadState_def) - apply (wp RR) - apply (rule_tac Q="\_. ?POST" in hoare_post_imp) - apply (clarsimp simp add: sch_act_simple_def) - apply (wp hoare_convert_imp) - apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (fastforce dest: bspec elim!: obj_at'_weakenE simp: inQ_def) - done - qed - -lemma (in delete_one_conc_pre) suspend_nonq: - "\Invariants_H.valid_queues and valid_objs' and tcb_at' t - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and (\s. t \ ksIdleThread s) and K (t = t')\ - suspend t - \\rv s. \d p. t' \ set (ksReadyQueues s (d, p))\" - apply (rule hoare_gen_asm) - apply (simp add: suspend_def) - unfolding updateRestartPC_def - apply (wp hoare_allI tcbSchedDequeue_t_notksQ sts_ksQ_oaQ) - apply wpsimp+ - done - lemma suspend_makes_inactive: "\K (t = t')\ suspend t \\rv. st_tcb_at' ((=) Inactive) t'\" apply (cases "t = t'", simp_all) @@ -1920,31 +1425,21 @@ lemma suspend_makes_inactive: declare threadSet_sch_act_sane [wp] declare sts_sch_act_sane [wp] -lemma tcbSchedEnqueue_ksQset_weak: - "\\s. t' \ set (ksReadyQueues s p)\ - tcbSchedEnqueue t - \\_ s. t' \ set (ksReadyQueues s p)\" (is "\?PRE\ _ \_\") - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift hoare_vcg_if_lift) - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, ((wp | clarsimp)+))+ - done - lemma tcbSchedEnqueue_sch_act_not_ct[wp]: "\\s. sch_act_not (ksCurThread s) s\ tcbSchedEnqueue t \\_ s. sch_act_not (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + by (rule hoare_weaken_pre, wps, wp, simp) lemma sts_sch_act_not_ct[wp]: "\\s. sch_act_not (ksCurThread s) s\ setThreadState st t \\_ s. 
sch_act_not (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + by (rule hoare_weaken_pre, wps, wp, simp) text \Cancelling all IPC in an endpoint or notification object\ lemma ep_cancel_corres_helper: - "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs) - ((\s. \t \ set list. tcb_at' t s) - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and Invariants_H.valid_queues and valid_queues' and valid_objs') + "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs and valid_queues + and pspace_aligned and pspace_distinct) + (valid_objs' and sym_heap_sched_pointers and valid_sched_pointers) (mapM_x (\t. do y \ set_thread_state t Structures_A.Restart; tcb_sched_action tcb_sched_enqueue t @@ -1953,28 +1448,36 @@ lemma ep_cancel_corres_helper: y \ setThreadState Structures_H.thread_state.Restart t; tcbSchedEnqueue t od) list)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (rule_tac Q'="\s. \t \ set list. tcb_at' t s" in corres_cross_add_guard) + apply (fastforce elim: tcb_at_cross) apply (rule_tac S="{t. (fst t = snd t) \ fst t \ set list}" in corres_mapM_x) apply clarsimp apply (rule corres_guard_imp) apply (subst bind_return_unit, rule corres_split[OF _ tcbSchedEnqueue_corres]) + apply simp + apply (rule corres_guard_imp [OF setThreadState_corres]) + apply simp + apply (simp add: valid_tcb_state_def) + apply simp apply simp - apply (rule corres_guard_imp [OF setThreadState_corres]) - apply simp - apply (simp add: valid_tcb_state_def) - apply simp - apply (wp sts_valid_queues)+ - apply (force simp: tcb_at_is_etcb_at) - apply (fastforce elim: obj_at'_weakenE) - apply ((wp hoare_vcg_const_Ball_lift | simp)+)[1] - apply (rule hoare_pre) - apply (wp hoare_vcg_const_Ball_lift - weak_sch_act_wf_lift_linear sts_st_tcb' setThreadState_not_st - sts_valid_queues tcbSchedEnqueue_not_st - | simp)+ - apply (auto elim: obj_at'_weakenE simp: valid_tcb_state'_def) + apply (wpsimp wp: sts_st_tcb_at') + apply (wpsimp wp: sts_valid_objs' | strengthen valid_objs'_valid_tcbs')+ + apply fastforce + apply (wpsimp wp: hoare_vcg_const_Ball_lift set_thread_state_runnable_valid_queues + sts_st_tcb_at' sts_valid_objs' + simp: valid_tcb_state'_def)+ done +crunches set_simple_ko + for ready_qs_distinct[wp]: ready_qs_distinct + and in_correct_ready_q[wp]: in_correct_ready_q + (rule: ready_qs_distinct_lift wp: crunch_wps) + lemma ep_cancel_corres: "corres dc (invs and valid_sched and ep_at ep) (invs' and ep_at' ep) (cancel_all_ipc ep) (cancelAllIPC ep)" @@ -1982,10 +1485,10 @@ proof - have P: "\list. corres dc (\s. (\t \ set list. tcb_at t s) \ valid_pspace s \ ep_at ep s - \ valid_etcbs s \ weak_valid_sched_action s) + \ valid_etcbs s \ weak_valid_sched_action s \ valid_queues s) (\s. (\t \ set list. tcb_at' t s) \ valid_pspace' s \ ep_at' ep s \ weak_sch_act_wf (ksSchedulerAction s) s - \ Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s) + \ valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s) (do x \ set_endpoint ep Structures_A.IdleEP; x \ mapM_x (\t. do y \ set_thread_state t Structures_A.Restart; @@ -2007,22 +1510,24 @@ proof - apply (rule ep_cancel_corres_helper) apply (rule mapM_x_wp') apply (wp weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (rule_tac R="\_ s. \x\set list. 
tcb_at' x s \ valid_objs' s" + apply (rule_tac R="\_ s. \x\set list. tcb_at' x s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s" in hoare_post_add) apply (rule mapM_x_wp') - apply (rule hoare_name_pre_state) - apply ((wp hoare_vcg_const_Ball_lift mapM_x_wp' - sts_valid_queues setThreadState_not_st sts_st_tcb' tcbSchedEnqueue_not_st - | clarsimp - | fastforce elim: obj_at'_weakenE simp: valid_tcb_state'_def)+)[2] - apply (rule hoare_name_pre_state) + apply ((wpsimp wp: hoare_vcg_const_Ball_lift mapM_x_wp' sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+)[3] + apply fastforce apply (wp hoare_vcg_const_Ball_lift set_ep_valid_objs' - | (clarsimp simp: valid_ep'_def) - | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def elim!: valid_objs_valid_tcbE))+ + | (clarsimp simp: valid_ep'_def) + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def + | strengthen valid_objs'_valid_tcbs'))+ + apply fastforce done show ?thesis apply (simp add: cancel_all_ipc_def cancelAllIPC_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ep_sp']) apply (rule corres_guard_imp [OF getEndpoint_corres], simp+) apply (case_tac epa, simp_all add: ep_relation_def @@ -2050,6 +1555,8 @@ lemma cancelAllSignals_corres: "corres dc (invs and valid_sched and ntfn_at ntfn) (invs' and ntfn_at' ntfn) (cancel_all_signals ntfn) (cancelAllSignals ntfn)" apply (simp add: cancel_all_signals_def cancelAllSignals_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ntfn_sp']) apply (rule corres_guard_imp [OF getNotification_corres]) apply simp+ @@ -2060,22 +1567,27 @@ lemma cancelAllSignals_corres: apply (rule corres_split[OF _ rescheduleRequired_corres]) apply (rule ep_cancel_corres_helper) apply (wp mapM_x_wp'[where 'b="det_ext state"] - weak_sch_act_wf_lift_linear setThreadState_not_st + weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ apply (rename_tac list) - apply (rule_tac R="\_ s. (\x\set list. tcb_at' x s) \ valid_objs' s" + apply (rule_tac R="\_ s. (\x\set list. 
tcb_at' x s) \ valid_objs' s + \ sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_objs' s + \ pspace_aligned' s \ pspace_distinct' s" in hoare_post_add) apply (rule mapM_x_wp') apply (rule hoare_name_pre_state) - apply (wpsimp wp: hoare_vcg_const_Ball_lift - sts_st_tcb' sts_valid_queues setThreadState_not_st - simp: valid_tcb_state'_def) - apply (wp hoare_vcg_const_Ball_lift set_ntfn_aligned' set_ntfn_valid_objs' - weak_sch_act_wf_lift_linear - | simp)+ - apply (clarsimp simp: invs'_def valid_state'_def invs_valid_pspace valid_obj_def valid_ntfn_def invs_weak_sch_act_wf valid_ntfn'_def valid_pspace'_def - valid_sched_def valid_sched_action_def valid_obj'_def projectKOs | erule obj_at_valid_objsE | drule ko_at_valid_objs')+ + apply (wpsimp wp: hoare_vcg_const_Ball_lift sts_st_tcb' sts_valid_objs' + simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+ + apply (wp hoare_vcg_const_Ball_lift set_ntfn_aligned' set_ntfn_valid_objs' + weak_sch_act_wf_lift_linear + | simp)+ + apply (clarsimp simp: invs'_def valid_state'_def invs_valid_pspace valid_obj_def valid_ntfn_def + invs_weak_sch_act_wf valid_ntfn'_def valid_pspace'_def valid_sched_def + valid_sched_action_def valid_obj'_def projectKOs invs_psp_aligned + invs_distinct valid_queues_ready_qs_distinct + | erule obj_at_valid_objsE | drule ko_at_valid_objs')+ done lemma ep'_Idle_case_helper: @@ -2128,9 +1640,8 @@ lemma cancel_all_invs'_helper: apply clarsimp apply (rule hoare_pre) apply (wp valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift valid_ioports_lift'' - hoare_vcg_const_Ball_lift untyped_ranges_zero_lift - sts_valid_queues sts_st_tcb' setThreadState_not_st - | simp add: cteCaps_of_def o_def)+ + hoare_vcg_const_Ball_lift untyped_ranges_zero_lift sts_st_tcb' + | simp add: cteCaps_of_def o_def)+ apply (unfold fun_upd_apply Invariants_H.tcb_st_refs_of'_simps) apply clarsimp apply (intro conjI) @@ -2138,7 +1649,7 @@ lemma cancel_all_invs'_helper: elim!: rsubst[where P=sym_refs] dest!: set_mono_suffix intro!: ext - | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE))+ + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def))+ done lemma ep_q_refs_max: @@ -2154,22 +1665,10 @@ lemma ep_q_refs_max: | case_tac ntfnptr)+ done -crunch ct' [wp]: setEndpoint "\s. P (ksCurThread s)" - (wp: setObject_ep_ct) - -crunch ct' [wp]: setNotification "\s. P (ksCurThread s)" - (wp: setObject_ntfn_ct) - -lemma tcbSchedEnqueue_cur_tcb'[wp]: - "\cur_tcb'\ tcbSchedEnqueue t \\_. cur_tcb'\" - by (simp add: tcbSchedEnqueue_def unless_def) - (wp threadSet_cur setQueue_cur | simp)+ - lemma rescheduleRequired_invs'[wp]: "\invs'\ rescheduleRequired \\rv. invs'\" apply (simp add: rescheduleRequired_def) apply (wp ssa_invs' | simp add: invs'_update_cnt | wpc)+ - apply (clarsimp simp: invs'_def valid_state'_def) done lemma invs_rct_ct_activatable': @@ -2296,6 +1795,7 @@ lemma rescheduleRequired_all_invs_but_ct_not_inQ: lemma cancelAllIPC_invs'[wp]: "\invs'\ cancelAllIPC ep_ptr \\rv. 
invs'\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) + apply (rule bind_wp[OF _ stateAssert_sp]) apply (wp rescheduleRequired_all_invs_but_ct_not_inQ cancel_all_invs'_helper hoare_vcg_const_Ball_lift valid_global_refs_lift' valid_arch_state_lift' @@ -2305,14 +1805,15 @@ lemma cancelAllIPC_invs'[wp]: prefer 2 apply assumption apply (rule hoare_strengthen_post [OF get_ep_sp']) + apply (rename_tac rv s) apply (clarsimp simp: invs'_def valid_state'_def valid_ep'_def) apply (frule obj_at_valid_objs', fastforce) apply (clarsimp simp: projectKOs valid_obj'_def) apply (rule conjI) - apply (case_tac r, simp_all add: valid_ep'_def)[1] + apply (case_tac rv, simp_all add: valid_ep'_def)[1] apply (rule conjI[rotated]) apply (drule(1) sym_refs_ko_atD') - apply (case_tac r, simp_all add: st_tcb_at_refs_of_rev')[1] + apply (case_tac rv, simp_all add: st_tcb_at_refs_of_rev')[1] apply (clarsimp elim!: if_live_state_refsE | drule(1) bspec | drule st_tcb_at_state_refs_ofD')+ apply (drule(2) ep_q_refs_max) @@ -2323,7 +1824,8 @@ lemma cancelAllIPC_invs'[wp]: lemma cancelAllSignals_invs'[wp]: "\invs'\ cancelAllSignals ntfn \\rv. invs'\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfna", simp_all) apply (wp, simp) apply (wp, simp) @@ -2357,12 +1859,14 @@ crunch valid_objs'[wp]: tcbSchedEnqueue valid_objs' (simp: unless_def valid_tcb'_def tcb_cte_cases_def) lemma cancelAllIPC_valid_objs'[wp]: - "\valid_objs'\ cancelAllIPC ep \\rv. valid_objs'\" + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllIPC ep \\rv. valid_objs'\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) apply (rule hoare_pre) apply (wp set_ep_valid_objs' setSchedulerAction_valid_objs') - apply (rule_tac Q="\rv s. valid_objs' s \ (\x\set (epQueue ep). tcb_at' x s)" + apply (rule_tac Q="\_ s. valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ (\x\set (epQueue ep). tcb_at' x s)" in hoare_post_imp) apply simp apply (simp add: Ball_def) @@ -2379,9 +1883,10 @@ lemma cancelAllIPC_valid_objs'[wp]: done lemma cancelAllSignals_valid_objs'[wp]: - "\valid_objs'\ cancelAllSignals ntfn \\rv. valid_objs'\" + "\valid_objs' and pspace_aligned' and pspace_distinct'\ cancelAllSignals ntfn \\rv. valid_objs'\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfna", simp_all) apply (wp, simp) apply (wp, simp) @@ -2433,19 +1938,17 @@ lemma setThreadState_not_tcb[wp]: "\ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\ setThreadState st t \\rv. ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\" - apply (simp add: setThreadState_def setQueue_def - rescheduleRequired_def tcbSchedEnqueue_def - unless_def bitmap_fun_defs - cong: scheduler_action.case_cong cong del: if_cong - | wp | wpcw)+ - done + by (wpsimp wp: isRunnable_inv threadGet_wp hoare_drop_imps + simp: setThreadState_def setQueue_def + rescheduleRequired_def tcbSchedEnqueue_def tcbQueuePrepend_def + unless_def bitmap_fun_defs)+ lemma tcbSchedEnqueue_unlive: "\ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p and tcb_at' t\ tcbSchedEnqueue t \\_. 
ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p\" - apply (simp add: tcbSchedEnqueue_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) apply (wp | simp add: setQueue_def bitmap_fun_defs)+ done @@ -2479,19 +1982,41 @@ lemma setObject_ko_wp_at': objBits_def[symmetric] ps_clear_upd in_magnitude_check v projectKOs) -lemma rescheduleRequired_unlive: - "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ - rescheduleRequired +lemma threadSet_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + threadSet f t \\rv. ko_wp_at' (Not \ live') p\" - apply (simp add: rescheduleRequired_def) - apply (wp | simp | wpc)+ - apply (simp add: tcbSchedEnqueue_def unless_def - threadSet_def setQueue_def threadGet_def) - apply (wp setObject_ko_wp_at getObject_tcb_wp - | simp add: objBits_simps' bitmap_fun_defs split del: if_split)+ - apply (clarsimp simp: o_def) - apply (drule obj_at_ko_at') - apply clarsimp + by (clarsimp simp: threadSet_def valid_def getObject_def projectKOs + setObject_def in_monad loadObject_default_def + ko_wp_at'_def split_def in_magnitude_check + objBits_simps' updateObject_default_def + ps_clear_upd) + +lemma tcbSchedEnqueue_unlive_other: + "\ko_wp_at' (Not \ live') p and K (p \ t)\ + tcbSchedEnqueue t + \\_. ko_wp_at' (Not \ live') p\" + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def setQueue_def) + apply (wpsimp wp: threadGet_wp threadSet_unlive_other simp: bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (frule (1) tcbQueueHead_ksReadyQueues) + apply (drule_tac x=p in spec) + apply (fastforce dest!: inQ_implies_tcbQueueds_of + simp: tcbQueueEmpty_def ko_wp_at'_def opt_pred_def opt_map_def projectKOs + split: option.splits) + done + +lemma rescheduleRequired_unlive[wp]: + "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ + rescheduleRequired + \\_. ko_wp_at' (Not \ live') p\" + supply comp_apply[simp del] + unfolding rescheduleRequired_def + apply (wpsimp wp: tcbSchedEnqueue_unlive_other) done lemmas setEndpoint_ko_wp_at' @@ -2501,7 +2026,8 @@ lemma cancelAllIPC_unlive: "\valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s)\ cancelAllIPC ep \\rv. ko_wp_at' (Not \ live') ep\" apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp']) apply (rule hoare_pre) apply (wp cancelAll_unlive_helper setEndpoint_ko_wp_at' hoare_vcg_const_Ball_lift rescheduleRequired_unlive @@ -2520,7 +2046,8 @@ lemma cancelAllSignals_unlive: \ obj_at' (\ko. ntfnBoundTCB ko = None) ntfnptr s\ cancelAllSignals ntfnptr \\rv. 
ko_wp_at' (Not \ live') ntfnptr\" apply (simp add: cancelAllSignals_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (case_tac "ntfnObj ntfn", simp_all add: setNotification_def) apply wp apply (fastforce simp: obj_at'_real_def projectKOs @@ -2580,30 +2107,27 @@ lemma cancelBadgedSends_filterM_helper': apply wp apply clarsimp apply (clarsimp simp: filterM_append bind_assoc simp del: set_append distinct_append) - apply (drule spec, erule hoare_seq_ext[rotated]) - apply (rule hoare_seq_ext [OF _ gts_inv']) + apply (drule spec, erule bind_wp_fwd) + apply (rule bind_wp [OF _ gts_inv']) apply (rule hoare_pre) apply (wp valid_irq_node_lift hoare_vcg_const_Ball_lift sts_sch_act' sch_act_wf_lift valid_irq_handlers_lift'' cur_tcb_lift irqs_masked_lift - sts_st_tcb' sts_valid_queues setThreadState_not_st valid_ioports_lift'' - tcbSchedEnqueue_not_st + sts_st_tcb' valid_ioports_lift'' untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ apply (frule insert_eqD, frule state_refs_of'_elemD) apply (clarsimp simp: valid_tcb_state'_def st_tcb_at_refs_of_rev') apply (frule pred_tcb_at') apply (rule conjI[rotated], blast) - apply clarsimp + apply (clarsimp simp: valid_pspace'_def cong: conj_cong) apply (intro conjI) - apply (clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE dest!: st_tcb_ex_cap'') - apply (fastforce dest!: st_tcb_ex_cap'') + apply (fastforce simp: valid_tcb'_def dest!: st_tcb_ex_cap'') apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) apply (erule delta_sym_refs) - apply (fastforce elim!: obj_atE' - simp: state_refs_of'_def projectKOs tcb_bound_refs'_def + by (fastforce elim!: obj_atE' + simp: state_refs_of'_def projectKOs tcb_bound_refs'_def subsetD symreftype_inverse' - split: if_split_asm)+ - done + split: if_split_asm)+ lemmas cancelBadgedSends_filterM_helper = spec [where x=Nil, OF cancelBadgedSends_filterM_helper', simplified] @@ -2613,12 +2137,13 @@ lemma cancelBadgedSends_invs[wp]: shows "\invs'\ cancelBadgedSends epptr badge \\rv. invs'\" apply (simp add: cancelBadgedSends_def) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp[OF _ stateAssert_sp]) + apply (rule bind_wp [OF _ get_ep_sp'], rename_tac ep) apply (case_tac ep, simp_all) apply ((wp | simp)+)[2] apply (subst bind_assoc [where g="\_. rescheduleRequired", symmetric])+ - apply (rule hoare_seq_ext + apply (rule bind_wp [OF rescheduleRequired_all_invs_but_ct_not_inQ]) apply (simp add: list_case_return cong: list.case_cong) apply (rule hoare_pre, wp valid_irq_node_lift irqs_masked_lift) @@ -2646,10 +2171,21 @@ lemma cancelBadgedSends_invs[wp]: crunch state_refs_of[wp]: tcb_sched_action "\s. 
P (state_refs_of s)" (ignore_del: tcb_sched_action) +lemma setEndpoint_valid_tcbs'[wp]: + "setEndpoint ePtr val \valid_tcbs'\" + unfolding setEndpoint_def + supply projectKOs[simp] + apply (wpsimp wp: setObject_valid_tcbs'[where P=\]) + apply (clarsimp simp: updateObject_default_def monad_simps) + apply fastforce + done + lemma cancelBadgedSends_corres: "corres dc (invs and valid_sched and ep_at epptr) (invs' and ep_at' epptr) (cancel_badged_sends epptr bdg) (cancelBadgedSends epptr bdg)" apply (simp add: cancel_badged_sends_def cancelBadgedSends_def) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) apply (rule corres_guard_imp) apply (rule corres_split[OF getEndpoint_corres _ get_simple_ko_sp get_ep_sp', where Q="invs and valid_sched" and Q'=invs']) @@ -2661,9 +2197,13 @@ lemma cancelBadgedSends_corres: apply (simp add: ep_relation_def) apply (rule corres_split_eqr[OF _ _ _ hoare_post_add[where R="\_. valid_objs'"]]) apply (rule_tac S="(=)" - and Q="\xs s. (\x \ set xs. (epptr, TCBBlockedSend) \ state_refs_of s x) \ distinct xs \ valid_etcbs s" - and Q'="\xs s. (\x \ set xs. tcb_at' x s) \ weak_sch_act_wf (ksSchedulerAction s) s \ Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s" - in corres_mapM_list_all2[where r'="(=)"], + and Q="\xs s. (\x \ set xs. (epptr, TCBBlockedSend) \ state_refs_of s x) \ + distinct xs \ valid_etcbs s \ + in_correct_ready_q s \ ready_qs_distinct s \ + pspace_aligned s \ pspace_distinct s" + and Q'="\_ s. valid_objs' s \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in corres_mapM_list_all2[where r'="(=)"], simp_all add: list_all2_refl)[1] apply (clarsimp simp: liftM_def[symmetric] o_def) apply (rule corres_guard_imp) @@ -2673,59 +2213,61 @@ lemma cancelBadgedSends_corres: apply (clarsimp simp: o_def dc_def[symmetric] liftM_def) apply (rule corres_split[OF setThreadState_corres]) apply simp - apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) apply (rule corres_trivial) apply simp apply wp+ - apply simp - apply (wp sts_valid_queues gts_st_tcb_at)+ + apply simp + apply (wp sts_st_tcb_at' gts_st_tcb_at sts_valid_objs' + | strengthen valid_objs'_valid_tcbs')+ apply (clarsimp simp: valid_tcb_state_def tcb_at_def st_tcb_def2 st_tcb_at_refs_of_rev dest!: state_refs_of_elemD elim!: tcb_at_is_etcb_at[rotated]) - apply (simp add: is_tcb_def) - apply simp + apply (simp add: valid_tcb_state'_def) apply (wp hoare_vcg_const_Ball_lift gts_wp | clarsimp)+ - apply (wp hoare_vcg_imp_lift sts_st_tcb' sts_valid_queues + apply (wp hoare_vcg_imp_lift sts_st_tcb' sts_valid_objs' | clarsimp simp: valid_tcb_state'_def)+ apply (rule corres_split[OF _ rescheduleRequired_corres]) apply (rule setEndpoint_corres) apply (simp split: list.split add: ep_relation_def) apply (wp weak_sch_act_wf_lift_linear)+ - apply (wp gts_st_tcb_at hoare_vcg_imp_lift mapM_wp' - sts_st_tcb' sts_valid_queues - set_thread_state_runnable_weak_valid_sched_action - | clarsimp simp: valid_tcb_state'_def)+ - apply (wp hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear set_ep_valid_objs' - | simp)+ + apply (wpsimp wp: mapM_wp' set_thread_state_runnable_weak_valid_sched_action + simp: valid_tcb_state'_def) + apply ((wpsimp wp: hoare_vcg_imp_lift mapM_wp' sts_valid_objs' simp: valid_tcb_state'_def + | strengthen valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: set_ep_valid_objs')+ apply (clarsimp simp: conj_comms) apply (frule sym_refs_ko_atD, 
clarsimp+) apply (rule obj_at_valid_objsE, assumption+, clarsimp+) apply (clarsimp simp: valid_obj_def valid_ep_def valid_sched_def valid_sched_action_def) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) apply (rule conjI, erule obj_at_weakenE, clarsimp simp: is_ep) apply (clarsimp simp: st_tcb_at_refs_of_rev) + apply (rule conjI, fastforce) + apply clarsimp apply (drule(1) bspec, drule st_tcb_at_state_refs_ofD, clarsimp) apply (simp add: set_eq_subset) apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI]) apply (drule ko_at_valid_objs', clarsimp) apply (simp add: projectKOs) - apply (clarsimp simp: valid_obj'_def valid_ep'_def invs_weak_sch_act_wf - invs'_def valid_state'_def) + apply (fastforce simp: valid_obj'_def valid_ep'_def invs_weak_sch_act_wf + invs'_def valid_state'_def) done +crunches updateRestartPC + for tcb_at'[wp]: "tcb_at' t" + (simp: crunch_simps) + lemma suspend_unqueued: "\\\ suspend t \\rv. obj_at' (Not \ tcbQueued) t\" - apply (simp add: suspend_def unless_def tcbSchedDequeue_def) - apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift) - apply (simp add: threadGet_def| wp getObject_tcb_wp)+ - apply (rule hoare_strengthen_post, rule hoare_post_taut) - apply (fastforce simp: obj_at'_def projectKOs) - apply (rule hoare_post_taut) - apply wp+ - done - -crunch unqueued: prepareThreadDelete "obj_at' (\a. \ tcbQueued a) t" -crunch inactive: prepareThreadDelete "st_tcb_at' ((=) Inactive) t'" -crunch nonq: prepareThreadDelete " \s. \d p. t' \ set (ksReadyQueues s (d, p))" + unfolding suspend_def + by (wpsimp simp: comp_def wp: tcbSchedDequeue_not_tcbQueued) + +crunches prepareThreadDelete + for unqueued: "obj_at' (\a. \ tcbQueued a) t" + and inactive: "st_tcb_at' ((=) Inactive) t'" end end diff --git a/proof/refine/X64/Ipc_R.thy b/proof/refine/X64/Ipc_R.thy index b8bd0758fb..502c5429f3 100644 --- a/proof/refine/X64/Ipc_R.thy +++ b/proof/refine/X64/Ipc_R.thy @@ -13,9 +13,10 @@ context begin interpretation Arch . (*FIXME: arch_split*) lemmas lookup_slot_wrapper_defs'[simp] = lookupSourceSlot_def lookupTargetSlot_def lookupPivotSlot_def -lemma getMessageInfo_corres: "corres ((=) \ message_info_map) - (tcb_at t) (tcb_at' t) - (get_message_info t) (getMessageInfo t)" +lemma getMessageInfo_corres: + "corres ((=) \ message_info_map) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_message_info t) (getMessageInfo t)" apply (rule corres_guard_imp) apply (unfold get_message_info_def getMessageInfo_def fun_app_def) apply (simp add: X64_H.msgInfoRegister_def @@ -278,11 +279,7 @@ lemmas unifyFailure_discard2 lemma deriveCap_not_null: "\\\ deriveCap slot cap \\rv. K (rv \ NullCap \ cap \ NullCap)\,-" apply (simp add: deriveCap_def split del: if_split) - apply (case_tac cap) - apply (simp_all add: Let_def isCap_simps) - apply wp - apply simp - done + by (case_tac cap; wpsimp simp: isCap_simps) lemma deriveCap_derived_foo: "\\s. \cap'. (cte_wp_at' (\cte. badge_derived' cap (cteCap cte) @@ -320,7 +317,7 @@ lemma cteInsert_cte_wp_at: cteInsert cap src dest \\uu. cte_wp_at' (\c. P (cteCap c)) p\" apply (simp add: cteInsert_def) - apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp static_imp_wp + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp | clarsimp simp: comp_def | unfold setUntypedCapAsFull_def)+ apply (drule cte_at_cte_wp_atD) @@ -364,7 +361,7 @@ lemma cteInsert_weak_cte_wp_at3: else cte_wp_at' (\c. P (cteCap c)) p s\ cteInsert cap src dest \\uu. 
cte_wp_at' (\c. P (cteCap c)) p\" - by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp | clarsimp simp: comp_def cteInsert_def | unfold setUntypedCapAsFull_def | auto simp: cte_wp_at'_def dest!: imp)+ @@ -486,7 +483,7 @@ next apply (rule_tac Q' ="\cap' s. (cap'\ cap.NullCap \ cte_wp_at (is_derived (cdt s) (a, b) cap') (a, b) s \ QM s cap')" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption @@ -498,13 +495,13 @@ next apply (rule_tac Q' ="\cap' s. (cap'\ capability.NullCap \ cte_wp_at' (\c. is_derived' (ctes_of s) (cte_map (a, b)) cap' (cteCap c)) (cte_map (a, b)) s \ QM s cap')" for QM - in hoare_post_imp_R) + in hoare_strengthen_postE_R) prefer 2 apply clarsimp apply assumption apply (subst imp_conjR) apply (rule hoare_vcg_conj_liftE_R) - apply (rule hoare_post_imp_R[OF deriveCap_derived]) + apply (rule hoare_strengthen_postE_R[OF deriveCap_derived]) apply (clarsimp simp:cte_wp_at_ctes_of) apply (wp deriveCap_derived_foo) apply (clarsimp simp: cte_wp_at_caps_of_state remove_rights_def @@ -584,7 +581,7 @@ lemma cteInsert_cte_cap_to': apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of) apply (rule_tac x = "cref" in exI) apply (rule conjI) @@ -608,7 +605,7 @@ lemma cteInsert_assume_Null: apply (rule hoare_name_pre_state) apply (erule impCE) apply (simp add: cteInsert_def) - apply (rule hoare_seq_ext[OF _ getCTE_sp])+ + apply (rule bind_wp[OF _ getCTE_sp])+ apply (rule hoare_name_pre_state) apply (clarsimp simp: cte_wp_at_ctes_of) apply (erule hoare_pre(1)) @@ -627,7 +624,7 @@ lemma cteInsert_weak_cte_wp_at2: apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of weak) apply auto done @@ -660,11 +657,11 @@ lemma transferCapsToSlots_presM: apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift | assumption | wpc)+ apply (rule cteInsert_assume_Null) - apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' static_imp_wp) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift static_imp_wp)+ + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at static_imp_wp + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp deriveCap_derived_foo)+ apply (thin_tac "\slots. PROP P slots" for P) apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def @@ -713,8 +710,7 @@ lemma transferCapsToSlots_mdb[wp]: \ transferCaps_srcs caps s\ transferCapsToSlots ep buffer n caps slots mi \\rv. 
valid_mdb'\" - apply (wp transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) - apply clarsimp + apply (wpsimp wp: transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) apply (frule valid_capAligned) apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def badge_derived'_def) apply wp @@ -764,14 +760,6 @@ lemma tcts_sch_act[wp]: \\rv s. sch_act_wf (ksSchedulerAction s) s\" by (wp sch_act_wf_lift tcb_in_cur_domain'_lift transferCapsToSlots_pres1) -lemma tcts_vq[wp]: - "\Invariants_H.valid_queues\ transferCapsToSlots ep buffer n caps slots mi \\rv. Invariants_H.valid_queues\" - by (wp valid_queues_lift transferCapsToSlots_pres1) - -lemma tcts_vq'[wp]: - "\valid_queues'\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_queues'\" - by (wp valid_queues_lift' transferCapsToSlots_pres1) - crunch state_refs_of' [wp]: setExtraBadge "\s. P (state_refs_of' s)" lemma tcts_state_refs_of'[wp]: @@ -860,7 +848,7 @@ lemma transferCapsToSlots_irq_handlers[wp]: and transferCaps_srcs caps\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_irq_handlers'\" - apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) + apply (wpsimp wp: transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) apply (clarsimp simp: is_derived'_def cte_wp_at_ctes_of badge_derived'_def) apply (erule(2) valid_irq_handlers_ctes_ofD) apply wp @@ -893,7 +881,7 @@ lemma transferCapsToSlots_ioports'[wp]: and transferCaps_srcs caps\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_ioports'\" - apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) + apply (wpsimp wp: transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) apply (clarsimp simp: valid_ioports'_derivedD) apply wp apply (clarsimp simp:cte_wp_at_ctes_of | intro ballI conjI)+ @@ -989,8 +977,8 @@ lemma tcts_zero_ranges[wp]: \ transferCaps_srcs caps s\ transferCapsToSlots ep buffer n caps slots mi \\rv. untyped_ranges_zero'\" - apply (wp transferCapsToSlots_presM[where emx=True and vo=True - and drv=True and pad=True]) + apply (wpsimp wp: transferCapsToSlots_presM[where emx=True and vo=True + and drv=True and pad=True]) apply (clarsimp simp: cte_wp_at_ctes_of) apply (simp add: cteCaps_of_def) apply (rule hoare_pre, wp untyped_ranges_zero_lift) @@ -1011,6 +999,11 @@ crunch ksDomScheduleIdx[wp]: setExtraBadge "\s. P (ksDomScheduleIdx s)" crunch ksDomSchedule[wp]: transferCapsToSlots "\s. P (ksDomSchedule s)" crunch ksDomScheduleIdx[wp]: transferCapsToSlots "\s. P (ksDomScheduleIdx s)" +crunches transferCapsToSlots + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift) lemma transferCapsToSlots_invs[wp]: "\\s. 
invs' s \ distinct slots @@ -1069,7 +1062,7 @@ lemma transferCaps_corres: apply (rule corres_rel_imp, rule transferCapsToSlots_corres, simp_all add: split_def)[1] apply (case_tac info, simp) - apply (wp hoare_vcg_all_lift get_rs_cte_at static_imp_wp + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp | simp only: ball_conj_distrib)+ apply (simp add: cte_map_def tcb_cnode_index_def split_def) apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 @@ -1228,7 +1221,7 @@ lemmas copyMRs_typ_at_lifts[wp] = typ_at_lifts [OF copyMRs_typ_at'] lemma copy_mrs_invs'[wp]: "\ invs' and tcb_at' s and tcb_at' r \ copyMRs s sb r rb n \\rv. invs' \" - including no_pre + including classic_wp_pre apply (simp add: copyMRs_def) apply (wp dmo_invs' no_irq_mapM no_irq_storeWord| simp add: split_def) @@ -1264,9 +1257,6 @@ crunch aligned'[wp]: setMessageInfo pspace_aligned' crunch distinct'[wp]: setMessageInfo pspace_distinct' (wp: crunch_wps simp: crunch_simps) -crunch valid_objs'[wp]: storeWordUser valid_objs' -crunch valid_pspace'[wp]: storeWordUser valid_pspace' - lemma set_mrs_valid_objs' [wp]: "\valid_objs'\ setMRs t a msgs \\rv. valid_objs'\" apply (simp add: setMRs_def zipWithM_x_mapM split_def) @@ -1276,18 +1266,12 @@ lemma set_mrs_valid_objs' [wp]: crunch valid_objs'[wp]: copyMRs valid_objs' (wp: crunch_wps simp: crunch_simps) -crunch valid_queues'[wp]: asUser "Invariants_H.valid_queues'" - (simp: crunch_simps wp: hoare_drop_imps) - - lemma setMRs_invs_bits[wp]: "\valid_pspace'\ setMRs t buf mrs \\rv. valid_pspace'\" "\\s. sch_act_wf (ksSchedulerAction s) s\ setMRs t buf mrs \\rv s. sch_act_wf (ksSchedulerAction s) s\" "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ setMRs t buf mrs \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ setMRs t buf mrs \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ setMRs t buf mrs \\rv. valid_queues'\" "\\s. P (state_refs_of' s)\ setMRs t buf mrs \\rv s. P (state_refs_of' s)\" @@ -1304,8 +1288,6 @@ lemma copyMRs_invs_bits[wp]: "\valid_pspace'\ copyMRs s sb r rb n \\rv. valid_pspace'\" "\\s. sch_act_wf (ksSchedulerAction s) s\ copyMRs s sb r rb n \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ copyMRs s sb r rb n \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ copyMRs s sb r rb n \\rv. valid_queues'\" "\\s. P (state_refs_of' s)\ copyMRs s sb r rb n \\rv s. 
P (state_refs_of' s)\" @@ -1502,7 +1484,7 @@ lemma doNormalTransfer_corres: hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' hoare_vcg_const_Ball_lift lookupExtraCaps_length | simp add: if_apply_def2)+) - apply (wp static_imp_wp | strengthen valid_msg_length_strengthen)+ + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ apply clarsimp apply auto done @@ -1563,17 +1545,17 @@ lemma msgFromLookupFailure_map[simp]: by (cases lf, simp_all add: lookup_failure_map_def msgFromLookupFailure_def) lemma asUser_getRestartPC_corres: - "corres (=) (tcb_at t) (tcb_at' t) - (as_user t getRestartPC) (asUser t getRestartPC)" + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t getRestartPC) (asUser t getRestartPC)" apply (rule asUser_corres') apply (rule corres_Id, simp, simp) apply (rule no_fail_getRestartPC) done lemma asUser_mapM_getRegister_corres: - "corres (=) (tcb_at t) (tcb_at' t) - (as_user t (mapM getRegister regs)) - (asUser t (mapM getRegister regs))" + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (mapM getRegister regs)) + (asUser t (mapM getRegister regs))" apply (rule asUser_corres') apply (rule corres_Id [OF refl refl]) apply (rule no_fail_mapM) @@ -1581,9 +1563,9 @@ lemma asUser_mapM_getRegister_corres: done lemma makeArchFaultMessage_corres: - "corres (=) (tcb_at t) (tcb_at' t) - (make_arch_fault_msg f t) - (makeArchFaultMessage (arch_fault_map f) t)" + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (make_arch_fault_msg f t) + (makeArchFaultMessage (arch_fault_map f) t)" apply (cases f, clarsimp simp: makeArchFaultMessage_def split: arch_fault.split) apply (rule corres_guard_imp) apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) @@ -1592,9 +1574,9 @@ lemma makeArchFaultMessage_corres: done lemma makeFaultMessage_corres: - "corres (=) (tcb_at t) (tcb_at' t) - (make_fault_msg ft t) - (makeFaultMessage (fault_map ft) t)" + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (make_fault_msg ft t) + (makeFaultMessage (fault_map ft) t)" apply (cases ft, simp_all add: makeFaultMessage_def split del: if_split) apply (rule corres_guard_imp) apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) @@ -1630,18 +1612,18 @@ lemmas threadget_fault_corres = lemma doFaultTransfer_corres: "corres dc (obj_at (\ko. \tcb ft. ko = TCB tcb \ tcb_fault tcb = Some ft) sender - and tcb_at receiver and case_option \ in_user_frame recv_buf) - (tcb_at' sender and tcb_at' receiver and - case_option \ valid_ipc_buffer_ptr' recv_buf) + and tcb_at receiver and case_option \ in_user_frame recv_buf + and pspace_aligned and pspace_distinct) + (case_option \ valid_ipc_buffer_ptr' recv_buf) (do_fault_transfer badge sender receiver recv_buf) (doFaultTransfer badge sender receiver recv_buf)" apply (clarsimp simp: do_fault_transfer_def doFaultTransfer_def split_def X64_H.badgeRegister_def badge_register_def) apply (rule_tac Q="\fault. K (\f. fault = Some f) and tcb_at sender and tcb_at receiver and - case_option \ in_user_frame recv_buf" - and Q'="\fault'. tcb_at' sender and tcb_at' receiver and - case_option \ valid_ipc_buffer_ptr' recv_buf" + case_option \ in_user_frame recv_buf and + pspace_aligned and pspace_distinct" + and Q'="\fault'. 
case_option \ valid_ipc_buffer_ptr' recv_buf" in corres_underlying_split) apply (rule corres_guard_imp) apply (rule threadget_fault_corres) @@ -1770,17 +1752,10 @@ crunch ifunsafe[wp]: doIPCTransfer "if_unsafe_then_cap'" crunch iflive[wp]: doIPCTransfer "if_live_then_nonz_cap'" (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM ball_conj_distrib ) -lemma valid_pspace_valid_objs'[elim!]: - "valid_pspace' s \ valid_objs' s" - by (simp add: valid_pspace'_def) crunch vp[wp]: doIPCTransfer "valid_pspace'" (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' wp: transferCapsToSlots_vp simp:ball_conj_distrib ) crunch sch_act_wf[wp]: doIPCTransfer "\s. sch_act_wf (ksSchedulerAction s) s" (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) -crunch vq[wp]: doIPCTransfer "Invariants_H.valid_queues" - (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) -crunch vq'[wp]: doIPCTransfer "valid_queues'" - (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) crunch state_refs_of[wp]: doIPCTransfer "\s. P (state_refs_of' s)" (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) crunch ct[wp]: doIPCTransfer "cur_tcb'" @@ -1806,12 +1781,12 @@ declare asUser_global_refs' [wp] lemma lec_valid_cap' [wp]: "\valid_objs'\ lookupExtraCaps thread xa mi \\rv s. (\x\set rv. s \' fst x)\, -" - apply (rule hoare_pre, rule hoare_post_imp_R) + apply (rule hoare_pre, rule hoare_strengthen_postE_R) apply (rule hoare_vcg_conj_lift_R[where R=valid_objs' and S="\_. valid_objs'"]) apply (rule lookupExtraCaps_srcs) apply wp apply (clarsimp simp: cte_wp_at_ctes_of) - apply (fastforce elim: ctes_of_valid') + apply fastforce apply simp done @@ -1870,7 +1845,7 @@ lemma sanitise_register_corres: sanitiseOrFlags_def sanitiseAndFlags_def) lemma handle_fault_reply_registers_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (do t' \ arch_get_sanitise_register_info t; y \ as_user t (zipWithM_x @@ -1898,7 +1873,7 @@ lemma handle_fault_reply_registers_corres: lemma handleFaultReply_corres: "ft' = fault_map ft \ - corres (=) (tcb_at t) (tcb_at' t) + corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (handle_fault_reply ft t label msg) (handleFaultReply ft' t label msg)" apply (cases ft) @@ -1941,18 +1916,6 @@ lemma getThreadCallerSlot_inv: "\P\ getThreadCallerSlot t \\_. P\" by (simp add: getThreadCallerSlot_def, wp) -lemma deleteCallerCap_ct_not_ksQ: - "\invs' and ct_in_state' simple' and sch_act_sane - and (\s. ksCurThread s \ set (ksReadyQueues s p))\ - deleteCallerCap t - \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: deleteCallerCap_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) - apply (wp getThreadCallerSlot_inv cteDeleteOne_ct_not_ksQ getCTE_wp) - apply (clarsimp simp: cte_wp_at_ctes_of) - done - -crunch tcb_at'[wp]: unbindNotification "tcb_at' x" - lemma finaliseCapTrue_standin_tcb_at' [wp]: "\tcb_at' x\ finaliseCapTrue_standin cap v2 \\_. tcb_at' x\" apply (simp add: finaliseCapTrue_standin_def Let_def) @@ -2109,39 +2072,11 @@ lemma cteDeleteOne_weak_sch_act[wp]: crunch weak_sch_act_wf[wp]: emptySlot "\s. 
weak_sch_act_wf (ksSchedulerAction s) s" crunch pred_tcb_at'[wp]: handleFaultReply "pred_tcb_at' proj P t" -crunch valid_queues[wp]: handleFaultReply "Invariants_H.valid_queues" -crunch valid_queues'[wp]: handleFaultReply "valid_queues'" crunch tcb_in_cur_domain'[wp]: handleFaultReply "tcb_in_cur_domain' t" crunch sch_act_wf[wp]: unbindNotification "\s. sch_act_wf (ksSchedulerAction s) s" (wp: sbn_sch_act') -crunch valid_queues'[wp]: cteDeleteOne valid_queues' - (simp: crunch_simps unless_def inQ_def - wp: crunch_wps sts_st_tcb' getObject_inv loadObject_default_inv - threadSet_valid_queues' rescheduleRequired_valid_queues'_weak) - -lemma cancelSignal_valid_queues'[wp]: - "\valid_queues'\ cancelSignal t ntfn \\rv. valid_queues'\" - apply (simp add: cancelSignal_def) - apply (rule hoare_pre) - apply (wp getNotification_wp| wpc | simp)+ - done - -lemma cancelIPC_valid_queues'[wp]: - "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) \ cancelIPC t \\rv. valid_queues'\" - apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def locateSlot_conv liftM_def) - apply (rule hoare_seq_ext[OF _ gts_sp']) - apply (case_tac state, simp_all) defer 2 - apply (rule hoare_pre) - apply ((wp getEndpoint_wp getCTE_wp | wpc | simp)+)[8] - apply (wp cteDeleteOne_valid_queues') - apply (rule_tac Q="\_. valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) - apply (clarsimp simp: capHasProperty_def cte_wp_at_ctes_of) - apply (wp threadSet_valid_queues' threadSet_sch_act| simp)+ - apply (clarsimp simp: inQ_def) - done - crunch valid_objs'[wp]: handleFaultReply valid_objs' lemma cte_wp_at_is_reply_cap_toI: @@ -2149,6 +2084,17 @@ lemma cte_wp_at_is_reply_cap_toI: \ cte_wp_at (is_reply_cap_to t) ptr s" by (fastforce simp: cte_wp_at_reply_cap_to_ex_rights) +crunches handle_fault_reply + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +crunches cteDeleteOne, doIPCTransfer, handleFaultReply + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + lemma doReplyTransfer_corres: "corres dc (einvs and tcb_at receiver and tcb_at sender @@ -2160,7 +2106,7 @@ lemma doReplyTransfer_corres: apply (simp add: do_reply_transfer_def doReplyTransfer_def cong: option.case_cong) apply (rule corres_underlying_split [OF _ _ gts_sp gts_sp']) apply (rule corres_guard_imp) - apply (rule getThreadState_corres, (clarsimp simp add: st_tcb_at_tcb_at)+) + apply (rule getThreadState_corres, (fastforce simp add: st_tcb_at_tcb_at)+) apply (rule_tac F = "awaiting_reply state" in corres_req) apply (clarsimp simp add: st_tcb_at_def obj_at_def is_tcb) apply (fastforce simp: invs_def valid_state_def intro: has_reply_cap_cte_wpD @@ -2194,8 +2140,12 @@ lemma doReplyTransfer_corres: apply (rule corres_split[OF setThreadState_corres]) apply simp apply (rule possibleSwitchTo_corres) - apply (wp set_thread_state_runnable_valid_sched set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' sts_valid_queues sts_valid_objs' delete_one_tcbDomain_obj_at' - | simp add: valid_tcb_state'_def)+ + apply (wp set_thread_state_runnable_valid_sched + set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' + sts_valid_objs' delete_one_tcbDomain_obj_at' + | simp add: valid_tcb_state'_def + | strengthen valid_queues_in_correct_ready_q 
valid_sched_valid_queues + valid_queues_ready_qs_distinct)+ apply (strengthen cte_wp_at_reply_cap_can_fast_finalise) apply (wp hoare_vcg_conj_lift) apply (rule hoare_strengthen_post [OF do_ipc_transfer_non_null_cte_wp_at]) @@ -2204,12 +2154,16 @@ lemma doReplyTransfer_corres: apply (fastforce) apply (clarsimp simp:is_cap_simps) apply (wp weak_valid_sched_action_lift)+ - apply (rule_tac Q="\_. valid_queues' and valid_objs' and cur_tcb' and tcb_at' receiver and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp, simp add: sch_act_wf_weak) + apply (rule_tac Q="\_ s. valid_objs' s \ cur_tcb' s \ tcb_at' receiver s + \ sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp, simp add: sch_act_wf_weak) apply (wp tcb_in_cur_domain'_lift) defer apply (simp) apply (wp)+ - apply (clarsimp) + apply (clarsimp simp: invs_psp_aligned invs_distinct) apply (rule conjI, erule invs_valid_objs) apply (rule conjI, clarsimp)+ apply (rule conjI) @@ -2233,10 +2187,13 @@ lemma doReplyTransfer_corres: apply (rule threadset_corresT; clarsimp simp add: tcb_relation_def fault_rel_optionation_def tcb_cap_cases_def tcb_cte_cases_def exst_same_def) - apply (rule_tac P="valid_sched and cur_tcb and tcb_at receiver" + apply (rule_tac P="valid_sched and cur_tcb and tcb_at receiver + and pspace_aligned and pspace_distinct" and P'="tcb_at' receiver and cur_tcb' - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) - and Invariants_H.valid_queues and valid_queues' and valid_objs'" + and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" in corres_inst) apply (case_tac rvb, simp_all)[1] apply (rule corres_guard_imp) @@ -2244,25 +2201,27 @@ lemma doReplyTransfer_corres: apply simp apply (fold dc_def, rule possibleSwitchTo_corres) apply simp - apply (wp static_imp_wp static_imp_conj_wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' - sts_st_tcb' sts_valid_queues | simp | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ + apply (wp hoare_weak_lift_imp hoare_weak_lift_imp_conj + set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' + | simp + | force simp: valid_sched_def valid_sched_action_def + valid_tcb_state'_def)+ apply (rule corres_guard_imp) apply (rule setThreadState_corres) apply clarsimp+ apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state - thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' + thread_set_not_state_valid_sched threadSet_tcbDomain_triv threadSet_valid_objs' + threadSet_sched_pointers threadSet_valid_sched_pointers | simp add: valid_tcb_state'_def)+ - apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state - thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' - | simp add: runnable_def inQ_def valid_tcb'_def)+ - apply (rule_tac Q="\_. valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and valid_objs and pspace_aligned" + apply (rule_tac Q="\_. 
valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and + valid_objs and pspace_aligned and pspace_distinct" in hoare_strengthen_post [rotated], clarsimp) apply (wp) apply (rule hoare_chain [OF cap_delete_one_invs]) apply (assumption) - apply (rule conjI, clarsimp) - apply (clarsimp simp add: invs_def valid_state_def) + apply fastforce apply (rule_tac Q="\_. tcb_at' sender and tcb_at' receiver and invs'" in hoare_strengthen_post [rotated]) apply (solves\auto simp: invs'_def valid_state'_def\) @@ -2344,15 +2303,15 @@ lemma setupCallerCap_corres: tcb_cnode_index_def cte_level_bits_def) apply (simp add: cte_map_def tcbCallerSlot_def tcb_cnode_index_def cte_level_bits_def) - apply (rule_tac Q="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" + in hoare_post_add) apply (wp, (wp getSlotCap_wp)+) apply blast apply (rule no_fail_pre, wp) apply (clarsimp simp: cte_wp_at'_def cte_at'_def) - apply (rule_tac Q="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" + in hoare_post_add) apply (wp, (wp getCTE_wp')+) apply blast apply (rule no_fail_pre, wp) @@ -2409,7 +2368,7 @@ lemma possibleSwitchTo_weak_sch_act_wf[wp]: bitmap_fun_defs) apply (wp rescheduleRequired_weak_sch_act_wf weak_sch_act_wf_lift_linear[where f="tcbSchedEnqueue t"] - getObject_tcb_wp static_imp_wp + getObject_tcb_wp hoare_weak_lift_imp | wpc)+ apply (clarsimp simp: obj_at'_def projectKOs weak_sch_act_wf_def ps_clear_def tcb_in_cur_domain'_def) done @@ -2473,7 +2432,7 @@ proof - apply (rule setEndpoint_corres) apply (simp add: ep_relation_def) apply wp+ - apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def) + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_psp_aligned invs_distinct) apply clarsimp \ \concludes IdleEP if bl branch\ apply (simp add: ep_relation_def) @@ -2483,7 +2442,7 @@ proof - apply (rule setEndpoint_corres) apply (simp add: ep_relation_def) apply wp+ - apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def) + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_psp_aligned invs_distinct) apply clarsimp \ \concludes SendEP if bl branch\ apply (simp add: ep_relation_def) @@ -2522,10 +2481,12 @@ proof - apply (wp hoare_drop_imps)[1] apply (wp | simp)+ apply (wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases) - apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + apply (wp sts_weak_sch_act_wf sts_valid_objs' sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases)[1] apply (simp add: valid_tcb_state_def pred_conj_def) - apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg) + apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues)+ apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift | clarsimp simp: is_cap_simps)+)[1] apply (simp add: pred_conj_def) @@ -2534,7 +2495,7 @@ proof - apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift hoare_drop_imps)[1] apply (wp gts_st_tcb_at)+ apply (simp add: pred_conj_def cong: conj_cong) - apply (wp hoare_post_taut) + apply (wp hoare_TrueI) apply (simp) apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb')+ apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def ep_redux_simps @@ 
-2590,17 +2551,19 @@ proof - apply (simp add: if_apply_def2) apply ((wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases | simp add: if_apply_def2 split del: if_split)+)[1] - apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + apply (wp sts_weak_sch_act_wf sts_valid_objs' sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases) apply (simp add: valid_tcb_state_def pred_conj_def) apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift - | clarsimp simp:is_cap_simps)+)[1] + | clarsimp simp: is_cap_simps + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues )+)[1] apply (simp add: valid_tcb_state'_def pred_conj_def) apply (strengthen sch_act_wf_weak) apply (wp weak_sch_act_wf_lift_linear hoare_drop_imps) apply (wp gts_st_tcb_at)+ apply (simp add: pred_conj_def cong: conj_cong) - apply (wp hoare_post_taut) + apply (wp hoare_TrueI) apply simp apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb') apply (clarsimp simp add: invs_def valid_state_def @@ -2620,25 +2583,18 @@ proof - done qed -crunch typ_at'[wp]: setMessageInfo "\s. P (typ_at' T p s)" - lemmas setMessageInfo_typ_ats[wp] = typ_at_lifts [OF setMessageInfo_typ_at'] (* Annotation added by Simon Winwood (Thu Jul 1 20:54:41 2010) using taint-mode *) declare tl_drop_1[simp] crunch cur[wp]: cancel_ipc "cur_tcb" - (wp: select_wp crunch_wps simp: crunch_simps) - -crunch valid_objs'[wp]: asUser "valid_objs'" + (wp: crunch_wps simp: crunch_simps) lemma valid_sched_weak_strg: "valid_sched s \ weak_valid_sched_action s" by (simp add: valid_sched_def valid_sched_action_def) -crunch weak_valid_sched_action[wp]: as_user weak_valid_sched_action - (wp: weak_valid_sched_action_lift) - lemma sendSignal_corres: "corres dc (einvs and ntfn_at ep) (invs' and ntfn_at' ep) (send_signal ep bg) (sendSignal ep bg)" @@ -2675,14 +2631,15 @@ lemma sendSignal_corres: apply (rule possibleSwitchTo_corres) apply wp apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' - sts_valid_queues sts_st_tcb' hoare_disjI2 + sts_st_tcb' sts_valid_objs' hoare_disjI2 cancel_ipc_cte_wp_at_not_reply_state | strengthen invs_vobjs_strgs invs_psp_aligned_strg valid_sched_weak_strg + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues | simp add: valid_tcb_state_def)+ apply (rule_tac Q="\rv. invs' and tcb_at' a" in hoare_strengthen_post) apply wp - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak - valid_tcb_state'_def) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak valid_tcb_state'_def) apply (rule setNotification_corres) apply (clarsimp simp add: ntfn_relation_def) apply (wp gts_wp gts_wp' | clarsimp)+ @@ -2708,23 +2665,23 @@ lemma sendSignal_corres: apply (rule corres_split[OF asUser_setRegister_corres]) apply (rule possibleSwitchTo_corres) apply ((wp | simp)+)[1] - apply (rule_tac Q="\_. Invariants_H.valid_queues and valid_queues' and - (\s. sch_act_wf (ksSchedulerAction s) s) and + apply (rule_tac Q="\_. (\s. 
sch_act_wf (ksSchedulerAction s) s) and cur_tcb' and - st_tcb_at' runnable' (hd list) and valid_objs'" + st_tcb_at' runnable' (hd list) and valid_objs' and + sym_heap_sched_pointers and valid_sched_pointers and + pspace_aligned' and pspace_distinct'" in hoare_post_imp, clarsimp simp: pred_tcb_at' elim!: sch_act_wf_weak) apply (wp | simp)+ apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb | simp)+ apply (wp set_simple_ko_valid_objs set_ntfn_aligned' set_ntfn_valid_objs' hoare_vcg_disj_lift weak_sch_act_wf_lift_linear | simp add: valid_tcb_state_def valid_tcb_state'_def)+ - apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def - valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def - valid_sched_action_def) + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def + valid_sched_action_def) apply (auto simp: valid_ntfn'_def )[1] apply (clarsimp simp: invs'_def valid_state'_def) @@ -2742,16 +2699,14 @@ lemma sendSignal_corres: apply (wp cur_tcb_lift | simp)+ apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb - | simp)+ + apply (wpsimp wp: sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb) apply (wp set_ntfn_aligned' set_simple_ko_valid_objs set_ntfn_valid_objs' hoare_vcg_disj_lift weak_sch_act_wf_lift_linear | simp add: valid_tcb_state_def valid_tcb_state'_def)+ - apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def - valid_pspace_def neq_Nil_conv - ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def - split: option.splits) + apply (fastforce simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def neq_Nil_conv + ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def + split: option.splits) apply (auto simp: valid_ntfn'_def neq_Nil_conv invs'_def valid_state'_def weak_sch_act_wf_def split: option.splits)[1] @@ -2777,43 +2732,11 @@ lemma possibleSwitchTo_sch_act[wp]: possibleSwitchTo t \\rv s. sch_act_wf (ksSchedulerAction s) s\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadSet_sch_act setQueue_sch_act threadGet_wp + apply (wp hoare_weak_lift_imp threadSet_sch_act setQueue_sch_act threadGet_wp | simp add: unless_def | wpc)+ apply (auto simp: obj_at'_def projectKOs tcb_in_cur_domain'_def) done -lemma possibleSwitchTo_valid_queues[wp]: - "\Invariants_H.valid_queues and valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t\ - possibleSwitchTo t - \\rv. Invariants_H.valid_queues\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp hoare_drop_imps | wpc | simp)+ - apply (auto simp: valid_tcb'_def weak_sch_act_wf_def - dest: pred_tcb_at' - elim!: valid_objs_valid_tcbE) - done - -lemma possibleSwitchTo_ksQ': - "\(\s. t' \ set (ksReadyQueues s p) \ sch_act_not t' s) and K(t' \ t)\ - possibleSwitchTo t - \\_ s. 
t' \ set (ksReadyQueues s p)\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp - | wpc - | simp split del: if_split)+ - apply (auto simp: obj_at'_def) - done - -lemma possibleSwitchTo_valid_queues'[wp]: - "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) - and st_tcb_at' runnable' t\ - possibleSwitchTo t - \\rv. valid_queues'\" - apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadGet_wp | wpc | simp)+ - apply (auto simp: obj_at'_def) - done - crunches possibleSwitchTo for st_refs_of'[wp]: "\s. P (state_refs_of' s)" and cap_to'[wp]: "ex_nonz_cap_to' p" @@ -2822,15 +2745,15 @@ crunches possibleSwitchTo (wp: cur_tcb_lift crunch_wps) lemma possibleSwitchTo_iflive[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' t - and (\s. sch_act_wf (ksSchedulerAction s) s)\ - possibleSwitchTo t - \\rv. if_live_then_nonz_cap'\" + "\if_live_then_nonz_cap' and ex_nonz_cap_to' t and (\s. sch_act_wf (ksSchedulerAction s) s) + and pspace_aligned' and pspace_distinct'\ + possibleSwitchTo t + \\_. if_live_then_nonz_cap'\" apply (simp add: possibleSwitchTo_def curDomain_def) apply (wp | wpc | simp)+ apply (simp only: imp_conv_disj, wp hoare_vcg_all_lift hoare_vcg_disj_lift) apply (wp threadGet_wp)+ - apply (auto simp: obj_at'_def projectKOs) + apply (auto simp: obj_at'_def) done crunches possibleSwitchTo @@ -2858,10 +2781,6 @@ crunches sendSignal, setBoundNotification rule: irqs_masked_lift) end -lemma sts_running_valid_queues: - "runnable' st \ \ Invariants_H.valid_queues \ setThreadState st t \\_. Invariants_H.valid_queues \" - by (wp sts_valid_queues, clarsimp) - lemma ct_in_state_activatable_imp_simple'[simp]: "ct_in_state' activatable' s \ ct_in_state' simple' s" apply (simp add: ct_in_state'_def) @@ -2874,24 +2793,21 @@ lemma setThreadState_nonqueued_state_update: \ st \ {Inactive, Running, Restart, IdleThreadState} \ (st \ Inactive \ ex_nonz_cap_to' t s) \ (t = ksIdleThread s \ idle' st) - - \ (\ runnable' st \ sch_act_simple s) - \ (\ runnable' st \ (\p. t \ set (ksReadyQueues s p)))\ - setThreadState st t \\rv. invs'\" + \ (\ runnable' st \ sch_act_simple s)\ + setThreadState st t + \\_. invs'\" apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre, wp valid_irq_node_lift - sts_valid_queues - setThreadState_ct_not_inQ) + apply (rule hoare_pre, wp valid_irq_node_lift setThreadState_ct_not_inQ) apply (clarsimp simp: pred_tcb_at') apply (rule conjI, fastforce simp: valid_tcb_state'_def) apply (drule simple_st_tcb_at_state_refs_ofD') apply (drule bound_tcb_at_state_refs_ofD') - apply (rule conjI, fastforce) - apply clarsimp - apply (erule delta_sym_refs) - apply (fastforce split: if_split_asm) - apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def - split: if_split_asm) + apply (rule conjI) + apply clarsimp + apply (erule delta_sym_refs) + apply (fastforce split: if_split_asm) + apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def split: if_split_asm) + apply fastforce done lemma cteDeleteOne_reply_cap_to'[wp]: @@ -2900,7 +2816,7 @@ lemma cteDeleteOne_reply_cap_to'[wp]: cteDeleteOne slot \\rv. 
ex_nonz_cap_to' p\" apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (subgoal_tac "isReplyCap (cteCap cte)") apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv @@ -2959,16 +2875,14 @@ lemma cancelAllIPC_not_rct[wp]: \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" apply (simp add: cancelAllIPC_def) apply (wp | wpc)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wp)+ apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) apply simp apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) - apply simp - apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (wp hoare_vcg_all_lift hoare_drop_imp) - apply (simp_all) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ done lemma cancelAllSignals_not_rct[wp]: @@ -2977,12 +2891,10 @@ lemma cancelAllSignals_not_rct[wp]: \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" apply (simp add: cancelAllSignals_def) apply (wp | wpc)+ - apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) - apply simp - apply (rule mapM_x_wp_inv) - apply (wp)+ - apply (wp hoare_vcg_all_lift hoare_drop_imp) - apply (simp_all) + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wpsimp wp: hoare_vcg_all_lift hoare_drop_imp)+ done crunch not_rct[wp]: finaliseCapTrue_standin "\s. ksSchedulerAction s \ ResumeCurrentThread" @@ -3054,7 +2966,7 @@ lemma sai_invs'[wp]: "\invs' and ex_nonz_cap_to' ntfnptr\ sendSignal ntfnptr badge \\y. invs'\" unfolding sendSignal_def - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (case_tac "ntfnObj nTFN", simp_all) prefer 3 apply (rename_tac list) @@ -3066,7 +2978,6 @@ lemma sai_invs'[wp]: apply (clarsimp simp:conj_comms) apply (simp add: invs'_def valid_state'_def) apply (wp valid_irq_node_lift sts_valid_objs' setThreadState_ct_not_inQ - sts_valid_queues [where st="Structures_H.thread_state.Running", simplified] set_ntfn_valid_objs' cur_tcb_lift sts_st_tcb' hoare_convert_imp [OF setNotification_nosch] | simp split del: if_split)+ @@ -3153,6 +3064,8 @@ lemma replyFromKernel_corres: apply (rule setMessageInfo_corres) apply (wp hoare_case_option_wp hoare_valid_ipc_buffer_ptr_typ_at' | clarsimp)+ + apply fastforce + apply fastforce done lemma rfk_invs': @@ -3165,8 +3078,7 @@ lemma rfk_invs': crunch nosch[wp]: replyFromKernel "\s. P (ksSchedulerAction s)" lemma completeSignal_corres: - "corres dc (ntfn_at ntfnptr and tcb_at tcb and pspace_aligned and valid_objs - \ \and obj_at (\ko. 
ko = Notification ntfn \ Ipc_A.isActive ntfn) ntfnptr\) + "corres dc (ntfn_at ntfnptr and tcb_at tcb and pspace_aligned and pspace_distinct and valid_objs) (ntfn_at' ntfnptr and tcb_at' tcb and valid_pspace' and obj_at' isActive ntfnptr) (complete_signal ntfnptr tcb) (completeSignal ntfnptr tcb)" apply (simp add: complete_signal_def completeSignal_def) @@ -3190,10 +3102,9 @@ lemma completeSignal_corres: lemma doNBRecvFailedTransfer_corres: - "corres dc (tcb_at thread) - (tcb_at' thread) - (do_nbrecv_failed_transfer thread) - (doNBRecvFailedTransfer thread)" + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) \ + (do_nbrecv_failed_transfer thread) + (doNBRecvFailedTransfer thread)" unfolding do_nbrecv_failed_transfer_def doNBRecvFailedTransfer_def by (simp add: badgeRegister_def badge_register_def, rule asUser_setRegister_corres) @@ -3280,11 +3191,11 @@ lemma receiveIPC_corres: and cte_wp_at (\c. c = cap.NullCap) (thread, tcb_cnode_index 3)" and P'="tcb_at' a and tcb_at' thread and cur_tcb' - and Invariants_H.valid_queues - and valid_queues' and valid_pspace' and valid_objs' - and (\s. weak_sch_act_wf (ksSchedulerAction s) s)" + and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct'" in corres_guard_imp [OF corres_if]) apply (simp add: fault_rel_optionation_def) apply (rule corres_if2 [OF _ setupCallerCap_corres setThreadState_corres]) @@ -3293,17 +3204,18 @@ lemma receiveIPC_corres: apply (rule corres_split[OF setThreadState_corres]) apply simp apply (rule possibleSwitchTo_corres) - apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action - | simp)+ - apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' - setThreadState_st_tcb + apply (wpsimp wp: sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action)+ + apply (wp sts_st_tcb_at'_cases sts_valid_objs' setThreadState_st_tcb | simp)+ - apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def - valid_sched_action_def) + apply (fastforce simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def + valid_sched_action_def) apply (clarsimp split: if_split_asm) apply (clarsimp | wp do_ipc_transfer_tcb_caps)+ - apply (rule_tac Q="\_ s. sch_act_wf (ksSchedulerAction s) s" - in hoare_post_imp, erule sch_act_wf_weak) + apply (rule_tac Q="\_ s. 
sch_act_wf (ksSchedulerAction s) s + \ sym_heap_sched_pointers s \ valid_sched_pointers s + \ pspace_aligned' s \ pspace_distinct' s" + in hoare_post_imp) + apply (fastforce elim: sch_act_wf_weak) apply (wp sts_st_tcb' gts_st_tcb_at | simp)+ apply (simp cong: list.case_cong) apply wp @@ -3326,13 +3238,13 @@ lemma receiveIPC_corres: apply wp+ apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp) apply simp - apply (clarsimp simp: valid_tcb_state_def) + apply (fastforce simp: valid_tcb_state_def) apply (clarsimp simp add: valid_tcb_state'_def) apply (wp get_simple_ko_wp[where f=Notification] getNotification_wp gbn_wp gbn_wp' hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_if_lift | wpc | simp add: ep_at_def2[symmetric, simplified] | clarsimp)+ apply (clarsimp simp: valid_cap_def invs_psp_aligned invs_valid_objs pred_tcb_at_def - valid_obj_def valid_tcb_def valid_bound_ntfn_def + valid_obj_def valid_tcb_def valid_bound_ntfn_def invs_distinct dest!: invs_valid_objs elim!: obj_at_valid_objsE split: option.splits) @@ -3369,7 +3281,7 @@ lemma receiveSignal_corres: apply (rule setNotification_corres) apply (simp add: ntfn_relation_def) apply wp+ - apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp+) + apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, fastforce+) \ \WaitingNtfn\ apply (simp add: ntfn_relation_def) apply (rule corres_guard_imp) @@ -3380,7 +3292,7 @@ lemma receiveSignal_corres: apply (simp add: ntfn_relation_def) apply wp+ apply (rule corres_guard_imp) - apply (rule doNBRecvFailedTransfer_corres, simp+) + apply (rule doNBRecvFailedTransfer_corres, fastforce+) \ \ActiveNtfn\ apply (simp add: ntfn_relation_def) apply (rule corres_guard_imp) @@ -3450,10 +3362,9 @@ lemma sendFaultIPC_corres: | wp (once) sch_act_sane_lift)+)[1] apply (rule corres_trivial, simp add: lookup_failure_map_def) apply (clarsimp simp: st_tcb_at_tcb_at split: if_split) - apply (simp add: valid_cap_def) - apply (clarsimp simp: valid_cap'_def inQ_def) - apply auto[1] - apply (clarsimp simp: lookup_failure_map_def) + apply (fastforce simp: valid_cap_def) + apply (fastforce simp: valid_cap'_def inQ_def) + apply (fastforce simp: lookup_failure_map_def) apply wp+ apply (fastforce elim: st_tcb_at_tcb_at) apply fastforce @@ -3468,14 +3379,14 @@ lemma gets_the_noop_corres: done lemma handleDoubleFault_corres: - "corres dc (tcb_at thread) + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) (tcb_at' thread and (\s. weak_sch_act_wf (ksSchedulerAction s) s)) (handle_double_fault thread f ft) (handleDoubleFault thread f' ft')" apply (simp add: handle_double_fault_def handleDoubleFault_def) apply (rule corres_guard_imp) apply (subst bind_return [symmetric], - rule corres_underlying_split [OF setThreadState_corres]) + rule corres_split [OF setThreadState_corres]) apply simp apply (rule corres_noop2) apply (simp add: exs_valid_def return_def) @@ -3499,10 +3410,6 @@ crunch typ_at'[wp]: receiveSignal "\s. P (typ_at' T p s)" lemmas receiveAIPC_typ_ats[wp] = typ_at_lifts [OF receiveSignal_typ_at'] -declare cart_singleton_empty[simp] - -declare cart_singleton_empty2[simp] - crunch aligned'[wp]: setupCallerCap "pspace_aligned'" (wp: crunch_wps) crunch distinct'[wp]: setupCallerCap "pspace_distinct'" @@ -3520,34 +3427,6 @@ lemma setupCallerCap_state_refs_of[wp]: apply (simp add: fun_upd_def cong: if_cong) done -crunch sch_act_wf: setupCallerCap - "\s. 
sch_act_wf (ksSchedulerAction s) s" - (wp: crunch_wps ssa_sch_act sts_sch_act rule: sch_act_wf_lift) - -lemma setCTE_valid_queues[wp]: - "\Invariants_H.valid_queues\ setCTE ptr val \\rv. Invariants_H.valid_queues\" - by (wp valid_queues_lift setCTE_pred_tcb_at') - -crunch vq[wp]: cteInsert "Invariants_H.valid_queues" - (wp: crunch_wps) - -crunch vq[wp]: getThreadCallerSlot "Invariants_H.valid_queues" - (wp: crunch_wps) - -crunch vq[wp]: getThreadReplySlot "Invariants_H.valid_queues" - (wp: crunch_wps) - -lemma setupCallerCap_vq[wp]: - "\Invariants_H.valid_queues and (\s. \p. send \ set (ksReadyQueues s p))\ - setupCallerCap send recv grant \\_. Invariants_H.valid_queues\" - apply (simp add: setupCallerCap_def) - apply (wp crunch_wps sts_valid_queues) - apply (fastforce simp: valid_queues_def obj_at'_def inQ_def) - done - -crunch vq'[wp]: setupCallerCap "valid_queues'" - (wp: crunch_wps) - lemma is_derived_ReplyCap' [simp]: "\m p g. is_derived' m p (capability.ReplyCap t False g) = (\c. \ g. c = capability.ReplyCap t True g)" @@ -3591,7 +3470,7 @@ lemma setupCallerCap_vp[wp]: declare haskell_assert_inv[wp del] lemma setupCallerCap_iflive[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender\ + "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender and pspace_aligned' and pspace_distinct'\ setupCallerCap sender rcvr grant \\rv. if_live_then_nonz_cap'\" unfolding setupCallerCap_def getThreadCallerSlot_def @@ -3603,7 +3482,7 @@ lemma setupCallerCap_iflive[wp]: lemma setupCallerCap_ifunsafe[wp]: "\if_unsafe_then_cap' and valid_objs' and - ex_nonz_cap_to' rcvr and tcb_at' rcvr\ + ex_nonz_cap_to' rcvr and tcb_at' rcvr and pspace_aligned' and pspace_distinct'\ setupCallerCap sender rcvr grant \\rv. if_unsafe_then_cap'\" unfolding setupCallerCap_def getThreadCallerSlot_def @@ -3625,13 +3504,11 @@ lemma setupCallerCap_global_refs'[wp]: \\rv. valid_global_refs'\" unfolding setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def locateSlot_conv - apply (wp getSlotCap_cte_wp_at - | simp add: o_def unique_master_reply_cap' - | strengthen eq_imp_strg - | wp (once) getCTE_wp | clarsimp simp: cte_wp_at_ctes_of)+ - (* at setThreadState *) - apply (rule_tac Q="\_. valid_global_refs'" in hoare_post_imp, wpsimp+) - done + by (wp + | simp add: o_def unique_master_reply_cap' + | strengthen eq_imp_strg + | wp (once) getCTE_wp + | wp (once) hoare_vcg_imp_lift' hoare_vcg_ex_lift | clarsimp simp: cte_wp_at_ctes_of)+ crunch valid_arch'[wp]: setupCallerCap "valid_arch_state'" (wp: hoare_drop_imps) @@ -3762,7 +3639,7 @@ lemma completeSignal_invs: completeSignal ntfnptr tcb \\_. invs'\" apply (simp add: completeSignal_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) apply (wp set_ntfn_minor_invs' | wpc | simp)+ apply (rule_tac Q="\_ s. (state_refs_of' s ntfnptr = ntfn_bound_refs' (ntfnBoundTCB ntfn)) @@ -3771,7 +3648,7 @@ lemma completeSignal_invs: \ ((\y. 
ntfnBoundTCB ntfn = Some y) \ ex_nonz_cap_to' ntfnptr s) \ ntfnptr \ ksIdleThread s" in hoare_strengthen_post) - apply ((wp hoare_vcg_ex_lift static_imp_wp | wpc | simp add: valid_ntfn'_def)+)[1] + apply ((wp hoare_vcg_ex_lift hoare_weak_lift_imp | wpc | simp add: valid_ntfn'_def)+)[1] apply (clarsimp simp: obj_at'_def state_refs_of'_def typ_at'_def ko_wp_at'_def projectKOs split: option.splits) apply (blast dest: ntfn_q_refs_no_bound_refs') apply wp @@ -3821,20 +3698,29 @@ crunches possibleSwitchTo and ioports'[wp]: valid_ioports' (wp: valid_ioports_lift' possibleSwitchTo_ctes_of crunch_wps ignore: constOnFailure) +crunches asUser + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift wp: crunch_wps) + +crunches setupCallerCap, possibleSwitchTo, doIPCTransfer + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and valid_bitmaps[wp]: valid_bitmaps + (rule: sym_heap_sched_pointers_lift wp: crunch_wps simp: crunch_simps) + (* t = ksCurThread s *) lemma ri_invs' [wp]: "\invs' and sch_act_not t and ct_in_state' simple' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s)\ receiveIPC t cap isBlocking \\_. invs'\" (is "\?pre\ _ \_\") apply (clarsimp simp: receiveIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) - apply (rule hoare_seq_ext [OF _ gbn_sp']) - apply (rule hoare_seq_ext) + apply (rule bind_wp [OF _ get_ep_sp']) + apply (rule bind_wp [OF _ gbn_sp']) + apply (rule bind_wp) (* set up precondition for old proof *) apply (rule_tac R="ko_at' ep (capEPPtr cap) and ?pre" in hoare_vcg_if_split) apply (wp completeSignal_invs) @@ -3844,7 +3730,7 @@ lemma ri_invs' [wp]: apply (rule hoare_pre, wpc, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) apply (wp sts_sch_act' hoare_vcg_const_Ball_lift valid_irq_node_lift - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at' o_def) @@ -3871,7 +3757,7 @@ lemma ri_invs' [wp]: apply (rule hoare_pre, wpc, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) apply (wp sts_sch_act' valid_irq_node_lift - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def o_def) @@ -3895,9 +3781,8 @@ lemma ri_invs' [wp]: apply (rename_tac sender queue) apply (rule hoare_pre) apply (wp valid_irq_node_lift hoare_drop_imps setEndpoint_valid_mdb' - set_ep_valid_objs' sts_st_tcb' sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ possibleSwitchTo_valid_queues - possibleSwitchTo_valid_queues' + set_ep_valid_objs' sts_st_tcb' sts_sch_act' + setThreadState_ct_not_inQ possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift setEndpoint_ksQ setEndpoint_ct' | simp add: valid_tcb_state'_def case_bool_If @@ -3915,8 +3800,6 @@ lemma ri_invs' [wp]: st_tcb_at_refs_of_rev' conj_ac split del: if_split cong: if_cong) - apply (frule_tac t=sender in valid_queues_not_runnable'_not_ksQ) - apply (erule pred_tcb'_weakenE, clarsimp) apply (subgoal_tac "sch_act_not sender s") prefer 2 apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) @@ -3950,7 +3833,6 @@ lemma ri_invs' [wp]: lemma rai_invs'[wp]: "\invs' and sch_act_not t and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t and (\s. \r \ zobj_refs' cap. 
ex_nonz_cap_to' r s) and (\s. \ntfnptr. isNotificationCap cap @@ -3960,14 +3842,14 @@ lemma rai_invs'[wp]: receiveSignal t cap isBlocking \\_. invs'\" apply (simp add: receiveSignal_def) - apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rule bind_wp [OF _ get_ntfn_sp']) apply (rename_tac ep) apply (case_tac "ntfnObj ep") \ \ep = IdleNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp valid_irq_node_lift sts_sch_act' typ_at_lifts - sts_valid_queues setThreadState_ct_not_inQ + setThreadState_ct_not_inQ asUser_urz | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def | wpc)+ apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def) @@ -3985,12 +3867,12 @@ lemma rai_invs'[wp]: apply (clarsimp split: if_split_asm) apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse' split: if_split_asm) - apply (clarsimp dest!: global'_no_ex_cap) + apply (fastforce dest!: global'_no_ex_cap) apply (clarsimp simp: pred_tcb_at'_def obj_at'_def projectKOs) \ \ep = ActiveNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts static_imp_wp + apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts hoare_weak_lift_imp asUser_urz | simp add: valid_ntfn'_def)+ apply (clarsimp simp: pred_tcb_at' valid_pspace'_def) @@ -4005,7 +3887,7 @@ lemma rai_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' - sts_valid_queues setThreadState_ct_not_inQ typ_at_lifts + setThreadState_ct_not_inQ typ_at_lifts asUser_urz | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def | wpc)+ apply (clarsimp simp: valid_tcb_state'_def) @@ -4033,7 +3915,7 @@ lemma rai_invs'[wp]: apply (auto simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def)[5] apply (fastforce simp: tcb_bound_refs'_def split: if_split_asm) - apply (clarsimp dest!: global'_no_ex_cap) + apply (fastforce dest!: global'_no_ex_cap) done lemma getCTE_cap_to_refs[wp]: @@ -4065,7 +3947,6 @@ lemma cteInsert_invs_bits[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ cteInsert a b c \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ cteInsert a b c \\rv. Invariants_H.valid_queues\" "\cur_tcb'\ cteInsert a b c \\rv. cur_tcb'\" "\\s. P (state_refs_of' s)\ cteInsert a b c @@ -4090,16 +3971,19 @@ crunch irqs_masked'[wp]: possibleSwitchTo "irqs_masked'" crunch urz[wp]: possibleSwitchTo "untyped_ranges_zero'" (simp: crunch_simps unless_def wp: crunch_wps) +crunches possibleSwitchTo + for pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + lemma si_invs'[wp]: "\invs' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) and sch_act_not t and ex_nonz_cap_to' ep and ex_nonz_cap_to' t\ sendIPC bl call ba cg cgr t ep \\rv. 
invs'\" supply if_split[split del] apply (simp add: sendIPC_def split del: if_split) - apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule bind_wp [OF _ get_ep_sp']) apply (case_tac epa) \ \epa = RecvEP\ apply simp @@ -4111,8 +3995,8 @@ lemma si_invs'[wp]: apply (rule_tac P="a\t" in hoare_gen_asm) apply (wp valid_irq_node_lift sts_valid_objs' set_ep_valid_objs' setEndpoint_valid_mdb' sts_st_tcb' sts_sch_act' - possibleSwitchTo_sch_act_not sts_valid_queues setThreadState_ct_not_inQ - possibleSwitchTo_ksQ' possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift sts_ksQ' + possibleSwitchTo_sch_act_not setThreadState_ct_not_inQ + possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift hoare_convert_imp [OF doIPCTransfer_sch_act doIPCTransfer_ct'] hoare_convert_imp [OF setEndpoint_nosch setEndpoint_ct'] hoare_drop_imp [where f="threadGet tcbFault t"] @@ -4166,8 +4050,7 @@ lemma si_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) - apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') apply (rule conjI, clarsimp elim!: obj_at'_weakenE) apply (subgoal_tac "ep \ t") @@ -4186,8 +4069,7 @@ lemma si_invs'[wp]: apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre, wp valid_irq_node_lift) apply (simp add: valid_ep'_def) - apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' - sts_valid_queues setThreadState_ct_not_inQ) + apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') apply (rule conjI, clarsimp elim!: obj_at'_weakenE) apply (frule obj_at_valid_objs', clarsimp) @@ -4214,23 +4096,19 @@ lemma si_invs'[wp]: lemma sfi_invs_plus': "\invs' and st_tcb_at' simple' t and sch_act_not t - and (\s. \p. t \ set (ksReadyQueues s p)) and ex_nonz_cap_to' t\ - sendFaultIPC t f - \\rv. invs'\, \\rv. invs' and st_tcb_at' simple' t - and (\s. \p. t \ set (ksReadyQueues s p)) - and sch_act_not t and (\s. ksIdleThread s \ t)\" + sendFaultIPC t f + \\_. invs'\, \\_. invs' and st_tcb_at' simple' t and sch_act_not t and (\s. ksIdleThread s \ t)\" apply (simp add: sendFaultIPC_def) apply (wp threadSet_invs_trivial threadSet_pred_tcb_no_state threadSet_cap_to' | wpc | simp)+ apply (rule_tac Q'="\rv s. invs' s \ sch_act_not t s \ st_tcb_at' simple' t s - \ (\p. t \ set (ksReadyQueues s p)) \ ex_nonz_cap_to' t s \ t \ ksIdleThread s \ (\r\zobj_refs' rv. ex_nonz_cap_to' r s)" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp: inQ_def pred_tcb_at') apply (wp | simp)+ @@ -4238,12 +4116,16 @@ lemma sfi_invs_plus': apply (subst(asm) global'_no_ex_cap, auto) done +crunches send_fault_ipc + for pspace_aligned[wp]: pspace_aligned + and psapce_distinct[wp]: pspace_distinct + (simp: crunch_simps wp: crunch_wps) + lemma handleFault_corres: "fr f f' \ corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread and (%_. valid_fault f)) (invs' and sch_act_not thread - and (\s. \p. 
thread \ set(ksReadyQueues s p)) and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) (handle_fault thread f) (handleFault thread f')" apply (simp add: handle_fault_def handleFault_def) @@ -4258,7 +4140,7 @@ lemma handleFault_corres: apply simp apply (rule handleDoubleFault_corres) apply wp+ - apply (rule hoare_post_impErr, rule sfi_invs_plus', simp_all)[1] + apply (rule hoare_strengthen_postE, rule sfi_invs_plus', simp_all)[1] apply clarsimp apply wp+ apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 invs_def @@ -4271,17 +4153,13 @@ lemma sts_invs_minor'': \ (st \ Inactive \ \ idle' st \ st' \ Inactive \ \ idle' st')) t and (\s. t = ksIdleThread s \ idle' st) - and (\s. (\p. t \ set (ksReadyQueues s p)) \ runnable' st) - and (\s. runnable' st \ obj_at' tcbQueued t s - \ st_tcb_at' runnable' t s) and (\s. \ runnable' st \ sch_act_not t s) and invs'\ setThreadState st t \\rv. invs'\" apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues - setThreadState_ct_not_inQ) + apply (wp valid_irq_node_lift sts_sch_act' setThreadState_ct_not_inQ) apply clarsimp apply (rule conjI) apply fastforce @@ -4296,12 +4174,11 @@ lemma sts_invs_minor'': apply (clarsimp dest!: st_tcb_at_state_refs_ofD' elim!: rsubst[where P=sym_refs] intro!: ext) - apply (clarsimp elim!: st_tcb_ex_cap'') + apply (fastforce elim!: st_tcb_ex_cap'') done lemma hf_invs' [wp]: "\invs' and sch_act_not t - and (\s. \p. t \ set(ksReadyQueues s p)) and st_tcb_at' simple' t and ex_nonz_cap_to' t and (\s. t \ ksIdleThread s)\ handleFault t f \\r. invs'\" @@ -4309,7 +4186,7 @@ lemma hf_invs' [wp]: apply wp apply (simp add: handleDoubleFault_def) apply (wp sts_invs_minor'' dmo_invs')+ - apply (rule hoare_post_impErr, rule sfi_invs_plus', + apply (rule hoare_strengthen_postE, rule sfi_invs_plus', simp_all) apply (strengthen no_refs_simple_strg') apply clarsimp @@ -4341,8 +4218,8 @@ lemma si_blk_makes_simple': sendIPC True call bdg x x' t' ep \\rv. st_tcb_at' simple' t\" apply (simp add: sendIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (case_tac xa, simp_all) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) apply (rename_tac list) apply (case_tac list, simp_all add: case_bool_If case_option_If split del: if_split cong: if_cong) @@ -4361,8 +4238,8 @@ lemma si_blk_makes_runnable': sendIPC True call bdg x x' t' ep \\rv. 
st_tcb_at' runnable' t\" apply (simp add: sendIPC_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (case_tac xa, simp_all) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (case_tac rv, simp_all) apply (rename_tac list) apply (case_tac list, simp_all add: case_bool_If case_option_If split del: if_split cong: if_cong) @@ -4385,7 +4262,7 @@ lemma sfi_makes_simple': apply (simp add: sendFaultIPC_def cong: if_cong capability.case_cong bool.case_cong) apply (wpsimp wp: si_blk_makes_simple' threadSet_pred_tcb_no_state hoare_drop_imps - hoare_vcg_all_lift_R) + hoare_vcg_all_liftE_R) done lemma sfi_makes_runnable': @@ -4396,7 +4273,7 @@ lemma sfi_makes_runnable': apply (simp add: sendFaultIPC_def cong: if_cong capability.case_cong bool.case_cong) apply (wpsimp wp: si_blk_makes_runnable' threadSet_pred_tcb_no_state hoare_drop_imps - hoare_vcg_all_lift_R) + hoare_vcg_all_liftE_R) done lemma hf_makes_runnable_simple': @@ -4420,8 +4297,8 @@ lemma ri_makes_runnable_simple': apply (rule hoare_gen_asm)+ apply (simp add: receiveIPC_def) apply (case_tac cap, simp_all add: isEndpointCap_def) - apply (rule hoare_seq_ext [OF _ get_ep_inv']) - apply (rule hoare_seq_ext [OF _ gbn_sp']) + apply (rule bind_wp [OF _ get_ep_inv']) + apply (rule bind_wp [OF _ gbn_sp']) apply wp apply (rename_tac ep q r) apply (case_tac ep, simp_all) @@ -4455,7 +4332,7 @@ lemma sendSignal_st_tcb'_Running: sendSignal ntfnptr bdg \\_. st_tcb_at' (\st. st = Running \ P st) t\" apply (simp add: sendSignal_def) - apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp static_imp_wp + apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp hoare_weak_lift_imp | wpc | clarsimp simp: pred_tcb_at')+ done diff --git a/proof/refine/X64/KHeap_R.thy b/proof/refine/X64/KHeap_R.thy index 6891699990..588d26121e 100644 --- a/proof/refine/X64/KHeap_R.thy +++ b/proof/refine/X64/KHeap_R.thy @@ -14,8 +14,46 @@ lemma lookupAround2_known1: "m x = Some y \ fst (lookupAround2 x m) = Some (x, y)" by (fastforce simp: lookupAround2_char1) +lemma koTypeOf_injectKO: + fixes v :: "'a :: pspace_storable" + shows "koTypeOf (injectKO v) = koType TYPE('a)" + apply (cut_tac v1=v in iffD2 [OF project_inject, OF refl]) + apply (simp add: project_koType[symmetric]) + done + context begin interpretation Arch . (*FIXME: arch_split*) +lemma setObject_modify_variable_size: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; obj_at' (\obj. objBits v = objBits obj) p s\ + \ setObject p v s = modify (ksPSpace_update (\ps. ps (p \ injectKO v))) s" + supply projectKOs[simp] + apply (clarsimp simp: setObject_def split_def exec_gets obj_at'_def lookupAround2_known1 + assert_opt_def updateObject_default_def bind_assoc) + apply (simp add: projectKO_def alignCheck_assert) + apply (simp add: project_inject objBits_def) + apply (clarsimp simp only: koTypeOf_injectKO) + apply (frule in_magnitude_check[where s'=s]) + apply blast + apply fastforce + apply (simp add: magnitudeCheck_assert in_monad bind_def gets_def oassert_opt_def + get_def return_def) + apply (simp add: simpler_modify_def) + done + +lemma setObject_modify: + fixes v :: "'a :: pspace_storable" shows + "\obj_at' (P :: 'a \ bool) p s; updateObject v = updateObject_default v; + (1 :: machine_word) < 2 ^ objBits v; \ko. P ko \ objBits ko = objBits v \ + \ setObject p v s = modify (ksPSpace_update (\ps. 
ps (p \ injectKO v))) s" + apply (rule setObject_modify_variable_size) + apply fastforce + apply fastforce + apply fastforce + unfolding obj_at'_def + by fastforce + lemma obj_at_getObject: assumes R: "\a b p q n ko s obj::'a::pspace_storable. @@ -100,9 +138,223 @@ lemmas typ_at_to_obj_at_arches lemmas page_table_at_obj_at' = page_table_at'_def[unfolded typ_at_to_obj_at_arches] +lemma tcb_at'_cross: + assumes p: "pspace_relation (kheap s) (ksPSpace s')" + assumes t: "tcb_at' ptr s'" + shows "tcb_at ptr s" + using assms + apply (clarsimp simp: obj_at'_def projectKOs) + apply (erule (1) pspace_dom_relatedE) + by (clarsimp simp: obj_relation_cuts_def2 obj_at_def cte_relation_def + other_obj_relation_def pte_relation_def is_tcb_def pde_relation_def + pdpte_relation_def pml4e_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm arch_kernel_obj.split_asm) + +lemma pspace_aligned_cross: + "\ pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ pspace_aligned' s'" + apply (clarsimp simp: pspace_aligned'_def pspace_aligned_def pspace_relation_def) + apply (rename_tac p' ko') + apply (prop_tac "p' \ pspace_dom (kheap s)", fastforce) + apply (thin_tac "pspace_dom k = p" for k p) + apply (clarsimp simp: pspace_dom_def) + apply (drule bspec, fastforce)+ + apply clarsimp + apply (rename_tac ko' a a' P ko) + apply (erule (1) obj_relation_cutsE; clarsimp simp: objBits_simps) + + \\CNode\ + apply (clarsimp simp: cte_map_def simp flip: shiftl_t2n') + apply (simp only: cteSizeBits_def cte_level_bits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken, simp) + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self, simp) + + \\TCB\ + apply (clarsimp simp: tcbBlockSizeBits_def elim!: is_aligned_weaken) + + \\PTE\ + apply (clarsimp simp: archObjSize_def table_size_def ptTranslationBits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply simp + apply (simp add: word_size_bits_def) + apply (rule is_aligned_shift) + + \\PDE\ + apply (clarsimp simp: archObjSize_def table_size_def ptTranslationBits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply simp + apply (simp add: word_size_bits_def) + apply (rule is_aligned_shift) + + \\PDPTE\ + apply (clarsimp simp: archObjSize_def table_size_def ptTranslationBits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply simp + apply (simp add: word_size_bits_def) + apply (rule is_aligned_shift) + + \\PML4E\ + apply (clarsimp simp: archObjSize_def table_size_def ptTranslationBits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply simp + apply (simp add: word_size_bits_def) + apply (rule is_aligned_shift) + + \\DataPage\ + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (rule pbfs_atleast_pageBits) + apply (rule is_aligned_mult_triv2) + + \\other_obj_relation\ + apply (simp add: other_obj_relation_def) + apply (clarsimp simp: tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def + split: kernel_object.splits Structures_A.kernel_object.splits) + apply (fastforce simp: archObjSize_def split: arch_kernel_object.splits arch_kernel_obj.splits) + done + +lemma of_bl_shift_cte_level_bits: + "(of_bl z :: machine_word) << cte_level_bits \ mask (cte_level_bits + length z)" + by word_bitwise + (simp add: test_bit_of_bl bit_simps word_size cte_level_bits_def rev_bl_order_simps) + +lemma obj_relation_cuts_range_limit: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ + \ \x n. 
p' = p + x \ is_aligned x n \ n \ obj_bits ko \ x \ mask (obj_bits ko)" + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (drule (1) wf_cs_nD) + apply (clarsimp simp: cte_map_def simp flip: shiftl_t2n') + apply (rule_tac x=cte_level_bits in exI) + apply (simp add: is_aligned_shift of_bl_shift_cte_level_bits) + apply (rule_tac x=tcbBlockSizeBits in exI) + apply (simp add: tcbBlockSizeBits_def) + apply (rule_tac x=word_size_bits in exI, simp add: bit_simps is_aligned_shift mask_def, word_bitwise) + apply (rule_tac x=word_size_bits in exI, simp add: bit_simps is_aligned_shift mask_def, word_bitwise) + apply (rule_tac x=word_size_bits in exI, simp add: bit_simps is_aligned_shift mask_def, word_bitwise) + apply (rule_tac x=word_size_bits in exI, simp add: bit_simps is_aligned_shift mask_def, word_bitwise) + apply (rule_tac x=pageBits in exI) + apply (simp add: is_aligned_mult_triv2 pbfs_atleast_pageBits) + apply (simp add: mask_def shiftl_t2n mult_ac pbfs_less_wb') + apply (erule word_less_power_trans2, rule pbfs_atleast_pageBits) + apply (simp add: pbfs_less_wb'[unfolded word_bits_def, simplified]) + apply fastforce + done + +lemma obj_relation_cuts_range_mask_range: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko'; is_aligned p (obj_bits ko) \ + \ p' \ mask_range p (obj_bits ko)" + apply (drule (1) obj_relation_cuts_range_limit, clarsimp) + apply (rule conjI) + apply (rule word_plus_mono_right2; assumption?) + apply (simp add: is_aligned_no_overflow_mask) + apply (erule word_plus_mono_right) + apply (simp add: is_aligned_no_overflow_mask) + done + +lemma obj_relation_cuts_obj_bits: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ \ objBitsKO ko' \ obj_bits ko" + apply (erule (1) obj_relation_cutsE; + clarsimp simp: objBits_simps objBits_defs bit_simps cte_level_bits_def + pbfs_atleast_pageBits[simplified bit_simps]) + prefer 5 + apply (cases ko; simp add: other_obj_relation_def objBits_defs split: kernel_object.splits) + apply (rename_tac ako, case_tac ako; clarsimp simp: archObjSize_def)+ + done + +lemmas is_aligned_add_step_le' = is_aligned_add_step_le[simplified mask_2pm1 add_diff_eq] + +lemma pspace_distinct_cross: + "\ pspace_distinct s; pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ + pspace_distinct' s'" + apply (frule (1) pspace_aligned_cross) + apply (clarsimp simp: pspace_distinct'_def) + apply (rename_tac p' ko') + apply (rule pspace_dom_relatedE; assumption?) + apply (rename_tac p ko P) + apply (frule (1) pspace_alignedD') + apply (frule (1) pspace_alignedD) + apply (rule ps_clearI, assumption) + apply (case_tac ko'; simp add: objBits_simps objBits_defs pageBits_def) + apply (simp add: archObjSize_def pageBits_def split: arch_kernel_object.splits) + apply (rule ccontr, clarsimp) + apply (rename_tac x' ko_x') + apply (frule_tac x=x' in pspace_alignedD', assumption) + apply (rule_tac x=x' in pspace_dom_relatedE; assumption?) + apply (rename_tac x ko_x P') + apply (frule_tac p=x in pspace_alignedD, assumption) + apply (case_tac "p = x") + apply clarsimp + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (clarsimp simp: cte_relation_def cte_map_def objBits_simps) + apply (rule_tac n=cte_level_bits in is_aligned_add_step_le'; assumption?) + apply (rule is_aligned_add; (rule is_aligned_mult_triv2)?) + apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (rule is_aligned_add; (rule is_aligned_mult_triv2)?) 
+ apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (simp add: cte_level_bits_def cteSizeBits_def) + apply (clarsimp simp: pte_relation_def objBits_simps archObjSize_def) + apply (rule_tac n=word_size_bits in is_aligned_add_step_le'; simp add: word_size_bits_def) + apply (clarsimp simp: pde_relation_def objBits_simps archObjSize_def) + apply (rule_tac n=word_size_bits in is_aligned_add_step_le'; simp add: word_size_bits_def) + apply (clarsimp simp: pdpte_relation_def objBits_simps archObjSize_def) + apply (rule_tac n=word_size_bits in is_aligned_add_step_le'; simp add: word_size_bits_def) + apply (clarsimp simp: pml4e_relation_def objBits_simps archObjSize_def) + apply (rule_tac n=word_size_bits in is_aligned_add_step_le'; simp add: word_size_bits_def) + apply (simp add: objBitsKO_Data) + apply (rule_tac n=pageBits in is_aligned_add_step_le'; assumption?) + apply (case_tac ko; simp split: if_split_asm add: other_obj_relation_def) + apply (rename_tac ako, case_tac ako; simp add: is_other_obj_relation_type_def split: if_split_asm) + apply (frule (1) obj_relation_cuts_obj_bits) + apply (drule (2) obj_relation_cuts_range_mask_range)+ + apply (prop_tac "x' \ mask_range p' (objBitsKO ko')", simp add: mask_def add_diff_eq) + apply (frule_tac x=p and y=x in pspace_distinctD; assumption?) + apply (drule (4) mask_range_subsetD) + apply (erule (2) in_empty_interE) + done + +lemma aligned_distinct_obj_atI': + "\ ksPSpace s x = Some ko; pspace_aligned' s; pspace_distinct' s; ko = injectKO v \ + \ ko_at' v x s" + apply (simp add: obj_at'_def project_inject pspace_distinct'_def pspace_aligned'_def projectKOs) + apply (drule bspec, erule domI)+ + apply (clarsimp simp: bit_simps objBits_simps' word_bits_def + split: kernel_object.splits arch_kernel_object.splits) + done + +lemma pspace_relation_tcb_at': + assumes p: "pspace_relation (kheap a) (ksPSpace c)" + assumes t: "tcb_at t a" + assumes aligned: "pspace_aligned' c" + assumes distinct: "pspace_distinct' c" + shows "tcb_at' t c" + using assms + apply (clarsimp simp: obj_at_def projectKOs) + apply (drule(1) pspace_relation_absD) + apply (clarsimp simp: is_tcb tcb_relation_cut_def) + apply (simp split: kernel_object.split_asm) + apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb], simp) + apply (erule obj_at'_weakenE) + apply simp + done + +lemma tcb_at_cross: + "\tcb_at t s; pspace_aligned s; pspace_distinct s; pspace_relation (kheap s) (ksPSpace s')\ + \ tcb_at' t s'" + apply (drule (2) pspace_distinct_cross) + apply (drule (1) pspace_aligned_cross) + apply (erule (3) pspace_relation_tcb_at') + done lemma corres_get_tcb: - "corres (tcb_relation \ the) (tcb_at t) (tcb_at' t) (gets (get_tcb t)) (getObject t)" + "corres (tcb_relation \ the) (tcb_at t and pspace_aligned and pspace_distinct) \ + (gets (get_tcb t)) (getObject t)" + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) apply (rule corres_no_failI) apply wp apply (clarsimp simp add: gets_def get_def return_def bind_def get_tcb_def) @@ -118,8 +370,7 @@ lemma corres_get_tcb: apply (drule bspec) apply clarsimp apply blast - apply (clarsimp simp add: other_obj_relation_def - lookupAround2_known1) + apply (clarsimp simp: tcb_relation_cut_def lookupAround2_known1) done lemma lookupAround2_same1[simp]: @@ -188,7 +439,7 @@ lemma obj_at_setObject1: setObject p (v::'a::pspace_storable) \ \rv. obj_at' (\x::'a::pspace_storable. 
True) t \" apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad obj_at'_def projectKOs lookupAround2_char1 project_inject @@ -210,7 +461,7 @@ lemma obj_at_setObject2: setObject p (v::'a) \ \rv. obj_at' P t \" apply (simp add: setObject_def split_def) - apply (rule hoare_seq_ext [OF _ hoare_gets_post]) + apply (rule bind_wp [OF _ hoare_gets_sp]) apply (clarsimp simp: valid_def in_monad) apply (frule updateObject_type) apply (drule R) @@ -394,6 +645,40 @@ lemma setObject_tcb_strongest: ps_clear_upd) done +method setObject_easy_cases = + clarsimp simp: setObject_def in_monad split_def valid_def lookupAround2_char1, + erule rsubst[where P=P'], rule ext, + clarsimp simp: updateObject_cte updateObject_default_def in_monad projectKOs + typeError_def opt_map_def opt_pred_def projectKO_opts_defs + split: if_split_asm + Structures_H.kernel_object.split_asm + +lemma setObject_endpoint_tcbs_of'[wp]: + "setObject c (endpoint :: endpoint) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_notification_tcbs_of'[wp]: + "setObject c (notification :: notification) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedNexts_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedNexts_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbSchedPrevs_of[wp]: + "setObject c (cte :: cte) \\s. P' (tcbSchedPrevs_of s)\" + by setObject_easy_cases + +lemma setObject_cte_tcbQueued[wp]: + "setObject c (cte :: cte) \\s. P' (tcbQueued |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + +lemma setObject_cte_inQ[wp]: + "setObject c (cte :: cte) \\s. P' (inQ d p |< tcbs_of' s)\" + supply inQ_def[simp] + by setObject_easy_cases + lemma getObject_obj_at': assumes x: "\q n ko. loadObject p q n ko = (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" @@ -921,7 +1206,7 @@ lemma obj_relation_cut_same_type: \ (\sz sz'. 
a_type ko = AArch (ADeviceData sz) \ a_type ko' = AArch (ADeviceData sz'))" apply (rule ccontr) apply (simp add: obj_relation_cuts_def2 a_type_def) - by (auto simp: other_obj_relation_def cte_relation_def + by (auto simp: other_obj_relation_def cte_relation_def tcb_relation_cut_def pte_relation_def pde_relation_def pdpte_relation_def pml4e_relation_def split: Structures_A.kernel_object.split_asm if_split_asm @@ -939,6 +1224,16 @@ where "exst_same' (KOTCB tcb) (KOTCB tcb') = exst_same tcb tcb'" | "exst_same' _ _ = True" +lemma tcbs_of'_non_tcb_update: + "\typ_at' (koTypeOf ko) ptr s'; koTypeOf ko \ TCBT\ + \ tcbs_of' (s'\ksPSpace := (ksPSpace s')(ptr \ ko)\) = tcbs_of' s'" + by (fastforce simp: typ_at'_def ko_wp_at'_def opt_map_def projectKO_opts_defs + split: kernel_object.splits) + +lemma typ_at'_koTypeOf: + "ko_at' ob' ptr b \ typ_at' (koTypeOf (injectKO ob')) ptr b" + by (auto simp: typ_at'_def ko_wp_at'_def obj_at'_def project_inject projectKOs) + lemma setObject_other_corres: fixes ob' :: "'a :: pspace_storable" assumes x: "updateObject ob' = updateObject_default ob'" @@ -968,7 +1263,7 @@ lemma setObject_other_corres: apply (clarsimp simp add: caps_of_state_after_update cte_wp_at_after_update swp_def fun_upd_def obj_at_def) apply (subst conj_assoc[symmetric]) - apply (rule conjI[rotated]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) apply (clarsimp simp add: ghost_relation_def) apply (erule_tac x=ptr in allE)+ apply (clarsimp simp: obj_at_def a_type_def @@ -978,6 +1273,14 @@ lemma setObject_other_corres: apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) apply (elim conjE) apply (frule bspec, erule domI) + apply (prop_tac "typ_at' (koTypeOf (injectKO ob')) ptr b") + subgoal + by (clarsimp simp: typ_at'_def ko_wp_at'_def obj_at'_def projectKO_opts_defs + is_other_obj_relation_type_def a_type_def other_obj_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm kernel_object.split_asm + arch_kernel_object.split_asm) + apply clarsimp apply (rule conjI) apply (rule ballI, drule(1) bspec) apply (drule domD) @@ -986,28 +1289,30 @@ lemma setObject_other_corres: apply clarsimp apply (frule_tac ko'=ko and x'=ptr in obj_relation_cut_same_type, (fastforce simp add: is_other_obj_relation_type t)+) - apply (erule disjE) - apply (simp add: is_other_obj_relation_type t) - apply (erule disjE) - apply (insert t, - clarsimp simp: is_other_obj_relation_type_CapTable a_type_def) - apply (erule disjE) - apply (insert t, clarsimp simp: is_other_obj_relation_type_UserData a_type_def) - apply (insert t, clarsimp simp: is_other_obj_relation_type_DeviceData a_type_def) - apply (simp only: ekheap_relation_def) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (insert e) - apply atomize - apply (clarsimp simp: obj_at'_def) - apply (erule_tac x=obj in allE) - apply (clarsimp simp: projectKO_eq project_inject) - apply (case_tac ob; - simp_all add: a_type_def other_obj_relation_def etcb_relation_def - is_other_obj_relation_type t exst_same_def) - by (clarsimp simp: is_other_obj_relation_type t exst_same_def - split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits - X64_A.arch_kernel_obj.splits)+ + apply (insert t) + apply ((erule disjE + | clarsimp simp: is_other_obj_relation_type is_other_obj_relation_type_def a_type_def)+)[1] + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule(1) bspec) 
+ apply (drule domD) + apply (insert e) + apply atomize + apply (clarsimp simp: obj_at'_def) + apply (erule_tac x=obj in allE) + apply (clarsimp simp: projectKO_eq project_inject) + apply (case_tac ob; + simp_all add: a_type_def other_obj_relation_def etcb_relation_def + is_other_obj_relation_type t exst_same_def) + apply (clarsimp simp: is_other_obj_relation_type t exst_same_def + split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits + arch_kernel_obj.splits)+ + \ \ready_queues_relation\ + apply (prop_tac "koTypeOf (injectKO ob') \ TCBT") + subgoal + by (clarsimp simp: other_obj_relation_def; cases ob; cases "injectKO ob'"; + simp split: arch_kernel_obj.split_asm) + by (fastforce dest: tcbs_of'_non_tcb_update) lemmas obj_at_simps = obj_at_def obj_at'_def projectKOs map_to_ctes_upd_other is_other_obj_relation_type_def @@ -1019,8 +1324,8 @@ lemma setEndpoint_corres: corres dc (ep_at ptr) (ep_at' ptr) (set_endpoint ptr e) (setEndpoint ptr e')" apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) lemma setNotification_corres: @@ -1028,8 +1333,8 @@ lemma setNotification_corres: corres dc (ntfn_at ptr) (ntfn_at' ptr) (set_notification ptr ae) (setNotification ptr ae')" apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) lemma no_fail_getNotification [wp]: @@ -1100,13 +1405,14 @@ lemma typ_at'_valid_obj'_lift: apply (case_tac endpoint; simp add: valid_ep'_def, wp) apply (rename_tac notification) apply (case_tac "ntfnObj notification"; - simp add: valid_ntfn'_def valid_bound_tcb'_def split: option.splits, + simp add: valid_ntfn'_def split: option.splits, (wpsimp|rule conjI)+) apply (rename_tac tcb) apply (case_tac "tcbState tcb"; - simp add: valid_tcb'_def valid_tcb_state'_def split_def valid_bound_ntfn'_def - split: option.splits, - wpsimp) + simp add: valid_tcb'_def valid_tcb_state'_def split_def none_top_def + valid_bound_ntfn'_def; + wpsimp wp: hoare_case_option_wp hoare_case_option_wp2; + (clarsimp split: option.splits)?) apply (wpsimp simp: valid_cte'_def) apply (rename_tac arch_kernel_object) apply (case_tac arch_kernel_object; wpsimp) @@ -1389,32 +1695,6 @@ lemma set_ep_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ done -lemma set_ep_valid_queues[wp]: - "\Invariants_H.valid_queues\ setEndpoint epptr ep \\rv. 
Invariants_H.valid_queues\" - apply (simp add: Invariants_H.valid_queues_def) - apply (wp hoare_vcg_conj_lift) - apply (simp add: setEndpoint_def valid_queues_no_bitmap_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (rule obj_at_setObject2) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] - | simp add: valid_queues_no_bitmap_def)+ - done - -lemma set_ep_valid_queues'[wp]: - "\valid_queues'\ setEndpoint epptr ep \\rv. valid_queues'\" - apply (unfold setEndpoint_def) - apply (simp only: valid_queues'_def imp_conv_disj - obj_at'_real_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (rule setObject_ko_wp_at) - apply simp - apply (simp add: objBits_simps') - apply simp - apply (wp updateObject_default_inv | simp)+ - apply (clarsimp simp: projectKOs ko_wp_at'_def) - done - lemma ct_in_state_thread_state_lift': assumes ct: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" assumes st: "\t. \st_tcb_at' P t\ f \\_. st_tcb_at' P t\" @@ -1614,34 +1894,6 @@ lemma set_ntfn_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ done -lemma set_ntfn_valid_queues[wp]: - "\Invariants_H.valid_queues\ setNotification p ntfn \\rv. Invariants_H.valid_queues\" - apply (simp add: Invariants_H.valid_queues_def) - apply (rule hoare_pre) - apply (wp hoare_vcg_conj_lift) - apply (simp add: setNotification_def valid_queues_no_bitmap_def) - apply (wp hoare_Ball_helper hoare_vcg_all_lift) - apply (rule obj_at_setObject2) - apply (clarsimp simp: updateObject_default_def in_monad) - apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] - | simp add: valid_queues_no_bitmap_def)+ - done - -lemma set_ntfn_valid_queues'[wp]: - "\valid_queues'\ setNotification p ntfn \\rv. valid_queues'\" - apply (unfold setNotification_def) - apply (rule setObject_ntfn_pre) - apply (simp only: valid_queues'_def imp_conv_disj - obj_at'_real_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (rule setObject_ko_wp_at) - apply simp - apply (simp add: objBits_simps') - apply simp - apply (wp updateObject_default_inv | simp)+ - apply (clarsimp simp: projectKOs ko_wp_at'_def) - done - lemma set_ntfn_state_refs_of'[wp]: "\\s. P ((state_refs_of' s) (epptr := ntfn_q_refs_of' (ntfnObj ntfn) \ ntfn_bound_refs' (ntfnBoundTCB ntfn)))\ @@ -2052,6 +2304,21 @@ lemma setNotification_ct_idle_or_in_cur_domain'[wp]: crunch gsUntypedZeroRanges[wp]: setNotification "\s. P (gsUntypedZeroRanges s)" (wp: setObject_ksPSpace_only updateObject_default_inv) +lemma sym_heap_sched_pointers_lift: + assumes prevs: "\P. f \\s. P (tcbSchedPrevs_of s)\" + assumes nexts: "\P. f \\s. P (tcbSchedNexts_of s)\" + shows "f \sym_heap_sched_pointers\" + by (rule_tac f=tcbSchedPrevs_of in hoare_lift_Pf2; wpsimp wp: assms) + +crunches setNotification + for tcbSchedNexts_of[wp]: "\s. P (tcbSchedNexts_of s)" + and tcbSchedPrevs_of[wp]: "\s. P (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + and ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: updateObject_default_def) + lemma set_ntfn_minor_invs': "\invs' and obj_at' (\ntfn. 
ntfn_q_refs_of' (ntfnObj ntfn) = ntfn_q_refs_of' (ntfnObj val) \ ntfn_bound_refs' (ntfnBoundTCB ntfn) = ntfn_bound_refs' (ntfnBoundTCB val)) @@ -2061,9 +2328,10 @@ lemma set_ntfn_minor_invs': and (\s. ptr \ ksIdleThread s) \ setNotification ptr val \\rv. invs'\" - apply (clarsimp simp add: invs'_def valid_state'_def cteCaps_of_def) - apply (wp irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift, - simp_all add: o_def) + apply (clarsimp simp: invs'_def valid_state'_def cteCaps_of_def) + apply (wpsimp wp: irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift + sym_heap_sched_pointers_lift valid_bitmaps_lift + simp: o_def) apply (clarsimp elim!: rsubst[where P=sym_refs] intro!: ext dest!: obj_at_state_refs_ofD')+ @@ -2114,21 +2382,21 @@ lemma valid_globals_cte_wpD': lemma dmo_aligned'[wp]: "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_distinct'[wp]: "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_valid_objs'[wp]: "\valid_objs'\ doMachineOp f \\_. valid_objs'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done @@ -2136,7 +2404,7 @@ lemma dmo_inv': assumes R: "\P. \P\ f \\_. P\" shows "\P\ doMachineOp f \\_. P\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp apply (drule in_inv_by_hoareD [OF R]) apply simp @@ -2149,21 +2417,17 @@ crunch typ_at'[wp]: doMachineOp "\s. P (typ_at' T p s)" lemmas doMachineOp_typ_ats[wp] = typ_at_lifts [OF doMachineOp_typ_at'] lemma doMachineOp_invs_bits[wp]: - "\valid_pspace'\ doMachineOp m \\rv. valid_pspace'\" - "\\s. sch_act_wf (ksSchedulerAction s) s\ - doMachineOp m \\rv s. sch_act_wf (ksSchedulerAction s) s\" - "\Invariants_H.valid_queues\ doMachineOp m \\rv. Invariants_H.valid_queues\" - "\valid_queues'\ doMachineOp m \\rv. valid_queues'\" - "\\s. P (state_refs_of' s)\ - doMachineOp m - \\rv s. P (state_refs_of' s)\" - "\if_live_then_nonz_cap'\ doMachineOp m \\rv. if_live_then_nonz_cap'\" - "\cur_tcb'\ doMachineOp m \\rv. cur_tcb'\" - "\if_unsafe_then_cap'\ doMachineOp m \\rv. if_unsafe_then_cap'\" + "doMachineOp m \valid_pspace'\" + "doMachineOp m \\s. sch_act_wf (ksSchedulerAction s) s\" + "doMachineOp m \valid_bitmaps\" + "doMachineOp m \valid_sched_pointers\" + "doMachineOp m \\s. P (state_refs_of' s)\" + "doMachineOp m \if_live_then_nonz_cap'\" + "doMachineOp m \cur_tcb'\" + "doMachineOp m \if_unsafe_then_cap'\" by (simp add: doMachineOp_def split_def - valid_pspace'_def valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs - | wp cur_tcb_lift sch_act_wf_lift tcb_in_cur_domain'_lift - | fastforce elim: state_refs_of'_pspaceI)+ + | wp + | fastforce elim: state_refs_of'_pspaceI)+ crunch obj_at'[wp]: doMachineOp "\s. 
P (obj_at' P' p s)" @@ -2193,5 +2457,15 @@ lemma obj_at'_is_canonical: apply (clarsimp simp: obj_at'_def pspace_canonical'_def projectKOs) by (drule_tac x=t in bspec) clarsimp+ +lemma aligned'_distinct'_ko_wp_at'I: + "\ksPSpace s' x = Some ko; P ko; pspace_aligned' s'; pspace_distinct' s'\ + \ ko_wp_at' P x s'" + apply (simp add: ko_wp_at'_def pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply (cases ko; force) + done + +lemmas aligned'_distinct'_ko_at'I = aligned_distinct_obj_atI' + end end diff --git a/proof/refine/X64/LevityCatch.thy b/proof/refine/X64/LevityCatch.thy index 71e947dbdf..9ba1e50687 100644 --- a/proof/refine/X64/LevityCatch.thy +++ b/proof/refine/X64/LevityCatch.thy @@ -8,6 +8,7 @@ theory LevityCatch imports "BaseRefine.Include" "Lib.LemmaBucket" + "Lib.Corres_Method" begin (* Try again, clagged from Include *) @@ -39,14 +40,14 @@ lemma alignCheck_assert: lemma magnitudeCheck_inv: "\P\ magnitudeCheck x y n \\rv. P\" apply (clarsimp simp add: magnitudeCheck_def split: option.splits) - apply (wp hoare_when_wp) + apply (wp when_wp) apply simp done lemma alignCheck_inv: "\P\ alignCheck x n \\rv. P\" apply (simp add: alignCheck_def unless_def alignError_def) - apply (wp hoare_when_wp) + apply (wp when_wp) apply simp done diff --git a/proof/refine/X64/PageTableDuplicates.thy b/proof/refine/X64/PageTableDuplicates.thy index d9273d7979..3186bf6381 100644 --- a/proof/refine/X64/PageTableDuplicates.thy +++ b/proof/refine/X64/PageTableDuplicates.thy @@ -29,17 +29,17 @@ lemma foldr_data_map_insert[simp]: done crunch arch_inv[wp]: createNewObjects "\s. P (x64KSSKIMPML4 (ksArchState s))" - (simp: crunch_simps zipWithM_x_mapM wp: crunch_wps hoare_unless_wp) + (simp: crunch_simps zipWithM_x_mapM wp: crunch_wps unless_wp) crunch arch_inv[wp]: resetUntypedCap "\s. P (ksArchState s)" (simp: crunch_simps - wp: hoare_drop_imps hoare_unless_wp mapME_x_inv_wp + wp: hoare_drop_imps unless_wp mapME_x_inv_wp preemptionPoint_inv ignore: freeMemory) lemma mapM_x_mapM_valid: "\ P \ mapM_x f xs \\r. Q\ \ \P\mapM f xs \\r. 
Q\" - apply (simp add:NonDetMonadLemmaBucket.mapM_x_mapM) + apply (simp add: mapM_x_mapM) apply (clarsimp simp:valid_def return_def bind_def) apply (drule spec) apply (erule impE) diff --git a/proof/refine/X64/RAB_FN.thy b/proof/refine/X64/RAB_FN.thy index 376f1658d4..cbe2a89c2f 100644 --- a/proof/refine/X64/RAB_FN.thy +++ b/proof/refine/X64/RAB_FN.thy @@ -92,35 +92,35 @@ proof (induct cap capptr bits rule: resolveAddressBits.induct) apply (subst resolveAddressBits.simps, subst resolveAddressBitsFn.simps) apply (simp only: Let_def haskell_assertE_def K_bind_def) apply (rule monadic_rewrite_name_pre) - apply (rule monadic_rewrite_imp) + apply (rule monadic_rewrite_guard_imp) apply (rule_tac P="(=) s" in monadic_rewrite_trans) (* step 1, apply the induction hypothesis on the lhs *) apply (rule monadic_rewrite_named_if monadic_rewrite_named_bindE - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="returnOk y" for y] - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="x $ y" for x y] - monadic_rewrite_refl[THEN monadic_rewrite_imp, where f="assertE P" for P s] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="returnOk y" for y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="x $ y" for x y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="assertE P" for P s] TrueI)+ apply (rule_tac g="case nextCap of CNodeCap a b c d \ ?g nextCap cref bitsLeft - | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_imp) + | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_guard_imp) apply (wpc | rule monadic_rewrite_refl "1.hyps" | simp only: capability.case haskell_assertE_def simp_thms)+ apply (clarsimp simp: in_monad locateSlot_conv getSlotCap_def dest!: in_getCTE fst_stateAssertD) apply (fastforce elim: cte_wp_at_weakenE') - apply (rule monadic_rewrite_refl[THEN monadic_rewrite_imp], simp) + apply (rule monadic_rewrite_refl[THEN monadic_rewrite_guard_imp], simp) (* step 2, split and match based on the lhs structure *) apply (simp add: locateSlot_conv liftE_bindE unlessE_def whenE_def if_to_top_of_bindE assertE_def stateAssert_def bind_assoc assert_def if_to_top_of_bind getSlotCap_def split del: if_split cong: if_cong) - apply (rule monadic_rewrite_if_lhs monadic_rewrite_symb_exec_l'[OF get_wp] + apply (rule monadic_rewrite_if_l monadic_rewrite_symb_exec_l'[OF _ get_wp, rotated] empty_fail_get no_fail_get impI monadic_rewrite_refl get_wp | simp add: throwError_def returnOk_def locateSlotFun_def if_not_P isCNodeCap_capUntypedPtr_capCNodePtr cong: if_cong split del: if_split)+ - apply (rule monadic_rewrite_symb_exec_l'[OF getCTE_inv _ _ _ getCTE_cte_wp_at]) + apply (rule monadic_rewrite_symb_exec_l'[OF _ getCTE_inv _ _ getCTE_cte_wp_at, rotated]) apply simp apply (rule impI, rule no_fail_getCTE) apply (simp add: monadic_rewrite_def simpler_gets_def return_def returnOk_def diff --git a/proof/refine/X64/Refine.thy b/proof/refine/X64/Refine.thy index 3d7ab23cb8..e3a38f0f62 100644 --- a/proof/refine/X64/Refine.thy +++ b/proof/refine/X64/Refine.thy @@ -80,7 +80,7 @@ lemma typ_at_UserDataI: apply (drule (1) bspec) apply clarsimp apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) - apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def + apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def tcb_relation_cut_def cte_relation_def other_obj_relation_def pde_relation_def pdpte_relation_def pml4e_relation_def split: Structures_A.kernel_object.split_asm @@ -111,7 +111,7 @@ lemma typ_at_DeviceDataI: apply (drule (1) 
bspec) apply clarsimp apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) - apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def + apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def tcb_relation_cut_def cte_relation_def other_obj_relation_def pde_relation_def pdpte_relation_def pml4e_relation_def split: Structures_A.kernel_object.split_asm @@ -280,7 +280,7 @@ lemma kernel_entry_invs: thread_set_ct_running thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext - static_imp_wp + hoare_weak_lift_imp | clarsimp simp add: tcb_cap_cases_def active_from_running)+ done @@ -296,18 +296,18 @@ definition lemma do_user_op_valid_list:"\valid_list\ do_user_op f tc \\_. valid_list\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_valid_sched:"\valid_sched\ do_user_op f tc \\_. valid_sched\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_sched_act: "\\s. P (scheduler_action s)\ do_user_op f tc \\_ s. P (scheduler_action s)\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_invs2: @@ -401,6 +401,23 @@ abbreviation valid_domain_list' :: "'a kernel_state_scheme \ bool" w lemmas valid_domain_list'_def = valid_domain_list_2_def +(* nothing extra needed on this architecture *) +defs fastpathKernelAssertions_def: + "fastpathKernelAssertions \ \s. True" + +lemma fastpathKernelAssertions_cross: + "\ (s,s') \ state_relation; invs s; valid_arch_state' s'\ \ fastpathKernelAssertions s'" + unfolding fastpathKernelAssertions_def + by simp + +(* this is only needed for callKernel, where we have invs' on concrete side *) +lemma corres_cross_over_fastpathKernelAssertions: + "\ \s. P s \ invs s; \s'. Q s' \ invs' s'; + corres r P (Q and fastpathKernelAssertions) f g \ \ + corres r P Q f g" + by (rule corres_cross_over_guard[where Q="Q and fastpathKernelAssertions"]) + (fastforce elim: fastpathKernelAssertions_cross)+ + defs kernelExitAssertions_def: "kernelExitAssertions s \ 0 < ksDomainTime s \ valid_domain_list' s" @@ -419,8 +436,8 @@ lemma kernelEntry_invs': (\s. 0 < ksDomainTime s) and valid_domain_list' \" apply (simp add: kernelEntry_def) apply (wp ckernel_invs callKernel_domain_time_left - threadSet_invs_trivial threadSet_ct_running' select_wp - TcbAcc_R.dmo_invs' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' + TcbAcc_R.dmo_invs' hoare_weak_lift_imp doMachineOp_sch_act_simple callKernel_domain_time_left | clarsimp simp: user_memory_update_def no_irq_def tcb_at_invs' @@ -498,7 +515,7 @@ lemma doUserOp_invs': (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and (\s. 
0 < ksDomainTime s) and valid_domain_list'\" apply (simp add: doUserOp_def split_def ex_abs_def) - apply (wp device_update_invs' select_wp + apply (wp device_update_invs' | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def user_memory_update_def))+ apply (clarsimp simp: user_memory_update_def simpler_modify_def @@ -541,7 +558,7 @@ lemma kernel_corres': apply simp apply (rule handleInterrupt_corres[simplified dc_def]) apply simp - apply (wp hoare_drop_imps hoare_vcg_all_lift)[1] + apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift simp: schact_is_rct_def)[1] apply simp apply (rule_tac Q="\irq s. invs' s \ (\irq'. irq = Some irq' \ @@ -550,7 +567,7 @@ lemma kernel_corres': in hoare_post_imp) apply simp apply (wp doMachineOp_getActiveIRQ_IRQ_active handle_event_valid_sched | simp)+ - apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_post_impErr) + apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_strengthen_postE) apply wpsimp+ apply (simp add: invs'_def valid_state'_def) apply (rule corres_split[OF schedule_corres]) @@ -558,9 +575,9 @@ lemma kernel_corres': apply (wp schedule_invs' hoare_vcg_if_lift2 hoare_drop_imps handle_interrupt_valid_sched[unfolded non_kernel_IRQs_def, simplified] |simp)+ apply (rule_tac Q="\_. valid_sched and invs and valid_list" and E="\_. valid_sched and invs and valid_list" - in hoare_post_impErr) + in hoare_strengthen_postE) apply (wp handle_event_valid_sched |simp)+ - apply (clarsimp simp: active_from_running) + apply (clarsimp simp: active_from_running schact_is_rct_def) apply (clarsimp simp: active_from_running') done @@ -585,6 +602,8 @@ lemma kernel_corres: (\s. vs_valid_duplicates' (ksPSpace s))) (call_kernel event) (callKernel event)" unfolding callKernel_def K_bind_def + apply (rule corres_cross_over_fastpathKernelAssertions, blast+) + apply (rule corres_stateAssert_r) apply (rule corres_guard_imp) apply (rule corres_add_noop_lhs2) apply (simp only: bind_assoc[symmetric]) @@ -616,7 +635,7 @@ lemma entry_corres: apply (rule corres_split[OF getCurThread_corres]) apply (rule corres_split) apply simp - apply (rule threadset_corresT) + apply (rule threadset_corresT; simp) apply (simp add: tcb_relation_def arch_tcb_relation_def arch_tcb_context_set_def atcbContextSet_def) apply (clarsimp simp: tcb_cap_cases_def) @@ -628,15 +647,16 @@ lemma entry_corres: apply (simp add: tcb_relation_def arch_tcb_relation_def arch_tcb_context_get_def atcbContextGet_def) apply wp+ - apply (rule hoare_strengthen_post, rule akernel_invs_det_ext, simp add: invs_def cur_tcb_def) + apply (rule hoare_strengthen_post, rule akernel_invs_det_ext, fastforce simp: invs_def cur_tcb_def) apply (rule hoare_strengthen_post, rule ckernel_invs, simp add: invs'_def cur_tcb'_def) apply (wp thread_set_invs_trivial thread_set_ct_running threadSet_invs_trivial threadSet_ct_running' - select_wp thread_set_not_state_valid_sched static_imp_wp + thread_set_not_state_valid_sched hoare_weak_lift_imp hoare_vcg_disj_lift ct_in_state_thread_state_lift | simp add: tcb_cap_cases_def ct_in_state'_def thread_set_no_change_tcb_state - | (wps, wp threadSet_st_tcb_at2) )+ - apply (clarsimp simp: invs_def cur_tcb_def) + schact_is_rct_def + | (wps, wp threadSet_st_tcb_at2))+ + apply (fastforce simp: invs_def cur_tcb_def) apply (clarsimp simp: ct_in_state'_def) done @@ -800,7 +820,7 @@ lemma domain_list_rel_eq: by (clarsimp simp: state_relation_def) crunch valid_objs': doUserOp, checkActiveIRQ valid_objs' - (wp: crunch_wps select_wp) + (wp: crunch_wps) lemma ckernel_invariant: "ADT_H uop \ 
full_invs'" diff --git a/proof/refine/X64/Retype_R.thy b/proof/refine/X64/Retype_R.thy index f5dcef93ed..1e87a9df47 100644 --- a/proof/refine/X64/Retype_R.thy +++ b/proof/refine/X64/Retype_R.thy @@ -60,8 +60,6 @@ lemma objBitsKO_bounded2[simp]: by (simp add: objBits_simps' word_bits_def pageBits_def archObjSize_def split: Structures_H.kernel_object.split arch_kernel_object.split) -declare select_singleton_is_return[simp] - definition APIType_capBits :: "X64_H.object_type \ nat \ nat" where @@ -317,7 +315,7 @@ lemma state_relation_null_filterE: null_filter (caps_of_state t) = null_filter (caps_of_state s); null_filter' (ctes_of t') = null_filter' (ctes_of s'); pspace_relation (kheap t) (ksPSpace t'); - ekheap_relation (ekheap t) (ksPSpace t'); + ekheap_relation (ekheap t) (ksPSpace t'); ready_queues_relation t t'; ghost_relation (kheap t) (gsUserPages t') (gsCNodes t'); valid_list s; pspace_aligned' s'; pspace_distinct' s'; valid_objs s; valid_mdb s; pspace_aligned' t'; pspace_distinct' t'; @@ -1005,7 +1003,7 @@ lemma retype_ekheap_relation: apply (intro impI conjI) apply clarsimp apply (drule_tac x=a in bspec,force) - apply (clarsimp simp add: other_obj_relation_def split: if_split_asm) + apply (clarsimp simp add: tcb_relation_cut_def split: if_split_asm) apply (case_tac ko,simp_all) apply (clarsimp simp add: makeObjectKO_def cong: if_cong split: sum.splits Structures_H.kernel_object.splits arch_kernel_object.splits X64_H.object_type.splits @@ -1195,6 +1193,149 @@ lemma update_gs_simps[simp]: gsUserPages_update (\ups x. if x \ ptrs then Some X64HugePage else ups x)" by (simp_all add: update_gs_def) +lemma retype_ksPSpace_dom_same: + fixes x v + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ksPSpace s' x = Some v \ + foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s') x + = Some v" +proof - + have cover':"range_cover ptr sz (objBitsKO ko) m" + by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) + assume "ksPSpace s' x = Some v" + thus ?thesis + apply (clarsimp simp:foldr_upd_app_if[folded data_map_insert_def]) + apply (drule domI[where m = "ksPSpace s'"]) + apply (drule(1) IntI) + apply (erule_tac A = "A \ B" for A B in in_emptyE[rotated]) + apply (rule disjoint_subset[OF new_cap_addrs_subset[OF cover']]) + apply (clarsimp simp:ptr_add_def field_simps) + apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn']) + done +qed + +lemma retype_ksPSpace_None: + assumes ad: "pspace_aligned' s" "pspace_distinct' s" "pspace_bounded' s" + assumes pn: "pspace_no_overlap' ptr sz s" + assumes cover: "range_cover ptr sz (objBitsKO val + gbits) n" + shows "\x. x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" +proof - + note cover' = range_cover_rel[where sbit' = "objBitsKO val",OF cover _ refl,simplified] + show "\x. 
x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ ksPSpace s x = None" + apply (drule subsetD[OF new_cap_addrs_subset [OF cover' ]]) + apply (insert pspace_no_overlap_disjoint' [OF ad(1) pn]) + apply (fastforce simp: ptr_add_def p_assoc_help) + done +qed + +lemma retype_tcbSchedPrevs_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedPrevs_of + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedPrevs_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb projectKOs + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_tcbSchedNexts_of: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "tcbSchedNexts_of + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = tcbSchedNexts_of s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (rule ext) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + simp add: makeObjectKO_def makeObject_tcb projectKOs + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm) + fastforce+ +qed + +lemma retype_inQ: + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "\d p. + inQ d p |< tcbs_of' + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\) + = inQ d p |< tcbs_of' s'" +proof - + note dom_same' = retype_ksPSpace_dom_same[OF vs' pn' ko cover num_r] + show ?thesis + apply (intro allI) + apply (rule ext) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def split: option.splits) + apply (intro impI conjI allI; (drule dom_same'; simp)?) 
+ apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def] + split: if_split_asm kernel_object.split_asm) + using ko + by (cases ty; + fastforce simp add: makeObjectKO_def makeObject_tcb projectKOs + split: kernel_object.split_asm arch_kernel_object.split_asm object_type.split_asm + apiobject_type.split_asm if_split_asm + | fastforce)+ +qed + +lemma retype_ready_queues_relation: + assumes rlqr: "ready_queues_relation s s'" + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + assumes pn': "pspace_no_overlap' ptr sz s'" + assumes ko: "makeObjectKO dev ty = Some ko" + assumes cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + assumes num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ready_queues_relation + (s \kheap := foldr (\p. data_map_insert p (default_object (APIType_map2 ty) dev us)) + (retype_addrs ptr (APIType_map2 ty) n us) (kheap s)\) + (s'\ksPSpace := foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s')\)" + using rlqr + unfolding ready_queues_relation_def Let_def + by (clarsimp simp: retype_tcbSchedNexts_of[OF vs' pn' ko cover num_r, simplified] + retype_tcbSchedPrevs_of[OF vs' pn' ko cover num_r, simplified] + retype_inQ[OF vs' pn' ko cover num_r, simplified]) + +lemma ksReadyQueues_update_gs[simp]: + "ksReadyQueues (update_gs tp us addrs s) = ksReadyQueues s" + by (simp add: update_gs_def + split: aobject_type.splits Structures_A.apiobject_type.splits) + lemma retype_state_relation: notes data_map_insert_def[simp del] assumes sr: "(s, s') \ state_relation" @@ -1223,7 +1364,7 @@ lemma retype_state_relation: \ state_relation" (is "(ekheap_update (\_. ?eps) s\kheap := ?ps\, update_gs _ _ _ (s'\ksPSpace := ?ps'\)) \ state_relation") - proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) + proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) have cover':"range_cover ptr sz (objBitsKO ko) m" by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) @@ -1412,6 +1553,16 @@ lemma retype_state_relation: else cns x" in exI, simp) apply (rule_tac x=id in exI, simp)+ done + + have rdyqrel: "ready_queues_relation s s'" + using sr by (simp add: state_relation_def) + + thus "ready_queues_relation_2 (ready_queues s) (ksReadyQueues s') + (?ps' |> tcb_of' |> tcbSchedNext) (?ps' |> tcb_of' |> tcbSchedPrev) + (\d p. inQ d p |< (?ps' |> tcb_of'))" + using retype_ready_queues_relation[OF _ vs' pn' ko cover num_r] + by (clarsimp simp: ready_queues_relation_def Let_def) + qed lemma new_cap_addrs_fold': @@ -1522,7 +1673,7 @@ lemma retype_region_ext_modify_kheap_futz: apply (simp add: modify_def[symmetric]) done -lemmas retype_region_ext_modify_kheap_futz' = fun_cong[OF arg_cong[where f=NonDetMonad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] +lemmas retype_region_ext_modify_kheap_futz' = fun_cong[OF arg_cong[where f=Nondet_Monad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] lemma foldr_upd_app_if_eta_futz: "foldr (\p ps. ps(p \ f p)) as = (\g x. 
if x \ set as then Some (f x) else g x)" @@ -2434,7 +2585,6 @@ qed lemma other_objs_default_relation: "\ case ty of Structures_A.EndpointObject \ ko = injectKO (makeObject :: endpoint) | Structures_A.NotificationObject \ ko = injectKO (makeObject :: Structures_H.notification) - | Structures_A.TCBObject \ ko = injectKO (makeObject :: tcb) | _ \ False \ \ obj_relation_retype (default_object ty dev n) ko" apply (rule obj_relation_retype_other_obj) @@ -2455,6 +2605,13 @@ lemma other_objs_default_relation: split: Structures_A.apiobject_type.split_asm) done +lemma tcb_relation_retype: + "obj_relation_retype (default_object Structures_A.TCBObject dev n) (KOTCB makeObject)" + by (clarsimp simp: default_object_def obj_relation_retype_def tcb_relation_def default_tcb_def + makeObject_tcb makeObject_cte new_context_def newContext_def + fault_rel_optionation_def initContext_def default_arch_tcb_def newArchTCB_def + arch_tcb_relation_def objBits_simps' tcb_relation_cut_def) + lemma captable_relation_retype: "n < word_bits \ obj_relation_retype (default_object Structures_A.CapTableObject dev n) (KOCTE makeObject)" @@ -2597,6 +2754,7 @@ lemma ksMachineState_update_gs[simp]: "ksMachineState (update_gs tp us addrs s) = ksMachineState s" by (simp add: update_gs_def split: aobject_type.splits Structures_A.apiobject_type.splits) + lemma update_gs_ksMachineState_update_swap: "update_gs tp us addrs (ksMachineState_update f s) = ksMachineState_update f (update_gs tp us addrs s)" @@ -2605,7 +2763,6 @@ lemma update_gs_ksMachineState_update_swap: declare hoare_in_monad_post[wp del] declare univ_get_wp[wp del] -declare result_in_set_wp[wp del] crunch valid_arch_state'[wp]: copyGlobalMappings "valid_arch_state'" (wp: crunch_wps) @@ -3233,10 +3390,10 @@ proof (intro conjI impI) apply (rule_tac ptr="x + xa" in cte_wp_at_tcbI', assumption+) apply fastforce apply simp - apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound user_context) - apply (case_tac thread_state, simp_all add: valid_tcb_state'_def - valid_bound_ntfn'_def obj_at_disj' - split: option.splits)[2] + apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound tcbprev tcbnext user_context) + apply (case_tac thread_state, simp_all add: valid_tcb_state'_def valid_bound_tcb'_def + valid_bound_ntfn'_def obj_at_disj' none_top_def + split: option.splits)[4] apply (simp add: valid_cte'_def) apply (frule pspace_alignedD' [OF _ ad(1)]) apply (frule pspace_distinctD' [OF _ ad(2)]) @@ -3522,7 +3679,7 @@ lemma createObjects_orig_cte_wp_at2': apply (rule handy_prop_divs) apply (wp createObjects_orig_obj_at2'[where sz = sz], simp) apply (simp add: tcb_cte_cases_def) - including no_pre + including classic_wp_pre apply (wp handy_prop_divs createObjects_orig_obj_at2'[where sz = sz] | simp add: o_def cong: option.case_cong)+ done @@ -3543,7 +3700,7 @@ lemma createNewCaps_cte_wp_at2: \ pspace_no_overlap' ptr sz s\ createNewCaps ty ptr n objsz dev \\rv s. P (cte_wp_at' P' p s)\" - including no_pre + including classic_wp_pre apply (simp add: createNewCaps_def createObjects_def X64_H.toAPIType_def split del: if_split) apply (case_tac ty; simp add: createNewCaps_def createObjects_def Arch_createNewCaps_def @@ -3963,16 +4120,6 @@ lemma sch_act_wf_lift_asm: apply auto done -lemma valid_queues_lift_asm': - assumes tat: "\d p t. \\s. \ obj_at' (inQ d p) t s \ Q d p s\ f \\_ s. \ obj_at' (inQ d p) t s\" - and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" - shows "\\s. valid_queues' s \ (\d p. Q d p s)\ f \\_. 
valid_queues'\" - apply (simp only: valid_queues'_def imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - tat prq) - apply simp - done - lemma createObjects'_ct[wp]: "\\s. P (ksCurThread s)\ createObjects' p n v us \\rv s. P (ksCurThread s)\" by (rule createObjects_pspace_only, simp) @@ -4228,7 +4375,7 @@ lemma createNewCaps_idle'[wp]: apply (rename_tac apiobject_type) apply (case_tac apiobject_type, simp_all split del: if_split)[1] apply (wp, simp) - including no_pre + including classic_wp_pre apply (wp mapM_x_wp' createObjects_idle' threadSet_idle' @@ -4359,7 +4506,7 @@ lemma createNewCaps_ioports': lemma createObjects'_irq_states' [wp]: "\valid_irq_states'\ createObjects' a b c d \\_. valid_irq_states'\" apply (simp add: createObjects'_def split_def) - apply (wp hoare_unless_wp|wpc|simp add: alignError_def)+ + apply (wp unless_wp|wpc|simp add: alignError_def)+ apply fastforce done @@ -4371,34 +4518,156 @@ crunch ksMachine[wp]: createObjects "\s. P (ksMachineState s)" crunch cur_domain[wp]: createObjects "\s. P (ksCurDomain s)" (simp: unless_def) -lemma createNewCaps_valid_queues': - "\valid_queues' and pspace_no_overlap' ptr sz - and pspace_aligned' and pspace_distinct' - and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ - createNewCaps ty ptr n us d - \\rv. valid_queues'\" - apply (wp valid_queues_lift_asm' [OF createNewCaps_obj_at2]) - apply (clarsimp simp: projectKOs) - apply (simp add: makeObjectKO_def - split: object_type.split_asm - apiobject_type.split_asm) - apply (clarsimp simp: inQ_def) - apply (auto simp: makeObject_tcb - split: object_type.splits apiobject_type.splits) +lemma createObjects_valid_bitmaps: + "createObjects' ptr n val gbits \valid_bitmaps\" + apply (clarsimp simp: createObjects'_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_bitmaps_def valid_bitmapQ_def bitmapQ_def bitmapQ_no_L2_orphans_def + bitmapQ_no_L1_orphans_def) done -lemma createNewCaps_valid_queues: - "\valid_queues and pspace_no_overlap' ptr sz - and pspace_aligned' and pspace_distinct' - and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ - createNewCaps ty ptr n us d - \\rv. valid_queues\" -apply (rule hoare_gen_asm) -apply (wp valid_queues_lift_asm createNewCaps_obj_at2[where sz=sz]) -apply (clarsimp simp: projectKO_opts_defs) -apply (simp add: inQ_def) -apply (wp createNewCaps_pred_tcb_at'[where sz=sz] | simp)+ -done +lemma valid_bitmaps_gsCNodes_update[simp]: + "valid_bitmaps (gsCNodes_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +lemma valid_bitmaps_gsUserPages_update[simp]: + "valid_bitmaps (gsUserPages_update f s) = valid_bitmaps s" + by (simp add: valid_bitmaps_def bitmapQ_defs) + +crunches curDomain, copyGlobalMappings + for valid_bitmaps[wp]: valid_bitmaps + and sched_pointers[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + and valid_sched_pointers[wp]: valid_sched_pointers + (wp: crunch_wps valid_bitmaps_lift) + +lemma createNewCaps_valid_bitmaps: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_bitmaps s\ + createNewCaps ty ptr n us dev + \\_. 
valid_bitmaps\" + unfolding createNewCaps_def + apply (clarsimp simp: X64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_bitmaps) + by (wpsimp wp: createObjects_valid_bitmaps[simplified o_def] mapM_x_wp + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ + +lemma createObjects_sched_queues: + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True) + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + (is "\ \s. _ \ _ \ ?Pre s \ _ \\_. _\") +proof - + show ?thesis + apply (rule hoare_grab_asm) + apply (rule hoare_grab_asm) + proof - + assume not_0: "\ n = 0" + and cover: "range_cover ptr sz ((objBitsKO val) + gbits) n" + then show + "\\s. ?Pre s\ createObjects' ptr n val gbits \\_ s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + proof - + have shiftr_not_zero:" 1 \ ((of_nat n)::machine_word) << gbits" + using range_cover_not_zero_shift[OF not_0 cover,where gbits = gbits] + by (simp add:word_le_sub1) + show ?thesis + supply projectKOs[simp] + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: shiftL_nat data_map_insert_def[symmetric] + new_cap_addrs_fold'[OF shiftr_not_zero] + simp del: data_map_insert_def) + using range_cover.unat_of_nat_n_shift[OF cover, where gbits=gbits, simplified] + apply (clarsimp simp: foldr_upd_app_if) + apply (rule_tac a="tcbSchedNexts_of s" and b="tcbSchedPrevs_of s" + in rsubst2[rotated, OF sym sym, where P=P]) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp split: kernel_object.splits option.splits) + apply (rule ext) + apply (clarsimp simp: opt_map_def) + apply (frule (3) retype_ksPSpace_None[simplified mult.commute]) + apply (fastforce intro: cover) + apply fastforce + apply (clarsimp split: kernel_object.splits option.splits) + apply simp + done + qed + qed +qed + +lemma createNewCaps_sched_queues: + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + assumes not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\ + createNewCaps ty ptr n us dev + \\_ s. 
P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + unfolding createNewCaps_def + apply (clarsimp simp: X64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (insert cover not_0) + apply (wpsimp wp: mapM_x_wp' createObjects_sched_queues + simp: curDomain_def) + by (wpsimp wp: mapM_x_wp' createObjects_sched_queues[simplified o_def] + threadSet_sched_pointers + | simp add: objBitsKO_def APIType_capBits_def valid_pspace'_def makeObject_tcb + objBits_def archObjSize_def createObjects_def + bit_simps + | intro conjI impI)+ + +lemma createObjects_valid_sched_pointers: + "\\s. valid_sched_pointers s + \ (case val of KOTCB tcb \ tcbSchedNext tcb = None \ tcbSchedPrev tcb = None + | _ \ True)\ + createObjects' ptr n val gbits + \\_. valid_sched_pointers\" + supply projectKOs[simp] + apply (clarsimp simp: createObjects'_def unless_def alignError_def split_def) + apply (wp case_option_wp[where P="\_. P" and P'=P for P, simplified] assert_inv + | clarsimp simp del: fun_upd_apply)+ + apply (clarsimp simp: valid_sched_pointers_def foldr_upd_app_if opt_pred_def opt_map_def comp_def) + apply (cases "tcb_of' val"; clarsimp) + done + +lemma createNewCaps_valid_sched_pointers: + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s \ valid_sched_pointers s\ + createNewCaps ty ptr n us dev + \\_. valid_sched_pointers\" + unfolding createNewCaps_def + apply (clarsimp simp: X64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wpsimp wp: createObjects_valid_sched_pointers) + by (wpsimp wp: createObjects_valid_sched_pointers[simplified o_def] mapM_x_wp + threadSet_valid_sched_pointers + | simp add: makeObject_tcb objBits_def createObjects_def + | intro conjI impI)+ lemma mapM_x_threadSet_valid_pspace: "\valid_pspace' and K (curdom \ maxDomain)\ @@ -4557,7 +4826,7 @@ proof - apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) apply (rule hoare_pre) apply (wps a b c d) - apply (wp static_imp_wp e' hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) apply (auto simp: obj_at'_def ct_in_state'_def projectKOs st_tcb_at'_def) done qed @@ -4638,7 +4907,7 @@ lemma createObjects_null_filter': createObjects' ptr n val gbits \\addrs a. 
P (null_filter' (ctes_of a))\" apply (clarsimp simp: createObjects'_def split_def) - apply (wp hoare_unless_wp|wpc + apply (wp unless_wp|wpc | clarsimp simp:haskell_assert_def alignError_def split del: if_splits simp del:fun_upd_apply)+ apply (subst new_cap_addrs_fold') @@ -4786,12 +5055,13 @@ proof (rule hoare_gen_asm, elim conjE) createNewCaps_valid_arch_state valid_irq_node_lift_asm [unfolded pred_conj_def, OF _ createNewCaps_obj_at'] createNewCaps_irq_handlers' createNewCaps_vms createNewCaps_ioports' - createNewCaps_valid_queues - createNewCaps_valid_queues' createNewCaps_pred_tcb_at' cnc_ct_not_inQ createNewCaps_ct_idle_or_in_cur_domain' createNewCaps_sch_act_wf createNewCaps_urz[where sz=sz] + createNewCaps_sched_queues[OF cover not_0] + createNewCaps_valid_sched_pointers + createNewCaps_valid_bitmaps | simp)+ using not_0 apply (clarsimp simp: valid_pspace'_def) @@ -4864,35 +5134,6 @@ lemma createObjects_sch: apply (wp sch_act_wf_lift_asm createObjects_pred_tcb_at' createObjects_orig_obj_at3 | force)+ done -lemma createObjects_queues: - "\\s. valid_queues s \ pspace_aligned' s \ pspace_distinct' s \ - pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ - createObjects ptr n val gbits - \\rv. valid_queues\" - apply (wp valid_queues_lift_asm [unfolded pred_conj_def, OF createObjects_orig_obj_at3] - createObjects_pred_tcb_at' [unfolded pred_conj_def]) - apply fastforce - apply wp+ - apply fastforce - done - -lemma createObjects_queues': - assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" - shows - "\\s. valid_queues' s \ pspace_aligned' s \ pspace_distinct' s \ - pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ - createObjects ptr n val gbits - \\rv. valid_queues'\" - apply (simp add: createObjects_def) - apply (wp valid_queues_lift_asm') - apply (wp createObjects_orig_obj_at2') - apply clarsimp - apply assumption - apply wp - apply (clarsimp simp: no_tcb split: option.splits) - apply fastforce - done - lemma createObjects_no_cte_ifunsafe': assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" assumes no_tcb: "\t. 
projectKO_opt val \ Some (t::tcb)" @@ -5158,43 +5399,53 @@ proof - apply (simp)+ done show ?thesis - apply (rule hoare_grab_asm)+ - apply (clarsimp simp: invs'_def valid_state'_def) - apply wp - apply (rule hoare_pre) - apply (rule hoare_vcg_conj_lift) - apply (simp add: createObjects_def,wp createObjects_valid_pspace_untyped') - apply (wp assms | simp add: objBits_def)+ - apply (wp createObjects_sch createObjects_queues) - apply (rule hoare_vcg_conj_lift) - apply (simp add: createObjects_def) - apply (wp createObjects_state_refs_of'') - apply (rule hoare_vcg_conj_lift) - apply (simp add: createObjects_def) - apply (wp createObjects_iflive') - apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift - createObjects_idle' createObjects_no_cte_valid_global - createObjects_valid_arch createObjects_irq_state - createObjects_no_cte_irq_handlers createObjects_cur' - createObjects_queues' [OF no_tcb] createObjects_no_cte_ioports - assms | simp add: objBits_def )+ - apply (rule hoare_vcg_conj_lift) - apply (simp add: createObjects_def) - apply (wp createObjects_idle') - apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift - createObjects_idle' createObjects_no_cte_valid_global - createObjects_valid_arch createObjects_irq_state - createObjects_no_cte_irq_handlers createObjects_cur' - createObjects_queues' [OF no_tcb] assms - createObjects_pspace_domain_valid co_ct_not_inQ - createObjects_ct_idle_or_in_cur_domain' createObjects_no_cte_ioports - createObjects_untyped_ranges_zero'[OF moKO] - | simp)+ - apply clarsimp - apply ((intro conjI; assumption?); simp add: valid_pspace'_def objBits_def) - apply (fastforce simp add: no_cte no_tcb split_def split: option.splits) - apply (clarsimp simp: invs'_def no_tcb valid_state'_def no_cte split: option.splits) - done + apply (rule hoare_grab_asm)+ + apply (clarsimp simp: invs'_def valid_state'_def) + apply wp + apply (rule hoare_pre) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def,wp createObjects_valid_pspace_untyped') + apply (wp assms | simp add: objBits_def)+ + apply (wp createObjects_sch) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_state_refs_of'') + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_iflive') + apply (wp createObjects_no_cte_ifunsafe' assms) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_idle') + apply (wp irqs_masked_lift createObjects_no_cte_valid_global + createObjects_valid_arch createObjects_irq_state + createObjects_no_cte_irq_handlers createObjects_no_cte_ioports assms + | simp)+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_sched_queues) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_valid_sched_pointers) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wpsimp wp: createObjects_valid_bitmaps) + apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift + createObjects_idle' createObjects_no_cte_valid_global + createObjects_valid_arch createObjects_irq_state + createObjects_no_cte_irq_handlers createObjects_cur' + assms + createObjects_pspace_domain_valid co_ct_not_inQ + createObjects_ct_idle_or_in_cur_domain' + createObjects_untyped_ranges_zero'[OF moKO] + createObjects_sched_queues + | simp)+ + apply clarsimp + apply ((intro conjI; assumption?); simp add: valid_pspace'_def objBits_def) + apply 
(fastforce simp add: no_cte no_tcb split_def split: option.splits) + apply (auto simp: invs'_def no_tcb valid_state'_def no_cte + split: option.splits kernel_object.splits) + done qed lemma corres_retype_update_gsI: @@ -5230,7 +5481,7 @@ lemma gcd_corres: "corres (=) \ \ (gets cur_domain) curDomain" lemma retype_region2_extra_ext_mapM_x_corres: shows "corres dc (valid_etcbs and (\s. \addr\set addrs. tcb_at addr s)) - (\s. \addr\set addrs. tcb_at' addr s) + (\s. \addr\set addrs. obj_at' (Not \ tcbQueued) addr s) (retype_region2_extra_ext addrs Structures_A.apiobject_type.TCBObject) (mapM_x (\addr. do cdom \ curDomain; threadSet (tcbDomain_update (\_. cdom)) addr @@ -5241,7 +5492,7 @@ lemma retype_region2_extra_ext_mapM_x_corres: apply (rule corres_split_eqr[OF gcd_corres]) apply (rule_tac S="Id \ {(x, y). x \ set addrs}" and P="\s. (\t \ set addrs. tcb_at t s) \ valid_etcbs s" - and P'="\s. \t \ set addrs. tcb_at' t s" + and P'="\s. \t \ set addrs. obj_at' (Not \ tcbQueued) t s" in corres_mapM_x) apply simp apply (rule corres_guard_imp) @@ -5249,8 +5500,10 @@ lemma retype_region2_extra_ext_mapM_x_corres: apply (case_tac tcb') apply simp apply fastforce - apply fastforce + apply (fastforce simp: obj_at'_def) apply (wp hoare_vcg_ball_lift | simp)+ + apply (clarsimp simp: obj_at'_def) + apply fastforce apply auto[1] apply (wp | simp add: curDomain_def)+ done @@ -5283,10 +5536,11 @@ lemma retype_region2_obj_at: apply (auto simp: obj_at_def default_object_def is_tcb_def) done -lemma createObjects_tcb_at': +lemma createObjects_Not_tcbQueued: "\range_cover ptr sz (objBitsKO (injectKOS (makeObject::tcb))) n; n \ 0\ \ \\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ - createObjects ptr n (KOTCB makeObject) 0 \\ptrs s. \addr\set ptrs. tcb_at' addr s\" + createObjects ptr n (KOTCB makeObject) 0 + \\ptrs s. \addr\set ptrs. obj_at' (Not \ tcbQueued) addr s\" apply (rule hoare_strengthen_post[OF createObjects_ko_at_strg[where val = "(makeObject :: tcb)"]]) apply (auto simp: obj_at'_def projectKOs project_inject objBitsKO_def objBits_def makeObject_tcb) done @@ -5394,8 +5648,9 @@ lemma corres_retype_region_createNewCaps: apply (rule corres_retype[where 'a = tcb], simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def makeObjectKO_def - other_objs_default_relation)[1] + tcb_relation_retype)[1] apply (fastforce simp: range_cover_def) + apply (simp add: tcb_relation_retype) apply (rule corres_split_nor) apply (simp add: APIType_map2_def) apply (rule retype_region2_extra_ext_mapM_x_corres) @@ -5405,7 +5660,7 @@ lemma corres_retype_region_createNewCaps: apply wp apply wp apply ((wp retype_region2_obj_at | simp add: APIType_map2_def)+)[1] - apply ((wp createObjects_tcb_at'[where sz=sz] + apply ((wp createObjects_Not_tcbQueued[where sz=sz] | simp add: APIType_map2_def objBits_simps' obj_bits_api_def)+)[1] apply simp apply simp @@ -5438,7 +5693,7 @@ lemma corres_retype_region_createNewCaps: \ \CapTable\ apply (subst retype_region2_extra_ext_trivial) apply (simp add: APIType_map2_def) - apply (subst bind_assoc_reverse[of "createObjects y n (KOCTE makeObject) us"]) + apply (subst bind_assoc_return_reverse[of "createObjects y n (KOCTE makeObject) us"]) apply (subst liftM_def[of "map (\addr. 
capability.CNodeCap addr us 0 0)", symmetric]) apply simp apply (rule corres_rel_imp) @@ -5547,7 +5802,7 @@ lemma corres_retype_region_createNewCaps: APIType_map2_def arch_default_cap_def) apply fastforce+ \ \PML4\ - apply (corressimp corres: corres_retype[where ty="Inr PML4Object" and 'a=pml4e and sz=sz, + apply (corresKsimp corres: corres_retype[where ty="Inr PML4Object" and 'a=pml4e and sz=sz, simplified, folded retype_region2_retype_region_PML4Obj] corresK: corresK_mapM_x_list_all2[where I="\xs s. valid_arch_state s \ pspace_aligned s \ valid_etcbs s \ diff --git a/proof/refine/X64/Schedule_R.thy b/proof/refine/X64/Schedule_R.thy index 6e7f0e7615..aa1b8dc051 100644 --- a/proof/refine/X64/Schedule_R.thy +++ b/proof/refine/X64/Schedule_R.thy @@ -10,16 +10,11 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) -declare static_imp_wp[wp_split del] +declare hoare_weak_lift_imp[wp_split del] (* Levity: added (20090713 10:04:12) *) declare sts_rel_idle [simp] -lemma invs_no_cicd'_queues: - "invs_no_cicd' s \ valid_queues s" - unfolding invs_no_cicd'_def - by simp - lemma corres_if2: "\ G = G'; G \ corres r P P' a c; \ G' \ corres r Q Q' b d \ \ corres r (if G then P else Q) (if G' then P' else Q') (if G then a else b) (if G' then c else d)" @@ -41,7 +36,7 @@ proof - apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) done have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. if rv \ None then \ else P\" - by (wp alternative_wp | simp)+ + by (wp | simp)+ show ?thesis using p apply (induct xs) apply (simp add: y del: dc_simp) @@ -86,227 +81,231 @@ lemma schedule_choose_new_thread_sched_act_rct[wp]: unfolding schedule_choose_new_thread_def by wp +\ \This proof shares many similarities with the proof of @{thm tcbSchedEnqueue_corres}\ lemma tcbSchedAppend_corres: - notes trans_state_update'[symmetric, simp del] - shows - "corres dc (is_etcb_at t) (tcb_at' t and Invariants_H.valid_queues and valid_queues') (tcb_sched_action (tcb_sched_append) t) (tcbSchedAppend t)" - apply (simp only: tcbSchedAppend_def tcb_sched_action_def) - apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (rule no_fail_pre, wp, simp) - apply (case_tac queued) - apply (simp add: unless_def when_def) - apply (rule corres_no_failI) - apply wp+ - apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc - assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def - set_tcb_queue_def simpler_modify_def) - - apply (subgoal_tac "tcb_sched_append t (ready_queues a (tcb_domain y) (tcb_priority y)) - = (ready_queues a (tcb_domain y) (tcb_priority y))") - apply (simp add: state_relation_def ready_queues_relation_def) - apply (clarsimp simp: tcb_sched_append_def state_relation_def - valid_queues'_def ready_queues_relation_def - ekheap_relation_def etcb_relation_def - obj_at'_def inQ_def projectKO_eq project_inject) - apply (drule_tac x=t in bspec,clarsimp) + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_append tcb_ptr) (tcbSchedAppend tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + supply projectKOs[simp] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (clarsimp simp: tcb_sched_action_def tcb_sched_append_def get_tcb_queue_def + tcbSchedAppend_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. 
if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce apply clarsimp - apply (clarsimp simp: unless_def when_def cong: if_cong) - apply (rule stronger_corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply simp - apply (rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_append_def) - apply (intro conjI impI) - apply (rule corres_guard_imp) - apply (rule setQueue_corres) - prefer 3 - apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) - apply simp - apply simp - apply simp - apply (rule corres_split_noop_rhs2) - apply (rule addToBitmap_if_null_noop_corres) - apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] - apply wp+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def - projectKO_eq project_inject) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) + + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. P'" and P'=P' for P P']) + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueueAppend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: obj_at'_def) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) 
+ + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp simp: setQueue_def tcbQueueAppend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply fast + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def opt_pred_def opt_map_def) + apply (metis inQ_def option.simps(5) tcb_of'_TCB) + apply (intro conjI impI; clarsimp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply clarsimp + apply (drule_tac x="the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))" + in spec) + subgoal by (auto simp: in_opt_pred opt_map_red) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: tcbQueueEmpty_def) + apply (case_tac "t = tcbPtr") + apply (clarsimp simp: inQ_def fun_upd_apply split: if_splits) + apply (case_tac "t = the (tcbQueueEnd (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: inQ_def opt_pred_def fun_upd_apply) + apply (clarsimp simp: inQ_def 
in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def) + apply (intro conjI; clarsimp) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply opt_map_def obj_at'_def + queue_end_valid_def prev_queue_head_def + split: if_splits option.splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_append[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def opt_map_def split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def fun_upd_apply queue_end_valid_def split: if_splits) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply split: if_splits) + by (clarsimp simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def split: if_splits) + +lemma tcbQueueAppend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s \ (\end. tcbQueueEnd queue = Some end \ tcb_at' end s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_objs'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + apply (clarsimp simp: tcbQueueEmpty_def valid_bound_tcb'_def split: option.splits) + done + +lemma tcbSchedAppend_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. valid_objs'\" + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: threadSet_valid_objs' threadGet_wp hoare_vcg_all_lift) + apply (normalise_obj_at', rename_tac tcb "end") + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply clarsimp + apply (frule tcbQueueHead_iff_tcbQueueEnd) + apply (force dest!: obj_at'_tcbQueueEnd_ksReadyQueues simp: tcbQueueEmpty_def obj_at'_def) done - -crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue - for valid_pspace'[wp]: valid_pspace' - and valid_arch_state'[wp]: valid_arch_state' - (simp: unless_def) - crunches tcbSchedAppend, tcbSchedDequeue for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" (wp: threadSet_pred_tcb_no_state simp: unless_def tcb_to_itcb'_def) crunch state_refs_of'[wp]: setQueue "\s. P (state_refs_of' s)" -lemma removeFromBitmap_valid_queues_no_bitmap_except[wp]: -" \ valid_queues_no_bitmap_except t \ - removeFromBitmap d p - \\_. valid_queues_no_bitmap_except t \" - unfolding bitmapQ_defs valid_queues_no_bitmap_except_def - by (wp| clarsimp simp: bitmap_fun_defs)+ - -lemma removeFromBitmap_bitmapQ: - "\ \s. True \ removeFromBitmap d p \\_ s. 
\ bitmapQ d p s \" - unfolding bitmapQ_defs bitmap_fun_defs - by (wp| clarsimp simp: bitmap_fun_defs)+ - -lemma removeFromBitmap_valid_bitmapQ[wp]: -" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. ksReadyQueues s (d,p) = []) \ - removeFromBitmap d p - \\_. valid_bitmapQ \" -proof - - have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. ksReadyQueues s (d,p) = []) \ - removeFromBitmap d p - \\_. valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. \ bitmapQ d p s \ ksReadyQueues s (d,p) = []) \" - by (rule hoare_pre) - (wp removeFromBitmap_valid_queues_no_bitmap_except removeFromBitmap_valid_bitmapQ_except - removeFromBitmap_bitmapQ, simp) - thus ?thesis - by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) -qed - -(* this should be the actual weakest precondition to establish valid_queues - under tagging a thread as not queued *) -lemma threadSet_valid_queues_dequeue_wp: - "\ valid_queues_no_bitmap_except t and - valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - (\s. \d p. t \ set (ksReadyQueues s (d,p))) \ - threadSet (tcbQueued_update (\_. False)) t - \\rv. valid_queues \" - unfolding threadSet_def - apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) - apply (rule hoare_pre) - apply (simp add: valid_queues_def valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def) - apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift - setObject_tcb_strongest) - apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def valid_queues_no_bitmap_def) - done - (* FIXME move *) lemmas obj_at'_conjI = obj_at_conj' -lemma setQueue_valid_queues_no_bitmap_except_dequeue_wp: - "\d p ts t. - \ \s. valid_queues_no_bitmap_except t s \ - (\t' \ set ts. obj_at' (inQ d p and runnable' \ tcbState) t' s) \ - t \ set ts \ distinct ts \ p \ maxPriority \ d \ maxDomain \ - setQueue d p ts - \\rv. valid_queues_no_bitmap_except t \" - unfolding setQueue_def valid_queues_no_bitmap_except_def null_def - by wp force - -definition (* if t is in a queue, it should be tagged with right priority and domain *) - "correct_queue t s \ \d p. t \ set(ksReadyQueues s (d, p)) \ - (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s)" - -lemma valid_queues_no_bitmap_correct_queueI[intro]: - "valid_queues_no_bitmap s \ correct_queue t s" - unfolding correct_queue_def valid_queues_no_bitmap_def - by (fastforce simp: obj_at'_def inQ_def) - - -lemma tcbSchedDequeue_valid_queues_weak: - "\ valid_queues_no_bitmap_except t and valid_bitmapQ and - bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and - correct_queue t and - obj_at' (\tcb. tcbDomain tcb \ maxDomain \ tcbPriority tcb \ maxPriority) t \ - tcbSchedDequeue t - \\_. 
Invariants_H.valid_queues\" -proof - - show ?thesis - unfolding tcbSchedDequeue_def null_def valid_queues_def - apply wp (* stops on threadSet *) - apply (rule hoare_post_eq[OF _ threadSet_valid_queues_dequeue_wp], - simp add: valid_queues_def) - apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift)+ - apply (wp hoare_vcg_imp_lift setQueue_valid_queues_no_bitmap_except_dequeue_wp - setQueue_valid_bitmapQ threadGet_const_tcb_at)+ - (* wp done *) - apply (normalise_obj_at') - apply (clarsimp simp: correct_queue_def) - apply (normalise_obj_at') - apply (fastforce simp add: valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def elim: obj_at'_weaken)+ - done -qed - -lemma tcbSchedDequeue_valid_queues: - "\Invariants_H.valid_queues - and obj_at' (\tcb. tcbDomain tcb \ maxDomain) t - and obj_at' (\tcb. tcbPriority tcb \ maxPriority) t\ - tcbSchedDequeue t - \\_. Invariants_H.valid_queues\" - apply (rule hoare_pre, rule tcbSchedDequeue_valid_queues_weak) - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def) - done - -lemma tcbSchedAppend_valid_queues'[wp]: - (* most of this is identical to tcbSchedEnqueue_valid_queues' in TcbAcc_R *) - "\valid_queues' and tcb_at' t\ tcbSchedAppend t \\_. valid_queues'\" - apply (simp add: tcbSchedAppend_def) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued; simp_all add: unless_def when_def) - apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ - apply (subst conj_commute, wp) - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) - apply wp - apply fastforce - apply wp - apply (subst conj_commute) - apply clarsimp - apply (rule_tac Q="\rv. valid_queues' - and obj_at' (\obj. \ tcbQueued obj) t - and obj_at' (\obj. tcbPriority obj = prio) t - and obj_at' (\obj. tcbDomain obj = tdom) t - and (\s. t \ set (ksReadyQueues s (tdom, prio)))" - in hoare_post_imp) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKOs inQ_def) - apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def inQ_def projectKOs valid_queues'_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def) - done - -lemma threadSet_valid_queues'_dequeue: (* threadSet_valid_queues' is too weak for dequeue *) - "\\s. (\d p t'. obj_at' (inQ d p) t' s \ t' \ t \ t' \ set (ksReadyQueues s (d, p))) \ - obj_at' (inQ d p) t s \ - threadSet (tcbQueued_update (\_. False)) t - \\rv. valid_queues' \" - unfolding valid_queues'_def - apply (rule hoare_pre) - apply (wp hoare_vcg_all_lift) - apply (simp only: imp_conv_disj not_obj_at') - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (simp add: not_obj_at') - apply (clarsimp simp: typ_at_tcb') - apply normalise_obj_at' - apply (fastforce elim: obj_at'_weaken simp: inQ_def) - done - lemma setQueue_ksReadyQueues_lift: "\ \s. P (s\ksReadyQueues := (ksReadyQueues s)((d, p) := ts)\) ts \ setQueue d p ts @@ -314,116 +313,42 @@ lemma setQueue_ksReadyQueues_lift: unfolding setQueue_def by (wp, clarsimp simp: fun_upd_def cong: if_cong) -lemma tcbSchedDequeue_valid_queues'[wp]: - "\valid_queues' and tcb_at' t\ - tcbSchedDequeue t \\_. 
valid_queues'\" - unfolding tcbSchedDequeue_def - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" - in hoare_seq_ext) - prefer 2 - apply (wp threadGet_const_tcb_at) - apply (fastforce simp: obj_at'_def) - apply clarsimp - apply (rename_tac queued) - apply (case_tac queued, simp_all) - apply wp - apply (rule_tac d=tdom and p=prio in threadSet_valid_queues'_dequeue) - apply (rule hoare_pre_post, assumption) - apply (wp | clarsimp simp: bitmap_fun_defs)+ - apply (wp hoare_vcg_all_lift setQueue_ksReadyQueues_lift) - apply clarsimp - apply (wp threadGet_obj_at' threadGet_const_tcb_at)+ - apply clarsimp - apply (rule context_conjI, clarsimp simp: obj_at'_def) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKOs inQ_def|wp)+ - done - -crunch tcb_at'[wp]: tcbSchedEnqueue "tcb_at' t" - (simp: unless_def) -crunch tcb_at'[wp]: tcbSchedAppend "tcb_at' t" - (simp: unless_def) -crunch tcb_at'[wp]: tcbSchedDequeue "tcb_at' t" - -crunch state_refs_of'[wp]: tcbSchedEnqueue "\s. P (state_refs_of' s)" - (wp: refl simp: crunch_simps unless_def) -crunch state_refs_of'[wp]: tcbSchedAppend "\s. P (state_refs_of' s)" - (wp: refl simp: crunch_simps unless_def) -crunch state_refs_of'[wp]: tcbSchedDequeue "\s. P (state_refs_of' s)" - (wp: refl simp: crunch_simps) - -crunch cap_to'[wp]: tcbSchedEnqueue "ex_nonz_cap_to' p" - (simp: unless_def) -crunch cap_to'[wp]: tcbSchedAppend "ex_nonz_cap_to' p" - (simp: unless_def) -crunch cap_to'[wp]: tcbSchedDequeue "ex_nonz_cap_to' p" +crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue + for tcb_at'[wp]: "tcb_at' t" + and cap_to'[wp]: "ex_nonz_cap_to' p" + and ifunsafe'[wp]: if_unsafe_then_cap' + (wp: crunch_wps simp: crunch_simps) lemma tcbSchedAppend_iflive'[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ - tcbSchedAppend tcb \\_. if_live_then_nonz_cap'\" - apply (simp add: tcbSchedAppend_def unless_def) - apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ + "\if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbSchedAppend_def + supply projectKOs[simp] + apply (wpsimp wp: tcbQueueAppend_if_live_then_nonz_cap' threadGet_wp simp: bitmap_fun_defs) + apply (frule_tac p=tcbPtr in if_live_then_nonz_capE') + apply (fastforce simp: ko_wp_at'_def st_tcb_at'_def obj_at'_def runnable_eq_active') + apply (clarsimp simp: tcbQueueEmpty_def) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: obj_at'_tcbQueueEnd_ksReadyQueues + simp: ko_wp_at'_def inQ_def obj_at'_def tcbQueueEmpty_def) done lemma tcbSchedDequeue_iflive'[wp]: - "\if_live_then_nonz_cap'\ tcbSchedDequeue tcb \\_. if_live_then_nonz_cap'\" + "\if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. if_live_then_nonz_cap'\" apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ - apply ((wp | clarsimp simp: bitmap_fun_defs)+)[1] (* deal with removeFromBitmap *) - apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ - apply (rule_tac Q="\rv. 
\" in hoare_post_imp, fastforce) - apply (wp | simp add: crunch_simps)+ + apply (wpsimp wp: tcbQueueRemove_if_live_then_nonz_cap' threadGet_wp) + apply (fastforce elim: if_live_then_nonz_capE' simp: obj_at'_def ko_wp_at'_def projectKOs) done -crunch ifunsafe'[wp]: tcbSchedAppend if_unsafe_then_cap' - (simp: unless_def) -crunch ifunsafe'[wp]: tcbSchedDequeue if_unsafe_then_cap' - crunch idle'[wp]: tcbSchedAppend valid_idle' (simp: crunch_simps unless_def) -crunch global_refs'[wp]: tcbSchedEnqueue valid_global_refs' - (wp: threadSet_global_refs simp: unless_def) -crunch global_refs'[wp]: tcbSchedAppend valid_global_refs' - (wp: threadSet_global_refs simp: unless_def) -crunch global_refs'[wp]: tcbSchedDequeue valid_global_refs' - (wp: threadSet_global_refs) - -crunch irq_node'[wp]: tcbSchedEnqueue "\s. P (irq_node' s)" - (simp: unless_def) -crunch irq_node'[wp]: tcbSchedAppend "\s. P (irq_node' s)" - (simp: unless_def) -crunch irq_node'[wp]: tcbSchedDequeue "\s. P (irq_node' s)" - -crunch typ_at'[wp]: tcbSchedEnqueue "\s. P (typ_at' T p s)" - (simp: unless_def) -crunch typ_at'[wp]: tcbSchedAppend "\s. P (typ_at' T p s)" - (simp: unless_def) -crunch typ_at'[wp]: tcbSchedDequeue "\s. P (typ_at' T p s)" - -crunch ctes_of[wp]: tcbSchedEnqueue "\s. P (ctes_of s)" - (simp: unless_def) -crunch ctes_of[wp]: tcbSchedAppend "\s. P (ctes_of s)" - (simp: unless_def) -crunch ctes_of[wp]: tcbSchedDequeue "\s. P (ctes_of s)" - -crunch ksInterrupt[wp]: tcbSchedEnqueue "\s. P (ksInterruptState s)" - (simp: unless_def) -crunch ksInterrupt[wp]: tcbSchedAppend "\s. P (ksInterruptState s)" - (simp: unless_def) -crunch ksInterrupt[wp]: tcbSchedDequeue "\s. P (ksInterruptState s)" - -crunch irq_states[wp]: tcbSchedEnqueue valid_irq_states' - (simp: unless_def) -crunch irq_states[wp]: tcbSchedAppend valid_irq_states' - (simp: unless_def) -crunch irq_states[wp]: tcbSchedDequeue valid_irq_states' - -crunch ct'[wp]: tcbSchedEnqueue "\s. P (ksCurThread s)" - (simp: unless_def) -crunch ct'[wp]: tcbSchedAppend "\s. P (ksCurThread s)" - (simp: unless_def) -crunch ct'[wp]: tcbSchedDequeue "\s. P (ksCurThread s)" - lemma tcbSchedEnqueue_vms'[wp]: "\valid_machine_state'\ tcbSchedEnqueue t \\_. valid_machine_state'\" apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) @@ -448,61 +373,121 @@ lemma ct_idle_or_in_cur_domain'_lift2: apply (rule hoare_lift_Pf2[where f=ksCurThread]) apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) including no_pre - apply (wp static_imp_wp hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift) apply simp+ done +lemma threadSet_mdb': + "\valid_mdb' and obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF (f t)) t\ + threadSet f t + \\rv. valid_mdb'\" + supply projectKOs[simp] + apply (wpsimp wp: setObject_tcb_mdb' getTCB_wp simp: threadSet_def obj_at'_def) + apply fastforce + done + +lemma tcbSchedNext_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbSchedNext_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbSchedPrev_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueueRemove_valid_mdb': + "\\s. valid_mdb' s \ valid_objs' s\ tcbQueueRemove q tcbPtr \\_. 
valid_mdb'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (fastforce simp: valid_tcb'_def obj_at'_def) + done + +lemma tcbQueuePrepend_valid_mdb': + "\valid_mdb' and tcb_at' tcbPtr + and (\s. \ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_valid_mdb': + "\\s. valid_mdb' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueEnd queue)) s)\ + tcbQueueAppend queue tcbPtr + \\_. valid_mdb'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueued_update_valid_mdb'[wp]: + "\valid_mdb' and tcb_at' tcbPtr\ threadSet (tcbQueued_update f) tcbPtr \\_. valid_mdb'\" + apply (wpsimp wp: threadSet_mdb') + apply (fastforce simp: obj_at'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma valid_mdb'_ksReadyQueuesL1Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL1Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma valid_mdb'_ksReadyQueuesL2Bitmap_update[simp]: + "valid_mdb' (ksReadyQueuesL2Bitmap_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma tcbSchedEnqueue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. valid_mdb'\" + apply (clarsimp simp: tcbSchedEnqueue_def setQueue_def) + apply (wpsimp wp: tcbQueuePrepend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + apply normalise_obj_at' + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + +crunches tcbSchedEnqueue + for cur_tcb'[wp]: cur_tcb' + (wp: threadSet_cur) + lemma tcbSchedEnqueue_invs'[wp]: - "\invs' - and st_tcb_at' runnable' t - and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ - tcbSchedEnqueue t + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedEnqueue t \\_. invs'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp tcbSchedEnqueue_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift valid_ioports_lift' - | simp add: cteCaps_of_def o_def - | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority split: thread_state.split_asm simp: valid_pspace'_def)+ + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedEnqueue_ct_not_inQ valid_ioports_lift' + simp: cteCaps_of_def o_def) done -crunch ksMachine[wp]: tcbSchedAppend "\s. P (ksMachineState s)" - (simp: unless_def) - lemma tcbSchedAppend_vms'[wp]: "\valid_machine_state'\ tcbSchedAppend t \\_. valid_machine_state'\" apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedAppend_ksMachine) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) done -crunch pspace_domain_valid[wp]: tcbSchedAppend "pspace_domain_valid" - (simp: unless_def) - -crunch ksCurDomain[wp]: tcbSchedAppend "\s. P (ksCurDomain s)" -(simp: unless_def) +lemma tcbQueueAppend_tcbPriority_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. 
P (tcbPriority tcb)) t'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def projectKOs split: if_splits) -crunch ksIdleThread[wp]: tcbSchedAppend "\s. P (ksIdleThread s)" -(simp: unless_def) - -crunch ksDomSchedule[wp]: tcbSchedAppend "\s. P (ksDomSchedule s)" -(simp: unless_def) +lemma tcbQueueAppend_tcbDomain_obj_at'[wp]: + "tcbQueueAppend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbQueueAppend_def + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def projectKOs split: if_splits) lemma tcbSchedAppend_tcbDomain[wp]: - "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ - tcbSchedAppend t - \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" + "tcbSchedAppend t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" apply (clarsimp simp: tcbSchedAppend_def) - apply (wpsimp simp: unless_def)+ - done + by wpsimp lemma tcbSchedAppend_tcbPriority[wp]: - "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ - tcbSchedAppend t - \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" + "tcbSchedAppend t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" apply (clarsimp simp: tcbSchedAppend_def) - apply (wpsimp simp: unless_def)+ - done + by wpsimp lemma tcbSchedAppend_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ tcbSchedAppend t \\_. tcb_in_cur_domain' t' \" @@ -510,36 +495,66 @@ lemma tcbSchedAppend_tcb_in_cur_domain'[wp]: apply wp+ done -crunch ksDomScheduleIdx[wp]: tcbSchedAppend "\s. P (ksDomScheduleIdx s)" - (simp: unless_def) - crunches tcbSchedAppend, tcbSchedDequeue - for gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" - and arch'[wp]: "\s. P (ksArchState s)" + for arch'[wp]: "\s. P (ksArchState s)" and ioports'[wp]: valid_ioports' (simp: unless_def wp: valid_ioports_lift'') lemma tcbSchedAppend_sch_act_wf[wp]: - "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedAppend thread - \\rv s. sch_act_wf (ksSchedulerAction s) s\" - apply (simp add:tcbSchedAppend_def bitmap_fun_defs) - apply (wp hoare_unless_wp setQueue_sch_act threadGet_wp|simp)+ - apply (fastforce simp:typ_at'_def obj_at'_def) + "tcbSchedAppend thread \\s. sch_act_wf (ksSchedulerAction s) s\" + by (wpsimp wp: sch_act_wf_lift) + +lemma tcbSchedAppend_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedAppend tcbPtr \\_. valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedAppend_def + apply (wpsimp simp: tcbQueueAppend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp hoare_vcg_if_lift2) + apply (clarsimp simp: ksReadyQueues_asrt_def split: if_splits) + apply normalise_obj_at' + apply (force dest: tcbQueueHead_iff_tcbQueueEnd + simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def) + done + +lemma tcbSchedAppend_valid_mdb'[wp]: + "\valid_mdb' and valid_tcbs' and pspace_aligned' and pspace_distinct'\ + tcbSchedAppend tcbPtr + \\_. valid_mdb'\" + supply projectKOs[simp] + apply (clarsimp simp: tcbSchedAppend_def setQueue_def) + apply (wpsimp wp: tcbQueueAppend_valid_mdb' threadGet_wp simp: bitmap_fun_defs) + apply (fastforce dest: obj_at'_tcbQueueEnd_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done + +lemma tcbSchedAppend_valid_bitmaps[wp]: + "tcbSchedAppend tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) done lemma tcbSchedAppend_invs'[wp]: - "\invs' - and st_tcb_at' runnable' t - and (\s. 
ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ - tcbSchedAppend t + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedAppend t \\_. invs'\" - apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp tcbSchedAppend_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def - | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority split: thread_state.split_asm simp: valid_pspace'_def)+ + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma tcbSchedAppend_all_invs_but_ct_not_inQ': + "\invs'\ + tcbSchedAppend t + \\_. all_invs_but_ct_not_inQ'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift tcbSchedAppend_ct_not_inQ + ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) done lemma tcbSchedEnqueue_invs'_not_ResumeCurrentThread: @@ -562,70 +577,98 @@ lemma tcb_at'_has_tcbDomain: "tcb_at' t s \ \p. obj_at' (\tcb. tcbDomain tcb = p) t s" by (clarsimp simp add: obj_at'_def) -crunch ksMachine[wp]: tcbSchedDequeue "\s. P (ksMachineState s)" - (simp: unless_def) - lemma tcbSchedDequeue_vms'[wp]: "\valid_machine_state'\ tcbSchedDequeue t \\_. valid_machine_state'\" apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedDequeue_ksMachine) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) done -crunch pspace_domain_valid[wp]: tcbSchedDequeue "pspace_domain_valid" - -crunch ksCurDomain[wp]: tcbSchedDequeue "\s. P (ksCurDomain s)" -(simp: unless_def) - -crunch ksIdleThread[wp]: tcbSchedDequeue "\s. P (ksIdleThread s)" -(simp: unless_def) - -crunch ksDomSchedule[wp]: tcbSchedDequeue "\s. P (ksDomSchedule s)" -(simp: unless_def) - lemma tcbSchedDequeue_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ tcbSchedDequeue t \\_. tcb_in_cur_domain' t' \" apply (rule tcb_in_cur_domain'_lift) apply wp - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ - done - -lemma tcbSchedDequeue_tcbDomain[wp]: - "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ - tcbSchedDequeue t - \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ + apply (clarsimp simp: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: hoare_when_weak_wp getObject_tcb_wp threadGet_wp) done -lemma tcbSchedDequeue_tcbPriority[wp]: - "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ - tcbSchedDequeue t - \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" - apply (clarsimp simp: tcbSchedDequeue_def) - apply (wp hoare_when_weak_wp | simp)+ +lemma tcbSchedDequeue_valid_mdb'[wp]: + "\valid_mdb' and valid_objs'\ tcbSchedDequeue tcbPtr \\_. valid_mdb'\" + unfolding tcbSchedDequeue_def + apply (wpsimp simp: bitmap_fun_defs setQueue_def wp: threadSet_mdb' tcbQueueRemove_valid_mdb') + apply (rule_tac Q="\_. 
tcb_at' tcbPtr" in hoare_post_imp) + apply (fastforce simp: tcb_cte_cases_def cteSizeBits_def) + apply (wpsimp wp: threadGet_wp)+ + apply (fastforce simp: obj_at'_def) done -crunch ksDomScheduleIdx[wp]: tcbSchedDequeue "\s. P (ksDomScheduleIdx s)" - (simp: unless_def) - lemma tcbSchedDequeue_invs'[wp]: - "\invs' and tcb_at' t\ - tcbSchedDequeue t - \\_. invs'\" - unfolding invs'_def valid_state'_def - apply (rule hoare_pre) - apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift - valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - tcbSchedDequeue_valid_queues - untyped_ranges_zero_lift - | simp add: cteCaps_of_def o_def)+ - apply (fastforce elim: valid_objs'_maxDomain valid_objs'_maxPriority simp: valid_pspace'_def)+ + "tcbSchedDequeue t \invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + untyped_ranges_zero_lift ct_idle_or_in_cur_domain'_lift2 cur_tcb_lift + simp: cteCaps_of_def o_def) + done + +lemma ready_qs_runnable_cross: + "\(s, s') \ state_relation; pspace_aligned s; pspace_distinct s; valid_queues s\ + \ ready_qs_runnable s'" + supply projectKOs[simp] + apply (clarsimp simp: ready_qs_runnable_def) + apply normalise_obj_at' + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x="tcbDomain ko" in spec) + apply (drule_tac x="tcbPriority ko" in spec) + apply clarsimp + apply (drule_tac x=t in bspec) + apply (fastforce simp: inQ_def in_opt_pred obj_at'_def opt_map_red) + apply (fastforce dest: st_tcb_at_runnable_cross simp: obj_at'_def st_tcb_at'_def) + done + +method add_ready_qs_runnable = + rule_tac Q'=ready_qs_runnable in corres_cross_add_guard, + (clarsimp simp: pred_conj_def)?, + (frule valid_sched_valid_queues)?, (frule invs_psp_aligned)?, (frule invs_distinct)?, + fastforce dest: ready_qs_runnable_cross + +defs idleThreadNotQueued_def: + "idleThreadNotQueued s \ obj_at' (Not \ tcbQueued) (ksIdleThread s) s" + +lemma idle_thread_not_queued: + "\valid_idle s; valid_queues s; valid_etcbs s\ + \ \ (\d p. idle_thread s \ set (ready_queues s d p))" + apply (clarsimp simp: valid_queues_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply clarsimp + apply (drule_tac x="idle_thread s" in bspec) + apply fastforce + apply (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def valid_etcbs_def) done +lemma valid_idle_tcb_at: + "valid_idle s \ tcb_at (idle_thread s) s" + by (clarsimp simp: valid_idle_def pred_tcb_at_def obj_at_def is_tcb_def) + lemma setCurThread_corres: - "corres dc \ \ (modify (cur_thread_update (\_. t))) (setCurThread t)" - apply (unfold setCurThread_def) + "corres dc (valid_idle and valid_queues and valid_etcbs and pspace_aligned and pspace_distinct) \ + (modify (cur_thread_update (\_. 
t))) (setCurThread t)" + supply projectKOs[simp] + apply (clarsimp simp: setCurThread_def) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (clarsimp simp: idleThreadNotQueued_def) + apply (frule (2) idle_thread_not_queued) + apply (frule state_relation_pspace_relation) + apply (frule state_relation_ready_queues_relation) + apply (frule state_relation_idle_thread) + apply (frule valid_idle_tcb_at) + apply (frule (3) tcb_at_cross) + apply (fastforce dest!: in_ready_q_tcbQueued_eq[THEN arg_cong_Not, THEN iffD1] + simp: obj_at'_def opt_pred_def opt_map_def) apply (rule corres_modify) apply (simp add: state_relation_def swp_def) done @@ -664,47 +707,62 @@ lemma arch_switch_thread_ksQ[wp]: apply (wp) done -crunch valid_queues[wp]: "Arch.switchToThread" "Invariants_H.valid_queues" -(wp: crunch_wps simp: crunch_simps) +crunches storeWordUser, setVMRoot, asUser, storeWordUser, Arch.switchToThread + for ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_objs'[wp]: valid_objs' + (wp: crunch_wps threadSet_sched_pointers simp: crunch_simps) + +crunches arch_switch_to_thread, arch_switch_to_idle_thread + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + and ready_qs_distinct[wp]: ready_qs_distinct + (wp: ready_qs_distinct_lift simp: crunch_simps) + +crunches arch_switch_to_thread, arch_switch_to_idle_thread + for valid_idle[wp]: "\s::det_ext state. valid_idle s" + +lemma valid_queues_in_correct_ready_q[elim!]: + "valid_queues s \ in_correct_ready_q s" + by (clarsimp simp: valid_queues_def in_correct_ready_q_def) + +lemma valid_queues_ready_qs_distinct[elim!]: + "valid_queues s \ ready_qs_distinct s" + by (clarsimp simp: valid_queues_def ready_qs_distinct_def) lemma switchToThread_corres: "corres dc (valid_arch_state and valid_objs and valid_asid_map and valid_vspace_objs and pspace_aligned and pspace_distinct and valid_vs_lookup and valid_global_objs and unique_table_refs o caps_of_state - and st_tcb_at runnable t and valid_etcbs) - (valid_arch_state' and valid_pspace' and Invariants_H.valid_queues - and st_tcb_at' runnable' t and cur_tcb') + and st_tcb_at runnable t and valid_etcbs and valid_queues and valid_idle) + (no_0_obj' and sym_heap_sched_pointers and valid_pspace' and valid_arch_state') (switch_to_thread t) (switchToThread t)" - (is "corres _ ?PA ?PH _ _") - -proof - - have mainpart: "corres dc (?PA) (?PH) - (do y \ arch_switch_to_thread t; - y \ (tcb_sched_action tcb_sched_dequeue t); - modify (cur_thread_update (\_. t)) - od) - (do y \ Arch.switchToThread t; - y \ tcbSchedDequeue t; - setCurThread t - od)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply add_ready_qs_runnable + apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) + apply (rule corres_symb_exec_l[OF _ _ get_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_l[OF _ _ assert_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) 
+    apply wpsimp
+   apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def)
+  apply (rule corres_stateAssert_ignore)
+   apply (fastforce dest!: state_relation_ready_queues_relation intro: ksReadyQueues_asrt_cross)
+  apply (rule corres_stateAssert_add_assertion[rotated])
+   apply fastforce
   apply (rule corres_guard_imp)
     apply (rule corres_split[OF arch_switchToThread_corres])
       apply (rule corres_split[OF tcbSchedDequeue_corres setCurThread_corres])
-        apply (wp|clarsimp simp: tcb_at_is_etcb_at st_tcb_at_tcb_at)+
-    done
-
-  show ?thesis
-    apply -
-    apply (simp add: switch_to_thread_def Thread_H.switchToThread_def)
-    apply (rule corres_symb_exec_l [where Q = "\<lambda> s rv. (?PA and (=) rv) s",
-                OF corres_symb_exec_l [OF mainpart]])
-      apply (auto intro: no_fail_pre [OF no_fail_assert]
-                         no_fail_pre [OF no_fail_get]
-                  dest: st_tcb_at_tcb_at [THEN get_tcb_at] |
-             simp add: assert_def | wp)+
-    done
-qed
+        apply (wpsimp simp: is_tcb_def)+
+       apply (fastforce intro!: st_tcb_at_tcb_at)
+      apply wpsimp
+     apply wpsimp
+    apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def)
+   apply wpsimp
+  apply (fastforce dest!: st_tcb_at_tcb_at simp: tcb_at_def)
+  done
 
 lemma arch_switchToIdleThread_corres:
   "corres dc
@@ -715,20 +773,26 @@ lemma arch_switchToIdleThread_corres:
        arch_switch_to_idle_thread Arch.switchToIdleThread"
   apply (simp add: arch_switch_to_idle_thread_def X64_H.switchToIdleThread_def)
-  apply (corressimp corres: getIdleThread_corres setVMRoot_corres)
+  apply (corresKsimp corres: getIdleThread_corres setVMRoot_corres)
   apply (clarsimp simp: valid_idle_def valid_idle'_def pred_tcb_at_def
                         obj_at_def is_tcb obj_at'_def)
   done
 
 lemma switchToIdleThread_corres:
-  "corres dc invs invs_no_cicd' switch_to_idle_thread switchToIdleThread"
+  "corres dc
+     (invs and valid_queues and valid_etcbs)
+     invs_no_cicd'
+     switch_to_idle_thread switchToIdleThread"
   apply (simp add: switch_to_idle_thread_def Thread_H.switchToIdleThread_def)
+  apply add_ready_qs_runnable
+  apply (rule corres_stateAssert_ignore, fastforce)
   apply (rule corres_guard_imp)
    apply (rule corres_split[OF getIdleThread_corres])
     apply (rule corres_split[OF arch_switchToIdleThread_corres])
-      apply (unfold setCurThread_def)
-      apply (rule corres_trivial, rule corres_modify)
-      apply (simp add: state_relation_def cdt_relation_def)
-     apply (wp+, simp+)
+      apply clarsimp
+      apply (rule setCurThread_corres)
+     apply wpsimp
+     apply (simp add: state_relation_def cdt_relation_def)
+    apply wpsimp+
   apply (simp add: invs_unique_refs invs_valid_vs_lookup invs_valid_objs invs_valid_asid_map
                    invs_arch_state invs_valid_global_objs invs_psp_aligned invs_distinct
                    invs_valid_idle invs_vspace_objs)
@@ -763,11 +827,9 @@ proof -
   apply (simp add: setCurThread_def)
   apply wp
   apply (clarsimp simp add: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def
-                            valid_state'_def Invariants_H.valid_queues_def
-                            sch_act_wf ct_in_state'_def state_refs_of'_def
-                            ps_clear_def valid_irq_node'_def valid_queues'_def ct_not_inQ_ct
-                            ct_idle_or_in_cur_domain'_def
-                            bitmapQ_defs valid_queues_no_bitmap_def
+                            valid_state'_def sch_act_wf ct_in_state'_def state_refs_of'_def
+                            ps_clear_def valid_irq_node'_def ct_not_inQ_ct
+                            ct_idle_or_in_cur_domain'_def bitmapQ_defs valid_bitmaps_def
                   cong: option.case_cong)
   done
 qed
@@ -781,101 +843,20 @@ lemma setCurThread_invs:
   by (rule hoare_pre, rule setCurThread_invs_no_cicd') (simp add: invs'_to_invs_no_cicd'_def)
 
-lemma valid_queues_not_runnable_not_queued:
-  fixes s
-  assumes vq: "Invariants_H.valid_queues s"
-  and vq': "valid_queues' s"
-  and st: 
"st_tcb_at' (Not \ runnable') t s" - shows "obj_at' (Not \ tcbQueued) t s" -proof (rule ccontr) - assume "\ obj_at' (Not \ tcbQueued) t s" - moreover from st have "typ_at' TCBT t s" - by (rule pred_tcb_at' [THEN tcb_at_typ_at' [THEN iffD1]]) - ultimately have "obj_at' tcbQueued t s" - by (clarsimp simp: not_obj_at' comp_def) - - moreover - from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbPriority] - obtain p where tp: "obj_at' (\tcb. tcbPriority tcb = p) t s" - by clarsimp - - moreover - from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbDomain] - obtain d where td: "obj_at' (\tcb. tcbDomain tcb = d) t s" - by clarsimp - - ultimately - have "t \ set (ksReadyQueues s (d, p))" using vq' - unfolding valid_queues'_def - apply - - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (drule_tac x=t in spec) - apply (erule impE) - apply (fastforce simp add: inQ_def obj_at'_def) - apply (assumption) - done - - with vq have "st_tcb_at' runnable' t s" - unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def - apply - - apply clarsimp - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp simp add: st_tcb_at'_def) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (clarsimp) - done - - with st show False - apply - - apply (drule(1) pred_tcb_at_conj') - apply (clarsimp) - done -qed - -(* - * The idle thread is not part of any ready queues. - *) -lemma idle'_not_tcbQueued': - assumes vq: "Invariants_H.valid_queues s" - and vq': "valid_queues' s" - and idle: "valid_idle' s" - shows "obj_at' (Not \ tcbQueued) (ksIdleThread s) s" - proof - - from idle have stidle: "st_tcb_at' (Not \ runnable') (ksIdleThread s) s" - by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def projectKOs idle_tcb'_def) - - with vq vq' show ?thesis - by (rule valid_queues_not_runnable_not_queued) - qed - lemma setCurThread_invs_no_cicd'_idle_thread: - "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" -proof - - have ct_not_inQ_ct: "\s t . \ ct_not_inQ s; obj_at' (\x. \ tcbQueued x) t s\ \ ct_not_inQ (s\ ksCurThread := t \)" - apply (simp add: ct_not_inQ_def o_def) - done - have idle'_activatable': "\ s t. st_tcb_at' idle' t s \ st_tcb_at' activatable' t s" - apply (clarsimp simp: st_tcb_at'_def o_def obj_at'_def) + "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\_. 
invs'\" + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def + valid_state'_def valid_idle'_def + sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_queues_def bitmapQ_defs valid_bitmaps_def pred_tcb_at'_def + cong: option.case_cong) + apply (clarsimp simp: idle_tcb'_def ct_not_inQ_def ps_clear_def obj_at'_def st_tcb_at'_def + idleThreadNotQueued_def projectKOs) done - show ?thesis - apply (simp add: setCurThread_def) - apply wp - apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def) - apply (frule (2) idle'_not_tcbQueued'[simplified o_def]) - apply (clarsimp simp add: ct_not_inQ_ct idle'_activatable' - invs'_def cur_tcb'_def valid_state'_def valid_idle'_def - sch_act_wf ct_in_state'_def state_refs_of'_def - ps_clear_def valid_irq_node'_def - ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def - valid_queues_def bitmapQ_defs valid_queues_no_bitmap_def valid_queues'_def - pred_tcb_at'_def - cong: option.case_cong) - apply (clarsimp simp: obj_at'_def projectKOs idle_tcb'_def) - done -qed lemma setCurThread_invs_idle_thread: "\invs' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" @@ -910,13 +891,13 @@ lemma Arch_switchToThread_tcb_in_cur_domain'[wp]: done lemma tcbSchedDequeue_not_tcbQueued: - "\ tcb_at' t \ tcbSchedDequeue t \ \_. obj_at' (\x. \ tcbQueued x) t \" + "\\\ tcbSchedDequeue t \\_. obj_at' (\x. \ tcbQueued x) t\" apply (simp add: tcbSchedDequeue_def) apply (wp|clarsimp)+ apply (rule_tac Q="\queued. obj_at' (\x. tcbQueued x = queued) t" in hoare_post_imp) - apply (clarsimp simp: obj_at'_def) - apply (wp threadGet_obj_at') - apply (simp) + apply (clarsimp simp: obj_at'_def) + apply (wpsimp wp: threadGet_wp)+ + apply (clarsimp simp: obj_at'_def) done lemma Arch_switchToThread_obj_at[wp]: @@ -938,10 +919,6 @@ crunch valid_irq_states'[wp]: asUser "valid_irq_states'" crunch valid_machine_state'[wp]: asUser "valid_machine_state'" (wp: crunch_wps simp: crunch_simps) -crunch valid_queues'[wp]: asUser "valid_queues'" -(wp: crunch_wps simp: crunch_simps) - - lemma asUser_valid_irq_node'[wp]: "\\s. valid_irq_node' (irq_node' s) s\ asUser t (setRegister f r) \\_ s. valid_irq_node' (irq_node' s) s\" @@ -964,14 +941,14 @@ lemma asUser_ct_not_inQ[wp]: "\ct_not_inQ\ asUser t (setRegister f r) \\_ . ct_not_inQ\" apply (clarsimp simp: submonad_asUser.fn_is_sm submonad_fn_def) - apply (rule hoare_seq_ext)+ + apply (rule bind_wp)+ prefer 4 apply (rule stateAssert_sp) prefer 3 apply (rule gets_inv) defer apply (rule select_f_inv) - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (clarsimp simp: projectKOs asUser_replace_def obj_at'_def fun_upd_def split: option.split kernel_object.split) apply wp @@ -1012,22 +989,17 @@ lemma Arch_switchToThread_invs_no_cicd': by (wp|rule setVMRoot_invs_no_cicd')+ lemma tcbSchedDequeue_invs_no_cicd'[wp]: - "\invs_no_cicd' and tcb_at' t\ - tcbSchedDequeue t - \\_. 
invs_no_cicd'\" - unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + "tcbSchedDequeue t \invs_no_cicd'\" + unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_pspace'_def apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 - tcbSchedDequeue_valid_queues_weak untyped_ranges_zero_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp - apply (fastforce simp: valid_pspace'_def valid_queues_def - elim: valid_objs'_maxDomain valid_objs'_maxPriority intro: obj_at'_conjI) done lemma switchToThread_invs_no_cicd': - "\invs_no_cicd' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" + "\invs_no_cicd' and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" apply (simp add: Thread_H.switchToThread_def) apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued Arch_switchToThread_invs_no_cicd' Arch_switchToThread_pred_tcb') @@ -1035,7 +1007,7 @@ lemma switchToThread_invs_no_cicd': done lemma switchToThread_invs[wp]: - "\invs' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" + "\invs' and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" apply (simp add: Thread_H.switchToThread_def ) apply (wp threadSet_timeslice_invs setCurThread_invs Arch_switchToThread_invs dmo_invs' @@ -1078,8 +1050,7 @@ lemma dmo_cap_to'[wp]: lemma sct_cap_to'[wp]: "\ex_nonz_cap_to' p\ setCurThread t \\rv. ex_nonz_cap_to' p\" apply (simp add: setCurThread_def) - apply (wp ex_nonz_cap_to_pres') - apply (clarsimp elim!: cte_wp_at'_pspaceI)+ + apply (wpsimp wp: ex_nonz_cap_to_pres') done @@ -1111,61 +1082,6 @@ lemma obj_tcb_at': "obj_at' (\tcb::tcb. P tcb) t s \ tcb_at' t s" by (clarsimp simp: obj_at'_def) -lemma invs'_not_runnable_not_queued: - fixes s - assumes inv: "invs' s" - and st: "st_tcb_at' (Not \ runnable') t s" - shows "obj_at' (Not \ tcbQueued) t s" - apply (insert assms) - apply (rule valid_queues_not_runnable_not_queued) - apply (clarsimp simp add: invs'_def valid_state'_def)+ - done - -lemma valid_queues_not_tcbQueued_not_ksQ: - fixes s - assumes vq: "Invariants_H.valid_queues s" - and notq: "obj_at' (Not \ tcbQueued) t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" -proof (rule ccontr, simp , erule exE, erule exE) - fix d p - assume "t \ set (ksReadyQueues s (d, p))" - with vq have "obj_at' (inQ d p) t s" - unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def - apply clarify - apply (drule_tac x=d in spec) - apply (drule_tac x=p in spec) - apply (clarsimp) - apply (drule(1) bspec) - apply (erule obj_at'_weakenE) - apply (simp) - done - hence "obj_at' tcbQueued t s" - apply (rule obj_at'_weakenE) - apply (simp only: inQ_def) - done - with notq show "False" - by (clarsimp simp: obj_at'_def) -qed - -lemma not_tcbQueued_not_ksQ: - fixes s - assumes "invs' s" - and "obj_at' (Not \ tcbQueued) t s" - shows "\d p. t \ set (ksReadyQueues s (d, p))" - apply (insert assms) - apply (clarsimp simp add: invs'_def valid_state'_def) - apply (drule(1) valid_queues_not_tcbQueued_not_ksQ) - apply (clarsimp) - done - -lemma ct_not_ksQ: - "\ invs' s; ksSchedulerAction s = ResumeCurrentThread \ - \ \p. ksCurThread s \ set (ksReadyQueues s p)" - apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def) - apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) - apply (fastforce) - done - lemma setThreadState_rct: "\\s. 
(runnable' st \ ksCurThread s \ t) \ ksSchedulerAction s = ResumeCurrentThread\ @@ -1173,21 +1089,21 @@ lemma setThreadState_rct: \\_ s. ksSchedulerAction s = ResumeCurrentThread\" apply (simp add: setThreadState_def) apply (rule hoare_pre_disj') - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] threadSet_nosch]]) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) apply (clarsimp simp: when_def) - apply (case_tac x) + apply (case_tac rv) apply (clarsimp, wp)[1] apply (clarsimp) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF threadSet_ct threadSet_nosch]]) - apply (rule hoare_seq_ext [OF _ isRunnable_inv]) - apply (rule hoare_seq_ext [OF _ + apply (rule bind_wp [OF _ isRunnable_inv]) + apply (rule bind_wp [OF _ hoare_vcg_conj_lift [OF gct_wp gct_wp]]) apply (rename_tac ct) @@ -1237,21 +1153,24 @@ lemma bitmapQ_from_bitmap_lookup: done lemma lookupBitmapPriority_obj_at': - "\ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0; valid_queues_no_bitmap s; valid_bitmapQ s; - bitmapQ_no_L1_orphans s\ - \ obj_at' (inQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) and runnable' \ tcbState) - (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" + "\ksReadyQueuesL1Bitmap s d \ 0; valid_bitmapQ s; bitmapQ_no_L1_orphans s; + ksReadyQueues_asrt s; ready_qs_runnable s; pspace_aligned' s; pspace_distinct' s\ + \ obj_at' (inQ d (lookupBitmapPriority d s) and runnable' \ tcbState) + (the (tcbQueueHead (ksReadyQueues s (d, lookupBitmapPriority d s)))) s" apply (drule (2) bitmapQ_from_bitmap_lookup) apply (simp add: valid_bitmapQ_bitmapQ_simp) - apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)", simp) - apply (clarsimp, rename_tac t ts) - apply (drule cons_set_intro) - apply (drule (2) valid_queues_no_bitmap_objD) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def tcbQueueEmpty_def) + apply (drule_tac x=d in spec) + apply (drule_tac x="lookupBitmapPriority d s" in spec) + apply clarsimp + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (fastforce simp: obj_at'_and ready_qs_runnable_def obj_at'_def st_tcb_at'_def inQ_def + tcbQueueEmpty_def) done lemma bitmapL1_zero_ksReadyQueues: "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ - \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. ksReadyQueues s (d,p) = [])" + \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. tcbQueueEmpty (ksReadyQueues s (d, p)))" apply (cases "ksReadyQueuesL1Bitmap s d = 0") apply (force simp add: bitmapQ_def valid_bitmapQ_def) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) @@ -1322,7 +1241,7 @@ lemma bitmapL1_highest_lookup: done lemma bitmapQ_ksReadyQueuesI: - "\ bitmapQ d p s ; valid_bitmapQ s \ \ ksReadyQueues s (d, p) \ []" + "\ bitmapQ d p s ; valid_bitmapQ s \ \ \ tcbQueueEmpty (ksReadyQueues s (d, p))" unfolding valid_bitmapQ_def by simp lemma getReadyQueuesL2Bitmap_inv[wp]: @@ -1331,24 +1250,22 @@ lemma getReadyQueuesL2Bitmap_inv[wp]: lemma switchToThread_lookupBitmapPriority_wp: "\\s. invs_no_cicd' s \ bitmapQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) s \ - t = hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)) \ + t = the (tcbQueueHead (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)))\ ThreadDecls_H.switchToThread t \\rv. 
invs'\" -proof - - have switchToThread_pre: - "\s p t.\ valid_queues s ; bitmapQ (ksCurDomain s) p s ; t = hd (ksReadyQueues s (ksCurDomain s,p)) \ - \ st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s" - unfolding valid_queues_def - apply (clarsimp dest!: bitmapQ_ksReadyQueuesI) - apply (case_tac "ksReadyQueues s (ksCurDomain s, p)", simp) - apply (rename_tac t ts) - apply (drule_tac t=t and p=p and d="ksCurDomain s" in valid_queues_no_bitmap_objD) - apply simp - apply (fastforce elim: obj_at'_weaken simp: inQ_def tcb_in_cur_domain'_def st_tcb_at'_def) - done - thus ?thesis - by (wp switchToThread_invs_no_cicd') (fastforce dest: invs_no_cicd'_queues) -qed + apply (simp add: Thread_H.switchToThread_def) + apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued + Arch_switchToThread_invs_no_cicd') + apply (auto elim!: pred_tcb'_weakenE) + apply (prop_tac "valid_bitmapQ s") + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_bitmaps_def) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def valid_bitmapQ_bitmapQ_simp) + apply (drule_tac x="ksCurDomain s" in spec) + apply (drule_tac x="lookupBitmapPriority (ksCurDomain s) s" in spec) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def) + apply (frule (3) obj_at'_tcbQueueHead_ksReadyQueues) + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) + done lemma switchToIdleThread_invs_no_cicd': "\invs_no_cicd'\ switchToIdleThread \\rv. invs'\" @@ -1360,7 +1277,7 @@ lemma switchToIdleThread_invs_no_cicd': crunch obj_at'[wp]: "Arch.switchToIdleThread" "\s. obj_at' P t s" -declare static_imp_conj_wp[wp_split del] +declare hoare_weak_lift_imp_conj[wp_split del] lemma setCurThread_const: "\\_. P t \ setCurThread t \\_ s. P (ksCurThread s) \" @@ -1418,11 +1335,6 @@ lemma corres_assert_ret: apply (simp add: assert_def return_def fail_def) done -lemma corres_assert_assume_l: - "corres dc P Q (f ()) g - \ corres dc (P and (\s. P')) Q (assert P' >>= f) g" - by (force simp: corres_underlying_def assert_def return_def bind_def fail_def) - lemma corres_assert_assume_r: "corres dc P Q f (g ()) \ corres dc P (Q and (\s. Q')) f (assert Q' >>= g)" @@ -1452,8 +1364,9 @@ lemma guarded_switch_to_corres: and valid_vspace_objs and pspace_aligned and pspace_distinct and valid_vs_lookup and valid_global_objs and unique_table_refs o caps_of_state - and st_tcb_at runnable t and valid_etcbs) - (valid_arch_state' and valid_pspace' and Invariants_H.valid_queues + and st_tcb_at runnable t and valid_etcbs + and valid_queues and valid_idle) + (valid_arch_state' and valid_pspace' and sym_heap_sched_pointers and st_tcb_at' runnable' t and cur_tcb') (guarded_switch_to t) (switchToThread t)" apply (simp add: guarded_switch_to_def) @@ -1463,8 +1376,8 @@ lemma guarded_switch_to_corres: apply (rule switchToThread_corres) apply (force simp: st_tcb_at_tcb_at) apply (wp gts_st_tcb_at) - apply (force simp: st_tcb_at_tcb_at)+ - done + apply (force simp: st_tcb_at_tcb_at)+ + done abbreviation "enumPrio \ [0.e.maxPriority]" @@ -1499,7 +1412,7 @@ lemma curDomain_corres: "corres (=) \ \ (gets cur_domain) (curDomain)" lemma curDomain_corres': "corres (=) \ (\s. 
ksCurDomain s \ maxDomain) - (gets cur_domain) (if 1 < numDomains then curDomain else return 0)" + (gets cur_domain) (if Suc 0 < numDomains then curDomain else return 0)" apply (case_tac "1 < numDomains"; simp) apply (rule corres_guard_imp[OF curDomain_corres]; solves simp) (* if we have only one domain, then we are in it *) @@ -1509,27 +1422,32 @@ lemma curDomain_corres': lemma lookupBitmapPriority_Max_eqI: "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; ksReadyQueuesL1Bitmap s d \ 0 \ - \ lookupBitmapPriority d s = (Max {prio. ksReadyQueues s (d, prio) \ []})" + \ lookupBitmapPriority d s = (Max {prio. \ tcbQueueEmpty (ksReadyQueues s (d, prio))})" apply (rule Max_eqI[simplified eq_commute]; simp) apply (fastforce simp: bitmapL1_highest_lookup valid_bitmapQ_bitmapQ_simp) apply (metis valid_bitmapQ_bitmapQ_simp bitmapQ_from_bitmap_lookup) done lemma corres_gets_queues_getReadyQueuesL1Bitmap: - "corres (\qs l1. ((l1 = 0) = (\p. qs p = []))) \ valid_queues + "corres (\qs l1. (l1 = 0) = (\p. qs p = [])) \ valid_bitmaps (gets (\s. ready_queues s d)) (getReadyQueuesL1Bitmap d)" - unfolding state_relation_def valid_queues_def getReadyQueuesL1Bitmap_def - by (clarsimp simp: bitmapL1_zero_ksReadyQueues ready_queues_relation_def) + unfolding state_relation_def valid_bitmaps_def getReadyQueuesL1Bitmap_def + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x=d in spec) + apply (fastforce simp: bitmapL1_zero_ksReadyQueues list_queue_relation_def tcbQueueEmpty_def) + done lemma guarded_switch_to_chooseThread_fragment_corres: "corres dc (P and st_tcb_at runnable t and invs and valid_sched) - (P' and st_tcb_at' runnable' t and invs_no_cicd') - (guarded_switch_to t) - (do runnable \ isRunnable t; - y \ assert runnable; - ThreadDecls_H.switchToThread t - od)" + (P' and invs_no_cicd') + (guarded_switch_to t) + (do runnable \ isRunnable t; + y \ assert runnable; + ThreadDecls_H.switchToThread t + od)" + apply (rule_tac Q'="st_tcb_at' runnable' t" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) unfolding guarded_switch_to_def isRunnable_def apply simp apply (rule corres_guard_imp) @@ -1544,35 +1462,50 @@ lemma guarded_switch_to_chooseThread_fragment_corres: simp: pred_tcb_at' runnable'_def all_invs_but_ct_idle_or_in_cur_domain'_def) done +lemma Max_prio_helper: + "ready_queues_relation s s' + \ Max {prio. ready_queues s d prio \ []} + = Max {prio. 
\ tcbQueueEmpty (ksReadyQueues s' (d, prio))}" + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def tcbQueueEmpty_def) + apply (rule Max_eq_if) + apply fastforce + apply fastforce + apply (fastforce dest: heap_path_head) + apply clarsimp + apply (drule_tac x=d in spec) + apply (drule_tac x=b in spec) + apply force + done + lemma bitmap_lookup_queue_is_max_non_empty: - "\ valid_queues s'; (s, s') \ state_relation; invs s; + "\ valid_bitmaps s'; (s, s') \ state_relation; invs s; ksReadyQueuesL1Bitmap s' (ksCurDomain s') \ 0 \ - \ ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s') = - max_non_empty_queue (ready_queues s (cur_domain s))" - unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_queues_def - by (clarsimp simp add: max_non_empty_queue_def lookupBitmapPriority_Max_eqI - state_relation_def ready_queues_relation_def) + \ the (tcbQueueHead (ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s'))) + = hd (max_non_empty_queue (ready_queues s (cur_domain s)))" + apply (clarsimp simp: max_non_empty_queue_def valid_bitmaps_def lookupBitmapPriority_Max_eqI) + apply (frule curdomain_relation) + apply (drule state_relation_ready_queues_relation) + apply (simp add: Max_prio_helper) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (frule (2) bitmapL1_zero_ksReadyQueues[THEN arg_cong_Not, THEN iffD1]) + apply clarsimp + apply (cut_tac P="\x. \ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', x))" + in setcomp_Max_has_prop) + apply fastforce + apply (clarsimp simp: ready_queues_relation_def Let_def list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x="ksCurDomain s'" in spec) + apply (drule_tac x="Max {prio. \ tcbQueueEmpty (ksReadyQueues s' (ksCurDomain s', prio))}" + in spec) + using heap_path_head tcbQueueEmpty_def + by fastforce lemma ksReadyQueuesL1Bitmap_return_wp: "\\s. P (ksReadyQueuesL1Bitmap s d) s \ getReadyQueuesL1Bitmap d \\rv s. P rv s\" unfolding getReadyQueuesL1Bitmap_def by wp -lemma ksReadyQueuesL1Bitmap_st_tcb_at': - "\ ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0 ; valid_queues s \ - \ st_tcb_at' runnable' (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" - apply (drule bitmapQ_from_bitmap_lookup; clarsimp simp: valid_queues_def) - apply (clarsimp simp add: valid_bitmapQ_bitmapQ_simp) - apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)") - apply simp - apply (simp add: valid_queues_no_bitmap_def) - apply (erule_tac x="ksCurDomain s" in allE) - apply (erule_tac x="lookupBitmapPriority (ksCurDomain s) s" in allE) - apply (clarsimp simp: st_tcb_at'_def) - apply (erule obj_at'_weaken) - apply simp - done - lemma curDomain_or_return_0: "\ \P\ curDomain \\rv s. Q rv s \; \s. P s \ ksCurDomain s \ maxDomain \ \ \P\ if 1 < numDomains then curDomain else return 0 \\rv s. Q rv s \" @@ -1584,52 +1517,72 @@ lemma invs_no_cicd_ksCurDomain_maxDomain': "invs_no_cicd' s \ ksCurDomain s \ maxDomain" unfolding invs_no_cicd'_def by simp +crunches curDomain + for valid_bitmaps[wp]: valid_bitmaps + lemma chooseThread_corres: - "corres dc (invs and valid_sched) (invs_no_cicd') - choose_thread chooseThread" (is "corres _ ?PREI ?PREH _ _") + "corres dc (invs and valid_sched) invs_no_cicd' choose_thread chooseThread" + (is "corres _ ?PREI ?PREH _ _") proof - + + (* if we only have one domain, we are in it *) + have one_domain_case: + "\s. 
\ invs_no_cicd' s; numDomains \ 1 \ \ ksCurDomain s = 0" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) + show ?thesis - unfolding choose_thread_def chooseThread_def - apply (simp only: return_bind Let_def) - apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) - apply (rule corres_guard_imp) - apply (rule corres_split[OF curDomain_corres']) - apply clarsimp - apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) - apply (erule corres_if2[OF sym]) - apply (rule switchToIdleThread_corres) - apply (rule corres_symb_exec_r) - apply (rule corres_symb_exec_r) - apply (rule_tac - P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) \ - st_tcb_at runnable (hd (max_non_empty_queue queues)) s" and - P'="\s. (?PREH s \ st_tcb_at' runnable' (hd queue) s) \ - l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) \ - l1 \ 0 \ - queue = ksReadyQueues s (ksCurDomain s, - lookupBitmapPriority (ksCurDomain s) s)" and - F="hd queue = hd (max_non_empty_queue queues)" in corres_req) - apply (fastforce dest!: invs_no_cicd'_queues simp: bitmap_lookup_queue_is_max_non_empty) - apply clarsimp - apply (rule corres_guard_imp) - apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) - apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ - apply (clarsimp simp: if_apply_def2) - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) - apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ - apply (fastforce simp: invs_no_cicd'_def) - apply (clarsimp simp: valid_sched_def DetSchedInvs_AI.valid_queues_def max_non_empty_queue_def) - apply (erule_tac x="cur_domain s" in allE) - apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) - apply (case_tac "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \ []})") - apply (clarsimp) - apply (subgoal_tac - "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \ []}) \ []") - apply (fastforce elim!: setcomp_Max_has_prop)+ - apply (simp add: invs_no_cicd_ksCurDomain_maxDomain') - apply (clarsimp dest!: invs_no_cicd'_queues) - apply (fastforce intro: ksReadyQueuesL1Bitmap_st_tcb_at') - done + supply if_split[split del] + apply (clarsimp simp: choose_thread_def chooseThread_def) + apply add_ready_qs_runnable + apply (rule corres_stateAssert_add_assertion[rotated]) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_stateAssert_add_assertion[rotated]) + apply fastforce + apply (simp only: return_bind Let_def) + apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) + apply (rule corres_guard_imp) + apply (rule corres_split[OF curDomain_corres']) + apply clarsimp + apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) + apply (erule corres_if2[OF sym]) + apply (rule switchToIdleThread_corres) + apply (rule corres_symb_exec_r) + apply (rule corres_symb_exec_r) + apply (rule_tac P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) + \ st_tcb_at runnable (hd (max_non_empty_queue queues)) s" + and P'="\s. 
?PREH s \ l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) + \ l1 \ 0 + \ queue = ksReadyQueues s (ksCurDomain s, + lookupBitmapPriority (ksCurDomain s) s)" + and F="the (tcbQueueHead queue) = hd (max_non_empty_queue queues)" + in corres_req) + apply (fastforce simp: bitmap_lookup_queue_is_max_non_empty + all_invs_but_ct_idle_or_in_cur_domain'_def) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) + apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ + apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) + apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ + apply (clarsimp simp: valid_sched_def max_non_empty_queue_def valid_queues_def split: if_splits) + apply (erule_tac x="cur_domain s" in allE) + apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) + apply (case_tac "ready_queues s (cur_domain s) + (Max {prio. ready_queues s (cur_domain s) prio + \ []})") + apply (clarsimp) + apply (subgoal_tac "ready_queues s (cur_domain s) + (Max {prio. ready_queues s (cur_domain s) prio \ []}) + \ []") + apply fastforce + apply (fastforce elim!: setcomp_Max_has_prop) + apply fastforce + apply clarsimp + apply (frule invs_no_cicd_ksCurDomain_maxDomain') + apply (prop_tac "valid_bitmaps s") + apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def) + apply (fastforce dest: one_domain_case split: if_splits) + done qed lemma thread_get_comm: "do x \ thread_get f p; y \ gets g; k x y od = @@ -1680,12 +1633,6 @@ lemma nextDomain_invs_no_cicd': all_invs_but_ct_idle_or_in_cur_domain'_def) done -lemma bind_dummy_ret_val: - "do y \ a; - b - od = do a; b od" - by simp - lemma scheduleChooseNewThread_fragment_corres: "corres dc (invs and valid_sched and (\s. scheduler_action s = choose_new_thread)) (invs' and (\s. ksSchedulerAction s = ChooseNewThread)) (do _ \ when (domainTime = 0) next_domain; @@ -1724,7 +1671,7 @@ lemma isHighestPrio_corres: assumes "d' = d" assumes "p' = p" shows - "corres ((=)) \ valid_queues + "corres ((=)) \ valid_bitmaps (gets (is_highest_prio d p)) (isHighestPrio d' p')" using assms @@ -1734,18 +1681,16 @@ lemma isHighestPrio_corres: apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) apply (rule corres_if_r'[where P'="\_. True",rotated]) apply (rule_tac corres_symb_exec_r) - apply (rule_tac - P="\s. q = ready_queues s d - " and - P'="\s. valid_queues s \ - l1 = ksReadyQueuesL1Bitmap s d \ - l1 \ 0 \ hprio = lookupBitmapPriority d s" and - F="hprio = Max {prio. q prio \ []}" in corres_req) - apply (elim conjE) - apply (clarsimp simp: valid_queues_def) - apply (subst lookupBitmapPriority_Max_eqI; blast?) - apply (fastforce simp: ready_queues_relation_def dest!: state_relationD) - apply fastforce + apply (rule_tac P="\s. q = ready_queues s d" + and P'="\s. valid_bitmaps s \ l1 = ksReadyQueuesL1Bitmap s d \ + l1 \ 0 \ hprio = lookupBitmapPriority d s" + and F="hprio = Max {prio. q prio \ []}" in corres_req) + apply (elim conjE) + apply (clarsimp simp: valid_bitmaps_def) + apply (subst lookupBitmapPriority_Max_eqI; blast?) 
+ apply (fastforce dest: state_relation_ready_queues_relation Max_prio_helper[where d=d] + simp: tcbQueueEmpty_def) + apply fastforce apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imps ksReadyQueuesL1Bitmap_return_wp)+ done @@ -1757,9 +1702,8 @@ crunch inv[wp]: schedule_switch_thread_fastfail P crunch inv[wp]: scheduleSwitchThreadFastfail P lemma setSchedulerAction_invs': (* not in wp set, clobbered by ssa_wp *) - "\\s. invs' s \ setSchedulerAction ChooseNewThread \\_. invs' \" + "setSchedulerAction ChooseNewThread \invs' \" by (wpsimp simp: invs'_def cur_tcb'_def valid_state'_def valid_irq_node'_def ct_not_inQ_def - valid_queues_def valid_queues_no_bitmap_def valid_queues'_def ct_idle_or_in_cur_domain'_def) lemma scheduleChooseNewThread_corres: @@ -1791,6 +1735,51 @@ lemma ethread_get_when_corres: apply wpsimp+ done +lemma tcb_sched_enqueue_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_enqueue t \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_enqueue_def set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_append_in_correct_ready_q[wp]: + "tcb_sched_action tcb_sched_append tcb_ptr \in_correct_ready_q\ " + unfolding tcb_sched_action_def tcb_sched_append_def + apply wpsimp + apply (clarsimp simp: in_correct_ready_q_def obj_at_def etcb_at_def is_etcb_at_def + split: option.splits) + done + +lemma tcb_sched_enqueue_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_enqueue t \ready_qs_distinct\ " + unfolding tcb_sched_action_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +lemma tcb_sched_append_ready_qs_distinct[wp]: + "tcb_sched_action tcb_sched_append t \ready_qs_distinct\ " + unfolding tcb_sched_action_def tcb_sched_append_def set_tcb_queue_def + apply (wpsimp wp: thread_get_wp') + apply (clarsimp simp: ready_qs_distinct_def etcb_at_def is_etcb_at_def split: option.splits) + done + +crunches set_scheduler_action + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps simp: in_correct_ready_q_def ready_qs_distinct_def) + +crunches reschedule_required + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps ignore_del: reschedule_required) + +lemma tcbSchedEnqueue_valid_pspace'[wp]: + "tcbSchedEnqueue tcbPtr \valid_pspace'\" + unfolding valid_pspace'_def + by wpsimp + lemma schedule_corres: "corres dc (invs and valid_sched and valid_list) invs' (Schedule_A.schedule) ThreadDecls_H.schedule" supply ethread_get_wp[wp del] @@ -1819,7 +1808,7 @@ lemma schedule_corres: apply (rule corres_split[OF thread_get_isRunnable_corres]) apply (rule corres_split) apply (rule corres_when, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule scheduleChooseNewThread_corres, simp) apply (wp thread_get_wp' tcbSchedEnqueue_invs' hoare_vcg_conj_lift hoare_drop_imps | clarsimp)+ @@ -1828,7 +1817,7 @@ lemma schedule_corres: rename_tac was_running wasRunning) apply (rule corres_split) apply (rule corres_when, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_split[OF getIdleThread_corres], rename_tac it it') apply (rule_tac F="was_running \ ct \ it" in corres_gen_asm) apply (rule corres_split) @@ -1844,7 +1833,7 @@ lemma schedule_corres: apply 
(rule corres_split[OF curDomain_corres]) apply (rule corres_split[OF isHighestPrio_corres]; simp only:) apply (rule corres_if, simp) - apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (rule corres_split[OF tcbSchedEnqueue_corres], simp) apply (simp, fold dc_def) apply (rule corres_split) apply (rule setSchedulerAction_corres; simp) @@ -1858,7 +1847,7 @@ lemma schedule_corres: apply (wp tcb_sched_action_enqueue_valid_blocked hoare_vcg_all_lift enqueue_thread_queued) apply (wp tcbSchedEnqueue_invs'_not_ResumeCurrentThread) apply (rule corres_if, fastforce) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (simp, fold dc_def) apply (rule corres_split) apply (rule setSchedulerAction_corres; simp) @@ -1890,7 +1879,8 @@ lemma schedule_corres: in hoare_post_imp, fastforce) apply (wp add: tcb_sched_action_enqueue_valid_blocked_except tcbSchedEnqueue_invs'_not_ResumeCurrentThread thread_get_wp - del: gets_wp)+ + del: gets_wp + | strengthen valid_objs'_valid_tcbs')+ apply (clarsimp simp: conj_ac if_apply_def2 cong: imp_cong conj_cong del: hoare_gets) apply (wp gets_wp)+ @@ -1913,18 +1903,17 @@ lemma schedule_corres: weak_valid_sched_action_def tcb_at_is_etcb_at tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]] valid_blocked_except_def valid_blocked_def) - apply (clarsimp simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) + apply (fastforce simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) done (* choose new thread case *) apply (intro impI conjI allI tcb_at_invs | fastforce simp: invs_def cur_tcb_def valid_etcbs_def valid_sched_def st_tcb_at_def obj_at_def valid_state_def weak_valid_sched_action_def not_cur_thread_def)+ - apply (simp add: valid_sched_def valid_blocked_def valid_blocked_except_def) done (* haskell final subgoal *) - apply (clarsimp simp: if_apply_def2 invs'_def valid_state'_def + apply (clarsimp simp: if_apply_def2 invs'_def valid_state'_def valid_sched_def cong: imp_cong split: scheduler_action.splits) apply (fastforce simp: cur_tcb'_def valid_pspace'_def) done @@ -1938,11 +1927,8 @@ proof - apply (simp add: setSchedulerAction_def) apply wp apply (clarsimp simp add: invs'_def valid_state'_def cur_tcb'_def - Invariants_H.valid_queues_def - state_refs_of'_def ps_clear_def - valid_irq_node'_def valid_queues'_def - tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def - bitmapQ_defs valid_queues_no_bitmap_def + state_refs_of'_def ps_clear_def valid_irq_node'_def + tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def bitmapQ_defs cong: option.case_cong) done qed @@ -1977,13 +1963,10 @@ lemma getDomainTime_wp[wp]: "\\s. P (ksDomainTime s) s \ by wp lemma switchToThread_ct_not_queued_2: - "\invs_no_cicd' and tcb_at' t\ switchToThread t \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" - (is "\_\ _ \\_. ?POST\") - apply (simp add: Thread_H.switchToThread_def) - apply (wp) - apply (simp add: X64_H.switchToThread_def setCurThread_def) - apply (wp tcbSchedDequeue_not_tcbQueued | simp )+ - done + "\invs_no_cicd' and tcb_at' t\ switchToThread t \\_ s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" + unfolding Thread_H.switchToThread_def + by (wpsimp simp: X64_H.switchToThread_def setCurThread_def + wp: hoare_drop_imp tcbSchedDequeue_not_tcbQueued) lemma setCurThread_obj_at': "\ obj_at' P t \ setCurThread t \\rv s. obj_at' P (ksCurThread s) s \" @@ -1996,11 +1979,12 @@ proof - qed lemma switchToIdleThread_ct_not_queued_no_cicd': - "\ invs_no_cicd' \ switchToIdleThread \\rv s. 
obj_at' (Not \ tcbQueued) (ksCurThread s) s \" + "\invs_no_cicd'\ switchToIdleThread \\_ s. obj_at' (Not \ tcbQueued) (ksCurThread s) s \" apply (simp add: Thread_H.switchToIdleThread_def) apply (wp setCurThread_obj_at') - apply (rule idle'_not_tcbQueued') - apply (simp add: invs_no_cicd'_def)+ + apply (clarsimp simp: ready_qs_runnable_def) + apply (drule_tac x="ksIdleThread s" in spec) + apply (clarsimp simp: invs_no_cicd'_def valid_idle'_def st_tcb_at'_def idle_tcb'_def obj_at'_def) done lemma switchToIdleThread_activatable_2[wp]: @@ -2017,7 +2001,7 @@ lemma switchToThread_tcb_in_cur_domain': ThreadDecls_H.switchToThread thread \\y s. tcb_in_cur_domain' (ksCurThread s) s\" apply (simp add: Thread_H.switchToThread_def setCurThread_def) - apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued) + apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued hoare_drop_imps) done lemma chooseThread_invs_no_cicd'_posts: (* generic version *) @@ -2039,11 +2023,15 @@ proof - by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) show ?thesis - unfolding chooseThread_def Let_def curDomain_def + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" + in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) @@ -2057,12 +2045,10 @@ proof - apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv switchToThread_ct_not_queued_2 assert_inv hoare_disjI2 switchToThread_tcb_in_cur_domain') - apply clarsimp - apply (clarsimp dest!: invs_no_cicd'_queues - simp: valid_queues_def lookupBitmapPriority_def[symmetric]) - apply (drule (3) lookupBitmapPriority_obj_at') - apply normalise_obj_at' - apply (fastforce simp: tcb_in_cur_domain'_def inQ_def elim: obj_at'_weaken) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ done qed @@ -2101,19 +2087,26 @@ proof - (* FIXME this is almost identical to the chooseThread_invs_no_cicd'_posts proof, can generalise? *) show ?thesis - unfolding chooseThread_def Let_def curDomain_def + apply (clarsimp simp: chooseThread_def Let_def curDomain_def) + apply (rule bind_wp[OF _ stateAssert_sp])+ apply (simp only: return_bind, simp) - apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) - apply (rule_tac B="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ - rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rule bind_wp[where Q'="\rv s. invs_no_cicd' s \ rv = ksCurDomain s + \ ksReadyQueues_asrt s \ ready_qs_runnable s"]) + apply (rule_tac Q'="\rv s. 
invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom \ + ksReadyQueues_asrt s \ ready_qs_runnable s" + in bind_wp) apply (rename_tac l1) apply (case_tac "l1 = 0") (* switch to idle thread *) - apply (simp, wp (once) switchToIdleThread_invs_no_cicd', simp) + apply (simp, wp switchToIdleThread_invs_no_cicd', simp) (* we have a thread to switch to *) apply (clarsimp simp: bitmap_fun_defs) apply (wp assert_inv) - apply (clarsimp dest!: invs_no_cicd'_queues simp: valid_queues_def) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_pspace'_def + valid_bitmaps_def) + apply (frule (6) lookupBitmapPriority_obj_at') + apply (clarsimp simp: tcb_in_cur_domain'_def obj_at'_def tcbQueueEmpty_def inQ_def) apply (fastforce elim: bitmapQ_from_bitmap_lookup simp: lookupBitmapPriority_def) apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ done @@ -2140,7 +2133,7 @@ lemma schedule_invs': "\invs'\ ThreadDecls_H.schedule \\rv. invs'\" supply ssa_wp[wp del] apply (simp add: schedule_def) - apply (rule_tac hoare_seq_ext, rename_tac t) + apply (rule_tac bind_wp, rename_tac t) apply (wp, wpc) \ \action = ResumeCurrentThread\ apply (wp)[1] @@ -2150,7 +2143,7 @@ lemma schedule_invs': apply (wpsimp wp: scheduleChooseNewThread_invs' ssa_invs' chooseThread_invs_no_cicd' setSchedulerAction_invs' setSchedulerAction_direct switchToThread_tcb_in_cur_domain' switchToThread_ct_not_queued_2 - | wp hoare_disjI2[where Q="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] + | wp hoare_disjI2[where R="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] | strengthen invs'_invs_no_cicd @@ -2221,7 +2214,7 @@ lemma schedule_ct_activatable'[wp]: "\invs'\ ThreadDecls_H.schedule \\_. ct_in_state' activatable'\" supply ssa_wp[wp del] apply (simp add: schedule_def) - apply (rule_tac hoare_seq_ext, rename_tac t) + apply (rule_tac bind_wp, rename_tac t) apply (wp, wpc) \ \action = ResumeCurrentThread\ apply (wp)[1] @@ -2262,11 +2255,18 @@ lemma sbn_sch_act_sane: done lemma possibleSwitchTo_corres: - "corres dc (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t) - (Invariants_H.valid_queues and valid_queues' and - (\s. weak_sch_act_wf (ksSchedulerAction s) s) and cur_tcb' and tcb_at' t and st_tcb_at' runnable' t and valid_objs') - (possible_switch_to t) - (possibleSwitchTo t)" + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t + and in_correct_ready_q and ready_qs_distinct and pspace_aligned and pspace_distinct) + ((\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and sym_heap_sched_pointers and valid_sched_pointers and valid_objs') + (possible_switch_to t) (possibleSwitchTo t)" + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (rule_tac Q'="tcb_at' t" in corres_cross_add_guard) + apply (fastforce dest!: st_tcb_at_tcb_at elim!: tcb_at_cross) supply ethread_get_wp[wp del] apply (simp add: possible_switch_to_def possibleSwitchTo_def cong: if_cong) apply (rule corres_guard_imp) @@ -2276,12 +2276,12 @@ lemma possibleSwitchTo_corres: apply (clarsimp simp: etcb_relation_def) apply (rule corres_split[OF getSchedulerAction_corres]) apply (rule corres_if, simp) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_if, simp) apply (case_tac action; simp) apply (rule corres_split[OF rescheduleRequired_corres]) - apply (rule tcbSchedEnqueue_corres) - apply (wp rescheduleRequired_valid_queues'_weak)+ + apply (rule tcbSchedEnqueue_corres, simp) + apply (wp reschedule_required_valid_queues | strengthen valid_objs'_valid_tcbs')+ apply (rule setSchedulerAction_corres, simp) apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imp[where f="ethread_get a b" for a b])+ @@ -2290,7 +2290,7 @@ lemma possibleSwitchTo_corres: apply (fastforce simp: valid_sched_def invs_def valid_state_def cur_tcb_def valid_sched_action_def weak_valid_sched_action_def tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]]) - apply (simp add: tcb_at_is_etcb_at) + apply fastforce done end diff --git a/proof/refine/X64/StateRelation.thy b/proof/refine/X64/StateRelation.thy index 88f7b61666..f8b3c00bd3 100644 --- a/proof/refine/X64/StateRelation.thy +++ b/proof/refine/X64/StateRelation.thy @@ -200,17 +200,24 @@ where \ tcb_bound_notification tcb = tcbBoundNotification tcb' \ tcb_mcpriority tcb = tcbMCP tcb'" + +\ \ + A pair of objects @{term "(obj, obj')"} should satisfy the following relation when, under further + mild assumptions, a @{term corres_underlying} lemma for @{term "set_object obj"} + and @{term "setObject obj'"} can be stated: see setObject_other_corres in KHeap_R. + + TCBs do not satisfy this relation because the tcbSchedPrev and tcbSchedNext fields of a TCB are + used to model the ready queues, and so an update to such a field would correspond to an update + to a ready queue (see ready_queues_relation below).\ definition other_obj_relation :: "Structures_A.kernel_object \ Structures_H.kernel_object \ bool" where "other_obj_relation obj obj' \ - (case (obj, obj') of - (TCB tcb, KOTCB tcb') \ tcb_relation tcb tcb' - | (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' + case (obj, obj') of + (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' | (Notification ntfn, KONotification ntfn') \ ntfn_relation ntfn ntfn' - | (ArchObj (X64_A.ASIDPool pool), KOArch (KOASIDPool pool')) - \ asid_pool_relation pool pool' - | _ \ False)" + | (ArchObj (X64_A.ASIDPool pool), KOArch (KOASIDPool pool')) \ asid_pool_relation pool pool' + | _ \ False" primrec pml4e_relation' :: "X64_A.pml4e \ X64_H.pml4e \ bool" @@ -290,6 +297,12 @@ where | "aobj_relation_cuts (PageMapL4 pm) x = (\y. 
(x + (ucast y << word_size_bits), pml4e_relation y)) ` UNIV" +definition tcb_relation_cut :: "Structures_A.kernel_object \ kernel_object \ bool" where + "tcb_relation_cut obj obj' \ + case (obj, obj') of + (TCB t, KOTCB t') \ tcb_relation t t' + | _ \ False" + primrec obj_relation_cuts :: "Structures_A.kernel_object \ machine_word \ obj_relation_cuts" where @@ -297,17 +310,17 @@ where (if well_formed_cnode_n sz cs then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} else {(x, \\)})" -| "obj_relation_cuts (TCB tcb) x = {(x, other_obj_relation)}" +| "obj_relation_cuts (TCB tcb) x = {(x, tcb_relation_cut)}" | "obj_relation_cuts (Endpoint ep) x = {(x, other_obj_relation)}" | "obj_relation_cuts (Notification ntfn) x = {(x, other_obj_relation)}" | "obj_relation_cuts (ArchObj ao) x = aobj_relation_cuts ao x" - lemma obj_relation_cuts_def2: "obj_relation_cuts ko x = (case ko of CNode sz cs \ if well_formed_cnode_n sz cs then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} else {(x, \\)} + | TCB tcb \ {(x, tcb_relation_cut)} | ArchObj (PageTable pt) \ (\y. (x + (ucast y << word_size_bits), pte_relation y)) ` (UNIV :: 9 word set) | ArchObj (PageDirectory pd) \ (\y. (x + (ucast y << word_size_bits), pde_relation y)) @@ -326,6 +339,7 @@ lemma obj_relation_cuts_def3: "obj_relation_cuts ko x = (case (a_type ko) of ACapTable n \ {(cte_map (x, y), cte_relation y) | y. length y = n} + | ATCB \ {(x, tcb_relation_cut)} | AArch APageTable \ (\y. (x + (ucast y << word_size_bits), pte_relation y)) ` (UNIV :: 9 word set) | AArch APageDirectory \ (\y. (x + (ucast y << word_size_bits), pde_relation y)) @@ -345,22 +359,27 @@ lemma obj_relation_cuts_def3: done definition - "is_other_obj_relation_type tp \ - case tp of - ACapTable n \ False - | AArch APageTable \ False - | AArch APageDirectory \ False - | AArch APDPointerTable \ False - | AArch APageMapL4 \ False - | AArch (AUserData _) \ False - | AArch (ADeviceData _) \ False - | AGarbage _ \ False - | _ \ True" + "is_other_obj_relation_type tp \ + case tp of + ACapTable n \ False + | ATCB \ False + | AArch APageTable \ False + | AArch APageDirectory \ False + | AArch APDPointerTable \ False + | AArch APageMapL4 \ False + | AArch (AUserData _) \ False + | AArch (ADeviceData _) \ False + | AGarbage _ \ False + | _ \ True" lemma is_other_obj_relation_type_CapTable: "\ is_other_obj_relation_type (ACapTable n)" by (simp add: is_other_obj_relation_type_def) +lemma is_other_obj_relation_type_TCB: + "\ is_other_obj_relation_type ATCB" + by (simp add: is_other_obj_relation_type_def) + lemma is_other_obj_relation_type_UserData: "\ is_other_obj_relation_type (AArch (AUserData sz))" unfolding is_other_obj_relation_type_def by simp @@ -408,11 +427,55 @@ where "sched_act_relation choose_new_thread a' = (a' = ChooseNewThread)" | "sched_act_relation (switch_thread x) a' = (a' = SwitchToThread x)" -definition - ready_queues_relation :: "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) - \ (domain \ priority \ KernelStateData_H.ready_queue) \ bool" -where - "ready_queues_relation qs qs' \ \d p. (qs d p = qs' (d, p))" +definition queue_end_valid :: "obj_ref list \ tcb_queue \ bool" where + "queue_end_valid ts q \ + (ts = [] \ tcbQueueEnd q = None) \ (ts \ [] \ tcbQueueEnd q = Some (last ts))" + +definition prev_queue_head :: "tcb_queue \ (obj_ref \ 'a) \ bool" where + "prev_queue_head q prevs \ \head. 
tcbQueueHead q = Some head \ prevs head = None" + +lemma prev_queue_head_heap_upd: + "\prev_queue_head q prevs; Some r \ tcbQueueHead q\ \ prev_queue_head q (prevs(r := x))" + by (clarsimp simp: prev_queue_head_def) + +definition list_queue_relation :: + "obj_ref list \ tcb_queue \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) \ bool" + where + "list_queue_relation ts q nexts prevs \ + heap_ls nexts (tcbQueueHead q) ts \ queue_end_valid ts q \ prev_queue_head q prevs" + +lemma list_queue_relation_nil: + "list_queue_relation ts q nexts prevs \ ts = [] \ tcbQueueEmpty q" + by (fastforce dest: heap_path_head simp: tcbQueueEmpty_def list_queue_relation_def) + +definition ready_queue_relation :: + "Deterministic_A.domain \ Structures_A.priority + \ Deterministic_A.ready_queue \ ready_queue + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (obj_ref \ bool) \ bool" + where + "ready_queue_relation d p q q' nexts prevs flag \ + list_queue_relation q q' nexts prevs + \ (\t. flag t \ t \ set q) + \ (d > maxDomain \ p > maxPriority \ tcbQueueEmpty q')" + +definition ready_queues_relation_2 :: + "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) + \ (domain \ priority \ ready_queue) + \ (obj_ref \ obj_ref) \ (obj_ref \ obj_ref) + \ (domain \ priority \ obj_ref \ bool) \ bool" + where + "ready_queues_relation_2 qs qs' nexts prevs inQs \ + \d p. let q = qs d p; q' = qs' (d, p); flag = inQs d p in + ready_queue_relation d p q q' nexts prevs flag" + +abbreviation ready_queues_relation :: "det_state \ kernel_state \ bool" where + "ready_queues_relation s s' \ + ready_queues_relation_2 + (ready_queues s) (ksReadyQueues s') (tcbSchedNexts_of s') (tcbSchedPrevs_of s') + (\d p. inQ d p |< tcbs_of' s')" + +lemmas ready_queues_relation_def = ready_queues_relation_2_def definition ghost_relation :: "Structures_A.kheap \ (machine_word \ vmpage_size) \ (machine_word \ nat) \ bool" @@ -493,6 +556,7 @@ where \ x64_kernel_vspace s = x64KSKernelVSpace s' \ x64_allocated_io_ports s = x64KSAllocatedIOPorts s' \ x64_num_ioapics s = x64KSNumIOAPICs s' + \ x64_ioapic_nirqs s = x64KSIOAPICnIRQs s' \ x64_irq_relation (x64_irq_state s) (x64KSIRQState s')}" definition @@ -507,6 +571,8 @@ lemma obj_relation_cutsE: \sz cs z cap cte. \ ko = CNode sz cs; well_formed_cnode_n sz cs; y = cte_map (x, z); ko' = KOCTE cte; cs z = Some cap; cap_relation cap (cteCap cte) \ \ R; + \tcb tcb'. \ y = x; ko = TCB tcb; ko' = KOTCB tcb'; tcb_relation tcb tcb' \ + \ R; \pt (z :: 9 word) pte'. 
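To make the new queue representation concrete: each ready queue is now a doubly linked list threaded through the TCBs, with the queue object storing only head and end pointers, and list_queue_relation packages the three well-formedness facts used throughout this patch (the next-pointer walk from the head yields exactly the abstract list, the end pointer names the last element or is None for the empty queue, and the head has no predecessor). The Haskell sketch below restates those conditions on a toy heap; only the names tcbQueueHead and tcbQueueEnd are taken from the patch, everything else is invented for illustration and is not the seL4 Haskell spec.

import qualified Data.Map as M
import Data.Maybe (isNothing)

-- Illustrative types only; the real spec uses machine words and kernel heaps.
type Ref = Int

data TcbQueue = TcbQueue
  { tcbQueueHead :: Maybe Ref
  , tcbQueueEnd  :: Maybe Ref
  }

-- heap_ls analogue: following next pointers from the head visits exactly
-- the abstract list ts and then terminates.
heapLs :: M.Map Ref Ref -> Maybe Ref -> [Ref] -> Bool
heapLs _     Nothing  ts     = null ts
heapLs nexts (Just r) (t:ts) = r == t && heapLs nexts (M.lookup r nexts) ts
heapLs _     (Just _) []     = False

-- queue_end_valid: the end pointer is None iff the list is empty,
-- otherwise it names the last element.
queueEndValid :: [Ref] -> TcbQueue -> Bool
queueEndValid [] q = isNothing (tcbQueueEnd q)
queueEndValid ts q = tcbQueueEnd q == Just (last ts)

-- prev_queue_head: the head of the queue has no predecessor.
prevQueueHead :: TcbQueue -> M.Map Ref Ref -> Bool
prevQueueHead q prevs = case tcbQueueHead q of
  Nothing -> True
  Just h  -> isNothing (M.lookup h prevs)

-- list_queue_relation, roughly: the three conditions together.
listQueueRelation :: [Ref] -> TcbQueue -> M.Map Ref Ref -> M.Map Ref Ref -> Bool
listQueueRelation ts q nexts prevs =
  heapLs nexts (tcbQueueHead q) ts && queueEndValid ts q && prevQueueHead q prevs

-- Example: the abstract queue [1,2,3] threaded through next/prev pointers.
main :: IO ()
main = print (listQueueRelation [1, 2, 3] q nexts prevs)
  where
    q     = TcbQueue (Just 1) (Just 3)
    nexts = M.fromList [(1, 2), (2, 3)]
    prevs = M.fromList [(2, 1), (3, 2)]

Running main checks the relation for the three-element queue 1 -> 2 -> 3; dropping any of the three conjuncts admits exactly the malformed heaps that the corresponding lemmas above rule out.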
\ ko = ArchObj (PageTable pt); y = x + (ucast z << word_size_bits); ko' = KOArch (KOPTE pte'); pte_relation' (pt z) pte' \ \ R; @@ -524,8 +590,8 @@ lemma obj_relation_cutsE: \ y = x; other_obj_relation ko ko'; is_other_obj_relation_type (a_type ko) \ \ R \ \ R" apply (simp add: obj_relation_cuts_def2 is_other_obj_relation_type_def - a_type_def - split: Structures_A.kernel_object.split_asm if_split_asm + a_type_def tcb_relation_cut_def + split: Structures_A.kernel_object.split_asm if_split_asm kernel_object.split_asm X64_A.arch_kernel_obj.split_asm) apply ((clarsimp split: if_splits, force simp: cte_relation_def pte_relation_def pde_relation_def @@ -606,7 +672,7 @@ where pspace_relation (kheap s) (ksPSpace s') \ ekheap_relation (ekheap s) (ksPSpace s') \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') - \ ready_queues_relation (ready_queues s) (ksReadyQueues s') + \ ready_queues_relation s s' \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') @@ -628,6 +694,10 @@ lemma curthread_relation: "(a, b) \ state_relation \ ksCurThread b = cur_thread a" by (simp add: state_relation_def) +lemma curdomain_relation[elim!]: + "(s, s') \ state_relation \ cur_domain s = ksCurDomain s'" + by (clarsimp simp: state_relation_def) + lemma state_relation_pspace_relation[elim!]: "(s,s') \ state_relation \ pspace_relation (kheap s) (ksPSpace s')" by (simp add: state_relation_def) @@ -636,12 +706,24 @@ lemma state_relation_ekheap_relation[elim!]: "(s,s') \ state_relation \ ekheap_relation (ekheap s) (ksPSpace s')" by (simp add: state_relation_def) +lemma state_relation_sched_act_relation[elim!]: + "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" + by (clarsimp simp: state_relation_def) + +lemma state_relation_ready_queues_relation[elim!]: + "(s, s') \ state_relation \ ready_queues_relation s s'" + by (simp add: state_relation_def) + +lemma state_relation_idle_thread[elim!]: + "(s, s') \ state_relation \ idle_thread s = ksIdleThread s'" + by (clarsimp simp: state_relation_def) + lemma state_relationD: assumes sr: "(s, s') \ state_relation" shows "pspace_relation (kheap s) (ksPSpace s') \ ekheap_relation (ekheap s) (ksPSpace s') \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') \ - ready_queues_relation (ready_queues s) (ksReadyQueues s') \ + ready_queues_relation s s' \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') \ @@ -663,7 +745,7 @@ lemma state_relationE [elim?]: and rl: "\pspace_relation (kheap s) (ksPSpace s'); ekheap_relation (ekheap s) (ksPSpace s'); sched_act_relation (scheduler_action s) (ksSchedulerAction s'); - ready_queues_relation (ready_queues s) (ksReadyQueues s'); + ready_queues_relation s s'; ghost_relation (kheap s) (gsUserPages s') (gsCNodes s'); cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) (ctes_of s'); diff --git a/proof/refine/X64/Syscall_R.thy b/proof/refine/X64/Syscall_R.thy index 844fea8b6b..fdc856dc79 100644 --- a/proof/refine/X64/Syscall_R.thy +++ b/proof/refine/X64/Syscall_R.thy @@ -337,7 +337,7 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: apply (simp add: threadSet_def) apply wp apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (wp hoare_weak_lift_imp 
getObject_tcb_wp hoare_vcg_all_lift)+ apply (rename_tac word) apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" @@ -350,16 +350,14 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: lemma setDomain_corres: "corres dc - (valid_etcbs and valid_sched and tcb_at tptr) - (invs' and sch_act_simple - and tcb_at' tptr and (\s. new_dom \ maxDomain)) - (set_domain tptr new_dom) - (setDomain tptr new_dom)" + (valid_etcbs and valid_sched and pspace_aligned and pspace_distinct and tcb_at tptr) + (invs' and sch_act_simple and tcb_at' tptr and (\s. new_dom \ maxDomain)) + (set_domain tptr new_dom) (setDomain tptr new_dom)" apply (rule corres_gen_asm2) apply (simp add: set_domain_def setDomain_def thread_set_domain_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) - apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) apply (rule corres_split) apply (rule ethread_set_corres; simp) apply (clarsimp simp: etcb_relation_def) @@ -368,26 +366,38 @@ lemma setDomain_corres: apply (rule corres_split) apply clarsimp apply (rule corres_when[OF refl]) - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply (rule corres_when[OF refl]) apply (rule rescheduleRequired_corres) - apply ((wp hoare_drop_imps hoare_vcg_conj_lift | clarsimp| assumption)+)[5] - apply clarsimp - apply (rule_tac Q="\_. valid_objs' and valid_queues' and valid_queues and - (\s. sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" - in hoare_strengthen_post[rotated]) - apply (auto simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def)[1] - apply (wp threadSet_valid_objs' threadSet_valid_queues'_no_state - threadSet_valid_queues_no_state - threadSet_pred_tcb_no_state | simp)+ - apply (rule_tac Q = "\r s. invs' s \ (\p. tptr \ set (ksReadyQueues s p)) \ sch_act_simple s - \ tcb_at' tptr s" in hoare_strengthen_post[rotated]) - apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) - apply (clarsimp simp:valid_tcb'_def) - apply (drule(1) bspec) - apply (clarsimp simp:tcb_cte_cases_def) + apply (wpsimp wp: hoare_drop_imps) + apply ((wpsimp wp: hoare_drop_imps | strengthen valid_objs'_valid_tcbs')+)[1] + apply (wpsimp wp: gts_wp) + apply wpsimp + apply ((wpsimp wp: hoare_vcg_imp_lift' ethread_set_not_queued_valid_queues hoare_vcg_all_lift + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+)[1] + apply (rule_tac Q="\_. valid_objs' and sym_heap_sched_pointers and valid_sched_pointers + and pspace_aligned' and pspace_distinct' + and (\s. sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" + in hoare_strengthen_post[rotated]) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def) + apply (wpsimp wp: threadSet_valid_objs' threadSet_sched_pointers + threadSet_valid_sched_pointers)+ + apply (rule_tac Q="\_ s. valid_queues s \ not_queued tptr s + \ pspace_aligned s \ pspace_distinct s \ valid_etcbs s + \ weak_valid_sched_action s" + in hoare_post_imp) + apply (fastforce simp: pred_tcb_at_def obj_at_def) + apply (wpsimp wp: tcb_dequeue_not_queued) + apply (rule_tac Q = "\_ s. 
invs' s \ obj_at' (Not \ tcbQueued) tptr s \ sch_act_simple s + \ tcb_at' tptr s" + in hoare_strengthen_post[rotated]) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) + apply (clarsimp simp: valid_tcb'_def obj_at'_def) + apply (drule (1) bspec) + apply (clarsimp simp: tcb_cte_cases_def cteSizeBits_def) apply fastforce - apply (wp hoare_vcg_all_lift Tcb_R.tcbSchedDequeue_not_in_queue)+ + apply (wp hoare_vcg_all_lift tcbSchedDequeue_not_queued)+ apply clarsimp apply (frule tcb_at_is_etcb_at) apply simp+ @@ -395,12 +405,11 @@ lemma setDomain_corres: simp: valid_sched_def valid_sched_action_def) done - lemma performInvocation_corres: "\ inv_relation i i'; call \ block \ \ corres (dc \ (=)) (einvs and valid_invocation i - and simple_sched_action + and schact_is_rct and ct_active and (\s. (\w w2 b c. i = Invocations_A.InvokeEndpoint w w2 b c) \ st_tcb_at simple (cur_thread s) s)) (invs' and sch_act_simple and valid_invocation' i' and ct_active') @@ -450,14 +459,14 @@ lemma performInvocation_corres: apply (clarsimp simp: liftME_def) apply (rule corres_guard_imp) apply (erule invokeTCB_corres) - apply (simp)+ + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] \ \domain cap\ apply (clarsimp simp: invoke_domain_def) apply (rule corres_guard_imp) apply (rule corres_split[OF setDomain_corres]) apply (rule corres_trivial, simp) apply (wp)+ - apply (clarsimp+)[2] + apply (fastforce+)[2] \ \CNodes\ apply clarsimp apply (rule corres_guard_imp) @@ -465,7 +474,7 @@ lemma performInvocation_corres: apply assumption apply (rule corres_trivial, simp add: returnOk_def) apply wp+ - apply (clarsimp+)[2] + apply ((clarsimp dest!: schact_is_rct_simple)+)[2] apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) apply (rule corres_guard_imp, rule performIRQControl_corres, simp+) apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) @@ -688,7 +697,7 @@ proof - apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) apply (rule PUC,simp) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (wp hoare_vcg_all_lift static_imp_wp | simp add:ball_conj_distrib)+ + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ done qed @@ -766,90 +775,71 @@ lemma doReply_invs[wp]: "\tcb_at' t and tcb_at' t' and cte_wp_at' (\cte. \grant. cteCap cte = ReplyCap t False grant) slot and invs' and sch_act_simple\ - doReplyTransfer t' t slot grant - \\rv. invs'\" + doReplyTransfer t' t slot grant + \\_. invs'\" apply (simp add: doReplyTransfer_def liftM_def) - apply (rule hoare_seq_ext [OF _ gts_sp']) - apply (rule hoare_seq_ext [OF _ assert_sp]) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ gts_sp']) + apply (rule bind_wp [OF _ assert_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (wp, wpc) - apply (wp) + apply wp apply (wp (once) sts_invs_minor'') - apply (simp) + apply simp apply (wp (once) sts_st_tcb') - apply (wp)[1] - apply (rule_tac Q="\rv s. invs' s - \ t \ ksIdleThread s - \ st_tcb_at' awaiting_reply' t s" + apply wp + apply (rule_tac Q="\_ s. 
invs' s \ t \ ksIdleThread s \ st_tcb_at' awaiting_reply' t s" in hoare_post_imp) - apply (clarsimp) - apply (frule_tac t=t in invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply clarsimp apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) - apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) - apply (clarsimp | drule(1) obj_at_conj')+ apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) apply (drule(1) pred_tcb_at_conj') apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") - apply (clarsimp) + apply clarsimp apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) apply (wp cteDeleteOne_reply_pred_tcb_at)+ - apply (clarsimp) + apply clarsimp apply (rule_tac Q="\_. (\s. t \ ksIdleThread s) - and cte_wp_at' (\cte. \grant. cteCap cte = capability.ReplyCap t False grant) slot" - in hoare_strengthen_post [rotated]) + and cte_wp_at' (\cte. \grant. cteCap cte + = capability.ReplyCap t False grant) slot" + in hoare_strengthen_post [rotated]) apply (fastforce simp: cte_wp_at'_def) - apply (wp) + apply wp apply (rule hoare_strengthen_post [OF doIPCTransfer_non_null_cte_wp_at']) apply (erule conjE) apply assumption apply (erule cte_wp_at_weakenE') apply (fastforce) - apply (wp sts_invs_minor'' sts_st_tcb' static_imp_wp) - apply (rule_tac Q="\rv s. invs' s \ sch_act_simple s - \ st_tcb_at' awaiting_reply' t s - \ t \ ksIdleThread s" - in hoare_post_imp) - apply (clarsimp) - apply (frule_tac t=t in invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (wp sts_invs_minor'' sts_st_tcb' hoare_weak_lift_imp) + apply (rule_tac Q="\_ s. invs' s \ sch_act_simple s + \ st_tcb_at' awaiting_reply' t s + \ t \ ksIdleThread s" + in hoare_post_imp) + apply clarsimp apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) - apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) - apply (clarsimp | drule(1) obj_at_conj')+ apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) apply (drule(1) pred_tcb_at_conj') apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") - apply (clarsimp) + apply clarsimp apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" - in pred_tcb'_weakenE) + in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) - apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 static_imp_wp + apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 hoare_weak_lift_imp | clarsimp simp add: inQ_def)+ apply (rule_tac Q="\_. 
invs' and tcb_at' t and sch_act_simple and st_tcb_at' awaiting_reply' t" in hoare_strengthen_post [rotated]) - apply (clarsimp) + apply clarsimp apply (rule conjI) - apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) - apply (rule conjI) - apply clarsimp - apply (clarsimp simp: obj_at'_def idle_tcb'_def pred_tcb_at'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def + idle_tcb'_def pred_tcb_at'_def) apply clarsimp apply (rule conjI) apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) apply (erule pred_tcb'_weakenE, clarsimp) - apply (rule conjI) apply (clarsimp simp : invs'_def valid_state'_def valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) - apply (rule conjI) - apply clarsimp - apply (frule invs'_not_runnable_not_queued) - apply (erule pred_tcb'_weakenE, clarsimp) - apply (frule (1) not_tcbQueued_not_ksQ) - apply simp - apply clarsimp apply (wp cteDeleteOne_reply_pred_tcb_at hoare_drop_imp hoare_allI)+ apply (clarsimp simp add: isReply_awaiting_reply' cte_wp_at_ctes_of) apply (auto dest!: st_tcb_idle'[rotated] simp:isCap_simps) @@ -859,35 +849,9 @@ lemma ct_active_runnable' [simp]: "ct_active' s \ ct_in_state' runnable' s" by (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) -lemma valid_irq_node_tcbSchedEnqueue[wp]: - "\\s. valid_irq_node' (irq_node' s) s \ tcbSchedEnqueue ptr - \\rv s'. valid_irq_node' (irq_node' s') s'\" - apply (rule hoare_pre) - apply (simp add:valid_irq_node'_def ) - apply (wp hoare_unless_wp hoare_vcg_all_lift | wps)+ - apply (simp add:tcbSchedEnqueue_def) - apply (wp hoare_unless_wp| simp)+ - apply (simp add:valid_irq_node'_def) - done - -lemma rescheduleRequired_valid_queues_but_ct_domain: - "\\s. Invariants_H.valid_queues s \ valid_objs' s - \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) \ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - done - -lemma rescheduleRequired_valid_queues'_but_ct_domain: - "\\s. valid_queues' s - \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) - \ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: valid_queues'_def)+ - done +crunches tcbSchedEnqueue + for valid_irq_node[wp]: "\s. valid_irq_node' (irq_node' s) s" + (rule: valid_irq_node_lift) lemma tcbSchedEnqueue_valid_action: "\\s. \x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s\ @@ -898,9 +862,10 @@ lemma tcbSchedEnqueue_valid_action: done abbreviation (input) "all_invs_but_sch_extra \ - \s. valid_pspace' s \ Invariants_H.valid_queues s \ + \s. valid_pspace' s \ sym_refs (state_refs_of' s) \ if_live_then_nonz_cap' s \ + sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_bitmaps s \ if_unsafe_then_cap' s \ valid_idle' s \ valid_global_refs' s \ @@ -913,35 +878,27 @@ abbreviation (input) "all_invs_but_sch_extra \ valid_machine_state' s \ cur_tcb' s \ untyped_ranges_zero' s \ - valid_queues' s \ pspace_domain_valid s \ + pspace_domain_valid s \ ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s)" lemma rescheduleRequired_all_invs_but_extra: - "\\s. all_invs_but_sch_extra s\ - rescheduleRequired \\_. invs'\" + "\all_invs_but_sch_extra\ rescheduleRequired \\_. 
invs'\" apply (simp add: invs'_def valid_state'_def) - apply (rule hoare_pre) - apply (wp add:rescheduleRequired_ct_not_inQ - rescheduleRequired_sch_act' - rescheduleRequired_valid_queues_but_ct_domain - rescheduleRequired_valid_queues'_but_ct_domain - valid_irq_node_lift valid_irq_handlers_lift'' valid_ioports_lift'' - irqs_masked_lift cur_tcb_lift) + apply (wpsimp wp: rescheduleRequired_ct_not_inQ rescheduleRequired_sch_act' + valid_irq_node_lift valid_irq_handlers_lift'') apply auto done lemma threadSet_all_invs_but_sch_extra: - shows "\ tcb_at' t and (\s. (\p. t \ set (ksReadyQueues s p))) and - all_invs_but_sch_extra and sch_act_simple and + shows "\ tcb_at' t and all_invs_but_sch_extra and sch_act_simple and K (ds \ maxDomain) \ threadSet (tcbDomain_update (\_. ds)) t \\rv. all_invs_but_sch_extra \" apply (rule hoare_gen_asm) - apply (rule hoare_pre) apply (wp threadSet_valid_pspace'T_P[where P = False and Q = \ and Q' = \]) - apply (simp add:tcb_cte_cases_def)+ + apply (simp add:tcb_cte_cases_def)+ apply (wp threadSet_valid_pspace'T_P threadSet_state_refs_of'T_P[where f'=id and P'=False and Q=\ and g'=id and Q'=\] @@ -954,18 +911,14 @@ lemma threadSet_all_invs_but_sch_extra: valid_ioports_lift'' threadSet_ctes_ofT threadSet_not_inQ - threadSet_valid_queues'_no_state threadSet_tcbDomain_update_ct_idle_or_in_cur_domain' - threadSet_valid_queues threadSet_valid_dom_schedule' threadSet_iflive'T threadSet_ifunsafe'T - untyped_ranges_zero_lift + untyped_ranges_zero_lift threadSet_sched_pointers threadSet_valid_sched_pointers | simp add:tcb_cte_cases_def cteCaps_of_def o_def)+ apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift threadSet_pred_tcb_no_state | simp)+ - apply (clarsimp simp:sch_act_simple_def o_def cteCaps_of_def) - apply (intro conjI) - apply fastforce+ + apply (fastforce simp: sch_act_simple_def o_def cteCaps_of_def) done lemma threadSet_not_curthread_ct_domain: @@ -982,21 +935,19 @@ lemma setDomain_invs': (\y. domain \ maxDomain))\ setDomain ptr domain \\y. invs'\" apply (simp add:setDomain_def ) - apply (wp add: hoare_when_wp static_imp_wp static_imp_conj_wp rescheduleRequired_all_invs_but_extra + apply (wp add: when_wp hoare_weak_lift_imp hoare_weak_lift_imp_conj rescheduleRequired_all_invs_but_extra tcbSchedEnqueue_valid_action hoare_vcg_if_lift2) apply (rule_tac Q = "\r s. all_invs_but_sch_extra s \ curThread = ksCurThread s \ (ptr \ curThread \ ct_not_inQ s \ sch_act_wf (ksSchedulerAction s) s \ ct_idle_or_in_cur_domain' s)" in hoare_strengthen_post[rotated]) apply (clarsimp simp:invs'_def valid_state'_def st_tcb_at'_def[symmetric] valid_pspace'_def) - apply (erule st_tcb_ex_cap'') apply simp - apply (case_tac st,simp_all)[1] apply (rule hoare_strengthen_post[OF hoare_vcg_conj_lift]) apply (rule threadSet_all_invs_but_sch_extra) prefer 2 apply clarsimp apply assumption - apply (wp static_imp_wp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain + apply (wp hoare_weak_lift_imp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain threadSet_tcbDomain_update_ct_not_inQ | simp)+ apply (rule_tac Q = "\r s. invs' s \ curThread = ksCurThread s \ sch_act_simple s \ domain \ maxDomain @@ -1008,17 +959,14 @@ lemma setDomain_invs': done lemma performInv_invs'[wp]: - "\invs' and sch_act_simple - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) - and ct_active' and valid_invocation' i\ - RetypeDecls_H.performInvocation block call i \\rv. 
invs'\" + "\invs' and sch_act_simple and ct_active' and valid_invocation' i\ + RetypeDecls_H.performInvocation block call i + \\_. invs'\" unfolding performInvocation_def apply (cases i) - apply ((clarsimp simp: simple_sane_strg sch_act_simple_def - ct_not_ksQ sch_act_sane_def - | wp tcbinv_invs' arch_performInvocation_invs' - setDomain_invs' - | rule conjI | erule active_ex_cap')+) + apply (clarsimp simp: simple_sane_strg sch_act_simple_def sch_act_sane_def + | wp tcbinv_invs' arch_performInvocation_invs' setDomain_invs' + | rule conjI | erule active_ex_cap')+ done lemma getSlotCap_to_refs[wp]: @@ -1100,7 +1048,7 @@ lemma lookupExtras_real_ctes[wp]: lemma lookupExtras_ctes[wp]: "\valid_objs'\ lookupExtraCaps t xs info \\rv s. \x \ set rv. cte_at' (snd x) s\,-" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule lookupExtras_real_ctes) apply (simp add: real_cte_at') done @@ -1182,11 +1130,14 @@ done lemmas set_thread_state_active_valid_sched = set_thread_state_runnable_valid_sched[simplified runnable_eq_active] -(*FIXME: move to NonDetMonadVCG.valid_validE_R *) +crunches reply_from_kernel + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + lemma handleInvocation_corres: "c \ b \ corres (dc \ dc) - (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (einvs and schact_is_rct and ct_active) (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') (handle_invocation c b) @@ -1229,28 +1180,28 @@ lemma handleInvocation_corres: apply simp apply (simp add: when_def) apply (rule conjI, rule impI) - apply (rule reply_from_kernel_tcb_at) - apply (rule impI, wp+) - apply simp+ - apply (wp hoare_drop_imps)+ - apply simp - apply wp - apply simp - apply (rule_tac Q="\rv. einvs and simple_sched_action and valid_invocation rve + apply (wp reply_from_kernel_tcb_at) + apply wpsimp + apply wp+ + apply simp + apply (strengthen invs_psp_aligned invs_distinct) + apply (simp cong: conj_cong) + apply (simp cong: rev_conj_cong) + apply (wpsimp wp: hoare_drop_imps)+ + apply (rule_tac Q="\rv. einvs and schact_is_rct and valid_invocation rve and (\s. thread = cur_thread s) and st_tcb_at active thread" in hoare_post_imp) apply (clarsimp simp: simple_from_active ct_in_state_def elim!: st_tcb_weakenE) - apply (wp sts_st_tcb_at' set_thread_state_simple_sched_action - set_thread_state_active_valid_sched) + apply (wp sts_st_tcb_at' set_thread_state_schact_is_rct + set_thread_state_active_valid_sched) apply (rule_tac Q="\rv. invs' and valid_invocation' rve' and (\s. thread = ksCurThread s) and st_tcb_at' active' thread and (\s. 
ksSchedulerAction s = ResumeCurrentThread)" in hoare_post_imp) apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) apply (clarsimp) apply (wp setThreadState_nonqueued_state_update setThreadState_st_tcb setThreadState_rct)[1] @@ -1261,14 +1212,14 @@ lemma handleInvocation_corres: apply (clarsimp simp: tcb_at_invs invs_valid_objs valid_tcb_state_def ct_in_state_def simple_from_active invs_mdb) - apply (clarsimp simp: msg_max_length_def word_bits_def) + apply (clarsimp simp: msg_max_length_def word_bits_def schact_is_rct_def + invs_psp_aligned invs_distinct) apply (erule st_tcb_ex_cap, clarsimp+) apply fastforce apply (clarsimp) apply (frule tcb_at_invs') apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def ct_not_inQ_def) - apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) apply (frule pred_tcb'_weakenE [where P=active' and P'=simple'], clarsimp) apply (frule(1) st_tcb_ex_cap'', fastforce) apply (clarsimp simp: valid_pspace'_def) @@ -1278,7 +1229,7 @@ lemma handleInvocation_corres: lemma ts_Restart_case_helper': "(case ts of Structures_H.Restart \ A | _ \ B) - = (if ts = Structures_H.Restart then A else B)" + = (if ts = Structures_H.Restart then A else B)" by (cases ts, simp_all) lemma gts_imp': @@ -1311,11 +1262,11 @@ lemma hinv_invs'[wp]: apply (simp add: handleInvocation_def split_def ts_Restart_case_helper') apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' - hoare_vcg_all_lift static_imp_wp) + hoare_vcg_all_lift hoare_weak_lift_imp) apply simp apply (intro conjI impI) apply (wp gts_imp' | simp)+ - apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R[rotated]) + apply (rule_tac Q'="\rv. invs'" in hoare_strengthen_postE_R[rotated]) apply clarsimp apply (subgoal_tac "thread \ ksIdleThread s", simp_all)[1] apply (fastforce elim!: pred_tcb'_weakenE st_tcb_ex_cap'') @@ -1328,11 +1279,8 @@ lemma hinv_invs'[wp]: and st_tcb_at' active' thread" in hoare_post_imp) apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) - apply (clarsimp) apply (wp sts_invs_minor' setThreadState_st_tcb setThreadState_rct | simp)+ apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (fastforce simp add: tcb_at_invs' ct_in_state'_def simple_sane_strg sch_act_simple_def @@ -1341,12 +1289,13 @@ lemma hinv_invs'[wp]: done crunch typ_at'[wp]: handleFault "\s. P (typ_at' T p s)" + (wp: crunch_wps) lemmas handleFault_typ_ats[wp] = typ_at_lifts [OF handleFault_typ_at'] lemma handleSend_corres: "corres (dc \ dc) - (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (einvs and schact_is_rct and ct_active) (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') (handle_send blocking) (handleSend blocking)" @@ -1431,7 +1380,7 @@ lemma cteDeleteOne_reply_cap_to''[wp]: cteDeleteOne slot \\rv. ex_nonz_cap_to' p\" apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) - apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule bind_wp [OF _ getCTE_sp]) apply (rule hoare_assume_pre) apply (subgoal_tac "isReplyCap (cteCap cte) \ isNullCap (cteCap cte)") apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv @@ -1475,7 +1424,6 @@ lemma handleRecv_isBlocking_corres': and (\s. ex_nonz_cap_to (cur_thread s) s)) (invs' and ct_in_state' simple' and sch_act_sane - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) and (\s. 
ex_nonz_cap_to' (ksCurThread s) s)) (handle_recv isBlocking) (handleRecv isBlocking)" (is "corres dc (?pre1) (?pre2) (handle_recv _) (handleRecv _)") @@ -1538,8 +1486,7 @@ lemma handleRecv_isBlocking_corres': lemma handleRecv_isBlocking_corres: "corres dc (einvs and ct_active) - (invs' and ct_active' and sch_act_sane and - (\s. \p. ksCurThread s \ set (ksReadyQueues s p))) + (invs' and ct_active' and sch_act_sane) (handle_recv isBlocking) (handleRecv isBlocking)" apply (rule corres_guard_imp) apply (rule handleRecv_isBlocking_corres') @@ -1554,49 +1501,34 @@ lemma lookupCap_refs[wp]: "\invs'\ lookupCap t ref \\rv s. \r\zobj_refs' rv. ex_nonz_cap_to' r s\,-" by (simp add: lookupCap_def split_def | wp | simp add: o_def)+ -lemma deleteCallerCap_ksQ_ct': - "\invs' and ct_in_state' simple' and sch_act_sane and - (\s. ksCurThread s \ set (ksReadyQueues s p) \ thread = ksCurThread s)\ - deleteCallerCap thread - \\rv s. thread \ set (ksReadyQueues s p)\" - apply (rule_tac Q="\rv s. thread = ksCurThread s \ ksCurThread s \ set (ksReadyQueues s p)" - in hoare_strengthen_post) - apply (wp deleteCallerCap_ct_not_ksQ) - apply auto - done - lemma hw_invs'[wp]: "\invs' and ct_in_state' simple' and sch_act_sane and (\s. ex_nonz_cap_to' (ksCurThread s) s) - and (\s. ksCurThread s \ ksIdleThread s) - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ + and (\s. ksCurThread s \ ksIdleThread s)\ handleRecv isBlocking \\r. invs'\" apply (simp add: handleRecv_def cong: if_cong) apply (rule hoare_pre) apply ((wp getNotification_wp | wpc | simp)+)[1] apply (clarsimp simp: ct_in_state'_def) apply ((wp deleteCallerCap_nonz_cap hoare_vcg_all_lift - deleteCallerCap_ksQ_ct' hoare_lift_Pf2[OF deleteCallerCap_simple deleteCallerCap_ct'] | wpc | simp)+)[1] apply simp apply (wp deleteCallerCap_nonz_cap hoare_vcg_all_lift - deleteCallerCap_ksQ_ct' hoare_lift_Pf2[OF deleteCallerCap_simple deleteCallerCap_ct'] | wpc | simp add: ct_in_state'_def whenE_def split del: if_split)+ apply (rule validE_validE_R) apply (rule_tac Q="\rv s. invs' s \ sch_act_sane s - \ (\p. ksCurThread s \ set (ksReadyQueues s p)) \ thread = ksCurThread s \ ct_in_state' simple' s \ ex_nonz_cap_to' thread s \ thread \ ksIdleThread s \ (\x \ zobj_refs' rv. ex_nonz_cap_to' x s)" and E="\_ _. True" - in hoare_post_impErr[rotated]) + in hoare_strengthen_postE[rotated]) apply (clarsimp simp: isCap_simps ct_in_state'_def pred_tcb_at' invs_valid_objs' sch_act_sane_not obj_at'_def projectKOs pred_tcb_at'_def) apply (assumption) @@ -1613,34 +1545,45 @@ lemma setSchedulerAction_obj_at'[wp]: by (wp, clarsimp elim!: obj_at'_pspaceI) lemma handleYield_corres: - "corres dc einvs (invs' and ct_active' and (\s. ksSchedulerAction s = ResumeCurrentThread)) handle_yield handleYield" + "corres dc + (einvs and ct_active) + (invs' and (\s. 
ksSchedulerAction s = ResumeCurrentThread)) + handle_yield handleYield" apply (clarsimp simp: handle_yield_def handleYield_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getCurThread_corres]) apply simp - apply (rule corres_split[OF tcbSchedDequeue_corres]) - apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) + apply (rule corres_split[OF tcbSchedAppend_corres], simp) apply (rule rescheduleRequired_corres) - apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues | simp add: )+ - apply (simp add: invs_def valid_sched_def valid_sched_action_def - cur_tcb_def tcb_at_is_etcb_at) - apply clarsimp - apply (frule ct_active_runnable') - apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def - valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def) - apply (erule(1) valid_objs_valid_tcbE[OF valid_pspace_valid_objs']) - apply (simp add:valid_tcb'_def) + apply (wpsimp wp: weak_sch_act_wf_lift_linear + | strengthen valid_objs'_valid_tcbs' valid_queues_in_correct_ready_q + valid_queues_ready_qs_distinct)+ + apply (fastforce simp: invs_def valid_sched_def valid_sched_action_def + tcb_at_is_etcb_at valid_state_def valid_pspace_def ct_in_state_def + runnable_eq_active) + apply (fastforce simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def + valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def) + done + +lemma tcbSchedAppend_ct_in_state'[wp]: + "tcbSchedAppend t \ct_in_state' test\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]; wp) done lemma hy_invs': "\invs' and ct_active'\ handleYield \\r. invs' and ct_active'\" apply (simp add: handleYield_def) - apply (wp ct_in_state_thread_state_lift' - rescheduleRequired_all_invs_but_ct_not_inQ - tcbSchedAppend_invs_but_ct_not_inQ' | simp)+ - apply (clarsimp simp add: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def - valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def - ) + apply (wpsimp wp: ct_in_state_thread_state_lift' rescheduleRequired_all_invs_but_ct_not_inQ) + apply (rule_tac Q="\_. all_invs_but_ct_not_inQ' and ct_active'" in hoare_post_imp) + apply clarsimp + apply (subst pred_conj_def) + apply (rule hoare_vcg_conj_lift) + apply (rule tcbSchedAppend_all_invs_but_ct_not_inQ') + apply wpsimp + apply wpsimp + apply wpsimp apply (simp add:ct_active_runnable'[unfolded ct_in_state'_def]) done @@ -1737,7 +1680,7 @@ lemmas cteDeleteOne_st_tcb_at_simple'[wp] = cteDeleteOne_st_tcb_at[where P=simple', simplified] crunch st_tcb_at_simple'[wp]: handleReply "st_tcb_at' simple' t'" - (wp: hoare_post_taut crunch_wps sts_st_tcb_at'_cases + (wp: hoare_TrueI crunch_wps sts_st_tcb_at'_cases threadSet_pred_tcb_no_state ignore: setThreadState) @@ -1763,18 +1706,17 @@ lemma hr_ct_active'[wp]: "\invs' and ct_active'\ handleReply \\rv. 
ct_active'\" apply (simp add: handleReply_def getSlotCap_def getCurThread_def getThreadCallerSlot_def locateSlot_conv) - apply (rule hoare_seq_ext) - apply (rule ct_in_state'_decomp) - apply ((wp hoare_drop_imps | wpc | simp)+)[1] - apply (subst haskell_assert_def) - apply (wp hoare_vcg_all_lift getCTE_wp doReplyTransfer_st_tcb_at_active - | wpc | simp)+ + apply (rule bind_wp, rename_tac cur_thread) + apply (rule_tac t=cur_thread in ct_in_state'_decomp) + apply (wpsimp wp: getCTE_wp) + apply (fastforce simp: cte_wp_at_ctes_of) + apply (wpsimp wp: getCTE_wp doReplyTransfer_st_tcb_at_active)+ apply (fastforce simp: ct_in_state'_def cte_wp_at_ctes_of valid_cap'_def - dest: ctes_of_valid') + dest: ctes_of_valid') done lemma handleCall_corres: - "corres (dc \ dc) (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + "corres (dc \ dc) (einvs and schact_is_rct and ct_active) (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') @@ -1836,7 +1778,7 @@ lemma handleReply_sane: "\sch_act_sane\ handleReply \\rv. sch_act_sane\" apply (simp add: handleReply_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) apply (rule hoare_pre) - apply (wp haskell_assert_wp doReplyTransfer_sane getCTE_wp'| wpc)+ + apply (wp doReplyTransfer_sane getCTE_wp'| wpc)+ apply (clarsimp simp: cte_wp_at_ctes_of) done @@ -1852,75 +1794,6 @@ lemma handleReply_nonz_cap_to_ct: crunch ksQ[wp]: handleFaultReply "\s. P (ksReadyQueues s p)" -lemma doReplyTransfer_ct_not_ksQ: - "\ invs' and sch_act_simple - and tcb_at' thread and tcb_at' word - and ct_in_state' simple' - and (\s. ksCurThread s \ word) - and (\s. \p. ksCurThread s \ set(ksReadyQueues s p))\ - doReplyTransfer thread word callerSlot g - \\rv s. \p. ksCurThread s \ set(ksReadyQueues s p)\" -proof - - have astct: "\t p. - \(\s. ksCurThread s \ set(ksReadyQueues s p) \ sch_act_sane s) - and (\s. ksCurThread s \ t)\ - possibleSwitchTo t \\rv s. ksCurThread s \ set(ksReadyQueues s p)\" - apply (rule hoare_weaken_pre) - apply (wps possibleSwitchTo_ct') - apply (wp possibleSwitchTo_ksQ') - apply (clarsimp simp: sch_act_sane_def) - done - have stsct: "\t st p. - \(\s. ksCurThread s \ set(ksReadyQueues s p)) and sch_act_simple\ - setThreadState st t - \\rv s. ksCurThread s \ set(ksReadyQueues s p)\" - apply (rule hoare_weaken_pre) - apply (wps setThreadState_ct') - apply (wp hoare_vcg_all_lift sts_ksQ) - apply (clarsimp) - done - show ?thesis - apply (simp add: doReplyTransfer_def) - apply (wp, wpc) - apply (wp astct stsct hoare_vcg_all_lift - cteDeleteOne_ct_not_ksQ hoare_drop_imp - hoare_lift_Pf2 [OF cteDeleteOne_sch_act_not cteDeleteOne_ct'] - hoare_lift_Pf2 [OF doIPCTransfer_pred_tcb_at' doIPCTransfer_ct'] - hoare_lift_Pf2 [OF doIPCTransfer_ksQ doIPCTransfer_ct'] - hoare_lift_Pf2 [OF threadSet_ksQ threadSet_ct] - hoare_lift_Pf2 [OF handleFaultReply_ksQ handleFaultReply_ct'] - | simp add: ct_in_state'_def)+ - apply (fastforce simp: sch_act_simple_def sch_act_sane_def ct_in_state'_def)+ - done -qed - -lemma handleReply_ct_not_ksQ: - "\invs' and sch_act_simple - and ct_in_state' simple' - and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ - handleReply - \\rv s. \p. ksCurThread s \ set (ksReadyQueues s p)\" - apply (simp add: handleReply_def del: split_paired_All) - apply (subst haskell_assert_def) - apply (wp | wpc)+ - apply (wp doReplyTransfer_ct_not_ksQ getThreadCallerSlot_inv)+ - apply (rule_tac Q="\cap. - (\s. \p. ksCurThread s \ set(ksReadyQueues s p)) - and invs' - and sch_act_simple - and (\s. 
thread = ksCurThread s) - and tcb_at' thread - and ct_in_state' simple' - and cte_wp_at' (\c. cteCap c = cap) callerSlot" - in hoare_post_imp) - apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def - cte_wp_at_ctes_of valid_cap'_def - dest!: ctes_of_valid') - apply (wp getSlotCap_cte_wp_at getThreadCallerSlot_inv)+ - apply (clarsimp) - done - -crunch valid_etcbs[wp]: possible_switch_to "valid_etcbs" crunch valid_etcbs[wp]: handle_recv "valid_etcbs" (wp: crunch_wps simp: crunch_simps) @@ -1933,11 +1806,10 @@ lemma handleReply_handleRecv_corres: apply (rule corres_split_nor[OF handleReply_corres]) apply (rule handleRecv_isBlocking_corres') apply (wp handle_reply_nonz_cap_to_ct handleReply_sane - handleReply_nonz_cap_to_ct handleReply_ct_not_ksQ handle_reply_valid_sched)+ + handleReply_nonz_cap_to_ct handle_reply_valid_sched)+ apply (fastforce simp: ct_in_state_def ct_in_state'_def simple_sane_strg elim!: st_tcb_weakenE st_tcb_ex_cap') apply (clarsimp simp: ct_in_state'_def) - apply (frule(1) ct_not_ksQ) apply (fastforce elim: pred_tcb'_weakenE) done @@ -1945,7 +1817,6 @@ lemma handleHypervisorFault_corres: "corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread and (%_. valid_fault f)) (invs' and sch_act_not thread - and (\s. \p. thread \ set(ksReadyQueues s p)) and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) (handle_hypervisor_fault w fault) (handleHypervisorFault w fault)" apply (cases fault; clarsimp simp add: handleHypervisorFault_def returnOk_def2) @@ -1954,21 +1825,20 @@ lemma handleHypervisorFault_corres: (* FIXME: move *) lemma handleEvent_corres: "corres (dc \ dc) (einvs and (\s. event \ Interrupt \ ct_running s) and - (\s. scheduler_action s = resume_cur_thread)) + schact_is_rct) (invs' and (\s. event \ Interrupt \ ct_running' s) and (\s. ksSchedulerAction s = ResumeCurrentThread)) (handle_event event) (handleEvent event)" (is "?handleEvent_corres") proof - have hw: - "\isBlocking. corres dc (einvs and ct_running and (\s. scheduler_action s = resume_cur_thread)) + "\isBlocking. corres dc (einvs and ct_running and schact_is_rct) (invs' and ct_running' and (\s. ksSchedulerAction s = ResumeCurrentThread)) (handle_recv isBlocking) (handleRecv isBlocking)" apply (rule corres_guard_imp [OF handleRecv_isBlocking_corres]) apply (clarsimp simp: ct_in_state_def ct_in_state'_def - elim!: st_tcb_weakenE pred_tcb'_weakenE - dest!: ct_not_ksQ)+ + elim!: st_tcb_weakenE pred_tcb'_weakenE)+ done show ?thesis apply (case_tac event) @@ -1983,7 +1853,7 @@ proof - corres_guard_imp[OF handleCall_corres] corres_guard_imp[OF handleYield_corres] active_from_running active_from_running' - simp: simple_sane_strg)[8] + simp: simple_sane_strg schact_is_rct_def)[8] apply (rule corres_underlying_split) apply (rule corres_guard_imp[OF getCurThread_corres], simp+) apply (rule handleFault_corres) @@ -1994,7 +1864,6 @@ proof - simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] @@ -2007,12 +1876,11 @@ proof - simp: ct_in_state_def valid_fault_def) apply wp apply clarsimp - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] apply (rule corres_guard_imp) - apply (rule corres_split_eqr[where R="\rv. einvs" + apply (rule corres_split_eqr[where R="\_. einvs" and R'="\rv s. \x. 
rv = Some x \ R'' x s" for R'']) apply (rule corres_machine_op) @@ -2022,10 +1890,7 @@ proof - apply (rule handleInterrupt_corres) apply (wp hoare_vcg_all_lift doMachineOp_getActiveIRQ_IRQ_active' - | simp | simp add: imp_conjR | wp (once) hoare_drop_imps)+ - apply force - apply simp apply (simp add: invs'_def valid_state'_def) apply (rule_tac corres_underlying_split) apply (rule corres_guard_imp, rule getCurThread_corres, simp+) @@ -2041,7 +1906,6 @@ proof - apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (fastforce simp: simple_sane_strg sch_act_simple_def ct_in_state'_def elim: st_tcb_ex_cap'' pred_tcb'_weakenE) apply (rule corres_underlying_split) @@ -2053,7 +1917,6 @@ proof - simp: ct_in_state_def) apply wp apply (clarsimp) - apply (frule(1) ct_not_ksQ) apply (auto simp: ct_in_state'_def sch_act_simple_def sch_act_sane_def elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] @@ -2141,10 +2004,8 @@ proof - apply (rename_tac syscall) apply (case_tac syscall, (wp handleReply_sane handleReply_nonz_cap_to_ct handleReply_ksCurThread - handleReply_ct_not_ksQ | clarsimp simp: active_from_running' simple_from_running' simple_sane_strg simp del: split_paired_All | rule conjI active_ex_cap' - | drule ct_not_ksQ[rotated] | strengthen nidle)+) apply (rule hoare_strengthen_post, rule hoare_weaken_pre, @@ -2156,7 +2017,6 @@ proof - | erule pred_tcb'_weakenE st_tcb_ex_cap'' | clarsimp simp: tcb_at_invs ct_in_state'_def simple_sane_strg sch_act_simple_def | drule st_tcb_at_idle_thread' - | drule ct_not_ksQ[rotated] | wpc | wp (once) hoare_drop_imps)+ done qed @@ -2192,7 +2052,6 @@ lemma hi_IRQInactive: -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (simp add: handleInvocation_def split_def) apply (wp syscall_valid' retype_pi_IRQInactive) - apply simp_all done lemma handleSend_IRQInactive: diff --git a/proof/refine/X64/TcbAcc_R.thy b/proof/refine/X64/TcbAcc_R.thy index ed09719ae8..f6a2a0e086 100644 --- a/proof/refine/X64/TcbAcc_R.thy +++ b/proof/refine/X64/TcbAcc_R.thy @@ -11,10 +11,8 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) declare if_weak_cong [cong] -declare result_in_set_wp[wp] declare hoare_in_monad_post[wp] declare trans_state_update'[symmetric,simp] -declare empty_fail_sequence_x[simp] declare storeWordUser_typ_at' [wp] (* Auxiliaries and basic properties of priority bitmap functions *) @@ -51,7 +49,7 @@ lemma isHighestPrio_def': "isHighestPrio d p = gets (\s. 
ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' apply (rule ext) - apply (clarsimp simp: gets_def bind_assoc return_def NonDetMonad.bind_def get_def + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def split: if_splits) done @@ -60,10 +58,8 @@ lemma getHighestPrio_inv[wp]: unfolding bitmap_fun_defs by simp lemma valid_bitmapQ_bitmapQ_simp: - "\ valid_bitmapQ s \ \ - bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" - unfolding valid_bitmapQ_def - by simp + "valid_bitmapQ s \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (simp add: valid_bitmapQ_def) lemma prioToL1Index_l1IndexToPrio_or_id: "\ unat (w'::priority) < 2 ^ wordRadix ; w < 2^(size w' - wordRadix) \ @@ -86,35 +82,18 @@ lemma l1IndexToPrio_wordRadix_mask[simp]: unfolding l1IndexToPrio_def by (simp add: wordRadix_def') -definition - (* when in the middle of updates, a particular queue might not be entirely valid *) - valid_queues_no_bitmap_except :: "machine_word \ kernel_state \ bool" -where - "valid_queues_no_bitmap_except t' \ \s. - (\d p. (\t \ set (ksReadyQueues s (d, p)). t \ t' \ obj_at' (inQ d p and runnable' \ tcbState) t s) - \ distinct (ksReadyQueues s (d, p)) - \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" - -lemma valid_queues_no_bitmap_exceptI[intro]: - "valid_queues_no_bitmap s \ valid_queues_no_bitmap_except t s" - unfolding valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def - by simp - lemma st_tcb_at_coerce_abstract: assumes t: "st_tcb_at' P t c" assumes sr: "(a, c) \ state_relation" shows "st_tcb_at (\st. \st'. thread_state_relation st st' \ P st') t a" using assms apply (clarsimp simp: state_relation_def pred_tcb_at'_def obj_at'_def - projectKOs objBits_simps) - apply (erule(1) pspace_dom_relatedE) - apply (erule(1) obj_relation_cutsE, simp_all) - apply (clarsimp simp: st_tcb_at_def obj_at_def other_obj_relation_def - tcb_relation_def - split: Structures_A.kernel_object.split_asm if_split_asm - X64_A.arch_kernel_obj.split_asm)+ - apply fastforce - done + projectKOs) + apply (erule (1) pspace_dom_relatedE) + apply (erule (1) obj_relation_cutsE, simp_all) + by (fastforce simp: st_tcb_at_def obj_at_def other_obj_relation_def tcb_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm)+ lemma st_tcb_at_runnable_coerce_concrete: assumes t: "st_tcb_at runnable t a" @@ -130,39 +109,13 @@ lemma st_tcb_at_runnable_coerce_concrete: apply (case_tac "tcb_state tcb"; simp) done -lemma pspace_relation_tcb_at': - assumes p: "pspace_relation (kheap a) (ksPSpace c)" - assumes t: "tcb_at t a" - assumes aligned: "pspace_aligned' c" - assumes distinct: "pspace_distinct' c" - shows "tcb_at' t c" using assms - apply (clarsimp simp: obj_at_def projectKOs) - apply (drule(1) pspace_relation_absD) - apply (clarsimp simp: is_tcb other_obj_relation_def) - apply (simp split: kernel_object.split_asm) - apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb], simp) - apply (erule obj_at'_weakenE) - apply simp - done - -lemma valid_objs_valid_tcbE: "\s t.\ valid_objs' s; tcb_at' t s; \tcb. valid_tcb' tcb s \ R s tcb \ \ obj_at' (R s) t s" +lemma valid_objs_valid_tcbE: + "\s t.\ valid_objs' s; tcb_at' t s; \tcb. 
valid_tcb' tcb s \ R s tcb \ \ obj_at' (R s) t s" apply (clarsimp simp add: projectKOs valid_objs'_def ran_def typ_at'_def ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) apply (fastforce simp: projectKO_def projectKO_opt_tcb return_def valid_tcb'_def) done -lemma valid_objs'_maxDomain: - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbDomain tcb \ maxDomain) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) - done - -lemma valid_objs'_maxPriority: - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbPriority tcb \ maxPriority) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) - done - lemma doMachineOp_irq_states': assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" shows "\valid_irq_states'\ doMachineOp f \\rv. valid_irq_states'\" @@ -244,7 +197,7 @@ lemma preemptionPoint_irq [wp]: "\valid_irq_states'\ preemptionPoint -, \\irq s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" apply (simp add: preemptionPoint_def setWorkUnits_def modifyWorkUnits_def getWorkUnits_def) - apply (wp hoare_whenE_wp|wpc)+ + apply (wp whenE_wp|wpc)+ apply (rule hoare_post_imp) prefer 2 apply (rule doMachineOp_getActiveIRQ_IRQ_active) @@ -259,67 +212,217 @@ lemma updateObject_tcb_inv: "\P\ updateObject (obj::tcb) ko p q n \\rv. P\" by simp (rule updateObject_default_inv) +lemma st_tcb_at_runnable_cross: + "\ st_tcb_at runnable t s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ st_tcb_at' runnable' t s'" + apply (frule (1) pspace_distinct_cross, fastforce simp: state_relation_def) + apply (frule pspace_aligned_cross, fastforce simp: state_relation_def) + apply (prop_tac "tcb_at t s", clarsimp simp: st_tcb_at_def obj_at_def is_tcb) + apply (drule (2) tcb_at_cross, fastforce simp: state_relation_def) + apply (erule (2) st_tcb_at_runnable_coerce_concrete) + done + +lemma cur_tcb_cross: + "\ cur_tcb s; pspace_aligned s; pspace_distinct s; (s,s') \ state_relation \ \ cur_tcb' s'" + apply (clarsimp simp: cur_tcb'_def cur_tcb_def state_relation_def) + apply (erule (3) tcb_at_cross) + done + +lemma valid_objs_valid_tcbE': + assumes "valid_objs' s" + "tcb_at' t s" + "\tcb. ko_at' tcb t s \ valid_tcb' tcb s \ R s tcb" + shows "obj_at' (R s) t s" + using assms + apply (clarsimp simp add: projectKOs valid_objs'_def ran_def typ_at'_def + ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) + apply (fastforce simp: projectKO_def projectKO_opt_tcb return_def valid_tcb'_def) + done + +lemma valid_tcb'_tcbDomain_update: + "new_dom \ maxDomain \ + \tcb. valid_tcb' tcb s \ valid_tcb' (tcbDomain_update (\_. new_dom) tcb) s" + unfolding valid_tcb'_def + apply (clarsimp simp: tcb_cte_cases_def objBits_simps') + done + +lemma valid_tcb'_tcbState_update: + "\valid_tcb_state' st s; valid_tcb' tcb s\ \ + valid_tcb' (tcbState_update (\_. st) tcb) s" + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def valid_tcb_state'_def objBits_simps') + done + +definition valid_tcbs' :: "kernel_state \ bool" where + "valid_tcbs' s' \ \ptr tcb. ksPSpace s' ptr = Some (KOTCB tcb) \ valid_tcb' tcb s'" + +lemma valid_objs'_valid_tcbs'[elim!]: + "valid_objs' s \ valid_tcbs' s" + by (auto simp: valid_objs'_def valid_tcbs'_def valid_obj'_def split: kernel_object.splits) + +lemma invs'_valid_tcbs'[elim!]: + "invs' s \ valid_tcbs' s" + by (fastforce del: valid_objs'_valid_tcbs' intro: valid_objs'_valid_tcbs') + +lemma valid_tcbs'_maxDomain: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. 
tcbDomain tcb \ maxDomain) t s" + by (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def projectKOs) + +lemmas valid_objs'_maxDomain = valid_tcbs'_maxDomain[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_maxPriority: + "\s t. \ valid_tcbs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbPriority tcb \ maxPriority) t s" + by (clarsimp simp: valid_tcbs'_def obj_at'_def valid_tcb'_def projectKOs) + +lemmas valid_objs'_maxPriority = valid_tcbs'_maxPriority[OF valid_objs'_valid_tcbs'] + +lemma valid_tcbs'_obj_at': + assumes "valid_tcbs' s" + "tcb_at' t s" + "\tcb. ko_at' tcb t s \ valid_tcb' tcb s \ R s tcb" + shows "obj_at' (R s) t s" + using assms + apply (clarsimp simp add: valid_tcbs'_def ran_def typ_at'_def projectKOs + ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) + done + +lemma update_valid_tcb'[simp]: + "\f. valid_tcb' tcb (ksReadyQueuesL1Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueuesL2Bitmap_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksReadyQueues_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksSchedulerAction_update f s) = valid_tcb' tcb s" + "\f. valid_tcb' tcb (ksDomainTime_update f s) = valid_tcb' tcb s" + by (auto simp: valid_tcb'_def valid_tcb_state'_def valid_bound_tcb'_def valid_bound_ntfn'_def + split: option.splits thread_state.splits) + +lemma update_valid_tcbs'[simp]: + "\f. valid_tcbs' (ksReadyQueuesL1Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueuesL2Bitmap_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksReadyQueues_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksSchedulerAction_update f s) = valid_tcbs' s" + "\f. valid_tcbs' (ksDomainTime_update f s) = valid_tcbs' s" + by (simp_all add: valid_tcbs'_def) + lemma setObject_update_TCB_corres': - assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" - assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" - assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" + assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'" + assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb" + assumes tables': "\(getF, v) \ ran tcb_cte_cases. 
getF new_tcb' = getF tcb'" + assumes sched_pointers: "tcbSchedPrev new_tcb' = tcbSchedPrev tcb'" + "tcbSchedNext new_tcb' = tcbSchedNext tcb'" + assumes flag: "tcbQueued new_tcb' = tcbQueued tcb'" assumes r: "r () ()" - assumes exst: "exst_same tcb' tcbu'" - shows "corres r (ko_at (TCB tcb) add) - (ko_at' tcb' add) - (set_object add (TCB tcbu)) (setObject add tcbu')" - apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' tcbu'" in corres_req) + assumes exst: "exst_same tcb' new_tcb'" + shows "corres r (ko_at (TCB tcb) ptr) (ko_at' tcb' ptr) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" + apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' new_tcb'" in corres_req) apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) apply (frule(1) pspace_relation_absD) - apply (clarsimp simp: projectKOs other_obj_relation_def exst) - apply (rule corres_guard_imp) - apply (rule corres_rel_imp) - apply (rule setObject_other_corres[where P="(=) tcb'"]) - apply (rule ext)+ - apply simp - defer - apply (simp add: is_other_obj_relation_type_def - projectKOs objBits_simps' - other_obj_relation_def tcbs r)+ - apply (fastforce elim!: obj_at_weakenE dest: bspec[OF tables]) - apply (subst(asm) eq_commute, assumption) - apply (clarsimp simp: projectKOs obj_at'_def objBits_simps) - apply (subst map_to_ctes_upd_tcb, assumption+) - apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) - apply (subst if_not_P) - apply (fastforce dest: bspec [OF tables', OF ranI]) - apply simp + apply (clarsimp simp: tcb_relation_cut_def exst) + apply (clarsimp simp: projectKOs tcb_relation_cut_def exst) + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp: obj_at'_def) + apply (unfold set_object_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def projectKOs obj_at_def + updateObject_default_def in_magnitude_check obj_at'_def) + apply (rename_tac s s' t') + apply (prop_tac "t' = s'") + apply (clarsimp simp: magnitudeCheck_def in_monad split: option.splits) + apply (drule singleton_in_magnitude_check) + apply (prop_tac "map_to_ctes ((ksPSpace s') (ptr \ injectKO new_tcb')) + = map_to_ctes (ksPSpace s')") + apply (frule_tac tcb=new_tcb' and tcb=tcb' in map_to_ctes_upd_tcb) + apply (clarsimp simp: objBits_simps) + apply (clarsimp simp: objBits_simps ps_clear_def3 field_simps objBits_defs mask_def) + apply (insert tables')[1] + apply (rule ext) + apply (clarsimp split: if_splits) + apply blast + apply (prop_tac "obj_at (same_caps (TCB new_tcb)) ptr s") + using tables + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def assms) + apply (clarsimp simp add: state_relation_def) + apply (subst conj_assoc[symmetric]) + apply (extract_conjunct \match conclusion in "ghost_relation _ _ _" \ -\) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x=ptr in allE)+ + apply clarsimp + apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply clarsimp + apply (rule conjI) + apply (simp only: pspace_relation_def simp_thms + pspace_dom_update[where x="kernel_object.TCB _" + and v="kernel_object.TCB _", + simplified a_type_def, simplified]) + apply (rule conjI) + using assms + apply (simp only: dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + 
apply (clarsimp simp: tcb_relation_cut_def split: if_split_asm kernel_object.split_asm) + apply (rename_tac aa ba) + apply (drule_tac x="(aa, ba)" in bspec, simp) + apply clarsimp + apply (frule_tac ko'="kernel_object.TCB tcb" and x'=ptr in obj_relation_cut_same_type) + apply (simp add: tcb_relation_cut_def)+ + apply clarsimp + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule (1) bspec) + apply (insert exst) + apply (clarsimp simp: etcb_relation_def exst_same_def) + apply (extract_conjunct \match conclusion in "ready_queues_relation_2 _ _ _ _ _" \ -\) + apply (insert sched_pointers flag exst) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext new_tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev new_tcb') = tcbSchedPrevs_of s'") + apply (fastforce simp: opt_map_def) + apply (clarsimp simp: ready_queue_relation_def opt_pred_def opt_map_def exst_same_def inQ_def + split: option.splits) + apply (metis (no_types, lifting) tcb_of'_TCB) + apply (clarsimp simp: fun_upd_def caps_of_state_after_update cte_wp_at_after_update swp_def + obj_at_def) done lemma setObject_update_TCB_corres: - "\ tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'; - \(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb; - \(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'; - r () (); exst_same tcb' tcbu'\ - \ corres r (\s. get_tcb add s = Some tcb) - (\s'. (tcb', s') \ fst (getObject add s')) - (set_object add (TCB tcbu)) (setObject add tcbu')" + "\tcb_relation tcb tcb' \ tcb_relation new_tcb new_tcb'; + \(getF, v) \ ran tcb_cap_cases. getF new_tcb = getF tcb; + \(getF, v) \ ran tcb_cte_cases. getF new_tcb' = getF tcb'; + tcbSchedPrev new_tcb' = tcbSchedPrev tcb'; tcbSchedNext new_tcb' = tcbSchedNext tcb'; + tcbQueued new_tcb' = tcbQueued tcb'; exst_same tcb' new_tcb'; + r () ()\ \ + corres r + (\s. get_tcb ptr s = Some tcb) (\s'. (tcb', s') \ fst (getObject ptr s')) + (set_object ptr (TCB new_tcb)) (setObject ptr new_tcb')" apply (rule corres_guard_imp) - apply (erule (3) setObject_update_TCB_corres', force) - apply fastforce - apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def - loadObject_default_def projectKOs objBits_simps' - in_magnitude_check) + apply (erule (7) setObject_update_TCB_corres') + apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def projectKOs + loadObject_default_def objBits_simps' in_magnitude_check)+ done lemma getObject_TCB_corres: - "corres tcb_relation (tcb_at t) (tcb_at' t) + "corres tcb_relation (tcb_at t and pspace_aligned and pspace_distinct) \ (gets_the (get_tcb t)) (getObject t)" + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) apply (rule corres_guard_imp) apply (rule corres_gets_the) apply (rule corres_get_tcb) apply (simp add: tcb_at_def) - apply assumption + apply simp done lemma threadGet_corres: assumes x: "\tcb tcb'. 
tcb_relation tcb tcb' \ r (f tcb) (f' tcb')" - shows "corres r (tcb_at t) (tcb_at' t) (thread_get f t) (threadGet f' t)" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get f t) (threadGet f' t)" apply (simp add: thread_get_def threadGet_def) apply (fold liftM_def) apply simp @@ -341,7 +444,8 @@ lemma ball_tcb_cte_casesI: by (simp add: tcb_cte_cases_def) lemma all_tcbI: - "\ \a b c d e f g h i j k l m n p q. P (Thread a b c d e f g h i j k l m n p q) \ \ \tcb. P tcb" + "\ \a b c d e f g h i j k l m n p q r s. P (Thread a b c d e f g h i j k l m n p q r s) \ + \ \tcb. P tcb" by (rule allI, case_tac tcb, simp) lemma threadset_corresT: @@ -350,18 +454,23 @@ lemma threadset_corresT: assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (f' tcb) = getF tcb" + assumes sched_pointers: "\tcb. tcbSchedPrev (f' tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (f' tcb) = tcbSchedNext tcb" + assumes flag: "\tcb. tcbQueued (f' tcb) = tcbQueued tcb" assumes e: "\tcb'. exst_same tcb' (f' tcb')" - shows "corres dc (tcb_at t) - (tcb_at' t) - (thread_set f t) (threadSet f' t)" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_set f t) (threadSet f' t)" apply (simp add: thread_set_def threadSet_def) apply (rule corres_guard_imp) apply (rule corres_split[OF getObject_TCB_corres]) apply (rule setObject_update_TCB_corres') - apply (erule x) - apply (rule y) - apply (clarsimp simp: bspec_split [OF spec [OF z]]) - apply fastforce + apply (erule x) + apply (rule y) + apply (clarsimp simp: bspec_split [OF spec [OF z]]) + apply fastforce + apply (rule sched_pointers) + apply (rule sched_pointers) + apply (rule flag) apply simp apply (rule e) apply wp+ @@ -374,33 +483,26 @@ lemma threadset_corresT: lemmas threadset_corres = threadset_corresT [OF _ _ all_tcbI, OF _ ball_tcb_cap_casesI ball_tcb_cte_casesI] -lemma pspace_relation_tcb_at: - assumes p: "pspace_relation (kheap a) (ksPSpace c)" - assumes t: "tcb_at' t c" - shows "tcb_at t a" using assms - apply (clarsimp simp: obj_at'_def projectKOs) - apply (erule(1) pspace_dom_relatedE) - apply (erule(1) obj_relation_cutsE) - apply (clarsimp simp: other_obj_relation_def is_tcb obj_at_def - split: Structures_A.kernel_object.split_asm if_split_asm - X64_A.arch_kernel_obj.split_asm)+ - done +lemmas pspace_relation_tcb_at = tcb_at'_cross lemma threadSet_corres_noopT: assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ tcb_relation tcb (fn tcb')" assumes y: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (fn tcb) = getF tcb" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" assumes e: "\tcb'. exst_same tcb' (fn tcb')" - shows "corres dc \ (tcb_at' t) - (return v) (threadSet fn t)" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (return v) (threadSet fn t)" proof - have S: "\t s. tcb_at t s \ return v s = (thread_set id t >>= (\x. 
return v)) s" apply (clarsimp simp: tcb_at_def) apply (simp add: return_def thread_set_def gets_the_def assert_def assert_opt_def simpler_gets_def set_object_def get_object_def put_def get_def bind_def) - apply (subgoal_tac "kheap s(t \ TCB tcb) = kheap s", simp) + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s", simp) apply (simp add: map_upd_triv get_tcb_SomeD)+ done show ?thesis @@ -409,19 +511,15 @@ proof - defer apply (subst bind_return [symmetric], rule corres_underlying_split [OF threadset_corresT]) - apply (simp add: x) - apply simp - apply (rule y) + apply (simp add: x) + apply simp + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) apply (rule e) apply (rule corres_noop [where P=\ and P'=\]) - apply simp - apply (rule no_fail_pre, wpsimp+)[1] - apply wp+ - apply simp - apply (erule pspace_relation_tcb_at[rotated]) - apply clarsimp - apply simp - apply simp + apply wpsimp+ done qed @@ -435,14 +533,20 @@ lemma threadSet_corres_noop_splitT: getF (fn tcb) = getF tcb" assumes z: "corres r P Q' m m'" assumes w: "\P'\ threadSet fn t \\x. Q'\" + assumes s: "\tcb'. tcbSchedPrev (fn tcb') = tcbSchedPrev tcb'" + "\tcb'. tcbSchedNext (fn tcb') = tcbSchedNext tcb'" + assumes f: "\tcb'. tcbQueued (fn tcb') = tcbQueued tcb'" assumes e: "\tcb'. exst_same tcb' (fn tcb')" - shows "corres r P (tcb_at' t and P') - m (threadSet fn t >>= (\rv. m'))" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct and P) P' + m (threadSet fn t >>= (\rv. m'))" apply (rule corres_guard_imp) apply (subst return_bind[symmetric]) apply (rule corres_split_nor[OF threadSet_corres_noopT]) - apply (simp add: x) - apply (rule y) + apply (simp add: x) + apply (rule y) + apply (fastforce simp: s) + apply (fastforce simp: s) + apply (fastforce simp: f) apply (rule e) apply (rule z) apply (wp w)+ @@ -676,16 +780,23 @@ lemma threadSet_valid_pspace'T_P: assumes v: "\tcb. (P \ Q' (tcbBoundNotification tcb)) \ (\s. valid_bound_ntfn' (tcbBoundNotification tcb) s \ valid_bound_ntfn' (tcbBoundNotification (F tcb)) s)" - + assumes p: "\tcb. (P \ Q'' (tcbSchedPrev tcb)) \ + (\s. none_top tcb_at' (tcbSchedPrev tcb) s + \ none_top tcb_at' (tcbSchedPrev (F tcb)) s)" + assumes n: "\tcb. (P \ Q''' (tcbSchedNext tcb)) \ + (\s. none_top tcb_at' (tcbSchedNext tcb) s + \ none_top tcb_at' (tcbSchedNext (F tcb)) s)" assumes y: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" assumes u: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" assumes w: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" assumes w': "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" shows - "\valid_pspace' and (\s. P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s)\ - threadSet F t - \\rv. valid_pspace'\" + "\valid_pspace' and (\s. P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s + \ obj_at' (\tcb. Q'' (tcbSchedPrev tcb)) t s + \ obj_at' (\tcb. Q''' (tcbSchedNext tcb)) t s)\ + threadSet F t + \\_. 
valid_pspace'\" apply (simp add: valid_pspace'_def threadSet_def) apply (rule hoare_pre, wp setObject_tcb_valid_objs getObject_tcb_wp) @@ -693,7 +804,7 @@ lemma threadSet_valid_pspace'T_P: apply (erule(1) valid_objsE') apply (clarsimp simp add: valid_obj'_def valid_tcb'_def bspec_split [OF spec [OF x]] z - split_paired_Ball y u w v w') + split_paired_Ball y u w v w' p n) done lemmas threadSet_valid_pspace'T = @@ -767,6 +878,10 @@ lemma threadSet_iflive'T: \ tcbState (F tcb) \ Inactive \ tcbState (F tcb) \ IdleThreadState \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. tcbSchedNext tcb = None \ tcbSchedNext (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. tcbSchedPrev tcb = None \ tcbSchedPrev (F tcb) \ None + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb) \ ko_at' tcb t s) \ ex_nonz_cap_to' t s)\ threadSet F t @@ -774,8 +889,7 @@ lemma threadSet_iflive'T: apply (simp add: threadSet_def) apply (wp setObject_tcb_iflive' getObject_tcb_wp) apply (clarsimp simp: obj_at'_def projectKOs) - apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric]) - apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric]) + apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric])+ apply (rule conjI) apply (rule impI, clarsimp) apply (erule if_live_then_nonz_capE') @@ -791,7 +905,7 @@ lemma threadSet_cte_wp_at'T: getF (F tcb) = getF tcb" shows "\\s. P' (cte_wp_at' P p s)\ threadSet F t \\rv s. P' (cte_wp_at' P p s)\" apply (simp add: threadSet_def) - apply (rule hoare_seq_ext [where B="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) + apply (rule bind_wp[where Q'="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) apply (rule setObject_cte_wp_at2') apply (clarsimp simp: updateObject_default_def projectKOs in_monad objBits_simps' obj_at'_def in_magnitude_check prod_eq_iff) @@ -821,6 +935,12 @@ lemmas threadSet_ctes_of = lemmas threadSet_cap_to' = ex_nonz_cap_to_pres' [OF threadSet_cte_wp_at'] +lemma threadSet_cap_to: + "(\tcb. \(getF, v)\ran tcb_cte_cases. getF (f tcb) = getF tcb) + \ threadSet f tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: hoare_vcg_ex_lift threadSet_cte_wp_at' + simp: ex_nonz_cap_to'_def tcb_cte_cases_def objBits_simps') + lemma threadSet_idle'T: assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" shows @@ -859,30 +979,6 @@ lemma set_tcb_bitmapQ_no_L2_orphans[wp]: apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ done -lemma threadSet_valid_queues_no_bitmap: - "\ valid_queues_no_bitmap and - (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) - \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s - \ t \ set (ksReadyQueues s (d, p)) - )\ - threadSet f t - \\rv. valid_queues_no_bitmap \" - apply (simp add: threadSet_def) - apply wp - apply (simp add: Invariants_H.valid_queues_no_bitmap_def' pred_tcb_at'_def) - - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def projectKOs) - apply (fastforce) - done - lemma threadSet_valid_bitmapQ[wp]: "\ valid_bitmapQ \ threadSet f t \ \rv. 
valid_bitmapQ \" unfolding bitmapQ_defs threadSet_def @@ -901,73 +997,6 @@ lemma threadSet_valid_bitmapQ_no_L2_orphans[wp]: by (clarsimp simp: setObject_def split_def) (wp | simp add: updateObject_default_def)+ -lemma threadSet_valid_queues: - "\Invariants_H.valid_queues and - (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) - \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ - \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s - \ t \ set (ksReadyQueues s (d, p)) - )\ - threadSet f t - \\rv. Invariants_H.valid_queues\" - unfolding valid_queues_def - by (wp threadSet_valid_queues_no_bitmap;simp) - -definition - addToQs :: "(Structures_H.tcb \ Structures_H.tcb) - \ machine_word \ (domain \ priority \ machine_word list) - \ (domain \ priority \ machine_word list)" -where - "addToQs F t \ \qs (qdom, prio). if (\ko. \ inQ qdom prio (F ko)) - then t # qs (qdom, prio) - else qs (qdom, prio)" - -lemma addToQs_set_def: - "(t' \ set (addToQs F t qs (qdom, prio))) = (t' \ set (qs (qdom, prio)) - \ (t' = t \ (\ko. \ inQ qdom prio (F ko))))" - by (auto simp add: addToQs_def) - -lemma threadSet_valid_queues_addToQs: - "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko - \ t \ set (ksReadyQueues s (qdom, prio))) - \ valid_queues' (ksReadyQueues_update (addToQs F t) s)\ - threadSet F t - \\rv. valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs - objBits_simps addToQs_set_def - split del: if_split cong: if_cong) - apply (fastforce simp: projectKOs split: if_split_asm) - done - -lemma threadSet_valid_queues_Qf: - "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko - \ t \ set (ksReadyQueues s (qdom, prio))) - \ valid_queues' (ksReadyQueues_update Qf s) - \ (\prio. set (Qf (ksReadyQueues s) prio) - \ set (addToQs F t (ksReadyQueues s) prio))\ - threadSet F t - \\rv. valid_queues'\" - apply (wp threadSet_valid_queues_addToQs) - apply (clarsimp simp: valid_queues'_def subset_iff) - done - -lemma addToQs_subset: - "set (qs p) \ set (addToQs F t qs p)" -by (clarsimp simp: addToQs_def split_def) - -lemmas threadSet_valid_queues' - = threadSet_valid_queues_Qf - [where Qf=id, simplified ksReadyQueues_update_id - id_apply addToQs_subset simp_thms] - lemma threadSet_cur: "\\s. cur_tcb' s\ threadSet f t \\rv s. cur_tcb' s\" apply (simp add: threadSet_def cur_tcb'_def) @@ -983,7 +1012,7 @@ lemma modifyReadyQueuesL1Bitmap_obj_at[wp]: crunches setThreadState, setBoundNotification for valid_arch' [wp]: valid_arch_state' - (simp: unless_def crunch_simps) + (simp: unless_def crunch_simps wp: crunch_wps) crunch ksInterrupt'[wp]: threadSet "\s. P (ksInterruptState s)" (wp: setObject_ksInterrupt updateObject_default_inv) @@ -1004,20 +1033,18 @@ lemma threadSet_obj_at'_really_strongest: "\\s. tcb_at' t s \ obj_at' (\obj. if t = t' then P (f obj) else P obj) t' s\ threadSet f t \\rv. obj_at' P t'\" apply (simp add: threadSet_def) - apply (rule hoare_wp_splits) - apply (rule setObject_tcb_strongest) - apply (simp only: imp_conv_disj) - apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) - apply (rule hoare_post_imp [where Q="\rv s. 
\ tcb_at' t s \ tcb_at' t s"]) - apply simp - apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) - apply (rule getObject_inv_tcb) - apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply (wp setObject_tcb_strongest) + apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) + apply (rule hoare_post_imp [where Q="\rv s. \ tcb_at' t s \ tcb_at' t s"]) apply simp - apply (simp add: objBits_simps') - apply (erule obj_at'_weakenE) - apply simp - apply (cases "t = t'", simp_all) + apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) + apply (rule getObject_inv_tcb) + apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply simp + apply (simp add: objBits_simps') + apply (erule obj_at'_weakenE) + apply simp + apply (cases "t = t'", simp_all) apply (rule OMG_getObject_tcb) apply wp done @@ -1241,57 +1268,103 @@ lemma threadSet_valid_dom_schedule': unfolding threadSet_def by (wp setObject_ksDomSchedule_inv hoare_Ball_helper) +lemma threadSet_wp: + "\\s. \tcb. ko_at' tcb t s \ P (s\ksPSpace := (ksPSpace s)(t \ injectKO (f tcb))\)\ + threadSet f t + \\_. P\" + unfolding threadSet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (auto simp: obj_at'_def split: if_splits) + apply (erule rsubst[where P=P]) + apply (clarsimp simp: fun_upd_def) + apply (prop_tac "\ptr. psMap (ksPSpace s) ptr = ksPSpace s ptr") + apply fastforce + apply metis + done + +lemma threadSet_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb\ + \ threadSet F tcbPtr \\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst2[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + done + +lemma threadSet_valid_sched_pointers: + "\\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb; \tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb; + \tcb. tcbQueued (F tcb) = tcbQueued tcb\ + \ threadSet F tcbPtr \valid_sched_pointers\" + unfolding valid_sched_pointers_def + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + by (fastforce simp: opt_pred_def opt_map_def obj_at'_def projectKOs split: option.splits if_splits) + +lemma threadSet_tcbSchedNexts_of: + "(\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb) \ + threadSet F t \\s. P (tcbSchedNexts_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + done + +lemma threadSet_tcbSchedPrevs_of: + "(\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb) \ + threadSet F t \\s. P (tcbSchedPrevs_of s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_map_def obj_at'_def projectKOs) + done + +lemma threadSet_tcbQueued: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + threadSet F t \\s. P (tcbQueued |< tcbs_of' s)\" + apply (wpsimp wp: threadSet_wp getObject_tcb_wp) + apply (erule rsubst[where P=P]) + apply (fastforce simp: opt_pred_def opt_map_def obj_at'_def projectKOs) + done + +crunches threadSet + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueuesL1Bitmap[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ksReadyQueuesL2Bitmap[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + lemma threadSet_invs_trivialT: - assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" - assumes z: "\tcb. 
tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" - assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" - assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" - assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" - assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" - shows - "\\s. invs' s \ - tcb_at' t s \ - (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) \ - (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) \ - ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) \ - (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ - threadSet F t - \\rv. invs'\" -proof - - from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast - note threadSet_sch_actT_P[where P=False, simplified] - have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ - valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" - by (auto simp: z) - show ?thesis - apply (simp add: invs'_def valid_state'_def split del: if_split) - apply (rule hoare_pre) - apply (wp x w v u b - threadSet_valid_pspace'T - threadSet_sch_actT_P[where P=False, simplified] - threadSet_valid_queues - threadSet_state_refs_of'T[where f'=id] - threadSet_iflive'T - threadSet_ifunsafe'T - threadSet_idle'T - threadSet_global_refsT - irqs_masked_lift - valid_irq_node_lift - valid_irq_handlers_lift'' valid_ioports_lift'' - threadSet_ctes_ofT - threadSet_not_inQ - threadSet_ct_idle_or_in_cur_domain' - threadSet_valid_dom_schedule' - threadSet_valid_queues' - threadSet_cur - untyped_ranges_zero_lift - |clarsimp simp: y z a domains cteCaps_of_def |rule refl)+ - apply (clarsimp simp: obj_at'_def projectKOs pred_tcb_at'_def) - apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) - by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) -qed + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb" + "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits + \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbPriority (F tcb) = tcbPriority tcb" + "\tcb. 
tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + shows "threadSet F t \invs'\" + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' valid_ioports_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbQueued + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of valid_bitmaps_lift + | clarsimp simp: assms cteCaps_of_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: assms obj_at'_def) lemmas threadSet_invs_trivial = threadSet_invs_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] @@ -1331,10 +1404,73 @@ lemma threadSet_valid_objs': apply (clarsimp elim!: obj_at'_weakenE) done +lemmas typ_at'_valid_tcb'_lift = + typ_at'_valid_obj'_lift[where obj="KOTCB tcb" for tcb, unfolded valid_obj'_def, simplified] + +lemmas setObject_valid_tcb' = typ_at'_valid_tcb'_lift[OF setObject_typ_at'] + +lemma setObject_valid_tcbs': + assumes preserve_valid_tcb': "\s s' ko ko' x n tcb tcb'. + \ (ko', s') \ fst (updateObject val ko ptr x n s); P s; + lookupAround2 ptr (ksPSpace s) = (Some (x, ko), n); + projectKO_opt ko = Some tcb; projectKO_opt ko' = Some tcb'; + valid_tcb' tcb s \ \ valid_tcb' tcb' s" + shows "\valid_tcbs' and P\ setObject ptr val \\rv. valid_tcbs'\" + unfolding valid_tcbs'_def + apply (clarsimp simp: valid_def) + apply (rename_tac s s' ptr' tcb) + apply (prop_tac "\tcb'. valid_tcb' tcb s \ valid_tcb' tcb s'") + apply clarsimp + apply (erule (1) use_valid[OF _ setObject_valid_tcb']) + apply (drule spec, erule mp) + apply (clarsimp simp: setObject_def in_monad split_def lookupAround2_char1) + apply (rename_tac s ptr' new_tcb' ptr'' old_tcb_ko' s' f) + apply (case_tac "ptr'' = ptr'"; clarsimp) + apply (prop_tac "\old_tcb' :: tcb. projectKO_opt old_tcb_ko' = Some old_tcb'") + apply (frule updateObject_type) + apply (case_tac old_tcb_ko'; clarsimp simp: project_inject) + apply (erule exE) + apply (rule preserve_valid_tcb', assumption+) + apply (simp add: prod_eqI lookupAround2_char1) + apply force + apply (clarsimp simp: project_inject) + apply (clarsimp simp: project_inject) + done + +lemma setObject_tcb_valid_tcbs': + "\valid_tcbs' and (tcb_at' t and valid_tcb' v)\ setObject t (v :: tcb) \\rv. valid_tcbs'\" + apply (rule setObject_valid_tcbs') + apply (clarsimp simp: updateObject_default_def in_monad project_inject) + done + +lemma threadSet_valid_tcb': + "\valid_tcb' tcb and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. valid_tcb' tcb\" + apply (simp add: threadSet_def) + apply (wpsimp wp: setObject_valid_tcb') + done + +lemma threadSet_valid_tcbs': + "\valid_tcbs' and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\_. 
valid_tcbs'\" + apply (simp add: threadSet_def) + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (wpsimp wp: setObject_tcb_valid_tcbs') + apply (clarsimp simp: obj_at'_def valid_tcbs'_def projectKOs) + done + +lemma asUser_valid_tcbs'[wp]: + "asUser t f \valid_tcbs'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_valid_tcbs' hoare_drop_imps + simp: valid_tcb'_def tcb_cte_cases_def objBits_simps') + done + lemma asUser_corres': assumes y: "corres_underlying Id False True r \ \ f g" - shows "corres r (tcb_at t) - (tcb_at' t) + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ (as_user t f) (asUser t g)" proof - note arch_tcb_context_get_def[simp] @@ -1342,7 +1478,7 @@ lemma asUser_corres': note arch_tcb_context_set_def[simp] note atcbContextSet_def[simp] have L1: "corres (\tcb con. (arch_tcb_context_get o tcb_arch) tcb = con) - (tcb_at t) (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) \ (gets_the (get_tcb t)) (threadGet (atcbContextGet o tcbArch) t)" apply (rule corres_guard_imp) apply (rule corres_gets_the) @@ -1368,6 +1504,8 @@ lemma asUser_corres': using y by (fastforce simp: corres_underlying_def select_f_def split_def Id_def) show ?thesis + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce elim: tcb_at_cross) apply (simp add: as_user_def asUser_def) apply (rule corres_guard_imp) apply (rule_tac r'="\tcb con. (arch_tcb_context_get o tcb_arch) tcb = con" @@ -1391,7 +1529,7 @@ qed lemma asUser_corres: assumes y: "corres_underlying Id False True r \ \ f g" - shows "corres r (tcb_at t and invs) (tcb_at' t and invs') (as_user t f) (asUser t g)" + shows "corres r (tcb_at t and invs) invs' (as_user t f) (asUser t g)" apply (rule corres_guard_imp) apply (rule asUser_corres' [OF y]) apply (simp add: invs_def valid_state_def valid_pspace_def) @@ -1419,17 +1557,15 @@ proof - qed lemma asUser_getRegister_corres: - "corres (=) (tcb_at t) (tcb_at' t) - (as_user t (getRegister r)) (asUser t (getRegister r))" + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (getRegister r)) (asUser t (getRegister r))" apply (rule asUser_corres') apply (clarsimp simp: getRegister_def) done lemma user_getreg_inv'[wp]: "\P\ asUser t (getRegister r) \\x. P\" - apply (rule asUser_inv) - apply (simp_all add: getRegister_def) - done + by (wp asUser_inv) lemma asUser_typ_at' [wp]: "\\s. P (typ_at' T p s)\ asUser t' f \\rv s. P (typ_at' T p s)\" @@ -1441,7 +1577,6 @@ lemma asUser_invs[wp]: "\invs' and tcb_at' t\ asUser t m \\rv. invs'\" apply (simp add: asUser_def split_def) apply (wp hoare_drop_imps | simp)+ - apply (wp threadSet_invs_trivial hoare_drop_imps | simp)+ done @@ -1469,14 +1604,6 @@ lemma asUser_valid_pspace'[wp]: apply (wp threadSet_valid_pspace' hoare_drop_imps | simp)+ done -lemma asUser_valid_queues[wp]: - "\Invariants_H.valid_queues\ asUser t m \\rv. Invariants_H.valid_queues\" - apply (simp add: asUser_def split_def) - apply (wp hoare_drop_imps | simp)+ - - apply (wp threadSet_valid_queues hoare_drop_imps | simp)+ - done - lemma asUser_ifunsafe'[wp]: "\if_unsafe_then_cap'\ asUser t m \\rv. 
if_unsafe_then_cap'\" apply (simp add: asUser_def split_def) @@ -1563,14 +1690,13 @@ lemma no_fail_asUser [wp]: "no_fail \ f \ no_fail (tcb_at' t) (asUser t f)" apply (simp add: asUser_def split_def) apply wp - apply (simp add: no_fail_def) - apply (wp hoare_drop_imps) - apply simp + apply (simp add: no_fail_def) + apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+ done lemma asUser_setRegister_corres: - "corres dc (tcb_at t) - (tcb_at' t) + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) + \ (as_user t (setRegister r v)) (asUser t (setRegister r v))" apply (simp add: setRegister_def) @@ -1579,7 +1705,7 @@ lemma asUser_setRegister_corres: done lemma getThreadState_corres: - "corres thread_state_relation (tcb_at t) (tcb_at' t) + "corres thread_state_relation (tcb_at t and pspace_aligned and pspace_distinct) \ (get_thread_state t) (getThreadState t)" apply (simp add: get_thread_state_def getThreadState_def) apply (rule threadGet_corres) @@ -1610,7 +1736,7 @@ lemma gts_inv'[wp]: "\P\ getThreadState t \\rv. by (simp add: getThreadState_def) wp lemma getBoundNotification_corres: - "corres (=) (tcb_at t) (tcb_at' t) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ (get_bound_notification t) (getBoundNotification t)" apply (simp add: get_bound_notification_def getBoundNotification_def) apply (rule threadGet_corres) @@ -1755,19 +1881,22 @@ lemma ethreadget_corres: apply (simp add: x) done -lemma setQueue_corres: - "corres dc \ \ (set_tcb_queue d p q) (setQueue d p q)" - apply (rule corres_no_failI) - apply wp - apply (clarsimp simp: setQueue_def in_monad set_tcb_queue_def return_def simpler_modify_def) - apply (fastforce simp: state_relation_def ready_queues_relation_def) - done - - -lemma getQueue_corres: "corres (=) \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" - apply (clarsimp simp add: getQueue_def state_relation_def ready_queues_relation_def get_tcb_queue_def gets_def) - apply (fold gets_def) - apply simp +lemma getQueue_corres: + "corres (\ls q. (ls = [] \ tcbQueueEmpty q) \ (ls \ [] \ tcbQueueHead q = Some (hd ls)) + \ queue_end_valid ls q) + \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" + apply (clarsimp simp: get_tcb_queue_def getQueue_def tcbQueueEmpty_def) + apply (rule corres_bind_return2) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]) + apply (rule corres_symb_exec_r[OF _ gets_sp]) + apply clarsimp + apply (drule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def + list_queue_relation_def) + apply (drule_tac x=qdom in spec) + apply (drule_tac x=prio in spec) + apply (fastforce dest: heap_path_head) + apply wpsimp+ done lemma no_fail_return: @@ -1782,8 +1911,8 @@ lemma addToBitmap_noop_corres: (wp | simp add: state_relation_def | rule no_fail_pre)+ lemma addToBitmap_if_null_noop_corres: (* used this way in Haskell code *) - "corres dc \ \ (return ()) (if null queue then addToBitmap d p else return ())" - by (cases "null queue", simp_all add: addToBitmap_noop_corres) + "corres dc \ \ (return ()) (if tcbQueueEmpty queue then addToBitmap d p else return ())" + by (cases "tcbQueueHead queue", simp_all add: addToBitmap_noop_corres) lemma removeFromBitmap_corres_noop: "corres dc \ \ (return ()) (removeFromBitmap tdom prioa)" @@ -1800,54 +1929,701 @@ crunch typ_at'[wp]: removeFromBitmap "\s. 
P (typ_at' T p s)" lemmas addToBitmap_typ_ats [wp] = typ_at_lifts [OF addToBitmap_typ_at'] lemmas removeFromBitmap_typ_ats [wp] = typ_at_lifts [OF removeFromBitmap_typ_at'] +lemma ekheap_relation_tcb_domain_priority: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s t = Some (tcb); + ksPSpace s' t = Some (KOTCB tcb')\ + \ tcbDomain tcb' = tcb_domain tcb \ tcbPriority tcb' = tcb_priority tcb" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=t in bspec, blast) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def) + done + +lemma no_fail_thread_get[wp]: + "no_fail (tcb_at tcb_ptr) (thread_get f tcb_ptr)" + unfolding thread_get_def + apply wpsimp + apply (clarsimp simp: tcb_at_def) + done + +lemma pspace_relation_tcb_relation: + "\pspace_relation (kheap s) (ksPSpace s'); kheap s ptr = Some (TCB tcb); + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ tcb_relation tcb tcb'" + apply (clarsimp simp: pspace_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: tcb_relation_cut_def obj_at_def obj_at'_def) + done + +lemma pspace_relation_update_concrete_tcb: + "\pspace_relation s s'; s ptr = Some (TCB tcb); s' ptr = Some (KOTCB otcb'); + tcb_relation tcb tcb'\ + \ pspace_relation s (s'(ptr \ KOTCB tcb'))" + by (fastforce dest: pspace_relation_update_tcbs simp: map_upd_triv) + +lemma threadSet_pspace_relation: + fixes s :: det_state + assumes tcb_rel: "(\tcb tcb'. tcb_relation tcb tcb' \ tcb_relation tcb (F tcb'))" + shows "threadSet F tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply normalise_obj_at' + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply (clarsimp simp: obj_at_def is_tcb_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule pspace_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def projectKOs) + apply (frule (1) pspace_relation_tcb_relation) + apply (fastforce simp: obj_at'_def projectKOs) + apply (fastforce dest!: tcb_rel) + done + +lemma ekheap_relation_update_tcbs: + "\ ekheap_relation (ekheap s) (ksPSpace s'); ekheap s x = Some oetcb; + ksPSpace s' x = Some (KOTCB otcb'); etcb_relation etcb tcb' \ + \ ekheap_relation ((ekheap s)(x \ etcb)) ((ksPSpace s')(x \ KOTCB tcb'))" + by (simp add: ekheap_relation_def) + +lemma ekheap_relation_update_concrete_tcb: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB otcb'); + etcb_relation etcb tcb'\ + \ ekheap_relation (ekheap s) ((ksPSpace s')(ptr \ KOTCB tcb'))" + by (fastforce dest: ekheap_relation_update_tcbs simp: map_upd_triv) + +lemma ekheap_relation_etcb_relation: + "\ekheap_relation (ekheap s) (ksPSpace s'); ekheap s ptr = Some etcb; + ksPSpace s' ptr = Some (KOTCB tcb')\ + \ etcb_relation etcb tcb'" + apply (clarsimp simp: ekheap_relation_def) + apply (drule_tac x=ptr in bspec) + apply (fastforce simp: obj_at_def) + apply (clarsimp simp: obj_at_def obj_at'_def) + done + +lemma threadSet_ekheap_relation: + fixes s :: det_state + assumes etcb_rel: "(\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation etcb (F tcb'))" + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet F tcbPtr + \\_ s'. 
ekheap_relation (ekheap s) (ksPSpace s')\" + supply fun_upd_apply[simp del] + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (frule tcb_at'_cross) + apply (fastforce simp: obj_at'_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_tcb_def is_etcb_at_def) + apply (rename_tac ko, case_tac ko; clarsimp) + apply (rule ekheap_relation_update_concrete_tcb) + apply fastforce + apply fastforce + apply (fastforce simp: obj_at'_def projectKOs) + apply (frule (1) ekheap_relation_etcb_relation) + apply (fastforce simp: obj_at'_def projectKOs) + apply (fastforce dest!: etcb_rel) + done + +lemma tcbQueued_update_pspace_relation[wp]: + fixes s :: det_state + shows "threadSet (tcbQueued_update f) tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueued_update_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + threadSet (tcbQueued_update f) tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + by (wpsimp wp: threadSet_ekheap_relation simp: etcb_relation_def) + +lemma tcbQueueRemove_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueRemove queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueRemove_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueRemove queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_ekheap_relation threadSet_pspace_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma threadSet_ghost_relation[wp]: + "threadSet f tcbPtr \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s')\" + unfolding threadSet_def setObject_def updateObject_default_def + apply (wpsimp wp: getObject_tcb_wp simp: updateObject_default_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma removeFromBitmap_ghost_relation[wp]: + "removeFromBitmap tdom prio \\s'. ghost_relation (kheap s) (gsUserPages s') (gsCNodes s')\" + by (rule_tac f=gsUserPages in hoare_lift_Pf2; wpsimp simp: bitmap_fun_defs) + +lemma tcbQueued_update_ctes_of[wp]: + "threadSet (tcbQueued_update f) t \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_of) + +lemma removeFromBitmap_ctes_of[wp]: + "removeFromBitmap tdom prio \\s. P (ctes_of s)\" + by (wpsimp simp: bitmap_fun_defs) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for ghost_relation_projs[wp]: "\s. P (gsUserPages s) (gsCNodes s)" + and ksArchState[wp]: "\s. P (ksArchState s)" + and ksWorkUnitsCompleted[wp]: "\s. P (ksWorkUnitsCompleted s)" + and ksDomainTime[wp]: "\s. P (ksDomainTime s)" + (wp: crunch_wps getObject_tcb_wp simp: setObject_def updateObject_default_def obj_at'_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, + setQueue, removeFromBitmap + for tcb_at'[wp]: "\s. tcb_at' tcbPtr s" + (wp: crunch_wps ignore: threadSet) + +lemma set_tcb_queue_projs: + "set_tcb_queue d p queue + \\s. 
P (kheap s) (cdt s) (is_original_cap s) (cur_thread s) (idle_thread s) (scheduler_action s) + (domain_list s) (domain_index s) (cur_domain s) (domain_time s) (machine_state s) + (interrupt_irq_node s) (interrupt_states s) (arch_state s) (caps_of_state s) + (work_units_completed s) (cdt_list s) (ekheap s)\" + by (wpsimp simp: set_tcb_queue_def) + +lemma set_tcb_queue_cte_at: + "set_tcb_queue d p queue \\s. P (swp cte_at s)\" + unfolding set_tcb_queue_def + apply wpsimp + apply (clarsimp simp: swp_def cte_wp_at_def) + done + +lemma set_tcb_queue_projs_inv: + "fst (set_tcb_queue d p queue s) = {(r, s')} \ + kheap s = kheap s' + \ ekheap s = ekheap s' + \ cdt s = cdt s' + \ is_original_cap s = is_original_cap s' + \ cur_thread s = cur_thread s' + \ idle_thread s = idle_thread s' + \ scheduler_action s = scheduler_action s' + \ domain_list s = domain_list s' + \ domain_index s = domain_index s' + \ cur_domain s = cur_domain s' + \ domain_time s = domain_time s' + \ machine_state s = machine_state s' + \ interrupt_irq_node s = interrupt_irq_node s' + \ interrupt_states s = interrupt_states s' + \ arch_state s = arch_state s' + \ caps_of_state s = caps_of_state s' + \ work_units_completed s = work_units_completed s' + \ cdt_list s = cdt_list s' + \ swp cte_at s = swp cte_at s'" + apply (drule singleton_eqD) + by (auto elim!: use_valid_inv[where E=\, simplified] + intro: set_tcb_queue_projs set_tcb_queue_cte_at) + +lemma set_tcb_queue_new_state: + "(rv, t) \ fst (set_tcb_queue d p queue s) \ + t = s\ready_queues := \dom prio. if dom = d \ prio = p then queue else ready_queues s dom prio\" + by (clarsimp simp: set_tcb_queue_def in_monad) + +lemma tcbQueuePrepend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueuePrepend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueuePrepend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueuePrepend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueAppend_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueAppend queue tcbPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation simp: tcb_relation_def) + +lemma tcbQueueAppend_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueAppend queue tcbPtr + \\_ s'. ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueAppend_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation + simp: tcb_relation_def etcb_relation_def) + +lemma tcbQueueInsert_pspace_relation[wp]: + fixes s :: det_state + shows "tcbQueueInsert tcbPtr afterPtr \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation hoare_drop_imps simp: tcb_relation_def) + +lemma tcbQueueInsert_ekheap_relation[wp]: + fixes s :: det_state + shows + "\\s'. ekheap_relation (ekheap s) (ksPSpace s') \ pspace_relation (kheap s) (ksPSpace s') + \ valid_etcbs s\ + tcbQueueInsert tcbPtr afterPtr + \\_ s'. 
ekheap_relation (ekheap s) (ksPSpace s')\" + unfolding tcbQueueInsert_def + by (wpsimp wp: threadSet_pspace_relation threadSet_ekheap_relation hoare_drop_imps + simp: tcb_relation_def etcb_relation_def) + +lemma removeFromBitmap_pspace_relation[wp]: + fixes s :: det_state + shows "removeFromBitmap tdom prio \\s'. pspace_relation (kheap s) (ksPSpace s')\" + unfolding bitmap_fun_defs + by wpsimp + +crunches setQueue, removeFromBitmap + for valid_pspace'[wp]: valid_pspace' + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node'[wp]: "\s. P (irq_node' s)" + and typ_at'[wp]: "\s. P (typ_at' T p s)" + and valid_irq_states'[wp]: valid_irq_states' + and ksInterruptState[wp]: "\s. P (ksInterruptState s)" + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and valid_machine_state'[wp]: valid_machine_state' + and cur_tcb'[wp]: cur_tcb' + and ksPSpace[wp]: "\s. P (ksPSpace s)" + (wp: crunch_wps + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def cur_tcb'_def threadSet_cur + bitmap_fun_defs valid_machine_state'_def) + +crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue, setQueue + for pspace_aligned'[wp]: pspace_aligned' + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and pspace_distinct'[wp]: pspace_distinct' + and pspace_canonical'[wp]: pspace_canonical' + and no_0_obj'[wp]: no_0_obj' + and ksSchedulerAction[wp]: "\s. P (ksSchedulerAction s)" + and valid_global_refs'[wp]: valid_global_refs' + and valid_arch_state'[wp]: valid_arch_state' + and irq_node[wp]: "\s. P (irq_node' s)" + and typ_at[wp]: "\s. P (typ_at' T p s)" + and interrupt_state[wp]: "\s. P (ksInterruptState s)" + and valid_irq_state'[wp]: valid_irq_states' + and pspace_domain_valid[wp]: pspace_domain_valid + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + and ctes_of[wp]: "\s. P (ctes_of s)" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and ksMachineState[wp]: "\s. P (ksMachineState s)" + and pspace_in_kernel_mappings'[wp]: pspace_in_kernel_mappings' + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + (wp: crunch_wps threadSet_state_refs_of'[where f'=id and g'=id] + simp: crunch_simps tcb_cte_cases_def tcb_bound_refs'_def bitmap_fun_defs) + +lemma threadSet_ready_queues_relation: + "(\tcb. tcbQueued (F tcb) = tcbQueued tcb) \ + \\s'. ready_queues_relation s s' \ \ (tcbQueued |< tcbs_of' s') tcbPtr\ + threadSet F tcbPtr + \\_ s'. 
ready_queues_relation s s'\" + supply fun_upd_apply[simp del] + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: list_queue_relation_def obj_at'_def projectKOs) + apply (rename_tac tcb' d p) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce intro: heap_path_heap_upd_not_in + simp: inQ_def opt_map_def opt_pred_def obj_at'_def) + apply (rule conjI) + apply (drule_tac x=tcbPtr in spec) + apply (clarsimp simp: prev_queue_head_def) + apply (prop_tac "ready_queues s d p \ []", fastforce) + apply (fastforce dest: heap_path_head simp: inQ_def opt_pred_def opt_map_def fun_upd_apply) + apply (auto simp: inQ_def opt_pred_def opt_map_def fun_upd_apply projectKOs split: option.splits) + done + +definition in_correct_ready_q_2 where + "in_correct_ready_q_2 queues ekh \ + \d p. \t \ set (queues d p). is_etcb_at' t ekh + \ etcb_at' (\t. tcb_priority t = p \ tcb_domain t = d) t ekh" + +abbreviation in_correct_ready_q :: "det_ext state \ bool" where + "in_correct_ready_q s \ in_correct_ready_q_2 (ready_queues s) (ekheap s)" + +lemmas in_correct_ready_q_def = in_correct_ready_q_2_def + +lemma in_correct_ready_q_lift: + assumes c: "\P. \\s. P (ekheap s)\ f \\rv s. P (ekheap s)\" + assumes r: "\P. f \\s. P (ready_queues s)\" + shows "f \in_correct_ready_q\" + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +definition ready_qs_distinct :: "det_ext state \ bool" where + "ready_qs_distinct s \ \d p. distinct (ready_queues s d p)" + +lemma ready_qs_distinct_lift: + assumes r: "\P. f \\s. P (ready_queues s)\" + shows "f \ready_qs_distinct\" + unfolding ready_qs_distinct_def + apply (rule hoare_pre) + apply (wps assms | wpsimp)+ + done + +lemma ready_queues_disjoint: + "\in_correct_ready_q s; ready_qs_distinct s; d \ d' \ p \ p'\ + \ set (ready_queues s d p) \ set (ready_queues s d' p') = {}" + apply (clarsimp simp: ready_qs_distinct_def in_correct_ready_q_def) + apply (rule disjointI) + apply (frule_tac x=d in spec) + apply (drule_tac x=d' in spec) + apply (fastforce simp: etcb_at_def is_etcb_at_def split: option.splits) + done + +lemma isRunnable_sp: + "\P\ + isRunnable tcb_ptr + \\rv s. \tcb'. ko_at' tcb' tcb_ptr s + \ (rv = (tcbState tcb' = Running \ tcbState tcb' = Restart)) + \ P s\" + unfolding isRunnable_def getThreadState_def + apply (wpsimp wp: hoare_case_option_wp getObject_tcb_wp simp: threadGet_def) + apply (fastforce simp: obj_at'_def split: Structures_H.thread_state.splits) + done + +crunch (no_fail) no_fail[wp]: isRunnable + +defs ksReadyQueues_asrt_def: + "ksReadyQueues_asrt + \ \s'. \d p. \ts. ready_queue_relation d p ts (ksReadyQueues s' (d, p)) + (tcbSchedNexts_of s') (tcbSchedPrevs_of s') + (inQ d p |< tcbs_of' s')" + +lemma ksReadyQueues_asrt_cross: + "ready_queues_relation s s' \ ksReadyQueues_asrt s'" + by (fastforce simp: ready_queues_relation_def Let_def ksReadyQueues_asrt_def) + +crunches addToBitmap + for ko_at'[wp]: "\s. P (ko_at' ko ptr s)" + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + and ksReadyQueues_asrt[wp]: ksReadyQueues_asrt + and st_tcb_at'[wp]: "\s. 
P (st_tcb_at' Q tcbPtr s)" + and valid_tcbs'[wp]: valid_tcbs' + (simp: bitmap_fun_defs ksReadyQueues_asrt_def) + +lemma tcbQueueHead_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts\ + \ \ tcbQueueEmpty queue \ (inQ d p |< tcbs_of' s') (the (tcbQueueHead queue))" + by (fastforce dest: heap_path_head + simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def) + +lemma obj_at'_tcbQueueHead_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueHead queue)) s'" + by (fastforce dest!: tcbQueueHead_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma tcbQueueHead_iff_tcbQueueEnd: + "list_queue_relation ts q nexts prevs \ tcbQueueHead q \ None \ tcbQueueEnd q \ None" + apply (clarsimp simp: list_queue_relation_def queue_end_valid_def) + using heap_path_None + apply fastforce + done + +lemma tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts\ + \ \ tcbQueueEmpty queue \ (inQ d p |< tcbs_of' s') (the (tcbQueueEnd queue))" + apply (frule tcbQueueHead_iff_tcbQueueEnd) + by (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def) + +lemma obj_at'_tcbQueueEnd_ksReadyQueues: + "\list_queue_relation ts queue nexts prevs; + \t. (inQ d p |< tcbs_of' s') t \ t \ set ts; + pspace_aligned' s'; pspace_distinct' s'\ + \ \ tcbQueueEmpty queue \ obj_at' (inQ d p) (the (tcbQueueEnd queue)) s'" + by (fastforce dest!: tcbQueueEnd_ksReadyQueues intro: aligned'_distinct'_ko_wp_at'I + simp: obj_at'_real_def opt_map_def opt_pred_def split: option.splits) + +lemma thread_get_exs_valid[wp]: + "tcb_at tcb_ptr s \ \(=) s\ thread_get f tcb_ptr \\\_. (=) s\" + by (clarsimp simp: thread_get_def get_tcb_def gets_the_def gets_def return_def get_def + exs_valid_def tcb_at_def bind_def) + +lemma ethread_get_sp: + "\P\ ethread_get f ptr + \\rv. etcb_at (\tcb. f tcb = rv) ptr and P\" + apply wpsimp + apply (clarsimp simp: etcb_at_def split: option.splits) + done + +lemma ethread_get_exs_valid[wp]: + "\tcb_at tcb_ptr s; valid_etcbs s\ \ \(=) s\ ethread_get f tcb_ptr \\\_. (=) s\" + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: ethread_get_def get_etcb_def gets_the_def gets_def return_def get_def + is_etcb_at_def exs_valid_def bind_def) + done + +lemma no_fail_ethread_get[wp]: + "no_fail (tcb_at tcb_ptr and valid_etcbs) (ethread_get f tcb_ptr)" + unfolding ethread_get_def + apply wpsimp + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: is_etcb_at_def get_etcb_def) + done + +lemma threadGet_sp: + "\P\ threadGet f ptr \\rv s. \tcb :: tcb. ko_at' tcb ptr s \ f tcb = rv \ P s\" + unfolding threadGet_def setObject_def + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma in_set_ready_queues_inQ_eq: + "ready_queues_relation s s' \ t \ set (ready_queues s d p) \ (inQ d p |< tcbs_of' s') t" + by (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + +lemma in_ready_q_tcbQueued_eq: + "ready_queues_relation s s' + \ (\d p. 
t \ set (ready_queues s d p)) \ (tcbQueued |< tcbs_of' s') t" + apply (intro iffI) + apply clarsimp + apply (frule in_set_ready_queues_inQ_eq) + apply (fastforce simp: inQ_def opt_map_def opt_pred_def split: option.splits) + apply (fastforce simp: ready_queues_relation_def ready_queue_relation_def Let_def inQ_def + opt_pred_def + split: option.splits) + done + lemma tcbSchedEnqueue_corres: - "corres dc (is_etcb_at t) (tcb_at' t and Invariants_H.valid_queues and valid_queues') - (tcb_sched_action (tcb_sched_enqueue) t) (tcbSchedEnqueue t)" -proof - - have ready_queues_helper: - "\t tcb a b. \ ekheap a t = Some tcb; obj_at' tcbQueued t b ; valid_queues' b ; - ekheap_relation (ekheap a) (ksPSpace b) \ - \ t \ set (ksReadyQueues b (tcb_domain tcb, tcb_priority tcb))" - unfolding valid_queues'_def - by (fastforce dest: ekheap_relation_absD simp: obj_at'_def inQ_def etcb_relation_def projectKO_eq projectKO_tcb) - - show ?thesis unfolding tcbSchedEnqueue_def tcb_sched_action_def - apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, - where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at'; simp_all) - apply (rule no_fail_pre, wp, blast) - apply (case_tac queued; simp_all) - apply (rule corres_no_failI; simp add: no_fail_return) - apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc - assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def - set_tcb_queue_def simpler_modify_def ready_queues_relation_def - state_relation_def tcb_sched_enqueue_def) - apply (rule ready_queues_helper; auto) - apply (clarsimp simp: when_def) - apply (rule stronger_corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres) - apply (simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply simp - apply (rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_enqueue_def split del: if_split) - apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) - apply simp - apply (rule setQueue_corres[unfolded dc_def]) - apply (rule corres_split_noop_rhs2) - apply (fastforce intro: addToBitmap_noop_corres) - apply (fastforce intro: threadSet_corres_noop simp: tcb_relation_def exst_same_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def - projectKO_eq project_inject) - done -qed + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and st_tcb_at runnable tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs') + (tcb_sched_action tcb_sched_enqueue tcb_ptr) (tcbSchedEnqueue tcbPtr)" + supply if_split[split del] + heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + apply (rule_tac Q'="st_tcb_at' runnable' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: st_tcb_at_runnable_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q="tcb_at tcb_ptr" in corres_cross_add_abs_guard) + apply (fastforce dest: st_tcb_at_tcb_at) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply 
(clarsimp simp: tcb_sched_action_def tcb_sched_enqueue_def get_tcb_queue_def + tcbSchedEnqueue_def getQueue_def unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac domain) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; (solves wpsimp)?) + apply (rename_tac priority) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ isRunnable_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) + apply wpsimp + apply (fastforce simp: st_tcb_at'_def runnable_eq_active' obj_at'_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (subst if_distrib[where f="set_tcb_queue domain prio" for domain prio]) + apply (rule corres_if_strong') + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + subgoal + by (fastforce dest: tcb_at_ekheap_dom pred_tcb_at_tcb_at + simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def projectKOs) + apply (find_goal \match conclusion in "corres _ _ _ _ (return ())" \ \-\\) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (clarsimp simp: set_tcb_queue_def) + apply (rule monadic_rewrite_guard_imp) + apply (rule monadic_rewrite_modify_noop) + apply (prop_tac "(\d p. if d = domain \ p = priority + then ready_queues s domain priority + else ready_queues s d p) + = ready_queues s") + apply (fastforce split: if_splits) + apply fastforce + apply clarsimp + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_symb_exec_r[OF _ gets_sp]; (solves wpsimp)?) + + \ \break off the addToBitmap\ + apply (rule corres_add_noop_lhs) + apply (rule corres_underlying_split[rotated 2, + where Q="\_. P" and P=P and Q'="\_. P'" and P'=P' for P P']) + apply wpsimp + apply (wpsimp wp: hoare_vcg_if_lift hoare_vcg_ex_lift) + apply (corres corres: addToBitmap_if_null_noop_corres) + + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp simp: tcbQueuePrepend_def wp: hoare_vcg_if_lift2 | drule Some_to_the)+ + apply (clarsimp simp: ex_abs_underlying_def split: if_splits) + apply (frule state_relation_ready_queues_relation) + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + subgoal by (force dest!: obj_at'_tcbQueueHead_ksReadyQueues simp: obj_at'_def projectKOs) + + apply (rename_tac s rv t) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) 
+ + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp simp: setQueue_def tcbQueuePrepend_def) + apply normalise_obj_at' + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def projectKOs) + apply (clarsimp split: if_splits) + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_nil) + apply (force dest!: spec simp: list_queue_relation_def) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" and s'=s' + in obj_at'_tcbQueueEnd_ksReadyQueues) + apply fast + apply auto[1] + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" and st="tcbQueueHead (ksReadyQueues s' (d, p))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (cut_tac xs="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + and st="tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))" + in heap_path_head') + apply (auto dest: spec simp: list_queue_relation_def tcbQueueEmpty_def)[1] + apply (clarsimp simp: list_queue_relation_def) + + apply (case_tac "\ (d = tcb_domain etcb \ p = tcb_priority etcb)") + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply (clarsimp simp: obj_at'_def opt_pred_def opt_map_def) + apply (metis inQ_def option.simps(5) tcb_of'_TCB projectKO_eq) + apply (intro conjI impI; simp) + + \ \the ready queue was originally empty\ + apply (rule heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (rule prev_queue_head_heap_upd) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + clarsimp simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply obj_at'_def split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \the ready queue was not originally empty\ + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (prop_tac "the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb))) + \ set (ready_queues s d p)") + apply (erule orthD2) + apply (clarsimp simp: tcbQueueEmpty_def) + apply (intro conjI impI allI) + apply (intro heap_path_heap_upd_not_in) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply simp + apply fastforce + apply (clarsimp simp: queue_end_valid_def fun_upd_apply split: if_splits) + apply (intro prev_queue_head_heap_upd) + apply (force simp: fun_upd_apply split: if_splits) + apply (case_tac "ready_queues s d p"; + force simp: fun_upd_apply tcbQueueEmpty_def split: if_splits) + apply (clarsimp simp: fun_upd_apply inQ_def split: if_splits) + apply (case_tac "ready_queues s d p"; force simp: 
tcbQueueEmpty_def) + apply (case_tac "t = tcbPtr") + apply (clarsimp simp: inQ_def fun_upd_apply obj_at'_def projectKOs split: if_splits) + apply (case_tac "t = the (tcbQueueHead (ksReadyQueues s' (tcb_domain etcb, tcb_priority etcb)))") + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def obj_at'_def fun_upd_apply projectKOs + split: option.splits) + apply metis + apply (clarsimp simp: inQ_def in_opt_pred opt_map_def fun_upd_apply) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + + \ \d = tcb_domain etcb \ p = tcb_priority etcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (cut_tac ts="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in tcbQueueHead_iff_tcbQueueEnd) + apply (force simp: list_queue_relation_def) + apply (frule valid_tcbs'_maxDomain[where t=tcbPtr], simp add: obj_at'_def projectKOs) + apply (frule valid_tcbs'_maxPriority[where t=tcbPtr], simp add: obj_at'_def projectKOs) + apply (drule valid_sched_pointersD[where t=tcbPtr]) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def projectKOs) + apply (clarsimp simp: in_opt_pred opt_map_red obj_at'_def projectKOs) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue was originally empty\ + apply (force simp: inQ_def in_opt_pred fun_upd_apply queue_end_valid_def prev_queue_head_def + opt_map_red obj_at'_def + split: if_splits) + + \ \the ready queue was not originally empty\ + apply (drule (2) heap_ls_prepend[where new=tcbPtr]) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply) + apply (rule conjI) + apply (subst opt_map_upd_triv) + apply (clarsimp simp: opt_map_def obj_at'_def fun_upd_apply split: if_splits) + apply (clarsimp simp: fun_upd_apply split: if_splits) + apply (rule conjI) + apply (clarsimp simp: fun_upd_apply queue_end_valid_def) + apply (rule conjI) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def split: if_splits) + by (auto dest!: hd_in_set simp: inQ_def in_opt_pred opt_map_def fun_upd_apply projectKOs + split: if_splits option.splits) definition weak_sch_act_wf :: "scheduler_action \ kernel_state \ bool" @@ -1874,7 +2650,11 @@ lemma getSchedulerAction_corres: done lemma rescheduleRequired_corres: - "corres dc (weak_valid_sched_action and valid_etcbs) (Invariants_H.valid_queues and valid_queues' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)) + "corres dc + (weak_valid_sched_action and in_correct_ready_q and ready_qs_distinct and valid_etcbs + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_sched_pointers and valid_tcbs' + and pspace_aligned' and pspace_distinct') (reschedule_required) rescheduleRequired" apply (simp add: rescheduleRequired_def reschedule_required_def) apply (rule corres_guard_imp) @@ -1885,7 +2665,7 @@ lemma rescheduleRequired_corres: apply (case_tac action) apply simp apply simp - apply (rule tcbSchedEnqueue_corres) + apply (rule tcbSchedEnqueue_corres, simp) apply simp apply (rule setSchedulerAction_corres) apply simp @@ -1961,20 +2741,13 @@ lemmas addToBitmap_weak_sch_act_wf[wp] = weak_sch_act_wf_lift[OF addToBitmap_nosch] crunch st_tcb_at'[wp]: removeFromBitmap "st_tcb_at' P t" -crunch pred_tcb_at'[wp]: removeFromBitmap "pred_tcb_at' proj P t" - -crunch not_st_tcb_at'[wp]: removeFromBitmap "\s. \ (st_tcb_at' P' t) s" -crunch not_pred_tcb_at'[wp]: removeFromBitmap "\s. \ (pred_tcb_at' proj P' t) s" +crunch pred_tcb_at'[wp]: removeFromBitmap "\s. 
Q (pred_tcb_at' proj P t s)" -crunch st_tcb_at'[wp]: addToBitmap "st_tcb_at' P' t" -crunch pred_tcb_at'[wp]: addToBitmap "pred_tcb_at' proj P' t" +crunch pred_tcb_at'[wp]: addToBitmap "\s. Q (pred_tcb_at' proj P t s)" -crunch not_st_tcb_at'[wp]: addToBitmap "\s. \ (st_tcb_at' P' t) s" -crunch not_pred_tcb_at'[wp]: addToBitmap "\s. \ (pred_tcb_at' proj P' t) s" +crunch obj_at'[wp]: removeFromBitmap "\s. Q (obj_at' P t s)" -crunch obj_at'[wp]: removeFromBitmap "obj_at' P t" - -crunch obj_at'[wp]: addToBitmap "obj_at' P t" +crunch obj_at'[wp]: addToBitmap "\s. Q (obj_at' P t s)" lemma removeFromBitmap_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t\ removeFromBitmap tdom prio \\ya. tcb_in_cur_domain' t\" @@ -1991,9 +2764,11 @@ lemma addToBitmap_tcb_in_cur_domain'[wp]: done lemma tcbSchedDequeue_weak_sch_act_wf[wp]: - "\ \s. weak_sch_act_wf (ksSchedulerAction s) s \ tcbSchedDequeue a \ \_ s. weak_sch_act_wf (ksSchedulerAction s) s \" - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_weak_sch_act_wf removeFromBitmap_weak_sch_act_wf | simp add: crunch_simps)+ + "tcbSchedDequeue tcbPtr \\s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wp threadSet_weak_sch_act_wf getObject_tcb_wp removeFromBitmap_weak_sch_act_wf + | simp add: crunch_simps threadGet_def)+ + apply (clarsimp simp: obj_at'_def) done lemma dequeue_nothing_eq[simp]: @@ -2009,44 +2784,343 @@ lemma gets_the_exec: "f s \ None \ (do x \ ge return_def assert_opt_def) done +lemma tcbQueueRemove_no_fail: + "no_fail (\s. tcb_at' tcbPtr s + \ (\ts. list_queue_relation ts queue (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ sym_heap_sched_pointers s \ valid_objs' s) + (tcbQueueRemove queue tcbPtr)" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (frule (1) ko_at_valid_objs') + apply (fastforce simp: projectKOs) + apply (clarsimp simp: list_queue_relation_def) + apply (prop_tac "tcbQueueHead queue \ Some tcbPtr \ tcbSchedPrevs_of s tcbPtr \ None") + apply (rule impI) + apply (frule not_head_prev_not_None[where p=tcbPtr]) + apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (fastforce dest: heap_path_head) + apply fastforce + apply (fastforce simp: opt_map_def obj_at'_def valid_tcb'_def valid_bound_tcb'_def) + by (fastforce dest!: not_last_next_not_None[where p=tcbPtr] + simp: queue_end_valid_def opt_map_def obj_at'_def valid_obj'_def valid_tcb'_def + projectKOs) + +crunch (no_fail) no_fail[wp]: removeFromBitmap + +crunches removeFromBitmap + for ready_queues_relation[wp]: "ready_queues_relation s" + and list_queue_relation[wp]: + "\s'. 
list_queue_relation ts (P (ksReadyQueues s')) + (tcbSchedNexts_of s') (tcbSchedPrevs_of s')" + (simp: bitmap_fun_defs ready_queues_relation_def) + +\ \ + A direct analogue of tcbQueueRemove, used in tcb_sched_dequeue' below, so that within the proof of + tcbQueueRemove_corres, we may reason in terms of the list operations used within this function + rather than @{term filter}.\ +definition tcb_queue_remove :: "'a \ 'a list \ 'a list" where + "tcb_queue_remove a ls \ + if ls = [a] + then [] + else if a = hd ls + then tl ls + else if a = last ls + then butlast ls + else list_remove ls a" + +definition tcb_sched_dequeue' :: "obj_ref \ unit det_ext_monad" where + "tcb_sched_dequeue' tcb_ptr \ do + d \ ethread_get tcb_domain tcb_ptr; + prio \ ethread_get tcb_priority tcb_ptr; + queue \ get_tcb_queue d prio; + when (tcb_ptr \ set queue) $ set_tcb_queue d prio (tcb_queue_remove tcb_ptr queue) + od" + +lemma filter_tcb_queue_remove: + "\a \ set ls; distinct ls \ \ filter ((\) a) ls = tcb_queue_remove a ls" + apply (clarsimp simp: tcb_queue_remove_def) + apply (intro conjI impI) + apply (fastforce elim: filter_hd_equals_tl) + apply (fastforce elim: filter_last_equals_butlast) + apply (fastforce elim: filter_hd_equals_tl) + apply (frule split_list) + apply (clarsimp simp: list_remove_middle_distinct) + apply (subst filter_True | clarsimp simp: list_remove_none)+ + done + +lemma tcb_sched_dequeue_monadic_rewrite: + "monadic_rewrite False True (is_etcb_at t and (\s. \d p. distinct (ready_queues s d p))) + (tcb_sched_action tcb_sched_dequeue t) (tcb_sched_dequeue' t)" + supply if_split[split del] + apply (clarsimp simp: tcb_sched_dequeue'_def tcb_sched_dequeue_def tcb_sched_action_def + set_tcb_queue_def) + apply (rule monadic_rewrite_bind_tail)+ + apply (clarsimp simp: when_def) + apply (rule monadic_rewrite_if_r) + apply (rule_tac P="\_. distinct queue" in monadic_rewrite_guard_arg_cong) + apply (frule (1) filter_tcb_queue_remove) + apply (metis (mono_tags, lifting) filter_cong) + apply (rule monadic_rewrite_modify_noop) + apply (wpsimp wp: thread_get_wp)+ + apply (clarsimp simp: etcb_at_def split: option.splits) + apply (prop_tac "(\d' p. if d' = tcb_domain x2 \ p = tcb_priority x2 + then filter (\x. x \ t) (ready_queues s (tcb_domain x2) (tcb_priority x2)) + else ready_queues s d' p) + = ready_queues s") + apply (subst filter_True) + apply fastforce + apply (fastforce split: if_splits) + apply fastforce + done + +crunches removeFromBitmap + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + +lemma list_queue_relation_neighbour_in_set: + "\list_queue_relation ls q hp hp'; sym_heap hp hp'; p \ set ls\ + \ \nbr. (hp p = Some nbr \ nbr \ set ls) \ (hp' p = Some nbr \ nbr \ set ls)" + apply (rule heap_ls_neighbour_in_set) + apply (fastforce simp: list_queue_relation_def) + apply fastforce + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def) + apply fastforce + done + +lemma in_queue_not_head_or_not_tail_length_gt_1: + "\tcbPtr \ set ls; tcbQueueHead q \ Some tcbPtr \ tcbQueueEnd q \ Some tcbPtr; + list_queue_relation ls q nexts prevs\ + \ Suc 0 < length ls" + apply (clarsimp simp: list_queue_relation_def) + apply (cases ls; fastforce simp: queue_end_valid_def) + done + lemma tcbSchedDequeue_corres: - "corres dc (is_etcb_at t) (tcb_at' t and Invariants_H.valid_queues) - (tcb_sched_action tcb_sched_dequeue t) (tcbSchedDequeue t)" - apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) - apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. 
tcb_at' t and Invariants_H.valid_queues and obj_at' (\obj. tcbQueued obj = rv) t"]) - defer - apply (wp threadGet_obj_at', simp, simp) - apply (rule no_fail_pre, wp, simp) - apply (case_tac queued) - defer - apply (simp add: when_def) - apply (rule corres_no_failI) - apply (wp) - apply (clarsimp simp: in_monad ethread_get_def set_tcb_queue_def is_etcb_at_def state_relation_def) - apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") - prefer 2 - subgoal by (force simp: tcb_sched_dequeue_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def - ready_queues_relation_def obj_at'_def inQ_def projectKO_eq project_inject) - apply (subst gets_the_exec) - apply (simp add: get_etcb_def) - apply (subst gets_the_exec) - apply (simp add: get_etcb_def) - apply (simp add: exec_gets simpler_modify_def get_etcb_def ready_queues_relation_def cong: if_cong get_tcb_queue_def) - apply (simp add: when_def) - apply (rule corres_guard_imp) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (rule ethreadget_corres, simp add: etcb_relation_def) - apply (rule corres_split[where r'="(=)"]) - apply (simp, rule getQueue_corres) - apply (rule corres_split_noop_rhs2) - apply (simp add: tcb_sched_dequeue_def) - apply (rule setQueue_corres) - apply (rule corres_split_noop_rhs) - apply (clarsimp, rule removeFromBitmap_corres_noop) - apply (rule threadSet_corres_noop; simp_all add: tcb_relation_def exst_same_def) - apply (wp | simp)+ + "tcb_ptr = tcbPtr \ + corres dc + (in_correct_ready_q and ready_qs_distinct and valid_etcbs and tcb_at tcb_ptr + and pspace_aligned and pspace_distinct) + (sym_heap_sched_pointers and valid_objs') + (tcb_sched_action tcb_sched_dequeue tcb_ptr) (tcbSchedDequeue tcbPtr)" + supply heap_path_append[simp del] fun_upd_apply[simp del] distinct_append[simp del] + list_remove_append[simp del] projectKOs[simp] + apply (rule_tac Q'="tcb_at' tcbPtr" in corres_cross_add_guard) + apply (fastforce intro!: tcb_at_cross simp: obj_at_def is_tcb_def) + apply (rule_tac Q'=pspace_aligned' in corres_cross_add_guard) + apply (fastforce dest: pspace_aligned_cross) + apply (rule_tac Q'=pspace_distinct' in corres_cross_add_guard) + apply (fastforce dest: pspace_distinct_cross) + apply (rule monadic_rewrite_corres_l[where P=P and Q=P for P, simplified]) + apply (rule monadic_rewrite_guard_imp[OF tcb_sched_dequeue_monadic_rewrite]) + apply (fastforce dest: tcb_at_is_etcb_at simp: in_correct_ready_q_def ready_qs_distinct_def) + apply (clarsimp simp: tcb_sched_dequeue'_def get_tcb_queue_def tcbSchedDequeue_def getQueue_def + unless_def when_def) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac dom) + apply (rule corres_symb_exec_l[OF _ _ ethread_get_sp]; wpsimp?) + apply (rename_tac prio) + apply (rule corres_symb_exec_l[OF _ _ gets_sp]; (solves wpsimp)?) + apply (rule corres_stateAssert_ignore) + apply (fastforce intro: ksReadyQueues_asrt_cross) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; (solves wpsimp)?) + apply (rule corres_if_strong'; fastforce?) + apply (frule state_relation_ready_queues_relation) + apply (frule in_ready_q_tcbQueued_eq[where t=tcbPtr]) + apply (fastforce simp: obj_at'_def opt_pred_def opt_map_def obj_at_def is_tcb_def + in_correct_ready_q_def etcb_at_def is_etcb_at_def) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) + apply (rule corres_symb_exec_r[OF _ threadGet_sp]; wpsimp?) 
+ apply (rule corres_symb_exec_r[OF _ gets_sp]; wpsimp?) + apply (rule corres_from_valid_det) + apply (fastforce intro: det_wp_modify det_wp_pre simp: set_tcb_queue_def) + apply (wpsimp wp: tcbQueueRemove_no_fail) + apply (fastforce dest: state_relation_ready_queues_relation + simp: ex_abs_underlying_def ready_queues_relation_def ready_queue_relation_def + Let_def inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (clarsimp simp: state_relation_def) + apply (intro hoare_vcg_conj_lift_pre_fix; + (solves \frule singleton_eqD, frule set_tcb_queue_projs_inv, wpsimp simp: swp_def\)?) + + \ \ready_queues_relation\ + apply (clarsimp simp: ready_queues_relation_def ready_queue_relation_def Let_def) + apply (intro hoare_allI) + apply (drule singleton_eqD) + apply (drule set_tcb_queue_new_state) + apply (wpsimp wp: threadSet_wp getObject_tcb_wp + simp: setQueue_def tcbQueueRemove_def + split_del: if_split) + apply (frule (1) tcb_at_is_etcb_at) + apply (clarsimp simp: obj_at_def is_etcb_at_def etcb_at_def) + apply normalise_obj_at' + apply (rename_tac s d p s' tcb' tcb etcb) + apply (frule_tac t=tcbPtr in ekheap_relation_tcb_domain_priority) + apply (force simp: obj_at_def) + apply (force simp: obj_at'_def) + + apply (case_tac "d \ tcb_domain etcb \ p \ tcb_priority etcb") + apply clarsimp + apply (cut_tac p=tcbPtr and ls="ready_queues s (tcb_domain etcb) (tcb_priority etcb)" + in list_queue_relation_neighbour_in_set) + apply (fastforce dest!: spec) + apply fastforce + apply fastforce + apply (cut_tac xs="ready_queues s d p" in heap_path_head') + apply (force dest!: spec simp: ready_queues_relation_def Let_def list_queue_relation_def) + apply (cut_tac d=d and d'="tcb_domain etcb" and p=p and p'="tcb_priority etcb" + in ready_queues_disjoint) + apply force + apply fastforce + apply fastforce + apply (cut_tac ts="ready_queues s d p" in list_queue_relation_nil) + apply fast + apply (clarsimp simp: tcbQueueEmpty_def) + apply (prop_tac "Some tcbPtr \ tcbQueueHead (ksReadyQueues s' (d, p))") + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply (prop_tac "tcbPtr \ set (ready_queues s d p)") + apply blast + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI; clarsimp) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (force intro!: heap_path_heap_upd_not_in simp: fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (force simp: prev_queue_head_heap_upd fun_upd_apply) + apply (clarsimp simp: inQ_def in_opt_pred fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + apply (clarsimp simp: etcb_at_def obj_at'_def) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (force simp: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply assumption + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply split: if_splits) + apply (force simp: not_emptyI opt_map_red) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + 
apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (force simp: prev_queue_head_def fun_upd_apply opt_map_red opt_map_upd_triv) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (clarsimp simp: fun_upd_apply) + apply (clarsimp simp: fun_upd_apply) + + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI) + apply (intro heap_path_heap_upd_not_in) + apply (simp add: fun_upd_apply) + apply (force simp: not_emptyI opt_map_red) + apply (force simp: not_emptyI opt_map_red) + apply fastforce + apply (clarsimp simp: opt_map_red opt_map_upd_triv) + apply (intro prev_queue_head_heap_upd) + apply (force dest!: spec) + apply (metis hd_in_set not_emptyI option.sel option.simps(2)) + apply fastforce + subgoal + by (clarsimp simp: inQ_def opt_map_def opt_pred_def fun_upd_apply + split: if_splits option.splits) + + \ \d = tcb_domain tcb \ p = tcb_priority tcb\ + apply clarsimp + apply (drule_tac x="tcb_domain etcb" in spec) + apply (drule_tac x="tcb_priority etcb" in spec) + apply (clarsimp simp: list_queue_relation_def) + apply (frule heap_path_head') + apply (frule heap_ls_distinct) + apply (intro conjI; clarsimp simp: tcbQueueEmpty_def) + + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (intro conjI) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply tcb_queue_remove_def queue_end_valid_def heap_ls_unique + heap_path_last_end) + apply (simp add: fun_upd_apply prev_queue_head_def) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: tcb_queue_remove_def inQ_def opt_pred_def fun_upd_apply) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the head of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fastforce + apply (fastforce simp: list_queue_relation_def) + apply (frule list_not_head) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_tail_nonempty) + apply (frule (2) heap_ls_next_of_hd) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI allI) + apply (drule (1) heap_ls_remove_head_not_singleton) + apply (clarsimp simp: opt_map_red opt_map_upd_triv fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply last_tl) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply) + apply (case_tac "ready_queues s (tcb_domain etcb) (tcb_priority etcb)"; + clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (intro conjI; clarsimp) + + \ \tcbPtr is the end of the ready queue\ + apply (frule set_list_mem_nonempty) + apply (frule in_queue_not_head_or_not_tail_length_gt_1) + apply fast + apply (force dest!: spec simp: list_queue_relation_def) + apply (clarsimp simp: queue_end_valid_def) + apply (frule list_not_last) + apply (clarsimp simp: tcb_queue_remove_def) + apply (frule length_gt_1_imp_butlast_nonempty) + apply (frule (3) heap_ls_prev_of_last) + apply (clarsimp simp: obj_at'_def) + apply (intro conjI impI; clarsimp?) 
+ apply (drule (1) heap_ls_remove_last_not_singleton) + apply (force elim!: rsubst3[where P=heap_ls] simp: opt_map_def fun_upd_apply) + apply (clarsimp simp: opt_map_def fun_upd_apply) + apply (clarsimp simp: prev_queue_head_def fun_upd_apply opt_map_def) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply split: option.splits) + apply (meson distinct_in_butlast_not_last in_set_butlastD last_in_set not_last_in_set_butlast) + + \ \tcbPtr is in the middle of the ready queue\ + apply (clarsimp simp: obj_at'_def) + apply (frule set_list_mem_nonempty) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []", fastforce simp: queue_end_valid_def) + apply clarsimp + apply (frule (2) ptr_in_middle_prev_next) + apply fastforce + apply (clarsimp simp: tcb_queue_remove_def) + apply (prop_tac "tcbPtr \ last xs") + apply (clarsimp simp: distinct_append) + apply (prop_tac "tcbPtr \ hd ys") + apply (fastforce dest: hd_in_set simp: distinct_append) + apply (prop_tac "last xs \ hd ys") + apply (metis distinct_decompose2 hd_Cons_tl last_in_set) + apply (prop_tac "list_remove (xs @ tcbPtr # ys) tcbPtr = xs @ ys") + apply (simp add: list_remove_middle_distinct) + apply (intro conjI impI allI; (solves \clarsimp simp: distinct_append\)?) + apply (fastforce elim!: rsubst3[where P=heap_ls] + dest!: heap_ls_remove_middle hd_in_set last_in_set + simp: distinct_append not_emptyI opt_map_def fun_upd_apply) + apply (clarsimp simp: queue_end_valid_def fun_upd_apply) + apply (case_tac xs; + fastforce simp: prev_queue_head_def opt_map_def fun_upd_apply distinct_append) + apply (clarsimp simp: inQ_def opt_pred_def opt_map_def fun_upd_apply distinct_append + split: option.splits) done lemma thread_get_test: "do cur_ts \ get_thread_state cur; g (test cur_ts) od = @@ -2054,7 +3128,9 @@ lemma thread_get_test: "do cur_ts \ get_thread_state cur; g (test cur apply (simp add: get_thread_state_def thread_get_def) done -lemma thread_get_isRunnable_corres: "corres (=) (tcb_at t) (tcb_at' t) (thread_get (\tcb. runnable (tcb_state tcb)) t) (isRunnable t)" +lemma thread_get_isRunnable_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (\tcb. runnable (tcb_state tcb)) t) (isRunnable t)" apply (simp add: isRunnable_def getThreadState_def threadGet_def thread_get_def) apply (fold liftM_def) @@ -2068,8 +3144,8 @@ lemma thread_get_isRunnable_corres: "corres (=) (tcb_at t) (tcb_at' t) (thread_g lemma setThreadState_corres: "thread_state_relation ts ts' \ corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (set_thread_state t ts) (setThreadState ts' t)" (is "?tsr \ corres dc ?Pre ?Pre' ?sts ?sts'") apply (simp add: set_thread_state_def setThreadState_def) @@ -2093,8 +3169,8 @@ lemma setThreadState_corres: lemma setBoundNotification_corres: "corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (set_bound_notification t ntfn) (setBoundNotification ntfn t)" apply (simp add: set_bound_notification_def setBoundNotification_def) apply (subst thread_set_def[simplified, symmetric]) @@ -2103,31 +3179,85 @@ lemma setBoundNotification_corres: crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification for tcb'[wp]: "tcb_at' addr" - (simp: unless_def) + +lemma tcbSchedNext_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedNext_update (\_. ptrOpt)) tcbPtr + \\_. 
valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbSchedPrev_update_valid_objs'[wp]: + "\valid_objs' and valid_bound_tcb' ptrOpt\ + threadSet (tcbSchedPrev_update (\_. ptrOpt)) tcbPtr + \\_. valid_objs'\" + apply (wpsimp wp: threadSet_valid_objs') + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma tcbQueuePrepend_valid_objs'[wp]: + "\\s. valid_objs' s \ tcb_at' tcbPtr s + \ (\ tcbQueueEmpty queue \ tcb_at' (the (tcbQueueHead queue)) s)\ + tcbQueuePrepend queue tcbPtr + \\_. valid_objs'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: hoare_vcg_if_lift2 hoare_vcg_imp_lift' simp: tcbQueueEmpty_def) + +crunches addToBitmap + for valid_objs'[wp]: valid_objs' + (simp: unless_def crunch_simps wp: crunch_wps) + +lemma tcbSchedEnqueue_valid_objs'[wp]: + "\valid_objs' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. valid_objs'\" + unfolding tcbSchedEnqueue_def setQueue_def + apply (wpsimp wp: threadSet_valid_objs' getObject_tcb_wp simp: threadGet_def) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + done crunches rescheduleRequired, removeFromBitmap for valid_objs'[wp]: valid_objs' (simp: crunch_simps) -lemma tcbSchedDequeue_valid_objs' [wp]: "\ valid_objs' \ tcbSchedDequeue t \\_. valid_objs' \" - unfolding tcbSchedDequeue_def - apply (wp threadSet_valid_objs') - apply (clarsimp simp add: valid_tcb'_def tcb_cte_cases_def) - apply wp - apply (simp add: if_apply_def2) - apply (wp hoare_drop_imps) - apply (wp | simp cong: if_cong add: valid_tcb'_def tcb_cte_cases_def if_apply_def2)+ +lemmas ko_at_valid_objs'_pre = + ko_at_valid_objs'[simplified project_inject, atomized, simplified, rule_format] + +lemmas ep_ko_at_valid_objs_valid_ep' = + ko_at_valid_objs'_pre[where 'a=endpoint, simplified injectKO_defs valid_obj'_def, simplified] + +lemmas ntfn_ko_at_valid_objs_valid_ntfn' = + ko_at_valid_objs'_pre[where 'a=notification, simplified injectKO_defs valid_obj'_def, + simplified] + +lemmas tcb_ko_at_valid_objs_valid_tcb' = + ko_at_valid_objs'_pre[where 'a=tcb, simplified injectKO_defs valid_obj'_def, simplified] + +lemma tcbQueueRemove_valid_objs'[wp]: + "tcbQueueRemove queue tcbPtr \valid_objs'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (fastforce dest!: tcb_ko_at_valid_objs_valid_tcb' + simp: valid_tcb'_def valid_bound_tcb'_def obj_at'_def) done +lemma tcbSchedDequeue_valid_objs'[wp]: + "tcbSchedDequeue t \valid_objs'\" + unfolding tcbSchedDequeue_def setQueue_def + by (wpsimp wp: threadSet_valid_objs') + lemma sts_valid_objs': - "\valid_objs' and valid_tcb_state' st\ - setThreadState st t - \\rv. valid_objs'\" - apply (simp add: setThreadState_def setQueue_def isRunnable_def isStopped_def) - apply (wp threadSet_valid_objs') - apply (simp add: valid_tcb'_def tcb_cte_cases_def) - apply (wp threadSet_valid_objs' | simp)+ - apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def) + "\valid_objs' and valid_tcb_state' st and pspace_aligned' and pspace_distinct'\ + setThreadState st t + \\_. valid_objs'\" + apply (wpsimp simp: setThreadState_def wp: threadSet_valid_objs') + apply (rule_tac Q="\_. 
valid_objs' and pspace_aligned' and pspace_distinct'" in hoare_post_imp) + apply fastforce + apply (wpsimp wp: threadSet_valid_objs') + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) done lemma sbn_valid_objs': @@ -2170,19 +3300,12 @@ lemma sts'_valid_pspace'_inv[wp]: apply (simp add: tcb_cte_cases_def) done -crunch ct[wp]: setQueue "\s. P (ksCurThread s)" - -crunch cur_domain[wp]: setQueue "\s. P (ksCurDomain s)" - -crunch ct'[wp]: addToBitmap "\s. P (ksCurThread s)" -crunch ct'[wp]: removeFromBitmap "\s. P (ksCurThread s)" - lemma setQueue_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t\ setQueue d p xs \\_. tcb_in_cur_domain' t\" -apply (simp add: setQueue_def tcb_in_cur_domain'_def) -apply wp -apply (simp add: ps_clear_def projectKOs obj_at'_def) -done + apply (simp add: setQueue_def tcb_in_cur_domain'_def) + apply wp + apply (simp add: ps_clear_def projectKOs obj_at'_def) + done lemma sbn'_valid_pspace'_inv[wp]: "\ valid_pspace' and tcb_at' t and valid_bound_ntfn' ntfn \ @@ -2215,18 +3338,6 @@ lemma setQueue_valid_bitmapQ_except[wp]: unfolding setQueue_def bitmapQ_defs by (wp, clarsimp simp: bitmapQ_def) -lemma setQueue_valid_bitmapQ: (* enqueue only *) - "\ valid_bitmapQ and (\s. (ksReadyQueues s (d, p) = []) = (ts = [])) \ - setQueue d p ts - \\_. valid_bitmapQ \" - unfolding setQueue_def bitmapQ_defs - by (wp, clarsimp simp: bitmapQ_def) - -lemma setQueue_valid_queues': - "\valid_queues' and (\s. \t. obj_at' (inQ d p) t s \ t \ set ts)\ - setQueue d p ts \\_. valid_queues'\" - by (wp | simp add: valid_queues'_def setQueue_def)+ - lemma setQueue_cur: "\\s. cur_tcb' s\ setQueue d p ts \\rv s. cur_tcb' s\" unfolding setQueue_def cur_tcb'_def @@ -2348,14 +3459,14 @@ lemma threadSet_queued_sch_act_wf[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ threadSet (tcbQueued_update f) t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - including no_pre + including classic_wp_pre apply (simp add: sch_act_wf_cases split: scheduler_action.split) apply (wp hoare_vcg_conj_lift) apply (simp add: threadSet_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ apply (clarsimp simp: obj_at'_def) apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ apply (simp add: threadSet_def) @@ -2364,9 +3475,17 @@ lemma threadSet_queued_sch_act_wf[wp]: apply (wp tcb_in_cur_domain'_lift | simp add: obj_at'_def)+ done +lemma tcbSchedNext_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedNext_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + +lemma tcbSchedPrev_update_pred_tcb_at'[wp]: + "threadSet (tcbSchedPrev_update f) t \\s. P (pred_tcb_at' proj P' t' s)\" + by (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + lemma tcbSchedEnqueue_pred_tcb_at'[wp]: "\\s. pred_tcb_at' proj P' t' s \ tcbSchedEnqueue t \\_ s. pred_tcb_at' proj P' t' s\" - apply (simp add: tcbSchedEnqueue_def when_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def when_def unless_def) apply (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ done @@ -2374,8 +3493,9 @@ lemma tcbSchedDequeue_sch_act_wf[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedDequeue t \\_ s. 
sch_act_wf (ksSchedulerAction s) s\" - unfolding tcbSchedDequeue_def - by (wp setQueue_sch_act | wp sch_act_wf_lift | simp add: if_apply_def2)+ + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wp setQueue_sch_act threadSet_tcbDomain_triv hoare_drop_imps + | wp sch_act_wf_lift | simp add: if_apply_def2)+ crunch nosch: tcbSchedDequeue "\s. P (ksSchedulerAction s)" @@ -2471,21 +3591,22 @@ lemma tcbSchedEnqueue_sch_act[wp]: "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedEnqueue t \\_ s. sch_act_wf (ksSchedulerAction s) s\" - by (simp add: tcbSchedEnqueue_def unless_def) - (wp setQueue_sch_act | wp sch_act_wf_lift | clarsimp)+ + by (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) + (wp setQueue_sch_act threadSet_tcbDomain_triv | wp sch_act_wf_lift | clarsimp)+ lemma tcbSchedEnqueue_weak_sch_act[wp]: "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ tcbSchedEnqueue t \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" - apply (simp add: tcbSchedEnqueue_def unless_def) + apply (simp add: tcbSchedEnqueue_def tcbQueuePrepend_def unless_def) apply (wp setQueue_sch_act threadSet_weak_sch_act_wf | clarsimp)+ done -lemma threadGet_wp: "\\s. tcb_at' t s \ (\tcb. ko_at' tcb t s \ P (f tcb) s)\ threadGet f t \P\" +lemma threadGet_wp: + "\\s. \tcb. ko_at' tcb t s \ P (f tcb) s\ threadGet f t \P\" apply (simp add: threadGet_def) apply (wp getObject_tcb_wp) - apply clarsimp + apply (clarsimp simp: obj_at'_def) done lemma threadGet_const: @@ -2531,14 +3652,6 @@ lemma addToBitmap_bitmapQ: by (wpsimp simp: bitmap_fun_defs bitmapQ_def prioToL1Index_bit_set prioL2Index_bit_set simp_del: bit_exp_iff) -lemma addToBitmap_valid_queues_no_bitmap_except: -" \ valid_queues_no_bitmap_except t \ - addToBitmap d p - \\_. valid_queues_no_bitmap_except t \" - unfolding addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def valid_queues_no_bitmap_except_def - by (wp, clarsimp) - crunch norq[wp]: addToBitmap "\s. P (ksReadyQueues s)" (wp: updateObject_cte_inv hoare_drop_imps) crunch norq[wp]: removeFromBitmap "\s. P (ksReadyQueues s)" @@ -2570,9 +3683,8 @@ lemma prioToL1Index_complement_nth_w2p: lemma valid_bitmapQ_exceptE: "\ valid_bitmapQ_except d' p' s ; d \ d' \ p \ p' \ - \ bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" - unfolding valid_bitmapQ_except_def - by blast + \ bitmapQ d p s = (\ tcbQueueEmpty (ksReadyQueues s (d, p)))" + by (fastforce simp: valid_bitmapQ_except_def) lemma invertL1Index_eq_cancelD: "\ invertL1Index i = invertL1Index j ; i < l2BitmapSize ; j < l2BitmapSize \ @@ -2599,7 +3711,6 @@ lemma removeFromBitmap_bitmapQ_no_L2_orphans[wp]: unfolding bitmap_fun_defs apply (wp, clarsimp simp: bitmap_fun_defs bitmapQ_no_L2_orphans_def)+ apply (rule conjI, clarsimp) - apply (rule conjI, clarsimp) apply (clarsimp simp: complement_nth_w2p l2BitmapSize_def') apply clarsimp apply metis @@ -2688,22 +3799,15 @@ lemma addToBitmap_valid_bitmapQ_except: done lemma addToBitmap_valid_bitmapQ: -" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and - (\s. ksReadyQueues s (d,p) \ []) \ - addToBitmap d p - \\_. valid_bitmapQ \" -proof - - have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and - (\s. ksReadyQueues s (d,p) \ []) \ - addToBitmap d p - \\_. valid_bitmapQ_except d p and - bitmapQ_no_L2_orphans and (\s. 
bitmapQ d p s \ ksReadyQueues s (d,p) \ []) \" - by (wp addToBitmap_valid_queues_no_bitmap_except addToBitmap_valid_bitmapQ_except - addToBitmap_bitmapQ_no_L2_orphans addToBitmap_bitmapQ; simp) - - thus ?thesis - by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) -qed + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans + and (\s. \ tcbQueueEmpty (ksReadyQueues s (d,p)))\ + addToBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. ?pre s \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done lemma threadGet_const_tcb_at: "\\s. tcb_at' t s \ obj_at' (P s \ f) t s\ threadGet f t \\rv s. P s rv \" @@ -2721,12 +3825,6 @@ lemma threadGet_const_tcb_at_imp_lift: apply (clarsimp simp: obj_at'_def) done -lemma valid_queues_no_bitmap_objD: - "\ valid_queues_no_bitmap s; t \ set (ksReadyQueues s (d, p))\ - \ obj_at' (inQ d p and runnable' \ tcbState) t s" - unfolding valid_queues_no_bitmap_def - by metis - lemma setQueue_bitmapQ_no_L1_orphans[wp]: "\ bitmapQ_no_L1_orphans \ setQueue d p ts @@ -2746,136 +3844,17 @@ lemma setQueue_sets_queue[wp]: unfolding setQueue_def by (wp, simp) -lemma tcbSchedEnqueueOrAppend_valid_queues: - (* f is either (t#ts) or (ts @ [t]), so we define its properties generally *) - assumes f_set[simp]: "\ts. t \ set (f ts)" - assumes f_set_insert[simp]: "\ts. set (f ts) = insert t (set ts)" - assumes f_not_empty[simp]: "\ts. f ts \ []" - assumes f_distinct: "\ts. \ distinct ts ; t \ set ts \ \ distinct (f ts)" - shows "\Invariants_H.valid_queues and st_tcb_at' runnable' t and valid_objs' \ - do queued \ threadGet tcbQueued t; - unless queued $ - do tdom \ threadGet tcbDomain t; - prio \ threadGet tcbPriority t; - queue \ getQueue tdom prio; - setQueue tdom prio $ f queue; - when (null queue) $ addToBitmap tdom prio; - threadSet (tcbQueued_update (\_. True)) t - od - od - \\_. Invariants_H.valid_queues\" -proof - - - define could_run where "could_run == - \d p t. obj_at' (\tcb. inQ d p (tcbQueued_update (\_. True) tcb) \ runnable' (tcbState tcb)) t" - - have addToBitmap_could_run: - "\d p. \\s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\ - addToBitmap d p - \\_ s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\" - unfolding bitmap_fun_defs - by (wp, clarsimp simp: could_run_def) - - have setQueue_valid_queues_no_bitmap_except: - "\d p ts. - \ valid_queues_no_bitmap_except t and - (\s. ksReadyQueues s (d, p) = ts \ p \ maxPriority \ d \ maxDomain \ t \ set ts) \ - setQueue d p (f ts) - \\rv. valid_queues_no_bitmap_except t\" - unfolding setQueue_def valid_queues_no_bitmap_except_def null_def - by (wp, auto intro: f_distinct) - - have threadSet_valid_queues_could_run: - "\f. \ valid_queues_no_bitmap_except t and - (\s. \d p. t \ set (ksReadyQueues s (d,p)) \ could_run d p t s) and - valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans \ - threadSet (tcbQueued_update (\_. True)) t - \\rv. Invariants_H.valid_queues \" - unfolding threadSet_def could_run_def - apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) - apply (rule hoare_pre) - apply (simp add: valid_queues_def valid_queues_no_bitmap_def) - apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift - setObject_tcb_strongest) - apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def) - done - - have setQueue_could_run: "\d p ts. - \ valid_queues and (\_. 
t \ set ts) and - (\s. could_run d p t s) \ - setQueue d p ts - \\rv s. (\d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s)\" - unfolding setQueue_def valid_queues_def could_run_def - by wp (fastforce dest: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def) - - note hoare_vcg_if_lift[wp] hoare_vcg_conj_lift[wp] hoare_vcg_const_imp_lift[wp] - - show ?thesis - unfolding tcbSchedEnqueue_def null_def - apply (rule hoare_pre) - apply (rule hoare_seq_ext) - apply (simp add: unless_def) - apply (wp threadSet_valid_queues_could_run) - apply (wp addToBitmap_could_run addToBitmap_valid_bitmapQ - addToBitmap_valid_queues_no_bitmap_except addToBitmap_bitmapQ_no_L2_orphans)+ - apply (wp setQueue_valid_queues_no_bitmap_except setQueue_could_run - setQueue_valid_bitmapQ_except setQueue_sets_queue setQueue_valid_bitmapQ)+ - apply (wp threadGet_const_tcb_at_imp_lift | simp add: if_apply_def2)+ - apply clarsimp - apply (frule pred_tcb_at') - apply (frule (1) valid_objs'_maxDomain) - apply (frule (1) valid_objs'_maxPriority) - apply (clarsimp simp: valid_queues_def st_tcb_at'_def obj_at'_def valid_queues_no_bitmap_exceptI) - apply (fastforce dest!: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def could_run_def) - done -qed - -lemma tcbSchedEnqueue_valid_queues[wp]: - "\Invariants_H.valid_queues - and st_tcb_at' runnable' t - and valid_objs' \ - tcbSchedEnqueue t - \\_. Invariants_H.valid_queues\" - unfolding tcbSchedEnqueue_def - by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) - -lemma tcbSchedAppend_valid_queues[wp]: - "\Invariants_H.valid_queues - and st_tcb_at' runnable' t - and valid_objs' \ - tcbSchedAppend t - \\_. Invariants_H.valid_queues\" - unfolding tcbSchedAppend_def - by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) - -lemma rescheduleRequired_valid_queues[wp]: - "\\s. Invariants_H.valid_queues s \ valid_objs' s \ - weak_sch_act_wf (ksSchedulerAction s) s\ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - apply (fastforce simp: weak_sch_act_wf_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) - done - -lemma rescheduleRequired_valid_queues_sch_act_simple: - "\Invariants_H.valid_queues and sch_act_simple\ - rescheduleRequired - \\_. Invariants_H.valid_queues\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: Invariants_H.valid_queues_def sch_act_simple_def)+ - done - lemma rescheduleRequired_valid_bitmapQ_sch_act_simple: "\ valid_bitmapQ and sch_act_simple\ rescheduleRequired \\_. valid_bitmapQ \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. valid_bitmapQ s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. valid_bitmapQ s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" + in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done @@ -2883,12 +3862,13 @@ lemma rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple: "\ bitmapQ_no_L1_orphans and sch_act_simple\ rescheduleRequired \\_. bitmapQ_no_L1_orphans \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. bitmapQ_no_L1_orphans s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. 
bitmapQ_no_L1_orphans s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" + in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done @@ -2896,149 +3876,44 @@ lemma rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple: "\ bitmapQ_no_L2_orphans and sch_act_simple\ rescheduleRequired \\_. bitmapQ_no_L2_orphans \" - including no_pre + including classic_wp_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. bitmapQ_no_L2_orphans s \ - (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. bitmapQ_no_L2_orphans s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" + in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply (wp, fastforce) done lemma sts_valid_bitmapQ_sch_act_simple: "\valid_bitmapQ and sch_act_simple\ - setThreadState st t + setThreadState st t \\_. valid_bitmapQ \" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_valid_bitmapQ_sch_act_simple threadSet_valid_bitmapQ [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma sts_valid_bitmapQ_no_L2_orphans_sch_act_simple: "\ bitmapQ_no_L2_orphans and sch_act_simple\ - setThreadState st t + setThreadState st t \\_. bitmapQ_no_L2_orphans \" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple threadSet_valid_bitmapQ_no_L2_orphans [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma sts_valid_bitmapQ_no_L1_orphans_sch_act_simple: "\ bitmapQ_no_L1_orphans and sch_act_simple\ - setThreadState st t + setThreadState st t \\_. bitmapQ_no_L1_orphans \" apply (simp add: setThreadState_def) apply (wp rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple threadSet_valid_bitmapQ_no_L1_orphans [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - -lemma sts_valid_queues: - "\\s. Invariants_H.valid_queues s \ - ((\p. t \ set(ksReadyQueues s p)) \ runnable' st)\ - setThreadState st t \\rv. Invariants_H.valid_queues\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_valid_queues_sch_act_simple - threadSet_valid_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - -lemma sbn_valid_queues: - "\\s. Invariants_H.valid_queues s\ - setBoundNotification ntfn t \\rv. Invariants_H.valid_queues\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_queues [THEN hoare_strengthen_post]) - apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ - done - - - -lemma addToBitmap_valid_queues'[wp]: - "\ valid_queues' \ addToBitmap d p \\_. valid_queues' \" - unfolding valid_queues'_def addToBitmap_def - modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def - by (wp, simp) - -lemma tcbSchedEnqueue_valid_queues'[wp]: - "\valid_queues' and st_tcb_at' runnable' t \ - tcbSchedEnqueue t - \\_. valid_queues'\" - apply (simp add: tcbSchedEnqueue_def) - apply (rule hoare_pre) - apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t" - in hoare_seq_ext) - apply (rename_tac queued) - apply (case_tac queued; simp_all add: unless_def when_def) - apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ - apply (subst conj_commute, wp) - apply (rule hoare_pre_post, assumption) - apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def - getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) - apply wp - apply fastforce - apply wp - apply (subst conj_commute) - apply clarsimp - apply (rule_tac Q="\rv. valid_queues' - and obj_at' (\obj. \ tcbQueued obj) t - and obj_at' (\obj. tcbPriority obj = prio) t - and obj_at' (\obj. tcbDomain obj = tdom) t - and (\s. t \ set (ksReadyQueues s (tdom, prio)))" - in hoare_post_imp) - apply (clarsimp simp: valid_queues'_def obj_at'_def projectKOs inQ_def) - apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def inQ_def projectKOs valid_queues'_def) - apply (wp getObject_tcb_wp | simp add: threadGet_def)+ - apply (clarsimp simp: obj_at'_def) - done - -lemma rescheduleRequired_valid_queues'_weak[wp]: - "\\s. valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s\ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply wpsimp - apply (clarsimp simp: weak_sch_act_wf_def) - done - -lemma rescheduleRequired_valid_queues'_sch_act_simple: - "\valid_queues' and sch_act_simple\ - rescheduleRequired - \\_. valid_queues'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp | fastforce simp: valid_queues'_def sch_act_simple_def)+ - done - -lemma setThreadState_valid_queues'[wp]: - "\\s. valid_queues' s\ setThreadState st t \\rv. valid_queues'\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_valid_queues'_sch_act_simple) - apply (rule_tac Q="\_. valid_queues'" in hoare_post_imp) - apply (clarsimp simp: sch_act_simple_def) - apply (wp threadSet_valid_queues') - apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) - done - -lemma setBoundNotification_valid_queues'[wp]: - "\\s. valid_queues' s\ setBoundNotification ntfn t \\rv. valid_queues'\" - apply (simp add: setBoundNotification_def) - apply (wp threadSet_valid_queues') - apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) - done - -lemma valid_tcb'_tcbState_update: - "\ valid_tcb_state' st s; valid_tcb' tcb s \ \ valid_tcb' (tcbState_update (\_. st) tcb) s" - apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def valid_tcb_state'_def) - done - -lemma setThreadState_valid_objs'[wp]: - "\ valid_tcb_state' st and valid_objs' \ setThreadState st t \ \_. valid_objs' \" - apply (simp add: setThreadState_def) - apply (wp threadSet_valid_objs' | clarsimp simp: valid_tcb'_tcbState_update)+ + apply (clarsimp simp: sch_act_simple_def inQ_def)+ done lemma rescheduleRequired_ksQ: @@ -3047,10 +3922,10 @@ lemma rescheduleRequired_ksQ: \\_ s. P (ksReadyQueues s p)\" including no_pre apply (simp add: rescheduleRequired_def sch_act_simple_def) - apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) - \ P (ksReadyQueues s p)" in hoare_seq_ext) + apply (rule_tac Q'="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) \ P (ksReadyQueues s p)" + in bind_wp) apply wpsimp - apply (case_tac x; simp) + apply (case_tac rv; simp) apply wp done @@ -3066,17 +3941,6 @@ lemma sbn_ksQ: "\\s. P (ksReadyQueues s p)\ setBoundNotification ntfn t \\rv s. 
P (ksReadyQueues s p)\" by (simp add: setBoundNotification_def, wp) -lemma sts_ksQ: - "\\s. sch_act_simple s \ P (ksReadyQueues s p)\ - setThreadState st t - \\_ s. P (ksReadyQueues s p)\" - apply (simp add: setThreadState_def) - apply (wp rescheduleRequired_ksQ) - apply (rule_tac Q="\_ s. P (ksReadyQueues s p)" in hoare_post_imp) - apply (clarsimp simp: sch_act_simple_def)+ - apply (wp, simp) - done - lemma setQueue_ksQ[wp]: "\\s. P ((ksReadyQueues s)((d, p) := q))\ setQueue d p q @@ -3084,22 +3948,6 @@ lemma setQueue_ksQ[wp]: by (simp add: setQueue_def fun_upd_def[symmetric] | wp)+ -lemma tcbSchedEnqueue_ksQ: - "\\s. t' \ set (ksReadyQueues s p) \ t' \ t \ - tcbSchedEnqueue t \\_ s. t' \ set (ksReadyQueues s p)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wpsimp wp: hoare_vcg_imp_lift threadGet_wp) - apply (drule obj_at_ko_at') - apply fastforce - done - -lemma rescheduleRequired_ksQ': - "\\s. t \ set (ksReadyQueues s p) \ sch_act_not t s \ - rescheduleRequired \\_ s. t \ set (ksReadyQueues s p)\" - apply (simp add: rescheduleRequired_def) - apply (wpsimp wp: tcbSchedEnqueue_ksQ) - done - lemma threadSet_tcbState_st_tcb_at': "\\s. P st \ threadSet (tcbState_update (\_. st)) t \\_. st_tcb_at' P t\" apply (simp add: threadSet_def pred_tcb_at'_def) @@ -3110,36 +3958,6 @@ lemma isRunnable_const: "\st_tcb_at' runnable' t\ isRunnable t \\runnable _. runnable \" by (rule isRunnable_wp) -lemma sts_ksQ': - "\\s. (runnable' st \ ksCurThread s \ t) \ P (ksReadyQueues s p)\ - setThreadState st t - \\_ s. P (ksReadyQueues s p)\" - apply (simp add: setThreadState_def) - apply (rule hoare_pre_disj') - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] - threadSet_ksQ]]) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) - apply (clarsimp simp: when_def) - apply (case_tac x) - apply (clarsimp, wp)[1] - apply (clarsimp) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF threadSet_ct threadSet_ksQ]]) - apply (rule hoare_seq_ext [OF _ isRunnable_inv]) - apply (rule hoare_seq_ext [OF _ - hoare_vcg_conj_lift - [OF gct_wp gct_wp]]) - apply (rename_tac ct) - apply (case_tac "ct\t") - apply (clarsimp simp: when_def) - apply (wp)[1] - apply (clarsimp) - done - lemma valid_ipc_buffer_ptr'D: assumes yv: "y < unat max_ipc_words" and buf: "valid_ipc_buffer_ptr' a s" @@ -3266,14 +4084,16 @@ lemma thread_get_registers: done lemma getMRs_corres: - "corres (=) (tcb_at t) - (tcb_at' t and case_option \ valid_ipc_buffer_ptr' buf) + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) + (case_option \ valid_ipc_buffer_ptr' buf) (get_mrs t buf mi) (getMRs t buf (message_info_map mi))" proof - have S: "get = gets id" by (simp add: gets_def) - have T: "corres (\con regs. regs = map con msg_registers) (tcb_at t) (tcb_at' t) - (thread_get (arch_tcb_get_registers o tcb_arch) t) (asUser t (mapM getRegister X64_H.msgRegisters))" + have T: "corres (\con regs. 
regs = map con msg_registers) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (arch_tcb_get_registers o tcb_arch) t) + (asUser t (mapM getRegister X64_H.msgRegisters))" apply (subst thread_get_registers) apply (rule asUser_corres') apply (subst mapM_gets) @@ -3338,7 +4158,7 @@ lemma zipWithM_x_corres: apply (rule b) apply (rule a) apply (rule corres_trivial, simp) - apply (rule hoare_post_taut)+ + apply (rule hoare_TrueI)+ done @@ -3378,8 +4198,8 @@ lemma UserContext_fold: lemma setMRs_corres: assumes m: "mrs' = mrs" shows - "corres (=) (tcb_at t and case_option \ in_user_frame buf) - (tcb_at' t and case_option \ valid_ipc_buffer_ptr' buf) + "corres (=) (tcb_at t and case_option \ in_user_frame buf and pspace_aligned and pspace_distinct) + (case_option \ valid_ipc_buffer_ptr' buf) (set_mrs t buf mrs) (setMRs t buf mrs')" proof - have setRegister_def2: @@ -3440,9 +4260,9 @@ lemma copyMRs_corres: "corres (=) (tcb_at s and tcb_at r and case_option \ in_user_frame sb and case_option \ in_user_frame rb + and pspace_aligned and pspace_distinct and K (unat n \ msg_max_length)) - (tcb_at' s and tcb_at' r - and case_option \ valid_ipc_buffer_ptr' sb + (case_option \ valid_ipc_buffer_ptr' sb and case_option \ valid_ipc_buffer_ptr' rb) (copy_mrs s sb r rb n) (copyMRs s sb r rb n)" proof - @@ -3453,7 +4273,7 @@ proof - note R=R'[simplified] have as_user_bit: - "\v :: machine_word. corres dc (tcb_at s and tcb_at r) (tcb_at' s and tcb_at' r) + "\v :: machine_word. corres dc (tcb_at s and tcb_at r and pspace_aligned and pspace_distinct) \ (mapM (\ra. do v \ as_user s (getRegister ra); as_user r (setRegister ra v) @@ -3596,9 +4416,8 @@ qed lemmas valid_ipc_buffer_cap_simps = valid_ipc_buffer_cap_def [split_simps cap.split arch_cap.split] lemma lookupIPCBuffer_corres': - "corres (=) (tcb_at t and valid_objs and pspace_aligned) - (tcb_at' t and valid_objs' and pspace_aligned' - and pspace_distinct' and no_0_obj') + "corres (=) (tcb_at t and valid_objs and pspace_aligned and pspace_distinct) + (valid_objs' and pspace_aligned' and pspace_distinct' and no_0_obj') (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" apply (simp add: lookup_ipc_buffer_def X64_H.lookupIPCBuffer_def) apply (rule corres_guard_imp) @@ -3643,9 +4462,8 @@ lemma lookupIPCBuffer_corres': done lemma lookupIPCBuffer_corres: - "corres (=) (tcb_at t and invs) - (tcb_at' t and invs') - (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" + "corres (=) (tcb_at t and invs) invs' + (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" using lookupIPCBuffer_corres' by (rule corres_guard_imp, auto simp: invs'_def valid_state'_def) @@ -3695,14 +4513,14 @@ lemma ct_in_state'_decomp: shows "\\s. Pre s \ t = (ksCurThread s)\ f \\rv. ct_in_state' Prop\" apply (rule hoare_post_imp [where Q="\rv s. t = ksCurThread s \ st_tcb_at' Prop t s"]) apply (clarsimp simp add: ct_in_state'_def) - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (wp x y) apply simp done lemma ct_in_state'_set: "\\s. tcb_at' t s \ P st \ t = ksCurThread s\ setThreadState st t \\rv. 
ct_in_state' P\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule ct_in_state'_decomp[where t=t]) apply (wp setThreadState_ct') apply (wp setThreadState_st_tcb) @@ -3711,7 +4529,7 @@ lemma ct_in_state'_set: crunches setQueue, rescheduleRequired, tcbSchedDequeue for idle'[wp]: "valid_idle'" - (simp: crunch_simps) + (simp: crunch_simps wp: crunch_wps) lemma sts_valid_idle'[wp]: "\valid_idle' and valid_pspace' and @@ -3751,8 +4569,9 @@ lemma gbn_sp': lemma tcbSchedDequeue_tcbState_obj_at'[wp]: "\obj_at' (P \ tcbState) t'\ tcbSchedDequeue t \\rv. obj_at' (P \ tcbState) t'\" - apply (simp add: tcbSchedDequeue_def) - apply (wp | simp add: o_def split del: if_split cong: if_cong)+ + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: getObject_tcb_wp simp: o_def threadGet_def) + apply (clarsimp simp: obj_at'_def) done crunch typ_at'[wp]: setQueue "\s. P' (typ_at' P t s)" @@ -3771,10 +4590,14 @@ lemma setQueue_pred_tcb_at[wp]: lemma tcbSchedDequeue_pred_tcb_at'[wp]: "\\s. P' (pred_tcb_at' proj P t' s)\ tcbSchedDequeue t \\_ s. P' (pred_tcb_at' proj P t' s)\" apply (rule_tac P=P' in P_bool_lift) - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ - apply (simp add: tcbSchedDequeue_def) - apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) + apply (simp add: tcbSchedDequeue_def tcbQueueRemove_def) + apply (wpsimp wp: threadSet_pred_tcb_no_state getObject_tcb_wp + simp: threadGet_def tcb_to_itcb'_def) + apply (clarsimp simp: obj_at'_def) done lemma sts_st_tcb': @@ -3871,39 +4694,156 @@ crunch nonz_cap[wp]: addToBitmap "ex_nonz_cap_to' t" crunch iflive'[wp]: removeFromBitmap if_live_then_nonz_cap' crunch nonz_cap[wp]: removeFromBitmap "ex_nonz_cap_to' t" -lemma tcbSchedEnqueue_iflive'[wp]: - "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ - tcbSchedEnqueue tcb \\_. if_live_then_nonz_cap'\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ +crunches rescheduleRequired + for cap_to'[wp]: "ex_nonz_cap_to' p" + +lemma tcbQueued_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbQueued_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedNext_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedPrev_update_tcb_cte_cases: + "(getF, setF) \ ran tcb_cte_cases \ getF (tcbSchedPrev_update f tcb) = getF tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma tcbSchedNext_update_ctes_of[wp]: + "threadSet (tcbSchedNext_update f) tptr \\s. P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_ctes_of[wp]: + "threadSet (tcbSchedPrev_update f) tptr \\s. 
P (ctes_of s)\" + by (wpsimp wp: threadSet_ctes_ofT simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedNext_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_ex_nonz_cap_to'[wp]: + "threadSet (tcbSchedPrev_update f) tptr \ex_nonz_cap_to' p\" + by (wpsimp wp: threadSet_cap_to simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbSchedNext_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedNext_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedNext_update_tcb_cte_cases) + +lemma tcbSchedPrev_update_iflive': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbSchedPrev_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbSchedPrev_update_tcb_cte_cases) + +lemma tcbQueued_update_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' t s\ + threadSet (tcbQueued_update f) t + \\_. if_live_then_nonz_cap'\" + by (wpsimp wp: threadSet_iflive'T simp: tcbQueued_update_tcb_cte_cases) + +lemma getTCB_wp: + "\\s. \ko :: tcb. ko_at' ko p s \ Q ko s\ getObject p \Q\" + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) done -lemma rescheduleRequired_iflive'[wp]: - "\if_live_then_nonz_cap' - and (\s. \t. ksSchedulerAction s = SwitchToThread t - \ st_tcb_at' runnable' t s)\ - rescheduleRequired - \\rv. if_live_then_nonz_cap'\" - apply (simp add: rescheduleRequired_def) - apply (wp | wpc | simp)+ - apply (clarsimp simp: pred_tcb_at'_def obj_at'_real_def) - apply (erule(1) if_live_then_nonz_capD') - apply (fastforce simp: projectKOs) +lemma tcbQueueRemove_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and valid_objs' and sym_heap_sched_pointers and ex_nonz_cap_to' tcbPtr\ + tcbQueueRemove q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueRemove_def + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_imp_lift' getTCB_wp) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (force dest: sym_heapD2[where p'=tcbPtr] sym_heapD1[where p=tcbPtr] + elim: if_live_then_nonz_capE' + simp: valid_tcb'_def opt_map_def obj_at'_def ko_wp_at'_def projectKOs) + done + +lemma tcbQueueRemove_ex_nonz_cap_to'[wp]: + "tcbQueueRemove q tcbPtr \ex_nonz_cap_to' tcbPtr'\" + unfolding tcbQueueRemove_def + by (wpsimp wp: threadSet_cap_to' hoare_drop_imps getTCB_wp) + +(* We could write this one as "\t. tcbQueueHead t \ ..." instead, but we can't do the same in + tcbQueueAppend_if_live_then_nonz_cap', and it's nicer if the two lemmas are symmetric *) +lemma tcbQueuePrepend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueHead q)) s)\ + tcbQueuePrepend q tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueuePrepend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' + hoare_vcg_if_lift2 hoare_vcg_imp_lift') + +lemma tcbQueueAppend_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ ex_nonz_cap_to' tcbPtr s + \ (\ tcbQueueEmpty q \ ex_nonz_cap_to' (the (tcbQueueEnd q)) s)\ + tcbQueueAppend q tcbPtr + \\_. 
if_live_then_nonz_cap'\" + unfolding tcbQueueAppend_def + by (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive') + +lemma tcbQueueInsert_if_live_then_nonz_cap': + "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcbPtr and valid_objs' and sym_heap_sched_pointers\ + tcbQueueInsert tcbPtr afterPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbQueueInsert_def + supply projectKOs[simp] + apply (wpsimp wp: tcbSchedPrev_update_iflive' tcbSchedNext_update_iflive' getTCB_wp) + apply (intro conjI) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ko_wp_at'_def obj_at'_def) + apply (erule if_live_then_nonz_capE') + apply (frule_tac p'=afterPtr in sym_heapD2) + apply (fastforce simp: opt_map_def obj_at'_def) + apply (frule (1) tcb_ko_at_valid_objs_valid_tcb') + apply (clarsimp simp: valid_tcb'_def ko_wp_at'_def obj_at'_def opt_map_def) + done + +lemma tcbSchedEnqueue_iflive'[wp]: + "\if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'\ + tcbSchedEnqueue tcbPtr + \\_. if_live_then_nonz_cap'\" + unfolding tcbSchedEnqueue_def + supply projectKOs[simp] + apply (wpsimp wp: tcbQueuePrepend_if_live_then_nonz_cap' threadGet_wp) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (frule_tac p=tcbPtr in if_live_then_nonz_capE') + apply (fastforce simp: ko_wp_at'_def obj_at'_def) + apply clarsimp + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: obj_at'_tcbQueueHead_ksReadyQueues + simp: ko_wp_at'_def inQ_def opt_pred_def opt_map_def obj_at'_def + split: option.splits) done +crunches rescheduleRequired + for iflive'[wp]: if_live_then_nonz_cap' + lemma sts_iflive'[wp]: "\\s. if_live_then_nonz_cap' s - \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s)\ - setThreadState st t + \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s) + \ pspace_aligned' s \ pspace_distinct' s\ + setThreadState st t \\rv. if_live_then_nonz_cap'\" apply (simp add: setThreadState_def setQueue_def) - apply (rule hoare_pre) - apply (wp | simp)+ - apply (rule_tac Q="\rv. if_live_then_nonz_cap'" in hoare_post_imp) - apply clarsimp - apply (wp threadSet_iflive' | simp)+ - apply auto - done + apply wpsimp + apply (rule_tac Q="\rv. if_live_then_nonz_cap' and pspace_aligned' and pspace_distinct'" + in hoare_post_imp) + apply clarsimp + apply (wpsimp wp: threadSet_iflive') + apply fastforce + done lemma sbn_iflive'[wp]: "\\s. if_live_then_nonz_cap' s @@ -4022,6 +4962,19 @@ lemma setBoundNotification_vms'[wp]: apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) done +lemma threadSet_ct_not_inQ: + "(\tcb. tcbQueued tcb = tcbQueued (F tcb)) + \ threadSet F tcbPtr \\s. P (ct_not_inQ s)\" + unfolding threadSet_def + supply projectKOs[simp] + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (erule rsubst[where P=P]) + by (fastforce simp: ct_not_inQ_def obj_at'_def objBits_simps ps_clear_def split: if_splits) + +crunches tcbQueuePrepend, tcbQueueAppend, tcbQueueInsert, tcbQueueRemove, addToBitmap + for ct_not_inQ[wp]: ct_not_inQ + (wp: threadSet_ct_not_inQ crunch_wps) + lemma tcbSchedEnqueue_ct_not_inQ: "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ tcbSchedEnqueue t \\_. 
ct_not_inQ\" @@ -4045,12 +4998,7 @@ lemma tcbSchedEnqueue_ct_not_inQ: done show ?thesis apply (simp add: tcbSchedEnqueue_def unless_def null_def) - apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply wp - apply assumption + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ done qed @@ -4077,12 +5025,7 @@ lemma tcbSchedAppend_ct_not_inQ: done show ?thesis apply (simp add: tcbSchedAppend_def unless_def null_def) - apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ - apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) - apply wp - apply assumption + apply (wpsimp wp: ts sq hoare_vcg_imp_lift' getTCB_wp simp: threadGet_def)+ done qed @@ -4098,11 +5041,6 @@ lemma rescheduleRequired_ct_not_inQ: apply (wp setSchedulerAction_direct) done -crunch nosch[wp]: tcbSchedEnqueue "\s. P (ksSchedulerAction s)" - (simp: unless_def) -crunch nosch[wp]: tcbSchedAppend "\s. P (ksSchedulerAction s)" - (simp: unless_def) - lemma rescheduleRequired_sa_cnt[wp]: "\\s. True \ rescheduleRequired \\_ s. ksSchedulerAction s = ChooseNewThread \" unfolding rescheduleRequired_def setSchedulerAction_def @@ -4111,12 +5049,10 @@ lemma rescheduleRequired_sa_cnt[wp]: lemma possibleSwitchTo_ct_not_inQ: "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ possibleSwitchTo t \\_. ct_not_inQ\" - (is "\?PRE\ _ \_\") apply (simp add: possibleSwitchTo_def curDomain_def) - apply (wpsimp wp: static_imp_wp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ + apply (wpsimp wp: hoare_weak_lift_imp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ threadGet_wp - | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ - apply (fastforce simp: obj_at'_def) + | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ done lemma threadSet_tcbState_update_ct_not_inQ[wp]: @@ -4132,7 +5068,7 @@ lemma threadSet_tcbState_update_ct_not_inQ[wp]: apply (clarsimp) apply (rule hoare_conjI) apply (rule hoare_weaken_pre) - apply (wps, wp static_imp_wp) + apply (wps, wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb)+ apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4152,7 +5088,7 @@ lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: apply (rule hoare_conjI) apply (rule hoare_weaken_pre) apply wps - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb) apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4196,29 +5132,6 @@ lemma tcbSchedDequeue_ct_not_inQ[wp]: done qed -lemma tcbSchedEnqueue_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ tcbSchedEnqueue t \\_. obj_at' P t'\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply (wp threadGet_wp | simp)+ - apply (clarsimp simp: obj_at'_def) - apply (case_tac obja) - apply fastforce - done - -lemma setThreadState_not_st: - "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) - \ \obj_at' P t'\ setThreadState st t \\_. 
obj_at' P t'\" - apply (simp add: setThreadState_def rescheduleRequired_def) - apply (wp hoare_vcg_conj_lift tcbSchedEnqueue_not_st - | wpc - | rule hoare_drop_imps - | simp)+ - apply (clarsimp simp: obj_at'_def) - apply (case_tac obj) - apply fastforce - done - crunch ct_idle_or_in_cur_domain'[wp]: setQueue ct_idle_or_in_cur_domain' (simp: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) @@ -4247,17 +5160,8 @@ lemma removeFromBitmap_ct_idle_or_in_cur_domain'[wp]: | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ done -lemma tcbSchedEnqueue_ksCurDomain[wp]: - "\ \s. P (ksCurDomain s)\ tcbSchedEnqueue tptr \\_ s. P (ksCurDomain s)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply wpsimp - done - -lemma tcbSchedEnqueue_ksDomSchedule[wp]: - "\ \s. P (ksDomSchedule s)\ tcbSchedEnqueue tptr \\_ s. P (ksDomSchedule s)\" - apply (simp add: tcbSchedEnqueue_def unless_def) - apply wpsimp - done +crunches tcbQueuePrepend + for ct_idle_or_in_cur_domain'[wp]: ct_idle_or_in_cur_domain' lemma tcbSchedEnqueue_ct_idle_or_in_cur_domain'[wp]: "\ct_idle_or_in_cur_domain'\ tcbSchedEnqueue tptr \\_. ct_idle_or_in_cur_domain'\" @@ -4337,12 +5241,386 @@ lemma sts_utr[wp]: apply (wp untyped_ranges_zero_lift) done +lemma removeFromBitmap_bitmapQ: + "\\\ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" + unfolding bitmapQ_defs bitmap_fun_defs + by (wpsimp simp: bitmap_fun_defs) + +lemma removeFromBitmap_valid_bitmapQ[wp]: + "\valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans + and (\s. tcbQueueEmpty (ksReadyQueues s (d,p)))\ + removeFromBitmap d p + \\_. valid_bitmapQ\" + (is "\?pre\ _ \_\") + apply (rule_tac Q="\_ s. ?pre s \ \ bitmapQ d p s" in hoare_strengthen_post) + apply (wpsimp wp: removeFromBitmap_valid_bitmapQ_except removeFromBitmap_bitmapQ) + apply (fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) + done + +crunches tcbSchedDequeue + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + (wp: crunch_wps simp: crunch_simps) + +lemma setQueue_nonempty_valid_bitmapQ': + "\\s. valid_bitmapQ s \ \ tcbQueueEmpty (ksReadyQueues s (d, p))\ + setQueue d p queue + \\_ s. \ tcbQueueEmpty queue \ valid_bitmapQ s\" + apply (wpsimp simp: setQueue_def) + apply (fastforce simp: valid_bitmapQ_def bitmapQ_def) + done + +lemma threadSet_valid_bitmapQ_except[wp]: + "threadSet f tcbPtr \valid_bitmapQ_except d p\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + apply (clarsimp simp: valid_bitmapQ_except_def bitmapQ_def) + done + +lemma threadSet_bitmapQ: + "threadSet F t \bitmapQ domain priority\" + unfolding threadSet_def + apply (wpsimp wp: getTCB_wp simp: setObject_def updateObject_default_def) + by (clarsimp simp: bitmapQ_def) + +crunches tcbQueueRemove, tcbQueuePrepend, tcbQueueAppend + for valid_bitmapQ_except[wp]: "valid_bitmapQ_except d p" + and valid_bitmapQ[wp]: valid_bitmapQ + and bitmapQ[wp]: "bitmapQ tdom prio" + (wp: crunch_wps) + +lemma tcbQueued_imp_queue_nonempty: + "\list_queue_relation ts (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb)) nexts prevs; + \t. 
t \ set ts \ (inQ (tcbDomain tcb) (tcbPriority tcb) |< tcbs_of' s) t; + ko_at' tcb tcbPtr s; tcbQueued tcb\ + \ \ tcbQueueEmpty (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb))" + supply projectKOs[simp] + apply (clarsimp simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac x=tcbPtr in spec) + apply (fastforce dest: heap_path_head simp: inQ_def opt_map_def opt_pred_def obj_at'_def) + done + +lemma tcbSchedDequeue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedDequeue tcbPtr \\_. valid_bitmapQ\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + apply (wpsimp wp: setQueue_nonempty_valid_bitmapQ' hoare_vcg_conj_lift + hoare_vcg_if_lift2 hoare_vcg_const_imp_lift threadGet_wp + | wp (once) hoare_drop_imps)+ + by (fastforce dest!: tcbQueued_imp_queue_nonempty + simp: ready_queue_relation_def ksReadyQueues_asrt_def obj_at'_def) + +lemma tcbSchedDequeue_valid_bitmaps[wp]: + "tcbSchedDequeue tcbPtr \valid_bitmaps\" + by (wpsimp simp: valid_bitmaps_def) + +lemma setQueue_valid_bitmapQ': (* enqueue only *) + "\valid_bitmapQ_except d p and bitmapQ d p and K (\ tcbQueueEmpty q)\ + setQueue d p q + \\_. valid_bitmapQ\" + unfolding setQueue_def bitmapQ_defs + by (wpsimp simp: bitmapQ_def) + +lemma tcbSchedEnqueue_valid_bitmapQ[wp]: + "\valid_bitmaps\ tcbSchedEnqueue tcbPtr \\_. valid_bitmapQ\" + supply if_split[split del] + unfolding tcbSchedEnqueue_def + apply (wpsimp simp: tcbQueuePrepend_def + wp: setQueue_valid_bitmapQ' addToBitmap_valid_bitmapQ_except addToBitmap_bitmapQ + threadGet_wp) + apply (fastforce simp: valid_bitmaps_def valid_bitmapQ_def tcbQueueEmpty_def split: if_splits) + done + +crunches tcbSchedEnqueue, tcbSchedAppend + for bitmapQ_no_L1_orphans[wp]: bitmapQ_no_L1_orphans + and bitmapQ_no_L2_orphans[wp]: bitmapQ_no_L2_orphans + +lemma tcbSchedEnqueue_valid_bitmaps[wp]: + "tcbSchedEnqueue tcbPtr \valid_bitmaps\" + unfolding valid_bitmaps_def + apply wpsimp + apply (clarsimp simp: valid_bitmaps_def) + done + +crunches rescheduleRequired, threadSet, setThreadState + for valid_bitmaps[wp]: valid_bitmaps + (rule: valid_bitmaps_lift) + +lemma tcbSchedEnqueue_valid_sched_pointers[wp]: + "tcbSchedEnqueue tcbPtr \valid_sched_pointers\" + supply projectKOs[simp] + apply (clarsimp simp: tcbSchedEnqueue_def getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueuePrepend_def + setQueue_def) + apply (clarsimp simp: valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp simp: valid_sched_pointers_def list_queue_relation_def) + apply (case_tac "ts = []", fastforce simp: tcbQueueEmpty_def) + by (intro conjI impI; + force dest!: hd_in_set heap_path_head + simp: inQ_def opt_pred_def opt_map_def obj_at'_def split: if_splits) + +lemma tcbSchedAppend_valid_sched_pointers[wp]: + "tcbSchedAppend tcbPtr \valid_sched_pointers\" + supply projectKOs[simp] + apply (clarsimp simp: tcbSchedAppend_def 
getQueue_def unless_def) + \ \we step forwards until we can step over the addToBitmap in order to avoid state blow-up\ + apply (intro bind_wp[OF _ stateAssert_sp] bind_wp[OF _ isRunnable_inv] + bind_wp[OF _ assert_sp] bind_wp[OF _ threadGet_sp] + bind_wp[OF _ gets_sp] + | rule hoare_when_cases, fastforce)+ + apply (forward_inv_step wp: hoare_vcg_ex_lift) + supply if_split[split del] + apply (wpsimp wp: getTCB_wp + simp: threadSet_def setObject_def updateObject_default_def tcbQueueAppend_def + setQueue_def) + apply (clarsimp simp: valid_sched_pointers_def) + apply (intro conjI impI) + apply (fastforce simp: opt_pred_def opt_map_def split: if_splits) + apply normalise_obj_at' + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + by (intro conjI impI; + clarsimp dest: last_in_set + simp: valid_sched_pointers_def opt_map_def list_queue_relation_def tcbQueueEmpty_def + queue_end_valid_def inQ_def opt_pred_def obj_at'_def + split: if_splits option.splits; + fastforce) + +lemma tcbSchedDequeue_valid_sched_pointers[wp]: + "\valid_sched_pointers and sym_heap_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. valid_sched_pointers\" + supply if_split[split del] fun_upd_apply[simp del] + supply projectKOs[simp] + apply (clarsimp simp: tcbSchedDequeue_def getQueue_def setQueue_def) + apply (wpsimp wp: threadSet_wp getTCB_wp threadGet_wp simp: tcbQueueRemove_def) + apply normalise_obj_at' + apply (rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (clarsimp split: if_splits) + apply (frule (1) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (fastforce simp: inQ_def opt_pred_def opt_map_def obj_at'_def) + apply (clarsimp simp: list_queue_relation_def) + apply (intro conjI impI) + \ \the ready queue is the singleton consisting of tcbPtr\ + apply (clarsimp simp: valid_sched_pointers_def) + apply (case_tac "ptr = tcbPtr") + apply (force dest!: heap_ls_last_None + simp: prev_queue_head_def queue_end_valid_def inQ_def opt_map_def obj_at'_def) + apply (simp add: fun_upd_def opt_pred_def) + \ \tcbPtr is the head of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def fun_upd_apply prev_queue_head_def + inQ_def opt_pred_def opt_map_def obj_at'_def + split: if_splits option.splits) + \ \tcbPtr is the end of the ready queue\ + subgoal + by (auto dest!: heap_ls_last_None + simp: valid_sched_pointers_def queue_end_valid_def inQ_def opt_pred_def + opt_map_def fun_upd_apply obj_at'_def + split: if_splits option.splits) + \ \tcbPtr is in the middle of the ready queue\ + apply (intro conjI impI allI) + by (clarsimp simp: valid_sched_pointers_def inQ_def opt_pred_def opt_map_def fun_upd_apply obj_at'_def + split: if_splits option.splits; + auto) + +lemma tcbQueueRemove_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts)\ + tcbQueueRemove q tcbPtr + \\_. 
sym_heap_sched_pointers\" + supply heap_path_append[simp del] + supply projectKOs[simp] + apply (clarsimp simp: tcbQueueRemove_def) + apply (wpsimp wp: threadSet_wp getTCB_wp) + apply (rename_tac tcb ts) + + \ \tcbPtr is the head of q, which is not a singleton\ + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: list_queue_relation_def Let_def) + apply (prop_tac "tcbSchedNext tcb \ Some tcbPtr") + apply (fastforce dest: heap_ls_no_loops[where p=tcbPtr] simp: opt_map_def obj_at'_def) + apply (fastforce intro: sym_heap_remove_only' + simp: prev_queue_head_def opt_map_red opt_map_upd_triv obj_at'_def) + + \ \tcbPtr is the end of q, which is not a singleton\ + apply (intro impI) + apply (rule conjI) + apply clarsimp + apply (prop_tac "tcbSchedPrev tcb \ Some tcbPtr") + apply (fastforce dest!: heap_ls_prev_no_loops[where p=tcbPtr] + simp: list_queue_relation_def opt_map_def obj_at'_def) + apply (subst fun_upd_swap, fastforce) + apply (fastforce intro: sym_heap_remove_only simp: opt_map_red opt_map_upd_triv obj_at'_def) + + \ \tcbPtr is in the middle of q\ + apply (intro conjI impI allI) + apply (frule (2) list_queue_relation_neighbour_in_set[where p=tcbPtr]) + apply (frule split_list) + apply clarsimp + apply (rename_tac xs ys) + apply (prop_tac "xs \ [] \ ys \ []") + apply (fastforce simp: list_queue_relation_def queue_end_valid_def) + apply (clarsimp simp: list_queue_relation_def) + apply (frule (3) ptr_in_middle_prev_next) + apply (frule heap_ls_distinct) + apply (rename_tac afterPtr beforePtr xs ys) + apply (frule_tac before=beforePtr and middle=tcbPtr and after=afterPtr + in sym_heap_remove_middle_from_chain) + apply (fastforce dest: last_in_set simp: opt_map_def obj_at'_def) + apply (fastforce dest: hd_in_set simp: opt_map_def obj_at'_def) + apply (rule_tac hp="tcbSchedNexts_of s" in sym_heapD2) + apply fastforce + apply (fastforce simp: opt_map_def obj_at'_def) + apply (fastforce simp: opt_map_def obj_at'_def) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def split: if_splits) + done + +lemma tcbQueuePrepend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueuePrepend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply if_split[split del] + supply projectKOs[simp] + apply (clarsimp simp: tcbQueuePrepend_def) + apply (wpsimp wp: threadSet_wp) + apply (prop_tac "tcbPtr \ the (tcbQueueHead q)") + apply (case_tac "ts = []"; + fastforce dest: heap_path_head simp: list_queue_relation_def tcbQueueEmpty_def) + apply (drule_tac a=tcbPtr and b="the (tcbQueueHead q)" in sym_heap_connect) + apply assumption + apply (clarsimp simp: list_queue_relation_def prev_queue_head_def tcbQueueEmpty_def) + apply (fastforce simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def tcbQueueEmpty_def) + done + +lemma tcbQueueInsert_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueInsert tcbPtr afterPtr + \\_. 
sym_heap_sched_pointers\" + supply projectKOs[simp] + apply (clarsimp simp: tcbQueueInsert_def) + \ \forwards step in order to name beforePtr below\ + apply (rule bind_wp[OF _ getObject_tcb_sp]) + apply (rule bind_wp[OF _ assert_sp]) + apply (rule hoare_ex_pre_conj[simplified conj_commute], rename_tac beforePtr) + apply (rule bind_wp[OF _ assert_sp]) + apply (wpsimp wp: threadSet_wp) + apply normalise_obj_at' + apply (prop_tac "tcbPtr \ afterPtr") + apply (clarsimp simp: list_queue_relation_def opt_map_red obj_at'_def) + apply (prop_tac "tcbPtr \ beforePtr") + apply (fastforce dest: sym_heap_None simp: opt_map_def obj_at'_def split: option.splits) + apply (prop_tac "tcbSchedNexts_of s beforePtr = Some afterPtr") + apply (fastforce intro: sym_heapD2 simp: opt_map_def obj_at'_def) + apply (fastforce dest: sym_heap_insert_into_middle_of_chain + simp: fun_upd_swap opt_map_red opt_map_upd_triv obj_at'_def) + done + +lemma tcbQueueAppend_sym_heap_sched_pointers: + "\\s. sym_heap_sched_pointers s + \ (\ts. list_queue_relation ts q (tcbSchedNexts_of s) (tcbSchedPrevs_of s) + \ tcbPtr \ set ts) + \ tcbSchedNexts_of s tcbPtr = None \ tcbSchedPrevs_of s tcbPtr = None\ + tcbQueueAppend q tcbPtr + \\_. sym_heap_sched_pointers\" + supply if_split[split del] + supply projectKOs[simp] + apply (clarsimp simp: tcbQueueAppend_def) + apply (wpsimp wp: threadSet_wp) + apply (clarsimp simp: tcbQueueEmpty_def list_queue_relation_def queue_end_valid_def obj_at'_def + split: if_splits) + apply fastforce + apply (drule_tac a="last ts" and b=tcbPtr in sym_heap_connect) + apply (fastforce dest: heap_ls_last_None) + apply assumption + apply (simp add: opt_map_red tcbQueueEmpty_def) + apply (subst fun_upd_swap, simp) + apply (fastforce simp: opt_map_red opt_map_upd_triv) + done + +lemma tcbQueued_update_sym_heap_sched_pointers[wp]: + "threadSet (tcbQueued_update f) tcbPtr \sym_heap_sched_pointers\" + by (rule sym_heap_sched_pointers_lift; + wpsimp wp: threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of) + +lemma tcbSchedEnqueue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedEnqueue tcbPtr + \\_. sym_heap_sched_pointers\" + unfolding tcbSchedEnqueue_def + supply projectKOs[simp] + apply (wpsimp wp: tcbQueuePrepend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def) + done + +lemma tcbSchedAppend_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedAppend tcbPtr + \\_. sym_heap_sched_pointers\" + unfolding tcbSchedAppend_def + supply projectKOs[simp] + apply (wpsimp wp: tcbQueueAppend_sym_heap_sched_pointers threadGet_wp + simp: addToBitmap_def bitmap_fun_defs) + apply (normalise_obj_at', rename_tac tcb) + apply (clarsimp simp: ready_queue_relation_def ksReadyQueues_asrt_def) + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (fastforce dest!: spec[where x=tcbPtr] inQ_implies_tcbQueueds_of + simp: valid_sched_pointers_def opt_pred_def opt_map_def obj_at'_def) + done + +lemma tcbSchedDequeue_sym_heap_sched_pointers[wp]: + "\sym_heap_sched_pointers and valid_sched_pointers\ + tcbSchedDequeue tcbPtr + \\_. 
sym_heap_sched_pointers\" + unfolding tcbSchedDequeue_def + supply projectKOs[simp] + apply (wpsimp wp: tcbQueueRemove_sym_heap_sched_pointers hoare_vcg_if_lift2 threadGet_wp + simp: bitmap_fun_defs) + apply (fastforce simp: ready_queue_relation_def ksReadyQueues_asrt_def inQ_def opt_pred_def + opt_map_def obj_at'_def) + done + +crunches setThreadState + for valid_sched_pointers[wp]: valid_sched_pointers + and sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (simp: crunch_simps wp: crunch_wps threadSet_valid_sched_pointers threadSet_sched_pointers) + lemma sts_invs_minor': "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st \ (st \ Inactive \ \ idle' st \ st' \ Inactive \ \ idle' st')) t and (\s. t = ksIdleThread s \ idle' st) - and (\s. (\p. t \ set(ksReadyQueues s p)) \ runnable' st) and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) and sch_act_simple and invs'\ @@ -4351,21 +5629,21 @@ lemma sts_invs_minor': including no_pre apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp sts_valid_queues valid_irq_node_lift irqs_masked_lift - setThreadState_ct_not_inQ + apply (wp valid_irq_node_lift irqs_masked_lift + setThreadState_ct_not_inQ | simp add: cteCaps_of_def o_def)+ apply (clarsimp simp: sch_act_simple_def) apply (intro conjI) - apply clarsimp - defer - apply (clarsimp dest!: st_tcb_at_state_refs_ofD' - elim!: rsubst[where P=sym_refs] - intro!: ext) - apply (clarsimp elim!: st_tcb_ex_cap'') + apply clarsimp + defer + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' + elim!: rsubst[where P=sym_refs] + intro!: ext) + apply (clarsimp elim!: st_tcb_ex_cap'') + apply fastforce + apply fastforce apply (frule tcb_in_valid_state', clarsimp+) - apply (cases st, simp_all add: valid_tcb_state'_def - split: Structures_H.thread_state.split_asm) - done + by (cases st; simp add: valid_tcb_state'_def split: Structures_H.thread_state.split_asm) lemma sts_cap_to'[wp]: "\ex_nonz_cap_to' p\ setThreadState st t \\rv. ex_nonz_cap_to' p\" @@ -4410,12 +5688,59 @@ lemma threadSet_ct_running': apply wp done +lemma tcbQueuePrepend_tcbPriority_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbQueuePrepend_def + supply projectKOs[simp] + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbQueuePrepend_tcbDomain_obj_at'[wp]: + "tcbQueuePrepend queue tptr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbQueuePrepend_def + supply projectKOs[simp] + apply (wpsimp wp: threadSet_wp) + by (auto simp: obj_at'_def objBits_simps ps_clear_def split: if_splits) + +lemma tcbSchedDequeue_tcbPriority[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedDequeue_tcbDomain[wp]: + "tcbSchedDequeue t \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbSchedDequeue_def tcbQueueRemove_def + by (wpsimp wp: hoare_when_weak_wp hoare_drop_imps) + +lemma tcbSchedEnqueue_tcbPriority_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +lemma tcbSchedEnqueue_tcbDomain_obj_at'[wp]: + "tcbSchedEnqueue tcbPtr \obj_at' (\tcb. P (tcbDomain tcb)) t'\" + unfolding tcbSchedEnqueue_def setQueue_def + by wpsimp + +crunches rescheduleRequired + for tcbPriority_obj_at'[wp]: "obj_at' (\tcb. 
P (tcbPriority tcb)) t'" + and tcbDomain_obj_at'[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t'" + +lemma setThreadState_tcbPriority_obj_at'[wp]: + "setThreadState ts tptr \obj_at' (\tcb. P (tcbPriority tcb)) t'\" + unfolding setThreadState_def + supply projectKOs[simp] + apply (wpsimp wp: threadSet_wp) + apply (fastforce simp: obj_at'_def objBits_simps ps_clear_def) + done + lemma setThreadState_tcb_in_cur_domain'[wp]: "\tcb_in_cur_domain' t'\ setThreadState st t \\_. tcb_in_cur_domain' t'\" apply (simp add: tcb_in_cur_domain'_def) apply (rule hoare_pre) apply wps - apply (wp setThreadState_not_st | simp)+ + apply (simp add: setThreadState_def) + apply (wpsimp wp: threadSet_ct_idle_or_in_cur_domain' hoare_drop_imps)+ done lemma asUser_global_refs': "\valid_global_refs'\ asUser t f \\rv. valid_global_refs'\" @@ -4561,10 +5886,13 @@ lemma set_eobject_corres': assumes e: "etcb_relation etcb tcb'" assumes z: "\s. obj_at' P ptr s \ map_to_ctes ((ksPSpace s) (ptr \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" - shows "corres dc (tcb_at ptr and is_etcb_at ptr) - (obj_at' (\ko. non_exst_same ko tcb') ptr - and obj_at' P ptr) - (set_eobject ptr etcb) (setObject ptr tcb')" + shows + "corres dc + (tcb_at ptr and is_etcb_at ptr) + (obj_at' (\ko. non_exst_same ko tcb') ptr and obj_at' P ptr + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain tcb' \ tcbPriority tcb \ tcbPriority tcb') + \ \ tcbQueued tcb) ptr) + (set_eobject ptr etcb) (setObject ptr tcb')" apply (rule corres_no_failI) apply (rule no_fail_pre) apply wp @@ -4585,21 +5913,35 @@ lemma set_eobject_corres': apply (drule(1) bspec) apply (clarsimp simp: non_exst_same_def) apply (case_tac bb; simp) - apply (clarsimp simp: obj_at'_def other_obj_relation_def cte_relation_def tcb_relation_def projectKOs split: if_split_asm)+ + apply (clarsimp simp: obj_at'_def other_obj_relation_def tcb_relation_cut_def cte_relation_def + tcb_relation_def projectKOs + split: if_split_asm)+ apply (clarsimp simp: aobj_relation_cuts_def split: X64_A.arch_kernel_obj.splits) apply (rename_tac arch_kernel_obj obj d p ts) apply (case_tac arch_kernel_obj; simp) apply (clarsimp simp: pte_relation_def pde_relation_def pdpte_relation_def pml4e_relation_def is_tcb_def split: if_split_asm)+ - apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) - apply (frule bspec, erule domI) - apply (rule ballI, drule(1) bspec) - apply (drule domD) - apply (clarsimp simp: obj_at'_def) - apply (clarsimp simp: projectKOs) - apply (insert e) - apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits X64_A.arch_kernel_obj.splits) + apply (extract_conjunct \match conclusion in "ekheap_relation _ _" \ -\) + apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: obj_at'_def) + apply (insert e) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type + split: Structures_A.kernel_object.splits kernel_object.splits arch_kernel_obj.splits) + apply (frule in_ready_q_tcbQueued_eq[where t=ptr]) + apply (rename_tac s' conctcb' abstcb exttcb) + apply (clarsimp simp: ready_queues_relation_def Let_def) + apply (prop_tac "(tcbSchedNexts_of s')(ptr := tcbSchedNext tcb') = tcbSchedNexts_of s'") + apply (fastforce simp: opt_map_def obj_at'_def non_exst_same_def projectKOs split: option.splits) + apply (prop_tac "(tcbSchedPrevs_of s')(ptr := tcbSchedPrev 
tcb') = tcbSchedPrevs_of s'") + apply (fastforce simp: opt_map_def obj_at'_def non_exst_same_def projectKOs split: option.splits) + apply (clarsimp simp: ready_queue_relation_def opt_map_def opt_pred_def obj_at'_def inQ_def + non_exst_same_def projectKOs + split: option.splits) + apply metis done lemma set_eobject_corres: @@ -4607,9 +5949,13 @@ lemma set_eobject_corres: assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" assumes r: "r () ()" - shows "corres r (tcb_at add and (\s. ekheap s add = Some etcb)) - (ko_at' tcb' add) - (set_eobject add etcbu) (setObject add tcbu')" + shows + "corres r + (tcb_at add and (\s. ekheap s add = Some etcb)) + (ko_at' tcb' add + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain tcbu' \ tcbPriority tcb \ tcbPriority tcbu') + \ \ tcbQueued tcb) add) + (set_eobject add etcbu) (setObject add tcbu')" apply (rule_tac F="non_exst_same tcb' tcbu' \ etcb_relation etcbu tcbu'" in corres_req) apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) apply (frule(1) pspace_relation_absD) @@ -4636,24 +5982,27 @@ lemma set_eobject_corres: lemma ethread_set_corresT: assumes x: "\tcb'. non_exst_same tcb' (f' tcb')" - assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. - getF (f' tcb) = getF tcb" - assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ - etcb_relation (f etcb) (f' tcb')" - shows "corres dc (tcb_at t and valid_etcbs) - (tcb_at' t) - (ethread_set f t) (threadSet f' t)" + assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (f' tcb) = getF tcb" + assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ etcb_relation (f etcb) (f' tcb')" + shows + "corres dc + (tcb_at t and valid_etcbs) + (tcb_at' t + and obj_at' (\tcb. (tcbDomain tcb \ tcbDomain (f' tcb) + \ tcbPriority tcb \ tcbPriority (f' tcb)) + \ \ tcbQueued tcb) t) + (ethread_set f t) (threadSet f' t)" apply (simp add: ethread_set_def threadSet_def bind_assoc) apply (rule corres_guard_imp) apply (rule corres_split[OF corres_get_etcb set_eobject_corres]) apply (rule x) apply (erule e) apply (simp add: z)+ - apply wp+ + apply (wp getObject_tcb_wp)+ apply clarsimp apply (simp add: valid_etcbs_def tcb_at_st_tcb_at[symmetric]) apply (force simp: tcb_at_def get_etcb_def obj_at_def) - apply simp + apply (clarsimp simp: obj_at'_def) done lemmas ethread_set_corres = diff --git a/proof/refine/X64/Tcb_R.thy b/proof/refine/X64/Tcb_R.thy index 6d9b7557ca..b93d1ee0f8 100644 --- a/proof/refine/X64/Tcb_R.thy +++ b/proof/refine/X64/Tcb_R.thy @@ -11,7 +11,7 @@ begin context begin interpretation Arch . 
(*FIXME: arch_split*) lemma asUser_setNextPC_corres: - "corres dc (tcb_at t and invs) (tcb_at' t and invs') + "corres dc (tcb_at t and invs) invs' (as_user t (setNextPC v)) (asUser t (setNextPC v))" apply (rule asUser_corres) apply (rule corres_Id, simp, simp) @@ -46,15 +46,15 @@ lemma activateThread_corres: apply (rule corres_split_nor[OF asUser_setNextPC_corres]) apply (rule setThreadState_corres) apply (simp | wp weak_sch_act_wf_lift_linear)+ - apply (clarsimp simp: st_tcb_at_tcb_at) + apply (fastforce simp: st_tcb_at_tcb_at) apply fastforce apply (rule corres_guard_imp) apply (rule activateIdleThread_corres) apply (clarsimp elim!: st_tcb_weakenE) apply (clarsimp elim!: pred_tcb'_weakenE) apply (wp gts_st_tcb gts_st_tcb' gts_st_tcb_at)+ - apply (clarsimp simp: ct_in_state_def tcb_at_invs - elim!: st_tcb_weakenE) + apply (fastforce simp: ct_in_state_def tcb_at_invs + elim!: st_tcb_weakenE) apply (clarsimp simp: tcb_at_invs' ct_in_state'_def elim!: pred_tcb'_weakenE) done @@ -80,10 +80,10 @@ abbreviation lemma gts_st_tcb': "\tcb_at' t\ getThreadState t \\rv. st_tcb_at' (\st. st = rv) t\" - apply (rule hoare_vcg_precond_imp) + apply (rule hoare_weaken_pre) apply (rule hoare_post_imp[where Q="\rv s. \rv'. rv = rv' \ st_tcb_at' (\st. st = rv') t s"]) apply simp - apply (wp hoare_ex_wp) + apply (wp hoare_vcg_ex_lift) apply (clarsimp simp add: pred_tcb_at'_def obj_at'_def) done @@ -98,13 +98,13 @@ lemma activate_invs': activateThread \\rv. invs' and (ct_running' or ct_idle')\" apply (simp add: activateThread_def) - apply (rule hoare_seq_ext) - apply (rule_tac B="\state s. invs' s \ sch_act_simple s - \ st_tcb_at' (\st. st = state) thread s - \ thread = ksCurThread s - \ (runnable' state \ idle' state)" in hoare_seq_ext) - apply (case_tac x, simp_all add: isTS_defs hoare_pre_cont - split del: if_splits cong: if_cong) + apply (rule bind_wp) + apply (rule_tac Q'="\state s. invs' s \ sch_act_simple s + \ st_tcb_at' (\st. st = state) thread s + \ thread = ksCurThread s + \ (runnable' state \ idle' state)" + in bind_wp) + apply (case_tac rv, simp_all add: isTS_defs) apply (wp) apply (clarsimp simp: ct_in_state'_def) apply (rule_tac Q="\rv. invs' and ct_idle'" in hoare_post_imp, simp) @@ -154,9 +154,8 @@ lemma activate_sch_act: activateThread \\rv s. P (ksSchedulerAction s)\" apply (simp add: activateThread_def getCurThread_def cong: if_cong Structures_H.thread_state.case_cong) - apply (rule hoare_seq_ext [OF _ gets_sp]) - apply (rule hoare_seq_ext[where B="\st s. (runnable' or idle') st - \ P (ksSchedulerAction s)"]) + apply (rule bind_wp [OF _ gets_sp]) + apply (rule bind_wp[where Q'="\st s. 
(runnable' or idle') st \ P (ksSchedulerAction s)"]) apply (rule hoare_pre) apply (wp | wpc | simp add: setThreadState_runnable_simp)+ apply (clarsimp simp: ct_in_state'_def cur_tcb'_def pred_tcb_at' @@ -197,13 +196,12 @@ lemma setupReplyMaster_weak_sch_act_wf[wp]: apply assumption done -crunches setupReplyMaster - for valid_queues[wp]: "Invariants_H.valid_queues" - and valid_queues'[wp]: "valid_queues'" - (wp: crunch_wps simp: crunch_simps) +crunches setup_reply_master + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct lemma restart_corres: - "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and ex_nonz_cap_to' t) (Tcb_A.restart t) (ThreadDecls_H.restart t)" apply (simp add: Tcb_A.restart_def Thread_H.restart_def) apply (simp add: isStopped_def2 liftM_def) @@ -215,16 +213,19 @@ lemma restart_corres: apply (rule corres_split_nor[OF setThreadState_corres]) apply clarsimp apply (rule corres_split[OF tcbSchedEnqueue_corres possibleSwitchTo_corres]) - apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_valid_queues sts_st_tcb' - | clarsimp simp: valid_tcb_state'_def)+ - apply (rule_tac Q="\rv. valid_sched and cur_tcb" in hoare_strengthen_post) - apply wp - apply (simp add: valid_sched_def valid_sched_action_def) - apply (rule_tac Q="\rv. invs' and tcb_at' t" in hoare_strengthen_post) - apply wp - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def) - apply wp+ - apply (simp add: valid_sched_def invs_def tcb_at_is_etcb_at) + apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_objs' + | clarsimp simp: valid_tcb_state'_def | strengthen valid_objs'_valid_tcbs')+ + apply (rule_tac Q="\rv. valid_sched and cur_tcb and pspace_aligned and pspace_distinct" + in hoare_strengthen_post) + apply wp + apply (fastforce simp: valid_sched_def valid_sched_action_def) + apply (rule_tac Q="\rv. invs' and ex_nonz_cap_to' t" in hoare_strengthen_post) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def + valid_tcb_state'_def) + apply wp+ + apply (fastforce simp: valid_sched_def invs_def tcb_at_is_etcb_at) apply (clarsimp simp add: invs'_def valid_state'_def sch_act_wf_weak) done @@ -301,29 +302,30 @@ lemma invokeTCB_ReadRegisters_corres: apply (clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) done -crunch sch_act_simple [wp]: asUser "sch_act_simple" - (rule: sch_act_simple_lift) - -lemma invs_valid_queues': - "invs' s \ valid_queues' s" - by (clarsimp simp: invs'_def valid_state'_def) - -declare invs_valid_queues'[rule_format, elim!] 
- lemma einvs_valid_etcbs: "einvs s \ valid_etcbs s" by (clarsimp simp: valid_sched_def) lemma asUser_postModifyRegisters_corres: - "corres dc (tcb_at t) (tcb_at' t and tcb_at' ct) + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) (tcb_at' ct) (arch_post_modify_registers ct t) (asUser t $ postModifyRegisters ct t)" - apply (rule corres_guard_imp) - apply (clarsimp simp: arch_post_modify_registers_def postModifyRegisters_def when_def) - apply safe - apply (rule asUser_setRegister_corres) - apply (subst submonad_asUser.return) - apply (rule corres_stateAssert_assume) - by simp+ + apply (clarsimp simp: arch_post_modify_registers_def postModifyRegisters_def when_def) + apply (rule conjI; clarsimp) + apply (corres corres: asUser_setRegister_corres) + apply (subst submonad_asUser.return) + apply (rule corres_stateAssert_assume; simp) + done + +crunches Tcb_A.restart, IpcCancel_A.suspend + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +crunches restart + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + and valid_sched_pointers[wp]: valid_sched_pointers + and pspace_aligned'[wp]: pspace_aligned' + and pspace_distinct'[wp]: pspace_distinct' + (simp: crunch_simps wp: crunch_wps threadSet_sched_pointers threadSet_valid_sched_pointers) lemma invokeTCB_WriteRegisters_corres: "corres (dc \ (=)) (einvs and tcb_at dest and ex_nonz_cap_to dest) @@ -344,7 +346,7 @@ lemma invokeTCB_WriteRegisters_corres: mask_def user_vtop_def cong: if_cong) apply simp - apply (rule no_fail_pre, wp no_fail_mapM) + apply (wpsimp wp: no_fail_mapM no_fail_setRegister) apply (clarsimp simp: sanitiseOrFlags_def sanitiseAndFlags_def) apply ((safe)[1], (wp no_fail_setRegister | simp)+) apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]]) @@ -353,21 +355,23 @@ lemma invokeTCB_WriteRegisters_corres: apply (rule_tac P=\ and P'=\ in corres_inst) apply simp apply (wp+)[2] - apply ((wp static_imp_wp restart_invs' - | strengthen valid_sched_weak_strg einvs_valid_etcbs invs_valid_queues' invs_queues - invs_weak_sch_act_wf - | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def - dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] + apply ((wp hoare_weak_lift_imp restart_invs' + | strengthen valid_sched_weak_strg einvs_valid_etcbs + invs_weak_sch_act_wf + valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct + valid_sched_valid_queues valid_objs'_valid_tcbs' invs_valid_objs' + | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def + dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] apply (rule_tac Q="\_. einvs and tcb_at dest and ex_nonz_cap_to dest" in hoare_post_imp) apply (fastforce simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def dest!: idle_no_ex_cap) prefer 2 apply (rule_tac Q="\_. invs' and tcb_at' dest and ex_nonz_cap_to' dest" in hoare_post_imp) apply (fastforce simp: sch_act_wf_weak invs'_def valid_state'_def dest!: global'_no_ex_cap) apply wpsimp+ + apply fastforce + apply fastforce done -crunch it[wp]: suspend "\s. P (ksIdleThread s)" - lemma tcbSchedDequeue_ResumeCurrentThread_imp_notct[wp]: "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ tcbSchedDequeue t @@ -388,6 +392,10 @@ lemma suspend_ResumeCurrentThread_imp_notct[wp]: \\rv s. 
ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" by (wpsimp simp: suspend_def) +crunches restart, suspend + for cur_tcb'[wp]: cur_tcb' + (wp: crunch_wps threadSet_cur ignore: threadSet) + lemma invokeTCB_CopyRegisters_corres: "corres (dc \ (=)) (einvs and simple_sched_action and tcb_at dest and tcb_at src and ex_nonz_cap_to src and @@ -416,6 +424,8 @@ proof - apply simp apply simp apply (simp | wp)+ + apply fastforce + apply simp done have R: "\src src' des des' xs ys. \ src = src'; des = des'; xs = ys \ \ corres dc (tcb_at src and tcb_at des and invs) @@ -438,7 +448,7 @@ proof - apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) apply (rule asUser_setNextPC_corres) apply wp+ - apply simp+ + apply fastforce+ done show ?thesis apply (simp add: invokeTCB_def performTransfer_def) @@ -452,7 +462,7 @@ proof - apply (simp add: frame_registers_def frameRegisters_def) apply (simp add: getRestartPC_def setNextPC_def dc_def[symmetric]) apply (rule Q[OF refl refl]) - apply (wpsimp wp: mapM_x_wp' static_imp_wp)+ + apply (wpsimp wp: mapM_x_wp' hoare_weak_lift_imp)+ apply (rule corres_split_nor) apply (rule corres_when[OF refl]) apply (rule R[OF refl refl]) @@ -462,17 +472,20 @@ proof - apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp - apply ((solves \wp static_imp_wp\)+) - apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_post_imp) - apply (clarsimp simp: invs_def valid_sched_weak_strg valid_sched_def) - prefer 2 - apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_post_imp) - apply (clarsimp simp: invs'_def valid_state'_def invs_weak_sch_act_wf cur_tcb'_def) - apply (wp mapM_x_wp' static_imp_wp | simp)+ - apply ((wp static_imp_wp restart_invs' | wpc | clarsimp simp: if_apply_def2)+)[2] - apply (wp suspend_nonz_cap_to_tcb static_imp_wp | simp add: if_apply_def2)+ + apply ((solves \wpsimp wp: hoare_weak_lift_imp\)+) + apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_post_imp) + apply (fastforce simp: invs_def valid_sched_weak_strg valid_sched_def) + prefer 2 + apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_post_imp) + apply (fastforce simp: invs'_def valid_state'_def invs_weak_sch_act_wf cur_tcb'_def) + apply ((wp mapM_x_wp' hoare_weak_lift_imp | simp flip: cur_tcb'_def)+)[8] + apply ((wp hoare_weak_lift_imp restart_invs' | wpc | + clarsimp simp: if_apply_def2 simp flip: cur_tcb'_def)+)[2] + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp + | simp add: if_apply_def2 flip: cur_tcb'_def)+ apply (fastforce simp: invs_def valid_state_def valid_pspace_def dest!: idle_no_ex_cap) + apply clarsimp apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) done qed @@ -515,41 +528,9 @@ lemma copyreg_invs': \\rv. invs'\" by (rule hoare_strengthen_post, rule copyreg_invs'', simp) -lemma threadSet_valid_queues_no_state: - "\Invariants_H.valid_queues and (\s. \p. t \ set (ksReadyQueues s p))\ - threadSet f t \\_. Invariants_H.valid_queues\" - apply (simp add: threadSet_def) - apply wp - apply (simp add: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (wp setObject_queues_unchanged_tcb - hoare_Ball_helper - hoare_vcg_all_lift - setObject_tcb_strongest)[1] - apply (wp getObject_tcb_wp) - apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) - apply (clarsimp simp: obj_at'_def) - done - -lemma threadSet_valid_queues'_no_state: - "(\tcb. tcbQueued tcb = tcbQueued (f tcb)) - \ \valid_queues' and (\s. \p. 
t \ set (ksReadyQueues s p))\ - threadSet f t \\_. valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs - objBits_simps addToQs_def - split del: if_split cong: if_cong) - apply (fastforce simp: projectKOs inQ_def split: if_split_asm) - done - lemma isRunnable_corres: - "corres (\ts runn. runnable ts = runn) (tcb_at t) (tcb_at' t) - (get_thread_state t) (isRunnable t)" + "corres (\ts runn. runnable ts = runn) (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_thread_state t) (isRunnable t)" apply (simp add: isRunnable_def) apply (subst bind_return[symmetric]) apply (rule corres_guard_imp) @@ -570,16 +551,6 @@ lemma tcbSchedDequeue_not_queued: apply (wp tg_sp' [where P=\, simplified] | simp)+ done -lemma tcbSchedDequeue_not_in_queue: - "\p. \Invariants_H.valid_queues and tcb_at' t and valid_objs'\ tcbSchedDequeue t - \\rv s. t \ set (ksReadyQueues s p)\" - apply (rule_tac Q="\rv. Invariants_H.valid_queues and obj_at' (Not \ tcbQueued) t" - in hoare_post_imp) - apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def projectKOs inQ_def ) - apply (wp tcbSchedDequeue_not_queued tcbSchedDequeue_valid_queues | - simp add: valid_objs'_maxDomain valid_objs'_maxPriority)+ - done - lemma threadSet_ct_in_state': "(\tcb. tcbState (f tcb) = tcbState tcb) \ \ct_in_state' test\ threadSet f t \\rv. ct_in_state' test\" @@ -621,16 +592,17 @@ lemma tcbSchedDequeue_ct_in_state'[wp]: apply (rule hoare_lift_Pf [where f=ksCurThread]; wp) done -crunch cur[wp]: tcbSchedDequeue cur_tcb' - lemma sp_corres2: - "corres dc (valid_etcbs and weak_valid_sched_action and cur_tcb) - (Invariants_H.valid_queues and valid_queues' and cur_tcb' and tcb_at' t - and (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' and (\_. x \ maxPriority)) - (set_priority t x) (setPriority t x)" + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and tcb_at t + and valid_queues and pspace_aligned and pspace_distinct) + (tcb_at' t and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs' and (\_. 
x \ maxPriority) + and sym_heap_sched_pointers and valid_sched_pointers and pspace_aligned' and pspace_distinct') + (set_priority t x) (setPriority t x)" apply (simp add: setPriority_def set_priority_def thread_set_priority_def) apply (rule stronger_corres_guard_imp) - apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres], simp) apply (rule corres_split[OF ethread_set_corres], simp_all)[1] apply (simp add: etcb_relation_def) apply (rule corres_split[OF isRunnable_corres]) @@ -640,34 +612,37 @@ lemma sp_corres2: apply (rule rescheduleRequired_corres) apply (rule possibleSwitchTo_corres) apply ((clarsimp - | wp static_imp_wp hoare_vcg_if_lift hoare_wp_combs gts_wp + | wp hoare_weak_lift_imp hoare_vcg_if_lift hoare_wp_combs gts_wp isRunnable_wp)+)[4] - apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift) - apply clarsimp - apply ((wp hoare_drop_imps hoare_vcg_if_lift hoare_vcg_all_lift - isRunnable_wp threadSet_pred_tcb_no_state threadSet_valid_queues_no_state - threadSet_valid_queues'_no_state threadSet_cur threadSet_valid_objs_tcbPriority_update - threadSet_weak_sch_act_wf threadSet_ct_in_state'[simplified ct_in_state'_def] - | simp add: etcb_relation_def)+)[1] - apply ((wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift hoare_vcg_disj_lift - tcbSchedDequeue_not_in_queue tcbSchedDequeue_valid_queues - tcbSchedDequeue_ct_in_state'[simplified ct_in_state'_def] - | simp add: etcb_relation_def)+)[2] + apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift + ethread_set_not_queued_valid_queues + | strengthen valid_queues_in_correct_ready_q valid_queues_ready_qs_distinct)+ + apply ((wp hoare_vcg_imp_lift' hoare_vcg_all_lift + isRunnable_wp threadSet_pred_tcb_no_state + threadSet_valid_objs_tcbPriority_update threadSet_sched_pointers + threadSet_valid_sched_pointers tcb_dequeue_not_queued tcbSchedDequeue_not_queued + threadSet_weak_sch_act_wf + | simp add: etcb_relation_def + | strengthen valid_objs'_valid_tcbs' + obj_at'_weakenE[where P="Not \ tcbQueued"] + | wps)+) apply (force simp: valid_etcbs_def tcb_at_st_tcb_at[symmetric] state_relation_def dest: pspace_relation_tcb_at intro: st_tcb_at_opeqI) - apply (force simp: state_relation_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) + apply clarsimp done -lemma setPriority_corres: "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and valid_objs' and (\_. x \ maxPriority)) - (set_priority t x) (setPriority t x)" +lemma setPriority_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and valid_objs' and (\_. x \ maxPriority)) + (set_priority t x) (setPriority t x)" apply (rule corres_guard_imp) apply (rule sp_corres2) - apply (clarsimp simp: valid_sched_def valid_sched_action_def) - apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak) + apply (fastforce simp: valid_sched_def valid_sched_action_def) + apply (fastforce simp: invs'_def valid_state'_def sch_act_wf_weak) done -lemma setMCPriority_corres: "corres dc (tcb_at t) (tcb_at' t) - (set_mcpriority t x) (setMCPriority t x)" +lemma setMCPriority_corres: + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (set_mcpriority t x) (setMCPriority t x)" apply (rule corres_guard_imp) apply (clarsimp simp: setMCPriority_def set_mcpriority_def) apply (rule threadset_corresT) @@ -684,26 +659,21 @@ definition lemma out_corresT: assumes x: "\tcb v. \(getF, setF)\ran tcb_cap_cases. getF (fn v tcb) = getF tcb" assumes y: "\v. \tcb. \(getF, setF)\ran tcb_cte_cases. 
getF (fn' v tcb) = getF tcb" + assumes sched_pointers: "\tcb v. tcbSchedPrev (fn' v tcb) = tcbSchedPrev tcb" + "\tcb v. tcbSchedNext (fn' v tcb) = tcbSchedNext tcb" + assumes flag: "\tcb v. tcbQueued (fn' v tcb) = tcbQueued tcb" assumes e: "\tcb v. exst_same tcb (fn' v tcb)" shows "out_rel fn fn' v v' \ - corres dc (tcb_at t) - (tcb_at' t) + corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ (option_update_thread t fn v) (case_option (return ()) (\x. threadSet (fn' x) t) v')" - apply (case_tac v, simp_all add: out_rel_def - option_update_thread_def) - apply clarsimp - apply (clarsimp simp add: threadset_corresT [OF _ x y e]) + apply (case_tac v, simp_all add: out_rel_def option_update_thread_def) + apply (clarsimp simp: threadset_corresT [OF _ x y sched_pointers flag e]) done lemmas out_corres = out_corresT [OF _ all_tcbI, OF ball_tcb_cap_casesI ball_tcb_cte_casesI] -crunch sch_act[wp]: tcbSchedEnqueue "\s. sch_act_wf (ksSchedulerAction s) s" - (simp: unless_def) - -crunch vq'[wp]: getCurThread valid_queues' - crunch ioports'[wp]: tcbSchedEnqueue valid_ioports' (wp: crunch_wps valid_ioports_lift'' simp: crunch_simps) @@ -711,37 +681,45 @@ lemma tcbSchedDequeue_sch_act_simple[wp]: "tcbSchedDequeue t \sch_act_simple\" by (wpsimp simp: sch_act_simple_def) +lemma tcbSchedNext_update_tcb_cte_cases: + "(a, b) \ ran tcb_cte_cases \ a (tcbPriority_update f tcb) = a tcb" + unfolding tcb_cte_cases_def + by (case_tac tcb; fastforce simp: objBits_simps') + +lemma threadSet_priority_invs': + "\invs' and tcb_at' t and K (p \ maxPriority)\ + threadSet (tcbPriority_update (\_. p)) t + \\_. invs'\" + apply (rule hoare_gen_asm) + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (wp threadSet_valid_pspace' + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + valid_irq_node_lift + valid_irq_handlers_lift'' + valid_ioports_lift' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + | clarsimp simp: cteCaps_of_def tcbSchedNext_update_tcb_cte_cases o_def | rule refl)+ + by (auto simp: obj_at'_def) + lemma setP_invs': "\invs' and tcb_at' t and K (p \ maxPriority)\ setPriority t p \\rv. invs'\" - apply (rule hoare_gen_asm) - apply (simp add: setPriority_def) - apply (wp rescheduleRequired_all_invs_but_ct_not_inQ) - apply simp - apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift') - unfolding st_tcb_at'_def - apply (strengthen not_obj_at'_strengthen, wp) - apply (wp hoare_vcg_imp_lift') - apply (rule_tac Q="\rv s. invs' s" in hoare_post_imp) - apply (clarsimp simp: invs_sch_act_wf' invs'_def invs_queues) - apply (clarsimp simp: valid_state'_def) - apply (wp hoare_drop_imps threadSet_invs_trivial, - simp_all add: inQ_def cong: conj_cong)[1] - apply (wp hoare_drop_imps threadSet_invs_trivial, - simp_all add: inQ_def cong: conj_cong)[1] - apply (wp hoare_drop_imps threadSet_invs_trivial, - simp_all add: inQ_def cong: conj_cong)[1] - apply (rule_tac Q="\_. invs' and obj_at' (Not \ tcbQueued) t - and (\s. \d p. 
t \ set (ksReadyQueues s (d,p)))" - in hoare_post_imp) - apply (clarsimp simp: obj_at'_def inQ_def) - apply (wp tcbSchedDequeue_not_queued)+ - apply clarsimp - done + unfolding setPriority_def + by (wpsimp wp: rescheduleRequired_invs' threadSet_priority_invs') crunches setPriority, setMCPriority for typ_at'[wp]: "\s. P (typ_at' T p s)" and valid_cap[wp]: "valid_cap' c" - (simp: crunch_simps) + (simp: crunch_simps wp: crunch_wps) lemmas setPriority_typ_ats [wp] = typ_at_lifts [OF setPriority_typ_at'] @@ -1026,11 +1004,6 @@ lemma setMCPriority_valid_objs'[wp]: crunch sch_act_simple[wp]: setMCPriority sch_act_simple (wp: ssa_sch_act_simple crunch_wps rule: sch_act_simple_lift simp: crunch_simps) -(* For some reason, when this was embedded in a larger expression clarsimp wouldn't remove it. Adding it as a simp rule does *) -lemma inQ_tc_corres_helper: - "(\d p. (\tcb. tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d \ (tcbQueued tcb \ tcbDomain tcb \ d)) \ a \ set (ksReadyQueues s (d, p))) = True" - by clarsimp - abbreviation "valid_option_prio \ case_option True (\(p, auth). p \ maxPriority)" definition valid_tcb_invocation :: "tcbinvocation \ bool" where @@ -1039,9 +1012,9 @@ definition valid_tcb_invocation :: "tcbinvocation \ bool" where | _ \ True" lemma threadcontrol_corres_helper1: - "\ einvs and simple_sched_action\ - thread_set (tcb_ipc_buffer_update f) a - \\x. weak_valid_sched_action and valid_etcbs\" + "\einvs and simple_sched_action\ + thread_set (tcb_ipc_buffer_update f) a + \\_. weak_valid_sched_action and valid_etcbs\" apply (rule hoare_pre) apply (simp add: thread_set_def) apply (wp set_object_wp) @@ -1054,107 +1027,75 @@ lemma threadcontrol_corres_helper1: apply (clarsimp simp: is_tcb_def) done -lemma threadcontrol_corres_helper2: - "is_aligned a msg_align_bits \ \invs' and tcb_at' t\ - threadSet (tcbIPCBuffer_update (\_. a)) t - \\x s. Invariants_H.valid_queues s \ valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s\" - by (wp threadSet_invs_trivial - | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: inQ_def )+ - lemma threadcontrol_corres_helper3: - "\ einvs and simple_sched_action\ - check_cap_at aaa (ab, ba) (check_cap_at (cap.ThreadCap a) slot (cap_insert aaa (ab, ba) (a, tcb_cnode_index 4))) - \\x. weak_valid_sched_action and valid_etcbs \" - apply (rule hoare_pre) - apply (wp check_cap_inv | simp add:)+ - by (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def - get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) + "\einvs and simple_sched_action\ + check_cap_at cap p (check_cap_at (cap.ThreadCap cap') slot (cap_insert cap p tcb_slot)) + \\_ s. weak_valid_sched_action s \ in_correct_ready_q s \ ready_qs_distinct s \ valid_etcbs s + \ pspace_aligned s \ pspace_distinct s\" + apply (wpsimp + | strengthen valid_sched_valid_queues valid_queues_in_correct_ready_q + valid_sched_weak_strg[rule_format] valid_queues_ready_qs_distinct)+ + apply (wpsimp wp: check_cap_inv) + apply (fastforce simp: valid_sched_def) + done lemma threadcontrol_corres_helper4: "isArchObjectCap ac \ - \invs' and cte_wp_at' (\cte. cteCap cte = capability.NullCap) (cte_map (a, tcb_cnode_index 4)) - and valid_cap' ac \ - checkCapAt ac (cte_map (ab, ba)) - (checkCapAt (capability.ThreadCap a) (cte_map slot) - (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) - \\x. Invariants_H.valid_queues and valid_queues' and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s)\" - apply (wp - | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf - | clarsimp simp: )+ + \invs' and cte_wp_at' (\cte. cteCap cte = capability.NullCap) (cte_map (a, tcb_cnode_index 4)) + and valid_cap' ac\ + checkCapAt ac (cte_map (ab, ba)) + (checkCapAt (capability.ThreadCap a) (cte_map slot) + (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) + \\_ s. sym_heap_sched_pointers s \ valid_sched_pointers s \ valid_tcbs' s \ pspace_aligned' s \ + pspace_distinct' s\" + apply (wpsimp wp: + | strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + invs_valid_objs' valid_objs'_valid_tcbs' invs_pspace_aligned' + invs_pspace_distinct')+ by (case_tac ac; clarsimp simp: capBadge_def isCap_simps tcb_cnode_index_def cte_map_def cte_wp_at'_def cte_level_bits_def) lemma threadSet_invs_trivialT2: - assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" - assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" - assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" - assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" - assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" - assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes + "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + "\tcb. tcbState (F tcb) = tcbState tcb" + "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + "\tcb. tcbSchedPrev (F tcb) = tcbSchedPrev tcb" + "\tcb. tcbSchedNext (F tcb) = tcbSchedNext tcb" + "\tcb. tcbQueued (F tcb) = tcbQueued tcb" + "\tcb. tcbDomain (F tcb) = tcbDomain tcb" + "\tcb. tcbPriority (F tcb) = tcbPriority tcb" + "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" shows - "\\s. invs' s - \ tcb_at' t s \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits) - \ (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) - \ (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) - \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) - \ (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ - threadSet F t - \\rv. invs'\" -proof - - from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast - note threadSet_sch_actT_P[where P=False, simplified] - have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ - valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" - by (auto simp: z) - show ?thesis - apply (simp add: invs'_def valid_state'_def split del: if_split) - apply (rule hoare_pre) - apply (rule hoare_gen_asm [where P="(\tcb. 
is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)"]) - apply (wp x v u b - threadSet_valid_pspace'T - threadSet_sch_actT_P[where P=False, simplified] - threadSet_valid_queues - threadSet_state_refs_of'T[where f'=id] - threadSet_iflive'T - threadSet_ifunsafe'T - threadSet_idle'T - threadSet_global_refsT - irqs_masked_lift - valid_irq_node_lift - valid_irq_handlers_lift'' valid_ioports_lift'' - threadSet_ctes_ofT - threadSet_not_inQ - threadSet_ct_idle_or_in_cur_domain' - threadSet_valid_dom_schedule' - threadSet_valid_queues' - threadSet_cur - untyped_ranges_zero_lift - |clarsimp simp: y z a domains cteCaps_of_def |rule refl)+ - apply (clarsimp simp: obj_at'_def projectKOs pred_tcb_at'_def) - apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) - by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) -qed - -lemma threadSet_valid_queues'_no_state2: - "\ \tcb. tcbQueued tcb = tcbQueued (f tcb); - \tcb. tcbState tcb = tcbState (f tcb); - \tcb. tcbPriority tcb = tcbPriority (f tcb); - \tcb. tcbDomain tcb = tcbDomain (f tcb) \ - \ \valid_queues'\ threadSet f t \\_. valid_queues'\" - apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def - split del: if_split) - apply (simp only: imp_conv_disj) - apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) - apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ - apply (wp getObject_tcb_wp updateObject_default_inv - | simp split del: if_split)+ - apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs - objBits_simps addToQs_def - split del: if_split cong: if_cong) - apply (fastforce simp: projectKOs inQ_def split: if_split_asm) - done + "\\s. invs' s \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)\ + threadSet F t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (rule hoare_gen_asm [where P="\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits"]) + apply (wp threadSet_valid_pspace'T + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_global_refsT + valid_irq_node_lift + valid_irq_handlers_lift'' valid_ioports_lift'' + threadSet_ctes_ofT + threadSet_valid_dom_schedule' + untyped_ranges_zero_lift + sym_heap_sched_pointers_lift threadSet_valid_sched_pointers + threadSet_tcbSchedPrevs_of threadSet_tcbSchedNexts_of + threadSet_sch_actT_P[where P=False, simplified] + threadSet_state_refs_of'T[where f'=id] + threadSet_idle'T + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_cur + | clarsimp simp: assms cteCaps_of_def | rule refl)+ + apply (clarsimp simp: o_def) + by (auto simp: obj_at'_def) lemma getThreadBufferSlot_dom_tcb_cte_cases: "\\\ getThreadBufferSlot a \\rv s. rv \ (+) a ` dom tcb_cte_cases\" @@ -1165,10 +1106,6 @@ lemma tcb_at'_cteInsert[wp]: "\\s. tcb_at' (ksCurThread s) s\ cteInsert t x y \\_ s. tcb_at' (ksCurThread s) s\" by (rule hoare_weaken_pre, wps cteInsert_ct, wp, simp) -lemma tcb_at'_asUser[wp]: - "\\s. tcb_at' (ksCurThread s) s\ asUser a (setTCBIPCBuffer b) \\_ s. tcb_at' (ksCurThread s) s\" - by (rule hoare_weaken_pre, wps asUser_typ_ats(1), wp, simp) - lemma tcb_at'_threadSet[wp]: "\\s. tcb_at' (ksCurThread s) s\ threadSet (tcbIPCBuffer_update (\_. b)) a \\_ s. tcb_at' (ksCurThread s) s\" by (rule hoare_weaken_pre, wps threadSet_tcb', wp, simp) @@ -1177,6 +1114,14 @@ lemma cteDelete_it [wp]: "\\s. P (ksIdleThread s)\ cteDelete slot e \\_ s. 
P (ksIdleThread s)\" by (rule cteDelete_preservation) (wp | clarsimp)+ +lemma cteDelete_pspace_aligned'[wp]: + "cteDelete slot e \pspace_aligned'\" + by (rule cteDelete_preservation; wpsimp) + +lemma cteDelete_pspace_distinct'[wp]: + "cteDelete slot e \pspace_distinct'\" + by (rule cteDelete_preservation; wpsimp) + lemmas threadSet_invs_trivial2 = threadSet_invs_trivialT2 [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] @@ -1204,6 +1149,43 @@ lemma assertDerived_wp_weak: apply (wpsimp simp: assertDerived_def) done +lemma thread_set_ipc_weak_valid_sched_action: + "\ einvs and simple_sched_action\ + thread_set (tcb_ipc_buffer_update f) a + \\x. weak_valid_sched_action\" + apply (rule hoare_pre) + apply (simp add: thread_set_def) + apply (wp set_object_wp) + apply (simp | intro impI | elim exE conjE)+ + apply (frule get_tcb_SomeD) + apply (erule ssubst) + apply (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def + get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) + done + +crunches cap_insert + for in_correct_ready_q[wp]: in_correct_ready_q + and ready_qs_distinct[wp]: ready_qs_distinct + (wp: crunch_wps ready_qs_distinct_lift) + +crunches cap_delete + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + (ignore_del: preemption_point + wp: crunch_wps + simp: crunch_simps OR_choiceE_def + ignore: wrap_ext_bool OR_choiceE) + +crunches option_update_thread + for aligned[wp]: "pspace_aligned" + and distinct[wp]: "pspace_distinct" + +lemma threadSet_invs_tcbIPCBuffer_update: + "\\s. invs' s \ (\tcb. is_aligned (tcbIPCBuffer (tcbIPCBuffer_update f tcb)) msg_align_bits)\ + threadSet (tcbIPCBuffer_update f) t + \\_. invs'\" + by (wp threadSet_invs_trivialT2; simp add: tcb_cte_cases_def cteSizeBits_def) + lemma transferCaps_corres: assumes x: "newroot_rel e e'" and y: "newroot_rel f f'" @@ -1246,8 +1228,8 @@ lemma transferCaps_corres: (invokeTCB (tcbinvocation.ThreadControl a sl' b' mcp_auth p_auth e' f' g'))" proof - have P: "\t v. corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (option_update_thread t (tcb_fault_handler_update o (%x _. x)) (option_map to_bl v)) (case v of None \ return () @@ -1257,8 +1239,8 @@ proof - apply (safe, case_tac tcb', simp add: tcb_relation_def split: option.split) done have R: "\t v. corres dc - (tcb_at t) - (tcb_at' t) + (tcb_at t and pspace_aligned and pspace_distinct) + \ (option_update_thread t (tcb_ipc_buffer_update o (%x _. x)) v) (case v of None \ return () | Some x \ threadSet (tcbIPCBuffer_update (%_. x)) t)" @@ -1271,7 +1253,7 @@ proof - (case_option (return ()) (\p'. setPriority t (fst p')) p_auth)" apply (case_tac p_auth; clarsimp simp: setPriority_corres) done - have S': "\t x. corres dc (tcb_at t) (tcb_at' t) + have S': "\t x. corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ (case_option (return ()) (\(mcp, auth). set_mcpriority t mcp) mcp_auth) (case_option (return ()) (\mcp'. 
setMCPriority t (fst mcp')) mcp_auth)" apply(case_tac mcp_auth; clarsimp simp: setMCPriority_corres) @@ -1395,10 +1377,20 @@ proof - apply (rule corres_split[OF getCurThread_corres], clarsimp) apply (rule corres_when[OF refl rescheduleRequired_corres]) apply (wpsimp wp: gct_wp)+ - apply (wp hoare_drop_imp) - apply (rule threadcontrol_corres_helper1[unfolded pred_conj_def]) - apply (wp hoare_drop_imp) - apply (wp threadcontrol_corres_helper2 | wpc | simp)+ + apply (strengthen valid_queues_ready_qs_distinct) + apply (wpsimp wp: thread_set_ipc_weak_valid_sched_action thread_set_valid_queues + hoare_drop_imp) + apply clarsimp + apply (strengthen valid_objs'_valid_tcbs' invs_valid_objs')+ + apply (wpsimp wp: threadSet_sched_pointers threadSet_valid_sched_pointers hoare_drop_imp + threadSet_invs_tcbIPCBuffer_update) + apply (clarsimp simp: pred_conj_def) + apply (strengthen einvs_valid_etcbs valid_queues_in_correct_ready_q + valid_sched_valid_queues)+ + apply wp + apply (clarsimp simp: pred_conj_def) + apply (strengthen invs_sym_heap_sched_pointers invs_valid_sched_pointers + valid_objs'_valid_tcbs' invs_valid_objs') apply (wpsimp wp: cteDelete_invs' hoare_vcg_conj_lift) apply (fastforce simp: emptyable_def) apply fastforce @@ -1410,15 +1402,16 @@ proof - apply (rule_tac F="isArchObjectCap ac" in corres_gen_asm2) apply (rule corres_split_nor) apply (rule threadset_corres, - simp add: tcb_relation_def, (simp add: exst_same_def)+) - apply (rule corres_split_nor) + simp add: tcb_relation_def, (simp add: exst_same_def)+) + apply (rule corres_split) apply (erule checkCapAt_cteInsert_corres) apply (rule corres_split[OF getCurThread_corres], clarsimp) apply (rule corres_when[OF refl rescheduleRequired_corres]) apply (wp gct_wp)+ + apply (wp hoare_drop_imp) apply (wp hoare_drop_imp threadcontrol_corres_helper3)[1] apply (wp hoare_drop_imp threadcontrol_corres_helper4)[1] - apply (wp thread_set_tcb_ipc_buffer_cap_cleared_invs + apply (wp thread_set_tcb_ipc_buffer_cap_cleared_invs ready_qs_distinct_lift thread_set_cte_wp_at_trivial thread_set_not_state_valid_sched | simp add: ran_tcb_cap_cases)+ apply (wp threadSet_invs_trivial @@ -1427,15 +1420,14 @@ proof - cap_delete_valid_cap cteDelete_deletes cteDelete_invs' | strengthen use_no_cap_to_obj_asid_strg - | clarsimp simp: inQ_def inQ_tc_corres_helper)+ + | clarsimp simp: inQ_def)+ apply (clarsimp simp: cte_wp_at_caps_of_state dest!: is_cnode_or_valid_arch_cap_asid) apply (fastforce simp: emptyable_def) apply (clarsimp simp: inQ_def) apply (clarsimp simp: obj_at_def is_tcb) apply (rule cte_wp_at_tcbI, simp, fastforce, simp) - apply (clarsimp simp: cte_map_def tcb_cnode_index_def obj_at'_def - projectKOs objBits_simps) + apply (clarsimp simp: cte_map_def tcb_cnode_index_def obj_at'_def objBits_simps projectKOs) apply (erule(2) cte_wp_at_tcbI', fastforce simp: objBits_defs cte_level_bits_def, simp) done have U: "getThreadCSpaceRoot a = return (cte_map (a, tcb_cnode_index 0))" @@ -1483,6 +1475,10 @@ proof - out_no_cap_to_trivial [OF ball_tcb_cap_casesI] checked_insert_no_cap_to note if_cong [cong] option.case_cong [cong] + \ \This proof is quite fragile and was written when bind_wp was added to the wp set later + in the theory dependencies, and so was matched with before alternatives. 
We re-add it here to + create a similar environment and avoid needing to rework the proof.\ + note bind_wp[wp] show ?thesis apply (simp add: invokeTCB_def liftE_bindE) apply (simp only: eq_commute[where a= "a"]) @@ -1498,7 +1494,7 @@ proof - apply wp apply wp apply (wpsimp wp: hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift - hoare_vcg_all_lift_R hoare_vcg_all_lift + hoare_vcg_all_liftE_R hoare_vcg_all_lift as_user_invs thread_set_ipc_tcb_cap_valid thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial @@ -1507,37 +1503,17 @@ proof - check_cap_inv[where P=valid_sched] (* from stuff *) check_cap_inv[where P="tcb_at p0" for p0] thread_set_not_state_valid_sched - cap_delete_deletes + check_cap_inv[where P=simple_sched_action] + cap_delete_deletes hoare_drop_imps cap_delete_valid_cap - simp: ran_tcb_cap_cases) + simp: ran_tcb_cap_cases + | strengthen simple_sched_action_sched_act_not)+ apply (strengthen use_no_cap_to_obj_asid_strg) apply (wpsimp wp: cap_delete_cte_at cap_delete_valid_cap) - apply (wpsimp wp: hoare_drop_imps) - apply ((wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_imp_lift' hoare_vcg_all_lift - threadSet_cte_wp_at' threadSet_invs_trivialT2 cteDelete_invs' - simp: tcb_cte_cases_def), (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_cte_wp_at' - simp: tcb_cte_cases_def) - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' - simp: tcb_cte_cases_def, (fastforce+)[6]) - apply wpsimp - apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift - rescheduleRequired_invs' threadSet_cap_to' threadSet_invs_trivialT2 - threadSet_cte_wp_at' hoare_drop_imps - simp: tcb_cte_cases_def) - apply (clarsimp) - apply ((wpsimp wp: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + threadSet_invs_tcbIPCBuffer_update threadSet_cte_wp_at' + | strengthen simple_sched_action_sched_act_not)+ + apply ((wpsimp wp: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift threadSet_valid_objs' thread_set_not_state_valid_sched thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial @@ -1549,14 +1525,14 @@ proof - | strengthen tcb_cap_always_valid_strg tcb_at_invs use_no_cap_to_obj_asid_strg - | (erule exE, clarsimp simp: word_bits_def))+) + | (erule exE, clarsimp simp: word_bits_def) | wp (once) hoare_drop_imps)+) apply (strengthen valid_tcb_ipc_buffer_update) - apply (strengthen invs_valid_objs') + apply (strengthen invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct') apply (wpsimp wp: cteDelete_invs' hoare_vcg_imp_lift' hoare_vcg_all_lift) apply wpsimp apply wpsimp apply (clarsimp cong: imp_cong conj_cong simp: emptyable_def) - apply (rule_tac Q'="\_. ?T2_pre" in hoare_post_imp_R[simplified validE_R_def, rotated]) + apply (rule_tac Q'="\_. 
?T2_pre" in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) (* beginning to deal with is_nondevice_page_cap *) apply (clarsimp simp: emptyable_def is_nondevice_page_cap_simps is_cap_simps is_cnode_or_valid_arch_def obj_ref_none_no_asid cap_asid_def @@ -1571,9 +1547,9 @@ proof - | wpc | strengthen tcb_cap_always_valid_strg use_no_cap_to_obj_asid_strg)+)[1] apply (clarsimp cong: imp_cong conj_cong) apply (rule_tac Q'="\_. ?T2_pre' and (\s. valid_option_prio p_auth)" - in hoare_post_imp_R[simplified validE_R_def, rotated]) + in hoare_strengthen_postE_R[simplified validE_R_def, rotated]) apply (case_tac g'; clarsimp simp: isCap_simps ; clarsimp elim: invs_valid_objs' cong:imp_cong) - apply (wp add: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + apply (wp add: stuff hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift setMCPriority_invs' threadSet_valid_objs' thread_set_not_state_valid_sched setP_invs' typ_at_lifts [OF setPriority_typ_at'] @@ -1584,13 +1560,14 @@ proof - emptyable_def | wpc | strengthen tcb_cap_always_valid_strg use_no_cap_to_obj_asid_strg - | wp (once) add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs + | wp add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs | (erule exE, clarsimp simp: word_bits_def))+ (* the last two subgoals *) apply (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] tcb_at_st_tcb_at[symmetric] tcb_cap_valid_def is_cnode_or_valid_arch_def invs_valid_objs emptyable_def obj_ref_none_no_asid no_cap_to_obj_with_diff_ref_Null is_valid_vtable_root_def is_cap_simps cap_asid_def vs_cap_ref_def arch_cap_fun_lift_def + invs_psp_aligned invs_distinct cong: conj_cong imp_cong split: option.split_asm) by (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def objBits_defs @@ -1635,31 +1612,31 @@ lemma tc_invs': apply (simp only: eq_commute[where a="a"]) apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. 
x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp setMCPriority_invs' + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] - apply (wp add: setP_invs' static_imp_wp hoare_vcg_all_lift)+ + apply (wp add: setP_invs' hoare_weak_lift_imp hoare_vcg_all_lift)+ apply (rule case_option_wp_None_return[OF setP_invs'[simplified pred_conj_assoc]]) apply clarsimp apply wpfix apply assumption apply (rule case_option_wp_None_returnOk) - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak threadSet_invs_trivial2 threadSet_tcb' hoare_vcg_all_lift threadSet_cte_wp_at')+ - apply (wpsimp wp: static_imp_wpE cteDelete_deletes - hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + apply (wpsimp wp: hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_invs' cteDelete_typ_at'_lifts)+ apply (assumption | clarsimp cong: conj_cong imp_cong | (rule case_option_wp_None_returnOk) - | wpsimp wp: static_imp_wp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + | wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak hoare_vcg_imp_lift' hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] - hoare_vcg_const_imp_lift_R assertDerived_wp_weak static_imp_wpE cteDelete_deletes - hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + hoare_vcg_const_imp_lift_R assertDerived_wp_weak hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_liftE_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_typ_at'_lifts cteDelete_sch_act_simple)+ apply (clarsimp simp: tcb_cte_cases_def cte_level_bits_def objBits_defs tcbIPCBufferSlot_def) by (auto dest!: isCapDs isReplyCapD isValidVTableRootD simp: isCap_simps) @@ -1675,7 +1652,7 @@ lemma setSchedulerAction_invs'[wp]: apply (simp add: setSchedulerAction_def) apply wp apply (clarsimp simp add: invs'_def valid_state'_def valid_irq_node'_def - valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs cur_tcb'_def + valid_queues_def bitmapQ_defs cur_tcb'_def ct_not_inQ_def) apply (simp add: ct_idle_or_in_cur_domain'_def) done @@ -1805,8 +1782,8 @@ lemma invokeTCB_corres: apply (rule TcbAcc_R.rescheduleRequired_corres) apply (rule corres_trivial, simp) apply (wpsimp wp: hoare_drop_imp)+ - apply (clarsimp simp: valid_sched_weak_strg einvs_valid_etcbs) - apply (clarsimp simp: Tcb_R.invs_valid_queues' Invariants_H.invs_queues) + apply (fastforce dest: valid_sched_valid_queues simp: valid_sched_weak_strg einvs_valid_etcbs) + apply fastforce done lemma tcbBoundNotification_caps_safe[simp]: @@ -1821,6 +1798,10 @@ lemma valid_bound_ntfn_lift: apply (wp typ_at_lifts[OF P])+ done +crunches setBoundNotification + for sym_heap_sched_pointers[wp]: sym_heap_sched_pointers + (ignore: threadSet wp: threadSet_sched_pointers) + lemma bindNotification_invs': "\bound_tcb_at' ((=) None) tcbptr and ex_nonz_cap_to' ntfnptr @@ -1831,9 +1812,9 @@ lemma bindNotification_invs': \\_. 
invs'\" including no_pre apply (simp add: bindNotification_def invs'_def valid_state'_def) - apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule bind_wp[OF _ get_ntfn_sp']) apply (rule hoare_pre) - apply (wp set_ntfn_valid_pspace' sbn_sch_act' sbn_valid_queues valid_irq_node_lift + apply (wp set_ntfn_valid_pspace' sbn_sch_act' valid_irq_node_lift setBoundNotification_ct_not_inQ valid_bound_ntfn_lift untyped_ranges_zero_lift | clarsimp dest!: global'_no_ex_cap simp: cteCaps_of_def)+ @@ -2004,8 +1985,8 @@ lemma eq_ucast_word8[simp]: done lemma checkPrio_corres: - "corres (ser \ dc) (tcb_at auth) (tcb_at' auth) - (check_prio p auth) (checkPrio p auth)" + "corres (ser \ dc) (tcb_at auth and pspace_aligned and pspace_distinct) \ + (check_prio p auth) (checkPrio p auth)" apply (simp add: check_prio_def checkPrio_def) apply (rule corres_guard_imp) apply (simp add: liftE_bindE) @@ -2027,15 +2008,16 @@ lemma decodeSetPriority_corres: "\ cap_relation cap cap'; is_thread_cap cap; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ corres (ser \ tcbinv_relation) - (cur_tcb and valid_etcbs and (\s. \x \ set extras. s \ (fst x))) - (invs' and (\s. \x \ set extras'. s \' (fst x))) + (cur_tcb and valid_etcbs and + (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ fst x))) + (invs' and (\s. \x \ set extras'. s \' fst x)) (decode_set_priority args cap slot extras) (decodeSetPriority args cap' extras')" apply (cases args; cases extras; cases extras'; clarsimp simp: decode_set_priority_def decodeSetPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) @@ -2046,27 +2028,22 @@ lemma decodeSetMCPriority_corres: "\ cap_relation cap cap'; is_thread_cap cap; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ corres (ser \ tcbinv_relation) - (cur_tcb and valid_etcbs and (\s. \x \ set extras. s \ (fst x))) - (invs' and (\s. \x \ set extras'. s \' (fst x))) + (cur_tcb and valid_etcbs and + (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ fst x))) + (invs' and (\s. \x \ set extras'. s \' fst x)) (decode_set_mcpriority args cap slot extras) (decodeSetMCPriority args cap' extras')" apply (cases args; cases extras; cases extras'; clarsimp simp: decode_set_mcpriority_def decodeSetMCPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) apply (wpsimp simp: valid_cap_def valid_cap'_def)+ done -lemma valid_objs'_maxPriority': - "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbMCP tcb \ maxPriority) t s" - apply (erule (1) valid_objs_valid_tcbE) - apply (clarsimp simp: valid_tcb'_def) - done - lemma getMCP_sp: "\P\ threadGet tcbMCP t \\rv. mcpriority_tcb_at' (\st. st = rv) t and P\" apply (simp add: threadGet_def) @@ -2091,7 +2068,7 @@ lemma checkPrio_wp: checkPrio prio auth \ \rv. 
P \,-" apply (simp add: checkPrio_def) - apply (wp NonDetMonadVCG.whenE_throwError_wp getMCP_wp) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) by (auto simp add: pred_tcb_at'_def obj_at'_def) lemma checkPrio_lt_ct: @@ -2100,7 +2077,7 @@ lemma checkPrio_lt_ct: lemma checkPrio_lt_ct_weak: "\\\ checkPrio prio auth \\rv s. mcpriority_tcb_at' (\mcp. ucast prio \ mcp) auth s\, -" - apply (rule hoare_post_imp_R) + apply (rule hoare_strengthen_postE_R) apply (rule checkPrio_lt_ct) apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) by (rule le_ucast_ucast_le) simp @@ -2160,8 +2137,9 @@ lemma decodeSetSchedParams_corres: "\ cap_relation cap cap'; is_thread_cap cap; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ corres (ser \ tcbinv_relation) - (cur_tcb and valid_etcbs and (\s. \x \ set extras. s \ (fst x))) - (invs' and (\s. \x \ set extras'. s \' (fst x))) + (cur_tcb and valid_etcbs and + (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ fst x))) + (invs' and (\s. \x \ set extras'. s \' fst x)) (decode_set_sched_params args cap slot extras) (decodeSetSchedParams args cap' extras')" apply (simp add: decode_set_sched_params_def decodeSetSchedParams_def) @@ -2171,7 +2149,7 @@ lemma decodeSetSchedParams_corres: apply (clarsimp split: list.split simp: list_all2_Cons2) apply (clarsimp simp: list_all2_Cons1 neq_Nil_conv val_le_length_Cons linorder_not_less) apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_split_norE[OF checkPrio_corres]) apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) @@ -2307,7 +2285,7 @@ lemma slotCapLongRunningDelete_corres: lemma slot_long_running_inv'[wp]: "\P\ slotCapLongRunningDelete ptr \\rv. P\" apply (simp add: slotCapLongRunningDelete_def) - apply (rule hoare_seq_ext [OF _ getCTE_inv]) + apply (rule bind_wp [OF _ getCTE_inv]) apply (rule hoare_pre, wpcw, (wp isFinalCapability_inv)+) apply simp done @@ -2473,11 +2451,11 @@ lemma decodeTCBConfigure_corres: apply (rule decodeSetIPCBuffer_corres; simp) apply (rule corres_splitEE) apply (rule decodeSetSpace_corres; simp) - apply (rule_tac F="is_thread_control set_params" in corres_gen_asm) - apply (rule_tac F="is_thread_control set_space" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_params" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_space" in corres_gen_asm) apply (rule_tac F="tcThreadCapSlot setSpace = cte_map slot" in corres_gen_asm2) apply (rule corres_trivial) - apply (clarsimp simp: returnOk_def is_thread_control_def2 is_cap_simps) + apply (clarsimp simp: tcb_invocation.is_ThreadControl_def returnOk_def is_cap_simps) apply (wp | simp add: invs_def valid_sched_def)+ done @@ -2508,15 +2486,13 @@ lemma decodeTCBConf_wf[wp]: apply (rule_tac Q'="\setSpace s. tcb_inv_wf' setSpace s \ tcb_inv_wf' setIPCParams s \ isThreadControl setSpace \ isThreadControl setIPCParams \ tcThread setSpace = t \ tcNewCRoot setSpace \ None" - in hoare_post_imp_R) + in hoare_strengthen_postE_R) apply wp apply (clarsimp simp: isThreadControl_def2 cong: option.case_cong) apply wpsimp apply (fastforce simp: isThreadControl_def2 objBits_defs) done -declare hoare_True_E_R [simp del] - lemma lsft_real_cte: "\valid_objs'\ lookupSlotForThread t x \\rv. 
real_cte_at' rv\, -" apply (simp add: lookupSlotForThread_def) @@ -2584,10 +2560,9 @@ notes if_cong[cong] shows lemma decodeUnbindNotification_corres: "corres (ser \ tcbinv_relation) - (tcb_at t) - (tcb_at' t) - (decode_unbind_notification (cap.ThreadCap t)) - (decodeUnbindNotification (capability.ThreadCap t))" + (tcb_at t and pspace_aligned and pspace_distinct) \ + (decode_unbind_notification (cap.ThreadCap t)) + (decodeUnbindNotification (capability.ThreadCap t))" apply (simp add: decode_unbind_notification_def decodeUnbindNotification_def) apply (rule corres_guard_imp) apply (rule corres_split_eqrE) @@ -2636,7 +2611,7 @@ lemma decodeTCBInvocation_corres: corres_guard_imp[OF decodeUnbindNotification_corres] corres_guard_imp[OF decodeSetTLSBase_corres], simp_all add: valid_cap_simps valid_cap_simps' invs_def valid_sched_def) - apply (auto simp: list_all2_map1 list_all2_map2 + apply (auto simp: list_all2_map1 list_all2_map2 valid_state_def elim!: list_all2_mono) done @@ -2705,7 +2680,7 @@ lemma restart_makes_simple': \\rv. st_tcb_at' simple' t\" apply (simp add: restart_def) apply (wp sts_st_tcb_at'_cases cancelIPC_simple - cancelIPC_st_tcb_at static_imp_wp | simp)+ + cancelIPC_st_tcb_at hoare_weak_lift_imp | simp)+ apply (rule hoare_strengthen_post [OF isStopped_inv]) prefer 2 apply assumption @@ -2735,6 +2710,7 @@ crunches getThreadBufferSlot, setPriority, setMCPriority lemma inv_tcb_IRQInactive: "\valid_irq_states'\ invokeTCB tcb_inv -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + including classic_wp_pre apply (simp add: invokeTCB_def) apply (rule hoare_pre) apply (wpc | diff --git a/proof/refine/X64/Untyped_R.thy b/proof/refine/X64/Untyped_R.thy index 76d0344225..cdb4f36e86 100644 --- a/proof/refine/X64/Untyped_R.thy +++ b/proof/refine/X64/Untyped_R.thy @@ -291,9 +291,9 @@ next toInteger_nat fromInteger_nat wordBits_def) apply (simp add: not_le) apply (rule whenE_throwError_corres, simp) - apply (clarsimp simp: fromAPIType_def X64_H.fromAPIType_def) + apply (clarsimp simp: fromAPIType_def) apply (rule whenE_throwError_corres, simp) - apply (clarsimp simp: fromAPIType_def X64_H.fromAPIType_def) + apply (clarsimp simp: fromAPIType_def) apply (rule_tac r' = "\cap cap'. cap_relation cap cap'" in corres_splitEE[OF corres_if]) apply simp @@ -726,7 +726,7 @@ lemma map_ensure_empty': apply (wp getCTE_wp') apply (clarsimp elim!: cte_wp_at_weakenE') apply (erule meta_allE) - apply (erule hoare_post_imp_R) + apply (erule hoare_strengthen_postE_R) apply clarsimp done @@ -1401,16 +1401,6 @@ crunch nosch[wp]: insertNewCaps "\s. P (ksSchedulerAction s)" crunch exst[wp]: set_cdt "\s. P (exst s)" -(*FIXME: Move to StateRelation*) -lemma state_relation_schact[elim!]: - "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" - apply (simp add: state_relation_def) - done - -lemma state_relation_queues[elim!]: "(s,s') \ state_relation \ ready_queues_relation (ready_queues s) (ksReadyQueues s')" - apply (simp add: state_relation_def) - done - lemma set_original_symb_exec_l: "corres_underlying {(s, s'). f (kheap s) (exst s) s'} nf nf' dc P P' (set_original p b) (return x)" by (simp add: corres_underlying_def return_def set_original_def in_monad Bex_def) @@ -1441,6 +1431,10 @@ lemma updateNewFreeIndex_noop_psp_corres: | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ done +crunches updateMDB, updateNewFreeIndex, setCTE + for rdyq_projs[wp]: + "\s. P (ksReadyQueues s) (tcbSchedNexts_of s) (tcbSchedPrevs_of s) (\d p. 
inQ d p |< tcbs_of' s)" + lemma insertNewCap_corres: notes if_cong[cong del] if_weak_cong[cong] shows @@ -3292,7 +3286,7 @@ lemma createNewCaps_valid_cap': lemma dmo_ctes_of[wp]: "\\s. P (ctes_of s)\ doMachineOp mop \\rv s. P (ctes_of s)\" - by (simp add: doMachineOp_def split_def | wp select_wp)+ + by (simp add: doMachineOp_def split_def | wp)+ lemma createNewCaps_ranges: "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 @@ -3616,7 +3610,7 @@ lemma updateFreeIndex_mdb_simple': and cte_wp_at' :"ctes_of s src = Some cte" "cteCap cte = capability.UntypedCap d ptr sz idx'" and unt_inc' :"untyped_inc' (ctes_of s)" and valid_objs' :"valid_objs' s" - and invp: "mdb_inv_preserve (ctes_of s) (ctes_of s(src \ cteCap_update (\_. capability.UntypedCap d ptr sz idx) cte))" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. UntypedCap d ptr sz idx) cte))" (is "mdb_inv_preserve (ctes_of s) ?ctes") show "untyped_inc' ?ctes" @@ -3718,8 +3712,8 @@ lemma updateFreeIndex_clear_invs': apply (simp add:updateCap_def) apply (wp setCTE_irq_handlers' getCTE_wp) apply (simp add:updateCap_def) - apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift - hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp setCTE_ioports' + apply (wp irqs_masked_lift cur_tcb_lift ct_idle_or_in_cur_domain'_lift setCTE_ioports' + hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp valid_bitmaps_lift | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] | simp add: getSlotCap_def | simp add: cte_wp_at_ctes_of)+ @@ -4145,15 +4139,17 @@ lemma idx_le_new_offs: end +context begin interpretation Arch . (*FIXME: arch_split*) + lemma valid_sched_etcbs[elim!]: "valid_sched_2 queues ekh sa cdom kh ct it \ valid_etcbs_2 ekh kh" by (simp add: valid_sched_def) crunch ksIdleThread[wp]: deleteObjects "\s. P (ksIdleThread s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp ignore: freeMemory) + (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) crunch ksCurDomain[wp]: deleteObjects "\s. P (ksCurDomain s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp ignore: freeMemory) + (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) crunch irq_node[wp]: deleteObjects "\s. P (irq_node' s)" - (simp: crunch_simps wp: hoare_drop_imps hoare_unless_wp ignore: freeMemory) + (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) lemma deleteObjects_ksCurThread[wp]: "\\s. P (ksCurThread s)\ deleteObjects ptr sz \\_ s. P (ksCurThread s)\" @@ -4303,14 +4299,12 @@ context begin interpretation Arch . (*FIXME: arch_split*) lemma resetUntypedCap_corres: "untypinv_relation ui ui' \ corres (dc \ dc) - (invs and valid_untyped_inv_wcap ui - (Some (cap.UntypedCap dev ptr sz idx)) - and ct_active and einvs - and (\_. \ptr_base ptr' ty us slots dev'. ui = Invocations_A.Retype slot True - ptr_base ptr' ty us slots dev)) - (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') - (reset_untyped_cap slot) - (resetUntypedCap (cte_map slot))" + (einvs and schact_is_rct and ct_active + and valid_untyped_inv_wcap ui (Some (cap.UntypedCap dev ptr sz idx)) + and (\_. \ptr_base ptr' ty us slots dev'. 
+ ui = Invocations_A.Retype slot True ptr_base ptr' ty us slots dev)) + (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') + (reset_untyped_cap slot) (resetUntypedCap (cte_map slot))" apply (rule corres_gen_asm, clarsimp) apply (simp add: reset_untyped_cap_def resetUntypedCap_def liftE_bindE) @@ -4459,7 +4453,7 @@ lemma resetUntypedCap_corres: apply (frule if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) apply (frule(1) descendants_range_ex_cte'[OF empty_descendants_range_in' _ order_refl], (simp add: isCap_simps)+) - apply (intro conjI impI; clarsimp) + apply (auto simp: descendants_range_in'_def valid_untyped'_def) done end @@ -4558,7 +4552,7 @@ lemma resetUntypedCap_invs_etc: ?f \\_. invs' and ?vu2 and ct_active' and ?psp\, \\_. invs'\") apply (simp add: resetUntypedCap_def getSlotCap_def liftE_bind_return_bindE_returnOk bindE_assoc) - apply (rule hoare_vcg_seqE[rotated]) + apply (rule bindE_wp_fwd) apply simp apply (rule getCTE_sp) apply (rule hoare_name_pre_stateE) @@ -4575,8 +4569,8 @@ lemma resetUntypedCap_invs_etc: (simp_all add: cte_wp_at_ctes_of)+)[1] apply (clarsimp simp: unlessE_def cte_wp_at_ctes_of split del: if_split) - apply (rule_tac B="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) - and ct_active' and ?psp" in hoare_vcg_seqE[rotated]) + apply (rule_tac Q'="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) and ct_active' and ?psp" + in bindE_wp_fwd) apply clarsimp apply (rule hoare_pre) apply (simp add: sch_act_simple_def) @@ -4618,7 +4612,7 @@ lemma resetUntypedCap_invs_etc: modify_map_def) apply auto[1] apply simp - apply (rule hoare_pre, rule hoare_post_impErr, + apply (rule hoare_pre, rule hoare_strengthen_postE, rule_tac P="\i. invs' and ?psp and ct_active' and valid_untyped_inv_wcap' ?ui (Some (UntypedCap dev ptr sz (if i = 0 then idx else (length [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] - i) * 2 ^ resetChunkBits)))" @@ -4706,7 +4700,7 @@ lemma whenE_reset_resetUntypedCap_invs_etc: and ct_active' and pspace_no_overlap' (if reset then ptr else ptr') sz\, \\_. invs'\" apply (rule hoare_pre) - apply (wp hoare_whenE_wp resetUntypedCap_invs_etc[where idx=idx, + apply (wp whenE_wp resetUntypedCap_invs_etc[where idx=idx, simplified pred_conj_def conj_assoc] | simp)+ apply (clarsimp simp: cte_wp_at_ctes_of) @@ -4718,6 +4712,8 @@ lemma whenE_reset_resetUntypedCap_invs_etc: crunch ksCurDomain[wp]: updateFreeIndex "\s. P (ksCurDomain s)" +end + lemma (in range_cover) funky_aligned: "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" apply (rule aligned_add_aligned) @@ -4729,10 +4725,13 @@ lemma (in range_cover) funky_aligned: context begin interpretation Arch . (*FIXME: arch_split*) +defs archOverlap_def: + "archOverlap \ \_ _. 
False" + lemma inv_untyped_corres': "\ untypinv_relation ui ui' \ \ corres (dc \ (=)) - (einvs and valid_untyped_inv ui and ct_active) + (einvs and valid_untyped_inv ui and ct_active and schact_is_rct) (invs' and valid_untyped_inv' ui' and ct_active') (invoke_untyped ui) (invokeUntyped ui')" apply (cases ui) @@ -4751,6 +4750,7 @@ lemma inv_untyped_corres': (cte_map cref) reset ptr_base ptr ao' us (map cte_map slots) dev" assume invs: "invs (s :: det_state)" "ct_active s" "valid_list s" "valid_sched s" + "schact_is_rct s" and invs': "invs' s'" "ct_active' s'" and sr: "(s, s') \ state_relation" and vui: "valid_untyped_inv_wcap ?ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) s" @@ -4972,7 +4972,8 @@ lemma inv_untyped_corres': show " corres (dc \ (=)) ((=) s) ((=) s') (invoke_untyped ?ui) (invokeUntyped ?ui')" - apply (clarsimp simp:invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc) + apply (clarsimp simp: invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc + archOverlap_def) apply (insert cover) apply (rule corres_guard_imp) apply (rule corres_split_norE) @@ -5062,9 +5063,9 @@ lemma inv_untyped_corres': \ valid_untyped_inv_wcap ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s \ (reset \ pspace_no_overlap {ptr && ~~ mask sz..(ptr && ~~ mask sz) + 2 ^ sz - 1} s) - " in hoare_post_imp_R) + " in hoare_strengthen_postE_R) apply (simp add: whenE_def split del: if_split, wp) - apply (rule validE_validE_R, rule hoare_post_impErr, rule reset_untyped_cap_invs_etc, auto)[1] + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule reset_untyped_cap_invs_etc, auto)[1] apply wp apply (clarsimp simp: ui cte_wp_at_caps_of_state bits_of_def untyped_range.simps) @@ -5105,7 +5106,7 @@ lemma inv_untyped_corres': apply (drule invoke_untyped_proofs.usable_range_disjoint) apply (clarsimp simp: field_simps mask_out_sub_mask shiftl_t2n) - apply ((rule validE_validE_R)?, rule hoare_post_impErr, + apply ((rule validE_validE_R)?, rule hoare_strengthen_postE, rule whenE_reset_resetUntypedCap_invs_etc[where ptr="ptr && ~~ mask sz" and ptr'=ptr and sz=sz and idx=idx and ui=ui' and dev=dev]) @@ -5146,7 +5147,7 @@ lemma inv_untyped_corres': apply (clarsimp simp only: pred_conj_def invs ui) apply (strengthen vui) apply (cut_tac vui invs invs') - apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs) + apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs schact_is_rct_def) apply (cut_tac vui' invs') apply (clarsimp simp: ui cte_wp_at_ctes_of if_apply_def2 ui') done @@ -5160,12 +5161,6 @@ crunch pred_tcb_at'[wp]: insertNewCap "pred_tcb_at' proj P t" crunch pred_tcb_at'[wp]: doMachineOp "pred_tcb_at' proj P t" (wp: crunch_wps) - -crunch irq_node[wp]: set_thread_state "\s. P (interrupt_irq_node s)" -crunch ctes_of [wp]: setQueue "\s. P (ctes_of s)" -crunch cte_wp_at [wp]: setQueue "cte_wp_at' P p" - (simp: cte_wp_at_ctes_of) - lemma sts_valid_untyped_inv': "\valid_untyped_inv' ui\ setThreadState st t \\rv. valid_untyped_inv' ui\" apply (cases ui, simp add: ex_cte_cap_to'_def) @@ -5176,7 +5171,7 @@ lemma sts_valid_untyped_inv': crunch nosch[wp]: invokeUntyped "\s. P (ksSchedulerAction s)" (simp: crunch_simps zipWithM_x_mapM - wp: crunch_wps hoare_unless_wp mapME_x_inv_wp preemptionPoint_inv) + wp: crunch_wps unless_wp mapME_x_inv_wp preemptionPoint_inv) crunch no_0_obj'[wp]: insertNewCap no_0_obj' (wp: crunch_wps) @@ -5202,11 +5197,8 @@ crunch norqL1[wp]: insertNewCap "\s. 
P (ksReadyQueuesL1Bitmap s)" (wp: crunch_wps) crunch norqL2[wp]: insertNewCap "\s. P (ksReadyQueuesL2Bitmap s)" (wp: crunch_wps) -crunch ct[wp]: insertNewCap "\s. P (ksCurThread s)" - (wp: crunch_wps) crunch state_refs_of'[wp]: insertNewCap "\s. P (state_refs_of' s)" (wp: crunch_wps) -crunch cteCaps[wp]: updateNewFreeIndex "\s. P (cteCaps_of s)" crunch if_unsafe_then_cap'[wp]: updateNewFreeIndex "if_unsafe_then_cap'" lemma insertNewCap_ifunsafe'[wp]: @@ -5311,39 +5303,29 @@ lemma insertNewCap_ioports': apply (wpsimp wp: setCTE_ioports' getCTE_wp) by (clarsimp simp: cte_wp_at_ctes_of) -crunch irq_states' [wp]: insertNewCap valid_irq_states' - (wp: getCTE_wp') - -crunch vq'[wp]: insertNewCap valid_queues' - (wp: crunch_wps) - -crunch irqs_masked' [wp]: insertNewCap irqs_masked' - (wp: crunch_wps rule: irqs_masked_lift) - -crunch valid_machine_state'[wp]: insertNewCap valid_machine_state' - (wp: crunch_wps) - -crunch pspace_domain_valid[wp]: insertNewCap pspace_domain_valid - (wp: crunch_wps) - -crunch ct_not_inQ[wp]: insertNewCap "ct_not_inQ" - (wp: crunch_wps) - -crunch tcbState_inv[wp]: insertNewCap "obj_at' (\tcb. P (tcbState tcb)) t" - (wp: crunch_simps hoare_drop_imps) -crunch tcbDomain_inv[wp]: insertNewCap "obj_at' (\tcb. P (tcbDomain tcb)) t" - (wp: crunch_simps hoare_drop_imps) -crunch tcbPriority_inv[wp]: insertNewCap "obj_at' (\tcb. P (tcbPriority tcb)) t" +crunches insertNewCap + for irq_states'[wp]: valid_irq_states' + and irqs_masked' [wp]: irqs_masked' + and valid_machine_state'[wp]: valid_machine_state' + and pspace_domain_valid[wp]: pspace_domain_valid + and ct_not_inQ[wp]: "ct_not_inQ" + and tcbState_inv[wp]: "obj_at' (\tcb. P (tcbState tcb)) t" + and tcbDomain_inv[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t" + and tcbPriority_inv[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t" + and sched_queues_projs[wp]: "\s. P (tcbSchedNexts_of s) (tcbSchedPrevs_of s)" + and tcbQueueds_of[wp]: "\s. P (tcbQueued |< tcbs_of' s)" + and valid_sched_pointers[wp]: valid_sched_pointers (wp: crunch_simps hoare_drop_imps) lemma insertNewCap_ct_idle_or_in_cur_domain'[wp]: "\ct_idle_or_in_cur_domain' and ct_active'\ insertNewCap parent slot cap \\_. ct_idle_or_in_cur_domain'\" -apply (wp ct_idle_or_in_cur_domain'_lift_futz[where Q=\]) -apply (rule_tac Q="\_. obj_at' (\tcb. tcbState tcb \ Structures_H.thread_state.Inactive) t and obj_at' (\tcb. d = tcbDomain tcb) t" - in hoare_strengthen_post) -apply (wp | clarsimp elim: obj_at'_weakenE)+ -apply (auto simp: obj_at'_def) -done + apply (wp ct_idle_or_in_cur_domain'_lift_futz[where Q=\]) + apply (rule_tac Q="\_. obj_at' (\tcb. tcbState tcb \ Structures_H.thread_state.Inactive) t and + obj_at' (\tcb. d = tcbDomain tcb) t" + in hoare_strengthen_post) + apply (wp | clarsimp elim: obj_at'_weakenE)+ + apply (auto simp: obj_at'_def) + done crunch ksDomScheduleIdx[wp]: insertNewCap "\s. 
P (ksDomScheduleIdx s)" (wp: crunch_simps hoare_drop_imps) @@ -5410,8 +5392,8 @@ lemma insertNewCap_invs': apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) apply (wp insertNewCap_valid_pspace' sch_act_wf_lift - valid_queues_lift cur_tcb_lift tcb_in_cur_domain'_lift - insertNewCap_valid_global_refs' + cur_tcb_lift tcb_in_cur_domain'_lift sym_heap_sched_pointers_lift + insertNewCap_valid_global_refs' valid_bitmaps_lift valid_arch_state_lift' insertNewCap_ioports' valid_irq_node_lift insertNewCap_valid_irq_handlers) apply (clarsimp simp: cte_wp_at_ctes_of) @@ -5493,9 +5475,6 @@ lemma createNewCaps_cap_to': apply fastforce done -crunch it[wp]: copyGlobalMappings "\s. P (ksIdleThread s)" - (wp: mapM_x_wp' ignore: clearMemory) - lemma createNewCaps_idlethread[wp]: "\\s. P (ksIdleThread s)\ createNewCaps tp ptr sz us d \\rv s. P (ksIdleThread s)\" apply (simp add: createNewCaps_def toAPIType_def @@ -5526,8 +5505,6 @@ lemma createNewCaps_IRQHandler[wp]: apply (wp | wpc | simp add: image_def | rule hoare_pre_cont)+ done -crunch ksIdleThread[wp]: updateCap "\s. P (ksIdleThread s)" - lemma createNewCaps_ct_active': "\ct_active' and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and K (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n)\ createNewCaps ty ptr n us d @@ -5643,14 +5620,14 @@ lemma invokeUntyped_invs'': apply (clarsimp simp:invokeUntyped_def getSlotCap_def ui) apply (rule validE_valid) apply (rule hoare_pre) - apply (rule_tac B="\_ s. invs' s \ Q s \ ct_active' s + apply (rule_tac Q'="\_ s. invs' s \ Q s \ ct_active' s \ valid_untyped_inv_wcap' ui (Some (UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s \ (reset \ pspace_no_overlap' (ptr && ~~ mask sz) sz s) - " in hoare_vcg_seqE[rotated]) + " in bindE_wp_fwd) apply (simp only: whenE_def) apply wp - apply (rule hoare_post_impErr, rule combine_validE, + apply (rule hoare_strengthen_postE, rule combine_validE, rule resetUntypedCap_invs_etc, rule valid_validE, rule reset_Q') apply (clarsimp simp only: if_True) apply auto[1] @@ -5743,7 +5720,7 @@ lemma invokeUntyped_invs'[wp]: "\invs' and valid_untyped_inv' ui and ct_active'\ invokeUntyped ui \\rv. invs'\" - apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_post_taut, simplified]) + apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_TrueI, simplified]) apply auto done @@ -5814,7 +5791,7 @@ lemma resetUntypedCap_IRQInactive: (is "\?P\ resetUntypedCap slot \?Q\,\?E\") apply (simp add: resetUntypedCap_def) apply (rule hoare_pre) - apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_post_impErr] + apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_strengthen_postE] doMachineOp_irq_states' preemptionPoint_inv hoare_drop_imps | simp add: no_irq_clearMemory if_apply_def2)+ done @@ -5823,8 +5800,7 @@ lemma inv_untyped_IRQInactive: "\valid_irq_states'\ invokeUntyped ui -, \\rv s. 
intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" apply (simp add: invokeUntyped_def) - apply (rule hoare_pre) - apply (wp hoare_whenE_wp resetUntypedCap_IRQInactive | wpc | simp)+ + apply (wpsimp wp: resetUntypedCap_IRQInactive) done end diff --git a/proof/refine/X64/VSpace_R.thy b/proof/refine/X64/VSpace_R.thy index 0cb313bccc..ed1a2754a5 100644 --- a/proof/refine/X64/VSpace_R.thy +++ b/proof/refine/X64/VSpace_R.thy @@ -111,7 +111,7 @@ lemma asidBits_asid_bits[simp]: asidHighBits_def asid_low_bits_def) lemma handleVMFault_corres: - "corres (fr \ dc) (tcb_at thread) (tcb_at' thread) + "corres (fr \ dc) (tcb_at thread and pspace_aligned and pspace_distinct) \ (handle_vm_fault thread fault) (handleVMFault thread fault)" apply (simp add: X64_H.handleVMFault_def handle_vm_fault_def) apply (rule corres_guard_imp) @@ -347,13 +347,9 @@ lemma deleteASID_corres [corres]: apply (simp add: vs_refs_def) apply (rule image_eqI[rotated], erule graph_ofI) apply (simp add: mask_asid_low_bits_ucast_ucast) - apply wp - apply (simp add: o_def) - apply (wp getASID_wp) - apply clarsimp - apply assumption - apply wp+ - apply clarsimp + \ \rewrite assumption so that the goal can refer to the C variable instead of the abstract's.\ + apply (drule Some_to_the) + apply (wpsimp wp: getASID_wp)+ apply (clarsimp simp: valid_arch_state_def valid_asid_table_def dest!: invs_arch_state) apply blast @@ -546,7 +542,7 @@ lemma invalidatePageStructureCacheASID_corres' [corres]: "corres dc \ \ (invalidate_page_structure_cache_asid vspace asid) (X64_H.invalidatePageStructureCacheASID vspace' asid')" - by (corressimp simp: invalidate_page_structure_cache_asid_def + by (corresKsimp simp: invalidate_page_structure_cache_asid_def X64_H.invalidatePageStructureCacheASID_def invalidateLocalPageStructureCacheASID_def assms ucast_id @@ -586,7 +582,7 @@ lemma unmapPageTable_corres: apply simp apply (rule invalidatePageStructureCacheASID_corres) apply ((wpsimp wp: hoare_if get_pde_wp getPDE_wp)+)[8] - apply ((wpsimp wp: lookup_pd_slot_wp hoare_vcg_all_lift_R | wp (once) hoare_drop_imps)+)[2] + apply ((wpsimp wp: lookup_pd_slot_wp hoare_vcg_all_liftE_R | wp (once) hoare_drop_imps)+)[2] apply ((wp find_vspace_for_asid_wp)+)[4] apply (clarsimp simp: invs_def valid_state_def valid_pspace_def valid_arch_caps_def word_neq_0_conv[symmetric]) @@ -679,7 +675,7 @@ lemma unmapPage_corres: apply (simp add: page_entry_map_def) apply simp apply (rule storePTE_corres') - apply (((wpsimp wp: hoare_vcg_all_lift_R get_pte_wp getPTE_wp lookup_pt_slot_wp + apply (((wpsimp wp: hoare_vcg_all_liftE_R get_pte_wp getPTE_wp lookup_pt_slot_wp simp: unlessE_def is_aligned_pml4 if_apply_def2 split_del: if_split simp_del: dc_simp)+ @@ -692,7 +688,7 @@ lemma unmapPage_corres: apply (simp add: page_entry_map_def) apply simp apply (rule storePDE_corres') - apply (((wpsimp wp: hoare_vcg_all_lift_R get_pde_wp getPDE_wp lookup_pd_slot_wp + apply (((wpsimp wp: hoare_vcg_all_liftE_R get_pde_wp getPDE_wp lookup_pd_slot_wp simp: unlessE_def is_aligned_pml4 if_apply_def2 split_del: if_split simp_del: dc_simp)+ @@ -705,7 +701,7 @@ lemma unmapPage_corres: apply (simp add: page_entry_map_def) apply simp apply (rule storePDPTE_corres') - apply (((wpsimp wp: hoare_vcg_all_lift_R get_pdpte_wp getPDPTE_wp + apply (((wpsimp wp: hoare_vcg_all_liftE_R get_pdpte_wp getPDPTE_wp lookup_pdpt_slot_wp simp: unlessE_def is_aligned_pml4 if_apply_def2 split_del: if_split @@ -808,7 +804,7 @@ lemma message_info_from_data_eqv: lemma setMessageInfo_corres: "mi' = message_info_map mi \ - 
corres dc (tcb_at t) (tcb_at' t) + corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ (set_message_info t mi) (setMessageInfo t mi')" apply (simp add: setMessageInfo_def set_message_info_def) apply (subgoal_tac "wordFromMessageInfo (message_info_map mi) = @@ -892,7 +888,7 @@ proof - apply (rule corres_fail[where P=\ and P'=\]) apply (simp add: same_refs_def) apply (rule corres_underlying_split[where r'=dc, OF _ corres_return_eq_same[OF refl] - hoare_post_taut hoare_post_taut]) + hoare_TrueI hoare_TrueI]) apply simp apply (rule invalidatePageStructureCacheASID_corres) apply (wpsimp simp: invs_psp_aligned)+ @@ -911,7 +907,7 @@ proof - apply (simp add: same_refs_def) apply simp apply (rule corres_underlying_split[where r'=dc, OF _ corres_return_eq_same[OF refl] - hoare_post_taut hoare_post_taut]) + hoare_TrueI hoare_TrueI]) apply (rule invalidatePageStructureCacheASID_corres) apply (wpsimp simp: invs_psp_aligned)+ apply (frule (1) mapping_map_pdpte, clarsimp) @@ -929,7 +925,7 @@ proof - apply (simp add: same_refs_def) apply simp apply (rule corres_underlying_split[where r'=dc, OF _ corres_return_eq_same[OF refl] - hoare_post_taut hoare_post_taut]) + hoare_TrueI hoare_TrueI]) apply (rule invalidatePageStructureCacheASID_corres) apply (wpsimp simp: invs_psp_aligned)+ apply (wp arch_update_cap_invs_map set_cap_valid_page_map_inv) @@ -954,7 +950,7 @@ proof - apply (simp add: perform_page_invocation_unmap_def performPageInvocationUnmap_def split_def) apply (rule corres_guard_imp) apply (rule corres_underlying_split[where r'=dc, OF _ corres_return_eq_same[OF refl] - hoare_post_taut hoare_post_taut]) + hoare_TrueI hoare_TrueI]) apply (rule corres_split) apply (rule unmapPage_corres[OF refl refl refl refl]) apply (rule corres_split[where r'=acap_relation]) @@ -1103,7 +1099,7 @@ lemma clear_pdpt_corres: crunches invalidatePageStructureCacheASID, unmapPageTable, unmapPageDirectory, unmapPDPT for typ_at'[wp]: "\s. P (typ_at' T p s)" - (wp: crunch_wps hoare_vcg_all_lift_R) + (wp: crunch_wps hoare_vcg_all_liftE_R) lemmas unmapPageTable_typ_ats[wp] = typ_at_lifts[OF unmapPageTable_typ_at'] lemmas unmapPageDirectory_typ_ats[wp] = typ_at_lifts[OF unmapPageDirectory_typ_at'] @@ -1546,7 +1542,7 @@ lemma getCurrentUserCR3_wp: lemma setVMRoot_invs [wp]: "\invs'\ setVMRoot p \\rv. invs'\" apply (simp add: setVMRoot_def getThreadVSpaceRoot_def setCurrentUserVSpaceRoot_def) - apply (wp hoare_whenE_wp getCurrentUserCR3_wp findVSpaceForASID_vs_at_wp + apply (wp whenE_wp getCurrentUserCR3_wp findVSpaceForASID_vs_at_wp | wpcw | clarsimp simp: if_apply_def2 asid_wf_0 | strengthen valid_cr3'_makeCR3)+ @@ -1555,7 +1551,7 @@ lemma setVMRoot_invs [wp]: lemma setVMRoot_invs_no_cicd': "\invs_no_cicd'\ setVMRoot p \\rv. invs_no_cicd'\" apply (simp add: setVMRoot_def getThreadVSpaceRoot_def setCurrentUserVSpaceRoot_def) - apply (wp hoare_whenE_wp getCurrentUserCR3_wp findVSpaceForASID_vs_at_wp + apply (wp whenE_wp getCurrentUserCR3_wp findVSpaceForASID_vs_at_wp | wpcw | clarsimp simp: if_apply_def2 asid_wf_0 | strengthen valid_cr3'_makeCR3)+ @@ -1711,22 +1707,6 @@ crunches storePDE, storePDPTE, storePML4E and norqL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" (simp: updateObject_default_def) -lemma storePDE_valid_queues [wp]: - "\Invariants_H.valid_queues\ storePDE p pde \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma storePDPTE_valid_queues [wp]: - "\Invariants_H.valid_queues\ storePDPTE p pde \\_. 
Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma storePML4E_valid_queues [wp]: - "\Invariants_H.valid_queues\ storePML4E p pde \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma storePDE_valid_queues' [wp]: - "\valid_queues'\ storePDE p pde \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma storePDE_state_refs' [wp]: "\\s. P (state_refs_of' s)\ storePDE p pde \\rv s. P (state_refs_of' s)\" apply (clarsimp simp: storePDE_def) @@ -1751,10 +1731,6 @@ lemma setObject_pde_ksInt [wp]: "\\s. P (ksInterruptState s)\ setObject p (pde::pde) \\_. \s. P (ksInterruptState s)\" by (wp setObject_ksInterrupt updateObject_default_inv|simp)+ -lemma storePDPTE_valid_queues' [wp]: - "\valid_queues'\ storePDPTE p pdpte \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma storePDPTE_state_refs' [wp]: "\\s. P (state_refs_of' s)\ storePDPTE p pdpte \\rv s. P (state_refs_of' s)\" apply (clarsimp simp: storePDPTE_def) @@ -1779,10 +1755,6 @@ lemma setObject_pdpte_ksInt [wp]: "\\s. P (ksInterruptState s)\ setObject p (pdpte::pdpte) \\_. \s. P (ksInterruptState s)\" by (wp setObject_ksInterrupt updateObject_default_inv|simp)+ -lemma storePML4E_valid_queues' [wp]: - "\valid_queues'\ storePML4E p pml4e \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma storePML4E_state_refs' [wp]: "\\s. P (state_refs_of' s)\ storePML4E p pml4e \\rv s. P (state_refs_of' s)\" apply (clarsimp simp: storePML4E_def) @@ -2071,6 +2043,26 @@ crunches storePTE, storePDE, storePDPTE, storePML4E and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" (wp: setObject_ksPSpace_only updateObject_default_inv) +lemma storePTE_tcbs_of'[wp]: + "storePTE c (pte::pte) \\s. P' (tcbs_of' s)\" + unfolding storePTE_def + by setObject_easy_cases + +lemma storePDE_tcbs_of'[wp]: + "storePDE c (pde::pde) \\s. P' (tcbs_of' s)\" + unfolding storePDE_def + by setObject_easy_cases + +lemma storePDPTE_tcbs_of'[wp]: + "storePDPTE c (pdpte::pdpte) \\s. P' (tcbs_of' s)\" + unfolding storePDPTE_def + by setObject_easy_cases + +lemma storePML4E_tcbs_of'[wp]: + "storePML4E c (pml4e::pml4e) \\s. P' (tcbs_of' s)\" + unfolding storePML4E_def + by setObject_easy_cases + lemma storePDE_invs[wp]: "\invs' and valid_pde' pde\ storePDE p pde @@ -2081,7 +2073,7 @@ lemma storePDE_invs[wp]: irqs_masked_lift valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' valid_ioports_lift'' - untyped_ranges_zero_lift + untyped_ranges_zero_lift valid_bitmaps_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp done @@ -2096,7 +2088,7 @@ lemma storePDPTE_invs[wp]: irqs_masked_lift valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' valid_ioports_lift'' - untyped_ranges_zero_lift + untyped_ranges_zero_lift valid_bitmaps_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp done @@ -2111,7 +2103,7 @@ lemma storePML4E_invs[wp]: irqs_masked_lift valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' valid_ioports_lift'' - untyped_ranges_zero_lift + untyped_ranges_zero_lift valid_bitmaps_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp done @@ -2139,14 +2131,6 @@ crunch norqL1[wp]: storePTE "\s. P (ksReadyQueuesL1Bitmap s)" crunch norqL2[wp]: storePTE "\s. P (ksReadyQueuesL2Bitmap s)" (simp: updateObject_default_def) -lemma storePTE_valid_queues [wp]: - "\Invariants_H.valid_queues\ storePTE p pde \\_. 
Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma storePTE_valid_queues' [wp]: - "\valid_queues'\ storePTE p pde \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma storePTE_iflive [wp]: "\if_live_then_nonz_cap'\ storePTE p pte \\rv. if_live_then_nonz_cap'\" apply (simp add: storePTE_def) @@ -2198,8 +2182,6 @@ lemma storePTE_valid_objs [wp]: apply (clarsimp simp: valid_obj'_def) done -crunch no_0_obj' [wp]: storePTE no_0_obj' - lemma storePTE_vms'[wp]: "\valid_machine_state'\ storePTE p pde \\_. valid_machine_state'\" apply (simp add: storePTE_def valid_machine_state'_def pointerInUserData_def @@ -2263,7 +2245,7 @@ lemma storePTE_invs [wp]: apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' valid_ioports_lift'' - untyped_ranges_zero_lift + untyped_ranges_zero_lift valid_bitmaps_lift | simp add: cteCaps_of_def o_def)+ apply clarsimp done @@ -2309,14 +2291,6 @@ lemma setASIDPool_qsL2 [wp]: "\\s. P (ksReadyQueuesL2Bitmap s)\ setObject p (ap::asidpool) \\rv s. P (ksReadyQueuesL2Bitmap s)\" by (wp setObject_qs updateObject_default_inv|simp)+ -lemma setASIDPool_valid_queues [wp]: - "\Invariants_H.valid_queues\ setObject p (ap::asidpool) \\_. Invariants_H.valid_queues\" - by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ - -lemma setASIDPool_valid_queues' [wp]: - "\valid_queues'\ setObject p (ap::asidpool) \\_. valid_queues'\" - by (wp valid_queues_lift') - lemma setASIDPool_state_refs' [wp]: "\\s. P (state_refs_of' s)\ setObject p (ap::asidpool) \\rv s. P (state_refs_of' s)\" apply (clarsimp simp: setObject_def valid_def in_monad split_def @@ -2422,6 +2396,10 @@ lemma setObject_ap_ksDomScheduleIdx [wp]: "\\s. P (ksDomScheduleIdx s)\ setObject p (ap::asidpool) \\_. \s. P (ksDomScheduleIdx s)\" by (wp updateObject_default_inv|simp add:setObject_def | wpc)+ +lemma setObject_asidpool_tcbs_of'[wp]: + "setObject c (asidpool::asidpool) \\s. P' (tcbs_of' s)\" + by setObject_easy_cases + lemma setASIDPool_invs [wp]: "\invs' and valid_asid_pool' ap\ setObject p (ap::asidpool) \\_. 
invs'\" apply (simp add: invs'_def valid_state'_def valid_pspace'_def) @@ -2430,7 +2408,7 @@ lemma setASIDPool_invs [wp]: valid_arch_state_lift' valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' valid_ioports_lift'' untyped_ranges_zero_lift - updateObject_default_inv + updateObject_default_inv valid_bitmaps_lift | simp add: cteCaps_of_def | rule setObject_ksPSpace_only)+ apply (clarsimp simp add: setObject_def o_def) diff --git a/proof/sep-capDL/Frame_SD.thy b/proof/sep-capDL/Frame_SD.thy index 861d47b6ab..3c7937e4ad 100644 --- a/proof/sep-capDL/Frame_SD.thy +++ b/proof/sep-capDL/Frame_SD.thy @@ -85,13 +85,13 @@ lemma disjoint_union_diff: by auto lemma intent_reset_update_slots_single: - "intent_reset (update_slots (object_slots obj(slot \ cap)) obj) - = update_slots (object_slots (intent_reset obj)(slot \ cap)) (intent_reset obj)" + "intent_reset (update_slots ((object_slots obj)(slot \ cap)) obj) + = update_slots ((object_slots (intent_reset obj))(slot \ cap)) (intent_reset obj)" by simp lemma object_clean_update_slots_single: - "object_clean (update_slots (object_slots obj(slot \ cap)) obj) - = update_slots (object_slots (object_clean obj)(slot \ reset_cap_asid cap)) (object_clean obj)" + "object_clean (update_slots ((object_slots obj)(slot \ cap)) obj) + = update_slots ((object_slots (object_clean obj))(slot \ reset_cap_asid cap)) (object_clean obj)" by (auto simp: object_clean_def intent_reset_def asid_reset_def update_slots_def object_slots_def fun_eq_iff split: cdl_object.splits) @@ -203,7 +203,7 @@ lemma object_clean_has_slots: lemma set_object_slot_wp_helper: "\\s. <(obj_id, slot) \c - \* R> s \ cdl_objects s obj_id = Some obj \ object_clean obj = object_clean obj'\ - set_object obj_id (update_slots (object_slots obj' (slot \ cap)) obj') + set_object obj_id (update_slots ((object_slots obj') (slot \ cap)) obj') \\rv. <(obj_id, slot) \c cap \* R>\" apply (clarsimp simp: set_object_def sep_any_def) apply wp @@ -230,7 +230,7 @@ lemma set_object_slot_wp: "\\s. <(obj_id, slot) \c - \* R> s \ cdl_objects s obj_id = Some obj \ (\obj'. object_clean obj = object_clean obj' \ - nobj = (update_slots (object_slots obj' (slot \ cap)) obj'))\ + nobj = (update_slots ((object_slots obj') (slot \ cap)) obj'))\ set_object obj_id nobj \\rv. 
<(obj_id, slot) \c cap \* R>\" apply (rule hoare_name_pre_state) @@ -316,7 +316,7 @@ lemma set_cap_wp: apply (case_tac ptr, rename_tac obj_id slot, clarsimp) apply (wp|wpc)+ apply (rule_tac obj = obj in set_object_slot_wp) - apply (wp select_wp |wpc)+ + apply (wp |wpc)+ apply clarsimp apply (clarsimp simp: update_slots_def has_slots_def split: cdl_object.splits) diff --git a/proof/sep-capDL/Helpers_SD.thy b/proof/sep-capDL/Helpers_SD.thy index dbc1289b16..8d2a1a4634 100644 --- a/proof/sep-capDL/Helpers_SD.thy +++ b/proof/sep-capDL/Helpers_SD.thy @@ -1034,7 +1034,7 @@ lemma derive_cap_wp: "\ P (derived_cap cap) \ derive_cap slot cap \P\, -" apply (clarsimp simp: derive_cap_def derived_cap_def) apply (clarsimp simp: validE_R_def derive_cap_def split:cdl_cap.splits) - apply (safe, (wp alternative_wp alternativeE_wp hoare_whenE_wp | + apply (safe, (wp whenE_wp | clarsimp simp: ensure_no_children_def)+ ) done diff --git a/proof/sep-capDL/Lookups_D.thy b/proof/sep-capDL/Lookups_D.thy index 57857ec82e..cd5691cc40 100644 --- a/proof/sep-capDL/Lookups_D.thy +++ b/proof/sep-capDL/Lookups_D.thy @@ -7,7 +7,7 @@ theory Lookups_D imports "DSpec.Syscall_D" - "Lib.OptionMonadND" + "Monads.Nondet_Reader_Option" begin type_synonym 'a lookup = "cdl_state \ 'a option" @@ -94,55 +94,4 @@ lemma gets_the_resolve_cap: apply (clarsimp simp: in_monad gets_the_get_cnode [symmetric]) done -definition resolve_address_bits' :: - "cdl_cap \ cdl_cptr \ nat \ (cdl_cap_ref \ nat) lookup" -where - "resolve_address_bits' cap cptr n \ odrop $ resolve_cap cap cptr n" - - - -definition - lookup_slot' :: "cdl_object_id \ cdl_cptr \ cdl_cap_ref lookup" -where - "lookup_slot' thread cptr \ - DO - cspace_root \ opt_cap (thread, tcb_cspace_slot); - (slot, _) \ resolve_address_bits' cspace_root cptr word_bits; - oreturn slot - OD" - -definition - lookup_cap' :: "cdl_object_id \ cdl_cptr \ cdl_cap lookup" -where - "lookup_cap' thread cptr \ - DO - slot \ lookup_slot' thread cptr; - opt_cap slot - OD" - -definition - lookup_cap_and_slot' :: "cdl_object_id \ cdl_cptr \ (cdl_cap \ cdl_cap_ref) lookup" -where - "lookup_cap_and_slot' thread cptr \ - DO - slot \ lookup_slot' thread cptr; - cap \ opt_cap slot; - oreturn (cap, slot) - OD" - -definition - lookup_object :: "cdl_object_id \ cdl_cptr \ cdl_object_id lookup" -where - "lookup_object thread cptr \ - DO - cap \ lookup_cap' thread cptr; - oreturn $ cap_object cap - OD" - -definition - lookup_extra_caps' :: "cdl_object_id \ cdl_cptr list \ (cdl_cap \ cdl_cap_ref) list lookup" -where - "lookup_extra_caps' thread cptrs \ - omap (\cptr. lookup_cap_and_slot' thread cptr) cptrs" - end diff --git a/proof/sep-capDL/README.md b/proof/sep-capDL/README.md index df2d14ff79..7b90cd1e65 100644 --- a/proof/sep-capDL/README.md +++ b/proof/sep-capDL/README.md @@ -30,9 +30,9 @@ and Andrew Boyton's PhD thesis. Building -------- -To build from the `l4v/` directory, run: +To build for the ARM architecture from the `l4v/` directory, run: - ./isabelle/bin/isabelle build -d . 
-v -b SepDSpec + L4V_ARCH=ARM ./run_tests SepDSpec Important Theories diff --git a/run_tests b/run_tests index 639e9fb595..a99ffbd0af 100755 --- a/run_tests +++ b/run_tests @@ -25,9 +25,9 @@ os.environ["ISABELLE_TIMING_LOG"]="3.0s" # Enable quick_and_dirty mode for various images if "QUICK_AND_DIRTY" in os.environ: - os.environ["AINVS_QUICK_AND_DIRTY"]=1 - os.environ["REFINE_QUICK_AND_DIRTY"]=1 - os.environ["CREFINE_QUICK_AND_DIRTY"]=1 + os.environ["AINVS_QUICK_AND_DIRTY"]="1" + os.environ["REFINE_QUICK_AND_DIRTY"]="1" + os.environ["CREFINE_QUICK_AND_DIRTY"]="1" print("Testing with QUICK_AND_DIRTY") # Lists of excluded tests for different archs @@ -64,12 +64,9 @@ EXCLUDE["RISCV64"]=[ EXCLUDE["AARCH64"]=[ # To be eliminated/refined as development progresses "ASepSpec", - "CKernel", - "BaseRefine", "Access", # Tools and unrelated content, removed for development - "CParser", "AutoCorres", "CamkesGlueSpec", "Sep_Algebra", @@ -83,7 +80,8 @@ EXCLUDE["AARCH64"]=[ "DSpec", "DBaseRefine", "CamkesGlueProofs", - "AsmRefine" + "AsmRefine", + "SimplExportAndRefine" ] # Check EXCLUDE is exhaustive over the available architectures @@ -121,15 +119,17 @@ if args.help: returncode = 0 for arch in archs: - print("Testing for L4V_ARCH=%s:" % arch) + features = os.environ.get("L4V_FEATURES", "") + plat = os.environ.get("L4V_PLAT", "") + num_domains = os.environ.get("INPUT_NUM_DOMAINS", "") + print(f"Testing for L4V_ARCH='{arch}', L4V_FEATURES='{features}', L4V_PLAT='{plat}', " + f"INPUT_NUM_DOMAINS='{num_domains}'") os.environ["L4V_ARCH"] = arch - # Test Orphanage when L4V_ARCH=ARM; - # we need to set this flag here to test the above equality in the ROOT file. - # To be removed when we finish proving Orphanage for ARM_HYP and X64 + # Provide L4V_ARCH_IS_ARM for Corres_Test in lib/ROOT if arch == "ARM": os.environ["L4V_ARCH_IS_ARM"] = arch - print("Testing Orphanage for ARM") + print("Setting L4V_ARCH_IS_ARM") elif "L4V_ARCH_IS_ARM" in os.environ: del os.environ["L4V_ARCH_IS_ARM"] diff --git a/spec/abstract/AARCH64/ArchDecode_A.thy b/spec/abstract/AARCH64/ArchDecode_A.thy index e3c36a8368..760949487e 100644 --- a/spec/abstract/AARCH64/ArchDecode_A.thy +++ b/spec/abstract/AARCH64/ArchDecode_A.thy @@ -53,16 +53,16 @@ definition arch_decode_irq_control_invocation :: else throwError IllegalOperation)" definition attribs_from_word :: "machine_word \ vm_attributes" where - "attribs_from_word w \ {attr. \w!!0 \ attr = Execute \ \w !! 2 \ attr = Device}" + "attribs_from_word w \ {attr. \w!!0 \ attr = Device \ \w !! 2 \ attr = Execute}" definition make_user_pte :: "paddr \ vm_attributes \ vm_rights \ vmpage_size \ pte" where "make_user_pte addr attr rights vm_size \ PagePTE addr (vm_size = ARMSmallPage) (attr - {Global}) rights" -definition check_vspace_root :: "arch_cap \ nat \ (obj_ref \ asid, 'z) se_monad" where +definition check_vspace_root :: "cap \ nat \ (obj_ref \ asid, 'z) se_monad" where "check_vspace_root cap arg_no \ case cap of - PageTableCap pt VSRootPT_T (Some (asid, _)) \ returnOk (pt, asid) + ArchObjectCap (PageTableCap pt VSRootPT_T (Some (asid, _))) \ returnOk (pt, asid) | _ \ throwError $ InvalidCapability arg_no" type_synonym 'z arch_decoder = @@ -79,7 +79,7 @@ definition decode_fr_inv_map :: "'z::state_ext arch_decoder" where attr = args ! 2; vspace_cap = fst (extra_caps ! 
0) in doE - (pt, asid) \ check_vspace_root cap 1; + (pt, asid) \ check_vspace_root vspace_cap 1; pt' \ lookup_error_on_failure False $ find_vspace_for_asid asid; whenE (pt' \ pt) $ throwError $ InvalidCapability 1; check_vp_alignment pgsz vaddr; @@ -91,7 +91,7 @@ definition decode_fr_inv_map :: "'z::state_ext arch_decoder" where odE | None \ doE vtop \ returnOk $ vaddr + mask (pageBitsForSize pgsz); - whenE (vtop \ user_vtop) $ throwError $ InvalidArgument 0 + whenE (vtop > user_vtop) $ throwError $ InvalidArgument 0 odE; (level, slot) \ liftE $ gets_the $ pt_lookup_slot pt vaddr \ ptes_of; unlessE (pt_bits_left level = pg_bits) $ @@ -167,7 +167,7 @@ definition decode_pt_inv_map :: "'z::state_ext arch_decoder" where vspace_cap = fst (extra_caps ! 0) in doE whenE (mapped_address \ None) $ throwError $ InvalidCapability 0; - (pt, asid) \ check_vspace_root cap 1; + (pt, asid) \ check_vspace_root vspace_cap 1; whenE (user_vtop < vaddr) $ throwError $ InvalidArgument 0; pt' \ lookup_error_on_failure False $ find_vspace_for_asid asid; whenE (pt' \ pt) $ throwError $ InvalidCapability 1; @@ -225,7 +225,7 @@ definition decode_vs_inv_flush :: "'z::state_ext arch_decoder" where in doE whenE (end \ start) $ throwError $ InvalidArgument 1; whenE (end > pptrUserTop) $ throwError $ IllegalOperation; - (vspace, asid) \ check_vspace_root cap 0; + (vspace, asid) \ check_vspace_root (ArchObjectCap cap) 0; vspace' \ lookup_error_on_failure False $ find_vspace_for_asid asid; whenE (vspace' \ vspace) $ throwError $ InvalidCapability 0; frame_info \ liftE $ gets $ lookup_frame p start \ ptes_of; @@ -247,7 +247,7 @@ definition decode_vs_inv_flush :: "'z::state_ext arch_decoder" where definition decode_vspace_invocation :: "'z::state_ext arch_decoder" where "decode_vspace_invocation label args cte cap extra_caps \ - if isPageFlushLabel (invocation_type label) + if isVSpaceFlushLabel (invocation_type label) then decode_vs_inv_flush label args cte cap extra_caps else throwError IllegalOperation" diff --git a/spec/abstract/AARCH64/ArchInvocation_A.thy b/spec/abstract/AARCH64/ArchInvocation_A.thy index eadbda3450..a7a83569a6 100644 --- a/spec/abstract/AARCH64/ArchInvocation_A.thy +++ b/spec/abstract/AARCH64/ArchInvocation_A.thy @@ -13,9 +13,6 @@ begin context Arch begin global_naming AARCH64_A -(* FIXME AARCH64: import flush_type directly from Haskell *) -datatype flush_type = Clean | Invalidate | CleanInvalidate | Unify - text \ These datatypes encode the arguments to the various possible AARCH64-specific system calls. Selectors are defined for various fields for convenience elsewhere. 
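The ArchInvocation_A.thy hunk above removes the locally re-declared flush_type so the abstract spec can share the datatype with the design (Haskell) spec, as the deleted FIXME suggested; the matching do_flush change further down in this patch sends CleanInvalidate to cleanInvalidateCacheRange_RAM and drops branchFlushRange from the Unify case. The following is only a rough Haskell sketch of that dispatch, not code from the repository: the cache and barrier operations are stand-ins stubbed to log their arguments, and the real definitions live in the seL4 Haskell spec and in Arch_A.thy.

module FlushSketch where

import Data.Word (Word64)

type PAddr = Word64
type VPtr  = Word64

-- The four flush variants now defined in one place instead of being duplicated.
data FlushType = Clean | Invalidate | CleanInvalidate | Unify
  deriving (Show, Eq)

-- Stub machine monad and machine operations (assumed names, logging only).
type MachineMonad = IO

cleanCacheRange_RAM, invalidateCacheRange_RAM, cleanInvalidateCacheRange_RAM,
  cleanCacheRange_PoU, invalidateCacheRange_I :: VPtr -> VPtr -> PAddr -> MachineMonad ()
cleanCacheRange_RAM           v v' p = putStrLn ("cleanCacheRange_RAM "           ++ show (v, v', p))
invalidateCacheRange_RAM      v v' p = putStrLn ("invalidateCacheRange_RAM "      ++ show (v, v', p))
cleanInvalidateCacheRange_RAM v v' p = putStrLn ("cleanInvalidateCacheRange_RAM " ++ show (v, v', p))
cleanCacheRange_PoU           v v' p = putStrLn ("cleanCacheRange_PoU "           ++ show (v, v', p))
invalidateCacheRange_I        v v' p = putStrLn ("invalidateCacheRange_I "        ++ show (v, v', p))

dsb, isb :: MachineMonad ()
dsb = putStrLn "dsb"
isb = putStrLn "isb"

-- Mirrors the updated do_flush in Arch_A.thy: CleanInvalidate now uses the
-- clean+invalidate machine op, and Unify no longer calls branchFlushRange.
doFlush :: FlushType -> VPtr -> VPtr -> PAddr -> MachineMonad ()
doFlush Clean           vstart vend pstart = cleanCacheRange_RAM vstart vend pstart
doFlush Invalidate      vstart vend pstart = invalidateCacheRange_RAM vstart vend pstart
doFlush CleanInvalidate vstart vend pstart = cleanInvalidateCacheRange_RAM vstart vend pstart
doFlush Unify           vstart vend pstart = do
  cleanCacheRange_PoU vstart vend pstart
  dsb
  invalidateCacheRange_I vstart vend pstart
  isb

Keeping a single shared flush_type means a flush request decoded against the abstract spec and one decoded against the design spec cannot drift apart in their constructors, which appears to be what the removed FIXME was pointing at.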
diff --git a/spec/abstract/AARCH64/ArchVSpaceAcc_A.thy b/spec/abstract/AARCH64/ArchVSpaceAcc_A.thy index b466d46914..6334ccdacf 100644 --- a/spec/abstract/AARCH64/ArchVSpaceAcc_A.thy +++ b/spec/abstract/AARCH64/ArchVSpaceAcc_A.thy @@ -96,6 +96,7 @@ type_synonym ptes_of = "pt_type \ obj_ref \ pte" locale_abbrev ptes_of :: "'z::state_ext state \ ptes_of" where "ptes_of s pt_t p \ level_pte_of pt_t p (pts_of s)" +lemmas ptes_of_def = level_pte_of_def text \The following function takes a pointer to a PTE in kernel memory and returns the PTE.\ locale_abbrev get_pte :: "pt_type \ obj_ref \ (pte,'z::state_ext) s_monad" where diff --git a/spec/abstract/AARCH64/ArchVSpace_A.thy b/spec/abstract/AARCH64/ArchVSpace_A.thy index dcdb814e0a..40164aef35 100644 --- a/spec/abstract/AARCH64/ArchVSpace_A.thy +++ b/spec/abstract/AARCH64/ArchVSpace_A.thy @@ -50,7 +50,6 @@ definition vspace_for_pool :: "obj_ref \ asid \ (obj_ref (* this is what asid_map encodes in ARM/ARM_HYP; getASIDPoolEntry in Haskell *) definition entry_for_asid :: "asid \ 'z::state_ext state \ asid_pool_entry option" where "entry_for_asid asid = do { - oassert (0 < asid); pool_ptr \ pool_for_asid asid; entry_for_pool pool_ptr asid \ asid_pools_of }" @@ -68,6 +67,7 @@ definition update_asid_pool_entry :: definition vspace_for_asid :: "asid \ 'z::state_ext state \ obj_ref option" where "vspace_for_asid asid = do { + oassert (0 < asid); entry \ entry_for_asid asid; oreturn $ ap_vspace entry }" @@ -173,7 +173,7 @@ definition handle_vm_fault :: "obj_ref \ vmfault_type \ "handle_vm_fault thread fault \ case fault of ARMDataAbort \ doE addr \ liftE $ do_machine_op getFAR; - fault \ liftE $ do_machine_op getDFSR; + fault \ liftE $ do_machine_op getESR; cur_v \ liftE $ gets (arm_current_vcpu \ arch_state); addr \ if (\v. cur_v = Some (v, True)) \ \VCPU active\ then doE @@ -188,7 +188,7 @@ definition handle_vm_fault :: "obj_ref \ vmfault_type \ odE | ARMPrefetchAbort \ doE pc \ liftE $ as_user thread $ getRestartPC; - fault \ liftE $ do_machine_op getIFSR; + fault \ liftE $ do_machine_op getESR; cur_v \ liftE $ gets (arm_current_vcpu \ arch_state); pc \ if (\v. cur_v = Some (v, True)) \ \VCPU active\ then doE @@ -263,6 +263,8 @@ definition delete_asid :: "asid \ obj_ref \ (unit,'z::st when (\vmid. 
pool (asid_low_bits_of asid) = Some (ASIDPoolVSpace vmid pt)) $ do invalidate_tlb_by_asid asid; invalidate_asid_entry asid; + \ \re-read here, because @{text invalidate_asid_entry} changes the ASID pool:\ + pool \ get_asid_pool pool_ptr; pool' \ return $ pool (asid_low_bits_of asid := None); set_asid_pool pool_ptr pool'; tcb \ gets cur_thread; diff --git a/spec/abstract/AARCH64/Arch_A.thy b/spec/abstract/AARCH64/Arch_A.thy index 2fa80e78c5..2b01bf2cc3 100644 --- a/spec/abstract/AARCH64/Arch_A.thy +++ b/spec/abstract/AARCH64/Arch_A.thy @@ -95,32 +95,27 @@ definition perform_pg_inv_map :: old_pte \ get_pte level slot; set_cap (ArchObjectCap cap) ct_slot; store_pte level slot pte; + do_machine_op $ cleanByVA_PoU slot (addrFromPPtr slot); when (old_pte \ InvalidPTE) $ do (asid, vaddr) \ assert_opt $ acap_map_data cap; invalidate_tlb_by_asid_va asid vaddr od od" -definition perform_pg_inv_get_addr :: "obj_ref \ (unit,'z::state_ext) s_monad" where - "perform_pg_inv_get_addr ptr \ do - paddr \ return $ fromPAddr $ addrFromPPtr ptr; - ct \ gets cur_thread; - msg_transferred \ set_mrs ct Nothing [paddr]; - msg_info \ return $ MI msg_transferred 0 0 0; - set_message_info ct msg_info - od" +definition perform_pg_inv_get_addr :: + "obj_ref \ (data list,'z::state_ext) s_monad" where + "perform_pg_inv_get_addr ptr \ return [addrFromPPtr ptr]" definition do_flush :: "flush_type \ vspace_ref \ vspace_ref \ paddr \ unit machine_monad" where "do_flush type vstart vend pstart \ case type of Clean \ cleanCacheRange_RAM vstart vend pstart | Invalidate \ invalidateCacheRange_RAM vstart vend pstart - | CleanInvalidate \ invalidateCacheRange_RAM vstart vend pstart + | CleanInvalidate \ cleanInvalidateCacheRange_RAM vstart vend pstart | Unify \ do cleanCacheRange_PoU vstart vend pstart; dsb; invalidateCacheRange_I vstart vend pstart; - branchFlushRange vstart vend pstart; isb od" @@ -138,12 +133,18 @@ text \ The Frame capability confers the authority to map and unmap memory, to query the physical address of a page and to flush. \ -definition perform_page_invocation :: "page_invocation \ (unit,'z::state_ext) s_monad" where +definition perform_page_invocation :: "page_invocation \ (data list,'z::state_ext) s_monad" where "perform_page_invocation iv \ case iv of - PageMap cap ct_slot (pte,slot,level) \ perform_pg_inv_map cap ct_slot pte slot level - | PageUnmap cap ct_slot \ perform_pg_inv_unmap cap ct_slot + PageMap cap ct_slot (pte,slot,level) \ do + perform_pg_inv_map cap ct_slot pte slot level; + return [] + od + | PageUnmap cap ct_slot \ do perform_pg_inv_unmap cap ct_slot; return [] od | PageGetAddr ptr \ perform_pg_inv_get_addr ptr - | PageFlush type start end pstart space asid \ perform_flush type start end pstart space asid" + | PageFlush type start end pstart space asid \ do + perform_flush type start end pstart space asid; + return [] + od" definition perform_pt_inv_map :: @@ -162,7 +163,9 @@ definition perform_pt_inv_unmap :: "arch_cap \ cslot_ptr \ return $ acap_obj cap; unmap_page_table asid vaddr p; slots \ return [p, p + (1 << pte_bits) .e. 
p + mask (pt_bits (acap_pt_type cap))]; - mapM_x (swp (store_pte (acap_pt_type cap)) InvalidPTE) slots + mapM_x (swp (store_pte (acap_pt_type cap)) InvalidPTE) slots; + do_machine_op $ cleanCacheRange_PoU p (p + mask (pt_bits (acap_pt_type cap))) + (addrFromPPtr p) od | _ \ return (); old_cap \ liftM the_arch_cap $ get_cap ct_slot; @@ -192,7 +195,7 @@ definition arch_perform_invocation :: "arch_invocation \ (data list, "arch_perform_invocation i \ liftE $ case i of InvokeVSpace oper \ arch_no_return $ perform_vspace_invocation oper | InvokePageTable oper \ arch_no_return $ perform_page_table_invocation oper - | InvokePage oper \ arch_no_return $ perform_page_invocation oper + | InvokePage oper \ perform_page_invocation oper | InvokeASIDControl oper \ arch_no_return $ perform_asid_control_invocation oper | InvokeASIDPool oper \ arch_no_return $ perform_asid_pool_invocation oper | InvokeVCPU oper \ perform_vcpu_invocation oper" diff --git a/spec/abstract/AARCH64/Arch_Structs_A.thy b/spec/abstract/AARCH64/Arch_Structs_A.thy index 3289d4fa53..23df8699bb 100644 --- a/spec/abstract/AARCH64/Arch_Structs_A.thy +++ b/spec/abstract/AARCH64/Arch_Structs_A.thy @@ -12,7 +12,7 @@ imports "ExecSpec.Arch_Structs_B" ExceptionTypes_A VMRights_A - ExecSpec.Kernel_Config_Lemmas + ExecSpec.Arch_Kernel_Config_Lemmas begin context begin interpretation Arch . @@ -88,6 +88,11 @@ text \ value_type ppn_len = "ipa_size - pageBits" type_synonym ppn = "ppn_len word" +text \This lemma encodes @{typ ppn_len} value above as a term, so we can use it generically:\ +lemma ppn_len_def': + "ppn_len = ipa_size - pageBits" + by (simp add: ppn_len_val pageBits_def ipa_size_def Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) + datatype pte = InvalidPTE | PagePTE @@ -108,48 +113,7 @@ definition pte_base_addr :: "pte \ paddr" where definition ppn_from_pptr :: "obj_ref \ ppn" where "ppn_from_pptr p = ucast (addrFromPPtr p >> pageBits)" -definition vs_index_bits :: nat where - "vs_index_bits \ if config_ARM_PA_SIZE_BITS_40 then 10 else (9::nat)" - -lemma vs_index_bits_ge0[simp, intro!]: "0 < vs_index_bits" - by (simp add: vs_index_bits_def) - -(* A dependent-ish type in Isabelle. We use typedef here instead of value_type so that we can - retain a symbolic value (vs_index_bits) for the size of the type instead of getting a plain - number such as 9 or 10. *) -typedef vs_index_len = "{n :: nat. n < vs_index_bits}" by auto - -end - -instantiation AARCH64_A.vs_index_len :: len0 -begin - interpretation Arch . - definition len_of_vs_index_len: "len_of (x::vs_index_len itself) \ CARD(vs_index_len)" - instance .. -end - -instantiation AARCH64_A.vs_index_len :: len -begin - interpretation Arch . 
- instance - proof - show "0 < LENGTH(vs_index_len)" - by (simp add: len_of_vs_index_len type_definition.card[OF type_definition_vs_index_len]) - qed -end - -context Arch begin global_naming AARCH64_A - -type_synonym vs_index = "vs_index_len word" - -type_synonym pt_index_len = 9 -type_synonym pt_index = "pt_index_len word" - -text \Sanity check:\ -lemma length_vs_index_len[simp]: - "LENGTH(vs_index_len) = vs_index_bits" - by (simp add: len_of_vs_index_len type_definition.card[OF type_definition_vs_index_len]) - +(* Sanity check for page table type sizes -- ptTranslationBits not yet available at definition site *) lemma vs_index_ptTranslationBits: "ptTranslationBits VSRootPT_T = LENGTH(vs_index_len)" by (simp add: ptTranslationBits_def vs_index_bits_def) @@ -210,7 +174,7 @@ definition "default_vcpu \ \ vcpu_tcb = None, vcpu_vgic = default_gic_vcpu_interface, - vcpu_regs = (\_. 0) (VCPURegSCTLR := sctlrDefault), + vcpu_regs = (\_. 0) (VCPURegSCTLR := sctlrEL1VM), vcpu_vppi_masked = (\_. False), vcpu_vtimer = VirtTimer 0 \" @@ -304,7 +268,11 @@ datatype aobject_type = | VCPUObj definition arch_is_frame_type :: "aobject_type \ bool" where - "arch_is_frame_type aobj \ aobj \ PageTableObj" + "arch_is_frame_type aobj \ case aobj of + SmallPageObj \ True + | LargePageObj \ True + | HugePageObj \ True + | _ \ False" definition arch_default_cap :: "aobject_type \ obj_ref \ nat \ bool \ arch_cap" where "arch_default_cap tp r n dev \ case tp of diff --git a/spec/abstract/AARCH64/Hypervisor_A.thy b/spec/abstract/AARCH64/Hypervisor_A.thy index e507ae98f9..502c97635e 100644 --- a/spec/abstract/AARCH64/Hypervisor_A.thy +++ b/spec/abstract/AARCH64/Hypervisor_A.thy @@ -14,8 +14,17 @@ context Arch begin global_naming AARCH64_A fun handle_hypervisor_fault :: "machine_word \ hyp_fault_type \ (unit, 'z::state_ext) s_monad" where - "handle_hypervisor_fault thread (ARMVCPUFault hsr) = - handle_fault thread (ArchFault $ VCPUFault (ucast hsr))" + "handle_hypervisor_fault thread (ARMVCPUFault hsr) = do + fpu_enabled \ do_machine_op isFpuEnable; + if \fpu_enabled + then fail + else if hsr = 0x2000000 \ \@{text UNKNOWN_FAULT}\ + then do + esr \ do_machine_op getESR; + handle_fault thread (UserException (esr && mask 32) 0) + od + else handle_fault thread (ArchFault $ VCPUFault (ucast hsr)) + od" end end diff --git a/spec/abstract/AARCH64/Init_A.thy b/spec/abstract/AARCH64/Init_A.thy index 2e81b10f4b..3b08aa35ad 100644 --- a/spec/abstract/AARCH64/Init_A.thy +++ b/spec/abstract/AARCH64/Init_A.thy @@ -26,7 +26,7 @@ definition arm_global_pt_ptr :: obj_ref where (* Sufficiently aligned for irq type + cte_level_bits *) definition init_irq_node_ptr :: obj_ref where - "init_irq_node_ptr = pptr_base + 0x3000" + "init_irq_node_ptr = pptr_base + 0xc000" (* The highest user-virtual address that is still canonical. It can be larger than user_vtop, which is the highest address we allow to be mapped. @@ -41,7 +41,6 @@ definition canonical_user :: "vspace_ref" where definition init_vspace_uses :: "vspace_ref \ arm_vspace_region_use" where "init_vspace_uses p \ if p \ {pptr_base ..< pptr_base + (1 << 30)} then ArmVSpaceKernelWindow - else if p \ canonical_user then ArmVSpaceUserRegion else ArmVSpaceInvalidRegion" @@ -59,8 +58,8 @@ definition init_arch_state :: arch_state where (* The user-level global table in hyp mode is entirely empty. Kernel-level mappings are in a separate kernel page table, which is not modeled here. 
*) -definition global_pte :: "pt_index \ pte" where - "global_pte idx \ InvalidPTE" +definition global_pt_obj :: arch_kernel_obj where + "global_pt_obj \ PageTable (VSRootPT (\_. InvalidPTE))" definition init_kheap :: kheap where "init_kheap \ @@ -81,7 +80,8 @@ definition init_kheap :: kheap where tcb_bound_notification = None, tcb_mcpriority = minBound, tcb_arch = init_arch_tcb - \ + \, + arm_global_pt_ptr \ ArchObj global_pt_obj )" definition init_cdt :: cdt where diff --git a/spec/abstract/AARCH64/Machine_A.thy b/spec/abstract/AARCH64/Machine_A.thy index 590767dcb3..7932435383 100644 --- a/spec/abstract/AARCH64/Machine_A.thy +++ b/spec/abstract/AARCH64/Machine_A.thy @@ -38,7 +38,7 @@ type_synonym asid_len = 16 type_synonym asid_rep_len = asid_len type_synonym asid = "asid_rep_len word" -type_synonym vmid = "16 word" +type_synonym vmid = "8 word" text \ diff --git a/spec/abstract/AARCH64/VCPUAcc_A.thy b/spec/abstract/AARCH64/VCPUAcc_A.thy index 792bf6a4ec..b0c8bdfffa 100644 --- a/spec/abstract/AARCH64/VCPUAcc_A.thy +++ b/spec/abstract/AARCH64/VCPUAcc_A.thy @@ -103,6 +103,8 @@ definition restore_virt_timer :: "obj_ref \ (unit,'z::state_ext) s_m offset \ return $ cntvoff + ucast delta; vcpu_write_reg vcpu_ptr VCPURegCNTVOFF offset; vcpu_restore_reg vcpu_ptr VCPURegCNTVOFF; + \ \read again, so we don't have to reason about @{const vcpu_write_reg} changes in CRefine\ + vcpu \ get_vcpu vcpu_ptr; masked \ return $ (vcpu_vppi_masked vcpu (the $ irq_vppi_event_index irqVTimerEvent)); \ \we do not know here that irqVTimerEvent is IRQReserved, therefore not IRQInactive, so the only way to prove we don't unmask an inactive interrupt is to check\ @@ -120,7 +122,7 @@ definition vcpu_disable :: "obj_ref option \ (unit,'z::state_ext) s_ hcr \ do_machine_op get_gic_vcpu_ctrl_hcr; vgic_update vr (\vgic. vgic\ vgic_hcr := hcr \); vcpu_save_reg vr VCPURegSCTLR; - vcpu_save_reg vr VCPURegACTLR; \ \since FPU enabled\ + vcpu_save_reg vr VCPURegCPACR; \ \since FPU enabled\ do_machine_op isb od | _ \ return (); @@ -166,44 +168,13 @@ definition vcpu_invalidate_active :: "(unit,'z::state_ext) s_monad" where modify (\s. s\ arch_state := (arch_state s)\ arm_current_vcpu := None \\) od" -text \VCPU objects can be associated with and dissociated from TCBs.\ - -text \Removing the connection between a TCB and VCPU:\ -definition dissociate_vcpu_tcb :: "obj_ref \ obj_ref \ (unit,'z::state_ext) s_monad" where - "dissociate_vcpu_tcb vr t \ do - t_vcpu \ arch_thread_get tcb_vcpu t; - v \ get_vcpu vr; - assert (t_vcpu = Some vr \ vcpu_tcb v = Some t); \ \make sure they were associated\ - cur_v \ gets (arm_current_vcpu \ arch_state); - when (\a. cur_v = Some (vr,a)) vcpu_invalidate_active; - arch_thread_set (\x. x \ tcb_vcpu := None \) t; - set_vcpu vr (v\ vcpu_tcb := None \); - as_user t $ do - sr \ getRegister SPSR_EL1; - setRegister SPSR_EL1 $ sanitise_register False SPSR_EL1 sr - od - od" - -text \Associating a TCB and VCPU, removing any potentially existing associations:\ -definition associate_vcpu_tcb :: "obj_ref \ obj_ref \ (unit,'z::state_ext) s_monad" where - "associate_vcpu_tcb vr t \ do - t_vcpu \ arch_thread_get tcb_vcpu t; - case t_vcpu of - Some p \ dissociate_vcpu_tcb p t - | _ \ return (); - v \ get_vcpu vr; - case vcpu_tcb v of - Some p \ dissociate_vcpu_tcb vr p - | _ \ return (); - arch_thread_set (\x. 
x \ tcb_vcpu := Some vr \) t; - set_vcpu vr (v\ vcpu_tcb := Some t \) - od" - text \Register + context save for VCPUs\ definition vcpu_save :: "(obj_ref \ bool) option \ (unit,'z::state_ext) s_monad" where "vcpu_save vb \ case vb of Some (vr, active) \ do + do_machine_op dsb; + when active $ do vcpu_save_reg vr VCPURegSCTLR; hcr \ do_machine_op get_gic_vcpu_ctrl_hcr; @@ -227,8 +198,7 @@ definition vcpu_save :: "(obj_ref \ bool) option \ (unit,'z:: gicIndices; \ \armvVCPUSave\ - vcpu_save_reg_range vr VCPURegTTBR0 VCPURegSPSR_EL1; - do_machine_op isb + vcpu_save_reg_range vr VCPURegTTBR0 VCPURegSPSR_EL1 od | _ \ fail" @@ -292,6 +262,42 @@ definition vcpu_switch :: "obj_ref option \ (unit,'z::state_ext) s_m od)) od" + +text \VCPU objects can be associated with and dissociated from TCBs.\ + +text \Removing the connection between a TCB and VCPU:\ +definition dissociate_vcpu_tcb :: "obj_ref \ obj_ref \ (unit,'z::state_ext) s_monad" where + "dissociate_vcpu_tcb vr t \ do + t_vcpu \ arch_thread_get tcb_vcpu t; + v \ get_vcpu vr; + assert (t_vcpu = Some vr \ vcpu_tcb v = Some t); \ \make sure they were associated\ + cur_v \ gets (arm_current_vcpu \ arch_state); + when (\a. cur_v = Some (vr,a)) vcpu_invalidate_active; + arch_thread_set (\x. x \ tcb_vcpu := None \) t; + set_vcpu vr (v\ vcpu_tcb := None \); + as_user t $ do + sr \ getRegister SPSR_EL1; + setRegister SPSR_EL1 $ sanitise_register False SPSR_EL1 sr + od + od" + +text \Associating a TCB and VCPU, removing any potentially existing associations:\ +definition associate_vcpu_tcb :: "obj_ref \ obj_ref \ (unit,'z::state_ext) s_monad" where + "associate_vcpu_tcb vcpu_ptr t \ do + t_vcpu \ arch_thread_get tcb_vcpu t; + case t_vcpu of + Some p \ dissociate_vcpu_tcb p t + | _ \ return (); + v \ get_vcpu vcpu_ptr; + case vcpu_tcb v of + Some p \ dissociate_vcpu_tcb vcpu_ptr p + | _ \ return (); + arch_thread_set (\x. x \ tcb_vcpu := Some vcpu_ptr \) t; + set_vcpu vcpu_ptr (v\ vcpu_tcb := Some t \); + ct \ gets cur_thread; + when (t = ct) $ vcpu_switch (Some vcpu_ptr) + od" + text \ Prepare a given VCPU for removal: dissociate it, and clean up current VCPU state if necessary.\ diff --git a/spec/abstract/ARM/ArchDecode_A.thy b/spec/abstract/ARM/ArchDecode_A.thy index 1dbc7bfc00..beb47ffda9 100644 --- a/spec/abstract/ARM/ArchDecode_A.thy +++ b/spec/abstract/ARM/ArchDecode_A.thy @@ -241,12 +241,12 @@ where definition arch_data_to_obj_type :: "nat \ aobject_type option" where "arch_data_to_obj_type n \ - if n = 0 then Some SmallPageObj - else if n = 1 then Some LargePageObj - else if n = 2 then Some SectionObj - else if n = 3 then Some SuperSectionObj - else if n = 4 then Some PageTableObj - else if n = 5 then Some PageDirectoryObj + if n = 0 then Some PageDirectoryObj + else if n = 1 then Some SmallPageObj + else if n = 2 then Some LargePageObj + else if n = 3 then Some SectionObj + else if n = 4 then Some SuperSectionObj + else if n = 5 then Some PageTableObj else None" definition diff --git a/spec/abstract/ARM/ArchRetype_A.thy b/spec/abstract/ARM/ArchRetype_A.thy index e45c3dad70..e54b651510 100644 --- a/spec/abstract/ARM/ArchRetype_A.thy +++ b/spec/abstract/ARM/ArchRetype_A.thy @@ -38,7 +38,7 @@ where definition empty_context :: user_context where - "empty_context \ \_. 0" + "empty_context \ UserContext (\_. 
0)" definition init_arch_tcb :: arch_tcb where "init_arch_tcb \ \ tcb_context = empty_context \" diff --git a/spec/abstract/ARM/Arch_Structs_A.thy b/spec/abstract/ARM/Arch_Structs_A.thy index f444dae9d5..8b9beb12c1 100644 --- a/spec/abstract/ARM/Arch_Structs_A.thy +++ b/spec/abstract/ARM/Arch_Structs_A.thy @@ -15,7 +15,7 @@ imports "ExecSpec.Arch_Structs_B" ExceptionTypes_A VMRights_A - ExecSpec.Kernel_Config_Lemmas + ExecSpec.Arch_Kernel_Config_Lemmas begin context Arch begin global_naming ARM_A @@ -300,7 +300,9 @@ definition "default_arch_tcb \ \ tcb_context = new_context\" -text \accesors for @{text "tcb_context"} inside @{text "arch_tcb"}\ +text \ + Accessors for @{text "tcb_context"} inside @{text "arch_tcb"}. These are later used to + implement @{text as_user}, i.e.\ need to be compatible with @{text user_monad}.\ definition arch_tcb_context_set :: "user_context \ arch_tcb \ arch_tcb" where @@ -311,15 +313,20 @@ definition where "arch_tcb_context_get a_tcb \ tcb_context a_tcb" +(* FIXME: the following means that we break the set/getRegister abstraction + and should move some of this into the machine interface (same as X64) *) +text \ + Accessors for the user register part of the @{text "arch_tcb"}. + (Because @{typ "register \ machine_word"} might not be equal to @{typ user_context}).\ definition arch_tcb_set_registers :: "(register \ machine_word) \ arch_tcb \ arch_tcb" where - "arch_tcb_set_registers \ arch_tcb_context_set" + "arch_tcb_set_registers regs a_tcb \ a_tcb \ tcb_context := UserContext regs \" definition arch_tcb_get_registers :: "arch_tcb \ register \ machine_word" where - "arch_tcb_get_registers \ arch_tcb_context_get" + "arch_tcb_get_registers a_tcb \ user_regs (tcb_context a_tcb)" end diff --git a/spec/abstract/ARM/Machine_A.thy b/spec/abstract/ARM/Machine_A.thy index 89731a6665..0e5c6b1cc2 100644 --- a/spec/abstract/ARM/Machine_A.thy +++ b/spec/abstract/ARM/Machine_A.thy @@ -13,7 +13,7 @@ chapter "ARM Machine Instantiation" theory Machine_A imports - "ExecSpec.MachineTypes" + "ExecSpec.MachineOps" begin context Arch begin global_naming ARM_A @@ -107,11 +107,9 @@ definition msg_label_bits :: nat where [simp]: "msg_label_bits \ 20" -type_synonym user_context = "register \ data" - definition new_context :: "user_context" where - "new_context \ (\r. 0) (CPSR := 0x150)" + "new_context \ UserContext ((\r. 0) (CPSR := 0x150))" text \The lowest virtual address in the kernel window. 
The kernel reserves the virtual addresses from here up in every virtual address space.\ diff --git a/spec/abstract/ARM_HYP/ArchDecode_A.thy b/spec/abstract/ARM_HYP/ArchDecode_A.thy index a69562a3ec..f06266fe0f 100644 --- a/spec/abstract/ARM_HYP/ArchDecode_A.thy +++ b/spec/abstract/ARM_HYP/ArchDecode_A.thy @@ -259,12 +259,12 @@ where definition arch_data_to_obj_type :: "nat \ aobject_type option" where "arch_data_to_obj_type n \ - if n = 0 then Some SmallPageObj - else if n = 1 then Some LargePageObj - else if n = 2 then Some SectionObj - else if n = 3 then Some SuperSectionObj - else if n = 4 then Some PageTableObj - else if n = 5 then Some PageDirectoryObj + if n = 0 then Some PageDirectoryObj + else if n = 1 then Some SmallPageObj + else if n = 2 then Some LargePageObj + else if n = 3 then Some SectionObj + else if n = 4 then Some SuperSectionObj + else if n = 5 then Some PageTableObj else if n = 6 then Some VCPUObj else None" diff --git a/spec/abstract/ARM_HYP/ArchRetype_A.thy b/spec/abstract/ARM_HYP/ArchRetype_A.thy index b0fc9eabda..45346e5530 100644 --- a/spec/abstract/ARM_HYP/ArchRetype_A.thy +++ b/spec/abstract/ARM_HYP/ArchRetype_A.thy @@ -38,7 +38,7 @@ where definition empty_context :: user_context where - "empty_context \ \_. 0" + "empty_context \ UserContext (\_. 0)" definition init_arch_tcb :: arch_tcb where "init_arch_tcb \ \ tcb_context = empty_context, tcb_vcpu = None \" diff --git a/spec/abstract/ARM_HYP/ArchVSpace_A.thy b/spec/abstract/ARM_HYP/ArchVSpace_A.thy index 13d20a44c8..93efae9649 100644 --- a/spec/abstract/ARM_HYP/ArchVSpace_A.thy +++ b/spec/abstract/ARM_HYP/ArchVSpace_A.thy @@ -1,4 +1,5 @@ (* + * Copyright 2022, Proofcraft Pty Ltd * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only diff --git a/spec/abstract/ARM_HYP/Arch_Structs_A.thy b/spec/abstract/ARM_HYP/Arch_Structs_A.thy index e03ef26aae..b3b60122ac 100644 --- a/spec/abstract/ARM_HYP/Arch_Structs_A.thy +++ b/spec/abstract/ARM_HYP/Arch_Structs_A.thy @@ -15,7 +15,7 @@ imports "ExecSpec.Arch_Structs_B" ExceptionTypes_A VMRights_A - ExecSpec.Kernel_Config_Lemmas + ExecSpec.Arch_Kernel_Config_Lemmas begin context Arch begin global_naming ARM_A @@ -398,7 +398,9 @@ definition tcb_context = new_context, tcb_vcpu = None \" -text \accesors for @{text "tcb_context"} inside @{text "arch_tcb"}\ +text \ + Accessors for @{text "tcb_context"} inside @{text "arch_tcb"}. These are later used to + implement @{text as_user}, i.e.\ need to be compatible with @{text user_monad}.\ definition arch_tcb_context_set :: "user_context \ arch_tcb \ arch_tcb" where @@ -409,15 +411,20 @@ definition where "arch_tcb_context_get a_tcb \ tcb_context a_tcb" +(* FIXME: the following means that we break the set/getRegister abstraction + and should move some of this into the machine interface (same as X64) *) +text \ + Accessors for the user register part of the @{text "arch_tcb"}. 
+ (Because @{typ "register \ machine_word"} might not be equal to @{typ user_context}).\ definition arch_tcb_set_registers :: "(register \ machine_word) \ arch_tcb \ arch_tcb" where - "arch_tcb_set_registers \ arch_tcb_context_set" + "arch_tcb_set_registers regs a_tcb \ a_tcb \ tcb_context := UserContext regs \" definition arch_tcb_get_registers :: "arch_tcb \ register \ machine_word" where - "arch_tcb_get_registers \ arch_tcb_context_get" + "arch_tcb_get_registers a_tcb \ user_regs (tcb_context a_tcb)" end diff --git a/spec/abstract/ARM_HYP/Init_A.thy b/spec/abstract/ARM_HYP/Init_A.thy index a737025ba8..0d25ded079 100644 --- a/spec/abstract/ARM_HYP/Init_A.thy +++ b/spec/abstract/ARM_HYP/Init_A.thy @@ -77,8 +77,8 @@ definition tcb_bound_notification = None, tcb_mcpriority = minBound, tcb_arch = init_arch_tcb - \) - (us_global_pd_ptr \ us_global_pd)" + \, + us_global_pd_ptr \ us_global_pd)" definition "init_cdt \ Map.empty" diff --git a/spec/abstract/ARM_HYP/Machine_A.thy b/spec/abstract/ARM_HYP/Machine_A.thy index 6d96dadc9a..6787a6c4cb 100644 --- a/spec/abstract/ARM_HYP/Machine_A.thy +++ b/spec/abstract/ARM_HYP/Machine_A.thy @@ -13,7 +13,7 @@ chapter "ARM Machine Instantiation" theory Machine_A imports - "ExecSpec.MachineTypes" + "ExecSpec.MachineOps" begin context Arch begin global_naming ARM_A @@ -107,11 +107,9 @@ definition msg_label_bits :: nat where [simp]: "msg_label_bits \ 20" -type_synonym user_context = "register \ data" - definition new_context :: "user_context" where - "new_context \ (\r. 0) (CPSR := 0x150)" + "new_context \ UserContext ((\r. 0) (CPSR := 0x150))" text \The lowest virtual address in the kernel window. The kernel reserves the virtual addresses from here up in every virtual address space.\ diff --git a/spec/abstract/CSpace_A.thy b/spec/abstract/CSpace_A.thy index 9f244968a0..9083edc65b 100644 --- a/spec/abstract/CSpace_A.thy +++ b/spec/abstract/CSpace_A.thy @@ -15,7 +15,7 @@ imports ArchVSpace_A IpcCancel_A ArchCSpace_A - "Lib.NonDetMonadLemmas" + "Monads.Nondet_Lemmas" "HOL-Library.Prefix_Order" begin diff --git a/spec/abstract/Deterministic_A.thy b/spec/abstract/Deterministic_A.thy index cda6fbd3e5..f84aa166b8 100644 --- a/spec/abstract/Deterministic_A.thy +++ b/spec/abstract/Deterministic_A.thy @@ -107,7 +107,7 @@ text \The current scheduler action, which is part of the scheduling state.\ datatype scheduler_action = resume_cur_thread - | switch_thread obj_ref + | switch_thread (sch_act_target : obj_ref) | choose_new_thread type_synonym domain = word8 @@ -244,7 +244,7 @@ definition set_eobject :: "obj_ref \ etcb \ unit det_ext where "set_eobject ptr obj \ do es \ get; - ekh \ return $ ekheap es(ptr \ obj); + ekh \ return $ (ekheap es)(ptr \ obj); put (es\ekheap := ekh\) od" diff --git a/spec/abstract/Invocations_A.thy b/spec/abstract/Invocations_A.thy index c9ee26ab1a..4fab2d42aa 100644 --- a/spec/abstract/Invocations_A.thy +++ b/spec/abstract/Invocations_A.thy @@ -41,7 +41,7 @@ datatype tcb_invocation = WriteRegisters machine_word bool "machine_word list" arch_copy_register_sets | ReadRegisters machine_word bool machine_word arch_copy_register_sets | CopyRegisters machine_word machine_word bool bool bool bool arch_copy_register_sets - | ThreadControl machine_word cslot_ptr + | ThreadControl (tc_target: machine_word) (tc_slot: cslot_ptr) (tc_new_fault_ep: "cap_ref option") (tc_new_mcpriority: "(word8 * obj_ref) option") (tc_new_priority: "(word8 * obj_ref) option") diff --git a/spec/abstract/KHeap_A.thy b/spec/abstract/KHeap_A.thy index dff81ee501..3d9a4542e1 
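The hunks above replace the bare register-to-machine-word user context on ARM and ARM_HYP with a wrapped UserContext value, and route arch_tcb_set_registers and arch_tcb_get_registers through that wrapper. A rough Python sketch of the intended shape only, with hypothetical names and types that mirror the Isabelle definitions rather than any real l4v API:

from collections import defaultdict
from dataclasses import dataclass, replace
from typing import Dict

@dataclass
class UserContext:
    # total map from register name to machine word; user_regs is the projection that
    # arch_tcb_get_registers uses in the definitions above
    user_regs: Dict[str, int]

@dataclass
class ArchTcb:
    tcb_context: UserContext

def new_context() -> UserContext:
    # every register is 0, except CPSR, which starts at 0x150 (as in the Machine_A hunks above)
    regs: Dict[str, int] = defaultdict(int)
    regs["CPSR"] = 0x150
    return UserContext(user_regs=regs)

def arch_tcb_set_registers(regs: Dict[str, int], a_tcb: ArchTcb) -> ArchTcb:
    # wrap the raw register map before storing it in the tcb_context field
    return replace(a_tcb, tcb_context=UserContext(user_regs=regs))

def arch_tcb_get_registers(a_tcb: ArchTcb) -> Dict[str, int]:
    # unwrap: the registers are now only one component of the user context
    return a_tcb.tcb_context.user_regs

The comment added in Arch_Structs_A explains the need for the indirection: the register map type need not coincide with user_context on every architecture, so the register accessors can no longer be plain aliases of the context accessors.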
100644 --- a/spec/abstract/KHeap_A.thy +++ b/spec/abstract/KHeap_A.thy @@ -35,7 +35,7 @@ where kobj <- get_object ptr; assert (a_type kobj = a_type obj); s \ get; - put (s\kheap := kheap s(ptr \ obj)\) + put (s\kheap := (kheap s)(ptr \ obj)\) od" diff --git a/spec/abstract/README.md b/spec/abstract/README.md index 2f076ba450..79e0acb77c 100644 --- a/spec/abstract/README.md +++ b/spec/abstract/README.md @@ -14,7 +14,7 @@ specification. The specification draws in additional interface files from `design` and `machine`. The specification is written in monadic style. See -`l4v/lib/Monad_WP/NonDetMonad` for the definition of this monad. +`l4v/lib/Monads/NonDetMonad` for the definition of this monad. Top-Level Theory ---------------- @@ -45,14 +45,14 @@ The corresponding Isabelle session is `ASpec`. It is set up to build a human-readable PDF document. `Glossary_Doc` contains definitions of common seL4 terms. -To build, run in directory `l4v/spec`: +To build, run in directory `l4v/`: - make ASpec + L4V_ARCH=ARM ./run_test ASpec Remarks ------- - * Note that this specification is actually an extensible _family_ of + * Note that this specification is actually an extensible *family* of specifications, with predefined extension points. These points can either be left generic, as for most of the abstract invariant proofs, or they can be instantiated to more precise behaviour, such as in diff --git a/spec/abstract/RISCV64/Arch_Structs_A.thy b/spec/abstract/RISCV64/Arch_Structs_A.thy index 6c435d3c36..929c53d04f 100644 --- a/spec/abstract/RISCV64/Arch_Structs_A.thy +++ b/spec/abstract/RISCV64/Arch_Structs_A.thy @@ -11,7 +11,7 @@ imports "ExecSpec.Arch_Structs_B" ExceptionTypes_A VMRights_A - ExecSpec.Kernel_Config_Lemmas + ExecSpec.Arch_Kernel_Config_Lemmas begin context Arch begin global_naming RISCV64_A diff --git a/spec/abstract/RISCV64/Init_A.thy b/spec/abstract/RISCV64/Init_A.thy index 8ee8cc59b4..36602779ad 100644 --- a/spec/abstract/RISCV64/Init_A.thy +++ b/spec/abstract/RISCV64/Init_A.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: GPL-2.0-only @@ -37,12 +38,22 @@ definition canonical_user :: "vspace_ref" where (* Kernel and ELF window are constructed so that they can be covered with one max_pt_level entry each. This is not the layout the real kernel uses, but we are only trying to show that - the invariants are consistent. *) + the invariants are consistent. + + The values we pick here for the size of these regions constrain pptr_base and kernel_elf_base in + real kernel configurations, so we pick relatively small values that are reasonable lower bounds + for real platforms and that are still large enough to work for the examples. In particular, the + InfoFlow example gives a constraint that the kernel window is at least large enough to contain a + RISCVLargePage and a minimal set of other objects. This leads to picking values of: + 4M physical memory (1 << 22) and one page (1 << pageBits) for the kernel elf region. 
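+ (As a concrete illustration only, assuming the usual pageBits = 12: 1 << 22 is 0x400000 bytes,
+ i.e. 4 MiB, for the kernel window, and 1 << pageBits is 0x1000 bytes, i.e. 4 KiB, for the
+ kernel ELF window.)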
*) +definition kernel_window_bits :: nat where + "kernel_window_bits \ 22" + definition init_vspace_uses :: "vspace_ref \ riscvvspace_region_use" where "init_vspace_uses p \ - if p \ {pptr_base ..< pptr_base + (1 << 30)} then RISCVVSpaceKernelWindow - else if p \ {kernel_elf_base ..< kernel_elf_base + (1 << 20)} then RISCVVSpaceKernelELFWindow + if p \ {pptr_base ..< pptr_base + (1 << kernel_window_bits)} then RISCVVSpaceKernelWindow + else if p \ {kernel_elf_base ..< kernel_elf_base + (1 << pageBits)} then RISCVVSpaceKernelELFWindow else if p \ canonical_user then RISCVVSpaceUserRegion else RISCVVSpaceInvalidRegion" @@ -54,18 +65,25 @@ definition init_arch_state :: arch_state riscv_kernel_vspace = init_vspace_uses \" +definition toplevel_bits :: nat + where + "toplevel_bits = pt_bits_left max_pt_level" + +definition elf_index :: pt_index + where + "elf_index = ucast (pt_index max_pt_level kernel_elf_base)" -(* {pptr_base ..< pptr_base + (1 << 30)} is pt index 0x100 at max_pt_level, - {kernel_elf_base ..< kernel_elf_base + (1 << 20)} comes out to pt index 0x1FE. +(* {pptr_base ..< pptr_base + (1 << kernel_window_bits)} is pt index 0x100 at max_pt_level, + {kernel_elf_base ..< kernel_elf_base + (1 << pageBits)} comes out to elf_index. The rest is constructed such that the translation lines up with what the invariants want. *) definition global_pte :: "pt_index \ pte" where "global_pte idx \ if idx = 0x100 - then PagePTE ((ucast (idx && mask (ptTranslationBits - 1)) << ptTranslationBits * 2)) + then PagePTE ((ucast (idx && mask (ptTranslationBits - 1)) << ptTranslationBits * size max_pt_level)) {} vm_kernel_only - else if idx = 0x1FE - then PagePTE (2 << ptTranslationBits * 2) {} vm_kernel_only + else if idx = elf_index + then PagePTE (ucast ((kernelELFPAddrBase && ~~mask toplevel_bits) >> pageBits)) {} vm_kernel_only else InvalidPTE" definition init_global_pt :: kernel_object diff --git a/spec/abstract/Structures_A.thy b/spec/abstract/Structures_A.thy index 273cf00c5b..2092604ff5 100644 --- a/spec/abstract/Structures_A.thy +++ b/spec/abstract/Structures_A.thy @@ -139,10 +139,6 @@ definition case cap of CNodeCap oref bits guard \ (oref, bits, guard)" -definition - the_arch_cap :: "cap \ arch_cap" where - "the_arch_cap cap \ case cap of ArchObjectCap a \ a" - primrec (nonexhaustive) cap_ep_badge :: "cap \ badge" where diff --git a/spec/abstract/X64/ArchDecode_A.thy b/spec/abstract/X64/ArchDecode_A.thy index 1dbddd19a0..24a8aa9b3b 100644 --- a/spec/abstract/X64/ArchDecode_A.thy +++ b/spec/abstract/X64/ArchDecode_A.thy @@ -68,13 +68,11 @@ definition dest_slot \ lookup_target_slot cnode (data_to_cptr index) (unat depth); ensure_empty dest_slot; - \ \Following should be wrapped in to a function like what c did - since it is pc99 related, problem is where to put this function\ - numIOAPICs \ liftE $ gets (x64_num_ioapics \ arch_state); + ioapic_nirqs \ liftE $ gets (x64_ioapic_nirqs \ arch_state); whenE (numIOAPICs = 0) $ throwError IllegalOperation; whenE (ioapic > numIOAPICs - 1) $ throwError (RangeError 0 (numIOAPICs-1)); - whenE (pin > ioapicIRQLines - 1) $ throwError (RangeError 0 (ioapicIRQLines-1)); + whenE (pin > ucast (ioapic_nirqs ioapic - 1)) $ throwError (RangeError 0 (ucast (ioapic_nirqs ioapic - 1))); whenE (level > 1) $ throwError (RangeError 0 1); whenE (polarity > 1) $ throwError (RangeError 0 1); diff --git a/spec/abstract/X64/Arch_Structs_A.thy b/spec/abstract/X64/Arch_Structs_A.thy index 5d9d91ba35..8d35283924 100644 --- a/spec/abstract/X64/Arch_Structs_A.thy +++ 
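The X64 ArchDecode_A hunk above bounds the requested IOAPIC pin by a per-IOAPIC IRQ count taken from the new x64_ioapic_nirqs component of arch_state, instead of the single global ioapicIRQLines constant. A rough Python sketch of just that check, with hypothetical names standing in for the Isabelle decoder:

class RangeError(Exception):
    # models the RangeError syscall error, carrying an inclusive valid range
    def __init__(self, lo: int, hi: int):
        super().__init__(f"expected a value between {lo} and {hi}")
        self.lo, self.hi = lo, hi

def check_ioapic_pin(ioapic: int, pin: int, ioapic_nirqs) -> None:
    # ioapic_nirqs maps an IOAPIC index to its number of IRQ lines (an 8-bit count in the spec);
    # previously every IOAPIC was checked against the single ioapicIRQLines constant
    limit = ioapic_nirqs(ioapic) - 1
    if pin > limit:
        raise RangeError(0, limit)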
b/spec/abstract/X64/Arch_Structs_A.thy @@ -11,7 +11,7 @@ imports "ExecSpec.Arch_Structs_B" ExceptionTypes_A VMRights_A - ExecSpec.Kernel_Config_Lemmas + ExecSpec.Arch_Kernel_Config_Lemmas begin context Arch begin global_naming X64_A @@ -310,6 +310,7 @@ record arch_state = x64_current_cr3 :: "X64_A.cr3" x64_allocated_io_ports :: "X64_A.io_port \ bool" x64_num_ioapics :: "64 word" + x64_ioapic_nirqs :: "machine_word \ 8 word" x64_irq_state :: "8 word \ X64_A.X64IRQState" (* FIXME x64-vtd: diff --git a/spec/abstract/X64/Init_A.thy b/spec/abstract/X64/Init_A.thy index 1d78c1a7de..45e1f95c65 100644 --- a/spec/abstract/X64/Init_A.thy +++ b/spec/abstract/X64/Init_A.thy @@ -56,6 +56,7 @@ definition x64_current_cr3 = cr3 0 0, x64_allocated_io_ports = \_. False, x64_num_ioapics = 1, + x64_ioapic_nirqs = \_. ucast ioapicIRQLines, x64_irq_state = K IRQFree \" diff --git a/spec/abstract/X64/Machine_A.thy b/spec/abstract/X64/Machine_A.thy index b8bbf00551..67cccb9218 100644 --- a/spec/abstract/X64/Machine_A.thy +++ b/spec/abstract/X64/Machine_A.thy @@ -13,7 +13,7 @@ chapter "x64 Machine Instantiation" theory Machine_A imports - "Lib.NonDetMonad" + "Monads.Nondet_Monad" "ExecSpec.MachineTypes" "ExecSpec.MachineOps" begin diff --git a/spec/capDL/CSpace_D.thy b/spec/capDL/CSpace_D.thy index 636579f52e..f9a39ed1e0 100644 --- a/spec/capDL/CSpace_D.thy +++ b/spec/capDL/CSpace_D.thy @@ -280,8 +280,8 @@ definition \ (('a \ 's) option \ ('b \ 's) option) set" where "monadic_rel_optionation_form f = - {(x, y). (x \ None \ y \ None \ the y \ fst (split f (the x))) - \ (x \ None \ y = None \ snd (split f (the x))) + {(x, y). (x \ None \ y \ None \ the y \ fst (case_prod f (the x))) + \ (x \ None \ y = None \ snd (case_prod f (the x))) \ (x = None \ y = None)}" definition diff --git a/spec/capDL/Monads_D.thy b/spec/capDL/Monads_D.thy index 1aed3149db..f605e45e61 100644 --- a/spec/capDL/Monads_D.thy +++ b/spec/capDL/Monads_D.thy @@ -11,7 +11,8 @@ theory Monads_D imports Types_D - "Lib.NonDetMonadVCG" + Monads.Nondet_In_Monad + Monads.Nondet_VCG begin (* Kernel state monad *) diff --git a/spec/capDL/README.md b/spec/capDL/README.md index a9018f375e..998950d9e9 100644 --- a/spec/capDL/README.md +++ b/spec/capDL/README.md @@ -44,8 +44,7 @@ tables is uniformly modelled as capabilities. Building -------- -The corresponding Isabelle session is `DSpec`. To build, run in directory -`l4v/spec`: - - make DSpec +The corresponding Isabelle session is `DSpec`. To build for the ARM +architecture, run in directory `l4v/`: + L4V_ARCH=ARM ./run_tests DSpec diff --git a/spec/capDL/Types_D.thy b/spec/capDL/Types_D.thy index 86d03959e1..4696ce163d 100644 --- a/spec/capDL/Types_D.thy +++ b/spec/capDL/Types_D.thy @@ -15,6 +15,7 @@ theory Types_D imports "ASpec.VMRights_A" Intents_D + "Lib.Lib" "Lib.SplitRule" "HOL-Combinatorics.Transposition" (* for Fun.swap *) begin diff --git a/spec/cspec/AARCH64/Kernel_C.thy b/spec/cspec/AARCH64/Kernel_C.thy new file mode 100644 index 0000000000..dcac934181 --- /dev/null +++ b/spec/cspec/AARCH64/Kernel_C.thy @@ -0,0 +1,134 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Kernel_C +imports + "ExecSpec.MachineTypes" + "CLib.CTranslationNICTA" + "AsmRefine.CommonOps" +begin + +external_file + "../c/build/$L4V_ARCH/kernel_all.c_pp" + +context begin interpretation Arch . + +requalify_types + machine_state + pt_array_len + vs_array_len + +end + +declare [[populate_globals=true]] + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +(* Sanity checks for array sizes. ptTranslationBits not yet available at definition site. *) +lemma ptTranslationBits_vs_index_bits: + "ptTranslationBits VSRootPT_T = vs_index_bits" + by (simp add: ptTranslationBits_def vs_index_bits_def) + +(* FIXME AARCH64: this is guaranteed to always succeed even though config_ARM_PA_SIZE_BITS_40 + is unfolded. It'd be nicer if we could also get something symbolic out of value_type, though *) +lemma ptTranslationBits_vs_array_len': + "2 ^ ptTranslationBits VSRootPT_T = vs_array_len" + by (simp add: vs_array_len_val ptTranslationBits_vs_index_bits vs_index_bits_def + Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) + +lemmas ptTranslationBits_vs_array_len = ptTranslationBits_vs_array_len'[unfolded vs_array_len_val] + +type_synonym cghost_state = + "(machine_word \ vmpage_size) \ \ \Frame sizes\ + (machine_word \ nat) \ \ \CNode sizes\ + (machine_word \ pt_type) \ \ \PT types\ + ghost_assertions" \ \ASMRefine assertions\ + +definition gs_clear_region :: "addr \ nat \ cghost_state \ cghost_state" where + "gs_clear_region ptr bits gs \ + (\x. if x \ {ptr..+2 ^ bits} then None else fst gs x, + \x. if x \ {ptr..+2 ^ bits} then None else fst (snd gs) x, + \x. if x \ {ptr..+2 ^ bits} then None else fst (snd (snd gs)) x, + snd (snd (snd gs)))" + +definition gs_new_frames:: "vmpage_size \ addr \ nat \ cghost_state \ cghost_state" where + "gs_new_frames sz ptr bits \ \gs. + if bits < pageBitsForSize sz then gs + else (\x. if \n\mask (bits - pageBitsForSize sz). + x = ptr + n * 2 ^ pageBitsForSize sz then Some sz + else fst gs x, snd gs)" + +definition gs_new_cnodes:: "nat \ addr \ nat \ cghost_state \ cghost_state" where + "gs_new_cnodes sz ptr bits \ \gs. + if bits < sz + 4 then gs + else (fst gs, \x. if \n\mask (bits - sz - 4). x = ptr + n * 2 ^ (sz + 4) + then Some sz + else fst (snd gs) x, snd (snd gs))" + +definition gs_new_pt_t:: "pt_type \ addr \ cghost_state \ cghost_state" where + "gs_new_pt_t pt_t ptr \ + \gs. (fst gs, fst (snd gs), (fst (snd (snd gs))) (ptr \ pt_t), snd (snd (snd gs)))" + +abbreviation gs_get_assn :: "int \ cghost_state \ machine_word" where + "gs_get_assn k \ ghost_assertion_data_get k (snd \ snd \ snd)" + +abbreviation gs_set_assn :: "int \ machine_word \ cghost_state \ cghost_state" where + "gs_set_assn k v \ ghost_assertion_data_set k v (apsnd \ apsnd \ apsnd)" + +declare [[record_codegen = false]] +declare [[allow_underscore_idents = true]] + +end + +(* Workaround for the fact that the retype annotations need the vmpage sizes*) +(* create appropriately qualified aliases *) +context begin interpretation Arch . global_naming vmpage_size +requalify_consts ARMSmallPage ARMLargePage ARMHugePage +end + +(* Also need pt_type constructors for retype annotations. We leave them available globally for C. *) +context begin interpretation Arch . +requalify_consts NormalPT_T VSRootPT_T +end + +definition + ctcb_size_bits :: nat +where + "ctcb_size_bits \ 10" + +definition + ctcb_offset :: "64 word" +where + "ctcb_offset \ 2 ^ ctcb_size_bits" + +lemmas ctcb_offset_defs = ctcb_offset_def ctcb_size_bits_def + +cond_sorry_modifies_proofs SORRY_MODIFIES_PROOFS + +install_C_file "../c/build/$L4V_ARCH/kernel_all.c_pp" + [machinety=machine_state, ghostty=cghost_state] + +text \Hide unqualified names conflicting with Kernel_Config names. 
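The ghost-state update functions in the new AARCH64 Kernel_C theory above maintain, alongside the C heap, partial maps recording frame sizes, CNode sizes and page-table types for retyped objects, plus the AsmRefine assertion data. A rough Python rendering of gs_new_frames alone, under the simplifying assumptions noted in the comments:

def gs_new_frames(frame_sizes: dict, ptr: int, bits: int, frame_bits: int, sz: str) -> dict:
    # Mark every frame-aligned address in the retyped region [ptr, ptr + 2^bits) as a frame of
    # size sz in the frame-size ghost map; mirrors gs_new_frames above, with frame_bits playing
    # the role of pageBitsForSize sz. The Python types are hypothetical: the real ghost state is
    # a tuple of partial maps plus the assertion data, and addresses are machine words.
    if bits < frame_bits:
        return frame_sizes
    out = dict(frame_sizes)
    for n in range(2 ** (bits - frame_bits)):
        out[ptr + n * (2 ** frame_bits)] = sz
    return out

# e.g. retyping a 2^14-byte region at 0x1000 into 4 KiB small frames (example values only)
print(gs_new_frames({}, 0x1000, 14, 12, "ARMSmallPage"))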
Force use of Kernel_C prefix + for these:\ +hide_const (open) + numDomains + +text \Add a more usable name for the collection of ThreadState definitions\ +lemmas ThreadState_defs = StrictC'_thread_state_defs + +(* hide vmpage sizes again *) +hide_const + vmpage_size.ARMSmallPage + vmpage_size.ARMLargePage + vmpage_size.ARMHugePage + +(* re-allow fully qualified accesses (for consistency). Slightly clunky *) +context Arch begin +global_naming "AARCH64.vmpage_size" requalify_consts ARMSmallPage ARMLargePage ARMHugePage +global_naming "AARCH64" requalify_consts ARMSmallPage ARMLargePage ARMHugePage +end + +end diff --git a/spec/cspec/ARM/Kernel_C.thy b/spec/cspec/ARM/Kernel_C.thy index 89042169be..cd19383b65 100644 --- a/spec/cspec/ARM/Kernel_C.thy +++ b/spec/cspec/ARM/Kernel_C.thy @@ -98,6 +98,9 @@ text \Hide unqualified names conflicting with Kernel_Config names. Force u hide_const (open) numDomains +text \Add a more usable name for the collection of ThreadState definitions\ +lemmas ThreadState_defs = StrictC'_thread_state_defs + (* hide vmpage sizes again *) hide_const vmpage_size.ARMSmallPage diff --git a/spec/cspec/ARM_HYP/Kernel_C.thy b/spec/cspec/ARM_HYP/Kernel_C.thy index 89042169be..cd19383b65 100644 --- a/spec/cspec/ARM_HYP/Kernel_C.thy +++ b/spec/cspec/ARM_HYP/Kernel_C.thy @@ -98,6 +98,9 @@ text \Hide unqualified names conflicting with Kernel_Config names. Force u hide_const (open) numDomains +text \Add a more usable name for the collection of ThreadState definitions\ +lemmas ThreadState_defs = StrictC'_thread_state_defs + (* hide vmpage sizes again *) hide_const vmpage_size.ARMSmallPage diff --git a/spec/cspec/KernelState_C.thy b/spec/cspec/KernelState_C.thy index 0bd87b4af6..00b082fcee 100644 --- a/spec/cspec/KernelState_C.thy +++ b/spec/cspec/KernelState_C.thy @@ -4,6 +4,8 @@ * SPDX-License-Identifier: GPL-2.0-only *) +(* The base theory for generated bitfield proofs about the kernel *) + theory KernelState_C imports "Word_Lib.WordSetup" diff --git a/spec/cspec/README.md b/spec/cspec/README.md index f4fc56c6b0..9e058319a0 100644 --- a/spec/cspec/README.md +++ b/spec/cspec/README.md @@ -34,10 +34,10 @@ Building The corresponding Isabelle sessions for this module are `CKernel` and `CSpec`. `CSpec` contains `CKernel` plus automated bitfield proofs. -To build the image, run the corresponding session in directory `l4v/spec`, -e.g.: +To build the image for the ARM architecture, run the corresponding session in +directory `l4v/`, e.g.: - make CSpec + L4V_ARCH=ARM ./run_tests CSpec This will also configure and preprocess the kernel sources. @@ -52,6 +52,9 @@ indicating the architecture-specific definitions and proofs to use. The default architecture is `ARM` and will be selected if none is provided. See `l4v/spec/cspec/c/Makefile` for seL4 configuration details. +The build process has an option for providing a device tree overlay file if +desired, which can customise the memory regions available to the kernel. See +the [README](c/overlays/README.md) file in `c/overlays/` for more details. Remarks ------- diff --git a/spec/cspec/RISCV64/Kernel_C.thy b/spec/cspec/RISCV64/Kernel_C.thy index 9eabe14191..3e7efa727c 100644 --- a/spec/cspec/RISCV64/Kernel_C.thy +++ b/spec/cspec/RISCV64/Kernel_C.thy @@ -95,6 +95,9 @@ text \Hide unqualified names conflicting with Kernel_Config names. 
Force u hide_const (open) numDomains +text \Add a more usable name for the collection of ThreadState definitions\ +lemmas ThreadState_defs = StrictC'_thread_state_defs + (* hide vmpage sizes again *) hide_const vmpage_size.RISCVSmallPage diff --git a/spec/cspec/TypHeapLimits.thy b/spec/cspec/TypHeapLimits.thy index 441b72a69b..fe0e9c07dc 100644 --- a/spec/cspec/TypHeapLimits.thy +++ b/spec/cspec/TypHeapLimits.thy @@ -5,7 +5,7 @@ *) theory TypHeapLimits -imports "CLib.TypHeapLib" + imports CParser.TypHeapLib begin definition diff --git a/spec/cspec/X64/Kernel_C.thy b/spec/cspec/X64/Kernel_C.thy index d7dbd22450..5de120a414 100644 --- a/spec/cspec/X64/Kernel_C.thy +++ b/spec/cspec/X64/Kernel_C.thy @@ -98,6 +98,9 @@ text \Hide unqualified names conflicting with Kernel_Config names. Force u hide_const (open) numDomains +text \Add a more usable name for the collection of ThreadState definitions\ +lemmas ThreadState_defs = StrictC'_thread_state_defs + (* hide vmpage sizes again *) hide_const vmpage_size.X64SmallPage diff --git a/spec/cspec/c/Makefile b/spec/cspec/c/Makefile index 9879e37b9b..ceaf73a3b8 100644 --- a/spec/cspec/c/Makefile +++ b/spec/cspec/c/Makefile @@ -29,7 +29,7 @@ CONFIG_THY := ../../machine/${L4V_ARCH}/Kernel_Config.thy # called by ../../Makefile config: ${CONFIG_THY} -${CONFIG_THY}: ${KERNEL_CONFIG_ROOT}/.cmake_done +${CONFIG_THY}: ${CONFIG_DONE} ./gen-config-thy.py diff --git a/spec/cspec/c/export-kernel-builds.py b/spec/cspec/c/export-kernel-builds.py new file mode 100755 index 0000000000..093c5d28ec --- /dev/null +++ b/spec/cspec/c/export-kernel-builds.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python3 + +# Copyright 2023 Kry10 Limited +# SPDX-License-Identifier: BSD-2-Clause + +# Export kernel build artifacts for binary verification. This includes +# kernel binaries, disassembly, and if present, the C graph-lang model +# extracted from the Isabelle C spec. + +# GitHub l4v proof workflows use this to save kernel build outputs as +# workflow artifacts. The artifacts are then picked up by binary +# verification workflows. This ensures that the kernel builds used in +# binary verification are generated by the same toolchains used in the +# Isabelle proofs. + +# For binary verification, we perform builds at two optimisation levels. +# We also save a copy of the preprocessed source used in the Isabelle +# proofs, so we can later compare it to the preprocessed source used in +# binary verification. + +# We expect the L4V_ARCH environment variable to be set as usual. We use +# this to locate kernel sources built by the l4v proofs, and the C +# graph-lang file. Any other environment variables required by the l4v +# kernel build system are assumed to be already set. + +# We require a single command-line argument, which is the directory into +# which builds will be exported. Under this directory, builds will be +# placed into subdirectories according to their L4V_ARCH, L4V_FEATURES, +# and optimisation level. With L4V_ARCH=RISCV64 and L4V_FEATURES=MCS, for +# example: + +# RISCV64-MCS-O1/ +# RISCV64-MCS-O1/kernel_all.cpp +# RISCV64-MCS-O1/kernel.elf +# RISCV64-MCS-O1/... +# RISCV64-MCS-O2/ +# RISCV64-MCS-O2/kernel_all.cpp +# RISCV64-MCS-O2/kernel.elf +# RISCV64-MCS-O1/... 
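+
+# For example (hypothetical invocation; the export root is arbitrary):
+#
+#   L4V_ARCH=RISCV64 L4V_FEATURES=MCS ./export-kernel-builds.py --export-root /tmp/bv-export
+#
+# exports both the -O1 and -O2 builds by default; a single level can be selected with -O 1 or
+# -O 2.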
+ +import argparse +import os +import shutil +import subprocess +import sys + +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import List, NamedTuple, Optional + + +class L4vPaths(NamedTuple): + kernel_mk: Path + c_pp: Path + c_functions: Path + + +def get_l4v_paths(l4v_arch: str) -> L4vPaths: + l4v_c = Path(__file__).resolve().parent + l4v = l4v_c.parent.parent.parent + kernel_mk = l4v_c / 'kernel.mk' + c_pp = l4v_c / 'build' / l4v_arch / 'kernel_all.c_pp' + c_functions = l4v / 'proof' / 'asmrefine' / 'export' / l4v_arch / 'CFunDump.txt' + return L4vPaths(kernel_mk=kernel_mk, c_pp=c_pp, c_functions=c_functions) + + +def path_suffix(opt_suffix: Optional[str]) -> str: + return f'-{opt_suffix}' if opt_suffix else '' + + +class ExportConfig(NamedTuple): + export_root: Path + l4v_arch: str + l4v_features: Optional[str] + l4v_plat: Optional[str] + l4v_paths: L4vPaths + manifest: Optional[Path] + + def config_name(self, optimisation: str) -> str: + features = path_suffix(self.l4v_features) + plat = path_suffix(self.l4v_plat) + return f'{self.l4v_arch}{features}{plat}{optimisation}' + + def export_path(self, optimisation: str) -> Path: + return self.export_root / self.config_name(optimisation) + + def do_export(self, optimisation: str) -> None: + config_name = self.config_name(optimisation) + export_dir = self.export_path(optimisation) + + print(f'Exporting kernel build for {config_name} to {export_dir}...') + + if not self.l4v_paths.c_pp.is_file(): + print(' Note: No l4v kernel build found.') + if not self.l4v_paths.c_functions.is_file(): + print(' Note: No C graph-lang found.') + + with TemporaryDirectory() as build_tmp: + env = { + **os.environ, + 'KERNEL_BUILD_ROOT': Path(build_tmp) / 'build', + 'KERNEL_EXPORT_DIR': export_dir, + 'CONFIG_OPTIMISATION': optimisation + } + p = subprocess.run(['make', '-f', self.l4v_paths.kernel_mk, 'kernel_export'], + env=env, stdin=subprocess.DEVNULL) + if p.returncode != 0: + print(f'Error: Kernel build for {config_name} failed', file=sys.stderr) + sys.exit(1) + + # Copy the preprocessed source from the l4v build, so we can later compare + # it to the exported builds if binary verification fails. There's no point + # comparing here, because there will always be differences due to lines + # inserted by the preprocessor and config system. + if self.l4v_paths.c_pp.is_file(): + shutil.copyfile(self.l4v_paths.c_pp, export_dir / 'kernel_all.c_pp.l4v') + + # Copy the C graph-lang, if it exists. Note that C graph-lang might not + # have been produced, if SimplExportAndRefine is not enabled for this + # configuration, or if proofs were cached. It's therefore not an error for + # it not to be present. + if self.l4v_paths.c_functions.is_file(): + shutil.copyfile(self.l4v_paths.c_functions, export_dir / 'CFunctions.txt') + + # If a manifest was given on the command line, copy that to the output. + if self.manifest is not None: + shutil.copyfile(self.manifest, export_dir / 'manifest.xml') + + # Write a file describing the configuration. 
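+ # For example (hypothetical values): L4V_ARCH=RISCV64, L4V_FEATURES=MCS, L4V_PLAT left
+ # empty, and CONFIG_OPTIMISATION=-O1 or -O2.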
+ with open(export_dir / 'config.env', 'w') as config_env: + config_env.write(f'L4V_ARCH={self.l4v_arch}\n') + config_env.write(f'L4V_FEATURES={self.l4v_features or ""}\n') + config_env.write(f'L4V_PLAT={self.l4v_plat or ""}\n') + config_env.write(f'CONFIG_OPTIMISATION={optimisation}\n') + + +class ExportCommand(NamedTuple): + config: ExportConfig + optimisations: List[str] + force: bool + + def run(self) -> None: + if not self.config.l4v_paths.c_functions.is_file() and not self.force: + print('Will not export kernel builds, because no C graph-lang was found.') + return + for optimisation in self.optimisations: + self.config.do_export(optimisation) + + +def parse_args() -> ExportCommand: + parser = argparse.ArgumentParser( + description='Export kernel build artifacts.') + parser.add_argument('--export-root', metavar='DIRECTORY', type=Path, required=True, + help='Export directory') + parser.add_argument('--manifest-xml', metavar='FILENAME', type=Path, + help='Repo manifest xml file') + parser.add_argument('-O', metavar='OPTIMISATION_LEVEL', type=str, nargs=1, + choices=['1', '2'], dest='optimisations', + help='Optimisation level') + parser.add_argument('--force', dest='force', action='store_true', + help='Build even when no C graph-lang is found') + parser.set_defaults(optimisations=['1', '2'], force=False) + args = parser.parse_args() + + l4v_arch = os.environ.get('L4V_ARCH') + if not l4v_arch: + print('error: L4V_ARCH not set', file=sys.stderr) + sys.exit(1) + + if args.manifest_xml and not args.manifest_xml.is_file(): + print(f'error: bad manifest: {self.manifest_xml}', file=sys.stderr) + sys.exit(1) + + config = ExportConfig(export_root=args.export_root, + l4v_arch=l4v_arch, + l4v_features=os.environ.get('L4V_FEATURES'), + l4v_plat=os.environ.get('L4V_PLAT'), + l4v_paths=get_l4v_paths(l4v_arch), + manifest=args.manifest_xml) + + optimisations = (f'-O{o}' for o in args.optimisations) + return ExportCommand(config=config, + optimisations=optimisations, + force=args.force) + + +def main(cmd: ExportCommand) -> int: + cmd.run() + return 0 + + +if __name__ == '__main__': + exit(main(parse_args())) diff --git a/spec/cspec/c/gen-config-thy.py b/spec/cspec/c/gen-config-thy.py index 69f560e7c2..3a9746508c 100755 --- a/spec/cspec/c/gen-config-thy.py +++ b/spec/cspec/c/gen-config-thy.py @@ -170,6 +170,7 @@ 'CONFIG_CTZ_64': (bool, None), 'CONFIG_CLZ_NO_BUILTIN': (bool, None), 'CONFIG_CTZ_NO_BUILTIN': (bool, None), + 'CONFIG_MAX_NUM_IOAPIC': (nat, 'maxNumIOAPIC'), } @@ -193,7 +194,7 @@ def parse_gen_config(gen_config_file: str) -> Dict[str, str]: """ Parse gen_config.h and return a dictionary of the values. """ - end_comment_re = re.compile(r' /\*.*\*/') + end_comment_re = re.compile(r'/\*.*\*/') config = {} with open(gen_config_file, 'r') as f: @@ -208,6 +209,24 @@ def parse_gen_config(gen_config_file: str) -> Dict[str, str]: return config +def parse_physBase(l4v_arch: str, devices_gen_h_file: str) -> str: + """ + Parse devices_gen.h and return the physBase value as string in hex or + decimal form. Return None for architectures that don't use physBase. 
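+ For example, a header line like "#define PHYS_BASE_RAW 0x80000000" yields the string
+ "0x80000000" (the address here is purely illustrative).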
+ """ + if l4v_arch == 'X64': + return None + + physBase_re = re.compile(r'#define PHYS_BASE_RAW (0x[0-9a-fA-F]+|[0-9]+)') + with open(devices_gen_h_file, 'r') as f: + for line in f: + line = line.strip() + m = physBase_re.match(line) + if m: + return m.group(1) + raise Exception(f'Could not find PHYS_BASE_RAW in {devices_gen_h_file}') + + def add_defaults(config: Dict[str, str]): """ Defaults are boolean config keys that are mentioned in known_config_keys @@ -215,7 +234,7 @@ def add_defaults(config: Dict[str, str]): config file. """ for key, (_, name) in known_config_keys.items(): - if key not in config and not name is None: + if key not in config and name is not None: config[key] = 0 @@ -235,25 +254,27 @@ def add_defaults(config: Dict[str, str]): GENERATED -- DO NOT EDIT! Changes will be overwritten. This file was generated from {} + and {} by the script {}. *) """ -def write_config_thy(config_thy_path, config: Dict[str, str]): +def write_config_thy(header, config_thy_path, config: Dict[str, str], physBase=None): """ Write a Kernel_Config.thy file for a given configuration dict. """ file_name = path.realpath(path.join(config_thy_path, 'Kernel_Config.thy')) with open(file_name, 'w') as f: - f.write(theory_header.format( - date.today().year, - config_path, - path.realpath(__file__)) - ) + f.write(header) + + if physBase: + f.write('(* This value is PHYS_BASE_RAW in the devices_gen.h header listed above. *)\n') + f.write('definition physBase :: machine_word where\n') + f.write(f' "physBase \\ {physBase}"\n\n') - names = [] + names = ['physBase'] if physBase else [] for key, value in config.items(): type = type_of(key) if type is nat or type is word: @@ -278,26 +299,36 @@ def write_config_thy(config_thy_path, config: Dict[str, str]): pass f.write('\n(* These definitions should only be unfolded consciously and carefully: *)\n') - for name in names: f.write(f'hide_fact (open) {name}_def\n') f.write('\nend\n') - print(f'Wrote {file_name}.') + print(f'Wrote {file_name}') if __name__ == '__main__': - L4V_ARCH = getenv('L4V_ARCH') - if not L4V_ARCH: + l4v_arch = getenv('L4V_ARCH') + if not l4v_arch: print('L4V_ARCH environment variable not set') exit(1) this_dir = path.dirname(path.realpath(__file__)) + build_dir = path.join(this_dir, f"config-build/{l4v_arch}") - config_path = path.join(this_dir, f"config-build/{L4V_ARCH}/gen_config/kernel/gen_config.h") - thy_path = path.join(this_dir, f'../../machine/{L4V_ARCH}') + config_path = path.join(build_dir, "gen_config/kernel/gen_config.h") + devices_gen_path = path.join(build_dir, "gen_headers/plat/machine/devices_gen.h") + + thy_path = path.join(this_dir, f'../../machine/{l4v_arch}') config = parse_gen_config(config_path) + physBase = parse_physBase(l4v_arch, devices_gen_path) add_defaults(config) - write_config_thy(thy_path, config) + + header = theory_header.format( + date.today().year, + config_path, + devices_gen_path, + path.realpath(__file__) + ) + write_config_thy(header, thy_path, config, physBase) diff --git a/spec/cspec/c/kernel.mk b/spec/cspec/c/kernel.mk index edc2dae0e5..efcc48a92c 100644 --- a/spec/cspec/c/kernel.mk +++ b/spec/cspec/c/kernel.mk @@ -8,6 +8,10 @@ # It allows building the C kernel in locations other than the default one used by l4v, # and assumes that KERNEL_BUILD_ROOT has already been set to specify the build location. +ifndef KERNEL_BUILD_ROOT + $(error KERNEL_BUILD_ROOT is not set) +endif + ifndef L4V_REPO_PATH L4V_REPO_PATH := $(realpath $(dir $(lastword ${MAKEFILE_LIST}))../../..) 
endif @@ -17,20 +21,30 @@ ifndef SOURCE_ROOT endif CSPEC_DIR := ${L4V_REPO_PATH}/spec/cspec -PARSERPATH := ${L4V_REPO_PATH}/tools/c-parser/standalone-parser ifndef L4V_ARCH $(error L4V_ARCH is not set) endif +SEL4_CONFIG_NAME := ${L4V_ARCH}$(if ${L4V_FEATURES},_${L4V_FEATURES},)$(if ${L4V_PLAT},_${L4V_PLAT},) + ifndef CONFIG - CONFIG := ${SOURCE_ROOT}/configs/${L4V_ARCH}_$(if ${L4V_FEATURES},${L4V_FEATURES}_,)verified.cmake + CONFIG := ${SOURCE_ROOT}/configs/${SEL4_CONFIG_NAME}_verified.cmake endif ifndef CONFIG_DOMAIN_SCHEDULE CONFIG_DOMAIN_SCHEDULE := ${CSPEC_DIR}/c/config_sched.c endif +# Normally, this make file is used in a context where CONFIG_OPTIMISATION +# is not set, and the CMake KernelOptimisation setting is taken from the +# settings file named by the CONFIG variable (see above). +# However, the default can be overridden by setting CONFIG_OPTIMISATION +# before loading this make file. This is useful for binary verification. +ifdef CONFIG_OPTIMISATION + KERNEL_CMAKE_OPTIMISATION := -DKernelOptimisation=${CONFIG_OPTIMISATION} +endif + ifndef TOOLPREFIX ifndef TRY_TOOLPREFIX ifeq ($(findstring ARM, ${L4V_ARCH}),ARM) @@ -73,46 +87,54 @@ endif # We avoid this by excluding __pycache__ directories from the kernel dependencies. KERNEL_DEPS := $(shell find ${SOURCE_ROOT} -name .git -prune -o -name __pycache__ -prune -o -type f -print) +# The kernel build generates a large number of files, so we create a dummy file +# .cmake_done-${SEL4_CONFIG_NAME} to represent overall completion for make's +# dependency tracking. The ${SEL4_CONFIG_NAME} part makes sure we rebuild when +# we switch features or platforms. +BUILD_DONE = ${KERNEL_BUILD_ROOT}/.cmake_done-${SEL4_CONFIG_NAME} + # Top level rule for rebuilding kernel_all.c_pp -${KERNEL_BUILD_ROOT}/kernel_all.c_pp: ${KERNEL_BUILD_ROOT}/.cmake_done +${KERNEL_BUILD_ROOT}/kernel_all.c_pp: ${BUILD_DONE} cd ${KERNEL_BUILD_ROOT} && ninja kernel_all_pp_wrapper cp -a ${KERNEL_BUILD_ROOT}/kernel_all_pp.c $@ -# Various targets useful for binary verification. -${KERNEL_BUILD_ROOT}/kernel.elf: ${KERNEL_BUILD_ROOT}/kernel_all.c_pp - cd ${KERNEL_BUILD_ROOT} && ninja kernel.elf - -${KERNEL_BUILD_ROOT}/kernel.elf.rodata: ${KERNEL_BUILD_ROOT}/kernel.elf - ${OBJDUMP} -z -D $^ > $@ +ifneq ($(L4V_ARCH),X64) +OVERLAY_DIR := ${CSPEC_DIR}/c/overlays/${L4V_ARCH} +OVERLAY := ${OVERLAY_DIR}/overlay.dts +OVERLAY_OPT := -DKernelCustomDTSOverlay=${OVERLAY} +DEFAULT_OVERLAY := ${OVERLAY_DIR}/default-overlay.dts -${KERNEL_BUILD_ROOT}/kernel.elf.txt: ${KERNEL_BUILD_ROOT}/kernel.elf - ${OBJDUMP} -dz $^ > $@ - -${KERNEL_BUILD_ROOT}/kernel.elf.symtab: ${KERNEL_BUILD_ROOT}/kernel.elf - ${OBJDUMP} -t $^ > $@ +${OVERLAY}: ${DEFAULT_OVERLAY} + @cp $< $@ +endif -${KERNEL_BUILD_ROOT}/kernel.sigs: ${KERNEL_BUILD_ROOT}/kernel_all.c_pp - MAKEFILES= make -C ${PARSERPATH} ${PARSERPATH}/${L4V_ARCH}/c-parser - ${PARSERPATH}/${L4V_ARCH}/c-parser --cpp=${CPP} --underscore_idents --mmbytes $^ > $@.tmp - mv $@.tmp $@ +ifdef INPUT_NUM_DOMAINS +KERNEL_CMAKE_EXTRA_OPTIONS += -DKernelNumDomains=${INPUT_NUM_DOMAINS} +endif # Initialize the CMake build. We purge the build directory and start again # whenever any of the kernel sources change, so that we can reliably pick up # changes to the build config. -# This step also generates a large number of files, so we create a dummy file -# .cmake_done to represent overall completion for make's dependency tracking. 
-${KERNEL_BUILD_ROOT}/.cmake_done: ${KERNEL_DEPS} ${CONFIG_DOMAIN_SCHEDULE} - rm -rf ${KERNEL_BUILD_ROOT} - mkdir -p ${KERNEL_BUILD_ROOT} +${BUILD_DONE}: ${KERNEL_DEPS} ${CONFIG_DOMAIN_SCHEDULE} ${OVERLAY} + @rm -rf ${KERNEL_BUILD_ROOT} + @mkdir -p ${KERNEL_BUILD_ROOT} cd ${KERNEL_BUILD_ROOT} && \ cmake -C ${CONFIG} \ -DCROSS_COMPILER_PREFIX=${TOOLPREFIX} \ -DCMAKE_TOOLCHAIN_FILE=${SOURCE_ROOT}/gcc.cmake \ -DKernelDomainSchedule=${CONFIG_DOMAIN_SCHEDULE} \ - -DUMM_TYPES=$(abspath ${UMM_TYPES}) \ - -DCSPEC_DIR=${CSPEC_DIR} ${KERNEL_CMAKE_EXTRA_OPTIONS} \ + -DUMM_TYPES=$(abspath ${UMM_TYPES}) -DCSPEC_DIR=${CSPEC_DIR} \ + ${KERNEL_CMAKE_OPTIMISATION} ${KERNEL_CMAKE_EXTRA_OPTIONS} \ + ${OVERLAY_OPT} \ -G Ninja ${SOURCE_ROOT} - touch ${KERNEL_BUILD_ROOT}/.cmake_done + @touch ${BUILD_DONE} +ifneq ($(L4V_ARCH),X64) + @if [ "$$(diff -q ${OVERLAY} ${DEFAULT_OVERLAY})" ]; then \ + echo "++ Used custom overlay for $(L4V_ARCH)"; \ + else \ + echo "-- Used default overlay for $(L4V_ARCH)"; \ + fi +endif ${UMM_TYPES}: ${KERNEL_BUILD_ROOT}/kernel_all.c_pp ${CSPEC_DIR}/mk_umm_types.py --root $(L4V_REPO_PATH) ${KERNEL_BUILD_ROOT}/kernel_all.c_pp $@ @@ -120,12 +142,80 @@ ${UMM_TYPES}: ${KERNEL_BUILD_ROOT}/kernel_all.c_pp # This target generates config files and headers only. It does not invoke # the C tool chain or preprocessor. We force CMake to skip tests for these, # so that ASpec and ExecSpec can be built with fewer dependencies. -${KERNEL_CONFIG_ROOT}/.cmake_done: ${KERNEL_DEPS} gen-config-thy.py - rm -rf ${KERNEL_CONFIG_ROOT} - mkdir -p ${KERNEL_CONFIG_ROOT} +CONFIG_DONE = ${KERNEL_CONFIG_ROOT}/.cmake_done-${SEL4_CONFIG_NAME} +${CONFIG_DONE}: ${KERNEL_DEPS} gen-config-thy.py ${OVERLAY} + @rm -rf ${KERNEL_CONFIG_ROOT} + @mkdir -p ${KERNEL_CONFIG_ROOT} cd ${KERNEL_CONFIG_ROOT} && \ cmake -C ${CONFIG} \ -DCMAKE_TOOLCHAIN_FILE=${CSPEC_DIR}/c/no-compiler.cmake \ - ${KERNEL_CMAKE_EXTRA_OPTIONS} \ + ${KERNEL_CMAKE_OPTIMISATION} ${KERNEL_CMAKE_EXTRA_OPTIONS} \ + ${OVERLAY_OPT} \ -G Ninja ${SOURCE_ROOT} - touch ${KERNEL_CONFIG_ROOT}/.cmake_done + @touch ${CONFIG_DONE} +ifneq ($(L4V_ARCH),X64) + @if [ "$$(diff -q ${OVERLAY} ${DEFAULT_OVERLAY})" ]; then \ + echo "++ Used custom overlay for $(L4V_ARCH)"; \ + else \ + echo "-- Used default overlay for $(L4V_ARCH)"; \ + fi +endif + +# Various targets useful for binary verification. +${KERNEL_BUILD_ROOT}/kernel.elf: ${KERNEL_BUILD_ROOT}/kernel_all.c_pp + cd ${KERNEL_BUILD_ROOT} && ninja kernel.elf + +${KERNEL_BUILD_ROOT}/kernel.elf.rodata: ${KERNEL_BUILD_ROOT}/kernel.elf + ${OBJDUMP} -z -D $^ > $@ + +${KERNEL_BUILD_ROOT}/kernel.elf.txt: ${KERNEL_BUILD_ROOT}/kernel.elf + ${OBJDUMP} -dz $^ > $@ + +${KERNEL_BUILD_ROOT}/kernel.elf.symtab: ${KERNEL_BUILD_ROOT}/kernel.elf + ${OBJDUMP} -t $^ > $@ + +# Normally, this make file is used in a context where STANDALONE_C_PARSER_EXE +# and STANDALONE_C_PARSER_DIR are not set. Consequently the rule for `kernel.sigs` +# attempts to build a standalone C parser before using it. +# If that is not desirable, because you want to use a pre-built C parser, you +# can set STANDALONE_C_PARSER_EXE to the location of the pre-built C parser before +# loading this make file. The `kernel.sigs` rule will then skip the C parser build +# step. +ifndef STANDALONE_C_PARSER_EXE + STANDALONE_C_PARSER_DIR := ${L4V_REPO_PATH}/tools/c-parser/standalone-parser + STANDALONE_C_PARSER_EXE := ${STANDALONE_C_PARSER_DIR}/${L4V_ARCH}/c-parser +endif + +# We don't track dependencies of the C parser here. 
+${KERNEL_BUILD_ROOT}/kernel.sigs: ${KERNEL_BUILD_ROOT}/kernel_all.c_pp +ifdef STANDALONE_C_PARSER_DIR + ${MAKE} -C ${STANDALONE_C_PARSER_DIR} ${STANDALONE_C_PARSER_EXE} +endif + ${STANDALONE_C_PARSER_EXE} --cpp=${CPP} --underscore_idents --mmbytes $^ > $@.tmp + mv $@.tmp $@ + +# Export kernel build for binary verification. +ifndef KERNEL_EXPORT_DIR + KERNEL_EXPORT_DIR := ${CSPEC_DIR}/c/export/${SEL4_CONFIG_NAME} +endif + +KERNEL_EXPORT_ARTIFACTS := kernel_all.c_pp kernel.elf kernel.elf.rodata kernel.elf.symtab kernel.elf.txt kernel.sigs +KERNEL_EXPORT_ARTIFACT_PATHS := $(patsubst %, $(KERNEL_EXPORT_DIR)/%, $(KERNEL_EXPORT_ARTIFACTS)) + +${KERNEL_EXPORT_ARTIFACT_PATHS}: ${KERNEL_EXPORT_DIR}/%: ${KERNEL_BUILD_ROOT}/% + @mkdir -p ${KERNEL_EXPORT_DIR} + cp $< $@ + +# Also record the toolchain versions used. +KERNEL_EXPORT_EXTRAS := ${KERNEL_EXPORT_DIR}/gcc.version ${KERNEL_EXPORT_DIR}/binutils.version + +${KERNEL_EXPORT_DIR}/gcc.version: + @mkdir -p ${KERNEL_EXPORT_DIR} + ${TOOLPREFIX}gcc --version > $@ + +${KERNEL_EXPORT_DIR}/binutils.version: + @mkdir -p ${KERNEL_EXPORT_DIR} + ${OBJDUMP} --version > $@ + +kernel_export: ${KERNEL_EXPORT_ARTIFACT_PATHS} ${KERNEL_EXPORT_EXTRAS} +.PHONY: kernel_build_export diff --git a/spec/cspec/c/overlays/AARCH64/default-overlay.dts b/spec/cspec/c/overlays/AARCH64/default-overlay.dts new file mode 100644 index 0000000000..f01d7c1770 --- /dev/null +++ b/spec/cspec/c/overlays/AARCH64/default-overlay.dts @@ -0,0 +1,6 @@ +/* + * Copyright 2023, Proofcraft Pty Ltd + * SPDX-License-Identifier: GPL-2.0-only + */ + +/* Empty overlay file that will be used if no custom `overlay.dts` exists. */ diff --git a/spec/cspec/c/overlays/ARM/default-overlay.dts b/spec/cspec/c/overlays/ARM/default-overlay.dts new file mode 100644 index 0000000000..f01d7c1770 --- /dev/null +++ b/spec/cspec/c/overlays/ARM/default-overlay.dts @@ -0,0 +1,6 @@ +/* + * Copyright 2023, Proofcraft Pty Ltd + * SPDX-License-Identifier: GPL-2.0-only + */ + +/* Empty overlay file that will be used if no custom `overlay.dts` exists. */ diff --git a/spec/cspec/c/overlays/ARM_HYP/default-overlay.dts b/spec/cspec/c/overlays/ARM_HYP/default-overlay.dts new file mode 100644 index 0000000000..f01d7c1770 --- /dev/null +++ b/spec/cspec/c/overlays/ARM_HYP/default-overlay.dts @@ -0,0 +1,6 @@ +/* + * Copyright 2023, Proofcraft Pty Ltd + * SPDX-License-Identifier: GPL-2.0-only + */ + +/* Empty overlay file that will be used if no custom `overlay.dts` exists. */ diff --git a/spec/cspec/c/overlays/README.md b/spec/cspec/c/overlays/README.md new file mode 100644 index 0000000000..9e97ad9461 --- /dev/null +++ b/spec/cspec/c/overlays/README.md @@ -0,0 +1,22 @@ + + +# DTS Overlays + +This directory contains device tree overlay files that can be used to override +default platform parameters such as available memory regions. + +The default files in the repository are all empty. + +To provide an override, place a file `overlay.dts` into the respective +architecture directory, e.g. `ARM/overlay.dts`. + +The l4v build system will pick up this overlay file when it generates the +kernel configuration data and preprocessed kernel code. It will rebuild proof +sessions according to their dependencies. + +The `X64` build does not support overlays, and therefore does not provide a +default. 
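Returning to the kernel.mk changes above: the new build and config stamp files, and the default CMake settings file, are keyed by a configuration name assembled from L4V_ARCH, L4V_FEATURES and L4V_PLAT. A small Python sketch of that naming scheme, using purely illustrative values:

def sel4_config_name(l4v_arch: str, l4v_features: str = "", l4v_plat: str = "") -> str:
    # mirrors SEL4_CONFIG_NAME in kernel.mk: optional parts are appended with underscores
    name = l4v_arch
    if l4v_features:
        name += f"_{l4v_features}"
    if l4v_plat:
        name += f"_{l4v_plat}"
    return name

# stamp-file suffix and default CMake settings file name (example values only)
print(f".cmake_done-{sel4_config_name('ARM')}")                        # .cmake_done-ARM
print(f"configs/{sel4_config_name('RISCV64', 'MCS')}_verified.cmake")  # configs/RISCV64_MCS_verified.cmake

Keying the stamp files by this name is what forces a clean rebuild when L4V_FEATURES or L4V_PLAT changes, as the makefile comment notes.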
diff --git a/spec/cspec/c/overlays/RISCV64/default-overlay.dts b/spec/cspec/c/overlays/RISCV64/default-overlay.dts new file mode 100644 index 0000000000..f01d7c1770 --- /dev/null +++ b/spec/cspec/c/overlays/RISCV64/default-overlay.dts @@ -0,0 +1,6 @@ +/* + * Copyright 2023, Proofcraft Pty Ltd + * SPDX-License-Identifier: GPL-2.0-only + */ + +/* Empty overlay file that will be used if no custom `overlay.dts` exists. */ diff --git a/spec/cspec/mk_umm_types.py b/spec/cspec/mk_umm_types.py index 97c8c88500..90f02f882b 100755 --- a/spec/cspec/mk_umm_types.py +++ b/spec/cspec/mk_umm_types.py @@ -52,7 +52,7 @@ def __exit__(self, type, value, traceback): begin declare [[allow_underscore_idents = true]] external_file "%(input)s" -setup {* IsarInstall.gen_umm_types_file "%(input)s" "%(output)s" *} +setup \ IsarInstall.gen_umm_types_file "%(input)s" "%(output)s" \ end """ diff --git a/spec/design/README.md b/spec/design/README.md index 3d7cc461a5..cab121639d 100644 --- a/spec/design/README.md +++ b/spec/design/README.md @@ -33,18 +33,19 @@ this level of abstraction. Building -------- -The corresponding Isabelle session is `ExecSpec`. Build in `l4v/spec/` with +The corresponding Isabelle session is `ExecSpec`. Build in `l4v` for the ARM +architecture with - make ExecSpec + L4V_ARCH=ARM ./run_tests ExecSpec Remarks ------- * for regenerating the design spec from Haskell sources, go to directory - `l4v/tools/haskell-translator` and run + `l4v/` and run - ./make_spec.sh + ./run_test haskell-translator * skeleton files that define which parts of which Haskell files get mapped to which Isabelle theories are found in the sub directories `skel` and diff --git a/spec/design/m-skel/AARCH64/MachineTypes.thy b/spec/design/m-skel/AARCH64/MachineTypes.thy index dbbf1f0284..7e750b414c 100644 --- a/spec/design/m-skel/AARCH64/MachineTypes.thy +++ b/spec/design/m-skel/AARCH64/MachineTypes.thy @@ -7,9 +7,10 @@ chapter "AARCH64 Machine Types" theory MachineTypes imports - "Word_Lib.WordSetup" - "Lib.OptionMonadND" - "Lib.HaskellLib_H" + Word_Lib.WordSetup + Monads.Nondet_Empty_Fail + Monads.Nondet_Reader_Option + Lib.HaskellLib_H Platform begin diff --git a/spec/design/m-skel/ARM/MachineTypes.thy b/spec/design/m-skel/ARM/MachineTypes.thy index 103ef0a177..28613511b3 100644 --- a/spec/design/m-skel/ARM/MachineTypes.thy +++ b/spec/design/m-skel/ARM/MachineTypes.thy @@ -8,8 +8,9 @@ chapter "ARM Machine Types" theory MachineTypes imports - "Word_Lib.WordSetup" - "Lib.OptionMonadND" + Word_Lib.WordSetup + Monads.Nondet_Empty_Fail + Monads.Nondet_Reader_Option Setup_Locale Platform begin diff --git a/spec/design/m-skel/ARM_HYP/MachineTypes.thy b/spec/design/m-skel/ARM_HYP/MachineTypes.thy index 8418a9b7dc..50d1b6a192 100644 --- a/spec/design/m-skel/ARM_HYP/MachineTypes.thy +++ b/spec/design/m-skel/ARM_HYP/MachineTypes.thy @@ -8,8 +8,9 @@ chapter \ARM\_HYP Machine Types\ theory MachineTypes imports - "Word_Lib.WordSetup" - "Lib.OptionMonadND" + Word_Lib.WordSetup + Monads.Nondet_Empty_Fail + Monads.Nondet_Reader_Option Setup_Locale Platform begin diff --git a/spec/design/m-skel/RISCV64/MachineTypes.thy b/spec/design/m-skel/RISCV64/MachineTypes.thy index 0b81bb3252..01ba79fbc3 100644 --- a/spec/design/m-skel/RISCV64/MachineTypes.thy +++ b/spec/design/m-skel/RISCV64/MachineTypes.thy @@ -8,9 +8,10 @@ chapter "RISCV 64bit Machine Types" theory MachineTypes imports - "Word_Lib.WordSetup" - "Lib.OptionMonadND" - "Lib.HaskellLib_H" + Word_Lib.WordSetup + Monads.Nondet_Empty_Fail + Monads.Nondet_Reader_Option + Lib.HaskellLib_H 
Platform begin diff --git a/spec/design/m-skel/X64/MachineTypes.thy b/spec/design/m-skel/X64/MachineTypes.thy index 25bd5f3f65..54bb5a930b 100644 --- a/spec/design/m-skel/X64/MachineTypes.thy +++ b/spec/design/m-skel/X64/MachineTypes.thy @@ -8,9 +8,10 @@ chapter "x86-64bit Machine Types" theory MachineTypes imports - "Word_Lib.WordSetup" - "Lib.OptionMonadND" - "Lib.HaskellLib_H" + Word_Lib.WordSetup + Monads.Nondet_Empty_Fail + Monads.Nondet_Reader_Option + Lib.HaskellLib_H Platform begin diff --git a/spec/design/skel/AARCH64/ArchIntermediate_H.thy b/spec/design/skel/AARCH64/ArchIntermediate_H.thy index fc109989b5..016a51f40c 100644 --- a/spec/design/skel/AARCH64/ArchIntermediate_H.thy +++ b/spec/design/skel/AARCH64/ArchIntermediate_H.thy @@ -25,10 +25,15 @@ private abbreviation (input) od)" private abbreviation (input) - "createNewTableCaps regionBase numObjects tableBits objectProto cap initialiseMappings \ (do + "createNewTableCaps regionBase numObjects ptType objectProto cap initialiseMappings \ (do + tableBits \ return (ptBits ptType); tableSize \ return (tableBits - objBits objectProto); addrs \ createObjects regionBase numObjects (injectKO objectProto) tableSize; pts \ return (map (PPtr \ fromPPtr) addrs); + modify (\ks. ks \ksArchState := + ksArchState ks \gsPTTypes := (\addr. + if addr `~elem~` map fromPPtr addrs then Just ptType + else gsPTTypes (ksArchState ks) addr)\\); initialiseMappings pts; return $ map (\pt. cap pt Nothing) pts od)" @@ -45,11 +50,11 @@ defs Arch_createNewCaps_def: | HugePageObject \ createNewFrameCaps regionBase numObjects dev (2 * ptTranslationBits NormalPT_T) ARMHugePage | VSpaceObject \ - createNewTableCaps regionBase numObjects (ptBits VSRootPT_T) (makeObject::pte) + createNewTableCaps regionBase numObjects VSRootPT_T (makeObject::pte) (\base addr. PageTableCap base VSRootPT_T addr) (\pts. return ()) | PageTableObject \ - createNewTableCaps regionBase numObjects (ptBits NormalPT_T) (makeObject::pte) + createNewTableCaps regionBase numObjects NormalPT_T (makeObject::pte) (\base addr. PageTableCap base NormalPT_T addr) (\pts. 
return ()) | VCPUObject \ (do diff --git a/spec/design/skel/AARCH64/ArchPSpace_H.thy b/spec/design/skel/AARCH64/ArchPSpace_H.thy new file mode 100644 index 0000000000..b766a985b5 --- /dev/null +++ b/spec/design/skel/AARCH64/ArchPSpace_H.thy @@ -0,0 +1,21 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming AARCH64_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/AARCH64.hs decls_only ONLY pTablePartialOverlap +#INCLUDE_HASKELL SEL4/Model/PSpace/AARCH64.hs NOT pTablePartialOverlap + +end (* context Arch *) + +end diff --git a/spec/design/skel/AARCH64/ArchRetypeDecls_H.thy b/spec/design/skel/AARCH64/ArchRetypeDecls_H.thy index df203bdb1c..2e0d9d9e68 100644 --- a/spec/design/skel/AARCH64/ArchRetypeDecls_H.thy +++ b/spec/design/skel/AARCH64/ArchRetypeDecls_H.thy @@ -21,7 +21,7 @@ context Arch begin global_naming AARCH64_H #INCLUDE_HASKELL_PREPARSE SEL4/Object/Structures/AARCH64.hs #INCLUDE_HASKELL SEL4/API/Invocation/AARCH64.hs CONTEXT AARCH64_H decls_only \ - NOT Invocation IRQControlInvocation isVSpaceFlushLabel isPageFlushLabel + NOT Invocation IRQControlInvocation isVSpaceFlushLabel isPageFlushLabel FlushType #INCLUDE_HASKELL SEL4/API/Invocation/AARCH64.hs CONTEXT AARCH64_H decls_only ONLY Invocation IRQControlInvocation diff --git a/spec/design/skel/AARCH64/ArchStructures_H.thy b/spec/design/skel/AARCH64/ArchStructures_H.thy index b11eeacf2e..67e0d81a69 100644 --- a/spec/design/skel/AARCH64/ArchStructures_H.thy +++ b/spec/design/skel/AARCH64/ArchStructures_H.thy @@ -35,7 +35,7 @@ defs makeVCPUObject_def: , vgicAPR= 0 , vgicLR= (\_. 0) \ - , vcpuRegs= funArray (const 0) aLU [(VCPURegSCTLR, sctlrDefault)] + , vcpuRegs= funArray (const 0) aLU [(VCPURegSCTLR, sctlrEL1VM)] , vcpuVPPIMasked= (\_. False) , vcpuVTimer= VirtTimer 0 \" diff --git a/spec/design/skel/AARCH64/ArchVSpace_H.thy b/spec/design/skel/AARCH64/ArchVSpace_H.thy index ffac44af5f..3da0ae4f31 100644 --- a/spec/design/skel/AARCH64/ArchVSpace_H.thy +++ b/spec/design/skel/AARCH64/ArchVSpace_H.thy @@ -30,7 +30,7 @@ where pte <- pteAtIndex level ptPtr vPtr; if isPageTablePTE pte then do - checkPTAt (getPPtrFromPTE pte); + checkPTAt NormalPT_T (getPPtrFromPTE pte); lookupPTSlotFromLevel (level - 1) (getPPtrFromPTE pte) vPtr od else return (ptBitsLeft level, ptSlotIndex level ptPtr vPtr) @@ -50,12 +50,15 @@ where if ptr = targetPtPtr then returnOk slot else doE - liftE $ checkPTAt ptr; + liftE $ checkPTAt NormalPT_T ptr; lookupPTFromLevel (level - 1) ptr vPtr targetPtPtr odE odE" -#INCLUDE_HASKELL SEL4/Kernel/VSpace/AARCH64.hs CONTEXT AARCH64_H bodies_only ArchInv=ArchRetypeDecls_H NOT lookupPTSlotFromLevel lookupPTFromLevel pteAtIndex getPPtrFromHWPTE isPageTablePTE ptBitsLeft checkPTAt +#INCLUDE_HASKELL SEL4/Kernel/VSpace/AARCH64.hs CONTEXT AARCH64_H bodies_only ArchInv=ArchRetypeDecls_H NOT lookupPTSlotFromLevel lookupPTFromLevel pteAtIndex getPPtrFromHWPTE isPageTablePTE ptBitsLeft checkPTAt checkValidMappingSize + +defs checkValidMappingSize_def: + "checkValidMappingSize sz \ stateAssert (\s. 
2 ^ sz <= gsMaxObjectSize s) []" end diff --git a/spec/design/skel/AARCH64/Arch_Structs_B.thy b/spec/design/skel/AARCH64/Arch_Structs_B.thy index 6a1974685d..caa471f08a 100644 --- a/spec/design/skel/AARCH64/Arch_Structs_B.thy +++ b/spec/design/skel/AARCH64/Arch_Structs_B.thy @@ -17,6 +17,8 @@ context Arch begin global_naming AARCH64_H #INCLUDE_HASKELL SEL4/Model/StateData/AARCH64.hs CONTEXT AARCH64_H ONLY ArmVSpaceRegionUse +#INCLUDE_HASKELL SEL4/API/Invocation/AARCH64.hs CONTEXT AARCH64_H ONLY FlushType + end end diff --git a/spec/design/skel/AARCH64/Hardware_H.thy b/spec/design/skel/AARCH64/Hardware_H.thy index 30791faa9c..15325c504a 100644 --- a/spec/design/skel/AARCH64/Hardware_H.thy +++ b/spec/design/skel/AARCH64/Hardware_H.thy @@ -23,7 +23,7 @@ context Arch begin global_naming AARCH64_H pptrUserTop kernelELFBase kernelELFBaseOffset kernelELFPAddrBase \ addrFromKPPtr ptTranslationBits vmFaultTypeFSR setVSpaceRoot \ setIRQTrigger \ - config_ARM_PA_SIZE_BITS_40 fpuThreadDeleteOp \ + config_ARM_PA_SIZE_BITS_40 fpuThreadDeleteOp isFpuEnable \ hcrVCPU hcrNative sctlrDefault vgicHCREN gicVCPUMaxNumLR sctlrEL1VM \ get_gic_vcpu_ctrl_hcr set_gic_vcpu_ctrl_hcr get_gic_vcpu_ctrl_vmcr \ set_gic_vcpu_ctrl_vmcr get_gic_vcpu_ctrl_apr set_gic_vcpu_ctrl_apr \ diff --git a/spec/design/skel/API_H.thy b/spec/design/skel/API_H.thy index b34917a871..459cda9e74 100644 --- a/spec/design/skel/API_H.thy +++ b/spec/design/skel/API_H.thy @@ -14,6 +14,6 @@ text \collects all API modules\ #INCLUDE_HASKELL SEL4.lhs decls_only NOT callKernel -#INCLUDE_HASKELL SEL4.lhs NOT kernelExitAssertions +#INCLUDE_HASKELL SEL4.lhs NOT kernelExitAssertions fastpathKernelAssertions end diff --git a/spec/design/skel/ARM/ArchPSpace_H.thy b/spec/design/skel/ARM/ArchPSpace_H.thy new file mode 100644 index 0000000000..af3737ffb7 --- /dev/null +++ b/spec/design/skel/ARM/ArchPSpace_H.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming ARM_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/ARM.hs + +end (* context Arch *) + +end diff --git a/spec/design/skel/ARM/ArchTypes_H.thy b/spec/design/skel/ARM/ArchTypes_H.thy index 04887e2c72..e82300474d 100644 --- a/spec/design/skel/ARM/ArchTypes_H.thy +++ b/spec/design/skel/ARM/ArchTypes_H.thy @@ -34,12 +34,12 @@ interpretation Arch . 
definition enum_object_type: "enum_class.enum \ map APIObjectType (enum_class.enum :: apiobject_type list) @ - [SmallPageObject, + [PageDirectoryObject, + SmallPageObject, LargePageObject, SectionObject, SuperSectionObject, - PageTableObject, - PageDirectoryObject + PageTableObject ]" definition diff --git a/spec/design/skel/ARM/RegisterSet_H.thy b/spec/design/skel/ARM/RegisterSet_H.thy index 7af02c03e3..28821559dc 100644 --- a/spec/design/skel/ARM/RegisterSet_H.thy +++ b/spec/design/skel/ARM/RegisterSet_H.thy @@ -9,14 +9,14 @@ chapter "Register Set" theory RegisterSet_H imports "Lib.HaskellLib_H" - MachineTypes + MachineOps begin context Arch begin global_naming ARM_H definition - newContext :: "register => machine_word" + newContext :: "user_context" where - "newContext \ (K 0) aLU initContext" + "newContext \ UserContext ((K 0) aLU initContext)" end end diff --git a/spec/design/skel/ARM_HYP/ArchPSpace_H.thy b/spec/design/skel/ARM_HYP/ArchPSpace_H.thy new file mode 100644 index 0000000000..38644c9b44 --- /dev/null +++ b/spec/design/skel/ARM_HYP/ArchPSpace_H.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming ARM_HYP_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/ARM.hs + +end (* context Arch *) + +end diff --git a/spec/design/skel/ARM_HYP/ArchTypes_H.thy b/spec/design/skel/ARM_HYP/ArchTypes_H.thy index 807b774830..b80c0e045e 100644 --- a/spec/design/skel/ARM_HYP/ArchTypes_H.thy +++ b/spec/design/skel/ARM_HYP/ArchTypes_H.thy @@ -34,12 +34,12 @@ interpretation Arch . definition enum_object_type: "enum_class.enum \ map APIObjectType (enum_class.enum :: apiobject_type list) @ - [SmallPageObject, + [PageDirectoryObject, + SmallPageObject, LargePageObject, SectionObject, SuperSectionObject, PageTableObject, - PageDirectoryObject, VCPUObject ]" diff --git a/spec/design/skel/ARM_HYP/RegisterSet_H.thy b/spec/design/skel/ARM_HYP/RegisterSet_H.thy index b3f2196a28..81ec35c669 100644 --- a/spec/design/skel/ARM_HYP/RegisterSet_H.thy +++ b/spec/design/skel/ARM_HYP/RegisterSet_H.thy @@ -9,14 +9,14 @@ chapter "Register Set" theory RegisterSet_H imports "Lib.HaskellLib_H" - MachineTypes + MachineOps begin context Arch begin global_naming ARM_HYP_H definition - newContext :: "register => machine_word" + newContext :: "user_context" where - "newContext \ (K 0) aLU initContext" + "newContext \ UserContext ((K 0) aLU initContext)" end end diff --git a/spec/design/skel/CSpace_H.thy b/spec/design/skel/CSpace_H.thy index b3063dacda..79c8c7f4b6 100644 --- a/spec/design/skel/CSpace_H.thy +++ b/spec/design/skel/CSpace_H.thy @@ -29,7 +29,8 @@ termination done defs - resolveAddressBits_decl_def [simp]: + resolveAddressBits_decl_def: "CSpaceDecls_H.resolveAddressBits \ resolveAddressBits" +declare resolveAddressBits_decl_def[simp] end diff --git a/spec/design/skel/KernelInit_H.thy b/spec/design/skel/KernelInit_H.thy index 490c64222a..063d5c90c3 100644 --- a/spec/design/skel/KernelInit_H.thy +++ b/spec/design/skel/KernelInit_H.thy @@ -65,7 +65,7 @@ newKernelState_def: ksDomSchedule = newKSDomSchedule, ksCurDomain = newKSCurDomain, ksDomainTime = newKSDomainTime, - ksReadyQueues = const [], + ksReadyQueues = const (TcbQueue None None), ksReadyQueuesL1Bitmap = const 0, ksReadyQueuesL2Bitmap = const 0, ksCurThread = error [], diff --git a/spec/design/skel/KernelStateData_H.thy 
b/spec/design/skel/KernelStateData_H.thy index 82a0ab755c..abb803e422 100644 --- a/spec/design/skel/KernelStateData_H.thy +++ b/spec/design/skel/KernelStateData_H.thy @@ -30,9 +30,7 @@ requalify_types (in Arch) subsection "The Kernel State" -type_synonym ready_queue = "machine_word list" -translations -(type) "machine_word list" <= (type) "ready_queue" +type_synonym ready_queue = tcb_queue text \We pull a fast one on haskell here ... although Haskell expects a KernelMonad which is a StateT monad in KernelData that wraps a MachineMonad, @@ -85,7 +83,7 @@ where return r od" -#INCLUDE_HASKELL SEL4/Model/StateData.lhs NOT doMachineOp KernelState ReadyQueue Kernel assert stateAssert findM funArray newKernelState capHasProperty -#INCLUDE_HASKELL SEL4/Model/StateData.lhs decls_only ONLY capHasProperty +#INCLUDE_HASKELL SEL4/Model/StateData.lhs decls_only ONLY capHasProperty ksReadyQueues_asrt ready_qs_runnable idleThreadNotQueued +#INCLUDE_HASKELL SEL4/Model/StateData.lhs NOT doMachineOp KernelState ReadyQueue Kernel assert stateAssert findM funArray newKernelState capHasProperty ksReadyQueues_asrt ready_qs_runnable idleThreadNotQueued end diff --git a/spec/design/skel/PSpaceFuns_H.thy b/spec/design/skel/PSpaceFuns_H.thy index 81b5db6334..fcc0abdb39 100644 --- a/spec/design/skel/PSpaceFuns_H.thy +++ b/spec/design/skel/PSpaceFuns_H.thy @@ -10,6 +10,7 @@ theory PSpaceFuns_H imports ObjectInstances_H FaultMonad_H + ArchPSpace_H "Lib.DataMap" begin @@ -22,6 +23,9 @@ requalify_consts loadWord end +requalify_consts (in Arch) + deleteGhost + definition deleteRange :: "( machine_word , 'a ) DataMap.map \ machine_word \ nat \ ( machine_word , 'a ) DataMap.map" where "deleteRange m ptr bits \ let inRange = (\ x. x && ((- mask bits) - 1) = fromPPtr ptr) in @@ -32,6 +36,6 @@ where "deleteRange m ptr bits \ consts lookupAround2 :: "('k :: {linorder,finite}) \ ( 'k , 'a ) DataMap.map \ (('k * 'a) option * 'k option)" -#INCLUDE_HASKELL SEL4/Model/PSpace.lhs bodies_only Data.Map=DataMap NOT PSpace ptrBits ptrBitsForSize lookupAround maybeToMonad typeError alignError alignCheck sizeCheck objBits deletionIsSafe cNodePartialOverlap pointerInUserData ksASIDMapSafe deleteRange +#INCLUDE_HASKELL SEL4/Model/PSpace.lhs bodies_only Data.Map=DataMap NOT PSpace ptrBits ptrBitsForSize lookupAround maybeToMonad typeError alignError alignCheck sizeCheck objBits deletionIsSafe deletionIsSafe_delete_locale cNodePartialOverlap pointerInUserData ksASIDMapSafe deleteRange end diff --git a/spec/design/skel/RISCV64/ArchPSpace_H.thy b/spec/design/skel/RISCV64/ArchPSpace_H.thy new file mode 100644 index 0000000000..d692e99c12 --- /dev/null +++ b/spec/design/skel/RISCV64/ArchPSpace_H.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming RISCV64_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/RISCV64.hs + +end (* context Arch *) + +end diff --git a/spec/design/skel/RISCV64/Hardware_H.thy b/spec/design/skel/RISCV64/Hardware_H.thy index 2abd4d7ff9..9c3bcd2a9c 100644 --- a/spec/design/skel/RISCV64/Hardware_H.thy +++ b/spec/design/skel/RISCV64/Hardware_H.thy @@ -12,7 +12,7 @@ begin context Arch begin global_naming RISCV64_H -#INCLUDE_HASKELL SEL4/Machine/Hardware/RISCV64.hs Platform=Platform.RISCV64 CONTEXT RISCV64_H NOT plic_complete_claim getMemoryRegions getDeviceRegions getKernelDevices loadWord storeWord storeWordVM 
getActiveIRQ ackInterrupt maskInterrupt configureTimer resetTimer debugPrint getRestartPC setNextPC clearMemory clearMemoryVM initMemory freeMemory setHardwareASID wordFromPDE wordFromPTE VMFaultType HypFaultType VMPageSize pageBits pageBitsForSize toPAddr addrFromPPtr ptrFromPAddr sfence physBase paddrBase pptrBase pptrBaseOffset pptrUserTop kernelELFBase kernelELFBaseOffset kernelELFPAddrBase addrFromKPPtr ptTranslationBits vmFaultTypeFSR read_stval setVSpaceRoot hwASIDFlush setIRQTrigger +#INCLUDE_HASKELL SEL4/Machine/Hardware/RISCV64.hs Platform=Platform.RISCV64 CONTEXT RISCV64_H NOT plic_complete_claim getMemoryRegions getDeviceRegions getKernelDevices loadWord storeWord storeWordVM getActiveIRQ ackInterrupt maskInterrupt configureTimer resetTimer debugPrint getRestartPC setNextPC clearMemory clearMemoryVM initMemory freeMemory setHardwareASID wordFromPDE wordFromPTE VMFaultType HypFaultType VMPageSize pageBits pageBitsForSize toPAddr addrFromPPtr ptrFromPAddr sfence physBase paddrBase pptrBase pptrBaseOffset pptrTop pptrUserTop kernelELFBase kernelELFBaseOffset kernelELFPAddrBase addrFromKPPtr ptTranslationBits vmFaultTypeFSR read_stval setVSpaceRoot hwASIDFlush setIRQTrigger end diff --git a/spec/design/skel/Untyped_H.thy b/spec/design/skel/Untyped_H.thy index 44a6b16d77..ee629301bd 100644 --- a/spec/design/skel/Untyped_H.thy +++ b/spec/design/skel/Untyped_H.thy @@ -28,6 +28,8 @@ end consts cNodeOverlap :: "(machine_word \ nat option) \ (machine_word \ bool) \ bool" -#INCLUDE_HASKELL SEL4/Object/Untyped.lhs NOT cNodeOverlap canonicalAddressAssert +#INCLUDE_HASKELL SEL4/Object/Untyped.lhs decls_only ONLY archOverlap + +#INCLUDE_HASKELL SEL4/Object/Untyped.lhs NOT cNodeOverlap canonicalAddressAssert archOverlap end diff --git a/spec/design/skel/X64/ArchPSpace_H.thy b/spec/design/skel/X64/ArchPSpace_H.thy new file mode 100644 index 0000000000..0c9b7e8c3e --- /dev/null +++ b/spec/design/skel/X64/ArchPSpace_H.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming X64_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/X64.hs + +end (* context Arch *) + +end diff --git a/spec/haskell/Makefile b/spec/haskell/Makefile index b53a735561..39bce7e218 100644 --- a/spec/haskell/Makefile +++ b/spec/haskell/Makefile @@ -31,6 +31,13 @@ GHC_DEV_OPTS=--ghc-options="" all: build-aarch64 build-riscv build-arm build-arm-hyp-nosmmu build-x64 +# build targets by L4V_ARCH: +ARM: build-arm +ARM_HYP: build-arm-hyp-nosmmu +X64: build-x64 +RISCV64: build-riscv +AARCH64: build-aarch64 + sandbox: .stack-work build-arm: sandbox $(BOOT_FILES) diff --git a/spec/haskell/SEL4.cabal b/spec/haskell/SEL4.cabal index 9347acc987..a23531ee62 100644 --- a/spec/haskell/SEL4.cabal +++ b/spec/haskell/SEL4.cabal @@ -12,13 +12,13 @@ build-type: Custom license: GPL-2.0-only author: Philip Derrin et. 
al., NICTA synopsis: Executable specification for the seL4 Kernel -tested-with: GHC == 9.0.2 +tested-with: GHC == 9.2.8 homepage: http://sel4.systems/ custom-setup setup-depends: - base == 4.15.*, - Cabal == 3.4.1.0 + base == 4.16.*, + Cabal == 3.6.3.* Flag FFI description: Include the C language bindings @@ -47,7 +47,7 @@ Flag ArchAArch64 Library exposed-modules: SEL4 SEL4.Machine.Target - build-depends: mtl==2.2.*, base==4.15.*, array, containers, transformers + build-depends: mtl==2.2.*, base==4.16.*, array, containers, transformers if flag(FFI) -- FFIBindings currently relies on POSIX signal handlers. This could @@ -124,6 +124,7 @@ Library SEL4.Object.Instances.ARM SEL4.Object.TCB.ARM SEL4.Model.StateData.ARM + SEL4.Model.PSpace.ARM SEL4.Machine.RegisterSet.ARM SEL4.Machine.Hardware.ARM @@ -147,6 +148,7 @@ Library SEL4.Object.VCPU.ARM SEL4.Object.TCB.ARM SEL4.Model.StateData.ARM + SEL4.Model.PSpace.ARM SEL4.Machine.RegisterSet.ARM SEL4.Machine.Hardware.ARM @@ -168,6 +170,7 @@ Library SEL4.Object.IOPort.X64 SEL4.Object.TCB.X64 SEL4.Model.StateData.X64 + SEL4.Model.PSpace.X64 SEL4.Machine.RegisterSet.X64 SEL4.Machine.Hardware.X64 @@ -189,6 +192,7 @@ Library SEL4.Object.Instances.RISCV64 SEL4.Object.TCB.RISCV64 SEL4.Model.StateData.RISCV64 + SEL4.Model.PSpace.RISCV64 SEL4.Machine.RegisterSet.RISCV64 SEL4.Machine.Hardware.RISCV64 @@ -211,6 +215,7 @@ Library SEL4.Object.VCPU.AARCH64 SEL4.Object.TCB.AARCH64 SEL4.Model.StateData.AARCH64 + SEL4.Model.PSpace.AARCH64 SEL4.Machine.RegisterSet.AARCH64 SEL4.Machine.Hardware.AARCH64 @@ -224,6 +229,8 @@ Library -fno-warn-unrecognised-pragmas -fno-warn-unused-binds -fno-warn-unused-imports -fno-warn-unused-matches + -fno-warn-incomplete-record-updates + -fno-warn-incomplete-uni-patterns cpp-options: -- set via Setup.hs hook diff --git a/spec/haskell/src/Data/WordLib.lhs b/spec/haskell/src/Data/WordLib.lhs index 9d583c248a..8d2afca7e0 100644 --- a/spec/haskell/src/Data/WordLib.lhs +++ b/spec/haskell/src/Data/WordLib.lhs @@ -1,4 +1,5 @@ % +% Copyright 2023, Proofcraft Pty Ltd % Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) % % SPDX-License-Identifier: GPL-2.0-only @@ -17,6 +18,7 @@ > -- * Bytes required to store a word > -- * Selecting one of two alternatives depending on the size of the machine word > -- (32 or 64 bits) +> -- * Counting the number of trailing zeros in a word > > wordBits :: Int > wordBits = finiteBitSize (undefined::Word) @@ -32,4 +34,7 @@ > > wordRadix :: Int > wordRadix = wordSizeCase 5 6 - +> +> countTrailingZeros :: (Bits b, FiniteBits b) => b -> Int +> countTrailingZeros w = +> length . takeWhile not . map (testBit w) $ [0 .. finiteBitSize w - 1] diff --git a/spec/haskell/src/SEL4.lhs b/spec/haskell/src/SEL4.lhs index 4d180033a7..a1dbefadd6 100644 --- a/spec/haskell/src/SEL4.lhs +++ b/spec/haskell/src/SEL4.lhs @@ -39,6 +39,7 @@ faults, and system calls; the set of possible events is defined in > callKernel :: Event -> Kernel () > callKernel ev = do +> stateAssert fastpathKernelAssertions "Fast path assertions must hold" > runExceptT $ handleEvent ev > `catchError` (\_ -> withoutPreemption $ do > irq <- doMachineOp (getActiveIRQ True) @@ -51,3 +52,12 @@ This will be replaced by actual assertions in the proofs: > kernelExitAssertions :: KernelState -> Bool > kernelExitAssertions _ = True + +During refinement proofs, abstract invariants are used to show properties on the +design spec without corresponding invariants on the concrete level. 
Since the +fast path proofs do not have access to the abstract invariant level nor the +state relation, any extra properties need to be crossed over via this assertion. +This will be replaced by actual assertions in the proofs. + +> fastpathKernelAssertions :: KernelState -> Bool +> fastpathKernelAssertions _ = True diff --git a/spec/haskell/src/SEL4/API/InvocationLabels/AARCH64.hs b/spec/haskell/src/SEL4/API/InvocationLabels/AARCH64.hs index 9ec124ac0e..1b3ee21898 100644 --- a/spec/haskell/src/SEL4/API/InvocationLabels/AARCH64.hs +++ b/spec/haskell/src/SEL4/API/InvocationLabels/AARCH64.hs @@ -16,6 +16,7 @@ data ArchInvocationLabel | ARMVSpaceInvalidate_Data | ARMVSpaceCleanInvalidate_Data | ARMVSpaceUnify_Instruction + | ARMSMCCall | ARMPageTableMap | ARMPageTableUnmap | ARMPageMap diff --git a/spec/haskell/src/SEL4/Kernel/Hypervisor/AARCH64.hs b/spec/haskell/src/SEL4/Kernel/Hypervisor/AARCH64.hs index c945a08ee1..cd9e2790d7 100644 --- a/spec/haskell/src/SEL4/Kernel/Hypervisor/AARCH64.hs +++ b/spec/haskell/src/SEL4/Kernel/Hypervisor/AARCH64.hs @@ -7,14 +7,25 @@ module SEL4.Kernel.Hypervisor.AARCH64 where -import SEL4.Machine (PPtr(..)) +import SEL4.Machine (PPtr(..), mask) import SEL4.Model import SEL4.Object.Structures import SEL4.API.Failures import SEL4.Kernel.FaultHandler import SEL4.API.Failures.AARCH64 -import SEL4.Machine.Hardware.AARCH64 (HypFaultType(..)) +import SEL4.Machine.Hardware.AARCH64 (HypFaultType(..),isFpuEnable,getESR) +import Data.Bits handleHypervisorFault :: PPtr TCB -> HypFaultType -> Kernel () handleHypervisorFault thread (ARMVCPUFault hsr) = do - handleFault thread (ArchFault $ VCPUFault $ fromIntegral hsr) + fpu_enabled <- doMachineOp isFpuEnable + -- the C code makes extra checks on hsr to get to this branch, but the + -- handling of lazy FPU switching is out of scope of verification at this + -- time, so we omit the entire branch where an FPU fault could occur + if not fpu_enabled + then error "Lazy FPU switch is outside of current verification scope" + else if hsr == 0x2000000 -- UNKNOWN_FAULT + then do + esr <- doMachineOp getESR + handleFault thread (UserException (esr .&. mask 32) 0) + else handleFault thread (ArchFault $ VCPUFault $ fromIntegral hsr) diff --git a/spec/haskell/src/SEL4/Kernel/Thread.lhs b/spec/haskell/src/SEL4/Kernel/Thread.lhs index 0a0fcff881..e3215bd567 100644 --- a/spec/haskell/src/SEL4/Kernel/Thread.lhs +++ b/spec/haskell/src/SEL4/Kernel/Thread.lhs @@ -19,7 +19,7 @@ We use the C preprocessor to select a target architecture. \begin{impdetails} % {-# BOOT-IMPORTS: SEL4.Model SEL4.Machine SEL4.Object.Structures SEL4.Object.Instances() SEL4.API.Types #-} -% {-# BOOT-EXPORTS: setDomain setMCPriority setPriority getThreadState setThreadState setBoundNotification getBoundNotification doIPCTransfer isRunnable restart suspend doReplyTransfer tcbSchedEnqueue tcbSchedDequeue rescheduleRequired timerTick possibleSwitchTo #-} +% {-# BOOT-EXPORTS: setDomain setMCPriority setPriority getThreadState setThreadState setBoundNotification getBoundNotification doIPCTransfer isRunnable restart suspend doReplyTransfer tcbSchedEnqueue tcbSchedDequeue rescheduleRequired timerTick possibleSwitchTo tcbQueueEmpty tcbQueuePrepend tcbQueueAppend tcbQueueInsert tcbQueueRemove #-} > import Prelude hiding (Word) > import SEL4.Config @@ -35,6 +35,7 @@ We use the C preprocessor to select a target architecture. 
> import Data.Bits hiding (countLeadingZeros) > import Data.Array > import Data.WordLib +> import Data.Maybe(fromJust, isJust) \end{impdetails} @@ -405,13 +406,15 @@ Note also that the level 2 bitmap array is stored in reverse in order to get bet > chooseThread :: Kernel () > chooseThread = do +> stateAssert ksReadyQueues_asrt "" +> stateAssert ready_qs_runnable "threads in the ready queues are runnable'" > curdom <- if numDomains > 1 then curDomain else return 0 > l1 <- getReadyQueuesL1Bitmap curdom > if l1 /= 0 > then do > prio <- getHighestPrio curdom > queue <- getQueue curdom prio -> let thread = head queue +> let thread = fromJust $ tcbQueueHead queue > runnable <- isRunnable thread > assert runnable "Scheduled a non-runnable thread" > switchToThread thread @@ -424,6 +427,10 @@ To switch to a new thread, we call the architecture-specific thread switch funct > switchToThread :: PPtr TCB -> Kernel () > switchToThread thread = do +> runnable <- isRunnable thread +> assert runnable "thread must be runnable" +> stateAssert ksReadyQueues_asrt "" +> stateAssert ready_qs_runnable "threads in the ready queues are runnable'" > Arch.switchToThread thread > tcbSchedDequeue thread > setCurThread thread @@ -432,6 +439,7 @@ Switching to the idle thread is similar, except that we call a different archite > switchToIdleThread :: Kernel () > switchToIdleThread = do +> stateAssert ready_qs_runnable "threads in the ready queues are runnable'" > thread <- getIdleThread > Arch.switchToIdleThread > setCurThread thread @@ -597,41 +605,135 @@ The following two functions place a thread at the beginning or end of its priori > modifyReadyQueuesL1Bitmap tdom $ > (\w -> w .&. (complement $ bit l1index)) +> tcbQueueEmpty :: TcbQueue -> Bool +> tcbQueueEmpty queue = tcbQueueHead queue == Nothing + +> tcbQueuePrepend :: TcbQueue -> PPtr TCB -> Kernel TcbQueue +> tcbQueuePrepend queue tcbPtr = do +> q <- if tcbQueueEmpty queue +> then return $ queue { tcbQueueEnd = Just tcbPtr } +> else do +> threadSet (\t -> t { tcbSchedNext = tcbQueueHead queue }) tcbPtr +> threadSet (\t -> t { tcbSchedPrev = Just tcbPtr }) (fromJust $ tcbQueueHead queue) +> return $ queue + +> return $ q { tcbQueueHead = Just tcbPtr} + +> tcbQueueAppend :: TcbQueue -> PPtr TCB -> Kernel TcbQueue +> tcbQueueAppend queue tcbPtr = do +> q <- if tcbQueueEmpty queue +> then return $ queue { tcbQueueHead = Just tcbPtr } +> else do +> threadSet (\t -> t { tcbSchedPrev = tcbQueueEnd queue }) tcbPtr +> threadSet (\t -> t { tcbSchedNext = Just tcbPtr }) (fromJust $ tcbQueueEnd queue) +> return $ queue + +> return $ q { tcbQueueEnd = Just tcbPtr} + +Insert a thread into the middle of a queue, immediately before afterPtr, where afterPtr is not the head of the queue + +> tcbQueueInsert :: PPtr TCB -> PPtr TCB -> Kernel () +> tcbQueueInsert tcbPtr afterPtr = do +> tcb <- getObject afterPtr +> beforePtrOpt <- return $ tcbSchedPrev tcb +> assert (beforePtrOpt /= Nothing) "afterPtr must not be the head of the list" +> beforePtr <- return $ fromJust beforePtrOpt +> assert (beforePtr /= afterPtr) "the tcbSchedPrev pointer of a TCB must never point to itself" + +> threadSet (\t -> t { tcbSchedPrev = Just beforePtr }) tcbPtr +> threadSet (\t -> t { tcbSchedNext = Just afterPtr}) tcbPtr +> threadSet (\t -> t { tcbSchedPrev = Just tcbPtr }) afterPtr +> threadSet (\t -> t { tcbSchedNext = Just tcbPtr }) beforePtr + +Remove a thread from a queue, which must originally contain the thread + +> tcbQueueRemove :: TcbQueue -> PPtr TCB -> Kernel TcbQueue +> tcbQueueRemove queue 
tcbPtr = do +> tcb <- getObject tcbPtr +> beforePtrOpt <- return $ tcbSchedPrev tcb +> afterPtrOpt <- return $ tcbSchedNext tcb + +> if tcbQueueHead queue == Just tcbPtr && tcbQueueEnd queue == Just tcbPtr + +The queue is the singleton containing tcbPtr + +> then return $ TcbQueue { tcbQueueHead = Nothing, tcbQueueEnd = Nothing } +> else +> if tcbQueueHead queue == Just tcbPtr + +tcbPtr is the head of the queue + +> then do +> assert (afterPtrOpt /= Nothing) "the queue is not a singleton" +> threadSet (\t -> t { tcbSchedPrev = Nothing }) (fromJust $ afterPtrOpt) +> threadSet (\t -> t { tcbSchedNext = Nothing }) tcbPtr +> return $ queue { tcbQueueHead = afterPtrOpt } +> else +> if tcbQueueEnd queue == Just tcbPtr + +tcbPtr is the end of the queue + +> then do +> assert (beforePtrOpt /= Nothing) "the queue is not a singleton" +> threadSet (\t -> t { tcbSchedNext = Nothing }) (fromJust $ beforePtrOpt) +> threadSet (\t -> t { tcbSchedPrev = Nothing }) tcbPtr +> return $ queue { tcbQueueEnd = beforePtrOpt } +> else do + +tcbPtr is in the middle of the queue + +> assert (afterPtrOpt /= Nothing) "the queue is not a singleton" +> assert (beforePtrOpt /= Nothing) "the queue is not a singleton" +> threadSet (\t -> t { tcbSchedNext = afterPtrOpt }) (fromJust $ beforePtrOpt) +> threadSet (\t -> t { tcbSchedPrev = beforePtrOpt }) (fromJust $ afterPtrOpt) +> threadSet (\t -> t { tcbSchedPrev = Nothing }) tcbPtr +> threadSet (\t -> t { tcbSchedNext = Nothing }) tcbPtr +> return queue + > tcbSchedEnqueue :: PPtr TCB -> Kernel () > tcbSchedEnqueue thread = do +> stateAssert ksReadyQueues_asrt "" +> runnable <- isRunnable thread +> assert runnable "thread must be runnable" > queued <- threadGet tcbQueued thread > unless queued $ do > tdom <- threadGet tcbDomain thread > prio <- threadGet tcbPriority thread > queue <- getQueue tdom prio -> setQueue tdom prio $ thread : queue -> when (null queue) $ addToBitmap tdom prio +> when (tcbQueueEmpty queue) $ addToBitmap tdom prio +> queue' <- tcbQueuePrepend queue thread +> setQueue tdom prio queue' > threadSet (\t -> t { tcbQueued = True }) thread > tcbSchedAppend :: PPtr TCB -> Kernel () > tcbSchedAppend thread = do +> stateAssert ksReadyQueues_asrt "" +> runnable <- isRunnable thread +> assert runnable "thread must be runnable" > queued <- threadGet tcbQueued thread > unless queued $ do > tdom <- threadGet tcbDomain thread > prio <- threadGet tcbPriority thread > queue <- getQueue tdom prio -> setQueue tdom prio $ queue ++ [thread] -> when (null queue) $ addToBitmap tdom prio +> when (tcbQueueEmpty queue) $ addToBitmap tdom prio +> queue' <- tcbQueueAppend queue thread +> setQueue tdom prio queue' > threadSet (\t -> t { tcbQueued = True }) thread The following function dequeues a thread, if it is queued. 
> tcbSchedDequeue :: PPtr TCB -> Kernel () > tcbSchedDequeue thread = do +> stateAssert ksReadyQueues_asrt "" > queued <- threadGet tcbQueued thread > when queued $ do > tdom <- threadGet tcbDomain thread > prio <- threadGet tcbPriority thread > queue <- getQueue tdom prio -> let queue' = filter (/=thread) queue +> queue' <- tcbQueueRemove queue thread > setQueue tdom prio queue' -> when (null queue') $ removeFromBitmap tdom prio > threadSet (\t -> t { tcbQueued = False }) thread +> when (tcbQueueEmpty queue') $ removeFromBitmap tdom prio \subsubsection{Timer Ticks} diff --git a/spec/haskell/src/SEL4/Kernel/VSpace/AARCH64.hs b/spec/haskell/src/SEL4/Kernel/VSpace/AARCH64.hs index db1af5d659..d30d90e5af 100644 --- a/spec/haskell/src/SEL4/Kernel/VSpace/AARCH64.hs +++ b/spec/haskell/src/SEL4/Kernel/VSpace/AARCH64.hs @@ -68,7 +68,7 @@ lookupIPCBuffer isReceiver thread = do {- ASID Lookups -} --- FIXME AARCH64: make this a Reader Monad +-- FIXME: make this a Reader Monad when we move to MCS getPoolPtr :: ASID -> Kernel (Maybe (PPtr ASIDPool)) getPoolPtr asid = do assert (asid > 0) "ASID 0 is used for objects that are not mapped" @@ -76,7 +76,7 @@ getPoolPtr asid = do asidTable <- gets (armKSASIDTable . ksArchState) return $ asidTable!(asidHighBitsOf asid) --- FIXME AARCH64: make this a Reader Monad +-- FIXME: make this a Reader Monad when we move to MCS getASIDPoolEntry :: ASID -> Kernel (Maybe ASIDPoolEntry) getASIDPoolEntry asid = do poolPtr <- getPoolPtr asid @@ -104,7 +104,7 @@ findVSpaceForASID asid = do case maybeEntry of Just (ASIDPoolVSpace vmID ptr) -> do assert (ptr /= 0) "findVSpaceForASID: found null PD" - withoutFailure $ checkPTAt ptr + withoutFailure $ checkPTAt VSRootPT_T ptr return ptr _ -> throw $ InvalidRoot @@ -112,9 +112,9 @@ maybeVSpaceForASID :: ASID -> Kernel (Maybe (PPtr PTE)) maybeVSpaceForASID asid = liftM Just (findVSpaceForASID asid) `catchFailure` const (return Nothing) --- used in proofs only, will be translated to ptable_at. -checkPTAt :: PPtr PTE -> Kernel () -checkPTAt _ = return () +-- used in proofs only, will be translated to ptable_at + ghost state type. +checkPTAt :: PT_Type -> PPtr PTE -> Kernel () +checkPTAt _ _ = return () {- Locating Page Table Slots -} @@ -128,7 +128,8 @@ isPagePTE (PagePTE {}) = True isPagePTE _ = False getPPtrFromPTE :: PTE -> PPtr PTE -getPPtrFromPTE pte = ptrFromPAddr $ pteBaseAddress pte +getPPtrFromPTE pte = + ptrFromPAddr (if isPagePTE pte then pteBaseAddress pte else ptePPN pte `shiftL` pageBits) -- how many bits there are left to be translated at a given level (0 = bottom -- level). This counts the bits being translated by the levels below the current one, so @@ -172,7 +173,7 @@ lookupPTSlotFromLevel level ptPtr vPtr = do pte <- pteAtIndex level ptPtr vPtr if isPageTablePTE pte then do - checkPTAt (getPPtrFromPTE pte) + checkPTAt NormalPT_T (getPPtrFromPTE pte) lookupPTSlotFromLevel (level-1) (getPPtrFromPTE pte) vPtr else return (ptBitsLeft level, ptSlotIndex level ptPtr vPtr) @@ -180,14 +181,20 @@ lookupPTSlotFromLevel level ptPtr vPtr = do -- a given virtual address, together with the number of bits left to translate, -- indicating the size of the frame. 
lookupPTSlot :: PPtr PTE -> VPtr -> Kernel (Int, PPtr PTE) -lookupPTSlot = lookupPTSlotFromLevel maxPTLevel +lookupPTSlot pt vptr = do + checkPTAt VSRootPT_T pt + lookupPTSlotFromLevel maxPTLevel pt vptr lookupFrame :: PPtr PTE -> VPtr -> Kernel (Maybe (Int, PAddr)) lookupFrame vspaceRoot vPtr = do (bitsLeft, ptePtr) <- lookupPTSlot vspaceRoot vPtr pte <- getObject ptePtr if isPagePTE pte - then return $ Just (bitsLeft, pteBaseAddress pte) + then do + let baseAddr = pteBaseAddress pte + assert (fromPAddr baseAddr .&. mask bitsLeft == 0) + "frame address must be aligned" + return $ Just (bitsLeft, baseAddr) else return Nothing {- Page Table Modification -} @@ -197,7 +204,7 @@ lookupFrame vspaceRoot vPtr = do handleVMFault :: PPtr TCB -> VMFaultType -> KernelF Fault () handleVMFault _ ARMDataAbort = do addr <- withoutFailure $ doMachineOp getFAR - fault <- withoutFailure $ doMachineOp getDFSR + fault <- withoutFailure $ doMachineOp getESR active <- withoutFailure $ curVCPUActive addr <- if active then do @@ -212,7 +219,7 @@ handleVMFault _ ARMDataAbort = do handleVMFault thread ARMPrefetchAbort = do pc <- withoutFailure $ asUser thread $ getRestartPC - fault <- withoutFailure $ doMachineOp getIFSR + fault <- withoutFailure $ doMachineOp getESR active <- withoutFailure $ curVCPUActive pc <- if active then do @@ -256,7 +263,6 @@ doFlush flushType vstart vend pstart = cleanCacheRange_PoU vstart vend pstart dsb invalidateCacheRange_I vstart vend pstart - branchFlushRange vstart vend pstart isb {- Unmapping and Deletion -} @@ -299,6 +305,8 @@ deleteASID asid pt = do when (maybeRoot == Just pt) $ do invalidateTLBByASID asid invalidateASIDEntry asid + -- re-read pool, because invalidateASIDEntry changes it + ASIDPool pool <- getObject poolPtr let pool' = pool//[(asid .&. 
mask asidLowBits, Nothing)] setObject poolPtr $ ASIDPool pool' tcb <- getCurThread @@ -322,7 +330,7 @@ lookupPTFromLevel level ptPtr vPtr targetPtPtr = do if ptr == targetPtPtr then return slot else do - withoutFailure $ checkPTAt ptr + withoutFailure $ checkPTAt NormalPT_T ptr lookupPTFromLevel (level-1) ptr vPtr targetPtPtr unmapPageTable :: ASID -> VPtr -> PPtr PTE -> Kernel () @@ -382,6 +390,9 @@ setVMRoot tcb = do capPTBasePtr = vspaceRoot }) -> do vspaceRoot' <- findVSpaceForASID asid when (vspaceRoot /= vspaceRoot') $ throw InvalidRoot + assert (fromVPtr pptrBase <= fromPPtr vspaceRoot && + fromPPtr vspaceRoot < fromVPtr pptrTop) + "vspaceRoot must be in kernel window" withoutFailure $ armContextSwitch vspaceRoot asid _ -> throw InvalidRoot) (\_ -> setGlobalUserVSpace) @@ -404,7 +415,7 @@ loadVMID asid = do maybeEntry <- getASIDPoolEntry asid case maybeEntry of Just (ASIDPoolVSpace vmID ptr) -> return vmID - _ -> error ("loadVMID: no entry for asid") + _ -> fail "loadVMID: no entry for asid" invalidateASID :: ASID -> Kernel () invalidateASID = updateASIDPoolEntry (\entry -> Just $ entry { apVMID = Nothing }) @@ -459,24 +470,20 @@ isVTableRoot :: Capability -> Bool isVTableRoot (ArchObjectCap (PageTableCap { capPTType = VSRootPT_T })) = True isVTableRoot _ = False --- FIXME AARCH64: name indirection kept here for sync with C; both (C and --- Haskell) should define isValidVTableRoot directly -isValidNativeRoot :: Capability -> Bool -isValidNativeRoot cap = isVTableRoot cap && isJust (capPTMappedAddress (capCap cap)) +isValidVTableRoot :: Capability -> Bool +isValidVTableRoot cap = isVTableRoot cap && isJust (capPTMappedAddress (capCap cap)) --- if isValidNativeRoot holds, return VSpace and ASID, otherwise throw error +-- if isValidVTableRoot holds, return VSpace and ASID, otherwise throw error checkVSpaceRoot :: Capability -> Int -> KernelF SyscallError (PPtr PTE, ASID) checkVSpaceRoot vspaceCap argNo = case vspaceCap of ArchObjectCap (PageTableCap { + capPTType = VSRootPT_T, capPTMappedAddress = Just (asid, _), capPTBasePtr = vspace }) -> return (vspace, asid) _ -> throw $ InvalidCapability argNo -isValidVTableRoot :: Capability -> Bool -isValidVTableRoot = isValidNativeRoot - checkValidIPCBuffer :: VPtr -> Capability -> KernelF SyscallError () checkValidIPCBuffer vptr (ArchObjectCap (FrameCap {capFIsDevice = False})) = do when (vptr .&. 
mask ipcBufferSizeBits /= 0) $ throw AlignmentError @@ -537,6 +544,10 @@ decodeARMFrameInvocationMap cte cap vptr rightsMask attr vspaceCap = do let attributes = attribsFromWord attr let frameSize = capFSize cap let vmRights = maskVMRights (capFVMRights cap) $ rightsFromWord rightsMask + let basePtr = capFBasePtr cap + assert (fromVPtr pptrBase <= fromPPtr basePtr && + fromPPtr basePtr < fromVPtr pptrTop) + "cap ptr must be in kernel window" (vspace,asid) <- checkVSpaceRoot vspaceCap 1 vspaceCheck <- lookupErrorOnFailure False $ findVSpaceForASID asid when (vspaceCheck /= vspace) $ throw $ InvalidCapability 1 @@ -544,14 +555,14 @@ decodeARMFrameInvocationMap cte cap vptr rightsMask attr vspaceCap = do let pgBits = pageBitsForSize frameSize case capFMappedAddress cap of Just (asid', vaddr') -> do - when (asid' /= asid) $ throw $ InvalidCapability 0 - when (vaddr' /= vptr) $ throw $ InvalidArgument 2 + when (asid' /= asid) $ throw $ InvalidCapability 1 + when (vaddr' /= vptr) $ throw $ InvalidArgument 0 Nothing -> do let vtop = vptr + (bit pgBits - 1) when (vtop > pptrUserTop) $ throw $ InvalidArgument 0 (bitsLeft, slot) <- withoutFailure $ lookupPTSlot vspace vptr unless (bitsLeft == pgBits) $ throw $ FailedLookup False $ MissingCapability bitsLeft - let base = addrFromPPtr (capFBasePtr cap) + let base = addrFromPPtr basePtr return $ InvokePage $ PageMap { pageMapCap = cap { capFMappedAddress = Just (asid,vptr) }, pageMapCTSlot = cte, @@ -616,8 +627,11 @@ decodeARMPageTableInvocationMap cte cap vptr attr vspaceCap = do (bitsLeft, slot) <- withoutFailure $ lookupPTSlot vspace vptr oldPTE <- withoutFailure $ getObject slot when (bitsLeft == pageBits || oldPTE /= InvalidPTE) $ throw DeleteFirst + assert (fromVPtr pptrBase <= fromPPtr (capPTBasePtr cap) && + fromPPtr (capPTBasePtr cap) < fromVPtr pptrTop) + "cap ptr must be in kernel window" let pte = PageTablePTE { - pteBaseAddress = addrFromPPtr (capPTBasePtr cap) } + ptePPN = addrFromPPtr (capPTBasePtr cap) `shiftR` pageBits } let vptr = vptr .&. complement (mask bitsLeft) return $ InvokePageTable $ PageTableMap { ptMapCap = ArchObjectCap $ cap { capPTMappedAddress = Just (asid, vptr) }, @@ -716,7 +730,8 @@ decodeARMASIDPoolInvocation label cap@(ASIDPoolCap {}) extraCaps = case vspaceCap of ArchObjectCap (PageTableCap { capPTMappedAddress = Nothing }) -> do - when (not (isVTableRoot vspaceCap) || isJust (capPTMappedAddress cap)) $ + -- C checks for a mapping here, but our case already checks that + when (not (isVTableRoot vspaceCap)) $ throw $ InvalidCapability 1 asidTable <- withoutFailure $ gets (armKSASIDTable . ksArchState) let base = capASIDBase cap @@ -779,12 +794,16 @@ performPageTableInvocation (PageTableUnmap cap slot) = do unmapPageTable asid vaddr ptr let slots = [ptr, ptr + bit pteBits .. 
ptr + bit (ptBits (capPTType cap)) - 1] mapM_ (flip storePTE InvalidPTE) slots + doMachineOp $ + cleanCacheRange_PoU (VPtr $ fromPPtr $ ptr) + (VPtr $ fromPPtr $ (ptr + bit (ptBits (capPTType cap)) - 1)) + (addrFromPPtr ptr) _ -> return () ArchObjectCap cap <- getSlotCap slot updateCap slot (ArchObjectCap $ cap { capPTMappedAddress = Nothing }) -performPageInvocation :: PageInvocation -> Kernel () +performPageInvocation :: PageInvocation -> Kernel [Word] performPageInvocation (PageMap cap ctSlot (pte,slot)) = do oldPte <- getObject slot let tlbFlushRequired = oldPte /= InvalidPTE @@ -794,6 +813,7 @@ performPageInvocation (PageMap cap ctSlot (pte,slot)) = do when tlbFlushRequired $ do (asid, vaddr) <- return $ fromJust $ capFMappedAddress cap invalidateTLBByASIDVA asid vaddr + return [] performPageInvocation (PageUnmap cap ctSlot) = do case capFMappedAddress cap of @@ -801,23 +821,17 @@ performPageInvocation (PageUnmap cap ctSlot) = do _ -> return () ArchObjectCap cap <- getSlotCap ctSlot updateCap ctSlot (ArchObjectCap $ cap { capFMappedAddress = Nothing }) + return [] performPageInvocation (PageGetAddr ptr) = do - let paddr = fromPAddr $ addrFromPPtr ptr - ct <- getCurThread - msgTransferred <- setMRs ct Nothing [paddr] - msgInfo <- return $ MI { - msgLength = msgTransferred, - msgExtraCaps = 0, - msgCapsUnwrapped = 0, - msgLabel = 0 } - setMessageInfo ct msgInfo + return [fromPAddr $ addrFromPPtr ptr] performPageInvocation (PageFlush flushType vstart vend pstart space asid) = do let start = VPtr $ fromPPtr $ ptrFromPAddr pstart let end = start + (vend - vstart) when (start < end) $ do doMachineOp $ doFlush flushType start end pstart + return [] performASIDControlInvocation :: ASIDControlInvocation -> Kernel () @@ -849,13 +863,20 @@ performASIDPoolInvocation (Assign asid poolPtr ctSlot) = do performARMMMUInvocation :: ArchInv.Invocation -> KernelP [Word] performARMMMUInvocation i = withoutPreemption $ do case i of - InvokeVSpace oper -> performVSpaceInvocation oper - InvokePageTable oper -> performPageTableInvocation oper + InvokeVSpace oper -> do + performVSpaceInvocation oper + return [] + InvokePageTable oper -> do + performPageTableInvocation oper + return [] InvokePage oper -> performPageInvocation oper - InvokeASIDControl oper -> performASIDControlInvocation oper - InvokeASIDPool oper -> performASIDPoolInvocation oper + InvokeASIDControl oper -> do + performASIDControlInvocation oper + return [] + InvokeASIDPool oper -> do + performASIDPoolInvocation oper + return [] InvokeVCPU _ -> fail "performARMMMUInvocation: not an MMU invocation" - return $ [] storePTE :: PPtr PTE -> PTE -> Kernel () storePTE slot pte = do diff --git a/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs b/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs index 2519c67b17..2e452b472b 100644 --- a/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs +++ b/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs @@ -71,10 +71,10 @@ paddrBase :: PAddr paddrBase = Platform.PAddr 0x0 pptrBase :: VPtr -pptrBase = VPtr 0xFFFFFFC000000000 +pptrBase = VPtr 0x0000008000000000 pptrTop :: VPtr -pptrTop = VPtr 0xFFFFFFFF80000000 +pptrTop = VPtr 0x000000FFC0000000 kernelELFPAddrBase :: PAddr kernelELFPAddrBase = toPAddr $ (fromPAddr Platform.physBase) + 0x4000000 @@ -161,6 +161,8 @@ setNextPC = setRegister (Register AARCH64.NextIP) -- It zeros every word to ensure that user tasks cannot access any private data -- that might previously have been stored in the region. 
+-- This function's abstract definition is in MachineOps.thy + clearMemory :: PPtr Word -> Int -> MachineMonad () clearMemory ptr byteLength = error "Unimplemented -- machine op" @@ -243,27 +245,15 @@ enableFpuEL01 = error "Unimplemented - machine op" getFAR :: MachineMonad VPtr getFAR = error "Unimplemented - machine op" -getDFSR :: MachineMonad Word -getDFSR = error "Unimplemented - machine op" - -getIFSR :: MachineMonad Word -getIFSR = error "Unimplemented - machine op" - - {- Hypervisor-specific status/control registers -} --- FIXME AARCH64: unused due to using asm intrinsics, but this should be fixed in C -getHSR :: MachineMonad Word -getHSR = error "Unimplemented - machine op" - --- FIXME AARCH64: unused due to using asm intrinsics, but this should be fixed in C setHCR :: Word -> MachineMonad () setHCR _hcr = error "Unimplemented - machine op" getESR :: MachineMonad Word getESR = error "Unimplemented - machine op" -addressTranslateS1 :: VPtr -> MachineMonad VPtr -- FIXME AARCH64: pending C PR +addressTranslateS1 :: VPtr -> MachineMonad VPtr addressTranslateS1 = error "Unimplemented - machine op" getSCTLR :: MachineMonad Word @@ -302,9 +292,9 @@ data VMRights deriving (Show, Eq) vmRightsToBits :: VMRights -> Word -vmRightsToBits VMKernelOnly = 1 -vmRightsToBits VMReadOnly = 2 -vmRightsToBits VMReadWrite = 3 +vmRightsToBits VMKernelOnly = 0 +vmRightsToBits VMReadOnly = 3 +vmRightsToBits VMReadWrite = 1 allowWrite :: VMRights -> Bool allowWrite VMKernelOnly = False @@ -341,7 +331,7 @@ data PTE pteDevice :: Bool, pteRights :: VMRights } | PageTablePTE { - pteBaseAddress :: PAddr } + ptePPN :: PAddr } deriving (Show, Eq) {- Simulator callbacks -} @@ -400,6 +390,9 @@ debugPrint str = liftIO $ putStrLn str fpuThreadDeleteOp :: Word -> MachineMonad () fpuThreadDeleteOp tcbPtr = error "Unimplemented callback" +isFpuEnable :: MachineMonad Bool +isFpuEnable = error "Unimplemented - lazy FPU switch abstracted as machine op" + {- GIC VCPU interface -} get_gic_vcpu_ctrl_hcr :: MachineMonad Word32 @@ -450,7 +443,7 @@ check_export_arch_timer = error "Unimplemented - machine op" {- Constants -} hcrVCPU = (0x80086039 :: Word) -- HCR_VCPU -hcrNative = (0x8e28703b :: Word) -- HCR_NATIVE +hcrNative = (0x8E28103B :: Word) -- HCR_NATIVE sctlrEL1VM = (0x34d58820 :: Word) -- SCTLR_EL1_VM sctlrDefault = (0x34d59824 :: Word) -- SCTLR_DEFAULT vgicHCREN = (0x1 :: Word32) -- VGIC_HCR_EN diff --git a/spec/haskell/src/SEL4/Machine/Hardware/ARM.lhs b/spec/haskell/src/SEL4/Machine/Hardware/ARM.lhs index b140649d7a..b4044c1cac 100644 --- a/spec/haskell/src/SEL4/Machine/Hardware/ARM.lhs +++ b/spec/haskell/src/SEL4/Machine/Hardware/ARM.lhs @@ -876,7 +876,7 @@ FIXME ARMHYP consider moving to platform code? 
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT > hcrVCPU = (0x87039 :: Word) -- HCR_VCPU -> hcrNative = (0xfe8703b :: Word) -- HCR_NATIVE +> hcrNative = (0xFE8103B :: Word) -- HCR_NATIVE > vgicHCREN = (0x1 :: Word) -- VGIC_HCR_EN > sctlrDefault = (0xc5187c :: Word) -- SCTLR_DEFAULT > actlrDefault = (0x40 :: Word) -- ACTLR_DEFAULT diff --git a/spec/haskell/src/SEL4/Machine/RegisterSet/AARCH64.hs b/spec/haskell/src/SEL4/Machine/RegisterSet/AARCH64.hs index 7589bb3202..7bf4414118 100644 --- a/spec/haskell/src/SEL4/Machine/RegisterSet/AARCH64.hs +++ b/spec/haskell/src/SEL4/Machine/RegisterSet/AARCH64.hs @@ -86,6 +86,7 @@ data VCPUReg = | VCPURegISR | VCPURegVBAR | VCPURegTPIDR_EL1 + | VCPURegVMPIDR_EL2 | VCPURegSP_EL1 | VCPURegELR_EL1 | VCPURegSPSR_EL1 diff --git a/spec/haskell/src/SEL4/Machine/RegisterSet/ARM.lhs b/spec/haskell/src/SEL4/Machine/RegisterSet/ARM.lhs index af81be2874..87db201d18 100644 --- a/spec/haskell/src/SEL4/Machine/RegisterSet/ARM.lhs +++ b/spec/haskell/src/SEL4/Machine/RegisterSet/ARM.lhs @@ -81,6 +81,7 @@ This module defines the ARM register set. > | VCPURegR10fiq > | VCPURegR11fiq > | VCPURegR12fiq +> | VCPURegVMPIDR > | VCPURegSPSRsvc > | VCPURegSPSRabt > | VCPURegSPSRund diff --git a/spec/haskell/src/SEL4/Model/PSpace.lhs b/spec/haskell/src/SEL4/Model/PSpace.lhs index 72928a1786..1f8fc6d5f9 100644 --- a/spec/haskell/src/SEL4/Model/PSpace.lhs +++ b/spec/haskell/src/SEL4/Model/PSpace.lhs @@ -6,6 +6,14 @@ This module contains the data structure and operations for the physical memory model. +\begin{impdetails} + +This module uses the C preprocessor to select a target architecture. + +> {-# LANGUAGE CPP #-} + +\end{impdetails} + > module SEL4.Model.PSpace ( > PSpace, newPSpace, initPSpace, > PSpaceStorable, @@ -21,6 +29,8 @@ This module contains the data structure and operations for the physical memory m % {-# BOOT-EXPORTS: PSpace #PRegion newPSpace #-} > import Prelude hiding (Word) +> import qualified SEL4.Model.PSpace.TARGET as Arch + > import SEL4.Model.StateData > import SEL4.Object.Structures @@ -234,6 +244,8 @@ No type checks are performed when deleting objects; "deleteObjects" simply delet > alignError bits > stateAssert (deletionIsSafe ptr bits) > "Object deletion would leave dangling pointers" +> stateAssert (deletionIsSafe_delete_locale ptr bits) +> "Object deletion would leave dangling pointers" > doMachineOp $ freeMemory (PPtr (fromPPtr ptr)) bits > ps <- gets ksPSpace > let inRange = (\x -> x .&. ((- mask bits) - 1) == fromPPtr ptr) @@ -241,7 +253,7 @@ No type checks are performed when deleting objects; "deleteObjects" simply delet > let ps' = ps { psMap = map' } > modify (\ks -> ks { ksPSpace = ps'}) -Clear the ghost state for user pages and cnodes within the deleted range. +Clear the ghost state for user pages, cnodes, and arch-specific objects within the deleted range. > modify (\ks -> ks { gsUserPages = (\x -> if inRange x > then Nothing else gsUserPages ks x) }) @@ -249,13 +261,17 @@ Clear the ghost state for user pages and cnodes within the deleted range. > "Object deletion would split CNodes." > modify (\ks -> ks { gsCNodes = (\x -> if inRange x > then Nothing else gsCNodes ks x) }) +> Arch.deleteGhost ptr bits > stateAssert ksASIDMapSafe "Object deletion would leave dangling PD pointers" -In "deleteObjects" above, we assert "deletionIsSafe"; that is, that there are no pointers to these objects remaining elsewhere in the kernel state. 
Since we cannot easily check this in the Haskell model, we assume that it is always true; the assertion is strengthened during translation into Isabelle. +In "deleteObjects" above, we make two assertions, which, when taken together, say that there are no pointers to these objects remaining elsewhere in the kernel state. Since we cannot easily check this in the Haskell model, we assume that it is always true; the assertion is strengthened during translation into Isabelle. We separate these properties into two assertions, since they are shown to be true by different means. > deletionIsSafe :: PPtr a -> Int -> KernelState -> Bool > deletionIsSafe _ _ _ = True +> deletionIsSafe_delete_locale :: PPtr a -> Int -> KernelState -> Bool +> deletionIsSafe_delete_locale _ _ _ = True + We also assert that the ghost CNodes are all either completely deleted or unchanged; no CNode should be partially in the range and partially deleted. Again, this assertion requires logical quantifiers, and is inserted in translation. > cNodePartialOverlap :: (Word -> Maybe Int) -> (Word -> Bool) -> Bool diff --git a/spec/haskell/src/SEL4/Model/PSpace/AARCH64.hs b/spec/haskell/src/SEL4/Model/PSpace/AARCH64.hs new file mode 100644 index 0000000000..917c5785b0 --- /dev/null +++ b/spec/haskell/src/SEL4/Model/PSpace/AARCH64.hs @@ -0,0 +1,31 @@ +-- +-- Copyright 2023, Proofcraft Pty Ltd +-- +-- SPDX-License-Identifier: GPL-2.0-only +-- + +-- This module contains architecture-specific code for PSpace, in particular +-- for potential ghost state updates when deleting objects. + +module SEL4.Model.PSpace.AARCH64(deleteGhost) where + +import Prelude hiding (Word) +import SEL4.Model.StateData +import SEL4.Model.StateData.AARCH64 +import SEL4.Machine.RegisterSet +import SEL4.Machine.Hardware.AARCH64 (PT_Type) + +import Data.Bits + +-- an assertion like cNodePartialOverlap, but for page tables, defined in Refine +pTablePartialOverlap :: (Word -> Maybe PT_Type) -> (Word -> Bool) -> Bool +pTablePartialOverlap _ _ = False + +deleteGhost :: PPtr a -> Int -> Kernel () +deleteGhost ptr bits = do + let inRange = (\x -> x .&. ((- mask bits) - 1) == fromPPtr ptr) + ptTypes <- gets (gsPTTypes . ksArchState) + let ptTypes' = (\x -> if inRange x then Nothing else ptTypes x) + stateAssert (\ks -> not (pTablePartialOverlap ptTypes inRange)) + "Object deletion would split page tables" + modify (\ks -> ks { ksArchState = (ksArchState ks) { gsPTTypes = ptTypes' } }) diff --git a/spec/haskell/src/SEL4/Model/PSpace/ARM.hs b/spec/haskell/src/SEL4/Model/PSpace/ARM.hs new file mode 100644 index 0000000000..f8ac3839f3 --- /dev/null +++ b/spec/haskell/src/SEL4/Model/PSpace/ARM.hs @@ -0,0 +1,17 @@ +-- +-- Copyright 2023, Proofcraft Pty Ltd +-- +-- SPDX-License-Identifier: GPL-2.0-only +-- + +-- This module contains architecture-specific code for PSpace, in particular +-- for potential ghost state updates when deleting objects. 
+ +module SEL4.Model.PSpace.ARM(deleteGhost) where + +import Prelude hiding (Word) +import SEL4.Model.StateData +import SEL4.Machine.RegisterSet + +deleteGhost :: PPtr a -> Int -> Kernel () +deleteGhost ptr bits = return () diff --git a/spec/haskell/src/SEL4/Model/PSpace/RISCV64.hs b/spec/haskell/src/SEL4/Model/PSpace/RISCV64.hs new file mode 100644 index 0000000000..16e9fc6ecd --- /dev/null +++ b/spec/haskell/src/SEL4/Model/PSpace/RISCV64.hs @@ -0,0 +1,17 @@ +-- +-- Copyright 2023, Proofcraft Pty Ltd +-- +-- SPDX-License-Identifier: GPL-2.0-only +-- + +-- This module contains architecture-specific code for PSpace, in particular +-- for potential ghost state updates when deleting objects. + +module SEL4.Model.PSpace.RISCV64(deleteGhost) where + +import Prelude hiding (Word) +import SEL4.Model.StateData +import SEL4.Machine.RegisterSet + +deleteGhost :: PPtr a -> Int -> Kernel () +deleteGhost ptr bits = return () diff --git a/spec/haskell/src/SEL4/Model/PSpace/X64.hs b/spec/haskell/src/SEL4/Model/PSpace/X64.hs new file mode 100644 index 0000000000..b07a1a4f06 --- /dev/null +++ b/spec/haskell/src/SEL4/Model/PSpace/X64.hs @@ -0,0 +1,17 @@ +-- +-- Copyright 2023, Proofcraft Pty Ltd +-- +-- SPDX-License-Identifier: GPL-2.0-only +-- + +-- This module contains architecture-specific code for PSpace, in particular +-- for potential ghost state updates when deleting objects. + +module SEL4.Model.PSpace.X64(deleteGhost) where + +import Prelude hiding (Word) +import SEL4.Model.StateData +import SEL4.Machine.RegisterSet + +deleteGhost :: PPtr a -> Int -> Kernel () +deleteGhost ptr bits = return () diff --git a/spec/haskell/src/SEL4/Model/StateData.lhs b/spec/haskell/src/SEL4/Model/StateData.lhs index 76663ed29c..29dc2ddaf6 100644 --- a/spec/haskell/src/SEL4/Model/StateData.lhs +++ b/spec/haskell/src/SEL4/Model/StateData.lhs @@ -127,7 +127,7 @@ Note that there is no error-signalling mechanism available to functions in "Kern The ready queue is simply a list of threads that are ready to run. Each thread in this list is at the same priority level. -> type ReadyQueue = [PPtr TCB] +> type ReadyQueue = TcbQueue This is a standard Haskell singly-linked list independent of the thread control block structures. However, in a real implementation, it @@ -153,7 +153,15 @@ replaces the previous one. > getCurThread = gets ksCurThread > setCurThread :: PPtr TCB -> Kernel () -> setCurThread tptr = modify (\ks -> ks { ksCurThread = tptr }) +> setCurThread tptr = do +> stateAssert idleThreadNotQueued "the idle thread cannot be in the ready queues" +> modify (\ks -> ks { ksCurThread = tptr }) + +In many places, we would like to be able to use the fact that threads in the +ready queues have runnable' thread state. We add an assertion that it does hold. + +> ready_qs_runnable :: KernelState -> Bool +> ready_qs_runnable _ = True Similarly, these functions access the idle thread pointer, the ready queue for a given priority level (adjusted to account for the active security domain), the requested action of the scheduler, and the interrupt handler state. 
@@ -232,7 +240,7 @@ A new kernel state structure contains an empty physical address space, a set of > ksCurDomain = 0, > ksDomainTime = 15, > ksReadyQueues = -> funPartialArray (const []) +> funPartialArray (const (TcbQueue {tcbQueueHead = Nothing, tcbQueueEnd = Nothing})) > ((0, 0), (fromIntegral numDomains, maxPriority)), > ksReadyQueuesL1Bitmap = funPartialArray (const 0) (0, fromIntegral numDomains), > ksReadyQueuesL2Bitmap = @@ -282,4 +290,12 @@ The function "findM" searches a list, returning the first item for which the giv > r <- f x > if r then return $ Just x else findM f xs +Several asserts about ksReadyQueues + +> ksReadyQueues_asrt :: KernelState -> Bool +> ksReadyQueues_asrt _ = True + +An assert that will say that the idle thread is not in a ready queue +> idleThreadNotQueued :: KernelState -> Bool +> idleThreadNotQueued _ = True diff --git a/spec/haskell/src/SEL4/Model/StateData/AARCH64.hs b/spec/haskell/src/SEL4/Model/StateData/AARCH64.hs index 832cabbbca..037fd06e5b 100644 --- a/spec/haskell/src/SEL4/Model/StateData/AARCH64.hs +++ b/spec/haskell/src/SEL4/Model/StateData/AARCH64.hs @@ -12,7 +12,7 @@ module SEL4.Model.StateData.AARCH64 where import Prelude hiding (Word) import SEL4.Machine -import SEL4.Machine.Hardware.AARCH64 (PTE(..), config_ARM_PA_SIZE_BITS_40) +import SEL4.Machine.Hardware.AARCH64 (PTE(..), PT_Type, config_ARM_PA_SIZE_BITS_40) import SEL4.Object.Structures.AARCH64 import Data.Array @@ -40,7 +40,8 @@ data KernelState = ARMKernelState { -- used e.g. for user threads with missing or invalid VSpace root armKSGlobalUserVSpace :: PPtr PTE, armHSCurVCPU :: Maybe (PPtr VCPU, Bool), - armKSGICVCPUNumListRegs :: Int + armKSGICVCPUNumListRegs :: Int, + gsPTTypes :: Word -> Maybe PT_Type } -- counting from 0 at bottom, i.e. number of levels = maxPTLevel + 1; diff --git a/spec/haskell/src/SEL4/Model/StateData/X64.lhs b/spec/haskell/src/SEL4/Model/StateData/X64.lhs index 73659eeeb5..7e99bd1d79 100644 --- a/spec/haskell/src/SEL4/Model/StateData/X64.lhs +++ b/spec/haskell/src/SEL4/Model/StateData/X64.lhs @@ -16,6 +16,7 @@ This module contains the architecture-specific kernel global data for the X86-64 > import SEL4.Object.Structures.X64 > import Data.Array +> import Data.Word(Word8) \end{impdetails} @@ -32,17 +33,17 @@ This module contains the architecture-specific kernel global data for the X86-64 > gdteBits = 3 > data KernelState = X64KernelState { -> x64KSASIDTable :: Array ASID (Maybe (PPtr ASIDPool)), -> x64KSSKIMPML4 :: PPtr PML4E, -> x64KSSKIMPDPTs :: [PPtr PDPTE], -> x64KSSKIMPDs :: [PPtr PDE], -> x64KSSKIMPTs :: [PPtr PTE], -> x64KSCurrentUserCR3 :: CR3, -> x64KSKernelVSpace :: PPtr Word -> X64VSpaceRegionUse, +> x64KSASIDTable :: Array ASID (Maybe (PPtr ASIDPool)), +> x64KSSKIMPML4 :: PPtr PML4E, +> x64KSSKIMPDPTs :: [PPtr PDPTE], +> x64KSSKIMPDs :: [PPtr PDE], +> x64KSSKIMPTs :: [PPtr PTE], +> x64KSCurrentUserCR3 :: CR3, +> x64KSKernelVSpace :: PPtr Word -> X64VSpaceRegionUse, > x64KSAllocatedIOPorts :: Array IOPort Bool, -> x64KSNumIOAPICs :: Word, -> x64KSIRQState :: Array IRQ X64IRQState} +> x64KSNumIOAPICs :: Word, +> x64KSIOAPICnIRQs :: Word -> Word8, +> x64KSIRQState :: Array IRQ X64IRQState} > newKernelState :: PAddr -> (KernelState, [PAddr]) > newKernelState _ = error "No initial state defined for x64" - diff --git a/spec/haskell/src/SEL4/Object/Endpoint.lhs b/spec/haskell/src/SEL4/Object/Endpoint.lhs index 497d50d575..873d91ea23 100644 --- a/spec/haskell/src/SEL4/Object/Endpoint.lhs +++ b/spec/haskell/src/SEL4/Object/Endpoint.lhs @@ -229,6 +229,7 @@ 
If an endpoint is deleted, then every pending IPC operation using it must be can > cancelAllIPC :: PPtr Endpoint -> Kernel () > cancelAllIPC epptr = do +> stateAssert ksReadyQueues_asrt "" > ep <- getEndpoint epptr > case ep of > IdleEP -> @@ -244,6 +245,7 @@ If a badged endpoint is recycled, then cancel every pending send operation using > cancelBadgedSends :: PPtr Endpoint -> Word -> Kernel () > cancelBadgedSends epptr badge = do +> stateAssert ksReadyQueues_asrt "" > ep <- getEndpoint epptr > case ep of > IdleEP -> return () diff --git a/spec/haskell/src/SEL4/Object/Instances.lhs b/spec/haskell/src/SEL4/Object/Instances.lhs index 5720f93ead..eb20a91117 100644 --- a/spec/haskell/src/SEL4/Object/Instances.lhs +++ b/spec/haskell/src/SEL4/Object/Instances.lhs @@ -138,6 +138,8 @@ By default, new threads are unable to change the security domains of other threa > tcbFaultHandler = CPtr 0, > tcbIPCBuffer = VPtr 0, > tcbBoundNotification = Nothing, +> tcbSchedPrev = Nothing, +> tcbSchedNext = Nothing, > tcbArch = newArchTCB } > injectKO = KOTCB > projectKO o = case o of diff --git a/spec/haskell/src/SEL4/Object/Interrupt/X64.lhs b/spec/haskell/src/SEL4/Object/Interrupt/X64.lhs index 6135fbc618..0e3ccfe14c 100644 --- a/spec/haskell/src/SEL4/Object/Interrupt/X64.lhs +++ b/spec/haskell/src/SEL4/Object/Interrupt/X64.lhs @@ -59,14 +59,14 @@ This module defines the machine-specific interrupt handling routines for x64. > > -- from ioapic_map_pin_to_vector > numIOAPICs <- withoutFailure $ gets (x64KSNumIOAPICs . ksArchState) +> ioAPICnIRQs <- withoutFailure $ gets (x64KSIOAPICnIRQs . ksArchState) > when (numIOAPICs == 0) $ throw IllegalOperation > rangeCheck ioapic 0 (numIOAPICs - 1) -> rangeCheck pin 0 (Arch.ioapicIRQLines - 1) +> rangeCheck pin 0 (ioAPICnIRQs ioapic - 1) > rangeCheck level (0::Word) 1 > rangeCheck polarity (0::Word) 1 > -> -- FIXME check semantics against toEnum, we might want == 0 here -> let vector = (fromIntegral $ fromEnum irq) + Arch.irqIntOffset +> let vector = fromIntegral (fromEnum irq) + Arch.irqIntOffset > return $ ArchInv.IssueIRQHandlerIOAPIC irq destSlot srcSlot ioapic > pin level polarity vector > diff --git a/spec/haskell/src/SEL4/Object/Notification.lhs b/spec/haskell/src/SEL4/Object/Notification.lhs index d462a19e81..ce735402f7 100644 --- a/spec/haskell/src/SEL4/Object/Notification.lhs +++ b/spec/haskell/src/SEL4/Object/Notification.lhs @@ -131,6 +131,7 @@ If a notification object is deleted, then pending receive operations must be can > cancelAllSignals :: PPtr Notification -> Kernel () > cancelAllSignals ntfnPtr = do +> stateAssert ksReadyQueues_asrt "" > ntfn <- getNotification ntfnPtr > case ntfnObj ntfn of > WaitingNtfn queue -> do diff --git a/spec/haskell/src/SEL4/Object/ObjectType/AARCH64.hs b/spec/haskell/src/SEL4/Object/ObjectType/AARCH64.hs index 1d3447f63c..b71e018100 100644 --- a/spec/haskell/src/SEL4/Object/ObjectType/AARCH64.hs +++ b/spec/haskell/src/SEL4/Object/ObjectType/AARCH64.hs @@ -158,6 +158,13 @@ placeNewDataObject regionBase sz isDevice = if isDevice then placeNewObject regionBase UserDataDevice sz else placeNewObject regionBase UserData sz +updatePTType :: PPtr () -> PT_Type -> Kernel () +updatePTType p pt_t = do + ptTypes <- gets (gsPTTypes . 
ksArchState) + let funupd = (\f x v y -> if y == x then v else f y) + let ptTypes' = funupd ptTypes (fromPPtr p) (Just pt_t) + modify (\ks -> ks { ksArchState = (ksArchState ks) { gsPTTypes = ptTypes' } }) + createObject :: ObjectType -> PPtr () -> Int -> Bool -> Kernel ArchCapability createObject t regionBase _ isDevice = let funupd = (\f x v y -> if y == x then v else f y) in @@ -189,10 +196,12 @@ createObject t regionBase _ isDevice = Arch.Types.PageTableObject -> do let ptSize = ptBits NormalPT_T - objBits (makeObject :: PTE) placeNewObject regionBase (makeObject :: PTE) ptSize + updatePTType regionBase NormalPT_T return $ PageTableCap (pointerCast regionBase) NormalPT_T Nothing Arch.Types.VSpaceObject -> do let ptSize = ptBits VSRootPT_T - objBits (makeObject :: PTE) placeNewObject regionBase (makeObject :: PTE) ptSize + updatePTType regionBase VSRootPT_T return $ PageTableCap (pointerCast regionBase) VSRootPT_T Nothing Arch.Types.VCPUObject -> do placeNewObject regionBase (makeObject :: VCPU) 0 diff --git a/spec/haskell/src/SEL4/Object/Structures.lhs b/spec/haskell/src/SEL4/Object/Structures.lhs index 1d0f8df2df..d3d1263e40 100644 --- a/spec/haskell/src/SEL4/Object/Structures.lhs +++ b/spec/haskell/src/SEL4/Object/Structures.lhs @@ -248,6 +248,14 @@ The TCB is used to store various data about the thread's current state: > tcbState :: ThreadState, > tcbMCP :: Priority, > tcbPriority :: Priority, + +\item a flag indicating whether the thread is a member of a ready queue. + Note that the flag is necessary, since although the tcbSchedPrev and tcbSchedNext fields listed below + are used only to navigate through a ready queue, we cannot say that a thread is queued + if and only if either its tcbSchedPrev or tcbSchedNext field is not Nothing. + For consider a thread that is the sole member of a ready queue. + It will have both its tcbSchedNext and tcbSchedPrev fields equal to Nothing, but it will still be tcbQueued. + > tcbQueued :: Bool, \item the thread's current fault state; @@ -270,6 +278,11 @@ The TCB is used to store various data about the thread's current state: > tcbBoundNotification :: Maybe (PPtr Notification), +\item the thread's pointers to the previous and next entries in a scheduling queue; + +> tcbSchedPrev :: Maybe (PPtr TCB), +> tcbSchedNext :: Maybe (PPtr TCB), + \item and any arch-specific TCB contents > tcbArch :: ArchTCB } @@ -403,7 +416,8 @@ This type is used to represent the required action, if any, of the scheduler nex \item IPC operations may request that the scheduler switch to a specific thread. -> | SwitchToThread (PPtr TCB) +> | SwitchToThread { +> schActTarget :: PPtr TCB } > deriving (Eq, Show) @@ -475,4 +489,6 @@ Various operations on the free index of an Untyped cap. 
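The note above on tcbQueued explains why queue membership has to be recorded explicitly rather than derived from the tcbSchedPrev and tcbSchedNext pointers. A small self-contained illustration of the singleton-queue case it appeals to (field names follow the spec; the cut-down Tcb record and the TcbPtr type are assumptions of this sketch):

    newtype TcbPtr = TcbPtr Integer deriving (Eq, Show)

    -- Only the scheduling-related fields of the TCB are modelled here.
    data Tcb = Tcb
      { tcbQueued    :: Bool
      , tcbSchedPrev :: Maybe TcbPtr
      , tcbSchedNext :: Maybe TcbPtr
      } deriving (Show)

    -- The sole member of a ready queue: it is queued, yet it has no
    -- neighbours, so both pointers are Nothing ...
    soleMember :: Tcb
    soleMember = Tcb { tcbQueued = True, tcbSchedPrev = Nothing, tcbSchedNext = Nothing }

    -- ... and an unqueued thread looks identical apart from the flag,
    -- which is why the flag cannot be dropped.
    unqueued :: Tcb
    unqueued = Tcb { tcbQueued = False, tcbSchedPrev = Nothing, tcbSchedNext = Nothing }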
> endPtr = capPtr cap + PPtr (2 ^ capBlockSize cap) - 1 > untypedZeroRange _ = Nothing - +> data TcbQueue = TcbQueue { +> tcbQueueHead :: Maybe (PPtr TCB), +> tcbQueueEnd :: Maybe (PPtr TCB) } diff --git a/spec/haskell/src/SEL4/Object/Structures/AARCH64.hs b/spec/haskell/src/SEL4/Object/Structures/AARCH64.hs index 224681b989..aa1d6ec3ac 100644 --- a/spec/haskell/src/SEL4/Object/Structures/AARCH64.hs +++ b/spec/haskell/src/SEL4/Object/Structures/AARCH64.hs @@ -145,7 +145,7 @@ makeVCPUObject = , vgicAPR = 0 , vgicLR = vgicLR } - , vcpuRegs = funArray (const 0) // [(VCPURegSCTLR, sctlrDefault)] + , vcpuRegs = funArray (const 0) // [(VCPURegSCTLR, sctlrEL1VM)] , vcpuVPPIMasked = funArray (const False) , vcpuVTimer = VirtTimer 0 } diff --git a/spec/haskell/src/SEL4/Object/Untyped.lhs b/spec/haskell/src/SEL4/Object/Untyped.lhs index 31ef37042d..1ff37172b5 100644 --- a/spec/haskell/src/SEL4/Object/Untyped.lhs +++ b/spec/haskell/src/SEL4/Object/Untyped.lhs @@ -213,10 +213,12 @@ Will be set to something stricter in Isabelle when required. For verification purposes a check is made that the region the objects are created in does not overlap with any existing CNodes. > let totalObjectSize = (length destSlots) `shiftL` (getObjectSize newType userSize) -> stateAssert (\x -> not (cNodeOverlap (gsCNodes x) -> (\x -> fromPPtr retypeBase <= x -> && x <= fromPPtr retypeBase + fromIntegral totalObjectSize - 1))) +> let inRange = (\x -> fromPPtr retypeBase <= x && +> x <= fromPPtr retypeBase + fromIntegral totalObjectSize - 1) +> stateAssert (\s -> not (cNodeOverlap (gsCNodes s) inRange)) > "CNodes present in region to be retyped." +> stateAssert (\s -> not (archOverlap s inRange)) +> "Arch specific non-overlap requirements." > assert (canonicalAddressAssert retypeBase) "Canonical ptr required on some architectures" > let freeRef = retypeBase + PPtr (fromIntegral totalObjectSize) > updateFreeIndex srcSlot (getFreeIndex base freeRef) @@ -230,4 +232,7 @@ This function performs the check that CNodes do not overlap with the retyping re > cNodeOverlap :: (Word -> Maybe Int) -> (Word -> Bool) -> Bool > cNodeOverlap _ _ = False +Architecture specific assertion similar to cNodeOverlap, for architectures that have variable-sized objects. +> archOverlap :: KernelState -> (Word -> Bool) -> Bool +> archOverlap _ _ = False diff --git a/spec/haskell/src/SEL4/Object/VCPU/AARCH64.hs b/spec/haskell/src/SEL4/Object/VCPU/AARCH64.hs index 5a896b0c1c..94a6fea4c5 100644 --- a/spec/haskell/src/SEL4/Object/VCPU/AARCH64.hs +++ b/spec/haskell/src/SEL4/Object/VCPU/AARCH64.hs @@ -39,12 +39,13 @@ import {-# SOURCE #-} SEL4.Object.Interrupt import Data.Bits hiding (countTrailingZeros) import Data.Word(Word8, Word16, Word32, Word64) +import Data.WordLib(countTrailingZeros) import Data.Array import Data.Maybe {- VCPU: Helper functions -} --- FIXME AARCH64: can be Reader Monad +-- FIXME: make this a Reader Monad when we move to MCS curVCPUActive :: Kernel Bool curVCPUActive = do vcpu <- gets (armHSCurVCPU . 
ksArchState) @@ -96,6 +97,8 @@ associateVCPUTCB vcpuPtr tcbPtr = do _ -> return () archThreadSet (\atcb -> atcb { atcbVCPUPtr = Just vcpuPtr }) tcbPtr setObject vcpuPtr $ vcpu { vcpuTCBPtr = Just tcbPtr } + ct <- getCurThread + when (tcbPtr == ct) $ vcpuSwitch (Just vcpuPtr) return [] {- VCPU: Update functions -} @@ -352,6 +355,8 @@ restoreVirtTimer vcpuPtr = do vcpuWriteReg vcpuPtr VCPURegCNTVOFF offset vcpuRestoreReg vcpuPtr VCPURegCNTVOFF + -- read vcpu again, so we don't have to reason about independence of vcpuWriteRegister changes + vcpu <- getObject vcpuPtr let vppi = fromJust $ irqVPPIEventIndex (IRQ irqVTimerEvent) let masked = (vcpuVPPIMasked vcpu) ! vppi safeToUnmask <- isIRQActive (IRQ irqVTimerEvent) @@ -401,7 +406,6 @@ vcpuDisable vcpuPtrOpt = do armvVCPUSave :: PPtr VCPU -> Bool -> Kernel () armvVCPUSave vcpuPtr active = do vcpuSaveRegRange vcpuPtr VCPURegTTBR0 VCPURegSPSR_EL1 - doMachineOp isb vcpuSave :: Maybe (PPtr VCPU, Bool) -> Kernel () vcpuSave (Just (vcpuPtr, active)) = do @@ -491,11 +495,6 @@ vcpuSwitch (Just new) = do {- VGICMaintenance -} --- FIXME AARCH64: try move this to a more generic location -countTrailingZeros :: (Bits b, FiniteBits b) => b -> Int -countTrailingZeros w = - length . takeWhile not . map (testBit w) $ [0 .. finiteBitSize w - 1] - vgicMaintenance :: Kernel () vgicMaintenance = do hsCurVCPU <- gets (armHSCurVCPU . ksArchState) diff --git a/spec/haskell/src/SEL4/Object/VCPU/ARM.lhs b/spec/haskell/src/SEL4/Object/VCPU/ARM.lhs index 7217076ef2..c277ced0ee 100644 --- a/spec/haskell/src/SEL4/Object/VCPU/ARM.lhs +++ b/spec/haskell/src/SEL4/Object/VCPU/ARM.lhs @@ -1,4 +1,5 @@ % +% Copyright 2022, Proofcraft Pty Ltd % Copyright 2014, General Dynamics C4 Systems % % SPDX-License-Identifier: GPL-2.0-only @@ -42,6 +43,7 @@ hypervisor extensions on ARM. > import Data.Bits hiding (countTrailingZeros) > import Data.Word(Word8, Word16, Word32, Word64) +> import Data.WordLib(countTrailingZeros) > import Data.Array > import Data.Maybe @@ -490,10 +492,6 @@ For initialisation, see makeVCPUObject. \subsection{VGICMaintenance} -> countTrailingZeros :: (Bits b, FiniteBits b) => b -> Int -> countTrailingZeros w = -> length . takeWhile not . map (testBit w) $ [0 .. finiteBitSize w - 1] - > vgicMaintenance :: Kernel () > vgicMaintenance = do > hsCurVCPU <- gets (armHSCurVCPU . ksArchState) diff --git a/spec/haskell/stack.yaml b/spec/haskell/stack.yaml index 42d98d3d38..91eced2a9e 100644 --- a/spec/haskell/stack.yaml +++ b/spec/haskell/stack.yaml @@ -7,8 +7,8 @@ # We use `stack` only to install GHC and cabal-install, not to build the project. 
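Both VCPU modules above delete their local copies of countTrailingZeros and import the shared one from Data.WordLib instead. The definition being removed is the straightforward bit scan; restated as a self-contained snippet (the import list here is an assumption; Data.Bits also exports a countTrailingZeros method, which the selective import avoids clashing with):

    import Data.Bits (Bits, FiniteBits, finiteBitSize, testBit)

    -- Same definition as the removed local copies: count how many
    -- consecutive low-order bits of w are clear.
    countTrailingZeros :: (Bits b, FiniteBits b) => b -> Int
    countTrailingZeros w =
      length . takeWhile not . map (testBit w) $ [0 .. finiteBitSize w - 1]

    -- For example, countTrailingZeros (8 :: Int) evaluates to 3,
    -- and countTrailingZeros (0 :: Int) to finiteBitSize (0 :: Int).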
# The rest of the build works via cabal -# Stackage LTS Haskell 19.12 (ghc-9.0.2) -resolver: lts-19.12 +# Stackage LTS Haskell 20.25 (ghc-9.2.8) +resolver: lts-20.25 packages: [] extra-deps: [] diff --git a/spec/machine/AARCH64/Arch_Kernel_Config_Lemmas.thy b/spec/machine/AARCH64/Arch_Kernel_Config_Lemmas.thy new file mode 100644 index 0000000000..273beb765c --- /dev/null +++ b/spec/machine/AARCH64/Arch_Kernel_Config_Lemmas.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Architecture-specific lemmas constraining Kernel_Config definitions *) + +theory Arch_Kernel_Config_Lemmas +imports + Kernel_Config_Lemmas + Platform +begin + +context Arch begin global_naming AARCH64 + +(* Currently no restrictions required *) + +end +end diff --git a/spec/machine/AARCH64/MachineOps.thy b/spec/machine/AARCH64/MachineOps.thy index 719edd78cf..a2d5b3650c 100644 --- a/spec/machine/AARCH64/MachineOps.thy +++ b/spec/machine/AARCH64/MachineOps.thy @@ -10,7 +10,7 @@ chapter "Machine Operations" theory MachineOps imports Word_Lib.WordSetup - Lib.NonDetMonad + Monads.Nondet_Monad MachineMonad begin @@ -104,10 +104,10 @@ definition plic_complete_claim :: "irq \ unit machine_monad" where "plic_complete_claim irq \ machine_op_lift (plic_complete_claim_impl irq)" text \ - Interrupts that cannot occur while the kernel is running (e.g. at preemption points), but - that can occur from user mode. Empty on AARCH64.\ + Interrupts that cannot occur while the kernel is running (e.g. at preemption points), + but that can occur from user mode.\ definition non_kernel_IRQs :: "irq set" where - "non_kernel_IRQs = {}" + "non_kernel_IRQs = {irqVGICMaintenance, irqVTimerEvent}" text \@{term getActiveIRQ} is oracle-based and deterministic to allow information flow proofs. It updates the IRQ state to the reflect the passage of time since last the IRQ, then it gets the active @@ -132,33 +132,6 @@ definition setInterruptMode :: "irq \ bool \ bool \ \irq levelTrigger polarityLow. return ()" -subsection "Clearing Memory" - -text \Clear memory contents to recycle it as user memory\ -definition clearMemory :: "machine_word \ nat \ unit machine_monad" where - "clearMemory ptr bytelength \ - mapM_x (\p. storeWord p 0) [ptr, ptr + word_size .e. ptr + (of_nat bytelength) - 1]" - -text \Haskell simulator interface stub.\ -definition clearMemoryVM :: "machine_word \ nat \ unit machine_monad" where - "clearMemoryVM ptr bits \ return ()" - -text \ - Initialize memory to be used as user memory. Note that zeroing out the memory is redundant - in the specifications. In any case, we cannot abstract from the call to cleanCacheRange, which - appears in the implementation.\ -abbreviation (input) "initMemory == clearMemory" - -text \ - Free memory that had been initialized as user memory. While freeing memory is a no-op in the - implementation, we zero out the underlying memory in the specifications to avoid garbage. If we - know that there is no garbage, we can compute from the implementation state what the exact memory - content in the specifications is.\ -definition freeMemory :: "machine_word \ nat \ unit machine_monad" where - "freeMemory ptr bits \ - mapM_x (\p. storeWord p 0) [ptr, ptr + word_size .e. 
ptr + 2 ^ bits - 1]" - - subsection "User Monad and Registers" type_synonym user_regs = "register \ machine_word" @@ -222,6 +195,15 @@ consts' fpuThreadDeleteOp_impl :: "machine_word \ unit machine_rest_ definition fpuThreadDeleteOp :: "machine_word \ unit machine_monad" where "fpuThreadDeleteOp thread_ptr \ machine_op_lift (fpuThreadDeleteOp_impl thread_ptr)" +(* FIXME this machine op is used to abstract the entire lazy FPU switch interrupt mechanism, + which can only trigger when the current thread's FPU is disabled and it performs an FPU + operation. We have no model for this mechanism or the state that it caches, so for + verification purposes we act as if the FPU is always enabled. + Future lazy FPU switch overhaul will involve the state that this operation reads, at which + point it should become a normal function. *) +definition isFpuEnable :: "bool machine_monad" where + "isFpuEnable \ return True" + subsection "Fault Registers" @@ -342,9 +324,9 @@ definition read_cntpct :: "64 word machine_monad" where subsection "Hypervisor Banked Registers" -consts' vcpuHardwareRegVal :: "vcpureg \ machine_state \ machine_word" +consts' vcpuHardwareReg_val :: "vcpureg \ machine_state \ machine_word" definition readVCPUHardwareReg :: "vcpureg \ machine_word machine_monad" where - "readVCPUHardwareReg reg \ gets (vcpuHardwareRegVal reg)" + "readVCPUHardwareReg reg \ gets (vcpuHardwareReg_val reg)" consts' writeVCPUHardwareReg_impl :: "vcpureg \ machine_word \ unit machine_rest_monad" definition writeVCPUHardwareReg :: "vcpureg \ machine_word \ unit machine_monad" where @@ -430,6 +412,35 @@ lemmas cache_machine_op_defs = branchFlushRange_def +subsection "Clearing Memory" + +text \Clear memory contents to recycle it as user memory\ +definition clearMemory :: "machine_word \ nat \ unit machine_monad" where + "clearMemory ptr bytelength \ do + mapM_x (\p. storeWord p 0) [ptr, ptr + word_size .e. ptr + (of_nat bytelength) - 1]; + cleanCacheRange_RAM ptr (ptr + of_nat bytelength - 1) (addrFromPPtr ptr) + od" + +text \Haskell simulator interface stub.\ +definition clearMemoryVM :: "machine_word \ nat \ unit machine_monad" where + "clearMemoryVM ptr bits \ return ()" + +text \ + Initialize memory to be used as user memory. Note that zeroing out the memory is redundant + in the specifications. In any case, we cannot abstract from the call to cleanCacheRange, which + appears in the implementation.\ +abbreviation (input) "initMemory == clearMemory" + +text \ + Free memory that had been initialized as user memory. While freeing memory is a no-op in the + implementation, we zero out the underlying memory in the specifications to avoid garbage. If we + know that there is no garbage, we can compute from the implementation state what the exact memory + content in the specifications is.\ +definition freeMemory :: "machine_word \ nat \ unit machine_monad" where + "freeMemory ptr bits \ + mapM_x (\p. storeWord p 0) [ptr, ptr + word_size .e. 
ptr + 2 ^ bits - 1]" + + subsection "Virtual Memory" consts' setVSpaceRoot_impl :: "paddr \ machine_word \ unit machine_rest_monad" diff --git a/spec/machine/AARCH64/Platform.thy b/spec/machine/AARCH64/Platform.thy index 312e2bc4b4..6d6c5c712a 100644 --- a/spec/machine/AARCH64/Platform.thy +++ b/spec/machine/AARCH64/Platform.thy @@ -12,12 +12,13 @@ imports "Word_Lib.WordSetup" "Lib.Defs" Setup_Locale - Kernel_Config_Lemmas + Kernel_Config begin context Arch begin global_naming AARCH64 -type_synonym irq = "9 word" (* match IRQ_CNODE_SLOT_BITS in seL4 config *) +type_synonym irq_len = 9 (* match IRQ_CNODE_SLOT_BITS in seL4 config *) +type_synonym irq = "irq_len word" type_synonym paddr = machine_word abbreviation (input) "toPAddr \ id" @@ -35,28 +36,28 @@ abbreviation (input) "fromPAddr \ id" of these constants. *) -(* FIXME AARCH64: canonical bit isn't used, addresses >= 2^48 are invalid *) +(* The canonical bit is the highest bit that can be set in a virtual address and still accepted + by the hardware. Any bit higher than that will be overwritten by sign extension, zero extension, + or result in a fault. + For AArch64 with hyp, addresses >= 2^48 are invalid, and sign-extension is not used by the + hardware. *) definition canonical_bit :: nat where "canonical_bit = 47" definition kdevBase :: machine_word where "kdevBase = 0x000000FFFFE00000" -(* FIXME AARCH64: are powers-of-2 definitions with sanity checks better than the raw numbers here? *) definition kernelELFBase :: machine_word where - "kernelELFBase = 0x8000000000 + 0x80000000" (* 2^39 + 2^31 *) - -lemma "kernelELFBase = 0x8080000000" (* Sanity check with C *) - by (simp add: kernelELFBase_def) + "kernelELFBase = 2^39 + physBase" definition kernelELFPAddrBase :: machine_word where - "kernelELFPAddrBase = 0x80000000" (* 2^31 *) + "kernelELFPAddrBase = physBase" definition kernelELFBaseOffset :: machine_word where "kernelELFBaseOffset = kernelELFBase - kernelELFPAddrBase" definition pptrBase :: machine_word where - "pptrBase = 0x8000000000" (* 2^39 | FIXME AARCH64: likely to be moved to 0x0 *) + "pptrBase = 0x8000000000" (* 2^39 *) definition pptrUserTop :: machine_word where "pptrUserTop \ mask (if config_ARM_PA_SIZE_BITS_40 then 40 else 44)" @@ -64,14 +65,8 @@ definition pptrUserTop :: machine_word where lemma "pptrUserTop = (if config_ARM_PA_SIZE_BITS_40 then 0xFFFFFFFFFF else 0xFFFFFFFFFFF)" (* Sanity check with C *) by (simp add: pptrUserTop_def mask_def) -(* FIXME AARCH64: we might want to remove this for improved genericity *) -schematic_goal pptrUserTop_def': (* direct constant definition *) - "AARCH64.pptrUserTop = numeral ?x" - by (simp add: AARCH64.pptrUserTop_def Kernel_Config.config_ARM_PA_SIZE_BITS_40_def mask_def - del: word_eq_numeral_iff_iszero) - definition pptrTop :: machine_word where - "pptrTop = 0xFFFFFFFF80000000" (* FIXME AARCH64: review; copy/paste from Haskell *) + "pptrTop = 2^40 - 2^30" (* FIXME AARCH64: see also seL4/seL4#957 *) definition paddrBase :: machine_word where "paddrBase \ 0" @@ -106,5 +101,64 @@ definition irqVTimerEvent :: irq where definition pageColourBits :: nat where "pageColourBits \ undefined" \ \not implemented on this platform\ + +section \Page table sizes\ + +definition vs_index_bits :: nat where + "vs_index_bits \ if config_ARM_PA_SIZE_BITS_40 then 10 else (9::nat)" + +end + +(* Need to declare code equation outside Arch locale *) +declare AARCH64.vs_index_bits_def[code] + +context Arch begin global_naming AARCH64 + +lemma vs_index_bits_ge0[simp, intro!]: "0 < vs_index_bits" + by 
(simp add: vs_index_bits_def) + +(* A dependent-ish type in Isabelle. We use typedef here instead of value_type so that we can + retain a symbolic value (vs_index_bits) for the size of the type instead of getting a plain + number such as 9 or 10. *) +typedef vs_index_len = "{n :: nat. n < vs_index_bits}" by auto + +end + +instantiation AARCH64.vs_index_len :: len0 +begin + interpretation Arch . + definition len_of_vs_index_len: "len_of (x::vs_index_len itself) \ CARD(vs_index_len)" + instance .. end + +instantiation AARCH64.vs_index_len :: len +begin + interpretation Arch . + instance + proof + show "0 < LENGTH(vs_index_len)" + by (simp add: len_of_vs_index_len type_definition.card[OF type_definition_vs_index_len]) + qed +end + +context Arch begin global_naming AARCH64 + +type_synonym vs_index = "vs_index_len word" + +type_synonym pt_index_len = 9 +type_synonym pt_index = "pt_index_len word" + +text \Sanity check:\ +lemma length_vs_index_len[simp]: + "LENGTH(vs_index_len) = vs_index_bits" + by (simp add: len_of_vs_index_len type_definition.card[OF type_definition_vs_index_len]) + + +section \C array sizes corresponding to page table sizes\ + +value_type pt_array_len = "(2::nat) ^ LENGTH(pt_index_len)" +value_type vs_array_len = "(2::nat) ^ vs_index_bits" + +end + end diff --git a/spec/machine/ARM/Arch_Kernel_Config_Lemmas.thy b/spec/machine/ARM/Arch_Kernel_Config_Lemmas.thy new file mode 100644 index 0000000000..e75bf1d3a6 --- /dev/null +++ b/spec/machine/ARM/Arch_Kernel_Config_Lemmas.thy @@ -0,0 +1,23 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Architecture-specific lemmas constraining Kernel_Config definitions *) + +theory Arch_Kernel_Config_Lemmas +imports + Kernel_Config_Lemmas + Platform +begin + +context Arch begin global_naming ARM + +(* note: 24 = pageBitsForSize ARMSuperSection, we do not have access to ASpec at this point *) +lemma physBase_aligned: + "is_aligned physBase 24" + by (simp add: is_aligned_def Kernel_Config.physBase_def) + +end +end diff --git a/spec/machine/ARM/MachineOps.thy b/spec/machine/ARM/MachineOps.thy index f0afc8b470..52bc600c45 100644 --- a/spec/machine/ARM/MachineOps.thy +++ b/spec/machine/ARM/MachineOps.thy @@ -495,19 +495,20 @@ definition section "User Monad" -type_synonym user_context = "register \ machine_word" +type_synonym user_regs = "register \ machine_word" + +datatype user_context = UserContext (user_regs : user_regs) type_synonym 'a user_monad = "(user_context, 'a) nondet_monad" -definition - getRegister :: "register \ machine_word user_monad" -where - "getRegister r \ gets (\uc. uc r)" +definition getRegister :: "register \ machine_word user_monad" where + "getRegister r \ gets (\s. user_regs s r)" -definition - setRegister :: "register \ machine_word \ unit user_monad" -where - "setRegister r v \ modify (\uc. uc (r := v))" +definition modify_registers :: "(user_regs \ user_regs) \ user_context \ user_context" where + "modify_registers f uc \ UserContext (f (user_regs uc))" + +definition setRegister :: "register \ machine_word \ unit user_monad" where + "setRegister r v \ modify (\s. 
UserContext ((user_regs s) (r := v)))" definition "getRestartPC \ getRegister FaultIP" diff --git a/spec/machine/ARM/Platform.thy b/spec/machine/ARM/Platform.thy index e2242fab02..91abe6940c 100644 --- a/spec/machine/ARM/Platform.thy +++ b/spec/machine/ARM/Platform.thy @@ -12,7 +12,7 @@ imports "Lib.Lib" "Word_Lib.WordSetup" Setup_Locale - Kernel_Config_Lemmas + Kernel_Config begin context Arch begin global_naming ARM @@ -39,15 +39,14 @@ definition cacheLineBits :: nat where definition cacheLine :: nat where "cacheLine = 2^cacheLineBits" -(* Arch specific kernel base address used for haskell spec *) +(* The first virtual address of the kernel's physical memory window *) definition pptrBase :: word32 where "pptrBase \ 0xe0000000" -definition physBase :: word32 where - "physBase \ 0x10000000" +abbreviation (input) "paddrBase \ physBase" definition pptrBaseOffset :: word32 where - "pptrBaseOffset \ pptrBase - physBase" + "pptrBaseOffset \ pptrBase - paddrBase" definition kernelELFPAddrBase :: word32 where "kernelELFPAddrBase \ physBase" diff --git a/spec/machine/ARM_HYP/Arch_Kernel_Config_Lemmas.thy b/spec/machine/ARM_HYP/Arch_Kernel_Config_Lemmas.thy new file mode 100644 index 0000000000..728ea9f88e --- /dev/null +++ b/spec/machine/ARM_HYP/Arch_Kernel_Config_Lemmas.thy @@ -0,0 +1,23 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Architecture-specific lemmas constraining Kernel_Config definitions *) + +theory Arch_Kernel_Config_Lemmas +imports + Kernel_Config_Lemmas + Platform +begin + +context Arch begin global_naming ARM_HYP + +(* note: 25 = pageBitsForSize ARMSuperSection, we do not have access to ASpec at this point *) +lemma physBase_aligned: + "is_aligned physBase 25" + by (simp add: is_aligned_def Kernel_Config.physBase_def) + +end +end diff --git a/spec/machine/ARM_HYP/MachineOps.thy b/spec/machine/ARM_HYP/MachineOps.thy index daeb2e3f30..d08071e0cd 100644 --- a/spec/machine/ARM_HYP/MachineOps.thy +++ b/spec/machine/ARM_HYP/MachineOps.thy @@ -705,11 +705,11 @@ where subsection "Hypervisor Banked Registers" consts' - vcpuHardwareRegVal :: "vcpureg \ machine_state \ machine_word" + vcpuHardwareReg_val :: "vcpureg \ machine_state \ machine_word" definition readVCPUHardwareReg :: "vcpureg \ machine_word machine_monad" where - "readVCPUHardwareReg reg \ gets (vcpuHardwareRegVal reg)" + "readVCPUHardwareReg reg \ gets (vcpuHardwareReg_val reg)" consts' writeVCPUHardwareReg_impl :: "vcpureg \ machine_word \ unit machine_rest_monad" @@ -720,19 +720,20 @@ where section "User Monad" -type_synonym user_context = "register \ machine_word" +type_synonym user_regs = "register \ machine_word" + +datatype user_context = UserContext (user_regs : user_regs) type_synonym 'a user_monad = "(user_context, 'a) nondet_monad" -definition - getRegister :: "register \ machine_word user_monad" -where - "getRegister r \ gets (\uc. uc r)" +definition getRegister :: "register \ machine_word user_monad" where + "getRegister r \ gets (\s. user_regs s r)" -definition - setRegister :: "register \ machine_word \ unit user_monad" -where - "setRegister r v \ modify (\uc. uc (r := v))" +definition modify_registers :: "(user_regs \ user_regs) \ user_context \ user_context" where + "modify_registers f uc \ UserContext (f (user_regs uc))" + +definition setRegister :: "register \ machine_word \ unit user_monad" where + "setRegister r v \ modify (\s. 
UserContext ((user_regs s) (r := v)))" definition "getRestartPC \ getRegister FaultIP" diff --git a/spec/machine/ARM_HYP/Platform.thy b/spec/machine/ARM_HYP/Platform.thy index 4048f98459..6c121747c6 100644 --- a/spec/machine/ARM_HYP/Platform.thy +++ b/spec/machine/ARM_HYP/Platform.thy @@ -12,7 +12,7 @@ imports "Lib.Lib" "Word_Lib.WordSetup" Setup_Locale - Kernel_Config_Lemmas + Kernel_Config begin context Arch begin global_naming ARM_HYP @@ -39,23 +39,20 @@ definition cacheLineBits :: nat where definition cacheLine :: nat where "cacheLine = 2^cacheLineBits" -(* Arch specific kernel base address used for haskell spec *) +(* The first virtual address of the kernel's physical memory window *) definition pptrBase :: word32 where "pptrBase \ 0xe0000000" -definition physBase :: word32 where - "physBase \ 0x80000000" - abbreviation (input) "paddrBase \ physBase" +definition pptrBaseOffset :: machine_word where + "pptrBaseOffset = pptrBase - paddrBase" + definition pptrTop :: "32 word" where "pptrTop \ 0xfff00000" definition paddrTop :: "32 word" where - "paddrTop \ pptrTop - (pptrBase - physBase)" - -definition pptrBaseOffset :: word32 where - "pptrBaseOffset \ pptrBase - physBase" + "paddrTop \ pptrTop - pptrBaseOffset" definition kernelELFPAddrBase :: word32 where "kernelELFPAddrBase \ physBase" diff --git a/spec/machine/Kernel_Config_Lemmas.thy b/spec/machine/Kernel_Config_Lemmas.thy index 52fa9cbc02..61a711d8f0 100644 --- a/spec/machine/Kernel_Config_Lemmas.thy +++ b/spec/machine/Kernel_Config_Lemmas.thy @@ -4,6 +4,8 @@ * SPDX-License-Identifier: GPL-2.0-only *) +(* Architecture-independent lemmas constraining Kernel_Config definitions *) + theory Kernel_Config_Lemmas imports "$L4V_ARCH/Kernel_Config" begin diff --git a/spec/machine/RISCV64/Arch_Kernel_Config_Lemmas.thy b/spec/machine/RISCV64/Arch_Kernel_Config_Lemmas.thy new file mode 100644 index 0000000000..ae9fd4d637 --- /dev/null +++ b/spec/machine/RISCV64/Arch_Kernel_Config_Lemmas.thy @@ -0,0 +1,46 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Architecture-specific lemmas constraining Kernel_Config definitions *) + +theory Arch_Kernel_Config_Lemmas +imports + Kernel_Config_Lemmas + Platform +begin + +context Arch begin global_naming RISCV64 + +lemma pptrBase_kernelELFBase: + "pptrBase < kernelELFBase" + by (simp add: pptrBase_def canonical_bit_def kernelELFBase_def kernelELFPAddrBase_def pptrTop_def + Kernel_Config.physBase_def mask_def) + +(* 12 in this lemma and below is pageBits, which is not yet defined in this theory. + Definition will be folded and the lemmas shadowed in AInvs. *) +lemma is_page_aligned_physBase: + "is_aligned physBase 12" + by (simp add: Kernel_Config.physBase_def is_aligned_def) + +(* 22 is kernel_window_bits, defined in Init_A. To be folded in AInvs. 
*) +lemma kernel_window_sufficient: + "pptrBase + (1 << 22) \ kernelELFBase" + unfolding pptrBase_def canonical_bit_def kernelELFBase_def kernelELFPAddrBase_def pptrTop_def + by (simp add: mask_def Kernel_Config.physBase_def) + +lemma kernel_elf_window_at_least_page: + "kernelELFBase + 2 ^ 12 \ kdevBase" + unfolding kernelELFBase_def kernelELFPAddrBase_def kdevBase_def pptrTop_def + by (simp add: mask_def Kernel_Config.physBase_def) + +(* This doesn't follow from alignment, because we need <, not \ *) +lemma kernelELFBase_no_overflow: + "kernelELFBase < kernelELFBase + 2 ^ 12" + unfolding kernelELFBase_def kernelELFPAddrBase_def pptrTop_def + by (simp add: mask_def Kernel_Config.physBase_def) + +end +end diff --git a/spec/machine/RISCV64/MachineOps.thy b/spec/machine/RISCV64/MachineOps.thy index a48560d6b7..a8d52be684 100644 --- a/spec/machine/RISCV64/MachineOps.thy +++ b/spec/machine/RISCV64/MachineOps.thy @@ -9,7 +9,7 @@ chapter "Machine Operations" theory MachineOps imports "Word_Lib.WordSetup" - "Lib.NonDetMonad" + "Monads.Nondet_Monad" MachineMonad begin diff --git a/spec/machine/RISCV64/Platform.thy b/spec/machine/RISCV64/Platform.thy index 80509de467..1443ed182c 100644 --- a/spec/machine/RISCV64/Platform.thy +++ b/spec/machine/RISCV64/Platform.thy @@ -12,7 +12,7 @@ imports "Word_Lib.WordSetup" "Lib.Defs" Setup_Locale - Kernel_Config_Lemmas + Kernel_Config begin context Arch begin global_naming RISCV64 @@ -105,16 +105,17 @@ definition kdevBase :: machine_word lemma "kdevBase = 0xFFFFFFFFC0000000" (* Sanity check with C *) by (simp add: kdevBase_def) -definition kernelELFBase :: machine_word +definition kernelELFPAddrBase :: machine_word where - "kernelELFBase = - (1 << 31) + 0x4000000" (* 2^64 - 2 GiB + 2^26 *) + "kernelELFPAddrBase = physBase + 0x4000000" -lemma "kernelELFBase = 0xFFFFFFFF84000000" (* Sanity check with C *) - by (simp add: kernelELFBase_def) +definition pptrTop :: machine_word + where + "pptrTop \ - (1 << 31)" -definition kernelELFPAddrBase :: machine_word +definition kernelELFBase :: machine_word where - "kernelELFPAddrBase = 0x84000000" + "kernelELFBase = pptrTop + (kernelELFPAddrBase && mask 30)" (* 2^64 - 2 GiB + ... 
*) definition kernelELFBaseOffset :: machine_word where diff --git a/spec/machine/X64/Arch_Kernel_Config_Lemmas.thy b/spec/machine/X64/Arch_Kernel_Config_Lemmas.thy new file mode 100644 index 0000000000..28599e6319 --- /dev/null +++ b/spec/machine/X64/Arch_Kernel_Config_Lemmas.thy @@ -0,0 +1,18 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Architecture-specific lemmas constraining Kernel_Config definitions *) + +theory Arch_Kernel_Config_Lemmas +imports + Kernel_Config_Lemmas + Platform +begin + +context Arch begin global_naming X64 + +end +end diff --git a/spec/machine/X64/MachineOps.thy b/spec/machine/X64/MachineOps.thy index 86a3912193..6ec113cc06 100644 --- a/spec/machine/X64/MachineOps.thy +++ b/spec/machine/X64/MachineOps.thy @@ -9,7 +9,7 @@ chapter "Machine Operations" theory MachineOps imports "Word_Lib.WordSetup" - "Lib.NonDetMonad" + "Monads.Nondet_Monad" MachineMonad begin diff --git a/spec/machine/X64/Platform.thy b/spec/machine/X64/Platform.thy index 897344e889..d6b16038d7 100644 --- a/spec/machine/X64/Platform.thy +++ b/spec/machine/X64/Platform.thy @@ -12,7 +12,7 @@ imports "Word_Lib.WordSetup" "Lib.Defs" Setup_Locale - Kernel_Config_Lemmas + Kernel_Config begin context Arch begin global_naming X64 diff --git a/spec/sep-abstract/README.md b/spec/sep-abstract/README.md index d3bb8b126a..b7a224c178 100644 --- a/spec/sep-abstract/README.md +++ b/spec/sep-abstract/README.md @@ -19,9 +19,9 @@ appropriately can be found in the `proof` directory under Building -------- -To build from the `l4v/` directory, run: +To build from the `l4v/` directory for the ARM architecture, run: - ./isabelle/bin/isabelle build -d . -v -b ASepSpec + L4V_ARCH=ARM ./run_tests ASepSpec Important Theories ------------------ diff --git a/spec/take-grant/README.md b/spec/take-grant/README.md index a04040c84d..83d338f80f 100644 --- a/spec/take-grant/README.md +++ b/spec/take-grant/README.md @@ -29,9 +29,10 @@ Overview Building -------- -The corresponding Isabelle session is `TakeGrant`. To build, run in directory `l4v/spec`: +The corresponding Isabelle session is `TakeGrant`. To build for the ARM +architecture, run in directory `l4v/`: - make TakeGrant + L4V_ARCH=ARM ./run_tests TakeGrant Remarks diff --git a/spec/tests.xml b/spec/tests.xml index 0e7433db7e..b520faa91c 100644 --- a/spec/tests.xml +++ b/spec/tests.xml @@ -15,8 +15,10 @@ --> - + + make c-config + make design-spec @@ -24,7 +26,7 @@ make TakeGrant - + make ASpec make ExecSpec make DSpec @@ -37,7 +39,7 @@ - make + make $L4V_ARCH diff --git a/sys-init/CreateIRQCaps_SI.thy b/sys-init/CreateIRQCaps_SI.thy index 6ca2679682..e266e04016 100644 --- a/sys-init/CreateIRQCaps_SI.thy +++ b/sys-init/CreateIRQCaps_SI.thy @@ -455,7 +455,7 @@ lemma create_irq_caps_sep: apply (elim conjE) apply (subst si_objects_extra_caps'_split, assumption+) apply (rule hoare_chain [OF create_irq_caps_sep_helper, where orig_caps1=orig_caps]) - apply (rule pred_andI) + apply (rule pred_conjI) apply sep_solve apply clarsimp apply clarsimp diff --git a/sys-init/CreateObjects_SI.thy b/sys-init/CreateObjects_SI.thy index 07bc9f6dbf..94ebae8fcc 100644 --- a/sys-init/CreateObjects_SI.thy +++ b/sys-init/CreateObjects_SI.thy @@ -38,7 +38,7 @@ lemma seL4_Untyped_Retype_has_children_wp: \\rv s. 
has_children parent (kernel_state s)\" apply (clarsimp simp: has_children_def is_cdt_parent_def) apply (subst ex_conj_increase)+ - apply (rule hoare_ex_wp)+ + apply (rule hoare_vcg_ex_lift)+ apply (rule hoare_chain) apply (rule seL4_Untyped_Retype_inc_no_preempt [where root_size=si_cnode_size and root_cnode_cap=si_cnode_cap and obj = obj @@ -218,7 +218,8 @@ lemma retype_untyped_wp: (assumption|simp add: unat_of_nat32 |rule offset_slot' [symmetric] guard_equal_si_cnode_cap)+) apply clarsimp apply sep_solve - apply (case_tac r) + apply (rename_tac rv s) + apply (case_tac rv) apply clarsimp apply sep_solve apply clarsimp @@ -1109,7 +1110,7 @@ lemma retype_untyped_loop_inv_helper: apply (rule valid_rv_split) apply (fact retype_untyped_loop_inv_fail) apply (fact retype_untyped_loop_inv_success) - apply (case_tac r, simp_all) + apply (simp split: if_split_asm) done lemma nth_mem_sub: diff --git a/sys-init/DuplicateCaps_SI.thy b/sys-init/DuplicateCaps_SI.thy index 2b23618894..24737e6ba6 100644 --- a/sys-init/DuplicateCaps_SI.thy +++ b/sys-init/DuplicateCaps_SI.thy @@ -256,7 +256,7 @@ lemma distinct_card': (* FIXME, move higher *) lemma distinct_length_filter': "distinct xs \ length [x\xs. P x] = card {x \ set xs. P x}" - by (metis distinct_length_filter set_conj_Int_simp inf_commute) + by (metis distinct_card' distinct_filter set_filter) lemma duplicate_caps_sep_no_rv: "\\si_caps_at t orig_caps spec dev {obj_id. real_object_at obj_id spec} \* diff --git a/sys-init/InitIRQ_SI.thy b/sys-init/InitIRQ_SI.thy index 573e869a3e..0c12209406 100644 --- a/sys-init/InitIRQ_SI.thy +++ b/sys-init/InitIRQ_SI.thy @@ -66,7 +66,7 @@ lemma seL4_IRQHandler_SetEndpoint_irq_initialised_helper_sep: (si_cnode_id, unat seL4_CapInitThreadCNode) \c si_cnode_cap \* (si_cnode_id, unat seL4_CapIRQControl) \c IrqControlCap \* si_asid \* R"]) - apply (intro pred_andI) + apply (intro pred_conjI) apply (clarsimp simp: object_type_is_object default_cap_def) apply (sep_drule sep_map_c_sep_map_s [where cap=NullCap]) apply (rule object_slots_object_default_state_NullCap', (simp add: object_type_has_slots)+) diff --git a/sys-init/InitVSpace_SI.thy b/sys-init/InitVSpace_SI.thy index 4bb2e02cc7..196f0f63dc 100644 --- a/sys-init/InitVSpace_SI.thy +++ b/sys-init/InitVSpace_SI.thy @@ -887,7 +887,7 @@ lemma set_asid_wp: apply (rule valid_si_caps_at_si_cap_at [where obj_id=obj_id], clarsimp+) apply (clarsimp simp: si_cap_at_def sep_conj_assoc sep_conj_exists) apply (subst ex_conj_increase)+ - apply (rule hoare_ex_wp)+ + apply (rule hoare_vcg_ex_lift)+ apply (rename_tac kobj_id) apply (rule hoare_grab_asm)+ apply wpsimp diff --git a/sys-init/Proof_SI.thy b/sys-init/Proof_SI.thy index 7ab91e07fb..f3f19273be 100644 --- a/sys-init/Proof_SI.thy +++ b/sys-init/Proof_SI.thy @@ -228,14 +228,14 @@ lemma sys_init_explicit: apply (insert distinct_card [symmetric, where xs ="[obj\obj_ids . cnode_or_tcb_at obj spec]"], simp) apply (frule distinct_card [symmetric]) apply (clarsimp simp: init_system_def, wp valid_case_prod') - apply (rule hoare_ex_wp, rename_tac t, rule_tac t=t in start_threads_sep [sep_wandise], simp) - apply (rule hoare_ex_wp, rename_tac t, rule_tac t=t and + apply (rule hoare_vcg_ex_lift, rename_tac t, rule_tac t=t in start_threads_sep [sep_wandise], simp) + apply (rule hoare_vcg_ex_lift, rename_tac t, rule_tac t=t and free_cptrs="[fstart .e. 
fend - 1]" in init_cspace_sep [sep_wandise]) - apply (rule hoare_ex_wp, rename_tac t, rule_tac t=t in init_tcbs_sep [sep_wandise]) - apply (rule hoare_ex_wp, rename_tac t, rule_tac t=t in init_vspace_sep [sep_wandise]) - apply (rule hoare_ex_wp, rename_tac t, rule_tac t=t in init_pd_asids_sep [sep_wandise]) - apply (rule hoare_ex_wp, rename_tac t, rule_tac t=t and dev=False in init_irqs_sep [sep_wandise]) - apply (rule hoare_ex_wp, rename_tac t, rule_tac t=t and dev=False and + apply (rule hoare_vcg_ex_lift, rename_tac t, rule_tac t=t in init_tcbs_sep [sep_wandise]) + apply (rule hoare_vcg_ex_lift, rename_tac t, rule_tac t=t in init_vspace_sep [sep_wandise]) + apply (rule hoare_vcg_ex_lift, rename_tac t, rule_tac t=t in init_pd_asids_sep [sep_wandise]) + apply (rule hoare_vcg_ex_lift, rename_tac t, rule_tac t=t and dev=False in init_irqs_sep [sep_wandise]) + apply (rule hoare_vcg_ex_lift, rename_tac t, rule_tac t=t and dev=False and untyped_cptrs = "[ustart .e. uend - 1]" and free_cptrs_orig = "[fstart .e. fend - 1]" in duplicate_caps_sep [sep_wandise]) apply (rule create_irq_caps_sep [where dev = False,sep_wandise, @@ -254,7 +254,7 @@ lemma sys_init_explicit: apply (subst objects_empty_by_parts, assumption)+ apply (subst objects_empty_objects_initialised_capless)+ apply (clarsimp simp: linorder_not_le) - apply (intro conjI allI impI pred_andI | sep_cancel+)+ + apply (intro conjI allI impI pred_conjI | sep_cancel+)+ apply fastforce apply (clarsimp simp: less_diff_conv) apply (rule list_all_drop, erule (1) le_list_all) diff --git a/sys-init/README.md b/sys-init/README.md index d1a5c4a867..c6d9631f42 100644 --- a/sys-init/README.md +++ b/sys-init/README.md @@ -22,13 +22,13 @@ The system initialiser and the proof are described in the Building -------- -To build from the `l4v/` directory, run: +To build from the `l4v/` directory for the ARM architecture, run: - make SysInit + L4V_ARCH=ARM ./run_tests SysInit To build the example capDL specifications, from the `l4v/` directory, run: - make SysInitExamples + L4V_ARCH=ARM ./run_tests SysInitExamples Important Theories diff --git a/tools/asmrefine/AARCH64/ArchSetup.thy b/tools/asmrefine/AARCH64/ArchSetup.thy new file mode 100644 index 0000000000..e2de86c9c6 --- /dev/null +++ b/tools/asmrefine/AARCH64/ArchSetup.thy @@ -0,0 +1,33 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory ArchSetup +imports + "CLib.CTranslationNICTA" +begin + +abbreviation (input) + "(arch_load_machine_word + (load_word32 :: word32 mem_read) + (load_word64 :: word64 mem_read) + :: machine_word mem_read) + \ load_word64" + +abbreviation (input) + "(arch_store_machine_word + (store_word32 :: word32 mem_upd) + (store_word64 :: word64 mem_upd) + :: machine_word mem_upd) + \ store_word64" + +abbreviation (input) + "(arch_machine_word_constructor + (from_word32 :: word32 \ 'a) + (from_word64 :: word64 \ 'a) + :: machine_word \ 'a) + \ from_word64" + +end diff --git a/tools/asmrefine/ExtraSpecs.thy b/tools/asmrefine/ExtraSpecs.thy index 80ed780285..d871a388b9 100644 --- a/tools/asmrefine/ExtraSpecs.thy +++ b/tools/asmrefine/ExtraSpecs.thy @@ -7,7 +7,7 @@ theory ExtraSpecs imports - "CLib.TypHeapLib" + "CParser.TypHeapLib" begin diff --git a/tools/asmrefine/FieldAccessors.thy b/tools/asmrefine/FieldAccessors.thy index 6cdb76316e..91eebad2fc 100644 --- a/tools/asmrefine/FieldAccessors.thy +++ b/tools/asmrefine/FieldAccessors.thy @@ -5,9 +5,9 @@ *) theory FieldAccessors - -imports "CLib.LemmaBucket_C" - + imports 
+ Lib.Lib + CParser.LemmaBucket_C begin lemma h_val_mono: @@ -53,10 +53,10 @@ lemma heap_update_rotate: heap_update_list_rotate) lemma c_guard_align_of: - "\ align_of TYPE('a :: c_type) + size_of TYPE('a) < 2 ^ word_bits; - align_of TYPE('a) \ 0 \ \ - c_guard (Ptr (of_nat (align_of TYPE('a))) :: 'a ptr)" + "\ align_of TYPE('a :: c_type) + size_of TYPE('a) < 2 ^ word_bits; align_of TYPE('a) \ 0 \ \ + c_guard (Ptr (of_nat (align_of TYPE('a))) :: 'a ptr)" unfolding c_guard_def + supply word_neq_0_conv[simp del] apply (simp add: ptr_aligned_def unat_of_nat c_null_guard_def) apply (clarsimp simp: intvl_def word_bits_conv take_bit_nat_eq_self) apply (drule trans[rotated], rule sym, rule Abs_fnat_hom_add) diff --git a/tools/asmrefine/GraphLang.thy b/tools/asmrefine/GraphLang.thy index 6540e7db0d..12daf6cd78 100644 --- a/tools/asmrefine/GraphLang.thy +++ b/tools/asmrefine/GraphLang.thy @@ -7,7 +7,7 @@ theory GraphLang imports - "CLib.TypHeapLib" + "CParser.TypHeapLib" "Lib.SplitRule" "CommonOps" diff --git a/tools/asmrefine/GraphProof.thy b/tools/asmrefine/GraphProof.thy index 8d34c38288..fc1467f9c2 100644 --- a/tools/asmrefine/GraphProof.thy +++ b/tools/asmrefine/GraphProof.thy @@ -2180,7 +2180,6 @@ lemma stepwise_graph_refine_Basic: apply (frule_tac i=i and tr=tr in exec_trace_step_cases) apply (frule_tac i=j and tr=tr' in exec_trace_step_cases) apply (clarsimp simp: exec_graph_step_def exec_graph_invariant_Cons split: graph_function.split_asm) - apply (simp add: K_def) done lemma stepwise_graph_refine_Cond: @@ -2319,7 +2318,6 @@ lemma stepwise_graph_refine_inline_left: apply (frule_tac i=i and tr=tr in exec_trace_step_cases) apply (frule_tac i=j and tr=tr' in exec_trace_step_cases) apply (clarsimp simp: exec_graph_step_def exec_graph_invariant_Cons split: graph_function.split_asm) - apply (simp add: K_def) done lemma stepwise_graph_refine_end_inline_left: @@ -2340,7 +2338,6 @@ lemma stepwise_graph_refine_end_inline_left: apply (frule_tac i=i and tr=tr in exec_trace_step_cases) apply (frule_tac i=j and tr=tr' in exec_trace_step_cases) apply (clarsimp simp: exec_graph_step_def exec_graph_invariant_Cons split: graph_function.split_asm) - apply (simp add: K_def) done lemma stepwise_graph_refine_inline_right: @@ -2361,7 +2358,6 @@ lemma stepwise_graph_refine_inline_right: apply (frule_tac i=i and tr=tr in exec_trace_step_cases) apply (frule_tac i=j and tr=tr' in exec_trace_step_cases) apply (clarsimp simp: exec_graph_step_def exec_graph_invariant_Cons split: graph_function.split_asm) - apply (simp add: K_def) done lemma stepwise_graph_refine_end_inline_right: @@ -2382,7 +2378,6 @@ lemma stepwise_graph_refine_end_inline_right: apply (frule_tac i=i and tr=tr in exec_trace_step_cases) apply (frule_tac i=j and tr=tr' in exec_trace_step_cases) apply (clarsimp simp: exec_graph_step_def exec_graph_invariant_Cons split: graph_function.split_asm) - apply (simp add: K_def) done lemma stepwise_graph_refine_induct: diff --git a/tools/asmrefine/GraphRefine.thy b/tools/asmrefine/GraphRefine.thy index 3f8d20c523..e0907659fc 100644 --- a/tools/asmrefine/GraphRefine.thy +++ b/tools/asmrefine/GraphRefine.thy @@ -9,7 +9,8 @@ theory GraphRefine imports TailrecPre GraphLangLemmas - "CLib.LemmaBucket_C" + Lib.Lib + "CParser.LemmaBucket_C" ExtraSpecs begin @@ -1125,7 +1126,7 @@ lemma simpl_to_graph_call_next_step: (add_cont (call initf proc ret (\x y. 
com.Basic (f' x y))) con) n tS P I eqs out_eqs" apply (rule simpl_to_graph_name_simpl_state) - apply (clarsimp simp: call_def block_def graph) + apply (clarsimp simp: call_def block_def block_exn_def graph) apply (rule_tac i=0 and j=3 and P'="{initf sst}" and inp_eqs'="\gst _. eqs gst sst \ sst \ I" in simpl_to_graph_step_general) apply (simp add: init[THEN eq_implD] numeral_3_eq_3 eq_OO) diff --git a/tools/asmrefine/ProveGraphRefine.thy b/tools/asmrefine/ProveGraphRefine.thy index 278f459faa..dd2a61c57a 100644 --- a/tools/asmrefine/ProveGraphRefine.thy +++ b/tools/asmrefine/ProveGraphRefine.thy @@ -355,9 +355,11 @@ fun fold_of_nat_eq_Ifs ctxt tm = let in thm end val fold_of_nat_eq_Ifs_simproc = Simplifier.make_simproc - (Proof_Context.init_global @{theory}) "fold_of_nat_eq_Ifs" - { lhss = [@{term "If (x = 0) y z"}] + (Proof_Context.init_global @{theory}) + { name = "fold_of_nat_eq_Ifs" + , lhss = [@{term "If (x = 0) y z"}] , proc = fn _ => fn ctxt => try (fold_of_nat_eq_Ifs ctxt) o Thm.term_of + , identifier = [] } fun unfold_assertion_data_get_set_conv ctxt tm = let @@ -369,9 +371,11 @@ fun unfold_assertion_data_get_set_conv ctxt tm = let in Simplifier.rewrite (ctxt addsimps defs) (Thm.cterm_of ctxt tm) end val unfold_assertion_data_get_set = Simplifier.make_simproc - (Proof_Context.init_global @{theory}) "unfold_assertion_data_get" - { lhss = [@{term "ghost_assertion_data_get k acc s"}, @{term "ghost_assertion_data_set k v upd"}] + (Proof_Context.init_global @{theory}) + { name = "unfold_assertion_data_get" + , lhss = [@{term "ghost_assertion_data_get k acc s"}, @{term "ghost_assertion_data_set k v upd"}] , proc = fn _ => fn ctxt => SOME o (unfold_assertion_data_get_set_conv ctxt) o Thm.term_of + , identifier = [] } \ diff --git a/tools/asmrefine/SimplExport.thy b/tools/asmrefine/SimplExport.thy index 819acec43e..d75921d8b9 100644 --- a/tools/asmrefine/SimplExport.thy +++ b/tools/asmrefine/SimplExport.thy @@ -889,7 +889,7 @@ fun has_reads_globals (params : export_params) body = exists_Const (fn (s, T) => fun get_reads_calls ctxt params globals name = let val thm = Proof_Context.get_thm ctxt (name ^ "_body_def") - |> simplify (put_simpset HOL_basic_ss ctxt addsimps @{thms call_def block_def}) + |> simplify (put_simpset HOL_basic_ss ctxt addsimps @{thms call_def block_def block_exn_def}) fun calls (Const (@{const_name com.Call}, _) $ proc) = [proc] | calls (f $ x) = calls f @ calls x | calls (Abs (_, _, t)) = calls t @@ -1023,10 +1023,10 @@ fun emit_body ctxt outfile params (Const (@{const_name Seq}, _) $ a $ b) n c e = val proc_info = Hoare.get_data ctxt |> #proc_info val ret_vals = Symtab.lookup proc_info (Long_Name.base_name p) |> the |> #params - |> filter (fn (v, _) => v = HoarePackage.Out) - |> maps (snd #> read_const ctxt (#pfx params) + |> filter (fn (v, _, _) => v = HoarePackage.Out) + |> maps (#2 #> read_const ctxt (#pfx params) #> synthetic_updates ctxt params "rv#space#") - |> map fst + |> map #1 val p_short = unsuffix "_'proc" (Long_Name.base_name p) val no_read = mk_safe is_no_read_globals ctxt params p_short @@ -1069,10 +1069,10 @@ fun emit_body ctxt outfile params (Const (@{const_name Seq}, _) $ a $ b) n c e = fun emit_func_body ctxt outfile eparams name = let val proc_info = Hoare.get_data ctxt |> #proc_info val params = Symtab.lookup proc_info (name ^ "_'proc") - |> the |> #params + |> the |> #params |> map (fn (a, b, _) => (a, b)) |> map (apsnd (read_const ctxt (#pfx eparams) #> synthetic_updates ctxt eparams "" - #> map fst)) + #> map #1)) val no_read = mk_safe 
is_no_read_globals ctxt eparams name val no_write = mk_safe (K o is_no_write) ctxt eparams name diff --git a/tools/autocorres/AbstractArrays.thy b/tools/autocorres/AbstractArrays.thy index 3f6fea65e1..4d6279f206 100644 --- a/tools/autocorres/AbstractArrays.thy +++ b/tools/autocorres/AbstractArrays.thy @@ -6,7 +6,7 @@ theory AbstractArrays imports - "CLib.TypHeapLib" + "CParser.TypHeapLib" "Word_Lib.WordSetup" begin diff --git a/tools/autocorres/AutoCorres.thy b/tools/autocorres/AutoCorres.thy index 531b921d5c..884987b242 100644 --- a/tools/autocorres/AutoCorres.thy +++ b/tools/autocorres/AutoCorres.thy @@ -21,11 +21,11 @@ imports TypHeapSimple HeapLift WordAbstract - "Lib.OptionMonadWP" - "Lib.Apply_Trace" + "Monads.Reader_Option_VCG" + "Eisbach_Tools.Apply_Trace" AutoCorresSimpset - "Lib.MkTermAntiquote" - "Lib.TermPatternAntiquote" + "ML_Utils.MkTermAntiquote" + "ML_Utils.TermPatternAntiquote" keywords "autocorres" :: thy_decl begin @@ -42,9 +42,9 @@ declare hoare_wp_combs [wp del, wp_comb del] declare hoare_wp_state_combsE [wp del, wp_comb del] lemmas hoare_wp_combsE_autocorres [wp_comb] - = hoare_vcg_precond_impE hoare_vcg_precond_impE_R validE_validE_R + = hoare_weaken_preE hoare_weaken_preE_R validE_validE_R lemmas hoare_wp_combs_autocorres [wp_comb] - = hoare_vcg_precond_imp + = hoare_weaken_pre declare validNF_weaken_pre[wp_comb] declare validE_NF_weaken_pre[wp_comb] bundle nf_no_pre diff --git a/tools/autocorres/CCorresE.thy b/tools/autocorres/CCorresE.thy index 198ffa0a2c..918f7a885e 100644 --- a/tools/autocorres/CCorresE.thy +++ b/tools/autocorres/CCorresE.thy @@ -269,7 +269,7 @@ lemma ccorresE_termination': apply clarsimp apply (erule allE, erule (1) impE) apply (clarsimp split: sum.splits xstate.splits) - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply clarsimp apply (erule allE, erule impE, rule refl) apply clarsimp @@ -313,7 +313,7 @@ lemma ccorresE_While: apply (insert ccorresE_exec_Normal [OF body_refines])[1] apply clarsimp apply atomize - apply (erule allE2, erule (1) impE) + apply (erule allE, erule allE, erule (1) impE) apply (frule snd_whileLoopE_first_step, force simp: cond_match) apply clarsimp apply (erule impE) @@ -329,7 +329,7 @@ lemma ccorresE_While: apply (insert ccorresE_exec_Abrupt [OF body_refines])[1] apply clarsimp apply atomize - apply (erule allE2, erule (1) impE) + apply (erule allE, erule allE, erule (1) impE) apply (frule snd_whileLoopE_first_step, force simp: cond_match) apply clarsimp apply (subst whileLoopE_unroll) @@ -414,13 +414,10 @@ lemma ccorresE_symb_exec_l: apply (clarsimp simp: ccorresE_def validE_def valid_def exs_valid_def) apply (erule allE, erule impE, assumption)+ apply (clarsimp) - apply (erule (1) my_BallE) - apply clarsimp - apply (erule_tac x=aa and y=s in allE2) + apply (drule (1) bspec) apply clarsimp apply (monad_eq simp: Bex_def Ball_def split: xstate.splits) - apply fastforce - done + by fastforce lemma ccorresE_no_fail_term: " \ ccorresE st ct \ G G' A B; no_fail G A; s \ G'; G (st s); ct \ \ \ \ B \ Normal s" diff --git a/tools/autocorres/CorresXF.thy b/tools/autocorres/CorresXF.thy index 0099910b10..7bd6563548 100644 --- a/tools/autocorres/CorresXF.thy +++ b/tools/autocorres/CorresXF.thy @@ -51,12 +51,12 @@ lemma corresXF_simple_corresXF: apply clarsimp apply (erule allE, erule impE, force) apply (clarsimp split: sum.splits cong del: unit.case_cong) - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply clarsimp apply clarsimp apply (erule_tac x=s in allE) apply (clarsimp split: sum.splits cong del: 
unit.case_cong) - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply clarsimp done @@ -144,23 +144,12 @@ lemma corresXF_exec_normal: "\ corresXF st ret ex P A B; (Inr r', s') \ fst (B s); \ snd (A (st s)); P s \ \ (Inr (ret r' s'), st s') \ fst (A (st s))" using corresXF_simple_exec - apply (clarsimp simp: corresXF_def) - apply (clarsimp split: sum.splits) - apply (erule_tac x=s in allE) - apply clarsimp - apply (erule (1) my_BallE) - apply clarsimp - done + by (fastforce simp: corresXF_def) lemma corresXF_exec_except: "\ corresXF st ret ex P A B; (Inl r', s') \ fst (B s); \ snd (A (st s)); P s \ \ (Inl (ex r' s'), st s') \ fst (A (st s))" - apply (clarsimp simp: corresXF_def) - apply (erule allE, erule impE, force) - apply (clarsimp) - apply (erule (1) my_BallE) - apply (clarsimp split: sum.splits) - done + by (fastforce simp: corresXF_def) lemma corresXF_exec_fail: "\ corresXF st ret ex P A B; snd (B s); P s \ @@ -256,13 +245,11 @@ lemma corresXF_join: apply (subst (asm) corresXF_simple_corresXF[symmetric])+ apply (subst corresXF_simple_corresXF[symmetric]) apply (unfold bindE_def) - apply (erule corresXF_simple_join [where P'="\a b s. (case b of Inl r \ a = Inl (E r s) | Inr r \ a = Inr (V r s) \ P' (theRight a) r s)"]) + apply (erule corresXF_simple_join [where P'="\a b s. (case b of Inl r \ a = Inl (E r s) | Inr r \ a = Inr (V r s) \ P' (projr a) r s)"]) apply (simp add: corresXF_simple_def split: sum.splits unit.splits) - apply (clarsimp simp: NonDetMonad.lift_def - throwError_def return_def split: sum.splits - cong del: unit.case_cong) + apply (clarsimp simp: Nondet_Monad.lift_def throwError_def return_def) apply fastforce - apply (fastforce simp: NonDetMonad.validE_def split: sum.splits cong del: unit.case_cong) + apply (fastforce simp: Nondet_VCG.validE_def split: sum.splits cong del: unit.case_cong) apply simp done @@ -272,13 +259,11 @@ lemma corresXF_except: apply (subst (asm) corresXF_simple_corresXF[symmetric])+ apply (subst corresXF_simple_corresXF[symmetric]) apply (unfold handleE'_def) - apply (erule corresXF_simple_join [where P'="\a b s. (case b of Inr r \ a = Inr (V r s) | Inl r \ a = Inl (E r s) \ P' (theLeft a) r s)"]) + apply (erule corresXF_simple_join [where P'="\a b s. 
(case b of Inr r \ a = Inr (V r s) | Inl r \ a = Inl (E r s) \ P' (projl a) r s)"]) apply (simp add: corresXF_simple_def split: sum.splits unit.splits) - apply (clarsimp simp: NonDetMonad.lift_def throwError_def - return_def split: sum.splits unit.splits cong del: - unit.case_cong) + apply (clarsimp simp: Nondet_Monad.lift_def throwError_def return_def) apply fastforce - apply (clarsimp simp: NonDetMonad.validE_def split: sum.splits cong del: unit.case_cong) + apply (clarsimp simp: Nondet_VCG.validE_def split: sum.splits cong del: unit.case_cong) apply simp done @@ -315,19 +300,16 @@ lemma corresXF_simple_loop_terminates: apply (clarsimp simp: cond_match) apply atomize apply clarsimp - apply (erule allE2) - apply (erule impE) + apply (erule allE, erule allE, erule impE) apply (erule conjI) apply (clarsimp simp: cond_match) apply clarsimp apply (rule whileLoop_terminates.intros(2)) apply (clarsimp simp: cond_match) apply (clarsimp split: sum.splits) - apply (erule (1) my_BallE) - apply clarsimp - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply clarsimp - apply (erule_tac x=a and y=b in allE2) + apply (drule (1) bspec) apply clarsimp apply (frule use_valid [OF _ pred_inv]) apply (clarsimp simp: no_fail_def) @@ -579,18 +561,18 @@ lemma corresXF_while: apply (rule corresXF_simple_weaken_pre) apply (rule corresXF_simple_while [where P ="\x s. (case x of Inl _ \ True| Inr v \ P v s)" - and P'="\x s. P' (theRight x) s"]) + and P'="\x s. P' (projr x) s"]) apply (insert body_corres)[1] apply (subst (asm) corresXF_simple_corresXF[symmetric]) apply atomize - apply (erule_tac x="theRight x" in allE) - apply (clarsimp simp: corresXF_simple_def NonDetMonad.lift_def - throwError_def return_def split: sum.splits) + apply (erule_tac x="projr x" in allE) + apply (clarsimp simp: corresXF_simple_def Nondet_Monad.lift_def throwError_def return_def + split: sum.splits) apply (clarsimp simp: cond_match split: sum.splits) apply (clarsimp simp: lift_def split: sum.splits) apply (cut_tac pred_inv [unfolded validE_def, simplified lift_def]) apply (erule hoare_chain) - apply (monad_eq simp: NonDetMonad.lift_def whileLoopE_def split: sum.splits) + apply (monad_eq simp: Nondet_Monad.lift_def whileLoopE_def split: sum.splits) apply monad_eq apply (clarsimp simp: pred_imply split: sum.splits) apply (clarsimp simp: init_match pred_imply) @@ -671,7 +653,7 @@ proof - apply (erule impE) apply (erule contrapos_nn) apply (erule new_body_fails_more) - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply clarsimp apply (monad_eq simp: cond_match guardE_def split_def split: sum.splits) apply (drule snd_whileLoopE_first_step) @@ -708,9 +690,9 @@ lemma ccorresE_corresXF_merge: apply clarsimp apply (erule allE, erule impE, fastforce) apply (case_tac t; clarsimp) - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply (clarsimp split: sum.splits) - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply (clarsimp split: sum.splits) apply (drule no_throw_Inr, assumption) apply simp @@ -816,8 +798,7 @@ lemma corresXF_spec: \ corresXF st ret ex P (specE A) (specE A')" apply (monad_eq simp: corresXF_def specE_def spec_def Ball_def split: sum.splits) apply (frule_tac y=s' in surjD) - apply (clarsimp simp: image_def set_eq_UNIV) - apply metis + apply blast done lemma corresXF_throw: diff --git a/tools/autocorres/ExceptionRewrite.thy b/tools/autocorres/ExceptionRewrite.thy index 3ff8f06711..0d9ff7578a 100644 --- a/tools/autocorres/ExceptionRewrite.thy +++ b/tools/autocorres/ExceptionRewrite.thy @@ -58,12 +58,6 @@ 
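The ExceptionRewrite changes that follow reason about no_throw and no_return: a computation that never returns normally absorbs whatever is sequenced after it (the no_return_bindE rewrite). The same shape in miniature, using Either as a stand-in for the exception monad (an analogy only, since the spec's monad is the nondeterministic state-exception monad):

    -- A computation that always "throws" (never returns normally).
    alwaysThrow :: Either String Int
    alwaysThrow = Left "fault"

    -- Sequencing anything after it changes nothing, which is the shape of
    -- no_return_bindE: (A >>=E B) = A when A never returns.
    absorbed :: Bool
    absorbed = (alwaysThrow >>= \x -> Right (x + 1)) == alwaysThrow
    -- absorbed evaluates to True.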
lemma alwaysfail_noreturn: "always_fail P A \ no_return P A" lemma alwaysfail_nothrow: "always_fail P A \ no_throw P A" by (clarsimp simp: always_fail_def no_throw_def validE_def valid_def split: sum.splits) -lemma empty_fail_handleE: "\ empty_fail L; \r. empty_fail (R r) \ \ empty_fail (L R)" - apply (clarsimp simp: handleE_def handleE'_def) - apply (erule empty_fail_bind) - apply (clarsimp simp: empty_fail_error_bits split: sum.splits) - done - lemma no_return_bindE: "no_return (\_. True) A \ (A >>=E B) = A" apply (rule ext)+ @@ -75,10 +69,10 @@ lemma no_return_bindE: apply (erule disjE) apply clarsimp apply clarsimp - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply clarsimp apply clarsimp - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply (clarsimp split: sum.splits) apply (clarsimp simp: snd_bindE no_return_def validE_def valid_def) apply (erule_tac x=x in allE) @@ -120,20 +114,13 @@ lemma L1_condition_empty_fail: "\ empty_fail L; empty_fail R \ \ by (clarsimp simp: empty_fail_def L1_defs returnOk_def return_def split: condition_splits) lemma L1_seq_empty_fail: "\ empty_fail L; empty_fail R \ \ empty_fail (L1_seq L R)" - apply (clarsimp simp: L1_defs) - apply (erule (1) empty_fail_bindE) - done + by (clarsimp simp: L1_defs) lemma L1_catch_empty_fail: "\ empty_fail L; empty_fail R \ \ empty_fail (L1_catch L R)" - apply (clarsimp simp: L1_defs) - apply (erule (1) empty_fail_handleE) - done + by (clarsimp simp: L1_defs) lemma L1_while_empty_fail: "empty_fail B \ empty_fail (L1_while C B)" - apply (clarsimp simp: L1_while_def) - apply (rule empty_fail_whileLoopE) - apply simp - done + by (clarsimp simp: L1_while_def) (* * no_throw lemmas. @@ -167,7 +154,7 @@ lemma L1_while_nothrow: "no_throw \ B \ no_throw \ (L1 apply (clarsimp simp: no_throw_def) apply (rule validE_whileLoopE [where I="\_ _. True"]) apply simp - apply (erule validE_weaken, simp+) + apply (erule hoare_chainE, simp+) done lemma L1_catch_nothrow_lhs: "\ no_throw \ L \ \ no_throw \ (L1_catch L R)" @@ -215,7 +202,7 @@ lemma L1_seq_noreturn_lhs: "no_return \ L \ no_return \ no_return \ R \ \ no_return \ (L1_seq L R)" apply (clarsimp simp: L1_defs no_return_def no_throw_def) - apply (rule seqE [where B="\_ _. True"]) + apply (rule bindE_wp_fwd[where Q'="\_ _. True"]) apply (rule hoareE_TrueI) apply simp done @@ -344,13 +331,17 @@ lemma L1_catch_cond_seq: apply (rule L1_catch_single_cond) done +lemma unit_not_Inr: + "(a \ Inr ()) = (a = Inl ())" + by (cases a; clarsimp) + (* This exciting lemma lets up break up a L1_catch into two parts in * the exciting circumstance that "E" never returns. *) lemma L1_catch_seq_cond_noreturn_ex: "\ no_return \ E \ \ (L1_catch (L1_seq (L1_condition c A B) C) E) = (L1_seq (L1_catch (L1_condition c A B) E) (L1_catch C E))" apply (clarsimp simp: L1_defs) - apply (monad_eq simp: no_return_def valid_def validE_def Ball_def - Bex_def unit_Inl_or_Inr split:sum.splits) + apply (monad_eq simp: no_return_def valid_def validE_def Ball_def Bex_def unit_not_Inr + split: sum.splits) apply (safe, (metis Inr_not_Inl)+) done diff --git a/tools/autocorres/ExecConcrete.thy b/tools/autocorres/ExecConcrete.thy index 141de6e91a..5e5e417857 100644 --- a/tools/autocorres/ExecConcrete.thy +++ b/tools/autocorres/ExecConcrete.thy @@ -179,8 +179,8 @@ lemma corresXF_simple_exec_concrete: lemma corresXF_exec_concrete_self: "corresXF st (\r s. r) (\r s. 
r) P (exec_concrete st M) M" apply (subst corresXF_simple_corresXF [symmetric]) - apply clarsimp - apply (rule corresXF_simple_exec_concrete) + apply (simp add: surjective_sum[where f=id, simplified]) + apply (rule corresXF_simple_exec_concrete)+ done lemma corresXF_exec_concrete [intro?]: @@ -307,7 +307,7 @@ lemma corresXF_simple_exec_abstract: lemma corresXF_exec_abstract_self: "corresXF st (\r s. r) (\r s. r) P M (exec_abstract st M)" apply (subst corresXF_simple_corresXF [symmetric]) - apply clarsimp + apply (simp add: surjective_sum[where f=id, simplified]) apply (rule corresXF_simple_exec_abstract) done diff --git a/tools/autocorres/HeapLift.thy b/tools/autocorres/HeapLift.thy index 1555dc70fd..e45ccf6f0a 100644 --- a/tools/autocorres/HeapLift.thy +++ b/tools/autocorres/HeapLift.thy @@ -11,7 +11,7 @@ imports L2Defs ExecConcrete AbstractArrays - "CLib.LemmaBucket_C" + "CParser.LemmaBucket_C" begin definition "L2Tcorres st A C = corresXF st (\r _. r) (\r _. r) \ A C" @@ -157,9 +157,8 @@ definition "abs_spec st P (A :: ('a \ 'a) set) (C :: ('c \ 'c) set lemma L2Tcorres_spec [heap_abs]: "\ abs_spec st P A C \ \ L2Tcorres st (L2_seq (L2_guard P) (\_. (L2_spec A))) (L2_spec C)" - apply (monad_eq simp: corresXF_def L2Tcorres_def L2_defs image_def set_eq_UNIV - split_def Ball_def state_select_def abs_spec_def split: sum.splits) - done + by (monad_eq simp: corresXF_def L2Tcorres_def L2_defs image_def split_def Ball_def + state_select_def abs_spec_def) lemma abs_spec_constant [heap_abs]: "abs_spec st \ {(a, b). C} {(a, b). C}" @@ -1368,7 +1367,7 @@ lemma heap_abs_expr_c_guard_array [heap_abs]: apply (subst (asm) (2) set_array_addrs) apply force apply clarsimp - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply (drule (1) valid_typ_heap_c_guard) apply simp done diff --git a/tools/autocorres/L1Defs.thy b/tools/autocorres/L1Defs.thy index 76c24ebf5f..d336f54f28 100644 --- a/tools/autocorres/L1Defs.thy +++ b/tools/autocorres/L1Defs.thy @@ -222,7 +222,7 @@ lemma L1corres_call: (L1_call scope_setup (measure_call dest_fn) scope_teardown f) (call scope_setup dest scope_teardown (\_ t. Basic (f t)))" apply (clarsimp simp: L1corres_alt_def) - apply (unfold call_def block_def L1_call_def) + apply (unfold call_def block_def block_exn_def L1_call_def) apply (rule ccorresE_DynCom) apply clarsimp apply (rule ccorresE_get) @@ -256,7 +256,7 @@ lemma L1corres_reccall: (L1_call scope_setup (dest_fn m) scope_teardown f) (call scope_setup dest scope_teardown (\_ t. 
Basic (f t)))" apply (clarsimp simp: L1corres_alt_def) - apply (unfold call_def block_def L1_call_def) + apply (unfold call_def block_def block_exn_def L1_call_def) apply (rule ccorresE_DynCom) apply clarsimp apply (rule ccorresE_get) @@ -320,7 +320,7 @@ lemma L1corres_prepend_unknown_var': apply (monad_eq simp: Bex_def) apply metis apply (subst L1_init_def) - apply (wp del: hoare_vcg_prop) + apply (wpsimp wp_del: hoare_vcg_prop) done lemma L1_catch_seq_join: "no_throw \ A \ L1_seq A (L1_catch B C) = (L1_catch (L1_seq A B) C)" @@ -334,7 +334,7 @@ lemma no_throw_L1_init [simp]: "no_throw P (L1_init f)" apply (rule no_throw_bindE [where B=\]) apply simp apply simp - apply wp + apply wpsimp done lemma L1corres_prepend_unknown_var: diff --git a/tools/autocorres/L1Peephole.thy b/tools/autocorres/L1Peephole.thy index 3914b66be0..8614d80043 100644 --- a/tools/autocorres/L1Peephole.thy +++ b/tools/autocorres/L1Peephole.thy @@ -58,7 +58,7 @@ lemma L1_fail_propagate_catch [L1opt]: "(L1_seq (L1_catch L R) L1_fail) = (L1_catch (L1_seq L L1_fail) (L1_seq R L1_fail))" unfolding L1_defs apply (clarsimp simp: bindE_def handleE'_def handleE_def bind_assoc) - apply (rule arg_cong [where f="NonDetMonad.bind L"]) + apply (rule arg_cong [where f="Nondet_Monad.bind L"]) apply (fastforce split: sum.splits simp: throwError_def) done diff --git a/tools/autocorres/L1Valid.thy b/tools/autocorres/L1Valid.thy index fdd0b33451..52dca39fcd 100644 --- a/tools/autocorres/L1Valid.thy +++ b/tools/autocorres/L1Valid.thy @@ -29,7 +29,7 @@ lemma L1_spec_wp [wp]: "\ \s. \t. (s, t) \ f \ \s. \x. P () (f (\_. x) s) \ L1_init f \ P \, \ Q \" apply (unfold L1_init_def) - apply (wp select_wp) + apply wp apply fastforce done @@ -83,9 +83,9 @@ lemma L1_seq_lp: "\ \s. E2 () s \ E () s \ \ \P\ L1_seq A B \Q\, \E\" apply (clarsimp simp: L1_seq_def) - apply (rule seqE [rotated]) - apply (erule validE_weaken, simp+)[1] - apply (erule validE_weaken, simp+)[1] + apply (rule bindE_wp) + apply (erule hoare_chainE, simp+)[1] + apply (erule hoare_chainE, simp+)[1] done lemma L1_condition_lp: " @@ -100,8 +100,8 @@ lemma L1_condition_lp: " \P\ L1_condition c A B \Q\, \E\" apply (clarsimp simp: L1_condition_def) apply wp - apply (erule validE_weaken, simp+)[1] - apply (erule validE_weaken, simp+)[1] + apply (erule hoare_chainE, simp+)[1] + apply (erule hoare_chainE, simp+)[1] apply simp done @@ -117,8 +117,8 @@ lemma L1_catch_lp: " apply (clarsimp simp: L1_catch_def) including no_pre apply wp - apply (erule validE_weaken, simp+)[1] - apply (erule validE_weaken, simp+)[1] + apply (erule hoare_chainE, simp+)[1] + apply (erule hoare_chainE, simp+)[1] done lemma L1_init_lp: "\ \s. P s \ \x. Q () (f (\_. x) s) \ \ \P\ L1_init f \Q\, \E\" @@ -134,11 +134,11 @@ lemma L1_while_lp: and inv: " \s. Q' () s \ P' s" and inv': " \s. P' s \ Q' () s" shows "\ P \ L1_while c B \ Q \,\ E \" - apply (rule validE_weaken [where P'=P' and Q'=Q' and E'=E']) + apply (rule hoare_chainE [where P'=P' and Q'=Q' and E'=E']) apply (clarsimp simp: L1_while_def) apply (rule validE_whileLoopE [where I="\r s. 
P' s"]) apply simp - apply (rule validE_weaken [OF body_lp]) + apply (rule hoare_chainE [OF body_lp]) apply (clarsimp simp: p_impl) apply (clarsimp simp: inv) apply simp @@ -156,7 +156,7 @@ lemma L1_recguard_lp: \P\ L1_recguard v A \Q\, \E\" apply (clarsimp simp: L1_recguard_def) apply wp - apply (erule validE_weaken) + apply (erule hoare_chainE) apply assumption apply simp apply simp diff --git a/tools/autocorres/L2Defs.thy b/tools/autocorres/L2Defs.thy index aa6f5b01f6..d93d4b94eb 100644 --- a/tools/autocorres/L2Defs.thy +++ b/tools/autocorres/L2Defs.thy @@ -174,8 +174,7 @@ lemma L2corres_spec: apply (clarsimp simp: L2corres_def L2_defs L1_spec_def corresXF_def liftE_def spec_alt_def return_def bind_def select_def) apply (clarsimp simp: image_def) - apply (subst (asm) set_eq_UNIV) - apply metis + apply (smt (verit) UNIV_I mem_Collect_eq) done lemma L2corres_seq: @@ -259,7 +258,7 @@ lemma L2corres_while: apply (rule body_corres [unfolded L2corres_def]) apply (clarsimp simp: pred_imply) apply (clarsimp simp: cond_match) - apply (rule validE_weaken [OF inv_holds], (clarsimp simp: pred_imply2)+)[1] + apply (rule hoare_chainE [OF inv_holds], (clarsimp simp: pred_imply2)+)[1] apply (metis pred_extract pred_imply2) apply (metis pred_extract pred_imply2) apply simp @@ -280,7 +279,7 @@ lemma corresXF_E: apply clarsimp apply (erule allE, erule impE, fastforce) apply clarsimp - apply (erule (1) my_BallE) + apply (drule (1) bspec) apply (clarsimp split: sum.splits) done @@ -506,9 +505,7 @@ lemma L2_gets_bind: "\ \s s'. V s = V s' \ \r s. P s) (\_ _. False)" apply (clarsimp simp: monad_equiv_def L2_defs) - apply (wp select_wp) + apply wp apply force done diff --git a/tools/autocorres/L2Peephole.thy b/tools/autocorres/L2Peephole.thy index 68a2eeab24..6fbfb7a060 100644 --- a/tools/autocorres/L2Peephole.thy +++ b/tools/autocorres/L2Peephole.thy @@ -56,8 +56,8 @@ lemma L2_unknown_bind [L2opt]: apply (rule ext) apply (clarsimp simp: L2_seq_def L2_unknown_def) apply (clarsimp simp: liftE_def select_def bindE_def) - apply (clarsimp simp: NonDetMonad.lift_def bind_def) - apply (clarsimp simp: NonDetMonad.bind_def split_def) + apply (clarsimp simp: Nondet_Monad.lift_def bind_def) + apply (clarsimp simp: Nondet_Monad.bind_def split_def) apply (rule prod_eqI) apply (rule set_eqI) apply (clarsimp) @@ -126,7 +126,7 @@ lemma L2_fail_propagate_catch [L2opt]: apply (clarsimp simp: bindE_def) apply (clarsimp simp: handleE'_def handleE_def) apply (clarsimp simp: bind_assoc) - apply (rule arg_cong [where f="NonDetMonad.bind L"]) + apply (rule arg_cong [where f="Nondet_Monad.bind L"]) apply (rule ext)+ apply (clarsimp split: sum.splits) apply (clarsimp simp: throwError_def) diff --git a/tools/autocorres/MonadMono.thy b/tools/autocorres/MonadMono.thy index 25a083eed8..0463d06834 100644 --- a/tools/autocorres/MonadMono.thy +++ b/tools/autocorres/MonadMono.thy @@ -12,7 +12,8 @@ theory MonadMono imports NonDetMonadEx - "Lib.OptionMonadWP" + Monads.Nondet_While_Loop_Rules_Completeness + Monads.Reader_Option_VCG begin (* @@ -151,7 +152,7 @@ lemma monad_mono_step_bindE: apply (unfold bindE_def) apply (rule monad_mono_step_bind) apply simp - apply (monad_eq simp: monad_mono_step_def NonDetMonad.lift_def + apply (monad_eq simp: monad_mono_step_def Nondet_Monad.lift_def split: sum.splits) done @@ -284,15 +285,11 @@ definition "option_monad_mono f \ lemma option_monad_mono_eq: "(\m. 
f m = gets_the (f' m)) \ monad_mono f = option_monad_mono f'" apply (clarsimp simp: monad_mono_def option_monad_mono_def gets_the_def - gets_def get_def assert_opt_def return_def fail_def bind_def' split: option.splits) - apply (rule iff_allI iff_impI)+ - apply (rule_tac t = "\r. f' x s = Some r \ (\r'. f' y s = Some r') \ (\r'. f' y s = Some r' \ r = r')" - and s = "\r. f' x s = Some r \ f' y s = Some r" in subst) - apply (force intro: iff_allI iff_impI) - apply (rule iffI) - apply (metis (no_types) option.exhaust) - apply force - done + gets_def get_def assert_opt_def return_def fail_def bind_def' + split: option.splits) + apply (intro iff_allI iffI impI allI) + apply (metis option.collapse) + by fastforce lemma measure_ocall_ovalid [wp]: "\ \ m. ovalid P (x m) Q; option_monad_mono x \ \ ovalid P (measure_ocall x) Q" diff --git a/lib/NatBitwise.thy b/tools/autocorres/NatBitwise.thy similarity index 62% rename from lib/NatBitwise.thy rename to tools/autocorres/NatBitwise.thy index c2ac5293ca..7de19e00fa 100644 --- a/lib/NatBitwise.thy +++ b/tools/autocorres/NatBitwise.thy @@ -4,11 +4,11 @@ * SPDX-License-Identifier: BSD-2-Clause *) -(* Instance of bit ops for nat. Used by HaskellLib and AutoCorres. - * Lemmas about this instance should also go here. *) +(* Instance of bit ops for nat. + Lemmas about this instance should also go here. *) theory NatBitwise imports - Lib + Word_Lib.WordSetup begin instantiation nat :: lsb @@ -50,10 +50,20 @@ lemma nat_2p_eq_shiftl: lemmas shiftl_nat_alt_def = shiftl_nat_def +lemma nat_int_mul: + "nat (int a * b) = a * nat b" + by (simp add: nat_mult_distrib) + lemma shiftl_nat_def: "(x::nat) << y = nat (int x << y)" by (simp add: nat_int_mul push_bit_eq_mult shiftl_def) +lemma int_shiftl_less_cancel: + "n \ m \ ((x :: int) << n < y << m) = (x < y << (m - n))" + apply (drule le_Suc_ex) + apply (clarsimp simp: shiftl_int_def power_add) + done + lemma nat_shiftl_less_cancel: "n \ m \ ((x :: nat) << n < y << m) = (x < y << (m - n))" apply (simp add: nat_int_comparison(2) shiftl_nat_def shiftl_def) @@ -69,4 +79,16 @@ lemma nat_shiftl_lt_2p_bits: lemmas nat_eq_test_bit = bit_eq_iff lemmas nat_eq_test_bitI = bit_eq_iff[THEN iffD2, rule_format] +lemma int_2p_eq_shiftl: + "(2::int)^x = 1 << x" + by (simp add: shiftl_int_def) + +lemma int_shiftl_lt_2p_bits: + "0 \ (x::int) \ x < 1 << n \ \i \ n. \ x !! i" + apply (clarsimp simp: shiftl_int_def) + by (metis bit_take_bit_iff not_less take_bit_int_eq_self_iff) +\ \TODO: The converse should be true as well, but seems hard to prove.\ + +lemmas int_eq_test_bitI = bin_eq_iff[THEN iffD2, rule_format] + end \ No newline at end of file diff --git a/tools/autocorres/NonDetMonadEx.thy b/tools/autocorres/NonDetMonadEx.thy index 2d33c76729..3f836354cb 100644 --- a/tools/autocorres/NonDetMonadEx.thy +++ b/tools/autocorres/NonDetMonadEx.thy @@ -11,8 +11,12 @@ theory NonDetMonadEx imports "Word_Lib.WordSetup" - "Lib.NonDetMonadLemmaBucket" - "Lib.OptionMonadND" + "Monads.Nondet_VCG" + "Monads.Nondet_Monad_Equations" + "Monads.Nondet_More_VCG" + "Monads.Nondet_No_Throw" + "Monads.Nondet_No_Fail" + "Monads.Nondet_Reader_Option" begin (* @@ -82,7 +86,7 @@ lemma when_wp_nf [wp]: \ \ if P then Q else R () \ when P f \ R \!" by (monad_eq simp: validNF_def valid_def no_fail_def) -lemmas [wp] = hoare_whenE_wp +lemmas [wp] = whenE_wp lemma gets_the_wp_nf [wp]: "\\s. (f s \ None) \ Q (the (f s)) s\ gets_the f \Q\!" @@ -272,7 +276,7 @@ lemma whileLoop_to_fold: (\r. return (Q r)) i s) = return (if P i \ x then fold (\i r. 
(Q r)) [unat (P i) ..< unat x] i else i) s" (is "?LHS s = return (?RHS x) s") - apply (subst OptionMonadND.gets_the_return [symmetric]) + apply (subst gets_the_return [symmetric]) apply (subst gets_the_whileLoop) apply (rule gets_the_to_return) apply (subst owhile_to_fold) diff --git a/tools/autocorres/Polish.thy b/tools/autocorres/Polish.thy index bdfc1cd4c6..452e5c7830 100644 --- a/tools/autocorres/Polish.thy +++ b/tools/autocorres/Polish.thy @@ -475,9 +475,8 @@ lemma boringE_bind_K_bind [simp, polish]: done (* Misc *) - -declare pred_and_true_var [L2opt, polish] -declare pred_and_true [L2opt, polish] +declare pred_top_left_neutral [L2opt, polish] +declare pred_top_right_neutral [L2opt, polish] lemmas [polish] = rel_simps eq_numeral_extra diff --git a/tools/autocorres/README.md b/tools/autocorres/README.md index 6ecf1d1b70..e564e53413 100644 --- a/tools/autocorres/README.md +++ b/tools/autocorres/README.md @@ -15,8 +15,8 @@ in [Isabelle/HOL][1]. In particular, it uses Norrish's abstracts the result to produce a result that is (hopefully) more pleasant to reason about. - [1]: https://www.cl.cam.ac.uk/research/hvg/Isabelle/ - [2]: https://trustworthy.systems/software/TS/c-parser/ + [1]: https://isabelle.in.tum.de + [2]: https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md @@ -35,7 +35,7 @@ Contents of this README Installation ------------ -AutoCorres is packaged as a theory for Isabelle2021: +AutoCorres is packaged as a theory for Isabelle2022: https://isabelle.in.tum.de diff --git a/tools/autocorres/ROOT b/tools/autocorres/ROOT index f66d895cd5..58946f4f89 100644 --- a/tools/autocorres/ROOT +++ b/tools/autocorres/ROOT @@ -11,8 +11,7 @@ session AutoCorres = CParser + sessions "HOL-Eisbach" - Lib - CLib + Monads theories "DataStructures" "AutoCorres" diff --git a/tools/autocorres/TypHeapSimple.thy b/tools/autocorres/TypHeapSimple.thy index 3fcc98a9f1..d5e303c10a 100644 --- a/tools/autocorres/TypHeapSimple.thy +++ b/tools/autocorres/TypHeapSimple.thy @@ -15,7 +15,7 @@ theory TypHeapSimple imports - "CLib.TypHeapLib" + "CParser.TypHeapLib" begin (* @@ -512,7 +512,7 @@ lemma simple_lift_field_update': and xf_xfu: "fg_cons xf xfu" and cl: "simple_lift hp ptr = Some z" shows "(simple_lift (hrs_mem_update (heap_update (Ptr &(ptr\f)) val) hp)) = - simple_lift hp(ptr \ xfu val z)" + (simple_lift hp)(ptr \ xfu val z)" (is "?LHS = ?RHS") proof (rule ext) fix p @@ -581,7 +581,7 @@ lemma simple_lift_field_update: and xf_xfu: "fg_cons xf (xfu o (\x _. x))" and cl: "simple_lift hp ptr = Some z" shows "(simple_lift (hrs_mem_update (heap_update (Ptr &(ptr\f)) val) hp)) = - simple_lift hp(ptr \ xfu (\_. val) z)" + (simple_lift hp)(ptr \ xfu (\_. val) z)" (is "?LHS = ?RHS") apply (insert fl [unfolded field_ti_def]) apply (clarsimp split: option.splits) diff --git a/tools/autocorres/TypeStrengthen.thy b/tools/autocorres/TypeStrengthen.thy index 5f67a997f5..5106ca596c 100644 --- a/tools/autocorres/TypeStrengthen.thy +++ b/tools/autocorres/TypeStrengthen.thy @@ -14,7 +14,6 @@ theory TypeStrengthen imports L2Defs - "Lib.OptionMonadND" ExecConcrete begin @@ -101,6 +100,9 @@ lemma TS_return_L2_condition: "L2_condition (\_. c) (TS_return A) (TS_return B) = TS_return (if c then A else B)" by (monad_eq simp: L2_defs TS_return_def) +lemma split_distrib: "case_prod (\a b. T (f a b)) = (\x. T (case_prod (\a b. 
f a b) x))" + by (clarsimp simp: split_def) + lemmas [ts_rule pure] = TS_return_L2_gets TS_return_L2_seq diff --git a/tools/autocorres/WordAbstract.thy b/tools/autocorres/WordAbstract.thy index 5c58e4f84c..aa1eabe3d2 100644 --- a/tools/autocorres/WordAbstract.thy +++ b/tools/autocorres/WordAbstract.thy @@ -8,7 +8,7 @@ theory WordAbstract imports L2Defs ExecConcrete - Lib.NatBitwise + NatBitwise begin definition "WORD_MAX x \ ((2 ^ (len_of x - 1) - 1) :: int)" @@ -249,8 +249,7 @@ lemma sint_bitwise_abstract_binops: lemma abstract_val_signed_bitNOT: "abstract_val P x sint (x' :: 'a::len signed word) \ abstract_val P (NOT x) sint (NOT x')" - by (fastforce intro: int_eq_test_bitI - simp: nth_sint bin_nth_ops word_nth_neq test_bit_def'[symmetric] test_bit_wi[where 'a="'a signed"]) + by (fastforce intro: int_eq_test_bitI simp: min_less_iff_disj) lemma abstract_val_signed_unary_minus: "\ abstract_val P r sint r' \ \ @@ -322,8 +321,9 @@ lemma sint_shiftl_nonneg: apply (drule (1) int_shiftl_lt_2p_bits[rotated]) apply (clarsimp simp: min_def split: if_split_asm) apply (rule conjI; clarsimp) - apply (smt (z3) decr_length_less_iff diff_Suc_Suc diff_is_0_eq diff_le_mono diff_le_self - diff_zero le_def less_handy_casesE nat_less_le order_refl) + apply (smt (verit) One_nat_def bot_nat_0.extremum_uniqueI diff_Suc_eq_diff_pred + le_diff_iff le_diff_iff' len_gt_0 len_of_finite_1_def less_eq_Suc_le + nat_le_Suc_less_imp) using less_eq_decr_length_iff nat_le_linear by blast lemma abstract_val_signed_shiftl_signed: diff --git a/tools/autocorres/autocorres.ML b/tools/autocorres/autocorres.ML index 1f6bb9a4c2..6b768d7e21 100644 --- a/tools/autocorres/autocorres.ML +++ b/tools/autocorres/autocorres.ML @@ -114,7 +114,7 @@ fun named_option parser name elem_desc= expect "\"=\"" elem_desc parser) (* Generic parser for "NAME = STRING ..." 
*) -val named_opt = named_option (Scan.repeat Parse.text) +val named_opt = named_option (Scan.repeat Parse.embedded) (* Generic parser for "NAME = " *) val nat_opt = named_option Parse.nat @@ -145,7 +145,7 @@ val scope_depth_parser = (fn value => none_to_some (#scope_depth) value "autocorres: scope option specified multiple times") val c_locale_parser = - named_option Parse.text "c_locale" "locale name" >> + named_option Parse.embedded "c_locale" "locale name" >> (fn funcs => none_to_some (#c_locale) funcs "autocorres: c_locale option specified multiple times") @@ -156,8 +156,8 @@ val no_c_termination_parser = val ts_force_parser = ((Parse.reserved "ts_force" |-- expect "\"ts_force\"" "rule name" - (Parse.text :-- (fn name => expect name "\"=\"" (Parse.$$$ "="))) -- - Scan.repeat Parse.text)) >> + (Parse.embedded :-- (fn name => expect name "\"=\"" (Parse.$$$ "="))) -- + Scan.repeat Parse.embedded)) >> (fn ((rule, _), funcs) => fn opt => let val _ = @@ -213,22 +213,22 @@ val keep_going_parser = (fn _ => none_to_some (#keep_going) true "autocorres: keep_going option specified multiple times") val lifted_globals_field_prefix_parser = - named_option Parse.text "lifted_globals_field_prefix" "string" >> + named_option Parse.embedded "lifted_globals_field_prefix" "string" >> (fn funcs => none_to_some (#lifted_globals_field_prefix) funcs "autocorres: lifted_globals_field_prefix option specified multiple times") val lifted_globals_field_suffix_parser = - named_option Parse.text "lifted_globals_field_suffix" "string" >> + named_option Parse.embedded "lifted_globals_field_suffix" "string" >> (fn funcs => none_to_some (#lifted_globals_field_suffix) funcs "autocorres: lifted_globals_field_suffix option specified multiple times") val function_name_prefix_parser = - named_option Parse.text "function_name_prefix" "string" >> + named_option Parse.embedded "function_name_prefix" "string" >> (fn funcs => none_to_some (#function_name_prefix) funcs "autocorres: function_name_prefix option specified multiple times") val function_name_suffix_parser = - named_option Parse.text "function_name_suffix" "string" >> + named_option Parse.embedded "function_name_suffix" "string" >> (fn funcs => none_to_some (#function_name_suffix) funcs "autocorres: function_name_suffix option specified multiple times") @@ -291,7 +291,7 @@ let lifted_globals_field_suffix_parser || function_name_prefix_parser || function_name_suffix_parser) - |> !! (fn xs => K ("autocorres: unknown option " ^ quote (Parse.text (fst xs) |> #1))) + |> !! 
(fn xs => K ("autocorres: unknown option " ^ quote (Parse.embedded (fst xs) |> #1))) val options_parser = Parse.list option_parser >> (fn opt_fns => fold I opt_fns) in @@ -299,7 +299,7 @@ in (Scan.optional (Parse.$$$ "[" |-- options_parser --| Parse.$$$ "]") I >> (fn f => f (default_opts ()))) -- (* Filename *) - Parse.text + Parse.embedded end diff --git a/tools/autocorres/autocorres_trace.ML b/tools/autocorres/autocorres_trace.ML index 8522032283..ce2fd72279 100644 --- a/tools/autocorres/autocorres_trace.ML +++ b/tools/autocorres/autocorres_trace.ML @@ -171,25 +171,28 @@ fun my_unify_fact_tac ctxt subproof n state = case my_typ_match stateterm proofterm of NONE => Seq.empty | SOME typinsts => + \<^try>\ (case Thm.instantiate (TVars.make (map (fn (v, t) => (v, ctyp_of' t)) (Utils.nubBy fst typinsts)), Vars.empty) state of state' => let val stateterm' = nth (Thm.prems_of state') (n-1) in case my_match stateterm' proofterm of NONE => Seq.empty | SOME substs => + \<^try>\ let val substs' = Utils.nubBy #1 substs |> map (fn (var, args, t') => (var, my_lambda args t')) |> map (fn (v, t) => (v, cterm_of' t)) in - case Thm.instantiate (TVars.empty, Vars.make substs') state of state' => - (case Proof_Context.fact_tac ctxt [Variable.gen_all ctxt subproof] 1 state' |> Seq.pull of - NONE => Seq.empty - | r => Seq.make (fn () => r)) - handle _ => Seq.empty + \<^try>\ + case Thm.instantiate (TVars.empty, Vars.make substs') state of state' => + (case Proof_Context.fact_tac ctxt [Variable.gen_all ctxt subproof] 1 state' |> Seq.pull of + NONE => Seq.empty + | r => Seq.make (fn () => r)) + catch _ => Seq.empty\ end - handle _ => Seq.empty + catch _ => Seq.empty\ end) - handle _ => Seq.empty + catch _ => Seq.empty\ end end diff --git a/tools/autocorres/doc/quickstart/Chapter1_MinMax.thy b/tools/autocorres/doc/quickstart/Chapter1_MinMax.thy index 99aea61e16..25ad830ce8 100644 --- a/tools/autocorres/doc/quickstart/Chapter1_MinMax.thy +++ b/tools/autocorres/doc/quickstart/Chapter1_MinMax.thy @@ -69,8 +69,8 @@ text \ As mentioned earlier, AutoCorres does not handle C code directly. The first step is to apply the - C-Parser\footnote{\url{https://trustworthy.systems/software/TS/c-parser}} to - obtain a SIMPL translation. We do this using the \texttt{install-C-file} + C-Parser\footnote{\url{https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md}} + to obtain a SIMPL translation. We do this using the \texttt{install-C-file} command in Isabelle, as shown. 
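(Editorial aside, not part of the patch: a minimal sketch of the workflow the quickstart paragraph above describes, assuming a C source file named minmax.c next to the theory and a theory name invented here for illustration. The external_file, install_C_file and autocorres commands are the same ones appearing elsewhere in this patch; exact session setup and option syntax may differ between releases.)

    theory MinMax_Demo
    imports "AutoCorres.AutoCorres"
    begin

    (* Make the C source visible to the Isabelle session. *)
    external_file "minmax.c"

    (* C parser step: produces the SIMPL translation of minmax.c. *)
    install_C_file "minmax.c"

    (* AutoCorres step: abstracts the SIMPL code into monadic HOL definitions. *)
    autocorres "minmax.c"

    end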
\ diff --git a/tools/autocorres/doc/quickstart/ROOT b/tools/autocorres/doc/quickstart/ROOT index 12684d828b..87a190f211 100644 --- a/tools/autocorres/doc/quickstart/ROOT +++ b/tools/autocorres/doc/quickstart/ROOT @@ -5,7 +5,7 @@ *) session "AutoCorresQuickstart" = "AutoCorres" + - options [document = pdf, document_output = output, show_question_marks = false] + options [document = pdf, document_output = "output", show_question_marks = false] theories Chapter1_MinMax Chapter2_HoareHeap diff --git a/tools/autocorres/doc/quickstart/document/root.bib b/tools/autocorres/doc/quickstart/document/root.bib index 78fdd1bbec..4457e72d2d 100644 --- a/tools/autocorres/doc/quickstart/document/root.bib +++ b/tools/autocorres/doc/quickstart/document/root.bib @@ -25,7 +25,7 @@ @misc{CParser_download title = {{C-to-Isabelle} Parser, version 1.13.0}, year = 2013, month = may, - url = {https://trustworthy.systems/software/TS/c-parser/}, + url = {https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md}, note = {Accessed May 2016} } @@ -44,7 +44,7 @@ @article{Simpl-AFP journal = {Archive of Formal Proofs}, month = feb, year = 2008, - url = {https://www.isa-afp.org/entries/Simpl.shtml}, + url = {https://www.isa-afp.org/entries/Simpl.html}, note = {Formal proof development}, ISSN = {2150-914X}, } @@ -55,7 +55,7 @@ @article{Separation_Algebra-AFP journal = {Archive of Formal Proofs}, month = may, year = 2012, - url = {https://www.isa-afp.org/entries/Separation_Algebra.shtml}, + url = {https://www.isa-afp.org/entries/Separation_Algebra.html}, note = {Formal proof development}, ISSN = {2150-914x}, } diff --git a/tools/autocorres/local_var_extract.ML b/tools/autocorres/local_var_extract.ML index 9df3cbaf30..7d3812766b 100644 --- a/tools/autocorres/local_var_extract.ML +++ b/tools/autocorres/local_var_extract.ML @@ -32,7 +32,7 @@ val the' = Utils.the' (* Simpset we use for automated tactics. *) fun setup_l2_ss ctxt = put_simpset AUTOCORRES_SIMPSET ctxt - addsimps [@{thm pred_conj_def}] + addsimps @{thms pred_conj_def} (* Convert a set of variable names into an Isabelle list of strings. 
*) fun var_set_to_isa_list prog_info s = @@ -748,7 +748,7 @@ fun inject_return_vals ctxt prog_info name_map needed_returns allow_excess throw in mk_corresXF_thm' ctxt prog_info name_map needed_returns throw_vars (vars_read UNION preserved_vals) generated_term l1_term - (@{thm L2corres_inject_return} OF [thm, @{thm validE_weaken} OF [preserve_proof]]) + (@{thm L2corres_inject_return} OF [thm, @{thm hoare_chainE} OF [preserve_proof]]) end in (vars_read UNION preserved_vals, needed_returns, generated_term, generated_thm) @@ -950,7 +950,7 @@ in in mkthm block_reads rhs_rets generated_term (@{thm L2corres_seq} OF [lhs_thm, rhs_thm, - @{thm validE_weaken} OF [preserve_proof]]) + @{thm hoare_chainE} OF [preserve_proof]]) end in inject (block_reads, rhs_rets, generated_term, thm) @@ -986,7 +986,7 @@ in val preserve_proof = mk_multivar_preservation_proof ctxt prog_info name_map lhs_term needed_preserves in mkthm block_reads needed_vars generated_term - (@{thm L2corres_catch} OF [lhs_thm, rhs_thm, @{thm validE_weaken} OF [preserve_proof]]) + (@{thm L2corres_catch} OF [lhs_thm, rhs_thm, @{thm hoare_chainE} OF [preserve_proof]]) end in inject (block_reads, needed_vars, generated_term, thm) @@ -1071,7 +1071,7 @@ in ] @{thm L2corres_while} in mkthm (body_reads UNION read_vars UNION loop_iterators) loop_iterators generated_term - (base_thm OF [body_thm, @{thm validE_weaken} OF [preserve_proof]]) + (base_thm OF [body_thm, @{thm hoare_chainE} OF [preserve_proof]]) end in inject (body_reads UNION read_vars UNION loop_iterators, loop_iterators, generated_term, thm) @@ -1354,7 +1354,7 @@ fun get_l2corres_thm ctxt prog_info l1_infos l1_call_info do_opt trace_opt fn_na |> apply_tac "solve main goal" (resolve_tac ctxt [thm] 1) |> apply_tac "solve guard_imp" (REPEAT (FIRST [ resolve_tac ctxt @{thms HOL.refl} 1, - resolve_tac ctxt @{thms pred_andI} 1, + resolve_tac ctxt @{thms pred_conjI} 1, resolve_tac ctxt @{thms conjI} 1, CHANGED (asm_full_simp_tac (setup_l2_ss ctxt) 1)])) |> Goal.finish ctxt diff --git a/tools/autocorres/tests/examples/FactorialTest.thy b/tools/autocorres/tests/examples/FactorialTest.thy index f571d43684..0cad8e4130 100644 --- a/tools/autocorres/tests/examples/FactorialTest.thy +++ b/tools/autocorres/tests/examples/FactorialTest.thy @@ -10,7 +10,7 @@ Termination for recursive functions. theory FactorialTest imports "AutoCorres.AutoCorres" - "Lib.OptionMonadWP" + "Monads.Reader_Option_VCG" begin external_file "factorial.c" @@ -47,7 +47,7 @@ proof (induct n arbitrary: m rule: less_induct) show "no_ofail (\_. 
unat x < m) (factorial' m x)" apply (subst factorial'.simps) - apply (wp induct_asm ovalid_post_triv) + apply (wpsimp wp: induct_asm ovalid_post_triv) apply unat_arith done qed diff --git a/tools/autocorres/tests/examples/Memcpy.thy b/tools/autocorres/tests/examples/Memcpy.thy index 894d2fc92b..720c482e11 100644 --- a/tools/autocorres/tests/examples/Memcpy.thy +++ b/tools/autocorres/tests/examples/Memcpy.thy @@ -182,8 +182,7 @@ lemma memcpy_word: apply clarsimp apply (clarsimp simp: h_val_def)[1] apply (rule arg_cong[where f=from_bytes]) - apply (subst numeral_eqs(3))+ - apply simp + apply (simp add: numeral_nat) apply (rule_tac x=0 in allE, assumption, erule impE, unat_arith) apply (rule_tac x=1 in allE, assumption, erule impE, unat_arith) apply (rule_tac x=2 in allE, assumption, erule impE, unat_arith) @@ -383,7 +382,8 @@ lemma update_bytes_postpend: "length bs = x + 1 \ apply (clarsimp simp:ptr_add_def) apply (subst heap_update_list_concat_fold_hrs_mem) apply clarsimp+ - by (metis append_eq_conv_conj append_self_conv hd_drop_conv_nth2 lessI take_hd_drop) + apply (metis append.right_neutral append_eq_conv_conj lessI take_Suc_conv_app_nth) + done lemma h_val_not_id_general: fixes y :: "'a::mem_type ptr" @@ -600,8 +600,7 @@ lemma memcpy_wp': apply clarsimp apply (rule update_bytes_eq) apply (subgoal_tac "min (unat i) (unat (i + 1)) = unat i") - apply clarsimp - apply clarsimp + apply presburger apply unat_arith apply (clarsimp simp:ptr_add_def) apply clarsimp diff --git a/tools/autocorres/tests/proof-tests/word_abs_options.thy b/tools/autocorres/tests/proof-tests/word_abs_options.thy index cc02f7b90c..e6c326608e 100644 --- a/tools/autocorres/tests/proof-tests/word_abs_options.thy +++ b/tools/autocorres/tests/proof-tests/word_abs_options.thy @@ -18,22 +18,22 @@ context word_abs_options begin lemma "\ \ \ isum1' (a :: sword32) (b :: sword32) \ \r _. r = a + b \" unfolding isum1'_def - apply (wp refl) + apply (wp refl impI) done lemma "\ \ \ isum2' (a :: int) (b :: int) \ \r _. r = a + b \" unfolding isum2'_def - apply (wp refl) + apply (wp refl impI) done lemma "\ \ \ usum1' (a :: word32) (b :: word32) \ \r _. r = a + b \" unfolding usum1'_def - apply (wp refl) + apply (wp refl impI) done lemma "\ \ \ usum2' (a :: nat) (b :: nat) \ \r _. r = a + b \" unfolding usum2'_def - apply (wp refl) + apply (wp refl impI) done end diff --git a/tools/autocorres/tools/release.py b/tools/autocorres/tools/release.py index b323c6acdf..7acfbcb06f 100755 --- a/tools/autocorres/tools/release.py +++ b/tools/autocorres/tools/release.py @@ -125,7 +125,7 @@ def copy_manifest(output_dir, manifest_file, manifest_base, target): parser.add_argument('-r', '--repository', metavar='REPO', type=str, help='Path to the L4.verified repository base.', default=None) parser.add_argument('--archs', metavar='ARCH,...', - type=str, default='ARM,ARM_HYP,X64,RISCV64', + type=str, default='ARM,ARM_HYP,X64,RISCV64,AARCH64', help='L4V_ARCHs to include (comma-separated)') args = parser.parse_args() @@ -200,9 +200,10 @@ def copy_manifest(output_dir, manifest_file, manifest_base, target): shutil.copyfile(f_src, f_dest) # Copy various other files. 
- shutil.copyfile( - os.path.join(args.repository, 'lib', 'Word_Lib', 'ROOT'), - os.path.join(target_dir, 'lib', 'Word_Lib', 'ROOT')) + for session in ['Basics', 'Eisbach_Tools', 'ML_Utils', 'Monads', 'Word_Lib']: + shutil.copyfile( + os.path.join(args.repository, 'lib', session, 'ROOT'), + os.path.join(target_dir, 'lib', session, 'ROOT')) shutil.copyfile( os.path.join(release_files_dir, "ROOT.release"), os.path.join(target_dir, "autocorres", "ROOT")) @@ -222,37 +223,11 @@ def copy_manifest(output_dir, manifest_file, manifest_base, target): os.path.join(args.repository, "LICENSES"), os.path.join(target_dir, "LICENSES")) - # Extract dependent sessions in lib. FIXME: rather kludgy - print('Extracting sessions from lib/ROOT...') - # Set up ROOT for the tests dir, for the thydeps tool subprocess.check_call( ['make', 'tests/ROOT'], cwd=os.path.join(args.repository, 'tools', 'autocorres')) - lib_sessions = ['Lib', 'CLib'] - lib_ROOT = os.path.join(args.repository, 'lib', 'ROOT') - with open(lib_ROOT, 'r') as lib_root: - data = lib_root.read() - # Split out session specs. Assume ROOT file has standard indentation. - chunks = data.split('\nsession ') - # This will have the license header, etc. - header = chunks[0] - # Remaining sections. Try to remove comments - sessions = ['session ' + re.sub(r'\(\*.*?\*\)', '', x, flags=re.DOTALL) - for x in chunks[1:]] - - new_root = header - wanted_specs = {} - for wanted in lib_sessions: - spec = [spec for spec in sessions if spec.startswith('session %s ' % wanted)] - if len(spec) != 1: - print('error: %s session not found in %r' % (wanted, lib_ROOT)) - new_root += '\n' + spec[0] - - with open(os.path.join(target_dir, 'lib', 'ROOT'), 'w') as root_f: - root_f.write(new_root) - # For the examples, generate ".thy" files where appropriate, and also # generate an "All.thy" which contains all the examples. def gen_thy_file(c_file): diff --git a/tools/autocorres/tools/release_files/AUTOCORRES_FILES b/tools/autocorres/tools/release_files/AUTOCORRES_FILES index 1b492950bd..d9716dc5a7 100644 --- a/tools/autocorres/tools/release_files/AUTOCORRES_FILES +++ b/tools/autocorres/tools/release_files/AUTOCORRES_FILES @@ -33,6 +33,7 @@ exception_rewrite.ML: Proof frameworks and code to rewrite monadic specifications to avoid using exceptions where possible. +NatBitwise.thy: WordAbstract.thy: word_abstract.ML: Word abstraction framework and theorems. diff --git a/tools/autocorres/tools/release_files/CONTRIBUTORS b/tools/autocorres/tools/release_files/CONTRIBUTORS index 9b66767c25..9cb732548b 100644 --- a/tools/autocorres/tools/release_files/CONTRIBUTORS +++ b/tools/autocorres/tools/release_files/CONTRIBUTORS @@ -1,19 +1,21 @@ -Core Development Team ---------------------- +Core Developers +--------------- David Greenaway (inactive) - Japheth Lim + Japheth Lim (inactive) + + Gerwin Klein (maintenance) Contributions ------------- - Lars Noschinski + Lars Noschinski "owhile" definitions and related rules, as well as many other contributions to the proof libraries. - Matthew Brecknell + Matthew Brecknell (inactive) Maintenance; integration with seL4's C refinement framework. 
diff --git a/tools/autocorres/tools/release_files/ChangeLog b/tools/autocorres/tools/release_files/ChangeLog index 788709b726..e95f216e9b 100644 --- a/tools/autocorres/tools/release_files/ChangeLog +++ b/tools/autocorres/tools/release_files/ChangeLog @@ -1,38 +1,55 @@ +AutoCorres Change Log +===================== + +AutoCorres 1.10 (3 Nov 2023) +---------------------------- + + * Isabelle2023 edition of both AutoCorres and the C parser. + + * Restructured and cleaned up monad libraries. Removed dependencies + on unrelated l4v libraries. + +AutoCorres 1.9 (31 October 2022) +-------------------------------- + + * Isabelle2021-1 edition of both AutoCorres and the C parser. + AutoCorres 1.8 (31 October 2021) --------------- +-------------------------------- - * Isabelle2021 edition of both AutoCorres and the C parser. + * Isabelle2021 edition of both AutoCorres and the C parser. AutoCorres 1.7 (2 November 2020) --------------- +-------------------------------- - * Isabelle2020 edition of both AutoCorres and the C parser. + * Isabelle2020 edition of both AutoCorres and the C parser. - * Slight updates to wp: use "wp (once)" instead of "wp_once" + * Slight updates to wp: use "wp (once)" instead of "wp_once" AutoCorres 1.6.1 (3 October 2019) ----------------- - * Correct license for a C parser file. No code changes. +--------------------------------- + + * Correct license for a C parser file. No code changes. AutoCorres 1.6 (5 September 2019) --------------- +---------------------------------- - * Isabelle2019 edition of both AutoCorres and the C parser. + * Isabelle2019 edition of both AutoCorres and the C parser. - * Word abstraction has been extended to C bitwise operators. + * Word abstraction has been extended to C bitwise operators. AutoCorres 1.5 (10 September 2018) --------------- +---------------------------------- - * Isabelle2018 edition of both AutoCorres and the C parser. + * Isabelle2018 edition of both AutoCorres and the C parser. AutoCorres 1.4 (2 March 2018) --------------- +----------------------------- * Isabelle2017 edition of both AutoCorres and the C parser. AutoCorres 1.3 (3 April 2017) --------------- +----------------------------- * Isabelle2016-1 edition of both AutoCorres and the C parser. @@ -41,7 +58,7 @@ AutoCorres 1.3 (3 April 2017) must be selected using L4V_ARCH environment variable. AutoCorres 1.2 (31 March 2016) --------------- +------------------------------ * Isabelle2016 edition of both AutoCorres and the C parser. @@ -54,7 +71,7 @@ AutoCorres 1.2 (31 March 2016) * Several minor bug fixes and improvements. AutoCorres 1.1 (9 Oct 2015) --------------- +--------------------------- * Isabelle2015 edition of both AutoCorres and the C parser. @@ -72,7 +89,7 @@ AutoCorres 1.1 (9 Oct 2015) AutoCorres 1.0 (16 Dec 2014) --------------- +---------------------------- * New option “no_opt” to turn off simplifier stages. (Experimental) diff --git a/tools/autocorres/tools/release_files/README b/tools/autocorres/tools/release_files/README index 2a5430221f..45a68444ee 100644 --- a/tools/autocorres/tools/release_files/README +++ b/tools/autocorres/tools/release_files/README @@ -7,8 +7,8 @@ in [Isabelle/HOL][1]. In particular, it uses Norrish's abstracts the result to produce a result that is (hopefully) more pleasant to reason about. 
- [1]: https://www.cl.cam.ac.uk/research/hvg/Isabelle/ - [2]: https://trustworthy.systems/software/TS/c-parser/ + [1]: https://isabelle.in.tum.de/ + [2]: https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md @@ -28,11 +28,11 @@ Contents of this README Installation ------------ -AutoCorres is packaged as a theory for Isabelle2021: +AutoCorres is packaged as a theory for Isabelle2023: https://isabelle.in.tum.de -AutoCorres currently supports three platforms: ARM, X64, and RISCV64. +AutoCorres currently supports four platforms: ARM, AARCH64, X64, and RISCV64. The platform determines the sizes of C integral and pointer types. For ARM, the sizes are: @@ -40,6 +40,11 @@ For ARM, the sizes are: - 32 bits: pointers, long, int - 16 bits: short +For AARCH64: + - 64 bits: pointers, long long, long + - 32 bits: int + - 16 bits: short + For X64: - 64 bits: pointers, long long, long - 32 bits: int @@ -102,12 +107,12 @@ This package contains: * Michael Norrish's C parser, used to translate C code into Isabelle: - https://trustworthy.systems/software/TS/c-parser/ + https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md * Norbert Schirmer's Simpl language and associated VCG tool. The C parser translates C into Schirmer's Simpl language: - https://www.isa-afp.org/entries/Simpl.shtml + https://www.isa-afp.org/entries/Simpl.html * Code from SML/NJ, including an implementation of binary sets (Binaryset.ML) and the mllex and mlyacc tools @@ -120,9 +125,9 @@ This package contains: * Compatibility word libraries and associated lemmas, for assisting with reasoning about words (such as 32-bit words). - * Libraries from Data61 for defining and reasoning about monads, - including definitions for nondeterministic state monads and option - monads, along with a large proof library relating to these + * Libraries from the l4v repository for defining and reasoning about + monads, including definitions for nondeterministic state monads and + option monads, along with a large proof library relating to these definitions. 
* The "wp" weakest precondition tool, which can be used to diff --git a/tools/autocorres/tools/release_files/ROOT.release b/tools/autocorres/tools/release_files/ROOT.release index cbce7e7a8a..f6eb34b726 100644 --- a/tools/autocorres/tools/release_files/ROOT.release +++ b/tools/autocorres/tools/release_files/ROOT.release @@ -8,15 +8,14 @@ session AutoCorres = CParser + sessions "HOL-Eisbach" - Lib - CLib + Monads theories + "DataStructures" "AutoCorres" session AutoCorresTest in "tests" = AutoCorres + sessions "HOL-Number_Theory" - AutoCorres directories "parse-tests" "proof-tests" diff --git a/tools/autocorres/tools/release_files/ROOTS.base_dir b/tools/autocorres/tools/release_files/ROOTS.base_dir index 0d830b5b99..caf4ca8376 100644 --- a/tools/autocorres/tools/release_files/ROOTS.base_dir +++ b/tools/autocorres/tools/release_files/ROOTS.base_dir @@ -1,4 +1,7 @@ -lib +lib/Basics +lib/Eisbach_Tools +lib/ML_Utils +lib/Monads lib/Word_Lib c-parser autocorres diff --git a/tools/autocorres/utils.ML b/tools/autocorres/utils.ML index 4190c39c6b..66a28427b5 100644 --- a/tools/autocorres/utils.ML +++ b/tools/autocorres/utils.ML @@ -459,7 +459,7 @@ fun term_fold_map_top f x = *) fun simp_map f = Context.map_proof ( - Local_Theory.declaration {syntax = false, pervasive = false} ( + Local_Theory.declaration {syntax = false, pervasive = false, pos = @{here}} ( K (Simplifier.map_ss f))) |> Context.proof_map @@ -678,7 +678,7 @@ fun isa_str_list_to_ml t = fun chain_preds stateT [] = Abs ("s", stateT, @{term "HOL.True"}) | chain_preds _ [x] = x | chain_preds stateT (x::xs) = - Const (@{const_name "pred_conj"}, + Const (@{const_name "inf_class.inf"}, (stateT --> @{typ bool}) --> (stateT --> @{typ bool}) --> (stateT --> @{typ bool})) $ x $ (chain_preds stateT xs) @@ -950,8 +950,9 @@ fun solved_tac thm = (* Convenience function for making simprocs. *) fun mk_simproc' ctxt (name : string, pats : string list, proc : Proof.context -> cterm -> thm option) = let - in Simplifier.make_simproc ctxt name - {lhss = map (Proof_Context.read_term_pattern ctxt) pats, + in Simplifier.make_simproc ctxt + {name=name, identifier=[], + lhss = map (Proof_Context.read_term_pattern ctxt) pats, proc = K proc} end (* Get named_theorems in reverse order. 
We use this for translation rulesets diff --git a/tools/c-parser/CProof.thy b/tools/c-parser/CProof.thy index 834f264f54..767940384d 100644 --- a/tools/c-parser/CProof.thy +++ b/tools/c-parser/CProof.thy @@ -453,4 +453,37 @@ qed lemmas hoarep_Seq_nothrow = hoarep_Seq[OF empty_subsetI subset_refl] +lemma intvl_nowrap: + "\y \ 0; unat y + z \ 2 ^ len_of TYPE('a)\ \ x \ {x + y ..+ z}" for x :: "'a :: len word" + apply clarsimp + apply (drule intvlD) + apply clarsimp + apply (simp add: unat_arith_simps) + apply (simp split: if_split_asm) + apply (simp add: unat_of_nat) + done + +lemma is_aligned_ptr_aligned: + fixes p :: "'a :: c_type ptr" + assumes al: "is_aligned (ptr_val p) n" + and alignof: "align_of TYPE('a) = 2 ^ n" + shows "ptr_aligned p" + using al unfolding is_aligned_def ptr_aligned_def + by (simp add: alignof) + +lemma is_aligned_c_guard: + "is_aligned (ptr_val p) n + \ ptr_val p \ 0 + \ align_of TYPE('a) = 2 ^ m + \ size_of TYPE('a) \ 2 ^ n + \ m \ n + \ c_guard (p :: 'a :: c_type ptr)" + apply (clarsimp simp: c_guard_def c_null_guard_def) + apply (rule conjI) + apply (rule is_aligned_ptr_aligned, erule(1) is_aligned_weaken, simp) + apply (erule is_aligned_get_word_bits, simp_all) + apply (rule intvl_nowrap[where x=0, simplified], simp) + apply (erule is_aligned_no_wrap_le, simp+) + done + end diff --git a/tools/c-parser/CTranslation.thy b/tools/c-parser/CTranslation.thy index bdec639fb9..01bb857af7 100644 --- a/tools/c-parser/CTranslation.thy +++ b/tools/c-parser/CTranslation.thy @@ -11,7 +11,7 @@ imports "StaticFun" "IndirectCalls" "ModifiesProofs" - "Lib.MLUtils" + "ML_Utils.ML_Utils" "HOL-Eisbach.Eisbach" keywords "cond_sorry_modifies_proofs" diff --git a/tools/c-parser/HPInter.ML b/tools/c-parser/HPInter.ML index 5e49280630..a7a2034561 100644 --- a/tools/c-parser/HPInter.ML +++ b/tools/c-parser/HPInter.ML @@ -344,8 +344,8 @@ val globalsN = "_global_addresses" fun add_syntax (name,recp,inpars,outpars) thy = let open HoarePackage val name = suffix HoarePackage.proc_deco name - val pars = map (fn par => (In,varname par)) inpars@ - map (fn par => (Out,varname par)) outpars + val pars = map (fn par => (In, varname par, NONE)) inpars@ + map (fn par => (Out, varname par, NONE)) outpars val thy_decl = thy |> Context.theory_map (add_params Morphism.identity name pars) diff --git a/tools/c-parser/INSTALL.md b/tools/c-parser/INSTALL.md index 0fb46808f0..67f05281c0 100644 --- a/tools/c-parser/INSTALL.md +++ b/tools/c-parser/INSTALL.md @@ -9,7 +9,7 @@ NB: These instructions apply to the stand-alone release of the C parser. If this is in an L4.verified checkout, see the top-level README.md instead. -This code requires Isabelle2021 and the MLton SML compiler. +This code requires Isabelle2023 and the MLton SML compiler. 
The C parser supports multiple target architectures: diff --git a/lib/clib/LemmaBucket_C.thy b/tools/c-parser/LemmaBucket_C.thy similarity index 97% rename from lib/clib/LemmaBucket_C.thy rename to tools/c-parser/LemmaBucket_C.thy index 2e460c6478..7a505ca177 100644 --- a/lib/clib/LemmaBucket_C.thy +++ b/tools/c-parser/LemmaBucket_C.thy @@ -6,14 +6,10 @@ theory LemmaBucket_C imports - "Lib.Lib" - "Word_Lib.WordSetup" + Basics.CLib TypHeapLib - "CParser.ArrayAssertion" begin -declare word_neq_0_conv [simp del] - lemma Ptr_not_null_pointer_not_zero: "(Ptr p \ NULL)=(p\0)" by simp @@ -56,7 +52,7 @@ lemma to_bytes_word8: lemma byte_ptr_guarded:"ptr_val (x::8 word ptr) \ 0 \ c_guard x" unfolding c_guard_def c_null_guard_def ptr_aligned_def - by (clarsimp simp: intvl_Suc) + by (clarsimp simp: intvl_Suc simp del: word_neq_0_conv) lemma heap_update_list_append: fixes v :: word8 @@ -125,7 +121,8 @@ next apply (erule dvd_mult_left) done - hence "2 ^ bits dvd kp" by (simp add: dvd_reduce_multiple) + hence "2 ^ bits dvd kp" + by (simp add: dvd_add_right_iff) with kp have "kp = 0" apply - apply (erule contrapos_pp) @@ -143,7 +140,6 @@ next qed lemma intvl_mem_weaken: "x \ {p..+a - n} \ x \ {p..+a}" - apply - apply (drule intvlD) apply clarsimp apply (rule intvlI) @@ -265,16 +261,6 @@ next qed qed -lemma intvl_nowrap: - "\y \ 0; unat y + z \ 2 ^ len_of TYPE('a)\ \ x \ {x + y ..+ z}" for x :: "'a::len word" - apply clarsimp - apply (drule intvlD) - apply clarsimp - apply (simp add: unat_arith_simps) - apply (simp split: if_split_asm) - apply (simp add: unat_of_nat) - done - lemma heap_update_list_update: fixes v :: word8 shows "x \ y \ heap_update_list s xs (hp(y := v)) x = heap_update_list s xs hp x" @@ -362,7 +348,7 @@ lemma intvl_disjoint1: and blt: "b < 2 ^ len_of TYPE('a)" and dlt: "d < 2 ^ len_of TYPE('a)" shows "{a..+b} \ {c..+d} = {}" -proof (rule disjointI, rule notI) +proof (unfold disjoint_iff_not_equal, intro ballI notI) fix x y assume x: "x \ {a..+b}" and y: "y \ {c..+d}" and xy: "x = y" @@ -593,7 +579,7 @@ lemma access_in_array: apply (subst subst, assumption) apply (subst(asm) subst, assumption) apply simp - apply (simp add: size_of_def) + apply (simp add: size_of_def min.absorb_iff2[symmetric]) apply (subst le_diff_conv2) apply simp apply (fold mult_Suc, rule mult_le_mono1) @@ -622,16 +608,6 @@ lemma access_ti_list_array: apply (auto simp add: drop_take) done -lemma take_drop_foldl_concat: - "\ \y. y < m \ length (f y) = n; x < m \ - \ take n (drop (x * n) (foldl (@) [] (map f [0 ..< m]))) = f x" - apply (subst split_upt_on_n, assumption) - apply (simp only: foldl_concat_concat map_append) - apply (subst drop_append_miracle) - apply (induct x, simp_all)[1] - apply simp - done - definition array_ptr_index :: "(('a :: c_type)['b :: finite]) ptr \ bool \ nat \ 'a ptr" where @@ -897,7 +873,7 @@ lemma h_t_valid_Array_element': apply (simp add: align_of_def size_of_def addr_card_def card_word) apply (simp add: dvd_mod) apply (thin_tac "\x. 
P x" for P) - apply (clarsimp simp: intvl_def) + apply (clarsimp simp: intvl_def simp del: word_neq_0_conv) apply (drule_tac x="offs * size_of TYPE('a) + k" in spec) apply (drule mp) apply (simp add: array_ptr_index_def CTypesDefs.ptr_add_def field_simps) @@ -1101,11 +1077,11 @@ lemma ptr_retyp_disjoint2: "\ptr_retyp (p::'a::mem_type ptr) d,g \\<^sub>t q; {ptr_val p..+size_of TYPE('a)} \ {ptr_val q..+size_of TYPE('b)} = {} \ \ d,g \\<^sub>t (q::'b::mem_type ptr)" -apply(clarsimp simp: h_t_valid_def) -apply(erule ptr_retyp_valid_footprint_disjoint2) -apply(simp add: size_of_def) -apply fast -done + apply(clarsimp simp: h_t_valid_def) + apply(erule ptr_retyp_valid_footprint_disjoint2) + apply(simp add: size_of_def) + apply fast + done lemma ptr_retyp_disjoint_iff: "{ptr_val p..+size_of TYPE('a)} \ {ptr_val q..+size_of TYPE('b)} = {} diff --git a/tools/c-parser/README.md b/tools/c-parser/README.md index 87628fa5e8..f765baeea4 100644 --- a/tools/c-parser/README.md +++ b/tools/c-parser/README.md @@ -1,4 +1,5 @@ "xys"; - * The a is also chopped, since sometimes the bound variables - * are renamed, I think SELECT_GOAL in rename_goal is to blame - *) -fun remdeco' str = - let - fun chop (p::ps) (x::xs) = chop ps xs - | chop [] xs = [] - | chop (p::ps) [] = error "remdeco: code should never be reached"; - - fun remove prf (s as (x::xs)) = if is_prefix (op =) prf s then chop prf s - else (x::remove prf xs) - | remove prf [] = []; - - in String.implode (remove (String.explode deco) (String.explode str)) end; fun extern ctxt s = (case try (Proof_Context.extern_const ctxt o Lexicon.unmark_const) s of NONE => s | SOME s' => s'); -fun remdeco ctxt s = remdeco' (extern ctxt s); - -fun undeco ctxt (Const (c, T)) = Const (remdeco ctxt c, T) - | undeco ctxt ((f as Const (@{syntax_const "_free"},_)) $ Free (x, T)) = - (*f$*)Const (remdeco' x, T) - | undeco ctxt (Const _ $ _ $ ((Const (@{syntax_const "_free"},_)) $ Free (x, T))) = - (*f$*)Const (remdeco' x, T) - | undeco ctxt (Free (c, T)) = Const (remdeco' c, T) - | undeco ctxt x = x - fun varname x = x ^ deco -val dest_string = map (chr o HOLogic.dest_char) o HOLogic.dest_list; +val dest_string = implode o map (chr o HOLogic.dest_char) o HOLogic.dest_list; fun dest_string' t = (case try dest_string t of - SOME s => implode s + SOME s => s | NONE => (case t of Free (s,_) => s | Const (s,_) => Long_Name.base_name s | _ => raise TERM ("dest_string'",[t]))) +val state_hierarchy = Record.dest_recTs +fun stateT_id T = case (state_hierarchy T) of [] => NONE | Ts => SOME (last Ts); -fun is_state_space_var Tids t = - let - fun is_stateT T = (case stateT_id T of NONE => 0 - | SOME id => if member (op =) Tids id then ~1 else 0); - in - (case t of - Const _ $ Abs (_,T,_) => is_stateT T - | Free (_,T) => is_stateT T - | _ => 0) - end; - - -datatype callMode = Static | Parameter - -fun proc_name Static (Const (p,_)$_) = resuffix deco proc_deco (Long_Name.base_name p) - | proc_name Static (Const (@{const_name StateFun.lookup},_)$_$Free (p,_)$_) = - suffix proc_deco (remdeco' (Long_Name.base_name p)) - | proc_name Static p = dest_string' p - | proc_name Parameter (Const (p,_)) = resuffix deco proc_deco (Long_Name.base_name p) - | proc_name Parameter (Abs (_,_,Const (p,_)$Bound 0)) = - resuffix deco proc_deco (Long_Name.base_name p) - | proc_name Parameter (Abs (_,_,Const (@{const_name StateFun.lookup},_)$_$Free (p,_)$_)) = - suffix proc_deco (remdeco' (Long_Name.base_name p)) - | proc_name _ t = raise TERM ("proc_name",[t]); - - - -fun dest_call (Const (@{const_name 
Language.call},_)$init$pname$return$c) = - (init,pname,return,c,Static,true) - | dest_call (Const (@{const_name Language.fcall},_)$init$pname$return$_$c) = - (init,pname,return,c,Static,true) - | dest_call (Const (@{const_name Language.com.Call},_)$pname) = - (Bound 0,pname,Bound 0,Bound 0,Static,false) - | dest_call (Const (@{const_name Language.dynCall},_)$init$pname$return$c) = - (init,pname,return,c,Parameter,true) - | dest_call t = raise TERM ("Hoare.dest_call: unexpected term",[t]); - -fun dest_whileAnno (Const (@{const_name Language.whileAnnoG},_) $gs$b$I$V$c) = - (SOME gs,b,I,V,c,false) - | dest_whileAnno (Const (@{const_name Language.whileAnno},_) $b$I$V$c) = (NONE,b,I,V,c,false) - | dest_whileAnno (Const (@{const_name Language.whileAnnoGFix},_)$gs$b$I$V$c) = - (SOME gs,b,I,V,c,true) - | dest_whileAnno (Const (@{const_name Language.whileAnnoFix},_) $b$I$V$c) = (NONE,b,I,V,c,true) - | dest_whileAnno t = raise TERM ("Hoare.dest_while: unexpected term",[t]); - -fun dest_Guard (Const (@{const_name Language.com.Guard},_)$f$g$c) = (f,g,c,false) - | dest_Guard (Const (@{const_name Language.guaranteeStrip},_)$f$g$c) = (f,g,c,true) - | dest_Guard t = raise TERM ("Hoare.dest_guard: unexpected term",[t]); +fun globalsT (Type (_, T :: _)) = SOME T + | globalsT _ = NONE; +fun stateT_ids T = + (case stateT_id T of + NONE => NONE + | SOME sT => (case globalsT T of + NONE => SOME [sT] + | SOME gT => (case stateT_id gT of + NONE => SOME [sT] + | SOME gT' => SOME [sT,gT']))); (*** extend theory by procedure definition ***) @@ -540,34 +501,68 @@ fun dest_Guard (Const (@{const_name Language.com.Guard},_)$f$g$c) = (f,g,c,false fun add_declaration name decl thy = thy |> Named_Target.init [] name - |> Local_Theory.declaration {syntax = false, pervasive = false} decl + |> Local_Theory.declaration {syntax = false, pervasive = false, pos = \<^here>} decl |> Local_Theory.exit |> Proof_Context.theory_of; (* data kind 'HOL/hoare' *) +type lense = {lookup: term, update : term} + type proc_info = - {params: ((par_kind * string) list), - recursive: bool, - state_kind: state_kind} + { + params: ((par_kind * string * lense option) list), + recursive: bool, + state_kind: state_kind + }; + +type state_space = + { + name: string, + is_state_type: Proof.context -> typ -> bool, + generalise: Proof.context -> int -> tactic, + state_simprocs: simproc list, + state_upd_simprocs: simproc list, + state_ex_sel_eq_simprocs: simproc list, + is_defined: Proof.context -> xstring -> bool, + read_function_name: Proof.context -> xstring -> term, + lookup_tr: Proof.context -> xstring -> term, + update_tr: Proof.context -> xstring -> term, + is_lookup: Proof.context -> term -> bool, + lookup_tr': Proof.context -> term -> term, + dest_update_tr': Proof.context -> term -> term * term * term option, + update_tr': Proof.context -> term -> term + }; + type hoare_tac = (bool -> int -> tactic) -> Proof.context -> hoareMode -> int -> tactic; type hoare_data = - {proc_info: proc_info Symtab.table, - active_procs: string list list, - default_state_kind: state_kind, - generate_guard: (stamp * (Proof.context -> term -> term option)), - wp_tacs: (string * hoare_tac) list, - hoare_tacs: (string * hoare_tac) list, - vcg_simps: thm list}; + { + proc_info: proc_info Symtab.table, + active_procs: string list list, + default_state_kind: state_kind, + generate_guard: (stamp option * (Proof.context -> term -> term option)), + name_tr: (stamp option * (Proof.context -> bool -> string -> string)), + hoare_tacs: (string * hoare_tac) list, + vcg_simps: thm 
list, + state_spaces: (string * state_space) list + }; fun make_hoare_data proc_info active_procs default_state_kind generate_guard - wp_tacs hoare_tacs vcg_simps = + name_tr hoare_tacs vcg_simps state_spaces = {proc_info = proc_info, active_procs = active_procs, default_state_kind = default_state_kind, generate_guard = generate_guard, - wp_tacs = wp_tacs, hoare_tacs = hoare_tacs, vcg_simps = vcg_simps}; + name_tr = name_tr, hoare_tacs = hoare_tacs, vcg_simps = vcg_simps, state_spaces = state_spaces}; + +fun merge_stamped err_msg ((NONE, _), p) = p + | merge_stamped err_msg (p, (NONE,_)) = p + | merge_stamped err_msg ((SOME (stamp1:stamp), x), (SOME stamp2, _)) = + if stamp1 = stamp2 then (SOME stamp1, x) + else error err_msg; + +fun fast_merge merge (x, y) = if pointer_eq (x, y) then x else merge (x, y) structure Hoare_Data = Generic_Data ( @@ -577,33 +572,74 @@ structure Hoare_Data = Generic_Data (Symtab.empty: proc_info Symtab.table) ([]:string list list) (Function) - (stamp (),(K (K NONE)): Proof.context -> term -> term option) - ([]:(string * hoare_tac) list) + (NONE,(K (K NONE)): Proof.context -> term -> term option) + (NONE,(K (K I)): Proof.context -> bool -> string -> string) ([]:(string * hoare_tac) list) - ([]:thm list); + ([]:thm list) + ([]:(string * state_space) list); - (* FIXME exponential blowup due to append !? *) - fun merge ({proc_info = proc_info1, active_procs = active_procs1, + val merge = fast_merge (fn ({proc_info = proc_info1, active_procs = active_procs1, default_state_kind = _, - generate_guard = (stmp1,generate_gaurd1), - wp_tacs = wp_tacs1, hoare_tacs = hoare_tacs1, vcg_simps = vcg_simps1}, + generate_guard = generate_guard1, + name_tr = name_tr1, hoare_tacs = hoare_tacs1, vcg_simps = vcg_simps1, state_spaces=state_spaces1}, {proc_info = proc_info2, active_procs = active_procs2, default_state_kind = default_state_kind2, - generate_guard = (stmp2, _), - wp_tacs = wp_tacs2, hoare_tacs = hoare_tacs2, vcg_simps=vcg_simps2}) : T = - if stmp1=stmp2 then + generate_guard = generate_guard2, + name_tr = name_tr2, hoare_tacs = hoare_tacs2, vcg_simps=vcg_simps2, state_spaces=state_spaces2}) => make_hoare_data (Symtab.merge (K true) (proc_info1,proc_info2)) (active_procs1 @ active_procs2) (default_state_kind2) - (stmp1,generate_gaurd1) - (wp_tacs1 @ wp_tacs2) - (hoare_tacs1 @ hoare_tacs2) + (merge_stamped + "Theories have different aux. functions to generate guards, please resolve before merge" + (generate_guard1, generate_guard2)) + (merge_stamped + "Theories have different aux. functions to translate names, please resolve before merge" + (name_tr1, name_tr2)) + (AList.merge (op =) (K true) (hoare_tacs1, hoare_tacs2)) (Thm.merge_thms (vcg_simps1,vcg_simps2)) - else error ("Theories have different aux. 
functions to generate guards") + (AList.merge (op =) (K true) (state_spaces1, state_spaces2))) ); val get_data = Hoare_Data.get o Context.Proof; +(* state space representation dependent functions *) + +fun get_state_space_comps sel ctxt n = + AList.lookup (op =) (#state_spaces (Hoare_Data.get (Context.Proof ctxt))) n + |> Option.map sel |> these; + + +fun state_simprocs ctxt Record = [Record.simproc] + | state_simprocs ctxt Function = [Record.simproc, StateFun.lookup_simproc] + | state_simprocs ctxt (Other n) = get_state_space_comps (#state_simprocs) ctxt n; + + +fun state_upd_simprocs ctxt Record = [Record.upd_simproc] + | state_upd_simprocs ctxt Function = [StateFun.update_simproc] + | state_upd_simprocs ctxt (Other n) = get_state_space_comps (#state_upd_simprocs) ctxt n; + +fun state_ex_sel_eq_simprocs ctxt Record = [Record.ex_sel_eq_simproc] + | state_ex_sel_eq_simprocs ctxt Function = [StateFun.ex_lookup_eq_simproc] + | state_ex_sel_eq_simprocs ctxt (Other n) = get_state_space_comps (#state_ex_sel_eq_simprocs) ctxt n; + +val state_split_simp_tac = Record.split_simp_tac +val state_hierarchy = Record.dest_recTs + + +fun stateT_id T = case (state_hierarchy T) of [] => NONE | Ts => SOME (last Ts); + +fun globalsT (Type (_, T :: _)) = SOME T + | globalsT _ = NONE; + +fun stateT_ids T = + (case stateT_id T of + NONE => NONE + | SOME sT => (case globalsT T of + NONE => SOME [sT] + | SOME gT => (case stateT_id gT of + NONE => SOME [sT] + | SOME gT' => SOME [sT,gT']))); + (* access 'params' *) @@ -625,52 +661,52 @@ datatype 'a bodykind = BodyTyp of 'a | BodyTerm of 'a fun set_default_state_kind sk context = let - val {proc_info,active_procs,default_state_kind,generate_guard,wp_tacs,hoare_tacs, - vcg_simps,...} + val {proc_info,active_procs,default_state_kind,generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces, ...} = Hoare_Data.get context; val data = make_hoare_data proc_info active_procs sk - generate_guard wp_tacs hoare_tacs vcg_simps; + generate_guard name_tr hoare_tacs vcg_simps state_spaces; in Hoare_Data.put data context end; val get_default_state_kind = #default_state_kind o get_data; +fun get_default_state_space ctxt = + case get_default_state_kind ctxt of + Other sp => AList.lookup (op =) (#state_spaces (Hoare_Data.get (Context.Proof ctxt))) sp + | _ => NONE + fun add_active_procs phi ps context = let - val {proc_info,active_procs,default_state_kind,generate_guard,wp_tacs,hoare_tacs, - vcg_simps,...} + val {proc_info,active_procs,default_state_kind,generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces, ...} = Hoare_Data.get context; val data = make_hoare_data proc_info ((map (morph_name context phi) ps)::active_procs) default_state_kind - generate_guard wp_tacs hoare_tacs vcg_simps; + generate_guard name_tr hoare_tacs vcg_simps state_spaces; in Hoare_Data.put data context end; fun add_hoare_tacs tacs context = let - val {proc_info,active_procs, default_state_kind, generate_guard,wp_tacs,hoare_tacs, - vcg_simps,...} + val {proc_info,active_procs, default_state_kind, generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces,...} = Hoare_Data.get context; val data = make_hoare_data proc_info active_procs default_state_kind generate_guard - wp_tacs (hoare_tacs@tacs) vcg_simps; + name_tr (AList.merge (op =) (K true) (hoare_tacs, tacs)) vcg_simps state_spaces; in Hoare_Data.put data context end; fun map_vcg_simps f context = let - val {proc_info,active_procs,default_state_kind,generate_guard,wp_tacs,hoare_tacs, - vcg_simps,...} + val 
{proc_info,active_procs,default_state_kind,generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces,...} = Hoare_Data.get context; val data = make_hoare_data proc_info active_procs default_state_kind generate_guard - wp_tacs hoare_tacs (f vcg_simps); + name_tr hoare_tacs (f vcg_simps) state_spaces; in Hoare_Data.put data context end; -fun thy_attrib f = Thm.declaration_attribute (fn thm => map_vcg_simps (f thm)); - -val vcg_simpadd = Thm.add_thm -val vcg_simpdel = Thm.del_thm - -val vcg_simp_add = thy_attrib vcg_simpadd; -val vcg_simp_del = thy_attrib vcg_simpdel; +val vcg_simp_add = Thm.declaration_attribute (map_vcg_simps o Thm.add_thm o Thm.trim_context); +val vcg_simp_del = Thm.declaration_attribute (map_vcg_simps o Thm.del_thm); (* add 'procedure' *) @@ -687,18 +723,21 @@ fun map_proc_info_state_kind f {params,recursive,state_kind} = mk_proc_info params recursive (f state_kind); +fun morph_lense phi ({lookup, update}:lense) = + {lookup = Morphism.term phi lookup, update = Morphism.term phi update}:lense; fun add_params phi name frmls context = let - val {proc_info,active_procs,default_state_kind,generate_guard,wp_tacs,hoare_tacs, - vcg_simps,...} + val {proc_info,active_procs,default_state_kind,generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces, ...} = Hoare_Data.get context; - val params = map (apsnd (morph_name context phi)) frmls; + val params = map (fn (kind, name, lense_opt) => + (kind, morph_name context phi name, Option.map (morph_lense phi) lense_opt)) frmls; val f = map_proc_info_params (K params); val default = f empty_proc_info; - val proc_info' = Symtab.map_default (morph_name context phi name,default) f proc_info; + val proc_info' = Symtab.map_default (morph_name context phi name, default) f proc_info; val data = make_hoare_data proc_info' active_procs default_state_kind - generate_guard wp_tacs hoare_tacs vcg_simps; + generate_guard name_tr hoare_tacs vcg_simps state_spaces; in Hoare_Data.put data context end; fun get_params name ctxt = @@ -707,14 +746,14 @@ fun get_params name ctxt = fun add_recursive phi name context = let - val {proc_info,active_procs,default_state_kind,generate_guard,wp_tacs,hoare_tacs, - vcg_simps,...} + val {proc_info,active_procs,default_state_kind,generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces, ...} = Hoare_Data.get context; val f = map_proc_info_recursive (K true); val default = f empty_proc_info; val proc_info'= Symtab.map_default (morph_name context phi name,default) f proc_info; val data = make_hoare_data proc_info' active_procs default_state_kind - generate_guard wp_tacs hoare_tacs vcg_simps; + generate_guard name_tr hoare_tacs vcg_simps state_spaces; in Hoare_Data.put data context end; fun get_recursive name ctxt = @@ -722,14 +761,14 @@ fun get_recursive name ctxt = fun add_state_kind phi name sk context = let - val {proc_info,active_procs,default_state_kind,generate_guard,wp_tacs,hoare_tacs, - vcg_simps,...} + val {proc_info,active_procs,default_state_kind,generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces,...} = Hoare_Data.get context; val f = map_proc_info_state_kind (K sk); val default = f empty_proc_info; val proc_info'= Symtab.map_default (morph_name context phi name,default) f proc_info; val data = make_hoare_data proc_info' active_procs default_state_kind - generate_guard wp_tacs hoare_tacs vcg_simps; + generate_guard name_tr hoare_tacs vcg_simps state_spaces; in Hoare_Data.put data context end; fun get_state_kind name ctxt = @@ -737,15 +776,124 @@ fun get_state_kind name ctxt = fun 
install_generate_guard f context = let - val {proc_info,active_procs, default_state_kind, generate_guard,wp_tacs,hoare_tacs, - vcg_simps,...} = + val {proc_info,active_procs, default_state_kind, generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces,...} = Hoare_Data.get context; - val data = make_hoare_data proc_info active_procs default_state_kind (stamp (), f) - wp_tacs hoare_tacs vcg_simps + val data = make_hoare_data proc_info active_procs default_state_kind (SOME (stamp ()), f) + name_tr hoare_tacs vcg_simps state_spaces in Hoare_Data.put data context end; fun generate_guard ctxt = snd (#generate_guard (get_data ctxt)) ctxt; +fun install_state_space sp ctxt = + let + val {proc_info,active_procs, default_state_kind, generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces,...} = + Hoare_Data.get ctxt; + val data = make_hoare_data proc_info active_procs default_state_kind generate_guard + name_tr hoare_tacs vcg_simps (AList.update (op =) (#name sp, sp) state_spaces) + in Hoare_Data.put data ctxt end; + +fun generalise_other ctxt name = + Option.map #generalise (AList.lookup (op =) (#state_spaces (get_data ctxt)) name); + +fun install_name_tr f ctxt = + let + val {proc_info,active_procs, default_state_kind, generate_guard,name_tr,hoare_tacs, + vcg_simps,state_spaces,...} = + Hoare_Data.get ctxt; + val data = make_hoare_data proc_info active_procs default_state_kind generate_guard + (SOME (stamp ()), f) hoare_tacs vcg_simps state_spaces + in Hoare_Data.put data ctxt end; + +fun name_tr ctxt = snd (#name_tr (get_data ctxt)) ctxt; + + +(* utils for variable name decorations *) + + +(* removes the suffix of the string beginning with deco. + * "xys_'a" --> "xys"; + * The a is also chopped, since sometimes the bound variables + * are renamed, I think SELECT_GOAL in rename_goal is to blame + *) +fun remdeco' ctxt str = + let + fun chop (p::ps) (x::xs) = chop ps xs + | chop [] xs = [] + | chop (p::ps) [] = error "remdeco: code should never be reached"; + + fun remove prf (s as (x::xs)) = if is_prefix (op =) prf s then chop prf s + else (x::remove prf xs) + | remove prf [] = []; + + in name_tr ctxt false (String.implode (remove (String.explode deco) (String.explode str))) end; + + +fun remdeco ctxt s = remdeco' ctxt (extern ctxt s); + +fun undeco ctxt (Const (c, T)) = Const (remdeco ctxt c, T) + | undeco ctxt ((f as Const (@{syntax_const "_free"},_)) $ Free (x, T)) = + (*f$*)Const (remdeco' ctxt x, T) + | undeco ctxt (Const _ $ _ $ ((Const (@{syntax_const "_free"},_)) $ Free (x, T))) = + (*f$*)Const (remdeco' ctxt x, T) + | undeco ctxt (Free (c, T)) = Const (remdeco' ctxt c, T) + | undeco ctxt x = x + +fun is_state_space_var Tids t = + let + fun is_stateT T = (case stateT_id T of NONE => 0 + | SOME id => if member (op =) Tids id then ~1 else 0); + in + (case t of + Const _ $ Abs (_,T,_) => is_stateT T + | Free (_,T) => is_stateT T + | _ => 0) + end; + + +datatype callMode = Static | Parameter + +fun proc_name ctxt Static (Const (p,_)$_) = resuffix deco proc_deco (Long_Name.base_name p) + | proc_name ctxt Static (Const (@{const_name StateFun.lookup},_)$_$Free (p,_)$_) = + suffix proc_deco (remdeco' ctxt (Long_Name.base_name p)) + | proc_name ctxt Static p = dest_string' p + | proc_name ctxt Parameter (Const (p,_)) = resuffix deco proc_deco (Long_Name.base_name p) + | proc_name ctxt Parameter (Abs (_,_,Const (p,_)$Bound 0)) = + resuffix deco proc_deco (Long_Name.base_name p) + | proc_name ctxt Parameter (Abs (_,_,Const (@{const_name StateFun.lookup},_)$_$Free (p,_)$_)) = + suffix proc_deco 
(remdeco' ctxt (Long_Name.base_name p)) + | proc_name _ _ t = raise TERM ("proc_name",[t]); + + +fun dest_call (Const (@{const_name Language.call},_)$init$pname$return$c) = + (init,pname,return,c,Static,true,NONE) + | dest_call (Const (@{const_name Language.fcall},_)$init$pname$return$_$c) = + (init,pname,return,c,Static,true,NONE) + | dest_call (Const (@{const_name Language.com.Call},_)$pname) = + (Bound 0,pname,Bound 0,Bound 0,Static,false,NONE) + | dest_call (Const (@{const_name Language.dynCall},_)$init$pname$return$c) = + (init,pname,return,c,Parameter,true,NONE) + | dest_call (Const (@{const_name Language.call_exn},_)$init$pname$return$result_exn$c) = + (init,pname,return,c,Static,true,SOME result_exn) + | dest_call (Const (@{const_name Language.dynCall_exn},_)$init$pname$return$result_exn$c) = + (init,pname,return,c,Parameter,true,SOME result_exn) + | dest_call t = raise TERM ("Hoare.dest_call: unexpected term",[t]); + + +fun dest_whileAnno (Const (@{const_name Language.whileAnnoG},_) $gs$b$I$V$c) = + (SOME gs,b,I,V,c,false) + | dest_whileAnno (Const (@{const_name Language.whileAnno},_) $b$I$V$c) = (NONE,b,I,V,c,false) + | dest_whileAnno (Const (@{const_name Language.whileAnnoGFix},_)$gs$b$I$V$c) = + (SOME gs,b,I,V,c,true) + | dest_whileAnno (Const (@{const_name Language.whileAnnoFix},_) $b$I$V$c) = (NONE,b,I,V,c,true) + | dest_whileAnno t = raise TERM ("Hoare.dest_while: unexpected term",[t]); + +fun dest_Guard (Const (@{const_name Language.com.Guard},_)$f$g$c) = (f,g,c,false) + | dest_Guard (Const (@{const_name Language.guaranteeStrip},_)$f$g$c) = (f,g,c,true) + | dest_Guard t = raise TERM ("Hoare.dest_guard: unexpected term",[t]); + + fun check_procedures_definition procs thy = let @@ -775,7 +923,7 @@ fun check_procedures_definition procs thy = maps (fn (name,inpars,outpars,locals,_,_,_) => duplicate_pars name (inpars @ locals) @ duplicate_pars name (outpars @ locals)) procs; - (* FIXME: Check that no global variables are used as result parameters *) + (* fixme: Check that no global variables are used as result parameters *) val errs = err_already_defined @ err_duplicate_procs @ err_duplicate_pars; in if null errs then () else error (cat_lines errs) end; @@ -783,12 +931,13 @@ fun check_procedures_definition procs thy = fun add_parameter_info phi cname (name,(inpars,outpars,state_kind)) context = let fun par_deco' T = if T = "" then deco else par_deco (cname name); - val pars = map (fn (par,T) => (In,suffix (par_deco' T) par)) inpars@ - map (fn (par,T) => (Out,suffix (par_deco' T) par)) outpars; - in - context - |> add_params phi name pars - |> add_state_kind phi name state_kind + val pars = map (fn (par,T) => (In,suffix (par_deco' T) par, NONE)) inpars@ + map (fn (par,T) => (Out,suffix (par_deco' T) par, NONE)) outpars; + + val ctxt_decl = context + |> add_params phi name pars + |> add_state_kind phi name state_kind + in ctxt_decl end; fun mk_loc_exp xs = @@ -939,7 +1088,7 @@ fun procedures_definition locname procs thy = val context = Context.Theory thy |> fold (add_parameter_info Morphism.identity (unsuffix proc_deco)) name_pars - |> StateSpace.set_silent true + |> Config.put_generic StateSpace.silent true fun read_body (_, body) = Syntax.read_term (Context.proof_of context) body; @@ -947,7 +1096,7 @@ fun procedures_definition locname procs thy = val bodies = map read_body name_body; fun dcall t = (case try dest_call t of - SOME (_,p,_,_,m,_) => SOME (proc_name m p) + SOME (_,p,_,_,m,_,_) => SOME (proc_name (Context.proof_of context) m p) | _ => NONE); fun in_names x = if member 
(op =) names x then SOME x else NONE; fun add_edges n = fold (fn x => Graph.add_edge (n, x)); @@ -1014,7 +1163,7 @@ fun procedures_definition locname procs thy = let val name' = unsuffix proc_deco name; val fixes = [Element.Fixes [(Binding.name name, SOME proc_nameT, NoSyn)]]; - (* FIXME: may use HOLogic.typeT as soon as locale type-inference works properly *) + (* fixme: may use HOLogic.typeT as soon as locale type-inference works properly *) val pE = mk_loc_exp [intern_locale thy (suffix parametersN cname)]; val sN = suffix signatureN name'; in thy @@ -1038,7 +1187,7 @@ fun procedures_definition locname procs thy = val callees = filter_out (fn n => n = name) (get_calls name) val fixes = [Element.Fixes [(Binding.name name, SOME proc_nameT, NoSyn)]]; - (* FIXME: may use HOLogic.typeT as soon as locale type-inference works properly *) + (* fixme: may use HOLogic.typeT as soon as locale type-inference works properly *) val pE = mk_loc_exp (map (intern_locale thy) ([lname variablesN (the (my_clique name))]@ @@ -1058,7 +1207,7 @@ fun procedures_definition locname procs thy = ctxt |> Proof_Context.theory_of |> Named_Target.init [] lname - |> Local_Theory.declaration {syntax = false, pervasive = false} parameter_info_decl + |> Local_Theory.declaration {syntax = false, pervasive = false, pos = \<^here>} parameter_info_decl |> (fn lthy => if has_body name then snd (Local_Theory.define (def lthy) lthy) else lthy) @@ -1074,7 +1223,7 @@ fun procedures_definition locname procs thy = if has_body name then let - (* FIXME: All the read_term stuff is just because type-inference/abbrevs for + (* fixme: All the read_term stuff is just because type-inference/abbrevs for * new locale elements does not work right now; * We read the term to expand the abbreviations, then we print it again * (without folding the abbreviation) and reread as string *) @@ -1087,7 +1236,7 @@ fun procedures_definition locname procs thy = HOLogic.mk_eq (Free (gamma,fastype_of nt --> fastype_of rhs)$nt,rhs) val consts = Sign.consts_of thy; val eqs = - YXML.string_of_body (Term_XML.Encode.term consts (Consts.dummy_types consts eq)); + YXML.string_of_body (Term_XML.Encode.term consts (Consts.dummy_types consts eq)); val assms = Element.Assumes [((Binding.name (suffix bodyP name'), []),[(eqs,[])])] in [assms] end @@ -1328,7 +1477,12 @@ fun cond_rename_bvars cond name thm = val rename_bvars = cond_rename_bvars (K true); -fun trace_tac ctxt str st = (if Config.get ctxt hoare_trace then tracing str else (); all_tac st); +fun trace_msg ctxt str = if Config.get ctxt hoare_trace > 0 then tracing str else () +fun trace_tac ctxt str st = (trace_msg ctxt str; all_tac st); + +fun trace_subgoal_tac ctxt s i = + SUBGOAL (fn (prem, _) => trace_tac ctxt (s ^ (Syntax.string_of_term ctxt prem))) i + fun error_tac str st = (error str;no_tac st); @@ -1430,7 +1584,7 @@ fun mk_split_thms ctxt (vars as _::_) = end; fun prove_simp simps prop = - let val ([prop'], _) = Variable.importT_terms [prop] ctxt (* FIXME continue context!? *) + let val ([prop'], _) = Variable.importT_terms [prop] ctxt (* fixme continue context!? 
*) in Goal.prove_global thy [] [] prop' (fn {context = goal_ctxt, ...} => @@ -1559,17 +1713,19 @@ fun add_foldcongsimps simps thy = FoldCongData.map (fn ss => *) fun in_assertion_simp_tac ctxt state_kind thms i = let - val vcg_simps = #vcg_simps (get_data ctxt); - val fold_simps = get_foldcong_ss (Proof_Context.theory_of ctxt) + val thy = Proof_Context.theory_of ctxt + val vcg_simps = map (Thm.transfer thy) (#vcg_simps (get_data ctxt)); + val fold_simps = get_foldcong_ss thy + val state_simps = Named_Theorems.get ctxt @{named_theorems "state_simp"} in EVERY [simp_tac (put_simpset HOL_basic_ss ctxt addsimps ([mem_Collect_eq,@{thm Set.Un_iff},@{thm Set.Int_iff}, @{thm Set.empty_subsetI}, @{thm Set.empty_iff}, UNIV_I, - @{thm Hoare.Collect_False}]@thms@K_convs@vcg_simps) - addsimprocs (state_simprocs state_kind) + @{thm Hoare.Collect_False}]@state_simps@thms@K_convs@vcg_simps) + addsimprocs (state_simprocs ctxt state_kind) |> fold Simplifier.add_cong K_congs) i THEN_MAYBE - (simp_tac (put_simpset fold_simps ctxt addsimprocs [state_upd_simproc state_kind]) i) + (simp_tac (put_simpset fold_simps ctxt addsimprocs (state_upd_simprocs ctxt state_kind)) i) ] end; @@ -1606,12 +1762,16 @@ fun before_set2pred_simp_tac ctxt = (** simplification done by full_simp_tac **) (*****************************************************************************) +val Collect_subset_to_pred = +@{lemma \(\x. A x \ P x) + \ {x. A x} \ {x. P x}\ + by (rule subsetI, rule CollectI, drule CollectD, assumption)} + + fun set2pred_tac ctxt i thm = ((before_set2pred_simp_tac ctxt i) THEN_MAYBE (EVERY [trace_tac ctxt "set2pred", - resolve_tac ctxt [subsetI] i, - resolve_tac ctxt [CollectI] i, - dresolve_tac ctxt [CollectD] i, + resolve_tac ctxt [Collect_subset_to_pred] i, full_simp_tac (put_simpset HOL_basic_ss ctxt) i ])) thm @@ -1627,7 +1787,7 @@ fun set2pred_tac ctxt i thm = fun MaxSimpTac ctxt tac i = TRY (FIRST[resolve_tac ctxt [subset_refl] i, - set2pred_tac ctxt i THEN_MAYBE tac i, + (set2pred_tac ctxt i THEN_MAYBE tac i) ORELSE tac i, trace_tac ctxt "final_tac failed" ]); @@ -1667,7 +1827,7 @@ fun post_conforms_tac ctxt state_kind i = (fn i => (REPEAT (resolve_tac ctxt [allI,impI] i)) THEN (full_simp_tac (put_simpset HOL_basic_ss ctxt addsimps [mem_Collect_eq,@{thm Set.singleton_iff},@{thm Set.empty_iff},UNIV_I] - addsimprocs (state_simprocs state_kind)) i))) i]; + addsimprocs (state_simprocs ctxt state_kind)) i))) i]; fun dest_hoare_raw (Const(@{const_name HoarePartialDef.hoarep},_)$G$T$F$P$C$Q$A) = (P,C,Q,A,Partial,G,T,F) @@ -1689,7 +1849,7 @@ fun dest_hoare t = let val triple = (strip_qnt_body @{const_name "All"} o - HOLogic.dest_Trueprop o strip_qnt_body @{const_name Pure.all}) t; + HOLogic.dest_Trueprop o Logic.strip_assums_concl) t; in dest_hoare_raw triple end; @@ -1727,37 +1887,24 @@ val conseq1_ss_base = @K_convs @ @{thms simp_thms} @ @{thms ex_simps} @ @{thms all_simps}) delsimps [@{thm Hoare.all_imp_to_ex}] |> fold Simplifier.add_cong K_congs) -val conseq1_ss_record = - simpset_of (put_simpset conseq1_ss_base @{context} addsimprocs (state_simprocs Record)); -val conseq1_ss_fun = - simpset_of (put_simpset conseq1_ss_base @{context} addsimprocs (state_simprocs Function)); -fun conseq1_ss Record = conseq1_ss_record - | conseq1_ss Function = conseq1_ss_fun; val conseq2_ss_base = simpset_of (put_simpset HOL_basic_ss @{context} addsimps (@{thms Hoare.all_imp_eq_triv} @ @{thms simp_thms} @ @{thms ex_simps} @ @{thms all_simps}) delsimps [@{thm Hoare.all_imp_to_ex}] |> Simplifier.add_cong @{thm imp_cong}); - -val 
conseq2_ss_record = - simpset_of (put_simpset conseq2_ss_base @{context} - addsimprocs [state_upd_simproc Record, state_ex_sel_eq_simproc Record]); -val conseq2_ss_fun = - simpset_of (put_simpset conseq2_ss_base @{context} - addsimprocs [state_upd_simproc Function, state_ex_sel_eq_simproc Function]); -fun conseq2_ss Record = conseq2_ss_record - | conseq2_ss Function = conseq2_ss_fun; - in fun raw_conseq_simp_tac ctxt state_kind thms i = let val ctxt' = Config.put simp_depth_limit 0 ctxt; in - simp_tac (put_simpset (conseq1_ss state_kind) ctxt' addsimps thms) i + simp_tac (put_simpset conseq1_ss_base ctxt' + addsimprocs (state_simprocs ctxt' state_kind) + addsimps thms) i THEN_MAYBE - simp_tac (put_simpset (conseq2_ss state_kind) ctxt') i + simp_tac (put_simpset conseq2_ss_base ctxt' + addsimprocs (state_upd_simprocs ctxt' state_kind @ state_ex_sel_eq_simprocs ctxt' state_kind)) i end end @@ -1782,7 +1929,7 @@ fun gen_context_thms ctxt mode params G T F = val hoare = (case mode of Partial => @{const_name HoarePartialDef.hoarep} | Total => @{const_name HoareTotalDef.hoaret}); - (* FIXME: Use future Proof_Context.rename_vars or make closed term and remove by hand *) + (* fixme: Use future Proof_Context.rename_vars or make closed term and remove by hand *) (* fun free_params ps t = foldr (fn ((x,xT),t) => snd (variant_abs (x,xT,t))) (ps,t); val PpQA' = mkCallQuadruple (strip_qnt_body @{const_name Pure.all} (free_params params (Term.list_all (vars,PpQA)))); @@ -1810,10 +1957,11 @@ fun gen_context_thms ctxt mode params G T F = let val vars = map fst (strip_qnt_vars @{const_name All} (HOLogic.dest_Trueprop (Logic.strip_assums_concl prop))); + val [asmUN'] = adapt_aux_var ctxt true vars [get_aux_tvar (AsmUN mode)] in Goal.prove ctxt params [] prop (fn {context = ctxt', ...} => EVERY[trace_tac ctxt' "extracting specifications from hoare context", - resolve_tac ctxt' (adapt_aux_var ctxt' true vars [get_aux_tvar (AsmUN mode)]) 1, + resolve_tac ctxt' [asmUN'] 1, DEPTH_SOLVE_1 (resolve_tac ctxt' [subset_refl,refl] 1 ORELSE ((resolve_tac ctxt' [@{thm Hoare.subset_unI1}] 1 APPEND resolve_tac ctxt' [@{thm Hoare.subset_unI2}] 1) ORELSE @@ -1826,10 +1974,11 @@ fun gen_context_thms ctxt mode params G T F = val specs = hoare_context_specs mode G T F; in map (mk_prove mode) specs end; +fun is_modifies_assertion t = + exists_subterm (fn (Const (@{const_name Hoare.meq},_)) => true| _ => false) t fun is_modifies_clause t = - exists_subterm (fn (Const (@{const_name Hoare.meq},_)) => true| _ => false) - (#3 (dest_hoare (Logic.strip_assums_concl t))) + is_modifies_assertion (#3 (dest_hoare (Logic.strip_assums_concl t))) handle (TERM _) => false; val is_spec_clause = not o is_modifies_clause; @@ -1843,7 +1992,7 @@ fun swap_constr_destr f (t as (Const (@{const_name Fun.id},_))) = t | swap_constr_destr f (t as (Const (c,Type ("fun",[T,valT])))) = (Const (f c, Type ("fun",[valT,T])) handle Empty => raise TERM ("Hoare.swap_constr_destr",[t])) - | swap_constr_destr f (Const ("StateFun.map_fun",Type ("fun", (* FIXME unknown "StateFun.map_fun" !? *) + | swap_constr_destr f (Const ("StateFun.map_fun",Type ("fun", (* fixme: unknown "StateFun.map_fun" !? *) [Type ("fun",[T,valT]), Type ("fun",[Type ("fun",[xT,T']), Type ("fun",[xT',valT'])])]))$g) = @@ -1871,7 +2020,7 @@ fun swap_constr_destr f (t as (Const (@{const_name Fun.id},_))) = t Type ("fun",[bsT,asT])]))$swap_constr_destr f g) | swap_constr_destr f t = raise TERM ("Hoare.swap_constr_destr",[t]); -(* FIXME: unused? *) +(* fixme: unused? 
*) val destr_to_constr = let fun convert c = @@ -1881,17 +2030,16 @@ val destr_to_constr = in swap_constr_destr convert end; fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx - pname return has_args _ = + pname return has_args result_exn _ = let val thy = Proof_Context.theory_of ctxt; - val pname' = unsuffix proc_deco pname; + val pname' = chopsfx proc_deco pname; val spec = (case AList.lookup (op =) asms pname of SOME s => SOME s | NONE => try (Proof_Context.get_thm ctxt) (suffix spec_sfx pname')); - fun auxvars_for p t = (case first_subterm_dest (try dest_call) t of - SOME (vars,(_,p',_,_,m,_)) => (if m=Static andalso + SOME (vars,(_,p',_,_,m,_,_)) => (if m=Static andalso p=(dest_string' p') then SOME vars else NONE) @@ -1916,7 +2064,7 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx fun check_spec name P thm = (case try dest_hoare (Thm.concl_of thm) of SOME spc => (case try dest_call (#2 spc) of - SOME (_,p,_,_,m,_) => if proc_name m p = name andalso + SOME (_,p,_,_,m,_,_) => if proc_name ctxt m p = name andalso P (Thm.concl_of thm) then SOME (#5 spc,thm) else NONE @@ -1954,13 +2102,13 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx | solve_spec ctxt' augment_rule asmUN_rule augment_emptyFaults mode Parameter _ _ i = (* try to infer spec out of assumptions *) let - fun tac ({context = ctxt'', prems, ...}: Subgoal.focus) = - (case (find_dyn_specs pname is_spec_clause prems) of + fun tac thms = + (case (find_dyn_specs pname is_spec_clause thms) of (spec_mode,spec)::_ - => solve_spec ctxt'' augment_rule asmUN_rule augment_emptyFaults mode Parameter + => solve_spec ctxt' augment_rule asmUN_rule augment_emptyFaults mode Parameter (SOME spec_mode) (SOME spec) 1 | _ => all_tac) - in Subgoal.FOCUS tac ctxt' i end + in Subgoal.FOCUS (tac o #prems) ctxt i end val strip_spec_vars = strip_qnt_vars @{const_name All} o HOLogic.dest_Trueprop; @@ -1974,22 +2122,21 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx SOME (_,_,_,_,_,_,Theta,_) => get_auxvars_for pname Theta | _ => [])); - fun get_call_rule Static mode is_abr = - if is_abr then Proc mode else ProcNoAbr mode - | get_call_rule Parameter mode is_abr = - if is_abr then DynProcProcPar mode else DynProcProcParNoAbr mode; + fun get_call_rule' Static mode is_abr result_exn = + if is_abr then Proc mode result_exn else ProcNoAbr mode result_exn + | get_call_rule' Parameter mode is_abr result_exn = + if is_abr then DynProcProcPar mode result_exn else DynProcProcParNoAbr mode result_exn; val [call_rule,augment_ctxt_rule,asmUN_rule, augment_emptyFaults] = adapt_aux_var ctxt' true spec_vars (map get_aux_tvar - [get_call_rule cmode mode is_abr, + [get_call_rule' cmode mode is_abr result_exn, AugmentContext mode, AsmUN mode, AugmentEmptyFaults mode]); - in EVERY [resolve_tac ctxt' [call_rule] i, trace_tac ctxt' "call_tac -- basic_tac -- solving spec", - solve_spec ctxt' augment_ctxt_rule asmUN_rule augment_emptyFaults + solve_spec ctxt' augment_ctxt_rule asmUN_rule augment_emptyFaults mode cmode spec_mode spec spec_goal] end; @@ -2012,8 +2159,8 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx | SOME (_,c,Q,spec_abr,spec_mode,_,_,_) => case try dest_call c of NONE => (warning msg;(true,NONE,NONE,false)) - | SOME (_,p,_,_,m,spec_has_args) - => if proc_name m p = pname + | SOME (_,p,_,_,m,spec_has_args,_) + => if proc_name ctxt m p = pname then if (mode=Total andalso spec_mode=Partial) then (warning 
msg;(true,NONE,NONE,false)) else if is_empty_set spec_abr then @@ -2036,7 +2183,7 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx resolve_tac ctxt' [allI] (i+1), cont_tac ctxt' (i+1), trace_tac ctxt' "call_tac -- basic_tac -- simplify", - conseq_simp_tac ctxt' state_kind [@{thm StateSpace.upd_globals_def}] i, + conseq_simp_tac ctxt' state_kind (Named_Theorems.get ctxt @{named_theorems "state_simp"}) i, trace_tac ctxt' "call_tac -- basic_tac -- STOP --"] end; @@ -2111,8 +2258,8 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx | modify_updatesF subst _ glob ((globs$Z)) = (glob$Bound 1) (* may_not_modify *) | modify_updatesF _ _ _ t = raise TERM ("gen_call_tac.modify_updatesF",[t]); - fun modify_updates Record = modify_updatesR - | modify_updates _ = modify_updatesF + fun modify_updates Function = modify_updatesF + | modify_updates _ (* Record and Other *) = modify_updatesR fun globalsT (Const (gupd,T)) = domain_type (hd (binder_types T)) @@ -2139,23 +2286,23 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx val is_abr = not (is_empty_set modif_spec_abr); val emptyTheta = is_empty_set Theta; (*val emptyFaults = is_empty_set F;*) - val spec_has_args = #6 (dest_call call); + val (_,_,_,_,_,spec_has_args,_) = dest_call call; val () = if spec_has_args then error "procedure call in modifies-specification must be parameterless!" else (); val (mxprem,ModRet) = (case cmode of Static => (8,if is_abr - then if emptyTheta then (ProcModifyReturn mode) - else (ProcModifyReturnSameFaults mode) - else if emptyTheta then (ProcModifyReturnNoAbr mode) - else (ProcModifyReturnNoAbrSameFaults mode)) + then if emptyTheta then (ProcModifyReturn mode result_exn) + else (ProcModifyReturnSameFaults mode result_exn) + else if emptyTheta then (ProcModifyReturnNoAbr mode result_exn) + else (ProcModifyReturnNoAbrSameFaults mode result_exn)) | Parameter => (9,if is_abr - then if emptyTheta then (ProcProcParModifyReturn mode) - else (ProcProcParModifyReturnSameFaults mode) - else if emptyTheta then (ProcProcParModifyReturnNoAbr mode) - else (ProcProcParModifyReturnNoAbrSameFaults mode))); + then if emptyTheta then (ProcProcParModifyReturn mode result_exn) + else (ProcProcParModifyReturnSameFaults mode result_exn) + else if emptyTheta then (ProcProcParModifyReturnNoAbr mode result_exn) + else (ProcProcParModifyReturnNoAbrSameFaults mode result_exn))); val to_prove_prem = (case cmode of Static => 0 | Parameter => 1); val spec_goal = if is_abr then i + mxprem - 5 else i + mxprem - 6 @@ -2170,7 +2317,7 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx else return;*) val cret = Thm.cterm_of ctxt' return'; - val (_,_,return'_var,_,_,_) = nth (Thm.prems_of ModRet) to_prove_prem + val (_,_,return'_var,_,_,_,_) = nth (Thm.prems_of ModRet) to_prove_prem |> dest_hoare |> #2 |> dest_call; val ModRet' = infer_instantiate ctxt' [(#1 (dest_Var return'_var), cret)] ModRet; @@ -2181,8 +2328,9 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx (clarsimp_tac ((ctxt' |> put_claset (claset_of @{theory_context Set}) |> put_simpset (simpset_of @{theory_context Set})) - addsimps ([@{thm Hoare.mex_def},@{thm Hoare.meq_def},@{thm StateSpace.upd_globals_def}]@K_convs) - addsimprocs (state_upd_simproc Record::(state_simprocs state_kind)) + addsimps ([@{thm Hoare.mex_def},@{thm Hoare.meq_def}]@K_convs@ + (Named_Theorems.get ctxt @{named_theorems "state_simp"})) + addsimprocs 
(state_upd_simprocs ctxt Record @ state_simprocs ctxt state_kind) |> fold Simplifier.add_cong K_congs) i) THEN_MAYBE EVERY [trace_tac ctxt' "modify_tac: splitting record", @@ -2211,7 +2359,7 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx |> (fn SOME res => res | NONE => raise TERM ("get_call_tac.modify_tac: no proper modifies spec", [])); - fun specs_of_assms_tac ({context = ctxt', prems, ...}: Subgoal.focus) = + fun specs_of_assms_tac ({context = ctxt', prems, ...}: Subgoal.focus) = (case get_spec pname is_spec_clause prems of SOME (_,spec) => (case get_spec pname is_modifies_clause prems of SOME (_,modifies_thm) => modify_tac ctxt' (SOME spec) modifies_thm 1 @@ -2238,7 +2386,7 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx basic_tac ctxt spec))) end; - fun inline_bdy_tac has_args i = + fun inline_bdy_tac has_args result_exn i = (case try (Proof_Context.get_thm ctxt) (suffix bodyP pname') of NONE => no_tac | SOME impl => @@ -2249,7 +2397,7 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx "\". Inlining procedure!"); if has_args then EVERY [trace_tac ctxt "inline_bdy_tac args", - resolve_tac ctxt [CallBody mode] i, + resolve_tac ctxt [CallBody mode result_exn] i, resolve_tac ctxt [impl] (i+3), resolve_tac ctxt [allI] (i+2), resolve_tac ctxt [allI] (i+2), @@ -2268,13 +2416,13 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx (case cmode of Static => if get_recursive pname ctxt = SOME false andalso is_none spec - then inline_bdy_tac has_args + then inline_bdy_tac has_args result_exn else test_modify_in_ctxt_tac | Parameter => (case spec of - NONE => (tracing "no spec found!"; Subgoal.FOCUS specs_of_assms_tac ctxt) + NONE => (trace_msg ctxt "no spec found!"; Subgoal.FOCUS specs_of_assms_tac ctxt) | SOME spec => - (tracing "found spec!"; case check_spec pname is_spec_clause spec of + (trace_msg ctxt "found spec!"; case check_spec pname is_spec_clause spec of SOME _ => test_modify_in_ctxt_tac | NONE => (warning ("ignoring theorem " ^ (suffix spec_sfx pname') ^ "; no proper specification for procedure " ^pname'); @@ -2283,10 +2431,10 @@ fun gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx fun call_tac cont_tac mode state_kind state_space ctxt asms spec_sfx t = let - val (_,c,_,_,_,_,_,F) = dest_hoare (Logic.strip_assums_concl t); - fun gen_tac (_,pname,return,c,cmode,has_args) = + val (_,c,_,_,_,_,_,F) = dest_hoare t; + fun gen_tac (_,pname,return,c,cmode,has_args,result_exn) = gen_call_tac cont_tac mode cmode state_kind state_space ctxt asms spec_sfx - (proc_name cmode pname) return has_args F; + (proc_name ctxt cmode pname) return has_args result_exn F; in gen_tac (dest_call c) end handle TERM _ => K no_tac; @@ -2323,6 +2471,7 @@ fun guard_tac ctxt strip cont_tac mode (t,i) = solve_in_Faults_tac ctxt (i+2), cont_tac ctxt (i+1), triv_simp ctxt i] + in if is_empty_set F then EVERY [trace_tac ctxt "Guard: basic_tac", basic_tac i] else EVERY [trace_tac ctxt "Guard: trying guarantee_tac", guarantee_tac i ORELSE basic_tac i] end handle TERM _ => no_tac @@ -2336,27 +2485,33 @@ fun in_rel_simp ctxt = simp_tac (put_simpset HOL_basic_ss ctxt addsimps [@{thm Hoare.in_measure_iff},@{thm Hoare.in_lex_iff},@{thm Hoare.in_mlex_iff},@{thm Hoare.in_inv_image_iff}, @{thm split_conv}]); -fun while_annotate_tac ctxt inv i st = +fun while_annotate_tac ctxt inv state_space i st = let val annotateWhile = Thm.lift_rule (Thm.cprem_of st i) @{thm 
HoarePartial.reannotateWhileNoGuard}; - val lifted_inv = fold_rev Term.abs (Logic.strip_params (Logic.get_goal (Thm.prop_of st) i)) inv; + val params = Logic.strip_params (Logic.get_goal (Thm.prop_of st) i) + val first_state_idx = find_index (fn x => state_space (Free x) <> 0) (rev params) + val inv = if first_state_idx > 0 then incr_boundvars first_state_idx inv else inv + val lifted_inv = fold_rev Term.abs params inv; val invVar = (#1 o strip_comb o #3 o dest_whileAnno o #2 o dest_hoare) (List.last (Thm.prems_of annotateWhile)); val annotate = infer_instantiate ctxt [(#1 (dest_Var invVar), Thm.cterm_of ctxt lifted_inv)] annotateWhile; - in ((trace_tac ctxt ("annotating While with: " ^ Syntax.string_of_term ctxt lifted_inv )) + in ((trace_tac ctxt ("try annotating While with: " ^ Syntax.string_of_term ctxt lifted_inv )) THEN compose_tac ctxt (false,annotate,1) i) st end; -fun cond_annotate_tac ctxt inv mode (_,i) st = +fun cond_annotate_tac ctxt inv mode state_space (_,i) st = let val annotateCond = Thm.lift_rule (Thm.cprem_of st i) (CondInv' mode); - val lifted_inv = fold_rev Term.abs (Logic.strip_params (Logic.get_goal (Thm.prop_of st) i)) inv; + val params = Logic.strip_params (Logic.get_goal (Thm.prop_of st) i) + val first_state_idx = find_index (fn x => state_space (Free x) <> 0) (rev params) + val inv = if first_state_idx > 0 then incr_boundvars first_state_idx inv else inv + val lifted_inv = fold_rev Term.abs params inv; val invVar = List.last (Thm.prems_of annotateCond) |> dest_hoare |> #3 |> strip_comb |> #1; val annotate = infer_instantiate ctxt [(#1 (dest_Var invVar), Thm.cterm_of ctxt lifted_inv)] annotateCond; - in ((trace_tac ctxt ("annotating Cond with: "^ Syntax.string_of_term ctxt lifted_inv)) + in ((trace_tac ctxt ("try annotating Cond with: "^ Syntax.string_of_term ctxt lifted_inv)) THEN compose_tac ctxt (false,annotate,5) i) st end; @@ -2379,13 +2534,13 @@ fun basic_while_tac ctxt state_kind cont_tac tac mode i = EVERY [trace_tac ctxt "basic_while_tac: basic_tac", basic_tac i] end; -fun while_tac ctxt state_kind inv cont_tac tac mode t i= +fun while_tac ctxt state_kind state_space inv cont_tac tac mode t i= let val basic_tac = basic_while_tac ctxt state_kind cont_tac tac mode; in (case inv of NONE => basic_tac i - | SOME I => EVERY [while_annotate_tac ctxt I i, basic_tac i]) + | SOME I => EVERY [while_annotate_tac ctxt I state_space i, basic_tac i]) end handle TERM _ => no_tac @@ -2501,9 +2656,9 @@ fun prems_tac ctxt i = TRY (resolve_tac ctxt (Assumption.all_prems_of ctxt) i); -fun mk_proc_assoc thms = +fun mk_proc_assoc ctxt thms = let - fun name (_,p,_,_,cmode,_) = proc_name cmode p; + fun name (_,p,_,_,cmode,_,_) = proc_name ctxt cmode p; fun proc_name thm = thm |> Thm.concl_of |> dest_hoare |> #2 |> dest_call |> name; in map (fn thm => (proc_name thm,thm)) thms end; @@ -2519,7 +2674,10 @@ fun HoareTac annotate_inv exspecs let val (P,c,Q,A,_,G,T,F) = dest_hoare (Logic.strip_assums_concl (Logic.get_goal (Thm.prop_of st) 1)); - val wp_tacs = #wp_tacs (get_data ctxt); + val solve_modifies = spec_sfx = modifysfx andalso annotate_inv andalso mode = Partial andalso + is_modifies_assertion Q andalso is_modifies_assertion A + + val hoare_tacs = #hoare_tacs (get_data ctxt); val params = (strip_vars (Logic.get_goal (Thm.prop_of st) 1)); val Inv = (if annotate_inv @@ -2530,33 +2688,36 @@ fun HoareTac annotate_inv exspecs val exspecthms = map (Proof_Context.get_thm ctxt) exspecs; val asms = try (fn () => - mk_proc_assoc (gen_context_thms ctxt mode params G T F @ exspecthms)) () 
+ mk_proc_assoc ctxt (gen_context_thms ctxt mode params G T F @ exspecthms)) () |> the_default []; fun while_annoG_tac (t,i) = whileAnnoG_tac ctxt (annotate_inv orelse strip_guards) mode t i; fun WlpTac tac i = (* WlpTac does not end with subset_refl *) FIRST - ([EVERY [resolve_tac ctxt [Seq mode] i,trace_tac ctxt "Seq",HoareRuleTac tac false ctxt (i+1)], - EVERY [resolve_tac ctxt [Catch mode] i,trace_tac ctxt "Catch",HoareRuleTac tac false ctxt (i+1)], - EVERY [resolve_tac ctxt [CondCatch mode] i,trace_tac ctxt "CondCatch",HoareRuleTac tac false ctxt (i+1)], - EVERY [resolve_tac ctxt [BSeq mode] i,trace_tac ctxt "BSeq",HoareRuleTac tac false ctxt (i+1)], + + ([EVERY [resolve_tac ctxt [Seq mode solve_modifies] i,trace_tac ctxt "Seq", HoareRuleTac tac false ctxt (i+1)], + EVERY [resolve_tac ctxt [Catch mode solve_modifies] i,trace_tac ctxt "Catch",HoareRuleTac tac false ctxt (i+1)], + EVERY [resolve_tac ctxt [CondCatch mode solve_modifies] i,trace_tac ctxt "CondCatch",HoareRuleTac tac false ctxt (i+1)], + EVERY [resolve_tac ctxt [BSeq mode solve_modifies] i,trace_tac ctxt "BSeq",HoareRuleTac tac false ctxt (i+1)], EVERY [resolve_tac ctxt [FCall mode] i,trace_tac ctxt "FCall"], EVERY [resolve_tac ctxt [GuardsNil mode] i,trace_tac ctxt "GuardsNil"], EVERY [resolve_tac ctxt [GuardsConsGuaranteeStrip mode] i, trace_tac ctxt "GuardsConsGuaranteeStrip"], EVERY [resolve_tac ctxt [GuardsCons mode] i,trace_tac ctxt "GuardsCons"], EVERY [SUBGOAL while_annoG_tac i] - ] - @ - map (mk_hoare_tac (fn p => HoareRuleTac tac p ctxt) ctxt mode i) wp_tacs) + ]) and HoareRuleTac tac pre_cond ctxt i st = - let fun call (t,i) = call_tac (HoareRuleTac tac false) + let + val _ = if Config.get ctxt hoare_trace > 1 then + print_tac ctxt ("HoareRuleTac (" ^ @{make_string} (pre_cond, i) ^ "):") st + else all_tac st + fun call (t,i) = call_tac (HoareRuleTac tac false) mode state_kind state_space ctxt asms spec_sfx t i fun cond_tac i = if annotate_inv andalso Config.get ctxt use_cond_inv_modifies then - EVERY[SUBGOAL (cond_annotate_tac ctxt (the Inv) mode) i, + EVERY[SUBGOAL (cond_annotate_tac ctxt (the Inv) mode state_space) i, HoareRuleTac tac false ctxt (i+4), HoareRuleTac tac false ctxt (i+3), BasicSimpTac ctxt state_kind true [] tac (i+2), @@ -2571,12 +2732,12 @@ fun HoareTac annotate_inv exspecs EVERY[resolve_tac ctxt [SwitchCons mode] i,trace_tac ctxt "SwitchCons", HoareRuleTac tac false ctxt (i+2), HoareRuleTac tac false ctxt (i+1)]; - fun while_tac' (t,i) = while_tac ctxt state_kind Inv + fun while_tac' (t,i) = while_tac ctxt state_kind state_space Inv (HoareRuleTac tac true) tac mode t i; in st |> ( (WlpTac tac i THEN HoareRuleTac tac pre_cond ctxt i) ORELSE - (FIRST([EVERY[resolve_tac ctxt [Skip mode] i,trace_tac ctxt "Skip"], + (TRY (FIRST([EVERY[resolve_tac ctxt [Skip mode] i, trace_tac ctxt "Skip"], EVERY[resolve_tac ctxt [BasicCond mode] i, trace_tac ctxt "BasicCond", assertion_simp_tac ctxt state_kind [] i], (resolve_tac ctxt [Basic mode] i THEN trace_tac ctxt "Basic") @@ -2589,8 +2750,8 @@ fun HoareTac annotate_inv exspecs EVERY[resolve_tac ctxt [Throw mode] i, trace_tac ctxt "Throw"], (resolve_tac ctxt [Raise mode] i THEN trace_tac ctxt "Raise") THEN_MAYBE (assertion_string_eq_simp_tac ctxt state_kind [] i), - cond_tac i, - switch_tac i, + EVERY[cond_tac i], + EVERY[switch_tac i], EVERY[resolve_tac ctxt [Block mode] i, trace_tac ctxt "Block", resolve_tac ctxt [allI] (i+2), resolve_tac ctxt [allI] (i+2), @@ -2600,7 +2761,7 @@ fun HoareTac annotate_inv exspecs HoareRuleTac tac false ctxt (i+1)], SUBGOAL 
while_tac' i, SUBGOAL (guard_tac ctxt (annotate_inv orelse strip_guards) - (HoareRuleTac tac false) mode) i, + (HoareRuleTac tac false) mode THEN' (K (trace_tac ctxt "guard_tac succeeded"))) i, EVERY[SUBGOAL (specAnno_tac ctxt state_kind (HoareRuleTac tac true) mode) i], EVERY[SUBGOAL (whileAnnoFix_tac ctxt state_kind @@ -2616,12 +2777,11 @@ fun HoareTac annotate_inv exspecs EVERY[trace_tac ctxt "calling call_tac",SUBGOAL call i], EVERY[trace_tac ctxt "LemmaAnno",SUBGOAL (lemAnno_tac ctxt state_kind mode) i]] @ - map (mk_hoare_tac (fn p => HoareRuleTac tac p ctxt) ctxt mode i) hoare_tacs) - THEN (if pre_cond - then EVERY [trace_tac ctxt "pre_cond", - TRY (BasicSimpTac ctxt state_kind true [] tac i), - (* FIXME: Do we need TRY *) - trace_tac ctxt "after BasicSimpTac"] + map (mk_hoare_tac (fn p => HoareRuleTac tac p ctxt) ctxt mode i) hoare_tacs)) + THEN (if pre_cond orelse solve_modifies + then EVERY [trace_tac ctxt ("pre_cond / solve_modfies: " ^ @{make_string} (pre_cond, solve_modifies)), + TRY (BasicSimpTac ctxt state_kind true (Named_Theorems.get ctxt @{named_theorems "state_simp"}) tac i), + trace_tac ctxt ("after BasicSimpTac " ^ string_of_int i)] else (resolve_tac ctxt [subset_refl] i)))) end; in ((K (EVERY [REPEAT (resolve_tac ctxt [allI] 1), HoareRuleTac tac true ctxt 1])) @@ -2640,7 +2800,7 @@ fun HoareStepTac strip_guards mode state_kind state_space spec_sfx ctxt tac st = let val (_,_,_,_,_,G,T,F) = dest_hoare (Logic.strip_assums_concl (Logic.get_goal (Thm.prop_of st) 1)); val params = (strip_vars (Logic.get_goal (Thm.prop_of st) 1)); - in mk_proc_assoc (gen_context_thms ctxt mode params G T F) + in mk_proc_assoc ctxt (gen_context_thms ctxt mode params G T F) end) () |> the_default []; @@ -2654,11 +2814,11 @@ fun HoareStepTac strip_guards mode state_kind state_space spec_sfx ctxt tac st = TRY (hyp_subst_tac_thin true ctxt i), BasicSimpTac ctxt state_kind true [] tac i] fun while_annoG_tac (t,i) = whileAnnoG_tac ctxt strip_guards mode t i; - - in st |> + val hoare_tacs = #hoare_tacs (get_data ctxt); + in st |> CHANGED ( (REPEAT (resolve_tac ctxt [allI] 1) THEN - FIRST [resolve_tac ctxt [subset_refl] 1, + FIRST ([resolve_tac ctxt [subset_refl] 1, EVERY[resolve_tac ctxt [Skip mode] 1,TRY (BasicSimpTac ctxt state_kind false [] tac 1)], EVERY[resolve_tac ctxt [BasicCond mode] 1,trace_tac ctxt "BasicCond", TRY (BasicSimpTac ctxt state_kind false [] tac 1)], @@ -2666,9 +2826,9 @@ fun HoareStepTac strip_guards mode state_kind state_space spec_sfx ctxt tac st = EVERY[resolve_tac ctxt [Throw mode] 1,TRY (BasicSimpTac ctxt state_kind false [] tac 1)], EVERY[resolve_tac ctxt [Raise mode] 1,TRY (assertion_string_eq_simp_tac ctxt state_kind [] 1)], resolve_tac ctxt [SeqSwap mode] 1 - THEN_MAYBE HoareStepTac strip_guards mode state_kind state_space spec_sfx - ctxt tac, - EVERY[resolve_tac ctxt [BSeq mode] 1, + THEN_MAYBE TRY (HoareStepTac strip_guards mode state_kind state_space spec_sfx + ctxt tac), + EVERY[resolve_tac ctxt [BSeq mode false] 1, prefer_tac 2 THEN_MAYBE HoareStepTac strip_guards mode state_kind state_space spec_sfx ctxt tac], @@ -2703,9 +2863,11 @@ fun HoareStepTac strip_guards mode state_kind state_space spec_sfx ctxt tac st = EVERY[resolve_tac ctxt [DynCom mode] 1], EVERY[SUBGOAL call 1, BasicSimpTac ctxt state_kind false [] tac 1], EVERY[SUBGOAL (lemAnno_tac ctxt state_kind mode) 1, - BasicSimpTac ctxt state_kind false [] tac 1], - final_simp_tac 1 - ]) + BasicSimpTac ctxt state_kind false [] tac 1] + + ] @ + map (mk_hoare_tac (K (K all_tac)) ctxt mode 1) hoare_tacs @ + 
[final_simp_tac 1]))) end; (*****************************************************************************) @@ -2717,12 +2879,12 @@ struct val globals = @{const_name StateSpace.state.globals}; -fun isState (Const _$Abs (s,T,t)) = +fun isState _ (Const _$Abs (s,T,t)) = (case (state_hierarchy T) of ((n,_)::_) => n = "StateSpace.state.state" andalso is_none (try dest_hoare_raw (strip_qnt_body @{const_name All} t)) | _ => false) - | isState _ = false; + | isState _ _ = false; fun isFreeState (Free (_,T)) = (case (state_hierarchy T) of @@ -2730,7 +2892,7 @@ fun isFreeState (Free (_,T)) = | _ => false) | isFreeState _ = false; -val abs_state = Option.map snd o first_subterm isFreeState; +fun abs_state _ = Option.map snd o first_subterm isFreeState; fun sel_eq (Const (x,_)$_) y = (x=y) @@ -2743,8 +2905,8 @@ fun bound xs (t as (Const (x,_)$_)) = in if i < 0 then (length xs, xs@[t]) else (i,xs) end | bound xs t = raise TERM ("RecordSplitState.bound",[t]); -fun abs_var _ (Const (x,T)$_) = - (remdeco' (Long_Name.base_name x),range_type T) +fun abs_var ctxt (Const (x,T)$_) = + (remdeco' ctxt (Long_Name.base_name x),range_type T) | abs_var _ t = raise TERM ("RecordSplitState.abs_var",[t]); fun fld_eq (x, _) y = (x = y) @@ -2820,7 +2982,7 @@ fun split_state ctxt s T t = val vars' = if Config.get ctxt sort_variables then sort_vars ctxt T vars else vars; in (abstract_vars vars' s t,rev vars') end; -fun ex_tac ctxt _ st = Record.split_simp_tac ctxt @{thms simp_thms} (K ~1) 1 st; +fun ex_tac ctxt _ i = Record.split_simp_tac ctxt @{thms simp_thms} (K ~1) i; end; @@ -2829,12 +2991,12 @@ struct val full_globalsN = @{const_name StateSpace.state.globals}; -fun isState (Const _$Abs (s,T,t)) = +fun isState _ (Const _$Abs (s,T,t)) = (case (state_hierarchy T) of ((n,_)::_) => n = "StateSpace.state.state" andalso is_none (try dest_hoare_raw (strip_qnt_body @{const_name All} t)) | _ => false) - | isState _ = false; + | isState _ _ = false; fun isFreeState (Free (_,T)) = (case (state_hierarchy T) of @@ -2842,10 +3004,10 @@ fun isFreeState (Free (_,T)) = | _ => false) | isFreeState _ = false; -val abs_state = Option.map snd o first_subterm isFreeState; +fun abs_state _ = Option.map snd o first_subterm isFreeState; fun comp_name t = - case try (implode o dest_string) t of + case try dest_string t of SOME str => str | NONE => (case t of Free (s,_) => s @@ -2915,13 +3077,13 @@ fun abstract_vars vars s t = val dummy = Bound 0; in fold_state_prop var app abs other 0 s dummy t end; -fun sort_vars _ vars = +fun sort_vars ctxt vars = let val fld_idx = idx (fn s1:string => fn s2 => s1 = s2); fun compare (_$_$n$(Const (s1,_)$_),_$_$m$(Const (s2,_)$_)) = let - val n' = remdeco' (comp_name n); - val m' = remdeco' (comp_name m); + val n' = remdeco' ctxt (comp_name n); + val m' = remdeco' ctxt (comp_name m); in if s1 = full_globalsN then if s2 = full_globalsN then string_ord (n',m') @@ -2938,7 +3100,7 @@ fun split_state ctxt s _ t = val vars' = if Config.get ctxt sort_variables then sort_vars ctxt vars else vars; in (abstract_vars vars' s t,rev vars') end; -fun abs_var _ t = (remdeco' (sel_name t), sel_type t); +fun abs_var ctxt t = (remdeco' ctxt (sel_name t), sel_type t); (* Proof for: EX x_1 ... x_n. P x_1 ... x_n * ==> EX s. P (lookup destr_1 "x_1" s) ... 
(lookup destr_n "x_n" s) @@ -2962,11 +3124,10 @@ val ss = in -fun ex_tac ctxt vs st = +fun ex_tac ctxt vs = SUBGOAL (fn (goal, i) => let val vs' = rev vs; - val (Const (_,exT)$_) = HOLogic.dest_Trueprop - (Logic.strip_imp_concl (Logic.get_goal (Thm.prop_of st) 1)); + val (Const (_,exT)$_) = HOLogic.dest_Trueprop (Logic.strip_imp_concl goal); val sT = domain_type (domain_type exT); val s0 = Const (@{const_name HOL.undefined},sT); @@ -3014,10 +3175,14 @@ fun ex_tac ctxt vs st = in (compose_tac ctxt (false,inst_rule, Thm.nprems_of exI) i st) end; - in EVERY [REPEAT_DETERM_N (length vs) (eresolve_tac ctxt [exE] 1), - lift_inst_ex_tac 1, - simp_tac (put_simpset ss ctxt) 1 - ] st end + in EVERY + [REPEAT_DETERM_N (length vs) (eresolve_tac ctxt [exE] i), + lift_inst_ex_tac i, + simp_tac (put_simpset ss ctxt) i + ] + end +) + end (* Test: What happens when there are no lookups., EX s. True *) @@ -3025,11 +3190,12 @@ end end; -structure GeneraliseRecord = GeneraliseFun (structure SplitState=RecordSplitState); -structure GeneraliseStateFun = GeneraliseFun (structure SplitState=FunSplitState); +structure GeneraliseRecord = Generalise (structure SplitState=RecordSplitState); +structure GeneraliseStateFun = Generalise (structure SplitState=FunSplitState); -fun generalise Record = GeneraliseRecord.GENERALISE - | generalise Function = GeneraliseStateFun.GENERALISE; +fun generalise _ Record = GeneraliseRecord.GENERALISE + | generalise _ Function = GeneraliseStateFun.GENERALISE + | generalise ctxt (Other i) = the (generalise_other ctxt i); (*****************************************************************************) (** record_vanish_tac splits up the records of a verification condition, **) @@ -3038,7 +3204,7 @@ fun generalise Record = GeneraliseRecord.GENERALISE (** form "!!s Z. s=Z ==> ..., where s and Z are records **) (*****************************************************************************) -(* FIXME: Check out if removing the useless vars is a performance issue. +(* fixme: Check out if removing the useless vars is a performance issue. If so, maybe we can remove all useless vars at once (no iterated simplification) or try to avoid introducing them. 
Before splitting the goal we can simplify the goal with state_simproc this may lead @@ -3054,6 +3220,29 @@ fun record_vanish_tac ctxt state_kind state_space i = | no_spec _ = true; fun state_space_no_spec t = if state_space t <> 0 andalso no_spec t then ~1 else 0; + val state_split_tac = state_split_simp_tac ctxt rem_useless_vars_simps state_space_no_spec i + fun generalise_tac split_record st = + DETERM (generalise ctxt state_kind ctxt i) st + handle (exn as (TERM _)) => + let + val _ = warning ("record_vanish_tac: generalise subgoal " ^ string_of_int i ^ + " failed" ^ (if split_record then ", fallback to record split:\n " else "") ^ + Runtime.exn_message (Runtime.exn_context (SOME ctxt) exn)); + in + if split_record then + EVERY [ + state_split_tac, + full_simp_tac (ctxt + addsimprocs (state_simprocs ctxt state_kind @ + state_upd_simprocs ctxt state_kind) + addsimps (Named_Theorems.get ctxt @{named_theorems "state_simp"})) i, + trace_subgoal_tac ctxt "after record split and simp" i, + generalise_tac false, + trace_subgoal_tac ctxt "after 'generalise_tac false'" i + ] st + else all_tac st + end; + in EVERY [trace_tac ctxt "record_vanish_tac -- START --", REPEAT (eresolve_tac ctxt [conjE] i), trace_tac ctxt "record_vanish_tac -- hyp_subst_tac ctxt --", @@ -3063,12 +3252,8 @@ fun record_vanish_tac ctxt state_kind state_space i = want to split them to avoid naming conflicts and increase performance *) trace_tac ctxt "record_vanish_tac -- Splitting records --", if Config.get ctxt use_generalise orelse state_kind = Function - then generalise state_kind ctxt i - else state_split_simp_tac ctxt rem_useless_vars_simps state_space_no_spec i - (*THEN_MAYBE - EVERY [trace_tac ctxt "record_vanish_tac -- removing useless vars --", - full_simp_tac rem_useless_vars_simpset i, - trace_tac ctxt "record_vanish_tac -- STOP --"]*) + then EVERY [generalise_tac true] + else state_split_tac ] end else @@ -3099,7 +3284,7 @@ val state_fun_update_ss = @ @{thms list.inject list.distinct char.inject cong_exp_iff_simps simp_thms} @ K_fun_convs) addsimprocs [DistinctTreeProver.distinct_simproc ["distinct_fields", "distinct_fields_globals"]] - |> Simplifier.add_cong @{thm imp_cong} (* K_fun_congs FIXME: Stefan fragen*) + |> Simplifier.add_cong @{thm imp_cong} |> Splitter.add_split @{thm if_split}); in @@ -3125,13 +3310,7 @@ fun solve_modifies_tac ctxt state_kind state_space i st = else 0 | is_split_state t = 0; val simp_ctxt = put_simpset HOL_ss ctxt addsimps (@{thm Ex_True} :: @{thm Ex_False} :: Record.get_extinjects thy); - fun try_solve Record i = (*(SOLVE*) - (((fn k => (TRY (REPEAT_ALL_NEW (resolve_tac ctxt [conjI, impI, allI]) k))) - THEN_ALL_NEW - (fn k => EVERY [state_split_simp_tac ctxt [] is_split_state k, - simp_tac simp_ctxt k THEN_MAYBE rename_goal ctxt remdeco' k - ])) i) (*)*) - | try_solve _ i = + fun try_solve Function i = ((fn k => (TRY (REPEAT_ALL_NEW (resolve_tac ctxt [conjI, impI, allI]) k))) THEN_ALL_NEW (fn k => REPEAT (resolve_tac ctxt [exI] k) THEN @@ -3139,16 +3318,22 @@ fun solve_modifies_tac ctxt state_kind state_space i st = simp_tac (put_simpset state_fun_update_ss ctxt) k THEN_MAYBE (REPEAT_ALL_NEW (resolve_tac ctxt [conjI,impI,refl]) k))) i + | try_solve _ i = (*(SOLVE*) (* Record and Others *) + (((fn k => (TRY (REPEAT_ALL_NEW (resolve_tac ctxt [conjI, impI, allI]) k))) + THEN_ALL_NEW + (fn k => EVERY [state_split_simp_tac ctxt [] is_split_state k, + simp_tac simp_ctxt k THEN_MAYBE rename_goal ctxt (remdeco' ctxt) k + ])) i) (*)*) in ((trace_tac ctxt "solve_modifies_tac" THEN 
clarsimp_tac ((ctxt |> put_claset (claset_of @{theory_context HOL}) |> put_simpset (simpset_of @{theory_context Set})) - addsimps ([@{thm Hoare.mex_def},@{thm Hoare.meq_def}]@K_convs) - addsimprocs (state_upd_simproc Record::(state_simprocs Record)) + addsimps (@{thms Hoare.mex_def Hoare.meq_def} @K_convs@(Named_Theorems.get ctxt @{named_theorems "state_simp"})) + addsimprocs (state_upd_simprocs ctxt Record @ state_simprocs ctxt Record) |> fold Simplifier.add_cong K_congs) i) THEN_MAYBE - try_solve state_kind i) st + (try_solve state_kind i)) st end; end @@ -3158,7 +3343,7 @@ fun proc_lookup_simp_tac ctxt i st = val name = (Logic.concl_of_goal (Thm.prop_of st) i) |> dest_hoare |> #2 |> strip_comb |> #2 |> last |> strip_comb |> #2 |> last; (* the$(Gamma$name) or the$(strip$Gamma$name) *) - val pname = (unsuffix proc_deco (dest_string' name)); + val pname = chopsfx proc_deco (dest_string' name); val thms = map_filter I (map (try (Proof_Context.get_thm ctxt)) [suffix bodyP pname, suffix (body_def_sfx^"_def") pname, @@ -3171,9 +3356,9 @@ fun proc_lookup_in_dom_simp_tac ctxt i st = let val _$name$_ = (HOLogic.dest_Trueprop (Logic.concl_of_goal (Thm.prop_of st) i)); (* name : Gamma *) - val pname = (unsuffix proc_deco (dest_string' name)); + val pname = chopsfx proc_deco (dest_string' name); val thms = map_filter I (map (try (Proof_Context.get_thm ctxt)) - [suffix bodyP pname]); + [suffix bodyP pname, suffix "_def" pname]); in simp_tac (put_simpset HOL_basic_ss ctxt addsimps (@{thm Hoare.lookup_Some_in_dom} :: @{thm dom_strip} :: thms)) i st end) () @@ -3243,7 +3428,8 @@ fun HoareCallRuleTac state_kind state_space ctxt thms i st = fun basic_tac i = (((resolve_tac ctxt thms') THEN_ALL_NEW - (fn k => (SUBGOAL solve_sidecondition_tac k))) i) + (fn k => + (SUBGOAL solve_sidecondition_tac k))) i) in (basic_tac ORELSE' (fn k => @@ -3258,17 +3444,33 @@ fun vcg_polish_tac solve_modifies ctxt state_kind state_space i = if solve_modifies then solve_modifies_tac ctxt state_kind state_space i else record_vanish_tac ctxt state_kind state_space i - THEN_MAYBE EVERY [rename_goal ctxt remdeco' i(*, + THEN_MAYBE EVERY [rename_goal ctxt (remdeco' ctxt) i(*, simp_tac (HOL_basic_ss addsimps @{thms simp_thms})) i*)]; fun is_funtype (Type ("fun", _)) = true | is_funtype _ = false; +fun get_state_kind_extension ctxt T = + let + val sps = #state_spaces (Hoare_Data.get (Context.Proof ctxt)) + in + case find_first (fn (n, sp) => (#is_state_type sp) ctxt T) sps of + SOME (n, sp) => SOME n + | NONE => NONE + end + fun state_kind_of ctxt T = let val thy = Proof_Context.theory_of ctxt; val (s,sT) = nth (fst (Record.get_recT_fields thy T)) 1; - in if Long_Name.base_name s = "locals" andalso is_funtype sT then Function else Record end + in + case get_state_kind_extension ctxt T of + SOME n => Other n + | _ => if Long_Name.base_name s = "locals" andalso is_funtype sT then + Function + else + Record + end handle Subscript => Record; fun find_state_space_in_triple ctxt t = @@ -3373,16 +3575,21 @@ val vcg_step = gen_simp_method hoare_step_tac; val trace_hoare_users = Unsynchronized.ref false -fun print_subgoal_tac ctxt s i = - SUBGOAL (fn (prem, _) => trace_tac ctxt (s ^ (Syntax.string_of_term ctxt prem))) i - fun mk_hoare_thm thm _ ctxt _ i = - EVERY [resolve_tac ctxt [thm] i, - if !trace_hoare_users then print_subgoal_tac ctxt "Tracing: " i + EVERY [resolve_tac ctxt [Thm.transfer' ctxt thm] i, + if !trace_hoare_users then trace_subgoal_tac ctxt "Tracing: " i else all_tac] val vcg_hoare_add = - Thm.declaration_attribute (fn thm 
=> add_hoare_tacs [(Thm.derivation_name thm, mk_hoare_thm thm)]) + let + fun get_name thm = + case Properties.get (Thm.get_tags thm) Markup.nameN of + SOME n => n + | NONE => error ("theorem with attribute 'vcg_hoare' must have a name") + in + Thm.declaration_attribute (fn thm => + add_hoare_tacs [(get_name thm, mk_hoare_thm (Thm.trim_context thm))]) + end exception UNDEF val vcg_hoare_del = @@ -3398,6 +3605,5 @@ val _ = #> Attrib.setup @{binding vcg_hoare} (Attrib.add_del vcg_hoare_add vcg_hoare_del) "declaration of wp rule for vcg") - (*#> add_wp_tacs initial_wp_tacs*) end; diff --git a/tools/c-parser/Simpl/hoare_syntax.ML b/tools/c-parser/Simpl/hoare_syntax.ML index 81906c78de..1c3dcda640 100644 --- a/tools/c-parser/Simpl/hoare_syntax.ML +++ b/tools/c-parser/Simpl/hoare_syntax.ML @@ -2,24 +2,10 @@ Author: Norbert Schirmer, TU Muenchen Copyright (C) 2004-2007 Norbert Schirmer - -This library is free software; you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as -published by the Free Software Foundation; either version 2.1 of the -License, or (at your option) any later version. - -This library is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public -License along with this library; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -USA +Copyright (c) 2022 Apple Inc. All rights reserved. *) -(* FIXME: Adapt guard generation to new syntax of op + etc. *) +(* fixme: Adapt guard generation to new syntax of op + etc. *) signature HOARE_SYNTAX = sig @@ -37,10 +23,12 @@ sig val basic_tr: Proof.context -> term list -> term val bexp_tr': string -> Proof.context -> term list -> term val bind_tr': Proof.context -> term list -> term - val call_ass_tr: bool -> bool -> Proof.context -> term list -> term + val call_ass_tr: bool -> bool -> term list -> Proof.context -> term list -> term val call_tr': Proof.context -> term list -> term - val call_tr: bool -> bool -> Proof.context -> term list -> term + val call_exn_tr': Proof.context -> term list -> term + val call_tr: bool -> bool -> term list -> Proof.context -> term list -> term val dyn_call_tr': Proof.context -> term list -> term + val dyn_call_exn_tr': Proof.context -> term list -> term val fcall_tr': Proof.context -> term list -> term val fcall_tr: Proof.context -> term list -> term val guarded_Assign_tr: Proof.context -> term list -> term @@ -68,7 +56,7 @@ sig val raise_tr': Proof.context -> term list -> term val raise_tr: Proof.context -> term list -> term val switch_tr': Proof.context -> term list -> term - val update_comp: Proof.context -> string list -> bool -> bool -> xstring -> term -> term -> term + val update_comp: Proof.context -> Hoare.lense option -> string list -> bool -> bool -> xstring -> term -> term -> term val use_call_tr': bool Config.T val whileAnnoGFix_tr': Proof.context -> term list -> term val whileAnnoG_tr': Proof.context -> term list -> term @@ -84,11 +72,11 @@ val globalsN = "globals"; val localsN = "locals"; val globals_updateN = suffix Record.updateN globalsN; val locals_updateN = suffix Record.updateN localsN; -val upd_globalsN = "upd_globals"; (* FIXME authentic syntax !? *) +val upd_globalsN = "upd_globals"; (* fixme authentic syntax !? 
*) val allocN = "alloc_'"; val freeN = "free_'"; -val Null = Syntax.free "Simpl_Heap.Null"; (* FIXME ?? *) +val Null = Syntax.free "Simpl_Heap.Null"; (* fixme ?? *) (** utils **) @@ -119,13 +107,21 @@ fun is_prefix_or_suffix s t = can (unprefix s) t orelse can (unsuffix s) t; +fun is_other ctxt = case Hoare.get_default_state_kind ctxt of Hoare.Other _ => true | _ => false + (** hoare data **) fun is_global_comp ctxt name = - (case StateSpace.get_comp (Context.Proof ctxt) name of - SOME (_, ln) => is_prefix_or_suffix "globals" (Long_Name.base_name ln) - | NONE => false); - + let + val res = case Hoare.get_default_state_space ctxt of + SOME {is_defined, ...} => not (is_defined ctxt name) + | _ => + (case StateSpace.get_comp' (Context.Proof ctxt) name of + SOME (_, lns) => forall (fn ln => is_prefix_or_suffix "globals" (Long_Name.base_name ln)) lns + | NONE => false) + in + res + end (** parsing and printing **) @@ -150,7 +146,7 @@ fun is_global ctxt name = exception UNDEFINED of string -(* FIXME: if is_state_var etc. is reimplemented, rethink about when adding the deco to +(* fixme: if is_state_var etc. is reimplemented, rethink about when adding the deco to the records *) fun first_successful_tr _ [] = raise TERM ("first_successful_tr: no success",[]) @@ -158,30 +154,48 @@ fun first_successful_tr _ [] = raise TERM ("first_successful_tr: no success",[]) | first_successful_tr f (x::xs) = f x handle TERM _ => first_successful_tr f xs; fun statespace_lookup_tr ctxt ps s n = - let - val cn = map Hoare.clique_name (#active_procs (Hoare.get_data ctxt)); - val procs = ps @ cn; - val names = - n :: map (fn p => (suffix (Hoare.par_deco p) (unsuffixI Hoare.deco n))) procs; - in first_successful_tr (StateSpace.gen_lookup_tr ctxt s) names - end; + case Hoare.get_default_state_space ctxt of + SOME {lookup_tr, ...} => lookup_tr ctxt n $ s + | _ => + let + val cn = map Hoare.clique_name (#active_procs (Hoare.get_data ctxt)); + val procs = ps @ cn; + val names = + (Hoare.name_tr ctxt true n) :: map (fn p => (suffix (Hoare.par_deco p) (unsuffixI Hoare.deco n))) procs; + in + first_successful_tr (StateSpace.gen_lookup_tr ctxt s) names + end + +fun K_rec_syntax v = Abs ("_", dummyT, incr_boundvars 1 v); + +fun statespace_update_tr ctxt NONE ps id n v = + (case Hoare.get_default_state_space ctxt of + SOME {update_tr, ...} => update_tr ctxt n $ K_rec_syntax v + | _ => + let + fun gen_update_tr id ctxt n v = + StateSpace.gen'_update_tr true id ctxt n v (Bound 0) |> dest_comb |> fst + + val cn = map Hoare.clique_name (#active_procs (Hoare.get_data ctxt)); + val procs = ps @ cn; + val names = + (Hoare.name_tr ctxt true n) :: map (fn p => (suffix (Hoare.par_deco p) (unsuffixI Hoare.deco n))) procs; + in first_successful_tr (fn n => gen_update_tr id ctxt n v) names + end) + | statespace_update_tr ctxt (SOME {lookup, update}) ps id n v = + update $ K_rec_syntax v -fun statespace_update_tr ctxt ps id n v s = - let - val cn = map Hoare.clique_name (#active_procs (Hoare.get_data ctxt)); - val procs = ps @ cn; - val names = - n :: map (fn p => (suffix (Hoare.par_deco p) (unsuffixI Hoare.deco n))) procs; - in first_successful_tr (fn n => StateSpace.gen_update_tr id ctxt n v s) names - end; local fun is_record_sel ctxt nm = let - val consts = Proof_Context.consts_of ctxt; - val exists_const = can (Consts.the_const consts) o intern_const_syntax consts; - val exists_abbrev = can (Consts.the_abbreviation consts) o intern_const_syntax consts; - in (exists_const nm) orelse (exists_abbrev nm) end; + val SOME (Const (c, T)) = try 
(Syntax.read_term ctxt) nm + val recT = domain_type T + val (flds, _) = Record.get_recT_fields (Proof_Context.theory_of ctxt) recT + in member (op =) (map fst flds) c end + handle TYPE _ => false + | Bind => false + | Match => false in fun lookup_comp ctxt ps name = @@ -196,7 +210,7 @@ fun lookup_comp ctxt ps name = in (fn s => statespace_lookup_tr ctxt ps (sel $ s) name) end; (* -FIXME: +fixme: update of global and local components: One should generally provide functions: glob_upd:: ('g => 'g) => 's => 's @@ -210,9 +224,9 @@ This would make the composition more straightforward... Basically one wants the map on a component rather then the update. Maps can be composed nicer... *) -fun K_rec_syntax v = Abs ("_", dummyT, incr_boundvars 1 v); -fun update_comp ctxt ps atomic id name value = + +fun update_comp ctxt lense_opt ps atomic id name value = if is_record_sel ctxt name then let @@ -220,7 +234,7 @@ fun update_comp ctxt ps atomic id name value = in if atomic andalso is_global ctxt name then (fn s => - Syntax.free globals_updateN $ (K_rec_syntax (upd $ (Syntax.free globalsN $ s))) $ s) + Syntax.free globals_updateN $ (*(K_rec_syntax*) upd $ s) else (fn s => upd $ s) end else @@ -231,8 +245,8 @@ fun update_comp ctxt ps atomic id name value = in fn s => if atomic then - upd $ (K_rec_syntax (statespace_update_tr ctxt ps id name value (sel $ s))) $ s - else statespace_update_tr ctxt ps id name value s + upd $ statespace_update_tr ctxt lense_opt ps id name value $ s + else statespace_update_tr ctxt lense_opt ps id name value $ s end; end; @@ -282,6 +296,17 @@ fun antiquoteOld_tr ctxt [s, n] = | Const (c, T) => lookup_comp ctxt [] (Hoare.varname c) s | _ => n $ s); +fun first_match [] t = raise Match + | first_match (f::fs) t = f t handle Match => first_match fs t + +fun lookup_tr' ctxt t = t |> first_match [ + fn t => + case Hoare.get_default_state_space ctxt of + SOME {lookup_tr', ...} => lookup_tr' ctxt t + | NONE => raise Match, + fn t => Hoare.undeco ctxt t] + + fun antiquote_tr' ctxt name = let fun is_state i t = @@ -291,7 +316,7 @@ fun antiquote_tr' ctxt name = i = j andalso member (op =) [globalsN, localsN] (Long_Name.base_name g) | _ => false); fun tr' i (t $ u) = - if is_state i u then Syntax.const name $ tr' i (Hoare.undeco ctxt t) + if is_state i u then Syntax.const name $ tr' i (lookup_tr' ctxt t) else tr' i t $ tr' i u | tr' i (Abs (x, T, t)) = Abs (x, T, tr' (i + 1) t) | tr' i a = if a = Bound i then raise Match else a; @@ -323,23 +348,24 @@ fun antiquote_applied_only_to P = in test 0 end; + fun antiquote_mult_tr' ctxt is_selector current old = let fun tr' i (t $ u) = state_test u (fn Bound j => - if j = i then Syntax.const current $ tr' i (Hoare.undeco ctxt t) + if j = i then Syntax.const current $ tr' i (lookup_tr' ctxt t) else if is_selector t (* other quantified states *) - then Syntax.const old $ Bound j $ tr' i (Hoare.undeco ctxt t) + then Syntax.const old $ Bound j $ tr' i (lookup_tr' ctxt t) else tr' i t $ tr' i u | pre as ((Const (m,_) $ Free _)) (* pre state *) => if (m = @{syntax_const "_bound"} orelse m = @{syntax_const "_free"}) andalso is_selector t - then Syntax.const old $ pre $ tr' i (Hoare.undeco ctxt t) + then Syntax.const old $ pre $ tr' i (lookup_tr' ctxt t) else tr' i t $ pre | pre as ((Const (m,_) $ Var _)) (* pre state *) => if m = @{syntax_const "_var"} andalso is_selector t - then Syntax.const old $ pre $ tr' i (Hoare.undeco ctxt t) + then Syntax.const old $ pre $ tr' i (lookup_tr' ctxt t) else tr' i t $ pre | u => tr' i t $ tr' i u) | tr' i (Abs (x, T, 
t)) = Abs (x, T, tr' (i + 1) t) @@ -360,14 +386,15 @@ fun app_quote_mult_tr' ctxt is_selector f (t :: ts) = | app_quote_mult_tr' _ _ _ _ = raise Match; + fun atomic_var_tr ctxt ps name value = - update_comp ctxt ps true false name value; + update_comp ctxt NONE ps true false name value; fun heap_var_tr ctxt hp p value = let fun upd s = - update_comp ctxt [] true false hp + update_comp ctxt NONE [] true false hp (Syntax.const @{const_syntax fun_upd} $ lookup_comp ctxt [] hp s $ p $ value) s; in upd end; @@ -543,14 +570,14 @@ fun new_tr ctxt (ts as [var,size,init]) = val g = Syntax.free globalsN $ Bound 0; val alloc = lookup_comp ctxt [] allocN (Bound 0); - val new = Syntax.free "new" $ (Syntax.const @{const_syntax set} $ alloc); (* FIXME new !? *) + val new = Syntax.free "new" $ (Syntax.const @{const_syntax set} $ alloc); (* fixme new !? *) fun mk_upd (var,v) = let val varn = Hoare.varname var; val var' = lookup_comp ctxt [] varn (Bound 0); in - update_comp ctxt [] false false varn + update_comp ctxt NONE [] false false varn (Syntax.const @{const_syntax fun_upd} $ var' $ new $ v) end; @@ -559,10 +586,10 @@ fun new_tr ctxt (ts as [var,size,init]) = val freetest = Syntax.const @{const_syntax Orderings.less_eq} $ size $ free; val alloc_upd = - update_comp ctxt [] false false allocN + update_comp ctxt NONE [] false false allocN (Syntax.const @{const_syntax Cons} $ new $ alloc); val free_upd = - update_comp ctxt [] false false freeN + update_comp ctxt NONE [] false false freeN (Syntax.const @{const_syntax Groups.minus} $ free $ size); val g' = @@ -592,14 +619,14 @@ fun nnew_tr ctxt (ts as [var,size,init]) = val g = Syntax.free globalsN $ Bound 0; val alloc = lookup_comp ctxt [] allocN (Bound 0); - val new = Syntax.free "new" $ (Syntax.const @{const_syntax set} $ alloc); (* FIXME new !? *) + val new = Syntax.free "new" $ (Syntax.const @{const_syntax set} $ alloc); (* fixme new !? 
*) fun mk_upd (var,v) = let val varn = Hoare.varname var; val var' = lookup_comp ctxt [] varn (Bound 0); in - update_comp ctxt [] false false varn + update_comp ctxt NONE [] false false varn (Syntax.const @{const_syntax fun_upd} $ var' $ new $ v) end; @@ -608,10 +635,10 @@ fun nnew_tr ctxt (ts as [var,size,init]) = val freetest = Syntax.const @{const_syntax Orderings.less_eq} $ size $ free; val alloc_upd = - update_comp ctxt [] false false allocN + update_comp ctxt NONE [] false false allocN (Syntax.const @{const_syntax Cons} $ new $ alloc); val free_upd = - update_comp ctxt [] false false freeN + update_comp ctxt NONE [] false false freeN (Syntax.const @{const_syntax Groups.minus} $ free $ size); val g' = @@ -633,7 +660,7 @@ fun loc_tr ctxt (ts as [init, bdy]) = fun dest_init (Const (@{syntax_const "_locinit"}, _) $ Const (var,_) $ v) = (var, v) | dest_init (Const (@{syntax_const "_locnoinit"}, _) $ Const (var, _)) = (var, Syntax.const antiquoteCur $ Syntax.free (Hoare.varname var)) - (* FIXME could skip this dummy initialisation v := v s and + (* fixme: could skip this dummy initialisation v := v s and derive non init variables in the print translation from the return function instead the init function *) | dest_init _ = raise Match; @@ -644,10 +671,10 @@ fun loc_tr ctxt (ts as [init, bdy]) = | dest_inits _ = raise Match; fun mk_init_upd (var, v) = - update_comp ctxt [] true false var (antiquoteCur_tr ctxt v); + update_comp ctxt NONE [] true false var (antiquoteCur_tr ctxt v); fun mk_ret_upd var = - update_comp ctxt [] true false var (lookup_comp ctxt [] var (Bound 1)); + update_comp ctxt NONE [] true false var (lookup_comp ctxt [] var (Bound 1)); val var_vals = map (apfst Hoare.varname) (dest_inits init); val ini = Abs ("s", dummyT, fold mk_init_upd var_vals (Bound 0)); @@ -672,7 +699,7 @@ local fun le l r = Syntax.const @{const_syntax Orderings.less} $ l $ r; -fun in_range t = Syntax.free "in_range" $ t; (* FIXME ?? *) +fun in_range t = Syntax.free "in_range" $ t; (* fixme ?? *) fun not_zero t = Syntax.const @{const_syntax Not} $ @@ -680,7 +707,7 @@ fun not_zero t = fun not_Null t = Syntax.const @{const_syntax Not} $ - (Syntax.const @{const_syntax HOL.eq} $ t $ Syntax.free "Simpl_Heap.Null"); (* FIXME ?? *) + (Syntax.const @{const_syntax HOL.eq} $ t $ Syntax.free "Simpl_Heap.Null"); (* fixme ?? 
*) fun in_length i l = Syntax.const @{const_syntax Orderings.less} $ i $ @@ -725,7 +752,7 @@ fun guard ctxt Ts (add as (Const (@{const_name Groups.plus},_) $ l $ r)) = | guard ctxt Ts (Const (@{const_name HOL.disj},_) $ l $ r) = guard ctxt Ts l & mk_imp (HOLogic.Not $ l,guard ctxt Ts r) | guard ctxt Ts (dv as (Const (@{const_name Rings.divide},_) $ l $ r)) = - guard ctxt Ts l & guard ctxt Ts r & SOME (not_zero r) & SOME (in_range dv) (* FIXME: Make more concrete guard...*) + guard ctxt Ts l & guard ctxt Ts r & SOME (not_zero r) & SOME (in_range dv) (* fixme: Make more concrete guard...*) | guard ctxt Ts (Const (@{const_name Rings.modulo},_) $ l $ r) = guard ctxt Ts l & guard ctxt Ts r & SOME (not_zero r) | guard ctxt Ts (un as (Const (@{const_name Groups.uminus},_) $ n)) = @@ -759,7 +786,7 @@ in in guard ctxt [T] t' end; end; -(* FIXME: make guard function a parameter of all parse-translations that need it.*) +(* fixme: make guard function a parameter of all parse-translations that need it.*) val _ = Theory.setup (Context.theory_map (Hoare.install_generate_guard mk_guard)); @@ -862,25 +889,25 @@ fun dest_actuals (Const (@{syntax_const "_actuals_empty"}, _)) = [] | dest_actuals t = raise TERM ("dest_actuals", [t]); -fun mk_call_tr ctxt grd Call formals pn pt actuals has_args cont = +fun mk_call_tr ctxt grd Call formals pn pt actuals has_args result_exn cont = let val fcall = cont <> NONE; val state_kind = the_default (Hoare.get_default_state_kind ctxt) (Hoare.get_state_kind pn ctxt); - fun init_par_tr name arg = - update_comp ctxt [] false false name (antiquoteCur_tr ctxt arg); + fun init_par_tr name lense_opt arg = + update_comp ctxt lense_opt [] false false name (antiquoteCur_tr ctxt arg); fun result_par_tr name arg = let fun offset_old n = 2; fun offset n = if is_global ctxt n then 0 else 2; + val lookup = lookup_comp ctxt [] name (Bound 1) in - update_tr ctxt [pn] offset offset_old - (lookup_comp ctxt [] name (Bound 1)) arg + update_tr ctxt [pn] offset offset_old lookup arg end; - val _ = if not (StateSpace.get_silent (Context.Proof ctxt)) andalso + val _ = if not (Config.get ctxt StateSpace.silent) andalso ((not fcall andalso length formals <> length actuals) orelse (fcall andalso length formals <> length actuals + 1)) @@ -895,8 +922,8 @@ fun mk_call_tr ctxt grd Call formals pn pt actuals has_args cont = [Syntax.const globals_updateN $ (K_rec_syntax (Const (globalsN, dummyT) $ Bound 0))]; val ret = Abs ("s", dummyT, Abs ("t", dummyT, Library.foldr (op $) (globals, Bound 1))); - val val_formals = filter (fn (kind, _) => kind = Hoare.In) formals; - val res_formals = filter (fn (kind, _) => kind = Hoare.Out) formals; + val val_formals = filter (fn (kind, _, _) => kind = Hoare.In) formals; + val res_formals = filter (fn (kind, _, _) => kind = Hoare.Out) formals; val (val_actuals, res_actuals) = chop (length val_formals) actuals; val init_bdy = @@ -904,12 +931,12 @@ fun mk_call_tr ctxt grd Call formals pn pt actuals has_args cont = val state = (case state_kind of Hoare.Record => Bound 0 - | Hoare.Function => Syntax.const localsN $ Bound 0); - val upds = fold2 (fn (_, name) => init_par_tr name) val_formals val_actuals state; + | _ => Syntax.const localsN $ Bound 0); + val upds = fold2 (fn (_, name, lense_opt) => init_par_tr name lense_opt) val_formals val_actuals state; in (case state_kind of Hoare.Record => upds - | Hoare.Function => Syntax.const locals_updateN $ K_rec_syntax upds $ Bound 0) + | _ => Syntax.const locals_updateN $ K_rec_syntax upds $ Bound 0) end; val init = Abs ("s", 
dummyT, init_bdy); @@ -919,20 +946,21 @@ fun mk_call_tr ctxt grd Call formals pn pt actuals has_args cont = NONE => (* Procedure call *) let val results = - map (fn ((_, name), arg) => result_par_tr name arg) + map (fn ((_, name, _), arg) => result_par_tr name arg) (rev (res_formals ~~ res_actuals)); val res = Abs ("i", dummyT, Abs ("t", dummyT, Syntax.const @{const_syntax Basic} $ Abs ("s", dummyT, fold_rev (fn f => fn s => f s) results (Bound 0)))); - in if has_args then Call $init $ pt $ ret $ res else Call $ pt end + val args = [init, pt, ret] @ result_exn @ [res] + in if has_args then list_comb (Call, args) else Call $ pt end | SOME c => (* Function call *) let val res = (case res_formals of - [(_, n)] => Abs ("s", dummyT, lookup_comp ctxt [] n (Bound 0)) + [(_, n, _)] => Abs ("s", dummyT, lookup_comp ctxt [] n (Bound 0)) | _ => - if StateSpace.get_silent (Context.Proof ctxt) + if Config.get ctxt StateSpace.silent then Abs ("s", dummyT, lookup_comp ctxt [] "dummy" (Bound 0)) else raise TERM ("call_tr: function " ^ pn ^ "may only have one result parameter", [])); in Call $ init $ pt $ ret $ res $ c end) @@ -949,79 +977,80 @@ fun mk_call_tr ctxt grd Call formals pn pt actuals has_args cont = end; -(* FIXME: What is prfx for, maybe unused *) -fun dest_procname ctxt prfx false (Const (p, _)) = - (prfx ^ suffix Hoare.proc_deco p, HOLogic.mk_string p) - | dest_procname ctxt prfx false (t as Free (p, T)) = - (prfx ^ suffix Hoare.proc_deco p, Free (suffix Hoare.proc_deco p, T)) - | dest_procname ctxt prfx false (Const (@{syntax_const "_free"},_) $ Free (p,T)) = - (prfx ^ suffix Hoare.proc_deco p, Free (suffix Hoare.proc_deco p, T)) - | dest_procname ctxt prfx false (t as (Const (@{syntax_const "_antiquoteCur"},_) $ Const (p, _))) = - (prfx ^ Hoare.resuffix Hoare.deco Hoare.proc_deco p, t) - | dest_procname ctxt prfx false (t as (Const (@{syntax_const "_antiquoteCur"}, _) $ Free (p, _))) = - (prfx ^ Hoare.resuffix Hoare.deco Hoare.proc_deco p, t) - | dest_procname ctxt prfx false (t as Const (p, _) $ _) = - (prfx ^ Hoare.resuffix Hoare.deco Hoare.proc_deco p, t) (* antiquoteOld *) - | dest_procname ctxt prfx false (t as Free (p,_)$_) = - (prfx ^ Hoare.resuffix Hoare.deco Hoare.proc_deco p, t) (* antiquoteOld *) - | dest_procname ctxt prfx false (t as Const (@{syntax_const "_antiquoteOld"}, _) $ _ $ Const (p, _)) = - (prfx ^ suffix Hoare.proc_deco p, t) - | dest_procname ctxt prfx false (t as Const (@{syntax_const "_antiquoteOld"}, _) $ _ $ Free (p,_)) = - (prfx ^ suffix Hoare.proc_deco p, t) - (* FIXME StateFun.lookup !? 
*) - | dest_procname ctxt prfx false (t as Const (@{const_name "StateFun.lookup"}, _) $ _ $ Free (p, _) $ _) = - (prfx ^ suffix Hoare.proc_deco (Hoare.remdeco' p), t) (* antiquoteOld *) - - | dest_procname ctxt prfx false t = (prfx, t) - | dest_procname ctxt prfx true t = +fun dest_procname ctxt false (Const (p, _)) = + (suffix Hoare.proc_deco p, HOLogic.mk_string p) + | dest_procname ctxt false (t as Free (p, T)) = + (case Hoare.get_default_state_space ctxt of + SOME {read_function_name, ...} => (p, read_function_name ctxt p) + | _ => (suffix Hoare.proc_deco p, Free (suffix Hoare.proc_deco p, T))) + | dest_procname ctxt false (Const (@{syntax_const "_free"},_) $ Free (p,T)) = + (suffix Hoare.proc_deco p, Free (suffix Hoare.proc_deco p, T)) + | dest_procname ctxt false (t as (Const (@{syntax_const "_antiquoteCur"},_) $ Const (p, _))) = + (Hoare.resuffix Hoare.deco Hoare.proc_deco p, t) + | dest_procname ctxt false (t as (Const (@{syntax_const "_antiquoteCur"}, _) $ Free (p, _))) = + (Hoare.resuffix Hoare.deco Hoare.proc_deco p, t) + | dest_procname ctxt false (t as Const (p, _) $ _) = + (Hoare.resuffix Hoare.deco Hoare.proc_deco p, t) (* antiquoteOld *) + | dest_procname ctxt false (t as Free (p,_)$_) = + (Hoare.resuffix Hoare.deco Hoare.proc_deco p, t) (* antiquoteOld *) + | dest_procname ctxt false (t as Const (@{syntax_const "_antiquoteOld"}, _) $ _ $ Const (p, _)) = + (suffix Hoare.proc_deco p, t) + | dest_procname ctxt false (t as Const (@{syntax_const "_antiquoteOld"}, _) $ _ $ Free (p,_)) = + (suffix Hoare.proc_deco p, t) + | dest_procname ctxt false (t as Const (@{const_name "StateFun.lookup"}, _) $ _ $ Free (p, _) $ _) = + (suffix Hoare.proc_deco (Hoare.remdeco' ctxt p), t) (* antiquoteOld *) + + | dest_procname ctxt false t = ("", t) + | dest_procname ctxt true t = let fun quote t = Abs ("s", dummyT, antiquoteCur_tr ctxt t) in (case quote t of (t' as Abs (_, _, Free (p, _) $ Bound 0)) => - (prfx ^ Hoare.resuffix Hoare.deco Hoare.proc_deco p, t') - (* FIXME StateFun.lookup !? 
*) + (Hoare.resuffix Hoare.deco Hoare.proc_deco p, t') | (t' as Abs (_, _, Const (@{const_name "StateFun.lookup"}, _) $ _ $ Free (p, _) $ (_ $ Bound 0))) => - (prfx ^ suffix Hoare.proc_deco (Hoare.remdeco' p), t') - | t' => (prfx, t')) + (suffix Hoare.proc_deco (Hoare.remdeco' ctxt p), t') + | t' => ("", t')) end -fun gen_call_tr prfx dyn grd ctxt p actuals has_args cont = +fun gen_call_tr dyn grd ctxt p actuals has_args result_exn cont = let - fun Call false true NONE = Const (@{const_syntax call}, dummyT) - | Call false false NONE = Const (@{const_syntax Call}, dummyT) - | Call true true NONE = Const (@{const_syntax dynCall}, dummyT) - | Call false true (SOME c) = Const (@{const_syntax fcall}, dummyT) - | Call _ _ _ = raise TERM ("gen_call_tr: no proper procedure call", []); - - val (pn, pt) = dest_procname ctxt prfx dyn (Term_Position.strip_positions p); + fun Call false true [] NONE = Const (@{const_syntax call}, dummyT) + | Call false true _ NONE = Const (@{const_syntax call_exn}, dummyT) + | Call false false [] NONE = Const (@{const_syntax Call}, dummyT) + | Call true true [] NONE = Const (@{const_syntax dynCall}, dummyT) + | Call true true _ NONE = Const (@{const_syntax dynCall_exn}, dummyT) + | Call false true [] (SOME c) = Const (@{const_syntax fcall}, dummyT) + | Call _ _ _ _ = raise TERM ("gen_call_tr: no proper procedure call", []); + + val (pn, pt) = dest_procname ctxt dyn (Term_Position.strip_positions p); in (case Hoare.get_params pn ctxt of SOME formals => - mk_call_tr ctxt grd (Call dyn has_args cont) formals pn pt actuals has_args cont + mk_call_tr ctxt grd (Call dyn has_args result_exn cont) formals pn pt actuals has_args result_exn cont | NONE => - if StateSpace.get_silent (Context.Proof ctxt) - then mk_call_tr ctxt grd (Call dyn has_args cont) [] pn pt [] has_args cont + if Config.get ctxt StateSpace.silent + then mk_call_tr ctxt grd (Call dyn has_args result_exn cont) [] pn pt [] has_args result_exn cont else raise TERM ("gen_call_tr: procedure " ^ quote pn ^ " not defined", [])) end; -fun call_tr dyn grd ctxt [p, actuals] = - gen_call_tr "" dyn grd ctxt p (dest_actuals actuals) true NONE - | call_tr _ _ _ t = raise TERM ("call_tr", t); +fun call_tr dyn grd result_exn ctxt [p, actuals] = + gen_call_tr dyn grd ctxt p (dest_actuals actuals) true result_exn NONE + | call_tr _ _ _ _ t = raise TERM ("call_tr", t); -fun call_ass_tr dyn grd ctxt [l, p, actuals] = - gen_call_tr "" dyn grd ctxt p (dest_actuals actuals @ [l]) true NONE - | call_ass_tr _ _ _ t = raise TERM ("call_ass_tr", t); +fun call_ass_tr dyn grd result_exn ctxt [l, p, actuals] = + gen_call_tr dyn grd ctxt p (dest_actuals actuals @ [l]) true result_exn NONE + | call_ass_tr _ _ _ _ t = raise TERM ("call_ass_tr", t); fun proc_tr ctxt [p, actuals] = - gen_call_tr "" false false ctxt p (dest_actuals actuals) false NONE + gen_call_tr false false ctxt p (dest_actuals actuals) false [] NONE | proc_tr _ t = raise TERM ("proc_tr", t); fun proc_ass_tr ctxt [l, p, actuals] = - gen_call_tr "" false false ctxt p (dest_actuals actuals @ [l]) false NONE + gen_call_tr false false ctxt p (dest_actuals actuals @ [l]) false [] NONE | proc_ass_tr _ t = raise TERM ("proc_ass_tr", t); fun fcall_tr ctxt [p, actuals, c] = - gen_call_tr "" false false ctxt p (dest_actuals actuals) true (SOME c) + gen_call_tr false false ctxt p (dest_actuals actuals) true [] (SOME c) | fcall_tr _ t = raise TERM ("fcall_tr", t); @@ -1042,7 +1071,11 @@ fun update_name_tr' ctxt (Free x) = Const (upd_tr' ctxt x) | update_name_tr' ctxt ((c as Const 
(@{syntax_const "_free"}, _)) $ Free x) = (*c $*) Const (upd_tr' ctxt x) | update_name_tr' ctxt (Const x) = Const (upd_tr' ctxt x) - | update_name_tr' _ _ = raise Match; + | update_name_tr' ctxt t = + (case Hoare.get_default_state_space ctxt of + SOME {update_tr',...} => update_tr' ctxt t + | NONE => raise Match); + fun term_name_eq (Const (x, _)) (Const (y, _)) = (x = y) | term_name_eq (Free (x, _)) (Free (y, _)) = (x = y) @@ -1076,7 +1109,7 @@ fun list_mult_update_tr' l (r as Const (@{const_syntax list_multupd},_) $ var $ (Syntax.const @{const_syntax list_multsel} $ var $ idxs, values) | list_mult_update_tr' l r = (l, r); -fun update_tr' l (r as Const (@{const_syntax fun_upd}, _) $ +fun update_tr' ctxt l (r as Const (@{const_syntax fun_upd}, _) $ (hp as (Const (@{syntax_const "_antiquoteCur"}, _) $ _)) $ p $ value) = if term_name_eq l hp then (case value of @@ -1084,94 +1117,159 @@ fun update_tr' l (r as Const (@{const_syntax fun_upd}, _) $ | (Const (@{const_syntax list_multupd},_) $ _ $ _ $ _) => list_mult_update_tr' (l $ p) value | _ => (l $ p, value)) else (l, r) - | update_tr' l (r as Const (@{const_syntax list_update},_) $ + | update_tr' ctxt l (r as Const (@{const_syntax list_update},_) $ (var as (Const (@{syntax_const "_antiquoteCur"}, _) $ _)) $ i $ value) = if term_name_eq l var then list_update_tr' l r else (l, r) - | update_tr' l (r as Const (@{const_syntax list_multupd}, _) $ + | update_tr' ctxt l (r as Const (@{const_syntax list_multupd}, _) $ (var as (Const (@{syntax_const "_antiquoteCur"}, _) $ _)) $ idxs $ values) = if term_name_eq l var then list_mult_update_tr' l r else (l, r) - | update_tr' l r = (l, r); + | update_tr' ctxt l r = (l, r); -fun dest_K_rec (Abs (_, _, v)) = - if member (op =) (loose_bnos v) 0 then NONE else SOME (incr_boundvars ~1 v) - | dest_K_rec (Abs (_, _, Abs (_, _, v) $ Bound 0)) = (* eta expanded version *) +fun dest_K_rec (Abs (_, _, Abs (_, _, v) $ Bound 0)) = (* eta expanded version *) let val lbv = loose_bnos v; in if member (op =) lbv 0 orelse member (op =) lbv 1 then NONE else SOME (incr_boundvars ~2 v) end + | dest_K_rec (Abs (_, _, v)) = + if member (op =) (loose_bnos v) 0 then NONE else SOME (incr_boundvars ~1 v) + | dest_K_rec (Const (@{const_syntax K_statefun}, _) $ v) = SOME v | dest_K_rec _ = NONE; +fun the_Match (SOME x) = x + | the_Match (NONE) = raise Match + +fun dest_update ctxt (upd' $ dest $ constr $ n $ v $ s) = + (n, v, SOME s) + | dest_update ctxt (upd' $ dest $ constr $ n $ v) = + (n, v, NONE) + | dest_update ctxt t = + case Hoare.get_default_state_space ctxt of + SOME {dest_update_tr', ...} => dest_update_tr' ctxt t + | NONE => raise Match + local -fun uncover (upd,v) = - (case (upd, v) of - (Const (cupd, _), upd' $ dest $ constr $ n $ (Const (@{const_syntax K_statefun}, _) $ v') $ s) => +fun uncover ctxt (upd,v) = (upd, v) |> first_match [ + fn (Const (cupd, _), t) => if member (op =) [globals_updateN, locals_updateN] (Long_Name.base_name cupd) - then (case s of (Const (g, _) $ _) => - if member (op =) [localsN, globalsN] (Long_Name.base_name g) - then (n, v') - else raise Match - | _ => raise Match) - else (upd, v) - | (Const (gupd, _), upd' $ k $ s) => - (case dest_K_rec k of - SOME v' => - if Long_Name.base_name gupd = globals_updateN - then - (case s of - Const (gl, _) $ _ => - if Long_Name.base_name gl = globalsN (* assignment *) - then (upd',v') - else raise Match - | _ => raise Match) + then case dest_update ctxt t of + (n, v', SOME s) => (case s of (Const (g, _) $ _) => + if member (op =) [localsN, globalsN] 
(Long_Name.base_name g) + then (n, the_Match (dest_K_rec v')) + else raise Match + | _ => raise Match) + | (n, v', NONE) => (n, the_Match (dest_K_rec v')) + else (upd, v), + fn (upd, v ) => + (case (upd, v) of + (Const (gupd, _), t as (upd' $ k $ s)) => + (case dest_K_rec k of + SOME v' => + if Long_Name.base_name gupd = globals_updateN + then + (case s of + Const (gl, _) $ _ => + if Long_Name.base_name gl = globalsN (* assignment *) + then (upd',v') + else raise Match + | _ => raise Match) + else (upd, v) + | _ => (upd, v)) + | (Const (upd_glob, _), upd' $ v') => + if Long_Name.base_name upd_glob = upd_globalsN (* result parameter *) + then (upd', v') + else if Long_Name.base_name upd_glob = globals_updateN + then (case dest_K_rec v' of + SOME v'' => (upd',v'') + | _ => (upd, v)) else (upd, v) - | _ => (upd, v)) - | (Const (upd_glob, _), upd' $ v') => - if Long_Name.base_name upd_glob = upd_globalsN (* result parameter *) - then (upd', v') else (upd, v) - | _ => (upd, v)); + | _ => (upd, v))] in -fun global_upd_tr' upd k = +fun global_upd_tr' ctxt upd k = (case dest_K_rec k of - SOME v => uncover (upd, v) - | NONE => uncover (upd, k)); - + SOME v => uncover ctxt (upd, v) + | NONE => uncover ctxt (upd, k)) end; - -fun dest_updates (t as (upd as Const (u, _)) $ k $ state) = +fun dest_updates ctxt t = t |> first_match [ + fn (t as (upd as Const (u, _)) $ k $ state) => (case dest_K_rec k of SOME value => if member (op =) [globals_updateN, locals_updateN] (Long_Name.base_name u) - then dest_updates value + then dest_updates ctxt value else if can (unsuffix Record.updateN) u orelse Long_Name.base_name u = upd_globalsN - then (upd,value)::dest_updates state + then (upd,value)::dest_updates ctxt state else raise Match - | NONE => raise Match) - | dest_updates (t as (upd as Const (u,_))$k) = + | NONE => raise Match (*dest_updates ctxt k @ dest_updates ctxt state*) (* check for nested update (e.g. locals-stack) *) + (*handle Match => []*)), (* t could be just (globals $ s) *) + fn (t as (upd as Const (u,_))$k) => (case dest_K_rec k of SOME value => if member (op =) [globals_updateN, locals_updateN] (Long_Name.base_name u) - then dest_updates value + then dest_updates ctxt value else if can (unsuffix Record.updateN) u orelse Long_Name.base_name u = upd_globalsN then [(upd,value)] else if Long_Name.base_name u = globalsN then [] else raise Match - | NONE => []) (* t could be just (globals $ s) *) - | dest_updates ((Const (u, _)) $ _ $ _ $ n $ (Const (@{const_syntax K_statefun},_) $ value) $ state) = + | NONE => dest_updates ctxt k (* check for nested update (e.g. locals-stack) *) + handle Match => []), (* t could be just (globals $ s) *) + fn ((Const (u, _)) $ _ $ _ $ n $ k $ state) => if Long_Name.base_name u = Long_Name.base_name StateFun.updateN - then (n, value) :: dest_updates state - else raise Match - | dest_updates t = []; - -(* FIXME: externalize names properly before removing decoration! 
*) + then case dest_K_rec k of SOME value => (n, value) :: dest_updates ctxt state | _ => raise Match + else raise Match, + fn ((Const (u, _)) $ _ $ _ $ n $ k) => + if Long_Name.base_name u = Long_Name.base_name StateFun.updateN + then case dest_K_rec k of SOME value => [(n, value)] | _ => raise Match + else raise Match, + fn t => + (case Hoare.get_default_state_space ctxt of + SOME {dest_update_tr', ...} => + (case dest_update_tr' ctxt t of + (n, v, SOME s) => (n, the_Match (dest_K_rec v))::dest_updates ctxt s + | (n, v, NONE) => [(n, the_Match (dest_K_rec v))]) + | NONE => raise Match), + fn t => []] + +fun dest_updates ctxt t = t |> first_match [ + fn (t as (upd as Const (u, _)) $ k $ state) => + if member (op =) [globals_updateN, locals_updateN] (Long_Name.base_name u) then + dest_updates ctxt k @ dest_updates ctxt state + else if can (unsuffix Record.updateN) u orelse Long_Name.base_name u = upd_globalsN then + (upd, the_Match (dest_K_rec k))::dest_updates ctxt state + else raise Match, (* t could be just (globals $ s) *) + fn (t as (upd as Const (u,_))$k) => + if member (op =) [globals_updateN, locals_updateN] (Long_Name.base_name u) then + dest_updates ctxt k + else if can (unsuffix Record.updateN) u orelse Long_Name.base_name u = upd_globalsN then + [(upd, the_Match (dest_K_rec k))] + (*else if Long_Name.base_name u = globalsN then [] *) + else raise Match, + fn ((Const (u, _)) $ _ $ _ $ n $ k $ state) => + if Long_Name.base_name u = Long_Name.base_name StateFun.updateN then + (n, the_Match (dest_K_rec k)) :: dest_updates ctxt state + else raise Match, + fn ((Const (u, _)) $ _ $ _ $ n $ k) => + if Long_Name.base_name u = Long_Name.base_name StateFun.updateN then + [(n, the_Match (dest_K_rec k))] + else raise Match, + fn t => + (case Hoare.get_default_state_space ctxt of + SOME {dest_update_tr', ...} => + (case dest_update_tr' ctxt t of + (n, v, SOME s) => (n, the_Match (dest_K_rec v))::dest_updates ctxt s + | (n, v, NONE) => [(n, the_Match (dest_K_rec v))]) + | NONE => raise Match), + fn t => []] + + +(* fixme: externalize names properly before removing decoration! 
*) fun init_tr' ctxt [Abs (_,_,t)] = let val upds = - case dest_updates t of + case dest_updates ctxt t of us as [(Const (gupd, _), v)] => if Long_Name.base_name gupd = globals_updateN - then dest_updates v else us + then dest_updates ctxt v else us | us => us; val comps = @@ -1202,16 +1300,16 @@ fun tr' ctxt c (upd,v) = let val l = Syntax.const antiquoteCur $ update_name_tr' ctxt upd; val r = quote_tr' ctxt antiquoteCur (Abs ("s", dummyT, v)); - val (l', r') = update_tr' l r; + val (l', r') = update_tr' ctxt l r; in (c $ l' $ r') end; in fun app_assign_tr' c ctxt (Abs (s, _, upd $ v $ Bound 0) :: ts) = - tr' ctxt c (global_upd_tr' upd v) + tr' ctxt c (global_upd_tr' ctxt upd v) | app_assign_tr' c ctxt ((upd $ v) :: ts) = (case upd of u $ v => raise Match - | _ => tr' ctxt c (global_upd_tr' upd v)) + | _ => tr' ctxt c (global_upd_tr' ctxt upd v)) | app_assign_tr' _ _ _ = raise Match; end; @@ -1239,7 +1337,7 @@ fun basic_tr' ctxt [Abs (s, T, t)] = ((t' as (Const (@{const_syntax Let'},_) $ _ $ _)) $ Bound 0) => (true, t') | _ => (false, t); val (recomb, t'') = split_Let' t'; - val upds = dest_updates t''; + val upds = dest_updates ctxt t''; val _ = if length upds <= 1 andalso not has_let then raise Match else (); val ass = map (fn (u, v) => app_assign_tr' (Syntax.const @{syntax_const "_BAssign"}) ctxt @@ -1254,8 +1352,8 @@ fun loc_tr' ctxt [init, bdy, return, c] = (let val upds = (case init of - Abs (_, _, t as (upd $ v $ s)) => dest_updates t - | upd $ v => dest_updates (upd $ v $ Bound 0) + Abs (_, _, t as (upd $ v $ s)) => dest_updates ctxt t + | upd $ v => dest_updates ctxt (upd $ v $ Bound 0) | _ => raise Match); fun mk_locinit c v = @@ -1279,14 +1377,14 @@ fun loc_tr' ctxt [init, bdy, return, c] = if Long_Name.base_name lookup = Long_Name.base_name StateFun.lookupN andalso Long_Name.base_name locals = localsN then init_or_not c c' v - else mk_locinit (Hoare.remdeco' c) v - | _ => mk_locinit (Hoare.remdeco' c) v) + else mk_locinit (Hoare.remdeco' ctxt c) v + | _ => mk_locinit (Hoare.remdeco' ctxt c) v) | mk_init _ = raise Match; val inits = foldr1 (fn (t, u) => Syntax.const @{syntax_const "_locinits"} $ t $ u) (map mk_init (rev upds)); - in Syntax.const @{syntax_const "_Loc"} $ inits $ bdy end handle Fail _ => raise Match) + in Syntax.const @{syntax_const "_Loc"} $ inits $ bdy end handle Fail _ => raise Match | Empty => raise Match) | loc_tr' _ _ = raise Match; @@ -1299,46 +1397,48 @@ fun actuals_tr' acts = fun gen_call_tr' ctxt Call CallAss init p return c = let - fun get_init_updates (Abs (s, _, upds)) = dest_updates upds - | get_init_updates upds = dest_updates upds; + fun get_init_updates (Abs (s, _, upds)) = dest_updates ctxt upds + | get_init_updates upds = dest_updates ctxt upds; fun get_res_updates (Abs (i, _, Abs (t, _, Const (@{const_syntax Basic}, _) $ Abs (s, _, upds)))) = - dest_updates upds + dest_updates ctxt upds | get_res_updates (Abs (i, _, Abs (t, _, Const (@{const_syntax Basic}, _) $ upds))) = - dest_updates upds + dest_updates ctxt upds | get_res_updates _ = raise Match; - fun init_par_tr' par = + val init_upds = rev (get_init_updates init) + fun init_par_tr' par = Syntax.const @{syntax_const "_par"} $ quote_tr' ctxt antiquoteCur (Abs ("s", dummyT, par)); - val init_actuals = - map (fn (_, value) => init_par_tr' value) (rev (get_init_updates init)); + val init_actuals = + map (fn (_, value) => init_par_tr' value) init_upds; - fun tr' c (upd, v) = - let - val l = Syntax.const antiquoteCur $ update_name_tr' ctxt upd; - val r = - quote_tr' ctxt antiquoteCur + fun tr' c 
(upd, v) = + let + val l = Syntax.const antiquoteCur $ update_name_tr' ctxt upd; + val r = + quote_tr' ctxt antiquoteCur + (quote_tr' ctxt antiquoteCur (quote_tr' ctxt antiquoteCur - (quote_tr' ctxt antiquoteCur - (Abs ("i", dummyT, Abs ("t", dummyT, Abs ("s", dummyT, v)))))); - val (l', _) = update_tr' l r; - in c $ l' end; + (Abs ("i", dummyT, Abs ("t", dummyT, Abs ("s", dummyT, v)))))); + val (l', _) = update_tr' ctxt l r; + in c $ l' end; - fun ret_par_tr' (upd, v) = - tr' (Syntax.const @{syntax_const "_par"}) (global_upd_tr' upd v); + fun ret_par_tr' (upd, v) = + tr' (Syntax.const @{syntax_const "_par"}) (global_upd_tr' ctxt upd v); - val res_updates = rev (get_res_updates c); - val res_actuals = map ret_par_tr' res_updates; - in if Config.get ctxt use_call_tr' then + val res_updates = rev (get_res_updates c); + val res_actuals = map ret_par_tr' res_updates; + in + if Config.get ctxt use_call_tr' then (case res_actuals of [l] => CallAss $ l $ p $ actuals_tr' init_actuals | _ => Call $ p $ actuals_tr' (init_actuals @ res_actuals)) - else raise Match + else raise Match end; fun gen_fcall_tr' ctxt init p return result c = let - fun get_init_updates (Abs (s, _, upds)) = dest_updates upds + fun get_init_updates (Abs (s, _, upds)) = dest_updates ctxt upds | get_init_updates _ = raise Match; fun init_par_tr' par = @@ -1363,7 +1463,7 @@ fun pname_tr' ctxt ((f as Const (@{syntax_const "_free"}, _)) $ Free (p, T)) = | pname_tr' ctxt p = let (* from HOL strings to ML strings *) - fun dest_nib c = (* FIXME authentic syntax *) + fun dest_nib c = (* fixme authentic syntax *) (case raw_explode c of ["N", "i", "b", "b", "l", "e", h] => if "0" <= h andalso h <= "9" then ord h - ord "0" @@ -1391,6 +1491,12 @@ fun call_tr' ctxt [init, p, return, result] = (Const (@{syntax_const "_CallAss"}, dummyT)) init (pname_tr' ctxt p) return result | call_tr' _ _ = raise Match; +fun call_exn_tr' ctxt [init, p, return, result_exn, result] = + gen_call_tr' ctxt + (Const (@{syntax_const "_Call_exn"}, dummyT)) + (Const (@{syntax_const "_CallAss_exn"}, dummyT)) init (pname_tr' ctxt p) return result + | call_exn_tr' _ _ = raise Match; + fun dyn_call_tr' ctxt [init, p, return, result] = let val p' = quote_tr' ctxt antiquoteCur p @@ -1401,13 +1507,22 @@ fun dyn_call_tr' ctxt [init, p, return, result] = end | dyn_call_tr' _ _ = raise Match; +fun dyn_call_exn_tr' ctxt [init, p, return, result_exn, result] = + let val p' = quote_tr' ctxt antiquoteCur p + in + gen_call_tr' ctxt + (Const (@{syntax_const "_DynCall_exn"}, dummyT)) + (Const (@{syntax_const "_DynCallAss_exn"}, dummyT)) init p' return result + end + | dyn_call_exn_tr' _ _ = raise Match; + fun proc_tr' ctxt [p] = let val p' = pname_tr' ctxt p; - val pn = fst (dest_procname ctxt "" false p'); + val pn = fst (dest_procname ctxt false p'); val formals = the (Hoare.get_params pn ctxt) handle Option.Option => raise Match; - val val_formals = map_filter (fn (Hoare.In, p) => SOME p | _ => NONE) formals; - val res_formals = map_filter (fn (Hoare.Out, p) => SOME p | _ => NONE) formals; + val val_formals = map_filter (fn (Hoare.In, p, _) => SOME p | _ => NONE) formals; + val res_formals = map_filter (fn (Hoare.Out, p, _) => SOME p | _ => NONE) formals; fun mkpar n = Syntax.const @{syntax_const "_par"} $ (Syntax.const antiquoteCur $ Syntax.const (Hoare.remdeco ctxt n)); @@ -1439,7 +1554,10 @@ fun assert_tr' ctxt ((t as Abs (_, _, p)) :: ts) = fun selector (Const (c, T)) = Hoare.is_state_var c | selector (Const (l, _) $ _ $ _) = Long_Name.base_name l = Long_Name.base_name 
StateFun.lookupN - | selector t = false; + | selector t = + (case Hoare.get_default_state_space ctxt of + SOME {is_lookup,...} => is_lookup ctxt t + | _ => false) fun fix_state (Const (@{const_syntax HOL.eq},_) $ (Const (@{syntax_const "_free"}, _) $ _)) = true | fix_state (Const (@{const_syntax HOL.eq},_) $ (Const (@{syntax_const "_bound"}, _) $ _)) = true @@ -1477,9 +1595,9 @@ fun new_tr' ctxt | mk_init ((f as Const (@{syntax_const "_free"}, _)) $ Free (var, T), Const (@{const_syntax fun_upd},_) $ _ $ _ $ v) = Syntax.const @{syntax_const "_newinit"} $ - (f $ Free (Hoare.remdeco' var, T)) $ v; + (f $ Free (Hoare.remdeco' ctxt var, T)) $ v; - val inits_free_allocs = dest_updates inits_free_alloc; + val inits_free_allocs = dest_updates ctxt inits_free_alloc; val inits = map mk_init (take (length inits_free_allocs - 2) (inits_free_allocs)); val inits' = @@ -1489,10 +1607,10 @@ fun new_tr' ctxt let val l = Syntax.const antiquoteCur $ update_name_tr' ctxt upd; val r = quote_tr' ctxt antiquoteCur (Abs (s, dummyT, v)); - val (l', r') = update_tr' l r + val (l', r') = update_tr' ctxt l r in l' end; - val l = tr' (global_upd_tr' upd' null); + val l = tr' (global_upd_tr' ctxt upd' null); in Syntax.const @{syntax_const "_New"} $ l $ size $ inits' end | new_tr' _ _ = raise Match; @@ -1510,9 +1628,9 @@ fun nnew_tr' ctxt | mk_init ((f as Const (@{syntax_const "_free"}, _)) $ Free (var, T), Const (@{const_syntax fun_upd}, _) $_ $ _ $ v) = Syntax.const @{syntax_const "_newinit"} $ - (f $ Free (Hoare.remdeco' var, T)) $ v; + (f $ Free (Hoare.remdeco' ctxt var, T)) $ v; - val free_inits_allocs = dest_updates free_inits_alloc; + val free_inits_allocs = dest_updates ctxt free_inits_alloc; val inits = map mk_init (take (length free_inits_allocs - 2) (tl free_inits_allocs)); val inits' = @@ -1522,10 +1640,10 @@ fun nnew_tr' ctxt let val l = Syntax.const antiquoteCur $ update_name_tr' ctxt upd; val r = quote_tr' ctxt antiquoteCur (Abs (s, dummyT, v)); - val (l', r') = update_tr' l r; + val (l', r') = update_tr' ctxt l r; in l' end; - val l = tr' (global_upd_tr' upd' null); + val l = tr' (global_upd_tr' ctxt upd' null); in Syntax.const @{syntax_const "_NNew"} $ l $ size $ inits' end | nnew_tr' _ _ = raise Match; diff --git a/lib/clib/TypHeapLib.thy b/tools/c-parser/TypHeapLib.thy similarity index 98% rename from lib/clib/TypHeapLib.thy rename to tools/c-parser/TypHeapLib.thy index dc49c4d886..06f9232880 100644 --- a/lib/clib/TypHeapLib.thy +++ b/tools/c-parser/TypHeapLib.thy @@ -5,12 +5,11 @@ *) theory TypHeapLib -imports "CParser.CTranslation" + imports CTranslation begin (* This file contains everything you need to know and use for the - day-to-day solving of TypHeap related goals. See KernelState.thy for - abbreviations for cslift etc. *) + day-to-day solving of TypHeap related goals. 
*) section "Abbreviations and helpers" @@ -38,7 +37,7 @@ lemma c_guard_clift: lemma clift_heap_update: fixes p :: "'a :: mem_type ptr" - shows "hrs_htd hp \<Turnstile>\<^sub>t p \<Longrightarrow> clift (hrs_mem_update (heap_update p v) hp) = clift hp(p \<mapsto> v)" + shows "hrs_htd hp \<Turnstile>\<^sub>t p \<Longrightarrow> clift (hrs_mem_update (heap_update p v) hp) = (clift hp)(p \<mapsto> v)" unfolding hrs_mem_update_def apply (cases hp) apply (simp add: split_def hrs_htd_def) @@ -173,7 +172,7 @@ lemma clift_field_update: and eu: "export_uinfo t = export_uinfo (typ_info_t TYPE('b))" and cl: "clift hp ptr = Some z" shows "(clift (hrs_mem_update (heap_update (Ptr &(ptr\<rightarrow>f)) val) hp)) = - clift hp(ptr \<mapsto> field_update (field_desc t) (to_bytes_p val) z)" + (clift hp)(ptr \<mapsto> field_update (field_desc t) (to_bytes_p val) z)" (is "?LHS = ?RHS") proof - have cl': "clift (fst hp, snd hp) ptr = Some z" using cl by simp diff --git a/tools/c-parser/UMM_Proofs.ML b/tools/c-parser/UMM_Proofs.ML index ec29139590..ebd8bbbf77 100644 --- a/tools/c-parser/UMM_Proofs.ML +++ b/tools/c-parser/UMM_Proofs.ML @@ -406,28 +406,28 @@ fun umm_struct_calculation ((recname, flds), st, thy) = let Const(@{const_name "typ_name_itself"}, mk_itself_type recty --> typ_name_ty) $ Free("x", mk_itself_type recty) val typnameitself_rhs = mk_string recname - val typnameitself_triple = - ((Binding.name (recname ^ "_typ_name_itself"), - mk_defeqn(typnameitself_lhs, typnameitself_rhs)), - [Simplifier.simp_add]) + val typnameitself_tuple = + (Binding.name (recname ^ "_typ_name_itself"), + mk_defeqn(typnameitself_lhs, typnameitself_rhs)) (* the typ_tag definition *) val typtag_lhs = mk_typ_info_tm recty $ Free("x", mk_itself_type recty) val typtag_rhs = tag_tm - val typtag_triple = - ((Binding.name (recname ^ "_typ_tag"), - mk_defeqn(typtag_lhs, typtag_rhs)), - []) + val typtag_tuple = + (Binding.name (recname ^ "_typ_tag"), + mk_defeqn(typtag_lhs, typtag_rhs)) val typ_info_TYPE = mk_typ_info_of recty (* make the definitions *) val (typnameitself_thm, typtag_thm, thy) = - case Global_Theory.add_defs true - [typnameitself_triple, typtag_triple] thy + case fold_map Global_Theory.add_def_overloaded + [typnameitself_tuple, typtag_tuple] thy of ([x,y], thy) => (x,y,thy) | _ => raise Fail "UMM_Proofs: Bind error" + val (_, thy) = + Global_Theory.note_thms "" ((Binding.empty, []), [([typnameitself_thm], [Simplifier.simp_add])]) thy val thy = add_data_thms [("typ_name_itself", [typnameitself_thm])] thy val _ = phase "MEMTYPE" diff --git a/tools/c-parser/calculate_state.ML b/tools/c-parser/calculate_state.ML index 560cc7b03a..b7725eff0c 100644 --- a/tools/c-parser/calculate_state.ML +++ b/tools/c-parser/calculate_state.ML @@ -480,8 +480,12 @@ fun mk_thy_types cse install thy = let val thy = List.foldl rcddecls_phase0 thy sorted_structs + val abs_outfilnameN = (if Path.is_absolute (Path.explode outfilnameN) + then outfilnameN + else (Path.implode o Path.expand) + (Path.append (Resources.master_directory thy) (Path.explode outfilnameN))) (* Yuck, sorry *) - val _ = gen_umm_types_file cse outfilnameN + val _ = gen_umm_types_file cse abs_outfilnameN val arrays = List.filter (fn (_, sz) => sz <> 0) (Binaryset.listItems (get_array_mentions cse)) diff --git a/tools/c-parser/heapstatetype.ML b/tools/c-parser/heapstatetype.ML index 0ddd7d9d61..d9506d22d3 100644 --- a/tools/c-parser/heapstatetype.ML +++ b/tools/c-parser/heapstatetype.ML @@ -58,10 +58,9 @@ fun hst_prove_globals fullrecname thy = let ("hst_htd_update",hst_htd_update_lhs_t,hst_htd_update_rhs_t)] val defs = map (fn (n,l,r) => ((Binding.name (n ^
NameGeneration.global_rcd_name), - mk_defeqn(l recty', r hrs recty')), - [])) + mk_defeqn(l recty', r hrs recty')))) triples - val (hst_thms, thy) = Global_Theory.add_defs true defs thy + val (hst_thms, thy) = fold_map Global_Theory.add_def_overloaded defs thy val thy' = thy |> Context.theory_map (Simplifier.map_ss (fn ss => ss addsimps hst_thms)) val hst_instance_t = Logic.mk_of_class(recty, "SepFrame.heap_state_type") diff --git a/tools/c-parser/isar_install.ML b/tools/c-parser/isar_install.ML index 358c207457..0885eb978b 100644 --- a/tools/c-parser/isar_install.ML +++ b/tools/c-parser/isar_install.ML @@ -147,7 +147,9 @@ fun get_Csyntax thy s = let val cpp_option = case Config.get_global thy cpp_path of "" => NONE - | s => SOME s + | s => SOME (if Path.is_absolute (Path.explode s) + then s + else Path.implode (Path.append (Resources.master_directory thy) (Path.explode s))) val cpp_error_count = Config.get_global thy report_cpp_errors val (ast0, _) = StrictCParser.parse @@ -538,17 +540,17 @@ fun new_include s thy = C_Includes.map (fn sl => mk_thy_relative thy s::sl) thy val _ = Outer_Syntax.command @{command_keyword "new_C_include_dir"} "add a directory to the include path" - (P.text >> (Toplevel.theory o new_include)) + (P.embedded >> (Toplevel.theory o new_include)) val file_inclusion = let val typoptions = - P.reserved mtypN |-- (P.$$$ "=" |-- P.text >> MachineState) || - P.reserved ghosttypN |-- (P.$$$ "=" |-- P.text >> GhostState) || - P.reserved rootsN |-- (P.$$$ "=" |-- (P.$$$ "[" |-- P.enum1 "," P.text --| P.$$$ "]") >> CRoots) + P.reserved mtypN |-- (P.$$$ "=" |-- P.embedded >> MachineState) || + P.reserved ghosttypN |-- (P.$$$ "=" |-- P.embedded >> GhostState) || + P.reserved rootsN |-- (P.$$$ "=" |-- (P.$$$ "[" |-- P.enum1 "," P.embedded --| P.$$$ "]") >> CRoots) in ((Scan.option (P.$$$ memsafeN)) -- (Scan.option (P.$$$ typesN)) -- - (Scan.option (P.$$$ defsN)) -- P.text -- + (Scan.option (P.$$$ defsN)) -- P.embedded -- (Scan.option (P.$$$ "[" |-- P.enum1 "," typoptions --| P.$$$ "]"))) >> (Toplevel.theory o install_C_file) @@ -564,7 +566,7 @@ val _ = Outer_Syntax.command @{command_keyword "install_C_types"} "install types from a C file" - (P.text >> (Toplevel.theory o install_C_types)) + (P.embedded >> (Toplevel.theory o install_C_types)) end diff --git a/tools/c-parser/mkrelease b/tools/c-parser/mkrelease index 234d5d95b4..97afbdddd4 100755 --- a/tools/c-parser/mkrelease +++ b/tools/c-parser/mkrelease @@ -32,7 +32,8 @@ die () if [ $# -ne 1 ] then echo "Usage:" >&2 - die " $0 tag" >&2 + echo " $0 " + die "e.g. $0 1.20" >&2 fi # Get the directory that this script is running in. @@ -109,27 +110,17 @@ done # other misc files cp -v lib/Word_Lib/ROOT "$outputdir/src/lib/Word_Lib/" +cp -v lib/Basics/ROOT "$outputdir/src/lib/Basics/" +cp -v lib/ML_Utils/ROOT "$outputdir/src/lib/ML_Utils/" echo "Creating ROOTS file" cat >"$outputdir/src/ROOTS" < "$outputdir/src/lib/ROOT" < "$outputdir/src/c-parser/standalone-parser/Makefile" popd > /dev/null - echo "Making PDF of ctranslation file." cd "$outputdir/src/c-parser/doc" make ctranslation.pdf > /dev/null @@ -192,22 +182,6 @@ mv ctranslation.pdf "$outputdir/doc" popd > /dev/null -lookforlicense=$(find "$outputdir" \! -name '*.lex.sml' \! -name '*.grm.sml' \! -type d -exec grep -q @LICENSE \{\} \; -print) -if [ -n "$lookforlicense" ] -then - die "### @LICENSE detected in file(s) $lookforlicense" -else - echo "No @LICENSEs remain unexpanded - good." -fi - -lookformichaeln=$(find "$outputdir" \! -name RELEASES.md \! 
-type d -exec grep -q /michaeln \{\} \; -print) -if [ -n "$lookformichaeln" ] -then - die "### /michaeln detected in file(s) $lookformichaeln" -else - echo "No occurrences of \"/michaeln\" - good." -fi - echo -n "Compressing into $stem.tar.gz: " mv "$tmpdir/c-parser" "$tmpdir/$stem" diff --git a/tools/c-parser/modifies_proofs.ML b/tools/c-parser/modifies_proofs.ML index 814aae1832..95036c9e0c 100644 --- a/tools/c-parser/modifies_proofs.ML +++ b/tools/c-parser/modifies_proofs.ML @@ -444,6 +444,6 @@ val _ = Outer_Syntax.command @{command_keyword "cond_sorry_modifies_proofs"} "set sorry_modifies_proof option, conditional on env variable" - (Parse.text >> (Toplevel.generic_theory o cond_sorry_modifies_proofs)) + (Parse.embedded >> (Toplevel.generic_theory o cond_sorry_modifies_proofs)) end (* struct *) diff --git a/tools/c-parser/standalone-parser/Makefile b/tools/c-parser/standalone-parser/Makefile index ec008e30ee..9f42132751 100644 --- a/tools/c-parser/standalone-parser/Makefile +++ b/tools/c-parser/standalone-parser/Makefile @@ -40,17 +40,21 @@ TOKENIZERS=$(TOKENIZER_ARM) $(TOKENIZER_ARM_HYP) $(TOKENIZER_AARCH64) \ .PHONY: all cparser_tools stp_all standalone-cparser standalone-tokenizer +# Target "all" gains additional dependencies in the included Makefile below, so +# we keep "stp_all" as the first (= default) target +stp_all: standalone-cparser standalone-tokenizer all: stp_all -standalone-cparser stp_all: $(STPARSERS) -standalone-tokenizer stp_all: $(TOKENIZERS) +standalone-cparser: $(STPARSERS) +standalone-tokenizer: $(TOKENIZERS) include $(STP_PFX)/../Makefile STP_CLEAN_TARGETS := $(STPARSERS) $(TOKENIZERS) $(STP_PFX)/c-parser.o $(STP_PFX)/table.ML -$(STP_PFX)/table.ML: $(ISABELLE_HOME)/src/Pure/General/table.ML - sed -e '/ML.pretty-printing/,/final.declarations/d' < $< > $@ +$(STP_PFX)/table.ML: $(ISABELLE_HOME)/src/Pure/General/table.ML $(STP_PFX)/Makefile + sed -e '/\(\* cache \*\)/,/final.declarations/d' < $< | \ + sed -e "s/^ val unsynchronized_cache:.*a/ (* removed unsynchronized_cache *)/" > $@ $(ARCH_DIRS): mkdir -p $@ diff --git a/tools/c-parser/standalone-parser/basics.sml b/tools/c-parser/standalone-parser/basics.sml index ab2f9ca988..2f2a5491d6 100644 --- a/tools/c-parser/standalone-parser/basics.sml +++ b/tools/c-parser/standalone-parser/basics.sml @@ -72,6 +72,9 @@ struct fun K x y = x fun I x = x + fun the_default x (SOME y) = y + | the_default x NONE = x; + end open Basics @@ -145,9 +148,6 @@ struct fun uncurry f (x,y) = f x y - (*union of sets represented as lists: no repetitions*) - fun union eq = List.foldl (uncurry (insert eq)) - fun single x = [x] fun get_first f l = @@ -164,6 +164,4 @@ struct end -infix union - open Library diff --git a/tools/c-parser/standalone-parser/c-parser.mlb b/tools/c-parser/standalone-parser/c-parser.mlb index 18d5e68264..1aa358895a 100644 --- a/tools/c-parser/standalone-parser/c-parser.mlb +++ b/tools/c-parser/standalone-parser/c-parser.mlb @@ -12,6 +12,8 @@ in ../Feedback.ML ../Binaryset.ML basics.sml + library.ML + unsynchronized.ML ../topo_sort.ML ann "nonexhaustiveMatch ignore" diff --git a/tools/c-parser/standalone-parser/library.ML b/tools/c-parser/standalone-parser/library.ML index 378c57d883..f4a4f9b44f 100644 --- a/tools/c-parser/standalone-parser/library.ML +++ b/tools/c-parser/standalone-parser/library.ML @@ -7,13 +7,28 @@ signature LIBRARY = sig + val is_equal: order -> bool + + val build: ('a list -> 'a list) -> 'a list val sort : ('a * 'a -> order) -> 'a list -> 'a list + val foldl: ('a * 'b -> 'a) -> 'a * 'b list -> 'a + 
+ val insert: ('a * 'a -> bool) -> 'a -> 'a list -> 'a list + val remove: ('b * 'a -> bool) -> 'b -> 'a list -> 'a list + val update: ('a * 'a -> bool) -> 'a -> 'a list -> 'a list + val union: ('a * 'a -> bool) -> 'a list -> 'a list -> 'a list + val merge: ('a * 'a -> bool) -> 'a list * 'a list -> 'a list + end structure Library : LIBRARY = struct +fun is_equal ord = ord = EQUAL; + +fun build (f: 'a list -> 'a list) = f []; + (*stable mergesort -- preserves order of equal elements*) fun mergesort unique ord = let @@ -63,4 +78,31 @@ fun mergesort unique ord = fun sort ord = mergesort false ord; +(* (op @) (e, [x1, ..., xn]) ===> ((e @ x1) @ x2) ... @ xn + for operators that associate to the left (TAIL RECURSIVE)*) +fun foldl (f: 'a * 'b -> 'a) : 'a * 'b list -> 'a = + let fun itl (e, []) = e + | itl (e, a::l) = itl (f(e, a), l) + in itl end; + +fun insert eq x xs = if member eq xs x then xs else x :: xs; + +fun remove eq x xs = if member eq xs x then filter_out (fn y => eq (x, y)) xs else xs; + +fun update eq x list = + (case list of + [] => [x] + | y :: rest => + if member eq rest x then x :: remove eq x list + else if eq (x, y) then list else x :: list); + +fun union eq = fold (insert eq); + +fun merge eq (xs, ys) = + if pointer_eq (xs, ys) then xs + else if null xs then ys + else fold_rev (insert eq) ys xs; + end + +val is_equal = Library.is_equal diff --git a/tools/c-parser/standalone-parser/tokenizer.mlb b/tools/c-parser/standalone-parser/tokenizer.mlb index 51e7dd06db..a7f01a43bc 100644 --- a/tools/c-parser/standalone-parser/tokenizer.mlb +++ b/tools/c-parser/standalone-parser/tokenizer.mlb @@ -9,6 +9,8 @@ $(SML_LIB)/basis/mlton.mlb (* for pointer equality *) ../Feedback.ML ../Binaryset.ML basics.sml +library.ML +unsynchronized.ML ../topo_sort.ML ann "nonexhaustiveMatch ignore" diff --git a/tools/c-parser/standalone-parser/unsynchronized.ML b/tools/c-parser/standalone-parser/unsynchronized.ML new file mode 100644 index 0000000000..114cec177e --- /dev/null +++ b/tools/c-parser/standalone-parser/unsynchronized.ML @@ -0,0 +1,36 @@ +(* SPDX-License-Identifier: BSD-3-Clause *) +(* SPDX-FileCopyrightText: Markus Wenzel, TU Muenchen *) + +(* Extracted from Isabelle sources (src/Pure/Concurrent/unsynchronized.ML), + reduced to work for mlton *) + +signature UNSYNCHRONIZED = +sig + datatype ref = datatype ref + val := : 'a ref * 'a -> unit + val ! : 'a ref -> 'a + val change: 'a ref -> ('a -> 'a) -> unit + val change_result: 'a ref -> ('a -> 'b * 'a) -> 'b + val inc: int ref -> int + val dec: int ref -> int + val add: int ref -> int -> int +end; + +structure Unsynchronized: UNSYNCHRONIZED = +struct + +(* regular references *) + +datatype ref = datatype ref; + +val op := = op :=; +val ! = !; + +fun change r f = r := f (! r); +fun change_result r f = let val (x, y) = f (! r) in r := y; x end; + +fun inc i = (i := ! i + (1: int); ! i); +fun dec i = (i := ! i - (1: int); ! i); +fun add i n = (i := ! i + (n: int); ! 
i); + +end; diff --git a/tools/c-parser/testfiles/factorial.thy b/tools/c-parser/testfiles/factorial.thy index f23f069344..5fac538963 100644 --- a/tools/c-parser/testfiles/factorial.thy +++ b/tools/c-parser/testfiles/factorial.thy @@ -5,7 +5,7 @@ *) theory factorial -imports "CParser.CTranslation" MachineWords +imports "CParser.CTranslation" "ARM/imports/MachineWords" begin declare hrs_simps [simp add] @@ -324,11 +324,13 @@ lemma (in factorial_global_addresses) mem_safe_factorial: shows "mem_safe (\ret__ptr_to_unsigned_long :== PROC factorial(\n)) \" apply(subst mem_safe_restrict) apply(rule intra_mem_safe) - apply (insert factorial_impl free_impl alloc_impl) + apply (insert factorial_impl free_impl alloc_impl)[1] apply(drule_tac t="Some C" in sym) - apply(simp_all add: restrict_map_def call_def block_def whileAnno_def - free_body_def alloc_body_def factorial_body_def creturn_def - split: if_split_asm option.splits) - by (force simp: intra_sc) + apply(simp add: restrict_map_def call_def block_def whileAnno_def block_exn_def + free_body_def alloc_body_def factorial_body_def creturn_def + split: if_split_asm option.splits) + subgoal by (force simp: intra_sc) + apply clarsimp + done end diff --git a/tools/c-parser/testfiles/fnptr.thy b/tools/c-parser/testfiles/fnptr.thy index 45788060fa..22c00ad5a6 100644 --- a/tools/c-parser/testfiles/fnptr.thy +++ b/tools/c-parser/testfiles/fnptr.thy @@ -36,74 +36,66 @@ definition lemma cvc_updates_global1: "!!x. \ \ \ \global1 = x \ symbols_ok \ \ret__int :== PROC callvoidcaller() \ \global1 = x + 1 \" -apply (hoare_rule HoarePartial.ProcNoRec1) -apply vcg_step -apply vcg_step -apply vcg_step -apply vcg_step -apply vcg_step -apply vcg_step -apply (rule creturn_wp) - apply vcg_step - apply vcg_step - defer - apply vcg_step - apply vcg_step - apply vcg_step - apply vcg_step - apply vcg_step - apply vcg_step - apply (rule creturn_wp) - apply vcg_step - apply vcg_step - apply (rule order_refl) - - apply (rule hoare_indirect_call_procs_consistent, - rule callable1_name) - apply vcg - - using callable1_name - apply (clarsimp simp: symbols_ok_def scast_id - procs_consistent_safe) -done + apply (hoare_rule HoarePartial.ProcNoRec1) + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + defer + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply (rule order_refl) + apply (rule hoare_indirect_call_procs_consistent, rule callable1_name) + apply vcg + using callable1_name + apply (clarsimp simp: symbols_ok_def procs_consistent_safe) + done lemma cic_returns_4: "\\ {| symbols_ok |} \ret__int :== PROC callintcaller() {| \ret__int = 4 |}" -apply (hoare_rule HoarePartial.ProcNoRec1) -apply vcg_step -apply vcg_step -apply vcg_step -apply vcg_step -apply vcg_step -apply vcg_step -apply (rule creturn_wp) -apply vcg_step + apply (hoare_rule HoarePartial.ProcNoRec1) + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step -apply (rule HoarePartial.CallBody - [where R = "%s t. { u. ret__int_' t = 4 }" - and \=\, OF _ _ _ intcaller_impl]) + apply (rule HoarePartial.CallBody + [where R = "%s t. { u. 
ret__int_' t = 4 }" + and \=\, OF _ _ _ intcaller_impl]) defer apply (simp only: intcaller_body_def) -apply (rule allI) -apply vcg_step -apply vcg_step -apply vcg_step -apply vcg_step -apply vcg_step -apply vcg_step -apply (rule creturn_wp) -apply vcg_step -apply vcg_step -apply (rule order_refl) -apply (rule hoare_indirect_call_procs_consistent) -apply (rule intcallable2_name) -apply simp -apply vcg -apply vcg -using intcallable2_name -apply (auto simp: symbols_ok_def procs_consistent_safe)[1] -done + apply (rule allI) + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply vcg_step + apply (rule order_refl) + apply (rule hoare_indirect_call_procs_consistent) + apply (rule intcallable2_name) + apply simp + apply vcg + apply vcg + using intcallable2_name + apply (auto simp: symbols_ok_def procs_consistent_safe)[1] + done end diff --git a/tools/c-parser/umm_heap/ArrayAssertion.thy b/tools/c-parser/umm_heap/ArrayAssertion.thy index 8d6a1e1b35..002bbb0585 100644 --- a/tools/c-parser/umm_heap/ArrayAssertion.thy +++ b/tools/c-parser/umm_heap/ArrayAssertion.thy @@ -106,6 +106,13 @@ lemma array_ptr_valid_array_assertionI: by (auto dest: array_ptr_valid_array_assertionD simp: array_assertion_shrink_right) +lemma h_t_array_valid_array_assertion: + "h_t_array_valid htd ptr n \ 0 < n + \ array_assertion ptr n htd" + apply (simp add: array_assertion_def) + apply (fastforce intro: exI[where x=0]) + done + text \Derived from array_assertion, an appropriate assertion for performing a pointer addition, or for dereferencing a pointer addition (the strong case). @@ -172,4 +179,54 @@ lemma ptr_arr_retyps_to_retyp: \ ptr_arr_retyps n (p :: ('c :: wf_type) ptr) = ptr_retyp (ptr_coerce p :: ('c['b]) ptr)" by (auto simp: ptr_arr_retyps_def ptr_retyp_def typ_slices_def typ_uinfo_array_tag_n_m_eq) +lemma size_td_uinfo_array_tag_n_m[simp]: + "size_td (uinfo_array_tag_n_m (t :: 'a :: c_type itself) n m) + = size_of (TYPE('a)) * n" + apply (induct n) + apply (simp add: uinfo_array_tag_n_m_def) + apply (simp add: uinfo_array_tag_n_m_def size_of_def) + done + +lemma typ_slice_list_array: + "x < size_td td * n + \ typ_slice_list (map (\i. DTPair td (nm i)) [0.. 
k < n + \ gd (p +\<^sub>p int k) + \ h_t_valid htd gd (p +\<^sub>p int k)" + supply if_split[split del] + apply (clarsimp simp: h_t_array_valid_def h_t_valid_def valid_footprint_def + size_of_def[symmetric, where t="TYPE('a)"] Let_def) + apply (drule_tac x="k * size_of TYPE('a) + y" in spec) + apply (drule mp) + apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) + apply (simp add: mult.commute) + apply (clarsimp simp: ptr_add_def add.assoc) + apply (erule map_le_trans[rotated]) + apply (clarsimp simp: uinfo_array_tag_n_m_def) + apply (subst typ_slice_list_array) + apply (frule_tac k="size_of TYPE('a)" in mult_le_mono1[where j=n, OF Suc_leI]) + apply (simp add: mult.commute size_of_def) + apply (simp add: size_of_def list_map_mono) + done + end diff --git a/tools/c-parser/umm_heap/Arrays.thy b/tools/c-parser/umm_heap/Arrays.thy index 98bbc7bcd4..1898c83fb4 100644 --- a/tools/c-parser/umm_heap/Arrays.thy +++ b/tools/c-parser/umm_heap/Arrays.thy @@ -255,7 +255,7 @@ lemma fcp_cong [cong]: bnf "('a,'n::finite) array" map: map_array sets: set_array - bd: "BNF_Cardinal_Arithmetic.csum natLeq (card_of (UNIV :: 'n set))" + bd: "BNF_Cardinal_Arithmetic.csum natLeq (card_suc (card_of (UNIV :: 'n set)))" rel: rel_array proof - show "map_array id = id" by simp @@ -273,11 +273,14 @@ next show "set_array \ map_array f = (`) f \ set_array" by (rule map_array_set_img) next - show "card_order (BNF_Cardinal_Arithmetic.csum natLeq (card_of UNIV))" - by (simp add: card_of_card_order_on card_order_csum natLeq_card_order) + show "card_order (BNF_Cardinal_Arithmetic.csum natLeq (card_suc (card_of UNIV)))" + by (simp add: card_order_bd_fun) next - show " BNF_Cardinal_Arithmetic.cinfinite (BNF_Cardinal_Arithmetic.csum natLeq (card_of UNIV))" + show "BNF_Cardinal_Arithmetic.cinfinite (BNF_Cardinal_Arithmetic.csum natLeq (card_suc (card_of UNIV)))" by (simp add: cinfinite_csum natLeq_cinfinite) +next + show "regularCard (BNF_Cardinal_Arithmetic.csum natLeq (card_suc (card_of UNIV)))" + by (simp add: regularCard_bd_fun) next fix R :: "'a \ 'b \ bool" and S :: "'b \ 'c \ bool" show "rel_array R OO rel_array S \ rel_array (R OO S)" @@ -297,12 +300,13 @@ next next fix x :: "'a['n::finite]" let ?U = "UNIV :: 'n set" - have "ordLeq3 (card_of (set_array x)) (card_of ?U)" by transfer (rule card_of_image) + have "ordLeq3 (card_of (set_array x)) (card_of ?U)" + by transfer (rule card_of_image) also - have "ordLeq3 (card_of ?U) (BNF_Cardinal_Arithmetic.csum natLeq (card_of ?U))" - by (rule ordLeq_csum2) (rule card_of_Card_order) + have "ordLess2 (card_of ?U) (BNF_Cardinal_Arithmetic.csum natLeq (card_suc (card_of ?U)))" + by (simp add: ordLess_bd_fun) finally - show "ordLeq3 (card_of (set_array x)) (BNF_Cardinal_Arithmetic.csum natLeq (card_of ?U))" . + show "ordLess2 (card_of (set_array x)) (BNF_Cardinal_Arithmetic.csum natLeq (card_suc (card_of ?U)))" . qed end diff --git a/tools/c-parser/umm_heap/StructSupport.thy b/tools/c-parser/umm_heap/StructSupport.thy index 1eff974eb8..a18f00d3b6 100644 --- a/tools/c-parser/umm_heap/StructSupport.thy +++ b/tools/c-parser/umm_heap/StructSupport.thy @@ -541,8 +541,8 @@ lemma lift_t_hrs_mem_update_fld: Some (adjust_ti (typ_info_t TYPE('b)) xf (xfu \ (\x _. x)), m')" and xf_xfu: "fg_cons xf (xfu \ (\x _. x))" and cl: "lift_t g hp ptr = Some z" - shows "(lift_t g (hrs_mem_update (heap_update (Ptr &(ptr\f)) val) hp)) = - lift_t g hp(ptr \ xfu (\_. val) z)" + shows "((lift_t g) (hrs_mem_update (heap_update (Ptr &(ptr\f)) val) hp)) = + (lift_t g hp)(ptr \ xfu (\_. 
val) z)" (is "?LHS = ?RHS") proof - let ?ati = "adjust_ti (typ_info_t TYPE('b)) xf (xfu \ (\x _. x))" @@ -566,8 +566,8 @@ proof - qed also - have "\ = lift_t g hp(ptr \ update_ti_t (adjust_ti (typ_info_t TYPE('b)) xf (xfu \ (\x _. x))) - (to_bytes_p val) z)" + have "\ = (lift_t g hp)(ptr \ update_ti_t (adjust_ti (typ_info_t TYPE('b)) xf (xfu \ (\x _. x))) + (to_bytes_p val) z)" by (simp add: cl eui fl super_field_update_lookup) also have "\ = ?RHS" using xf_xfu diff --git a/tools/c-parser/umm_heap/TypHeap.thy b/tools/c-parser/umm_heap/TypHeap.thy index 6c73046c26..1378b095db 100644 --- a/tools/c-parser/umm_heap/TypHeap.thy +++ b/tools/c-parser/umm_heap/TypHeap.thy @@ -440,6 +440,21 @@ lemma proj_d_lift_state: apply(auto simp: proj_d_def lift_state_def Let_def split: option.splits) done +lemma proj_d_lift_state_hrs_htd_update[simp]: + "proj_d (lift_state (hrs_htd_update f hp)) = f (hrs_htd hp)" + by (cases hp) (simp add: hrs_htd_update_def proj_d_lift_state hrs_htd_def) + +lemma proj_d_lift_state_hrs_htd[simp]: + "proj_d (lift_state hp), g \\<^sub>t x = hrs_htd hp, g \\<^sub>t x" + apply (cases hp) + apply (simp add: proj_d_lift_state hrs_htd_def) + done + +lemma dom_lift_t_heap_update: + "dom (lift_t g (hrs_mem_update v hp)) = dom (lift_t g hp)" + by (clarsimp simp: lift_t_def lift_typ_heap_if s_valid_def hrs_htd_def hrs_mem_update_def + split_def dom_def) + lemma lift_state_proj [simp]: "wf_heap_val s \ lift_state (proj_h s,proj_d s) = s" apply (clarsimp simp: proj_h_def proj_d_def lift_state_def fun_eq_iff @@ -1784,7 +1799,7 @@ lemma field_names_same: lemma lift_t_heap_update: "d,g \\<^sub>t p \ lift_t g (heap_update p v h,d) = - (lift_t g (h,d) (p \ (v::'a::mem_type)))" + ((lift_t g (h,d)) (p \ (v::'a::mem_type)))" apply(subst lift_t_sub_field_update) apply fast apply(simp add: sub_typ_proper_def) diff --git a/tools/haskell-translator/lhs_pars.py b/tools/haskell-translator/lhs_pars.py index 433204c7d4..ecdd94604d 100644 --- a/tools/haskell-translator/lhs_pars.py +++ b/tools/haskell-translator/lhs_pars.py @@ -515,7 +515,7 @@ def type_transform(string): # apply everything locally then work back up bstring = braces.str(string, '(', ')') bits = bstring.split('->') - r = ' \ ' + r = r' \ ' if len(bits) == 1: bits = bstring.split(',') r = ' * ' @@ -612,7 +612,7 @@ def constructor_reversing(tokens): listToken = braces.str('(List %s)' % tokens[2], '(', ')') return [listToken, tokens[0]] elif tokens[0] == 'array': - arrow_token = braces.str('\', '(', ')') + arrow_token = braces.str(r'\', '(', ')') return [tokens[1], arrow_token, tokens[2]] elif tokens[0] == 'either': plus_token = braces.str('+', '(', ')') @@ -861,7 +861,7 @@ def named_extractor_update_lemma(ex_name, up_name): def named_extractor_definitions(name, map, type, header, constructors): lines = [] lines.append('primrec') - lines.append(' %s :: "%s \ %s"' + lines.append(' %s :: "%s \\ %s"' % (name, header, type)) lines.append('where') is_first = True @@ -883,7 +883,7 @@ def named_extractor_definitions(name, map, type, header, constructors): def named_update_definitions(name, map, type, header, constructors): lines = [] lines.append('primrec') - ra = '\' + ra = r'\' if len(type.split()) > 1: type = '(%s)' % type lines.append(' %s_update :: "(%s %s %s) %s %s %s %s"' @@ -916,17 +916,17 @@ def named_constructor_translation(name, map, header): lines.append('abbreviation (input)') l = ' %s_trans :: "' % name for n, type in map: - l = l + '(' + type + ') \ ' - l = l + '%s" ("%s\'_ \ %s= _' % (header, name, map[0][0]) + l = l + '(' + type + r') 
\ ' + l = l + '%s" ("%s\'_ \\ %s= _' % (header, name, map[0][0]) for n, type in map[1:]: l = l + ', %s= _' % n.replace("_", "'_") - l = l + ' \")' + l = l + r' \")' lines.append(l) lines.append('where') - l = ' "%s_ \ %s= v0' % (name, map[0][0]) + l = ' "%s_ \\ %s= v0' % (name, map[0][0]) for i, (n, type) in enumerate(map[1:]): l = l + ', %s= v%d' % (n, i + 1) - l = l + ' \ == %s' % name + l = l + ' \\ == %s' % name for i in range(len(map)): l = l + ' v%d' % i l = l + '"' @@ -938,15 +938,15 @@ def named_constructor_translation(name, map, header): def named_constructor_check(name, map, header): lines = [] lines.append('definition') - lines.append(' is%s :: "%s \ bool"' % (name, header)) + lines.append(' is%s :: "%s \\ bool"' % (name, header)) lines.append('where') - lines.append(' "is%s v \ case v of' % name) + lines.append(' "is%s v \\ case v of' % name) l = ' %s ' % name for i, _ in enumerate(map): l = l + 'v%d ' % i - l = l + '\ True' + l = l + r'\ True' lines.append(l) - lines.append(' | _ \ False"') + lines.append(r' | _ \ False"') return lines @@ -980,7 +980,7 @@ def type_wrapper_type(header, cons, rhs, d, decons=None): % (decons, header, header, header, header), 'where', ' %s_update_def[simp]:' % decons, - ' "%s_update f y \ f y"' % decons, + ' "%s_update f y \\ f y"' % decons, '' ]) lines.extend(named_constructor_translation(cons, [(decons, decons_type) @@ -1097,7 +1097,7 @@ def storable_instance_proofs(header, canonical, d): next_type_tag = next_type_tag + 1 proofs.extend([ '', 'defs (overloaded)', ' typetag_%s[simp]:' % header, - ' "typetag (x :: %s) \ %d"' % (header, next_type_tag), '' + ' "typetag (x :: %s) \\ %d"' % (header, next_type_tag), '' 'instance %s :: dynamic' % header, ' by (intro_classes, simp)' ]) @@ -1135,7 +1135,7 @@ def storable_instance_proofs(header, canonical, d): else: extradefs.extend([ ' loadObject_%s[simp]:' % header, - ' "(loadObject p q n obj) :: %s \' % header, + ' "(loadObject p q n obj) :: %s \\' % header, ' loadObject_default p q n obj"', ]) @@ -1152,7 +1152,7 @@ def storable_instance_proofs(header, canonical, d): else: extradefs.extend([ ' updateObject_%s[simp]:' % header, - ' "updateObject (val :: %s) \' % header, + ' "updateObject (val :: %s) \\' % header, ' updateObject_default val"', ]) @@ -1203,7 +1203,7 @@ def pspace_storable_instance_proofs(header, canonical, d): else: extradefs.extend([ ' loadObject_%s[simp]:' % header, - ' "(loadObject p q n obj) :: %s kernel \' % header, + ' "(loadObject p q n obj) :: %s kernel \\' % header, ' loadObject_default p q n obj"', ]) @@ -1220,7 +1220,7 @@ def pspace_storable_instance_proofs(header, canonical, d): else: extradefs.extend([ ' updateObject_%s[simp]:' % header, - ' "updateObject (val :: %s) \' % header, + ' "updateObject (val :: %s) \\' % header, ' updateObject_default val"', ]) @@ -1268,7 +1268,7 @@ def singular_canonical(): if call.current_context: lines.append('interpretation Arch .') lines.append('definition') - lines.append(' enum_%s: "enum_class.enum \ map %s enum"' + lines.append(' enum_%s: "enum_class.enum \\ map %s enum"' % (header, cons)) else: @@ -1280,7 +1280,7 @@ def singular_canonical(): if call.current_context: lines.append('interpretation Arch .') lines.append('definition') - lines.append(' enum_%s: "enum_class.enum \ ' % header) + lines.append(' enum_%s: "enum_class.enum \\ ' % header) if len(cons_no_args) == 0: lines.append(' []') else: @@ -1300,11 +1300,11 @@ def singular_canonical(): lines.append(' by (meson injI %s.inject)' % header) lines.append('') lines.append('definition') - 
lines.append(' "enum_class.enum_all (P :: %s \ bool) \ Ball UNIV P"' + lines.append(' "enum_class.enum_all (P :: %s \\ bool) \\ Ball UNIV P"' % header) lines.append('') lines.append('definition') - lines.append(' "enum_class.enum_ex (P :: %s \ bool) \ Bex UNIV P"' + lines.append(' "enum_class.enum_ex (P :: %s \\ bool) \\ Bex UNIV P"' % header) lines.append('') lines.append(' instance') @@ -1327,7 +1327,7 @@ def singular_canonical(): if call.current_context: lines.append('interpretation Arch .') lines.append('definition') - lines.append(' enum_alt_%s: "enum_alt \ ' % header) + lines.append(' enum_alt_%s: "enum_alt \\ ' % header) lines.append(' alt_from_ord (enum :: %s list)"' % header) lines.append('instance ..') lines.append('end') @@ -1401,9 +1401,9 @@ def bij_instance(classname, typename, constructor, fns): names2 = tuple([name + "'" for name in names]) fstr1 = fstr % names fstr2 = fstr % names2 - L.append(' %s_%s: "%s \' % (fname, typename, fstr1)) + L.append(' %s_%s: "%s \\' % (fname, typename, fstr1)) for name in names: - L.append(" case %s of %s %s' \" + L.append(" case %s of %s %s' \\" % (name, constructor, name)) if cast_return: L.append(' %s (%s)"' % (constructor, fstr2)) @@ -1466,25 +1466,25 @@ def body_transform(body, defined, sig, nopattern=False): bits = line.split() numLiftIO[0] = numLiftIO[0] + 1 rhs = '(%d :: Int) %s' % (numLiftIO[0], ' '.join(bits[1:])) - line = '%s\ underlying_arch_op %s' % (line, rhs) + line = '%s\\ underlying_arch_op %s' % (line, rhs) children = [] elif '=' in line: - line = '\'.join(line.split('=', 1)) + line = '\\'.join(line.split('=', 1)) elif leading_bar.match(children[0][0]): pass elif '=' in children[0][0]: (nextline, c2) = children[0] - children[0] = ('\'.join(nextline.split('=', 1)), c2) + children[0] = ('\\'.join(nextline.split('=', 1)), c2) else: warning('def of %s missing =\n' % defined, filename) if children and (children[-1][0].endswith('where') or children[-1][0].lstrip().startswith('where')): - bits = line.split('\') + bits = line.split(r'\') where_clause = where_clause_transform(children[-1]) children = children[:-1] if len(bits) == 2 and bits[1].strip(): - line = bits[0] + '\' + line = bits[0] + r'\' new_line = ' ' * len(line) + bits[1] children = [(new_line, children)] else: @@ -1499,7 +1499,7 @@ def body_transform(body, defined, sig, nopattern=False): (line, children) = do_clauses_transform((line, children), sig) if children and leading_bar.match(children[0][0]): - line = line + ' \' + line = line + r' \' children = guarded_body_transform(children, ' = ') children = where_clause + children @@ -1527,7 +1527,7 @@ def bracket_dollar_lambdas(xxx_todo_changeme): (line, children) = xxx_todo_changeme if dollar_lambda_regex.search(line): [left, right] = dollar_lambda_regex.split(line) - line = '%s(\%s' % (left, right) + line = '%s(\\%s' % (left, right) both = (line, children) if has_trailing_string(';', both): both = remove_trailing_string(';', both) @@ -1799,7 +1799,7 @@ def do_clauses_transform(xxx_todo_changeme3, rawsig, type=None): def do_clause_pattern(line, children, type, n=0): bits = line.split('<-') - default = [('\'.join(bits), children)] + default = [('\\'.join(bits), children)] if len(bits) == 1: return default (left, right) = line.split('<-', 1) @@ -1997,8 +1997,8 @@ def case_clauses_transform(xxx_todo_changeme5): x = str(bits[0]) + ':: ' + type_transform(str(bits[1])) if children and children[-1][0].strip().startswith('where'): - warning(f'where clause in case: %r, removing case.' 
% line, filename) - return (beforecase + '\ \case removed\ undefined', []) + warning('where clause in case: %r, removing case.' % line, filename) + return (beforecase + r'\ \case removed\ undefined', []) # where_clause = where_clause_transform(children[-1]) # children = children[:-1] # (in_stmt, l) = where_clause[-1] @@ -2040,7 +2040,7 @@ def case_clauses_transform(xxx_todo_changeme5): conv = get_case_conv(cases) if conv == '': warning('blanked case in caseconvs', filename) - return (beforecase + '\ \case removed\ undefined', []) + return (beforecase + r'\ \case removed\ undefined', []) if not conv: warning('no caseconv for %r\n' % (cases, ), filename) if cases not in cases_added: @@ -2050,7 +2050,7 @@ def case_clauses_transform(xxx_todo_changeme5): f.write('%s ---X>\n\n' % casestr) f.close() cases_added[cases] = 1 - return (beforecase + '\ \case removed\ undefined', []) + return (beforecase + r'\ \case removed\ undefined', []) conv = subs_nums_and_x(conv, x) new_line = beforecase + '(' + conv[0][0] @@ -2113,7 +2113,7 @@ def lhs_transform(line): if '(' not in line: return line - [left, right] = line.split('\') + [left, right] = line.split(r'\') ws = left[:len(left) - len(left.lstrip())] @@ -2124,17 +2124,17 @@ def lhs_transform(line): for (i, bit) in enumerate(bits): if bit.startswith('('): bits[i] = 'arg%d' % i - case = 'case arg%d of %s \ ' % (i, bit) + case = 'case arg%d of %s \\ ' % (i, bit) right = case + right - return ws + ' '.join([str(bit) for bit in bits]) + '\' + right + return ws + ' '.join([str(bit) for bit in bits]) + r'\' + right def lhs_de_underscore(line): if '_' not in line: return line - [left, right] = line.split('\') + [left, right] = line.split(r'\') ws = left[:len(left) - len(left.lstrip())] @@ -2145,7 +2145,7 @@ def lhs_de_underscore(line): if bit == '_': bits[i] = 'arg%d' % i - return ws + ' '.join([str(bit) for bit in bits]) + ' \' + right + return ws + ' '.join([str(bit) for bit in bits]) + r' \' + right regexes = [ @@ -2167,11 +2167,11 @@ def lhs_de_underscore(line): (re.compile('assert '), 'haskell_assert '), (re.compile('assertE '), 'haskell_assertE '), (re.compile('=='), '='), - (re.compile(r"\(/="), '(\x. x \'), - (re.compile('/='), '\'), + (re.compile(r"\(/="), r'(\x. 
x \'), + (re.compile('/='), r'\'), (re.compile('"([^"])*"'), '[]'), - (re.compile('&&'), '\'), - (re.compile('\|\|'), '\'), + (re.compile('&&'), r'\'), + (re.compile('\|\|'), r'\'), (re.compile(r"(\W)not(\s)"), r"\1Not\2"), (re.compile(r"(\W)and(\s)"), r"\1andList\2"), (re.compile(r"(\W)or(\s)"), r"\1orList\2"), @@ -2206,7 +2206,7 @@ def lhs_de_underscore(line): (re.compile(r"\$!"), r"$"), (re.compile('([^>])>='), r'\1\'), (re.compile('>>([^=])'), r'>>_\1'), - (re.compile('<='), '\'), + (re.compile('<='), r'\'), (re.compile(r" \\\\ "), " `~listSubtract~` "), (re.compile(r"(\s\w+)\s*@\s*\w+\s*{\s*}\s*\"), r"\1 \"), @@ -2402,9 +2402,9 @@ def get_case_conv(cases): 'Nothing': 'None', 'Left': 'Inl', 'Right': 'Inr', - 'PPtr': '\ \PPtr\', - 'Register': '\ \Register\', - 'Word': '\ \Word\', + 'PPtr': r'\ \PPtr\', + 'Register': r'\ \Register\', + 'Word': r'\ \Word\', } unique_ids_per_file = {} @@ -2428,9 +2428,9 @@ def all_constructor_conv(cases): bits[j] = 'v%d' % get_next_unique_id() pat = ' '.join(bits) if i == 0: - conv.append((' %s \ ' % pat, i)) + conv.append((' %s \\ ' % pat, i)) else: - conv.append(('| %s \ ' % pat, i)) + conv.append(('| %s \\ ' % pat, i)) return conv @@ -2455,9 +2455,9 @@ def extended_pattern_conv(cases): bits = [constructor_conv_table.get(bit, bit) for bit in bits] pat = ''.join(bits) if i == 0: - conv.append((' %s \ ' % pat, i)) + conv.append((' %s \\ ' % pat, i)) else: - conv.append(('| %s \ ' % pat, i)) + conv.append(('| %s \\ ' % pat, i)) return conv @@ -2590,7 +2590,7 @@ def primrec_transform(d): else: l = " " + l is_not_first = True - l = l.split('\') + l = l.split(r'\') assert len(l) == 2 l = '= ('.join(l) (l, c) = remove_trailing_string('"', (l, c)) @@ -2708,16 +2708,16 @@ def get_lambda_body_lines(d): line = line[1:] # find \ in first or 2nd line - if '\' not in line and '\' in children[0][0]: + if r'\' not in line and r'\' in children[0][0]: (l, c) = children[0] children = c + children[1:] line = line + l - [lhs, rhs] = line.split('\', 1) + [lhs, rhs] = line.split(r'\', 1) bits = lhs.split() args = bits[1:] assert fn in bits[0] - line = '(\' + ' '.join(args) + '. ' + rhs + line = r'(\' + ' '.join(args) + '. ' + rhs # lines = ['(* body of %s *)' % fn, line] + flatten_tree (children) lines = [line] + flatten_tree(children) assert (lines[-1].endswith('"'))
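# Editor's sketch, not part of the original patch: a minimal illustration of why the
# hunks above rewrite backslash-heavy literals as raw strings (or double the
# backslashes). In Python 3, an unrecognised escape such as '\|' keeps its backslash
# but triggers a DeprecationWarning (later a SyntaxWarning/SyntaxError); a raw string
# yields the same value without the warning. The identifiers below are illustrative only.
import re

arrow = r' \<Rightarrow> '                 # raw string: the backslash is kept literally
assert arrow == ' \\<Rightarrow> '         # doubling the backslash is the non-raw equivalent

or_op = re.compile(r'\|\|')                # regex matching a literal Haskell '||'
assert or_op.search('a || b') is not None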